public void testDuellMemIndex() throws IOException {
  LineFileDocs lineFileDocs = new LineFileDocs(random());
  int numDocs = atLeast(10);
  MemoryIndex memory = randomMemoryIndex();
  for (int i = 0; i < numDocs; i++) {
    Directory dir = newDirectory();
    MockAnalyzer mockAnalyzer = new MockAnalyzer(random());
    mockAnalyzer.setMaxTokenLength(TestUtil.nextInt(random(), 1, IndexWriter.MAX_TERM_LENGTH));
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(random(), mockAnalyzer));
    Document nextDoc = lineFileDocs.nextDoc();
    Document doc = new Document();
    for (IndexableField field : nextDoc.getFields()) {
      if (field.fieldType().indexOptions() != IndexOptions.NONE) {
        doc.add(field);
        if (random().nextInt(3) == 0) {
          doc.add(field); // randomly add the same field twice
        }
      }
    }

    writer.addDocument(doc);
    writer.close();
    for (IndexableField field : doc.getFields()) {
      memory.addField(field.name(), ((Field) field).stringValue(), mockAnalyzer);
    }
    DirectoryReader competitor = DirectoryReader.open(dir);
    LeafReader memIndexReader = (LeafReader) memory.createSearcher().getIndexReader();
    TestUtil.checkReader(memIndexReader);
    duellReaders(competitor, memIndexReader);
    IOUtils.close(competitor, memIndexReader);
    memory.reset();
    dir.close();
  }
  lineFileDocs.close();
}
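// Hedged usage sketch, not part of the test above: it assumes a Lucene 5.x-era MemoryIndex,
// which holds exactly one document in RAM and scores queries against it directly. The "body"
// field name and the query text are illustrative assumptions.
public class MemoryIndexSketch {
  public static void main(String[] args) throws Exception {
    org.apache.lucene.analysis.standard.StandardAnalyzer analyzer =
        new org.apache.lucene.analysis.standard.StandardAnalyzer();
    org.apache.lucene.index.memory.MemoryIndex index =
        new org.apache.lucene.index.memory.MemoryIndex();
    index.addField("body", "the quick brown fox", analyzer);
    // search() returns a relevance score; > 0.0f means the single in-memory document matched
    float score =
        index.search(
            new org.apache.lucene.queryparser.classic.QueryParser("body", analyzer)
                .parse("fox"));
    System.out.println(score > 0.0f ? "hit, score=" + score : "miss");
  }
}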
public void testEmptyDocs() throws IOException {
  Directory dir = newDirectory();
  IndexWriterConfig iwConf =
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
  iwConf.setMaxBufferedDocs(RandomInts.randomIntBetween(random(), 2, 30));
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwConf);

  // make sure that the fact that documents might be empty is not a problem
  final Document emptyDoc = new Document();
  final int numDocs = random().nextBoolean() ? 1 : atLeast(1000);
  for (int i = 0; i < numDocs; ++i) {
    iw.addDocument(emptyDoc);
  }
  iw.commit();
  final DirectoryReader rd = DirectoryReader.open(dir);
  for (int i = 0; i < numDocs; ++i) {
    final Document doc = rd.document(i);
    assertNotNull(doc);
    assertTrue(doc.getFields().isEmpty());
  }
  rd.close();

  iw.close();
  dir.close();
}
public void doTest(int[] docs) throws Exception {
  Directory dir = makeIndex();
  IndexReader reader = IndexReader.open(dir, true);
  for (int i = 0; i < docs.length; i++) {
    Document d = reader.document(docs[i], SELECTOR);
    d.get(MAGIC_FIELD);
    List<Fieldable> fields = d.getFields();
    for (Iterator<Fieldable> fi = fields.iterator(); fi.hasNext(); ) {
      Fieldable f = null;
      try {
        f = fi.next();
        String fname = f.name();
        String fval = f.stringValue();
        assertNotNull(docs[i] + " FIELD: " + fname, fval);
        String[] vals = fval.split("#");
        if (!dataset.contains(vals[0]) || !dataset.contains(vals[1])) {
          fail("FIELD:" + fname + ",VAL:" + fval);
        }
      } catch (Exception e) {
        throw new Exception(docs[i] + " WTF: " + f.name(), e);
      }
    }
  }
  reader.close();
  dir.close();
}
/** @return the indexes */
public List<Index> getIndexes() {
  List<Index> indexes = new ArrayList<Index>();
  int numDocs = reader.numDocs();
  for (int i = 0; i < numDocs; i++) {
    try {
      Document document = reader.document(i);
      List<Fieldable> f = document.getFields();
      Index index = new Index();
      for (Fieldable fieldable : f) {
        Field field = (Field) fieldable;
        // reflectively call the matching setter, e.g. setTitle(String), for each stored field
        Method m =
            Index.class.getDeclaredMethod("set" + field.name(), new Class[] {String.class});
        m.invoke(index, new Object[] {field.stringValue()});
      }
      indexes.add(index);
    } catch (Exception e) {
      e.printStackTrace();
    }
  }
  return indexes;
}
/**
 * Find words for a more-like-this query former.
 *
 * @param docNum the id of the lucene document from which to find terms
 */
private PriorityQueue<ScoreTerm> retrieveTerms(int docNum) throws IOException {
  Map<String, Map<String, Int>> field2termFreqMap = new HashMap<>();
  for (String fieldName : fieldNames) {
    final Fields vectors = ir.getTermVectors(docNum);
    final Terms vector;
    if (vectors != null) {
      vector = vectors.terms(fieldName);
    } else {
      vector = null;
    }

    // field does not store term vector info
    if (vector == null) {
      Document d = ir.document(docNum);
      IndexableField[] fields = d.getFields(fieldName);
      for (IndexableField field : fields) {
        final String stringValue = field.stringValue();
        if (stringValue != null) {
          addTermFrequencies(new StringReader(stringValue), field2termFreqMap, fieldName);
        }
      }
    } else {
      addTermFrequencies(field2termFreqMap, vector, fieldName);
    }
  }

  return createQueue(field2termFreqMap);
}
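// Hedged companion sketch for the term-vector branch above (assumes a Lucene 5.x-era API;
// `reader`, `docNum`, and the "body" field name are illustrative assumptions). For a single
// document's term vector, totalTermFreq() is the within-document term frequency.
static void dumpTermVector(IndexReader reader, int docNum) throws IOException {
  Fields vectors = reader.getTermVectors(docNum);
  if (vectors == null) {
    return; // no term vectors stored; callers fall back to re-analyzing stored fields
  }
  Terms terms = vectors.terms("body");
  if (terms != null) {
    TermsEnum termsEnum = terms.iterator();
    BytesRef term;
    while ((term = termsEnum.next()) != null) {
      System.out.println(term.utf8ToString() + " -> " + termsEnum.totalTermFreq());
    }
  }
}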
private void alternateField(
    NamedList docSummaries, SolrParams params, Document doc, String fieldName) {
  String alternateField = params.getFieldParam(fieldName, HighlightParams.ALTERNATE_FIELD);
  if (alternateField != null && alternateField.length() > 0) {
    IndexableField[] docFields = doc.getFields(alternateField);
    List<String> listFields = new ArrayList<String>();
    for (IndexableField field : docFields) {
      if (field.binaryValue() == null) listFields.add(field.stringValue());
    }
    String[] altTexts = listFields.toArray(new String[listFields.size()]);

    if (altTexts != null && altTexts.length > 0) {
      Encoder encoder = getEncoder(fieldName, params);
      int alternateFieldLen =
          params.getFieldInt(fieldName, HighlightParams.ALTERNATE_FIELD_LENGTH, 0);
      List<String> altList = new ArrayList<String>();
      int len = 0;
      for (String altText : altTexts) {
        if (alternateFieldLen <= 0) {
          altList.add(encoder.encodeText(altText));
        } else {
          altList.add(
              len + altText.length() > alternateFieldLen
                  ? encoder.encodeText(altText.substring(0, alternateFieldLen - len))
                  : encoder.encodeText(altText));
          len += altText.length();
          if (len >= alternateFieldLen) break;
        }
      }
      docSummaries.add(fieldName, altList);
    }
  }
}
@Test
public void testNullIssue() {
  IssueKeyIndexer indexer = new IssueKeyIndexer(null);
  Document doc = new Document();
  indexer.addIndex(doc, null);
  assertTrue(doc.getFields().isEmpty());
}
public static void indexSerial(Random random, Map<String, Document> docs, Directory dir)
    throws IOException {
  IndexWriter w =
      new IndexWriter(
          dir,
          LuceneTestCase.newIndexWriterConfig(
                  random, TEST_VERSION_CURRENT, new MockAnalyzer(random))
              .setMergePolicy(newLogMergePolicy()));

  // index all docs in a single thread
  Iterator<Document> iter = docs.values().iterator();
  while (iter.hasNext()) {
    Document d = iter.next();
    ArrayList<Field> fields = new ArrayList<>();
    fields.addAll(d.getFields());
    // put fields in same order each time
    Collections.sort(fields, fieldNameComparator);

    Document d1 = new Document();
    for (int i = 0; i < fields.size(); i++) {
      d1.add(fields.get(i));
    }
    w.addDocument(d1);
  }
  w.close();
}
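// The fieldNameComparator referenced above is defined elsewhere in the test utilities; this is
// a hedged sketch of what it plausibly looks like. Ordering by field name is the only property
// indexSerial relies on to make the field order deterministic.
static final java.util.Comparator<Field> fieldNameComparator =
    new java.util.Comparator<Field>() {
      @Override
      public int compare(Field a, Field b) {
        return a.name().compareTo(b.name());
      }
    };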
// LUCENE-1727: make sure doc fields are stored in order
public void testStoredFieldsOrder() throws Throwable {
  Directory d = newDirectory();
  IndexWriter w =
      new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
  Document doc = new Document();

  FieldType customType = new FieldType();
  customType.setStored(true);

  doc.add(newField("zzz", "a b c", customType));
  doc.add(newField("aaa", "a b c", customType));
  doc.add(newField("zzz", "1 2 3", customType));
  w.addDocument(doc);
  IndexReader r = w.getReader();
  Document doc2 = r.document(0);
  Iterator<IndexableField> it = doc2.getFields().iterator();
  assertTrue(it.hasNext());
  Field f = (Field) it.next();
  assertEquals(f.name(), "zzz");
  assertEquals(f.stringValue(), "a b c");

  assertTrue(it.hasNext());
  f = (Field) it.next();
  assertEquals(f.name(), "aaa");
  assertEquals(f.stringValue(), "a b c");

  assertTrue(it.hasNext());
  f = (Field) it.next();
  assertEquals(f.name(), "zzz");
  assertEquals(f.stringValue(), "1 2 3");

  assertFalse(it.hasNext());
  r.close();
  w.close();
  d.close();
}
private void dumpDocument(int docNum, Document doc) throws IOException {
  outputLn();
  outputLn("Document " + docNum);

  if (doc == null) {
    outputLn("    deleted");
    return;
  }

  // note: only stored fields will be returned
  for (Fieldable field : doc.getFields()) {
    String fieldName = field.name();

    boolean isDate = "l.date".equals(fieldName);

    outputLn("  Field [" + fieldName + "]: " + field.toString());
    String[] values = doc.getValues(fieldName);
    if (values != null) {
      int i = 0;
      for (String value : values) {
        output("    " + "(" + i++ + ") " + value);
        if (isDate) {
          try {
            Date date = DateTools.stringToDate(value);
            output(" (" + date.toString() + " (" + date.getTime() + "))");
          } catch (java.text.ParseException e) {
            assert false;
          }
        }
        outputLn();
      }
    }
  }
}
@Override
public boolean reload(String collectionName, int docNum) {
  if (collectionName == null) return false;

  CrescentCollectionHandler collectionHandler =
      SpringApplicationContext.getBean(
          "crescentCollectionHandler", CrescentCollectionHandler.class);
  CrescentCollection collection =
      collectionHandler.getCrescentCollections().getCrescentCollection(collectionName);

  if (collection == null) {
    logger.debug("no collection info for => {}", collectionName);
    return false;
  }

  List<String> fieldName = new ArrayList<String>();
  List<String> flag = new ArrayList<String>();
  List<String> norm = new ArrayList<String>();
  List<String> value = new ArrayList<String>();

  try {
    Directory directory = FSDirectory.open(new File(collection.getIndexingDirectory()));
    IndexReader reader = IndexReader.open(directory);

    Document document = null;
    try {
      document = reader.document(docNum);
    } catch (IllegalArgumentException e) {
      e.printStackTrace();
      return false;
    }

    String fName = null;
    for (Fieldable field : document.getFields()) {
      fName = field.name();
      fieldName.add(fName);
      flag.add(fieldFlag(field));

      if (reader.hasNorms(fName)) {
        norm.add(String.valueOf(Similarity.decodeNorm(reader.norms(fName)[docNum])));
      } else {
        norm.add("---");
      }

      value.add(field.stringValue());
    }
  } catch (IOException e) {
    e.printStackTrace();
    return false;
  }

  result.put("collection", collectionName);
  result.put("docNum", docNum);
  result.put("fieldName", fieldName);
  result.put("flag", flag);
  result.put("norm", norm);
  result.put("value", value);

  return true;
}
public static MapContainer convert(Document obj) {
  MapContainer mc = new MapContainer();
  for (IndexableField field : obj.getFields()) {
    mc.put(field.name(), field.stringValue());
  }
  return mc;
}
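// Caveat, with a hedged variant: IndexableField.stringValue() returns null for fields that
// carry a binary value, so the converter above maps such fields to null. `convertSafe` is an
// illustrative name, not part of the original code.
public static MapContainer convertSafe(Document obj) {
  MapContainer mc = new MapContainer();
  for (IndexableField field : obj.getFields()) {
    String v = field.stringValue();
    if (v == null && field.binaryValue() != null) {
      v = field.binaryValue().utf8ToString(); // assumes the stored bytes are UTF-8 text
    }
    mc.put(field.name(), v);
  }
  return mc;
}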
@Override
public void modifyIndex(final IndexWriter writer, final IndexSearcher searcher)
    throws ModifyKnowledgeBaseException {
  for (final Map.Entry<String, HashMap<String, String>> entry : this.attributes.entrySet()) {
    final String key = entry.getKey();
    final HashMap<String, String> hash = entry.getValue();
    final QueryParser qp = new QueryParser(this.docPrimaryKey, new DoserIDAnalyzer());
    try {
      final TopDocs top = searcher.search(qp.parse(QueryParserBase.escape(key)), 1);
      final ScoreDoc[] scores = top.scoreDocs;
      if (scores.length > 0) {
        final Document doc = new Document();
        final Document currentDoc = searcher.getIndexReader().document(scores[0].doc);
        // Bugfix: create a new Document and copy the fields.
        final List<IndexableField> fields = currentDoc.getFields();
        for (final IndexableField field : fields) {
          if (field.stringValue() != null) {
            if (field.name().equalsIgnoreCase(docPrimaryKey)) {
              doc.add(new StringField(field.name(), field.stringValue(), Field.Store.YES));
            } else {
              doc.add(new TextField(field.name(), field.stringValue(), Field.Store.YES));
            }
          }
        }
        final List<Document> docListToAdd = new LinkedList<Document>();
        docListToAdd.add(doc);
        for (final Map.Entry<String, String> subentry : hash.entrySet()) {
          final IndexableField field = doc.getField(subentry.getKey());
          if (field == null) {
            throw new ModifyKnowledgeBaseException("UpdateField not found", null);
          }
          if (this.action.equals(KBModifications.OVERRIDEFIELD)) {
            doc.removeFields(subentry.getKey());
            String[] newentries = generateSeperatedFieldStrings(subentry.getValue());
            for (int i = 0; i < newentries.length; i++) {
              doc.add(new TextField(subentry.getKey(), newentries[i], Field.Store.YES));
            }
          } else if (this.action.equals(KBModifications.UPDATERELATEDLABELS)) {
            doc.removeFields(subentry.getKey());
            doc.add(updateOccurrences(subentry.getValue(), field, "surroundinglabels"));
          } else if (this.action.equals(KBModifications.UPDATEOCCURRENCES)) {
            doc.removeFields(subentry.getKey());
            IndexableField f = updateOccurrences(subentry.getValue(), field, "occurrences");
            doc.add(f);
          }
        }
        writer.updateDocuments(new Term(this.docPrimaryKey, key), docListToAdd);
      } else {
        throw new ModifyKnowledgeBaseException("Document not found", null);
      }
    } catch (final IOException e) {
      throw new ModifyKnowledgeBaseException("IOException in IndexSearcher", e);
    } catch (ParseException e) {
      throw new ModifyKnowledgeBaseException("Queryparser Exception", e);
    }
  }
}
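// Hedged sketch of the update idiom used above: IndexWriter.updateDocuments(term, docs)
// atomically deletes every document matching `term` and adds the replacements in its place.
// The "id" field name and values here are illustrative assumptions.
Document replacement = new Document();
replacement.add(new StringField("id", "42", Field.Store.YES));
replacement.add(new TextField("body", "replacement text", Field.Store.YES));
writer.updateDocuments(new Term("id", "42"), Collections.singletonList(replacement));
writer.commit();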
public static MapContainer convert(Document obj, Collection<String> filters) {
  MapContainer mc = new MapContainer();
  for (IndexableField field : obj.getFields()) {
    if (filters.contains(field.name())) continue;
    mc.put(field.name(), field.stringValue());
  }
  return mc;
}
@Override
public void writeDoc(
    String name, Document doc, Set<String> returnFields, float score, boolean includeScore)
    throws IOException {
  Map other = null;
  if (includeScore) {
    other = scoreMap;
    scoreMap.put("score", score);
  }
  writeDoc(name, doc.getFields(), returnFields, other);
}
private static SimpleOrderedMap<Object> getDocumentFieldsInfo(
    Document doc, int docId, IndexReader reader, IndexSchema schema) throws IOException {
  SimpleOrderedMap<Object> finfo = new SimpleOrderedMap<Object>();
  for (Object o : doc.getFields()) {
    Fieldable fieldable = (Fieldable) o;
    SimpleOrderedMap<Object> f = new SimpleOrderedMap<Object>();

    SchemaField sfield = schema.getFieldOrNull(fieldable.name());
    FieldType ftype = (sfield == null) ? null : sfield.getType();

    f.add("type", (ftype == null) ? null : ftype.getTypeName());
    f.add("schema", getFieldFlags(sfield));
    f.add("flags", getFieldFlags(fieldable));

    Term t =
        new Term(
            fieldable.name(),
            ftype != null ? ftype.storedToIndexed(fieldable) : fieldable.stringValue());

    f.add("value", (ftype == null) ? null : ftype.toExternal(fieldable));

    // TODO: this really should be "stored"
    f.add("internal", fieldable.stringValue()); // may be a binary number

    byte[] arr = fieldable.getBinaryValue();
    if (arr != null) {
      f.add("binary", Base64.byteArrayToBase64(arr, 0, arr.length));
    }
    f.add("boost", fieldable.getBoost());
    f.add(
        "docFreq",
        t.text() == null ? 0 : reader.docFreq(t)); // this can be 0 for non-indexed fields

    // If we have a term vector, return that
    if (fieldable.isTermVectorStored()) {
      try {
        TermFreqVector v = reader.getTermFreqVector(docId, fieldable.name());
        if (v != null) {
          SimpleOrderedMap<Integer> tfv = new SimpleOrderedMap<Integer>();
          for (int i = 0; i < v.size(); i++) {
            tfv.add(v.getTerms()[i], v.getTermFrequencies()[i]);
          }
          f.add("termVector", tfv);
        }
      } catch (Exception ex) {
        log.warn("error writing term vector", ex);
      }
    }

    finfo.add(fieldable.name(), f);
  }
  return finfo;
}
@Override
protected void onRecord(DiscoveredRecord record, Document document) {
  Map<String, List<String>> fieldMap = new HashMap<String, List<String>>();

  for (Fieldable field : document.getFields()) {
    String name = field.name();
    List<String> fieldValues = fieldMap.get(name);
    if (fieldValues == null) {
      fieldValues = new ArrayList<String>();
      fieldMap.put(name, fieldValues);
    }
    fieldValues.add(field.stringValue());
  }

  mapping.put(record, fieldMap);
}
private static SolrDocument toSolrDoc(SolrInputDocument sdoc, IndexSchema schema) {
  // TODO: do something more performant than this double conversion
  Document doc = DocumentBuilder.toDocument(sdoc, schema);

  // copy the stored fields only
  StoredDocument out = new StoredDocument();
  for (IndexableField f : doc.getFields()) {
    if (f.fieldType().stored()) {
      out.add((StorableField) f);
    }
  }

  return toSolrDoc(out, schema);
}
public HitDetails getDetails(Hit hit) throws IOException {
  Document doc = luceneSearcher.doc(Integer.valueOf(hit.getUniqueKey()));

  List docFields = doc.getFields();
  String[] fields = new String[docFields.size()];
  String[] values = new String[docFields.size()];
  for (int i = 0; i < docFields.size(); i++) {
    Field field = (Field) docFields.get(i);
    fields[i] = field.name();
    values[i] = field.stringValue();
  }

  return new HitDetails(fields, values);
}
// paged display of search results
public static void getResult(IndexSearcher searcher, TopDocs docs, int pageNo, int pageSize)
    throws IOException {
  ScoreDoc[] hits = docs.scoreDocs;
  int endIndex = pageNo * pageSize;
  int len = hits.length;
  if (endIndex > len) {
    endIndex = len;
  }
  for (int i = (pageNo - 1) * pageSize; i < endIndex; i++) {
    Document d = searcher.doc(hits[i].doc);
    System.out.println("Page results:");
    System.out.println((i + 1) + ". " + d.get("isbn") + "\t" + d.get("title") + "\t");
    IndexableField[] fields = d.getFields("details");
    for (IndexableField indexableField : fields) {
      System.out.println(indexableField.stringValue());
    }
  }
}
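// Hedged usage sketch for the pager above (the query and field names are assumptions):
// requesting pageNo * pageSize hits is enough, since getResult only reads below endIndex.
Query query = new TermQuery(new Term("title", "lucene"));
int pageNo = 2, pageSize = 10;
TopDocs docs = searcher.search(query, pageNo * pageSize);
getResult(searcher, docs, pageNo, pageSize);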
@BeforeClass
public static void beforeClass() throws Exception {
  testDoc = new Document();
  fieldInfos = new FieldInfos.Builder();
  DocHelper.setupDoc(testDoc);
  for (IndexableField field : testDoc.getFields()) {
    fieldInfos.addOrUpdate(field.name(), field.fieldType());
  }
  dir = newDirectory();
  IndexWriterConfig conf =
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
          .setMergePolicy(newLogMergePolicy());
  ((LogMergePolicy) conf.getMergePolicy()).setUseCompoundFile(false);
  IndexWriter writer = new IndexWriter(dir, conf);
  writer.addDocument(testDoc);
  writer.close();
  FaultyIndexInput.doFail = false;
}
private HashSet<String> getVClassUrisForHits(TopDocs topDocs, IndexSearcher searcherForRequest) {
  HashSet<String> typesInHits = new HashSet<String>();
  for (int i = 0; i < topDocs.scoreDocs.length; i++) {
    try {
      Document doc = searcherForRequest.doc(topDocs.scoreDocs[i].doc);
      Field[] types = doc.getFields(Entity2LuceneDoc.term.RDFTYPE);
      if (types != null) {
        for (int j = 0; j < types.length; j++) {
          String typeUri = types[j].stringValue();
          typesInHits.add(typeUri);
        }
      }
    } catch (Exception e) {
      log.error("problems getting rdf:type for search hits", e);
    }
  }
  return typesInHits;
}
/**
 * Generate highlighted text for one field of an index document.
 *
 * @param document the index document
 * @param highlighter the highlighter
 * @param analyzer the index analyzer
 * @param field the field to highlight
 * @return the best highlighted fragment, the raw field value if highlighting produced no
 *     usable text, or null if the document has no such field
 * @throws IOException
 */
public static String highlight(
    Document document, Highlighter highlighter, Analyzer analyzer, String field)
    throws IOException {
  List<IndexableField> list = document.getFields();
  for (IndexableField fieldable : list) {
    String fieldValue = fieldable.stringValue();
    if (fieldable.name().equals(field)) {
      try {
        fieldValue = highlighter.getBestFragment(analyzer, field, fieldValue);
      } catch (InvalidTokenOffsetsException e) {
        fieldValue = fieldable.stringValue();
      }
      return (fieldValue == null || fieldValue.trim().length() == 0)
          ? fieldable.stringValue()
          : fieldValue;
    }
  }
  return null;
}
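// Hedged setup sketch for the helper above (Lucene highlighter module; the query, tags, and
// fragment size are illustrative assumptions): a Highlighter is built from a Formatter plus a
// QueryScorer for the query whose terms should be emphasized.
Query query = new TermQuery(new Term("content", "lucene"));
Highlighter highlighter =
    new Highlighter(new SimpleHTMLFormatter("<em>", "</em>"), new QueryScorer(query));
highlighter.setTextFragmenter(new SimpleFragmenter(100));
String best = highlight(document, highlighter, new StandardAnalyzer(), "content");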
/** Get the class groups represented for the individuals in the topDocs. */
private List<VClassGroup> getClassGroups(
    VClassGroupDao grpDao, TopDocs topDocs, IndexSearcher searcherForRequest) {
  LinkedHashMap<String, VClassGroup> grpMap = grpDao.getClassGroupMap();
  int n = grpMap.size();

  HashSet<String> classGroupsInHits = new HashSet<String>(n);
  int grpsFound = 0;

  for (int i = 0; i < topDocs.scoreDocs.length && n > grpsFound; i++) {
    try {
      Document doc = searcherForRequest.doc(topDocs.scoreDocs[i].doc);
      Field[] grps = doc.getFields(Entity2LuceneDoc.term.CLASSGROUP_URI);
      // was `grps != null || grps.length > 0`, which NPEs when grps is null
      if (grps != null && grps.length > 0) {
        for (int j = 0; j < grps.length; j++) {
          String groupUri = grps[j].stringValue();
          if (groupUri != null && !classGroupsInHits.contains(groupUri)) {
            classGroupsInHits.add(groupUri);
            grpsFound++;
            if (grpsFound >= n) break;
          }
        }
      }
    } catch (Exception e) {
      log.error("problem getting VClassGroups from search hits " + e.getMessage());
    }
  }

  List<String> classgroupURIs = Collections.list(Collections.enumeration(classGroupsInHits));
  List<VClassGroup> classgroups = new ArrayList<VClassGroup>(classgroupURIs.size());
  for (String cgUri : classgroupURIs) {
    if (cgUri != null && !"".equals(cgUri)) {
      VClassGroup vcg = grpDao.getGroupByURI(cgUri);
      if (vcg == null) {
        log.debug("could not get classgroup for URI " + cgUri);
      } else {
        classgroups.add(vcg);
      }
    }
  }
  grpDao.sortGroupList(classgroups);
  return classgroups;
}
@Test
public void testWriteFields() {
  String[] fields = new String[] {"s", "i"};
  PdxLuceneSerializer mapper = new PdxLuceneSerializer(fields);

  PdxInstance i = Mockito.mock(PdxInstance.class);

  Mockito.when(i.hasField("s")).thenReturn(true);
  Mockito.when(i.hasField("i")).thenReturn(true);
  Mockito.when(i.getField("s")).thenReturn("a");
  Mockito.when(i.getField("i")).thenReturn(5);

  Document doc = new Document();
  mapper.toDocument(i, doc);

  assertEquals(2, doc.getFields().size());
  assertEquals("a", doc.getField("s").stringValue());
  assertEquals(5, doc.getField("i").numericValue());
}
public static final SolrDocument toSolrDocument(Document doc, final IndexSchema schema) {
  SolrDocument out = new SolrDocument();
  for (IndexableField f : doc.getFields()) {
    // Make sure multivalued fields are represented as lists
    Object existing = out.get(f.name());
    if (existing == null) {
      SchemaField sf = schema.getFieldOrNull(f.name());
      if (sf != null && sf.multiValued()) {
        List<Object> vals = new ArrayList<>();
        vals.add(f);
        out.setField(f.name(), vals);
      } else {
        out.setField(f.name(), f);
      }
    } else {
      out.addField(f.name(), f);
    }
  }
  return out;
}
private static List<Fieldable> getNonEmptyFields(final Document doc) {
  @SuppressWarnings({"unchecked"})
  final List<Fieldable> fields = doc.getFields();
  final List<Fieldable> allVals = Lists.newArrayList();
  for (final Fieldable field : fields) {
    // NOTE: we do not store the field value since we are never interested in reading the value
    // out of the document, we are just interested in searching it. This will keep us from
    // adding to the size of the issue document.
    if (field.isIndexed()) {
      allVals.add(
          new Field(
              DocumentConstants.ISSUE_NON_EMPTY_FIELD_IDS,
              field.name(),
              Field.Store.NO,
              Field.Index.NOT_ANALYZED_NO_NORMS));
    }
  }
  return allVals;
}
@BeforeClass
public static void beforeClass() throws Exception {
  testDoc = new Document();
  fieldInfos = new FieldInfos.Builder();
  DocHelper.setupDoc(testDoc);
  for (IndexableField field : testDoc.getFields()) {
    FieldInfo fieldInfo = fieldInfos.getOrAdd(field.name());
    IndexableFieldType ift = field.fieldType();
    fieldInfo.setIndexOptions(ift.indexOptions());
    if (ift.omitNorms()) {
      fieldInfo.setOmitsNorms();
    }
    fieldInfo.setDocValuesType(ift.docValuesType());
  }
  dir = newDirectory();
  IndexWriterConfig conf =
      newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy());
  conf.getMergePolicy().setNoCFSRatio(0.0);
  IndexWriter writer = new IndexWriter(dir, conf);
  writer.addDocument(testDoc);
  writer.close();
}
public void testMultiValuedFieldAndDocBoosts() throws Exception {
  SolrCore core = h.getCore();
  IndexSchema schema = core.getLatestSchema();
  SolrInputDocument doc = new SolrInputDocument();
  doc.setDocumentBoost(3.0f);
  SolrInputField field = new SolrInputField("foo_t");
  field.addValue("summer time", 1.0f);
  field.addValue("in the city", 5.0f); // using boost
  field.addValue("living is easy", 1.0f);
  doc.put(field.getName(), field);

  Document out = DocumentBuilder.toDocument(doc, schema);
  IndexableField[] outF = out.getFields(field.getName());
  assertEquals("wrong number of field values", 3, outF.length);

  // Since Lucene no longer has native document boosts, we should find
  // the doc boost multiplied into the boost on the first field value.
  // All other field values should be 1.0f
  // (Lucene will multiply all of the field boosts later).
  assertEquals(15.0f, outF[0].boost(), 0.0f);
  assertEquals(1.0f, outF[1].boost(), 0.0f);
  assertEquals(1.0f, outF[2].boost(), 0.0f);
}
@Override
DocWriter processDocument() throws IOException {
  // this is where we process the geo-search components of the document
  Document doc = docState.doc;
  int docID = docState.docID;

  List<Fieldable> fields = doc.getFields();
  List<GeoCoordinateField> geoFields = new Vector<GeoCoordinateField>();
  for (Fieldable field : fields) {
    if (field instanceof GeoCoordinateField) {
      geoFields.add((GeoCoordinateField) field);
    }
  }

  for (GeoCoordinateField geoField : geoFields) {
    // process field into GeoIndex here
    geoIndexer.index(docID, geoField);
    doc.removeFields(geoField.name());
  }

  return defaultDocConsumerPerThread.processDocument();
}