public Scorer scorer(IndexReader reader) throws IOException {
    TermDocs termDocs = reader.termDocs(term);
    if (termDocs == null)
        return null;
    return new MyTermScorer(this, termDocs, similarity, reader.norms(term.field()));
}
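A minimal sketch (Lucene 3.x API; the index path and the "contents" field name are hypothetical) of what the array passed to the scorer above contains: one encoded norm byte per document, decodable with Similarity.decodeNorm.

import java.io.File;
import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.store.FSDirectory;

public class NormsDump {
    public static void main(String[] args) throws IOException {
        // hypothetical index location
        IndexReader reader = IndexReader.open(FSDirectory.open(new File("/path/to/index")));
        try {
            // norms(String) may return null if the field was indexed with omitNorms
            byte[] norms = reader.norms("contents");
            if (norms != null) {
                for (int doc = 0; doc < reader.maxDoc(); doc++) {
                    // decode the single-byte norm back into a float boost factor
                    System.out.println(doc + " => " + Similarity.decodeNorm(norms[doc]));
                }
            }
        } finally {
            reader.close();
        }
    }
}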
@Override
public boolean reload(String collectionName, int docNum) {
    if (collectionName == null)
        return false;

    CrescentCollectionHandler collectionHandler = SpringApplicationContext.getBean(
            "crescentCollectionHandler", CrescentCollectionHandler.class);
    CrescentCollection collection = collectionHandler.getCrescentCollections()
            .getCrescentCollection(collectionName);

    if (collection == null) {
        logger.debug("no collection info for => {}", collectionName);
        return false;
    }

    List<String> fieldName = new ArrayList<String>();
    List<String> flag = new ArrayList<String>();
    List<String> norm = new ArrayList<String>();
    List<String> value = new ArrayList<String>();

    try {
        Directory directory = FSDirectory.open(new File(collection.getIndexingDirectory()));
        IndexReader reader = IndexReader.open(directory);

        try {
            Document document = null;
            try {
                document = reader.document(docNum);
            } catch (IllegalArgumentException e) {
                e.printStackTrace();
                return false;
            }

            String fName = null;
            for (Fieldable field : document.getFields()) {
                fName = field.name();
                fieldName.add(fName);
                flag.add(fieldFlag(field));

                // norms(String) returns null for fields indexed with omitNorms,
                // so guard with hasNorms before decoding
                if (reader.hasNorms(fName)) {
                    norm.add(String.valueOf(Similarity.decodeNorm(reader.norms(fName)[docNum])));
                } else {
                    norm.add("---");
                }

                value.add(field.stringValue());
            }
        } finally {
            reader.close(); // release the reader even on an early return
        }
    } catch (IOException e) {
        e.printStackTrace();
        return false;
    }

    result.put("collection", collectionName);
    result.put("docNum", docNum);
    result.put("fieldName", fieldName);
    result.put("flag", flag);
    result.put("norm", norm);
    result.put("value", value);

    return true;
}
public Explanation explain(IndexReader reader, int doc) throws IOException {
    ComplexExplanation result = new ComplexExplanation();
    result.setDescription("weight(" + getQuery() + " in " + doc + "), product of:");

    Explanation idfExpl = new Explanation(idf, "idf(docFreq=" + reader.docFreq(term) + ")");

    // explain query weight
    Explanation queryExpl = new Explanation();
    queryExpl.setDescription("queryWeight(" + getQuery() + "), product of:");

    Explanation boostExpl = new Explanation(getBoost(), "boost");
    if (getBoost() != 1.0f)
        queryExpl.addDetail(boostExpl);
    queryExpl.addDetail(idfExpl);

    Explanation queryNormExpl = new Explanation(queryNorm, "queryNorm");
    queryExpl.addDetail(queryNormExpl);

    queryExpl.setValue(boostExpl.getValue() * idfExpl.getValue() * queryNormExpl.getValue());
    result.addDetail(queryExpl);

    // explain field weight
    String field = term.field();
    ComplexExplanation fieldExpl = new ComplexExplanation();
    fieldExpl.setDescription("fieldWeight(" + term + " in " + doc + "), product of:");

    Explanation tfExpl = scorer(reader).explain(doc);
    fieldExpl.addDetail(tfExpl);
    fieldExpl.addDetail(idfExpl);

    Explanation fieldNormExpl = new Explanation();
    byte[] fieldNorms = reader.norms(field);
    float fieldNorm = fieldNorms != null ? Similarity.decodeNorm(fieldNorms[doc]) : 0.0f;
    fieldNormExpl.setValue(fieldNorm);
    fieldNormExpl.setDescription("fieldNorm(field=" + field + ", doc=" + doc + ")");
    fieldExpl.addDetail(fieldNormExpl);

    fieldExpl.setMatch(Boolean.valueOf(tfExpl.isMatch()));
    fieldExpl.setValue(tfExpl.getValue() * idfExpl.getValue() * fieldNormExpl.getValue());

    result.addDetail(fieldExpl);
    result.setMatch(fieldExpl.getMatch());

    // combine them
    result.setValue(queryExpl.getValue() * fieldExpl.getValue());

    if (queryExpl.getValue() == 1.0f)
        return fieldExpl;

    return result;
}
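A sketch of how an Explanation like the one built above is usually obtained from application code (Lucene 3.x; the index path, field, and term are hypothetical): IndexSearcher.explain drives the Weight's explain() and the printed detail tree includes the fieldNorm line decoded from reader.norms.

import java.io.File;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.FSDirectory;

public class ExplainDemo {
    public static void main(String[] args) throws Exception {
        // hypothetical index location
        IndexReader reader = IndexReader.open(FSDirectory.open(new File("/path/to/index")));
        IndexSearcher searcher = new IndexSearcher(reader);
        try {
            // explain scoring of doc 0 for a hypothetical term query
            Explanation expl = searcher.explain(new TermQuery(new Term("title", "lucene")), 0);
            System.out.println(expl.toString()); // nested details include fieldNorm(...)
        } finally {
            searcher.close();
            reader.close();
        }
    }
}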
@Override
public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer)
        throws IOException {
    return new PayloadTermSpanScorer((TermSpans) query.getSpans(reader), this, similarity,
            reader.norms(query.getField()));
}
@Override
public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer)
        throws IOException {
    return new MatchNoneScorer(reader, similarity, this,
            normsField != null ? reader.norms(normsField) : null);
}
@Override
public Scorer scorer(AtomicReaderContext context, ScorerContext scorerContext)
        throws IOException {
    if (termArrays.size() == 0) // optimize zero-term case
        return null;

    final IndexReader reader = context.reader;
    final Bits delDocs = reader.getDeletedDocs();

    PhraseQuery.PostingsAndFreq[] postingsFreqs =
            new PhraseQuery.PostingsAndFreq[termArrays.size()];

    for (int pos = 0; pos < postingsFreqs.length; pos++) {
        Term[] terms = termArrays.get(pos);

        final DocsAndPositionsEnum postingsEnum;
        int docFreq;

        if (terms.length > 1) {
            postingsEnum = new UnionDocsAndPositionsEnum(reader, terms);

            // coarse -- this overcounts since a given doc can
            // contain more than one of the terms:
            docFreq = 0;
            for (int termIdx = 0; termIdx < terms.length; termIdx++) {
                docFreq += reader.docFreq(terms[termIdx]);
            }
        } else {
            final Term term = terms[0];
            postingsEnum = reader.termPositionsEnum(delDocs, term.field(), term.bytes());

            if (postingsEnum == null) {
                if (reader.termDocsEnum(delDocs, term.field(), term.bytes()) != null) {
                    // term does exist, but has no positions
                    throw new IllegalStateException("field \"" + term.field()
                            + "\" was indexed with Field.omitTermFreqAndPositions=true; cannot run PhraseQuery (term="
                            + term.text() + ")");
                } else {
                    // term does not exist
                    return null;
                }
            }

            docFreq = reader.docFreq(term.field(), term.bytes());
        }

        postingsFreqs[pos] = new PhraseQuery.PostingsAndFreq(postingsEnum, docFreq,
                positions.get(pos).intValue());
    }

    if (slop == 0) {
        // sort by increasing docFreq order
        ArrayUtil.quickSort(postingsFreqs);

        ExactPhraseScorer s =
                new ExactPhraseScorer(this, postingsFreqs, similarity, reader.norms(field));
        if (s.noDocs) {
            return null;
        } else {
            return s;
        }
    } else {
        return new SloppyPhraseScorer(this, postingsFreqs, similarity, slop, reader.norms(field));
    }
}
/** @see LuceneIndexReader#norms(String) */
public byte[] norms(String field) throws IOException {
    return indexReader.norms(field);
}
/** @see LuceneIndexReader#norms(String, byte[], int) */
public void norms(String field, byte[] bytes, int offset) throws IOException {
    indexReader.norms(field, bytes, offset);
}
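A sketch of how the offset variant delegated to above is typically used (Lucene 3.x; the helper class and method names are hypothetical): it writes maxDoc() norm bytes into a caller-owned array at the given offset, which is how composite readers stack per-segment norms into one array at each segment's doc base.

import java.io.IOException;
import org.apache.lucene.index.IndexReader;

final class NormsBulkRead {
    /** Copies the norms of the given field into a new array sized to the reader;
     *  assumes the field was indexed with norms enabled. */
    static byte[] readNorms(IndexReader reader, String field) throws IOException {
        byte[] buf = new byte[reader.maxDoc()];
        reader.norms(field, buf, 0); // fill the buffer starting at offset 0
        return buf;
    }
}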