Code Example #1
  /**
   * Returns the doc IDs of documents whose spatial hash cell id matches one of the requested
   * cells at the given grid level.
   *
   * @param context the atomic reader context of the segment being searched
   * @param acceptDocs bits marking the documents allowed to match, or null to accept all docs
   */
  @Override
  public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
    if (spatialHashCellsIds.size() == 0) {
      return null;
    }

    final AtomicReader atomicReader = context.reader();

    OpenBitSet matchedDocumentsIds = new OpenBitSet(atomicReader.maxDoc());
    boolean found = false;
    for (int i = 0; i < spatialHashCellsIds.size(); i++) {
      Term spatialHashCellTerm = new Term(fieldName, spatialHashCellsIds.get(i));
      DocsEnum spatialHashCellsDocs = atomicReader.termDocsEnum(spatialHashCellTerm);
      if (spatialHashCellsDocs != null) {
        while (true) {
          final int docId = spatialHashCellsDocs.nextDoc();
          if (docId == DocIdSetIterator.NO_MORE_DOCS) {
            break;
          } else {
            if (acceptDocs == null || acceptDocs.get(docId)) {
              matchedDocumentsIds.fastSet(docId);
              found = true;
            }
          }
        }
      }
    }

    if (found) {
      return matchedDocumentsIds;
    } else {
      return null;
    }
  }
Code Example #2
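 // Closes the previously cached segment reader (if any), reopens a DirectoryReader from the
 // writer, wraps it in a SlowCompositeReaderWrapper so it can be used as a single AtomicReader,
 // and caches and returns its context.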
 protected AtomicReaderContext refreshReader() throws Exception {
   if (readerContext != null) {
     readerContext.reader().close();
   }
   AtomicReader reader =
       SlowCompositeReaderWrapper.wrap(topLevelReader = DirectoryReader.open(writer, true));
   readerContext = reader.getContext();
   return readerContext;
 }
Code Example #3
 @Override
 public void tearDown() throws Exception {
   readerA.close();
   readerAclone.close();
   readerB.close();
   readerX.close();
   dirA.close();
   dirB.close();
   super.tearDown();
 }
Code Example #4
 @Test
 public void testNoTerms() throws Exception {
   FieldDataTermsFilter hFilterBytes =
       FieldDataTermsFilter.newBytes(getFieldData(strMapper), new ObjectOpenHashSet<BytesRef>());
   FieldDataTermsFilter hFilterLongs =
       FieldDataTermsFilter.newLongs(getFieldData(lngMapper), new LongOpenHashSet());
   FieldDataTermsFilter hFilterDoubles =
       FieldDataTermsFilter.newDoubles(getFieldData(dblMapper), new DoubleOpenHashSet());
   assertNull(hFilterBytes.getDocIdSet(reader.getContext(), reader.getLiveDocs()));
   assertNull(hFilterLongs.getDocIdSet(reader.getContext(), reader.getLiveDocs()));
   assertNull(hFilterDoubles.getDocIdSet(reader.getContext(), reader.getLiveDocs()));
 }
Code Example #5
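 // Lazily resolves the parsed _source map: returns the already-parsed map if present, otherwise
 // parses the cached source bytes, otherwise loads the stored source field for docId from the
 // reader (falling back to an empty map when the document has no source).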
 private Map<String, Object> loadSourceIfNeeded() {
   if (source != null) {
     return source;
   }
   if (sourceAsBytes != null) {
     Tuple<XContentType, Map<String, Object>> tuple = sourceAsMapAndType(sourceAsBytes);
     sourceContentType = tuple.v1();
     source = tuple.v2();
     return source;
   }
   try {
     JustSourceFieldsVisitor sourceFieldVisitor = new JustSourceFieldsVisitor();
     reader.document(docId, sourceFieldVisitor);
     BytesReference source = sourceFieldVisitor.source();
     if (source == null) {
       this.source = ImmutableMap.of();
       this.sourceContentType = null;
     } else {
       Tuple<XContentType, Map<String, Object>> tuple = sourceAsMapAndType(source);
       this.sourceContentType = tuple.v1();
       this.source = tuple.v2();
     }
   } catch (Exception e) {
     throw new ElasticsearchParseException("failed to parse / load source", e);
   }
   return this.source;
 }
Code Example #6
 @After
 public void tearDown() throws Exception {
   super.tearDown();
   reader.close();
   writer.close();
   ifdService.clear();
   SearchContext.removeCurrent();
 }
Code Example #7
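  // Builds the root <document> element for the given descriptor, fills in its id/title/path/
  // tagging attributes, and appends an <attributes> child containing every doc-values field that
  // is not treated as a word-level attribute.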
  void createDocumentNode(final DocumentDescriptor inDescriptor) throws IOException {
    try {
      _document = DocumentBuilderFactory.newInstance().newDocumentBuilder().newDocument();
      _rootNode = _document.createElement("document");
    } catch (ParserConfigurationException e) {
      e.printStackTrace();
      System.exit(1);
    }
    AtomicReader segmentReader = _indexReader.leaves().get(inDescriptor.segmentNumber).reader();
    _rootNode.setAttribute("id", DocumentIdOperations.documentDescriptorToId(inDescriptor));
    // TODO: implement the proper way of building a title from the production report
    _rootNode.setAttribute("title", buildDocumentTitle(segmentReader, inDescriptor));
    _rootNode.setAttribute("path", "ruscorpora.ru");
    _rootNode.setAttribute(
        "tagging", Attributes.getDocAttribute(segmentReader, inDescriptor.localId, "tagging"));
    _rootNode.setAttribute("snippets", "0");
    Element attributesNode = _document.createElement("attributes");

    _rootNode.appendChild(attributesNode);

    FieldInfos fields = segmentReader.getFieldInfos();
    for (int fieldIndex = 0; fieldIndex != fields.size(); ++fieldIndex) {
      FieldInfo field = fields.fieldInfo(fieldIndex);
      // TODO: understand why field may turn into null
      if (field == null) {
        continue;
      }
      String name = field.name;
      if (Attributes.ATTRIBUTES.contains(name)
          || Attributes.ATTRIBUTES_FOR_REPORT.contains(name)
          || Attributes.ATTRIBUTES_FOR_WORD_INFO.contains(name)
          || !field.hasDocValues()) {
        // it's a word attribute
        continue;
      }
      Element attrNode = _document.createElement("attr");
      attrNode.setAttribute("name", name);
      attrNode.setAttribute(
          "value", Attributes.getDocAttribute(segmentReader, inDescriptor.localId, name));
      attributesNode.appendChild(attrNode);
    }
  }
Code Example #8
  @Override
  public BytesValues getBytesValues(boolean needsHashes) {
    // if you want hashes to be cached, you should store them on disk alongside the values rather
    // than loading them into memory here - not supported for now, and probably not useful since
    // this field data only applies to _id and _uid?
    final BinaryDocValues values;
    final Bits docsWithField;
    try {
      final BinaryDocValues v = reader.getBinaryDocValues(field);
      if (v == null) {
        // segment has no value
        values = DocValues.EMPTY_BINARY;
        docsWithField = new Bits.MatchNoBits(reader.maxDoc());
      } else {
        values = v;
        final Bits b = reader.getDocsWithField(field);
        docsWithField = b == null ? new Bits.MatchAllBits(reader.maxDoc()) : b;
      }
    } catch (IOException e) {
      throw new ElasticsearchIllegalStateException("Cannot load doc values", e);
    }

    return new BytesValues(false) {

      @Override
      public int setDocument(int docId) {
        this.docId = docId;
        return docsWithField.get(docId) ? 1 : 0;
      }

      @Override
      public BytesRef nextValue() {
        values.get(docId, scratch);
        return scratch;
      }
    };
  }
Code Example #9
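  // Resolves each primary key (a term in the "id" field) to a global, top-level doc ID by probing
  // every segment; entries that are not found (or whose docs are deleted) stay -1.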
  protected int[] lookupDocIdByPK(final IndexSearcher searcher, final String... ids)
      throws IOException {
    final List<AtomicReaderContext> subReaders = searcher.getIndexReader().leaves();
    final TermsEnum[] termsEnums = new TermsEnum[subReaders.size()];
    final DocsEnum[] docsEnums = new DocsEnum[subReaders.size()];
    for (int subIDX = 0; subIDX < subReaders.size(); subIDX++) {
      termsEnums[subIDX] = subReaders.get(subIDX).reader().fields().terms("id").iterator(null);
    }

    int[] results = new int[ids.length];

    for (int i = 0; i < results.length; i++) {
      results[i] = -1;
    }

    for (int idx = 0; idx < ids.length; idx++) {
      int base = 0;
      final BytesRef id = new BytesRef(ids[idx]);
      for (int subIDX = 0; subIDX < subReaders.size(); subIDX++) {
        final AtomicReader sub = subReaders.get(subIDX).reader();
        final TermsEnum termsEnum = termsEnums[subIDX];
        if (termsEnum.seekExact(id, false)) {
          final DocsEnum docs =
              docsEnums[subIDX] = termsEnum.docs(sub.getLiveDocs(), docsEnums[subIDX], 0);
          if (docs != null) {
            final int docID = docs.nextDoc();
            if (docID != DocIdSetIterator.NO_MORE_DOCS) {
              results[idx] = base + docID;
              break;
            }
          }
        }
        base += sub.maxDoc();
      }
    }

    return results;
  }
Code Example #10
File: HighFreqTerms.java (project: shaie/lucene-solr)
  public static long getTotalTermFreq(IndexReader reader, Term term) throws Exception {
    long totalTF = 0L;
    for (final AtomicReaderContext ctx : reader.leaves()) {
      AtomicReader r = ctx.reader();
      if (!r.hasDeletions()) {
        // TODO: we could do this up front, during the scan
        // (next()), instead of after-the-fact here w/ seek,
        // if the codec supports it and there are no del
        // docs...
        final long totTF = r.totalTermFreq(term);
        if (totTF != -1) {
          totalTF += totTF;
          continue;
        } // otherwise we fall-through
      }
      // note: what should we do if field omits freqs? currently it counts as 1...
      DocsEnum de = r.termDocsEnum(term);
      if (de != null) {
        while (de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) totalTF += de.freq();
      }
    }

    return totalTF;
  }
Code Example #11
  @Test
  public void testDoubles() throws Exception {
    List<Integer> docs = Arrays.asList(1, 5, 7);

    DoubleOpenHashSet hTerms = new DoubleOpenHashSet();
    List<Double> cTerms = new ArrayList<Double>(docs.size());
    for (int i = 0; i < docs.size(); i++) {
      double term = Double.valueOf(docs.get(i));
      hTerms.add(term);
      cTerms.add(term);
    }

    FieldDataTermsFilter hFilter = FieldDataTermsFilter.newDoubles(getFieldData(dblMapper), hTerms);

    int size = reader.maxDoc();
    FixedBitSet result = new FixedBitSet(size);

    result.clear(0, size);
    assertThat(result.cardinality(), equalTo(0));
    result.or(hFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
    assertThat(result.cardinality(), equalTo(docs.size()));
    for (int i = 0; i < reader.maxDoc(); i++) {
      assertThat(result.get(i), equalTo(docs.contains(i)));
    }

    // filter from mapper
    result.clear(0, size);
    assertThat(result.cardinality(), equalTo(0));
    result.or(
        dblMapper
            .termsFilter(ifdService, cTerms, null)
            .getDocIdSet(reader.getContext(), reader.getLiveDocs())
            .iterator());
    assertThat(result.cardinality(), equalTo(docs.size()));
    for (int i = 0; i < reader.maxDoc(); i++) {
      assertThat(result.get(i), equalTo(docs.contains(i)));
    }

    hFilter = FieldDataTermsFilter.newDoubles(getFieldData(lngMapper), hTerms);
    assertNull(hFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()));
  }
Code Example #12
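  // Collects all stored documents matching the given term across every segment, loading only the
  // fields accepted by the callback (every field when the callback is null).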
  private List<Document> lookupDocs(Term term, final LoadFieldCallback lfc) throws IOException {
    final List<Document> documents = new ArrayList<Document>();
    final TermFilter tf = new TermFilter(term);
    try {
      for (AtomicReaderContext arc : searcher.getIndexReader().leaves()) {
        AtomicReader ar = arc.reader();
        Bits liveDocs = ar.getLiveDocs();
        DocIdSet docSet = tf.getDocIdSet(arc, liveDocs);
        if (docSet != null) {
          DocIdSetIterator disi = docSet.iterator();
          if (disi != null) {
            int docId;
            while ((docId = disi.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
              DocumentStoredFieldVisitor fv =
                  new DocumentStoredFieldVisitor() {
                    @Override
                    public StoredFieldVisitor.Status needsField(FieldInfo fieldInfo)
                        throws IOException {
                      if (lfc == null || lfc.loadField(fieldInfo.name)) {
                        return StoredFieldVisitor.Status.YES;
                      }
                      return StoredFieldVisitor.Status.NO;
                    }
                  };
              ar.document(docId, fv);
              documents.add(fv.getDocument());
            }
          }
        }
      }
    } catch (IOException io) {
      throw new IndexException(io);
    }

    return documents;
  }
Code Example #13
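 // Loads the doc's stored fields when the fields visitor is enabled, positions all collector
 // expressions on the doc, and evaluates the condition; a null condition result counts as no match.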
 @Override
 protected boolean matchDoc(int doc) {
   if (fieldsVisitorEnabled) {
     fieldsVisitor.reset();
     try {
       reader.document(doc, fieldsVisitor);
     } catch (IOException e) {
       throw Throwables.propagate(e);
     }
   }
   for (LuceneCollectorExpression expression : expressions) {
     expression.setNextDocId(doc);
   }
   Boolean value = condition.value();
   if (value == null) {
     return false;
   }
   return value;
 }
Code Example #14
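    // Wraps the given TermsEnum in a FrequencyFilter when the segment holds more than
    // "min_segment_size" docs; "min"/"max" may be absolute frequencies (>= 1) or fractions of
    // the segment's doc count.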
    public static TermsEnum filter(
        TermsEnum toFilter, Terms terms, AtomicReader reader, Settings settings)
        throws IOException {
      int docCount = terms.getDocCount();
      if (docCount == -1) {
        docCount = reader.maxDoc();
      }
      final double minFrequency = settings.getAsDouble("min", 0d);
      final double maxFrequency = settings.getAsDouble("max", docCount + 1d);
      final int minSegmentSize = settings.getAsInt("min_segment_size", 0);
      if (minSegmentSize < docCount) {
        final int minFreq =
            minFrequency >= 1.0 ? (int) minFrequency : (int) (docCount * minFrequency);
        final int maxFreq =
            maxFrequency >= 1.0 ? (int) maxFrequency : (int) (docCount * maxFrequency);
        assert minFreq < maxFreq;
        return new FrequencyFilter(toFilter, minFreq, maxFreq);
      }

      return toFilter;
    }
Code Example #15
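  // Builds per-segment field data for a double field: each term is parsed to a double and the
  // matching docs receive ordinals; the result is specialized for single- vs. multi-valued docs
  // and for dense vs. sparse value coverage.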
  @Override
  public DoubleArrayAtomicFieldData loadDirect(AtomicReaderContext context) throws Exception {
    AtomicReader reader = context.reader();

    Terms terms = reader.terms(getFieldNames().indexName());
    if (terms == null) {
      return DoubleArrayAtomicFieldData.EMPTY;
    }

    // TODO: how can we guess the number of terms? numerics end up creating more terms per value...
    final TDoubleArrayList values = new TDoubleArrayList();
    ArrayList<int[]> ordinals = new ArrayList<int[]>();
    int[] idx = new int[reader.maxDoc()];
    ordinals.add(new int[reader.maxDoc()]);

    values.add(0); // first "t" indicates null value
    int termOrd = 1; // current term number

    TermsEnum termsEnum = terms.iterator(null);
    try {
      DocsEnum docsEnum = null;
      for (BytesRef term = termsEnum.next(); term != null; term = termsEnum.next()) {
        values.add(FieldCache.NUMERIC_UTILS_DOUBLE_PARSER.parseDouble(term));
        docsEnum = termsEnum.docs(reader.getLiveDocs(), docsEnum, 0);
        for (int docId = docsEnum.nextDoc();
            docId != DocsEnum.NO_MORE_DOCS;
            docId = docsEnum.nextDoc()) {
          int[] ordinal;
          if (idx[docId] >= ordinals.size()) {
            ordinal = new int[reader.maxDoc()];
            ordinals.add(ordinal);
          } else {
            ordinal = ordinals.get(idx[docId]);
          }
          ordinal[docId] = termOrd;
          idx[docId]++;
        }
        termOrd++;
      }
    } catch (RuntimeException e) {
      if (e.getClass().getName().endsWith("StopFillCacheException")) {
        // all is well, in case numeric parsers are used.
      } else {
        throw e;
      }
    }

    if (ordinals.size() == 1) {
      int[] nativeOrdinals = ordinals.get(0);
      FixedBitSet set = new FixedBitSet(reader.maxDoc());
      double[] sValues = new double[reader.maxDoc()];
      boolean allHaveValue = true;
      for (int i = 0; i < nativeOrdinals.length; i++) {
        int nativeOrdinal = nativeOrdinals[i];
        if (nativeOrdinal == 0) {
          allHaveValue = false;
        } else {
          set.set(i);
          sValues[i] = values.get(nativeOrdinal);
        }
      }
      if (allHaveValue) {
        return new DoubleArrayAtomicFieldData.Single(sValues, reader.maxDoc());
      } else {
        return new DoubleArrayAtomicFieldData.SingleFixedSet(sValues, reader.maxDoc(), set);
      }
    } else {
      int[][] nativeOrdinals = new int[ordinals.size()][];
      for (int i = 0; i < nativeOrdinals.length; i++) {
        nativeOrdinals[i] = ordinals.get(i);
      }
      return new DoubleArrayAtomicFieldData.WithOrdinals(
          values.toArray(new double[values.size()]),
          reader.maxDoc(),
          Ordinals.Factories.createFromFlatOrdinals(
              nativeOrdinals, termOrd, fieldDataType.getSettings()));
    }
  }
Code Example #16
  @Override
  public void execute(String[] args, PrintStream out) throws Exception {
    String field = null;
    String termVal = null;
    if (args != null && args.length > 0) {
      field = args[0];
    }

    if (field != null) {
      String[] parts = field.split(":");
      if (parts.length > 1) {
        field = parts[0];
        termVal = parts[1];
      }
    }

    if (field == null || termVal == null) {
      out.println("usage: field:term");
      out.flush();
      return;
    }

    IndexReader reader = ctx.getIndexReader();
    List<AtomicReaderContext> leaves = reader.leaves();
    int docBase = 0;
    int numPerPage = 20;
    for (AtomicReaderContext leaf : leaves) {
      AtomicReader atomicReader = leaf.reader();
      Terms terms = atomicReader.terms(field);
      if (terms == null) {
        continue;
      }
      boolean hasPositions = terms.hasPositions();
      if (terms != null && termVal != null) {
        TermsEnum te = terms.iterator(null);
        int count = 0;
        if (te.seekExact(new BytesRef(termVal), true)) {

          if (hasPositions) {
            DocsAndPositionsEnum iter = te.docsAndPositions(atomicReader.getLiveDocs(), null);
            int docid;
            while ((docid = iter.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
              count++;
              out.print("docid: " + (docid + docBase) + ", freq: " + iter.freq() + ", ");
              for (int i = 0; i < iter.freq(); ++i) {
                out.print("pos " + i + ": " + iter.nextPosition());
                BytesRef payload = iter.getPayload();
                if (payload != null) {
                  out.print(",payload: " + payload);
                }
                out.print(";");
              }
              out.println();
              if (ctx.isInteractiveMode()) {
                if (count % numPerPage == 0) {
                  out.println("Ctrl-D to break");
                  int ch = System.in.read();
                  if (ch == -1) {
                    out.flush();
                    return;
                  }
                }
              }
            }
          } else {
            DocsEnum iter = te.docs(atomicReader.getLiveDocs(), null);

            int docid;
            while ((docid = iter.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
              count++;
              out.println("docid: " + (docid + docBase));
              if (ctx.isInteractiveMode()) {
                if (count % numPerPage == 0) {
                  out.println("Ctrl-D to break");
                  int ch = System.in.read();
                  if (ch == -1) {
                    out.flush();
                    return;
                  }
                }
              }
            }
          }
        }
      }
      docBase += atomicReader.maxDoc();
    }
  }
Code Example #17
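  // Builds per-segment packed-array numeric field data: terms are decoded to monotonically
  // increasing longs; for single-valued fields the ordinals may be flattened into one packed
  // array (with a sentinel for missing values) when that is estimated to use less memory.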
  @Override
  public AtomicNumericFieldData loadDirect(AtomicReaderContext context) throws Exception {
    AtomicReader reader = context.reader();
    Terms terms = reader.terms(getFieldNames().indexName());
    PackedArrayAtomicFieldData data = null;
    PackedArrayEstimator estimator =
        new PackedArrayEstimator(breakerService.getBreaker(), getNumericType());
    if (terms == null) {
      data = PackedArrayAtomicFieldData.empty(reader.maxDoc());
      estimator.adjustForNoTerms(data.getMemorySizeInBytes());
      return data;
    }
    // TODO: how can we guess the number of terms? numerics end up creating more terms per value...
    // Lucene encodes numeric data so that the lexicographical (encoded) order matches the integer
    // order so we know the sequence of
    // longs is going to be monotonically increasing
    final MonotonicAppendingLongBuffer values = new MonotonicAppendingLongBuffer();

    final float acceptableTransientOverheadRatio =
        fieldDataType
            .getSettings()
            .getAsFloat(
                "acceptable_transient_overhead_ratio",
                OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO);
    TermsEnum termsEnum = estimator.beforeLoad(terms);
    boolean success = false;
    try (OrdinalsBuilder builder =
        new OrdinalsBuilder(-1, reader.maxDoc(), acceptableTransientOverheadRatio)) {
      BytesRefIterator iter = builder.buildFromTerms(termsEnum);
      BytesRef term;
      assert !getNumericType().isFloatingPoint();
      final boolean indexedAsLong = getNumericType().requiredBits() > 32;
      while ((term = iter.next()) != null) {
        final long value =
            indexedAsLong
                ? NumericUtils.prefixCodedToLong(term)
                : NumericUtils.prefixCodedToInt(term);
        assert values.size() == 0 || value > values.get(values.size() - 1);
        values.add(value);
      }
      Ordinals build = builder.build(fieldDataType.getSettings());

      if (!build.isMultiValued() && CommonSettings.removeOrdsOnSingleValue(fieldDataType)) {
        Docs ordinals = build.ordinals();
        final FixedBitSet set = builder.buildDocsWithValuesSet();

        long minValue, maxValue;
        minValue = maxValue = 0;
        if (values.size() > 0) {
          minValue = values.get(0);
          maxValue = values.get(values.size() - 1);
        }

        // Encode document without a value with a special value
        long missingValue = 0;
        if (set != null) {
          if ((maxValue - minValue + 1) == values.size()) {
            // values are dense
            if (minValue > Long.MIN_VALUE) {
              missingValue = --minValue;
            } else {
              assert maxValue != Long.MAX_VALUE;
              missingValue = ++maxValue;
            }
          } else {
            for (long i = 1; i < values.size(); ++i) {
              if (values.get(i) > values.get(i - 1) + 1) {
                missingValue = values.get(i - 1) + 1;
                break;
              }
            }
          }
          missingValue -= minValue; // delta
        }

        final long delta = maxValue - minValue;
        final int bitsRequired = delta < 0 ? 64 : PackedInts.bitsRequired(delta);
        final float acceptableOverheadRatio =
            fieldDataType.getSettings().getAsFloat("acceptable_overhead_ratio", PackedInts.DEFAULT);
        final PackedInts.FormatAndBits formatAndBits =
            PackedInts.fastestFormatAndBits(reader.maxDoc(), bitsRequired, acceptableOverheadRatio);

        // there's a sweet spot where, due to a low unique value count, using ordinals will
        // consume less memory
        final long singleValuesSize =
            formatAndBits.format.longCount(
                    PackedInts.VERSION_CURRENT, reader.maxDoc(), formatAndBits.bitsPerValue)
                * 8L;
        final long uniqueValuesSize = values.ramBytesUsed();
        final long ordinalsSize = build.getMemorySizeInBytes();

        if (uniqueValuesSize + ordinalsSize < singleValuesSize) {
          data = new PackedArrayAtomicFieldData.WithOrdinals(values, reader.maxDoc(), build);
        } else {
          final PackedInts.Mutable sValues =
              PackedInts.getMutable(reader.maxDoc(), bitsRequired, acceptableOverheadRatio);
          if (missingValue != 0) {
            sValues.fill(0, sValues.size(), missingValue);
          }
          for (int i = 0; i < reader.maxDoc(); i++) {
            final long ord = ordinals.getOrd(i);
            if (ord != Ordinals.MISSING_ORDINAL) {
              sValues.set(i, values.get(ord - 1) - minValue);
            }
          }
          if (set == null) {
            data =
                new PackedArrayAtomicFieldData.Single(
                    sValues, minValue, reader.maxDoc(), ordinals.getNumOrds());
          } else {
            data =
                new PackedArrayAtomicFieldData.SingleSparse(
                    sValues, minValue, reader.maxDoc(), missingValue, ordinals.getNumOrds());
          }
        }
      } else {
        data = new PackedArrayAtomicFieldData.WithOrdinals(values, reader.maxDoc(), build);
      }

      success = true;
      return data;
    } finally {
      if (!success) {
        // If something went wrong, unwind any current estimations we've made
        estimator.afterLoad(termsEnum, 0);
      } else {
        // Adjust as usual, based on the actual size of the field data
        estimator.afterLoad(termsEnum, data.getMemorySizeInBytes());
      }
    }
  }
Code Example #18
  public void testCopyFieldsAndFieldBoostsAndDocBoosts() throws Exception {
    SolrCore core = h.getCore();
    IndexSchema schema = core.getLatestSchema();
    SolrInputDocument doc = new SolrInputDocument();

    final float DOC_BOOST = 3.0F;
    doc.setDocumentBoost(DOC_BOOST);
    doc.addField("id", "42");

    SolrInputField inTitle = new SolrInputField("title");
    inTitle.addValue("titleA", 2.0F);
    inTitle.addValue("titleB", 7.0F);
    final float TITLE_BOOST = 2.0F * 7.0F;
    assertEquals(TITLE_BOOST, inTitle.getBoost(), 0.0F);
    doc.put(inTitle.getName(), inTitle);

    SolrInputField inFoo = new SolrInputField("foo_t");
    inFoo.addValue("summer time", 1.0F);
    inFoo.addValue("in the city", 5.0F);
    inFoo.addValue("living is easy", 11.0F);
    final float FOO_BOOST = 1.0F * 5.0F * 11.0F;
    assertEquals(FOO_BOOST, inFoo.getBoost(), 0.0F);
    doc.put(inFoo.getName(), inFoo);

    Document out = DocumentBuilder.toDocument(doc, schema);

    IndexableField[] outTitle = out.getFields(inTitle.getName());
    assertEquals("wrong number of title values", 2, outTitle.length);

    IndexableField[] outNoNorms = out.getFields("title_stringNoNorms");
    assertEquals("wrong number of nonorms values", 2, outNoNorms.length);

    IndexableField[] outFoo = out.getFields(inFoo.getName());
    assertEquals("wrong number of foo values", 3, outFoo.length);

    IndexableField[] outText = out.getFields("text");
    assertEquals("wrong number of text values", 5, outText.length);

    // since Lucene no longer has native document boosts, we should find
    // the doc boost multiplied into the boost on the first field value
    // of each field.  All other field values should be 1.0f
    // (lucene will multiply all of the field value boosts later)
    assertEquals(TITLE_BOOST * DOC_BOOST, outTitle[0].boost(), 0.0F);
    assertEquals(1.0F, outTitle[1].boost(), 0.0F);
    //
    assertEquals(FOO_BOOST * DOC_BOOST, outFoo[0].boost(), 0.0F);
    assertEquals(1.0F, outFoo[1].boost(), 0.0F);
    assertEquals(1.0F, outFoo[2].boost(), 0.0F);
    //
    assertEquals(TITLE_BOOST * DOC_BOOST, outText[0].boost(), 0.0F);
    assertEquals(1.0F, outText[1].boost(), 0.0F);
    assertEquals(FOO_BOOST, outText[2].boost(), 0.0F);
    assertEquals(1.0F, outText[3].boost(), 0.0F);
    assertEquals(1.0F, outText[4].boost(), 0.0F);

    // copyField dest with no norms should not have received any boost
    assertEquals(1.0F, outNoNorms[0].boost(), 0.0F);
    assertEquals(1.0F, outNoNorms[1].boost(), 0.0F);

    // now index that SolrInputDocument to check the computed norms

    assertU(adoc(doc));
    assertU(commit());

    SolrQueryRequest req = req("q", "id:42");
    try {
      // very hack-ish

      SolrQueryResponse rsp = new SolrQueryResponse();
      core.execute(core.getRequestHandler(req.getParams().get(CommonParams.QT)), req, rsp);

      DocList dl = ((ResultContext) rsp.getValues().get("response")).docs;
      assertTrue("can't find the doc we just added", 1 == dl.size());
      int docid = dl.iterator().nextDoc();

      SolrIndexSearcher searcher = req.getSearcher();
      AtomicReader reader =
          SlowCompositeReaderWrapper.wrap(searcher.getTopReaderContext().reader());

      assertTrue(
          "similarity doesn't extend DefaultSimilarity, "
              + "config or defaults have changed since test was written",
          searcher.getSimilarity() instanceof DefaultSimilarity);

      DefaultSimilarity sim = (DefaultSimilarity) searcher.getSimilarity();

      NumericDocValues titleNorms = reader.getNormValues("title");
      NumericDocValues fooNorms = reader.getNormValues("foo_t");
      NumericDocValues textNorms = reader.getNormValues("text");

      assertEquals(expectedNorm(sim, 2, TITLE_BOOST * DOC_BOOST), titleNorms.get(docid));

      assertEquals(expectedNorm(sim, 8 - 3, FOO_BOOST * DOC_BOOST), fooNorms.get(docid));

      assertEquals(
          expectedNorm(sim, 2 + 8 - 3, TITLE_BOOST * FOO_BOOST * DOC_BOOST), textNorms.get(docid));

    } finally {
      req.close();
    }
  }
Code Example #19
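  // Highlights one field for a batch of docs supplied in increasing doc-id order: the terms enum
  // and postings are reused within a segment, a synthetic postings enum is appended for
  // multi-term queries, and the formatted passages (or a default summary) are collected per doc.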
  private Map<Integer, Object> highlightField(
      String field,
      String contents[],
      BreakIterator bi,
      BytesRef terms[],
      int[] docids,
      List<AtomicReaderContext> leaves,
      int maxPassages,
      Query query)
      throws IOException {
    Map<Integer, Object> highlights = new HashMap<>();

    PassageFormatter fieldFormatter = getFormatter(field);
    if (fieldFormatter == null) {
      throw new NullPointerException("PassageFormatter cannot be null");
    }

    // check if we should do any multiterm processing
    Analyzer analyzer = getIndexAnalyzer(field);
    CharacterRunAutomaton automata[] = new CharacterRunAutomaton[0];
    if (analyzer != null) {
      automata = MultiTermHighlighting.extractAutomata(query, field);
    }

    // resize 'terms', where the last term is the multiterm matcher
    if (automata.length > 0) {
      BytesRef newTerms[] = new BytesRef[terms.length + 1];
      System.arraycopy(terms, 0, newTerms, 0, terms.length);
      terms = newTerms;
    }

    // we are processing in increasing docid order, so we only need to reinitialize stuff on segment
    // changes
    // otherwise, we will just advance() existing enums to the new document in the same segment.
    DocsAndPositionsEnum postings[] = null;
    TermsEnum termsEnum = null;
    int lastLeaf = -1;

    for (int i = 0; i < docids.length; i++) {
      String content = contents[i];
      if (content.length() == 0) {
        continue; // nothing to do
      }
      bi.setText(content);
      int doc = docids[i];
      int leaf = ReaderUtil.subIndex(doc, leaves);
      AtomicReaderContext subContext = leaves.get(leaf);
      AtomicReader r = subContext.reader();

      assert leaf >= lastLeaf; // increasing order

      // if the segment has changed, we must initialize new enums.
      if (leaf != lastLeaf) {
        Terms t = r.terms(field);
        if (t != null) {
          termsEnum = t.iterator(null);
          postings = new DocsAndPositionsEnum[terms.length];
        }
      }
      if (termsEnum == null) {
        continue; // no terms for this field, nothing to do
      }

      // if there are multi-term matches, we have to initialize the "fake" enum for each document
      if (automata.length > 0) {
        DocsAndPositionsEnum dp =
            MultiTermHighlighting.getDocsEnum(analyzer.tokenStream(field, content), automata);
        dp.advance(doc - subContext.docBase);
        postings[terms.length - 1] = dp; // last term is the multiterm matcher
      }

      Passage passages[] =
          highlightDoc(
              field,
              terms,
              content.length(),
              bi,
              doc - subContext.docBase,
              termsEnum,
              postings,
              maxPassages);

      if (passages.length == 0) {
        // no passages were returned, so ask for a default summary
        passages = getEmptyHighlight(field, bi, maxPassages);
      }

      if (passages.length > 0) {
        highlights.put(doc, fieldFormatter.format(passages, content));
      }

      lastLeaf = leaf;
    }

    return highlights;
  }
Code Example #20
  @Test
  public void testRandom() throws Exception {
    Directory directory = newDirectory();
    RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
    int numUniqueChildValues = 1 + random().nextInt(TEST_NIGHTLY ? 10000 : 1000);
    String[] childValues = new String[numUniqueChildValues];
    for (int i = 0; i < numUniqueChildValues; i++) {
      childValues[i] = Integer.toString(i);
    }

    IntOpenHashSet filteredOrDeletedDocs = new IntOpenHashSet();
    int childDocId = 0;
    int numParentDocs = 1 + random().nextInt(TEST_NIGHTLY ? 20000 : 1000);
    ObjectObjectOpenHashMap<String, NavigableSet<String>> childValueToParentIds =
        new ObjectObjectOpenHashMap<String, NavigableSet<String>>();
    for (int parentDocId = 0; parentDocId < numParentDocs; parentDocId++) {
      boolean markParentAsDeleted = rarely();
      boolean filterMe = rarely();
      String parent = Integer.toString(parentDocId);
      Document document = new Document();
      document.add(
          new StringField(UidFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.YES));
      document.add(new StringField(TypeFieldMapper.NAME, "parent", Field.Store.NO));
      if (markParentAsDeleted) {
        filteredOrDeletedDocs.add(parentDocId);
        document.add(new StringField("delete", "me", Field.Store.NO));
      }
      if (filterMe) {
        filteredOrDeletedDocs.add(parentDocId);
        document.add(new StringField("filter", "me", Field.Store.NO));
      }
      indexWriter.addDocument(document);

      int numChildDocs;
      if (rarely()) {
        numChildDocs = random().nextInt(TEST_NIGHTLY ? 100 : 25);
      } else {
        numChildDocs = random().nextInt(TEST_NIGHTLY ? 40 : 10);
      }
      for (int i = 0; i < numChildDocs; i++) {
        boolean markChildAsDeleted = rarely();
        String childValue = childValues[random().nextInt(childValues.length)];

        document = new Document();
        document.add(
            new StringField(
                UidFieldMapper.NAME,
                Uid.createUid("child", Integer.toString(childDocId)),
                Field.Store.NO));
        document.add(new StringField(TypeFieldMapper.NAME, "child", Field.Store.NO));
        document.add(
            new StringField(
                ParentFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.NO));
        document.add(new StringField("field1", childValue, Field.Store.NO));
        if (markChildAsDeleted) {
          document.add(new StringField("delete", "me", Field.Store.NO));
        }
        indexWriter.addDocument(document);

        if (!markChildAsDeleted) {
          NavigableSet<String> parentIds;
          if (childValueToParentIds.containsKey(childValue)) {
            parentIds = childValueToParentIds.lget();
          } else {
            childValueToParentIds.put(childValue, parentIds = new TreeSet<String>());
          }
          if (!markParentAsDeleted && !filterMe) {
            parentIds.add(parent);
          }
        }
      }
    }

    // Delete docs that are marked to be deleted.
    indexWriter.deleteDocuments(new Term("delete", "me"));

    indexWriter.commit();
    IndexReader indexReader = DirectoryReader.open(directory);
    IndexSearcher searcher = new IndexSearcher(indexReader);
    Engine.Searcher engineSearcher =
        new Engine.SimpleSearcher(ChildrenConstantScoreQueryTests.class.getSimpleName(), searcher);
    ((TestSearchContext) SearchContext.current())
        .setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));

    Filter rawParentFilter = new TermFilter(new Term(TypeFieldMapper.NAME, "parent"));
    Filter rawFilterMe = new NotFilter(new TermFilter(new Term("filter", "me")));
    int max = numUniqueChildValues / 4;
    for (int i = 0; i < max; i++) {
      // Randomly pick a cached version: there is specific logic inside ChildrenQuery that deals
      // with the fact that deletes are applied at the top level when filters are cached.
      Filter parentFilter;
      if (random().nextBoolean()) {
        parentFilter = SearchContext.current().filterCache().cache(rawParentFilter);
      } else {
        parentFilter = rawParentFilter;
      }

      // Using this in a filtered query will exercise Scorer#advance(..) and also keep the
      // Weight#scorer from receiving live docs as acceptedDocs
      Filter filterMe;
      if (random().nextBoolean()) {
        filterMe = SearchContext.current().filterCache().cache(rawFilterMe);
      } else {
        filterMe = rawFilterMe;
      }

      // Simulate a parent update
      if (random().nextBoolean()) {
        int numberOfUpdates = 1 + random().nextInt(TEST_NIGHTLY ? 25 : 5);
        for (int j = 0; j < numberOfUpdates; j++) {
          int parentId;
          do {
            parentId = random().nextInt(numParentDocs);
          } while (filteredOrDeletedDocs.contains(parentId));

          String parentUid = Uid.createUid("parent", Integer.toString(parentId));
          indexWriter.deleteDocuments(new Term(UidFieldMapper.NAME, parentUid));

          Document document = new Document();
          document.add(new StringField(UidFieldMapper.NAME, parentUid, Field.Store.YES));
          document.add(new StringField(TypeFieldMapper.NAME, "parent", Field.Store.NO));
          indexWriter.addDocument(document);
        }

        indexReader.close();
        indexReader = DirectoryReader.open(indexWriter.w, true);
        searcher = new IndexSearcher(indexReader);
        engineSearcher =
            new Engine.SimpleSearcher(
                ChildrenConstantScoreQueryTests.class.getSimpleName(), searcher);
        ((TestSearchContext) SearchContext.current())
            .setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));
      }

      String childValue = childValues[random().nextInt(numUniqueChildValues)];
      TermQuery childQuery = new TermQuery(new Term("field1", childValue));
      int shortCircuitParentDocSet = random().nextInt(numParentDocs);
      Filter nonNestedDocsFilter = random().nextBoolean() ? NonNestedDocsFilter.INSTANCE : null;
      Query query;
      if (random().nextBoolean()) {
        // Usage in HasChildQueryParser
        query =
            new ChildrenConstantScoreQuery(
                childQuery,
                "parent",
                "child",
                parentFilter,
                shortCircuitParentDocSet,
                nonNestedDocsFilter);
      } else {
        // Usage in HasChildFilterParser
        query =
            new XConstantScoreQuery(
                new CustomQueryWrappingFilter(
                    new ChildrenConstantScoreQuery(
                        childQuery,
                        "parent",
                        "child",
                        parentFilter,
                        shortCircuitParentDocSet,
                        nonNestedDocsFilter)));
      }
      query = new XFilteredQuery(query, filterMe);
      BitSetCollector collector = new BitSetCollector(indexReader.maxDoc());
      searcher.search(query, collector);
      FixedBitSet actualResult = collector.getResult();

      FixedBitSet expectedResult = new FixedBitSet(indexReader.maxDoc());
      if (childValueToParentIds.containsKey(childValue)) {
        AtomicReader slowAtomicReader = SlowCompositeReaderWrapper.wrap(indexReader);
        Terms terms = slowAtomicReader.terms(UidFieldMapper.NAME);
        if (terms != null) {
          NavigableSet<String> parentIds = childValueToParentIds.lget();
          TermsEnum termsEnum = terms.iterator(null);
          DocsEnum docsEnum = null;
          for (String id : parentIds) {
            TermsEnum.SeekStatus seekStatus =
                termsEnum.seekCeil(Uid.createUidAsBytes("parent", id));
            if (seekStatus == TermsEnum.SeekStatus.FOUND) {
              docsEnum =
                  termsEnum.docs(slowAtomicReader.getLiveDocs(), docsEnum, DocsEnum.FLAG_NONE);
              expectedResult.set(docsEnum.nextDoc());
            } else if (seekStatus == TermsEnum.SeekStatus.END) {
              break;
            }
          }
        }
      }

      assertBitSet(actualResult, expectedResult, searcher);
    }

    indexWriter.close();
    indexReader.close();
    directory.close();
  }
Code Example #21
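  // Enumerates indexed terms for each requested field, honoring lower/upper bounds, prefix,
  // regexp, and min/max doc-frequency constraints, and reports them either in index order or
  // sorted by document frequency.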
  @Override
  public void process(ResponseBuilder rb) throws IOException {
    SolrParams params = rb.req.getParams();
    if (!params.getBool(TermsParams.TERMS, false)) return;

    String[] fields = params.getParams(TermsParams.TERMS_FIELD);

    NamedList<Object> termsResult = new SimpleOrderedMap<>();
    rb.rsp.add("terms", termsResult);

    if (fields == null || fields.length == 0) return;

    int limit = params.getInt(TermsParams.TERMS_LIMIT, 10);
    if (limit < 0) {
      limit = Integer.MAX_VALUE;
    }

    String lowerStr = params.get(TermsParams.TERMS_LOWER);
    String upperStr = params.get(TermsParams.TERMS_UPPER);
    boolean upperIncl = params.getBool(TermsParams.TERMS_UPPER_INCLUSIVE, false);
    boolean lowerIncl = params.getBool(TermsParams.TERMS_LOWER_INCLUSIVE, true);
    boolean sort =
        !TermsParams.TERMS_SORT_INDEX.equals(
            params.get(TermsParams.TERMS_SORT, TermsParams.TERMS_SORT_COUNT));
    int freqmin = params.getInt(TermsParams.TERMS_MINCOUNT, 1);
    int freqmax = params.getInt(TermsParams.TERMS_MAXCOUNT, UNLIMITED_MAX_COUNT);
    if (freqmax < 0) {
      freqmax = Integer.MAX_VALUE;
    }
    String prefix = params.get(TermsParams.TERMS_PREFIX_STR);
    String regexp = params.get(TermsParams.TERMS_REGEXP_STR);
    Pattern pattern = regexp != null ? Pattern.compile(regexp, resolveRegexpFlags(params)) : null;

    boolean raw = params.getBool(TermsParams.TERMS_RAW, false);

    final AtomicReader indexReader = rb.req.getSearcher().getAtomicReader();
    Fields lfields = indexReader.fields();

    for (String field : fields) {
      NamedList<Integer> fieldTerms = new NamedList<>();
      termsResult.add(field, fieldTerms);

      Terms terms = lfields == null ? null : lfields.terms(field);
      if (terms == null) {
        // no terms for this field
        continue;
      }

      FieldType ft = raw ? null : rb.req.getSchema().getFieldTypeNoEx(field);
      if (ft == null) ft = new StrField();

      // prefix must currently be text
      BytesRef prefixBytes = prefix == null ? null : new BytesRef(prefix);

      BytesRef upperBytes = null;
      if (upperStr != null) {
        upperBytes = new BytesRef();
        ft.readableToIndexed(upperStr, upperBytes);
      }

      BytesRef lowerBytes;
      if (lowerStr == null) {
        // If no lower bound was specified, use the prefix
        lowerBytes = prefixBytes;
      } else {
        if (raw) {
          // TODO: how to handle binary? perhaps we don't for "raw"... or if the field exists
          // perhaps we detect if the FieldType is non-character and expect hex if so?
          lowerBytes = new BytesRef(lowerStr);
        } else {
          lowerBytes = new BytesRef();
          ft.readableToIndexed(lowerStr, lowerBytes);
        }
      }

      TermsEnum termsEnum = terms.iterator(null);
      BytesRef term = null;

      if (lowerBytes != null) {
        if (termsEnum.seekCeil(lowerBytes) == TermsEnum.SeekStatus.END) {
          termsEnum = null;
        } else {
          term = termsEnum.term();
          // Only advance the enum if we are excluding the lower bound and the lower Term actually
          // matches
          if (lowerIncl == false && term.equals(lowerBytes)) {
            term = termsEnum.next();
          }
        }
      } else {
        // position termsEnum on first term
        term = termsEnum.next();
      }

      int i = 0;
      BoundedTreeSet<CountPair<BytesRef, Integer>> queue =
          (sort ? new BoundedTreeSet<CountPair<BytesRef, Integer>>(limit) : null);
      CharsRef external = new CharsRef();
      while (term != null && (i < limit || sort)) {
        boolean externalized = false; // did we fill in "external" yet for this term?

        // stop if the prefix doesn't match
        if (prefixBytes != null && !StringHelper.startsWith(term, prefixBytes)) break;

        if (pattern != null) {
          // indexed text or external text?
          // TODO: support "raw" mode?
          ft.indexedToReadable(term, external);
          externalized = true;
          if (!pattern.matcher(external).matches()) {
            term = termsEnum.next();
            continue;
          }
        }

        if (upperBytes != null) {
          int upperCmp = term.compareTo(upperBytes);
          // if we are past the upper term, or equal to it (when the upper bound is exclusive), then stop.
          if (upperCmp > 0 || (upperCmp == 0 && !upperIncl)) break;
        }

        // This is a good term in the range.  Check if mincount/maxcount conditions are satisfied.
        int docFreq = termsEnum.docFreq();
        if (docFreq >= freqmin && docFreq <= freqmax) {
          // add the term to the list
          if (sort) {
            queue.add(new CountPair<>(BytesRef.deepCopyOf(term), docFreq));
          } else {

            // TODO: handle raw somehow
            if (!externalized) {
              ft.indexedToReadable(term, external);
            }
            fieldTerms.add(external.toString(), docFreq);
            i++;
          }
        }

        term = termsEnum.next();
      }

      if (sort) {
        for (CountPair<BytesRef, Integer> item : queue) {
          if (i >= limit) break;
          ft.indexedToReadable(item.key, external);
          fieldTerms.add(external.toString(), item.val);
          i++;
        }
      }
    }
  }
Code Example #22
  @Test
  public void testRandom() throws Exception {
    Directory directory = newDirectory();
    RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
    int numUniqueChildValues = 1 + random().nextInt(TEST_NIGHTLY ? 6000 : 600);
    String[] childValues = new String[numUniqueChildValues];
    for (int i = 0; i < numUniqueChildValues; i++) {
      childValues[i] = Integer.toString(i);
    }

    int childDocId = 0;
    int numParentDocs = 1 + random().nextInt(TEST_NIGHTLY ? 20000 : 1000);
    ObjectObjectOpenHashMap<String, NavigableMap<String, FloatArrayList>> childValueToParentIds =
        new ObjectObjectOpenHashMap<String, NavigableMap<String, FloatArrayList>>();
    for (int parentDocId = 0; parentDocId < numParentDocs; parentDocId++) {
      boolean markParentAsDeleted = rarely();
      String parent = Integer.toString(parentDocId);
      Document document = new Document();
      document.add(
          new StringField(UidFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.YES));
      document.add(new StringField(TypeFieldMapper.NAME, "parent", Field.Store.NO));
      if (markParentAsDeleted) {
        document.add(new StringField("delete", "me", Field.Store.NO));
      }
      indexWriter.addDocument(document);

      int numChildDocs = random().nextInt(TEST_NIGHTLY ? 100 : 25);
      for (int i = 0; i < numChildDocs; i++) {
        boolean markChildAsDeleted = rarely();
        String childValue = childValues[random().nextInt(childValues.length)];

        document = new Document();
        document.add(
            new StringField(
                UidFieldMapper.NAME,
                Uid.createUid("child", Integer.toString(childDocId)),
                Field.Store.NO));
        document.add(new StringField(TypeFieldMapper.NAME, "child", Field.Store.NO));
        document.add(
            new StringField(
                ParentFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.NO));
        document.add(new StringField("field1", childValue, Field.Store.NO));
        if (markChildAsDeleted) {
          document.add(new StringField("delete", "me", Field.Store.NO));
        }
        indexWriter.addDocument(document);

        if (!markChildAsDeleted) {
          NavigableMap<String, FloatArrayList> parentIdToChildScores;
          if (childValueToParentIds.containsKey(childValue)) {
            parentIdToChildScores = childValueToParentIds.lget();
          } else {
            childValueToParentIds.put(
                childValue, parentIdToChildScores = new TreeMap<String, FloatArrayList>());
          }
          if (!markParentAsDeleted) {
            FloatArrayList childScores = parentIdToChildScores.get(parent);
            if (childScores == null) {
              parentIdToChildScores.put(parent, childScores = new FloatArrayList());
            }
            childScores.add(1f);
          }
        }
      }
    }

    // Delete docs that are marked to be deleted.
    indexWriter.deleteDocuments(new Term("delete", "me"));

    indexWriter.close();
    IndexReader indexReader = DirectoryReader.open(directory);
    IndexSearcher searcher = new IndexSearcher(indexReader);
    Engine.Searcher engineSearcher =
        new Engine.SimpleSearcher(ChildrenQueryTests.class.getSimpleName(), searcher);
    ((TestSearchContext) SearchContext.current())
        .setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));

    TermFilter parentFilter = new TermFilter(new Term(TypeFieldMapper.NAME, "parent"));
    for (String childValue : childValues) {
      Query childQuery = new ConstantScoreQuery(new TermQuery(new Term("field1", childValue)));
      int shortCircuitParentDocSet = random().nextInt(numParentDocs);
      ScoreType scoreType = ScoreType.values()[random().nextInt(ScoreType.values().length)];
      Query query =
          new ChildrenQuery(
              "parent", "child", parentFilter, childQuery, scoreType, shortCircuitParentDocSet);
      BitSetCollector collector = new BitSetCollector(indexReader.maxDoc());
      int numHits = 1 + random().nextInt(25);
      TopScoreDocCollector actualTopDocsCollector = TopScoreDocCollector.create(numHits, false);
      searcher.search(query, MultiCollector.wrap(collector, actualTopDocsCollector));
      FixedBitSet actualResult = collector.getResult();

      FixedBitSet expectedResult = new FixedBitSet(indexReader.maxDoc());
      MockScorer mockScorer = new MockScorer(scoreType);
      TopScoreDocCollector expectedTopDocsCollector = TopScoreDocCollector.create(numHits, false);
      expectedTopDocsCollector.setScorer(mockScorer);
      if (childValueToParentIds.containsKey(childValue)) {
        AtomicReader slowAtomicReader = SlowCompositeReaderWrapper.wrap(indexReader);
        Terms terms = slowAtomicReader.terms(UidFieldMapper.NAME);
        if (terms != null) {
          NavigableMap<String, FloatArrayList> parentIdToChildScores = childValueToParentIds.lget();
          TermsEnum termsEnum = terms.iterator(null);
          DocsEnum docsEnum = null;
          for (Map.Entry<String, FloatArrayList> entry : parentIdToChildScores.entrySet()) {
            TermsEnum.SeekStatus seekStatus =
                termsEnum.seekCeil(Uid.createUidAsBytes("parent", entry.getKey()));
            if (seekStatus == TermsEnum.SeekStatus.FOUND) {
              docsEnum =
                  termsEnum.docs(slowAtomicReader.getLiveDocs(), docsEnum, DocsEnum.FLAG_NONE);
              expectedResult.set(docsEnum.nextDoc());
              mockScorer.scores = entry.getValue();
              expectedTopDocsCollector.collect(docsEnum.docID());
            } else if (seekStatus == TermsEnum.SeekStatus.END) {
              break;
            }
          }
        }
      }

      assertBitSet(actualResult, expectedResult, searcher);
      assertTopDocs(actualTopDocsCollector.topDocs(), expectedTopDocsCollector.topDocs());
    }

    indexReader.close();
    directory.close();
  }
Code Example #23
 @Override
 public long getNumberUniqueValues() {
   // probably not accurate, but a good upper limit
   return reader.maxDoc();
 }
Code Example #24
 private void verify(Directory dir) throws Exception {
   DirectoryReader ir = DirectoryReader.open(dir);
   for (AtomicReaderContext leaf : ir.leaves()) {
     AtomicReader leafReader = leaf.reader();
     assertTerms(leafReader.terms("field1docs"), leafReader.terms("field2freqs"), true);
     assertTerms(leafReader.terms("field3positions"), leafReader.terms("field4offsets"), true);
     assertTerms(leafReader.terms("field4offsets"), leafReader.terms("field5payloadsFixed"), true);
     assertTerms(
         leafReader.terms("field5payloadsFixed"),
         leafReader.terms("field6payloadsVariable"),
         true);
     assertTerms(
         leafReader.terms("field6payloadsVariable"),
         leafReader.terms("field7payloadsFixedOffsets"),
         true);
     assertTerms(
         leafReader.terms("field7payloadsFixedOffsets"),
         leafReader.terms("field8payloadsVariableOffsets"),
         true);
   }
   ir.close();
 }
Code Example #25
  @Test
  public void testBytes() throws Exception {
    List<Integer> docs = Arrays.asList(1, 5, 7);

    ObjectOpenHashSet<BytesRef> hTerms = new ObjectOpenHashSet<BytesRef>();
    List<BytesRef> cTerms = new ArrayList<BytesRef>(docs.size());
    for (int i = 0; i < docs.size(); i++) {
      BytesRef term = new BytesRef("str" + docs.get(i));
      hTerms.add(term);
      cTerms.add(term);
    }

    FieldDataTermsFilter hFilter = FieldDataTermsFilter.newBytes(getFieldData(strMapper), hTerms);

    int size = reader.maxDoc();
    FixedBitSet result = new FixedBitSet(size);

    result.clear(0, size);
    assertThat(result.cardinality(), equalTo(0));
    result.or(hFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
    assertThat(result.cardinality(), equalTo(docs.size()));
    for (int i = 0; i < reader.maxDoc(); i++) {
      assertThat(result.get(i), equalTo(docs.contains(i)));
    }

    // filter from mapper
    result.clear(0, size);
    assertThat(result.cardinality(), equalTo(0));
    result.or(
        strMapper
            .termsFilter(ifdService, cTerms, null)
            .getDocIdSet(reader.getContext(), reader.getLiveDocs())
            .iterator());
    assertThat(result.cardinality(), equalTo(docs.size()));
    for (int i = 0; i < reader.maxDoc(); i++) {
      assertThat(result.get(i), equalTo(docs.contains(i)));
    }

    result.clear(0, size);
    assertThat(result.cardinality(), equalTo(0));

    // filter on a numeric field using BytesRef terms
    // should not match any docs
    hFilter = FieldDataTermsFilter.newBytes(getFieldData(lngMapper), hTerms);
    result.or(hFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
    assertThat(result.cardinality(), equalTo(0));

    // filter on a numeric field using BytesRef terms
    // should not match any docs
    hFilter = FieldDataTermsFilter.newBytes(getFieldData(dblMapper), hTerms);
    result.or(hFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
    assertThat(result.cardinality(), equalTo(0));
  }
Code Example #26
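  // Runs the completion suggester: gathers lookup results from each segment's CompletionTerms,
  // keeps the highest score (and its payload) per suggestion key, and returns the top-N options.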
  @Override
  protected Suggest.Suggestion<
          ? extends Suggest.Suggestion.Entry<? extends Suggest.Suggestion.Entry.Option>>
      innerExecute(
          String name,
          CompletionSuggestionContext suggestionContext,
          IndexReader indexReader,
          CharsRef spare)
          throws IOException {
    if (suggestionContext.mapper() == null
        || !(suggestionContext.mapper() instanceof CompletionFieldMapper)) {
      throw new ElasticsearchException(
          "Field [" + suggestionContext.getField() + "] is not a completion suggest field");
    }

    CompletionSuggestion completionSuggestion =
        new CompletionSuggestion(name, suggestionContext.getSize());
    UnicodeUtil.UTF8toUTF16(suggestionContext.getText(), spare);

    CompletionSuggestion.Entry completionSuggestEntry =
        new CompletionSuggestion.Entry(new StringText(spare.toString()), 0, spare.length());
    completionSuggestion.addTerm(completionSuggestEntry);

    String fieldName = suggestionContext.getField();
    Map<String, CompletionSuggestion.Entry.Option> results =
        Maps.newHashMapWithExpectedSize(indexReader.leaves().size() * suggestionContext.getSize());
    for (AtomicReaderContext atomicReaderContext : indexReader.leaves()) {
      AtomicReader atomicReader = atomicReaderContext.reader();
      Terms terms = atomicReader.fields().terms(fieldName);
      if (terms instanceof Completion090PostingsFormat.CompletionTerms) {
        final Completion090PostingsFormat.CompletionTerms lookupTerms =
            (Completion090PostingsFormat.CompletionTerms) terms;
        final Lookup lookup = lookupTerms.getLookup(suggestionContext.mapper(), suggestionContext);
        if (lookup == null) {
          // we don't have a lookup for this segment.. this might be possible if a merge dropped all
          // docs from the segment that had a value in this segment.
          continue;
        }
        List<Lookup.LookupResult> lookupResults =
            lookup.lookup(spare, false, suggestionContext.getSize());
        for (Lookup.LookupResult res : lookupResults) {

          final String key = res.key.toString();
          final float score = res.value;
          final Option value = results.get(key);
          if (value == null) {
            final Option option =
                new CompletionSuggestion.Entry.Option(
                    new StringText(key),
                    score,
                    res.payload == null ? null : new BytesArray(res.payload));
            results.put(key, option);
          } else if (value.getScore() < score) {
            value.setScore(score);
            value.setPayload(res.payload == null ? null : new BytesArray(res.payload));
          }
        }
      }
    }
    final List<CompletionSuggestion.Entry.Option> options =
        new ArrayList<CompletionSuggestion.Entry.Option>(results.values());
    CollectionUtil.introSort(options, scoreComparator);

    int optionCount = Math.min(suggestionContext.getSize(), options.size());
    for (int i = 0; i < optionCount; i++) {
      completionSuggestEntry.addOption(options.get(i));
    }

    return completionSuggestion;
  }
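The scoreComparator passed to CollectionUtil.introSort above is defined elsewhere in the suggester and is not part of this excerpt. A minimal sketch of a descending-score comparator, assuming the Option.getScore() accessor already used in this method (class and field names here are illustrative):

import java.util.Comparator;

import org.elasticsearch.search.suggest.completion.CompletionSuggestion;

final class ScoreComparatorSketch {
  // Hypothetical stand-in for scoreComparator: highest score first, so the loop
  // above can take the first `size` options after introSort.
  static final Comparator<CompletionSuggestion.Entry.Option> BY_SCORE_DESC =
      new Comparator<CompletionSuggestion.Entry.Option>() {
        @Override
        public int compare(
            CompletionSuggestion.Entry.Option a, CompletionSuggestion.Entry.Option b) {
          return Float.compare(b.getScore(), a.getScore());
        }
      };
}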
Code Example #27
  public SpansFiltered(Spans spans, Filter filter, AtomicReader reader) throws IOException {
    this(spans, filter.getDocIdSet(reader.getContext(), reader.getLiveDocs()));
  }
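The two-argument SpansFiltered constructor being delegated to is not shown here. Its key input is the DocIdSet produced by the Filter; a small, self-contained sketch of materializing that set into a FixedBitSet (a hypothetical helper, not the actual SpansFiltered internals) could look like:

import java.io.IOException;

import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.FixedBitSet;

final class FilterBitsSketch {
  // Collect the doc ids accepted by a filter into a FixedBitSet so a filtered
  // Spans implementation can cheaply test membership per document.
  static FixedBitSet acceptedDocs(Filter filter, AtomicReader reader) throws IOException {
    FixedBitSet bits = new FixedBitSet(reader.maxDoc());
    DocIdSet docIdSet = filter.getDocIdSet(reader.getContext(), reader.getLiveDocs());
    if (docIdSet == null) {
      return bits; // the filter matches no documents
    }
    DocIdSetIterator it = docIdSet.iterator();
    if (it == null) {
      return bits; // an empty DocIdSet may expose no iterator
    }
    for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
      bits.set(doc);
    }
    return bits;
  }
}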
Code Example #28
File: TestJoinUtil.java  Project: pengzong1111/solr4
  private IndexIterationContext createContext(
      int nDocs,
      RandomIndexWriter fromWriter,
      RandomIndexWriter toWriter,
      boolean multipleValuesPerDocument,
      boolean scoreDocsInOrder)
      throws IOException {
    IndexIterationContext context = new IndexIterationContext();
    int numRandomValues = nDocs / 2;
    context.randomUniqueValues = new String[numRandomValues];
    Set<String> trackSet = new HashSet<String>();
    context.randomFrom = new boolean[numRandomValues];
    for (int i = 0; i < numRandomValues; i++) {
      String uniqueRandomValue;
      do {
        uniqueRandomValue = _TestUtil.randomRealisticUnicodeString(random());
        //        uniqueRandomValue = _TestUtil.randomSimpleString(random);
      } while ("".equals(uniqueRandomValue) || trackSet.contains(uniqueRandomValue));
      // Values must be unique, and empty strings aren't allowed.
      trackSet.add(uniqueRandomValue);
      context.randomFrom[i] = random().nextBoolean();
      context.randomUniqueValues[i] = uniqueRandomValue;
    }

    RandomDoc[] docs = new RandomDoc[nDocs];
    for (int i = 0; i < nDocs; i++) {
      String id = Integer.toString(i);
      int randomI = random().nextInt(context.randomUniqueValues.length);
      String value = context.randomUniqueValues[randomI];
      Document document = new Document();
      document.add(newTextField(random(), "id", id, Field.Store.NO));
      document.add(newTextField(random(), "value", value, Field.Store.NO));

      boolean from = context.randomFrom[randomI];
      int numberOfLinkValues = multipleValuesPerDocument ? 2 + random().nextInt(10) : 1;
      docs[i] = new RandomDoc(id, numberOfLinkValues, value, from);
      for (int j = 0; j < numberOfLinkValues; j++) {
        String linkValue =
            context.randomUniqueValues[random().nextInt(context.randomUniqueValues.length)];
        docs[i].linkValues.add(linkValue);
        if (from) {
          if (!context.fromDocuments.containsKey(linkValue)) {
            context.fromDocuments.put(linkValue, new ArrayList<RandomDoc>());
          }
          if (!context.randomValueFromDocs.containsKey(value)) {
            context.randomValueFromDocs.put(value, new ArrayList<RandomDoc>());
          }

          context.fromDocuments.get(linkValue).add(docs[i]);
          context.randomValueFromDocs.get(value).add(docs[i]);
          document.add(newTextField(random(), "from", linkValue, Field.Store.NO));
        } else {
          if (!context.toDocuments.containsKey(linkValue)) {
            context.toDocuments.put(linkValue, new ArrayList<RandomDoc>());
          }
          if (!context.randomValueToDocs.containsKey(value)) {
            context.randomValueToDocs.put(value, new ArrayList<RandomDoc>());
          }

          context.toDocuments.get(linkValue).add(docs[i]);
          context.randomValueToDocs.get(value).add(docs[i]);
          document.add(newTextField(random(), "to", linkValue, Field.Store.NO));
        }
      }

      final RandomIndexWriter w;
      if (from) {
        w = fromWriter;
      } else {
        w = toWriter;
      }

      w.addDocument(document);
      if (random().nextInt(10) == 4) {
        w.commit();
      }
      if (VERBOSE) {
        System.out.println("Added document[" + docs[i].id + "]: " + document);
      }
    }

    // Pre-compute all possible hits for all unique random values. On top of this, also compute
    // all possible scores for any ScoreMode.
    IndexSearcher fromSearcher = newSearcher(fromWriter.getReader());
    IndexSearcher toSearcher = newSearcher(toWriter.getReader());
    for (int i = 0; i < context.randomUniqueValues.length; i++) {
      String uniqueRandomValue = context.randomUniqueValues[i];
      final String fromField;
      final String toField;
      final Map<String, Map<Integer, JoinScore>> queryVals;
      if (context.randomFrom[i]) {
        fromField = "from";
        toField = "to";
        queryVals = context.fromHitsToJoinScore;
      } else {
        fromField = "to";
        toField = "from";
        queryVals = context.toHitsToJoinScore;
      }
      final Map<BytesRef, JoinScore> joinValueToJoinScores = new HashMap<BytesRef, JoinScore>();
      if (multipleValuesPerDocument) {
        fromSearcher.search(
            new TermQuery(new Term("value", uniqueRandomValue)),
            new Collector() {

              private Scorer scorer;
              private SortedSetDocValues docTermOrds;
              final BytesRef joinValue = new BytesRef();

              @Override
              public void collect(int doc) throws IOException {
                docTermOrds.setDocument(doc);
                long ord;
                while ((ord = docTermOrds.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) {
                  docTermOrds.lookupOrd(ord, joinValue);
                  JoinScore joinScore = joinValueToJoinScores.get(joinValue);
                  if (joinScore == null) {
                    joinValueToJoinScores.put(
                        BytesRef.deepCopyOf(joinValue), joinScore = new JoinScore());
                  }
                  joinScore.addScore(scorer.score());
                }
              }

              @Override
              public void setNextReader(AtomicReaderContext context) throws IOException {
                docTermOrds = FieldCache.DEFAULT.getDocTermOrds(context.reader(), fromField);
              }

              @Override
              public void setScorer(Scorer scorer) {
                this.scorer = scorer;
              }

              @Override
              public boolean acceptsDocsOutOfOrder() {
                return false;
              }
            });
      } else {
        fromSearcher.search(
            new TermQuery(new Term("value", uniqueRandomValue)),
            new Collector() {

              private Scorer scorer;
              private BinaryDocValues terms;
              private Bits docsWithField;
              private final BytesRef spare = new BytesRef();

              @Override
              public void collect(int doc) throws IOException {
                terms.get(doc, spare);
                BytesRef joinValue = spare;
                if (joinValue.length == 0 && !docsWithField.get(doc)) {
                  return;
                }

                JoinScore joinScore = joinValueToJoinScores.get(joinValue);
                if (joinScore == null) {
                  joinValueToJoinScores.put(
                      BytesRef.deepCopyOf(joinValue), joinScore = new JoinScore());
                }
                joinScore.addScore(scorer.score());
              }

              @Override
              public void setNextReader(AtomicReaderContext context) throws IOException {
                terms = FieldCache.DEFAULT.getTerms(context.reader(), fromField, true);
                docsWithField = FieldCache.DEFAULT.getDocsWithField(context.reader(), fromField);
              }

              @Override
              public void setScorer(Scorer scorer) {
                this.scorer = scorer;
              }

              @Override
              public boolean acceptsDocsOutOfOrder() {
                return false;
              }
            });
      }

      final Map<Integer, JoinScore> docToJoinScore = new HashMap<Integer, JoinScore>();
      if (multipleValuesPerDocument) {
        if (scoreDocsInOrder) {
          AtomicReader slowCompositeReader =
              SlowCompositeReaderWrapper.wrap(toSearcher.getIndexReader());
          Terms terms = slowCompositeReader.terms(toField);
          if (terms != null) {
            DocsEnum docsEnum = null;
            TermsEnum termsEnum = null;
            SortedSet<BytesRef> joinValues =
                new TreeSet<BytesRef>(BytesRef.getUTF8SortedAsUnicodeComparator());
            joinValues.addAll(joinValueToJoinScores.keySet());
            for (BytesRef joinValue : joinValues) {
              termsEnum = terms.iterator(termsEnum);
              if (termsEnum.seekExact(joinValue)) {
                docsEnum =
                    termsEnum.docs(slowCompositeReader.getLiveDocs(), docsEnum, DocsEnum.FLAG_NONE);
                JoinScore joinScore = joinValueToJoinScores.get(joinValue);

                for (int doc = docsEnum.nextDoc();
                    doc != DocIdSetIterator.NO_MORE_DOCS;
                    doc = docsEnum.nextDoc()) {
                  // First encountered join value determines the score.
                  // Something to keep in mind for many-to-many relations.
                  if (!docToJoinScore.containsKey(doc)) {
                    docToJoinScore.put(doc, joinScore);
                  }
                }
              }
            }
          }
        } else {
          toSearcher.search(
              new MatchAllDocsQuery(),
              new Collector() {

                private SortedSetDocValues docTermOrds;
                private final BytesRef scratch = new BytesRef();
                private int docBase;

                @Override
                public void collect(int doc) throws IOException {
                  docTermOrds.setDocument(doc);
                  long ord;
                  while ((ord = docTermOrds.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) {
                    docTermOrds.lookupOrd(ord, scratch);
                    JoinScore joinScore = joinValueToJoinScores.get(scratch);
                    if (joinScore == null) {
                      continue;
                    }
                    Integer basedDoc = docBase + doc;
                    // First encountered join value determines the score.
                    // Something to keep in mind for many-to-many relations.
                    if (!docToJoinScore.containsKey(basedDoc)) {
                      docToJoinScore.put(basedDoc, joinScore);
                    }
                  }
                }

                @Override
                public void setNextReader(AtomicReaderContext context) throws IOException {
                  docBase = context.docBase;
                  docTermOrds = FieldCache.DEFAULT.getDocTermOrds(context.reader(), toField);
                }

                @Override
                public boolean acceptsDocsOutOfOrder() {
                  return false;
                }

                @Override
                public void setScorer(Scorer scorer) {}
              });
        }
      } else {
        toSearcher.search(
            new MatchAllDocsQuery(),
            new Collector() {

              private BinaryDocValues terms;
              private int docBase;
              private final BytesRef spare = new BytesRef();

              @Override
              public void collect(int doc) {
                terms.get(doc, spare);
                JoinScore joinScore = joinValueToJoinScores.get(spare);
                if (joinScore == null) {
                  return;
                }
                docToJoinScore.put(docBase + doc, joinScore);
              }

              @Override
              public void setNextReader(AtomicReaderContext context) throws IOException {
                terms = FieldCache.DEFAULT.getTerms(context.reader(), toField, false);
                docBase = context.docBase;
              }

              @Override
              public boolean acceptsDocsOutOfOrder() {
                return false;
              }

              @Override
              public void setScorer(Scorer scorer) {}
            });
      }
      queryVals.put(uniqueRandomValue, docToJoinScore);
    }

    fromSearcher.getIndexReader().close();
    toSearcher.getIndexReader().close();

    return context;
  }
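createContext only builds the two indexes and the expected join scores; the joins themselves are issued by the tests that call it, which are not part of this excerpt. As a rough illustration (a hypothetical helper, assuming Lucene's join module), running a single join for one unique value looks roughly like:

import java.io.IOException;

import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.join.JoinUtil;
import org.apache.lucene.search.join.ScoreMode;

final class JoinSketch {
  // Match documents on the "from" side whose "value" equals uniqueRandomValue,
  // then join to the "to" side through the from/to link fields built above.
  static TopDocs joinForValue(
      IndexSearcher fromSearcher,
      IndexSearcher toSearcher,
      String uniqueRandomValue,
      boolean multipleValuesPerDocument)
      throws IOException {
    Query joinQuery =
        JoinUtil.createJoinQuery(
            "from",
            multipleValuesPerDocument,
            "to",
            new TermQuery(new Term("value", uniqueRandomValue)),
            fromSearcher,
            ScoreMode.None);
    return toSearcher.search(joinQuery, 10);
  }
}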
Code Example #29
  /**
   * Merges the sortedset docvalues from <code>toMerge</code>.
   *
   * <p>The default implementation calls {@link #addSortedSetField}, passing an Iterable that merges
   * ordinals and values and filters deleted documents.
   */
  public void mergeSortedSetField(
      FieldInfo fieldInfo, final MergeState mergeState, List<SortedSetDocValues> toMerge)
      throws IOException {

    mergeState.checkAbort.work(mergeState.segmentInfo.getDocCount());

    final AtomicReader readers[] = mergeState.readers.toArray(new AtomicReader[toMerge.size()]);
    final SortedSetDocValues dvs[] = toMerge.toArray(new SortedSetDocValues[toMerge.size()]);

    // step 1: iterate thru each sub and mark terms still in use
    TermsEnum liveTerms[] = new TermsEnum[dvs.length];
    long[] weights = new long[liveTerms.length];
    for (int sub = 0; sub < liveTerms.length; sub++) {
      AtomicReader reader = readers[sub];
      SortedSetDocValues dv = dvs[sub];
      Bits liveDocs = reader.getLiveDocs();
      if (liveDocs == null) {
        liveTerms[sub] = dv.termsEnum();
        weights[sub] = dv.getValueCount();
      } else {
        LongBitSet bitset = new LongBitSet(dv.getValueCount());
        for (int i = 0; i < reader.maxDoc(); i++) {
          if (liveDocs.get(i)) {
            dv.setDocument(i);
            long ord;
            while ((ord = dv.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) {
              bitset.set(ord);
            }
          }
        }
        liveTerms[sub] = new BitsFilteredTermsEnum(dv.termsEnum(), bitset);
        weights[sub] = bitset.cardinality();
      }
    }

    // step 2: create ordinal map (this conceptually does the "merging")
    final OrdinalMap map = OrdinalMap.build(this, liveTerms, weights, PackedInts.COMPACT);

    // step 3: add field
    addSortedSetField(
        fieldInfo,
        // ord -> value
        new Iterable<BytesRef>() {
          @Override
          public Iterator<BytesRef> iterator() {
            return new Iterator<BytesRef>() {
              long currentOrd;

              @Override
              public boolean hasNext() {
                return currentOrd < map.getValueCount();
              }

              @Override
              public BytesRef next() {
                if (!hasNext()) {
                  throw new NoSuchElementException();
                }
                int segmentNumber = map.getFirstSegmentNumber(currentOrd);
                long segmentOrd = map.getFirstSegmentOrd(currentOrd);
                final BytesRef term = dvs[segmentNumber].lookupOrd(segmentOrd);
                currentOrd++;
                return term;
              }

              @Override
              public void remove() {
                throw new UnsupportedOperationException();
              }
            };
          }
        },
        // doc -> ord count
        new Iterable<Number>() {
          @Override
          public Iterator<Number> iterator() {
            return new Iterator<Number>() {
              int readerUpto = -1;
              int docIDUpto;
              int nextValue;
              AtomicReader currentReader;
              Bits currentLiveDocs;
              boolean nextIsSet;

              @Override
              public boolean hasNext() {
                return nextIsSet || setNext();
              }

              @Override
              public void remove() {
                throw new UnsupportedOperationException();
              }

              @Override
              public Number next() {
                if (!hasNext()) {
                  throw new NoSuchElementException();
                }
                assert nextIsSet;
                nextIsSet = false;
                // TODO make a mutable number
                return nextValue;
              }

              private boolean setNext() {
                while (true) {
                  if (readerUpto == readers.length) {
                    return false;
                  }

                  if (currentReader == null || docIDUpto == currentReader.maxDoc()) {
                    readerUpto++;
                    if (readerUpto < readers.length) {
                      currentReader = readers[readerUpto];
                      currentLiveDocs = currentReader.getLiveDocs();
                    }
                    docIDUpto = 0;
                    continue;
                  }

                  if (currentLiveDocs == null || currentLiveDocs.get(docIDUpto)) {
                    nextIsSet = true;
                    SortedSetDocValues dv = dvs[readerUpto];
                    dv.setDocument(docIDUpto);
                    nextValue = 0;
                    while (dv.nextOrd() != SortedSetDocValues.NO_MORE_ORDS) {
                      nextValue++;
                    }
                    docIDUpto++;
                    return true;
                  }

                  docIDUpto++;
                }
              }
            };
          }
        },
        // ords
        new Iterable<Number>() {
          @Override
          public Iterator<Number> iterator() {
            return new Iterator<Number>() {
              int readerUpto = -1;
              int docIDUpto;
              long nextValue;
              AtomicReader currentReader;
              Bits currentLiveDocs;
              LongValues currentMap;
              boolean nextIsSet;
              long ords[] = new long[8];
              int ordUpto;
              int ordLength;

              @Override
              public boolean hasNext() {
                return nextIsSet || setNext();
              }

              @Override
              public void remove() {
                throw new UnsupportedOperationException();
              }

              @Override
              public Number next() {
                if (!hasNext()) {
                  throw new NoSuchElementException();
                }
                assert nextIsSet;
                nextIsSet = false;
                // TODO make a mutable number
                return nextValue;
              }

              private boolean setNext() {
                while (true) {
                  if (readerUpto == readers.length) {
                    return false;
                  }

                  if (ordUpto < ordLength) {
                    nextValue = ords[ordUpto];
                    ordUpto++;
                    nextIsSet = true;
                    return true;
                  }

                  if (currentReader == null || docIDUpto == currentReader.maxDoc()) {
                    readerUpto++;
                    if (readerUpto < readers.length) {
                      currentReader = readers[readerUpto];
                      currentLiveDocs = currentReader.getLiveDocs();
                      currentMap = map.getGlobalOrds(readerUpto);
                    }
                    docIDUpto = 0;
                    continue;
                  }

                  if (currentLiveDocs == null || currentLiveDocs.get(docIDUpto)) {
                    assert docIDUpto < currentReader.maxDoc();
                    SortedSetDocValues dv = dvs[readerUpto];
                    dv.setDocument(docIDUpto);
                    ordUpto = ordLength = 0;
                    long ord;
                    while ((ord = dv.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) {
                      if (ordLength == ords.length) {
                        ords = ArrayUtil.grow(ords, ordLength + 1);
                      }
                      ords[ordLength] = currentMap.get(ord);
                      ordLength++;
                    }
                    docIDUpto++;
                    continue;
                  }

                  docIDUpto++;
                }
              }
            };
          }
        });
  }
Code Example #30
  int resolveParentDocuments(
      TopDocs topDocs,
      SearchContext context,
      Recycler.V<ObjectObjectOpenHashMap<Object, ParentDoc[]>> parentDocs) {
    int parentHitsResolved = 0;
    Recycler.V<ObjectObjectOpenHashMap<Object, Recycler.V<IntObjectOpenHashMap<ParentDoc>>>>
        parentDocsPerReader =
            cacheRecycler.hashMap(context.searcher().getIndexReader().leaves().size());
    for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
      int readerIndex =
          ReaderUtil.subIndex(scoreDoc.doc, context.searcher().getIndexReader().leaves());
      AtomicReaderContext subContext =
          context.searcher().getIndexReader().leaves().get(readerIndex);
      int subDoc = scoreDoc.doc - subContext.docBase;

      // find the parent id
      HashedBytesArray parentId =
          context.idCache().reader(subContext.reader()).parentIdByDoc(parentType, subDoc);
      if (parentId == null) {
        // no parent found
        continue;
      }
      // now go over and find the parent doc Id and reader tuple
      for (AtomicReaderContext atomicReaderContext : context.searcher().getIndexReader().leaves()) {
        AtomicReader indexReader = atomicReaderContext.reader();
        int parentDocId = context.idCache().reader(indexReader).docById(parentType, parentId);
        Bits liveDocs = indexReader.getLiveDocs();
        if (parentDocId != -1 && (liveDocs == null || liveDocs.get(parentDocId))) {
          // we found a match, add it (all leaves are still checked; there is no early break)

          Recycler.V<IntObjectOpenHashMap<ParentDoc>> readerParentDocs =
              parentDocsPerReader.v().get(indexReader.getCoreCacheKey());
          if (readerParentDocs == null) {
            readerParentDocs = cacheRecycler.intObjectMap(indexReader.maxDoc());
            parentDocsPerReader.v().put(indexReader.getCoreCacheKey(), readerParentDocs);
          }

          ParentDoc parentDoc = readerParentDocs.v().get(parentDocId);
          if (parentDoc == null) {
            parentHitsResolved++; // we have a hit on a parent
            parentDoc = new ParentDoc();
            parentDoc.docId = parentDocId;
            parentDoc.count = 1;
            parentDoc.maxScore = scoreDoc.score;
            parentDoc.sumScores = scoreDoc.score;
            readerParentDocs.v().put(parentDocId, parentDoc);
          } else {
            parentDoc.count++;
            parentDoc.sumScores += scoreDoc.score;
            if (scoreDoc.score > parentDoc.maxScore) {
              parentDoc.maxScore = scoreDoc.score;
            }
          }
        }
      }
    }
    boolean[] states = parentDocsPerReader.v().allocated;
    Object[] keys = parentDocsPerReader.v().keys;
    Object[] values = parentDocsPerReader.v().values;
    for (int i = 0; i < states.length; i++) {
      if (states[i]) {
        Recycler.V<IntObjectOpenHashMap<ParentDoc>> value =
            (Recycler.V<IntObjectOpenHashMap<ParentDoc>>) values[i];
        ParentDoc[] _parentDocs = value.v().values().toArray(ParentDoc.class);
        Arrays.sort(_parentDocs, PARENT_DOC_COMP);
        parentDocs.v().put(keys[i], _parentDocs);
        Releasables.release(value);
      }
    }
    Releasables.release(parentDocsPerReader);
    return parentHitsResolved;
  }
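The PARENT_DOC_COMP comparator used with Arrays.sort above is defined elsewhere in the class and is not part of this excerpt. A plausible sketch, assuming parent docs are ordered by ascending doc id so each per-reader array can later be walked in doc-id order (ParentDoc and its docId field are the ones shown above):

import java.util.Comparator;

final class ParentDocComparatorSketch {
  // Hypothetical stand-in for PARENT_DOC_COMP: ascending doc id.
  static final Comparator<ParentDoc> BY_DOC_ID =
      new Comparator<ParentDoc>() {
        @Override
        public int compare(ParentDoc left, ParentDoc right) {
          return left.docId - right.docId;
        }
      };
}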