  @Test
  public void testStream_bytesRefArray() throws Exception {
    final BytesRefArray bArr = new BytesRefArray(Counter.newCounter(false));
    bArr.append(new BytesRef("foo"));
    bArr.append(new BytesRef("bar"));
    bArr.append(new BytesRef("baz"));

    Assert.assertEquals("Not all items streamed.", 3L, StreamUtils.stream(bArr).count());

    Assert.assertEquals(
        "Term not found.",
        1L,
        StreamUtils.stream(bArr).filter(br -> br.bytesEquals(new BytesRef("foo"))).count());
    Assert.assertEquals(
        "Term not found.",
        1L,
        StreamUtils.stream(bArr).filter(br -> br.bytesEquals(new BytesRef("bar"))).count());
    Assert.assertEquals(
        "Term not found.",
        1L,
        StreamUtils.stream(bArr).filter(br -> br.bytesEquals(new BytesRef("baz"))).count());

    Assert.assertEquals(
        "Unknown term found.",
        0L,
        StreamUtils.stream(bArr)
            .filter(
                t ->
                    !t.bytesEquals(new BytesRef("foo"))
                        && !t.bytesEquals(new BytesRef("bar"))
                        && !t.bytesEquals(new BytesRef("baz")))
            .count());
  }
  /** Creates a new iterator, buffering entries from the specified iterator */
  public BufferedInputIterator(InputIterator source) throws IOException {
    BytesRef spare;
    int freqIndex = 0;
    hasPayloads = source.hasPayloads();
    hasContexts = source.hasContexts();
    // Drain the source, copying each entry (plus its payload and contexts,
    // when present) into the local buffers.
    while ((spare = source.next()) != null) {
      entries.append(spare);
      if (hasPayloads) {
        payloads.append(source.payload());
      }
      if (hasContexts) {
        contextSets.add(source.contexts());
      }
      // Grow the parallel weights array on demand; ArrayUtil.grow over-allocates,
      // so this does not reallocate on every append.
      if (freqIndex >= freqs.length) {
        freqs = ArrayUtil.grow(freqs, freqs.length + 1);
      }
      freqs[freqIndex++] = source.weight();
    }
  }
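For context, a rough usage sketch follows. It is an assumption, not part of the snippet above: `dictionary` stands in for any org.apache.lucene.search.suggest.Dictionary (InputIterator lives in the same package, BytesRef in org.apache.lucene.util).

// Hypothetical usage: buffer a dictionary's entries up front, then replay them.
// BufferedInputIterator itself implements InputIterator, so it can be handed to
// anything that consumes the unbuffered source (e.g. a suggester build).
InputIterator source = dictionary.getEntryIterator(); // `dictionary` is assumed
InputIterator buffered = new BufferedInputIterator(source);
BytesRef term;
while ((term = buffered.next()) != null) {
  long weight = buffered.weight();       // replayed from the buffered weights
  BytesRef payload = buffered.payload(); // null when hasPayloads() is false
  // ... use (term, weight, payload)
}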
Example #3
  /**
   * Iterates over the given token stream and adds the resulting terms to the index; equivalent to
   * adding a tokenized, indexed, termVectorStored, unstored, Lucene {@link
   * org.apache.lucene.document.Field}. Finally closes the token stream. Note that untokenized
   * keywords can be added with this method via {@link #keywordTokenStream(Collection)}, the Lucene
   * <code>KeywordTokenizer</code> or similar utilities.
   *
   * @param fieldName a name to be associated with the text
   * @param tokenStream the token stream to retrieve tokens from. It's guaranteed to be closed no
   *     matter what.
   * @param boost the boost factor for hits for this field
   * @param positionIncrementGap the position increment gap if fields with the same name are added
   *     more than once
   * @param offsetGap the offset gap if fields with the same name are added more than once
   * @see org.apache.lucene.document.Field#setBoost(float)
   */
  public void addField(
      String fieldName,
      TokenStream tokenStream,
      float boost,
      int positionIncrementGap,
      int offsetGap) {
    try (TokenStream stream = tokenStream) {
      if (frozen)
        throw new IllegalArgumentException("Cannot call addField() when MemoryIndex is frozen");
      if (fieldName == null) throw new IllegalArgumentException("fieldName must not be null");
      if (stream == null) throw new IllegalArgumentException("token stream must not be null");
      if (boost <= 0.0f)
        throw new IllegalArgumentException("boost factor must be greater than 0.0");
      int numTokens = 0;
      int numOverlapTokens = 0;
      int pos = -1;
      final BytesRefHash terms;
      final SliceByteStartArray sliceArray;
      Info info;
      long sumTotalTermFreq = 0;
      int offset = 0;
      FieldInfo fieldInfo;
      if ((info = fields.get(fieldName)) != null) {
        fieldInfo = info.fieldInfo;
        numTokens = info.numTokens;
        numOverlapTokens = info.numOverlapTokens;
        pos = info.lastPosition + positionIncrementGap;
        offset = info.lastOffset + offsetGap;
        terms = info.terms;
        boost *= info.boost;
        sliceArray = info.sliceArray;
        sumTotalTermFreq = info.sumTotalTermFreq;
      } else {
        fieldInfo =
            new FieldInfo(
                fieldName,
                fields.size(),
                true,
                false,
                this.storePayloads,
                this.storeOffsets
                    ? IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS
                    : IndexOptions.DOCS_AND_FREQS_AND_POSITIONS,
                DocValuesType.NONE,
                -1,
                Collections.<String, String>emptyMap());
        sliceArray = new SliceByteStartArray(BytesRefHash.DEFAULT_CAPACITY);
        terms = new BytesRefHash(byteBlockPool, BytesRefHash.DEFAULT_CAPACITY, sliceArray);
      }

      TermToBytesRefAttribute termAtt = stream.getAttribute(TermToBytesRefAttribute.class);
      PositionIncrementAttribute posIncrAttribute =
          stream.addAttribute(PositionIncrementAttribute.class);
      OffsetAttribute offsetAtt = stream.addAttribute(OffsetAttribute.class);
      PayloadAttribute payloadAtt =
          storePayloads ? stream.addAttribute(PayloadAttribute.class) : null;
      BytesRef ref = termAtt.getBytesRef();
      stream.reset();

      // Consume the stream: each token contributes one position (and optional
      // offsets/payload entry) to its term's postings slice.
      while (stream.incrementToken()) {
        termAtt.fillBytesRef();
        //        if (DEBUG) System.err.println("token='" + term + "'");
        numTokens++;
        final int posIncr = posIncrAttribute.getPositionIncrement();
        if (posIncr == 0) numOverlapTokens++;
        pos += posIncr;
        int ord = terms.add(ref);
        if (ord < 0) {
          // BytesRefHash.add returns -(id + 1) when the term is already present;
          // recover the id and resume that term's postings slice.
          ord = (-ord) - 1;
          postingsWriter.reset(sliceArray.end[ord]);
        } else {
          // First occurrence of this term: open a new postings slice for it.
          sliceArray.start[ord] = postingsWriter.startNewSlice();
        }
        sliceArray.freq[ord]++;
        sumTotalTermFreq++;
        postingsWriter.writeInt(pos);
        if (storeOffsets) {
          postingsWriter.writeInt(offsetAtt.startOffset() + offset);
          postingsWriter.writeInt(offsetAtt.endOffset() + offset);
        }
        if (storePayloads) {
          final BytesRef payload = payloadAtt.getPayload();
          final int pIndex;
          if (payload == null || payload.length == 0) {
            pIndex = -1;
          } else {
            pIndex = payloadsBytesRefs.append(payload);
          }
          postingsWriter.writeInt(pIndex);
        }
        sliceArray.end[ord] = postingsWriter.getCurrentOffset();
      }
      stream.end();

      // ensure infos.numTokens > 0 invariant; needed for correct operation of terms()
      if (numTokens > 0) {
        fields.put(
            fieldName,
            new Info(
                fieldInfo,
                terms,
                sliceArray,
                numTokens,
                numOverlapTokens,
                boost,
                pos,
                offsetAtt.endOffset() + offset,
                sumTotalTermFreq));
      }
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }
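To make the flow above concrete, here is a minimal usage sketch. It assumes an older Lucene version (this five-argument overload with a boost parameter was removed in later releases) and that lucene-analyzers-common's StandardAnalyzer is on the classpath; the gap values mirror what the shorter addField overloads pass along (0 and 1). TermQuery and Term come from org.apache.lucene.search and org.apache.lucene.index.

// Minimal sketch: index one field in memory, then score a query against it.
MemoryIndex index = new MemoryIndex();
Analyzer analyzer = new StandardAnalyzer(); // assumed available on the classpath
index.addField(
    "content",
    analyzer.tokenStream("content", "the quick brown fox"),
    1.0f, // boost: must be > 0; multiplied in when the field is added again
    0,    // positionIncrementGap between repeated adds of the same field
    1);   // offsetGap between repeated adds of the same field
float score = index.search(new TermQuery(new Term("content", "fox")));
// score > 0.0f indicates the query matched the single in-memory document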