public TermsLongFacetExecutor(
      IndexNumericFieldData indexFieldData,
      int size,
      int shardSize,
      TermsFacet.ComparatorType comparatorType,
      boolean allTerms,
      SearchContext context,
      ImmutableSet<BytesRef> excluded,
      SearchScript script,
      CacheRecycler cacheRecycler) {
    this.indexFieldData = indexFieldData;
    this.size = size;
    this.shardSize = shardSize;
    this.comparatorType = comparatorType;
    this.script = script;
    this.excluded = excluded;
    this.facets = cacheRecycler.longIntMap(-1);

    if (allTerms) {
      // Pre-populate the facet map with every term in the index so that
      // terms with a count of zero are still returned in the response.
      for (AtomicReaderContext readerContext : context.searcher().getTopReaderContext().leaves()) {
        int maxDoc = readerContext.reader().maxDoc();
        LongValues values = indexFieldData.load(readerContext).getLongValues();
        final LongIntOpenHashMap v = facets.v();
        for (int docId = 0; docId < maxDoc; docId++) {
          final int numValues = values.setDocument(docId);
          for (int i = 0; i < numValues; i++) {
            v.putIfAbsent(values.nextValue(), 0);
          }
        }
      }
    }
  }
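
  // Per-document callback: counts documents that have no value for the
  // field and forwards each value to onValue() while tracking the running
  // total of collected values.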
  public void onDoc(int docId, LongValues values) {
    final int numValues = values.setDocument(docId);
    if (numValues == 0) {
      // The document has no value for this field.
      missing++;
      return;
    }
    for (int i = 0; i < numValues; i++) {
      onValue(docId, values.nextValue());
      total++;
    }
  }
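
  // Indexes one document per entry in `values`, then verifies that the
  // loaded field data exposes exactly the indexed longs, through both the
  // long view and the double view.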
  private void test(List<TLongSet> values) throws Exception {
    StringField id = new StringField("_id", "", Field.Store.NO);

    for (int i = 0; i < values.size(); ++i) {
      Document doc = new Document();
      id.setStringValue("" + i);
      doc.add(id);
      final TLongSet v = values.get(i);
      for (TLongIterator it = v.iterator(); it.hasNext(); ) {
        LongField value = new LongField("value", it.next(), Field.Store.NO);
        doc.add(value);
      }
      writer.addDocument(doc);
    }
    // Merge down to a single segment so document ids line up with the
    // insertion order used above.
    writer.forceMerge(1);

    final IndexNumericFieldData indexFieldData = getForField("value");
    final AtomicNumericFieldData atomicFieldData = indexFieldData.load(refreshReader());
    final LongValues data = atomicFieldData.getLongValues();
    final DoubleValues doubleData = atomicFieldData.getDoubleValues();
    final TLongSet set = new TLongHashSet();
    final TDoubleSet doubleSet = new TDoubleHashSet();
    for (int i = 0; i < values.size(); ++i) {
      final TLongSet v = values.get(i);

      assertThat(data.hasValue(i), equalTo(!v.isEmpty()));
      assertThat(doubleData.hasValue(i), equalTo(!v.isEmpty()));

      if (v.isEmpty()) {
        assertThat(data.getValue(i), equalTo(0L));
        assertThat(doubleData.getValue(i), equalTo(0d));
      }

      set.clear();
      for (LongValues.Iter iter = data.getIter(i); iter.hasNext(); ) {
        set.add(iter.next());
      }
      assertThat(set, equalTo(v));

      final TDoubleSet doubleV = new TDoubleHashSet();
      for (TLongIterator it = v.iterator(); it.hasNext(); ) {
        doubleV.add((double) it.next());
      }
      doubleSet.clear();
      for (DoubleValues.Iter iter = doubleData.getIter(i); iter.hasNext(); ) {
        doubleSet.add(iter.next());
      }
      assertThat(doubleSet, equalTo(doubleV));
    }
  }
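  // Histogram collect phase: rounds each value of the current document to a
  // bucket key and collects the document into that bucket, at most once per
  // distinct key.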
  @Override
  public void collect(int doc, long owningBucketOrdinal) throws IOException {
    assert owningBucketOrdinal == 0;
    final int valuesCount = values.setDocument(doc);

    // Values come back in ascending order, so duplicate keys within the same
    // document are adjacent; tracking the previous key is enough to collect
    // each bucket at most once per document.
    long previousKey = Long.MIN_VALUE;
    for (int i = 0; i < valuesCount; ++i) {
      long value = values.nextValue();
      long key = rounding.roundKey(value);
      assert key >= previousKey;
      if (key == previousKey) {
        continue;
      }
      long bucketOrd = bucketOrds.add(key);
      if (bucketOrd < 0) { // already seen: add() returns -1 - existing ordinal
        bucketOrd = -1 - bucketOrd;
      }
      collectBucket(doc, bucketOrd);
      previousKey = key;
    }
  }
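  // Delegates per-document value iteration to the histoProc value callback.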
  @Override
  protected void doCollect(int doc) throws IOException {
    keyValues.forEachValueInDoc(doc, histoProc);
  }