Code example #1
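 // Resolves a search-time value to the indexed uid: bare values get the type prefix; values that already contain the uid delimiter are handled as-is.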
 @Override
 public BytesRef indexedValueForSearch(Object value) {
   if (value instanceof BytesRef) {
     BytesRef bytesRef = (BytesRef) value;
     if (Uid.hasDelimiter(bytesRef)) {
       return bytesRef;
     }
     return Uid.createUidAsBytes(typeAsBytes, bytesRef);
   }
   String sValue = value.toString();
   if (sValue.indexOf(Uid.DELIMITER) == -1) {
     return Uid.createUidAsBytes(type, sValue);
   }
   return super.indexedValueForSearch(value);
 }
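A minimal usage sketch of the round trip the method above relies on, assuming org.elasticsearch.index.mapper.Uid and Lucene's BytesRef on the classpath (the uid is the UTF-8 type, the '#' delimiter, then the id):

 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.index.mapper.Uid;

 public class UidRoundTripSketch {
   public static void main(String[] args) {
     // a bare id gets the type prefix, as in the indexedValueForSearch fallback
     BytesRef uid = Uid.createUidAsBytes("tweet", "1");
     System.out.println(uid.utf8ToString()); // prints: tweet#1
     // a value that already contains the delimiter would be returned unchanged
     System.out.println(Uid.hasDelimiter(uid)); // prints: true
   }
 }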
Code example #2
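 // Single-value term filter: the bare id could belong to any mapped type, so one uid term is built per type (see the expansion sketch after this example).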
 @Override
 public Filter termFilter(Object value, @Nullable QueryParseContext context) {
   if (context == null) {
     return super.termFilter(value, context);
   }
   BytesRef bValue = BytesRefs.toBytesRef(value);
    // the value is a bare id of unknown type, so build a uid for every mapped type
   BytesRef[] typesValues = new BytesRef[context.mapperService().types().size()];
   int i = 0;
   for (String type : context.mapperService().types()) {
     typesValues[i++] = Uid.createUidAsBytes(type, bValue);
   }
   return new TermsFilter(names.indexName(), typesValues);
 }
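The filter above fans one bare id out into a uid term per mapped type. A self-contained sketch of that expansion, with a hypothetical type list standing in for context.mapperService().types():

 import java.util.Arrays;
 import java.util.List;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.index.mapper.Uid;

 public class TermFilterExpansionSketch {
   public static void main(String[] args) {
     List<String> types = Arrays.asList("user", "tweet"); // hypothetical mapped types
     BytesRef id = new BytesRef("1");
     BytesRef[] uids = new BytesRef[types.size()];
     int i = 0;
     for (String type : types) {
       uids[i++] = Uid.createUidAsBytes(type, id);
     }
     for (BytesRef uid : uids) {
       System.out.println(uid.utf8ToString()); // prints: user#1, then tweet#1
     }
   }
 }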
Code example #3
File: UidTests.java  Project: 289/elasticsearch
 @Test
 public void testCreateAndSplitId() {
   BytesRef createUid = Uid.createUidAsBytes("foo", "bar");
   BytesRef[] splitUidIntoTypeAndId = Uid.splitUidIntoTypeAndId(createUid);
   assertThat("foo", equalTo(splitUidIntoTypeAndId[0].utf8ToString()));
   assertThat("bar", equalTo(splitUidIntoTypeAndId[1].utf8ToString()));
   // split also with an offset
   BytesRef ref = new BytesRef(createUid.length + 10);
   ref.offset = 9;
   ref.length = createUid.length;
   System.arraycopy(createUid.bytes, createUid.offset, ref.bytes, ref.offset, ref.length);
   splitUidIntoTypeAndId = Uid.splitUidIntoTypeAndId(ref);
   assertThat("foo", equalTo(splitUidIntoTypeAndId[0].utf8ToString()));
   assertThat("bar", equalTo(splitUidIntoTypeAndId[1].utf8ToString()));
 }
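For reference, a hand-rolled split that mirrors what the test expects of Uid.splitUidIntoTypeAndId. This is a sketch only, assuming the '#' delimiter; it shows why the split must honor offset and length instead of assuming the array starts at index zero:

 import org.apache.lucene.util.BytesRef;

 public final class SplitUidSketch {
   // illustrative re-implementation, not the Elasticsearch method itself
   public static BytesRef[] split(BytesRef uid) {
     for (int i = uid.offset; i < uid.offset + uid.length; i++) {
       if (uid.bytes[i] == '#') { // the uid delimiter
         BytesRef type = new BytesRef();
         type.bytes = uid.bytes;
         type.offset = uid.offset;
         type.length = i - uid.offset;
         BytesRef id = new BytesRef();
         id.bytes = uid.bytes;
         id.offset = i + 1;
         id.length = uid.offset + uid.length - id.offset;
         return new BytesRef[] {type, id};
       }
     }
     return null; // no delimiter: not a uid
   }
 }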
Code example #4
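 // Multi-value counterpart of termFilter() above: every value is expanded across all mapped types.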
 @Override
 public Filter termsFilter(List values, @Nullable QueryParseContext context) {
   if (context == null) {
     return super.termsFilter(values, context);
   }
    // capacity hint: one uid per (value, type) pair is added below
    List<BytesRef> bValues =
        new ArrayList<BytesRef>(values.size() * context.mapperService().types().size());
   for (Object value : values) {
     BytesRef bValue = BytesRefs.toBytesRef(value);
      // the value's type is unknown, so build a uid for every mapped type
     for (String type : context.mapperService().types()) {
       bValues.add(Uid.createUidAsBytes(type, bValue));
     }
   }
   return new TermsFilter(names.indexName(), bValues);
 }
Code example #5
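  // Serves a term-vector request: a realtime document fetched from the translog is run through the artificial-document path; otherwise vectors come from an indexed document's stored term vectors or are re-generated on the fly.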
  public TermVectorResponse getTermVector(TermVectorRequest request, String concreteIndex) {
    final Engine.Searcher searcher = indexShard.acquireSearcher("term_vector");
    IndexReader topLevelReader = searcher.reader();
    final TermVectorResponse termVectorResponse =
        new TermVectorResponse(concreteIndex, request.type(), request.id());

    final Term uidTerm =
        new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(request.type(), request.id()));
    Engine.GetResult get = indexShard.get(new Engine.Get(request.realtime(), uidTerm));
    boolean docFromTranslog = get.source() != null;
    AggregatedDfs dfs = null;

    /* fetched from translog is treated as an artificial document */
    if (docFromTranslog) {
      request.doc(get.source().source, false);
      termVectorResponse.setDocVersion(get.version());
    }

    /* handle potential wildcards in fields */
    if (request.selectedFields() != null) {
      handleFieldWildcards(request);
    }

    try {
      Fields topLevelFields = MultiFields.getFields(topLevelReader);
      Versions.DocIdAndVersion docIdAndVersion = get.docIdAndVersion();
      /* from an artificial document */
      if (request.doc() != null) {
        Fields termVectorsByField = generateTermVectorsFromDoc(request, !docFromTranslog);
        // if no document indexed in shard, take the queried document itself for stats
        if (topLevelFields == null) {
          topLevelFields = termVectorsByField;
        }
        if (termVectorsByField != null && useDfs(request)) {
          dfs = getAggregatedDfs(termVectorsByField, request);
        }
        termVectorResponse.setFields(
            termVectorsByField, request.selectedFields(), request.getFlags(), topLevelFields, dfs);
        termVectorResponse.setExists(true);
        termVectorResponse.setArtificial(!docFromTranslog);
      }
      /* or from an existing document */
      else if (docIdAndVersion != null) {
        // fields with stored term vectors
        Fields termVectorsByField =
            docIdAndVersion.context.reader().getTermVectors(docIdAndVersion.docId);
        Set<String> selectedFields = request.selectedFields();
        // generate tvs for fields where analyzer is overridden
        if (selectedFields == null && request.perFieldAnalyzer() != null) {
          selectedFields = getFieldsToGenerate(request.perFieldAnalyzer(), termVectorsByField);
        }
        // fields without term vectors
        if (selectedFields != null) {
          termVectorsByField =
              addGeneratedTermVectors(get, termVectorsByField, request, selectedFields);
        }
        if (termVectorsByField != null && useDfs(request)) {
          dfs = getAggregatedDfs(termVectorsByField, request);
        }
        termVectorResponse.setFields(
            termVectorsByField, request.selectedFields(), request.getFlags(), topLevelFields, dfs);
        termVectorResponse.setDocVersion(docIdAndVersion.version);
        termVectorResponse.setExists(true);
      } else {
        termVectorResponse.setExists(false);
      }
    } catch (Throwable ex) {
      throw new ElasticsearchException("failed to execute term vector request", ex);
    } finally {
      searcher.close();
      get.release();
    }
    return termVectorResponse;
  }