@Override
 public StringFieldMapper build(BuilderContext context) {
   // A positive gap means each analyzer must be wrapped so that the gap is
   // inserted between multiple values of the same field.
   if (positionOffsetGap > 0) {
     indexAnalyzer = new NamedCustomAnalyzer(indexAnalyzer, positionOffsetGap);
     searchAnalyzer = new NamedCustomAnalyzer(searchAnalyzer, positionOffsetGap);
     searchQuotedAnalyzer = new NamedCustomAnalyzer(searchQuotedAnalyzer, positionOffsetGap);
   }
   // An indexed-but-not-tokenized field defaults to omitted norms and docs-only
   // index options — almost certainly what the user wants for an exact-match
   // field — but explicit settings always win.
   boolean notAnalyzed = fieldType.indexed() && !fieldType.tokenized();
   if (notAnalyzed) {
     boolean defaultBoost = boost == Defaults.BOOST;
     if (!omitNormsSet && defaultBoost) {
       fieldType.setOmitNorms(true);
     }
     if (!indexOptionsSet) {
       fieldType.setIndexOptions(IndexOptions.DOCS_ONLY);
     }
   }
   StringFieldMapper mapper =
       new StringFieldMapper(
           buildNames(context),
           boost,
           fieldType,
           nullValue,
           indexAnalyzer,
           searchAnalyzer,
           searchQuotedAnalyzer,
           positionOffsetGap,
           ignoreAbove,
           provider,
           similarity);
   mapper.includeInAll(includeInAll);
   return mapper;
 }
 @Override
 public StringFieldMapper unsetIncludeInAll() {
   // Nothing to clear — return this mapper unchanged.
   if (includeInAll == null) {
     return this;
   }
   // Copy-on-write: return a clone with the flag cleared instead of mutating this.
   StringFieldMapper copy = clone();
   copy.includeInAll = null;
   return copy;
 }
 @Override
 public StringFieldMapper includeInAllIfNotSet(Boolean includeInAll) {
   // Only apply the supplied value when one was given AND nothing is set yet;
   // an already-set value always takes precedence.
   if (includeInAll == null || this.includeInAll != null) {
     return this;
   }
   // Copy-on-write: clone rather than mutate this mapper.
   StringFieldMapper copy = clone();
   copy.includeInAll = includeInAll;
   return copy;
 }
// Example 4
    @Override
    public OntologyMapper build(BuilderContext context) {
      // Remember the current path type so it can be restored once the sub-field
      // mappers have been built under this mapper's path type.
      ContentPath.Type origPathType = context.path().pathType();
      context.path().pathType(pathType);

      Map<String, FieldMapper<String>> fieldMappers = Maps.newHashMap();

      // Sub-field names are resolved relative to this mapper's name.
      context.path().add(name);

      // Build any per-property string sub-fields that were configured explicitly.
      if (propertyBuilders != null) {
        for (String property : propertyBuilders.keySet()) {
          StringFieldMapper sfm = propertyBuilders.get(property).build(context);
          fieldMappers.put(sfm.names().indexName(), sfm);
        }
      }

      // Initialise field mappers for the pre-defined fields
      // NOTE(review): the containsKey lookup uses the full path text while the map
      // is keyed by names().indexName() — presumably these coincide for this path
      // type, but confirm; if they differ the duplicate check never fires.
      for (FieldMappings mapping : ontologySettings.getFieldMappings()) {
        if (!fieldMappers.containsKey(context.path().fullPathAsText(mapping.getFieldName()))) {
          StringFieldMapper mapper =
              MapperBuilders.stringField(mapping.getFieldName())
                  .store(true)
                  .index(true)
                  .tokenized(!mapping.isUriField()) // URI fields are kept as single tokens
                  .build(context);
          fieldMappers.put(mapper.names().indexName(), mapper);
        }
      }

      context.path().remove(); // remove name
      context.path().pathType(origPathType);

      // Hand the completed sub-field map to the mapper; it owns them from here on.
      return new OntologyMapper(
          buildNames(context),
          fieldType,
          docValues,
          indexAnalyzer,
          searchAnalyzer,
          postingsProvider,
          docValuesProvider,
          similarity,
          fieldDataSettings,
          context.indexSettings(),
          new MultiFields.Builder().build(this, context),
          ontologySettings,
          fieldMappers,
          threadPool);
    }
 @Override
 public StringFieldMapper build(BuilderContext context) {
   // A positive gap requires wrapping every analyzer so the gap is applied
   // between multiple values of the same field.
   if (positionOffsetGap > 0) {
     indexAnalyzer = new NamedAnalyzer(indexAnalyzer, positionOffsetGap);
     searchAnalyzer = new NamedAnalyzer(searchAnalyzer, positionOffsetGap);
     searchQuotedAnalyzer = new NamedAnalyzer(searchQuotedAnalyzer, positionOffsetGap);
   }
   // if the field is not analyzed, then by default, we should omit norms and have docs only
   // index options, as probably what the user really wants
   // if they are set explicitly, we will use those values
   // we also change the values on the default field type so that toXContent emits what
   // differs from the defaults
   FieldType defaultFieldType = new FieldType(Defaults.FIELD_TYPE);
   if (fieldType.indexOptions() != IndexOptions.NONE && !fieldType.tokenized()) {
     // Shift the baseline too, so serialization only emits user-visible deltas.
     defaultFieldType.setOmitNorms(true);
     defaultFieldType.setIndexOptions(IndexOptions.DOCS);
     // A non-default boost requires norms, so only omit them when boost is default.
     if (!omitNormsSet && boost == Defaults.BOOST) {
       fieldType.setOmitNorms(true);
     }
     if (!indexOptionsSet) {
       fieldType.setIndexOptions(IndexOptions.DOCS);
     }
   }
   // Freeze after all mutations; a frozen FieldType is immutable from here on.
   defaultFieldType.freeze();
   StringFieldMapper fieldMapper =
       new StringFieldMapper(
           buildNames(context),
           boost,
           fieldType,
           defaultFieldType,
           docValues,
           nullValue,
           indexAnalyzer,
           searchAnalyzer,
           searchQuotedAnalyzer,
           positionOffsetGap,
           ignoreAbove,
           similarity,
           normsLoading,
           fieldDataSettings,
           context.indexSettings(),
           multiFieldsBuilder.build(this, context),
           copyTo);
   fieldMapper.includeInAll(includeInAll);
   return fieldMapper;
 }
  /**
   * Indexes the geohash for this point through the geohash sub-mapper.
   *
   * <p>The geohash is truncated to the configured precision. When geohash prefixes are
   * enabled, every prefix length from the truncated length down to 1 is indexed so that
   * prefix queries match; otherwise only the (possibly truncated) geohash itself is indexed.
   *
   * @param context parse context used to feed values to the sub-mapper
   * @param geohash the full-precision geohash of the point
   * @throws IOException if the underlying mapper fails to index a value
   */
  private void addGeohashField(ParseContext context, String geohash) throws IOException {
    int len = Math.min(fieldType().geohashPrecision(), geohash.length());
    // BUGFIX: the lower bound must be len, not geohash.length(). With
    // geohash.length(), a geohash longer than the configured precision made
    // len < min when prefixes were disabled, so the loop body never ran and the
    // geohash was silently not indexed at all. Using len guarantees at least one
    // iteration (the truncated geohash itself).
    int min = fieldType().isGeohashPrefixEnabled() ? 1 : len;

    for (int i = len; i >= min; i--) {
      // side effect of this call is adding the field
      geohashMapper.parse(context.createExternalValueContext(geohash.substring(0, i)));
    }
  }
  @Test
  public void testBytes() throws Exception {
    // Docs 1, 5 and 7 were indexed (in setup) with string values "str1", "str5", "str7".
    List<Integer> docs = Arrays.asList(1, 5, 7);

    // Build the same term set twice: a hash set for the direct filter API and a
    // list for the mapper-based termsFilter API.
    ObjectOpenHashSet<BytesRef> hTerms = new ObjectOpenHashSet<BytesRef>();
    List<BytesRef> cTerms = new ArrayList<BytesRef>(docs.size());
    for (int i = 0; i < docs.size(); i++) {
      BytesRef term = new BytesRef("str" + docs.get(i));
      hTerms.add(term);
      cTerms.add(term);
    }

    FieldDataTermsFilter hFilter = FieldDataTermsFilter.newBytes(getFieldData(strMapper), hTerms);

    int size = reader.maxDoc();
    FixedBitSet result = new FixedBitSet(size);

    // Direct filter: exactly the three expected docs must match.
    result.clear(0, size);
    assertThat(result.cardinality(), equalTo(0));
    result.or(hFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
    assertThat(result.cardinality(), equalTo(docs.size()));
    for (int i = 0; i < reader.maxDoc(); i++) {
      assertThat(result.get(i), equalTo(docs.contains(i)));
    }

    // filter from mapper
    result.clear(0, size);
    assertThat(result.cardinality(), equalTo(0));
    result.or(
        strMapper
            .termsFilter(ifdService, cTerms, null)
            .getDocIdSet(reader.getContext(), reader.getLiveDocs())
            .iterator());
    assertThat(result.cardinality(), equalTo(docs.size()));
    for (int i = 0; i < reader.maxDoc(); i++) {
      assertThat(result.get(i), equalTo(docs.contains(i)));
    }

    result.clear(0, size);
    assertThat(result.cardinality(), equalTo(0));

    // filter on a numeric field using BytesRef terms
    // should not match any docs
    hFilter = FieldDataTermsFilter.newBytes(getFieldData(lngMapper), hTerms);
    result.or(hFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
    assertThat(result.cardinality(), equalTo(0));

    // filter on a numeric field using BytesRef terms
    // should not match any docs
    hFilter = FieldDataTermsFilter.newBytes(getFieldData(dblMapper), hTerms);
    result.or(hFilter.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator());
    assertThat(result.cardinality(), equalTo(0));
  }
 @Override
 public StringFieldMapper build(BuilderContext context) {
   // A configured (non-sentinel) gap requires wrapping each analyzer so the gap
   // is inserted between multiple values of the same field.
   if (positionIncrementGap != POSITION_INCREMENT_GAP_USE_ANALYZER) {
     fieldType.setIndexAnalyzer(
         new NamedAnalyzer(fieldType.indexAnalyzer(), positionIncrementGap));
     fieldType.setSearchAnalyzer(
         new NamedAnalyzer(fieldType.searchAnalyzer(), positionIncrementGap));
     fieldType.setSearchQuoteAnalyzer(
         new NamedAnalyzer(fieldType.searchQuoteAnalyzer(), positionIncrementGap));
   }
   // if the field is not analyzed, then by default, we should omit norms and have docs only
   // index options, as probably what the user really wants
   // if they are set explicitly, we will use those values
   // we also change the values on the default field type so that toXContent emits what
   // differs from the defaults
   if (fieldType.indexOptions() != IndexOptions.NONE && !fieldType.tokenized()) {
     defaultFieldType.setOmitNorms(true);
     defaultFieldType.setIndexOptions(IndexOptions.DOCS);
     // A non-default boost requires norms, so only omit them at default boost.
     if (!omitNormsSet && fieldType.boost() == 1.0f) {
       fieldType.setOmitNorms(true);
     }
     if (!indexOptionsSet) {
       fieldType.setIndexOptions(IndexOptions.DOCS);
     }
   }
   // Must run after the mutations above so the shared setup sees final values.
   setupFieldType(context);
   StringFieldMapper fieldMapper =
       new StringFieldMapper(
           name,
           fieldType,
           defaultFieldType,
           positionIncrementGap,
           ignoreAbove,
           context.indexSettings(),
           multiFieldsBuilder.build(this, context),
           copyTo);
   return fieldMapper.includeInAll(includeInAll);
 }
  @Override
  protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
    // Parse the raw value as a string; the null-value substitution happens below
    // because this mapper's null value is an integer count, not a string.
    ValueAndBoost valueAndBoost =
        StringFieldMapper.parseCreateFieldForString(
            context, null /* Our null value is an int so we convert */, fieldType().boost());
    // Nothing to index: no value and no configured null-value fallback.
    if (valueAndBoost.value() == null && fieldType().nullValue() == null) {
      return;
    }

    // Only do the (potentially costly) token counting when the field is actually
    // indexed, stored, or has doc values.
    if (fieldType().indexOptions() != NONE || fieldType().stored() || fieldType().hasDocValues()) {
      int count;
      if (valueAndBoost.value() == null) {
        count = fieldType().nullValue();
      } else {
        // Run the analyzer over the value and count token positions.
        count = countPositions(analyzer, simpleName(), valueAndBoost.value());
      }
      addIntegerFields(context, fields, count, valueAndBoost.boost());
    }
  }
  @Before
  public void setup() throws Exception {
    super.setUp();

    // setup field mappers
    strMapper =
        new StringFieldMapper.Builder("str_value")
            .build(new Mapper.BuilderContext(null, new ContentPath(1)));

    lngMapper =
        new LongFieldMapper.Builder("lng_value")
            .build(new Mapper.BuilderContext(null, new ContentPath(1)));

    dblMapper =
        new DoubleFieldMapper.Builder("dbl_value")
            .build(new Mapper.BuilderContext(null, new ContentPath(1)));

    // create index and fielddata service
    ifdService = new IndexFieldDataService(new Index("test"), new DummyCircuitBreakerService());
    MapperService mapperService =
        MapperTestUtils.newMapperService(
            ifdService.index(), ImmutableSettings.Builder.EMPTY_SETTINGS);
    ifdService.setIndexService(new StubIndexService(mapperService));
    // In-memory index; presumably writer/reader are closed in a teardown method
    // outside this view — TODO confirm.
    writer =
        new IndexWriter(
            new RAMDirectory(),
            new IndexWriterConfig(Lucene.VERSION, new StandardAnalyzer(Lucene.VERSION)));

    // Index 10 docs; doc i carries "str"+i, long i and double i so each test can
    // address docs by value.
    int numDocs = 10;
    for (int i = 0; i < numDocs; i++) {
      Document d = new Document();
      d.add(new StringField(strMapper.names().indexName(), "str" + i, Field.Store.NO));
      d.add(new LongField(lngMapper.names().indexName(), i, Field.Store.NO));
      d.add(new DoubleField(dblMapper.names().indexName(), Double.valueOf(i), Field.Store.NO));
      writer.addDocument(d);
    }

    // Wrap as a single-segment reader so tests can use a single leaf context.
    reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(writer, true));
  }
    @Override
    public GeoPointFieldMapper build(BuilderContext context) {
      // Save the path type so it can be restored after building the sub-mappers.
      ContentPath.Type origPathType = context.path().pathType();
      context.path().pathType(pathType);

      DoubleFieldMapper latMapper = null;
      DoubleFieldMapper lonMapper = null;
      GeoPointFieldType geoPointFieldType = (GeoPointFieldType) fieldType;

      // Sub-field names (.lat, .lon, .geohash) are built under this mapper's name.
      context.path().add(name);
      if (enableLatLon) {
        // Separate numeric sub-fields allow range queries on latitude/longitude.
        NumberFieldMapper.Builder<?, ?> latMapperBuilder =
            doubleField(Names.LAT).includeInAll(false);
        NumberFieldMapper.Builder<?, ?> lonMapperBuilder =
            doubleField(Names.LON).includeInAll(false);
        if (precisionStep != null) {
          latMapperBuilder.precisionStep(precisionStep);
          lonMapperBuilder.precisionStep(precisionStep);
        }
        latMapper =
            (DoubleFieldMapper)
                latMapperBuilder
                    .includeInAll(false)
                    .store(fieldType.stored())
                    .docValues(false)
                    .build(context);
        lonMapper =
            (DoubleFieldMapper)
                lonMapperBuilder
                    .includeInAll(false)
                    .store(fieldType.stored())
                    .docValues(false)
                    .build(context);
        geoPointFieldType.setLatLonEnabled(latMapper.fieldType(), lonMapper.fieldType());
      }
      StringFieldMapper geohashMapper = null;
      if (enableGeoHash || enableGeohashPrefix) {
        // TODO: possible also implicitly enable geohash if geohash precision is set
        // Untokenized, norm-free, docs-only string field: geohashes are exact terms.
        geohashMapper =
            stringField(Names.GEOHASH)
                .index(true)
                .tokenized(false)
                .includeInAll(false)
                .omitNorms(true)
                .indexOptions(IndexOptions.DOCS)
                .build(context);
        geoPointFieldType.setGeohashEnabled(
            geohashMapper.fieldType(), geoHashPrecision, enableGeohashPrefix);
      }
      context.path().remove();

      context.path().pathType(origPathType);

      // this is important: even if geo points feel like they need to be tokenized to distinguish
      // lat from lon, we actually want to
      // store them as a single token.
      fieldType.setTokenized(false);
      setupFieldType(context);
      // Doc values are disabled on the main field after setup so the shared setup
      // logic does not re-enable them; the sub-fields carry the queryable data.
      fieldType.setHasDocValues(false);
      defaultFieldType.setHasDocValues(false);
      return new GeoPointFieldMapper(
          name,
          fieldType,
          defaultFieldType,
          context.indexSettings(),
          origPathType,
          latMapper,
          lonMapper,
          geohashMapper,
          multiFieldsBuilder.build(this, context));
    }