private static TInfo parseTerm(FunctionQParser fp) throws SyntaxError {
    TInfo tinfo = new TInfo();

    tinfo.indexedField = tinfo.field = fp.parseArg();
    tinfo.val = fp.parseArg();
    tinfo.indexedBytes = new BytesRef();

    FieldType ft = fp.getReq().getSchema().getFieldTypeNoEx(tinfo.field);
    if (ft == null) ft = new StrField();

    if (ft instanceof TextField) {
      // need to do analysis on the term
      String indexedVal = tinfo.val;
      Query q =
          ft.getFieldQuery(fp, fp.getReq().getSchema().getFieldOrNull(tinfo.field), tinfo.val);
      if (q instanceof TermQuery) {
        Term term = ((TermQuery) q).getTerm();
        tinfo.indexedField = term.field();
        indexedVal = term.text();
      }
      UnicodeUtil.UTF16toUTF8(indexedVal, 0, indexedVal.length(), tinfo.indexedBytes);
    } else {
      ft.readableToIndexed(tinfo.val, tinfo.indexedBytes);
    }

    return tinfo;
  }
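
As a follow-up, a minimal hedged sketch of how the parsed TInfo might be consumed downstream; the TInfo field names come from the snippet above, while the helper itself and the deep copy are assumptions:

  // Illustrative only: turn the analyzed field/value pair from parseTerm() into a TermQuery.
  private static Query toTermQuery(TInfo tinfo) {
    // deepCopyOf guards against the parser reusing its BytesRef buffer
    // (an assumption, not something the code above requires)
    return new TermQuery(new Term(tinfo.indexedField, BytesRef.deepCopyOf(tinfo.indexedBytes)));
  }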
Example #2
  /** @return a string representing a SchemaField's flags. */
  private static String getFieldFlags(SchemaField f) {
    FieldType t = (f == null) ? null : f.getType();

    // see: http://www.nabble.com/schema-field-properties-tf3437753.html#a9585549
    boolean lazy = false; // "lazy" is purely a property of reading fields
    boolean binary = false; // Currently not possible

    StringBuilder flags = new StringBuilder();
    flags.append((f != null && f.indexed()) ? FieldFlag.INDEXED.getAbbreviation() : '-');
    flags.append((t != null && t.isTokenized()) ? FieldFlag.TOKENIZED.getAbbreviation() : '-');
    flags.append((f != null && f.stored()) ? FieldFlag.STORED.getAbbreviation() : '-');
    flags.append((f != null && f.multiValued()) ? FieldFlag.MULTI_VALUED.getAbbreviation() : '-');
    flags.append(
        (f != null && f.storeTermVector()) ? FieldFlag.TERM_VECTOR_STORED.getAbbreviation() : '-');
    flags.append(
        (f != null && f.storeTermOffsets()) ? FieldFlag.TERM_VECTOR_OFFSET.getAbbreviation() : '-');
    flags.append(
        (f != null && f.storeTermPositions())
            ? FieldFlag.TERM_VECTOR_POSITION.getAbbreviation()
            : '-');
    flags.append((f != null && f.omitNorms()) ? FieldFlag.OMIT_NORMS.getAbbreviation() : '-');
    flags.append(
        (f != null && f.omitTermFreqAndPositions()) ? FieldFlag.OMIT_TF.getAbbreviation() : '-');
    flags.append(
        (f != null && f.omitPositions()) ? FieldFlag.OMIT_POSITIONS.getAbbreviation() : '-');
    flags.append((lazy) ? FieldFlag.LAZY.getAbbreviation() : '-');
    flags.append((binary) ? FieldFlag.BINARY.getAbbreviation() : '-');
    flags.append(
        (f != null && f.sortMissingFirst()) ? FieldFlag.SORT_MISSING_FIRST.getAbbreviation() : '-');
    flags.append(
        (f != null && f.sortMissingLast()) ? FieldFlag.SORT_MISSING_LAST.getAbbreviation() : '-');
    return flags.toString();
  }
    @Override
    protected Query newPrefixQuery(String text) {
      BooleanQuery.Builder bq = new BooleanQuery.Builder();
      bq.setDisableCoord(true);

      for (Map.Entry<String, Float> entry : weights.entrySet()) {
        String field = entry.getKey();
        FieldType type = schema.getFieldType(field);
        Query prefix;

        if (type instanceof TextField) {
          // If the field type is a TextField then use the multi term analyzer.
          Analyzer analyzer = ((TextField) type).getMultiTermAnalyzer();
          String term = TextField.analyzeMultiTerm(field, text, analyzer).utf8ToString();
          SchemaField sf = schema.getField(field);
          prefix = sf.getType().getPrefixQuery(qParser, sf, term);
        } else {
          // If the type is *not* a TextField don't do any analysis.
          SchemaField sf = schema.getField(field);
          prefix = type.getPrefixQuery(qParser, sf, text);
        }

        float boost = entry.getValue();
        if (boost != 1f) {
          prefix = new BoostQuery(prefix, boost);
        }
        bq.add(prefix, BooleanClause.Occur.SHOULD);
      }

      return simplify(bq.build());
    }
Example #4
 /**
  * Computes the term-&gt;count map for the specified term values relative to the docset contained in {@code parsed}.
  *
  * @param field the name of the field to compute term counts against
  * @param parsed contains the docset to compute term counts relative to
  * @param terms a list of term values (in the specified field) to compute the counts for
  */
 protected NamedList<Integer> getListedTermCounts(
     String field, final ParsedParams parsed, List<String> terms) throws IOException {
   FieldType ft = searcher.getSchema().getFieldType(field);
   NamedList<Integer> res = new NamedList<>();
   for (String term : terms) {
     String internal = ft.toInternal(term);
     int count = searcher.numDocs(new TermQuery(new Term(field, internal)), parsed.docs);
     res.add(term, count);
   }
   return res;
 }
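
A hedged usage sketch: only getListedTermCounts and the NamedList accessors come from the snippet above; the field name, term values, parsedParams, and the SLF4J log are illustrative assumptions.

  // Illustrative only: compute counts for a couple of hand-picked terms and log them.
  void logListedTermCounts(ParsedParams parsedParams) throws IOException {
    NamedList<Integer> counts =
        getListedTermCounts("category", parsedParams, Arrays.asList("books", "music"));
    for (int i = 0; i < counts.size(); i++) {
      log.info("term {} matched {} docs", counts.getName(i), counts.getVal(i));
    }
  }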
Example #5
  private static SimpleOrderedMap<Object> getDocumentFieldsInfo(
      Document doc, int docId, IndexReader reader, IndexSchema schema) throws IOException {
    SimpleOrderedMap<Object> finfo = new SimpleOrderedMap<Object>();
    for (Object o : doc.getFields()) {
      Fieldable fieldable = (Fieldable) o;
      SimpleOrderedMap<Object> f = new SimpleOrderedMap<Object>();

      SchemaField sfield = schema.getFieldOrNull(fieldable.name());
      FieldType ftype = (sfield == null) ? null : sfield.getType();

      f.add("type", (ftype == null) ? null : ftype.getTypeName());
      f.add("schema", getFieldFlags(sfield));
      f.add("flags", getFieldFlags(fieldable));

      Term t =
          new Term(
              fieldable.name(),
              ftype != null ? ftype.storedToIndexed(fieldable) : fieldable.stringValue());

      f.add("value", (ftype == null) ? null : ftype.toExternal(fieldable));

      // TODO: this really should be "stored"
      f.add("internal", fieldable.stringValue()); // may be a binary number

      byte[] arr = fieldable.getBinaryValue();
      if (arr != null) {
        f.add("binary", Base64.byteArrayToBase64(arr, 0, arr.length));
      }
      f.add("boost", fieldable.getBoost());
      f.add(
          "docFreq",
          t.text() == null ? 0 : reader.docFreq(t)); // this can be 0 for non-indexed fields

      // If we have a term vector, return that
      if (fieldable.isTermVectorStored()) {
        try {
          TermFreqVector v = reader.getTermFreqVector(docId, fieldable.name());
          if (v != null) {
            SimpleOrderedMap<Integer> tfv = new SimpleOrderedMap<Integer>();
            for (int i = 0; i < v.size(); i++) {
              tfv.add(v.getTerms()[i], v.getTermFrequencies()[i]);
            }
            f.add("termVector", tfv);
          }
        } catch (Exception ex) {
          log.warn("error writing term vector", ex);
        }
      }

      finfo.add(fieldable.name(), f);
    }
    return finfo;
  }
Example #6
 /** Check if all fields are of type {@link SirenField}. */
 private void checkFieldTypes() {
   for (final String fieldName : boosts.keySet()) {
     final FieldType fieldType = req.getSchema().getFieldType(fieldName);
     if (!(fieldType instanceof SirenField)) {
       throw new SolrException(
           SolrException.ErrorCode.SERVER_ERROR,
           "FieldType: "
               + fieldName
               + " ("
               + fieldType.getTypeName()
               + ") do not support NTriple Query");
     }
   }
 }
Example #7
  @Override
  public String init(NamedList config, SolrCore core) {
    LOG.info("init: " + config);
    String name = super.init(config, core);
    threshold =
        config.get(THRESHOLD_TOKEN_FREQUENCY) == null
            ? 0.0f
            : (Float) config.get(THRESHOLD_TOKEN_FREQUENCY);
    sourceLocation = (String) config.get(LOCATION);
    lookupImpl = (String) config.get(LOOKUP_IMPL);

    IndexSchema schema = core.getLatestSchema();
    suggestionAnalyzerFieldTypeName = (String) config.get(SUGGESTION_ANALYZER_FIELDTYPE);
    if (schema.getFieldTypes().containsKey(suggestionAnalyzerFieldTypeName)) {
      FieldType fieldType = schema.getFieldTypes().get(suggestionAnalyzerFieldTypeName);
      suggestionAnalyzer = fieldType.getQueryAnalyzer();
    }

    // support the old classnames without -Factory for config file backwards compatibility.
    if (lookupImpl == null
        || "org.apache.solr.spelling.suggest.jaspell.JaspellLookup".equals(lookupImpl)) {
      lookupImpl = JaspellLookupFactory.class.getName();
    } else if ("org.apache.solr.spelling.suggest.tst.TSTLookup".equals(lookupImpl)) {
      lookupImpl = TSTLookupFactory.class.getName();
    } else if ("org.apache.solr.spelling.suggest.fst.FSTLookup".equals(lookupImpl)) {
      lookupImpl = FSTLookupFactory.class.getName();
    }

    factory = core.getResourceLoader().newInstance(lookupImpl, LookupFactory.class);

    lookup = factory.create(config, core);
    String store = (String) config.get(STORE_DIR);
    if (store != null) {
      storeDir = new File(store);
      if (!storeDir.isAbsolute()) {
        storeDir = new File(core.getDataDir() + File.separator + storeDir);
      }
      if (!storeDir.exists()) {
        storeDir.mkdirs();
      } else {
        // attempt reload of the stored lookup
        try {
          lookup.load(new FileInputStream(new File(storeDir, factory.storeFileName())));
        } catch (IOException e) {
          LOG.warn("Loading stored lookup data failed", e);
        }
      }
    }
    return name;
  }
  protected NamedList serializeTopDocs(QueryCommandResult result) throws IOException {
    NamedList<Object> queryResult = new NamedList<>();
    queryResult.add("matches", result.getMatches());
    queryResult.add("totalHits", result.getTopDocs().totalHits);
    // debug: assert !Float.isNaN(result.getTopDocs().getMaxScore()) ==
    // rb.getGroupingSpec().isNeedScore();
    if (!Float.isNaN(result.getTopDocs().getMaxScore())) {
      queryResult.add("maxScore", result.getTopDocs().getMaxScore());
    }
    List<NamedList> documents = new ArrayList<>();
    queryResult.add("documents", documents);

    final IndexSchema schema = rb.req.getSearcher().getSchema();
    SchemaField uniqueField = schema.getUniqueKeyField();
    for (ScoreDoc scoreDoc : result.getTopDocs().scoreDocs) {
      NamedList<Object> document = new NamedList<>();
      documents.add(document);

      Document doc = retrieveDocument(uniqueField, scoreDoc.doc);
      document.add("id", uniqueField.getType().toExternal(doc.getField(uniqueField.getName())));
      if (!Float.isNaN(scoreDoc.score)) {
        document.add("score", scoreDoc.score);
      }
      if (!FieldDoc.class.isInstance(scoreDoc)) {
        continue; // thus don't add sortValues below
      }

      FieldDoc fieldDoc = (FieldDoc) scoreDoc;
      Object[] convertedSortValues = new Object[fieldDoc.fields.length];
      for (int j = 0; j < fieldDoc.fields.length; j++) {
        Object sortValue = fieldDoc.fields[j];
        Sort groupSort = rb.getGroupingSpec().getGroupSort();
        SchemaField field =
            groupSort.getSort()[j].getField() != null
                ? schema.getFieldOrNull(groupSort.getSort()[j].getField())
                : null;
        if (field != null) {
          FieldType fieldType = field.getType();
          if (sortValue != null) {
            sortValue = fieldType.marshalSortValue(sortValue);
          }
        }
        convertedSortValues[j] = sortValue;
      }
      document.add("sortValues", convertedSortValues);
    }

    return queryResult;
  }
 /** {@inheritDoc} */
 public void setNextReader(LeafReaderContext ctx) throws IOException {
   if (valueSource == null) {
     // first time we've collected local values, get the right ValueSource
     valueSource = (null == ft) ? statsField.getValueSource() : ft.getValueSource(sf, null);
     vsContext = ValueSource.newContext(statsField.getSearcher());
   }
   values = valueSource.getValues(vsContext, ctx);
 }
Example #10
  /** Return info about the schema. */
  private static SimpleOrderedMap<Object> getSchemaInfo(IndexSchema schema) {
    Map<String, List<String>> typeusemap = new HashMap<String, List<String>>();
    SimpleOrderedMap<Object> fields = new SimpleOrderedMap<Object>();
    SchemaField uniqueField = schema.getUniqueKeyField();
    for (SchemaField f : schema.getFields().values()) {
      populateFieldInfo(schema, typeusemap, fields, uniqueField, f);
    }

    SimpleOrderedMap<Object> dynamicFields = new SimpleOrderedMap<Object>();
    for (SchemaField f : schema.getDynamicFieldPrototypes()) {
      populateFieldInfo(schema, typeusemap, dynamicFields, uniqueField, f);
    }
    SimpleOrderedMap<Object> types = new SimpleOrderedMap<Object>();
    for (FieldType ft : schema.getFieldTypes().values()) {
      SimpleOrderedMap<Object> field = new SimpleOrderedMap<Object>();
      field.add("fields", typeusemap.get(ft.getTypeName()));
      field.add("tokenized", ft.isTokenized());
      field.add("className", ft.getClass().getName());
      field.add("indexAnalyzer", getAnalyzerInfo(ft.getAnalyzer()));
      field.add("queryAnalyzer", getAnalyzerInfo(ft.getQueryAnalyzer()));
      types.add(ft.getTypeName(), field);
    }

    SimpleOrderedMap<Object> finfo = new SimpleOrderedMap<Object>();
    finfo.add("fields", fields);
    finfo.add("dynamicFields", dynamicFields);
    finfo.add("uniqueKeyField", null == uniqueField ? null : uniqueField.getName());
    finfo.add("defaultSearchField", schema.getDefaultSearchFieldName());
    finfo.add("types", types);
    return finfo;
  }
 /** {@inheritDoc} */
 @Override
 public void accumulate(BytesRef value, int count) {
   if (null == ft) {
     throw new IllegalStateException(
         "Can't collect & convert BytesRefs on stats that do't use a a FieldType: " + statsField);
   }
   T typedValue = (T) ft.toObject(sf, value);
   accumulate(typedValue, count);
 }
  public SolrDocument doc2SolrDoc(Document doc) {
    SolrDocument solrDoc = new SolrDocument();
    for (IndexableField field : doc) {
      String fieldName = field.name();
      // hack: stands in for this.core.getLatestSchema().getFieldOrNull(fieldName)
      // and makes the lookup a lot faster
      SchemaField sf = getSchemaField(fieldName);
      Object val = null;
      try {
        FieldType ft = null;
        if (sf != null) ft = sf.getType();
        if (ft == null) {
          BytesRef bytesRef = field.binaryValue();
          if (bytesRef != null) {
            if (bytesRef.offset == 0 && bytesRef.length == bytesRef.bytes.length) {
              val = bytesRef.bytes;
            } else {
              final byte[] bytes = new byte[bytesRef.length];
              System.arraycopy(bytesRef.bytes, bytesRef.offset, bytes, 0, bytesRef.length);
              val = bytes;
            }
          } else {
            val = field.stringValue();
          }
        } else {
          val = ft.toObject(field);
        }
      } catch (Throwable e) {
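        // if a stored value cannot be decoded, skip this field rather than fail the whole document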
        continue;
      }

      if (sf != null && sf.multiValued() && !solrDoc.containsKey(fieldName)) {
        ArrayList<Object> l = new ArrayList<Object>();
        l.add(val);
        solrDoc.addField(fieldName, l);
      } else {
        solrDoc.addField(fieldName, val);
      }
    }
    return solrDoc;
  }
Example #13
  /**
   * Create a new SchemaField with the given name and type, and with the specified properties.
   * Properties are *not* inherited from the type in this case, so users of this constructor should
   * derive the properties from type.getSolrProperties() using all the default properties from the
   * type.
   */
  public SchemaField(String name, FieldType type, int properties, String defaultValue) {
    this.name = name;
    this.type = type;
    this.properties = properties;
    this.defaultValue = defaultValue;

    // initialize with the required property flag
    required = (properties & REQUIRED) != 0;

    type.checkSchemaField(this);
  }
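
Because no properties are inherited here, a caller has to supply the full bit mask itself. A hypothetical sketch, assuming the getSolrProperties() accessor named in the Javadoc above and that the REQUIRED constant is visible at the call site:

  // Hypothetical sketch: start from the type's default properties and force the REQUIRED bit.
  SchemaField idField =
      new SchemaField("id", type, type.getSolrProperties() | SchemaField.REQUIRED, null);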
Example #14
 @Override
 public String toString() {
   return name
       + "{type="
       + type.getTypeName()
       + ((defaultValue == null) ? "" : (",default=" + defaultValue))
       + ",properties="
       + propertiesToString(properties)
       + (required ? ", required=true" : "")
       + "}";
 }
Example #15
 public String init(NamedList config, SolrCore core) {
   name = (String) config.get(DICTIONARY_NAME);
   if (name == null) {
     name = DEFAULT_DICTIONARY_NAME;
   }
   field = (String) config.get(FIELD);
   IndexSchema schema = core.getLatestSchema();
   if (field != null && schema.getFieldTypeNoEx(field) != null) {
     analyzer = schema.getFieldType(field).getQueryAnalyzer();
   }
   fieldTypeName = (String) config.get(FIELD_TYPE);
   if (schema.getFieldTypes().containsKey(fieldTypeName)) {
     FieldType fieldType = schema.getFieldTypes().get(fieldTypeName);
     analyzer = fieldType.getQueryAnalyzer();
   }
   if (analyzer == null) {
     analyzer = new WhitespaceAnalyzer();
   }
   return name;
 }
Example #16
  @SuppressWarnings("unchecked")
  private void loadExternalFileDictionary(IndexSchema schema, SolrResourceLoader loader) {
    try {

      // Get the field type's analyzer
      if (fieldTypeName != null && schema.getFieldTypeNoEx(fieldTypeName) != null) {
        FieldType fieldType = schema.getFieldTypes().get(fieldTypeName);
        // Do index-time analysis using the given fieldType's analyzer
        RAMDirectory ramDir = new RAMDirectory();
        IndexWriter writer =
            new IndexWriter(
                ramDir, fieldType.getAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
        writer.setMergeFactor(300);
        writer.setMaxBufferedDocs(150);

        List<String> lines = loader.getLines(sourceLocation, characterEncoding);

        for (String s : lines) {
          Document d = new Document();
          d.add(new Field(WORD_FIELD_NAME, s, Field.Store.NO, Field.Index.TOKENIZED));
          writer.addDocument(d);
        }
        writer.optimize();
        writer.close();

        dictionary = new HighFrequencyDictionary(IndexReader.open(ramDir), WORD_FIELD_NAME, 0.0f);
      } else {
        // check if character encoding is defined
        if (characterEncoding == null) {
          dictionary = new PlainTextDictionary(loader.openResource(sourceLocation));
        } else {
          dictionary =
              new PlainTextDictionary(
                  new InputStreamReader(loader.openResource(sourceLocation), characterEncoding));
        }
      }

    } catch (IOException e) {
      log.error("Unable to load spellings", e);
    }
  }
  @Override
  public ValueSource parse(FunctionQParser fp) throws SyntaxError {
    String fieldName = fp.parseId();
    SchemaField field = fp.getReq().getSchema().getField(fieldName);
    FieldType type = field.getType();
    if (!(type instanceof MultiPointDocValuesField))
      throw new SyntaxError(
          "This function only supports fields of type "
              + MultiPointDocValuesField.class.getName()
              + ", not "
              + type.getClass().getName());
    MultiPointDocValuesField mpdvFieldType = (MultiPointDocValuesField) type;

    double[] parsedLatLong = null;
    try {
      parsedLatLong = ParseUtils.parseLatitudeLongitude(fp.parseArg());
    } catch (InvalidShapeException e) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
    }

    double y = parsedLatLong[0];
    double x = parsedLatLong[1];

    SpatialContext ctx = mpdvFieldType.getCtx();
    Point point = ctx.makePoint(x, y);

    String score = fp.getLocalParams().get("score", "distance");
    ValueSource valueSource = new MultiPointDistanceValueSource(fieldName, point, ctx);

    if ("distance".equals(score)) {
      return valueSource;
    } else if ("recipDistance".equals(score)) {
      int shift = fp.getLocalParams().getInt("shift", 100);
      int maxScore = fp.getLocalParams().getInt("maxScore", 10);
      return new ReciprocalFloatFunction(valueSource, maxScore, shift, shift);
    } else {
      throw new SolrException(
          SolrException.ErrorCode.BAD_REQUEST,
          "'score' local-param must be one of 'distance', or 'recipDistance'");
    }
  }
Example #18
  private static void populateFieldInfo(
      IndexSchema schema,
      Map<String, List<String>> typeusemap,
      SimpleOrderedMap<Object> fields,
      SchemaField uniqueField,
      SchemaField f) {
    FieldType ft = f.getType();
    SimpleOrderedMap<Object> field = new SimpleOrderedMap<Object>();
    field.add("type", ft.getTypeName());
    field.add("flags", getFieldFlags(f));
    if (f.isRequired()) {
      field.add("required", f.isRequired());
    }
    if (f.getDefaultValue() != null) {
      field.add("default", f.getDefaultValue());
    }
    if (f == uniqueField) {
      field.add("uniqueKey", true);
    }
    if (ft.getAnalyzer().getPositionIncrementGap(f.getName()) != 0) {
      field.add("positionIncrementGap", ft.getAnalyzer().getPositionIncrementGap(f.getName()));
    }
    field.add("copyDests", schema.getCopyFields(f.getName()));
    field.add("copySources", schema.getCopySources(f.getName()));

    fields.add(f.getName(), field);

    List<String> v = typeusemap.get(ft.getTypeName());
    if (v == null) {
      v = new ArrayList<String>();
    }
    v.add(f.getName());
    typeusemap.put(ft.getTypeName(), v);
  }
Example #19
    private NamedList unmarshalSortValues(
        SortSpec sortSpec, NamedList sortFieldValues, IndexSchema schema) {
      NamedList unmarshalledSortValsPerField = new NamedList();

      if (0 == sortFieldValues.size()) return unmarshalledSortValsPerField;

      List<SchemaField> schemaFields = sortSpec.getSchemaFields();
      SortField[] sortFields = sortSpec.getSort().getSort();

      int marshalledFieldNum = 0;
      for (int sortFieldNum = 0; sortFieldNum < sortFields.length; sortFieldNum++) {
        final SortField sortField = sortFields[sortFieldNum];
        final SortField.Type type = sortField.getType();

        // :TODO: would be simpler to always serialize every position of SortField[]
        if (type == SortField.Type.SCORE || type == SortField.Type.DOC) continue;

        final String sortFieldName = sortField.getField();
        final String valueFieldName = sortFieldValues.getName(marshalledFieldNum);
        assert sortFieldName.equals(valueFieldName)
            : "sortFieldValues name key does not match expected SortField.getField";

        List sortVals = (List) sortFieldValues.getVal(marshalledFieldNum);

        final SchemaField schemaField = schemaFields.get(sortFieldNum);
        if (null == schemaField) {
          unmarshalledSortValsPerField.add(sortField.getField(), sortVals);
        } else {
          FieldType fieldType = schemaField.getType();
          List unmarshalledSortVals = new ArrayList();
          for (Object sortVal : sortVals) {
            unmarshalledSortVals.add(fieldType.unmarshalSortValue(sortVal));
          }
          unmarshalledSortValsPerField.add(sortField.getField(), unmarshalledSortVals);
        }
        marshalledFieldNum++;
      }
      return unmarshalledSortValsPerField;
    }
 protected ScoreDoc[] transformToNativeShardDoc(
     List<NamedList<Object>> documents, Sort groupSort, String shard, IndexSchema schema) {
   ScoreDoc[] scoreDocs = new ScoreDoc[documents.size()];
   int j = 0;
   for (NamedList<Object> document : documents) {
     Object docId = document.get("id");
     if (docId != null) {
       docId = docId.toString();
     } else {
       log.error("doc {} has null 'id'", document);
     }
     Float score = (Float) document.get("score");
     if (score == null) {
       score = Float.NaN;
     }
     Object[] sortValues = null;
     Object sortValuesVal = document.get("sortValues");
     if (sortValuesVal != null) {
       sortValues = ((List) sortValuesVal).toArray();
       for (int k = 0; k < sortValues.length; k++) {
         SchemaField field =
             groupSort.getSort()[k].getField() != null
                 ? schema.getFieldOrNull(groupSort.getSort()[k].getField())
                 : null;
         if (field != null) {
           FieldType fieldType = field.getType();
           if (sortValues[k] != null) {
             sortValues[k] = fieldType.unmarshalSortValue(sortValues[k]);
           }
         }
       }
     } else {
       log.debug("doc {} has null 'sortValues'", document);
     }
     scoreDocs[j++] = new ShardDoc(score, sortValues, docId, shard);
   }
   return scoreDocs;
 }
Example #21
 static void writeFieldVal(String val, FieldType ft, Appendable out, int flags)
     throws IOException {
   if (ft != null) {
     try {
       out.append(ft.indexedToReadable(val));
     } catch (Exception e) {
       out.append("EXCEPTION(val=");
       out.append(val);
       out.append(")");
     }
   } else {
     out.append(val);
   }
 }
Example #22
 static void writeFieldVal(BytesRef val, FieldType ft, Appendable out, int flags)
     throws IOException {
   if (ft != null) {
     try {
       CharsRef readable = new CharsRef();
       ft.indexedToReadable(val, readable);
       out.append(readable);
     } catch (Exception e) {
       out.append("EXCEPTION(val=");
       out.append(val.utf8ToString());
       out.append(")");
     }
   } else {
     out.append(val.utf8ToString());
   }
 }
Example #23
 @Override
 protected void init(IndexSchema schema, Map<String, String> args) {
   properties |= TOKENIZED;
   if (schema.getVersion() > 1.1F
       &&
       // only override if it's not explicitly true
       0 == (trueProperties & OMIT_TF_POSITIONS)) {
     properties &= ~OMIT_TF_POSITIONS;
   }
   if (schema.getVersion() > 1.3F) {
     autoGeneratePhraseQueries = false;
   } else {
     autoGeneratePhraseQueries = true;
   }
   String autoGeneratePhraseQueriesStr = args.remove("autoGeneratePhraseQueries");
   if (autoGeneratePhraseQueriesStr != null)
     autoGeneratePhraseQueries = Boolean.parseBoolean(autoGeneratePhraseQueriesStr);
   super.init(schema, args);
 }
  @Before
  public void setUp() throws Exception {

    Answer<GroupCollapseSummary> answer =
        new Answer<GroupCollapseSummary>() {

          @Override
          public GroupCollapseSummary answer(InvocationOnMock invocation) throws Throwable {
            Object[] args = invocation.getArguments();
            DummyGroupCollapseSummary dummyObject =
                new DummyGroupCollapseSummary(
                    (String) args[0],
                    (SolrIndexSearcher) args[1],
                    (Set<String>) args[2],
                    (String) args[3]);
            return dummyObject;
          }
        };

    PowerMockito.whenNew(GroupCollapseSummary.class).withAnyArguments().thenAnswer(answer);

    initMocks(this);

    schema = PowerMockito.mock(IndexSchema.class);

    rb.req = req;
    rb.rsp = rsp;
    when(rb.getGroupingSpec()).thenReturn(groupSpec);
    when(req.getParams()).thenReturn(params);
    when(req.getSchema()).thenReturn(schema);
    when(req.getSearcher()).thenReturn(searcher);
    mockResponse();

    when(schema.getFieldType(FIELD_PRICE)).thenReturn(priceType);
    when(schema.getFieldType(FIELD_DISCOUNT)).thenReturn(discountType);
    when(schema.getFieldType(FIELD_CLOSEOUT)).thenReturn(booleanType);
    when(schema.getFieldType(FIELD_COLOR)).thenReturn(stringType);
    when(schema.getFieldType(FIELD_COLORFAMILY)).thenReturn(stringType);

    numericType = PowerMockito.mock(org.apache.lucene.document.FieldType.NumericType.class);
    when(priceType.getNumericType()).thenReturn(numericType);
    when(priceType.getTypeName()).thenReturn("tfloat");
    when(discountType.getNumericType()).thenReturn(numericType);
    when(discountType.getTypeName()).thenReturn("tint");
    when(booleanType.getTypeName()).thenReturn("boolean");
    when(stringType.getTypeName()).thenReturn("string");

    when(groupSpec.getFields()).thenReturn(new String[] {"productId"});
  }
Example #25
 public SortField getSortField(boolean top) {
   return type.getSortField(this, top);
 }
Example #26
 public void write(TextResponseWriter writer, String name, Fieldable val) throws IOException {
   // name is passed in because it may be null if name should not be used.
   type.write(writer, name, val);
 }
Example #27
 /**
  * If true, then use {@link #createFields(String, float)}, else use {@link #createField} to save
  * an extra allocation
  *
  * @return true if this field is a poly field
  */
 public boolean isPolyField() {
   return type.isPolyField();
 }
Example #28
 public Fieldable[] createFields(String val, float boost) {
   return type.createFields(this, val, boost);
 }
Example #29
 public Field createField(String val, float boost) {
   return type.createField(this, val, boost);
 }
  @Override
  public Lookup create(NamedList params, SolrCore core) {

    // mandatory parameter
    Object fieldTypeName = params.get(AnalyzingLookupFactory.QUERY_ANALYZER);
    if (fieldTypeName == null) {
      throw new IllegalArgumentException(
          "Error in configuration: "
              + AnalyzingLookupFactory.QUERY_ANALYZER
              + " parameter is mandatory");
    }
    // retrieve index and query analyzers for the field
    FieldType ft = core.getLatestSchema().getFieldTypeByName(fieldTypeName.toString());
    if (ft == null) {
      throw new IllegalArgumentException(
          "Error in configuration: " + fieldTypeName.toString() + " is not defined in the schema");
    }
    Analyzer indexAnalyzer = ft.getAnalyzer();
    Analyzer queryAnalyzer = ft.getQueryAnalyzer();

    // optional parameters
    boolean exactMatchFirst =
        (params.get(AnalyzingLookupFactory.EXACT_MATCH_FIRST) != null)
            ? Boolean.valueOf(params.get(AnalyzingLookupFactory.EXACT_MATCH_FIRST).toString())
            : true;

    boolean preserveSep =
        (params.get(AnalyzingLookupFactory.PRESERVE_SEP) != null)
            ? Boolean.valueOf(params.get(AnalyzingLookupFactory.PRESERVE_SEP).toString())
            : true;

    int options = 0;
    if (exactMatchFirst) {
      options |= FuzzySuggester.EXACT_FIRST;
    }
    if (preserveSep) {
      options |= FuzzySuggester.PRESERVE_SEP;
    }

    int maxSurfaceFormsPerAnalyzedForm =
        (params.get(AnalyzingLookupFactory.MAX_SURFACE_FORMS) != null)
            ? Integer.parseInt(params.get(AnalyzingLookupFactory.MAX_SURFACE_FORMS).toString())
            : 256;

    int maxGraphExpansions =
        (params.get(AnalyzingLookupFactory.MAX_EXPANSIONS) != null)
            ? Integer.parseInt(params.get(AnalyzingLookupFactory.MAX_EXPANSIONS).toString())
            : -1;

    boolean preservePositionIncrements =
        params.get(AnalyzingLookupFactory.PRESERVE_POSITION_INCREMENTS) != null
            ? Boolean.valueOf(
                params.get(AnalyzingLookupFactory.PRESERVE_POSITION_INCREMENTS).toString())
            : false;

    int maxEdits =
        (params.get(MAX_EDITS) != null)
            ? Integer.parseInt(params.get(MAX_EDITS).toString())
            : FuzzySuggester.DEFAULT_MAX_EDITS;

    boolean transpositions =
        (params.get(TRANSPOSITIONS) != null)
            ? Boolean.parseBoolean(params.get(TRANSPOSITIONS).toString())
            : FuzzySuggester.DEFAULT_TRANSPOSITIONS;

    int nonFuzzyPrefix =
        (params.get(NON_FUZZY_PREFIX) != null)
            ? Integer.parseInt(params.get(NON_FUZZY_PREFIX).toString())
            : FuzzySuggester.DEFAULT_NON_FUZZY_PREFIX;

    int minFuzzyLength =
        (params.get(MIN_FUZZY_LENGTH) != null)
            ? Integer.parseInt(params.get(MIN_FUZZY_LENGTH).toString())
            : FuzzySuggester.DEFAULT_MIN_FUZZY_LENGTH;

    boolean unicodeAware =
        (params.get(UNICODE_AWARE) != null)
            ? Boolean.valueOf(params.get(UNICODE_AWARE).toString())
            : FuzzySuggester.DEFAULT_UNICODE_AWARE;

    return new FuzzySuggester(
        indexAnalyzer,
        queryAnalyzer,
        options,
        maxSurfaceFormsPerAnalyzedForm,
        maxGraphExpansions,
        preservePositionIncrements,
        maxEdits,
        transpositions,
        nonFuzzyPrefix,
        minFuzzyLength,
        unicodeAware);
  }
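
Finally, a hedged sketch of driving this factory directly with a hand-built NamedList, e.g. from a test; only the constant names come from the code above, while the factory class name, the field type value, and 'core' are assumptions:

  // Illustrative only: minimal params for the suggester factory shown above.
  NamedList<Object> params = new NamedList<>();
  params.add(AnalyzingLookupFactory.QUERY_ANALYZER, "text_general"); // field type assumed to exist in the schema
  params.add(FuzzyLookupFactory.MAX_EDITS, "1");                     // assumed-visible constant; value illustrative
  Lookup lookup = new FuzzyLookupFactory().create(params, core);     // 'core' is an assumed SolrCore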