Code example #1
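  // Flattens grouped results: copies every hit from the TopGroups of the first grouping field
  // into a single SolrDocumentList, tracking the maximum score, and adds the list to the response.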
  /** {@inheritDoc} */
  @Override
  public void transform(
      Map<String, ?> result, ResponseBuilder rb, SolrDocumentSource solrDocumentSource) {
    Object value = result.get(rb.getGroupingSpec().getFields()[0]);
    if (TopGroups.class.isInstance(value)) {
      @SuppressWarnings("unchecked")
      TopGroups<BytesRef> topGroups = (TopGroups<BytesRef>) value;
      SolrDocumentList docList = new SolrDocumentList();
      docList.setStart(rb.getGroupingSpec().getOffset());
      docList.setNumFound(rb.totalHitCount);

      Float maxScore = Float.NEGATIVE_INFINITY;
      for (GroupDocs<BytesRef> group : topGroups.groups) {
        for (ScoreDoc scoreDoc : group.scoreDocs) {
          if (maxScore < scoreDoc.score) {
            maxScore = scoreDoc.score;
          }
          docList.add(solrDocumentSource.retrieve(scoreDoc));
        }
      }
      if (maxScore != Float.NEGATIVE_INFINITY) {
        docList.setMaxScore(maxScore);
      }
      rb.rsp.addResponse(docList);
    }
  }
Code example #2
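  // Honors facet excludeTags: looks up the tagged filters in the request context, drops their
  // queries from the main query and filter list, and recomputes the facet base DocSet from the rest.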
  private void handleFilterExclusions() throws IOException {
    List<String> excludeTags = freq.domain.excludeTags;

    if (excludeTags == null || excludeTags.size() == 0) {
      return;
    }

    // TODO: somehow remove responsebuilder dependency
    ResponseBuilder rb = SolrRequestInfo.getRequestInfo().getResponseBuilder();
    Map<?, ?> tagMap = (Map<?, ?>) rb.req.getContext().get("tags");
    if (tagMap == null) {
      // no filters were tagged
      return;
    }

    IdentityHashMap<Query, Boolean> excludeSet = new IdentityHashMap<>();
    for (String excludeTag : excludeTags) {
      Object olst = tagMap.get(excludeTag);
      // tagMap maps each tag to a List<QParser>, but this is subject to change in the future
      if (!(olst instanceof Collection)) continue;
      for (Object o : (Collection<?>) olst) {
        if (!(o instanceof QParser)) continue;
        QParser qp = (QParser) o;
        try {
          excludeSet.put(qp.getQuery(), Boolean.TRUE);
        } catch (SyntaxError syntaxError) {
          // This should not happen since we should only be retrieving a previously parsed query
          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, syntaxError);
        }
      }
    }
    if (excludeSet.size() == 0) return;

    List<Query> qlist = new ArrayList<>();

    // add the base query
    if (!excludeSet.containsKey(rb.getQuery())) {
      qlist.add(rb.getQuery());
    }

    // add the filters
    if (rb.getFilters() != null) {
      for (Query q : rb.getFilters()) {
        if (!excludeSet.containsKey(q)) {
          qlist.add(q);
        }
      }
    }

    // now walk back up the context tree
    // TODO: we lose parent exclusions...
    for (FacetContext curr = fcontext; curr != null; curr = curr.parent) {
      if (curr.filter != null) {
        qlist.add(curr.filter);
      }
    }

    // recompute the base domain
    fcontext.base = fcontext.searcher.getDocSet(qlist);
  }
Code example #3
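 // Iterates over the internal Lucene doc ids of the current results, walking the grouped
 // response structure when grouping is on and the plain DocList otherwise.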
 @SuppressWarnings({"rawtypes", "unchecked"})
 private Iterator<Integer> docIterator(ResponseBuilder rb) {
   if (rb.grouping()) {
     List<Integer> docList = new ArrayList<>();
     NamedList values = rb.rsp.getValues();
     NamedList grouped = (NamedList) values.get("grouped");
     for (String field : rb.getGroupingSpec().getFields()) {
       NamedList fieldResults = (NamedList) grouped.get(field);
       if (rb.getGroupingSpec().getResponseFormat() == Grouping.Format.grouped) {
         List<NamedList> groups = (List<NamedList>) fieldResults.get("groups");
         for (NamedList group : groups) {
           for (DocIterator it = ((DocList) group.get("doclist")).iterator(); it.hasNext(); ) {
             docList.add(it.nextDoc());
           }
         }
       } else {
         for (DocIterator it = ((DocList) fieldResults.get("doclist")).iterator();
             it.hasNext(); ) {
           docList.add(it.nextDoc());
         }
       }
     }
     return docList.iterator();
   } else {
     return rb.getResults().docList.iterator();
   }
 }
Code example #4
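  // Serializes a QueryCommandResult for a shard response: match and hit counts, optional max
  // score, and one entry per document with its unique key, optional score, and marshalled sort values.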
  protected NamedList serializeTopDocs(QueryCommandResult result) throws IOException {
    NamedList<Object> queryResult = new NamedList<>();
    queryResult.add("matches", result.getMatches());
    queryResult.add("totalHits", result.getTopDocs().totalHits);
    if (rb.getGroupingSpec().isNeedScore()) {
      queryResult.add("maxScore", result.getTopDocs().getMaxScore());
    }
    List<NamedList> documents = new ArrayList<>();
    queryResult.add("documents", documents);

    final IndexSchema schema = rb.req.getSearcher().getSchema();
    SchemaField uniqueField = schema.getUniqueKeyField();
    CharsRef spare = new CharsRef();
    for (ScoreDoc scoreDoc : result.getTopDocs().scoreDocs) {
      NamedList<Object> document = new NamedList<>();
      documents.add(document);

      StoredDocument doc = retrieveDocument(uniqueField, scoreDoc.doc);
      document.add("id", uniqueField.getType().toExternal(doc.getField(uniqueField.getName())));
      if (rb.getGroupingSpec().isNeedScore()) {
        document.add("score", scoreDoc.score);
      }
      if (!FieldDoc.class.isInstance(scoreDoc)) {
        continue;
      }

      FieldDoc fieldDoc = (FieldDoc) scoreDoc;
      Object[] convertedSortValues = new Object[fieldDoc.fields.length];
      for (int j = 0; j < fieldDoc.fields.length; j++) {
        Object sortValue = fieldDoc.fields[j];
        Sort groupSort = rb.getGroupingSpec().getGroupSort();
        SchemaField field =
            groupSort.getSort()[j].getField() != null
                ? schema.getFieldOrNull(groupSort.getSort()[j].getField())
                : null;
        if (field != null) {
          FieldType fieldType = field.getType();
          if (sortValue != null) {
            sortValue = fieldType.marshalSortValue(sortValue);
          }
        }
        convertedSortValues[j] = sortValue;
      }
      document.add("sortValues", convertedSortValues);
    }

    return queryResult;
  }
Code example #5
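 // Distributed finish stage: when clustering of search results is enabled, runs the clustering
 // engine over the merged SolrDocumentList and adds the resulting clusters to the response.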
 @Override
 public void finishStage(ResponseBuilder rb) {
   SolrParams params = rb.req.getParams();
   if (!params.getBool(COMPONENT_NAME, false)
       || !params.getBool(ClusteringParams.USE_SEARCH_RESULTS, false)) {
     return;
   }
   if (rb.stage == ResponseBuilder.STAGE_GET_FIELDS) {
     SearchClusteringEngine engine = getSearchClusteringEngine(rb);
     if (engine != null) {
       SolrDocumentList solrDocList = (SolrDocumentList) rb.rsp.getValues().get("response");
       // TODO: Currently, docIds is set to null in a distributed environment, which prevents
       // CarrotParams.PRODUCE_SUMMARY from working. To make CarrotParams.PRODUCE_SUMMARY work
       // in distributed mode, we could either:
       // (a) have ClusteringComponent produce a summary in each shard and merge those
       //     summaries in finishStage(), or
       // (b) add a doHighlighting(SolrDocumentList, ...) method to SolrHighlighter and have
       //     SolrHighlighter use "external text" rather than stored values to produce snippets.
       Map<SolrDocument, Integer> docIds = null;
       Object clusters = engine.cluster(rb.getQuery(), solrDocList, docIds, rb.req);
       rb.rsp.add("clusters", clusters);
     } else {
       String name = getClusteringEngineName(rb);
       log.warn("No engine for: " + name);
     }
   }
 }
Code example #6
  /**
   * Actually run the query.
   *
   * @param rb the response builder holding the current request, results, and response
   */
  @Override
  public void process(ResponseBuilder rb) throws IOException {
    if (rb.doFacets) {
      SolrParams params = rb.req.getParams();
      RangeFieldFacets f = new RangeFieldFacets(rb.req, rb.getResults().docSet, params, rb);

      rb.rsp.add("facet_counts", f.getFacetCounts());
    }
  }
Code example #7
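  // Runs clustering as part of the process stage: clusters the current search results (converted
  // to a SolrDocumentList) and/or the document collection, adding the output under "clusters".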
  @Override
  public void process(ResponseBuilder rb) throws IOException {
    SolrParams params = rb.req.getParams();
    if (!params.getBool(COMPONENT_NAME, false)) {
      return;
    }
    String name = getClusteringEngineName(rb);
    boolean useResults = params.getBool(ClusteringParams.USE_SEARCH_RESULTS, false);
    if (useResults) {
      SearchClusteringEngine engine = getSearchClusteringEngine(rb);
      if (engine != null) {
        DocListAndSet results = rb.getResults();
        Map<SolrDocument, Integer> docIds = Maps.newHashMapWithExpectedSize(results.docList.size());
        SolrDocumentList solrDocList =
            SolrPluginUtils.docListToSolrDocumentList(
                results.docList, rb.req.getSearcher(), engine.getFieldsToLoad(rb.req), docIds);
        Object clusters = engine.cluster(rb.getQuery(), solrDocList, docIds, rb.req);
        rb.rsp.add("clusters", clusters);
      } else {
        log.warn("No engine for: " + name);
      }
    }
    boolean useCollection = params.getBool(ClusteringParams.USE_COLLECTION, false);
    if (useCollection) {
      DocumentClusteringEngine engine = documentClusteringEngines.get(name);
      if (engine != null) {
        boolean useDocSet = params.getBool(ClusteringParams.USE_DOC_SET, false);
        NamedList<?> nl = null;

        // TODO: This likely needs to be made into a background task that runs in an executor
        if (useDocSet) {
          nl = engine.cluster(rb.getResults().docSet, params);
        } else {
          nl = engine.cluster(params);
        }
        rb.rsp.add("clusters", nl);
      } else {
        log.warn("No engine for " + name);
      }
    }
  }
Code example #8
  /**
   * Match up search results and add corresponding data for each result (if we have query results
   * available).
   */
  @Override
  @SuppressWarnings({"rawtypes", "unchecked"})
  public void process(ResponseBuilder rb) throws IOException {
    SolrParams params = rb.req.getParams();
    if (!params.getBool(getName(), false)) {
      return;
    }

    XJoinResults<?> results = (XJoinResults<?>) rb.req.getContext().get(getResultsTag());
    if (results == null || rb.getResults() == null) {
      return;
    }

    // general results
    FieldAppender appender =
        new FieldAppender(
            (String) params.get(getName() + "." + XJoinParameters.RESULTS_FIELD_LIST, "*"));
    NamedList general = appender.addNamedList(rb.rsp.getValues(), getName(), results);

    // per join id results
    FieldAppender docAppender =
        new FieldAppender(
            (String) params.get(getName() + "." + XJoinParameters.DOC_FIELD_LIST, "*"));
    Set<String> joinFields = new HashSet<>();
    joinFields.add(joinField);

    List<String> joinIds = new ArrayList<>();
    for (Iterator<Integer> it = docIterator(rb); it.hasNext(); ) {
      StoredDocument doc = rb.req.getSearcher().doc(it.next(), joinFields);
      for (String joinId : doc.getValues(joinField)) {
        if (!joinIds.contains(joinId)) {
          joinIds.add(joinId);
        }
      }
    }

    for (String joinId : joinIds) {
      Object object = results.getResult(joinId);
      if (object == null) continue;
      NamedList external = new NamedList<>();
      general.add("external", external);
      external.add("joinId", joinId);
      if (object instanceof Iterable) {
        for (Object item : (Iterable) object) {
          docAppender.addNamedList(external, "doc", item);
        }
      } else {
        docAppender.addNamedList(external, "doc", object);
      }
    }
  }
Code example #9
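  // A variant of the clustering process() in code example #7 that passes the raw DocList to the
  // search clustering engine instead of converting it to a SolrDocumentList first.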
  public void process(ResponseBuilder rb) throws IOException {
    SolrParams params = rb.req.getParams();
    if (!params.getBool(COMPONENT_NAME, false)) {
      return;
    }
    String name = params.get(ClusteringParams.ENGINE_NAME, ClusteringEngine.DEFAULT_ENGINE_NAME);
    boolean useResults = params.getBool(ClusteringParams.USE_SEARCH_RESULTS, false);
    if (useResults) {
      SearchClusteringEngine engine = searchClusteringEngines.get(name);
      if (engine != null) {
        DocListAndSet results = rb.getResults();
        Object clusters = engine.cluster(rb.getQuery(), results.docList, rb.req);
        rb.rsp.add("clusters", clusters);
      } else {
        log.warn("No engine for: " + name);
      }
    }
    boolean useCollection = params.getBool(ClusteringParams.USE_COLLECTION, false);
    if (useCollection) {
      DocumentClusteringEngine engine = documentClusteringEngines.get(name);
      if (engine != null) {
        boolean useDocSet = params.getBool(ClusteringParams.USE_DOC_SET, false);
        NamedList nl = null;

        // TODO: This likely needs to be made into a background task that runs in an executor
        if (useDocSet) {
          nl = engine.cluster(rb.getResults().docSet, params);
        } else {
          nl = engine.cluster(params);
        }
        rb.rsp.add("clusters", nl);
      } else {
        log.warn("No engine for " + name);
      }
    }
  }
Code example #10
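  // Grouped faceting: counts each facet value once per group (group.field) with a
  // TermGroupFacetCollector, then applies sort, offset/limit, mincount, prefix, and contains
  // constraints and optionally appends the missing-value count.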
  public NamedList<Integer> getGroupedCounts(
      SolrIndexSearcher searcher,
      DocSet base,
      String field,
      boolean multiToken,
      int offset,
      int limit,
      int mincount,
      boolean missing,
      String sort,
      String prefix,
      String contains,
      boolean ignoreCase)
      throws IOException {
    GroupingSpecification groupingSpecification = rb.getGroupingSpec();
    final String groupField =
        groupingSpecification != null ? groupingSpecification.getFields()[0] : null;
    if (groupField == null) {
      throw new SolrException(
          SolrException.ErrorCode.BAD_REQUEST,
          "Specify the group.field as parameter or local parameter");
    }

    BytesRef prefixBytesRef = prefix != null ? new BytesRef(prefix) : null;
    final TermGroupFacetCollector collector =
        TermGroupFacetCollector.createTermGroupFacetCollector(
            groupField, field, multiToken, prefixBytesRef, 128);

    SchemaField sf = searcher.getSchema().getFieldOrNull(groupField);

    if (sf != null
        && sf.hasDocValues() == false
        && sf.multiValued() == false
        && sf.getType().getNumericType() != null) {
      // it's a single-valued numeric field: we must currently create insanity :(
      // there isn't a GroupedFacetCollector that works on numerics right now...
      searcher.search(
          base.getTopFilter(),
          new FilterCollector(collector) {
            @Override
            public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
              LeafReader insane = Insanity.wrapInsanity(context.reader(), groupField);
              return in.getLeafCollector(insane.getContext());
            }
          });
    } else {
      searcher.search(base.getTopFilter(), collector);
    }

    boolean orderByCount =
        sort.equals(FacetParams.FACET_SORT_COUNT)
            || sort.equals(FacetParams.FACET_SORT_COUNT_LEGACY);
    TermGroupFacetCollector.GroupedFacetResult result =
        collector.mergeSegmentResults(
            limit < 0 ? Integer.MAX_VALUE : (offset + limit), mincount, orderByCount);

    CharsRefBuilder charsRef = new CharsRefBuilder();
    FieldType facetFieldType = searcher.getSchema().getFieldType(field);
    NamedList<Integer> facetCounts = new NamedList<>();
    List<TermGroupFacetCollector.FacetEntry> scopedEntries =
        result.getFacetEntries(offset, limit < 0 ? Integer.MAX_VALUE : limit);
    for (TermGroupFacetCollector.FacetEntry facetEntry : scopedEntries) {
      // :TODO:can we do contains earlier than this to make it more efficient?
      if (contains != null
          && !contains(facetEntry.getValue().utf8ToString(), contains, ignoreCase)) {
        continue;
      }
      facetFieldType.indexedToReadable(facetEntry.getValue(), charsRef);
      facetCounts.add(charsRef.toString(), facetEntry.getCount());
    }

    if (missing) {
      facetCounts.add(null, result.getTotalMissingCount());
    }

    return facetCounts;
  }
Code example #11
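  // Computes the facet base DocSet with tagged filters excluded; when group.truncate is in
  // effect, the domain is further reduced to the group heads.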
  protected DocSet computeDocSet(DocSet baseDocSet, List<String> excludeTagList)
      throws SyntaxError, IOException {
    Map<?, ?> tagMap = (Map<?, ?>) req.getContext().get("tags");
    // rb can be null if facets are being calculated from a RequestHandler e.g. MoreLikeThisHandler
    if (tagMap == null || rb == null) {
      return baseDocSet;
    }

    IdentityHashMap<Query, Boolean> excludeSet = new IdentityHashMap<>();
    for (String excludeTag : excludeTagList) {
      Object olst = tagMap.get(excludeTag);
      // tagMap maps each tag to a List<QParser>, but this is subject to change in the future
      if (!(olst instanceof Collection)) continue;
      for (Object o : (Collection<?>) olst) {
        if (!(o instanceof QParser)) continue;
        QParser qp = (QParser) o;
        excludeSet.put(qp.getQuery(), Boolean.TRUE);
      }
    }
    if (excludeSet.size() == 0) return baseDocSet;

    List<Query> qlist = new ArrayList<>();

    // add the base query
    if (!excludeSet.containsKey(rb.getQuery())) {
      qlist.add(rb.getQuery());
    }

    // add the filters
    if (rb.getFilters() != null) {
      for (Query q : rb.getFilters()) {
        if (!excludeSet.containsKey(q)) {
          qlist.add(q);
        }
      }
    }

    // get the new base docset for this facet
    DocSet base = searcher.getDocSet(qlist);
    if (rb.grouping() && rb.getGroupingSpec().isTruncateGroups()) {
      Grouping grouping = new Grouping(searcher, null, rb.getQueryCommand(), false, 0, false);
      grouping.setWithinGroupSort(rb.getGroupingSpec().getSortWithinGroup());
      if (rb.getGroupingSpec().getFields().length > 0) {
        grouping.addFieldCommand(rb.getGroupingSpec().getFields()[0], req);
      } else if (rb.getGroupingSpec().getFunctions().length > 0) {
        grouping.addFunctionCommand(rb.getGroupingSpec().getFunctions()[0], req);
      } else {
        return base;
      }
      AbstractAllGroupHeadsCollector allGroupHeadsCollector =
          grouping.getCommands().get(0).createAllGroupCollector();
      searcher.search(base.getTopFilter(), allGroupHeadsCollector);
      return new BitDocSet(allGroupHeadsCollector.retrieveGroupHeads(searcher.maxDoc()));
    } else {
      return base;
    }
  }
Code example #12
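  // Serializes TopGroups for a shard response: overall hit counts plus, per group, its hit
  // count, max score, and documents with unique key, score, and marshalled sort values.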
  protected NamedList serializeTopGroups(TopGroups<BytesRef> data, SchemaField groupField)
      throws IOException {
    NamedList<Object> result = new NamedList<>();
    result.add("totalGroupedHitCount", data.totalGroupedHitCount);
    result.add("totalHitCount", data.totalHitCount);
    if (data.totalGroupCount != null) {
      result.add("totalGroupCount", data.totalGroupCount);
    }
    CharsRef spare = new CharsRef();

    final IndexSchema schema = rb.req.getSearcher().getSchema();
    SchemaField uniqueField = schema.getUniqueKeyField();
    for (GroupDocs<BytesRef> searchGroup : data.groups) {
      NamedList<Object> groupResult = new NamedList<>();
      groupResult.add("totalHits", searchGroup.totalHits);
      if (!Float.isNaN(searchGroup.maxScore)) {
        groupResult.add("maxScore", searchGroup.maxScore);
      }

      List<NamedList<Object>> documents = new ArrayList<>();
      for (int i = 0; i < searchGroup.scoreDocs.length; i++) {
        NamedList<Object> document = new NamedList<>();
        documents.add(document);

        StoredDocument doc = retrieveDocument(uniqueField, searchGroup.scoreDocs[i].doc);
        document.add("id", uniqueField.getType().toExternal(doc.getField(uniqueField.getName())));
        if (!Float.isNaN(searchGroup.scoreDocs[i].score)) {
          document.add("score", searchGroup.scoreDocs[i].score);
        }
        if (!(searchGroup.scoreDocs[i] instanceof FieldDoc)) {
          continue;
        }

        FieldDoc fieldDoc = (FieldDoc) searchGroup.scoreDocs[i];
        Object[] convertedSortValues = new Object[fieldDoc.fields.length];
        for (int j = 0; j < fieldDoc.fields.length; j++) {
          Object sortValue = fieldDoc.fields[j];
          Sort sortWithinGroup = rb.getGroupingSpec().getSortWithinGroup();
          SchemaField field =
              sortWithinGroup.getSort()[j].getField() != null
                  ? schema.getFieldOrNull(sortWithinGroup.getSort()[j].getField())
                  : null;
          if (field != null) {
            FieldType fieldType = field.getType();
            if (sortValue != null) {
              sortValue = fieldType.marshalSortValue(sortValue);
            }
          }
          convertedSortValues[j] = sortValue;
        }
        document.add("sortValues", convertedSortValues);
      }
      groupResult.add("documents", documents);
      String groupValue =
          searchGroup.groupValue != null
              ? groupField.getType().indexedToReadable(searchGroup.groupValue.utf8ToString())
              : null;
      result.add(groupValue, groupResult);
    }

    return result;
  }
Code example #13
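    // Merges shard responses for a distributed request: records per-shard info, de-duplicates
    // documents by unique key, scores and sorts the resulting ShardDocs, and fills in
    // rb.resultIds and the placeholder response document list.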
    public void merge(ResponseBuilder rb, ShardRequest sreq) {

      // id to shard mapping, to eliminate any accidental dups
      HashMap<Object, String> uniqueDoc = new HashMap<>();

      NamedList<Object> shardInfo = null;
      if (rb.req.getParams().getBool(ShardParams.SHARDS_INFO, false)) {
        shardInfo = new SimpleOrderedMap<>();
        rb.rsp.getValues().add(ShardParams.SHARDS_INFO, shardInfo);
      }

      IndexSchema schema = rb.req.getSchema();
      SchemaField uniqueKeyField = schema.getUniqueKeyField();

      long numFound = 0;
      Float maxScore = null;
      boolean partialResults = false;
      List<ShardDoc> shardDocs = new ArrayList<>();

      for (ShardResponse srsp : sreq.responses) {
        SolrDocumentList docs = null;

        if (shardInfo != null) {
          SimpleOrderedMap<Object> nl = new SimpleOrderedMap<>();

          if (srsp.getException() != null) {
            Throwable t = srsp.getException();
            if (t instanceof SolrServerException) {
              t = ((SolrServerException) t).getCause();
            }
            nl.add("error", t.toString());
            StringWriter trace = new StringWriter();
            t.printStackTrace(new PrintWriter(trace));
            nl.add("trace", trace.toString());
            if (srsp.getShardAddress() != null) {
              nl.add("shardAddress", srsp.getShardAddress());
            }
          } else {
            docs = (SolrDocumentList) srsp.getSolrResponse().getResponse().get("response");
            nl.add("numFound", docs.getNumFound());
            nl.add("maxScore", docs.getMaxScore());
            nl.add("shardAddress", srsp.getShardAddress());
          }
          if (srsp.getSolrResponse() != null) {
            nl.add("time", srsp.getSolrResponse().getElapsedTime());
          }

          shardInfo.add(srsp.getShard(), nl);
        }
        // now that we've added the shard info, let's only proceed if we have no error.
        if (srsp.getException() != null) {
          partialResults = true;
          continue;
        }

        if (docs == null) { // could have been initialized in the shards info block above
          docs = (SolrDocumentList) srsp.getSolrResponse().getResponse().get("response");
        }

        NamedList<?> responseHeader =
            (NamedList<?>) srsp.getSolrResponse().getResponse().get("responseHeader");
        if (responseHeader != null
            && Boolean.TRUE.equals(
                responseHeader.get(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY))) {
          partialResults = true;
        }

        // calculate global maxScore and numDocsFound
        if (docs.getMaxScore() != null) {
          maxScore = maxScore == null ? docs.getMaxScore() : Math.max(maxScore, docs.getMaxScore());
        }
        numFound += docs.getNumFound();

        SortSpec ss = rb.getSortSpec();
        Sort sort = ss.getSort();

        NamedList sortFieldValues =
            (NamedList) (srsp.getSolrResponse().getResponse().get("merge_values"));
        NamedList unmarshalledSortFieldValues = unmarshalSortValues(ss, sortFieldValues, schema);
        List lst = (List) unmarshalledSortFieldValues.getVal(0);

        for (int i = 0; i < docs.size(); i++) {
          SolrDocument doc = docs.get(i);
          Object id = doc.getFieldValue(uniqueKeyField.getName());

          String prevShard = uniqueDoc.put(id, srsp.getShard());
          if (prevShard != null) {
            // duplicate detected
            numFound--;

            // For now, just always use the first encountered since we can't currently
            // remove the previous one added to the priority queue.  If we switched
            // to the Java5 PriorityQueue, this would be easier.
            continue;
            // make which duplicate is used deterministic based on shard
            // if (prevShard.compareTo(srsp.shard) >= 0) {
            //  TODO: remove previous from priority queue
            //  continue;
            // }
          }

          ShardDoc shardDoc = new ShardDoc();
          shardDoc.id = id;
          shardDoc.shard = srsp.getShard();
          shardDoc.orderInShard = i;
          Object scoreObj = lst.get(i);
          if (scoreObj != null) {
            shardDoc.score = ((Integer) scoreObj).floatValue();
          }
          shardDocs.add(shardDoc);
        } // end for-each-doc-in-response
      } // end for-each-response

      Collections.sort(
          shardDocs,
          new Comparator<ShardDoc>() {
            @Override
            public int compare(ShardDoc o1, ShardDoc o2) {
              if (o1.score < o2.score) {
                return 1;
              } else if (o1.score > o2.score) {
                return -1;
              } else {
                return 0;
              }
            }
          });

      int resultSize = shardDocs.size();

      Map<Object, ShardDoc> resultIds = new HashMap<>();
      for (int i = 0; i < shardDocs.size(); i++) {
        ShardDoc shardDoc = shardDocs.get(i);
        shardDoc.positionInResponse = i;
        // Need the toString() for correlation with other lists that must
        // be strings (like keys in highlighting, explain, etc)
        resultIds.put(shardDoc.id.toString(), shardDoc);
      }

      // Add hits for distributed requests
      // https://issues.apache.org/jira/browse/SOLR-3518
      rb.rsp.addToLog("hits", numFound);

      SolrDocumentList responseDocs = new SolrDocumentList();
      if (maxScore != null) responseDocs.setMaxScore(maxScore);
      responseDocs.setNumFound(numFound);
      responseDocs.setStart(0);
      // size appropriately
      for (int i = 0; i < resultSize; i++) responseDocs.add(null);

      // save these results in a private area so we can access them
      // again when retrieving stored fields.
      // TODO: use ResponseBuilder (w/ comments) or the request context?
      rb.resultIds = resultIds;
      rb.setResponseDocs(responseDocs);

      if (partialResults) {
        rb.rsp
            .getResponseHeader()
            .add(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY, Boolean.TRUE);
      }
    }
Code example #14
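    // Re-derives the sort field values for each document in the current DocList (the query
    // cache does not store them) and adds them to the response under "merge_values" for the
    // distributed merge step.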
    public void handleMergeFields(ResponseBuilder rb, SolrIndexSearcher searcher)
        throws IOException {
      SolrQueryRequest req = rb.req;
      SolrQueryResponse rsp = rb.rsp;
      // The query cache doesn't currently store sort field values, and SolrIndexSearcher doesn't
      // currently have an option to return sort field values.  Because of this, we
      // take the documents given and re-derive the sort values.
      //
      // TODO: See SOLR-5595
      boolean fsv = req.getParams().getBool(ResponseBuilder.FIELD_SORT_VALUES, false);
      if (fsv) {
        NamedList<Object[]> sortVals = new NamedList<>(); // order is important for the sort fields
        IndexReaderContext topReaderContext = searcher.getTopReaderContext();
        List<LeafReaderContext> leaves = topReaderContext.leaves();
        LeafReaderContext currentLeaf = null;
        if (leaves.size() == 1) {
          // if there is a single segment, use that subReader and avoid looking up each time
          currentLeaf = leaves.get(0);
          leaves = null;
        }

        DocList docList = rb.getResults().docList;

        // sort ids from lowest to highest so we can access them in order
        int nDocs = docList.size();
        final long[] sortedIds = new long[nDocs];
        final float[] scores = new float[nDocs]; // doc scores, parallel to sortedIds
        DocList docs = rb.getResults().docList;
        DocIterator it = docs.iterator();
        for (int i = 0; i < nDocs; i++) {
          sortedIds[i] = (((long) it.nextDoc()) << 32) | i;
          scores[i] = docs.hasScores() ? it.score() : Float.NaN;
        }

        // sort ids and scores together
        new InPlaceMergeSorter() {
          @Override
          protected void swap(int i, int j) {
            long tmpId = sortedIds[i];
            float tmpScore = scores[i];
            sortedIds[i] = sortedIds[j];
            scores[i] = scores[j];
            sortedIds[j] = tmpId;
            scores[j] = tmpScore;
          }

          @Override
          protected int compare(int i, int j) {
            return Long.compare(sortedIds[i], sortedIds[j]);
          }
        }.sort(0, sortedIds.length);

        SortSpec sortSpec = rb.getSortSpec();
        Sort sort = searcher.weightSort(sortSpec.getSort());
        SortField[] sortFields =
            sort == null ? new SortField[] {SortField.FIELD_SCORE} : sort.getSort();
        List<SchemaField> schemaFields = sortSpec.getSchemaFields();

        for (int fld = 0; fld < schemaFields.size(); fld++) {
          SchemaField schemaField = schemaFields.get(fld);
          FieldType ft = null == schemaField ? null : schemaField.getType();
          SortField sortField = sortFields[fld];

          SortField.Type type = sortField.getType();
          // :TODO: would be simpler to always serialize every position of SortField[]
          if (type == SortField.Type.SCORE || type == SortField.Type.DOC) continue;

          FieldComparator<?> comparator = null;
          LeafFieldComparator leafComparator = null;
          Object[] vals = new Object[nDocs];

          int lastIdx = -1;
          int idx = 0;

          for (int i = 0; i < sortedIds.length; ++i) {
            long idAndPos = sortedIds[i];
            float score = scores[i];
            int doc = (int) (idAndPos >>> 32);
            int position = (int) idAndPos;

            if (leaves != null) {
              idx = ReaderUtil.subIndex(doc, leaves);
              currentLeaf = leaves.get(idx);
              if (idx != lastIdx) {
                // we switched segments.  invalidate comparator.
                comparator = null;
              }
            }

            if (comparator == null) {
              comparator = sortField.getComparator(1, 0);
              leafComparator = comparator.getLeafComparator(currentLeaf);
            }

            doc -= currentLeaf.docBase; // adjust for what segment this is in
            leafComparator.setScorer(new FakeScorer(doc, score));
            leafComparator.copy(0, doc);
            Object val = comparator.value(0);
            if (null != ft) val = ft.marshalSortValue(val);
            vals[position] = val;
          }

          sortVals.add(sortField.getField(), vals);
        }

        rsp.add("merge_values", sortVals);
      }
    }