Code Example #1
File: JSONDumper.java Project: romanchyla/montysolr
  public void writeResponse() throws IOException {
    Boolean omitHeader = req.getParams().getBool(CommonParams.OMIT_HEADER);
    if (omitHeader != null && omitHeader) rsp.getValues().remove("responseHeader");

    SolrIndexSearcher searcher = req.getSearcher();

    if (liveDocs == null) {
      liveDocs = searcher.getLeafReader().getLiveDocs();
    }

    int maxDoc = searcher.maxDoc();

    try {

      ReturnFields fields = rsp.getReturnFields(); // return everything
      Set<String> fnames = fields.getLuceneFieldNames();
      int docCounter = 0;
      for (int i = 0; i < maxDoc; i++) {
        // Skip documents that are marked as deleted in the live-docs bitset.
        if (liveDocs != null && !liveDocs.get(i)) {
          continue;
        }
        Document doc = searcher.doc(i);
        SolrDocument sdoc = toSolrDocument(doc, schema);
        writeSolrDocument(null, sdoc, fields, docCounter++);
        getWriter().write("\n");
      }
    } finally {
      close();
      writer.write('\n'); // ending with a newline looks much better from the command line
      writer.close();
    }
  }
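
The helper toSolrDocument called above is not part of this snippet. A minimal sketch of what such a conversion might look like, decoding each stored field through the schema where possible (this body is an assumption for illustration, not the project's actual implementation):

  // Assumed imports: org.apache.lucene.document.Document,
  // org.apache.lucene.index.IndexableField, org.apache.solr.common.SolrDocument,
  // org.apache.solr.schema.IndexSchema, org.apache.solr.schema.SchemaField
  private static SolrDocument toSolrDocument(Document doc, IndexSchema schema) {
    SolrDocument sdoc = new SolrDocument();
    for (IndexableField f : doc.getFields()) {
      SchemaField sf = schema.getFieldOrNull(f.name());
      // Use the schema's FieldType to decode the stored value when the field
      // is declared in the schema; fall back to the raw string value otherwise.
      Object value = (sf != null) ? sf.getType().toObject(f) : f.stringValue();
      sdoc.addField(f.name(), value);
    }
    return sdoc;
  }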
Code Example #2
  protected DocSet computeDocSet(DocSet baseDocSet, List<String> excludeTagList)
      throws SyntaxError, IOException {
    Map<?, ?> tagMap = (Map<?, ?>) req.getContext().get("tags");
    // rb can be null if facets are being calculated from a RequestHandler e.g. MoreLikeThisHandler
    if (tagMap == null || rb == null) {
      return baseDocSet;
    }

    IdentityHashMap<Query, Boolean> excludeSet = new IdentityHashMap<>();
    for (String excludeTag : excludeTagList) {
      Object olst = tagMap.get(excludeTag);
      // tagMap maps each tag name to a Collection of QParsers, though this
      // is subject to change in the future.
      if (!(olst instanceof Collection)) continue;
      for (Object o : (Collection<?>) olst) {
        if (!(o instanceof QParser)) continue;
        QParser qp = (QParser) o;
        excludeSet.put(qp.getQuery(), Boolean.TRUE);
      }
    }
    if (excludeSet.isEmpty()) return baseDocSet;

    List<Query> qlist = new ArrayList<>();

    // add the base query
    if (!excludeSet.containsKey(rb.getQuery())) {
      qlist.add(rb.getQuery());
    }

    // add the filters
    if (rb.getFilters() != null) {
      for (Query q : rb.getFilters()) {
        if (!excludeSet.containsKey(q)) {
          qlist.add(q);
        }
      }
    }

    // get the new base docset for this facet
    DocSet base = searcher.getDocSet(qlist);
    if (rb.grouping() && rb.getGroupingSpec().isTruncateGroups()) {
      Grouping grouping = new Grouping(searcher, null, rb.getQueryCommand(), false, 0, false);
      grouping.setWithinGroupSort(rb.getGroupingSpec().getSortWithinGroup());
      if (rb.getGroupingSpec().getFields().length > 0) {
        grouping.addFieldCommand(rb.getGroupingSpec().getFields()[0], req);
      } else if (rb.getGroupingSpec().getFunctions().length > 0) {
        grouping.addFunctionCommand(rb.getGroupingSpec().getFunctions()[0], req);
      } else {
        return base;
      }
      AbstractAllGroupHeadsCollector allGroupHeadsCollector =
          grouping.getCommands().get(0).createAllGroupCollector();
      searcher.search(base.getTopFilter(), allGroupHeadsCollector);
      return new BitDocSet(allGroupHeadsCollector.retrieveGroupHeads(searcher.maxDoc()));
    } else {
      return base;
    }
  }
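
For context, this method backs Solr's tagged-filter exclusion for multi-select faceting: filters tagged in the request are looked up in the "tags" context map and left out of the DocSet that facet counts are computed against. A typical request using the standard tag/ex local-params syntax (the field and tag names here are only illustrative):

  q=*:*&fq={!tag=colorTag}color:red&facet=true&facet.field={!ex=colorTag}color

With ex=colorTag, excludeTagList contains "colorTag", so the color:red filter is omitted from qlist above and the color facet reports counts across all colors rather than only the currently selected one.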
Code Example #3
  public static SolrReaderSetScorer createReaderSetScorer(
      Weight weight,
      AtomicReaderContext context,
      Bits acceptDocs,
      SolrIndexSearcher searcher,
      String authorities,
      AtomicReader reader)
      throws IOException {

    DocSet readableDocSet =
        (DocSet) searcher.cacheLookup(CacheConstants.ALFRESCO_READER_CACHE, authorities);

    if (readableDocSet == null) {

      // The first character of 'authorities' is the delimiter for the remaining
      // authority tokens (note that String.split treats it as a regex).
      String[] auths = authorities.substring(1).split(authorities.substring(0, 1));

      readableDocSet = new BitDocSet(new FixedBitSet(searcher.maxDoc()));

      BooleanQuery bQuery = new BooleanQuery();
      for (String current : auths) {
        bQuery.add(new TermQuery(new Term(QueryConstants.FIELD_READER, current)), Occur.SHOULD);
      }

      DocSet aclDocs = searcher.getDocSet(bQuery);

      BooleanQuery aQuery = new BooleanQuery();
      for (DocIterator it = aclDocs.iterator(); it.hasNext(); ) {
        int docID = it.nextDoc();
        // Obtain the ACL ID for this ACL doc.
        long aclID =
            searcher.getAtomicReader().getNumericDocValues(QueryConstants.FIELD_ACLID).get(docID);
        SchemaField schemaField = searcher.getSchema().getField(QueryConstants.FIELD_ACLID);
        Query query = schemaField.getType().getFieldQuery(null, schemaField, Long.toString(aclID));
        aQuery.add(query, Occur.SHOULD);

        // Flush the accumulated clauses before hitting BooleanQuery's default
        // 1024-clause limit, and also after the last ACL doc has been handled.
        if ((aQuery.clauses().size() > 999) || !it.hasNext()) {
          DocSet docsForAclId = searcher.getDocSet(aQuery);
          readableDocSet = readableDocSet.union(docsForAclId);

          aQuery = new BooleanQuery();
        }
      }
      // Exclude the ACL docs from the results; we only want real docs that match.
      // Probably not very efficient: what we really want is remove(docID).
      readableDocSet = readableDocSet.andNot(aclDocs);
      searcher.cacheInsert(CacheConstants.ALFRESCO_READER_CACHE, authorities, readableDocSet);
    }

    // TODO: cache the full set? e.g. searcher.cacheInsert(CacheConstants.ALFRESCO_READERSET_CACHE,
    // authorities, readableDocSet)
    // plus check of course, for presence in cache at start of method.
    return new SolrReaderSetScorer(weight, readableDocSet, context, acceptDocs, searcher);
  }
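
One detail worth calling out: the authorities cache key is delimiter-prefixed, i.e. its first character names the separator used for the rest of the string. A small self-contained sketch of that round trip (the Pattern.quote guard is an addition here; the raw split above would misread a delimiter that happens to be a regex metacharacter):

  import java.util.Arrays;
  import java.util.regex.Pattern;

  public class AuthoritiesSplitExample {
    public static void main(String[] args) {
      // ',' is declared as the delimiter by being the first character.
      String authorities = ",GROUP_EVERYONE,alice,bob";
      String[] auths = authorities.substring(1)
          .split(Pattern.quote(authorities.substring(0, 1)));
      System.out.println(Arrays.toString(auths)); // [GROUP_EVERYONE, alice, bob]
    }
  }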