Code Example #1
  public static SolrReaderSetScorer createReaderSetScorer(
      Weight weight,
      AtomicReaderContext context,
      Bits acceptDocs,
      SolrIndexSearcher searcher,
      String authorities,
      AtomicReader reader)
      throws IOException {

    DocSet readableDocSet =
        (DocSet) searcher.cacheLookup(CacheConstants.ALFRESCO_READER_CACHE, authorities);

    if (readableDocSet == null) {

      // First char is the delimiter; quote it since split() takes a regex.
      String[] auths =
          authorities.substring(1).split(Pattern.quote(authorities.substring(0, 1)));

      readableDocSet = new BitDocSet(new FixedBitSet(searcher.maxDoc()));

      BooleanQuery bQuery = new BooleanQuery();
      for (String current : auths) {
        bQuery.add(new TermQuery(new Term(QueryConstants.FIELD_READER, current)), Occur.SHOULD);
      }

      DocSet aclDocs = searcher.getDocSet(bQuery);

      BooleanQuery aQuery = new BooleanQuery();
      for (DocIterator it = aclDocs.iterator(); it.hasNext(); /**/ ) {
        int docID = it.nextDoc();
        // Obtain the ACL ID for this ACL doc.
        long aclID =
            searcher.getAtomicReader().getNumericDocValues(QueryConstants.FIELD_ACLID).get(docID);
        SchemaField schemaField = searcher.getSchema().getField(QueryConstants.FIELD_ACLID);
        Query query = schemaField.getType().getFieldQuery(null, schemaField, Long.toString(aclID));
        aQuery.add(query, Occur.SHOULD);

        // Flush the accumulated clauses before hitting BooleanQuery's default
        // maxClauseCount limit of 1024.
        if ((aQuery.clauses().size() > 999) || !it.hasNext()) {
          DocSet docsForAclId = searcher.getDocSet(aQuery);
          readableDocSet = readableDocSet.union(docsForAclId);

          aQuery = new BooleanQuery();
        }
      }
      // Exclude the ACL docs from the results; we only want real docs that match.
      // Probably not very efficient; what we really want is a remove(docID) operation.
      readableDocSet = readableDocSet.andNot(aclDocs);
      searcher.cacheInsert(CacheConstants.ALFRESCO_READER_CACHE, authorities, readableDocSet);
    }

    // TODO: cache the full set? e.g. searcher.cacheInsert(CacheConstants.ALFRESCO_READERSET_CACHE,
    // authorities, readableDocSet), plus of course a check for its presence in the cache at the
    // start of the method.
    return new SolrReaderSetScorer(weight, readableDocSet, context, acceptDocs, searcher);
  }
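The format of the authorities argument is implied rather than documented: the first character is a caller-chosen delimiter, and the remainder is the delimited list of authority names. A minimal sketch under that assumption (the authority names here are hypothetical):

  // Hypothetical input: the caller chose "|" as the delimiter.
  String authorities = "|GROUP_EVERYONE|admin";
  String delimiter = authorities.substring(0, 1);
  // Quote the delimiter because String.split() takes a regex.
  String[] auths = authorities.substring(1).split(Pattern.quote(delimiter));
  // auths -> ["GROUP_EVERYONE", "admin"]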
Code Example #2
 /**
  * Returns a count of the documents in the set which do not have any terms for the specified
  * field.
  *
  * @see FacetParams#FACET_MISSING
  */
 public static int getFieldMissingCount(SolrIndexSearcher searcher, DocSet docs, String fieldName)
     throws IOException {
   SchemaField sf = searcher.getSchema().getField(fieldName);
   DocSet hasVal =
       searcher.getDocSet(sf.getType().getRangeQuery(null, sf, null, null, false, false));
   return docs.andNotSize(hasVal);
 }
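The null bounds passed to getRangeQuery make hasVal the set of every document with at least one term in the field (the equivalent of an open-ended range query over the whole field), so andNotSize counts the documents in docs that fall outside that set. A hypothetical call site (the searcher, result set, and field name are all assumed):

  // Count documents in the current results with no indexed value for "category".
  int missing = getFieldMissingCount(searcher, results, "category");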
Code Example #3
  protected DocSet computeDocSet(DocSet baseDocSet, List<String> excludeTagList)
      throws SyntaxError, IOException {
    Map<?, ?> tagMap = (Map<?, ?>) req.getContext().get("tags");
    // rb can be null if facets are being calculated from a RequestHandler e.g. MoreLikeThisHandler
    if (tagMap == null || rb == null) {
      return baseDocSet;
    }

    IdentityHashMap<Query, Boolean> excludeSet = new IdentityHashMap<>();
    for (String excludeTag : excludeTagList) {
      Object olst = tagMap.get(excludeTag);
      // tagMap currently maps each tag to a List<QParser>, but this is subject to change in the future
      if (!(olst instanceof Collection)) continue;
      for (Object o : (Collection<?>) olst) {
        if (!(o instanceof QParser)) continue;
        QParser qp = (QParser) o;
        excludeSet.put(qp.getQuery(), Boolean.TRUE);
      }
    }
    if (excludeSet.size() == 0) return baseDocSet;

    List<Query> qlist = new ArrayList<>();

    // add the base query
    if (!excludeSet.containsKey(rb.getQuery())) {
      qlist.add(rb.getQuery());
    }

    // add the filters
    if (rb.getFilters() != null) {
      for (Query q : rb.getFilters()) {
        if (!excludeSet.containsKey(q)) {
          qlist.add(q);
        }
      }
    }

    // get the new base docset for this facet
    DocSet base = searcher.getDocSet(qlist);
    if (rb.grouping() && rb.getGroupingSpec().isTruncateGroups()) {
      Grouping grouping = new Grouping(searcher, null, rb.getQueryCommand(), false, 0, false);
      grouping.setWithinGroupSort(rb.getGroupingSpec().getSortWithinGroup());
      if (rb.getGroupingSpec().getFields().length > 0) {
        grouping.addFieldCommand(rb.getGroupingSpec().getFields()[0], req);
      } else if (rb.getGroupingSpec().getFunctions().length > 0) {
        grouping.addFunctionCommand(rb.getGroupingSpec().getFunctions()[0], req);
      } else {
        return base;
      }
      AbstractAllGroupHeadsCollector allGroupHeadsCollector =
          grouping.getCommands().get(0).createAllGroupCollector();
      searcher.search(base.getTopFilter(), allGroupHeadsCollector);
      return new BitDocSet(allGroupHeadsCollector.retrieveGroupHeads(searcher.maxDoc()));
    } else {
      return base;
    }
  }
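This method is the machinery behind Solr's multi-select faceting. A filter tagged via local params can be excluded from a particular facet, for example:

  fq={!tag=dt}doctype:pdf
  facet.field={!ex=dt}doctype

Queries carrying an excluded tag are dropped from qlist, so the facet is computed against a wider DocSet than the one behind the main results; when group.truncate is in effect, the grouping branch further collapses that set to one group-head document per group.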
Code Example #4
 @SuppressWarnings("unused")
 static DocSet getFieldMissing(SolrIndexSearcher searcher, DocSet docs, String fieldName)
     throws IOException {
   SchemaField sf = searcher.getSchema().getField(fieldName);
   DocSet hasVal =
       searcher.getDocSet(sf.getType().getRangeQuery(null, sf, null, null, false, false));
   DocSet answer = docs.andNot(hasVal);
   // hasVal.decref(); // OFF-HEAP
   return answer;
 }
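This is the DocSet-returning counterpart of getFieldMissingCount from example #2: the same open-ended range query is issued, but the caller receives the missing documents themselves via andNot rather than just their count. The commented-out decref call suggests an off-heap DocSet lifecycle that is not active in this code path.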
Code Example #5
  public static SolrCachingAuxDocScorer createAuxDocScorer(
      SolrIndexSearcher searcher, Similarity similarity, Query query, SolrIndexReader reader)
      throws IOException {
    // Get hold of the Solr top-level searcher
    // Execute the query with caching
    // Translate results to leaf docs
    // Build an ordered doc list

    DocSet auxDocSet = searcher.getDocSet(query);

    CacheEntry[] indexedByDocId =
        (CacheEntry[])
            searcher.cacheLookup(
                AlfrescoSolrEventListener.ALFRESCO_CACHE,
                AlfrescoSolrEventListener.KEY_DBID_LEAF_PATH_BY_DOC_ID);

    // List<ScoreDoc> auxDocs = pathCollector.getDocs();
    OpenBitSet translated = new OpenBitSet();

    if (auxDocSet instanceof BitDocSet) {
      BitDocSet source = (BitDocSet) auxDocSet;
      OpenBitSet openBitSet = source.getBits();
      int current = -1;
      while ((current = openBitSet.nextSetBit(current + 1)) != -1) {
        CacheEntry entry = indexedByDocId[current];
        translated.set(entry.getLeaf());
      }
    } else {
      for (DocIterator it = auxDocSet.iterator(); it.hasNext(); /* */ ) {
        CacheEntry entry = indexedByDocId[it.nextDoc()];
        translated.set(entry.getLeaf());
      }
    }

    return new SolrCachingAuxDocScorer(similarity, new BitDocSet(translated), reader);
  }
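The CacheEntry array acts as a precomputed lookup table indexed by Lucene doc id, so each matching auxiliary document maps to its leaf document with one array access plus getLeaf(). The instanceof BitDocSet branch is purely a fast path that walks the underlying OpenBitSet directly; both branches produce the same translated bit set.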
Code Example #6
  public static SolrAuthoritySetScorer createAuthoritySetScorer(
      SolrIndexSearcher searcher, Similarity similarity, String authorities, SolrIndexReader reader)
      throws IOException {
    // Get hold of the Solr top-level searcher
    // Execute the query with caching
    // Translate results to leaf docs
    // Build an ordered doc list

    Properties p = searcher.getSchema().getResourceLoader().getCoreProperties();
    boolean doPermissionChecks =
        Boolean.parseBoolean(p.getProperty("alfresco.doPermissionChecks", "true"));

    Query key = new SolrAuthoritySetQuery(authorities);

    DocSet answer =
        (DocSet) searcher.cacheLookup(AlfrescoSolrEventListener.ALFRESCO_AUTHORITY_CACHE, key);
    if (answer != null) {
      return new SolrAuthoritySetScorer(similarity, answer, reader);
    }

    HashSet<String> globalReaders =
        (HashSet<String>)
            searcher.cacheLookup(
                AlfrescoSolrEventListener.ALFRESCO_CACHE,
                AlfrescoSolrEventListener.KEY_GLOBAL_READERS);

    // First char is the delimiter; quote it since split() takes a regex
    // (the "|" delimiter used below is a metacharacter).
    String[] auths =
        authorities.substring(1).split(Pattern.quote(authorities.substring(0, 1)));

    boolean hasGlobalRead = false;

    for (String auth : auths) {
      if (globalReaders.contains(auth)) {
        hasGlobalRead = true;
        break;
      }
    }

    if (hasGlobalRead || !doPermissionChecks) {
      // can read all
      OpenBitSet allLeafDocs =
          (OpenBitSet)
              searcher.cacheLookup(
                  AlfrescoSolrEventListener.ALFRESCO_CACHE,
                  AlfrescoSolrEventListener.KEY_ALL_LEAF_DOCS);
      return new SolrAuthoritySetScorer(similarity, new BitDocSet(allLeafDocs), reader);
    }

    DocSet readableDocSet = searcher.getDocSet(new SolrReaderSetQuery(authorities));

    if (globalReaders.contains(PermissionService.OWNER_AUTHORITY)) {
      DocSet authorityOwnedDocs = searcher.getDocSet(new SolrOwnerSetQuery(authorities));

      DocSet toCache = readableDocSet.union(authorityOwnedDocs);
      searcher.cacheInsert(AlfrescoSolrEventListener.ALFRESCO_AUTHORITY_CACHE, key, toCache);
      return new SolrAuthoritySetScorer(similarity, toCache, reader);
    } else {
      // Docs the authorities own that also grant the owner read rights
      DocSet ownerReadableDocSet =
          searcher.getDocSet(new SolrReaderSetQuery("|" + PermissionService.OWNER_AUTHORITY));
      DocSet authorityOwnedDocs = searcher.getDocSet(new SolrOwnerSetQuery(authorities));

      DocSet docsAuthorityOwnsAndCanRead = ownerReadableDocSet.intersection(authorityOwnedDocs);

      DocSet toCache = readableDocSet.union(docsAuthorityOwnsAndCanRead);
      searcher.cacheInsert(AlfrescoSolrEventListener.ALFRESCO_AUTHORITY_CACHE, key, toCache);
      return new SolrAuthoritySetScorer(similarity, toCache, reader);
    }
  }
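The overall flow: consult the per-query authority cache; if permission checks are disabled or any supplied authority is a global reader, short-circuit to the cached set of all leaf documents; otherwise union the documents the authorities can read directly with the documents they own, counting owned documents only when ROLE_OWNER (PermissionService.OWNER_AUTHORITY) is a global reader or the document itself grants the owner read access. The computed set is cached under the SolrAuthoritySetQuery key for subsequent requests.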
Code Example #7
  /** Actually run the query */
  @Override
  public void process(ResponseBuilder rb) throws IOException {
    SolrQueryRequest req = rb.req;
    SolrQueryResponse rsp = rb.rsp;
    SolrParams params = req.getParams();
    if (!params.getBool(COMPONENT_NAME, true)) {
      return;
    }
    SolrIndexSearcher searcher = req.getSearcher();

    if (rb.getQueryCommand().getOffset() < 0) {
      throw new SolrException(
          SolrException.ErrorCode.BAD_REQUEST, "'start' parameter cannot be negative");
    }

    // -1 as flag if not set.
    long timeAllowed = (long) params.getInt(CommonParams.TIME_ALLOWED, -1);

    // Optional: This could also be implemented by the top-level searcher sending
    // a filter that lists the ids... that would be transparent to
    // the request handler, but would be more expensive (and would preserve score
    // too if desired).
    String ids = params.get(ShardParams.IDS);
    if (ids != null) {
      SchemaField idField = req.getSchema().getUniqueKeyField();
      List<String> idArr = StrUtils.splitSmart(ids, ",", true);
      int[] luceneIds = new int[idArr.size()];
      int docs = 0;
      for (int i = 0; i < idArr.size(); i++) {
        int id =
            req.getSearcher()
                .getFirstMatch(
                    new Term(idField.getName(), idField.getType().toInternal(idArr.get(i))));
        if (id >= 0) luceneIds[docs++] = id;
      }

      DocListAndSet res = new DocListAndSet();
      res.docList = new DocSlice(0, docs, luceneIds, null, docs, 0);
      if (rb.isNeedDocSet()) {
        List<Query> queries = new ArrayList<Query>();
        queries.add(rb.getQuery());
        List<Query> filters = rb.getFilters();
        if (filters != null) queries.addAll(filters);
        res.docSet = searcher.getDocSet(queries);
      }
      rb.setResults(res);
      rsp.add("response", rb.getResults().docList);
      return;
    }

    SolrIndexSearcher.QueryCommand cmd = rb.getQueryCommand();
    cmd.setTimeAllowed(timeAllowed);
    SolrIndexSearcher.QueryResult result = new SolrIndexSearcher.QueryResult();

    //
    // grouping / field collapsing
    //
    boolean doGroup = params.getBool(GroupParams.GROUP, false);
    if (doGroup) {
      try {
        cmd.groupCommands = new ArrayList<Grouping.Command>();

        String[] fields = params.getParams(GroupParams.GROUP_FIELD);
        String[] funcs = params.getParams(GroupParams.GROUP_FUNC);
        String[] queries = params.getParams(GroupParams.GROUP_QUERY);
        String groupSortStr = params.get(GroupParams.GROUP_SORT);
        Sort groupSort =
            groupSortStr != null ? QueryParsing.parseSort(groupSortStr, req.getSchema()) : null;

        int limitDefault = cmd.getLen(); // this is normally from "rows"
        int docsPerGroupDefault = params.getInt(GroupParams.GROUP_LIMIT, 1);

        // temporary: implement all group-by-field as group-by-func
        if (funcs == null) {
          funcs = fields;
        } else if (fields != null) {
          // concatenate functions and fields
          String[] both = new String[fields.length + funcs.length];
          System.arraycopy(fields, 0, both, 0, fields.length);
          System.arraycopy(funcs, 0, both, fields.length, funcs.length);
          funcs = both;
        }

        if (funcs != null) {
          for (String groupByStr : funcs) {
            QParser parser = QParser.getParser(groupByStr, "func", rb.req);
            Query q = parser.getQuery();
            Grouping.CommandFunc gc = new Grouping.CommandFunc();
            gc.groupSort = groupSort;

            if (q instanceof FunctionQuery) {
              gc.groupBy = ((FunctionQuery) q).getValueSource();
            } else {
              gc.groupBy = new QueryValueSource(q, 0.0f);
            }
            gc.key = groupByStr;
            gc.groupLimit = limitDefault;
            gc.docsPerGroup = docsPerGroupDefault;

            cmd.groupCommands.add(gc);
          }
        }

        if (cmd.groupCommands.size() == 0) cmd.groupCommands = null;

        if (cmd.groupCommands != null) {
          if (rb.doHighlights || rb.isDebug()) {
            // we need a single list of the returned docs
            cmd.setFlags(SolrIndexSearcher.GET_DOCLIST);
          }

          searcher.search(result, cmd);
          rb.setResult(result);
          rsp.add("grouped", result.groupedResults);
          // TODO: get "hits" a different way to log
          return;
        }
      } catch (ParseException e) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
      }
    }

    // normal search result
    searcher.search(result, cmd);
    rb.setResult(result);

    rsp.add("response", rb.getResults().docList);
    rsp.getToLog().add("hits", rb.getResults().docList.matches());

    doFieldSortValues(rb, searcher);
    doPrefetch(rb);
  }
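Two early-return paths are worth noting: the ShardParams.IDS branch serves distributed-search refinement requests by fetching documents directly by unique key instead of re-executing the query, and the grouping branch (driven by the standard group=true family of parameters: group.field, group.func, group.query, group.sort, group.limit) writes its own "grouped" response. Per the "temporary" comment, every group-by-field request is rewritten as a group-by-func over the field's value source before execution.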