int doListGen(int iter, Query q, List<Query> filt, boolean cacheQuery, boolean cacheFilt)
    throws Exception {
  SolrQueryRequest req = lrf.makeRequest();
  SolrIndexSearcher searcher = req.getSearcher();

  long start = System.currentTimeMillis();

  // These aren't public in SolrIndexSearcher
  int NO_CHECK_QCACHE = 0x80000000;
  int GET_DOCSET = 0x40000000;
  int NO_CHECK_FILTERCACHE = 0x20000000;
  int GET_SCORES = 0x01;

  int ret = 0;
  for (int i = 0; i < iter; i++) {
    DocList l =
        searcher.getDocList(
            q,
            filt,
            (Sort) null,
            0,
            10,
            (cacheQuery ? 0 : NO_CHECK_QCACHE) | (cacheFilt ? 0 : NO_CHECK_FILTERCACHE));
    ret += l.matches();
  }

  long end = System.currentTimeMillis();
  System.out.println(
      "ret=" + ret + " time=" + (end - start) + " throughput=" + iter * 1000 / (end - start + 1));

  req.close();
  assertTrue(ret > 0); // make sure we did some work
  return ret;
}
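// Illustrative caller sketch, not part of the original test: drives doListGen with a filter
// query so the cacheQuery/cacheFilt flags come into play. Field names ("cat_s", "inStock_s")
// and term values are assumptions; lrf, the test schema, and Lucene's Term/TermQuery imports
// are assumed to come from the surrounding test class.
public void testFilteredDocListPerfSketch() throws Exception {
  Query q = new TermQuery(new Term("cat_s", "electronics"));
  List<Query> filters = new ArrayList<Query>();
  filters.add(new TermQuery(new Term("inStock_s", "yes")));

  // first run bypasses both caches, second run reuses the warmed query and filter caches
  doListGen(100, q, filters, false, false);
  doListGen(100, q, filters, true, true);
}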
int doSetGen(int iter, Query q) throws Exception {
  SolrQueryRequest req = lrf.makeRequest();
  SolrIndexSearcher searcher = req.getSearcher();

  long start = System.currentTimeMillis();

  int ret = 0;
  for (int i = 0; i < iter; i++) {
    DocSet set = searcher.getDocSetNC(q, null);
    ret += set.size();
  }

  long end = System.currentTimeMillis();
  System.out.println(
      "ret=" + ret + " time=" + (end - start) + " throughput=" + iter * 1000 / (end - start + 1));

  req.close();
  assertTrue(ret > 0); // make sure we did some work
  return ret;
}
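// Illustrative caller sketch, not part of the original test: exercises the uncached DocSet
// path (getDocSetNC) with a single term query; the field and value are placeholders.
public void testDocSetPerfSketch() throws Exception {
  doSetGen(100, new TermQuery(new Term("cat_s", "electronics")));
}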
protected void doFieldSortValues(ResponseBuilder rb, SolrIndexSearcher searcher)
    throws IOException {
  SolrQueryRequest req = rb.req;
  SolrQueryResponse rsp = rb.rsp;

  // The query cache doesn't currently store sort field values, and SolrIndexSearcher doesn't
  // currently have an option to return sort field values. Because of this, we
  // take the documents given and re-derive the sort values.
  boolean fsv = req.getParams().getBool(ResponseBuilder.FIELD_SORT_VALUES, false);
  if (fsv) {
    Sort sort = rb.getSortSpec().getSort();
    SortField[] sortFields =
        sort == null ? new SortField[] {SortField.FIELD_SCORE} : sort.getSort();
    NamedList sortVals = new NamedList(); // order is important for the sort fields
    Field field = new Field("dummy", "", Field.Store.YES, Field.Index.NO); // a dummy Field

    SolrIndexReader reader = searcher.getReader();
    SolrIndexReader[] readers = reader.getLeafReaders();
    SolrIndexReader subReader = reader;
    if (readers.length == 1) {
      // if there is a single segment, use that subReader and avoid looking up each time
      subReader = readers[0];
      readers = null;
    }
    int[] offsets = reader.getLeafOffsets();

    for (SortField sortField : sortFields) {
      int type = sortField.getType();
      if (type == SortField.SCORE || type == SortField.DOC) continue;

      FieldComparator comparator = null;
      FieldComparator[] comparators =
          (readers == null) ? null : new FieldComparator[readers.length];

      String fieldname = sortField.getField();
      FieldType ft = fieldname == null ? null : req.getSchema().getFieldTypeNoEx(fieldname);

      DocList docList = rb.getResults().docList;
      ArrayList<Object> vals = new ArrayList<Object>(docList.size());
      DocIterator it = rb.getResults().docList.iterator();

      int offset = 0;
      int idx = 0;

      while (it.hasNext()) {
        int doc = it.nextDoc();
        if (readers != null) {
          idx = SolrIndexReader.readerIndex(doc, offsets);
          subReader = readers[idx];
          offset = offsets[idx];
          comparator = comparators[idx];
        }

        if (comparator == null) {
          comparator = sortField.getComparator(1, 0);
          comparator = comparator.setNextReader(subReader, offset);
          if (comparators != null) comparators[idx] = comparator;
        }

        doc -= offset; // adjust for what segment this is in
        comparator.copy(0, doc);
        Object val = comparator.value(0);

        // Sortable float, double, int, long types all just use a string
        // comparator. For these, we need to put the type into a readable
        // format. One reason for this is that XML can't represent all
        // string values (or even all unicode code points).
        // indexedToReadable() should be a no-op and should
        // thus be harmless anyway (for all current ways anyway)
        if (val instanceof String) {
          field.setValue((String) val);
          val = ft.toObject(field);
        }

        // Must do the same conversion when sorting by a
        // String field in Lucene, which returns the terms
        // data as BytesRef:
        if (val instanceof BytesRef) {
          field.setValue(((BytesRef) val).utf8ToString());
          val = ft.toObject(field);
        }

        vals.add(val);
      }

      sortVals.add(fieldname, vals);
    }

    rsp.add("sort_values", sortVals);
  }
}
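// Illustrative request, not part of the original class: sort field values are only computed
// when the fsv flag (ResponseBuilder.FIELD_SORT_VALUES) is set to true on the request along
// with a sort, e.g.
//   q=*:*&sort=price asc&fsv=true
// The values then come back under the "sort_values" key added above. The literal "fsv"
// parameter name is an assumption based on the constant's use in distributed search.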
/** Actually run the query */
@Override
public void process(ResponseBuilder rb) throws IOException {
  SolrQueryRequest req = rb.req;
  SolrQueryResponse rsp = rb.rsp;
  SolrParams params = req.getParams();
  if (!params.getBool(COMPONENT_NAME, true)) {
    return;
  }
  SolrIndexSearcher searcher = req.getSearcher();

  if (rb.getQueryCommand().getOffset() < 0) {
    throw new SolrException(
        SolrException.ErrorCode.BAD_REQUEST, "'start' parameter cannot be negative");
  }

  // -1 as flag if not set.
  long timeAllowed = (long) params.getInt(CommonParams.TIME_ALLOWED, -1);

  // Optional: This could also be implemented by the top-level searcher sending
  // a filter that lists the ids... that would be transparent to
  // the request handler, but would be more expensive (and would preserve score
  // too if desired).
  String ids = params.get(ShardParams.IDS);
  if (ids != null) {
    SchemaField idField = req.getSchema().getUniqueKeyField();
    List<String> idArr = StrUtils.splitSmart(ids, ",", true);
    int[] luceneIds = new int[idArr.size()];
    int docs = 0;
    for (int i = 0; i < idArr.size(); i++) {
      int id =
          req.getSearcher()
              .getFirstMatch(
                  new Term(idField.getName(), idField.getType().toInternal(idArr.get(i))));
      if (id >= 0) luceneIds[docs++] = id;
    }

    DocListAndSet res = new DocListAndSet();
    res.docList = new DocSlice(0, docs, luceneIds, null, docs, 0);
    if (rb.isNeedDocSet()) {
      List<Query> queries = new ArrayList<Query>();
      queries.add(rb.getQuery());
      List<Query> filters = rb.getFilters();
      if (filters != null) queries.addAll(filters);
      res.docSet = searcher.getDocSet(queries);
    }
    rb.setResults(res);
    rsp.add("response", rb.getResults().docList);
    return;
  }

  SolrIndexSearcher.QueryCommand cmd = rb.getQueryCommand();
  cmd.setTimeAllowed(timeAllowed);
  SolrIndexSearcher.QueryResult result = new SolrIndexSearcher.QueryResult();

  //
  // grouping / field collapsing
  //
  boolean doGroup = params.getBool(GroupParams.GROUP, false);
  if (doGroup) {
    try {
      cmd.groupCommands = new ArrayList<Grouping.Command>();

      String[] fields = params.getParams(GroupParams.GROUP_FIELD);
      String[] funcs = params.getParams(GroupParams.GROUP_FUNC);
      String[] queries = params.getParams(GroupParams.GROUP_QUERY);
      String groupSortStr = params.get(GroupParams.GROUP_SORT);
      Sort groupSort =
          groupSortStr != null ? QueryParsing.parseSort(groupSortStr, req.getSchema()) : null;

      int limitDefault = cmd.getLen(); // this is normally from "rows"
      int docsPerGroupDefault = params.getInt(GroupParams.GROUP_LIMIT, 1);

      // temporary: implement all group-by-field as group-by-func
      if (funcs == null) {
        funcs = fields;
      } else if (fields != null) {
        // catenate functions and fields
        String[] both = new String[fields.length + funcs.length];
        System.arraycopy(fields, 0, both, 0, fields.length);
        System.arraycopy(funcs, 0, both, fields.length, funcs.length);
        funcs = both;
      }

      if (funcs != null) {
        for (String groupByStr : funcs) {
          QParser parser = QParser.getParser(groupByStr, "func", rb.req);
          Query q = parser.getQuery();
          Grouping.CommandFunc gc = new Grouping.CommandFunc();
          gc.groupSort = groupSort;

          if (q instanceof FunctionQuery) {
            gc.groupBy = ((FunctionQuery) q).getValueSource();
          } else {
            gc.groupBy = new QueryValueSource(q, 0.0f);
          }
          gc.key = groupByStr;
          gc.groupLimit = limitDefault;
          gc.docsPerGroup = docsPerGroupDefault;
          cmd.groupCommands.add(gc);
        }
      }

      if (cmd.groupCommands.size() == 0) cmd.groupCommands = null;

      if (cmd.groupCommands != null) {
        if (rb.doHighlights || rb.isDebug()) {
          // we need a single list of the returned docs
          cmd.setFlags(SolrIndexSearcher.GET_DOCLIST);
        }

        searcher.search(result, cmd);
        rb.setResult(result);
        rsp.add("grouped", result.groupedResults);
        // TODO: get "hits" a different way to log
        return;
      }
    } catch (ParseException e) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
    }
  }

  // normal search result
  searcher.search(result, cmd);
  rb.setResult(result);

  rsp.add("response", rb.getResults().docList);
  rsp.getToLog().add("hits", rb.getResults().docList.matches());

  doFieldSortValues(rb, searcher);
  doPrefetch(rb);
}
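// Illustrative request, not part of the original class: the grouping branch above is entered
// when group=true is combined with at least one group.field or group.func parameter, e.g.
//   q=*:*&group=true&group.field=manu_exact&group.limit=3&group.sort=price asc
// Field names here are placeholders; group.limit feeds docsPerGroupDefault and group.sort is
// parsed into groupSort in the code above.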