Example No. 1
  private static void populateFieldInfo(
      IndexSchema schema,
      Map<String, List<String>> typeusemap,
      SimpleOrderedMap<Object> fields,
      SchemaField uniqueField,
      SchemaField f) {
    FieldType ft = f.getType();
    SimpleOrderedMap<Object> field = new SimpleOrderedMap<Object>();
    field.add("type", ft.getTypeName());
    field.add("flags", getFieldFlags(f));
    if (f.isRequired()) {
      field.add("required", f.isRequired());
    }
    if (f.getDefaultValue() != null) {
      field.add("default", f.getDefaultValue());
    }
    if (f == uniqueField) {
      field.add("uniqueKey", true);
    }
    if (ft.getAnalyzer().getPositionIncrementGap(f.getName()) != 0) {
      field.add("positionIncrementGap", ft.getAnalyzer().getPositionIncrementGap(f.getName()));
    }
    field.add("copyDests", schema.getCopyFields(f.getName()));
    field.add("copySources", schema.getCopySources(f.getName()));

    fields.add(f.getName(), field);

    List<String> v = typeusemap.get(ft.getTypeName());
    if (v == null) {
      v = new ArrayList<String>();
    }
    v.add(f.getName());
    typeusemap.put(ft.getTypeName(), v);
  }
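
The method above builds a nested SimpleOrderedMap purely through repeated add() calls. As a reference point for the examples that follow, here is a minimal, hedged sketch of the basic SimpleOrderedMap API (a NamedList that preserves insertion order and allows repeated keys); the field names and values are made up for illustration and assume org.apache.solr.common.util.SimpleOrderedMap and java.util.Map are on the classpath:

public class SimpleOrderedMapSketch {
  public static void main(String[] args) {
    // Entries keep the order in which they were added.
    SimpleOrderedMap<Object> field = new SimpleOrderedMap<>();
    field.add("type", "string");   // hypothetical values, for illustration only
    field.add("required", true);
    field.add("default", "n/a");

    // Lookup by name returns the first entry with that name (or null).
    Object type = field.get("type");
    System.out.println("type = " + type);

    // Iteration yields name/value pairs in insertion order.
    for (Map.Entry<String, Object> entry : field) {
      System.out.println(entry.getKey() + " = " + entry.getValue());
    }
  }
}
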
Example No. 2
  public static SimpleOrderedMap<Object> getIndexInfo(IndexReader reader, boolean countTerms)
      throws IOException {
    Directory dir = reader.directory();
    SimpleOrderedMap<Object> indexInfo = new SimpleOrderedMap<Object>();

    indexInfo.add("numDocs", reader.numDocs());
    indexInfo.add("maxDoc", reader.maxDoc());

    if (countTerms) {
      TermEnum te = null;
      try {
        te = reader.terms();
        int numTerms = 0;
        while (te.next()) {
          numTerms++;
        }
        indexInfo.add("numTerms", numTerms);
      } finally {
        if (te != null) te.close();
      }
    }

    indexInfo.add(
        "version",
        reader.getVersion()); // TODO? Is this different than IndexReader.getCurrentVersion( dir )?
    indexInfo.add("optimized", reader.isOptimized());
    indexInfo.add("current", reader.isCurrent());
    indexInfo.add("hasDeletions", reader.hasDeletions());
    indexInfo.add("directory", dir);
    indexInfo.add("lastModified", new Date(IndexReader.lastModified(dir)));
    return indexInfo;
  }
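
A hedged sketch of calling getIndexInfo from outside a request handler; the index path is a placeholder, the method is assumed to be visible to the caller, and the pre-4.0 Lucene API used here matches the TermEnum/isOptimized calls in the snippet above rather than current Lucene:

  // Hypothetical caller (Lucene 3.x era APIs); assumes imports from
  // org.apache.lucene.index, org.apache.lucene.store, java.io and
  // org.apache.solr.common.util.
  static void printIndexSummary() throws IOException {
    IndexReader reader = IndexReader.open(FSDirectory.open(new File("/path/to/index")));
    try {
      SimpleOrderedMap<Object> info = getIndexInfo(reader, /* countTerms */ false);
      System.out.println("numDocs = " + info.get("numDocs"));
      System.out.println("optimized = " + info.get("optimized"));
      System.out.println("lastModified = " + info.get("lastModified"));
    } finally {
      reader.close();
    }
  }
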
Example No. 3
  /**
   * Returns a <code>NamedList</code> with each entry having the "key" of the interval as name and
   * the count of docs in that interval as value. All intervals added in the request are included in
   * the returned <code>NamedList</code> (including those with 0 count), and it's required that the
   * order of the intervals is deterministic and equal across all shards of a distributed request,
   * otherwise the collation of results will fail.
   */
  public NamedList<Object> getFacetIntervalCounts() throws IOException, SyntaxError {
    NamedList<Object> res = new SimpleOrderedMap<Object>();
    String[] fields = global.getParams(FacetParams.FACET_INTERVAL);
    if (fields == null || fields.length == 0) return res;

    for (String field : fields) {
      final ParsedParams parsed = parseParams(FacetParams.FACET_INTERVAL, field);
      String[] intervalStrs =
          parsed.required.getFieldParams(parsed.facetValue, FacetParams.FACET_INTERVAL_SET);
      SchemaField schemaField = searcher.getCore().getLatestSchema().getField(parsed.facetValue);
      if (parsed.params.getBool(GroupParams.GROUP_FACET, false)) {
        throw new SolrException(
            SolrException.ErrorCode.BAD_REQUEST,
            "Interval Faceting can't be used with " + GroupParams.GROUP_FACET);
      }

      SimpleOrderedMap<Integer> fieldResults = new SimpleOrderedMap<Integer>();
      res.add(parsed.key, fieldResults);
      IntervalFacets intervalFacets =
          new IntervalFacets(schemaField, searcher, parsed.docs, intervalStrs, parsed.params);
      for (FacetInterval interval : intervalFacets) {
        fieldResults.add(interval.getKey(), interval.getCount());
      }
    }

    return res;
  }
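
Per the javadoc, every requested interval appears in the returned NamedList, even with a count of 0. A hedged sketch of reading those counts back out; the facet key "price" and the object exposing the method are assumptions for illustration:

  // Hypothetical consumer of getFacetIntervalCounts(); "facets" is assumed to be
  // the object exposing the method above.
  NamedList<Object> intervalCounts = facets.getFacetIntervalCounts();
  @SuppressWarnings("unchecked")
  NamedList<Integer> priceIntervals = (NamedList<Integer>) intervalCounts.get("price");
  if (priceIntervals != null) {
    for (Map.Entry<String, Integer> interval : priceIntervals) {
      System.out.println(interval.getKey() + " -> " + interval.getValue());
    }
  }
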
Example No. 4
 void addStats(SimpleOrderedMap<Object> target, int slotNum) throws IOException {
   int count = countAcc.getCount(slotNum);
   target.add("count", count);
   if (count > 0 || freq.processEmpty) {
     for (SlotAcc acc : accs) {
       acc.setValues(target, slotNum);
     }
   }
 }
Example No. 5
  /**
   * Convert this object to a SimpleOrderedMap, making it easier to serialize.
   *
   * @return the equivalent SimpleOrderedMap for this object.
   */
  public SimpleOrderedMap<Object> toMap() {
    SimpleOrderedMap<Object> map = new SimpleOrderedMap<>();

    if (label != null) {
      map.add(LABEL_KEY, label);
    }
    map.add(VALUE_KEY, value);
    map.add(COUNT_KEY, count);
    map.add(TOTAL_KEY, getTotal());
    if (hierarchy != null && hierarchy.size() > 0) {
      // Recurse through the child nodes, converting each to a map
      List<NamedList<Object>> hierarchyList =
          hierarchy.stream().map(TreeFacetField::toMap).collect(Collectors.toList());
      map.add(HIERARCHY_KEY, hierarchyList);
    }

    return map;
  }
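
A hedged usage sketch for toMap(): since the conversion yields a SimpleOrderedMap (with the child nodes converted recursively to NamedList values), the result can be attached directly to a response or walked like any other NamedList. The variable names and the response key are assumptions:

  // Hypothetical usage; "root" is a populated TreeFacetField and "rsp" an
  // in-scope SolrQueryResponse.
  SimpleOrderedMap<Object> serialized = root.toMap();
  rsp.add("facetTree", serialized);
  // Child nodes, when present, appear under the hierarchy key as a List<NamedList<Object>>.
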
Example No. 6
 protected void processStats(SimpleOrderedMap<Object> bucket, DocSet docs, int docCount)
     throws IOException {
   if ((docCount == 0 && !freq.processEmpty) || freq.getFacetStats().size() == 0) {
     bucket.add("count", docCount);
     return;
   }
   createAccs(docCount, 1);
   int collected = collect(docs, 0);
   countAcc.incrementCount(0, collected);
   assert collected == docCount;
   addStats(bucket, 0);
 }
Example No. 7
  void processSubs(SimpleOrderedMap<Object> response, Query filter, DocSet domain)
      throws IOException {

    // TODO: what if a zero bucket has a sub-facet with an exclusion that would yield results?
    // should we check for domain-altering exclusions, or even ask the sub-facet for
    // its domain and then only skip it if it's 0?

    if (domain == null || (domain.size() == 0 && !freq.processEmpty)) {
      return;
    }

    for (Map.Entry<String, FacetRequest> sub : freq.getSubFacets().entrySet()) {
      // make a new context for each sub-facet since they can change the domain
      FacetContext subContext = fcontext.sub(filter, domain);
      FacetProcessor subProcessor = sub.getValue().createFacetProcessor(subContext);
      if (fcontext.getDebugInfo()
          != null) { // if fcontext.debugInfo != null, it means rb.debug() == true
        FacetDebugInfo fdebug = new FacetDebugInfo();
        subContext.setDebugInfo(fdebug);
        fcontext.getDebugInfo().addChild(fdebug);

        fdebug.setReqDescription(sub.getValue().getFacetDescription());
        fdebug.setProcessor(subProcessor.getClass().getSimpleName());
        if (subContext.filter != null) fdebug.setFilter(subContext.filter.toString());

        final RTimer timer = new RTimer();
        subProcessor.process();
        long timeElapsed = (long) timer.getTime();
        fdebug.setElapse(timeElapsed);
        fdebug.putInfoItem("domainSize", (long) subContext.base.size());
      } else {
        subProcessor.process();
      }

      response.add(sub.getKey(), subProcessor.getResponse());
    }
  }
Example No. 8
  private static SimpleOrderedMap<Object> getAnalyzerInfo(Analyzer analyzer) {
    SimpleOrderedMap<Object> aninfo = new SimpleOrderedMap<Object>();
    aninfo.add("className", analyzer.getClass().getName());
    if (analyzer instanceof TokenizerChain) {

      TokenizerChain tchain = (TokenizerChain) analyzer;

      CharFilterFactory[] cfiltfacs = tchain.getCharFilterFactories();
      SimpleOrderedMap<Map<String, Object>> cfilters = new SimpleOrderedMap<Map<String, Object>>();
      for (CharFilterFactory cfiltfac : cfiltfacs) {
        Map<String, Object> tok = new HashMap<String, Object>();
        String className = cfiltfac.getClass().getName();
        tok.put("className", className);
        tok.put("args", cfiltfac.getArgs());
        cfilters.add(className.substring(className.lastIndexOf('.') + 1), tok);
      }
      if (cfilters.size() > 0) {
        aninfo.add("charFilters", cfilters);
      }

      SimpleOrderedMap<Object> tokenizer = new SimpleOrderedMap<Object>();
      TokenizerFactory tfac = tchain.getTokenizerFactory();
      tokenizer.add("className", tfac.getClass().getName());
      tokenizer.add("args", tfac.getArgs());
      aninfo.add("tokenizer", tokenizer);

      TokenFilterFactory[] filtfacs = tchain.getTokenFilterFactories();
      SimpleOrderedMap<Map<String, Object>> filters = new SimpleOrderedMap<Map<String, Object>>();
      for (TokenFilterFactory filtfac : filtfacs) {
        Map<String, Object> tok = new HashMap<String, Object>();
        String className = filtfac.getClass().getName();
        tok.put("className", className);
        tok.put("args", filtfac.getArgs());
        filters.add(className.substring(className.lastIndexOf('.') + 1), tok);
      }
      if (filters.size() > 0) {
        aninfo.add("filters", filters);
      }
    }
    return aninfo;
  }
Example No. 9
  @Override
  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws IOException {
    SimpleOrderedMap<Object> system = new SimpleOrderedMap<Object>();
    rsp.add("system", system);

    ThreadMXBean tmbean = ManagementFactory.getThreadMXBean();

    // Thread Count
    SimpleOrderedMap<Object> nl = new SimpleOrderedMap<Object>();
    nl.add("current", tmbean.getThreadCount());
    nl.add("peak", tmbean.getPeakThreadCount());
    nl.add("daemon", tmbean.getDaemonThreadCount());
    system.add("threadCount", nl);

    // Deadlocks
    ThreadInfo[] tinfos;
    long[] tids = tmbean.findMonitorDeadlockedThreads();
    if (tids != null) {
      tinfos = tmbean.getThreadInfo(tids, Integer.MAX_VALUE);
      NamedList<SimpleOrderedMap<Object>> lst = new NamedList<SimpleOrderedMap<Object>>();
      for (ThreadInfo ti : tinfos) {
        if (ti != null) {
          lst.add("thread", getThreadInfo(ti, tmbean));
        }
      }
      system.add("deadlocks", lst);
    }

    // Now show all the threads....
    tids = tmbean.getAllThreadIds();
    tinfos = tmbean.getThreadInfo(tids, Integer.MAX_VALUE);
    NamedList<SimpleOrderedMap<Object>> lst = new NamedList<SimpleOrderedMap<Object>>();
    for (ThreadInfo ti : tinfos) {
      if (ti != null) {
        lst.add("thread", getThreadInfo(ti, tmbean));
      }
    }
    system.add("threadDump", lst);
    rsp.setHttpCaching(false);
  }
Example No. 10
  private static SimpleOrderedMap<Object> getThreadInfo(ThreadInfo ti, ThreadMXBean tmbean) {
    SimpleOrderedMap<Object> info = new SimpleOrderedMap<Object>();
    long tid = ti.getThreadId();

    info.add("id", tid);
    info.add("name", ti.getThreadName());
    info.add("state", ti.getThreadState().toString());

    if (ti.getLockName() != null) {
      info.add("lock", ti.getLockName());
    }
    if (ti.isSuspended()) {
      info.add("suspended", true);
    }
    if (ti.isInNative()) {
      info.add("native", true);
    }

    if (tmbean.isThreadCpuTimeSupported()) {
      info.add("cpuTime", formatNanos(tmbean.getThreadCpuTime(tid)));
      info.add("userTime", formatNanos(tmbean.getThreadUserTime(tid)));
    }

    if (ti.getLockOwnerName() != null) {
      SimpleOrderedMap<Object> owner = new SimpleOrderedMap<Object>();
      owner.add("name", ti.getLockOwnerName());
      owner.add("id", ti.getLockOwnerId());
      info.add("owner", owner); // attach the lock-owner details to the thread info
    }

    // Add the stack trace
    int i = 0;
    String[] trace = new String[ti.getStackTrace().length];
    for (StackTraceElement ste : ti.getStackTrace()) {
      trace[i++] = ste.toString();
    }
    info.add("stackTrace", trace);
    return info;
  }
Example No. 11
  @Override
  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
    // Don't do anything if the framework is unknown
    if (watcher == null) {
      rsp.add("error", "Logging Not Initalized");
      return;
    }
    rsp.add("watcher", watcher.getName());

    SolrParams params = req.getParams();
    if (params.get("threshold") != null) {
      watcher.setThreshold(params.get("threshold"));
    }

    // Write something at each level
    if (params.get("test") != null) {
      log.trace("trace message");
      log.debug("debug message");
      log.info("info (with exception)", new RuntimeException("test"));
      log.warn("warn (with exception)", new RuntimeException("test"));
      log.error("error (with exception)", new RuntimeException("test"));
    }

    String[] set = params.getParams("set");
    if (set != null) {
      for (String pair : set) {
        String[] split = pair.split(":");
        if (split.length != 2) {
          throw new SolrException(
              SolrException.ErrorCode.SERVER_ERROR,
              "Invalid format, expected level:value, got " + pair);
        }
        String category = split[0];
        String level = split[1];

        watcher.setLogLevel(category, level);
      }
    }

    String since = req.getParams().get("since");
    if (since != null) {
      long time = -1;
      try {
        time = Long.parseLong(since);
      } catch (Exception ex) {
        throw new SolrException(ErrorCode.BAD_REQUEST, "invalid timestamp: " + since);
      }
      AtomicBoolean found = new AtomicBoolean(false);
      SolrDocumentList docs = watcher.getHistory(time, found);
      if (docs == null) {
        rsp.add("error", "History not enabled");
        return;
      } else {
        SimpleOrderedMap<Object> info = new SimpleOrderedMap<Object>();
        if (time > 0) {
          info.add("since", time);
          info.add("found", found);
        } else {
          info.add("levels", watcher.getAllLevels()); // show for the first request
        }
        info.add("last", watcher.getLastEvent());
        info.add("buffer", watcher.getHistorySize());
        info.add("threshold", watcher.getThreshold());

        rsp.add("info", info);
        rsp.add("history", docs);
      }
    } else {
      rsp.add("levels", watcher.getAllLevels());

      List<LoggerInfo> loggers = new ArrayList<LoggerInfo>(watcher.getAllLoggers());
      Collections.sort(loggers);

      List<SimpleOrderedMap<?>> info = new ArrayList<SimpleOrderedMap<?>>();
      for (LoggerInfo wrap : loggers) {
        info.add(wrap.getInfo());
      }
      rsp.add("loggers", info);
    }
    rsp.setHttpCaching(false);
  }
Example No. 12
 /** @return a key to what each character means */
 public static SimpleOrderedMap<String> getFieldFlagsKey() {
   SimpleOrderedMap<String> key = new SimpleOrderedMap<String>();
   key.add(String.valueOf(FieldFlag.INDEXED.getAbbreviation()), FieldFlag.INDEXED.getDisplay());
   key.add(
       String.valueOf(FieldFlag.TOKENIZED.getAbbreviation()), FieldFlag.TOKENIZED.getDisplay());
   key.add(String.valueOf(FieldFlag.STORED.getAbbreviation()), FieldFlag.STORED.getDisplay());
   key.add(
       String.valueOf(FieldFlag.MULTI_VALUED.getAbbreviation()),
       FieldFlag.MULTI_VALUED.getDisplay());
   key.add(
       String.valueOf(FieldFlag.TERM_VECTOR_STORED.getAbbreviation()),
       FieldFlag.TERM_VECTOR_STORED.getDisplay());
   key.add(
       String.valueOf(FieldFlag.TERM_VECTOR_OFFSET.getAbbreviation()),
       FieldFlag.TERM_VECTOR_OFFSET.getDisplay());
   key.add(
       String.valueOf(FieldFlag.TERM_VECTOR_POSITION.getAbbreviation()),
       FieldFlag.TERM_VECTOR_POSITION.getDisplay());
   key.add(
       String.valueOf(FieldFlag.OMIT_NORMS.getAbbreviation()), FieldFlag.OMIT_NORMS.getDisplay());
   key.add(String.valueOf(FieldFlag.LAZY.getAbbreviation()), FieldFlag.LAZY.getDisplay());
   key.add(String.valueOf(FieldFlag.BINARY.getAbbreviation()), FieldFlag.BINARY.getDisplay());
   key.add(
       String.valueOf(FieldFlag.SORT_MISSING_FIRST.getAbbreviation()),
       FieldFlag.SORT_MISSING_FIRST.getDisplay());
   key.add(
       String.valueOf(FieldFlag.SORT_MISSING_LAST.getAbbreviation()),
       FieldFlag.SORT_MISSING_LAST.getDisplay());
   return key;
 }
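
The returned key maps each one-character flag abbreviation to its human-readable meaning, in the same order as the add() calls above. A short hedged sketch of printing that legend:

  // Hypothetical consumer of getFieldFlagsKey(); assumes java.util.Map is imported.
  SimpleOrderedMap<String> legend = getFieldFlagsKey();
  for (Map.Entry<String, String> entry : legend) {
    System.out.println(entry.getKey() + " : " + entry.getValue());
  }
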
Example No. 13
 /**
  * Get a map of property name -&gt; value for this field. If showDefaults is true, include default
  * properties (those inherited from the declared property type and not overridden in the field
  * declaration).
  */
 public SimpleOrderedMap<Object> getNamedPropertyValues(boolean showDefaults) {
   SimpleOrderedMap<Object> properties = new SimpleOrderedMap<>();
   properties.add(FIELD_NAME, getName());
   properties.add(TYPE_NAME, getType().getTypeName());
   if (showDefaults) {
     if (null != getDefaultValue()) {
       properties.add(DEFAULT_VALUE, getDefaultValue());
     }
     properties.add(getPropertyName(INDEXED), indexed());
     properties.add(getPropertyName(STORED), stored());
     properties.add(getPropertyName(DOC_VALUES), hasDocValues());
     properties.add(getPropertyName(STORE_TERMVECTORS), storeTermVector());
     properties.add(getPropertyName(STORE_TERMPOSITIONS), storeTermPositions());
     properties.add(getPropertyName(STORE_TERMOFFSETS), storeTermOffsets());
     properties.add(getPropertyName(STORE_TERMPAYLOADS), storeTermPayloads());
     properties.add(getPropertyName(OMIT_NORMS), omitNorms());
     properties.add(getPropertyName(OMIT_TF_POSITIONS), omitTermFreqAndPositions());
     properties.add(getPropertyName(OMIT_POSITIONS), omitPositions());
     properties.add(getPropertyName(STORE_OFFSETS), storeOffsetsWithPositions());
     properties.add(getPropertyName(MULTIVALUED), multiValued());
     if (sortMissingFirst()) {
       properties.add(getPropertyName(SORT_MISSING_FIRST), sortMissingFirst());
     } else if (sortMissingLast()) {
       properties.add(getPropertyName(SORT_MISSING_LAST), sortMissingLast());
     }
     properties.add(getPropertyName(REQUIRED), isRequired());
     properties.add(getPropertyName(TOKENIZED), isTokenized());
     properties.add(getPropertyName(USE_DOCVALUES_AS_STORED), useDocValuesAsStored());
     // The BINARY property is always false
     // properties.add(getPropertyName(BINARY), isBinary());
   } else {
     for (Map.Entry<String, ?> arg : args.entrySet()) {
       String key = arg.getKey();
       Object value = arg.getValue();
       if (key.equals(DEFAULT_VALUE)) {
         properties.add(key, value);
       } else {
         boolean boolVal =
             value instanceof Boolean ? (Boolean) value : Boolean.parseBoolean(value.toString());
         properties.add(key, boolVal);
       }
     }
   }
   return properties;
 }
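
A hedged sketch of dumping the properties produced above for a single field; the field name "title" and the in-scope IndexSchema are assumptions:

  // Hypothetical caller; "schema" is assumed to be an IndexSchema already in scope.
  SchemaField titleField = schema.getField("title");
  SimpleOrderedMap<Object> props = titleField.getNamedPropertyValues(true); // include defaults
  for (Map.Entry<String, Object> prop : props) {
    System.out.println(prop.getKey() + " = " + prop.getValue());
  }
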
Example No. 14
  @Override
  public void prepare(ResponseBuilder rb) throws IOException {
    SolrQueryRequest req = rb.req;
    SolrParams params = req.getParams();
    // A runtime param can skip
    if (!params.getBool(QueryElevationParams.ENABLE, true)) {
      return;
    }

    boolean exclusive = params.getBool(QueryElevationParams.EXCLUSIVE, false);
    // A runtime parameter can alter the config value for forceElevation
    boolean force = params.getBool(QueryElevationParams.FORCE_ELEVATION, forceElevation);
    boolean markExcludes = params.getBool(QueryElevationParams.MARK_EXCLUDES, false);
    String boostStr = params.get(QueryElevationParams.IDS);
    String exStr = params.get(QueryElevationParams.EXCLUDE);

    Query query = rb.getQuery();
    String qstr = rb.getQueryString();
    if (query == null || qstr == null) {
      return;
    }

    ElevationObj booster = null;
    try {
      if (boostStr != null || exStr != null) {
        List<String> boosts =
            (boostStr != null)
                ? StrUtils.splitSmart(boostStr, ",", true)
                : new ArrayList<String>(0);
        List<String> excludes =
            (exStr != null) ? StrUtils.splitSmart(exStr, ",", true) : new ArrayList<String>(0);
        booster = new ElevationObj(qstr, boosts, excludes);
      } else {
        IndexReader reader = req.getSearcher().getIndexReader();
        qstr = getAnalyzedQuery(qstr);
        booster = getElevationMap(reader, req.getCore()).get(qstr);
      }
    } catch (Exception ex) {
      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error loading elevation", ex);
    }

    if (booster != null) {
      rb.req.getContext().put(BOOSTED, booster.ids);

      // Change the query to insert forced documents
      if (exclusive) {
        // we only want these results
        rb.setQuery(booster.include);
      } else {
        BooleanQuery newq = new BooleanQuery(true);
        newq.add(query, BooleanClause.Occur.SHOULD);
        newq.add(booster.include, BooleanClause.Occur.SHOULD);
        if (booster.exclude != null) {
          if (!markExcludes) {
            for (TermQuery tq : booster.exclude) {
              newq.add(new BooleanClause(tq, BooleanClause.Occur.MUST_NOT));
            }
          } else {
            // we are only going to mark items as excluded, not actually exclude them.  This works
            // with the EditorialMarkerFactory
            rb.req.getContext().put(EXCLUDED, booster.excludeIds);
          }
        }
        rb.setQuery(newq);
      }

      ElevationComparatorSource comparator = new ElevationComparatorSource(booster);
      // if the sort is 'score desc' use a custom sorting method to
      // insert documents in their proper place
      SortSpec sortSpec = rb.getSortSpec();
      if (sortSpec.getSort() == null) {
        sortSpec.setSortAndFields(
            new Sort(
                new SortField[] {
                  new SortField("_elevate_", comparator, true),
                  new SortField(null, SortField.Type.SCORE, false)
                }),
            Arrays.asList(new SchemaField[2]));
      } else {
        // Check if the sort is based on score
        SortSpec modSortSpec = this.modifySortSpec(sortSpec, force, comparator);
        if (null != modSortSpec) {
          rb.setSortSpec(modSortSpec);
        }
      }

      // alter the sorting in the grouping specification if there is one
      GroupingSpecification groupingSpec = rb.getGroupingSpec();
      if (groupingSpec != null) {
        SortField[] groupSort = groupingSpec.getGroupSort().getSort();
        Sort modGroupSort = this.modifySort(groupSort, force, comparator);
        if (modGroupSort != null) {
          groupingSpec.setGroupSort(modGroupSort);
        }
        SortField[] withinGroupSort = groupingSpec.getSortWithinGroup().getSort();
        Sort modWithinGroupSort = this.modifySort(withinGroupSort, force, comparator);
        if (modWithinGroupSort != null) {
          groupingSpec.setSortWithinGroup(modWithinGroupSort);
        }
      }
    }

    // Add debugging information
    if (rb.isDebug()) {
      List<String> match = null;
      if (booster != null) {
        // Extract the elevated terms into a list
        match = new ArrayList<String>(booster.priority.size());
        for (Object o : booster.include.clauses()) {
          TermQuery tq = (TermQuery) ((BooleanClause) o).getQuery();
          match.add(tq.getTerm().text());
        }
      }

      SimpleOrderedMap<Object> dbg = new SimpleOrderedMap<Object>();
      dbg.add("q", qstr);
      dbg.add("match", match);
      if (rb.isDebugQuery()) {
        rb.addDebugInfo("queryBoosting", dbg);
      }
    }
  }
Example No. 15
    public void merge(ResponseBuilder rb, ShardRequest sreq) {

      // id to shard mapping, to eliminate any accidental dups
      HashMap<Object, String> uniqueDoc = new HashMap<>();

      NamedList<Object> shardInfo = null;
      if (rb.req.getParams().getBool(ShardParams.SHARDS_INFO, false)) {
        shardInfo = new SimpleOrderedMap<>();
        rb.rsp.getValues().add(ShardParams.SHARDS_INFO, shardInfo);
      }

      IndexSchema schema = rb.req.getSchema();
      SchemaField uniqueKeyField = schema.getUniqueKeyField();

      long numFound = 0;
      Float maxScore = null;
      boolean partialResults = false;
      List<ShardDoc> shardDocs = new ArrayList<>();

      for (ShardResponse srsp : sreq.responses) {
        SolrDocumentList docs = null;

        if (shardInfo != null) {
          SimpleOrderedMap<Object> nl = new SimpleOrderedMap<>();

          if (srsp.getException() != null) {
            Throwable t = srsp.getException();
            if (t instanceof SolrServerException) {
              t = ((SolrServerException) t).getCause();
            }
            nl.add("error", t.toString());
            StringWriter trace = new StringWriter();
            t.printStackTrace(new PrintWriter(trace));
            nl.add("trace", trace.toString());
            if (srsp.getShardAddress() != null) {
              nl.add("shardAddress", srsp.getShardAddress());
            }
          } else {
            docs = (SolrDocumentList) srsp.getSolrResponse().getResponse().get("response");
            nl.add("numFound", docs.getNumFound());
            nl.add("maxScore", docs.getMaxScore());
            nl.add("shardAddress", srsp.getShardAddress());
          }
          if (srsp.getSolrResponse() != null) {
            nl.add("time", srsp.getSolrResponse().getElapsedTime());
          }

          shardInfo.add(srsp.getShard(), nl);
        }
        // now that we've added the shard info, let's only proceed if we have no error.
        if (srsp.getException() != null) {
          partialResults = true;
          continue;
        }

        if (docs == null) { // could have been initialized in the shards info block above
          docs = (SolrDocumentList) srsp.getSolrResponse().getResponse().get("response");
        }

        NamedList<?> responseHeader =
            (NamedList<?>) srsp.getSolrResponse().getResponse().get("responseHeader");
        if (responseHeader != null
            && Boolean.TRUE.equals(
                responseHeader.get(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY))) {
          partialResults = true;
        }

        // calculate global maxScore and numDocsFound
        if (docs.getMaxScore() != null) {
          maxScore = maxScore == null ? docs.getMaxScore() : Math.max(maxScore, docs.getMaxScore());
        }
        numFound += docs.getNumFound();

        SortSpec ss = rb.getSortSpec();
        Sort sort = ss.getSort();

        NamedList sortFieldValues =
            (NamedList) (srsp.getSolrResponse().getResponse().get("merge_values"));
        NamedList unmarshalledSortFieldValues = unmarshalSortValues(ss, sortFieldValues, schema);
        List lst = (List) unmarshalledSortFieldValues.getVal(0);

        for (int i = 0; i < docs.size(); i++) {
          SolrDocument doc = docs.get(i);
          Object id = doc.getFieldValue(uniqueKeyField.getName());

          String prevShard = uniqueDoc.put(id, srsp.getShard());
          if (prevShard != null) {
            // duplicate detected
            numFound--;

            // For now, just always use the first encountered since we can't currently
            // remove the previous one added to the priority queue.  If we switched
            // to the Java5 PriorityQueue, this would be easier.
            continue;
            // make which duplicate is used deterministic based on shard
            // if (prevShard.compareTo(srsp.shard) >= 0) {
            //  TODO: remove previous from priority queue
            //  continue;
            // }
          }

          ShardDoc shardDoc = new ShardDoc();
          shardDoc.id = id;
          shardDoc.shard = srsp.getShard();
          shardDoc.orderInShard = i;
          Object scoreObj = lst.get(i);
          if (scoreObj != null) {
            shardDoc.score = ((Number) scoreObj).floatValue();
          }
          shardDocs.add(shardDoc);
        } // end for-each-doc-in-response
      } // end for-each-response

      Collections.sort(
          shardDocs,
          new Comparator<ShardDoc>() {
            @Override
            public int compare(ShardDoc o1, ShardDoc o2) {
              if (o1.score < o2.score) {
                return 1;
              } else if (o1.score > o2.score) {
                return -1;
              } else {
                return 0;
              }
            }
          });

      int resultSize = shardDocs.size();

      Map<Object, ShardDoc> resultIds = new HashMap<>();
      for (int i = 0; i < shardDocs.size(); i++) {
        ShardDoc shardDoc = shardDocs.get(i);
        shardDoc.positionInResponse = i;
        // Need the toString() for correlation with other lists that must
        // be strings (like keys in highlighting, explain, etc)
        resultIds.put(shardDoc.id.toString(), shardDoc);
      }

      // Add hits for distributed requests
      // https://issues.apache.org/jira/browse/SOLR-3518
      rb.rsp.addToLog("hits", numFound);

      SolrDocumentList responseDocs = new SolrDocumentList();
      if (maxScore != null) responseDocs.setMaxScore(maxScore);
      responseDocs.setNumFound(numFound);
      responseDocs.setStart(0);
      // size appropriately
      for (int i = 0; i < resultSize; i++) responseDocs.add(null);

      // save these results in a private area so we can access them
      // again when retrieving stored fields.
      // TODO: use ResponseBuilder (w/ comments) or the request context?
      rb.resultIds = resultIds;
      rb.setResponseDocs(responseDocs);

      if (partialResults) {
        rb.rsp
            .getResponseHeader()
            .add(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY, Boolean.TRUE);
      }
    }
Example No. 16
  private static SimpleOrderedMap<Object> getDocumentFieldsInfo(
      Document doc, int docId, IndexReader reader, IndexSchema schema) throws IOException {
    SimpleOrderedMap<Object> finfo = new SimpleOrderedMap<Object>();
    for (Object o : doc.getFields()) {
      Fieldable fieldable = (Fieldable) o;
      SimpleOrderedMap<Object> f = new SimpleOrderedMap<Object>();

      SchemaField sfield = schema.getFieldOrNull(fieldable.name());
      FieldType ftype = (sfield == null) ? null : sfield.getType();

      f.add("type", (ftype == null) ? null : ftype.getTypeName());
      f.add("schema", getFieldFlags(sfield));
      f.add("flags", getFieldFlags(fieldable));

      Term t =
          new Term(
              fieldable.name(),
              ftype != null ? ftype.storedToIndexed(fieldable) : fieldable.stringValue());

      f.add("value", (ftype == null) ? null : ftype.toExternal(fieldable));

      // TODO: this really should be "stored"
      f.add("internal", fieldable.stringValue()); // may be a binary number

      byte[] arr = fieldable.getBinaryValue();
      if (arr != null) {
        f.add("binary", Base64.byteArrayToBase64(arr, 0, arr.length));
      }
      f.add("boost", fieldable.getBoost());
      f.add(
          "docFreq",
          t.text() == null ? 0 : reader.docFreq(t)); // this can be 0 for non-indexed fields

      // If we have a term vector, return that
      if (fieldable.isTermVectorStored()) {
        try {
          TermFreqVector v = reader.getTermFreqVector(docId, fieldable.name());
          if (v != null) {
            SimpleOrderedMap<Integer> tfv = new SimpleOrderedMap<Integer>();
            for (int i = 0; i < v.size(); i++) {
              tfv.add(v.getTerms()[i], v.getTermFrequencies()[i]);
            }
            f.add("termVector", tfv);
          }
        } catch (Exception ex) {
          log.warn("error writing term vector", ex);
        }
      }

      finfo.add(fieldable.name(), f);
    }
    return finfo;
  }
Example No. 17
  /**
   * Get a map of property name -&gt; value for this field type.
   *
   * @param showDefaults if true, include default properties.
   */
  public SimpleOrderedMap<Object> getNamedPropertyValues(boolean showDefaults) {
    SimpleOrderedMap<Object> namedPropertyValues = new SimpleOrderedMap<>();
    namedPropertyValues.add(TYPE_NAME, getTypeName());
    namedPropertyValues.add(CLASS_NAME, getClassArg());
    if (showDefaults) {
      Map<String, String> fieldTypeArgs = getNonFieldPropertyArgs();
      if (null != fieldTypeArgs) {
        for (String key : fieldTypeArgs.keySet()) {
          if (!CLASS_NAME.equals(key) && !TYPE_NAME.equals(key)) {
            namedPropertyValues.add(key, fieldTypeArgs.get(key));
          }
        }
      }
      if (this instanceof TextField) {
        namedPropertyValues.add(
            AUTO_GENERATE_PHRASE_QUERIES, ((TextField) this).getAutoGeneratePhraseQueries());
      }
      namedPropertyValues.add(getPropertyName(INDEXED), hasProperty(INDEXED));
      namedPropertyValues.add(getPropertyName(STORED), hasProperty(STORED));
      namedPropertyValues.add(getPropertyName(DOC_VALUES), hasProperty(DOC_VALUES));
      namedPropertyValues.add(getPropertyName(STORE_TERMVECTORS), hasProperty(STORE_TERMVECTORS));
      namedPropertyValues.add(
          getPropertyName(STORE_TERMPOSITIONS), hasProperty(STORE_TERMPOSITIONS));
      namedPropertyValues.add(getPropertyName(STORE_TERMOFFSETS), hasProperty(STORE_TERMOFFSETS));
      namedPropertyValues.add(getPropertyName(OMIT_NORMS), hasProperty(OMIT_NORMS));
      namedPropertyValues.add(getPropertyName(OMIT_TF_POSITIONS), hasProperty(OMIT_TF_POSITIONS));
      namedPropertyValues.add(getPropertyName(OMIT_POSITIONS), hasProperty(OMIT_POSITIONS));
      namedPropertyValues.add(getPropertyName(STORE_OFFSETS), hasProperty(STORE_OFFSETS));
      namedPropertyValues.add(getPropertyName(MULTIVALUED), hasProperty(MULTIVALUED));
      if (hasProperty(SORT_MISSING_FIRST)) {
        namedPropertyValues.add(getPropertyName(SORT_MISSING_FIRST), true);
      } else if (hasProperty(SORT_MISSING_LAST)) {
        namedPropertyValues.add(getPropertyName(SORT_MISSING_LAST), true);
      }
      namedPropertyValues.add(getPropertyName(TOKENIZED), isTokenized());
      // The BINARY property is always false
      // namedPropertyValues.add(getPropertyName(BINARY), hasProperty(BINARY));
      if (null != getPostingsFormat()) {
        namedPropertyValues.add(POSTINGS_FORMAT, getPostingsFormat());
      }
      if (null != getDocValuesFormat()) {
        namedPropertyValues.add(DOC_VALUES_FORMAT, getDocValuesFormat());
      }
    } else { // Don't show defaults
      Set<String> fieldProperties = new HashSet<>();
      for (String propertyName : FieldProperties.propertyNames) {
        fieldProperties.add(propertyName);
      }

      for (String key : args.keySet()) {
        if (fieldProperties.contains(key)) {
          namedPropertyValues.add(key, StrUtils.parseBool(args.get(key)));
        } else if (!CLASS_NAME.equals(key) && !TYPE_NAME.equals(key)) {
          namedPropertyValues.add(key, args.get(key));
        }
      }
    }

    if (null != getSimilarityFactory()) {
      namedPropertyValues.add(SIMILARITY, getSimilarityFactory().getNamedPropertyValues());
    }

    if (this instanceof HasImplicitIndexAnalyzer) {
      if (isExplicitQueryAnalyzer()) {
        namedPropertyValues.add(QUERY_ANALYZER, getAnalyzerProperties(getQueryAnalyzer()));
      }
    } else {
      if (isExplicitAnalyzer()) {
        String analyzerProperty = isExplicitQueryAnalyzer() ? INDEX_ANALYZER : ANALYZER;
        namedPropertyValues.add(analyzerProperty, getAnalyzerProperties(getIndexAnalyzer()));
      }
      if (isExplicitQueryAnalyzer()) {
        String analyzerProperty = isExplicitAnalyzer() ? QUERY_ANALYZER : ANALYZER;
        namedPropertyValues.add(analyzerProperty, getAnalyzerProperties(getQueryAnalyzer()));
      }
    }
    if (this instanceof TextField) {
      if (((TextField) this).isExplicitMultiTermAnalyzer()) {
        namedPropertyValues.add(
            MULTI_TERM_ANALYZER, getAnalyzerProperties(((TextField) this).getMultiTermAnalyzer()));
      }
    }

    return namedPropertyValues;
  }
Example No. 18
  @SuppressWarnings("unchecked")
  private static SimpleOrderedMap<Object> getIndexedFieldsInfo(
      final SolrIndexSearcher searcher, final Set<String> fields, final int numTerms)
      throws Exception {

    IndexReader reader = searcher.getReader();
    IndexSchema schema = searcher.getSchema();

    // Walk the term enum and keep a priority queue for each map in our set
    Map<String, TopTermQueue> ttinfo = null;
    if (numTerms > 0) {
      ttinfo = getTopTerms(reader, fields, numTerms, null);
    }
    SimpleOrderedMap<Object> finfo = new SimpleOrderedMap<Object>();
    Collection<String> fieldNames = reader.getFieldNames(IndexReader.FieldOption.ALL);
    for (String fieldName : fieldNames) {
      if (fields != null && !fields.contains(fieldName)) {
        continue; // if specific fields were requested, only include those
      }

      SimpleOrderedMap<Object> f = new SimpleOrderedMap<Object>();

      SchemaField sfield = schema.getFieldOrNull(fieldName);
      FieldType ftype = (sfield == null) ? null : sfield.getType();

      f.add("type", (ftype == null) ? null : ftype.getTypeName());
      f.add("schema", getFieldFlags(sfield));
      if (sfield != null
          && schema.isDynamicField(sfield.getName())
          && schema.getDynamicPattern(sfield.getName()) != null) {
        f.add("dynamicBase", schema.getDynamicPattern(sfield.getName()));
      }

      // If numTerms==0, the call is just asking for a quick field list
      if (ttinfo != null && sfield != null && sfield.indexed()) {
        Query q = new TermRangeQuery(fieldName, null, null, false, false);
        TopDocs top = searcher.search(q, 1);
        if (top.totalHits > 0) {
          // Find a document with this field
          try {
            Document doc = searcher.doc(top.scoreDocs[0].doc);
            Fieldable fld = doc.getFieldable(fieldName);
            if (fld != null) {
              f.add("index", getFieldFlags(fld));
            } else {
              // it is a non-stored field...
              f.add("index", "(unstored field)");
            }
          } catch (Exception ex) {
            log.warn("error reading field: " + fieldName);
          }
        }
        f.add("docs", top.totalHits);

        TopTermQueue topTerms = ttinfo.get(fieldName);
        if (topTerms != null) {
          f.add("distinct", topTerms.distinctTerms);

          // Include top terms
          f.add("topTerms", topTerms.toNamedList(searcher.getSchema()));

          // Add a histogram
          f.add("histogram", topTerms.histogram.toNamedList());
        }
      }

      // Add the field
      finfo.add(fieldName, f);
    }
    return finfo;
  }
Example No. 19
  /** Return info from the index */
  private static SimpleOrderedMap<Object> getSchemaInfo(IndexSchema schema) {
    Map<String, List<String>> typeusemap = new HashMap<String, List<String>>();
    SimpleOrderedMap<Object> fields = new SimpleOrderedMap<Object>();
    SchemaField uniqueField = schema.getUniqueKeyField();
    for (SchemaField f : schema.getFields().values()) {
      populateFieldInfo(schema, typeusemap, fields, uniqueField, f);
    }

    SimpleOrderedMap<Object> dynamicFields = new SimpleOrderedMap<Object>();
    for (SchemaField f : schema.getDynamicFieldPrototypes()) {
      populateFieldInfo(schema, typeusemap, dynamicFields, uniqueField, f);
    }
    SimpleOrderedMap<Object> types = new SimpleOrderedMap<Object>();
    for (FieldType ft : schema.getFieldTypes().values()) {
      SimpleOrderedMap<Object> field = new SimpleOrderedMap<Object>();
      field.add("fields", typeusemap.get(ft.getTypeName()));
      field.add("tokenized", ft.isTokenized());
      field.add("className", ft.getClass().getName());
      field.add("indexAnalyzer", getAnalyzerInfo(ft.getAnalyzer()));
      field.add("queryAnalyzer", getAnalyzerInfo(ft.getQueryAnalyzer()));
      types.add(ft.getTypeName(), field);
    }

    SimpleOrderedMap<Object> finfo = new SimpleOrderedMap<Object>();
    finfo.add("fields", fields);
    finfo.add("dynamicFields", dynamicFields);
    finfo.add("uniqueKeyField", null == uniqueField ? null : uniqueField.getName());
    finfo.add("defaultSearchField", schema.getDefaultSearchFieldName());
    finfo.add("types", types);
    return finfo;
  }
Example No. 20
  /**
   * Returns a description of the given analyzer, by either reporting the Analyzer class name (and
   * optionally luceneMatchVersion) if it's not a TokenizerChain, or if it is, querying each
   * analysis factory for its name and args.
   */
  protected static SimpleOrderedMap<Object> getAnalyzerProperties(Analyzer analyzer) {
    SimpleOrderedMap<Object> analyzerProps = new SimpleOrderedMap<>();

    if (analyzer instanceof TokenizerChain) {
      Map<String, String> factoryArgs;
      TokenizerChain tokenizerChain = (TokenizerChain) analyzer;
      CharFilterFactory[] charFilterFactories = tokenizerChain.getCharFilterFactories();
      if (0 < charFilterFactories.length) {
        List<SimpleOrderedMap<Object>> charFilterProps = new ArrayList<>();
        for (CharFilterFactory charFilterFactory : charFilterFactories) {
          SimpleOrderedMap<Object> props = new SimpleOrderedMap<>();
          props.add(CLASS_NAME, charFilterFactory.getClassArg());
          factoryArgs = charFilterFactory.getOriginalArgs();
          if (null != factoryArgs) {
            for (String key : factoryArgs.keySet()) {
              if (!CLASS_NAME.equals(key)) {
                if (LUCENE_MATCH_VERSION_PARAM.equals(key)) {
                  if (charFilterFactory.isExplicitLuceneMatchVersion()) {
                    props.add(key, factoryArgs.get(key));
                  }
                } else {
                  props.add(key, factoryArgs.get(key));
                }
              }
            }
          }
          charFilterProps.add(props);
        }
        analyzerProps.add(CHAR_FILTERS, charFilterProps);
      }

      SimpleOrderedMap<Object> tokenizerProps = new SimpleOrderedMap<>();
      TokenizerFactory tokenizerFactory = tokenizerChain.getTokenizerFactory();
      tokenizerProps.add(CLASS_NAME, tokenizerFactory.getClassArg());
      factoryArgs = tokenizerFactory.getOriginalArgs();
      if (null != factoryArgs) {
        for (String key : factoryArgs.keySet()) {
          if (!CLASS_NAME.equals(key)) {
            if (LUCENE_MATCH_VERSION_PARAM.equals(key)) {
              if (tokenizerFactory.isExplicitLuceneMatchVersion()) {
                tokenizerProps.add(key, factoryArgs.get(key));
              }
            } else {
              tokenizerProps.add(key, factoryArgs.get(key));
            }
          }
        }
      }
      analyzerProps.add(TOKENIZER, tokenizerProps);

      TokenFilterFactory[] filterFactories = tokenizerChain.getTokenFilterFactories();
      if (0 < filterFactories.length) {
        List<SimpleOrderedMap<Object>> filterProps = new ArrayList<>();
        for (TokenFilterFactory filterFactory : filterFactories) {
          SimpleOrderedMap<Object> props = new SimpleOrderedMap<>();
          props.add(CLASS_NAME, filterFactory.getClassArg());
          factoryArgs = filterFactory.getOriginalArgs();
          if (null != factoryArgs) {
            for (String key : factoryArgs.keySet()) {
              if (!CLASS_NAME.equals(key)) {
                if (LUCENE_MATCH_VERSION_PARAM.equals(key)) {
                  if (filterFactory.isExplicitLuceneMatchVersion()) {
                    props.add(key, factoryArgs.get(key));
                  }
                } else {
                  props.add(key, factoryArgs.get(key));
                }
              }
            }
          }
          filterProps.add(props);
        }
        analyzerProps.add(FILTERS, filterProps);
      }
    } else { // analyzer is not instanceof TokenizerChain
      analyzerProps.add(CLASS_NAME, analyzer.getClass().getName());
      if (analyzer.getVersion() != Version.LATEST) {
        analyzerProps.add(LUCENE_MATCH_VERSION_PARAM, analyzer.getVersion().toString());
      }
    }
    return analyzerProps;
  }
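
Because getAnalyzerProperties nests maps (char filters, tokenizer, filters) inside the top-level map, a small recursive walker makes the result easier to inspect. This is a generic, hedged sketch rather than anything shipped with Solr; list-valued entries (such as the filter lists) are printed via their toString() for brevity:

  // Recursively print a NamedList tree, indenting one level per nesting depth.
  // Assumes java.util.Map and org.apache.solr.common.util.NamedList are imported.
  static void dump(NamedList<?> nl, int depth) {
    StringBuilder indent = new StringBuilder();
    for (int i = 0; i < depth; i++) {
      indent.append("  ");
    }
    for (Map.Entry<String, ?> entry : nl) {
      Object value = entry.getValue();
      if (value instanceof NamedList) {
        System.out.println(indent + entry.getKey() + ":");
        dump((NamedList<?>) value, depth + 1);
      } else {
        System.out.println(indent + entry.getKey() + " = " + value);
      }
    }
  }

For instance, dump(getAnalyzerProperties(analyzer), 0) would print the nested tokenizer and filter properties one entry per line.
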
Example No. 21
  @Override
  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
    IndexSchema schema = req.getSchema();
    SolrIndexSearcher searcher = req.getSearcher();
    IndexReader reader = searcher.getReader();
    SolrParams params = req.getParams();
    int numTerms = params.getInt(NUMTERMS, DEFAULT_COUNT);

    // Always show the core lucene info
    rsp.add("index", getIndexInfo(reader, numTerms > 0));

    Integer docId = params.getInt(DOC_ID);
    if (docId == null && params.get(ID) != null) {
      // Look for something with a given solr ID
      SchemaField uniqueKey = schema.getUniqueKeyField();
      String v = uniqueKey.getType().toInternal(params.get(ID));
      Term t = new Term(uniqueKey.getName(), v);
      docId = searcher.getFirstMatch(t);
      if (docId < 0) {
        throw new SolrException(
            SolrException.ErrorCode.NOT_FOUND, "Can't find document: " + params.get(ID));
      }
    }

    // Read the document from the index
    if (docId != null) {
      Document doc = null;
      try {
        doc = reader.document(docId);
      } catch (Exception ex) {
        // ignore: a missing or unreadable document is reported by the null check below
      }
      if (doc == null) {
        throw new SolrException(SolrException.ErrorCode.NOT_FOUND, "Can't find document: " + docId);
      }

      SimpleOrderedMap<Object> info = getDocumentFieldsInfo(doc, docId, reader, schema);

      SimpleOrderedMap<Object> docinfo = new SimpleOrderedMap<Object>();
      docinfo.add("docId", docId);
      docinfo.add("lucene", info);
      docinfo.add("solr", doc);
      rsp.add("doc", docinfo);
    } else if ("schema".equals(params.get("show"))) {
      rsp.add("schema", getSchemaInfo(req.getSchema()));
    } else {
      // If no doc is given, show all fields and top terms
      Set<String> fields = null;
      if (params.get(CommonParams.FL) != null) {
        fields = new HashSet<String>();
        for (String f : params.getParams(CommonParams.FL)) {
          fields.add(f);
        }
      }
      rsp.add("fields", getIndexedFieldsInfo(searcher, fields, numTerms));
    }

    // Add some generally helpful information
    NamedList<Object> info = new SimpleOrderedMap<Object>();
    info.add("key", getFieldFlagsKey());
    info.add(
        "NOTE",
        "Document Frequency (df) is not updated when a document is marked for deletion.  df values include deleted documents.");
    rsp.add("info", info);
    rsp.setHttpCaching(false);
  }