Example #1
  private void loadFiles() throws IOException {
    File rawDir = new File(ioConf.getRawDir());

    if (!rawDir.exists() || !rawDir.isDirectory()) {
      throw new IOException(rawDir.getAbsolutePath() + " is not a valid directory");
    }

    Doc doc;
    File parentDir;
    File anotFile;
    for (File d : rawDir.listFiles()) {
      if (d.isDirectory()) {
        for (File f : d.listFiles()) {
          doc = XMLDoc.readXML(f);

          parentDir = new File(ioConf.getParsedDir(), doc.getParentDirName());
          anotFile = new File(parentDir, doc.getAnotFileName());
          doc.anotFile = anotFile;

          documents.add(doc);

          // addAnnotation(doc);
        }
      }
    }
  }
Example #2
  public static Map<Comparable, Grp> groupBy(Collection<Doc> docs, String field) {
    Map<Comparable, Grp> groups = new HashMap<Comparable, Grp>();
    for (Doc doc : docs) {
      List<Comparable> vals = doc.getValues(field);
      if (vals == null) {
        Grp grp = groups.get(null);
        if (grp == null) {
          grp = new Grp();
          grp.groupValue = null;
          grp.docs = new ArrayList<Doc>();
          groups.put(null, grp);
        }
        grp.docs.add(doc);
      } else {
        for (Comparable val : vals) {

          Grp grp = groups.get(val);
          if (grp == null) {
            grp = new Grp();
            grp.groupValue = val;
            grp.docs = new ArrayList<Doc>();
            groups.put(grp.groupValue, grp);
          }
          grp.docs.add(doc);
        }
      }
    }
    return groups;
  }
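A minimal usage sketch (not part of the original example), assuming documents is a Collection<Doc> like the one built in Example #1 and that "author" is a field the docs may carry; docs without the field are collected under the null key.

  Map<Comparable, Grp> byAuthor = groupBy(documents, "author");
  for (Map.Entry<Comparable, Grp> entry : byAuthor.entrySet()) {
    System.out.println(entry.getKey() + ": " + entry.getValue().docs.size() + " docs");
  }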
Example #3
  @Test
  public void testInIdList() throws Exception {
    final Doc doc = new Doc();
    doc.id = 1;
    ds.save(doc);

    // this works
    ds.find(Doc.class).field("_id").equal(1).asList();

    final List<Long> idList = new ArrayList<Long>();
    idList.add(1L);
    // this causes an NPE
    ds.find(Doc.class).field("_id").in(idList).asList();
  }
Example #4
 /**
  * Return true if the given Doc should be included in the serialized form.
  *
  * @param doc the Doc object to check for serializability.
  */
 private static boolean serialDocInclude(Doc doc) {
   if (doc.isEnum()) {
     return false;
   }
   Tag[] serial = doc.tags("serial");
   if (serial.length > 0) {
     String serialtext = StringUtils.toLowerCase(serial[0].text());
     if (serialtext.indexOf("exclude") >= 0) {
       return false;
     } else if (serialtext.indexOf("include") >= 0) {
       return true;
     }
   }
   return true;
 }
Example #5
 public void addAnnotation(Doc doc) throws IOException {
   File parentDir = new File(ioConf.getParsedDir(), doc.getParentDirName());
   File anotFile = new File(parentDir, doc.getAnotFileName());
   if (!anotFile.exists()) {
     System.err.println("Generating annotation for " + doc.f.getName());
     genAnnotation(doc);
     saveAnnotation(doc, anotFile);
   } else {
     try {
       doc.setAno(ParsedDocReader.read(anotFile));
     } catch (IOException e) {
       System.err.println("Generating annotation for " + doc.f.getName());
       genAnnotation(doc);
     }
   }
 }
Example #6
 /**
  * Determine if the program element is shown, according to the given level of visibility.
  *
  * @param doc The given program element.
  * @param visLevel The desired visibility level; "public", "protected", "package" or "private". If
  *     null, only check for an exclude tag.
  * @return true if this element is shown.
  */
 public boolean shownElement(Doc doc, String visLevel) {
   // If a doc block contains @exclude or a similar such tag,
   // then don't display it.
   if (doExclude && excludeTag != null && doc != null) {
     String rct = doc.getRawCommentText();
     if (rct != null && rct.indexOf(excludeTag) != -1) {
       return false;
     }
   }
   if (visLevel == null) {
     return true;
   }
   ProgramElementDoc ped = null;
   if (doc instanceof ProgramElementDoc) {
     ped = (ProgramElementDoc) doc;
   }
   if (visLevel.compareTo("private") == 0) return true;
   // Show all that is not private
   if (visLevel.compareTo("package") == 0) return !ped.isPrivate();
   // Show all that is not private or package
   if (visLevel.compareTo("protected") == 0) return !(ped.isPrivate() || ped.isPackagePrivate());
   // Show all that is not private or package or protected,
   // i.e. all that is public
   if (visLevel.compareTo("public") == 0) return ped.isPublic();
   return false;
 } // shownElement()
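A hedged illustration of how this visibility filter might be applied when emitting class members; classDoc and emit() are assumptions, not part of the original source.

  // Hypothetical caller: only emit methods visible at the "protected" level or wider.
  for (MethodDoc md : classDoc.methods()) {
    if (shownElement(md, "protected")) {
      emit(md); // assumed output hook
    }
  }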
Example #7
 /**
  * Get C_BPartner_Location_ID
  *
  * @return BPartner Location
  */
 public int getC_BPartner_Location_ID() {
   int index = p_po.get_ColumnIndex("C_BPartner_Location_ID");
   if (index != -1) {
     Integer ii = (Integer) p_po.get_Value(index);
     if (ii != null) return ii.intValue();
   }
   return m_doc.getC_BPartner_Location_ID();
 } //	getC_BPartner_Location_ID
Example #8
  /** {@inheritDoc} */
  public Content seeTagOutput(Doc holder, SeeTag[] seeTags) {
    ContentBuilder body = new ContentBuilder();
    if (seeTags.length > 0) {
      for (int i = 0; i < seeTags.length; ++i) {
        appendSeparatorIfNotEmpty(body);
        body.addContent(htmlWriter.seeTagToContent(seeTags[i]));
      }
    }
    if (holder.isField()
        && ((FieldDoc) holder).constantValue() != null
        && htmlWriter instanceof ClassWriterImpl) {
      // Automatically add link to constant values page for constant fields.
      appendSeparatorIfNotEmpty(body);
      DocPath constantsPath = htmlWriter.pathToRoot.resolve(DocPaths.CONSTANT_VALUES);
      String whichConstant =
          ((ClassWriterImpl) htmlWriter).getClassDoc().qualifiedName()
              + "."
              + ((FieldDoc) holder).name();
      DocLink link = constantsPath.fragment(whichConstant);
      body.addContent(
          htmlWriter.getHyperLink(
              link, new StringContent(configuration.getText("doclet.Constants_Summary"))));
    }
    if (holder.isClass() && ((ClassDoc) holder).isSerializable()) {
      // Automatically add link to serialized form page for serializable classes.
      if ((SerializedFormBuilder.serialInclude(holder)
          && SerializedFormBuilder.serialInclude(((ClassDoc) holder).containingPackage()))) {
        appendSeparatorIfNotEmpty(body);
        DocPath serialPath = htmlWriter.pathToRoot.resolve(DocPaths.SERIALIZED_FORM);
        DocLink link = serialPath.fragment(((ClassDoc) holder).qualifiedName());
        body.addContent(
            htmlWriter.getHyperLink(
                link, new StringContent(configuration.getText("doclet.Serialized_Form"))));
      }
    }
    if (body.isEmpty()) return body;

    ContentBuilder result = new ContentBuilder();
    result.addContent(
        HtmlTree.DT(
            HtmlTree.SPAN(
                HtmlStyle.seeLabel, new StringContent(configuration.getText("doclet.See_Also")))));
    result.addContent(HtmlTree.DD(body));
    return result;
  }
Example #9
 /**
  * Get Document Date
  *
  * @return document date
  */
 public Timestamp getDateDoc() {
   if (m_DateDoc != null) return m_DateDoc;
   int index = p_po.get_ColumnIndex("DateDoc");
   if (index != -1) {
     m_DateDoc = (Timestamp) p_po.get_Value(index);
     if (m_DateDoc != null) return m_DateDoc;
   }
   m_DateDoc = m_doc.getDateDoc();
   return m_DateDoc;
 } //  getDateDoc
Example #10
 /**
  * Get Accounting Date
  *
  * @return accounting date
  */
 public Timestamp getDateAcct() {
   if (m_DateAcct != null) return m_DateAcct;
   int index = p_po.get_ColumnIndex("DateAcct");
   if (index != -1) {
     m_DateAcct = (Timestamp) p_po.get_Value(index);
     if (m_DateAcct != null) return m_DateAcct;
   }
   m_DateAcct = m_doc.getDateAcct();
   return m_DateAcct;
 } //  getDateAcct
Example #11
  /** {@inheritDoc} */
  public TagletOutput getTagletOutput(Doc holder, TagletWriter writer) {
    Type returnType = ((MethodDoc) holder).returnType();
    Tag[] tags = holder.tags(name);

    // Make sure we are not using @return tag on method with void return type.
    if (returnType.isPrimitive() && returnType.typeName().equals("void")) {
      if (tags.length > 0) {
        writer.getMsgRetriever().warning(holder.position(), "doclet.Return_tag_on_void_method");
      }
      return null;
    }
    // Inherit @return tag if necessary.
    if (tags.length == 0) {
      DocFinder.Output inheritedDoc =
          DocFinder.search(new DocFinder.Input((MethodDoc) holder, this));
      tags = inheritedDoc.holderTag == null ? tags : new Tag[] {inheritedDoc.holderTag};
    }
    return tags.length > 0 ? writer.returnTagOutput(tags[0]) : null;
  }
Example #12
 /**
  * Get BPartner
  *
  * @return C_BPartner_ID
  */
 public int getC_BPartner_ID() {
   if (m_C_BPartner_ID == -1) {
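      // Lazily resolved: read the C_BPartner_ID column from the persistent object first, then fall back to the document header.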
     int index = p_po.get_ColumnIndex("C_BPartner_ID");
     if (index != -1) {
       Integer ii = (Integer) p_po.get_Value(index);
       if (ii != null) m_C_BPartner_ID = ii.intValue();
     }
     if (m_C_BPartner_ID <= 0) m_C_BPartner_ID = m_doc.getC_BPartner_ID();
   }
   return m_C_BPartner_ID;
 } //  getC_BPartner_ID
Example #13
 /**
  * Line Account from Product (or Charge).
  *
  * @param AcctType see ProductCost.ACCTTYPE_* (0..3)
  * @param as Accounting schema
  * @return Requested Product Account
  */
 public MAccount getAccount(int AcctType, MAcctSchema as) {
   //	Charge Account
   if (getM_Product_ID() == 0 && getC_Charge_ID() != 0) {
     BigDecimal amt = new BigDecimal(-1); // 	Revenue (-)
     if (!m_doc.isSOTrx()) amt = new BigDecimal(+1); // 	Expense (+)
     MAccount acct = getChargeAccount(as, amt);
     if (acct != null) return acct;
   }
   //	Product Account
   return getProductCost().getAccount(AcctType, as);
 } //  getAccount
Example #14
  private Map<Doc, String[]> linkModels() {
    // Mapping of doc names to models
    Map<Doc, String[]> map = new HashMap<Doc, String[]>();

    File modelDir = new File(ioConf.getModelDir());

    String prefix, suffix;
    for (Doc d : documents) {
      // Set model prefix/suffix
      prefix = d.getParentDirName();
      prefix = prefix.substring(0, prefix.length() - 1).toUpperCase();

      suffix = d.f.getName();
      suffix = suffix.substring(suffix.lastIndexOf(".") + 1);

      // Add doc/models to the map
      map.put(d, modelDir.list(new ModelFileFilter(prefix, suffix)));
    }
    return map;
  }
Example #15
 private void genAnnotation(Doc doc) {
   // Create the pipeline if it doesn't yet exist
   if (pipeline == null) {
     Properties props = new Properties();
     props.put("annotators", conf.getAnnotators());
     pipeline = new StanfordCoreNLP(props);
   }
   Annotation anot = new Annotation(doc.cont);
   pipeline.annotate(anot);
   doc.setAno(anot);
 }
Example #16
  private void saveAnnotation(Doc doc, File outFile) throws IOException {
    // Create the parsed output parent directory
    File parentDir = outFile.getParentFile();
    if (!parentDir.exists()) {
      if (!parentDir.mkdirs()) {
        throw new IOException("Unable to create parsed output directory: " + parentDir);
      }
    }

    try {
      ParsedDocWriter.writeOutput(doc.getAno(), outFile);
    } catch (IOException e) {
      e.printStackTrace();
    }
  }
Example #17
  Map<Comparable, Set<Comparable>> createJoinMap(
      Map<Comparable, Doc> model, String fromField, String toField) {
    Map<Comparable, Set<Comparable>> id_to_id = new HashMap<Comparable, Set<Comparable>>();

    Map<Comparable, List<Comparable>> value_to_id = invertField(model, toField);
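    // value_to_id maps each value of toField to the ids of the docs containing that value;
    // the loop below unions, for every source doc, the target ids reachable through any of its fromField values.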

    for (Comparable fromId : model.keySet()) {
      Doc doc = model.get(fromId);
      List<Comparable> vals = doc.getValues(fromField);
      if (vals == null) continue;
      for (Comparable val : vals) {
        List<Comparable> toIds = value_to_id.get(val);
        if (toIds == null) continue;
        Set<Comparable> ids = id_to_id.get(fromId);
        if (ids == null) {
          ids = new HashSet<Comparable>();
          id_to_id.put(fromId, ids);
        }
        for (Comparable toId : toIds) ids.add(toId);
      }
    }

    return id_to_id;
  }
Example #18
 /**
  * Return true if the given Doc is deprecated.
  *
  * @param doc the Doc to check.
  * @return true if the given Doc is deprecated.
  */
 public static boolean isDeprecated(Doc doc) {
   if (doc.tags("deprecated").length > 0) {
     return true;
   }
   AnnotationDesc[] annotationDescList;
   if (doc instanceof PackageDoc) annotationDescList = ((PackageDoc) doc).annotations();
   else annotationDescList = ((ProgramElementDoc) doc).annotations();
   for (int i = 0; i < annotationDescList.length; i++) {
     if (annotationDescList[i]
         .annotationType()
         .qualifiedName()
         .equals(java.lang.Deprecated.class.getName())) {
       return true;
     }
   }
   return false;
 }
Example #19
 protected void printIndexComment(Doc member, Tag[] firstSentenceTags) {
   Tag[] deprs = member.tags("deprecated");
   if (Util.isDeprecated((ProgramElementDoc) member)) {
     boldText("doclet.Deprecated");
     space();
     if (deprs.length > 0) {
       printInlineDeprecatedComment(member, deprs[0]);
     }
     return;
   } else {
     ClassDoc cd = ((ProgramElementDoc) member).containingClass();
     if (cd != null && Util.isDeprecated(cd)) {
       boldText("doclet.Deprecated");
       space();
     }
   }
   printSummaryComment(member, firstSentenceTags);
 }
Example #20
 /** {@inheritDoc} */
 public Content deprecatedTagOutput(Doc doc) {
   ContentBuilder result = new ContentBuilder();
   Tag[] deprs = doc.tags("deprecated");
   if (doc instanceof ClassDoc) {
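      // Class case: emit the Deprecated label plus the first @deprecated comment, if present.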
     if (Util.isDeprecated((ProgramElementDoc) doc)) {
       result.addContent(
           HtmlTree.SPAN(
               HtmlStyle.deprecatedLabel,
               new StringContent(configuration.getText("doclet.Deprecated"))));
       result.addContent(RawHtml.nbsp);
       if (deprs.length > 0) {
         Tag[] commentTags = deprs[0].inlineTags();
         if (commentTags.length > 0) {
           result.addContent(commentTagsToOutput(null, doc, deprs[0].inlineTags(), false));
         }
       }
     }
   } else {
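      // Member case: emit the label and deprecation comment; if only the containing class is deprecated, emit just the label.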
     MemberDoc member = (MemberDoc) doc;
     if (Util.isDeprecated((ProgramElementDoc) doc)) {
       result.addContent(
           HtmlTree.SPAN(
               HtmlStyle.deprecatedLabel,
               new StringContent(configuration.getText("doclet.Deprecated"))));
       result.addContent(RawHtml.nbsp);
       if (deprs.length > 0) {
         Content body = commentTagsToOutput(null, doc, deprs[0].inlineTags(), false);
         if (!body.isEmpty()) result.addContent(HtmlTree.SPAN(HtmlStyle.deprecationComment, body));
       }
     } else {
       if (Util.isDeprecated(member.containingClass())) {
         result.addContent(
             HtmlTree.SPAN(
                 HtmlStyle.deprecatedLabel,
                 new StringContent(configuration.getText("doclet.Deprecated"))));
         result.addContent(RawHtml.nbsp);
       }
     }
   }
   return result;
 }
Example #21
 protected void printIndexComment(Doc member) {
   printIndexComment(member, member.firstSentenceTags());
 }
Example #22
  @Test
  public void testRandomGrouping() throws Exception {
    try {
      int indexIter = 50 * RANDOM_MULTIPLIER; // make >0 to enable test
      int queryIter = 100 * RANDOM_MULTIPLIER;

      while (--indexIter >= 0) {

        int indexSize = random.nextInt(25 * RANDOM_MULTIPLIER);
        List<FldType> types = new ArrayList<FldType>();
        types.add(new FldType("id", ONE_ONE, new SVal('A', 'Z', 4, 4)));
        types.add(
            new FldType("score_s1", ONE_ONE, new SVal('a', 'c', 1, 1))); // field used to score
        types.add(new FldType("bar_s1", ONE_ONE, new SVal('a', 'z', 3, 5)));
        types.add(new FldType(FOO_STRING_FIELD, ONE_ONE, new SVal('a', 'z', 1, 2)));
        types.add(
            new FldType(
                SMALL_STRING_FIELD, ZERO_ONE, new SVal('a', (char) ('c' + indexSize / 10), 1, 1)));

        clearIndex();
        Map<Comparable, Doc> model = indexDocs(types, null, indexSize);

        // test with specific docs
        if (false) {
          clearIndex();
          model.clear();
          Doc d1 = createDoc(types);
          d1.getValues(SMALL_STRING_FIELD).set(0, "c");
          d1.getValues(SMALL_INT_FIELD).set(0, 5);
          d1.order = 0;
          updateJ(toJSON(d1), params("commit", "true"));
          model.put(d1.id, d1);

          d1 = createDoc(types);
          d1.getValues(SMALL_STRING_FIELD).set(0, "b");
          d1.getValues(SMALL_INT_FIELD).set(0, 5);
          d1.order = 1;
          updateJ(toJSON(d1), params("commit", "false"));
          model.put(d1.id, d1);

          d1 = createDoc(types);
          d1.getValues(SMALL_STRING_FIELD).set(0, "c");
          d1.getValues(SMALL_INT_FIELD).set(0, 5);
          d1.order = 2;
          updateJ(toJSON(d1), params("commit", "false"));
          model.put(d1.id, d1);

          d1 = createDoc(types);
          d1.getValues(SMALL_STRING_FIELD).set(0, "c");
          d1.getValues(SMALL_INT_FIELD).set(0, 5);
          d1.order = 3;
          updateJ(toJSON(d1), params("commit", "false"));
          model.put(d1.id, d1);

          d1 = createDoc(types);
          d1.getValues(SMALL_STRING_FIELD).set(0, "b");
          d1.getValues(SMALL_INT_FIELD).set(0, 2);
          d1.order = 4;
          updateJ(toJSON(d1), params("commit", "true"));
          model.put(d1.id, d1);
        }

        for (int qiter = 0; qiter < queryIter; qiter++) {
          String groupField = types.get(random.nextInt(types.size())).fname;

          int rows =
              random.nextInt(10) == 0 ? random.nextInt(model.size() + 2) : random.nextInt(11) - 1;
          int start =
              random.nextInt(5) == 0
                  ? random.nextInt(model.size() + 2)
                  : random.nextInt(5); // pick a small start normally for better coverage
          int group_limit =
              random.nextInt(10) == 0 ? random.nextInt(model.size() + 2) : random.nextInt(11) - 1;
          int group_offset =
              random.nextInt(10) == 0
                  ? random.nextInt(model.size() + 2)
                  : random.nextInt(2); // pick a small start normally for better coverage

          String[] stringSortA = new String[1];
          Comparator<Doc> sortComparator = createSort(h.getCore().getSchema(), types, stringSortA);
          String sortStr = stringSortA[0];
          Comparator<Doc> groupComparator =
              random.nextBoolean()
                  ? sortComparator
                  : createSort(h.getCore().getSchema(), types, stringSortA);
          String groupSortStr = stringSortA[0];

          // since groupSortStr defaults to sortStr, we need to normalize null to "score desc" if
          // sortStr != null.
          if (groupSortStr == null && groupSortStr != sortStr) {
            groupSortStr = "score desc";
          }

          // Test specific case
          if (false) {
            groupField = SMALL_INT_FIELD;
            sortComparator =
                createComparator(
                    Arrays.asList(createComparator(SMALL_STRING_FIELD, true, true, false, true)));
            sortStr = SMALL_STRING_FIELD + " asc";
            groupComparator =
                createComparator(
                    Arrays.asList(createComparator(SMALL_STRING_FIELD, true, true, false, false)));
            groupSortStr = SMALL_STRING_FIELD + " asc";
            rows = 1;
            start = 0;
            group_offset = 1;
            group_limit = 1;
          }

          Map<Comparable, Grp> groups = groupBy(model.values(), groupField);

          // first sort the docs in each group
          for (Grp grp : groups.values()) {
            Collections.sort(grp.docs, groupComparator);
          }

          // now sort the groups

          // if sort != group.sort, we need to find the max doc by "sort"
          if (groupComparator != sortComparator) {
            for (Grp grp : groups.values()) grp.setMaxDoc(sortComparator);
          }

          List<Grp> sortedGroups = new ArrayList<Grp>(groups.values());
          Collections.sort(
              sortedGroups,
              groupComparator == sortComparator
                  ? createFirstDocComparator(sortComparator)
                  : createMaxDocComparator(sortComparator));

          boolean includeNGroups = random.nextBoolean();
          Object modelResponse =
              buildGroupedResult(
                  h.getCore().getSchema(),
                  sortedGroups,
                  start,
                  rows,
                  group_offset,
                  group_limit,
                  includeNGroups);

          boolean truncateGroups = random.nextBoolean();
          Map<String, Integer> facetCounts = new TreeMap<String, Integer>();
          if (truncateGroups) {
            for (Grp grp : sortedGroups) {
              Doc doc = grp.docs.get(0);
              if (doc.getValues(FOO_STRING_FIELD) == null) {
                continue;
              }

              String key = doc.getFirstValue(FOO_STRING_FIELD).toString();
              boolean exists = facetCounts.containsKey(key);
              int count = exists ? facetCounts.get(key) : 0;
              facetCounts.put(key, ++count);
            }
          } else {
            for (Doc doc : model.values()) {
              if (doc.getValues(FOO_STRING_FIELD) == null) {
                continue;
              }

              for (Comparable field : doc.getValues(FOO_STRING_FIELD)) {
                String key = field.toString();
                boolean exists = facetCounts.containsKey(key);
                int count = exists ? facetCounts.get(key) : 0;
                facetCounts.put(key, ++count);
              }
            }
          }
          List<Comparable> expectedFacetResponse = new ArrayList<Comparable>();
          for (Map.Entry<String, Integer> stringIntegerEntry : facetCounts.entrySet()) {
            expectedFacetResponse.add(stringIntegerEntry.getKey());
            expectedFacetResponse.add(stringIntegerEntry.getValue());
          }

          int randomPercentage = random.nextInt(101);
          // TODO: create a random filter too
          SolrQueryRequest req =
              req(
                  "group",
                  "true",
                  "wt",
                  "json",
                  "indent",
                  "true",
                  "echoParams",
                  "all",
                  "q",
                  "{!func}score_f",
                  "group.field",
                  groupField,
                  sortStr == null ? "nosort" : "sort",
                  sortStr == null ? "" : sortStr,
                  (groupSortStr == null || groupSortStr == sortStr) ? "noGroupsort" : "group.sort",
                  groupSortStr == null ? "" : groupSortStr,
                  "rows",
                  "" + rows,
                  "start",
                  "" + start,
                  "group.offset",
                  "" + group_offset,
                  "group.limit",
                  "" + group_limit,
                  GroupParams.GROUP_CACHE_PERCENTAGE,
                  Integer.toString(randomPercentage),
                  GroupParams.GROUP_TOTAL_COUNT,
                  includeNGroups ? "true" : "false",
                  "facet",
                  "true",
                  "facet.sort",
                  "index",
                  "facet.limit",
                  "-1",
                  "facet.field",
                  FOO_STRING_FIELD,
                  GroupParams.GROUP_TRUNCATE,
                  truncateGroups ? "true" : "false",
                  "facet.mincount",
                  "1");

          String strResponse = h.query(req);

          Object realResponse = ObjectBuilder.fromJSON(strResponse);
          String err = JSONTestUtil.matchObj("/grouped/" + groupField, realResponse, modelResponse);
          if (err != null) {
            log.error(
                "GROUPING MISMATCH: "
                    + err
                    + "\n\trequest="
                    + req
                    + "\n\tresult="
                    + strResponse
                    + "\n\texpected="
                    + JSONUtil.toJSON(modelResponse)
                    + "\n\tsorted_model="
                    + sortedGroups);

            // re-execute the request... good for putting a breakpoint here for debugging
            String rsp = h.query(req);

            fail(err);
          }

          // assert post / pre grouping facets
          err =
              JSONTestUtil.matchObj(
                  "/facet_counts/facet_fields/" + FOO_STRING_FIELD,
                  realResponse,
                  expectedFacetResponse);
          if (err != null) {
            log.error(
                "GROUPING MISMATCH: "
                    + err
                    + "\n\trequest="
                    + req
                    + "\n\tresult="
                    + strResponse
                    + "\n\texpected="
                    + JSONUtil.toJSON(expectedFacetResponse));

            // re-execute the request... good for putting a breakpoint here for debugging
            h.query(req);
            fail(err);
          }
        } // end query iter
      } // end index iter
    } finally {
      // Because the facet.field is also used for grouping, we have to purge the FieldCache to avoid FieldCache insanity
      FieldCache.DEFAULT.purgeAllCaches();
    }
  }
Example #23
 /**
  * Given a <code>Doc</code>, return an anchor name for it.
  *
  * @param d the <code>Doc</code> to check.
  * @return the name of the anchor.
  */
 public static String getAnchorName(Doc d) {
   return "line." + d.position().line();
 }
Example #24
 /**
  * Generates a simple &lt;doc&gt;... XML String with no options
  *
  * @param fieldsAndValues even-numbered args (0, 2, ...) are field names; odd-numbered args are field values.
  * @see TestHarness#makeSimpleDoc
  */
 public Doc doc(String... fieldsAndValues) {
   Doc d = new Doc();
   d.xml = TestHarness.makeSimpleDoc(fieldsAndValues).toString();
   return d;
 }
Example #25
 private List<Field> parseFields(List nodes, DocResult outerDoc, Node parent) {
   List<Field> fields = new ArrayList<Field>(nodes.size());
   for (Element node : (List<Element>) nodes) {
     Field field = new Field();
     String name = node.attributeValue("name");
     field.setName(name);
     field.setParent(parent);
     DocResult docResult = parseDoc(node, field);
     String type = node.attributeValue("type");
     field.setType(type);
     field.setRequired(!"optional".equals(node.attributeValue("required")));
     String index = node.attributeValue("index");
     if (index != null) {
       field.setIndex(Integer.parseInt(index));
     }
     String nodeName = node.getName();
     if ("const".equals(nodeName)) { // 常量
       field.setValue(node.element("value").getStringValue());
     } else if ("item".equals(nodeName)) { // 枚举项
       String value = node.attributeValue("value");
       field.setIndex(Integer.parseInt(value)); // 设置枚举项的index为对应value
       field.setValue(value);
     } else if ("ex".equals(nodeName)) { // 异常
       Doc fieldDoc = field.getDoc();
       if (fieldDoc == DocResult.NULL) {
         field.setDoc(fieldDoc = new Doc());
       }
        for (Map.Entry<String, String> entry : docResult.getErrors().entrySet()) { // write the exception info from the doc into the field's tags
         fieldDoc.putTag(entry.getKey(), entry.getValue());
       }
        if (outerDoc != null) { // the method doc is present
          String outerEx = outerDoc.getError(name); // read the exception description from the method doc
          if (StringUtils.isEmpty(fieldDoc.getDesc()) && outerEx != null) { // if this field's own doc has no description, use the method's
           fieldDoc.setDesc(outerEx);
         }
         if ("Type.AnyException"
             .equals(type)) { // 如果是通用的AnyException,则将外部定义的类似@error 315 无效用户状态写入属性
           for (Map.Entry<String, String> entry : outerDoc.getErrors().entrySet()) {
             String key = entry.getKey();
             if (!key.endsWith("Exception")) { // 若果是Exception说明不是一个异常代码
               fieldDoc.putTag(entry.getKey(), entry.getValue());
             }
           }
         }
       }
     } else {
       field.setValue(node.attributeValue("default"));
     }
     if (outerDoc != null) {
        Doc outerFieldDoc = outerDoc.getFidleDoc(field.getName()); // check whether the outer doc defines documentation for this field
       if (outerFieldDoc != null) {
         Doc fieldDoc = field.getDoc();
         if (fieldDoc == DocResult.NULL) {
           field.setDoc(outerFieldDoc);
         } else {
           if (StringUtils.isEmpty(fieldDoc.getDesc())) {
             fieldDoc.setDesc(outerFieldDoc.getDesc());
           }
           for (Map.Entry<String, String> entry : outerFieldDoc.getTags().entrySet()) {
              fieldDoc.putTagIfAbsent(entry.getKey(), entry.getValue()); // inner comments take precedence by default
           }
         }
       }
     }
     fields.add(field);
   }
   return fields;
 }
Example #26
  @Test
  public void testRandomJoin() throws Exception {
    int indexIter = 50 * RANDOM_MULTIPLIER;
    int queryIter = 50 * RANDOM_MULTIPLIER;

    // groups of fields that have any chance of matching... used to
    // increase test effectiveness by avoiding 0 resultsets much of the time.
    String[][] compat =
        new String[][] {
          {"small_s", "small2_s", "small2_ss", "small3_ss"},
          {"small_i", "small2_i", "small2_is", "small3_is"}
        };

    while (--indexIter >= 0) {
      int indexSize = random().nextInt(20 * RANDOM_MULTIPLIER);

      List<FldType> types = new ArrayList<FldType>();
      types.add(new FldType("id", ONE_ONE, new SVal('A', 'Z', 4, 4)));
      types.add(new FldType("score_f", ONE_ONE, new FVal(1, 100))); // field used to score
      types.add(
          new FldType("small_s", ZERO_ONE, new SVal('a', (char) ('c' + indexSize / 3), 1, 1)));
      types.add(
          new FldType("small2_s", ZERO_ONE, new SVal('a', (char) ('c' + indexSize / 3), 1, 1)));
      types.add(
          new FldType("small2_ss", ZERO_TWO, new SVal('a', (char) ('c' + indexSize / 3), 1, 1)));
      types.add(new FldType("small3_ss", new IRange(0, 25), new SVal('A', 'z', 1, 1)));
      types.add(new FldType("small_i", ZERO_ONE, new IRange(0, 5 + indexSize / 3)));
      types.add(new FldType("small2_i", ZERO_ONE, new IRange(0, 5 + indexSize / 3)));
      types.add(new FldType("small2_is", ZERO_TWO, new IRange(0, 5 + indexSize / 3)));
      types.add(new FldType("small3_is", new IRange(0, 25), new IRange(0, 100)));

      clearIndex();
      Map<Comparable, Doc> model = indexDocs(types, null, indexSize);
      Map<String, Map<Comparable, Set<Comparable>>> pivots =
          new HashMap<String, Map<Comparable, Set<Comparable>>>();

      for (int qiter = 0; qiter < queryIter; qiter++) {
        String fromField;
        String toField;
        if (random().nextInt(100) < 5) {
          // pick random fields 5% of the time
          fromField = types.get(random().nextInt(types.size())).fname;
          // pick the same field 50% of the time we pick a random field (since other fields won't
          // match anything)
          toField =
              (random().nextInt(100) < 50)
                  ? fromField
                  : types.get(random().nextInt(types.size())).fname;
        } else {
          // otherwise, pick compatible fields that have a chance of matching indexed tokens
          String[] group = compat[random().nextInt(compat.length)];
          fromField = group[random().nextInt(group.length)];
          toField = group[random().nextInt(group.length)];
        }

        Map<Comparable, Set<Comparable>> pivot = pivots.get(fromField + "/" + toField);
        if (pivot == null) {
          pivot = createJoinMap(model, fromField, toField);
          pivots.put(fromField + "/" + toField, pivot);
        }

        Collection<Doc> fromDocs = model.values();
        Set<Comparable> docs = join(fromDocs, pivot);
        List<Doc> docList = new ArrayList<Doc>(docs.size());
        for (Comparable id : docs) docList.add(model.get(id));
        Collections.sort(docList, createComparator("_docid_", true, false, false, false));
        List sortedDocs = new ArrayList();
        for (Doc doc : docList) {
          if (sortedDocs.size() >= 10) break;
          sortedDocs.add(doc.toObject(h.getCore().getLatestSchema()));
        }

        Map<String, Object> resultSet = new LinkedHashMap<String, Object>();
        resultSet.put("numFound", docList.size());
        resultSet.put("start", 0);
        resultSet.put("docs", sortedDocs);

        // todo: use different join queries for better coverage

        SolrQueryRequest req =
            req(
                "wt",
                "json",
                "indent",
                "true",
                "echoParams",
                "all",
                "q",
                "{!join from="
                    + fromField
                    + " to="
                    + toField
                    + (random().nextInt(4) == 0 ? " fromIndex=collection1" : "")
                    + "}*:*");

        String strResponse = h.query(req);

        Object realResponse = ObjectBuilder.fromJSON(strResponse);
        String err = JSONTestUtil.matchObj("/response", realResponse, resultSet);
        if (err != null) {
          log.error(
              "JOIN MISMATCH: "
                  + err
                  + "\n\trequest="
                  + req
                  + "\n\tresult="
                  + strResponse
                  + "\n\texpected="
                  + JSONUtil.toJSON(resultSet)
                  + "\n\tmodel="
                  + JSONUtil.toJSON(model));

          // re-execute the request... good for putting a breakpoint here for debugging
          String rsp = h.query(req);

          fail(err);
        }
      }
    }
  }
Example #27
 private void generateSummary(Doc d, Summerizer s) {
   d.summary = s.summary();
 }
Example #28
  public RankedResults[] retrieveRankedDocuments(String query) {

    String[] terms = getQueryTerms(query);
    Map<Integer, Doc> documentList = obIndex.getDocsList();

    double avgDocLen = obIndex.getAvgDocLength();
    long collectionSize = obIndex.getTotalNoFiles();

    Map<Integer, Double> weight1Map = new TreeMap<Integer, Double>();
    Map<Integer, Double> weight2Map = new TreeMap<Integer, Double>();

    for (String term : terms) {
      PostingList postingList = obIndex.getPostingList(term);

      if (postingList == null) {
        System.out.println("Term: " + term + " doesn't exist in document collection");
        continue;
      }

      int df = postingList.getNumberofDocs();

      for (PostingListCell postingListCell : postingList.getDocsList()) {

        Doc doc = documentList.get(postingListCell.getDocId());

        int maxTf = doc.getMaxTermFrequency();
        int docLen = doc.getTotalTerms();

        int tf = postingListCell.getStemFreq();
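        // w1: tf normalized by the document's maximum term frequency, scaled by a log-based
        // idf factor, log(N/df) / log(N); w2 (below) normalizes tf by document length relative
        // to the collection average and applies the same idf factor. Each document accumulates
        // these weights across all query terms.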

        double w1 =
            (0.4 + 0.6 * Math.log(tf + 0.5) / Math.log(maxTf + 0.5))
                * (Math.log(collectionSize / df) / Math.log(collectionSize));

        double w2 =
            0.4
                + 0.6
                    * (tf / (tf + 0.5 + 1.5 * docLen / avgDocLen))
                    * (Math.log(collectionSize / df) / Math.log(collectionSize));

        int docId = doc.getDocumentId();

        if (!weight1Map.containsKey(docId)) {
          weight1Map.put(docId, 0.0);
        }
        weight1Map.put(docId, weight1Map.get(docId) + w1);

        if (!weight2Map.containsKey(docId)) {
          weight2Map.put(docId, 0.0);
        }
        weight2Map.put(docId, weight2Map.get(docId) + w2);
      }
    }

    Comparator<Map.Entry<Integer, Double>> comparator =
        new Comparator<Map.Entry<Integer, Double>>() {

          public int compare(Map.Entry<Integer, Double> o1, Map.Entry<Integer, Double> o2) {
            return o1.getValue().equals(o2.getValue())
                ? o1.getKey().compareTo(o2.getKey())
                : o2.getValue().compareTo(o1.getValue());
          }
        };

    List<Map.Entry<Integer, Double>> weight1List =
        new ArrayList<Map.Entry<Integer, Double>>(weight1Map.entrySet());
    List<Map.Entry<Integer, Double>> weight2List =
        new ArrayList<Map.Entry<Integer, Double>>(weight2Map.entrySet());

    Collections.sort(weight1List, comparator);
    Collections.sort(weight2List, comparator);

    RankedResults w1Results = new RankedResults();
    RankedResults w2Results = new RankedResults();

    for (int i = 0; i < 10; i++) {
      if (weight1List.size() > i) { // '>' guards against fewer than 10 ranked results
        Doc docForW1 = documentList.get(weight1List.get(i).getKey());
        w1Results.add(
            new Result(
                (i + 1),
                weight1List.get(i).getValue(),
                weight1List.get(i).getKey(),
                docForW1.getDocumentName(),
                docForW1.getDocumentTitle()));
      }

      if (weight2List.size() > i) {
        Doc docForW2 = documentList.get(weight2List.get(i).getKey());
        w2Results.add(
            new Result(
                (i + 1),
                weight2List.get(i).getValue(),
                weight2List.get(i).getKey(),
                docForW2.getDocumentName(),
                docForW2.getDocumentTitle()));
      }
    }

    return new RankedResults[] {w1Results, w2Results};
  }
Example #29
 public Doc docFieldData(int docId) {
   Doc docFieldData = cachedDocFieldData.get().get();
   docFieldData.setDocId(docId);
   return docFieldData;
 }
Example #30
File: ERXML.java Project: hypronet/wonder
 /**
  * Converts a W3C Document into an XML.Doc.
  *
  * @param w3cDocument the W3C Document
  * @return the equivalent XML.Doc
  */
 public static ERXML.Doc doc(org.w3c.dom.Document w3cDocument) {
   org.w3c.dom.Element w3cElement = w3cDocument.getDocumentElement();
   Doc doc = ERXML.doc();
   doc.setRoot(ERXML.e(w3cElement));
   return doc;
 }