Example 1
  /**
   * Returns docs containing ALL of the given sentiments (multiple sentiments are ANDed). Special
   * cases: sentiments can be an array of length 1 containing "None", in which case all documents
   * with no sentiments are returned; or an array of length 1 containing "all", in which case all
   * documents with any sentiment are returned. Returns null if sentiments is null or empty.
   *
   * @param captions the lexicon captions to match against (null/none = all)
   */
  public Collection<Document> getDocsWithSentiments(
      String[] sentiments,
      Indexer indexer,
      Collection<Document> docs,
      int cluster,
      boolean originalContentOnly,
      String... captions) {
    Collection<Document> result = null;
    // note: multiple sentiments are possible, they are ANDED
    if (sentiments == null || sentiments.length == 0) return result;

    Set<Document> docs_set = Util.castOrCloneAsSet(docs);
    if (sentiments.length == 1 && "all".equalsIgnoreCase(sentiments[0]))
      return getDocsWithAnyEmotions(indexer, docs_set, originalContentOnly);

    // note: we pass in null for docs and intersect with the given set of docs later;
    // otherwise we'd be intersecting again and again for each category and lexicon
    Map<String, Collection<Document>> map =
        getEmotions(indexer, null, false, originalContentOnly, captions);
    for (int i = 0; i < sentiments.length; i++) {
      Collection<Document> temp1 =
          ("None".equalsIgnoreCase(sentiments[i]))
              ? getDocsWithNoEmotions(indexer, docs_set, originalContentOnly)
              : map.get(sentiments[i]);
      if (temp1 == null) { // no matches, just return
        result = new LinkedHashSet<Document>();
        return result;
      }
      if (result == null) result = temp1;
      else result.retainAll(temp1);
    }
    // result.retainAll(docs);
    return Util.setIntersection(result, docs_set);
  }
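
A minimal usage sketch (hypothetical call site: the lexicon, indexer, and allDocs names and the
"anger"/"joy" captions are assumptions; the cluster argument is unused in the body shown above,
so -1 is passed as a placeholder):

  // find docs tagged with BOTH "anger" and "joy", over original message content only
  Collection<Document> matching =
      lexicon.getDocsWithSentiments(
          new String[] {"anger", "joy"}, indexer, allDocs, -1, true /* originalContentOnly */);
  // new String[] {"all"} would instead return docs with ANY sentiment,
  // and new String[] {"None"} docs with no sentiment at all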
Example 2
    public Lexicon1Lang(String filename) throws IOException {
      captionToRawQuery = new LinkedHashMap<String, String>();
      captionToExpandedQuery = new LinkedHashMap<String, String>();
      List<String> lines =
          Util.getLinesFromInputStream(
              new FileInputStream(filename),
              false /* ignore comment lines = false, we'll strip comments here */);
      for (String line : lines) {
        int idx = line.indexOf('#'); // strip everything after the comment char
        if (idx >= 0) line = line.substring(0, idx);
        line = line.trim();
        if (line.length() == 0) continue; // ignore blank lines
        StringTokenizer st = new StringTokenizer(line, ":");
        if (st.countTokens() != 2) {
          log.warn("line ignored: " + line);
          continue;
        }

        String caption = st.nextToken().trim();
        String query = st.nextToken().trim();
        String existingQuery = captionToRawQuery.get(caption);
        if (!Util.nullOrEmpty(existingQuery)) query = existingQuery + "|" + query;
        captionToRawQuery.put(caption, query);
      }
      expandQueries();
    }
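
From the parsing logic above, a lexicon file is a plain-text list of "caption: query" lines:
'#' starts a comment, blank lines are skipped, lines without exactly one ':' are ignored with a
warning, and a repeated caption is ORed into the earlier one with '|'. A small illustrative file
(captions and words invented for illustration):

  # sentiment lexicon: caption on the left, |-separated terms on the right
  joy: happy|delighted|wonderful
  anger: furious|outraged
  anger: livid          # merged with the earlier line into furious|outraged|livid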
Example 3
  private static void assignTypes(
      Collection<MemoryQuestion> questions, Map<String, NameInfo> nameMap) throws IOException {
    // assign categories. for efficient lookup, we assemble all the names we want to look up
    // into a single map and then do one bulk type lookup
    Map<String, NameInfo> map = new LinkedHashMap<String, NameInfo>();
    for (MemoryQuestion mq : questions) {
      String name = mq.correctAnswer;
      String cname = name.toLowerCase().trim();
      map.put(cname, nameMap.get(cname));
    }

    // now we do a bulk type lookup...
    NameTypes.readTypes(map);

    // ... and copy the types back into the mq's
    for (MemoryQuestion mq : questions)
      try {
        if (mq != null && mq.correctAnswer != null) {
          NameInfo ni = nameMap.get(mq.correctAnswer.toLowerCase().trim().replaceAll(" ", "_"));
          if (ni != null) mq.clue.clueStats.answerCategory = ni.type;
        }
      } catch (Exception e) {
        Util.print_exception("Error reading types for question: " + mq, e, log);
      }
  }
Example 4
 public String toHTMLString() {
   StringBuilder sb = new StringBuilder();
   for (Clue clue : clues) {
     sb.append("<tr><td><a href='")
         .append(link)
         .append("' target='_blank'>")
         .append(displayEntity)
         .append("</a></td><td>")
         .append(edu.stanford.muse.email.CalendarUtil.formatDateForDisplay(lastSeenDate))
         .append("</td><td>")
         .append(nMessages)
         .append("</td><td>")
         .append(nThreads)
         .append("</td><td>")
         .append(clue != null ? clue.clueStats.finalScore : "-")
         .append("</td></tr>")
         .append("<tr><td class=\"clue\" colspan=\"6\">")
         // close the stats <div> inside the non-null branch so the markup stays balanced
         // even when there is no clue
         .append(
             clue != null
                 ? (clue.clue
                     + "<br/><br/><div class=\"stats\"> stats: "
                     + Util.fieldsToString(clue.clueStats, false)
                     + "</div>")
                 : "No clue")
         .append("<br/><br/></td></tr>");
   }
   return sb.toString();
 }
Example 5
  /**
   * Core sentiment detection method. doNota = none of the above
   *
   * @param captions (null/none = all)
   */
  public Map<String, Collection<Document>> getEmotions(
      Indexer indexer,
      Collection<Document> docs,
      boolean doNota,
      boolean originalContentOnly,
      String... captions) {
    Collection<Lexicon1Lang> lexicons = getRelevantLexicon1Langs(docs);
    Map<String, Collection<Document>> result = new LinkedHashMap<>();
    Set<Document> docs_set = Util.castOrCloneAsSet(docs);
    // aggregate results for each lang into result
    for (Lexicon1Lang lex : lexicons) {
      Map<String, Collection<Document>> resultsForThisLang =
          (doNota
              ? lex.getEmotionsWithNOTA(indexer, docs_set, originalContentOnly)
              : lex.getEmotions(indexer, docs_set, originalContentOnly, captions));
      if (resultsForThisLang == null) continue;

      for (String caption : resultsForThisLang.keySet()) {
        Collection<Document> resultDocsThisLang = resultsForThisLang.get(caption);
        Collection<Document> resultDocs = result.get(caption);
        // if caption doesn't exist already, create a new entry, or else add to the existing set of
        // docs that match this caption
        if (resultDocs == null) result.put(caption, resultDocsThisLang);
        else resultDocs.addAll(resultDocsThisLang);
      }
    }
    // TODO: the result can be cached at server to avoid redundant computation (by concurrent users,
    // which are few for now)
    return result;
  }
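
A short usage sketch of the aggregation above (hypothetical call site: the lexicon, indexer,
and allDocs names are assumptions):

  // caption -> docs matching it across all languages; no "none of the above" bucket,
  // matching against original content only
  Map<String, Collection<Document>> byCaption =
      lexicon.getEmotions(indexer, allDocs, false /* doNota */, true /* originalContentOnly */);
  for (Map.Entry<String, Collection<Document>> e : byCaption.entrySet())
    log.info(e.getKey() + ": " + e.getValue().size() + " docs");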
Example 6
    private void expandQueries() {
      // clear the expanded query map first; we don't want any residue from the previous state
      captionToExpandedQuery.clear();
      for (String caption : captionToRawQuery.keySet()) {
        String query = captionToRawQuery.get(caption);
        List<String> orTerms = Util.tokenize(query, "|");
        String expandedQuery = "";
        for (int i = 0; i < orTerms.size(); i++) {
          String t = orTerms.get(i).trim();
          if (t.length() == 0) continue;
          if (t.startsWith("{") && t.endsWith("}")) {
            String c = t.substring(1, t.length() - 1);
            // note: look up the expanded map, not the raw map, to allow multi-level expansion
            String exp = captionToExpandedQuery.get(c);
            if (exp == null) {
              t = captionToRawQuery.get(c);
              if (t == null) {
                log.warn("ERROR: no prev. caption: " + c + " in query " + query);
                continue;
              }
            } else t = exp;

            usedInOtherCaptions.add(c);
          }
          expandedQuery += t;
          // there is no point adding an explicit OR (|): the query is treated as a plain text
          // string and is not handled specially in Indexer.lookupDocsAsId. however, any
          // non-word, non-special character enables tokenization at that index, so the terms
          // are effectively ORed anyway.
          if (i < orTerms.size() - 1) expandedQuery += "|";
        }

        if (caption.length() > 0 && expandedQuery.length() > 0) {
          // if caption already exists, just add to it
          String existingQuery = captionToExpandedQuery.get(caption);
          if (!Util.nullOrEmpty(existingQuery)) expandedQuery = existingQuery + "|" + expandedQuery;
          captionToExpandedQuery.put(caption, expandedQuery);
        }
      }

      // remove the non top-level captions
      for (String caption : usedInOtherCaptions) captionToExpandedQuery.remove(caption);
    }
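
Given the rules above, a term of the form {caption} in a raw query is replaced by that caption's
already-expanded query, and any caption referenced this way is removed from the top level at the
end. For example (lines invented for illustration; here "negative" appears before
"strongly-negative", so its expanded form is reused):

  negative: bad|awful
  strongly-negative: {negative}|terrible

  # after expandQueries():
  #   strongly-negative -> bad|awful|terrible
  #   negative          -> (removed: it was used inside another caption)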
Example 7
 public static synchronized boolean anyCodesAvailable()
     throws IOException, GeneralSecurityException, ClassNotFoundException {
   try {
     List<UserStats> users = readUsersFile();
     return (users.size() < codes.size());
   } catch (Exception e) {
     Util.print_exception(e, log);
   }
   return false;
 }
Example 8
    /**
     * main entry point: returns a caption -> docs map for each (non-empty) category in the
     * current captionToQueryMap.
     *
     * @param indexer must already have run
     * @param docs results are restricted to these docs; assumes all docs if docs is null or empty
     * @param captions (null/none = all)
     *     <p>vihari: this is a weird name for a method that returns documents with emotions
     *     rather than the emotions themselves.
     */
    public Map<String, Collection<Document>> getEmotions(
        Indexer indexer,
        Collection<Document> docs,
        boolean originalContentOnly,
        String... captions) {
      Map<String, Collection<Document>> result = new LinkedHashMap<String, Collection<Document>>();
      Set<Document> docs_set = Util.castOrCloneAsSet(docs);
      //			for (String[] emotion: emotionsData)
      String[] selected_captions =
          captions.length > 0 ? captions : captionToExpandedQuery.keySet().toArray(new String[0]);
      for (String caption : selected_captions) {
        String query = captionToExpandedQuery.get(caption);
        if (query == null) {
          log.warn("Skipping unknown caption '" + caption + "'");
          continue;
        }

        // query is simply word1|word2|word3 etc. for that sentiment; the threshold of 1 below
        // asks for all docs in the indexer that match the query at least once
        int threshold = 1;
        Indexer.QueryOptions options = new Indexer.QueryOptions();
        options.setThreshold(threshold);
        options.setQueryType(Indexer.QueryType.ORIGINAL);
        Collection<Document> docsForCaption = indexer.docsForQuery(query, options);
        /*
        log.info (docsForCaption.size() + " before");
        threshold = 2;
        docsForCaption = indexer.docsForQuery(query, -1, threshold);
        log.info (docsForCaption.size() + " after");
        */
        //				Set<Document> docs = indexer.docsWithPhraseThreshold(query, -1, 2);
        // in future, we may have a higher threshold for sentiment matching
        // if @param docs is present, retain only those docs that match, otherwise retain all
        if (!Util.nullOrEmpty(docs_set))
          // docsForCaption.retainAll(docs_set);
          docsForCaption = Util.listIntersection(docsForCaption, docs_set);

        // put it in the result only if at least 1 doc matches
        if (docsForCaption.size() > 0) result.put(caption, docsForCaption);
      }
      return result;
    }
Example 9
 public String toString() {
   StringBuilder sb = new StringBuilder();
   sb.append(
       "Lexicon " + name + " with " + Util.pluralize(languageToLexicon.size(), "language") + "\n");
   int count = 0;
   for (String language : languageToLexicon.keySet()) {
     sb.append("Language #" + count++ + ": " + language + "\n");
     sb.append(languageToLexicon.get(language));
     sb.append(
         "\n-----------------------------------------------------------------------------\n");
   }
   return sb.toString();
 }
Example 10
  static {
    // read all the codes at bootup -- the codes file is not encrypted and never changes
    CODES_FILE =
        System.getProperty("user.home")
            + java.io.File.separator
            + "results"
            + java.io.File.separator
            + "codes.txt";
    USERS_FILE =
        System.getProperty("user.home")
            + File.separator
            + "results"
            + File.separator
            + "users"; // remember to change cryptoutils if you change this

    try {
      // read the codes statically at startup
      codes = Util.getLinesFromFile(CODES_FILE, true);
    } catch (Exception e) {
      Util.print_exception("\n\n\n\nSEVERE WARNING IN BOOTUP: codes file not read\n\n\n", e, log);
      codes = new ArrayList<String>(); // dummy list of codes (empty)
    }
  }
Example 11
 public Lexicon(String dir, String name) throws IOException {
   name = sanitizeLexiconName(name);
   this.name = name;
   Set<String> languages = Util.filesWithPrefixAndSuffix(dir, name + ".", LEXICON_SUFFIX);
   for (String language : languages) {
     Lexicon1Lang lex =
         new Lexicon1Lang(
             dir
                 + File.separator
                 + name
                 + "."
                 + language
                 + LEXICON_SUFFIX); // LEXICON_SUFFIX already has a .
     language = language.toLowerCase();
     languageToLexicon.put(language, lex);
   }
 }
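
The constructor above discovers one file per language, named <name>.<language><LEXICON_SUFFIX>
inside dir; the middle component becomes the (lower-cased) language key. Assuming, purely for
illustration, that LEXICON_SUFFIX is ".lex", a lexicon named "sentiment" with English and German
entries would be laid out as:

  dir/sentiment.english.lex   -> languageToLexicon.get("english")
  dir/sentiment.german.lex    -> languageToLexicon.get("german")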
Example 12
  // accumulates counts returned by lexicons in each language
  // TODO: It is possible to write a generic accumulator that accumulates sum over all the languages
  public Map<String, Integer> getLexiconCounts(Indexer indexer, boolean originalContentOnly) {
    List<Document> docs = indexer.docs;
    Collection<Lexicon1Lang> lexicons = getRelevantLexicon1Langs(docs);
    Map<String, Integer> result = new LinkedHashMap<String, Integer>();
    Set<Document> docs_set = Util.castOrCloneAsSet(docs);
    // aggregate results for each lang into result
    for (Lexicon1Lang lex : lexicons) {
      Map<String, Integer> resultsForThisLang = lex.getLexiconCounts(indexer, originalContentOnly);
      if (resultsForThisLang == null) continue;

      for (String caption : resultsForThisLang.keySet()) {
        Integer resultCountsThisLang = resultsForThisLang.get(caption);
        Integer resultCounts = result.get(caption);
        // if the caption doesn't exist already, create a new entry, or else add this
        // language's count to the existing count for the caption
        if (resultCounts == null) result.put(caption, resultCountsThisLang);
        else result.put(caption, resultCounts + resultCountsThisLang);
      }
    }
    return result;
  }
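
A minimal sketch of the generic accumulator the TODO above mentions (an assumption, not code
from the project): it folds per-language result maps into one, combining values for duplicate
keys with a caller-supplied combiner.

  // sketch: merge many per-language result maps, combining values on key collisions
  static <K, V> Map<K, V> accumulate(
      Iterable<Map<K, V>> perLanguageResults, java.util.function.BinaryOperator<V> combiner) {
    Map<K, V> result = new LinkedHashMap<>();
    for (Map<K, V> m : perLanguageResults) {
      if (m == null) continue;
      for (Map.Entry<K, V> e : m.entrySet()) result.merge(e.getKey(), e.getValue(), combiner);
    }
    return result;
  }

  // getLexiconCounts could then delegate to accumulate(perLangCounts, Integer::sum), and
  // getEmotions to accumulate(perLangDocs, (a, b) -> { a.addAll(b); return a; })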
Example 13
 public Map<String, Integer> getLexiconCounts(Indexer indexer, boolean originalContentOnly) {
   Map<String, Integer> map = new LinkedHashMap<String, Integer>();
   String[] captions =
       captionToExpandedQuery.keySet().toArray(new String[captionToExpandedQuery.size()]);
   for (String caption : captions) {
     String query = captionToExpandedQuery.get(caption);
     if (query == null) {
       log.warn("Skipping unknown caption '" + caption + "'");
       continue;
     }
     Integer cnt = 0;
     try {
       if (originalContentOnly)
         cnt = indexer.getTotalHits(query, false, Indexer.QueryType.ORIGINAL);
       else cnt = indexer.getTotalHits(query, false, Indexer.QueryType.FULL);
     } catch (Exception e) {
       Util.print_exception("Exception while collecting lexicon counts", e, log);
     }
     map.put(caption, cnt);
   }
   return map;
 }
Example 14
 /** Writes out CSV stats as an encrypted file in RESULTS_DIR/<userid>/filename. */
 public void logStats(String filename, boolean nullClues) {
   Indexer.IndexStats stats = archive.getIndexStats();
   StringBuilder statsLog = new StringBuilder();
   Pair<String, String> indexStats = Util.fieldsToCSV(stats, true);
   Pair<String, String> addressBookStats = Util.fieldsToCSV(archive.addressBook.getStats(), true);
    Pair<String, String> studyStats =
        Util.fieldsToCSV(stats, true); // note: computed from the same stats object as indexStats
   Pair<String, String> archiveStats = Util.fieldsToCSV(archive.stats, true);
   statsLog.append(
       "STUDYSTATS-1: "
           + studyStats.getFirst()
           + indexStats.getFirst()
           + addressBookStats.getFirst()
           + archiveStats.getFirst()
           + "\n");
   statsLog.append(
       "STUDYSTATS-2: "
           + studyStats.getSecond()
           + indexStats.getSecond()
           + addressBookStats.getSecond()
           + archiveStats.getSecond()
           + "\n");
   int idx = 1;
   for (MemoryQuestion mq : this.getQuestions()) {
     if (nullClues) mq.clue.clue = null;
     Pair<String, String> p = Util.fieldsToCSV(mq.clue.clueStats, true);
     Pair<String, String> p1 = Util.fieldsToCSV(mq.stats, true);
     if (idx == 1)
       statsLog.append(
           "QUESTIONSTATS-header: "
               + p.getFirst()
               + ','
               + p1.getFirst()
               + "correct answer, user answer, user answer before hint, clue"
               + "\n");
     //			statsLog.append("QUESTIONSTATS-2: " + p.getSecond()  + ',' + p1.getSecond() +
     // mq.correctAnswer + "," + mq.userAnswer + "," + mq.userAnswerBeforeHint + "," +
     // mq.clue.clue.replaceAll(",", " ") + "\n");
     statsLog.append(
         "QUESTIONSTATS-2: "
             + p.getSecond()
             + ','
             + p1.getSecond()
             + mq.correctAnswer
             + ","
             + mq.userAnswer
             + ","
             + mq.userAnswerBeforeHint
             + "\n");
     idx = idx + 1;
   }
   String RESULTS_DIR =
       System.getProperty("user.home")
           + File.separator
           + "results"
           + File.separator
           + this.stats.userid;
   new File(RESULTS_DIR).mkdirs();
   String file = RESULTS_DIR + File.separator + filename;
   try {
     CryptoUtils.writeEncryptedBytes(statsLog.toString().getBytes("UTF-8"), file);
   } catch (UnsupportedEncodingException e) {
     Util.print_exception(e, log);
   } catch (Exception e) {
     Util.print_exception("NOC ERROR: encryption failed!", e, log);
   }
   log.info(statsLog);
 }
Example 15
  /**
   * Generates person-name questions from the given archive.
   *
   * @throws IOException
   */
  public void generatePersonNameQuestions(
      Archive archive,
      NERModel nerModel,
      Collection<EmailDocument> allDocs,
      Lexicon lex,
      int numClues)
      throws IOException, GeneralSecurityException, ClassNotFoundException, ReadContentsException,
          ParseException {
    this.archive = archive;
    questions = new ArrayList<>();
    ArchiveCluer cluer = new ArchiveCluer(null, archive, nerModel, null, lex);

    tabooCluesSet = new LinkedHashSet<>();
    archive.assignThreadIds();

    List<ClueEvaluator> evaluators = getDefaultEvals();

    List<Document> docs = archive.getAllDocs();
    Multimap<Contact, EmailDocument> contactToMessages = LinkedHashMultimap.create();
    Multimap<Contact, Long> contactToThreadIds = LinkedHashMultimap.create();

    // sort by date
    Collections.sort(docs);

    Date earliestDate = null, latestDate = null;
    Map<Contact, Date> contactToLatestDate = new LinkedHashMap<>();

    // compute earliest and latest date across all messages in corpus
    for (Document doc : docs) {
      EmailDocument ed = (EmailDocument) doc;

      if (earliestDate == null || ed.date.before(earliestDate)) earliestDate = ed.date;
      if (latestDate == null || ed.date.after(latestDate)) latestDate = ed.date;
    }
    JSPHelper.log.info(
        "===================\nStarting to generate person names memory questions from "
            + docs.size()
            + " messages with "
            + numClues
            + " questions"
            + ", earliest date = "
            + edu.stanford.muse.email.CalendarUtil.formatDateForDisplay(earliestDate)
            + " latest date = "
            + edu.stanford.muse.email.CalendarUtil.formatDateForDisplay(latestDate));

    Set<Integer> tabooSentenceHashes = new LinkedHashSet<>();

    // create hashes of all sentences seen at least twice (case insensitive, lower cased)
    {
      Set<Integer> hashesSeen = new LinkedHashSet<>();
      for (Document d : docs) {
        String contents = archive.getContents(d, true);
        String cleanedContents = EmailUtils.cleanupEmailMessage(contents);
        SentenceTokenizer st = new SentenceTokenizer(cleanedContents);
        while (st.hasMoreSentences()) {
          String sentence = st.nextSentence();
          sentence = canonicalizeSentence(sentence);
          int hashCode = sentence.hashCode();
          if (hashesSeen.contains(hashCode)) {
            tabooSentenceHashes.add(hashCode);
            log.info("Marking sentence as taboo: " + sentence);
          } else hashesSeen.add(hashCode);
        }
      }
    }

    // compute contactToLatestDate that contact has been seen on
    for (Document doc : docs) {
      EmailDocument ed = (EmailDocument) doc;
      // discard doc if it is not a sent mail
      if ((ed.sentOrReceived(archive.addressBook) & EmailDocument.SENT_MASK) == 0) continue;

      for (Contact c : ed.getParticipatingContactsExceptOwn(archive.addressBook)) {
        Date currentLatestDate = contactToLatestDate.get(c);
        if (currentLatestDate == null || currentLatestDate.before(ed.date))
          contactToLatestDate.put(c, ed.date);
        contactToMessages.put(c, ed);
        contactToThreadIds.put(c, ed.threadID);
      }
    }

    log.info("We are considering " + contactToLatestDate.size() + " contacts");

    Date currentDate = new Date();
    List<Pair<Date, Date>> intervals =
        computeDateIntervals(earliestDate, currentDate); // go back from current date
    // intervals[0] is the most recent.
    JSPHelper.log.info("done computing " + intervals.size() + " intervals");
    for (Pair<Date, Date> p : intervals)
      JSPHelper.log.info(
          "Interval: "
              + edu.stanford.muse.email.CalendarUtil.formatDateForDisplay(p.getFirst())
              + " - "
              + edu.stanford.muse.email.CalendarUtil.formatDateForDisplay(p.getSecond()));

    int cluesPerInterval =
        (numClues > 0 && intervals.size() > 0)
            ? (numClues + intervals.size() - 1) / intervals.size()
            : 0;
    JSPHelper.log.info(
        "Will try to generate " + Util.pluralize(cluesPerInterval, "questions") + " per interval");

    Multimap<Integer, Contact> intervalToContacts = LinkedHashMultimap.create();

    // nSent is the number of sentences allowed in a clue text
    int nSent = 2;
    for (Contact c : contactToLatestDate.keySet()) {
      Date lastSeenDate = contactToLatestDate.get(c);

      // which interval does this date belong to? we'll assign this contact to that interval
      // in the intervalToContacts map
      int interval = -1;
      Date intervalStart = null, intervalEnd = null;
      {
        int i = 0;
        for (Pair<Date, Date> p : intervals) {
          intervalStart = p.getFirst();
          intervalEnd = p.getSecond();

          if ((intervalStart.before(lastSeenDate) && intervalEnd.after(lastSeenDate))
              || intervalStart.equals(lastSeenDate)
              || intervalEnd.equals(lastSeenDate)) {
            interval = i;
            break;
          }
          i++;
        }
      }

      if (interval < 0 || interval == intervals.size()) {
        JSPHelper.log.info(
            "What, no interval!? for "
                + edu.stanford.muse.email.CalendarUtil.formatDateForDisplay(lastSeenDate));
        continue;
      }

      intervalToContacts.put(interval, c);
    }

    log.info("Interval information (interval 0 is the most recent):");
    for (int interval = 0; interval < intervals.size(); interval++) {
      Collection<Contact> contacts = intervalToContacts.get(interval);
      int nContactsForThisInterval = (contacts == null) ? 0 : contacts.size();
      log.info(
          "In interval "
              + interval
              + " there are "
              + Util.pluralize(nContactsForThisInterval, "candidate contact")
              + " who were last seen in this interval");
    }

    for (int interval = 0; interval < intervals.size(); interval++) {
      Date intervalStart = intervals.get(interval).getFirst();
      Date intervalEnd = intervals.get(interval).getSecond();
      Collection<Contact> candidateContactsForThisInterval = intervalToContacts.get(interval);
      if (candidateContactsForThisInterval == null) {
        log.info("Skipping interval " + interval + " because there are no contacts");
        continue;
      }

      Map<Clue, Contact> clueToContact = new LinkedHashMap<>();
      log.info("=======\nGenerating questions for interval " + interval);

      outer:
      for (Contact c : candidateContactsForThisInterval) {
        String name = c.pickBestName();
        if (name.length() < 2) // could also check if it is alphanumeric only
          continue outer;

        // ignore the contact if its name contains anything other than letters and spaces;
        // even a period is not allowed
        for (char ch : name.toCharArray()) {
          if (!Character.isAlphabetic(ch) && !Character.isSpaceChar(ch)) continue outer;
        }

        Clue clue =
            cluer.createPersonNameClue(
                c,
                evaluators,
                nerModel,
                intervalStart,
                intervalEnd,
                nSent,
                archive,
                tabooSentenceHashes);
        if (clue != null) clueToContact.put(clue, c);
      }

      List<Clue> clueList = new ArrayList<>(clueToContact.keySet());
      Collections.sort(clueList);
      List<Clue> selectedClues = new ArrayList<>();
      for (int i = 0; i < cluesPerInterval && i < clueList.size(); i++) {
        selectedClues.add(clueList.get(i));
      }

      log.info(
          "For interval "
              + interval
              + " selected "
              + selectedClues.size()
              + " contacts out of "
              + clueList.size()
              + " possible candidates.");
      //      for (Clue c : clueList)
      //        log.info("Clue candidate for " + clueToContact.get(c).pickBestName()
      //            + " score = " + c.clueStats.finalScore + " clue is " + c);
      //      for (Clue c : selectedClues)
      //        log.info("Selected clue: " + clueToContact.get(c).pickBestName()
      //            + " score = " + c.clueStats.finalScore + " clue is " + c);

      for (Clue selectedClue : selectedClues) {
        Contact c = clueToContact.get(selectedClue);
        String name = c.pickBestName();

        List<Integer> lengthList = Crossword.convertToWord(name).getSecond();
        String lengthDescr = "";
        if (lengthList.size() > 1) lengthDescr += Integer.toString(lengthList.size()) + " words: ";

        for (Integer i : lengthList) {
          lengthDescr += Util.pluralize(i, "letter") + ", ";
        }
        lengthDescr =
            lengthDescr.substring(0, lengthDescr.length() - 2); // subtract the extra comma.

        ClueInfo ci = new ClueInfo();
        ci.lastSeenDate = contactToLatestDate.get(c);
        ci.nMessages = contactToMessages.get(c).size();
        ci.nThreads = contactToThreadIds.get(c).size();

        questions.add(new MemoryQuestion(this, name, selectedClue, 1, lengthDescr));
      }
    }

    log.info(questions.size() + " questions generated");

    log.info("Top candidates are:");

    // sort q's by clue score
    Collections.sort(questions);

    //		log.info("Based on clue score, top answers:");
    //		for (MemoryQuestion mq: questions)
    //			log.info (mq.correctAnswer + " times= clue=" + mq.clue.clue);

    int count = 0;
    for (MemoryQuestion mq : questions) {
      mq.setQuestionNum(count++);
    }

    // log the questions as well, just in case we don't get to the final point due to user fatigue
    // or crashes
    logStats("questions.final", false);
  }
Example 16
  /**
   * Generates the list of questions and stores it in the current instance of MemoryStudy. We
   * handle two kinds of questions: person-name tests and non-person-name tests. A non-person-name
   * test is a fill-in-the-blank question whose blank is to be filled with the correct non-person
   * entity to complete the sentence; a person-name test asks the user to guess a person in the
   * correspondent list, based on some distinctive sentences in the mail.
   *
   * @param maxInt max. number of questions from an interval
   * @throws IOException
   */
  public void generateQuestions(
      Archive archive,
      NERModel nerModel,
      Collection<EmailDocument> allDocs,
      Lexicon lex,
      int maxInt,
      boolean personTest)
      throws IOException, GeneralSecurityException, ClassNotFoundException, ReadContentsException,
          ParseException {
    this.archive = archive;
    if (allDocs == null) allDocs = (Collection) archive.getAllDocs();
    questions = new ArrayList<>();
    ArchiveCluer cluer = new ArchiveCluer(null, archive, nerModel, null, lex);

    Short[] itypes =
        new Short[] {
          FeatureDictionary.BUILDING,
          FeatureDictionary.PLACE,
          FeatureDictionary.RIVER,
          FeatureDictionary.ROAD,
          FeatureDictionary.UNIVERSITY,
          FeatureDictionary.MOUNTAIN,
          FeatureDictionary.AIRPORT,
          FeatureDictionary.ISLAND,
          FeatureDictionary.MUSEUM,
          FeatureDictionary.BRIDGE,
          FeatureDictionary.AIRLINE,
          FeatureDictionary.THEATRE,
          FeatureDictionary.LIBRARY,
          FeatureDictionary.LAWFIRM,
          FeatureDictionary.GOVAGENCY
        };
    double CUTOFF = 0.001;
    tabooCluesSet = new LinkedHashSet<>();
    archive.assignThreadIds();

    List<Document> docs = archive.getAllDocs();
    Map<String, Date> entityToLastDate = new LinkedHashMap<>();
    Multimap<String, EmailDocument> entityToMessages = LinkedHashMultimap.create();
    Multimap<String, Long> entityToThreads = LinkedHashMultimap.create();
    Multimap<String, String> ceToDisplayEntity = LinkedHashMultimap.create();

    int di = 0;

    // sort by date
    Collections.sort(docs);

    Set<String> ownerNames = archive.ownerNames;
    Date earliestDate = null, latestDate = null;
    Set<String> allEntities = new LinkedHashSet<>();
    for (Document doc : docs) {
      EmailDocument ed = (EmailDocument) doc;
      if (earliestDate == null || ed.date.before(earliestDate)) earliestDate = ed.date;
      if (latestDate == null || ed.date.after(latestDate)) latestDate = ed.date;

      List<String> entities = new ArrayList<>();
      if (!personTest) {
        entities.addAll(
            Arrays.asList(archive.getAllNamesInDoc(doc, true))
                .stream()
                .filter(n -> n.typeScore > CUTOFF)
                .map(n -> n.text)
                .collect(Collectors.toList()));
      } else {
        // do not consider mailing lists
        if (ed.sentToMailingLists != null && ed.sentToMailingLists.length > 0) continue;
        // discard doc if it is not a sent mail
        if ((ed.sentOrReceived(archive.addressBook) & EmailDocument.SENT_MASK) == 0) continue;

        List<Address> addrs = new ArrayList<>();
        if (ed.to != null) for (Address addr : ed.to) addrs.add(addr);

        List<String> names = new ArrayList<>();
        for (Address addr : addrs) {
          Contact c = archive.addressBook.lookupByAddress(addr);
          if (c != null) names.add(c.pickBestName()); // the address may not resolve to a contact
        }

        for (String name : names) {
          if (!ownerNames.contains(name) && !DictUtils.hasDictionaryWord(name)) {
            entities.add(name);
          }
        }
      }
      allEntities.addAll(entities);

      // get entities
      for (String e : entities) {
        if (Util.nullOrEmpty(e)) continue;
        e = e.replaceAll("^\\W+|\\W+$", "");
        if (e.length() > 10 && e.toUpperCase().equals(e))
          continue; // all upper case, more than 10 letters, you're out.

        String ce = DictUtils.canonicalize(e); // canonicalize
        if (ce == null) {
          JSPHelper.log.info("Dropping entity: " + e);
          continue;
        }

        ceToDisplayEntity.put(ce, e);
        entityToLastDate.put(ce, ed.date);
        entityToMessages.put(ce, ed);
        entityToThreads.put(ce, ed.threadID);
      }

      if ((++di) % 1000 == 0) log.info(di + " of " + docs.size() + " messages processed...<br/>");
    }
    log.info(
        "Considered #"
            + allEntities.size()
            + " unique entities and #"
            + ceToDisplayEntity.size()
            + " good ones in #"
            + docs.size()
            + " docs<br>");
    log.info("Owner Names: " + ownerNames);
    JSPHelper.log.info(
        "Considered #"
            + allEntities.size()
            + " unique entities and #"
            + ceToDisplayEntity.size()
            + " good ones in #"
            + docs.size()
            + "docs");

    JSPHelper.log.info(
        "earliest date = "
            + edu.stanford.muse.email.CalendarUtil.formatDateForDisplay(earliestDate));
    JSPHelper.log.info(
        "latest date = " + edu.stanford.muse.email.CalendarUtil.formatDateForDisplay(latestDate));

    Multimap<String, String> tokenToCE = LinkedHashMultimap.create();
    for (String ce : ceToDisplayEntity.keySet()) {
      List<String> tokens = Util.tokenize(ce);
      for (String t : tokens) tokenToCE.put(t, ce);
    }

    // Compute date intervals
    int DAYS_PER_INTERVAL = 30;
    List<Pair<Date, Date>> intervals = new ArrayList<Pair<Date, Date>>();
    {
      JSPHelper.log.info("computing time intervals");
      Date closingDate = latestDate;

      JSPHelper.log.info(
          "closing = " + edu.stanford.muse.email.CalendarUtil.formatDateForDisplay(closingDate));
      while (earliestDate.before(closingDate)) {
        Calendar cal = new GregorianCalendar();
        cal.setTime(closingDate); // this is the time of the last sighting of the term
        // scroll to the end of this day
        cal.set(Calendar.HOUR_OF_DAY, 23);
        cal.set(Calendar.MINUTE, 59);
        cal.set(Calendar.SECOND, 59);
        Date endDate = cal.getTime();

        // go back DAYS_PER_INTERVAL - 1 days: we want from 0:00 of the first date
        // to 23:59 of the last date
        cal.add(Calendar.DATE, 1 - DAYS_PER_INTERVAL);
        cal.set(Calendar.HOUR_OF_DAY, 0);
        cal.set(Calendar.MINUTE, 0);
        cal.set(Calendar.SECOND, 0);
        Date startDate = cal.getTime();

        intervals.add(new Pair<Date, Date>(startDate, endDate));
        // ok we got an interval

        // closing date for the next interval is 1 day before endDate
        cal.add(Calendar.DATE, -1);
        closingDate = cal.getTime();
      }
      JSPHelper.log.info("done computing intervals, #time intervals: " + intervals.size());
      for (Pair<Date, Date> p : intervals)
        JSPHelper.log.info(
            "Interval: "
                + edu.stanford.muse.email.CalendarUtil.formatDateForDisplay(p.getFirst())
                + " - "
                + edu.stanford.muse.email.CalendarUtil.formatDateForDisplay(p.getSecond()));
    }
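    // worked example (illustrative dates, not from the source): if latestDate is
    // 2015-06-15 14:00, the first interval is [2015-05-17 00:00:00, 2015-06-15 23:59:59];
    // the next closingDate becomes 2015-05-16, so the second interval is
    // [2015-04-17 00:00:00, 2015-05-16 23:59:59], and so on back to earliestDate.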

    // initialize clueInfos to empty lists
    List<ClueInfo>[] clueInfos = new ArrayList[intervals.size()];
    for (int i = 0; i < intervals.size(); i++) {
      clueInfos[i] = new ArrayList<ClueInfo>();
    }

    Map<Integer, Integer> intervalCount = new LinkedHashMap<>();
    // nSent is the number of sentences allowed in a clue text
    int nvalidclues = 0, nSent = 2;
    // generate clueInfos for each entity
    for (String ce : entityToLastDate.keySet()) {
      Date lastSeenDate = entityToLastDate.get(ce);

      // compute fullAnswer, a simple string version of the answer with stopwords removed
      String fullAnswer = "";
      {
        List<String> tokens = Util.tokenize(ceToDisplayEntity.get(ce).iterator().next());
        for (String t : tokens) {
          if (EnglishDictionary.stopWords.contains(t.toLowerCase())) continue;
          fullAnswer += t + " ";
        }
        fullAnswer = fullAnswer.trim();
      }
      // don't want the answer to be scored low just because it has extra non-word chars at the
      // beginning or end
      fullAnswer = fullAnswer.replaceAll("^\\W+|\\W+$", "");

      // which interval does this date belong to?
      int interval = -1;
      Date intervalStart = null, intervalEnd = null;
      {
        int i = 0;
        for (Pair<Date, Date> p : intervals) {
          intervalStart = p.getFirst();
          intervalEnd = p.getSecond();

          if ((intervalStart.before(lastSeenDate) && intervalEnd.after(lastSeenDate))
              || intervalStart.equals(lastSeenDate)
              || intervalEnd.equals(lastSeenDate)) {
            interval = i;
            break;
          }
          i++;
        }
      }
      if (interval < 0 || interval == intervals.size()) {
        JSPHelper.log.info(
            "What, no interval!? for "
                + edu.stanford.muse.email.CalendarUtil.formatDateForDisplay(lastSeenDate));
        continue; // skip this entity: clueInfos[interval] below would fail for interval == -1
      }
      if (!intervalCount.containsKey(interval)) intervalCount.put(interval, 0);
      if (intervalCount.get(interval) > maxInt) continue;
      intervalCount.put(interval, intervalCount.get(interval) + 1);

      List<Integer> lengthList = Crossword.convertToWord(fullAnswer).getSecond();
      String lengthDescr = "";
      if (lengthList.size() > 1) lengthDescr += Integer.toString(lengthList.size()) + " words: ";

      for (Integer i : lengthList) {
        lengthDescr += Util.pluralize(i, "letter") + ", ";
      }
      lengthDescr = lengthDescr.substring(0, lengthDescr.length() - 2); // subtract the extra comma.

      ClueInfo ci = new ClueInfo();
      ci.link = "../browse?term=\"" + fullAnswer + "\"&sort_by=recent&searchType=original";
      ci.lastSeenDate = lastSeenDate;
      ci.nMessages = entityToMessages.get(ce).size();
      ci.nThreads = entityToThreads.get(ce).size();

      // TODO: we are doing default initialisation of evaluators by setting it to null below, it is
      // more appropriate to consider it as an argument for this method
      Clue clue =
          cluer.createClue(
              fullAnswer,
              (personTest
                  ? ArchiveCluer.QuestionType.GUESS_CORRESPONDENT
                  : ArchiveCluer.QuestionType.FILL_IN_THE_BLANK),
              null,
              tabooCluesSet,
              null,
              intervalStart,
              intervalEnd,
              nSent,
              archive);
      if (clue != null) ci.clues = new Clue[] {clue};

      if (ci.clues == null || ci.clues.length == 0 || clue == null) {
        JSPHelper.log.warn("Did not find any clue for: " + fullAnswer);
      } else {
        // is the times value of the clue important?
        questions.add(new MemoryQuestion(this, fullAnswer, clue, 1, lengthDescr));
        nvalidclues++;
        // makes sure that the clue with the same statement is not generated again
        tabooCluesSet.add(clue.clue);
      }
      clueInfos[interval].add(ci);
    }
    log.info("Found valid clues for " + nvalidclues + " answers");
    JSPHelper.log.info("Found valid clues for " + nvalidclues + " answers");

    log.info("Top candidates are:");
    for (MemoryQuestion mq : questions)
      log.info(mq.correctAnswer + " times=" + mq.stats.nMessagesWithAnswer);

    // sort q's by clue score
    Collections.sort(questions);

    //		log.info("Based on clue score, top answers:");
    //		for (MemoryQuestion mq: questions)
    //			log.info (mq.correctAnswer + " times= clue=" + mq.clue.clue);

    // now we have up to 2*N questions, sorted by clue score. the intent was to drop questions
    // that are a prefix/suffix of another and cap to N, but that elimination is currently
    // disabled, so prev_size and new_size stay equal
    int prev_size = questions.size();
    int new_size = questions.size();
    //	log.info ("#questions before prefix-suffix elim: " + prev_size + " after: " + new_size);

    int count = 0;
    for (MemoryQuestion mq : questions) {
      mq.setQuestionNum(count++);
    }

    // log the questions as well, just in case we don't get to the final point due to user fatigue
    // or crashes
    logStats("questions.final", false);
  }
Example 17
 public String toString() {
   return Util.fieldsToString(this);
 }