Example #1
 /** {@inheritDoc} */
 public void pageRemoved(WikiPage page) {
   IndexWriter writer = null;
   try {
     // Open the Lucene index directory and obtain a writer for it.
     Directory luceneDir = new SimpleFSDirectory(new File(m_luceneDirectory), null);
     writer = getIndexWriter(luceneDir);
     // Delete every indexed document whose identifier term matches the removed page's name.
     Query query = new TermQuery(new Term(LUCENE_ID, page.getName()));
     writer.deleteDocuments(query);
   } catch (Exception e) {
     log.error("Unable to remove page '" + page.getName() + "' from Lucene index", e);
   } finally {
     close(writer);
   }
 }
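The method above removes a page from the index by deleting every document whose LUCENE_ID term equals the page name. The same delete-by-identifier pattern can be exercised on its own; the sketch below is illustrative only and assumes the Lucene 3.x API plus a hypothetical index path and field name ("id" standing in for LUCENE_ID), not the provider's actual wiring.

 import java.io.File;

 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.SimpleFSDirectory;
 import org.apache.lucene.util.Version;

 public class DeleteByIdSketch {
   public static void main(String[] args) throws Exception {
     // Hypothetical index location; the provider derives its directory from m_luceneDirectory.
     Directory dir = new SimpleFSDirectory(new File("/tmp/wiki-index"), null);
     IndexWriterConfig cfg =
         new IndexWriterConfig(Version.LUCENE_36, new StandardAnalyzer(Version.LUCENE_36));
     IndexWriter writer = new IndexWriter(dir, cfg);
     try {
       // Delete every document whose identifier field matches the page name,
       // mirroring pageRemoved() above.
       writer.deleteDocuments(new TermQuery(new Term("id", "Main")));
     } finally {
       writer.close(true); // commit pending deletes and release the write lock
     }
   }
 }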
Example #2
 void close(IndexWriter writer) {
   try {
     if (writer != null) {
       // close(true) waits for pending merges, commits buffered changes and releases the write lock.
       writer.close(true);
     }
   } catch (IOException e) {
     log.error(e);
   }
 }
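close() is the quiet-close helper used in the finally blocks of the other examples: it tolerates a null writer and only logs an IOException instead of propagating it. A typical call site, assuming getIndexWriter(), luceneDir and log are provided by the surrounding provider class, looks like this fragment:

 IndexWriter writer = null;
 try {
   writer = getIndexWriter(luceneDir);  // obtain a writer for the index directory
   // ... add, update or delete documents here ...
 } catch (IOException e) {
   log.error("Indexing failed", e);
 } finally {
   close(writer);                       // safe even if getIndexWriter() threw and writer is still null
 }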
Example #3
  /**
   * Indexes page using the given IndexWriter.
   *
   * @param page WikiPage
   * @param text Page text to index
   * @param writer The Lucene IndexWriter to use for indexing
   * @return the created index Document
   * @throws IOException If there's an indexing problem
   */
  protected Document luceneIndexPage(WikiPage page, String text, IndexWriter writer)
      throws IOException {
    if (log.isDebugEnabled()) log.debug("Indexing " + page.getName() + "...");

    // make a new, empty document
    Document doc = new Document();

    if (text == null) return doc;

    // Raw name is the keyword we'll use to refer to this document for updates.
    Field field = new Field(LUCENE_ID, page.getName(), Field.Store.YES, Field.Index.NOT_ANALYZED);
    doc.add(field);

    // Body text.  It is stored in the doc for search contexts.
    field =
        new Field(
            LUCENE_PAGE_CONTENTS, text, Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO);
    doc.add(field);

     // Allow searching by page name, both beautified and raw.
    String unTokenizedTitle =
        StringUtils.replaceChars(
            page.getName(), MarkupParser.PUNCTUATION_CHARS_ALLOWED, c_punctuationSpaces);

    field =
        new Field(
            LUCENE_PAGE_NAME,
            TextUtil.beautifyString(page.getName()) + " " + unTokenizedTitle,
            Field.Store.YES,
            Field.Index.ANALYZED,
            Field.TermVector.NO);
    doc.add(field);

     // Allow searching by author name.

    if (page.getAuthor() != null) {
      field =
          new Field(
              LUCENE_AUTHOR,
              page.getAuthor(),
              Field.Store.YES,
              Field.Index.ANALYZED,
              Field.TermVector.NO);
      doc.add(field);
    }

    // Now add the names of the attachments of this page
    try {
      Collection attachments = m_engine.getAttachmentManager().listAttachments(page);
      String attachmentNames = "";

      for (Iterator it = attachments.iterator(); it.hasNext(); ) {
        Attachment att = (Attachment) it.next();
        attachmentNames += att.getName() + ";";
      }
      field =
          new Field(
              LUCENE_ATTACHMENTS,
              attachmentNames,
              Field.Store.YES,
              Field.Index.ANALYZED,
              Field.TermVector.NO);
      doc.add(field);

    } catch (ProviderException e) {
      // Unable to read attachments
      log.error("Failed to get attachments for page", e);
    }
    writer.addDocument(doc);

    return doc;
  }
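luceneIndexPage() stores the page name, body text, author and attachment names in separate fields, so each can be targeted at query time. The sketch below shows one way those fields might be searched back; it is a minimal example that assumes the Lucene 3.x API and hypothetical field names ("contents" and "id" standing in for LUCENE_PAGE_CONTENTS and LUCENE_ID), not JSPWiki's own search path.

 import java.io.File;

 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.queryParser.QueryParser;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.store.SimpleFSDirectory;
 import org.apache.lucene.util.Version;

 public class SearchSketch {
   public static void main(String[] args) throws Exception {
     // Hypothetical index location; "contents" stands in for LUCENE_PAGE_CONTENTS.
     IndexReader reader =
         IndexReader.open(new SimpleFSDirectory(new File("/tmp/wiki-index"), null));
     IndexSearcher searcher = new IndexSearcher(reader);
     QueryParser parser =
         new QueryParser(Version.LUCENE_36, "contents", new StandardAnalyzer(Version.LUCENE_36));
     Query query = parser.parse("lucene");

     TopDocs hits = searcher.search(query, 10);
     for (ScoreDoc hit : hits.scoreDocs) {
       // The identifier field was stored with Field.Store.YES, so it can be read back directly.
       System.out.println(searcher.doc(hit.doc).get("id"));
     }
     searcher.close();
     reader.close();
   }
 }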