/**
 * Fetches the attachment content from the repository. Content is flat text that can be used
 * for indexing/searching or display.
 *
 * @param attachmentName Name of the attachment.
 * @param version The version of the attachment.
 * @return the content of the Attachment as a String.
 */
protected String getAttachmentContent(String attachmentName, int version) {
    AttachmentManager mgr = m_engine.getAttachmentManager();

    try {
        Attachment att = mgr.getAttachmentInfo(attachmentName, version);
        // FIXME: Find out why sometimes att is null
        if (att != null) {
            return getAttachmentContent(att);
        }
    } catch (ProviderException e) {
        log.error("Attachment cannot be loaded", e);
    }

    // Something was wrong, no result is returned.
    return null;
}
/** {@inheritDoc} */
public void initialize(WikiEngine engine, Properties props)
        throws NoRequiredPropertyException, IOException {
    m_engine = engine;

    m_luceneDirectory = engine.getWorkDir() + File.separator + LUCENE_DIR;

    int initialDelay =
        TextUtil.getIntegerProperty(props, PROP_LUCENE_INITIALDELAY, LuceneUpdater.INITIAL_DELAY);
    int indexDelay =
        TextUtil.getIntegerProperty(props, PROP_LUCENE_INDEXDELAY, LuceneUpdater.INDEX_DELAY);

    m_analyzerClass = TextUtil.getStringProperty(props, PROP_LUCENE_ANALYZER, m_analyzerClass);

    // FIXME: Just to be simple for now, we will do full reindex
    // only if no files are in lucene directory.
    File dir = new File(m_luceneDirectory);

    log.info("Lucene enabled, cache will be in: " + dir.getAbsolutePath());

    try {
        if (!dir.exists()) {
            dir.mkdirs();
        }

        if (!dir.exists() || !dir.canWrite() || !dir.canRead()) {
            log.error("Cannot write to Lucene directory, disabling Lucene: " + dir.getAbsolutePath());
            throw new IOException("Invalid Lucene directory.");
        }

        String[] filelist = dir.list();
        if (filelist == null) {
            throw new IOException(
                "Invalid Lucene directory: cannot produce listing: " + dir.getAbsolutePath());
        }
    } catch (IOException e) {
        log.error("Problem while creating Lucene index - not using Lucene.", e);
    }

    // Start the Lucene update thread, which waits first
    // for a little while before starting to go through
    // the Lucene "pages that need updating".
    LuceneUpdater updater = new LuceneUpdater(m_engine, this, initialDelay, indexDelay);
    updater.start();
}
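// Illustrative sketch (assumption, not part of the original listing): initialize() only records the
// analyzer class name read from PROP_LUCENE_ANALYZER; the analyzer itself can then be created lazily
// by a helper such as the one below, used later in findPages(). The single-argument (Version)
// constructor assumed here matches Lucene 3.6 analyzers such as StandardAnalyzer, but the real
// helper may differ.
private Analyzer getLuceneAnalyzer() throws ProviderException {
    try {
        Class<?> clazz = Class.forName(m_analyzerClass);
        return (Analyzer) clazz.getConstructor(Version.class).newInstance(Version.LUCENE_36);
    } catch (Exception e) {
        log.error("Unable to instantiate Lucene analyzer " + m_analyzerClass, e);
        throw new ProviderException("Could not create analyzer " + m_analyzerClass);
    }
}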
/**
 * Adds a page-text pair to the Lucene update queue. Always safe to call.
 *
 * @param page WikiPage to add to the update queue.
 */
public void reindexPage(WikiPage page) {
    if (page != null) {
        String text;

        // TODO: Consider whether this would be better done in the updater thread itself?
        if (page instanceof Attachment) {
            text = getAttachmentContent((Attachment) page);
        } else {
            text = m_engine.getPureText(page);
        }

        if (text != null) {
            // Add work item to m_updates queue.
            Object[] pair = new Object[2];
            pair[0] = page;
            pair[1] = text;
            m_updates.add(pair);

            log.debug("Scheduling page " + page.getName() + " for index update");
        }
    }
}
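// Illustrative sketch (assumption, not shown in this listing): how the LuceneUpdater background
// thread could drain the m_updates queue that reindexPage() fills. It assumes m_updates is a
// synchronized List of { WikiPage, String } pairs, and hands each pair to the updatePageIndex()
// helper sketched after luceneIndexPage() below.
private void processUpdateQueue() {
    while (m_updates.size() > 0) {
        Object[] pair = (Object[]) m_updates.remove(0);
        WikiPage page = (WikiPage) pair[0];
        String text = (String) pair[1];

        try {
            updatePageIndex(page, text);
        } catch (IOException e) {
            log.error("Unable to reindex page " + page.getName(), e);
        }
    }
}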
/**
 * Returns the content of a text-based attachment as a String.
 *
 * <p>FIXME: This is a very simple implementation for some text-based attachments, mainly used for
 * testing. It should be replaced/moved to Attachment search providers or some other pluggable way
 * to search attachments.
 *
 * @param att Attachment to get content for. The filename extension is used to determine the type
 *     of the attachment.
 * @return String representing the content of the file.
 */
protected String getAttachmentContent(Attachment att) {
    AttachmentManager mgr = m_engine.getAttachmentManager();
    // FIXME: Add attachment plugin structure

    String filename = att.getFileName();

    boolean searchSuffix = false;
    for (String suffix : SEARCHABLE_FILE_SUFFIXES) {
        if (filename.endsWith(suffix)) {
            searchSuffix = true;
            break;
        }
    }

    if (searchSuffix) {
        InputStream attStream;

        try {
            attStream = mgr.getAttachmentStream(att);

            StringWriter sout = new StringWriter();
            FileUtil.copyContents(new InputStreamReader(attStream), sout);

            attStream.close();
            sout.close();

            return sout.toString();
        } catch (ProviderException e) {
            log.error("Attachment cannot be loaded", e);
        } catch (IOException e) {
            log.error("Attachment cannot be loaded", e);
        }
    }

    return null;
}
/**
 * Searches pages using a particular combination of flags.
 *
 * @param query The query to perform in Lucene query language
 * @param flags A set of flags
 * @return A Collection of SearchResult instances
 * @throws ProviderException if there is a problem with the backend
 */
public Collection findPages(String query, int flags) throws ProviderException {
    IndexSearcher searcher = null;
    ArrayList<SearchResult> list = null;
    Highlighter highlighter = null;

    try {
        String[] queryfields = {
            LUCENE_PAGE_CONTENTS, LUCENE_PAGE_NAME, LUCENE_AUTHOR, LUCENE_ATTACHMENTS
        };
        QueryParser qp =
            new MultiFieldQueryParser(Version.LUCENE_36, queryfields, getLuceneAnalyzer());

        // QueryParser qp = new QueryParser( LUCENE_PAGE_CONTENTS, getLuceneAnalyzer() );
        Query luceneQuery = qp.parse(query);

        if ((flags & FLAG_CONTEXTS) != 0) {
            highlighter = new Highlighter(
                new SimpleHTMLFormatter("<span class=\"searchmatch\">", "</span>"),
                new SimpleHTMLEncoder(),
                new QueryScorer(luceneQuery));
        }

        try {
            File dir = new File(m_luceneDirectory);
            Directory luceneDir = new SimpleFSDirectory(dir, null);
            IndexReader reader = IndexReader.open(luceneDir);
            searcher = new IndexSearcher(reader);
        } catch (Exception ex) {
            log.info("Lucene not yet ready; indexing not started", ex);
            return null;
        }

        ScoreDoc[] hits = searcher.search(luceneQuery, MAX_SEARCH_HITS).scoreDocs;

        list = new ArrayList<SearchResult>(hits.length);
        for (int curr = 0; curr < hits.length; curr++) {
            int docID = hits[curr].doc;
            Document doc = searcher.doc(docID);
            String pageName = doc.get(LUCENE_ID);
            WikiPage page = m_engine.getPage(pageName, WikiPageProvider.LATEST_VERSION);

            if (page != null) {
                if (page instanceof Attachment) {
                    // Currently attachments don't look nice on the search-results page.
                    // When the search results are cleaned up, this can be enabled again.
                }

                int score = (int) (hits[curr].score * 100);

                // Get highlighted search contexts
                String text = doc.get(LUCENE_PAGE_CONTENTS);

                String[] fragments = new String[0];
                if (text != null && highlighter != null) {
                    TokenStream tokenStream =
                        getLuceneAnalyzer().tokenStream(LUCENE_PAGE_CONTENTS, new StringReader(text));
                    fragments = highlighter.getBestFragments(tokenStream, text, MAX_FRAGMENTS);
                }

                SearchResult result = new SearchResultImpl(page, score, fragments);
                list.add(result);
            } else {
                log.error(
                    "Lucene found a result page '"
                        + pageName
                        + "' that could not be loaded, removing from Lucene cache");
                pageRemoved(new WikiPage(m_engine, pageName));
            }
        }
    } catch (IOException e) {
        log.error("Failed during lucene search", e);
    } catch (ParseException e) {
        log.info("Broken query; cannot parse query", e);
        throw new ProviderException(
            "You have entered a query Lucene cannot process: " + e.getMessage());
    } catch (InvalidTokenOffsetsException e) {
        log.error("Tokens are incompatible with provided text", e);
    } finally {
        if (searcher != null) {
            try {
                searcher.close();
            } catch (IOException e) {
                log.error(e);
            }
        }
    }

    return list;
}
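// Illustrative usage sketch (not part of the original listing): how a caller might run a query and
// print highlighted contexts. The SearchResult accessors used here (getPage(), getScore(),
// getContexts()) are assumed from the JSPWiki SearchResult interface; the query string is arbitrary.
void searchExample(String query) throws ProviderException {
    Collection results = findPages(query, FLAG_CONTEXTS);
    if (results == null) {
        return; // Index is not ready yet.
    }

    for (Iterator i = results.iterator(); i.hasNext(); ) {
        SearchResult r = (SearchResult) i.next();
        System.out.println(r.getPage().getName() + " (score " + r.getScore() + ")");

        for (String context : r.getContexts()) {
            System.out.println("  ..." + context + "...");
        }
    }
}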
/**
 * Indexes a page using the given IndexWriter.
 *
 * @param page WikiPage
 * @param text Page text to index
 * @param writer The Lucene IndexWriter to use for indexing
 * @return the created index Document
 * @throws IOException If there's an indexing problem
 */
protected Document luceneIndexPage(WikiPage page, String text, IndexWriter writer)
        throws IOException {
    if (log.isDebugEnabled()) log.debug("Indexing " + page.getName() + "...");

    // Make a new, empty document.
    Document doc = new Document();

    if (text == null) return doc;

    // Raw name is the keyword we'll use to refer to this document for updates.
    Field field = new Field(LUCENE_ID, page.getName(), Field.Store.YES, Field.Index.NOT_ANALYZED);
    doc.add(field);

    // Body text. It is stored in the doc for search contexts.
    field = new Field(
        LUCENE_PAGE_CONTENTS, text, Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO);
    doc.add(field);

    // Allow searching by page name, both beautified and raw.
    String unTokenizedTitle = StringUtils.replaceChars(
        page.getName(), MarkupParser.PUNCTUATION_CHARS_ALLOWED, c_punctuationSpaces);

    field = new Field(
        LUCENE_PAGE_NAME,
        TextUtil.beautifyString(page.getName()) + " " + unTokenizedTitle,
        Field.Store.YES,
        Field.Index.ANALYZED,
        Field.TermVector.NO);
    doc.add(field);

    // Allow searching by author name.
    if (page.getAuthor() != null) {
        field = new Field(
            LUCENE_AUTHOR,
            page.getAuthor(),
            Field.Store.YES,
            Field.Index.ANALYZED,
            Field.TermVector.NO);
        doc.add(field);
    }

    // Now add the names of the attachments of this page.
    try {
        Collection attachments = m_engine.getAttachmentManager().listAttachments(page);
        String attachmentNames = "";

        for (Iterator it = attachments.iterator(); it.hasNext(); ) {
            Attachment att = (Attachment) it.next();
            attachmentNames += att.getName() + ";";
        }

        field = new Field(
            LUCENE_ATTACHMENTS,
            attachmentNames,
            Field.Store.YES,
            Field.Index.ANALYZED,
            Field.TermVector.NO);
        doc.add(field);
    } catch (ProviderException e) {
        // Unable to read attachments
        log.error("Failed to get attachments for page", e);
    }

    writer.addDocument(doc);

    return doc;
}
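// Illustrative sketch (assumption, not shown in this listing): applying a single queued update.
// Any previous document for the page is removed by its LUCENE_ID keyword, then the page is
// re-indexed through luceneIndexPage(). getIndexWriter()/close() are the helpers also used by
// doFullLuceneReindex() below; their signatures here are assumptions.
private void updatePageIndex(WikiPage page, String text) throws IOException {
    IndexWriter writer = null;

    try {
        Directory luceneDir = new SimpleFSDirectory(new File(m_luceneDirectory), null);
        writer = getIndexWriter(luceneDir);

        // Remove the stale document for this page, then add the fresh one.
        writer.deleteDocuments(new Term(LUCENE_ID, page.getName()));
        luceneIndexPage(page, text, writer);
    } catch (ProviderException e) {
        log.error("Unable to update page '" + page.getName() + "' in the Lucene index", e);
    } finally {
        close(writer);
    }
}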
/**
 * Performs a full Lucene reindex, if necessary.
 *
 * @throws IOException If there's a problem during indexing
 */
protected void doFullLuceneReindex() throws IOException {
    File dir = new File(m_luceneDirectory);

    String[] filelist = dir.list();
    if (filelist == null) {
        throw new IOException(
            "Invalid Lucene directory: cannot produce listing: " + dir.getAbsolutePath());
    }

    try {
        if (filelist.length == 0) {
            //
            // No files? Reindex!
            //
            Date start = new Date();
            IndexWriter writer = null;

            log.info("Starting Lucene reindexing, this can take a couple of minutes...");

            Directory luceneDir = new SimpleFSDirectory(dir, null);

            try {
                writer = getIndexWriter(luceneDir);
                Collection allPages = m_engine.getPageManager().getAllPages();
                for (Iterator iterator = allPages.iterator(); iterator.hasNext(); ) {
                    WikiPage page = (WikiPage) iterator.next();

                    try {
                        String text = m_engine
                            .getPageManager()
                            .getPageText(page.getName(), WikiProvider.LATEST_VERSION);
                        luceneIndexPage(page, text, writer);
                    } catch (IOException e) {
                        log.warn("Unable to index page " + page.getName() + ", continuing to next", e);
                    }
                }

                Collection allAttachments = m_engine.getAttachmentManager().getAllAttachments();
                for (Iterator iterator = allAttachments.iterator(); iterator.hasNext(); ) {
                    Attachment att = (Attachment) iterator.next();

                    try {
                        String text = getAttachmentContent(att.getName(), WikiProvider.LATEST_VERSION);
                        luceneIndexPage(att, text, writer);
                    } catch (IOException e) {
                        log.warn(
                            "Unable to index attachment " + att.getName() + ", continuing to next", e);
                    }
                }
            } finally {
                close(writer);
            }

            Date end = new Date();
            log.info(
                "Full Lucene index finished in "
                    + (end.getTime() - start.getTime())
                    + " milliseconds.");
        } else {
            log.info("Files found in Lucene directory, not reindexing.");
        }
    } catch (NoClassDefFoundError e) {
        log.info("Lucene libraries do not exist - not using Lucene.");
    } catch (IOException e) {
        log.error("Problem while creating Lucene index - not using Lucene.", e);
    } catch (ProviderException e) {
        log.error("Problem reading pages while creating Lucene index (JSPWiki won't start.)", e);
        throw new IllegalArgumentException("unable to create Lucene index");
    } catch (Exception e) {
        log.error("Unable to start lucene", e);
    }
}
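// Illustrative sketch (assumption): the getIndexWriter()/close() helpers referenced above but not
// shown in this listing. In Lucene 3.6 an IndexWriter is configured through IndexWriterConfig; the
// analyzer comes from the getLuceneAnalyzer() sketch earlier. The real helpers may differ.
private IndexWriter getIndexWriter(Directory luceneDir) throws IOException, ProviderException {
    IndexWriterConfig writerConfig = new IndexWriterConfig(Version.LUCENE_36, getLuceneAnalyzer());
    writerConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
    return new IndexWriter(luceneDir, writerConfig);
}

private void close(IndexWriter writer) {
    try {
        if (writer != null) {
            writer.close();
        }
    } catch (IOException e) {
        log.error("Unable to close IndexWriter", e);
    }
}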