@Test public void formQuoteEscape() { assertEquals("", Util.formQuoteEscape(null)); assertEquals("abc", Util.formQuoteEscape("abc")); assertEquals(""abc"", Util.formQuoteEscape("\"abc\"")); assertEquals("å", Util.formQuoteEscape("å")); }
/**
 * Emit an HTML anchor pointing at {@code url}: the href attribute value is
 * attribute-escaped via formQuoteEscape while the visible link text is
 * HTML-escaped via htmlize.
 *
 * @param url the link target (also used as the link text)
 * @throws IOException if writing to the underlying writer fails
 */
protected void appendLink(String url) throws IOException {
  final String escapedHref = Util.formQuoteEscape(url);
  out.write("<a href=\"");
  out.write(escapedHref);
  out.write("\">");
  Util.htmlize(url, out);
  out.write("</a>");
}
/**
 * Test of URIEncode method, of class Util: spaces become '+', reserved
 * characters are percent-encoded, plain filenames pass through.
 */
@Test
public void URIEncode() {
  // { input, expected URI-encoded form }
  String[][] cases = {
    {"", ""},
    {"a b", "a+b"},
    {"a#b", "a%23b"},
    {"a/b", "a%2Fb"},
    {"README.txt", "README.txt"},
  };
  for (String[] c : cases) {
    assertEquals(c[1], Util.URIEncode(c[0]));
  }
}
/**
 * Test of readableLine method, of class Util: verifies the anchor markup
 * generated for an ordinary line ("l" class) and for a line that gets the
 * "hl" (highlight) class.
 */
@Test
public void readableLine() throws Exception {
  StringWriter out = new StringWriter();
  // hmmm - where do meaningful test start?
  Util.readableLine(42, out, null, null, null);
  assertEquals("\n<a class=\"l\" name=\"42\" href=\"#42\">42</a>", out.toString());
  out.getBuffer().setLength(0); // clear buffer
  // NOTE(review): line 110 gets class "hl" instead of "l" — presumably a
  // periodic highlight threshold inside readableLine; confirm against Util.
  Util.readableLine(110, out, null, null, null);
  assertEquals("\n<a class=\"hl\" name=\"110\" href=\"#110\">110</a>", out.toString());
}
/**
 * Test of readableSize method, of class Util: small values keep a plain
 * number, KiB/MiB units kick in at 1024, and large sizes are wrapped in
 * &lt;b&gt; tags. (Test method renamed from the misspelled "redableSize";
 * JUnit discovers tests via the annotation, so no caller breaks.)
 */
@Test
public void readableSize() {
  assertEquals("0 ", Util.readableSize(0));
  assertEquals("1 ", Util.readableSize(1));
  assertEquals("-1 ", Util.readableSize(-1));
  assertEquals("1,000 ", Util.readableSize(1000));
  assertEquals("1 KiB", Util.readableSize(1024));
  assertEquals("2.4 KiB", Util.readableSize(2500));
  assertEquals("<b>1.4 MiB</b>", Util.readableSize(1474560));
  assertEquals("<b>3,584.4 MiB</b>", Util.readableSize(3758489600L));
  assertEquals("<b>8,796,093,022,208 MiB</b>", Util.readableSize(Long.MAX_VALUE));
}
/** * List all of the files in this index database * * @throws IOException If an IO error occurs while reading from the database */ public void listFiles() throws IOException { IndexReader ireader = null; TermsEnum iter; Terms terms = null; try { ireader = DirectoryReader.open(indexDirectory); // open existing index int numDocs = ireader.numDocs(); if (numDocs > 0) { Fields uFields = MultiFields.getFields(ireader); // reader.getTermVectors(0); terms = uFields.terms(QueryBuilder.U); } iter = terms.iterator(null); // init uid iterator while (iter.term() != null) { log.fine(Util.uid2url(iter.term().utf8ToString())); iter.next(); } } finally { if (ireader != null) { try { ireader.close(); } catch (IOException e) { log.log(Level.WARNING, "An error occured while closing index reader", e); } } } }
/**
 * Remove a stale file (uidIter.term().text()) from the index database (and the xref file)
 *
 * @throws java.io.IOException if an error occurs
 */
private void removeFile() throws IOException {
  // Decode the source-root-relative path from the current uid term.
  String path = Util.uid2url(uidIter.term().utf8ToString());
  // Notify listeners before the removal actually happens.
  for (IndexChangedListener listener : listeners) {
    listener.fileRemove(path);
  }
  // Delete the document and commit immediately so the index on disk stays
  // consistent with the xref files removed below.
  writer.deleteDocuments(new Term(QueryBuilder.U, uidIter.term()));
  writer.prepareCommit();
  writer.commit();
  // The xref file is either gzipped or plain depending on configuration.
  File xrefFile;
  if (RuntimeEnvironment.getInstance().isCompressXref()) {
    xrefFile = new File(xrefDir, path + ".gz");
  } else {
    xrefFile = new File(xrefDir, path);
  }
  File parent = xrefFile.getParentFile();
  if (!xrefFile.delete() && xrefFile.exists()) {
    log.log(Level.INFO, "Failed to remove obsolete xref-file: {0}", xrefFile.getAbsolutePath());
  }
  // Remove the parent directory if it's empty. File.delete() on a directory
  // only succeeds when it is empty, so no explicit emptiness check is needed.
  if (parent.delete()) {
    log.log(Level.FINE, "Removed empty xref dir:{0}", parent.getAbsolutePath());
  }
  setDirty();
  // Notify listeners that the removal has completed.
  for (IndexChangedListener listener : listeners) {
    listener.fileRemoved(path);
  }
}
/**
 * Analyze a zip archive: every entry name is added to the full-text index
 * (and echoed into the xref), and entries recognized as Java class files are
 * delegated to the JavaClassAnalyzer so their contents are indexed too.
 *
 * @param doc the Lucene document to populate
 * @param src supplier of the raw archive stream
 * @param xrefOut where to write xref markup (may be {@code null})
 * @throws IOException if reading the archive fails
 */
@Override
public void analyze(Document doc, StreamSource src, Writer xrefOut) throws IOException {
  try (ZipInputStream zis = new ZipInputStream(src.getStream())) {
    ZipEntry entry;
    while ((entry = zis.getNextEntry()) != null) {
      String ename = entry.getName();
      if (xrefOut != null) {
        xrefOut.append("<br/><b>");
        Util.htmlize(ename, xrefOut);
        xrefOut.append("</b>");
      }
      doc.add(new TextField("full", ename, Store.NO));
      IFileAnalyzerFactory fac = AnalyzerGuru.find(ename);
      if (fac instanceof JavaClassAnalyzerFactory) {
        if (xrefOut != null) {
          xrefOut.append("<pre>");
        }
        JavaClassAnalyzer jca = (JavaClassAnalyzer) fac.getAnalyzer();
        // The nested analyzer reads only the current entry: ZipInputStream
        // signals EOF at each entry boundary.
        jca.analyze(doc, new BufferedInputStream(zis), xrefOut);
        if (xrefOut != null) {
          xrefOut.append("</pre>");
        }
      }
    }
  }
}
/**
 * Test of URIEncodePath method, of class Util: '/' separators are preserved,
 * spaces become %20 (not '+'), and reserved plus non-ASCII characters are
 * percent-encoded as UTF-8 octets.
 */
@Test
public void URIEncodePath() {
  assertEquals("", Util.URIEncodePath(""));
  assertEquals("/", Util.URIEncodePath("/"));
  assertEquals("a", Util.URIEncodePath("a"));
  assertEquals("%09", Util.URIEncodePath("\t"));
  assertEquals("a%2Bb", Util.URIEncodePath("a+b"));
  assertEquals("a%20b", Util.URIEncodePath("a b"));
  assertEquals("/a//x/yz/%23%23/%20/%20%3F", Util.URIEncodePath("/a//x/yz/##/ / ?"));
  assertEquals("foo%3A%3Abar%3A%3Atest.js", Util.URIEncodePath("foo::bar::test.js"));
  // Multi-byte UTF-8 characters expand to one %XX per octet.
  assertEquals(
      "bl%C3%A5b%C3%A6rsyltet%C3%B8y", Util.URIEncodePath("bl\u00E5b\u00E6rsyltet\u00F8y"));
}
@Test public void htmlize() throws IOException { String[][] input_output = { {"This is a test", "This is a test"}, {"Newline\nshould become <br/>", "Newline<br/>should become <br/>"}, {"Open & Grok", "Open & Grok"}, {"&<>", "&amp;&lt;&gt;"}, }; for (String[] in_out : input_output) { // 1 arg assertEquals(in_out[1], Util.htmlize(in_out[0])); // 2 args StringBuilder sb = new StringBuilder(); Util.htmlize(in_out[0], sb); assertEquals(in_out[1], sb.toString()); } }
/**
 * Get the canonical path of the related resource relative to the source root
 * directory (used file separators are all '/'). No check is made, whether the
 * obtained path is really an accessible resource on disk.
 *
 * @see HttpServletRequest#getPathInfo()
 * @return a possible empty String (denotes the source root directory) but not {@code null}.
 */
public String getPath() {
  if (path != null) {
    return path;
  }
  // Lazily canonicalize once and cache; the source root itself is
  // represented by the empty string rather than "/".
  String canonical = Util.getCanonicalPath(req.getPathInfo(), '/');
  path = "/".equals(canonical) ? "" : canonical;
  return path;
}
/**
 * Test of breadcrumbPath method (all overloads), of class Util: verifies the
 * anchor markup produced for plain paths, custom separators, appended link
 * suffixes, and the "compact" mode that resolves relative path segments.
 */
@Test
public void breadcrumbPath() {
  assertEquals(null, Util.breadcrumbPath("/root/", null));
  assertEquals("", Util.breadcrumbPath("/root/", ""));
  assertEquals("<a href=\"/root/x\">x</a>", Util.breadcrumbPath("/root/", "x"));
  assertEquals("<a href=\"/root/xx\">xx</a>", Util.breadcrumbPath("/root/", "xx"));
  // parent directories have a trailing slash in href
  assertEquals(
      "<a href=\"/r/a/\">a</a>/<a href=\"/r/a/b\">b</a>", Util.breadcrumbPath("/r/", "a/b"));
  // if basename is a dir (ends with file separator), href link also
  // ends with a '/'
  assertEquals(
      "<a href=\"/r/a/\">a</a>/<a href=\"/r/a/b/\">b</a>/", Util.breadcrumbPath("/r/", "a/b/"));
  // should work the same way with a '.' as file separator
  assertEquals(
      "<a href=\"/r/java/\">java</a>."
          + "<a href=\"/r/java/lang/\">lang</a>."
          + "<a href=\"/r/java/lang/String\">String</a>",
      Util.breadcrumbPath("/r/", "java.lang.String", '.'));
  // suffix added to the link?
  assertEquals(
      "<a href=\"/root/xx&project=y\">xx</a>",
      Util.breadcrumbPath("/root/", "xx", '/', "&project=y", false));
  // compact: path needs to be resolved to /xx and no link is added
  // for the virtual root directory (parent) but emitted as plain text.
  // Prefix gets just prefixed as is and not mangled wrt. path -> "//"
  assertEquals(
      "/<a href=\"/root//xx&project=y\">xx</a>",
      Util.breadcrumbPath("/root/", "../xx", '/', "&project=y", true));
  // relative paths are resolved wrt. / , so path resolves to /a/c/d
  assertEquals(
      "/<a href=\"/r//a/\">a</a>/"
          + "<a href=\"/r//a/c/\">c</a>/"
          + "<a href=\"/r//a/c/d\">d</a>",
      Util.breadcrumbPath("/r/", "../a/b/../c//d", '/', "", true));
}
/**
 * Terminate the current line and insert preamble for the next line. The line count will be
 * incremented.
 *
 * @throws IOException on error when writing the xref
 */
protected void startNewLine() throws IOException {
  String iconId = null;
  int line = getLineNumber() + 1;
  boolean skipNl = false;
  setLineNumber(line);
  if (scopesEnabled) {
    startScope();
    if (scopeOpen && scope == null) {
      // An open scope just ended: close its span before the new line starts.
      scopeOpen = false;
      out.write("</span>");
      skipNl = true;
    } else if (scope != null) {
      String scopeId = generateId(scope);
      if (scope.getLineFrom() == line) {
        // First line of the scope: emit the head span with name + signature.
        out.write("<span id='");
        out.write(scopeId);
        out.write("' class='scope-head'><span class='scope-signature'>");
        out.write(htmlize(scope.getName() + scope.getSignature()));
        out.write("</span>");
        iconId = scopeId + "_fold_icon";
        skipNl = true;
      } else if (scope.getLineFrom() == line - 1) {
        // Second line of the scope: open the foldable body span.
        if (scopeOpen) {
          out.write("</span>");
        }
        out.write("<span id='");
        out.write(scopeId);
        out.write("_fold' class='scope-body'>");
        skipNl = true;
      }
      scopeOpen = true;
    }
  }
  // Emit the line-number anchor (plus annotation/user links when present);
  // skipNl suppresses the leading newline when spans were just written.
  Util.readableLine(
      line, out, annotation, userPageLink, userPageSuffix, getProjectPostfix(true), skipNl);
  if (foldingEnabled && scopesEnabled) {
    if (iconId != null) {
      // This line starts a scope: attach a clickable fold icon.
      out.write("<a href=\"#\" onclick='fold(this.parentNode.id)' id='");
      out.write(iconId);
      /* space inside span for IE support */
      out.write("'><span class='fold-icon'> </span></a>");
    } else {
      // Keep column alignment for lines without a fold icon.
      out.write("<span class='fold-space'> </span>");
    }
  }
}
/**
 * Build the {@code queryAsURI} string that holds the query in a form that's suitable for sending
 * it as part of a URI.
 *
 * @param subqueries a map containing the query text for each field
 */
private void buildQueryAsURI(Map<String, String> subqueries) {
  StringBuilder uri = new StringBuilder();
  for (Map.Entry<String, String> subquery : subqueries.entrySet()) {
    // Join successive field=value pairs with '&'. Every iteration appends
    // at least "=", so a non-empty buffer means a pair was already written.
    if (uri.length() > 0) {
      uri.append('&');
    }
    uri.append(subquery.getKey()).append("=").append(Util.URIEncode(subquery.getValue()));
  }
  queryAsURI = uri.toString();
}
/**
 * Test of dumpConfiguration method, of class Util: the output must be an
 * HTML table and must also be well-formed XML (verified by parsing it).
 */
@Test
public void dumpConfiguration() throws Exception {
  StringBuilder out = new StringBuilder();
  Util.dumpConfiguration(out);
  String s = out.toString();
  // Verify that we got a table.
  assertTrue(s.startsWith("<table"));
  // Verify that the output is well-formed.
  String xml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" + s;
  DocumentBuilderFactory.newInstance()
      .newDocumentBuilder()
      .parse(new ByteArrayInputStream(xml.getBytes("UTF-8")));
}
/**
 * Populate a Lucene document with the required fields.
 *
 * @param doc The document to populate
 * @param file The file to index
 * @param path Where the file is located (from source root)
 * @param fa The analyzer to use on the file
 * @param xrefOut Where to write the xref (possibly {@code null})
 * @throws IOException If an exception occurs while collecting the data
 */
public void populateDocument(
    Document doc, File file, String path, FileAnalyzer fa, Writer xrefOut) throws IOException {
  String date = DateTools.timeToString(file.lastModified(), DateTools.Resolution.MILLISECOND);
  // The uid encodes path + modification date; it is what the indexer later
  // compares to decide whether a document is stale.
  doc.add(new Field(QueryBuilder.U, Util.path2uid(path, date), string_ft_stored_nanalyzed_norms));
  doc.add(
      new Field(
          QueryBuilder.FULLPATH, file.getAbsolutePath(), string_ft_nstored_nanalyzed_norms));
  doc.add(new SortedDocValuesField(QueryBuilder.FULLPATH, new BytesRef(file.getAbsolutePath())));
  try {
    HistoryReader hr = HistoryGuru.getInstance().getHistoryReader(file);
    if (hr != null) {
      doc.add(new TextField(QueryBuilder.HIST, hr));
      // date = hr.getLastCommentDate() //RFE
    }
  } catch (HistoryException e) {
    // History is optional: log and keep indexing the file without it.
    LOGGER.log(Level.WARNING, "An error occurred while reading history: ", e);
  }
  doc.add(new Field(QueryBuilder.DATE, date, string_ft_stored_nanalyzed_norms));
  doc.add(new SortedDocValuesField(QueryBuilder.DATE, new BytesRef(date)));
  if (path != null) {
    doc.add(new TextField(QueryBuilder.PATH, path, Store.YES));
    Project project = Project.getProject(path);
    if (project != null) {
      doc.add(new TextField(QueryBuilder.PROJECT, project.getPath(), Store.YES));
    }
  }
  if (fa != null) {
    Genre g = fa.getGenre();
    if (g == Genre.PLAIN || g == Genre.XREFABLE || g == Genre.HTML) {
      doc.add(new Field(QueryBuilder.T, g.typeName(), string_ft_stored_nanalyzed_norms));
    }
    // Delegate content analysis (full text, definitions, xref output, ...)
    // to the file-type-specific analyzer.
    fa.analyze(doc, StreamSource.fromFile(file), xrefOut);
    String type = fa.getFileTypeName();
    doc.add(new StringField(QueryBuilder.TYPE, type, Store.YES));
  }
}
/**
 * Test of diffline method, of class Util: the first returned string must be
 * the unchanged old line, the second the new line with the added region
 * wrapped in a {@code <span class="a">} marker.
 */
@Test
public void diffline() {
  // { old line, new line, expected marked-up new line }
  String[][] tests = {
    {
      "\"(ses_id, mer_id, pass_id, \" + refCol +\" , mer_ref, amnt, "
          + "cur, ps_id, ret_url, d_req_time, d_req_mil, h_resp_time, "
          + "h_resp_mil) \"",
      "\"(ses_id, mer_id, pass_id, \" + refCol +\" , mer_ref, amnt, "
          + "cur, ps_id, ret_url, exp_url, d_req_time, d_req_mil, "
          + "h_resp_time, h_resp_mil) \"",
      "\"(ses_id, mer_id, pass_id, \" + refCol +\" , mer_ref, amnt, "
          + "cur, ps_id, ret_url, <span class=\"a\">exp_url, "
          + "</span>d_req_time, d_req_mil, h_resp_time, h_resp_mil) \""
    },
    {
      "\"VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\", values);",
      "\"VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\", values);",
      "\"VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?<span "
          + "class=\"a\">, ?</span>)\", values);"
    },
    {
      "char *config_list = NULL;",
      "char **config_list = NULL;",
      "char *<span class=\"a\">*</span>config_list = NULL;"
    },
    {
      "* An error occured or there is non-numeric stuff at the end",
      "* An error occurred or there is non-numeric stuff at the end",
      "* An error occur<span class=\"a\">r</span>ed or there is "
          + "non-numeric stuff at the end"
    }
  };
  for (int i = 0; i < tests.length; i++) {
    String[] strings =
        Util.diffline(new StringBuilder(tests[i][0]), new StringBuilder(tests[i][1]));
    // BUGFIX: assertEquals(message, expected, actual) — the original had
    // expected and actual swapped, which produces misleading failure
    // messages ("expected <actual> but was <expected>").
    assertEquals("" + i + "," + 0, tests[i][0], strings[0]);
    assertEquals("" + i + "," + 1, tests[i][2], strings[1]);
  }
}
/**
 * Write a JavaScript function that returns an array with the definitions to list in the
 * navigation panel. Each element of the array is itself an array containing the name of the
 * definition type, the CSS class name for the type, and an array of (symbol, line) pairs for the
 * definitions of that type.
 */
private void writeSymbolTable() throws IOException {
  if (defs == null) {
    // No definitions, no symbol table to write
    return;
  }
  // We want the symbol table to be sorted
  Comparator<Tag> cmp =
      new Comparator<Tag>() {
        @Override
        public int compare(Tag tag1, Tag tag2) {
          // Order by symbol name, and then by line number if multiple
          // definitions use the same symbol name
          int ret = tag1.symbol.compareTo(tag2.symbol);
          if (ret == 0) {
            ret = tag1.line - tag2.line;
          }
          return ret;
        }
      };
  // Bucket tags by style name; each bucket stays sorted via the comparator.
  Map<String, SortedSet<Tag>> symbols = new HashMap<>();
  for (Tag tag : defs.getTags()) {
    Style style = getStyle(tag.type);
    if (style != null && style.title != null) {
      SortedSet<Tag> tags = symbols.get(style.name);
      if (tags == null) {
        tags = new TreeSet<>(cmp);
        symbols.put(style.name, tags);
      }
      tags.add(tag);
    }
  }
  // TODO try to get rid of included js scripts generated from here (all js should ideally be in
  // util)
  out.append("<script type=\"text/javascript\">/* <![CDATA[ */\n");
  out.append("function get_sym_list(){return [");
  boolean first = true;
  // Iterate DEFINITION_STYLES (not the map) so output order is stable.
  for (Style style : DEFINITION_STYLES) {
    SortedSet<Tag> tags = symbols.get(style.name);
    if (tags != null) {
      if (!first) {
        out.append(',');
      }
      out.append("[\"");
      out.append(style.title);
      out.append("\",\"");
      out.append(style.ssClass);
      out.append("\",[");
      boolean firstTag = true;
      for (Tag tag : tags) {
        if (!firstTag) {
          out.append(',');
        }
        out.append('[');
        out.append(Util.jsStringLiteral(tag.symbol));
        out.append(',');
        out.append(Integer.toString(tag.line));
        out.append(']');
        firstTag = false;
      }
      out.append("]]");
      first = false;
    }
  }
  /* no LF intentionally - xml is whitespace aware ... */
  out.append("];} /* ]]> */</script>");
}
/**
 * Test of stripPathPrefix method, of class Util: the prefix is only stripped
 * on a path-component boundary (so "/abc" does not strip from "/abcdef"),
 * and the full path is returned unchanged when no proper strip is possible.
 */
@Test
public void stripPathPrefix() {
  assertEquals("/", Util.stripPathPrefix("/", "/"));
  assertEquals("/abc", Util.stripPathPrefix("/abc", "/abc"));
  assertEquals("/abc/", Util.stripPathPrefix("/abc", "/abc/"));
  assertEquals("/abc", Util.stripPathPrefix("/abc/", "/abc"));
  assertEquals("/abc/", Util.stripPathPrefix("/abc/", "/abc/"));
  assertEquals("abc", Util.stripPathPrefix("/", "/abc"));
  assertEquals("abc/def", Util.stripPathPrefix("/", "/abc/def"));
  assertEquals("def", Util.stripPathPrefix("/abc", "/abc/def"));
  assertEquals("def", Util.stripPathPrefix("/abc/", "/abc/def"));
  // No strip across a component boundary:
  assertEquals("/abcdef", Util.stripPathPrefix("/abc", "/abcdef"));
  assertEquals("/abcdef", Util.stripPathPrefix("/abc/", "/abcdef"));
  assertEquals("def/ghi", Util.stripPathPrefix("/abc", "/abc/def/ghi"));
  assertEquals("def/ghi", Util.stripPathPrefix("/abc/", "/abc/def/ghi"));
}
/**
 * Test of jsStringLiteral method, of class Util: newlines, carriage returns,
 * quotes and backslashes are escaped and the result is double-quoted, making
 * it safe to embed in generated JavaScript.
 */
@Test
public void jsStringLiteral() {
  assertEquals("\"abc\\n\\r\\\"\\\\\"", Util.jsStringLiteral("abc\n\r\"\\"));
}
/**
 * Update the content of this index database
 *
 * @throws IOException if an error occurs
 * @throws HistoryException if an error occurs when accessing the history
 */
public void update() throws IOException, HistoryException {
  // Guard against concurrent runs of the indexer on this database.
  synchronized (lock) {
    if (running) {
      throw new IOException("Indexer already running!");
    }
    running = true;
    interrupted = false;
  }
  String ctgs = RuntimeEnvironment.getInstance().getCtags();
  if (ctgs != null) {
    ctags = new Ctags();
    ctags.setBinary(ctgs);
  }
  if (ctags == null) {
    // Indexing proceeds without definitions when ctags is unavailable.
    log.severe("Unable to run ctags! searching definitions will not work!");
  }
  if (ctags != null) {
    String filename = RuntimeEnvironment.getInstance().getCTagsExtraOptionsFile();
    if (filename != null) {
      ctags.setCTagsExtraOptionsFile(filename);
    }
  }
  try {
    Analyzer analyzer = AnalyzerGuru.getAnalyzer();
    IndexWriterConfig iwc = new IndexWriterConfig(SearchEngine.LUCENE_VERSION, analyzer);
    iwc.setOpenMode(OpenMode.CREATE_OR_APPEND);
    // iwc.setRAMBufferSizeMB(256.0); //TODO check what is the sweet spot
    writer = new IndexWriter(indexDirectory, iwc);
    writer.commit(); // to make sure index exists on the disk
    // writer.setMaxFieldLength(RuntimeEnvironment.getInstance().getIndexWordLimit());
    // Default to the whole source root (or the project root) when no
    // explicit directories were configured.
    if (directories.isEmpty()) {
      if (project == null) {
        directories.add("");
      } else {
        directories.add(project.getPath());
      }
    }
    for (String dir : directories) {
      File sourceRoot;
      if ("".equals(dir)) {
        sourceRoot = RuntimeEnvironment.getInstance().getSourceRootFile();
      } else {
        sourceRoot = new File(RuntimeEnvironment.getInstance().getSourceRootFile(), dir);
      }
      HistoryGuru.getInstance().ensureHistoryCacheExists(sourceRoot);
      // uid terms for this directory all share this prefix.
      String startuid = Util.path2uid(dir, "");
      IndexReader reader = DirectoryReader.open(indexDirectory); // open existing index
      Terms terms = null;
      int numDocs = reader.numDocs();
      if (numDocs > 0) {
        Fields uFields = MultiFields.getFields(reader); // reader.getTermVectors(0);
        terms = uFields.terms(QueryBuilder.U);
      }
      try {
        if (numDocs > 0) {
          uidIter = terms.iterator(null);
          // Position the iterator at the first uid of this directory.
          TermsEnum.SeekStatus stat = uidIter.seekCeil(new BytesRef(startuid), true); // init uid
          if (stat == TermsEnum.SeekStatus.END || stat == TermsEnum.SeekStatus.NOT_FOUND) {
            uidIter = null;
          }
        }
        // TODO below should be optional, since it traverses the tree once more to get total
        // count! :(
        int file_cnt = 0;
        if (RuntimeEnvironment.getInstance().isPrintProgress()) {
          log.log(Level.INFO, "Counting files in {0} ...", dir);
          // First pass: count only, for progress reporting.
          file_cnt = indexDown(sourceRoot, dir, true, 0, 0);
          if (log.isLoggable(Level.INFO)) {
            log.log(
                Level.INFO, "Need to process: {0} files for {1}", new Object[] {file_cnt, dir});
          }
        }
        // Second pass: actually index.
        indexDown(sourceRoot, dir, false, 0, file_cnt);
        // Any remaining uids with this prefix belong to files that no
        // longer exist on disk — purge them.
        while (uidIter != null
            && uidIter.term() != null
            && uidIter.term().utf8ToString().startsWith(startuid)) {
          removeFile();
          uidIter.next();
        }
      } finally {
        reader.close();
      }
    }
  } finally {
    // Close writer and ctags, and clear the running flag, regardless of
    // how indexing terminated.
    if (writer != null) {
      try {
        writer.prepareCommit();
        writer.commit();
        writer.close();
      } catch (IOException e) {
        log.log(Level.WARNING, "An error occured while closing writer", e);
      }
    }
    if (ctags != null) {
      try {
        ctags.close();
      } catch (IOException e) {
        log.log(Level.WARNING, "An error occured while closing ctags process", e);
      }
    }
    synchronized (lock) {
      running = false;
    }
  }
  if (!isInterrupted() && isDirty()) {
    if (RuntimeEnvironment.getInstance().isOptimizeDatabase()) {
      optimize();
    }
    createSpellingSuggestions();
    // Touch (or create) the timestamp file that marks when the index was
    // last updated.
    RuntimeEnvironment env = RuntimeEnvironment.getInstance();
    File timestamp = new File(env.getDataRootFile(), "timestamp");
    if (timestamp.exists()) {
      if (!timestamp.setLastModified(System.currentTimeMillis())) {
        log.log(
            Level.WARNING,
            "Failed to set last modified time on ''{0}'', used for timestamping the index database.",
            timestamp.getAbsolutePath());
      }
    } else {
      if (!timestamp.createNewFile()) {
        log.log(
            Level.WARNING,
            "Failed to create file ''{0}'', used for timestamping the index database.",
            timestamp.getAbsolutePath());
      }
    }
  }
}
/**
 * Generate indexes recursively
 *
 * @param dir the root indexDirectory to generate indexes for
 * @param parent the source-root-relative path of {@code dir}
 * @param count_only if true will just traverse the source root and count files
 * @param cur_count current count during the traversal of the tree
 * @param est_total estimate total files to process
 * @return the updated file count after visiting this subtree
 */
private int indexDown(File dir, String parent, boolean count_only, int cur_count, int est_total)
    throws IOException {
  int lcur_count = cur_count;
  if (isInterrupted()) {
    return lcur_count;
  }
  if (!accept(dir)) {
    return lcur_count;
  }
  File[] files = dir.listFiles();
  if (files == null) {
    log.log(Level.SEVERE, "Failed to get file listing for: {0}", dir.getAbsolutePath());
    return lcur_count;
  }
  // Deterministic traversal order so it matches the sorted uid terms.
  Arrays.sort(
      files,
      new Comparator<File>() {
        @Override
        public int compare(File p1, File p2) {
          return p1.getName().compareTo(p2.getName());
        }
      });
  for (File file : files) {
    if (accept(dir, file)) {
      String path = parent + '/' + file.getName();
      if (file.isDirectory()) {
        lcur_count = indexDown(file, path, count_only, lcur_count, est_total);
      } else {
        lcur_count++;
        if (count_only) {
          continue;
        }
        if (RuntimeEnvironment.getInstance().isPrintProgress()
            && est_total > 0
            && log.isLoggable(Level.INFO)) {
          log.log(
              Level.INFO,
              "Progress: {0} ({1}%)",
              new Object[] {lcur_count, (lcur_count * 100.0f / est_total)});
        }
        if (uidIter != null) {
          // Compare this file's uid against the (sorted) uid iterator to
          // decide whether the existing document is stale or up to date.
          String uid =
              Util.path2uid(
                  path,
                  DateTools.timeToString(
                      file.lastModified(), DateTools.Resolution.MILLISECOND)); // construct uid for doc
          BytesRef buid = new BytesRef(uid);
          // Documents with smaller uids reference files that vanished or
          // changed — remove them.
          while (uidIter.term() != null
              && uidIter.term().compareTo(emptyBR) != 0
              && uidIter.term().compareTo(buid) < 0) {
            removeFile();
            uidIter.next();
          }
          if (uidIter.term() != null && uidIter.term().bytesEquals(buid)) {
            uidIter.next(); // keep matching docs
            continue;
          }
        }
        try {
          addFile(file, path);
        } catch (Exception e) {
          log.log(Level.WARNING, "Failed to add file " + file.getAbsolutePath(), e);
        }
      }
    }
  }
  return lcur_count;
}
/**
 * Write a cross referenced HTML file.
 *
 * @param out Writer to store HTML cross-reference
 * @throws IOException if writing fails
 */
@Override
public void writeXref(Writer out) throws IOException {
  // Emit the previously collected content, HTML-escaped in one shot.
  final String escaped = Util.htmlize(content);
  out.write(escaped);
}
/**
 * Get the URI encoded canonical path to the related file or directory (the URI part between the
 * servlet path and the start of the query string).
 *
 * @return an URI encoded path which might be an empty string but not {@code null}.
 * @see #getPath()
 */
public String getUriEncodedPath() {
  if (uriEncodedPath != null) {
    return uriEncodedPath;
  }
  // Lazily compute once and cache for subsequent calls.
  uriEncodedPath = Util.URIEncodePath(getPath());
  return uriEncodedPath;
}
/**
 * Get all data required to create a diff view wrt. to this request in one go.
 *
 * @return an instance with just enough information to render a sufficient view. If not all
 *     required parameters were given either they are supplemented with reasonable defaults if
 *     possible, otherwise the related field(s) are {@code null}. {@link DiffData#errorMsg} {@code
 *     != null} indicates, that an error occurred and one should not try to render a view.
 */
public DiffData getDiffData() {
  DiffData data = new DiffData();
  // NOTE(review): assumes the request path contains a '/'; lastIndexOf
  // would return -1 otherwise and substring would throw — confirm callers
  // guarantee this.
  data.path = getPath().substring(0, path.lastIndexOf('/'));
  data.filename = Util.htmlize(getResourceFile().getName());
  String srcRoot = getSourceRootPath();
  String context = req.getContextPath();
  String[] filepath = new String[2];
  data.rev = new String[2];
  data.file = new String[2][];
  data.param = new String[2];
  /*
   * Basically the request URI looks like this:
   * http://$site/$webapp/diff/$resourceFile?r1=$fileA@$revA&r2=$fileB@$revB
   * The code below extracts file path and revision from the URI.
   */
  for (int i = 1; i <= 2; i++) {
    String[] tmp = null;
    String p = req.getParameter("r" + i);
    if (p != null) {
      tmp = p.split("@");
    }
    if (tmp != null && tmp.length == 2) {
      filepath[i - 1] = tmp[0];
      data.rev[i - 1] = tmp[1];
    }
  }
  // Both revisions must be present, non-empty and distinct.
  if (data.rev[0] == null
      || data.rev[1] == null
      || data.rev[0].length() == 0
      || data.rev[1].length() == 0
      || data.rev[0].equals(data.rev[1])) {
    data.errorMsg =
        "Please pick two revisions to compare the changed "
            + "from the <a href=\""
            + context
            + Prefix.HIST_L
            + getUriEncodedPath()
            + "\">history</a>";
    return data;
  }
  data.genre = AnalyzerGuru.getGenre(getResourceFile().getName());
  if (data.genre == null || txtGenres.contains(data.genre)) {
    InputStream[] in = new InputStream[2];
    try {
      // Get input stream for both older and newer file.
      for (int i = 0; i < 2; i++) {
        File f = new File(srcRoot + filepath[i]);
        in[i] = HistoryGuru.getInstance().getRevision(f.getParent(), f.getName(), data.rev[i]);
        if (in[i] == null) {
          data.errorMsg =
              "Unable to get revision "
                  + Util.htmlize(data.rev[i])
                  + " for file: "
                  + Util.htmlize(getPath());
          return data;
        }
      }
      /*
       * If the genre of the older revision cannot be determined,
       * (this can happen if the file was empty), try with newer
       * version.
       */
      for (int i = 0; i < 2 && data.genre == null; i++) {
        try {
          data.genre = AnalyzerGuru.getGenre(in[i]);
        } catch (IOException e) {
          data.errorMsg = "Unable to determine the file type: " + Util.htmlize(e.getMessage());
        }
      }
      if (data.genre != Genre.PLAIN && data.genre != Genre.HTML) {
        return data;
      }
      // Read both revisions line by line (with tabs expanded per project
      // settings) into data.file[0] / data.file[1].
      ArrayList<String> lines = new ArrayList<>();
      Project p = getProject();
      for (int i = 0; i < 2; i++) {
        try (BufferedReader br =
            new BufferedReader(ExpandTabsReader.wrap(new InputStreamReader(in[i]), p))) {
          String line;
          while ((line = br.readLine()) != null) {
            lines.add(line);
          }
          data.file[i] = lines.toArray(new String[lines.size()]);
          lines.clear();
        }
        // Stream is closed by try-with-resources; null it so the finally
        // block below does not close it twice.
        in[i] = null;
      }
    } catch (Exception e) {
      data.errorMsg = "Error reading revisions: " + Util.htmlize(e.getMessage());
    } finally {
      for (int i = 0; i < 2; i++) {
        IOUtils.close(in[i]);
      }
    }
    if (data.errorMsg != null) {
      return data;
    }
    try {
      data.revision = Diff.diff(data.file[0], data.file[1]);
    } catch (DifferentiationFailedException e) {
      data.errorMsg = "Unable to get diffs: " + Util.htmlize(e.getMessage());
    }
    // Rebuild the r1/r2 query parameters in properly encoded form.
    for (int i = 0; i < 2; i++) {
      try {
        URI u = new URI(null, null, null, filepath[i] + "@" + data.rev[i], null);
        data.param[i] = u.getRawQuery();
      } catch (URISyntaxException e) {
        LOGGER.log(Level.WARNING, "Failed to create URI: ", e);
      }
    }
    data.full = fullDiff();
    data.type = getDiffType();
  }
  return data;
}
/**
 * Test of path2uid method, of class Util: the uid replaces path separators
 * with NUL characters and appends the date stamp as the final component.
 */
@Test
public void path2uid() {
  assertEquals("\u0000etc\u0000passwd\u0000date", Util.path2uid("/etc/passwd", "date"));
}
/**
 * Test of uid2url method, of class Util: decoding a uid produced by
 * path2uid recovers the original path (round-trip property).
 */
@Test
public void uid2url() {
  assertEquals("/etc/passwd", Util.uid2url(Util.path2uid("/etc/passwd", "date")));
}
/**
 * Look for context (lines matching the configured matchers) in the given
 * input and write the matching lines (as HTML or Hit objects) to the output.
 *
 * @param in File to be matched; {@code null} means "only report matching
 *     definition tags, do not scan content"
 * @param out to write the context; {@code null} means collect into
 *     {@code hits} instead
 * @param urlPrefix URL prefix prepended to generated links
 * @param morePrefix to link to more... page
 * @param path path of the file
 * @param tags format to highlight defs.
 * @param limit should the number of matching lines be limited?
 * @param hits list collecting Hit objects when {@code out} is {@code null}
 * @return Did it get any matching context?
 */
public boolean getContext(
    Reader in,
    Writer out,
    String urlPrefix,
    String morePrefix,
    String path,
    Definitions tags,
    boolean limit,
    List<Hit> hits) {
  // Alternate the highlight style between successive files.
  alt = !alt;
  if (m == null) {
    // No matchers configured — nothing can match.
    return false;
  }
  boolean anything = false;
  TreeMap<Integer, String[]> matchingTags = null;
  if (tags != null) {
    matchingTags = new TreeMap<Integer, String[]>();
    try {
      // Check every definition tag against every matcher; matching tags are
      // either emitted immediately (no content scan) or remembered by line
      // number for the scan below.
      for (Definitions.Tag tag : tags.getTags()) {
        for (int i = 0; i < m.length; i++) {
          if (m[i].match(tag.symbol) == LineMatcher.MATCHED) {
            /*
             * desc[1] is line number
             * desc[2] is type
             * desc[3] is matching line;
             */
            String[] desc = {
              tag.symbol, Integer.toString(tag.line), tag.type, tag.text,
            };
            if (in == null) {
              if (out == null) {
                // Collect as a Hit object for programmatic consumers.
                Hit hit =
                    new Hit(
                        path,
                        Util.htmlize(desc[3]).replaceAll(desc[0], "<em>" + desc[0] + "</em>"),
                        desc[1],
                        false,
                        alt);
                hits.add(hit);
                anything = true;
              } else {
                // Emit the definition hit directly as an HTML link.
                out.write("<a class=\"s\" href=\"");
                out.write(Util.URIEncodePath(urlPrefix));
                out.write(Util.URIEncodePath(path));
                out.write("#");
                out.write(desc[1]);
                out.write("\"><span class=\"l\">");
                out.write(desc[1]);
                out.write("</span> ");
                out.write(Util.htmlize(desc[3]).replaceAll(desc[0], "<em>" + desc[0] + "</em>"));
                out.write("</a> <i> ");
                out.write(desc[2]);
                out.write(" </i><br/>");
                anything = true;
              }
            } else {
              matchingTags.put(tag.line, desc);
            }
            break;
          }
        }
      }
    } catch (IOException e) {
      if (hits != null) {
        // @todo verify why we ignore all exceptions?
        OpenGrokLogger.getLogger().log(Level.WARNING, "Could not get context for " + path, e);
      }
    }
  }
  /** Just to get the matching tag send a null in */
  if (in == null) {
    return anything;
  }
  int charsRead = 0;
  boolean truncated = false;
  boolean lim = limit;
  if (!RuntimeEnvironment.getInstance().isQuickContextScan()) {
    lim = false;
  }
  if (lim) {
    // Quick scan: read at most MAXFILEREAD chars into the buffer and only
    // scan that prefix.
    try {
      charsRead = in.read(buffer);
      if (charsRead == MAXFILEREAD) {
        // we probably only read parts of the file, so set the
        // truncated flag to enable the [all...] link that
        // requests all matches
        truncated = true;
        // truncate to last line read (don't look more than 100
        // characters back)
        for (int i = charsRead - 1; i > charsRead - 100; i--) {
          if (buffer[i] == '\n') {
            charsRead = i;
            break;
          }
        }
      }
    } catch (IOException e) {
      OpenGrokLogger.getLogger().log(Level.WARNING, "An error occured while reading data", e);
      return anything;
    }
    if (charsRead == 0) {
      return anything;
    }
    tokens.reInit(
        buffer, charsRead, out, Util.URIEncodePath(urlPrefix + path) + "#", matchingTags);
  } else {
    // Full scan: stream straight from the reader.
    tokens.reInit(in, out, Util.URIEncodePath(urlPrefix + path) + "#", matchingTags);
  }
  if (hits != null) {
    tokens.setAlt(alt);
    tokens.setHitList(hits);
    tokens.setFilename(path);
  }
  try {
    String token;
    int matchState = LineMatcher.NOT_MATCHED;
    int matchedLines = 0;
    // Tokenize the content; in limited mode stop after 10 matched lines.
    while ((token = tokens.yylex()) != null && (!lim || matchedLines < 10)) {
      for (int i = 0; i < m.length; i++) {
        matchState = m[i].match(token);
        if (matchState == LineMatcher.MATCHED) {
          tokens.printContext(urlPrefix);
          matchedLines++;
          // out.write("<br> <i>Matched " + token + " maxlines = " + matchedLines + "</i><br>");
          break;
        } else if (matchState == LineMatcher.WAIT) {
          // Matcher needs more tokens before it can decide.
          tokens.holdOn();
        } else {
          tokens.neverMind();
        }
      }
    }
    anything = matchedLines > 0;
    tokens.dumpRest();
    // Offer an [all...] link when output was cut short by the limit.
    if (lim && (truncated || matchedLines == 10) && out != null) {
      out.write(
          " [<a href=\""
              + Util.URIEncodePath(morePrefix + path)
              + "?"
              + queryAsURI
              + "\">all</a>...]");
    }
  } catch (IOException e) {
    OpenGrokLogger.getLogger().log(Level.WARNING, "Could not get context for " + path, e);
  } finally {
    // Always release the reader and flush whatever was written.
    if (in != null) {
      try {
        in.close();
      } catch (IOException e) {
        OpenGrokLogger.getLogger().log(Level.WARNING, "An error occured while closing stream", e);
      }
    }
    if (out != null) {
      try {
        out.flush();
      } catch (IOException e) {
        OpenGrokLogger.getLogger()
            .log(Level.WARNING, "An error occured while flushing stream", e);
      }
    }
  }
  return anything;
}