@Override public boolean makeChange(BasePanel panel, BibtexDatabase secondary, NamedCompound undoEdit) { panel.database().removeEntry(memEntry.getId()); undoEdit.addEdit(new UndoableRemoveEntry(panel.database(), memEntry, panel)); secondary.removeEntry(tmpEntry.getId()); return true; }
/**
 * Checks if the two entries represent the same publication.
 *
 * @param one BibtexEntry
 * @param two BibtexEntry
 * @return true if the two entries are considered duplicates
 */
public static boolean isDuplicate(BibtexEntry one, BibtexEntry two) {
    // First check if they are of the same type - a necessary condition:
    if (one.getType() != two.getType()) {
        return false;
    }

    // Then check if they have the same required fields.
    // The null check must come before toArray() is called; otherwise a type without
    // required fields would throw a NullPointerException instead of taking the fallback branch.
    double[] req;
    if (one.getType().getRequiredFields() == null) {
        req = new double[] {0., 0.};
    } else {
        String[] fields = one.getType().getRequiredFields().toArray(new String[0]);
        req = DuplicateCheck.compareFieldSet(fields, one, two);
    }

    if (Math.abs(req[0] - DuplicateCheck.duplicateThreshold) > DuplicateCheck.doubtRange) {
        // Far from the threshold value, so we base our decision on the required fields only
        return req[0] >= DuplicateCheck.duplicateThreshold;
    }

    // Close to the threshold value, so we take a look at the optional fields, if any:
    if (one.getType().getOptionalFields() != null) {
        String[] fields = one.getType().getOptionalFields().toArray(new String[0]);
        double[] opt = DuplicateCheck.compareFieldSet(fields, one, two);
        double totValue = ((DuplicateCheck.reqWeight * req[0] * req[1]) + (opt[0] * opt[1]))
                / ((req[1] * DuplicateCheck.reqWeight) + opt[1]);
        return totValue >= DuplicateCheck.duplicateThreshold;
    }
    return req[0] >= DuplicateCheck.duplicateThreshold;
}
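/*
 * A minimal standalone sketch of how the required/optional scores above combine into the final
 * decision. req[0] and opt[0] are mean per-field similarities, req[1] and opt[1] the number of
 * fields actually compared; the concrete reqWeight and duplicateThreshold values used here are
 * illustrative assumptions, not taken from this excerpt.
 */
public class DuplicateWeightingSketch {

    private static final double REQ_WEIGHT = 3.0;            // assumed weight of required fields
    private static final double DUPLICATE_THRESHOLD = 0.75;  // assumed decision threshold

    static boolean combine(double[] req, double[] opt) {
        double totValue = ((REQ_WEIGHT * req[0] * req[1]) + (opt[0] * opt[1]))
                / ((req[1] * REQ_WEIGHT) + opt[1]);
        return totValue >= DUPLICATE_THRESHOLD;
    }

    public static void main(String[] args) {
        // Three required fields match well, two optional fields match poorly:
        System.out.println(combine(new double[] {0.9, 3}, new double[] {0.4, 2})); // true (~0.81)
    }
}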
@Test @Ignore public void testAddEntrysFromFiles() throws Exception { ParserResult result = BibtexParser.parse(new FileReader(ImportDataTest.UNLINKED_FILES_TEST_BIB)); BibtexDatabase database = result.getDatabase(); List<File> files = new ArrayList<File>(); files.add(ImportDataTest.FILE_NOT_IN_DATABASE); files.add(ImportDataTest.NOT_EXISTING_PDF); EntryFromFileCreatorManager manager = new EntryFromFileCreatorManager(); List<String> errors = manager.addEntrysFromFiles(files, database, null, true); /** One file doesn't exist, so adding it as an entry should lead to an error message. */ Assert.assertEquals(1, errors.size()); boolean file1Found = false; boolean file2Found = false; for (BibtexEntry entry : database.getEntries()) { String filesInfo = entry.getField("file"); if (filesInfo.contains(files.get(0).getName())) { file1Found = true; } if (filesInfo.contains(files.get(1).getName())) { file2Found = true; } } Assert.assertTrue(file1Found); Assert.assertFalse(file2Found); }
private static void fixWrongFileEntries(BibtexEntry entry, NamedCompound ce) { String oldValue = entry.getField(Globals.FILE_FIELD); if (oldValue == null) { return; } FileListTableModel flModel = new FileListTableModel(); flModel.setContent(oldValue); if (flModel.getRowCount() == 0) { return; } boolean changed = false; for (int i = 0; i < flModel.getRowCount(); i++) { FileListEntry flEntry = flModel.getEntry(i); String link = flEntry.getLink(); String description = flEntry.getDescription(); if ("".equals(link) && (!"".equals(description))) { // link and description seem to be switched, quickly fix that flEntry.setLink(flEntry.getDescription()); flEntry.setDescription(""); changed = true; } } if (changed) { String newValue = flModel.getStringRepresentation(); assert (!oldValue.equals(newValue)); entry.setField(Globals.FILE_FIELD, newValue); ce.addEdit(new UndoableFieldChange(entry, Globals.FILE_FIELD, oldValue, newValue)); } }
private void doMakePathsRelative(BibtexEntry entry, NamedCompound ce) { String oldValue = entry.getField(Globals.FILE_FIELD); if (oldValue == null) { return; } FileListTableModel flModel = new FileListTableModel(); flModel.setContent(oldValue); if (flModel.getRowCount() == 0) { return; } boolean changed = false; for (int i = 0; i < flModel.getRowCount(); i++) { FileListEntry flEntry = flModel.getEntry(i); String oldFileName = flEntry.getLink(); String newFileName = FileUtil.shortenFileName( new File(oldFileName), panel.metaData().getFileDirectory(Globals.FILE_FIELD)) .toString(); if (!oldFileName.equals(newFileName)) { flEntry.setLink(newFileName); changed = true; } } if (changed) { String newValue = flModel.getStringRepresentation(); assert (!oldValue.equals(newValue)); entry.setField(Globals.FILE_FIELD, newValue); ce.addEdit(new UndoableFieldChange(entry, Globals.FILE_FIELD, oldValue, newValue)); } }
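/*
 * FileUtil.shortenFileName() is not part of this excerpt. A hedged sketch of what shortening an
 * absolute link against the configured file directories could look like, using java.nio; the real
 * implementation may differ in how it picks the directory and handles case sensitivity.
 */
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.List;

public class RelativePathSketch {

    /** Returns the link relative to the first directory that contains it, otherwise unchanged. */
    static String makeRelative(String link, List<String> fileDirectories) {
        Path file = Paths.get(link).toAbsolutePath().normalize();
        for (String dir : fileDirectories) {
            Path base = Paths.get(dir).toAbsolutePath().normalize();
            if (file.startsWith(base)) {
                return base.relativize(file).toString();
            }
        }
        return link;
    }

    public static void main(String[] args) {
        System.out.println(makeRelative("/home/alice/papers/2015/foo.pdf",
                Arrays.asList("/home/alice/papers"))); // 2015/foo.pdf
    }
}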
/**
 * Formats page numbers, which may be separated by commas or given as ranges; ranges in the
 * <code>pages</code> field are converted to the form page_number--page_number.
 *
 * @see PageNumbersFormatter
 */
public void cleanup() {
    final String field = "pages";
    String value = entry.getField(field);
    if (value == null) {
        // nothing to format
        return;
    }
    String newValue = BibtexFieldFormatters.PAGE_NUMBERS.format(value);
    entry.setField(field, newValue);
}
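/*
 * BibtexFieldFormatters.PAGE_NUMBERS is referenced above but its implementation is not shown.
 * A hedged regex sketch of the normalization it is described as doing: a simple page range with
 * one or more hyphens becomes "first--last". The exact pattern of the real formatter may differ.
 */
import java.util.regex.Pattern;

public class PageRangeSketch {

    // Matches e.g. "345-350", "345 - 350" or "345--350" as the whole field content.
    private static final Pattern RANGE = Pattern.compile("\\A\\s*(\\d+)\\s*-+\\s*(\\d+)\\s*\\Z");

    static String normalize(String pages) {
        return RANGE.matcher(pages).replaceAll("$1--$2");
    }

    public static void main(String[] args) {
        System.out.println(normalize("345 - 350")); // 345--350
        System.out.println(normalize("7"));         // unchanged: 7
    }
}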
/**
 * Recursive method to include a tree of groups.
 *
 * @param cursor The current GroupTreeNode in the GroupsTree
 * @param parentID The integer ID associated with the cursor's parent node
 * @param currentID The integer value to associate with the cursor
 * @param out The output (PrintStream or Connection) object to which the DML should be written.
 * @param database_id ID of the JabRef database that the group belongs to
 * @return the updated currentID after this node and all of its children have been processed
 */
private int populateEntryGroupsTable(GroupTreeNode cursor, int parentID, int currentID,
        Object out, int database_id) throws SQLException {
    // if this group contains entries...
    if (cursor.getGroup() instanceof ExplicitGroup) {
        ExplicitGroup grp = (ExplicitGroup) cursor.getGroup();
        for (BibtexEntry be : grp.getEntries()) {
            SQLUtil.processQuery(out,
                    "INSERT INTO entry_group (entries_id, groups_id) VALUES ("
                            + "(SELECT entries_id FROM entries WHERE jabref_eid=" + '\'' + be.getId()
                            + "' AND database_id = " + database_id + "), "
                            + "(SELECT groups_id FROM groups WHERE database_id=" + '\'' + database_id
                            + "' AND parent_id=" + '\'' + parentID + "' AND label=" + '\'' + grp.getName()
                            + "')" + ");");
        }
    }

    // recurse on child nodes (depth-first traversal)
    Object response = SQLUtil.processQueryWithResults(out,
            "SELECT groups_id FROM groups WHERE label='" + cursor.getGroup().getName()
                    + "' AND database_id='" + database_id + "' AND parent_id='" + parentID + "';");

    // setting values to ID and myID to be used in case of textual SQL export
    ++currentID;
    int myID = currentID;
    if (response instanceof Statement) {
        ResultSet rs = ((Statement) response).getResultSet();
        rs.next();
        myID = rs.getInt("groups_id");
    }

    for (Enumeration<GroupTreeNode> e = cursor.children(); e.hasMoreElements(); ) {
        currentID = populateEntryGroupsTable(e.nextElement(), myID, currentID, out, database_id);
    }
    return currentID;
}
private static int compareSingleField(String field, BibtexEntry one, BibtexEntry two) { String s1 = one.getField(field); String s2 = two.getField(field); if (s1 == null) { if (s2 == null) { return EMPTY_IN_BOTH; } return EMPTY_IN_ONE; } else if (s2 == null) { return EMPTY_IN_TWO; } if (field.equals("author") || field.equals("editor")) { // Specific for name fields. // Harmonise case: String auth1 = AuthorList.fixAuthor_lastNameOnlyCommas(s1, false).replaceAll(" and ", " ").toLowerCase(); String auth2 = AuthorList.fixAuthor_lastNameOnlyCommas(s2, false).replaceAll(" and ", " ").toLowerCase(); double similarity = DuplicateCheck.correlateByWords(auth1, auth2, false); if (similarity > 0.8) { return EQUAL; } return NOT_EQUAL; } else if (field.equals("pages")) { // Pages can be given with a variety of delimiters, "-", "--", " - ", " -- ". // We do a replace to harmonize these to a simple "-": // After this, a simple test for equality should be enough: s1 = s1.replaceAll("[- ]+", "-"); s2 = s2.replaceAll("[- ]+", "-"); if (s1.equals(s2)) { return EQUAL; } return NOT_EQUAL; } else if (field.equals("journal")) { // We do not attempt to harmonize abbreviation state of the journal names, // but we remove periods from the names in case they are abbreviated with // and without dots: s1 = s1.replaceAll("\\.", "").toLowerCase(); s2 = s2.replaceAll("\\.", "").toLowerCase(); double similarity = DuplicateCheck.correlateByWords(s1, s2, true); if (similarity > 0.8) { return EQUAL; } return NOT_EQUAL; } else { s1 = s1.toLowerCase(); s2 = s2.toLowerCase(); double similarity = DuplicateCheck.correlateByWords(s1, s2, false); if (similarity > 0.8) { return EQUAL; } return NOT_EQUAL; } }
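/*
 * DuplicateCheck.correlateByWords() is used above with a 0.8 threshold but is not part of this
 * excerpt. A hedged sketch of a word-overlap similarity in [0, 1] that would fit this usage; the
 * real method also takes a truncate flag and may weight words differently.
 */
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class WordSimilaritySketch {

    /** Fraction of words of the shorter string that also occur in the longer one (illustrative only). */
    static double correlateByWords(String s1, String s2) {
        Set<String> words1 = new HashSet<>(Arrays.asList(s1.toLowerCase().split("\\s+")));
        Set<String> words2 = new HashSet<>(Arrays.asList(s2.toLowerCase().split("\\s+")));
        Set<String> smaller = words1.size() <= words2.size() ? words1 : words2;
        Set<String> larger = smaller == words1 ? words2 : words1;
        int hits = 0;
        for (String word : smaller) {
            if (larger.contains(word)) {
                hits++;
            }
        }
        return smaller.isEmpty() ? 0 : (double) hits / smaller.size();
    }

    public static void main(String[] args) {
        System.out.println(correlateByWords("journal of theoretical biology",
                "j of theoretical biology")); // 0.75
    }
}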
/** * Converts the text in 1st, 2nd, ... to real superscripts by wrapping in \textsuperscript{st}, * ... */ private static void doCleanUpSuperscripts(BibtexEntry entry, NamedCompound ce) { for (String name : entry.getFieldNames()) { String oldValue = entry.getField(name); // run formatter String newValue = BibtexFieldFormatters.SUPERSCRIPTS.format(oldValue); // undo action if (!oldValue.equals(newValue)) { entry.setField(name, newValue); ce.addEdit(new UndoableFieldChange(entry, name, oldValue, newValue)); } } }
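/*
 * BibtexFieldFormatters.SUPERSCRIPTS is not shown here. A hedged regex sketch of the conversion
 * the comment describes: ordinal suffixes after a number are wrapped in \textsuperscript{...}.
 * The real formatter may be more careful about case and context.
 */
import java.util.regex.Pattern;

public class SuperscriptSketch {

    private static final Pattern ORDINAL = Pattern.compile("\\b(\\d+)(st|nd|rd|th)\\b");

    static String format(String value) {
        return ORDINAL.matcher(value).replaceAll("$1\\\\textsuperscript{$2}");
    }

    public static void main(String[] args) {
        System.out.println(format("Proceedings of the 21st Conference"));
        // Proceedings of the 21\textsuperscript{st} Conference
    }
}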
/** Converts HTML code to LaTeX code */ private static void doConvertHTML(BibtexEntry entry, NamedCompound ce) { final String field = "title"; String oldValue = entry.getField(field); if (oldValue == null) { return; } final HTMLConverter htmlConverter = new HTMLConverter(); String newValue = htmlConverter.format(oldValue); if (!oldValue.equals(newValue)) { entry.setField(field, newValue); ce.addEdit(new UndoableFieldChange(entry, field, oldValue, newValue)); } }
/** Converts Unicode characters to LaTeX code */
private static void doConvertUnicode(BibtexEntry entry, NamedCompound ce) {
    final String[] fields = {"title", "author", "abstract"};
    for (String field : fields) {
        String oldValue = entry.getField(field);
        if (oldValue == null) {
            // skip empty fields instead of aborting the whole loop
            continue;
        }
        final HTMLConverter htmlConverter = new HTMLConverter();
        String newValue = htmlConverter.formatUnicode(oldValue);
        if (!oldValue.equals(newValue)) {
            entry.setField(field, newValue);
            ce.addEdit(new UndoableFieldChange(entry, field, oldValue, newValue));
        }
    }
}
@Override public void redo() { super.redo(); // Redo the change. try { if (newValue != null) { entry.setField(field, newValue); } else { entry.clearField(field); } } catch (IllegalArgumentException ex) { LOGGER.info("Cannot perform redo", ex); } }
@Override public void undo() { super.undo(); // Revert the change. try { if (oldValue != null) { entry.setField(field, oldValue); } else { entry.clearField(field); } // this is the only exception explicitly thrown here } catch (IllegalArgumentException ex) { LOGGER.info("Cannot perform undo", ex); } }
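/*
 * The rest of the class is not part of this excerpt. A minimal sketch of the state an undoable
 * field change needs for the undo()/redo() methods above, assuming the class extends Swing's
 * AbstractUndoableEdit; the field names mirror the ones used above, everything else is assumed.
 */
import javax.swing.undo.AbstractUndoableEdit;

public class UndoableFieldChangeSketch extends AbstractUndoableEdit {

    private final BibtexEntry entry;  // the entry whose field was changed
    private final String field;       // the name of the changed field
    private final String oldValue;    // value before the change (null means the field was empty)
    private final String newValue;    // value after the change (null means the field was cleared)

    public UndoableFieldChangeSketch(BibtexEntry entry, String field, String oldValue, String newValue) {
        this.entry = entry;
        this.field = field;
        this.oldValue = oldValue;
        this.newValue = newValue;
    }

    @Override
    public String getPresentationName() {
        return "change field " + field;
    }
}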
/** Runs the field formatter on the entry and records the change. */ private static void doFieldFormatterCleanup( BibtexEntry entry, FieldFormatterCleanup cleaner, NamedCompound ce) { String oldValue = entry.getField(cleaner.getField()); if (oldValue == null) { return; } // run formatter cleaner.cleanup(entry); String newValue = entry.getField(cleaner.getField()); // undo action if (!oldValue.equals(newValue)) { ce.addEdit(new UndoableFieldChange(entry, cleaner.getField(), oldValue, newValue)); } }
private static void doCleanUpMonth(BibtexEntry entry, NamedCompound ce) { // implementation based on patch 3470076 by Mathias Walter String oldValue = entry.getField("month"); if (oldValue == null) { return; } String newValue = oldValue; MonthUtil.Month month = MonthUtil.getMonth(oldValue); if (month.isValid()) { newValue = month.bibtexFormat; } if (!oldValue.equals(newValue)) { entry.setField("month", newValue); ce.addEdit(new UndoableFieldChange(entry, "month", oldValue, newValue)); } }
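/*
 * MonthUtil.getMonth() is not shown in this excerpt. A hedged sketch of recognizing month names
 * and numbers and producing the BibTeX month macro form ("#jan#"), which is assumed here to be
 * what month.bibtexFormat contains.
 */
import java.util.Locale;

public class MonthSketch {

    private static final String[] SHORT_NAMES =
            {"jan", "feb", "mar", "apr", "may", "jun", "jul", "aug", "sep", "oct", "nov", "dec"};

    /** Returns "#mar#" for "March", "mar", "3" or "03"; null if the value is not recognized. */
    static String toBibtexMonth(String value) {
        String v = value.trim().toLowerCase(Locale.ROOT);
        for (int i = 0; i < SHORT_NAMES.length; i++) {
            if (v.startsWith(SHORT_NAMES[i])
                    || v.equals(String.valueOf(i + 1))
                    || v.equals(String.format("%02d", i + 1))) {
                return "#" + SHORT_NAMES[i] + "#";
            }
        }
        return null;
    }

    public static void main(String[] args) {
        System.out.println(toBibtexMonth("March")); // #mar#
        System.out.println(toBibtexMonth("03"));    // #mar#
    }
}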
public static double compareEntriesStrictly(BibtexEntry one, BibtexEntry two) {
    HashSet<String> allFields = new HashSet<>();
    allFields.addAll(one.getFieldNames());
    allFields.addAll(two.getFieldNames());

    int score = 0;
    for (String field : allFields) {
        Object en = one.getField(field);
        Object to = two.getField(field);
        if (((en != null) && (to != null) && en.equals(to)) || ((en == null) && (to == null))) {
            score++;
        }
    }
    if (score == allFields.size()) {
        return 1.01; // Just to make sure we can use score > 1 without trouble.
    }
    return (double) score / allFields.size();
}
@Override public Object getColumnValue(BibtexEntry entry, int column) { if (column < PAD) { Object o; switch (column) { case FILE_COL: o = entry.getField(Globals.FILE_FIELD); if (o != null) { FileListTableModel model = new FileListTableModel(); model.setContent((String) o); fileLabel.setToolTipText(model.getToolTipHTMLRepresentation()); if (model.getRowCount() > 0) { fileLabel.setIcon(model.getEntry(0).getType().getIcon()); } return fileLabel; } else { return null; } case URL_COL: o = entry.getField("url"); if (o != null) { urlLabel.setToolTipText((String) o); return urlLabel; } else { return null; } default: return null; } } else { String field = fields[column - PAD]; if (field.equals("author") || field.equals("editor")) { // For name fields, tap into a MainTableFormat instance and use // the same name formatting as is used in the entry table: if (frame.basePanel() != null) { return frame.basePanel().tableFormat.formatName(entry.getField(field)); } } return entry.getField(field); } }
@Override public void mouseClicked(MouseEvent e) { if (e.isPopupTrigger()) { processPopupTrigger(e); return; } // if (e.) final int col = entryTable.columnAtPoint(e.getPoint()); final int row = entryTable.rowAtPoint(e.getPoint()); if (col < PAD) { BibtexEntry entry = sortedEntries.get(row); BasePanel p = entryHome.get(entry); switch (col) { case FILE_COL: Object o = entry.getField(Globals.FILE_FIELD); if (o != null) { FileListTableModel tableModel = new FileListTableModel(); tableModel.setContent((String) o); if (tableModel.getRowCount() == 0) { return; } FileListEntry fl = tableModel.getEntry(0); (new ExternalFileMenuItem( frame, entry, "", fl.getLink(), null, p.metaData(), fl.getType())) .actionPerformed(null); } break; case URL_COL: Object link = entry.getField("url"); try { if (link != null) { JabRefDesktop.openExternalViewer(p.metaData(), (String) link, "url"); } } catch (IOException ex) { ex.printStackTrace(); } break; } } }
/** * If the user has signalled the opening of a context menu, the event gets redirected to this * method. Here we open a file link menu if the user is pointing at a file link icon. Otherwise * a general context menu should be shown. * * @param e The triggering mouse event. */ public void processPopupTrigger(MouseEvent e) { BibtexEntry entry = sortedEntries.get(entryTable.rowAtPoint(e.getPoint())); BasePanel p = entryHome.get(entry); int col = entryTable.columnAtPoint(e.getPoint()); JPopupMenu menu = new JPopupMenu(); int count = 0; if (col == FILE_COL) { // We use a FileListTableModel to parse the field content: Object o = entry.getField(Globals.FILE_FIELD); FileListTableModel fileList = new FileListTableModel(); fileList.setContent((String) o); // If there are one or more links, open the first one: for (int i = 0; i < fileList.getRowCount(); i++) { FileListEntry flEntry = fileList.getEntry(i); String description = flEntry.getDescription(); if ((description == null) || (description.trim().isEmpty())) { description = flEntry.getLink(); } menu.add( new ExternalFileMenuItem( p.frame(), entry, description, flEntry.getLink(), flEntry.getType().getIcon(), p.metaData(), flEntry.getType())); count++; } } if (count > 0) { menu.show(entryTable, e.getX(), e.getY()); } }
/** * Generates the DML required to populate the entries table with jabref data and writes it to the * output PrintStream. * * @param database_id ID of Jabref database related to the entries to be exported This information * can be gathered using getDatabaseIDByPath(metaData, out) * @param entries The BibtexEntries to export * @param out The output (PrintStream or Connection) object to which the DML should be written. */ private void populateEntriesTable(int database_id, List<BibtexEntry> entries, Object out) throws SQLException { String query; String val; String insert = "INSERT INTO entries (jabref_eid, entry_types_id, cite_key, " + fieldStr + ", database_id) VALUES ("; for (BibtexEntry entry : entries) { query = insert + '\'' + entry.getId() + '\'' + ", (SELECT entry_types_id FROM entry_types WHERE label='" + entry.getType().getName().toLowerCase() + "'), '" + entry.getCiteKey() + '\''; for (int i = 0; i < SQLUtil.getAllFields().size(); i++) { query = query + ", "; val = entry.getField(SQLUtil.getAllFields().get(i)); if (val != null) { val = val.replace("\\", "\\\\"); val = val.replace("\"", "\\\""); val = val.replace("\'", "''"); val = val.replace("`", "\\`"); query = query + '\'' + val + '\''; } else { query = query + "NULL"; } } query = query + ", '" + database_id + "');"; SQLUtil.processQuery(out, query); } }
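/*
 * The method above escapes values by hand before splicing them into the INSERT statement. When
 * the output object is a live java.sql.Connection (rather than a PrintStream for textual export),
 * a PreparedStatement avoids that escaping entirely. This is only an illustrative alternative, not
 * the exporter's actual code path; the column list is reduced to the fixed columns shown above.
 */
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;

public class PreparedInsertSketch {

    /** Inserts one row into the entries table without hand-escaping the values. */
    static void insertEntry(Connection connection, String jabrefEid, int entryTypesId,
            String citeKey, int databaseId) throws SQLException {
        String sql = "INSERT INTO entries (jabref_eid, entry_types_id, cite_key, database_id) "
                + "VALUES (?, ?, ?, ?)";
        try (PreparedStatement statement = connection.prepareStatement(sql)) {
            statement.setString(1, jabrefEid);
            statement.setInt(2, entryTypesId);
            statement.setString(3, citeKey);
            statement.setInt(4, databaseId);
            statement.executeUpdate();
        }
    }
}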
/** Update the merged BibtexEntry with source and preview panel everytime something is changed */ private void updateAll() { if (!doneBuilding) { // If we've not done adding everything, do not do anything... return; } // Check if the type is changed if (!identical[0]) { if (rb[0][0].isSelected()) { mergedEntry.setType(one.getType()); } else { mergedEntry.setType(two.getType()); } } // Check all fields for (int i = 0; i < joint.size(); i++) { if (!identical[i + 1]) { if (rb[0][i + 1].isSelected()) { mergedEntry.setField(jointStrings[i], one.getField(jointStrings[i])); } else if (rb[2][i + 1].isSelected()) { mergedEntry.setField(jointStrings[i], two.getField(jointStrings[i])); } else { mergedEntry.setField(jointStrings[i], null); } } } // Update the PreviewPanel pp.setEntry(mergedEntry); // Update the Bibtex source view StringWriter sw = new StringWriter(); try { new BibtexEntryWriter(new LatexFieldFormatter(), false).write(mergedEntry, sw); } catch (IOException ex) { LOGGER.error("Error in entry" + ": " + ex.getMessage(), ex); } jta.setText(sw.getBuffer().toString()); jta.setCaretPosition(0); }
/**
 * Tries to find a fulltext URL for a given BibTeX entry.
 *
 * <p>Currently only uses the DOI if found.
 *
 * @param entry The BibTeX entry
 * @return An Optional containing the fulltext PDF URL if one was found, otherwise an empty Optional.
 * @throws NullPointerException if no BibTeX entry is given
 * @throws java.io.IOException if the HTTP request fails
 */
public Optional<URL> findFullText(BibtexEntry entry) throws IOException {
    Objects.requireNonNull(entry);
    Optional<URL> pdfLink = Optional.empty();

    // DOI search
    Optional<DOI> doi = DOI.build(entry.getField("doi"));
    if (doi.isPresent()) {
        String source = String.format(SOURCE, doi.get().getDOI());
        // Retrieve PDF link
        Document html = Jsoup.connect(source).ignoreHttpErrors(true).get();
        Element link = html.select(".pdf-high-res a").first();
        if (link != null) {
            LOGGER.info("Fulltext PDF found @ ACS.");
            pdfLink = Optional.of(new URL(source.replaceFirst("/abs/", "/pdf/")));
        }
    }
    return pdfLink;
}
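/*
 * A quick illustration of the URL rewriting done above, assuming the SOURCE constant (not shown
 * in this excerpt) looks roughly like "http://pubs.acs.org/doi/abs/%s". The DOI used here is the
 * generic example DOI, not a real reference.
 */
import java.net.MalformedURLException;
import java.net.URL;

public class AcsUrlSketch {

    public static void main(String[] args) throws MalformedURLException {
        String source = String.format("http://pubs.acs.org/doi/abs/%s", "10.1000/xyz123");
        URL pdf = new URL(source.replaceFirst("/abs/", "/pdf/"));
        System.out.println(pdf); // http://pubs.acs.org/doi/pdf/10.1000/xyz123
    }
}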
/**
 * Removes the "http://..." prefix from each DOI and moves DOIs from the URL and NOTE fields
 * to the DOI field.
 *
 * @param bes the entry to clean up
 * @param ce the compound edit that collects the undo information
 */
private static void doCleanUpDOI(BibtexEntry bes, NamedCompound ce) {
    // fields to check
    String[] fields = {"note", "url", "ee"};

    // First check whether the DOI field is already set
    if (bes.getField("doi") != null) {
        String doiFieldValue = bes.getField("doi");
        Optional<DOI> doi = DOI.build(doiFieldValue);
        if (doi.isPresent()) {
            String newValue = doi.get().getDOI();
            if (!doiFieldValue.equals(newValue)) {
                ce.addEdit(new UndoableFieldChange(bes, "doi", doiFieldValue, newValue));
                bes.setField("doi", newValue);
            }
            // The DOI field contains a DOI -> clean up the note, url, and ee fields
            for (String field : fields) {
                DOI.build(bes.getField(field)).ifPresent(unused -> removeFieldValue(bes, field, ce));
            }
        }
    } else {
        // As the DOI field is empty, check whether the note, url, or ee field contains a DOI
        for (String field : fields) {
            Optional<DOI> doi = DOI.build(bes.getField(field));
            if (doi.isPresent()) {
                // update the DOI field
                String oldValue = bes.getField("doi");
                String newValue = doi.get().getDOI();
                ce.addEdit(new UndoableFieldChange(bes, "doi", oldValue, newValue));
                bes.setField("doi", newValue);
                removeFieldValue(bes, field, ce);
            }
        }
    }
}
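/*
 * DOI.build() is not part of this excerpt. A hedged sketch of extracting a bare DOI from free
 * text or a URL, using a pattern close to the one commonly recommended by Crossref; the real DOI
 * class may be stricter and also strips the resolver prefix.
 */
import java.util.Optional;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class DoiExtractionSketch {

    private static final Pattern DOI_PATTERN = Pattern.compile("10\\.\\d{4,9}/[-._;()/:A-Za-z0-9]+");

    static Optional<String> findDoi(String text) {
        if (text == null) {
            return Optional.empty();
        }
        Matcher matcher = DOI_PATTERN.matcher(text);
        return matcher.find() ? Optional.of(matcher.group()) : Optional.empty();
    }

    public static void main(String[] args) {
        System.out.println(findDoi("see http://dx.doi.org/10.1000/xyz123 for details"));
        // Optional[10.1000/xyz123]
    }
}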
private void doRenamePDFs(BibtexEntry entry, NamedCompound ce) { // Extract the path String oldValue = entry.getField(Globals.FILE_FIELD); if (oldValue == null) { return; } FileListTableModel flModel = new FileListTableModel(); flModel.setContent(oldValue); if (flModel.getRowCount() == 0) { return; } boolean changed = false; for (int i = 0; i < flModel.getRowCount(); i++) { String realOldFilename = flModel.getEntry(i).getLink(); if (cleanUpRenamePDFonlyRelativePaths.isSelected() && (new File(realOldFilename).isAbsolute())) { continue; } String newFilename = Util.getLinkedFileName(panel.database(), entry); // String oldFilename = bes.getField(GUIGlobals.FILE_FIELD); // would have to be stored for // undoing purposes // Add extension to newFilename newFilename = newFilename + "." + flModel.getEntry(i).getType().getExtension(); // get new Filename with path // Create new Path based on old Path and new filename File expandedOldFile = FileUtil.expandFilename( realOldFilename, panel.metaData().getFileDirectory(Globals.FILE_FIELD)); if (expandedOldFile.getParent() == null) { // something went wrong. Just skip this entry continue; } String newPath = expandedOldFile .getParent() .concat(System.getProperty("file.separator")) .concat(newFilename); if (new File(newPath).exists()) { // we do not overwrite files // TODO: we could check here if the newPath file is linked with the current entry. And if // not, we could add a link continue; } // do rename boolean renameSuccessful = FileUtil.renameFile(expandedOldFile.toString(), newPath); if (renameSuccessful) { changed = true; // Change the path for this entry String description = flModel.getEntry(i).getDescription(); ExternalFileType type = flModel.getEntry(i).getType(); flModel.removeEntry(i); // we cannot use "newPath" to generate a FileListEntry as newPath is absolute, but we want // to keep relative paths whenever possible File parent = (new File(realOldFilename)).getParentFile(); String newFileEntryFileName; if (parent == null) { newFileEntryFileName = newFilename; } else { newFileEntryFileName = parent.toString().concat(System.getProperty("file.separator")).concat(newFilename); } flModel.addEntry(i, new FileListEntry(description, newFileEntryFileName, type)); } else { unsuccessfulRenames++; } } if (changed) { String newValue = flModel.getStringRepresentation(); assert (!oldValue.equals(newValue)); entry.setField(Globals.FILE_FIELD, newValue); // we put an undo of the field content here // the file is not being renamed back, which leads to inconsistencies // if we put a null undo object here, the change by "doMakePathsRelative" would overwrite the // field value nevertheless. ce.addEdit(new UndoableFieldChange(entry, Globals.FILE_FIELD, oldValue, newValue)); } }
/** Converts to BibLatex format */ public static void convertToBiblatex(BibtexEntry entry, NamedCompound ce) { for (Map.Entry<String, String> alias : EntryConverter.FIELD_ALIASES_TEX_TO_LTX.entrySet()) { String oldFieldName = alias.getKey(); String newFieldName = alias.getValue(); String oldValue = entry.getField(oldFieldName); String newValue = entry.getField(newFieldName); if ((oldValue != null) && (!oldValue.isEmpty()) && (newValue == null)) { // There is content in the old field and no value in the new, so just copy entry.setField(newFieldName, oldValue); ce.addEdit(new UndoableFieldChange(entry, newFieldName, null, oldValue)); entry.setField(oldFieldName, null); ce.addEdit(new UndoableFieldChange(entry, oldFieldName, oldValue, null)); } } // Dates: create date out of year and month, save it and delete old fields if ((entry.getField("date") == null) || (entry.getField("date").isEmpty())) { String newDate = entry.getFieldOrAlias("date"); String oldYear = entry.getField("year"); String oldMonth = entry.getField("month"); entry.setField("date", newDate); entry.setField("year", null); entry.setField("month", null); ce.addEdit(new UndoableFieldChange(entry, "date", null, newDate)); ce.addEdit(new UndoableFieldChange(entry, "year", oldYear, null)); ce.addEdit(new UndoableFieldChange(entry, "month", oldMonth, null)); } }
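/*
 * getFieldOrAlias("date") above derives a biblatex date from the year and month fields. A hedged
 * sketch of that combination for the simple case of a numeric month; the real alias resolution
 * also understands month names and the "#jan#" macro form.
 */
public class BiblatexDateSketch {

    /** Combines year and month into a biblatex date such as "2003-12"; returns null without a year. */
    static String toBiblatexDate(String year, String month) {
        if (year == null || year.isEmpty()) {
            return null;
        }
        if (month == null || month.isEmpty()) {
            return year;
        }
        return year + "-" + String.format("%02d", Integer.parseInt(month));
    }

    public static void main(String[] args) {
        System.out.println(toBiblatexDate("2003", "12")); // 2003-12
        System.out.println(toBiblatexDate("2003", null)); // 2003
    }
}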
private static void removeFieldValue( BibtexEntry entry, String fieldName, NamedCompound compound) { String origValue = entry.getField(fieldName); compound.addEdit(new UndoableFieldChange(entry, fieldName, origValue, null)); entry.setField(fieldName, null); }
/** Main function for building the merge entry JPanel */ private void initialize() { joint = new TreeSet<>(one.getFieldNames()); joint.addAll(two.getFieldNames()); // Remove field starting with __ TreeSet<String> toberemoved = new TreeSet<>(); for (String field : joint) { if (field.startsWith("__")) { toberemoved.add(field); } } for (String field : toberemoved) { joint.remove(field); } // Create storage arrays rb = new JRadioButton[3][joint.size() + 1]; ButtonGroup[] rbg = new ButtonGroup[joint.size() + 1]; identical = new Boolean[joint.size() + 1]; jointStrings = new String[joint.size()]; // Create main layout String colSpecMain = "left:pref, 5px, center:3cm:grow, 5px, center:pref, 3px, center:pref, 3px, center:pref, 5px, center:3cm:grow"; String colSpecMerge = "left:pref, 5px, fill:3cm:grow, 5px, center:pref, 3px, center:pref, 3px, center:pref, 5px, fill:3cm:grow"; String rowSpec = "pref, pref, 10px, fill:5cm:grow, 10px, pref, 10px, fill:3cm:grow"; StringBuilder rowBuilder = new StringBuilder(""); for (int i = 0; i < joint.size(); i++) { rowBuilder.append("pref, "); } rowBuilder.append("pref"); FormLayout mainLayout = new FormLayout(colSpecMain, rowSpec); FormLayout mergeLayout = new FormLayout(colSpecMerge, rowBuilder.toString()); mainPanel.setLayout(mainLayout); mergePanel.setLayout(mergeLayout); JLabel label = new JLabel(Localization.lang("Use")); Font font = label.getFont(); label.setFont(font.deriveFont(font.getStyle() | Font.BOLD)); mainPanel.add(label, cc.xyw(4, 1, 7, "center, bottom")); // Set headings JLabel headingLabels[] = new JLabel[6]; for (int i = 0; i < 6; i++) { headingLabels[i] = new JLabel(columnHeadings[i]); font = headingLabels[i].getFont(); headingLabels[i].setFont(font.deriveFont(font.getStyle() | Font.BOLD)); mainPanel.add(headingLabels[i], cc.xy(1 + (i * 2), 2)); } mainPanel.add(new JSeparator(), cc.xyw(1, 3, 11)); // Start with entry type EntryType type1 = one.getType(); EntryType type2 = two.getType(); mergedEntry.setType(type1); label = new JLabel(Localization.lang("Entry type")); font = label.getFont(); label.setFont(font.deriveFont(font.getStyle() | Font.BOLD)); mergePanel.add(label, cc.xy(1, 1)); JTextArea type1ta = new JTextArea(type1.getName()); type1ta.setEditable(false); mergePanel.add(type1ta, cc.xy(3, 1)); if (type1.compareTo(type2) != 0) { identical[0] = false; rbg[0] = new ButtonGroup(); for (int k = 0; k < 3; k += 2) { rb[k][0] = new JRadioButton(); rbg[0].add(rb[k][0]); mergePanel.add(rb[k][0], cc.xy(5 + (k * 2), 1)); rb[k][0].addChangeListener( new ChangeListener() { @Override public void stateChanged(ChangeEvent e) { updateAll(); } }); } rb[0][0].setSelected(true); } else { identical[0] = true; } JTextArea type2ta = new JTextArea(type2.getName()); type2ta.setEditable(false); mergePanel.add(type2ta, cc.xy(11, 1)); // For all fields in joint add a row and possibly radio buttons int row = 2; int maxLabelWidth = -1; int tmpLabelWidth = 0; for (String field : joint) { jointStrings[row - 2] = field; label = new JLabel(CaseChangers.UPPER_FIRST.format(field)); font = label.getFont(); label.setFont(font.deriveFont(font.getStyle() | Font.BOLD)); mergePanel.add(label, cc.xy(1, row)); String string1 = one.getField(field); String string2 = two.getField(field); identical[row - 1] = false; if ((string1 != null) && (string2 != null)) { if (string1.equals(string2)) { identical[row - 1] = true; } } tmpLabelWidth = label.getPreferredSize().width; if (tmpLabelWidth > maxLabelWidth) { maxLabelWidth = tmpLabelWidth; } if ("abstract".equals(field) || 
"review".equals(field)) { // Treat the abstract and review fields special JTextArea tf = new JTextArea(); tf.setLineWrap(true); tf.setEditable(false); JScrollPane jsptf = new JScrollPane(tf); mergeLayout.setRowSpec(row, RowSpec.decode("center:2cm:grow")); mergePanel.add(jsptf, cc.xy(3, row, "f, f")); tf.setText(string1); tf.setCaretPosition(0); } else { JTextArea tf = new JTextArea(string1); mergePanel.add(tf, cc.xy(3, row)); tf.setCaretPosition(0); tf.setEditable(false); } // Add radio buttons if the two entries do not have identical fields if (!identical[row - 1]) { rbg[row - 1] = new ButtonGroup(); for (int k = 0; k < 3; k++) { rb[k][row - 1] = new JRadioButton(); rbg[row - 1].add(rb[k][row - 1]); mergePanel.add(rb[k][row - 1], cc.xy(5 + (k * 2), row)); rb[k][row - 1].addChangeListener( new ChangeListener() { @Override public void stateChanged(ChangeEvent e) { updateAll(); } }); } if (string1 != null) { mergedEntry.setField(field, string1); rb[0][row - 1].setSelected(true); if (string2 == null) { rb[2][row - 1].setEnabled(false); } } else { rb[0][row - 1].setEnabled(false); mergedEntry.setField(field, string2); rb[2][row - 1].setSelected(true); } } else { mergedEntry.setField(field, string1); } if ("abstract".equals(field) || "review".equals(field)) { // Again, treat abstract and review special JTextArea tf = new JTextArea(); tf.setLineWrap(true); tf.setEditable(false); JScrollPane jsptf = new JScrollPane(tf); mergePanel.add(jsptf, cc.xy(11, row, "f, f")); tf.setText(string2); tf.setCaretPosition(0); } else { JTextArea tf = new JTextArea(string2); mergePanel.add(tf, cc.xy(11, row)); tf.setCaretPosition(0); tf.setEditable(false); } row++; } JScrollPane scrollPane = new JScrollPane( mergePanel, JScrollPane.VERTICAL_SCROLLBAR_AS_NEEDED, JScrollPane.HORIZONTAL_SCROLLBAR_NEVER); scrollPane.setBorder(BorderFactory.createEmptyBorder()); mainPanel.add(scrollPane, cc.xyw(1, 4, 11)); mainPanel.add(new JSeparator(), cc.xyw(1, 5, 11)); // Synchronize column widths String rbAlign[] = {"right", "center", "left"}; mainLayout.setColumnSpec(1, ColumnSpec.decode(Integer.toString(maxLabelWidth) + "px")); Integer maxRBWidth = -1; Integer tmpRBWidth; for (int k = 0; k < 3; k++) { tmpRBWidth = headingLabels[k + 2].getPreferredSize().width; if (tmpRBWidth > maxRBWidth) { maxRBWidth = tmpRBWidth; } } for (int k = 0; k < 3; k++) { mergeLayout.setColumnSpec( 5 + (k * 2), ColumnSpec.decode(rbAlign[k] + ":" + maxRBWidth + "px")); } // Setup a PreviewPanel and a Bibtex source box for the merged entry label = new JLabel(Localization.lang("Merged entry")); font = label.getFont(); label.setFont(font.deriveFont(font.getStyle() | Font.BOLD)); mainPanel.add(label, cc.xyw(1, 6, 6)); String layoutString = Globals.prefs.get(JabRefPreferences.PREVIEW_0); pp = new PreviewPanel(null, mergedEntry, null, new MetaData(), layoutString); // JScrollPane jsppp = new JScrollPane(pp, JScrollPane.VERTICAL_SCROLLBAR_AS_NEEDED, // JScrollPane.HORIZONTAL_SCROLLBAR_NEVER); mainPanel.add(pp, cc.xyw(1, 8, 6)); label = new JLabel(Localization.lang("Merged BibTeX source code")); font = label.getFont(); label.setFont(font.deriveFont(font.getStyle() | Font.BOLD)); mainPanel.add(label, cc.xyw(8, 6, 4)); jta = new JTextArea(); jta.setLineWrap(true); JScrollPane jspta = new JScrollPane(jta); mainPanel.add(jspta, cc.xyw(8, 8, 4)); jta.setEditable(false); StringWriter sw = new StringWriter(); try { new BibtexEntryWriter(new LatexFieldFormatter(), false).write(mergedEntry, sw); } catch (IOException ex) { LOGGER.error("Error in entry" + ": " + 
ex.getMessage(), ex); } jta.setText(sw.getBuffer().toString()); jta.setCaretPosition(0); // Add some margin around the layout mainLayout.appendRow(RowSpec.decode("10px")); mainLayout.appendColumn(ColumnSpec.decode("10px")); mainLayout.insertRow(1, RowSpec.decode("10px")); mainLayout.insertColumn(1, ColumnSpec.decode("10px")); if (mainPanel.getHeight() > DIM.height) { mainPanel.setSize(new Dimension(mergePanel.getWidth(), DIM.height)); } if (mainPanel.getWidth() > DIM.width) { mainPanel.setSize(new Dimension(DIM.width, mergePanel.getHeight())); } // Everything done, allow any action to actually update the merged entry doneBuilding = true; // Show what we've got mainPanel.setVisible(true); javax.swing.SwingUtilities.invokeLater( new Runnable() { @Override public void run() { scrollPane.getVerticalScrollBar().setValue(0); } }); }
/** Parse the entries in the source, and return a List of BibtexEntry objects. */ @Override public List<BibtexEntry> importEntries(InputStream stream, OutputPrinter status) throws IOException { ArrayList<BibtexEntry> bibItems = new ArrayList<BibtexEntry>(); BufferedReader in = new BufferedReader(ImportFormatReader.getReaderDefaultEncoding(stream)); String line; HashMap<String, String> hm = new HashMap<String, String>(); HashMap<String, StringBuffer> lines = new HashMap<String, StringBuffer>(); StringBuffer previousLine = null; while ((line = in.readLine()) != null) { if (line.isEmpty()) { continue; // ignore empty lines, e.g. at file } // end // entry delimiter -> item complete if (line.equals("------")) { String[] type = new String[2]; String[] pages = new String[2]; String country = null; String address = null; String titleST = null; String titleTI = null; Vector<String> comments = new Vector<String>(); // add item for (Map.Entry<String, StringBuffer> entry : lines.entrySet()) { if (entry.getKey().equals("AU")) { hm.put("author", entry.getValue().toString()); } else if (entry.getKey().equals("TI")) { titleTI = entry.getValue().toString(); } else if (entry.getKey().equals("ST")) { titleST = entry.getValue().toString(); } else if (entry.getKey().equals("YP")) { hm.put("year", entry.getValue().toString()); } else if (entry.getKey().equals("VL")) { hm.put("volume", entry.getValue().toString()); } else if (entry.getKey().equals("NB")) { hm.put("number", entry.getValue().toString()); } else if (entry.getKey().equals("PS")) { pages[0] = entry.getValue().toString(); } else if (entry.getKey().equals("PE")) { pages[1] = entry.getValue().toString(); } else if (entry.getKey().equals("KW")) { hm.put("keywords", entry.getValue().toString()); } else if (entry.getKey().equals("RT")) { type[0] = entry.getValue().toString(); } else if (entry.getKey().equals("SB")) { comments.add("Subject: " + entry.getValue()); } else if (entry.getKey().equals("SA")) { comments.add("Secondary Authors: " + entry.getValue()); } else if (entry.getKey().equals("NT")) { hm.put("note", entry.getValue().toString()); } else if (entry.getKey().equals("PB")) { hm.put("publisher", entry.getValue().toString()); } else if (entry.getKey().equals("TA")) { comments.add("Tertiary Authors: " + entry.getValue()); } else if (entry.getKey().equals("TT")) { comments.add("Tertiary Title: " + entry.getValue()); } else if (entry.getKey().equals("ED")) { hm.put("edition", entry.getValue().toString()); } else if (entry.getKey().equals("TW")) { type[1] = entry.getValue().toString(); } else if (entry.getKey().equals("QA")) { comments.add("Quaternary Authors: " + entry.getValue()); } else if (entry.getKey().equals("QT")) { comments.add("Quaternary Title: " + entry.getValue()); } else if (entry.getKey().equals("IS")) { hm.put("isbn", entry.getValue().toString()); } else if (entry.getKey().equals("AB")) { hm.put("abstract", entry.getValue().toString()); } else if (entry.getKey().equals("AD")) { address = entry.getValue().toString(); } else if (entry.getKey().equals("LG")) { hm.put("language", entry.getValue().toString()); } else if (entry.getKey().equals("CO")) { country = entry.getValue().toString(); } else if (entry.getKey().equals("UR") || entry.getKey().equals("AT")) { String s = entry.getValue().toString().trim(); hm.put( s.startsWith("http://") || s.startsWith("ftp://") ? 
"url" : "pdf", entry.getValue().toString()); } else if (entry.getKey().equals("C1")) { comments.add("Custom1: " + entry.getValue()); } else if (entry.getKey().equals("C2")) { comments.add("Custom2: " + entry.getValue()); } else if (entry.getKey().equals("C3")) { comments.add("Custom3: " + entry.getValue()); } else if (entry.getKey().equals("C4")) { comments.add("Custom4: " + entry.getValue()); } else if (entry.getKey().equals("C5")) { comments.add("Custom5: " + entry.getValue()); } else if (entry.getKey().equals("C6")) { comments.add("Custom6: " + entry.getValue()); } else if (entry.getKey().equals("DE")) { hm.put("annote", entry.getValue().toString()); } else if (entry.getKey().equals("CA")) { comments.add("Categories: " + entry.getValue()); } else if (entry.getKey().equals("TH")) { comments.add("Short Title: " + entry.getValue()); } else if (entry.getKey().equals("SE")) { hm.put("chapter", entry.getValue().toString()); // else if (entry.getKey().equals("AC")) // hm.put("",entry.getValue().toString()); // else if (entry.getKey().equals("LP")) // hm.put("",entry.getValue().toString()); } } String bibtexType = "misc"; // to find type, first check TW, then RT for (int i = 1; i >= 0 && bibtexType.equals("misc"); --i) { if (type[i] == null) { continue; } type[i] = type[i].toLowerCase(); if (type[i].contains("article")) { bibtexType = "article"; } else if (type[i].contains("journal")) { bibtexType = "article"; } else if (type[i].contains("book section")) { bibtexType = "inbook"; } else if (type[i].contains("book")) { bibtexType = "book"; } else if (type[i].contains("conference")) { bibtexType = "inproceedings"; } else if (type[i].contains("proceedings")) { bibtexType = "inproceedings"; } else if (type[i].contains("report")) { bibtexType = "techreport"; } else if (type[i].contains("thesis") && type[i].contains("master")) { bibtexType = "mastersthesis"; } else if (type[i].contains("thesis")) { bibtexType = "phdthesis"; } } // depending on bibtexType, decide where to place the titleRT and // titleTI if (bibtexType.equals("article")) { if (titleST != null) { hm.put("journal", titleST); } if (titleTI != null) { hm.put("title", titleTI); } } else if (bibtexType.equals("inbook")) { if (titleST != null) { hm.put("booktitle", titleST); } if (titleTI != null) { hm.put("title", titleTI); } } else { if (titleST != null) { hm.put("booktitle", titleST); // should not } // happen, I // think if (titleTI != null) { hm.put("title", titleTI); } } // concatenate pages if (pages[0] != null || pages[1] != null) { hm.put( "pages", (pages[0] != null ? pages[0] : "") + (pages[1] != null ? "--" + pages[1] : "")); } // concatenate address and country if (address != null) { hm.put("address", address + (country != null ? ", " + country : "")); } if (!comments.isEmpty()) { // set comment if present StringBuilder s = new StringBuilder(); for (int i = 0; i < comments.size(); ++i) { s.append(i > 0 ? "; " : "").append(comments.elementAt(i)); } hm.put("comment", s.toString()); } BibtexEntry b = new BibtexEntry(DEFAULT_BIBTEXENTRY_ID, BibtexEntryTypes.getEntryType(bibtexType)); b.setField(hm); bibItems.add(b); hm.clear(); lines.clear(); previousLine = null; continue; } // new key if (line.startsWith("--") && line.length() >= 7 && line.substring(4, 7).equals("-- ")) { lines.put(line.substring(2, 4), previousLine = new StringBuffer(line.substring(7))); continue; } // continuation (folding) of previous line if (previousLine == null) { return null; } previousLine.append(line.trim()); } return bibItems; }
@Override public List<BibtexEntry> importEntries(InputStream in, OutputPrinter status) throws IOException { final ArrayList<BibtexEntry> res = new ArrayList<BibtexEntry>(1); PDDocument document; try { document = PDDocument.load(in); } catch (IOException e) { LOGGER.error("Could not load document", e); return res; } try { if (document.isEncrypted()) { LOGGER.error(Localization.lang("Encrypted documents are not supported")); // return res; } PDFTextStripper stripper = new PDFTextStripper(); stripper.setStartPage(1); stripper.setEndPage(1); stripper.setSortByPosition(true); stripper.setParagraphEnd(System.lineSeparator()); StringWriter writer = new StringWriter(); stripper.writeText(document, writer); String textResult = writer.toString(); String doi = new DOI(textResult).getDOI(); if (doi.length() < textResult.length()) { // A Doi was found in the text // We do NO parsing of the text, but use the Doi fetcher ImportInspector i = new ImportInspector() { @Override public void toFront() {} @Override public void setProgress(int current, int max) {} @Override public void addEntry(BibtexEntry entry) { // add the entry to the result object res.add(entry); } }; PdfContentImporter.doiToBibTeXFetcher.processQuery(doi, i, status); if (!res.isEmpty()) { // if something has been found, return the result return res; } else { // otherwise, we just parse the PDF } } String author; String editor = null; String institution = null; String abstractT = null; String keywords = null; String title; String conference = null; String DOI = null; String series = null; String volume = null; String number = null; String pages = null; // year is a class variable as the method extractYear() uses it; String publisher = null; BibtexEntryType type = BibtexEntryTypes.INPROCEEDINGS; final String lineBreak = System.lineSeparator(); split = textResult.split(lineBreak); // idea: split[] contains the different lines // blocks are separated by empty lines // treat each block // or do special treatment at authors (which are not broken) // therefore, we do a line-based and not a block-based splitting // i points to the current line // curString (mostly) contains the current block // the different lines are joined into one and thereby separated by " " proceedToNextNonEmptyLine(); if (i >= split.length) { // PDF could not be parsed or is empty // return empty list return res; } curString = split[i]; i = i + 1; if (curString.length() > 4) { // special case: possibly conference as first line on the page extractYear(); if (curString.contains("Conference")) { fillCurStringWithNonEmptyLines(); conference = curString; curString = ""; } else { // e.g. 
Copyright (c) 1998 by the Genetics Society of America // future work: get year using RegEx String lower = curString.toLowerCase(); if (lower.contains("copyright")) { fillCurStringWithNonEmptyLines(); publisher = curString; curString = ""; } } } // start: title fillCurStringWithNonEmptyLines(); title = streamlineTitle(curString); curString = ""; // i points to the next non-empty line // after title: authors author = null; while (i < split.length && !split[i].equals("")) { // author names are unlikely to be split among different lines // treat them line by line curString = streamlineNames(split[i]); if (author == null) { author = curString; } else { if (curString.equals("")) { // if split[i] is "and" then "" is returned by streamlineNames -> do nothing } else { author = author.concat(" and ").concat(curString); } } i++; } curString = ""; i++; // then, abstract and keywords follow while (i < split.length) { curString = split[i]; if (curString.length() >= "Abstract".length() && curString.substring(0, "Abstract".length()).equalsIgnoreCase("Abstract")) { if (curString.length() == "Abstract".length()) { // only word "abstract" found -- skip line curString = ""; } else { curString = curString.substring("Abstract".length() + 1).trim().concat(lineBreak); } i++; // fillCurStringWithNonEmptyLines() cannot be used as that uses " " as line separator // whereas we need linebreak as separator while (i < split.length && !split[i].equals("")) { curString = curString.concat(split[i]).concat(lineBreak); i++; } abstractT = curString; i++; } else if (curString.length() >= "Keywords".length() && curString.substring(0, "Keywords".length()).equalsIgnoreCase("Keywords")) { if (curString.length() == "Keywords".length()) { // only word "Keywords" found -- skip line curString = ""; } else { curString = curString.substring("Keywords".length() + 1).trim(); } i++; fillCurStringWithNonEmptyLines(); keywords = removeNonLettersAtEnd(curString); } else { String lower = curString.toLowerCase(); int pos = lower.indexOf("technical"); if (pos >= 0) { type = BibtexEntryTypes.TECHREPORT; pos = curString.trim().lastIndexOf(' '); if (pos >= 0) { // assumption: last character of curString is NOT ' ' // otherwise pos+1 leads to an out-of-bounds exception number = curString.substring(pos + 1); } } i++; proceedToNextNonEmptyLine(); } } i = split.length - 1; // last block: DOI, detailed information // sometimes, this information is in the third last block etc... // therefore, read until the beginning of the file while (i >= 0) { readLastBlock(); // i now points to the block before or is -1 // curString contains the last block, separated by " " extractYear(); int pos = curString.indexOf("(Eds.)"); if (pos >= 0 && publisher == null) { // looks like a Springer last line // e.g: A. Persson and J. Stirna (Eds.): PoEM 2009, LNBIP 39, pp. 161-175, 2009. publisher = "Springer"; editor = streamlineNames(curString.substring(0, pos - 1)); curString = curString.substring( pos + "(Eds.)".length() + 2); // +2 because of ":" after (Eds.) 
and the subsequent space String[] springerSplit = curString.split(", "); if (springerSplit.length >= 4) { conference = springerSplit[0]; String seriesData = springerSplit[1]; int lastSpace = seriesData.lastIndexOf(' '); series = seriesData.substring(0, lastSpace); volume = seriesData.substring(lastSpace + 1); pages = springerSplit[2].substring(4); if (springerSplit[3].length() >= 4) { year = springerSplit[3].substring(0, 4); } } } else { if (DOI == null) { pos = curString.indexOf("DOI"); if (pos < 0) { pos = curString.indexOf("doi"); } if (pos >= 0) { pos += 3; char delimiter = curString.charAt(pos); if (delimiter == ':' || delimiter == ' ') { pos++; } int nextSpace = curString.indexOf(' ', pos); if (nextSpace > 0) { DOI = curString.substring(pos, nextSpace); } else { DOI = curString.substring(pos); } } } if (publisher == null && curString.contains("IEEE")) { // IEEE has the conference things at the end publisher = "IEEE"; // year is extracted by extractYear // otherwise, we could it determine as follows: // String yearStr = curString.substring(curString.length()-4); // if (isYear(yearStr)) { // year = yearStr; // } if (conference == null) { pos = curString.indexOf('$'); if (pos > 0) { // we found the price // before the price, the ISSN is stated // skip that pos -= 2; while (pos >= 0 && curString.charAt(pos) != ' ') { pos--; } if (pos > 0) { conference = curString.substring(0, pos); } } } } // String lower = curString.toLowerCase(); // if (institution == null) { // // } } } BibtexEntry entry = new BibtexEntry(); entry.setType(type); if (author != null) { entry.setField("author", author); } if (editor != null) { entry.setField("editor", editor); } if (institution != null) { entry.setField("institution", institution); } if (abstractT != null) { entry.setField("abstract", abstractT); } if (keywords != null) { entry.setField("keywords", keywords); } if (title != null) { entry.setField("title", title); } if (conference != null) { entry.setField("booktitle", conference); } if (DOI != null) { entry.setField("doi", DOI); } if (series != null) { entry.setField("series", series); } if (volume != null) { entry.setField("volume", volume); } if (number != null) { entry.setField("number", number); } if (pages != null) { entry.setField("pages", pages); } if (year != null) { entry.setField("year", year); } if (publisher != null) { entry.setField("publisher", publisher); } entry.setField("review", textResult); res.add(entry); } catch (NoClassDefFoundError e) { if (e.getMessage().equals("org/bouncycastle/jce/provider/BouncyCastleProvider")) { status.showMessage( Localization.lang( "Java Bouncy Castle library not found. Please download and install it. For more information see http://www.bouncycastle.org/.")); } else { LOGGER.error("Could not find class", e); } } finally { document.close(); } return res; }
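/*
 * extractYear() and the class-level year variable are used above but not shown. A hedged sketch of
 * scanning a line for a plausible four-digit publication year; the real helper may also restrict
 * the position in the line or the accepted range.
 */
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class YearExtractionSketch {

    private static final Pattern YEAR = Pattern.compile("\\b(19|20)\\d{2}\\b");

    /** Returns the first plausible year found in the text, or null if there is none. */
    static String extractYear(String text) {
        Matcher matcher = YEAR.matcher(text);
        return matcher.find() ? matcher.group() : null;
    }

    public static void main(String[] args) {
        System.out.println(extractYear("PoEM 2009, LNBIP 39, pp. 161-175, 2009.")); // 2009
    }
}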
/** Parse the entries in the source, and return a List of BibtexEntry objects. */ @Override public List<BibtexEntry> importEntries(InputStream stream, OutputPrinter status) throws IOException { if (stream == null) { throw new IOException("No stream given."); } ArrayList<BibtexEntry> bibitems = new ArrayList<BibtexEntry>(); StringBuilder sb = new StringBuilder(); BufferedReader in = new BufferedReader(ImportFormatReader.getReaderDefaultEncoding(stream)); // Pattern fieldPattern = Pattern.compile("^AU |^TI |^SO |^DT |^C1 |^AB // |^ID |^BP |^PY |^SE |^PY |^VL |^IS "); String str; while ((str = in.readLine()) != null) { if (str.length() < 3) { continue; } // begining of a new item if (str.substring(0, 3).equals("PT ")) { sb.append("::").append(str); } else { String beg = str.substring(0, 3).trim(); // I could have used the fieldPattern regular expression instead // however this seems to be // quick and dirty and it works! if (beg.length() == 2) { sb.append(" ## "); // mark the begining of each field sb.append(str); } else { sb.append("EOLEOL"); // mark the end of each line sb.append(str.trim()); // remove the initial spaces } } } String[] entries = sb.toString().split("::"); HashMap<String, String> hm = new HashMap<String, String>(); // skip the first entry as it is either empty or has document header for (String entry : entries) { String[] fields = entry.split(" ## "); if (fields.length == 0) { fields = entry.split("\n"); } String Type = ""; String PT = ""; String pages = ""; hm.clear(); for (String field : fields) { // empty field don't do anything if (field.length() <= 2) { continue; } String beg = field.substring(0, 2); String value = field.substring(3); if (value.startsWith(" - ")) { value = value.substring(3); } value = value.trim(); if (beg.equals("PT")) { if (value.startsWith("J")) { PT = "article"; } else { PT = value; } Type = "article"; // make all of them PT? 
} else if (beg.equals("TY")) { if ("JOUR".equals(value)) { Type = "article"; } else if ("CONF".equals(value)) { Type = "inproceedings"; } } else if (beg.equals("JO")) { hm.put("booktitle", value); } else if (beg.equals("AU")) { String author = IsiImporter.isiAuthorsConvert(value.replaceAll("EOLEOL", " and ")); // if there is already someone there then append with "and" if (hm.get("author") != null) { author = hm.get("author") + " and " + author; } hm.put("author", author); } else if (beg.equals("TI")) { hm.put("title", value.replaceAll("EOLEOL", " ")); } else if (beg.equals("SO") || beg.equals("JA")) { hm.put("journal", value.replaceAll("EOLEOL", " ")); } else if (beg.equals("ID") || beg.equals("KW")) { value = value.replaceAll("EOLEOL", " "); String existingKeywords = hm.get("keywords"); if (existingKeywords != null && !existingKeywords.contains(value)) { existingKeywords += ", " + value; } else { existingKeywords = value; } hm.put("keywords", existingKeywords); } else if (beg.equals("AB")) { hm.put("abstract", value.replaceAll("EOLEOL", " ")); } else if (beg.equals("BP") || beg.equals("BR") || beg.equals("SP")) { pages = value; } else if (beg.equals("EP")) { int detpos = value.indexOf(' '); // tweak for IEEE Explore if (detpos != -1 && !value.substring(0, detpos).trim().isEmpty()) { value = value.substring(0, detpos); } pages = pages + "--" + value; } else if (beg.equals("PS")) { pages = IsiImporter.parsePages(value); } else if (beg.equals("AR")) { pages = value; } else if (beg.equals("IS")) { hm.put("number", value); } else if (beg.equals("PY")) { hm.put("year", value); } else if (beg.equals("VL")) { hm.put("volume", value); } else if (beg.equals("PU")) { hm.put("publisher", value); } else if (beg.equals("DI")) { hm.put("doi", value); } else if (beg.equals("PD")) { String month = IsiImporter.parseMonth(value); if (month != null) { hm.put("month", month); } } else if (beg.equals("DT")) { Type = value; if (Type.equals("Review")) { Type = "article"; // set "Review" in Note/Comment? } else if (Type.startsWith("Article") || Type.startsWith("Journal") || PT.equals("article")) { Type = "article"; } else { Type = "misc"; } } else if (beg.equals("CR")) { hm.put("CitedReferences", value.replaceAll("EOLEOL", " ; ").trim()); } else { // Preserve all other entries except if (beg.equals("ER") || beg.equals("EF") || beg.equals("VR") || beg.equals("FN")) { continue; } hm.put(beg, value); } } if (!"".equals(pages)) { hm.put("pages", pages); } // Skip empty entries if (hm.isEmpty()) { continue; } BibtexEntry b = new BibtexEntry(DEFAULT_BIBTEXENTRY_ID, BibtexEntryTypes.getEntryType(Type)); // id assumes an existing database so don't // Remove empty fields: ArrayList<Object> toRemove = new ArrayList<Object>(); for (String key : hm.keySet()) { String content = hm.get(key); if (content == null || content.trim().isEmpty()) { toRemove.add(key); } } for (Object aToRemove : toRemove) { hm.remove(aToRemove); } // Polish entries IsiImporter.processSubSup(hm); IsiImporter.processCapitalization(hm); b.setField(hm); bibitems.add(b); } return bibitems; }
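/*
 * The importer above marks the start of each field with " ## " and folds continuation lines with
 * the "EOLEOL" marker. A small sketch of taking one of those reassembled fields apart again into
 * its two-letter tag and its unfolded value; fields shorter than three characters are assumed to
 * have been filtered out, as in the loop above.
 */
public class IsiFieldSketch {

    /** Splits a field such as "TI Some titleEOLEOLcontinued" into {"TI", "Some title continued"}. */
    static String[] parseField(String field) {
        String tag = field.substring(0, 2);
        String value = field.substring(3).replaceAll("EOLEOL", " ").trim();
        return new String[] {tag, value};
    }

    public static void main(String[] args) {
        String[] parts = parseField("TI Some long titleEOLEOLcontinued on a second line");
        System.out.println(parts[0] + ": " + parts[1]);
        // TI: Some long title continued on a second line
    }
}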