/**
  * Adds the given feature to the document, with behavior selected by the current
  * pass ({@code run}): pass 0 only counts documents, pass 1 collects features of
  * representative documents, pass 2 hashes features into searchable fields.
  * Overwrite this method if you want to filter the input, apply hashing, etc.
  *
  * @param feature the current feature.
  * @param document the current document.
  * @param featureFieldName the field hashFunctionsFileName of the feature.
  */
 protected void addToDocument(LireFeature feature, Document document, String featureFieldName) {
   if (run == 0) {
   } // just count documents
   else if (run == 1) { // Select the representatives ...
     // Only features of the configured class from pre-selected documents qualify.
     if (representativesID.contains(docCount)
         && feature
             .getClass()
             .getCanonicalName()
             .equals(featureClass.getCanonicalName())) { // it's a representative.
       // put it into a temporary data structure ...
       representatives.add(feature);
     }
   } else if (run
       == 2) { // actual hashing: find the nearest representatives and put those as a hash into a
     // document.
     if (feature
         .getClass()
         .getCanonicalName()
         .equals(featureClass.getCanonicalName())) { // it's a feature to be hashed
       int[] hashes = getHashes(feature);
       // Full hash list for exact search, plus a truncated (10-element) variant
       // in a "_hash_q" field — presumably for faster/coarser querying; confirm.
       document.add(
           new TextField(
               featureFieldName + "_hash",
               createDocumentString(hashes, hashes.length),
               Field.Store.YES));
       document.add(
           new TextField(
               featureFieldName + "_hash_q", createDocumentString(hashes, 10), Field.Store.YES));
     }
     // The raw feature bytes are always stored in pass 2, hashed or not.
     document.add(new StoredField(featureFieldName, feature.getByteArrayRepresentation()));
   }
 }
  /**
   * Appends the low-stocked-products table to the PDF document: a centered
   * header row (Product ID / Current Stock / Minimum Stock) followed by one
   * row per product returned by the report database.
   *
   * @param document the open PDF document to append to.
   * @throws BadElementException if a table element cannot be created.
   * @throws DocumentException if the table cannot be added to the document.
   */
  private void addProductDetails(Document document) throws BadElementException, DocumentException {
    // Push the table down the page with some vertical whitespace.
    Paragraph spacer = new Paragraph();
    addEmptyLine(spacer, 5);
    document.add(spacer);

    PdfPTable table = new PdfPTable(3);
    table.getDefaultCell().setHorizontalAlignment(Element.ALIGN_CENTER);

    // Centered header cells, marked as the repeating header row.
    for (String heading : new String[] {"Product ID", "Current Stock", "Minimum Stock"}) {
      PdfPCell headerCell = new PdfPCell(new Phrase(heading));
      headerCell.setHorizontalAlignment(Element.ALIGN_CENTER);
      table.addCell(headerCell);
    }
    table.setHeaderRows(1);

    // One body row per product: id, current stock, minimum stock.
    for (ArrayList<String> product : rprtDBAccess.getLowStockedProducts()) {
      table.addCell(product.get(0));
      table.addCell(product.get(1));
      table.addCell(product.get(2));
      table.completeRow();
    }
    document.add(table);
  }
 /**
  * Converts a journal entry into a Lucene document.
  *
  * <p>Id, name, date (epoch millis) and type are stored unanalyzed for exact
  * lookup; the full text is analyzed for searching. All fields are stored.
  *
  * @param entry the entry to index.
  * @return a new document carrying one field per {@code EntryIndexFields} member.
  */
 private Document getDocument(JournalEntry entry) {
   Document doc = new Document();
   addEntryField(doc, EntryIndexFields.ID, entry.getUid().toString(), Field.Index.NOT_ANALYZED);
   addEntryField(doc, EntryIndexFields.NAME, entry.getName(), Field.Index.NOT_ANALYZED);
   addEntryField(doc, EntryIndexFields.FULLTEXT, entry.getText(), Field.Index.ANALYZED);
   addEntryField(
       doc,
       EntryIndexFields.DATE,
       Long.toString(entry.getDateTime().getMillis()),
       Field.Index.NOT_ANALYZED);
   addEntryField(doc, EntryIndexFields.TYPE, entry.getItemType().name(), Field.Index.NOT_ANALYZED);
   return doc;
 }

 /** Adds one stored field named after {@code field} with the given index mode. */
 private void addEntryField(Document doc, EntryIndexFields field, String value, Field.Index index) {
   doc.add(new Field(field.name(), value, Field.Store.YES, index));
 }
Example #4
0
  /**
   * Prints the currently selected timeline subtree to a PDF file as a
   * three-column table (date, MACB, short description).
   *
   * <p>Does nothing when there is no current tree or no selected parent event.
   *
   * <p>NOTE(review): if building the document throws, neither the document nor
   * the writer is closed, leaking the output file handle — consider try/finally.
   *
   * @param filename path of the PDF file to create.
   * @throws IOException if the output file cannot be written.
   * @throws DocumentException if the PDF document cannot be built.
   */
  public void print(String filename) throws IOException, DocumentException {
    EventTree tree = app.getTimelines().getCurrentTree();
    if (tree == null || tree.isEmpty()) return;

    ComplexEvent parent = tree.getTopSelectionParent();
    if (parent == null) return;

    // Instantiation of document object: A4 with 50pt margins on all sides.
    Document document = new Document(PageSize.A4, 50, 50, 50, 50);

    // Creation of PdfWriter object
    PdfWriter writer = PdfWriter.getInstance(document, new FileOutputStream(filename));
    document.open();

    // Title paragraph above the table.
    Paragraph title =
        new Paragraph(
            "A sample output from Zeitline:",
            FontFactory.getFont(FontFactory.TIMES_BOLD, 14, BaseColor.BLUE));
    title.setSpacingAfter(20);
    document.add(title);

    // Relative column widths: 20% date, 12% MACB, 68% description.
    PdfPTable table = new PdfPTable(3);
    float[] tableWidth = {(float) 0.2, (float) 0.12, (float) 0.68};
    table.setWidths(tableWidth);

    // Setting the header
    java.util.List<PdfPCell> headerCells =
        asList(getHeaderCell("Date"), getHeaderCell("MACB"), getHeaderCell("Short Description"));

    for (PdfPCell cell : headerCells) table.addCell(cell);

    // One row per child event of the selected parent.
    int max = parent.countChildren();
    for (int i = 0; i < max; i++) {
      AbstractTimeEvent entry = parent.getEventByIndex(i);
      table.addCell(getBodyCell(entry.getStartTime().toString()));

      String name = entry.getName();
      // Assumes names are formatted "MACB description" with the 4 MACB flags
      // first and a separator at index 4 — TODO confirm against the producer.
      if (name != null && name.length() > 5) {
        String macb = name.substring(0, 4);
        String desc = name.substring(5);

        table.addCell(getBodyCell(macb));
        table.addCell(getBodyCell(desc));
      } else {
        table.addCell("");
        table.addCell("");
      }
    }
    document.add(table);

    // Closure
    document.close();
    writer.close();
  }
 /**
  * Writes the invoice header: the "SZÁMLA" title followed by a right-aligned
  * invoice-number line padded with two empty lines.
  */
 private void addHeader(Document document, Examination examination) throws DocumentException {
   document.add(new Paragraph("SZÁMLA", headerFont));

   Paragraph invoiceNumber =
       new Paragraph("Számlaszám: " + examination.getInvoice().getInvoiceId(), boldDataFont);
   invoiceNumber.setAlignment(Element.ALIGN_RIGHT);
   addEmptyLine(invoiceNumber, 2);
   document.add(invoiceNumber);
 }
  /**
   * Indexes the given test documents into an in-memory Lucene index and returns
   * a reader over it.
   *
   * <p>Fields flagged {@code storedPayloads} get a dedicated analyzer that
   * attaches each token's type as its payload; all other fields use a standard
   * analyzer. Term vectors are configured per field from its settings.
   *
   * @param testDocs documents to index; the first document's field settings
   *     determine which fields receive the payload analyzer.
   * @return a reader over the freshly built index.
   * @throws IOException if indexing fails.
   */
  protected DirectoryReader indexDocsWithLucene(TestDoc[] testDocs) throws IOException {

    Map<String, Analyzer> mapping = new HashMap<String, Analyzer>();
    for (TestFieldSetting field : testDocs[0].fieldSettings) {
      if (field.storedPayloads) {
        mapping.put(
            field.name,
            new Analyzer() {
              @Override
              protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
                Tokenizer tokenizer = new StandardTokenizer(Version.CURRENT.luceneVersion, reader);
                TokenFilter filter = new LowerCaseFilter(Version.CURRENT.luceneVersion, tokenizer);
                // Store each token's type attribute as its payload.
                filter = new TypeAsPayloadTokenFilter(filter);
                return new TokenStreamComponents(tokenizer, filter);
              }
            });
      }
    }
    PerFieldAnalyzerWrapper wrapper =
        new PerFieldAnalyzerWrapper(
            new StandardAnalyzer(Version.CURRENT.luceneVersion, CharArraySet.EMPTY_SET), mapping);

    Directory dir = new RAMDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(Version.CURRENT.luceneVersion, wrapper);

    conf.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    IndexWriter writer = new IndexWriter(dir, conf);

    for (TestDoc doc : testDocs) {
      Document d = new Document();
      d.add(new Field("id", doc.id, StringField.TYPE_STORED));
      for (int i = 0; i < doc.fieldContent.length; i++) {
        FieldType type = new FieldType(TextField.TYPE_STORED);
        TestFieldSetting fieldSetting = doc.fieldSettings[i];

        // Positions are required whenever offsets or payloads are stored.
        type.setStoreTermVectorOffsets(fieldSetting.storedOffset);
        type.setStoreTermVectorPayloads(fieldSetting.storedPayloads);
        type.setStoreTermVectorPositions(
            fieldSetting.storedPositions
                || fieldSetting.storedPayloads
                || fieldSetting.storedOffset);
        type.setStoreTermVectors(true);
        type.freeze();
        d.add(new Field(fieldSetting.name, doc.fieldContent[i], type));
      }
      // Update-by-id keeps the index free of duplicates; commit per doc.
      writer.updateDocument(new Term("id", doc.id), d);
      writer.commit();
    }
    writer.close();

    return DirectoryReader.open(dir);
  }
Example #7
0
  /**
   * Adds the invoice title page: company logo, company name and "Invoice"
   * header, slogan, current date and the customer's order id.
   *
   * @param document the open PDF document to write into.
   * @param customerOrder supplies the order id printed on the page.
   * @throws DocumentException if an element cannot be added to the document.
   */
  public void addTitlePage(Document document, CustomerOrder customerOrder)
      throws DocumentException {
    Paragraph preface = new Paragraph();
    // We add one empty line
    addEmptyLine(preface, 1);
    Image image = null;
    try {
      image = Image.getInstance("img/gfcLogo.jpg");
      image.scaleAbsolute(100f, 100f);
      image.setAlignment(Image.ALIGN_LEFT);
    } catch (IOException e) {
      // Logo is decorative; report and continue without it.
      e.printStackTrace();
    }
    // Fix: only add the logo when it actually loaded — the original passed a
    // null Image to document.add() after a failed load, which throws an NPE.
    if (image != null) {
      document.add(image);
    }
    // Big header: company name plus a right-shifted "Invoice" label.
    Phrase invoice = new Phrase();
    invoice.add(new Chunk("Generation For Christ", companyName));
    invoice.add(new Chunk("                                   Invoice", companyName));
    preface.add(new Paragraph(invoice));

    preface.add(new Paragraph("                      We Make Disciples", companySlogan));

    // Current date rendered dd/MM/yyyy, pushed toward the right edge by padding.
    Date date = new Date();
    String dateFormat = new SimpleDateFormat("dd/MM/yyyy").format(date);
    preface.add(
        new Paragraph(
            "                                                                                                                                         DATE:   "
                + dateFormat,
            details));
    preface.add(
        new Paragraph(
            "25 James Street, Dandenong                                                                                   ORDER ID:    "
                + customerOrder.getOrderId(),
            details));
    preface.add(new Paragraph("Melbourne, Victoria, 3175", details));
    preface.add(new Paragraph("Phone # ", details));

    document.add(preface);
  }
  /**
   * Builds a Lucene document for the given image: an optional stored identifier
   * field plus every descriptor field produced for the image.
   */
  @Override
  public Document createDocument(BufferedImage image, String identifier) {
    Document doc = new Document();

    if (identifier != null) {
      doc.add(new StringField(DocumentBuilder.FIELD_NAME_IDENTIFIER, identifier, Field.Store.YES));
    }

    for (Field descriptorField : createDescriptorFields(image)) {
      doc.add(descriptorField);
    }

    return doc;
  }
Example #9
0
  /**
   * Loads a given text file into a document recording any spelling errors, i.e., words that are not
   * in this SpellChecker's dictionary.
   *
   * @require fr is not null, document is not null
   * @ensure The lines of files are loaded into the document along with their associated errors.
   * @throws IOException if reading from the file fails.
   */
  private void loadLines(FileReader fr, Document document) throws IOException {
    BufferedReader br = new BufferedReader(fr); // Optimize reading
    try {
      String line = br.readLine();
      while (line != null) {
        // Record each line together with its list of spelling errors.
        document.add(new Line(line, generateErrorList(line)));
        line = br.readLine();
      }
    } finally {
      // Fix: close the reader even when readLine or error generation throws,
      // so the underlying FileReader is never leaked.
      br.close();
    }
  }
  /**
   * Appends the itemized sale table (Csoport / Megjegyzés / Ár / Összesen) for
   * the examination's invoice: one bold row per invoice group (name spanning
   * three columns, net price in the last) plus one row per item in the group.
   */
  private void addSaleData(Document document, Examination examination) throws DocumentException {
    PdfPTable table = new PdfPTable(4);
    table.setWidthPercentage(100);
    table.setSpacingBefore(10);
    table.setSpacingAfter(10);
    table.setWidths(new int[] {7, 5, 2, 2});

    // Centered header row.
    for (String heading : new String[] {"Csoport", "Megjegyzés", "Ár", "Összesen"}) {
      table.addCell(getCell(heading, Element.ALIGN_CENTER, boldtableHeaderFont));
    }

    for (InvoiceGroups group : examination.getInvoice().getInvoiceGroup()) {
      // Group name spans the first three columns; its net price fills the last.
      PdfPCell nameCell = getCell(group.getName(), Element.ALIGN_LEFT, boldDataFont);
      nameCell.setColspan(3);
      table.addCell(nameCell);
      table.addCell(getCell(group.getNetPrice().toString(), Element.ALIGN_LEFT, boldDataFont));

      if (group.getItems() != null && group.getItems().size() != 0) {
        for (Item item : group.getItems()) {
          table.addCell(getCell("", Element.ALIGN_LEFT, dataFont));
          table.addCell(getCell(item.getDescription(), Element.ALIGN_LEFT, dataFont));
          table.addCell(getCell(item.getPrice().toString(), Element.ALIGN_LEFT, dataFont));
          table.addCell(getCell("", Element.ALIGN_LEFT, dataFont));
        }
      }
    }
    document.add(table);
  }
Example #11
0
  /**
   * Reads one stored field from {@code fieldsStream} and adds it to the
   * document, eagerly loading its value.
   *
   * <p>Binary fields are read as a length-prefixed byte block (uncompressed
   * first when {@code compressed}); text fields are read as a string, with
   * store/index/term-vector flags derived from the field info.
   *
   * @param doc document to receive the field.
   * @param fi metadata (name, norms) for the field being read.
   * @param binary whether the stored value is binary.
   * @param compressed whether the stored value is compressed.
   * @param tokenize whether the field should be tokenized (affects index type).
   * @throws IOException if reading from the fields stream fails.
   */
  private void addField(
      Document doc, FieldInfo fi, boolean binary, boolean compressed, boolean tokenize)
      throws IOException {

    // we have a binary stored field, and it may be compressed
    if (binary) {
      int toRead = fieldsStream.readVInt();
      final byte[] b = new byte[toRead];
      fieldsStream.readBytes(b, 0, b.length);
      if (compressed) doc.add(new Field(fi.name, uncompress(b), Field.Store.COMPRESS));
      else doc.add(new Field(fi.name, b, Field.Store.YES));

    } else {
      Field.Store store = Field.Store.YES;
      Field.Index index = getIndexType(fi, tokenize);
      Field.TermVector termVector = getTermVectorType(fi);

      Fieldable f;
      if (compressed) {
        store = Field.Store.COMPRESS;
        int toRead = fieldsStream.readVInt();

        final byte[] b = new byte[toRead];
        fieldsStream.readBytes(b, 0, b.length);
        f =
            new Field(
                fi.name, // field name
                new String(uncompress(b), "UTF-8"), // uncompress the value and add as string
                store,
                index,
                termVector);
        f.setOmitNorms(fi.omitNorms);
      } else {
        f =
            new Field(
                fi.name, // name
                fieldsStream.readString(), // read value
                store,
                index,
                termVector);
        f.setOmitNorms(fi.omitNorms);
      }
      doc.add(f);
    }
  }
Example #12
0
 // Stores the field's size as a 4-byte big-endian value (high order byte first;
 // char = 2 bytes). Only the size is read here -- the caller must skip the
 // field content to continue reading fields.
 // Returns the size in bytes or chars, depending on field type.
 private int addFieldSize(Document doc, FieldInfo fi, boolean binary, boolean compressed)
     throws IOException {
   int size = fieldsStream.readVInt();
   // Text fields are counted in UTF-16 chars, so their byte size is doubled.
   int byteSize = (binary || compressed) ? size : 2 * size;
   byte[] encoded = new byte[4];
   for (int i = 0, shift = 24; i < 4; i++, shift -= 8) {
     encoded[i] = (byte) (byteSize >>> shift);
   }
   doc.add(new Field(fi.name, encoded, Field.Store.YES));
   return size;
 }
  /**
   * Builds a MAX_DOCS-document index, then runs and verifies a plain term query
   * and a boolean OR query over the priority field, printing hits to {@code out}.
   *
   * @param random randomness source for the analyzer and index config.
   * @param out sink for the query/hit report.
   * @param useCompoundFiles whether segments use the compound file format.
   * @param MAX_DOCS number of documents to index (all HIGH_PRIORITY).
   * @throws Exception on any indexing or search failure.
   */
  private void doTest(Random random, PrintWriter out, boolean useCompoundFiles, int MAX_DOCS)
      throws Exception {
    Directory directory = newDirectory();
    Analyzer analyzer = new MockAnalyzer(random);
    IndexWriterConfig conf = newIndexWriterConfig(analyzer);
    final MergePolicy mp = conf.getMergePolicy();
    // 1.0 forces compound files, 0.0 disables them entirely.
    mp.setNoCFSRatio(useCompoundFiles ? 1.0 : 0.0);
    IndexWriter writer = new IndexWriter(directory, conf);
    if (VERBOSE) {
      System.out.println("TEST: now build index MAX_DOCS=" + MAX_DOCS);
    }

    for (int j = 0; j < MAX_DOCS; j++) {
      Document d = new Document();
      d.add(newTextField(PRIORITY_FIELD, HIGH_PRIORITY, Field.Store.YES));
      d.add(newTextField(ID_FIELD, Integer.toString(j), Field.Store.YES));
      writer.addDocument(d);
    }
    writer.close();

    // try a search without OR
    IndexReader reader = DirectoryReader.open(directory);
    IndexSearcher searcher = newSearcher(reader);

    Query query = new TermQuery(new Term(PRIORITY_FIELD, HIGH_PRIORITY));
    out.println("Query: " + query.toString(PRIORITY_FIELD));
    if (VERBOSE) {
      System.out.println("TEST: search query=" + query);
    }

    // Sort by score first, then by the (numeric) id field.
    final Sort sort = new Sort(SortField.FIELD_SCORE, new SortField(ID_FIELD, SortField.Type.INT));

    ScoreDoc[] hits = searcher.search(query, null, MAX_DOCS, sort).scoreDocs;
    printHits(out, hits, searcher);
    checkHits(hits, MAX_DOCS, searcher);

    // try a new search with OR
    searcher = newSearcher(reader);
    hits = null;

    BooleanQuery booleanQuery = new BooleanQuery();
    booleanQuery.add(
        new TermQuery(new Term(PRIORITY_FIELD, HIGH_PRIORITY)), BooleanClause.Occur.SHOULD);
    booleanQuery.add(
        new TermQuery(new Term(PRIORITY_FIELD, MED_PRIORITY)), BooleanClause.Occur.SHOULD);
    out.println("Query: " + booleanQuery.toString(PRIORITY_FIELD));

    hits = searcher.search(booleanQuery, null, MAX_DOCS, sort).scoreDocs;
    printHits(out, hits, searcher);
    checkHits(hits, MAX_DOCS, searcher);

    reader.close();
    directory.close();
  }
    /**
     * Indexes 16K documents with random ids, closing and reopening the writer
     * every {@code reopenInterval} documents.
     *
     * <p>NOTE(review): any exception terminates the JVM with {@code
     * System.exit(0)} — exit code 0 despite the failure, and the broad {@code
     * catch (Exception)} hides the error from callers; consider a non-zero
     * exit code or rethrowing.
     */
    public void run() {
      try {
        for (int i = 0; i < 1024 * 16; i++) {
          Document d = new Document();
          int n = RANDOM.nextInt();
          d.add(Field.Keyword("id", Integer.toString(n)));
          d.add(Field.UnStored("contents", intToEnglish(n)));
          System.out.println("Adding " + n);
          writer.addDocument(d);

          // Periodically close and reopen the writer against the same index dir.
          if (i % reopenInterval == 0) {
            writer.close();
            writer = new IndexWriter("index", ANALYZER, false);
          }
        }
      } catch (Exception e) {
        System.out.println(e.toString());
        e.printStackTrace();
        System.exit(0);
      }
    }
Example #15
0
  /**
   * Renders the exporter's data as a one-table A4 PDF written to the given
   * stream: a gray header row of property names followed by one cell per value
   * (nulls rendered as empty strings).
   *
   * <p>Does nothing when either argument is null.
   *
   * <p>NOTE(review): if building the table throws after {@code document.open()},
   * the document is never closed — consider a try/finally.
   *
   * @param exporter supplies property names and row values.
   * @param writer destination stream for the PDF.
   * @throws Exception if font creation or PDF generation fails.
   */
  protected void setValues(IExporter<?> exporter, FileOutputStream writer) throws Exception {

    if (exporter != null && writer != null) {

      // Base font honors the user's configured encoding.
      String encoding = SettingsManager.getInstance().getValue(SettingProperty.ENCODING);
      BaseFont baseFont = BaseFont.createFont(BaseFont.HELVETICA, encoding, BaseFont.NOT_EMBEDDED);
      Font font = new Font(baseFont);

      List<String> properties = exporter.getProperties();
      Document document = new Document(PageSize.A4);

      // step 2
      PdfWriter.getInstance(document, writer);
      // step 3
      document.open();
      PdfPTable table = new PdfPTable(properties.size());

      table.setFooterRows(1);
      table.setWidthPercentage(100f);

      table.getDefaultCell().setColspan(1);

      // Header cells get a gray background; reset to default afterwards.
      table.getDefaultCell().setBackgroundColor(BaseColor.LIGHT_GRAY);

      for (String p : properties) {

        table.addCell(new Phrase(p, font));
      }

      //            table.setHeaderRows(1);
      table.getDefaultCell().setBackgroundColor(null);

      // NOTE(review): unchecked cast — assumes the exporter's value type is
      // String; verify against the call sites.
      List<List<String>> values = ((IExporter<String>) exporter).getValues();

      String pValue;

      for (List<String> value : values) {

        for (String pv : value) {

          pValue = pv;

          // Render missing values as empty cells rather than "null".
          if (pValue == null) {
            pValue = "";
          }
          table.addCell(new Phrase(pValue, font));
        }
      }

      document.add(table);
      document.close();
    }
  }
Example #16
0
  /**
   * Builds a Lucene document for an HTML file: stored-only url and summary,
   * keyword-indexed modification date and uid, and tokenized contents and title.
   *
   * @param f the HTML file to index.
   * @return a new document describing {@code f}.
   * @throws IOException if the file cannot be read or parsed.
   * @throws InterruptedException if HTML parsing is interrupted.
   */
  public static Document Document(File f) throws IOException, InterruptedException {
    // make a new, empty document
    Document doc = new Document();

    // Add the url as a field named "url".  Use an UnIndexed field, so
    // that the url is just stored with the document, but is not searchable.
    doc.add(Field.UnIndexed("url", f.getPath().replace(dirSep, '/')));

    // Add the last modified date of the file a field named "modified".  Use a
    // Keyword field, so that it's searchable, but so that no attempt is made
    // to tokenize the field into words.
    doc.add(Field.Keyword("modified", DateField.timeToString(f.lastModified())));

    // Add the uid as a field, so that index can be incrementally maintained.
    // This field is not stored with document, it is indexed, but it is not
    // tokenized prior to indexing.
    doc.add(new Field("uid", uid(f), false, true, false));

    HTMLParser parser = new HTMLParser(f);

    // Add the tag-stripped contents as a Reader-valued Text field so it will
    // get tokenized and indexed.
    doc.add(Field.Text("contents", parser.getReader()));

    // Add the summary as an UnIndexed field, so that it is stored and returned
    // with hit documents for display.
    doc.add(Field.UnIndexed("summary", parser.getSummary()));

    // Add the title as a separate Text field, so that it can be searched
    // separately.
    doc.add(Field.Text("title", parser.getTitle()));

    // return the document
    return doc;
  }
Example #17
0
  /**
   * Adds a lazily-loaded field to the document: instead of reading the stored
   * value now, records its length and file position so a LazyField can fetch it
   * on demand, then advances the stream past the value.
   *
   * @param doc document to receive the lazy field.
   * @param fi metadata (name, norms) for the field being read.
   * @param binary whether the stored value is binary.
   * @param compressed whether the stored value is compressed.
   * @param tokenize whether the field should be tokenized (affects index type).
   * @throws IOException if reading from the fields stream fails.
   */
  private void addFieldLazy(
      Document doc, FieldInfo fi, boolean binary, boolean compressed, boolean tokenize)
      throws IOException {
    if (binary == true) {
      int toRead = fieldsStream.readVInt();
      long pointer = fieldsStream.getFilePointer();
      if (compressed) {
        // was: doc.add(new Fieldable(fi.name, uncompress(b), Fieldable.Store.COMPRESS));
        doc.add(new LazyField(fi.name, Field.Store.COMPRESS, toRead, pointer));
      } else {
        // was: doc.add(new Fieldable(fi.name, b, Fieldable.Store.YES));
        doc.add(new LazyField(fi.name, Field.Store.YES, toRead, pointer));
      }
      // Need to move the pointer ahead by toRead positions
      fieldsStream.seek(pointer + toRead);
    } else {
      Field.Store store = Field.Store.YES;
      Field.Index index = getIndexType(fi, tokenize);
      Field.TermVector termVector = getTermVectorType(fi);

      Fieldable f;
      if (compressed) {
        store = Field.Store.COMPRESS;
        int toRead = fieldsStream.readVInt();
        long pointer = fieldsStream.getFilePointer();
        f = new LazyField(fi.name, store, toRead, pointer);
        // skip over the part that we aren't loading
        fieldsStream.seek(pointer + toRead);
        f.setOmitNorms(fi.omitNorms);
      } else {
        int length = fieldsStream.readVInt();
        long pointer = fieldsStream.getFilePointer();
        // Skip ahead of where we are by the length of what is stored
        fieldsStream.skipChars(length);
        f = new LazyField(fi.name, store, index, termVector, length, pointer);
        f.setOmitNorms(fi.omitNorms);
      }
      doc.add(f);
    }
  }
  /**
   * Test fixture: builds string/long/double field mappers, a field-data service
   * backed by a stub index service, and a RAM index of 10 documents with one
   * value per mapper ("str0".."str9", 0..9, 0.0..9.0), then opens a
   * slow-composite reader over it.
   */
  @Before
  public void setup() throws Exception {
    super.setUp();

    // setup field mappers
    strMapper =
        new StringFieldMapper.Builder("str_value")
            .build(new Mapper.BuilderContext(null, new ContentPath(1)));

    lngMapper =
        new LongFieldMapper.Builder("lng_value")
            .build(new Mapper.BuilderContext(null, new ContentPath(1)));

    dblMapper =
        new DoubleFieldMapper.Builder("dbl_value")
            .build(new Mapper.BuilderContext(null, new ContentPath(1)));

    // create index and fielddata service
    ifdService = new IndexFieldDataService(new Index("test"), new DummyCircuitBreakerService());
    MapperService mapperService =
        MapperTestUtils.newMapperService(
            ifdService.index(), ImmutableSettings.Builder.EMPTY_SETTINGS);
    ifdService.setIndexService(new StubIndexService(mapperService));
    writer =
        new IndexWriter(
            new RAMDirectory(),
            new IndexWriterConfig(Lucene.VERSION, new StandardAnalyzer(Lucene.VERSION)));

    // One document per i, carrying all three field types.
    int numDocs = 10;
    for (int i = 0; i < numDocs; i++) {
      Document d = new Document();
      d.add(new StringField(strMapper.names().indexName(), "str" + i, Field.Store.NO));
      d.add(new LongField(lngMapper.names().indexName(), i, Field.Store.NO));
      d.add(new DoubleField(dblMapper.names().indexName(), Double.valueOf(i), Field.Store.NO));
      writer.addDocument(d);
    }

    reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(writer, true));
  }
Example #19
0
  /**
   * Writes out the information and PDF file to disk, and closes the stream.
   *
   * @return a negative Result if there was an error creating the document, and
   *     a positive Result if the document was successfully written and closed
   *     (or was already closed).
   */
  public Result finish() {
    // Already closed: nothing to do.
    if (!_open) {
      return new Result(true, "The document is closed.");
    }
    fillNullCells();
    try {
      _d.add(_table);
    } catch (DocumentException e) {
      return new Result(false, "The document was not successfully constructed.");
    }
    _d.close();
    _open = false;
    return new Result(true, "The document is closed.");
  }
 /**
  * Adds a full-width two-column table holding the issuer ("Kibocsátó") address
  * and the buyer ("Vevő") address taken from the examination's patient.
  */
 private void addBuyerAndSeller(Document document, Examination examination)
     throws DocumentException {
   PdfPTable addressTable = new PdfPTable(2);
   addressTable.setWidthPercentage(100);
   addressTable.addCell(getPartyAddress("Kibocsátó:", "Ceg neve", "Ceg Varosa", "Ceg cime"));
   addressTable.addCell(
       getPartyAddress(
           "Vevő:",
           examination.getPatient().getName(),
           examination.getPatient().getCity(),
           examination.getPatient().getAddress()));
   document.add(addressTable);
 }
Example #21
0
  // In merge mode the data of a compressed field is not uncompressed: the raw
  // bytes (or plain string) are wrapped as-is in a FieldForMerge.
  private void addFieldForMerge(
      Document doc, FieldInfo fi, boolean binary, boolean compressed, boolean tokenize)
      throws IOException {
    final Object data;
    if (binary || compressed) {
      // Length-prefixed byte block, copied verbatim.
      byte[] raw = new byte[fieldsStream.readVInt()];
      fieldsStream.readBytes(raw, 0, raw.length);
      data = raw;
    } else {
      data = fieldsStream.readString();
    }
    doc.add(new FieldForMerge(data, fi, binary, compressed, tokenize));
  }
 /**
  * Indexes one JSON object: its "body" is stored and analyzed as text, its
  * base-36 "id" is stored as a numeric field.
  *
  * <p>Indexing failures are reported to stderr instead of being propagated,
  * preserving the original best-effort behavior.
  *
  * @param object source object; must contain string "body" and "id" entries.
  */
 public void addDocument(JSONObject object) {
   Document doc = new Document();
   String body = (String) object.get("body");
   String id = (String) object.get("id");
   // NOTE(review): assumes "id" is present and base-36 — Long.valueOf throws otherwise.
   long idLong = Long.valueOf(id, 36);
   doc.add(new TextField("body", body, Field.Store.YES));
   doc.add(new LongField("id", idLong, Field.Store.YES));
   try {
     indexWriter.addDocument(doc);
   } catch (Exception ex) {
     // Fix: the original caught IOException and Exception with identical,
     // duplicated bodies; a single catch covers both with the same behavior.
     System.err.println("Error adding documents to the index. " + ex.getMessage());
   }
 }
 /**
  * Indexes the same single-field document twice (with a commit in between, so
  * two segments exist) and verifies a term-docs enum over "f:j" visits docs 0
  * and 1 and then reports exhaustion.
  */
 public void testTermDocsEnum() throws Exception {
   Directory dir = newDirectory();
   IndexWriter w =
       new IndexWriter(
           dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
   Document d = new Document();
   d.add(newStringField("f", "j", Field.Store.NO));
   w.addDocument(d);
   w.commit();
   w.addDocument(d);
   IndexReader r = w.getReader();
   w.close();
   DocsEnum de = MultiFields.getTermDocsEnum(r, null, "f", new BytesRef("j"));
   assertEquals(0, de.nextDoc());
   assertEquals(1, de.nextDoc());
   assertEquals(DocIdSetIterator.NO_MORE_DOCS, de.nextDoc());
   r.close();
   dir.close();
 }
 /**
  * Verifies that two independently-obtained docs enums over the same term
  * ("f:j") iterate separately: advancing one does not advance the other.
  */
 public void testSeparateEnums() throws Exception {
   Directory dir = newDirectory();
   IndexWriter w =
       new IndexWriter(
           dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
   Document d = new Document();
   d.add(newStringField("f", "j", Field.Store.NO));
   w.addDocument(d);
   w.commit();
   w.addDocument(d);
   IndexReader r = w.getReader();
   w.close();
   DocsEnum d1 = _TestUtil.docs(random(), r, "f", new BytesRef("j"), null, null, 0);
   DocsEnum d2 = _TestUtil.docs(random(), r, "f", new BytesRef("j"), null, null, 0);
   // Both enums independently see doc 0 first.
   assertEquals(0, d1.nextDoc());
   assertEquals(0, d2.nextDoc());
   r.close();
   dir.close();
 }
 /**
  * Draws a vertical list of checkbox images with labels onto the PDF's direct
  * content at the given position, one row per label, 15pt apart.
  *
  * <p>Errors while loading or drawing are reported to the user via a popup
  * rather than propagated.
  *
  * @param writer supplies the direct-content canvas to draw on.
  * @param accountingDocument document the checkbox images are added to.
  * @param font font for the labels.
  * @param label one label per checkbox row.
  * @param xPosition left edge of the checkbox column.
  * @param yPosition top of the checkbox column.
  * @param checked per-row checked state; must be at least label.length long.
  * @param pageNr target page number — currently unused; verify intent.
  */
 private static void createCheckbox(
     PdfWriter writer,
     Document accountingDocument,
     Font font,
     String[] label,
     int xPosition,
     int yPosition,
     boolean[] checked,
     int pageNr) {
   PdfContentByte canvas = writer.getDirectContent();
   //    Rectangle rect;
   //    PdfFormField field;
   //    RadioCheckField checkbox;
   try {
     // Load both states once; each row reuses one of the two template images.
     Image checkbox_checked =
         Image.getInstance(MainWindow.class.getResource("checkbox_checked.jpg"));
     checkbox_checked.scaleAbsolute(10f, 10f);
     Image checkbox = Image.getInstance(MainWindow.class.getResource("checkbox.jpg"));
     checkbox.scaleAbsolute(10f, 10f);
     for (int i = 0; i < label.length; i++) {
       Image checkboxImage;
       if (checked[i]) {
         checkboxImage = Image.getInstance(checkbox_checked);
       } else {
         checkboxImage = Image.getInstance(checkbox);
       }
       // Rows stack downward, 15pt apart; label sits 16pt right of the box.
       checkboxImage.setAbsolutePosition(xPosition, (yPosition - 10 - i * 15));
       accountingDocument.add(checkboxImage);
       ColumnText.showTextAligned(
           canvas,
           Element.ALIGN_LEFT,
           new Phrase(label[i], font),
           (xPosition + 16),
           (yPosition - 8 - i * 15),
           0);
     }
     // TODO: for JDK7 use Multicatch
   } catch (Exception e) { // com.itextpdf.text.DocumentException | java.io.IOException e) {
     UtilityBox.getInstance()
         .displayErrorPopup(
             "Abrechnung", "Fehler beim Erstellen der Abrechnung: " + e.getMessage());
   }
 }
 /**
  * Reads data from a file and writes it to an index.
  *
  * <p>Record format (as consumed here): a 4-byte filename length, the filename
  * bytes, then a sequence of features — each a 1-byte feature index, a 4-byte
  * payload length and the payload — terminated by a byte >= 255 / EOF.
  *
  * <p>NOTE(review): the return values of the inner {@code in.read(buf, off, len)}
  * calls are ignored; a short read would silently corrupt the parse. The stream
  * is also not closed in a finally block, so it leaks if parsing throws.
  *
  * @param indexWriter the index to write to.
  * @param inputFile the input data for the process.
  * @throws IOException
  * @throws InstantiationException
  * @throws IllegalAccessException
  * @throws ClassNotFoundException
  */
 private void readFile(IndexWriter indexWriter, File inputFile)
     throws IOException, InstantiationException, IllegalAccessException, ClassNotFoundException {
   BufferedInputStream in = new BufferedInputStream(new FileInputStream(inputFile));
   byte[] tempInt = new byte[4];
   int tmp, tmpFeature, count = 0;
   byte[] temp = new byte[100 * 1024];
   // read file hashFunctionsFileName length:
   while (in.read(tempInt, 0, 4) > 0) {
     Document d = new Document();
     tmp = SerializationUtils.toInt(tempInt);
     // read file hashFunctionsFileName:
     in.read(temp, 0, tmp);
     String filename = new String(temp, 0, tmp);
     // normalize Filename to full path.
     filename =
         inputFile
                 .getCanonicalPath()
                 .substring(0, inputFile.getCanonicalPath().lastIndexOf(inputFile.getName()))
             + filename;
     d.add(new StringField(DocumentBuilder.FIELD_NAME_IDENTIFIER, filename, Field.Store.YES));
     //            System.out.print(filename);
     // A feature-index byte >= 255 (or EOF, which returns -1... NOTE(review):
     // -1 < 255, so EOF inside a record does NOT end this loop — verify).
     while ((tmpFeature = in.read()) < 255) {
       //                System.out.print(", " + tmpFeature);
       LireFeature f = (LireFeature) Class.forName(Extractor.features[tmpFeature]).newInstance();
       // byte[] length ...
       in.read(tempInt, 0, 4);
       tmp = SerializationUtils.toInt(tempInt);
       // read feature byte[]
       in.read(temp, 0, tmp);
       f.setByteArrayRepresentation(temp, 0, tmp);
       // Dispatch to the pass-dependent handler (count/representatives/hash).
       addToDocument(f, d, Extractor.featureFieldNames[tmpFeature]);
       //                d.add(new StoredField(Extractor.featureFieldNames[tmpFeature],
       // f.getByteArrayRepresentation()));
     }
     // Only the final (hashing) pass actually writes documents to the index.
     if (run == 2) indexWriter.addDocument(d);
     docCount++;
     //            if (count%1000==0) System.out.print('.');
     //            if (count%10000==0) System.out.println(" " + count);
   }
   in.close();
 }
Example #27
0
 /**
  * Repeatedly updates the same document (term "id:test") {@code num} times,
  * randomly refreshing a near-real-time reader and asserting the index always
  * contains exactly one live document (updates replace, never add).
  */
 @Override
 public void run() {
   try {
     DirectoryReader open = null;
     for (int i = 0; i < num; i++) {
       Document doc = new Document(); // docs.nextDoc();
       BytesRef br = new BytesRef("test");
       doc.add(newStringField("id", br, Field.Store.NO));
       // Same term every iteration: each call replaces the previous doc.
       writer.updateDocument(new Term("id", br), doc);
       if (random().nextInt(3) == 0) {
         if (open == null) {
           open = DirectoryReader.open(writer, true);
         }
         // Refresh the NRT reader if the index changed, closing the old one.
         DirectoryReader reader = DirectoryReader.openIfChanged(open);
         if (reader != null) {
           open.close();
           open = reader;
         }
         assertEquals(
             "iter: "
                 + i
                 + " numDocs: "
                 + open.numDocs()
                 + " del: "
                 + open.numDeletedDocs()
                 + " max: "
                 + open.maxDoc(),
             1,
             open.numDocs());
       }
     }
     if (open != null) {
       open.close();
     }
   } catch (Exception e) {
     throw new RuntimeException(e);
   }
 }
 /**
  * Adds one Lucene document per JSON object to the index.
  *
  * <p>Each object is expected to provide a {@code "body"} string and a base-36 encoded
  * {@code "id"} string. Documents that fail to index are reported to stderr and skipped, so a
  * single failure does not abort the batch.
  *
  * @param jsonObjects array of JSON objects carrying "body" and "id" entries
  */
 @SuppressWarnings("unchecked") // JSONArray is a raw List; elements are JSONObjects by contract
 public void addDocuments(JSONArray jsonObjects) {
   for (JSONObject object : (List<JSONObject>) jsonObjects) {
     String body = (String) object.get("body");
     String id = (String) object.get("id");
     // ids arrive base-36 encoded; decode to a numeric field for range/sort support.
     long idLong = Long.parseLong(id, 36);
     Document doc = new Document();
     doc.add(new TextField("body", body, Field.Store.YES));
     doc.add(new LongField("id", idLong, Field.Store.YES));
     try {
       indexWriter.addDocument(doc);
     } catch (Exception ex) {
       // Single catch: the original had two byte-identical handlers for IOException and
       // Exception. Best-effort by design — log and continue with the next object.
       System.err.println("Error adding documents to the index. " + ex.getMessage());
     }
   }
 }
Example #29
0
  /**
   * Verifies that an index-time field boost influences scoring: two otherwise identical
   * single-term documents are indexed, the second with a 2x field boost, and the collected
   * scores for the query {@code field:word} must be strictly increasing across documents.
   */
  public void testDocBoost() throws Exception {
    Directory store = newDirectory();
    RandomIndexWriter writer =
        new RandomIndexWriter(
            random(),
            store,
            newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));

    // Identical fields except for the boost on f2.
    Field f1 = newTextField("field", "word", Field.Store.YES);
    Field f2 = newTextField("field", "word", Field.Store.YES);
    f2.setBoost(2.0f);

    Document d1 = new Document();
    Document d2 = new Document();

    d1.add(f1); // boost = 1
    d2.add(f2); // boost = 2

    writer.addDocument(d1);
    writer.addDocument(d2);

    IndexReader reader = writer.getReader();
    writer.close();

    // One slot per possible doc id; unmatched slots stay 0.0f.
    final float[] scores = new float[4];

    IndexSearcher searcher = newSearcher(reader);
    searcher.search(
        new TermQuery(new Term("field", "word")),
        new SimpleCollector() {
          // Per-segment doc id base, so scores land at their global doc id.
          private int base = 0;
          private Scorer scorer;

          @Override
          public void setScorer(Scorer scorer) {
            this.scorer = scorer;
          }

          @Override
          public final void collect(int doc) throws IOException {
            scores[doc + base] = scorer.score();
          }

          @Override
          protected void doSetNextReader(LeafReaderContext context) throws IOException {
            base = context.docBase;
          }

          @Override
          public boolean needsScores() {
            return true;
          }
        });

    float lastScore = 0.0f;

    // Doc order follows insertion order here, so each score must beat the previous one.
    for (int i = 0; i < 2; i++) {
      if (VERBOSE) {
        System.out.println(searcher.explain(new TermQuery(new Term("field", "word")), i));
      }
      if (scores[i] != 0.0) {
        assertTrue(
            "score: " + scores[i] + " should be > lastScore: " + lastScore, scores[i] > lastScore);
      }
      lastScore = scores[i];
    }

    reader.close();
    store.close();
  }
Example #30
0
  /**
   * Pulls tokens from an XmlPullParser and builds a dom4j {@link Document} tree.
   *
   * <p>{@code parent} tracks the element currently being filled; {@code null} means we are at
   * document level (before/outside the root element). The loop only exits by returning the
   * finished document on {@code END_DOCUMENT} or by a thrown exception.
   *
   * @return the fully parsed document
   * @throws DocumentException on malformed content (e.g. text outside the root element)
   * @throws IOException if the underlying input fails
   * @throws XmlPullParserException on low-level XML parse errors
   */
  @Override
  // Implementation methods
  // -------------------------------------------------------------------------
  protected Document parseDocument() throws DocumentException, IOException, XmlPullParserException {
    DocumentFactory df = getDocumentFactory();
    Document document = df.createDocument();
    Element parent = null;
    XmlPullParser pp = getXPPParser();
    pp.setFeature(XmlPullParser.FEATURE_PROCESS_NAMESPACES, true);

    while (true) {
      int type = pp.nextToken();

      switch (type) {
        case XmlPullParser.PROCESSING_INSTRUCTION:
          {
            // The PI text is "target data"; split on the first space.
            String text = pp.getText();
            int loc = text.indexOf(' ');

            if (loc >= 0) {
              String target = text.substring(0, loc);
              String txt = text.substring(loc + 1);
              document.addProcessingInstruction(target, txt);
            } else {
              // No data part — the whole text is the target.
              document.addProcessingInstruction(text, "");
            }

            break;
          }

        case XmlPullParser.COMMENT:
          {
            // Comments are legal both inside elements and at document level.
            if (parent != null) {
              parent.addComment(pp.getText());
            } else {
              document.addComment(pp.getText());
            }

            break;
          }

        case XmlPullParser.CDSECT:
          {
            // CDATA is character content, so it must appear inside an element.
            if (parent != null) {
              parent.addCDATA(pp.getText());
            } else {
              String msg = "Cannot have text content outside of the " + "root document";
              throw new DocumentException(msg);
            }

            break;
          }

        case XmlPullParser.END_DOCUMENT:
          return document;

        case XmlPullParser.START_TAG:
          {
            QName qname =
                (pp.getPrefix() == null)
                    ? df.createQName(pp.getName(), pp.getNamespace())
                    : df.createQName(pp.getName(), pp.getPrefix(), pp.getNamespace());
            Element newElement = df.createElement(qname);
            // Only namespaces declared ON this tag: those between the counts at the parent's
            // depth and this tag's depth.
            int nsStart = pp.getNamespaceCount(pp.getDepth() - 1);
            int nsEnd = pp.getNamespaceCount(pp.getDepth());

            for (int i = nsStart; i < nsEnd; i++) {
              // A null prefix is the default namespace; it is carried by the QName above.
              if (pp.getNamespacePrefix(i) != null) {
                newElement.addNamespace(pp.getNamespacePrefix(i), pp.getNamespaceUri(i));
              }
            }

            for (int i = 0; i < pp.getAttributeCount(); i++) {
              QName qa =
                  (pp.getAttributePrefix(i) == null)
                      ? df.createQName(pp.getAttributeName(i))
                      : df.createQName(
                          pp.getAttributeName(i),
                          pp.getAttributePrefix(i),
                          pp.getAttributeNamespace(i));
              newElement.addAttribute(qa, pp.getAttributeValue(i));
            }

            if (parent != null) {
              parent.add(newElement);
            } else {
              // First element at document level becomes the root.
              document.add(newElement);
            }

            // Descend: subsequent content belongs to this element.
            parent = newElement;

            break;
          }

        case XmlPullParser.END_TAG:
          {
            // Ascend one level; the root's parent is null, putting us back at document level.
            if (parent != null) {
              parent = parent.getParent();
            }

            break;
          }

        case XmlPullParser.ENTITY_REF:
        case XmlPullParser.TEXT:
          {
            String text = pp.getText();

            if (parent != null) {
              parent.addText(text);
            } else {
              // NOTE(review): this also rejects whitespace-only text outside the root —
              // presumably the parser reports that as a different token type; confirm.
              String msg = "Cannot have text content outside of the " + "root document";
              throw new DocumentException(msg);
            }

            break;
          }

        default:
          break;
      }
    }
  }