Example #1
  @Override
  protected boolean maybeFailEngine(String source, Throwable t) {
    boolean shouldFail = super.maybeFailEngine(source, t);
    if (shouldFail) {
      return true;
    }

    // Check for AlreadyClosedException
    if (t instanceof AlreadyClosedException) {
      // if we are already closed due to some tragic exception
      // we need to fail the engine. it might have already been failed before
      // but we are double-checking it's failed and closed
      if (indexWriter.isOpen() == false && indexWriter.getTragicException() != null) {
        failEngine(
            "already closed by tragic event on the index writer", indexWriter.getTragicException());
      } else if (translog.isOpen() == false && translog.getTragicException() != null) {
        failEngine("already closed by tragic event on the translog", translog.getTragicException());
      }
      return true;
    } else if (t != null
        && ((indexWriter.isOpen() == false && indexWriter.getTragicException() == t)
            || (translog.isOpen() == false && translog.getTragicException() == t))) {
      // this is spot on - we are handling the tragic event exception here, so we have to
      // fail the engine right away
      failEngine(source, t);
      return true;
    }
    return false;
  }
  /** Test that core cache key (needed for NRT) is working */
  public void testCoreCacheKey() throws Exception {
    Directory dir = newDirectory();
    IndexWriterConfig iwc = new IndexWriterConfig(null);
    iwc.setMaxBufferedDocs(100);
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    IndexWriter iw = new IndexWriter(dir, iwc);

    // add two docs, id:0 and id:1
    Document doc = new Document();
    Field idField = new StringField("id", "", Field.Store.NO);
    doc.add(idField);
    idField.setStringValue("0");
    iw.addDocument(doc);
    idField.setStringValue("1");
    iw.addDocument(doc);

    // open reader
    ShardId shardId = new ShardId("fake", "_na_", 1);
    DirectoryReader ir = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(iw, true), shardId);
    assertEquals(2, ir.numDocs());
    assertEquals(1, ir.leaves().size());

    // delete id:0 and reopen
    iw.deleteDocuments(new Term("id", "0"));
    DirectoryReader ir2 = DirectoryReader.openIfChanged(ir);

    // we should have the same cache key as before
    assertEquals(1, ir2.numDocs());
    assertEquals(1, ir2.leaves().size());
    assertSame(
        ir.leaves().get(0).reader().getCoreCacheKey(),
        ir2.leaves().get(0).reader().getCoreCacheKey());
    IOUtils.close(ir, ir2, iw, dir);
  }
Example #3
  public void addDocument(List<Document> documentList, String collectionName) {

    IndexWriter indexWriter = indexWriterManager.getIndexWriter(collectionName);

    try {

      logger.info("collectionName : {}", collectionName);
      logger.info("add indexing start................");

      int indexingDocumentCount = 0;
      for (Document doc : documentList) {
        indexingDocumentCount++;
        if ((indexingDocumentCount % 50000) == 0) {
          logger.info("{} indexed...", indexingDocumentCount);
        }

        indexWriter.addDocument(doc);
      }

      logger.info("total indexed document count {}", indexingDocumentCount);

      logger.info("end");

    } catch (IOException e) {

      logger.error("error : ", e);
      throw new RuntimeException("An error occurred during indexing. [" + e.getMessage() + "]");
    }
  }
  /** Build the example index. */
  public void index() throws IOException {
    IndexWriter writer =
        new IndexWriter(
            indexDir, new IndexWriterConfig(new WhitespaceAnalyzer()).setOpenMode(OpenMode.CREATE));

    // TODO: we could index in radians instead ... saves all the conversions in getBoundingBoxFilter

    // Add documents with latitude/longitude location:
    // we index these both as DoublePoints (for bounding box/ranges) and as NumericDocValuesFields
    // (for scoring)
    Document doc = new Document();
    doc.add(new DoublePoint("latitude", 40.759011));
    doc.add(new NumericDocValuesField("latitude", Double.doubleToRawLongBits(40.759011)));
    doc.add(new DoublePoint("longitude", -73.9844722));
    doc.add(new NumericDocValuesField("longitude", Double.doubleToRawLongBits(-73.9844722)));
    writer.addDocument(doc);

    doc = new Document();
    doc.add(new DoublePoint("latitude", 40.718266));
    doc.add(new NumericDocValuesField("latitude", Double.doubleToRawLongBits(40.718266)));
    doc.add(new DoublePoint("longitude", -74.007819));
    doc.add(new NumericDocValuesField("longitude", Double.doubleToRawLongBits(-74.007819)));
    writer.addDocument(doc);

    doc = new Document();
    doc.add(new DoublePoint("latitude", 40.7051157));
    doc.add(new NumericDocValuesField("latitude", Double.doubleToRawLongBits(40.7051157)));
    doc.add(new DoublePoint("longitude", -74.0088305));
    doc.add(new NumericDocValuesField("longitude", Double.doubleToRawLongBits(-74.0088305)));
    writer.addDocument(doc);

    // Open near-real-time searcher
    searcher = new IndexSearcher(DirectoryReader.open(writer));
    writer.close();
  }
  public void testFarsiRangeFilterCollating(
      Analyzer analyzer, String firstBeg, String firstEnd, String secondBeg, String secondEnd)
      throws Exception {
    Directory dir = newDirectory();
    IndexWriter writer =
        new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
    Document doc = new Document();
    doc.add(new Field("content", "\u0633\u0627\u0628", Field.Store.YES, Field.Index.ANALYZED));
    doc.add(new Field("body", "body", Field.Store.YES, Field.Index.NOT_ANALYZED));
    writer.addDocument(doc);
    writer.close();
    IndexReader reader = IndexReader.open(dir);
    IndexSearcher searcher = new IndexSearcher(reader);
    Query query = new TermQuery(new Term("body", "body"));

    // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
    // orders the U+0698 character before the U+0633 character, so the single
    // index Term below should NOT be returned by a TermRangeFilter with a Farsi
    // Collator (or an Arabic one for the case when Farsi collation is not
    // supported).
    ScoreDoc[] result =
        searcher.search(query, new TermRangeFilter("content", firstBeg, firstEnd, true, true), 1)
            .scoreDocs;
    assertEquals("The index Term should not be included.", 0, result.length);

    result =
        searcher.search(query, new TermRangeFilter("content", secondBeg, secondEnd, true, true), 1)
            .scoreDocs;
    assertEquals("The index Term should be included.", 1, result.length);

    searcher.close();
    reader.close();
    dir.close();
  }
Example #6
  @Before
  public void setUp() throws Exception {
    serializer = new LuceneSerializer(true, true);
    entityPath = new PathBuilder<Object>(Object.class, "obj");
    title = entityPath.getString("title");
    author = entityPath.getString("author");
    text = entityPath.getString("text");
    publisher = entityPath.getString("publisher");
    year = entityPath.getNumber("year", Integer.class);
    rating = entityPath.getString("rating");
    gross = entityPath.getNumber("gross", Double.class);
    titles = entityPath.getCollection("title", String.class, StringPath.class);

    longField = entityPath.getNumber("longField", Long.class);
    shortField = entityPath.getNumber("shortField", Short.class);
    byteField = entityPath.getNumber("byteField", Byte.class);
    floatField = entityPath.getNumber("floatField", Float.class);

    idx = new RAMDirectory();
    config =
        new IndexWriterConfig(new StandardAnalyzer())
            .setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    writer = new IndexWriter(idx, config);

    writer.addDocument(createDocument());

    writer.close();

    IndexReader reader = DirectoryReader.open(idx);
    searcher = new IndexSearcher(reader);
  }
  /**
   * Merges the given taxonomy and index directories and commits the changes to the given writers.
   */
  public static void merge(
      Directory srcIndexDir,
      Directory srcTaxDir,
      OrdinalMap map,
      IndexWriter destIndexWriter,
      DirectoryTaxonomyWriter destTaxWriter)
      throws IOException {
    // merge the taxonomies
    destTaxWriter.addTaxonomy(srcTaxDir, map);

    int ordinalMap[] = map.getMap();
    FacetIndexingParams params = new DefaultFacetIndexingParams();

    DirectoryReader reader = DirectoryReader.open(srcIndexDir, -1);
    List<AtomicReaderContext> leaves = reader.leaves();
    AtomicReader wrappedLeaves[] = new AtomicReader[leaves.size()];
    for (int i = 0; i < leaves.size(); i++) {
      wrappedLeaves[i] = new OrdinalMappingAtomicReader(leaves.get(i).reader(), ordinalMap, params);
    }
    try {
      destIndexWriter.addIndexes(new MultiReader(wrappedLeaves));

      // commit changes to taxonomy and index respectively.
      destTaxWriter.commit();
      destIndexWriter.commit();
    } finally {
      reader.close();
    }
  }
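A minimal caller sketch for the helper above (the source/destination directories and writers are assumed to already exist, and choosing MemoryOrdinalMap here is an illustrative option, not something stated in the original):

  public static void mergeExample(
      Directory srcIndexDir,
      Directory srcTaxDir,
      IndexWriter destIndexWriter,
      DirectoryTaxonomyWriter destTaxWriter)
      throws IOException {
    // MemoryOrdinalMap keeps the source-to-destination ordinal mapping in memory;
    // DirectoryTaxonomyWriter.DiskOrdinalMap is the spill-to-disk alternative.
    OrdinalMap map = new DirectoryTaxonomyWriter.MemoryOrdinalMap();
    merge(srcIndexDir, srcTaxDir, map, destIndexWriter, destTaxWriter);
  }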
Example #8
 public void update() {
   IndexWriter writer = null;
   try {
     writer =
         new IndexWriter(
             directory,
             new IndexWriterConfig(Version.LUCENE_35, new StandardAnalyzer(Version.LUCENE_35)));
     /*
      * Lucene does not provide an in-place update; the "update" here is really the
      * combination of two operations: delete the old document first, then add the new one
      */
     Document doc = new Document();
     doc.add(new Field("id", "11", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
     doc.add(new Field("email", emails[0], Field.Store.YES, Field.Index.NOT_ANALYZED));
     doc.add(new Field("content", contents[0], Field.Store.NO, Field.Index.ANALYZED));
     doc.add(new Field("name", names[0], Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
     writer.updateDocument(new Term("id", "1"), doc);
   } catch (CorruptIndexException e) {
     e.printStackTrace();
   } catch (LockObtainFailedException e) {
     e.printStackTrace();
   } catch (IOException e) {
     e.printStackTrace();
   } finally {
     try {
       if (writer != null) writer.close();
     } catch (CorruptIndexException e) {
       e.printStackTrace();
     } catch (IOException e) {
       e.printStackTrace();
     }
   }
 }
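For comparison, a minimal sketch of the same update done as the two underlying operations the comment describes (delete, then add). The directory and the emails/contents/names arrays are the same assumptions as in the snippet above, and unlike updateDocument this two-step form is not atomic:

  public void updateManually() throws IOException {
    IndexWriter writer =
        new IndexWriter(
            directory,
            new IndexWriterConfig(Version.LUCENE_35, new StandardAnalyzer(Version.LUCENE_35)));
    try {
      // step 1: delete the old document by its exact id term
      writer.deleteDocuments(new Term("id", "1"));
      // step 2: add the replacement document
      Document doc = new Document();
      doc.add(new Field("id", "11", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
      doc.add(new Field("email", emails[0], Field.Store.YES, Field.Index.NOT_ANALYZED));
      doc.add(new Field("content", contents[0], Field.Store.NO, Field.Index.ANALYZED));
      doc.add(new Field("name", names[0], Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
      writer.addDocument(doc);
      writer.commit();
    } finally {
      writer.close();
    }
  }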
  @Test
  public void testDirectoryCleaned() throws Exception {
    final RAMDirectory directory = new RAMDirectory();
    final StandardAnalyzer analyzer = new StandardAnalyzer(DefaultIndexManager.LUCENE_VERSION);
    {
      IndexWriterConfig conf = new IndexWriterConfig(DefaultIndexManager.LUCENE_VERSION, analyzer);
      final IndexWriter writer = new IndexWriter(directory, conf);
      writer.addDocument(new Document());
      writer.close();
    }
    final DefaultConfiguration configuration = new DefaultConfiguration(directory, analyzer);
    final DefaultIndexEngine engine =
        new DefaultIndexEngine(
            new Supplier<IndexSearcher>() {
              public IndexSearcher get() {
                throw new AssertionFailedError("no searcher required");
              }
            },
            new Function<Index.UpdateMode, Writer>() {
              public Writer get(final Index.UpdateMode mode) {
                throw new AssertionFailedError("no writer required");
              }
            },
            configuration,
            FlushPolicy.NONE);

    assertEquals(1, new IndexSearcher(directory).getIndexReader().numDocs());
    engine.clean();
    assertEquals(0, new IndexSearcher(directory).getIndexReader().numDocs());
  }
Example #10
  public void forceDelete() {
    IndexWriter writer = null;

    try {
      writer =
          new IndexWriter(
              directory,
              new IndexWriterConfig(Version.LUCENE_35, new StandardAnalyzer(Version.LUCENE_35)));
      writer.forceMergeDeletes();
    } catch (CorruptIndexException e) {
      e.printStackTrace();
    } catch (LockObtainFailedException e) {
      e.printStackTrace();
    } catch (IOException e) {
      e.printStackTrace();
    } finally {
      try {
        if (writer != null) writer.close();
      } catch (CorruptIndexException e) {
        e.printStackTrace();
      } catch (IOException e) {
        e.printStackTrace();
      }
    }
  }
Example #11
  public void delete() {
    IndexWriter writer = null;

    try {
      writer =
          new IndexWriter(
              directory,
              new IndexWriterConfig(Version.LUCENE_35, new StandardAnalyzer(Version.LUCENE_35)));
      // The argument can be either a Query or a Term; a Term is an exact-match value
      // Documents deleted this way are not removed from the index immediately; they are kept
      // (like a recycle bin) and can still be recovered until the deletes take effect
      writer.deleteAll(); // delete everything
      // writer.deleteDocuments(new Term("id","1"));
      writer.commit(); // commit the deletes
    } catch (CorruptIndexException e) {
      e.printStackTrace();
    } catch (LockObtainFailedException e) {
      e.printStackTrace();
    } catch (IOException e) {
      e.printStackTrace();
    } finally {
      try {
        if (writer != null) writer.close();
      } catch (CorruptIndexException e) {
        e.printStackTrace();
      } catch (IOException e) {
        e.printStackTrace();
      }
    }
  }
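Because the deletes only become permanent at commit, here is a minimal sketch of abandoning them instead (same directory assumption as above; illustrative, not part of the original example):

  public void discardPendingDeletes() throws IOException {
    IndexWriter writer =
        new IndexWriter(
            directory,
            new IndexWriterConfig(Version.LUCENE_35, new StandardAnalyzer(Version.LUCENE_35)));
    writer.deleteDocuments(new Term("id", "1"));
    // rollback() drops everything buffered since the last commit (including the delete
    // above) and closes the writer, so the document remains in the index.
    writer.rollback();
  }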
Example #12
 public void merge() {
   IndexWriter writer = null;
   try {
     writer =
         new IndexWriter(
             directory,
             new IndexWriterConfig(Version.LUCENE_35, new StandardAnalyzer(Version.LUCENE_35)));
     // This merges the index down to two segments; deleted documents in those segments are purged
     // Note: since Lucene 3.5 an explicit forceMerge is discouraged because it is very expensive;
     // Lucene will handle merging on its own as appropriate
     writer.forceMerge(2);
   } catch (CorruptIndexException e) {
     e.printStackTrace();
   } catch (LockObtainFailedException e) {
     e.printStackTrace();
   } catch (IOException e) {
     e.printStackTrace();
   } finally {
     try {
       if (writer != null) writer.close();
     } catch (CorruptIndexException e) {
       e.printStackTrace();
     } catch (IOException e) {
       e.printStackTrace();
     }
   }
 }
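As the comment notes, since Lucene 3.5 it is usually better to let the merge policy do this work in the background. A minimal configuration sketch (reusing the directory field from the snippets above; the policy settings are illustrative assumptions, not taken from the original):

  public void configureBackgroundMerging() throws IOException {
    IndexWriterConfig config =
        new IndexWriterConfig(Version.LUCENE_35, new StandardAnalyzer(Version.LUCENE_35));
    // TieredMergePolicy merges segments as the index grows and reclaims space from
    // deleted documents without an explicit forceMerge call.
    TieredMergePolicy mergePolicy = new TieredMergePolicy();
    mergePolicy.setMaxMergedSegmentMB(512.0); // illustrative value
    config.setMergePolicy(mergePolicy);
    IndexWriter writer = new IndexWriter(directory, config);
    writer.close();
  }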
Example #13
  @Test
  public void createIndex() throws Exception {

    IndexWriter indexWriter = LuceneUtil.getIndexWriter();
    indexWriter.addDocument(getDocument());
    indexWriter.commit();
  }
Example #14
 @Override
 protected final void writerSegmentStats(SegmentsStats stats) {
   stats.addVersionMapMemoryInBytes(versionMap.ramBytesUsed());
   stats.addIndexWriterMemoryInBytes(indexWriter.ramBytesUsed());
   stats.addIndexWriterMaxMemoryInBytes(
       (long) (indexWriter.getConfig().getRAMBufferSizeMB() * 1024 * 1024));
 }
 /**
  * Optimizes the index by force-merging segments, leaving at most the specified number of
  * segments. This operation may block until all merging completes.
  *
  * @param maxNumSegments The maximum number of segments left in the index after merging finishes.
  * @param doWait {@code true} if the call should block until the operation completes.
  * @throws IOException If Lucene throws IO errors.
  */
 @Override
 public void forceMerge(int maxNumSegments, boolean doWait) throws IOException {
   Log.info("%s merging index segments to %d", logName, maxNumSegments);
   indexWriter.forceMerge(maxNumSegments, doWait);
   indexWriter.commit();
   Log.info("%s segments merge completed", logName);
 }
Example #16
  private void buildIndex(
      final List<GeoEntry> geoEntryList,
      final boolean create,
      final ProgressCallback progressCallback) {
    Directory directory;

    try {
      directory = FSDirectory.open(Paths.get(indexLocation));
    } catch (IOException e) {
      throw new GeoEntryIndexingException(
          "Couldn't open the directory for the index, " + indexLocation, e);
    }

    // Try-with-resources to ensure the IndexWriter always gets closed.
    try (final IndexWriter indexWriter = createIndexWriter(create, directory)) {
      try {
        indexGeoEntries(indexWriter, geoEntryList, progressCallback);
      } catch (IOException e) {
        // Need to roll back here before the IndexWriter is closed at the end of the try
        // block.
        indexWriter.rollback();
        throw e;
      }
    } catch (IOException e) {
      throw new GeoEntryIndexingException("Error writing to the index.", e);
    }
  }
 /**
  * Optimizes the index by force-merging all segments that contain deleted documents. This
  * operation may block until all merging completes.
  *
  * @param doWait {@code true} if the call should block until the operation completes.
  * @throws IOException If Lucene throws IO errors.
  */
 @Override
 public void forceMergeDeletes(boolean doWait) throws IOException {
   Log.info("%s merging index segments with deletions", logName);
   indexWriter.forceMergeDeletes(doWait);
   indexWriter.commit();
   Log.info("%s merging index segments with deletions completed", logName);
 }
Example #18
  public void testDemo() throws IOException, ParseException {

    Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);

    // Store the index in memory:
    Directory directory = new RAMDirectory();
    // To store an index on disk, use this instead:
    // Directory directory = FSDirectory.open("/tmp/testindex");
    IndexWriter iwriter =
        new IndexWriter(directory, analyzer, true, new IndexWriter.MaxFieldLength(25000));
    Document doc = new Document();
    String text = "This is the text to be indexed.";
    doc.add(new Field("fieldname", text, Field.Store.YES, Field.Index.ANALYZED));
    iwriter.addDocument(doc);
    iwriter.close();

    // Now search the index:
    IndexSearcher isearcher = new IndexSearcher(directory, true); // read-only=true
    // Parse a simple query that searches for "text":
    QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "fieldname", analyzer);
    Query query = parser.parse("text");
    ScoreDoc[] hits = isearcher.search(query, null, 1000).scoreDocs;
    assertEquals(1, hits.length);
    // Iterate through the results:
    for (int i = 0; i < hits.length; i++) {
      Document hitDoc = isearcher.doc(hits[i].doc);
      assertEquals("This is the text to be indexed.", hitDoc.get("fieldname"));
    }
    isearcher.close();
    directory.close();
  }
Example #19
  private static Directory index(Analyzer analyzer, String processingPath) {
    RAMDirectory directory = null;
    IndexWriter indexWriter = null;
    try {
      directory = new RAMDirectory();
      IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_35, analyzer);
      indexWriter = new IndexWriter(directory, iwc);
      File file = new File(processingPath);
      index_h("", file, indexWriter);
    } catch (IOException e) {
      e.printStackTrace();
    } finally {
      if (indexWriter != null) {
        try {
          indexWriter.close();
        } catch (CorruptIndexException e1) {
          // TODO Auto-generated catch block
          e1.printStackTrace();
        } catch (IOException e1) {
          // TODO Auto-generated catch block
          e1.printStackTrace();
        }
      }
    }

    return directory;
  }
  public void testListenerCalled() throws Exception {
    Directory dir = newDirectory();
    IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, null));
    final AtomicBoolean afterRefreshCalled = new AtomicBoolean(false);
    SearcherManager sm = new SearcherManager(iw, true, new SearcherFactory());
    sm.addListener(
        new ReferenceManager.RefreshListener() {
          @Override
          public void beforeRefresh() {}

          @Override
          public void afterRefresh(boolean didRefresh) {
            if (didRefresh) {
              afterRefreshCalled.set(true);
            }
          }
        });
    iw.addDocument(new Document());
    iw.commit();
    assertFalse(afterRefreshCalled.get());
    sm.maybeRefreshBlocking();
    assertTrue(afterRefreshCalled.get());
    sm.close();
    iw.close();
    dir.close();
  }
Example #21
  // TODO: randomize
  public IndexSearcher setUp(Random random, Similarity similarity, int numDocs) throws IOException {
    Directory directory = new MockDirectoryWrapper(random, new RAMDirectory());
    PayloadAnalyzer analyzer = new PayloadAnalyzer();
    IndexWriter writer =
        new IndexWriter(
            directory,
            new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer).setSimilarity(similarity));
    // writer.infoStream = System.out;
    for (int i = 0; i < numDocs; i++) {
      Document doc = new Document();
      doc.add(new Field(FIELD, English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED));
      doc.add(
          new Field(
              MULTI_FIELD,
              English.intToEnglish(i) + "  " + English.intToEnglish(i),
              Field.Store.YES,
              Field.Index.ANALYZED));
      doc.add(
          new Field(
              NO_PAYLOAD_FIELD, English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED));
      writer.addDocument(doc);
    }
    reader = IndexReader.open(writer, true);
    writer.close();

    IndexSearcher searcher = LuceneTestCase.newSearcher(reader);
    searcher.setSimilarity(similarity);
    return searcher;
  }
Example #22
 private void initializeIndex(String[] values) throws IOException {
   IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
   for (int i = 0; i < values.length; i++) {
     insertDoc(writer, values[i]);
   }
   writer.close();
 }
Example #23
  @Override
  public void index(List<AgeObject> aol) {
    try {
      IndexWriter iWriter =
          new IndexWriter(
              index, analyzer, objectList == null, IndexWriter.MaxFieldLength.UNLIMITED);

      if (objectList == null) objectList = aol;
      else objectList.addAll(aol);

      for (AgeObject ao : aol) {
        Document doc = new Document();

        for (TextFieldExtractor tfe : extractors)
          doc.add(
              new Field(
                  tfe.getName(),
                  tfe.getExtractor().getValue(ao),
                  Field.Store.NO,
                  Field.Index.ANALYZED));

        iWriter.addDocument(doc);
      }

      iWriter.close();

      defaultFieldName = extractors.iterator().next().getName();
    } catch (CorruptIndexException e) {
      // TODO Auto-generated catch block
      e.printStackTrace();
    } catch (IOException e) {
      // TODO Auto-generated catch block
      e.printStackTrace();
    }
  }
Example #24
  public void createRandomTerms(int nDocs, int nTerms, double power, Directory dir)
      throws Exception {
    int[] freq = new int[nTerms];
    terms = new Term[nTerms];
    for (int i = 0; i < nTerms; i++) {
      int f = (nTerms + 1) - i; // make first terms less frequent
      freq[i] = (int) Math.ceil(Math.pow(f, power));
      terms[i] = new Term("f", Character.toString((char) ('A' + i)));
    }

    IndexWriter iw =
        new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    for (int i = 0; i < nDocs; i++) {
      Document d = new Document();
      for (int j = 0; j < nTerms; j++) {
        if (r.nextInt(freq[j]) == 0) {
          d.add(new Field("f", terms[j].text(), Field.Store.NO, Field.Index.NOT_ANALYZED));
          // System.out.println(d);
        }
      }
      iw.addDocument(d);
    }
    iw.optimize();
    iw.close();
  }
 public void index() throws IOException {
   final IndexWriterConfig indexWriterConfig = new IndexWriterConfig(this.analyzer);
   indexWriterConfig.setOpenMode(OpenMode.CREATE);
   final IndexWriter indexWriter = new IndexWriter(this.dir, indexWriterConfig);
   indexDocs(indexWriter);
   indexWriter.close();
 }
Example #26
  public void createIndex() {

    loadTweets("datasets/sentiment-short.csv", 100);

    directory = new RAMDirectory();

    try {
      IndexWriter writer = getWriter();
      for (int i = 0; i < tweets.size(); i++) {
        Document doc = new Document();
        doc.add(
            new Field(
                "tweet",
                tweets.get(i).getText(),
                Field.Store.YES,
                Field.Index.ANALYZED,
                TermVector.YES));
        writer.addDocument(doc);
      }

      System.out.println("Docs: " + writer.numDocs());
      writer.close();

    } catch (Exception e) {
      e.printStackTrace();
    }
  }
Example #27
 private void indexFiles(String dir, String index, int featureIndex, boolean createNewIndex)
     throws IOException {
   ArrayList<String> images = FileUtils.getAllImages(new File(dir), true);
   IndexWriter iw =
       LuceneUtils.createIndexWriter(
           index, createNewIndex, LuceneUtils.AnalyzerType.WhitespaceAnalyzer);
   // select one feature for the large index:
   int count = 0;
   long ms = System.currentTimeMillis();
   DocumentBuilder builder = new ChainedDocumentBuilder();
   ((ChainedDocumentBuilder) builder).addBuilder(builders[featureIndex]);
   //        ((ChainedDocumentBuilder) builder).addBuilder(builders[0]);
   for (Iterator<String> iterator = images.iterator(); iterator.hasNext(); ) {
     count++;
     if (count > 100 && count % 5000 == 0) {
       System.out.println(
           count
               + " files indexed. "
               + (System.currentTimeMillis() - ms) / (count)
               + " ms per file");
     }
     String file = iterator.next();
     try {
       iw.addDocument(builder.createDocument(new FileInputStream(file), file));
     } catch (Exception e) {
       System.err.println("Error: " + e.getMessage());
     }
   }
   iw.close();
 }
 public OperationResponse commitAndOptimize() {
   try {
     if (logger.isDebugEnabled()) {
       logger.debug("committing...");
     }
     indexWriter.commit();
     if (logger.isDebugEnabled()) {
       logger.debug("commit finish.");
     }
     if (logger.isDebugEnabled()) {
       logger.debug("optimizing...");
     }
     indexWriter.forceMerge(defaultMergeSize);
     if (logger.isDebugEnabled()) {
       logger.debug("optimize finish.");
     }
     reopenSearcher();
   } catch (IOException e) {
     logger.error("optimize error", e);
     return new OperationResponse(e.getMessage(), ResultCodes.COMMON_ERROR);
   } catch (OutOfMemoryError e) {
     CloseUtil.close(indexWriter);
     logger.error("error of OOM", e);
     return new OperationResponse(e.getMessage(), ResultCodes.COMMON_ERROR);
   }
   return new OperationResponse();
 }
  @Test
  public void testVectorHighlighter() throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter indexWriter =
        new IndexWriter(dir, Lucene.STANDARD_ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED);

    indexWriter.addDocument(
        doc()
            .add(field("_id", "1"))
            .add(
                field(
                    "content",
                    "the big bad dog",
                    Field.Store.YES,
                    Field.Index.ANALYZED,
                    Field.TermVector.WITH_POSITIONS_OFFSETS))
            .build());

    IndexReader reader = indexWriter.getReader();
    IndexSearcher searcher = new IndexSearcher(reader);
    TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);

    assertThat(topDocs.totalHits, equalTo(1));

    FastVectorHighlighter highlighter = new FastVectorHighlighter();
    String fragment =
        highlighter.getBestFragment(
            highlighter.getFieldQuery(new TermQuery(new Term("content", "bad"))),
            reader,
            topDocs.scoreDocs[0].doc,
            "content",
            30);
    assertThat(fragment, notNullValue());
    System.out.println(fragment);
  }
Example #30
 @Override
 public SyncedFlushResult syncFlush(String syncId, CommitId expectedCommitId)
     throws EngineException {
   // best effort attempt before we acquire locks
   ensureOpen();
   if (indexWriter.hasUncommittedChanges()) {
     logger.trace("can't sync commit [{}]. have pending changes", syncId);
     return SyncedFlushResult.PENDING_OPERATIONS;
   }
   if (expectedCommitId.idsEqual(lastCommittedSegmentInfos.getId()) == false) {
     logger.trace("can't sync commit [{}]. current commit id is not equal to expected.", syncId);
     return SyncedFlushResult.COMMIT_MISMATCH;
   }
   try (ReleasableLock lock = writeLock.acquire()) {
     ensureOpen();
     ensureCanFlush();
     if (indexWriter.hasUncommittedChanges()) {
       logger.trace("can't sync commit [{}]. have pending changes", syncId);
       return SyncedFlushResult.PENDING_OPERATIONS;
     }
     if (expectedCommitId.idsEqual(lastCommittedSegmentInfos.getId()) == false) {
       logger.trace("can't sync commit [{}]. current commit id is not equal to expected.", syncId);
       return SyncedFlushResult.COMMIT_MISMATCH;
     }
     logger.trace("starting sync commit [{}]", syncId);
     commitIndexWriter(indexWriter, translog, syncId);
     logger.debug("successfully sync committed. sync id [{}].", syncId);
     lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo();
     return SyncedFlushResult.SUCCESS;
   } catch (IOException ex) {
     maybeFailEngine("sync commit", ex);
     throw new EngineException(shardId, "failed to sync commit", ex);
   }
 }