Example #1
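  // Indexes random long values with LegacyLongField and checks that the min and max terms
  // recovered via LegacyNumericUtils match the values that were indexed.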
  public void testLongFieldMinMax() throws Exception {
    Directory dir = newDirectory();
    RandomIndexWriter w = new RandomIndexWriter(random(), dir);
    int numDocs = atLeast(100);
    long minValue = Long.MAX_VALUE;
    long maxValue = Long.MIN_VALUE;
    for (int i = 0; i < numDocs; i++) {
      Document doc = new Document();
      long num = random().nextLong();
      minValue = Math.min(num, minValue);
      maxValue = Math.max(num, maxValue);
      doc.add(new LegacyLongField("field", num, Field.Store.NO));
      w.addDocument(doc);
    }

    IndexReader r = w.getReader();

    Terms terms = MultiFields.getTerms(r, "field");
    assertEquals(Long.valueOf(minValue), LegacyNumericUtils.getMinLong(terms));
    assertEquals(Long.valueOf(maxValue), LegacyNumericUtils.getMaxLong(terms));

    r.close();
    w.close();
    dir.close();
  }
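  // With more than 32 prohibited (MUST_NOT) clauses, only the document that contains just the
  // SHOULD term "33" may match, and BooleanScorer's BucketScorer must be the scorer used.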
  public void testMoreThan32ProhibitedClauses() throws Exception {
    final Directory d = newDirectory();
    final RandomIndexWriter w = new RandomIndexWriter(random(), d);
    Document doc = new Document();
    doc.add(
        new TextField(
            "field",
            "0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33",
            Field.Store.NO));
    w.addDocument(doc);
    doc = new Document();
    doc.add(new TextField("field", "33", Field.Store.NO));
    w.addDocument(doc);
    final IndexReader r = w.getReader();
    w.close();
    final IndexSearcher s = newSearcher(r);

    final BooleanQuery q = new BooleanQuery();
    for (int term = 0; term < 33; term++) {
      q.add(
          new BooleanClause(
              new TermQuery(new Term("field", "" + term)), BooleanClause.Occur.MUST_NOT));
    }
    q.add(new BooleanClause(new TermQuery(new Term("field", "33")), BooleanClause.Occur.SHOULD));

    final int[] count = new int[1];
    s.search(
        q,
        new Collector() {
          private Scorer scorer;

          @Override
          public void setScorer(Scorer scorer) {
            // Make sure we got BooleanScorer:
            this.scorer = scorer;
            assertEquals(
                "Scorer is implemented by wrong class",
                BooleanScorer.class.getName() + "$BucketScorer",
                scorer.getClass().getName());
          }

          @Override
          public void collect(int doc) {
            count[0]++;
          }

          @Override
          public void setNextReader(AtomicReaderContext context) {}

          @Override
          public boolean acceptsDocsOutOfOrder() {
            return true;
          }
        });

    assertEquals(1, count[0]);

    r.close();
    d.close();
  }
  public void startSearch(String searchString) throws IOException {

    /*analyze(searchString);*/

    try {
      Directory directory = FSDirectory.open(new File(".//Index")); // where the index lives
      IndexSearcher is = new IndexSearcher(directory); // the searcher
      QueryParser parser =
          new QueryParser(
              Version.LUCENE_31,
              "name",
              new RussianAnalyzer(Version.LUCENE_31)); // search field + analyzer
      /* String str1 = "camera";
      String str2 = "TV";
      String str3 = "SONY";
      String total = "(" + str1 + " OR " + str2 + ")" + " AND " + str3;
      System.out.println(total);*/
      Query query = parser.parse(searchString); // what we are searching for
      TopDocs results =
          is.search(query, null, 10); // run the search, limited to 10 documents; results holds ...
      System.out.println(
          "getMaxScore()="
              + results.getMaxScore()
              + " totalHits="
              + results.totalHits); // maxScore is the best (highest-scoring) hit, totalHits the
      // number of documents found

      /*proposalController.getProposalList().clear();*/

      for (ScoreDoc hits : results.scoreDocs) { // walk the hits
        Document doc = is.doc(hits.doc); // fetch the document by its doc id

        for (Proposal proposal :
            proposalFacade.findPropolsalsByProduct(Long.valueOf(doc.get("recid")))) {

          proposalController.getProposalList().add(proposal);
          _log.info(
              "Proposal found: "
                  + proposal.getRecid().toString()
                  + ", product: "
                  + doc.get("recid")
                  + ", "
                  + doc.get("name"));
        }

        /*System.out.println("doc=" + hits.doc + " score=" + hits.score); // print the doc id and score
        addMessage(doc.get("id") + " | " + doc.get("recid") + " | " + doc.get("name")); // print the fields of the matched document*/
      }

      directory.close();
    } catch (ParseException e) {
      e.printStackTrace();
    } catch (IOException e) {
      e.printStackTrace();
    }
    addMessage("Search completed");
  }
Example #4
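  // A PrefixQuery on the category path matches the category and everything below it, so it must
  // return more hits than an exact TermQuery on the same term.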
  public void testPrefix() throws Exception {
    Directory dir = TestUtil.getBookIndexDirectory();
    IndexReader ireader = DirectoryReader.open(dir);
    IndexSearcher searcher = new IndexSearcher(ireader);

    Term term =
        new Term(
            "category", // #A
            "/technology/computers/programming"); // #A
    PrefixQuery query = new PrefixQuery(term); // #A

    TopDocs matches = searcher.search(query, 10); // #A
    int programmingAndBelow = matches.totalHits;
    System.out.println("Programming and below:   ");
    for (int i = 0; i < matches.scoreDocs.length; i++) {
      Document doc = searcher.doc(matches.scoreDocs[i].doc);
      System.out.println("match " + i + ": " + doc.get("category") + " | " + doc.get("title"));
    }

    System.out.println("Just /technology/computers/programming:   ");
    matches = searcher.search(new TermQuery(term), 10); // #B
    int justProgramming = matches.totalHits;
    System.out.println("Just Programming:   ");
    for (int i = 0; i < matches.scoreDocs.length; i++) {
      Document doc = searcher.doc(matches.scoreDocs[i].doc);
      System.out.println("match " + i + ": " + doc.get("category") + " | " + doc.get("title"));
    }

    assertTrue(programmingAndBelow > justProgramming);
    // searcher.close();
    dir.close();
  }
Example #5
  // Verifies no *.nrm exists when all fields omit norms:
  public void testNoNrmFile() throws Throwable {
    Directory ram = newDirectory();
    Analyzer analyzer = new MockAnalyzer(random());
    IndexWriter writer =
        new IndexWriter(
            ram,
            newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)
                .setMaxBufferedDocs(3)
                .setMergePolicy(newLogMergePolicy()));
    LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
    lmp.setMergeFactor(2);
    lmp.setNoCFSRatio(0.0);
    Document d = new Document();

    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
    customType.setOmitNorms(true);
    Field f1 = newField("f1", "This field has no norms", customType);
    d.add(f1);

    for (int i = 0; i < 30; i++) {
      writer.addDocument(d);
    }

    writer.commit();

    assertNoNrm(ram);

    // force merge
    writer.forceMerge(1);
    // flush
    writer.close();

    assertNoNrm(ram);
    ram.close();
  }
 /**
  * testWriterTwice3 is yet another test that creates a taxonomy in two separate writing
  * sessions. It used to fail because of a bug involving commit(), explained below, and should now
  * succeed.
  */
 @Test
 public void testWriterTwice3() throws Exception {
   Directory indexDir = newDirectory();
   // First, create and fill the taxonomy
   TaxonomyWriter tw = new DirectoryTaxonomyWriter(indexDir);
   fillTaxonomy(tw);
   tw.close();
   // Now, open the same taxonomy and add the same categories again.
   // After a few categories, the LuceneTaxonomyWriter implementation
   // will stop looking for each category on disk, and rather read them
   // all into memory and close its reader. The bug was that it closed
   // the reader, but forgot that it did (because it didn't set the reader
   // reference to null).
   tw = new DirectoryTaxonomyWriter(indexDir);
   fillTaxonomy(tw);
   // Add one new category, just to make commit() do something:
   tw.addCategory(new FacetLabel("hi"));
   // Do a commit(). Here was a bug - if tw had a reader open, it should
   // be reopened after the commit. However, in our case the reader should
   // not be open (as explained above) but because it was not set to null,
   // we forgot that, tried to reopen it, and got an AlreadyClosedException.
   tw.commit();
   assertEquals(expectedCategories.length + 1, tw.getSize());
   tw.close();
   indexDir.close();
 }
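  // First call goes through to the wrapped filter; the second call must be served from the cache.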
  public void testCachingWorks() throws Exception {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    writer.close();

    IndexReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir));
    AtomicReaderContext context = (AtomicReaderContext) reader.getContext();
    MockFilter filter = new MockFilter();
    CachingWrapperFilter cacher = new CachingWrapperFilter(filter);

    // first time, nested filter is called
    DocIdSet strongRef = cacher.getDocIdSet(context, context.reader().getLiveDocs());
    assertTrue("first time", filter.wasCalled());

    // make sure no exception if cache is holding the wrong docIdSet
    cacher.getDocIdSet(context, context.reader().getLiveDocs());

    // second time, nested filter should not be called
    filter.clear();
    cacher.getDocIdSet(context, context.reader().getLiveDocs());
    assertFalse("second time", filter.wasCalled());

    reader.close();
    dir.close();
  }
 public void testDifferentTypedDocValuesField2() throws Exception {
   Directory d = newDirectory();
   RandomIndexWriter w = new RandomIndexWriter(random(), d);
   Document doc = new Document();
   // Index doc values are single-valued so we should not
   // be able to add same field more than once:
   Field f = new NumericDocValuesField("field", 17);
   doc.add(f);
   doc.add(new SortedDocValuesField("field", new BytesRef("hello")));
   try {
     w.addDocument(doc);
     fail("didn't hit expected exception");
   } catch (IllegalArgumentException iae) {
     // expected
   }
   doc = new Document();
   doc.add(f);
   w.addDocument(doc);
   w.forceMerge(1);
   DirectoryReader r = w.getReader();
   assertEquals(17, getOnlySegmentReader(r).getNumericDocValues("field").get(0));
   r.close();
   w.close();
   d.close();
 }
  // LUCENE-3870
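  // Two documents share a ~32 KB binary doc value that differs only in its first byte; both
  // values must read back intact after forceMerge.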
  public void testLengthPrefixAcrossTwoPages() throws Exception {
    Directory d = newDirectory();
    IndexWriter w =
        new IndexWriter(d, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
    Document doc = new Document();
    byte[] bytes = new byte[32764];
    BytesRef b = new BytesRef();
    b.bytes = bytes;
    b.length = bytes.length;
    doc.add(new SortedDocValuesField("field", b));
    w.addDocument(doc);
    bytes[0] = 1;
    w.addDocument(doc);
    w.forceMerge(1);
    DirectoryReader r = w.getReader();
    BinaryDocValues s = FieldCache.DEFAULT.getTerms(getOnlySegmentReader(r), "field");

    BytesRef bytes1 = new BytesRef();
    s.get(0, bytes1);
    assertEquals(bytes.length, bytes1.length);
    bytes[0] = 0;
    assertEquals(b, bytes1);

    s.get(1, bytes1);
    assertEquals(bytes.length, bytes1.length);
    bytes[0] = 1;
    assertEquals(b, bytes1);
    r.close();
    w.close();
    d.close();
  }
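 // Writes writeSize bytes to a fresh file, then reads back readSize bytes; an IOException is
 // expected exactly when the read goes past the end of the file.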
 private void testOn(Directory dir, int writeSize, int readSize, Cache cache) throws IOException {
   if (cache != null) {
     // needed to make sure no chunks are left over in case of the Infinispan implementation
     cache.clear();
   }
   final String filename = "chunkTest";
   IndexOutput indexOutput = dir.createOutput(filename);
   byte[] toWrite = fillBytes(writeSize);
   indexOutput.writeBytes(toWrite, writeSize);
   indexOutput.close();
   if (cache != null) {
     AssertJUnit.assertEquals(
         writeSize,
         DirectoryIntegrityCheck.deepCountFileSize(new FileCacheKey(INDEXNAME, filename), cache));
   }
   AssertJUnit.assertEquals(writeSize, indexOutput.length());
   byte[] results = new byte[readSize];
   IndexInput openInput = dir.openInput(filename);
   try {
     openInput.readBytes(results, 0, readSize);
     for (int i = 0; i < writeSize && i < readSize; i++) {
       AssertJUnit.assertEquals(results[i], toWrite[i]);
     }
     if (readSize > writeSize)
       AssertJUnit.fail("should have thrown an IOException for reading past EOF");
   } catch (IOException ioe) {
     if (readSize <= writeSize)
       AssertJUnit.fail("should not have thrown an IOException" + ioe.getMessage());
   }
 }
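 // Writes the same buffer ten times with intermediate flushes into a directory using a small
 // chunk size, then verifies the concatenated contents read back correctly.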
 public void multipleFlushTest() throws IOException {
   final String filename = "longFile.writtenInMultipleFlushes";
   final int bufferSize = 300;
   Cache cache = cacheManager.getCache();
   cache.clear();
   Directory dir =
       DirectoryBuilder.newDirectoryInstance(cache, cache, cache, INDEXNAME)
           .chunkSize(13)
           .create();
   byte[] manyBytes = fillBytes(bufferSize);
   IndexOutput indexOutput = dir.createOutput(filename);
   for (int i = 0; i < 10; i++) {
     indexOutput.writeBytes(manyBytes, bufferSize);
     indexOutput.flush();
   }
   indexOutput.close();
   IndexInput input = dir.openInput(filename);
   final int finalSize = (10 * bufferSize);
   AssertJUnit.assertEquals(finalSize, input.length());
   final byte[] resultingBuffer = new byte[finalSize];
   input.readBytes(resultingBuffer, 0, finalSize);
   int index = 0;
   for (int i = 0; i < 10; i++) {
     for (int j = 0; j < bufferSize; j++)
       AssertJUnit.assertEquals(resultingBuffer[index++], manyBytes[j]);
   }
 }
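  // Searches "contents" for "java in action", boosting recently published books with
  // RecencyBoostCustomScoreQuery, and prints the hits sorted by score and then by title.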
  public static void main(String[] args) throws IOException, ParseException {
    String indexDir = "C:/lucenedir";
    Directory directory = FSDirectory.open(Paths.get(indexDir));
    IndexReader reader = DirectoryReader.open(directory);
    IndexSearcher searcher = new IndexSearcher(reader);

    int day = (int) (new Date().getTime() / Constans.DAY_MILLIS);
    QueryParser parser = new QueryParser("contents", new StandardAnalyzer());
    Query query = parser.parse("java in action");
    Query customScoreQuery =
        new RecencyBoostCustomScoreQuery(query, 2.0, day, 6 * 365, "pubmonthAsDay");
    Sort sort =
        new Sort(
            new SortField[] {
              SortField.FIELD_SCORE, new SortField("title2", SortField.Type.STRING)
            });
    TopDocs hits = searcher.search(customScoreQuery, null, Integer.MAX_VALUE, sort, true, false);

    for (int i = 0; i < hits.scoreDocs.length; i++) {
      // Either way of fetching the Document works; internally searcher.doc just delegates to
      // reader.document
      // Document doc = reader.document(hits.scoreDocs[i].doc);
      Document doc = searcher.doc(hits.scoreDocs[i].doc);
      System.out.println(
          (1 + i)
              + ": "
              + doc.get("title")
              + ": pubmonth="
              + doc.get("pubmonth")
              + " score="
              + hits.scoreDocs[i].score);
    }
    reader.close();
    directory.close();
  }
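  // Indexes the given document, runs the phrase query with the given slop, asserts the expected
  // hit count and returns the max score.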
  private float checkPhraseQuery(Document doc, PhraseQuery query, int slop, int expectedNumResults)
      throws Exception {
    query.setSlop(slop);

    Directory ramDir = newDirectory();
    RandomIndexWriter writer =
        new RandomIndexWriter(random, ramDir, new MockAnalyzer(MockTokenizer.WHITESPACE, false));
    writer.addDocument(doc);

    IndexReader reader = writer.getReader();

    IndexSearcher searcher = newSearcher(reader);
    TopDocs td = searcher.search(query, null, 10);
    // System.out.println("slop: "+slop+"  query: "+query+"  doc: "+doc+"  Expecting number of hits:
    // "+expectedNumResults+" maxScore="+td.getMaxScore());
    assertEquals(
        "slop: " + slop + "  query: " + query + "  doc: " + doc + "  Wrong number of hits",
        expectedNumResults,
        td.totalHits);

    // QueryUtils.check(query,searcher);
    writer.close();
    searcher.close();
    reader.close();
    ramDir.close();

    return td.getMaxScore();
  }
Example #14
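  // Same idea as the long min/max test, but with LegacyDoubleField: min and max are recovered
  // through sortableLongToDouble.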
  public void testDoubleFieldMinMax() throws Exception {
    Directory dir = newDirectory();
    RandomIndexWriter w = new RandomIndexWriter(random(), dir);
    int numDocs = atLeast(100);
    double minValue = Double.POSITIVE_INFINITY;
    double maxValue = Double.NEGATIVE_INFINITY;
    for (int i = 0; i < numDocs; i++) {
      Document doc = new Document();
      double num = random().nextDouble();
      minValue = Math.min(num, minValue);
      maxValue = Math.max(num, maxValue);
      doc.add(new LegacyDoubleField("field", num, Field.Store.NO));
      w.addDocument(doc);
    }

    IndexReader r = w.getReader();

    Terms terms = MultiFields.getTerms(r, "field");

    assertEquals(
        minValue, NumericUtils.sortableLongToDouble(LegacyNumericUtils.getMinLong(terms)), 0.0);
    assertEquals(
        maxValue, NumericUtils.sortableLongToDouble(LegacyNumericUtils.getMaxLong(terms)), 0.0);

    r.close();
    w.close();
    dir.close();
  }
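  // A RefreshListener on a SearcherManager must only be notified once maybeRefreshBlocking()
  // actually performs a refresh.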
  public void testListenerCalled() throws Exception {
    Directory dir = newDirectory();
    IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, null));
    final AtomicBoolean afterRefreshCalled = new AtomicBoolean(false);
    SearcherManager sm = new SearcherManager(iw, true, new SearcherFactory());
    sm.addListener(
        new ReferenceManager.RefreshListener() {
          @Override
          public void beforeRefresh() {}

          @Override
          public void afterRefresh(boolean didRefresh) {
            if (didRefresh) {
              afterRefreshCalled.set(true);
            }
          }
        });
    iw.addDocument(new Document());
    iw.commit();
    assertFalse(afterRefreshCalled.get());
    sm.maybeRefreshBlocking();
    assertTrue(afterRefreshCalled.get());
    sm.close();
    iw.close();
    dir.close();
  }
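 // Doc-values fields are not stored fields: the value must be readable through
 // NumericDocValues but absent from the stored document.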
 public void testDocValuesUnstored() throws IOException {
   Directory dir = newDirectory();
   IndexWriterConfig iwconfig =
       newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
   iwconfig.setMergePolicy(newLogMergePolicy());
   IndexWriter writer = new IndexWriter(dir, iwconfig);
   for (int i = 0; i < 50; i++) {
     Document doc = new Document();
     doc.add(new NumericDocValuesField("dv", i));
     doc.add(new TextField("docId", "" + i, Field.Store.YES));
     writer.addDocument(doc);
   }
   DirectoryReader r = writer.getReader();
   SlowCompositeReaderWrapper slow = new SlowCompositeReaderWrapper(r);
   FieldInfos fi = slow.getFieldInfos();
   FieldInfo dvInfo = fi.fieldInfo("dv");
   assertTrue(dvInfo.hasDocValues());
   NumericDocValues dv = slow.getNumericDocValues("dv");
   for (int i = 0; i < 50; i++) {
     assertEquals(i, dv.get(i));
     StoredDocument d = slow.document(i);
     // cannot use d.get("dv") due to another bug!
     assertNull(d.getField("dv"));
     assertEquals(Integer.toString(i), d.get("docId"));
   }
   slow.close();
   writer.close();
   dir.close();
 }
Example #17
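  // Several threads repeatedly update the same document across three writer sessions; the index
  // must end up with exactly one live document.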
  public void testUpdateSameDoc() throws Exception {
    final Directory dir = newDirectory();

    final LineFileDocs docs = new LineFileDocs(random());
    for (int r = 0; r < 3; r++) {
      final IndexWriter w =
          new IndexWriter(
              dir, newIndexWriterConfig(new MockAnalyzer(random())).setMaxBufferedDocs(2));
      final int numUpdates = atLeast(20);
      int numThreads = TestUtil.nextInt(random(), 2, 6);
      IndexingThread[] threads = new IndexingThread[numThreads];
      for (int i = 0; i < numThreads; i++) {
        threads[i] = new IndexingThread(docs, w, numUpdates);
        threads[i].start();
      }

      for (int i = 0; i < numThreads; i++) {
        threads[i].join();
      }

      w.close();
    }

    IndexReader open = DirectoryReader.open(dir);
    assertEquals(1, open.numDocs());
    open.close();
    docs.close();
    dir.close();
  }
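  // After addIndexes() imports a segment whose "dv" field holds NUMERIC doc values, adding a
  // SORTED doc-values field with the same name must be rejected.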
  public void testTypeChangeViaAddIndexesIR2() throws Exception {
    Directory dir = newDirectory();
    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
    IndexWriter writer = new IndexWriter(dir, conf);
    Document doc = new Document();
    doc.add(new NumericDocValuesField("dv", 0L));
    writer.addDocument(doc);
    writer.close();

    Directory dir2 = newDirectory();
    writer = new IndexWriter(dir2, conf);
    IndexReader[] readers = new IndexReader[] {DirectoryReader.open(dir)};
    writer.addIndexes(readers);
    readers[0].close();
    doc = new Document();
    doc.add(new SortedDocValuesField("dv", new BytesRef("foo")));
    try {
      writer.addDocument(doc);
      fail("did not hit exception");
    } catch (IllegalArgumentException iae) {
      // expected
    }
    writer.close();
    dir2.close();
    dir.close();
  }
  @Test
  public void testSeparateReaderAndWriter2() throws Exception {
    Directory indexDir = newDirectory();
    TaxonomyWriter tw = new DirectoryTaxonomyWriter(indexDir);
    tw.commit();
    TaxonomyReader tr = new DirectoryTaxonomyReader(indexDir);

    // Test getOrdinal():
    FacetLabel author = new FacetLabel("Author");

    assertEquals(1, tr.getSize()); // the empty taxonomy has size 1 (the root)
    assertEquals(TaxonomyReader.INVALID_ORDINAL, tr.getOrdinal(author));
    tw.addCategory(author);
    // before commit and refresh, no change:
    assertEquals(TaxonomyReader.INVALID_ORDINAL, tr.getOrdinal(author));
    assertEquals(1, tr.getSize()); // still root only...
    assertNull(
        TaxonomyReader.openIfChanged(
            tr)); // this is not enough, because tw.commit() hasn't been done yet
    assertEquals(TaxonomyReader.INVALID_ORDINAL, tr.getOrdinal(author));
    assertEquals(1, tr.getSize()); // still root only...
    tw.commit();
    // still not enough before refresh:
    assertEquals(TaxonomyReader.INVALID_ORDINAL, tr.getOrdinal(author));
    assertEquals(1, tr.getSize()); // still root only...
    TaxonomyReader newTaxoReader = TaxonomyReader.openIfChanged(tr);
    assertNotNull(newTaxoReader);
    tr.close();
    tr = newTaxoReader;
    assertEquals(1, tr.getOrdinal(author));
    assertEquals(2, tr.getSize());
    tw.close();
    tr.close();
    indexDir.close();
  }
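  // Both documents carry the "dv" numeric doc-values field, so getDocsWithField must report both,
  // whether or not "dv" is also indexed as text.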
  public void testDocsWithField() throws Exception {
    Directory dir = newDirectory();
    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
    IndexWriter writer = new IndexWriter(dir, conf);
    Document doc = new Document();
    doc.add(new NumericDocValuesField("dv", 0L));
    writer.addDocument(doc);

    doc = new Document();
    doc.add(new TextField("dv", "some text", Field.Store.NO));
    doc.add(new NumericDocValuesField("dv", 0L));
    writer.addDocument(doc);

    DirectoryReader r = writer.getReader();
    writer.close();

    AtomicReader subR = r.leaves().get(0).reader();
    assertEquals(2, subR.numDocs());

    Bits bits = FieldCache.DEFAULT.getDocsWithField(subR, "dv");
    assertTrue(bits.get(0));
    assertTrue(bits.get(1));
    r.close();
    dir.close();
  }
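  // A DocIdSet whose iterator() is null matches nothing; the caching wrapper should normalize
  // this case and return null.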
  public void testNullDocIdSetIterator() throws Exception {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    writer.close();

    IndexReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir));
    AtomicReaderContext context = (AtomicReaderContext) reader.getContext();

    final Filter filter =
        new Filter() {
          @Override
          public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) {
            return new DocIdSet() {
              @Override
              public DocIdSetIterator iterator() {
                return null;
              }
            };
          }
        };
    CachingWrapperFilter cacher = new CachingWrapperFilter(filter);

    // the caching filter should return the empty set constant
    assertNull(cacher.getDocIdSet(context, context.reader().getLiveDocs()));

    reader.close();
    dir.close();
  }
  public void testMultiValuedDocValuesField() throws Exception {
    Directory d = newDirectory();
    RandomIndexWriter w = new RandomIndexWriter(random(), d);
    Document doc = new Document();
    Field f = new NumericDocValuesField("field", 17);
    // Index doc values are single-valued so we should not
    // be able to add same field more than once:
    doc.add(f);
    doc.add(f);
    try {
      w.addDocument(doc);
      fail("didn't hit expected exception");
    } catch (IllegalArgumentException iae) {
      // expected
    }

    doc = new Document();
    doc.add(f);
    w.addDocument(doc);
    w.forceMerge(1);
    DirectoryReader r = w.getReader();
    w.close();
    assertEquals(17, FieldCache.DEFAULT.getInts(getOnlySegmentReader(r), "field", false).get(0));
    r.close();
    d.close();
  }
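  // Checks payloads for a SpanNotQuery: spans of "one" near "three" that do not contain "two".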
  public void testSpanNot() throws Exception {
    SpanQuery[] clauses = new SpanQuery[2];
    clauses[0] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "one"));
    clauses[1] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "three"));
    SpanQuery spq = new SpanNearQuery(clauses, 5, true);
    SpanNotQuery snq =
        new SpanNotQuery(spq, new SpanTermQuery(new Term(PayloadHelper.FIELD, "two")));

    Directory directory = newDirectory();
    RandomIndexWriter writer =
        new RandomIndexWriter(
            random(),
            directory,
            newIndexWriterConfig(new PayloadAnalyzer()).setSimilarity(similarity));

    Document doc = new Document();
    doc.add(newTextField(PayloadHelper.FIELD, "one two three one four three", Field.Store.YES));
    writer.addDocument(doc);
    IndexReader reader = writer.getReader();
    writer.close();

    checkSpans(MultiSpansWrapper.wrap(reader, snq, SpanWeight.Postings.PAYLOADS), 1, new int[] {2});
    reader.close();
    directory.close();
  }
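  // Loads each requested document through the field SELECTOR and verifies that every stored
  // value is built from entries of the dataset.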
  public void doTest(int[] docs) throws Exception {
    Directory dir = makeIndex();
    IndexReader reader = IndexReader.open(dir, true);
    for (int i = 0; i < docs.length; i++) {
      Document d = reader.document(docs[i], SELECTOR);
      d.get(MAGIC_FIELD);

      List<Fieldable> fields = d.getFields();
      for (Iterator<Fieldable> fi = fields.iterator(); fi.hasNext(); ) {
        Fieldable f = null;
        try {
          f = fi.next();
          String fname = f.name();
          String fval = f.stringValue();
          assertNotNull(docs[i] + " FIELD: " + fname, fval);
          String[] vals = fval.split("#");
          if (!dataset.contains(vals[0]) || !dataset.contains(vals[1])) {
            fail("FIELD:" + fname + ",VAL:" + fval);
          }
        } catch (Exception e) {
          throw new Exception(docs[i] + " WTF: " + f.name(), e);
        }
      }
    }
    reader.close();
    dir.close();
  }
  public void testFarsiRangeFilterCollating(
      Analyzer analyzer, String firstBeg, String firstEnd, String secondBeg, String secondEnd)
      throws Exception {
    Directory dir = newDirectory();
    IndexWriter writer =
        new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
    Document doc = new Document();
    doc.add(new Field("content", "\u0633\u0627\u0628", Field.Store.YES, Field.Index.ANALYZED));
    doc.add(new Field("body", "body", Field.Store.YES, Field.Index.NOT_ANALYZED));
    writer.addDocument(doc);
    writer.close();
    IndexReader reader = IndexReader.open(dir);
    IndexSearcher searcher = new IndexSearcher(reader);
    Query query = new TermQuery(new Term("body", "body"));

    // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
    // orders the U+0698 character before the U+0633 character, so the single
    // index Term below should NOT be returned by a TermRangeFilter with a Farsi
    // Collator (or an Arabic one, for the case when Farsi is not
    // supported).
    ScoreDoc[] result =
        searcher.search(query, new TermRangeFilter("content", firstBeg, firstEnd, true, true), 1)
            .scoreDocs;
    assertEquals("The index Term should not be included.", 0, result.length);

    result =
        searcher.search(query, new TermRangeFilter("content", secondBeg, secondEnd, true, true), 1)
            .scoreDocs;
    assertEquals("The index Term should be included.", 1, result.length);

    searcher.close();
    reader.close();
    dir.close();
  }
Example #26
  public void testDemo() throws IOException, ParseException {

    Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);

    // Store the index in memory:
    Directory directory = new RAMDirectory();
    // To store an index on disk, use this instead:
    // Directory directory = FSDirectory.open("/tmp/testindex");
    IndexWriter iwriter =
        new IndexWriter(directory, analyzer, true, new IndexWriter.MaxFieldLength(25000));
    Document doc = new Document();
    String text = "This is the text to be indexed.";
    doc.add(new Field("fieldname", text, Field.Store.YES, Field.Index.ANALYZED));
    iwriter.addDocument(doc);
    iwriter.close();

    // Now search the index:
    IndexSearcher isearcher = new IndexSearcher(directory, true); // read-only=true
    // Parse a simple query that searches for "text":
    QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "fieldname", analyzer);
    Query query = parser.parse("text");
    ScoreDoc[] hits = isearcher.search(query, null, 1000).scoreDocs;
    assertEquals(1, hits.length);
    // Iterate through the results:
    for (int i = 0; i < hits.length; i++) {
      Document hitDoc = isearcher.doc(hits[i].doc);
      assertEquals("This is the text to be indexed.", hitDoc.get("fieldname"));
    }
    isearcher.close();
    directory.close();
  }
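  // The nested query ((1 OR 2) AND NOT 9) must match exactly the two documents containing "1"
  // and "2".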
  public void testMethod() throws Exception {
    Directory directory = newDirectory();

    String[] values = new String[] {"1", "2", "3", "4"};

    RandomIndexWriter writer = new RandomIndexWriter(random(), directory);
    for (int i = 0; i < values.length; i++) {
      Document doc = new Document();
      doc.add(newStringField(FIELD, values[i], Field.Store.YES));
      writer.addDocument(doc);
    }
    IndexReader ir = writer.getReader();
    writer.close();

    BooleanQuery booleanQuery1 = new BooleanQuery();
    booleanQuery1.add(new TermQuery(new Term(FIELD, "1")), BooleanClause.Occur.SHOULD);
    booleanQuery1.add(new TermQuery(new Term(FIELD, "2")), BooleanClause.Occur.SHOULD);

    BooleanQuery query = new BooleanQuery();
    query.add(booleanQuery1, BooleanClause.Occur.MUST);
    query.add(new TermQuery(new Term(FIELD, "9")), BooleanClause.Occur.MUST_NOT);

    IndexSearcher indexSearcher = newSearcher(ir);
    ScoreDoc[] hits = indexSearcher.search(query, null, 1000).scoreDocs;
    assertEquals("Number of matched documents", 2, hits.length);
    ir.close();
    directory.close();
  }
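  // A SearcherFactory that returns a searcher over a different reader is illegal;
  // SearcherManager must reject it with an IllegalStateException.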
  public void testEvilSearcherFactory() throws Exception {
    final Directory dir = newDirectory();
    final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
    w.commit();

    final IndexReader other = DirectoryReader.open(dir);

    final SearcherFactory theEvilOne =
        new SearcherFactory() {
          @Override
          public IndexSearcher newSearcher(IndexReader ignored) {
            return LuceneTestCase.newSearcher(other);
          }
        };

    try {
      new SearcherManager(w.w, false, theEvilOne);
      fail("didn't hit expected exception");
    } catch (IllegalStateException ise) {
      // expected
    }
    w.close();
    other.close();
    dir.close();
  }
  /** Test attributes map */
  public void testAttributes() throws Exception {
    Directory dir = newDirectory();
    Codec codec = getCodec();
    byte id[] = StringHelper.randomId();
    Map<String, String> attributes = new HashMap<>();
    attributes.put("key1", "value1");
    attributes.put("key2", "value2");
    SegmentInfo info =
        new SegmentInfo(
            dir,
            getVersions()[0],
            "_123",
            1,
            false,
            codec,
            Collections.emptyMap(),
            id,
            attributes,
            null);
    info.setFiles(Collections.<String>emptySet());
    codec.segmentInfoFormat().write(dir, info, IOContext.DEFAULT);
    SegmentInfo info2 = codec.segmentInfoFormat().read(dir, "_123", id, IOContext.DEFAULT);
    assertEquals(attributes, info2.getAttributes());

    // attributes map should be immutable
    expectThrows(
        UnsupportedOperationException.class,
        () -> {
          info2.getAttributes().put("bogus", "bogus");
        });

    dir.close();
  }
  /**
   * Adds a marker document to the index recording when the index was created.
   *
   * @param worker the index builder worker; a document is only added while it is running
   * @param connection the database connection (not used by this method)
   * @throws Exception if the marker document cannot be added to the index
   */
  private void createIndex(SearchIndexBuilderWorker worker, Connection connection)
      throws Exception {
    IndexWriter indexWrite = null;
    try {
      if (worker.isRunning()) {
        indexWrite = indexStorage.getIndexWriter(false);
      }
      if (indexWrite != null) {
        Document doc = new Document();
        // The date of indexing
        String timeStamp = String.valueOf(System.currentTimeMillis());
        doc.add(
            new Field(
                SearchService.DATE_STAMP, timeStamp, Field.Store.NO, Field.Index.NOT_ANALYZED));
        doc.add(
            new Field(
                SearchService.DATE_STAMP,
                CompressionTools.compressString(timeStamp),
                Field.Store.YES));

        String ref = "---INDEX-CREATED---";
        doc.add(
            new Field(
                SearchService.FIELD_REFERENCE,
                CompressionTools.compressString(ref),
                Field.Store.YES));
        doc.add(
            new Field(
                SearchService.FIELD_REFERENCE, ref, Field.Store.NO, Field.Index.NOT_ANALYZED));

        indexWrite.addDocument(doc);
      } else {
        log.error("Couldn't get indexWriter to add document!");
      }

    } catch (Exception ex) {
      log.error("Failed to Add Documents ", ex);
      throw new Exception(ex);
    } finally {
      if (indexWrite != null) {
        if (log.isDebugEnabled()) {
          log.debug("Closing Index Writer With " + indexWrite.maxDoc() + " documents");
          Directory d = indexWrite.getDirectory();
          String[] s = d.listAll();
          log.debug("Directory Contains ");
          for (int i = 0; i < s.length; i++) {
            File f = new File(s[i]);
            log.debug(
                "\t"
                    + String.valueOf(f.length())
                    + "\t"
                    + new Date(f.lastModified())
                    + "\t"
                    + s[i]);
          }
        }
        indexStorage.closeIndexWriter(indexWrite);
      }
    }
  }