Code example #1
0
 /**
  * Verifies that a delete added while the global buffer lock is held lands in the delete queue
  * but not yet in the global slice, and that {@code tryApplyGlobalSlice} /
  * {@code freezeGlobalBuffer} later pick it up exactly once.
  *
  * <p>Uses reflection to grab the queue's private {@code globalBufferLock} so the test can hold
  * it across the concurrent {@code addDelete} call.
  */
 public void testPartiallyAppliedGlobalSlice()
     throws SecurityException, NoSuchFieldException, IllegalArgumentException,
         IllegalAccessException, InterruptedException {
   final DocumentsWriterDeleteQueue queue = new DocumentsWriterDeleteQueue();
   Field lockField = DocumentsWriterDeleteQueue.class.getDeclaredField("globalBufferLock");
   lockField.setAccessible(true);
   ReentrantLock globalLock = (ReentrantLock) lockField.get(queue);
   // Hold the lock so the adder thread cannot apply the global slice.
   globalLock.lock();
   Thread adder =
       new Thread(
           new Runnable() {
             @Override
             public void run() {
               queue.addDelete(new Term("foo", "bar"));
             }
           });
   adder.start();
   // addDelete only tries the lock (tryLock), so joining while we still hold it cannot deadlock.
   adder.join();
   globalLock.unlock();
   assertTrue("changes in del queue but not in slice yet", queue.anyChanges());
   queue.tryApplyGlobalSlice();
   assertTrue("changes in global buffer", queue.anyChanges());
   FrozenBufferedUpdates frozen = queue.freezeGlobalBuffer(null);
   assertTrue(frozen.any());
   assertEquals(1, frozen.termCount);
   assertFalse("all changes applied", queue.anyChanges());
 }
Code example #2
0
 /**
  * Appends a new packet of buffered deletes to the stream and stamps it with the next delete
  * generation.
  *
  * @param packet frozen updates to enqueue; must contain at least one delete/update
  * @return the delete generation assigned to {@code packet}
  */
 public synchronized long push(FrozenBufferedUpdates packet) {
   /*
    * The insert operation must be atomic: if threads incremented the gen and enqueued the
    * packet afterwards, packets could arrive out of order. With DWPT this can happen when two
    * or more flushes race to push their updates; out-of-order packets would lose documents
    * because deletes would be applied against the wrong segments.
    */
   final long delGen = nextGen++;
   packet.setDelGen(delGen);
   assert packet.any();
   assert checkDeleteStats();
   assert delGen < nextGen;
   assert updates.isEmpty() || updates.get(updates.size() - 1).delGen() < delGen
       : "Delete packets must be in order";
   updates.add(packet);
   numTerms.addAndGet(packet.numTermDeletes);
   bytesUsed.addAndGet(packet.bytesUsed);
   if (infoStream.isEnabled("BD")) {
     infoStream.message(
         "BD",
         "push deletes " + packet + " delGen=" + delGen + " packetCount=" + updates.size()
             + " totBytesUsed=" + bytesUsed.get());
   }
   assert checkDeleteStats();
   return delGen;
 }
Code example #3
0
  /**
   * Resolves the buffered deleted Term/Query/docIDs, into actual deleted docIDs in the liveDocs
   * MutableBits for each SegmentReader.
   *
   * <p>Walks segments and delete packets in parallel, both ordered by delete generation
   * (newest first): each segment receives all coalesced deletes from packets newer than its own
   * buffered-deletes gen, and a segment-private packet is applied only to the single segment
   * whose gen matches it exactly.
   *
   * @param readerPool pool used to check out a reader per segment; readers are always released
   *     in a finally block
   * @param infos the segments to apply deletes/updates to (not modified; a sorted copy is made)
   * @return result holding whether any new deletes were applied, the gen stamped onto all
   *     processed segments, and the list of segments that became 100% deleted (or null)
   * @throws IOException from reader access or writing field updates
   */
  public synchronized ApplyDeletesResult applyDeletesAndUpdates(
      IndexWriter.ReaderPool readerPool, List<SegmentCommitInfo> infos) throws IOException {
    final long t0 = System.currentTimeMillis();

    // Nothing to do — but still consume a gen so callers see monotonic generations.
    if (infos.size() == 0) {
      return new ApplyDeletesResult(false, nextGen++, null);
    }

    assert checkDeleteStats();

    if (!any()) {
      if (infoStream.isEnabled("BD")) {
        infoStream.message("BD", "applyDeletes: no deletes; skipping");
      }
      return new ApplyDeletesResult(false, nextGen++, null);
    }

    if (infoStream.isEnabled("BD")) {
      infoStream.message("BD", "applyDeletes: infos=" + infos + " packetCount=" + updates.size());
    }

    // Gen stamped onto every segment we process below; nextGen is bumped exactly once here.
    final long gen = nextGen++;

    // Sort a copy of the segments by their buffered-deletes gen so we can merge-walk them
    // against the (already gen-ordered) updates list from newest to oldest.
    List<SegmentCommitInfo> infos2 = new ArrayList<SegmentCommitInfo>();
    infos2.addAll(infos);
    Collections.sort(infos2, sortSegInfoByDelGen);

    // Accumulates deletes/updates from all packets newer than the segment currently visited.
    CoalescedUpdates coalescedUpdates = null;
    boolean anyNewDeletes = false;

    // Walk both lists from the end (highest gen) toward the start.
    int infosIDX = infos2.size() - 1;
    int delIDX = updates.size() - 1;

    // Segments whose documents are now all deleted; lazily allocated.
    List<SegmentCommitInfo> allDeleted = null;

    while (infosIDX >= 0) {
      // System.out.println("BD: cycle delIDX=" + delIDX + " infoIDX=" + infosIDX);

      final FrozenBufferedUpdates packet = delIDX >= 0 ? updates.get(delIDX) : null;
      final SegmentCommitInfo info = infos2.get(infosIDX);
      final long segGen = info.getBufferedDeletesGen();

      if (packet != null && segGen < packet.delGen()) {
        // Packet is newer than this segment: fold it into the coalesced set (unless it is
        // segment-private) and advance to the next-older packet.
        //        System.out.println("  coalesce");
        if (coalescedUpdates == null) {
          coalescedUpdates = new CoalescedUpdates();
        }
        if (!packet.isSegmentPrivate) {
          /*
           * Only coalesce if we are NOT on a segment-private del packet: a segment-private del
           * packet must only be applied to segments with the same delGen.  Yet, if a segment was
           * already dropped from the SI because it had no documents remaining after del packets
           * younger than its segment-private packet (higher delGen) were applied, that
           * segment-private packet has not been removed.
           */
          coalescedUpdates.update(packet);
        }

        delIDX--;
      } else if (packet != null && segGen == packet.delGen()) {
        // Exact gen match: this must be the segment-private packet produced when this segment
        // was flushed; apply it (plus any coalesced newer deletes) to this one segment.
        assert packet.isSegmentPrivate
            : "Packet and Segments deletegen can only match on a segment private del packet gen="
                + segGen;
        // System.out.println("  eq");

        // Lock order: IW -> BD -> RP
        assert readerPool.infoIsLive(info);
        final ReadersAndUpdates rld = readerPool.get(info, true);
        final SegmentReader reader = rld.getReader(IOContext.READ);
        int delCount = 0;
        final boolean segAllDeletes;
        try {
          Map<String, NumericFieldUpdates> fieldUpdates = null;
          if (coalescedUpdates != null) {
            // System.out.println("    del coalesced");
            delCount += applyTermDeletes(coalescedUpdates.termsIterable(), rld, reader);
            delCount += applyQueryDeletes(coalescedUpdates.queriesIterable(), rld, reader);
            fieldUpdates =
                applyNumericDocValuesUpdates(
                    coalescedUpdates.numericDVUpdates, rld, reader, fieldUpdates);
          }
          // System.out.println("    del exact");
          // Don't delete by Term here; DocumentsWriterPerThread
          // already did that on flush:
          delCount += applyQueryDeletes(packet.queriesIterable(), rld, reader);
          fieldUpdates =
              applyNumericDocValuesUpdates(
                  Arrays.asList(packet.updates), rld, reader, fieldUpdates);
          if (!fieldUpdates.isEmpty()) {
            rld.writeFieldUpdates(info.info.dir, fieldUpdates);
          }
          final int fullDelCount = rld.info.getDelCount() + rld.getPendingDeleteCount();
          assert fullDelCount <= rld.info.info.getDocCount();
          segAllDeletes = fullDelCount == rld.info.info.getDocCount();
        } finally {
          // Always release reader and pool entry, even if applying deletes threw.
          rld.release(reader);
          readerPool.release(rld);
        }
        anyNewDeletes |= delCount > 0;

        if (segAllDeletes) {
          if (allDeleted == null) {
            allDeleted = new ArrayList<SegmentCommitInfo>();
          }
          allDeleted.add(info);
        }

        if (infoStream.isEnabled("BD")) {
          infoStream.message(
              "BD",
              "seg="
                  + info
                  + " segGen="
                  + segGen
                  + " segDeletes=["
                  + packet
                  + "]; coalesced deletes=["
                  + (coalescedUpdates == null ? "null" : coalescedUpdates)
                  + "] newDelCount="
                  + delCount
                  + (segAllDeletes ? " 100% deleted" : ""));
        }

        if (coalescedUpdates == null) {
          coalescedUpdates = new CoalescedUpdates();
        }

        /*
         * Since we are on a segment private del packet we must not
         * update the coalescedDeletes here! We can simply advance to the
         * next packet and seginfo.
         */
        delIDX--;
        infosIDX--;
        info.setBufferedDeletesGen(gen);

      } else {
        // No packet, or packet is older than this segment: apply only the coalesced deletes
        // accumulated so far (if any), then advance to the next-older segment.
        // System.out.println("  gt");

        if (coalescedUpdates != null) {
          // Lock order: IW -> BD -> RP
          assert readerPool.infoIsLive(info);
          final ReadersAndUpdates rld = readerPool.get(info, true);
          final SegmentReader reader = rld.getReader(IOContext.READ);
          int delCount = 0;
          final boolean segAllDeletes;
          try {
            delCount += applyTermDeletes(coalescedUpdates.termsIterable(), rld, reader);
            delCount += applyQueryDeletes(coalescedUpdates.queriesIterable(), rld, reader);
            Map<String, NumericFieldUpdates> fieldUpdates =
                applyNumericDocValuesUpdates(coalescedUpdates.numericDVUpdates, rld, reader, null);
            if (!fieldUpdates.isEmpty()) {
              rld.writeFieldUpdates(info.info.dir, fieldUpdates);
            }
            final int fullDelCount = rld.info.getDelCount() + rld.getPendingDeleteCount();
            assert fullDelCount <= rld.info.info.getDocCount();
            segAllDeletes = fullDelCount == rld.info.info.getDocCount();
          } finally {
            // Always release reader and pool entry, even if applying deletes threw.
            rld.release(reader);
            readerPool.release(rld);
          }
          anyNewDeletes |= delCount > 0;

          if (segAllDeletes) {
            if (allDeleted == null) {
              allDeleted = new ArrayList<SegmentCommitInfo>();
            }
            allDeleted.add(info);
          }

          if (infoStream.isEnabled("BD")) {
            infoStream.message(
                "BD",
                "seg="
                    + info
                    + " segGen="
                    + segGen
                    + " coalesced deletes=["
                    + coalescedUpdates
                    + "] newDelCount="
                    + delCount
                    + (segAllDeletes ? " 100% deleted" : ""));
          }
        }
        info.setBufferedDeletesGen(gen);

        infosIDX--;
      }
    }

    assert checkDeleteStats();
    if (infoStream.isEnabled("BD")) {
      infoStream.message("BD", "applyDeletes took " + (System.currentTimeMillis() - t0) + " msec");
    }
    // assert infos != segmentInfos || !any() : "infos=" + infos + " segmentInfos=" + segmentInfos +
    // " any=" + any;

    return new ApplyDeletesResult(anyNewDeletes, gen, allDeleted);
  }