Example no. 1
 /** Close all streams. */
 final void close() throws IOException {
   try {
     closeDocument();
   } finally {
     // make an effort to close all streams we can but remember and re-throw
     // the first exception encountered in this process
     IOException keep = null;
     if (tvx != null)
       try {
         tvx.close();
       } catch (IOException e) {
         if (keep == null) keep = e;
       }
     if (tvd != null)
       try {
         tvd.close();
       } catch (IOException e) {
         if (keep == null) keep = e;
       }
     if (tvf != null)
       try {
         tvf.close();
       } catch (IOException e) {
         if (keep == null) keep = e;
       }
     if (keep != null) throw (IOException) keep.fillInStackTrace();
   }
 }
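For comparison, here is a minimal sketch of the same "remember the first exception" cleanup using Java 7 suppressed exceptions. It reuses the tvx/tvd/tvf IndexOutput fields from the example above and is only an illustration, not part of the original class:

  /** Sketch only (assumes Java 7+): close everything, keep the first failure, suppress the rest. */
  final void closeAllSuppressing() throws IOException {
    IOException first = null;
    try {
      closeDocument();
    } catch (IOException e) {
      first = e;
    }
    for (IndexOutput out : new IndexOutput[] {tvx, tvd, tvf}) {
      if (out == null) continue;
      try {
        out.close();
      } catch (IOException e) {
        if (first == null) first = e;
        else first.addSuppressed(e); // keep later failures attached to the first one
      }
    }
    if (first != null) throw first;
  }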
Example no. 2
 public void writeChecksums() throws IOException {
   String checksumName = CHECKSUMS_PREFIX + System.currentTimeMillis();
   ImmutableMap<String, StoreFileMetaData> files = list();
   synchronized (mutex) {
     Map<String, String> checksums = new HashMap<String, String>();
     for (StoreFileMetaData metaData : files.values()) {
       if (metaData.checksum() != null) {
         checksums.put(metaData.name(), metaData.checksum());
       }
     }
     IndexOutput output = directory.createOutput(checksumName, IOContext.DEFAULT, true);
     output.writeInt(0); // version
     output.writeStringStringMap(checksums);
     output.close();
   }
   for (StoreFileMetaData metaData : files.values()) {
     if (metaData.name().startsWith(CHECKSUMS_PREFIX) && !checksumName.equals(metaData.name())) {
       try {
         directory.deleteFileChecksum(metaData.name());
       } catch (Exception e) {
         // ignore
       }
     }
   }
 }
  // Make sure we don't somehow use more than 1 descriptor
  // when reading a CFS with many subs:
  public void testManySubFiles() throws IOException {

    final Directory d = newFSDirectory(_TestUtil.getTempDir("CFSManySubFiles"));
    final int FILE_COUNT = 10000;

    for (int fileIdx = 0; fileIdx < FILE_COUNT; fileIdx++) {
      IndexOutput out = d.createOutput("file." + fileIdx);
      out.writeByte((byte) fileIdx);
      out.close();
    }

    final CompoundFileWriter cfw = new CompoundFileWriter(d, "c.cfs");
    for (int fileIdx = 0; fileIdx < FILE_COUNT; fileIdx++) {
      cfw.addFile("file." + fileIdx);
    }
    cfw.close();

    final IndexInput[] ins = new IndexInput[FILE_COUNT];
    final CompoundFileReader cfr = new CompoundFileReader(d, "c.cfs");
    for (int fileIdx = 0; fileIdx < FILE_COUNT; fileIdx++) {
      ins[fileIdx] = cfr.openInput("file." + fileIdx);
    }

    for (int fileIdx = 0; fileIdx < FILE_COUNT; fileIdx++) {
      assertEquals((byte) fileIdx, ins[fileIdx].readByte());
    }

    for (int fileIdx = 0; fileIdx < FILE_COUNT; fileIdx++) {
      ins[fileIdx].close();
    }
    cfr.close();
    d.close();
  }
 private void testOn(Directory dir, int writeSize, int readSize, Cache cache) throws IOException {
    if (cache != null) {
      // needed to make sure no chunks are left over, in case of the Infinispan implementation
      cache.clear();
    }
   final String filename = "chunkTest";
   IndexOutput indexOutput = dir.createOutput(filename);
   byte[] toWrite = fillBytes(writeSize);
   indexOutput.writeBytes(toWrite, writeSize);
   indexOutput.close();
   if (cache != null) {
     AssertJUnit.assertEquals(
         writeSize,
         DirectoryIntegrityCheck.deepCountFileSize(new FileCacheKey(INDEXNAME, filename), cache));
   }
   AssertJUnit.assertEquals(writeSize, indexOutput.length());
   byte[] results = new byte[readSize];
   IndexInput openInput = dir.openInput(filename);
   try {
     openInput.readBytes(results, 0, readSize);
     for (int i = 0; i < writeSize && i < readSize; i++) {
       AssertJUnit.assertEquals(results[i], toWrite[i]);
     }
     if (readSize > writeSize)
       AssertJUnit.fail("should have thrown an IOException for reading past EOF");
   } catch (IOException ioe) {
     if (readSize <= writeSize)
       AssertJUnit.fail("should not have thrown an IOException" + ioe.getMessage());
   }
 }
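The fillBytes helper used above (and in the following examples) is not shown on this page. A plausible stand-in, purely an assumption for readability and not the project's actual implementation, could be:

  // Hypothetical helper: fills a buffer with a deterministic pattern so the
  // bytes written and the bytes read back can be compared element by element.
  private byte[] fillBytes(int size) {
    byte[] bytes = new byte[size];
    for (int i = 0; i < size; i++) {
      bytes[i] = (byte) (i % 128);
    }
    return bytes;
  }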
 public void multipleFlushTest() throws IOException {
   final String filename = "longFile.writtenInMultipleFlushes";
   final int bufferSize = 300;
   Cache cache = cacheManager.getCache();
   cache.clear();
   Directory dir =
       DirectoryBuilder.newDirectoryInstance(cache, cache, cache, INDEXNAME)
           .chunkSize(13)
           .create();
   byte[] manyBytes = fillBytes(bufferSize);
   IndexOutput indexOutput = dir.createOutput(filename);
   for (int i = 0; i < 10; i++) {
     indexOutput.writeBytes(manyBytes, bufferSize);
     indexOutput.flush();
   }
   indexOutput.close();
   IndexInput input = dir.openInput(filename);
   final int finalSize = (10 * bufferSize);
   AssertJUnit.assertEquals(finalSize, input.length());
   final byte[] resultingBuffer = new byte[finalSize];
   input.readBytes(resultingBuffer, 0, finalSize);
   int index = 0;
   for (int i = 0; i < 10; i++) {
     for (int j = 0; j < bufferSize; j++)
       AssertJUnit.assertEquals(resultingBuffer[index++], manyBytes[j]);
   }
 }
  private void demo_FSIndexInputBug(Directory fsdir, String file) throws IOException {
    // Set up the test file; we need more than 1024 bytes
    IndexOutput os = fsdir.createOutput(file);
    for (int i = 0; i < 2000; i++) {
      os.writeByte((byte) i);
    }
    os.close();

    IndexInput in = fsdir.openInput(file);

    // This read primes the buffer in IndexInput
    in.readByte();

    // Close the file
    in.close();

    // ERROR: this call should fail, but succeeds because the buffer
    // is still filled
    in.readByte();

    // ERROR: this call should fail, but succeeds for some reason as well
    in.seek(1099);

    try {
      // OK: this call correctly fails. We are now past the 1024 internal
      // buffer, so an actual IO is attempted, which fails
      in.readByte();
      fail("expected readByte() to throw exception");
    } catch (IOException e) {
      // expected exception
    }
  }
  public void close() throws IOException {
    try {
      final long dirStart = out.getFilePointer();
      final int fieldCount = fields.size();

      int nonNullFieldCount = 0;
      for (int i = 0; i < fieldCount; i++) {
        FSTFieldWriter field = fields.get(i);
        if (field.fst != null) {
          nonNullFieldCount++;
        }
      }

      out.writeVInt(nonNullFieldCount);
      for (int i = 0; i < fieldCount; i++) {
        FSTFieldWriter field = fields.get(i);
        if (field.fst != null) {
          out.writeVInt(field.fieldInfo.number);
          out.writeVLong(field.indexStart);
        }
      }
      writeTrailer(dirStart);
    } finally {
      out.close();
    }
  }
 @Test
 @Override
 public void testIndexWritingAndFinding() throws IOException, InterruptedException {
   verifyBoth(cache0, cache1);
   IndexOutput indexOutput = dirA.createOutput(filename, IOContext.DEFAULT);
   indexOutput.writeString("no need to write, nobody ever will read this");
   indexOutput.flush();
   indexOutput.close();
   assertFileExistsHavingRLCount(filename, 1, true);
   IndexInput firstOpenOnB = dirB.openInput(filename, IOContext.DEFAULT);
   assertFileExistsHavingRLCount(filename, 2, true);
   dirA.deleteFile(filename);
   assertFileExistsHavingRLCount(filename, 1, false);
    // Lucene does use clone(); the lock implementation ignores it, as a clone is
    // created on locked segments and released before the close on the parent object
   IndexInput cloneOfFirstOpenOnB = (IndexInput) firstOpenOnB.clone();
   assertFileExistsHavingRLCount(filename, 1, false);
   cloneOfFirstOpenOnB.close();
   assertFileExistsHavingRLCount(filename, 1, false);
   IndexInput firstOpenOnA = dirA.openInput(filename, IOContext.DEFAULT);
   assertFileExistsHavingRLCount(filename, 2, false);
   IndexInput secondOpenOnA = dirA.openInput(filename, IOContext.DEFAULT);
   assertFileExistsHavingRLCount(filename, 2, false);
   firstOpenOnA.close();
   assertFileExistsHavingRLCount(filename, 2, false);
   secondOpenOnA.close();
   assertFileExistsHavingRLCount(filename, 1, false);
   firstOpenOnB.close();
   assertFileNotExists(filename);
   dirA.close();
   dirB.close();
   verifyBoth(cache0, cache1);
 }
  public void testWriteChunks() throws Exception {
    final int BUFFER_SIZE = 64;

    Cache cache = cacheManager.getCache();
    Directory dir =
        DirectoryBuilder.newDirectoryInstance(cache, cache, cache, INDEXNAME)
            .chunkSize(BUFFER_SIZE)
            .create();

    IndexOutput io = dir.createOutput("MyNewFile.txt");

    io.writeByte((byte) 66);
    io.writeByte((byte) 69);

    io.flush();
    io.close();

    assert dir.fileExists("MyNewFile.txt");
    assert null != cache.get(new ChunkCacheKey(INDEXNAME, "MyNewFile.txt", 0, BUFFER_SIZE));

    // test contents by reading:
    byte[] buf = new byte[9];
    IndexInput ii = dir.openInput("MyNewFile.txt");
    ii.readBytes(buf, 0, (int) ii.length());
    ii.close();

    assert new String(new byte[] {66, 69}).equals(new String(buf).trim());

    String testText =
        "This is some rubbish again that will span more than one chunk - one hopes.  Who knows, maybe even three or four chunks.";
    io = dir.createOutput("MyNewFile.txt");
    io.seek(0);
    io.writeBytes(testText.getBytes(), 0, testText.length());
    io.close();
    // now compare.
    byte[] chunk1 =
        (byte[]) cache.get(new ChunkCacheKey(INDEXNAME, "MyNewFile.txt", 0, BUFFER_SIZE));
    byte[] chunk2 =
        (byte[]) cache.get(new ChunkCacheKey(INDEXNAME, "MyNewFile.txt", 1, BUFFER_SIZE));
    assert null != chunk1;
    assert null != chunk2;

    assert testText.equals(new String(chunk1) + new String(chunk2).trim());

    dir.close();
    DirectoryIntegrityCheck.verifyDirectoryStructure(cache, INDEXNAME);
  }
Example no. 10
 /** Creates a file of the specified size with random data. */
 private void createRandomFile(Directory dir, String name, int size) throws IOException {
   IndexOutput os = dir.createOutput(name);
   for (int i = 0; i < size; i++) {
     byte b = (byte) (Math.random() * 256);
     os.writeByte(b);
   }
   os.close();
 }
 /**
  * Creates a file of a fixed size, using a RepeatableLongByteSequence object to generate
  * repeatable content.
  *
  * @param dir the Directory in which to create the file
  * @param fileName the name of the file to create
  * @param contentFileSize the size of the file content to create
  * @throws IOException
  */
 private void createFileWithRepeatableContent(
     Directory dir, String fileName, final int contentFileSize) throws IOException {
   IndexOutput indexOutput = dir.createOutput(fileName);
   RepeatableLongByteSequence bytesGenerator = new RepeatableLongByteSequence();
   for (int i = 0; i < contentFileSize; i++) {
     indexOutput.writeByte(bytesGenerator.nextByte());
   }
   indexOutput.close();
 }
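RepeatableLongByteSequence is likewise not shown here; the property the Javadoc relies on is that two independently created instances produce the same byte stream. A hypothetical stand-in illustrating that property (not the actual class used by the test):

  // Hypothetical stand-in: a deterministic generator, so two fresh instances
  // emit identical byte sequences.
  static class RepeatableLongByteSequence {
    private long state = 13;

    byte nextByte() {
      state = state * 6364136223846793005L + 1442695040888963407L; // LCG step
      return (byte) (state >>> 56);
    }
  }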
Example no. 12
 /**
  * Creates a file of the specified size with sequential data. The first byte is written as the
  * start byte provided. All subsequent bytes are computed as start + offset where offset is the
  * number of the byte.
  */
 private void createSequenceFile(Directory dir, String name, byte start, int size)
     throws IOException {
   IndexOutput os = dir.createOutput(name);
   for (int i = 0; i < size; i++) {
     os.writeByte(start);
     start++;
   }
   os.close();
 }
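A minimal usage sketch for createSequenceFile, assuming the same pre-4.0 Directory API used in the example (single-argument createOutput/openInput) and a RAMDirectory chosen purely for illustration:

  // Sketch: write a 16-byte sequence starting at 5, then read it back and verify.
  Directory dir = new RAMDirectory();
  createSequenceFile(dir, "seq.bin", (byte) 5, 16);
  IndexInput in = dir.openInput("seq.bin");
  try {
    for (int i = 0; i < 16; i++) {
      assertEquals((byte) (5 + i), in.readByte());
    }
  } finally {
    in.close();
    dir.close();
  }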
  public void testEncodeDecode() throws IOException {
    final int iterations = RandomInts.randomIntBetween(random(), 1, 1000);
    final float acceptableOverheadRatio = random().nextFloat();
    final int[] values = new int[(iterations - 1) * BLOCK_SIZE + ForUtil.MAX_DATA_SIZE];
    for (int i = 0; i < iterations; ++i) {
      final int bpv = random().nextInt(32);
      if (bpv == 0) {
        final int value = RandomInts.randomIntBetween(random(), 0, Integer.MAX_VALUE);
        for (int j = 0; j < BLOCK_SIZE; ++j) {
          values[i * BLOCK_SIZE + j] = value;
        }
      } else {
        for (int j = 0; j < BLOCK_SIZE; ++j) {
          values[i * BLOCK_SIZE + j] =
              RandomInts.randomIntBetween(random(), 0, (int) PackedInts.maxValue(bpv));
        }
      }
    }

    final Directory d = new RAMDirectory();
    final long endPointer;

    {
      // encode
      IndexOutput out = d.createOutput("test.bin", IOContext.DEFAULT);
      final ForUtil forUtil = new ForUtil(acceptableOverheadRatio, out);

      for (int i = 0; i < iterations; ++i) {
        forUtil.writeBlock(
            Arrays.copyOfRange(values, i * BLOCK_SIZE, values.length),
            new byte[MAX_ENCODED_SIZE],
            out);
      }
      endPointer = out.getFilePointer();
      out.close();
    }

    {
      // decode
      IndexInput in = d.openInput("test.bin", IOContext.READONCE);
      final ForUtil forUtil = new ForUtil(in);
      for (int i = 0; i < iterations; ++i) {
        if (random().nextBoolean()) {
          forUtil.skipBlock(in);
          continue;
        }
        final int[] restored = new int[MAX_DATA_SIZE];
        forUtil.readBlock(in, new byte[MAX_ENCODED_SIZE], restored);
        assertArrayEquals(
            Arrays.copyOfRange(values, i * BLOCK_SIZE, (i + 1) * BLOCK_SIZE),
            Arrays.copyOf(restored, BLOCK_SIZE));
      }
      assertEquals(endPointer, in.getFilePointer());
      in.close();
    }
  }
Example no. 14
  final void finishCommit(Directory dir) throws IOException {
    if (pendingSegnOutput == null) throw new IllegalStateException("prepareCommit was not called");
    boolean success = false;
    try {
      pendingSegnOutput.finishCommit();
      pendingSegnOutput.close();
      pendingSegnOutput = null;
      success = true;
    } finally {
      if (!success) rollbackCommit(dir);
    }

    // NOTE: if we crash here, we have left a segments_N
    // file in the directory in a possibly corrupt state (if
    // some bytes made it to stable storage and others
    // didn't).  But, the segments_N file includes checksum
    // at the end, which should catch this case.  So when a
    // reader tries to read it, it will throw a
    // CorruptIndexException, which should cause the retry
    // logic in SegmentInfos to kick in and load the last
    // good (previous) segments_N-1 file.

    final String fileName =
        IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", generation);
    success = false;
    try {
      dir.sync(Collections.singleton(fileName));
      success = true;
    } finally {
      if (!success) {
        try {
          dir.deleteFile(fileName);
        } catch (Throwable t) {
          // Suppress so we keep throwing the original exception
        }
      }
    }

    lastGeneration = generation;

    try {
      IndexOutput genOutput = dir.createOutput(IndexFileNames.SEGMENTS_GEN);
      try {
        genOutput.writeInt(FORMAT_LOCKLESS);
        genOutput.writeLong(generation);
        genOutput.writeLong(generation);
      } finally {
        genOutput.close();
      }
    } catch (ThreadInterruptedException t) {
      throw t;
    } catch (Throwable t) {
      // It's OK if we fail to write this file since it's
      // used only as one of the retry fallbacks.
    }
  }
Example no. 15
 @Override
 public void close() {
   try {
     output.flush();
     output.close();
     super.close();
   } catch (IOException e) {
     // TODO Auto-generated catch block
     e.printStackTrace();
   }
 }
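The auto-generated catch block above swallows the failure. If the overridden method is permitted to declare IOException (an assumption about the surrounding API, not the original code), a sketch of a variant that propagates it instead:

  @Override
  public void close() throws IOException { // assumes the parent type allows this checked exception
    output.flush();
    output.close();
    super.close();
  }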
Example no. 16
 @Override
 public void messageReceived(final RecoveryFileChunkRequest request, TransportChannel channel)
     throws Exception {
   try (RecoveriesCollection.StatusRef statusRef =
       onGoingRecoveries.getStatusSafe(request.recoveryId(), request.shardId())) {
     final RecoveryStatus recoveryStatus = statusRef.status();
     final Store store = recoveryStatus.store();
     recoveryStatus.state().getTranslog().totalOperations(request.totalTranslogOps());
     final RecoveryState.Index indexState = recoveryStatus.state().getIndex();
     if (request.sourceThrottleTimeInNanos() != RecoveryState.Index.UNKNOWN) {
       indexState.addSourceThrottling(request.sourceThrottleTimeInNanos());
     }
     IndexOutput indexOutput;
     if (request.position() == 0) {
       indexOutput =
           recoveryStatus.openAndPutIndexOutput(request.name(), request.metadata(), store);
     } else {
       indexOutput = recoveryStatus.getOpenIndexOutput(request.name());
     }
     BytesReference content = request.content();
     if (!content.hasArray()) {
       content = content.toBytesArray();
     }
     RateLimiter rl = recoverySettings.rateLimiter();
     if (rl != null) {
       long bytes = bytesSinceLastPause.addAndGet(content.length());
       if (bytes > rl.getMinPauseCheckBytes()) {
         // Time to pause
         bytesSinceLastPause.addAndGet(-bytes);
         long throttleTimeInNanos = rl.pause(bytes);
         indexState.addTargetThrottling(throttleTimeInNanos);
         recoveryStatus.indexShard().recoveryStats().addThrottleTime(throttleTimeInNanos);
       }
     }
     indexOutput.writeBytes(content.array(), content.arrayOffset(), content.length());
     indexState.addRecoveredBytesToFile(request.name(), content.length());
     if (indexOutput.getFilePointer() >= request.length() || request.lastChunk()) {
       try {
         Store.verify(indexOutput);
       } finally {
         // we are done
         indexOutput.close();
       }
       // write the checksum
       recoveryStatus.legacyChecksums().add(request.metadata());
       final String temporaryFileName = recoveryStatus.getTempNameForFile(request.name());
       assert Arrays.asList(store.directory().listAll()).contains(temporaryFileName);
       store.directory().sync(Collections.singleton(temporaryFileName));
       IndexOutput remove = recoveryStatus.removeOpenIndexOutputs(request.name());
        assert remove == null || remove == indexOutput; // remove may be null if we already finished
     }
   }
   channel.sendResponse(TransportResponse.Empty.INSTANCE);
 }
  @Test
  public void testNoDocs() throws IOException {
    AnalyzingCompletionLookupProvider provider =
        new AnalyzingCompletionLookupProvider(true, false, true, true);
    RAMDirectory dir = new RAMDirectory();
    IndexOutput output = dir.createOutput("foo.txt", IOContext.DEFAULT);
    FieldsConsumer consumer = provider.consumer(output);
    consumer.write(
        new Fields() {
          @Override
          public Iterator<String> iterator() {
            return Arrays.asList("foo").iterator();
          }

          @Override
          public Terms terms(String field) throws IOException {
            return null;
          }

          @Override
          public int size() {
            return 1;
          }
        });
    consumer.close();
    output.close();

    IndexInput input = dir.openInput("foo.txt", IOContext.DEFAULT);
    LookupFactory load = provider.load(input);
    PostingsFormat format = new Elasticsearch090PostingsFormat();
    NamedAnalyzer analyzer = new NamedAnalyzer("foo", new StandardAnalyzer());
    assertNull(
        load.getLookup(
            new CompletionFieldMapper(
                new Names("foo"),
                analyzer,
                analyzer,
                format,
                null,
                true,
                true,
                true,
                Integer.MAX_VALUE,
                indexSettings,
                AbstractFieldMapper.MultiFields.empty(),
                null,
                ContextMapping.EMPTY_MAPPING),
            new CompletionSuggestionContext(null)));
    dir.close();
  }
Example no. 18
  /**
   * This test verifies that writes larger than the output buffer size correctly advance the
   * file pointer.
   */
  public void testLargeWrites() throws IOException {
    IndexOutput os = dir.createOutput("testBufferStart.txt");

    byte[] largeBuf = new byte[2048];
    for (int i = 0; i < largeBuf.length; i++) {
      largeBuf[i] = (byte) (Math.random() * 256);
    }

    long currentPos = os.getFilePointer();
    os.writeBytes(largeBuf, largeBuf.length);

    try {
      assertEquals(currentPos + largeBuf.length, os.getFilePointer());
    } finally {
      os.close();
    }
  }
  private void insertData(ByteBufferDirectory dir, int bufferSizeInBytes) throws IOException {
    byte[] test = new byte[] {1, 2, 3, 4, 5, 6, 7, 8};
    IndexOutput indexOutput = dir.createOutput("value1", IOContext.DEFAULT);
    indexOutput.writeBytes(new byte[] {2, 4, 6, 7, 8}, 5);
    indexOutput.writeInt(-1);
    indexOutput.writeLong(10);
    indexOutput.writeInt(0);
    indexOutput.writeInt(0);
    indexOutput.writeBytes(test, 8);
    indexOutput.writeBytes(test, 5);

    indexOutput.seek(0);
    indexOutput.writeByte((byte) 8);
    if (bufferSizeInBytes > 4) {
      indexOutput.seek(2);
      indexOutput.writeBytes(new byte[] {1, 2}, 2);
    }

    indexOutput.close();
  }
  private void insertData(CoherenceDirectory dir, String fileName) throws IOException {
    byte[] test = new byte[] {1, 2, 3, 4, 5, 6, 7, 8};
    IndexOutput indexOutput = dir.createOutput(fileName);
    indexOutput.writeBytes(new byte[] {2, 4, 6, 7, 8}, 5);
    indexOutput.writeInt(-1);
    indexOutput.writeLong(10);
    indexOutput.writeInt(0);
    indexOutput.writeInt(0);
    indexOutput.writeBytes(test, 8);
    indexOutput.writeBytes(test, 5);

    indexOutput.seek(0);
    indexOutput.writeByte((byte) 8);
    if (dir.getBucketSize() > 4) {
      indexOutput.seek(2);
      indexOutput.writeBytes(new byte[] {1, 2}, 2);
    }

    indexOutput.close();
  }
Example no. 21
 @Test
 public void testVerifyingIndexOutput() throws IOException {
   Directory dir = newDirectory();
   IndexOutput output = dir.createOutput("foo.bar", IOContext.DEFAULT);
   int iters = scaledRandomIntBetween(10, 100);
   for (int i = 0; i < iters; i++) {
     BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
     output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
   }
   CodecUtil.writeFooter(output);
   output.close();
   IndexInput indexInput = dir.openInput("foo.bar", IOContext.DEFAULT);
   String checksum = Store.digestToString(CodecUtil.retrieveChecksum(indexInput));
   indexInput.seek(0);
   BytesRef ref = new BytesRef(scaledRandomIntBetween(1, 1024));
   long length = indexInput.length();
   IndexOutput verifyingOutput =
       new Store.LuceneVerifyingIndexOutput(
           new StoreFileMetaData("foo1.bar", length, checksum),
           dir.createOutput("foo1.bar", IOContext.DEFAULT));
   while (length > 0) {
     if (random().nextInt(10) == 0) {
       verifyingOutput.writeByte(indexInput.readByte());
       length--;
     } else {
       int min = (int) Math.min(length, ref.bytes.length);
       indexInput.readBytes(ref.bytes, ref.offset, min);
       verifyingOutput.writeBytes(ref.bytes, ref.offset, min);
       length -= min;
     }
   }
   Store.verify(verifyingOutput);
   verifyingOutput.writeByte((byte) 0x0);
   try {
     Store.verify(verifyingOutput);
     fail("should be a corrupted index");
   } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
     // ok
   }
   IOUtils.close(indexInput, verifyingOutput, dir);
 }
  @Test
  public void testWriteChunksDefaultChunks() throws Exception {
    Cache cache = cacheManager.getCache();
    Directory dir = DirectoryBuilder.newDirectoryInstance(cache, cache, cache, INDEXNAME).create();

    final String testText = "This is some rubbish";
    final byte[] testTextAsBytes = testText.getBytes();

    IndexOutput io = dir.createOutput("MyNewFile.txt");

    io.writeByte((byte) 1);
    io.writeByte((byte) 2);
    io.writeByte((byte) 3);
    io.writeBytes(testTextAsBytes, testTextAsBytes.length);
    io.close();
    DirectoryIntegrityCheck.verifyDirectoryStructure(cache, INDEXNAME);

    FileCacheKey fileCacheKey = new FileCacheKey(INDEXNAME, "MyNewFile.txt");
    assert null != cache.get(fileCacheKey);
    FileMetadata metadata = (FileMetadata) cache.get(fileCacheKey);
    AssertJUnit.assertEquals(testTextAsBytes.length + 3, metadata.getSize());
    assert null
        != cache.get(
            new ChunkCacheKey(
                INDEXNAME, "MyNewFile.txt", 0, DirectoryBuilderImpl.DEFAULT_BUFFER_SIZE));

    // test contents by reading:
    IndexInput ii = dir.openInput("MyNewFile.txt");
    assert ii.readByte() == 1;
    assert ii.readByte() == 2;
    assert ii.readByte() == 3;
    byte[] buf = new byte[testTextAsBytes.length];

    ii.readBytes(buf, 0, testTextAsBytes.length);
    ii.close();

    assert testText.equals(new String(buf).trim());

    dir.close();
    DirectoryIntegrityCheck.verifyDirectoryStructure(cache, INDEXNAME);
  }
Example no. 23
 @Override
 public void close() throws IOException {
   out.close();
   String checksum = null;
   IndexOutput underlying = out;
    // TODO: cut over to Lucene's CRC
    // *WARNING*: Lucene has classes in the same o.a.l.store package with very similar names,
    // but using CRC, not Adler!
   if (underlying instanceof BufferedChecksumIndexOutput) {
     Checksum digest = ((BufferedChecksumIndexOutput) underlying).digest();
     assert digest instanceof Adler32;
     checksum = Long.toString(digest.getValue(), Character.MAX_RADIX);
   }
   synchronized (mutex) {
     StoreFileMetaData md =
         new StoreFileMetaData(
             name, metaData.directory().fileLength(name), checksum, metaData.directory());
     filesMetadata = ImmutableOpenMap.builder(filesMetadata).fPut(name, md).build();
     files = filesMetadata.keys().toArray(String.class);
   }
 }
  /**
   * Closes the temporary file, compresses its data, and removes the temporary file.
   *
   * @throws IOException
   */
  @Override
  public void close() throws IOException {
    byte[] buffer = new byte[chunkSize];
    tempOut.close();
    // directory of offsets of the compressed chunks, mapping them to their real
    // positions in the decompressed stream
    IndexInput in = tempDirectory.openInput(tmpName);
    long len = closeLength = in.length();
    // write the length of the file at the beginning for easier retrieval
    output.writeLong(-1);

    // write configuration
    writeConfig();
    int toRead;
    // read all data and compress it in variable-size block chunks
    while (len > 0) {
      if (len > buffer.length) {
        toRead = buffer.length;
      } else {
        toRead = (int) len;
      }

      // just for safety; can be improved
      long bufferPos = in.getFilePointer();
      // read original data
      in.readBytes(buffer, 0, toRead);

      writeChunk(buffer, bufferPos, toRead);

      len -= toRead;
    }
    // now create the directory entry of all chunks and their original
    // positions in the inflated stream

    in.close();
    if (tempDirectory.fileExists(tmpName)) {
      tempDirectory.deleteFile(tmpName);
    }
    super.close();
  }
 public void multipleFlushTest() throws IOException {
   final String filename = "longFile.writtenInMultipleFlushes";
   final int bufferSize = 300;
   Cache cache = cacheManager.getCache();
   cache.clear();
   InfinispanDirectory dir = new InfinispanDirectory(cache, cache, cache, INDEXNAME, 13);
   byte[] manyBytes = fillBytes(bufferSize);
   IndexOutput indexOutput = dir.createOutput(filename);
   for (int i = 0; i < 10; i++) {
     indexOutput.writeBytes(manyBytes, bufferSize);
     indexOutput.flush();
   }
   indexOutput.close();
   IndexInput input = dir.openInput(filename);
   final int finalSize = (10 * bufferSize);
   assert input.length() == finalSize;
   final byte[] resultingBuffer = new byte[finalSize];
   input.readBytes(resultingBuffer, 0, finalSize);
   int index = 0;
   for (int i = 0; i < 10; i++) {
     for (int j = 0; j < bufferSize; j++) assert resultingBuffer[index++] == manyBytes[j];
   }
 }
Example no. 26
 @Override
 public void close() throws IOException {
   out.close();
   String checksum = null;
   IndexOutput underlying = out;
   if (underlying instanceof BufferedChecksumIndexOutput) {
     checksum =
         Long.toString(
             ((BufferedChecksumIndexOutput) underlying).digest().getValue(),
             Character.MAX_RADIX);
   } else if (underlying instanceof ChecksumIndexOutput) {
     checksum =
         Long.toString(
             ((ChecksumIndexOutput) underlying).digest().getValue(), Character.MAX_RADIX);
   }
   synchronized (mutex) {
     StoreFileMetaData md =
         new StoreFileMetaData(
             name, metaData.directory().fileLength(name), checksum, metaData.directory());
     filesMetadata = MapBuilder.newMapBuilder(filesMetadata).put(name, md).immutableMap();
     files = filesMetadata.keySet().toArray(new String[filesMetadata.size()]);
   }
 }
Example no. 27
  @Test
  public void testVerifyingIndexInput() throws IOException {
    Directory dir = newDirectory();
    IndexOutput output = dir.createOutput("foo.bar", IOContext.DEFAULT);
    int iters = scaledRandomIntBetween(10, 100);
    for (int i = 0; i < iters; i++) {
      BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
      output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
    }
    CodecUtil.writeFooter(output);
    output.close();

    // Check file
    IndexInput indexInput = dir.openInput("foo.bar", IOContext.DEFAULT);
    long checksum = CodecUtil.retrieveChecksum(indexInput);
    indexInput.seek(0);
    IndexInput verifyingIndexInput =
        new Store.VerifyingIndexInput(dir.openInput("foo.bar", IOContext.DEFAULT));
    readIndexInputFullyWithRandomSeeks(verifyingIndexInput);
    Store.verify(verifyingIndexInput);
    assertThat(checksum, equalTo(((ChecksumIndexInput) verifyingIndexInput).getChecksum()));
    IOUtils.close(indexInput, verifyingIndexInput);

    // Corrupt file and check again
    corruptFile(dir, "foo.bar", "foo1.bar");
    verifyingIndexInput =
        new Store.VerifyingIndexInput(dir.openInput("foo1.bar", IOContext.DEFAULT));
    readIndexInputFullyWithRandomSeeks(verifyingIndexInput);
    try {
      Store.verify(verifyingIndexInput);
      fail("should be a corrupted index");
    } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
      // ok
    }
    IOUtils.close(verifyingIndexInput);
    IOUtils.close(dir);
  }
 @Override
 public void close() throws IOException {
   delegate.close();
   openOutputs.remove(IndexOutputDelegate.this);
 }
  @Override
  public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
    // we pull this before the seed intentionally, because it's not consumed at runtime
    // (the skipInterval is written into the postings header)
    int skipInterval = _TestUtil.nextInt(seedRandom, 2, 10);

    if (LuceneTestCase.VERBOSE) {
      System.out.println("MockRandomCodec: skipInterval=" + skipInterval);
    }

    final long seed = seedRandom.nextLong();

    if (LuceneTestCase.VERBOSE) {
      System.out.println(
          "MockRandomCodec: writing to seg="
              + state.segmentName
              + " formatID="
              + state.segmentSuffix
              + " seed="
              + seed);
    }

    final String seedFileName =
        IndexFileNames.segmentFileName(state.segmentName, state.segmentSuffix, SEED_EXT);
    final IndexOutput out = state.directory.createOutput(seedFileName, state.context);
    try {
      out.writeLong(seed);
    } finally {
      out.close();
    }

    final Random random = new Random(seed);

    random.nextInt(); // consume a random for buffersize

    PostingsWriterBase postingsWriter;
    if (random.nextBoolean()) {
      postingsWriter = new SepPostingsWriter(state, new MockIntStreamFactory(random), skipInterval);
    } else {
      if (LuceneTestCase.VERBOSE) {
        System.out.println("MockRandomCodec: writing Standard postings");
      }
      postingsWriter = new Lucene40PostingsWriter(state, skipInterval);
    }

    if (random.nextBoolean()) {
      final int totTFCutoff = _TestUtil.nextInt(random, 1, 20);
      if (LuceneTestCase.VERBOSE) {
        System.out.println(
            "MockRandomCodec: writing pulsing postings with totTFCutoff=" + totTFCutoff);
      }
      postingsWriter = new PulsingPostingsWriter(totTFCutoff, postingsWriter);
    }

    final FieldsConsumer fields;

    if (random.nextBoolean()) {
      // Use BlockTree terms dict

      if (LuceneTestCase.VERBOSE) {
        System.out.println("MockRandomCodec: writing BlockTree terms dict");
      }

      // TODO: would be nice to allow 1 but this is very
      // slow to write
      final int minTermsInBlock = _TestUtil.nextInt(random, 2, 100);
      final int maxTermsInBlock = Math.max(2, (minTermsInBlock - 1) * 2 + random.nextInt(100));

      boolean success = false;
      try {
        fields = new BlockTreeTermsWriter(state, postingsWriter, minTermsInBlock, maxTermsInBlock);
        success = true;
      } finally {
        if (!success) {
          postingsWriter.close();
        }
      }
    } else {

      if (LuceneTestCase.VERBOSE) {
        System.out.println("MockRandomCodec: writing Block terms dict");
      }

      boolean success = false;

      final TermsIndexWriterBase indexWriter;
      try {
        if (random.nextBoolean()) {
          state.termIndexInterval = _TestUtil.nextInt(random, 1, 100);
          if (LuceneTestCase.VERBOSE) {
            System.out.println(
                "MockRandomCodec: fixed-gap terms index (tii=" + state.termIndexInterval + ")");
          }
          indexWriter = new FixedGapTermsIndexWriter(state);
        } else {
          final VariableGapTermsIndexWriter.IndexTermSelector selector;
          final int n2 = random.nextInt(3);
          if (n2 == 0) {
            final int tii = _TestUtil.nextInt(random, 1, 100);
            selector = new VariableGapTermsIndexWriter.EveryNTermSelector(tii);
            if (LuceneTestCase.VERBOSE) {
              System.out.println("MockRandomCodec: variable-gap terms index (tii=" + tii + ")");
            }
          } else if (n2 == 1) {
            final int docFreqThresh = _TestUtil.nextInt(random, 2, 100);
            final int tii = _TestUtil.nextInt(random, 1, 100);
            selector =
                new VariableGapTermsIndexWriter.EveryNOrDocFreqTermSelector(docFreqThresh, tii);
          } else {
            final long seed2 = random.nextLong();
            final int gap = _TestUtil.nextInt(random, 2, 40);
            if (LuceneTestCase.VERBOSE) {
              System.out.println("MockRandomCodec: random-gap terms index (max gap=" + gap + ")");
            }
            selector =
                new VariableGapTermsIndexWriter.IndexTermSelector() {
                  final Random rand = new Random(seed2);

                  @Override
                  public boolean isIndexTerm(BytesRef term, TermStats stats) {
                    return rand.nextInt(gap) == gap / 2;
                  }

                  @Override
                  public void newField(FieldInfo fieldInfo) {}
                };
          }
          indexWriter = new VariableGapTermsIndexWriter(state, selector);
        }
        success = true;
      } finally {
        if (!success) {
          postingsWriter.close();
        }
      }

      success = false;
      try {
        fields = new BlockTermsWriter(indexWriter, state, postingsWriter);
        success = true;
      } finally {
        if (!success) {
          try {
            postingsWriter.close();
          } finally {
            indexWriter.close();
          }
        }
      }
    }

    return fields;
  }
Example no. 30
 @Override
 public void close() throws IOException {
   dataOut.close();
 }