@Test
@Override
public void testIndexWritingAndFinding() throws IOException, InterruptedException {
  verifyBoth(cache0, cache1);
  IndexOutput indexOutput = dirA.createOutput(filename, IOContext.DEFAULT);
  indexOutput.writeString("no need to write, nobody ever will read this");
  indexOutput.flush();
  indexOutput.close();
  assertFileExistsHavingRLCount(filename, 1, true);
  IndexInput firstOpenOnB = dirB.openInput(filename, IOContext.DEFAULT);
  assertFileExistsHavingRLCount(filename, 2, true);
  dirA.deleteFile(filename);
  assertFileExistsHavingRLCount(filename, 1, false);
  // Lucene does use clone() - lock implementation ignores it as a clone is
  // cast on locked segments and released before the close on the parent object
  IndexInput cloneOfFirstOpenOnB = (IndexInput) firstOpenOnB.clone();
  assertFileExistsHavingRLCount(filename, 1, false);
  cloneOfFirstOpenOnB.close();
  assertFileExistsHavingRLCount(filename, 1, false);
  IndexInput firstOpenOnA = dirA.openInput(filename, IOContext.DEFAULT);
  assertFileExistsHavingRLCount(filename, 2, false);
  IndexInput secondOpenOnA = dirA.openInput(filename, IOContext.DEFAULT);
  assertFileExistsHavingRLCount(filename, 2, false);
  firstOpenOnA.close();
  assertFileExistsHavingRLCount(filename, 2, false);
  secondOpenOnA.close();
  assertFileExistsHavingRLCount(filename, 1, false);
  firstOpenOnB.close();
  assertFileNotExists(filename);
  dirA.close();
  dirB.close();
  verifyBoth(cache0, cache1);
}
// Make sure we don't somehow use more than 1 descriptor
// when reading a CFS with many subs:
public void testManySubFiles() throws IOException {
  final Directory d = newFSDirectory(_TestUtil.getTempDir("CFSManySubFiles"));
  final int FILE_COUNT = 10000;
  for (int fileIdx = 0; fileIdx < FILE_COUNT; fileIdx++) {
    IndexOutput out = d.createOutput("file." + fileIdx);
    out.writeByte((byte) fileIdx);
    out.close();
  }
  final CompoundFileWriter cfw = new CompoundFileWriter(d, "c.cfs");
  for (int fileIdx = 0; fileIdx < FILE_COUNT; fileIdx++) {
    cfw.addFile("file." + fileIdx);
  }
  cfw.close();
  final IndexInput[] ins = new IndexInput[FILE_COUNT];
  final CompoundFileReader cfr = new CompoundFileReader(d, "c.cfs");
  for (int fileIdx = 0; fileIdx < FILE_COUNT; fileIdx++) {
    ins[fileIdx] = cfr.openInput("file." + fileIdx);
  }
  for (int fileIdx = 0; fileIdx < FILE_COUNT; fileIdx++) {
    assertEquals((byte) fileIdx, ins[fileIdx].readByte());
  }
  for (int fileIdx = 0; fileIdx < FILE_COUNT; fileIdx++) {
    ins[fileIdx].close();
  }
  cfr.close();
  d.close();
}
public void close() throws IOException {
  try {
    final long dirStart = out.getFilePointer();
    final int fieldCount = fields.size();
    int nonNullFieldCount = 0;
    for (int i = 0; i < fieldCount; i++) {
      FSTFieldWriter field = fields.get(i);
      if (field.fst != null) {
        nonNullFieldCount++;
      }
    }
    out.writeVInt(nonNullFieldCount);
    for (int i = 0; i < fieldCount; i++) {
      FSTFieldWriter field = fields.get(i);
      if (field.fst != null) {
        out.writeVInt(field.fieldInfo.number);
        out.writeVLong(field.indexStart);
      }
    }
    writeTrailer(dirStart);
  } finally {
    out.close();
  }
}
private void testOn(Directory dir, int writeSize, int readSize, Cache cache) throws IOException {
  // needed to make sure no chunks are left over in case of the Infinispan implementation
  if (cache != null) cache.clear();
  final String filename = "chunkTest";
  IndexOutput indexOutput = dir.createOutput(filename);
  byte[] toWrite = fillBytes(writeSize);
  indexOutput.writeBytes(toWrite, writeSize);
  indexOutput.close();
  if (cache != null) {
    AssertJUnit.assertEquals(
        writeSize,
        DirectoryIntegrityCheck.deepCountFileSize(new FileCacheKey(INDEXNAME, filename), cache));
  }
  AssertJUnit.assertEquals(writeSize, indexOutput.length());
  byte[] results = new byte[readSize];
  IndexInput openInput = dir.openInput(filename);
  try {
    openInput.readBytes(results, 0, readSize);
    for (int i = 0; i < writeSize && i < readSize; i++) {
      AssertJUnit.assertEquals(results[i], toWrite[i]);
    }
    if (readSize > writeSize)
      AssertJUnit.fail("should have thrown an IOException for reading past EOF");
  } catch (IOException ioe) {
    if (readSize <= writeSize)
      AssertJUnit.fail("should not have thrown an IOException: " + ioe.getMessage());
  }
}
private void demo_FSIndexInputBug(Directory fsdir, String file) throws IOException {
  // Setup the test file - we need more than 1024 bytes
  IndexOutput os = fsdir.createOutput(file);
  for (int i = 0; i < 2000; i++) {
    os.writeByte((byte) i);
  }
  os.close();

  IndexInput in = fsdir.openInput(file);
  // This read primes the buffer in IndexInput
  in.readByte();
  // Close the file
  in.close();

  // ERROR: this call should fail, but succeeds because the buffer
  // is still filled
  in.readByte();

  // ERROR: this call should fail, but succeeds for some reason as well
  in.seek(1099);

  try {
    // OK: this call correctly fails. We are now past the 1024 internal
    // buffer, so an actual IO is attempted, which fails
    in.readByte();
    fail("expected readByte() to throw exception");
  } catch (IOException e) {
    // expected exception
  }
}
public void multipleFlushTest() throws IOException {
  final String filename = "longFile.writtenInMultipleFlushes";
  final int bufferSize = 300;
  Cache cache = cacheManager.getCache();
  cache.clear();
  Directory dir =
      DirectoryBuilder.newDirectoryInstance(cache, cache, cache, INDEXNAME)
          .chunkSize(13)
          .create();
  byte[] manyBytes = fillBytes(bufferSize);
  IndexOutput indexOutput = dir.createOutput(filename);
  for (int i = 0; i < 10; i++) {
    indexOutput.writeBytes(manyBytes, bufferSize);
    indexOutput.flush();
  }
  indexOutput.close();
  IndexInput input = dir.openInput(filename);
  final int finalSize = (10 * bufferSize);
  AssertJUnit.assertEquals(finalSize, input.length());
  final byte[] resultingBuffer = new byte[finalSize];
  input.readBytes(resultingBuffer, 0, finalSize);
  int index = 0;
  for (int i = 0; i < 10; i++) {
    for (int j = 0; j < bufferSize; j++)
      AssertJUnit.assertEquals(resultingBuffer[index++], manyBytes[j]);
  }
}
private void writeDoc() throws IOException {
  if (isFieldOpen())
    throw new IllegalStateException("Field is still open while writing document");
  // System.out.println("Writing doc pointer: " + currentDocPointer);

  // write document index record
  tvx.writeLong(currentDocPointer);

  // write document data record
  final int size = fields.size();

  // write the number of fields
  tvd.writeVInt(size);

  // write field numbers
  for (int i = 0; i < size; i++) {
    TVField field = (TVField) fields.elementAt(i);
    tvd.writeVInt(field.number);
  }

  // write field pointers
  long lastFieldPointer = 0;
  for (int i = 0; i < size; i++) {
    TVField field = (TVField) fields.elementAt(i);
    tvd.writeVLong(field.tvfPointer - lastFieldPointer);
    lastFieldPointer = field.tvfPointer;
  }
  // System.out.println("After writing doc pointer: " + tvx.getFilePointer());
}
// Encodes values as a sparse array: keys[] and values[].
// Access is log(N) where N = keys.length (slow!), so this is only appropriate as an
// exception table for the patched format, or when the common value is 0 (won't be
// accessed by searching).
private void addIndirect(
    FieldInfo field,
    final Iterable<Number> values,
    int count,
    final NormMap uniqueValues,
    final int minOrd)
    throws IOException {
  int commonCount = uniqueValues.freqs[minOrd];
  meta.writeVInt(count - commonCount);
  meta.writeByte(INDIRECT);
  meta.writeLong(data.getFilePointer());

  // write docs with value
  writeDocsWithValue(values, uniqueValues, minOrd);

  // write actual values
  writeNormsField(
      field,
      new Iterable<Number>() {
        @Override
        public Iterator<Number> iterator() {
          return new FilterIterator<Number, Number>(values.iterator()) {
            @Override
            protected boolean predicateFunction(Number value) {
              return uniqueValues.ord(value.byteValue()) > minOrd;
            }
          };
        }
      },
      1);
}
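// A minimal reader-side sketch (hypothetical, not part of the source above): given the sparse
// keys[]/values[] layout this method describes, a per-document value can be recovered by
// binary-searching the sorted doc IDs in keys[] and falling back to the common value otherwise.
// SparseNormLookup and lookup() are illustrative names, not real Lucene API.
class SparseNormLookup {
  static byte lookup(int[] keys, byte[] values, int docID, byte commonValue) {
    int idx = java.util.Arrays.binarySearch(keys, docID); // log(N) access, as the comment notes
    return idx >= 0 ? values[idx] : commonValue; // docs not in keys[] carry the common value
  }
}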
/** Close all streams. */
final void close() throws IOException {
  try {
    closeDocument();
  } finally {
    // make an effort to close all streams we can but remember and re-throw
    // the first exception encountered in this process
    IOException keep = null;
    if (tvx != null)
      try {
        tvx.close();
      } catch (IOException e) {
        if (keep == null) keep = e;
      }
    if (tvd != null)
      try {
        tvd.close();
      } catch (IOException e) {
        if (keep == null) keep = e;
      }
    if (tvf != null)
      try {
        tvf.close();
      } catch (IOException e) {
        if (keep == null) keep = e;
      }
    if (keep != null) throw (IOException) keep.fillInStackTrace();
  }
}
private void writeTable(
    Iterable<Number> values,
    FormatAndBits compression,
    int count,
    NormMap uniqueValues,
    int numOrds)
    throws IOException {
  data.writeVInt(PackedInts.VERSION_CURRENT);
  data.writeVInt(compression.format.getId());
  data.writeVInt(compression.bitsPerValue);

  data.writeVInt(numOrds);
  for (int i = 0; i < numOrds; i++) {
    data.writeByte(uniqueValues.values[i]);
  }

  final PackedInts.Writer writer =
      PackedInts.getWriterNoHeader(
          data,
          compression.format,
          count,
          compression.bitsPerValue,
          PackedInts.DEFAULT_BUFFER_SIZE);
  for (Number nv : values) {
    int ord = uniqueValues.ord(nv.byteValue());
    if (ord < numOrds) {
      writer.add(ord);
    } else {
      writer.add(numOrds); // collapses all ords >= numOrds into a single value
    }
  }
  writer.finish();
}
public void writeChecksums() throws IOException {
  String checksumName = CHECKSUMS_PREFIX + System.currentTimeMillis();
  ImmutableMap<String, StoreFileMetaData> files = list();
  synchronized (mutex) {
    Map<String, String> checksums = new HashMap<String, String>();
    for (StoreFileMetaData metaData : files.values()) {
      if (metaData.checksum() != null) {
        checksums.put(metaData.name(), metaData.checksum());
      }
    }
    IndexOutput output = directory.createOutput(checksumName, IOContext.DEFAULT, true);
    output.writeInt(0); // version
    output.writeStringStringMap(checksums);
    output.close();
  }
  for (StoreFileMetaData metaData : files.values()) {
    if (metaData.name().startsWith(CHECKSUMS_PREFIX) && !checksumName.equals(metaData.name())) {
      try {
        directory.deleteFileChecksum(metaData.name());
      } catch (Exception e) {
        // ignore
      }
    }
  }
}
@Test
public void testCleanUpWithLegacyChecksums() throws IOException {
  Map<String, StoreFileMetaData> metaDataMap = new HashMap<>();
  metaDataMap.put(
      "segments_1",
      new StoreFileMetaData("segments_1", 50, null, null, new BytesRef(new byte[] {1})));
  metaDataMap.put(
      "_0_1.del", new StoreFileMetaData("_0_1.del", 42, "foobarbaz", null, new BytesRef()));
  Store.MetadataSnapshot snapshot = new Store.MetadataSnapshot(metaDataMap);

  final ShardId shardId = new ShardId(new Index("index"), 1);
  DirectoryService directoryService = new LuceneManagedDirectoryService(random());
  Store store =
      new Store(
          shardId,
          ImmutableSettings.EMPTY,
          directoryService,
          randomDistributor(directoryService),
          new DummyShardLock(shardId));
  for (String file : metaDataMap.keySet()) {
    try (IndexOutput output = store.directory().createOutput(file, IOContext.DEFAULT)) {
      BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
      output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
      CodecUtil.writeFooter(output);
    }
  }

  store.verifyAfterCleanup(snapshot, snapshot);
  store.deleteContent();
  IOUtils.close(store);
}
public CompletionFieldsConsumer(SegmentWriteState state) throws IOException {
  this.delegatesFieldsConsumer = delegatePostingsFormat.fieldsConsumer(state);
  String suggestFSTFile =
      IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, EXTENSION);
  IndexOutput output = null;
  boolean success = false;
  try {
    output = state.directory.createOutput(suggestFSTFile, state.context);
    CodecUtil.writeIndexHeader(
        output,
        CODEC_NAME,
        SUGGEST_VERSION_CURRENT,
        state.segmentInfo.getId(),
        state.segmentSuffix);
    /*
     * we write the delegate postings format name so we can load it
     * without getting an instance in the ctor
     */
    output.writeString(delegatePostingsFormat.getName());
    output.writeString(writeProvider.getName());
    this.suggestFieldsConsumer = writeProvider.consumer(output);
    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(output);
    }
  }
}
private void addUncompressed(Iterable<Number> values, int count) throws IOException {
  meta.writeVInt(count);
  meta.writeByte(UNCOMPRESSED); // uncompressed byte[]
  meta.writeLong(data.getFilePointer());
  for (Number nv : values) {
    data.writeByte(nv.byteValue());
  }
}
/** Creates a file of the specified size with random data. */
private void createRandomFile(Directory dir, String name, int size) throws IOException {
  IndexOutput os = dir.createOutput(name);
  for (int i = 0; i < size; i++) {
    byte b = (byte) (Math.random() * 256);
    os.writeByte(b);
  }
  os.close();
}
@Override
public void flushTermsBlock(int start, int count) throws IOException {
  if (DEBUG)
    System.out.println(
        "PW: flushTermsBlock start="
            + start
            + " count="
            + count
            + " pendingTerms.size()="
            + pendingTerms.size());
  int wrappedCount = 0;
  assert buffer.getFilePointer() == 0;
  assert start >= count;

  final int limit = pendingTerms.size() - start + count;
  for (int idx = pendingTerms.size() - start; idx < limit; idx++) {
    final PendingTerm term = pendingTerms.get(idx);
    if (term == null) {
      wrappedCount++;
    } else {
      buffer.writeVInt(term.bytes.length);
      buffer.writeBytes(term.bytes, 0, term.bytes.length);
    }
  }

  termsOut.writeVInt((int) buffer.getFilePointer());
  buffer.writeTo(termsOut);
  buffer.reset();

  // TODO: this could be somewhat costly since
  // pendingTerms.size() could be biggish?
  int futureWrappedCount = 0;
  final int limit2 = pendingTerms.size();
  for (int idx = limit; idx < limit2; idx++) {
    if (pendingTerms.get(idx) == null) {
      futureWrappedCount++;
    }
  }

  // Remove the terms we just wrote:
  pendingTerms.subList(pendingTerms.size() - start, limit).clear();

  if (DEBUG)
    System.out.println(
        "PW: len="
            + buffer.getFilePointer()
            + " fp="
            + termsOut.getFilePointer()
            + " futureWrappedCount="
            + futureWrappedCount
            + " wrappedCount="
            + wrappedCount);

  // TODO: can we avoid calling this if all terms
  // were inlined...? Eg for a "primary key" field, the
  // wrapped codec is never invoked...
  wrappedPostingsWriter.flushTermsBlock(futureWrappedCount + wrappedCount, wrappedCount);
}
/**
 * Creates a file of the given size, using a RepeatableLongByteSequence object to generate
 * repeatable content.
 *
 * @param dir the Directory in which to create the file
 * @param fileName the name of the file to create
 * @param contentFileSize the size, in bytes, of the file content to create
 * @throws IOException
 */
private void createFileWithRepeatableContent(
    Directory dir, String fileName, final int contentFileSize) throws IOException {
  IndexOutput indexOutput = dir.createOutput(fileName);
  RepeatableLongByteSequence bytesGenerator = new RepeatableLongByteSequence();
  for (int i = 0; i < contentFileSize; i++) {
    indexOutput.writeByte(bytesGenerator.nextByte());
  }
  indexOutput.close();
}
/**
 * Creates a file of the specified size with sequential data. The first byte is written as the
 * start byte provided. All subsequent bytes are computed as start + offset, where offset is the
 * number of the byte.
 */
private void createSequenceFile(Directory dir, String name, byte start, int size)
    throws IOException {
  IndexOutput os = dir.createOutput(name);
  for (int i = 0; i < size; i++) {
    os.writeByte(start);
    start++;
  }
  os.close();
}
private void addTableCompressed(
    Iterable<Number> values, FormatAndBits compression, int count, NormMap uniqueValues)
    throws IOException {
  meta.writeVInt(count);
  meta.writeByte(TABLE_COMPRESSED); // table-compressed
  meta.writeLong(data.getFilePointer());
  writeTable(values, compression, count, uniqueValues, uniqueValues.size);
}
public void testEncodeDecode() throws IOException {
  final int iterations = RandomInts.randomIntBetween(random(), 1, 1000);
  final float acceptableOverheadRatio = random().nextFloat();
  final int[] values = new int[(iterations - 1) * BLOCK_SIZE + ForUtil.MAX_DATA_SIZE];
  for (int i = 0; i < iterations; ++i) {
    final int bpv = random().nextInt(32);
    if (bpv == 0) {
      final int value = RandomInts.randomIntBetween(random(), 0, Integer.MAX_VALUE);
      for (int j = 0; j < BLOCK_SIZE; ++j) {
        values[i * BLOCK_SIZE + j] = value;
      }
    } else {
      for (int j = 0; j < BLOCK_SIZE; ++j) {
        values[i * BLOCK_SIZE + j] =
            RandomInts.randomIntBetween(random(), 0, (int) PackedInts.maxValue(bpv));
      }
    }
  }

  final Directory d = new RAMDirectory();
  final long endPointer;

  { // encode
    IndexOutput out = d.createOutput("test.bin", IOContext.DEFAULT);
    final ForUtil forUtil = new ForUtil(acceptableOverheadRatio, out);
    for (int i = 0; i < iterations; ++i) {
      forUtil.writeBlock(
          Arrays.copyOfRange(values, i * BLOCK_SIZE, values.length),
          new byte[MAX_ENCODED_SIZE],
          out);
    }
    endPointer = out.getFilePointer();
    out.close();
  }

  { // decode
    IndexInput in = d.openInput("test.bin", IOContext.READONCE);
    final ForUtil forUtil = new ForUtil(in);
    for (int i = 0; i < iterations; ++i) {
      if (random().nextBoolean()) {
        forUtil.skipBlock(in);
        continue;
      }
      final int[] restored = new int[MAX_DATA_SIZE];
      forUtil.readBlock(in, new byte[MAX_ENCODED_SIZE], restored);
      assertArrayEquals(
          Arrays.copyOfRange(values, i * BLOCK_SIZE, (i + 1) * BLOCK_SIZE),
          Arrays.copyOf(restored, BLOCK_SIZE));
    }
    assertEquals(endPointer, in.getFilePointer());
    in.close();
  }
}
final void finishCommit(Directory dir) throws IOException {
  if (pendingSegnOutput == null) throw new IllegalStateException("prepareCommit was not called");
  boolean success = false;
  try {
    pendingSegnOutput.finishCommit();
    pendingSegnOutput.close();
    pendingSegnOutput = null;
    success = true;
  } finally {
    if (!success) rollbackCommit(dir);
  }

  // NOTE: if we crash here, we have left a segments_N
  // file in the directory in a possibly corrupt state (if
  // some bytes made it to stable storage and others
  // didn't). But, the segments_N file includes checksum
  // at the end, which should catch this case. So when a
  // reader tries to read it, it will throw a
  // CorruptIndexException, which should cause the retry
  // logic in SegmentInfos to kick in and load the last
  // good (previous) segments_N-1 file.

  final String fileName =
      IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", generation);
  success = false;
  try {
    dir.sync(Collections.singleton(fileName));
    success = true;
  } finally {
    if (!success) {
      try {
        dir.deleteFile(fileName);
      } catch (Throwable t) {
        // Suppress so we keep throwing the original exception
      }
    }
  }

  lastGeneration = generation;

  try {
    IndexOutput genOutput = dir.createOutput(IndexFileNames.SEGMENTS_GEN);
    try {
      genOutput.writeInt(FORMAT_LOCKLESS);
      genOutput.writeLong(generation);
      genOutput.writeLong(generation);
    } finally {
      genOutput.close();
    }
  } catch (ThreadInterruptedException t) {
    throw t;
  } catch (Throwable t) {
    // It's OK if we fail to write this file since it's
    // used only as one of the retry fallbacks.
  }
}
@Override
public void write(Directory dir, SegmentInfo si, IOContext context) throws IOException {
  String dataFile = IndexFileNames.segmentFileName(si.name, "", DATA_EXTENSION);

  int numFiles = si.files().size();
  String names[] = si.files().toArray(new String[numFiles]);
  Arrays.sort(names);
  long startOffsets[] = new long[numFiles];
  long endOffsets[] = new long[numFiles];

  BytesRefBuilder scratch = new BytesRefBuilder();

  try (IndexOutput out = dir.createOutput(dataFile, context)) {
    for (int i = 0; i < names.length; i++) {
      // write header for file
      SimpleTextUtil.write(out, HEADER);
      SimpleTextUtil.write(out, names[i], scratch);
      SimpleTextUtil.writeNewline(out);

      // write bytes for file
      startOffsets[i] = out.getFilePointer();
      try (IndexInput in = dir.openInput(names[i], IOContext.READONCE)) {
        out.copyBytes(in, in.length());
      }
      endOffsets[i] = out.getFilePointer();
    }

    long tocPos = out.getFilePointer();

    // write CFS table
    SimpleTextUtil.write(out, TABLE);
    SimpleTextUtil.write(out, Integer.toString(numFiles), scratch);
    SimpleTextUtil.writeNewline(out);

    for (int i = 0; i < names.length; i++) {
      SimpleTextUtil.write(out, TABLENAME);
      SimpleTextUtil.write(out, names[i], scratch);
      SimpleTextUtil.writeNewline(out);

      SimpleTextUtil.write(out, TABLESTART);
      SimpleTextUtil.write(out, Long.toString(startOffsets[i]), scratch);
      SimpleTextUtil.writeNewline(out);

      SimpleTextUtil.write(out, TABLEEND);
      SimpleTextUtil.write(out, Long.toString(endOffsets[i]), scratch);
      SimpleTextUtil.writeNewline(out);
    }

    DecimalFormat df =
        new DecimalFormat(OFFSETPATTERN, DecimalFormatSymbols.getInstance(Locale.ROOT));
    SimpleTextUtil.write(out, TABLEPOS);
    SimpleTextUtil.write(out, df.format(tocPos), scratch);
    SimpleTextUtil.writeNewline(out);
  }
}
@Override
public void resetSkip() {
  super.resetSkip();
  Arrays.fill(lastSkipDoc, 0);
  // we don't have to write the first length in the skip list
  Arrays.fill(lastSkipPayloadLength, -1);
  // we don't have to write the first length in the skip list
  Arrays.fill(lastSkipOffsetLength, -1);
  Arrays.fill(lastSkipFreqPointer, freqOutput.getFilePointer());
  if (proxOutput != null) Arrays.fill(lastSkipProxPointer, proxOutput.getFilePointer());
}
private void writeField() throws IOException {
  // remember where this field is written
  currentField.tvfPointer = tvf.getFilePointer();
  // System.out.println("Field Pointer: " + currentField.tvfPointer);

  final int size = terms.size();
  tvf.writeVInt(size);

  boolean storePositions = currentField.storePositions;
  boolean storeOffsets = currentField.storeOffsets;
  byte bits = 0x0;
  if (storePositions) bits |= STORE_POSITIONS_WITH_TERMVECTOR;
  if (storeOffsets) bits |= STORE_OFFSET_WITH_TERMVECTOR;
  tvf.writeByte(bits);

  String lastTermText = "";
  for (int i = 0; i < size; i++) {
    TVTerm term = (TVTerm) terms.elementAt(i);
    int start = StringHelper.stringDifference(lastTermText, term.termText);
    int length = term.termText.length() - start;
    tvf.writeVInt(start); // write shared prefix length
    tvf.writeVInt(length); // write delta length
    tvf.writeChars(term.termText, start, length); // write delta chars
    tvf.writeVInt(term.freq);
    lastTermText = term.termText;

    if (storePositions) {
      if (term.positions == null)
        throw new IllegalStateException("Trying to write positions that are null!");

      // use delta encoding for positions
      int position = 0;
      for (int j = 0; j < term.freq; j++) {
        tvf.writeVInt(term.positions[j] - position);
        position = term.positions[j];
      }
    }

    if (storeOffsets) {
      if (term.offsets == null)
        throw new IllegalStateException("Trying to write offsets that are null!");

      // use delta encoding for offsets
      int position = 0;
      for (int j = 0; j < term.freq; j++) {
        tvf.writeVInt(term.offsets[j].getStartOffset() - position);
        // Save the diff between the two.
        tvf.writeVInt(term.offsets[j].getEndOffset() - term.offsets[j].getStartOffset());
        position = term.offsets[j].getEndOffset();
      }
    }
  }
}
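// A hypothetical sketch (not from the source above) of the inverse of the delta encoding used
// for positions: each stored value is (positions[j] - previousPosition), so a reader rebuilds
// the absolute positions with a running sum. DeltaDecodeSketch/decodePositions are illustrative
// names, not real Lucene API.
class DeltaDecodeSketch {
  static int[] decodePositions(int[] deltas) {
    int[] positions = new int[deltas.length];
    int position = 0;
    for (int j = 0; j < deltas.length; j++) {
      position += deltas[j]; // undo the delta: add back the previous absolute position
      positions[j] = position;
    }
    return positions;
  }
}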
@Override
public void close() {
  try {
    output.flush();
    output.close();
    super.close();
  } catch (IOException e) {
    // TODO Auto-generated catch block
    e.printStackTrace();
  }
}
@Override
public void messageReceived(final RecoveryFileChunkRequest request, TransportChannel channel)
    throws Exception {
  try (RecoveriesCollection.StatusRef statusRef =
      onGoingRecoveries.getStatusSafe(request.recoveryId(), request.shardId())) {
    final RecoveryStatus recoveryStatus = statusRef.status();
    final Store store = recoveryStatus.store();
    recoveryStatus.state().getTranslog().totalOperations(request.totalTranslogOps());
    final RecoveryState.Index indexState = recoveryStatus.state().getIndex();
    if (request.sourceThrottleTimeInNanos() != RecoveryState.Index.UNKNOWN) {
      indexState.addSourceThrottling(request.sourceThrottleTimeInNanos());
    }
    IndexOutput indexOutput;
    if (request.position() == 0) {
      indexOutput =
          recoveryStatus.openAndPutIndexOutput(request.name(), request.metadata(), store);
    } else {
      indexOutput = recoveryStatus.getOpenIndexOutput(request.name());
    }
    BytesReference content = request.content();
    if (!content.hasArray()) {
      content = content.toBytesArray();
    }
    RateLimiter rl = recoverySettings.rateLimiter();
    if (rl != null) {
      long bytes = bytesSinceLastPause.addAndGet(content.length());
      if (bytes > rl.getMinPauseCheckBytes()) {
        // Time to pause
        bytesSinceLastPause.addAndGet(-bytes);
        long throttleTimeInNanos = rl.pause(bytes);
        indexState.addTargetThrottling(throttleTimeInNanos);
        recoveryStatus.indexShard().recoveryStats().addThrottleTime(throttleTimeInNanos);
      }
    }
    indexOutput.writeBytes(content.array(), content.arrayOffset(), content.length());
    indexState.addRecoveredBytesToFile(request.name(), content.length());
    if (indexOutput.getFilePointer() >= request.length() || request.lastChunk()) {
      try {
        Store.verify(indexOutput);
      } finally {
        // we are done
        indexOutput.close();
      }
      // write the checksum
      recoveryStatus.legacyChecksums().add(request.metadata());
      final String temporaryFileName = recoveryStatus.getTempNameForFile(request.name());
      assert Arrays.asList(store.directory().listAll()).contains(temporaryFileName);
      store.directory().sync(Collections.singleton(temporaryFileName));
      IndexOutput remove = recoveryStatus.removeOpenIndexOutputs(request.name());
      assert remove == null || remove == indexOutput; // remove may be null if we already finished
    }
  }
  channel.sendResponse(TransportResponse.Empty.INSTANCE);
}
/** Sets the values for the current skip data. */
public void setSkipData(
    int doc, boolean storePayloads, int payloadLength, boolean storeOffsets, int offsetLength) {
  assert storePayloads || payloadLength == -1;
  assert storeOffsets || offsetLength == -1;
  this.curDoc = doc;
  this.curStorePayloads = storePayloads;
  this.curPayloadLength = payloadLength;
  this.curStoreOffsets = storeOffsets;
  this.curOffsetLength = offsetLength;
  this.curFreqPointer = freqOutput.getFilePointer();
  if (proxOutput != null) this.curProxPointer = proxOutput.getFilePointer();
}
@Test
public void testNoDocs() throws IOException {
  AnalyzingCompletionLookupProvider provider =
      new AnalyzingCompletionLookupProvider(true, false, true, true);
  RAMDirectory dir = new RAMDirectory();
  IndexOutput output = dir.createOutput("foo.txt", IOContext.DEFAULT);
  FieldsConsumer consumer = provider.consumer(output);
  consumer.write(
      new Fields() {
        @Override
        public Iterator<String> iterator() {
          return Arrays.asList("foo").iterator();
        }

        @Override
        public Terms terms(String field) throws IOException {
          return null;
        }

        @Override
        public int size() {
          return 1;
        }
      });
  consumer.close();
  output.close();

  IndexInput input = dir.openInput("foo.txt", IOContext.DEFAULT);
  LookupFactory load = provider.load(input);
  PostingsFormat format = new Elasticsearch090PostingsFormat();
  NamedAnalyzer analyzer = new NamedAnalyzer("foo", new StandardAnalyzer());
  assertNull(
      load.getLookup(
          new CompletionFieldMapper(
              new Names("foo"),
              analyzer,
              analyzer,
              format,
              null,
              true,
              true,
              true,
              Integer.MAX_VALUE,
              indexSettings,
              AbstractFieldMapper.MultiFields.empty(),
              null,
              ContextMapping.EMPTY_MAPPING),
          new CompletionSuggestionContext(null)));
  dir.close();
}
public TermVectorsWriter(Directory directory, String segment, FieldInfos fieldInfos)
    throws IOException {
  // Open files for TermVector storage
  tvx = directory.createOutput(segment + TVX_EXTENSION);
  tvx.writeInt(FORMAT_VERSION);
  tvd = directory.createOutput(segment + TVD_EXTENSION);
  tvd.writeInt(FORMAT_VERSION);
  tvf = directory.createOutput(segment + TVF_EXTENSION);
  tvf.writeInt(FORMAT_VERSION);

  this.fieldInfos = fieldInfos;
  fields = new Vector(fieldInfos.size());
  terms = new Vector();
}
@Override
public void start(IndexOutput termsOut) throws IOException {
  this.termsOut = termsOut;
  CodecUtil.writeHeader(termsOut, CODEC, VERSION_CURRENT);
  termsOut.writeVInt(pending.length); // encode maxPositions in header
  wrappedPostingsWriter.start(termsOut);
}