/** Test attributes map */
public void testAttributes() throws Exception {
  Directory dir = newDirectory();
  Codec codec = getCodec();
  byte[] id = StringHelper.randomId();
  Map<String, String> attributes = new HashMap<>();
  attributes.put("key1", "value1");
  attributes.put("key2", "value2");
  SegmentInfo info =
      new SegmentInfo(dir, getVersions()[0], "_123", 1, false, codec,
          Collections.emptyMap(), id, attributes, null);
  info.setFiles(Collections.<String>emptySet());
  codec.segmentInfoFormat().write(dir, info, IOContext.DEFAULT);

  SegmentInfo info2 = codec.segmentInfoFormat().read(dir, "_123", id, IOContext.DEFAULT);
  assertEquals(attributes, info2.getAttributes());

  // attributes map should be immutable
  expectThrows(
      UnsupportedOperationException.class,
      () -> {
        info2.getAttributes().put("bogus", "bogus");
      });

  dir.close();
}
/** initialize native resources */
public static void initializeNatives(Path tmpFile, boolean mlockAll, boolean seccomp, boolean ctrlHandler) {
  final ESLogger logger = Loggers.getLogger(Bootstrap.class);

  // check if the user is running as root, and bail
  if (Natives.definitelyRunningAsRoot()) {
    if (Boolean.parseBoolean(System.getProperty("es.insecure.allow.root"))) {
      logger.warn("running as ROOT user. this is a bad idea!");
    } else {
      throw new RuntimeException("don't run elasticsearch as root.");
    }
  }

  // enable secure computing mode
  if (seccomp) {
    Natives.trySeccomp(tmpFile);
  }

  // mlockall if requested
  if (mlockAll) {
    if (Constants.WINDOWS) {
      Natives.tryVirtualLock();
    } else {
      Natives.tryMlockall();
    }
  }

  // listener for windows close event
  if (ctrlHandler) {
    Natives.addConsoleCtrlHandler(new ConsoleCtrlHandler() {
      @Override
      public boolean handle(int code) {
        if (CTRL_CLOSE_EVENT == code) {
          logger.info("running graceful exit on windows");
          try {
            Bootstrap.stop();
          } catch (IOException e) {
            throw new ElasticsearchException("failed to stop node", e);
          }
          return true;
        }
        return false;
      }
    });
  }

  // force remainder of JNA to be loaded (if available).
  try {
    JNAKernel32Library.getInstance();
  } catch (Throwable ignored) {
    // we've already logged this.
  }

  // init lucene random seed. it will use /dev/urandom where available:
  StringHelper.randomId();
}
/**
 * Test a segment infos read that hits an exception on close; make sure we get our exception
 * back, no file handle leaks, etc.
 */
public void testExceptionOnCloseInput() throws Exception {
  Failure fail =
      new Failure() {
        @Override
        public void eval(MockDirectoryWrapper dir) throws IOException {
          // throw only while a close() frame is on the stack, and only once armed via setDoFail()
          for (StackTraceElement e : Thread.currentThread().getStackTrace()) {
            if (doFail && "close".equals(e.getMethodName())) {
              throw new FakeIOException();
            }
          }
        }
      };

  MockDirectoryWrapper dir = newMockDirectory();
  dir.failOn(fail);
  Codec codec = getCodec();
  byte[] id = StringHelper.randomId();
  SegmentInfo info =
      new SegmentInfo(dir, getVersions()[0], "_123", 1, false, codec,
          Collections.<String, String>emptyMap(), id, new HashMap<>(), null);
  info.setFiles(Collections.<String>emptySet());
  codec.segmentInfoFormat().write(dir, info, IOContext.DEFAULT);

  fail.setDoFail();
  expectThrows(
      FakeIOException.class,
      () -> {
        codec.segmentInfoFormat().read(dir, "_123", id, IOContext.DEFAULT);
      });
  fail.clearDoFail();

  dir.close();
}
/** Test files map */
public void testFiles() throws Exception {
  Directory dir = newDirectory();
  Codec codec = getCodec();
  byte[] id = StringHelper.randomId();
  SegmentInfo info =
      new SegmentInfo(dir, getVersions()[0], "_123", 1, false, codec,
          Collections.<String, String>emptyMap(), id, new HashMap<>(), null);
  info.setFiles(Collections.<String>emptySet());
  codec.segmentInfoFormat().write(dir, info, IOContext.DEFAULT);

  SegmentInfo info2 = codec.segmentInfoFormat().read(dir, "_123", id, IOContext.DEFAULT);
  assertEquals(info.files(), info2.files());

  dir.close();
}
/** Test sort */
public void testSort() throws IOException {
  assumeTrue("test requires a codec that can read/write index sort", supportsIndexSort());

  final int iters = atLeast(5);
  for (int i = 0; i < iters; ++i) {
    Sort sort;
    if (i == 0) {
      sort = null;
    } else {
      final int numSortFields = TestUtil.nextInt(random(), 1, 3);
      SortField[] sortFields = new SortField[numSortFields];
      for (int j = 0; j < numSortFields; ++j) {
        sortFields[j] = randomIndexSortField();
      }
      sort = new Sort(sortFields);
    }

    Directory dir = newDirectory();
    Codec codec = getCodec();
    byte[] id = StringHelper.randomId();
    SegmentInfo info =
        new SegmentInfo(dir, getVersions()[0], "_123", 1, false, codec,
            Collections.<String, String>emptyMap(), id, new HashMap<>(), sort);
    info.setFiles(Collections.<String>emptySet());
    codec.segmentInfoFormat().write(dir, info, IOContext.DEFAULT);

    SegmentInfo info2 = codec.segmentInfoFormat().read(dir, "_123", id, IOContext.DEFAULT);
    assertEquals(sort, info2.getIndexSort());

    dir.close();
  }
}
/** Tests SI writer adds itself to files... */
public void testAddsSelfToFiles() throws Exception {
  Directory dir = newDirectory();
  Codec codec = getCodec();
  byte[] id = StringHelper.randomId();
  SegmentInfo info =
      new SegmentInfo(dir, getVersions()[0], "_123", 1, false, codec,
          Collections.<String, String>emptyMap(), id, new HashMap<>(), null);
  Set<String> originalFiles = Collections.singleton("_123.a");
  info.setFiles(originalFiles);
  codec.segmentInfoFormat().write(dir, info, IOContext.DEFAULT);

  Set<String> modifiedFiles = info.files();
  assertTrue(modifiedFiles.containsAll(originalFiles));
  assertTrue(
      "did you forget to add yourself to files()", modifiedFiles.size() > originalFiles.size());

  SegmentInfo info2 = codec.segmentInfoFormat().read(dir, "_123", id, IOContext.DEFAULT);
  assertEquals(info.files(), info2.files());

  // files set should be immutable
  expectThrows(
      UnsupportedOperationException.class,
      () -> {
        info2.files().add("bogus");
      });

  dir.close();
}
public void testMerge() throws IOException {
  final Codec codec = Codec.getDefault();
  final SegmentInfo si =
      new SegmentInfo(mergedDir, Version.LATEST, mergedSegment, -1, false, codec,
          Collections.emptyMap(), StringHelper.randomId(), new HashMap<>());

  SegmentMerger merger =
      new SegmentMerger(
          Arrays.<CodecReader>asList(reader1, reader2),
          si,
          InfoStream.getDefault(),
          mergedDir,
          new FieldInfos.FieldNumbers(),
          newIOContext(random(), new IOContext(new MergeInfo(-1, -1, false, -1))));
  MergeState mergeState = merger.merge();
  int docsMerged = mergeState.segmentInfo.maxDoc();
  assertTrue(docsMerged == 2);

  // Should be able to open a new SegmentReader against the new directory
  SegmentReader mergedReader =
      new SegmentReader(
          new SegmentCommitInfo(mergeState.segmentInfo, 0, -1L, -1L, -1L),
          newIOContext(random()));
  assertTrue(mergedReader != null);
  assertTrue(mergedReader.numDocs() == 2);

  Document newDoc1 = mergedReader.document(0);
  assertTrue(newDoc1 != null);
  // There are 2 unstored fields on the document
  assertTrue(
      DocHelper.numFields(newDoc1) == DocHelper.numFields(doc1) - DocHelper.unstored.size());
  Document newDoc2 = mergedReader.document(1);
  assertTrue(newDoc2 != null);
  assertTrue(
      DocHelper.numFields(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size());

  PostingsEnum termDocs =
      TestUtil.docs(
          random(), mergedReader, DocHelper.TEXT_FIELD_2_KEY, new BytesRef("field"), null, 0);
  assertTrue(termDocs != null);
  assertTrue(termDocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);

  int tvCount = 0;
  for (FieldInfo fieldInfo : mergedReader.getFieldInfos()) {
    if (fieldInfo.hasVectors()) {
      tvCount++;
    }
  }

  // System.out.println("stored size: " + stored.size());
  assertEquals("We do not have 3 fields that were indexed with term vector", 3, tvCount);

  Terms vector = mergedReader.getTermVectors(0).terms(DocHelper.TEXT_FIELD_2_KEY);
  assertNotNull(vector);
  assertEquals(3, vector.size());
  TermsEnum termsEnum = vector.iterator();
  int i = 0;
  while (termsEnum.next() != null) {
    String term = termsEnum.term().utf8ToString();
    int freq = (int) termsEnum.totalTermFreq();
    // System.out.println("Term: " + term + " Freq: " + freq);
    assertTrue(DocHelper.FIELD_2_TEXT.indexOf(term) != -1);
    assertTrue(DocHelper.FIELD_2_FREQS[i] == freq);
    i++;
  }

  TestSegmentReader.checkNorms(mergedReader);
  mergedReader.close();
}
private void write(Directory directory) throws IOException {
  long nextGeneration = getNextPendingGeneration();
  String segmentFileName =
      IndexFileNames.fileNameFromGeneration(IndexFileNames.PENDING_SEGMENTS, "", nextGeneration);

  // Always advance the generation on write:
  generation = nextGeneration;

  IndexOutput segnOutput = null;
  boolean success = false;

  try {
    segnOutput = directory.createOutput(segmentFileName, IOContext.DEFAULT);
    CodecUtil.writeIndexHeader(
        segnOutput,
        "segments",
        VERSION_CURRENT,
        StringHelper.randomId(),
        Long.toString(nextGeneration, Character.MAX_RADIX));
    segnOutput.writeVInt(Version.LATEST.major);
    segnOutput.writeVInt(Version.LATEST.minor);
    segnOutput.writeVInt(Version.LATEST.bugfix);

    segnOutput.writeLong(version);
    segnOutput.writeInt(counter); // write counter
    segnOutput.writeInt(size());

    if (size() > 0) {
      Version minSegmentVersion = null;

      // We do a separate loop up front so we can write the minSegmentVersion before
      // any SegmentInfo; this makes it cleaner to throw IndexFormatTooOldExc at read time:
      for (SegmentCommitInfo siPerCommit : this) {
        Version segmentVersion = siPerCommit.info.getVersion();
        if (minSegmentVersion == null || segmentVersion.onOrAfter(minSegmentVersion) == false) {
          minSegmentVersion = segmentVersion;
        }
      }

      segnOutput.writeVInt(minSegmentVersion.major);
      segnOutput.writeVInt(minSegmentVersion.minor);
      segnOutput.writeVInt(minSegmentVersion.bugfix);
    }

    // write infos
    for (SegmentCommitInfo siPerCommit : this) {
      SegmentInfo si = siPerCommit.info;
      segnOutput.writeString(si.name);
      byte[] segmentID = si.getId();
      // TODO: remove this in lucene 6, we don't need to include 4.x segments in commits anymore
      if (segmentID == null) {
        segnOutput.writeByte((byte) 0);
      } else {
        if (segmentID.length != StringHelper.ID_LENGTH) {
          throw new IllegalStateException(
              "cannot write segment: invalid id segment="
                  + si.name
                  + " id="
                  + StringHelper.idToString(segmentID));
        }
        segnOutput.writeByte((byte) 1);
        segnOutput.writeBytes(segmentID, segmentID.length);
      }
      segnOutput.writeString(si.getCodec().getName());
      segnOutput.writeLong(siPerCommit.getDelGen());
      int delCount = siPerCommit.getDelCount();
      if (delCount < 0 || delCount > si.maxDoc()) {
        throw new IllegalStateException(
            "cannot write segment: invalid maxDoc segment="
                + si.name
                + " maxDoc="
                + si.maxDoc()
                + " delCount="
                + delCount);
      }
      segnOutput.writeInt(delCount);
      segnOutput.writeLong(siPerCommit.getFieldInfosGen());
      segnOutput.writeLong(siPerCommit.getDocValuesGen());
      segnOutput.writeSetOfStrings(siPerCommit.getFieldInfosFiles());
      final Map<Integer, Set<String>> dvUpdatesFiles = siPerCommit.getDocValuesUpdatesFiles();
      segnOutput.writeInt(dvUpdatesFiles.size());
      for (Entry<Integer, Set<String>> e : dvUpdatesFiles.entrySet()) {
        segnOutput.writeInt(e.getKey());
        segnOutput.writeSetOfStrings(e.getValue());
      }
    }
    segnOutput.writeMapOfStrings(userData);
    CodecUtil.writeFooter(segnOutput);
    segnOutput.close();
    directory.sync(Collections.singleton(segmentFileName));
    success = true;
  } finally {
    if (success) {
      pendingCommit = true;
    } else {
      // We hit an exception above; try to close the file
      // but suppress any exception:
      IOUtils.closeWhileHandlingException(segnOutput);
      // Try not to leave a truncated segments_N file in
      // the index:
      IOUtils.deleteFilesIgnoringExceptions(directory, segmentFileName);
    }
  }
}