/**
 * Rotates the underlying log file if rotation is enabled, the file has grown
 * past the configured threshold, and no rotation is already in progress.
 */
private void checkRotation() {
  // A null threshold means rotation is disabled.
  if (rotationThreshold == null) {
    return;
  }
  boolean thresholdExceeded = fileSystem.getFileSize(file) > rotationThreshold;
  if (thresholdExceeded && !doingRotation) {
    doRotation();
  }
}
/**
 * Determines whether recovery is required for the database in {@code dataDir},
 * using the log version recorded in the neostore file, or 0 if no neostore
 * file exists.
 *
 * @throws IOException if reading store metadata or the log fails
 */
public boolean recoveryNeededAt(File dataDir) throws IOException {
  File neoStoreFile = new File(dataDir, NeoStore.DEFAULT_NAME);
  long currentLogVersion = 0;
  if (fs.fileExists(neoStoreFile)) {
    currentLogVersion = new NeoStoreUtil(dataDir, fs).getLogVersion();
  }
  return recoveryNeededAt(dataDir, currentLogVersion);
}
/**
 * Dumps, in human-readable form, every log entry of every logical log found at
 * {@code filenameOrDirectory} to {@code out}.
 *
 * @param filenameOrDirectory a single log file, or a directory containing log files
 * @param out stream the textual dump is printed to
 * @param timeZone time zone used by the printing consumer when rendering entries
 * @return the number of log files found and dumped
 * @throws IOException if a log file cannot be opened or its header cannot be read
 */
public int dump(String filenameOrDirectory, PrintStream out, TimeZone timeZone) throws IOException {
  int logsFound = 0;
  for (String fileName : filenamesOf(filenameOrDirectory, getLogPrefix())) {
    logsFound++;
    out.println("=== " + fileName + " ===");
    // try-with-resources guarantees the channel is closed on every path; the
    // original only closed it explicitly on the header-read failure path and
    // leaked it after a successful dump.
    try (StoreChannel fileChannel = fileSystem.open(new File(fileName), "r")) {
      ByteBuffer buffer = ByteBuffer.allocateDirect(9 + Xid.MAXGTRIDSIZE + Xid.MAXBQUALSIZE * 10);
      long logVersion, prevLastCommittedTx;
      try {
        long[] header = VersionAwareLogEntryReader.readLogHeader(buffer, fileChannel, false);
        logVersion = header[0];
        prevLastCommittedTx = header[1];
      } catch (IOException ex) {
        out.println("Unable to read timestamp information, no records in logical log.");
        out.println(ex.getMessage());
        throw ex;
      }
      out.println(
          "Logical log version: " + logVersion + " with prev committed tx[" + prevLastCommittedTx + "]");
      LogDeserializer deserializer = new LogDeserializer(buffer, instantiateCommandReaderFactory());
      PrintingConsumer consumer = new PrintingConsumer(out, timeZone);
      try (Cursor<LogEntry, IOException> cursor = deserializer.cursor(fileChannel)) {
        // The consumer prints each entry as a side effect of next().
        while (cursor.next(consumer)) ;
      }
    }
  }
  return logsFound;
}
/** @return a {@link RequestContext} specifying at which point the store copy started. */ public RequestContext flushStoresAndStreamStoreFiles(StoreWriter writer, boolean includeLogs) { try { long lastAppliedTransaction = transactionIdStore.getLastClosedTransactionId(); monitor.startFlushingEverything(); logRotationControl.forceEverything(); monitor.finishFlushingEverything(); ByteBuffer temporaryBuffer = ByteBuffer.allocateDirect(1024 * 1024); // Copy the store files monitor.startStreamingStoreFiles(); try (ResourceIterator<File> files = dataSource.listStoreFiles(includeLogs)) { while (files.hasNext()) { File file = files.next(); try (StoreChannel fileChannel = fileSystem.open(file, "r")) { monitor.startStreamingStoreFile(file); writer.write( relativePath(storeDirectory, file), fileChannel, temporaryBuffer, file.length() > 0); monitor.finishStreamingStoreFile(file); } } } finally { monitor.finishStreamingStoreFiles(); } return anonymous(lastAppliedTransaction); } catch (IOException e) { throw new ServerFailureException(e); } }
/**
 * Finds every index of the given provider whose stored value types include
 * arrays, and deletes those index directories recursively. Deletion is deferred
 * until all schema rules have been scanned.
 */
private void deleteIndexesContainingArrayValues(
    File storeDir, PageCache pageCache, SchemaIndexProvider schemaIndexProvider) throws IOException {
  File providerRoot = getRootDirectory(storeDir, schemaIndexProvider.getProviderDescriptor().getKey());
  IndexSamplingConfig samplingConfig = new IndexSamplingConfig(new Config());
  List<File> toDelete = new ArrayList<>();
  try (SchemaStore schemaStore = schemaStoreProvider.provide(storeDir, pageCache)) {
    for (Iterator<SchemaRule> ruleIterator = schemaStore.loadAllSchemaRules(); ruleIterator.hasNext(); ) {
      SchemaRule rule = ruleIterator.next();
      boolean isUniquenessIndex = rule.getKind() == UNIQUENESS_CONSTRAINT;
      IndexConfiguration indexConfig = new IndexConfiguration(isUniquenessIndex);
      // Reader is closed before the accessor, as with the nested form.
      try (IndexAccessor accessor =
              schemaIndexProvider.getOnlineAccessor(rule.getId(), indexConfig, samplingConfig);
          IndexReader reader = accessor.newReader()) {
        if (reader.valueTypesInIndex().contains(Array.class)) {
          toDelete.add(new File(providerRoot, "" + rule.getId()));
        }
      }
    }
  }
  for (File indexDirectory : toDelete) {
    fileSystem.deleteRecursively(indexDirectory);
  }
}
/**
 * (Re)creates the print writer, appending to the current log file, and then
 * notifies all registered rotation listeners.
 *
 * @throws IOException if the log file cannot be opened for appending
 */
private void instantiateWriter() throws IOException {
  // Open in append mode so existing log content is preserved.
  out =
      new PrintWriter(
          new OutputStreamWriter(fileSystem.openAsOutputStream(file, true), encoding));
  // Let listeners react to the (re)opened log file.
  for (Runnable rotationListener : onRotation) {
    rotationListener.run();
  }
}
/**
 * Shifts the current log file and its rotated predecessors one generation up:
 * messages.log.1 -> messages.log.2, messages.log -> messages.log.1.
 *
 * <p>The oldest file (messages.log.NUMBER_OF_OLD_LOGS_TO_KEEP) is deleted first,
 * if it exists, to make room for the shift.
 */
private void moveAwayFile() {
  File directory = file.getParentFile();
  String baseName = file.getName();

  File oldest = new File(directory, baseName + "." + NUMBER_OF_OLD_LOGS_TO_KEEP);
  if (fileSystem.fileExists(oldest)) {
    fileSystem.deleteFile(oldest);
  }

  // Rename oldest-first (highest generation down to the live file) so no rename
  // overwrites a file that still needs moving.
  for (int generation = NUMBER_OF_OLD_LOGS_TO_KEEP - 1; generation >= 0; generation--) {
    String suffix = generation == 0 ? "" : ("." + generation);
    File candidate = new File(directory, baseName + suffix);
    if (!fileSystem.fileExists(candidate)) {
      continue;
    }
    try {
      fileSystem.renameFile(candidate, new File(directory, baseName + "." + (generation + 1)));
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }
}
@Test public void shouldReadALogHeaderFromAFile() throws IOException { // given final FileSystemAbstraction fs = new DefaultFileSystemAbstraction(); final File file = File.createTempFile("ReadLogHeader", getClass().getSimpleName()); final ByteBuffer buffer = ByteBuffer.allocate(LOG_HEADER_SIZE); buffer.putLong(encodeLogVersion(expectedLogVersion)); buffer.putLong(expectedTxId); try (OutputStream stream = fs.openAsOutputStream(file, false)) { stream.write(buffer.array()); } // when final LogHeader result = readLogHeader(fs, file); // then assertEquals(new LogHeader(CURRENT_LOG_VERSION, expectedLogVersion, expectedTxId), result); }
public boolean recoveryNeededAt(File dataDir, long currentLogVersion) throws IOException { // We need config to determine where the logical log files are File neoStorePath = new File(dataDir, NeoStore.DEFAULT_NAME); if (!fs.fileExists(neoStorePath)) { // No database in the specified directory. return false; } PhysicalLogFiles logFiles = new PhysicalLogFiles(dataDir, fs); File log = logFiles.getLogFileForVersion(currentLogVersion); if (!fs.fileExists(log)) { // This most likely means that the db has been cleanly shut down, i.e. force then inc log // version, // then NOT creating a new log file (will be done the next startup) return false; } try (StoreChannel logChannel = fs.open(log, "r")) { return LogRecoveryCheck.recoveryRequired(logChannel); } }
/**
 * Moves a file from one directory to another, by a rename op.
 *
 * @param fs the file system abstraction used for all file operations
 * @param fileName The base filename of the file to move, not the complete path
 * @param fromDirectory The directory currently containing filename
 * @param toDirectory The directory to host filename - must be in the same disk partition as
 *     filename
 * @param allowSkipNonExistentFiles if {@code true}, a missing source file is silently skipped;
 *     if {@code false}, the move is attempted regardless (presumably failing in
 *     {@code moveToDirectory} — confirm against the fs implementation)
 * @param allowOverwriteTarget if {@code true}, an existing file of the same name in
 *     {@code toDirectory} is deleted before the move
 * @throws java.io.IOException if the delete or move operation fails
 */
static void moveFile( FileSystemAbstraction fs, String fileName, File fromDirectory, File toDirectory, boolean allowSkipNonExistentFiles, boolean allowOverwriteTarget) throws IOException { File sourceFile = new File(fromDirectory, fileName); if (allowSkipNonExistentFiles && !fs.fileExists( sourceFile)) { // The source file doesn't exist and we allow skipping, so return return; } File toFile = new File(toDirectory, fileName); if (allowOverwriteTarget && fs.fileExists(toFile)) { fs.deleteFile(toFile); } fs.moveToDirectory(sourceFile, toDirectory); }
private ActualStringLogger( FileSystemAbstraction fileSystem, String filename, int rotationThreshold, boolean debugEnabled) { this.fileSystem = fileSystem; this.rotationThreshold = rotationThreshold; this.debugEnabled = debugEnabled; try { file = new File(filename); if (file.getParentFile() != null) { fileSystem.mkdirs(file.getParentFile()); } instantiateWriter(); } catch (IOException e) { throw new RuntimeException(e); } }
/**
 * Verifies that migrating legacy logs writes, for the single discovered legacy
 * log file (version 1), the parsed header followed by all of its entries into
 * the corresponding file in the migration directory — each exactly once.
 */
@Test public void shouldRewriteLogFiles() throws IOException { // given: a mocked legacy log whose reader yields this header and entry cursor
final IOCursor<LogEntry> cursor = mock(IOCursor.class); final LogVersionedStoreChannel writeChannel = mock(LogVersionedStoreChannel.class); final LogHeader header = new LogHeader(CURRENT_LOG_VERSION, 1, 42); when(fs.listFiles(storeDir, versionedLegacyLogFilesFilter)) .thenReturn(new File[] {new File(getLegacyLogFilename(1))}); when(reader.openReadableChannel(new File(getLegacyLogFilename(1)))) .thenReturn(Pair.of(header, cursor)); when(writer.openWritableChannel(new File(migrationDir, getLegacyLogFilename(1)))) .thenReturn(writeChannel); // when
new LegacyLogs(fs, reader, writer).migrateLogs(storeDir, migrationDir); // then: header and entries are copied exactly once to the target channel
verify(writer, times(1)).writeLogHeader(writeChannel, header); verify(writer, times(1)).writeAllLogEntries(writeChannel, cursor); }