public void log(LogRecord record) {
    if (logToFile != null) {
        logToFile(record);
    }
    if (ignoreLogging) {
        return;
    }
    CapedwarfRequestLogs requestLogs = getCurrentRequestLogs();
    if (requestLogs != null) {
        CapedwarfAppLogLine capedwarfAppLogLine =
            new CapedwarfAppLogLine(getCurrentRequestId(), record.getSequenceNumber());
        AppLogLine appLogLine = capedwarfAppLogLine.getAppLogLine();
        appLogLine.setLogLevel(getLogLevel(record));
        appLogLine.setLogMessage(
            record.getSourceClassName() + " " + record.getSourceMethodName() + ": "
                + getFormattedMessage(record) + "\n");
        appLogLine.setTimeUsec(record.getMillis() * 1000);
        logWriter.put(capedwarfAppLogLine);
        requestLogs.logLineAdded(appLogLine);
        logWriter.put(requestLogs);
    }
}
public Object request(Object command, int timeout) throws IOException {
    // Changed this method to use a LogWriter object to actually
    // print the messages to the log, and only in case of logging
    // being active, instead of logging the message directly.
    if (logging) {
        logWriter.logRequest(log, command);
    }
    Object rc = super.request(command, timeout);
    if (logging) {
        logWriter.logResponse(log, command);
    }
    return rc;
}
public void shutdown() {
    try {
        Database.conn.close();
    } catch (SQLException e) {
        LogWriter.write("BusDB:Database : Shutdown with errors..");
        LogWriter.write("BusDB:Database : Exception: " + e.getMessage());
        return;
    }
    LogWriter.write("BusDB:Database : Shutting down..");
}
public void render(@NotNull RenderMode mode, @NotNull LogWriter writer) {
    if (myShortErrors.isEmpty()) {
        return;
    }
    writer.println(myKind + " (" + getNumberOfErrors() + ")");
    final LogWriter offset = writer.offset();
    for (ReportShortFileError error : myShortErrors.values()) {
        error.render(mode, offset);
    }
    writer.println();
}
private void send(MessagePrepareOK prepareOK) {
    LogWriter.log(
        replica.getReplicaID(),
        "Sending message PREPAREOK to Replica " + receiverID + Constants.NEWLINE + prepareOK.toString());
    DataOutputStream dataOutput = null;
    try {
        dataOutput = new DataOutputStream(clientSocket.getOutputStream());

        byte[] messageIDBytes = MyByteUtils.toByteArray(prepareOK.getMessageID());
        dataOutput.writeInt(messageIDBytes.length);
        dataOutput.write(messageIDBytes);

        byte[] viewNumberBytes = MyByteUtils.toByteArray(prepareOK.getViewNumber());
        dataOutput.writeInt(viewNumberBytes.length);
        dataOutput.write(viewNumberBytes);

        byte[] operationNumberBytes = MyByteUtils.toByteArray(prepareOK.getOperationNumber());
        dataOutput.writeInt(operationNumberBytes.length);
        dataOutput.write(operationNumberBytes);

        byte[] replicaIDBytes = MyByteUtils.toByteArray(prepareOK.getReplicaID());
        dataOutput.writeInt(replicaIDBytes.length);
        dataOutput.write(replicaIDBytes);
    } catch (IOException ex) {
        Logger.getLogger(ReplicaClientRunnable.class.getName()).log(Level.SEVERE, null, ex);
    } finally {
        try {
            // Guard against a NullPointerException when the stream was never opened,
            // matching the null check in send(MessageReply) below.
            if (dataOutput != null) {
                dataOutput.flush();
                dataOutput.close();
            }
            clientSocket.close();
        } catch (IOException ex) {
            Logger.getLogger(ReplicaClientRunnable.class.getName()).log(Level.SEVERE, null, ex);
        }
    }
}
private void send(MessageReply reply) {
    LogWriter.log(
        replica.getReplicaID(),
        "Sending message REPLY to Client " + receiverID + Constants.NEWLINE + reply.toString());
    DataOutputStream dataOutput = null;
    try {
        dataOutput = new DataOutputStream(clientSocket.getOutputStream());

        byte[] messageIDBytes = MyByteUtils.toByteArray(reply.getMessageID());
        dataOutput.writeInt(messageIDBytes.length);
        dataOutput.write(messageIDBytes);

        byte[] viewNumberBytes = MyByteUtils.toByteArray(reply.getViewNumber());
        dataOutput.writeInt(viewNumberBytes.length);
        dataOutput.write(viewNumberBytes);

        byte[] requestNumberBytes = MyByteUtils.toByteArray(reply.getRequestNumber());
        dataOutput.writeInt(requestNumberBytes.length);
        dataOutput.write(requestNumberBytes);

        byte[] resultBytes = MyByteUtils.toByteArray(reply.getResult());
        dataOutput.writeInt(resultBytes.length);
        dataOutput.write(resultBytes);
    } catch (IOException ex) {
        Logger.getLogger(ReplicaClientRunnable.class.getName()).log(Level.SEVERE, null, ex);
    } finally {
        try {
            if (dataOutput != null) {
                dataOutput.flush();
                dataOutput.close();
            }
            clientSocket.close();
        } catch (IOException ex) {
            Logger.getLogger(ReplicaClientRunnable.class.getName()).log(Level.SEVERE, null, ex);
        }
    }
}
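// Hedged sketch of the matching receive side, not taken from the original project:
// it assumes the framing used by the two send(...) methods above (a 4-byte length
// written with writeInt, followed by that many payload bytes, one block per field).
// The names LengthPrefixedReader and readLengthPrefixed are illustrative only.
import java.io.DataInputStream;
import java.io.IOException;
import java.net.Socket;

class LengthPrefixedReader {

    // Reads one length-prefixed block: an int length, then exactly that many bytes.
    static byte[] readLengthPrefixed(DataInputStream in) throws IOException {
        int length = in.readInt();
        byte[] payload = new byte[length];
        in.readFully(payload); // blocks until the whole block has arrived
        return payload;
    }

    // Reads the four fields written by send(MessageReply) in the order they were sent.
    static void receiveReply(Socket socket) throws IOException {
        DataInputStream in = new DataInputStream(socket.getInputStream());
        byte[] messageId = readLengthPrefixed(in);
        byte[] viewNumber = readLengthPrefixed(in);
        byte[] requestNumber = readLengthPrefixed(in);
        byte[] result = readLengthPrefixed(in);
        // Decoding these back to numbers/objects would go through the project's
        // MyByteUtils counterpart, which is not shown in the snippets above.
    }
}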
public VncViewer(String[] argv) {
    applet = false;

    // Override defaults with command-line options
    for (int i = 0; i < argv.length; i++) {
        if (argv[i].equalsIgnoreCase("-log")) {
            if (++i >= argv.length) {
                usage();
            }
            System.err.println("Log setting: " + argv[i]);
            LogWriter.setLogParams(argv[i]);
            continue;
        }

        if (Configuration.setParam(argv[i])) {
            continue;
        }

        if (argv[i].charAt(0) == '-') {
            if (i + 1 < argv.length) {
                if (Configuration.setParam(argv[i].substring(1), argv[i + 1])) {
                    i++;
                    continue;
                }
            }
            usage();
        }

        if (vncServerName.getValue() != null) {
            usage();
        }
        vncServerName.setParam(argv[i]);
    }
}
public int addCatalog(int response, String producer) {
    try {
        this.statement = Database.conn.createStatement();
        String sql = "INSERT INTO catalog (response_time_stamp, producer_ref, time_of_rec) VALUES ( "
            + "" + response + ", '" + producer + "', 'now')";
        this.statement.executeUpdate(sql, Statement.RETURN_GENERATED_KEYS);
        ResultSet rs = this.statement.getGeneratedKeys();
        if (rs.next()) {
            return rs.getInt(1);
        }
    } catch (SQLException e) {
        LogWriter.write("BusDB:Database : Failed to add new data into catalog table. ");
        e.printStackTrace();
    }
    return -1;
}
@Override
public void init(ServletConfig config) throws ServletException {
    super.init(config);
    System.setProperty("kotlin.running.in.server.mode", "true");
    System.setProperty("java.awt.headless", "true");

    ApplicationSettings.WEBAPP_ROOT_DIRECTORY = getServletContext().getRealPath("/");
    ApplicationSettings.EXAMPLES_DIRECTORY = ApplicationSettings.WEBAPP_ROOT_DIRECTORY + "examples";
    CommonSettings.HELP_DIRECTORY = ApplicationSettings.WEBAPP_ROOT_DIRECTORY;

    if (!loadTomcatParameters()) {
        ErrorWriter.writeErrorToConsole(
            "FATAL ERROR: Cannot load parameters from tomcat config, server didn't start");
        System.exit(1);
    }

    ErrorWriter.ERROR_WRITER = ErrorWriter.getInstance();
    // Initializer.INITIALIZER = ServerInitializer.getInstance();
    try {
        ErrorWriter.writeInfoToConsole("Use \"help\" to look at all options");
        new File(CommonSettings.LOGS_DIRECTORY).mkdirs();
        LogWriter.init();
        ExamplesLoader.loadAllExamples();
        HelpLoader.getInstance();
        MySqlConnector.getInstance();
    } catch (Throwable e) {
        ErrorWriter.writeExceptionToConsole(
            "FATAL ERROR: Initialisation of java core environment failed, server didn't start", e);
        System.exit(1);
    }
}
/**
 * Set a logger
 *
 * @param logger a <code>Logger</code> value
 */
public void setLog(Logger logger) {
    m_log = logger;
    if (m_Filter != null && m_Filter instanceof LogWriter) {
        ((LogWriter) m_Filter).setLog(m_log);
    }
}
private void compactMemTableInternal() throws IOException {
    Preconditions.checkState(mutex.isHeldByCurrentThread());
    if (immutableMemTable == null) {
        return;
    }

    try {
        // Save the contents of the memtable as a new Table
        VersionEdit edit = new VersionEdit();
        Version base = versions.getCurrent();
        writeLevel0Table(immutableMemTable, edit, base);

        if (shuttingDown.get()) {
            throw new DatabaseShutdownException("Database shutdown during memtable compaction");
        }

        // Replace immutable memtable with the generated Table
        edit.setPreviousLogNumber(0);
        edit.setLogNumber(log.getFileNumber()); // Earlier logs no longer needed
        versions.logAndApply(edit);

        immutableMemTable = null;

        deleteObsoleteFiles();
    } finally {
        backgroundCondition.signalAll();
    }
}
public void close() {
    if (shuttingDown.getAndSet(true)) {
        return;
    }

    mutex.lock();
    try {
        while (backgroundCompaction != null) {
            backgroundCondition.awaitUninterruptibly();
        }
    } finally {
        mutex.unlock();
    }

    compactionExecutor.shutdown();
    try {
        compactionExecutor.awaitTermination(1, TimeUnit.DAYS);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
    try {
        versions.destroy();
    } catch (IOException ignored) {
    }
    try {
        log.close();
    } catch (IOException ignored) {
    }
    tableCache.close();
    dbLock.release();
}
@Override
public void run() {
    char c;
    try {
        while (running) {
            char_recieved = telnetHelper.read();
            c = (char) char_recieved;
            c = ansiCoding(c);
            if (ansiTransmitCode) {
                if (ansiCode.toString().charAt(ansiCode.toString().length() - 1) == '\n') {
                    telnetHelper.addInputStringFragment(ansiCode.toString(), true);
                } else {
                    telnetHelper.addInputStringFragment(ansiCode.toString(), false);
                }
                ansiCode.delete(0, ansiCode.length());
                ansiTransmitCode = false;
            }
            if (c == '\r') {
                // Suppress \r characters
            } else if (c == '\n') {
                telnetHelper.addInputStringFragment(java.lang.Character.toString(c), true);
            } else if (char_recieved > 0) {
                telnetHelper.addInputStringFragment(java.lang.Character.toString(c), false);
            }
            LogWriter.write(EnumLogType.TELNET, java.lang.Character.toString(c));
        }
    } catch (IOException e) {
        e.printStackTrace();
    }
}
public void addDataEntry(
        int id,
        String lineref,
        int directionref,
        String origin,
        String destination,
        String longitude,
        String latitude,
        String delay,
        String vehicleref,
        int djvref,
        String bearing,
        String operatorref) {
    try {
        this.statement = Database.conn.createStatement();
        String sql = "INSERT INTO busdata VALUES ( "
            + "" + id + ","
            + "'" + lineref + "', " + directionref + ", "
            + "'" + origin + "', '" + destination + "', "
            + "" + longitude + ", " + latitude + ", "
            + "'" + delay + "', '" + vehicleref + "',"
            + "" + djvref + ", '" + bearing + "', '"
            + "" + operatorref + "')";
        this.statement.executeUpdate(sql);
        this.statement.close();
    } catch (SQLException e) {
        LogWriter.write("BusDB:Database : Failed to add new data into busdata table. ");
        e.printStackTrace();
    }
}
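// Hedged alternative sketch, not part of the original class: the concatenated SQL in
// addCatalog and addDataEntry above is open to SQL injection and quoting errors.
// A parameterized insert via java.sql.PreparedStatement avoids both. Table and column
// names reuse the ones visible above; the method name addCatalogSafely is illustrative.
// Requires java.sql.PreparedStatement, java.sql.ResultSet, java.sql.SQLException, java.sql.Statement.
public int addCatalogSafely(int response, String producer) {
    String sql = "INSERT INTO catalog (response_time_stamp, producer_ref, time_of_rec) VALUES (?, ?, 'now')";
    try (PreparedStatement ps =
            Database.conn.prepareStatement(sql, Statement.RETURN_GENERATED_KEYS)) {
        ps.setInt(1, response);     // values are bound, never concatenated into the SQL text
        ps.setString(2, producer);
        ps.executeUpdate();
        try (ResultSet rs = ps.getGeneratedKeys()) {
            if (rs.next()) {
                return rs.getInt(1);
            }
        }
    } catch (SQLException e) {
        LogWriter.write("BusDB:Database : Failed to add new data into catalog table. ");
        e.printStackTrace();
    }
    return -1;
}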
public void oneway(Object command) throws IOException {
    // Changed this method to use a LogWriter object to actually
    // print the messages to the log, and only in case of logging
    // being active, instead of logging the message directly.
    if (logging && log.isDebugEnabled()) {
        logWriter.logOneWay(log, command);
    }
    next.oneway(command);
}
public FutureResponse asyncRequest(Object command, ResponseCallback responseCallback) throws IOException {
    // Changed this method to use a LogWriter object to actually
    // print the messages to the log, and only in case of logging
    // being active, instead of logging the message directly.
    if (logging) {
        logWriter.logAsyncRequest(log, command);
    }
    FutureResponse rc = next.asyncRequest(command, responseCallback);
    return rc;
}
public void onException(IOException error) {
    // Changed this method to use a LogWriter object to actually
    // print the messages to the log, and only in case of logging
    // being active, instead of logging the message directly.
    if (logging && log.isDebugEnabled()) {
        logWriter.logReceivedException(log, error);
    }
    getTransportListener().onException(error);
}
public void onCommand(Object command) {
    // Changed this method to use a LogWriter object to actually
    // print the messages to the log, and only in case of logging
    // being active, instead of logging the message directly.
    if (logging && log.isDebugEnabled()) {
        logWriter.logReceivedCommand(log, command);
    }
    getTransportListener().onCommand(command);
}
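// Hedged sketch of a LogWriter implementation compatible with the call sites above.
// The method set (logRequest, logResponse, logAsyncRequest, logOneWay,
// logReceivedCommand, logReceivedException) is inferred from those calls only; the
// class name SimpleLogWriter and the use of org.slf4j.Logger as the logger type are
// assumptions, not taken from the snippets.
import java.io.IOException;
import org.slf4j.Logger;

public class SimpleLogWriter {

    public void logRequest(Logger log, Object command) {
        log.debug("SENDING REQUEST: {}", command);
    }

    public void logResponse(Logger log, Object response) {
        log.debug("GOT RESPONSE: {}", response);
    }

    public void logAsyncRequest(Logger log, Object command) {
        log.debug("SENDING ASYNC REQUEST: {}", command);
    }

    public void logOneWay(Logger log, Object command) {
        log.debug("SENDING ONE WAY: {}", command);
    }

    public void logReceivedCommand(Logger log, Object command) {
        log.debug("RECEIVED: {}", command);
    }

    public void logReceivedException(Logger log, IOException error) {
        log.debug("RECEIVED EXCEPTION", error);
    }
}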
// Test entry point
public static void main(String[] args) {
    // txt and txt3 are additional sample statements that are not exercised below.
    String txt = "select * from\"旧襄阳市市本级\".\"分析表_近三年税款征收数据表\" a,\"旧襄阳市市本级\".\"基础表_纳税人基本信息表\" b,"
        + "\"旧襄阳市市本级\".\"基础表_纳税人证件信息表\" c where a.\"纳税人名称\" = b.\"纳税人名称\" and "
        + "a.\"税务登记证号\" = c.\"税务登记证号\"";
    String txt2 = "select * from a inner join b on a.id=b.id";
    String txt3 = "SELECT DISTINCT 学生表1.* FROM 学生表 学生表1 INNER JOIN 学生表 学生表2 ON 学生表1.学号 IN (SELECT TOP 2 学生表.学号FROM 学生表 WHERE 学生表.功课编号 = 学生表1.功课编号 ORDER BY 学生成绩 DESC)";
    String txt4 = "SELECT R1.company, R1.num FROM route R1, route R2, stops S1, stops S2 WHERE R1.num=R2.num AND R1.company=R2.company AND R1.stop=S1.id AND R2.stop=S2.id AND S1.name='Craiglockhart' AND S2.name='Tollcross' ";
    String txt5 = "update 学生表 set 学号=10001 where name='牛中超'";
    String txt6 = "SELECT count(*) FROM DSDATA.US";

    // txt2
    int res = getOriginalTableName(txt2);
    LogWriter.println("txt2 check returned => " + res);
    if (res == -1) {
        System.err.println("SQL error: please enter a SELECT statement");
    } else if (res == 0) {
        System.err.println("SQL error: please check the statement");
    } else {
        LogWriter.println("Total tables counted => " + res);
    }

    // txt4
    res = getOriginalTableName(txt4);
    LogWriter.println("txt4 check returned => " + res);
    if (res == -1) {
        System.err.println("SQL error: please enter a SELECT statement");
    } else if (res == 0) {
        System.err.println("SQL error: please check the statement");
    } else {
        LogWriter.println("Total tables counted => " + res);
    }

    // txt5
    res = getOriginalTableName(txt5);
    LogWriter.println("txt5 check returned => " + res);
    if (res == -1) {
        System.err.println("SQL error: please enter a SELECT statement");
    } else if (res == 0) {
        System.err.println("SQL error: please check the statement");
    } else {
        LogWriter.println("Total tables counted => " + res);
    }

    // txt6
    res = getOriginalTableName(txt6);
    LogWriter.println("txt6 check returned => " + res);
    if (res == -1) {
        System.err.println("SQL error: please enter a SELECT statement");
    } else if (res == 0) {
        System.err.println("SQL error: please check the statement");
    } else {
        LogWriter.println("Total tables counted => " + res);
    }
}
public void requestFinished(ServletRequest servletRequest, int status, int contentLength) {
    CapedwarfRequestLogs capedwarfRequestLogs = getCapedwarfRequestLogs(servletRequest);
    // check if all went well
    if (capedwarfRequestLogs != null) {
        RequestLogs requestLogs = capedwarfRequestLogs.getRequestLogs();
        requestLogs.setEndTimeUsec(System.currentTimeMillis() * 1000);
        requestLogs.setStatus(status);
        requestLogs.setResponseSize(contentLength);
        requestLogs.setFinished(true);
        logWriter.put(capedwarfRequestLogs);
    }
}
/**
 * Static entry point: count the tables referenced by a SQL statement.
 *
 * @param sql the SQL statement to inspect
 * @return -1 if the statement is not a SELECT, 0 if the SQL could not be parsed,
 *         or a number &gt; 0 giving the count of tables found
 */
public static int getOriginalTableName(String sql) {
    int num_Table = 0;
    Statement statement = null;
    try {
        statement = CCJSqlParserUtil.parse(sql);
        if (statement instanceof Select) {
            Select selectStatement = (Select) statement;
            System.err.println(sql);
            PlainSelect plainSelect = (PlainSelect) selectStatement.getSelectBody();
            if (plainSelect != null) {
                System.out.println("\n-------------------------------------------");
                if (plainSelect.getFromItem() != null) {
                    // Print the FROM item only when it exists, to avoid a NullPointerException.
                    System.out.print(plainSelect.getFromItem().toString() + "\t");
                    num_Table++;
                }
                if (plainSelect.getJoins() != null) {
                    for (Join join2 : plainSelect.getJoins()) {
                        System.out.print(join2.toString() + "\t");
                        num_Table++;
                    }
                }
                System.out.println("\n-------------------------------------------");
            } else {
                num_Table = -1;
                return num_Table;
            }
        } else {
            // Not a SELECT statement: return -1 as documented above.
            num_Table = -1;
            return num_Table;
        }
    } catch (JSQLParserException ex) {
        ex.printStackTrace();
        num_Table = 0;
        LogWriter.println(num_Table);
        return num_Table;
    }
    LogWriter.println("Total tables counted => " + num_Table);
    return num_Table;
}
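// Hedged alternative sketch, not part of the original code: JSqlParser ships a visitor,
// net.sf.jsqlparser.util.TablesNamesFinder, that collects table names across joins and
// subqueries, so counting does not depend on casting the select body to PlainSelect.
// The method name countTables is illustrative; the return contract mirrors the method
// above. Requires java.util.List and net.sf.jsqlparser.util.TablesNamesFinder.
public static int countTables(String sql) {
    try {
        Statement statement = CCJSqlParserUtil.parse(sql);
        if (!(statement instanceof Select)) {
            return -1; // not a SELECT
        }
        TablesNamesFinder finder = new TablesNamesFinder();
        List<String> tables = finder.getTableList((Select) statement);
        return tables.size();
    } catch (JSQLParserException ex) {
        return 0; // could not be parsed
    }
}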
public void requestStarted(ServletRequest servletRequest, long startTimeMillis) {
    if (ignoreLogging || !isLoggable(servletRequest)) {
        return;
    }
    CapedwarfEnvironment environment = CapedwarfEnvironment.getThreadLocalInstance();
    CapedwarfRequestLogs capedwarfRequestLogs =
        createCapedwarfRequestLogs(servletRequest, startTimeMillis, environment);
    logWriter.put(capedwarfRequestLogs);
    servletRequest.setAttribute(REQUEST_LOGS_REQUEST_ATTRIBUTE, capedwarfRequestLogs);
    environment.getAttributes().put(REQUEST_LOGS_ENV_ATTRIBUTE, capedwarfRequestLogs);
    environment.getAttributes().put(REQUEST_LOG_ID, capedwarfRequestLogs.getRequestLogs().getRequestId());
}
@Override
public void run() {
    // LogWriter.log( replica.getReplicaID(), "Client started");
    synchronized (this) {
        this.runningThread = Thread.currentThread();
    }
    try {
        clientSocket = new Socket(serverAddress, serverPort);
        sendMessage();
    } catch (IOException e) {
        if (isStopped()) {
            LogWriter.log(replica.getReplicaID(), "Client communication crashed.");
            return;
        }
        throw new RuntimeException("Error accepting server connection", e);
    }
    // LogWriter.log( replica.getReplicaID(), "Client communication Stopped.") ;
}
public BatchUpdateException(
        LogWriter logWriter,
        ClientMessageId msgid,
        Object[] args,
        int[] updateCounts,
        SqlException cause) {
    super(
        msgutil_.getCompleteMessage(msgid.msgid, args),
        ExceptionUtil.getSQLStateFromIdentifier(msgid.msgid),
        ExceptionUtil.getSeverityFromIdentifier(msgid.msgid),
        updateCounts);
    if (logWriter != null) {
        logWriter.traceDiagnosable(this);
    }
    if (cause != null) {
        initCause(cause);
        setNextException(cause.getSQLException());
    }
}
public BlockCanaryInternals() {
    stackSampler = new StackSampler(Looper.getMainLooper().getThread(), sContext.provideDumpInterval());
    cpuSampler = new CpuSampler(sContext.provideDumpInterval());

    setMonitor(
        new LooperMonitor(
            new LooperMonitor.BlockListener() {
                @Override
                public void onBlockEvent(
                        long realTimeStart, long realTimeEnd, long threadTimeStart, long threadTimeEnd) {
                    // Get recent thread-stack entries and cpu usage
                    ArrayList<String> threadStackEntries =
                        stackSampler.getThreadStackEntries(realTimeStart, realTimeEnd);
                    if (!threadStackEntries.isEmpty()) {
                        BlockInfo blockInfo = BlockInfo.newInstance()
                            .setMainThreadTimeCost(realTimeStart, realTimeEnd, threadTimeStart, threadTimeEnd)
                            .setCpuBusyFlag(cpuSampler.isCpuBusy(realTimeStart, realTimeEnd))
                            .setRecentCpuRate(cpuSampler.getCpuRateInfo())
                            .setThreadStackEntries(threadStackEntries)
                            .flushString();
                        LogWriter.save(blockInfo.toString());

                        if (getContext().displayNotification() && mInterceptorChain.size() != 0) {
                            for (BlockInterceptor interceptor : mInterceptorChain) {
                                interceptor.onBlock(getContext().provideContext(), blockInfo);
                            }
                        }
                    }
                }
            },
            getContext().provideBlockThreshold()));

    LogWriter.cleanObsolete();
}
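// Hedged usage sketch: the internals above are normally driven through the library's
// public entry point rather than constructed directly. The install(...)/start() calls
// follow the library's published usage; the package name may differ per fork, and
// AppBlockCanaryContext stands for a user-defined BlockCanaryContext subclass, so both
// are assumptions rather than facts taken from the snippet.
import android.app.Application;
import com.github.moduth.blockcanary.BlockCanary;

public class ExampleApplication extends Application {
    @Override
    public void onCreate() {
        super.onCreate();
        // The BlockCanaryContext subclass supplies provideDumpInterval(),
        // provideBlockThreshold(), displayNotification(), etc. used above.
        BlockCanary.install(this, new AppBlockCanaryContext()).start();
    }
}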
/**
 * Set the filter to be wrapped by this bean
 *
 * @param c a <code>features.filters.Filter</code> value
 */
public void setFilter(features.filters.Filter c) {
    boolean loadImages = true;
    if (c.getClass().getName().compareTo(m_Filter.getClass().getName()) == 0) {
        loadImages = false;
    }
    m_Filter = c;
    String filterName = c.getClass().toString();
    filterName = filterName.substring(filterName.indexOf('.') + 1, filterName.length());
    if (loadImages) {
        if (m_Filter instanceof Visible) {
            m_visual = ((Visible) m_Filter).getVisual();
        } else {
            if (!m_visual.loadIcons(
                    BeanVisual.ICON_PATH + filterName + ".gif",
                    BeanVisual.ICON_PATH + filterName + "_animated.gif")) {
                useDefaultVisual();
            }
        }
    }
    m_visual.setText(filterName.substring(filterName.lastIndexOf('.') + 1, filterName.length()));

    if (m_Filter instanceof LogWriter && m_log != null) {
        ((LogWriter) m_Filter).setLog(m_log);
    }

    if (!(m_Filter instanceof StreamableFilter) && (m_listenees.containsKey("instance"))) {
        if (m_log != null) {
            m_log.logMessage(
                "[Filter] " + statusMessagePrefix() + " WARNING : "
                    + m_Filter.getClass().getName() + " is not an incremental filter");
            m_log.statusMessage(statusMessagePrefix() + "WARNING: Not an incremental filter.");
        }
    }

    // get global info
    m_globalInfo = KnowledgeFlowApp.getGlobalInfo(m_Filter);
}
public ArrayList<Object> getMyHitAnswers(RequesterService service, String hitId) {
    Assignment[] assignments = service.getAllAssignmentsForHIT(hitId);
    ArrayList<Object> rawAnswers = new ArrayList<Object>();
    for (Assignment assignment : assignments) {
        String log = assignment.getWorkerId() + " had the following" + " answers for HIT("
            + assignment.getHITId() + "): " + (new Date()).toString() + "\n";

        // Interpret the XML and parse answers out.
        String answerXML = assignment.getAnswer();
        QuestionFormAnswers qfa = RequesterService.parseAnswers(answerXML);
        @SuppressWarnings("unchecked")
        ArrayList<QuestionFormAnswersType.AnswerType> answers =
            (ArrayList<QuestionFormAnswersType.AnswerType>) qfa.getAnswer();

        for (QuestionFormAnswersType.AnswerType answer : answers) {
            String assignmentId = assignment.getAssignmentId();
            String answerValues = RequesterService.getAnswerValue(assignmentId, answer);
            if (answerValues == null) {
                // Skip answers with no value instead of risking a NullPointerException below.
                continue;
            }
            String[] rawAnswerValues = answerValues.split("\\|");
            for (String ans : rawAnswerValues) {
                if (ans.startsWith("desc_identifier:")) {
                    rawAnswers.add(ans.substring(16));
                    log += ans.substring(16) + "\n";
                }
            }
        }
        log += "---------\n\n";
        LogWriter.writeLog(log, "detail.txt");
    }
    return rawAnswers;
}
public Snapshot writeInternal(WriteBatchImpl updates, WriteOptions options) throws DBException {
    checkBackgroundException();
    mutex.lock();
    try {
        long sequenceEnd;
        if (updates.size() != 0) {
            makeRoomForWrite(false);

            // Get sequence numbers for this change set
            final long sequenceBegin = versions.getLastSequence() + 1;
            sequenceEnd = sequenceBegin + updates.size() - 1;

            // Reserve this sequence in the version set
            versions.setLastSequence(sequenceEnd);

            // Log write
            Slice record = writeWriteBatch(updates, sequenceBegin);
            try {
                log.addRecord(record, options.sync());
            } catch (IOException e) {
                throw Throwables.propagate(e);
            }

            // Update memtable
            updates.forEach(new InsertIntoHandler(memTable, sequenceBegin));
        } else {
            sequenceEnd = versions.getLastSequence();
        }

        if (options.snapshot()) {
            return new SnapshotImpl(versions.getCurrent(), sequenceEnd);
        } else {
            return null;
        }
    } finally {
        mutex.unlock();
    }
}
private void makeRoomForWrite(boolean force) {
    Preconditions.checkState(mutex.isHeldByCurrentThread());

    boolean allowDelay = !force;

    while (true) {
        // todo background processing system need work
        // if (!bg_error_.ok()) {
        //     // Yield previous error
        //     s = bg_error_;
        //     break;
        // } else
        if (allowDelay && versions.numberOfFilesInLevel(0) > L0_SLOWDOWN_WRITES_TRIGGER) {
            // We are getting close to hitting a hard limit on the number of
            // L0 files. Rather than delaying a single write by several
            // seconds when we hit the hard limit, start delaying each
            // individual write by 1ms to reduce latency variance. Also,
            // this delay hands over some CPU to the compaction thread in
            // case it is sharing the same core as the writer.
            try {
                mutex.unlock();
                Thread.sleep(1);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new RuntimeException(e);
            } finally {
                mutex.lock();
            }

            // Do not delay a single write more than once
            allowDelay = false;
        } else if (!force && memTable.approximateMemoryUsage() <= options.writeBufferSize()) {
            // There is room in current memtable
            break;
        } else if (immutableMemTable != null) {
            // We have filled up the current memtable, but the previous
            // one is still being compacted, so we wait.
            backgroundCondition.awaitUninterruptibly();
        } else if (versions.numberOfFilesInLevel(0) >= L0_STOP_WRITES_TRIGGER) {
            // There are too many level-0 files.
            // Log(options_.info_log, "waiting...\n");
            backgroundCondition.awaitUninterruptibly();
        } else {
            // Attempt to switch to a new memtable and trigger compaction of old
            Preconditions.checkState(versions.getPrevLogNumber() == 0);

            // close the existing log
            try {
                log.close();
            } catch (IOException e) {
                throw new RuntimeException("Unable to close log file " + log.getFile(), e);
            }

            // open a new log
            long logNumber = versions.getNextFileNumber();
            try {
                this.log = Logs.createLogWriter(
                    new File(databaseDir, Filename.logFileName(logNumber)), logNumber);
            } catch (IOException e) {
                throw new RuntimeException(
                    "Unable to open new log file "
                        + new File(databaseDir, Filename.logFileName(logNumber)).getAbsoluteFile(),
                    e);
            }

            // create a new mem table
            immutableMemTable = memTable;
            memTable = new MemTable(internalKeyComparator);

            // Do not force another compaction if there is space available
            force = false;

            maybeScheduleCompaction();
        }
    }
}
public DbImpl(Options options, File databaseDir) throws IOException {
    Preconditions.checkNotNull(options, "options is null");
    Preconditions.checkNotNull(databaseDir, "databaseDir is null");
    this.options = options;

    if (this.options.compressionType() == CompressionType.ZLIB && !Zlib.available()) {
        // There's little hope to continue.
        this.options.compressionType(CompressionType.NONE);
    }
    if (this.options.compressionType() == CompressionType.SNAPPY && !Snappy.available()) {
        // Disable snappy if it's not available.
        this.options.compressionType(CompressionType.NONE);
    }

    this.databaseDir = databaseDir;

    // use custom comparator if set
    DBComparator comparator = options.comparator();
    UserComparator userComparator;
    if (comparator != null) {
        userComparator = new CustomUserComparator(comparator);
    } else {
        userComparator = new BytewiseComparator();
    }
    internalKeyComparator = new InternalKeyComparator(userComparator);
    memTable = new MemTable(internalKeyComparator);
    immutableMemTable = null;

    ThreadFactory compactionThreadFactory = new ThreadFactoryBuilder()
        .setNameFormat("leveldb-compaction-%s")
        .setUncaughtExceptionHandler(new UncaughtExceptionHandler() {
            @Override
            public void uncaughtException(Thread t, Throwable e) {
                // todo need a real UncaughtExceptionHandler
                System.out.printf("%s%n", t);
                e.printStackTrace();
            }
        })
        .build();
    compactionExecutor = Executors.newSingleThreadExecutor(compactionThreadFactory);

    // Reserve ten files or so for other uses and give the rest to TableCache.
    int tableCacheSize = options.maxOpenFiles() - 10;
    tableCache = new TableCache(
        databaseDir,
        tableCacheSize,
        new InternalUserComparator(internalKeyComparator),
        options.verifyChecksums());

    // create the version set

    // create the database dir if it does not already exist
    databaseDir.mkdirs();
    Preconditions.checkArgument(
        databaseDir.exists(),
        "Database directory '%s' does not exist and could not be created",
        databaseDir);
    Preconditions.checkArgument(
        databaseDir.isDirectory(), "Database directory '%s' is not a directory", databaseDir);

    mutex.lock();
    try {
        // lock the database dir
        dbLock = new DbLock(new File(databaseDir, Filename.lockFileName()));

        // verify the "current" file
        File currentFile = new File(databaseDir, Filename.currentFileName());
        if (!currentFile.canRead()) {
            Preconditions.checkArgument(
                options.createIfMissing(),
                "Database '%s' does not exist and the create if missing option is disabled",
                databaseDir);
        } else {
            Preconditions.checkArgument(
                !options.errorIfExists(),
                "Database '%s' exists and the error if exists option is enabled",
                databaseDir);
        }

        versions = new VersionSet(databaseDir, tableCache, internalKeyComparator);

        // load (and recover) current version
        versions.recover();

        // Recover from all newer log files than the ones named in the
        // descriptor (new log files may have been added by the previous
        // incarnation without registering them in the descriptor).
        //
        // Note that PrevLogNumber() is no longer used, but we pay
        // attention to it in case we are recovering a database
        // produced by an older version of leveldb.
        long minLogNumber = versions.getLogNumber();
        long previousLogNumber = versions.getPrevLogNumber();
        List<File> filenames = Filename.listFiles(databaseDir);
        List<Long> logs = Lists.newArrayList();
        for (File filename : filenames) {
            FileInfo fileInfo = Filename.parseFileName(filename);
            if (fileInfo != null
                && fileInfo.getFileType() == FileType.LOG
                && ((fileInfo.getFileNumber() >= minLogNumber)
                    || (fileInfo.getFileNumber() == previousLogNumber))) {
                logs.add(fileInfo.getFileNumber());
            }
        }

        // Recover in the order in which the logs were generated
        VersionEdit edit = new VersionEdit();
        Collections.sort(logs);
        for (Long fileNumber : logs) {
            long maxSequence = recoverLogFile(fileNumber, edit);
            if (versions.getLastSequence() < maxSequence) {
                versions.setLastSequence(maxSequence);
            }
        }

        // open transaction log
        long logFileNumber = versions.getNextFileNumber();
        this.log = Logs.createLogWriter(
            new File(databaseDir, Filename.logFileName(logFileNumber)), logFileNumber);
        edit.setLogNumber(log.getFileNumber());

        // apply recovered edits
        versions.logAndApply(edit);

        // cleanup unused files
        deleteObsoleteFiles();

        // schedule compactions
        maybeScheduleCompaction();
    } finally {
        mutex.unlock();
    }
}
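// Hedged usage sketch built only from what is visible above: the DbImpl(Options, File)
// constructor and the close() method. The createIfMissing(boolean) setter mirrors the
// options.createIfMissing() check in the constructor and is assumed to exist; reads and
// writes would go through the port's public DB interface and are omitted here.
// Requires java.io.File, java.io.IOException, and the port's Options/DbImpl classes.
static void openAndCloseExample() throws IOException {
    Options options = new Options();
    options.createIfMissing(true); // allow the database directory to be created
    DbImpl db = new DbImpl(options, new File("/tmp/example-leveldb"));
    try {
        // reads and writes via the DB interface would go here
    } finally {
        db.close();
    }
}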