/**
 * Writes the banner for the keys section of the DDL output.
 * Idempotent: only the first call prints; later calls return immediately.
 */
private static void printHeader() {
    if (printedHeader) {
        return;
    }
    Logs.reportString("----------------------------------------------");
    Logs.reportMessage("DBLOOK_KeysHeader");
    Logs.reportString("----------------------------------------------\n");
    printedHeader = true;
}
private static String makeFKReferenceClause(String constraintId, char deleteChar, char updateChar) throws SQLException { StringBuffer refClause = new StringBuffer(); getReferenceCols.setString(1, constraintId); ResultSet colsRS = getReferenceCols.executeQuery(); colsRS.next(); refClause.append(" REFERENCES "); refClause.append(dblook.lookupTableId(colsRS.getString(1))); refClause.append(" ("); refClause.append(dblook.getColumnListFromDescription(colsRS.getString(1), colsRS.getString(2))); refClause.append(")"); // On delete. refClause.append(" ON DELETE "); switch (deleteChar) { case 'R': refClause.append("NO ACTION"); break; case 'S': refClause.append("RESTRICT"); break; case 'C': refClause.append("CASCADE"); break; case 'U': refClause.append("SET NULL"); break; default: // shouldn't happen. Logs.debug("INTERNAL ERROR: unexpected 'on-delete' action: " + deleteChar, (String) null); break; } // On update refClause.append(" ON UPDATE "); switch (updateChar) { case 'R': refClause.append("NO ACTION"); break; case 'S': refClause.append("RESTRICT"); break; default: // shouldn't happen. Logs.debug("INTERNAL ERROR: unexpected 'on-update' action: " + updateChar, (String) null); break; } colsRS.close(); return refClause.toString(); }
public void Calculate_AddressSOCK(byte AType) { switch (AType) { // Version IP 4 case 0x01: RemoteHost = Tools.calcInetAddress(DST_Addr); RemotePort = Tools.calcPort(DST_Port); break; // Version IP DOMAIN NAME case 0x03: if (DST_Addr[0] <= 0) { Logs.Println( Logger.ERROR, "SOCKS 5 - calcInetAddress() : BAD IP in command - size : " + DST_Addr[0], true); return; } String sIA = ""; for (int i = 1; i <= DST_Addr[0]; i++) { sIA += (char) DST_Addr[i]; } RemoteHost = sIA; RemotePort = Tools.calcPort(DST_Port); break; } }
/**
 * Bukkit plugin enable hook: loads configs, prepares logging, starts
 * metrics, registers the command/recipe, and schedules the async task.
 */
@SuppressWarnings("unused")
public void onEnable() {
    plugin = this;
    ConfigManager.registerConfig("config", "config.yml", this);
    ConfigManager.registerConfig("stone", "stoniarki.yml", this);
    ConfigUtils.prepareConfig();
    Logs.prepareLogFile();
    try {
        Log.info(prx + " Uruchamianie Metrics! (mcstats.org)");
        MetricsLite metrics = new MetricsLite(this);
        metrics.start();
    } catch (IOException e) {
        Log.info(prx + " Nie mozna bylo uruchomic MetricsLite!");
        // FIX: e.getStackTrace().toString() printed the array's identity
        // hash ("[Ljava.lang.StackTraceElement;@..."), not the trace.
        for (StackTraceElement frame : e.getStackTrace()) {
            Log.info(frame.toString());
        }
    }
    getCommand("stoniarkibynorthpl").setExecutor(new Commands(this));
    getServer().addRecipe(CraftingRegister.recepta_na_stoniarke_by_northpl);
    BukkitTask task = new StoniarkiTask(this).runTaskTimerAsynchronously(Main.plugin, 5, 5);
    Logs.Log("Plugin Stoniarki v" + v + " by NorthPL uruchomiony! :)");
}
private static void createKeysFrom(ResultSet rs) throws SQLException { boolean firstTime = true; while (rs.next()) { if (!rs.getBoolean(7)) // this row is NOT for a constraint, so skip it. continue; String tableId = rs.getString(3); String tableName = dblook.lookupTableId(tableId); if (dblook.isExcludedTable(tableName)) // table isn't included in user-given list; skip it. continue; if (firstTime) { printHeader(); if (rs.getString(2).equals("F")) Logs.reportMessage("DBLOOK_ForeignHeader"); else Logs.reportMessage("DBLOOK_PrimUniqueHeader"); } StringBuffer kString = createKeyString(tableId, tableName, rs); if (rs.getString(2).equals("F")) { // foreign key; we have to figure out the references info. kString.append( makeFKReferenceClause( rs.getString(10), rs.getString(8).charAt(0), rs.getString(9).charAt(0))); } Logs.writeToNewDDL(kString.toString()); Logs.writeStmtEndToNewDDL(); Logs.writeNewlineToNewDDL(); firstTime = false; } return; }
private Slice newLogRecordHeader(LogChunkType type, Slice slice, int length) { int crc = Logs.getChunkChecksum( type.getPersistentId(), slice.getRawArray(), slice.getRawOffset(), length); // Format the header SliceOutput header = Slices.allocate(HEADER_SIZE).output(); header.writeInt(crc); header.writeByte((byte) (length & 0xff)); header.writeByte((byte) (length >>> 8)); header.writeByte((byte) (type.getPersistentId())); return header.slice(); }
private static String expandKeyType(char keyType) { switch (keyType) { case 'P': return " PRIMARY KEY "; case 'U': return " UNIQUE "; case 'F': return " FOREIGN KEY "; default: // shouldn't happen. Logs.debug("INTERNAL ERROR: unexpected key type" + keyType, (String) null); return ""; } }
/**
 * Handles a login-page request: if a user is somehow already attached to
 * the session, logs a security warning and clears it, then redirects the
 * client to the SSL login page.
 *
 * @param transaction current request transaction
 * @throws Exception propagated from the redirect
 */
public final void handle(Transaction transaction) throws Exception {
    SessionEnvironment env = transaction.getSessionEnvironment();
    if (env.getUser() != null) {
        // An authenticated user reaching the login handler is suspicious.
        Logs.log(
                Logs.SECURITY_WARNING_CAT,
                "User tries to login while logged.",
                Logs.USER_ID_TAG,
                Integer.toString(env.getUser().userId));
        env.setUser(null);
    }
    transaction.sendHttpRedirection(SSLLogin);
}
// Test fixture: parses the first canned log (Logs.log0()) before each test
// so assertions run against a freshly populated parser.
@Before public void parseLog1() { parser.parse(Logs.log0()); }
/**
 * Blocks the calling writer (which must hold {@code mutex}) until the
 * memtable has room for the write, possibly throttling, waiting on the
 * background condition, or rotating to a fresh memtable + log file.
 *
 * @param force when true, skip the soft-limit delay and the "room in
 *              memtable" fast path, forcing a memtable switch
 */
private void makeRoomForWrite(boolean force) {
    Preconditions.checkState(mutex.isHeldByCurrentThread());

    boolean allowDelay = !force;

    while (true) {
        // todo background processing system need work
        // if (!bg_error_.ok()) {
        //     // Yield previous error
        //     s = bg_error_;
        //     break;
        // } else
        if (allowDelay && versions.numberOfFilesInLevel(0) > L0_SLOWDOWN_WRITES_TRIGGER) {
            // We are getting close to hitting a hard limit on the number of
            // L0 files. Rather than delaying a single write by several
            // seconds when we hit the hard limit, start delaying each
            // individual write by 1ms to reduce latency variance. Also,
            // this delay hands over some CPU to the compaction thread in
            // case it is sharing the same core as the writer.
            try {
                // Sleep WITHOUT the lock so compaction can make progress.
                mutex.unlock();
                Thread.sleep(1);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new RuntimeException(e);
            } finally {
                mutex.lock();
            }

            // Do not delay a single write more than once
            allowDelay = false;
        } else if (!force && memTable.approximateMemoryUsage() <= options.writeBufferSize()) {
            // There is room in current memtable
            break;
        } else if (immutableMemTable != null) {
            // We have filled up the current memtable, but the previous
            // one is still being compacted, so we wait.
            backgroundCondition.awaitUninterruptibly();
        } else if (versions.numberOfFilesInLevel(0) >= L0_STOP_WRITES_TRIGGER) {
            // There are too many level-0 files: hard stop until compaction
            // catches up.
            // Log(options_.info_log, "waiting...\n");
            backgroundCondition.awaitUninterruptibly();
        } else {
            // Attempt to switch to a new memtable and trigger compaction of old
            Preconditions.checkState(versions.getPrevLogNumber() == 0);

            // close the existing log
            try {
                log.close();
            } catch (IOException e) {
                throw new RuntimeException("Unable to close log file " + log.getFile(), e);
            }

            // open a new log
            long logNumber = versions.getNextFileNumber();
            try {
                this.log =
                        Logs.createLogWriter(
                                new File(databaseDir, Filename.logFileName(logNumber)), logNumber);
            } catch (IOException e) {
                throw new RuntimeException(
                        "Unable to open new log file "
                                + new File(databaseDir, Filename.logFileName(logNumber)).getAbsoluteFile(),
                        e);
            }

            // create a new mem table; the old one becomes immutable and will
            // be flushed by the background compaction.
            immutableMemTable = memTable;
            memTable = new MemTable(internalKeyComparator);

            // Do not force another compaction there is space available
            force = false;
            maybeScheduleCompaction();
        }
    }
}
/**
 * Opens (or creates) a leveldb database in {@code databaseDir}: validates
 * compression availability, builds the comparator/memtable/compaction
 * machinery, locks the directory, recovers the current version plus any
 * newer log files, opens a fresh transaction log, and schedules compaction.
 *
 * @param options     database options (compression may be downgraded to
 *                    NONE if the requested codec is unavailable)
 * @param databaseDir directory holding the database files; created if absent
 * @throws IOException on recovery or log-creation failure
 */
public DbImpl(Options options, File databaseDir) throws IOException {
    Preconditions.checkNotNull(options, "options is null");
    Preconditions.checkNotNull(databaseDir, "databaseDir is null");
    this.options = options;

    if (this.options.compressionType() == CompressionType.ZLIB && !Zlib.available()) {
        // There's little hope to continue.
        this.options.compressionType(CompressionType.NONE);
    }
    if (this.options.compressionType() == CompressionType.SNAPPY && !Snappy.available()) {
        // Disable snappy if it's not available.
        this.options.compressionType(CompressionType.NONE);
    }

    this.databaseDir = databaseDir;

    // use custom comparator if set
    DBComparator comparator = options.comparator();
    UserComparator userComparator;
    if (comparator != null) {
        userComparator = new CustomUserComparator(comparator);
    } else {
        userComparator = new BytewiseComparator();
    }
    internalKeyComparator = new InternalKeyComparator(userComparator);
    memTable = new MemTable(internalKeyComparator);
    immutableMemTable = null;

    ThreadFactory compactionThreadFactory =
            new ThreadFactoryBuilder()
                    .setNameFormat("leveldb-compaction-%s")
                    .setUncaughtExceptionHandler(
                            new UncaughtExceptionHandler() {
                                @Override
                                public void uncaughtException(Thread t, Throwable e) {
                                    // todo need a real UncaughtExceptionHandler
                                    System.out.printf("%s%n", t);
                                    e.printStackTrace();
                                }
                            })
                    .build();
    compactionExecutor = Executors.newSingleThreadExecutor(compactionThreadFactory);

    // Reserve ten files or so for other uses and give the rest to TableCache.
    int tableCacheSize = options.maxOpenFiles() - 10;
    tableCache =
            new TableCache(
                    databaseDir,
                    tableCacheSize,
                    new InternalUserComparator(internalKeyComparator),
                    options.verifyChecksums());

    // create the version set

    // create the database dir if it does not already exist
    databaseDir.mkdirs();
    Preconditions.checkArgument(
            databaseDir.exists(),
            "Database directory '%s' does not exist and could not be created",
            databaseDir);
    Preconditions.checkArgument(
            databaseDir.isDirectory(), "Database directory '%s' is not a directory", databaseDir);

    mutex.lock();
    try {
        // lock the database dir
        dbLock = new DbLock(new File(databaseDir, Filename.lockFileName()));

        // verify the "current" file
        File currentFile = new File(databaseDir, Filename.currentFileName());
        if (!currentFile.canRead()) {
            Preconditions.checkArgument(
                    options.createIfMissing(),
                    "Database '%s' does not exist and the create if missing option is disabled",
                    databaseDir);
        } else {
            Preconditions.checkArgument(
                    !options.errorIfExists(),
                    "Database '%s' exists and the error if exists option is enabled",
                    databaseDir);
        }

        versions = new VersionSet(databaseDir, tableCache, internalKeyComparator);

        // load (and recover) current version
        versions.recover();

        // Recover from all newer log files than the ones named in the
        // descriptor (new log files may have been added by the previous
        // incarnation without registering them in the descriptor).
        //
        // Note that PrevLogNumber() is no longer used, but we pay
        // attention to it in case we are recovering a database
        // produced by an older version of leveldb.
        long minLogNumber = versions.getLogNumber();
        long previousLogNumber = versions.getPrevLogNumber();
        List<File> filenames = Filename.listFiles(databaseDir);
        List<Long> logs = Lists.newArrayList();
        for (File filename : filenames) {
            FileInfo fileInfo = Filename.parseFileName(filename);
            if (fileInfo != null
                    && fileInfo.getFileType() == FileType.LOG
                    && ((fileInfo.getFileNumber() >= minLogNumber)
                            || (fileInfo.getFileNumber() == previousLogNumber))) {
                logs.add(fileInfo.getFileNumber());
            }
        }

        // Recover in the order in which the logs were generated
        VersionEdit edit = new VersionEdit();
        Collections.sort(logs);
        for (Long fileNumber : logs) {
            long maxSequence = recoverLogFile(fileNumber, edit);
            if (versions.getLastSequence() < maxSequence) {
                versions.setLastSequence(maxSequence);
            }
        }

        // open transaction log
        long logFileNumber = versions.getNextFileNumber();
        this.log =
                Logs.createLogWriter(
                        new File(databaseDir, Filename.logFileName(logFileNumber)), logFileNumber);
        edit.setLogNumber(log.getFileNumber());

        // apply recovered edits
        versions.logAndApply(edit);

        // cleanup unused files
        deleteObsoleteFiles();

        // schedule compactions
        maybeScheduleCompaction();
    } finally {
        mutex.unlock();
    }
}
public void GetClientCommand() throws Exception { // +----+-----+-------+------+----------+----------+ // |VER | CMD | RSV | ATYP | DST.ADDR | DST.PORT | // +----+-----+-------+------+----------+----------+ // | 1 | 1 | X'00' | 1 | Variable | 2 | // +----+-----+-------+------+----------+----------+ int Addr_Len; SOCKS_Version = GetByte(); Command = GetByte(); RSV = GetByte(); ATYP = GetByte(); // Address Addr_Len = ADDR_Size[ATYP]; DST_Addr[0] = GetByte(); // Shift Out " " 0x0e if (ATYP == 0x03) { Addr_Len = DST_Addr[0] + 1; // | len | [0]SO | 192 .... | } for (int i = 1; i < Addr_Len; i++) { DST_Addr[i] = GetByte(); } // Port DST_Port[0] = GetByte(); DST_Port[1] = GetByte(); // --------------------- if (SOCKS_Version != SOCKS5_Version) { Logs.Println( Logger.ERROR, "SOCKS 5 - Incorrect SOCKS Version of Command: " + SOCKS_Version, true); Refuse_Command((byte) 0xFF); throw new Exception("Incorrect SOCKS Version of Command: " + SOCKS_Version); } if ((Command < SC_CONNECT) || (Command > SC_UDP)) { Logs.Println( Logger.ERROR, "SOCKS 5 - GetClientCommand() - Unsupported Command : \"" + commName(Command) + "\"", true); Refuse_Command((byte) 0x07); throw new Exception("SOCKS 5 - Unsupported Command: \"" + Command + "\""); } if (ATYP == 0x04) { Logs.Println( Logger.ERROR, "SOCKS 5 - GetClientCommand() - Unsupported Address Type - IP v6", true); Refuse_Command((byte) 0x08); throw new Exception("Unsupported Address Type - IP v6"); } if ((ATYP >= 0x04) || (ATYP <= 0)) { Logs.Println( Logger.ERROR, "SOCKS 5 - GetClientCommand() - Unsupported Address Type: " + ATYP, true); Refuse_Command((byte) 0x08); throw new Exception("SOCKS 5 - Unsupported Address Type: " + ATYP); } if (!Calculate_Address()) { // Gets the IP Address Refuse_Command((byte) 0x04); // Host Not Exists... throw new Exception("SOCKS 5 - Unknown Host/IP address '" + RemoteHost.toString() + "'"); } Logs.Println( Logger.INFO, "SOCKS 5 - Accepted SOCKS5 Command: \"" + commName(Command) + "\"", true); }
// ----------------------- private void Authenticate() { GetUserInfo(); Logs.setUsername(Username); Parent.SendToClient(SRE_AuthSuccess); }
// ----------------------- public void Refuse_Authentication(String msg) { Logs.PrintlnProxy(Logger.ERROR, "SOCKS 5 - Refuse Authentication: '" + msg + "'", true); Parent.SendToClient(SRE_Refuse); Parent.Close(); }
/**
 * Bukkit plugin disable hook: logs the shutdown message and persists the
 * stone-generator configuration.
 */
public void onDisable() {
    String shutdownMessage = "Plugin Stoniarki v" + v + " by NorthPL wylacza sie! :(";
    Logs.Log(shutdownMessage);
    ConfigManager.save("stone");
}