/** * Creates an option container * * @param index The index of the option container (0..n). */ public OptionContainer(int index) throws OptionException { this.index = index; savedOptionMap = Collections.synchronizedSortedMap(new TreeMap<Option, Object>()); dependenciesResolvedOptionMap = Collections.synchronizedSortedMap(new TreeMap<Option, Object>()); commandLineOptionMap = Collections.synchronizedSortedMap(new TreeMap<Option, Object>()); optionFileOptionMap = Collections.synchronizedSortedMap(new TreeMap<Option, Object>()); }
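All of the constructors in this section wrap a TreeMap with Collections.synchronizedSortedMap. A minimal, self-contained sketch of that idiom follows (the class and map names here are illustrative, not taken from the snippets above); note that, per the java.util.Collections contract, individual calls on the wrapper are thread-safe, but iteration still requires synchronizing on the wrapper itself.

import java.util.Collections;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;

public class SynchronizedSortedMapDemo {
    public static void main(String[] args) {
        // Individual operations on the wrapper are thread-safe without extra locking.
        SortedMap<String, Integer> scores =
                Collections.synchronizedSortedMap(new TreeMap<String, Integer>());
        scores.put("alpha", 1);
        scores.put("beta", 2);

        // Iteration is not atomic: hold the wrapper's monitor while traversing,
        // otherwise a concurrent writer can cause a ConcurrentModificationException.
        synchronized (scores) {
            for (Map.Entry<String, Integer> entry : scores.entrySet()) {
                System.out.println(entry.getKey() + " -> " + entry.getValue());
            }
        }
    }
}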
/** @param iCapacity Maximum number of entries that the cache can hold */ public DBSubsetCache(int iCapacity) { iUsed = 0; iTopIndex = 0; iCacheCapacity = iCapacity; LRUList = new String[iCacheCapacity]; for (int s = 0; s < iCacheCapacity; s++) LRUList[s] = null; oCache = Collections.synchronizedSortedMap(new TreeMap<String, DBCacheEntry>()); }
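For comparison, a capacity-bounded cache like DBSubsetCache can also be sketched on top of LinkedHashMap's access-order mode, which evicts the least-recently-used entry automatically. This is only an illustration of the same bounded-cache idea, not the original implementation (which tracks recency in a separate LRUList array).

import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative LRU cache: an access-order LinkedHashMap evicts the eldest
// (least recently used) entry once the configured capacity is exceeded.
class LruCache<K, V> extends LinkedHashMap<K, V> {
    private final int capacity;

    LruCache(int capacity) {
        super(16, 0.75f, true); // accessOrder = true: reads move entries to the end
        this.capacity = capacity;
    }

    @Override
    protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
        return size() > capacity; // drop the least-recently-used entry when full
    }
}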
/** Creates a new instance of CMapFormat4 */ protected CMapFormat4(short language) { super((short) 4, language); segments = Collections.synchronizedSortedMap(new TreeMap<Segment, Object>()); char[] map = new char[1]; map[0] = (char) 0; addSegment((short) 0xffff, (short) 0xffff, map); }
/** * Initializes this object with the measurement target. * * @param measuredObject the object to be measured */ public MetricsInfo(final T measuredObject) { if (null == measuredObject) { throw new NullPointerException(); } this.measuredObject = measuredObject; this.metrics = Collections.synchronizedSortedMap( new TreeMap<AbstractPlugin, Number>(new MetricTypeAndNamePluginComparator())); }
public BoteMailbox(EmailFolder folder, long uidValidity, long uid) { super(new MailboxPath("I2P-Bote", "bote", folder.getName()), uidValidity); this.folder = folder; this.messageMap = Collections.synchronizedSortedMap( new TreeMap<Email, BoteMessage>( new Comparator<Email>() { @Override public int compare(Email email1, Email email2) { // Try received dates first; this is set for all received emails. If not set, this is a sent email, so use the sent date. Date msg1date = email1.getReceivedDate(); if (msg1date == null) try { msg1date = email1.getSentDate(); } catch (MessagingException e) { } Date msg2date = email2.getReceivedDate(); if (msg2date == null) try { msg2date = email2.getSentDate(); } catch (MessagingException e) { } if (msg1date != null && msg2date != null) return msg1date.compareTo(msg2date); // Catch-all return email1.getMessageID().compareTo(email2.getMessageID()); } })); this.uid = uid; modSeq = System.currentTimeMillis(); modSeq <<= 32; startListening(); try { List<Email> emails = folder.getElements(); for (Email email : emails) messageMap.put(email, new BoteMessage(email, getFolderName())); updateMessages(); } catch (PasswordException e) { throw new RuntimeException(_t("Password required or invalid password provided"), e); } }
/** * Regionserver which provides transactional support for atomic transactions. This is achieved with * optimistic concurrency control (see http://www.seas.upenn.edu/~zives/cis650/papers/opt-cc.pdf). * We keep track of read and write sets for each transaction, and hold off on processing the writes. To * decide whether to commit a transaction, we check its read set against the transactions that committed * while it was running, looking for overlaps. * * <p>Because transactions can span multiple regions, all regions must agree to commit a * transaction. The client side of this commit protocol is encoded in * org.apache.hadoop.hbase.client.transactional.TransactionManager. * * <p>In the event of a failure of the client mid-commit (after we have voted yes), we will have to * consult the transaction log to determine the final decision of the transaction. This is not yet * implemented. */ public class TransactionalRegion extends HRegion { private static final String OLD_TRANSACTION_FLUSH = "hbase.transaction.flush"; private static final int DEFAULT_OLD_TRANSACTION_FLUSH = 100; // Do a flush if we have this many old transactions. static final Log LOG = LogFactory.getLog(TransactionalRegion.class); // Collection of active transactions (PENDING) keyed by id. protected Map<String, TransactionState> transactionsById = new HashMap<String, TransactionState>(); // Map of recent transactions that are COMMIT_PENDING or COMMITED keyed by // their sequence number private SortedMap<Integer, TransactionState> commitedTransactionsBySequenceNumber = Collections.synchronizedSortedMap(new TreeMap<Integer, TransactionState>()); // Collection of transactions that are COMMIT_PENDING private Set<TransactionState> commitPendingTransactions = Collections.synchronizedSet(new HashSet<TransactionState>()); private AtomicInteger nextSequenceId = new AtomicInteger(0); private Object commitCheckLock = new Object(); private THLog transactionLog; private final int oldTransactionFlushTrigger; private Leases transactionLeases; /** * @param basedir * @param log * @param fs * @param conf * @param regionInfo * @param flushListener */ public TransactionalRegion( final Path basedir, final HLog log, final FileSystem fs, final Configuration conf, final HRegionInfo regionInfo, final FlushRequester flushListener) { super(basedir, log, fs, conf, regionInfo, flushListener); oldTransactionFlushTrigger = conf.getInt(OLD_TRANSACTION_FLUSH, DEFAULT_OLD_TRANSACTION_FLUSH); } /** * Open HRegion. Calls initialize and sets the sequence id on both the regular WAL and the trx WAL.
* * @param reporter * @return Returns <code>this</code> * @throws IOException */ @Override protected HRegion openHRegion(final CancelableProgressable reporter) throws IOException { super.openHRegion(reporter); if (this.transactionLog != null) { this.transactionLog.setSequenceNumber(super.getLog().getSequenceNumber()); } return this; } /* protected long replayRecoveredEditsIfAny(final Path regiondir, final long minSeqId, final Progressable reporter) throws UnsupportedEncodingException, IOException { long maxSeqId = super.replayRecoveredEdits(regiondir, minSeqId, reporter); Path recoveredEdits = new Path(regiondir, HLogSplitter.RECOVERED_EDITS); doReconstructionLog(recoveredEdits, minSeqId, maxSeqId, reporter); return maxSeqId; }*/ protected void doReconstructionLog( final Path oldCoreLogFile, final long minSeqId, final long maxSeqId, final Progressable reporter) throws UnsupportedEncodingException, IOException { Path trxPath = new Path(oldCoreLogFile.getParent(), THLog.HREGION_OLD_THLOGFILE_NAME); // We can ignore doing anything with the Trx Log table; it is not transactional. if (super.getTableDesc().getNameAsString().equals(HBaseBackedTransactionLogger.TABLE_NAME)) { return; } THLogRecoveryManager recoveryManager = new THLogRecoveryManager(this); Map<Long, WALEdit> commitedTransactionsById = recoveryManager.getCommitsFromLog(trxPath, minSeqId, reporter); if (commitedTransactionsById != null && commitedTransactionsById.size() > 0) { LOG.debug("found " + commitedTransactionsById.size() + " COMMITED transactions to recover."); for (Entry<Long, WALEdit> entry : commitedTransactionsById.entrySet()) { LOG.debug( "Writing " + entry.getValue().size() + " updates for transaction " + entry.getKey()); WALEdit b = entry.getValue(); for (KeyValue kv : b.getKeyValues()) { // FIXME need to convert these into puts and deletes. Not sure this is the right way. // Could probably combine multiple KV's into a single put/delete. Also timestamps? if (kv.getType() == KeyValue.Type.Put.getCode()) { Put put = new Put(); put.add(kv); super.put(put); } else if (kv.isDelete()) { Delete del = new Delete(kv.getRow()); if (kv.isDeleteFamily()) { del.deleteFamily(kv.getFamily()); } else if (kv.isDeleteType()) { del.deleteColumn(kv.getFamily(), kv.getQualifier()); } } } } LOG.debug("Flushing cache"); // We must trigger a cache flush; otherwise we would ignore the log on a subsequent failure if (!super.flushcache()) { LOG.warn("Did not flush cache"); } } } /** * We need to make sure that we don't complete a cache flush between running transactions. If we * did, then we would not find all log messages needed to restore the transaction, as some of them * would be before the last "complete" flush id.
*/ @Override protected long getCompleteCacheFlushSequenceId(final long currentSequenceId) { LinkedList<TransactionState> transactionStates; synchronized (transactionsById) { transactionStates = new LinkedList<TransactionState>(transactionsById.values()); } long minPendingStartSequenceId = currentSequenceId; for (TransactionState transactionState : transactionStates) { minPendingStartSequenceId = Math.min(minPendingStartSequenceId, transactionState.getHLogStartSequenceId()); } return minPendingStartSequenceId; } /** * @param transactionId * @throws IOException */ public void beginTransaction(final long transactionId) throws IOException { checkClosing(); String key = String.valueOf(transactionId); if (transactionsById.get(key) != null) { TransactionState alias = getTransactionState(transactionId); if (alias != null) { alias.setStatus(Status.ABORTED); retireTransaction(alias); } LOG.error( "Existing transaction with id [" + key + "] in region [" + super.getRegionInfo().getRegionNameAsString() + "]"); throw new IOException("Already existing transaction id: " + key); } TransactionState state = new TransactionState( transactionId, super.getLog().getSequenceNumber(), super.getRegionInfo()); state.setStartSequenceNumber(nextSequenceId.get()); List<TransactionState> commitPendingCopy = new ArrayList<TransactionState>(commitPendingTransactions); for (TransactionState commitPending : commitPendingCopy) { state.addTransactionToCheck(commitPending); } synchronized (transactionsById) { transactionsById.put(key, state); } try { transactionLeases.createLease(getLeaseId(transactionId), new TransactionLeaseListener(key)); } catch (LeaseStillHeldException e) { LOG.error( "Lease still held for [" + key + "] in region [" + super.getRegionInfo().getRegionNameAsString() + "]"); throw new RuntimeException(e); } LOG.debug( "Beginning transaction " + key + " in region " + super.getRegionInfo().getRegionNameAsString()); maybeTriggerOldTransactionFlush(); } private String getLeaseId(final long transactionId) { return super.getRegionInfo().getRegionNameAsString() + transactionId; } public Result get(final long transactionId, final Get get) throws IOException { Scan scan = new Scan(get); List<KeyValue> results = new ArrayList<KeyValue>(); InternalScanner scanner = null; try { scanner = getScanner(transactionId, scan); scanner.next(results); } finally { if (scanner != null) { scanner.close(); } } return new Result(results); } /** Get a transactional scanner.
*/ public RegionScanner getScanner(final long transactionId, final Scan scan) throws IOException { checkClosing(); TransactionState state = getTransactionState(transactionId); state.addScan(scan); List<KeyValueScanner> scanners = new ArrayList<KeyValueScanner>(1); scanners.add(state.getScanner(scan)); return super.getScanner(wrapWithDeleteFilter(scan, state), scanners); } private Scan wrapWithDeleteFilter(final Scan scan, final TransactionState state) { FilterBase deleteFilter = new FilterBase() { private boolean rowFiltered = false; @Override public void reset() { rowFiltered = false; } @Override public boolean hasFilterRow() { return true; } @Override public void filterRow(final List<KeyValue> kvs) { state.applyDeletes(kvs, scan.getTimeRange().getMin(), scan.getTimeRange().getMax()); rowFiltered = kvs.isEmpty(); } @Override public boolean filterRow() { return rowFiltered; } @Override public void write(final DataOutput out) throws IOException { // does nothing } @Override public void readFields(final DataInput in) throws IOException { // does nothing } }; if (scan.getFilter() == null) { scan.setFilter(deleteFilter); return scan; } FilterList wrappedFilter = new FilterList(Arrays.asList(deleteFilter, scan.getFilter())); scan.setFilter(wrappedFilter); return scan; } /** * Add a write to the transaction. Does not get applied until the commit process. * * @param transactionId * @param put * @throws IOException */ public void put(final long transactionId, final Put put) throws IOException { checkClosing(); TransactionState state = getTransactionState(transactionId); state.addWrite(put); } /** * Add multiple writes to the transaction. Does not get applied until the commit process. * * @param transactionId * @param puts * @throws IOException */ public void put(final long transactionId, final Put[] puts) throws IOException { checkClosing(); TransactionState state = getTransactionState(transactionId); for (Put put : puts) { state.addWrite(put); } } /** * Add a delete to the transaction. Does not get applied until the commit process. * * @param transactionId * @param delete * @throws IOException */ public void delete(final long transactionId, final Delete delete) throws IOException { checkClosing(); TransactionState state = getTransactionState(transactionId); state.addDelete(delete); } /** * @param transactionId * @return TransactionRegionInterface commit code * @throws IOException */ public int commitRequest(final long transactionId) throws IOException { checkClosing(); synchronized (commitCheckLock) { TransactionState state = getTransactionState(transactionId); if (state == null) { return TransactionalRegionInterface.COMMIT_UNSUCESSFUL; } if (hasConflict(state)) { state.setStatus(Status.ABORTED); retireTransaction(state); return TransactionalRegionInterface.COMMIT_UNSUCESSFUL; } // No conflicts, we can commit. LOG.trace( "No conflicts for transaction " + transactionId + " found in region " + super.getRegionInfo().getRegionNameAsString() + ". Voting for commit"); // If there are writes we must keep a record of the transaction if (state.hasWrite()) { // Order is important state.setStatus(Status.COMMIT_PENDING); commitPendingTransactions.add(state); state.setSequenceNumber(nextSequenceId.getAndIncrement()); commitedTransactionsBySequenceNumber.put(state.getSequenceNumber(), state); transactionLog.writeCommitResuestToLog(getRegionInfo(), state); return TransactionalRegionInterface.COMMIT_OK; } // Otherwise we were read-only and committable, so we can forget it.
state.setStatus(Status.COMMITED); retireTransaction(state); return TransactionalRegionInterface.COMMIT_OK_READ_ONLY; } } /** * @param transactionId * @return true if commit is successful * @throws IOException */ public boolean commitIfPossible(final long transactionId) throws IOException { int status = commitRequest(transactionId); if (status == TransactionalRegionInterface.COMMIT_OK) { commit(transactionId); return true; } else if (status == TransactionalRegionInterface.COMMIT_OK_READ_ONLY) { return true; } return false; } private boolean hasConflict(final TransactionState state) { // Check transactions that were committed while we were running for (int i = state.getStartSequenceNumber(); i < nextSequenceId.get(); i++) { TransactionState other = commitedTransactionsBySequenceNumber.get(i); if (other == null) { continue; } state.addTransactionToCheck(other); } return state.hasConflict(); } /** * Commit the transaction. * * @param transactionId * @throws IOException */ public void commit(final long transactionId) throws IOException { TransactionState state; try { state = getTransactionState(transactionId); } catch (UnknownTransactionException e) { LOG.fatal( "Asked to commit unknown transaction: " + transactionId + " in region " + super.getRegionInfo().getRegionNameAsString()); // TODO. Anything to handle here? throw e; } if (!state.getStatus().equals(Status.COMMIT_PENDING)) { LOG.fatal("Asked to commit a non-pending transaction"); // TODO. Anything to handle here? throw new IOException("commit failure"); } commit(state); } /** * Abort the transaction. * * @param transactionId * @throws IOException */ public void abort(final long transactionId) throws IOException { // Not checking closing... TransactionState state; try { state = getTransactionState(transactionId); } catch (UnknownTransactionException e) { LOG.info( "Asked to abort unknown transaction [" + transactionId + "] in region [" + getRegionInfo().getRegionNameAsString() + "], ignoring"); return; } state.setStatus(Status.ABORTED); if (state.hasWrite()) { this.transactionLog.writeAbortToLog(super.getRegionInfo(), state.getTransactionId()); } // The following removes are only needed if we have voted if (state.getSequenceNumber() != null) { commitedTransactionsBySequenceNumber.remove(state.getSequenceNumber()); } commitPendingTransactions.remove(state); retireTransaction(state); } private void commit(final TransactionState state) throws IOException { LOG.debug( "Committing transaction: " + state.toString() + " to " + super.getRegionInfo().getRegionNameAsString()); // Perform write operations timestamped to right now List<WriteAction> writeOrdering = state.getWriteOrdering(); for (WriteAction action : writeOrdering) { Put put = action.getPut(); if (null != put) { this.put(put, true); } Delete delete = action.getDelete(); if (null != delete) { delete(delete, null, true); } } // Now that the transactional writes live in the core WAL, we can write a commit to the log so we don't have to recover it from the transactional WAL. if (state.hasWrite()) { this.transactionLog.writeCommitToLog(super.getRegionInfo(), state.getTransactionId()); } state.setStatus(Status.COMMITED); if (state.hasWrite() && !commitPendingTransactions.remove(state)) { LOG.fatal("Committing a non-query transaction that is not in commitPendingTransactions"); // Something has gone really wrong.
throw new IOException("commit failure"); } retireTransaction(state); } @Override public List<StoreFile> close(final boolean abort) throws IOException { prepareToClose(); if (!commitPendingTransactions.isEmpty()) { LOG.warn( "Closing transactional region [" + getRegionInfo().getRegionNameAsString() + "], but still have [" + commitPendingTransactions.size() + "] transactions that are pending commit."); // TODO resolve from the Global Trx Log. } return super.close(abort); } @Override protected void prepareToSplit() { prepareToClose(); } boolean closing = false; private static final int CLOSE_WAIT_ON_COMMIT_PENDING = 1000; /** Get ready to close. */ void prepareToClose() { if (closing) { return; } LOG.info("Preparing to close region " + getRegionInfo().getRegionNameAsString()); closing = true; while (!commitPendingTransactions.isEmpty()) { LOG.info( "Preparing to closing transactional region [" + getRegionInfo().getRegionNameAsString() + "], but still have [" + commitPendingTransactions.size() + "] transactions that are pending commit. Sleeping"); for (TransactionState s : commitPendingTransactions) { LOG.info("commit pending: " + s.toString()); } try { Thread.sleep(CLOSE_WAIT_ON_COMMIT_PENDING); } catch (InterruptedException e) { throw new RuntimeException(e); } } } private void checkClosing() throws IOException { if (closing) { throw new IOException("closing region, no more transaction allowed"); } } // Cancel leases, and removed from lease lookup. This transaction may still // live in commitedTransactionsBySequenceNumber and commitPendingTransactions private void retireTransaction(final TransactionState state) { String key = String.valueOf(state.getTransactionId()); try { transactionLeases.cancelLease(getLeaseId(state.getTransactionId())); } catch (LeaseException e) { // Ignore } synchronized (transactionsById) { transactionsById.remove(key); } } protected TransactionState getTransactionState(final long transactionId) throws UnknownTransactionException { String key = String.valueOf(transactionId); TransactionState state = null; state = transactionsById.get(key); if (state == null) { LOG.debug( "Unknown transaction: [" + key + "], region: [" + getRegionInfo().getRegionNameAsString() + "]"); throw new UnknownTransactionException( "transaction: [" + key + "], region: [" + getRegionInfo().getRegionNameAsString() + "]"); } try { transactionLeases.renewLease(getLeaseId(transactionId)); } catch (LeaseException e) { throw new RuntimeException(e); } return state; } private void maybeTriggerOldTransactionFlush() { if (commitedTransactionsBySequenceNumber.size() > oldTransactionFlushTrigger) { removeUnNeededCommitedTransactions(); } } /** Cleanup references to committed transactions that are no longer needed. */ synchronized void removeUnNeededCommitedTransactions() { Integer minStartSeqNumber = getMinStartSequenceNumber(); if (minStartSeqNumber == null) { minStartSeqNumber = Integer.MAX_VALUE; // Remove all } int numRemoved = 0; // Copy list to avoid conc update exception for (Entry<Integer, TransactionState> entry : new LinkedList<Entry<Integer, TransactionState>>( commitedTransactionsBySequenceNumber.entrySet())) { if (entry.getKey() >= minStartSeqNumber) { break; } numRemoved = numRemoved + (commitedTransactionsBySequenceNumber.remove(entry.getKey()) == null ? 
0 : 1); } if (LOG.isDebugEnabled()) { StringBuilder debugMessage = new StringBuilder(); if (numRemoved > 0) { debugMessage.append("Removed [").append(numRemoved).append("] committed transactions"); if (minStartSeqNumber == Integer.MAX_VALUE) { debugMessage.append(" with any sequence number."); } else { debugMessage.append(" with sequence lower than [").append(minStartSeqNumber).append("]."); } if (!commitedTransactionsBySequenceNumber.isEmpty()) { debugMessage .append(" Still have [") .append(commitedTransactionsBySequenceNumber.size()) .append("] left."); } else { debugMessage.append(" None left."); } LOG.debug(debugMessage.toString()); } else if (commitedTransactionsBySequenceNumber.size() > 0) { debugMessage .append("Could not remove any transactions, and still have ") .append(commitedTransactionsBySequenceNumber.size()) .append(" left"); LOG.debug(debugMessage.toString()); } } } private Integer getMinStartSequenceNumber() { List<TransactionState> transactionStates; synchronized (transactionsById) { transactionStates = new ArrayList<TransactionState>(transactionsById.values()); } Integer min = null; for (TransactionState transactionState : transactionStates) { if (min == null || transactionState.getStartSequenceNumber() < min) { min = transactionState.getStartSequenceNumber(); } } return min; } private void resolveTransactionFromLog(final TransactionState transactionState) throws IOException { LOG.error( "Global transaction log is not implemented. (Optimistically) assuming transaction commit!"); commit(transactionState); // throw new RuntimeException("Global transaction log is not Implemented"); } private static final int MAX_COMMIT_PENDING_WAITS = 10; private class TransactionLeaseListener implements LeaseListener { private final String transactionName; TransactionLeaseListener(final String n) { this.transactionName = n; } @Override public void leaseExpired() { LOG.info( "Transaction [" + this.transactionName + "] expired in region [" + getRegionInfo().getRegionNameAsString() + "]"); TransactionState s = null; synchronized (transactionsById) { s = transactionsById.remove(transactionName); } if (s == null) { LOG.warn("Unknown transaction expired " + this.transactionName); return; } switch (s.getStatus()) { case PENDING: s.setStatus(Status.ABORTED); // Other transactions may have a ref break; case COMMIT_PENDING: LOG.info("Transaction " + s.getTransactionId() + " expired in COMMIT_PENDING state"); try { if (s.getCommitPendingWaits() > MAX_COMMIT_PENDING_WAITS) { LOG.info("Checking transaction status in transaction log"); resolveTransactionFromLog(s); break; } LOG.info("renewing lease and hoping for commit"); s.incrementCommitPendingWaits(); String key = Long.toString(s.getTransactionId()); transactionsById.put(key, s); try { transactionLeases.createLease(getLeaseId(s.getTransactionId()), this); } catch (LeaseStillHeldException e) { transactionLeases.renewLease(getLeaseId(s.getTransactionId())); } } catch (IOException e) { throw new RuntimeException(e); } break; default: LOG.warn("Unexpected status on expired lease"); } } } public void setTransactionLog(final THLog trxHLog) { this.transactionLog = trxHLog; } public void setTransactionalLeases(final Leases transactionalLeases) { this.transactionLeases = transactionalLeases; } }
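The TransactionalRegion javadoc above describes the optimistic concurrency control scheme: each transaction remembers which transactions committed while it was running and, at commit time, checks whether any of them touched data it read. A simplified, hypothetical sketch of that conflict check follows; the class and field names are stand-ins for illustration, not the HBase TransactionState API.

import java.util.ArrayList;
import java.util.List;
import java.util.Set;

// Hypothetical, stripped-down model of the commit-time conflict check described
// in the class comment: a transaction conflicts if a concurrently committed
// transaction wrote any row this transaction read.
class SimpleTransactionState {
    final int startSequenceNumber;   // highest committed sequence number when this transaction began
    final Set<String> readRows;      // row keys read by this transaction
    final Set<String> writtenRows;   // row keys written by this transaction
    private final List<SimpleTransactionState> transactionsToCheck = new ArrayList<>();

    SimpleTransactionState(int startSequenceNumber, Set<String> readRows, Set<String> writtenRows) {
        this.startSequenceNumber = startSequenceNumber;
        this.readRows = readRows;
        this.writtenRows = writtenRows;
    }

    // Registered for every transaction that committed while this one was running.
    void addTransactionToCheck(SimpleTransactionState other) {
        transactionsToCheck.add(other);
    }

    // True if any concurrently committed transaction wrote a row we read.
    boolean hasConflict() {
        for (SimpleTransactionState other : transactionsToCheck) {
            for (String row : other.writtenRows) {
                if (readRows.contains(row)) {
                    return true;
                }
            }
        }
        return false;
    }
}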
public static void main(String[] args) { try { final Options options = new Options(); { final Option d = new Option("d", "directory", true, "target directory"); d.setArgName("directory"); d.setArgs(1); d.setRequired(true); options.addOption(d); } { final Option ad = new Option("ad", "another-directory", true, "another target directory"); ad.setArgName("another-directory"); ad.setArgs(1); ad.setRequired(false); options.addOption(ad); } { final Option o = new Option("o", "output", true, "output file"); o.setArgName("file"); o.setArgs(1); o.setRequired(true); options.addOption(o); } { final Option s = new Option("s", "size", true, "size"); s.setArgName("size"); s.setArgs(1); s.setRequired(true); options.addOption(s); } { final Option t = new Option("t", "thread", true, "number of threads"); t.setArgName("thread"); t.setArgs(1); t.setRequired(false); options.addOption(t); } { final Option cross = new Option( "cross", "cross-project-only", true, "whether to detect cross project clones only"); cross.setArgName("on or off"); cross.setRequired(false); options.addOption(cross); } { final Option lowmem = new Option("lowmem", "low-memory-mode", true, "whether to run on the low memory mode"); lowmem.setArgName("on or off"); lowmem.setRequired(false); options.addOption(lowmem); } { final Option v = new Option("v", "verbose", true, "verbose output"); v.setArgName("on or off"); v.setRequired(false); options.addOption(v); } { final Option C = new Option("C", "control", true, "use of control dependency"); C.setArgName("on or off"); C.setArgs(1); C.setRequired(false); options.addOption(C); } { final Option D = new Option("D", "data", true, "use of data dependency"); D.setArgName("on or off"); D.setArgs(1); D.setRequired(false); options.addOption(D); } { final Option E = new Option("E", "execution", true, "use of execution dependency"); E.setArgName("on or off"); E.setArgs(1); E.setRequired(false); options.addOption(E); } { final Option M = new Option("M", "merging", true, "merging consecutive similar nodes"); M.setArgName("on or off"); M.setArgs(1); M.setRequired(false); options.addOption(M); } final CommandLineParser parser = new PosixParser(); final CommandLine cmd = parser.parse(options, args); final File target = new File(cmd.getOptionValue("d")); if (!target.exists()) { System.err.println("specified directory or file does not exist."); System.exit(0); } final String output = cmd.getOptionValue("o"); final int SIZE_THRESHOLD = Integer.parseInt(cmd.getOptionValue("s")); final int NUMBER_OF_THREADS = cmd.hasOption("t") ? 
Integer.parseInt(cmd.getOptionValue("t")) : 1; boolean useOfControl = !cmd.hasOption("C"); if (!useOfControl) { if (cmd.getOptionValue("C").equals("on")) { useOfControl = true; } else if (cmd.getOptionValue("C").equals("off")) { useOfControl = false; } else { System.err.println("option of \"-C\" must be \"on\" or \"off\"."); } } boolean useOfData = !cmd.hasOption("D"); if (!useOfData) { if (cmd.getOptionValue("D").equals("on")) { useOfData = true; } else if (cmd.getOptionValue("D").equals("off")) { useOfData = false; } else { System.err.println("option of \"-D\" must be \"on\" or \"off\"."); } } boolean useOfExecution = !cmd.hasOption("E"); if (!useOfExecution) { if (cmd.getOptionValue("E").equals("on")) { useOfExecution = true; } else if (cmd.getOptionValue("E").equals("off")) { useOfExecution = false; } else { System.err.println("option of \"-E\" must be \"on\" or \"off\"."); } } boolean useOfMerging = !cmd.hasOption("M"); if (!useOfMerging) { if (cmd.getOptionValue("M").equals("on")) { useOfMerging = true; } else if (cmd.getOptionValue("M").equals("off")) { useOfMerging = false; } else { System.err.println("option of \"-M\" must be \"on\" or \"off\"."); } } if (!useOfExecution && useOfMerging) { useOfMerging = false; } boolean crossProjectOnly = false; if (cmd.hasOption("cross")) { if (cmd.getOptionValue("cross").equals("on")) { crossProjectOnly = true; } else if (cmd.getOptionValue("cross").equals("off")) { crossProjectOnly = false; } else { System.err.println("option of \"-cross\" must be \"on\" or \"off\"."); } } boolean lowMemoryMode = false; if (cmd.hasOption("lowmem")) { if (cmd.getOptionValue("lowmem").equals("on")) { lowMemoryMode = true; } else if (cmd.getOptionValue("lowmem").equals("off")) { lowMemoryMode = false; } else { System.err.println("option of \"-lowmem\" must be \"on\" or \"off\"."); } } // default verbose level is "off" if (cmd.hasOption("v")) { if (cmd.getOptionValue("v").equals("on")) { Message.setVerbose(true); } else if (cmd.getOptionValue("v").equals("off")) { Message.setVerbose(false); } else { System.err.println("option of \"-v\" must be \"on\" or \"off\"."); } } File anotherTarget = null; if (cmd.hasOption("ad")) { anotherTarget = new File(cmd.getOptionValue("ad")); if (!anotherTarget.exists()) { System.err.println("specified directory or file does not exist."); System.exit(0); } } if (crossProjectOnly && anotherTarget == null) { System.err.println( "detecting cross project only is ON, but no second directory or file has been specified"); System.exit(0); } final long time1 = System.nanoTime(); System.out.print("generating PDGs ... 
"); Message.log(""); final PDG[] pdgArray; { final List<File> files = getFiles(target); if (anotherTarget != null) { files.addAll(getFiles(anotherTarget)); } int count = 0; final int numOfFiles = files.size(); final List<MethodInfo> methods = new ArrayList<MethodInfo>(); for (final File file : files) { Message.log( "\t[" + (++count) + "/" + numOfFiles + "] building an AST for " + file.getAbsolutePath()); final CompilationUnit unit = TinyPDGASTVisitor.createAST(file); final ASTVisitor visitor; if (lowMemoryMode) { visitor = new OffsetBasedTinyPDGASTVisitor(file.getAbsolutePath(), unit, methods); } else { visitor = new TinyPDGASTVisitor(file.getAbsolutePath(), unit, methods); } unit.accept(visitor); } long memoryElapsed = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory(); System.out.println("MEMORY: " + (memoryElapsed / 1024)); final SortedSet<PDG> pdgs = Collections.synchronizedSortedSet(new TreeSet<PDG>()); final CFGNodeFactory cfgNodeFactory = new CFGNodeFactory(); final PDGNodeFactory pdgNodeFactory = new PDGNodeFactory(); final Thread[] pdgGenerationThreads = new Thread[NUMBER_OF_THREADS]; for (int i = 0; i < pdgGenerationThreads.length; i++) { pdgGenerationThreads[i] = new Thread( new PDGGenerationThread( methods, pdgs, cfgNodeFactory, pdgNodeFactory, useOfControl, useOfData, useOfExecution, useOfMerging, SIZE_THRESHOLD)); pdgGenerationThreads[i].start(); } for (final Thread thread : pdgGenerationThreads) { try { thread.join(); } catch (InterruptedException e) { e.printStackTrace(); } } pdgArray = pdgs.toArray(new PDG[0]); } System.out.print("done: "); final long time2 = System.nanoTime(); printTime(time2 - time1); System.out.print("calculating hash values ... "); Message.log(""); final SortedMap<PDG, SortedMap<PDGNode<?>, Integer>> mappingPDGToPDGNodes = Collections.synchronizedSortedMap(new TreeMap<PDG, SortedMap<PDGNode<?>, Integer>>()); final SortedMap<PDG, SortedMap<PDGEdge, Integer>> mappingPDGToPDGEdges = Collections.synchronizedSortedMap(new TreeMap<PDG, SortedMap<PDGEdge, Integer>>()); { final Thread[] hashCalculationThreads = new Thread[NUMBER_OF_THREADS]; for (int i = 0; i < hashCalculationThreads.length; i++) { hashCalculationThreads[i] = new Thread( new HashCalculationThread(pdgArray, mappingPDGToPDGNodes, mappingPDGToPDGEdges)); hashCalculationThreads[i].start(); } for (final Thread thread : hashCalculationThreads) { try { thread.join(); } catch (InterruptedException e) { e.printStackTrace(); } } } System.out.print("done: "); final long time3 = System.nanoTime(); printTime(time3 - time2); System.out.print("detecting clone pairs ... "); Message.log(""); final SortedSet<ClonePairInfo> clonepairs = Collections.synchronizedSortedSet(new TreeSet<ClonePairInfo>()); { int numIgnored = 0; final List<PDGPairInfo> pdgpairs = new ArrayList<PDGPairInfo>(); Message.log("\tmaking PDG pairs ... 
"); for (int i = 0; i < pdgArray.length; i++) { for (int j = i + 1; j < pdgArray.length; j++) { final PDG pdg1 = pdgArray[i]; final PDG pdg2 = pdgArray[j]; if (!crossProjectOnly || isCrossProject(pdg1, pdg2, target, anotherTarget)) { pdgpairs.add(new PDGPairInfo(pdgArray[i], pdgArray[j])); } else { // Message.log("\t\tignore the PDG pair \"" + // pdg1.unit.name + " in " // + pdg1.unit.path + "\" and \"" // + pdg2.unit.name + " in " + pdg2.unit.path // + "\""); numIgnored++; } } } Message.log("\tdone: the number of ignored PDG pairs is " + numIgnored); final PDGPairInfo[] pdgpairArray = pdgpairs.toArray(new PDGPairInfo[0]); final Thread[] slicingThreads = new Thread[NUMBER_OF_THREADS]; for (int i = 0; i < slicingThreads.length; i++) { slicingThreads[i] = new Thread( new SlicingThread( pdgpairArray, pdgArray, mappingPDGToPDGNodes, mappingPDGToPDGEdges, clonepairs, SIZE_THRESHOLD, crossProjectOnly)); slicingThreads[i].start(); } for (final Thread thread : slicingThreads) { try { thread.join(); } catch (InterruptedException e) { e.printStackTrace(); } } } System.out.print("done: "); final long time4 = System.nanoTime(); printTime(time4 - time3); System.out.print("writing to a file ... "); final Writer writer = new BellonWriter(output, clonepairs); writer.write(); System.out.print("done: "); final long time5 = System.nanoTime(); printTime(time5 - time4); System.out.print("total elapsed time: "); printTime(time5 - time1); System.out.print("number of comparisons: "); printNumberOfComparison(Slicing.getNumberOfComparison()); } catch (Exception e) { System.err.println(e.getMessage()); System.exit(0); } }
/** Created with IntelliJ IDEA. User: ex3ndr Date: 03.11.13 Time: 8:51 */ public class Scheduller { private final String TAG; // = "MTProtoScheduller"; // Share identity values across all connections to avoid collisions private static AtomicInteger messagesIds = new AtomicInteger(1); private static HashMap<Long, Long> idGenerationTime = new HashMap<Long, Long>(); private static final int SCHEDULLER_TIMEOUT = 15 * 1000; // 15 sec private static final long CONFIRM_TIMEOUT = 60 * 1000; // 60 sec private static final int MAX_WORKLOAD_SIZE = 1024; private static final int BIG_MESSAGE_SIZE = 1024; private static final long RETRY_TIMEOUT = 15 * 1000; private static final int MAX_ACK_COUNT = 16; private SortedMap<Integer, SchedullerPackage> messages = Collections.synchronizedSortedMap(new TreeMap<Integer, SchedullerPackage>()); private HashSet<Long> currentMessageGeneration = new HashSet<Long>(); private HashSet<Long> confirmedMessages = new HashSet<Long>(); private long firstConfirmTime = 0; private long lastMessageId = 0; private long lastDependId = 0; private int seqNo = 0; private CallWrapper wrapper; public Scheduller(MTProto mtProto, CallWrapper wrapper) { TAG = "MTProto#" + mtProto.getInstanceIndex() + "#Scheduller"; this.wrapper = wrapper; } private synchronized long generateMessageId() { long messageId = TimeOverlord.getInstance().createWeakMessageId(); if (messageId <= lastMessageId) { messageId = lastMessageId = lastMessageId + 4; } while (idGenerationTime.containsKey(messageId)) { messageId += 4; } lastMessageId = messageId; idGenerationTime.put(messageId, getCurrentTime()); currentMessageGeneration.add(messageId); return messageId; } private synchronized int generateSeqNoWeak() { return seqNo * 2; } private synchronized int generateSeqNo() { int res = seqNo * 2 + 1; seqNo++; return res; } private synchronized void generateParams(SchedullerPackage schedullerPackage) { schedullerPackage.messageId = generateMessageId(); schedullerPackage.seqNo = generateSeqNo(); schedullerPackage.idGenerationTime = getCurrentTime(); schedullerPackage.relatedMessageIds.add(schedullerPackage.messageId); schedullerPackage.generatedMessageIds.add(schedullerPackage.messageId); } private long getCurrentTime() { return System.nanoTime() / 1000000; } public long getMessageIdGenerationTime(long msgId) { if (idGenerationTime.containsKey(msgId)) { return idGenerationTime.get(msgId); } return 0; } public int postMessageDelayed( TLObject object, boolean isRpc, long timeout, int delay, int contextId, boolean highPrioroty) { int id = messagesIds.incrementAndGet(); SchedullerPackage schedullerPackage = new SchedullerPackage(id); schedullerPackage.object = object; schedullerPackage.addTime = getCurrentTime(); schedullerPackage.scheduleTime = schedullerPackage.addTime + delay; schedullerPackage.expiresTime = schedullerPackage.scheduleTime + timeout; schedullerPackage.ttlTime = schedullerPackage.scheduleTime + timeout * 2; schedullerPackage.isRpc = isRpc; schedullerPackage.queuedToChannel = contextId; schedullerPackage.priority = highPrioroty ? 
PRIORITY_HIGH : PRIORITY_NORMAL; schedullerPackage.isDepend = highPrioroty; schedullerPackage.supportTag = object.toString(); messages.put(id, schedullerPackage); return id; } public int postMessage(TLObject object, boolean isApi, long timeout) { return postMessageDelayed(object, isApi, timeout, 0, -1, false); } public int postMessage(TLObject object, boolean isApi, long timeout, boolean highPrioroty) { return postMessageDelayed(object, isApi, timeout, 0, -1, highPrioroty); } public synchronized void prepareScheduller(PrepareSchedule prepareSchedule, int[] connectionIds) { long time = getCurrentTime(); // Clear packages for unknown channels outer: for (SchedullerPackage schedullerPackage : messages.values().toArray(new SchedullerPackage[0])) { if (schedullerPackage.queuedToChannel != -1) { for (int id : connectionIds) { if (schedullerPackage.queuedToChannel == id) { continue outer; } } forgetMessage(schedullerPackage.id); } } if (connectionIds.length == 0) { prepareSchedule.setDelay(SCHEDULLER_TIMEOUT); prepareSchedule.setAllowedContexts(connectionIds); prepareSchedule.setDoWait(true); return; } long minDelay = SCHEDULLER_TIMEOUT; boolean allConnections = false; boolean doWait = true; HashSet<Integer> supportedConnections = new HashSet<Integer>(); for (SchedullerPackage schedullerPackage : messages.values().toArray(new SchedullerPackage[0])) { boolean isPendingPackage = false; long packageTime = 0; if (schedullerPackage.state == STATE_QUEUED) { isPendingPackage = true; if (schedullerPackage.scheduleTime <= time) { packageTime = 0; } else { packageTime = Math.max(schedullerPackage.scheduleTime - time, 0); } } else if (schedullerPackage.state == STATE_SENT) { if (getCurrentTime() <= schedullerPackage.expiresTime) { if (schedullerPackage.serialized == null || schedullerPackage.serialized.length < BIG_MESSAGE_SIZE) { if (time - schedullerPackage.lastAttemptTime >= RETRY_TIMEOUT) { isPendingPackage = true; packageTime = 0; } } } } if (isPendingPackage) { if (schedullerPackage.queuedToChannel == -1) { allConnections = true; } else { supportedConnections.add(schedullerPackage.queuedToChannel); } if (packageTime == 0) { minDelay = 0; doWait = false; } else { minDelay = Math.min(packageTime, minDelay); } } } prepareSchedule.setDoWait(doWait); prepareSchedule.setDelay(minDelay); if (allConnections) { prepareSchedule.setAllowedContexts(connectionIds); } else { Integer[] allowedBoxed = supportedConnections.toArray(new Integer[0]); int[] allowed = new int[allowedBoxed.length]; for (int i = 0; i < allowed.length; i++) { allowed[i] = allowedBoxed[i]; } prepareSchedule.setAllowedContexts(allowed); } } public void registerFastConfirm(long msgId, int fastConfirm) { for (SchedullerPackage schedullerPackage : messages.values().toArray(new SchedullerPackage[0])) { boolean contains = false; for (Long relatedMsgId : schedullerPackage.relatedMessageIds) { if (relatedMsgId == msgId) { contains = true; break; } } if (contains) { schedullerPackage.relatedFastConfirm.add(fastConfirm); } } } public int mapSchedullerId(long msgId) { for (SchedullerPackage schedullerPackage : messages.values().toArray(new SchedullerPackage[0])) { if (schedullerPackage.generatedMessageIds.contains(msgId)) { return schedullerPackage.id; } } return 0; } public void resetMessageId() { lastMessageId = 0; lastDependId = 0; } public void resetSession() { lastMessageId = 0; lastDependId = 0; seqNo = 0; currentMessageGeneration.clear(); for (SchedullerPackage schedullerPackage : messages.values().toArray(new SchedullerPackage[0])) { 
schedullerPackage.idGenerationTime = 0; schedullerPackage.dependMessageId = 0; schedullerPackage.messageId = 0; schedullerPackage.seqNo = 0; } } public boolean isMessageFromCurrentGeneration(long msgId) { return currentMessageGeneration.contains(msgId); } public void resendAsNewMessage(long msgId) { resendAsNewMessageDelayed(msgId, 0); } public void resendAsNewMessageDelayed(long msgId, int delay) { for (SchedullerPackage schedullerPackage : messages.values().toArray(new SchedullerPackage[0])) { if (schedullerPackage.relatedMessageIds.contains(msgId)) { schedullerPackage.idGenerationTime = 0; schedullerPackage.dependMessageId = 0; schedullerPackage.messageId = 0; schedullerPackage.seqNo = 0; schedullerPackage.state = STATE_QUEUED; schedullerPackage.scheduleTime = getCurrentTime() + delay; Logger.d(TAG, "Resending as new #" + schedullerPackage.id); } } } public void resendMessage(long msgId) { for (SchedullerPackage schedullerPackage : messages.values().toArray(new SchedullerPackage[0])) { if (schedullerPackage.relatedMessageIds.contains(msgId)) { // schedullerPackage.relatedMessageIds.clear(); schedullerPackage.state = STATE_QUEUED; schedullerPackage.lastAttemptTime = 0; } } } public int[] mapFastConfirm(int fastConfirm) { ArrayList<Integer> res = new ArrayList<Integer>(); for (SchedullerPackage schedullerPackage : messages.values().toArray(new SchedullerPackage[0])) { if (schedullerPackage.state == STATE_SENT) { if (schedullerPackage.relatedFastConfirm.contains(fastConfirm)) { res.add(schedullerPackage.id); } } } int[] res2 = new int[res.size()]; for (int i = 0; i < res2.length; i++) { res2[i] = res.get(i); } return res2; } public void onMessageFastConfirmed(int fastConfirm) { // for (SchedullerPackage schedullerPackage : messages.values().toArray(new // SchedullerPackage[0])) { // if (schedullerPackage.state == STATE_SENT) { // if (schedullerPackage.relatedFastConfirm.contains(fastConfirm)) { // schedullerPackage.state = STATE_CONFIRMED; // } // } // } } public void onMessageConfirmed(long msgId) { for (SchedullerPackage schedullerPackage : messages.values().toArray(new SchedullerPackage[0])) { if (schedullerPackage.state == STATE_SENT) { if (schedullerPackage.relatedMessageIds.contains(msgId)) { schedullerPackage.state = STATE_CONFIRMED; } } } } public void confirmMessage(long msgId) { synchronized (confirmedMessages) { confirmedMessages.add(msgId); if (firstConfirmTime == 0) { firstConfirmTime = getCurrentTime(); } } } public void unableToSendMessage(long messageId) { for (SchedullerPackage schedullerPackage : messages.values().toArray(new SchedullerPackage[0])) { if (schedullerPackage.state == STATE_SENT) { boolean contains = false; for (Long relatedMsgId : schedullerPackage.relatedMessageIds) { if (relatedMsgId == messageId) { contains = true; break; } } if (contains) { schedullerPackage.state = STATE_QUEUED; } } } } public synchronized void forgetMessageByMsgId(long msgId) { int scId = mapSchedullerId(msgId); if (scId > 0) { forgetMessage(scId); } } public synchronized void forgetMessage(int id) { Logger.d(TAG, "Forgetting message: #" + id); messages.remove(id); } private synchronized ArrayList<SchedullerPackage> actualPackages(int contextId) { ArrayList<SchedullerPackage> foundedPackages = new ArrayList<SchedullerPackage>(); long time = getCurrentTime(); for (SchedullerPackage schedullerPackage : messages.values().toArray(new SchedullerPackage[0])) { if (schedullerPackage.queuedToChannel != -1 && contextId != schedullerPackage.queuedToChannel) { continue; } boolean isPendingPackage 
= false; if (schedullerPackage.ttlTime <= getCurrentTime()) { forgetMessage(schedullerPackage.id); continue; } if (schedullerPackage.state == STATE_QUEUED) { if (schedullerPackage.scheduleTime <= time) { isPendingPackage = true; } } else if (schedullerPackage.state == STATE_SENT) { if (getCurrentTime() <= schedullerPackage.expiresTime) { if (schedullerPackage.serialized == null || schedullerPackage.serialized.length < BIG_MESSAGE_SIZE) { if (getCurrentTime() - schedullerPackage.lastAttemptTime >= RETRY_TIMEOUT) { isPendingPackage = true; } } } } if (isPendingPackage) { if (schedullerPackage.serialized == null) { try { if (schedullerPackage.isRpc) { schedullerPackage.serialized = wrapper.wrapObject((TLMethod) schedullerPackage.object).serialize(); } else { schedullerPackage.serialized = schedullerPackage.object.serialize(); } } catch (IOException e) { Logger.e(TAG, e); forgetMessage(schedullerPackage.id); continue; } } foundedPackages.add(schedullerPackage); } } return foundedPackages; } public synchronized PreparedPackage doSchedule(int contextId, boolean isInited) { ArrayList<SchedullerPackage> foundedPackages = actualPackages(contextId); synchronized (confirmedMessages) { if (foundedPackages.size() == 0 && (confirmedMessages.size() <= MAX_ACK_COUNT || (System.nanoTime() - firstConfirmTime) < CONFIRM_TIMEOUT)) { return null; } } boolean useHighPriority = false; for (SchedullerPackage p : foundedPackages) { if (p.priority == PRIORITY_HIGH) { useHighPriority = true; break; } } ArrayList<SchedullerPackage> packages = new ArrayList<SchedullerPackage>(); if (useHighPriority) { Logger.d("Scheduller", "Using high priority scheduling"); int totalSize = 0; for (SchedullerPackage p : foundedPackages) { if (p.priority == PRIORITY_HIGH) { packages.add(p); totalSize += p.serialized.length; if (totalSize > MAX_WORKLOAD_SIZE) { break; } } } } else { int totalSize = 0; for (SchedullerPackage p : foundedPackages) { packages.add(p); Logger.d( "Scheduller", "Prepare package: " + p.supportTag + " of size " + p.serialized.length); totalSize += p.serialized.length; Logger.d("Scheduller", "Total size: " + totalSize); if (totalSize > MAX_WORKLOAD_SIZE) { break; } } } Logger.d(TAG, "Iteration: count: " + packages.size() + ", confirm:" + confirmedMessages.size()); Logger.d(TAG, "Building package"); if (foundedPackages.size() == 0 && confirmedMessages.size() != 0) { Long[] msgIds; synchronized (confirmedMessages) { msgIds = confirmedMessages.toArray(new Long[confirmedMessages.size()]); confirmedMessages.clear(); } MTMsgsAck ack = new MTMsgsAck(msgIds); Logger.d(TAG, "Single msg_ack"); try { return new PreparedPackage( generateSeqNoWeak(), generateMessageId(), ack.serialize(), useHighPriority); } catch (IOException e) { Logger.e(TAG, e); return null; } } else if (foundedPackages.size() == 1 && confirmedMessages.size() == 0) { SchedullerPackage schedullerPackage = foundedPackages.get(0); schedullerPackage.state = STATE_SENT; if (schedullerPackage.idGenerationTime == 0) { generateParams(schedullerPackage); } Logger.d( TAG, "Single package: #" + schedullerPackage.id + " " + schedullerPackage.supportTag + " (" + schedullerPackage.messageId + ", " + schedullerPackage.seqNo + ")"); schedullerPackage.writtenToChannel = contextId; schedullerPackage.lastAttemptTime = getCurrentTime(); return new PreparedPackage( schedullerPackage.seqNo, schedullerPackage.messageId, schedullerPackage.serialized, useHighPriority); } else { MTMessagesContainer container = new MTMessagesContainer(); if ((confirmedMessages.size() > 0 && 
!useHighPriority) || (!isInited)) { try { Long[] msgIds; synchronized (confirmedMessages) { msgIds = confirmedMessages.toArray(new Long[0]); confirmedMessages.clear(); } MTMsgsAck ack = new MTMsgsAck(msgIds); Logger.d(TAG, "Adding msg_ack: " + msgIds.length); container .getMessages() .add(new MTMessage(generateMessageId(), generateSeqNoWeak(), ack.serialize())); } catch (IOException e) { Logger.e(TAG, e); } } for (SchedullerPackage schedullerPackage : packages) { schedullerPackage.state = STATE_SENT; if (schedullerPackage.idGenerationTime == 0) { generateParams(schedullerPackage); } if (schedullerPackage.isDepend) { if (schedullerPackage.dependMessageId == 0) { if (lastDependId > 0) { schedullerPackage.dependMessageId = lastDependId; } else { schedullerPackage.dependMessageId = -1; } } lastDependId = schedullerPackage.messageId; } schedullerPackage.writtenToChannel = contextId; schedullerPackage.lastAttemptTime = getCurrentTime(); if (schedullerPackage.isDepend && schedullerPackage.dependMessageId > 0) { Logger.d( TAG, "Adding package: #" + schedullerPackage.id + " " + schedullerPackage.supportTag + " (" + schedullerPackage.messageId + " on " + schedullerPackage.dependMessageId + ", " + schedullerPackage.seqNo + ")"); MTInvokeAfter invokeAfter = new MTInvokeAfter(schedullerPackage.dependMessageId, schedullerPackage.serialized); try { container .getMessages() .add( new MTMessage( schedullerPackage.messageId, schedullerPackage.seqNo, invokeAfter.serialize())); } catch (IOException e) { Logger.e(TAG, e); // Never happens } } else { Logger.d( TAG, "Adding package: #" + schedullerPackage.id + " " + schedullerPackage.supportTag + " (" + schedullerPackage.messageId + ", " + schedullerPackage.seqNo + ")"); container .getMessages() .add( new MTMessage( schedullerPackage.messageId, schedullerPackage.seqNo, schedullerPackage.serialized)); } } long containerMessageId = generateMessageId(); int containerSeq = generateSeqNoWeak(); for (SchedullerPackage schedullerPackage : packages) { schedullerPackage.relatedMessageIds.add(containerMessageId); } Logger.d(TAG, "Sending Package (" + containerMessageId + ", " + containerSeq + ")"); try { return new PreparedPackage( containerSeq, containerMessageId, container.serialize(), useHighPriority); } catch (IOException e) { // Might not happens Logger.e(TAG, e); return null; } } } public synchronized void onConnectionDies(int connectionId) { // Logger.d(TAG, "Connection dies " + connectionId); // for (SchedullerPackage schedullerPackage : messages.values().toArray(new // SchedullerPackage[0])) { // if (schedullerPackage.writtenToChannel != connectionId) { // continue; // } // // if (schedullerPackage.queuedToChannel != -1) { // Logger.d(TAG, "Removing: #" + schedullerPackage.id + " " + // schedullerPackage.supportTag); // forgetMessage(schedullerPackage.id); // } else { // if (schedullerPackage.isRpc) { // if (schedullerPackage.state == STATE_CONFIRMED || schedullerPackage.state // == STATE_QUEUED) { // if (schedullerPackage.serialized == null || // schedullerPackage.serialized.length < BIG_MESSAGE_SIZE) { // Logger.d(TAG, "Re-schedule: #" + schedullerPackage.id + " " + // schedullerPackage.supportTag); // schedullerPackage.state = STATE_QUEUED; // schedullerPackage.lastAttemptTime = 0; // } // } // } else { // if (schedullerPackage.state == STATE_SENT) { // Logger.d(TAG, "Re-schedule: #" + schedullerPackage.id + " " + // schedullerPackage.supportTag); // schedullerPackage.state = STATE_QUEUED; // schedullerPackage.lastAttemptTime = 0; // } // } // // } // } } 
private static final int PRIORITY_HIGH = 1; private static final int PRIORITY_NORMAL = 0; private static final int STATE_QUEUED = 0; private static final int STATE_SENT = 1; private static final int STATE_CONFIRMED = 2; private class SchedullerPackage { public SchedullerPackage(int id) { this.id = id; } public String supportTag; public int id; public TLObject object; public byte[] serialized; public long addTime; public long scheduleTime; public long expiresTime; public long ttlTime; public long lastAttemptTime; public int writtenToChannel = -1; public int queuedToChannel = -1; public int state = STATE_QUEUED; public int priority = PRIORITY_NORMAL; public boolean isDepend; public long idGenerationTime; public long dependMessageId; public long messageId; public int seqNo; public HashSet<Integer> relatedFastConfirm = new HashSet<Integer>(); public HashSet<Long> relatedMessageIds = new HashSet<Long>(); public HashSet<Long> generatedMessageIds = new HashSet<Long>(); public boolean isRpc; } }
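generateMessageId in the Scheduller above combines a time-derived identifier with strictly monotonic bumping in steps of 4 and a lookup table of already-issued ids. A stripped-down, hypothetical sketch of that pattern is shown below; the plain system clock stands in for the TimeOverlord component, so this is an illustration of the idea rather than the MTProto implementation.

import java.util.HashMap;
import java.util.Map;

// Illustrative message-id generator: time-derived candidate, strictly increasing
// in steps of 4, never reusing an id that was already handed out.
class MessageIdGenerator {
    private long lastMessageId = 0;
    private final Map<Long, Long> idGenerationTime = new HashMap<>();

    synchronized long nextId() {
        // Time-derived candidate; the real scheduler asks a time-sync component for this value.
        long candidate = (System.currentTimeMillis() / 1000L) << 32;
        if (candidate <= lastMessageId) {
            candidate = lastMessageId + 4;   // keep ids strictly increasing
        }
        while (idGenerationTime.containsKey(candidate)) {
            candidate += 4;                  // skip ids that were already issued
        }
        lastMessageId = candidate;
        idGenerationTime.put(candidate, System.currentTimeMillis());
        return candidate;
    }
}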
/** Key Reporter is used to help see any un-used variables quickly. */ private static final class KeyReporter { /** * Get key report. * * @param source Source to get report on. * @return Key report. */ KeyReport getKeyReport(final String source) { KeyReport rtn = keyReports.get(source); if (rtn == null) { rtn = new KeyReport(); keyReports.put(source, rtn); } return rtn; } /** * Report used key. * * @param source Source where key is used. * @param key Key. */ void reportUsedKey(final String source, final String key) { KeyReport keyReport = getKeyReport(source); if (!keyReport.getUsedKeys().contains(key)) { keyReport.getUsedKeys().add(key); } } /** * Report defined key. * * @param source Source where key is defined. * @param key Key. */ void reportDefinedKey(final String source, final String key) { KeyReport keyReport = getKeyReport(source); if (!keyReport.getDefinedKeys().contains(key)) { keyReport.getDefinedKeys().add(key); } } /** Maximum numerics. */ static final int MAXIMUM_NUMERICS = 100; /** * Report unused keys. * * @param file File. */ void reportUnusedKeys(final String file) { StringWriter sw = new StringWriter(); for (String source : keyReports.keySet()) { StringWriter hdr = new StringWriter(); hdr.append("*********" + source + ":"); hdr.append(Stringop.getEol()); KeyReport keyReport = getKeyReport(source); for (String s : keyReport.getUsedKeys()) { keyReport.getDefinedKeys().remove(s); } for (int i = 0; i < MAXIMUM_NUMERICS; i++) { keyReport.getDefinedKeys().remove("" + i); } keyReport.getDefinedKeys().remove(ReservedWords.FILE); keyReport.getDefinedKeys().remove(ReservedWords.DEPTH_CHARGE); keyReport.getDefinedKeys().remove(ReservedWords.SCRIPT); keyReport.getDefinedKeys().remove(ReservedWords.SPLITS); keyReport.getDefinedKeys().remove(ReservedWords.TARGET); keyReport.getDefinedKeys().remove(ReservedWords.TEMPLATE); keyReport.getDefinedKeys().remove(ReservedWords.TOKENS); boolean firstTime = true; for (String s : keyReport.getDefinedKeys()) { if (firstTime) { sw.append(hdr.toString()); firstTime = false; } sw.append(s); sw.append(Stringop.getEol()); } } Fileop.saveStringToFile(sw.toString(), new File(file)); } /** Key reports map. */ private final Map<String, KeyReport> keyReports = Collections.synchronizedSortedMap(new TreeMap<String, KeyReport>()); /** Key report. */ public static class KeyReport { /** Used keys. */ private final List<String> usedKeys = new ArrayList<String>(); /** Defined keys. */ private final Set<String> dfndKeys = Collections.synchronizedSortedSet(new TreeSet<String>()); /** @return Defined keys. */ public Set<String> getDefinedKeys() { return dfndKeys; } /** * Get used keys. * * @return Used keys. */ public List<String> getUsedKeys() { return usedKeys; } } }
/** * @author czarek @TODO this class should be a singleton @TODO Choose a narrow set of accepted * cipher suites for connections */ public class Server { SortedSet<PeerLoginInfo> loginInfo = Collections.synchronizedSortedSet(new TreeSet<PeerLoginInfo>()); SortedMap<String, PeerInfo> peersInfo = Collections.synchronizedSortedMap(new TreeMap<String, PeerInfo>()); private FileInputStream is; private KeyStore keystore; private KeyManagerFactory kmf; private SSLContext sc; private SSLServerSocketFactory ssf; private SSLServerSocket ss; PrivateKey caPrivKey; int listeningPort; public enum STATE { CONNECTING, IDLE, CONNECTED, LOGGEDIN, DONE, LOGGING } public Server(String keystoreLoc, char[] kestorePass) { try { is = new FileInputStream(keystoreLoc); this.keystore = KeyStore.getInstance(KeyStore.getDefaultType()); keystore.load(is, "123456".toCharArray()); kmf = KeyManagerFactory.getInstance("SunX509", "SunJSSE"); kmf.init(keystore, ("123456").toCharArray()); sc = SSLContext.getInstance("SSL"); sc.init(kmf.getKeyManagers(), null, null); ssf = sc.getServerSocketFactory(); readServerInfo("./res/server/ServerInfo.dat"); ss = (SSLServerSocket) ssf.createServerSocket(listeningPort); caPrivKey = (PrivateKey) keystore.getKey("serverTrustedCert", "123456".toCharArray()); } catch (Exception e) { // TODO Auto-generated catch block e.printStackTrace(); System.exit(1); } } public static void main(String[] args) throws Exception { Server server = new Server("./res/server/key/serverKeys", "123456".toCharArray()); // System.out.println(server.sc.getProvider()); // System.out.println(server.ss.getEnabledCipherSuites()); for (int i = 0; i < server.ss.getEnabledCipherSuites().length; i++) { System.out.println(server.ss.getEnabledCipherSuites()[i]); } /** Turn this into a thread */ while (true) { // new Server(ss.accept()).start(); try { System.out.println("server is waiting"); new S2PConnection(server.ss.accept(), server); } catch (Exception e1) { e1.printStackTrace(); } } } /** Reads logins and password hashes from a file. @TODO add salt to the passwords */ private void readServerInfo(String serverInfo) { try { System.out.println("readLoginInfo"); FileReader fr = new FileReader(serverInfo); BufferedReader br = new BufferedReader(fr); StringTokenizer st; // = new StringTokenizer(); String line; boolean timeTobreak = false; while (!timeTobreak) { line = br.readLine(); if (!(line.startsWith("#"))) { this.listeningPort = Integer.valueOf(line); timeTobreak = true; } } while ((line = br.readLine()) != null) { if (!(line.startsWith("#"))) { st = new StringTokenizer(line); if ((st.countTokens()) != 2) throw new Exception("Invalid peerLoginInfo.dat file"); loginInfo.add(new PeerLoginInfo(st.nextToken(), st.nextToken(), true)); } } System.out.println("[Server.readLoginInfo()] " + loginInfo); } catch (Exception e) { e.printStackTrace(); System.exit(1); } } /** * Checks whether the data sent by the peer matches the data from the file * * @param pli the peer's login info * @return the verification result */ boolean verifyPeer(PeerLoginInfo pli) { PeerLoginInfo pliAtServer = loginInfo.tailSet(pli).first(); if (pliAtServer != null && (pliAtServer.getPasswdHash()).equals(pli.getPasswdHash())) return true; else return false; } /** * Generates an X.509 certificate * * @param certInfo the information to be included in the certificate * @return * @throws InvalidKeyException * @throws NoSuchProviderException * @throws SignatureException * @throws CertificateEncodingException * @throws IllegalStateException * @throws NoSuchAlgorithmException */ public common.Pair<X509Certificate,
KeyPair> generateV3Certificate(X500Principal certInfo) throws InvalidKeyException, NoSuchProviderException, SignatureException, CertificateEncodingException, IllegalStateException, NoSuchAlgorithmException, KeyStoreException { KeyPairGenerator keyGen; keyGen = KeyPairGenerator.getInstance("RSA"); keyGen.initialize(1024); KeyPair keyPair = keyGen.generateKeyPair(); Security.addProvider(new org.bouncycastle.jce.provider.BouncyCastleProvider()); X509V3CertificateGenerator certGen = new X509V3CertificateGenerator(); certGen.setSerialNumber(BigInteger.valueOf(System.currentTimeMillis())); certGen.setIssuerDN( ((X509Certificate) this.keystore.getCertificate("servertrustedcert")) .getSubjectX500Principal()); certGen.setNotBefore(new Date(System.currentTimeMillis() - 7 * 24 * 3600 * 1000)); certGen.setNotAfter(new Date(System.currentTimeMillis() + 7 * 24 * 3600 * 1000)); certGen.setSubjectDN(certInfo); certGen.setPublicKey(keyPair.getPublic()); certGen.setSignatureAlgorithm("SHA1withDSA"); // return new Pair(certGen.generate(this.caPrivKey), keyPair); return new common.Pair<X509Certificate, KeyPair>(certGen.generate(this.caPrivKey), keyPair); } }
public LocationSensitiveUpdater(final A3DContainer container) { sensitives = Collections.synchronizedSortedMap(new TreeMap<Double, Object[]>()); lastestPosition = new Vector3(); currentPosition = new Vector3(); this.container = container; }