public void messageSent(Message message) { if (message.getID().equals(BTMessage.ID_BT_PIECE)) { try { lock_mon.enter(); // due to timing issues we can get in here with a message already removed queued_messages.remove(message); } finally { lock_mon.exit(); } /* if ( peer.getIp().equals( "64.71.5.2" )){ outgoing_message_queue.setTrace( true ); // BTPiece p = (BTPiece)message; // TimeFormatter.milliTrace( "obt sent: " + p.getPieceNumber() + "/" + p.getPieceOffset()); } */ doReadAheadLoads(); } }
/**
 * Remove an outstanding piece data request.
 *
 * The request may be in one of three states, each handled in turn: still
 * pending (requests), being read from disk (loading_messages), or already
 * wrapped in a queued BTPiece message (queued_messages).
 *
 * @param piece_number
 * @param piece_offset
 * @param length
 */
public void removePieceRequest(int piece_number, int piece_offset, int length) {
  if (destroyed) return;

  DiskManagerReadRequest dmr =
      peer.getManager().getDiskManager().createReadRequest(piece_number, piece_offset, length);

  try {
    lock_mon.enter();

    if (requests.contains(dmr)) {
      requests.remove(dmr);
      return;
    }

    if (loading_messages.contains(dmr)) {
      loading_messages.remove(dmr);
      return;
    }

    for (Iterator i = queued_messages.entrySet().iterator(); i.hasNext(); ) {
      Map.Entry entry = (Map.Entry) i.next();
      if (entry.getValue().equals(dmr)) { // it's already been queued
        BTPiece msg = (BTPiece) entry.getKey();
        if (outgoing_message_queue.removeMessage(msg, true)) {
          i.remove();
        }
        break; // do manual listener notify
      }
    }
  } finally {
    lock_mon.exit();
  }

  // Listener notification is deferred to here so it runs outside the monitor.
  outgoing_message_queue.doListenerNotifications();
}
/**
 * Moves pending requests into the loading state (up to the read-ahead limit)
 * and submits them for disk reads. Requests are collected under the monitor
 * but submitted after it is released.
 */
private void doReadAheadLoads() {
  List to_submit = null;
  try {
    lock_mon.enter();

    // Keep in-flight reads + already-queued messages below the read-ahead limit.
    while (loading_messages.size() + queued_messages.size() < request_read_ahead
        && !requests.isEmpty()
        && !destroyed) {
      DiskManagerReadRequest dmr = (DiskManagerReadRequest) requests.removeFirst();
      loading_messages.add(dmr);
      if (to_submit == null) to_submit = new ArrayList();
      to_submit.add(dmr);
    }
  } finally {
    lock_mon.exit();
  }

  /*
  if ( peer.getIp().equals( "64.71.5.2")){
    TimeFormatter.milliTrace( "obt read_ahead: -> " + (to_submit==null?0:to_submit.size()) +
      " [lo=" + loading_messages.size() + ",qm=" + queued_messages.size() + ",re=" +
      requests.size() + ",rl=" + request_read_ahead + "]");
  }
  */

  // Submit after releasing the monitor.
  if (to_submit != null) {
    for (int i = 0; i < to_submit.size(); i++) {
      peer.getManager()
          .getAdapter()
          .enqueueReadRequest(peer, (DiskManagerReadRequest) to_submit.get(i), read_req_listener);
    }
  }
}
/**
 * Switches the underlying storage type, transparently closing and re-opening
 * the file if it is currently open.
 *
 * @param new_type the new storage type to apply
 * @throws FMFileManagerException on close / type-change / re-open failure
 */
public void setStorageType(int new_type) throws FMFileManagerException {
  try {
    this_mon.enter();

    final boolean reopen = isOpen();

    if (reopen) {
      closeSupport(false);
    }

    try {
      file_access.setStorageType(new_type);
    } finally {
      // Restore the open state even if the type change failed.
      if (reopen) {
        openSupport("Re-open after storage type change");
      }
    }
  } finally {
    this_mon.exit();
  }
}
protected boolean checkConnectionId(String client_address, long id) { try { random_mon.enter(); Long key = new Long(id); connectionData data = (connectionData) connection_id_map.get(key); if (data == null) { // System.out.println( "TRTrackerServerProcessorUDP: rejected:" + id + ", data not found" ); return (false); } else { if (SystemTime.getMonotonousTime() - data.getTime() > CONNECTION_ID_LIFETIME) { return (false); } } boolean ok = data.getAddress().equals(client_address); // System.out.println( "TRTrackerServerProcessorUDP: tested:" + id + "/" + client_address + " // -> " + ok ); return (ok); } finally { random_mon.exit(); } }
/**
 * Records an authentication outcome, keyed by the tracker URL's
 * protocol / host / port triple.
 */
public void setAuthenticationOutcome(String realm, URL tracker, boolean success) {
  try {
    this_mon.enter();

    String protocol = tracker.getProtocol();
    String host = tracker.getHost();
    int port = tracker.getPort();

    setAuthenticationOutcome(realm, protocol, host, port, success);
  } finally {
    this_mon.exit();
  }
}
/**
 * Dumps the current FMFile reservation table as diagnostic "evidence": one
 * line per reserved file listing each owner, its access mode and reason.
 *
 * @param writer destination indent writer (indent is always restored)
 */
protected static void generateEvidence(IndentWriter writer) {
  writer.println(file_map.size() + " FMFile Reservations");

  try {
    writer.indent();

    try {
      file_map_mon.enter();

      Iterator it = file_map.keySet().iterator();

      while (it.hasNext()) {
        String key = (String) it.next();
        List owners = (List) file_map.get(key);

        // Build "<owner>[write|read/<reason>]" list; StringBuilder avoids the
        // O(n^2) cost of repeated String concatenation in a loop.
        StringBuilder str = new StringBuilder(64);

        for (Iterator it2 = owners.iterator(); it2.hasNext(); ) {
          Object[] entry = (Object[]) it2.next();

          FMFileOwner owner = (FMFileOwner) entry[0];
          Boolean write = (Boolean) entry[1];
          String reason = (String) entry[2];

          if (str.length() > 0) {
            str.append(", ");
          }
          str.append(owner.getName())
              .append("[")
              .append(write.booleanValue() ? "write" : "read")
              .append("/")
              .append(reason)
              .append("]");
        }

        writer.println(Debug.secretFileName(key) + " -> " + str);
      }
    } finally {
      file_map_mon.exit();
    }

    FMFileManagerImpl.generateEvidence(writer);
  } finally {
    writer.exdent();
  }
}
/**
 * Looks up credentials for the given realm and tracker URL by delegating to
 * the protocol / host / port overload.
 *
 * @return credentials, or null if none are available
 */
public PasswordAuthentication getAuthentication(String realm, URL tracker) {
  try {
    this_mon.enter();

    String protocol = tracker.getProtocol();
    String host = tracker.getHost();
    int port = tracker.getPort();

    return getAuthentication(realm, protocol, host, port);
  } finally {
    this_mon.exit();
  }
}
/** Discards every cached credential and persists the now-empty cache. */
public void clearPasswords() {
  try {
    this_mon.enter();

    // Replace rather than clear so concurrent readers of the old map are unaffected.
    auth_cache = new HashMap();

    saveAuthCache();
  } finally {
    this_mon.exit();
  }
}
public void readFailed(DiskManagerReadRequest request, Throwable cause) { try { lock_mon.enter(); if (!loading_messages.contains(request) || destroyed) { // was canceled return; } loading_messages.remove(request); } finally { lock_mon.exit(); } }
private void reserveFile() throws FMFileManagerException { if (clone) { return; } try { file_map_mon.enter(); // System.out.println( "FMFile::reserveFile:" + canonical_path + "("+ owner.getName() + ")" + // " - " + Debug.getCompressedStackTrace() ); List owners = (List) file_map.get(canonical_path); if (owners == null) { owners = new ArrayList(); // System.out.println( " creating new owners entr" ); file_map.put(canonical_path, owners); } for (Iterator it = owners.iterator(); it.hasNext(); ) { Object[] entry = (Object[]) it.next(); String entry_name = ((FMFileOwner) entry[0]).getName(); // System.out.println( " existing entry: " + entry_name ); if (owner.getName().equals(entry_name)) { // already present, start off read-access Debug.out("reserve file - entry already present"); entry[1] = new Boolean(false); return; } } owners.add(new Object[] {owner, new Boolean(false), "<reservation>"}); } finally { file_map_mon.exit(); } }
/**
 * Lazily creates the configuration singleton from the supplied data map;
 * subsequent calls return the existing instance and ignore the argument.
 */
public static ConfigurationManager getInstance(Map data) {
  try {
    class_mon.enter();

    ConfigurationManager result = config;
    if (result == null) {
      result = new ConfigurationManager(data);
      config = result;
    }
    return result;
  } finally {
    class_mon.exit();
  }
}
/**
 * Adds a listener using copy-on-write so readers never see a partially
 * updated list; registers with the download manager on the first listener.
 */
void addListener(Object o) {
  boolean first_listener;
  try {
    this_mon.enter();

    first_listener = listeners.isEmpty();

    ArrayList copy = new ArrayList(listeners);
    copy.add(o);
    this.listeners = copy;
  } finally {
    this_mon.exit();
  }

  // Register outside the monitor to avoid holding it across external code.
  if (first_listener) {
    dm.addListener(this, true);
  }
}
/**
 * Removes a listener using copy-on-write; unregisters from the download
 * manager once the last listener is gone.
 */
void removeListener(Object o) {
  boolean last_listener;
  try {
    this_mon.enter();

    ArrayList copy = new ArrayList(listeners);
    copy.remove(o);
    this.listeners = copy;

    last_listener = this.listeners.isEmpty();
  } finally {
    this_mon.exit();
  }

  // Unregister outside the monitor to avoid holding it across external code.
  if (last_listener) {
    dm.removeListener(this, true);
  }
}
/**
 * Tears down this uploader: cancels all outstanding piece requests, marks the
 * instance destroyed and detaches the sent-message queue listener. All done
 * under the monitor so no new work can interleave with the teardown.
 */
public void destroy() {
  try {
    lock_mon.enter();

    // Must run before the destroyed flag is set - removeAllPieceRequests
    // is a no-op once destroyed is true.
    removeAllPieceRequests();

    queued_messages.clear();

    destroyed = true;

    outgoing_message_queue.cancelQueueListener(sent_message_listener);
  } finally {
    lock_mon.exit();
  }
}
/**
 * Get a list of piece numbers being requested
 *
 * @return list of Long values
 */
public int[] getRequestedPieceNumbers() {
  if (destroyed) return new int[0];

  /** Cheap hack to reduce (but not remove all) the # of duplicate entries */
  int iLastNumber = -1;
  int pos = 0;
  int[] pieceNumbers;

  try {
    lock_mon.enter();

    // allocate max size needed (we'll shrink it later)
    pieceNumbers = new int[queued_messages.size() + loading_messages.size() + requests.size()];

    // Pieces already wrapped as BTPiece messages awaiting send.
    for (Iterator iter = queued_messages.keySet().iterator(); iter.hasNext(); ) {
      BTPiece msg = (BTPiece) iter.next();
      if (iLastNumber != msg.getPieceNumber()) {
        iLastNumber = msg.getPieceNumber();
        pieceNumbers[pos++] = iLastNumber;
      }
    }

    // Pieces currently being read from disk.
    for (Iterator iter = loading_messages.iterator(); iter.hasNext(); ) {
      DiskManagerReadRequest dmr = (DiskManagerReadRequest) iter.next();
      if (iLastNumber != dmr.getPieceNumber()) {
        iLastNumber = dmr.getPieceNumber();
        pieceNumbers[pos++] = iLastNumber;
      }
    }

    // Pieces still waiting to be loaded.
    for (Iterator iter = requests.iterator(); iter.hasNext(); ) {
      DiskManagerReadRequest dmr = (DiskManagerReadRequest) iter.next();
      if (iLastNumber != dmr.getPieceNumber()) {
        iLastNumber = dmr.getPieceNumber();
        pieceNumbers[pos++] = iLastNumber;
      }
    }
  } finally {
    lock_mon.exit();
  }

  // Trim the array down to the number of entries actually written.
  int[] trimmed = new int[pos];
  System.arraycopy(pieceNumbers, 0, trimmed, 0, pos);

  return trimmed;
}
/**
 * Opens the file if it is not already open.
 *
 * @param reason diagnostic reason passed through to openSupport
 * @throws FMFileManagerException if opening fails
 */
public void ensureOpen(String reason) throws FMFileManagerException {
  try {
    this_mon.enter();

    if (!isOpen()) {
      openSupport(reason);
    }
  } finally {
    this_mon.exit();
  }
}
/**
 * Register a new piece data request and kick the read-ahead loader.
 *
 * @param piece_number piece index
 * @param piece_offset byte offset within the piece
 * @param length       number of bytes requested
 */
public void addPieceRequest(int piece_number, int piece_offset, int length) {
  if (destroyed) {
    return;
  }

  DiskManagerReadRequest request =
      peer.getManager().getDiskManager().createReadRequest(piece_number, piece_offset, length);

  try {
    lock_mon.enter();

    requests.addLast(request);
  } finally {
    lock_mon.exit();
  }

  doReadAheadLoads();
}
/** Remove all outstanding piece data requests. */
public void removeAllPieceRequests() {
  if (destroyed) return;

  try {
    lock_mon.enter();

    // removed this trace as Alon can't remember why the trace is here anyway and as far as I can
    // see there's nothing to stop a piece being delivered to transport and removed from
    // the message queue before we're notified of this and thus it is entirely possible that
    // our view of queued messages is lagging.
    // String before_trace = outgoing_message_queue.getQueueTrace();
    /*
    int num_queued = queued_messages.size();
    int num_removed = 0;

    for( Iterator i = queued_messages.keySet().iterator(); i.hasNext(); ) {
      BTPiece msg = (BTPiece)i.next();
      if( outgoing_message_queue.removeMessage( msg, true ) ) {
        i.remove();
        num_removed++;
      }
    }

    if( num_removed < num_queued -2 ) {
      Debug.out( "num_removed[" +num_removed+ "] < num_queued[" +num_queued+
        "]:\nBEFORE:\n" +before_trace+ "\nAFTER:\n" +outgoing_message_queue.getQueueTrace() );
    }
    */

    // Pull any queued piece messages back off the outgoing queue; listener
    // notification is deferred until after the monitor is released.
    for (Iterator i = queued_messages.keySet().iterator(); i.hasNext(); ) {
      BTPiece msg = (BTPiece) i.next();
      outgoing_message_queue.removeMessage(msg, true);
    }

    queued_messages.clear(); // this replaces stuff above

    requests.clear();
    loading_messages.clear();
  } finally {
    lock_mon.exit();
  }

  // Manual listener notification, outside the monitor.
  outgoing_message_queue.doListenerNotifications();
}
/**
 * Marks the cached credential entry for the given tracker (if any) as a
 * success or failure.
 */
public void setAuthenticationOutcome(
    String realm, String protocol, String host, int port, boolean success) {
  try {
    this_mon.enter();

    String tracker = protocol + "://" + host + ":" + port + "/";

    // Cache entries are keyed by realm + ":" + tracker URL.
    authCache entry = (authCache) auth_cache.get(realm + ":" + tracker);

    if (entry != null) {
      entry.setOutcome(success);
    }
  } finally {
    this_mon.exit();
  }
}
/**
 * Returns the configuration singleton, loading it on first use. Tolerates
 * re-entrant calls made while the config itself is loading - see the
 * comment below.
 */
public static ConfigurationManager getInstance() {
  try {
    class_mon.enter();

    if (config == null) {
      // this is nasty but I can't see an easy way around it. Unfortunately while reading the
      // config
      // we hit other code (logging for example) that needs access to the config data. Things are
      // cunningly (?) arranged so that a recursive call here *won't* result in a further
      // (looping)
      // recursive call if we attempt to load the config again. Hence this disgusting code that
      // goes for a second load attempt
      if (config_temp == null) {
        config_temp = new ConfigurationManager();
        config_temp.load();
        config_temp.initialise();
        config = config_temp;
      } else {
        // We're inside the recursive call: hand back the partially
        // initialised instance, loading its properties if needed.
        if (config_temp.propertiesMap == null) {
          config_temp.load();
        }
        return (config_temp);
      }
    }
    return config;
  } finally {
    class_mon.exit();
  }
}
public void readCompleted(DiskManagerReadRequest request, DirectByteBuffer data) { try { lock_mon.enter(); if (!loading_messages.contains(request) || destroyed) { // was canceled data.returnToPool(); return; } loading_messages.remove(request); BTPiece msg = new BTPiece(request.getPieceNumber(), request.getOffset(), data, piece_version); queued_messages.put(msg, request); outgoing_message_queue.addMessage(msg, true); } finally { lock_mon.exit(); } outgoing_message_queue.doListenerNotifications(); }
private void releaseFile() { if (clone) { return; } try { file_map_mon.enter(); // System.out.println( "FMFile::releaseFile:" + canonical_path + "("+ owner.getName() + ")" + // " - " + Debug.getCompressedStackTrace()); List owners = (List) file_map.get(canonical_path); if (owners != null) { for (Iterator it = owners.iterator(); it.hasNext(); ) { Object[] entry = (Object[]) it.next(); if (owner.getName().equals(((FMFileOwner) entry[0]).getName())) { it.remove(); break; } } if (owners.size() == 0) { file_map.remove(canonical_path); } } } finally { file_map_mon.exit(); } }
/**
 * Persists the credential cache: each persistent entry is flattened into a
 * { "user", "pw" } map of UTF-8 bytes and the whole map is stored under
 * CONFIG_PARAM via COConfigurationManager.
 */
protected void saveAuthCache() {
  try {
    this_mon.enter();

    HashMap map = new HashMap();

    Iterator it = auth_cache.values().iterator();

    while (it.hasNext()) {
      authCache value = (authCache) it.next();

      // Only entries flagged persistent are written out.
      if (value.isPersistent()) {
        try {
          HashMap entry_map = new HashMap();

          entry_map.put("user", value.getAuth().getUserName().getBytes("UTF-8"));
          entry_map.put("pw", new String(value.getAuth().getPassword()).getBytes("UTF-8"));

          map.put(value.getKey(), entry_map);
        } catch (Throwable e) {
          // A single bad entry shouldn't prevent saving the rest.
          Debug.printStackTrace(e);
        }
      }
    }

    COConfigurationManager.setParameter(CONFIG_PARAM, map);
  } finally {
    this_mon.exit();
  }
}
/**
 * Core version-check implementation for both IPv4 and IPv6. Results are
 * cached for CACHE_PERIOD; a network check happens when the cache is empty,
 * stale, or force is set. Listeners are notified first, outside the cache
 * monitor.
 *
 * @param reason         why the check is being performed (REASON_* constant)
 * @param only_if_cached if true, don't hit the network - return cached data,
 *                       or an empty map when a cache entry exists but a
 *                       refresh would otherwise be needed
 * @param force          bypass the cache-freshness test
 * @param v6             true for the IPv6 version server, false for IPv4
 * @return the (possibly cached) version-check reply map; never null
 */
protected Map getVersionCheckInfoSupport(
    String reason, boolean only_if_cached, boolean force, boolean v6) {
  // Notify listeners that a check is starting (guards with the listener list's own lock).
  try {
    synchronized (listeners) {
      if (REASON_UPDATE_CHECK_START.equals(reason)) {
        startCheckRan = true;
      }
      for (VersionCheckClientListener l : listeners) {
        l.versionCheckStarted(reason);
      }
    }
  } catch (Throwable t) {
    Debug.out(t);
  }

  if (v6) {
    if (enable_v6) {
      try {
        check_mon.enter();

        long time_diff = SystemTime.getCurrentTime() - last_check_time_v6;

        // Negative diff means the wall clock went backwards - treat as stale.
        force = force || time_diff > CACHE_PERIOD || time_diff < 0;

        if (last_check_data_v6 == null || last_check_data_v6.size() == 0 || force) {
          // if we've never checked before then we go ahead even if the "only_if_cached"
          // flag is set as its had not chance of being cached yet!
          if (only_if_cached && last_check_data_v6 != null) {
            return (new HashMap());
          }
          try {
            last_check_data_v6 =
                performVersionCheck(constructVersionCheckMessage(reason), true, true, true);
            if (last_check_data_v6 != null && last_check_data_v6.size() > 0) {
              COConfigurationManager.setParameter("versioncheck.cache.v6", last_check_data_v6);
            }
          } catch (SocketException t) {
            // internet is broken
            // Debug.out(t.getClass().getName() + ": " + t.getMessage());
          } catch (UnknownHostException t) {
            // dns is broken
            // Debug.out(t.getClass().getName() + ": " + t.getMessage());
          } catch (Throwable t) {
            Debug.out(t);
            last_check_data_v6 = new HashMap();
          }
        } else {
          // NOTE(review): unlike the v4 branch below this log call is not
          // guarded by Logger.isEnabled() - confirm whether that's intentional.
          Logger.log(
              new LogEvent(
                  LOGID,
                  "VersionCheckClient is using "
                      + "cached version check info. Using "
                      + last_check_data_v6.size()
                      + " reply keys."));
        }
      } finally {
        check_mon.exit();
      }
    }

    if (last_check_data_v6 == null) last_check_data_v6 = new HashMap();

    return last_check_data_v6;
  } else {
    try {
      check_mon.enter();

      long time_diff = SystemTime.getCurrentTime() - last_check_time_v4;

      force = force || time_diff > CACHE_PERIOD || time_diff < 0;

      if (last_check_data_v4 == null || last_check_data_v4.size() == 0 || force) {
        // if we've never checked before then we go ahead even if the "only_if_cached"
        // flag is set as its had not chance of being cached yet!
        if (only_if_cached && last_check_data_v4 != null) {
          return (new HashMap());
        }
        try {
          last_check_data_v4 =
              performVersionCheck(constructVersionCheckMessage(reason), true, true, false);
          if (last_check_data_v4 != null && last_check_data_v4.size() > 0) {
            COConfigurationManager.setParameter("versioncheck.cache.v4", last_check_data_v4);
          }

          // clear down any plugin-specific data that has successfully been sent to the version
          // server
          try {
            if (AzureusCoreFactory.isCoreAvailable()) {
              // installed plugin IDs
              PluginInterface[] plugins =
                  AzureusCoreFactory.getSingleton().getPluginManager().getPluginInterfaces();

              for (int i = 0; i < plugins.length; i++) {
                PluginInterface plugin = plugins[i];

                Map data =
                    plugin
                        .getPluginconfig()
                        .getPluginMapParameter("plugin.versionserver.data", null);

                if (data != null) {
                  plugin
                      .getPluginconfig()
                      .setPluginMapParameter("plugin.versionserver.data", new HashMap());
                }
              }
            }
          } catch (Throwable e) {
            // best effort - plugin data cleardown failure is non-fatal
          }
        } catch (UnknownHostException t) {
          // no internet
          Debug.outNoStack(
              "VersionCheckClient - " + t.getClass().getName() + ": " + t.getMessage());
        } catch (IOException t) {
          // General connection problem.
          Debug.outNoStack(
              "VersionCheckClient - " + t.getClass().getName() + ": " + t.getMessage());
        } catch (Throwable t) {
          Debug.out(t);
          last_check_data_v4 = new HashMap();
        }
      } else {
        if (Logger.isEnabled())
          Logger.log(
              new LogEvent(
                  LOGID,
                  "VersionCheckClient is using "
                      + "cached version check info. Using "
                      + last_check_data_v4.size()
                      + " reply keys."));
      }
    } finally {
      check_mon.exit();
    }

    if (last_check_data_v4 == null) last_check_data_v4 = new HashMap();

    last_feature_flag_cache_time = 0;

    return last_check_data_v4;
  }
}
/**
 * Allocates a new random connection id for the given client address,
 * expiring stale ids and capping the number of ids held per client at 512.
 *
 * @param client_address the client's IP address (key into connection_ip_map)
 * @return the newly allocated id
 */
protected long allocateConnectionId(String client_address) {
  try {
    random_mon.enter();

    long id = random.nextLong();

    Long new_key = new Long(id);

    connectionData new_data = new connectionData(client_address, id);

    // check for timeouts - rate-limited to at most once per 500ms
    if (new_data.getTime() - last_timeout_check > 500) {
      last_timeout_check = new_data.getTime();

      Iterator<Long> it = connection_id_map.keySet().iterator();

      while (it.hasNext()) {
        Long key = it.next();
        connectionData data = connection_id_map.get(key);

        if (new_data.getTime() - data.getTime() > CONNECTION_ID_LIFETIME) {
          // System.out.println( "TRTrackerServerProcessorUDP: connection id timeout" );
          it.remove();

          // NOTE(review): this prunes the per-IP list for client_address only,
          // yet the expired entry may belong to a different address
          // (data.getAddress()) - confirm whether that's intentional.
          List<connectionData> cds = connection_ip_map.get(client_address);

          if (cds != null) {
            Iterator<connectionData> it2 = cds.iterator();

            while (it2.hasNext()) {
              if (it2.next().getID() == key) {
                it2.remove();
                break;
              }
            }

            if (cds.size() == 0) {
              connection_ip_map.remove(client_address);
            }
          }
        } else {
          // insertion order into map is time based - LinkedHashMap returns keys in same order
          break;
        }
      }
    }

    List<connectionData> cds = connection_ip_map.get(client_address);

    if (cds == null) {
      cds = new ArrayList<connectionData>();
      connection_ip_map.put(client_address, cds);
    }

    cds.add(new_data);

    // Cap per-client ids; evict the oldest entry when over the limit.
    if (cds.size() > 512) {
      connectionData dead = cds.remove(0);
      connection_id_map.remove(dead.getID());
    }

    connection_id_map.put(new_key, new_data);

    // System.out.println( "TRTrackerServerProcessorUDP: allocated:" + id + ", connection id map
    // size = " + connection_id_map.size());

    return (id);
  } finally {
    random_mon.exit();
  }
}
/**
 * Looks up credentials for the given realm/tracker, checking (in order) the
 * internal-tracker shortcut, the in-memory cache, and finally prompting the
 * user via a dialog.
 *
 * @return the credentials, or null if the user cancelled the dialog
 */
public PasswordAuthentication getAuthentication(
    String realm, String protocol, String host, int port) {
  try {
    this_mon.enter();

    String tracker = protocol + "://" + host + ":" + port + "/";

    InetAddress bind_ip = NetworkAdmin.getSingleton().getSingleHomedServiceBindAddress();

    String self_addr;

    // System.out.println( "auth req for " + realm + " - " + tracker );

    if (bind_ip == null || bind_ip.isAnyLocalAddress()) {
      self_addr = "127.0.0.1";
    } else {
      self_addr = bind_ip.getHostAddress();
    }

    // when the tracker is connected to internally we don't want to prompt
    // for the password. Here we return a special user and the password hash
    // which is picked up in the tracker auth code - search for "<internal>"!

    // also include the tracker IP as well as for scrapes these can occur on
    // a raw torrent which hasn't been modified to point to localhost

    if (host.equals(self_addr)
        || host.equals(COConfigurationManager.getStringParameter("Tracker IP", ""))) {
      try {
        byte[] pw = COConfigurationManager.getByteParameter("Tracker Password", new byte[0]);

        String str_pw = new String(Base64.encode(pw));

        return (new PasswordAuthentication("<internal>", str_pw.toCharArray()));
      } catch (Throwable e) {
        Debug.printStackTrace(e);
      }
    }

    String auth_key = realm + ":" + tracker;

    authCache cache = (authCache) auth_cache.get(auth_key);

    if (cache != null) {
      PasswordAuthentication auth = cache.getAuth();

      if (auth != null) {
        return (auth);
      }
    }

    // Cache miss: ask the user. res = { user, password, save-flag }.
    String[] res = getAuthenticationDialog(realm, tracker);

    if (res == null) {
      return (null);
    } else {
      PasswordAuthentication auth = new PasswordAuthentication(res[0], res[1].toCharArray());

      boolean save_pw = res[2].equals("true");

      boolean old_entry_existed =
          auth_cache.put(auth_key, new authCache(auth_key, auth, save_pw)) != null;

      // Persist when the user asked to save, or when an existing entry was replaced.
      if (save_pw || old_entry_existed) {
        saveAuthCache();
      }

      return (auth);
    }
  } finally {
    this_mon.exit();
  }
}
/**
 * Upgrades/confirms this owner's reservation of the file for the current
 * access mode, enforcing the single-writer rule: at most one writer and no
 * concurrent readers - unless strict locking is disabled and every writer
 * refers to a torrent file of the same length.
 *
 * @param reason diagnostic reason recorded against the reservation entry
 * @throws FMFileManagerException if the file isn't reserved or is in use
 */
private void reserveAccess(String reason) throws FMFileManagerException {
  if (clone) {
    return;
  }
  try {
    file_map_mon.enter();

    // System.out.println( "FMFile::reserveAccess:" + canonical_path + "("+ owner.getName() + ")"
    // + " [" + (access_mode==FM_WRITE?"write":"read") + "]" + " - " +
    // Debug.getCompressedStackTrace());

    List owners = (List) file_map.get(canonical_path);

    Object[] my_entry = null;

    if (owners == null) {
      Debug.out("reserveAccess fail");
      throw (new FMFileManagerException(
          "File '"
              + canonical_path
              + "' has not been reserved (no entries), '"
              + owner.getName()
              + "'"));
    }

    // Locate our own reservation entry (created earlier by reserveFile).
    for (Iterator it = owners.iterator(); it.hasNext(); ) {
      Object[] entry = (Object[]) it.next();
      String entry_name = ((FMFileOwner) entry[0]).getName();
      // System.out.println( "    existing entry: " + entry_name );
      if (owner.getName().equals(entry_name)) {
        my_entry = entry;
      }
    }

    if (my_entry == null) {
      Debug.out("reserveAccess fail");
      throw (new FMFileManagerException(
          "File '"
              + canonical_path
              + "' has not been reserved (not found), '"
              + owner.getName()
              + "'"));
    }

    // Record the requested access mode and reason on our entry.
    my_entry[1] = new Boolean(access_mode == FM_WRITE);
    my_entry[2] = reason;

    int read_access = 0;
    int write_access = 0;
    int write_access_lax = 0;

    TOTorrentFile my_torrent_file = owner.getTorrentFile();

    // Only build the "in use by" description when there are other owners.
    StringBuilder users_sb = owners.size() == 1 ? null : new StringBuilder(128);

    for (Iterator it = owners.iterator(); it.hasNext(); ) {
      Object[] entry = (Object[]) it.next();

      FMFileOwner this_owner = (FMFileOwner) entry[0];

      if (((Boolean) entry[1]).booleanValue()) {
        write_access++;

        TOTorrentFile this_tf = this_owner.getTorrentFile();

        // "Lax" writers share our torrent-file length.
        if (my_torrent_file != null
            && this_tf != null
            && my_torrent_file.getLength() == this_tf.getLength()) {
          write_access_lax++;
        }

        if (users_sb != null) {
          if (users_sb.length() > 0) {
            users_sb.append(",");
          }
          users_sb.append(this_owner.getName());
          users_sb.append(" [write]");
        }
      } else {
        read_access++;

        if (users_sb != null) {
          if (users_sb.length() > 0) {
            users_sb.append(",");
          }
          users_sb.append(this_owner.getName());
          users_sb.append(" [read]");
        }
      }
    }

    if (write_access > 1 || (write_access == 1 && read_access > 0)) {
      // relax locking if strict is disabled and torrent file is same size
      if (!COConfigurationManager.getBooleanParameter("File.strict.locking")) {
        if (write_access_lax == write_access) {
          return;
        }
      }

      Debug.out("reserveAccess fail");
      throw (new FMFileManagerException(
          "File '"
              + canonical_path
              + "' is in use by '"
              + (users_sb == null ? "eh?" : users_sb.toString())
              + "'"));
    }
  } finally {
    file_map_mon.exit();
  }
}
/**
 * Renames the underlying file within its directory, keeping the reservation
 * map and open/closed state consistent. On rename failure the original
 * reservation (and open state) is restored on a best-effort basis before
 * the failure is rethrown.
 *
 * @param new_name the new simple file name (same parent directory)
 * @throws FMFileManagerException if the target already exists, the canonical
 *         path can't be resolved, or the rename itself fails
 */
public void renameFile(String new_name) throws FMFileManagerException {
  try {
    this_mon.enter();

    String new_canonical_path;

    File new_linked_file = new File(linked_file.getParentFile(), new_name);

    try {
      try {
        new_canonical_path = new_linked_file.getCanonicalPath();
      } catch (IOException ioe) {
        // Known odd failure mode - log extra diagnostics before rethrowing.
        String msg = ioe.getMessage();
        if (msg != null && msg.indexOf("There are no more files") != -1) {
          String abs_path = new_linked_file.getAbsolutePath();
          String error =
              "Caught 'There are no more files' exception during new_file.getCanonicalPath(). "
                  + "os=["
                  + Constants.OSName
                  + "], new_file.getPath()=["
                  + new_linked_file.getPath()
                  + "], new_file.getAbsolutePath()=["
                  + abs_path
                  + "]. ";
          // "new_canonical_path temporarily set to [" +abs_path+ "]";
          Debug.out(error, ioe);
        }
        throw ioe;
      }
    } catch (Throwable e) {
      throw (new FMFileManagerException("getCanonicalPath fails", e));
    }

    if (new_linked_file.exists()) {
      throw (new FMFileManagerException(
          "renameFile fails - file '" + new_canonical_path + "' already exists"));
    }

    boolean was_open = isOpen();

    close(); // full close, this will release any slots in the limited file case

    if (!linked_file.exists() || linked_file.renameTo(new_linked_file)) {
      // Rename succeeded (or the source never existed): adopt the new identity.
      linked_file = new_linked_file;
      canonical_path = new_canonical_path;

      reserveFile();

      if (was_open) {
        ensureOpen("renameFile target"); // ensure open will regain slots in limited file case
      }
    } else {
      // Rename failed: re-reserve and re-open under the old name, best effort.
      try {
        reserveFile();
      } catch (FMFileManagerException e) {
        Debug.printStackTrace(e);
      }

      if (was_open) {
        try {
          ensureOpen("renameFile recovery");
        } catch (FMFileManagerException e) {
          Debug.printStackTrace(e);
        }
      }

      throw (new FMFileManagerException("renameFile fails"));
    }
  } finally {
    this_mon.exit();
  }
}