/**
 * Test concurrent access: First build a clone iterator in a separate thread that hangs
 * momentarily during its construction; then modify the shared collection in this thread. This
 * would cause a ConcurrentModificationException in the other thread if the clone iterator were
 * not synchronized on the original collection.
 */
public void testConcurrentAccess() throws Exception {
  SlowCollection slow = new SlowCollection();
  this.populateCollection(slow);
  // using the unsynchronized collection will cause the test to fail
  // this.originalCollection = slow;
  this.originalCollection = Collections.synchronizedCollection(slow);
  this.concurrentProblem = false;
  this.concurrentCollection = new ArrayList();
  Thread thread = new Thread(this.buildRunnable());
  thread.start();
  while (!slow.hasStartedClone()) {
    // wait for the other thread to start the clone...
    Thread.yield();
  }
  // ...then sneak in an extra element
  this.originalCollection.add("seventeen");
  while (thread.isAlive()) {
    // wait for the other thread to finish
    Thread.yield();
  }
  assertFalse(this.concurrentProblem);
  Collection expected = new ArrayList();
  this.populateCollection(expected);
  assertEquals(expected, this.concurrentCollection);
}
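The test above relies on the standard Collections.synchronizedCollection contract: single-element operations are guarded by the wrapper's internal lock, but any traversal (including copying) must hold the wrapper's own lock. A minimal, self-contained sketch of that idiom, with hypothetical names:

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;

public class SafeCopyExample {
  public static void main(String[] args) {
    Collection<String> shared = Collections.synchronizedCollection(new ArrayList<String>());
    shared.add("one");
    shared.add("two");

    // add/remove/contains are already guarded by the wrapper's internal lock.
    shared.add("three");

    // Traversals are not: copying or iterating must hold the wrapper's lock,
    // otherwise a concurrent add() in another thread can trigger a
    // ConcurrentModificationException, which is the failure the test provokes.
    Collection<String> snapshot;
    synchronized (shared) {
      snapshot = new ArrayList<String>(shared);
    }
    System.out.println(snapshot);
  }
}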
/** * Adds the value to the collection specified by the key. If there is not a collection for the * given key, a new collection is created and added to the hash. * * @param key The key whose collection we will add value to. * @param value The value to be added. * @return the collection containing value and all other items associated with the key. */ @SuppressWarnings("unchecked") public Collection<V> put(K key, V value) { Pair<Collection<V>, O> objectPair = map.get(key); if (objectPair == null) { try { Collection<V> items; if (isSynchronized) { items = Collections.synchronizedCollection(collectionType.newInstance()); } else { items = collectionType.newInstance(); } objectPair = new Pair(items, plusProvider.newObject()); map.put(key, objectPair); } catch (InstantiationException ex) { ex.printStackTrace(); return null; } catch (IllegalAccessException ex) { ex.printStackTrace(); return null; } } objectPair.getFirst().add(value); return objectPair.getFirst(); }
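The insert-or-create step above can also be expressed with Map.computeIfAbsent on Java 8+. This is only an illustrative sketch using a plain HashMap with String keys and values; it omits the Pair/plusProvider bookkeeping and the isSynchronized flag of the original class.

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class MultiMapPutExample {
  private final Map<String, Collection<String>> map = new HashMap<>();

  /** Adds value to the collection for key, creating a synchronized list on first use. */
  public Collection<String> put(String key, String value) {
    Collection<String> items = map.computeIfAbsent(
        key, k -> Collections.synchronizedCollection(new ArrayList<String>()));
    items.add(value);
    return items;
  }

  public static void main(String[] args) {
    MultiMapPutExample m = new MultiMapPutExample();
    m.put("a", "x");
    System.out.println(m.put("a", "y").size()); // 2
  }
}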
public RunThread(Collection<Operation> ops, User u) { super("CacheValidatorThread"); setDaemon(true); operations = ops; user = u; errs = Collections.synchronizedCollection(new RingBuffer<String>(10)); }
public synchronized void start() throws ResourceManagerSystemException {
  logger.info("Starting ResourceManager");
  operationMode = OPERATION_MODE_STARTING;
  globalTransactions = Collections.synchronizedCollection(new ArrayList());
  // TODO: recover and sync
  doStart();
  recover();
  // sync();
  operationMode = OPERATION_MODE_STARTED;
  if (dirty) {
    logger.warn(
        "Started ResourceManager, but in dirty mode only (Recovery of pending transactions failed)");
  } else {
    logger.info("Started ResourceManager");
  }
}
public static class DefaultEventSource implements EventSource { private final Collection<Body> mBodies = Collections.synchronizedCollection(new ArrayList<Body>()); @Override public final void connect(Body body) { mBodies.add(body); } public final void write(final String data) { final Iterator<Body> i = mBodies.iterator(); while (i.hasNext()) { final Body body = i.next(); if (body.isStopped()) { i.remove(); } else { body.writeEventData(data); } } } public final void end(final String data) { final Iterator<Body> i = mBodies.iterator(); while (i.hasNext()) { final Body body = i.next(); if (body.isStopped()) { i.remove(); } else { body.writeEventData(data); body.stop(); i.remove(); } } } public final void end() { final Iterator<Body> i = mBodies.iterator(); while (i.hasNext()) { final Body body = i.next(); if (!body.isStopped()) { body.stop(); } i.remove(); } } }
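The write() and end() methods above iterate mBodies, a synchronized wrapper, without holding its lock, so a concurrent connect() can still break the iteration. A sketch of the same traversal guarded by the wrapper's lock; the Body interface here is a minimal hypothetical stand-in for the real type, not the project's API.

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;

public class SafeEventSourceSketch {
  /** Minimal stand-in for the Body type used above (hypothetical). */
  interface Body {
    boolean isStopped();
    void writeEventData(String data);
    void stop();
  }

  private final Collection<Body> mBodies =
      Collections.synchronizedCollection(new ArrayList<Body>());

  public void connect(Body body) {
    mBodies.add(body);
  }

  /**
   * Same logic as write(String) above, but the whole traversal (including
   * Iterator.remove) is guarded by the wrapper's lock, so a concurrent
   * connect() cannot invalidate the iterator mid-loop.
   */
  public void write(String data) {
    synchronized (mBodies) {
      Iterator<Body> i = mBodies.iterator();
      while (i.hasNext()) {
        Body body = i.next();
        if (body.isStopped()) {
          i.remove();
        } else {
          body.writeEventData(data);
        }
      }
    }
  }
}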
public Pluginer(
    File source,
    File process,
    File destination,
    Downloader coworker,
    ImageProcessingLogger log,
    ArrayList<Plugin> plugins) {
  super(log);
  this.source = source;
  this.process = process;
  this.destination = destination;
  this.coworker = coworker;
  this.plugins = plugins;
  this.filePlugin = new ArrayList<String>();
  this.filePlugin.addAll(Arrays.asList(destination.list()));
  // NOTE: the synchronized view returned here is discarded, so this call has no
  // effect; filePlugin is still accessed through the unsynchronized ArrayList.
  Collections.synchronizedCollection(filePlugin);
}
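A corrected sketch of that last step, keeping the wrapper instead of discarding it. It assumes the field can be declared as Collection<String>; if the real field is typed ArrayList<String>, this assignment would not compile without further changes.

import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;

public class PluginerFieldSketch {
  // Assumption: field declared as Collection so the synchronized view can be stored.
  private final Collection<String> filePlugin;

  public PluginerFieldSketch(File destination) {
    // Keep the wrapper that synchronizedCollection returns; calling it and
    // throwing the result away leaves the list unsynchronized.
    this.filePlugin = Collections.synchronizedCollection(new ArrayList<String>());
    this.filePlugin.addAll(Arrays.asList(destination.list()));
  }
}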
@RaceTest( expectRace = false, description = "Test instrumentation of Collections$SynchronizedX classes") public void synchronizedCollections() { final Collection<Integer> c = Collections.synchronizedCollection(new ArrayList<Integer>()); new ThreadRunner(2) { @Override public void thread1() { c.add(1); } @Override public void thread2() { c.contains(1); } }; }
@RaceTest(
    expectRace = true,
    description = "iterate over synchronized collection without manually synchronize on it")
public void iterateSyncCollectionWrong() {
  ArrayList<Integer> a = new ArrayList<>();
  a.add(0);
  a.add(1);
  final Collection<Integer> c = Collections.synchronizedCollection(a);
  new ThreadRunner(2) {
    @Override
    public void thread1() {
      c.iterator().hasNext(); // explicit iterator
      for (Integer i : c) {} // implicit iterator
    }

    @Override
    public void thread2() {
      c.add(2);
    }
  };
}
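The race flagged above goes away if the traversal holds the wrapper's lock, which is what the Collections.synchronizedCollection documentation requires. A minimal corrected sketch using plain threads instead of the ThreadRunner harness:

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;

public class IterateSyncCollectionRight {
  public static void main(String[] args) throws InterruptedException {
    final Collection<Integer> c = Collections.synchronizedCollection(new ArrayList<Integer>());
    c.add(0);
    c.add(1);

    Thread reader = new Thread(() -> {
      // Hold the wrapper's lock for the whole traversal.
      synchronized (c) {
        for (Integer i : c) {
          System.out.println(i);
        }
      }
    });
    // The writer needs no explicit block: add() locks internally.
    Thread writer = new Thread(() -> c.add(2));

    reader.start();
    writer.start();
    reader.join();
    writer.join();
    System.out.println("final size: " + c.size());
  }
}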
/** * Returns a {@link Collection} view of the values contained in this map. The collection is backed * by the map, so changes to the map are reflected in the collection, and vice-versa. If the map * is modified while an iteration over the collection is in progress (except through the * iterator's own {@code remove} operation), the results of the iteration are undefined. The * collection supports element removal, which removes the corresponding mapping from the map, via * the {@code Iterator.remove}, {@code Collection.remove}, {@code removeAll}, {@code retainAll} * and {@code clear} operations. It does not support the {@code add} or {@code addAll} operations. * * @since 1.2 */ public Collection<V> values() { if (values == null) values = Collections.synchronizedCollection(new ValueCollection(), this); return values; }
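The two-argument synchronizedCollection overload used above is package-private to java.util; it appears here so the values() view shares the enclosing map's lock. From application code the equivalent setup is Collections.synchronizedMap, whose contract requires traversing its views while holding the map's lock. A small sketch of that usage:

import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class SynchronizedMapValuesExample {
  public static void main(String[] args) {
    Map<String, Integer> m = Collections.synchronizedMap(new HashMap<String, Integer>());
    m.put("a", 1);
    m.put("b", 2);

    Collection<Integer> values = m.values(); // the view shares the map's lock

    // Per the synchronizedMap contract, synchronize on the map (not the view)
    // for the duration of the traversal.
    synchronized (m) {
      for (Integer v : values) {
        System.out.println(v);
      }
    }
  }
}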
/** * A utility class that provides easy access to Structure objects. If you are running a script that * is frequently re-using the same PDB structures, the AtomCache keeps an in-memory cache of the * files for quicker access. The cache is a soft-cache, this means it won't cause out of memory * exceptions, but garbage collects the data if the Java virtual machine needs to free up space. The * AtomCache is thread-safe. * * @author Andreas Prlic * @author Spencer Bliven * @author Peter Rose * @since 3.0 */ public class AtomCache { private static final Logger logger = LoggerFactory.getLogger(AtomCache.class); public static final String BIOL_ASSEMBLY_IDENTIFIER = "BIO:"; public static final String CHAIN_NR_SYMBOL = ":"; public static final String CHAIN_SPLIT_SYMBOL = "."; public static final String PDP_DOMAIN_IDENTIFIER = "PDP:"; public static final String UNDERSCORE = "_"; private static final String FILE_SEPARATOR = System.getProperty("file.separator"); protected FileParsingParameters params; protected PDPProvider pdpprovider; private FetchBehavior fetchBehavior; private ObsoleteBehavior obsoleteBehavior; private String cachePath; // make sure IDs are loaded uniquely private Collection<String> currentlyLoading = Collections.synchronizedCollection(new TreeSet<String>()); private String path; private boolean useMmCif; /** * Default AtomCache constructor. * * <p>Usually stores files in a temp directory, but this can be overriden by setting the PDB_DIR * variable at runtime. * * @see UserConfiguration#UserConfiguration() */ public AtomCache() { this(new UserConfiguration()); } /** * Creates an instance of an AtomCache that is pointed to the a particular path in the file * system. It will use the same value for pdbFilePath and cachePath. * * @param pdbFilePath a directory in the file system to use as a location to cache files. */ public AtomCache(String pdbFilePath) { this(pdbFilePath, pdbFilePath); } /** * Creates an instance of an AtomCache that is pointed to the a particular path in the file * system. * * @param pdbFilePath a directory in the file system to use as a location to cache files. * @param cachePath */ public AtomCache(String pdbFilePath, String cachePath) { logger.debug( "Initialising AtomCache with pdbFilePath={}, cachePath={}", pdbFilePath, cachePath); if (!pdbFilePath.endsWith(FILE_SEPARATOR)) { pdbFilePath += FILE_SEPARATOR; } // we are caching the binary files that contain the PDBs gzipped // that is the most memory efficient way of caching... // set the input stream provider to caching mode System.setProperty(InputStreamProvider.CACHE_PROPERTY, "true"); setPath(pdbFilePath); this.cachePath = cachePath; fetchBehavior = FetchBehavior.DEFAULT; obsoleteBehavior = ObsoleteBehavior.DEFAULT; currentlyLoading.clear(); params = new FileParsingParameters(); setUseMmCif(true); } /** * @param isSplit Ignored * @deprecated isSplit parameter is ignored (4.0.0) */ @Deprecated public AtomCache(String pdbFilePath, boolean isSplit) { this(pdbFilePath); } /** * @param isSplit Ignored * @deprecated isSplit parameter is ignored (4.0.0) */ @Deprecated public AtomCache(String pdbFilePath, String cachePath, boolean isSplit) { this(pdbFilePath, cachePath); } /** * Creates a new AtomCache object based on the provided UserConfiguration. * * @param config the UserConfiguration to use for this cache. 
*/ public AtomCache(UserConfiguration config) { this(config.getPdbFilePath(), config.getCacheFilePath()); fetchBehavior = config.getFetchBehavior(); obsoleteBehavior = config.getObsoleteBehavior(); useMmCif = config.getFileFormat().equals(UserConfiguration.MMCIF_FORMAT); } /** * Returns the CA atoms for the provided name. See {@link #getStructure(String)} for supported * naming conventions. * * <p>This method only works with protein chains. Use {@link #getRepresentativeAtoms(String)} for * a more general solution. * * @param name * @return an array of Atoms. * @throws IOException * @throws StructureException * @see */ public Atom[] getAtoms(String name) throws IOException, StructureException { return getAtoms(new StructureName(name)); } public Atom[] getAtoms(StructureIdentifier name) throws IOException, StructureException { Atom[] atoms = null; // System.out.println("loading " + name); Structure s = getStructure(name); atoms = StructureTools.getAtomCAArray(s); /* * synchronized (cache){ cache.put(name, atoms); } */ return atoms; } /** * Returns the representative atoms for the provided name. See {@link #getStructure(String)} for * supported naming conventions. * * @param name * @return an array of Atoms. * @throws IOException * @throws StructureException * @see */ public Atom[] getRepresentativeAtoms(String name) throws IOException, StructureException { return getRepresentativeAtoms(new StructureName(name)); } public Atom[] getRepresentativeAtoms(StructureIdentifier name) throws IOException, StructureException { Atom[] atoms = null; Structure s = getStructure(name); atoms = StructureTools.getRepresentativeAtomArray(s); /* * synchronized (cache){ cache.put(name, atoms); } */ return atoms; } /** * Loads the biological assembly for a given PDB ID and bioAssemblyId. If a bioAssemblyId > 0 is * specified, the corresponding biological assembly file will be loaded. Note, the number of * available biological unit files varies. Many entries don't have a biological assembly specified * (i.e. NMR structures), many entries have only one biological assembly (bioAssemblyId=1), and a * few structures have multiple biological assemblies. Set bioAssemblyFallback to true, to * download the original PDB file in cases that a biological assembly file is not available. * * @param pdbId the PDB ID * @param bioAssemblyId the 1-based index of the biological assembly (0 gets the asymmetric unit) * @param bioAssemblyFallback if true, try reading original PDB file in case the biological * assembly file is not available * @return a structure object * @throws IOException * @throws StructureException * @author Peter Rose * @since 3.2 */ public Structure getBiologicalAssembly( String pdbId, int bioAssemblyId, boolean bioAssemblyFallback) throws StructureException, IOException { if (bioAssemblyId < 0) { throw new StructureException( "bioAssemblyID must be nonnegative: " + pdbId + " bioAssemblyId " + bioAssemblyId); } Structure s = StructureIO.getBiologicalAssembly(pdbId, bioAssemblyId, this); if (s == null && bioAssemblyFallback) return StructureIO.getBiologicalAssembly(pdbId, 0, this); return s; } /** * Loads the default biological unit (e.g. *.pdb1.gz). If it is not available, the asymmetric unit * will be loaded, i.e. for NMR structures. 
* * <p>Biological assemblies can also be accessed using <tt>getStructure("BIO:<i>[pdbId]</i>")</tt> * * @param pdbId the PDB ID * @return a structure object * @throws IOException * @throws StructureException * @since 4.2 */ public Structure getBiologicalAssembly(String pdbId) throws StructureException, IOException { int bioAssemblyId = 1; return getBiologicalAssembly(pdbId, bioAssemblyId); } /** * Loads the default biological unit (e.g. *.pdb1.gz). If it is not available, the asymmetric unit * will be loaded, i.e. for NMR structures. * * @param pdbId the PDB ID * @return a structure object * @throws IOException * @throws StructureException * @since 3.2 * @deprecated Renamed to {@link #getBiologicalAssembly(String)} in 4.2 */ @Deprecated public Structure getBiologicalUnit(String pdbId) throws StructureException, IOException { return getBiologicalAssembly(pdbId); } /** * Loads the default biological unit (e.g. *.pdb1.gz). If it is not available, the asymmetric unit * will be loaded, i.e. for NMR structures. * * @param pdbId the PDB ID * @param bioAssemblyId the 1-based index of the biological assembly (0 gets the asymmetric unit) * @return a structure object * @throws IOException * @throws StructureException * @since 4.2 */ public Structure getBiologicalAssembly(String pdbId, int bioAssemblyId) throws StructureException, IOException { boolean bioAssemblyFallback = true; return getBiologicalAssembly(pdbId, bioAssemblyId, bioAssemblyFallback); } /** * Returns the path that contains the caching file for utility data, such as domain definitions. * * @return */ public String getCachePath() { return cachePath; } public FileParsingParameters getFileParsingParams() { return params; } /** * Get the path that is used to cache PDB files. * * @return path to a directory */ public String getPath() { return path; } public PDPProvider getPdpprovider() { return pdpprovider; } /** * Request a Structure based on a <i>name</i>. * * <pre> * Formal specification for how to specify the <i>name</i>: * * name := pdbID * | pdbID '.' chainID * | pdbID '.' range * | scopID * range := '('? range (',' range)? ')'? * | chainID * | chainID '_' resNum '-' resNum * pdbID := [0-9][a-zA-Z0-9]{3} * chainID := [a-zA-Z0-9] * scopID := 'd' pdbID [a-z_][0-9_] * resNum := [-+]?[0-9]+[A-Za-z]? * * * Example structures: * 1TIM #whole structure * 4HHB.C #single chain * 4GCR.A_1-83 #one domain, by residue number * 3AA0.A,B #two chains treated as one structure * d2bq6a1 #scop domain * </pre> * * With the additional set of rules: * * <ul> * <li>If only a PDB code is provided, the whole structure will be return including ligands, but * the first model only (for NMR). * <li>Chain IDs are case sensitive, PDB ids are not. To specify a particular chain write as: * 4hhb.A or 4HHB.A * <li>To specify a SCOP domain write a scopId e.g. d2bq6a1. Some flexibility can be allowed in * SCOP domain names, see {@link #setStrictSCOP(boolean)} * <li>URLs are accepted as well * </ul> * * <p>Note that this method should not be used in StructureIdentifier implementations to avoid * circular calls. * * @param name * @return a Structure object, or null if name appears improperly formated (eg too short, etc) * @throws IOException The PDB file cannot be cached due to IO errors * @throws StructureException The name appeared valid but did not correspond to a structure. Also * thrown by some submethods upon errors, eg for poorly formatted subranges. 
*/ public Structure getStructure(String name) throws IOException, StructureException { StructureName structureName = new StructureName(name); return getStructure(structureName); } /** * Get the structure corresponding to the given {@link StructureIdentifier}. Equivalent to calling * {@link StructureIdentifier#loadStructure(AtomCache)} followed by {@link * StructureIdentifier#reduce(Structure)}. * * <p>Note that this method should not be used in StructureIdentifier implementations to avoid * circular calls. * * @param strucId * @return * @throws IOException * @throws StructureException */ public Structure getStructure(StructureIdentifier strucId) throws IOException, StructureException { Structure s = strucId.loadStructure(this); Structure r = strucId.reduce(s); r.setStructureIdentifier(strucId); return r; // if (name.length() < 4) { // throw new IllegalArgumentException("Can't interpret IDs that are shorter than 4 // characters!"); // } // // Structure n = null; // // boolean useChainNr = false; // boolean useDomainInfo = false; // String range = null; // int chainNr = -1; // // // StructureName structureName = new StructureName(name); // // String pdbId = null; // String chainId = null; // // if (name.length() == 4) { // // pdbId = name; // Structure s; // if (useMmCif) { // s = loadStructureFromCifByPdbId(pdbId); // } else { // s = loadStructureFromPdbByPdbId(pdbId); // } // return s; // } else if (structureName.isScopName()) { // // // return based on SCOP domain ID // return getStructureFromSCOPDomain(name); // } else if (structureName.isCathID()) { // return getStructureForCathDomain(structureName, CathFactory.getCathDatabase()); // } else if (name.length() == 6) { // // name is PDB.CHAINID style (e.g. 4hhb.A) // // pdbId = name.substring(0, 4); // if (name.substring(4, 5).equals(CHAIN_SPLIT_SYMBOL)) { // chainId = name.substring(5, 6); // } else if (name.substring(4, 5).equals(CHAIN_NR_SYMBOL)) { // // useChainNr = true; // chainNr = Integer.parseInt(name.substring(5, 6)); // } // // } else if (name.startsWith("file:/") || name.startsWith("http:/")) { // // this is a URL // // URL url = new URL(name); // return getStructureFromURL(url); // // // } else if (structureName.isPDPDomain()) { // // // this is a PDP domain definition // // return getPDPStructure(name); // // } else if (name.startsWith(BIOL_ASSEMBLY_IDENTIFIER)) { // // return getBioAssembly(name); // // } else if (name.length() > 6 && !name.startsWith(PDP_DOMAIN_IDENTIFIER) // && (name.contains(CHAIN_NR_SYMBOL) || name.contains(UNDERSCORE)) // && !(name.startsWith("file:/") || name.startsWith("http:/")) // // ) { // // // this is a name + range // // pdbId = name.substring(0, 4); // // this ID has domain split information... // useDomainInfo = true; // range = name.substring(5); // // } // // // System.out.println("got: >" + name + "< " + pdbId + " " + chainId + " useChainNr:" + // useChainNr + " " // // +chainNr + " useDomainInfo:" + useDomainInfo + " " + range); // // if (pdbId == null) { // // return null; // } // // while (checkLoading(pdbId)) { // // waiting for loading to be finished... 
// // try { // Thread.sleep(100); // } catch (InterruptedException e) { // logger.error(e.getMessage()); // } // // } // // // long start = System.currentTimeMillis(); // // Structure s; // if (useMmCif) { // s = loadStructureFromCifByPdbId(pdbId); // } else { // s = loadStructureFromPdbByPdbId(pdbId); // } // // // long end = System.currentTimeMillis(); // // System.out.println("time to load " + pdbId + " " + (end-start) + "\t size :" + // // StructureTools.getNrAtoms(s) + "\t cached: " + cache.size()); // // if (chainId == null && chainNr < 0 && range == null) { // // we only want the 1st model in this case // n = StructureTools.getReducedStructure(s, -1); // } else { // // if (useChainNr) { // // System.out.println("using ChainNr"); // n = StructureTools.getReducedStructure(s, chainNr); // } else if (useDomainInfo) { // // System.out.println("calling getSubRanges"); // n = StructureTools.getSubRanges(s, range); // } else { // // System.out.println("reducing Chain Id " + chainId); // n = StructureTools.getReducedStructure(s, chainId); // } // } // // // // n.setName(name); // return n; } /** * Returns the representation of a {@link ScopDomain} as a BioJava {@link Structure} object. * * @param domain a SCOP domain * @return a Structure object * @throws IOException * @throws StructureException */ public Structure getStructureForDomain(ScopDomain domain) throws IOException, StructureException { return getStructureForDomain(domain, ScopFactory.getSCOP()); } /** * Returns the representation of a {@link ScopDomain} as a BioJava {@link Structure} object. * * @param domain a SCOP domain * @param scopDatabase A {@link ScopDatabase} to use * @return a Structure object * @throws IOException * @throws StructureException */ public Structure getStructureForDomain(ScopDomain domain, ScopDatabase scopDatabase) throws IOException, StructureException { return getStructureForDomain(domain, scopDatabase, false); } /** * Returns the representation of a {@link ScopDomain} as a BioJava {@link Structure} object. 
* * @param domain a SCOP domain * @param scopDatabase A {@link ScopDatabase} to use * @param strictLigandHandling If set to false, hetero-atoms are included if and only if they * belong to a chain to which the SCOP domain belongs; if set to true, hetero-atoms are * included if and only if they are strictly within the definition (residue numbers) of the * SCOP domain * @return a Structure object * @throws IOException * @throws StructureException */ public Structure getStructureForDomain( ScopDomain domain, ScopDatabase scopDatabase, boolean strictLigandHandling) throws IOException, StructureException { String pdbId = domain.getPdbId(); Structure fullStructure = getStructureForPdbId(pdbId); Structure structure = domain.reduce(fullStructure); // TODO It would be better to move all of this into the reduce method, // but that would require ligand handling properties in StructureIdentifiers // because ligands sometimes occur after TER records in PDB files, we may need to add some // ligands back in // specifically, we add a ligand if and only if it occurs within the domain AtomPositionMap map = null; List<ResidueRangeAndLength> rrs = null; if (strictLigandHandling) { map = new AtomPositionMap( StructureTools.getAllAtomArray(fullStructure), AtomPositionMap.ANYTHING_MATCHER); rrs = ResidueRangeAndLength.parseMultiple(domain.getRanges(), map); } for (Chain chain : fullStructure.getChains()) { if (!structure.hasChain(chain.getChainID())) { continue; // we can't do anything with a chain our domain } // doesn't contain Chain newChain = structure.getChainByPDB(chain.getChainID()); List<Group> ligands = StructureTools.filterLigands(chain.getAtomGroups()); for (Group group : ligands) { boolean shouldContain = true; if (strictLigandHandling) { shouldContain = false; // whether the ligand occurs within the domain for (ResidueRange rr : rrs) { if (rr.contains(group.getResidueNumber(), map)) { shouldContain = true; } } } boolean alreadyContains = newChain.getAtomGroups().contains(group); // we don't want to add duplicate // ligands if (shouldContain && !alreadyContains) { newChain.addGroup(group); } } } // build a more meaningful description for the new structure StringBuilder header = new StringBuilder(); header.append(domain.getClassificationId()); if (scopDatabase != null) { int sf = domain.getSuperfamilyId(); ScopDescription description = scopDatabase.getScopDescriptionBySunid(sf); if (description != null) { header.append(" | "); header.append(description.getDescription()); } } structure.getPDBHeader().setDescription(header.toString()); return structure; } /** * Returns the representation of a {@link ScopDomain} as a BioJava {@link Structure} object. * * @param scopId a SCOP Id * @return a Structure object * @throws IOException * @throws StructureException */ public Structure getStructureForDomain(String scopId) throws IOException, StructureException { return getStructureForDomain(scopId, ScopFactory.getSCOP()); } /** * Returns the representation of a {@link ScopDomain} as a BioJava {@link Structure} object. 
* * @param scopId a SCOP Id * @param scopDatabase A {@link ScopDatabase} to use * @return a Structure object * @throws IOException * @throws StructureException */ public Structure getStructureForDomain(String scopId, ScopDatabase scopDatabase) throws IOException, StructureException { ScopDomain domain = scopDatabase.getDomainByScopID(scopId); return getStructureForDomain(domain, scopDatabase); } /** * Does the cache automatically download files that are missing from the local installation from * the PDB FTP site? * * @return flag * @deprecated Use {@link #getFetchBehavior()} */ @Deprecated public boolean isAutoFetch() { return fetchBehavior != FetchBehavior.LOCAL_ONLY; } /** * <b>N.B.</b> This feature won't work unless the structure wasn't found & autoFetch is set to * <code>true</code>. * * @return the fetchCurrent * @deprecated Use {@link FileParsingParameters#getObsoleteBehavior()} instead (4.0.0) */ @Deprecated public boolean isFetchCurrent() { return getObsoleteBehavior() == ObsoleteBehavior.FETCH_CURRENT; } /** * forces the cache to fetch the file if its status is OBSOLETE. This feature has a higher * priority than {@link #setFetchCurrent(boolean)}.<br> * <b>N.B.</b> This feature won't work unless the structure wasn't found & autoFetch is set to * <code>true</code>. * * @return the fetchFileEvenIfObsolete * @author Amr AL-Hossary * @see #fetchCurrent * @since 3.0.2 * @deprecated Use {@link FileParsingParameters#getObsoleteBehavior()} instead (4.0.0) */ @Deprecated public boolean isFetchFileEvenIfObsolete() { return getObsoleteBehavior() == ObsoleteBehavior.FETCH_OBSOLETE; } /** * Scop handling was changed in 4.2.0. For behaviour equivalent to strictSCOP==true, use {@link * ScopDatabase#getDomainByScopID(String)}. For strictSCOP==False, create a {@link StructureName} * or use {@link StructureName#guessScopDomain(String, ScopDatabase)} explicitely. * * @return false; ignored * @deprecated since 4.2 */ @Deprecated public boolean isStrictSCOP() { return false; } /** * Send a signal to the cache that the system is shutting down. Notifies underlying * SerializableCache instances to flush themselves... */ public void notifyShutdown() { // System.out.println(" AtomCache got notify shutdown.."); if (pdpprovider != null) { if (pdpprovider instanceof RemotePDPProvider) { RemotePDPProvider remotePDP = (RemotePDPProvider) pdpprovider; remotePDP.flushCache(); } } // todo: use a SCOP implementation that is backed by SerializableCache ScopDatabase scopInstallation = ScopFactory.getSCOP(); if (scopInstallation != null) { if (scopInstallation instanceof CachedRemoteScopInstallation) { CachedRemoteScopInstallation cacheScop = (CachedRemoteScopInstallation) scopInstallation; cacheScop.flushCache(); } } } /** * Does the cache automatically download files that are missing from the local installation from * the PDB FTP site? * * @param autoFetch flag * @deprecated Use {@link #getFetchBehavior()} */ @Deprecated public void setAutoFetch(boolean autoFetch) { if (autoFetch) { setFetchBehavior(FetchBehavior.DEFAULT); } else { setFetchBehavior(FetchBehavior.LOCAL_ONLY); } } /** * set the location at which utility data should be cached. * * @param cachePath */ public void setCachePath(String cachePath) { this.cachePath = cachePath; } /** * if enabled, the reader searches for the newest possible PDB ID, if not present in he local * installation. 
The {@link #setFetchFileEvenIfObsolete(boolean)} function has a higher priority * than this function.<br> * <b>N.B.</b> This feature won't work unless the structure wasn't found & autoFetch is set to * <code>true</code>. * * @param fetchCurrent the fetchCurrent to set * @author Amr AL-Hossary * @see #setFetchFileEvenIfObsolete(boolean) * @since 3.0.2 * @deprecated Use {@link FileParsingParameters#setObsoleteBehavior()} instead (4.0.0) */ @Deprecated public void setFetchCurrent(boolean fetchNewestCurrent) { if (fetchNewestCurrent) { setObsoleteBehavior(ObsoleteBehavior.FETCH_CURRENT); } else { if (getObsoleteBehavior() == ObsoleteBehavior.FETCH_CURRENT) { setObsoleteBehavior(ObsoleteBehavior.DEFAULT); } } } /** * <b>N.B.</b> This feature won't work unless the structure wasn't found & autoFetch is set to * <code>true</code>. * * @param fetchFileEvenIfObsolete the fetchFileEvenIfObsolete to set * @deprecated Use {@link FileParsingParameters#setObsoleteBehavior()} instead (4.0.0) */ @Deprecated public void setFetchFileEvenIfObsolete(boolean fetchFileEvenIfObsolete) { if (fetchFileEvenIfObsolete) { setObsoleteBehavior(ObsoleteBehavior.FETCH_OBSOLETE); } else { if (getObsoleteBehavior() == ObsoleteBehavior.FETCH_OBSOLETE) { setObsoleteBehavior(ObsoleteBehavior.DEFAULT); } } } public void setFileParsingParams(FileParsingParameters params) { this.params = params; } /** * <b>[Optional]</b> This method changes the behavior when obsolete entries are requested. Current * behaviors are: * * <ul> * <li>{@link ObsoleteBehavior#THROW_EXCEPTION THROW_EXCEPTION} Throw a {@link * StructureException} (the default) * <li>{@link ObsoleteBehavior#FETCH_OBSOLETE FETCH_OBSOLETE} Load the requested ID from the * PDB's obsolete repository * <li>{@link ObsoleteBehavior#FETCH_CURRENT FETCH_CURRENT} Load the most recent version of the * requested structure * <p>This setting may be silently ignored by implementations which do not have access to * the server to determine whether an entry is obsolete, such as if {@link #isAutoFetch()} * is false. Note that an obsolete entry may still be returned even this is FETCH_CURRENT if * the entry is found locally. * * @param fetchFileEvenIfObsolete Whether to fetch obsolete records * @see #setFetchCurrent(boolean) * @since 4.0.0 */ public void setObsoleteBehavior(ObsoleteBehavior behavior) { obsoleteBehavior = behavior; } /** * Returns how this instance deals with obsolete entries. Note that this setting may be ignored by * some implementations or in some situations, such as when {@link #isAutoFetch()} is false. * * <p>For most implementations, the default value is {@link ObsoleteBehavior#THROW_EXCEPTION * THROW_EXCEPTION}. * * @return The ObsoleteBehavior * @since 4.0.0 */ public ObsoleteBehavior getObsoleteBehavior() { return obsoleteBehavior; } /** * Get the behavior for fetching files from the server * * @return */ public FetchBehavior getFetchBehavior() { return fetchBehavior; } /** * Set the behavior for fetching files from the server * * @param fetchBehavior */ public void setFetchBehavior(FetchBehavior fetchBehavior) { this.fetchBehavior = fetchBehavior; } /** * Set the path that is used to cache PDB files. * * @param path to a directory */ public void setPath(String path) { this.path = FileDownloadUtils.expandUserHome(path); } public void setPdpprovider(PDPProvider pdpprovider) { this.pdpprovider = pdpprovider; } /** * This method does nothing. * * <p>Scop handling was changed in 4.2.0. 
For behaviour equivalent to strictSCOP==true, use {@link * ScopDatabase#getDomainByScopID(String)}. For strictSCOP==False, create a {@link StructureName} * or use {@link StructureName#guessScopDomain(String, ScopDatabase)} explicitely. * * @param strictSCOP Ignored * @deprecated Removed in 4.2.0 */ @Deprecated public void setStrictSCOP(boolean ignored) {} /** @return the useMmCif */ public boolean isUseMmCif() { return useMmCif; } /** @param useMmCif the useMmCif to set */ public void setUseMmCif(boolean useMmCif) { this.useMmCif = useMmCif; if (useMmCif) { // get bio assembly from mmcif file BioUnitDataProviderFactory.setBioUnitDataProvider(MmCifBiolAssemblyProvider.class); } else { BioUnitDataProviderFactory.setBioUnitDataProvider(PDBBioUnitDataProvider.class); } } private boolean checkLoading(String name) { return currentlyLoading.contains(name); } /** * Returns a {@link Structure} corresponding to the CATH identifier supplied in {@code * structureName}, using the the {@link CathDatabase} at {@link CathFactory#getCathDatabase()}. */ public Structure getStructureForCathDomain(StructureName structureName) throws IOException, StructureException { return getStructureForCathDomain(structureName, CathFactory.getCathDatabase()); } /** * Returns a {@link Structure} corresponding to the CATH identifier supplied in {@code * structureName}, using the specified {@link CathDatabase}. */ public Structure getStructureForCathDomain(StructureName structureName, CathDatabase cathInstall) throws IOException, StructureException { CathDomain cathDomain = cathInstall.getDomainByCathId(structureName.getIdentifier()); Structure s = getStructureForPdbId(cathDomain.getIdentifier()); Structure n = cathDomain.reduce(s); // add the ligands of the chain... Chain newChain = n.getChainByPDB(structureName.getChainId()); Chain origChain = s.getChainByPDB(structureName.getChainId()); List<Group> ligands = origChain.getAtomLigands(); for (Group g : ligands) { if (!newChain.getAtomGroups().contains(g)) { newChain.addGroup(g); } } return n; } protected void flagLoading(String name) { if (!currentlyLoading.contains(name)) { currentlyLoading.add(name); } } protected void flagLoadingFinished(String name) { currentlyLoading.remove(name); } /** * Loads a structure directly by PDB ID * * @param pdbId * @return * @throws IOException * @throws StructureException */ public Structure getStructureForPdbId(String pdbId) throws IOException, StructureException { if (pdbId == null) return null; if (pdbId.length() != 4) { throw new StructureException("Unrecognized PDB ID: " + pdbId); } while (checkLoading(pdbId)) { // waiting for loading to be finished... 
try { Thread.sleep(100); } catch (InterruptedException e) { logger.error(e.getMessage()); } } Structure s; if (useMmCif) { s = loadStructureFromCifByPdbId(pdbId); } else { s = loadStructureFromPdbByPdbId(pdbId); } return s; } protected Structure loadStructureFromCifByPdbId(String pdbId) throws IOException, StructureException { Structure s; flagLoading(pdbId); try { MMCIFFileReader reader = new MMCIFFileReader(path); reader.setFetchBehavior(fetchBehavior); reader.setObsoleteBehavior(obsoleteBehavior); reader.setFileParsingParameters(params); s = reader.getStructureById(pdbId.toLowerCase()); } finally { flagLoadingFinished(pdbId); } return s; } protected Structure loadStructureFromPdbByPdbId(String pdbId) throws IOException, StructureException { Structure s; flagLoading(pdbId); try { PDBFileReader reader = new PDBFileReader(path); reader.setFetchBehavior(fetchBehavior); reader.setObsoleteBehavior(obsoleteBehavior); reader.setFileParsingParameters(params); s = reader.getStructureById(pdbId.toLowerCase()); } finally { flagLoadingFinished(pdbId); } return s; } }
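In the AtomCache class above, flagLoading() does a contains()/add() pair on the synchronized currentlyLoading set; each call locks on its own, but the check-then-act as a whole is not atomic, so two threads can in principle both pass the check. A hypothetical alternative sketch of the same "load each ID only once at a time" guard using a concurrent set (Java 8+); this is not the BioJava implementation, just an illustration of an atomic variant.

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class LoadingGuardSketch {
  // add() is atomic here, so the separate contains()/add() steps in flagLoading
  // collapse into one call and two threads cannot both claim the same ID.
  private final Set<String> currentlyLoading = ConcurrentHashMap.newKeySet();

  /** Returns true if this thread won the right to load the given ID. */
  boolean tryFlagLoading(String pdbId) {
    return currentlyLoading.add(pdbId);
  }

  void flagLoadingFinished(String pdbId) {
    currentlyLoading.remove(pdbId);
  }

  boolean isLoading(String pdbId) {
    return currentlyLoading.contains(pdbId);
  }
}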
public CollectElements(Collection<PsiReference> collection) { myCollection = Collections.synchronizedCollection(collection); }
private IdleConnectionMonitorThread() {
  connMgrs = new ArrayList<HttpClientConnectionManager>();
  // NOTE: the synchronized view returned here is discarded, so this call has no
  // effect; connMgrs remains a plain, unsynchronized ArrayList.
  Collections.synchronizedCollection(connMgrs);
  isblock = new AtomicBoolean(true);
  start();
}
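A sketch of the corrected pattern, where the synchronized view is actually kept. The field type and the Object element type are simplifying assumptions so the sketch stands alone without the HttpClient dependency.

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;

public class ConnectionManagerListSketch {
  // Keep the wrapper; calling synchronizedCollection and discarding the result,
  // as in the constructor above, leaves the list unsynchronized.
  private final Collection<Object> connMgrs =
      Collections.synchronizedCollection(new ArrayList<Object>());

  public void register(Object connectionManager) {
    connMgrs.add(connectionManager);
  }
}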
/** * BundleLoader runtime constructor. This object is created lazily when the first request for a * resource is made to this bundle. * * @param bundle Bundle object for this loader. * @param proxy the BundleLoaderProxy for this loader. * @exception org.osgi.framework.BundleException */ protected BundleLoader(BundleHost bundle, BundleLoaderProxy proxy) throws BundleException { this.bundle = bundle; this.proxy = proxy; try { bundle.getBundleData().open(); /* make sure the BundleData is open */ } catch (IOException e) { throw new BundleException(Msg.BUNDLE_READ_EXCEPTION, e); } BundleDescription description = proxy.getBundleDescription(); // init the require bundles list. BundleDescription[] required = description.getResolvedRequires(); if (required.length > 0) { // get a list of re-exported symbolic names HashSet reExportSet = new HashSet(required.length); BundleSpecification[] requiredSpecs = description.getRequiredBundles(); if (requiredSpecs != null && requiredSpecs.length > 0) for (int i = 0; i < requiredSpecs.length; i++) if (requiredSpecs[i].isExported()) reExportSet.add(requiredSpecs[i].getName()); requiredBundles = new BundleLoaderProxy[required.length]; int[] reexported = new int[required.length]; int reexportIndex = 0; for (int i = 0; i < required.length; i++) { requiredBundles[i] = getLoaderProxy(required[i]); if (reExportSet.contains(required[i].getSymbolicName())) reexported[reexportIndex++] = i; } if (reexportIndex > 0) { reexportTable = new int[reexportIndex]; System.arraycopy(reexported, 0, reexportTable, 0, reexportIndex); } else { reexportTable = null; } requiredSources = new KeyedHashSet(10, false); } else { requiredBundles = null; reexportTable = null; requiredSources = null; } // init the provided packages set ExportPackageDescription[] exports = description.getSelectedExports(); if (exports != null && exports.length > 0) { exportedPackages = Collections.synchronizedCollection( exports.length > 10 ? (Collection) new HashSet(exports.length) : new ArrayList(exports.length)); initializeExports(exports, exportedPackages); } else { exportedPackages = Collections.synchronizedCollection(new ArrayList(0)); } ExportPackageDescription substituted[] = description.getSubstitutedExports(); if (substituted.length > 0) { substitutedPackages = substituted.length > 10 ? (Collection) new HashSet(substituted.length) : new ArrayList(substituted.length); for (int i = 0; i < substituted.length; i++) substitutedPackages.add(substituted[i].getName()); } else { substitutedPackages = null; } // This is the fastest way to access to the description for fragments since the // hostdescription.getFragments() is slow BundleFragment[] fragmentObjects = bundle.getFragments(); BundleDescription[] fragments = new BundleDescription[fragmentObjects == null ? 0 : fragmentObjects.length]; for (int i = 0; i < fragments.length; i++) fragments[i] = fragmentObjects[i].getBundleDescription(); // init the dynamic imports tables if (description.hasDynamicImports()) addDynamicImportPackage(description.getImportPackages()); // ...and its fragments for (int i = 0; i < fragments.length; i++) if (fragments[i].isResolved() && fragments[i].hasDynamicImports()) addDynamicImportPackage(fragments[i].getImportPackages()); // Initialize the policy handler String buddyList = null; try { buddyList = (String) bundle.getBundleData().getManifest().get(Constants.BUDDY_LOADER); } catch (BundleException e) { // do nothing; buddyList == null } policy = buddyList != null ? 
new PolicyHandler(this, buddyList, bundle.getFramework().getPackageAdmin()) : null; if (policy != null) policy.open(bundle.getFramework().getSystemBundleContext()); }
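The exportedPackages initialization in the constructor above picks a backing collection by expected size before wrapping it. In isolation the pattern looks roughly like the sketch below; it is illustrative only, with String elements and a hypothetical helper name instead of the export-description types used by BundleLoader.

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;

public class SizedSynchronizedCollectionSketch {
  /**
   * Chooses a HashSet for larger inputs (cheaper contains()) and an ArrayList for
   * small ones, then wraps either in a synchronized view, mirroring the
   * exportedPackages setup above.
   */
  static Collection<String> newExportedNames(int expectedSize) {
    Collection<String> backing = expectedSize > 10
        ? new HashSet<String>(expectedSize)
        : new ArrayList<String>(expectedSize);
    return Collections.synchronizedCollection(backing);
  }

  public static void main(String[] args) {
    Collection<String> small = newExportedNames(3);
    small.add("org.example.api");
    System.out.println(small.size()); // 1
  }
}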
/** * Covariant function so the compiler can choose the proper one at compile time, eliminates the * need for XDoclet to really understand compiletime typing. * * <p>Read only collections need to be synchronized. Once we start giving out handles to these * collections, they'll be used in other threads sooner or later. */ private static java.util.Collection wrapCollection(java.util.Collection input) { return java.util.Collections.synchronizedCollection(input); }
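A usage sketch of the idea described above: once a wrapped handle is given out, other threads may use it, and even a read-only consumer still has to lock on the handle while traversing it. Names here are hypothetical, and generics are used for the sketch although the original signature is raw.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;

public class WrapCollectionUsageSketch {
  static <T> Collection<T> wrapCollection(Collection<T> input) {
    return Collections.synchronizedCollection(input);
  }

  public static void main(String[] args) throws InterruptedException {
    final Collection<String> handle =
        wrapCollection(new ArrayList<String>(Arrays.asList("a", "b")));

    Thread reader = new Thread(() -> {
      // A "read-only" consumer must still lock on the wrapper while iterating.
      synchronized (handle) {
        for (String s : handle) {
          System.out.println(s);
        }
      }
    });
    reader.start();
    reader.join();
  }
}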
/** * Class for prototyping database connections. Prototype only -- hardcoded for mysql, connects to * single database, keeps single connection, etc. * * @author Jim Robinson * @date 10/31/11 */ public class DBManager { private static Logger log = Logger.getLogger(DBManager.class); static Collection<ConnectionWrapper> connectionPool = Collections.synchronizedCollection(new ArrayList<ConnectionWrapper>()); static String username; static String password; public static Connection getConnection() { Iterator<ConnectionWrapper> poolIter = connectionPool.iterator(); while (poolIter.hasNext()) { ConnectionWrapper conn = poolIter.next(); try { if (conn == null || conn.isReallyClosed()) { poolIter.remove(); } else if (!conn.isClosed()) { return conn; } } catch (SQLException e) { log.error("Bad connection", e); poolIter.remove(); } } // No valid connections ConnectionWrapper conn = createConnection(); if (conn != null) { connectionPool.add(conn); log.info("Connection pool size: " + connectionPool.size()); } return conn; } private static ConnectionWrapper createConnection() { String driver = "com.mysql.jdbc.Driver"; try { Class.forName(driver).newInstance(); } catch (Exception e) { e.printStackTrace(); } final PreferenceManager preferenceManager = PreferenceManager.getInstance(); String host = preferenceManager.get(PreferenceManager.DB_HOST); String db = preferenceManager.get(PreferenceManager.DB_NAME); String port = preferenceManager.get(PreferenceManager.DB_PORT); String url = "jdbc:mysql://" + host; if (!port.equals("-1")) { url += ":" + port; } url += "/" + db; return connect(url); } private static ConnectionWrapper connect(String url) { try { return new ConnectionWrapper(DriverManager.getConnection(url, username, password)); } catch (SQLException e) { int errorCode = e.getErrorCode(); if (errorCode == 1044 || errorCode == 1045) { String host = PreferenceManager.getInstance().get(PreferenceManager.DB_HOST); Frame parent = Globals.isHeadless() ? 
null : IGV.getMainFrame(); LoginDialog dlg = new LoginDialog(parent, false, host, false); dlg.setVisible(true); if (dlg.isCanceled()) { throw new RuntimeException("Must login to access" + host); } username = dlg.getUsername(); password = new String(dlg.getPassword()); return connect(url); } else { MessageUtils.showMessage("<html>Error connecting to database: <br>" + e.getMessage()); return null; } } } public static void shutdown() { for (ConnectionWrapper conn : connectionPool) { if (conn != null) { try { conn.reallyClose(); } catch (SQLException e) { } } } connectionPool.clear(); } static class ConnectionWrapper implements Connection { Connection conn; boolean closed; ConnectionWrapper(Connection conn) { this.conn = conn; closed = false; } public void close() throws SQLException { closed = true; } public boolean isClosed() throws SQLException { return closed; } public void reallyClose() throws SQLException { closed = true; conn.close(); } public boolean isReallyClosed() throws SQLException { return conn.isClosed(); } public void clearWarnings() throws SQLException { conn.clearWarnings(); } public void commit() throws SQLException { conn.commit(); } public Array createArrayOf(String s, Object[] objects) throws SQLException { return conn.createArrayOf(s, objects); } public Blob createBlob() throws SQLException { return conn.createBlob(); } public Clob createClob() throws SQLException { return conn.createClob(); } public NClob createNClob() throws SQLException { return conn.createNClob(); } public SQLXML createSQLXML() throws SQLException { return conn.createSQLXML(); } public Statement createStatement() throws SQLException { return conn.createStatement(); } public Statement createStatement(int i, int i1) throws SQLException { return conn.createStatement(i, i1); } public Statement createStatement(int i, int i1, int i2) throws SQLException { return conn.createStatement(i, i1, i2); } public Struct createStruct(String s, Object[] objects) throws SQLException { return conn.createStruct(s, objects); } public void setSchema(String schema) throws SQLException { throw new UnsupportedOperationException( "Operation not supported for backwards compatibility to Java 6"); } public String getSchema() throws SQLException { return null; // TODO } public void abort(Executor executor) throws SQLException { throw new UnsupportedOperationException( "Operation not supported for backwards compatibility to Java 6"); } public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException { throw new UnsupportedOperationException( "Operation not supported for backwards compatibility to Java 6"); } public int getNetworkTimeout() { return -1; } public boolean getAutoCommit() throws SQLException { return conn.getAutoCommit(); } public String getCatalog() throws SQLException { return conn.getCatalog(); } public Properties getClientInfo() throws SQLException { return conn.getClientInfo(); } public String getClientInfo(String s) throws SQLException { return conn.getClientInfo(s); } public int getHoldability() throws SQLException { return conn.getHoldability(); } public DatabaseMetaData getMetaData() throws SQLException { return conn.getMetaData(); } public int getTransactionIsolation() throws SQLException { return conn.getTransactionIsolation(); } public Map<String, Class<?>> getTypeMap() throws SQLException { return conn.getTypeMap(); } public SQLWarning getWarnings() throws SQLException { return conn.getWarnings(); } public boolean isReadOnly() throws SQLException { return conn.isReadOnly(); } public 
boolean isValid(int i) throws SQLException { return conn.isValid(i); } public String nativeSQL(String s) throws SQLException { return conn.nativeSQL(s); } public CallableStatement prepareCall(String s) throws SQLException { return conn.prepareCall(s); } public CallableStatement prepareCall(String s, int i, int i1) throws SQLException { return conn.prepareCall(s, i, i1); } public CallableStatement prepareCall(String s, int i, int i1, int i2) throws SQLException { return conn.prepareCall(s, i, i1, i2); } public PreparedStatement prepareStatement(String s) throws SQLException { return conn.prepareStatement(s); } public PreparedStatement prepareStatement(String s, int i) throws SQLException { return conn.prepareStatement(s, i); } public PreparedStatement prepareStatement(String s, int i, int i1) throws SQLException { return conn.prepareStatement(s, i, i1); } public PreparedStatement prepareStatement(String s, int i, int i1, int i2) throws SQLException { return conn.prepareStatement(s, i, i1, i2); } public PreparedStatement prepareStatement(String s, int[] ints) throws SQLException { return conn.prepareStatement(s, ints); } public PreparedStatement prepareStatement(String s, String[] strings) throws SQLException { return conn.prepareStatement(s, strings); } public void releaseSavepoint(Savepoint savepoint) throws SQLException { conn.releaseSavepoint(savepoint); } public void rollback() throws SQLException { conn.rollback(); } public void rollback(Savepoint savepoint) throws SQLException { conn.rollback(savepoint); } public void setAutoCommit(boolean b) throws SQLException { conn.setAutoCommit(b); } public void setCatalog(String s) throws SQLException { conn.setCatalog(s); } public void setClientInfo(Properties properties) throws SQLClientInfoException { conn.setClientInfo(properties); } public void setClientInfo(String s, String s1) throws SQLClientInfoException { conn.setClientInfo(s, s1); } public void setHoldability(int i) throws SQLException { conn.setHoldability(i); } public void setReadOnly(boolean b) throws SQLException { conn.setReadOnly(b); } public Savepoint setSavepoint() throws SQLException { return conn.setSavepoint(); } public Savepoint setSavepoint(String s) throws SQLException { return conn.setSavepoint(s); } public void setTransactionIsolation(int i) throws SQLException { conn.setTransactionIsolation(i); } public void setTypeMap(Map<String, Class<?>> stringClassMap) throws SQLException { conn.setTypeMap(stringClassMap); } public boolean isWrapperFor(Class<?> aClass) throws SQLException { return conn.isWrapperFor(aClass); } public <T> T unwrap(Class<T> tClass) throws SQLException { return conn.unwrap(tClass); } } }
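In the DBManager class above, getConnection() walks connectionPool, a synchronized wrapper, with an explicit iterator and poolIter.remove() but without holding the wrapper's lock. A sketch of the same scan with the traversal and removal guarded; ConnectionWrapper is replaced by a minimal hypothetical stand-in so the sketch is self-contained.

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;

public class PoolScanSketch {
  /** Minimal stand-in for ConnectionWrapper (hypothetical). */
  static class PooledConnection {
    boolean closed;
    boolean reallyClosed;
  }

  static final Collection<PooledConnection> connectionPool =
      Collections.synchronizedCollection(new ArrayList<PooledConnection>());

  /** Same scan as getConnection() above, but holding the wrapper's lock throughout. */
  static PooledConnection findOpenConnection() {
    synchronized (connectionPool) {
      Iterator<PooledConnection> it = connectionPool.iterator();
      while (it.hasNext()) {
        PooledConnection conn = it.next();
        if (conn == null || conn.reallyClosed) {
          it.remove(); // drop dead entries
        } else if (!conn.closed) {
          return conn; // reuse an open one
        }
      }
    }
    return null; // caller creates a new connection, as DBManager does
  }
}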