/**
 * @see org.alfresco.repo.content.ContentStore#delete(java.lang.String)
 */
@Override
public boolean delete(String contentUrl) {
    ReentrantReadWriteLock readWriteLock = readWriteLock(contentUrl);
    ReadLock readLock = readWriteLock.readLock();
    readLock.lock();
    try {
        if (!cache.contains(contentUrl)) {
            // The item isn't in the cache, so simply delete from the backing store
            return backingStore.delete(contentUrl);
        }
    } finally {
        readLock.unlock();
    }

    WriteLock writeLock = readWriteLock.writeLock();
    writeLock.lock();
    try {
        // Double check the content still exists in the cache
        if (cache.contains(contentUrl)) {
            // The item is in the cache, so remove.
            cache.remove(contentUrl);
        }
        // Whether the item was in the cache or not, it must still be deleted from the backing store.
        return backingStore.delete(contentUrl);
    } finally {
        writeLock.unlock();
    }
}
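/*
 * The delete(String) method above relies on a readWriteLock(String) helper that is not
 * shown. A minimal sketch of one possible implementation, assuming a fixed pool of
 * striped ReentrantReadWriteLocks keyed by the content URL's hash; the real store may
 * size or key the pool differently.
 */
private final ReentrantReadWriteLock[] urlLockPool = createUrlLockPool(256);

private static ReentrantReadWriteLock[] createUrlLockPool(int size) {
    ReentrantReadWriteLock[] pool = new ReentrantReadWriteLock[size];
    for (int i = 0; i < size; i++) {
        pool[i] = new ReentrantReadWriteLock();
    }
    return pool;
}

private ReentrantReadWriteLock readWriteLock(String contentUrl) {
    // The same URL always maps to the same lock, so concurrent operations on one URL
    // serialize while operations on different URLs usually proceed in parallel.
    int index = (contentUrl.hashCode() & 0x7fffffff) % urlLockPool.length;
    return urlLockPool[index];
}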
/**
 * Maintains a list of the last <i>commitLogCount</i> or so committed requests. This is used for
 * fast follower synchronization.
 *
 * @param request committed request
 */
public void addCommittedProposal(Request request) {
    WriteLock wl = logLock.writeLock();
    wl.lock();
    try {
        if (committedLog.size() > commitLogCount) {
            committedLog.removeFirst();
            minCommittedLog = committedLog.getFirst().packet.getZxid();
        }
        if (committedLog.isEmpty()) {
            minCommittedLog = request.zxid;
            maxCommittedLog = request.zxid;
        }

        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        BinaryOutputArchive boa = BinaryOutputArchive.getArchive(baos);
        try {
            request.getHdr().serialize(boa, "hdr");
            if (request.getTxn() != null) {
                request.getTxn().serialize(boa, "txn");
            }
            baos.close();
        } catch (IOException e) {
            LOG.error("This really should be impossible", e);
        }

        QuorumPacket pp = new QuorumPacket(Leader.PROPOSAL, request.zxid, baos.toByteArray(), null);
        Proposal p = new Proposal();
        p.packet = pp;
        p.request = request;
        committedLog.add(p);
        maxCommittedLog = p.packet.getZxid();
    } finally {
        wl.unlock();
    }
}
/**
 * Mark the container as active.
 *
 * @param containerId the ContainerId
 * @param pidFilePath Path where the executor should write the pid of the launched process
 */
public void activateContainer(ContainerId containerId, Path pidFilePath) {
    writeLock.lock();
    try {
        this.pidFiles.put(containerId, pidFilePath);
    } finally {
        writeLock.unlock();
    }
}
/**
 * Mark the container as inactive. This only has an effect if the container is still active;
 * otherwise it is a no-op.
 */
public void deactivateContainer(ContainerId containerId) {
    writeLock.lock();
    try {
        this.pidFiles.remove(containerId);
    } finally {
        writeLock.unlock();
    }
}
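/*
 * activateContainer/deactivateContainer above assume a shared ReentrantReadWriteLock and
 * the pidFiles map it guards in the enclosing class. A minimal sketch of those declarations
 * plus a matching read-side accessor; the names other than pidFiles are illustrative and
 * may not match the original fields.
 */
private final ReadWriteLock lock = new ReentrantReadWriteLock();
private final Lock readLock = lock.readLock();
private final Lock writeLock = lock.writeLock();
private final Map<ContainerId, Path> pidFiles = new HashMap<ContainerId, Path>();

/** Returns the pid file path registered for the container, or null if it is not active. */
public Path getPidFilePath(ContainerId containerId) {
    readLock.lock();
    try {
        return this.pidFiles.get(containerId);
    } finally {
        readLock.unlock();
    }
}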
public void incrementChangeNumber() {
    writeTreeLock.lock();
    try {
        changeNumber++;
    } finally {
        writeTreeLock.unlock();
    }
}
/** {@inheritDoc} */
public boolean remove(Object o) {
    writeLock.lock();
    try {
        return super.remove(o);
    } finally {
        writeLock.unlock();
    }
}
public final <R> R writeE(final Callable<R> writeOp) throws Exception {
    writeLock.lock();
    try {
        return writeOp.call();
    } finally {
        writeLock.unlock();
    }
}
/** For Hibernate use */
private void setParent(Node parentNode) {
    refWriteLock.lock();
    try {
        this.parent = parentNode;
        this.childAssocRef = null;
    } finally {
        refWriteLock.unlock();
    }
}
public void setTypeQName(QNameDAO qnameDAO, QName typeQName) {
    Long typeQNameId = qnameDAO.getOrCreateQName(typeQName).getFirst();
    refWriteLock.lock();
    try {
        setTypeQNameId(typeQNameId);
    } finally {
        refWriteLock.unlock();
    }
}
public void setIndex(int index) {
    refWriteLock.lock();
    try {
        this.index = index;
        this.childAssocRef = null;
    } finally {
        refWriteLock.unlock();
    }
}
private void setValue(Object value) {
    // get a write lock
    singletonWriteLock.lock();
    try {
        tenantSingletonValue.put(TenantUtil.getCurrentDomain(), value);
    } finally {
        singletonWriteLock.unlock();
    }
}
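/*
 * setValue(Object) above is the write half of a tenant-aware singleton holder. A minimal
 * sketch of the matching read path, assuming a singletonReadLock taken from the same
 * ReentrantReadWriteLock; getValue and singletonReadLock are illustrative names and may
 * not match the original accessor.
 */
private Object getValue() {
    singletonReadLock.lock();
    try {
        return tenantSingletonValue.get(TenantUtil.getCurrentDomain());
    } finally {
        singletonReadLock.unlock();
    }
}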
/** For Hibernate use */
private void setChild(Node node) {
    refWriteLock.lock();
    try {
        child = node;
        this.childAssocRef = null;
    } finally {
        refWriteLock.unlock();
    }
}
public void setIsPrimary(boolean isPrimary) {
    refWriteLock.lock();
    try {
        this.isPrimary = isPrimary;
        this.childAssocRef = null;
    } finally {
        refWriteLock.unlock();
    }
}
public void setTypeQNameId(Long typeQNameId) {
    refWriteLock.lock();
    try {
        this.typeQNameId = typeQNameId;
        this.childAssocRef = null;
        this.typeQName = null;
    } finally {
        refWriteLock.unlock();
    }
}
public void setQnameNamespaceId(Long qnameNamespaceId) {
    refWriteLock.lock();
    try {
        this.qnameNamespaceId = qnameNamespaceId;
        this.childAssocRef = null;
        this.qname = null;
    } finally {
        refWriteLock.unlock();
    }
}
public void setQnameLocalName(String qnameLocalName) {
    refWriteLock.lock();
    try {
        this.qnameLocalName = qnameLocalName;
        this.childAssocRef = null;
        this.qname = null;
    } finally {
        refWriteLock.unlock();
    }
}
public final <R> R write(final Callable<R> writeOp) {
    writeLock.lock();
    try {
        return writeOp.call();
    } catch (Exception e) {
        // The exception cannot be ignored; rethrow it unchecked because, unlike writeE,
        // this variant does not declare a checked exception.
        throw new RuntimeException(e);
    } finally {
        writeLock.unlock();
    }
}
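/*
 * Typical use of the write(...) / writeE(...) helpers above: the caller packages the
 * guarded mutation as a Callable and the helper takes care of locking and unlocking.
 * An illustrative caller only - the counter and cache fields are hypothetical.
 */
public void incrementAndCache(final String key) {
    write(new Callable<Integer>() {
        @Override
        public Integer call() {
            // Runs while writeLock is held.
            int next = counter + 1;
            counter = next;
            cache.put(key, next);
            return next;
        }
    });
}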
/** {@inheritDoc} */
public boolean removeAll(Collection<?> c) {
    if (CollectionUtils.isEmpty(c)) {
        return false;
    }
    writeLock.lock();
    try {
        return super.removeAll(c);
    } finally {
        writeLock.unlock();
    }
}
/** Adds all elements of the given Collection to this Set. */
public boolean addAll(Collection<? extends E> c) {
    Assert.notNull(c, "Can not merge with NULL set");
    writeLock.lock();
    try {
        for (E object : c) {
            this.add(object);
        }
        return true;
    } finally {
        writeLock.unlock();
    }
}
public int prepareGame(ClientInfo clientInfo) {
    System.out.println("prepareGame: " + clientInfo);
    int id = currentID.incrementAndGet();
    clientInfo.resetPing();
    writeLock.lock();
    try {
        clients.put(id, clientInfo);
        notifyMatcher();
    } finally {
        // Unlock in a finally block so the lock is not leaked if registration fails.
        writeLock.unlock();
    }
    return id;
}
@Override
public void cleanIndexedFiles() {
    writeLock.lock();
    try {
        sentFiles.clear();
        syncedFilesPerPath.clear();
    } finally {
        writeLock.unlock();
    }
    synchronized (toRefresh) {
        toRefresh.clear();
    }
}
/**
 * Every time an Ordered element is added via this method, this Set is re-sorted; otherwise
 * the element is simply appended to the end. The added element must not be null.
 */
public boolean add(E o) {
    Assert.notNull(o, "Can not add NULL object");
    writeLock.lock();
    try {
        boolean present = false;
        if (o instanceof Ordered) {
            present = this.addOrderedElement((Ordered) o);
        } else {
            present = super.add(o);
        }
        return present;
    } finally {
        writeLock.unlock();
    }
}
public void setQName(QNameDAO qnameDAO, QName qname) {
    String assocQNameNamespace = qname.getNamespaceURI();
    String assocQNameLocalName = qname.getLocalName();
    Long assocQNameNamespaceId = qnameDAO.getOrCreateNamespace(assocQNameNamespace).getFirst();
    Long assocQNameCrc = getCrc(qname);
    // get write lock
    refWriteLock.lock();
    try {
        setQnameNamespaceId(assocQNameNamespaceId);
        setQnameLocalName(assocQNameLocalName);
        setQnameCrc(assocQNameCrc);
    } finally {
        refWriteLock.unlock();
    }
}
@Override
public void preLogArchive(Path oldPath, Path newPath) throws IOException {
    // take a write lock on the index - any pending index updates will complete before we finish
    LOG.debug("Taking INDEX_UPDATE writelock");
    logArchiveLock.lock();
    LOG.debug("Got the INDEX_UPDATE writelock");
}
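/*
 * preLogArchive(...) above deliberately leaves logArchiveLock held: the matching
 * postLogArchive callback is expected to release it once the WAL file has been moved.
 * A minimal sketch of that counterpart, assuming the same lock field; the real listener
 * may add further logging or error handling.
 */
@Override
public void postLogArchive(Path oldPath, Path newPath) throws IOException {
    // Release the lock taken in preLogArchive so pending index updates can resume.
    LOG.debug("Releasing INDEX_UPDATE writelock");
    logArchiveLock.unlock();
}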
/** {@inheritDoc} */
public QName getTypeQName(QNameDAO qnameDAO) {
    refReadLock.lock();
    try {
        if (typeQName != null) {
            return typeQName;
        }
    } finally {
        refReadLock.unlock();
    }
    refWriteLock.lock();
    try {
        typeQName = qnameDAO.getQName(typeQNameId).getSecond();
        return typeQName;
    } finally {
        refWriteLock.unlock();
    }
}
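/*
 * The setters above (setParent, setChild, setIndex, setTypeQNameId, ...) all null out the
 * cached childAssocRef under refWriteLock; getTypeQName shows the matching lazy read
 * pattern. A sketch of how the cached reference itself would be rebuilt the same way -
 * buildChildAssocRef() is a hypothetical stand-in for whatever assembles the
 * ChildAssociationRef from the entity's fields.
 */
public ChildAssociationRef getChildAssocRef() {
    refReadLock.lock();
    try {
        if (childAssocRef != null) {
            return childAssocRef;
        }
    } finally {
        refReadLock.unlock();
    }
    refWriteLock.lock();
    try {
        if (childAssocRef == null) {
            childAssocRef = buildChildAssocRef();   // hypothetical helper
        }
        return childAssocRef;
    } finally {
        refWriteLock.unlock();
    }
}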
/**
 * Clear the zkdatabase. Note to developers - be careful to see that the clear method does clear
 * out all the data structures in zkdatabase.
 */
public void clear() {
    minCommittedLog = 0;
    maxCommittedLog = 0;
    /* to be safe we just create a new datatree. */
    dataTree = new DataTree();
    sessionsWithTimeouts.clear();
    WriteLock lock = logLock.writeLock();
    lock.lock();
    try {
        committedLog.clear();
    } finally {
        lock.unlock();
    }
    initialized = false;
}
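/*
 * Readers of committedLog take the read side of the same logLock so that snapshots used
 * for follower synchronization do not race with addCommittedProposal or clear above. A
 * minimal sketch of such a reader; the real ZKDatabase accessor may differ in detail.
 */
public List<Proposal> getCommittedLog() {
    ReadLock rl = logLock.readLock();
    rl.lock();
    try {
        // Copy under the read lock so callers can iterate without holding it.
        return new LinkedList<Proposal>(committedLog);
    } finally {
        rl.unlock();
    }
}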
@Override
public void synchronizeFile(ITernFile file) throws IOException {
    writeLock.lock();
    try {
        TernFile tf = file.toTernServerFile(getProject());
        String oldText = sentFiles.get(tf.getName());
        if (tf.getText().equals(oldText) && !uploader.cancel(tf.getName())) {
            // no need to synchronize the file, already up-to-date
            return;
        }
        TernDoc doc = new TernDoc();
        doc.addFile(tf);
        sendFiles(doc);
    } finally {
        writeLock.unlock();
    }
}
@Override
public void synchronizeScriptPath(ITernScriptPath path, String... forced) {
    TernDoc doc = new TernDoc();
    writeLock.lock();
    try {
        // make sure we do not send duplicate files
        Set<String> requestedFiles = new HashSet<String>(sentFiles.keySet());
        Set<String> perPath = new HashSet<String>();
        syncedFilesPerPath.put(path, perPath);
        requestedFiles.removeAll(Arrays.asList(forced));
        long totalSize = 0;
        for (String file : requestedFiles) {
            totalSize += sentFiles.get(file).length();
        }
        for (ITernScriptResource resource : path.getScriptResources()) {
            // limit the number of files being sent to the Tern server
            if (totalSize >= MAX_ALLOWED_SIZE) {
                sizeExceeded();
                break;
            }
            ITernFile file = resource.getFile();
            if (file == null) {
                continue;
            }
            String name = file.getFullName(getProject());
            perPath.add(name);
            if (!requestedFiles.contains(name)) {
                try {
                    TernFile tf = file.toTernServerFile(getProject());
                    doc.addFile(tf);
                    totalSize += tf.getText().length();
                    requestedFiles.add(name);
                } catch (IOException e) {
                    getProject().handleException(e);
                }
            }
        }
        // perform actual synchronization with the server
        sendFiles(doc);
    } finally {
        writeLock.unlock();
    }
}
private ContentReader cacheAndRead(String url) {
    WriteLock writeLock = readWriteLock(url).writeLock();
    writeLock.lock();
    try {
        for (int i = 0; i < maxCacheTries; i++) {
            ContentReader reader = attemptCacheAndRead(url);
            if (reader != null) {
                return reader;
            }
        }
        // Have tried multiple times to cache the item and read it back from the cache
        // but there is a recurring problem - give up and return the item from the backing store.
        return backingStore.getReader(url);
    } finally {
        writeLock.unlock();
    }
}
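/*
 * cacheAndRead(String) above is the slow path. A minimal sketch of a matching fast path
 * that consults the cache under the read lock first and only falls back to cacheAndRead
 * on a miss, assuming the cache exposes a getReader(String) accessor; the store's actual
 * getReader handles more special cases than shown here.
 */
public ContentReader getReader(String contentUrl) {
    ReadLock readLock = readWriteLock(contentUrl).readLock();
    readLock.lock();
    try {
        if (cache.contains(contentUrl)) {
            // Fast path: serve the content straight from the cache.
            return cache.getReader(contentUrl);
        }
    } finally {
        readLock.unlock();
    }
    // Cache miss: populate the cache (and retry) under the write lock.
    return cacheAndRead(contentUrl);
}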
@Override
public void run() {
    System.out.println("GameMatcher Started");
    while (true) {
        System.out.println(
                "Running match cycle: currentID=" + currentID + " numClients=" + clients.size());
        writeLock.lock();
        removeStaleClients();
        matchClients();
        synchronized (clients) {
            running = false;
            // Release the write lock inside the clients monitor so the unlock-and-wait is
            // atomic with respect to notifications from prepareGame(); otherwise a wake-up
            // could be missed between unlock() and wait().
            writeLock.unlock();
            try {
                clients.wait();
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
    }
}
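/*
 * The matcher thread above parks on clients.wait() after setting running to false. A
 * minimal sketch of the notifyMatcher() called from prepareGame(...), assuming the same
 * clients monitor and running flag; the original implementation is not shown.
 */
private void notifyMatcher() {
    synchronized (clients) {
        if (!running) {
            running = true;
            // Wake the matcher so it runs another match cycle.
            clients.notify();
        }
    }
}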