/*
 * Finishes the execution of the task.
 * Returns true if it was finished successfully, false otherwise.
 */
public boolean endTask(String id) throws InterruptedException {
    Map<String, Integer> objectsToSupply;
    String type;

    lock.lock();
    try {
        if (!this.tasksRunning.containsKey(Integer.valueOf(id))) {
            return false;
        } else {
            type = this.tasksRunning.get(Integer.valueOf(id));
            objectsToSupply = this.tasks.get(type).getObjects();
        }
    } finally {
        lock.unlock();
    }

    // Supply all of the objects, outside the lock so it is not held during blocking work.
    for (Map.Entry<String, Integer> entry : objectsToSupply.entrySet()) {
        warehouse.supply(entry.getKey(), entry.getValue());
    }

    lock.lock();
    try {
        this.tasksRunning.remove(Integer.valueOf(id));
        this.tasks.get(type).signalP();
    } finally {
        lock.unlock();
    }
    return true;
}
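/*
 * A minimal, self-contained sketch of the locking pattern endTask uses above:
 * snapshot shared state under the lock, perform blocking work with the lock
 * released, then reacquire the lock to publish the result. The class and field
 * names here are hypothetical illustrations, not part of the original code.
 */
import java.util.concurrent.locks.ReentrantLock;

class LockAroundBlockingWork {
    private final ReentrantLock lock = new ReentrantLock();
    private int pending = 10;

    void process() throws InterruptedException {
        int snapshot;
        lock.lock();
        try {
            snapshot = pending;     // read shared state under the lock
        } finally {
            lock.unlock();
        }

        Thread.sleep(snapshot);     // blocking work with the lock released

        lock.lock();
        try {
            pending = 0;            // publish the result under the lock
        } finally {
            lock.unlock();
        }
    }
}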
/** Cleans up resources to avoid excessive memory usage. */
public void cleanUp() {
    topSnapshot.set(null);
    singleMsgs.clear();
    fullMsgs.clear();
    rcvdIds.clear();
    oldestNode.set(null);
    partReleaseFut = null;

    Collection<ClusterNode> rmtNodes = this.rmtNodes;

    if (rmtNodes != null)
        rmtNodes.clear();
}
static Set<RDPConnection> getAllConnections() {
    lock.lock();
    try {
        Set<RDPConnection> allCon = new HashSet<RDPConnection>();
        for (Map<ConnectionInfo, RDPConnection> dcMap : allConMap.values()) {
            allCon.addAll(dcMap.values());
        }
        return allCon;
    } finally {
        lock.unlock();
    }
}
/**
 * Returns the RDPConnection that is registered for the given datagram channel and is connected
 * to the host/port in the given ConnectionInfo. Returns null if there is no matching registered
 * RDPConnection.
 */
static RDPConnection getConnection(DatagramChannel dc, ConnectionInfo conInfo) {
    lock.lock();
    try {
        Map<ConnectionInfo, RDPConnection> dcConMap = allConMap.get(dc);
        if (dcConMap == null) {
            // There is not even a datagram channel associated with this connection.
            if (Log.loggingNet)
                Log.net("RDPServer.getConnection: could not find datagram");
            return null;
        }
        return dcConMap.get(conInfo);
    } finally {
        lock.unlock();
    }
}
/**
 * Creates a new map with the same mappings as the given map. The map is created with a capacity
 * large enough to hold the given mappings without rehashing at the default load factor, or 11,
 * whichever is greater, and a default load factor.
 *
 * @param t the map
 */
public ConcurrentHashMap(Map<? extends K, ? extends V> t) {
    this(
        Math.max((int) (t.size() / DEFAULT_LOAD_FACTOR) + 1, 11),
        DEFAULT_LOAD_FACTOR,
        DEFAULT_SEGMENTS);
    putAll(t);
}
/**
 * Copies all of the mappings from the specified map to this one.
 *
 * <p>These mappings replace any mappings that this map had for any of the keys currently in the
 * specified Map.
 *
 * @param t Mappings to be stored in this map.
 */
public void putAll(Map<? extends K, ? extends V> t) {
    for (Map.Entry<? extends K, ? extends V> e : t.entrySet()) {
        put(e.getKey(), e.getValue());
    }
}
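/*
 * A minimal usage sketch for the copy constructor and putAll above. To stay
 * self-contained it uses the standard java.util.concurrent.ConcurrentHashMap,
 * which offers the same constructor and putAll contract; the custom class in
 * this file behaves analogously.
 */
import java.util.HashMap;
import java.util.Map;

class CopyConstructorDemo {
    public static void main(String[] args) {
        Map<String, Integer> source = new HashMap<>();
        source.put("a", 1);
        source.put("b", 2);

        // Copy all mappings at construction time.
        java.util.concurrent.ConcurrentHashMap<String, Integer> copy =
            new java.util.concurrent.ConcurrentHashMap<>(source);

        // Later mappings can be merged in bulk; existing keys are replaced.
        copy.putAll(Map.of("b", 20, "c", 3));

        System.out.println(copy); // e.g. {a=1, b=20, c=3} (iteration order is not guaranteed)
    }
}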
/**
 * Removes this connection from the connections map. The datagram channel still sticks around in
 * case it needs to be reused.
 */
static void removeConnection(RDPConnection con) {
    lock.lock();
    try {
        if (Log.loggingNet)
            Log.net("RDPServer.removeConnection: removing con " + con);
        con.setState(RDPConnection.CLOSED);
        DatagramChannel dc = con.getDatagramChannel();

        // First we get the set of connections attached to the given dc.
        Map<ConnectionInfo, RDPConnection> dcConMap = allConMap.get(dc);
        if (dcConMap == null) {
            throw new MVRuntimeException("RDPServer.removeConnection: cannot find dc");
        }

        int localPort = con.getLocalPort();
        int remotePort = con.getRemotePort();
        InetAddress remoteAddr = con.getRemoteAddr();
        ConnectionInfo conInfo = new ConnectionInfo(remoteAddr, remotePort, localPort);
        Object rv = dcConMap.remove(conInfo);
        if (rv == null) {
            throw new MVRuntimeException("RDPServer.removeConnection: could not find the connection");
        }

        // Close the datagram channel if needed. Conditions: no other connections
        // on this datagram channel, and no socket listening on this datagram channel.
        if (dcConMap.isEmpty()) {
            Log.net("RDPServer.removeConnection: no other connections for this datagramchannel (port)");
            // There are no more connections on this datagram channel;
            // check if there is a server socket listening.
            if (getRDPSocket(dc) == null) {
                Log.net("RDPServer.removeConnection: no socket listening on this port - closing");
                // No socket either, so close the datagram channel.
                dc.socket().close();
                channelMap.remove(localPort);
                Log.net("RDPServer.removeConnection: closed and removed datagramchannel/socket");
            } else {
                Log.net("RDPServer.removeConnection: there is a socket listening on this port");
            }
        } else {
            Log.net("RDPServer.removeConnection: there are other connections on this port");
        }
    } finally {
        lock.unlock();
    }
}
/** {@inheritDoc} */
@Override
public boolean onDone(AffinityTopologyVersion res, Throwable err) {
    Map<Integer, Boolean> m = null;

    for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
        if (cacheCtx.config().getTopologyValidator() != null && !CU.isSystemCache(cacheCtx.name())) {
            if (m == null)
                m = new HashMap<>();

            m.put(
                cacheCtx.cacheId(),
                cacheCtx.config().getTopologyValidator().validate(discoEvt.topologyNodes()));
        }
    }

    cacheValidRes = m != null ? m : Collections.<Integer, Boolean>emptyMap();

    cctx.cache().onExchangeDone(exchId.topologyVersion(), reqs, err);

    cctx.exchange().onExchangeDone(this, err);

    if (super.onDone(res, err) && !dummy && !forcePreload) {
        if (log.isDebugEnabled())
            log.debug("Completed partition exchange [localNode=" + cctx.localNodeId() +
                ", exchange=" + this + ']');

        initFut.onDone(err == null);

        GridTimeoutObject timeoutObj = this.timeoutObj;

        // Deschedule timeout object.
        if (timeoutObj != null)
            cctx.kernalContext().timeout().removeTimeoutObject(timeoutObj);

        if (exchId.isLeft()) {
            for (GridCacheContext cacheCtx : cctx.cacheContexts())
                cacheCtx.config().getAffinity().removeNode(exchId.nodeId());
        }

        return true;
    }

    return dummy;
}
@Override
public void sessionDestroyed(HttpSessionEvent _se) {
    try {
        sessions.remove(_se.getSession().getId());
    } catch (NullPointerException | UnsupportedOperationException | ClassCastException e) {
        // Ignore: the session may never have been registered in the map.
    }
    setUsersOnline(getUsersOnline() - 1);
}
/**
 * @param key Key.
 * @param ver Version.
 */
public void onEntryEvicted(KeyCacheObject key, GridCacheVersion ver) {
    assert key != null;
    assert ver != null;
    assert lock.isHeldByCurrentThread(); // Only one thread can enter this method at a time.

    if (state() != MOVING)
        return;

    Map<KeyCacheObject, GridCacheVersion> evictHist0 = evictHist;

    if (evictHist0 != null) {
        GridCacheVersion ver0 = evictHist0.get(key);

        if (ver0 == null || ver0.isLess(ver)) {
            GridCacheVersion ver1 = evictHist0.put(key, ver);

            assert ver1 == ver0;
        }
    }
}
/**
 * Cache preloader should call this method within partition lock.
 *
 * @param key Key.
 * @param ver Version.
 * @return {@code True} if preloading is permitted.
 */
public boolean preloadingPermitted(KeyCacheObject key, GridCacheVersion ver) {
    assert key != null;
    assert ver != null;
    assert lock.isHeldByCurrentThread(); // Only one thread can enter this method at a time.

    if (state() != MOVING)
        return false;

    Map<KeyCacheObject, GridCacheVersion> evictHist0 = evictHist;

    if (evictHist0 != null) {
        GridCacheVersion ver0 = evictHist0.get(key);

        // Permit preloading if version in history is missing or less than passed in.
        return ver0 == null || ver0.isLess(ver);
    }

    return false;
}
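/*
 * A minimal, self-contained sketch of the eviction-history rule used by
 * onEntryEvicted and preloadingPermitted above: an incoming entry may be
 * preloaded only if the eviction history has no newer version for its key.
 * The Version record and String keys are hypothetical stand-ins for
 * GridCacheVersion and KeyCacheObject.
 */
import java.util.HashMap;
import java.util.Map;

class EvictHistorySketch {
    record Version(long order) {
        boolean isLess(Version other) { return order < other.order; }
    }

    private final Map<String, Version> evictHist = new HashMap<>();

    /** Record an eviction, keeping only the newest version per key. */
    void onEntryEvicted(String key, Version ver) {
        Version cur = evictHist.get(key);
        if (cur == null || cur.isLess(ver))
            evictHist.put(key, ver);
    }

    /** Preloading is permitted if history has no entry for the key, or a strictly older one. */
    boolean preloadingPermitted(String key, Version ver) {
        Version cur = evictHist.get(key);
        return cur == null || cur.isLess(ver);
    }
}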
/**
 * Returns the value of the statistic.
 *
 * @param stat the name of the statistic.
 * @return the value.
 */
public Object getStat(String stat) {
    Lock lock = this.lock.readLock();
    Object value;

    lock.lock();
    try {
        value = stats.get(stat);
    } finally {
        lock.unlock();
    }
    return value;
}
/** The connection data (remote addr, etc.) should already be set. */
static void registerConnection(RDPConnection con, DatagramChannel dc) {
    lock.lock();
    try {
        if (Log.loggingNet)
            Log.net("RDPServer.registerConnection: registering con " + con);

        // First we get the set of connections attached to the given dc.
        Map<ConnectionInfo, RDPConnection> dcConMap = allConMap.get(dc);
        if (dcConMap == null) {
            dcConMap = new HashMap<ConnectionInfo, RDPConnection>();
        }

        // Add this connection to the map.
        int localPort = con.getLocalPort();
        int remotePort = con.getRemotePort();
        InetAddress remoteAddr = con.getRemoteAddr();
        ConnectionInfo conInfo = new ConnectionInfo(remoteAddr, remotePort, localPort);
        dcConMap.put(conInfo, con);
        allConMap.put(dc, dcConMap);
    } finally {
        lock.unlock();
    }
}
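/*
 * A minimal sketch of the two-level registry pattern shared by
 * registerConnection, removeConnection, and getConnection above: an outer map
 * keyed by channel, an inner map keyed by connection info, all guarded by one
 * lock. The Channel/ConnInfo/Conn type parameters are hypothetical
 * generalizations of DatagramChannel, ConnectionInfo, and RDPConnection.
 */
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.ReentrantLock;

class TwoLevelRegistry<Channel, ConnInfo, Conn> {
    private final ReentrantLock lock = new ReentrantLock();
    private final Map<Channel, Map<ConnInfo, Conn>> byChannel = new HashMap<>();

    void register(Channel ch, ConnInfo info, Conn con) {
        lock.lock();
        try {
            byChannel.computeIfAbsent(ch, k -> new HashMap<>()).put(info, con);
        } finally {
            lock.unlock();
        }
    }

    /** Removes the connection; drops the inner map when it becomes empty. */
    Conn remove(Channel ch, ConnInfo info) {
        lock.lock();
        try {
            Map<ConnInfo, Conn> inner = byChannel.get(ch);
            if (inner == null)
                return null;
            Conn removed = inner.remove(info);
            if (inner.isEmpty())
                byChannel.remove(ch);
            return removed;
        } finally {
            lock.unlock();
        }
    }
}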
@Override
public void sessionCreated(HttpSessionEvent se) {
    HttpSession session = se.getSession();
    try {
        sessions.put(session.getId(), session);
        setUsersOnline(getUsersOnline() + 1);
    } catch (UnsupportedOperationException
        | ClassCastException
        | NullPointerException
        | IllegalArgumentException e) {
        System.out.println(
            "edu.temple.cis3238.wiki.WikiEventMonitor.sessionCreated() -- " + e.toString());
    }
}
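/*
 * A minimal sketch of how a session listener like the ones above is typically
 * wired up, assuming the Servlet 3.0+ API: annotate the class with
 * @WebListener (or, equivalently, declare it as a <listener> in web.xml). The
 * SessionCounter class name here is hypothetical.
 */
import javax.servlet.annotation.WebListener;
import javax.servlet.http.HttpSessionEvent;
import javax.servlet.http.HttpSessionListener;

@WebListener
class SessionCounter implements HttpSessionListener {
    @Override
    public void sessionCreated(HttpSessionEvent se) {
        // Track the new session here.
    }

    @Override
    public void sessionDestroyed(HttpSessionEvent se) {
        // Untrack the session here.
    }
}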
/**
 * Sets the value of a specific piece of statistics. The method assumes that the caller has
 * acquired the write lock of {@link #lock} and, thus, allows the optimization of batch updates to
 * multiple pieces of statistics.
 *
 * @param stat the piece of statistics to set
 * @param value the value of the piece of statistics to set
 */
protected void unlockedSetStat(String stat, Object value) {
    if (value == null)
        stats.remove(stat);
    else
        stats.put(stat, value);
}
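/*
 * A minimal sketch of the read/write-lock pattern behind getStat and
 * unlockedSetStat: readers take the read lock, while a batch writer takes the
 * write lock once and calls the unlocked setter repeatedly. The StatsHolder
 * class and setStats method are hypothetical; the other names mirror the
 * methods above.
 */
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

class StatsHolder {
    private final ReadWriteLock lock = new ReentrantReadWriteLock();
    private final Map<String, Object> stats = new HashMap<>();

    public Object getStat(String stat) {
        lock.readLock().lock();
        try {
            return stats.get(stat);
        } finally {
            lock.readLock().unlock();
        }
    }

    /** Caller must hold the write lock; this enables cheap batch updates. */
    protected void unlockedSetStat(String stat, Object value) {
        if (value == null)
            stats.remove(stat);
        else
            stats.put(stat, value);
    }

    /** Batch update: one write-lock acquisition for many statistics. */
    public void setStats(Map<String, Object> batch) {
        lock.writeLock().lock();
        try {
            batch.forEach(this::unlockedSetStat);
        } finally {
            lock.writeLock().unlock();
        }
    }
}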
/**
 * Starts activity.
 *
 * @throws IgniteInterruptedCheckedException If interrupted.
 */
public void init() throws IgniteInterruptedCheckedException {
    if (isDone())
        return;

    if (init.compareAndSet(false, true)) {
        if (isDone())
            return;

        try {
            // Wait for event to occur to make sure that discovery
            // will return corresponding nodes.
            U.await(evtLatch);

            assert discoEvt != null : this;
            assert !dummy && !forcePreload : this;

            ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx, exchId.topologyVersion());

            oldestNode.set(oldest);

            startCaches();

            // True if client node joined or failed.
            boolean clientNodeEvt;

            if (F.isEmpty(reqs)) {
                int type = discoEvt.type();

                assert type == EVT_NODE_JOINED || type == EVT_NODE_LEFT || type == EVT_NODE_FAILED : discoEvt;

                clientNodeEvt = CU.clientNode(discoEvt.eventNode());
            }
            else {
                assert discoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT : discoEvt;

                boolean clientOnlyStart = true;

                for (DynamicCacheChangeRequest req : reqs) {
                    if (!req.clientStartOnly()) {
                        clientOnlyStart = false;

                        break;
                    }
                }

                clientNodeEvt = clientOnlyStart;
            }

            if (clientNodeEvt) {
                ClusterNode node = discoEvt.eventNode();

                // Client needs to initialize affinity for the local join event
                // or for started client caches.
                if (!node.isLocal()) {
                    for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                        if (cacheCtx.isLocal())
                            continue;

                        GridDhtPartitionTopology top = cacheCtx.topology();

                        top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));

                        if (cacheCtx.affinity().affinityTopologyVersion() == AffinityTopologyVersion.NONE) {
                            initTopology(cacheCtx);

                            top.beforeExchange(this);
                        }
                        else
                            cacheCtx.affinity().clientEventTopologyChange(discoEvt, exchId.topologyVersion());
                    }

                    if (exchId.isLeft())
                        cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

                    onDone(exchId.topologyVersion());

                    skipPreload = cctx.kernalContext().clientNode();

                    return;
                }
            }

            if (cctx.kernalContext().clientNode()) {
                skipPreload = true;

                for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                    if (cacheCtx.isLocal())
                        continue;

                    GridDhtPartitionTopology top = cacheCtx.topology();

                    top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));
                }

                for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                    if (cacheCtx.isLocal())
                        continue;

                    initTopology(cacheCtx);
                }

                if (oldestNode.get() != null) {
                    rmtNodes = new ConcurrentLinkedQueue<>(
                        CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

                    rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

                    ready.set(true);

                    initFut.onDone(true);

                    if (log.isDebugEnabled())
                        log.debug("Initialized future: " + this);

                    sendPartitions();
                }
                else
                    onDone(exchId.topologyVersion());

                return;
            }

            assert oldestNode.get() != null;

            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                if (isCacheAdded(cacheCtx.cacheId(), exchId.topologyVersion())) {
                    if (cacheCtx.discovery().cacheAffinityNodes(cacheCtx.name(), topologyVersion()).isEmpty())
                        U.quietAndWarn(log, "No server nodes found for cache client: " + cacheCtx.namex());
                }

                cacheCtx.preloader().onExchangeFutureAdded();
            }

            List<String> cachesWithoutNodes = null;

            if (exchId.isLeft()) {
                for (String name : cctx.cache().cacheNames()) {
                    if (cctx.discovery().cacheAffinityNodes(name, topologyVersion()).isEmpty()) {
                        if (cachesWithoutNodes == null)
                            cachesWithoutNodes = new ArrayList<>();

                        cachesWithoutNodes.add(name);

                        // Fire event even if there is no client cache started.
                        if (cctx.gridEvents().isRecordable(EventType.EVT_CACHE_NODES_LEFT)) {
                            Event evt = new CacheEvent(
                                name,
                                cctx.localNode(),
                                cctx.localNode(),
                                "All server nodes have left the cluster.",
                                EventType.EVT_CACHE_NODES_LEFT,
                                0,
                                false,
                                null,
                                null,
                                null,
                                null,
                                false,
                                null,
                                false,
                                null,
                                null,
                                null);

                            cctx.gridEvents().record(evt);
                        }
                    }
                }
            }

            if (cachesWithoutNodes != null) {
                StringBuilder sb = new StringBuilder(
                    "All server nodes for the following caches have left the cluster: ");

                for (int i = 0; i < cachesWithoutNodes.size(); i++) {
                    String cache = cachesWithoutNodes.get(i);

                    sb.append('\'').append(cache).append('\'');

                    if (i != cachesWithoutNodes.size() - 1)
                        sb.append(", ");
                }

                U.quietAndWarn(log, sb.toString());
                U.quietAndWarn(log, "Must have server nodes for caches to operate.");
            }

            assert discoEvt != null;
            assert exchId.nodeId().equals(discoEvt.eventNode().id());

            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                GridClientPartitionTopology clientTop = cctx.exchange().clearClientTopology(cacheCtx.cacheId());

                long updSeq = clientTop == null ? -1 : clientTop.lastUpdateSequence();

                // Update before waiting for locks.
                if (!cacheCtx.isLocal())
                    cacheCtx.topology().updateTopologyVersion(exchId, this, updSeq, stopping(cacheCtx.cacheId()));
            }

            // Grab all alive remote nodes with order of equal or less than last joined node.
            rmtNodes = new ConcurrentLinkedQueue<>(
                CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

            rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

            // If received any messages, process them.
            for (Map.Entry<UUID, GridDhtPartitionsSingleMessage> m : singleMsgs.entrySet())
                onReceive(m.getKey(), m.getValue());

            // If received any messages, process them.
            for (Map.Entry<UUID, GridDhtPartitionsFullMessage> m : fullMsgs.entrySet())
                onReceive(m.getKey(), m.getValue());

            AffinityTopologyVersion topVer = exchId.topologyVersion();

            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                if (cacheCtx.isLocal())
                    continue;

                // Must initialize topology after we get discovery event.
                initTopology(cacheCtx);

                cacheCtx.preloader().updateLastExchangeFuture(this);
            }

            IgniteInternalFuture<?> partReleaseFut = cctx.partitionReleaseFuture(topVer);

            // Assign to class variable so it will be included into toString() method.
            this.partReleaseFut = partReleaseFut;

            if (log.isDebugEnabled())
                log.debug("Before waiting for partition release future: " + this);

            while (true) {
                try {
                    partReleaseFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

                    break;
                }
                catch (IgniteFutureTimeoutCheckedException ignored) {
                    // Print pending transactions and locks that might have led to hang.
                    dumpPendingObjects();
                }
            }

            if (log.isDebugEnabled())
                log.debug("After waiting for partition release future: " + this);

            if (!F.isEmpty(reqs))
                blockGateways();

            if (exchId.isLeft())
                cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

            IgniteInternalFuture<?> locksFut = cctx.mvcc().finishLocks(exchId.topologyVersion());

            while (true) {
                try {
                    locksFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

                    break;
                }
                catch (IgniteFutureTimeoutCheckedException ignored) {
                    U.warn(log, "Failed to wait for locks release future. " +
                        "Dumping pending objects that might be the cause: " + cctx.localNodeId());

                    U.warn(log, "Locked entries:");

                    Map<IgniteTxKey, Collection<GridCacheMvccCandidate>> locks =
                        cctx.mvcc().unfinishedLocks(exchId.topologyVersion());

                    for (Map.Entry<IgniteTxKey, Collection<GridCacheMvccCandidate>> e : locks.entrySet())
                        U.warn(log, "Locked entry [key=" + e.getKey() + ", mvcc=" + e.getValue() + ']');
                }
            }

            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                if (cacheCtx.isLocal())
                    continue;

                // Notify replication manager.
                GridCacheContext drCacheCtx = cacheCtx.isNear() ? cacheCtx.near().dht().context() : cacheCtx;

                if (drCacheCtx.isDrEnabled())
                    drCacheCtx.dr().beforeExchange(topVer, exchId.isLeft());

                // Partition release future is done so we can flush the write-behind store.
                cacheCtx.store().forceFlush();

                // Process queued undeploys prior to sending/spreading map.
                cacheCtx.preloader().unwindUndeploys();

                GridDhtPartitionTopology top = cacheCtx.topology();

                assert topVer.equals(top.topologyVersion()) :
                    "Topology version is updated only in this class instances inside single ExchangeWorker thread.";

                top.beforeExchange(this);
            }

            for (GridClientPartitionTopology top : cctx.exchange().clientTopologies()) {
                top.updateTopologyVersion(exchId, this, -1, stopping(top.cacheId()));

                top.beforeExchange(this);
            }
        }
        catch (IgniteInterruptedCheckedException e) {
            onDone(e);

            throw e;
        }
        catch (Throwable e) {
            U.error(log, "Failed to reinitialize local partitions (preloading will be stopped): " + exchId, e);

            onDone(e);

            if (e instanceof Error)
                throw (Error) e;

            return;
        }

        if (F.isEmpty(rmtIds)) {
            onDone(exchId.topologyVersion());

            return;
        }

        ready.set(true);

        initFut.onDone(true);

        if (log.isDebugEnabled())
            log.debug("Initialized future: " + this);

        // If this node is not oldest.
        if (!oldestNode.get().id().equals(cctx.localNodeId()))
            sendPartitions();
        else {
            boolean allReceived = allReceived();

            if (allReceived && replied.compareAndSet(false, true)) {
                if (spreadPartitions())
                    onDone(exchId.topologyVersion());
            }
        }

        scheduleRecheck();
    }
    else
        assert false : "Skipped init future: " + this;
}
/**
 * Remove particular entry from the trash directory or subdirectory.
 *
 * @param parentId Parent ID.
 * @param id Entry id.
 * @throws IgniteCheckedException If delete failed for some reason.
 */
private void deleteDirectory(IgniteUuid parentId, IgniteUuid id) throws IgniteCheckedException {
    assert parentId != null;
    assert id != null;

    while (true) {
        IgfsFileInfo info = meta.info(id);

        if (info != null) {
            assert info.isDirectory();

            Map<String, IgfsListingEntry> listing = info.listing();

            if (listing.isEmpty())
                return; // Directory is empty.

            Map<String, IgfsListingEntry> delListing;

            if (listing.size() <= MAX_DELETE_BATCH)
                delListing = listing;
            else {
                delListing = new HashMap<>(MAX_DELETE_BATCH, 1.0f);

                int i = 0;

                for (Map.Entry<String, IgfsListingEntry> entry : listing.entrySet()) {
                    delListing.put(entry.getKey(), entry.getValue());

                    if (++i == MAX_DELETE_BATCH)
                        break;
                }
            }

            GridCompoundFuture<Object, ?> fut = new GridCompoundFuture<>();

            // Delegate to child folders.
            for (IgfsListingEntry entry : delListing.values()) {
                if (!cancelled) {
                    if (entry.isDirectory())
                        deleteDirectory(id, entry.fileId());
                    else {
                        IgfsFileInfo fileInfo = meta.info(entry.fileId());

                        if (fileInfo != null) {
                            assert fileInfo.isFile();

                            fut.add(data.delete(fileInfo));
                        }
                    }
                }
                else
                    return;
            }

            fut.markInitialized();

            // Wait for data cache to delete values before clearing meta cache.
            try {
                fut.get();
            }
            catch (IgniteFutureCancelledCheckedException ignore) {
                // This future can be cancelled only due to IGFS shutdown.
                cancelled = true;

                return;
            }

            // Actual delete of folder content.
            Collection<IgniteUuid> delIds = meta.delete(id, delListing);

            if (delListing == listing && delListing.size() == delIds.size())
                break; // All entries were deleted.
        }
        else
            break; // Entry was deleted concurrently.
    }
}
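/*
 * A minimal sketch of the batching idea used in deleteDirectory above: take at
 * most MAX_DELETE_BATCH entries from a listing per pass and loop until the
 * whole listing is consumed. The takeBatch helper and the batch-size constant
 * value are hypothetical simplifications of the Ignite code.
 */
import java.util.HashMap;
import java.util.Map;

class BatchSketch {
    static final int MAX_DELETE_BATCH = 100;

    /** Returns the whole listing if it is small enough, otherwise only the first batch. */
    static <K, V> Map<K, V> takeBatch(Map<K, V> listing) {
        if (listing.size() <= MAX_DELETE_BATCH)
            return listing;

        Map<K, V> batch = new HashMap<>(MAX_DELETE_BATCH, 1.0f);
        int i = 0;
        for (Map.Entry<K, V> e : listing.entrySet()) {
            batch.put(e.getKey(), e.getValue());
            if (++i == MAX_DELETE_BATCH)
                break;
        }
        return batch; // Caller deletes this batch, then repeats until the listing is empty.
    }
}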
/** {@inheritDoc} */
@Override
public boolean isCacheTopologyValid(GridCacheContext cctx) {
    return cctx.config().getTopologyValidator() != null && cacheValidRes.containsKey(cctx.cacheId())
        ? cacheValidRes.get(cctx.cacheId())
        : true;
}
public static HttpSession find(boolean uselog, String sessionId) {
    // Note: the uselog flag is currently unused.
    return sessions.get(sessionId);
}