/** Checks consistency after all operations. */ private void consistencyCheck() { if (CONSISTENCY_CHECK) { assert lock.writeLock().isHeldByCurrentThread(); if (node2part == null) return; for (Map.Entry<UUID, GridDhtPartitionMap> e : node2part.entrySet()) { for (Integer p : e.getValue().keySet()) { Set<UUID> nodeIds = part2node.get(p); assert nodeIds != null : "Failed consistency check [part=" + p + ", nodeId=" + e.getKey() + ']'; assert nodeIds.contains(e.getKey()) : "Failed consistency check [part=" + p + ", nodeId=" + e.getKey() + ", nodeIds=" + nodeIds + ']'; } } for (Map.Entry<Integer, Set<UUID>> e : part2node.entrySet()) { for (UUID nodeId : e.getValue()) { GridDhtPartitionMap map = node2part.get(nodeId); assert map != null : "Failed consistency check [part=" + e.getKey() + ", nodeId=" + nodeId + ']'; assert map.containsKey(e.getKey()) : "Failed consistency check [part=" + e.getKey() + ", nodeId=" + nodeId + ']'; } } } }
/* Returns a map with the users that exist in the system. */ public HashMap<String, User> getUsers() { HashMap<String, User> r = new HashMap<>(); for (Map.Entry<String, User> entry : users.entrySet()) { r.put(entry.getKey(), entry.getValue()); } return r; }
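/* Illustrative aside (not part of the original project): the manual copy loop above produces a
 * shallow defensive copy; the JDK's HashMap copy constructor gives the same result. The method
 * name below is hypothetical. */
public HashMap<String, User> getUsersCopy() {
  return new HashMap<>(users); // shallow copy: same User references, independent map structure
}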
/** Clears swap entries for evicted partition. */ private void clearSwap() { assert state() == EVICTED; assert !GridQueryProcessor.isEnabled(cctx.config()) : "Indexing needs to have unswapped values."; try { GridCloseableIterator<Map.Entry<byte[], GridCacheSwapEntry>> it = cctx.swap().iterator(id); boolean isLocStore = cctx.store().isLocal(); if (it != null) { // We can safely remove these values because no entries will be created for evicted // partition. while (it.hasNext()) { Map.Entry<byte[], GridCacheSwapEntry> entry = it.next(); byte[] keyBytes = entry.getKey(); KeyCacheObject key = cctx.toCacheKeyObject(keyBytes); cctx.swap().remove(key); if (isLocStore) cctx.store().remove(null, key.value(cctx.cacheObjectContext(), false)); } } } catch (IgniteCheckedException e) { U.error(log, "Failed to clear swap for evicted partition: " + this, e); } }
public boolean equals(Object o) { // If not acting as entry, just use default. if (lastReturned == null) return super.equals(o); if (!(o instanceof Map.Entry)) return false; Map.Entry e = (Map.Entry) o; return eq(getKey(), e.getKey()) && eq(getValue(), e.getValue()); }
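/* Illustrative aside: the eq(...) helper used by the entry equals() implementations in this
 * collection is not shown in the snippets; it is assumed to be a null-safe comparison along
 * these lines (equivalent to java.util.Objects.equals): */
private static boolean eq(Object a, Object b) {
  return a == null ? b == null : a.equals(b); // both null, or non-null and equal
}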
/* * Finishes the execution of the task. * Returns true if it was finished successfully, false otherwise. */ public boolean endTask(String id) throws InterruptedException { Map<String, Integer> objectsToSupply; String type; lock.lock(); try { if (!this.tasksRunning.containsKey(Integer.valueOf(id))) { return false; } else { type = this.tasksRunning.get(Integer.valueOf(id)); objectsToSupply = this.tasks.get(type).getObjects(); } } finally { lock.unlock(); } // Supply all of the task's objects back to the warehouse for (Map.Entry<String, Integer> entry : objectsToSupply.entrySet()) { warehouse.supply(entry.getKey(), entry.getValue()); } lock.lock(); try { this.tasksRunning.remove(Integer.valueOf(id)); this.tasks.get(type).signalP(); } finally { lock.unlock(); } return true; }
/** * Formats the given statistics into a <tt>ColibriStatsExtension</tt> object. * * @param statistics the statistics instance * @return the resulting <tt>ColibriStatsExtension</tt> instance. */ public static ColibriStatsExtension toXMPP(Statistics statistics) { ColibriStatsExtension ext = new ColibriStatsExtension(); for (Map.Entry<String, Object> e : statistics.getStats().entrySet()) { ext.addStat(new ColibriStatsExtension.Stat(e.getKey(), e.getValue())); } return ext; }
/** {@inheritDoc} */ @Override public String toString() { StringBuilder s = new StringBuilder(); for (Map.Entry<String, Object> e : getStats().entrySet()) { s.append(e.getKey()).append(":").append(e.getValue()).append("\n"); } return s.toString(); }
/** Perform cleanup of the trash directory. */ private void delete() { IgfsFileInfo info = null; try { info = meta.info(TRASH_ID); } catch (ClusterTopologyServerNotFoundException e) { LT.warn(log, e, "Server nodes not found."); } catch (IgniteCheckedException e) { U.error(log, "Cannot obtain trash directory info.", e); } if (info != null) { for (Map.Entry<String, IgfsListingEntry> entry : info.listing().entrySet()) { IgniteUuid fileId = entry.getValue().fileId(); if (log.isDebugEnabled()) log.debug( "Deleting IGFS trash entry [name=" + entry.getKey() + ", fileId=" + fileId + ']'); try { if (!cancelled) { if (delete(entry.getKey(), fileId)) { if (log.isDebugEnabled()) log.debug( "Sending delete confirmation message [name=" + entry.getKey() + ", fileId=" + fileId + ']'); sendDeleteMessage(new IgfsDeleteMessage(fileId)); } } else break; } catch (IgniteInterruptedCheckedException ignored) { // Ignore this exception while stopping. } catch (IgniteCheckedException e) { U.error(log, "Failed to delete entry from the trash directory: " + entry.getKey(), e); sendDeleteMessage(new IgfsDeleteMessage(fileId, e)); } } } }
/** * Updates partition map in all caches. * * @param msg Partitions single message. */ private void updatePartitionSingleMap(GridDhtPartitionsSingleMessage msg) { for (Map.Entry<Integer, GridDhtPartitionMap> entry : msg.partitions().entrySet()) { Integer cacheId = entry.getKey(); GridCacheContext cacheCtx = cctx.cacheContext(cacheId); GridDhtPartitionTopology top = cacheCtx != null ? cacheCtx.topology() : cctx.exchange().clientTopology(cacheId, this); top.update(exchId, entry.getValue()); } }
/* Returns a map with the types of the tasks currently in progress. */ public HashMap<Integer, String> listTaskRunnig() { HashMap<Integer, String> tasklist = new HashMap<>(); lock.lock(); try { for (Map.Entry<Integer, String> entry : this.tasksRunning.entrySet()) { tasklist.put(entry.getKey(), entry.getValue()); } return tasklist; } finally { lock.unlock(); } }
/* Returns a map of the current stock (product name to quantity). */ public HashMap<String, Integer> listStock() { HashMap<String, Integer> liststock = new HashMap<>(); lock.lock(); try { for (Map.Entry<String, Product> entry : this.warehouse.getStock().entrySet()) { liststock.put(entry.getKey(), entry.getValue().getQuantity()); } return liststock; } finally { lock.unlock(); } }
/** * Updates partition map in all caches. * * @param msg Partitions full messages. */ private void updatePartitionFullMap(GridDhtPartitionsFullMessage msg) { for (Map.Entry<Integer, GridDhtPartitionFullMap> entry : msg.partitions().entrySet()) { Integer cacheId = entry.getKey(); GridCacheContext cacheCtx = cctx.cacheContext(cacheId); if (cacheCtx != null) cacheCtx.topology().update(exchId, entry.getValue()); else { ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx, AffinityTopologyVersion.NONE); if (oldest != null && oldest.isLocal()) cctx.exchange().clientTopology(cacheId, this).update(exchId, entry.getValue()); } } }
/* * Creates a new task. * Returns true if it was created successfully, false otherwise. */ public boolean newTask(String type, Packet objects) { if (type.equals("")) return false; lock.lock(); try { if (this.tasks.containsKey(type)) { return false; } else { HashMap<String, String> aux = objects.getArgs(); HashMap<String, Integer> objs = new HashMap<>(); for (Map.Entry<String, String> entry : aux.entrySet()) { objs.put(entry.getKey(), Integer.parseInt(entry.getValue())); } Task t = new Task(type, objs, lock); this.tasks.put(type, t); return true; } } finally { lock.unlock(); } }
/** * Starts activity. * * @throws IgniteInterruptedCheckedException If interrupted. */ public void init() throws IgniteInterruptedCheckedException { if (isDone()) return; if (init.compareAndSet(false, true)) { if (isDone()) return; try { // Wait for event to occur to make sure that discovery // will return corresponding nodes. U.await(evtLatch); assert discoEvt != null : this; assert !dummy && !forcePreload : this; ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx, exchId.topologyVersion()); oldestNode.set(oldest); startCaches(); // True if client node joined or failed. boolean clientNodeEvt; if (F.isEmpty(reqs)) { int type = discoEvt.type(); assert type == EVT_NODE_JOINED || type == EVT_NODE_LEFT || type == EVT_NODE_FAILED : discoEvt; clientNodeEvt = CU.clientNode(discoEvt.eventNode()); } else { assert discoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT : discoEvt; boolean clientOnlyStart = true; for (DynamicCacheChangeRequest req : reqs) { if (!req.clientStartOnly()) { clientOnlyStart = false; break; } } clientNodeEvt = clientOnlyStart; } if (clientNodeEvt) { ClusterNode node = discoEvt.eventNode(); // Client need to initialize affinity for local join event or for stated client caches. if (!node.isLocal()) { for (GridCacheContext cacheCtx : cctx.cacheContexts()) { if (cacheCtx.isLocal()) continue; GridDhtPartitionTopology top = cacheCtx.topology(); top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId())); if (cacheCtx.affinity().affinityTopologyVersion() == AffinityTopologyVersion.NONE) { initTopology(cacheCtx); top.beforeExchange(this); } else cacheCtx.affinity().clientEventTopologyChange(discoEvt, exchId.topologyVersion()); } if (exchId.isLeft()) cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion()); onDone(exchId.topologyVersion()); skipPreload = cctx.kernalContext().clientNode(); return; } } if (cctx.kernalContext().clientNode()) { skipPreload = true; for (GridCacheContext cacheCtx : cctx.cacheContexts()) { if (cacheCtx.isLocal()) continue; GridDhtPartitionTopology top = cacheCtx.topology(); top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId())); } for (GridCacheContext cacheCtx : cctx.cacheContexts()) { if (cacheCtx.isLocal()) continue; initTopology(cacheCtx); } if (oldestNode.get() != null) { rmtNodes = new ConcurrentLinkedQueue<>( CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion())); rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes))); ready.set(true); initFut.onDone(true); if (log.isDebugEnabled()) log.debug("Initialized future: " + this); sendPartitions(); } else onDone(exchId.topologyVersion()); return; } assert oldestNode.get() != null; for (GridCacheContext cacheCtx : cctx.cacheContexts()) { if (isCacheAdded(cacheCtx.cacheId(), exchId.topologyVersion())) { if (cacheCtx .discovery() .cacheAffinityNodes(cacheCtx.name(), topologyVersion()) .isEmpty()) U.quietAndWarn(log, "No server nodes found for cache client: " + cacheCtx.namex()); } cacheCtx.preloader().onExchangeFutureAdded(); } List<String> cachesWithoutNodes = null; if (exchId.isLeft()) { for (String name : cctx.cache().cacheNames()) { if (cctx.discovery().cacheAffinityNodes(name, topologyVersion()).isEmpty()) { if (cachesWithoutNodes == null) cachesWithoutNodes = new ArrayList<>(); cachesWithoutNodes.add(name); // Fire event even if there is no client cache started. 
if (cctx.gridEvents().isRecordable(EventType.EVT_CACHE_NODES_LEFT)) { Event evt = new CacheEvent( name, cctx.localNode(), cctx.localNode(), "All server nodes have left the cluster.", EventType.EVT_CACHE_NODES_LEFT, 0, false, null, null, null, null, false, null, false, null, null, null); cctx.gridEvents().record(evt); } } } } if (cachesWithoutNodes != null) { StringBuilder sb = new StringBuilder( "All server nodes for the following caches have left the cluster: "); for (int i = 0; i < cachesWithoutNodes.size(); i++) { String cache = cachesWithoutNodes.get(i); sb.append('\'').append(cache).append('\''); if (i != cachesWithoutNodes.size() - 1) sb.append(", "); } U.quietAndWarn(log, sb.toString()); U.quietAndWarn(log, "Must have server nodes for caches to operate."); } assert discoEvt != null; assert exchId.nodeId().equals(discoEvt.eventNode().id()); for (GridCacheContext cacheCtx : cctx.cacheContexts()) { GridClientPartitionTopology clientTop = cctx.exchange().clearClientTopology(cacheCtx.cacheId()); long updSeq = clientTop == null ? -1 : clientTop.lastUpdateSequence(); // Update before waiting for locks. if (!cacheCtx.isLocal()) cacheCtx .topology() .updateTopologyVersion(exchId, this, updSeq, stopping(cacheCtx.cacheId())); } // Grab all alive remote nodes with order of equal or less than last joined node. rmtNodes = new ConcurrentLinkedQueue<>( CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion())); rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes))); for (Map.Entry<UUID, GridDhtPartitionsSingleMessage> m : singleMsgs.entrySet()) // If received any messages, process them. onReceive(m.getKey(), m.getValue()); for (Map.Entry<UUID, GridDhtPartitionsFullMessage> m : fullMsgs.entrySet()) // If received any messages, process them. onReceive(m.getKey(), m.getValue()); AffinityTopologyVersion topVer = exchId.topologyVersion(); for (GridCacheContext cacheCtx : cctx.cacheContexts()) { if (cacheCtx.isLocal()) continue; // Must initialize topology after we get discovery event. initTopology(cacheCtx); cacheCtx.preloader().updateLastExchangeFuture(this); } IgniteInternalFuture<?> partReleaseFut = cctx.partitionReleaseFuture(topVer); // Assign to class variable so it will be included into toString() method. this.partReleaseFut = partReleaseFut; if (log.isDebugEnabled()) log.debug("Before waiting for partition release future: " + this); while (true) { try { partReleaseFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS); break; } catch (IgniteFutureTimeoutCheckedException ignored) { // Print pending transactions and locks that might have led to hang. dumpPendingObjects(); } } if (log.isDebugEnabled()) log.debug("After waiting for partition release future: " + this); if (!F.isEmpty(reqs)) blockGateways(); if (exchId.isLeft()) cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion()); IgniteInternalFuture<?> locksFut = cctx.mvcc().finishLocks(exchId.topologyVersion()); while (true) { try { locksFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS); break; } catch (IgniteFutureTimeoutCheckedException ignored) {
U.warn(log, "Failed to wait for locks release future. " + "Dumping pending objects that might be the cause: " + cctx.localNodeId()); U.warn(log, "Locked entries:"); Map<IgniteTxKey, Collection<GridCacheMvccCandidate>> locks = cctx.mvcc().unfinishedLocks(exchId.topologyVersion()); for (Map.Entry<IgniteTxKey, Collection<GridCacheMvccCandidate>> e : locks.entrySet()) U.warn(log, "Locked entry [key=" + e.getKey() + ", mvcc=" + e.getValue() + ']'); } } for (GridCacheContext cacheCtx : cctx.cacheContexts()) { if (cacheCtx.isLocal()) continue; // Notify replication manager. GridCacheContext drCacheCtx = cacheCtx.isNear() ? cacheCtx.near().dht().context() : cacheCtx; if (drCacheCtx.isDrEnabled()) drCacheCtx.dr().beforeExchange(topVer, exchId.isLeft()); // Partition release future is done so we can flush the write-behind store. cacheCtx.store().forceFlush(); // Process queued undeploys prior to sending/spreading map. cacheCtx.preloader().unwindUndeploys(); GridDhtPartitionTopology top = cacheCtx.topology(); assert topVer.equals(top.topologyVersion()) : "Topology version is updated only in this class instances inside single ExchangeWorker thread."; top.beforeExchange(this); } for (GridClientPartitionTopology top : cctx.exchange().clientTopologies()) { top.updateTopologyVersion(exchId, this, -1, stopping(top.cacheId())); top.beforeExchange(this); } } catch (IgniteInterruptedCheckedException e) { onDone(e); throw e; } catch (Throwable e) { U.error( log, "Failed to reinitialize local partitions (preloading will be stopped): " + exchId, e); onDone(e); if (e instanceof Error) throw (Error) e; return; } if (F.isEmpty(rmtIds)) { onDone(exchId.topologyVersion()); return; } ready.set(true); initFut.onDone(true); if (log.isDebugEnabled()) log.debug("Initialized future: " + this); // If this node is not oldest. if (!oldestNode.get().id().equals(cctx.localNodeId())) sendPartitions(); else { boolean allReceived = allReceived(); if (allReceived && replied.compareAndSet(false, true)) { if (spreadPartitions()) onDone(exchId.topologyVersion()); } } scheduleRecheck(); } else assert false : "Skipped init future: " + this; }
public boolean contains(Object o) { if (!(o instanceof Map.Entry)) return false; Map.Entry<K, V> e = (Map.Entry<K, V>) o; V v = ConcurrentHashMap.this.get(e.getKey()); return v != null && v.equals(e.getValue()); }
public boolean remove(Object o) { if (!(o instanceof Map.Entry)) return false; Map.Entry<K, V> e = (Map.Entry<K, V>) o; return ConcurrentHashMap.this.remove(e.getKey(), e.getValue()); }
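/* Standalone sketch (assumed demo class, plain JDK API) of the entry-based contains/remove
 * semantics implemented above: the passed entry matches only when both its key and its value
 * still agree with the current mapping in the map. */
import java.util.AbstractMap;
import java.util.concurrent.ConcurrentHashMap;

class EntrySetRemoveDemo {
  public static void main(String[] args) {
    ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
    map.put("a", 1);
    // Entry with a stale value: neither contained nor removed.
    System.out.println(map.entrySet().contains(new AbstractMap.SimpleEntry<>("a", 2))); // false
    System.out.println(map.entrySet().remove(new AbstractMap.SimpleEntry<>("a", 2)));   // false
    // Entry whose key and value both match: removed.
    System.out.println(map.entrySet().remove(new AbstractMap.SimpleEntry<>("a", 1)));   // true
  }
}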
/** {@inheritDoc} */ @SuppressWarnings({"MismatchedQueryAndUpdateOfCollection"}) @Nullable @Override public GridDhtPartitionMap update( @Nullable GridDhtPartitionExchangeId exchId, GridDhtPartitionFullMap partMap) { if (log.isDebugEnabled()) log.debug( "Updating full partition map [exchId=" + exchId + ", parts=" + fullMapString() + ']'); assert partMap != null; lock.writeLock().lock(); try { if (stopping) return null; if (exchId != null && lastExchangeId != null && lastExchangeId.compareTo(exchId) >= 0) { if (log.isDebugEnabled()) log.debug( "Stale exchange id for full partition map update (will ignore) [lastExchId=" + lastExchangeId + ", exchId=" + exchId + ']'); return null; } if (node2part != null && node2part.compareTo(partMap) >= 0) { if (log.isDebugEnabled()) log.debug( "Stale partition map for full partition map update (will ignore) [lastExchId=" + lastExchangeId + ", exchId=" + exchId + ", curMap=" + node2part + ", newMap=" + partMap + ']'); return null; } long updateSeq = this.updateSeq.incrementAndGet(); if (exchId != null) lastExchangeId = exchId; if (node2part != null) { for (GridDhtPartitionMap part : node2part.values()) { GridDhtPartitionMap newPart = partMap.get(part.nodeId()); // If for some nodes current partition has a newer map, // then we keep the newer value. if (newPart != null && newPart.updateSequence() < part.updateSequence()) { if (log.isDebugEnabled()) log.debug( "Overriding partition map in full update map [exchId=" + exchId + ", curPart=" + mapString(part) + ", newPart=" + mapString(newPart) + ']'); partMap.put(part.nodeId(), part); } } for (Iterator<UUID> it = partMap.keySet().iterator(); it.hasNext(); ) { UUID nodeId = it.next(); if (!cctx.discovery().alive(nodeId)) { if (log.isDebugEnabled()) log.debug( "Removing left node from full map update [nodeId=" + nodeId + ", partMap=" + partMap + ']'); it.remove(); } } } node2part = partMap; Map<Integer, Set<UUID>> p2n = new HashMap<>(cctx.affinity().partitions(), 1.0f); for (Map.Entry<UUID, GridDhtPartitionMap> e : partMap.entrySet()) { for (Integer p : e.getValue().keySet()) { Set<UUID> ids = p2n.get(p); if (ids == null) // Initialize HashSet to size 3 in anticipation that there won't be // more than 3 nodes per partitions. p2n.put(p, ids = U.newHashSet(3)); ids.add(e.getKey()); } } part2node = p2n; boolean changed = checkEvictions(updateSeq); consistencyCheck(); if (log.isDebugEnabled()) log.debug("Partition map after full update: " + fullMapString()); return changed ? localPartitionMap() : null; } finally { lock.writeLock().unlock(); } }
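/* Generic sketch of the inverse-index construction performed in the full-map update above: from
 * a node-to-partitions view, build the partition-to-nodes view. Placeholder names and plain JDK
 * collections (java.util imports assumed); this is not the Ignite implementation itself. */
static Map<Integer, Set<UUID>> invert(Map<UUID, Set<Integer>> node2part) {
  Map<Integer, Set<UUID>> part2node = new HashMap<>();
  for (Map.Entry<UUID, Set<Integer>> e : node2part.entrySet()) {
    for (Integer p : e.getValue()) {
      // Each partition is expected to be hosted by only a few nodes, so the sets stay small.
      part2node.computeIfAbsent(p, k -> new HashSet<>()).add(e.getKey());
    }
  }
  return part2node;
}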
public boolean equals(Object o) { if (!(o instanceof Map.Entry)) return false; Map.Entry e = (Map.Entry) o; return eq(key, e.getKey()) && eq(value, e.getValue()); }
/** * Remove particular entry from the trash directory or subdirectory. * * @param parentId Parent ID. * @param id Entry id. * @throws IgniteCheckedException If delete failed for some reason. */ private void deleteDirectory(IgniteUuid parentId, IgniteUuid id) throws IgniteCheckedException { assert parentId != null; assert id != null; while (true) { IgfsFileInfo info = meta.info(id); if (info != null) { assert info.isDirectory(); Map<String, IgfsListingEntry> listing = info.listing(); if (listing.isEmpty()) return; // Directory is empty. Map<String, IgfsListingEntry> delListing; if (listing.size() <= MAX_DELETE_BATCH) delListing = listing; else { delListing = new HashMap<>(MAX_DELETE_BATCH, 1.0f); int i = 0; for (Map.Entry<String, IgfsListingEntry> entry : listing.entrySet()) { delListing.put(entry.getKey(), entry.getValue()); if (++i == MAX_DELETE_BATCH) break; } } GridCompoundFuture<Object, ?> fut = new GridCompoundFuture<>(); // Delegate to child folders. for (IgfsListingEntry entry : delListing.values()) { if (!cancelled) { if (entry.isDirectory()) deleteDirectory(id, entry.fileId()); else { IgfsFileInfo fileInfo = meta.info(entry.fileId()); if (fileInfo != null) { assert fileInfo.isFile(); fut.add(data.delete(fileInfo)); } } } else return; } fut.markInitialized(); // Wait for data cache to delete values before clearing meta cache. try { fut.get(); } catch (IgniteFutureCancelledCheckedException ignore) { // This future can be cancelled only due to IGFS shutdown. cancelled = true; return; } // Actual delete of folder content. Collection<IgniteUuid> delIds = meta.delete(id, delListing); if (delListing == listing && delListing.size() == delIds.size()) break; // All entries were deleted. } else break; // Entry was deleted concurrently. } }
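/* Generic sketch of the batching pattern used above: take at most maxBatch entries of a listing
 * as one deletion batch. Placeholder names and plain JDK collections (java.util imports assumed),
 * not the IGFS implementation itself. */
static <K, V> Map<K, V> firstBatch(Map<K, V> listing, int maxBatch) {
  if (listing.size() <= maxBatch)
    return listing; // small listings fit into a single batch
  Map<K, V> batch = new LinkedHashMap<>(maxBatch, 1.0f);
  for (Map.Entry<K, V> e : listing.entrySet()) {
    batch.put(e.getKey(), e.getValue());
    if (batch.size() == maxBatch)
      break;
  }
  return batch;
}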