@Override
public void reconnectServer(final String serverName, final ModelNode domainModel,
        final byte[] authKey, final boolean running, final boolean stopping) {
    if (shutdown || connectionFinished) {
        throw HostControllerMessages.MESSAGES.hostAlreadyShutdown();
    }
    ManagedServer existing = servers.get(serverName);
    if (existing != null) {
        ROOT_LOGGER.existingServerWithState(serverName, existing.getState());
        return;
    }
    final ManagedServer server = createManagedServer(serverName, authKey);
    if ((existing = servers.putIfAbsent(serverName, server)) != null) {
        ROOT_LOGGER.existingServerWithState(serverName, existing.getState());
        return;
    }
    if (running) {
        if (!stopping) {
            server.reconnectServerProcess(createBootFactory(serverName, domainModel));
            // Register the server proxy at the domain controller
            domainController.registerRunningServer(server.getProxyController());
        } else {
            server.setServerProcessStopping();
        }
    } else {
        server.removeServerProcess();
    }
    synchronized (shutdownCondition) {
        shutdownCondition.notifyAll();
    }
}
public void addKnownInput(String hostName, int port,
        InputAttemptIdentifier srcAttemptIdentifier, int srcPhysicalIndex) {
    String identifier = InputHost.createIdentifier(hostName, port);
    InputHost host = knownSrcHosts.get(identifier);
    if (host == null) {
        host = new InputHost(hostName, port, inputContext.getApplicationId(), srcPhysicalIndex);
        assert identifier.equals(host.getIdentifier());
        InputHost old = knownSrcHosts.putIfAbsent(identifier, host);
        if (old != null) {
            host = old;
        }
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("Adding input: " + srcAttemptIdentifier + ", to host: " + host);
    }
    host.addKnownInput(srcAttemptIdentifier);
    lock.lock();
    try {
        boolean added = pendingHosts.offer(host);
        if (!added) {
            String errorMessage = "Unable to add host: " + host.getIdentifier() + " to pending queue";
            LOG.error(errorMessage);
            throw new TezUncheckedException(errorMessage);
        }
        wakeLoop.signal();
    } finally {
        lock.unlock();
    }
}
/**
 * @param e Entry.
 * @return Entry.
 */
private CacheContinuousQueryEntry handleEntry(CacheContinuousQueryEntry e) {
    assert e != null;
    assert entryBufs != null;

    if (internal) {
        if (e.isFiltered())
            return null;
        else
            return e;
    }

    // Initial query entry. These events should be fired immediately.
    if (e.updateCounter() == -1)
        return e;

    EntryBuffer buf = entryBufs.get(e.partition());

    if (buf == null) {
        buf = new EntryBuffer();

        EntryBuffer oldRec = entryBufs.putIfAbsent(e.partition(), buf);

        if (oldRec != null)
            buf = oldRec;
    }

    return buf.handle(e);
}
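The buffer lookup above is the classic pre-Java-8 ConcurrentMap idiom: get, then putIfAbsent, then prefer whichever instance won the race. On Java 8+ the same lookup collapses to a single call; a minimal sketch, assuming entryBufs is a ConcurrentMap<Integer, EntryBuffer> and a losing EntryBuffer is cheap enough to discard:

    // Atomically returns the existing buffer or installs a new one.
    EntryBuffer buf = entryBufs.computeIfAbsent(e.partition(), p -> new EntryBuffer());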
/**
 * This method calculates the minimum view ID known by the current node. This method is only used
 * in a clustered cache, and only invoked when either a view change is detected or a transaction
 * completes whose view ID is not the same as the current view ID.
 *
 * <p>This method is guarded by minViewRecalculationLock to prevent concurrent updates to the
 * minimum view ID field.
 *
 * @param idOfRemovedTransaction the view ID associated with the transaction that triggered this
 *     recalculation, or -1 if triggered by a view change event.
 */
@GuardedBy("minViewRecalculationLock")
private void calculateMinViewId(int idOfRemovedTransaction) {
    minViewRecalculationLock.lock();
    try {
        // We only need to re-calculate the minimum view ID if the transaction being completed
        // has the same ID as the smallest known transaction ID, to check what the new smallest
        // is. We repeat the check here since we now hold the lock.
        if (idOfRemovedTransaction == -1
                || (idOfRemovedTransaction == minTxViewId && idOfRemovedTransaction < currentViewId)) {
            int minViewIdFound = currentViewId;

            for (CacheTransaction ct : localTransactions.values()) {
                int viewId = ct.getViewId();
                if (viewId < minViewIdFound) minViewIdFound = viewId;
            }
            for (CacheTransaction ct : remoteTransactions.values()) {
                int viewId = ct.getViewId();
                if (viewId < minViewIdFound) minViewIdFound = viewId;
            }
            if (minViewIdFound > minTxViewId) {
                log.tracef("Changing minimum view ID from %s to %s", minTxViewId, minViewIdFound);
                minTxViewId = minViewIdFound;
            } else {
                log.tracef("Minimum view ID still is %s; nothing to change", minViewIdFound);
            }
        }
    } finally {
        minViewRecalculationLock.unlock();
    }
}
public Map<String, Object> process(Map<String, Object> input) {
    if (!isValid) {
        validate(true);
    }
    Long currentProcessId = processId.incrementAndGet();
    Semaphore resultSemaphoreForProcess = new Semaphore(0);
    resultSemaphores.put(currentProcessId, resultSemaphoreForProcess);

    // send input to all input pipeline stages
    for (PipelineStage inputStage : inputStages.keySet()) {
        Map<String, String> inputPortMapping = inputStages.get(inputStage);
        for (String inputPort : inputPortMapping.keySet()) {
            Object inputParam = input.get(inputPort);
            inputStage.consume(currentProcessId, inputPortMapping.get(inputPort), inputParam);
        }
    }

    // wait for the output to become ready
    resultSemaphoreForProcess.acquireUninterruptibly();

    // use equals() rather than == so the check does not depend on Boolean caching
    if (Boolean.FALSE.equals(processingStatus.remove(currentProcessId))) {
        Throwable t = processingException.remove(currentProcessId);
        throw new PipelineProcessingException(
                "Processing failed for id '" + currentProcessId + "'.", t);
    }

    // cleanup and return the result
    return clear(currentProcessId);
}
/**
 * Gets all the consumer endpoints.
 *
 * @return consumer endpoints
 */
public static Collection<Endpoint> getConsumerEndpoints() {
    Collection<Endpoint> endpoints = new ArrayList<Endpoint>(CONSUMERS.size());
    for (DirectVmConsumer consumer : CONSUMERS.values()) {
        endpoints.add(consumer.getEndpoint());
    }
    return endpoints;
}
/**
 * Removes candidate from the list of near local candidates.
 *
 * @param cand Candidate to remove.
 */
public void removeExplicitLock(GridCacheMvccCandidate cand) {
    GridCacheExplicitLockSpan span = pendingExplicit.get(cand.threadId());

    if (span == null)
        return;

    if (span.removeCandidate(cand))
        pendingExplicit.remove(cand.threadId(), span);
}
@NotNull
private InspectionTreeNode getToolParentNode(@NotNull String groupName,
        HighlightDisplayLevel errorLevel, boolean groupedBySeverity) {
    if (groupName.isEmpty()) {
        return getRelativeRootNode(groupedBySeverity, errorLevel);
    }
    ConcurrentMap<String, InspectionGroupNode> map = myGroups.get(errorLevel);
    if (map == null) {
        map = ConcurrencyUtil.cacheOrGet(
                myGroups, errorLevel, ContainerUtil.<String, InspectionGroupNode>newConcurrentMap());
    }
    InspectionGroupNode group;
    if (groupedBySeverity) {
        group = map.get(groupName);
    } else {
        group = null;
        for (Map<String, InspectionGroupNode> groupMap : myGroups.values()) {
            if ((group = groupMap.get(groupName)) != null) break;
        }
    }
    if (group == null) {
        group = ConcurrencyUtil.cacheOrGet(map, groupName, new InspectionGroupNode(groupName));
        addChildNodeInEDT(getRelativeRootNode(groupedBySeverity, errorLevel), group);
    }
    return group;
}
@Override
public int doStartTag() throws JspException {
    String key = JspUtils.getCurrentServletPath((HttpServletRequest) pageContext.getRequest())
            + "/" + name;
    bodyContent = null;
    output = OUTPUT_CACHE.get(key);

    // Output is expired? While producing, it's not considered expired
    // because the lastProduced field is set far in the future.
    if (output != null && System.currentTimeMillis() - output.lastProduced > duration) {
        setOutput(output, null);
        OUTPUT_CACHE.remove(key);
        output = null;
    }

    // Output isn't cached, so flag it to be produced.
    if (output == null) {
        output = new Output();
        output.key = key;

        // Make sure there's only one producing output at [R].
        Output o = OUTPUT_CACHE.putIfAbsent(key, output);
        if (o == null) {
            LOGGER.info("Producing [{}] in [{}]", key, Thread.currentThread());
            return EVAL_BODY_BUFFERED;
        }
        output = o;
    }
    return SKIP_BODY;
}
protected void checkRemoveLabelsFromNode(Map<NodeId, Set<String>> removeLabelsFromNode)
        throws IOException {
    // check that all labels being removed exist in the known label collections
    Set<String> knownLabels = labelCollections.keySet();
    for (Entry<NodeId, Set<String>> entry : removeLabelsFromNode.entrySet()) {
        NodeId nodeId = entry.getKey();
        Set<String> labels = entry.getValue();
        if (!knownLabels.containsAll(labels)) {
            String msg = "Not all labels being removed are contained in the known "
                    + "label collections, please check"
                    + ", removed labels=[" + StringUtils.join(labels, ",") + "]";
            LOG.error(msg);
            throw new IOException(msg);
        }

        Set<String> originalLabels = null;
        boolean nodeExisted = false;
        if (WILDCARD_PORT != nodeId.getPort()) {
            Node nm = getNMInNodeSet(nodeId);
            if (nm != null) {
                originalLabels = nm.labels;
                nodeExisted = true;
            }
        } else {
            Host host = nodeCollections.get(nodeId.getHost());
            if (null != host) {
                originalLabels = host.labels;
                nodeExisted = true;
            }
        }
        if (!nodeExisted) {
            String msg = "Tried to remove labels from NM=" + nodeId
                    + ", but the NM doesn't exist";
            LOG.error(msg);
            throw new IOException(msg);
        }

        // the labels will never be null
        if (labels.isEmpty()) {
            continue;
        }

        // originalLabels may be null, because when a Node is created, Node.labels can be null.
        if (originalLabels == null || !originalLabels.containsAll(labels)) {
            String msg = "Tried to remove labels = [" + StringUtils.join(labels, ",")
                    + "], but not all of them are contained by NM=" + nodeId;
            LOG.error(msg);
            throw new IOException(msg);
        }
    }
}
/**
 * Removes a Resident from the Datasource.
 *
 * @param resident the resident to remove
 * @return {@code true} if the resident was removed from the datasource
 */
public boolean removeResident(Resident resident) {
    boolean result = residents.remove(resident.getUUID()) != null;
    for (Town t : towns.values())
        if (t.hasResident(resident))
            t.removeResident(resident);
    return result;
}
@SuppressWarnings("unchecked") protected void internalRemoveFromClusterNodeLabels(Collection<String> labelsToRemove) { // remove labels from nodes for (Map.Entry<String, Host> nodeEntry : nodeCollections.entrySet()) { Host host = nodeEntry.getValue(); if (null != host) { host.labels.removeAll(labelsToRemove); for (Node nm : host.nms.values()) { if (nm.labels != null) { nm.labels.removeAll(labelsToRemove); } } } } // remove labels from node labels collection for (String label : labelsToRemove) { labelCollections.remove(label); } // create event to remove labels if (null != dispatcher) { dispatcher.getEventHandler().handle(new RemoveClusterNodeLabels(labelsToRemove)); } LOG.info("Remove labels: [" + StringUtils.join(labelsToRemove.iterator(), ",") + "]"); }
protected void createHostIfNonExisted(String hostName) {
    Host host = nodeCollections.get(hostName);
    if (null == host) {
        host = new Host();
        nodeCollections.put(hostName, host);
    }
}
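Note that this get-then-put sequence is atomic only if every caller holds an external lock; two unsynchronized callers could each observe null and install different Host instances. A minimal lock-free sketch of the same method, assuming nodeCollections is a ConcurrentMap<String, Host> and Java 8+:

    protected void createHostIfNonExisted(String hostName) {
        // computeIfAbsent creates and installs the Host atomically,
        // so no external synchronization is needed for this step.
        nodeCollections.computeIfAbsent(hostName, k -> new Host());
    }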
/**
 * @param sesId Session ID.
 * @param taskNodeId Task node ID.
 * @param taskName Task name.
 * @param dep Deployment.
 * @param taskClsName Task class name.
 * @param top Topology.
 * @param startTime Execution start time.
 * @param endTime Execution end time.
 * @param siblings Collection of siblings.
 * @param attrs Map of attributes.
 * @param fullSup {@code True} to enable distributed session attributes and checkpoints.
 * @return New session if one did not exist, or existing one.
 */
public GridTaskSessionImpl createTaskSession(GridUuid sesId, UUID taskNodeId, String taskName,
        @Nullable GridDeployment dep, String taskClsName, @Nullable Collection<UUID> top,
        long startTime, long endTime, Collection<GridComputeJobSibling> siblings,
        Map<Object, Object> attrs, boolean fullSup) {
    if (!fullSup) {
        return new GridTaskSessionImpl(taskNodeId, taskName, dep, taskClsName, sesId, top,
                startTime, endTime, siblings, attrs, ctx, fullSup);
    }

    while (true) {
        GridTaskSessionImpl ses = sesMap.get(sesId);

        if (ses == null) {
            GridTaskSessionImpl old = sesMap.putIfAbsent(sesId,
                    ses = new GridTaskSessionImpl(taskNodeId, taskName, dep, taskClsName, sesId,
                            top, startTime, endTime, siblings, attrs, ctx, fullSup));

            if (old != null)
                ses = old;
            else
                // Return without acquire.
                return ses;
        }

        if (ses.acquire())
            return ses;
        else
            sesMap.remove(sesId, ses);
    }
}
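The loop above pairs putIfAbsent with a reference-count acquire: a thread that loses the insertion race tries to pin the winner's session, and if that session has already been released it removes the stale entry with the two-argument remove (so a newer mapping is never clobbered) and retries. A minimal generic sketch of the same idiom, with a hypothetical Refcounted interface standing in for GridTaskSessionImpl's acquire():

    // Hypothetical helper, not from the original source; assumes
    // java.util.concurrent.ConcurrentMap and java.util.function.Supplier.
    interface Refcounted { boolean acquire(); }

    static <K, V extends Refcounted> V getOrCreate(
            ConcurrentMap<K, V> map, K key, Supplier<V> factory) {
        while (true) {
            V v = map.get(key);
            if (v == null) {
                V fresh = factory.get(); // starts out acquired by construction
                V old = map.putIfAbsent(key, fresh);
                if (old == null)
                    return fresh;        // we installed the fresh entry
                v = old;                 // someone beat us to it
            }
            if (v.acquire())
                return v;                // pinned a live entry
            map.remove(key, v);          // entry was released concurrently; retry
        }
    }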
/** {@inheritDoc} */
public IPlaylistSubscriberStream newPlaylistSubscriberStream(int streamId) {
    getReadLock().lock();
    try {
        int index = streamId - 1;
        if (index < 0 || !reservedStreams.get(index)) {
            // StreamId has not been reserved before
            return null;
        }
    } finally {
        getReadLock().unlock();
    }

    if (streams.get(streamId - 1) != null) {
        // Another stream already exists with this id
        return null;
    }

    // Pick up the PlaylistSubscriberStream defined as a Spring prototype in red5-common.xml
    PlaylistSubscriberStream pss =
            (PlaylistSubscriberStream) scope.getContext().getBean("playlistSubscriberStream");

    Integer buffer = streamBuffers.get(streamId - 1);
    if (buffer != null) {
        pss.setClientBufferDuration(buffer);
    }
    pss.setName(createStreamName());
    pss.setConnection(this);
    pss.setScope(this.getScope());
    pss.setStreamId(streamId);
    registerStream(pss);
    usedStreams.incrementAndGet();
    return pss;
}
public ClientProxy getOrCreateProxy(String service, String id) {
    final ObjectNamespace ns = new DefaultObjectNamespace(service, id);
    ClientProxyFuture proxyFuture = proxies.get(ns);
    if (proxyFuture != null) {
        return proxyFuture.get();
    }
    final ClientProxyFactory factory = proxyFactories.get(service);
    if (factory == null) {
        throw new IllegalArgumentException("No factory registered for service: " + service);
    }
    final ClientProxy clientProxy = factory.create(id);
    proxyFuture = new ClientProxyFuture();
    final ClientProxyFuture current = proxies.putIfAbsent(ns, proxyFuture);
    if (current != null) {
        return current.get();
    }
    try {
        initialize(clientProxy);
    } catch (Exception e) {
        proxies.remove(ns);
        proxyFuture.set(e);
        throw ExceptionUtil.rethrow(e);
    }
    proxyFuture.set(clientProxy);
    return clientProxy;
}
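The ClientProxyFuture here plays the same role as the future-based Memoizer from Java Concurrency in Practice: publishing the future before running initialize(...) guarantees at most one thread initializes each namespace, while latecomers block on the future. A minimal sketch of that underlying idiom using only JDK types (a hypothetical helper, not Hazelcast API):

    <K, V> V memoize(ConcurrentMap<K, FutureTask<V>> cache, K key, Callable<V> init)
            throws Exception {
        FutureTask<V> task = cache.get(key);
        if (task == null) {
            FutureTask<V> fresh = new FutureTask<>(init);
            task = cache.putIfAbsent(key, fresh);
            if (task == null) {
                task = fresh;
                fresh.run(); // only the installing thread runs the initializer
            }
        }
        try {
            return task.get(); // latecomers block until initialization completes
        } catch (ExecutionException e) {
            cache.remove(key, task); // evict the failed future so callers can retry
            throw e;
        }
    }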
private void multicastJoin(int count, final boolean sleep) throws InterruptedException {
    final TestHazelcastInstanceFactory nodeFactory = createHazelcastInstanceFactory(count);

    final Config config = new Config();
    config.setProperty("hazelcast.wait.seconds.before.join", "5");
    config.getNetworkConfig().getJoin().getMulticastConfig().setMulticastTimeoutSeconds(25);

    final ConcurrentMap<Integer, HazelcastInstance> map =
            new ConcurrentHashMap<Integer, HazelcastInstance>();
    final CountDownLatch latch = new CountDownLatch(count);
    final ExecutorService ex = Executors.newCachedThreadPool();
    for (int i = 0; i < count; i++) {
        final int index = i;
        ex.execute(new Runnable() {
            public void run() {
                if (sleep) {
                    try {
                        Thread.sleep((int) (1000 * Math.random()));
                    } catch (InterruptedException ignored) {
                    }
                }
                HazelcastInstance h = nodeFactory.newHazelcastInstance(config);
                map.put(index, h);
                latch.countDown();
            }
        });
    }
    assertOpenEventually(latch);
    for (HazelcastInstance h : map.values()) {
        assertEquals(count, h.getCluster().getMembers().size());
    }
    ex.shutdown();
}
/** Update the recorded test results to remove all tests. Reports any changes. */
public void empty() {
    Set<Test> wontRun = new HashSet<Test>(results.keySet());
    results.clear();
    for (Test test : wontRun) {
        reportRemove(test);
    }
}
@Override
public ActionVersionMap getActionVersionMapByActionType(VdcActionType action_type) {
    ActionVersionMap result = cache.get(action_type);
    if (result != null) {
        if (result.isNullValue()) {
            return null;
        }
        return result;
    }
    MapSqlParameterSource parameterSource =
            getCustomMapSqlParameterSource().addValue("action_type", action_type);
    result = getCallsHandler().executeRead("Getaction_version_mapByaction_type",
            ActionVersionMapMapper.instance, parameterSource);
    if (result == null) {
        // Cache a null-object sentinel so repeated misses skip the database.
        cache.putIfAbsent(action_type, nullActionVersionMap);
    } else {
        cache.putIfAbsent(action_type, result);
    }
    result = cache.get(action_type);
    if (result.isNullValue()) {
        return null;
    }
    return result;
}
/** Implement CollectorProtocol methods */
@Override
public TaskTrackerUtilization getTaskTrackerUtilization(String hostName) throws IOException {
    // NOTE: the two separate get() calls below are a check-then-act race if reports
    // can be removed concurrently; fetching the report once into a local variable
    // would make this null-safe.
    if (taskTrackerReports.get(hostName) == null) {
        return null;
    }
    return taskTrackerReports.get(hostName).getTaskTrackerUtilization();
}
@Override
public ThreadPoolExecutor getThreadPool(HystrixThreadPoolKey threadPoolKey,
        HystrixProperty<Integer> corePoolSize, HystrixProperty<Integer> maximumPoolSize,
        HystrixProperty<Integer> keepAliveTime, TimeUnit unit, BlockingQueue<Runnable> workQueue) {
    // Join all three parts into one iterable so the format is "hystrix-<key>-%d".
    final String nameFormat =
            Joiner.on('-').join(ImmutableList.of("hystrix", threadPoolKey.name(), "%d"));
    final ThreadFactory threadFactory =
            new ThreadFactoryBuilder().setNameFormat(nameFormat).build();

    final String key = threadPoolKey.name();
    final ThreadPoolExecutor existing = executors.putIfAbsent(key,
            new ThreadPoolExecutor(corePoolSize.get(), maximumPoolSize.get(), keepAliveTime.get(),
                    unit, workQueue, threadFactory));
    final ThreadPoolExecutor threadPoolExecutor = executors.get(key);

    if (existing == null) {
        environment.lifecycle()
                .manage(new ExecutorServiceManager(threadPoolExecutor, Duration.seconds(5), nameFormat));
    }

    return threadPoolExecutor;
}
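This putIfAbsent-first variant constructs a fresh ThreadPoolExecutor on every call and discards it when the key is already mapped (harmless, since an unused executor starts no threads, but wasteful). A hedged Java 8+ alternative for the lookup-and-register block, assuming executors is a ConcurrentMap and that registering with the lifecycle inside the mapping function is acceptable (computeIfAbsent invokes it at most once per key):

    final ThreadPoolExecutor threadPoolExecutor = executors.computeIfAbsent(key, k -> {
        // Construct and register the executor only when the key is actually absent.
        ThreadPoolExecutor tpe = new ThreadPoolExecutor(corePoolSize.get(), maximumPoolSize.get(),
                keepAliveTime.get(), unit, workQueue, threadFactory);
        environment.lifecycle()
                .manage(new ExecutorServiceManager(tpe, Duration.seconds(5), nameFormat));
        return tpe;
    });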
public static CurrencyInfo reloadCurrencyInfo(String id) throws SQLException {
    CurrencyInfo currency = CurrencyInfo.loadCurrency(id);
    if (currency == null)
        CurrencyDb.remove(id);
    else
        CurrencyDb.put(currency.CurrencyId, currency);
    return currency;
}
private void shutDownGracefully() {
    if (log.isDebugEnabled())
        log.debugf("Wait for on-going transactions to finish for %s.",
                Util.prettyPrintTime(configuration.transaction().cacheStopTimeout(),
                        TimeUnit.MILLISECONDS));

    long failTime = currentMillisFromNanotime() + configuration.transaction().cacheStopTimeout();
    boolean txsOnGoing = areTxsOnGoing();
    while (txsOnGoing && currentMillisFromNanotime() < failTime) {
        try {
            Thread.sleep(30);
            txsOnGoing = areTxsOnGoing();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            if (clustered) {
                log.debugf("Interrupted waiting for on-going transactions to finish. "
                                + "%s local transactions and %s remote transactions",
                        localTransactions.size(), remoteTransactions.size());
            } else {
                log.debugf("Interrupted waiting for %s on-going transactions to finish.",
                        localTransactions.size());
            }
        }
    }

    if (txsOnGoing) {
        log.unfinishedTransactionsRemain(
                localTransactions == null ? 0 : localTransactions.size(),
                remoteTransactions == null ? 0 : remoteTransactions.size());
    } else {
        log.debug("All transactions terminated");
    }
}
/** Update metrics for alive/dead nodes. */
private void setAliveDeadMetrics() {
    clusterManager.getMetrics().setAliveNodes(nameToNode.size());
    int totalHosts = hostsReader.getHosts().size();
    if (totalHosts > 0) {
        clusterManager.getMetrics().setDeadNodes(totalHosts - nameToNode.size());
    }
}
@Override
public boolean isReadOnly(boolean useCache, String cluster, String table) {
    String key = getClusterTableKey(cluster, table);
    if (useCache) {
        Boolean flag = _readOnlyMap.get(key);
        if (flag != null) {
            return flag;
        }
    }
    LOG.debug("trace isReadOnly");
    String path = ZookeeperPathConstants.getTableReadOnlyPath(cluster, table);
    Boolean flag = null;
    try {
        if (_zk.exists(path, false) == null) {
            flag = false;
            return false;
        }
        flag = true;
        return true;
    } catch (KeeperException e) {
        throw new RuntimeException(e);
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    } finally {
        // Only cache a result when one was actually computed; putting a null flag
        // after an exception would itself throw and mask the original error.
        if (flag != null) {
            _readOnlyMap.put(key, flag);
        }
    }
}
/**
 * Remove the node from the runnable indices.
 *
 * @param node node to remove
 */
public void deleteRunnable(ClusterNode node) {
    String host = node.getHost();
    if (LOG.isDebugEnabled()) {
        LOG.debug(node.getName() + " deleted from runnable list for type: " + type);
    }

    NodeContainer nodeContainer = hostToRunnableNodes.get(host);
    if (nodeContainer != null) {
        synchronized (nodeContainer) {
            if (nodeContainer.removeNode(node)) {
                // We are not removing the nodeContainer from the runnable nodes map
                // since we are synchronizing operations with runnable indices on it.
                hostsWithRunnableNodes.decrementAndGet();
            }
        }
    }

    Node rack = node.hostNode.getParent();
    nodeContainer = rackToRunnableNodes.get(rack);
    if (nodeContainer != null) {
        synchronized (nodeContainer) {
            // We are not removing the nodeContainer from the runnable nodes map
            // since we are synchronizing operations with runnable indices on it.
            nodeContainer.removeNode(node);
        }
    }
}
/**
 * @param ctx Context.
 * @param e Entry.
 * @return Entry collection.
 */
private Collection<CacheContinuousQueryEntry> handleEvent(GridKernalContext ctx,
        CacheContinuousQueryEntry e) {
    assert e != null;

    if (internal) {
        if (e.isFiltered())
            return Collections.emptyList();
        else
            return F.asList(e);
    }

    // Initial query entry or evicted entry. These events should be fired immediately.
    if (e.updateCounter() == -1L)
        return F.asList(e);

    PartitionRecovery rec = rcvs.get(e.partition());

    if (rec == null) {
        rec = new PartitionRecovery(ctx.log(getClass()), initTopVer,
                initUpdCntrs == null ? null : initUpdCntrs.get(e.partition()));

        PartitionRecovery oldRec = rcvs.putIfAbsent(e.partition(), rec);

        if (oldRec != null)
            rec = oldRec;
    }

    return rec.collectEntries(e);
}
/** {@inheritDoc} */
public IClientBroadcastStream newBroadcastStream(int streamId) {
    getReadLock().lock();
    try {
        int index = streamId - 1;
        if (index < 0 || !reservedStreams.get(index)) {
            // StreamId has not been reserved before
            return null;
        }
    } finally {
        getReadLock().unlock();
    }

    if (streams.get(streamId - 1) != null) {
        // Another stream already exists with this id
        return null;
    }

    // Pick up the ClientBroadcastStream defined as a Spring prototype in red5-common.xml
    ClientBroadcastStream cbs =
            (ClientBroadcastStream) scope.getContext().getBean("clientBroadcastStream");

    Integer buffer = streamBuffers.get(streamId - 1);
    if (buffer != null) {
        cbs.setClientBufferDuration(buffer);
    }
    cbs.setStreamId(streamId);
    cbs.setConnection(this);
    cbs.setName(createStreamName());
    cbs.setScope(this.getScope());
    registerStream(cbs);
    usedStreams.incrementAndGet();
    return cbs;
}
static long nextMemberId(Container container) {
    AtomicLong counter = CONTAINER_COUNTER.putIfAbsent(container.id(), new AtomicLong(-1));
    if (counter == null) {
        counter = CONTAINER_COUNTER.get(container.id());
    }
    return counter.incrementAndGet();
}
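This version allocates a throwaway AtomicLong on every call once the counter exists. A minimal Java 8+ sketch that allocates only on first use, assuming CONTAINER_COUNTER is a ConcurrentMap keyed by the container id (key type assumed):

    static long nextMemberId(Container container) {
        // Atomically installs the counter on first use, then reuses it.
        return CONTAINER_COUNTER
                .computeIfAbsent(container.id(), id -> new AtomicLong(-1))
                .incrementAndGet();
    }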
@Override
public ServerStatus startServer(final String serverName, final ModelNode domainModel,
        final boolean blocking) {
    if (shutdown || connectionFinished) {
        throw HostControllerMessages.MESSAGES.hostAlreadyShutdown();
    }
    ManagedServer server = servers.get(serverName);
    if (server == null) {
        // Create a new authKey
        final byte[] authKey = new byte[16];
        new Random(new SecureRandom().nextLong()).nextBytes(authKey);
        removeNullChar(authKey);
        // Create the managed server
        final ManagedServer newServer = createManagedServer(serverName, authKey);
        server = servers.putIfAbsent(serverName, newServer);
        if (server == null) {
            server = newServer;
        }
    }
    // Start the server
    server.start(createBootFactory(serverName, domainModel));
    synchronized (shutdownCondition) {
        shutdownCondition.notifyAll();
    }
    if (blocking) {
        // Block until the server started message
        server.awaitState(ManagedServer.InternalState.SERVER_STARTED);
    } else {
        // Wait until the server opens the mgmt connection
        server.awaitState(ManagedServer.InternalState.SERVER_STARTING);
    }
    return server.getState();
}
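One caveat on the authKey generation above: seeding java.util.Random with a single long from SecureRandom caps the key's entropy at Random's 48-bit seed, even though 16 bytes are produced. A hedged sketch of a stronger variant (an assumed change, not the original code), drawing the bytes directly from SecureRandom:

    final byte[] authKey = new byte[16];
    new SecureRandom().nextBytes(authKey); // full-strength random bytes, not limited by a 48-bit seed
    removeNullChar(authKey);               // same post-processing as the original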