@Test
public void testReplaceOldValueSuccess() {
  ConcurrentMap<SimpleKey, SimpleValue> map = redisson.getMap("simple");
  map.put(new SimpleKey("1"), new SimpleValue("2"));

  boolean res = map.replace(new SimpleKey("1"), new SimpleValue("2"), new SimpleValue("3"));
  Assert.assertTrue(res);

  boolean res1 = map.replace(new SimpleKey("1"), new SimpleValue("2"), new SimpleValue("3"));
  Assert.assertFalse(res1);

  SimpleValue val1 = map.get(new SimpleKey("1"));
  Assert.assertEquals("3", val1.getValue());
}
@Override
void doRun() {
  long endTime = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(DURATION_SECONDS);
  Random random = new Random();
  while (true) {
    int key = random.nextInt(itemCount);
    int increment = random.nextInt(100);
    values[key] += increment;
    while (true) {
      Integer value = map.get(key);
      if (value == null) {
        value = 0;
      }
      if (map.replace(key, value, value + increment)) {
        break;
      }
    }
    if (System.currentTimeMillis() > endTime) {
      break;
    }
  }
}
// Reset tasks that have not completed within their allotted time.
protected void checkTaskStatus() {
  Iterator<String> taskIds = statusPool.keySet().iterator();
  while (taskIds.hasNext()) {
    String taskId = taskIds.next();
    JobTaskStatus taskStatus = statusPool.get(taskId);
    JobTask jobTask = jobTaskPool.get(taskId);

    if (taskStatus == JobTaskStatus.DOING
        && jobTask.getStartTime() != 0
        && System.currentTimeMillis() - jobTask.getStartTime()
            >= jobTask.getTaskRecycleTime() * 1000) {
      if (statusPool.replace(taskId, JobTaskStatus.DOING, JobTaskStatus.UNDO)) {
        jobTask.setStatus(JobTaskStatus.UNDO);
        undoTaskQueue.offer(jobTask);
        jobTask.getRecycleCounter().incrementAndGet();

        if (logger.isWarnEnabled())
          logger.warn(
              "Task : " + jobTask.getTaskId() + " did not complete in time and will be recycled.");
      }
    }
  }
}
// Nullness checker is not powerful enough to prove null-safety of
// this method
@SuppressWarnings("nullness")
void incrementBy(String key, long delta) {
  // We use a compareAndSet strategy to update the map, which is much
  // faster when there isn't too much contention. Look at a value, and
  // conditionally update the map if the value hasn't changed.
  // If it has changed, repeat.
  Long oldValue = map.get(key);
  if (oldValue == null) {
    // Currently, the slot is empty
    oldValue = map.putIfAbsent(key, delta);
    if (oldValue == null) {
      // The slot was still empty when we set it
      return;
    } else {
      // Someone filled in the slot behind our back. oldValue has
      // its current value
    }
  }
  while (true) {
    if (map.replace(key, oldValue, oldValue + delta)) {
      break;
    }
    // Nullness checker doesn't understand that this cannot return null.
    oldValue = map.get(key);
  }
}
@Override
public final void addEvent(@NotNull final StuffEventMessage stuffEventMessage) {
  @NotNull final NhsNumber patientIdentifier = stuffEventMessage.patientIdentifier();
  try {
    final K key = key(patientIdentifier);
    @Nullable V oldPatientRecord = root.get(key);
    do {
      if (oldPatientRecord == null) {
        // No entry, try to add one
        final SimplePatientRecord simplePatientRecord = initialPatientRecord(stuffEventMessage);
        oldPatientRecord = root.putIfAbsent(key, value(simplePatientRecord));
        if (wasNewPatientSuccessfullyAdded(oldPatientRecord)) {
          return;
        }
      } else {
        // Existing entry. Try to atomically replace it
        final V newPatientRecord = oldPatientRecord.addRepositoryEvent(stuffEventMessage);
        if (root.replace(key, oldPatientRecord, newPatientRecord)) {
          return;
        }
        oldPatientRecord = root.get(key);
      }
    } while (true);
  } finally {
    eventObserver.storeChanged(patientIdentifier);
  }
}
private static <K, V> boolean compareAndSet(
    ConcurrentMap<K, V> map, K key, V oldValue, V newValue) {
  if (oldValue == null) {
    return map.putIfAbsent(key, newValue) == null;
  }
  return map.replace(key, oldValue, newValue);
}
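For context, a minimal self-contained sketch of how a helper like the one above backs an optimistic update loop; the class name, map, key, and counter values here are illustrative only and not taken from any of the projects quoted in this section.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class CompareAndSetDemo {

  // Same contract as the helper above: a null oldValue means "expect no entry",
  // which maps to putIfAbsent; otherwise use the three-argument replace.
  static <K, V> boolean compareAndSet(ConcurrentMap<K, V> map, K key, V oldValue, V newValue) {
    if (oldValue == null) {
      return map.putIfAbsent(key, newValue) == null;
    }
    return map.replace(key, oldValue, newValue);
  }

  public static void main(String[] args) {
    ConcurrentMap<String, Long> counters = new ConcurrentHashMap<>();
    // Classic optimistic loop: read the current value, compute the new one,
    // and retry if another thread changed the entry in between.
    while (true) {
      Long current = counters.get("hits");
      Long next = (current == null) ? 1L : current + 1;
      if (compareAndSet(counters, "hits", current, next)) {
        break;
      }
    }
    System.out.println(counters.get("hits")); // prints 1
  }
}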
public void addMetric(String name, TelemetryData d) {
  lastUpdate = System.currentTimeMillis() / 1000;
  // Store the latest sample: create the entry if it is absent, otherwise
  // replace the current value, retrying until the update sticks. Re-running
  // putIfAbsent on each retry also avoids passing a null expected value to
  // replace() if the entry is removed concurrently.
  while (true) {
    TelemetryData oldVal = metrics.putIfAbsent(name, d);
    if (oldVal == null || metrics.replace(name, oldVal, d)) {
      return;
    }
  }
}
@Override
public V replace(K key, V value) {
  writeLock.lock();
  try {
    return delegate.replace(key, value);
  } finally {
    writeLock.unlock();
  }
}

@Override
public boolean replace(K key, V oldValue, V newValue) {
  writeLock.lock();
  try {
    return delegate.replace(key, oldValue, newValue);
  } finally {
    writeLock.unlock();
  }
}
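A minimal sketch, with assumed class and field names rather than the original source, of the delegate-plus-lock wrapper pattern these two overrides belong to: every mutating call guards the delegate map with the write half of a ReentrantReadWriteLock, while reads take the read half.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Hypothetical wrapper illustrating where writeLock and delegate could live.
class LockGuardedMap<K, V> {
  private final ConcurrentMap<K, V> delegate = new ConcurrentHashMap<>();
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private final ReentrantReadWriteLock.WriteLock writeLock = lock.writeLock();
  private final ReentrantReadWriteLock.ReadLock readLock = lock.readLock();

  public boolean replace(K key, V oldValue, V newValue) {
    writeLock.lock();
    try {
      return delegate.replace(key, oldValue, newValue);
    } finally {
      writeLock.unlock();
    }
  }

  public V get(K key) {
    readLock.lock();
    try {
      return delegate.get(key);
    } finally {
      readLock.unlock();
    }
  }
}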
/** {@inheritDoc} */
@Override
public boolean replace(ExtKey key, Object oldValue, Object newValue) {
  if (newValue == null) {
    return map.remove(key, oldValue);
  } else {
    checkKeyValue(key, newValue);
    return map.replace(key, oldValue, newValue);
  }
}

/** {@inheritDoc} */
@Override
public Object replace(ExtKey key, Object value) {
  if (value == null) {
    return map.remove(key);
  } else {
    checkKeyValue(key, value);
    return map.replace(key, value);
  }
}
public static void main(String[] args) {
  Config config = new Config();
  HazelcastInstance h = Hazelcast.newHazelcastInstance(config);
  ConcurrentMap<String, String> map = h.getMap("my-distributed-map");
  map.put("key", "value");
  map.get("key");
  // Concurrent Map methods
  map.putIfAbsent("somekey", "somevalue");
  map.replace("key", "value", "newvalue");
}
@Test
public void testReplaceValue() {
  ConcurrentMap<SimpleKey, SimpleValue> map = redisson.getMap("simple");
  map.put(new SimpleKey("1"), new SimpleValue("2"));

  SimpleValue res = map.replace(new SimpleKey("1"), new SimpleValue("3"));
  Assert.assertEquals("2", res.getValue());

  SimpleValue val1 = map.get(new SimpleKey("1"));
  Assert.assertEquals("3", val1.getValue());
}
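As a small reminder of the plain JDK semantics (not tied to the Redisson test above): the two-argument replace only succeeds when the key already has a mapping and returns the previous value, while the three-argument form returns a boolean and fires only on an exact match of the old value. The map and key names below are illustrative.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class ReplaceSemanticsDemo {
  public static void main(String[] args) {
    ConcurrentMap<String, String> map = new ConcurrentHashMap<>();

    // No mapping yet: replace is a no-op and returns null.
    System.out.println(map.replace("k", "v1")); // null
    System.out.println(map.containsKey("k"));   // false

    map.put("k", "v1");

    // Unconditional replace returns the previous value.
    System.out.println(map.replace("k", "v2")); // v1

    // Conditional replace only fires when the current value matches.
    System.out.println(map.replace("k", "v1", "v3")); // false, value is still v2
    System.out.println(map.replace("k", "v2", "v3")); // true
    System.out.println(map.get("k"));                 // v3
  }
}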
public void runCleanTask(final String name, String timeoutSetName, long currentDate) {
  final Long lastExpired = lastExpiredTime.get(name);
  long now = System.currentTimeMillis();
  if (lastExpired == null) {
    if (lastExpiredTime.putIfAbsent(name, now) != null) {
      return;
    }
  } else if (lastExpired + expireTaskExecutionDelay >= now) {
    if (!lastExpiredTime.replace(name, lastExpired, now)) {
      return;
    }
  } else {
    return;
  }

  Future<Integer> future =
      cleanupExpiredEntires(name, timeoutSetName, null, valuesAmountToClean, false);
  future.addListener(
      new FutureListener<Integer>() {
        @Override
        public void operationComplete(Future<Integer> future) throws Exception {
          executor
              .getConnectionManager()
              .getGroup()
              .schedule(
                  new Runnable() {
                    @Override
                    public void run() {
                      lastExpiredTime.remove(name, lastExpired);
                    }
                  },
                  expireTaskExecutionDelay * 3,
                  TimeUnit.SECONDS);
          if (!future.isSuccess()) {
            log.warn(
                "Can't execute clean task for expired values. RSetCache name: " + name,
                future.cause());
            return;
          }
        }
      });
}
@Override
public void put(Entry entry) {
  ByteBuffer key = ByteBuffer.wrap(entry.getKey());
  boolean done = false;
  while (!done) {
    Entry old = map.putIfAbsent(key, entry);
    done = true;
    if (old != null) {
      entry = resolver.resolve(old, entry);
      if (entry != old) {
        done = map.replace(key, old, entry);
      }
    }
  }
}
public <T> boolean replace(@NotNull Key<T> key, @Nullable T oldValue, @Nullable T newValue) {
  while (true) {
    try {
      ConcurrentMap<Key, Object> map = getOrCreateMap();
      if (oldValue == null) {
        return newValue == null || map.putIfAbsent(key, newValue) == null;
      }
      if (newValue == null) {
        boolean removed = map.remove(key, oldValue);
        if (removed) {
          nullifyMapFieldIfEmpty();
        }
        return removed;
      }
      return map.replace(key, oldValue, newValue);
    } catch (ConcurrentModificationException ignored) {
      // someone blocked modification, retry
    }
  }
}
public void updateSource(TaskSource source) {
  checkLockNotHeld("Can not update sources while holding the driver lock");

  // does this driver have an operator for the specified source?
  if (!sourceOperators.containsKey(source.getPlanNodeId())) {
    return;
  }

  // stage the new updates
  while (true) {
    // attempt to update directly to the new source
    TaskSource currentNewSource = newSources.putIfAbsent(source.getPlanNodeId(), source);

    // if update succeeded, just break
    if (currentNewSource == null) {
      break;
    }

    // merge source into the current new source
    TaskSource newSource = currentNewSource.update(source);

    // if this is not a new source, just return
    if (newSource == currentNewSource) {
      break;
    }

    // attempt to replace the currentNewSource with the new source
    if (newSources.replace(source.getPlanNodeId(), currentNewSource, newSource)) {
      break;
    }

    // someone else updated while we were processing
  }

  // attempt to get the lock and process the updates we staged above
  // updates will be processed in close if and only if we got the lock
  tryLockAndProcessPendingStateChanges(0, TimeUnit.MILLISECONDS).close();
}
@Override
public boolean replace(K key, T object) {
  T previous = map.replace(key, object);
  return previous != null;
}

public boolean replace(K k, V v, V v1) {
  k = SharedUtils.checkObject(k);
  v1 = SharedUtils.checkObject(v1);
  return map.replace(k, v, v1);
}

public V replace(K k, V v) {
  k = SharedUtils.checkObject(k);
  v = SharedUtils.checkObject(v);
  return map.replace(k, v);
}

public boolean replace(K k, V v, V v1) {
  Checker.checkType(v1);
  return map.replace(k, v, v1);
}
@Override public void fireEvent(final Hertz event) { final long defaultPollIntervalSeconds = TimeUnit.MINUTES.toSeconds(DEFAULT_POLL_INTERVAL_MINS); if (!Bootstrap.isOperational() || !BootstrapArgs.isCloudController() || !event.isAsserted(defaultPollIntervalSeconds)) { return; } else { if (DEFAULT_POLL_INTERVAL_MINS >= 1) { COLLECTION_INTERVAL_TIME_MS = ((int) TimeUnit.MINUTES.toMillis(DEFAULT_POLL_INTERVAL_MINS) / 2); } else { COLLECTION_INTERVAL_TIME_MS = 0; } if (COLLECTION_INTERVAL_TIME_MS == 0 || HISTORY_SIZE > 15 || HISTORY_SIZE < 1) { LOG.debug("The instance usage report is disabled"); } else if (COLLECTION_INTERVAL_TIME_MS <= MAX_WRITE_INTERVAL_MS) { try { if (event.isAsserted(defaultPollIntervalSeconds)) { if (Bootstrap.isFinished() && Hosts.isCoordinator()) { CloudWatchHelper.DefaultInstanceInfoProvider.refresh(); for (final ServiceConfiguration ccConfig : Topology.enabledServices(ClusterController.class)) { final String ccHost = ccConfig.getHostName(); if (busyHosts.replace(ccHost, false, true) || busyHosts.putIfAbsent(ccHost, true) == null) { Threads.lookup(Reporting.class, DescribeSensorsListener.class) .submit( new Callable<Object>() { @Override public Object call() throws Exception { final ExecutorService executorService = Threads.lookup( Reporting.class, DescribeSensorsListener.class, "response-processing") .limitTo(4); final long startTime = System.currentTimeMillis(); try { final List<String> allInstanceIds = VmInstances.listWithProjection( VmInstances.instanceIdProjection(), VmInstance.criterion(VmState.RUNNING), VmInstance.zoneCriterion(ccConfig.getPartition()), VmInstance.nonNullNodeCriterion()); final Iterable<List<String>> processInts = Iterables.partition(allInstanceIds, SENSOR_QUERY_BATCH_SIZE); for (final List<String> instIds : processInts) { final ArrayList<String> instanceIds = Lists.newArrayList(instIds); /** * Here this is hijacking the sensor callback in order to control * the thread of execution used when firing */ final DescribeSensorCallback msgCallback = new DescribeSensorCallback( HISTORY_SIZE, COLLECTION_INTERVAL_TIME_MS, instanceIds) { @Override public void fireException(Throwable e) {} @Override public void fire(DescribeSensorsResponse msg) {} }; /** * Here we actually get the future reference to the result and on * a response processing thread, invoke .fire(). */ final DescribeSensorsResponse response = AsyncRequests.newRequest(msgCallback) .dispatch(ccConfig) .get(); executorService.submit( new Runnable() { @Override public void run() { try { new DescribeSensorCallback( HISTORY_SIZE, COLLECTION_INTERVAL_TIME_MS, instanceIds) .fire(response); } catch (Exception e) { Exceptions.maybeInterrupted(e); } } }); } } finally { /** Only and finally set the busy bit back to false. */ busyHosts.put(ccHost, false); LOG.debug( "Sensor polling for " + ccHost + " took " + (System.currentTimeMillis() - startTime) + "ms"); } return null; } }); } else { LOG.warn( "Skipping sensors polling for " + ccHost + ", previous poll not complete."); } } } } } catch (Exception ex) { LOG.error("Unable to listen for describe sensors events", ex); } } else { LOG.error( "DEFAULT_POLL_INTERVAL_MINS : " + DEFAULT_POLL_INTERVAL_MINS + " must be less than 1440 minutes"); } } }
public V replace(K k, V v) {
  Checker.checkType(v);
  V ret = map.replace(k, v);
  return Checker.copyIfRequired(ret);
}
// Task allocation and result submission are handled by a single thread, so the status pool
// itself needs no concurrency control here; the relatively expensive send operations are
// handed off to the multi-threaded ServerConnector.
@Override
public void getUnDoJobTasks(GetTaskRequestEvent requestEvent) {
  String jobName = requestEvent.getJobName();
  int jobCount = requestEvent.getRequestJobCount();
  final List<JobTask> jobTasks = new ArrayList<JobTask>();

  // If the manager has been stopped, return an empty list of JobTasks to the slave.
  if (this.stopped) {
    masterNode.echoGetJobTasks(requestEvent.getSequence(), jobTasks, requestEvent.getChannel());
    return;
  }

  // A specific job was requested.
  if (jobName != null && jobs.containsKey(jobName)) {
    Job job = jobs.get(jobName);
    List<JobTask> tasks = job.getJobTasks();

    for (JobTask jobTask : tasks) {
      if (jobTask.getStatus().equals(JobTaskStatus.UNDO)) {
        if (statusPool.replace(jobTask.getTaskId(), JobTaskStatus.UNDO, JobTaskStatus.DOING)) {
          this.allocateTask(jobTask);
          jobTasks.add(jobTask);
          if (jobTasks.size() == jobCount) break;
        }
      }
    }
  } else {
    Iterator<JobTask> taskIter = undoTaskQueue.iterator();

    while (taskIter.hasNext()) {
      // String taskId = taskIds.next();
      // JobTask jobTask = jobTaskPool.get(taskId);
      JobTask jobTask = taskIter.next();

      if (!jobTaskPool.keySet().contains(jobTask.getTaskId())
          || jobs.get(jobTask.getJobName()).getEpoch().get() > jobTask.getJobEpoch()
          || jobs.get(jobTask.getJobName()).getJobTimeOut().get()) {
        taskIter.remove();
        continue;
      }

      if (statusPool.get(jobTask.getTaskId()).equals(JobTaskStatus.UNDO)) {
        if (statusPool.replace(jobTask.getTaskId(), JobTaskStatus.UNDO, JobTaskStatus.DOING)) {
          this.allocateTask(jobTask);
          jobTasks.add(jobTask);
          taskIter.remove();
          if (jobTasks.size() >= jobCount) break;
        }
      } else taskIter.remove();
    }
  }

  // Send the response asynchronously if configured, to reduce the latency of
  // jobManager event handling.
  if (config.isUseAsynModeToSendResponse()) {
    final String sequence = requestEvent.getSequence();
    final Object channel = requestEvent.getChannel();

    // This call is relatively slow, so run it on a separate thread.
    eventProcessThreadPool.execute(
        new Runnable() {
          public void run() {
            try {
              masterNode.echoGetJobTasks(sequence, jobTasks, channel);
            } catch (Throwable e) {
              logger.error(e);
            }
          }
        });
  } else
    masterNode.echoGetJobTasks(requestEvent.getSequence(), jobTasks, requestEvent.getChannel());
}
// Task allocation and result submission are handled by a single thread, so the status pool
// itself needs no concurrency control here; the relatively expensive send operations are
// handed off to the multi-threaded ServerConnector.
@Override
public void addTaskResultToQueue(SendResultsRequestEvent jobResponseEvent) {
  JobTaskResult jobTaskResult = jobResponseEvent.getJobTaskResult();

  if (jobTaskResult.getTaskIds() != null && jobTaskResult.getTaskIds().size() > 0) {
    // Check whether these are stale results from old tasks, judged by the create time of the
    // task and the task result. To be extended later: if the current epoch is smaller than the
    // result's epoch, this node is probably a subordinate master responsible for the reduce
    // step that cannot keep up.
    if (jobTaskPool.get(jobTaskResult.getTaskIds().get(0)) == null) {
      logger.error("jobTask is null " + jobTaskResult.getTaskIds().get(0));
    }

    if (jobTaskResult.getJobEpoch()
        != jobTaskPool.get(jobTaskResult.getTaskIds().get(0)).getJobEpoch()) {
      if (jobTaskResult.getJobEpoch()
          < jobTaskPool.get(jobTaskResult.getTaskIds().get(0)).getJobEpoch()) {
        logger.error(
            "old task result will be discarded! job:"
                + jobTaskPool.get(jobTaskResult.getTaskIds().get(0)).getJobName()
                + ",epoch:"
                + jobTaskResult.getJobEpoch()
                + ",slave:"
                + jobResponseEvent.getChannel());
        masterNode.echoSendJobTaskResults(
            jobResponseEvent.getSequence(), "success", jobResponseEvent.getChannel());
        return;
      } else {
        // Allow a tolerance window before resetting the job.
        jobs.get(jobTaskPool.get(jobTaskResult.getTaskIds().get(0)).getJobName())
            .blockToResetJob(15000);

        if (jobTaskResult.getJobEpoch()
            > jobTaskPool.get(jobTaskResult.getTaskIds().get(0)).getJobEpoch()) {
          logger.error(
              "other master can't merge in time! job:"
                  + jobTaskPool.get(jobTaskResult.getTaskIds().get(0)).getJobName());
          masterNode.echoSendJobTaskResults(
              jobResponseEvent.getSequence(), "success", jobResponseEvent.getChannel());
          return;
        }
      }
    }

    if (logger.isWarnEnabled()) {
      StringBuilder ts =
          new StringBuilder("Receive slave analysis result, jobTaskIds : ")
              .append(jobTaskResult.toString())
              .append(", ")
              .append(jobTaskResult.getTaskIds().size());
      logger.warn(ts.toString());
    }

    // Put the result into the queue first to avoid a low-probability multi-threading race.
    jobTaskResultsQueuePool
        .get(jobTaskPool.get(jobTaskResult.getTaskIds().get(0)).getJobName())
        .offer(jobTaskResult);

    for (int i = 0; i < jobTaskResult.getTaskIds().size(); i++) {
      String taskId = jobTaskResult.getTaskIds().get(i);
      JobTask jobTask = jobTaskPool.get(taskId);

      if (jobTask == null) {
        logger.error(
            new StringBuilder("taskId :").append(taskId).append(" does not exist!").toString());
        continue;
      }

      Job job = jobs.get(jobTask.getJobName());
      if (job == null) {
        logger.error(
            new StringBuilder("job :")
                .append(jobTask.getJobName())
                .append(" does not exist!")
                .toString());
        continue;
      }

      if (statusPool.replace(taskId, JobTaskStatus.DOING, JobTaskStatus.DONE)
          || statusPool.replace(taskId, JobTaskStatus.UNDO, JobTaskStatus.DONE)) {
        logger.info("task " + jobTask.getJobName() + " of job " + job.getJobName() + " done");
        jobTask.setStatus(JobTaskStatus.DONE);
        jobTask.setEndTime(System.currentTimeMillis());
        jobTask.setLastMergedEpoch(job.getEpoch().get());
        job.getCompletedTaskCount().incrementAndGet();
      }

      // Record metrics for the jobTask's execution result.
      StringBuilder log =
          new StringBuilder(ReportUtil.SLAVE_LOG)
              .append(",")
              .append(System.currentTimeMillis())
              .append(",")
              .append(job.getEpoch())
              .append(",");
      log.append(jobTask.getJobName())
          .append(",")
          .append(jobTask.getTaskId())
          .append(",")
          .append(jobTask.getRecycleCounter().get())
          .append(",")
          .append(jobTaskResult.getSlaveIp())
          .append(",")
          .append(jobTaskResult.getEfficiency())
          .append(",");

      JobTaskExecuteInfo executeInfo =
          jobTaskResult.getTaskExecuteInfos().get(jobTask.getTaskId());

      if (executeInfo != null)
        log.append(executeInfo.getAnalysisConsume())
            .append(",")
            .append(executeInfo.getJobDataSize())
            .append(",")
            .append(executeInfo.getTotalLine())
            .append(",")
            .append(executeInfo.getErrorLine())
            .append(",")
            .append(executeInfo.getEmptyLine());
      else
        logger.error(
            new StringBuilder()
                .append("taskId : ")
                .append(jobTask.getTaskId())
                .append(" executeInfo is null!")
                .toString());

      ReportUtil.clusterLog(log.toString());
    }
  }

  // Send the response asynchronously if configured, to reduce the latency of
  // jobManager event handling.
  if (config.isUseAsynModeToSendResponse()) {
    final String sequence = jobResponseEvent.getSequence();
    final Object channel = jobResponseEvent.getChannel();

    eventProcessThreadPool.execute(
        new Runnable() {
          public void run() {
            try {
              masterNode.echoSendJobTaskResults(sequence, "success", channel);
            } catch (Throwable e) {
              logger.error(e);
            }
          }
        });
  } else
    masterNode.echoSendJobTaskResults(
        jobResponseEvent.getSequence(), "success", jobResponseEvent.getChannel());
}
public boolean replace(K key, V oldValue, V newValue) {
  Object keyReference = referenceKey(key);
  Object referenceAwareOldValue = makeValueReferenceAware(oldValue);
  return delegate.replace(
      keyReference, referenceAwareOldValue, referenceValue(keyReference, newValue));
}
// Note that we suppress deprecation since we have the size method.
@SuppressWarnings("deprecation")
@Override
public MapResponse invoke(ClientDescriptor clientDescriptor, MapOperation input) {
  MapResponse response;
  switch (input.operationType()) {
    case PUT: {
      PutOperation putOperation = (PutOperation) input;
      Object key = putOperation.getKey();
      Object old = map.get(key);
      map.put(key, putOperation.getValue());
      response = new MapValueResponse(old);
      break;
    }
    case GET: {
      Object key = ((GetOperation) input).getKey();
      response = new MapValueResponse(map.get(key));
      break;
    }
    case REMOVE: {
      Object key = ((RemoveOperation) input).getKey();
      response = new MapValueResponse(map.remove(key));
      break;
    }
    case CONTAINS_KEY: {
      Object key = ((ContainsKeyOperation) input).getKey();
      response = new BooleanResponse(map.containsKey(key));
      break;
    }
    case CONTAINS_VALUE: {
      Object value = ((ContainsValueOperation) input).getValue();
      response = new BooleanResponse(map.containsValue(value));
      break;
    }
    case CLEAR: {
      map.clear();
      // There is no response from the clear.
      response = new NullResponse();
      break;
    }
    case PUT_ALL: {
      @SuppressWarnings("unchecked")
      Map<Object, Object> newValues = (Map<Object, Object>) ((PutAllOperation) input).getMap();
      map.putAll(newValues);
      // There is no response from a put all.
      response = new NullResponse();
      break;
    }
    case KEY_SET: {
      Set<Object> keySet = new HashSet<Object>();
      keySet.addAll(map.keySet());
      response = new KeySetResponse(keySet);
      break;
    }
    case VALUES: {
      Collection<Object> values = new ArrayList<Object>();
      values.addAll(map.values());
      response = new ValueCollectionResponse(values);
      break;
    }
    case ENTRY_SET: {
      Set<Map.Entry<Object, Object>> entrySet = new HashSet<Map.Entry<Object, Object>>();
      for (Map.Entry<Object, Object> entry : map.entrySet()) {
        entrySet.add(
            new AbstractMap.SimpleEntry<Object, Object>(entry.getKey(), entry.getValue()));
      }
      response = new EntrySetResponse(entrySet);
      break;
    }
    case SIZE: {
      response = new SizeResponse(map.size());
      break;
    }
    case PUT_IF_ABSENT: {
      PutIfAbsentOperation operation = (PutIfAbsentOperation) input;
      response = new MapValueResponse(map.putIfAbsent(operation.getKey(), operation.getValue()));
      break;
    }
    case PUT_IF_PRESENT: {
      PutIfPresentOperation operation = (PutIfPresentOperation) input;
      response = new MapValueResponse(map.replace(operation.getKey(), operation.getValue()));
      break;
    }
    case CONDITIONAL_REMOVE: {
      ConditionalRemoveOperation operation = (ConditionalRemoveOperation) input;
      response = new BooleanResponse(map.remove(operation.getKey(), operation.getValue()));
      break;
    }
    case CONDITIONAL_REPLACE: {
      ConditionalReplaceOperation operation = (ConditionalReplaceOperation) input;
      response =
          new BooleanResponse(
              map.replace(operation.getKey(), operation.getOldValue(), operation.getNewValue()));
      break;
    }
    default:
      // Unknown message type.
      throw new AssertionError("Unsupported message type: " + input.operationType());
  }
  return response;
}