private void processMessages() {
    List<NetData.NetMessage> messages = Lists.newArrayListWithExpectedSize(queuedMessages.size());
    queuedMessages.drainTo(messages);
    for (NetData.NetMessage message : messages) {
        if (message.hasTime()) {
            time.updateTimeFromServer(message.getTime());
        }
        processBlockRegistrations(message);
        processReceivedChunks(message);
        processInvalidatedChunks(message);
        processBlockChanges(message);
        processBiomeChanges(message);
        processRemoveEntities(message);
        for (NetData.CreateEntityMessage createEntity : message.getCreateEntityList()) {
            createEntityMessage(createEntity);
        }
        for (NetData.UpdateEntityMessage updateEntity : message.getUpdateEntityList()) {
            updateEntity(updateEntity);
        }
        for (NetData.EventMessage event : message.getEventList()) {
            try {
                processEvent(event);
            } catch (RuntimeException e) {
                logger.error("Error processing server event", e);
            }
        }
    }
}
/** Shuts down the scheduler. All subsequent execution requests will be rejected. */
public void shutdown() throws InterruptedException {
    checkState(!_isShutdown);
    _isShutdown = true;

    /* Drain jobs from the queue so they will never be started. Has to be done
     * before killing jobs as otherwise the queued jobs will immediately fill
     * the freed job slot.
     */
    Collection<PrioritizedRequest> toBeCancelled = new ArrayList<>();
    _queue.drainTo(toBeCancelled);

    /* Kill both the jobs that were queued and those that are running. */
    _jobs.values().forEach(j -> j.kill("shutdown"));

    /* Jobs that were queued were never submitted for execution, so we
     * trigger postprocessing manually.
     */
    toBeCancelled.forEach(this::postprocessWithoutJobSlot);

    LOGGER.info("Waiting for movers on queue '{}' to finish", _name);
    if (!_semaphore.tryAcquire(_semaphore.getMaxPermits(), 2, TimeUnit.SECONDS)) {
        // This is often due to a mover not reacting to interrupt or the transfer
        // doing a lengthy checksum calculation during post processing.
        String versions = _jobs.values().stream()
                .map(PrioritizedRequest::getMover)
                .map(Mover::getProtocolInfo)
                .map(ProtocolInfo::getVersionString)
                .collect(joining(","));
        LOGGER.warn("Failed to terminate some movers prior to shutdown: {}", versions);
    }
}
public ViewState takeTask(final long timeout, final TimeUnit unit, final boolean useLastState) {
    ViewState task = null;
    try {
        task = queue.poll(timeout, unit);
        if (task != null && useLastState) {
            final ArrayList<ViewState> list = new ArrayList<ViewState>();
            // Workaround for a possible ConcurrentModificationException
            while (true) {
                list.clear();
                try {
                    if (queue.drainTo(list) > 0) {
                        task = list.get(list.size() - 1);
                    }
                    break;
                } catch (Throwable ex) {
                    // Go to next attempt
                    LCTX.e("Unexpected error on retrieving last view state from draw queue: "
                            + ex.getMessage());
                }
            }
        }
    } catch (final InterruptedException e) {
        Thread.interrupted(); // clears this thread's interrupt flag
    } catch (Throwable ex) {
        // Go to next attempt
        LCTX.e("Unexpected error on retrieving view state from draw queue: " + ex.getMessage());
    }
    return task;
}
/**
 * The implementation of the getPackets() function. This is a blocking API.
 *
 * @param sessionid the session identifier
 * @return A list of packets associated with the session
 */
@Override
public List<ByteBuffer> getPackets(String sessionid) throws org.apache.thrift.TException {
    List<ByteBuffer> packets = new ArrayList<ByteBuffer>();
    int count = 0;
    // Wait up to 100 x 100 ms for the session's queue to appear.
    while (!msgQueues.containsKey(sessionid) && count++ < 100) {
        log.debug("Queue for session {} doesn't exist yet.", sessionid);
        try {
            Thread.sleep(100); // Wait 100 ms before checking again.
        } catch (InterruptedException e) {
            log.error(e.toString());
        }
    }
    if (count < 100) {
        SessionQueue pQueue = msgQueues.get(sessionid);
        BlockingQueue<ByteBuffer> queue = pQueue.getQueue();
        // Block if the queue is empty, then drain whatever else is available.
        try {
            packets.add(queue.take());
            queue.drainTo(packets);
        } catch (InterruptedException e) {
            log.error(e.toString());
        }
    }
    return packets;
}
/**
 * Drain the queue of pending counts into the provided buffer and write those counts to DynamoDB.
 * This blocks until data is available in the queue.
 *
 * @param buffer A reusable buffer with sufficient space to drain the entire queue if necessary.
 *     This is provided as an optimization to avoid allocating a new buffer every interval.
 * @throws InterruptedException Thread interrupted while waiting for new data to arrive in the
 *     queue.
 */
protected void sendQueueToDynamoDB(List<HttpReferrerPairsCount> buffer)
        throws InterruptedException {
    // Block while waiting for data
    buffer.add(counts.take());
    // Drain as much of the queue as we can.
    // DynamoDBMapper will handle splitting the batch sizes for us.
    counts.drainTo(buffer);
    try {
        long start = System.nanoTime();
        // Write the contents of the buffer as items to our table
        List<FailedBatch> failures = mapper.batchWrite(buffer, Collections.emptyList());
        long end = System.nanoTime();
        LOG.info(String.format("%d new counts sent to DynamoDB in %dms",
                buffer.size(), TimeUnit.NANOSECONDS.toMillis(end - start)));
        for (FailedBatch failure : failures) {
            LOG.warn("Error sending count batch to DynamoDB. This will not be retried!",
                    failure.getException());
        }
    } catch (Exception ex) {
        LOG.error("Error sending new counts to DynamoDB. Some counts may not be persisted.", ex);
    }
}
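/*
 * A minimal sketch (not from the source) of the loop that might drive
 * sendQueueToDynamoDB above: the buffer is allocated once and cleared between
 * iterations, matching the javadoc's note about buffer reuse. The method name
 * runSenderLoop is hypothetical.
 */
protected void runSenderLoop() {
    List<HttpReferrerPairsCount> buffer = new ArrayList<>();
    while (!Thread.currentThread().isInterrupted()) {
        try {
            buffer.clear();
            sendQueueToDynamoDB(buffer); // blocks until at least one count arrives
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // stop the loop, preserve the flag
        }
    }
}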
@Override
public void run() {
    try {
        boolean running = true;
        while (running) {
            try {
                // block on event availability
                ThreadBoundEvent event = queue.take();
                // add to the batch, and see if we can add more
                batch.add(event);
                if (maxBatchSize > 0) {
                    queue.drainTo(batch, maxBatchSize);
                }
                // check for the stop condition (and remove it);
                // treat batches of 1 (the most common case) specially
                if (batch.size() > 1) {
                    ListIterator<ThreadBoundEvent> itr = batch.listIterator();
                    while (itr.hasNext()) {
                        ThreadBoundEvent next = itr.next();
                        if (next.getClass().equals(ShutdownTask.class)) {
                            running = false;
                            ((ShutdownTask) next).latch.countDown();
                            itr.remove();
                        }
                    }
                    eventProcessor.process(batch);
                } else {
                    // just the one event, no need to iterate
                    if (event.getClass().equals(ShutdownTask.class)) {
                        running = false;
                        ((ShutdownTask) event).latch.countDown();
                    } else {
                        eventProcessor.process(batch);
                    }
                }
            } catch (InterruptedException e) {
                LOG.warn(String.format("Consumer on queue %s interrupted.",
                        Thread.currentThread().getName()));
                // ignore
            } catch (Throwable exception) {
                LOG.error(String.format("exception on queue %s while executing events",
                        Thread.currentThread().getName()), exception);
            } finally {
                // reset the batch
                batch.clear();
            }
        }
    } catch (Throwable unexpectedThrowable) {
        // We observed some cases where trying to log the inner exception threw an error.
        // Don't use the logger here, as that seems to be causing the problem in the first place.
        System.err.println("Caught an unexpected Throwable while logging");
        System.err.println("This problem happens when jar files change at runtime; the JVM might be UNSTABLE");
        unexpectedThrowable.printStackTrace(System.err);
    }
}
/**
 * Drains the queue as {@link java.util.concurrent.BlockingQueue#drainTo(java.util.Collection,
 * int)}, but if the requested {@code numElements} elements are not available, it will wait for
 * them up to the specified timeout.
 *
 * @param q the blocking queue to be drained
 * @param buffer where to add the transferred elements
 * @param numElements the number of elements to be waited for
 * @param timeout how long to wait before giving up, in units of {@code unit}
 * @param unit a {@code TimeUnit} determining how to interpret the timeout parameter
 * @return the number of elements transferred
 * @throws InterruptedException if interrupted while waiting
 */
@Beta
public static <E> int drain(BlockingQueue<E> q, Collection<? super E> buffer, int numElements,
        long timeout, TimeUnit unit) throws InterruptedException {
    Preconditions.checkNotNull(buffer);
    /*
     * This code performs one System.nanoTime() more than necessary, and in return, the time to
     * execute Queue#drainTo is not added *on top* of waiting for the timeout (which could make
     * the timeout arbitrarily inaccurate, given a queue that is slow to drain).
     */
    long deadline = System.nanoTime() + unit.toNanos(timeout);
    int added = 0;
    while (added < numElements) {
        // we could rely solely on #poll, but #drainTo might be more efficient when there are
        // multiple elements already available (e.g. LinkedBlockingQueue#drainTo locks only once)
        added += q.drainTo(buffer, numElements - added);
        if (added < numElements) { // not enough elements immediately available; will have to poll
            E e = q.poll(deadline - System.nanoTime(), TimeUnit.NANOSECONDS);
            if (e == null) {
                break; // we already waited enough, and there are no more elements in sight
            }
            buffer.add(e);
            added++;
        }
    }
    return added;
}
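/*
 * A usage sketch for the drain helper above, assuming a hypothetical
 * BlockingQueue<String> of events: wait up to one second for a batch of ten,
 * then process whatever arrived (possibly fewer if the timeout elapsed).
 */
static void drainExample(BlockingQueue<String> events) throws InterruptedException {
    List<String> batch = new ArrayList<>();
    int transferred = drain(events, batch, 10, 1, TimeUnit.SECONDS);
    System.out.println("drained " + transferred + " events");
    for (String event : batch) {
        System.out.println("processing " + event);
    }
}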
public boolean putMessage(String message, long timeout) {
    BufferListener listener = listenerRef.get();
    if (listener != null) {
        try {
            if (queue.size() == 0) {
                return listener.onMessage(message);
            } else {
                ArrayList<String> messages = new ArrayList<String>(queue.size() + 1);
                queue.drainTo(messages);
                messages.add(message);
                return listener.onMessages(messages);
            }
        } catch (Throwable t) {
            return false;
        }
    } else {
        try {
            // Permits are counted in characters, bounding the total buffered text.
            if (!inputSemaphore.tryAcquire(message.length(), timeout, TimeUnit.MILLISECONDS)) {
                return false;
            }
            queue.offer(message);
            return true;
        } catch (InterruptedException e) {
            return false;
        }
    }
}
/** Submit data to the carrier. */
private void startSubmitData() {
    while (isContinue) {
        List<SmQueue> tempList = new LinkedList<SmQueue>();
        try {
            // Pause operations at 23:58 every night
            if (ConstantUtils.isPause_23_58()) {
                Thread.sleep(10 * 60 * 1000);
            }
            if (queue.size() >= (smgFlowLimit / 2) || isDrainTo()) {
                int num = queue.drainTo(tempList, (smgFlowLimit / 2));
                lastDrainToTime = System.currentTimeMillis();
                if (num > 0) {
                    submitDataThreadPool.execute(new SubmitChildThread(tempList, channel));
                }
            } else {
                Thread.sleep(1000);
            }
        } catch (InterruptedException e1) {
            // ignore and keep looping
        }
    }
}
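/*
 * isDrainTo() is referenced above but not shown. A plausible implementation,
 * guessed from the lastDrainToTime bookkeeping (this is an assumption, not the
 * source's code): flush at least once per second even while the queue stays
 * below the smgFlowLimit / 2 threshold, so small trickles are not starved.
 */
private boolean isDrainTo() {
    // assumed: drain whenever data is waiting and a second has passed since the last drain
    return queue.size() > 0 && System.currentTimeMillis() - lastDrainToTime > 1000;
}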
public void clear() {
    List<String> list = new ArrayList<String>();
    queue.drainTo(list);
    for (String str : list) {
        inputSemaphore.release(str.length());
    }
}
public synchronized void setOrder(Order order) {
    if (order != _order) {
        // Rebuild the priority queue with the new ordering and move all
        // pending requests across.
        PriorityBlockingQueue<PrioritizedRequest> queue = createQueue(order);
        _queue.drainTo(queue);
        _queue = queue;
        _order = order;
    }
}
// Push job execution results
private List<JobExecutionInfo> _push_job_result_() {
    List<JobExecutionInfo> doneJobList = new LinkedList<JobExecutionInfo>();
    if (doneJobQueue.size() > 0) {
        doneJobQueue.drainTo(doneJobList);
    }
    return doneJobList;
}
private void processReceivedChunks() {
    if (remoteWorldProvider != null) {
        List<Chunk> chunks = Lists.newArrayListWithExpectedSize(chunkQueue.size());
        chunkQueue.drainTo(chunks);
        for (Chunk chunk : chunks) {
            remoteWorldProvider.receiveChunk(chunk);
        }
    }
}
public List<String> drainMessages() {
    List<String> list = new ArrayList<String>();
    queue.drainTo(list);
    for (String str : list) {
        inputSemaphore.release(str.length());
    }
    return list;
}
protected void doStop() throws Exception {
    if (log.isDebugEnabled()) {
        log.debug("Stopping service pool: " + this);
    }
    for (BlockingQueue<Service> entry : pool.values()) {
        Collection<Service> values = new ArrayList<Service>();
        entry.drainTo(values);
        ServiceHelper.stopServices(values);
        entry.clear();
    }
    pool.clear();
}
@Override
public void drainDestructableClaims(final Collection<ContentClaim> destination,
        final int maxElements, final long timeout, final TimeUnit unit) {
    try {
        // Block up to the timeout for the first claim, then drain the rest
        // without blocking.
        final ContentClaim firstClaim = destructableClaims.poll(timeout, unit);
        if (firstClaim != null) {
            destination.add(firstClaim);
            destructableClaims.drainTo(destination, maxElements - 1);
        }
    } catch (final InterruptedException e) {
        // interrupted while waiting; return with whatever was drained
    }
}
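/*
 * The poll-then-drainTo idiom above recurs throughout these snippets (see also
 * getPackets and sendQueueToDynamoDB): block for the first element, then move
 * whatever else is already queued without blocking. A generic sketch with
 * hypothetical names, assuming maxElements >= 1:
 */
static <E> int blockingDrain(BlockingQueue<E> queue, Collection<? super E> buffer,
        int maxElements, long timeout, TimeUnit unit) throws InterruptedException {
    E first = queue.poll(timeout, unit); // wait up to the timeout for one element
    if (first == null) {
        return 0; // timed out with nothing available
    }
    buffer.add(first);
    // drainTo never blocks; it only transfers elements already in the queue
    return 1 + queue.drainTo(buffer, maxElements - 1);
}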
@Override
public String produce(String lastSourceOffset, int maxBatchSize, BatchMaker batchMaker)
        throws StageException {
    try {
        Thread.sleep(1000);
    } catch (InterruptedException ex) {
        // ignore
    }
    // report any Kafka producer errors captured by the KafkaUDPConsumer
    errorList.clear();
    errorQueue.drainTo(errorList);
    for (Exception exception : errorList) {
        getContext().reportError(exception);
    }
    return "::asyncudp::" + (counter++) + System.currentTimeMillis();
}
@SuppressWarnings("unchecked") private ClassLoadingCodeRunnerClient.RemoteCodeResponse handle( ClassLoadingCodeRunnerClient.RemoteCodeRequest request) { if (request instanceof ClassLoadingCodeRunnerClient.GetToClientMessagesRequest) { List<ClassLoadingCodeRunnerClient.RemoteCodeMessage> messages = new ArrayList<>(); try { messages.add(toClient.take()); } catch (InterruptedException e) { throw new RuntimeException(e); } toClient.drainTo(messages); ClassLoadingCodeRunnerClient.log.debug("sending to client: {}", messages); return new ToClientMessagesResponse(messages); } else if (request instanceof ClassLoadingCodeRunnerClient.SendToServerMessagesRequest) { ClassLoadingCodeRunnerClient.SendToServerMessagesRequest sendToServerMessagesRequest = (ClassLoadingCodeRunnerClient.SendToServerMessagesRequest) request; for (ClassLoadingCodeRunnerClient.RemoteCodeMessage message : sendToServerMessagesRequest.messages) { ClassLoadingCodeRunnerClient.log.debug("handling toServer message " + message); if (message instanceof ClassLoadingCodeRunnerClient.ServerCodeExitReceived) { toServerDeserializer.shutdown(); exitConfirmationReceived.release(); } else if (message instanceof ClassLoadingCodeRunnerClient.SendResourceMessage) { classLoader.addResource((ClassLoadingCodeRunnerClient.SendResourceMessage) message); } else if (message instanceof ClassLoadingCodeRunnerClient.SendJarsMessage) { classLoader.addJars(((ClassLoadingCodeRunnerClient.SendJarsMessage) message).jars); } else if (message instanceof ClassLoadingCodeRunnerClient.CustomMessageWrapper) { toServerDeserializer.execute( () -> { ClassLoadingCodeRunnerClient.CustomMessageWrapper wrapper = (ClassLoadingCodeRunnerClient.CustomMessageWrapper) message; TMessage wrappedMessage = (TMessage) SerializationHelper.toObject(wrapper.message, classLoader); ClassLoadingCodeRunnerClient.log.debug( "received and deserialized custom message {}", wrappedMessage); toServer.add(wrappedMessage); }); } else { throw new UnsupportedOperationException("Unknown message " + message); } } return new EmptyResponse(); } else { throw new UnsupportedOperationException(request.getClass().getName()); } }
/**
 * Drains the queue as {@linkplain #drain(java.util.concurrent.BlockingQueue,
 * java.util.Collection, int, long, java.util.concurrent.TimeUnit)}, but with a different
 * behavior in case it is interrupted while waiting. In that case, the operation will continue
 * as usual, and in the end the thread's interruption status will be set (no
 * {@code InterruptedException} is thrown).
 *
 * @param q the blocking queue to be drained
 * @param buffer where to add the transferred elements
 * @param numElements the number of elements to be waited for
 * @param timeout how long to wait before giving up, in units of {@code unit}
 * @param unit a {@code TimeUnit} determining how to interpret the timeout parameter
 * @return the number of elements transferred
 */
@Beta
public static <E> int drainUninterruptibly(BlockingQueue<E> q, Collection<? super E> buffer,
        int numElements, long timeout, TimeUnit unit) {
    Preconditions.checkNotNull(buffer);
    long deadline = System.nanoTime() + unit.toNanos(timeout);
    int added = 0;
    boolean interrupted = false;
    try {
        while (added < numElements) {
            // we could rely solely on #poll, but #drainTo might be more efficient when there are
            // multiple elements already available (e.g. LinkedBlockingQueue#drainTo locks only once)
            added += q.drainTo(buffer, numElements - added);
            if (added < numElements) { // not enough elements immediately available; will have to poll
                E e; // written exactly once, by a successful (uninterrupted) invocation of #poll
                while (true) {
                    try {
                        e = q.poll(deadline - System.nanoTime(), TimeUnit.NANOSECONDS);
                        break;
                    } catch (InterruptedException ex) {
                        interrupted = true; // note interruption and retry
                    }
                }
                if (e == null) {
                    break; // we already waited enough, and there are no more elements in sight
                }
                buffer.add(e);
                added++;
            }
        }
    } finally {
        if (interrupted) {
            Thread.currentThread().interrupt();
        }
    }
    return added;
}
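/*
 * A sketch of a situation where drainUninterruptibly above is the right tool
 * (names are hypothetical): a final flush that must collect remaining work even
 * if the worker thread is interrupted as part of shutdown. The interrupt flag
 * is re-set by drainUninterruptibly, so the caller can still observe it.
 */
static void flushOnShutdown(BlockingQueue<Runnable> work) {
    List<Runnable> remaining = new ArrayList<>();
    drainUninterruptibly(work, remaining, Integer.MAX_VALUE, 100, TimeUnit.MILLISECONDS);
    remaining.forEach(Runnable::run); // run whatever was still queued
}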
@Override
public void run() {
    List<Transaction> records = new ArrayList<Transaction>();
    if (archivingEnabled == false) {
        log.info("Archiving has not been enabled");
        return;
    }
    while (isExit() == false) {
        records.clear();
        buffer.drainTo(records);
        PrintWriter wf = null;
        try {
            FileWriter ff = new FileWriter(filename, true);
            wf = new PrintWriter(ff);
        } catch (FileNotFoundException e) {
            log.error("Should not have happened for [" + filename + "]");
            continue;
        } catch (IOException e) {
            log.error("Cannot write to [" + filename + "]");
            setExit(true);
            continue;
        }
        TransactionLogRecord tlr = new TransactionLogRecord();
        for (Transaction t : records) {
            log.debug("Logging " + t);
            if (t instanceof ExitTransaction) {
                setExit(true);
                continue;
            }
            if (t.getDirection().equals(DirectionType.NONE)) {
                log.error("Transaction [" + t + "] is empty");
                continue;
            }
            wf.print(tlr.toString(t));
        }
        wf.close();
    }
    log.info("Archiving is ending");
}
protected void nextBatch() {
    buffer.clear();
    pending.drainTo(drain, MAX_BATCH_SIZE - 1);
    for (BatchIdentity bid : drain) {
        bid.serializeOn(buffer);
    }
    drain.clear();
    buffer.flip();
    if (writeBatch()) {
        fsm.payloadWritten();
    } else if (inError) {
        fsm.close();
    } else {
        handler.selectForWrite();
    }
}
private void consume() throws InterruptedException {
    while (true) {
        List lst = new ArrayList();
        lst.add(queue.take()); // block for at least one element
        queue.drainTo(lst);
        if (lst.contains(quitToken)) {
            // Keep only the LogVO records, flush the final batch, and stop.
            List ret = CollectionUtils.transformToList(lst, new Function() {
                @Override
                public Object call(Object arg) {
                    return arg instanceof LogVO ? arg : null;
                }
            });
            batchWrite(ret);
            return;
        }
        batchWrite(lst);
    }
}
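/*
 * Hypothetical producer side for the consumer above: shutdown is requested by
 * enqueuing the shared quitToken sentinel, so the consumer drains the queue,
 * writes the final batch of LogVO records, and returns. The method name is an
 * assumption based on the consumer code.
 */
public void requestShutdown() {
    queue.offer(quitToken); // poison pill: consumer flushes and exits
}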
private void makeChunksAvailable() {
    List<ReadyChunkInfo> newReadyChunks = Lists.newArrayListWithExpectedSize(readyChunks.size());
    readyChunks.drainTo(newReadyChunks);
    for (ReadyChunkInfo readyChunkInfo : newReadyChunks) {
        nearCache.put(readyChunkInfo.getPos(), readyChunkInfo.getChunk());
        preparingChunks.remove(readyChunkInfo.getPos());
    }
    if (!newReadyChunks.isEmpty()) {
        sortedReadyChunks.addAll(newReadyChunks);
        Collections.sort(sortedReadyChunks, new ReadyChunkRelevanceComparator());
    }
    if (!sortedReadyChunks.isEmpty()) {
        boolean loaded = false;
        for (int i = sortedReadyChunks.size() - 1; i >= 0 && !loaded; i--) {
            ReadyChunkInfo chunkInfo = sortedReadyChunks.get(i);
            PerformanceMonitor.startActivity("Make Chunk Available");
            if (makeChunkAvailable(chunkInfo)) {
                sortedReadyChunks.remove(i);
                loaded = true;
            }
            PerformanceMonitor.endActivity();
        }
    }
}
@Override
public int drainTo(Collection<? super E> c) {
    return localInternalQueue.drainTo(c);
}
public List<RecordedRequest> drainRequests() {
    List<RecordedRequest> requests = new ArrayList<RecordedRequest>();
    requestQueue.drainTo(requests);
    return requests;
}
@Override
public void drainDestructableClaims(final Collection<ContentClaim> destination,
        final int maxElements) {
    final int drainedCount = destructableClaims.drainTo(destination, maxElements);
    logger.debug("Drained {} destructable claims to {}", drainedCount, destination);
}
@Override
public int drainTo(Collection<? super T> c) {
    return delegate.drainTo(c);
}
@Override
public int drainTo(Collection<? super T> c, int maxElements) {
    return delegate.drainTo(c, maxElements);
}
@Override
public int drainTo(Collection<? super E> c, int maxElements) {
    return localInternalQueue.drainTo(c, maxElements);
}
public static final Collection<Long> getWriteLatenciesNanos() {
    final List<Long> latencies = Lists.newArrayListWithCapacity(fsWriteLatenciesNanos.size());
    fsWriteLatenciesNanos.drainTo(latencies);
    return latencies;
}