/**
 * Schedules a single deferred upload check on the scheduled executor, unless one is
 * already pending. The pending flag is guarded by {@code uploadCheckLock}; the scheduled
 * task clears the flag first (under the same lock) and then re-evaluates the upload
 * decision via {@link #uploadIfNeeded()}.
 */
protected void scheduleUploadCheck() {
  LOG.trace("Attempt to execute upload check: {}", uploadCheckInProgress);
  synchronized (uploadCheckLock) {
    if (uploadCheckInProgress) {
      // A check is already queued; scheduling another would be redundant.
      LOG.trace("Upload check is already scheduled!");
      return;
    }
    LOG.trace("Scheduling upload check with timeout: {}", strategy.getUploadCheckPeriod());
    uploadCheckInProgress = true;
    Runnable checkTask =
        new Runnable() {
          @Override
          public void run() {
            // Reset the flag before running the check so a new check can be scheduled
            // while uploadIfNeeded() is executing.
            synchronized (uploadCheckLock) {
              uploadCheckInProgress = false;
            }
            uploadIfNeeded();
          }
        };
    executorContext
        .getScheduledExecutor()
        .schedule(checkTask, strategy.getUploadCheckPeriod(), TimeUnit.SECONDS);
  }
}
/**
 * Creates a log collector bound to the given transport and channel infrastructure.
 *
 * <p>Upload strategy, in-memory log storage (sized from the default strategy's batch
 * limits), and the upload controller are initialized to their default implementations.
 *
 * @param transport        transport used to sync collected logs to the server
 * @param executorContext  source of the callback/scheduled executors used by this collector
 * @param channelManager   Kaa channel manager
 * @param failoverManager  failover manager
 */
public AbstractLogCollector(
    LogTransport transport,
    ExecutorContext executorContext,
    KaaChannelManager channelManager,
    FailoverManager failoverManager) {
  this.transport = transport;
  this.executorContext = executorContext;
  this.channelManager = channelManager;
  this.failoverManager = failoverManager;
  // Defaults; storage capacity is derived from the default strategy's batch settings.
  this.strategy = new DefaultLogUploadStrategy();
  this.storage = new MemLogStorage(strategy.getBatchSize(), strategy.getBatchCount());
  this.controller = new DefaultLogUploadController();
}
/**
 * Handles a log delivery response from the server.
 *
 * <p>For each delivery status: on success the corresponding record block is removed from
 * storage; on failure the block is marked for re-upload and the strategy's failure
 * handler is invoked asynchronously on the callback executor. Every reported request id
 * is removed from timeout tracking. If no failure handler was scheduled, the upload
 * decision is re-evaluated immediately.
 *
 * @param logSyncResponse server response carrying per-bucket delivery statuses
 * @throws IOException declared for implementers; not thrown by this method body directly
 */
@Override
public synchronized void onLogResponse(LogSyncResponse logSyncResponse) throws IOException {
  if (logSyncResponse.getDeliveryStatuses() != null) {
    boolean failureHandlerScheduled = false;
    for (LogDeliveryStatus status : logSyncResponse.getDeliveryStatuses()) {
      if (status.getResult() == SyncResponseResultType.SUCCESS) {
        storage.removeRecordBlock(status.getRequestId());
      } else {
        storage.notifyUploadFailed(status.getRequestId());
        // Capture error code and controller for the async callback.
        final LogDeliveryErrorCode errorCode = status.getErrorCode();
        final LogFailoverCommand failoverCommand = this.controller;
        Runnable failureTask =
            new Runnable() {
              @Override
              public void run() {
                strategy.onFailure(failoverCommand, errorCode);
              }
            };
        executorContext.getCallbackExecutor().execute(failureTask);
        failureHandlerScheduled = true;
      }
      LOG.info("Removing bucket id from timeouts: {}", status.getRequestId());
      timeouts.remove(status.getRequestId());
    }
    if (!failureHandlerScheduled) {
      // No failure path triggered a strategy callback; check whether more logs
      // should be uploaded right away.
      processUploadDecision(strategy.isUploadNeeded(storage.getStatus()));
    }
  }
}
/**
 * Populates an outgoing sync request with the next block of stored log records.
 *
 * <p>Does nothing when storage reports zero records. Otherwise pulls a record block
 * sized by the strategy's batch limits, copies the records into the request, and
 * registers the block id for delivery-timeout tracking, scheduling a timeout check
 * after the strategy's timeout period.
 *
 * @param request sync request to fill with log entries and the block's request id
 */
@Override
public void fillSyncRequest(LogSyncRequest request) {
  if (storage.getStatus().getRecordCount() == 0) {
    LOG.debug("Log storage is empty");
    return;
  }
  LogBlock group = storage.getRecordBlock(strategy.getBatchSize(), strategy.getBatchCount());
  if (group == null) {
    // Storage could not assemble a block under the current batch limits.
    LOG.warn("Log group is null: log group size is too small");
    return;
  }
  List<LogRecord> recordList = group.getRecords();
  if (recordList.isEmpty()) {
    return;
  }
  LOG.trace("Sending {} log records", recordList.size());
  List<LogEntry> logs = new LinkedList<>();
  for (LogRecord record : recordList) {
    logs.add(new LogEntry(ByteBuffer.wrap(record.getData())));
  }
  request.setRequestId(group.getBlockId());
  request.setLogEntries(logs);
  LOG.info("Adding following bucket id [{}] for timeout tracking", group.getBlockId());
  timeouts.add(group.getBlockId());
  final LogBlock timeoutGroup = group;
  Runnable timeoutTask =
      new Runnable() {
        @Override
        public void run() {
          checkDeliveryTimeout(timeoutGroup.getBlockId());
        }
      };
  executorContext
      .getScheduledExecutor()
      .schedule(timeoutTask, strategy.getTimeout(), TimeUnit.SECONDS);
}
/**
 * Acts on the strategy's upload decision: {@code UPLOAD} triggers an immediate transport
 * sync; {@code NOOP} schedules a deferred re-check when periodic checking is enabled and
 * there are still records waiting in storage. Any other decision is ignored.
 *
 * @param decision decision produced by the upload strategy
 */
private void processUploadDecision(LogUploadStrategyDecision decision) {
  switch (decision) {
    case NOOP:
      // Only defer a re-check when checking is enabled and there is something to upload.
      boolean periodicCheckEnabled = strategy.getUploadCheckPeriod() > 0;
      if (periodicCheckEnabled && storage.getStatus().getRecordCount() > 0) {
        scheduleUploadCheck();
      }
      break;
    case UPLOAD:
      transport.sync();
      break;
    default:
      break;
  }
}
/**
 * Asks the strategy whether an upload is warranted given current storage status and
 * acts on the resulting decision.
 */
protected void uploadIfNeeded() {
  LogUploadStrategyDecision decision = strategy.isUploadNeeded(storage.getStatus());
  processUploadDecision(decision);
}