protected void handleView(View view) {
  this.view = view;
  if (log.isDebugEnabled())
    log.debug("view=" + view);
  List<Address> members = view.getMembers();

  _consumerLock.lock();
  try {
    // Remove any consumers that were registered but have now left the view
    Iterator<Owner> iterator = _consumersAvailable.iterator();
    while (iterator.hasNext()) {
      Owner owner = iterator.next();
      if (!members.contains(owner.getAddress())) {
        iterator.remove();
        sendRemoveConsumerRequest(owner);
      }
    }

    // Remove any run requests whose requestors have now left the view
    iterator = _runRequests.iterator();
    while (iterator.hasNext()) {
      Owner owner = iterator.next();
      if (!members.contains(owner.getAddress())) {
        iterator.remove();
        sendRemoveRunRequest(owner);
      }
    }

    synchronized (_awaitingReturn) {
      for (Entry<Owner, Runnable> entry : _awaitingReturn.entrySet()) {
        // The member currently servicing our request has gone down without
        // completing it, so we keep our request alive by sending it back
        // to the coordinator
        Owner owner = entry.getKey();
        if (!members.contains(owner.getAddress())) {
          Runnable runnable = entry.getValue();
          // We need to register the request id before sending the request back
          // to the coordinator, in case our task gets picked up immediately
          // because another consumer was removed
          _requestId.put(runnable, owner.getRequestId());
          _awaitingConsumer.add(runnable);
          sendToCoordinator(RUN_REQUEST, owner.getRequestId(), local_addr);
        }
      }
    }
  } finally {
    _consumerLock.unlock();
  }
}
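// For reference, a minimal sketch of the Owner value type the loops above iterate
// over: a (member address, request id) pair. This shape is an assumption inferred
// from the getAddress()/getRequestId() calls in handleView(); the real class may
// differ in detail. Value equality matters because Owner is used as a map key
// in _awaitingReturn.
protected static class Owner {
  protected final Address address;
  protected final long requestId;

  public Owner(Address address, long requestId) {
    this.address = address;
    this.requestId = requestId;
  }

  public Address getAddress() { return address; }
  public long getRequestId()  { return requestId; }

  @Override
  public boolean equals(Object o) {
    if (this == o) return true;
    if (!(o instanceof Owner)) return false;
    Owner other = (Owner) o;
    return requestId == other.requestId && address.equals(other.address);
  }

  @Override
  public int hashCode() {
    return 31 * address.hashCode() + (int) (requestId ^ (requestId >>> 32));
  }
}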
protected void replay(long windowId) {
  // This operator can partition itself dynamically. When that happens, a file can be
  // re-hashed to a different partition than before. To handle this, each partition
  // loads all the recovery data for a window and then processes only those files
  // that hash to it in the current run.
  try {
    Map<Integer, Object> recoveryDataPerOperator = idempotentStorageManager.load(windowId);

    for (Object recovery : recoveryDataPerOperator.values()) {
      @SuppressWarnings("unchecked")
      LinkedList<RecoveryEntry> recoveryData = (LinkedList<RecoveryEntry>) recovery;

      for (RecoveryEntry recoveryEntry : recoveryData) {
        if (scanner.acceptFile(recoveryEntry.file)) {
          // The operator may have continued processing the same file across multiple
          // windows, so the recovery states of subsequent windows each contain an
          // entry for that file, only with different offsets. In that case we keep
          // reading from the previously opened stream instead of reopening the file.
          if (currentFile == null || !(currentFile.equals(recoveryEntry.file) && offset == recoveryEntry.startOffset)) {
            if (inputStream != null) {
              closeFile(inputStream);
            }
            processedFiles.add(recoveryEntry.file);

            // Remove the file from the failed and unfinished queues and the pending set
            Iterator<FailedFile> failedFileIterator = failedFiles.iterator();
            while (failedFileIterator.hasNext()) {
              FailedFile ff = failedFileIterator.next();
              if (ff.path.equals(recoveryEntry.file) && ff.offset == recoveryEntry.startOffset) {
                failedFileIterator.remove();
                break;
              }
            }

            Iterator<FailedFile> unfinishedFileIterator = unfinishedFiles.iterator();
            while (unfinishedFileIterator.hasNext()) {
              FailedFile ff = unfinishedFileIterator.next();
              if (ff.path.equals(recoveryEntry.file) && ff.offset == recoveryEntry.startOffset) {
                unfinishedFileIterator.remove();
                break;
              }
            }

            pendingFiles.remove(recoveryEntry.file);

            inputStream = retryFailedFile(new FailedFile(recoveryEntry.file, recoveryEntry.startOffset));
          }
          // Re-emit the entities that were read for this file during the original window
          while (offset < recoveryEntry.endOffset) {
            T line = readEntity();
            offset++;
            emit(line);
          }
        }
      }
    }
  } catch (IOException e) {
    throw new RuntimeException("replay", e);
  }
}
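// For reference, a minimal sketch of the per-window recovery record consumed above:
// the file path plus the [startOffset, endOffset) range of entities read from it
// during the original window. This shape is an assumption inferred from the fields
// referenced in replay(); the actual class may carry additional state or custom
// serialization beyond what is shown here.
protected static class RecoveryEntry {
  final String file;
  final long startOffset;
  final long endOffset;

  RecoveryEntry(String file, long startOffset, long endOffset) {
    this.file = file;
    this.startOffset = startOffset;
    this.endOffset = endOffset;
  }
}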