void markFinished(Map<AccumulatorRegistry.Metric, Accumulator<?, ?>> flinkAccumulators,
		Map<String, Accumulator<?, ?>> userAccumulators) {

	// this call usually comes during RUNNING, but may also come while still in
	// DEPLOYING (very fast tasks!)
	while (true) {
		ExecutionState current = this.state;

		if (current == RUNNING || current == DEPLOYING) {

			if (transitionState(current, FINISHED)) {
				try {
					for (IntermediateResultPartition finishedPartition
							: getVertex().finishAllBlockingPartitions()) {

						IntermediateResultPartition[] allPartitions =
								finishedPartition.getIntermediateResult().getPartitions();

						for (IntermediateResultPartition partition : allPartitions) {
							scheduleOrUpdateConsumers(partition.getConsumers());
						}
					}

					synchronized (accumulatorLock) {
						this.flinkAccumulators = flinkAccumulators;
						this.userAccumulators = userAccumulators;
					}

					assignedResource.releaseSlot();
					vertex.getExecutionGraph().deregisterExecution(this);
				}
				finally {
					vertex.executionFinished();
				}
				return;
			}
		}
		else if (current == CANCELING) {
			// we sent a cancel call, and the task manager finished before it arrived. We
			// will never get a CANCELED call back from the task manager
			cancelingComplete();
			return;
		}
		else if (current == CANCELED || current == FAILED) {
			if (LOG.isDebugEnabled()) {
				LOG.debug("Task FINISHED, but concurrently went to state " + state);
			}
			return;
		}
		else {
			// this should not happen; we need to fail this execution
			markFailed(new Exception(
					"Vertex received FINISHED message while being in state " + state));
			return;
		}
	}
}
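/*
 * Illustrative sketch (not Flink source): the while(true) loop above is the classic
 * compare-and-set retry pattern. transitionState() is assumed here to be a CAS on an
 * atomic state field; a false return means a concurrent transition won the race, so
 * the caller re-reads the state and dispatches again. All names below are hypothetical.
 */
import java.util.concurrent.atomic.AtomicReference;

class CasTransitionSketch {

	enum State { DEPLOYING, RUNNING, FINISHED, CANCELING, CANCELED, FAILED }

	private final AtomicReference<State> state = new AtomicReference<>(State.DEPLOYING);

	/** Atomically moves from 'expected' to 'target'; false if another thread raced us. */
	boolean transitionState(State expected, State target) {
		return state.compareAndSet(expected, target);
	}

	/** Mirrors the retry structure of markFinished(): re-read, dispatch, CAS, repeat. */
	void markFinished() {
		while (true) {
			State current = state.get();
			if (current == State.RUNNING || current == State.DEPLOYING) {
				if (transitionState(current, State.FINISHED)) {
					return; // we own the FINISHED transition; do the finish work here
				}
				// CAS lost: another thread changed the state; loop and re-evaluate
			}
			else {
				return; // terminal or canceling states are handled by other branches
			}
		}
	}
}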
@Test
public void testRegistrationOfExecutionsCanceled() {
	try {
		final JobVertexID jid1 = new JobVertexID();
		final JobVertexID jid2 = new JobVertexID();

		JobVertex v1 = new JobVertex("v1", jid1);
		JobVertex v2 = new JobVertex("v2", jid2);

		Map<ExecutionAttemptID, Execution> executions = setupExecution(v1, 19, v2, 37);

		for (Execution e : executions.values()) {
			e.cancel();
			e.cancelingComplete();
		}

		assertEquals(0, executions.size());
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
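/*
 * Illustrative sketch (hypothetical names, not the Flink test utilities): the
 * assertEquals(0, executions.size()) above only holds if setupExecution() returns
 * the execution graph's live registry map, which shrinks as each execution
 * deregisters on cancelingComplete(); a defensive copy would keep its original
 * size. A minimal model of that live-view behavior:
 */
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class LiveRegistrySketch {

	private final Map<String, Object> currentExecutions = new ConcurrentHashMap<>();

	void register(String attemptId, Object execution) {
		currentExecutions.put(attemptId, execution);
	}

	/** Called when an execution reaches a terminal state (e.g. CANCELED). */
	void deregister(String attemptId) {
		currentExecutions.remove(attemptId);
	}

	/** Returns the live map, so callers observe deregistrations immediately. */
	Map<String, Object> getRegisteredExecutions() {
		return currentExecutions;
	}
}
// Usage: a reference obtained via getRegisteredExecutions() before cancellation
// reports size() == 0 after every attempt has been deregistered, which is the
// property the test asserts.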