  @Test
  public void testRestoreWithInterrupt() throws Exception {

    Configuration taskConfig = new Configuration();
    StreamConfig cfg = new StreamConfig(taskConfig);
    cfg.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);
    cfg.setStreamOperator(new StreamSource<>(new TestSource()));

    StreamStateHandle lockingHandle = new InterruptLockingStateHandle();

    TaskDeploymentDescriptor tdd = createTaskDeploymentDescriptor(taskConfig, lockingHandle);
    Task task = createTask(tdd);

    // start the task and wait until it is in "restore"
    task.startTaskThread();
    IN_RESTORE_LATCH.await();

    // trigger cancellation; the interrupt issued by cancelExecution() is the signal to continue
    task.cancelExecution();

    task.getExecutingThread().join(30000);

    if (task.getExecutionState() == ExecutionState.CANCELING) {
      fail("Task is stuck and not canceling");
    }

    assertEquals(ExecutionState.CANCELED, task.getExecutionState());
    assertNull(task.getFailureCause());
  }
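
  // Sketch, not from the original excerpt: TestSource is assumed to be a trivial
  // SourceFunction (org.apache.flink.streaming.api.functions.source.SourceFunction).
  // It only has to exist so the operator chain has a source to restore; the task is
  // cancelled while still restoring state, so run() is never reached.
  private static class TestSource implements SourceFunction<Object> {
    private static final long serialVersionUID = 1L;

    @Override
    public void run(SourceContext<Object> ctx) throws Exception {
      // never reached in this test: restore blocks in the state handle
      // and the task is cancelled before the source starts running
    }

    @Override
    public void cancel() {}
  }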

  private void block() {
    // signal the test thread that the restore has reached this blocking read
    IN_RESTORE_LATCH.trigger();

    // This mimics what happens in the HDFS client code: an interrupt while
    // waiting on the object monitor leads to an endless re-wait loop until
    // the stream is closed.
    try {
      synchronized (this) {
        wait();
      }
    } catch (InterruptedException e) {
      while (!isClosed()) {
        try {
          synchronized (this) {
            wait();
          }
        } catch (InterruptedException ignored) {
        }
      }
    }
  }
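
  // Sketch (assumption, names illustrative, needs java.io.InputStream / IOException):
  // block() and isClosed() above are assumed to be members of the stream that
  // InterruptLockingStateHandle hands to the restoring task. The surrounding pieces
  // could look roughly like this, with block() from above as a member of the same class:
  private static class InterruptLockingStream extends InputStream {

    private volatile boolean closed;

    boolean isClosed() {
      return closed;
    }

    @Override
    public int read() throws IOException {
      block(); // parks the restoring thread until close() is called
      return 0;
    }

    @Override
    public void close() {
      closed = true;
      synchronized (this) {
        notifyAll(); // wakes the thread parked in block()
      }
    }
  }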