@Override
 public void close(TaskAttemptContext context) throws InterruptedException, IOException {
   LOG.info("SqoopOutputFormatLoadExecutor::SqoopRecordWriter is about to be closed");
   free.acquire();
   writerFinished = true;
   filled.release();
   waitForConsumer();
   LOG.info("SqoopOutputFormatLoadExecutor::SqoopRecordWriter is closed");
 }
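
 // Consumer thread: instantiates the connector's Loader and drives it
 // with a DataReader that pulls records from the shared toDataFormat.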
    @SuppressWarnings({"rawtypes", "unchecked"})
    @Override
    public void run() {
      LOG.info("SqoopOutputFormatLoadExecutor consumer thread is starting");
      try {
        DataReader reader = new SqoopOutputFormatDataReader();
        Configuration conf = context.getConfiguration();
        Loader loader = (Loader) ClassUtils.instantiate(loaderName);

        // Objects that should be passed to the Loader
        PrefixContext subContext =
            new PrefixContext(conf, MRJobConstants.PREFIX_CONNECTOR_TO_CONTEXT);
        Object connectorLinkConfig =
            MRConfigurationUtils.getConnectorLinkConfigUnsafe(Direction.TO, conf);
        Object connectorToJobConfig =
            MRConfigurationUtils.getConnectorJobConfigUnsafe(Direction.TO, conf);
        // Using the TO schema since the SqoopDataWriter in the SqoopMapper
        // encapsulates the toDataFormat

        // Create loader context
        LoaderContext loaderContext = new LoaderContext(subContext, reader, matcher.getToSchema());

        LOG.info("Running loader class " + loaderName);
        loader.load(loaderContext, connectorLinkConfig, connectorToJobConfig);
        LOG.info("Loader has finished");
        ((TaskAttemptContext) jobctx)
            .getCounter(SqoopCounters.ROWS_WRITTEN)
            .increment(loader.getRowsWritten());

      } catch (Throwable t) {
        readerFinished = true;
        LOG.error("Error while loading data out of MR job.", t);
        // Release so that the writer can tell Sqoop something went
        // wrong.
        free.release();
        throw new SqoopException(SparkExecutionError.SPARK_EXEC_0018, t);
      }

      // if no exception happens yet and reader finished before writer,
      // something went wrong
      if (!writerFinished) {
        // throw exception if data are not all consumed
        readerFinished = true;
        LOG.error("Reader terminated, but writer is still running!");
        // Release so that the writer can tell Sqoop something went
        // wrong.
        free.release();
        throw new SqoopException(SparkExecutionError.SPARK_EXEC_0019);
      }
      // inform writer that reader is finished
      readerFinished = true;
    }
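
 // The filled/free semaphore pair implements a single-slot handoff:
 // the writer releases `filled` after publishing a record into the
 // shared toDataFormat, and the reader releases `free` once it has
 // consumed that record.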
 private void acquireSema() throws InterruptedException {
   // Has any more data been produced since we last consumed?
   // If not, block until the producer publishes the next record.
   try {
     filled.acquire();
   } catch (InterruptedException ex) {
     // There is nothing to recover at this point; just log and rethrow.
     LOG.error("Interrupted while waiting for data to be available from mapper", ex);
     throw ex;
   }
 }
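
 // Called by the Loader through the DataReader API; returns null once
 // the writer has signalled that no more records are coming.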
 @Override
 public Object readContent() throws InterruptedException {
   acquireSema();
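   // The writer releases `filled` one final time from close(); returning
   // null here signals end-of-data to the Loader.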
   if (writerFinished) {
     return null;
   }
   try {
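     // Hand back the record the writer published into the shared buffer.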
     return toDataFormat.getData();
   } catch (Throwable t) {
     readerFinished = true;
     LOG.error("Caught exception e while getting content ", t);
     throw new SqoopException(SparkExecutionError.SPARK_EXEC_0018, t);
   } finally {
     releaseSema();
   }
 }