private void execute(BulkWrites bulkWrites) throws Exception {
    assert bulkWrites != null : "bulk writes passed in are null";
    if (LOG.isDebugEnabled())
      SpliceLogUtils.debug(LOG, "Executing BulkWriteAction: id=%d, bulkWrites=%s", id, bulkWrites);
    boolean first = true;
    LinkedList<BulkWrites> writesToPerform = Lists.newLinkedList();
    writesToPerform.add(bulkWrites);
    WriteAttemptContext ctx = new WriteAttemptContext();
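    /*
     * Writes are processed off a work queue: each pass pulls one BulkWrites, attempts it,
     * and re-enqueues whatever must be retried, either the same write or a rebuilt write
     * containing only the failed records. A single WriteAttemptContext is reset and reused
     * across attempts.
     */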
    do {
      if (!first) {
        retryCounter.increment();
      }
      first = false;
      BulkWrites nextWrite = writesToPerform.removeFirst();
      assert nextWrite != null : "next write is null";
      ctx.reset();
      ctx.attemptCount++;
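      // Past 100 attempts, warn on every 50th attempt so a long-running write surfaces
      // in the logs without flooding them.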
      if (ctx.attemptCount > 100 && ctx.attemptCount % 50 == 0) {
        int numRows = nextWrite.numEntries();
        SpliceLogUtils.warn(
            LOG,
            "BulkWriteAction[%d rows] is taking a long time with %d attempts: id=%d",
            numRows,
            ctx.attemptCount,
            id);
        LOG.warn("Attempting bulk write " + nextWrite);
      }
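      // Attempt the write; the outcome (a direct retry, a set of records to retry, and
      // whether the cache should be refreshed) is recorded on the context.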
      executeSingle(nextWrite, ctx);

      /*
       * Back off exponentially before the next attempt, either to give our cache a chance
       * to invalidate or because the write pipeline told us to wait (i.e. we were
       * rejected). A sketch of this kind of pause computation follows this method.
       */
      if (ctx.shouldSleep()) {
        clock.sleep(PipelineUtils.getPauseTime(ctx.attemptCount, 10), TimeUnit.MILLISECONDS);
      }
      if (ctx.directRetry) {
        writesToPerform.add(nextWrite);
      } else if (ctx.nextWriteSet != null && ctx.nextWriteSet.size() > 0) {
        ctx.failed();
        // Rebuild a buffer containing only the records that actually need retrying.
        addToRetryCallBuffer(ctx.nextWriteSet, bulkWrites.getTxn(), ctx.refreshCache);
      }

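      // Drain any writes the retry buffer rebuilt back onto the work queue, then discard
      // the buffer so the next failure starts from a clean state.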
      if (retryPipingCallBuffer != null) {
        writesToPerform.addAll(retryPipingCallBuffer.getBulkWrites());
        retryPipingCallBuffer = null;
      }
    } while (!writesToPerform.isEmpty());
  }
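
  /*
   * For reference, a minimal sketch of an exponential-backoff pause computation in the
   * spirit of PipelineUtils.getPauseTime. It is illustrative only: the real method's
   * argument order, backoff curve, cap, and jitter may all differ, and this helper is
   * not called anywhere above.
   */
  private static long sketchPauseTime(int attemptCount, long basePauseMs) {
    // Double the pause on each attempt, capping the exponent so the result stays bounded.
    int shift = Math.min(attemptCount, 12);
    long pause = basePauseMs * (1L << shift);
    // Add up to 1% random jitter so concurrent retries do not wake up in lockstep.
    long jitter = (long) (pause * 0.01 * Math.random());
    return pause + jitter;
  }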