    @Override
    public void sync() {
      Preconditions.checkState(
          state.equals(ReaderWriterState.OPEN), "Attempt to sync a writer in state:%s", state);

      LOG.debug("Syncing all cached writers for view:{}", view);

      for (FileSystemWriter.IncrementalWriter<E> writer : cachedWriters.asMap().values()) {
        LOG.debug("Syncing partition writer:{}", writer);
        writer.sync();
      }
    }

    @Override
    public void flush() {
      Preconditions.checkState(
          state.equals(ReaderWriterState.OPEN), "Attempt to flush a writer in state:%s", state);

      LOG.debug("Flushing all cached writers for view:{}", view);

      /*
       * There is a potential for flushing writers that other threads create
       * while we loop over the cache. Normally that is just wasteful, but on
       * HDFS it is particularly bad. We should probably do something about
       * this, but it will be difficult because the Cache (ideally) uses
       * multiple partitions to prevent cached writer contention.
       */
      for (FileSystemWriter.IncrementalWriter<E> writer : cachedWriters.asMap().values()) {
        LOG.debug("Flushing partition writer:{}", writer);
        writer.flush();
      }
    }
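
    /*
     * Sketch only (not part of the original class): one way to narrow the
     * window described in flush() is to snapshot the cached writers before
     * flushing, so writers that other threads create after the snapshot are
     * not visited. The method name flushSnapshot is an assumption for
     * illustration, not an existing API of this writer; fully qualified names
     * are used to avoid adding imports to this fragment.
     */
    private void flushSnapshot() {
      // Copy the weakly-consistent cache view up front; writers added to the
      // cache after this point are not flushed by this call.
      java.util.List<FileSystemWriter.IncrementalWriter<E>> snapshot =
          com.google.common.collect.ImmutableList.copyOf(cachedWriters.asMap().values());
      for (FileSystemWriter.IncrementalWriter<E> writer : snapshot) {
        LOG.debug("Flushing partition writer:{}", writer);
        writer.flush();
      }
    }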