@Override
 public void write(Object key, List<ByteBuffer> values) {
   prepareWriter();
   // To ensure Crunch doesn't reuse CQLSSTableWriter's objects
   List<ByteBuffer> bb = Lists.newArrayList();
   for (ByteBuffer v : values) {
     bb.add(ByteBufferUtil.clone(v));
   }
   values = bb;
   try {
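      // rawAddRow() takes values that are already serialized to ByteBuffers, in the
      // bind-variable order of the insert statement, so no further binding happens here.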
     ((CQLSSTableWriter) writer).rawAddRow(values);
     if (null != progress) progress.progress();
     if (null != context) HadoopCompat.progress(context);
    } catch (InvalidRequestException | IOException e) {
      LOG.error("Error adding row", e);
      throw new CrunchRuntimeException("Error adding row: " + e.getMessage(), e);
   }
 }

 private void prepareWriter() {
   try {
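      // Lazily build the CQLSSTableWriter on first use; it writes SSTables for the
      // target table into the local outputDir, using the configured output partitioner
      // and buffer size.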
     if (writer == null) {
       writer =
           CQLSSTableWriter.builder()
               .forTable(schema)
               .using(insertStatement)
               .withPartitioner(ConfigHelper.getOutputPartitioner(conf))
               .inDirectory(outputDir)
               .withBufferSizeInMB(Integer.parseInt(conf.get(BUFFER_SIZE_IN_MB, "64")))
               .build();
     }
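      // The SSTableLoader is what later streams the generated SSTables into the
      // cluster; CrunchExternalClient supplies the cluster connection details it needs.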
     if (loader == null) {
       CrunchExternalClient externalClient = new CrunchExternalClient(conf);
       this.loader =
           new SSTableLoader(outputDir, externalClient, new BulkRecordWriter.NullOutputHandler());
     }
   } catch (Exception e) {
     throw new CrunchRuntimeException(e);
   }
 }
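
 // A minimal sketch (not part of the original class) of how the writer and loader set
 // up above are typically finished off: closing the CQLSSTableWriter flushes its
 // buffered rows to SSTables in outputDir, and the SSTableLoader then streams those
 // files into the cluster. The method name, signature, and error handling below are
 // illustrative assumptions, not the class's actual close logic.
 private void closeAndStream() {
   if (writer == null) {
     return; // nothing was written, so there is nothing to flush or stream
   }
   try {
     // Flush any rows still buffered in memory out to SSTable files on disk.
     ((CQLSSTableWriter) writer).close();
     // Stream the generated SSTables to the cluster and block until the transfer ends.
     loader.stream().get();
   } catch (Exception e) {
     throw new CrunchRuntimeException(e);
   }
 }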