private ArrayList<Object[]> writeToTable(RowMetaInterface rowMeta, ArrayList<Object[]> rows)
      throws KettleException {

    // Stop: last line or error encountered
    if (rows.isEmpty()) {
      if (log.isDetailed()) logDetailed("Last line inserted: stop");
      return null;
    }

    PreparedStatement insertStatement = null;

    ArrayList<Object[]> insertRowsData = new ArrayList<Object[]>();
    ArrayList<Object[]> outputRowsData = rows;

    String tableName = null;

    boolean sendToErrorRow = false;
    String errorMessage = null;
    boolean rowIsSafe = false;
    int[] updateCounts = null;
    List<Exception> exceptionsList = null;
    boolean batchProblem = false;

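    // Determine the target table name (from a field in the row, from date-based
    // partitioning, or from the configured table name) and collect the row data to insert.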
    for (Object[] row : rows) {
      if (meta.isTableNameInField()) {
        // Cache the position of the table name field
        if (data.indexOfTableNameField < 0) {
          String realTablename = environmentSubstitute(meta.getTableNameField());
          data.indexOfTableNameField = rowMeta.indexOfValue(realTablename);
          if (data.indexOfTableNameField < 0) {
            String message = "Unable to find table name field [" + realTablename + "] in input row";
            logError(message);
            throw new KettleStepException(message);
          }
          if (!meta.isTableNameInTable()) {
            data.insertRowMeta.removeValueMeta(data.indexOfTableNameField);
          }
        }
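        // Note: the table name is resolved from the first row of the batch; all rows
        // passed to this method are assumed to target the same table.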
        tableName = rowMeta.getString(rows.get(0), data.indexOfTableNameField);
        if (!meta.isTableNameInTable()) {
          // If the name of the table should not be inserted itself,
          // remove the table name from the input row data as well.
          // This forcibly creates a copy of the row.
          insertRowsData.add(
              RowDataUtil.removeItem(rowMeta.cloneRow(row), data.indexOfTableNameField));
        } else {
          insertRowsData.add(row);
        }
      } else if (meta.isPartitioningEnabled()
          && (meta.isPartitioningDaily() || meta.isPartitioningMonthly())
          && (meta.getPartitioningField() != null && meta.getPartitioningField().length() > 0)) {
        // Cache the index of the partitioning field and create the date formatter
        if (data.indexOfPartitioningField < 0) {
          data.indexOfPartitioningField =
              rowMeta.indexOfValue(environmentSubstitute(meta.getPartitioningField()));
          if (data.indexOfPartitioningField < 0) {
            throw new KettleStepException(
                "Unable to find field [" + meta.getPartitioningField() + "] in the input row!");
          }

          if (meta.isPartitioningDaily()) {
            data.dateFormater = new SimpleDateFormat("yyyyMMdd");
          } else {
            data.dateFormater = new SimpleDateFormat("yyyyMM");
          }
        }

        ValueMetaInterface partitioningValue = rowMeta.getValueMeta(data.indexOfPartitioningField);
        if (!partitioningValue.isDate() || row[data.indexOfPartitioningField] == null) {
          throw new KettleStepException(
              "Sorry, the partitioning field needs to contain a data value and can't be empty!");
        }

        Object partitioningValueData = rowMeta.getDate(row, data.indexOfPartitioningField);
        tableName =
            environmentSubstitute(meta.getTablename())
                + "_"
                + data.dateFormater.format((Date) partitioningValueData);
        insertRowsData.add(row);
      } else {
        tableName = data.tableName;
        insertRowsData.add(row);
      }

      if (Const.isEmpty(tableName)) {
        throw new KettleStepException("The tablename is not defined (empty)");
      }
    }

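    // Prepared statements are cached per table name and per batch size (number of rows),
    // so the multi-row INSERT is only prepared once for each combination.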
    if (!data.preparedStatements.containsKey(tableName)) {
      data.preparedStatements.put(tableName, new Hashtable<Integer, PreparedStatement>());
    }

    insertStatement = data.preparedStatements.get(tableName).get(rows.size());
    if (insertStatement == null) {
      String sql =
          getInsertStatement(
              environmentSubstitute(meta.getSchemaName()),
              tableName,
              data.insertRowMeta,
              rows.size());
      if (log.isDetailed()) logDetailed("Prepared statement : " + sql);
      insertStatement = data.db.prepareSQL(sql);

      data.preparedStatements.get(tableName).put(rows.size(), insertStatement);
    }

    try {
      // For PG & GP, we add a savepoint before the row.
      // Then revert to the savepoint afterwards... (not a transaction, so
      // hopefully still fast)
      //
      if (data.specialErrorHandling) {
        data.savepoint = data.db.setSavepoint();
      }

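      // Build a flattened row metadata: the per-row metadata repeated once per row,
      // matching the placeholders of the multi-row INSERT statement.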
      RowMeta insertRowMeta = new RowMeta();
      for (int i = 0; i < rows.size(); i++) {
        for (int j = 0; j < data.valuenrs.length; j++) {
          insertRowMeta.addValueMeta(data.insertRowMeta.getValueMeta(j));
        }
      }
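      // toArray(...) is assumed to flatten the buffered rows into a single Object[]
      // that lines up with the flattened metadata built above.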
      data.db.setValues(insertRowMeta, toArray(insertRowsData), insertStatement);
      // false: no commit, the commit is handled separately in this step
      data.db.insertRow(insertStatement, data.batchMode, false);
      // Get a commit counter per prepared statement to keep track of
      // separate tables, etc.
      //
      Integer commitCounter = data.commitCounterMap.get(tableName);
      if (commitCounter == null) {
        commitCounter = Integer.valueOf(1);
      } else {
        commitCounter++;
      }
      data.commitCounterMap.put(tableName, commitCounter);

      // Release the savepoint if needed
      //
      if (data.specialErrorHandling) {
        if (data.releaseSavepoint) {
          data.db.releaseSavepoint(data.savepoint);
        }
      }

      // Perform a commit if needed
      //

      if ((data.commitSize > 0) && ((commitCounter % data.commitSize) == 0)) {
        if (data.batchMode) {
          try {
            insertStatement.executeBatch();
            data.db.commit();
            insertStatement.clearBatch();
          } catch (BatchUpdateException ex) {
            KettleDatabaseBatchException kdbe =
                new KettleDatabaseBatchException("Error updating batch", ex);
            kdbe.setUpdateCounts(ex.getUpdateCounts());
            List<Exception> exceptions = new ArrayList<Exception>();

            // Walk the chain of SQLExceptions, starting from the root exception
            SQLException nextException = ex;
            do {
              exceptions.add(nextException);
            } while ((nextException = nextException.getNextException()) != null);
            kdbe.setExceptionsList(exceptions);
            throw kdbe;
          } catch (SQLException ex) {
            throw new KettleDatabaseException("Error inserting row", ex);
          } catch (Exception ex) {
            throw new KettleDatabaseException("Unexpected error inserting row", ex);
          }
        } else {
          // Not in batch mode: perform a normal commit
          data.db.commit();
        }
        // Clear the batch/commit counter...
        //
        data.commitCounterMap.put(tableName, Integer.valueOf(0));
        rowIsSafe = true;
      } else {
        rowIsSafe = false;
      }
    } catch (KettleDatabaseBatchException be) {
      errorMessage = be.toString();
      batchProblem = true;
      sendToErrorRow = true;
      updateCounts = be.getUpdateCounts();
      exceptionsList = be.getExceptionsList();

      if (getStepMeta().isDoingErrorHandling()) {
        data.db.clearBatch(insertStatement);
        data.db.commit(true);
      } else {
        data.db.clearBatch(insertStatement);
        data.db.rollback();
        StringBuilder msg =
            new StringBuilder("Error batch inserting rows into table [" + tableName + "].");
        msg.append(Const.CR);
        msg.append("Errors encountered (first 10):").append(Const.CR);
        for (int x = 0; x < be.getExceptionsList().size() && x < 10; x++) {
          Exception exception = be.getExceptionsList().get(x);
          if (exception.getMessage() != null) msg.append(exception.getMessage()).append(Const.CR);
        }
        throw new KettleException(msg.toString(), be);
      }
    } catch (KettleDatabaseException dbe) {
      if (getStepMeta().isDoingErrorHandling()) {
        if (data.specialErrorHandling) {
          data.db.rollback(data.savepoint);
          if (data.releaseSavepoint) {
            data.db.releaseSavepoint(data.savepoint);
          }
          // data.db.commit(true); // force a commit on the connection too.
        }

        sendToErrorRow = true;
        errorMessage = dbe.toString();
      } else {
        if (meta.ignoreErrors()) {
          if (data.warnings < 20) {
            if (log.isBasic())
              logBasic("WARNING: Couldn't insert row into table." + Const.CR + dbe.getMessage());
          } else if (data.warnings == 20) {
            if (log.isBasic())
              logBasic(
                  "FINAL WARNING (no more then 20 displayed): Couldn't insert row into table: "
                      + Const.CR
                      + dbe.getMessage());
          }
          data.warnings++;
        } else {
          setErrors(getErrors() + 1);
          data.db.rollback();
          throw new KettleException("Error inserting row into table [" + tableName + "]", dbe);
        }
      }
    }

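    // Dispatch the rows: in batch mode they are buffered until a commit makes them safe
    // (or sent to the error stream on failure); otherwise only error rows are redirected.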
    if (data.batchMode) {
      if (sendToErrorRow) {
        if (batchProblem) {
          for (Object[] row : outputRowsData) {
            data.batchBuffer.add(row);
          }
          outputRowsData = null;
          processBatchException(errorMessage, updateCounts, exceptionsList);
        } else {
          // Simply send these rows to the error stream
          for (Object[] row : outputRowsData) {
            putError(rowMeta, row, 1L, errorMessage, null, "TOP001");
          }
          outputRowsData = null;
        }
      } else {
        for (Object[] row : outputRowsData) {
          data.batchBuffer.add(row);
        }
        outputRowsData = null;

        // A commit was done and the rows are all safe (no error)
        if (rowIsSafe) {
          for (int i = 0; i < data.batchBuffer.size(); i++) {
            Object[] row = (Object[]) data.batchBuffer.get(i);
            putRow(data.outputRowMeta, row);
            incrementLinesOutput();
          }
          // Clear the buffer
          data.batchBuffer.clear();
        }
      }
    } else {
      if (sendToErrorRow) {
        for (Object[] row : outputRowsData) {
          putError(rowMeta, row, 1L, errorMessage, null, "TOP001");
        }
        outputRowsData = null;
      }
    }

    return outputRowsData;
  }