Code example #1
File: InjectorTest.java  Project: hoverzheng/kettle
  /** Test case for the injector step... also a showcase of how to use the injector. */
  public void testInjector() throws Exception {
    KettleEnvironment.init();

    //
    // Create a new transformation...
    //
    TransMeta transMeta = new TransMeta();
    transMeta.setName("injectortest");

    PluginRegistry registry = PluginRegistry.getInstance();

    //
    // create an injector step...
    //
    String injectorStepname = "injector step";
    InjectorMeta im = new InjectorMeta();

    // Set the information of the injector.

    String injectorPid = registry.getPluginId(StepPluginType.class, im);
    StepMeta injectorStep = new StepMeta(injectorPid, injectorStepname, (StepMetaInterface) im);
    transMeta.addStep(injectorStep);

    //
    // Create a dummy step
    //
    String dummyStepname = "dummy step";
    DummyTransMeta dm = new DummyTransMeta();

    String dummyPid = registry.getPluginId(StepPluginType.class, dm);
    StepMeta dummyStep = new StepMeta(dummyPid, dummyStepname, (StepMetaInterface) dm);
    transMeta.addStep(dummyStep);

    TransHopMeta hi = new TransHopMeta(injectorStep, dummyStep);
    transMeta.addTransHop(hi);

    // Now execute the transformation...
    Trans trans = new Trans(transMeta);

    trans.prepareExecution(null);

    StepInterface si = trans.getStepInterface(dummyStepname, 0);
    RowStepCollector rc = new RowStepCollector();
    si.addRowListener(rc);

    RowProducer rp = trans.addRowProducer(injectorStepname, 0);
    trans.startThreads();

    // add rows
    List<RowMetaAndData> inputList = createData();
    for (RowMetaAndData rm : inputList) {
      rp.putRow(rm.getRowMeta(), rm.getData());
    }
    rp.finished();

    trans.waitUntilFinished();

    List<RowMetaAndData> resultRows = rc.getRowsWritten();
    checkRows(resultRows, inputList);
  }
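
The test above calls createData() and checkRows(), which are not shown in this snippet. Below is a minimal sketch of what such a createData() helper might look like, using the same RowMeta/ValueMeta API seen in the other examples; the field names and values are illustrative assumptions, not taken from the original test.

  // Hypothetical helper: builds a small list of input rows for the injector.
  // Field names and values are assumptions for illustration only.
  public List<RowMetaAndData> createData() {
    List<RowMetaAndData> list = new ArrayList<RowMetaAndData>();

    RowMetaInterface rowMeta = new RowMeta();
    rowMeta.addValueMeta(new ValueMeta("field1", ValueMetaInterface.TYPE_STRING));
    rowMeta.addValueMeta(new ValueMeta("field2", ValueMetaInterface.TYPE_INTEGER));

    list.add(new RowMetaAndData(rowMeta, new Object[] {"abc", Long.valueOf(1L)}));
    list.add(new RowMetaAndData(rowMeta, new Object[] {"def", Long.valueOf(2L)}));
    return list;
  }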
Code example #2
  public boolean init(StepMetaInterface smi, StepDataInterface sdi) {
    meta = (RowGeneratorMeta) smi;
    data = (RowGeneratorData) sdi;

    if (super.init(smi, sdi)) {
      // Determine the number of rows to generate...
      data.rowLimit = Const.toLong(environmentSubstitute(meta.getRowLimit()), -1L);
      data.rowsWritten = 0L;

      if (data.rowLimit < 0L) { // Unable to parse
        logError(BaseMessages.getString(PKG, "RowGenerator.Wrong.RowLimit.Number"));
        return false; // fail
      }

      // Create a row (constants) with all the values in it...
      List<CheckResultInterface> remarks =
          new ArrayList<CheckResultInterface>(); // stores the errors...
      RowMetaAndData outputRow = buildRow(meta, remarks, getStepname());
      if (!remarks.isEmpty()) {
        for (int i = 0; i < remarks.size(); i++) {
          CheckResult cr = (CheckResult) remarks.get(i);
          logError(cr.getText());
        }
        return false;
      }
      data.outputRowData = outputRow.getData();
      data.outputRowMeta = outputRow.getRowMeta();
      return true;
    }
    return false;
  }
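
The init() above only prepares data.rowLimit and the constant output row. The following is a sketch of how those fields are typically consumed afterwards; it is not the actual RowGenerator.processRow(), just an assumed outline of the generate-until-limit loop.

  // Sketch only: emit clones of the constant row until rowLimit is reached.
  public boolean processRow(StepMetaInterface smi, StepDataInterface sdi) throws KettleException {
    if (data.rowsWritten >= data.rowLimit) {
      setOutputDone(); // signal that no more rows will be produced
      return false;
    }
    Object[] row = data.outputRowMeta.cloneRow(data.outputRowData);
    putRow(data.outputRowMeta, row);
    data.rowsWritten++;
    return true;
  }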
Code example #3
File: ConstantTest.java  Project: hoverzheng/kettle
  /** Check the two lists by comparing the rows in order. If they are not the same, fail the test. */
  public void checkRows(List<RowMetaAndData> rows1, List<RowMetaAndData> rows2) {
    int idx = 1;
    if (rows1.size() != rows2.size()) {
      fail("Number of rows is not the same: " + rows1.size() + " and " + rows2.size());
    }
    Iterator<RowMetaAndData> it1 = rows1.iterator();
    Iterator<RowMetaAndData> it2 = rows2.iterator();

    while (it1.hasNext() && it2.hasNext()) {
      RowMetaAndData rm1 = it1.next();
      RowMetaAndData rm2 = it2.next();

      Object[] r1 = rm1.getData();
      Object[] r2 = rm2.getData();

      if (rm1.size() != rm2.size()) {
        fail("row nr " + idx + " is not equal");
      }
      int[] fields = new int[rm1.size()];
      for (int ydx = 0; ydx < rm1.size(); ydx++) {
        fields[ydx] = ydx;
      }
      try {
        if (rm1.getRowMeta().compare(r1, r2, fields) != 0) {
          fail("row nr " + idx + " is not equal");
        }
      } catch (KettleValueException e) {
        fail("row nr " + idx + " is not equal");
      }

      idx++;
    }
  }
Code example #4
  /**
   * Check the two lists by comparing the rows in order. If they are not the same, fail the test.
   *
   * @param rows1 set 1 of rows to compare
   * @param rows2 set 2 of rows to compare
   * @param fileNameColumn Number of the column containing the filename. This is only checked for
   *     being non-null (some systems may canonicalize names differently than the input).
   */
  public static void checkRows(
      List<RowMetaAndData> rows1, List<RowMetaAndData> rows2, int fileNameColumn)
      throws TestFailedException {

    int idx = 1;
    if (rows1.size() != rows2.size()) {
      throw new TestFailedException(
          "Number of rows is not the same: " + rows1.size() + " and " + rows2.size());
    }
    Iterator<RowMetaAndData> itrRows1 = rows1.iterator();
    Iterator<RowMetaAndData> itrRows2 = rows2.iterator();

    while (itrRows1.hasNext() && itrRows2.hasNext()) {
      RowMetaAndData rowMetaAndData1 = itrRows1.next();
      RowMetaAndData rowMetaAndData2 = itrRows2.next();

      RowMetaInterface rowMetaInterface1 = rowMetaAndData1.getRowMeta();

      Object[] rowObject1 = rowMetaAndData1.getData();
      Object[] rowObject2 = rowMetaAndData2.getData();

      if (rowMetaAndData1.size() != rowMetaAndData2.size()) {
        throw new TestFailedException("row number " + idx + " is not equal");
      }

      int[] fields = new int[rowMetaInterface1.size()];
      for (int ydx = 0; ydx < rowMetaInterface1.size(); ydx++) {
        fields[ydx] = ydx;
      }

      try {
        if (fileNameColumn >= 0) {
          rowObject1[fileNameColumn] = rowObject2[fileNameColumn];
        }
        if (rowMetaAndData1.getRowMeta().compare(rowObject1, rowObject2, fields) != 0) {
          throw new ComparisonFailure(
              "row nr " + idx + " is not equal",
              rowMetaInterface1.getString(rowObject1),
              rowMetaInterface1.getString(rowObject2));
        }
      } catch (KettleValueException e) {
        throw new TestFailedException("row nr " + idx + " is not equal");
      }
      idx++;
    }
  }
Code example #5
  @Test
  public void testGetFeatureSummary() throws Exception {
    DatabaseMeta databaseMeta = mock(DatabaseMeta.class);
    OracleDatabaseMeta odbm = new OracleDatabaseMeta();
    doCallRealMethod().when(databaseMeta).setDatabaseInterface(any(DatabaseInterface.class));
    doCallRealMethod().when(databaseMeta).getFeatureSummary();
    doCallRealMethod().when(databaseMeta).getAttributes();
    databaseMeta.setDatabaseInterface(odbm);
    List<RowMetaAndData> result = databaseMeta.getFeatureSummary();
    assertNotNull(result);
    for (RowMetaAndData rmd : result) {
      assertEquals(2, rmd.getRowMeta().size());
      assertEquals("Parameter", rmd.getRowMeta().getValueMeta(0).getName());
      assertEquals(ValueMetaInterface.TYPE_STRING, rmd.getRowMeta().getValueMeta(0).getType());
      assertEquals("Value", rmd.getRowMeta().getValueMeta(1).getName());
      assertEquals(ValueMetaInterface.TYPE_STRING, rmd.getRowMeta().getValueMeta(1).getType());
    }
  }
Code example #6
  private synchronized ObjectId insertCluster(ClusterSchema clusterSchema) throws KettleException {
    if (getClusterID(clusterSchema.getName()) != null) {
      // This cluster schema name is already in use. Throw an exception.
      throw new KettleObjectExistsException(
          "Failed to create object in repository. Object ["
              + clusterSchema.getName()
              + "] already exists.");
    }

    ObjectId id = repository.connectionDelegate.getNextClusterID();

    RowMetaAndData table = new RowMetaAndData();

    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_CLUSTER_ID_CLUSTER, ValueMetaInterface.TYPE_INTEGER),
        id);
    table.addValue(
        new ValueMeta(KettleDatabaseRepository.FIELD_CLUSTER_NAME, ValueMetaInterface.TYPE_STRING),
        clusterSchema.getName());
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_CLUSTER_BASE_PORT, ValueMetaInterface.TYPE_STRING),
        clusterSchema.getBasePort());
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_CLUSTER_SOCKETS_BUFFER_SIZE,
            ValueMetaInterface.TYPE_STRING),
        clusterSchema.getSocketsBufferSize());
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_CLUSTER_SOCKETS_FLUSH_INTERVAL,
            ValueMetaInterface.TYPE_STRING),
        clusterSchema.getSocketsFlushInterval());
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_CLUSTER_SOCKETS_COMPRESSED,
            ValueMetaInterface.TYPE_BOOLEAN),
        Boolean.valueOf(clusterSchema.isSocketsCompressed()));
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_CLUSTER_DYNAMIC, ValueMetaInterface.TYPE_BOOLEAN),
        Boolean.valueOf(clusterSchema.isDynamic()));

    repository
        .connectionDelegate
        .getDatabase()
        .prepareInsert(table.getRowMeta(), KettleDatabaseRepository.TABLE_R_CLUSTER);
    repository.connectionDelegate.getDatabase().setValuesInsert(table);
    repository.connectionDelegate.getDatabase().insertRow();
    repository.connectionDelegate.getDatabase().closeInsert();

    return id;
  }
Code example #7
File: RowGeneratorMeta.java  Project: cnopens/BA
  public void getFields(
      RowMetaInterface row,
      String origin,
      RowMetaInterface[] info,
      StepMeta nextStep,
      VariableSpace space)
      throws KettleStepException {
    List<CheckResultInterface> remarks = new ArrayList<CheckResultInterface>();
    RowMetaAndData rowMetaAndData = RowGenerator.buildRow(this, remarks, origin);
    if (!remarks.isEmpty()) {
      StringBuffer stringRemarks = new StringBuffer();
      for (CheckResultInterface remark : remarks) {
        stringRemarks.append(remark.toString()).append(Const.CR);
      }
      throw new KettleStepException(stringRemarks.toString());
    }

    for (ValueMetaInterface valueMeta : rowMetaAndData.getRowMeta().getValueMetaList()) {
      valueMeta.setOrigin(origin);
    }

    row.mergeRowMeta(rowMetaAndData.getRowMeta());
  }
Code example #8
  public void getFields(
      RowMetaInterface r,
      String name,
      RowMetaInterface[] info,
      StepMeta nextStep,
      VariableSpace space,
      Repository repository,
      IMetaStore metaStore)
      throws KettleStepException {
    RowMetaAndData add =
        ExecSQL.getResultRow(
            new Result(), getUpdateField(), getInsertField(), getDeleteField(), getReadField());

    r.mergeRowMeta(add.getRowMeta());
  }
Code example #9
  public synchronized int getNrStepAttributes(ObjectId id_step) throws KettleException {
    int retval = 0;

    RowMetaAndData par = repository.connectionDelegate.getParameterMetaData(id_step);
    String sql =
        "SELECT COUNT(*) FROM "
            + quoteTable(KettleDatabaseRepository.TABLE_R_STEP_ATTRIBUTE)
            + " WHERE "
            + quote(KettleDatabaseRepository.FIELD_STEP_ATTRIBUTE_ID_STEP)
            + " = ? ";
    RowMetaAndData r =
        repository.connectionDelegate.getOneRow(sql, par.getRowMeta(), par.getData());
    if (r != null) {
      retval = (int) r.getInteger(0, 0L);
    }

    return retval;
  }
Code example #10
  private synchronized ObjectId insertDatabaseAttribute(
      ObjectId id_database, String code, String value_str) throws KettleException {
    ObjectId id = repository.connectionDelegate.getNextDatabaseAttributeID();

    RowMetaAndData table = new RowMetaAndData();

    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_DATABASE_ATTRIBUTE_ID_DATABASE_ATTRIBUTE,
            ValueMetaInterface.TYPE_INTEGER),
        id);
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_DATABASE_ATTRIBUTE_ID_DATABASE,
            ValueMetaInterface.TYPE_INTEGER),
        id_database);
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_DATABASE_ATTRIBUTE_CODE, ValueMetaInterface.TYPE_STRING),
        code);
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_DATABASE_ATTRIBUTE_VALUE_STR,
            ValueMetaInterface.TYPE_STRING),
        value_str);

    /*
     * If we have prepared the insert, we don't do it again. We assume that all the step insert statements come one
     * after the other.
     */
    repository
        .connectionDelegate
        .getDatabase()
        .prepareInsert(table.getRowMeta(), KettleDatabaseRepository.TABLE_R_DATABASE_ATTRIBUTE);
    repository.connectionDelegate.getDatabase().setValuesInsert(table);
    repository.connectionDelegate.getDatabase().insertRow();
    repository.connectionDelegate.getDatabase().closeInsert();

    if (log.isDebug()) {
      log.logDebug("saved database attribute [" + code + "]");
    }

    return id;
  }
Code example #11
  public synchronized int getNrDatabases(ObjectId id_transformation) throws KettleException {
    int retval = 0;

    RowMetaAndData transIdRow =
        repository.connectionDelegate.getParameterMetaData(id_transformation);
    String sql =
        "SELECT COUNT(*) FROM "
            + quoteTable(KettleDatabaseRepository.TABLE_R_STEP_DATABASE)
            + " WHERE "
            + quote(KettleDatabaseRepository.FIELD_STEP_DATABASE_ID_TRANSFORMATION)
            + " = ? ";
    RowMetaAndData r =
        repository.connectionDelegate.getOneRow(sql, transIdRow.getRowMeta(), transIdRow.getData());
    if (r != null) {
      retval = (int) r.getInteger(0, 0L);
    }

    return retval;
  }
Code example #12
  // CHECKSTYLE:LineLength:OFF
  public synchronized ObjectId insertStep(
      ObjectId id_transformation,
      String name,
      String description,
      String steptype,
      boolean distribute,
      long copies,
      long gui_location_x,
      long gui_location_y,
      boolean gui_draw,
      String copiesString)
      throws KettleException {
    ObjectId id = repository.connectionDelegate.getNextStepID();

    ObjectId id_step_type = getStepTypeID(steptype);

    RowMetaAndData table = new RowMetaAndData();

    table.addValue(
        new ValueMeta(KettleDatabaseRepository.FIELD_STEP_ID_STEP, ValueMetaInterface.TYPE_INTEGER),
        id);
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_STEP_ID_TRANSFORMATION, ValueMetaInterface.TYPE_INTEGER),
        id_transformation);
    table.addValue(
        new ValueMeta(KettleDatabaseRepository.FIELD_STEP_NAME, ValueMetaInterface.TYPE_STRING),
        name);
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_STEP_DESCRIPTION, ValueMetaInterface.TYPE_STRING),
        description);
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_STEP_ID_STEP_TYPE, ValueMetaInterface.TYPE_INTEGER),
        id_step_type);
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_STEP_DISTRIBUTE, ValueMetaInterface.TYPE_BOOLEAN),
        Boolean.valueOf(distribute));
    table.addValue(
        new ValueMeta(KettleDatabaseRepository.FIELD_STEP_COPIES, ValueMetaInterface.TYPE_INTEGER),
        new Long(copies));
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_STEP_GUI_LOCATION_X, ValueMetaInterface.TYPE_INTEGER),
        new Long(gui_location_x));
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_STEP_GUI_LOCATION_Y, ValueMetaInterface.TYPE_INTEGER),
        new Long(gui_location_y));
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_STEP_GUI_DRAW, ValueMetaInterface.TYPE_BOOLEAN),
        Boolean.valueOf(gui_draw));
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_STEP_COPIES_STRING, ValueMetaInterface.TYPE_STRING),
        copiesString);

    repository
        .connectionDelegate
        .getDatabase()
        .prepareInsert(table.getRowMeta(), KettleDatabaseRepository.TABLE_R_STEP);
    repository.connectionDelegate.getDatabase().setValuesInsert(table);
    repository.connectionDelegate.getDatabase().insertRow();
    repository.connectionDelegate.getDatabase().closeInsert();

    return id;
  }
Code example #13
  public synchronized ObjectId insertDatabase(
      String name,
      String type,
      String access,
      String host,
      String dbname,
      String port,
      String user,
      String pass,
      String servername,
      String data_tablespace,
      String index_tablespace)
      throws KettleException {

    ObjectId id = repository.connectionDelegate.getNextDatabaseID();

    ObjectId id_database_type = getDatabaseTypeID(type);
    if (id_database_type == null) {
      // New support database type: add it!

      id_database_type = repository.connectionDelegate.getNextDatabaseTypeID();

      String tablename = KettleDatabaseRepository.TABLE_R_DATABASE_TYPE;
      RowMetaInterface tableMeta = new RowMeta();

      tableMeta.addValueMeta(
          new ValueMeta(
              KettleDatabaseRepository.FIELD_DATABASE_TYPE_ID_DATABASE_TYPE,
              ValueMetaInterface.TYPE_INTEGER,
              5,
              0));
      tableMeta.addValueMeta(
          new ValueMeta(
              KettleDatabaseRepository.FIELD_DATABASE_TYPE_CODE,
              ValueMetaInterface.TYPE_STRING,
              KettleDatabaseRepository.REP_STRING_CODE_LENGTH,
              0));
      tableMeta.addValueMeta(
          new ValueMeta(
              KettleDatabaseRepository.FIELD_DATABASE_TYPE_DESCRIPTION,
              ValueMetaInterface.TYPE_STRING,
              KettleDatabaseRepository.REP_STRING_LENGTH,
              0));

      repository.connectionDelegate.getDatabase().prepareInsert(tableMeta, tablename);

      Object[] tableData = new Object[3];
      int tableIndex = 0;

      tableData[tableIndex++] = new LongObjectId(id_database_type).longValue();
      tableData[tableIndex++] = type;
      tableData[tableIndex++] = type;

      repository.connectionDelegate.getDatabase().setValuesInsert(tableMeta, tableData);
      repository.connectionDelegate.getDatabase().insertRow();
      repository.connectionDelegate.getDatabase().closeInsert();
    }

    ObjectId id_database_contype = getDatabaseConTypeID(access);

    RowMetaAndData table = new RowMetaAndData();
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_DATABASE_ID_DATABASE, ValueMetaInterface.TYPE_INTEGER),
        id);
    table.addValue(
        new ValueMeta(KettleDatabaseRepository.FIELD_DATABASE_NAME, ValueMetaInterface.TYPE_STRING),
        name);
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_DATABASE_ID_DATABASE_TYPE,
            ValueMetaInterface.TYPE_INTEGER),
        id_database_type);
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_DATABASE_ID_DATABASE_CONTYPE,
            ValueMetaInterface.TYPE_INTEGER),
        id_database_contype);
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_DATABASE_HOST_NAME, ValueMetaInterface.TYPE_STRING),
        host);
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_DATABASE_DATABASE_NAME, ValueMetaInterface.TYPE_STRING),
        dbname);
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_DATABASE_PORT, ValueMetaInterface.TYPE_INTEGER),
        new Long(Const.toInt(port, -1)));
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_DATABASE_USERNAME, ValueMetaInterface.TYPE_STRING),
        user);
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_DATABASE_PASSWORD, ValueMetaInterface.TYPE_STRING),
        Encr.encryptPasswordIfNotUsingVariables(pass));
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_DATABASE_SERVERNAME, ValueMetaInterface.TYPE_STRING),
        servername);
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_DATABASE_DATA_TBS, ValueMetaInterface.TYPE_STRING),
        data_tablespace);
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_DATABASE_INDEX_TBS, ValueMetaInterface.TYPE_STRING),
        index_tablespace);

    repository
        .connectionDelegate
        .getDatabase()
        .prepareInsert(table.getRowMeta(), KettleDatabaseRepository.TABLE_R_DATABASE);
    repository.connectionDelegate.getDatabase().setValuesInsert(table);
    repository.connectionDelegate.getDatabase().insertRow();
    repository.connectionDelegate.getDatabase().closeInsert();

    return id;
  }
Code example #14
  /** Test case for janino step. */
  public void testJaninoStep() throws Exception {
    KettleEnvironment.init();

    //
    // Create a new transformation...
    //
    TransMeta transMeta = new TransMeta();
    transMeta.setName("janino test");

    PluginRegistry registry = PluginRegistry.getInstance();

    // create an injector step...
    String injectorStepName = "injector step";
    InjectorMeta im = new InjectorMeta();

    // Set the information of the injector.
    String injectorPid = registry.getPluginId(StepPluginType.class, im);
    StepMeta injectorStep = new StepMeta(injectorPid, injectorStepName, im);
    transMeta.addStep(injectorStep);

    //
    // create a janino step...
    //
    String stepname = "janino";
    JaninoMeta jm = new JaninoMeta();

    // Set the information of the step
    String janinoPid = registry.getPluginId(StepPluginType.class, jm);
    StepMeta janinoStep = new StepMeta(janinoPid, stepname, jm);
    transMeta.addStep(janinoStep);

    jm.setDefault();

    JaninoMetaFunction[] formulas = {
      new JaninoMetaFunction(
          "string",
          "(string==null)?null:\"string-value\"",
          ValueMeta.TYPE_STRING,
          -1,
          -1,
          "string"),
      new JaninoMetaFunction(
          "integer",
          "(integer==null)?null:new Long(42L)",
          ValueMeta.TYPE_INTEGER,
          -1,
          -1,
          "integer"),
      new JaninoMetaFunction(
          "number",
          "(number==null)?null:new Double(23.0)",
          ValueMeta.TYPE_NUMBER,
          -1,
          -1,
          "number"),
      new JaninoMetaFunction(
          "bigdecimal",
          "(bigdecimal==null)?null:new java.math.BigDecimal(11.0)",
          ValueMeta.TYPE_BIGNUMBER,
          -1,
          -1,
          "bigdecimal"),
      new JaninoMetaFunction(
          "date",
          "(date==null)?null:new java.util.Date(10000000)",
          ValueMeta.TYPE_DATE,
          -1,
          -1,
          "date"),
      new JaninoMetaFunction(
          "binary",
          "(binary==null)?null:new byte[]{1,2,3,4,5}",
          ValueMeta.TYPE_BINARY,
          -1,
          -1,
          "binary"),
      new JaninoMetaFunction(
          "bool", "(bool==null)?null:Boolean.TRUE", ValueMeta.TYPE_BOOLEAN, -1, -1, "bool"),
    };

    jm.setFormula(formulas);

    transMeta.addTransHop(new TransHopMeta(injectorStep, janinoStep));

    //
    // Create a dummy step
    //
    String dummyStepname = "dummy step";
    DummyTransMeta dm = new DummyTransMeta();

    String dummyPid = registry.getPluginId(StepPluginType.class, dm);
    StepMeta dummyStep = new StepMeta(dummyPid, dummyStepname, dm);
    transMeta.addStep(dummyStep);

    TransHopMeta hi = new TransHopMeta(janinoStep, dummyStep);
    transMeta.addTransHop(hi);

    // Now execute the transformation...
    Trans trans = new Trans(transMeta);

    trans.prepareExecution(null);

    StepInterface si = trans.getStepInterface(dummyStepname, 0);
    RowStepCollector rc = new RowStepCollector();
    si.addRowListener(rc);
    RowProducer rp = trans.addRowProducer(injectorStepName, 0);

    trans.startThreads();

    for (RowMetaAndData rm : createInputList()) {
      rp.putRow(rm.getRowMeta(), rm.getData());
    }
    rp.finished();

    trans.waitUntilFinished();

    List<RowMetaAndData> checkList = createExpectedList();
    List<RowMetaAndData> resultRows = rc.getRowsWritten();
    checkRows(resultRows, checkList);
  }
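
createInputList() and createExpectedList() are referenced but not shown above. Given the formulas in this test, a plausible createInputList() would build rows containing the seven fields the expressions refer to; the sketch below is an assumption for illustration, not the original helper, and the concrete values are invented.

  // Hypothetical helper: one input row with all seven fields used by the formulas.
  // Concrete values are illustrative assumptions only.
  public List<RowMetaAndData> createInputList() {
    RowMetaInterface rm = new RowMeta();
    rm.addValueMeta(new ValueMeta("string", ValueMeta.TYPE_STRING));
    rm.addValueMeta(new ValueMeta("integer", ValueMeta.TYPE_INTEGER));
    rm.addValueMeta(new ValueMeta("number", ValueMeta.TYPE_NUMBER));
    rm.addValueMeta(new ValueMeta("bigdecimal", ValueMeta.TYPE_BIGNUMBER));
    rm.addValueMeta(new ValueMeta("date", ValueMeta.TYPE_DATE));
    rm.addValueMeta(new ValueMeta("binary", ValueMeta.TYPE_BINARY));
    rm.addValueMeta(new ValueMeta("bool", ValueMeta.TYPE_BOOLEAN));

    List<RowMetaAndData> list = new ArrayList<RowMetaAndData>();
    list.add(new RowMetaAndData(rm, new Object[] {
        "a", Long.valueOf(1L), Double.valueOf(1.0), new java.math.BigDecimal("1"),
        new java.util.Date(0L), new byte[] {0}, Boolean.FALSE}));
    return list;
  }

createExpectedList() would then hold the same rows with every non-null field replaced by the constant its formula produces (for example "string-value", 42L, 23.0, and so on).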
Code example #15
  protected Object[] writeToTable(RowMetaInterface rowMeta, Object[] r) throws KettleException {

    if (r == null) { // Stop: last line or error encountered
      if (log.isDetailed()) {
        logDetailed("Last line inserted: stop");
      }
      return null;
    }

    PreparedStatement insertStatement = null;
    Object[] insertRowData;
    Object[] outputRowData = r;

    String tableName = null;

    boolean sendToErrorRow = false;
    String errorMessage = null;
    boolean rowIsSafe = false;
    int[] updateCounts = null;
    List<Exception> exceptionsList = null;
    boolean batchProblem = false;
    Object generatedKey = null;

    if (meta.isTableNameInField()) {
      // Cache the position of the table name field
      if (data.indexOfTableNameField < 0) {
        String realTablename = environmentSubstitute(meta.getTableNameField());
        data.indexOfTableNameField = rowMeta.indexOfValue(realTablename);
        if (data.indexOfTableNameField < 0) {
          String message = "Unable to find table name field [" + realTablename + "] in input row";
          logError(message);
          throw new KettleStepException(message);
        }
        if (!meta.isTableNameInTable() && !meta.specifyFields()) {
          data.insertRowMeta.removeValueMeta(data.indexOfTableNameField);
        }
      }
      tableName = rowMeta.getString(r, data.indexOfTableNameField);
      if (!meta.isTableNameInTable() && !meta.specifyFields()) {
        // If the name of the table should not be inserted itself, remove the table name
        // from the input row data as well. This forcibly creates a copy of r
        //
        insertRowData = RowDataUtil.removeItem(rowMeta.cloneRow(r), data.indexOfTableNameField);
      } else {
        insertRowData = r;
      }
    } else if (meta.isPartitioningEnabled()
        && (meta.isPartitioningDaily() || meta.isPartitioningMonthly())
        && (meta.getPartitioningField() != null && meta.getPartitioningField().length() > 0)) {
      // Initialize some stuff!
      if (data.indexOfPartitioningField < 0) {
        data.indexOfPartitioningField =
            rowMeta.indexOfValue(environmentSubstitute(meta.getPartitioningField()));
        if (data.indexOfPartitioningField < 0) {
          throw new KettleStepException(
              "Unable to find field [" + meta.getPartitioningField() + "] in the input row!");
        }

        if (meta.isPartitioningDaily()) {
          data.dateFormater = new SimpleDateFormat("yyyyMMdd");
        } else {
          data.dateFormater = new SimpleDateFormat("yyyyMM");
        }
      }

      ValueMetaInterface partitioningValue = rowMeta.getValueMeta(data.indexOfPartitioningField);
      if (!partitioningValue.isDate() || r[data.indexOfPartitioningField] == null) {
        throw new KettleStepException(
            "Sorry, the partitioning field needs to contain a data value and can't be empty!");
      }

      Object partitioningValueData = rowMeta.getDate(r, data.indexOfPartitioningField);
      tableName =
          environmentSubstitute(meta.getTableName())
              + "_"
              + data.dateFormater.format((Date) partitioningValueData);
      insertRowData = r;
    } else {
      tableName = data.tableName;
      insertRowData = r;
    }

    if (meta.specifyFields()) {
      //
      // The values to insert are those in the fields sections
      //
      insertRowData = new Object[data.valuenrs.length];
      for (int idx = 0; idx < data.valuenrs.length; idx++) {
        insertRowData[idx] = r[data.valuenrs[idx]];
      }
    }

    if (Const.isEmpty(tableName)) {
      throw new KettleStepException("The tablename is not defined (empty)");
    }

    insertStatement = data.preparedStatements.get(tableName);
    if (insertStatement == null) {
      String sql =
          data.db.getInsertStatement(
              environmentSubstitute(meta.getSchemaName()), tableName, data.insertRowMeta);
      if (log.isDetailed()) {
        logDetailed("Prepared statement : " + sql);
      }
      insertStatement = data.db.prepareSQL(sql, meta.isReturningGeneratedKeys());
      data.preparedStatements.put(tableName, insertStatement);
    }

    try {
      // For PG & GP, we add a savepoint before the row.
      // Then revert to the savepoint afterwards... (not a transaction, so hopefully still fast)
      //
      if (data.useSafePoints) {
        data.savepoint = data.db.setSavepoint();
      }
      data.db.setValues(data.insertRowMeta, insertRowData, insertStatement);
      data.db.insertRow(
          insertStatement, data.batchMode, false); // false: no commit, it is handled in this step
      if (isRowLevel()) {
        logRowlevel("Written row: " + data.insertRowMeta.getString(insertRowData));
      }

      // Get a commit counter per prepared statement to keep track of separate tables, etc.
      //
      Integer commitCounter = data.commitCounterMap.get(tableName);
      if (commitCounter == null) {
        commitCounter = Integer.valueOf(1);
      } else {
        commitCounter++;
      }
      data.commitCounterMap.put(tableName, Integer.valueOf(commitCounter.intValue()));

      // Release the savepoint if needed
      //
      if (data.useSafePoints) {
        if (data.releaseSavepoint) {
          data.db.releaseSavepoint(data.savepoint);
        }
      }

      // Perform a commit if needed
      //

      if ((data.commitSize > 0) && ((commitCounter % data.commitSize) == 0)) {
        if (data.db.getUseBatchInsert(data.batchMode)) {
          try {
            insertStatement.executeBatch();
            data.db.commit();
            insertStatement.clearBatch();
          } catch (SQLException ex) {
            throw Database.createKettleDatabaseBatchException("Error updating batch", ex);
          } catch (Exception ex) {
            throw new KettleDatabaseException("Unexpected error inserting row", ex);
          }
        } else {
          // insertRow normal commit
          data.db.commit();
        }
        // Clear the batch/commit counter...
        //
        data.commitCounterMap.put(tableName, Integer.valueOf(0));
        rowIsSafe = true;
      } else {
        rowIsSafe = false;
      }

      // See if we need to get back the keys as well...
      if (meta.isReturningGeneratedKeys()) {
        RowMetaAndData extraKeys = data.db.getGeneratedKeys(insertStatement);

        if (extraKeys.getRowMeta().size() > 0) {
          // Send out the good word!
          // Only 1 key at the moment. (should be enough for now :-)
          generatedKey = extraKeys.getRowMeta().getInteger(extraKeys.getData(), 0);
        } else {
          // we have to throw something here, else we don't know what the
          // type is of the returned key(s) and we would violate our own rule
          // that a hop should always contain rows of the same type.
          throw new KettleStepException(
              "No generated keys while \"return generated keys\" is active!");
        }
      }
    } catch (KettleDatabaseBatchException be) {
      errorMessage = be.toString();
      batchProblem = true;
      sendToErrorRow = true;
      updateCounts = be.getUpdateCounts();
      exceptionsList = be.getExceptionsList();

      if (getStepMeta().isDoingErrorHandling()) {
        data.db.clearBatch(insertStatement);
        data.db.commit(true);
      } else {
        data.db.clearBatch(insertStatement);
        data.db.rollback();
        StringBuilder msg =
            new StringBuilder("Error batch inserting rows into table [" + tableName + "].");
        msg.append(Const.CR);
        msg.append("Errors encountered (first 10):").append(Const.CR);
        for (int x = 0; x < be.getExceptionsList().size() && x < 10; x++) {
          Exception exception = be.getExceptionsList().get(x);
          if (exception.getMessage() != null) {
            msg.append(exception.getMessage()).append(Const.CR);
          }
        }
        throw new KettleException(msg.toString(), be);
      }
    } catch (KettleDatabaseException dbe) {
      if (getStepMeta().isDoingErrorHandling()) {
        if (isRowLevel()) {
          logRowlevel("Written row to error handling : " + getInputRowMeta().getString(r));
        }

        if (data.useSafePoints) {
          data.db.rollback(data.savepoint);
          if (data.releaseSavepoint) {
            data.db.releaseSavepoint(data.savepoint);
          }
          // data.db.commit(true); // force a commit on the connection too.
        }

        sendToErrorRow = true;
        errorMessage = dbe.toString();
      } else {
        if (meta.ignoreErrors()) {
          if (data.warnings < 20) {
            if (log.isBasic()) {
              logBasic(
                  "WARNING: Couldn't insert row into table: "
                      + rowMeta.getString(r)
                      + Const.CR
                      + dbe.getMessage());
            }
          } else if (data.warnings == 20) {
            if (log.isBasic()) {
              logBasic(
                  "FINAL WARNING (no more then 20 displayed): Couldn't insert row into table: "
                      + rowMeta.getString(r)
                      + Const.CR
                      + dbe.getMessage());
            }
          }
          data.warnings++;
        } else {
          setErrors(getErrors() + 1);
          data.db.rollback();
          throw new KettleException(
              "Error inserting row into table ["
                  + tableName
                  + "] with values: "
                  + rowMeta.getString(r),
              dbe);
        }
      }
    }

    // We need to add a key
    if (generatedKey != null) {
      outputRowData = RowDataUtil.addValueData(outputRowData, rowMeta.size(), generatedKey);
    }

    if (data.batchMode) {
      if (sendToErrorRow) {
        if (batchProblem) {
          data.batchBuffer.add(outputRowData);
          outputRowData = null;

          processBatchException(errorMessage, updateCounts, exceptionsList);
        } else {
          // Simply add this row to the error row
          putError(rowMeta, r, 1L, errorMessage, null, "TOP001");
          outputRowData = null;
        }
      } else {
        data.batchBuffer.add(outputRowData);
        outputRowData = null;

        if (rowIsSafe) { // A commit was done and the rows are all safe (no error)
          for (int i = 0; i < data.batchBuffer.size(); i++) {
            Object[] row = data.batchBuffer.get(i);
            putRow(data.outputRowMeta, row);
            incrementLinesOutput();
          }
          // Clear the buffer
          data.batchBuffer.clear();
        }
      }
    } else {
      if (sendToErrorRow) {
        putError(rowMeta, r, 1, errorMessage, null, "TOP001");
        outputRowData = null;
      }
    }

    return outputRowData;
  }