Example #1
  /** Compare the two lists row by row, in order; fail the test if they differ. */
  public void checkRows(List<RowMetaAndData> rows1, List<RowMetaAndData> rows2) {
    int idx = 1;
    if (rows1.size() != rows2.size()) {
      fail("Number of rows is not the same: " + rows1.size() + " and " + rows2.size());
    }
    Iterator<RowMetaAndData> it1 = rows1.iterator();
    Iterator<RowMetaAndData> it2 = rows2.iterator();

    while (it1.hasNext() && it2.hasNext()) {
      RowMetaAndData rm1 = it1.next();
      RowMetaAndData rm2 = it2.next();

      Object[] r1 = rm1.getData();
      Object[] r2 = rm2.getData();

      if (r1.length != r2.length) {
        fail("row nr " + idx + "is not equal");
      }
      int[] fields = new int[r1.length];
      for (int ydx = 0; ydx < r1.length; ydx++) {
        fields[ydx] = ydx;
      }
      try {
        if (rm1.getRowMeta().compare(r1, r2, fields) != 0) {
          fail("row nr " + idx + "is not equal");
        }
      } catch (KettleValueException e) {
        fail("row nr " + idx + "is not equal");
      }

      idx++;
    }
  }
Example #2
  /** Test case for the injector step; also a showcase of how to use the injector. */
  public void testInjector() throws Exception {
    KettleEnvironment.init();

    //
    // Create a new transformation...
    //
    TransMeta transMeta = new TransMeta();
    transMeta.setName("injectortest");

    PluginRegistry registry = PluginRegistry.getInstance();

    //
    // create an injector step...
    //
    String injectorStepname = "injector step";
    InjectorMeta im = new InjectorMeta();

    // Set the information of the injector.

    String injectorPid = registry.getPluginId(StepPluginType.class, im);
    StepMeta injectorStep = new StepMeta(injectorPid, injectorStepname, im);
    transMeta.addStep(injectorStep);

    //
    // Create a dummy step
    //
    String dummyStepname = "dummy step";
    DummyTransMeta dm = new DummyTransMeta();

    String dummyPid = registry.getPluginId(StepPluginType.class, dm);
    StepMeta dummyStep = new StepMeta(dummyPid, dummyStepname, dm);
    transMeta.addStep(dummyStep);

    TransHopMeta hi = new TransHopMeta(injectorStep, dummyStep);
    transMeta.addTransHop(hi);

    // Now execute the transformation...
    Trans trans = new Trans(transMeta);

    trans.prepareExecution(null);

    StepInterface si = trans.getStepInterface(dummyStepname, 0);
    RowStepCollector rc = new RowStepCollector();
    si.addRowListener(rc);

    RowProducer rp = trans.addRowProducer(injectorStepname, 0);
    trans.startThreads();

    // add rows
    List<RowMetaAndData> inputList = createData();
    for (RowMetaAndData rm : inputList) {
      rp.putRow(rm.getRowMeta(), rm.getData());
    }
    rp.finished();

    trans.waitUntilFinished();

    List<RowMetaAndData> resultRows = rc.getRowsWritten();
    checkRows(resultRows, inputList);
  }
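For reference, the createData() helper used above is not shown in this example; below is a minimal sketch, assuming a simple two-field fixture (field names and values are illustrative, not the original test data):

  // Hypothetical fixture: builds two rows with a string field and an integer field.
  public List<RowMetaAndData> createData() {
    RowMetaInterface rowMeta = new RowMeta();
    rowMeta.addValueMeta(new ValueMeta("field1", ValueMetaInterface.TYPE_STRING));
    rowMeta.addValueMeta(new ValueMeta("field2", ValueMetaInterface.TYPE_INTEGER));

    List<RowMetaAndData> list = new ArrayList<RowMetaAndData>();
    list.add(new RowMetaAndData(rowMeta, new Object[] {"KETTLE", Long.valueOf(123L)}));
    list.add(new RowMetaAndData(rowMeta, new Object[] {"TEST", Long.valueOf(456L)}));
    return list;
  }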
Example #3
  public void test01_BasicSelectFrom() throws Exception {
    KettleEnvironment.init();

    String sqlQuery = "SELECT * FROM Service";

    SqlTransExecutor executor = new SqlTransExecutor(sqlQuery, getServices());

    final List<RowMetaAndData> rows = new ArrayList<RowMetaAndData>();

    // collect the eventual result rows...
    //
    executor.executeQuery(
        new RowAdapter() {
          @Override
          public void rowWrittenEvent(RowMetaInterface rowMeta, Object[] row)
              throws KettleStepException {
            rows.add(new RowMetaAndData(rowMeta, row));
          }
        });

    // Now the generated transformation is waiting for input so we
    // can start the service transformation
    //
    executor.waitUntilFinished();

    assertEquals(8, rows.size());
    RowMetaAndData row = rows.get(0);
    assertEquals(4, row.size());
  }
  public boolean init(StepMetaInterface smi, StepDataInterface sdi) {
    meta = (RowGeneratorMeta) smi;
    data = (RowGeneratorData) sdi;

    if (super.init(smi, sdi)) {
      // Determine the number of rows to generate...
      data.rowLimit = Const.toLong(environmentSubstitute(meta.getRowLimit()), -1L);
      data.rowsWritten = 0L;

      if (data.rowLimit < 0L) { // unable to parse the row limit
        logError(BaseMessages.getString(PKG, "RowGenerator.Wrong.RowLimit.Number"));
        return false; // fail
      }

      // Create a row (constants) with all the values in it...
      List<CheckResultInterface> remarks =
          new ArrayList<CheckResultInterface>(); // stores the errors...
      RowMetaAndData outputRow = buildRow(meta, remarks, getStepname());
      if (!remarks.isEmpty()) {
        for (CheckResultInterface remark : remarks) {
          logError(remark.getText());
        }
        return false;
      }
      data.outputRowData = outputRow.getData();
      data.outputRowMeta = outputRow.getRowMeta();
      return true;
    }
    return false;
  }
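Note that Const.toLong returns the supplied default when the string cannot be parsed, which is why -1L works as the "unable to parse" sentinel above; a quick illustration:

  long ok = Const.toLong("1000", -1L); // 1000L
  long bad = Const.toLong("10x0", -1L); // -1L, so init() logs the error and returns false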
  @Override
  public Result execute(Result prevResult, int k) throws KettleException {

    Result result = prevResult;

    ML_Classify direct = new ML_Classify();
    direct.setName(this.getRecordsetName());

    direct.setRecordsetName(this.getRecordsetName());
    direct.setModel(model);
    direct.setIndependentVar(independentVar);
    direct.setClassifyType(classifyType);
    direct.setDataType(dataType);

    direct.setRidge(ridge);
    direct.setEpsilon(epsilon);
    direct.setMaxIter(maxIter);
    direct.setPasses(passes);
    direct.setAlpha(alpha);

    // private Text algType; //NaiveBayes, Logistic
    // private Text dependentVar; // 1
    // private Text independentVar; // 2

    // ml.setIterations(this.getIterations());
    // ml.setThreshold(this.getThreshold());
    // ml.setThreshold(this.getThreshold());

    logBasic("{Iterate Job} Execute = " + direct.ecl());

    logBasic("{Iterate Job} Previous =" + result.getLogText());

    result.setResult(true);

    RowMetaAndData data = new RowMetaAndData();
    data.addValue("ecl", Value.VALUE_TYPE_STRING, direct.ecl());

    List<RowMetaAndData> list = result.getRows();
    if (list == null) {
      list = new ArrayList<RowMetaAndData>();
    }
    list.add(data);

    String eclCode = "";
    for (RowMetaAndData rowData : list) {
      String code = rowData.getString("ecl", null);
      if (code != null) {
        eclCode += code;
      }
    }
    logBasic("{Iterate Job} ECL Code = " + eclCode);

    result.setRows(list);

    return result;
  }
  public ResultSet getTableTypes() throws SQLException {
    List<RowMetaAndData> rowAndDatas = new ArrayList<RowMetaAndData>();
    RowMetaAndData rd = new RowMetaAndData();
    rd.addValue("TABLE_TYPE", ValueMetaInterface.TYPE_STRING, Constants.TABLE_TYPE_TABLE);
    rowAndDatas.add(rd);
    return new KettleJDBCResultSet(null, rowAndDatas, "*");
  }
Example #7
  @Override
  public Result execute(Result prevResult, int k) throws KettleException {

    Result result = modifyResults(prevResult);
    if (result.isStopped()) {
      return result;
    }
    Rollup rollup = new Rollup();
    rollup.setName(this.getRecordsetName());
    rollup.setRecordset(this.getRecordset());
    rollup.setRecordFormat(this.getRecordset());
    rollup.setRunLocal(this.getRunLocal());

    rollup.setCondition(this.getCondition());
    rollup.setFieldlist(this.getFieldlist());

    if (this.group.equalsIgnoreCase("yes")) {
      rollup.setGroup("GROUP");
    } else {
      rollup.setGroup("");
    }

    rollup.setTransformName(this.getTransformName());

    rollup.setTransform(generateEclForMapperGrid());

    logBasic("{rollup Job} Execute = " + rollup.ecl());

    logBasic("{rollup Job} Previous =" + result.getLogText());

    result.setResult(true);

    RowMetaAndData data = new RowMetaAndData();
    data.addValue("ecl", Value.VALUE_TYPE_STRING, rollup.ecl());

    List<RowMetaAndData> list = result.getRows();
    if (list == null) {
      list = new ArrayList<RowMetaAndData>();
    }
    list.add(data);
    String eclCode = parseEclFromRowData(list);
    logBasic("{rollup Job} ECL Code = " + eclCode);
    result.setRows(list);

    return result;
  }
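The parseEclFromRowData() helper called above is not shown here; a plausible sketch, assuming it concatenates the "ecl" fields the same way the inlined loops in the other job entries do:

  // Hypothetical reconstruction: concatenates the "ecl" strings of all result rows.
  private String parseEclFromRowData(List<RowMetaAndData> list) throws KettleValueException {
    StringBuilder eclCode = new StringBuilder();
    for (RowMetaAndData rowData : list) {
      String code = rowData.getString("ecl", null);
      if (code != null) {
        eclCode.append(code);
      }
    }
    return eclCode.toString();
  }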
  public synchronized int getNrDatabases() throws KettleException {
    int retval = 0;

    String sql = "SELECT COUNT(*) FROM " + quoteTable(KettleDatabaseRepository.TABLE_R_DATABASE);
    RowMetaAndData r = repository.connectionDelegate.getOneRow(sql);
    if (r != null) {
      retval = (int) r.getInteger(0, 0L);
    }

    return retval;
  }
  private synchronized ObjectId insertCluster(ClusterSchema clusterSchema) throws KettleException {
    if (getClusterID(clusterSchema.getName()) != null) {
      // This cluster schema name is already in use. Throw an exception.
      throw new KettleObjectExistsException(
          "Failed to create object in repository. Object ["
              + clusterSchema.getName()
              + "] already exists.");
    }

    ObjectId id = repository.connectionDelegate.getNextClusterID();

    RowMetaAndData table = new RowMetaAndData();

    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_CLUSTER_ID_CLUSTER, ValueMetaInterface.TYPE_INTEGER),
        id);
    table.addValue(
        new ValueMeta(KettleDatabaseRepository.FIELD_CLUSTER_NAME, ValueMetaInterface.TYPE_STRING),
        clusterSchema.getName());
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_CLUSTER_BASE_PORT, ValueMetaInterface.TYPE_STRING),
        clusterSchema.getBasePort());
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_CLUSTER_SOCKETS_BUFFER_SIZE,
            ValueMetaInterface.TYPE_STRING),
        clusterSchema.getSocketsBufferSize());
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_CLUSTER_SOCKETS_FLUSH_INTERVAL,
            ValueMetaInterface.TYPE_STRING),
        clusterSchema.getSocketsFlushInterval());
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_CLUSTER_SOCKETS_COMPRESSED,
            ValueMetaInterface.TYPE_BOOLEAN),
        Boolean.valueOf(clusterSchema.isSocketsCompressed()));
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_CLUSTER_DYNAMIC, ValueMetaInterface.TYPE_BOOLEAN),
        Boolean.valueOf(clusterSchema.isDynamic()));

    repository
        .connectionDelegate
        .getDatabase()
        .prepareInsert(table.getRowMeta(), KettleDatabaseRepository.TABLE_R_CLUSTER);
    repository.connectionDelegate.getDatabase().setValuesInsert(table);
    repository.connectionDelegate.getDatabase().insertRow();
    repository.connectionDelegate.getDatabase().closeInsert();

    return id;
  }
  public void getFields(
      RowMetaInterface r,
      String name,
      RowMetaInterface[] info,
      StepMeta nextStep,
      VariableSpace space,
      Repository repository,
      IMetaStore metaStore)
      throws KettleStepException {
    RowMetaAndData add =
        ExecSQL.getResultRow(
            new Result(), getUpdateField(), getInsertField(), getDeleteField(), getReadField());

    r.mergeRowMeta(add.getRowMeta());
  }
Example #11
  public ResultSet getSchemas() throws SQLException {
    List<RowMetaAndData> rowAndDatas = new ArrayList<RowMetaAndData>();
    // if USE_TRANSNAME_AS_SCHEMA is true, then we use the filename or
    // transformation name as the schema

    for (String name : this.stepsMap.keySet()) {
      RowMetaAndData rd = new RowMetaAndData();
      rd.addValue("TABLE_SCHEM", ValueMetaInterface.TYPE_STRING, name);
      rowAndDatas.add(rd);
    }

    KettleJDBCResultSet rs = new KettleJDBCResultSet(null, rowAndDatas, "*");
    return rs;
  }
  public synchronized int getNrDatabaseAttributes(ObjectId id_database) throws KettleException {
    int retval = 0;

    String sql =
        "SELECT COUNT(*) FROM "
            + quoteTable(KettleDatabaseRepository.TABLE_R_DATABASE_ATTRIBUTE)
            + " WHERE "
            + quote(KettleDatabaseRepository.FIELD_DATABASE_ATTRIBUTE_ID_DATABASE)
            + " = "
            + id_database;
    RowMetaAndData r = repository.connectionDelegate.getOneRow(sql);
    if (r != null) {
      retval = (int) r.getInteger(0, 0L);
    }

    return retval;
  }
Example #13
  // Load the user with the given login from the repository; the password is not verified...
  public UserInfo(Repository rep, String login) throws KettleException {
    try {
      long id_profile;

      setID(rep.getUserID(login));
      if (getID() > 0) {
        RowMetaAndData r = rep.getUser(getID());
        if (r != null) {
          this.login = r.getString("LOGIN", null);
          password = Encr.decryptPassword(r.getString("PASSWORD", null));
          name = r.getString("NAME", null);
          description = r.getString("DESCRIPTION", null);
          enabled = r.getBoolean("ENABLED", false);
          id_profile = r.getInteger("ID_PROFILE", 0);
          profile = new ProfileMeta(rep, id_profile);
        } else {
          setID(-1L);
          throw new KettleDatabaseException(
              Messages.getString("UserInfo.Error.UserNotFound", login));
        }
      } else {
        setID(-1L);
        throw new KettleDatabaseException(Messages.getString("UserInfo.Error.UserNotFound", login));
      }
    } catch (KettleDatabaseException dbe) {
      rep.log.logError(
          toString(), Messages.getString("UserInfo.Error.UserNotLoaded", login, dbe.getMessage()));
      throw new KettleException(Messages.getString("UserInfo.Error.UserNotLoaded", login, ""), dbe);
    }
  }
  public ClusterSchema loadClusterSchema(ObjectId id_cluster_schema, List<SlaveServer> slaveServers)
      throws KettleException {
    ClusterSchema clusterSchema = new ClusterSchema();
    RowMetaAndData row = getClusterSchema(id_cluster_schema);

    clusterSchema.setObjectId(id_cluster_schema);
    clusterSchema.setName(row.getString(KettleDatabaseRepository.FIELD_CLUSTER_NAME, null));
    clusterSchema.setBasePort(
        row.getString(KettleDatabaseRepository.FIELD_CLUSTER_BASE_PORT, null));
    clusterSchema.setSocketsBufferSize(
        row.getString(KettleDatabaseRepository.FIELD_CLUSTER_SOCKETS_BUFFER_SIZE, null));
    clusterSchema.setSocketsFlushInterval(
        row.getString(KettleDatabaseRepository.FIELD_CLUSTER_SOCKETS_FLUSH_INTERVAL, null));
    clusterSchema.setSocketsCompressed(
        row.getBoolean(KettleDatabaseRepository.FIELD_CLUSTER_SOCKETS_COMPRESSED, true));
    clusterSchema.setDynamic(row.getBoolean(KettleDatabaseRepository.FIELD_CLUSTER_DYNAMIC, true));

    ObjectId[] pids = repository.getClusterSlaveIDs(id_cluster_schema);
    for (int i = 0; i < pids.length; i++) {
      SlaveServer slaveServer = repository.loadSlaveServer(pids[i], null); // Load last version
      SlaveServer reference = SlaveServer.findSlaveServer(slaveServers, slaveServer.getName());
      if (reference != null) {
        clusterSchema.getSlaveServers().add(reference);
      } else {
        clusterSchema.getSlaveServers().add(slaveServer);
      }
    }

    return clusterSchema;
  }
  public synchronized int getNrStepAttributes(ObjectId id_step) throws KettleException {
    int retval = 0;

    RowMetaAndData par = repository.connectionDelegate.getParameterMetaData(id_step);
    String sql =
        "SELECT COUNT(*) FROM "
            + quoteTable(KettleDatabaseRepository.TABLE_R_STEP_ATTRIBUTE)
            + " WHERE "
            + quote(KettleDatabaseRepository.FIELD_STEP_ATTRIBUTE_ID_STEP)
            + " = ? ";
    RowMetaAndData r =
        repository.connectionDelegate.getOneRow(sql, par.getRowMeta(), par.getData());
    if (r != null) {
      retval = (int) r.getInteger(0, 0L);
    }

    return retval;
  }
  public Collection<RowMetaAndData> getDatabaseAttributes()
      throws KettleDatabaseException, KettleValueException {
    List<RowMetaAndData> attrs = new ArrayList<RowMetaAndData>();
    List<Object[]> rows =
        repository.connectionDelegate.getRows(
            "SELECT * FROM " + quoteTable(KettleDatabaseRepository.TABLE_R_DATABASE_ATTRIBUTE), 0);
    for (Object[] row : rows) {
      RowMetaAndData rowWithMeta =
          new RowMetaAndData(repository.connectionDelegate.getReturnRowMeta(), row);
      long id =
          rowWithMeta.getInteger(
              quote(KettleDatabaseRepository.FIELD_DATABASE_ATTRIBUTE_ID_DATABASE_ATTRIBUTE), 0);
      if (id > 0) {
        attrs.add(rowWithMeta);
      }
    }
    return attrs;
  }
  public synchronized int getNrDatabases(ObjectId id_transformation) throws KettleException {
    int retval = 0;

    RowMetaAndData transIdRow =
        repository.connectionDelegate.getParameterMetaData(id_transformation);
    String sql =
        "SELECT COUNT(*) FROM "
            + quoteTable(KettleDatabaseRepository.TABLE_R_STEP_DATABASE)
            + " WHERE "
            + quote(KettleDatabaseRepository.FIELD_STEP_DATABASE_ID_TRANSFORMATION)
            + " = ? ";
    RowMetaAndData r =
        repository.connectionDelegate.getOneRow(sql, transIdRow.getRowMeta(), transIdRow.getData());
    if (r != null) {
      retval = (int) r.getInteger(0, 0L);
    }

    return retval;
  }
Example #18
  @Override
  public Result execute(Result prevResult, int k) throws KettleException {

    Result result = prevResult;

    logBasic("{Group job} Creating Group object");

    Group group = new Group();

    logBasic("{Group job} Group object created");

    group.setName(this.getRecordSetName());
    group.setRecordSet(this.getRecordSet());
    group.setBreakCriteria(this.getBreakCriteria());
    group.setIsAll(this.getIsAll());
    group.setRunLocal(this.getIsRunLocal());

    logBasic("{Group job} Execute = " + group.ecl());

    logBasic("{Group job} Previous = " + result.getLogText());

    result.setResult(true);

    RowMetaAndData data = new RowMetaAndData();
    data.addValue("ecl", Value.VALUE_TYPE_STRING, group.ecl());

    List<RowMetaAndData> list = result.getRows();
    if (list == null) {
      list = new ArrayList<RowMetaAndData>();
    }
    list.add(data);

    String eclCode = "";
    for (RowMetaAndData rowData : list) {
      String code = rowData.getString("ecl", null);
      if (code != null) {
        eclCode += code;
      }
    }
    logBasic("{Group job} ECL Code = " + eclCode);
    result.setRows(list);

    return result;
  }
Example #19
  @Override
  public Result execute(Result prevResult, int k) throws KettleException {

    Result result = prevResult;

    Sort sort = new Sort();
    sort.setFields(getFields());
    sort.setDatasetName(getDatasetName());
    sort.setName(getRecordsetName());
    logBasic("{Sort Job} Execute = " + sort.ecl());

    logBasic("{Sort Job} Previous =" + result.getLogText());

    result.setResult(true);

    RowMetaAndData data = new RowMetaAndData();
    data.addValue("ecl", Value.VALUE_TYPE_STRING, sort.ecl());

    List<RowMetaAndData> list = result.getRows();
    if (list == null) {
      list = new ArrayList<RowMetaAndData>();
    }
    list.add(data);

    String eclCode = "";
    for (RowMetaAndData rowData : list) {
      String code = rowData.getString("ecl", null);
      if (code != null) {
        eclCode += code;
      }
    }
    logBasic("{Sort Job} ECL Code = " + eclCode);

    result.setRows(list);

    return result;
  }
Example #20
  public RowMetaAndData fillTableRow() {
    RowMetaAndData r = new RowMetaAndData();
    r.addValue(new ValueMeta("ID_USER", ValueMetaInterface.TYPE_INTEGER), new Long(getID()));
    r.addValue(new ValueMeta("LOGIN", ValueMetaInterface.TYPE_STRING), login);
    r.addValue(
        new ValueMeta("PASSWORD", ValueMetaInterface.TYPE_STRING), Encr.encryptPassword(password));
    r.addValue(new ValueMeta("NAME", ValueMetaInterface.TYPE_STRING), name);
    r.addValue(new ValueMeta("DESCRIPTION", ValueMetaInterface.TYPE_STRING), description);
    r.addValue(new ValueMeta("ENABLED", ValueMetaInterface.TYPE_BOOLEAN), Boolean.valueOf(enabled));
    r.addValue(
        new ValueMeta("ID_PROFILE", ValueMetaInterface.TYPE_INTEGER),
        Long.valueOf(profile.getID()));

    return r;
  }
Example #21
  public void getFields(
      RowMetaInterface row,
      String origin,
      RowMetaInterface[] info,
      StepMeta nextStep,
      VariableSpace space)
      throws KettleStepException {
    List<CheckResultInterface> remarks = new ArrayList<CheckResultInterface>();
    RowMetaAndData rowMetaAndData = RowGenerator.buildRow(this, remarks, origin);
    if (!remarks.isEmpty()) {
      StringBuilder stringRemarks = new StringBuilder();
      for (CheckResultInterface remark : remarks) {
        stringRemarks.append(remark.toString()).append(Const.CR);
      }
      throw new KettleStepException(stringRemarks.toString());
    }

    for (ValueMetaInterface valueMeta : rowMetaAndData.getRowMeta().getValueMetaList()) {
      valueMeta.setOrigin(origin);
    }

    row.mergeRowMeta(rowMetaAndData.getRowMeta());
  }
  /**
   * Compare the two lists row by row, in order; fail the test if they differ.
   *
   * @param rows1 set 1 of rows to compare
   * @param rows2 set 2 of rows to compare
   * @param fileNameColumn Number of the column containing the filename. This is only checked for
   *     being non-null (some systems may canonicalize names differently than the input).
   */
  public static void checkRows(
      List<RowMetaAndData> rows1, List<RowMetaAndData> rows2, int fileNameColumn)
      throws TestFailedException {

    int idx = 1;
    if (rows1.size() != rows2.size()) {
      throw new TestFailedException(
          "Number of rows is not the same: " + rows1.size() + " and " + rows2.size());
    }
    Iterator<RowMetaAndData> itrRows1 = rows1.iterator();
    Iterator<RowMetaAndData> itrRows2 = rows2.iterator();

    while (itrRows1.hasNext() && itrRows2.hasNext()) {
      RowMetaAndData rowMetaAndData1 = itrRows1.next();
      RowMetaAndData rowMetaAndData2 = itrRows2.next();

      RowMetaInterface rowMetaInterface1 = rowMetaAndData1.getRowMeta();

      Object[] rowObject1 = rowMetaAndData1.getData();
      Object[] rowObject2 = rowMetaAndData2.getData();

      if (rowMetaAndData1.size() != rowMetaAndData2.size()) {
        throw new TestFailedException("row number " + idx + " is not equal");
      }

      int[] fields = new int[rowMetaInterface1.size()];
      for (int ydx = 0; ydx < rowMetaInterface1.size(); ydx++) {
        fields[ydx] = ydx;
      }

      try {
        if (fileNameColumn >= 0) {
          rowObject1[fileNameColumn] = rowObject2[fileNameColumn];
        }
        if (rowMetaAndData1.getRowMeta().compare(rowObject1, rowObject2, fields) != 0) {
          throw new ComparisonFailure(
              "row nr " + idx + " is not equal",
              rowMetaInterface1.getString(rowObject1),
              rowMetaInterface1.getString(rowObject2));
        }
      } catch (KettleValueException e) {
        throw new TestFailedException("row nr " + idx + " is not equal");
      }
      idx++;
    }
  }
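For example, a test could compare collected rows against expected ones while ignoring a generated filename in column 0 (the fixture and collector names below are hypothetical):

  List<RowMetaAndData> expected = createExpectedRows(); // hypothetical fixture
  List<RowMetaAndData> actual = collector.getRowsWritten();
  // Column 0 holds a generated filename, so its value is excluded from the comparison:
  checkRows(actual, expected, 0);
  // Passing a negative fileNameColumn (e.g. -1) compares every column strictly.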
Example #23
  public ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException {
    log.debug("getPrimaryKeys........");

    List<RowMetaAndData> rowAndDatas = new ArrayList<RowMetaAndData>();

    RowMetaAndData rd = new RowMetaAndData();
    rd.addValue("TABLE_CAT", ValueMetaInterface.TYPE_STRING, catalog);
    rd.addValue("TABLE_SCHEM", ValueMetaInterface.TYPE_STRING, schema);
    rd.addValue("TABLE_NAME", ValueMetaInterface.TYPE_STRING, table);
    rd.addValue("COLUMN_NAME", ValueMetaInterface.TYPE_STRING, "");
    rd.addValue("KEY_SEQ", ValueMetaInterface.TYPE_INTEGER, "1");
    rd.addValue("PK_NAME", ValueMetaInterface.TYPE_STRING, "");
    rowAndDatas.add(rd);
    KettleJDBCResultSet rs = new KettleJDBCResultSet(null, rowAndDatas, "*");
    return rs;
  }
  public synchronized void updateCluster(ClusterSchema clusterSchema) throws KettleException {
    RowMetaAndData table = new RowMetaAndData();

    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_CLUSTER_ID_CLUSTER, ValueMetaInterface.TYPE_INTEGER),
        clusterSchema.getObjectId());
    table.addValue(
        new ValueMeta(KettleDatabaseRepository.FIELD_CLUSTER_NAME, ValueMetaInterface.TYPE_STRING),
        clusterSchema.getName());
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_CLUSTER_BASE_PORT, ValueMetaInterface.TYPE_STRING),
        clusterSchema.getBasePort());
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_CLUSTER_SOCKETS_BUFFER_SIZE,
            ValueMetaInterface.TYPE_STRING),
        clusterSchema.getSocketsBufferSize());
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_CLUSTER_SOCKETS_FLUSH_INTERVAL,
            ValueMetaInterface.TYPE_STRING),
        clusterSchema.getSocketsFlushInterval());
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_CLUSTER_SOCKETS_COMPRESSED,
            ValueMetaInterface.TYPE_BOOLEAN),
        Boolean.valueOf(clusterSchema.isSocketsCompressed()));
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_CLUSTER_DYNAMIC, ValueMetaInterface.TYPE_BOOLEAN),
        Boolean.valueOf(clusterSchema.isDynamic()));

    repository.connectionDelegate.updateTableRow(
        KettleDatabaseRepository.TABLE_R_CLUSTER,
        KettleDatabaseRepository.FIELD_CLUSTER_ID_CLUSTER,
        table,
        clusterSchema.getObjectId());
  }
Example #25
  @Test
  public void testGetFeatureSummary() throws Exception {
    DatabaseMeta databaseMeta = mock(DatabaseMeta.class);
    OracleDatabaseMeta odbm = new OracleDatabaseMeta();
    doCallRealMethod().when(databaseMeta).setDatabaseInterface(any(DatabaseInterface.class));
    doCallRealMethod().when(databaseMeta).getFeatureSummary();
    doCallRealMethod().when(databaseMeta).getAttributes();
    databaseMeta.setDatabaseInterface(odbm);
    List<RowMetaAndData> result = databaseMeta.getFeatureSummary();
    assertNotNull(result);
    for (RowMetaAndData rmd : result) {
      assertEquals(2, rmd.getRowMeta().size());
      assertEquals("Parameter", rmd.getRowMeta().getValueMeta(0).getName());
      assertEquals(ValueMetaInterface.TYPE_STRING, rmd.getRowMeta().getValueMeta(0).getType());
      assertEquals("Value", rmd.getRowMeta().getValueMeta(1).getName());
      assertEquals(ValueMetaInterface.TYPE_STRING, rmd.getRowMeta().getValueMeta(1).getType());
    }
  }
  private synchronized ObjectId insertDatabaseAttribute(
      ObjectId id_database, String code, String value_str) throws KettleException {
    ObjectId id = repository.connectionDelegate.getNextDatabaseAttributeID();

    RowMetaAndData table = new RowMetaAndData();

    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_DATABASE_ATTRIBUTE_ID_DATABASE_ATTRIBUTE,
            ValueMetaInterface.TYPE_INTEGER),
        id);
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_DATABASE_ATTRIBUTE_ID_DATABASE,
            ValueMetaInterface.TYPE_INTEGER),
        id_database);
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_DATABASE_ATTRIBUTE_CODE, ValueMetaInterface.TYPE_STRING),
        code);
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_DATABASE_ATTRIBUTE_VALUE_STR,
            ValueMetaInterface.TYPE_STRING),
        value_str);

    /*
     * If we have prepared the insert, we don't do it again. We assume that all the step insert statements come one
     * after the other.
     */
    repository
        .connectionDelegate
        .getDatabase()
        .prepareInsert(table.getRowMeta(), KettleDatabaseRepository.TABLE_R_DATABASE_ATTRIBUTE);
    repository.connectionDelegate.getDatabase().setValuesInsert(table);
    repository.connectionDelegate.getDatabase().insertRow();
    repository.connectionDelegate.getDatabase().closeInsert();

    if (log.isDebug()) {
      log.logDebug("saved database attribute [" + code + "]");
    }

    return id;
  }
Example #27
  private Object[] getRowFromCache(RowMetaInterface lookupMeta, Object[] lookupRow)
      throws KettleException {
    if (data.allEquals) {
      // Only do the hashtable lookup when all conditions are "="; otherwise
      // conditions like >, <, <> would give wrong results.
      TimedRow timedRow = data.look.get(new RowMetaAndData(data.lookupMeta, lookupRow));
      if (timedRow != null) {
        return timedRow.getRow();
      }
    } else { // special handling of conditions <,>, <> etc.
      if (!data.hasDBCondition) { // e.g. LIKE not handled by this routine, yet
        // TODO: find an alternative way to look up the data based on the condition.
        // Not all conditions are "=" so we are going to have to evaluate row by row
        // A sorted list or index might be a good solution here...
        //
        Enumeration<RowMetaAndData> keys = data.look.keys();
        while (keys.hasMoreElements()) {
          RowMetaAndData key = keys.nextElement();
          // Now verify that the key is matching our conditions...
          //
          boolean match = true;
          int lookupIndex = 0;
          for (int i = 0; i < data.conditions.length && match; i++) {
            ValueMetaInterface cmpMeta = lookupMeta.getValueMeta(lookupIndex);
            Object cmpData = lookupRow[lookupIndex];
            ValueMetaInterface keyMeta = key.getValueMeta(i);
            Object keyData = key.getData()[i];

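            // Note: compare() is invoked below as cmp-versus-key, so the operators are
            // reversed relative to the condition on the key: e.g. "key < cmp"
            // (CONDITION_LT) matches when cmpMeta.compare(cmpData, keyMeta, keyData) > 0.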
            switch (data.conditions[i]) {
              case DatabaseLookupMeta.CONDITION_EQ:
                match = (cmpMeta.compare(cmpData, keyMeta, keyData) == 0);
                break;
              case DatabaseLookupMeta.CONDITION_NE:
                match = (cmpMeta.compare(cmpData, keyMeta, keyData) != 0);
                break;
              case DatabaseLookupMeta.CONDITION_LT:
                match = (cmpMeta.compare(cmpData, keyMeta, keyData) > 0);
                break;
              case DatabaseLookupMeta.CONDITION_LE:
                match = (cmpMeta.compare(cmpData, keyMeta, keyData) >= 0);
                break;
              case DatabaseLookupMeta.CONDITION_GT:
                match = (cmpMeta.compare(cmpData, keyMeta, keyData) < 0);
                break;
              case DatabaseLookupMeta.CONDITION_GE:
                match = (cmpMeta.compare(cmpData, keyMeta, keyData) <= 0);
                break;
              case DatabaseLookupMeta.CONDITION_IS_NULL:
                match = keyMeta.isNull(keyData);
                break;
              case DatabaseLookupMeta.CONDITION_IS_NOT_NULL:
                match = !keyMeta.isNull(keyData);
                break;
              case DatabaseLookupMeta.CONDITION_BETWEEN:
                // Between key >= cmp && key <= cmp2
                ValueMetaInterface cmpMeta2 = lookupMeta.getValueMeta(lookupIndex + 1);
                Object cmpData2 = lookupRow[lookupIndex + 1];
                match = (keyMeta.compare(keyData, cmpMeta, cmpData) >= 0);
                if (match) {
                  match = (keyMeta.compare(keyData, cmpMeta2, cmpData2) <= 0);
                }
                lookupIndex++;
                break;
                // TODO: add LIKE operator (think of changing the hasDBCondition logic then)
              default:
                match = false;
                // Avoid looping in here the next time; also a safety net in case a
                // new condition type is introduced.
                data.hasDBCondition = true;
                break;
            }
            lookupIndex++;
          }
          if (match) {
            TimedRow timedRow = data.look.get(key);
            if (timedRow != null) {
              return timedRow.getRow();
            }
          }
        }
      }
    }
    return null;
  }
Example #28
  public ResultSet getTables(
      String catalog, String schemaPattern, String tableNamePattern, String[] types)
      throws SQLException {
    List<RowMetaAndData> rowAndDatas = new ArrayList<RowMetaAndData>();
    // get the steps from *.ktr or *.job
    log.debug(
        "catalog:"
            + catalog
            + " , schemaPattern:"
            + schemaPattern
            + " , tableNamePattern:"
            + tableNamePattern);
    if (!isDir) {
      Set<Map.Entry<String, String[]>> tables = this.stepsMap.entrySet();
      log.debug("tables:" + tables);
      for (Map.Entry<String, String[]> o : tables) {
        String[] values = o.getValue();
        for (int i = 0; i < values.length; i++) {
          RowMetaAndData rd = new RowMetaAndData();
          rd.addValue("TABLE_CAT", ValueMetaInterface.TYPE_STRING, "jdbckettle");
          rd.addValue("TABLE_SCHEM", ValueMetaInterface.TYPE_STRING, "jdbckettle");
          rd.addValue("TABLE_NAME", ValueMetaInterface.TYPE_STRING, values[i]);

          rd.addValue("TABLE_TYPE", ValueMetaInterface.TYPE_STRING, Constants.TABLE_TYPE_TABLE);
          rd.addValue("REMARKS", ValueMetaInterface.TYPE_STRING, "");
          rd.addValue("TYPE_CAT", ValueMetaInterface.TYPE_STRING, Constants.TABLE_TYPE_TABLE);
          rd.addValue("TYPE_SCHEM", ValueMetaInterface.TYPE_STRING, Constants.TABLE_TYPE_TABLE);
          rd.addValue("TYPE_NAME", ValueMetaInterface.TYPE_STRING, Constants.TABLE_TYPE_TABLE);
          rd.addValue("SELF_REFERENCING_COL_NAME", ValueMetaInterface.TYPE_STRING, "");
          rd.addValue("REF_GENERATION", ValueMetaInterface.TYPE_STRING, "");
          rowAndDatas.add(rd);
        }
      }
    } else {
      Set<Map.Entry<String, String[]>> tables = this.stepsMap.entrySet();
      // for BIRT special schema

      boolean isBirtSchema = this.stepsMap.containsKey(schemaPattern);
      for (Map.Entry<String, String[]> o : tables) {
        String schema = o.getKey();

        if ((!schema.equals(schemaPattern)) && isBirtSchema) {
          continue;
        }
        String[] values = o.getValue();

        for (int i = 0; i < values.length; i++) {
          RowMetaAndData rd = new RowMetaAndData();
          rd.addValue("TABLE_CAT", ValueMetaInterface.TYPE_STRING, "jdbckettle");
          rd.addValue("TABLE_SCHEM", ValueMetaInterface.TYPE_STRING, "jdbckettle");
          rd.addValue("TABLE_NAME", ValueMetaInterface.TYPE_STRING, values[i]);

          rd.addValue("TABLE_TYPE", ValueMetaInterface.TYPE_STRING, Constants.TABLE_TYPE_TABLE);
          rd.addValue("REMARKS", ValueMetaInterface.TYPE_STRING, "");
          rd.addValue("TYPE_CAT", ValueMetaInterface.TYPE_STRING, Constants.TABLE_TYPE_TABLE);
          rd.addValue("TYPE_SCHEM", ValueMetaInterface.TYPE_STRING, Constants.TABLE_TYPE_TABLE);
          rd.addValue("TYPE_NAME", ValueMetaInterface.TYPE_STRING, Constants.TABLE_TYPE_TABLE);
          rd.addValue("SELF_REFERENCING_COL_NAME", ValueMetaInterface.TYPE_STRING, "");
          rd.addValue("REF_GENERATION", ValueMetaInterface.TYPE_STRING, "");
          rowAndDatas.add(rd);
        }
      }
    }
    KettleJDBCResultSet rs = new KettleJDBCResultSet(null, rowAndDatas, "*");
    return rs;
  }
  // CHECKSTYLE:LineLength:OFF
  public synchronized ObjectId insertStep(
      ObjectId id_transformation,
      String name,
      String description,
      String steptype,
      boolean distribute,
      long copies,
      long gui_location_x,
      long gui_location_y,
      boolean gui_draw,
      String copiesString)
      throws KettleException {
    ObjectId id = repository.connectionDelegate.getNextStepID();

    ObjectId id_step_type = getStepTypeID(steptype);

    RowMetaAndData table = new RowMetaAndData();

    table.addValue(
        new ValueMeta(KettleDatabaseRepository.FIELD_STEP_ID_STEP, ValueMetaInterface.TYPE_INTEGER),
        id);
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_STEP_ID_TRANSFORMATION, ValueMetaInterface.TYPE_INTEGER),
        id_transformation);
    table.addValue(
        new ValueMeta(KettleDatabaseRepository.FIELD_STEP_NAME, ValueMetaInterface.TYPE_STRING),
        name);
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_STEP_DESCRIPTION, ValueMetaInterface.TYPE_STRING),
        description);
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_STEP_ID_STEP_TYPE, ValueMetaInterface.TYPE_INTEGER),
        id_step_type);
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_STEP_DISTRIBUTE, ValueMetaInterface.TYPE_BOOLEAN),
        Boolean.valueOf(distribute));
    table.addValue(
        new ValueMeta(KettleDatabaseRepository.FIELD_STEP_COPIES, ValueMetaInterface.TYPE_INTEGER),
        Long.valueOf(copies));
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_STEP_GUI_LOCATION_X, ValueMetaInterface.TYPE_INTEGER),
        Long.valueOf(gui_location_x));
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_STEP_GUI_LOCATION_Y, ValueMetaInterface.TYPE_INTEGER),
        Long.valueOf(gui_location_y));
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_STEP_GUI_DRAW, ValueMetaInterface.TYPE_BOOLEAN),
        Boolean.valueOf(gui_draw));
    table.addValue(
        new ValueMeta(
            KettleDatabaseRepository.FIELD_STEP_COPIES_STRING, ValueMetaInterface.TYPE_STRING),
        copiesString);

    repository
        .connectionDelegate
        .getDatabase()
        .prepareInsert(table.getRowMeta(), KettleDatabaseRepository.TABLE_R_STEP);
    repository.connectionDelegate.getDatabase().setValuesInsert(table);
    repository.connectionDelegate.getDatabase().insertRow();
    repository.connectionDelegate.getDatabase().closeInsert();

    return id;
  }
Example #30
  public ResultSet getColumns(
      String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern)
      throws SQLException {

    log.debug(
        "catalog:"
            + catalog
            + " , schemaPattern:"
            + schemaPattern
            + " , tableNamePattern:"
            + tableNamePattern
            + " ,columnNamePattern:"
            + columnNamePattern);

    List<RowMetaAndData> rowAndDatas = new ArrayList<RowMetaAndData>();
    // if USE_TRANSNAME_AS_SCHEMA is true, then we use the filename or
    // transformation name as the schema
    if (!isDir) {

      log.debug(helper.getRowMeta(tableNamePattern));
      RowMeta rm = helper.getRowMeta(tableNamePattern);
      ColInfo[] colInfo = KettleHelper.convert(rm);
      String[] columns = rm.getFieldNames();
      for (int i = 0; columns != null && i < columns.length; i++) {

        RowMetaAndData rd = new RowMetaAndData();
        rd.addValue("TABLE_CAT", ValueMetaInterface.TYPE_STRING, catalog);
        rd.addValue("TABLE_SCHEM", ValueMetaInterface.TYPE_STRING, schemaPattern);
        rd.addValue("TABLE_NAME", ValueMetaInterface.TYPE_STRING, tableNamePattern);
        rd.addValue("COLUMN_NAME", ValueMetaInterface.TYPE_STRING, columns[i]);
        rd.addValue("DATA_TYPE", ValueMetaInterface.TYPE_INTEGER, colInfo[i].jdbcType);
        rd.addValue("TYPE_NAME", ValueMetaInterface.TYPE_STRING, "");
        rd.addValue("COLUMN_SIZE", ValueMetaInterface.TYPE_INTEGER, columns.length);
        rd.addValue("BUFFER_LENGTH", ValueMetaInterface.TYPE_INTEGER, "20");

        rd.addValue("DECIMAL_DIGITS", ValueMetaInterface.TYPE_INTEGER, "20");
        rd.addValue("NUM_PREC_RADIX", ValueMetaInterface.TYPE_INTEGER, "20");
        rd.addValue("NULLABLE", ValueMetaInterface.TYPE_INTEGER, "20");
        rd.addValue("REMARKS", ValueMetaInterface.TYPE_STRING, "");
        rd.addValue("COLUMN_DEF", ValueMetaInterface.TYPE_STRING, "");
        rd.addValue("SQL_DATA_TYPE", ValueMetaInterface.TYPE_INTEGER, "20");
        rd.addValue("SQL_DATETIME_SUB", ValueMetaInterface.TYPE_INTEGER, "20");
        rd.addValue("CHAR_OCTET_LENGTH", ValueMetaInterface.TYPE_INTEGER, "1");
        rd.addValue("ORDINAL_POSITION", ValueMetaInterface.TYPE_INTEGER, "20");
        rd.addValue("IS_NULLABLE", ValueMetaInterface.TYPE_STRING, "0");
        rd.addValue("SCOPE_CATALOG", ValueMetaInterface.TYPE_STRING, "0");
        rd.addValue("SCOPE_SCHEMA", ValueMetaInterface.TYPE_STRING, "0");
        rd.addValue("SCOPE_TABLE", ValueMetaInterface.TYPE_STRING, "0");
        rd.addValue("SOURCE_DATA_TYPE", ValueMetaInterface.TYPE_INTEGER, "1");
        rowAndDatas.add(rd);
      }
      KettleJDBCResultSet rs = new KettleJDBCResultSet(null, rowAndDatas, "*");
      return rs;
    }

    //		log.debug("getRowMeta:" + helper.getRowMeta(tableNamePattern));
    RowMeta rm = helper.getRowMeta(tableNamePattern);
    ColInfo[] colInfo = KettleHelper.convert(rm);
    String[] columns = rm.getFieldNames();
    for (int i = 0; columns != null && i < columns.length; i++) {
      String name = columns[i];
      RowMetaAndData rd = new RowMetaAndData();
      rd.addValue("TABLE_CAT", ValueMetaInterface.TYPE_STRING, catalog);
      rd.addValue("TABLE_SCHEM", ValueMetaInterface.TYPE_STRING, schemaPattern);
      rd.addValue("TABLE_NAME", ValueMetaInterface.TYPE_STRING, tableNamePattern);
      rd.addValue("COLUMN_NAME", ValueMetaInterface.TYPE_STRING, name);
      rd.addValue("DATA_TYPE", ValueMetaInterface.TYPE_INTEGER, colInfo[i].getJdbcType());
      rd.addValue("TYPE_NAME", ValueMetaInterface.TYPE_STRING, "");
      rd.addValue("COLUMN_SIZE", ValueMetaInterface.TYPE_INTEGER, columns.length);
      rd.addValue("BUFFER_LENGTH", ValueMetaInterface.TYPE_INTEGER, name);
      rd.addValue("DECIMAL_DIGITS", ValueMetaInterface.TYPE_INTEGER, "20");
      rd.addValue("NUM_PREC_RADIX", ValueMetaInterface.TYPE_INTEGER, "20");
      rd.addValue("NULLABLE", ValueMetaInterface.TYPE_INTEGER, "20");
      rd.addValue("REMARKS", ValueMetaInterface.TYPE_STRING, name);
      rd.addValue("COLUMN_DEF", ValueMetaInterface.TYPE_STRING, name);
      rd.addValue("SQL_DATA_TYPE", ValueMetaInterface.TYPE_INTEGER, "20");
      rd.addValue("SQL_DATETIME_SUB", ValueMetaInterface.TYPE_INTEGER, "20");
      rd.addValue("CHAR_OCTET_LENGTH", ValueMetaInterface.TYPE_INTEGER, "1");
      rd.addValue("ORDINAL_POSITION", ValueMetaInterface.TYPE_INTEGER, "20");
      rd.addValue("IS_NULLABLE", ValueMetaInterface.TYPE_STRING, "0");
      rd.addValue("SCOPE_CATALOG", ValueMetaInterface.TYPE_STRING, "0");
      rd.addValue("SCOPE_SCHEMA", ValueMetaInterface.TYPE_STRING, "0");
      rd.addValue("SCOPE_TABLE", ValueMetaInterface.TYPE_STRING, "0");
      rd.addValue("SOURCE_DATA_TYPE", ValueMetaInterface.TYPE_INTEGER, "1");
      rowAndDatas.add(rd);
    }

    KettleJDBCResultSet rs = new KettleJDBCResultSet(null, rowAndDatas, "*");
    return rs;
  }
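Finally, a minimal sketch of how a JDBC client would reach this metadata call (the connection URL is hypothetical; only the standard java.sql API is assumed):

  Connection conn = DriverManager.getConnection("jdbc:kettle:..."); // hypothetical URL
  DatabaseMetaData md = conn.getMetaData();
  ResultSet rs = md.getColumns(null, "jdbckettle", "someStep", null); // step name is illustrative
  while (rs.next()) {
    System.out.println(rs.getString("COLUMN_NAME") + " type=" + rs.getInt("DATA_TYPE"));
  }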