Example 1
  /**
   * Emit one row of the cartesian product to the output.
   *
   * <p>Reads one row from the current input stream into {@code joinrow[filenr]}. Once the last
   * stream has contributed a row, the partial rows of all streams are concatenated into a single
   * output row, filtered through the optional join condition, and passed on with {@code putRow()}.
   * Otherwise the step advances to the next stream so its row is read on the following call.
   *
   * @return true while more output can be produced, false once a stream is exhausted
   * @throws KettleException when reading or writing a row fails
   */
  private boolean outputRow() throws KettleException {

    // Read one row from the current input and store it in joinrow[]
    //
    data.joinrow[data.filenr] = getRowData(data.filenr);
    if (data.joinrow[data.filenr]
        == null) // 100 x 0 = 0 : don't output when one of the input streams has no rows.
    { // If this is filenr #0, it's fine too!
      setOutputDone();
      return false;
    }

    //
    // OK, are we at the last file yet?
    // If so, we can output one row in the cartesian product.
    // Otherwise, go to the next file to get an extra row.
    //
    if (data.filenr >= data.file.length - 1) {
      // Lazily build the output row metadata from the metadata of all input streams.
      if (data.outputRowMeta == null) {
        data.outputRowMeta = createOutputRowMeta(data.fileRowMeta);
      }

      // Stitch the output row together: concatenate the current row of every stream.
      Object[] sum = new Object[data.outputRowMeta.size()];
      int sumIndex = 0;
      for (int f = 0; f <= data.filenr; f++) {
        for (int c = 0; c < data.fileRowMeta[f].size(); c++) {
          sum[sumIndex] = data.joinrow[f][c];
          sumIndex++;
        }
      }

      if (meta.getCondition() != null && !meta.getCondition().isEmpty()) {
        // Test the specified join condition: only matching combinations are sent on.
        if (meta.getCondition().evaluate(data.outputRowMeta, sum)) {
          putRow(data.outputRowMeta, sum);
        }
      } else {
        // No condition: every combination is sent on (full cartesian product).
        putRow(data.outputRowMeta, sum);
      }

      // Did we reach the last position in the last file?
      // This means that position[] is at 0!
      // Possible we have to do this multiple times.
      //
      // NOTE(review): restart[] appears to be maintained by getRowData() (not visible here);
      // each stream that wrapped around steps us back to the previous stream so its next row
      // is fetched on the following call — confirm against getRowData().
      while (data.restart[data.filenr]) {
        // Get row from the previous file
        data.filenr--;
      }
    } else {
      // Not at the last stream yet: advance so the next call reads from the next stream.
      data.filenr++;
    }
    return true;
  }
Example 2
  /**
   * Allocate the per-stream bookkeeping arrays and create the temporary files used to cache the
   * data of all but the first (main) input stream.
   *
   * <p>The row set of the configured main step is moved to position 0, so stream 0 is always the
   * main, non-cached stream. One temporary file is created per remaining stream; the files are
   * deleted on JVM exit.
   *
   * @param smi step metadata, expected to be a {@link JoinRowsMeta}
   * @param sdi step data, expected to be a {@link JoinRowsData}
   * @return true when initialization succeeded, false otherwise
   */
  @SuppressWarnings("unchecked")
  public boolean init(StepMetaInterface smi, StepDataInterface sdi) {
    meta = (JoinRowsMeta) smi;
    data = (JoinRowsData) sdi;

    if (super.init(smi, sdi)) {
      try {
        // Start with the caching of the data, write later...
        data.caching = true;

        // Start at file 1, skip 0 for speed!
        data.filenr = 1;

        // The number of input streams is invariant below: fetch it once.
        final int nrStreams = getInputRowSets().size();

        // See if a main step is supplied: in that case move the corresponding rowset to
        // position 0. That means the main stream is always stream 0 --> easy!
        for (int i = 0; i < nrStreams; i++) {
          RowSet rs = getInputRowSets().get(i);
          if (rs.getOriginStepName().equalsIgnoreCase(meta.getMainStepname())) {
            // Swap this one and position 0...
            RowSet zero = getInputRowSets().get(0);
            getInputRowSets().set(0, rs);
            getInputRowSets().set(i, zero);
            // Step names are unique within a transformation: at most one match exists.
            break;
          }
        }

        // ** INPUT SIDE **
        data.file = new File[nrStreams];
        data.fileInputStream = new FileInputStream[nrStreams];
        data.dataInputStream = new DataInputStream[nrStreams];
        data.size = new int[nrStreams];
        data.fileRowMeta = new RowMetaInterface[nrStreams];
        data.joinrow = new Object[nrStreams][];
        data.rs = new RowSet[nrStreams];
        data.cache = new List[nrStreams];
        data.position = new int[nrStreams];
        data.fileOutputStream = new FileOutputStream[nrStreams];
        data.dataOutputStream = new DataOutputStream[nrStreams];
        data.restart = new boolean[nrStreams];

        // The temp-file directory does not change per stream: resolve it once.
        String directoryName = environmentSubstitute(meta.getDirectory());

        for (int i = 1; i < nrStreams; i++) {
          data.file[i] =
              File.createTempFile(meta.getPrefix(), ".tmp", new File(directoryName)); // $NON-NLS-1$
          data.file[i].deleteOnExit();

          data.size[i] = 0;
          data.rs[i] = getInputRowSets().get(i);
          data.cache[i] = null;
          data.position[i] = 0;

          data.dataInputStream[i] = null;
          data.dataOutputStream[i] = null;

          data.joinrow[i] = null;
          data.restart[i] = false;
        }

        return true;
      } catch (IOException e) {
        logError(
            BaseMessages.getString(PKG, "JoinRows.Log.ErrorCreatingTemporaryFiles")
                + e.toString()); // $NON-NLS-1$
      }
    }

    return false;
  }
Example 3
  /**
   * Read one row from the current input stream and cache it to the stream's temporary file (and,
   * while it fits, to the in-memory cache).
   *
   * <p>When the current stream is exhausted its output streams are closed and {@code filenr}
   * advances to the next stream. Once every stream has been cached, the step switches to
   * read-back mode ({@code caching = false}, {@code filenr = 0}).
   *
   * @return true to continue processing, false on a fatal error opening the temp file
   * @throws KettleException when reading or serializing a row fails
   */
  private boolean cacheInputRow() throws KettleException {
    ///////////////////////////////
    // Read from  input channels //
    ///////////////////////////////

    if (data.filenr >= data.file.length) {
      // All input streams are cached: switch to reading back from the data cache,
      // starting again at stream 0.
      data.caching = false;
      data.filenr = 0;

      return true;
    }

    // First row for this stream: we need to open a new output stream to its temp file.
    if (data.dataOutputStream[data.filenr] == null) {
      try {
        // Open the temp file
        data.fileOutputStream[data.filenr] = new FileOutputStream(data.file[data.filenr]);

        // Open the data output stream...
        data.dataOutputStream[data.filenr] =
            new DataOutputStream(data.fileOutputStream[data.filenr]);
      } catch (FileNotFoundException fnfe) {
        logError(
            BaseMessages.getString(PKG, "JoinRows.Log.UnableToOpenOutputstream")
                + data.file[data.filenr].toString()
                + "] : "
                + fnfe.toString()); // $NON-NLS-1$ //$NON-NLS-2$
        stopAll();
        setErrors(1);
        return false;
      }
    }

    // Read a line from the appropriate rowset...
    RowSet rowSet = data.rs[data.filenr];
    Object[] rowData = getRowFrom(rowSet);
    if (rowData != null) // We read a row from one of the input streams...
    {
      if (data.fileRowMeta[data.filenr] == null) {
        // The first row is used as meta-data, clone it for safety
        data.fileRowMeta[data.filenr] = rowSet.getRowMeta().clone();
      }

      // Serialize the row to the stream's temp file.
      data.fileRowMeta[data.filenr].writeData(data.dataOutputStream[data.filenr], rowData);
      data.size[data.filenr]++;

      if (log.isRowLevel()) {
        // Arrays.toString() shows the row contents; Object[].toString() would only print
        // the array's identity hash.
        logRowlevel(java.util.Arrays.toString(rowData));
      }

      //
      // Perhaps we want to cache this data??
      //
      if (data.size[data.filenr] <= meta.getCacheSize()) {
        if (data.cache[data.filenr] == null) {
          data.cache[data.filenr] = new ArrayList<Object[]>();
        }

        // Add this row to the cache!
        data.cache[data.filenr].add(rowData);
      } else {
        // We can't cope with this many rows: drop the in-memory cache and rely on the
        // temp file only.
        if (log.isDetailed()) {
          logDetailed(
              BaseMessages.getString(
                  PKG,
                  "JoinRows.Log.RowsFound",
                  meta.getCacheSize() + "",
                  data.rs[data.filenr]
                      .getOriginStepName())); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
        }
        data.cache[data.filenr] = null;
      }

    } else // No more rows found on rowset!!
    {
      // This stream is exhausted: close its output streams so the temp file can be
      // re-opened for reading later.
      try {
        data.dataOutputStream[data.filenr].close();
        data.fileOutputStream[data.filenr].close();
        data.dataOutputStream[data.filenr] = null;
        data.fileOutputStream[data.filenr] = null;
      } catch (IOException ioe) {
        logError(
            BaseMessages.getString(PKG, "JoinRows.Log.ErrorInClosingOutputStream")
                + data.filenr
                + " : ["
                + data.file[data.filenr].toString()
                + "] : "
                + ioe.toString()); // $NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
      }

      // Advance to the next file/input-stream...
      data.filenr++;
    }

    return true;
  }