Example #1
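  /**
   * Runs the wavelet transform over the current ToneMap time/pitch window and
   * fills audioFTPower with one power value per (time frame, pitch bin).
   */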
  public boolean transform(ProgressListener progressListener) {

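    // Current analysis window: time and pitch sets and their ranges from the ToneMap.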
    toneMap = toneMapFrame.getToneMap();
    timeSet = toneMap.getTimeSet();
    pitchSet = toneMap.getPitchSet();
    timeRange = timeSet.getRange();
    pitchRange = pitchSet.getRange();

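    // Flat output matrix: timeRange time frames by (pitchRange + 1) frequency bins.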
    pitchFreqSet = pitchSet.getFreqSet();
    audioFTPower = new double[timeRange * (pitchRange + 1)];

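    // Downsample the raw audio for this window, keeping every 'resolution'-th sample.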
    int startSample = timeSet.getStartSample();
    int endSample = timeSet.getEndSample();
    int sampleLength = (int) Math.floor((endSample - startSample) / ((double) resolution));
    double[] audioSamples = new double[sampleLength];

    for (int i = 0; i < sampleLength; i++) {
      audioSamples[i] = (double) audioData[startSample + i * resolution];
    }

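    // Samples per time-frame index, rescaled to the downsampled rate.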
    int sampleIndexSize =
        (int) Math.floor((double) timeSet.getSampleIndexSize() / (double) resolution);

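    // Sample period of the downsampled signal.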
    double dt = (double) resolution / sampleRate;
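    // Run the transform with either the pure-Java wavelet implementation or the native JNI one.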
    if (transformMode == TRANSFORM_MODE_JAVA) {
      wavelet.convert(
          audioFTPower,
          audioSamples,
          pitchFreqSet,
          dt,
          (double) pFactor,
          (double) tFactor,
          sampleIndexSize,
          sampleLength,
          pitchRange,
          progressListener);
    } else {

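      // Native wavelet transform invoked through JNI; same arguments as the Java path.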
      WaveletJNI waveletJNI = new WaveletJNI();

      waveletJNI.waveletConvert(
          audioFTPower,
          audioSamples,
          pitchFreqSet,
          dt,
          (double) pFactor,
          (double) tFactor,
          sampleIndexSize,
          sampleLength,
          pitchRange,
          progressListener);
    }

    return true;
  }
Example #2
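  /** Creates the audio model for the given frame and its associated AudioPanel. */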
  public AudioModel(ToneMapFrame toneMapFrame) {

    this.toneMapFrame = toneMapFrame;
    audioPanel = new AudioPanel(this);
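    // Enable the JNI button only when native library support is available.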
    if (toneMapFrame.getJNIStatus()) {
      audioPanel.jniB.setEnabled(true);
    }
  }