Code Example #1
  // Simple mean filtering along track frequencies and amplitudes
  public static SinusoidalTracks postProcess(SinusoidalTracks st) {
    for (int i = 0; i < st.totalTracks; i++) {
      if (st.tracks[i].totalSins > 20) { // Smooth only sufficiently long tracks
        st.tracks[i].freqs = SignalProcUtils.meanFilter(st.tracks[i].freqs, MEAN_FILTER_FREQ_AXIS);
        st.tracks[i].amps = SignalProcUtils.meanFilter(st.tracks[i].amps, MEAN_FILTER_AMP_AXIS);
      }
    }

    return st;
  }
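For reference, SignalProcUtils.meanFilter presumably applies a centered moving average along the array; the following is a minimal sketch of that assumed behaviour (the actual MaryTTS implementation may handle the boundaries differently):

  // Sketch of a centered moving-average (mean) filter over a float array
  public static float[] meanFilterSketch(float[] x, int windowLen) {
    float[] y = new float[x.length];
    int half = windowLen / 2;
    for (int i = 0; i < x.length; i++) {
      int start = Math.max(0, i - half);          // window shrinks at the left edge
      int end = Math.min(x.length - 1, i + half); // ... and at the right edge
      float sum = 0.0f;
      for (int j = start; j <= end; j++) sum += x[j];
      y[i] = sum / (end - start + 1);
    }
    return y;
  }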
Code Example #2
  public DoubleDataSource process(DoubleDataSource inputAudio) {
    amount = MathUtils.CheckLimits(amount, MIN_AMOUNT, MAX_AMOUNT); // Clamp to [MIN_AMOUNT, MAX_AMOUNT]

    double[] vscales = {amount};

    int frameLength = SignalProcUtils.getDFTSize(fs);
    int predictionOrder = SignalProcUtils.getLPOrder(fs);

    VocalTractScalingProcessor p =
        new VocalTractScalingProcessor(predictionOrder, fs, frameLength, vscales);
    FrameOverlapAddSource foas =
        new FrameOverlapAddSource(inputAudio, Window.HANNING, true, frameLength, fs, p);

    return new BufferedDoubleDataSource(foas);
  }
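A possible driver for this method, following the same audio I/O pattern as the main method in Code Example #6 (a hedged sketch: VocalTractScaler is a hypothetical name standing in for whatever class actually hosts process(); the I/O classes are the ones used in Code Example #6):

  // Hypothetical usage: apply vocal tract scaling to input.wav and write the result
  AudioInputStream inputAudio = AudioSystem.getAudioInputStream(new File("input.wav"));
  AudioDoubleDataSource signal = new AudioDoubleDataSource(inputAudio);
  VocalTractScaler scaler = new VocalTractScaler(); // hypothetical host class of process()
  DoubleDataSource output = scaler.process(signal);
  DDSAudioInputStream outputAudio = new DDSAudioInputStream(output, inputAudio.getFormat());
  AudioSystem.write(outputAudio, AudioFileFormat.Type.WAVE, new File("output.wav"));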
Code Example #3
  private void initialise(
      int samplingRate,
      int LPOrder,
      double[] pscalesIn,
      double[] tscalesIn,
      double[] escalesIn,
      double[] vscalesIn) {
    if (pscalesIn != null) {
      pscales = new double[pscalesIn.length];
      System.arraycopy(pscalesIn, 0, pscales, 0, pscalesIn.length);
    }

    if (tscalesIn != null) {
      tscales = new double[tscalesIn.length];
      System.arraycopy(tscalesIn, 0, tscales, 0, tscalesIn.length);
    }

    if (escalesIn != null) {
      escales = new double[escalesIn.length];
      System.arraycopy(escalesIn, 0, escales, 0, escalesIn.length);
    }

    if (vscalesIn != null) {
      vscales = new double[vscalesIn.length];
      System.arraycopy(vscalesIn, 0, vscales, 0, vscalesIn.length);
    }

    fs = samplingRate;
    lpOrder = SignalProcUtils.getLPOrder(fs); // Note: recomputed from fs; the LPOrder parameter is not used
  }
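Each branch above makes a defensive copy via allocate-plus-System.arraycopy; the same null-guarded copy can be written more compactly with Java's array clone(), e.g.:

  // Equivalent one-step defensive copy
  if (pscalesIn != null) pscales = pscalesIn.clone();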
Code Example #4
  /*
   * Group individual sinusoids into tracks by considering closeness in frequency.
   * The current version is a simple implementation that checks the frequency difference between
   * neighbouring sinusoids and assigns them to the same track if the absolute difference is less
   * than a threshold.
   * Possible ways to improve this process would be to employ:
   * - constraints on amplitude continuity
   * - constraints on phase continuity (i.e. the phase difference between two consecutive sinusoids
   *   should not be larger or smaller than some percent of the period)
   *
   * framesSins[i][] : Array of sinusoidal parameters (amps, freqs, phases) extracted from the ith speech frame
   * framesSins[i][j]: Sinusoidal parameters of the jth peak sinusoid in the DFT spectrum of speech frame i
   * Returns the generated sinusoidal tracks
   *
   * This version uses a simple search mechanism that compares the current sinusoid frequency with the
   * previous ones and, if the difference is smaller than +-deltaInHz, assigns the new sinusoid to the
   * previous sinusoid's track.
   * In the assignment, longer previous paths are favoured in a weighted manner, i.e. the longer a
   * candidate track, the more likely the current sinusoid gets assigned to that track.
   */
  public SinusoidalTracks generateTracks(
      NonharmonicSinusoidalSpeechSignal sinSignal, float deltaInHz, int samplingRate) {
    int numFrames = sinSignal.framesSins.length;
    float deltaInRadians = SignalProcUtils.hz2radian(deltaInHz, samplingRate);

    SinusoidalTracks tr = null;
    int i;
    Sinusoid zeroAmpSin;

    if (numFrames > 0) {
      int j, k;
      float tmpDist, minDist;
      int trackInd;
      boolean[] bSinAssigneds = null;

      for (i = 0; i < numFrames; i++) {
        if (tr == null) // If no tracks yet, assign the current sinusoids to new tracks
        {
          tr = new SinusoidalTracks(sinSignal.framesSins[i].sinusoids.length, samplingRate);
          tr.setSysAmpsAndTimes(sinSignal.framesSins);

          for (j = 0; j < sinSignal.framesSins[i].sinusoids.length; j++) {
            // First add a zero amplitude sinusoid at previous time instant to allow smooth
            // synthesis (i.e. "turning on" the track)
            zeroAmpSin =
                new Sinusoid(
                    0.0f,
                    sinSignal.framesSins[i].sinusoids[j].freq,
                    0.0f,
                    Sinusoid.NON_EXISTING_FRAME_INDEX);
            tr.add(
                new SinusoidalTrack(
                    sinSignal.framesSins[i].time - ZERO_AMP_SHIFT_IN_SECONDS,
                    zeroAmpSin,
                    sinSignal.framesSins[i].maxFreqOfVoicing,
                    SinusoidalTrack.TURNED_ON));
            //

            tr.tracks[tr.currentIndex].add(
                sinSignal.framesSins[i].time,
                sinSignal.framesSins[i].sinusoids[j],
                sinSignal.framesSins[i].maxFreqOfVoicing,
                SinusoidalTrack.ACTIVE);
          }
        } else // If there are tracks, first check "continuations" by checking whether a given
               // sinusoid is in the +-deltaInRadians neighbourhood of the previous track.
        // Those tracks that do not continue are "turned off".
        // All sinusoids of the current frame that are not assigned to any of the "continuations" or
        // "turned off" are "birth"s of new tracks.
        {
          for (j = 0; j < tr.currentIndex + 1; j++) {
            if (tr.tracks[j] != null) tr.tracks[j].resetCandidate();
          }

          bSinAssigneds = new boolean[sinSignal.framesSins[i].sinusoids.length];

          // Continuations:
          for (k = 0; k < sinSignal.framesSins[i].sinusoids.length; k++) {
            minDist =
                Math.abs(
                    sinSignal.framesSins[i].sinusoids[k].freq
                        - tr.tracks[0].freqs[tr.tracks[0].currentIndex]);
            if (minDist < deltaInRadians) trackInd = 0;
            else trackInd = -1;

            for (j = 1; j < tr.currentIndex + 1; j++) {
              tmpDist =
                  Math.abs(
                      sinSignal.framesSins[i].sinusoids[k].freq
                          - tr.tracks[j].freqs[tr.tracks[j].currentIndex]);

              if (tmpDist < deltaInRadians && (trackInd == -1 || tmpDist < minDist)) {
                minDist = tmpDist;
                trackInd = j;
              }
            }

            if (trackInd > -1) {
              if (tr.tracks[trackInd].newCandidateInd > -1)
                bSinAssigneds[tr.tracks[trackInd].newCandidateInd] = false;

              tr.tracks[trackInd].newCandidate = new Sinusoid(sinSignal.framesSins[i].sinusoids[k]);
              tr.tracks[trackInd].newCandidateInd = k;

              bSinAssigneds[k] =
                  true; // The sinusoid might be assigned to an existing track provided that a
                        // closer sinusoid is not found
            } else
              bSinAssigneds[k] =
                  false; // This is the birth of a new track since it does not match any existing
                         // tracks
          }

          // Here is the actual assignment of sinusoids to existing tracks
          for (j = 0; j < tr.currentIndex + 1; j++) {
            if (tr.tracks[j].newCandidate != null) {
              Sinusoid tmpSin = new Sinusoid(tr.tracks[j].newCandidate);

              if (tr.tracks[j].states[tr.tracks[j].currentIndex] != SinusoidalTrack.ACTIVE) {
                zeroAmpSin =
                    new Sinusoid(
                        0.0f,
                        tr.tracks[j].freqs[tr.tracks[j].totalSins - 1],
                        0.0f,
                        Sinusoid.NON_EXISTING_FRAME_INDEX);
                tr.tracks[j].add(
                    sinSignal.framesSins[i].time - ZERO_AMP_SHIFT_IN_SECONDS,
                    zeroAmpSin,
                    sinSignal.framesSins[i].maxFreqOfVoicing,
                    SinusoidalTrack.TURNED_ON);
              }

              tr.tracks[j].add(
                  sinSignal.framesSins[i].time,
                  tmpSin,
                  sinSignal.framesSins[i].maxFreqOfVoicing,
                  SinusoidalTrack.ACTIVE);
            } else // Turn off tracks that are not assigned any new sinusoid
            {
              if (tr.tracks[j].states[tr.tracks[j].currentIndex] != SinusoidalTrack.TURNED_OFF) {
                zeroAmpSin =
                    new Sinusoid(
                        0.0f,
                        tr.tracks[j].freqs[tr.tracks[j].totalSins - 1],
                        0.0f,
                        Sinusoid.NON_EXISTING_FRAME_INDEX);
                tr.tracks[j].add(
                    sinSignal.framesSins[i].time + ZERO_AMP_SHIFT_IN_SECONDS,
                    zeroAmpSin,
                    sinSignal.framesSins[i].maxFreqOfVoicing,
                    SinusoidalTrack.TURNED_OFF);
              }
            }
          }

          // Births: Create new tracks from sinusoids that are not assigned to existing tracks
          for (k = 0; k < bSinAssigneds.length; k++) {
            if (!bSinAssigneds[k]) {
              // First add a zero amplitude sinusoid to previous frame to allow smooth synthesis
              // (i.e. "turning on" the track)
              zeroAmpSin =
                  new Sinusoid(
                      0.0f,
                      sinSignal.framesSins[i].sinusoids[k].freq,
                      0.0f,
                      Sinusoid.NON_EXISTING_FRAME_INDEX);
              tr.add(
                  new SinusoidalTrack(
                      sinSignal.framesSins[i].time - ZERO_AMP_SHIFT_IN_SECONDS,
                      zeroAmpSin,
                      sinSignal.framesSins[i].maxFreqOfVoicing,
                      SinusoidalTrack.TURNED_ON));
              //

              tr.tracks[tr.currentIndex].add(
                  sinSignal.framesSins[i].time,
                  sinSignal.framesSins[i].sinusoids[k],
                  sinSignal.framesSins[i].maxFreqOfVoicing,
                  SinusoidalTrack.ACTIVE);
            }
          }

          System.out.println("Track generation using frame " + (i + 1) + " of " + numFrames);
        }

        // Turn off all active tracks after the last speech frame
        if (i == numFrames - 1) {
          for (j = 0; j < tr.currentIndex + 1; j++) {
            if (Math.abs(
                    sinSignal.framesSins[i].time - tr.tracks[j].times[tr.tracks[j].totalSins - 1])
                < ZERO_AMP_SHIFT_IN_SECONDS) {
              if (tr.tracks[j].states[tr.tracks[j].currentIndex] == SinusoidalTrack.ACTIVE) {
                zeroAmpSin =
                    new Sinusoid(
                        0.0f,
                        tr.tracks[j].freqs[tr.tracks[j].totalSins - 1],
                        0.0f,
                        Sinusoid.NON_EXISTING_FRAME_INDEX);
                tr.tracks[j].add(
                    sinSignal.framesSins[i].time + ZERO_AMP_SHIFT_IN_SECONDS,
                    zeroAmpSin,
                    sinSignal.framesSins[i].maxFreqOfVoicing,
                    SinusoidalTrack.TURNED_OFF);
              }
            }
          }
        }
        //
      }
    }

    if (tr == null) // No speech frames were available, so no tracks could be generated
      return null;

    for (i = 0; i <= tr.currentIndex; i++) tr.tracks[i].correctTrack();

    tr.setOriginalDurationManual(sinSignal.originalDurationInSeconds);

    SinusoidalTracks trOut = new SinusoidalTracks(tr, 0, tr.currentIndex);
    trOut = postProcess(trOut);

    return trOut;
  }
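Stripped of the track bookkeeping, the continuation test above is a greedy nearest-neighbour search in frequency; a minimal self-contained sketch of that matching rule (an illustration of the logic, not the MaryTTS implementation):

  // For one sinusoid frequency, return the index of the open track whose last frequency
  // is closest and lies within +-delta, or -1 if none qualifies (all values in radians)
  static int findContinuation(float sinFreq, float[] lastTrackFreqs, float delta) {
    int best = -1;
    float minDist = Float.MAX_VALUE;
    for (int j = 0; j < lastTrackFreqs.length; j++) {
      float dist = Math.abs(sinFreq - lastTrackFreqs[j]);
      if (dist < delta && dist < minDist) {
        minDist = dist;
        best = j;
      }
    }
    return best;
  }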
Code Example #5
  public void initialise(
      double[] lowerCutOffsInHzIn, double[] upperCutOffsInHzIn, double overlapAround1000HzIn) {
    normalizationFilterTransformedIR = null;

    if (lowerCutOffsInHzIn != null && upperCutOffsInHzIn != null) {
      assert lowerCutOffsInHzIn.length == upperCutOffsInHzIn.length;
      lowerCutOffsInHz = new double[lowerCutOffsInHzIn.length];
      upperCutOffsInHz = new double[upperCutOffsInHzIn.length];
      System.arraycopy(lowerCutOffsInHzIn, 0, lowerCutOffsInHz, 0, lowerCutOffsInHzIn.length);
      System.arraycopy(upperCutOffsInHzIn, 0, upperCutOffsInHz, 0, upperCutOffsInHzIn.length);

      int i;
      filters = new FIRFilter[lowerCutOffsInHz.length];
      int filterOrder = SignalProcUtils.getFIRFilterOrder(samplingRateInHz);
      double normalizedLowerCutoff;
      double normalizedUpperCutoff;

      overlapAround1000Hz = overlapAround1000HzIn;

      for (i = 0; i < lowerCutOffsInHz.length; i++)
        assert lowerCutOffsInHz[i] < upperCutOffsInHz[i];

      for (i = 0; i < lowerCutOffsInHz.length; i++) {
        if (lowerCutOffsInHz[i] <= 0.0) {
          normalizedUpperCutoff = Math.min(upperCutOffsInHz[i] / samplingRateInHz, 0.5);
          normalizedUpperCutoff = Math.max(normalizedUpperCutoff, 0.0);
          filters[i] = new LowPassFilter(normalizedUpperCutoff, filterOrder);
        } else if (upperCutOffsInHz[i] >= 0.5 * samplingRateInHz) {
          normalizedLowerCutoff = Math.max(lowerCutOffsInHz[i] / samplingRateInHz, 0.0);
          normalizedLowerCutoff = Math.min(normalizedLowerCutoff, 0.5);
          filters[i] = new HighPassFilter(normalizedLowerCutoff, filterOrder);
        } else {
          normalizedLowerCutoff = Math.max(lowerCutOffsInHz[i] / samplingRateInHz, 0.0);
          normalizedLowerCutoff = Math.min(normalizedLowerCutoff, 0.5);
          normalizedUpperCutoff = Math.min(upperCutOffsInHz[i] / samplingRateInHz, 0.5);
          normalizedUpperCutoff = Math.max(normalizedUpperCutoff, 0.0);

          assert normalizedLowerCutoff < normalizedUpperCutoff;

          filters[i] =
              new BandPassFilter(normalizedLowerCutoff, normalizedUpperCutoff, filterOrder);
        }
      }

      int maxFreq = filters[0].transformedIR.length / 2 + 1;

      // Estimate a smooth gain normalization filter
      normalizationFilterTransformedIR = new double[maxFreq];
      Arrays.fill(normalizationFilterTransformedIR, 0.0);

      int j;
      // transformedIR is assumed to use the packed real-FFT layout: index 0 holds the DC
      // term, index 1 the Nyquist term, and indices 2j / 2j+1 the real / imaginary parts of bin j
      for (i = 0; i < filters.length; i++) {
        normalizationFilterTransformedIR[0] += Math.abs(filters[i].transformedIR[0]);
        normalizationFilterTransformedIR[maxFreq - 1] += Math.abs(filters[i].transformedIR[1]);
        for (j = 1; j < maxFreq - 1; j++)
          normalizationFilterTransformedIR[j] +=
              Math.sqrt(
                  filters[i].transformedIR[2 * j] * filters[i].transformedIR[2 * j]
                      + filters[i].transformedIR[2 * j + 1] * filters[i].transformedIR[2 * j + 1]);
      }

      // Invert the summed magnitude response to get the normalization gain
      // (assumes the filter bank covers every bin, so the sum is never zero)
      for (j = 0; j < maxFreq; j++)
        normalizationFilterTransformedIR[j] = 1.0 / normalizationFilterTransformedIR[j];

      // MaryUtils.plot(normalizationFilterTransformedIR, "Normalization filter");
      //
    }
  }
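The normalization loop above relies on the packed layout commonly used for real-signal FFTs: the purely real DC and Nyquist terms are stored at indices 0 and 1, and bin j occupies indices 2j (real) and 2j+1 (imaginary). A minimal sketch of reading a magnitude spectrum out of that layout (assuming this convention holds for transformedIR):

  // Magnitude spectrum from a packed real FFT of even length N
  static double[] packedMagnitude(double[] packed) {
    int maxFreq = packed.length / 2 + 1;
    double[] mag = new double[maxFreq];
    mag[0] = Math.abs(packed[0]);           // DC term at index 0
    mag[maxFreq - 1] = Math.abs(packed[1]); // Nyquist term at index 1
    for (int j = 1; j < maxFreq - 1; j++)
      mag[j] = Math.hypot(packed[2 * j], packed[2 * j + 1]);
    return mag;
  }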
Code Example #6
  public static void main(String[] args) throws UnsupportedAudioFileException, IOException {
    // File input
    AudioInputStream inputAudio = AudioSystem.getAudioInputStream(new File(args[0]));
    int samplingRate = (int) inputAudio.getFormat().getSampleRate();
    AudioDoubleDataSource signal = new AudioDoubleDataSource(inputAudio);
    double[] x = signal.getAllData();
    double maxOrig = MathUtils.getAbsMax(x);

    SinusoidalAnalyzer sa = null;
    SinusoidalTracks st = null;
    PitchSynchronousSinusoidalAnalyzer pa = null;
    //

    // Analysis
    float deltaInHz = SinusoidalAnalysisParams.DEFAULT_DELTA_IN_HZ;
    float numPeriods = PitchSynchronousSinusoidalAnalyzer.DEFAULT_ANALYSIS_PERIODS;

    boolean isSilentSynthesis = false;

    int windowType = Window.HANNING;

    boolean bRefinePeakEstimatesParabola = false;
    boolean bRefinePeakEstimatesBias = false;
    boolean bSpectralReassignment = false;
    boolean bAdjustNeighFreqDependent = false;

    // int spectralEnvelopeType = SinusoidalAnalysisParams.LP_SPEC;
    int spectralEnvelopeType = SinusoidalAnalysisParams.SEEVOC_SPEC;
    float[] initialPeakLocationsInHz = new float[1];
    for (int i = 0; i < initialPeakLocationsInHz.length; i++)
      initialPeakLocationsInHz[i] = (i + 1) * 350.0f;

    boolean isFixedRateAnalysis = false;
    boolean isRealSpeech = true;
    double startFreqInHz = 0.0;
    double endFreqInHz = 0.5 * samplingRate;

    SinusoidalAnalysisParams params =
        new SinusoidalAnalysisParams(
            samplingRate,
            startFreqInHz,
            endFreqInHz,
            windowType,
            bRefinePeakEstimatesParabola,
            bRefinePeakEstimatesBias,
            bSpectralReassignment,
            bAdjustNeighFreqDependent);

    if (isFixedRateAnalysis) {
      // Fixed window size and skip rate analysis
      double[] f0s = null;
      float ws_f0 = -1.0f;
      float ss_f0 = -1.0f;
      sa = new SinusoidalAnalyzer(params);

      if (spectralEnvelopeType == SinusoidalAnalysisParams.SEEVOC_SPEC) // Pitch info needed
      {
        String strPitchFile = args[0].substring(0, args[0].length() - 4) + ".ptc";
        PitchReaderWriter f0 = new PitchReaderWriter(strPitchFile);
        f0s = f0.contour;
        ws_f0 = (float) f0.header.windowSizeInSeconds;
        ss_f0 = (float) f0.header.skipSizeInSeconds;
      }

      st =
          sa.analyzeFixedRate(
              x, 0.020f, 0.010f, deltaInHz, spectralEnvelopeType, f0s, ws_f0, ss_f0);
      //
    } else {
      // Pitch synchronous analysis
      String strPitchFile = args[0].substring(0, args[0].length() - 4) + ".ptc";
      PitchReaderWriter f0 = new PitchReaderWriter(strPitchFile);
      int pitchMarkOffset = 0;
      PitchMarks pm =
          SignalProcUtils.pitchContour2pitchMarks(
              f0.contour,
              samplingRate,
              x.length,
              f0.header.windowSizeInSeconds,
              f0.header.skipSizeInSeconds,
              true,
              pitchMarkOffset);
      pa = new PitchSynchronousSinusoidalAnalyzer(params);

      st =
          pa.analyzePitchSynchronous(
              x, pm, numPeriods, -1.0f, deltaInHz, spectralEnvelopeType, initialPeakLocationsInHz);
      isSilentSynthesis = false;
    }
    //

    // Resynthesis
    PeakMatchedSinusoidalSynthesizer ss = new PeakMatchedSinusoidalSynthesizer(samplingRate);
    x = ss.synthesize(st, isSilentSynthesis);
    //

    // File output
    DDSAudioInputStream outputAudio =
        new DDSAudioInputStream(new BufferedDoubleDataSource(x), inputAudio.getFormat());
    String outFileName =
        args[0].substring(0, args[0].length() - 4) + "_sinResynthFullbandPitchSynch.wav";
    AudioSystem.write(outputAudio, AudioFileFormat.Type.WAVE, new File(outFileName));
    //
  }
Code Example #7
File: TransientSegment.java  Project: Fawzanm/marytts
  public float getEndTime(int samplingRateInHz) {
    if (waveform != null && startTime > -1.0f)
      return startTime + SignalProcUtils.sample2time(waveform.length, samplingRateInHz);
    else return -1.0f;
  }
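Assuming SignalProcUtils.sample2time performs the usual sample-count-to-seconds conversion, getEndTime returns the segment's start time plus the duration of its stored waveform:

  // Assumed semantics of sample2time: n samples at fs Hz span n / fs seconds
  static float sample2time(int numSamples, int samplingRateInHz) {
    return ((float) numSamples) / samplingRateInHz;
  }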
Code Example #8
  // Pseudo harmonics based noise generation for pseudo periods
  public static double[] synthesize(
      HntmSpeechSignal hnmSignal,
      HntmAnalyzerParams analysisParams,
      HntmSynthesizerParams synthesisParams,
      String referenceFile) {
    double[] noisePart = null;
    int trackNoToExamine = 1;

    int i, k, n;
    double t; // Time in seconds

    double tsik = 0.0; // Synthesis time in seconds
    double tsikPlusOne = 0.0; // Synthesis time in seconds

    double trackStartInSeconds, trackEndInSeconds;
    // double lastPeriodInSeconds = 0.0;
    int trackStartIndex, trackEndIndex;
    double akt;
    int numHarmonicsCurrentFrame, numHarmonicsPrevFrame, numHarmonicsNextFrame;
    int harmonicIndexShiftPrev, harmonicIndexShiftCurrent, harmonicIndexShiftNext;
    int maxNumHarmonics = 0;
    for (i = 0; i < hnmSignal.frames.length; i++) {
      if (hnmSignal.frames[i].maximumFrequencyOfVoicingInHz > 0.0f
          && hnmSignal.frames[i].n != null) {
        numHarmonicsCurrentFrame =
            (int) Math.floor(hnmSignal.samplingRateInHz / analysisParams.noiseF0InHz + 0.5);
        numHarmonicsCurrentFrame = Math.max(0, numHarmonicsCurrentFrame);
        if (numHarmonicsCurrentFrame > maxNumHarmonics) maxNumHarmonics = numHarmonicsCurrentFrame;
      }
    }

    double aksi;
    double aksiPlusOne;

    float[] phasekis = null;
    float phasekiPlusOne;

    double ht;
    float phasekt = 0.0f;

    float phasekiEstimate = 0.0f;
    float phasekiPlusOneEstimate = 0.0f;
    int Mk;
    boolean isPrevNoised, isNoised, isNextNoised;
    boolean isTrackNoised, isNextTrackNoised, isPrevTrackNoised;
    int outputLen =
        SignalProcUtils.time2sample(
            hnmSignal.originalDurationInSeconds, hnmSignal.samplingRateInHz);

    // In fact, this should be the prosody-scaled length once prosody modifications are implemented
    noisePart = new double[outputLen];
    Arrays.fill(noisePart, 0.0);

    // Write separate tracks to output
    double[][] noiseTracks = null;

    if (maxNumHarmonics > 0) {
      noiseTracks = new double[maxNumHarmonics][];
      for (k = 0; k < maxNumHarmonics; k++) {
        noiseTracks[k] = new double[outputLen];
        Arrays.fill(noiseTracks[k], 0.0);
      }

      phasekis = new float[maxNumHarmonics];
      for (k = 0; k < maxNumHarmonics; k++)
        phasekis[k] = (float) (MathUtils.TWOPI * (Math.random() - 0.5));
    }
    //

    int transitionLen =
        SignalProcUtils.time2sample(
            synthesisParams.unvoicedVoicedTrackTransitionInSeconds, hnmSignal.samplingRateInHz);
    Window transitionWin = Window.get(Window.HAMMING, transitionLen * 2);
    transitionWin.normalizePeakValue(1.0f);
    double[] halfTransitionWinLeft = transitionWin.getCoeffsLeftHalf();
    float halfFs = 0.5f * hnmSignal.samplingRateInHz; // Nyquist frequency

    for (i = 0; i < hnmSignal.frames.length; i++) {
      isPrevNoised = false;
      isNoised = false;
      isNextNoised = false;

      if (i > 0
          && hnmSignal.frames[i - 1].n != null
          && hnmSignal.frames[i - 1].maximumFrequencyOfVoicingInHz < halfFs
          && ((FrameNoisePartPseudoHarmonic) hnmSignal.frames[i - 1].n).ceps != null)
        isPrevNoised = true;

      if (i > 0
          && hnmSignal.frames[i].n != null
          && hnmSignal.frames[i].maximumFrequencyOfVoicingInHz < halfFs
          && ((FrameNoisePartPseudoHarmonic) hnmSignal.frames[i].n).ceps != null) isNoised = true;

      if (i < hnmSignal.frames.length - 1
          && hnmSignal.frames[i + 1].maximumFrequencyOfVoicingInHz < halfFs
          && hnmSignal.frames[i + 1].n != null
          && ((FrameNoisePartPseudoHarmonic) hnmSignal.frames[i + 1].n).ceps != null)
        isNextNoised = true;

      numHarmonicsPrevFrame = 0;
      numHarmonicsCurrentFrame = 0;
      numHarmonicsNextFrame = 0;
      harmonicIndexShiftPrev = 0;
      harmonicIndexShiftCurrent = 0;
      harmonicIndexShiftNext = 0;

      if (isPrevNoised) {
        numHarmonicsPrevFrame =
            (int)
                Math.floor(
                    (hnmSignal.samplingRateInHz
                                - hnmSignal.frames[i - 1].maximumFrequencyOfVoicingInHz)
                            / analysisParams.noiseF0InHz
                        + 0.5);
        numHarmonicsPrevFrame = Math.max(0, numHarmonicsPrevFrame);
        harmonicIndexShiftPrev =
            (int)
                Math.floor(
                    hnmSignal.frames[i - 1].maximumFrequencyOfVoicingInHz
                            / analysisParams.noiseF0InHz
                        + 0.5);
        harmonicIndexShiftPrev = Math.max(1, harmonicIndexShiftPrev);
      }

      if (isNoised) {
        numHarmonicsCurrentFrame =
            (int)
                Math.floor(
                    (hnmSignal.samplingRateInHz - hnmSignal.frames[i].maximumFrequencyOfVoicingInHz)
                            / analysisParams.noiseF0InHz
                        + 0.5);
        numHarmonicsCurrentFrame = Math.max(0, numHarmonicsCurrentFrame);
        harmonicIndexShiftCurrent =
            (int)
                Math.floor(
                    hnmSignal.frames[i].maximumFrequencyOfVoicingInHz / analysisParams.noiseF0InHz
                        + 0.5);
        harmonicIndexShiftCurrent = Math.max(1, harmonicIndexShiftCurrent);
      } else if (!isNoised && isNextNoised) {
        numHarmonicsCurrentFrame =
            (int)
                Math.floor(
                    (hnmSignal.samplingRateInHz
                                - hnmSignal.frames[i + 1].maximumFrequencyOfVoicingInHz)
                            / analysisParams.noiseF0InHz
                        + 0.5);
        numHarmonicsCurrentFrame = Math.max(0, numHarmonicsCurrentFrame);
        harmonicIndexShiftCurrent =
            (int)
                Math.floor(
                    hnmSignal.frames[i + 1].maximumFrequencyOfVoicingInHz
                            / analysisParams.noiseF0InHz
                        + 0.5);
        harmonicIndexShiftCurrent = Math.max(1, harmonicIndexShiftCurrent);
      }

      if (isNextNoised) {
        numHarmonicsNextFrame =
            (int)
                Math.floor(
                    (hnmSignal.samplingRateInHz
                                - hnmSignal.frames[i + 1].maximumFrequencyOfVoicingInHz)
                            / analysisParams.noiseF0InHz
                        + 0.5);
        numHarmonicsNextFrame = Math.max(0, numHarmonicsNextFrame);
        harmonicIndexShiftNext =
            (int)
                Math.floor(
                    hnmSignal.frames[i + 1].maximumFrequencyOfVoicingInHz
                            / analysisParams.noiseF0InHz
                        + 0.5);
        harmonicIndexShiftNext = Math.max(1, harmonicIndexShiftNext);
      }

      for (k = 0; k < numHarmonicsCurrentFrame; k++) {
        aksi = 0.0;
        aksiPlusOne = 0.0;

        phasekiPlusOne = 0.0f;

        isPrevTrackNoised = false;
        isTrackNoised = false;
        isNextTrackNoised = false;

        if (i > 0 && hnmSignal.frames[i - 1].n != null && numHarmonicsPrevFrame > k)
          isPrevTrackNoised = true;

        if (hnmSignal.frames[i].n != null && numHarmonicsCurrentFrame > k) isTrackNoised = true;

        if (i < hnmSignal.frames.length - 1
            && hnmSignal.frames[i + 1].n != null
            && numHarmonicsNextFrame > k) isNextTrackNoised = true;

        tsik = hnmSignal.frames[i].tAnalysisInSeconds;

        if (i == 0) trackStartInSeconds = 0.0;
        else trackStartInSeconds = tsik;

        if (i == hnmSignal.frames.length - 1) tsikPlusOne = hnmSignal.originalDurationInSeconds;
        else tsikPlusOne = hnmSignal.frames[i + 1].tAnalysisInSeconds;

        trackEndInSeconds = tsikPlusOne;

        trackStartIndex =
            SignalProcUtils.time2sample(trackStartInSeconds, hnmSignal.samplingRateInHz);
        trackEndIndex = SignalProcUtils.time2sample(trackEndInSeconds, hnmSignal.samplingRateInHz);

        if (isTrackNoised && trackEndIndex - trackStartIndex + 1 > 0) {
          // Amplitudes
          if (isTrackNoised) { // Always true inside this branch; mirrors the isNextTrackNoised check below
            if (!analysisParams.useNoiseAmplitudesDirectly) {
              if (analysisParams.regularizedCepstrumWarpingMethod
                  == RegularizedCepstrumEstimator.REGULARIZED_CEPSTRUM_WITH_PRE_BARK_WARPING)
                aksi =
                    RegularizedPreWarpedCepstrumEstimator.cepstrum2linearSpectrumValue(
                        ((FrameNoisePartPseudoHarmonic) hnmSignal.frames[i].n).ceps,
                        (k + harmonicIndexShiftCurrent) * analysisParams.noiseF0InHz,
                        hnmSignal.samplingRateInHz);
              else if (analysisParams.regularizedCepstrumWarpingMethod
                  == RegularizedCepstrumEstimator.REGULARIZED_CEPSTRUM_WITH_POST_MEL_WARPING)
                aksi =
                    RegularizedPostWarpedCepstrumEstimator.cepstrum2linearSpectrumValue(
                        ((FrameNoisePartPseudoHarmonic) hnmSignal.frames[i].n).ceps,
                        (k + harmonicIndexShiftCurrent) * analysisParams.noiseF0InHz,
                        hnmSignal.samplingRateInHz);
            } else {
              if (k < ((FrameNoisePartPseudoHarmonic) hnmSignal.frames[i].n).ceps.length)
                aksi =
                    ((FrameNoisePartPseudoHarmonic) hnmSignal.frames[i].n)
                        .ceps[k]; // Use amplitudes directly without cepstrum method
              else aksi = 0.0;
            }
          } else aksi = 0.0;

          if (isNextTrackNoised) {
            if (!analysisParams.useNoiseAmplitudesDirectly) {
              if (analysisParams.regularizedCepstrumWarpingMethod
                  == RegularizedCepstrumEstimator.REGULARIZED_CEPSTRUM_WITH_PRE_BARK_WARPING)
                aksiPlusOne =
                    RegularizedPreWarpedCepstrumEstimator.cepstrum2linearSpectrumValue(
                        ((FrameNoisePartPseudoHarmonic) hnmSignal.frames[i + 1].n).ceps,
                        (k + harmonicIndexShiftNext) * analysisParams.noiseF0InHz,
                        hnmSignal.samplingRateInHz);
              else if (analysisParams.regularizedCepstrumWarpingMethod
                  == RegularizedCepstrumEstimator.REGULARIZED_CEPSTRUM_WITH_POST_MEL_WARPING)
                aksiPlusOne =
                    RegularizedPostWarpedCepstrumEstimator.cepstrum2linearSpectrumValue(
                        ((FrameNoisePartPseudoHarmonic) hnmSignal.frames[i + 1].n).ceps,
                        (k + harmonicIndexShiftNext) * analysisParams.noiseF0InHz,
                        hnmSignal.samplingRateInHz);
            } else {
              if (k < ((FrameNoisePartPseudoHarmonic) hnmSignal.frames[i + 1].n).ceps.length)
                aksiPlusOne =
                    ((FrameNoisePartPseudoHarmonic) hnmSignal.frames[i + 1].n)
                        .ceps[k]; // Use amplitudes directly without cepstrum method
              else aksiPlusOne = 0.0;
            }
          } else aksiPlusOne = 0.0;
          //

          // Phases
          phasekis[k] = (float) (MathUtils.TWOPI * (Math.random() - 0.5));
          phasekiPlusOne =
              (float)
                  (phasekis[k]
                      + (k + harmonicIndexShiftCurrent)
                          * MathUtils.TWOPI
                          * analysisParams.noiseF0InHz
                          * (tsikPlusOne - tsik)); // Equation (3.55)
          //

          if (!isPrevTrackNoised) trackStartIndex = Math.max(0, trackStartIndex - transitionLen);

          for (n = trackStartIndex; n <= Math.min(trackEndIndex, outputLen - 1); n++) {
            t = SignalProcUtils.sample2time(n, hnmSignal.samplingRateInHz);

            // if (t>=tsik && t<tsikPlusOne)
            {
              // Amplitude estimate
              akt = MathUtils.interpolatedSample(tsik, t, tsikPlusOne, aksi, aksiPlusOne);
              //

              // Phase estimate
              phasekt = (float) (phasekiPlusOne * (t - tsik) / (tsikPlusOne - tsik));
              //

              if (!isPrevTrackNoised && n - trackStartIndex < transitionLen)
                noiseTracks[k][n] =
                    halfTransitionWinLeft[n - trackStartIndex] * akt * Math.cos(phasekt);
              else noiseTracks[k][n] = akt * Math.cos(phasekt);
            }
          }

          phasekis[k] = phasekiPlusOne;
        }
      }
    }

    // Accumulate the per-harmonic noise tracks into the overall noise part
    if (noiseTracks != null) {
      for (k = 0; k < noiseTracks.length; k++) {
        for (n = 0; n < noisePart.length; n++) noisePart[n] += noiseTracks[k][n];
      }

      if (referenceFile != null
          && FileUtils.exists(referenceFile)
          && synthesisParams.writeSeparateHarmonicTracksToOutputs) {
        // Write separate tracks to output
        AudioInputStream inputAudio = null;
        try {
          inputAudio = AudioSystem.getAudioInputStream(new File(referenceFile));
        } catch (UnsupportedAudioFileException | IOException e) {
          e.printStackTrace();
        }

        if (inputAudio != null) {
          // k=1;
          for (k = 0; k < noiseTracks.length; k++) {
            noiseTracks[k] = MathUtils.divide(noiseTracks[k], 32767.0);

            DDSAudioInputStream outputAudio =
                new DDSAudioInputStream(
                    new BufferedDoubleDataSource(noiseTracks[k]), inputAudio.getFormat());
            String outFileName =
                StringUtils.getFolderName(referenceFile)
                    + "noiseTrack"
                    + String.valueOf(k + 1)
                    + ".wav";
            try {
              AudioSystem.write(outputAudio, AudioFileFormat.Type.WAVE, new File(outFileName));
            } catch (IOException e) {
              e.printStackTrace();
            }
          }
        }
      }
      //
    }

    return noisePart;
  }
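Each noise track synthesized above is a single oscillator whose amplitude is linearly interpolated between consecutive analysis instants and whose phase ramps linearly toward the Equation (3.55) target. The per-sample rule of the inner loop, extracted as a simplified self-contained sketch (transition windowing omitted):

  // Synthesize one segment of a single pseudo-harmonic track between analysis times
  // tsik and tsikPlusOne: amplitude goes aksi -> aksiPlusOne, phase ramps 0 -> phaseEnd
  static void synthesizeSegment(
      double[] out, int startIndex, int endIndex, int fs,
      double tsik, double tsikPlusOne, double aksi, double aksiPlusOne, double phaseEnd) {
    for (int n = startIndex; n <= Math.min(endIndex, out.length - 1); n++) {
      double t = ((double) n) / fs; // sample index to seconds
      double frac = (t - tsik) / (tsikPlusOne - tsik);
      double amp = aksi + (aksiPlusOne - aksi) * frac; // linear amplitude interpolation
      double phase = phaseEnd * frac;                  // linear phase ramp
      out[n] += amp * Math.cos(phase);
    }
  }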