Example #1
 /**
  * Construct an audio input stream from which <code>duration</code> seconds of silence can be
  * read.
  *
  * @param duration the desired duration of the silence, in seconds
  * @param format the desired audio format of the audio input stream. getFrameSize() and
  *     getFrameRate() must return meaningful values.
  */
 public SilenceAudioInputStream(double duration, AudioFormat format) {
   super(
       new ByteArrayInputStream(
           new byte[(int) (format.getFrameSize() * format.getFrameRate() * duration)]),
       format,
       (long) (format.getFrameRate() * duration));
 }
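A minimal usage sketch for the constructor above (the SilenceDemo wrapper and output file name are illustrative, not part of the original source). Because SilenceAudioInputStream extends AudioInputStream, it can be handed straight to AudioSystem.write:

import java.io.File;
import java.io.IOException;
import javax.sound.sampled.AudioFileFormat;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;

public class SilenceDemo {
  public static void main(String[] args) throws IOException {
    // 16-bit signed PCM, stereo, 44.1 kHz, little-endian
    AudioFormat format = new AudioFormat(44100f, 16, 2, true, false);
    // Write two seconds of silence to a WAV file.
    AudioSystem.write(
        new SilenceAudioInputStream(2.0, format),
        AudioFileFormat.Type.WAVE,
        new File("silence.wav"));
  }
}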
Example #2
 /**
  * Resets per-track state when reading of a sound begins ("iniciouLeituraSom" is Portuguese for
  * "started reading sound").
  */
 @Override
 public void iniciouLeituraSom(AudioFormat format) {
   this.sampleRate = format.getFrameRate();
   this.idxTempo = 0;
   this.listaFrequenciasTempo = new ArrayList<FrequenciasSomTempo>();
   this.dadosAmostra = new double[0];
 }
Example #3
  /**
   * Construct a RawAudioFormat from an AudioFormat, assuming a WAV header of size WAV_HEADER_SIZE
   * (44) bytes.
   *
   * @param af AudioFormat (e.g. from AudioSystem.getAudioFileFormat(File)).
   */
  public RawAudioFormat(AudioFormat af) throws IOException {
    sr = (int) af.getFrameRate();
    br = af.getSampleSizeInBits();
    fs = br / 8;

    if (af.getChannels() > 1) throw new IOException("multi-channel files are not supported");

    if (af.getEncoding() == AudioFormat.Encoding.PCM_SIGNED) {
      signed = true;
      alaw = false;
      ulaw = false;
      hs = WAV_HEADER_SIZE;
    } else if (af.getEncoding() == AudioFormat.Encoding.PCM_UNSIGNED) {
      signed = false;
      alaw = false;
      ulaw = false;
      hs = WAV_HEADER_SIZE;
    } else if (af.getEncoding() == AudioFormat.Encoding.ALAW) {
      alaw = true;
      signed = true;
      ulaw = false;
      hs = WAV_HEADER_SIZE2;
    } else if (af.getEncoding() == AudioFormat.Encoding.ULAW) {
      ulaw = true;
      signed = true;
      alaw = false;
      hs = WAV_HEADER_SIZE2;
    }
  }
Example #4
 /**
  * Store the fields of an AudioFormat in a transportable form.
  *
  * @param audioFormat the format to copy
  */
 public AudioFormatTransport(AudioFormat audioFormat) {
   _channels = audioFormat.getChannels();
   _encoding = audioFormat.getEncoding().toString();
   _frameRate = audioFormat.getFrameRate();
   _frameSize = audioFormat.getFrameSize();
   _sampleRate = audioFormat.getSampleRate();
   _sampleSizeInBits = audioFormat.getSampleSizeInBits();
   _isBigEndian = audioFormat.isBigEndian();
   _properties = audioFormat.properties();
 }
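A hypothetical companion method (not part of the original) showing how the stored fields could be turned back into an AudioFormat. It assumes the fields above are accessible and uses the AudioFormat constructor that accepts a properties map:

  public AudioFormat toAudioFormat() {
    // An Encoding can be reconstructed from its name string; the constructor
    // taking a properties map has been available since Java 5.
    return new AudioFormat(
        new AudioFormat.Encoding(_encoding),
        _sampleRate,
        _sampleSizeInBits,
        _channels,
        _frameSize,
        _frameRate,
        _isBigEndian,
        _properties);
  }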
Example #5
  public void open(AudioInputStream stream) throws IOException, LineUnavailableException {

    AudioInputStream is1;
    format = stream.getFormat();

    if (format.getEncoding() != AudioFormat.Encoding.PCM_SIGNED) {
      is1 = AudioSystem.getAudioInputStream(AudioFormat.Encoding.PCM_SIGNED, stream);
    } else {
      is1 = stream;
    }
    format = is1.getFormat();
    InputStream is2;
    if (parent != null) {
      ProgressMonitorInputStream pmis =
          new ProgressMonitorInputStream(parent, "Loading track..", is1);
      pmis.getProgressMonitor().setMillisToPopup(0);
      is2 = pmis;
    } else {
      is2 = is1;
    }

    byte[] buf = new byte[1 << 16]; // 64 KiB; note 2 ^ 16 is XOR in Java (== 18), not a power
    int totalRead = 0;
    int numRead;
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    while ((numRead = is2.read(buf)) > -1) {
      baos.write(buf, 0, numRead);
      totalRead += numRead;
    }
    is2.close();
    audioData = baos.toByteArray();
    AudioFormat afTemp;
    if (format.getChannels() < 2) {
      afTemp =
          new AudioFormat(
              format.getEncoding(),
              format.getSampleRate(),
              format.getSampleSizeInBits(),
              2,
              format.getSampleSizeInBits() * 2 / 8, // calculate frame size
              format.getFrameRate(),
              format.isBigEndian());
    } else {
      afTemp = format;
    }

    setLoopPoints(0, audioData.length);
    dataLine = AudioSystem.getSourceDataLine(afTemp);
    dataLine.open();
    inputStream = new ByteArrayInputStream(audioData);
  }
Example #6
  public static Clip loadClip(URL url) {
    Clip clip = null;
    String fnm = "" + url;
    try {
      AudioInputStream stream = AudioSystem.getAudioInputStream(url);
      AudioFormat format = stream.getFormat();

      if ((format.getEncoding() == AudioFormat.Encoding.ULAW)
          || (format.getEncoding() == AudioFormat.Encoding.ALAW)) {
        AudioFormat newFormat =
            new AudioFormat(
                AudioFormat.Encoding.PCM_SIGNED,
                format.getSampleRate(),
                format.getSampleSizeInBits() * 2,
                format.getChannels(),
                format.getFrameSize() * 2,
                format.getFrameRate(),
                true); // big endian
        stream = AudioSystem.getAudioInputStream(newFormat, stream);
        // System.out.println("Converted Audio format: " + newFormat);
        format = newFormat;
      }

      DataLine.Info info = new DataLine.Info(Clip.class, format);

      // make sure sound system supports data line
      if (!AudioSystem.isLineSupported(info)) {
        System.out.println("Unsupported Clip File: " + fnm);
        return null;
      }
      // get clip line resource
      clip = (Clip) AudioSystem.getLine(info);
      clip.open(stream); // open the sound file as a clip
      stream.close(); // we're done with the input stream
      // duration (in secs) of the clip
      double duration = clip.getMicrosecondLength() / 1000000.0;
      if (duration <= 1.0) {
        System.out.println("WARNING. Duration <= 1 sec : " + duration + " secs");
        System.out.println(
            "         The clip in " + fnm + " may not play in J2SE 1.5 -- make it longer");
      }
      // else
      //  System.out.println(fnm + ": Duration: " + duration + " secs");
    } // end of try block
    catch (UnsupportedAudioFileException audioException) {
      System.out.println("Unsupported audio file: " + fnm);
    } catch (LineUnavailableException noLineException) {
      System.out.println("No audio line available for : " + fnm);
    } catch (IOException ioException) {
      System.out.println("Could not read: " + fnm);
    } catch (Exception e) {
      System.out.println("Problem with " + fnm);
    }
    return clip;
  } // end of loadClip()
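An illustrative caller (the file name is hypothetical, and loadClip is assumed to be in scope). Clip playback is asynchronous, so a short-lived program has to wait before exiting:

import java.io.File;
import javax.sound.sampled.Clip;

public class ClipDemo {
  public static void main(String[] args) throws Exception {
    Clip clip = loadClip(new File("ping.wav").toURI().toURL());
    if (clip != null) {
      clip.start(); // starts playback on the audio system's own thread
      Thread.sleep(clip.getMicrosecondLength() / 1000); // microseconds -> milliseconds
    }
  }
}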
Example #7
 private AudioInputStream toLittleEndian(AudioInputStream ais) {
   AudioFormat format = ais.getFormat();
   AudioFormat targetFormat =
       new AudioFormat(
           format.getEncoding(),
           format.getSampleRate(),
           format.getSampleSizeInBits(),
           format.getChannels(),
           format.getFrameSize(),
           format.getFrameRate(),
           false);
   return AudioSystem.getAudioInputStream(targetFormat, ais);
 }
Example #8
  public void write(AudioInputStream stream, RIFFWriter writer) throws IOException {

    RIFFWriter fmt_chunk = writer.writeChunk("fmt ");

    AudioFormat format = stream.getFormat();
    fmt_chunk.writeUnsignedShort(3); // WAVE_FORMAT_IEEE_FLOAT
    fmt_chunk.writeUnsignedShort(format.getChannels());
    fmt_chunk.writeUnsignedInt((int) format.getSampleRate());
    fmt_chunk.writeUnsignedInt(((int) format.getFrameRate()) * format.getFrameSize());
    fmt_chunk.writeUnsignedShort(format.getFrameSize());
    fmt_chunk.writeUnsignedShort(format.getSampleSizeInBits());
    fmt_chunk.close();
    RIFFWriter data_chunk = writer.writeChunk("data");
    byte[] buff = new byte[1024];
    int len;
    while ((len = stream.read(buff, 0, buff.length)) != -1) data_chunk.write(buff, 0, len);
    data_chunk.close();
  }
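For concreteness: with 32-bit float stereo audio at 44.1 kHz, the fmt fields written above come out to format tag 3 (WAVE_FORMAT_IEEE_FLOAT), 2 channels, sample rate 44100, byte rate 44100 × 8 = 352800 (frame rate × frame size), block align 8 (the 8-byte frame: 2 channels × 4 bytes), and 32 bits per sample.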
Example #9
  /** Convert javax.sound.sampled.AudioFormat to javax.media.format.AudioFormat. */
  public static AudioFormat convertFormat(javax.sound.sampled.AudioFormat format) {

    Encoding encoding = format.getEncoding();
    int channels = format.getChannels();
    float frameRate = format.getFrameRate();
    // JMF expresses frame size in bits; JavaSound reports bytes, hence the * 8.
    int frameSize = format.getFrameSize() < 0 ? format.getFrameSize() : (format.getFrameSize() * 8);
    float sampleRate = format.getSampleRate();
    int sampleSize = format.getSampleSizeInBits();

    int endian = format.isBigEndian() ? AudioFormat.BIG_ENDIAN : AudioFormat.LITTLE_ENDIAN;

    int signed = AudioFormat.NOT_SPECIFIED;
    String encodingString = AudioFormat.LINEAR;

    if (encoding == Encoding.PCM_SIGNED) {
      signed = AudioFormat.SIGNED;
      encodingString = AudioFormat.LINEAR;
    } else if (encoding == Encoding.PCM_UNSIGNED) {
      signed = AudioFormat.UNSIGNED;
      encodingString = AudioFormat.LINEAR;
    } else if (encoding == Encoding.ALAW) {
      encodingString = AudioFormat.ALAW;
    } else if (encoding == Encoding.ULAW) {
      encodingString = AudioFormat.ULAW;
    } else {
      encodingString = encoding.toString();
    }

    AudioFormat jmfFormat =
        new AudioFormat(
            encodingString,
            (double) sampleRate,
            sampleSize,
            channels,
            endian,
            signed,
            frameSize,
            frameRate,
            AudioFormat.byteArray);

    return jmfFormat;
  }
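An illustrative call (the concrete format is arbitrary; assumes JMF is on the classpath and convertFormat is in scope):

javax.sound.sampled.AudioFormat js =
    new javax.sound.sampled.AudioFormat(44100f, 16, 2, true, false); // CD-quality signed PCM
javax.media.format.AudioFormat jmf = convertFormat(js);
// jmf now reports LINEAR encoding, 44100.0 Hz, 16 bits, 2 channels,
// little-endian, signed, with the frame size expressed in bits (32).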
Example #10
 /**
  * Inits AudioInputStream and AudioFileFormat from the data source.
  *
  * @throws BasicPlayerException
  */
 protected void initAudioInputStream() throws BasicPlayerException {
   try {
     reset();
     notifyEvent(BasicPlayerEvent.OPENING, getEncodedStreamPosition(), -1, m_dataSource);
     if (m_dataSource instanceof URL) {
       initAudioInputStream((URL) m_dataSource);
     } else if (m_dataSource instanceof File) {
       initAudioInputStream((File) m_dataSource);
     } else if (m_dataSource instanceof InputStream) {
       initAudioInputStream((InputStream) m_dataSource);
     }
     createLine();
     // Notify listeners with AudioFileFormat properties.
     Map properties = null;
     if (m_audioFileFormat instanceof TAudioFileFormat) {
       // Tritonus SPI compliant audio file format.
       properties = ((TAudioFileFormat) m_audioFileFormat).properties();
        // Copy the Map because the returned one is immutable and we need to add keys.
       properties = deepCopy(properties);
     } else {
       properties = new HashMap();
     }
     // Add JavaSound properties.
     if (m_audioFileFormat.getByteLength() > 0) {
       properties.put("audio.length.bytes", new Integer(m_audioFileFormat.getByteLength()));
     }
     if (m_audioFileFormat.getFrameLength() > 0) {
       properties.put("audio.length.frames", new Integer(m_audioFileFormat.getFrameLength()));
     }
     if (m_audioFileFormat.getType() != null) {
       properties.put("audio.type", (m_audioFileFormat.getType().toString()));
     }
     // Audio format.
     AudioFormat audioFormat = m_audioFileFormat.getFormat();
     if (audioFormat.getFrameRate() > 0) {
       properties.put("audio.framerate.fps", new Float(audioFormat.getFrameRate()));
     }
     if (audioFormat.getFrameSize() > 0) {
       properties.put("audio.framesize.bytes", new Integer(audioFormat.getFrameSize()));
     }
     if (audioFormat.getSampleRate() > 0) {
       properties.put("audio.samplerate.hz", new Float(audioFormat.getSampleRate()));
     }
     if (audioFormat.getSampleSizeInBits() > 0) {
       properties.put("audio.samplesize.bits", new Integer(audioFormat.getSampleSizeInBits()));
     }
     if (audioFormat.getChannels() > 0) {
       properties.put("audio.channels", new Integer(audioFormat.getChannels()));
     }
     if (audioFormat instanceof TAudioFormat) {
       // Tritonus SPI compliant audio format.
       Map addproperties = ((TAudioFormat) audioFormat).properties();
       properties.putAll(addproperties);
     }
     // Add SourceDataLine
     properties.put("basicplayer.sourcedataline", m_line);
     Iterator<BasicPlayerListener> it = laucher.getBasicPlayerListeners().iterator();
     while (it.hasNext()) {
       BasicPlayerListener bpl = it.next();
       bpl.opened(m_dataSource, properties);
     }
     m_status = OPENED;
     notifyEvent(BasicPlayerEvent.OPENED, getEncodedStreamPosition(), -1, null);
   } catch (LineUnavailableException e) {
     throw new BasicPlayerException(e);
   } catch (UnsupportedAudioFileException e) {
     throw new BasicPlayerException(e);
   } catch (IOException e) {
     throw new BasicPlayerException(e);
   }
 }
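A sketch of the receiving side, reading a few of the keys this method populates. Only the opened callback is shown, and its shape is inferred from the bpl.opened(m_dataSource, properties) call above:

  public void opened(Object stream, Map properties) {
    Float fps = (Float) properties.get("audio.framerate.fps");
    Integer channels = (Integer) properties.get("audio.channels");
    Integer bits = (Integer) properties.get("audio.samplesize.bits");
    System.out.println("Opened " + stream + ": " + channels + " ch, " + bits + " bit, " + fps + " fps");
  }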
Example #11
  private InputStream getFileStream(WaveFileFormat waveFileFormat, InputStream audioStream)
      throws IOException {
    // private method ... assumes audioFileFormat is a supported file type

    // WAVE header fields
    AudioFormat audioFormat = waveFileFormat.getFormat();
    int headerLength = waveFileFormat.getHeaderSize();
    int riffMagic = WaveFileFormat.RIFF_MAGIC;
    int waveMagic = WaveFileFormat.WAVE_MAGIC;
    int fmtMagic = WaveFileFormat.FMT_MAGIC;
    int fmtLength = WaveFileFormat.getFmtChunkSize(waveFileFormat.getWaveType());
    short wav_type = (short) waveFileFormat.getWaveType();
    short channels = (short) audioFormat.getChannels();
    short sampleSizeInBits = (short) audioFormat.getSampleSizeInBits();
    int sampleRate = (int) audioFormat.getSampleRate();
    int frameSizeInBytes = audioFormat.getFrameSize();
    int frameRate = (int) audioFormat.getFrameRate();
    int avgBytesPerSec = channels * sampleSizeInBits * sampleRate / 8;
    short blockAlign = (short) ((sampleSizeInBits / 8) * channels);
    int dataMagic = WaveFileFormat.DATA_MAGIC;
    int dataLength = waveFileFormat.getFrameLength() * frameSizeInBytes;
    int length = waveFileFormat.getByteLength();
    int riffLength = dataLength + headerLength - 8;

    byte header[] = null;
    ByteArrayInputStream headerStream = null;
    ByteArrayOutputStream baos = null;
    DataOutputStream dos = null;
    SequenceInputStream waveStream = null;

    AudioFormat audioStreamFormat = null;
    AudioFormat.Encoding encoding = null;
    InputStream codedAudioStream = audioStream;

    // if audioStream is an AudioInputStream and we need to convert, do it here...
    if (audioStream instanceof AudioInputStream) {
      audioStreamFormat = ((AudioInputStream) audioStream).getFormat();

      encoding = audioStreamFormat.getEncoding();

      if (AudioFormat.Encoding.PCM_SIGNED.equals(encoding)) {
        if (sampleSizeInBits == 8) {
          wav_type = WaveFileFormat.WAVE_FORMAT_PCM;
          // plug in the transcoder to convert from PCM_SIGNED to PCM_UNSIGNED
          codedAudioStream =
              AudioSystem.getAudioInputStream(
                  new AudioFormat(
                      AudioFormat.Encoding.PCM_UNSIGNED,
                      audioStreamFormat.getSampleRate(),
                      audioStreamFormat.getSampleSizeInBits(),
                      audioStreamFormat.getChannels(),
                      audioStreamFormat.getFrameSize(),
                      audioStreamFormat.getFrameRate(),
                      false),
                  (AudioInputStream) audioStream);
        }
      }
      if ((AudioFormat.Encoding.PCM_SIGNED.equals(encoding) && audioStreamFormat.isBigEndian())
          || (AudioFormat.Encoding.PCM_UNSIGNED.equals(encoding)
              && !audioStreamFormat.isBigEndian())
          || (AudioFormat.Encoding.PCM_UNSIGNED.equals(encoding)
              && audioStreamFormat.isBigEndian())) {
        if (sampleSizeInBits != 8) {
          wav_type = WaveFileFormat.WAVE_FORMAT_PCM;
          // plug in the transcoder to convert to PCM_SIGNED_LITTLE_ENDIAN
          codedAudioStream =
              AudioSystem.getAudioInputStream(
                  new AudioFormat(
                      AudioFormat.Encoding.PCM_SIGNED,
                      audioStreamFormat.getSampleRate(),
                      audioStreamFormat.getSampleSizeInBits(),
                      audioStreamFormat.getChannels(),
                      audioStreamFormat.getFrameSize(),
                      audioStreamFormat.getFrameRate(),
                      false),
                  (AudioInputStream) audioStream);
        }
      }
    }

    // Now push the header into a stream, concat, and return the new SequenceInputStream

    baos = new ByteArrayOutputStream();
    dos = new DataOutputStream(baos);

    // we write in little-endian...
    dos.writeInt(riffMagic);
    dos.writeInt(big2little(riffLength));
    dos.writeInt(waveMagic);
    dos.writeInt(fmtMagic);
    dos.writeInt(big2little(fmtLength));
    dos.writeShort(big2littleShort(wav_type));
    dos.writeShort(big2littleShort(channels));
    dos.writeInt(big2little(sampleRate));
    dos.writeInt(big2little(avgBytesPerSec));
    dos.writeShort(big2littleShort(blockAlign));
    dos.writeShort(big2littleShort(sampleSizeInBits));
    // $$fb 2002-04-16: Fix for 4636355: RIFF audio headers could be _more_ spec compliant
    if (wav_type != WaveFileFormat.WAVE_FORMAT_PCM) {
      // add length 0 for "codec specific data length"
      dos.writeShort(0);
    }

    dos.writeInt(dataMagic);
    dos.writeInt(big2little(dataLength));

    dos.close();
    header = baos.toByteArray();
    headerStream = new ByteArrayInputStream(header);
    waveStream = new SequenceInputStream(headerStream, new NoCloseInputStream(codedAudioStream));

    return waveStream;
  }
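As a worked example: for 16-bit signed PCM, stereo, at 44.1 kHz, the header fields above evaluate to blockAlign = (16 / 8) × 2 = 4, avgBytesPerSec = 2 × 16 × 44100 / 8 = 176400, and riffLength = dataLength + 44 − 8 = dataLength + 36, since the standard PCM WAVE header is 44 bytes.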
Example #12
  /**
   * Returns the AudioFileFormat describing the file that will be written from this
   * AudioInputStream. Throws IllegalArgumentException if not supported.
   */
  private AudioFileFormat getAudioFileFormat(AudioFileFormat.Type type, AudioInputStream stream) {
    if (!isFileTypeSupported(type, stream)) {
      throw new IllegalArgumentException("File type " + type + " not supported.");
    }
    AudioFormat format = null;
    WaveFileFormat fileFormat = null;
    AudioFormat.Encoding encoding = AudioFormat.Encoding.PCM_SIGNED;

    AudioFormat streamFormat = stream.getFormat();
    AudioFormat.Encoding streamEncoding = streamFormat.getEncoding();

    float sampleRate;
    int sampleSizeInBits;
    int channels;
    int frameSize;
    float frameRate;
    int fileSize;

    int waveType = WaveFileFormat.WAVE_FORMAT_PCM;

    if (AudioFormat.Encoding.ALAW.equals(streamEncoding)
        || AudioFormat.Encoding.ULAW.equals(streamEncoding)) {

      encoding = streamEncoding;
      sampleSizeInBits = streamFormat.getSampleSizeInBits();
      if (streamEncoding.equals(AudioFormat.Encoding.ALAW)) {
        waveType = WaveFileFormat.WAVE_FORMAT_ALAW;
      } else {
        waveType = WaveFileFormat.WAVE_FORMAT_MULAW;
      }
    } else if (streamFormat.getSampleSizeInBits() == 8) {
      encoding = AudioFormat.Encoding.PCM_UNSIGNED;
      sampleSizeInBits = 8;
    } else {
      encoding = AudioFormat.Encoding.PCM_SIGNED;
      sampleSizeInBits = streamFormat.getSampleSizeInBits();
    }

    format =
        new AudioFormat(
            encoding,
            streamFormat.getSampleRate(),
            sampleSizeInBits,
            streamFormat.getChannels(),
            streamFormat.getFrameSize(),
            streamFormat.getFrameRate(),
            false); // WAVE is little endian

    if (stream.getFrameLength() != AudioSystem.NOT_SPECIFIED) {
      fileSize =
          (int) stream.getFrameLength() * streamFormat.getFrameSize()
              + WaveFileFormat.getHeaderSize(waveType);
    } else {
      fileSize = AudioSystem.NOT_SPECIFIED;
    }

    fileFormat =
        new WaveFileFormat(
            AudioFileFormat.Type.WAVE, fileSize, format, (int) stream.getFrameLength());

    return fileFormat;
  }
Example #13
  public boolean convertToTxt(short SIZE) {

    // WAV FILE ATTRIBUTE
    AudioInputStream audioinput;
    AudioFormat audioformat;
    long length_in_frames;
    float frameRate;
    int frameSize;
    int channels;
    float sampleRate;
    int sampleSize;
    boolean big_endian;
    System.out.println(wav_file_path);

    File wavfile = new File(wav_file_path);
    BufferedInputStream bufread;

    BufferedWriter wrL, wrR; // left channel file and right channel file;
    // Placeholder for the maximum magnitude in the WAV file (used for normalizing).
    int max_mag_in_recording = 0;
    byte[] buffer = new byte[SIZE];
    ArrayList write_list = new ArrayList();
    Integer[] write_buffer;
    int sampleCount = 0;
    boolean write_buffer_full = false;
    int num_bytes_read = 0;
    int valuecount = 0;
    try {
      audioinput = AudioSystem.getAudioInputStream(wavfile);
      bufread = new BufferedInputStream(audioinput);
      String[] tempstr = wav_file_path.split("\\.wav"); // escape the dot: split() takes a regex
      left_channel_file = tempstr[0] + "LEFT.txt";
      right_channel_file = tempstr[0] + "RIGHT.txt";
      wrL = new BufferedWriter(new FileWriter(left_channel_file));
      wrR = new BufferedWriter(new FileWriter(right_channel_file));

      audioformat = audioinput.getFormat();
      length_in_frames = audioinput.getFrameLength();
      channels = audioformat.getChannels();
      frameRate = audioformat.getFrameRate();
      frameSize = audioformat.getFrameSize();
      sampleRate = audioformat.getSampleRate();
      sampling_rate = (int) sampleRate;
      sampleSize = audioformat.getSampleSizeInBits();
      big_endian = audioformat.isBigEndian();

      bufread = new BufferedInputStream(audioinput, SIZE);

      // THIS BYTE ARRAY WILL STORE THE FRAME
      short sampleSizeinBytes = (short) (sampleSize / 8);
      int[] frame = new int[sampleSizeinBytes];
      int frame_low = 0;
      int frame_high = sampleSizeinBytes - 1;

      /*
         frame[highest] + frame[highest-1] + .... + frame[0]
      */
      int tmp;
      int tempint = 0;
      short buffer_read_count = 0;
      boolean left_overs = false;
      ArrayList left_over_array = new ArrayList();
      short requires_more = 0;
      byte[] left_over_byte_array = new byte[SIZE];
      boolean number_negative = false;
      boolean write_to_file = false;
      int temp_left = 0;
      int temp_byte_to_int = 0;

      boolean right_channel = false;
      int negative_number_shift = 0;
      for (int i = sampleSizeinBytes; i < 4; i++) {
        negative_number_shift = negative_number_shift | 0xFF << (8 * i);
      }

      while ((num_bytes_read = bufread.read(buffer)) != -1) {
        buffer_read_count = -1;
        if (left_overs) {
          requires_more = (short) (sampleSizeinBytes - left_over_array.size());
          if (num_bytes_read >= requires_more) {
            for (short k = 1; k <= requires_more; k++) {
              buffer_read_count = (short) (buffer_read_count + 1);
              left_over_array.add(buffer[buffer_read_count]);
            }
          }
          left_overs = false;
          Byte[] t = new Byte[left_over_array.size()];
          left_over_array.toArray(t);
          temp_byte_to_int = 0;
          tempint = 0;
          temp_left = 0;
          for (int i = 0; i < left_over_array.size(); i++) {
            if (!((t[i]) >= 0 && (t[i] <= 127))) {
              if (i == left_over_array.size() - 1) number_negative = true;
              temp_byte_to_int = 256 + t[i];
            } else temp_byte_to_int = t[i];
            temp_left = temp_left + (temp_byte_to_int << (8 * (i)));
          }
          if (number_negative) {
            tempint = (negative_number_shift) | temp_left;
            number_negative = false;
          } else tempint = temp_left;
          write_list.add(tempint);
          sampleCount += 1;

          tempint = temp_left = temp_byte_to_int = 0;

          left_over_array.clear();
        }
        while ((buffer_read_count) < num_bytes_read - 1) {
          if (((num_bytes_read - 1) - (buffer_read_count)) >= sampleSizeinBytes) {

            for (short i = 1; i <= sampleSizeinBytes; i++) {
              buffer_read_count = (short) (buffer_read_count + 1);
              frame[i - 1] = buffer[buffer_read_count];
            }
            tempint = temp_left = temp_byte_to_int = 0;

            for (int m = 0; m < frame.length; m++) {
              if (!((frame[m]) >= 0 && (frame[m] <= 127))) {
                if (m == frame.length - 1) number_negative = true;
                temp_byte_to_int = 256 + frame[m];
              } else temp_byte_to_int = frame[m];
              temp_left = temp_left + (temp_byte_to_int << (8 * (m)));
            }
            if (number_negative) {
              tempint = (negative_number_shift) | temp_left;
              number_negative = false;
            } else tempint = temp_left;
            sampleCount += 1;
            write_list.add(tempint);

            tempint = temp_left = temp_byte_to_int = 0;
          } else {
            // save the left over bytes which are lesser in number than frame size in a tempbuffer
            // and turn the flag on. If flag==true then process that first and then process other.
            left_over_array.clear();
            short left_over_array_count = (short) (num_bytes_read - 1 - buffer_read_count);
            for (short l = 1; l <= left_over_array_count; l++) {
              buffer_read_count = (short) (buffer_read_count + 1);
              left_over_array.add(buffer[buffer_read_count]);
            }
            left_overs = true;
          }
        }
        write_to_file = true;
        if (write_to_file) {

          // store all the number in the write_buffer to the file
          write_buffer = new Integer[write_list.size()];
          write_list.toArray(write_buffer);
          for (int w = 0; w < write_list.size(); w++) {
            if (!right_channel) {
              int storeL = write_buffer[w].intValue();
              valuecount = valuecount + 1;

              if ((storeL > 0) && (maxValueL < storeL)) {
                maxValueL = storeL;
                sampleMaxL = valuecount;
              }
              if ((storeL < 0) && (minValueL > storeL)) {
                sampleMinL = valuecount;
                minValueL = storeL;
              }
              wrL.write(String.valueOf(storeL));
              wrL.newLine();
              right_channel = true;
            } else {
              valuecount = valuecount + 1;
              int storeR = write_buffer[w].intValue();
              if ((storeR > 0) && (maxValueR < storeR)) {
                sampleMaxR = valuecount;
                maxValueR = storeR;
              }
              if ((storeR < 0) && (minValueR > storeR)) {
                sampleMinR = valuecount;
                minValueR = storeR;
              }
              wrR.write(String.valueOf(storeR));
              wrR.newLine();
              right_channel = false;
            }
          }
          write_list.clear();
        }
      }
      wrR.close();
      wrL.close();
      bufread.close();
      System.out.println(sampleMaxL + "\n" + sampleMaxR + "\n" + sampleMinL + "\n" + sampleMinR);

    } catch (IOException e) {
      System.err.println(e);
    } catch (UnsupportedAudioFileException f) {
      System.err.println(f);
    }

    return true;
  }
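The manual byte reassembly and the negative_number_shift mask above implement little-endian sign extension. For the common 16-bit case the whole dance reduces to a one-liner (equivalent sketch, not part of the original class):

// bytes buffer[i] (low) and buffer[i + 1] (high) form one little-endian 16-bit sample;
// the (short) cast performs the sign extension the loops build by hand.
int sample = (short) (((buffer[i + 1] & 0xFF) << 8) | (buffer[i] & 0xFF));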
Example #14
  public static void main(String args[]) {
    double windowSize = 1.3932; // analysis window in seconds (alternative value: 1.8576)
    int calculations = 4; // 6;
    double stepSize = (windowSize / calculations);

    try {
      // Open the file that is the first
      // command line parameter
      File soundFile = new File("180bpmstd0.8.wav");
      AudioInputStream ais = AudioSystem.getAudioInputStream(soundFile);
      AudioFormat format = ais.getFormat(); // get format
      int Fs = (int) format.getFrameRate(); // get Sampling frequency
      int N = 30;
      int samples = 30720; // set number of samples, samples = windowSize*Fs
      double ds = (double) Fs / N; // decimated rate; cast before dividing to avoid integer truncation
      int minT0 = (int) (0.3 * ds); // minimum = 0.3sec (200bpm)
      int maxT0 = (int) (0.6 * ds); // maximum = 0.6 sec (100 bpm); alternative bound: (windowSize*Fs)-1

      int stepSizeSample = (int) (stepSize * Fs);
      long nBytes = (long) (ais.getFrameLength() * format.getFrameSize()); // get number of bytes

      byte[] inBuffer = new byte[(int) nBytes];
      ais.read(inBuffer, 0, (int) nBytes); // read
      FIR filt = new FIR(); // create filter

      int j = 0;
      double[] value = new double[(int) (nBytes) / format.getFrameSize()];

      for (int i = 0; i < inBuffer.length; i = i + 2) {
        // assemble a 16-bit little-endian sample and scale it to roughly [-1, 1]
        value[j] =
            0.000030525 * (double) (((byte) inBuffer[i] & 0xff) | ((int) inBuffer[i + 1] << 8));
        // filter
        value[j] = filt.process(value[j]);
        j++;
      }

      // Shift over time
      long start = System.currentTimeMillis();
      for (int s = 0; s < value.length; s += stepSizeSample) {
        if ((int) (value.length - s)
            >= samples) { // if there are enough samples left to fill a window

          double[] value1 = new double[samples];
          // Copy window-size samples to value1
          System.arraycopy(value, s, value1, 0, samples);

          // Get peak-decimation
          double[] beatpeak = new double[value1.length / N];
          int base = 0;
          int k = 0;
          for (int a = 0; a < samples; a += N) {
            double peak = 0;
            for (int b = 0; b < N; b++) {
              if (base + N < samples) {
                if (Math.abs(value1[base + b]) > peak) {
                  beatpeak[k] = Math.abs(value1[base + b]);
                  peak = beatpeak[k];
                }
              }
            }
            base = a + N;
            k++;
          }

          // Autocorrelation
          double[] R = FFT.autoCorrelate(beatpeak); // autocorrelate the peak-decimated envelope

          // Get the time of occurrence of peak
          int t = MathUtils.findLocalPeakLocation(R, minT0, maxT0);

          // System.out.println(time[t]);
          // Solve the tempo (FHR) from the lag of the autocorrelation peak
          double FHR = (double) 60 * ds / t;
          // Print
          System.out.format("%3.3f\n", FHR);

        } else {
          System.out.println("end");
          break;
        }
      }
      long end = System.currentTimeMillis();
      System.out.println((end - start));
      // Close the input stream
      ais.close();

    } catch (Exception e) { // Catch exception if any
      System.err.println("Error: " + e.getMessage());
    }
  }
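One step worth making explicit: after decimating by N, an autocorrelation lag of t samples corresponds to t × N / Fs seconds, so the tempo is 60 / (t × N / Fs) = 60 × (Fs / N) / t = 60 × ds / t beats per minute, which is exactly the FHR formula in the loop. The minT0/maxT0 bounds restrict the peak search to periods of 0.3 to 0.6 s, i.e. 100 to 200 bpm.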