コード例 #1
1
 public void run(String local, String remote, InputStream pin, OutputStream pout) {
   // Open the microphone; a failure is tolerated here and handled by the fallback below.
   try {
     microphone = SoundMixerEnumerator.getInputLine(pcmformat, DefaultPhonePCMBlockSize);
   } catch (LineUnavailableException lue) {
     // NOTE(review): "\3b" here vs "3\b" in the catch below looks inconsistent —
     // confirm which console escape sequence was intended.
     System.out.println(
         "\3b"
             + getClass().getName()
             + ".<init>:\n\tCould not create microphone input stream.\n\t"
             + lue);
   }
   // Open the speaker; on failure release the microphone if we managed to get one.
   try {
     speaker = SoundMixerEnumerator.getOutputLine(pcmformat, DefaultPhonePCMBlockSize);
   } catch (LineUnavailableException lue) {
     // BUG FIX: microphone may be null here when its creation failed above;
     // closing it unconditionally threw a NullPointerException.
     if (microphone != null) {
       microphone.close();
     }
     System.out.println(
         "\3b"
             + getClass().getName()
             + ".<init>:\n\tCould not create speaker output stream.\n\t"
             + lue);
   }
   // Without both lines, degrade to a plain (unmonitored) call.
   if ((speaker == null) || (microphone == null)) {
     super.run(local, remote, pin, pout);
     return;
   }
   try {
     recorder = new Recorder(pout);
     recorder.start();
     gui = openMonitorGUI("Remote " + remote + " Local " + local);
     pin.skip(pin.available()); // waste whatever we couldn't process in time
     super.run(local, remote, new PhoneCallMonitorInputStream(pin), pout);
   } catch (Exception e) {
     System.out.println("3\b" + getClass().getName() + ".run:\n\t" + e);
     e.printStackTrace();
   } finally {
     deactivate();
     // BUG FIX: interrupt the recorder on the exception path too (previously it was
     // only interrupted on normal completion), and dispose the GUI exactly once.
     if (recorder != null) {
       recorder.interrupt();
     }
     microphone.close();
     speaker.close();
     if (gui != null) {
       gui.dispose();
     }
   }
 }
コード例 #2
0
ファイル: AudioCapture.java プロジェクト: Jellofishi/Java
 /**
  * Releases the capture line. If the line is still actively capturing, buffered data is
  * discarded and capture is halted before the line is closed.
  */
 public void close() throws IOException {
   boolean stillCapturing = line.isActive();
   if (stillCapturing) {
     line.flush();
     line.stop();
   }
   line.close();
 }
コード例 #3
0
ファイル: JSMinim.java プロジェクト: casmi/minim-java
 /**
  * Acquires and opens a TargetDataLine for the given format, preferring {@code inputMixer}
  * when one is set. Returns {@code null} (after logging an error) when the format is
  * unsupported or the line cannot be acquired.
  */
 TargetDataLine getTargetDataLine(AudioFormat format, int bufferSize) {
   DataLine.Info info = new DataLine.Info(TargetDataLine.class, format);
   if (!AudioSystem.isLineSupported(info)) {
     error("Unable to return a TargetDataLine: unsupported format - " + format.toString());
     return null;
   }
   TargetDataLine line = null;
   try {
     // Use the explicitly selected input mixer when available, else the system default.
     line =
         (inputMixer == null)
             ? (TargetDataLine) AudioSystem.getLine(info)
             : (TargetDataLine) inputMixer.getLine(info);
     line.open(format, bufferSize * format.getFrameSize());
     debug(
         "TargetDataLine buffer size is "
             + line.getBufferSize()
             + "\n"
             + "TargetDataLine format is "
             + line.getFormat().toString()
             + "\n"
             + "TargetDataLine info is "
             + line.getLineInfo().toString());
   } catch (Exception e) {
     error("Error acquiring TargetDataLine: " + e.getMessage());
   }
   return line;
 }
コード例 #4
0
ファイル: RawRecorder.java プロジェクト: sjoynt/shelbi
  /**
   * Create a new RawRecorder.
   *
   * <p>If the requested format is not supported by the audio system, data is instead captured
   * at 44.1 kHz, 16-bit signed little-endian mono and later downsampled to the requested rate
   * (see the {@code downsample} flag used in {@code stop()}).
   *
   * @param audioFormat the desired output format
   * @throws LineUnavailableException if neither the requested nor the fallback capture format
   *     is supported
   */
  public RawRecorder(AudioFormat audioFormat) throws LineUnavailableException {

    inFormat = audioFormat;
    outFormat = audioFormat;

    /* Some machines, such as my Mac OS X PowerBook, don't support
     * a wide range of input formats.  So...we may need to read
     * data in using a different format and then resample to the
     * desired format.  Here, I'm just going to go for 44.1kHz
     * 16-bit signed little endian data if the given audio format
     * is not supported.
     */
    DataLine.Info info = new DataLine.Info(TargetDataLine.class, inFormat);

    if (!AudioSystem.isLineSupported(info)) {
      downsample = true;
      inFormat =
          new AudioFormat(
              44100.0f, // sample rate
              16, // sample size
              1, // channels (1 == mono)
              true, // signed
              false); // little endian
      info = new DataLine.Info(TargetDataLine.class, inFormat);
      if (!AudioSystem.isLineSupported(info)) {
        throw new LineUnavailableException("Unsupported format: " + audioFormat);
      }
    }

    microphone = (TargetDataLine) AudioSystem.getLine(info);
    // BUG FIX: open the line with the format we actually matched (inFormat).
    // Opening with the caller's audioFormat failed whenever the fallback
    // 44.1 kHz format had been substituted above.
    microphone.open(inFormat, microphone.getBufferSize());
  }
コード例 #5
0
ファイル: SoundMixer.java プロジェクト: nakag/mmscomputing
  /**
   * Opens a capture line on this mixer for the given format.
   *
   * @param format the audio format to open the line with
   * @return an opened TargetDataLine using its default buffer size
   * @throws LineUnavailableException if the mixer cannot supply or open such a line
   */
  public TargetDataLine getInputLine(AudioFormat format) throws LineUnavailableException {
    DataLine.Info lineInfo = new DataLine.Info(TargetDataLine.class, format);
    TargetDataLine input = (TargetDataLine) mixer.getLine(lineInfo);
    input.open(format, input.getBufferSize());
    return input;
  }
コード例 #6
0
  /** Stops audio flow: halts both lines and discards any data still buffered in them. */
  public void deactivate() {
    active = false;
    // Stop capture before flushing so no new data arrives while the buffer is discarded.
    microphone.stop();
    microphone.flush();

    speaker.stop();
    speaker.flush();
  }
コード例 #7
0
 /** Resumes audio flow: discards stale buffered data and restarts both lines. */
 public void activate() {
   active = true;
   // Drop whatever accumulated while inactive so capture/playback restart fresh.
   microphone.flush();
   speaker.flush();
   speaker.start();
   blocker.release(); // wake any thread blocked waiting for the lines to become active
   microphone.start();
   // NOTE(review): the microphone is flushed again right after start(); this discards any
   // bytes captured between start() and here — confirm the double flush is intended.
   microphone.flush();
 }
コード例 #8
0
  @Ignore
  @Test
  public void testSilenceWriter()
      throws UnsupportedAudioFileException, InterruptedException, LineUnavailableException,
          FileNotFoundException {
    float sampleRate = 44100;
    int bufferSize = 1024;
    int overlap = 0;

    // Print the available mixers so the hard-coded selection index can be sanity-checked.
    Mixer.Info[] availableMixers = AudioSystem.getMixerInfo();
    int selectedMixerIndex = 4;
    for (int i = 0; i < availableMixers.length; i++) {
      System.out.println(i + ": " + Shared.toLocalString(availableMixers[i]));
    }
    Mixer.Info selectedMixer = AudioSystem.getMixerInfo()[selectedMixerIndex];
    System.out.println("Selected mixer: " + Shared.toLocalString(selectedMixer));

    // Open and start a capture line on the selected mixer.
    Mixer mixer = AudioSystem.getMixer(selectedMixer);
    AudioFormat format = new AudioFormat(sampleRate, 16, 1, true, true);
    DataLine.Info dataLineInfo = new DataLine.Info(TargetDataLine.class, format);
    TargetDataLine line = (TargetDataLine) mixer.getLine(dataLineInfo);
    line.open(format, bufferSize);
    line.start();
    AudioInputStream stream = new AudioInputStream(line);

    // Dispatcher feeding a silence detector plus a waveform writer.
    AudioDispatcher dispatcher = new AudioDispatcher(stream, bufferSize, overlap);
    WaveformWriter writer = new WaveformWriter(format, "01.file.wav");
    dispatcher.addAudioProcessor(new SilenceDetector());
    dispatcher.addAudioProcessor(writer);

    // Run the dispatcher on its own thread.
    new Thread(dispatcher).start();

    Thread.sleep(3000);

    // Swap in a second writer for the next three seconds of audio.
    dispatcher.removeAudioProcessor(writer);
    writer = new WaveformWriter(format, "02.file.wav");
    dispatcher.addAudioProcessor(writer);

    Thread.sleep(3000);

    dispatcher.stop();
  }
コード例 #9
0
ファイル: RawRecorder.java プロジェクト: sjoynt/shelbi
 /**
  * Stop recording and give us the clip.
  *
  * @return the samples recorded since the last call to start, downsampled to the requested
  *     output rate when the fallback capture format was used; an empty array when no
  *     recording is active or the captured bytes cannot be read
  * @see #start
  */
 public short[] stop() {
   synchronized (lock) {
     // Not currently recording — nothing to hand back.
     if (recorder == null) {
       return new short[0];
     }
     ByteArrayOutputStream captured = recorder.stopRecording();
     microphone.close();
     recorder = null;
     ByteArrayInputStream audioIn = new ByteArrayInputStream(captured.toByteArray());
     try {
       short[] samples = RawReader.readAudioData(audioIn, inFormat);
       if (downsample) {
         // Sample rates are handed to the downsampler in kHz.
         int inRateKHz = (int) (inFormat.getSampleRate() / 1000.0f);
         int outRateKHz = (int) (outFormat.getSampleRate() / 1000.0f);
         samples = Downsampler.downsample(samples, inRateKHz, outRateKHz);
       }
       return samples;
     } catch (IOException e) {
       e.printStackTrace();
       return new short[0];
     }
   }
 }
コード例 #10
0
ファイル: AudioCapture.java プロジェクト: Jellofishi/Java
 /**
  * Reads captured audio from the line into {@code b}, zeroing the data while muted and
  * feeding a simple volume analysis.
  *
  * @return the number of bytes actually read from the line
  * @throws IOException if the line rejects the requested offset/length
  */
 public int read(byte[] b, int off, int len) throws IOException {
   try {
     int ret = line.read(b, off, len);
     if (ret > 0) {
       // BUG FIX: muteBuffer used to be invoked even when nothing was read (ret <= 0).
       if (isMuted()) {
         muteBuffer(b, off, ret);
       }
       // run some simple analysis
       calcCurrVol(b, off, ret);
     }
     return ret;
   } catch (IllegalArgumentException e) {
     // Preserve the original exception as the cause instead of discarding it.
     throw new IOException(e.getMessage(), e);
   }
 }
コード例 #11
0
ファイル: SourceThread.java プロジェクト: jgardella/Audiocast
 // Capture loop: read fixed 6300-byte blocks from the line and fan them out to the server.
 public void run() {
   try {
     b = new byte[6300];
     // 44.1 kHz, 16-bit, mono, signed, big-endian PCM; buffer sized to the block size.
     line.open(new AudioFormat(44100, 16, 1, true, true), 6300);
     line.start();
     while (true) {
       line.read(b, 0, b.length);
       server.writeByteBuffers(b, sourceIndex);
       if (output) serverOutput.write(b, 0, b.length);
     }
   } catch (LineUnavailableException e) {
     // NOTE(review): only LineUnavailableException is caught, so the infinite loop can
     // only exit via an unchecked throw; the line is still cleaned up below either way.
     e.printStackTrace();
     System.out.println(sourceIndex);
   } finally {
     line.stop();
     line.close();
   }
 }
コード例 #12
0
ファイル: MicrophoneWorker.java プロジェクト: WestonZz/alter
  /**
   * Background loop: polls the microphone until cancelled, publishing the current input
   * level (0-100) via setProgress. Any failure is printed and terminates the JVM.
   */
  @Override
  protected Object doInBackground() throws Exception {
    try {
      while (!isCancelled()) {
        int count = microphone.read(tempBuffer, 0, tempBuffer.length);
        if (count <= 0) {
          continue;
        }
        // Scale the RMS level and clamp it into the 0-100 progress range.
        int level = (int) (calculateRMSLevel(tempBuffer) * 10);
        level = Math.min(100, Math.max(0, level));
        setProgress(level);
      }

      microphone.close();
    } catch (Throwable e) {
      System.out.println(e);
      System.exit(-1);
    }

    return null;
  }
コード例 #13
0
ファイル: Loop2.java プロジェクト: BerndDammer/at1
    /**
     * Full-duplex loop: copies audio blocks from the capture line straight to the playback
     * line until checkRunning() reports a stop.
     */
    public void run() {
      // --------------init
      SourceDataLine sdl = Central.getGoutputSelector().getSourceDataLine();
      TargetDataLine tdl = Central.getGinputSelector().getTargetDataLine();

      try {
        tdl.open(af, READ_BUFFER_SIZE);
        sdl.open(af, WRITE_BUFFER_SIZE);

        sdl.start();
        tdl.start();
        while (checkRunning()) {
          tdl.read(sbMove, 0, READSIZE);
          sdl.write(sbMove, 0, READSIZE);
        }
      } catch (LineUnavailableException e) {
        e.printStackTrace();
      } finally {
        // BUG FIX: close both lines on every exit path; previously they were leaked
        // whenever open() threw or the loop ended with an unchecked exception.
        tdl.close();
        sdl.close();
      }
      error.log("Continuous Loop Stopped");
    }
コード例 #14
0
  /**
   * Starts the recording. To accomplish this, (i) the line is started and (ii) the thread is
   * started.
   */
  public void start() {
    /*
     * Starting the TargetDataLine. It tells the line that we now want to read data from it. If this method isn't called, we
     * won't be able to read data from the line at all.
     */
    m_line.start();

    /*
     * Starting the thread. This call results in the method 'run()' (see below) being called. There, the data is actually read
     * from the line.
     */
    // The line is started first so run() immediately finds data available to read.
    super.start();
  }
コード例 #15
0
ファイル: Spectrogram.java プロジェクト: gaiyangjun/TarsosDSP
  /**
   * Switches the audio source to the given mixer (or to the configured {@code fileName} when
   * one is set) and rebuilds the processing chain with a pitch processor and FFT processor.
   *
   * @param mixer the input mixer to capture from when no file name is configured
   * @throws LineUnavailableException if the capture line cannot be opened
   * @throws UnsupportedAudioFileException if the configured file has an unsupported format
   */
  private void setNewMixer(Mixer mixer)
      throws LineUnavailableException, UnsupportedAudioFileException {

    if (dispatcher != null) {
      dispatcher.stop();
    }
    if (fileName == null) {
      // Live capture path: mono, 16-bit, little-endian at the configured sample rate.
      final AudioFormat format = new AudioFormat(sampleRate, 16, 1, true, false);
      final DataLine.Info dataLineInfo = new DataLine.Info(TargetDataLine.class, format);
      TargetDataLine line;
      line = (TargetDataLine) mixer.getLine(dataLineInfo);
      final int numberOfSamples = bufferSize;
      line.open(format, numberOfSamples);
      line.start();
      final AudioInputStream stream = new AudioInputStream(line);

      // create a new dispatcher
      dispatcher = new AudioDispatcher(stream, bufferSize, overlap);
    } else {
      try {
        File audioFile = new File(fileName);
        dispatcher = AudioDispatcher.fromFile(audioFile, bufferSize, overlap);
        AudioFormat format = AudioSystem.getAudioFileFormat(audioFile).getFormat();
        dispatcher.addAudioProcessor(new AudioPlayer(format));
      } catch (IOException e) {
        // BUG FIX: previously execution fell through here with a null (or already
        // stopped) dispatcher, causing an NPE / dead pipeline below. Bail out instead.
        e.printStackTrace();
        return;
      }
    }
    currentMixer = mixer;

    // add a processor, handle pitch event.
    dispatcher.addAudioProcessor(new PitchProcessor(algo, sampleRate, bufferSize, this));
    dispatcher.addAudioProcessor(fftProcessor);

    // run the dispatcher (on a new thread).
    new Thread(dispatcher, "Audio dispatching").start();
  }
コード例 #16
0
ファイル: Recorder.java プロジェクト: kgfig/isip7speech
  /**
   * Starts the recording. To accomplish this, (i) the line is started and (ii) the thread is
   * started.
   */
  public void start() {
    /* Starting the TargetDataLine. It tells the line that
       we now want to read data from it. If this method
       isn't called, we won't
       be able to read data from the line at all.
    */
    // Globals.recorderStatus = "STATUS: recording...";
    m_line.start();

    /* Starting the thread. This call results in the
       method 'run()' (see below) being called. There, the
       data is actually read from the line.
    */
    // The line is started first so run() immediately finds data available to read.
    super.start();
  }
コード例 #17
0
ファイル: DataInThread.java プロジェクト: scottandrus/VoIP283
  // Thread logic
  /**
   * Capture loop: continuously reads microphone audio into {@code micData} and streams each
   * chunk out as a UDP datagram on {@code dSocket}.
   */
  public void run() {
    while (true) {
      lastNumBytesRead = microphone.read(micData, 0, micData.length);

      // BUG FIX: the packet previously ended up with the full buffer length, because
      // DatagramPacket.setData(byte[]) resets the length to the whole array — so stale
      // trailing bytes were sent on every partial read. Send only the bytes just read.
      int sendLength = Math.min(MAXBUFSIZE, Math.max(lastNumBytesRead, 0));
      DatagramPacket dp = new DatagramPacket(micData, sendLength);

      try {
        dSocket.send(dp);
      } catch (IOException e) {
        e.printStackTrace();
      }
    }
  }
コード例 #18
0
ファイル: DataInThread.java プロジェクト: scottandrus/VoIP283
  // ctor
  /**
   * Binds this reader to the given socket and prepares the microphone line. If no capture
   * line is available, the error is printed and the object is left partially initialised.
   */
  public DataInThread(DatagramSocket socket) {
    try {
      dSocket = socket;

      // 16 kHz, 16-bit, mono, signed, big-endian PCM.
      AudioFormat captureFormat = new AudioFormat(16000.0f, 16, 1, true, true);
      microphone = AudioSystem.getTargetDataLine(captureFormat);
      micData = new byte[MAXBUFSIZE];
      microphone.open(captureFormat);
      inputThread = new Thread(this);
    } catch (LineUnavailableException e) {
      // TODO is there anything else we should do if the microphone
      //		can't be connected?

      // http://stackoverflow.com/questions/14348169/understanding-java-sound-api-finding-mic-on-mixer
      e.printStackTrace();
    }
  }
コード例 #19
0
ファイル: AudioCapture.java プロジェクト: Jellofishi/Java
 /**
  * Wraps a TargetDataLine for metering. The superclass is given an empty backing stream;
  * the stored line is presumably read via the overridden stream methods in this class —
  * TODO confirm against the sibling read()/available() overrides.
  */
 TargetDataLineMeter(TargetDataLine line) {
   super(new ByteArrayInputStream(new byte[0]), line.getFormat(), AudioSystem.NOT_SPECIFIED);
   this.line = line;
 }
コード例 #20
0
ファイル: AudioCapture.java プロジェクト: Jellofishi/Java
 /** Delegates to the capture line: the number of bytes currently available to read. */
 public int available() throws IOException {
   return line.available();
 }
コード例 #21
0
ファイル: AudioCapture.java プロジェクト: Jellofishi/Java
 /**
  * Opens the capture line, wraps it in a metering stream, and converts the result to the
  * network audio format.
  */
 protected void openLineImpl() throws Exception {
   TargetDataLine captureLine = (TargetDataLine) line;
   captureLine.open(lineFormat, bufferSize);
   ais = AudioSystem.getAudioInputStream(netFormat, new TargetDataLineMeter(captureLine));
 }
コード例 #22
0
ファイル: SoundMixer.java プロジェクト: nakag/mmscomputing
  /**
   * Builds a multi-line report of this mixer: its identity, its source and target lines, and
   * which of the candidate formats can actually be opened for output and for input.
   */
  public String toString() {
    Mixer.Info info = mixer.getMixerInfo();

    // PERF: StringBuilder instead of repeated String += (which recopied the whole
    // report on every concatenation). Output is byte-identical to the old version.
    StringBuilder s = new StringBuilder();
    s.append("\nMixer [").append(id).append("]");
    s.append("\n\t Name: ").append(info.getName());
    s.append("\n\t Desc: ").append(info.getDescription());
    s.append("\n\t Ven : ").append(info.getVendor());
    s.append("\n\t Ver : ").append(info.getVersion());
    s.append("\n\t Str : ").append(info.toString());

    Line.Info[] infos = mixer.getSourceLineInfo();
    s.append("\n\nSourceLine count : ").append(infos.length);
    for (int i = 0; i < infos.length; i++) {
      if (infos[i] instanceof DataLine.Info) {
        s.append("\n\t\tData Line Source [").append(i).append("]");
        s.append("\n\t\t\t Str : ").append(infos[i].toString());
      } else if (infos[i] instanceof Port.Info) {
        s.append("\n\t\tPort Source [").append(i).append("]");
        s.append("\n\t\t\t Name: ").append(((Port.Info) infos[i]).getName());
        s.append("\n\t\t\t is Src: ").append(((Port.Info) infos[i]).isSource());
        s.append("\n\t\t\t Str : ").append(infos[i].toString());
      } else /*if(infos[i]!=null)*/ {
        s.append("\n\t\tSource [").append(i).append("]");
        s.append("\n\t\t\t Str : ").append(infos[i].toString());
      }
    }

    // Probe each candidate format: a format is listed when an output line opens for it.
    s.append("\n\nOUTPUT\n");
    for (int i = 0; i < formats.length; i++) {
      try {
        SourceDataLine out = getOutputLine(formats[i]);
        out.close();
        s.append("\n").append(formats[i].toString());
      } catch (Exception e) {
        // Format not supported for output — deliberately omitted from the report.
      }
    }

    infos = mixer.getTargetLineInfo();
    s.append("\n\nTargetLine count : ").append(infos.length);
    for (int i = 0; i < infos.length; i++) {
      if (infos[i] instanceof DataLine.Info) {
        s.append("\n\t\tData Line Target [").append(i).append("]");
        s.append("\n\t\t\t Str : ").append(infos[i].toString());
      } else if (infos[i] instanceof Port.Info) {
        s.append("\n\t\tPort Target [").append(i).append("]");
        s.append("\n\t\t\t Name: ").append(((Port.Info) infos[i]).getName());
        s.append("\n\t\t\t is Src: ").append(((Port.Info) infos[i]).isSource());
        s.append("\n\t\t\t Str : ").append(infos[i].toString());
      } else /*if(infos[i]!=null)*/ {
        s.append("\n\t\tTarget [").append(i).append("]");
        s.append("\n\t\t\t Str : ").append(infos[i].toString());
      }
    }

    // Same probe for input lines.
    s.append("\n\nINPUT\n");
    for (int i = 0; i < formats.length; i++) {
      try {
        TargetDataLine in = getInputLine(formats[i]);
        in.close();
        s.append("\n").append(formats[i].toString());
      } catch (Exception e) {
        // Format not supported for input — deliberately omitted from the report.
      }
    }

    return s.toString();
  }
コード例 #23
0
ファイル: Flanger.java プロジェクト: johnwygle/TarsosDSP
  /**
   * Rebuilds the processing chain for either the given file or live input from the given
   * mixer, then runs it: flanger effect, input gain, a WAV capture of the result, and
   * playback. Any setup failure is printed and the method returns without a dispatcher.
   */
  private void startFile(File inputFile, Mixer mixer) {
    // Stop a dispatcher left over from a previous run before building a new pipeline.
    if (dispatcher != null) {
      dispatcher.stop();
    }
    int bufferSize = 1024;
    int overlap = 0;
    double sampleRate = 44100;
    try {
      AudioFormat format;
      if (inputFile == null) {
        format = new AudioFormat((float) sampleRate, 16, 1, true, true);
      } else {
        format = AudioSystem.getAudioFileFormat(inputFile).getFormat();
        sampleRate = format.getSampleRate();
      }

      inputGain = new GainProcessor(defaultInputGain / 100.0);
      AudioPlayer audioPlayer = new AudioPlayer(format);

      if (inputFile == null) {
        // Live input: open and start a capture line on the chosen mixer.
        DataLine.Info lineInfo = new DataLine.Info(TargetDataLine.class, format);
        TargetDataLine captureLine = (TargetDataLine) mixer.getLine(lineInfo);
        captureLine.open(format, bufferSize);
        captureLine.start();
        AudioInputStream stream = new AudioInputStream(captureLine);
        TarsosDSPAudioInputStream audioStream = new JVMAudioInputStream(stream);
        dispatcher = new AudioDispatcher(audioStream, bufferSize, overlap);
      } else if (format.getChannels() != 1) {
        // Multichannel file: read interleaved frames and fold them down to mono.
        dispatcher =
            AudioDispatcherFactory.fromFile(
                inputFile, bufferSize * format.getChannels(), overlap * format.getChannels());
        dispatcher.addAudioProcessor(new MultichannelToMono(format.getChannels(), true));
      } else {
        dispatcher = AudioDispatcherFactory.fromFile(inputFile, bufferSize, overlap);
      }

      flangerEffect =
          new FlangerEffect(
              defaultLength / 1000.0, defaultImpact / 100.0, sampleRate, defaultFrequency / 10.0);

      dispatcher.addAudioProcessor(flangerEffect);
      dispatcher.addAudioProcessor(inputGain);
      dispatcher.addAudioProcessor(new WaveformWriter(format, "flanger.wav"));
      dispatcher.addAudioProcessor(audioPlayer);

      new Thread(dispatcher).start();
    } catch (UnsupportedAudioFileException | IOException | LineUnavailableException e) {
      e.printStackTrace();
    }
  }
コード例 #24
0
  /**
   * Records audio from the default capture line into the WAV file named by the single
   * command-line argument. Recording starts and stops on ENTER key presses.
   */
  public static void main(String[] args) {
    // Exactly one argument (the output file name) is required; "-h" prints usage.
    if (args.length != 1 || args[0].equals("-h")) {
      printUsageAndExit();
    }

    // The single argument names the sound file to record into.
    File outputFile = new File(args[0]);

    // Hard-coded recording format: PCM 44.1 kHz, 16 bit signed, stereo, little-endian.
    AudioFormat audioFormat =
        new AudioFormat(AudioFormat.Encoding.PCM_SIGNED, 44100.0F, 16, 2, 4, 44100.0F, false);

    // Request and open a TargetDataLine for that format; exit if none is available.
    DataLine.Info lineInfo = new DataLine.Info(TargetDataLine.class, audioFormat);
    TargetDataLine targetDataLine = null;
    try {
      targetDataLine = (TargetDataLine) AudioSystem.getLine(lineInfo);
      targetDataLine.open(audioFormat);
    } catch (LineUnavailableException e) {
      out("unable to get a recording line");
      e.printStackTrace();
      System.exit(1);
    }

    // Hard-coded container type for the output file.
    AudioFileFormat.Type targetType = AudioFileFormat.Type.WAVE;

    // The recorder owns the start/stop logic and the line-to-file copying.
    SimpleAudioRecorder recorder = new SimpleAudioRecorder(targetDataLine, targetType, outputFile);

    // Wait for ENTER before starting, so recording does not begin immediately.
    out("Press ENTER to start the recording.");
    waitForEnter();
    recorder.start();
    out("Recording...");

    // Wait for ENTER again to end the recording.
    out("Press ENTER to stop the recording.");
    waitForEnter();
    recorder.stopRecording();
    out("Recording stopped.");
  }

  /** Blocks until a byte (ENTER) arrives on stdin; an I/O error is printed and ignored. */
  private static void waitForEnter() {
    try {
      System.in.read();
    } catch (IOException e) {
      e.printStackTrace();
    }
  }
コード例 #25
0
 /**
  * Stops the recording.
  *
  * <p>Note that stopping the thread explicitly is not necessary. Once no more data can be read
  * from the TargetDataLine, no more data can be read from our AudioInputStream. And if there is
  * no more data from the AudioInputStream, the method 'AudioSystem.write()' (called in 'run()')
  * returns. Returning from 'AudioSystem.write()' is followed by returning from 'run()', and
  * thus, the thread is terminated automatically.
  *
  * <p>It's not a good idea to call this method just 'stop()' because stop() is a (deprecated)
  * method of the class 'Thread'. And we don't want to override this method.
  */
 public void stopRecording() {
   m_line.stop();
   m_line.close();
 }
コード例 #26
0
  /**
   * Apply the given effect to this WaveTab's audio data.
   *
   * <p>If nothing is selected, the whole clip is selected first. The audio is streamed into the
   * effect's source line by a feeder thread while this thread records the processed audio from
   * the effect's target line into a temporary WAV file; a peak file is then generated for
   * display, and the processed audio replaces the selection as an undoable ReplaceElement.
   *
   * @param effect The effect to apply
   */
  public void applyEffect(Effect effect) {
    Selection sel = waveDisplay.getSelection();
    // An empty selection means "apply to the whole clip".
    if (sel.getLeft() == sel.getRight())
      waveDisplay.setSelection(new Selection(0, getTotalLength()));
    Thread thread = null;
    try {
      AudioInputStream stream = getAudioInputStream();
      int sourceChannels = stream.getFormat().getChannels();
      stream = AudioManager.getStereoInputStream(stream);
      final FXUnit unit = new FXUnit(effect);
      // Some effects need an analysis pass over the data before processing can start.
      if (effect.needsAnalysis()) {
        Analyzer a = new Analyzer(unit, stream);
        ProgressMonitor monitor =
            new ProgressMonitor(getShell(), a, "Analyzing...", "Analyzing audio data");
        monitor.start();
        // The analysis pass consumed the stream; reopen it for the actual processing.
        stream = AudioManager.getStereoInputStream(getAudioInputStream());
      }

      final SourceDataLine sourceLine = unit.getEffectSourceLine();
      sourceLine.open();
      sourceLine.start();
      final TargetDataLine targetLine = unit.getEffectTargetLine();
      targetLine.open();
      targetLine.start();
      // Convert the input to the effect's expected format, or bail out when impossible.
      if (!stream.getFormat().equals(sourceLine.getFormat())) {
        if (AudioSystem.isConversionSupported(sourceLine.getFormat(), stream.getFormat()))
          stream = AudioSystem.getAudioInputStream(sourceLine.getFormat(), stream);
        else {
          editor.errorMessage(
              "Unable to apply effect:\nFormat conversion from "
                  + stream.getFormat()
                  + " to "
                  + sourceLine.getFormat()
                  + " not supported.");
          return;
        }
      }

      // Feeder thread: pushes the input audio into the effect's source line while the code
      // below records the processed audio from the target line.
      final AudioInputStream inStream = stream;
      thread =
          new Thread() {
            public void run() {
              int numBytesRead = 0;
              byte[] buffer = new byte[sourceLine.getBufferSize()];
              while (numBytesRead != -1 && !getItem().isDisposed()) {
                try {
                  numBytesRead = inStream.read(buffer, 0, buffer.length);
                } catch (IOException e1) {
                  e1.printStackTrace();
                  numBytesRead = -1;
                }
                if (numBytesRead > 0) {
                  sourceLine.write(buffer, 0, numBytesRead);
                }
                try {
                  Thread.sleep(0, 1);
                } catch (InterruptedException e) {
                }
              }
            }
          };
      thread.start();

      // Record the processed audio (folded back to mono when the source was mono) into a
      // temporary 16-bit WAV file, driven by a progress-monitored save thread.
      AudioInputStream in = new AudioInputStream(targetLine);
      if (sourceChannels == 1) in = AudioManager.getMonoInputStream(in);
      File tempFile = File.createTempFile("gmtmp_", ".wav");
      AudioFormat tempFormat =
          new AudioFormat(
              fileFormat.getFormat().getSampleRate(),
              16,
              fileFormat.getFormat().getChannels(),
              true,
              false);
      AudioFileOutputStream out =
          AudioManager.getDefault()
              .getAudioFileOutputStream(
                  tempFile, tempFormat, AudioFileFormat.Type.WAVE, null, null, null);
      if (!in.getFormat().equals(out.getFormat()))
        in = AudioSystem.getAudioInputStream(out.getFormat(), in);
      SaveFileThread saver =
          new SaveFileThread(
              in, out, (int) inStream.getFrameLength(), in.getFormat().getFrameSize(), true);
      ProgressMonitor monitor =
          new ProgressMonitor(
              getShell(), saver, "Apply Effect", "Applying " + effect.getName() + " to Selection");
      monitor.start();

      // Build a peak file so the processed audio can be rendered as a waveform.
      File tempPeak = File.createTempFile("gmtmp_", ".gmpk");
      CreatePeakFileThread peak =
          new CreatePeakFileThread(AudioSystem.getAudioInputStream(tempFile), tempPeak);
      monitor =
          new ProgressMonitor(
              getShell(), peak, "Creating peak file", "Creating peak file for applied effect.");
      monitor.start();

      PeakWaveForm pwf = new PeakWaveForm(tempPeak);
      AudioFileWaveForm awf = new AudioFileWaveForm(tempFile, pwf, 32 * 1024, 25);
      CutListSource newSource = new AudioFileSource(tempFile, awf);

      sel = waveDisplay.getSelection();
      int left = sel.getLeft();
      int right = sel.getRight();

      // Replace the selection with the processed audio as an undoable operation.
      ReplaceElement el =
          new ReplaceElement(
              effect.getName(), newSource, left, right - left, fileFormat.getFormat());
      cutList.addElement(el);
      undoOperations.add(el);
      redoOperations.clear();
      // NOTE(review): Thread.stop() is deprecated and unsafe — it kills the feeder thread
      // at an arbitrary point. Consider an interrupt/volatile-flag shutdown instead.
      thread.stop();
    } catch (NotReadyException e) {
      e.printStackTrace();
      editor.errorMessage(e.getMessage());
      if (thread != null) thread.stop();
    } catch (NotFinishedException e) {
      e.printStackTrace();
      editor.errorMessage(e.getMessage());
      if (thread != null) thread.stop();
    } catch (LineUnavailableException e) {
      e.printStackTrace();
      editor.errorMessage(e.getMessage());
      if (thread != null) thread.stop();
    } catch (IOException e) {
      e.printStackTrace();
      editor.errorMessage(e.getMessage());
      if (thread != null) thread.stop();
    } catch (UnsupportedAudioFileException e) {
      e.printStackTrace();
      editor.errorMessage(e.getMessage());
      if (thread != null) thread.stop();
    }
  }
コード例 #27
0
ファイル: DataInThread.java プロジェクト: scottandrus/VoIP283
 /** Pauses microphone capture; does not close the line or stop the sender thread. */
 public void stopMic() {
   microphone.stop();
 }
コード例 #28
0
ファイル: DataInThread.java プロジェクト: scottandrus/VoIP283
  // are there any checks we need to do before starting or stopping?
 /** Starts microphone capture and launches the thread that streams the captured data. */
 public void startMic() {
   microphone.start();
   // NOTE(review): inputThread.start() throws IllegalThreadStateException if startMic()
   // is called twice, and NPE if the constructor's line setup failed — confirm callers.
   inputThread.start();
 }
コード例 #29
0
ファイル: SoundServer.java プロジェクト: LordLeicester/Sound
  /**
   * Opens a capture line on the given mixer and streams the recorded audio to every TCP
   * client that connects on port 20000. One thread accepts new clients; a second reads from
   * the line and broadcasts each chunk, dropping clients whose connection fails.
   */
  public SoundServer(Mixer mixer, DataLine.Info dataLineInfo, AudioFormat audioFormat) {
    try {
      final TargetDataLine targetDataLine = (TargetDataLine) mixer.getLine(dataLineInfo);
      targetDataLine.open(audioFormat);
      targetDataLine.start();

      try {
        serverSocket = new ServerSocket(20000);
      } catch (IOException e1) {
        e1.printStackTrace();
        return;
      }
      clients = new ArrayList<>();
      streams = new HashMap<>();
      runServer = true;

      // Accept loop: registers each new client socket and caches its output stream.
      acceptRunnable =
          new Runnable() {

            public void run() {
              while (runServer) {
                try {
                  Socket socket = serverSocket.accept();
                  synchronized (clients) {
                    clients.add(socket);
                    synchronized (streams) {
                      streams.put(socket, socket.getOutputStream());
                    }
                    System.out.println("Client connected from " + socket.getInetAddress());
                  }
                } catch (IOException e) {
                  e.printStackTrace();
                }
              }
            }
          };

      // Broadcast loop: reads audio chunks from the line and writes them to every client.
      sendRunnable =
          new Runnable() {

            public void run() {
              System.out.println("Server is running...");

              recordBuffer = new byte[BUFFER_SIZE];

              while (runServer) {
                int count = targetDataLine.read(recordBuffer, 0, BUFFER_SIZE);

                if (count > 0) {
                  synchronized (clients) {
                    for (int i = 0; i < clients.size(); i++) {
                      Socket client = clients.get(i);
                      OutputStream os = streams.get(client);
                      try {
                        os.write(recordBuffer, 0, BUFFER_SIZE);
                      } catch (SocketException e) {
                        // Mark the dead client; it is purged below.
                        clients.set(i, null);
                        System.out.println("Client connection dropped...");
                      } catch (IOException e) {
                        e.printStackTrace();
                      }
                    }
                    // BUG FIX: the old index-based removal loop incremented past each
                    // removed slot, skipping the next element whenever two dropped
                    // clients were adjacent. removeIf purges every null safely.
                    // NOTE(review): dead sockets are never removed from 'streams' —
                    // confirm whether that map should be pruned here as well.
                    clients.removeIf(c -> c == null);
                  }
                }
              }
            }
          };

      new Thread(acceptRunnable).start();
      new Thread(sendRunnable).start();

    } catch (LineUnavailableException lue) {
      lue.printStackTrace();
    }
  }