Example 1
  public void testAudioRecordProperties() throws Exception {
    if (!hasMicrophone()) {
      return; // Skip on devices without audio input.
    }
    // The recorder should report exactly the configuration it was created with.
    assertEquals(AudioFormat.ENCODING_PCM_16BIT, mAudioRecord.getAudioFormat());
    assertEquals(MediaRecorder.AudioSource.DEFAULT, mAudioRecord.getAudioSource());
    assertEquals(1, mAudioRecord.getChannelCount());
    assertEquals(AudioFormat.CHANNEL_CONFIGURATION_MONO, mAudioRecord.getChannelConfiguration());
    assertEquals(AudioRecord.STATE_INITIALIZED, mAudioRecord.getState());
    assertEquals(mHz, mAudioRecord.getSampleRate());
    assertEquals(AudioRecord.RECORDSTATE_STOPPED, mAudioRecord.getRecordingState());

    // A positive minimum buffer size confirms the configuration is supported.
    int bufferSize =
        AudioRecord.getMinBufferSize(
            mHz, AudioFormat.CHANNEL_CONFIGURATION_DEFAULT, AudioFormat.ENCODING_PCM_16BIT);
    assertTrue(bufferSize > 0);
  }
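
The test above only asserts that getMinBufferSize returns a positive value; in application code that value is usually fed back into the AudioRecord constructor. A minimal sketch of that pattern, assuming mono 16-bit PCM capture and that the RECORD_AUDIO permission is already granted (the helper name is illustrative, not part of the test class):

  // Hypothetical helper: builds a recorder sized from getMinBufferSize().
  static AudioRecord createRecorder(int sampleRateHz) {
    int minBufferSize =
        AudioRecord.getMinBufferSize(
            sampleRateHz, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
    if (minBufferSize <= 0) {
      throw new IllegalStateException("Unsupported recording config: " + minBufferSize);
    }
    // Doubling the minimum gives some headroom against overruns under load.
    return new AudioRecord(
        MediaRecorder.AudioSource.DEFAULT,
        sampleRateHz,
        AudioFormat.CHANNEL_IN_MONO,
        AudioFormat.ENCODING_PCM_16BIT,
        2 * minBufferSize);
  }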
Example 2
  public DetectThread(CaptureThread rec) {
    // Equal-tempered reference pitches (A4 = 440 Hz) from C4 up to G5.
    notes.put("C4", 261.6);
    notes.put("C4#", 277.2);
    notes.put("D4", 293.6);
    notes.put("D4#", 311.1);
    notes.put("E4", 329.6);
    notes.put("F4", 349.2);
    notes.put("F4#", 369.9);
    notes.put("G4", 391.9);
    notes.put("G4#", 415.3);
    notes.put("A4", 440.0);
    notes.put("A4#", 466.1);
    notes.put("B4", 493.8);
    notes.put("C5", 523.25);
    notes.put("C5#", 554.36);
    notes.put("D5", 587.3);
    notes.put("D5#", 622.254);
    notes.put("E5", 659.25);
    notes.put("F5", 698.45);
    notes.put("F5#", 739.98);
    notes.put("G5", 783.9);

    ct = rec;
    AudioRecord ar = rec.getAudioRecord();
    int bps = 0; // Bits per sample; stays 0 for unrecognized encodings.

    if (ar.getAudioFormat() == AudioFormat.ENCODING_PCM_16BIT) {
      bps = 16;
    } else if (ar.getAudioFormat() == AudioFormat.ENCODING_PCM_8BIT) {
      bps = 8;
    }

    int channel = 0; // Channel count; only mono capture is handled here.
    if (ar.getChannelConfiguration() == AudioFormat.CHANNEL_IN_MONO) {
      channel = 1;
    }

    // Describe the raw stream in a WAV-style header for the analyzer.
    wh = new WaveHeader();
    wh.setChannels(channel);
    wh.setBitsPerSample(bps);
    wh.setSampleRate(ar.getSampleRate());

    analyzer = new Analyse(wh);
  }
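
The notes map above pairs note names with their equal-tempered frequencies in Hz. A pitch detector typically uses such a table to map an estimated frequency back to the nearest named note; a minimal sketch of that lookup (the method is illustrative, assumes notes is a Map<String, Double> with java.util.Map imported, and is not part of the original class):

  // Hypothetical lookup: returns the note whose reference frequency is
  // closest to the detected frequency, e.g. nearestNote(442.0) -> "A4".
  private String nearestNote(double frequencyHz) {
    String best = null;
    double bestDistance = Double.MAX_VALUE;
    for (Map.Entry<String, Double> e : notes.entrySet()) {
      double distance = Math.abs(e.getValue() - frequencyHz);
      if (distance < bestDistance) {
        bestDistance = distance;
        best = e.getKey();
      }
    }
    return best;
  }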
Example 3
  private int initRecording(int sampleRate, int channels) {
    Logging.d(TAG, "initRecording(sampleRate=" + sampleRate + ", channels=" + channels + ")");
    if (!WebRtcAudioUtils.hasPermission(context, android.Manifest.permission.RECORD_AUDIO)) {
      Logging.e(TAG, "RECORD_AUDIO permission is missing");
      return -1;
    }
    if (audioRecord != null) {
      Logging.e(TAG, "InitRecording() called twice without StopRecording()");
      return -1;
    }
    final int bytesPerFrame = channels * (BITS_PER_SAMPLE / 8);
    final int framesPerBuffer = sampleRate / BUFFERS_PER_SECOND;
    byteBuffer = ByteBuffer.allocateDirect(bytesPerFrame * framesPerBuffer);
    Logging.d(TAG, "byteBuffer.capacity: " + byteBuffer.capacity());
    emptyBytes = new byte[byteBuffer.capacity()];
    // Rather than passing the ByteBuffer with every callback (requiring
    // the potentially expensive GetDirectBufferAddress) we simply have the
    // native class cache the address to the memory once.
    nativeCacheDirectBufferAddress(byteBuffer, nativeAudioRecord);

    // Get the minimum buffer size required for the successful creation of
    // an AudioRecord object, in byte units.
    // Note that this size doesn't guarantee a smooth recording under load.
    int minBufferSize =
        AudioRecord.getMinBufferSize(
            sampleRate, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
    if (minBufferSize == AudioRecord.ERROR || minBufferSize == AudioRecord.ERROR_BAD_VALUE) {
      Logging.e(TAG, "AudioRecord.getMinBufferSize failed: " + minBufferSize);
      return -1;
    }
    Logging.d(TAG, "AudioRecord.getMinBufferSize: " + minBufferSize);

    // Use a larger buffer size than the minimum required when creating the
    // AudioRecord instance to ensure smooth recording under load. It has been
    // verified that it does not increase the actual recording latency.
    int bufferSizeInBytes = Math.max(BUFFER_SIZE_FACTOR * minBufferSize, byteBuffer.capacity());
    Logging.d(TAG, "bufferSizeInBytes: " + bufferSizeInBytes);
    try {
      audioRecord =
          new AudioRecord(
              AudioSource.VOICE_COMMUNICATION,
              sampleRate,
              AudioFormat.CHANNEL_IN_MONO,
              AudioFormat.ENCODING_PCM_16BIT,
              bufferSizeInBytes);
    } catch (IllegalArgumentException e) {
      Logging.e(TAG, e.getMessage());
      return -1;
    }
    if (audioRecord == null || audioRecord.getState() != AudioRecord.STATE_INITIALIZED) {
      Logging.e(TAG, "Failed to create a new AudioRecord instance");
      return -1;
    }
    Logging.d(
        TAG,
        "AudioRecord "
            + "session ID: "
            + audioRecord.getAudioSessionId()
            + ", "
            + "audio format: "
            + audioRecord.getAudioFormat()
            + ", "
            + "channels: "
            + audioRecord.getChannelCount()
            + ", "
            + "sample rate: "
            + audioRecord.getSampleRate());
    if (effects != null) {
      effects.enable(audioRecord.getAudioSessionId());
    }
    // TODO(phoglund): put back audioRecord.getBufferSizeInFrames when
    // all known downstream users support M.
    // if (WebRtcAudioUtils.runningOnMOrHigher()) {
    // Returns the frame count of the native AudioRecord buffer. This is
    // greater than or equal to the bufferSizeInBytes converted to frame
    // units. The native frame count may be enlarged to accommodate the
    // requirements of the source on creation or if the AudioRecord is
    // subsequently rerouted.

    // Logging.d(TAG, "bufferSizeInFrames: "
    //     + audioRecord.getBufferSizeInFrames());
    // }
    return framesPerBuffer;
  }
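
initRecording() only prepares the buffers; the actual capture runs on a separate thread that fills byteBuffer one 10 ms frame at a time and hands it to native code. A simplified sketch of that loop, assuming a keepAlive flag and a nativeDataIsRecorded(int, long) callback as in upstream WebRtcAudioRecord (both are assumptions here, and error handling is reduced to the essentials):

  private void recordLoop() {
    audioRecord.startRecording();
    while (keepAlive) {
      // Fill exactly one frame's worth of bytes, as sized in initRecording().
      int bytesRead = audioRecord.read(byteBuffer, byteBuffer.capacity());
      if (bytesRead == byteBuffer.capacity()) {
        // Hand the captured frame to the native side via the cached address.
        nativeDataIsRecorded(bytesRead, nativeAudioRecord);
      } else {
        Logging.e(TAG, "AudioRecord.read failed: " + bytesRead);
        break;
      }
    }
    audioRecord.stop();
  }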
Example 4
  public void testAudioRecordOP() throws Exception {
    if (!hasMicrophone()) {
      return;
    }
    final int SLEEP_TIME = 10; // ms between reads
    final int RECORD_TIME = 10000; // total recording time per phase, in ms
    assertEquals(AudioRecord.STATE_INITIALIZED, mAudioRecord.getState());

    // Marker after half a second of audio, periodic notifications every second.
    int markerInFrames = mAudioRecord.getSampleRate() / 2;
    assertEquals(AudioRecord.SUCCESS, mAudioRecord.setNotificationMarkerPosition(markerInFrames));
    assertEquals(markerInFrames, mAudioRecord.getNotificationMarkerPosition());
    int periodInFrames = mAudioRecord.getSampleRate();
    assertEquals(AudioRecord.SUCCESS, mAudioRecord.setPositionNotificationPeriod(periodInFrames));
    assertEquals(periodInFrames, mAudioRecord.getPositionNotificationPeriod());
    OnRecordPositionUpdateListener listener =
        new OnRecordPositionUpdateListener() {

          @Override
          public void onMarkerReached(AudioRecord recorder) {
            mIsOnMarkerReachedCalled = true;
          }

          @Override
          public void onPeriodicNotification(AudioRecord recorder) {
            mIsOnPeriodicNotificationCalled = true;
          }
        };
    mAudioRecord.setRecordPositionUpdateListener(listener);

    // use byte array as buffer
    final int BUFFER_SIZE = 102400;
    byte[] byteData = new byte[BUFFER_SIZE];
    long time = System.currentTimeMillis();
    mAudioRecord.startRecording();
    assertEquals(AudioRecord.RECORDSTATE_RECORDING, mAudioRecord.getRecordingState());
    while (System.currentTimeMillis() - time < RECORD_TIME) {
      Thread.sleep(SLEEP_TIME);
      mAudioRecord.read(byteData, 0, BUFFER_SIZE);
    }
    mAudioRecord.stop();
    assertEquals(AudioRecord.RECORDSTATE_STOPPED, mAudioRecord.getRecordingState());
    assertTrue(mIsOnMarkerReachedCalled);
    assertTrue(mIsOnPeriodicNotificationCalled);
    reset();

    // use short array as buffer
    short[] shortData = new short[BUFFER_SIZE];
    time = System.currentTimeMillis();
    mAudioRecord.startRecording();
    assertEquals(AudioRecord.RECORDSTATE_RECORDING, mAudioRecord.getRecordingState());
    while (System.currentTimeMillis() - time < RECORD_TIME) {
      Thread.sleep(SLEEP_TIME);
      mAudioRecord.read(shortData, 0, BUFFER_SIZE);
    }
    mAudioRecord.stop();
    assertEquals(AudioRecord.RECORDSTATE_STOPPED, mAudioRecord.getRecordingState());
    assertTrue(mIsOnMarkerReachedCalled);
    assertTrue(mIsOnPeriodicNotificationCalled);
    reset();

    // use ByteBuffer as buffer
    ByteBuffer byteBuffer = ByteBuffer.allocateDirect(BUFFER_SIZE);
    time = System.currentTimeMillis();
    mAudioRecord.startRecording();
    assertEquals(AudioRecord.RECORDSTATE_RECORDING, mAudioRecord.getRecordingState());
    while (System.currentTimeMillis() - time < RECORD_TIME) {
      Thread.sleep(SLEEP_TIME);
      mAudioRecord.read(byteBuffer, BUFFER_SIZE);
    }
    mAudioRecord.stop();
    assertEquals(AudioRecord.RECORDSTATE_STOPPED, mAudioRecord.getRecordingState());
    assertTrue(mIsOnMarkerReachedCalled);
    assertTrue(mIsOnPeriodicNotificationCalled);
    reset();

    // use handler
    final Handler handler =
        new Handler(Looper.getMainLooper()) {
          @Override
          public void handleMessage(Message msg) {
            mIsHandleMessageCalled = true;
            super.handleMessage(msg);
          }
        };

    mAudioRecord.setRecordPositionUpdateListener(listener, handler);
    time = System.currentTimeMillis();
    mAudioRecord.startRecording();
    assertEquals(AudioRecord.RECORDSTATE_RECORDING, mAudioRecord.getRecordingState());
    while (System.currentTimeMillis() - time < RECORD_TIME) {
      Thread.sleep(SLEEP_TIME);
      mAudioRecord.read(byteData, 0, BUFFER_SIZE);
    }
    mAudioRecord.stop();
    assertEquals(AudioRecord.RECORDSTATE_STOPPED, mAudioRecord.getRecordingState());
    assertTrue(mIsOnMarkerReachedCalled);
    assertTrue(mIsOnPeriodicNotificationCalled);
    // The handler argument is only ever used for getting the associated Looper
    assertFalse(mIsHandleMessageCalled);

    mAudioRecord.release();
    assertEquals(AudioRecord.STATE_UNINITIALIZED, mAudioRecord.getState());
  }
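
The reset() helper called between the phases is not shown above; presumably it clears the listener flags so each buffer variant (byte[], short[], ByteBuffer, handler) is verified independently. A plausible minimal version, assuming the flags are plain boolean fields of the test class:

  // Hypothetical helper: clears the callback flags between test phases.
  private void reset() {
    mIsOnMarkerReachedCalled = false;
    mIsOnPeriodicNotificationCalled = false;
    mIsHandleMessageCalled = false;
  }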