 private boolean enableBuiltInNS(boolean enable) {
   Logging.d(TAG, "enableBuiltInNS(" + enable + ')');
   if (effects == null) {
     Logging.e(TAG, "Built-in NS is not supported on this platform");
     return false;
   }
   return effects.setNS(enable);
 }
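 // Constructs the Java capture side and ties it to its native counterpart
 // via `nativeAudioRecord`. WebRtcAudioEffects.create() may return null on
 // devices without usable platform effects, which is why `effects` is
 // null-checked everywhere it is used.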
 WebRtcAudioRecord(Context context, long nativeAudioRecord) {
   Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
   this.context = context;
   this.nativeAudioRecord = nativeAudioRecord;
   if (DEBUG) {
     WebRtcAudioUtils.logDeviceInfo(TAG);
   }
   effects = WebRtcAudioEffects.create();
 }
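 // Stops the recording thread, releases any platform audio effects and
 // returns the AudioRecord resources to the system. Assumed to be called
 // only after recording has been successfully started; `audioThread` must
 // be non-null here.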
 private boolean stopRecording() {
   Logging.d(TAG, "stopRecording");
   assertTrue(audioThread != null);
   audioThread.stopThread();
   if (!ThreadUtils.joinUninterruptibly(audioThread, AUDIO_RECORD_THREAD_JOIN_TIMEOUT_MS)) {
     Logging.e(TAG, "Join of AudioRecordJavaThread timed out");
   }
   audioThread = null;
   if (effects != null) {
     effects.release();
   }
   // Guard against a failed initRecording() having left audioRecord null.
   if (audioRecord != null) {
     audioRecord.release();
     audioRecord = null;
   }
   return true;
 }
 private int initRecording(int sampleRate, int channels) {
   Logging.d(TAG, "initRecording(sampleRate=" + sampleRate + ", channels=" + channels + ")");
   if (!WebRtcAudioUtils.hasPermission(context, android.Manifest.permission.RECORD_AUDIO)) {
     Logging.e(TAG, "RECORD_AUDIO permission is missing");
     return -1;
   }
   if (audioRecord != null) {
     Logging.e(TAG, "InitRecording() called twice without StopRecording()");
     return -1;
   }
   final int bytesPerFrame = channels * (BITS_PER_SAMPLE / 8);
   final int framesPerBuffer = sampleRate / BUFFERS_PER_SECOND;
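   // Worked example (constants assumed, not visible in this file): with
   // 16-bit samples (BITS_PER_SAMPLE == 16), mono input (channels == 1),
   // sampleRate == 48000 and BUFFERS_PER_SECOND == 100, each buffer holds
   // 480 frames of 2 bytes, i.e. 960 bytes per 10 ms callback. Note that
   // capture below is hard-coded to CHANNEL_IN_MONO, so channels is
   // expected to be 1.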
   byteBuffer = ByteBuffer.allocateDirect(bytesPerFrame * framesPerBuffer);
   Logging.d(TAG, "byteBuffer.capacity: " + byteBuffer.capacity());
   emptyBytes = new byte[byteBuffer.capacity()];
   // Rather than passing the ByteBuffer with every callback (requiring
   // the potentially expensive GetDirectBufferAddress) we simply have the
   // native class cache the address to the memory once.
   nativeCacheDirectBufferAddress(byteBuffer, nativeAudioRecord);

   // Get the minimum buffer size required for the successful creation of
   // an AudioRecord object, in byte units.
   // Note that this size doesn't guarantee a smooth recording under load.
   int minBufferSize =
       AudioRecord.getMinBufferSize(
           sampleRate, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
   if (minBufferSize == AudioRecord.ERROR || minBufferSize == AudioRecord.ERROR_BAD_VALUE) {
     Logging.e(TAG, "AudioRecord.getMinBufferSize failed: " + minBufferSize);
     return -1;
   }
   Logging.d(TAG, "AudioRecord.getMinBufferSize: " + minBufferSize);

   // Use a larger buffer size than the minimum required when creating the
   // AudioRecord instance to ensure smooth recording under load. It has been
   // verified that it does not increase the actual recording latency.
   int bufferSizeInBytes = Math.max(BUFFER_SIZE_FACTOR * minBufferSize, byteBuffer.capacity());
   Logging.d(TAG, "bufferSizeInBytes: " + bufferSizeInBytes);
   try {
     audioRecord =
         new AudioRecord(
             AudioSource.VOICE_COMMUNICATION,
             sampleRate,
             AudioFormat.CHANNEL_IN_MONO,
             AudioFormat.ENCODING_PCM_16BIT,
             bufferSizeInBytes);
   } catch (IllegalArgumentException e) {
     Logging.e(TAG, e.getMessage());
     return -1;
   }
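   // The AudioRecord constructor can succeed even though the native layer
   // failed to initialize, so the state must be verified explicitly.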
   if (audioRecord == null || audioRecord.getState() != AudioRecord.STATE_INITIALIZED) {
     Logging.e(TAG, "Failed to create a new AudioRecord instance");
     return -1;
   }
   Logging.d(TAG, "AudioRecord session ID: " + audioRecord.getAudioSessionId()
       + ", audio format: " + audioRecord.getAudioFormat()
       + ", channels: " + audioRecord.getChannelCount()
       + ", sample rate: " + audioRecord.getSampleRate());
   if (effects != null) {
     effects.enable(audioRecord.getAudioSessionId());
   }
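   // Binding to the session ID lets the platform effects (e.g. the
   // built-in noise suppressor toggled by enableBuiltInNS) process this
   // AudioRecord's capture stream.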
   // TODO(phoglund): put back audioRecord.getBufferSizeInFrames when
   // all known downstream users support M.
   // if (WebRtcAudioUtils.runningOnMOrHigher()) {
   // Returns the frame count of the native AudioRecord buffer. This is
   // greater than or equal to the bufferSizeInBytes converted to frame
   // units. The native frame count may be enlarged to accommodate the
   // requirements of the source on creation or if the AudioRecord is
   // subsequently rerouted.

   // Logging.d(TAG, "bufferSizeInFrames: "
   //     + audioRecord.getBufferSizeInFrames());
   // }
   return framesPerBuffer;
 }
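
 // A minimal calling-sequence sketch (hypothetical; in WebRTC these private
 // methods are driven from the native audio device module over JNI):
 //
 //   WebRtcAudioRecord recorder = new WebRtcAudioRecord(context, nativePtr);
 //   int framesPerBuffer = recorder.initRecording(48000, 1);
 //   if (framesPerBuffer != -1) {
 //     // ... start recording, then later:
 //     recorder.stopRecording();
 //   }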