Example #1
  public boolean recordSamples(int sampleRate, int audioChannels, Buffer... samples)
      throws Exception {
    if (audio_st == null) {
      throw new Exception(
          "No audio output stream (Is audioChannels > 0 and has start() been called?)");
    }
    int ret;

    if (sampleRate <= 0) {
      sampleRate = audio_c.sample_rate();
    }
    if (audioChannels <= 0) {
      audioChannels = audio_c.channels();
    }
    // Describe the incoming samples: size per buffer, sample format, channel
    // count (planar data arrives as one buffer per channel), and bytes per
    // sample, plus the codec's expected output layout.
    int inputSize = samples != null ? samples[0].limit() - samples[0].position() : 0;
    int inputFormat = AV_SAMPLE_FMT_NONE;
    int inputChannels = samples != null && samples.length > 1 ? 1 : audioChannels;
    int inputDepth = 0;
    int outputFormat = audio_c.sample_fmt();
    int outputChannels = samples_out.length > 1 ? 1 : audio_c.channels();
    int outputDepth = av_get_bytes_per_sample(outputFormat);
    if (samples != null && samples[0] instanceof ByteBuffer) {
      inputFormat = samples.length > 1 ? AV_SAMPLE_FMT_U8P : AV_SAMPLE_FMT_U8;
      inputDepth = 1;
      for (int i = 0; i < samples.length; i++) {
        ByteBuffer b = (ByteBuffer) samples[i];
        if (samples_in[i] instanceof BytePointer
            && samples_in[i].capacity() >= inputSize
            && b.hasArray()) {
          ((BytePointer) samples_in[i]).position(0).put(b.array(), b.position(), inputSize);
        } else {
          samples_in[i] = new BytePointer(b);
        }
      }
    } else if (samples != null && samples[0] instanceof ShortBuffer) {
      inputFormat = samples.length > 1 ? AV_SAMPLE_FMT_S16P : AV_SAMPLE_FMT_S16;
      inputDepth = 2;
      for (int i = 0; i < samples.length; i++) {
        ShortBuffer b = (ShortBuffer) samples[i];
        if (samples_in[i] instanceof ShortPointer
            && samples_in[i].capacity() >= inputSize
            && b.hasArray()) {
          ((ShortPointer) samples_in[i]).position(0).put(b.array(), b.position(), inputSize);
        } else {
          samples_in[i] = new ShortPointer(b);
        }
      }
    } else if (samples != null && samples[0] instanceof IntBuffer) {
      inputFormat = samples.length > 1 ? AV_SAMPLE_FMT_S32P : AV_SAMPLE_FMT_S32;
      inputDepth = 4;
      for (int i = 0; i < samples.length; i++) {
        IntBuffer b = (IntBuffer) samples[i];
        if (samples_in[i] instanceof IntPointer
            && samples_in[i].capacity() >= inputSize
            && b.hasArray()) {
          ((IntPointer) samples_in[i]).position(0).put(b.array(), b.position(), inputSize);
        } else {
          samples_in[i] = new IntPointer(b);
        }
      }
    } else if (samples != null && samples[0] instanceof FloatBuffer) {
      inputFormat = samples.length > 1 ? AV_SAMPLE_FMT_FLTP : AV_SAMPLE_FMT_FLT;
      inputDepth = 4;
      for (int i = 0; i < samples.length; i++) {
        FloatBuffer b = (FloatBuffer) samples[i];
        if (samples_in[i] instanceof FloatPointer
            && samples_in[i].capacity() >= inputSize
            && b.hasArray()) {
          ((FloatPointer) samples_in[i]).position(0).put(b.array(), b.position(), inputSize);
        } else {
          samples_in[i] = new FloatPointer(b);
        }
      }
    } else if (samples != null && samples[0] instanceof DoubleBuffer) {
      inputFormat = samples.length > 1 ? AV_SAMPLE_FMT_DBLP : AV_SAMPLE_FMT_DBL;
      inputDepth = 8;
      for (int i = 0; i < samples.length; i++) {
        DoubleBuffer b = (DoubleBuffer) samples[i];
        if (samples_in[i] instanceof DoublePointer
            && samples_in[i].capacity() >= inputSize
            && b.hasArray()) {
          ((DoublePointer) samples_in[i]).position(0).put(b.array(), b.position(), inputSize);
        } else {
          samples_in[i] = new DoublePointer(b);
        }
      }
    } else if (samples != null) {
      throw new Exception("Audio samples Buffer has unsupported type: " + samples);
    }

    /* (Re)create the resampler context whenever the input parameters change. */
    if (samples_convert_ctx == null
        || samples_channels != audioChannels
        || samples_format != inputFormat
        || samples_rate != sampleRate) {
      samples_convert_ctx =
          swr_alloc_set_opts(
              samples_convert_ctx,
              audio_c.channel_layout(),
              outputFormat,
              audio_c.sample_rate(),
              av_get_default_channel_layout(audioChannels),
              inputFormat,
              sampleRate,
              0,
              null);
      if (samples_convert_ctx == null) {
        throw new Exception("swr_alloc_set_opts() error: Cannot allocate the conversion context.");
      } else if ((ret = swr_init(samples_convert_ctx)) < 0) {
        throw new Exception(
            "swr_init() error " + ret + ": Cannot initialize the conversion context.");
      }
      samples_channels = audioChannels;
      samples_format = inputFormat;
      samples_rate = sampleRate;
    }

    for (int i = 0; samples != null && i < samples.length; i++) {
      // Convert the pointer's position from element units to bytes; the limit is
      // the byte position plus inputSize elements' worth of bytes.
      samples_in[i].position(samples_in[i].position() * inputDepth);
      samples_in[i].limit(samples_in[i].position() + inputSize * inputDepth);
    }
    /* Drain the resampler: keep calling swr_convert() until it produces no more
       output, encoding a frame whenever the output buffers fill up. */
    while (true) {
      int inputCount =
          samples != null
              ? (samples_in[0].limit() - samples_in[0].position()) / (inputChannels * inputDepth)
              : 0;
      int outputCount =
          (samples_out[0].limit() - samples_out[0].position()) / (outputChannels * outputDepth);
      inputCount =
          Math.min(
              inputCount,
              (outputCount * sampleRate + audio_c.sample_rate() - 1) / audio_c.sample_rate());
      for (int i = 0; samples != null && i < samples.length; i++) {
        samples_in_ptr.put(i, samples_in[i]);
      }
      for (int i = 0; i < samples_out.length; i++) {
        samples_out_ptr.put(i, samples_out[i]);
      }
      if ((ret =
              swr_convert(
                  samples_convert_ctx, samples_out_ptr, outputCount, samples_in_ptr, inputCount))
          < 0) {
        throw new Exception("swr_convert() error " + ret + ": Cannot convert audio samples.");
      } else if (ret == 0) {
        break;
      }
      for (int i = 0; samples != null && i < samples.length; i++) {
        samples_in[i].position(samples_in[i].position() + inputCount * inputChannels * inputDepth);
      }
      for (int i = 0; i < samples_out.length; i++) {
        samples_out[i].position(samples_out[i].position() + ret * outputChannels * outputDepth);
      }

      if (samples == null || samples_out[0].position() >= samples_out[0].limit()) {
        // The output buffers are full (or we are flushing): wrap them in the
        // AVFrame and hand it to the encoder.
        frame.nb_samples(audio_input_frame_size);
        avcodec_fill_audio_frame(
            frame, audio_c.channels(), outputFormat, samples_out[0], samples_out[0].limit(), 0);
        for (int i = 0; i < samples_out.length; i++) {
          frame.data(i, samples_out[i].position(0));
          frame.linesize(i, samples_out[i].limit());
        }
        frame.quality(audio_c.global_quality());
        record(frame);
      }
    }
    return samples != null ? frame.key_frame() != 0 : record((AVFrame) null);
  }
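
recordSamples() above belongs to JavaCV's FFmpegFrameRecorder. A minimal way to drive it is through the recorder's public API; the sketch below is an assumption-laden illustration (org.bytedeco.javacv packaging, a hypothetical "out.mp4" output path) rather than canonical usage, and records one second of silence:

import java.nio.ShortBuffer;
import org.bytedeco.javacv.FFmpegFrameRecorder;

public class RecordSamplesSketch {
  public static void main(String[] args) throws Exception {
    int sampleRate = 44100;
    int channels = 2;
    // Audio-only recorder: imageWidth == imageHeight == 0, so start() creates
    // only the audio stream (audio_st) that recordSamples() checks for.
    FFmpegFrameRecorder recorder = new FFmpegFrameRecorder("out.mp4", 0, 0, channels);
    recorder.setSampleRate(sampleRate);
    recorder.start();

    // One second of silence as interleaved 16-bit PCM: a single ShortBuffer
    // takes the AV_SAMPLE_FMT_S16 branch above, and swr_convert() handles the
    // conversion to whatever sample format the encoder expects.
    ShortBuffer samples = ShortBuffer.allocate(sampleRate * channels);
    recorder.recordSamples(sampleRate, channels, samples);

    recorder.stop();
    recorder.release();
  }
}
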
Example #2
  public boolean recordImage(
      int width, int height, int depth, int channels, int stride, int pixelFormat, Buffer... image)
      throws Exception {
    if (video_st == null) {
      throw new Exception(
          "No video output stream (Is imageWidth > 0 && imageHeight > 0 and has start() been called?)");
    }
    int ret;

    if (image == null || image.length == 0) {
      /* No more frames to compress. The codec has a latency of a few frames
         if using B-frames, so we get the last frames by passing the same
         picture again. */
    } else {
      int step = stride * Math.abs(depth) / 8; // row size in bytes
      BytePointer data =
          image[0] instanceof ByteBuffer
              ? new BytePointer((ByteBuffer) image[0].position(0))
              : new BytePointer(new Pointer(image[0].position(0)));

      /* Guess the pixel format from the sample depth and channel count. */
      if (pixelFormat == AV_PIX_FMT_NONE) {
        if ((depth == Frame.DEPTH_UBYTE || depth == Frame.DEPTH_BYTE) && channels == 3) {
          pixelFormat = AV_PIX_FMT_BGR24;
        } else if ((depth == Frame.DEPTH_UBYTE || depth == Frame.DEPTH_BYTE) && channels == 1) {
          pixelFormat = AV_PIX_FMT_GRAY8;
        } else if ((depth == Frame.DEPTH_USHORT || depth == Frame.DEPTH_SHORT) && channels == 1) {
          pixelFormat =
              ByteOrder.nativeOrder().equals(ByteOrder.BIG_ENDIAN)
                  ? AV_PIX_FMT_GRAY16BE
                  : AV_PIX_FMT_GRAY16LE;
        } else if ((depth == Frame.DEPTH_UBYTE || depth == Frame.DEPTH_BYTE) && channels == 4) {
          pixelFormat = AV_PIX_FMT_RGBA;
        } else if ((depth == Frame.DEPTH_UBYTE || depth == Frame.DEPTH_BYTE) && channels == 2) {
          pixelFormat = AV_PIX_FMT_NV21; // Android's camera capture format
          step = width;
        } else {
          throw new Exception(
              "Could not guess pixel format of image: depth=" + depth + ", channels=" + channels);
        }
      }

      if (video_c.pix_fmt() != pixelFormat
          || video_c.width() != width
          || video_c.height() != height) {
        /* convert to the codec pixel format if needed */
        img_convert_ctx =
            sws_getCachedContext(
                img_convert_ctx,
                width,
                height,
                pixelFormat,
                video_c.width(),
                video_c.height(),
                video_c.pix_fmt(),
                SWS_BILINEAR,
                null,
                null,
                (DoublePointer) null);
        if (img_convert_ctx == null) {
          throw new Exception(
              "sws_getCachedContext() error: Cannot initialize the conversion context.");
        }
        avpicture_fill(new AVPicture(tmp_picture), data, pixelFormat, width, height);
        avpicture_fill(
            new AVPicture(picture),
            picture_buf,
            video_c.pix_fmt(),
            video_c.width(),
            video_c.height());
        tmp_picture.linesize(0, step);
        tmp_picture.format(pixelFormat);
        tmp_picture.width(width);
        tmp_picture.height(height);
        picture.format(video_c.pix_fmt());
        picture.width(video_c.width());
        picture.height(video_c.height());
        sws_scale(
            img_convert_ctx,
            new PointerPointer(tmp_picture),
            tmp_picture.linesize(),
            0,
            height,
            new PointerPointer(picture),
            picture.linesize());
      } else {
        avpicture_fill(new AVPicture(picture), data, pixelFormat, width, height);
        picture.linesize(0, step);
        picture.format(pixelFormat);
        picture.width(width);
        picture.height(height);
      }
    }

    if ((oformat.flags() & AVFMT_RAWPICTURE) != 0) {
      if (image == null || image.length == 0) {
        return false;
      }
      /* Raw video case: store the picture directly in the packet. The API may
      change slightly in the future for that. */
      av_init_packet(video_pkt);
      video_pkt.flags(video_pkt.flags() | AV_PKT_FLAG_KEY);
      video_pkt.stream_index(video_st.index());
      video_pkt.data(new BytePointer(picture));
      video_pkt.size(Loader.sizeof(AVPicture.class));
    } else {
      /* encode the image */
      av_init_packet(video_pkt);
      video_pkt.data(video_outbuf);
      video_pkt.size(video_outbuf_size);
      picture.quality(video_c.global_quality());
      if ((ret =
              avcodec_encode_video2(
                  video_c,
                  video_pkt,
                  image == null || image.length == 0 ? null : picture,
                  got_video_packet))
          < 0) {
        throw new Exception(
            "avcodec_encode_video2() error " + ret + ": Could not encode video packet.");
      }
      picture.pts(picture.pts() + 1); // magic required by libx264

      /* if zero size, it means the image was buffered */
      if (got_video_packet[0] != 0) {
        if (video_pkt.pts() != AV_NOPTS_VALUE) {
          video_pkt.pts(av_rescale_q(video_pkt.pts(), video_c.time_base(), video_st.time_base()));
        }
        if (video_pkt.dts() != AV_NOPTS_VALUE) {
          video_pkt.dts(av_rescale_q(video_pkt.dts(), video_c.time_base(), video_st.time_base()));
        }
        video_pkt.stream_index(video_st.index());
      } else {
        return false;
      }
    }

    synchronized (oc) {
      /* write the compressed frame in the media file */
      if (interleaved && audio_st != null) {
        if ((ret = av_interleaved_write_frame(oc, video_pkt)) < 0) {
          throw new Exception(
              "av_interleaved_write_frame() error "
                  + ret
                  + " while writing interleaved video frame.");
        }
      } else {
        if ((ret = av_write_frame(oc, video_pkt)) < 0) {
          throw new Exception("av_write_frame() error " + ret + " while writing video frame.");
        }
      }
    }
    return image != null ? (video_pkt.flags() & AV_PKT_FLAG_KEY) != 0 : got_video_packet[0] != 0;
  }
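
A matching sketch for recordImage(), under the same assumptions (hypothetical file name and frame content; AV_PIX_FMT_NONE is declared locally with FFmpeg's value of -1, since its import location varies across JavaCV versions). Leaving the pixel format unspecified exercises the format-guessing branch above:

import java.nio.ByteBuffer;
import java.util.Arrays;
import org.bytedeco.javacv.FFmpegFrameRecorder;
import org.bytedeco.javacv.Frame;

public class RecordImageSketch {
  // FFmpeg's AVPixelFormat sentinel; normally imported from the avutil bindings.
  static final int AV_PIX_FMT_NONE = -1;

  public static void main(String[] args) throws Exception {
    int width = 640, height = 480, channels = 3;
    FFmpegFrameRecorder recorder = new FFmpegFrameRecorder("out.mp4", width, height);
    recorder.setFrameRate(25);
    recorder.start();

    // One second of solid mid-gray video: DEPTH_UBYTE with three channels makes
    // recordImage() guess AV_PIX_FMT_BGR24, as in the branch above.
    byte[] gray = new byte[width * height * channels];
    Arrays.fill(gray, (byte) 128);
    ByteBuffer pixels = ByteBuffer.wrap(gray);
    for (int i = 0; i < 25; i++) {
      recorder.recordImage(width, height, Frame.DEPTH_UBYTE, channels,
          width * channels /* stride: samples per row */, AV_PIX_FMT_NONE, pixels);
    }

    recorder.stop();
    recorder.release();
  }
}

In normal use one calls record(Frame) instead, which forwards the Frame's width, height, depth, channels, and stride to recordImage() in exactly this way.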