public boolean record(IplImage image, int pixelFormat) throws Exception {
    if (video_st == null) {
      throw new Exception(
          "No video output stream (are imageWidth and imageHeight > 0, and has start() been called?)");
    }
    int ret;

    if (image == null) {
      /* No more frames to compress. The codec may still have a few frames
      buffered (when using B-frames), so we flush them by passing a null
      picture to the encoder below. */
    } else {
      int width = image.width();
      int height = image.height();
      int step = image.widthStep();
      BytePointer data = image.imageData();

      if (pixelFormat == AV_PIX_FMT_NONE) {
        int depth = image.depth();
        int channels = image.nChannels();
        if ((depth == IPL_DEPTH_8U || depth == IPL_DEPTH_8S) && channels == 3) {
          pixelFormat = AV_PIX_FMT_BGR24;
        } else if ((depth == IPL_DEPTH_8U || depth == IPL_DEPTH_8S) && channels == 1) {
          pixelFormat = AV_PIX_FMT_GRAY8;
        } else if ((depth == IPL_DEPTH_16U || depth == IPL_DEPTH_16S) && channels == 1) {
          pixelFormat = AV_HAVE_BIGENDIAN() ? AV_PIX_FMT_GRAY16BE : AV_PIX_FMT_GRAY16LE;
        } else if ((depth == IPL_DEPTH_8U || depth == IPL_DEPTH_8S) && channels == 4) {
          pixelFormat = AV_PIX_FMT_RGBA;
        } else if ((depth == IPL_DEPTH_8U || depth == IPL_DEPTH_8S) && channels == 2) {
          pixelFormat = AV_PIX_FMT_NV21; // Android's camera capture format (semi-planar YUV)
          step = width; // the NV21 luma plane is tightly packed, so the stride is the width
        } else {
          throw new Exception(
              "Could not guess pixel format of image: depth=" + depth + ", channels=" + channels);
        }
      }

      if (video_c.pix_fmt() != pixelFormat
          || video_c.width() != width
          || video_c.height() != height) {
        /* convert to the codec pixel format if needed */
        img_convert_ctx =
            sws_getCachedContext(
                img_convert_ctx,
                width,
                height,
                pixelFormat,
                video_c.width(),
                video_c.height(),
                video_c.pix_fmt(),
                SWS_BILINEAR,
                null,
                null,
                null);
        if (img_convert_ctx == null) {
          throw new Exception(
              "sws_getCachedContext() error: Cannot initialize the conversion context.");
        }
        // Wrap the source data and the destination buffer as AVPictures.
        avpicture_fill(tmp_picture, data, pixelFormat, width, height);
        avpicture_fill(picture, picture_buf, video_c.pix_fmt(), video_c.width(), video_c.height());
        // Override the stride computed by avpicture_fill() with the IplImage's actual row step.
        tmp_picture.linesize(0, step);
        sws_scale(
            img_convert_ctx,
            new PointerPointer(tmp_picture),
            tmp_picture.linesize(),
            0,
            height,
            new PointerPointer(picture),
            picture.linesize());
      } else {
        // The formats already match: wrap the image data directly, no conversion needed.
        avpicture_fill(picture, data, pixelFormat, width, height);
        picture.linesize(0, step);
      }
    }

    if ((oformat.flags() & AVFMT_RAWPICTURE) != 0) {
      if (image == null) {
        return false;
      }
      /* Raw video case: store the picture directly in the packet. */
      av_init_packet(video_pkt);
      video_pkt.flags(video_pkt.flags() | AV_PKT_FLAG_KEY);
      video_pkt.stream_index(video_st.index());
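      // For AVFMT_RAWPICTURE formats, the muxer expects the packet payload to be
      // the AVPicture struct itself, hence sizeof(AVPicture) rather than the image size.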
      video_pkt.data(new BytePointer(picture));
      video_pkt.size(Loader.sizeof(AVPicture.class));
    } else {
      /* encode the image */
      av_init_packet(video_pkt);
      video_pkt.data(video_outbuf);
      video_pkt.size(video_outbuf_size);
      picture.quality(video_c.global_quality());
      if ((ret =
              avcodec_encode_video2(
                  video_c, video_pkt, image == null ? null : picture, got_video_packet))
          < 0) {
        throw new Exception(
            "avcodec_encode_video2() error " + ret + ": Could not encode video packet.");
      }
      picture.pts(picture.pts() + 1); // libx264 requires monotonically increasing pts

      /* if no packet was produced, the encoder buffered the frame */
      if (got_video_packet[0] != 0) {
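        // av_rescale_q() converts a timestamp from the codec time base to the
        // muxer's stream time base, computing a * b / c without overflow.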
        if (video_pkt.pts() != AV_NOPTS_VALUE) {
          video_pkt.pts(av_rescale_q(video_pkt.pts(), video_c.time_base(), video_st.time_base()));
        }
        if (video_pkt.dts() != AV_NOPTS_VALUE) {
          video_pkt.dts(av_rescale_q(video_pkt.dts(), video_c.time_base(), video_st.time_base()));
        }
        video_pkt.stream_index(video_st.index());
      } else {
        return false;
      }
    }

    synchronized (oc) {
      /* write the compressed frame in the media file */
      if (interleaved && audio_st != null) {
        if ((ret = av_interleaved_write_frame(oc, video_pkt)) < 0) {
          throw new Exception(
              "av_interleaved_write_frame() error "
                  + ret
                  + " while writing interleaved video frame.");
        }
      } else {
        if ((ret = av_write_frame(oc, video_pkt)) < 0) {
          throw new Exception("av_write_frame() error " + ret + " while writing video frame.");
        }
      }
    }
    return picture.key_frame() != 0;
  }
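
A minimal usage sketch for record(), with hypothetical names (recorder for an
instance of the enclosing class, "frame.png" as a stand-in source). It assumes
the recorder was configured with imageWidth/imageHeight and that start() was
called, as the exception message above requires:

try {
  IplImage frame = cvLoadImage("frame.png"); // any IplImage source
  // AV_PIX_FMT_NONE asks record() to guess the format from depth/channels
  recorder.record(frame, AV_PIX_FMT_NONE);
  recorder.record(null, AV_PIX_FMT_NONE); // a null image flushes buffered frames
} catch (Exception e) {
  e.printStackTrace();
}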
Example #2
  public IplImage grab() throws Exception {
    if (pFormatCtx == null || pFormatCtx.isNull()) {
      throw new Exception("Could not grab: No AVFormatContext. (Has start() been called?)");
    }
    boolean done = false;
    long pts = 0;
    while (!done) {
      if (av_read_frame(pFormatCtx, packet) < 0) {
        return null; // end of stream (or a read error): no more frames
      }

      // Is this a packet from the video stream?
      if (packet.stream_index() == videoStream) {
        // Decode video frame
        int len = avcodec_decode_video2(pCodecCtx, pFrame, frameFinished, packet);
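        // len is the number of bytes of the packet consumed by the decoder;
        // frameFinished[0] becomes nonzero once a complete frame is available.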

        // Prefer the packet's dts; fall back to the pts stashed in the
        // frame's opaque field; otherwise default to 0.
        LongPointer opaque = new LongPointer(pFrame.opaque());
        if (packet.dts() != AV_NOPTS_VALUE) {
          pts = packet.dts();
        } else if (!opaque.isNull() && opaque.get() != AV_NOPTS_VALUE) {
          pts = opaque.get();
        } else {
          pts = 0;
        }
        // Convert from the stream time base to milliseconds.
        AVRational time_base = pStream.time_base();
        pts = 1000 * pts * time_base.num() / time_base.den();

        // Did we get a video frame?
        if (len > 0 && frameFinished[0] != 0) {
          switch (colorMode) {
            case BGR:
            case GRAY:
              // Deinterlace Picture
              if (deinterlace) {
                avpicture_deinterlace(
                    pFrame, pFrame, pCodecCtx.pix_fmt(), pCodecCtx.width(), pCodecCtx.height());
              }

              // Convert the image from its native format to the requested
              // color mode (BGR or grayscale)
              sws_scale(
                  img_convert_ctx,
                  new PointerPointer(pFrame),
                  pFrame.linesize(),
                  0,
                  pCodecCtx.height(),
                  new PointerPointer(pFrameRGB),
                  pFrameRGB.linesize());
              return_image.imageData(buffer);
              return_image.widthStep(pFrameRGB.linesize(0));
              break;
            case RAW:
              assert (pCodecCtx.width() == return_image.width()
                  && pCodecCtx.height() == return_image.height());
              return_image.imageData(pFrame.data(0));
              return_image.widthStep(pFrame.linesize(0));
              break;
            default:
              assert false : "unsupported color mode: " + colorMode;
          }
          return_image.imageSize(return_image.height() * return_image.widthStep());

          done = true;
        }
      }

      // Free the packet that was allocated by av_read_frame
      av_free_packet(packet);
    }

    return_image.timestamp = pts;
    return return_image;
  }
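
A minimal usage sketch for grab() as well, again with hypothetical names
(grabber for an instance of the enclosing class, process() as a placeholder
for application code); start() is assumed to open the input and initialize
pFormatCtx and friends:

try {
  grabber.start();
  IplImage img;
  while ((img = grabber.grab()) != null) {
    process(img); // img.timestamp holds the presentation time in milliseconds
  }
  grabber.stop();
} catch (Exception e) {
  e.printStackTrace();
}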