Code example #1
  public static void tryLoad() throws Exception {
    if (loadingException != null) {
      // A previous attempt already failed: rethrow the cached exception.
      throw loadingException;
    } else {
      try {
        // Load the FFmpeg native libraries this recorder depends on.
        Loader.load(com.googlecode.javacv.cpp.avutil.class);
        Loader.load(com.googlecode.javacv.cpp.avcodec.class);
        Loader.load(com.googlecode.javacv.cpp.avformat.class);
        Loader.load(com.googlecode.javacv.cpp.swscale.class);
      } catch (Throwable t) {
        // Cache the failure so every later call fails fast with the same
        // exception, wrapping non-Exception throwables such as UnsatisfiedLinkError.
        if (t instanceof Exception) {
          throw loadingException = (Exception) t;
        } else {
          throw loadingException = new Exception("Failed to load " + FFmpegFrameRecorder.class, t);
        }
      }
    }
  }
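
Callers typically invoke this before constructing a recorder, so that missing FFmpeg natives surface as a single checked exception up front. A minimal usage sketch (the error handling here is illustrative, not part of the library):

  try {
    FFmpegFrameRecorder.tryLoad();
  } catch (Exception e) {
    // Subsequent calls rethrow the same cached exception.
    System.err.println("FFmpeg native libraries could not be loaded: " + e);
  }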
Code example #2
File: FaceLearner.java Project: taosheng/consultant
  /** Normalizes the sample images to 120x120 PGM files. */
  public void normalize(String sampleImagePath, String trainedOutput) throws Exception {
    Loader.load(opencv_objdetect.class);
    String srcPath = sampleImagePath;
    String destPath = trainedOutput;

    File folder = new File(srcPath);
    File[] listOfFiles = folder.listFiles();
    if (listOfFiles == null) {
      // listFiles() returns null when srcPath is not a readable directory.
      throw new Exception("Not a readable directory: " + srcPath);
    }

    for (File file : listOfFiles) {
      if (file.isFile()) {
        generatePGMFromPic(srcPath, file.getName(), destPath);
      }
    }
  }
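
A hypothetical call site, assuming FaceLearner has a no-argument constructor and both directories exist (the paths are made up for illustration):

  FaceLearner learner = new FaceLearner();
  // Converts every regular file in samples/raw into a 120x120 PGM in samples/normalized.
  learner.normalize("samples/raw", "samples/normalized");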
Code example #3
  public boolean record(IplImage image, int pixelFormat) throws Exception {
    if (video_st == null) {
      throw new Exception(
          "No video output stream (Is imageWidth > 0 && imageHeight > 0 and has start() been called?)");
    }
    int ret;

    if (image == null) {
      /* no more frames to compress: the codec has a latency of a few
         frames when using B-frames, so we flush the delayed frames by
         passing a null picture to the encoder below */
    } else {
      int width = image.width();
      int height = image.height();
      int step = image.widthStep();
      BytePointer data = image.imageData();

      if (pixelFormat == AV_PIX_FMT_NONE) {
        int depth = image.depth();
        int channels = image.nChannels();
        if ((depth == IPL_DEPTH_8U || depth == IPL_DEPTH_8S) && channels == 3) {
          pixelFormat = AV_PIX_FMT_BGR24;
        } else if ((depth == IPL_DEPTH_8U || depth == IPL_DEPTH_8S) && channels == 1) {
          pixelFormat = AV_PIX_FMT_GRAY8;
        } else if ((depth == IPL_DEPTH_16U || depth == IPL_DEPTH_16S) && channels == 1) {
          pixelFormat = AV_HAVE_BIGENDIAN() ? AV_PIX_FMT_GRAY16BE : AV_PIX_FMT_GRAY16LE;
        } else if ((depth == IPL_DEPTH_8U || depth == IPL_DEPTH_8S) && channels == 4) {
          pixelFormat = AV_PIX_FMT_RGBA;
        } else if ((depth == IPL_DEPTH_8U || depth == IPL_DEPTH_8S) && channels == 2) {
          pixelFormat = AV_PIX_FMT_NV21; // Android's camera capture format
          step = width;
        } else {
          throw new Exception(
              "Could not guess pixel format of image: depth=" + depth + ", channels=" + channels);
        }
      }

      if (video_c.pix_fmt() != pixelFormat
          || video_c.width() != width
          || video_c.height() != height) {
        /* convert to the codec pixel format if needed */
        img_convert_ctx =
            sws_getCachedContext(
                img_convert_ctx,
                width,
                height,
                pixelFormat,
                video_c.width(),
                video_c.height(),
                video_c.pix_fmt(),
                SWS_BILINEAR,
                null,
                null,
                null);
        if (img_convert_ctx == null) {
          throw new Exception(
              "sws_getCachedContext() error: Cannot initialize the conversion context.");
        }
        avpicture_fill(tmp_picture, data, pixelFormat, width, height);
        avpicture_fill(picture, picture_buf, video_c.pix_fmt(), video_c.width(), video_c.height());
        tmp_picture.linesize(0, step);
        sws_scale(
            img_convert_ctx,
            new PointerPointer(tmp_picture),
            tmp_picture.linesize(),
            0,
            height,
            new PointerPointer(picture),
            picture.linesize());
      } else {
        avpicture_fill(picture, data, pixelFormat, width, height);
        picture.linesize(0, step);
      }
    }

    if ((oformat.flags() & AVFMT_RAWPICTURE) != 0) {
      if (image == null) {
        return false;
      }
      /* raw video case. The API may change slightly in the future for that? */
      av_init_packet(video_pkt);
      video_pkt.flags(video_pkt.flags() | AV_PKT_FLAG_KEY);
      video_pkt.stream_index(video_st.index());
      video_pkt.data(new BytePointer(picture));
      video_pkt.size(Loader.sizeof(AVPicture.class));
    } else {
      /* encode the image */
      av_init_packet(video_pkt);
      video_pkt.data(video_outbuf);
      video_pkt.size(video_outbuf_size);
      picture.quality(video_c.global_quality());
      if ((ret =
              avcodec_encode_video2(
                  video_c, video_pkt, image == null ? null : picture, got_video_packet))
          < 0) {
        throw new Exception(
            "avcodec_encode_video2() error " + ret + ": Could not encode video packet.");
      }
      picture.pts(picture.pts() + 1); // magic required by libx264

      /* if zero size, it means the image was buffered */
      if (got_video_packet[0] != 0) {
        if (video_pkt.pts() != AV_NOPTS_VALUE) {
          video_pkt.pts(av_rescale_q(video_pkt.pts(), video_c.time_base(), video_st.time_base()));
        }
        if (video_pkt.dts() != AV_NOPTS_VALUE) {
          video_pkt.dts(av_rescale_q(video_pkt.dts(), video_c.time_base(), video_st.time_base()));
        }
        video_pkt.stream_index(video_st.index());
      } else {
        return false;
      }
    }

    synchronized (oc) {
      /* write the compressed frame in the media file */
      if (interleaved && audio_st != null) {
        if ((ret = av_interleaved_write_frame(oc, video_pkt)) < 0) {
          throw new Exception(
              "av_interleaved_write_frame() error "
                  + ret
                  + " while writing interleaved video frame.");
        }
      } else {
        if ((ret = av_write_frame(oc, video_pkt)) < 0) {
          throw new Exception("av_write_frame() error " + ret + " while writing video frame.");
        }
      }
    }
    return picture.key_frame() != 0;
  }
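
A sketch of how record() is usually driven, assuming the recorder has been constructed and start() called as in the JavaCV examples; the output name, frame size, and frame source are assumptions for illustration (static imports from avutil are omitted):

  FFmpegFrameRecorder recorder = new FFmpegFrameRecorder("out.mp4", 640, 480);
  recorder.start();
  for (IplImage frame : capturedFrames) { // capturedFrames: a hypothetical frame source
    // AV_PIX_FMT_NONE lets record() guess the pixel format from depth and channels.
    recorder.record(frame, AV_PIX_FMT_NONE);
  }
  // A null image drains frames delayed by B-frame encoding, as the comment at
  // the top of record() explains; codecs with more latency may need repeated calls.
  recorder.record((IplImage) null, AV_PIX_FMT_NONE);
  recorder.stop();
  recorder.release();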
Code example #4
File: swscale.java Project: Bingle-labake/Vine15
  static {
    // With no argument, Loader.load() loads the natives for the enclosing
    // class, here the FFmpeg swscale bindings.
    Loader.load();
  }
Code example #5
File: Broadphase.java Project: ejwa/dinja-engine
  static {
    // Load the Bullet physics native bindings before any native method runs.
    Loader.load(BulletNative.class);
  }
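
Examples #4 and #5 show the common JavaCPP idiom of loading native bindings from a static initializer, so the natives are in place before any native method on the class can run. The same pattern for one's own binding class might look like this (MyNativeLib and MyNativeWrapper are hypothetical names, not from the examples above):

  public class MyNativeWrapper {
    static {
      // Runs once when the class is initialized; Loader caches already-loaded
      // classes, so repeated loads are cheap.
      Loader.load(MyNativeLib.class);
    }
  }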