public AdaptiveSquareBinaryFilter(int radius, double bias, boolean down, ImageType<T> inputType) {
   this.radius = radius;
   this.bias = bias;
   this.down = down;
   this.inputType = inputType;
   work1 = inputType.createImage(1, 1);
   work2 = inputType.createImage(1, 1);
 }
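A minimal usage sketch of the constructor above (hypothetical: it assumes the filter exposes a process(input, output) method in the style of BoofCV's binary filters):

  // threshold a gray image against the local mean inside a square region
  ImageFloat32 input = new ImageFloat32(640, 480);
  ImageUInt8 binary = new ImageUInt8(640, 480);

  AdaptiveSquareBinaryFilter<ImageFloat32> filter =
      new AdaptiveSquareBinaryFilter<ImageFloat32>(
          10, 0.0, true, ImageType.single(ImageFloat32.class));

  filter.process(input, binary); // down=true marks pixels below the local mean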
Example #2
 /**
  * Creates a new image with all of its parameters initialized, including the data array.
  *
  * @param width Image's width.
  * @param height Image's height.
  * @param numBands Number of bands/colors.
  */
 protected ImageInterleaved(int width, int height, int numBands) {
   _setData(Array.newInstance(getPrimitiveDataType(), width * height * numBands));
   this.startIndex = 0;
   this.stride = width * numBands;
   this.numBands = numBands;
   this.width = width;
   this.height = height;
   this.imageType = (ImageType) ImageType.interleaved(numBands, getClass());
 }
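To make the interleaved layout concrete, a short sketch using InterleavedU8 (a concrete subclass) and the fields initialized above:

  InterleavedU8 image = new InterleavedU8(320, 240, 3); // width, height, 3 bands

  // band b of pixel (x,y) lives at: startIndex + y*stride + x*numBands + b
  int x = 5, y = 10, b = 2;
  int index = image.startIndex + y * image.stride + x * image.numBands + b;
  int value = image.data[index] & 0xFF; // unsigned 8-bit sample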
  public SurfMultiSpectral_to_DescribeRegionPoint(
      DescribePointSurfMultiSpectral<II> alg, Class<T> imageType, Class<II> integralType) {
    this.alg = alg;

    gray = GeneralizedImageOps.createSingleBand(imageType, 1, 1);
    grayII = GeneralizedImageOps.createSingleBand(integralType, 1, 1);
    bandII = new MultiSpectral<II>(integralType, 1, 1, alg.getNumBands());

    this.imageType = ImageType.ms(alg.getNumBands(), imageType);
  }
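A hypothetical usage sketch, assuming (as the class name suggests) that the wrapper implements the DescribeRegionPoint interface over a MultiSpectral image; the variable names here are placeholders:

  describe.setImage(colorImage); // MultiSpectral<T> with alg.getNumBands() bands
  SurfFeature desc = describe.createDescription();
  describe.process(x, y, orientation, radius, desc); // fills 'desc' in place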
  public static void main(String args[]) {
    VisualizeCirculantTrackerApp app =
        new VisualizeCirculantTrackerApp<ImageUInt8>(ImageUInt8.class);

    //		String fileName = "../data/applet/tracking/track_peter.mjpeg";
    String fileName = "../data/applet/tracking/day_follow_car.mjpeg";

    SimpleImageSequence<ImageUInt8> sequence =
        DefaultMediaManager.INSTANCE.openVideo(fileName, ImageType.single(ImageUInt8.class));

    app.process(sequence);
  }
  public static void evaluate(String dataset) {
    Class type = ImageFloat32.class;

    DebugTldTrackerTldData generator = new DebugTldTrackerTldData(ImageType.single(type));

    InterpolatePixelS interpolate = FactoryInterpolation.bilinearPixelS(type, BorderType.EXTENDED);
    ImageGradient gradient = FactoryDerivative.sobel(type, type);

    TldTracker tracker = new TldTracker(null, interpolate, gradient, type, type);

    generator.evaluate(dataset, tracker);
  }
  protected MonocularPlaneVisualOdometry<ImageUInt8> createAlgorithm() {

    PkltConfig config = new PkltConfig();
    config.pyramidScaling = new int[] {1, 2, 4, 8};
    config.templateRadius = 3;
    ConfigGeneralDetector configDetector = new ConfigGeneralDetector(600, 3, 1);

    PointTracker<ImageUInt8> tracker =
        FactoryPointTracker.klt(config, configDetector, ImageUInt8.class, ImageSInt16.class);

    return FactoryVisualOdometry.monoPlaneInfinity(
        50, 2, 1.5, 300, tracker, ImageType.single(ImageUInt8.class));
  }
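A hedged sketch of driving the odometry returned above (assumptions: param is a MonoPlaneParameters holding the intrinsics and the plane-to-camera transform, and video is an already-open SimpleImageSequence<ImageUInt8>):

  MonocularPlaneVisualOdometry<ImageUInt8> vo = createAlgorithm();
  vo.setCalibration(param);

  while (video.hasNext()) {
    if (!vo.process(video.next())) {
      System.out.println("motion estimate failed for this frame");
      continue;
    }
    Se3_F64 cameraToWorld = vo.getCameraToWorld(); // accumulated egomotion
  }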
  private StitchingFromMotion2D<GrayU8, Affine2D_F64> createStabilization() {

    ConfigGeneralDetector config = new ConfigGeneralDetector();
    config.maxFeatures = 150;
    config.threshold = 40;
    config.radius = 3;

    PointTracker<GrayU8> tracker =
        FactoryPointTracker.klt(new int[] {1, 2, 4}, config, 3, GrayU8.class, GrayS16.class);

    ImageMotion2D<GrayU8, Affine2D_F64> motion =
        FactoryMotion2D.createMotion2D(
            100, 1.5, 2, 40, 0.5, 0.6, false, tracker, new Affine2D_F64());

    return FactoryMotion2D.createVideoStitch(0.2, motion, ImageType.single(GrayU8.class));
  }
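A minimal sketch of running the stabilizer above on a video stream (video is assumed to be an already-open SimpleImageSequence<GrayU8>; the stitch-image size is arbitrary):

  StitchingFromMotion2D<GrayU8, Affine2D_F64> stabilizer = createStabilization();
  stabilizer.configure(320, 240, null); // null starts from the identity transform

  while (video.hasNext()) {
    if (!stabilizer.process(video.next())) {
      stabilizer.reset(); // tracking failed; start stabilizing from scratch
    }
    GrayU8 stabilized = stabilizer.getStitchedImage();
  }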
 public void setDistorted(IntrinsicParameters param, DenseMatrix64F rect) {
   if (rect == null) {
     this.undoRadial =
         LensDistortionOps.imageRemoveDistortion(
             AdjustmentType.FULL_VIEW,
             BorderType.VALUE,
             param,
             null,
             ImageType.single(ImageFloat32.class));
     this.remove_p_to_p =
         LensDistortionOps.transform_F32(AdjustmentType.FULL_VIEW, param, null, false);
   } else {
     this.undoRadial =
         RectifyImageOps.rectifyImage(param, rect, BorderType.VALUE, ImageFloat32.class);
     this.remove_p_to_p = RectifyImageOps.transformPixelToRect_F32(param, rect);
   }
 }
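Once setDistorted() has run, the stored transforms can be applied; a short sketch (assuming undoRadial is the ImageDistort returned by the factory calls above):

  ImageFloat32 distorted = new ImageFloat32(640, 480); // hypothetical input frame
  ImageFloat32 undistorted = new ImageFloat32(640, 480);

  // remove lens distortion (or rectify, if a rectification matrix was provided)
  undoRadial.apply(distorted, undistorted);

  // map a pixel from distorted to undistorted/rectified coordinates
  Point2D_F32 p = new Point2D_F32();
  remove_p_to_p.compute(100, 150, p);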
  public VisualizeAssociationMatchesApp(Class<T> imageType, Class<D> derivType) {
    super(3);
    this.imageType = imageType;

    GeneralFeatureDetector<T, D> alg;

    addAlgorithm(
        0,
        "Fast Hessian",
        FactoryInterestPoint.fastHessian(new ConfigFastHessian(1, 2, 200, 1, 9, 4, 4)));
    if (imageType == ImageFloat32.class)
      addAlgorithm(
          0, "SIFT", FactoryInterestPoint.siftDetector(null, new ConfigSiftDetector(2, 5, 200, 5)));
    alg =
        FactoryDetectPoint.createShiTomasi(new ConfigGeneralDetector(500, 2, 1), false, derivType);
    addAlgorithm(0, "Shi-Tomasi", FactoryInterestPoint.wrapPoint(alg, 1, imageType, derivType));

    addAlgorithm(1, "SURF-S", FactoryDescribeRegionPoint.surfStable(null, imageType));
    addAlgorithm(
        1,
        "SURF-S Color",
        FactoryDescribeRegionPoint.surfColorStable(null, ImageType.ms(3, imageType)));
    if (imageType == ImageFloat32.class)
      addAlgorithm(1, "SIFT", FactoryDescribeRegionPoint.sift(null, null));
    addAlgorithm(1, "BRIEF", FactoryDescribeRegionPoint.brief(new ConfigBrief(true), imageType));
    addAlgorithm(1, "BRIEFSO", FactoryDescribeRegionPoint.brief(new ConfigBrief(false), imageType));
    addAlgorithm(1, "Pixel 11x11", FactoryDescribeRegionPoint.pixel(11, 11, imageType));
    addAlgorithm(1, "NCC 11x11", FactoryDescribeRegionPoint.pixelNCC(11, 11, imageType));

    addAlgorithm(2, "Greedy", false);
    addAlgorithm(2, "Backwards", true);

    // estimate orientation using integral images, since that approach is fast and accurate
    Class integralType = GIntegralImageOps.getIntegralType(imageType);
    OrientationIntegral orientationII = FactoryOrientationAlgs.sliding_ii(null, integralType);
    orientation = FactoryOrientation.convertImage(orientationII, imageType);

    imageLeft = new MultiSpectral<T>(imageType, 1, 1, 3);
    imageRight = new MultiSpectral<T>(imageType, 1, 1, 3);
    grayLeft = GeneralizedImageOps.createSingleBand(imageType, 1, 1);
    grayRight = GeneralizedImageOps.createSingleBand(imageType, 1, 1);

    setMainGUI(panel);
  }
Example #10
 @Override
 public ImageType<Output> getDerivativeType() {
   return ImageType.single((Class) m.getParameterTypes()[2]);
 }
Example #11
 protected ShowGradient() {
   super(ImageType.single(GrayU8.class));
 }
Example #12
 @Override
 public ImageType<ImageUInt8> getOutputType() {
   return ImageType.single(ImageUInt8.class);
 }
Example #13
 @Override
 public ImageType<T> getImageType() {
   return ImageType.single(imageType);
 }
Example #14
 @Override
 public ImageType<ImageFloat32> getImageType() {
   return ImageType.single(ImageFloat32.class);
 }
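All of the getImageType()/getOutputType() overrides above build the same kind of descriptor; a quick sketch of what ImageType.single() is for:

  ImageType<ImageFloat32> type = ImageType.single(ImageFloat32.class);

  // the descriptor creates images generically, without naming the concrete class
  ImageFloat32 image = type.createImage(640, 480);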
 public PointProcessing(StitchingFromMotion2D<GrayU8, Affine2D_F64> alg) {
   super(ImageType.single(GrayU8.class));
   this.alg = alg;
 }
 public TestImplImageDistort_I8() {
   super(ImageType.single(GrayU8.class));
 }
  public static void main(String[] args) {

    // Example with a moving camera.  Highlights why motion estimation is sometimes required
    String fileName = UtilIO.pathExample("tracking/chipmunk.mjpeg");
    // Camera has a bit of jitter in it.  Static kinda works but motion reduces false positives
    //		String fileName = UtilIO.pathExample("background/horse_jitter.mp4");

    // Comment/Uncomment to switch input image type
    ImageType imageType = ImageType.single(GrayF32.class);
    //		ImageType imageType = ImageType.il(3, InterleavedF32.class);
    //		ImageType imageType = ImageType.il(3, InterleavedU8.class);

    // Configure the feature detector
    ConfigGeneralDetector confDetector = new ConfigGeneralDetector();
    confDetector.threshold = 10;
    confDetector.maxFeatures = 300;
    confDetector.radius = 6;

    // Use a KLT tracker
    PointTracker tracker =
        FactoryPointTracker.klt(new int[] {1, 2, 4, 8}, confDetector, 3, GrayF32.class, null);

    // This estimates the 2D image motion
    ImageMotion2D<GrayF32, Homography2D_F64> motion2D =
        FactoryMotion2D.createMotion2D(
            500, 0.5, 3, 100, 0.6, 0.5, false, tracker, new Homography2D_F64());

    ConfigBackgroundBasic configBasic = new ConfigBackgroundBasic(30, 0.005f);

    // Configuration for the Gaussian model.  Note that the threshold changes depending on the
    // number of image bands: 12 for gray scale and 40 for color
    ConfigBackgroundGaussian configGaussian = new ConfigBackgroundGaussian(12, 0.001f);
    configGaussian.initialVariance = 64;
    configGaussian.minimumDifference = 5;

    // Comment/Uncomment to switch background mode
    BackgroundModelMoving background =
        FactoryBackgroundModel.movingBasic(
            configBasic, new PointTransformHomography_F32(), imageType);
    //		FactoryBackgroundModel.movingGaussian(configGaussian, new PointTransformHomography_F32(), imageType);

    MediaManager media = DefaultMediaManager.INSTANCE;
    SimpleImageSequence video = media.openVideo(fileName, background.getImageType());
    //				media.openCamera(null,640,480,background.getImageType());

    // ====== Initialize Images

    // storage for segmented image.  Background = 0, Foreground = 1
    GrayU8 segmented = new GrayU8(video.getNextWidth(), video.getNextHeight());
    // Grey scale image that's the input for motion estimation
    GrayF32 grey = new GrayF32(segmented.width, segmented.height);

    // coordinate frames
    Homography2D_F32 firstToCurrent32 = new Homography2D_F32();
    Homography2D_F32 homeToWorld = new Homography2D_F32();
    homeToWorld.a13 = grey.width / 2;
    homeToWorld.a23 = grey.height / 2;

    // Create a background image twice the size of the input image.  Tell it that the home is in the
    // center
    background.initialize(grey.width * 2, grey.height * 2, homeToWorld);

    BufferedImage visualized =
        new BufferedImage(segmented.width, segmented.height, BufferedImage.TYPE_INT_RGB);
    ImageGridPanel gui = new ImageGridPanel(1, 2);
    gui.setImages(visualized, visualized);

    ShowImages.showWindow(gui, "Detections", true);

    double fps = 0;
    double alpha = 0.01; // smoothing factor for FPS

    while (video.hasNext()) {
      ImageBase input = video.next();

      long before = System.nanoTime();
      GConvertImage.convert(input, grey);

      if (!motion2D.process(grey)) {
        throw new RuntimeException("Should handle this scenario");
      }

      Homography2D_F64 firstToCurrent64 = motion2D.getFirstToCurrent();
      UtilHomography.convert(firstToCurrent64, firstToCurrent32);

      background.segment(firstToCurrent32, input, segmented);
      background.updateBackground(firstToCurrent32, input);
      long after = System.nanoTime();

      fps = (1.0 - alpha) * fps + alpha * (1.0 / ((after - before) / 1e9));

      VisualizeBinaryData.renderBinary(segmented, false, visualized);
      gui.setImage(0, 0, (BufferedImage) video.getGuiImage());
      gui.setImage(0, 1, visualized);
      gui.repaint();

      System.out.println("FPS = " + fps);

      try {
        Thread.sleep(5); // slow the display loop down so it's watchable
      } catch (InterruptedException e) {
        // an interrupt just ends the pause early; nothing to clean up
      }
    }
  }
 public DebugTldTrackerTldData(ImageType<T> type) {
   input = type.createImage(1, 1);
 }