Example #1
  /** Generate output image whose type is same as input image. */
  private ImagePlus makeOutputImage(ImagePlus imp, FloatProcessor fp, int ptype) {
    int width = imp.getWidth();
    int height = imp.getHeight();
    float[] pixels = (float[]) fp.getPixels();
    ImageProcessor oip = null;

    // Create output image consistent w/ type of input image.
    int size = pixels.length;
    switch (ptype) {
      case BYTE_TYPE:
        oip = imp.getProcessor().createProcessor(width, height);
        byte[] pixels8 = (byte[]) oip.getPixels();
        for (int i = 0; i < size; i++) pixels8[i] = (byte) pixels[i];
        break;
      case SHORT_TYPE:
        oip = imp.getProcessor().createProcessor(width, height);
        short[] pixels16 = (short[]) oip.getPixels();
        for (int i = 0; i < size; i++) pixels16[i] = (short) pixels[i];
        break;
      case FLOAT_TYPE:
        oip = new FloatProcessor(width, height, pixels, null);
        break;
    }

    // Adjust for display.
    // Calling this on non-ByteProcessors ensures image
    // processor is set up to correctly display image.
    oip.resetMinAndMax();

    // Create new image plus object. Don't use
    // ImagePlus.createImagePlus here because there may be
    // attributes of input image that are not appropriate for
    // projection.
    return new ImagePlus(makeTitle(), oip);
  }
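The BYTE_TYPE, SHORT_TYPE and FLOAT_TYPE constants and the makeTitle() helper belong to the surrounding class and are not shown. A minimal sketch of plausible definitions, stated as assumptions rather than the original source:

  // Assumed pixel-type codes matching the switch above.
  public static final int BYTE_TYPE = 0, SHORT_TYPE = 1, FLOAT_TYPE = 2;

  // Hypothetical title helper, for illustration only.
  private String makeTitle() {
    return "Projection";
  }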
Example #2
File: Wand.java  Project: AlexJoz/docuensj
 /** Constructs a Wand object from an ImageProcessor. */
 public Wand(ImageProcessor ip) {
   this.ip = ip;
   if (ip instanceof ByteProcessor) bpixels = (byte[]) ip.getPixels();
   else if (ip instanceof ColorProcessor) cpixels = (int[]) ip.getPixels();
   else if (ip instanceof ShortProcessor) spixels = (short[]) ip.getPixels();
   else if (ip instanceof FloatProcessor) fpixels = (float[]) ip.getPixels();
   width = ip.getWidth();
   height = ip.getHeight();
 }
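A minimal usage sketch, assuming the standard ij.gui.Wand API (autoOutline() plus the npoints/xpoints/ypoints fields); the seed point and threshold range are illustrative:

 Wand wand = new Wand(imp.getProcessor());
 int x = 100, y = 100; // seed point (illustrative)
 // Trace the connected region around (x, y) whose values lie in [50, 200].
 wand.autoOutline(x, y, 50.0, 200.0);
 if (wand.npoints > 0) {
   Roi roi = new PolygonRoi(wand.xpoints, wand.ypoints, wand.npoints, Roi.TRACED_ROI);
   imp.setRoi(roi);
 }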
Example #3
  void Bernsen(ImagePlus imp, int radius, double par1, double par2, boolean doIwhite) {
    // Bernsen recommends WIN_SIZE = 31 and CONTRAST_THRESHOLD = 15.
    //  1) Bernsen J. (1986) "Dynamic Thresholding of Grey-Level Images"
    //    Proc. of the 8th Int. Conf. on Pattern Recognition, pp. 1251-1255
    //  2) Sezgin M. and Sankur B. (2004) "Survey over Image Thresholding
    //   Techniques and Quantitative Performance Evaluation" Journal of
    //   Electronic Imaging, 13(1): 146-165
    //  http://citeseer.ist.psu.edu/sezgin04survey.html
    // Ported to ImageJ plugin from E Celebi's fourier_0.8 routines
    // This version uses a circular local window, instead of a rectangular one
    ImagePlus Maximp, Minimp;
    ImageProcessor ip = imp.getProcessor(), ipMax, ipMin;
    int contrast_threshold = 15;
    int local_contrast;
    int mid_gray;
    byte object;
    byte backg;
    int temp;

    if (par1 != 0) {
      IJ.log("Bernsen: changed contrast_threshold from :" + contrast_threshold + "  to:" + par1);
      contrast_threshold = (int) par1;
    }

    if (doIwhite) {
      object = (byte) 0xff;
      backg = (byte) 0;
    } else {
      object = (byte) 0;
      backg = (byte) 0xff;
    }

    Maximp = duplicateImage(ip);
    ipMax = Maximp.getProcessor();
    RankFilters rf = new RankFilters();
    rf.rank(ipMax, radius, rf.MAX); // Maximum
    // Maximp.show();
    Minimp = duplicateImage(ip);
    ipMin = Minimp.getProcessor();
    rf.rank(ipMin, radius, rf.MIN); // Minimum
    // Minimp.show();
    byte[] pixels = (byte[]) ip.getPixels();
    byte[] max = (byte[]) ipMax.getPixels();
    byte[] min = (byte[]) ipMin.getPixels();

    for (int i = 0; i < pixels.length; i++) {
      local_contrast = (int) ((max[i] & 0xff) - (min[i] & 0xff));
      mid_gray = (int) ((min[i] & 0xff) + (max[i] & 0xff)) / 2;
      temp = (int) (pixels[i] & 0x0000ff);
      if (local_contrast < contrast_threshold)
        pixels[i] = (mid_gray >= 128) ? object : backg; // Low contrast region
      else pixels[i] = (temp >= mid_gray) ? object : backg;
    }
    // imp.updateAndDraw();
    return;
  }
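duplicateImage(ip) is referenced here (and by several of the other local-threshold examples) but is not shown. A minimal sketch of what such a helper could look like, so that the rank filters operate on a copy instead of the original pixels:

  // Hypothetical helper: wraps a copy of the processor in a new ImagePlus,
  // so rf.rank() modifies the copy rather than the input image.
  private ImagePlus duplicateImage(ImageProcessor ip) {
    return new ImagePlus("temp", ip.duplicate());
  }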
Example #4
 void getCentroid(ImageProcessor ip, int minThreshold, int maxThreshold) {
   byte[] pixels = (byte[]) ip.getPixels();
   byte[] mask = ip.getMaskArray();
   boolean limit = minThreshold > 0 || maxThreshold < 255;
   double xsum = 0, ysum = 0;
   int count = 0, i, mi, v;
   for (int y = ry, my = 0; y < (ry + rh); y++, my++) {
     i = y * width + rx;
     mi = my * rw;
     for (int x = rx; x < (rx + rw); x++) {
       if (mask == null || mask[mi++] != 0) {
         if (limit) {
           v = pixels[i] & 255;
           if (v >= minThreshold && v <= maxThreshold) {
             count++;
             xsum += x;
             ysum += y;
           }
         } else {
           count++;
           xsum += x;
           ysum += y;
         }
       }
       i++;
     }
   }
   xCentroid = xsum / count + 0.5;
   yCentroid = ysum / count + 0.5;
   if (cal != null) {
     xCentroid = cal.getX(xCentroid);
     yCentroid = cal.getY(yCentroid, height);
   }
 }
Example #5
  /*------------------------------------------------------------------*/
  void putRow(ImageProcessor ip, int y, double[] row) {
    int rowLength = ip.getWidth();

    if (rowLength != row.length) {
      throw new IndexOutOfBoundsException("Incoherent array sizes");
    }
    y *= rowLength;
    if (ip.getPixels() instanceof float[]) {
      float[] floatPixels = (float[]) ip.getPixels();
      for (int i = 0; (i < rowLength); i++) {
        floatPixels[y++] = (float) row[i];
      }
    } else {
      throw new IllegalArgumentException("Float image required");
    }
  } /* end putRow */
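putRow writes a double[] into one row of a float image; the matching getRow used by getHorizontalGradient (Example #10) reads a row out the same way. A sketch under the same float-only convention:

  void getRow(ImageProcessor ip, int y, double[] row) {
    int rowLength = ip.getWidth();
    if (rowLength != row.length) {
      throw new IndexOutOfBoundsException("Incoherent array sizes");
    }
    if (!(ip.getPixels() instanceof float[])) {
      throw new IllegalArgumentException("Float image required");
    }
    float[] floatPixels = (float[]) ip.getPixels();
    y *= rowLength;
    // Copy one row of the float image into the caller's buffer.
    for (int i = 0; i < rowLength; i++) {
      row[i] = floatPixels[y++];
    }
  }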
Example #6
  /*------------------------------------------------------------------*/
  void putColumn(ImageProcessor ip, int x, double[] column) {
    int width = ip.getWidth();

    if (ip.getHeight() != column.length) {
      throw new IndexOutOfBoundsException("Incoherent array sizes");
    }
    if (ip.getPixels() instanceof float[]) {
      float[] floatPixels = (float[]) ip.getPixels();
      for (int i = 0; (i < column.length); i++) {
        floatPixels[x] = (float) column[i];
        x += width;
      }
    } else {
      throw new IllegalArgumentException("Float image required");
    }
  } /* end putColumn */
Example #7
 void reset(ImagePlus imp, ImageProcessor ip) {
   // Copy the data in the restore array into the pixels of ip, taking
   // care not to give the image processor a reference to the restore
   // array itself.
   int[] pixels = (int[]) ip.getPixels();
   for (int i = 0; i < numPixels; i++) pixels[i] = restore[i];
 }
Example #8
  void Contrast(ImagePlus imp, int radius, double par1, double par2, boolean doIwhite) {
    // G. Landini, 2013
    // Based on a simple contrast toggle. This procedure does not have user-provided parameters
    // other than the kernel radius.
    // Sets the pixel value to either white or black depending on whether its current value is
    // closest to the local Max or Min respectively.
    // The procedure is similar to Toggle Contrast Enhancement (see Soille, Morphological Image
    // Analysis (2004), p. 259).

    ImagePlus Maximp, Minimp;
    ImageProcessor ip = imp.getProcessor(), ipMax, ipMin;
    int c_value = 0;
    int mid_gray;
    byte object;
    byte backg;

    if (doIwhite) {
      object = (byte) 0xff;
      backg = (byte) 0;
    } else {
      object = (byte) 0;
      backg = (byte) 0xff;
    }

    Maximp = duplicateImage(ip);
    ipMax = Maximp.getProcessor();
    RankFilters rf = new RankFilters();
    rf.rank(ipMax, radius, rf.MAX); // Maximum
    // Maximp.show();
    Minimp = duplicateImage(ip);
    ipMin = Minimp.getProcessor();
    rf.rank(ipMin, radius, rf.MIN); // Minimum
    // Minimp.show();
    byte[] pixels = (byte[]) ip.getPixels();
    byte[] max = (byte[]) ipMax.getPixels();
    byte[] min = (byte[]) ipMin.getPixels();
    for (int i = 0; i < pixels.length; i++) {
      pixels[i] =
          (Math.abs((max[i] & 0xff) - (pixels[i] & 0xff))
                  <= Math.abs((pixels[i] & 0xff) - (min[i] & 0xff)))
              ? object
              : backg;
    }
    // imp.updateAndDraw();
    return;
  }
Example #9
  void MidGrey(ImagePlus imp, int radius, double par1, double par2, boolean doIwhite) {
    // See: Image Processing Learning Resources HIPR2
    // http://homepages.inf.ed.ac.uk/rbf/HIPR2/adpthrsh.htm
    ImagePlus Maximp, Minimp;
    ImageProcessor ip = imp.getProcessor(), ipMax, ipMin;
    int c_value = 0;
    int mid_gray;
    byte object;
    byte backg;

    if (par1 != 0) {
      IJ.log("MidGrey: changed c_value from :" + c_value + "  to:" + par1);
      c_value = (int) par1;
    }

    if (doIwhite) {
      object = (byte) 0xff;
      backg = (byte) 0;
    } else {
      object = (byte) 0;
      backg = (byte) 0xff;
    }

    Maximp = duplicateImage(ip);
    ipMax = Maximp.getProcessor();
    RankFilters rf = new RankFilters();
    rf.rank(ipMax, radius, rf.MAX); // Maximum
    // Maximp.show();
    Minimp = duplicateImage(ip);
    ipMin = Minimp.getProcessor();
    rf.rank(ipMin, radius, rf.MIN); // Minimum
    // Minimp.show();
    byte[] pixels = (byte[]) ip.getPixels();
    byte[] max = (byte[]) ipMax.getPixels();
    byte[] min = (byte[]) ipMin.getPixels();

    for (int i = 0; i < pixels.length; i++) {
      pixels[i] =
          ((int) (pixels[i] & 0xff) > (int) (((max[i] & 0xff) + (min[i] & 0xff)) / 2) - c_value)
              ? object
              : backg;
    }
    // imp.updateAndDraw();
    return;
  }
Example #10
  /*------------------------------------------------------------------*/
  public void getHorizontalGradient(ImageProcessor ip, double tolerance) {
    if (!(ip.getPixels() instanceof float[])) {
      throw new IllegalArgumentException("Float image required");
    }

    float[] floatPixels = (float[]) ip.getPixels();
    int width = ip.getWidth();
    int height = ip.getHeight();
    double[] line = new double[width];

    for (int y = 0; (y < height); y++) {
      getRow(ip, y, line);
      getSplineInterpolationCoefficients(line, tolerance);
      getGradient(line);
      putRow(ip, y, line);
      stepProgressBar();
    }
  } /* end getHorizontalGradient */
Example #11
  /*------------------------------------------------------------------*/
  public void getVerticalHessian(ImageProcessor ip, double tolerance) {
    if (!(ip.getPixels() instanceof float[])) {
      throw new IllegalArgumentException("Float image required");
    }

    float[] floatPixels = (float[]) ip.getPixels();
    int width = ip.getWidth();
    int height = ip.getHeight();
    double[] line = new double[height];

    for (int x = 0; (x < width); x++) {
      getColumn(ip, x, line);
      getSplineInterpolationCoefficients(line, tolerance);
      getHessian(line);
      putColumn(ip, x, line);
      stepProgressBar();
    }
  } /* end getVerticalHessian */
Example #12
 /** Adds the image in 'ip' to the end of the stack. */
 public void addSlice(String sliceLabel, ImageProcessor ip) {
   if (ip.getWidth() != width || ip.getHeight() != height)
     throw new IllegalArgumentException("Dimensions do not match");
   if (nSlices == 0) {
     cm = ip.getColorModel();
     min = ip.getMin();
     max = ip.getMax();
   }
   addSlice(sliceLabel, ip.getPixels());
 }
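A short usage sketch for this kind of addSlice method: build the stack slice by slice, then wrap it in an ImagePlus. The dimensions are illustrative:

 int width = 256, height = 256, depth = 10;
 ImageStack stack = new ImageStack(width, height);
 for (int z = 0; z < depth; z++) {
   // Each processor must match the stack dimensions, otherwise
   // addSlice throws IllegalArgumentException.
   stack.addSlice("slice " + (z + 1), new FloatProcessor(width, height));
 }
 ImagePlus result = new ImagePlus("stack", stack);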
Example #13
  void Mean(ImagePlus imp, int radius, double par1, double par2, boolean doIwhite) {
    // See: Image Processing Learning Resources HIPR2
    // http://homepages.inf.ed.ac.uk/rbf/HIPR2/adpthrsh.htm
    ImagePlus Meanimp;
    ImageProcessor ip = imp.getProcessor(), ipMean;
    int c_value = 0;
    byte object;
    byte backg;

    if (par1 != 0) {
      IJ.log("Mean: changed c_value from :" + c_value + "  to:" + par1);
      c_value = (int) par1;
    }

    if (doIwhite) {
      object = (byte) 0xff;
      backg = (byte) 0;
    } else {
      object = (byte) 0;
      backg = (byte) 0xff;
    }

    Meanimp = duplicateImage(ip);
    ImageConverter ic = new ImageConverter(Meanimp);
    ic.convertToGray32();

    ipMean = Meanimp.getProcessor();
    RankFilters rf = new RankFilters();
    rf.rank(ipMean, radius, rf.MEAN); // Mean
    // Meanimp.show();
    byte[] pixels = (byte[]) ip.getPixels();
    float[] mean = (float[]) ipMean.getPixels();

    for (int i = 0; i < pixels.length; i++)
      pixels[i] = ((int) (pixels[i] & 0xff) > (int) (mean[i] - c_value)) ? object : backg;
    // imp.updateAndDraw();
    return;
  }
Example #14
File: Binary.java  Project: kkkkxu/BioImage
 // Binary fill by Gabriel Landini, G.Landini at bham.ac.uk
 // 21/May/2008
 void fill(ImageProcessor ip, int foreground, int background) {
   int width = ip.getWidth();
   int height = ip.getHeight();
   FloodFiller ff = new FloodFiller(ip);
   ip.setColor(127);
   for (int y = 0; y < height; y++) {
     if (ip.getPixel(0, y) == background) ff.fill(0, y);
     if (ip.getPixel(width - 1, y) == background) ff.fill(width - 1, y);
   }
   for (int x = 0; x < width; x++) {
     if (ip.getPixel(x, 0) == background) ff.fill(x, 0);
     if (ip.getPixel(x, height - 1) == background) ff.fill(x, height - 1);
   }
   byte[] pixels = (byte[]) ip.getPixels();
   int n = width * height;
   for (int i = 0; i < n; i++) {
     if (pixels[i] == 127) pixels[i] = (byte) background;
     else pixels[i] = (byte) foreground;
   }
 }
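A typical call for this fill routine on a binary 8-bit image, assuming white (255) objects on a black (0) background:

 // Background pixels not reachable from the image border (i.e. holes)
 // end up as foreground; border-connected background stays background.
 fill(ip, 255, 0);
 // For black objects on a white background the arguments would be swapped: fill(ip, 0, 255);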
Example #15
 void calculateMoments(ImageProcessor ip, int minThreshold, int maxThreshold, float[] cTable) {
   byte[] pixels = (byte[]) ip.getPixels();
   byte[] mask = ip.getMaskArray();
   int v, i, mi;
   double dv, dv2, sum1 = 0.0, sum2 = 0.0, sum3 = 0.0, sum4 = 0.0, xsum = 0.0, ysum = 0.0;
   for (int y = ry, my = 0; y < (ry + rh); y++, my++) {
     i = y * width + rx;
     mi = my * rw;
     for (int x = rx; x < (rx + rw); x++) {
       if (mask == null || mask[mi++] != 0) {
         v = pixels[i] & 255;
         if (v >= minThreshold && v <= maxThreshold) {
           dv = ((cTable != null) ? cTable[v] : v) + Double.MIN_VALUE;
           dv2 = dv * dv;
           sum1 += dv;
           sum2 += dv2;
           sum3 += dv * dv2;
           sum4 += dv2 * dv2;
           xsum += x * dv;
           ysum += y * dv;
         }
       }
       i++;
     }
   }
   double mean2 = mean * mean;
   double variance = sum2 / pixelCount - mean2;
   double sDeviation = Math.sqrt(variance);
   skewness =
       ((sum3 - 3.0 * mean * sum2) / pixelCount + 2.0 * mean * mean2) / (variance * sDeviation);
   kurtosis =
       (((sum4 - 4.0 * mean * sum3 + 6.0 * mean2 * sum2) / pixelCount - 3.0 * mean2 * mean2)
               / (variance * variance)
           - 3.0);
   xCenterOfMass = xsum / sum1 + 0.5;
   yCenterOfMass = ysum / sum1 + 0.5;
   if (cal != null) {
     xCenterOfMass = cal.getX(xCenterOfMass);
     yCenterOfMass = cal.getY(yCenterOfMass, height);
   }
 }
Example #16
 public ImageProcessor getMask() {
   if (cachedMask != null && cachedMask.getPixels() != null) return cachedMask;
   ImageProcessor mask = new ByteProcessor(width, height);
   double a = width / 2.0, b = height / 2.0;
   double a2 = a * a, b2 = b * b;
   a -= 0.5;
   b -= 0.5;
   double xx, yy;
   int offset;
   byte[] pixels = (byte[]) mask.getPixels();
   for (int y = 0; y < height; y++) {
     offset = y * width;
     for (int x = 0; x < width; x++) {
       xx = x - a;
       yy = y - b;
       if ((xx * xx / a2 + yy * yy / b2) <= 1.0) pixels[offset + x] = -1;
     }
   }
   cachedMask = mask;
   return mask;
 }
Example #17
 int[] readJPEG(InputStream in) throws IOException {
   BufferedImage bi = ImageIO.read(in);
   ImageProcessor ip = new ColorProcessor(bi);
   return (int[]) ip.getPixels();
 }
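A usage sketch: the returned int[] holds one packed 0xAARRGGBB value per pixel, as produced by ColorProcessor. The file name is an assumption for illustration:

 try (InputStream in = new FileInputStream("photo.jpg")) {
   int[] argb = readJPEG(in);
   // Extract the red channel of the first pixel from the packed value.
   int red = (argb[0] >> 16) & 0xff;
 } catch (IOException e) {
   IJ.error("Could not read JPEG: " + e.getMessage());
 }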
Example #18
 /**
  * LocalThicknesstoCleanedUpLocalThickness
  *
  * <p>Input: 3D Local Thickness map (32-bit stack)
  *
  * <p>Output: Same as input with border voxels corrected for "jaggies." Non-background voxels
  * adjacent to background voxels have their local thickness values replaced by the average of
  * their non-background neighbors that do not border background points. Bob Dougherty August 1,
  * 2007
  *
  * <ul>
  *   <li>August 10. Version 3 This version also multiplies the local thickness by 2 to conform
  *       with the official definition of local thickness.
  * </ul>
  */
 private ImagePlus localThicknesstoCleanedUpLocalThickness(ImagePlus imp, float[][] s) {
   final int w = imp.getWidth();
   final int h = imp.getHeight();
   final int d = imp.getStackSize();
   IJ.showStatus("Cleaning up local thickness...");
   // Create 32 bit floating point stack for output, sNew.
   ImageStack newStack = new ImageStack(w, h);
   sNew = new float[d][];
   for (int k = 0; k < d; k++) {
     ImageProcessor ipk = new FloatProcessor(w, h);
     newStack.addSlice(null, ipk);
     sNew[k] = (float[]) ipk.getPixels();
   }
   /*
    * First set the output array to flags: 0 for a background point -1 for
    * a non-background point that borders a background point s (input data)
    * for an interior non-background point
    */
   for (int k = 0; k < d; k++) {
     for (int j = 0; j < h; j++) {
       final int wj = w * j;
       for (int i = 0; i < w; i++) {
         sNew[k][i + wj] = setFlag(s, i, j, k, w, h, d);
       } // i
     } // j
   } // k
   /*
    * Process the surface points. Initially set results to negative values
    * to be able to avoid including them in averages of for subsequent
    * points. During the calculation, positive values in sNew are interior
    * non-background local thicknesses. Negative values are surface points.
    * In this case the value might be -1 (not processed yet) or -result,
    * where result is the average of the neighboring interior points.
    * Negative values are excluded from the averaging.
    */
   for (int k = 0; k < d; k++) {
     for (int j = 0; j < h; j++) {
       final int wj = w * j;
       for (int i = 0; i < w; i++) {
         final int ind = i + wj;
         if (sNew[k][ind] == -1) {
           sNew[k][ind] = -averageInteriorNeighbors(s, i, j, k, w, h, d);
         }
       } // i
     } // j
   } // k
   // Fix the negative values and double the results
   for (int k = 0; k < d; k++) {
     for (int j = 0; j < h; j++) {
       final int wj = w * j;
       for (int i = 0; i < w; i++) {
         final int ind = i + wj;
         sNew[k][ind] = (float) Math.abs(sNew[k][ind]);
       } // i
     } // j
   } // k
   IJ.showStatus("Clean Up Local Thickness complete");
   String title = stripExtension(imp.getTitle());
   ImagePlus impOut = new ImagePlus(title + "_CL", newStack);
   final double vW = imp.getCalibration().pixelWidth;
   // calibrate the pixel values to pixel width
   // so that thicknesses represent real units (not pixels)
   for (int z = 0; z < d; z++) {
     impOut.setSlice(z + 1);
     impOut.getProcessor().multiply(vW);
   }
   return impOut;
 }
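stripExtension(imp.getTitle()) is called above but not shown; a plausible sketch (an assumption, not the original helper) that drops a trailing file extension from the title:

  // Hypothetical helper: "bone.tif" -> "bone", "stack" -> "stack".
  private String stripExtension(String name) {
    int dot = name.lastIndexOf('.');
    return (dot > 0) ? name.substring(0, dot) : name;
  }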
Example #19
  /**
   * DistanceMaptoDistanceRidge
   *
   * <p>Output: Distance ridge resulting from a local scan of the distance map. Overwrites the
   * input.
   *
   * <p>Note: Non-background points that are not part of the distance ridge are assigned a
   * VERY_SMALL_VALUE. This is used for subsequent processing by other plugins to find the local
   * thickness. Bob Dougherty August 10, 2006
   *
   * <ul>
   *   <li>Version 1: August 10-11, 2006. Subtracts 0.5 from the distances.
   *   <li>Version 1.01: September 6, 2006. Corrected some typos in the comments.
   *   <li>Version 1.01: Sept. 7, 2006. More tiny edits.
   *   <li>Version 2: Sept. 25, 2006. Creates a separate image stack for symmetry. <br>
   *       Temporary version that is very conservative. <br>
   *       Admittedly does not produce much improvement on real images.
   *   <li>Version 3: Sept. 30, 2006. Ball calculations based on grid points. Should be much more
   *       accurate.
   *   <li>Version 3.1 Oct. 1, 2006. Faster scanning of search points.
   * </ul>
   *
   * @param imp 3D Distance map (32-bit stack)
   */
  private void distanceMaptoDistanceRidge(ImagePlus imp, float[][] s) {
    final int w = imp.getWidth();
    final int h = imp.getHeight();
    final int d = imp.getStackSize();
    sNew = new float[d][];
    for (int k = 0; k < d; k++) {
      ImageProcessor ipk = new FloatProcessor(w, h);
      sNew[k] = (float[]) ipk.getPixels();
    }

    // Do it
    int k1, j1, i1, dz, dy, dx;
    boolean notRidgePoint;
    float[] sk1;
    float[] sk, skNew;
    int sk0Sq, sk0SqInd, sk1Sq;
    // Find the largest distance in the data
    IJ.showStatus("Distance Ridge: scanning the data");
    float distMax = 0;
    for (int k = 0; k < d; k++) {
      sk = s[k];
      for (int j = 0; j < h; j++) {
        final int wj = w * j;
        for (int i = 0; i < w; i++) {
          final int ind = i + wj;
          if (sk[ind] > distMax) distMax = sk[ind];
        }
      }
    }
    int rSqMax = (int) (distMax * distMax + 0.5f) + 1;
    boolean[] occurs = new boolean[rSqMax];
    for (int i = 0; i < rSqMax; i++) occurs[i] = false;
    for (int k = 0; k < d; k++) {
      sk = s[k];
      for (int j = 0; j < h; j++) {
        final int wj = w * j;
        for (int i = 0; i < w; i++) {
          final int ind = i + wj;
          occurs[(int) (sk[ind] * sk[ind] + 0.5f)] = true;
        }
      }
    }
    int numRadii = 0;
    for (int i = 0; i < rSqMax; i++) {
      if (occurs[i]) numRadii++;
    }
    // Make an index of the distance-squared values
    int[] distSqIndex = new int[rSqMax];
    int[] distSqValues = new int[numRadii];
    int indDS = 0;
    for (int i = 0; i < rSqMax; i++) {
      if (occurs[i]) {
        distSqIndex[i] = indDS;
        distSqValues[indDS++] = i;
      }
    }
    /*
     * Build template The first index of the template is the number of
     * nonzero components in the offset from the test point to the remote
     * point. The second index is the radii index (of the test point). The
     * value of the template is the minimum square radius of the remote
     * point required to cover the ball of the test point.
     */
    IJ.showStatus("Distance Ridge: creating search templates");
    int[][] rSqTemplate = createTemplate(distSqValues);
    int numCompZ, numCompY, numCompX, numComp;
    for (int k = 0; k < d; k++) {
      IJ.showStatus("Distance Ridge: processing slice " + (k + 1) + "/" + d);
      // IJ.showProgress(k/(1.*d));
      sk = s[k];
      skNew = sNew[k];
      for (int j = 0; j < h; j++) {
        final int wj = w * j;
        for (int i = 0; i < w; i++) {
          final int ind = i + wj;
          if (sk[ind] > 0) {
            notRidgePoint = false;
            sk0Sq = (int) (sk[ind] * sk[ind] + 0.5f);
            sk0SqInd = distSqIndex[sk0Sq];
            for (dz = -1; dz <= 1; dz++) {
              k1 = k + dz;
              if ((k1 >= 0) && (k1 < d)) {
                sk1 = s[k1];
                if (dz == 0) {
                  numCompZ = 0;
                } else {
                  numCompZ = 1;
                }
                for (dy = -1; dy <= 1; dy++) {
                  j1 = j + dy;
                  final int wj1 = w * j1;
                  if ((j1 >= 0) && (j1 < h)) {
                    if (dy == 0) {
                      numCompY = 0;
                    } else {
                      numCompY = 1;
                    }
                    for (dx = -1; dx <= 1; dx++) {
                      i1 = i + dx;
                      if ((i1 >= 0) && (i1 < w)) {
                        if (dx == 0) {
                          numCompX = 0;
                        } else {
                          numCompX = 1;
                        }
                        numComp = numCompX + numCompY + numCompZ;
                        if (numComp > 0) {
                          final float sk1i1wj1 = sk1[i1 + wj1];
                          sk1Sq = (int) (sk1i1wj1 * sk1i1wj1 + 0.5f);
                          if (sk1Sq >= rSqTemplate[numComp - 1][sk0SqInd]) notRidgePoint = true;
                        }
                      } // if in grid for i1
                      if (notRidgePoint) break;
                    } // dx
                  } // if in grid for j1
                  if (notRidgePoint) break;
                } // dy
              } // if in grid for k1
              if (notRidgePoint) break;
            } // dz
            if (!notRidgePoint) skNew[ind] = sk[ind];
          } // if not in background
        } // i
      } // j
    } // k
    IJ.showStatus("Distance Ridge complete");
     // The result is held in the sNew field; note that reassigning the
     // parameter here does not change the caller's array.
     s = sNew;
  }
Example #20
  /**
   * Saito-Toriwaki algorithm for Euclidean Distance Transformation. Direct application of Algorithm
   * 1. Bob Dougherty 8/8/2006
   *
   * <ul>
   *   <li>Version S1A: lower memory usage.
   *   <li>Version S1A.1 A fixed indexing bug for 666-bin data set
   *   <li>Version S1A.2 Aug. 9, 2006. Changed noResult value.
   *   <li>Version S1B Aug. 9, 2006. Faster.
   *   <li>Version S1B.1 Sept. 6, 2006. Changed comments.
   *   <li>Version S1C Oct. 1, 2006. Option for inverse case. <br>
   *       Fixed inverse behavior in y and z directions.
   *   <li>Version D July 30, 2007. Multithread processing for step 2.
   * </ul>
   *
   * <p>This version assumes the input stack is already in memory, 8-bit, and outputs to a new
   * 32-bit stack. Versions that are more stingy with memory may be forthcoming.
   *
   * @param imp 8-bit (binary) ImagePlus
   */
  private float[][] geometryToDistanceMap(ImagePlus imp, boolean inv) {
    final int w = imp.getWidth();
    final int h = imp.getHeight();
    final int d = imp.getStackSize();
    int nThreads = Runtime.getRuntime().availableProcessors();

    // Create references to input data
    ImageStack stack = imp.getStack();
    byte[][] data = new byte[d][];
    for (int k = 0; k < d; k++) data[k] = (byte[]) stack.getPixels(k + 1);

    // Create 32 bit floating point stack for output, s. Will also use it
    // for g in Transformation 1.
    float[][] s = new float[d][];
    for (int k = 0; k < d; k++) {
      ImageProcessor ipk = new FloatProcessor(w, h);
      s[k] = (float[]) ipk.getPixels();
    }
    float[] sk;
    // Transformation 1. Use s to store g.
    IJ.showStatus("EDT transformation 1/3");
    Step1Thread[] s1t = new Step1Thread[nThreads];
    for (int thread = 0; thread < nThreads; thread++) {
      s1t[thread] = new Step1Thread(thread, nThreads, w, h, d, inv, s, data);
      s1t[thread].start();
    }
    try {
      for (int thread = 0; thread < nThreads; thread++) {
        s1t[thread].join();
      }
    } catch (InterruptedException ie) {
      IJ.error("A thread was interrupted in step 1 .");
    }
    // Transformation 2. g (in s) -> h (in s)
    IJ.showStatus("EDT transformation 2/3");
    Step2Thread[] s2t = new Step2Thread[nThreads];
    for (int thread = 0; thread < nThreads; thread++) {
      s2t[thread] = new Step2Thread(thread, nThreads, w, h, d, s);
      s2t[thread].start();
    }
    try {
      for (int thread = 0; thread < nThreads; thread++) {
        s2t[thread].join();
      }
    } catch (InterruptedException ie) {
      IJ.error("A thread was interrupted in step 2 .");
    }
    // Transformation 3. h (in s) -> s
    IJ.showStatus("EDT transformation 3/3");
    Step3Thread[] s3t = new Step3Thread[nThreads];
    for (int thread = 0; thread < nThreads; thread++) {
      s3t[thread] = new Step3Thread(thread, nThreads, w, h, d, inv, s, data);
      s3t[thread].start();
    }
    try {
      for (int thread = 0; thread < nThreads; thread++) {
        s3t[thread].join();
      }
    } catch (InterruptedException ie) {
      IJ.error("A thread was interrupted in step 3 .");
    }
    // Find the largest distance for scaling
    // Also fill in the background values.
    float distMax = 0;
    final int wh = w * h;
    float dist;
    for (int k = 0; k < d; k++) {
      sk = s[k];
      for (int ind = 0; ind < wh; ind++) {
        if (((data[k][ind] & 255) < 128) ^ inv) {
          sk[ind] = 0;
        } else {
          dist = (float) Math.sqrt(sk[ind]);
          sk[ind] = dist;
          distMax = (dist > distMax) ? dist : distMax;
        }
      }
    }
    IJ.showProgress(1.0);
    IJ.showStatus("Done");
    return s;
  }
Example #21
  /**
   * Performs particle analysis on the specified ImagePlus and ImageProcessor. Returns false if
   * there is an error.
   */
  public boolean analyze(ImagePlus imp, ImageProcessor ip) {
    if (this.imp == null) this.imp = imp;
    showResults = (options & SHOW_RESULTS) != 0;
    excludeEdgeParticles = (options & EXCLUDE_EDGE_PARTICLES) != 0;
    resetCounter = (options & CLEAR_WORKSHEET) != 0;
    showProgress = (options & SHOW_PROGRESS) != 0;
    floodFill = (options & INCLUDE_HOLES) == 0;
    recordStarts = (options & RECORD_STARTS) != 0;
    addToManager = (options & ADD_TO_MANAGER) != 0;
    displaySummary = (options & DISPLAY_SUMMARY) != 0;
    inSituShow = (options & IN_SITU_SHOW) != 0;
    outputImage = null;
    ip.snapshot();
    ip.setProgressBar(null);
    if (Analyzer.isRedirectImage()) {
      redirectImp = Analyzer.getRedirectImage(imp);
      if (redirectImp == null) return false;
      int depth = redirectImp.getStackSize();
      if (depth > 1 && depth == imp.getStackSize()) {
        ImageStack redirectStack = redirectImp.getStack();
        redirectIP = redirectStack.getProcessor(imp.getCurrentSlice());
      } else redirectIP = redirectImp.getProcessor();
    } else if (imp.getType() == ImagePlus.COLOR_RGB) {
      ImagePlus original = (ImagePlus) imp.getProperty("OriginalImage");
      if (original != null
          && original.getWidth() == imp.getWidth()
          && original.getHeight() == imp.getHeight()) {
        redirectImp = original;
        redirectIP = original.getProcessor();
      }
    }
    if (!setThresholdLevels(imp, ip)) return false;
    width = ip.getWidth();
    height = ip.getHeight();
    if (!(showChoice == NOTHING || showChoice == OVERLAY_OUTLINES || showChoice == OVERLAY_MASKS)) {
      blackBackground = Prefs.blackBackground && inSituShow;
      if (slice == 1) outlines = new ImageStack(width, height);
      if (showChoice == ROI_MASKS) drawIP = new ShortProcessor(width, height);
      else drawIP = new ByteProcessor(width, height);
      drawIP.setLineWidth(lineWidth);
      if (showChoice == ROI_MASKS) {
      } // Place holder for now...
      else if (showChoice == MASKS && !blackBackground) drawIP.invertLut();
      else if (showChoice == OUTLINES) {
        if (!inSituShow) {
          if (customLut == null) makeCustomLut();
          drawIP.setColorModel(customLut);
        }
        drawIP.setFont(new Font("SansSerif", Font.PLAIN, fontSize));
        if (fontSize > 12 && inSituShow) drawIP.setAntialiasedText(true);
      }
      outlines.addSlice(null, drawIP);

      if (showChoice == ROI_MASKS || blackBackground) {
        drawIP.setColor(Color.black);
        drawIP.fill();
        drawIP.setColor(Color.white);
      } else {
        drawIP.setColor(Color.white);
        drawIP.fill();
        drawIP.setColor(Color.black);
      }
    }
    calibration = redirectImp != null ? redirectImp.getCalibration() : imp.getCalibration();

    if (rt == null) {
      rt = Analyzer.getResultsTable();
      analyzer = new Analyzer(imp);
    } else analyzer = new Analyzer(imp, measurements, rt);
    if (resetCounter && slice == 1) {
      if (!Analyzer.resetCounter()) return false;
    }
    beginningCount = Analyzer.getCounter();

    byte[] pixels = null;
    if (ip instanceof ByteProcessor) pixels = (byte[]) ip.getPixels();
    if (r == null) {
      r = ip.getRoi();
      mask = ip.getMask();
      if (displaySummary) {
        if (mask != null) totalArea = ImageStatistics.getStatistics(ip, AREA, calibration).area;
        else totalArea = r.width * calibration.pixelWidth * r.height * calibration.pixelHeight;
      }
    }
    minX = r.x;
    maxX = r.x + r.width;
    minY = r.y;
    maxY = r.y + r.height;
    if (r.width < width || r.height < height || mask != null) {
      if (!eraseOutsideRoi(ip, r, mask)) return false;
    }
    int offset;
    double value;
    int inc = Math.max(r.height / 25, 1);
    int mi = 0;
    ImageWindow win = imp.getWindow();
    if (win != null) win.running = true;
    if (measurements == 0) measurements = Analyzer.getMeasurements();
    if (showChoice == ELLIPSES) measurements |= ELLIPSE;
    measurements &= ~LIMIT; // ignore "Limit to Threshold"
    roiNeedsImage =
        (measurements & PERIMETER) != 0
            || (measurements & SHAPE_DESCRIPTORS) != 0
            || (measurements & FERET) != 0;
    particleCount = 0;
    wand = new Wand(ip);
    pf = new PolygonFiller();
    if (floodFill) {
      ImageProcessor ipf = ip.duplicate();
      ipf.setValue(fillColor);
      ff = new FloodFiller(ipf);
    }
    roiType = Wand.allPoints() ? Roi.FREEROI : Roi.TRACED_ROI;

    for (int y = r.y; y < (r.y + r.height); y++) {
      offset = y * width;
      for (int x = r.x; x < (r.x + r.width); x++) {
        if (pixels != null) value = pixels[offset + x] & 255;
        else if (imageType == SHORT) value = ip.getPixel(x, y);
        else value = ip.getPixelValue(x, y);
        if (value >= level1 && value <= level2) analyzeParticle(x, y, imp, ip);
      }
      if (showProgress && ((y % inc) == 0)) IJ.showProgress((double) (y - r.y) / r.height);
      if (win != null) canceled = !win.running;
      if (canceled) {
        Macro.abort();
        break;
      }
    }
    if (showProgress) IJ.showProgress(1.0);
    if (showResults) rt.updateResults();
    imp.killRoi();
    ip.resetRoi();
    ip.reset();
    if (displaySummary && IJ.getInstance() != null) updateSliceSummary();
    if (addToManager && roiManager != null) roiManager.setEditMode(imp, true);
    maxParticleCount = (particleCount > maxParticleCount) ? particleCount : maxParticleCount;
    totalCount += particleCount;
    if (!canceled) showResults();
    return true;
  }
Example #22
  void Sauvola(ImagePlus imp, int radius, double par1, double par2, boolean doIwhite) {
    // Sauvola recommends K_VALUE = 0.5 and R_VALUE = 128.
    // This is a modification of Niblack's thresholding method.
    // Sauvola J. and Pietikäinen M. (2000) "Adaptive Document Image Binarization"
    // Pattern Recognition, 33(2): 225-236
    // http://www.ee.oulu.fi/mvg/publications/show_pdf.php?ID=24
    // Ported to ImageJ plugin from E Celebi's fourier_0.8 routines
    // This version uses a circular local window, instead of a rectangular one

    ImagePlus Meanimp, Varimp;
    ImageProcessor ip = imp.getProcessor(), ipMean, ipVar;
    double k_value = 0.5;
    double r_value = 128;
    byte object;
    byte backg;

    if (par1 != 0) {
      IJ.log("Sauvola: changed k_value from :" + k_value + "  to:" + par1);
      k_value = par1;
    }

    if (par2 != 0) {
      IJ.log("Sauvola: changed r_value from :" + r_value + "  to:" + par2);
      r_value = par2;
    }

    if (doIwhite) {
      object = (byte) 0xff;
      backg = (byte) 0;
    } else {
      object = (byte) 0;
      backg = (byte) 0xff;
    }

    Meanimp = duplicateImage(ip);
    ImageConverter ic = new ImageConverter(Meanimp);
    ic.convertToGray32();

    ipMean = Meanimp.getProcessor();
    RankFilters rf = new RankFilters();
    rf.rank(ipMean, radius, rf.MEAN); // Mean
    // Meanimp.show();
    Varimp = duplicateImage(ip);
    ic = new ImageConverter(Varimp);
    ic.convertToGray32();
    ipVar = Varimp.getProcessor();
    rf.rank(ipVar, radius, rf.VARIANCE); // Variance
    // Varimp.show();
    byte[] pixels = (byte[]) ip.getPixels();
    float[] mean = (float[]) ipMean.getPixels();
    float[] var = (float[]) ipVar.getPixels();

    for (int i = 0; i < pixels.length; i++)
      pixels[i] =
          ((int) (pixels[i] & 0xff)
                  > (int) (mean[i] * (1.0 + k_value * ((Math.sqrt(var[i]) / r_value) - 1.0))))
              ? object
              : backg;
    // imp.updateAndDraw();
    return;
  }
Example #23
  void Phansalkar(ImagePlus imp, int radius, double par1, double par2, boolean doIwhite) {
    // This is a modification of Sauvola's thresholding method to deal with low contrast images.
    // Phansalkar N. et al. "Adaptive local thresholding for detection of nuclei in diversity
    // stained cytology images." International Conference on Communications and Signal Processing
    // (ICCSP), 2011, 218-220.
    // In this method, the threshold t = mean*(1+p*exp(-q*mean)+k*((stdev/r)-1))
    // Phansalkar recommends k = 0.25, r = 0.5, p = 2 and q = 10. In this plugin, k and r are the
    // parameters 1 and 2 respectively, but the values of p and q are fixed.
    //
    // Implemented from Phansalkar's paper description by G. Landini
    // This version uses a circular local window, instead of a rectangular one

    ImagePlus Meanimp, Varimp, Orimp;
    ImageProcessor ip = imp.getProcessor(), ipMean, ipVar, ipOri;
    double k_value = 0.25;
    double r_value = 0.5;
    double p_value = 2.0;
    double q_value = 10.0;
    byte object;
    byte backg;

    if (par1 != 0) {
      IJ.log("Phansalkar: changed k_value from :" + k_value + "  to:" + par1);
      k_value = par1;
    }

    if (par2 != 0) {
      IJ.log("Phansalkar: changed r_value from :" + r_value + "  to:" + par2);
      r_value = par2;
    }

    if (doIwhite) {
      object = (byte) 0xff;
      backg = (byte) 0;
    } else {
      object = (byte) 0;
      backg = (byte) 0xff;
    }

    Meanimp = duplicateImage(ip);
    ContrastEnhancer ce = new ContrastEnhancer();
    ce.stretchHistogram(Meanimp, 0.0);
    ImageConverter ic = new ImageConverter(Meanimp);
    ic.convertToGray32();
    ipMean = Meanimp.getProcessor();
    ipMean.multiply(1.0 / 255);

    Orimp = duplicateImage(ip);
    ce.stretchHistogram(Orimp, 0.0);
    ic = new ImageConverter(Orimp);
    ic.convertToGray32();
    ipOri = Orimp.getProcessor();
    ipOri.multiply(1.0 / 255); // original to compare
    // Orimp.show();

    RankFilters rf = new RankFilters();
    rf.rank(ipMean, radius, rf.MEAN); // Mean

    // Meanimp.show();
    Varimp = duplicateImage(ip);
    ce.stretchHistogram(Varimp, 0.0);
    ic = new ImageConverter(Varimp);
    ic.convertToGray32();
    ipVar = Varimp.getProcessor();
    ipVar.multiply(1.0 / 255);

    rf.rank(ipVar, radius, rf.VARIANCE); // Variance
    ipVar.sqrt(); // SD (convert the local variance to standard deviation)

    // Varimp.show();
    byte[] pixels = (byte[]) ip.getPixels();
    float[] ori = (float[]) ipOri.getPixels();
    float[] mean = (float[]) ipMean.getPixels();
    float[] sd = (float[]) ipVar.getPixels();

    for (int i = 0; i < pixels.length; i++)
      pixels[i] =
          ((ori[i])
                  > (mean[i]
                      * (1.0
                          + p_value * Math.exp(-q_value * mean[i])
                          + k_value * ((sd[i] / r_value) - 1.0))))
              ? object
              : backg;
    // imp.updateAndDraw();
    return;
  }
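All of these local-threshold methods share the same signature, so a call looks the same for each of them. A usage sketch with an illustrative radius; passing 0 for par1 and par2 keeps the recommended k and r values:

 // imp is assumed to be an 8-bit ImagePlus; 15-pixel circular window,
 // white objects on a black background.
 Phansalkar(imp, 15, 0, 0, true);
 imp.updateAndDraw();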
Example #24
  void Otsu(ImagePlus imp, int radius, double par1, double par2, boolean doIwhite) {
    // Otsu's threshold algorithm
    // C++ code by Jordan Bevik <*****@*****.**>
    // ported to ImageJ plugin by G.Landini. Same algorithm as in Auto_Threshold, this time on local
    // circular regions
    int[] data;
    int w = imp.getWidth();
    int h = imp.getHeight();
    int position;
    int radiusx2 = radius * 2;
    ImageProcessor ip = imp.getProcessor();
    byte[] pixels = (byte[]) ip.getPixels();
    // need this to avoid changing the image data (and further histograms)
    byte[] pixelsOut = new byte[pixels.length];
    byte object;
    byte backg;

    if (doIwhite) {
      object = (byte) 0xff;
      backg = (byte) 0;
    } else {
      object = (byte) 0;
      backg = (byte) 0xff;
    }

    int k, kStar; // k = the current threshold; kStar = optimal threshold
    int N1, N; // N1 = # points with intensity <=k; N = total number of points
    double BCV, BCVmax; // The current Between Class Variance and maximum BCV
    double num, denom; // temporary bookkeeping
    int Sk; // The total intensity for all histogram points <=k
    int S; // The total intensity of the image
    int L = 256; // Number of grey levels. Need to change here if modifying for >8-bit images
    int roiy;

    Roi roi = new OvalRoi(0, 0, radiusx2, radiusx2);
    // ip.setRoi(roi);
    for (int y = 0; y < h; y++) {
      IJ.showProgress(
          (double) (y) / (h - 1)); // this method is slow, so let's show the progress bar
      roiy = y - radius;
      for (int x = 0; x < w; x++) {
        roi.setLocation(x - radius, roiy);
        ip.setRoi(roi);
        // ip.setRoi(new OvalRoi(x-radius, roiy, radiusx2, radiusx2));
        position = x + y * w;
        data = ip.getHistogram();

        // Initialize values:
        S = N = 0;
        for (k = 0; k < L; k++) {
          S += k * data[k]; // Total histogram intensity
          N += data[k]; // Total number of data points
        }

        Sk = 0;
        N1 = data[0]; // The entry for zero intensity
        BCV = 0;
        BCVmax = 0;
        kStar = 0;

        // Look at each possible threshold value,
        // calculate the between-class variance, and decide if it's a max
        for (k = 1; k < L - 1; k++) { // No need to check endpoints k = 0 or k = L-1
          Sk += k * data[k];
          N1 += data[k];

          // The float casting here is to avoid compiler warning about loss of precision and
          // will prevent overflow in the case of large saturated images
          denom = (double) (N1) * (N - N1); // Maximum value of denom is (N^2)/4 =  approx. 3E10

          if (denom != 0) {
            // Float here is to avoid loss of precision when dividing
            num = ((double) N1 / N) * S - Sk; // Maximum value of num =  255*N = approx 8E7
            BCV = (num * num) / denom;
          } else BCV = 0;

          if (BCV >= BCVmax) { // Assign the best threshold found so far
            BCVmax = BCV;
            kStar = k;
          }
        }
        // kStar += 1;	// Use QTI convention that intensity -> 1 if intensity >= k
        // (the algorithm was developed for I-> 1 if I <= k.)
        // return kStar;
        pixelsOut[position] = ((int) (pixels[position] & 0xff) > kStar) ? object : backg;
      }
    }
    for (position = 0; position < w * h; position++)
      pixels[position] = pixelsOut[position]; // update with thresholded pixels
  }
Example #25
  void Niblack(ImagePlus imp, int radius, double par1, double par2, boolean doIwhite) {
    // Niblack recommends K_VALUE = -0.2 for images with black foreground
    // objects, and K_VALUE = +0.2 for images with white foreground objects.
    //  Niblack W. (1986) "An introduction to Digital Image Processing" Prentice-Hall.
    // Ported to ImageJ plugin from E Celebi's fourier_0.8 routines
    // This version uses a circular local window, instead of a rectangular one

    ImagePlus Meanimp, Varimp;
    ImageProcessor ip = imp.getProcessor(), ipMean, ipVar;
    double k_value;
    int c_value = 0;

    byte object;
    byte backg;

    if (doIwhite) {
      k_value = 0.2;
      object = (byte) 0xff;
      backg = (byte) 0;
    } else {
      k_value = -0.2;
      object = (byte) 0;
      backg = (byte) 0xff;
    }

    if (par1 != 0) {
      IJ.log("Niblack: changed k_value from :" + k_value + "  to:" + par1);
      k_value = par1;
    }

    if (par2 != 0) {
      IJ.log(
          "Niblack: changed c_value from :"
              + c_value
              + "  to:"
              + par2); // requested feature, not in original
      c_value = (int) par2;
    }

    Meanimp = duplicateImage(ip);
    ImageConverter ic = new ImageConverter(Meanimp);
    ic.convertToGray32();

    ipMean = Meanimp.getProcessor();
    RankFilters rf = new RankFilters();
    rf.rank(ipMean, radius, rf.MEAN); // Mean
    // Meanimp.show();
    Varimp = duplicateImage(ip);
    ic = new ImageConverter(Varimp);
    ic.convertToGray32();
    ipVar = Varimp.getProcessor();
    rf.rank(ipVar, radius, rf.VARIANCE); // Variance
    // Varimp.show();
    byte[] pixels = (byte[]) ip.getPixels();
    float[] mean = (float[]) ipMean.getPixels();
    float[] var = (float[]) ipVar.getPixels();

    for (int i = 0; i < pixels.length; i++)
      pixels[i] =
          ((int) (pixels[i] & 0xff) > (int) (mean[i] + k_value * Math.sqrt(var[i]) - c_value))
              ? object
              : backg;
    // imp.updateAndDraw();
    return;
  }
Example #26
  /*------------------------------------------------------------------*/
  void doIt(ImageProcessor ip) {
    int width = ip.getWidth();
    int height = ip.getHeight();
    double[] hLine = new double[width];
    double[] vLine = new double[height];

    if (!(ip.getPixels() instanceof float[])) {
      throw new IllegalArgumentException("Float image required");
    }
    switch (operation) {
      case GRADIENT_MAGNITUDE:
        {
          ImageProcessor h = ip.duplicate();
          ImageProcessor v = ip.duplicate();
          float[] floatPixels = (float[]) ip.getPixels();
          float[] floatPixelsH = (float[]) h.getPixels();
          float[] floatPixelsV = (float[]) v.getPixels();

          getHorizontalGradient(h, FLT_EPSILON);
          getVerticalGradient(v, FLT_EPSILON);
          for (int y = 0, k = 0; (y < height); y++) {
            for (int x = 0; (x < width); x++, k++) {
              floatPixels[k] =
                  (float)
                      Math.sqrt(
                          floatPixelsH[k] * floatPixelsH[k] + floatPixelsV[k] * floatPixelsV[k]);
            }
            stepProgressBar();
          }
        }
        break;
      case GRADIENT_DIRECTION:
        {
          ImageProcessor h = ip.duplicate();
          ImageProcessor v = ip.duplicate();
          float[] floatPixels = (float[]) ip.getPixels();
          float[] floatPixelsH = (float[]) h.getPixels();
          float[] floatPixelsV = (float[]) v.getPixels();

          getHorizontalGradient(h, FLT_EPSILON);
          getVerticalGradient(v, FLT_EPSILON);
          for (int y = 0, k = 0; (y < height); y++) {
            for (int x = 0; (x < width); x++, k++) {
              floatPixels[k] = (float) Math.atan2(floatPixelsH[k], floatPixelsV[k]);
            }
            stepProgressBar();
          }
        }
        break;
      case LAPLACIAN:
        {
          ImageProcessor hh = ip.duplicate();
          ImageProcessor vv = ip.duplicate();
          float[] floatPixels = (float[]) ip.getPixels();
          float[] floatPixelsHH = (float[]) hh.getPixels();
          float[] floatPixelsVV = (float[]) vv.getPixels();

          getHorizontalHessian(hh, FLT_EPSILON);
          getVerticalHessian(vv, FLT_EPSILON);
          for (int y = 0, k = 0; (y < height); y++) {
            for (int x = 0; (x < width); x++, k++) {
              floatPixels[k] = (float) (floatPixelsHH[k] + floatPixelsVV[k]);
            }
            stepProgressBar();
          }
        }
        break;
      case LARGEST_HESSIAN:
        {
          ImageProcessor hh = ip.duplicate();
          ImageProcessor vv = ip.duplicate();
          ImageProcessor hv = ip.duplicate();
          float[] floatPixels = (float[]) ip.getPixels();
          float[] floatPixelsHH = (float[]) hh.getPixels();
          float[] floatPixelsVV = (float[]) vv.getPixels();
          float[] floatPixelsHV = (float[]) hv.getPixels();

          getHorizontalHessian(hh, FLT_EPSILON);
          getVerticalHessian(vv, FLT_EPSILON);
          getCrossHessian(hv, FLT_EPSILON);
          for (int y = 0, k = 0; (y < height); y++) {
            for (int x = 0; (x < width); x++, k++) {
              floatPixels[k] =
                  (float)
                      (0.5
                          * (floatPixelsHH[k]
                              + floatPixelsVV[k]
                              + Math.sqrt(
                                  4.0 * floatPixelsHV[k] * floatPixelsHV[k]
                                      + (floatPixelsHH[k] - floatPixelsVV[k])
                                          * (floatPixelsHH[k] - floatPixelsVV[k]))));
            }
            stepProgressBar();
          }
        }
        break;
      case SMALLEST_HESSIAN:
        {
          ImageProcessor hh = ip.duplicate();
          ImageProcessor vv = ip.duplicate();
          ImageProcessor hv = ip.duplicate();
          float[] floatPixels = (float[]) ip.getPixels();
          float[] floatPixelsHH = (float[]) hh.getPixels();
          float[] floatPixelsVV = (float[]) vv.getPixels();
          float[] floatPixelsHV = (float[]) hv.getPixels();

          getHorizontalHessian(hh, FLT_EPSILON);
          getVerticalHessian(vv, FLT_EPSILON);
          getCrossHessian(hv, FLT_EPSILON);
          for (int y = 0, k = 0; (y < height); y++) {
            for (int x = 0; (x < width); x++, k++) {
              floatPixels[k] =
                  (float)
                      (0.5
                          * (floatPixelsHH[k]
                              + floatPixelsVV[k]
                              - Math.sqrt(
                                  4.0 * floatPixelsHV[k] * floatPixelsHV[k]
                                      + (floatPixelsHH[k] - floatPixelsVV[k])
                                          * (floatPixelsHH[k] - floatPixelsVV[k]))));
            }
            stepProgressBar();
          }
        }
        break;
      case HESSIAN_ORIENTATION:
        {
          ImageProcessor hh = ip.duplicate();
          ImageProcessor vv = ip.duplicate();
          ImageProcessor hv = ip.duplicate();
          float[] floatPixels = (float[]) ip.getPixels();
          float[] floatPixelsHH = (float[]) hh.getPixels();
          float[] floatPixelsVV = (float[]) vv.getPixels();
          float[] floatPixelsHV = (float[]) hv.getPixels();

          getHorizontalHessian(hh, FLT_EPSILON);
          getVerticalHessian(vv, FLT_EPSILON);
          getCrossHessian(hv, FLT_EPSILON);
          for (int y = 0, k = 0; (y < height); y++) {
            for (int x = 0; (x < width); x++, k++) {
              if (floatPixelsHV[k] < 0.0) {
                floatPixels[k] =
                    (float)
                        (-0.5
                            * Math.acos(
                                (floatPixelsHH[k] - floatPixelsVV[k])
                                    / Math.sqrt(
                                        4.0 * floatPixelsHV[k] * floatPixelsHV[k]
                                            + (floatPixelsHH[k] - floatPixelsVV[k])
                                                * (floatPixelsHH[k] - floatPixelsVV[k]))));
              } else {
                floatPixels[k] =
                    (float)
                        (0.5
                            * Math.acos(
                                (floatPixelsHH[k] - floatPixelsVV[k])
                                    / Math.sqrt(
                                        4.0 * floatPixelsHV[k] * floatPixelsHV[k]
                                            + (floatPixelsHH[k] - floatPixelsVV[k])
                                                * (floatPixelsHH[k] - floatPixelsVV[k]))));
              }
            }
            stepProgressBar();
          }
        }
        break;
      default:
        throw new IllegalArgumentException("Invalid operation");
    }
    ip.resetMinAndMax();
    imp.updateAndDraw();
  } /* end doIt */
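FLT_EPSILON (the spline-fit tolerance handed to the gradient and Hessian helpers) and the imp field are defined elsewhere in the class. A plausible definition for the constant, stated as an assumption, is the float machine epsilon:

  // Assumed value: the spacing of floats around 1.0f (about 1.19e-7).
  private static final float FLT_EPSILON = Math.ulp(1.0f);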
Example #27
    ImageProcessor setup(ImagePlus imp) {

      ImageProcessor ip;
      int type = imp.getType();
      if (type != ImagePlus.COLOR_RGB) return null;
      ip = imp.getProcessor();
      int id = imp.getID();
      int slice = imp.getCurrentSlice();

      if ((id != previousImageID) || (slice != previousSlice) || flag) {
        flag = false; // if true, flags a change from HSB to RGB or vice versa
        numSlices = imp.getStackSize();
        stack = imp.getStack();
        width = stack.getWidth();
        height = stack.getHeight();
        numPixels = width * height;

        hSource = new byte[numPixels];
        sSource = new byte[numPixels];
        bSource = new byte[numPixels];

        // restore = (int[])ip.getPixelsCopy(); //This runs into trouble sometimes, so do it the
        // long way:
        int[] temp = (int[]) ip.getPixels();
        restore = new int[numPixels];
        for (int i = 0; i < numPixels; i++) restore[i] = temp[i];

        fillMask = new int[numPixels];

        // Get hsb or rgb from image.
        ColorProcessor cp = (ColorProcessor) ip;
        IJ.showStatus("Gathering data");

        if (isRGB) cp.getRGB(hSource, sSource, bSource);
        else cp.getHSB(hSource, sSource, bSource);

        IJ.showStatus("done");

        // Create a spectrum ColorModel for the Hue histogram plot.
        Color c;
        byte[] reds = new byte[256];
        byte[] greens = new byte[256];
        byte[] blues = new byte[256];
        for (int i = 0; i < 256; i++) {
          c = Color.getHSBColor(i / 255f, 1f, 1f);

          reds[i] = (byte) c.getRed();
          greens[i] = (byte) c.getGreen();
          blues[i] = (byte) c.getBlue();
        }
        ColorModel cm = new IndexColorModel(8, 256, reds, greens, blues);

        // Make an image with just the hue from the RGB image and the spectrum LUT.
        // This is just for a hue histogram for the plot.  Do not show it.
        // ByteProcessor bpHue = new ByteProcessor(width,height,h,cm);
        ByteProcessor bpHue = new ByteProcessor(width, height, hSource, cm);
        ImagePlus impHue = new ImagePlus("Hue", bpHue);
        // impHue.show();

        ByteProcessor bpSat = new ByteProcessor(width, height, sSource, cm);
        ImagePlus impSat = new ImagePlus("Sat", bpSat);
        // impSat.show();

        ByteProcessor bpBri = new ByteProcessor(width, height, bSource, cm);
        ImagePlus impBri = new ImagePlus("Bri", bpBri);
        // impBri.show();

        plot.setHistogram(impHue, 0);
        splot.setHistogram(impSat, 1);
        bplot.setHistogram(impBri, 2);

        updateLabels();
        updatePlot();
        updateScrollBars();
        imp.updateAndDraw();
      }
      previousImageID = id;
      previousSlice = slice;
      return ip;
    }