Example #1
  /**
   * Collects the points to build the observation array, by iterating over a hypercube around the
   * given location. Points that fall outside the image are not included.
   *
   * @param image the source image to sample.
   * @param point the location around which to collect the samples.
   * @param span the span size of the hypercube to sample, such that in dimension <code>d</code>,
   *     the sampled cube is of size <code>2 x span[d] + 1</code>.
   * @return an {@link Observation} object containing the sampled data.
   */
  public static final <T extends RealType<T>> Observation gatherObservationData(
      final RandomAccessibleInterval<T> image, final Localizable point, final long[] span) {

    final int ndims = image.numDimensions();
    RectangleNeighborhoodGPL<T> neighborhood = new RectangleNeighborhoodGPL<T>(image);
    neighborhood.setSpan(span);
    neighborhood.setPosition(point);

    int n_pixels = (int) neighborhood.size();
    double[] tmp_I = new double[n_pixels];
    double[][] tmp_X = new double[n_pixels][ndims];

    RectangleCursor<T> cursor = neighborhood.localizingCursor();
    long[] pos = new long[image.numDimensions()];

    int index = 0;
    while (cursor.hasNext()) {

      cursor.fwd();
      cursor.localize(pos); // This is the absolute roi position
      if (cursor.isOutOfBounds()) {
        continue;
      }

      for (int i = 0; i < ndims; i++) {
        tmp_X[index][i] = pos[i];
      }

      tmp_I[index] = cursor.get().getRealDouble();
      index++;
    }

    // Now we possibly resize the arrays, in case we have been too close to the image border.
    double[][] X = null;
    double[] I = null;
    if (index == n_pixels) {
      // Ok, we have gone through the whole square
      X = tmp_X;
      I = tmp_I;
    } else {
      // Re-dimension the arrays
      X = new double[index][ndims];
      I = new double[index];
      System.arraycopy(tmp_X, 0, X, 0, index);
      System.arraycopy(tmp_I, 0, I, 0, index);
    }

    Observation obs = new Observation();
    obs.I = I;
    obs.X = X;
    return obs;
  }
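  /*
   * Usage sketch (assumption, not part of the original example): samples a 5x5 window around the
   * centre of a small 2D test image. ArrayImgs, Point and FloatType are standard imglib2 classes;
   * the Observation fields I and X are the ones filled by the method above.
   */
  public static void gatherObservationDataExample() {
    final Img<FloatType> img = ArrayImgs.floats(32, 32); // hypothetical test image
    final Point center = new Point(16L, 16L);
    final long[] span = new long[] {2, 2}; // window of size 2 * span[d] + 1 = 5 per dimension
    final Observation obs = gatherObservationData(img, center, span);
    System.out.println("Collected " + obs.I.length + " samples"); // 25 here, fewer if clipped
  }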
Example #2
  @SuppressWarnings({"rawtypes", "unchecked"})
  public static <T extends Type<T>> ImageRenderer<T>[] createSuitableRenderer(
      final RandomAccessibleInterval<T> img) {

    final List<ImageRenderer> res = new ArrayList<ImageRenderer>();

    if (Util.getTypeFromInterval(img) instanceof LabelingType) {
      res.add(new ColorLabelingRenderer());
      res.add(new BoundingBoxLabelRenderer());
      res.add(new BoundingBoxRandomColorLabelRenderer());
    } else {
      final T type = img.randomAccess().get();

      if (type instanceof RealType) {
        res.add(new Real2GreyRenderer(((RealType) Util.getTypeFromInterval(img)).getMinValue()));
        for (int d = 0; d < img.numDimensions(); d++) {
          if ((img.dimension(d) > 1) && (img.dimension(d) < 4)) {
            res.add(new Real2ColorRenderer(d));
          }
        }
      }
    }

    return res.toArray(new ImageRenderer[res.size()]);
  }
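  /*
   * Usage sketch (assumption, not part of the original example): queries the renderers suggested
   * for a real-valued image and prints their class names; no renderer-specific API beyond the
   * factory method above is assumed.
   */
  public static <T extends RealType<T>> void printSuitableRenderers(
      final RandomAccessibleInterval<T> img) {
    for (final ImageRenderer<T> renderer : createSuitableRenderer(img)) {
      System.out.println(renderer.getClass().getSimpleName());
    }
  }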
  @Override
  public RandomAccessibleInterval<V> compute(
      RandomAccessibleInterval<T> input, RandomAccessibleInterval<V> output) {
    setUpNeighbours(input.numDimensions());

    // OutOfBounds for marker
    V zeroV = output.randomAccess().get().createVariable();
    zeroV.set(getVMinValue(zeroV));
    OutOfBounds<V> marker =
        new OutOfBoundsConstantValueFactory<V, RandomAccessibleInterval<V>>(zeroV).create(output);

    Cursor<V> cur = new Cursor<V>(output);

    // OutOfBounds for mask
    T zeroT = input.randomAccess().get().createVariable();
    zeroT.set(getTMinValue(zeroT));
    OutOfBounds<T> mask =
        new OutOfBoundsConstantValueFactory<T, RandomAccessibleInterval<T>>(zeroT).create(input);

    scanInRasterOrder(cur, marker, mask);
    scanInAntiRasterOrder(cur, marker, mask);

    propagate(marker, mask, m_neighbours);

    return output;
  }
  /** {@inheritDoc} */
  @Override
  public Labeling<L> compute(final RandomAccessibleInterval<T> op, final Labeling<L> r) {

    initRegionGrowing(op);

    final LinkedList<ValuePair<int[], L>> q = new LinkedList<ValuePair<int[], L>>();

    // image and random access to keep track of the already visited pixel positions
    if (m_allowOverlap) {
      NativeImgLabeling<L, IntType> tmp =
          new NativeImgLabeling<L, IntType>(
              new ArrayImgFactory<IntType>().create(resultDims(op), new IntType()));
      m_visitedLabRA = tmp.randomAccess();
    } else {
      BitType bt = new BitType();
      Img<BitType> tmp = null;
      try {
        tmp = new ArrayImgFactory<BitType>().imgFactory(bt).create(op, bt);
      } catch (IncompatibleTypeException e) {
        // cannot happen: BitType is a NativeType, so the ArrayImgFactory is always compatible
      }
      m_visitedRA = tmp.randomAccess();
    }

    // access to the resulting labeling
    RandomAccess<LabelingType<L>> resRA = r.randomAccess();

    L label;
    int[] pos = new int[op.numDimensions()];
    do {
      while ((label = nextSeedPosition(pos)) != null) {

        // already visited?
        setVisitedPosition(pos);
        if (isMarkedAsVisited(label)) {
          continue;
        }
        markAsVisited(label);

        q.addLast(new ValuePair<int[], L>(pos.clone(), label));

        // set new labeling
        resRA.setPosition(pos);
        setLabel(resRA, label);

        if (m_mode == GrowingMode.ASYNCHRONOUS) {
          growProcess(q, resRA, op);
        }
      }
      if (m_mode == GrowingMode.SYNCHRONOUS) {
        growProcess(q, resRA, op);
      }
    } while (hasMoreSeedingPoints());

    return r;
  }
    public Cursor(final RandomAccessibleInterval<U> ra) {
      m_ra = ra.randomAccess();

      m_numPixel = numPixels(ra);

      m_lastPos = new long[ra.numDimensions()];
      for (int i = 0; i < m_lastPos.length; i++) {
        m_lastPos[i] = ra.dimension(i) - 1;
      }

      m_breaks = new long[ra.numDimensions()];
      ra.dimensions(m_breaks);

      // turn the per-dimension sizes into cumulative products: m_breaks[i] = dim(0) * ... * dim(i)
      for (int i = 1; i < m_breaks.length; i++) {
        m_breaks[i] *= m_breaks[i - 1];
      }

      setToOrigin();
    }
Example #6
  /**
   * Creates an {@link IntervalIterator} that visits one position per XY plane of the given
   * interval (the first two dimensions are collapsed to size 1).
   *
   * @param permuted the interval whose non-planar dimensions are iterated
   * @return a plane-wise {@link IntervalIterator}
   */
  private static IntervalIterator createIntervalIterator(
      final RandomAccessibleInterval<?> permuted) {
    final long[] dims = new long[permuted.numDimensions()];
    permuted.dimensions(dims);

    dims[0] = 1;
    dims[1] = 1;

    final IntervalIterator ii = new IntervalIterator(dims);
    return ii;
  }
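  /*
   * Sketch (assumption, not part of the original example): because the first two dimensions are
   * collapsed to size 1, the iterator above visits every XY plane exactly once, e.g. 3 * 10 * 5 =
   * 150 steps for a 100x80x3x10x5 interval.
   */
  private static long countPlanes(final RandomAccessibleInterval<?> permuted) {
    final IntervalIterator ii = createIntervalIterator(permuted);
    long planes = 0;
    while (ii.hasNext()) {
      ii.fwd();
      planes++;
    }
    return planes;
  }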
  protected void generateHistogramData(DataContainer<T> container) {
    double ch1BinWidth = getXBinWidth(container);
    double ch2BinWidth = getYBinWidth(container);

    // get the 2 images for the calculation of Pearson's
    final RandomAccessibleInterval<T> img1 = getImageCh1(container);
    final RandomAccessibleInterval<T> img2 = getImageCh2(container);
    final RandomAccessibleInterval<BitType> mask = container.getMask();

    // get the cursors for iterating through pixels in images
    TwinCursor<T> cursor =
        new TwinCursor<T>(
            img1.randomAccess(), img2.randomAccess(), Views.iterable(mask).localizingCursor());

    // create new image to put the scatter-plot in
    final ImgFactory<LongType> scatterFactory = new ArrayImgFactory<LongType>();
    plotImage = scatterFactory.create(new int[] {xBins, yBins}, new LongType());

    // create access cursors
    final RandomAccess<LongType> histogram2DCursor = plotImage.randomAccess();

    // iterate over images
    long[] pos = new long[plotImage.numDimensions()];
    while (cursor.hasNext()) {
      cursor.fwd();
      double ch1 = cursor.getFirst().getRealDouble();
      double ch2 = cursor.getSecond().getRealDouble();
      /* Scale values for both channels to fit in the range.
       * Moreover mirror the y value on the x axis.
       */
      pos[0] = getXValue(ch1, ch1BinWidth, ch2, ch2BinWidth);
      pos[1] = getYValue(ch1, ch1BinWidth, ch2, ch2BinWidth);
      // set position of input/output cursor
      histogram2DCursor.setPosition(pos);
      // get current value at position and increment it
      long count = histogram2DCursor.get().getIntegerLong();
      count++;

      histogram2DCursor.get().set(count);
    }

    xBinWidth = ch1BinWidth;
    yBinWidth = ch2BinWidth;
    xLabel = getLabelCh1();
    yLabel = getLabelCh2();
    xMin = getXMin(container);
    xMax = getXMax(container);
    yMin = getYMin(container);
    yMax = getYMax(container);
  }
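  /*
   * Minimal binning sketch (assumption: the getXValue/getYValue helpers used above perform an
   * equivalent clamp-and-bin step, plus the y-axis mirroring mentioned in the comment). binWidth
   * is the channel's value range divided by the number of bins.
   */
  private static long binIndex(
      final double value, final double min, final double binWidth, final int nBins) {
    final long bin = (long) ((value - min) / binWidth);
    return Math.max(0, Math.min(nBins - 1, bin)); // clamp to [0, nBins - 1]
  }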
  @Override
  public void compute(final RandomAccessibleInterval<T> input, final IterableInterval<V> output) {
    final Cursor<V> cursor = output.localizingCursor();
    final RandomAccess<T> access = input.randomAccess();

    while (cursor.hasNext()) {
      cursor.fwd();
      for (int d = 0; d < input.numDimensions(); d++) {
        if (d != dim) {
          // dimension dim is collapsed in the output, so input dimensions above it shift down by one
          access.setPosition(cursor.getIntPosition(d - (d > dim ? 1 : 0)), d);
        }
      }

      method.compute(new DimensionIterable(input.dimension(dim), access), cursor.get());
    }
  }
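  /*
   * Sketch of the dimension mapping used above (assumption, not part of the original example): the
   * projected dimension dim is dropped from the output, so input dimension d corresponds to output
   * dimension d for d < dim and to d - 1 for d > dim.
   */
  private static int outputDimension(final int inputDim, final int projectedDim) {
    // only meaningful for inputDim != projectedDim
    return inputDim > projectedDim ? inputDim - 1 : inputDim;
  }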
Example #9
 public static final <T extends RealType<T>> void addGaussianSpotToImage(
     RandomAccessibleInterval<T> img, double[] params) {
   IterableInterval<T> iterImg = Views.iterable(img);
   Cursor<T> lc = iterImg.localizingCursor();
   int nDims = img.numDimensions();
   double[] position = new double[nDims];
   double val;
   T var = iterImg.firstElement().createVariable();
   while (lc.hasNext()) {
     lc.fwd();
     lc.localize(position);
     val = gaussian.val(position, params);
     var.setReal(val);
     lc.get().add(var);
   }
 }
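 /*
  * Usage sketch (assumption, not part of the original example): adds several spots by calling the
  * method above once per parameter set; the layout of each params array is whatever the
  * surrounding class's `gaussian` function expects and is passed through unchanged.
  */
 public static <T extends RealType<T>> void addGaussianSpots(
     final RandomAccessibleInterval<T> img, final List<double[]> allParams) {
   for (final double[] params : allParams) {
     addGaussianSpotToImage(img, params); // spots accumulate additively
   }
 }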
 /**
  * Compute the steepest descent images of the template at the identity warp. Each steepest descent
  * image comprises the partial derivatives of template intensities with respect to one parameter
  * of the warp function.
  *
  * <p>The result is stored in the <em>n+1</em> dimensional {@link #target} image. Dimension
  * <em>n</em> is used to index the partial derivative. For example, the partial derivative by the
  * second parameter of the warp function is stored in slice <em>n=1</em>.
  *
  * @param gradients n+1 dimensional image of partial derivatives of the template. Dimension n is
  *     used to index the partial derivative. For example, the partial derivative by Y is stored in
  *     slice n=1.
  * @param warpFunction The warp function to be applied to the template. The partial derivatives of
  *     template intensities with respect to the parameters of this warp function are computed.
  * @param target Image of <em>n+1</em> dimensions to store the steepest descent images.
  *     Dimension <em>n</em> is used to index the parameters of the warp function. For example,
  *     the partial derivative of the template image intensity by parameter 2 of the warp function
  *     at pixel <em>(x,y)</em> is stored at position <em>(x,y,1)</em>.
  */
 public static <T extends NumericType<T>> void computeSteepestDescents(
     final RandomAccessibleInterval<T> gradients,
     final WarpFunction warpFunction,
     final RandomAccessibleInterval<T> target) {
   final int n = gradients.numDimensions() - 1;
   final int numParameters = warpFunction.numParameters();
   final T tmp = Util.getTypeFromInterval(gradients).createVariable();
   for (int p = 0; p < numParameters; ++p) {
     for (int d = 0; d < n; ++d) {
       final Cursor<T> gd =
           Views.flatIterable(Views.hyperSlice(gradients, n, d)).localizingCursor();
       for (final T t : Views.flatIterable(Views.hyperSlice(target, n, p))) {
         tmp.set(gd.next());
         tmp.mul(warpFunction.partial(gd, d, p));
         t.add(tmp);
       }
     }
   }
 }
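 /*
  * Usage sketch (assumption, not part of the original example): allocates the steepest-descent
  * image for an already computed gradient image and fills it, mirroring the steps of the Align
  * constructor further down. The gradient image is assumed to have been filled beforehand (the
  * Align constructor uses a gradients(...) helper for that).
  */
 public static <T extends NumericType<T>> Img<T> steepestDescentExample(
     final Img<T> gradients, final WarpFunction warpFunction, final ImgFactory<T> factory) {
   final int n = gradients.numDimensions() - 1; // dimension n indexes the partial derivatives
   final long[] dim = new long[n + 1];
   for (int d = 0; d < n; ++d) dim[d] = gradients.dimension(d);
   dim[n] = warpFunction.numParameters(); // one slice per warp parameter
   final Img<T> descent = factory.create(dim, gradients.firstElement().createVariable());
   computeSteepestDescents(gradients, warpFunction, descent);
   return descent;
 }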
  private static void adjustForOSEM(
      final HashMap<ViewId, RandomAccessibleInterval<FloatType>> weights,
      final WeightType weightType,
      final double osemspeedup) {
    if (osemspeedup == 1.0) return;

    if (weightType == WeightType.PRECOMPUTED_WEIGHTS
        || weightType == WeightType.WEIGHTS_ONLY
        || weightType == WeightType.LOAD_WEIGHTS) {
      for (final RandomAccessibleInterval<FloatType> w : weights.values()) {
        for (final FloatType f : Views.iterable(w))
          f.set(
              Math.min(
                  1, f.get() * (float) osemspeedup)); // individual contribution never higher than 1
      }
    } else if (weightType == WeightType.NO_WEIGHTS) {
      for (final RandomAccessibleInterval<FloatType> w : weights.values()) {
        final RandomAccess<FloatType> r = w.randomAccess();
        final long[] min = new long[w.numDimensions()];
        w.min(min);
        r.setPosition(min);
        r.get()
            .set(
                Math.min(
                    1,
                    r.get().get()
                        * (float) osemspeedup)); // individual contribution never higher than 1
      }
    } else if (weightType == WeightType.VIRTUAL_WEIGHTS) {
      for (final RandomAccessibleInterval<FloatType> w : weights.values())
        ((NormalizingRandomAccessibleInterval<FloatType>) w).setOSEMspeedup(osemspeedup);
    } else {
      throw new RuntimeException(
          "Weight Type: "
              + weightType.name()
              + " not supported in ProcessForDeconvolution.adjustForOSEM()");
    }
  }
  public Align(final RandomAccessibleInterval<T> template, final ImgFactory<T> factory) {
    this.template = template;
    final T type = Util.getTypeFromInterval(template);

    n = template.numDimensions();
    warpFunction = new AffineWarp(n);
    numParameters = warpFunction.numParameters();
    currentTransform = new AffineTransform(n);

    final long[] dim = new long[n + 1];
    for (int d = 0; d < n; ++d) dim[d] = template.dimension(d);
    dim[n] = n;
    final Img<T> gradients = factory.create(dim, type);
    gradients(Views.extendBorder(template), gradients);

    dim[n] = numParameters;
    descent = factory.create(dim, type);
    computeSteepestDescents(gradients, warpFunction, descent);

    Hinv = computeInverseHessian(descent);

    error = factory.create(template, type);
  }
 /** Compute the inverse Hessian matrix from the steepest descent images. */
 public static <T extends RealType<T>> double[][] computeInverseHessian(
     final RandomAccessibleInterval<T> descent) {
   final int n = descent.numDimensions() - 1;
   final int numParameters = (int) descent.dimension(n);
   final long[] dim = new long[n + 1];
   descent.dimensions(dim);
   dim[n] = 1;
   final LocalizingIntervalIterator pos = new LocalizingIntervalIterator(dim);
   final RandomAccess<T> r = descent.randomAccess();
   final double[] deriv = new double[numParameters];
   final double[][] H = new double[numParameters][numParameters];
   while (pos.hasNext()) {
     pos.fwd();
     r.setPosition(pos);
     for (int p = 0; p < numParameters; ++p) {
       deriv[p] = r.get().getRealDouble();
       r.fwd(n);
     }
     for (int i = 0; i < numParameters; ++i)
       for (int j = 0; j < numParameters; ++j) H[i][j] += deriv[i] * deriv[j];
   }
   return new Matrix(H).inverse().getArray();
 }
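 /*
  * Sketch (assumption, not part of the original example): in a Lucas-Kanade style scheme the
  * inverse Hessian computed above is applied to the accumulated steepest-descent/error products to
  * obtain the parameter update dp = Hinv * b; this is the corresponding matrix-vector product.
  */
 public static double[] parameterUpdate(final double[][] Hinv, final double[] b) {
   final double[] dp = new double[b.length];
   for (int i = 0; i < b.length; ++i) {
     for (int j = 0; j < b.length; ++j) {
       dp[i] += Hinv[i][j] * b[j];
     }
   }
   return dp;
 }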
Example #14
 /**
  * Flood fill: writes the seed value into the result image for every pixel that is connected to
  * the start position and has the same value in the source image.
  *
  * @param op0 Source intensity image.
  * @param op1 Start position.
  * @param r The segmentation image (result).
  */
 public final void compute(
     RandomAccessibleInterval<T> op0, final long[] op1, final RandomAccessibleInterval<T> r) {
   final RandomAccess<T> rc = r.randomAccess();
   final RandomAccess<T> op0c = op0.randomAccess();
   op0c.setPosition(op1);
   final T floodVal = op0c.get().copy();
   final LinkedList<long[]> q = new LinkedList<long[]>();
   q.addFirst(op1.clone());
   long[] pos, nextPos;
   long[] perm = new long[r.numDimensions()];
   while (!q.isEmpty()) {
     pos = q.removeLast();
     rc.setPosition(pos);
     if (rc.get().compareTo(floodVal) == 0) {
       continue;
     }
     op0c.setPosition(pos);
     if (op0c.get().compareTo(floodVal) == 0) {
       // set new label
       rc.get().set(floodVal);
       switch (m_type) {
         case EIGHT_CONNECTED:
           Arrays.fill(perm, -1);
           int i = r.numDimensions() - 1;
           boolean add;
           while (i > -1) {
             nextPos = pos.clone();
             add = true;
             // Modify position
             for (int j = 0; j < r.numDimensions(); j++) {
               nextPos[j] += perm[j];
               // Check boundaries
               if (nextPos[j] < 0 || nextPos[j] >= r.dimension(j)) {
                 add = false;
                 break;
               }
             }
             if (add) {
               q.addFirst(nextPos);
             }
             // Calculate next permutation
             for (i = perm.length - 1; i > -1; i--) {
               if (perm[i] < 1) {
                 perm[i]++;
                 for (int j = i + 1; j < perm.length; j++) {
                   perm[j] = -1;
                 }
                 break;
               }
             }
           }
           break;
         case FOUR_CONNECTED:
         default:
           for (int j = 0; j < r.numDimensions(); j++) {
             if (pos[j] + 1 < r.dimension(j)) {
               nextPos = pos.clone();
               nextPos[j]++;
               q.addFirst(nextPos);
             }
             if (pos[j] - 1 >= 0) {
               nextPos = pos.clone();
               nextPos[j]--;
               q.addFirst(nextPos);
             }
           }
           break;
       }
     }
   }
 }
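 /*
  * Neighbourhood sketch (assumption, not part of the original example): the EIGHT_CONNECTED branch
  * above enumerates every offset in {-1, 0, 1}^n (the zero offset is harmless because
  * already-labelled pixels are skipped), while FOUR_CONNECTED only visits the 2*n axis-aligned
  * neighbours.
  */
 private static long neighbourhoodSize(final int nDims, final boolean eightConnected) {
   // 3^n - 1 proper neighbours for the full hypercube vs. 2*n for the axis-aligned case
   return eightConnected ? Math.round(Math.pow(3, nDims)) - 1 : 2L * nDims;
 }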
  /**
   * Fuses one stack, i.e. all angles/illuminations for one timepoint and channel
   *
   * @param timepoint the timepoint to fuse
   * @param channel the channel to fuse
   * @return true on success, false if no views could be fused or an error occurred
   */
  public boolean fuseStacksAndGetPSFs(
      final TimePoint timepoint,
      final Channel channel,
      final ImgFactory<FloatType> imgFactory,
      final int osemIndex,
      double osemspeedup,
      WeightType weightType,
      final HashMap<Channel, ChannelPSF> extractPSFLabels,
      final long[] psfSize,
      final HashMap<Channel, ArrayList<Pair<Pair<Angle, Illumination>, String>>> psfFiles,
      final boolean transformLoadedPSFs) {
    // TODO: get rid of this hack
    if (files != null) {
      weightType = WeightType.LOAD_WEIGHTS;
      IOFunctions.println("WARNING: LOADING WEIGHTS FROM IMAGES, files.length()=" + files.length);
    }

    // get all views that are fused for this timepoint & channel
    this.viewDescriptions =
        FusionHelper.assembleInputData(spimData, timepoint, channel, viewIdsToProcess);

    if (this.viewDescriptions.size() == 0) return false;

    this.imgs = new HashMap<ViewId, RandomAccessibleInterval<FloatType>>();
    this.weights = new HashMap<ViewId, RandomAccessibleInterval<FloatType>>();

    final Img<FloatType> overlapImg;

    if (weightType == WeightType.WEIGHTS_ONLY)
      overlapImg = imgFactory.create(bb.getDimensions(), new FloatType());
    else overlapImg = null;

    final boolean extractPSFs =
        (extractPSFLabels != null) && (extractPSFLabels.get(channel).getLabel() != null);
    final boolean loadPSFs = (psfFiles != null);

    if (extractPSFs) ePSF = new ExtractPSF<FloatType>();
    else if (loadPSFs) ePSF = loadPSFs(channel, viewDescriptions, psfFiles, transformLoadedPSFs);
    else {
      ePSF = assignOtherChannel(channel, extractPSFLabels);
    }

    if (ePSF == null) return false;

    // remember the extracted or loaded PSFs
    extractPSFLabels.get(channel).setExtractPSFInstance(ePSF);

    // we will need to run some batches until all is fused
    for (int i = 0; i < viewDescriptions.size(); ++i) {
      final ViewDescription vd = viewDescriptions.get(i);

      IOFunctions.println(
          "Transforming view "
              + i
              + " of "
              + (viewDescriptions.size() - 1)
              + " (viewsetup="
              + vd.getViewSetupId()
              + ", tp="
              + vd.getTimePointId()
              + ")");
      IOFunctions.println(
          "("
              + new Date(System.currentTimeMillis())
              + "): Reserving memory for transformed & weight image.");

      // creating the output
      RandomAccessibleInterval<FloatType> transformedImg; // might be null if WEIGHTS_ONLY
      final RandomAccessibleInterval<FloatType>
          weightImg; // never null (except LOAD_WEIGHTS which is not implemented yet)

      if (weightType == WeightType.WEIGHTS_ONLY) transformedImg = overlapImg;
      else transformedImg = imgFactory.create(bb.getDimensions(), new FloatType());

      IOFunctions.println(
          "("
              + new Date(System.currentTimeMillis())
              + "): Transformed image factory: "
              + imgFactory.getClass().getSimpleName());

      // loading the input if necessary
      final RandomAccessibleInterval<FloatType> img;

      if (weightType == WeightType.WEIGHTS_ONLY && !extractPSFs) {
        img = null;
      } else {
        IOFunctions.println("(" + new Date(System.currentTimeMillis()) + "): Loading image.");
        img = ProcessFusion.getImage(new FloatType(), spimData, vd, true);

        if (Img.class.isInstance(img))
          IOFunctions.println(
              "("
                  + new Date(System.currentTimeMillis())
                  + "): Input image factory: "
                  + ((Img<FloatType>) img).factory().getClass().getSimpleName());
      }

      // initializing weights
      IOFunctions.println(
          "("
              + new Date(System.currentTimeMillis())
              + "): Initializing transformation & weights: "
              + weightType.name());

      spimData.getViewRegistrations().getViewRegistration(vd).updateModel();
      final AffineTransform3D transform =
          spimData.getViewRegistrations().getViewRegistration(vd).getModel();
      final long[] offset = new long[] {bb.min(0), bb.min(1), bb.min(2)};

      if (weightType == WeightType.PRECOMPUTED_WEIGHTS || weightType == WeightType.WEIGHTS_ONLY)
        weightImg = imgFactory.create(bb.getDimensions(), new FloatType());
      else if (weightType == WeightType.NO_WEIGHTS)
        weightImg =
            Views.interval(
                new ConstantRandomAccessible<FloatType>(
                    new FloatType(1), transformedImg.numDimensions()),
                transformedImg);
      else if (weightType == WeightType.VIRTUAL_WEIGHTS) {
        final Blending blending = getBlending(img, blendingBorder, blendingRange, vd);

        weightImg =
            new TransformedRealRandomAccessibleInterval<FloatType>(
                blending, new FloatType(), transformedImg, transform, offset);
      } else { // if ( processType == ProcessType.LOAD_WEIGHTS )
        IOFunctions.println("WARNING: LOADING WEIGHTS FROM: '" + new File(files[i]) + "'");
        ImagePlus imp = StackImgLoaderIJ.open(new File(files[i]));
        weightImg = imgFactory.create(bb.getDimensions(), new FloatType());
        StackImgLoaderIJ.imagePlus2ImgLib2Img(imp, (Img<FloatType>) weightImg, false);
        imp.close();
        if (debugImport) {
          imp = ImageJFunctions.show(weightImg);
          imp.setTitle("ViewSetup " + vd.getViewSetupId() + " Timepoint " + vd.getTimePointId());
        }
      }

      // split up into many parts for multithreading
      final Vector<ImagePortion> portions =
          FusionHelper.divideIntoPortions(
              Views.iterable(transformedImg).size(), Threads.numThreads() * 4);

      // set up executor service
      final ExecutorService taskExecutor = Executors.newFixedThreadPool(Threads.numThreads());
      final ArrayList<Callable<String>> tasks = new ArrayList<Callable<String>>();

      IOFunctions.println(
          "("
              + new Date(System.currentTimeMillis())
              + "): Transforming image & computing weights.");

      for (final ImagePortion portion : portions) {
        if (weightType == WeightType.WEIGHTS_ONLY) {
          final Interval imgInterval =
              new FinalInterval(
                  ViewSetupUtils.getSizeOrLoad(
                      vd.getViewSetup(),
                      vd.getTimePoint(),
                      spimData.getSequenceDescription().getImgLoader()));
          final Blending blending = getBlending(imgInterval, blendingBorder, blendingRange, vd);

          tasks.add(
              new TransformWeights(
                  portion, imgInterval, blending, transform, overlapImg, weightImg, offset));
        } else if (weightType == WeightType.PRECOMPUTED_WEIGHTS) {
          final Blending blending = getBlending(img, blendingBorder, blendingRange, vd);

          tasks.add(
              new TransformInputAndWeights(
                  portion, img, blending, transform, transformedImg, weightImg, offset));
        } else if (weightType == WeightType.NO_WEIGHTS
            || weightType == WeightType.VIRTUAL_WEIGHTS
            || weightType == WeightType.LOAD_WEIGHTS) {
          tasks.add(new TransformInput(portion, img, transform, transformedImg, offset));
        } else {
          throw new RuntimeException(weightType.name() + " not implemented yet.");
        }
      }

      try {
        // invokeAll() returns when all tasks are complete
        taskExecutor.invokeAll(tasks);
      } catch (final InterruptedException e) {
        IOFunctions.println("Failed to compute fusion: " + e);
        e.printStackTrace();
        return false;
      }

      taskExecutor.shutdown();

      // extract PSFs if wanted
      if (extractPSFs) {
        final ArrayList<double[]> llist =
            getLocationsOfCorrespondingBeads(
                timepoint, vd, extractPSFLabels.get(channel).getLabel());

        IOFunctions.println(
            "("
                + new Date(System.currentTimeMillis())
                + "): Extracting PSF for viewsetup "
                + vd.getViewSetupId()
                + " using label '"
                + extractPSFLabels.get(channel).getLabel()
                + "'"
                + " ("
                + llist.size()
                + " corresponding detections available)");

        ePSF.extractNextImg(img, vd, transform, llist, psfSize);
      }

      if (weightType != WeightType.WEIGHTS_ONLY) imgs.put(vd, transformedImg);
      weights.put(vd, weightImg);
    }

    // normalize the weights
    final ArrayList<RandomAccessibleInterval<FloatType>> weightsSorted =
        new ArrayList<RandomAccessibleInterval<FloatType>>();

    for (final ViewDescription vd : viewDescriptions) weightsSorted.add(weights.get(vd));

    IOFunctions.println(
        "("
            + new Date(System.currentTimeMillis())
            + "): Computing weight normalization for deconvolution.");

    final WeightNormalizer wn;

    if (weightType == WeightType.WEIGHTS_ONLY
        || weightType == WeightType.PRECOMPUTED_WEIGHTS
        || weightType == WeightType.LOAD_WEIGHTS) wn = new WeightNormalizer(weightsSorted);
    else if (weightType == WeightType.VIRTUAL_WEIGHTS)
      wn = new WeightNormalizer(weightsSorted, imgFactory);
    else wn = null; // if ( processType == ProcessType.NO_WEIGHTS )

    if (wn != null && !wn.process()) return false;

    // put the potentially modified weights back
    for (int i = 0; i < viewDescriptions.size(); ++i)
      weights.put(viewDescriptions.get(i), weightsSorted.get(i));

    this.minOverlappingViews = wn.getMinOverlappingViews();
    this.avgOverlappingViews = wn.getAvgOverlappingViews();

    IOFunctions.println(
        "("
            + new Date(System.currentTimeMillis())
            + "): Minimal number of overlapping views: "
            + getMinOverlappingViews()
            + ", using "
            + (this.minOverlappingViews = Math.max(1, this.minOverlappingViews)));
    IOFunctions.println(
        "("
            + new Date(System.currentTimeMillis())
            + "): Average number of overlapping views: "
            + getAvgOverlappingViews()
            + ", using "
            + (this.avgOverlappingViews = Math.max(1, this.avgOverlappingViews)));

    if (osemIndex == 1) osemspeedup = getMinOverlappingViews();
    else if (osemIndex == 2) osemspeedup = getAvgOverlappingViews();

    IOFunctions.println(
        "("
            + new Date(System.currentTimeMillis())
            + "): Adjusting for OSEM speedup = "
            + osemspeedup);

    if (weightType == WeightType.WEIGHTS_ONLY)
      displayWeights(osemspeedup, weightsSorted, overlapImg, imgFactory);
    else adjustForOSEM(weights, weightType, osemspeedup);

    IOFunctions.println(
        "("
            + new Date(System.currentTimeMillis())
            + "): Finished precomputations for deconvolution.");

    return true;
  }
Example #16
  /**
   * Wraps an {@link ImgPlus} into an {@link ImagePlus}, converting each plane with the given
   * converter.
   *
   * @param img the image to wrap
   * @param processorFactory factory creating one {@link ImageProcessor} per plane
   * @param converter converts the input type to {@link FloatType} values that are written into
   *     the processors
   * @return wrapped {@link ImagePlus}
   */
  @SuppressWarnings({"unchecked", "rawtypes"})
  public static final <T extends RealType<T>> ImagePlus wrap(
      final ImgPlus<T> img,
      final ImageProcessorFactory processorFactory,
      final Converter<T, FloatType> converter) {
    // we always want to have 5 dimensions
    final RandomAccessibleInterval permuted = extendAndPermute(img);

    final int width = (int) permuted.dimension(0);
    final int height = (int) permuted.dimension(1);

    final ImagePlus r = new ImagePlus();
    final ImageStack is = new ImageStack(width, height);

    final RandomAccessibleInterval<T> access =
        img.iterationOrder().equals(((IterableRealInterval<?>) permuted).iterationOrder())
            ? img
            : permuted;

    final IntervalIterator ii = createIntervalIterator(access);

    final long[] min = new long[access.numDimensions()];
    final long[] max = new long[access.numDimensions()];

    max[0] = permuted.max(0);
    max[1] = permuted.max(1);

    // number of planes = num tasks
    int numSlices = 1;
    for (int d = 2; d < access.numDimensions(); d++) {
      numSlices *= access.dimension(d);
    }

    // parallelization
    final ImageProcessor[] slices = new ImageProcessor[numSlices];
    final ExecutorService service =
        new ThreadPoolExecutorService(
            KNIMEConstants.GLOBAL_THREAD_POOL.createSubPool(KNIPConstants.THREADS_PER_NODE));

    final ArrayList<Future<Void>> futures = new ArrayList<Future<Void>>();
    final T inType = img.firstElement();

    int i = 0;
    while (ii.hasNext()) {
      ii.fwd();

      for (int d = 2; d < ii.numDimensions(); d++) {
        min[d] = ii.getIntPosition(d);
        max[d] = min[d];
      }

      final int proxy = i++;

      futures.add(
          service.submit(
              new Callable<Void>() {

                final FinalInterval tmp = new FinalInterval(min, max);

                @Override
                public Void call() throws Exception {

                  final Cursor<T> cursor = Views.iterable(Views.interval(access, tmp)).cursor();

                  final ImageProcessor ip = processorFactory.createProcessor(width, height, inType);

                  final FloatType outProxy = new FloatType();
                  for (int y = 0; y < height; y++) {
                    for (int x = 0; x < width; x++) {
                      converter.convert(cursor.next(), outProxy);
                      ip.setf(x, y, outProxy.get());
                    }
                  }
                  slices[proxy] = ip;

                  return null;
                }
              }));
    }

    for (final Future<Void> f : futures) {
      try {
        f.get();
      } catch (final InterruptedException e) {
        e.printStackTrace();
      } catch (final ExecutionException e) {
        e.printStackTrace();
      }
    }

    // add slices to stack
    for (ImageProcessor slice : slices) {
      is.addSlice("", slice);
    }

    // set calibration
    final double[] newCalibration = getNewCalibration(img);
    Calibration cal = new Calibration();
    cal.pixelWidth = newCalibration[0];
    cal.pixelHeight = newCalibration[1];
    cal.pixelDepth = newCalibration[3];
    r.setCalibration(cal);

    r.setStack(
        is, (int) permuted.dimension(2), (int) permuted.dimension(3), (int) permuted.dimension(4));
    r.setTitle(img.getName());

    return r;
  }