double alignStep(final RandomAccessibleInterval<T> image) {
	// compute error image = warped image - template
	computeDifference(Views.extendBorder(image), currentTransform, template, error);

	// compute transform parameter update:
	// gradient[p] = sum over all pixels of (steepest-descent image for parameter p) * error
	final double[] gradient = new double[numParameters];
	for (int p = 0; p < numParameters; ++p) {
		final Cursor<T> err = Views.flatIterable(error).cursor();
		for (final T t : Views.flatIterable(Views.hyperSlice(descent, n, p)))
			gradient[p] += t.getRealDouble() * err.next().getRealDouble();
	}
	// dp = Hinv * gradient (Gauss-Newton update with precomputed inverse Hessian)
	final double[] dp = new double[numParameters];
	LinAlgHelpers.mult(Hinv, gradient, dp);

	// update transform
	currentTransform.preConcatenate(warpFunction.getAffine(dp));

	// return norm of parameter update vector
	return LinAlgHelpers.length(dp);
}
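// Minimal, hypothetical driver sketch (not part of the original class): iterate
// alignStep until the norm of the parameter update falls below a tolerance or an
// iteration budget is exhausted. The names "maxIterations" and "minParameterChange"
// are illustrative assumptions, not identifiers from the original code.
public void align(final RandomAccessibleInterval<T> image, final int maxIterations,
		final double minParameterChange) {
	for (int i = 0; i < maxIterations; ++i) {
		// each step refines currentTransform in place and returns |dp|
		if (alignStep(image) < minParameterChange)
			break;
	}
}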
/** * Get a "good" initial viewer transform. The viewer transform is chosen such that for the first * source, * * <ul> * <li>the XY plane is aligned with the screen plane, * <li>the <em>z = dim_z / 2</em> slice is shown, * <li>centered and scaled such that the full <em>dim_x</em> by <em>dim_y</em> is visible. * </ul> * * @param viewerWidth width of the viewer display * @param viewerHeight height of the viewer display * @param state the {@link ViewerState} containing at least one source. * @return proposed initial viewer transform. */ public static AffineTransform3D initTransform( final int viewerWidth, final int viewerHeight, final boolean zoomedIn, final ViewerState state) { final int cX = viewerWidth / 2; final int cY = viewerHeight / 2; final Source<?> source = state.getSources().get(state.getCurrentSource()).getSpimSource(); final int timepoint = state.getCurrentTimepoint(); if (!source.isPresent(timepoint)) return new AffineTransform3D(); final AffineTransform3D sourceTransform = new AffineTransform3D(); source.getSourceTransform(timepoint, 0, sourceTransform); final Interval sourceInterval = source.getSource(timepoint, 0); final double sX0 = sourceInterval.min(0); final double sX1 = sourceInterval.max(0); final double sY0 = sourceInterval.min(1); final double sY1 = sourceInterval.max(1); final double sZ0 = sourceInterval.min(2); final double sZ1 = sourceInterval.max(2); final double sX = (sX0 + sX1 + 1) / 2; final double sY = (sY0 + sY1 + 1) / 2; final double sZ = (sZ0 + sZ1 + 1) / 2; final double[][] m = new double[3][4]; // rotation final double[] qSource = new double[4]; final double[] qViewer = new double[4]; Affine3DHelpers.extractApproximateRotationAffine(sourceTransform, qSource, 2); LinAlgHelpers.quaternionInvert(qSource, qViewer); LinAlgHelpers.quaternionToR(qViewer, m); // translation final double[] centerSource = new double[] {sX, sY, sZ}; final double[] centerGlobal = new double[3]; final double[] translation = new double[3]; sourceTransform.apply(centerSource, centerGlobal); LinAlgHelpers.quaternionApply(qViewer, centerGlobal, translation); LinAlgHelpers.scale(translation, -1, translation); LinAlgHelpers.setCol(3, translation, m); final AffineTransform3D viewerTransform = new AffineTransform3D(); viewerTransform.set(m); // scale final double[] pSource = new double[] {sX1 + 0.5, sY1 + 0.5, sZ}; final double[] pGlobal = new double[3]; final double[] pScreen = new double[3]; sourceTransform.apply(pSource, pGlobal); viewerTransform.apply(pGlobal, pScreen); final double scaleX = cX / pScreen[0]; final double scaleY = cY / pScreen[1]; final double scale; if (zoomedIn) scale = Math.max(scaleX, scaleY); else scale = Math.min(scaleX, scaleY); viewerTransform.scale(scale); // window center offset viewerTransform.set(viewerTransform.get(0, 3) + cX, 0, 3); viewerTransform.set(viewerTransform.get(1, 3) + cY, 1, 3); return viewerTransform; }