Added checking for interruption in the montage-method.
[trakem2.git] / SIFT_Matcher_new.java
blob a7bfba37c978c96bd02ba23cf5099c63bde0f712
//package mpi.fruitfly.registration;

import mpi.fruitfly.general.*;
import mpi.fruitfly.math.datastructures.*;
import mpi.fruitfly.registration.FloatArray2DScaleOctave;
import mpi.fruitfly.registration.FloatArray2DSIFT;
import mpi.fruitfly.registration.TRModel2D;
import mpi.fruitfly.registration.PointMatch;
import mpi.fruitfly.registration.ImageFilter;
import mpi.fruitfly.registration.Feature;

import imagescience.transforms.*;
import imagescience.images.Image;

import ij.plugin.*;
import ij.gui.*;
import ij.*;
import ij.process.*;

import java.util.Collections;
import java.util.Vector;
import java.awt.Color;
import java.awt.Polygon;
import java.awt.geom.AffineTransform;
import java.awt.TextField;
import java.awt.event.KeyEvent;
import java.awt.event.KeyListener;

import java.io.*;
public class SIFT_Matcher_new implements PlugIn, KeyListener
{
	// interpolation schemes offered by the imagescience Affine transform ( mapped in run() )
	private static final String[] schemes = {
		"nearest neighbor",
		"linear",
		"cubic convolution",
		"cubic B-spline",
		"cubic O-MOMS",
		"quintic B-spline"
	};
	// index into schemes, default: quintic B-spline
	private static int scheme = 5;
	// steps per scale octave
	private static int steps = 3;
	// initial gaussian blur ( sigma )
	private static float initial_sigma = 1.6f;
	// background colour
	private static double bg = 0.0;
	// feature descriptor size
	private static int fdsize = 8;
	// feature descriptor orientation bins
	private static int fdbins = 8;
	// size restrictions for scale octaves, use octaves < max_size and > min_size only
	private static int min_size = 64;
	private static int max_size = 1024;
	// minimal allowed alignment error in px
	private static float min_epsilon = 2.0f;
	// maximal allowed alignment error in px
	private static float max_epsilon = 100.0f;
	// minimal ratio of inliers to candidates for an accepted model
	private static float min_inlier_ratio = 0.05f;
	/**
	 * Set true to double the size of the image by linear interpolation to
	 * ( width * 2 - 1 ) * ( height * 2 - 1 ). Thus we can start identifying
	 * DoG extrema with $\sigma = INITIAL_SIGMA / 2$ as proposed by
	 * \citet{Lowe04}.
	 *
	 * This is useful for images smaller than 1000px per side only.
	 */
	private static boolean upscale = false;
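	// scale keeps track of the upsampling factor ( set to 2.0 in run() when
	// upscale is enabled, 1.0 otherwise ); it is used below to map feature
	// coordinates and the estimated translation back to original pixels.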
	private static float scale = 1.0f;

	private static boolean adjust = false;
	private static boolean antialias = true;
	/**
	 * show the employed feature correspondences in a small info stack
	 */
	private static boolean show_info = false;
	/**
	 * draw an arbitrarily rotated and scaled ellipse
	 *
	 * @param evec eigenvectors of unit length ( ev1_x, ev1_y, ev2_x, ev2_y ) define the ellipse's rotation
	 * @param e eigenvalues ( e1, e2 ) define the ellipse's size
	 * @param o center of the ellipse ( o_x, o_y )
	 * @param scale scales both e and o
	 */
	static void drawEllipse( ImageProcessor ip, double[] evec, double[] o, double[] e, double scale )
	{
		int num_keys = 36;
		int[] x_keys = new int[ num_keys + 1 ];
		int[] y_keys = new int[ num_keys + 1 ];
		for ( int i = 0; i < num_keys; ++i )
		{
			double r = ( double )i * 2 * Math.PI / ( double )num_keys;
			double x = Math.sin( r ) * Math.sqrt( Math.abs( e[ 0 ] ) );
			double y = Math.cos( r ) * Math.sqrt( Math.abs( e[ 1 ] ) );
			x_keys[ i ] = ( int )( scale * ( x * evec[ 0 ] + y * evec[ 2 ] + o[ 0 ] ) );
			y_keys[ i ] = ( int )( scale * ( x * evec[ 1 ] + y * evec[ 3 ] + o[ 1 ] ) );
			// System.out.println( "keypoint: ( " + x_keys[ i ] + ", " + y_keys[ i ] + ")" );
		}
		x_keys[ num_keys ] = x_keys[ 0 ];
		y_keys[ num_keys ] = y_keys[ 0 ];
		ip.drawPolygon( new Polygon( x_keys, y_keys, num_keys + 1 ) );
	}
	/**
	 * downscale a grey scale float image using gaussian blur
	 */
	static ImageProcessor downScale( FloatProcessor ip, float s )
	{
		FloatArray2D g = ImageArrayConverter.ImageToFloatArray2D( ip );
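		// Pre-blur before subsampling by factor s: assuming the input already
		// carries a blur of sigma = 0.5, the additional blur needed to reach an
		// effective sigma of 0.5 / s is sqrt( ( 0.5 / s )^2 - 0.5^2 ), which is
		// the expression below ( 0.25 / s / s - 0.25 under the square root ).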
		float sigma = ( float )Math.sqrt( 0.25 / s / s - 0.25 );
		float[] kernel = ImageFilter.createGaussianKernel1D( sigma, true );

		g = ImageFilter.convolveSeparable( g, kernel, kernel );

		ImageArrayConverter.FloatArrayToFloatProcessor( ip, g );
		// ip.setInterpolate( false );
		return ip.resize( ( int )( s * ip.getWidth() ) );
	}
	/**
	 * draw a rotated square with center point center, having size and orientation
	 */
	static void drawSquare( ImageProcessor ip, double[] o, double scale, double orient )
	{
		scale /= 2;

		double sin = Math.sin( orient );
		double cos = Math.cos( orient );

		int[] x = new int[ 6 ];
		int[] y = new int[ 6 ];

		x[ 0 ] = ( int )( o[ 0 ] + ( sin - cos ) * scale );
		y[ 0 ] = ( int )( o[ 1 ] - ( sin + cos ) * scale );

		x[ 1 ] = ( int )o[ 0 ];
		y[ 1 ] = ( int )o[ 1 ];

		x[ 2 ] = ( int )( o[ 0 ] + ( sin + cos ) * scale );
		y[ 2 ] = ( int )( o[ 1 ] + ( sin - cos ) * scale );
		x[ 3 ] = ( int )( o[ 0 ] - ( sin - cos ) * scale );
		y[ 3 ] = ( int )( o[ 1 ] + ( sin + cos ) * scale );
		x[ 4 ] = ( int )( o[ 0 ] - ( sin + cos ) * scale );
		y[ 4 ] = ( int )( o[ 1 ] - ( sin - cos ) * scale );
		x[ 5 ] = x[ 0 ];
		y[ 5 ] = y[ 0 ];

		ip.drawPolygon( new Polygon( x, y, x.length ) );
	}
	public void run( String args )
	{
		if ( IJ.versionLessThan( "1.37i" ) ) return;

		final ImagePlus imp = WindowManager.getCurrentImage();
		if ( imp == null ) { System.err.println( "There are no images open" ); return; }

		GenericDialog gd = new GenericDialog( "Align stack" );
		gd.addNumericField( "steps_per_scale_octave :", steps, 0 );
		gd.addNumericField( "initial_gaussian_blur :", initial_sigma, 2 );
		gd.addNumericField( "feature_descriptor_size :", fdsize, 0 );
		gd.addNumericField( "feature_descriptor_orientation_bins :", fdbins, 0 );
		gd.addNumericField( "minimum_image_size :", min_size, 0 );
		gd.addNumericField( "maximum_image_size :", max_size, 0 );
		gd.addNumericField( "minimal_alignment_error :", min_epsilon, 2 );
		gd.addNumericField( "maximal_alignment_error :", max_epsilon, 2 );
		gd.addNumericField( "inlier_ratio :", min_inlier_ratio, 2 );
		gd.addNumericField( "background_color :", bg, 2 );
		gd.addChoice( "interpolation_scheme :", schemes, schemes[ scheme ] );
		gd.addCheckbox( "upscale_image_first", upscale );
		gd.addCheckbox( "display_correspondences", show_info );
		gd.showDialog();
		if ( gd.wasCanceled() ) return;
		steps = ( int )gd.getNextNumber();
		initial_sigma = ( float )gd.getNextNumber();
		fdsize = ( int )gd.getNextNumber();
		fdbins = ( int )gd.getNextNumber();
		min_size = ( int )gd.getNextNumber();
		max_size = ( int )gd.getNextNumber();
		min_epsilon = ( float )gd.getNextNumber();
		max_epsilon = ( float )gd.getNextNumber();
		min_inlier_ratio = ( float )gd.getNextNumber();
		bg = ( double )gd.getNextNumber();
		scheme = gd.getNextChoiceIndex();
		upscale = gd.getNextBoolean();
		if ( upscale ) scale = 2.0f;
		else scale = 1.0f;
		show_info = gd.getNextBoolean();
		Affine a = new Affine();

		// map the selected dialog entry onto imagescience's interpolation constants
		int ischeme = Affine.NEAREST;
		switch ( scheme )
		{
		case 0:
			ischeme = Affine.NEAREST;
			break;
		case 1:
			ischeme = Affine.LINEAR;
			break;
		case 2:
			ischeme = Affine.CUBIC;
			break;
		case 3:
			ischeme = Affine.BSPLINE3;
			break;
		case 4:
			ischeme = Affine.OMOMS3;
			break;
		case 5:
			ischeme = Affine.BSPLINE5;
			break;
		}
		ImageStack stack = imp.getStack();
		ImageStack stackAligned = new ImageStack( stack.getWidth(), stack.getHeight() );

		float vis_scale = 256.0f / imp.getWidth();
		// float vis_scale = 1024.0f / imp.getWidth();
		ImageStack stackInfo = null;
		ImagePlus impInfo = null;

		if ( show_info )
			stackInfo = new ImageStack(
					Math.round( vis_scale * stack.getWidth() ),
					Math.round( vis_scale * stack.getHeight() ) );

		stackAligned.addSlice( null, stack.getProcessor( 1 ) );
		ImagePlus impAligned = new ImagePlus( "Aligned 1 of " + stack.getSize(), stackAligned );
		impAligned.show();

		ImageProcessor ip1;
		ImageProcessor ip2;
		ImageProcessor ip3 = null;

		Vector< Feature > fs1;
		Vector< Feature > fs2;

		ip2 = stack.getProcessor( 1 ).convertToFloat();

		AffineTransform at = new AffineTransform();

		FloatArray2DSIFT sift = new FloatArray2DSIFT( fdsize, fdbins );

		FloatArray2D fa = ImageArrayConverter.ImageToFloatArray2D( ip2 );
		ImageFilter.enhance( fa, 1.0f );
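		// Bring the image to the initial blur level of the scale space.
		// Assuming the input image carries a blur of sigma = 0.5 ( sigma = 1.0
		// after 2x upsampling ), the additional Gaussian blur applied here is
		// sqrt( initial_sigma^2 - 1.0 ) resp. sqrt( initial_sigma^2 - 0.25 ).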
		if ( upscale )
		{
			FloatArray2D fat = new FloatArray2D( fa.width * 2 - 1, fa.height * 2 - 1 );
			FloatArray2DScaleOctave.upsample( fa, fat );
			fa = fat;
			fa = ImageFilter.computeGaussianFastMirror( fa, ( float )Math.sqrt( initial_sigma * initial_sigma - 1.0 ) );
		}
		else
			fa = ImageFilter.computeGaussianFastMirror( fa, ( float )Math.sqrt( initial_sigma * initial_sigma - 0.25 ) );
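		// Extract SIFT features of the first slice: init() builds the Gaussian
		// scale space, run() collects the detected features, and the list is
		// sorted ( Feature implements Comparable ) before matching.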
		long start_time = System.currentTimeMillis();
		System.out.print( "processing SIFT ..." );
		sift.init( fa, steps, initial_sigma, min_size, max_size );
		fs2 = sift.run( max_size );
		Collections.sort( fs2 );
		System.out.println( " took " + ( System.currentTimeMillis() - start_time ) + "ms" );

		System.out.println( fs2.size() + " features identified and processed" );

		// downscale ip2 for visualisation purposes
		if ( show_info )
			ip2 = downScale( ( FloatProcessor )ip2, vis_scale );
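		// Register each slice to its predecessor: features of the previous slice
		// are kept in fs1, the current slice's features go into fs2, and the
		// per-pair transform is accumulated in the global AffineTransform at.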
		for ( int i = 1; i < stack.getSize(); ++i )
		{
			ip1 = ip2;
			ip2 = stack.getProcessor( i + 1 ).convertToFloat();
			fa = ImageArrayConverter.ImageToFloatArray2D( ip2 );
			ImageFilter.enhance( fa, 1.0f );

			if ( upscale )
			{
				FloatArray2D fat = new FloatArray2D( fa.width * 2 - 1, fa.height * 2 - 1 );
				FloatArray2DScaleOctave.upsample( fa, fat );
				fa = fat;
				fa = ImageFilter.computeGaussianFastMirror( fa, ( float )Math.sqrt( initial_sigma * initial_sigma - 1.0 ) );
			}
			else
				fa = ImageFilter.computeGaussianFastMirror( fa, ( float )Math.sqrt( initial_sigma * initial_sigma - 0.25 ) );

			fs1 = fs2;

			start_time = System.currentTimeMillis();
			System.out.print( "processing SIFT ..." );
			sift.init( fa, steps, initial_sigma, min_size, max_size );
			fs2 = sift.run( max_size );
			Collections.sort( fs2 );
			System.out.println( " took " + ( System.currentTimeMillis() - start_time ) + "ms" );

			System.out.println( fs2.size() + " features identified and processed" );
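			// Match SIFT descriptors of the current slice against those of the
			// previous slice by exhaustive comparison; the 1.5f argument is
			// assumed to bound the ratio of best to second best descriptor
			// distance for accepted candidates ( see FloatArray2DSIFT.createMatches ).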
			start_time = System.currentTimeMillis();
			System.out.print( "identifying correspondences using brute force ..." );
			Vector< PointMatch > candidates =
					FloatArray2DSIFT.createMatches( fs2, fs1, 1.5f, null, Float.MAX_VALUE );
			System.out.println( " took " + ( System.currentTimeMillis() - start_time ) + "ms" );

			IJ.log( candidates.size() + " potentially corresponding features identified" );

			/**
			 * draw all correspondence candidates
			 */
			if ( show_info )
			{
				ip2 = downScale( ( FloatProcessor )ip2, vis_scale );

				ip1 = ip1.convertToRGB();
				ip3 = ip2.convertToRGB();
				ip1.setColor( Color.red );
				ip3.setColor( Color.red );

				ip1.setLineWidth( 2 );
				ip3.setLineWidth( 2 );
				for ( PointMatch m : candidates )
				{
					float[] m_p1 = m.getP1().getL();
					float[] m_p2 = m.getP2().getL();

					ip1.drawDot( ( int )Math.round( vis_scale / scale * m_p2[ 0 ] ), ( int )Math.round( vis_scale / scale * m_p2[ 1 ] ) );
					ip3.drawDot( ( int )Math.round( vis_scale / scale * m_p1[ 0 ] ), ( int )Math.round( vis_scale / scale * m_p1[ 1 ] ) );
				}
			}
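			// Robustly estimate a rigid ( translation + rotation ) transformation
			// from the candidate matches ( RANSAC-like estimation, as implemented
			// by TRModel2D.estimateBestModel ); surviving correspondences are
			// collected in inliers. min/max_epsilon bound the allowed alignment
			// error, min_inlier_ratio the accepted ratio of inliers to candidates.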
			Vector< PointMatch > inliers = new Vector< PointMatch >();

			TRModel2D model = TRModel2D.estimateBestModel(
					candidates,
					inliers,
					min_epsilon,
					max_epsilon,
					min_inlier_ratio );
			float epsilon = 0.0f;

			if ( model != null )
			{
				if ( show_info )
				{
					ip1.setColor( Color.green );
					ip3.setColor( Color.green );
					ip1.setLineWidth( 2 );
					ip3.setLineWidth( 2 );
					for ( PointMatch m : inliers )
					{
						float[] m_p1 = m.getP1().getL();
						float[] m_p2 = m.getP2().getL();

						ip1.drawDot( ( int )Math.round( vis_scale / scale * m_p2[ 0 ] ), ( int )Math.round( vis_scale / scale * m_p2[ 1 ] ) );
						ip3.drawDot( ( int )Math.round( vis_scale / scale * m_p1[ 0 ] ), ( int )Math.round( vis_scale / scale * m_p1[ 1 ] ) );
					}
				}

				/**
				 * append the estimated transformation model
				 *
				 * TODO the current rotation assumes the origin ( 0, 0 ) of the
				 * image to be in the image's "center"
				 * ( width / 2 - 1.0, height / 2 - 1.0 ).  This is because we
				 * use imagescience.jar for the transformation and it does so...
				 * Think about using another transformation class, focusing on
				 * better interpolation schemes ( Lanczos would be great ).
				 */
				AffineTransform at_current = new AffineTransform( model.getAffine() );
				double[] m = new double[ 6 ];
				at_current.getMatrix( m );
				// map the translation back from the ( possibly upscaled ) working
				// coordinates to original pixel coordinates
				m[ 4 ] /= scale;
				m[ 5 ] /= scale;
				at_current.setTransform( m[ 0 ], m[ 1 ], m[ 2 ], m[ 3 ], m[ 4 ], m[ 5 ] );

				double hw = ( double )imp.getWidth() / 2.0 - 1.0;
				double hh = ( double )imp.getHeight() / 2.0 - 1.0;

				// concatenate the pair transform relative to the image center,
				// matching imagescience's center-based origin ( see TODO above )
				at.translate(
						-hw,
						-hh );
				at.concatenate( at_current );
				at.translate(
						hw,
						hh );
			}

			double[] m = new double[ 6 ];
			at.getMatrix( m );
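			// imagescience's Affine.run() takes a 4x4 homogeneous matrix, so the
			// 2D affine ( m00 m01 m02 / m10 m11 m12 ) is embedded into 3D with an
			// identity z row and column.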
			Image img = Image.wrap( new ImagePlus( "new_layer", stack.getProcessor( i + 1 ) ) );

			Image imgAligned = a.run(
					img,
					new double[][]
							{ { m[ 0 ], m[ 2 ], 0, m[ 4 ] },
							  { m[ 1 ], m[ 3 ], 0, m[ 5 ] },
							  { 0, 0, 1, 0 },
							  { 0, 0, 0, 1 } },
					ischeme,
					adjust,
					antialias );
			ImagePlus impAlignedSlice = imgAligned.imageplus();
			stackAligned.addSlice( null, impAlignedSlice.getProcessor() );
			if ( show_info )
			{
				ImageProcessor tmp;
				tmp = ip1.createProcessor( stackInfo.getWidth(), stackInfo.getHeight() );
				tmp.insert( ip1, 0, 0 );
				stackInfo.addSlice( null, tmp ); // fixing silly 1 pixel size mismatches
				tmp = ip3.createProcessor( stackInfo.getWidth(), stackInfo.getHeight() );
				tmp.insert( ip3, 0, 0 );
				stackInfo.addSlice( null, tmp );
				if ( i == 1 )
				{
					impInfo = new ImagePlus( "Alignment info", stackInfo );
					impInfo.show();
				}
				impInfo.setStack( "Alignment info", stackInfo );
				impInfo.updateAndDraw();
			}
			impAligned.setStack( "Aligned " + stackAligned.getSize() + " of " + stack.getSize(), stackAligned );
			impAligned.updateAndDraw();
		}
	}
	public void keyPressed( KeyEvent e )
	{
		if (
				( e.getKeyCode() == KeyEvent.VK_F1 ) &&
				( e.getSource() instanceof TextField ) )
		{
			// handler body not shown in this listing
		}
	}

	public void keyReleased( KeyEvent e ) { }

	public void keyTyped( KeyEvent e ) { }
}