/*
 * This source code is part of
 *
 *                GROningen MAchine for Chemical Simulations
 *
 * VERSION 3.3.99_development_20071104
 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2006, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * If you want to redistribute modifications, please consider that
 * scientific software is very special. Version control is crucial -
 * bugs must be traceable. We will be happy to consider code for
 * inclusion in the official distribution, but derived work must not
 * be called official GROMACS. Details are found in the README & COPYING
 * files - if they are missing, get the official version at www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the papers on the package - you can find them in the top README file.
 *
 * For more info, check our website at http://www.gromacs.org
 *
 * Groningen Machine for Chemical Simulation
 */
#include "gmx_fatal.h"
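/* qsort() hands the comparator only pointers to the two elements, so the
 * coordinates have to be reachable through the file-scope xptr pointer:
 * it is pointed at the coordinate array just before the qsort() call in
 * do_my_pme() below and reset to NULL afterwards to trap accidental use.
 * The comparator orders atom indices by their x-coordinate. */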
static int comp_xptr(const void *a,const void *b)
{
  int  va,vb;
  real dx;

  va = *(int *)a;
  vb = *(int *)b;

  if ((dx = (xptr[va][XX] - xptr[vb][XX])) < 0)
    return -1;
  else if (dx == 0)
    return 0;
  else
    return 1;
}
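/* Wrapper around do_pme(): sorts the atoms along x when bSort is set,
 * accumulates the system dipole mu_tot, calls PME plus the Ewald
 * long-range correction, and, when several energy groups are present,
 * fills half an energy matrix epme[ig][jg] (one entry per group pair). */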
static void do_my_pme(FILE *fp,real tm,bool bVerbose,t_inputrec *ir,
                      rvec x[],rvec xbuf[],rvec f[],
                      real charge[],real qbuf[],real qqbuf[],
                      matrix box,bool bSort,
                      t_commrec *cr,t_nsborder *nsb,t_nrnb *nrnb,
                      t_block *excl,real qtot,
                      t_forcerec *fr,int index[],FILE *fp_xvg,
                      int ngroups,unsigned short cENER[])
{
  real   ener,vcorr,q,xx,dvdl=0,vdip,vcharge;
  tensor vir,vir_corr,vir_tot;
  /* Initialize local variables */
  fprintf(fp,"There are %d energy groups\n",ngroups);
  for(i=0; (i<ngroups); i++)
    snew(epme[i],ngroups);
  /* Put x in the box; this part needs to be parallelized properly */
  /*put_atoms_in_box(box,nsb->natoms,x);*/
  /* Here the sorting of x (and q) is done.
   * Alternatively, one could just put the atoms in one of the
   * cr->nnodes slabs. That is much cheaper than sorting.
   */
  for(i=0; (i<nsb->natoms); i++)
    index[i] = i;
  xptr = x;
  qsort(index,nsb->natoms,sizeof(index[0]),comp_xptr);
  xptr = NULL; /* To trap unintentional use of the ptr */
  /* After sorting we only need the part that is to be computed on
   * this processor. We also compute mu_tot here (the system dipole).
   */
  clear_rvec(mu_tot[0]);
  for(i=START(nsb); (i<START(nsb)+HOMENR(nsb)); i++) {
    q       = charge[index[i]];
    qbuf[i] = q;
    for(m=0; (m<DIM); m++) {
      xx           = x[index[i]][m];
      xbuf[i][m]   = xx;
      mu_tot[0][m] += q*xx;
    }
  }
  copy_rvec(mu_tot[0],mu_tot[1]);
  pr_rvec(debug,0,"qbuf",qbuf,nsb->natoms,TRUE);
  pr_rvecs(debug,0,"xbuf",xbuf,nsb->natoms);
  pr_rvecs(debug,0,"box",box,DIM);
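  /* Loop over all pairs of energy groups (jg >= ig). For each pair the
   * charges of atoms outside the two groups are zeroed in qqbuf, so the
   * PME energy plus Ewald long-range correction stored in epme[ig][jg]
   * only contains contributions involving groups ig and jg. */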
  for(ig=0; (ig<ngroups); ig++) {
    for(jg=ig; (jg<ngroups); jg++) {
      if (ngroups > 1) {
        for(i=START(nsb); (i<START(nsb)+HOMENR(nsb)); i++) {
          if ((cENER[i] == ig) || (cENER[i] == jg))
            qqbuf[i] = qbuf[i];
          else
            qqbuf[i] = 0;
        }
        qptr = qqbuf;
      }
      else
        qptr = qbuf;
      ener  = do_pme(fp,bVerbose,ir,xbuf,f,qptr,qptr,box,cr,
                     nsb,nrnb,vir,fr->ewaldcoeff,FALSE,0,&dvdl,FALSE);
      vcorr = ewald_LRcorrection(fp,nsb,cr,fr,qptr,qptr,excl,xbuf,box,mu_tot,
                                 ir->ewald_geometry,ir->epsilon_surface,
                                 0,&dvdl,&vdip,&vcharge);
      gmx_sum(1,&vcorr,cr);
      epme[ig][jg] = ener+vcorr;
    }
  }
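  /* Convert the pair energies to interaction energies: for ig != jg,
   * epme[ig][jg] initially holds E(ig+jg); subtracting the two intra-group
   * terms E(ig) and E(jg) leaves the ig-jg interaction energy. */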
  if (fp_xvg) {
    fprintf(fp_xvg,"%10.3f",tm);
    for(ig=0; (ig<ngroups); ig++) {
      for(jg=ig; (jg<ngroups); jg++) {
        if (ig != jg)
          epme[ig][jg] -= epme[ig][ig]+epme[jg][jg];
        fprintf(fp_xvg," %12.5e",epme[ig][jg]);
      }
    }
    fprintf(fp_xvg,"\n");
  }
  fprintf(fp,"Time: %10.3f Energy: %12.5e Correction: %12.5e Total: %12.5e\n",
          tm,ener,vcorr,ener+vcorr);
  if (fp_xvg)
    fprintf(fp_xvg,"%10.3f %12.5e %12.5e %12.5e\n",tm,ener+vcorr,vdip,vcharge);
  m_add(vir,vir_corr,vir_tot);
  gmx_sum(9,vir_tot[0],cr);
  pr_rvecs(fp,0,"virial",vir_tot,DIM);
int main(int argc,char *argv[])
{
  static char *desc[] = {
    "The pmetest program tests the scaling of the PME code. When only given",
    "a tpr file it will compute PME for one frame. When given a trajectory",
    "it will do so for all the frames in the trajectory. Before the PME",
    "routine is called the coordinates are sorted along the X-axis.[PAR]",
    "As an extra service to the public the program can also compute",
    "long-range Coulomb energies for components of the system. When the",
    "[TT]-groups[tt] flag is given to the program the energy groups",
    "from the tpr file will be read, and half an energy matrix computed."
  };
  static t_filenm fnm[] = {
    { efTPX, NULL,  NULL,       ffREAD  },
    { efTRN, "-o",  NULL,       ffWRITE },
    { efLOG, "-g",  "pme",      ffWRITE },
    { efTRX, "-f",  NULL,       ffOPTRD },
    { efXVG, "-x",  "ener-pme", ffWRITE }
  };
#define NFILE asize(fnm)
  /* Command line options */
  static bool bVerbose=FALSE;
  static bool bOptFFT=FALSE;
  static bool bSort=FALSE;
  static int  ewald_geometry=eewg3D;
  static int  nnodes=1;
  static int  nthreads=1;
  static int  pme_order=0;
  static rvec grid = { -1, -1, -1 };
  static real rc   = 0.0;
  static real dtol = 0.0;
  static bool bGroups = FALSE;
  static t_pargs pa[] = {
    { "-np",     FALSE, etINT,  {&nnodes},
      "Number of nodes, must be the same as used for grompp" },
    { "-nt",     FALSE, etINT,  {&nthreads},
      "Number of threads to start on each node" },
    { "-v",      FALSE, etBOOL, {&bVerbose},
      "Be loud and noisy" },
    { "-sort",   FALSE, etBOOL, {&bSort},
      "Sort coordinates. Crucial for domain decomposition." },
    { "-grid",   FALSE, etRVEC, {&grid},
      "Number of grid cells in X, Y, Z dimension (if -1 use from tpr)" },
    { "-order",  FALSE, etINT,  {&pme_order},
      "Order of the PME spreading algorithm" },
    { "-groups", FALSE, etBOOL, {&bGroups},
      "Compute half an energy matrix based on the energy groups in your tpr file" },
    { "-rc",     FALSE, etREAL, {&rc},
      "Rcoulomb for Ewald summation" },
    { "-tol",    FALSE, etREAL, {&dtol},
      "Tolerance for Ewald summation" }
  };
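  /* Hypothetical example invocation (the tpr file is picked up through the
   * default -s option of the efTPX entry above; the MPI launcher and the
   * grompp -np pre-partitioning depend on the local setup):
   *
   *   mpirun -np 4 pmetest -np 4 -f traj.trr -groups -v
   *
   * This recomputes the PME energy for every frame of traj.trr on four
   * nodes and writes the per-frame (group) energies to ener-pme.xvg. */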
  int        natoms,step,status,i,ncg,root;
  real       t,lambda,ewaldcoeff,qtot;
  real       *charge,*qbuf,*qqbuf;
  /* Start the actual parallel code if necessary */
  cr = init_par(&argc,&argv);

  CopyRight(stderr,argv[0]);
  /* Parse command line on all processors, arguments are passed on in
   * init_par (see above)
   */
  parse_common_args(&argc,argv,
                    PCA_KEEP_ARGS | PCA_NOEXIT_ON_ARGS | PCA_BE_NICE |
                    PCA_CAN_SET_DEFFNM | (MASTER(cr) ? 0 : PCA_QUIET),
                    NFILE,fnm,asize(pa),pa,asize(desc),desc,0,NULL);
#ifndef GMX_MPI
  if (nnodes > 1)
    gmx_fatal(FARGS,"GROMACS compiled without MPI support - can't do parallel runs");
#endif
#ifndef GMX_THREAD_SHM_FDECOMP
  if (nthreads > 1)
    gmx_fatal(FARGS,"GROMACS compiled without threads support - can only use one thread");
#endif
  /* Open log files on all processors */
  open_log(ftp2fn(efLOG,NFILE,fnm),cr);
  /* Read tpr file etc. */
  read_tpxheader(ftp2fn(efTPX,NFILE,fnm),&tpx,FALSE,NULL,NULL);
  read_tpx(ftp2fn(efTPX,NFILE,fnm),&step,&t,&lambda,ir,
           box,&natoms,x,NULL,NULL,&top);
  for(i=0; (i<natoms); i++) {
    charge[i] = top.atoms.atom[i].q;
  }
  if (opt2parg_bSet("-grid",asize(pa),pa)) {
    ir->nkx = grid[XX];
    ir->nky = grid[YY];
    ir->nkz = grid[ZZ];
  }
  /* Check command line parameters for consistency */
  if ((ir->nkx <= 0) || (ir->nky <= 0) || (ir->nkz <= 0))
    gmx_fatal(FARGS,"PME grid = %d %d %d",ir->nkx,ir->nky,ir->nkz);
  if (opt2parg_bSet("-rc",asize(pa),pa))
    ir->rcoulomb = rc;
  if (ir->rcoulomb <= 0)
    gmx_fatal(FARGS,"rcoulomb should be > 0 (not %f)",ir->rcoulomb);
  if (opt2parg_bSet("-order",asize(pa),pa))
    ir->pme_order = pme_order;
  if (ir->pme_order <= 0)
    gmx_fatal(FARGS,"pme_order should be > 0 (not %d)",ir->pme_order);
  if (opt2parg_bSet("-tol",asize(pa),pa))
    ir->ewald_rtol = dtol;
  if (ir->ewald_rtol <= 0)
    gmx_fatal(FARGS,"ewald_rtol should be > 0 (not %f)",ir->ewald_rtol);
  /* Add parallelization code here */
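  /* Divide the charge groups evenly over the nodes by overwriting the
   * multinr index of the charge-group block; all remaining entries up to
   * MAXNODES point at the total number of charge groups. */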
  ncg = top.blocks[ebCGS].multinr[0];
  for(i=0; (i<cr->nnodes-1); i++)
    top.blocks[ebCGS].multinr[i] = min(ncg,(ncg*(i+1))/cr->nnodes);
  for( ; (i<MAXNODES); i++)
    top.blocks[ebCGS].multinr[i] = ncg;
  /* Set some variables to zero to avoid core dumps */
  ir->opts.ngtc = ir->opts.ngacc = ir->opts.ngfrz = ir->opts.ngener = 0;
  /* Distribute the data over processors */
  MPI_Bcast(&natoms,1,MPI_INT,root,MPI_COMM_WORLD);
  MPI_Bcast(ir,sizeof(*ir),MPI_BYTE,root,MPI_COMM_WORLD);
  MPI_Bcast(&qtot,1,GMX_MPI_REAL,root,MPI_COMM_WORLD);
  /* Call some dedicated communication routines; the master sends n-1 times */
  if (MASTER(cr)) {
    for(i=1; (i<cr->nnodes); i++) {
      mv_block(i,&(top.blocks[ebCGS]));
      mv_block(i,&(top.atoms.excl));
    }
  }
  else {
    ld_block(root,&(top.blocks[ebCGS]));
    ld_block(root,&(top.atoms.excl));
  }
  MPI_Bcast(charge,natoms,GMX_MPI_REAL,root,MPI_COMM_WORLD);
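  /* The Ewald splitting coefficient is determined from rcoulomb and
   * ewald_rtol; ewald_rtol is (roughly) the relative magnitude of the
   * direct-space potential at the cut-off, erfc(beta*rcoulomb) ~ ewald_rtol. */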
  ewaldcoeff = calc_ewaldcoeff(ir->rcoulomb,ir->ewald_rtol);

  pr_inputrec(stdlog,0,"Inputrec",ir);
  /* Allocate memory for temp arrays etc. */

  /* Initialize the PME code */
  init_pme(stdlog,cr,ir->nkx,ir->nky,ir->nkz,ir->pme_order,
           natoms,FALSE,bOptFFT,ewald_geometry);
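  /* This sets up the PME grid of ir->nkx * ir->nky * ir->nkz points with
   * charge spreading of order ir->pme_order (the -order option above);
   * bOptFFT and ewald_geometry are passed through from the defaults set
   * earlier. */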
  /* MFlops accounting */

  /* Initialize the work division */
  calc_nsb(stdlog,&(top.blocks[ebCGS]),cr->nnodes,nsb,0);
  nsb->nodeid = cr->nodeid;
  print_nsb(stdlog,"pmetest",nsb);
  /* Initialize forcerec */
  mdatoms = atoms2md(stdlog,&top.atoms,ir->opts.nFreeze,ir->eI,
                     ir->delta_t,0,ir->opts.tau_t,FALSE,FALSE);
  init_forcerec(stdlog,fr,ir,&top,cr,mdatoms,nsb,box,FALSE,NULL,NULL,FALSE);
  /* First do PME based on coordinates in tpr file, send them to
   * other processors if needed.
   */
  fprintf(stdlog,"-----\n"
          "Results based on tpr file %s\n",ftp2fn(efTPX,NFILE,fnm));
  MPI_Bcast(x[0],natoms*DIM,GMX_MPI_REAL,root,MPI_COMM_WORLD);
  MPI_Bcast(box[0],DIM*DIM,GMX_MPI_REAL,root,MPI_COMM_WORLD);
  MPI_Bcast(&t,1,GMX_MPI_REAL,root,MPI_COMM_WORLD);
  do_my_pme(stdlog,0,bVerbose,ir,x,xbuf,f,charge,qbuf,qqbuf,box,bSort,
            cr,nsb,&nrnb,&(top.atoms.excl),qtot,fr,index,NULL,
            bGroups ? ir->opts.ngener : 1,mdatoms->cENER);
  /* If we have a trajectory file, we will read the frames in it and compute
   * the PME energy for each of them.
   */
  if (ftp2bSet(efTRX,NFILE,fnm)) {
    fprintf(stdlog,"-----\n"
            "Results based on trx file %s\n",ftp2fn(efTRX,NFILE,fnm));
    natoms = read_first_x(&status,ftp2fn(efTRX,NFILE,fnm),&t,&x,box);
    if (natoms != top.atoms.nr)
      gmx_fatal(FARGS,"natoms in trx = %d, in tpr = %d",natoms,top.atoms.nr);
    fp = xvgropen(ftp2fn(efXVG,NFILE,fnm),"PME Energy","Time (ps)","E (kJ/mol)");
    /* Send coordinates, box and time to the other nodes */
    MPI_Bcast(x[0],natoms*DIM,GMX_MPI_REAL,root,MPI_COMM_WORLD);
    MPI_Bcast(box[0],DIM*DIM,GMX_MPI_REAL,root,MPI_COMM_WORLD);
    MPI_Bcast(&t,1,GMX_MPI_REAL,root,MPI_COMM_WORLD);
    rm_pbc(&top.idef,nsb->natoms,box,x,x);
    /* Call the PME wrapper function */
    do_my_pme(stdlog,t,bVerbose,ir,x,xbuf,f,charge,qbuf,qqbuf,box,bSort,cr,
              nsb,&nrnb,&(top.atoms.excl),qtot,fr,index,fp,
              bGroups ? ir->opts.ngener : 1,mdatoms->cENER);
    /* Only the master processor reads more data */
    if (MASTER(cr))
      bCont = read_next_x(status,&t,natoms,x,box);
    /* Check whether we need to continue */
    MPI_Bcast(&bCont,1,MPI_INT,root,MPI_COMM_WORLD);
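    /* bCont was read by the master only (see above) and has just been
     * broadcast, so all nodes leave the frame loop together once the
     * trajectory is exhausted. */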
  /* Finish I/O, close files */
  /* Do some final I/O about performance, might be useful in debugging */
  fprintf(stdlog,"-----\n");
  print_nrnb(stdlog,&nrnb);
  /* Finish the parallel stuff */
  if (gmx_parallel_env_initialized())

  /* Thank the audience, as usual */