/*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2009-2014 The Regents of
the University of Michigan All rights reserved.

HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.

You may redistribute, use, and create derivative works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:

* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.

* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.

* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html

* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/

* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.

IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

// Maintainer: ndtrung

#include "RigidData.cuh"
// assumed headers: assert() is used in the host drivers below, and exyzFromQuaternion()
// and quatquat() are expected to come from the quaternion math header
#include "QuaternionMath.h"
#include <assert.h>

/*! \file RigidData.cu
    \brief GPU kernels used to update the constituent particles of rigid bodies
*/

//! Kernel for setting R and V of rigid body particles
/*!
    \param pdata_pos Particle position
    \param pdata_vel Particle velocity
    \param pdata_image Particle image
    \param pdata_orientation Particle orientation
    \param d_pgroup_idx Particle index
    \param n_pgroup Number of particles in the group
    \param d_particle_offset Local index of a particle in the body
    \param d_particle_body Body index of a particle
    \param d_rigid_orientation Body orientation (quaternion)
    \param d_rigid_com Body center of mass
    \param d_rigid_vel Body velocity
    \param d_rigid_angvel Body angular velocity
    \param d_rigid_image Body image
    \param d_rigid_particle_dis Position of a particle in the body frame
    \param d_rigid_particle_orientation Orientation of a particle in the body frame
    \param nmax Maximum number of particles per body
    \param box Box dimensions for periodic boundary condition handling
    \tparam set_x True if positions, images, and orientations are written out; false if only velocities are updated
*/
template<bool set_x>
__global__ void gpu_rigid_setRV_kernel(Scalar4* pdata_pos,
                                       Scalar4* pdata_vel,
                                       int3* pdata_image,
                                       Scalar4* pdata_orientation,
                                       unsigned int *d_pgroup_idx,
                                       unsigned int n_pgroup,
                                       unsigned int *d_particle_offset,
                                       unsigned int *d_particle_body,
                                       Scalar4* d_rigid_orientation,
                                       Scalar4* d_rigid_com,
                                       Scalar4* d_rigid_vel,
                                       Scalar4* d_rigid_angvel,
                                       int3* d_rigid_image,
                                       Scalar4* d_rigid_particle_dis,
                                       Scalar4* d_rigid_particle_orientation,
                                       unsigned int nmax,
                                       BoxDim box)
    {
    Scalar4 com, vel, angvel, ex_space, ey_space, ez_space;
    int3 body_image = make_int3(0, 0, 0);

    int group_idx = blockIdx.x * blockDim.x + threadIdx.x;

    if (group_idx >= n_pgroup)
        return;

    unsigned int pidx = d_pgroup_idx[group_idx];
    unsigned int idx_body = d_particle_body[pidx];
    unsigned int particle_offset = d_particle_offset[pidx];
    Scalar4 body_orientation = d_rigid_orientation[idx_body];

    com = d_rigid_com[idx_body];
    vel = d_rigid_vel[idx_body];
    angvel = d_rigid_angvel[idx_body];

    // the body image is only needed when positions are written out
    if (set_x)
        body_image = d_rigid_image[idx_body];

    exyzFromQuaternion(body_orientation, ex_space, ey_space, ez_space);

    int localidx = idx_body * nmax + particle_offset;
    Scalar4 particle_pos = d_rigid_particle_dis[localidx];
    Scalar4 constituent_orientation = d_rigid_particle_orientation[localidx];

    // compute ri with new orientation
    Scalar3 ri;
    ri.x = ex_space.x * particle_pos.x + ey_space.x * particle_pos.y + ez_space.x * particle_pos.z;
    ri.y = ex_space.y * particle_pos.x + ey_space.y * particle_pos.y + ez_space.y * particle_pos.z;
    ri.z = ex_space.z * particle_pos.x + ey_space.z * particle_pos.y + ez_space.z * particle_pos.z;

    Scalar4 porientation;

    // x_particle = com + ri
    Scalar3 ppos;
    ppos.x = com.x + ri.x;
    ppos.y = com.y + ri.y;
    ppos.z = com.z + ri.z;

    // time to fix the periodic boundary conditions
    int3 image = body_image;
    box.wrap(ppos, image);

    // update particle orientation
    quatquat(body_orientation,
             constituent_orientation,
             porientation);

    // v_particle = vel + angvel x ri
    Scalar4 pvel = pdata_vel[pidx];
    pvel.x = vel.x + angvel.y * ri.z - angvel.z * ri.y;
    pvel.y = vel.y + angvel.z * ri.x - angvel.x * ri.z;
    pvel.z = vel.z + angvel.x * ri.y - angvel.y * ri.x;

    // write out the results
    if (set_x)
        {
        pdata_pos[pidx] = make_scalar4(ppos.x, ppos.y, ppos.z, pdata_pos[pidx].w);
        pdata_image[pidx] = image;
        pdata_orientation[pidx] = porientation;
        }

    pdata_vel[pidx] = pvel;
    }
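
// In summary, each thread above handles one constituent particle i of a rigid body:
//     r_i = R(q_body) * d_i          rotate the body-frame offset into the world frame
//     x_i = x_com + r_i              position, then wrapped into the periodic box
//     q_i = q_body * q_i^body        orientation by quaternion composition
//     v_i = v_com + angvel x r_i     velocity from the body's angular velocity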

// Sets R and v of particles of the rigid body on the GPU
/*! \param d_pos array of particle positions
    \param d_vel array of particle velocities
    \param d_image array of particle images
    \param d_body array of particle body ids
    \param rigid_data Rigid body data
    \param d_pdata_orientation Particle orientations
    \param d_group_members Device array listing the indices of the members of the group to integrate (all particles in rigid bodies)
    \param group_size Number of members in the group
    \param box Box dimensions for periodic boundary condition handling
    \param set_x boolean indicating whether the positions are changed or not (first or second step of integration)
*/
cudaError_t gpu_rigid_setRV(Scalar4 *d_pos,
                            Scalar4 *d_vel,
                            int3 *d_image,
                            unsigned int *d_body,
                            const gpu_rigid_data_arrays& rigid_data,
                            Scalar4 *d_pdata_orientation,
                            unsigned int *d_group_members,
                            unsigned int group_size,
                            const BoxDim& box,
                            bool set_x)
    {
    assert(d_pos);
    assert(d_vel);
    assert(d_image);
    assert(d_body);
    assert(d_pdata_orientation);
    assert(d_group_members);

    assert(rigid_data.particle_offset);
    assert(rigid_data.orientation);
    assert(rigid_data.com);
    assert(rigid_data.vel);
    assert(rigid_data.angvel);
    assert(rigid_data.body_image);
    assert(rigid_data.particle_pos);
    assert(rigid_data.particle_orientation);

    unsigned int nmax = rigid_data.nmax;

    unsigned int block_size = 192;
    dim3 particle_grid(group_size/block_size+1, 1, 1);
    dim3 particle_threads(block_size, 1, 1);

    if (set_x)
        gpu_rigid_setRV_kernel<true><<< particle_grid, particle_threads >>>(d_pos,
                                                                            d_vel,
                                                                            d_image,
                                                                            d_pdata_orientation,
                                                                            d_group_members,
                                                                            group_size,
                                                                            rigid_data.particle_offset,
                                                                            d_body,
                                                                            rigid_data.orientation,
                                                                            rigid_data.com,
                                                                            rigid_data.vel,
                                                                            rigid_data.angvel,
                                                                            rigid_data.body_image,
                                                                            rigid_data.particle_pos,
                                                                            rigid_data.particle_orientation,
                                                                            nmax,
                                                                            box);
    else
        gpu_rigid_setRV_kernel<false><<< particle_grid, particle_threads >>>(d_pos,
                                                                             d_vel,
                                                                             d_image,
                                                                             d_pdata_orientation,
                                                                             d_group_members,
                                                                             group_size,
                                                                             rigid_data.particle_offset,
                                                                             d_body,
                                                                             rigid_data.orientation,
                                                                             rigid_data.com,
                                                                             rigid_data.vel,
                                                                             rigid_data.angvel,
                                                                             rigid_data.body_image,
                                                                             rigid_data.particle_pos,
                                                                             rigid_data.particle_orientation,
                                                                             nmax,
                                                                             box);

    return cudaSuccess;
    }
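
// Note that set_x only selects whether positions, images, and orientations are rewritten;
// constituent particle velocities are updated by both template instantiations above.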

//! Kernel driven by gpu_compute_virial_correction_end()
__global__ void gpu_compute_virial_correction_end_kernel(Scalar *d_net_virial,
                                                          unsigned int virial_pitch,
                                                          const Scalar4 *d_net_force,
                                                          const Scalar4 *d_oldpos,
                                                          const Scalar4 *d_oldvel,
                                                          const Scalar4 *d_vel,
                                                          const unsigned int *d_body,
                                                          Scalar deltaT,
                                                          unsigned int N)
    {
    unsigned int pidx = blockIdx.x * blockDim.x + threadIdx.x;

    if (pidx >= N)
        return;

    if (d_body[pidx] != NO_BODY)
        {
        // calculate the virial from the position and velocity from the previous step
        Scalar4 old_vel = d_oldvel[pidx];
        Scalar4 old_pos = d_oldpos[pidx];
        Scalar4 vel = d_vel[pidx];
        Scalar mass = vel.w;   // the particle mass is stored in the w component of the velocity
        Scalar4 net_force = d_net_force[pidx];

        Scalar3 fc;
        fc.x = mass * (vel.x - old_vel.x) / deltaT - net_force.x;
        fc.y = mass * (vel.y - old_vel.y) / deltaT - net_force.y;
        fc.z = mass * (vel.z - old_vel.z) / deltaT - net_force.z;

        d_net_virial[0*virial_pitch+pidx] += old_pos.x * fc.x;
        d_net_virial[1*virial_pitch+pidx] += old_pos.x * fc.y;
        d_net_virial[2*virial_pitch+pidx] += old_pos.x * fc.z;
        d_net_virial[3*virial_pitch+pidx] += old_pos.y * fc.y;
        d_net_virial[4*virial_pitch+pidx] += old_pos.y * fc.z;
        d_net_virial[5*virial_pitch+pidx] += old_pos.z * fc.z;
        }
    }
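
// The correction above follows from the rigid constraint: the total force actually applied
// to a constituent particle over the step is m * (v - v_old) / deltaT, so the constraint force
// is fc = m * (v - v_old) / deltaT - F_net, and its contribution to the six upper-triangular
// virial components (xx, xy, xz, yy, yz, zz) is r_old_a * fc_b, accumulated per particle.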

/*! \param d_net_virial Net virial data to update with correction terms
    \param virial_pitch Pitch of d_net_virial
    \param d_net_force Net force on each particle
    \param d_oldpos Old position of particles saved at the start of the step
    \param d_oldvel Old velocity of particles saved at the start of the step
    \param d_vel Current velocity of particles at the end of the step
    \param d_body Body index of each particle
    \param deltaT Step size
    \param N Number of particles in the box
*/
cudaError_t gpu_compute_virial_correction_end(Scalar *d_net_virial,
                                              const unsigned int virial_pitch,
                                              const Scalar4 *d_net_force,
                                              const Scalar4 *d_oldpos,
                                              const Scalar4 *d_oldvel,
                                              const Scalar4 *d_vel,
                                              const unsigned int *d_body,
                                              Scalar deltaT,
                                              unsigned int N)
    {
    assert(d_net_virial);
    assert(d_net_force);
    assert(d_oldpos);
    assert(d_oldvel);
    assert(d_vel);
    assert(d_body);

    unsigned int block_size = 192;
    dim3 particle_grid(N/block_size+1, 1, 1);
    dim3 particle_threads(block_size, 1, 1);

    gpu_compute_virial_correction_end_kernel<<<particle_grid, particle_threads>>>(d_net_virial,
                                                                                   virial_pitch,
                                                                                   d_net_force,
                                                                                   d_oldpos,
                                                                                   d_oldvel,
                                                                                   d_vel,
                                                                                   d_body,
                                                                                   deltaT,
                                                                                   N);

    return cudaSuccess;
    }
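
// Example (hypothetical) host-side call, assuming the device arrays are already allocated
// and sized to N particles; the names here are illustrative only:
//
//     gpu_compute_virial_correction_end(d_net_virial, virial_pitch, d_net_force,
//                                       d_oldpos, d_oldvel, d_vel, d_body,
//                                       deltaT, N);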