// Test type reference counting for SendReq
void typefree_isend_test(int rank, int size) {
    int i;
    int source = 0, dest = size - 1;
    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Request req;
    MPI_Datatype strideType;
    MPI_Datatype tmpType[1024];
    std::vector<int> buf;

    if (size < 2) {
        fprintf(stderr, "This test requires at least two processes.\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /*
     * The idea here is to create a simple but non-contig datatype,
     * perform an isend with it, free it, and then create
     * many new datatypes. While not a complete test, if the datatype
     * was freed and the space was reused, this test may detect
     * that error.
     */
    MPI_Type_vector(VEC_NELM, 1, VEC_STRIDE, MPI_INT, &strideType);
    MPI_Type_commit(&strideType);

    if (rank == source) {
        buf.resize(VEC_NELM * VEC_STRIDE);
        std::iota(buf.begin(), buf.end(), 0);

        MPI_Isend(buf.data(), 1, strideType, dest, 0, comm, &req);
        MPI_Type_free(&strideType);

        /*
         * If strideType was incorrectly freed and the following types reuse its memory, then
         * we should see that elements in the destination's receive buffer are from the initial
         * VEC_NELM / VEC_STRIDE "rows" of the send buffer instead of the first "column".
         */
        for (i = 0; i < 1024; i++) {
            MPI_Type_vector(VEC_NELM, 1, 1, MPI_INT, &tmpType[i]);
            MPI_Type_commit(&tmpType[i]);
        }

        /* Synchronize with the receiver */
        MPI_Sendrecv(NULL, 0, MPI_INT, dest, 1, NULL, 0, MPI_INT, dest, 1, comm, MPI_STATUS_IGNORE);

        MPI_Wait(&req, MPI_STATUS_IGNORE);
        for (i = 0; i < 1024; i++) {
            MPI_Type_free(&tmpType[i]);
        }
    }
    else if (rank == dest) {
        buf.resize(VEC_NELM);
        for (i = 0; i < VEC_NELM; i++)
            buf[i] = -1; /* sentinel values the receive below must overwrite */

        /* Synchronize with the sender */
        MPI_Sendrecv(NULL, 0, MPI_INT, source, 1, NULL, 0, MPI_INT, source, 1, comm, MPI_STATUS_IGNORE);
        MPI_Recv(buf.data(), VEC_NELM, MPI_INT, source, 0, comm, MPI_STATUS_IGNORE);

        for (i = 0; i < VEC_NELM; i++) {
            if (buf[i] != i * VEC_STRIDE) {
                printf("buf[%d] = %d, expected %d\n", i, buf[i], i * VEC_STRIDE);
            }
        }
    }

    /* Clean up the strideType */
    if (rank != source) {
        MPI_Type_free(&strideType);
    }
}
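/*
 * Illustrative layout for the isend/issend tests (shown for VEC_NELM = 4 and
 * VEC_STRIDE = 3; the actual values come from the macros defined earlier in
 * this file):
 *
 *   sender's buf:        0  1  2  3  4  5  6  7  8  9 10 11
 *   strideType selects:  0        3        6        9
 *
 * The receiver unpacks these into a contiguous buffer, so buf[i] should equal
 * i * VEC_STRIDE. If the freed strideType's memory were reused by the
 * stride-1 tmpType vectors, the sender would instead pack the consecutive
 * elements 0, 1, 2, 3.
 */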
// Test type reference counting for SsendReq
void typefree_issend_test(int rank, int size) {
    int i;
    int source = 0, dest = size - 1;
    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Request req;
    MPI_Datatype strideType;
    MPI_Datatype tmpType[1024];
    std::vector<int> buf;

    if (size < 2) {
        fprintf(stderr, "This test requires at least two processes.\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /*
     * The idea here is to create a simple but non-contig datatype,
     * perform an issend with it, free it, and then create
     * many new datatypes. While not a complete test, if the datatype
     * was freed and the space was reused, this test may detect
     * that error.
     */
    MPI_Type_vector(VEC_NELM, 1, VEC_STRIDE, MPI_INT, &strideType);
    MPI_Type_commit(&strideType);

    if (rank == source) {
        buf.resize(VEC_NELM * VEC_STRIDE);
        std::iota(buf.begin(), buf.end(), 0);

        MPI_Issend(buf.data(), 1, strideType, dest, 0, comm, &req);
        MPI_Type_free(&strideType);

        /*
         * If strideType was incorrectly freed and the following types reuse its memory, then
         * we should see that elements in the destination's receive buffer are from the initial
         * VEC_NELM / VEC_STRIDE "rows" of the send buffer instead of the first "column".
         */
        for (i = 0; i < 1024; i++) {
            MPI_Type_vector(VEC_NELM, 1, 1, MPI_INT, &tmpType[i]);
            MPI_Type_commit(&tmpType[i]);
        }

        /* Synchronize with the receiver */
        MPI_Sendrecv(NULL, 0, MPI_INT, dest, 1, NULL, 0, MPI_INT, dest, 1, comm, MPI_STATUS_IGNORE);

        MPI_Wait(&req, MPI_STATUS_IGNORE);
        for (i = 0; i < 1024; i++) {
            MPI_Type_free(&tmpType[i]);
        }
    }
    else if (rank == dest) {
        buf.resize(VEC_NELM);
        for (i = 0; i < VEC_NELM; i++)
            buf[i] = -1; /* sentinel values the receive below must overwrite */

        /* Synchronize with the sender */
        MPI_Sendrecv(NULL, 0, MPI_INT, source, 1, NULL, 0, MPI_INT, source, 1, comm, MPI_STATUS_IGNORE);
        MPI_Recv(buf.data(), VEC_NELM, MPI_INT, source, 0, comm, MPI_STATUS_IGNORE);

        for (i = 0; i < VEC_NELM; i++) {
            if (buf[i] != i * VEC_STRIDE) {
                printf("buf[%d] = %d, expected %d\n", i, buf[i], i * VEC_STRIDE);
            }
        }
    }

    /* Clean up the strideType */
    if (rank != source) {
        MPI_Type_free(&strideType);
    }
}
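// User-defined reduction operation for typefree_ireduce_test below: adds the
// corresponding strided elements (one int out of every `stride`). The
// hard-coded stride must match the stride of strideType in that test, i.e.
// the size of the communicator it is given.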
void vector_add(int *invec, int *inoutvec, int *len, MPI_Datatype *dtype) {
    int count = VEC_NELM;
    // Needs to be the same as the number of processes in the communicator used for typefree_ireduce_test
    int stride = 2;

    for (int i = 0; i < count; i++) {
        inoutvec[i*stride] += invec[i*stride];
    }
}
// Test type reference counting for RednReq
void typefree_ireduce_test(MPI_Comm comm) {
    int i, rank, size, root = 0;
    MPI_Request req;
    MPI_Op op;
    MPI_Datatype strideType;
    MPI_Datatype tmpType[1024];

    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    if (size < 2) {
        fprintf(stderr, "This test requires at least two processes.\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    MPI_Type_vector(VEC_NELM, 1, size, MPI_INT, &strideType);
    MPI_Type_commit(&strideType);
    MPI_Op_create((MPI_User_function *)vector_add, 1, &op);

    std::vector<int> sendbuf(VEC_NELM * size * size, rank+1);
    std::vector<int> recvbuf(VEC_NELM * size * size, 1);

    if (rank == root) {
        MPI_Ireduce(sendbuf.data(), recvbuf.data(), 1, strideType, op, root, comm, &req);
        MPI_Type_free(&strideType);
        for (i = 0; i < 1024; i++) {
            MPI_Type_vector(VEC_NELM, 1, 1, MPI_INT, &tmpType[i]);
            MPI_Type_commit(&tmpType[i]);
        }

        // Sync with the non-root processes (the communicator built in main has only two)
        for (int r = root + 1; r < size; r++) {
            MPI_Sendrecv(NULL, 0, MPI_INT, r, 1, NULL, 0, MPI_INT, r, 1, comm, MPI_STATUS_IGNORE);
        }
        MPI_Wait(&req, MPI_STATUS_IGNORE);

        int expected = (size * (size+1)) / 2;

        for (i = 0; i < VEC_NELM; i++) {
            if (recvbuf[i*size] != expected) {
                printf("recvbuf[%d] = %d, expected %d\n", i*size, recvbuf[i*size], expected);
            }
        }

        for (i = 0; i < 1024; i++) {
            MPI_Type_free(&tmpType[i]);
        }
    } else {
        MPI_Sendrecv(NULL, 0, MPI_INT, root, 1, NULL, 0, MPI_INT, root, 1, comm, MPI_STATUS_IGNORE);
        MPI_Ireduce(sendbuf.data(), recvbuf.data(), 1, strideType, op, root, comm, &req);
        MPI_Wait(&req, MPI_STATUS_IGNORE);
        MPI_Type_free(&strideType);
    }

    MPI_Op_free(&op);
}
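/*
 * Sanity check for the reduction above: rank r contributes sendbuf values of
 * r + 1, so each reduced element is 1 + 2 + ... + size = size * (size + 1) / 2
 * (3 for the two-process communicator created in main).
 */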
// Test type reference counting for GatherReq
void typefree_igather_test(int rank, int size) {
    int i, root = 0;
    MPI_Request req;
    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Datatype contigType;
    MPI_Datatype tmpType[1024];

    std::vector<int> sendbuf(VEC_NELM * size);
    std::iota(sendbuf.begin(), sendbuf.end(), 0);

    if (size < 2) {
        fprintf(stderr, "This test requires at least two processes.\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    MPI_Type_contiguous(VEC_NELM, MPI_INT, &contigType);
    MPI_Type_commit(&contigType);

    if (rank == root) {
        /*
         * MPI_Gather places a newly received vector after the extent of the previous vector in recvbuf.
         * If contigType is incorrectly freed and its memory is reused, allocating this extra memory ensures
         * that the program does not segfault.
         */
        std::vector<int> recvbuf(VEC_NELM * size * size);
        for (i = 0; i < VEC_NELM * size * size; i++)
            recvbuf[i] = -1; /* sentinel values the gather below must overwrite */

        MPI_Igather(&sendbuf[rank], 1, contigType, recvbuf.data(), 1, contigType, root, comm, &req);
        MPI_Type_free(&contigType);

        for (i = 0; i < 1024; i++) {
            MPI_Type_vector(VEC_NELM, 1, size, MPI_INT, &tmpType[i]);
            MPI_Type_commit(&tmpType[i]);
        }

        // Sync with non-root processes
        for (int r = root + 1; r < size; r++) {
            MPI_Sendrecv(NULL, 0, MPI_INT, r, 1, NULL, 0, MPI_INT, r, 1, comm, MPI_STATUS_IGNORE);
        }
        MPI_Wait(&req, MPI_STATUS_IGNORE);

        for (i = 0; i < VEC_NELM * size; i++) {
            if (recvbuf[i] != i) {
                printf("recvbuf[%d] = %d, expected %d\n", i, recvbuf[i], i);
            }
        }

        for (i = 0; i < 1024; i++) {
            MPI_Type_free(&tmpType[i]);
        }
    } else {
        MPI_Sendrecv(NULL, 0, MPI_INT, root, 1, NULL, 0, MPI_INT, root, 1, comm, MPI_STATUS_IGNORE);

        MPI_Igather(&sendbuf[VEC_NELM*rank], VEC_NELM, MPI_INT, NULL, 0, MPI_DATATYPE_NULL, root, comm, &req);
        MPI_Wait(&req, MPI_STATUS_IGNORE);

        MPI_Type_free(&contigType);
    }
}
// Test type reference counting for GathervReq
void typefree_igatherv_test(int rank, int size) {
    int i, j, root = 0;
    MPI_Request req;
    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Datatype strideType, resizedType;
    MPI_Datatype tmpType[1024];
    std::vector<int> sendbuf(VEC_NELM * size);
    std::vector<int> recvcounts(size, 1), displs(size);

    std::iota(sendbuf.begin(), sendbuf.end(), 0);
    std::iota(displs.begin(), displs.end(), 0);

    if (size < 2) {
        fprintf(stderr, "This test requires at least two processes.\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    MPI_Type_vector(VEC_NELM, 1, size, MPI_INT, &strideType);
    MPI_Type_commit(&strideType);
    // This enables the root to receive vectors in column-major order.
    MPI_Type_create_resized(strideType, 0, sizeof(int), &resizedType);
    MPI_Type_commit(&resizedType);

    if (rank == root) {
        std::vector<int> recvbuf(VEC_NELM * size);
        for (i = 0; i < VEC_NELM * size; i++)
            recvbuf[i] = -1; /* sentinel values the gather below must overwrite */

        MPI_Igatherv(sendbuf.data(), VEC_NELM, MPI_INT, recvbuf.data(), recvcounts.data(), displs.data(), resizedType, root, comm, &req);
        MPI_Type_free(&strideType);
        MPI_Type_free(&resizedType);

        for (i = 0; i < 1024; i++) {
            MPI_Type_vector(VEC_NELM, 1, 1, MPI_INT, &tmpType[i]);
            MPI_Type_commit(&tmpType[i]);
        }

        // Sync with non-root processes
        for (int r = root + 1; r < size; r++) {
            MPI_Sendrecv(NULL, 0, MPI_INT, r, 1, NULL, 0, MPI_INT, r, 1, comm, MPI_STATUS_IGNORE);
        }
        MPI_Wait(&req, MPI_STATUS_IGNORE);

        // We expect to have received a "transposed" sendbuf.
        for (i = 0; i < size; i++) {
            for (j = 0; j < VEC_NELM; j++) {
                if (recvbuf[j*size + i] != i*VEC_NELM+j) {
                    printf("recvbuf[%d] = %d, expected %d\n", j*size+i, recvbuf[j*size+i], i*VEC_NELM+j);
                }
            }
        }

        for (i = 0; i < 1024; i++) {
            MPI_Type_free(&tmpType[i]);
        }
    } else {
        MPI_Sendrecv(NULL, 0, MPI_INT, root, 1, NULL, 0, MPI_INT, root, 1, comm, MPI_STATUS_IGNORE);

        MPI_Igatherv(&sendbuf[rank*VEC_NELM], VEC_NELM, MPI_INT, NULL, recvcounts.data(), displs.data(), strideType, root, comm, &req);
        MPI_Wait(&req, MPI_STATUS_IGNORE);

        MPI_Type_free(&strideType);
        MPI_Type_free(&resizedType);
    }
}
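/*
 * Note on the resized types used in the gatherv test above and in the
 * alltoallv/scatterv tests below: strideType picks VEC_NELM ints spaced
 * `size` ints apart, and MPI_Type_create_resized shrinks its extent to a
 * single int. Transferring one resizedType per rank with displacements
 * 0, 1, 2, ... therefore interleaves the per-rank vectors column by column
 * (a transpose) instead of placing them end to end.
 */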
// Test type reference counting for ATAReq
void typefree_ialltoallv_test(MPI_Comm comm) {
    int i, rank, size;
    MPI_Request req;
    MPI_Datatype strideType, resizedType;
    MPI_Datatype tmpType[1024];

    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    if (size < 2) {
        fprintf(stderr, "This test requires at least two processes.\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    MPI_Type_vector(VEC_NELM, 1, size, MPI_INT, &strideType);
    MPI_Type_commit(&strideType);
    MPI_Type_create_resized(strideType, 0, sizeof(int), &resizedType);
    MPI_Type_commit(&resizedType);

    std::vector<int> sendbuf(VEC_NELM * size), recvbuf(VEC_NELM * size);
    std::vector<int> sendcounts(size, 1), recvcounts(size, 1);
    std::vector<int> sdispls(size), rdispls(size);
    for (i = 0; i < size; i++) {
        sdispls[i] = (rank + i) % size;
        rdispls[i] = (rank + i) % size;
    }

    std::iota(sendbuf.begin(), sendbuf.end(), 0);
    for (i = 0; i < VEC_NELM * size; i++) {
        recvbuf[i] = -1; /* sentinel values the alltoallv below must overwrite */
    }

    if (rank == 0) {
        MPI_Ialltoallv(sendbuf.data(), sendcounts.data(), sdispls.data(), resizedType,
                       recvbuf.data(), recvcounts.data(), rdispls.data(), resizedType,
                       comm, &req);
        MPI_Type_free(&strideType);
        MPI_Type_free(&resizedType);

        for (i = 0; i < 1024; i++) {
            MPI_Type_vector(VEC_NELM, 1, 1, MPI_INT, &tmpType[i]);
            MPI_Type_commit(&tmpType[i]);
        }
        for (i = 0; i < 1024; i++) {
            MPI_Type_free(&tmpType[i]);
        }

        MPI_Sendrecv(NULL, 0, MPI_INT, 1, 1, NULL, 0, MPI_INT, 1, 1, comm, MPI_STATUS_IGNORE);
        MPI_Wait(&req, MPI_STATUS_IGNORE);

        for (i = 0; i < VEC_NELM * size; i++) {
            if (recvbuf[i] != i) {
                printf("recvbuf[%d] = %d, expected %d\n", i, recvbuf[i], i);
            }
        }
    } else if (rank == 1) {
        MPI_Sendrecv(NULL, 0, MPI_INT, 0, 1, NULL, 0, MPI_INT, 0, 1, comm, MPI_STATUS_IGNORE);
        MPI_Ialltoallv(sendbuf.data(), sendcounts.data(), sdispls.data(), resizedType,
                       recvbuf.data(), recvcounts.data(), rdispls.data(), resizedType,
                       comm, &req);
        MPI_Wait(&req, MPI_STATUS_IGNORE);
        MPI_Type_free(&strideType);
        MPI_Type_free(&resizedType);

        for (i = 0; i < VEC_NELM * size; i++) {
            if (recvbuf[i] != i) {
                printf("recvbuf[%d] = %d, expected %d\n", i, recvbuf[i], i);
            }
        }
    } else {
        MPI_Type_free(&strideType);
        MPI_Type_free(&resizedType);
    }
}
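/*
 * Worked example of the rotated displacements above (and reused below): rank r
 * sends its block (r + i) % size to rank i and stores the block received from
 * rank i at offset (r + i) % size. Rank i addresses the block it sends to
 * rank r the same way, so every block lands back at its original offset and
 * each rank's recvbuf should equal {0, 1, ..., VEC_NELM*size - 1}.
 */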
// Test type reference counting for ATAReq
void typefree_ialltoall_test(MPI_Comm comm) {
    int i, rank, size;
    MPI_Request req;
    MPI_Datatype contigType;
    MPI_Datatype tmpType[1024];

    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    if (size < 2) {
        fprintf(stderr, "This test requires at least two processes.\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    MPI_Type_contiguous(VEC_NELM, MPI_INT, &contigType);
    MPI_Type_commit(&contigType);

    std::vector<int> sendbuf(VEC_NELM * size), recvbuf(VEC_NELM * size);
    std::vector<int> sendcounts(size, 1), recvcounts(size, 1);
    std::vector<int> sdispls(size), rdispls(size);
    for (i = 0; i < size; i++) {
        sdispls[i] = (rank + i) % size;
        rdispls[i] = (rank + i) % size;
    }

    std::iota(sendbuf.begin(), sendbuf.end(), 0);
    for (i = 0; i < VEC_NELM * size; i++) {
        recvbuf[i] = -1; /* sentinel values the alltoallv below must overwrite */
    }

    if (rank == 0) {
        MPI_Ialltoallv(sendbuf.data(), sendcounts.data(), sdispls.data(), contigType,
                       recvbuf.data(), recvcounts.data(), rdispls.data(), contigType,
                       comm, &req);
        MPI_Type_free(&contigType);

        for (i = 0; i < 1024; i++) {
            MPI_Type_vector(VEC_NELM, 1, size, MPI_INT, &tmpType[i]);
            MPI_Type_commit(&tmpType[i]);
        }
        for (i = 0; i < 1024; i++) {
            MPI_Type_free(&tmpType[i]);
        }

        MPI_Sendrecv(NULL, 0, MPI_INT, 1, 1, NULL, 0, MPI_INT, 1, 1, comm, MPI_STATUS_IGNORE);
        MPI_Wait(&req, MPI_STATUS_IGNORE);

        for (i = 0; i < VEC_NELM * size; i++) {
            if (recvbuf[i] != i) {
                printf("recvbuf[%d] = %d, expected %d\n", i, recvbuf[i], i);
            }
        }
    } else if (rank == 1) {
        MPI_Sendrecv(NULL, 0, MPI_INT, 0, 1, NULL, 0, MPI_INT, 0, 1, comm, MPI_STATUS_IGNORE);
        MPI_Ialltoallv(sendbuf.data(), sendcounts.data(), sdispls.data(), contigType,
                       recvbuf.data(), recvcounts.data(), rdispls.data(), contigType,
                       comm, &req);
        MPI_Wait(&req, MPI_STATUS_IGNORE);
        MPI_Type_free(&contigType);

        for (i = 0; i < VEC_NELM * size; i++) {
            if (recvbuf[i] != i) {
                printf("recvbuf[%d] = %d, expected %d\n", i, recvbuf[i], i);
            }
        }
    } else {
        MPI_Type_free(&contigType);
    }
}
// Test type reference counting for ATAReq
void typefree_iscatterv_test(int rank, int size) {
    int i, root = 0;
    MPI_Request req;
    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Datatype strideType, resizedType;
    MPI_Datatype tmpType[1024];
    std::vector<int> recvbuf(VEC_NELM * size);
    std::vector<int> sendcounts(size, 1), displs(size);

    if (size < 2) {
        fprintf(stderr, "This test requires at least two processes.\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    MPI_Type_vector(VEC_NELM, 1, size, MPI_INT, &strideType);
    MPI_Type_commit(&strideType);
    // This enables the root to send vectors from sendbuf in column-major order.
    MPI_Type_create_resized(strideType, 0, sizeof(int), &resizedType);
    MPI_Type_commit(&resizedType);

    std::iota(displs.begin(), displs.end(), 0);
    for (i = 0; i < VEC_NELM; i++)
        recvbuf[i] = -1; /* sentinel values the scatter below must overwrite */

    if (rank == root) {
        std::vector<int> sendbuf(VEC_NELM * size);
        std::iota(sendbuf.begin(), sendbuf.end(), 0);

        MPI_Iscatterv(sendbuf.data(), sendcounts.data(), displs.data(), resizedType, recvbuf.data(), VEC_NELM, MPI_INT, root, comm, &req);
        MPI_Type_free(&strideType);
        MPI_Type_free(&resizedType);

        for (i = 0; i < 1024; i++) {
            MPI_Type_vector(VEC_NELM, 1, 1, MPI_INT, &tmpType[i]);
            MPI_Type_commit(&tmpType[i]);
        }

        // Sync with non-root processes
        for (int r = root + 1; r < size; r++) {
            MPI_Sendrecv(NULL, 0, MPI_INT, r, 1, NULL, 0, MPI_INT, r, 1, comm, MPI_STATUS_IGNORE);
        }
        MPI_Wait(&req, MPI_STATUS_IGNORE);

        for (i = 0; i < 1024; i++) {
            MPI_Type_free(&tmpType[i]);
        }
    } else {
        MPI_Sendrecv(NULL, 0, MPI_INT, root, 1, NULL, 0, MPI_INT, root, 1, comm, MPI_STATUS_IGNORE);

        MPI_Iscatterv(NULL, NULL, NULL, MPI_DATATYPE_NULL, recvbuf.data(), VEC_NELM, MPI_INT, root, comm, &req);
        MPI_Wait(&req, MPI_STATUS_IGNORE);

        MPI_Type_free(&strideType);
        MPI_Type_free(&resizedType);
    }

    for (i = 0; i < VEC_NELM; i++) {
        if (recvbuf[i] != i*size+rank) {
            printf("recvbuf[%d] = %d, expected %d\n", i, recvbuf[i], i*size+rank);
        }
    }
}
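// Test type reference counting for persistent requests: MPI permits calling
// MPI_Type_free on a datatype that a persistent request (MPI_Recv_init) still
// references, and the request must keep the type usable for subsequent
// MPI_Start calls until MPI_Request_free. That is what this test exercises.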
void typefree_persistent_req_test(int rank, int size) {
    int i;
    int source = 0, dest = size - 1;
    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Request req;
    MPI_Datatype strideType;
    MPI_Datatype tmpType[1024];
    std::vector<int> buf;

    MPI_Type_vector(VEC_NELM, 1, VEC_STRIDE, MPI_INT, &strideType);
    MPI_Type_commit(&strideType);

    if (rank == dest) {
        buf.resize(VEC_NELM * VEC_STRIDE);
        for (i = 0; i < VEC_NELM * VEC_STRIDE; i++) {
            buf[i] = -1; /* sentinel values the receives below must overwrite */
        }

        MPI_Recv_init(buf.data(), 1, strideType, source, 0, comm, &req);

        MPI_Start(&req);
        MPI_Wait(&req, MPI_STATUS_IGNORE);

        MPI_Type_free(&strideType);

        for (i = 0; i < 1024; i++) {
            MPI_Type_vector(VEC_NELM, 1, 1, MPI_INT, &tmpType[i]);
            MPI_Type_commit(&tmpType[i]);
        }

        /* Synchronize with the sender */
        MPI_Sendrecv(NULL, 0, MPI_INT, source, 1, NULL, 0, MPI_INT, source, 1, comm, MPI_STATUS_IGNORE);

        MPI_Start(&req);
        MPI_Wait(&req, MPI_STATUS_IGNORE);

        MPI_Request_free(&req);

        for (i = 0; i < 1024; i++) {
            MPI_Type_free(&tmpType[i]);
        }

        for (i = 0; i < VEC_NELM; i++) {
            if (buf[i*VEC_STRIDE] != i) {
                printf("buf[%d] = %d, expected %d\n", VEC_STRIDE * i, buf[VEC_STRIDE * i], i);
            }
        }
    } else if (rank == source) {
        buf.resize(VEC_NELM, 0);

        MPI_Isend(buf.data(), VEC_NELM, MPI_INT, dest, 0, comm, &req);
        MPI_Wait(&req, MPI_STATUS_IGNORE);

        /* Synchronize with the receiver */
        MPI_Sendrecv(NULL, 0, MPI_INT, dest, 1, NULL, 0, MPI_INT, dest, 1, comm, MPI_STATUS_IGNORE);

        std::iota(buf.begin(), buf.end(), 0);
        MPI_Isend(buf.data(), VEC_NELM, MPI_INT, dest, 0, comm, &req);
        MPI_Wait(&req, MPI_STATUS_IGNORE);
    }

    /* Clean up the strideType on ranks that did not free it above */
    if (rank != dest) {
        MPI_Type_free(&strideType);
    }
}
int main(int argc, char **argv) {
    int size, global_rank;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &global_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    if (global_rank == 0) printf("Testing datatype free isend\n");
    typefree_isend_test(global_rank, size);

    if (global_rank == 0) printf("Testing datatype free issend\n");
    typefree_issend_test(global_rank, size);

    if (global_rank == 0) printf("Testing datatype free igatherv\n");
    typefree_igatherv_test(global_rank, size);

    if (global_rank == 0) printf("Testing datatype free igather\n");
    typefree_igather_test(global_rank, size);

    if (global_rank == 0) printf("Testing datatype free persistent request\n");
    typefree_persistent_req_test(global_rank, size);

    int color = (global_rank == 0 || global_rank == 1) ? 0 : 1;
    MPI_Comm comm;
    MPI_Comm_split(MPI_COMM_WORLD, color, global_rank, &comm);

    if (global_rank == 0) printf("Testing datatype free ialltoallv\n");
    if (global_rank == 0 || global_rank == 1)
        typefree_ialltoallv_test(comm);

    if (global_rank == 0) printf("Testing datatype free ialltoall\n");
    if (global_rank == 0 || global_rank == 1)
        typefree_ialltoall_test(comm);

    /*
     * TODO: This test yields multiple errors/segfaults when compiled with AMPI, but
     * runs as expected when compiled with MPICH.
     */
    // if (global_rank == 0) printf("Testing datatype free ireduce\n");
    // if (global_rank == 0 || global_rank == 1)
    //     typefree_ireduce_test(comm);
    MPI_Comm_free(&comm);

    if (global_rank == 0) printf("Testing datatype free iscatterv\n");
    typefree_iscatterv_test(global_rank, size);

    MPI_Finalize();
    return 0;
}