[mpich-commits] [mpich] MPICH primary repository branch, master, updated. v3.2b3-191-g2e0aa86
Service Account
noreply at mpich.org
Wed Jul 8 16:10:29 CDT 2015
This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project "MPICH primary repository".
The branch, master has been updated
via 2e0aa863c4c8f2b4c260eb97def4f4910e31fcc3 (commit)
via 51c037843be604541db9a47a856cf4c9cc9fdd4b (commit)
via 7a2cd3dc25775712e60516b24acf44d74e173ad7 (commit)
from 8519d2bb2866c72c32ce6efa202ee5c126e98716 (commit)
Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.
- Log -----------------------------------------------------------------
http://git.mpich.org/mpich.git/commitdiff/2e0aa863c4c8f2b4c260eb97def4f4910e31fcc3
commit 2e0aa863c4c8f2b4c260eb97def4f4910e31fcc3
Author: Junchao Zhang <jczhang at mcs.anl.gov>
Date: Sun Jul 5 20:24:02 2015 -0500
Test for multithreaded idup overlapped with nbc.
Add a test for idup, overlapped with non-blocking
collectives
Signed-off-by: Lena Oden <loden at anl.gov>
diff --git a/test/mpi/.gitignore b/test/mpi/.gitignore
index 16f9ad8..b8e0b16 100644
--- a/test/mpi/.gitignore
+++ b/test/mpi/.gitignore
@@ -1078,6 +1078,7 @@
/threads/comm/ctxdup
/threads/comm/ctxidup
/threads/comm/dup_leak_test
+/threads/comm/idup_nb
/threads/init/initth
/threads/pt2pt/threads
/threads/spawn/multispawn
diff --git a/test/mpi/threads/comm/Makefile.am b/test/mpi/threads/comm/Makefile.am
index 0fb58ad..6a1da81 100644
--- a/test/mpi/threads/comm/Makefile.am
+++ b/test/mpi/threads/comm/Makefile.am
@@ -9,5 +9,5 @@ include $(top_srcdir)/threads/Makefile_threads.mtest
EXTRA_DIST = testlist
-noinst_PROGRAMS = ctxdup dup_leak_test comm_dup_deadlock comm_create_threads comm_create_group_threads comm_idup ctxidup
+noinst_PROGRAMS = ctxdup dup_leak_test comm_dup_deadlock comm_create_threads comm_create_group_threads comm_idup ctxidup idup_nb
diff --git a/test/mpi/threads/comm/idup_nb.c b/test/mpi/threads/comm/idup_nb.c
new file mode 100644
index 0000000..b38aeda
--- /dev/null
+++ b/test/mpi/threads/comm/idup_nb.c
@@ -0,0 +1,291 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2015 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+/* This test tests the overlapping of MPI_Comm_idup with other nonblocking calls
+ in a multithreaded setting. Each process produces a new communicator for each
+ of its threads. A thread will duplicate it again to produce child communicators.
+ When the first child communicator is ready, the thread duplicates it to produce
+ grandchild communicators. Meanwhile, the thread also issues nonblocking calls,
+ such as idup, iscan, ibcast, iallreduce, ibarrier on the communicators available.
+
+ The test tests both intracommunicators and intercommunicators
+*/
+
+#include <stdio.h>
+#include <mpi.h>
+#include "mpitest.h"
+#include "mpithreadtest.h"
+
+#define NUM_THREADS 3 /* threads to spawn per process, must >= 1 */
+#define NUM_IDUPS1 3 /* child communicators to iduplicate per thread, must >= 1*/
+#define NUM_IDUPS2 3 /* grandchild communicators to iduplicate per thread, must >= 1*/
+#define NUM_ITER 1 /* run the kernel this many times */
+
+#define check(X_) \
+ do { \
+ if (!(X_)) { \
+ printf("[%s:%d] -- Assertion failed: %s\n", __FILE__, __LINE__, #X_);\
+ MPI_Abort(MPI_COMM_WORLD, 1); \
+ } \
+ } while (0)
+
+int isLeft; /* Is left group of an intercomm? */
+MPI_Comm parentcomms[NUM_THREADS];
+MPI_Comm nbrcomms[NUM_THREADS];
+int errs[NUM_THREADS] = {0};
+int verbose = 0;
+
+/* Threads idup the communicator assigned to them NUM_IDUPS1 times. The operation
+ is overlapped with other non-blocking operations on the same communicator or
+ on a different communicator.
+*/
+MTEST_THREAD_RETURN_TYPE test_intracomm(void *arg)
+{
+ int i, j;
+ int root, bcastbuf;
+ int rank, size;
+ int ans[4], expected[4];
+ MPI_Request reqs[NUM_IDUPS1 + NUM_IDUPS2 + 10] = { MPI_REQUEST_NULL }; /* Preallocate enough reqs */
+ MPI_Comm comms[NUM_IDUPS1 + NUM_IDUPS2]; /* Hold all descendant comms */
+ int cnt;
+ int tid = *(int*)arg;
+
+ MPI_Comm parentcomm = parentcomms[tid];
+ MPI_Comm nbrcomm = nbrcomms[tid];
+
+ for (i = 0; i < NUM_ITER; i++) {
+ cnt = 0;
+ if (*(int*)arg == rank) MTestSleep(1);
+
+ if (verbose) printf("%d: Thread %d - comm_idup %d start\n", rank, tid, i);
+
+ /* Idup the parent intracomm NUM_IDUPS1 times */
+ for (j = 0; j < NUM_IDUPS1; j++)
+ MPI_Comm_idup(parentcomm, &comms[j], &reqs[cnt++]);
+
+ /* Issue an iscan on parent comm to overlap with the pending idups */
+ MPI_Comm_rank(parentcomm, &rank);
+ MPI_Iscan(&rank, &ans[0], 1, MPI_INT, MPI_SUM, parentcomm, &reqs[cnt++]);
+ expected[0] = rank*(rank+1)/2;
+
+ /* Wait for the first child comm to be ready */
+ MPI_Wait(&reqs[0], MPI_STATUS_IGNORE);
+
+ /* Do Idups & iallreduce on the first child comm simultaneously */
+ for (j = 0; j < NUM_IDUPS2; j++)
+ MPI_Comm_idup(comms[0], &comms[NUM_IDUPS1+j], &reqs[cnt++]);
+
+ MPI_Comm_size(comms[0], &size);
+ MPI_Iallreduce(&rank, &ans[1], 1, MPI_INT, MPI_SUM, comms[0], &reqs[cnt++]);
+ expected[1] = (size-1)*size/2;
+
+ /* Issue an ibcast on the parent comm */
+ MPI_Comm_rank(parentcomm, &rank);
+ ans[2] = (rank == 0)? 199 : 111;
+ MPI_Ibcast(&ans[2], 1, MPI_INT, 0, parentcomm, &reqs[cnt++]);
+ expected[2] = 199;
+
+ /* Do ibarrier on the dup'ed comm */
+ MPI_Ibarrier(comms[0], &reqs[cnt++]);
+
+ /* Issue an iscan on a neighbor comm */
+ MPI_Comm_rank(nbrcomm, &rank);
+ MPI_Iscan(&rank, &ans[3], 1, MPI_INT, MPI_SUM, nbrcomm, &reqs[cnt++]);
+ expected[3] = rank*(rank+1)/2;
+
+ /* Pending operations include idup/iscan/ibcast on parentcomm
+ idup/Iallreduce/Ibarrier on comms[0], and Iscan on nbrcomm */
+
+ /* Waitall even if the first request is completed */
+ MPI_Waitall(cnt, reqs, MPI_STATUSES_IGNORE);
+
+ /* Check the answers */
+ for (j = 0; j < 4; j++) {
+ if (ans[j] != expected[j]) errs[tid]++;
+ }
+
+ for (j = 0; j < NUM_IDUPS1 + NUM_IDUPS2; j++) {
+ errs[tid] += MTestTestComm(comms[j]);
+ MPI_Comm_free(&comms[j]);
+ }
+
+ if (verbose) printf("\t%d: Thread %d - comm_idup %d finish\n", rank, tid, i);
+ }
+
+ if (verbose) printf("%d: Thread %d - Done.\n", rank, tid);
+ return (MTEST_THREAD_RETURN_TYPE)0;
+}
+
+/* Threads idup the communicator assigned to them NUM_IDUPS1 times. The operation
+ is overlapped with other non-blocking operations on the same communicator or
+ on a different communicator.
+*/
+MTEST_THREAD_RETURN_TYPE test_intercomm(void *arg)
+{
+ int rank, rsize, root;
+ int i, j;
+ int tid = *(int*)arg;
+ int ans[4], expected[4];
+ MPI_Comm parentcomm = parentcomms[tid];
+ MPI_Comm nbrcomm = nbrcomms[tid];
+
+ MPI_Request reqs[NUM_IDUPS1 + NUM_IDUPS2 + 10] = { MPI_REQUEST_NULL }; /* Preallocate enough reqs */
+ MPI_Comm comms[NUM_IDUPS1 + NUM_IDUPS2]; /* Hold all descendant comms */
+ int cnt;
+
+ for (i = 0; i < NUM_ITER; i++) {
+ cnt = 0;
+ if (*(int*)arg == rank) MTestSleep(1);
+
+ if (verbose) printf("%d: Thread %d - comm_idup %d start\n", rank, tid, i);
+
+ /* Idup the parent intracomm multiple times */
+ for (j = 0; j < NUM_IDUPS1; j++)
+ MPI_Comm_idup(parentcomm, &comms[j], &reqs[cnt++]);
+
+ /* Issue an Iallreduce on parentcomm */
+ MPI_Comm_rank(parentcomm, &rank);
+ MPI_Comm_remote_size(parentcomm, &rsize);
+ MPI_Iallreduce(&rank, &ans[0], 1, MPI_INT, MPI_SUM, parentcomm, &reqs[cnt++]);
+ expected[0] = (rsize-1)*rsize/2;
+
+ /* Wait for the first child comm to be ready */
+ MPI_Wait(&reqs[0], MPI_STATUS_IGNORE);
+
+ /* Do idup & iallreduce on the first child comm simultaneously */
+ for (j = 0; j < NUM_IDUPS2; j++)
+ MPI_Comm_idup(comms[0], &comms[NUM_IDUPS1+j], &reqs[cnt++]);
+
+ MPI_Comm_rank(comms[0], &rank);
+ MPI_Comm_remote_size(comms[0], &rsize);
+ MPI_Iallreduce(&rank, &ans[1], 1, MPI_INT, MPI_SUM, comms[0], &reqs[cnt++]);
+ expected[1] = (rsize-1)*rsize/2;
+
+ /* Issue an ibcast on parentcomm */
+ MPI_Comm_rank(parentcomm, &rank);
+ if (isLeft) {
+ if (rank == 0) {
+ root = MPI_ROOT;
+ ans[2] = 199;
+ } else {
+ root = MPI_PROC_NULL;
+ ans[2] = 199; /* not needed, just to make correctness checking easier */
+ }
+ } else {
+ root = 0;
+ ans[2] = 111; /* garbage value */
+ }
+ MPI_Ibcast(&ans[2], 1, MPI_INT, root, parentcomm, &reqs[cnt++]);
+ expected[2] = 199;
+
+ MPI_Ibarrier(comms[0], &reqs[cnt++]);
+
+ /* Do an Iscan on a neighbor comm */
+ MPI_Comm_rank(nbrcomm, &rank);
+ MPI_Comm_remote_size(nbrcomm, &rsize);
+ MPI_Iallreduce(&rank, &ans[3], 1, MPI_INT, MPI_SUM, nbrcomm, &reqs[cnt++]);
+ expected[3] = (rsize-1)*rsize/2;
+
+ /* Pending operations include idup/iallreduce/ibcast on parentcomm
+ Iallreduce/Ibarrier on comms[0], and Iallreduce on nbrcomm */
+
+ /* Waitall even if the first request is completed */
+ MPI_Waitall(cnt, reqs, MPI_STATUSES_IGNORE);
+
+ /* Check the answers */
+ for (j = 0; j < 4; j++) {
+ if (ans[j] != expected[j]) errs[tid]++;
+ }
+
+ for (j = 0; j < NUM_IDUPS1 + NUM_IDUPS2; j++) {
+ errs[tid] += MTestTestComm(comms[j]);
+ MPI_Comm_free(&comms[j]);
+ }
+
+ if (verbose) printf("\t%d: Thread %d - comm_idup %d finish\n", rank, tid, i);
+ }
+
+ if (verbose) printf("%d: Thread %d - Done.\n", rank, tid);
+ return (MTEST_THREAD_RETURN_TYPE)0;
+}
+
+
+int main(int argc, char **argv)
+{
+ int thread_args[NUM_THREADS];
+ MPI_Request requests[NUM_THREADS*2];
+ int i, provided;
+ MPI_Comm newcomm;
+ int toterrs = 0;
+
+ MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
+
+ check(provided == MPI_THREAD_MULTIPLE);
+ check(NUM_IDUPS1 >= 1 && NUM_IDUPS2 >= 1);
+
+ /* In each iteration, the process generates a new kind of intracommunicator, then
+ uses idup to duplicate the communicator for NUM_THREADS threads.
+ */
+ while (MTestGetIntracommGeneral(&newcomm, 1, 1)) {
+ if (newcomm == MPI_COMM_NULL) continue;
+
+ for (i = 0; i < NUM_THREADS; i++) {
+ MPI_Comm_idup(newcomm, &parentcomms[i], &requests[2*i]);
+ MPI_Comm_idup(newcomm, &nbrcomms[i], &requests[2*i+1]);
+ }
+
+ MPI_Waitall(NUM_THREADS*2, requests, MPI_STATUSES_IGNORE);
+
+ for (i = 0; i < NUM_THREADS; i++) {
+ thread_args[i] = i;
+ MTest_Start_thread(test_intracomm, (void *)&thread_args[i] );
+ }
+
+ MTest_Join_threads();
+
+ for (i = 0; i < NUM_THREADS; i++) {
+ toterrs += errs[i];
+ MPI_Comm_free(&parentcomms[i]);
+ MPI_Comm_free(&nbrcomms[i]);
+ }
+
+ MTestFreeComm(&newcomm);
+ }
+
+ /* In each iteration, the process generates a new kind of intercommunicator, then
+ uses idup to duplicate the communicator for NUM_THREADS threads.
+ */
+ while (MTestGetIntercomm(&newcomm, &isLeft, 1)) {
+ if (newcomm == MPI_COMM_NULL) continue;
+
+ for (i = 0; i < NUM_THREADS; i++) {
+ MPI_Comm_idup(newcomm, &parentcomms[i], &requests[2*i]);
+ MPI_Comm_idup(newcomm, &nbrcomms[i], &requests[2*i+1]);
+ }
+
+ MPI_Waitall(NUM_THREADS*2, requests, MPI_STATUSES_IGNORE);
+
+ for (i = 0; i < NUM_THREADS; i++) {
+ thread_args[i] = i;
+ MTest_Start_thread(test_intercomm, (void *)&thread_args[i] );
+ }
+
+ MTest_Join_threads();
+
+ for (i = 0; i < NUM_THREADS; i++) {
+ toterrs += errs[i];
+ MPI_Comm_free(&parentcomms[i]);
+ MPI_Comm_free(&nbrcomms[i]);
+ }
+
+ MTestFreeComm(&newcomm);
+ }
+
+ MTest_Finalize(toterrs);
+ MPI_Finalize();
+ return 0;
+}
+
diff --git a/test/mpi/threads/comm/testlist b/test/mpi/threads/comm/testlist
index 06764d8..50b841d 100644
--- a/test/mpi/threads/comm/testlist
+++ b/test/mpi/threads/comm/testlist
@@ -5,3 +5,4 @@ comm_create_threads 4
comm_create_group_threads 4
comm_idup 4 mpiversion=3.0 xfail=ticket2269
ctxidup 4 mpiversion=3.0
+idup_nb 4 mpiversion=3.0
http://git.mpich.org/mpich.git/commitdiff/51c037843be604541db9a47a856cf4c9cc9fdd4b
commit 51c037843be604541db9a47a856cf4c9cc9fdd4b
Author: Junchao Zhang <jczhang at mcs.anl.gov>
Date: Mon Jul 6 13:43:18 2015 -0500
Renamed routine TestIntercomm, moved it to mtest.c
To provide MTestTestIntracomm, MTestTestIntercomm,
MTestTestComm as a common service
for all tests.
Signed-off-by: Lena Oden <loden at anl.gov>
diff --git a/test/mpi/comm/iccreate.c b/test/mpi/comm/iccreate.c
index 4b3cedd..b2dc85e 100644
--- a/test/mpi/comm/iccreate.c
+++ b/test/mpi/comm/iccreate.c
@@ -13,9 +13,6 @@
* This program tests that MPI_Comm_create applies to intercommunicators;
* this is an extension added in MPI-2
*/
-
-int TestIntercomm( MPI_Comm );
-
int main( int argc, char *argv[] )
{
int errs = 0;
@@ -74,7 +71,7 @@ int main( int argc, char *argv[] )
}
/* ... more to do */
if (commok) {
- errs += TestIntercomm( newcomm );
+ errs += MTestTestComm(newcomm);
}
}
MPI_Group_free( &newgroup );
@@ -104,7 +101,7 @@ int main( int argc, char *argv[] )
}
else {
/* Try to communication between each member of intercomm */
- errs += TestIntercomm( newcomm );
+ errs += MTestTestComm(newcomm);
}
if (newcomm != MPI_COMM_NULL) {
@@ -138,79 +135,3 @@ int main( int argc, char *argv[] )
return 0;
}
-
-int TestIntercomm( MPI_Comm comm )
-{
- int local_size, remote_size, rank, **bufs, *bufmem, rbuf[2], j;
- int errs = 0, wrank, nsize;
- char commname[MPI_MAX_OBJECT_NAME+1];
- MPI_Request *reqs;
-
- MPI_Comm_rank( MPI_COMM_WORLD, &wrank );
- MPI_Comm_size( comm, &local_size );
- MPI_Comm_remote_size( comm, &remote_size );
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_get_name( comm, commname, &nsize );
-
- MTestPrintfMsg( 1, "Testing communication on intercomm '%s', remote_size=%d\n",
- commname, remote_size );
-
- reqs = (MPI_Request *)malloc( remote_size * sizeof(MPI_Request) );
- if (!reqs) {
- printf( "[%d] Unable to allocated %d requests for testing intercomm %s\n",
- wrank, remote_size, commname );
- errs++;
- return errs;
- }
- bufs = (int **) malloc( remote_size * sizeof(int *) );
- if (!bufs) {
- printf( "[%d] Unable to allocated %d int pointers for testing intercomm %s\n",
- wrank, remote_size, commname );
- errs++;
- return errs;
- }
- bufmem = (int *) malloc( remote_size * 2 * sizeof(int) );
- if (!bufmem) {
- printf( "[%d] Unable to allocated %d int data for testing intercomm %s\n",
- wrank, 2*remote_size, commname );
- errs++;
- return errs;
- }
-
- /* Each process sends a message containing its own rank and the
- rank of the destination with a nonblocking send. Because we're using
- nonblocking sends, we need to use different buffers for each isend */
- /* NOTE: the send buffer access restriction was relaxed in MPI-2.2, although
- it doesn't really hurt to keep separate buffers for our purposes */
- for (j=0; j<remote_size; j++) {
- bufs[j] = &bufmem[2*j];
- bufs[j][0] = rank;
- bufs[j][1] = j;
- MPI_Isend( bufs[j], 2, MPI_INT, j, 0, comm, &reqs[j] );
- }
- MTestPrintfMsg( 2, "isends posted, about to recv\n" );
-
- for (j=0; j<remote_size; j++) {
- MPI_Recv( rbuf, 2, MPI_INT, j, 0, comm, MPI_STATUS_IGNORE );
- if (rbuf[0] != j) {
- printf( "[%d] Expected rank %d but saw %d in %s\n",
- wrank, j, rbuf[0], commname );
- errs++;
- }
- if (rbuf[1] != rank) {
- printf( "[%d] Expected target rank %d but saw %d from %d in %s\n",
- wrank, rank, rbuf[1], j, commname );
- errs++;
- }
- }
- if (errs)
- fflush(stdout);
- MTestPrintfMsg( 2, "my recvs completed, about to waitall\n" );
- MPI_Waitall( remote_size, reqs, MPI_STATUSES_IGNORE );
-
- free( reqs );
- free( bufs );
- free( bufmem );
-
- return errs;
-}
diff --git a/test/mpi/comm/icsplit.c b/test/mpi/comm/icsplit.c
index 9ad2d51..177c945 100644
--- a/test/mpi/comm/icsplit.c
+++ b/test/mpi/comm/icsplit.c
@@ -13,9 +13,6 @@
* This program tests that MPI_Comm_split applies to intercommunicators;
* this is an extension added in MPI-2
*/
-
-int TestIntercomm( MPI_Comm );
-
int main( int argc, char *argv[] )
{
int errs = 0;
@@ -90,7 +87,7 @@ int main( int argc, char *argv[] )
}
/* ... more to do */
if (commok) {
- errs += TestIntercomm( newcomm );
+ errs += MTestTestComm(newcomm);
}
}
else {
@@ -118,75 +115,3 @@ int main( int argc, char *argv[] )
return 0;
}
-
-/* FIXME: This is copied from iccreate. It should be in one place */
-int TestIntercomm( MPI_Comm comm )
-{
- int local_size, remote_size, rank, **bufs, *bufmem, rbuf[2], j;
- int errs = 0, wrank, nsize;
- char commname[MPI_MAX_OBJECT_NAME+1];
- MPI_Request *reqs;
-
- MPI_Comm_rank( MPI_COMM_WORLD, &wrank );
- MPI_Comm_size( comm, &local_size );
- MPI_Comm_remote_size( comm, &remote_size );
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_get_name( comm, commname, &nsize );
-
- MTestPrintfMsg( 1, "Testing communication on intercomm %s\n", commname );
-
- reqs = (MPI_Request *)malloc( remote_size * sizeof(MPI_Request) );
- if (!reqs) {
- printf( "[%d] Unable to allocated %d requests for testing intercomm %s\n",
- wrank, remote_size, commname );
- errs++;
- return errs;
- }
- bufs = (int **) malloc( remote_size * sizeof(int *) );
- if (!bufs) {
- printf( "[%d] Unable to allocated %d int pointers for testing intercomm %s\n",
- wrank, remote_size, commname );
- errs++;
- return errs;
- }
- bufmem = (int *) malloc( remote_size * 2 * sizeof(int) );
- if (!bufmem) {
- printf( "[%d] Unable to allocated %d int data for testing intercomm %s\n",
- wrank, 2*remote_size, commname );
- errs++;
- return errs;
- }
-
- /* Each process sends a message containing its own rank and the
- rank of the destination with a nonblocking send. Because we're using
- nonblocking sends, we need to use different buffers for each isend */
- for (j=0; j<remote_size; j++) {
- bufs[j] = &bufmem[2*j];
- bufs[j][0] = rank;
- bufs[j][1] = j;
- MPI_Isend( bufs[j], 2, MPI_INT, j, 0, comm, &reqs[j] );
- }
-
- for (j=0; j<remote_size; j++) {
- MPI_Recv( rbuf, 2, MPI_INT, j, 0, comm, MPI_STATUS_IGNORE );
- if (rbuf[0] != j) {
- printf( "[%d] Expected rank %d but saw %d in %s\n",
- wrank, j, rbuf[0], commname );
- errs++;
- }
- if (rbuf[1] != rank) {
- printf( "[%d] Expected target rank %d but saw %d from %d in %s\n",
- wrank, rank, rbuf[1], j, commname );
- errs++;
- }
- }
- if (errs)
- fflush(stdout);
- MPI_Waitall( remote_size, reqs, MPI_STATUSES_IGNORE );
-
- free( reqs );
- free( bufs );
- free( bufmem );
-
- return errs;
-}
diff --git a/test/mpi/include/mpitest.h b/test/mpi/include/mpitest.h
index f20eb76..594f58e 100644
--- a/test/mpi/include/mpitest.h
+++ b/test/mpi/include/mpitest.h
@@ -82,6 +82,9 @@ int MTestGetIntracomm( MPI_Comm *, int );
int MTestGetIntracommGeneral( MPI_Comm *, int, int );
int MTestGetIntercomm( MPI_Comm *, int *, int );
int MTestGetComm( MPI_Comm *, int );
+int MTestTestIntercomm(MPI_Comm intercomm);
+int MTestTestIntracomm(MPI_Comm intracomm);
+int MTestTestComm(MPI_Comm comm);
const char *MTestGetIntracommName( void );
const char *MTestGetIntercommName( void );
void MTestFreeComm( MPI_Comm * );
diff --git a/test/mpi/util/mtest.c b/test/mpi/util/mtest.c
index c4f0c4d..b77439c 100644
--- a/test/mpi/util/mtest.c
+++ b/test/mpi/util/mtest.c
@@ -406,7 +406,7 @@ int MTestGetIntracommGeneral( MPI_Comm *comm, int min_size, int allowSmaller )
ranges[0][2] = 1;
if (allowSmaller && (size+1)/2 >= min_size) {
- MPI_Comm_group( MPI_COMM_WORLD, &world_group);
+ MPI_Comm_group(MPI_COMM_WORLD, &world_group);
merr = MPI_Group_range_incl(world_group, 1, ranges, &high_group);
if (merr) MTestPrintError(merr);
merr = MPI_Comm_create(MPI_COMM_WORLD, high_group, comm);
@@ -835,6 +835,117 @@ int MTestGetIntercomm( MPI_Comm *comm, int *isLeftGroup, int min_size )
return interCommIdx;
}
+
+int MTestTestIntercomm(MPI_Comm comm)
+{
+ int local_size, remote_size, rank, **bufs, *bufmem, rbuf[2], j;
+ int errs = 0, wrank, nsize;
+ char commname[MPI_MAX_OBJECT_NAME+1];
+ MPI_Request *reqs;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &wrank);
+ MPI_Comm_size(comm, &local_size);
+ MPI_Comm_remote_size(comm, &remote_size);
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_get_name(comm, commname, &nsize);
+
+ MTestPrintfMsg(1, "Testing communication on intercomm '%s', remote_size=%d\n",
+ commname, remote_size);
+
+ reqs = (MPI_Request *)malloc(remote_size * sizeof(MPI_Request));
+ if (!reqs) {
+ printf("[%d] Unable to allocated %d requests for testing intercomm %s\n",
+ wrank, remote_size, commname);
+ errs++;
+ return errs;
+ }
+ bufs = (int **) malloc(remote_size * sizeof(int *));
+ if (!bufs) {
+ printf("[%d] Unable to allocated %d int pointers for testing intercomm %s\n",
+ wrank, remote_size, commname);
+ errs++;
+ return errs;
+ }
+ bufmem = (int *) malloc(remote_size * 2 * sizeof(int));
+ if (!bufmem) {
+ printf("[%d] Unable to allocated %d int data for testing intercomm %s\n",
+ wrank, 2*remote_size, commname);
+ errs++;
+ return errs;
+ }
+
+ /* Each process sends a message containing its own rank and the
+ rank of the destination with a nonblocking send. Because we're using
+ nonblocking sends, we need to use different buffers for each isend */
+ /* NOTE: the send buffer access restriction was relaxed in MPI-2.2, although
+ it doesn't really hurt to keep separate buffers for our purposes */
+ for (j=0; j<remote_size; j++) {
+ bufs[j] = &bufmem[2*j];
+ bufs[j][0] = rank;
+ bufs[j][1] = j;
+ MPI_Isend(bufs[j], 2, MPI_INT, j, 0, comm, &reqs[j]);
+ }
+ MTestPrintfMsg(2, "isends posted, about to recv\n");
+
+ for (j=0; j<remote_size; j++) {
+ MPI_Recv(rbuf, 2, MPI_INT, j, 0, comm, MPI_STATUS_IGNORE);
+ if (rbuf[0] != j) {
+ printf("[%d] Expected rank %d but saw %d in %s\n",
+ wrank, j, rbuf[0], commname);
+ errs++;
+ }
+ if (rbuf[1] != rank) {
+ printf( "[%d] Expected target rank %d but saw %d from %d in %s\n",
+ wrank, rank, rbuf[1], j, commname );
+ errs++;
+ }
+ }
+ if (errs) fflush(stdout);
+
+ MTestPrintfMsg(2, "my recvs completed, about to waitall\n");
+ MPI_Waitall(remote_size, reqs, MPI_STATUSES_IGNORE);
+
+ free(reqs);
+ free(bufs);
+ free(bufmem);
+
+ return errs;
+}
+
+int MTestTestIntracomm(MPI_Comm comm)
+{
+ int i, errs = 0;
+ int size;
+ int in[16], out[16], sol[16];
+
+ MPI_Comm_size(comm, &size);
+
+ /* Set input, output and sol-values */
+ for(i = 0; i<16; i++) {
+ in[i] = i;
+ out[i] = 0;
+ sol[i] = i*size;
+ }
+ MPI_Allreduce(in, out, 16, MPI_INT, MPI_SUM, comm);
+
+ /* Test results */
+ for(i = 0; i<16; i++) {
+ if(sol[i] != out[i]) errs++;
+ }
+
+ return errs;
+}
+
+int MTestTestComm(MPI_Comm comm)
+{
+ int is_inter;
+ MPI_Comm_test_inter(comm, &is_inter);
+ if (is_inter)
+ return MTestTestIntercomm(comm);
+ else
+ return MTestTestIntracomm(comm);
+}
+
/* Return the name of an intercommunicator */
const char *MTestGetIntercommName( void )
{
http://git.mpich.org/mpich.git/commitdiff/7a2cd3dc25775712e60516b24acf44d74e173ad7
commit 7a2cd3dc25775712e60516b24acf44d74e173ad7
Author: Junchao Zhang <jczhang at mcs.anl.gov>
Date: Mon Jul 6 13:40:07 2015 -0500
Add more cases to generate var intracommunicators
Such as MPI_Comm_create, MPI_Comm_create_group,
MPI_Intercomm_merge.
Signed-off-by: Lena Oden <loden at anl.gov>
diff --git a/test/mpi/util/mtest.c b/test/mpi/util/mtest.c
index ea309c0..c4f0c4d 100644
--- a/test/mpi/util/mtest.c
+++ b/test/mpi/util/mtest.c
@@ -331,18 +331,105 @@ int MTestGetIntracommGeneral( MPI_Comm *comm, int min_size, int allowSmaller )
isBasic = 1;
intraCommName = "MPI_COMM_SELF";
break;
+ case 5:
+ {
+ /* Dup of the world using MPI_Intercomm_merge */
+ int rleader, isLeft;
+ MPI_Comm local_comm, inter_comm;
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ if (size > 1) {
+ merr = MPI_Comm_split( MPI_COMM_WORLD, (rank < size/2), rank, &local_comm);
+ if (merr) MTestPrintError( merr );
+ if (rank == 0) { rleader = size/2; }
+ else if (rank == size/2) { rleader = 0; }
+ else { rleader = -1; }
+ isLeft = rank < size/2;
+ merr = MPI_Intercomm_create(local_comm, 0, MPI_COMM_WORLD, rleader, 99, &inter_comm);
+ if (merr) MTestPrintError(merr);
+ merr = MPI_Intercomm_merge(inter_comm, isLeft, comm);
+ if (merr) MTestPrintError(merr);
+ MPI_Comm_free(&inter_comm);
+ MPI_Comm_free(&local_comm);
+ intraCommName = "Dup of WORLD created by MPI_Intercomm_merge";
+ } else {
+ *comm = MPI_COMM_NULL;
+ }
+ }
+ break;
+ case 6:
+ {
+#if MTEST_HAVE_MIN_MPI_VERSION(3,0)
+ /* Even of the world using MPI_Comm_create_group */
+ int i;
+ MPI_Group world_group, even_group;
+ int *excl = NULL;
+
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ if (allowSmaller && (size+1)/2 >= min_size) {
+ /* exclude the odd ranks */
+ excl = malloc((size/2) * sizeof(int));
+ for (i = 0; i < size / 2; i++) excl[i] = (2 * i) + 1;
+
+ MPI_Comm_group(MPI_COMM_WORLD, &world_group);
+ MPI_Group_excl(world_group, size / 2, excl, &even_group);
+ MPI_Group_free(&world_group);
+ free(excl);
+
+ if (rank % 2 == 0) {
+ /* Even processes create a comm. for themselves */
+ MPI_Comm_create_group(MPI_COMM_WORLD, even_group, 0, comm);
+ intraCommName = "Even of WORLD created by MPI_Comm_create_group";
+ } else {
+ *comm = MPI_COMM_NULL;
+ }
+ MPI_Group_free(&even_group);
+ } else {
+ *comm = MPI_COMM_NULL;
+ }
+#else
+ *comm = MPI_COMM_NULL;
+#endif
+ }
+ break;
+ case 7:
+ {
+ /* High half of the world using MPI_Comm_create */
+ int ranges[1][3];
+ MPI_Group world_group, high_group;
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ ranges[0][0] = size/2;
+ ranges[0][1] = size-1;
+ ranges[0][2] = 1;
+
+ if (allowSmaller && (size+1)/2 >= min_size) {
+ MPI_Comm_group( MPI_COMM_WORLD, &world_group);
+ merr = MPI_Group_range_incl(world_group, 1, ranges, &high_group);
+ if (merr) MTestPrintError(merr);
+ merr = MPI_Comm_create(MPI_COMM_WORLD, high_group, comm);
+ if (merr) MTestPrintError(merr);
+ MPI_Group_free(&world_group);
+ MPI_Group_free(&high_group);
+ intraCommName = "High half of WORLD created by MPI_Comm_create";
+ } else {
+ *comm = MPI_COMM_NULL;
+ }
+ }
+ break;
/* These next cases are communicators that include some
but not all of the processes */
- case 5:
- case 6:
- case 7:
case 8:
+ case 9:
+ case 10:
+ case 11:
{
int newsize;
merr = MPI_Comm_size( MPI_COMM_WORLD, &size );
if (merr) MTestPrintError( merr );
- newsize = size - (intraCommIdx - 4);
+ newsize = size - (intraCommIdx - 7);
if (allowSmaller && newsize >= min_size) {
merr = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
-----------------------------------------------------------------------
Summary of changes:
test/mpi/.gitignore | 1 +
test/mpi/comm/iccreate.c | 83 +----------
test/mpi/comm/icsplit.c | 77 +----------
test/mpi/include/mpitest.h | 3 +
test/mpi/threads/comm/Makefile.am | 2 +-
test/mpi/threads/comm/idup_nb.c | 291 +++++++++++++++++++++++++++++++++++++
test/mpi/threads/comm/testlist | 1 +
test/mpi/util/mtest.c | 206 +++++++++++++++++++++++++-
8 files changed, 502 insertions(+), 162 deletions(-)
create mode 100644 test/mpi/threads/comm/idup_nb.c
hooks/post-receive
--
MPICH primary repository
More information about the commits
mailing list