[mpich-commits] [mpich] MPICH primary repository branch, master, updated. v3.2b3-120-gbac2513

Service Account noreply at mpich.org
Tue Jun 23 13:38:09 CDT 2015


This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project "MPICH primary repository".

The branch, master, has been updated
       via  bac25133c02822ef28e5011a71ad713966c5f0c5 (commit)
      from  3efedf179d0cd885d32f8b47eccc52f9f851ef17 (commit)

Those revisions listed above that are new to this repository have
not appeared in any other notification email, so we list those
revisions in full below.

- Log -----------------------------------------------------------------
http://git.mpich.org/mpich.git/commitdiff/bac25133c02822ef28e5011a71ad713966c5f0c5

commit bac25133c02822ef28e5011a71ad713966c5f0c5
Author: Halim Amer <aamer at anl.gov>
Date:   Mon Jun 22 22:32:01 2015 -0500

    Added tests to stream ACC-like RMA ops
    
    Signed-off-by: Min Si <msi at il.is.s.u-tokyo.ac.jp>
    Signed-off-by: Rob Latham <robl at mcs.anl.gov>
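
    The new tests all exercise the same basic pattern: an origin rank opens
    a passive-target epoch on the target's window, streams an ACC-style
    operation (MPI_Accumulate or MPI_Get_accumulate with MPI_REPLACE) built
    from the mtest datatype generator, completes it with an unlock or one of
    the flush variants, and then uses MPI_Barrier handshakes so the target
    can lock its own window and check the contents.  A minimal standalone
    sketch of that pattern (illustrative only, not part of this commit; it
    uses a plain MPI_INT buffer and a fixed COUNT instead of the
    MTestDatatype machinery) might look like:

        /* Sketch: rank 0 streams an MPI_Accumulate with MPI_REPLACE to
         * rank 1 under a passive-target lock; rank 1 then locks its own
         * window and checks the result.  COUNT and MPI_INT are arbitrary
         * choices made for this example. */
        #include <mpi.h>
        #include <stdio.h>

        #define COUNT 1024

        int main(int argc, char *argv[])
        {
            int rank, size, i, errs = 0;
            int *winbuf;
            int sendbuf[COUNT];
            MPI_Win win;

            MPI_Init(&argc, &argv);
            MPI_Comm_rank(MPI_COMM_WORLD, &rank);
            MPI_Comm_size(MPI_COMM_WORLD, &size);

            /* expose a COUNT-int window on every rank */
            MPI_Win_allocate(COUNT * sizeof(int), sizeof(int), MPI_INFO_NULL,
                             MPI_COMM_WORLD, &winbuf, &win);
            for (i = 0; i < COUNT; i++)
                winbuf[i] = -1;
            MPI_Barrier(MPI_COMM_WORLD);

            if (rank == 0 && size > 1) {
                for (i = 0; i < COUNT; i++)
                    sendbuf[i] = i;
                /* lock -> accumulate(MPI_REPLACE) -> unlock */
                MPI_Win_lock(MPI_LOCK_SHARED, 1, 0, win);
                MPI_Accumulate(sendbuf, COUNT, MPI_INT, 1, 0, COUNT, MPI_INT,
                               MPI_REPLACE, win);
                MPI_Win_unlock(1, win);
            }
            /* unlock guarantees remote completion; the barrier orders it
             * before the target's check */
            MPI_Barrier(MPI_COMM_WORLD);

            if (rank == 1) {
                MPI_Win_lock(MPI_LOCK_SHARED, 1, 0, win);
                for (i = 0; i < COUNT; i++)
                    if (winbuf[i] != i)
                        errs++;
                MPI_Win_unlock(1, win);
                if (errs)
                    printf("%d errors\n", errs);
                else
                    printf(" No Errors\n");
            }

            MPI_Win_free(&win);
            MPI_Finalize();
            return 0;
        }

    The committed tests differ mainly in which completion call (unlock,
    flush, flush_local, or their _all counterparts under MPI_Win_lock_all)
    separates the streamed operation from the barrier handshake;
    lock_contention_dt additionally has several origins compete for an
    exclusive lock on a single target.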

diff --git a/test/mpi/.gitignore b/test/mpi/.gitignore
index d5fcfe6..16f9ad8 100644
--- a/test/mpi/.gitignore
+++ b/test/mpi/.gitignore
@@ -537,6 +537,15 @@
 /rma/putpscw1
 /rma/wintest
 /rma/manyget
+/rma/lock_dt
+/rma/lock_dt_flush
+/rma/lock_dt_flushlocal
+/rma/lockall_dt
+/rma/lockall_dt_flush
+/rma/lockall_dt_flushall
+/rma/lockall_dt_flushlocal
+/rma/lockall_dt_flushlocalall
+/rma/lock_contention_dt
 /spawn/namepub
 /spawn/disconnect3
 /spawn/spaiccreate
diff --git a/test/mpi/rma/Makefile.am b/test/mpi/rma/Makefile.am
index 656afc6..8b3b9d5 100644
--- a/test/mpi/rma/Makefile.am
+++ b/test/mpi/rma/Makefile.am
@@ -25,6 +25,15 @@ noinst_PROGRAMS =          \
     lockcontention2        \
     lockcontention3        \
     lockopts               \
+    lock_dt                \
+    lock_dt_flush          \
+    lock_dt_flushlocal     \
+    lockall_dt             \
+    lockall_dt_flush       \
+    lockall_dt_flushall    \
+    lockall_dt_flushlocal  \
+    lockall_dt_flushlocalall \
+    lock_contention_dt     \
     contention_put         \
     contention_putget      \
     put_base               \
diff --git a/test/mpi/rma/lock_contention_dt.c b/test/mpi/rma/lock_contention_dt.c
new file mode 100644
index 0000000..394f799
--- /dev/null
+++ b/test/mpi/rma/lock_contention_dt.c
@@ -0,0 +1,99 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ *  (C) 2015 by Argonne National Laboratory.
+ *      See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test lock contention while streaming ACC-like operations";
+*/
+
+int main(int argc, char *argv[])
+{
+    int errs = 0;
+    int rank, size;
+    int target = 1;
+    int minsize = 2, count;
+    MPI_Comm comm;
+    MPI_Win win;
+    MPI_Aint lb, extent;
+    MTestDatatype sendtype, recvtype;
+
+    MTest_Init(&argc, &argv);
+
+    while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
+        if (comm == MPI_COMM_NULL)
+            continue;
+
+        MPI_Comm_rank(comm, &rank);
+        MPI_Comm_size(comm, &size);
+
+        MTEST_DATATYPE_FOR_EACH_COUNT(count) {
+            while (MTestGetDatatypes(&sendtype, &recvtype, count)) {
+                recvtype.printErrors = 1;
+                recvtype.InitBuf(&recvtype);
+                MPI_Type_get_extent(recvtype.datatype, &lb, &extent);
+
+                MPI_Win_create(recvtype.buf, lb + recvtype.count * extent,
+                               (int) extent, MPI_INFO_NULL, comm, &win);
+                if (rank != target) {
+                    sendtype.InitBuf(&sendtype);
+
+                    MPI_Win_lock(MPI_LOCK_EXCLUSIVE, target, 0, win);
+                    MPI_Accumulate(sendtype.buf, sendtype.count,
+                                   sendtype.datatype, target, 0,
+                                   recvtype.count, recvtype.datatype, MPI_REPLACE, win);
+                    MPI_Win_unlock(target, win);
+                    MPI_Barrier(comm);
+
+                    char *resbuf = (char *) calloc(lb + extent * recvtype.count, sizeof(char));
+
+                    /* wait for the destination to finish checking and reinitializing the buffer */
+                    MPI_Barrier(comm);
+
+                    MPI_Win_lock(MPI_LOCK_EXCLUSIVE, target, 0, win);
+                    MPI_Get_accumulate(sendtype.buf, sendtype.count,
+                                       sendtype.datatype, resbuf, recvtype.count, recvtype.datatype,
+                                       target, 0, recvtype.count, recvtype.datatype, MPI_REPLACE,
+                                       win);
+                    MPI_Win_unlock(target, win);
+                    MPI_Barrier(comm);
+                    free(resbuf);
+                }
+                else {  /* Target checks the result */
+                    int err;
+                    MPI_Barrier(comm);
+                    MPI_Win_lock(MPI_LOCK_EXCLUSIVE, rank, 0, win);
+                    err = MTestCheckRecv(0, &recvtype);
+                    if (err)
+                        errs++;
+                    recvtype.InitBuf(&recvtype);
+                    MPI_Win_unlock(rank, win);
+
+                    /* signal the source that checking and reinitialization is done */
+                    MPI_Barrier(comm);
+
+                    MPI_Barrier(comm);
+                    MPI_Win_lock(MPI_LOCK_EXCLUSIVE, rank, 0, win);
+                    err = MTestCheckRecv(0, &recvtype);
+                    if (err)
+                        errs++;
+                    MPI_Win_unlock(rank, win);
+                }
+
+                MPI_Win_free(&win);
+                MTestFreeDatatype(&sendtype);
+                MTestFreeDatatype(&recvtype);
+            }
+        }
+        MTestFreeComm(&comm);
+    }
+    MTest_Finalize(errs);
+    MPI_Finalize();
+    return 0;
+}
diff --git a/test/mpi/rma/lock_dt.c b/test/mpi/rma/lock_dt.c
new file mode 100644
index 0000000..f98d4f4
--- /dev/null
+++ b/test/mpi/rma/lock_dt.c
@@ -0,0 +1,101 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ *  (C) 2015 by Argonne National Laboratory.
+ *      See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test for streaming ACC-like operations with lock";
+*/
+
+int main(int argc, char *argv[])
+{
+    int errs = 0;
+    int rank, size, source, dest;
+    int minsize = 2, count;
+    MPI_Comm comm;
+    MPI_Win win;
+    MPI_Aint lb, extent;
+    MTestDatatype sendtype, recvtype;
+
+    MTest_Init(&argc, &argv);
+
+    while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
+        if (comm == MPI_COMM_NULL)
+            continue;
+
+        MPI_Comm_rank(comm, &rank);
+        MPI_Comm_size(comm, &size);
+        source = 0;
+        dest = size - 1;
+
+        MTEST_DATATYPE_FOR_EACH_COUNT(count) {
+            while (MTestGetDatatypes(&sendtype, &recvtype, count)) {
+                recvtype.printErrors = 1;
+                /* Make sure that everyone has a recv buffer */
+                recvtype.InitBuf(&recvtype);
+                MPI_Type_get_extent(recvtype.datatype, &lb, &extent);
+
+                MPI_Win_create(recvtype.buf, lb + recvtype.count * extent,
+                               (int) extent, MPI_INFO_NULL, comm, &win);
+                if (rank == source) {
+                    sendtype.InitBuf(&sendtype);
+
+                    MPI_Win_lock(MPI_LOCK_SHARED, dest, 0, win);
+                    MPI_Accumulate(sendtype.buf, sendtype.count,
+                                   sendtype.datatype, dest, 0,
+                                   recvtype.count, recvtype.datatype, MPI_REPLACE, win);
+                    MPI_Win_unlock(dest, win);
+                    MPI_Barrier(comm);
+
+                    char *resbuf = (char *) calloc(lb + extent * recvtype.count, sizeof(char));
+
+                    /* wait for the destination to finish checking and reinitializing the buffer */
+                    MPI_Barrier(comm);
+
+                    MPI_Win_lock(MPI_LOCK_SHARED, dest, 0, win);
+                    MPI_Get_accumulate(sendtype.buf, sendtype.count,
+                                       sendtype.datatype, resbuf, recvtype.count, recvtype.datatype,
+                                       dest, 0, recvtype.count, recvtype.datatype, MPI_REPLACE,
+                                       win);
+                    MPI_Win_unlock(dest, win);
+                    MPI_Barrier(comm);
+                    free(resbuf);
+                }
+                else if (rank == dest) {
+                    int err;
+                    MPI_Barrier(comm);
+                    MPI_Win_lock(MPI_LOCK_SHARED, dest, 0, win);
+                    err = MTestCheckRecv(0, &recvtype);
+                    if (err)
+                        errs++;
+                    recvtype.InitBuf(&recvtype);
+                    MPI_Win_unlock(dest, win);
+
+                    /* signal the source that checking and reinitialization is done */
+                    MPI_Barrier(comm);
+
+                    MPI_Barrier(comm);
+                    MPI_Win_lock(MPI_LOCK_SHARED, dest, 0, win);
+                    err = MTestCheckRecv(0, &recvtype);
+                    if (err)
+                        errs++;
+                    MPI_Win_unlock(dest, win);
+                }
+
+                MPI_Win_free(&win);
+                MTestFreeDatatype(&sendtype);
+                MTestFreeDatatype(&recvtype);
+            }
+        }
+        MTestFreeComm(&comm);
+    }
+    MTest_Finalize(errs);
+    MPI_Finalize();
+    return 0;
+}
diff --git a/test/mpi/rma/lock_dt_flush.c b/test/mpi/rma/lock_dt_flush.c
new file mode 100644
index 0000000..9613c92
--- /dev/null
+++ b/test/mpi/rma/lock_dt_flush.c
@@ -0,0 +1,109 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ *  (C) 2015 by Argonne National Laboratory.
+ *      See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test for streaming ACC-like operations with lock+flush";
+*/
+
+int main(int argc, char *argv[])
+{
+    int errs = 0;
+    int rank, size, source, dest;
+    int minsize = 2, count;
+    MPI_Comm comm;
+    MPI_Win win;
+    MPI_Aint lb, extent;
+    MTestDatatype sendtype, recvtype;
+
+    MTest_Init(&argc, &argv);
+
+    while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
+        if (comm == MPI_COMM_NULL)
+            continue;
+        MPI_Comm_rank(comm, &rank);
+        MPI_Comm_size(comm, &size);
+        source = 0;
+        dest = size - 1;
+
+        MTEST_DATATYPE_FOR_EACH_COUNT(count) {
+            while (MTestGetDatatypes(&sendtype, &recvtype, count)) {
+                recvtype.printErrors = 1;
+                recvtype.InitBuf(&recvtype);
+                MPI_Type_get_extent(recvtype.datatype, &lb, &extent);
+
+                MPI_Win_create(recvtype.buf, lb + recvtype.count * extent,
+                               (int) extent, MPI_INFO_NULL, comm, &win);
+                if (rank == source) {
+                    sendtype.InitBuf(&sendtype);
+
+                    MPI_Win_lock(MPI_LOCK_SHARED, dest, 0, win);
+                    MPI_Accumulate(sendtype.buf, sendtype.count,
+                                   sendtype.datatype, dest, 0,
+                                   recvtype.count, recvtype.datatype, MPI_REPLACE, win);
+                    MPI_Win_flush(dest, win);
+                    /* signal to dest that the ops are flushed so that it starts checking the result */
+                    MPI_Barrier(comm);
+                    /* make sure dest finishes checking the result before issuing unlock */
+                    MPI_Barrier(comm);
+                    MPI_Win_unlock(dest, win);
+
+                    char *resbuf = (char *) calloc(lb + extent * recvtype.count, sizeof(char));
+
+                    /* wait for the destination to finish checking and reinitializing the buffer */
+                    MPI_Barrier(comm);
+
+                    MPI_Win_lock(MPI_LOCK_SHARED, dest, 0, win);
+                    MPI_Get_accumulate(sendtype.buf, sendtype.count,
+                                       sendtype.datatype, resbuf, recvtype.count, recvtype.datatype,
+                                       dest, 0, recvtype.count, recvtype.datatype, MPI_REPLACE,
+                                       win);
+                    MPI_Win_flush(dest, win);
+                    /* signal to dest that the ops are flushed so that it starts checking the result */
+                    MPI_Barrier(comm);
+                    /* make sure dest finishes checking the result before issuing unlock */
+                    MPI_Barrier(comm);
+                    MPI_Win_unlock(dest, win);
+                    free(resbuf);
+                }
+                else if (rank == dest) {
+
+                    MPI_Barrier(comm);
+                    MPI_Win_lock(MPI_LOCK_SHARED, dest, 0, win);
+                    int err = MTestCheckRecv(0, &recvtype);
+                    if (err)
+                        errs++;
+                    recvtype.InitBuf(&recvtype);
+                    MPI_Barrier(comm);
+                    MPI_Win_unlock(dest, win);
+
+                    /* signal the source that checking and reinitialization is done */
+                    MPI_Barrier(comm);
+
+                    MPI_Barrier(comm);
+                    MPI_Win_lock(MPI_LOCK_SHARED, dest, 0, win);
+                    err = MTestCheckRecv(0, &recvtype);
+                    if (err)
+                        errs++;
+                    MPI_Barrier(comm);
+                    MPI_Win_unlock(dest, win);
+                }
+
+                MPI_Win_free(&win);
+                MTestFreeDatatype(&sendtype);
+                MTestFreeDatatype(&recvtype);
+            }
+        }
+        MTestFreeComm(&comm);
+    }
+    MTest_Finalize(errs);
+    MPI_Finalize();
+    return 0;
+}
diff --git a/test/mpi/rma/lock_dt_flushlocal.c b/test/mpi/rma/lock_dt_flushlocal.c
new file mode 100644
index 0000000..ffd24e1
--- /dev/null
+++ b/test/mpi/rma/lock_dt_flushlocal.c
@@ -0,0 +1,110 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ *  (C) 2015 by Argonne National Laboratory.
+ *      See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test for streaming ACC-like operations with lock+flush_local";
+*/
+
+int main(int argc, char *argv[])
+{
+    int errs = 0;
+    int rank, size, source, dest;
+    int minsize = 2, count;
+    MPI_Comm comm;
+    MPI_Win win;
+    MPI_Aint lb, extent;
+    MTestDatatype sendtype, recvtype;
+
+    MTest_Init(&argc, &argv);
+
+    while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
+        if (comm == MPI_COMM_NULL)
+            continue;
+        /* Determine the sender and receiver */
+        MPI_Comm_rank(comm, &rank);
+        MPI_Comm_size(comm, &size);
+        source = 0;
+        dest = size - 1;
+
+        MTEST_DATATYPE_FOR_EACH_COUNT(count) {
+            while (MTestGetDatatypes(&sendtype, &recvtype, count)) {
+                recvtype.printErrors = 1;
+                recvtype.InitBuf(&recvtype);
+                MPI_Type_get_extent(recvtype.datatype, &lb, &extent);
+
+                MPI_Win_create(recvtype.buf, lb + recvtype.count * extent,
+                               (int) extent, MPI_INFO_NULL, comm, &win);
+                if (rank == source) {
+                    MPI_Aint slb, sextent;
+                    MPI_Type_get_extent(sendtype.datatype, &slb, &sextent);
+                    sendtype.InitBuf(&sendtype);
+
+                    MPI_Win_lock(MPI_LOCK_SHARED, dest, 0, win);
+                    MPI_Accumulate(sendtype.buf, sendtype.count,
+                                   sendtype.datatype, dest, 0,
+                                   recvtype.count, recvtype.datatype, MPI_REPLACE, win);
+                    MPI_Win_flush_local(dest, win);
+                    /* reset the send buffer to test local completion */
+                    memset(sendtype.buf, 0, slb + sextent * sendtype.count);
+                    MPI_Win_unlock(dest, win);
+                    MPI_Barrier(comm);
+
+                    sendtype.InitBuf(&sendtype);
+                    char *resbuf = (char *) calloc(lb + extent * recvtype.count, sizeof(char));
+
+                    /* wait for the destination to finish checking and reinitializing the buffer */
+                    MPI_Barrier(comm);
+
+                    MPI_Win_lock(MPI_LOCK_SHARED, dest, 0, win);
+                    MPI_Get_accumulate(sendtype.buf, sendtype.count,
+                                       sendtype.datatype, resbuf, recvtype.count, recvtype.datatype,
+                                       dest, 0, recvtype.count, recvtype.datatype, MPI_REPLACE,
+                                       win);
+                    MPI_Win_flush_local(dest, win);
+                    /* reset the send buffer to test local completion */
+                    memset(sendtype.buf, 0, slb + sextent * sendtype.count);
+                    MPI_Win_unlock(dest, win);
+                    MPI_Barrier(comm);
+                    free(resbuf);
+                }
+                else if (rank == dest) {
+
+                    MPI_Barrier(comm);
+                    MPI_Win_lock(MPI_LOCK_SHARED, dest, 0, win);
+                    int err = MTestCheckRecv(0, &recvtype);
+                    if (err)
+                        errs++;
+                    recvtype.InitBuf(&recvtype);
+                    MPI_Win_unlock(dest, win);
+
+                    /* signal the source that checking and reinitialization is done */
+                    MPI_Barrier(comm);
+
+                    MPI_Barrier(comm);
+                    MPI_Win_lock(MPI_LOCK_SHARED, dest, 0, win);
+                    err = MTestCheckRecv(0, &recvtype);
+                    if (err)
+                        errs++;
+                    MPI_Win_unlock(dest, win);
+                }
+
+                MPI_Win_free(&win);
+                MTestFreeDatatype(&sendtype);
+                MTestFreeDatatype(&recvtype);
+            }
+        }
+        MTestFreeComm(&comm);
+    }
+    MTest_Finalize(errs);
+    MPI_Finalize();
+    return 0;
+}
diff --git a/test/mpi/rma/lockall_dt.c b/test/mpi/rma/lockall_dt.c
new file mode 100644
index 0000000..bdb7e97
--- /dev/null
+++ b/test/mpi/rma/lockall_dt.c
@@ -0,0 +1,107 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ *  (C) 2015 by Argonne National Laboratory.
+ *      See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test for streaming ACC-like operations with lock_all";
+*/
+
+int main(int argc, char *argv[])
+{
+    int errs = 0;
+    int rank, size;
+    int minsize = 2, count;
+    MPI_Comm comm;
+    MPI_Win win;
+    MPI_Aint lb, extent;
+    MTestDatatype sendtype, recvtype;
+
+    MTest_Init(&argc, &argv);
+
+    while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
+        if (comm == MPI_COMM_NULL)
+            continue;
+
+        MPI_Comm_rank(comm, &rank);
+        MPI_Comm_size(comm, &size);
+        int source = 0;
+
+        MTEST_DATATYPE_FOR_EACH_COUNT(count) {
+            while (MTestGetDatatypes(&sendtype, &recvtype, count)) {
+                recvtype.printErrors = 1;
+                recvtype.InitBuf(&recvtype);
+                MPI_Type_get_extent(recvtype.datatype, &lb, &extent);
+
+                MPI_Win_create(recvtype.buf, lb + recvtype.count * extent,
+                               (int) extent, MPI_INFO_NULL, comm, &win);
+                if (rank == source) {
+                    int dest;
+                    sendtype.InitBuf(&sendtype);
+
+                    MPI_Win_lock_all(0, win);
+                    for (dest = 0; dest < size; dest++)
+                        if (dest != source) {
+                            MPI_Accumulate(sendtype.buf, sendtype.count,
+                                           sendtype.datatype, dest, 0,
+                                           recvtype.count, recvtype.datatype, MPI_REPLACE, win);
+                        }
+                    MPI_Win_unlock_all(win);
+                    MPI_Barrier(comm);
+
+                    char *resbuf = (char *) calloc(lb + extent * recvtype.count, sizeof(char));
+
+                    /* wait for the destinations to finish checking and reinitializing the buffers */
+                    MPI_Barrier(comm);
+
+                    MPI_Win_lock_all(0, win);
+                    for (dest = 0; dest < size; dest++)
+                        if (dest != source) {
+                            MPI_Get_accumulate(sendtype.buf, sendtype.count,
+                                               sendtype.datatype, resbuf, recvtype.count,
+                                               recvtype.datatype, dest, 0, recvtype.count,
+                                               recvtype.datatype, MPI_REPLACE, win);
+
+                        }
+                    MPI_Win_unlock_all(win);
+                    MPI_Barrier(comm);
+                    free(resbuf);
+                }
+                else {
+                    int err;
+                    MPI_Barrier(comm);
+                    MPI_Win_lock(MPI_LOCK_SHARED, rank, 0, win);
+                    err = MTestCheckRecv(0, &recvtype);
+                    if (err)
+                        errs++;
+                    recvtype.InitBuf(&recvtype);
+                    MPI_Win_unlock(rank, win);
+
+                    /* signal the source that checking and reinitialization is done */
+                    MPI_Barrier(comm);
+
+                    MPI_Barrier(comm);
+                    MPI_Win_lock(MPI_LOCK_SHARED, rank, 0, win);
+                    err = MTestCheckRecv(0, &recvtype);
+                    if (err)
+                        errs++;
+                    MPI_Win_unlock(rank, win);
+                }
+
+                MPI_Win_free(&win);
+                MTestFreeDatatype(&sendtype);
+                MTestFreeDatatype(&recvtype);
+            }
+        }
+        MTestFreeComm(&comm);
+    }
+    MTest_Finalize(errs);
+    MPI_Finalize();
+    return 0;
+}
diff --git a/test/mpi/rma/lockall_dt_flush.c b/test/mpi/rma/lockall_dt_flush.c
new file mode 100644
index 0000000..f6ddde3
--- /dev/null
+++ b/test/mpi/rma/lockall_dt_flush.c
@@ -0,0 +1,117 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ *  (C) 2015 by Argonne National Laboratory.
+ *      See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test for streaming ACC-like operations with lock_all+flush";
+*/
+
+int main(int argc, char *argv[])
+{
+    int errs = 0;
+    int rank, size;
+    int minsize = 2, count;
+    MPI_Comm comm;
+    MPI_Win win;
+    MPI_Aint lb, extent;
+    MTestDatatype sendtype, recvtype;
+
+    MTest_Init(&argc, &argv);
+
+    while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
+        if (comm == MPI_COMM_NULL)
+            continue;
+
+        MPI_Comm_rank(comm, &rank);
+        MPI_Comm_size(comm, &size);
+        int source = 0;
+
+        MTEST_DATATYPE_FOR_EACH_COUNT(count) {
+            while (MTestGetDatatypes(&sendtype, &recvtype, count)) {
+                recvtype.printErrors = 1;
+                recvtype.InitBuf(&recvtype);
+                MPI_Type_get_extent(recvtype.datatype, &lb, &extent);
+
+                MPI_Win_create(recvtype.buf, lb + recvtype.count * extent,
+                               (int) extent, MPI_INFO_NULL, comm, &win);
+                if (rank == source) {
+                    int dest;
+                    sendtype.InitBuf(&sendtype);
+
+                    MPI_Win_lock_all(0, win);
+
+                    for (dest = 0; dest < size; dest++)
+                        if (dest != source) {
+                            MPI_Accumulate(sendtype.buf, sendtype.count,
+                                           sendtype.datatype, dest, 0,
+                                           recvtype.count, recvtype.datatype, MPI_REPLACE, win);
+                            MPI_Win_flush(dest, win);
+                        }
+                    /* signal to dest that the ops are flushed so that it starts checking the result */
+                    MPI_Barrier(comm);
+                    /* make sure dest finishes checking the result before issuing unlock */
+                    MPI_Barrier(comm);
+                    MPI_Win_unlock_all(win);
+
+                    char *resbuf = (char *) calloc(lb + extent * recvtype.count, sizeof(char));
+
+                    /* wait for the destinations to finish checking and reinitializing the buffers */
+                    MPI_Barrier(comm);
+
+                    MPI_Win_lock_all(0, win);
+                    for (dest = 0; dest < size; dest++)
+                        if (dest != source) {
+                            MPI_Get_accumulate(sendtype.buf, sendtype.count,
+                                               sendtype.datatype, resbuf, recvtype.count,
+                                               recvtype.datatype, dest, 0, recvtype.count,
+                                               recvtype.datatype, MPI_REPLACE, win);
+                            MPI_Win_flush(dest, win);
+                        }
+                    /* signal to dest that the ops are flushed so that it starts checking the result */
+                    MPI_Barrier(comm);
+                    /* make sure dest finishes checking the result before issuing unlock */
+                    MPI_Barrier(comm);
+                    MPI_Win_unlock_all(win);
+                    free(resbuf);
+                }
+                else {
+                    int err;
+                    MPI_Barrier(comm);
+                    MPI_Win_lock(MPI_LOCK_SHARED, rank, 0, win);
+                    err = MTestCheckRecv(0, &recvtype);
+                    if (err)
+                        errs++;
+                    recvtype.InitBuf(&recvtype);
+                    MPI_Barrier(comm);
+                    MPI_Win_unlock(rank, win);
+
+                    /* signal the source that checking and reinitialization is done */
+                    MPI_Barrier(comm);
+
+                    MPI_Barrier(comm);
+                    MPI_Win_lock(MPI_LOCK_SHARED, rank, 0, win);
+                    err = MTestCheckRecv(0, &recvtype);
+                    if (err)
+                        errs++;
+                    MPI_Barrier(comm);
+                    MPI_Win_unlock(rank, win);
+                }
+
+                MPI_Win_free(&win);
+                MTestFreeDatatype(&sendtype);
+                MTestFreeDatatype(&recvtype);
+            }
+        }
+        MTestFreeComm(&comm);
+    }
+    MTest_Finalize(errs);
+    MPI_Finalize();
+    return 0;
+}
diff --git a/test/mpi/rma/lockall_dt_flushall.c b/test/mpi/rma/lockall_dt_flushall.c
new file mode 100644
index 0000000..9fdf416
--- /dev/null
+++ b/test/mpi/rma/lockall_dt_flushall.c
@@ -0,0 +1,118 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ *  (C) 2015 by Argonne National Laboratory.
+ *      See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test for streaming ACC-like operations with lock_all+flush_all";
+*/
+
+int main(int argc, char *argv[])
+{
+    int errs = 0;
+    int rank, size;
+    int minsize = 2, count;
+    MPI_Comm comm;
+    MPI_Win win;
+    MPI_Aint lb, extent;
+    MTestDatatype sendtype, recvtype;
+
+    MTest_Init(&argc, &argv);
+
+    while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
+        if (comm == MPI_COMM_NULL)
+            continue;
+
+        MPI_Comm_rank(comm, &rank);
+        MPI_Comm_size(comm, &size);
+        int source = 0;
+
+        MTEST_DATATYPE_FOR_EACH_COUNT(count) {
+            while (MTestGetDatatypes(&sendtype, &recvtype, count)) {
+                recvtype.printErrors = 1;
+                recvtype.InitBuf(&recvtype);
+                MPI_Type_get_extent(recvtype.datatype, &lb, &extent);
+
+                MPI_Win_create(recvtype.buf, lb + recvtype.count * extent,
+                               (int) extent, MPI_INFO_NULL, comm, &win);
+                if (rank == source) {
+                    int dest;
+                    sendtype.InitBuf(&sendtype);
+
+                    MPI_Win_lock_all(0, win);
+                    for (dest = 0; dest < size; dest++)
+                        if (dest != source) {
+                            MPI_Accumulate(sendtype.buf, sendtype.count,
+                                           sendtype.datatype, dest, 0,
+                                           recvtype.count, recvtype.datatype, MPI_REPLACE, win);
+                        }
+
+                    MPI_Win_flush_all(win);
+                    /* signal to dest that the ops are flushed so that it starts checking the result */
+                    MPI_Barrier(comm);
+                    /* make sure dest finishes checking the result before issuing unlock */
+                    MPI_Barrier(comm);
+                    MPI_Win_unlock_all(win);
+
+                    char *resbuf = (char *) calloc(lb + extent * recvtype.count, sizeof(char));
+
+                    /* wait for the destination to finish checking and reinitializing the buffer */
+                    MPI_Barrier(comm);
+
+                    MPI_Win_lock_all(0, win);
+                    for (dest = 0; dest < size; dest++)
+                        if (dest != source) {
+                            MPI_Get_accumulate(sendtype.buf, sendtype.count,
+                                               sendtype.datatype, resbuf, recvtype.count,
+                                               recvtype.datatype, dest, 0, recvtype.count,
+                                               recvtype.datatype, MPI_REPLACE, win);
+
+                        }
+                    MPI_Win_flush_all(win);
+                    /* signal to dest that the ops are flushed so that it starts checking the result */
+                    MPI_Barrier(comm);
+                    /* make sure dest finishes checking the result before issuing unlock */
+                    MPI_Barrier(comm);
+                    MPI_Win_unlock_all(win);
+                    free(resbuf);
+                }
+                else {
+                    int err;
+                    MPI_Barrier(comm);
+                    MPI_Win_lock(MPI_LOCK_SHARED, rank, 0, win);
+                    err = MTestCheckRecv(0, &recvtype);
+                    if (err)
+                        errs++;
+                    recvtype.InitBuf(&recvtype);
+                    MPI_Barrier(comm);
+                    MPI_Win_unlock(rank, win);
+
+                    /* signal the source that checking and reinitialization is done */
+                    MPI_Barrier(comm);
+
+                    MPI_Barrier(comm);
+                    MPI_Win_lock(MPI_LOCK_SHARED, rank, 0, win);
+                    err = MTestCheckRecv(0, &recvtype);
+                    if (err)
+                        errs++;
+                    MPI_Barrier(comm);
+                    MPI_Win_unlock(rank, win);
+                }
+
+                MPI_Win_free(&win);
+                MTestFreeDatatype(&sendtype);
+                MTestFreeDatatype(&recvtype);
+            }
+        }
+        MTestFreeComm(&comm);
+    }
+    MTest_Finalize(errs);
+    MPI_Finalize();
+    return 0;
+}
diff --git a/test/mpi/rma/lockall_dt_flushlocal.c b/test/mpi/rma/lockall_dt_flushlocal.c
new file mode 100644
index 0000000..a3726b6
--- /dev/null
+++ b/test/mpi/rma/lockall_dt_flushlocal.c
@@ -0,0 +1,117 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ *  (C) 2015 by Argonne National Laboratory.
+ *      See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test for streaming ACC-like operations with lock_all+flush_local";
+*/
+
+int main(int argc, char *argv[])
+{
+    int errs = 0;
+    int rank, size;
+    int minsize = 2, count;
+    MPI_Comm comm;
+    MPI_Win win;
+    MPI_Aint lb, extent;
+    MTestDatatype sendtype, recvtype;
+
+    MTest_Init(&argc, &argv);
+
+    while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
+        if (comm == MPI_COMM_NULL)
+            continue;
+
+        MPI_Comm_rank(comm, &rank);
+        MPI_Comm_size(comm, &size);
+        int source = 0;
+
+        MTEST_DATATYPE_FOR_EACH_COUNT(count) {
+            while (MTestGetDatatypes(&sendtype, &recvtype, count)) {
+                recvtype.printErrors = 1;
+                recvtype.InitBuf(&recvtype);
+                MPI_Type_get_extent(recvtype.datatype, &lb, &extent);
+
+                MPI_Win_create(recvtype.buf, lb + recvtype.count * extent,
+                               (int) extent, MPI_INFO_NULL, comm, &win);
+                if (rank == source) {
+                    int dest;
+                    MPI_Aint slb, sextent;
+                    MPI_Type_get_extent(sendtype.datatype, &slb, &sextent);
+                    sendtype.InitBuf(&sendtype);
+
+                    MPI_Win_lock_all(0, win);
+
+                    for (dest = 0; dest < size; dest++)
+                        if (dest != source) {
+                            MPI_Accumulate(sendtype.buf, sendtype.count,
+                                           sendtype.datatype, dest, 0,
+                                           recvtype.count, recvtype.datatype, MPI_REPLACE, win);
+                            MPI_Win_flush_local(dest, win);
+                        }
+                    /* reset the send buffer to test local completion */
+                    memset(sendtype.buf, 0, slb + sextent * sendtype.count);
+                    MPI_Win_unlock_all(win);
+                    MPI_Barrier(comm);
+
+                    sendtype.InitBuf(&sendtype);
+                    char *resbuf = (char *) calloc(lb + extent * recvtype.count, sizeof(char));
+
+                    /* wait for the destinations to finish checking and reinitializing the buffers */
+                    MPI_Barrier(comm);
+
+                    MPI_Win_lock_all(0, win);
+                    for (dest = 0; dest < size; dest++)
+                        if (dest != source) {
+                            MPI_Get_accumulate(sendtype.buf, sendtype.count,
+                                               sendtype.datatype, resbuf, recvtype.count,
+                                               recvtype.datatype, dest, 0, recvtype.count,
+                                               recvtype.datatype, MPI_REPLACE, win);
+                            MPI_Win_flush_local(dest, win);
+                        }
+                    /* reset the send buffer to test local completion */
+                    memset(sendtype.buf, 0, slb + sextent * sendtype.count);
+                    MPI_Win_unlock_all(win);
+                    MPI_Barrier(comm);
+                    free(resbuf);
+                }
+                else {
+                    int err;
+                    MPI_Barrier(comm);
+                    MPI_Win_lock(MPI_LOCK_SHARED, rank, 0, win);
+                    err = MTestCheckRecv(0, &recvtype);
+                    if (err)
+                        errs++;
+                    recvtype.InitBuf(&recvtype);
+                    MPI_Win_unlock(rank, win);
+
+                    /* signal the source that checking and reinitialization is done */
+                    MPI_Barrier(comm);
+
+                    MPI_Barrier(comm);
+                    MPI_Win_lock(MPI_LOCK_SHARED, rank, 0, win);
+                    err = MTestCheckRecv(0, &recvtype);
+                    if (err)
+                        errs++;
+                    MPI_Win_unlock(rank, win);
+                }
+
+                MPI_Win_free(&win);
+                MTestFreeDatatype(&sendtype);
+                MTestFreeDatatype(&recvtype);
+            }
+        }
+        MTestFreeComm(&comm);
+    }
+    MTest_Finalize(errs);
+    MPI_Finalize();
+    return 0;
+}
diff --git a/test/mpi/rma/lockall_dt_flushlocalall.c b/test/mpi/rma/lockall_dt_flushlocalall.c
new file mode 100644
index 0000000..9d662fa
--- /dev/null
+++ b/test/mpi/rma/lockall_dt_flushlocalall.c
@@ -0,0 +1,118 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ *  (C) 2015 by Argonne National Laboratory.
+ *      See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test for streaming ACC-like operations with lock_all+flush_local_all";
+*/
+
+int main(int argc, char *argv[])
+{
+    int errs = 0;
+    int rank, size;
+    int minsize = 2, count;
+    MPI_Comm comm;
+    MPI_Win win;
+    MPI_Aint lb, extent;
+    MTestDatatype sendtype, recvtype;
+
+    MTest_Init(&argc, &argv);
+
+    while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
+        if (comm == MPI_COMM_NULL)
+            continue;
+
+        MPI_Comm_rank(comm, &rank);
+        MPI_Comm_size(comm, &size);
+        int source = 0;
+
+        MTEST_DATATYPE_FOR_EACH_COUNT(count) {
+            while (MTestGetDatatypes(&sendtype, &recvtype, count)) {
+                recvtype.printErrors = 1;
+                recvtype.InitBuf(&recvtype);
+                MPI_Type_get_extent(recvtype.datatype, &lb, &extent);
+
+                MPI_Win_create(recvtype.buf, lb + recvtype.count * extent,
+                               (int) extent, MPI_INFO_NULL, comm, &win);
+                if (rank == source) {
+                    int dest;
+                    MPI_Aint slb, sextent;
+                    MPI_Type_get_extent(sendtype.datatype, &slb, &sextent);
+                    sendtype.InitBuf(&sendtype);
+
+                    MPI_Win_lock_all(0, win);
+                    for (dest = 0; dest < size; dest++)
+                        if (dest != source) {
+                            MPI_Accumulate(sendtype.buf, sendtype.count,
+                                           sendtype.datatype, dest, 0,
+                                           recvtype.count, recvtype.datatype, MPI_REPLACE, win);
+                        }
+
+                    MPI_Win_flush_local_all(win);
+                    /* reset the send buffer to test local completion */
+                    memset(sendtype.buf, 0, slb + sextent * sendtype.count);
+                    MPI_Win_unlock_all(win);
+                    MPI_Barrier(comm);
+
+                    sendtype.InitBuf(&sendtype);
+                    char *resbuf = (char *) calloc(lb + extent * recvtype.count, sizeof(char));
+
+                    /* wait for the destination to finish checking and reinitializing the buffer */
+                    MPI_Barrier(comm);
+
+                    MPI_Win_lock_all(0, win);
+                    for (dest = 0; dest < size; dest++)
+                        if (dest != source) {
+                            MPI_Get_accumulate(sendtype.buf, sendtype.count,
+                                               sendtype.datatype, resbuf, recvtype.count,
+                                               recvtype.datatype, dest, 0, recvtype.count,
+                                               recvtype.datatype, MPI_REPLACE, win);
+
+                        }
+                    MPI_Win_flush_local_all(win);
+                    /* reset the send buffer to test local completion */
+                    memset(sendtype.buf, 0, slb + sextent * sendtype.count);
+                    MPI_Win_unlock_all(win);
+                    MPI_Barrier(comm);
+                    free(resbuf);
+                }
+                else {
+                    int err;
+                    MPI_Barrier(comm);
+                    MPI_Win_lock(MPI_LOCK_SHARED, rank, 0, win);
+                    err = MTestCheckRecv(0, &recvtype);
+                    if (err)
+                        errs++;
+                    recvtype.InitBuf(&recvtype);
+                    MPI_Win_unlock(rank, win);
+
+                    /* signal the source that checking and reinitialization is done */
+                    MPI_Barrier(comm);
+
+                    MPI_Barrier(comm);
+                    MPI_Win_lock(MPI_LOCK_SHARED, rank, 0, win);
+                    err = MTestCheckRecv(0, &recvtype);
+                    if (err)
+                        errs++;
+                    MPI_Win_unlock(rank, win);
+                }
+
+                MPI_Win_free(&win);
+                MTestFreeDatatype(&sendtype);
+                MTestFreeDatatype(&recvtype);
+            }
+        }
+        MTestFreeComm(&comm);
+    }
+    MTest_Finalize(errs);
+    MPI_Finalize();
+    return 0;
+}
diff --git a/test/mpi/rma/testlist.in b/test/mpi/rma/testlist.in
index fbf65cc..5f59ccd 100644
--- a/test/mpi/rma/testlist.in
+++ b/test/mpi/rma/testlist.in
@@ -28,6 +28,15 @@ lockcontention2 4
 lockcontention2 8
 lockcontention3 8
 lockopts 2
+lock_dt 2
+lock_dt_flush 2
+lock_dt_flushlocal 2
+lockall_dt 4
+lockall_dt_flush 4
+lockall_dt_flushall 4
+lockall_dt_flushlocal 4
+lockall_dt_flushlocalall 4
+lock_contention_dt 4
 transpose4 2
 fetchandadd 7
 fetchandadd_tree 7

-----------------------------------------------------------------------

Summary of changes:
 test/mpi/.gitignore                     |    9 +++
 test/mpi/rma/Makefile.am                |    9 +++
 test/mpi/rma/lock_contention_dt.c       |   99 ++++++++++++++++++++++++++
 test/mpi/rma/lock_dt.c                  |  101 ++++++++++++++++++++++++++
 test/mpi/rma/lock_dt_flush.c            |  109 ++++++++++++++++++++++++++++
 test/mpi/rma/lock_dt_flushlocal.c       |  110 ++++++++++++++++++++++++++++
 test/mpi/rma/lockall_dt.c               |  107 ++++++++++++++++++++++++++++
 test/mpi/rma/lockall_dt_flush.c         |  117 ++++++++++++++++++++++++++++++
 test/mpi/rma/lockall_dt_flushall.c      |  118 +++++++++++++++++++++++++++++++
 test/mpi/rma/lockall_dt_flushlocal.c    |  117 ++++++++++++++++++++++++++++++
 test/mpi/rma/lockall_dt_flushlocalall.c |  118 +++++++++++++++++++++++++++++++
 test/mpi/rma/testlist.in                |    9 +++
 12 files changed, 1023 insertions(+), 0 deletions(-)
 create mode 100644 test/mpi/rma/lock_contention_dt.c
 create mode 100644 test/mpi/rma/lock_dt.c
 create mode 100644 test/mpi/rma/lock_dt_flush.c
 create mode 100644 test/mpi/rma/lock_dt_flushlocal.c
 create mode 100644 test/mpi/rma/lockall_dt.c
 create mode 100644 test/mpi/rma/lockall_dt_flush.c
 create mode 100644 test/mpi/rma/lockall_dt_flushall.c
 create mode 100644 test/mpi/rma/lockall_dt_flushlocal.c
 create mode 100644 test/mpi/rma/lockall_dt_flushlocalall.c


hooks/post-receive
-- 
MPICH primary repository

