[mpich-commits] [mpich] MPICH primary repository branch, master, updated. v3.2a1-34-geda105b
Service Account
noreply at mpich.org
Wed Oct 1 17:29:25 CDT 2014
This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project "MPICH primary repository".
The branch, master has been updated
via eda105b029572ebf7f4008f6de2e1afb9ccf6fc2 (commit)
via 98114df4c0504568ab188ea98a9d1269d087bd82 (commit)
via ad25799865749680338c05264c8007242f01326a (commit)
from 6b8c426d90b28bb0ed6ff6ade6ac03a4e4b96634 (commit)
Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.
- Log -----------------------------------------------------------------
http://git.mpich.org/mpich.git/commitdiff/eda105b029572ebf7f4008f6de2e1afb9ccf6fc2
commit eda105b029572ebf7f4008f6de2e1afb9ccf6fc2
Author: Junchao Zhang <jczhang at mcs.anl.gov>
Date: Tue Sep 16 15:58:55 2014 -0500
Remove unnecessary array init in MPIU_CHKLMEM_DECL
Initializing mpiu_chklmem_stk_sp_ to 0 should already have the effect of
initializing mpiu_chklmem_stk_[n_].
Signed-off-by: Antonio J. Pena <apenya at mcs.anl.gov>
diff --git a/src/include/mpimem.h b/src/include/mpimem.h
index a217e8b..e1478a5 100644
--- a/src/include/mpimem.h
+++ b/src/include/mpimem.h
@@ -317,7 +317,7 @@ extern char *strdup( const char * );
}}
#else
#define MPIU_CHKLMEM_DECL(n_) \
- void *(mpiu_chklmem_stk_[n_]) = {0};\
+ void *(mpiu_chklmem_stk_[n_]); \
int mpiu_chklmem_stk_sp_=0;\
MPIU_AssertDeclValue(const int mpiu_chklmem_stk_sz_,n_)
http://git.mpich.org/mpich.git/commitdiff/98114df4c0504568ab188ea98a9d1269d087bd82
commit 98114df4c0504568ab188ea98a9d1269d087bd82
Author: Junchao Zhang <jczhang at mcs.anl.gov>
Date: Thu Sep 4 11:10:36 2014 -0500
Change // comments to /* */ style
// comments are erroneous with --enable-strict=c89
Signed-off-by: Antonio J. Pena <apenya at mcs.anl.gov>
diff --git a/src/mpi/romio/adio/common/p2p_aggregation.c b/src/mpi/romio/adio/common/p2p_aggregation.c
index 46cfe55..c7c1800 100644
--- a/src/mpi/romio/adio/common/p2p_aggregation.c
+++ b/src/mpi/romio/adio/common/p2p_aggregation.c
@@ -4,7 +4,7 @@
#include <pthread.h>
-// #define p2pcontigtrace 1
+/* #define p2pcontigtrace 1 */
void ADIOI_P2PContigWriteAggregation(ADIO_File fd,
const void *buf,
@@ -15,7 +15,7 @@ void ADIOI_P2PContigWriteAggregation(ADIO_File fd,
ADIO_Offset* fd_end)
{
- *error_code = MPI_SUCCESS; // initialize to success
+ *error_code = MPI_SUCCESS; /* initialize to success */
#ifdef ROMIO_GPFS
double startTimeBase,endTimeBase;
@@ -32,7 +32,7 @@ void ADIOI_P2PContigWriteAggregation(ADIO_File fd,
ADIO_Offset myOffsetStart = st_offsets[myrank], myOffsetEnd = end_offsets[myrank];
- int myAggRank = -1; // if I am an aggregor this is my index into fd->hints->ranklist
+ int myAggRank = -1; /* if I am an aggregor this is my index into fd->hints->ranklist */
int iAmUsedAgg = 0;
#ifdef ROMIO_GPFS
@@ -85,7 +85,7 @@ void ADIOI_P2PContigWriteAggregation(ADIO_File fd,
if (iAmUsedAgg) { /* for the used aggregators figure out how much data I
need from what procs */
- // count numSourceProcs so we know how large to make the arrays
+ /* count numSourceProcs so we know how large to make the arrays */
for (i=0;i<nprocs;i++)
if ( ((st_offsets[i] >= fd_start[myAggRank]) && (st_offsets[i] <= fd_end[myAggRank])) || ((end_offsets[i] >= fd_start[myAggRank]) && (end_offsets[i] <= fd_end[myAggRank])))
numSourceProcs++;
@@ -105,11 +105,11 @@ void ADIOI_P2PContigWriteAggregation(ADIO_File fd,
remainingDataAmountToGetPerProc[numSourceProcs] = (end_offsets[i] - st_offsets[i])+1;
remainingDataOffsetToGetPerProc[numSourceProcs] = st_offsets[i];
}
- else if ((st_offsets[i] >= fd_start[myAggRank]) && (st_offsets[i] <= fd_end[myAggRank])) {// starts in this fd and goes past it
+ else if ((st_offsets[i] >= fd_start[myAggRank]) && (st_offsets[i] <= fd_end[myAggRank])) {/* starts in this fd and goes past it */
remainingDataAmountToGetPerProc[numSourceProcs] = (fd_end[myAggRank] - st_offsets[i]) +1;
remainingDataOffsetToGetPerProc[numSourceProcs] = st_offsets[i];
}
- else { // starts in fd before this and ends in it
+ else { /* starts in fd before this and ends in it */
remainingDataAmountToGetPerProc[numSourceProcs] = (end_offsets[i] - fd_start[myAggRank]) +1;
remainingDataOffsetToGetPerProc[numSourceProcs] = fd_start[myAggRank];
}
@@ -135,7 +135,7 @@ void ADIOI_P2PContigWriteAggregation(ADIO_File fd,
MPI_Status mpiWaitAnyStatusFromTargetAggs,mpiWaitAnyStatusFromSourceProcs;
MPI_Status mpiIsendStatusForSize, mpiIsendStatusForData;
- // use the write buffer allocated in the file_open
+ /* use the write buffer allocated in the file_open */
char *write_buf0 = fd->io_buf;
char *write_buf1 = fd->io_buf + coll_bufsize;
@@ -143,7 +143,7 @@ void ADIOI_P2PContigWriteAggregation(ADIO_File fd,
* case) we'll swap later */
char *write_buf = write_buf0;
- // compute number of rounds
+ /* compute number of rounds */
ADIO_Offset numberOfRounds = (ADIO_Offset)((((ADIO_Offset)(end_offsets[nprocs-1]-st_offsets[0]))/((ADIO_Offset)((ADIO_Offset)coll_bufsize*(ADIO_Offset)naggs)))) + 1;
int currentWriteBuf = 0;
@@ -184,9 +184,9 @@ void ADIOI_P2PContigWriteAggregation(ADIO_File fd,
for (i=0;i<numTargetAggs;i++) {
if ( ((myOffsetStart >= targetAggsForMyDataFDStart[i]) && (myOffsetStart <= targetAggsForMyDataFDEnd[i])) ||
((myOffsetEnd >= targetAggsForMyDataFDStart[i]) && (myOffsetEnd <= targetAggsForMyDataFDEnd[i]))) {
- // we know that we need to send data to this target agg at some point, now need to figure out how much this round
+ /* we know that we need to send data to this target agg at some point, now need to figure out how much this round */
- // here are the offsets currently being collected by the aggregator during this round
+ /* here are the offsets currently being collected by the aggregator during this round */
ADIO_Offset currentRoundFDStartForMyTargetAgg = (ADIO_Offset)((ADIO_Offset)targetAggsForMyDataFDStart[i] + (ADIO_Offset)((ADIO_Offset)roundIter*(ADIO_Offset)coll_bufsize));
ADIO_Offset currentRoundFDEndForMyTargetAgg = (ADIO_Offset)((ADIO_Offset)targetAggsForMyDataFDStart[i] + (ADIO_Offset)((ADIO_Offset)(roundIter+1)*(ADIO_Offset)coll_bufsize) - (ADIO_Offset)1);
if (currentRoundFDEndForMyTargetAgg > targetAggsForMyDataFDEnd[i])
@@ -225,7 +225,7 @@ void ADIOI_P2PContigWriteAggregation(ADIO_File fd,
bufferAmountToSend = (currentRoundFDEndForMyTargetAgg - currentRoundFDStartForMyTargetAgg) +1;
}
- if (bufferAmountToSend > 0) { // we have data to send this round
+ if (bufferAmountToSend > 0) { /* we have data to send this round */
targetAggIndexesForMyDataThisRound[numTargetAggsThisRound] = i;
sendBufferOffsetsThisRound[numTargetAggsThisRound] = sendBufferOffset;
bufferAmountsToSendThisRound[numTargetAggsThisRound] = bufferAmountToSend;
@@ -244,7 +244,7 @@ void ADIOI_P2PContigWriteAggregation(ADIO_File fd,
}
}
- // determine what offsets define the portion of the file domain the agg is writing this round
+ /* determine what offsets define the portion of the file domain the agg is writing this round */
if (iAmUsedAgg) {
if ((fd_end[myAggRank] - currentRoundFDStart) < coll_bufsize) {
currentRoundFDEnd = fd_end[myAggRank];
@@ -259,7 +259,7 @@ void ADIOI_P2PContigWriteAggregation(ADIO_File fd,
int irecv,isend;
int numSourceProcsSentData = 0;
- // the aggs send the amount of data they need to their source procs
+ /* the aggs send the amount of data they need to their source procs */
for (i=0;i<numSourceProcs;i++) {
if ((remainingDataOffsetToGetPerProc[i] >= currentRoundFDStart) && (remainingDataOffsetToGetPerProc[i] <= currentRoundFDEnd)) {
if ((remainingDataOffsetToGetPerProc[i] + remainingDataAmountToGetPerProc[i]) <= currentRoundFDEnd)
@@ -324,7 +324,7 @@ void ADIOI_P2PContigWriteAggregation(ADIO_File fd,
startTimeBase = MPI_Wtime();
#endif
- // the aggs receive the data from the source procs
+ /* the aggs receive the data from the source procs */
int numDataRecvToWaitFor = 0;
for (i=0;i<numSourceProcs;i++) {
@@ -332,7 +332,7 @@ void ADIOI_P2PContigWriteAggregation(ADIO_File fd,
for (j=0;j<i;j++)
currentWBOffset += dataSizeGottenThisRoundPerProc[j];
- // only receive from source procs that will send > 0 count data
+ /* only receive from source procs that will send > 0 count data */
if (dataSizeGottenThisRoundPerProc[i] > 0) {
#ifdef p2pcontigtrace
printf("receiving data from rank %d dataSizeGottenThisRoundPerProc is %d currentWBOffset is %d\n",sourceProcsForMyData[i],dataSizeGottenThisRoundPerProc[i],currentWBOffset);
@@ -380,7 +380,7 @@ void ADIOI_P2PContigWriteAggregation(ADIO_File fd,
endTimeBase = MPI_Wtime();
gpfsmpio_prof_cw[GPFSMPIO_CIO_T_DEXCH_NET] += (endTimeBase-startTimeBase);
#endif
- // the aggs now write the data
+ /* the aggs now write the data */
if (numDataRecvToWaitFor > 0) {
#ifdef p2pcontigtrace
@@ -391,7 +391,7 @@ void ADIOI_P2PContigWriteAggregation(ADIO_File fd,
ADIO_WriteContig(fd, write_buf, (int)totalDataReceivedThisRound,
MPI_BYTE, ADIO_EXPLICIT_OFFSET,
currentRoundFDStart, &status, error_code);
- } else { // use the thread writer
+ } else { /* use the thread writer */
if(!pthread_equal(io_thread, pthread_self())) {
pthread_join(io_thread, &thread_ret);
@@ -425,7 +425,7 @@ void ADIOI_P2PContigWriteAggregation(ADIO_File fd,
}
- } // numDataRecvToWaitFor > 0
+ } /* numDataRecvToWaitFor > 0 */
if (iAmUsedAgg)
currentRoundFDStart += coll_bufsize;
@@ -434,14 +434,14 @@ void ADIOI_P2PContigWriteAggregation(ADIO_File fd,
&mpiIsendStatusForData);
}
- } // for-loop roundIter
+ } /* for-loop roundIter */
#ifdef ROMIO_GPFS
endTimeBase = MPI_Wtime();
gpfsmpio_prof_cw[GPFSMPIO_CIO_T_DEXCH] += (endTimeBase-startTimeBase);
#endif
- if (useIOBuffer) { // thread writer cleanup
+ if (useIOBuffer) { /* thread writer cleanup */
if ( !pthread_equal(io_thread, pthread_self()) ) {
pthread_join(io_thread, &thread_ret);
@@ -486,7 +486,7 @@ void ADIOI_P2PContigReadAggregation(ADIO_File fd,
ADIO_Offset* fd_end)
{
- *error_code = MPI_SUCCESS; // initialize to success
+ *error_code = MPI_SUCCESS; /* initialize to success */
#ifdef ROMIO_GPFS
double startTimeBase,endTimeBase;
@@ -507,7 +507,7 @@ void ADIOI_P2PContigReadAggregation(ADIO_File fd,
ADIO_Offset myOffsetStart = st_offsets[myrank], myOffsetEnd = end_offsets[myrank];
- int myAggRank = -1; // if I am an aggregor this is my index into fd->hints->ranklist
+ int myAggRank = -1; /* if I am an aggregor this is my index into fd->hints->ranklist */
int iAmUsedAgg = 0;
int naggs = fd->hints->cb_nodes;
@@ -585,11 +585,11 @@ void ADIOI_P2PContigReadAggregation(ADIO_File fd,
remainingDataAmountToSendPerProc[numTargetProcs] = (end_offsets[i] - st_offsets[i])+1;
remainingDataOffsetToSendPerProc[numTargetProcs] = st_offsets[i];
}
- else if ((st_offsets[i] >= fd_start[myAggRank]) && (st_offsets[i] <= fd_end[myAggRank])) {// starts in this fd and goes past it
+ else if ((st_offsets[i] >= fd_start[myAggRank]) && (st_offsets[i] <= fd_end[myAggRank])) {/* starts in this fd and goes past it */
remainingDataAmountToSendPerProc[numTargetProcs] = (fd_end[myAggRank] - st_offsets[i]) +1;
remainingDataOffsetToSendPerProc[numTargetProcs] = st_offsets[i];
}
- else { // starts in fd before this and ends in it
+ else { /* starts in fd before this and ends in it */
remainingDataAmountToSendPerProc[numTargetProcs] = (end_offsets[i] - fd_start[myAggRank]) +1;
remainingDataOffsetToSendPerProc[numTargetProcs] = fd_start[myAggRank];
}
@@ -610,7 +610,7 @@ void ADIOI_P2PContigReadAggregation(ADIO_File fd,
/* if threaded i/o selected, we'll do a kind of double buffering */
char *read_buf = read_buf0;
- // compute number of rounds
+ /* compute number of rounds */
ADIO_Offset numberOfRounds = (ADIO_Offset)((((ADIO_Offset)(end_offsets[nprocs-1]-st_offsets[0]))/((ADIO_Offset)((ADIO_Offset)coll_bufsize*(ADIO_Offset)naggs)))) + 1;
ADIO_Offset currentRoundFDStart = 0, nextRoundFDStart = 0;
@@ -642,12 +642,12 @@ void ADIOI_P2PContigReadAggregation(ADIO_File fd,
#endif
- // each iteration of this loop reads a coll_bufsize portion of the file domain
+ /* each iteration of this loop reads a coll_bufsize portion of the file domain */
int roundIter;
for (roundIter=0;roundIter<numberOfRounds;roundIter++) {
int irecv,isend;
- // determine what offsets define the portion of the file domain the agg is reading this round
+ /* determine what offsets define the portion of the file domain the agg is reading this round */
if (iAmUsedAgg) {
currentRoundFDStart = nextRoundFDStart;
@@ -663,7 +663,7 @@ void ADIOI_P2PContigReadAggregation(ADIO_File fd,
amountDataToReadThisRound = coll_bufsize;
}
- // read currentRoundFDEnd bytes
+ /* read currentRoundFDEnd bytes */
ADIO_ReadContig(fd, read_buf,amountDataToReadThisRound,
MPI_BYTE, ADIO_EXPLICIT_OFFSET, currentRoundFDStart,
&status, error_code);
@@ -673,8 +673,8 @@ void ADIOI_P2PContigReadAggregation(ADIO_File fd,
#endif
}
- if (useIOBuffer) { // use the thread reader for the next round
- // switch back and forth between the read buffers so that the data aggregation code is diseminating 1 buffer while the thread is reading into the other
+ if (useIOBuffer) { /* use the thread reader for the next round */
+ /* switch back and forth between the read buffers so that the data aggregation code is diseminating 1 buffer while the thread is reading into the other */
if (roundIter > 0)
currentRoundFDEnd = nextRoundFDEnd;
@@ -722,7 +722,7 @@ void ADIOI_P2PContigReadAggregation(ADIO_File fd,
io_thread = pthread_self();
}
- else { // last round
+ else { /* last round */
if(!pthread_equal(io_thread, pthread_self())) {
pthread_join(io_thread, &thread_ret);
@@ -739,8 +739,8 @@ void ADIOI_P2PContigReadAggregation(ADIO_File fd,
}
}
- } // useIOBuffer
- } // IAmUsedAgg
+ } /* useIOBuffer */
+ } /* IAmUsedAgg */
/* determine what source aggs I need to get data from this round and
* recv only from them */
@@ -766,8 +766,8 @@ void ADIOI_P2PContigReadAggregation(ADIO_File fd,
printf("roundIter %d source iter %d sourceAggsForMyData is %d myOffsetStart is %ld myOffsetEnd is %ld sourceAggsForMyDataFDStart is %ld sourceAggsForMyDataFDEnd is %ld currentRoundFDStartForMySourceAgg is %ld currentRoundFDEndForMySourceAgg is %ld\n",roundIter,i,sourceAggsForMyData[i],myOffsetStart,myOffsetEnd,sourceAggsForMyDataFDStart[i],sourceAggsForMyDataFDEnd[i],currentRoundFDStartForMySourceAgg,currentRoundFDEndForMySourceAgg);
#endif
- // get the portion of my data that is within currentRoundFDStartForMySourceAgg to currentRoundFDEndForMySourceAgg
- // find the offset into the recv buffer and the amount of data to get
+ /* get the portion of my data that is within currentRoundFDStartForMySourceAgg to currentRoundFDEndForMySourceAgg */
+ /* find the offset into the recv buffer and the amount of data to get */
int recvBufferOffset = 0;
int bufferAmountToGet = 0;
@@ -790,7 +790,7 @@ void ADIOI_P2PContigReadAggregation(ADIO_File fd,
}
- if (bufferAmountToGet > 0) { // we have data to get this round
+ if (bufferAmountToGet > 0) { /* we have data to get this round */
sourceAggIndexesForMyDataThisRound[numSourceAggsThisRound] = i;
recvBufferOffsetsThisRound[numSourceAggsThisRound] = recvBufferOffset;
bufferAmountsToGetThisRound[numSourceAggsThisRound] = bufferAmountToGet;
@@ -838,7 +838,7 @@ void ADIOI_P2PContigReadAggregation(ADIO_File fd,
&mpiRecvDataFromSourceAggsRequest[i]);
}
- // the source aggs send the data to the target procs
+ /* the source aggs send the data to the target procs */
int numTargetProcsSentThisRound = 0;
for (i=0;i<numTargetProcs;i++) {
@@ -846,7 +846,7 @@ void ADIOI_P2PContigReadAggregation(ADIO_File fd,
for (j=0;j<i;j++)
currentWBOffset += dataSizeSentThisRoundPerProc[j];
- // only send to target procs that will recv > 0 count data
+ /* only send to target procs that will recv > 0 count data */
if (dataSizeSentThisRoundPerProc[i] > 0) {
MPI_Isend(&((char*)read_buf)[currentWBOffset],
dataSizeSentThisRoundPerProc[i],
@@ -858,7 +858,7 @@ void ADIOI_P2PContigReadAggregation(ADIO_File fd,
}
}
- // wait for the target procs to get their data
+ /* wait for the target procs to get their data */
for (i = 0; i < numSourceAggsThisRound; i++) {
MPI_Waitany(numSourceAggsThisRound,mpiRecvDataFromSourceAggsRequest,
&irecv,&mpiWaitAnyStatusFromSourceProcs);
@@ -866,17 +866,17 @@ void ADIOI_P2PContigReadAggregation(ADIO_File fd,
nextRoundFDStart = currentRoundFDStart + coll_bufsize;
- // clean up the MPI_Isend MPI_Requests
+ /* clean up the MPI_Isend MPI_Requests */
for (i=0;i<numTargetProcsSentThisRound;i++) {
MPI_Waitany(numTargetProcsSentThisRound,mpiSendDataToTargetProcRequest,
&isend,&mpiIsendStatusForData);
}
- MPI_Barrier(fd->comm); // need to sync up the source aggs which did the isend with the target procs which did the irecvs to give the target procs time to get the data before overwriting with next round readcontig
+ MPI_Barrier(fd->comm); /* need to sync up the source aggs which did the isend with the target procs which did the irecvs to give the target procs time to get the data before overwriting with next round readcontig */
- } // for-loop roundIter
+ } /* for-loop roundIter */
- if (useIOBuffer) { // thread reader cleanup
+ if (useIOBuffer) { /* thread reader cleanup */
if ( !pthread_equal(io_thread, pthread_self()) ) {
pthread_join(io_thread, &thread_ret);
diff --git a/src/mpi/romio/adio/common/system_hints.c b/src/mpi/romio/adio/common/system_hints.c
index 0a2c642..5d0e24b 100644
--- a/src/mpi/romio/adio/common/system_hints.c
+++ b/src/mpi/romio/adio/common/system_hints.c
@@ -163,7 +163,7 @@ void ADIOI_incorporate_system_hints(MPI_Info info,
MPI_Info sysinfo,
MPI_Info *new_info)
{
- int i, nkeys_sysinfo, flag=0; // must initialize flag to 0
+ int i, nkeys_sysinfo, flag=0; /* must initialize flag to 0 */
char val[MPI_MAX_INFO_VAL], key[MPI_MAX_INFO_KEY];
diff --git a/src/mpi/romio/adio/include/adio.h b/src/mpi/romio/adio/include/adio.h
index 90206d9..7ad3ced 100644
--- a/src/mpi/romio/adio/include/adio.h
+++ b/src/mpi/romio/adio/include/adio.h
@@ -293,10 +293,10 @@ typedef struct {
#define ADIO_PANFS 161 /* Panasas FS */
#define ADIO_GRIDFTP 162 /* Globus GridFTP */
#define ADIO_LUSTRE 163 /* Lustre */
-// #define ADIO_BGL 164 /* IBM BGL */
-// #define ADIO_BGLOCKLESS 165 /* IBM BGL (lock-free) */
+/* #define ADIO_BGL 164 */ /* IBM BGL */
+/* #define ADIO_BGLOCKLESS 165 */ /* IBM BGL (lock-free) */
#define ADIO_ZOIDFS 167 /* ZoidFS: the I/O forwarding fs */
-//#define ADIO_BG 168
+/* #define ADIO_BG 168 */
#define ADIO_GPFS 168
#define ADIO_SEEK_SET SEEK_SET
http://git.mpich.org/mpich.git/commitdiff/ad25799865749680338c05264c8007242f01326a
commit ad25799865749680338c05264c8007242f01326a
Author: Junchao Zhang <jczhang at mcs.anl.gov>
Date: Thu Sep 4 08:23:23 2014 -0500
Fix mixed code warnings reported by --enable-strict=c89
The warning message is:
warning: ISO C90 forbids mixed declarations and code [-Wdeclaration-after-statement]
Fixes #2167
Signed-off-by: Antonio J. Pena <apenya at mcs.anl.gov>
diff --git a/src/binding/fortran/use_mpi_f08/wrappers_c/comm_spawn_multiple_c.c b/src/binding/fortran/use_mpi_f08/wrappers_c/comm_spawn_multiple_c.c
index c87766c..5e07e57 100644
--- a/src/binding/fortran/use_mpi_f08/wrappers_c/comm_spawn_multiple_c.c
+++ b/src/binding/fortran/use_mpi_f08/wrappers_c/comm_spawn_multiple_c.c
@@ -58,8 +58,9 @@ int MPIR_Comm_spawn_multiple_c(int count, char* array_of_commands_f,
for (i = 0; i < count; i++) {
/* Extract args of command i, and put them in buf */
+ char *arg;
offset = 0; /* offset in bytes in buf to put next arg */
- char* arg = array_of_argv_f + argv_elem_len * i; /* Point to 1st arg of command i */
+ arg = array_of_argv_f + argv_elem_len * i; /* Point to 1st arg of command i */
do {
if (offset + argv_elem_len > len) { /* Make sure buf is big enough */
len = offset + argv_elem_len;
diff --git a/src/include/mpit.h b/src/include/mpit.h
index 549ae46..1a87f56 100644
--- a/src/include/mpit.h
+++ b/src/include/mpit.h
@@ -42,10 +42,11 @@ extern int MPIR_T_cat_add_desc(const char *cat_name, const char *cat_desc);
static inline cvar_table_entry_t * LOOKUP_CVAR_BY_NAME(const char* cvar_name)
{
+ int cvar_idx;
name2index_hash_t *hash_entry;
HASH_FIND_STR(cvar_hash, cvar_name, hash_entry);
MPIU_Assert(hash_entry != NULL);
- int cvar_idx = hash_entry->idx;
+ cvar_idx = hash_entry->idx;
return (cvar_table_entry_t *)utarray_eltptr(cvar_table, cvar_idx);
}
diff --git a/src/include/mpitimpl.h b/src/include/mpitimpl.h
index 8de41f8..75b72fb 100644
--- a/src/include/mpitimpl.h
+++ b/src/include/mpitimpl.h
@@ -388,6 +388,7 @@ extern void MPIR_T_PVAR_REGISTER_impl(
#define MPIR_T_PVAR_STATE_REGISTER_STATIC_impl(dtype_, name_, \
initval_, etype_, verb_, bind_, flags_, cat_, desc_) \
do { \
+ void *addr_; \
/* Allowable datatypes only */ \
MPIU_Assert((dtype_) == MPI_INT); \
/* Double check if dtype_ and name_ match */ \
@@ -396,7 +397,7 @@ extern void MPIR_T_PVAR_REGISTER_impl(
/* State pvars should be describled further by an enum */ \
MPIU_Assert((etype_) != MPI_T_ENUM_NULL); \
PVAR_STATE_##name_ = (initval_); \
- void *addr_ = &PVAR_STATE_##name_; \
+ addr_ = &PVAR_STATE_##name_; \
MPIR_T_PVAR_REGISTER_impl(MPI_T_PVAR_CLASS_STATE, dtype_, #name_, \
addr_, 1, etype_, verb_, bind_, flags_, NULL, NULL, cat_, desc_); \
} while (0)
@@ -469,6 +470,7 @@ extern void MPIR_T_PVAR_REGISTER_impl(
#define MPIR_T_PVAR_LEVEL_REGISTER_STATIC_impl(dtype_, name_, \
initval_, verb_, bind_, flags_, cat_, desc_) \
do { \
+ void *addr_; \
/* Allowable datatypes only */ \
MPIU_Assert((dtype_) == MPI_UNSIGNED || (dtype_) == MPI_UNSIGNED_LONG || \
(dtype_) == MPI_UNSIGNED_LONG_LONG || (dtype_) == MPI_DOUBLE); \
@@ -476,7 +478,7 @@ extern void MPIR_T_PVAR_REGISTER_impl(
MPIU_Assert(sizeof(PVAR_LEVEL_##name_) == MPID_Datatype_get_basic_size(dtype_)); \
MPIU_Assert((flags_) & MPIR_T_PVAR_FLAG_CONTINUOUS); \
PVAR_LEVEL_##name_ = (initval_); \
- void *addr_ = &PVAR_LEVEL_##name_; \
+ addr_ = &PVAR_LEVEL_##name_; \
MPIR_T_PVAR_REGISTER_impl(MPI_T_PVAR_CLASS_LEVEL, dtype_, #name_, \
addr_, 1, MPI_T_ENUM_NULL, verb_, bind_, flags_, NULL, NULL, cat_, desc_); \
} while (0)
@@ -542,6 +544,7 @@ extern void MPIR_T_PVAR_REGISTER_impl(
#define MPIR_T_PVAR_SIZE_REGISTER_STATIC_impl(dtype_, name_, \
initval_, verb_, bind_, flags_, cat_, desc_) \
do { \
+ void *addr_; \
/* Allowable datatypes only */ \
MPIU_Assert((dtype_) == MPI_UNSIGNED || (dtype_) == MPI_UNSIGNED_LONG || \
(dtype_) == MPI_UNSIGNED_LONG_LONG || (dtype_) == MPI_DOUBLE); \
@@ -549,7 +552,7 @@ extern void MPIR_T_PVAR_REGISTER_impl(
MPIU_Assert(sizeof(PVAR_SIZE_##name_) == MPID_Datatype_get_basic_size(dtype_)); \
MPIU_Assert((flags_) & MPIR_T_PVAR_FLAG_CONTINUOUS); \
PVAR_SIZE_##name_ = (initval_); \
- void *addr_ = &PVAR_SIZE_##name_; \
+ addr_ = &PVAR_SIZE_##name_; \
MPIR_T_PVAR_REGISTER_impl(MPI_T_PVAR_CLASS_SIZE, dtype_, #name_, \
addr_, 1, MPI_T_ENUM_NULL, verb_, bind_, flags_, NULL, NULL, cat_, desc_); \
} while (0)
@@ -600,12 +603,13 @@ extern void MPIR_T_PVAR_REGISTER_impl(
#define MPIR_T_PVAR_PERCENTAGE_REGISTER_STATIC_impl(dtype_, name_, \
initval_, verb_, bind_, flags_, cat_, desc_) \
do { \
+ void *addr_; \
/* Allowable datatypes only */ \
MPIU_Assert((dtype_) == MPI_DOUBLE); \
/* Double check if dtype_ and name_ match */ \
MPIU_Assert(sizeof(PVAR_PERCENTAGE_##name_) == MPID_Datatype_get_basic_size(dtype_)); \
MPIU_Assert((flags_) & MPIR_T_PVAR_FLAG_CONTINUOUS); \
- void *addr_ = &PVAR_PERCENTAGE_##name_; \
+ addr_ = &PVAR_PERCENTAGE_##name_; \
PVAR_PERCENTAGE_##name_ = (initval_); \
MPIR_T_PVAR_REGISTER_impl(MPI_T_PVAR_CLASS_PERCENTAGE, dtype_, #name_, \
addr_, 1, MPI_T_ENUM_NULL, verb_, bind_, flags_, NULL, NULL, cat_, desc_); \
@@ -672,13 +676,14 @@ extern void MPIR_T_PVAR_REGISTER_impl(
#define MPIR_T_PVAR_COUNTER_REGISTER_STATIC_impl(dtype_, name_, \
verb_, bind_, flags_, cat_, desc_) \
do { \
+ void *addr_; \
/* Allowable datatypes only */ \
MPIU_Assert((dtype_) == MPI_UNSIGNED || (dtype_) == MPI_UNSIGNED_LONG || \
(dtype_) == MPI_UNSIGNED_LONG_LONG); \
/* Double check if dtype_ and name_ match*/ \
MPIU_Assert(sizeof(PVAR_COUNTER_##name_) == MPID_Datatype_get_basic_size(dtype_)); \
PVAR_COUNTER_##name_ = 0; \
- void *addr_ = &PVAR_COUNTER_##name_; \
+ addr_ = &PVAR_COUNTER_##name_; \
MPIR_T_PVAR_REGISTER_impl(MPI_T_PVAR_CLASS_COUNTER, dtype_, #name_, \
addr_, 1, MPI_T_ENUM_NULL, verb_, bind_, flags_, NULL, NULL, cat_, desc_); \
} while (0)
@@ -721,7 +726,8 @@ extern void MPIR_T_PVAR_REGISTER_impl(
/* Interfaces through pointer or name */
#define MPIR_T_PVAR_COUNTER_ARRAY_INIT_VAR_impl(ptr_, count_) \
do { \
- int idx_ = 0; \
+ int idx_; \
+ idx_ = 0; \
for (; idx_ < (count_); idx_++) \
*((ptr_) + idx_) = 0; \
} while (0)
@@ -732,7 +738,8 @@ extern void MPIR_T_PVAR_REGISTER_impl(
#define MPIR_T_PVAR_COUNTER_ARRAY_INIT_impl(name_) \
do { \
- int count_ = sizeof(PVAR_COUNTER_##name_)/sizeof(PVAR_COUNTER_##name_[0]); \
+ int count_; \
+ count_ = sizeof(PVAR_COUNTER_##name_)/sizeof(PVAR_COUNTER_##name_[0]); \
MPIR_T_PVAR_COUNTER_ARRAY_INIT_VAR_impl(PVAR_COUNTER_##name_, count_); \
} while (0)
#define MPIR_T_PVAR_COUNTER_ARRAY_GET_impl(name_, idx_) \
@@ -744,14 +751,16 @@ extern void MPIR_T_PVAR_REGISTER_impl(
#define MPIR_T_PVAR_COUNTER_ARRAY_REGISTER_STATIC_impl(dtype_, name_, \
verb_, bind_, flags_, cat_, desc_) \
do { \
+ void *addr_; \
+ int count_; \
/* Allowable datatypes only */ \
MPIU_Assert((dtype_) == MPI_UNSIGNED || (dtype_) == MPI_UNSIGNED_LONG || \
(dtype_) == MPI_UNSIGNED_LONG_LONG); \
/* Double check if dtype_ and name_ match */ \
MPIU_Assert(sizeof(PVAR_COUNTER_##name_[0]) == MPID_Datatype_get_basic_size(dtype_)); \
- void *addr_ = PVAR_COUNTER_##name_; \
+ addr_ = PVAR_COUNTER_##name_; \
MPIR_T_PVAR_COUNTER_ARRAY_INIT_impl(name_); \
- int count_ = sizeof(PVAR_COUNTER_##name_)/sizeof(mpit_pvar_##name_[0]); \
+ count_ = sizeof(PVAR_COUNTER_##name_)/sizeof(mpit_pvar_##name_[0]); \
MPIR_T_PVAR_REGISTER_impl(MPI_T_PVAR_CLASS_COUNTER, dtype_, #name_, \
addr_, count_, MPI_T_ENUM_NULL, verb_, bind_, flags_, NULL, NULL, cat_, desc_); \
} while (0)
@@ -809,13 +818,14 @@ extern void MPIR_T_PVAR_REGISTER_impl(
#define MPIR_T_PVAR_AGGREGATE_REGISTER_STATIC_impl(dtype_, name_, \
verb_, bind_, flags_, cat_, desc_) \
do { \
+ void *addr; \
/* Allowable datatypes only */ \
MPIU_Assert((dtype_) == MPI_UNSIGNED || (dtype_) == MPI_UNSIGNED_LONG || \
(dtype_) == MPI_UNSIGNED_LONG_LONG || (dtype_) == MPI_DOUBLE); \
/* Double check if dtype_ and name_ match*/ \
MPIU_Assert(sizeof(PVAR_AGGREGATE_##name_) == MPID_Datatype_get_basic_size(dtype_)); \
PVAR_AGGREGATE_##name_ = 0; \
- void *addr_ = &PVAR_AGGREGATE_##name_; \
+ addr_ = &PVAR_AGGREGATE_##name_; \
MPIR_T_PVAR_REGISTER_impl(MPI_T_PVAR_CLASS_AGGREGATE, dtype_, #name_, \
addr_, 1, MPI_T_ENUM_NULL, verb_, bind_, flags_, NULL, NULL, cat_, desc_); \
} while (0)
@@ -894,11 +904,13 @@ void get_timer_time_in_double(MPIR_T_pvar_timer_t *timer, void *obj_handle,
#define MPIR_T_PVAR_TIMER_REGISTER_STATIC_impl(dtype_, name_, \
verb_, bind_, flags_, cat_, desc_) \
do { \
+ void *addr_; \
+ void *count_addr_; \
/* Allowable datatypes only */ \
MPIU_Assert((dtype_) == MPI_DOUBLE); \
MPIR_T_PVAR_TIMER_INIT_impl(name_); \
- void *addr_ = &PVAR_TIMER_##name_; \
- void *count_addr_ = &(PVAR_TIMER_##name_.count); \
+ addr_ = &PVAR_TIMER_##name_; \
+ count_addr_ = &(PVAR_TIMER_##name_.count); \
MPIR_T_PVAR_REGISTER_impl(MPI_T_PVAR_CLASS_TIMER, dtype_, #name_, \
addr_, 1, MPI_T_ENUM_NULL, verb_, bind_, flags_, \
(MPIR_T_pvar_get_value_cb *)&get_timer_time_in_double, NULL, cat_, desc_); \
@@ -982,12 +994,13 @@ void get_timer_time_in_double(MPIR_T_pvar_timer_t *timer, void *obj_handle,
#define MPIR_T_PVAR_UINT_HIGHWATERMARK_UPDATE_VAR_impl(ptr_, val_) \
do { \
+ MPIR_T_pvar_handle_t *head; \
(ptr_)->current.u = (val_); \
if ((ptr_)->first_used && (ptr_)->first_started) { \
if ((val_) > (ptr_)->watermark.u) \
(ptr_)->watermark.u = (val_); \
} \
- MPIR_T_pvar_handle_t *head = (ptr_)->hlist; \
+ head = (ptr_)->hlist; \
while (head != NULL) { \
if (MPIR_T_pvar_is_started(head) && (val_) > head->watermark.u) { \
head->watermark.u = (val_); \
@@ -998,12 +1011,13 @@ void get_timer_time_in_double(MPIR_T_pvar_timer_t *timer, void *obj_handle,
#define MPIR_T_PVAR_ULONG_HIGHWATERMARK_UPDATE_VAR_impl(ptr_, val_) \
do { \
+ MPIR_T_pvar_handle_t *head; \
(ptr_)->current.ul = (val_); \
if ((ptr_)->first_used && (ptr_)->first_started) { \
if ((val_) > (ptr_)->watermark.ul) \
(ptr_)->watermark.ul = (val_); \
} \
- MPIR_T_pvar_handle_t *head = (ptr_)->hlist; \
+ head = (ptr_)->hlist; \
while (head != NULL) { \
if (MPIR_T_pvar_is_started(head) && (val_) > head->watermark.ul) { \
head->watermark.ul = (val_); \
@@ -1014,12 +1028,13 @@ void get_timer_time_in_double(MPIR_T_pvar_timer_t *timer, void *obj_handle,
#define MPIR_T_PVAR_ULONG2_HIGHWATERMARK_UPDATE_VAR_impl(ptr_, val_) \
do { \
+ MPIR_T_pvar_handle_t *head; \
(ptr_)->current.ull = (val_); \
if ((ptr_)->first_used && (ptr_)->first_started) { \
if ((val_) > (ptr_)->watermark.ull) \
(ptr_)->watermark.ull = (val_); \
} \
- MPIR_T_pvar_handle_t *head = (ptr_)->hlist; \
+ head = (ptr_)->hlist; \
while (head != NULL) { \
if (MPIR_T_pvar_is_started(head) && (val_) > head->watermark.ull) { \
head->watermark.ull = (val_); \
@@ -1030,12 +1045,13 @@ void get_timer_time_in_double(MPIR_T_pvar_timer_t *timer, void *obj_handle,
#define MPIR_T_PVAR_DOUBLE_HIGHWATERMARK_UPDATE_VAR_impl(ptr_, val_) \
do { \
+ MPIR_T_pvar_handle_t *head; \
(ptr_)->current.f = (val_); \
if ((ptr_)->first_used && (ptr_)->first_started) { \
if ((val_) > (ptr_)->watermark.f) \
(ptr_)->watermark.f = (val_); \
} \
- MPIR_T_pvar_handle_t *head = (ptr_)->hlist; \
+ head = (ptr_)->hlist; \
while (head != NULL) { \
if (MPIR_T_pvar_is_started(head) && (val_) > head->watermark.f) { \
head->watermark.f = (val_); \
@@ -1066,6 +1082,7 @@ void get_timer_time_in_double(MPIR_T_pvar_timer_t *timer, void *obj_handle,
#define MPIR_T_PVAR_HIGHWATERMARK_REGISTER_STATIC_impl(dtype_, name_, \
initval_, verb_, bind_, flags_, cat_, desc_) \
do { \
+ void *addr_; \
/* Allowable datatypes only */ \
MPIU_Assert((dtype_) == MPI_UNSIGNED || (dtype_) == MPI_UNSIGNED_LONG || \
(dtype_) == MPI_UNSIGNED_LONG_LONG || (dtype_) == MPI_DOUBLE); \
@@ -1081,7 +1098,7 @@ void get_timer_time_in_double(MPIR_T_pvar_timer_t *timer, void *obj_handle,
default: \
break; \
}; \
- void *addr_ = &PVAR_HIGHWATERMARK_##name_; \
+ addr_ = &PVAR_HIGHWATERMARK_##name_; \
MPIR_T_PVAR_REGISTER_impl(MPI_T_PVAR_CLASS_HIGHWATERMARK, dtype_, #name_, \
addr_, 1, MPI_T_ENUM_NULL, verb_, bind_, flags_, NULL, NULL, cat_, desc_); \
} while (0)
@@ -1169,13 +1186,14 @@ void get_timer_time_in_double(MPIR_T_pvar_timer_t *timer, void *obj_handle,
#define MPIR_T_PVAR_UINT_LOWWATERMARK_UPDATE_VAR_impl(ptr_, val_) \
do { \
+ MPIR_T_pvar_handle_t *head; \
(ptr_)->current.u = (val_); \
/* Update values in all handles */ \
if ((ptr_)->first_used && (ptr_)->first_started) { \
if ((val_) < (ptr_)->watermark.u) \
(ptr_)->watermark.u = (val_); \
} \
- MPIR_T_pvar_handle_t *head = (ptr_)->hlist; \
+ head = (ptr_)->hlist; \
while (head != NULL) { \
if (MPIR_T_pvar_is_started(head) && (val_) < head->watermark.u) { \
head->watermark.u = (val_); \
@@ -1186,12 +1204,13 @@ void get_timer_time_in_double(MPIR_T_pvar_timer_t *timer, void *obj_handle,
#define MPIR_T_PVAR_ULONG_LOWWATERMARK_UPDATE_VAR_impl(ptr_, val_) \
do { \
+ MPIR_T_pvar_handle_t *head; \
(ptr_)->current.ul = (val_); \
if ((ptr_)->first_used && (ptr_)->first_started) { \
if ((val_) < (ptr_)->watermark.ul) \
(ptr_)->watermark.ul = (val_); \
} \
- MPIR_T_pvar_handle_t *head = (ptr_)->hlist; \
+ head = (ptr_)->hlist; \
while (head != NULL) { \
if (MPIR_T_pvar_is_started(head) && (val_) < head->watermark.ul) { \
head->watermark.ul = (val_); \
@@ -1202,12 +1221,13 @@ void get_timer_time_in_double(MPIR_T_pvar_timer_t *timer, void *obj_handle,
#define MPIR_T_PVAR_ULONG2_LOWWATERMARK_UPDATE_VAR_impl(ptr_, val_) \
do { \
+ MPIR_T_pvar_handle_t *head; \
(ptr_)->current.ull = (val_); \
if ((ptr_)->first_used && (ptr_)->first_started) { \
if ((val_) < (ptr_)->watermark.ull) \
(ptr_)->watermark.ull = (val_); \
} \
- MPIR_T_pvar_handle_t *head = (ptr_)->hlist; \
+ head = (ptr_)->hlist; \
while (head != NULL) { \
if (MPIR_T_pvar_is_started(head) && (val_) < head->watermark.ull) { \
head->watermark.ull = (val_); \
@@ -1218,12 +1238,13 @@ void get_timer_time_in_double(MPIR_T_pvar_timer_t *timer, void *obj_handle,
#define MPIR_T_PVAR_DOUBLE_LOWWATERMARK_UPDATE_VAR_impl(ptr_, val_) \
do { \
+ MPIR_T_pvar_handle_t *head; \
(ptr_)->current.f = (val_); \
if ((ptr_)->first_used && (ptr_)->first_started) { \
if ((val_) < (ptr_)->watermark.f) \
(ptr_)->watermark.f = (val_); \
} \
- MPIR_T_pvar_handle_t *head = (ptr_)->hlist; \
+ head = (ptr_)->hlist; \
while (head != NULL) { \
if (MPIR_T_pvar_is_started(head) && (val_) < head->watermark.f) { \
head->watermark.f = (val_); \
@@ -1254,6 +1275,7 @@ void get_timer_time_in_double(MPIR_T_pvar_timer_t *timer, void *obj_handle,
#define MPIR_T_PVAR_LOWWATERMARK_REGISTER_STATIC_impl(dtype_, name_, \
initval_, verb_, bind_, flags_, cat_, desc_) \
do { \
+ void *addr_; \
/* Allowable datatypes only */ \
MPIU_Assert((dtype_) == MPI_UNSIGNED || (dtype_) == MPI_UNSIGNED_LONG || \
(dtype_) == MPI_UNSIGNED_LONG_LONG || (dtype_) == MPI_DOUBLE); \
@@ -1269,7 +1291,7 @@ void get_timer_time_in_double(MPIR_T_pvar_timer_t *timer, void *obj_handle,
default: \
break; \
}; \
- void *addr_ = &PVAR_LOWWATERMARK_##name_; \
+ addr_ = &PVAR_LOWWATERMARK_##name_; \
MPIR_T_PVAR_REGISTER_impl(MPI_T_PVAR_CLASS_LOWWATERMARK, dtype_, #name_, \
addr_, 1, MPI_T_ENUM_NULL, verb_, bind_, flags_, NULL, NULL, cat_, desc_); \
} while (0)
diff --git a/src/mpi/coll/ired_scat.c b/src/mpi/coll/ired_scat.c
index 884c8da..423e25f 100644
--- a/src/mpi/coll/ired_scat.c
+++ b/src/mpi/coll/ired_scat.c
@@ -1084,9 +1084,10 @@ int MPI_Ireduce_scatter(const void *sendbuf, void *recvbuf, const int recvcounts
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, MPI_Request *request)
{
int mpi_errno = MPI_SUCCESS;
+ int i;
MPID_Comm *comm_ptr = NULL;
MPID_MPI_STATE_DECL(MPID_STATE_MPI_IREDUCE_SCATTER);
- int i = 0;
+ i = 0;
MPIU_THREAD_CS_ENTER(ALLFUNC,);
MPID_MPI_FUNC_ENTER(MPID_STATE_MPI_IREDUCE_SCATTER);
diff --git a/src/mpi/romio/adio/common/ad_fstype.c b/src/mpi/romio/adio/common/ad_fstype.c
index cd7e6c5..e2c41ea 100644
--- a/src/mpi/romio/adio/common/ad_fstype.c
+++ b/src/mpi/romio/adio/common/ad_fstype.c
@@ -614,6 +614,7 @@ void ADIO_ResolveFileType(MPI_Comm comm, const char *filename, int *fstype,
int myerrcode, file_system, min_code, max_code;
char *tmp;
static char myname[] = "ADIO_RESOLVEFILETYPE";
+ char * p;
file_system = -1;
if (filename == NULL) {
@@ -696,7 +697,7 @@ void ADIO_ResolveFileType(MPI_Comm comm, const char *filename, int *fstype,
* Assumes all processes set the same environment variable.
* Values: the same prefix you would stick on a file path. e.g. pvfs2: --
* including the colon! */
- char * p = getenv("ROMIO_FSTYPE_FORCE");
+ p = getenv("ROMIO_FSTYPE_FORCE");
if (p != NULL) {
ADIO_FileSysType_prefix(p, &file_system, &myerrcode);
if (myerrcode != MPI_SUCCESS) {
diff --git a/src/mpi/romio/adio/common/ad_open.c b/src/mpi/romio/adio/common/ad_open.c
index d26a072..7d69280 100644
--- a/src/mpi/romio/adio/common/ad_open.c
+++ b/src/mpi/romio/adio/common/ad_open.c
@@ -31,6 +31,8 @@ MPI_File ADIO_Open(MPI_Comm orig_comm,
static char myname[] = "ADIO_OPEN";
int max_error_code;
MPI_Info dupinfo;
+ int syshints_processed, can_skip;
+ char *p;
*error_code = MPI_SUCCESS;
@@ -98,7 +100,6 @@ MPI_File ADIO_Open(MPI_Comm orig_comm,
*
* a code might do an "initialize from 0", so we can only skip hint
* processing once everyone has participated in hint processing */
- int syshints_processed, can_skip;
if (ADIOI_syshints == MPI_INFO_NULL)
syshints_processed = 0;
else
@@ -150,7 +151,7 @@ MPI_File ADIO_Open(MPI_Comm orig_comm,
goto fn_exit;
}
/* for debugging, it can be helpful to see the hints selected */
- char *p = getenv("ROMIO_PRINT_HINTS");
+ p = getenv("ROMIO_PRINT_HINTS");
if (rank == 0 && p != NULL ) {
ADIOI_Info_print_keyvals(fd->info);
}
diff --git a/src/mpi/romio/adio/common/cb_config_list.c b/src/mpi/romio/adio/common/cb_config_list.c
index 0eebc43..626709f 100644
--- a/src/mpi/romio/adio/common/cb_config_list.c
+++ b/src/mpi/romio/adio/common/cb_config_list.c
@@ -68,6 +68,8 @@ int ADIOI_cb_bcast_rank_map(ADIO_File fd)
char *value;
int error_code = MPI_SUCCESS;
static char myname[] = "ADIOI_cb_bcast_rank_map";
+ char *p;
+ int i;
MPI_Bcast(&(fd->hints->cb_nodes), 1, MPI_INT, 0, fd->comm);
if (fd->hints->cb_nodes > 0) {
@@ -92,8 +94,7 @@ int ADIOI_cb_bcast_rank_map(ADIO_File fd)
value = (char *) ADIOI_Malloc((MPI_MAX_INFO_VAL+1)*sizeof(char));
ADIOI_Snprintf(value, MPI_MAX_INFO_VAL+1, "%d", fd->hints->cb_nodes);
ADIOI_Info_set(fd->info, "cb_nodes", value);
- char *p = value;
- int i;
+ p = value;
/* the (by MPI rank) list of aggregators can be larger than
* MPI_MAX_INFO_VAL, so we will simply truncate when we reach capacity. I
* wasn't clever enough to figure out how to rewind and put '...' at the
diff --git a/src/mpi_t/cat_get_info.c b/src/mpi_t/cat_get_info.c
index 782cbe5..b9b196b 100644
--- a/src/mpi_t/cat_get_info.c
+++ b/src/mpi_t/cat_get_info.c
@@ -58,6 +58,7 @@ int MPI_T_category_get_info(int cat_index, char *name, int *name_len, char *desc
int *desc_len, int *num_cvars, int *num_pvars, int *num_categories)
{
int mpi_errno = MPI_SUCCESS;
+ cat_table_entry_t *cat;
MPID_MPI_STATE_DECL(MPID_STATE_MPI_T_CATEGORY_GET_INFO);
MPIR_ERRTEST_MPIT_INITIALIZED(mpi_errno);
@@ -80,7 +81,6 @@ int MPI_T_category_get_info(int cat_index, char *name, int *name_len, char *desc
/* ... body of routine ... */
- cat_table_entry_t *cat;
cat = (cat_table_entry_t *)utarray_eltptr(cat_table, cat_index);
MPIR_T_strncpy(name, cat->name, name_len);
MPIR_T_strncpy(desc, cat->desc, desc_len);
diff --git a/src/mpi_t/cvar_get_info.c b/src/mpi_t/cvar_get_info.c
index dc5fef0..8bbbe94 100644
--- a/src/mpi_t/cvar_get_info.c
+++ b/src/mpi_t/cvar_get_info.c
@@ -62,6 +62,7 @@ int MPI_T_cvar_get_info(int cvar_index, char *name, int *name_len,
char *desc, int *desc_len, int *binding, int *scope)
{
int mpi_errno = MPI_SUCCESS;
+ const cvar_table_entry_t *cvar;
MPID_MPI_STATE_DECL(MPID_STATE_MPI_T_CVAR_GET_INFO);
MPIR_ERRTEST_MPIT_INITIALIZED(mpi_errno);
@@ -84,7 +85,6 @@ int MPI_T_cvar_get_info(int cvar_index, char *name, int *name_len,
/* ... body of routine ... */
- const cvar_table_entry_t *cvar;
cvar = (cvar_table_entry_t *)utarray_eltptr(cvar_table, cvar_index);
MPIR_T_strncpy(name, cvar->name, name_len);
diff --git a/src/mpi_t/cvar_handle_free.c b/src/mpi_t/cvar_handle_free.c
index d5fe45e..1d2e2a6 100644
--- a/src/mpi_t/cvar_handle_free.c
+++ b/src/mpi_t/cvar_handle_free.c
@@ -45,6 +45,7 @@ Input/Output Parameters:
int MPI_T_cvar_handle_free(MPI_T_cvar_handle *handle)
{
int mpi_errno = MPI_SUCCESS;
+ MPIR_T_cvar_handle_t *hnd;
MPID_MPI_STATE_DECL(MPID_STATE_MPI_T_CVAR_HANDLE_FREE);
MPIR_ERRTEST_MPIT_INITIALIZED(mpi_errno);
@@ -64,7 +65,7 @@ int MPI_T_cvar_handle_free(MPI_T_cvar_handle *handle)
/* ... body of routine ... */
- MPIR_T_cvar_handle_t *hnd = *handle;
+ hnd = *handle;
MPIU_Free(hnd);
*handle = MPI_T_CVAR_HANDLE_NULL;
diff --git a/src/mpi_t/enum_get_item.c b/src/mpi_t/enum_get_item.c
index 4820b8e..af8ce7a 100644
--- a/src/mpi_t/enum_get_item.c
+++ b/src/mpi_t/enum_get_item.c
@@ -54,6 +54,7 @@ Output Parameters:
int MPI_T_enum_get_item(MPI_T_enum enumtype, int index, int *value, char *name, int *name_len)
{
int mpi_errno = MPI_SUCCESS;
+ enum_item_t *item;
MPID_MPI_STATE_DECL(MPID_STATE_MPI_T_ENUM_GET_ITEM);
MPIR_ERRTEST_MPIT_INITIALIZED(mpi_errno);
@@ -78,7 +79,7 @@ int MPI_T_enum_get_item(MPI_T_enum enumtype, int index, int *value, char *name,
/* ... body of routine ... */
- enum_item_t *item = (enum_item_t *)utarray_eltptr(enumtype->items, index);
+ item = (enum_item_t *)utarray_eltptr(enumtype->items, index);
*value = item->value;
MPIR_T_strncpy(name, item->name, name_len);
diff --git a/src/mpi_t/pvar_get_info.c b/src/mpi_t/pvar_get_info.c
index a229edf..0116454 100644
--- a/src/mpi_t/pvar_get_info.c
+++ b/src/mpi_t/pvar_get_info.c
@@ -65,6 +65,8 @@ int MPI_T_pvar_get_info(int pvar_index, char *name, int *name_len, int *verbosit
int *desc_len, int *binding, int *readonly, int *continuous, int *atomic)
{
int mpi_errno = MPI_SUCCESS;
+ const pvar_table_entry_t *entry;
+ const pvar_table_entry_t *info;
MPID_MPI_STATE_DECL(MPID_STATE_MPI_T_PVAR_GET_INFO);
MPIR_ERRTEST_MPIT_INITIALIZED(mpi_errno);
@@ -86,7 +88,6 @@ int MPI_T_pvar_get_info(int pvar_index, char *name, int *name_len, int *verbosit
# endif /* HAVE_ERROR_CHECKING */
/* ... body of routine ... */
- const pvar_table_entry_t *entry;
entry = (pvar_table_entry_t *) utarray_eltptr(pvar_table, pvar_index);
if (!entry->active) {
@@ -94,7 +95,6 @@ int MPI_T_pvar_get_info(int pvar_index, char *name, int *name_len, int *verbosit
goto fn_fail;
}
- const pvar_table_entry_t *info;
info = (pvar_table_entry_t *) utarray_eltptr(pvar_table, pvar_index);
MPIR_T_strncpy(name, info->name, name_len);
diff --git a/src/mpi_t/pvar_handle_alloc.c b/src/mpi_t/pvar_handle_alloc.c
index 31edd9a..e4c97ee 100644
--- a/src/mpi_t/pvar_handle_alloc.c
+++ b/src/mpi_t/pvar_handle_alloc.c
@@ -202,6 +202,7 @@ int MPI_T_pvar_handle_alloc(MPI_T_pvar_session session, int pvar_index,
void *obj_handle, MPI_T_pvar_handle *handle, int *count)
{
int mpi_errno = MPI_SUCCESS;
+ pvar_table_entry_t *entry;
MPID_MPI_STATE_DECL(MPID_STATE_MPI_T_PVAR_HANDLE_ALLOC);
MPIR_ERRTEST_MPIT_INITIALIZED(mpi_errno);
@@ -225,7 +226,7 @@ int MPI_T_pvar_handle_alloc(MPI_T_pvar_session session, int pvar_index,
/* ... body of routine ... */
- pvar_table_entry_t *entry = (pvar_table_entry_t *) utarray_eltptr(pvar_table, pvar_index);
+ entry = (pvar_table_entry_t *) utarray_eltptr(pvar_table, pvar_index);
if (!entry->active) {
mpi_errno = MPI_T_ERR_INVALID_INDEX;
goto fn_fail;
diff --git a/src/mpi_t/pvar_reset.c b/src/mpi_t/pvar_reset.c
index aea3655..e12cee0 100644
--- a/src/mpi_t/pvar_reset.c
+++ b/src/mpi_t/pvar_reset.c
@@ -108,6 +108,7 @@ variables are ignored when MPI_T_PVAR_ALL_HANDLES is specified.
int MPI_T_pvar_reset(MPI_T_pvar_session session, MPI_T_pvar_handle handle)
{
int mpi_errno = MPI_SUCCESS;
+ MPIR_T_pvar_handle_t *hnd;
MPID_MPI_STATE_DECL(MPID_STATE_MPI_T_PVAR_RESET);
MPIR_ERRTEST_MPIT_INITIALIZED(mpi_errno);
@@ -131,7 +132,6 @@ int MPI_T_pvar_reset(MPI_T_pvar_session session, MPI_T_pvar_handle handle)
/* If handle is MPI_T_PVAR_ALL_HANDLES, dispatch the call.
* Otherwise, do correctness check, then go to impl.
*/
- MPIR_T_pvar_handle_t *hnd;
if (handle == MPI_T_PVAR_ALL_HANDLES) {
MPL_DL_FOREACH(session->hlist, hnd) {
if (!MPIR_T_pvar_is_readonly(hnd)) {
diff --git a/src/mpid/ch3/channels/nemesis/src/ch3_win_fns.c b/src/mpid/ch3/channels/nemesis/src/ch3_win_fns.c
index e176832..eebca52 100644
--- a/src/mpid/ch3/channels/nemesis/src/ch3_win_fns.c
+++ b/src/mpid/ch3/channels/nemesis/src/ch3_win_fns.c
@@ -54,6 +54,8 @@ static int MPIDI_CH3I_SHM_Wins_match(MPID_Win ** win_ptr, MPID_Win ** matched_wi
int mpi_errno = MPI_SUCCESS;
int i, comm_size;
int node_size, node_rank, shm_node_size;
+ int group_diff;
+ int base_diff;
MPID_Comm *node_comm_ptr = NULL, *shm_node_comm_ptr = NULL;
int *node_ranks = NULL, *node_ranks_in_shm_node = NULL;
@@ -62,13 +64,13 @@ static int MPIDI_CH3I_SHM_Wins_match(MPID_Win ** win_ptr, MPID_Win ** matched_wi
MPI_Aint *base_shm_offs;
MPIDI_SHM_Win_t *elem = shm_wins_list;
- *matched_win = NULL;
- base_shm_offs = *base_shm_offs_ptr;
MPIU_CHKLMEM_DECL(2);
MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3I_SHM_WINS_MATCH);
MPIDI_RMA_FUNC_ENTER(MPID_STATE_MPIDI_CH3I_SHM_WINS_MATCH);
+ *matched_win = NULL;
+ base_shm_offs = *base_shm_offs_ptr;
node_comm_ptr = (*win_ptr)->comm_ptr->node_comm;
MPIU_Assert(node_comm_ptr != NULL);
node_size = node_comm_ptr->local_size;
@@ -116,7 +118,7 @@ static int MPIDI_CH3I_SHM_Wins_match(MPID_Win ** win_ptr, MPID_Win ** matched_wi
if (mpi_errno) MPIU_ERR_POP(mpi_errno);
shm_node_group_ptr = NULL;
- int group_diff = 0;
+ group_diff = 0;
for (i = 0; i < node_size; i++) {
/* not exist in shm_comm->node_comm */
if (node_ranks_in_shm_node[i] == MPI_UNDEFINED) {
@@ -140,7 +142,7 @@ static int MPIDI_CH3I_SHM_Wins_match(MPID_Win ** win_ptr, MPID_Win ** matched_wi
if (mpi_errno) MPIU_ERR_POP(mpi_errno);
MPIU_ERR_CHKANDJUMP(errflag, mpi_errno, MPI_ERR_OTHER, "**coll_fail");
- int base_diff = 0;
+ base_diff = 0;
for (i = 0; i < comm_size; ++i) {
int i_node_rank = (*win_ptr)->comm_ptr->intranode_table[i];
if (i_node_rank >= 0) {
@@ -499,6 +501,8 @@ static int MPIDI_CH3I_Win_allocate_shm(MPI_Aint size, int disp_unit, MPID_Info *
/* compute the base addresses of each process within the shared memory segment */
{
+ char *cur_base;
+ int cur_rank;
if ((*win_ptr)->create_flavor != MPI_WIN_FLAVOR_SHARED) {
/* If create flavor is not MPI_WIN_FLAVOR_SHARED, all processes on this
window may not be on the same node. Because we only need to calculate
@@ -512,8 +516,8 @@ static int MPIDI_CH3I_Win_allocate_shm(MPI_Aint size, int disp_unit, MPID_Info *
node_shm_base_addrs = (*win_ptr)->shm_base_addrs;
}
- char *cur_base = (*win_ptr)->shm_base_addr;
- int cur_rank = 0;
+ cur_base = (*win_ptr)->shm_base_addr;
+ cur_rank = 0;
node_shm_base_addrs[0] = (*win_ptr)->shm_base_addr;
for (i = 1; i < node_size; ++i) {
if (node_sizes[i]) {
diff --git a/src/mpid/ch3/src/ch3u_rma_sync.c b/src/mpid/ch3/src/ch3u_rma_sync.c
index 19e8b60..0eeae75 100644
--- a/src/mpid/ch3/src/ch3u_rma_sync.c
+++ b/src/mpid/ch3/src/ch3u_rma_sync.c
@@ -5361,6 +5361,7 @@ int MPIDI_CH3_PktHandler_LockPutUnlock( MPIDI_VC_t *vc, MPIDI_CH3_Pkt_t *pkt,
char *data_buf = NULL;
MPIDI_msg_sz_t data_len;
int mpi_errno = MPI_SUCCESS;
+ int (*fcn)( MPIDI_VC_t *, struct MPID_Request *, int * );
MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3_PKTHANDLER_LOCKPUTUNLOCK);
MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3_PKTHANDLER_LOCKPUTUNLOCK);
@@ -5450,7 +5451,6 @@ int MPIDI_CH3_PktHandler_LockPutUnlock( MPIDI_VC_t *vc, MPIDI_CH3_Pkt_t *pkt,
req->dev.lock_queue_entry = new_ptr;
}
- int (*fcn)( MPIDI_VC_t *, struct MPID_Request *, int * );
fcn = req->dev.OnDataAvail;
mpi_errno = MPIDI_CH3U_Receive_data_found(req, data_buf, &data_len,
&complete);
diff --git a/src/mpid/ch3/src/mpid_comm_failure_ack.c b/src/mpid/ch3/src/mpid_comm_failure_ack.c
index 7e8e070..fe70fdc 100644
--- a/src/mpid/ch3/src/mpid_comm_failure_ack.c
+++ b/src/mpid/ch3/src/mpid_comm_failure_ack.c
@@ -81,8 +81,8 @@ int MPID_Comm_failed_bitarray(MPID_Comm *comm_ptr, uint32_t **bitarray, int acke
uint32_t bit;
int *failed_procs, *group_procs;
MPID_Group *failed_group, *comm_group;
- MPIDI_STATE_DECL(MPID_STATE_COMM_FAILED_BITARRAY);
MPIU_CHKLMEM_DECL(2);
+ MPIDI_STATE_DECL(MPID_STATE_COMM_FAILED_BITARRAY);
MPIDI_FUNC_ENTER(MPID_STATE_COMM_FAILED_BITARRAY);
diff --git a/src/mpid/ch3/src/mpid_comm_revoke.c b/src/mpid/ch3/src/mpid_comm_revoke.c
index 8f9d457..ab26cf2 100644
--- a/src/mpid/ch3/src/mpid_comm_revoke.c
+++ b/src/mpid/ch3/src/mpid_comm_revoke.c
@@ -21,14 +21,14 @@
#define FCNAME MPIDI_QUOTE(FUNCNAME)
int MPID_Comm_revoke(MPID_Comm *comm_ptr, int is_remote)
{
+ MPIDI_VC_t *vc;
+ MPID_IOV iov[MPID_IOV_LIMIT];
int mpi_errno = MPI_SUCCESS;
+ int i, size, my_rank, failed=0;
+ MPID_Request *request;
MPIDI_CH3_Pkt_t upkt;
MPIDI_CH3_Pkt_revoke_t *revoke_pkt = &upkt.revoke;
MPIDI_STATE_DECL(MPID_STATE_MPID_COMM_REVOKE);
- MPIDI_VC_t *vc;
- MPID_IOV iov[MPID_IOV_LIMIT];
- int i, size, my_rank, failed = 0;
- MPID_Request *request;
MPIDI_FUNC_ENTER(MPID_STATE_MPID_COMM_REVOKE);
diff --git a/src/mpid/ch3/src/mpid_init.c b/src/mpid/ch3/src/mpid_init.c
index 796d1f2..e0a9a0f 100644
--- a/src/mpid/ch3/src/mpid_init.c
+++ b/src/mpid/ch3/src/mpid_init.c
@@ -98,8 +98,8 @@ int MPID_Init(int *argc, char ***argv, int requested, int *provided,
int pg_size;
MPID_Comm * comm;
int p;
- MPIDI_STATE_DECL(MPID_STATE_MPID_INIT);
int val;
+ MPIDI_STATE_DECL(MPID_STATE_MPID_INIT);
MPIDI_FUNC_ENTER(MPID_STATE_MPID_INIT);
-----------------------------------------------------------------------
Summary of changes:
.../use_mpi_f08/wrappers_c/comm_spawn_multiple_c.c | 3 +-
src/include/mpimem.h | 2 +-
src/include/mpit.h | 3 +-
src/include/mpitimpl.h | 66 ++++++++++-----
src/mpi/coll/ired_scat.c | 3 +-
src/mpi/romio/adio/common/ad_fstype.c | 3 +-
src/mpi/romio/adio/common/ad_open.c | 5 +-
src/mpi/romio/adio/common/cb_config_list.c | 5 +-
src/mpi/romio/adio/common/p2p_aggregation.c | 86 ++++++++++----------
src/mpi/romio/adio/common/system_hints.c | 2 +-
src/mpi/romio/adio/include/adio.h | 6 +-
src/mpi_t/cat_get_info.c | 2 +-
src/mpi_t/cvar_get_info.c | 2 +-
src/mpi_t/cvar_handle_free.c | 3 +-
src/mpi_t/enum_get_item.c | 3 +-
src/mpi_t/pvar_get_info.c | 4 +-
src/mpi_t/pvar_handle_alloc.c | 3 +-
src/mpi_t/pvar_reset.c | 2 +-
src/mpid/ch3/channels/nemesis/src/ch3_win_fns.c | 16 +++--
src/mpid/ch3/src/ch3u_rma_sync.c | 2 +-
src/mpid/ch3/src/mpid_comm_failure_ack.c | 2 +-
src/mpid/ch3/src/mpid_comm_revoke.c | 8 +-
src/mpid/ch3/src/mpid_init.c | 2 +-
23 files changed, 134 insertions(+), 99 deletions(-)
hooks/post-receive
--
MPICH primary repository
More information about the commits
mailing list