#include "BufferExchange.h" int main( int argc, char **argv ) { int rank, nranks; IntVec peers; BufferExchange *be; int n_iterations = 10; int buffer_size = 100; char logfile_path[512]; MPI_Init( &argc, &argv ); MPI_Comm_size( MPI_COMM_WORLD, &nranks ); MPI_Comm_rank( MPI_COMM_WORLD, &rank ); // // Set up separate log file for each process // sprintf( logfile_path, "log.%d", rank ); if( (logfile=fopen(logfile_path,"w")) == NULL ) { ERROR_MACRO( "Unable to open logfile '%s'\n", logfile_path ); } // // Talk to all other ranks (but not self) // for( int i=0; i; // // Loop over sends/receive integer buffers to/from peers. // // Note that actual buffer sizes increase by 1 per loop iteration, so we expect to // see occasional increases in memory use when buffers are received from other ranks. // The details of exactly when this happens depend on the std::vector reallocation // strategy via the std::vector.resize() calls in BufferExchange::FullExchange() etc. // // We also reset/clear some other buffers, but as we're talking to a fixed number of // peers we're likely to just end up re0using the previously allocated/free'd // memory for those particular vectors. // for( int loop=0; loopClear( peers ); for( size_t pi=0; pisend_buffers[pi].clear(); for( int i=0; isend_buffers[pi].push_back( 0 ); // arbitrary integers in the buffers } // exchange buffers with peers be->FullExchange(); } MPI_Barrier( MPI_COMM_WORLD ); delete be; fclose( logfile ); MPI_Finalize(); return 0; }