[mpich-discuss] the SCATTERV function
Chafik sanaa
san.chafik at gmail.com
Fri Nov 28 13:53:02 CST 2014
Hi
When I execute this program (I use two processes) I get an error in the
output shown below:
* RESULT OF EXECUTION:
0 1 2 3 4 5 6 7 8 9
>> (1,6) <<
>>>>
> 1 <
* -6277438562204192500000000000000000000000000000000000000000000000000 *
-6277438562204192500000000000000000000000000000000000000000000000000 *
-6277438562204192500000000000000000000000000000000000000000000000000 *
-6277438562204192500000000000000000000000000000000000000000000000000 *
-6277438562204192500000000000000000000000000000000000000000000000000 *
-6277438562204192500000000000000000000000000000000000000000000000000
0 1 2 3 4 5 6 7 8 9
0 1 2 3
4 5 6 7 8 9
>> (0,4) <<
>>>>
> 0 <
* 0 * 1 * 2 * 3
* PROGRAM
#include <malloc.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include "math.h"
#include "mpi.h"
int main(int argc, char** argv)
{
    int taskid, ntasks;
    int ierr, i, itask;
    int sendcounts[2048], displs[2048], recvcount;
    double **sendbuff, *recvbuff, buffsum, buffsums[2048];
    double inittime, totaltime;
    const int nbr_etat = 10;
    double tab[nbr_etat];
    for (int i = 0; i < nbr_etat; i++)
        tab[i] = i;
    for (int i = 0; i < nbr_etat; i++)
        printf("%0.0f ", tab[i]);
    printf("\n");
    int nbr_elm[2] = { 4, 6 };
    int dpl[2] = { 0, 4 };
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &taskid);
    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
    recvbuff = (double *)malloc(sizeof(double)*nbr_etat);
    if (taskid == 0)
    {
        /* we run with two processes */
        sendbuff = (double **)malloc(sizeof(double *)*ntasks);
        sendbuff[0] = (double *)malloc(sizeof(double)*ntasks*nbr_etat);
        for (i = 1; i < ntasks; i++)
        {
            sendbuff[i] = sendbuff[i - 1] + nbr_etat;
        }
    }
    else
    {
        sendbuff = (double **)malloc(sizeof(double *)* 1);
        sendbuff[0] = (double *)malloc(sizeof(double)* 1);
    }
    if (taskid == 0){
        srand((unsigned)time(NULL) + taskid);
        for (itask = 0; itask < ntasks; itask++)
        {
            /* copy nbr_elm[itask] values of tab, starting at dpl[itask],
               into row itask of sendbuff */
            int k;
            displs[itask] = dpl[itask];
            int s = displs[itask];
            sendcounts[itask] = nbr_elm[itask];
            for (i = 0; i < sendcounts[itask]; i++)
            {
                k = i + s;
                sendbuff[itask][i] = tab[k];
                printf("%0.0f ", sendbuff[itask][i]);
            }
            printf("\n");
        }
    }
    recvcount = nbr_elm[taskid];
    inittime = MPI_Wtime();
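    /* MPI_Scatterv: rank i receives sendcounts[i] doubles taken from offset
       displs[i] of the root's sendbuff[0]; sendcounts and displs are only
       read on the root. */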
    ierr = MPI_Scatterv(sendbuff[0], sendcounts, displs, MPI_DOUBLE,
                        recvbuff, recvcount, MPI_DOUBLE,
                        0, MPI_COMM_WORLD);
    totaltime = MPI_Wtime() - inittime;
printf("\n >>>> \n");
buffsum = 0.0;
printf("\n > %d < \n",taskid);
for (i = 0; i<recvcount; i++)
{
printf("* %0.0f ", recvbuff[i]);
}
printf("\n");
if (taskid == 0){
free(sendbuff[0]);
free(sendbuff);
}
else{
free(sendbuff[0]);
free(sendbuff);
free(recvbuff);
}
/*===============================================================*/
/* Finalisation de MPI */
MPI_Finalize();
}
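
For comparison, here is a minimal, self-contained MPI_Scatterv sketch that
packs the root's data into one contiguous buffer, so that displs[i] is exactly
the offset of rank i's block inside that buffer. The split used here (rank i
receives i+1 elements) is only an illustration and is not taken from the
program above.

#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

int main(int argc, char **argv)
{
    int rank, size, i;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int *sendcounts = (int *)malloc(size * sizeof(int));
    int *displs = (int *)malloc(size * sizeof(int));
    int total = 0;
    for (i = 0; i < size; i++) {
        sendcounts[i] = i + 1;  /* illustrative split: rank i gets i+1 values */
        displs[i] = total;      /* offset of rank i's block in sendbuf */
        total += sendcounts[i];
    }

    /* only the root needs the packed send buffer */
    double *sendbuf = NULL;
    if (rank == 0) {
        sendbuf = (double *)malloc(total * sizeof(double));
        for (i = 0; i < total; i++)
            sendbuf[i] = (double)i; /* 0, 1, 2, ... so the split is easy to check */
    }

    double *recvbuf = (double *)malloc(sendcounts[rank] * sizeof(double));
    MPI_Scatterv(sendbuf, sendcounts, displs, MPI_DOUBLE,
                 recvbuf, sendcounts[rank], MPI_DOUBLE, 0, MPI_COMM_WORLD);

    printf("rank %d:", rank);
    for (i = 0; i < sendcounts[rank]; i++)
        printf(" %0.0f", recvbuf[i]);
    printf("\n");

    free(recvbuf);
    if (rank == 0) free(sendbuf);
    free(sendcounts);
    free(displs);
    MPI_Finalize();
    return 0;
}

Run with mpiexec -n 2, this sketch should print "rank 0: 0" on rank 0 and
"rank 1: 1 2" on rank 1.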