/**************************** master task ************************************/
if (taskid == MASTER)
{
printf("Number of worker tasks = %d\n", numworkers);
for (i=0; i<NRA; i++)
for (j=0; j<NCA; j++)
a[i][j]= i+j;
for (i=0; i<NCA; i++)
for (j=0; j<NCB; j++)
b[i][j] = i*j;
/* send matrix data to the worker tasks */
averow = NRA/numworkers;
extra = NRA%numworkers;
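/* each worker gets averow rows; the first `extra` workers get one extra row,
   so that all NRA rows of matrix a are distributed */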
offset = 0;
t1=MPI_Wtime(); // record start time
for (dest=1; dest<=numworkers; dest++)
{
rows = (dest <= extra) ? averow+1 : averow;
printf("...sending %d rows to task %d\n", rows, dest);
MPI_Send(&offset, 1, MPI_INT, dest, FROM_MASTER, M_C_W);
MPI_Send(&rows, 1, MPI_INT, dest, FROM_MASTER, M_C_W);
MPI_Send(&a[offset][0], rows*NCA, MPI_DOUBLE, dest, FROM_MASTER,
M_C_W);
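/* the whole matrix b is sent to every worker: each worker needs all of b
   to multiply its block of rows of a */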
MPI_Send(&b, NCA*NCB, MPI_DOUBLE, dest, FROM_MASTER, M_C_W);
offset += rows;
}
/* wait for results from all worker tasks */
for (source=1; source<=numworkers; source++)
{
MPI_Recv(&offset, 1, MPI_INT, source, FROM_WORKER, M_C_W, &status);
MPI_Recv(&rows, 1, MPI_INT, source, FROM_WORKER, M_C_W, &status);
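/* offset and rows identify which block of rows of c this worker computed */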
MPI_Recv(&c[offset][0], rows*NCB, MPI_DOUBLE, source, FROM_WORKER,
M_C_W, &status);
}
t2=MPI_Wtime(); // record finish time
printf("Multiply time= %.3lf sec\n\n", t2-t1);
printf("Here is the result matrix:\n");
for (i=0; i<NRA; i++)
{
printf("\n");
for (j=0; j<NCB; j++)