-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathintercomm.cpp
126 lines (106 loc) · 3.11 KB
/
intercomm.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
#include <mpi.h>
#include <iostream>
#include <vector>
#include <cstdlib>
// Element buffer used for all rank-id exchanges below.
// (alias declaration preferred over typedef in modern C++)
using buf_type = std::vector<int>;
// Exchange {global rank, local rank} pairs across an inter-communicator
// and print what each rank received from the remote group.
//
// grank, gsize  - rank and size in MPI_COMM_WORLD
// lrank, lsize  - rank and size in the local intra-communicator
// rsize         - size of the remote group of `intercomm`
// intercomm     - inter-communicator connecting the two halves
// right_root    - world rank of the first process in the right half
//                 (ranks >= right_root belong to the right group)
void work(int grank, int gsize,
          int lrank, int lsize,
          int rsize,
          MPI_Comm intercomm, int right_root)
{
    // Give std::cout a large buffer so each rank's output is less likely
    // to be torn mid-line.  The buffer must outlive this function because
    // std::cout keeps the pointer after we return -- a plain stack array
    // here would leave cout with a dangling buffer, so use static storage.
    // NOTE(review): pubsetbuf is only guaranteed to take effect before any
    // I/O on the stream; treated here as best-effort.
    static char buf[1024*10];
    std::cout.rdbuf()->pubsetbuf(buf, sizeof(buf));
    // Communication buffers: each rank contributes its {global, local} rank
    // pair and receives one such pair from every process in the remote group.
    buf_type sbuf = {grank, lrank};
    buf_type rbuf(sbuf.size()*rsize);
    // Identify ourselves before the exchange.
    std::cout << "Pre - "
              << "rank: " << grank << ", "
              << "size: " << gsize << ", "
              << "lrank: " << lrank << ", "
              << "lsize: " << lsize << ", "
              << "rsize: " << rsize << std::endl;
    // Inter-communicator MPI_Allgather: every process receives sbuf.size()
    // elements from each of the rsize processes of the REMOTE group (the
    // recvcount argument is the per-remote-process count, not the total).
    MPI_Allgather(sbuf.data(), sbuf.size(), MPI_INT,
                  rbuf.data(), sbuf.size(), MPI_INT,
                  intercomm);
    // Print the pairs received from the other group.
    std::cout << "Post - "
              << "rank: " << grank << ", ";
    for (size_t i = 0; i < rbuf.size(); i += 2) {
        std::cout << "prank(" << i/2 << ") "
                  << rbuf[i] << ", " << rbuf[i+1]
                  << "; ";
    }
    std::cout << std::endl;
    if (grank >= right_root) {
        // Right group: send nothing, receive a 2-element pair from each of
        // the rsize remote (left) processes at even displacements.
        buf_type rbufv(sbuf.size()*right_root);
        std::vector<int> rbufvc(rsize, 2);   // per-remote receive counts
        std::vector<int> rbufvd;             // per-remote displacements
        buf_type sbufv;                      // intentionally empty: count 0
        for (int i = 0; i < rsize; i++) {
            rbufvd.push_back(2*i);
        }
        // data() is valid (possibly null) even on an empty vector, unlike
        // &v[0], which is undefined behavior when the vector is empty.
        MPI_Allgatherv(sbufv.data(), 0, MPI_INT,
                       rbufv.data(), rbufvc.data(), rbufvd.data(), MPI_INT,
                       intercomm);
        std::cout << "rank {" << grank << ", " << lrank << "}: ";
        for (int i = 0; i < rsize; i++) {
            const auto num = rbufvc[i];
            const auto off = rbufvd[i];
            std::cout << "{" << off << ", " << num;
            for (int j = off; j < off+num; j++) {
                std::cout << ", " << rbufv[j];
            }
            std::cout << "} ";
        }
        std::cout << std::endl;
    }
    else {
        // Left group: send our pair, receive nothing (all counts zero).
        buf_type sbufv = {grank, lrank};
        buf_type rbufv;                      // empty: nothing to receive
        std::vector<int> rbufvc(rsize, 0);
        std::vector<int> rbufvd(rsize, 0);
        MPI_Allgatherv(sbufv.data(), sbufv.size(), MPI_INT,
                       rbufv.data(), rbufvc.data(), rbufvd.data(), MPI_INT,
                       intercomm);
    }
}
int main(int argc, char **argv)
{
int rank, size;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
// group: one half left, one half right
int right_root = size/2;
int is_left = (rank < right_root) ? 1 : 0;
/* Build intra-communicator for local sub-group */
MPI_Comm intracomm; /* intra-communicator of local sub-group */
MPI_Comm_split(MPI_COMM_WORLD, is_left, rank, &intracomm);
/* Build inter-communicators. Tags are hard-coded. */
MPI_Comm intercomm; /* inter-communicator */
if (is_left) {
MPI_Intercomm_create(intracomm, 0,
MPI_COMM_WORLD, right_root, 0,
&intercomm);
}
else {
MPI_Intercomm_create(intracomm, 0,
MPI_COMM_WORLD, 0, 0,
&intercomm);
}
int lrank, lsize;
MPI_Comm_rank(intracomm, &lrank);
MPI_Comm_size(intracomm, &lsize);
int rsize;
MPI_Comm_remote_size(intercomm, &rsize);
// work
work(rank, size, lrank, lsize, rsize, intercomm, right_root);
// cleanup
MPI_Comm_free(&intercomm);
MPI_Comm_free(&intracomm);
MPI_Finalize();
return 0;
}