MPIProcess.cxx
// @(#)root/minuit2:$Id$
// Author: A. Lazzaro 2009
/***************************************************************************
 * Package: Minuit2                                                        *
 * File:    $Id$                                                           *
 * Author:  Alfio Lazzaro, alfio.lazzaro@mi.infn.it                        *
 *                                                                         *
 * Copyright: (C) 2008 by Universita' and INFN, Milan                      *
 ***************************************************************************/

#include "Minuit2/MPIProcess.h"

#include <iostream>

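// Illustrative usage sketch (hypothetical caller code, assuming MPIPROC is defined
// and vec is an MnAlgebraicVector of n elements):
//
//    ROOT::Minuit2::MPIProcess::SetDoFirstMPICall(true); // put all ranks on the X level
//    ROOT::Minuit2::MPIProcess mpiproc(n, 0);            // work on communicator index 0
//    for (unsigned int i = mpiproc.StartElementIndex(); i < mpiproc.EndElementIndex(); i++)
//       vec(i) = Compute(i);                             // hypothetical per-element work
//    mpiproc.SyncVector(vec);                            // every rank now holds the full vector
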
namespace ROOT {

namespace Minuit2 {

unsigned int MPIProcess::fgGlobalSize = 1;
unsigned int MPIProcess::fgGlobalRank = 0;

// By default all procs are for X
unsigned int MPIProcess::fgCartSizeX = 0;
unsigned int MPIProcess::fgCartSizeY = 0;
unsigned int MPIProcess::fgCartDimension = 0;
bool MPIProcess::fgNewCart = true;
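// fgCartDimension caches the total size of the fgCartSizeX x fgCartSizeY process
// grid (see SetCartDimension below); fgNewCart flags that the cartesian
// communicators still have to be (re)built.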

#ifdef MPIPROC
MPI::Intracomm *MPIProcess::fgCommunicator = 0;
int MPIProcess::fgIndexComm = -1; // -1 for no-initialization
MPI::Intracomm *MPIProcess::fgCommunicators[2] = {0};
unsigned int MPIProcess::fgIndecesComm[2] = {0};
#endif

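// The constructor attaches the new instance to one of (at most) two nested
// communicators: indexComm selects whether it works on level 0 or level 1 of the
// topology bookkept through fgCommunicators / fgIndecesComm above.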
MPIProcess::MPIProcess(unsigned int nelements, unsigned int indexComm) : fNelements(nelements), fSize(1), fRank(0)
{

   // check local requested index for communicator, valid values are 0 and 1
   indexComm = (indexComm == 0) ? 0 : 1;

#ifdef MPIPROC

   StartMPI();

   if (fgGlobalSize == fgCartDimension && fgCartSizeX != fgCartDimension && fgCartSizeY != fgCartDimension) {
      // declare the cartesian topology

      if (fgCommunicator == 0 && fgIndexComm < 0 && fgNewCart) {
         // first call, declare the topology
         std::cout << "Info --> MPIProcess::MPIProcess: Declare cartesian Topology (" << fgCartSizeX << "x"
                   << fgCartSizeY << ")" << std::endl;

         int color = fgGlobalRank / fgCartSizeY;
         int key = fgGlobalRank % fgCartSizeY;

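         // MPI::Intracomm::Split(color, key) groups all ranks that pass the same first
         // argument into one new communicator and orders them by the second argument;
         // the two calls below therefore slice COMM_WORLD into the two orthogonal sets
         // of sub-communicators of the fgCartSizeX x fgCartSizeY grid.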
         fgCommunicators[0] = new MPI::Intracomm(MPI::COMM_WORLD.Split(key, color)); // rows for Minuit
         fgCommunicators[1] = new MPI::Intracomm(MPI::COMM_WORLD.Split(color, key)); // columns for NLL

         fgNewCart = false;
      }

      fgIndexComm++;

      if (fgIndexComm > 1 ||
          fgCommunicator == (&(MPI::COMM_WORLD))) { // Remember, no more than 2 dimensions in the topology!
         std::cerr << "Error --> MPIProcess::MPIProcess: Requiring more than 2 dimensions in the topology!"
                   << std::endl;
         MPI::COMM_WORLD.Abort(-1);
      }

      // if columns are requested as the first call, use all nodes
      if (((unsigned int)fgIndexComm) < indexComm)
         fgCommunicator = &(MPI::COMM_WORLD);
      else {
         fgIndecesComm[fgIndexComm] = indexComm;
         fgCommunicator = fgCommunicators[fgIndecesComm[fgIndexComm]];
      }

   } else {
      // no cartesian topology
      if (fgCartDimension != 0 && fgGlobalSize != fgCartDimension) {
         std::cout << "Warning --> MPIProcess::MPIProcess: Cartesian dimension doesn't correspond to # total procs!"
                   << std::endl;
         std::cout << "Warning --> MPIProcess::MPIProcess: Ignoring topology, use all procs for X." << std::endl;
         std::cout << "Warning --> MPIProcess::MPIProcess: Resetting topology..." << std::endl;
         fgCartSizeX = fgGlobalSize;
         fgCartSizeY = 1;
         fgCartDimension = fgGlobalSize;
      }

      if (fgIndexComm < 0) {
         if (fgCartSizeX == fgCartDimension) {
            fgCommunicators[0] = &(MPI::COMM_WORLD);
            fgCommunicators[1] = 0;
         } else {
            fgCommunicators[0] = 0;
            fgCommunicators[1] = &(MPI::COMM_WORLD);
         }
      }

      fgIndexComm++;

      if (fgIndexComm > 1) { // Remember, no more than 2 nested MPI calls!
         std::cerr << "Error --> MPIProcess::MPIProcess: More than 2 nested MPI calls!" << std::endl;
         MPI::COMM_WORLD.Abort(-1);
      }

      fgIndecesComm[fgIndexComm] = indexComm;

      // require 2 nested communicators
      if (fgCommunicator != 0 && fgCommunicators[indexComm] != 0) {
         std::cout << "Warning --> MPIProcess::MPIProcess: Requiring 2 nested MPI calls!" << std::endl;
         std::cout << "Warning --> MPIProcess::MPIProcess: Ignoring second call." << std::endl;
         fgIndecesComm[fgIndexComm] = (indexComm == 0) ? 1 : 0;
      }

      fgCommunicator = fgCommunicators[fgIndecesComm[fgIndexComm]];
   }

   // set size and rank
   if (fgCommunicator != 0) {
      fSize = fgCommunicator->Get_size();
      fRank = fgCommunicator->Get_rank();
   } else {
      // no MPI calls
      fSize = 1;
      fRank = 0;
   }

   if (fSize > fNelements) {
      std::cerr << "Error --> MPIProcess::MPIProcess: more processors than elements!" << std::endl;
      MPI::COMM_WORLD.Abort(-1);
   }

#endif

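   // Block distribution of the fNelements elements over the fSize ranks: each rank
   // gets fNelements / fSize elements, and the remainder fNelements % fSize is handed
   // out one extra element each to the lowest ranks (see NumElements4Job in MPIProcess.h).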
   fNumElements4JobIn = fNelements / fSize;
   fNumElements4JobOut = fNelements % fSize;
}

MPIProcess::~MPIProcess()
{
   // destructor
#ifdef MPIPROC
   fgCommunicator = 0;
   fgIndexComm--;
   if (fgIndexComm == 0)
      fgCommunicator = fgCommunicators[fgIndecesComm[fgIndexComm]];

#endif
}

bool MPIProcess::SyncVector(ROOT::Minuit2::MnAlgebraicVector &mnvector)
{

   // in case of just one job, no need to sync, just go
   if (fSize < 2)
      return false;

   if (mnvector.size() != fNelements) {
      std::cerr << "Error --> MPIProcess::SyncVector: # defined elements different from # requested elements!"
                << std::endl;
      std::cerr << "Error --> MPIProcess::SyncVector: no MPI synchronization is possible!" << std::endl;
      exit(-1);
   }

#ifdef MPIPROC
   unsigned int numElements4ThisJob = NumElements4Job(fRank);
   unsigned int startElementIndex = StartElementIndex();
   unsigned int endElementIndex = EndElementIndex();

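   // Each rank copies only its own slice [startElementIndex, endElementIndex) into a
   // local buffer; MPISyncVector then gathers all slices with Allgatherv so that every
   // rank ends up with the complete vector of fNelements values.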
   double dvectorJob[numElements4ThisJob];
   for (unsigned int i = startElementIndex; i < endElementIndex; i++)
      dvectorJob[i - startElementIndex] = mnvector(i);

   double dvector[fNelements];
   MPISyncVector(dvectorJob, numElements4ThisJob, dvector);

   for (unsigned int i = 0; i < fNelements; i++) {
      mnvector(i) = dvector[i];
   }

   return true;

#else

   std::cerr << "Error --> MPIProcess::SyncVector: no MPI synchronization is possible!" << std::endl;
   exit(-1);

#endif
}

bool MPIProcess::SyncSymMatrixOffDiagonal(ROOT::Minuit2::MnAlgebraicSymMatrix &mnmatrix)
{

   // in case of just one job, no need to sync, just go
   if (fSize < 2)
      return false;

   if (mnmatrix.size() - mnmatrix.Nrow() != fNelements) {
      std::cerr
         << "Error --> MPIProcess::SyncSymMatrixOffDiagonal: # defined elements different from # requested elements!"
         << std::endl;
      std::cerr << "Error --> MPIProcess::SyncSymMatrixOffDiagonal: no MPI synchronization is possible!" << std::endl;
      exit(-1);
   }

#ifdef MPIPROC
   unsigned int numElements4ThisJob = NumElements4Job(fRank);
   unsigned int startElementIndex = StartElementIndex();
   unsigned int endElementIndex = EndElementIndex();
   unsigned int nrow = mnmatrix.Nrow();

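   // The fNelements off-diagonal entries are enumerated row by row over the strict
   // upper triangle (x < y); offsetVect accumulates the shift needed to map the flat
   // index i back to the matrix coordinates (x, y) computed below.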
   unsigned int offsetVect = 0;
   for (unsigned int i = 0; i < startElementIndex; i++)
      if ((i + offsetVect) % (nrow - 1) == 0)
         offsetVect += (i + offsetVect) / (nrow - 1);

   double dvectorJob[numElements4ThisJob];
   for (unsigned int i = startElementIndex; i < endElementIndex; i++) {

      int x = (i + offsetVect) / (nrow - 1);
      if ((i + offsetVect) % (nrow - 1) == 0)
         offsetVect += x;
      int y = (i + offsetVect) % (nrow - 1) + 1;

      dvectorJob[i - startElementIndex] = mnmatrix(x, y);
   }

   double dvector[fNelements];
   MPISyncVector(dvectorJob, numElements4ThisJob, dvector);

   offsetVect = 0;
   for (unsigned int i = 0; i < fNelements; i++) {

      int x = (i + offsetVect) / (nrow - 1);
      if ((i + offsetVect) % (nrow - 1) == 0)
         offsetVect += x;
      int y = (i + offsetVect) % (nrow - 1) + 1;

      mnmatrix(x, y) = dvector[i];
   }

   return true;

#else

   std::cerr << "Error --> MPIProcess::SyncSymMatrixOffDiagonal: no MPI synchronization is possible!" << std::endl;
   exit(-1);

#endif
}

#ifdef MPIPROC
void MPIProcess::MPISyncVector(double *ivector, int svector, double *ovector)
{
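   // Build the per-rank receive counts and displacements needed by Allgatherv:
   // rank i contributes NumElements4Job(i) doubles placed at offsets[i] in the
   // output buffer, so every rank receives the full vector.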
   int offsets[fSize];
   int nconts[fSize];
   nconts[0] = NumElements4Job(0);
   offsets[0] = 0;
   for (unsigned int i = 1; i < fSize; i++) {
      nconts[i] = NumElements4Job(i);
      offsets[i] = nconts[i - 1] + offsets[i - 1];
   }

   fgCommunicator->Allgatherv(ivector, svector, MPI::DOUBLE, ovector, nconts, offsets, MPI::DOUBLE);
}

bool MPIProcess::SetCartDimension(unsigned int dimX, unsigned int dimY)
{
   if (fgCommunicator != 0 || fgIndexComm >= 0) {
      std::cout << "Warning --> MPIProcess::SetCartDimension: MPIProcess already declared! Ignoring command..."
                << std::endl;
      return false;
   }
   if (dimX * dimY <= 0) {
      std::cout << "Warning --> MPIProcess::SetCartDimension: Invalid topology! Ignoring command..." << std::endl;
      return false;
   }

   StartMPI();

   if (fgGlobalSize != dimX * dimY) {
      std::cout << "Warning --> MPIProcess::SetCartDimension: Cartesian dimension doesn't correspond to # total procs!"
                << std::endl;
      std::cout << "Warning --> MPIProcess::SetCartDimension: Ignoring command..." << std::endl;
      return false;
   }

   if (fgCartSizeX != dimX || fgCartSizeY != dimY) {
      fgCartSizeX = dimX;
      fgCartSizeY = dimY;
      fgCartDimension = fgCartSizeX * fgCartSizeY;
      fgNewCart = true;

      if (fgCommunicators[0] != 0 && fgCommunicators[1] != 0) {
         delete fgCommunicators[0];
         fgCommunicators[0] = 0;
         fgIndecesComm[0] = 0;
         delete fgCommunicators[1];
         fgCommunicators[1] = 0;
         fgIndecesComm[1] = 0;
      }
   }

   return true;
}

bool MPIProcess::SetDoFirstMPICall(bool doFirstMPICall)
{

   StartMPI();

   bool ret;
   if (doFirstMPICall)
      ret = SetCartDimension(fgGlobalSize, 1);
   else
      ret = SetCartDimension(1, fgGlobalSize);

   return ret;
}

#endif

#ifdef MPIPROC
MPITerminate dummyMPITerminate = MPITerminate();
#endif

} // namespace Minuit2

} // namespace ROOT