MPI::Intracomm *MPIProcess::fgCommunicator = 0;
int MPIProcess::fgIndexComm = -1;
MPI::Intracomm *MPIProcess::fgCommunicators[2] = {0};
unsigned int MPIProcess::fgIndecesComm[2] = {0};
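   // only communicator indices 0 and 1 are valid in
   // MPIProcess::MPIProcess(nelements, indexComm); any other request is clamped to 1: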
   indexComm = (indexComm == 0) ? 0 : 1;
      if (fgCommunicator == 0 && fgIndexComm < 0 && fgNewCart) {
         std::cout << "Info --> MPIProcess::MPIProcess: Declare cartesian Topology (" << fgCartSizeX << "x" << fgCartSizeY << ")" << std::endl;
         fgCommunicators[0] = new MPI::Intracomm(MPI::COMM_WORLD.Split(key, color));
         fgCommunicators[1] = new MPI::Intracomm(MPI::COMM_WORLD.Split(color, key));
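         // the two Split calls cut MPI::COMM_WORLD along the two directions of
         // the fgCartSizeX x fgCartSizeY grid, one sub-communicator per nesting
         // level of MPIProcess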
      if (fgIndexComm > 1 || fgCommunicator == (&(MPI::COMM_WORLD))) {
         std::cerr << "Error --> MPIProcess::MPIProcess: Requiring more than 2 dimensions in the topology!" << std::endl;
         MPI::COMM_WORLD.Abort(-1);
      if (((unsigned int)fgIndexComm) < indexComm)
         fgCommunicator = &(MPI::COMM_WORLD);
         fgIndecesComm[fgIndexComm] = indexComm;
         fgCommunicator = fgCommunicators[fgIndecesComm[fgIndexComm]];
         std::cout << "Warning --> MPIProcess::MPIProcess: Cartesian dimension doesn't correspond to # total procs!" << std::endl;
         std::cout << "Warning --> MPIProcess::MPIProcess: Ignoring topology, use all procs for X." << std::endl;
         std::cout << "Warning --> MPIProcess::MPIProcess: Resetting topology..." << std::endl;
      if (fgIndexComm < 0) {
         if (fgCartSizeX == fgCartDimension) {
            fgCommunicators[0] = &(MPI::COMM_WORLD);
            fgCommunicators[1] = 0;
         } else {
            fgCommunicators[0] = 0;
            fgCommunicators[1] = &(MPI::COMM_WORLD);
         }
      }
      if (fgIndexComm > 1) {
         std::cerr << "Error --> MPIProcess::MPIProcess: More than 2 nested MPI calls!" << std::endl;
         MPI::COMM_WORLD.Abort(-1);
      fgIndecesComm[fgIndexComm] = indexComm;
      if (fgCommunicator != 0 && fgCommunicators[indexComm] != 0) {
         std::cout << "Warning --> MPIProcess::MPIProcess: Requiring 2 nested MPI calls!" << std::endl;
         std::cout << "Warning --> MPIProcess::MPIProcess: Ignoring second call." << std::endl;
         fgIndecesComm[fgIndexComm] = (indexComm == 0) ? 1 : 0;
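         // the slot requested by this second nested call is already taken:
         // redirect it to the other (here null) slot, so this nesting level
         // falls back to serial execution (fSize == 1)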
      fgCommunicator = fgCommunicators[fgIndecesComm[fgIndexComm]];
   if (fgCommunicator != 0) {
      fSize = fgCommunicator->Get_size();
      fRank = fgCommunicator->Get_rank();
      std::cerr << "Error --> MPIProcess::MPIProcess: more processors than elements!" << std::endl;
      MPI::COMM_WORLD.Abort(-1);
   if (fgIndexComm == 0)
      fgCommunicator = fgCommunicators[fgIndecesComm[fgIndexComm]];
      std::cerr << "Error --> MPIProcess::SyncVector: # defined elements different from # requested elements!" << std::endl;
      std::cerr << "Error --> MPIProcess::SyncVector: no MPI syncronization is possible!" << std::endl;
   double dvectorJob[numElements4ThisJob];
   for (unsigned int i = startElementIndex; i < endElementIndex; i++)
      dvectorJob[i - startElementIndex] = mnvector(i);
   MPISyncVector(dvectorJob, numElements4ThisJob, dvector);
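   // after the gather every rank holds the complete vector; copy it back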
   for (unsigned int i = 0; i < fNelements; i++) {
      mnvector(i) = dvector[i];
   std::cerr << "Error --> MPIProcess::SyncVector: no MPI syncronization is possible!" << std::endl;
      std::cerr << "Error --> MPIProcess::SyncSymMatrixOffDiagonal: # defined elements different from # requested elements!" << std::endl;
      std::cerr << "Error --> MPIProcess::SyncSymMatrixOffDiagonal: no MPI syncronization is possible!" << std::endl;
   unsigned int nrow = mnmatrix.Nrow();
   unsigned int offsetVect = 0;
   for (unsigned int i = 0; i < startElementIndex; i++)
      if ((i + offsetVect) % (nrow - 1) == 0)
         offsetVect += (i + offsetVect) / (nrow - 1);
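   // The off-diagonal elements are enumerated row by row over the strict upper
   // triangle: (i + offsetVect) indexes an nrow x (nrow-1) layout, giving row x
   // and column y, while offsetVect accumulates, for each row x, the x
   // positions on or below the diagonal that are skipped. The loop above
   // fast-forwards offsetVect to its state at this rank's startElementIndex.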
   double dvectorJob[numElements4ThisJob];
   for (unsigned int i = startElementIndex; i < endElementIndex; i++) {

      int x = (i + offsetVect) / (nrow - 1);
      if ((i + offsetVect) % (nrow - 1) == 0)
         offsetVect += x;
      int y = (i + offsetVect) % (nrow - 1) + 1;

      dvectorJob[i - startElementIndex] = mnmatrix(x, y);
   MPISyncVector(dvectorJob, numElements4ThisJob, dvector);
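   // every rank now holds all off-diagonal elements; restart the index mapping
   // from zero and write them back into the matrix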
   offsetVect = 0;
   for (unsigned int i = 0; i < fNelements; i++) {

      int x = (i + offsetVect) / (nrow - 1);
      if ((i + offsetVect) % (nrow - 1) == 0)
         offsetVect += x;
      int y = (i + offsetVect) % (nrow - 1) + 1;

      mnmatrix(x, y) = dvector[i];
   std::cerr << "Error --> MPIProcess::SyncMatrix: no MPI syncronization is possible!" << std::endl;
void MPIProcess::MPISyncVector(double *ivector, int svector, double *ovector)
   for (unsigned int i = 1; i < fSize; i++) {
      nconts[i] = NumElements4Job(i);
      offsets[i] = nconts[i - 1] + offsets[i - 1];
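   // nconts holds the number of elements owned by each rank, offsets their
   // displacements in the gathered output, as Allgatherv requires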
   fgCommunicator->Allgatherv(ivector, svector, MPI::DOUBLE, ovector, nconts, offsets, MPI::DOUBLE);
   if (fgCommunicator != 0 || fgIndexComm >= 0) {
      std::cout << "Warning --> MPIProcess::SetCartDimension: MPIProcess already declared! Ignoring command..." << std::endl;
   if (dimX * dimY <= 0) {
      std::cout << "Warning --> MPIProcess::SetCartDimension: Invalid topology! Ignoring command..." << std::endl;
      std::cout << "Warning --> MPIProcess::SetCartDimension: Cartesian dimension doesn't correspond to # total procs!" << std::endl;
      std::cout << "Warning --> MPIProcess::SetCartDimension: Ignoring command..." << std::endl;
      if (fgCommunicators[0] != 0 && fgCommunicators[1] != 0) {
         delete fgCommunicators[0];
         fgCommunicators[0] = 0;
         fgIndecesComm[0] = 0;
         delete fgCommunicators[1];
         fgCommunicators[1] = 0;
         fgIndecesComm[1] = 0;
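// file-scope dummy object: its destructor runs at program teardown and is what
// finally shuts the MPI environment down (see MPITerminate in MPIProcess.h)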
MPITerminate dummyMPITerminate = MPITerminate();