MPI::Intracomm* MPIProcess::fgCommunicator = 0;
int MPIProcess::fgIndexComm = -1;
MPI::Intracomm* MPIProcess::fgCommunicators[2] = {0};
unsigned int MPIProcess::fgIndecesComm[2] = {0};
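// Static state shared by all MPIProcess instances: the communicator currently in
// use, the nesting index (-1 until the first MPIProcess is constructed), the two
// sub-communicators of the cartesian split, and the communicator index recorded
// for each nesting level.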
MPIProcess::MPIProcess(unsigned int nelements, unsigned int indexComm) :
   fNelements(nelements), fSize(1), fRank(0)
{
   indexComm = (indexComm==0) ? 0 : 1;
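   // Only communicator indices 0 and 1 are meaningful; any other value is mapped
   // to 1, since at most two nested MPI levels are supported (see the checks below).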
   // ...

   if (fgCommunicator==0 && fgIndexComm<0 && fgNewCart) {

      std::cout << "Info --> MPIProcess::MPIProcess: Declare cartesian Topology ("
                << fgCartSizeX << "x" << fgCartSizeY << ")" << std::endl;

      // ...

      fgCommunicators[0] = new MPI::Intracomm(MPI::COMM_WORLD.Split(key,color));
      fgCommunicators[1] = new MPI::Intracomm(MPI::COMM_WORLD.Split(color,key));

      // ...
   }
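   // The two Split calls above carve MPI::COMM_WORLD into row-wise and column-wise
   // sub-communicators of the fgCartSizeX x fgCartSizeY grid; the elided lines
   // derive `color` and `key` from the process's global rank and the grid sizes.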
   // ...

   if (fgIndexComm>1 || fgCommunicator==(&(MPI::COMM_WORLD))) {
      std::cerr << "Error --> MPIProcess::MPIProcess: Requiring more than 2 dimensions in the topology!" << std::endl;
      MPI::COMM_WORLD.Abort(-1);
   }
   if (((unsigned int)fgIndexComm)<indexComm)
      fgCommunicator = &(MPI::COMM_WORLD);
   else {
      fgIndecesComm[fgIndexComm] = indexComm;
      fgCommunicator = fgCommunicators[fgIndecesComm[fgIndexComm]];
   }
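   // Above: if the requested communicator index exceeds the current nesting level,
   // fall back to the full MPI::COMM_WORLD; otherwise record the index and select
   // the corresponding cartesian sub-communicator.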
   // ...

   std::cout << "Warning --> MPIProcess::MPIProcess: Cartesian dimension doesn't correspond to # total procs!" << std::endl;
   std::cout << "Warning --> MPIProcess::MPIProcess: Ignoring topology, use all procs for X." << std::endl;
   std::cout << "Warning --> MPIProcess::MPIProcess: Resetting topology..." << std::endl;
   // ...

      fgCommunicators[0] = &(MPI::COMM_WORLD);
      fgCommunicators[1] = 0;

   // ...

      fgCommunicators[0] = 0;
      fgCommunicators[1] = &(MPI::COMM_WORLD);
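   // Simple (non-cartesian) setup: depending on the elided condition, MPI::COMM_WORLD
   // is installed either as communicator 0 or as communicator 1, and the unused slot
   // is left null.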
   // ...

   std::cerr << "Error --> MPIProcess::MPIProcess: More than 2 nested MPI calls!" << std::endl;
   MPI::COMM_WORLD.Abort(-1);

   // ...

   fgIndecesComm[fgIndexComm] = indexComm;
   if (fgCommunicator!=0 && fgCommunicators[indexComm]!=0) {
      std::cout << "Warning --> MPIProcess::MPIProcess: Requiring 2 nested MPI calls!" << std::endl;
      std::cout << "Warning --> MPIProcess::MPIProcess: Ignoring second call." << std::endl;
      fgIndecesComm[fgIndexComm] = (indexComm==0) ? 1 : 0;
   }
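   // If both communicators are already assigned and this nested call requests an
   // index that is in use, the request is ignored and the other index is taken.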
   fgCommunicator = fgCommunicators[fgIndecesComm[fgIndexComm]];

   // ...

   if (fgCommunicator!=0) {
      fSize = fgCommunicator->Get_size();
      fRank = fgCommunicator->Get_rank();
   }
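   // fSize and fRank are taken from the active communicator; without one, the
   // serial defaults from the initializer list (fSize = 1, fRank = 0) remain.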
   // ...

   std::cerr << "Error --> MPIProcess::MPIProcess: more processors than elements!" << std::endl;
   MPI::COMM_WORLD.Abort(-1);
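   // Every rank must own at least one element: runs with more processes than
   // elements are aborted.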
   // ...
}

// ...

   fgCommunicator = fgCommunicators[fgIndecesComm[fgIndexComm]];
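   // The assignment above re-selects the active communicator from the recorded
   // indices; its enclosing context (presumably the destructor, which releases one
   // nesting level) is elided in this listing.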
bool MPIProcess::SyncVector(ROOT::Minuit2::MnAlgebraicVector &mnvector)
{
   // ...

   std::cerr << "Error --> MPIProcess::SyncVector: # defined elements different from # requested elements!" << std::endl;
   std::cerr << "Error --> MPIProcess::SyncVector: no MPI syncronization is possible!" << std::endl;
   // ...

   double dvectorJob[numElements4ThisJob];
   for (unsigned int i = startElementIndex; i<endElementIndex; i++)
      dvectorJob[i-startElementIndex] = mnvector(i);

   // ...

   MPISyncVector(dvectorJob,numElements4ThisJob,dvector);

   // ...
      mnvector(i) = dvector[i];
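   // SyncVector in three steps: each rank copies its slice
   // [startElementIndex, endElementIndex) into dvectorJob, MPISyncVector gathers
   // all slices into dvector, and the (elided) loop writes the full result back
   // into mnvector.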
   // ...

   std::cerr << "Error --> MPIProcess::SyncVector: no MPI syncronization is possible!" << std::endl;

   // ...
}
bool MPIProcess::SyncSymMatrixOffDiagonal(ROOT::Minuit2::MnAlgebraicSymMatrix &mnmatrix)
{
   // ...

   std::cerr << "Error --> MPIProcess::SyncSymMatrixOffDiagonal: # defined elements different from # requested elements!" << std::endl;
   std::cerr << "Error --> MPIProcess::SyncSymMatrixOffDiagonal: no MPI syncronization is possible!" << std::endl;
   // ...

   unsigned int nrow = mnmatrix.Nrow();

   unsigned int offsetVect = 0;
   for (unsigned int i = 0; i<startElementIndex; i++)
      if ((i+offsetVect)%(nrow-1)==0) offsetVect += (i+offsetVect)/(nrow-1);

   double dvectorJob[numElements4ThisJob];
   for (unsigned int i = startElementIndex; i<endElementIndex; i++) {

      int x = (i+offsetVect)/(nrow-1);
      if ((i+offsetVect)%(nrow-1)==0) offsetVect += x;
      int y = (i+offsetVect)%(nrow-1)+1;

      dvectorJob[i-startElementIndex] = mnmatrix(x,y);
   }
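   // The linear index i (plus offsetVect, which skips the diagonal terms) enumerates
   // the strictly upper-triangular entries of the symmetric matrix row by row.
   // For example, with nrow = 4 the indices i = 0..5 map to
   // (0,1), (0,2), (0,3), (1,2), (1,3), (2,3).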
   // ...

   MPISyncVector(dvectorJob,numElements4ThisJob,dvector);

   // ...

      int x = (i+offsetVect)/(nrow-1);
      if ((i+offsetVect)%(nrow-1)==0) offsetVect += x;
      int y = (i+offsetVect)%(nrow-1)+1;

      mnmatrix(x,y) = dvector[i];
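   // After the gather, the same (x,y) mapping is applied to write the collected
   // values back into the symmetric matrix; the enclosing loop over all fNelements
   // (and the corresponding reset of offsetVect) is elided here.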
   // ...

   std::cerr << "Error --> MPIProcess::SyncMatrix: no MPI syncronization is possible!" << std::endl;

   // ...
}
void MPIProcess::MPISyncVector(double *ivector, int svector, double *ovector)
{
   // ...

   for (unsigned int i = 1; i<fSize; i++) {
      // ...
      offsets[i] = nconts[i-1] + offsets[i-1];
   }

   fgCommunicator->Allgatherv(ivector,svector,MPI::DOUBLE,
                              ovector,nconts,offsets,MPI::DOUBLE);
}
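// Allgatherv above: every rank contributes svector doubles; nconts holds the
// per-rank element counts (the elided lines fill them, presumably via
// NumElements4Job) and offsets holds their running prefix sums, so each rank ends
// up with the fully assembled ovector.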
bool MPIProcess::SetCartDimension(unsigned int dimX, unsigned int dimY)
{
   if (fgCommunicator!=0 || fgIndexComm>=0) {
      std::cout << "Warning --> MPIProcess::SetCartDimension: MPIProcess already declared! Ignoring command..." << std::endl;
      // ...
   }
   // ...

   std::cout << "Warning --> MPIProcess::SetCartDimension: Invalid topology! Ignoring command..." << std::endl;

   // ...

   std::cout << "Warning --> MPIProcess::SetCartDimension: Cartesian dimension doesn't correspond to # total procs!" << std::endl;
   std::cout << "Warning --> MPIProcess::SetCartDimension: Ignoring command..." << std::endl;
   // ...

   if (fgCommunicators[0]!=0 && fgCommunicators[1]!=0) {
      delete fgCommunicators[0]; fgCommunicators[0] = 0; fgIndecesComm[0] = 0;
      delete fgCommunicators[1]; fgCommunicators[1] = 0; fgIndecesComm[1] = 0;
   }

   // ...
}
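// When a new grid is accepted, any previously created sub-communicators are deleted
// so that the next MPIProcess construction re-splits MPI::COMM_WORLD. A rough usage
// sketch (not part of this file): call MPIProcess::SetCartDimension(nX, nY) once
// before any MPIProcess is constructed; afterwards each MPIProcess(nelements,
// indexComm) picks its sub-communicator, and SyncVector() /
// SyncSymMatrixOffDiagonal() gather the pieces computed by each rank.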