unsigned int MPIProcess::fgGlobalSize = 1;
unsigned int MPIProcess::fgGlobalRank = 0;

// by default all processes are assigned to the X direction
unsigned int MPIProcess::fgCartSizeX = 0;
unsigned int MPIProcess::fgCartSizeY = 0;
unsigned int MPIProcess::fgCartDimension = 0;
bool MPIProcess::fgNewCart = true;
#ifdef MPIPROC
MPI::Intracomm* MPIProcess::fgCommunicator = 0;
int MPIProcess::fgIndexComm = -1; // -1 means not yet initialized
MPI::Intracomm* MPIProcess::fgCommunicators[2] = {0};
unsigned int MPIProcess::fgIndecesComm[2] = {0};
#endif
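// Overview (inferred from the code in this file): MPIProcess distributes fNelements
// elements of a vector, or of the off-diagonal part of a symmetric matrix, over the
// processes of one MPI communicator. Two intracommunicators are kept in
// fgCommunicators: with a cartesian fgCartSizeX x fgCartSizeY topology they correspond
// to the two directions of the process grid, otherwise one of them is simply
// MPI::COMM_WORLD. fgIndexComm counts the currently nested MPIProcess instances (at
// most two), and the indexComm constructor argument selects which of the two
// communicators an instance wants to use.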
MPIProcess::MPIProcess(unsigned int nelements, unsigned int indexComm) :
   fNelements(nelements), fSize(1), fRank(0)
{
   // check the locally requested communicator index; valid values are 0 and 1
   indexComm = (indexComm==0) ? 0 : 1;

#ifdef MPIPROC

   StartMPI();
   if (fgGlobalSize==fgCartDimension &&
       fgCartSizeX!=fgCartDimension && fgCartSizeY!=fgCartDimension) {
      if (fgCommunicator==0 && fgIndexComm<0 && fgNewCart) {
         // first call: declare the cartesian topology
         std::cout << "Info --> MPIProcess::MPIProcess: Declare cartesian Topology ("
                   << fgCartSizeX << "x" << fgCartSizeY << ")" << std::endl;
         int color = fgGlobalRank / fgCartSizeY;
         int key = fgGlobalRank % fgCartSizeY;
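         // MPI::Intracomm::Split(color, key) groups processes by its first argument
         // and orders them by the second. With color = rank/fgCartSizeY and
         // key = rank%fgCartSizeY, the two Split calls below therefore give every
         // process one communicator of size fgCartSizeX (all ranks sharing the same
         // key) and one of size fgCartSizeY (all ranks sharing the same color).
         // Illustration for 6 processes on a 2x3 grid (fgCartSizeX=2, fgCartSizeY=3):
         // fgCommunicators[0] groups {0,3},{1,4},{2,5}; fgCommunicators[1] groups
         // {0,1,2},{3,4,5}.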
         fgCommunicators[0] = new MPI::Intracomm(MPI::COMM_WORLD.Split(key,color));
         fgCommunicators[1] = new MPI::Intracomm(MPI::COMM_WORLD.Split(color,key));

         fgNewCart = false;
      }

      fgIndexComm++;

      if (fgIndexComm>1 || fgCommunicator==(&(MPI::COMM_WORLD))) {
         // no more than 2 nested parallelization levels are supported
         std::cerr << "Error --> MPIProcess::MPIProcess: Requiring more than 2 dimensions in the topology!" << std::endl;
         MPI::COMM_WORLD.Abort(-1);
      }
      // if the second communicator is requested as first call, use all nodes
      if (((unsigned int)fgIndexComm)<indexComm)
         fgCommunicator = &(MPI::COMM_WORLD);
      else {
         fgIndecesComm[fgIndexComm] = indexComm;
         fgCommunicator = fgCommunicators[fgIndecesComm[fgIndexComm]];
      }

   }
   else {
      // no cartesian topology declared
      if (fgCartDimension!=0 && fgGlobalSize!=fgCartDimension) {
         std::cout << "Warning --> MPIProcess::MPIProcess: Cartesian dimension doesn't correspond to # total procs!" << std::endl;
         std::cout << "Warning --> MPIProcess::MPIProcess: Ignoring topology, use all procs for X." << std::endl;
         std::cout << "Warning --> MPIProcess::MPIProcess: Resetting topology..." << std::endl;
         fgCartSizeX = fgGlobalSize;
         fgCartSizeY = 1;
         fgCartDimension = fgGlobalSize;
      }
      if (fgCartSizeX==fgCartDimension) {
         fgCommunicators[0] = &(MPI::COMM_WORLD);
         fgCommunicators[1] = 0;
      }
      else {
         fgCommunicators[0] = 0;
         fgCommunicators[1] = &(MPI::COMM_WORLD);
      }
      fgIndexComm++;

      if (fgIndexComm>1) { // no more than 2 nested MPI calls are supported
         std::cerr << "Error --> MPIProcess::MPIProcess: More than 2 nested MPI calls!" << std::endl;
         MPI::COMM_WORLD.Abort(-1);
      }
      fgIndecesComm[fgIndexComm] = indexComm;
      // if both nested calls ask for the same communicator, ignore the second one
      if (fgCommunicator!=0 && fgCommunicators[indexComm]!=0) {
         std::cout << "Warning --> MPIProcess::MPIProcess: Requiring 2 nested MPI calls!" << std::endl;
         std::cout << "Warning --> MPIProcess::MPIProcess: Ignoring second call." << std::endl;
         fgIndecesComm[fgIndexComm] = (indexComm==0) ? 1 : 0;
      }
      fgCommunicator = fgCommunicators[fgIndecesComm[fgIndexComm]];

   }
   // set size and rank of this process in the selected communicator
   if (fgCommunicator!=0) {
      fSize = fgCommunicator->Get_size();
      fRank = fgCommunicator->Get_rank();
   }
   if (fSize>fNelements) {
      std::cerr << "Error --> MPIProcess::MPIProcess: more processors than elements!" << std::endl;
      MPI::COMM_WORLD.Abort(-1);
   }

#endif
   fNumElements4JobIn = fNelements / fSize;
   fNumElements4JobOut = fNelements % fSize;

}
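// Work distribution (assuming the usual balanced block split behind the helpers used
// below): every rank owns fNumElements4JobIn = fNelements/fSize elements, and the
// first fNumElements4JobOut = fNelements%fSize ranks own one extra element, so that
// NumElements4Job(rank), StartElementIndex() and EndElementIndex() describe one
// contiguous, almost equal-sized slice per rank. Example with fNelements = 10 and
// fSize = 4: the ranks get 3, 3, 2, 2 elements, i.e. the index ranges
// [0,3), [3,6), [6,8), [8,10).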
MPIProcess::~MPIProcess()
{
#ifdef MPIPROC
   // release this nesting level; if one level remains, restore its communicator
   fgCommunicator = 0;
   fgIndexComm--;
   if (fgIndexComm==0)
      fgCommunicator = fgCommunicators[fgIndecesComm[fgIndexComm]];
#endif
}
bool MPIProcess::SyncVector(ROOT::Minuit2::MnAlgebraicVector &mnvector)
{
   // with a single job there is nothing to synchronize
   if (fSize<2)
      return false;
   if (mnvector.size()!=fNelements) {
      std::cerr << "Error --> MPIProcess::SyncVector: # defined elements different from # requested elements!" << std::endl;
      std::cerr << "Error --> MPIProcess::SyncVector: no MPI synchronization is possible!" << std::endl;
      exit(-1);
   }

#ifdef MPIPROC
   unsigned int numElements4ThisJob = NumElements4Job(fRank);
   unsigned int startElementIndex = StartElementIndex();
   unsigned int endElementIndex = EndElementIndex();
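   // Each rank copies only its own slice [startElementIndex, endElementIndex) into a
   // local buffer; MPISyncVector() then gathers all slices on every rank, so after the
   // copy-back loop below the full vector is identical on all processes.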
   double dvectorJob[numElements4ThisJob];
   for (unsigned int i = startElementIndex; i<endElementIndex; i++)
      dvectorJob[i-startElementIndex] = mnvector(i);
   double dvector[fNelements];
   MPISyncVector(dvectorJob,numElements4ThisJob,dvector);
   for (unsigned int i = 0; i<fNelements; i++) {
      mnvector(i) = dvector[i];
   }

   return true;

#else

   std::cerr << "Error --> MPIProcess::SyncVector: no MPI synchronization is possible!" << std::endl;
   exit(-1);

#endif

}
bool MPIProcess::SyncSymMatrixOffDiagonal(ROOT::Minuit2::MnAlgebraicSymMatrix &mnmatrix)
{
   // with a single job there is nothing to synchronize
   if (fSize<2)
      return false;
   if (mnmatrix.size()-mnmatrix.Nrow()!=fNelements) {
      std::cerr << "Error --> MPIProcess::SyncSymMatrixOffDiagonal: # defined elements different from # requested elements!" << std::endl;
      std::cerr << "Error --> MPIProcess::SyncSymMatrixOffDiagonal: no MPI synchronization is possible!" << std::endl;
      exit(-1);
   }

#ifdef MPIPROC
   unsigned int numElements4ThisJob = NumElements4Job(fRank);
   unsigned int startElementIndex = StartElementIndex();
   unsigned int endElementIndex = EndElementIndex();
   unsigned int nrow = mnmatrix.Nrow();
   // offsetVect skips the diagonal entries, which are not synchronized
   unsigned int offsetVect = 0;
   for (unsigned int i = 0; i<startElementIndex; i++)
      if ((i+offsetVect)%(nrow-1)==0) offsetVect += (i+offsetVect)/(nrow-1);
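   // Mapping used below (as it follows from the arithmetic): the off-diagonal elements
   // are enumerated row by row, (0,1),(0,2),...,(0,nrow-1),(1,2),..., and the running
   // offsetVect accounts for the skipped diagonal, so that
   // x = (i+offsetVect)/(nrow-1) and y = (i+offsetVect)%(nrow-1)+1 with x < y.
   // For nrow = 4 the indices i = 0..5 map to (0,1),(0,2),(0,3),(1,2),(1,3),(2,3).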
   double dvectorJob[numElements4ThisJob];
   for (unsigned int i = startElementIndex; i<endElementIndex; i++) {

      int x = (i+offsetVect)/(nrow-1);
      if ((i+offsetVect)%(nrow-1)==0) offsetVect += x;
      int y = (i+offsetVect)%(nrow-1)+1;

      dvectorJob[i-startElementIndex] = mnmatrix(x,y);

   }
   double dvector[fNelements];
   MPISyncVector(dvectorJob,numElements4ThisJob,dvector);
   offsetVect = 0;
   for (unsigned int i = 0; i<fNelements; i++) {

      int x = (i+offsetVect)/(nrow-1);
      if ((i+offsetVect)%(nrow-1)==0) offsetVect += x;
      int y = (i+offsetVect)%(nrow-1)+1;

      mnmatrix(x,y) = dvector[i];

   }

   return true;
#else

   std::cerr << "Error --> MPIProcess::SyncSymMatrixOffDiagonal: no MPI synchronization is possible!" << std::endl;
   exit(-1);

#endif

}
#ifdef MPIPROC
void MPIProcess::MPISyncVector(double *ivector, int svector, double *ovector)
{
   int offsets[fSize];
   int nconts[fSize];
   nconts[0] = NumElements4Job(0);
   offsets[0] = 0;
   for (unsigned int i = 1; i<fSize; i++) {
      nconts[i] = NumElements4Job(i);
      offsets[i] = nconts[i-1] + offsets[i-1];
   }
   fgCommunicator->Allgatherv(ivector,svector,MPI::DOUBLE,
                              ovector,nconts,offsets,MPI::DOUBLE);

}
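// SetCartDimension is a static configuration hook, to be called before any MPIProcess
// is constructed; it fixes the dimX x dimY process grid used by the constructor above.
// A typical use (illustrative only) would be MPIProcess::SetCartDimension(2, 2) when
// running on 4 MPI processes, giving 2 processes to each of the two parallelization
// levels.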
bool MPIProcess::SetCartDimension(unsigned int dimX, unsigned int dimY)
{
   if (fgCommunicator!=0 || fgIndexComm>=0) {
      std::cout << "Warning --> MPIProcess::SetCartDimension: MPIProcess already declared! Ignoring command..." << std::endl;
      return false;
   }
   if (dimX*dimY<=0) {
      std::cout << "Warning --> MPIProcess::SetCartDimension: Invalid topology! Ignoring command..." << std::endl;
      return false;
   }

   StartMPI();
   if (fgGlobalSize!=dimX*dimY) {
      std::cout << "Warning --> MPIProcess::SetCartDimension: Cartesian dimension doesn't correspond to # total procs!" << std::endl;
      std::cout << "Warning --> MPIProcess::SetCartDimension: Ignoring command..." << std::endl;
      return false;
   }
   if (fgCartSizeX!=dimX || fgCartSizeY!=dimY) {
      fgCartSizeX = dimX; fgCartSizeY = dimY;
      fgCartDimension = fgCartSizeX * fgCartSizeY;
      fgNewCart = true;
      if (fgCommunicators[0]!=0 && fgCommunicators[1]!=0) {
         delete fgCommunicators[0]; fgCommunicators[0] = 0; fgIndecesComm[0] = 0;
         delete fgCommunicators[1]; fgCommunicators[1] = 0; fgIndecesComm[1] = 0;
      }
   }

   return true;

}
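// SetDoFirstMPICall is a convenience switch for a single level of parallelization.
// From the two calls below, doFirstMPICall==true assigns all processes to the X
// direction (communicator slot 0), false assigns them all to the Y direction
// (communicator slot 1); which Minuit2 level that corresponds to depends on the order
// in which the MPIProcess instances are created.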
bool MPIProcess::SetDoFirstMPICall(bool doFirstMPICall)
{
   StartMPI();

   bool ret;
   if (doFirstMPICall)
      ret = SetCartDimension(fgGlobalSize,1);
   else
      ret = SetCartDimension(1,fgGlobalSize);

   return ret;
}

#endif
MPITerminate dummyMPITerminate = MPITerminate();