HPhi++  3.1.0
CheckMPI.cpp
/* HPhi - Quantum Lattice Model Simulator */
/* Copyright (C) 2015 The University of Tokyo */

/* This program is free software: you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation, either version 3 of the License, or */
/* (at your option) any later version. */

/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */

/* You should have received a copy of the GNU General Public License */
/* along with this program. If not, see <http://www.gnu.org/licenses/>. */
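/**
@file
@brief Check the distribution of sites for the MPI parallelization
(CheckMPI) and print a summary of that distribution (CheckMPI_Summary).
*/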
#include "Common.hpp"
#include "wrapperMPI.hpp"
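/**
@brief Define the number of sites in each PE (DefineList::Nsite) and
reduce the particle numbers (DefineList::Nup, DefineList::Ndown,
DefineList::Ne) carried by the inter-process region.
@return TRUE on success, FALSE if the number of processes does not fit the lattice.
*/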
int CheckMPI(struct BindStruct *X)
{
  int isite;
  int NDimInterPE, SmallDim, SpinNum, ishift;
  int ipivot, isiteMax, isiteMax0;

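  /* NsiteMPI and Total2SzMPI hold the totals across all processes. */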
  X->Def.NsiteMPI = X->Def.Nsite;
  X->Def.Total2SzMPI = X->Def.Total2Sz;
  switch (X->Def.iCalcModel) {
  case HubbardGC: /****************************************************/
  case Hubbard:
  case HubbardNConserved:
  case Kondo:
  case KondoGC:

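    /*
     Define the number of sites in this process (Nsite): each site in the
     inter-process region carries 4 states, so nproc must be a power of 4.
    */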
    NDimInterPE = 1;
    for (isite = X->Def.NsiteMPI; isite > 0; isite--) {
      if (NDimInterPE == nproc) {
        X->Def.Nsite = isite;
        break;
      }/*if (NDimInterPE == nproc)*/
      NDimInterPE *= 4;
    }/*for (isite = X->Def.NsiteMPI; isite > 0; isite--)*/

    if (isite == 0) {
      fprintf(stdoutMPI, "%s", "Error ! The number of processes should be a power of 4 !\n");
      fprintf(stdoutMPI, " The number of processes : %d\n", nproc);
      NDimInterPE = 1;
      int ismallNproc = 1;
      int ilargeNproc = 1;
      for (isite = X->Def.NsiteMPI; isite > 0; isite--) {
        if (NDimInterPE > nproc) {
          ilargeNproc = NDimInterPE;
          if (isite > 1) ismallNproc = NDimInterPE / 4;
          break;
        }/*if (NDimInterPE > nproc)*/
        NDimInterPE *= 4;
      }/*for (isite = X->Def.NsiteMPI; isite > 0; isite--)*/
      fprintf(stdoutMPI, " Set the number of processes to %d or %d.\n", ismallNproc, ilargeNproc);
      return FALSE;
    }/*if (isite == 0)*/

    switch (X->Def.iCalcModel) /*2 (inner)*/ {

    case Hubbard:
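      /* Reduce Nup, Ndown, and Ne by the occupations fixed in the
         inter-process region, read from the bit pattern of myrank. */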
      SmallDim = myrank;
      for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++) {
        SpinNum = SmallDim % 4;
        SmallDim /= 4;
        if (SpinNum == 1 /*01*/) {
          X->Def.Nup -= 1;
          X->Def.Ne -= 1;
        }
        else if (SpinNum == 2 /*10*/) {
          X->Def.Ndown -= 1;
          X->Def.Ne -= 1;
        }
        else if (SpinNum == 3 /*11*/) {
          X->Def.Nup -= 1;
          X->Def.Ndown -= 1;
          X->Def.Ne -= 2;
        }
      }/*for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++)*/

      break;/*case Hubbard:*/

    case HubbardNConserved:
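      /* Only the total electron number is conserved here:
         reduce Ne alone for each occupied inter-process state. */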
      SmallDim = myrank;
      for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++) {
        SpinNum = SmallDim % 4;
        SmallDim /= 4;
        if (SpinNum == 1 /*01*/ || SpinNum == 2 /*10*/) X->Def.Ne -= 1;
        else if (SpinNum == 3 /*11*/) X->Def.Ne -= 2;
      }/*for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++)*/

      break;/*case HubbardNConserved:*/

    case KondoGC:
    case Kondo:
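      /* Count only the local spins inside this process; a local spin is
         not allowed to sit in the inter-process region. */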
      for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++)
        if (X->Def.LocSpn[isite] != ITINERANT) X->Def.NLocSpn -= 1;

      if (X->Def.iCalcModel == Kondo) {
        SmallDim = myrank;
        for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++) {
          SpinNum = SmallDim % 4;
          SmallDim /= 4;
          if (X->Def.LocSpn[isite] == ITINERANT) {
            if (SpinNum == 1 /*01*/) {
              X->Def.Nup -= 1;
              X->Def.Ne -= 1;
            }
            else if (SpinNum == 2 /*10*/) {
              X->Def.Ndown -= 1;
              X->Def.Ne -= 1;
            }
            else if (SpinNum == 3 /*11*/) {
              X->Def.Nup -= 1;
              X->Def.Ndown -= 1;
              X->Def.Ne -= 2;
            }
          }
          else {
            fprintf(stdoutMPI, "\n Stop because a local spin sits in the inter-process region.\n");
            return FALSE;
          }
        }/*for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++)*/
      }/*if (X->Def.iCalcModel == Kondo)*/
      else {
        X->Def.Nup = 0;
        X->Def.Ndown = 0;
        X->Def.Ne = 0;
      }

      break;/*case KondoGC, Kondo*/

    case HubbardGC:
      X->Def.Nup = 0;
      X->Def.Ndown = 0;
      X->Def.Ne = 0;
      X->Def.Total2Sz = 0;
      break;
    }/*switch (X->Def.iCalcModel) 2(inner)*/

    break;/*case HubbardGC, Hubbard, HubbardNConserved, Kondo, KondoGC:*/

  case SpinGC:/********************************************************/
  case Spin:

    if (X->Def.iFlgGeneralSpin == FALSE) {
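      /*
       Define Nsite in this process: each inter-process site carries
       2 states (up/down), so nproc must be a power of 2.
      */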
      NDimInterPE = 1;
      for (isite = X->Def.NsiteMPI; isite > 0; isite--) {
        if (NDimInterPE == nproc) {
          X->Def.Nsite = isite;
          break;
        }/*if (NDimInterPE == nproc)*/
        NDimInterPE *= 2;
      }/*for (isite = X->Def.NsiteMPI; isite > 0; isite--)*/

      if (isite == 0) {
        fprintf(stdoutMPI, "%s", "Error ! The number of processes should be a power of 2 !\n");
        fprintf(stdoutMPI, " The number of processes : %d\n", nproc);
        NDimInterPE = 1;
        int ismallNproc = 1;
        int ilargeNproc = 1;
        for (isite = X->Def.NsiteMPI; isite > 0; isite--) {
          if (NDimInterPE > nproc) {
            ilargeNproc = NDimInterPE;
            if (isite > 1) ismallNproc = NDimInterPE / 2;
            break;
          }/*if (NDimInterPE > nproc)*/
          NDimInterPE *= 2;
        }/*for (isite = X->Def.NsiteMPI; isite > 0; isite--)*/
        fprintf(stdoutMPI, " Set the number of processes to %d or %d.\n", ismallNproc, ilargeNproc);
        return FALSE;
      }/*if (isite == 0)*/

      if (X->Def.iCalcModel == Spin) {
        /* Ne should be different in each PE */
        SmallDim = myrank;
        for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++) {
          SpinNum = SmallDim % 2;
          SmallDim /= 2;
          if (SpinNum == 0) {
            X->Def.Ndown -= 1;
          }
          else {
            X->Def.Ne -= 1;
            X->Def.Nup -= 1;
          }
        }/*for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++)*/
      }/*if (X->Def.iCalcModel == Spin)*/

    }/*if (X->Def.iFlgGeneralSpin == FALSE)*/
    else {/* General Spin */
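      /*
       Define Nsite in this process: inter-process site isite carries
       SiteToBit[isite] states, so nproc must equal a partial product of
       SiteToBit taken from the last site downward.
      */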
      NDimInterPE = 1;
      for (isite = X->Def.NsiteMPI; isite > 0; isite--) {
        if (NDimInterPE == nproc) {
          X->Def.Nsite = isite;
          break;
        }/*if (NDimInterPE == nproc)*/
        NDimInterPE *= X->Def.SiteToBit[isite - 1];
      }/*for (isite = X->Def.NsiteMPI; isite > 0; isite--)*/

      if (isite == 0) {
        fprintf(stdoutMPI, "%s", "Error ! The number of processes is wrong !\n");
        fprintf(stdoutMPI, " The number of processes : %d\n", nproc);
        NDimInterPE = 1;
        int ismallNproc = 1;
        int ilargeNproc = 1;
        for (isite = X->Def.NsiteMPI; isite > 0; isite--) {
          if (NDimInterPE > nproc) {
            ilargeNproc = NDimInterPE;
            /* Divide out the factor multiplied last, SiteToBit[isite]
               (the original code divided by SiteToBit[isite - 2],
               which does not correspond to any partial product). */
            if (isite > 1) ismallNproc = NDimInterPE / X->Def.SiteToBit[isite];
            break;
          }/*if (NDimInterPE > nproc)*/
          NDimInterPE *= X->Def.SiteToBit[isite - 1];
        }/*for (isite = X->Def.NsiteMPI; isite > 0; isite--)*/
        fprintf(stdoutMPI, " Set the number of processes to %d or %d.\n", ismallNproc, ilargeNproc);
        return FALSE;
      }/*if (isite == 0)*/

      if (X->Def.iCalcModel == Spin) {
        X->Def.Total2SzMPI = X->Def.Total2Sz;

        /* Total2Sz should be different in each PE */
        SmallDim = myrank;
        for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++) {
          SpinNum = SmallDim % X->Def.SiteToBit[isite];
          SmallDim /= X->Def.SiteToBit[isite];

          X->Def.Total2Sz += X->Def.SiteToBit[isite] - 1 - 2 * SpinNum;
        }/*for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++)*/
      }/*if (X->Def.iCalcModel == Spin)*/
    }/*if (X->Def.iFlgGeneralSpin == TRUE)*/

    break;/*case SpinGC, Spin*/

  default:
    fprintf(stdoutMPI, "Error ! Wrong model !\n");
    return FALSE;
  }/*switch (X->Def.iCalcModel)*/

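  /*
   When the boost mode (flgBoost) is used, check that the number of
   processes does not exceed the dimension left for the inter-process
   region.
  */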
  if (X->Boost.flgBoost == 1) {
    isiteMax = X->Boost.W0;
    ishift = 0;
    for (ipivot = 0; ipivot < X->Boost.num_pivot; ipivot++) {
      isiteMax0 = X->Boost.list_6spin_star[ipivot][1]
                + X->Boost.list_6spin_star[ipivot][2]
                + X->Boost.list_6spin_star[ipivot][3]
                + X->Boost.list_6spin_star[ipivot][4]
                + X->Boost.list_6spin_star[ipivot][5];
      if (ishift > 1) isiteMax0 = X->Def.NsiteMPI - isiteMax0 - 1 - ishift;
      else isiteMax0 = X->Def.NsiteMPI - isiteMax0 - 2;
      if (isiteMax0 < isiteMax) isiteMax = isiteMax0;
      if (X->Boost.list_6spin_star[ipivot][6] == 1) ishift += X->Boost.ishift_nspin;
    }/*for (ipivot = 0; ipivot < X->Boost.num_pivot; ipivot++)*/

    NDimInterPE = 1;
    for (isite = 0; isite < isiteMax; isite++) NDimInterPE *= 2;

    if (NDimInterPE < nproc) {
      fprintf(stderr, "\n Error ! in ReadDefFileIdxPara.\n");
      fprintf(stderr, "Too many MPI processes ! It should be <= %d. \n\n", NDimInterPE);
      exitMPI(-1);
    }/*if (NDimInterPE < nproc)*/
  }/*if (X->Boost.flgBoost == 1)*/

  return TRUE;
}/*int CheckMPI*/
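/**
@brief Print a summary of the MPI parallelization and modify
DefineList::Tpow in the inter-process region.
*/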
void CheckMPI_Summary(struct BindStruct *X) {

  int iproc, SmallDim, SpinNum, Nelec;
  int isite;
  long int idimMPI;

  if (X->Def.iFlgScaLAPACK == 0) {
    fprintf(stdoutMPI, "\n\n###### MPI site separation summary ######\n\n");
    fprintf(stdoutMPI, " INTRA process site\n");
    fprintf(stdoutMPI, " Site Bit\n");
    for (isite = 0; isite < X->Def.Nsite; isite++) {
      switch (X->Def.iCalcModel) {
      case HubbardGC:
      case Hubbard:
      case HubbardNConserved:
      case Kondo:
      case KondoGC:

        fprintf(stdoutMPI, " %4d %4d\n", isite, 4);
        break;

      case Spin:
      case SpinGC:

        if (X->Def.iFlgGeneralSpin == FALSE) {
          fprintf(stdoutMPI, " %4d %4d\n", isite, 2);
        }/*if (X->Def.iFlgGeneralSpin == FALSE)*/
        else {
          fprintf(stdoutMPI, " %4d %4ld\n", isite, X->Def.SiteToBit[isite]);
        }/*if (X->Def.iFlgGeneralSpin == TRUE)*/

        break;

      }/*switch (X->Def.iCalcModel)*/
    }/*for (isite = 0; isite < X->Def.Nsite; isite++)*/

    fprintf(stdoutMPI, "\n INTER process site\n");
    fprintf(stdoutMPI, " Site Bit\n");
    for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++) {
      switch (X->Def.iCalcModel) {
      case HubbardGC:
      case Hubbard:
      case HubbardNConserved:
      case Kondo:
      case KondoGC:

        fprintf(stdoutMPI, " %4d %4d\n", isite, 4);
        break;

      case Spin:
      case SpinGC:

        if (X->Def.iFlgGeneralSpin == FALSE) {
          fprintf(stdoutMPI, " %4d %4d\n", isite, 2);
        }/*if (X->Def.iFlgGeneralSpin == FALSE)*/
        else {
          fprintf(stdoutMPI, " %4d %4ld\n", isite, X->Def.SiteToBit[isite]);
        }/*if (X->Def.iFlgGeneralSpin == TRUE)*/

        break;

      }/*switch (X->Def.iCalcModel)*/
    }/*for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++)*/

    fprintf(stdoutMPI, "\n Process element info\n");
    fprintf(stdoutMPI, " Process Dimension Nup Ndown Nelec Total2Sz State\n");

    for (iproc = 0; iproc < nproc; iproc++) {

      fprintf(stdoutMPI, " %7d", iproc);

      /* Each quantity below is nonzero only on its own rank, so the MPI
         sum yields that rank's value on every process. */
      if (myrank == iproc) idimMPI = X->Check.idim_max;
      else idimMPI = 0;
      fprintf(stdoutMPI, " %15ld", SumMPI_li(idimMPI));

      if (myrank == iproc) Nelec = X->Def.Nup;
      else Nelec = 0;
      fprintf(stdoutMPI, " %4d", SumMPI_i(Nelec));

      if (myrank == iproc) Nelec = X->Def.Ndown;
      else Nelec = 0;
      fprintf(stdoutMPI, " %5d", SumMPI_i(Nelec));

      if (myrank == iproc) {
        Nelec = X->Def.Ne;
        if (X->Def.iCalcModel == Spin || X->Def.iCalcModel == SpinGC) Nelec += X->Def.Ndown;
      }
      else Nelec = 0;

      fprintf(stdoutMPI, " %5d", SumMPI_i(Nelec));

      if (myrank == iproc) Nelec = X->Def.Total2Sz;
      else Nelec = 0;
      fprintf(stdoutMPI, " %8d ", SumMPI_i(Nelec));
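      /* State: the inter-process bit pattern that this rank represents. */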
      switch (X->Def.iCalcModel) {
      case HubbardGC: /****************************************************/
      case Hubbard:
      case HubbardNConserved:
      case Kondo:
      case KondoGC:

        SmallDim = iproc;
        for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++) {
          SpinNum = SmallDim % 4;
          SmallDim /= 4;
          if (SpinNum == 0) fprintf(stdoutMPI, "00");
          else if (SpinNum == 1) fprintf(stdoutMPI, "01");
          else if (SpinNum == 2) fprintf(stdoutMPI, "10");
          else if (SpinNum == 3) fprintf(stdoutMPI, "11");
        }/*for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++)*/

        break;

      case Spin:
      case SpinGC:

        SmallDim = iproc;
        if (X->Def.iFlgGeneralSpin == FALSE) {
          for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++) {
            SpinNum = SmallDim % 2;
            SmallDim /= 2;
            fprintf(stdoutMPI, "%1d", SpinNum);
          }/*for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++)*/
        }/*if (X->Def.iFlgGeneralSpin == FALSE)*/
        else {
          SmallDim = iproc;
          for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++) {
            SpinNum = SmallDim % (int) X->Def.SiteToBit[isite];
            SmallDim /= X->Def.SiteToBit[isite];
            fprintf(stdoutMPI, "%1d", SpinNum);
          }/*for (isite = X->Def.Nsite; isite < X->Def.NsiteMPI; isite++)*/
        }/*if (X->Def.iFlgGeneralSpin == TRUE)*/

        break;

      }/*switch (X->Def.iCalcModel)*/
      fprintf(stdoutMPI, "\n");
    }/*for (iproc = 0; iproc < nproc; iproc++)*/

    fprintf(stdoutMPI, "\n Total dimension : %ld\n\n", X->Check.idim_maxMPI);
    if (X->Check.idim_maxMPI < 1) {
      fprintf(stdoutMPI, "ERROR! Total dimension < 1\n");
      exitMPI(-1);
    }
  }/*if (X->Def.iFlgScaLAPACK == 0)*/
  else {
    fprintf(stdoutMPI, "\n Total dimension : %ld\n\n", X->Check.idim_max);
  }

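  /*
   Modify Tpow in the inter-process region: it restarts from 1 at the
   first inter-process site (orbital), so that it indexes the process
   rank rather than the state within this process.
  */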
  switch (X->Def.iCalcModel) {
  case HubbardGC: /****************************************************/
  case Hubbard:
  case HubbardNConserved:
  case Kondo:
  case KondoGC:

    X->Def.Tpow[2 * X->Def.Nsite] = 1;
    for (isite = 2 * X->Def.Nsite + 1; isite < 2 * X->Def.NsiteMPI; isite++)
      X->Def.Tpow[isite] = X->Def.Tpow[isite - 1] * 2;

    X->Def.OrgTpow[0] = 1;
    for (isite = 1; isite < 2 * X->Def.NsiteMPI; isite++)
      X->Def.OrgTpow[isite] = X->Def.OrgTpow[isite - 1] * 2;

    break;

  case SpinGC:/********************************************************/
  case Spin:

    if (X->Def.iFlgGeneralSpin == FALSE) {

      X->Def.Tpow[X->Def.Nsite] = 1;
      for (isite = X->Def.Nsite + 1; isite < X->Def.NsiteMPI; isite++)
        X->Def.Tpow[isite] = X->Def.Tpow[isite - 1] * 2;

    }/*if (X->Def.iFlgGeneralSpin == FALSE)*/
    else {

      X->Def.Tpow[X->Def.Nsite] = 1;
      for (isite = X->Def.Nsite + 1; isite < X->Def.NsiteMPI; isite++)
        X->Def.Tpow[isite] = X->Def.Tpow[isite - 1] * X->Def.SiteToBit[isite - 1];

    }/*if (X->Def.iFlgGeneralSpin == TRUE)*/
    break;
  }/*switch (X->Def.iCalcModel)*/
}/*void CheckMPI_Summary*/
Referenced symbols

void exitMPI(int errorcode)
    MPI abort wrapper. (wrapperMPI.cpp:86)
int Nup
    Number of spin-up electrons in this process. (struct.hpp:58)
struct DefineList Def
    Definition of the system (Hamiltonian) etc. (struct.hpp:395)
int nproc
    Number of processors, defined in InitializeMPI(). (global.cpp:72)
FILE *stdoutMPI
    File pointer to the standard output, defined in InitializeMPI(). (global.cpp:75)
int **list_6spin_star
    (struct.hpp:388)
long int *OrgTpow
    [2 * DefineList::NsiteMPI] malloc in setmem_def(). (struct.hpp:92)
int Total2Sz
    Total 2Sz in this process. (struct.hpp:69)
long int SumMPI_li(long int idim)
    MPI wrapper function to obtain the sum of a long integer across processes. (wrapperMPI.cpp:271)
int Nsite
    Number of sites in the INTRA-process region. (struct.hpp:56)
long int idim_maxMPI
    The total dimension across processes. (struct.hpp:306)
int *LocSpn
    [DefineList::NLocSpn] Flag (and size) of the local spin. malloc in setmem_def(). (struct.hpp:82)
int iFlgScaLAPACK
    ScaLAPACK mode (only for FullDiag). (struct.hpp:237)
int CheckMPI(struct BindStruct *X)
    Define the number of sites in each PE (DefineList::Nsite). Reduce the number of electrons in the inter-process region. (defined above in CheckMPI.cpp)
int NsiteMPI
    Total number of sites; differs from DefineList::Nsite. (struct.hpp:57)
int Ne
    Number of electrons in this process. (struct.hpp:71)
struct BindStruct
    Bind. (struct.hpp:394)
struct BoostList Boost
    For Boost. (struct.hpp:399)
int SumMPI_i(int idim)
    MPI wrapper function to obtain the sum of an integer across processes. (wrapperMPI.cpp:288)
int myrank
    Process ID, defined in InitializeMPI(). (global.cpp:73)
int iFlgGeneralSpin
    Flag for the general (Sz != 1/2) spin. (struct.hpp:86)
long int ishift_nspin
    (struct.hpp:384)
long int *SiteToBit
    [DefineList::NsiteMPI] Similar to DefineList::Tpow; for general spin. (struct.hpp:94)
long int *Tpow
    [2 * DefineList::NsiteMPI] malloc in setmem_def(). (struct.hpp:90)
long int num_pivot
    (struct.hpp:383)
long int W0
    (struct.hpp:382)
int iCalcModel
    Switch for the model. 0:Hubbard, 1:Spin, 2:Kondo, 3:HubbardGC, 4:SpinGC, 5:KondoGC, 6:HubbardNConserved. (struct.hpp:200)
int flgBoost
    Flag for whether the CMA (boost) algorithm is used. (struct.hpp:380)
int Ndown
    Number of spin-down electrons in this process. (struct.hpp:59)
int NLocSpn
    Number of local spins. (struct.hpp:84)
struct CheckList Check
    Size of the Hilbert space. (struct.hpp:396)
void CheckMPI_Summary(struct BindStruct *X)
    Print information on the MPI parallelization and modify DefineList::Tpow in the inter-process region. (defined above in CheckMPI.cpp)
long int idim_max
    The dimension of the Hilbert space of this process. (struct.hpp:305)
int Total2SzMPI
    Total 2Sz across processes. (struct.hpp:70)
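Usage note: a minimal sketch of how the two routines in this file fit together, assuming a BindStruct X already filled by the input readers; the wrapper name setupLattice below is illustrative and not part of HPhi:

/* Verify that nproc fits the lattice and abort otherwise, then print
   the site-separation summary (which also fixes Tpow). */
void setupLattice(struct BindStruct *X)
{
  if (CheckMPI(X) != TRUE) exitMPI(-1);
  CheckMPI_Summary(X);
}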