HPhi++  3.1.0
diagonalcalc.cpp File Reference

Calculate diagonal components, i.e. \( H_d |\phi_0\rangle = E_d |\phi_0\rangle \). More...

#include <bitcalc.hpp>
#include "FileIO.hpp"
#include "diagonalcalc.hpp"
#include "mltplySpinCore.hpp"
#include "wrapperMPI.hpp"
#include <iostream>

Go to the source code of this file.

Functions

int SetDiagonalTEInterAll (long int isite1, long int isite2, long int isigma1, long int isigma2, double dtmp_V, struct BindStruct *X, std::complex< double > *tmp_v0, std::complex< double > *tmp_v1)
 Update the vector by the general two-body diagonal interaction, \( H_{i\sigma_1 j\sigma_2} n_ {i\sigma_1}n_{j\sigma_2}\).
(Using in Time Evolution mode). More...
 
int SetDiagonalTEChemi (long int isite1, long int spin, double dtmp_V, struct BindStruct *X, std::complex< double > *tmp_v0, std::complex< double > *tmp_v1)
 Update the vector by the chemical potential \( \mu_{i \sigma_1} n_ {i \sigma_1} \)
generated by the commutation relation in terms of the general two-body interaction,
\( c_ {i \sigma_1} a_{j\sigma_2}c_ {j \sigma_2}a_ {i \sigma_1} = c_ {i \sigma_1}a_ {i \sigma_1}-c_ {i \sigma_1} a_ {i \sigma_1} c_ {j \sigma_2}a_{j\sigma_2}\) . (Using in Time Evolution mode). More...
 
int SetDiagonalTETransfer (long int isite1, double dtmp_V, long int spin, struct BindStruct *X, std::complex< double > *tmp_v0, std::complex< double > *tmp_v1)
 Update the vector by the general one-body diagonal interaction, \( \mu_{i\sigma_1} n_ {i\sigma_1}\).
(Using in Time Evolution mode). More...
 
int diagonalcalcForTE (const int _istep, struct BindStruct *X, std::complex< double > *tmp_v0, std::complex< double > *tmp_v1)
 
int SetDiagonalCoulombIntra (long int isite1, double dtmp_V, struct BindStruct *X)
 Calculate the components for Coulombintra interaction, \( U_i n_ {i \uparrow}n_{i \downarrow} \). More...
 
int SetDiagonalChemi (long int isite1, double dtmp_V, long int spin, struct BindStruct *X)
 Calculate the components for the chemical potential \( \mu_{i \sigma_1} n_ {i \sigma_1} \). More...
 
int SetDiagonalCoulombInter (long int isite1, long int isite2, double dtmp_V, struct BindStruct *X)
 Calculate the components for Coulombinter interaction, \( V_{ij} n_ {i}n_{j} \). More...
 
int SetDiagonalHund (long int isite1, long int isite2, double dtmp_V, struct BindStruct *X)
 Calculate the components for Hund interaction, \( H_{ij}(n_ {i\uparrow}n_{j\uparrow}+ n_ {i\downarrow}n_{j\downarrow})\). More...
 
int SetDiagonalInterAll (long int isite1, long int isite2, long int isigma1, long int isigma2, double dtmp_V, struct BindStruct *X)
 Calculate the components for general two-body diagonal interaction, \( H_{i\sigma_1 j\sigma_2} n_ {i\sigma_1}n_{j\sigma_2}\). More...
 
int diagonalcalc (struct BindStruct *X)
 Calculate diagonal components and obtain the list, list_diagonal. More...
 

Detailed Description

Calculate diagonal components, i.e. \( H_d |\phi_0\rangle = E_d |\phi_0\rangle \).

Version
2.1

add functions to calculate diagonal components for Time evolution.

Author
Kazuyoshi Yoshimi (The University of Tokyo)
Version
0.2

modify functions to calculate diagonal components for general spin.

Author
Kazuyoshi Yoshimi (The University of Tokyo)
Version
0.1
Author
Takahiro Misawa (The University of Tokyo)
Kazuyoshi Yoshimi (The University of Tokyo)

Definition in file diagonalcalc.cpp.

Function Documentation

◆ diagonalcalc()

int diagonalcalc ( struct BindStruct X)

Calculate diagonal components and obtain the list, list_diagonal.

Parameters
X[in] Struct to get the information of the diagonal operators.
Author
Takahiro Misawa (The University of Tokyo)
Kazuyoshi Yoshimi (The University of Tokyo)
Return values
-1fail to calculate diagonal components.
0succeed to calculate diagonal components.

Definition at line 1912 of file diagonalcalc.cpp.

References BindStruct::Check, childfopenMPI(), DefineList::CoulombInter, DefineList::CoulombIntra, BindStruct::Def, DefineList::EDChemi, DefineList::EDNChemi, DefineList::EDParaChemi, DefineList::EDSpinChemi, DefineList::HundCoupling, CheckList::idim_max, DefineList::InterAll_Diagonal, list_Diagonal, DefineList::NCoulombInter, DefineList::NCoulombIntra, DefineList::NHundCoupling, DefineList::NInterAll_Diagonal, DefineList::ParaCoulombInter, DefineList::ParaCoulombIntra, DefineList::ParaHundCoupling, DefineList::ParaInterAll_Diagonal, SetDiagonalChemi(), SetDiagonalCoulombInter(), SetDiagonalCoulombIntra(), SetDiagonalHund(), SetDiagonalInterAll(), stdoutMPI, and TimeKeeper().

Referenced by CalcSpectrum(), main(), and SetDiagonalInterAll().

1914  {
1915 
1916  FILE *fp;
1917  long int i, j;
1918  long int isite1, isite2;
1919  long int spin;
1920  double tmp_V;
1921 
1922  /*[s] For InterAll*/
1923  long int A_spin, B_spin;
1924  /*[e] For InterAll*/
1925  long int i_max = X->Check.idim_max;
1926 
1927  fprintf(stdoutMPI, "%s", " Start: Calculate diagaonal components of Hamiltonian. \n");
1928  TimeKeeper(X, "%s_TimeKeeper.dat", "diagonal calculation starts: %s", "a");
1929 
1930 #pragma omp parallel for default(none) private(j) shared(list_Diagonal) firstprivate(i_max)
1931  for (j = 1; j <= i_max; j++) {
1932  list_Diagonal[j] = 0.0;
1933  }
1934 
1935  if (X->Def.NCoulombIntra > 0) {
1936  if (childfopenMPI("CHECK_CoulombIntra.dat", "w", &fp) != 0) {
1937  return -1;
1938  }
1939  for (i = 0; i < X->Def.NCoulombIntra; i++) {
1940  isite1 = X->Def.CoulombIntra[i][0] + 1;
1941  tmp_V = X->Def.ParaCoulombIntra[i];
1942  fprintf(fp, "i=%ld isite1=%ld tmp_V=%lf \n", i, isite1, tmp_V);
1943  SetDiagonalCoulombIntra(isite1, tmp_V, X);
1944  }
1945  fclose(fp);
1946  }
1947 
1948  if (X->Def.EDNChemi > 0) {
1949  if (childfopenMPI("CHECK_Chemi.dat", "w", &fp) != 0) {
1950  return -1;
1951  }
1952  for (i = 0; i < X->Def.EDNChemi; i++) {
1953  isite1 = X->Def.EDChemi[i] + 1;
1954  spin = X->Def.EDSpinChemi[i];
1955  tmp_V = -X->Def.EDParaChemi[i];
1956  fprintf(fp, "i=%ld spin=%ld isite1=%ld tmp_V=%lf \n", i, spin, isite1, tmp_V);
1957  if (SetDiagonalChemi(isite1, tmp_V, spin, X) != 0) {
1958  return -1;
1959  }
1960  }
1961  fclose(fp);
1962  }
1963 
1964  if (X->Def.NCoulombInter > 0) {
1965  if (childfopenMPI("CHECK_INTER_U.dat", "w", &fp) != 0) {
1966  return -1;
1967  }
1968  for (i = 0; i < X->Def.NCoulombInter; i++) {
1969  isite1 = X->Def.CoulombInter[i][0] + 1;
1970  isite2 = X->Def.CoulombInter[i][1] + 1;
1971  tmp_V = X->Def.ParaCoulombInter[i];
1972  fprintf(fp, "i=%ld isite1=%ld isite2=%ld tmp_V=%lf \n", i, isite1, isite2, tmp_V);
1973  if (SetDiagonalCoulombInter(isite1, isite2, tmp_V, X) != 0) {
1974  return -1;
1975  }
1976  }
1977  fclose(fp);
1978  }
1979  if (X->Def.NHundCoupling > 0) {
1980  if (childfopenMPI("CHECK_Hund.dat", "w", &fp) != 0) {
1981  return -1;
1982  }
1983  for (i = 0; i < X->Def.NHundCoupling; i++) {
1984  isite1 = X->Def.HundCoupling[i][0] + 1;
1985  isite2 = X->Def.HundCoupling[i][1] + 1;
1986  tmp_V = -X->Def.ParaHundCoupling[i];
1987  if (SetDiagonalHund(isite1, isite2, tmp_V, X) != 0) {
1988  return -1;
1989  }
1990  fprintf(fp, "i=%ld isite1=%ld isite2=%ld tmp_V=%lf \n", i, isite1, isite2, tmp_V);
1991  }
1992  fclose(fp);
1993  }
1994 
1995  if (X->Def.NInterAll_Diagonal > 0) {
1996  if (childfopenMPI("CHECK_InterAll.dat", "w", &fp) != 0) {
1997  return -1;
1998  }
1999  for (i = 0; i < X->Def.NInterAll_Diagonal; i++) {
2000  isite1 = X->Def.InterAll_Diagonal[i][0] + 1;
2001  A_spin = X->Def.InterAll_Diagonal[i][1];
2002  isite2 = X->Def.InterAll_Diagonal[i][2] + 1;
2003  B_spin = X->Def.InterAll_Diagonal[i][3];
2004  tmp_V = X->Def.ParaInterAll_Diagonal[i];
2005  fprintf(fp, "i=%ld isite1=%ld A_spin=%ld isite2=%ld B_spin=%ld tmp_V=%lf \n", i, isite1, A_spin, isite2, B_spin, tmp_V);
2006  SetDiagonalInterAll(isite1, isite2, A_spin, B_spin, tmp_V, X);
2007  }
2008  fclose(fp);
2009  }
2010 
2011  TimeKeeper(X, "%s_TimeKeeper.dat", "diagonal calculation finishes: %s", "a");
2012  fprintf(stdoutMPI, "%s", " End : Calculate diagaonal components of Hamiltonian. \n\n");
2013  return 0;
2014 }
struct DefineList Def
Definition of the system (Hamiltonian), etc.
Definition: struct.hpp:395
int SetDiagonalChemi(long int isite1, double dtmp_V, long int spin, struct BindStruct *X)
Calculate the components for the chemical potential .
FILE * stdoutMPI
File pointer to the standard output defined in InitializeMPI()
Definition: global.cpp:75
int NCoulombInter
Number of off-site Coulomb interaction.
Definition: struct.hpp:127
int * EDSpinChemi
[DefineList::Nsite]
Definition: struct.hpp:99
double * ParaInterAll_Diagonal
[DefineList::NInterAll_Diagonal] Coupling constant of diagonal inter-all term. malloc in setmem_def()...
Definition: struct.hpp:168
int SetDiagonalInterAll(long int isite1, long int isite2, long int isigma1, long int isigma2, double dtmp_V, struct BindStruct *X)
Calculate the components for general two-body diagonal interaction, .
int EDNChemi
Number of on-site term.
Definition: struct.hpp:97
int NHundCoupling
Number of Hund coupling.
Definition: struct.hpp:133
int ** InterAll_Diagonal
[DefineList::NinterAll_Diagonal][4] Interacted quartet
Definition: struct.hpp:162
int ** HundCoupling
[DefineList::NHundCoupling][2] Index of Hund coupling. malloc in setmem_def().
Definition: struct.hpp:134
double * ParaCoulombInter
[DefineList::NCoulombInter]Coupling constant of off-site Coulomb interaction. malloc in setmem_def()...
Definition: struct.hpp:130
int ** CoulombInter
Definition: struct.hpp:128
double * EDParaChemi
[DefineList::Nsite] On-site potential parameter. malloc in setmem_def().
Definition: struct.hpp:100
int NInterAll_Diagonal
Number of interall term (diagonal)
Definition: struct.hpp:164
double * list_Diagonal
Definition: global.cpp:24
int SetDiagonalCoulombIntra(long int isite1, double dtmp_V, struct BindStruct *X)
Calculate the components for Coulombintra interaction, .
int ** CoulombIntra
Definition: struct.hpp:122
double * ParaHundCoupling
[DefineList::NHundCoupling] Hund coupling constant. malloc in setmem_def().
Definition: struct.hpp:136
int NCoulombIntra
Definition: struct.hpp:121
int TimeKeeper(struct BindStruct *X, const char *cFileName, const char *cTimeKeeper_Message, const char *cWriteType)
Functions for writing a time log.
Definition: log.cpp:42
int SetDiagonalCoulombInter(long int isite1, long int isite2, double dtmp_V, struct BindStruct *X)
Calculate the components for Coulombinter interaction, .
double * ParaCoulombIntra
Definition: struct.hpp:124
int * EDChemi
[DefineList::Nsite] Chemical potential. malloc in setmem_def().
Definition: struct.hpp:98
struct CheckList Check
Size of the Hilbert space.
Definition: struct.hpp:396
int SetDiagonalHund(long int isite1, long int isite2, double dtmp_V, struct BindStruct *X)
Calculate the components for Hund interaction, .
long int idim_max
The dimension of the Hilbert space of this process.
Definition: struct.hpp:305
int childfopenMPI(const char *_cPathChild, const char *_cmode, FILE **_fp)
Only the root process open file in output/ directory.
Definition: FileIO.cpp:27

◆ diagonalcalcForTE()

int diagonalcalcForTE ( const int  _istep,
struct BindStruct X,
std::complex< double > *  tmp_v0,
std::complex< double > *  tmp_v1 
)

Definition at line 703 of file diagonalcalc.cpp.

References BindStruct::Def, DefineList::NTEChemi, DefineList::NTEInterAllDiagonal, DefineList::NTETransferDiagonal, DefineList::ParaTEChemi, DefineList::ParaTEInterAllDiagonal, DefineList::ParaTETransferDiagonal, SetDiagonalCoulombIntra(), SetDiagonalTEChemi(), SetDiagonalTEInterAll(), SetDiagonalTETransfer(), DefineList::SpinTEChemi, DefineList::TEChemi, DefineList::TEInterAllDiagonal, and DefineList::TETransferDiagonal.

Referenced by mltply(), and SetDiagonalTETransfer().

// Apply the diagonal part of the time-evolution (TE) Hamiltonian at step
// _istep directly to the vector pair (tmp_v0, tmp_v1) via the
// SetDiagonalTE* helpers. Returns 0 on success, -1 on failure.
 708  {
 709 
 710  long int i;
 711  long int isite1, isite2;
 712  long int A_spin, B_spin;
 713  double tmp_V;
 714 
// Branch 1: one-body diagonal transfer terms for this step.
 715  if (X->Def.NTETransferDiagonal[_istep] > 0) {
 716  for (i = 0; i < X->Def.NTETransferDiagonal[_istep]; i++) {
 717  isite1 = X->Def.TETransferDiagonal[_istep][i][0] + 1;
 718  A_spin = X->Def.TETransferDiagonal[_istep][i][1];
 719  tmp_V = X->Def.ParaTETransferDiagonal[_istep][i];
// NOTE(review): the return value of SetDiagonalTETransfer is ignored here,
// unlike the checked calls below — confirm whether a failure should abort.
 720  SetDiagonalTETransfer(isite1, tmp_V, A_spin, X, tmp_v0, tmp_v1);
 721  }
 722  }
// Branch 2 (mutually exclusive with branch 1): diagonal inter-all terms.
 723  else if (X->Def.NTEInterAllDiagonal[_istep] > 0) {
 724  for (i = 0; i < X->Def.NTEInterAllDiagonal[_istep]; i++) {
 725  //Assume n_{1\sigma_1} n_{2\sigma_2}
 726  isite1 = X->Def.TEInterAllDiagonal[_istep][i][0] + 1;
 727  A_spin = X->Def.TEInterAllDiagonal[_istep][i][1];
 728  isite2 = X->Def.TEInterAllDiagonal[_istep][i][2] + 1;
 729  B_spin = X->Def.TEInterAllDiagonal[_istep][i][3];
 730  tmp_V = X->Def.ParaTEInterAllDiagonal[_istep][i];
 731  if (SetDiagonalTEInterAll(isite1, isite2, A_spin, B_spin, tmp_V, X, tmp_v0, tmp_v1) != 0) {
 732  return -1;
 733  }
 734  }
 735 
// NOTE(review): the TEChemi block is nested INSIDE the inter-all branch, so
// chemical-potential terms are applied only when inter-all terms exist and no
// transfer terms were processed for this step. This presumably reflects how
// the TE input is split by the parser — verify against the input reader.
 736  if (X->Def.NTEChemi[_istep] > 0) {
 737  for (i = 0; i < X->Def.NTEChemi[_istep]; i++) {
 738  isite1 = X->Def.TEChemi[_istep][i] + 1;
 739  A_spin = X->Def.SpinTEChemi[_istep][i];
// Sign convention: the stored parameter enters with a minus sign.
 740  tmp_V = -X->Def.ParaTEChemi[_istep][i];
 741  if (SetDiagonalTEChemi(isite1, A_spin, tmp_V, X, tmp_v0, tmp_v1) != 0) {
 742  return -1;
 743  }
 744  }
 745  }
 746  }
 747  return 0;
 748 }
int * NTETransferDiagonal
Definition: struct.hpp:260
struct DefineList Def
Definition of the system (Hamiltonian), etc.
Definition: struct.hpp:395
int ** SpinTEChemi
Definition: struct.hpp:297
int *** TETransferDiagonal
Definition: struct.hpp:264
int ** TEChemi
Definition: struct.hpp:295
int SetDiagonalTETransfer(long int isite1, double dtmp_V, long int spin, struct BindStruct *X, std::complex< double > *tmp_v0, std::complex< double > *tmp_v1)
Update the vector by the general one-body diagonal interaction, . (Using in Time Evolution mode)...
int * NTEChemi
Definition: struct.hpp:296
double ** ParaTEChemi
Definition: struct.hpp:298
double ** ParaTETransferDiagonal
Definition: struct.hpp:268
int SetDiagonalTEInterAll(long int isite1, long int isite2, long int isigma1, long int isigma2, double dtmp_V, struct BindStruct *X, std::complex< double > *tmp_v0, std::complex< double > *tmp_v1)
Update the vector by the general two-body diagonal interaction, . (Using in Time Evolution mode)...
int * NTEInterAllDiagonal
Definition: struct.hpp:278
double ** ParaTEInterAllDiagonal
Definition: struct.hpp:293
int SetDiagonalTEChemi(long int isite1, long int spin, double dtmp_V, struct BindStruct *X, std::complex< double > *tmp_v0, std::complex< double > *tmp_v1)
Update the vector by the chemical potential generated by the commutation relation in terms of the g...
int *** TEInterAllDiagonal
Definition: struct.hpp:286

◆ SetDiagonalChemi()

int SetDiagonalChemi ( long int  isite1,
double  dtmp_V,
long int  spin,
struct BindStruct X 
)

Calculate the components for the chemical potential \( \mu_{i \sigma_1} n_ {i \sigma_1} \).

Parameters
isite1[in] a site number
dtmp_V[in] A value of coulombintra interaction \( \mu_{i \sigma_1} \)
spin[in] Spin index for the chemical potential
X[in] Define list to get dimension number
Return values
-1fail to calculate the diagonal component.
0succeed to calculate the diagonal component.
Version
0.1
Author
Takahiro Misawa (The University of Tokyo)
Kazuyoshi Yoshimi (The University of Tokyo)

Definition at line 871 of file diagonalcalc.cpp.

References BitCheckGeneral(), BindStruct::Check, BindStruct::Def, DefineList::iCalcModel, CheckList::idim_max, DefineList::iFlgGeneralSpin, list_1, list_Diagonal, myrank, DefineList::Nsite, SetDiagonalCoulombInter(), DefineList::SiteToBit, stdoutMPI, and DefineList::Tpow.

Referenced by diagonalcalc(), and SetDiagonalCoulombIntra().

// Accumulate the chemical-potential term mu_{i sigma} n_{i sigma} into
// list_Diagonal for every basis state. Site occupations are decoded from the
// bit representation of the basis index (or of myrank for sites held by
// other MPI processes). Returns 0 on success, -1 for an unknown CalcModel.
 876  {
 877  long int is1_up;
 878  long int ibit1_up;
 879  long int num1;
 880  long int isigma1 = spin;
 881  long int is1, ibit1;
 882 
 883  long int j;
 884  long int i_max = X->Check.idim_max;
 885 
 886  /*
 887  When isite1 is in the inter process region
 888  */
// For sites beyond Nsite the occupation is encoded in the bits of this
// process's MPI rank, so the same contribution is added to every element.
 889  if (isite1 > X->Def.Nsite) {
 890 
 891  switch (X->Def.iCalcModel) {
 892 
 893  case HubbardGC:
 894  case KondoGC:
 895  case Hubbard:
 896  case Kondo:
 897 
// Tpow[2*isite1-2] is the up-spin bit mask, Tpow[2*isite1-1] the down-spin one.
 898  if (spin == 0) {
 899  is1 = X->Def.Tpow[2 * isite1 - 2];
 900  }
 901  else {
 902  is1 = X->Def.Tpow[2 * isite1 - 1];
 903  }
 904  ibit1 = (long int)myrank & is1;
// num1 is 0 or 1: the occupation of (isite1, spin) on this rank.
 905  num1 = ibit1 / is1;
 906 #pragma omp parallel for default(none) shared(list_Diagonal) \
 907  firstprivate(i_max, dtmp_V, num1) private(j)
 908  for (j = 1; j <= i_max; j++) list_Diagonal[j] += num1 * dtmp_V;
 909 
 910  break;/*case HubbardGC, case KondoGC, Hubbard, Kondo:*/
 911 
 912  case SpinGC:
 913  case Spin:
 914 
 915  if (X->Def.iFlgGeneralSpin == FALSE) {
 916  is1_up = X->Def.Tpow[isite1 - 1];
// XOR with (1 - spin) selects whether the up or down state contributes.
 917  ibit1_up = (((long int)myrank& is1_up) / is1_up) ^ (1 - spin);
 918 #pragma omp parallel for default(none) shared(list_Diagonal) \
 919 firstprivate(i_max, dtmp_V, ibit1_up) private(j)
 920  for (j = 1; j <= i_max; j++) list_Diagonal[j] += dtmp_V * ibit1_up;
 921  } /*if (X->Def.iFlgGeneralSpin == FALSE)*/
 922  else /*if (X->Def.iFlgGeneralSpin == TRUE)*/ {
// General (S != 1/2) spin: decode the local spin state via BitCheckGeneral.
 923  num1 = BitCheckGeneral((long int)myrank,
 924  isite1, isigma1, X->Def.SiteToBit, X->Def.Tpow);
 925  if (num1 != 0) {
 926 #pragma omp parallel for default(none) shared(list_Diagonal) \
 927 firstprivate(i_max, dtmp_V) private(j)
 928  for (j = 1; j <= i_max; j++) list_Diagonal[j] += dtmp_V;
 929  }/*if (num1 != 0)*/
 930  }/*if (X->Def.iFlgGeneralSpin == TRUE)*/
 931  break;/*case SpinGC, Spin:*/
 932 
 933  default:
 934  fprintf(stdoutMPI, "Error: CalcModel %d is incorrect.\n", X->Def.iCalcModel);
 935  return -1;
 936 
 937  } /*switch (X->Def.iCalcModel)*/
 938 
 939  return 0;
 940 
 941  }/*if (isite1 > X->Def.Nsite)*/
 942 
// Intra-process site: decode occupation per basis state. Grand-canonical
// models index states by j-1 directly; canonical models go through list_1.
 943  switch (X->Def.iCalcModel) {
 944  case HubbardGC:
 945  if (spin == 0) {
 946  is1 = X->Def.Tpow[2 * isite1 - 2];
 947  }
 948  else {
 949  is1 = X->Def.Tpow[2 * isite1 - 1];
 950  }
// NOTE(review): j is not listed in the data-sharing clauses of this
// default(none) pragma — verify against the real source file.
 951 #pragma omp parallel for default(none) shared(list_1, list_Diagonal) firstprivate(i_max, dtmp_V, is1) private(num1, ibit1)
 952  for (j = 1; j <= i_max; j++) {
 953 
 954  ibit1 = (j - 1)&is1;
 955  num1 = ibit1 / is1;
 956  //fprintf(stdoutMPI, "DEBUG: spin=%ld is1=%ld: isite1=%ld j=%ld num1=%ld \n",spin,is1,isite1,j,num1);
 957 
 958  list_Diagonal[j] += num1 * dtmp_V;
 959  }
 960  break;
 961  case KondoGC:
 962  case Hubbard:
 963  case Kondo:
 964  if (spin == 0) {
 965  is1 = X->Def.Tpow[2 * isite1 - 2];
 966  }
 967  else {
 968  is1 = X->Def.Tpow[2 * isite1 - 1];
 969  }
 970 
 971 #pragma omp parallel for default(none) shared(list_1, list_Diagonal) firstprivate(i_max, dtmp_V, is1) private(num1, ibit1)
 972  for (j = 1; j <= i_max; j++) {
 973 
// Canonical ensembles map basis index j to the bit configuration list_1[j].
 974  ibit1 = list_1[j] & is1;
 975  num1 = ibit1 / is1;
 976  list_Diagonal[j] += num1 * dtmp_V;
 977  }
 978  break;
 979 
 980  case SpinGC:
 981  if (X->Def.iFlgGeneralSpin == FALSE) {
 982  is1_up = X->Def.Tpow[isite1 - 1];
 983 #pragma omp parallel for default(none) shared(list_1, list_Diagonal) firstprivate(i_max, dtmp_V, is1_up, spin) private(num1, ibit1_up)
 984  for (j = 1; j <= i_max; j++) {
 985  ibit1_up = (((j - 1)& is1_up) / is1_up) ^ (1 - spin);
 986  list_Diagonal[j] += dtmp_V * ibit1_up;
 987  }
 988  }
 989  else {
 990 #pragma omp parallel for default(none) shared(list_Diagonal) firstprivate(i_max, dtmp_V, isite1, isigma1, X) private(j, num1)
 991  for (j = 1; j <= i_max; j++) {
 992  num1 = BitCheckGeneral(j - 1, isite1, isigma1, X->Def.SiteToBit, X->Def.Tpow);
 993  if (num1 != 0) {
 994  list_Diagonal[j] += dtmp_V;
 995  }
 996  }
 997  }
 998  break;
 999 
 1000  case Spin:
 1001  if (X->Def.iFlgGeneralSpin == FALSE) {
 1002  is1_up = X->Def.Tpow[isite1 - 1];
 1003 #pragma omp parallel for default(none) shared(list_1, list_Diagonal) firstprivate(i_max, dtmp_V, is1_up, spin) private(num1, ibit1_up)
 1004  for (j = 1; j <= i_max; j++) {
 1005  ibit1_up = ((list_1[j] & is1_up) / is1_up) ^ (1 - spin);
 1006  list_Diagonal[j] += dtmp_V * ibit1_up;
 1007  }
 1008  }
 1009  else {
 1010 #pragma omp parallel for default(none) shared(list_Diagonal, list_1) firstprivate(i_max, dtmp_V, isite1, isigma1, X) private(j, num1)
 1011  for (j = 1; j <= i_max; j++) {
 1012  num1 = BitCheckGeneral(list_1[j], isite1, isigma1, X->Def.SiteToBit, X->Def.Tpow);
 1013  if (num1 != 0) {
 1014  list_Diagonal[j] += dtmp_V;
 1015  }
 1016  }
 1017  }
 1018 
 1019  break;
 1020  default:
 1021  fprintf(stdoutMPI, "Error: CalcModel %d is incorrect.\n", X->Def.iCalcModel);
 1022  return -1;
 1023  }
 1024 
 1025  return 0;
 1026 }
struct DefineList Def
Definition of the system (Hamiltonian), etc.
Definition: struct.hpp:395
FILE * stdoutMPI
File pointer to the standard output defined in InitializeMPI()
Definition: global.cpp:75
int BitCheckGeneral(const long int org_bit, const int org_isite, const int target_ispin, const long int *SiteToBit, const long int *Tpow)
bit check function for general spin
Definition: bitcalc.cpp:392
int Nsite
Number of sites in the INTRA process region.
Definition: struct.hpp:56
int myrank
Process ID, defined in InitializeMPI()
Definition: global.cpp:73
int iFlgGeneralSpin
Flag for the general (Sz/=1/2) spin.
Definition: struct.hpp:86
double * list_Diagonal
Definition: global.cpp:24
long int * SiteToBit
[DefineList::NsiteMPI] Similar to DefineList::Tpow. For general spin.
Definition: struct.hpp:94
long int * Tpow
[2 * DefineList::NsiteMPI] malloc in setmem_def().
Definition: struct.hpp:90
int iCalcModel
Switch for model. 0:Hubbard, 1:Spin, 2:Kondo, 3:HubbardGC, 4:SpinGC, 5:KondoGC, 6:HubbardNConserved.
Definition: struct.hpp:200
struct CheckList Check
Size of the Hilbert space.
Definition: struct.hpp:396
long int * list_1
Definition: global.cpp:25
long int idim_max
The dimension of the Hilbert space of this process.
Definition: struct.hpp:305

◆ SetDiagonalCoulombInter()

int SetDiagonalCoulombInter ( long int  isite1,
long int  isite2,
double  dtmp_V,
struct BindStruct X 
)

Calculate the components for Coulombinter interaction, \( V_{ij} n_ {i}n_{j} \).

Parameters
isite1[in] a site number \(i \)
isite2[in] a site number \(j \)
dtmp_V[in] A value of coulombinter interaction \( V_{ij} \)
X[in] Define list to get the operator information.
Return values
-1fail to calculate the diagonal component.
0succeed to calculate the diagonal component.
Version
0.1
Author
Takahiro Misawa (The University of Tokyo)
Kazuyoshi Yoshimi (The University of Tokyo)

Definition at line 1041 of file diagonalcalc.cpp.

References BindStruct::Check, BindStruct::Def, DefineList::iCalcModel, CheckList::idim_max, list_1, list_Diagonal, myrank, DefineList::Nsite, SetDiagonalHund(), stdoutMPI, and DefineList::Tpow.

Referenced by diagonalcalc(), and SetDiagonalChemi().

// Accumulate the off-site Coulomb term V_{ij} n_i n_j (n_i = total particle
// number on site i, up + down) into list_Diagonal. The three top-level
// branches handle: both sites inter-process, only isite2 inter-process, and
// both sites local. For Spin models n_i = 1 on every site, so the term is a
// constant shift dtmp_V. Returns 0 on success, -1 for an unknown CalcModel.
 1046  {
 1047  long int is1_up, is1_down;
 1048  long int ibit1_up, ibit1_down;
 1049  long int num1;
 1050  long int is2_up, is2_down;
 1051  long int ibit2_up, ibit2_down;
 1052  long int num2;
 1053 
 1054  long int j;
 1055  long int i_max = X->Check.idim_max;
 1056 
 1057  /*
 1058  Force isite1 <= isite2
 1059  */
// Swap so that the branch tests below only need to compare isite1/isite2
// against Nsite in one order (j is reused as a scratch variable here).
 1060  if (isite2 < isite1) {
 1061  j = isite2;
 1062  isite2 = isite1;
 1063  isite1 = j;
 1064  }/*if (isite2 < isite1)*/
 1065  /*
 1066  When isite1 & site2 are in the inter process region
 1067  */
 1068  if (/*isite2 => */ isite1 > X->Def.Nsite) {
 1069 
 1070  switch (X->Def.iCalcModel) {
 1071 
 1072  case HubbardGC:
 1073  case KondoGC:
 1074  case Hubbard:
 1075  case Kondo:
 1076 
 1077  is1_up = X->Def.Tpow[2 * isite1 - 2];
 1078  is1_down = X->Def.Tpow[2 * isite1 - 1];
 1079  is2_up = X->Def.Tpow[2 * isite2 - 2];
 1080  is2_down = X->Def.Tpow[2 * isite2 - 1];
 1081 
 1082  num1 = 0;
 1083  num2 = 0;
 1084 
// Both occupations are fixed by this rank's bits: num1, num2 in {0, 1, 2}.
 1085  ibit1_up = (long int)myrank&is1_up;
 1086  num1 += ibit1_up / is1_up;
 1087  ibit1_down = (long int)myrank&is1_down;
 1088  num1 += ibit1_down / is1_down;
 1089 
 1090  ibit2_up = (long int)myrank&is2_up;
 1091  num2 += ibit2_up / is2_up;
 1092  ibit2_down = (long int)myrank&is2_down;
 1093  num2 += ibit2_down / is2_down;
 1094 
 1095 #pragma omp parallel for default(none) shared(list_Diagonal) \
 1096  firstprivate(i_max, dtmp_V, num1, num2) private(j)
 1097  for (j = 1; j <= i_max; j++) list_Diagonal[j] += num1 * num2*dtmp_V;
 1098 
 1099  break;/*case HubbardGC, KondoGC, Hubbard, Kondo:*/
 1100 
 1101  case Spin:
 1102  case SpinGC:
// Every spin site carries exactly one particle, so n_i n_j = 1.
// NOTE(review): j is not listed in this default(none) pragma's clauses —
// verify against the real source file.
 1103 #pragma omp parallel for default(none) shared(list_Diagonal) firstprivate(i_max, dtmp_V)
 1104  for (j = 1; j <= i_max; j++) {
 1105  list_Diagonal[j] += dtmp_V;
 1106  }
 1107  break;/*case Spin, SpinGC*/
 1108 
 1109  default:
 1110  fprintf(stdoutMPI, "Error: CalcModel %d is incorrect.\n", X->Def.iCalcModel);
 1111  return -1;
 1112 
 1113  }/*switch (X->Def.iCalcModel)*/
 1114 
 1115  return 0;
 1116 
 1117  }/*if (isite1 > X->Def.Nsite)*/
 1118  else if (isite2 > X->Def.Nsite /* => isite1 */) {
 1119 
// First switch: precompute the masks and num2 (occupation of the remote
// site isite2, fixed by the rank bits). num1 is computed per state below.
 1120  switch (X->Def.iCalcModel) {
 1121  case HubbardGC:
 1122  case KondoGC:
 1123  case Hubbard:
 1124  case Kondo:
 1125  is1_up = X->Def.Tpow[2 * isite1 - 2];
 1126  is1_down = X->Def.Tpow[2 * isite1 - 1];
 1127  is2_up = X->Def.Tpow[2 * isite2 - 2];
 1128  is2_down = X->Def.Tpow[2 * isite2 - 1];
 1129  num2 = 0;
 1130  ibit2_up = (long int)myrank&is2_up;
 1131  num2 += ibit2_up / is2_up;
 1132  ibit2_down = (long int)myrank&is2_down;
 1133  num2 += ibit2_down / is2_down;
 1134  break;
 1135 
 1136  case Spin:
 1137  case SpinGC:
 1138  break;
 1139 
 1140  default:
 1141  fprintf(stdoutMPI, "Error: CalcModel %d is incorrect.\n", X->Def.iCalcModel);
 1142  return -1;
 1143  }
 1144 
// Second switch: accumulate into list_Diagonal using the precomputed values.
 1145  switch (X->Def.iCalcModel) {
 1146 
 1147  case HubbardGC:
 1148 
 1149 #pragma omp parallel for default(none) shared(list_Diagonal) \
 1150 firstprivate(i_max, dtmp_V, num2, is1_up, is1_down) \
 1151 private(num1, ibit1_up, ibit1_down, j)
 1152  for (j = 1; j <= i_max; j++) {
 1153  num1 = 0;
// Grand canonical: basis state j is encoded directly as the bit pattern j-1.
 1154  ibit1_up = (j - 1)&is1_up;
 1155  num1 += ibit1_up / is1_up;
 1156  ibit1_down = (j - 1)&is1_down;
 1157  num1 += ibit1_down / is1_down;
 1158 
 1159  list_Diagonal[j] += num1 * num2*dtmp_V;
 1160  }
 1161 
 1162  break;/*case HubbardGC*/
 1163 
 1164  case KondoGC:
 1165  case Hubbard:
 1166  case Kondo:
 1167 
 1168 #pragma omp parallel for default(none) shared(list_1, list_Diagonal) \
 1169 firstprivate(i_max, dtmp_V, is1_up, is1_down, num2) \
 1170 private(num1, ibit1_up, ibit1_down, j)
 1171  for (j = 1; j <= i_max; j++) {
 1172  num1 = 0;
// Canonical: list_1[j] maps the basis index to its bit configuration.
 1173  ibit1_up = list_1[j] & is1_up;
 1174  num1 += ibit1_up / is1_up;
 1175  ibit1_down = list_1[j] & is1_down;
 1176  num1 += ibit1_down / is1_down;
 1177 
 1178  list_Diagonal[j] += num1 * num2*dtmp_V;
 1179  }
 1180  break;/*case KondoGC, Hubbard, Kondo:*/
 1181 
 1182  case Spin:
 1183  case SpinGC:
 1184 #pragma omp parallel for default(none) shared(list_Diagonal) firstprivate(i_max, dtmp_V)
 1185  for (j = 1; j <= i_max; j++) {
 1186  list_Diagonal[j] += dtmp_V;
 1187  }
 1188  break;/* case Spin, SpinGC:*/
 1189 
 1190  default:
 1191  fprintf(stdoutMPI, "Error: CalcModel %d is incorrect.\n", X->Def.iCalcModel);
 1192  return -1;
 1193 
 1194  }/*switch (X->Def.iCalcModel)*/
 1195 
 1196  return 0;
 1197 
 1198  }/*else if (isite2 > X->Def.Nsite)*/
 1199  else {
// Both sites local: both occupations are decoded per basis state.
 1200  switch (X->Def.iCalcModel) {
 1201  case HubbardGC: //list_1[j] -> j-1
 1202  is1_up = X->Def.Tpow[2 * isite1 - 2];
 1203  is1_down = X->Def.Tpow[2 * isite1 - 1];
 1204  is2_up = X->Def.Tpow[2 * isite2 - 2];
 1205  is2_down = X->Def.Tpow[2 * isite2 - 1];
 1206 #pragma omp parallel for default(none) shared( list_Diagonal) firstprivate(i_max, dtmp_V, is1_up, is1_down, is2_up, is2_down) private(num1, ibit1_up, ibit1_down, num2, ibit2_up, ibit2_down)
 1207  for (j = 1; j <= i_max; j++) {
 1208  num1 = 0;
 1209  num2 = 0;
 1210  ibit1_up = (j - 1)&is1_up;
 1211  num1 += ibit1_up / is1_up;
 1212  ibit1_down = (j - 1)&is1_down;
 1213  num1 += ibit1_down / is1_down;
 1214 
 1215  ibit2_up = (j - 1)&is2_up;
 1216  num2 += ibit2_up / is2_up;
 1217  ibit2_down = (j - 1)&is2_down;
 1218  num2 += ibit2_down / is2_down;
 1219 
 1220  list_Diagonal[j] += num1 * num2*dtmp_V;
 1221  }
 1222  break;
 1223  case KondoGC:
 1224  case Hubbard:
 1225  case Kondo:
 1226  is1_up = X->Def.Tpow[2 * isite1 - 2];
 1227  is1_down = X->Def.Tpow[2 * isite1 - 1];
 1228  is2_up = X->Def.Tpow[2 * isite2 - 2];
 1229  is2_down = X->Def.Tpow[2 * isite2 - 1];
 1230 
 1231 #pragma omp parallel for default(none) shared(list_1, list_Diagonal) firstprivate(i_max, dtmp_V, is1_up, is1_down, is2_up, is2_down) private(num1, ibit1_up, ibit1_down, num2, ibit2_up, ibit2_down)
 1232  for (j = 1; j <= i_max; j++) {
 1233  num1 = 0;
 1234  num2 = 0;
 1235  ibit1_up = list_1[j] & is1_up;
 1236  num1 += ibit1_up / is1_up;
 1237  ibit1_down = list_1[j] & is1_down;
 1238  num1 += ibit1_down / is1_down;
 1239 
 1240  ibit2_up = list_1[j] & is2_up;
 1241  num2 += ibit2_up / is2_up;
 1242  ibit2_down = list_1[j] & is2_down;
 1243  num2 += ibit2_down / is2_down;
 1244 
 1245  list_Diagonal[j] += num1 * num2*dtmp_V;
 1246  }
 1247  break;
 1248 
 1249  case Spin:
 1250  case SpinGC:
 1251 #pragma omp parallel for default(none) shared(list_Diagonal) firstprivate(i_max, dtmp_V)
 1252  for (j = 1; j <= i_max; j++) {
 1253  list_Diagonal[j] += dtmp_V;
 1254  }
 1255  break;
 1256  default:
 1257  fprintf(stdoutMPI, "Error: CalcModel %d is incorrect.\n", X->Def.iCalcModel);
 1258  return -1;
 1259  }
 1260  }
 1261 
 1262  return 0;
 1263 }
struct DefineList Def
Definition of the system (Hamiltonian), etc.
Definition: struct.hpp:395
FILE * stdoutMPI
File pointer to the standard output defined in InitializeMPI()
Definition: global.cpp:75
int Nsite
Number of sites in the INTRA process region.
Definition: struct.hpp:56
int myrank
Process ID, defined in InitializeMPI()
Definition: global.cpp:73
double * list_Diagonal
Definition: global.cpp:24
long int * Tpow
[2 * DefineList::NsiteMPI] malloc in setmem_def().
Definition: struct.hpp:90
int iCalcModel
Switch for model. 0:Hubbard, 1:Spin, 2:Kondo, 3:HubbardGC, 4:SpinGC, 5:KondoGC, 6:HubbardNConserved.
Definition: struct.hpp:200
struct CheckList Check
Size of the Hilbert space.
Definition: struct.hpp:396
long int * list_1
Definition: global.cpp:25
long int idim_max
The dimension of the Hilbert space of this process.
Definition: struct.hpp:305

◆ SetDiagonalCoulombIntra()

int SetDiagonalCoulombIntra ( long int  isite1,
double  dtmp_V,
struct BindStruct X 
)

Calculate the components for Coulombintra interaction, \( U_i n_ {i \uparrow}n_{i \downarrow} \).

Parameters
isite1[in] a site number
dtmp_V[in] A value of coulombintra interaction \( U_i \)
X[in] Define list to get dimension number
Return values
-1fail to calculate the diagonal component.
0succeed to calculate the diagonal component.
Version
0.1
Author
Takahiro Misawa (The University of Tokyo)
Kazuyoshi Yoshimi (The University of Tokyo)

Definition at line 762 of file diagonalcalc.cpp.

References BindStruct::Check, BindStruct::Def, DefineList::iCalcModel, CheckList::idim_max, list_1, list_Diagonal, myrank, DefineList::Nsite, SetDiagonalChemi(), stdoutMPI, and DefineList::Tpow.

Referenced by diagonalcalc(), and diagonalcalcForTE().

// Accumulate the on-site Coulomb term U_i n_{i up} n_{i down} into
// list_Diagonal: dtmp_V is added for every basis state in which both the
// up and down bits of isite1 are occupied. Spin models have no on-site
// Coulomb term and contribute nothing. Returns 0 on success, -1 for an
// unknown CalcModel.
 766  {
 767  long int is;
 768  long int ibit;
 769  long int is1_up, is1_down;
 770 
 771  long int j;
 772  long int i_max = X->Check.idim_max;
 773 
 774  /*
 775  When isite1 is in the inter process region
 776  */
// Remote site: double occupancy is fixed by this rank's bits, so the term
// is either a uniform shift of dtmp_V or nothing.
 777  if (isite1 > X->Def.Nsite) {
 778 
 779  switch (X->Def.iCalcModel) {
 780 
 781  case HubbardGC:
 782  case KondoGC:
 783  case Hubbard:
 784  case Kondo:
 785 
// is = up bit | down bit of isite1; ibit == is means doubly occupied.
 786  is1_up = X->Def.Tpow[2 * isite1 - 2];
 787  is1_down = X->Def.Tpow[2 * isite1 - 1];
 788  is = is1_up + is1_down;
 789  ibit = (long int)myrank & is;
 790  if (ibit == is) {
 791 #pragma omp parallel for default(none) shared(list_Diagonal) \
 792  firstprivate(i_max, dtmp_V) private(j)
 793  for (j = 1; j <= i_max; j++) list_Diagonal[j] += dtmp_V;
 794  }
 795 
 796  break; /*case HubbardGC, KondoGC, Hubbard, Kondo:*/
 797 
 798  case Spin:
 799  case SpinGC:
 800  /*
 801  They do not have the Coulomb term
 802  */
 803  break;
 804 
 805  default:
 806  fprintf(stdoutMPI, "Error: CalcModel %d is incorrect.\n", X->Def.iCalcModel);
 807  return -1;
 808  //break;
 809 
 810  }/*switch (X->Def.iCalcModel)*/
 811 
 812  return 0;
 813 
 814  }/*if (isite1 > X->Def.Nsite)*/
 815  else {
 816  switch (X->Def.iCalcModel) {
 817  case HubbardGC:
 818  is1_up = X->Def.Tpow[2 * isite1 - 2];
 819  is1_down = X->Def.Tpow[2 * isite1 - 1];
 820  is = is1_up + is1_down;
// Grand canonical: state j is the bit pattern j-1.
// NOTE(review): j is not listed in this default(none) pragma's clauses —
// verify against the real source file.
 821 #pragma omp parallel for default(none) shared(list_Diagonal, list_1) firstprivate(i_max, is, dtmp_V) private(ibit)
 822  for (j = 1; j <= i_max; j++) {
 823  ibit = (j - 1)&is;
 824  if (ibit == is) {
 825  list_Diagonal[j] += dtmp_V;
 826  }
 827  }
 828 
 829  break;
 830  case KondoGC:
 831  case Hubbard:
 832  case Kondo:
 833  is1_up = X->Def.Tpow[2 * isite1 - 2];
 834  is1_down = X->Def.Tpow[2 * isite1 - 1];
 835  is = is1_up + is1_down;
 836 #pragma omp parallel for default(none) shared(list_Diagonal, list_1) firstprivate(i_max, is, dtmp_V) private(ibit)
 837  for (j = 1; j <= i_max; j++) {
// Canonical: list_1[j] maps the basis index to its bit configuration.
 838  ibit = list_1[j] & is;
 839  if (ibit == is) {
 840  list_Diagonal[j] += dtmp_V;
 841  }
 842  }
 843  break;
 844 
 845  case Spin:
 846  case SpinGC:
 847  break;
 848 
 849  default:
 850  fprintf(stdoutMPI, "Error: CalcModel %d is incorrect.\n", X->Def.iCalcModel);
 851  return -1;
 852  //break;
 853  }
 854  }
 855  return 0;
 856 }
struct DefineList Def
Definition of system (Hamiltonian) etc.
Definition: struct.hpp:395
FILE * stdoutMPI
File pointer to the standard output defined in InitializeMPI()
Definition: global.cpp:75
int Nsite
Number of sites in the INTRA process region.
Definition: struct.hpp:56
int myrank
Process ID, defined in InitializeMPI()
Definition: global.cpp:73
double * list_Diagonal
Definition: global.cpp:24
long int * Tpow
[2 * DefineList::NsiteMPI] malloc in setmem_def().
Definition: struct.hpp:90
int iCalcModel
Switch for model. 0:Hubbard, 1:Spin, 2:Kondo, 3:HubbardGC, 4:SpinGC, 5:KondoGC, 6:HubbardNConserved.
Definition: struct.hpp:200
struct CheckList Check
Size of the Hilbert space.
Definition: struct.hpp:396
long int * list_1
Definition: global.cpp:25
long int idim_max
The dimension of the Hilbert space of this process.
Definition: struct.hpp:305

◆ SetDiagonalHund()

int SetDiagonalHund ( long int  isite1,
long int  isite2,
double  dtmp_V,
struct BindStruct X 
)

Calculate the components for Hund interaction, \( H_{ij}(n_ {i\uparrow}n_{j\uparrow}+ n_ {i\downarrow}n_{j\downarrow})\).

Parameters
isite1[in] a site number \(i \)
isite2[in] a site number \(j \)
dtmp_V[in] A value of Hund interaction \( H_{ij} \)
X[in] Define list to get the operator information.
Return values
-1fail to calculate the diagonal component.
0succeed to calculate the diagonal component.
Version
0.1
Author
Takahiro Misawa (The University of Tokyo)
Kazuyoshi Yoshimi (The University of Tokyo)

Definition at line 1278 of file diagonalcalc.cpp.

References BindStruct::Check, BindStruct::Def, DefineList::iCalcModel, CheckList::idim_max, list_1, list_Diagonal, myrank, DefineList::Nsite, SetDiagonalInterAll(), stdoutMPI, and DefineList::Tpow.

Referenced by diagonalcalc(), and SetDiagonalCoulombInter().

1283  {
 /*
  Adds the Hund term H_{ij} (n_{i up} n_{j up} + n_{i down} n_{j down}) to
  list_Diagonal for every basis state. Three cases are distinguished by where
  the two sites live:
   (1) both sites in the inter-process (MPI) region: both occupations are fixed
       by this rank's bit pattern, giving a uniform shift of the diagonal;
   (2) only isite2 in that region: its occupation comes from myrank, while
       isite1's occupation is evaluated per basis state;
   (3) both sites local: both occupations are read from each basis state
       ((j - 1) for grand-canonical models, list_1[j] otherwise).
 */
1284 
1285  long int is1_up, is1_down;
1286  long int ibit1_up, ibit1_down;
1287  long int num1_up, num1_down;
1288  long int is2_up, is2_down;
1289  long int ibit2_up, ibit2_down;
1290  long int num2_up, num2_down;
1291 
1292  long int is_up;
1293  long int ibit;
1294  long int j;
1295  long int i_max = X->Check.idim_max;
1296  /*
1297  Force isite1 <= isite2
1298  */
1299  if (isite2 < isite1) {
1300  j = isite2;
1301  isite2 = isite1;
1302  isite1 = j;
1303  }
1304  /*
1305  When isite1 & site2 are in the inter process region
1306  */
1307  if (/*isite2 >= */ isite1 > X->Def.Nsite) {
1308 
1309  switch (X->Def.iCalcModel) {
1310 
1311  case HubbardGC:
1312  case KondoGC:
1313  case Hubbard:
1314  case Kondo:
1315 
  /* Up/down bit masks of each site; the 0/1 occupations are extracted from
     myrank (both sites lie in the MPI region), so the contribution is the
     same constant added to every diagonal element of this process. */
1316  is1_up = X->Def.Tpow[2 * isite1 - 2];
1317  is1_down = X->Def.Tpow[2 * isite1 - 1];
1318  is2_up = X->Def.Tpow[2 * isite2 - 2];
1319  is2_down = X->Def.Tpow[2 * isite2 - 1];
1320 
1321  num1_up = 0;
1322  num1_down = 0;
1323  num2_up = 0;
1324  num2_down = 0;
1325 
1326  ibit1_up = (long int)myrank &is1_up;
1327  num1_up = ibit1_up / is1_up;
1328  ibit1_down = (long int)myrank &is1_down;
1329  num1_down = ibit1_down / is1_down;
1330 
1331  ibit2_up = (long int)myrank &is2_up;
1332  num2_up = ibit2_up / is2_up;
1333  ibit2_down = (long int)myrank &is2_down;
1334  num2_down = ibit2_down / is2_down;
1335 
1336 #pragma omp parallel for default(none) shared(list_Diagonal) \
1337  firstprivate(i_max, dtmp_V, num1_up, num1_down, num2_up, num2_down) private(j)
1338  for (j = 1; j <= i_max; j++)
1339  list_Diagonal[j] += dtmp_V * (num1_up*num2_up + num1_down * num2_down);
1340 
1341  break;/*case HubbardGC, KondoGC, Hubbard, Kondo:*/
1342 
1343  case SpinGC:
1344  case Spin:
1345 
  /* S=1/2 spins: the Hund term contributes only when the two spins are
     parallel, i.e. both up-bits of myrank set (ibit == is_up) or both
     clear (ibit == 0). */
1346  is1_up = X->Def.Tpow[isite1 - 1];
1347  is2_up = X->Def.Tpow[isite2 - 1];
1348  is_up = is1_up + is2_up;
1349  ibit = (long int)myrank & is_up;
1350  if (ibit == 0 || ibit == is_up) {
1351 #pragma omp parallel for default(none) shared(list_Diagonal) \
1352 firstprivate(i_max, dtmp_V) private(j)
1353  for (j = 1; j <= i_max; j++) list_Diagonal[j] += dtmp_V;
1354  }
1355  break;/*case SpinGC, Spin:*/
1356 
1357  default:
1358  fprintf(stdoutMPI, "Error: CalcModel %d is incorrect.\n", X->Def.iCalcModel);
1359  return -1;
1360  }
1361 
1362  return 0;
1363 
1364  }/*if (isite1 > X->Def.Nsite)*/
1365  else if (isite2 > X->Def.Nsite /* >= isite1 */) {
1366 
  /* isite2 is in the MPI region, isite1 is local: num2 is fixed by
     myrank's bits, num1 is evaluated per basis state. */
1367  switch (X->Def.iCalcModel) {
1368 
1369  case HubbardGC:
1370 
1371  is1_up = X->Def.Tpow[2 * isite1 - 2];
1372  is1_down = X->Def.Tpow[2 * isite1 - 1];
1373  is2_up = X->Def.Tpow[2 * isite2 - 2];
1374  is2_down = X->Def.Tpow[2 * isite2 - 1];
1375 
1376  num2_up = 0;
1377  num2_down = 0;
1378 
1379  ibit2_up = (long int)myrank &is2_up;
1380  num2_up = ibit2_up / is2_up;
1381  ibit2_down = (long int)myrank &is2_down;
1382  num2_down = ibit2_down / is2_down;
1383 
1384 #pragma omp parallel for default(none) shared( list_Diagonal) \
1385 firstprivate(i_max, dtmp_V, num2_up, num2_down, is1_up, is1_down) \
1386 private(num1_up, num1_down, ibit1_up, ibit1_down, j)
1387  for (j = 1; j <= i_max; j++) {
1388  num1_up = 0;
1389  num1_down = 0;
1390 
1391  ibit1_up = (j - 1)&is1_up;
1392  num1_up = ibit1_up / is1_up;
1393  ibit1_down = (j - 1)&is1_down;
1394  num1_down = ibit1_down / is1_down;
1395 
1396  list_Diagonal[j] += dtmp_V * (num1_up*num2_up + num1_down * num2_down);
1397  }
1398  break;/*case HubbardGC:*/
1399 
1400  case KondoGC:
1401  case Hubbard:
1402  case Kondo:
1403 
  /* Same as HubbardGC above, but the local occupation is read from the
     canonical-basis lookup list_1[j] instead of (j - 1). */
1404  is1_up = X->Def.Tpow[2 * isite1 - 2];
1405  is1_down = X->Def.Tpow[2 * isite1 - 1];
1406  is2_up = X->Def.Tpow[2 * isite2 - 2];
1407  is2_down = X->Def.Tpow[2 * isite2 - 1];
1408 
1409  num2_up = 0;
1410  num2_down = 0;
1411 
1412  ibit2_up = (long int)myrank&is2_up;
1413  num2_up = ibit2_up / is2_up;
1414  ibit2_down = (long int)myrank&is2_down;
1415  num2_down = ibit2_down / is2_down;
1416 
1417 #pragma omp parallel for default(none) shared(list_1, list_Diagonal) \
1418 firstprivate(i_max, dtmp_V, num2_up, num2_down, is1_up, is1_down) \
1419 private(num1_up, num1_down, ibit1_up, ibit1_down, j)
1420  for (j = 1; j <= i_max; j++) {
1421  num1_up = 0;
1422  num1_down = 0;
1423 
1424  ibit1_up = list_1[j] & is1_up;
1425  num1_up = ibit1_up / is1_up;
1426  ibit1_down = list_1[j] & is1_down;
1427  num1_down = ibit1_down / is1_down;
1428 
1429  list_Diagonal[j] += dtmp_V * (num1_up*num2_up + num1_down * num2_down);
1430  }
1431  break;/*case KondoGC, Hubbard, Kondo:*/
1432 
1433  case SpinGC:
  /* Spin case: contribute when spin 1 (local bit) is parallel to
     spin 2 (myrank bit) — both up or both down. */
1434  is1_up = X->Def.Tpow[isite1 - 1];
1435  is2_up = X->Def.Tpow[isite2 - 1];
1436  ibit2_up = (long int)myrank & is2_up;
1437 
1438  if (ibit2_up == is2_up) {
1439 #pragma omp parallel for default(none) shared(list_Diagonal) \
1440 firstprivate(i_max, dtmp_V, is1_up) private(j, ibit1_up)
1441  for (j = 1; j <= i_max; j++) {
1442  ibit1_up = (j - 1) & is1_up;
1443  if (ibit1_up == is1_up) {
1444  list_Diagonal[j] += dtmp_V;
1445  }
1446  }
1447  }
1448  else if (ibit2_up == 0) {
1449 #pragma omp parallel for default(none) shared(list_Diagonal) \
1450 firstprivate(i_max, dtmp_V, is1_up) private(j, ibit1_up)
1451  for (j = 1; j <= i_max; j++) {
1452  ibit1_up = (j - 1) & is1_up;
1453  if (ibit1_up == 0) {
1454  list_Diagonal[j] += dtmp_V;
1455  }
1456  }
1457  }
1458  break;/*case SpinGC:*/
1459 
1460  case Spin:
1461  is1_up = X->Def.Tpow[isite1 - 1];
1462  is2_up = X->Def.Tpow[isite2 - 1];
1463  ibit2_up = (long int)myrank & is2_up;
1464 
1465  if (ibit2_up == is2_up) {
1466 #pragma omp parallel for default(none) shared(list_1, list_Diagonal) \
1467 firstprivate(i_max, dtmp_V, is1_up) private(j, ibit1_up)
1468  for (j = 1; j <= i_max; j++) {
1469  ibit1_up = list_1[j] & is1_up;
1470  if (ibit1_up == is1_up) {
1471  list_Diagonal[j] += dtmp_V;
1472  }
1473  }
1474  }
1475  else if (ibit2_up == 0) {
1476 #pragma omp parallel for default(none) shared(list_1, list_Diagonal) \
1477 firstprivate(i_max, dtmp_V, is1_up) private(j, ibit1_up)
1478  for (j = 1; j <= i_max; j++) {
1479  ibit1_up = list_1[j] & is1_up;
1480  if (ibit1_up == 0) {
1481  list_Diagonal[j] += dtmp_V;
1482  }
1483  }
1484  }
1485  break;/*case Spin:*/
1486 
1487  default:
1488  fprintf(stdoutMPI, "Error: CalcModel %d is incorrect.\n", X->Def.iCalcModel);
1489  return -1;
1490 
1491  }/*switch (X->Def.iCalcModel)*/
1492 
1493  return 0;
1494 
1495  }/*else if (isite2 > X->Def.Nsite)*/
1496  else {
  /* Both sites local: both occupations come from each basis state,
     (j - 1) for grand-canonical models, list_1[j] otherwise. */
1497  switch (X->Def.iCalcModel) {
1498  case HubbardGC: // list_1[j] -> j-1
1499  is1_up = X->Def.Tpow[2 * isite1 - 2];
1500  is1_down = X->Def.Tpow[2 * isite1 - 1];
1501  is2_up = X->Def.Tpow[2 * isite2 - 2];
1502  is2_down = X->Def.Tpow[2 * isite2 - 1];
1503 
1504 #pragma omp parallel for default(none) shared( list_Diagonal) firstprivate(i_max, dtmp_V, is1_up, is1_down, is2_up, is2_down) private(num1_up, num1_down, num2_up, num2_down, ibit1_up, ibit1_down, ibit2_up, ibit2_down)
1505  for (j = 1; j <= i_max; j++) {
1506  num1_up = 0;
1507  num1_down = 0;
1508  num2_up = 0;
1509  num2_down = 0;
1510 
1511  ibit1_up = (j - 1)&is1_up;
1512  num1_up = ibit1_up / is1_up;
1513  ibit1_down = (j - 1)&is1_down;
1514  num1_down = ibit1_down / is1_down;
1515 
1516  ibit2_up = (j - 1)&is2_up;
1517  num2_up = ibit2_up / is2_up;
1518  ibit2_down = (j - 1)&is2_down;
1519  num2_down = ibit2_down / is2_down;
1520 
1521  list_Diagonal[j] += dtmp_V * (num1_up*num2_up + num1_down * num2_down);
1522  }
1523  break;
1524  case KondoGC:
1525  case Hubbard:
1526  case Kondo:
1527  is1_up = X->Def.Tpow[2 * isite1 - 2];
1528  is1_down = X->Def.Tpow[2 * isite1 - 1];
1529  is2_up = X->Def.Tpow[2 * isite2 - 2];
1530  is2_down = X->Def.Tpow[2 * isite2 - 1];
1531 
1532 #pragma omp parallel for default(none) shared(list_1, list_Diagonal) firstprivate(i_max, dtmp_V, is1_up, is1_down, is2_up, is2_down) private(num1_up, num1_down, num2_up, num2_down, ibit1_up, ibit1_down, ibit2_up, ibit2_down)
1533  for (j = 1; j <= i_max; j++) {
1534  num1_up = 0;
1535  num1_down = 0;
1536  num2_up = 0;
1537  num2_down = 0;
1538 
1539  ibit1_up = list_1[j] & is1_up;
1540  num1_up = ibit1_up / is1_up;
1541  ibit1_down = list_1[j] & is1_down;
1542  num1_down = ibit1_down / is1_down;
1543 
1544  ibit2_up = list_1[j] & is2_up;
1545  num2_up = ibit2_up / is2_up;
1546  ibit2_down = list_1[j] & is2_down;
1547  num2_down = ibit2_down / is2_down;
1548 
1549  list_Diagonal[j] += dtmp_V * (num1_up*num2_up + num1_down * num2_down);
1550  }
1551  break;
1552 
1553  case SpinGC:
  /* Contribute dtmp_V when the two local spins are parallel
     (both bits of is_up set or both clear in the basis state). */
1554  is1_up = X->Def.Tpow[isite1 - 1];
1555  is2_up = X->Def.Tpow[isite2 - 1];
1556  is_up = is1_up + is2_up;
1557 #pragma omp parallel for default(none) shared(list_1, list_Diagonal) firstprivate(i_max, dtmp_V, is1_up, is2_up, is_up) private(j, ibit)
1558  for (j = 1; j <= i_max; j++) {
1559  ibit = (j - 1) & is_up;
1560  if (ibit == 0 || ibit == is_up) {
1561  list_Diagonal[j] += dtmp_V;
1562  }
1563  }
1564  break;
1565 
1566  case Spin:
1567  is1_up = X->Def.Tpow[isite1 - 1];
1568  is2_up = X->Def.Tpow[isite2 - 1];
1569  is_up = is1_up + is2_up;
1570 #pragma omp parallel for default(none) shared(list_1, list_Diagonal) firstprivate(i_max, dtmp_V, is1_up, is2_up, is_up) private(j, ibit)
1571  for (j = 1; j <= i_max; j++) {
1572  ibit = list_1[j] & is_up;
1573  if (ibit == 0 || ibit == is_up) {
1574  list_Diagonal[j] += dtmp_V;
1575  }
1576  }
1577  break;
1578  default:
1579  fprintf(stdoutMPI, "Error: CalcModel %d is incorrect.\n", X->Def.iCalcModel);
1580  return -1;
1581  }
1582  }
1583  return 0;
1584 }
struct DefineList Def
Definition of system (Hamiltonian) etc.
Definition: struct.hpp:395
FILE * stdoutMPI
File pointer to the standard output defined in InitializeMPI()
Definition: global.cpp:75
int Nsite
Number of sites in the INTRA process region.
Definition: struct.hpp:56
int myrank
Process ID, defined in InitializeMPI()
Definition: global.cpp:73
double * list_Diagonal
Definition: global.cpp:24
long int * Tpow
[2 * DefineList::NsiteMPI] malloc in setmem_def().
Definition: struct.hpp:90
int iCalcModel
Switch for model. 0:Hubbard, 1:Spin, 2:Kondo, 3:HubbardGC, 4:SpinGC, 5:KondoGC, 6:HubbardNConserved.
Definition: struct.hpp:200
struct CheckList Check
Size of the Hilbert space.
Definition: struct.hpp:396
long int * list_1
Definition: global.cpp:25
long int idim_max
The dimension of the Hilbert space of this process.
Definition: struct.hpp:305

◆ SetDiagonalInterAll()

int SetDiagonalInterAll ( long int  isite1,
long int  isite2,
long int  isigma1,
long int  isigma2,
double  dtmp_V,
struct BindStruct X 
)

Calculate the components for general two-body diagonal interaction, \( H_{i\sigma_1 j\sigma_2} n_ {i\sigma_1}n_{j\sigma_2}\).

Parameters
isite1[in] a site number \(i \)
isite2[in] a site number \(j \)
isigma1[in] a spin index at \(i \) site.
isigma2[in] a spin index at \(j \) site.
dtmp_V[in] A value of general two-body diagonal interaction \( H_{i\sigma_1 j\sigma_2} \)
X[in] Define list to get the operator information.
Return values
-1fail to calculate the diagonal component.
0succeed to calculate the diagonal component.
Version
0.1
Author
Takahiro Misawa (The University of Tokyo)
Kazuyoshi Yoshimi (The University of Tokyo)

Definition at line 1601 of file diagonalcalc.cpp.

References BitCheckGeneral(), BindStruct::Check, BindStruct::Def, diagonalcalc(), DefineList::iCalcModel, CheckList::idim_max, DefineList::iFlgGeneralSpin, list_1, list_Diagonal, myrank, DefineList::Nsite, DefineList::SiteToBit, stdoutMPI, DefineList::Tpow, X_Spin_CisAis(), and X_SpinGC_CisAis().

Referenced by diagonalcalc(), and SetDiagonalHund().

1609 {
 /*
  Adds the general diagonal two-body term H_{i sigma1, j sigma2} n_{i sigma1} n_{j sigma2}
  to list_Diagonal. As in the other SetDiagonal* routines, the work is split by
  locality: both sites in the inter-process (MPI) region (occupations fixed by
  myrank's bit pattern), only isite2 in that region, or both sites local
  (occupations evaluated per basis state).
 */
1610  long int is1_spin;
1611  long int is2_spin;
1612  long int is1_up;
1613  long int is2_up;
1614 
1615  long int ibit1_spin;
1616  long int ibit2_spin;
1617 
1618  long int num1;
1619  long int num2;
1620 
1621  long int j;
1622  long int i_max = X->Check.idim_max;
1623 
1624  /*
1625  Force isite1 <= isite2
1626  */
1627  if (isite2 < isite1) {
1628  j = isite2;
1629  isite2 = isite1;
1630  isite1 = j;
1631  j = isigma2;
1632  isigma2 = isigma1;
1633  isigma1 = j;
1634  }
1635  /*
1636  When isite1 & site2 are in the inter process region
1637  */
1638  if (isite1 > X->Def.Nsite) {
1639 
  /* Both sites in the MPI region: num1 and num2 are 0/1 occupations taken
     from this rank's bit pattern, so the term is a uniform diagonal shift. */
1640  switch (X->Def.iCalcModel) {
1641 
1642  case HubbardGC:
1643  case KondoGC:
1644  case Hubbard:
1645  case Kondo:
1646 
1647  is1_spin = X->Def.Tpow[2 * isite1 - 2 + isigma1];
1648  is2_spin = X->Def.Tpow[2 * isite2 - 2 + isigma2];
1649 
1650  num1 = 0;
1651  ibit1_spin = (long int)myrank&is1_spin;
1652  num1 += ibit1_spin / is1_spin;
1653 
1654  num2 = 0;
1655  ibit2_spin = (long int)myrank&is2_spin;
1656  num2 += ibit2_spin / is2_spin;
1657 
1658 #pragma omp parallel for default(none) shared(list_Diagonal) \
1659 firstprivate(i_max, dtmp_V, num2, num1) private(ibit1_spin, j)
1660  for (j = 1; j <= i_max; j++) list_Diagonal[j] += num1 * num2*dtmp_V;
1661 
1662  break;/*case HubbardGC, KondoGC, Hubbard, Kondo:*/
1663 
1664  case SpinGC:
1665  case Spin:
1666 
1667  if (X->Def.iFlgGeneralSpin == FALSE) {
1668  is1_up = X->Def.Tpow[isite1 - 1];
1669  is2_up = X->Def.Tpow[isite2 - 1];
1670  num1 = X_SpinGC_CisAis((long int)myrank + 1, is1_up, isigma1);
1671  num2 = X_SpinGC_CisAis((long int)myrank + 1, is2_up, isigma2);
1672 
1673 #pragma omp parallel for default(none) shared(list_Diagonal) \
1674 firstprivate(i_max, dtmp_V, is1_up, isigma1, X, num1, num2) private(j)
1675  for (j = 1; j <= i_max; j++) {
1676  list_Diagonal[j] += num1 * num2*dtmp_V;
1677  }
1678  }/*if (X->Def.iFlgGeneralSpin == FALSE)*/
1679  else {//start:generalspin
  /* General spin: BitCheckGeneral returns 1 when the rank's state has the
     requested sigma at the site; add dtmp_V only when both sites match. */
1680  num1 = BitCheckGeneral((long int)myrank, isite1, isigma1,
1681  X->Def.SiteToBit, X->Def.Tpow);
1682  num2 = BitCheckGeneral((long int)myrank, isite2, isigma2,
1683  X->Def.SiteToBit, X->Def.Tpow);
1684  if (num1 != 0 && num2 != 0) {
1685 #pragma omp parallel for default(none) shared(list_Diagonal) \
1686 firstprivate(i_max, dtmp_V, num1, X) private(j)
1687  for (j = 1; j <= i_max; j++) list_Diagonal[j] += dtmp_V * num1;
1688  }
1689  }/*if (X->Def.iFlgGeneralSpin == TRUE)*/
1690 
1691  break;/*case SpinGC, Spin:*/
1692 
1693  default:
1694  fprintf(stdoutMPI, "Error: CalcModel %d is incorrect.\n", X->Def.iCalcModel);
1695  return -1;
1696 
1697  }/*switch (X->Def.iCalcModel)*/
1698 
1699  return 0;
1700 
1701  }/*if (isite1 > X->Def.Nsite)*/
1702  else if (isite2 > X->Def.Nsite) {
1703 
  /* Only isite2 in the MPI region: num2 is fixed by myrank's bits,
     num1 is evaluated for every basis state. */
1704  switch (X->Def.iCalcModel) {
1705 
1706  case HubbardGC:
1707 
1708  is1_spin = X->Def.Tpow[2 * isite1 - 2 + isigma1];
1709  is2_spin = X->Def.Tpow[2 * isite2 - 2 + isigma2];
1710 
1711  num2 = 0;
1712  ibit2_spin = (long int)myrank&is2_spin;
1713  num2 += ibit2_spin / is2_spin;
1714 
1715 #pragma omp parallel for default(none) shared(list_Diagonal) \
1716 firstprivate(i_max, dtmp_V, is1_spin, num2) private(num1, ibit1_spin, j)
1717  for (j = 1; j <= i_max; j++) {
1718  num1 = 0;
1719  ibit1_spin = (j - 1)&is1_spin;
1720  num1 += ibit1_spin / is1_spin;
1721  list_Diagonal[j] += num1 * num2*dtmp_V;
1722  }
1723  break;/*case HubbardGC:*/
1724 
1725  case KondoGC:
1726  case Hubbard:
1727  case Kondo:
1728 
1729  is1_spin = X->Def.Tpow[2 * isite1 - 2 + isigma1];
1730  is2_spin = X->Def.Tpow[2 * isite2 - 2 + isigma2];
1731 
1732  num2 = 0;
1733  ibit2_spin = (long int)myrank&is2_spin;
1734  num2 += ibit2_spin / is2_spin;
1735 
1736 #pragma omp parallel for default(none) shared(list_Diagonal, list_1) \
1737 firstprivate(i_max, dtmp_V, is1_spin, num2) private(num1, ibit1_spin, j)
1738  for (j = 1; j <= i_max; j++) {
1739  num1 = 0;
1740  ibit1_spin = list_1[j] & is1_spin;
1741  num1 += ibit1_spin / is1_spin;
1742  list_Diagonal[j] += num1 * num2*dtmp_V;
1743  }
1744  break;/*case KondoGC, Hubbard, Kondo:*/
1745 
1746  case SpinGC:
1747 
1748  if (X->Def.iFlgGeneralSpin == FALSE) {
1749  is1_up = X->Def.Tpow[isite1 - 1];
1750  is2_up = X->Def.Tpow[isite2 - 1];
1751  num2 = X_SpinGC_CisAis((long int)myrank + 1, is2_up, isigma2);
1752 
1753 #pragma omp parallel for default(none) shared(list_Diagonal) \
1754 firstprivate(i_max, dtmp_V, is1_up, isigma1, X, num2) private(j, num1)
1755  for (j = 1; j <= i_max; j++) {
1756  num1 = X_SpinGC_CisAis(j, is1_up, isigma1);
1757  list_Diagonal[j] += num1 * num2*dtmp_V;
1758  }
1759  }/* if (X->Def.iFlgGeneralSpin == FALSE)*/
1760  else {//start:generalspin
1761  num2 = BitCheckGeneral((long int)myrank, isite2, isigma2,
1762  X->Def.SiteToBit, X->Def.Tpow);
1763  if (num2 != 0) {
1764 #pragma omp parallel for default(none) shared(list_Diagonal) \
1765 firstprivate(i_max, dtmp_V, isite1, isigma1, X) private(j, num1)
1766  for (j = 1; j <= i_max; j++) {
1767  num1 = BitCheckGeneral(j - 1, isite1, isigma1, X->Def.SiteToBit, X->Def.Tpow);
1768  list_Diagonal[j] += dtmp_V * num1;
1769  }
1770  }
1771  }/* if (X->Def.iFlgGeneralSpin == TRUE)*/
1772 
1773  break;/*case SpinGC:*/
1774 
1775  case Spin:
1776 
1777  if (X->Def.iFlgGeneralSpin == FALSE) {
1778  is1_up = X->Def.Tpow[isite1 - 1];
1779  is2_up = X->Def.Tpow[isite2 - 1];
1780  num2 = X_SpinGC_CisAis((long int)myrank + 1, is2_up, isigma2);
1781 
1782 #pragma omp parallel for default(none) shared(list_Diagonal) \
1783 firstprivate(i_max, dtmp_V, is1_up, isigma1, X, num2) private(j, num1)
1784  for (j = 1; j <= i_max; j++) {
1785  num1 = X_Spin_CisAis(j, is1_up, isigma1);
1786  list_Diagonal[j] += num1 * num2*dtmp_V;
1787  }
1788  }/* if (X->Def.iFlgGeneralSpin == FALSE)*/
1789  else /* if (X->Def.iFlgGeneralSpin == TRUE)*/ {
1790  num2 = BitCheckGeneral((long int)myrank, isite2, isigma2, \
1791  X->Def.SiteToBit, X->Def.Tpow);
1792  if (num2 != 0) {
1793 #pragma omp parallel for default(none) shared(list_Diagonal, list_1) \
1794 firstprivate(i_max, dtmp_V, isite1, isigma1, X) private(j, num1)
1795  for (j = 1; j <= i_max; j++) {
1796  num1 = BitCheckGeneral(list_1[j], isite1, isigma1, X->Def.SiteToBit, X->Def.Tpow);
1797  list_Diagonal[j] += dtmp_V * num1;
1798  }
1799  }
1800  } /* if (X->Def.iFlgGeneralSpin == TRUE)*/
1801 
1802  break;/*case Spin:*/
1803 
1804  default:
1805  fprintf(stdoutMPI, "Error: CalcModel %d is incorrect.\n", X->Def.iCalcModel);
1806  return -1;
1807 
1808  }/*switch (X->Def.iCalcModel)*/
1809 
1810  return 0;
1811 
1812  }/*else if (isite2 > X->Def.Nsite)*/
1813 
  /* Both sites local: both occupations are evaluated per basis state,
     (j - 1) for grand-canonical models, list_1[j] otherwise. */
1814  switch (X->Def.iCalcModel) {
1815  case HubbardGC: //list_1[j] -> j-1
1816  is1_spin = X->Def.Tpow[2 * isite1 - 2 + isigma1];
1817  is2_spin = X->Def.Tpow[2 * isite2 - 2 + isigma2];
1818 #pragma omp parallel for default(none) shared(list_Diagonal) firstprivate(i_max, dtmp_V, is1_spin, is2_spin) private(num1, ibit1_spin, num2, ibit2_spin)
1819  for (j = 1; j <= i_max; j++) {
1820  num1 = 0;
1821  num2 = 0;
1822  ibit1_spin = (j - 1)&is1_spin;
1823  num1 += ibit1_spin / is1_spin;
1824  ibit2_spin = (j - 1)&is2_spin;
1825  num2 += ibit2_spin / is2_spin;
1826  list_Diagonal[j] += num1 * num2*dtmp_V;
1827  }
1828  break;
1829  case KondoGC:
1830  case Hubbard:
1831  case Kondo:
1832  is1_spin = X->Def.Tpow[2 * isite1 - 2 + isigma1];
1833  is2_spin = X->Def.Tpow[2 * isite2 - 2 + isigma2];
1834 
1835 #pragma omp parallel for default(none) shared(list_Diagonal, list_1) firstprivate(i_max, dtmp_V, is1_spin, is2_spin) private(num1, ibit1_spin, num2, ibit2_spin)
1836  for (j = 1; j <= i_max; j++) {
1837  num1 = 0;
1838  num2 = 0;
1839  ibit1_spin = list_1[j] & is1_spin;
1840  num1 += ibit1_spin / is1_spin;
1841 
1842  ibit2_spin = list_1[j] & is2_spin;
1843  num2 += ibit2_spin / is2_spin;
1844  list_Diagonal[j] += num1 * num2*dtmp_V;
1845  }
1846  break;
1847 
1848  case Spin:
1849  if (X->Def.iFlgGeneralSpin == FALSE) {
1850  is1_up = X->Def.Tpow[isite1 - 1];
1851  is2_up = X->Def.Tpow[isite2 - 1];
1852 #pragma omp parallel for default(none) shared(list_Diagonal) firstprivate(i_max, dtmp_V, is1_up, is2_up, isigma1, isigma2, X) private(j, num1, num2)
1853  for (j = 1; j <= i_max; j++) {
1854  num1 = X_Spin_CisAis(j, is1_up, isigma1);
1855  num2 = X_Spin_CisAis(j, is2_up, isigma2);
1856  list_Diagonal[j] += num1 * num2*dtmp_V;
1857  }
1858  }
1859  else {
  /* General spin: num1 is reused — first the occupation at isite1,
     then (only if nonzero) the occupation at isite2. */
1860 #pragma omp parallel for default(none) shared(list_Diagonal, list_1) firstprivate(i_max, dtmp_V, isite1, isite2, isigma1, isigma2, X) private(j, num1)
1861  for (j = 1; j <= i_max; j++) {
1862  num1 = BitCheckGeneral(list_1[j], isite1, isigma1, X->Def.SiteToBit, X->Def.Tpow);
1863  if (num1 != 0) {
1864  num1 = BitCheckGeneral(list_1[j], isite2, isigma2, X->Def.SiteToBit, X->Def.Tpow);
1865  list_Diagonal[j] += dtmp_V * num1;
1866  }
1867  }
1868 
1869  }
1870  break;
1871 
1872  case SpinGC:
1873  if (X->Def.iFlgGeneralSpin == FALSE) {
1874  is1_up = X->Def.Tpow[isite1 - 1];
1875  is2_up = X->Def.Tpow[isite2 - 1];
1876 #pragma omp parallel for default(none) shared(list_Diagonal) firstprivate(i_max, dtmp_V, is1_up, is2_up, isigma1, isigma2, X) private(j, num1, num2)
1877  for (j = 1; j <= i_max; j++) {
1878  num1 = X_SpinGC_CisAis(j, is1_up, isigma1);
1879  num2 = X_SpinGC_CisAis(j, is2_up, isigma2);
1880  list_Diagonal[j] += num1 * num2*dtmp_V;
1881  }
1882  }
1883  else {//start:generalspin
1884 #pragma omp parallel for default(none) shared(list_Diagonal) firstprivate(i_max, dtmp_V, isite1, isite2, isigma1, isigma2, X) private(j, num1)
1885  for (j = 1; j <= i_max; j++) {
1886  num1 = BitCheckGeneral(j - 1, isite1, isigma1, X->Def.SiteToBit, X->Def.Tpow);
1887  if (num1 != 0) {
1888  num1 = BitCheckGeneral(j - 1, isite2, isigma2, X->Def.SiteToBit, X->Def.Tpow);
1889  list_Diagonal[j] += dtmp_V * num1;
1890  }
1891  }
1892  }
1893  break;
1894 
1895  default:
1896  fprintf(stdoutMPI, "Error: CalcModel %d is incorrect.\n", X->Def.iCalcModel);
1897  return -1;
1898  }
1899  return 0;
1900 }
struct DefineList Def
Definision of system (Hamiltonian) etc.
Definition: struct.hpp:395
FILE * stdoutMPI
File pointer to the standard output defined in InitializeMPI()
Definition: global.cpp:75
int BitCheckGeneral(const long int org_bit, const int org_isite, const int target_ispin, const long int *SiteToBit, const long int *Tpow)
bit check function for general spin
Definition: bitcalc.cpp:392
int X_Spin_CisAis(long int j, long int is1_spin, long int sigma1)
Compute the spin state with bit mask is1_spin.
int Nsite
Number of sites in the INTRA process region.
Definition: struct.hpp:56
int X_SpinGC_CisAis(long int j, long int is1_spin, long int sigma1)
Compute the grandcanonical spin state with bit mask is1_spin.
int myrank
Process ID, defined in InitializeMPI()
Definition: global.cpp:73
int iFlgGeneralSpin
Flag for the general (Sz/=1/2) spin.
Definition: struct.hpp:86
double * list_Diagonal
Definition: global.cpp:24
long int * SiteToBit
[DefineList::NsiteMPI] Similar to DefineList::Tpow. For general spin.
Definition: struct.hpp:94
long int * Tpow
[2 * DefineList::NsiteMPI] malloc in setmem_def().
Definition: struct.hpp:90
int iCalcModel
Switch for model. 0:Hubbard, 1:Spin, 2:Kondo, 3:HubbardGC, 4:SpinGC, 5:KondoGC, 6:HubbardNConserved.
Definition: struct.hpp:200
struct CheckList Check
Size of the Hilbert space.
Definition: struct.hpp:396
long int * list_1
Definition: global.cpp:25
long int idim_max
The dimension of the Hilbert space of this process.
Definition: struct.hpp:305

◆ SetDiagonalTEChemi()

int SetDiagonalTEChemi ( long int  isite1,
long int  spin,
double  dtmp_V,
struct BindStruct X,
std::complex< double > *  tmp_v0,
std::complex< double > *  tmp_v1 
)

Update the vector by the chemical potential \( \mu_{i \sigma_1} n_ {i \sigma_1} \)
generated by the commutation relation in terms of the general two-body interaction,
\( c_ {i \sigma_1} a_{j\sigma_2}c_ {j \sigma_2}a_ {i \sigma_1} = c_ {i \sigma_1}a_ {i \sigma_1}-c_ {i \sigma_1} a_ {i \sigma_1} c_ {j \sigma_2}a_{j\sigma_2}\) . (Using in Time Evolution mode).

Parameters
isite1[in] a site number
spin[in] a spin number
dtmp_V[in] A value of the chemical potential \( \mu_{i \sigma_1} \)
X[in] Define list to get dimension number
tmp_v0[in,out] Result vector
tmp_v1[in] Input produced vector
Return values
-1fail to calculate the diagonal component.
0succeed to calculate the diagonal component.
Version
2.1
Author
Kazuyoshi Yoshimi (The University of Tokyo)

Definition at line 375 of file diagonalcalc.cpp.

References BitCheckGeneral(), BindStruct::Check, BindStruct::Def, DefineList::iCalcModel, CheckList::idim_max, DefineList::iFlgGeneralSpin, list_1, myrank, DefineList::Nsite, SetDiagonalTETransfer(), DefineList::SiteToBit, stdoutMPI, and DefineList::Tpow.

Referenced by diagonalcalcForTE().

382  {
 /*
  Time-evolution chemical-potential term: tmp_v0[j] += mu * n_{i sigma} * tmp_v1[j].
  If isite1 lies in the inter-process (MPI) region, the occupation n_{i sigma}
  is a 0/1 constant fixed by this rank's bit pattern and the whole vector is
  scaled uniformly; otherwise the occupation is evaluated per basis state.
 */
383  long int is1_up;
384  long int num1;
385  long int isigma1 = spin;
386  long int is1, ibit1;
387 
388  long int j;
389  long int i_max = X->Check.idim_max;
390 
391  /*
392  When isite1 is in the inter process region
393  */
394  if (isite1 > X->Def.Nsite) {
395 
396  switch (X->Def.iCalcModel) {
397 
398  case HubbardGC:
399  case KondoGC:
400  case Hubbard:
401  case Kondo:
402 
  /* Pick the up (spin==0) or down bit mask of the site and read the
     occupation from myrank's bit pattern. */
403  if (spin == 0) {
404  is1 = X->Def.Tpow[2 * isite1 - 2];
405  }
406  else {
407  is1 = X->Def.Tpow[2 * isite1 - 1];
408  }
409  ibit1 = (long int)myrank & is1;
410  num1 = ibit1 / is1;
411  break;/*case HubbardGC, case KondoGC, Hubbard, Kondo:*/
412 
413  case SpinGC:
414  case Spin:
415 
416  if (X->Def.iFlgGeneralSpin == FALSE) {
417  is1_up = X->Def.Tpow[isite1 - 1];
  /* XOR with (1 - spin) converts the up-bit into the occupancy of the
     requested spin: num1 == 1 iff the site's bit equals `spin`. */
418  num1 = (((long int)myrank& is1_up) / is1_up) ^ (1 - spin);
419  } /*if (X->Def.iFlgGeneralSpin == FALSE)*/
420  else /*if (X->Def.iFlgGeneralSpin == TRUE)*/ {
421  num1 = BitCheckGeneral((long int)myrank,
422  isite1, isigma1, X->Def.SiteToBit, X->Def.Tpow);
423  }/*if (X->Def.iFlgGeneralSpin == TRUE)*/
424  break;/*case SpinGC, Spin:*/
425 
426  default:
427  fprintf(stdoutMPI, "Error: CalcModel %d is incorrect.\n", X->Def.iCalcModel);
428  return -1;
429 
430  } /*switch (X->Def.iCalcModel)*/
431  if (num1 != 0) {
432 #pragma omp parallel for default(none) shared(tmp_v0, tmp_v1) \
433 firstprivate(i_max, dtmp_V) private(j)
434  for (j = 1; j <= i_max; j++) {
435  tmp_v0[j] += dtmp_V * tmp_v1[j];
436  }
437  }/*if (num1 != 0)*/
438  return 0;
439 
440  }/*if (isite1 > X->Def.Nsite)*/
441 
  /* Local site: the occupation is evaluated per basis state,
     (j - 1) for grand-canonical models, list_1[j] otherwise. */
442  switch (X->Def.iCalcModel) {
443  case HubbardGC:
444  if (spin == 0) {
445  is1 = X->Def.Tpow[2 * isite1 - 2];
446  }
447  else {
448  is1 = X->Def.Tpow[2 * isite1 - 1];
449  }
450 
451 #pragma omp parallel for default(none) \
452 shared(tmp_v0, tmp_v1) firstprivate(i_max, dtmp_V, is1) private(num1, ibit1)
453  for (j = 1; j <= i_max; j++) {
454  ibit1 = (j - 1)&is1;
455  num1 = ibit1 / is1;
456  tmp_v0[j] += dtmp_V * num1*tmp_v1[j];
457  }
458  break;
459  case KondoGC:
460  case Hubbard:
461  case Kondo:
462  if (spin == 0) {
463  is1 = X->Def.Tpow[2 * isite1 - 2];
464  }
465  else {
466  is1 = X->Def.Tpow[2 * isite1 - 1];
467  }
468 
469 #pragma omp parallel for default(none) \
470 shared(list_1, tmp_v0, tmp_v1) firstprivate(i_max, dtmp_V, is1) private(num1, ibit1)
471  for (j = 1; j <= i_max; j++) {
472  ibit1 = list_1[j] & is1;
473  num1 = ibit1 / is1;
474  tmp_v0[j] += dtmp_V * num1*tmp_v1[j];
475  }
476  break;
477 
478  case SpinGC:
479  if (X->Def.iFlgGeneralSpin == FALSE) {
480  is1_up = X->Def.Tpow[isite1 - 1];
481 #pragma omp parallel for default(none) \
482 shared(list_1, tmp_v0, tmp_v1) firstprivate(i_max, dtmp_V, is1_up, spin) private(num1)
483  for (j = 1; j <= i_max; j++) {
  /* Same XOR trick as above: num1 == 1 iff the state's bit equals `spin`. */
484  num1 = (((j - 1)& is1_up) / is1_up) ^ (1 - spin);
485  tmp_v0[j] += dtmp_V * num1*tmp_v1[j];
486  }
487  }
488  else {
489 #pragma omp parallel for default(none) \
490 shared(tmp_v0, tmp_v1) firstprivate(i_max, dtmp_V, isite1, isigma1, X) private(j, num1)
491  for (j = 1; j <= i_max; j++) {
492  num1 = BitCheckGeneral(j - 1, isite1, isigma1, X->Def.SiteToBit, X->Def.Tpow);
493  if (num1 != 0) {
494  tmp_v0[j] += dtmp_V * tmp_v1[j];
495  }
496  }
497  }
498  break;
499 
500  case Spin:
501  if (X->Def.iFlgGeneralSpin == FALSE) {
502  is1_up = X->Def.Tpow[isite1 - 1];
503 #pragma omp parallel for default(none) \
504 shared(list_1, tmp_v0, tmp_v1) firstprivate(i_max, dtmp_V, is1_up, spin) private(num1)
505  for (j = 1; j <= i_max; j++) {
506  num1 = ((list_1[j] & is1_up) / is1_up) ^ (1 - spin);
507  tmp_v0[j] += dtmp_V * num1*tmp_v1[j];
508  }
509  }
510  else {
511 #pragma omp parallel for default(none) \
512 shared(tmp_v0, tmp_v1, list_1) firstprivate(i_max, dtmp_V, isite1, isigma1, X) private(j, num1)
513  for (j = 1; j <= i_max; j++) {
514  num1 = BitCheckGeneral(list_1[j], isite1, isigma1, X->Def.SiteToBit, X->Def.Tpow);
515  if (num1 != 0) {
516  tmp_v0[j] += dtmp_V * tmp_v1[j];
517 
518  }
519  }
520  }
521 
522  break;
523  default:
524  fprintf(stdoutMPI, "Error: CalcModel %d is incorrect.\n", X->Def.iCalcModel);
525  return -1;
526  }
527  return 0;
528 }
struct DefineList Def
Definition of system (Hamiltonian) etc.
Definition: struct.hpp:395
FILE * stdoutMPI
File pointer to the standard output defined in InitializeMPI()
Definition: global.cpp:75
int BitCheckGeneral(const long int org_bit, const int org_isite, const int target_ispin, const long int *SiteToBit, const long int *Tpow)
bit check function for general spin
Definition: bitcalc.cpp:392
int Nsite
Number of sites in the INTRA process region.
Definition: struct.hpp:56
int myrank
Process ID, defined in InitializeMPI()
Definition: global.cpp:73
int iFlgGeneralSpin
Flag for the general (Sz/=1/2) spin.
Definition: struct.hpp:86
long int * SiteToBit
[DefineList::NsiteMPI] Similar to DefineList::Tpow. For general spin.
Definition: struct.hpp:94
long int * Tpow
[2 * DefineList::NsiteMPI] malloc in setmem_def().
Definition: struct.hpp:90
int iCalcModel
Switch for model. 0:Hubbard, 1:Spin, 2:Kondo, 3:HubbardGC, 4:SpinGC, 5:KondoGC, 6:HubbardNConserved.
Definition: struct.hpp:200
struct CheckList Check
Size of the Hilbert space.
Definition: struct.hpp:396
long int * list_1
Definition: global.cpp:25
long int idim_max
The dimension of the Hilbert space of this process.
Definition: struct.hpp:305

◆ SetDiagonalTEInterAll()

int SetDiagonalTEInterAll ( long int  isite1,
long int  isite2,
long int  isigma1,
long int  isigma2,
double  dtmp_V,
struct BindStruct X,
std::complex< double > *  tmp_v0,
std::complex< double > *  tmp_v1 
)

Update the vector by the general two-body diagonal interaction, \( H_{i\sigma_1 j\sigma_2} n_ {i\sigma_1}n_{j\sigma_2}\).
(Using in Time Evolution mode).

Parameters
isite1[in] a site number \(i \)
isite2[in] a site number \(j \)
isigma1[in] a spin index at \(i \) site.
isigma2[in] a spin index at \(j \) site.
dtmp_V[in] A value of general two-body diagonal interaction \( H_{i\sigma_1 j\sigma_2} \)
X[in] Define list to get the operator information.
tmp_v0[in,out] Result vector
tmp_v1[in] Input produced vector
Return values
-1fail to calculate the diagonal component.
0succeed to calculate the diagonal component.
Version
2.1
Author
Kazuyoshi Yoshimi (The University of Tokyo)

Definition at line 58 of file diagonalcalc.cpp.

References BitCheckGeneral(), BindStruct::Check, BindStruct::Def, DefineList::iCalcModel, CheckList::idim_max, DefineList::iFlgGeneralSpin, list_1, myrank, DefineList::Nsite, DefineList::SiteToBit, stdoutMPI, DefineList::Tpow, X_Spin_CisAis(), and X_SpinGC_CisAis().

Referenced by diagonalcalcForTE().

67  {
68  long int is1_spin;
69  long int is2_spin;
70  long int is1_up;
71  long int is2_up;
72 
73  long int ibit1_spin;
74  long int ibit2_spin;
75 
76  long int num1;
77  long int num2;
78 
79  long int j;
80  long int i_max = X->Check.idim_max;
81  /*
82  Force isite1 <= isite2 by swapping the (site, spin) index pairs when needed
83  */
84  if (isite2 < isite1) {
85  j = isite2;
86  isite2 = isite1;
87  isite1 = j;
88  j = isigma2;
89  isigma2 = isigma1;
90  isigma1 = j;
91  }
92  /*
93  When both isite1 and isite2 are in the inter-process region
94  */
95  if (isite1 > X->Def.Nsite) {
96 
97  switch (X->Def.iCalcModel) {
98 
99  case HubbardGC:
100  case KondoGC:
101  case Hubbard:
102  case Kondo:
103  is1_spin = X->Def.Tpow[2 * isite1 - 2 + isigma1];/* bit mask of (isite1, isigma1) */
104  is2_spin = X->Def.Tpow[2 * isite2 - 2 + isigma2];/* bit mask of (isite2, isigma2) */
105  num1 = 0;
106  ibit1_spin = (long int)myrank&is1_spin;/* inter-process occupations are read from the bits of myrank */
107  num1 += ibit1_spin / is1_spin;
108  num2 = 0;
109  ibit2_spin = (long int)myrank&is2_spin;
110  num2 += ibit2_spin / is2_spin;
111  break;/*case HubbardGC, KondoGC, Hubbard, Kondo:*/
112 
113  case SpinGC:
114  case Spin:
115  if (X->Def.iFlgGeneralSpin == FALSE) {
116  is1_up = X->Def.Tpow[isite1 - 1];
117  is2_up = X->Def.Tpow[isite2 - 1];
118  num1 = X_SpinGC_CisAis((long int) myrank + 1, is1_up, isigma1);
119  num2 = X_SpinGC_CisAis((long int) myrank + 1, is2_up, isigma2);
120  }/*if (X->Def.iFlgGeneralSpin == FALSE)*/
121  else {//start:generalspin
122  num1 = BitCheckGeneral((long int) myrank, isite1, isigma1,
123  X->Def.SiteToBit, X->Def.Tpow);
124  num2 = BitCheckGeneral((long int) myrank, isite2, isigma2,
125  X->Def.SiteToBit, X->Def.Tpow);
126  }
127  break;/*case SpinGC, Spin:*/
128 
129  default:
130  fprintf(stdoutMPI, "Error: CalcModel %d is incorrect.\n", X->Def.iCalcModel);
131  return -1;
132  }/*switch (X->Def.iCalcModel)*/
133 
134  if (num1 * num2 != 0) {/* both occupations are 1: the term acts as a uniform diagonal shift */
135 #pragma omp parallel for default(none) shared(tmp_v0, tmp_v1) \
136 firstprivate(i_max, dtmp_V) private(j)
137  for (j = 1; j <= i_max; j++) {
138  tmp_v0[j] += dtmp_V * tmp_v1[j];
139  }
140  }
141  return 0;
142  }/*if (isite1 > X->Def.Nsite)*/
143  else if (isite2 > X->Def.Nsite) {/* only isite2 lies in the inter-process region */
144 
145  switch (X->Def.iCalcModel) {
146 
147  case HubbardGC:
148 
149  is1_spin = X->Def.Tpow[2 * isite1 - 2 + isigma1];
150  is2_spin = X->Def.Tpow[2 * isite2 - 2 + isigma2];
151 
152  num2 = 0;
153  ibit2_spin = (long int)myrank&is2_spin;
154  num2 += ibit2_spin / is2_spin;
155  if (num2 != 0) {
156 #pragma omp parallel for default(none) shared(tmp_v0, tmp_v1)\
157  firstprivate(i_max, dtmp_V, is1_spin) private(num1, ibit1_spin, j)
158  for (j = 1; j <= i_max; j++) {
159  num1 = 0;
160  ibit1_spin = (j - 1) & is1_spin;/* HubbardGC: state index j-1 is itself the bit string */
161  num1 += ibit1_spin / is1_spin;
162  tmp_v0[j] += dtmp_V * num1 * tmp_v1[j];
163  }
164  }
165  break;/*case HubbardGC:*/
166 
167  case KondoGC:
168  case Hubbard:
169  case Kondo:
170 
171  is1_spin = X->Def.Tpow[2 * isite1 - 2 + isigma1];
172  is2_spin = X->Def.Tpow[2 * isite2 - 2 + isigma2];
173 
174  num2 = 0;
175  ibit2_spin = (long int)myrank&is2_spin;
176  num2 += ibit2_spin / is2_spin;
177  if (num2 != 0) {
178 #pragma omp parallel for default(none) shared(tmp_v0, tmp_v1, list_1)\
179  firstprivate(i_max, dtmp_V, is1_spin) private(num1, ibit1_spin, j)
180  for (j = 1; j <= i_max; j++) {
181  num1 = 0;
182  ibit1_spin = list_1[j] & is1_spin;/* canonical models: look the bit string up via list_1 */
183  num1 += ibit1_spin / is1_spin;
184  tmp_v0[j] += dtmp_V * num1*tmp_v1[j];
185  }
186  }
187  break;/*case KondoGC, Hubbard, Kondo:*/
188 
189  case SpinGC:
190 
191  if (X->Def.iFlgGeneralSpin == FALSE) {
192  is1_up = X->Def.Tpow[isite1 - 1];
193  is2_up = X->Def.Tpow[isite2 - 1];
194  num2 = X_SpinGC_CisAis((long int)myrank + 1, is2_up, isigma2);
195 
196  if (num2 != 0) {
197 #pragma omp parallel for default(none) shared(tmp_v0, tmp_v1)\
198  firstprivate(i_max, dtmp_V, is1_up, isigma1, X) private(num1, j)
199  for (j = 1; j <= i_max; j++) {
200  num1 = X_SpinGC_CisAis(j, is1_up, isigma1);
201  tmp_v0[j] += dtmp_V * num1 * tmp_v1[j];
202  }
203  }
204  }/* if (X->Def.iFlgGeneralSpin == FALSE)*/
205  else {//start:generalspin
206  num2 = BitCheckGeneral((long int)myrank, isite2, isigma2,
207  X->Def.SiteToBit, X->Def.Tpow);
208  if (num2 != 0) {
209 #pragma omp parallel for default(none) shared(tmp_v0, tmp_v1) \
210 firstprivate(i_max, dtmp_V, isite1, isigma1, X) private(j, num1)
211  for (j = 1; j <= i_max; j++) {
212  num1 = BitCheckGeneral(j - 1, isite1, isigma1, X->Def.SiteToBit, X->Def.Tpow);
213  tmp_v0[j] += dtmp_V * num1 * tmp_v1[j];
214  }
215  }
216  }/* if (X->Def.iFlgGeneralSpin == TRUE)*/
217 
218  break;/*case SpinGC:*/
219 
220  case Spin:
221 
222  if (X->Def.iFlgGeneralSpin == FALSE) {
223  is1_up = X->Def.Tpow[isite1 - 1];
224  is2_up = X->Def.Tpow[isite2 - 1];
225  num2 = X_SpinGC_CisAis((long int)myrank + 1, is2_up, isigma2);
226 
227  if (num2 != 0) {
228 #pragma omp parallel for default(none) shared(tmp_v0, tmp_v1) \
229 firstprivate(i_max, dtmp_V, is1_up, isigma1, X, num2) private(j, num1)
230  for (j = 1; j <= i_max; j++) {
231  num1 = X_Spin_CisAis(j, is1_up, isigma1);
232  tmp_v0[j] += dtmp_V * num1 * tmp_v1[j];
233  }
234  }
235  }/* if (X->Def.iFlgGeneralSpin == FALSE)*/
236  else /* if (X->Def.iFlgGeneralSpin == TRUE)*/ {
237  num2 = BitCheckGeneral((long int)myrank, isite2, isigma2, \
238  X->Def.SiteToBit, X->Def.Tpow);
239  if (num2 != 0) {
240 #pragma omp parallel for default(none) shared(tmp_v0, tmp_v1, list_1)\
241 firstprivate(i_max, dtmp_V, isite1, isigma1, X) private(j, num1)
242  for (j = 1; j <= i_max; j++) {
243  num1 = BitCheckGeneral(list_1[j], isite1, isigma1, X->Def.SiteToBit, X->Def.Tpow);
244  tmp_v0[j] += dtmp_V * num1 * tmp_v1[j];
245  }
246  }
247  } /* if (X->Def.iFlgGeneralSpin == TRUE)*/
248 
249  break;/*case Spin:*/
250 
251  default:
252  fprintf(stdoutMPI, "Error: CalcModel %d is incorrect.\n", X->Def.iCalcModel);
253  return -1;
254 
255  }/*switch (X->Def.iCalcModel)*/
256  return 0;
257  }/*else if (isite2 > X->Def.Nsite)*/
258 
259  switch (X->Def.iCalcModel) {/* both sites are in the intra-process region */
260  case HubbardGC: //list_1[j] -> j-1
261  is1_spin = X->Def.Tpow[2 * isite1 - 2 + isigma1];
262  is2_spin = X->Def.Tpow[2 * isite2 - 2 + isigma2];
263 #pragma omp parallel for default(none) \
264 shared(tmp_v0, tmp_v1) firstprivate(i_max, dtmp_V, is1_spin, is2_spin) \
265 private(num1, ibit1_spin, num2, ibit2_spin)
266  for (j = 1; j <= i_max; j++) {
267  num1 = 0;
268  num2 = 0;
269  ibit1_spin = (j - 1)&is1_spin;
270  num1 += ibit1_spin / is1_spin;
271  ibit2_spin = (j - 1)&is2_spin;
272  num2 += ibit2_spin / is2_spin;
273  tmp_v0[j] += dtmp_V * num1*num2*tmp_v1[j];
274  }
275  break;
276  case KondoGC:
277  case Hubbard:
278  case Kondo:
279  is1_spin = X->Def.Tpow[2 * isite1 - 2 + isigma1];
280  is2_spin = X->Def.Tpow[2 * isite2 - 2 + isigma2];
281 
282 #pragma omp parallel for default(none) \
283 shared(tmp_v0, tmp_v1, list_1) firstprivate(i_max, dtmp_V, is1_spin, is2_spin) \
284 private(num1, ibit1_spin, num2, ibit2_spin)
285  for (j = 1; j <= i_max; j++) {
286  num1 = 0;
287  num2 = 0;
288  ibit1_spin = list_1[j] & is1_spin;
289  num1 += ibit1_spin / is1_spin;
290 
291  ibit2_spin = list_1[j] & is2_spin;
292  num2 += ibit2_spin / is2_spin;
293  tmp_v0[j] += dtmp_V * num1*num2*tmp_v1[j];
294  }
295  break;
296 
297  case Spin:
298  if (X->Def.iFlgGeneralSpin == FALSE) {
299  is1_up = X->Def.Tpow[isite1 - 1];
300  is2_up = X->Def.Tpow[isite2 - 1];
301 #pragma omp parallel for default(none) \
302 shared(tmp_v0, tmp_v1) firstprivate(i_max, dtmp_V, is1_up, is2_up, isigma1, isigma2, X) \
303 private(j, num1, num2)
304  for (j = 1; j <= i_max; j++) {
305  num1 = X_Spin_CisAis(j, is1_up, isigma1);
306  num2 = X_Spin_CisAis(j, is2_up, isigma2);
307  tmp_v0[j] += dtmp_V * num1*num2*tmp_v1[j];
308  }
309  }
310  else {
311 #pragma omp parallel for default(none) \
312 shared(tmp_v0, tmp_v1, list_1) firstprivate(i_max, dtmp_V, isite1, isite2, isigma1, isigma2, X) \
313 private(j, num1)
314  for (j = 1; j <= i_max; j++) {
315  num1 = BitCheckGeneral(list_1[j], isite1, isigma1, X->Def.SiteToBit, X->Def.Tpow);
316  if (num1 != 0) {/* test site 2 only when site 1 is occupied */
317  num1 = BitCheckGeneral(list_1[j], isite2, isigma2, X->Def.SiteToBit, X->Def.Tpow);
318  tmp_v0[j] += dtmp_V * num1*tmp_v1[j];
319  }
320  }
321 
322  }
323  break;
324 
325  case SpinGC:
326  if (X->Def.iFlgGeneralSpin == FALSE) {
327  is1_up = X->Def.Tpow[isite1 - 1];
328  is2_up = X->Def.Tpow[isite2 - 1];
329 #pragma omp parallel for default(none) \
330 shared(tmp_v0, tmp_v1) firstprivate(i_max, dtmp_V, is1_up, is2_up, isigma1, isigma2, X) \
331 private(j, num1, num2)
332  for (j = 1; j <= i_max; j++) {
333  num1 = X_SpinGC_CisAis(j, is1_up, isigma1);
334  num2 = X_SpinGC_CisAis(j, is2_up, isigma2);
335  tmp_v0[j] += dtmp_V * num1*num2*tmp_v1[j];
336  }
337  }
338  else {//start:generalspin
339 #pragma omp parallel for default(none) \
340 shared(tmp_v0, tmp_v1) firstprivate(i_max, dtmp_V, isite1, isite2, isigma1, isigma2, X) \
341 private(j, num1)
342  for (j = 1; j <= i_max; j++) {
343  num1 = BitCheckGeneral(j - 1, isite1, isigma1, X->Def.SiteToBit, X->Def.Tpow);
344  if (num1 != 0) {
345  num1 = BitCheckGeneral(j - 1, isite2, isigma2, X->Def.SiteToBit, X->Def.Tpow);
346  tmp_v0[j] += dtmp_V * num1*tmp_v1[j];
347  }
348  }
349  }
350  break;
351 
352  default:
353  fprintf(stdoutMPI, "Error: CalcModel %d is incorrect.\n", X->Def.iCalcModel);
354  return -1;
355  }
356  return 0;
357 }
struct DefineList Def
Definition of system (Hamiltonian), etc.
Definition: struct.hpp:395
FILE * stdoutMPI
File pointer to the standard output defined in InitializeMPI()
Definition: global.cpp:75
int BitCheckGeneral(const long int org_bit, const int org_isite, const int target_ispin, const long int *SiteToBit, const long int *Tpow)
bit check function for general spin
Definition: bitcalc.cpp:392
int X_Spin_CisAis(long int j, long int is1_spin, long int sigma1)
Compute the spin state with bit mask is1_spin.
int Nsite
Number of sites in the INTRA process region.
Definition: struct.hpp:56
int X_SpinGC_CisAis(long int j, long int is1_spin, long int sigma1)
Compute the grandcanonical spin state with bit mask is1_spin.
int myrank
Process ID, defined in InitializeMPI()
Definition: global.cpp:73
int iFlgGeneralSpin
Flag for the general (Sz/=1/2) spin.
Definition: struct.hpp:86
long int * SiteToBit
[DefineList::NsiteMPI] Similar to DefineList::Tpow. For general spin.
Definition: struct.hpp:94
long int * Tpow
[2 * DefineList::NsiteMPI] malloc in setmem_def().
Definition: struct.hpp:90
int iCalcModel
Switch for model. 0:Hubbard, 1:Spin, 2:Kondo, 3:HubbardGC, 4:SpinGC, 5:KondoGC, 6:HubbardNConserved.
Definition: struct.hpp:200
struct CheckList Check
Size of the Hilbert space.
Definition: struct.hpp:396
long int * list_1
Definition: global.cpp:25
long int idim_max
The dimension of the Hilbert space of this process.
Definition: struct.hpp:305

◆ SetDiagonalTETransfer()

int SetDiagonalTETransfer ( long int  isite1,
double  dtmp_V,
long int  spin,
struct BindStruct X,
std::complex< double > *  tmp_v0,
std::complex< double > *  tmp_v1 
)

Update the vector by the general one-body diagonal interaction, \( \mu_{i\sigma_1} n_ {i\sigma_1}\).
(Using in Time Evolution mode).

Parameters
isite1[in] a site number \(i \)
dtmp_V[in] A value of general one-body diagonal interaction \( \mu_{i\sigma_1} \)
spin[in] a spin index at \(i \) site.
X[in] Define list to get the operator information.
tmp_v0[in,out] Result vector
tmp_v1[in] Input produced vector
Return values
-1fail to calculate the diagonal component.
0succeed to calculate the diagonal component.
Version
2.1
Author
Kazuyoshi Yoshimi (The University of Tokyo)

Definition at line 545 of file diagonalcalc.cpp.

References BitCheckGeneral(), BindStruct::Check, BindStruct::Def, diagonalcalcForTE(), DefineList::iCalcModel, CheckList::idim_max, DefineList::iFlgGeneralSpin, list_1, myrank, DefineList::Nsite, DefineList::SiteToBit, stdoutMPI, and DefineList::Tpow.

Referenced by diagonalcalcForTE(), and SetDiagonalTEChemi().

552  {
553  long int is1_up;
554  long int ibit1_up;
555  long int num1;
556  long int isigma1 = spin;/* alias used by the general-spin branches */
557  long int is1, ibit1;
558 
559  long int j;
560  long int i_max = X->Check.idim_max;
561 
562  /*
563  When isite1 is in the inter-process region
564  */
565  if (isite1 > X->Def.Nsite) {
566 
567  switch (X->Def.iCalcModel) {
568 
569  case HubbardGC:
570  case KondoGC:
571  case Hubbard:
572  case Kondo:
573  if (spin == 0) {
574  is1 = X->Def.Tpow[2 * isite1 - 2];
575  }
576  else {
577  is1 = X->Def.Tpow[2 * isite1 - 1];
578  }
579  ibit1 = (long int)myrank & is1;/* inter-process occupation is read from the bits of myrank */
580  num1 = ibit1 / is1;
581  break;/*case HubbardGC, case KondoGC, Hubbard, Kondo:*/
582 
583  case SpinGC:
584  case Spin:
585  if (X->Def.iFlgGeneralSpin == FALSE) {
586  is1_up = X->Def.Tpow[isite1 - 1];
587  num1 = (((long int)myrank& is1_up) / is1_up) ^ (1 - spin);/* 1 only when the site carries the requested spin */
588  } /*if (X->Def.iFlgGeneralSpin == FALSE)*/
589  else /*if (X->Def.iFlgGeneralSpin == TRUE)*/ {
590  num1 = BitCheckGeneral((long int)myrank,
591  isite1, isigma1, X->Def.SiteToBit, X->Def.Tpow);
592  }/*if (X->Def.iFlgGeneralSpin == TRUE)*/
593  break;/*case SpinGC, Spin:*/
594 
595  default:
596  fprintf(stdoutMPI, "Error: CalcModel %d is incorrect.\n", X->Def.iCalcModel);
597  return -1;
598 
599  } /*switch (X->Def.iCalcModel)*/
600 
601  if (num1 != 0) {/* occupied on this rank: the term acts as a uniform diagonal shift */
602 #pragma omp parallel for default(none) shared(tmp_v0, tmp_v1)\
603  firstprivate(i_max, dtmp_V) private(j)
604  for (j = 1; j <= i_max; j++) {
605  tmp_v0[j] += dtmp_V * tmp_v1[j];
606  }
607  }
608  }/*if (isite1 > X->Def.Nsite)*/
609  else {//(isite1 <= X->Def.Nsite)
610  switch (X->Def.iCalcModel) {
611  case HubbardGC:
612  if (spin == 0) {
613  is1 = X->Def.Tpow[2 * isite1 - 2];
614  }
615  else {
616  is1 = X->Def.Tpow[2 * isite1 - 1];
617  }
618 #pragma omp parallel for default(none) shared(list_1, tmp_v0, tmp_v1) \
619  firstprivate(i_max, dtmp_V, is1) private(num1, ibit1)
620  for (j = 1; j <= i_max; j++) {
621  ibit1 = (j - 1) & is1;/* HubbardGC: state index j-1 is itself the bit string */
622  num1 = ibit1 / is1;
623  tmp_v0[j] += dtmp_V * num1*tmp_v1[j];
624  }
625  break;
626 
627  case KondoGC:
628  case Hubbard:
629  case Kondo:
630  if (spin == 0) {
631  is1 = X->Def.Tpow[2 * isite1 - 2];
632  }
633  else {
634  is1 = X->Def.Tpow[2 * isite1 - 1];
635  }
636 #pragma omp parallel for default(none) shared(list_1, tmp_v0, tmp_v1) \
637  firstprivate(i_max, dtmp_V, is1) private(num1, ibit1)
638  for (j = 1; j <= i_max; j++) {
639  ibit1 = list_1[j] & is1;/* canonical models: look the bit string up via list_1 */
640  num1 = ibit1 / is1;
641  tmp_v0[j] += dtmp_V * num1*tmp_v1[j];
642  }
643  break;
644 
645  case SpinGC:
646  if (X->Def.iFlgGeneralSpin == FALSE) {
647  is1_up = X->Def.Tpow[isite1 - 1];
648 #pragma omp parallel for default(none) \
649 shared(list_1, tmp_v0, tmp_v1) \
650 firstprivate(i_max, dtmp_V, is1_up, spin) private(num1, ibit1_up)
651  for (j = 1; j <= i_max; j++) {
652  ibit1_up = (((j - 1) & is1_up) / is1_up) ^ (1 - spin);/* 1 only when the site carries the requested spin */
653  tmp_v0[j] += dtmp_V * ibit1_up*tmp_v1[j];
654  }
655  }
656  else {
657 #pragma omp parallel for default(none) shared(tmp_v0, tmp_v1) \
658 firstprivate(i_max, dtmp_V, isite1, isigma1, X) private(j, num1)
659  for (j = 1; j <= i_max; j++) {
660  num1 = BitCheckGeneral(j - 1, isite1, isigma1, X->Def.SiteToBit, X->Def.Tpow);
661  if (num1 != 0) {
662  tmp_v0[j] += dtmp_V * tmp_v1[j];
663  }
664  }
665  }
666  break;
667 
668  case Spin:
669  if (X->Def.iFlgGeneralSpin == FALSE) {
670  is1_up = X->Def.Tpow[isite1 - 1];
671 #pragma omp parallel for default(none) shared(list_1, tmp_v0, tmp_v1)\
672  firstprivate(i_max, dtmp_V, is1_up, spin) private(num1, ibit1_up)
673  for (j = 1; j <= i_max; j++) {
674  ibit1_up = ((list_1[j] & is1_up) / is1_up) ^ (1 - spin);
675  tmp_v0[j] += dtmp_V * ibit1_up * tmp_v1[j];
676  }
677  }
678  else {
679 #pragma omp parallel for default(none) shared(list_1, tmp_v0, tmp_v1)\
680  firstprivate(i_max, dtmp_V, isite1, isigma1, X) private(j, num1)
681  for (j = 1; j <= i_max; j++) {
682  num1 = BitCheckGeneral(list_1[j], isite1, isigma1, X->Def.SiteToBit, X->Def.Tpow);
683  tmp_v0[j] += dtmp_V * num1 * tmp_v1[j];
684  }
685  }
686  break;
687 
688  default:
689  fprintf(stdoutMPI, "Error: CalcModel %d is incorrect.\n", X->Def.iCalcModel);
690  return -1;
691  }
692  }
693  return 0;
694 }
struct DefineList Def
Definition of system (Hamiltonian), etc.
Definition: struct.hpp:395
FILE * stdoutMPI
File pointer to the standard output defined in InitializeMPI()
Definition: global.cpp:75
int BitCheckGeneral(const long int org_bit, const int org_isite, const int target_ispin, const long int *SiteToBit, const long int *Tpow)
bit check function for general spin
Definition: bitcalc.cpp:392
int Nsite
Number of sites in the INTRA process region.
Definition: struct.hpp:56
int myrank
Process ID, defined in InitializeMPI()
Definition: global.cpp:73
int iFlgGeneralSpin
Flag for the general (Sz/=1/2) spin.
Definition: struct.hpp:86
long int * SiteToBit
[DefineList::NsiteMPI] Similar to DefineList::Tpow. For general spin.
Definition: struct.hpp:94
long int * Tpow
[2 * DefineList::NsiteMPI] malloc in setmem_def().
Definition: struct.hpp:90
int iCalcModel
Switch for model. 0:Hubbard, 1:Spin, 2:Kondo, 3:HubbardGC, 4:SpinGC, 5:KondoGC, 6:HubbardNConserved.
Definition: struct.hpp:200
struct CheckList Check
Size of the Hilbert space.
Definition: struct.hpp:396
long int * list_1
Definition: global.cpp:25
long int idim_max
The dimension of the Hilbert space of this process.
Definition: struct.hpp:305