diff --git a/examples/open/9AGNR-7AGNR/compute.py b/examples/open/9AGNR-7AGNR/compute.py new file mode 100644 index 00000000..b1d38384 --- /dev/null +++ b/examples/open/9AGNR-7AGNR/compute.py @@ -0,0 +1,79 @@ +import sisl +import numpy as np +import sys +import matplotlib.pyplot as plt +from hubbard import HubbardHamiltonian, density, NEGF, sp2, plot + +# Set U for the whole calculation +U = 3.0 +kT = 0.025 + +# Build AGNRs +AGNR7 = sisl.geom.agnr(7) +AGNR9 = sisl.geom.agnr(9) + +# Central region is a junction between the 7 and 9 AGNRs +Nrep = 5 +AGNR7_rep = AGNR7.tile(Nrep,axis=0).remove(np.arange(-7, 0)) +AGNR9_rep = AGNR9.tile(Nrep,axis=0).move(np.array([AGNR7_rep.xyz[-1,0]+1.42,-1.42*np.sin(np.pi/3), 0])) +junction = AGNR7_rep.add(AGNR9_rep) +junction.set_nsc([1,1,1]) +HC = sp2(junction) +HC.geometry.write('7-9-AGNR.xyz') +#HC.geometry.write('7-9-AGNR.xyz') +HC.set_nsc([1, 1, 1]) + +# and 3NN TB Hamiltonian +H_elec_7 = sp2(AGNR7, t1=2.7, t2=0.2, t3=0.18) +H_elec_9 = sp2(AGNR9, t1=2.7, t2=0.2, t3=0.18) + +# Hubbard Hamiltonian of elecs +MFH_elec_7 = HubbardHamiltonian(H_elec_7, U=U, nkpt=[102, 1, 1], kT=kT) +MFH_elec_9 = HubbardHamiltonian(H_elec_9, U=U, nkpt=[102, 1, 1], kT=kT) + +# Initialize spin densities +MFH_elec_7.set_polarization(range(int(7/2)), dn=range(-int(7/2), 0)) # Ensure we break symmetry +MFH_elec_9.set_polarization(range(int(9/2)), dn=range(-int(9/2), 0)) # Ensure we break symmetry + +# Converge Electrode Hamiltonians +dn = MFH_elec_7.converge(density.calc_n, mixer=sisl.mixing.PulayMixer(weight=.7, history=7), tol=1e-10) +dn = MFH_elec_9.converge(density.calc_n, mixer=sisl.mixing.PulayMixer(weight=.7, history=7), tol=1e-10) + +p = plot.SpinPolarization(MFH_elec_7, colorbar=True, vmax=0.2, vmin=-0.2) +p.annotate() +p.savefig('ELEC_AGNR7.pdf') + +p = plot.SpinPolarization(MFH_elec_9, colorbar=True, vmax=0.2, vmin=-0.2) +p.annotate() +p.savefig('ELEC_AGNR9.pdf') + +dist = sisl.get_distribution('fermi_dirac', smearing=kT) +Ef_7 = 
def error():
    """Unconditionally raise an AssertionError.

    Used throughout this module as a hard stop when an internal
    consistency check (e.g. an out-of-range block index) fails.
    """
    assert False
[ [slice(P[i-1],P[i]),slice(P[i],P[i+1])] for i in range(1,N-1) ] + if only_slices: + return 1.0, [Slices_a,Slices_b,Slices_c] + + Abs = 0.0 + + + for i in range(N-1): + Abs+=np.abs(A[Slices_a[i][0],Slices_a[i][1]].todense()).sum() + if i=ia_start) * (I=ja_start) * (J=ib_start) * (I=jb_start) * (J=ic_start) * (I=jc_start) * (J0)[0].min() + jmax = np.where(col>0)[0].max() + for m in range(n+1, jmax+1): + Mt = MM(-self.Xt(m-1),Mt) + if col[m]==1: + self.iMl+=[Mt] + self.inds+=[[m,n]] + self.iMl_dict.update({(m,n):it}) + it+=1 + + Mt = self.iM(n,n) + for m in range(n-1,jmin-1,-1): + Mt = MM(-self.Yt(m+1),Mt) + if col[m] == 1: + self.iMl+=[Mt] + self.inds+=[[m,n]] + self.iMl_dict.update({(m,n):it}) + it+=1 + Res = block_sparse(self.inds,self.iMl,self.is_zero.shape,E_grid = self.E_grid) + self.Clean_inverse() + return Res + + def Invert(self, BW = 'all'): + if isinstance(BW, np.ndarray): + return self.Invert_from_mask(BW) + + if BW=='all': + self.Gen_Inv_Diag() + #Regner alle elementer i inverse matrix + it = int(self.iMl_it) + for n in range(self.N): + for m in range(n+1,self.N): + self.iMl +=[MM(-self.Xt(m-1),self.iM(m-1,n))]#[-self.Xt(m-1).dot(self.iM(m-1,n))] + self.inds+=[[m,n]] + self.iMl_dict.update({(m,n):it}) + it+=1 + + for m in range(n-1,-1,-1): + self.iMl +=[MM(-self.Yt(m+1),self.iM(m+1,n))]#[-self.Yt(m+1).dot(self.iM(m+1,n))] + self.inds+=[[m,n]] + self.iMl_dict.update({(m,n):it}) + it+=1 + + + Res = block_sparse(self.inds,self.iMl,self.is_zero.shape,E_grid = self.E_grid) + self.Clean_inverse() + return Res + + elif isinstance(BW,int): + self.Gen_Inv_Diag() + #Regner kun elementer et antal steps væk fra diagonalen + it = int(self.iMl_it) + + for n in range(self.N): + for m in range(n+1,min(self.N,n+1+BW)): + self.iMl +=[MM(-self.Xt(m-1),self.iM(m-1,n))]#[-self.Xt(m-1).dot(self.iM(m-1,n))] + self.inds+=[[m,n]] + self.iMl_dict.update({(m,n):it}) + it+=1 + + for m in range(n-1,max(-1,n-1-BW),-1): + self.iMl 
+=[MM(-self.Yt(m+1),self.iM(m+1,n))]#[-self.Yt(m+1).dot(self.iM(m+1,n))] + self.inds+=[[m,n]] + self.iMl_dict.update({(m,n):it}) + it+=1 + + Res = block_sparse(self.inds,self.iMl,self.is_zero.shape,E_grid = self.E_grid) + self.Clean_inverse() + return Res + + elif BW[0] == 'N': + if BW=='N': nn=0 + else: nn = int(BW[1:]) + + self.Gen_Inv_Diag() + #Regner invers matrix elementer i en N form + it = int(self.iMl_it) + + for n in range(self.N): + if n<=0+nn or n>=self.N-1-nn: + for m in range(n+1,self.N): + self.iMl +=[MM(-self.Xt(m-1),self.iM(m-1,n))]#[-self.Xt(m-1).dot(self.iM(m-1,n))] + self.inds+=[[m,n]] + self.iMl_dict.update({(m,n):it}) + it+=1 + + for m in range(n-1,-1,-1): + self.iMl +=[MM(-self.Yt(m+1),self.iM(m+1,n))]#[-self.Yt(m+1).dot(self.iM(m+1,n))] + self.inds+=[[m,n]] + self.iMl_dict.update({(m,n):it}) + it+=1 + + Res = block_sparse(self.inds,self.iMl,self.is_zero.shape,E_grid = self.E_grid) + self.Clean_inverse() + gc.collect() + return Res + + elif BW[0]=='Z': + if BW=='Z': nn=0 + else: nn = int(BW[1:]) + + self.do_transposition() + self.Gen_Inv_Diag() + + it = int(self.iMl_it) + + for n in range(self.N): + if n<=0+nn or n>=self.N-1-nn: + for m in range(n+1,self.N): + self.iMl +=[MM(-self.Xt(m-1),self.iM(m-1,n))]#[-self.Xt(m-1).dot(self.iM(m-1,n))] + self.inds+=[[m,n]] + self.iMl_dict.update({(m,n):it}) + it+=1 + + for m in range(n-1,-1,-1): + self.iMl +=[MM(-self.Yt(m+1),self.iM(m+1,n))]#[-self.Yt(m+1).dot(self.iM(m+1,n))] + self.inds+=[[m,n]] + self.iMl_dict.update({(m,n):it}) + it+=1 + + self.do_transposition() + Res = block_sparse(self.inds,self.iMl,self.is_zero.shape,E_grid = self.E_grid) + Res.do_transposition() + self.Clean_inverse() + gc.collect() + return Res + elif BW == 'Upper': + self.Gen_Inv_Diag() + #Regner alle elementer i inverse matrix + it = int(self.iMl_it) + for n in range(self.N): + for m in range(n+1,self.N): + self.iMl +=[MM(-self.Xt(m-1),self.iM(m-1,n))]#[-self.Xt(m-1).dot(self.iM(m-1,n))] + self.inds+=[[m,n]] + 
self.iMl_dict.update({(m,n):it}) + it+=1 + + Res = block_sparse(self.inds,self.iMl,self.is_zero.shape,E_grid = self.E_grid) + self.Clean_inverse() + return Res + + + elif BW[0:3]=='*\*': + if BW=='*\*':nn=0 + else: nn=int(BW[3:]) + + diag_ele = [0] + for i in range(nn): + diag_ele+=[i+1] + diag_ele += [self.N-nn-1] + for i in range(nn): + diag_ele+=[self.N - nn + i] + + # self.Gen_Inv_Diag() + self.Gen_Inv_Diag(which_n = diag_ele) + it = int(self.iMl_it) + + for n in range(self.N): + if n<=0+nn or n>=self.N-1-nn: + for m in range(n+1,self.N): + if m <= nn or n>=self.N-nn-1: + # print('diag loop1: ', m,n) + self.iMl +=[MM(-self.Xt(m-1),self.iM(m-1,n))]#[-self.Xt(m-1).dot(self.iM(m-1,n))] + self.inds+=[[m,n]] + self.iMl_dict.update({(m,n):it}) + it+=1 + + for m in range(n-1,-1,-1): + if n <= nn or m >= self.N-nn-1: + # print('diag loop2: ',m,n) + self.iMl +=[MM(-self.Yt(m+1),self.iM(m+1,n))]#[-self.Yt(m+1).dot(self.iM(m+1,n))] + self.inds+=[[m,n]] + self.iMl_dict.update({(m,n):it}) + it+=1 + # print(self.inds) + for n in range(nn+1): + mt = nn + # print('offdiag loop 1: ',mt,n) + M_temp = self.iM(mt, n) + #Propagate through region we dont need + for j in range(self.N-2*(nn+1)): + M_temp = MM(-self.Xt(mt), M_temp) + mt+=1 + + #Save the matrix elements we need + for j in range(nn+1): + M_temp = MM(-self.Xt(mt), M_temp) + self.iMl += [M_temp] + mt+=1 + self.inds+= [[mt,n]] + self.iMl_dict.update({(mt,n): it}) + it+=1 + + for n in range(self.N - (nn + 1), self.N): + mt = self.N - (nn + 1) + # print('offdiag loop 2: ',mt,n) + M_temp = self.iM(mt, n) + + for j in range(self.N - 2*(nn+1)): + M_temp = MM(- self.Yt(mt), M_temp) + mt-=1 + + for j in range(nn+1): + M_temp = MM(-self.Yt(mt), M_temp) + self.iMl += [M_temp] + mt-=1 + self.inds+=[[mt,n]] + self.iMl_dict.update({(mt,n): it}) + it+=1 + + + + Res = block_sparse(self.inds,self.iMl,self.is_zero.shape,E_grid = self.E_grid) + self.Clean_inverse() + gc.collect() + return Res + + elif BW[0:4]=='diag': + which_n = 
BW[4:].split() + which_n = [int(i) for i in which_n] + self.Gen_Inv_Diag(which_n = which_n,forget_tilde = True) + Res = block_sparse(self.inds,self.iMl,self.is_zero.shape,E_grid = self.E_grid) + self.Clean_inverse() + gc.collect() + return Res + + + def Clean_inverse(self): + self.inds = None + self.iMl = None + self.iMl_dict = None + self.inds = [] + self.iMl = [] + + def Block(self,i,j): + if i==j: + return self.A(i) + if i==j+1: + return self.B(j) + if i==j-1: + return self.C(i) + else: + return None + + def _A(self,n): + if n<0 or n>self.N-1:print('Index error A'); error() + return self.Al[self.I_al[n]] + + def _B(self,n): + if n<0 or n>self.N-2:print('Index error B'); error() + return self.Bl[self.I_bl[n]] + + def _C(self,n): + if n<0 or n>self.N-2:print('Index error C'); error() + return self.Cl[self.I_cl[n]] + + def A(self, n): + if self.has_been_transposed == False and self.has_been_conjugated == False: + return self._A(n) + if self.has_been_transposed == False and self.has_been_conjugated == True: + return np.conj(self._A(n)) + if self.has_been_transposed == True and self.has_been_conjugated == False: + return Transpose(self._A(n)) + if self.has_been_transposed == True and self.has_been_conjugated == True: + return np.conj(Transpose(self._A(n))) + + def B(self, n): + if self.has_been_transposed == False and self.has_been_conjugated == False: + return self._B(n) + if self.has_been_transposed == False and self.has_been_conjugated == True: + return np.conj(self._B(n)) + if self.has_been_transposed == True and self.has_been_conjugated == False: + return Transpose(self._C(n)) + if self.has_been_transposed == True and self.has_been_conjugated == True: + return np.conj(Transpose(self._C(n))) + + def C(self, n): + if self.has_been_transposed == False and self.has_been_conjugated == False: + return self._C(n) + if self.has_been_transposed == False and self.has_been_conjugated == True: + return np.conj(self._C(n)) + if self.has_been_transposed == True and 
self.has_been_conjugated == False: + return Transpose(self._B(n)) + if self.has_been_transposed == True and self.has_been_conjugated == True: + return np.conj(Transpose(self._B(n))) + + def X(self,n): + if n<0 or n>self.N-1:print('Index error X');error() + return self.Xs[n] + def Y(self,n): + if n<0 or n>self.N-1:print('Index error Y');error() + return self.Ys[n] + def Yt(self,n): + if n<1 or n>self.N-1:print('Index error Yt');error() + return self.Yts[n] + def Xt(self,n): + if n<0 or n>self.N-2:print('Index error Xt');error() + return self.Xts[n] + + def Shape(self): + n0,n1=0,0 + for i in range(len(self.I_al)): + n0+=self.A(i).shape[0+self.num_vect_inds] + n1+=self.A(i).shape[1+self.num_vect_inds] + + if self.num_vect_inds==0: + self.shape=(n0,n1) + elif self.num_vect_inds==1: + self.shape=(self.A(i).shape[0],n0,n1) + elif self.num_vect_inds==2: + self.shape=(self.A(i).shape[0],self.A(i).shape[1],n0,n1) + elif self.num_vect_inds==3: + #print('Please visit https://downloadmoreram.com/') + self.shape=(self.A(i).shape[0],self.A(i).shape[1],self.A(i).shape[2],n0,n1) + self.Block_shape=(len(self.I_al),len(self.I_al)) + + def do_transposition(self): + self.is_zero = self.is_zero.T + new_all_slices = [] + for j in range(self.Block_shape[1]): + new_all_slices_j = [] + for i in range(self.Block_shape[0]): + new_all_slices_j+=[self.all_slices[i][j][::-1]] + new_all_slices+=[new_all_slices_j] + + self.all_slices = new_all_slices + self.has_been_transposed = not self.has_been_transposed + + def do_conjugation(self): + self.has_been_conjugated = not self.has_been_conjugated + + + + def do_dag(self): + self.do_transposition() + self.do_conjugation() + + def copy(self): + new_Al = [self.Al[i].copy() for i in range(len(self.Al))] + new_Bl = [self.Bl[i].copy() for i in range(len(self.Bl))] + new_Cl = [self.Cl[i].copy() for i in range(len(self.Cl))] + if self.E_grid is not None: + E_grid_new = self.E_grid.copy() + else: + E_grid_new = None + return 
block_td(new_Al,new_Bl,new_Cl,self.I_al.copy(),self.I_bl.copy(),self.I_cl.copy(),self.diagonal_zeros.copy(),E_grid = E_grid_new) + + #Kopieret fra block_sparse + def Tr(self,Ei = None): + if Ei is None: + return block_TRACE(self) + else: + return block_TRACE_interpolated(self,Ei) + + def TrProd(self,A,Ei1=None,Ei2=None,warning='yes'): + if Ei1 is None and Ei2 is None: + return block_TRACEPROD(self,A) + else: + return block_TRACEPROD_interpolated(self,A,Ei1,Ei2) + + def SumAll(self): + return block_SUMALL(self) + + def SumAllMatrixEntries(self): + return block_SUMALLMATRIXINDECIES(self) + + def BDot(self,A,Ei1=None,Ei2=None): + if Ei1 is None and Ei2 is None: + return block_MATMUL(self,A) + else: + return block_MATMUL_interpolated(self,A,Ei1,Ei2) + + def Add(self,A,Ei1=None,Ei2=None): + if Ei1 is None and Ei2 is None: + return block_ADD(self,A) + else: + return block_ADD_interpolated(self,A,Ei1,Ei2) + + def Subtract(self,A,Ei1=None,Ei2=None): + if Ei1 is None and Ei2 is None: + return block_SUBTRACT(self,A) + else: + return block_SUBTRACT_interpolated(self,A,Ei1,Ei2) + + def MulEleWise(self,A,Ei1=None,Ei2=None): + if Ei1 is None and Ei2 is None: + return block_MULELEMENTWISE(self,A) + else: + return block_MULELEMENTWISE_interpolated(self,A,Ei1,Ei2) + + def A_Adag(self,Ei1,Ei2): + return block_A_Adag_Interpolated(self,Ei1,Ei2) + + def scalar_add(self, s): + return block_td([al + s for al in self.Al], + [bl + s for bl in self.Bl], + [cl + s for cl in self.Cl], + self.I_al.copy(), + self.I_bl.copy(), + self.I_cl.copy(), + diagonal_zeros = self.diagonal_zeros, E_grid = self.E_grid) + + def scalar_multiply(self, s): + return block_td([al * s for al in self.Al], + [bl * s for bl in self.Bl], + [cl * s for cl in self.Cl], + self.I_al.copy(), + self.I_bl.copy(), + self.I_cl.copy(), + diagonal_zeros = self.diagonal_zeros, E_grid = self.E_grid) + + + +class block_sparse: + def __init__(self, inds, vals,Block_shape,E_grid=None,FoRcE_dTypE = None): + self.inds = inds.copy() + 
self.vals = [v.copy() for v in vals] + + self.Block_shape = Block_shape + self.FoRcE_dTypE = FoRcE_dTypE + + self.has_been_conjugated = False + self.has_been_transposed = False + + self.info() + + if np.any(self.is_zero): + self.num_vect_inds = max([len(vals[i].shape)-2 for i in range(len(vals))]) + else: + self.num_vec_inds = 0 + + self.E_grid = E_grid + + # def _Block_old_dont_use(self,i,j): + # lind=inds_to_lind([i,j],self.inds) + # if lind is not None: + # return self.vals[lind] + # else: + # return None + + + def Block(self,i,j): + + if hasattr(self, 'Symmetric'): + try: + try: + lind = self.ind_dict[(i,j)] + return self.vals[lind] + except: + pass + try: + lind = self.ind_dict[(j,i)] + return Transpose(self.vals[lind]) + except: + pass + + except KeyError: + return None + + if self.has_been_transposed == False and self.has_been_conjugated == False: + try: + lind = self.ind_dict[(i,j)] + return self.vals[lind] + except KeyError: + return None + if self.has_been_transposed == False and self.has_been_conjugated == True: + try: + lind = self.ind_dict[(i,j)] + return np.conj(self.vals[lind]) + except KeyError: + return None + if self.has_been_transposed == True and self.has_been_conjugated == False: + try: + lind = self.ind_dict[(j,i)] + return Transpose(self.vals[lind]) + except KeyError: + return None + if self.has_been_transposed == True and self.has_been_conjugated == True: + try: + lind = self.ind_dict[(j,i)] + return np.conj(Transpose(self.vals[lind])) + except KeyError: + return None + + def info(self): + inds_d = {} + it = 0 + for ind in self.inds: + inds_d.update({(ind[0],ind[1]):it}) + it+=1 + self.ind_dict = inds_d + it=0 + + is_zero=[] + n0=self.Block_shape[0] + n1=self.Block_shape[1] + + for i in range(n0): + z = [] + for j in range(n1): + if not all_zero(self.Block(i,j)): + z+=[1] + else: + z+=[0] + + is_zero+=[z] + + self.is_zero=np.array(is_zero) + + if self.FoRcE_dTypE is None: + self.dtype = self.vals[0].dtype + else: + self.dtype = 
self.FoRcE_dTypE + + + + def do_transposition(self): + self.has_been_transposed = not self.has_been_transposed + self.is_zero = self.is_zero.T + self.Block_shape = self.Block_shape[::-1] + + def do_conjugation(self): + self.has_been_conjugated = not self.has_been_conjugated + + + + + + def do_dag(self): + self.do_transposition() + self.do_conjugation() + + def copy(self): + vals_new = [self.vals[i].copy() for i in range(len(self.vals))] + inds_new = self.inds.copy() + if self.E_grid is not None: + E_grid_new = self.E_grid.copy() + else: + E_grid_new = None + BS = self.Block_shape.copy() + if self.has_been_transposed: + BS = BS[::-1] + return block_sparse(inds_new, vals_new, BS, E_grid = E_grid_new) + + def Tr(self,Ei = None): + if Ei is None: + return block_TRACE(self) + else: + return block_TRACE_interpolated(self,Ei) + + def TrProd(self,A,Ei1=None,Ei2=None,warning='yes'): + if Ei1 is None and Ei2 is None: + return block_TRACEPROD(self,A) + else: + return block_TRACEPROD_interpolated(self,A,Ei1,Ei2) + + def SumAll(self): + return block_SUMALL(self) + + def SumAllMatrixEntries(self): + return block_SUMALLMATRIXINDECIES(self) + + def BDot(self,A,Ei1=None,Ei2=None): + if Ei1 is None and Ei2 is None: + return block_MATMUL(self,A) + else: + return block_MATMUL_interpolated(self,A,Ei1,Ei2) + + def Add(self,A,Ei1=None,Ei2=None): + if Ei1 is None and Ei2 is None: + return block_ADD(self,A) + else: + return block_ADD_interpolated(self,A,Ei1,Ei2) + def Subtract(self,A,Ei1=None,Ei2=None): + if Ei1 is None and Ei2 is None: + return block_SUBTRACT(self,A) + else: + return block_SUBTRACT_interpolated(self,A,Ei1,Ei2) + def MulEleWise(self,A,Ei1=None,Ei2=None): + if Ei1 is None and Ei2 is None: + return block_MULELEMENTWISE(self,A) + else: + return block_MULELEMENTWISE_interpolated(self,A,Ei1,Ei2) + + def Make_BTD(self, Mask = None) : + Al = []; Bl = []; Cl = [] + Ia = []; Ib = []; Ic = [] + if self.Block_shape[0] != self.Block_shape[1]: + print('Matrix is not square, so cant 
convert block trididagonal matrix') + return + bs = self.Block_shape[0] + if Mask is None: + Mask = [[], [], []] + i_idx = np.arange(0,bs,2) + if np.mod(bs,2)==1: + i_idx = i_idx[0:len(i_idx)-1] + for i in i_idx: + + Mask[1] += [np.array([ + [ + [i,i], [i,i+1] + ] + , + [ + [i+1,i],[i+1,i+1] + ] + ]) + ] + if i != i_idx[-1]: + Mask[0] += [np.array([ + [ + [i+2,i], [i+2,i+1] + ] + , + [ + [i+3,i], [i+3,i+1] + ] + ]) + ] + Mask[2] += [np.array([ + [ + [i,i+2], [i, i+3] + ] + , + [ + [i+1,i+2], [i+1,i+3] + ] + ]) + ] + + if np.mod(bs,2) == 1: + Mask[1] += [np.array([ + [ + [i+2,i+2] + ] + ]) + ] + Mask[0] += [np.array([ + [ + [i+2,i ], [i+2,i+1] + ] + ]) + ] + Mask[2] += [np.array([ + [ + [i, i+2] + ] + , + [ + [i+1,i+2] + ] + ]) + ] + Shapes = [[], [], []] + pcount = 0 + for P in Mask: + never_use_i= 0 + for idx in P: + s = idx.shape + ske = [] + ske_shape = [] + for i in range(s[0]): + let = [] + let_shape = [] + for j in range(s[1]): + bi = idx[i,j,0] + bj = idx[i,j,1] + + if self.Block(bi,bj) is not None: + BB = self.Block(bi,bj).copy() + else: + for k in range(self.Block_shape[0]): + tb = self.Block(k, bj) + if tb is not None: + col_s = tb.shape[-1] + break + elif k == self.Block_shape[0]-1: + print('error in making BTD????') + assert 1 == 2 + for k in range(self.Block_shape[1]): + tb = self.Block(bi, k) + if tb is not None: + row_s = tb.shape[-2] + break + elif k == self.Block_shape[1]-1: + print('error in making BTD????') + assert 1 == 2 + + zero_shape = self.vals[0][...,0,0].shape + (row_s,col_s) + BB = np.zeros(zero_shape, dtype = self.dtype).copy() + let+=[BB] + let_shape+=[BB.shape] + ske+=[let] + ske_shape += [let_shape] + + Block = np.block(ske) + if pcount == 1: + Al+=[Block]; Ia+=[never_use_i] + if pcount == 0: + Bl+=[Block]; Ib+=[never_use_i] + if pcount == 2: + Cl+=[Block]; Ic+=[never_use_i] + Shapes[pcount] += [ske_shape] + + never_use_i+=1 + pcount+=1 + + Out = block_td(Al,Bl,Cl,Ia,Ib,Ic,diagonal_zeros = False, E_grid = self.E_grid) + Out.BTD_Mask = 
Mask + Out.shapes_mask = Shapes + + return Out + + def Get_diagonal(self, slices): + n = min(self.Block_shape) + num = slices[-1][-1][0].stop + s = self.vals[0][...,0,0].shape+(num,) + diag = np.zeros(s, dtype = self.dtype) + + for i in range(n): + si = slices[i][i][0] + block_ii = self.Block(i,i) + if block_ii is not None: + vals = [] + + for i in range(si.stop-si.start): + diag[..., si.start + i] = block_ii[ ... ,i,i] + + return diag + + def get_e_subset(self, idx): + #inds, vals,Block_shape,E_grid + return block_sparse(self.inds, + [v[... , idx, :,:] for v in self. vals], + Block_shape = self.Block_shape, + E_grid = None) + + + + def tonp(self,slices): + return Blocksparse2Numpy(self,slices) + + def eig(self,slices, hermitian = False, as_dense = False): + if hermitian: + EIG = np.linalg.eigh + else: + EIG = np.linalg.eig + + print('\n Diagonalising matrix!\n') + # meant for taking squareroots of scattering matrices + # lazily implemented using scipy.linalg.sqrtm + + nv = self.num_vect_inds + idx,jdx = np.where(self.is_zero != 0) + + i_min, i_max = idx.min(), idx.max() + j_min, j_max = jdx.min(), jdx.max() + + mat = [] + def len_slice(s): + return s.stop - s.start + + S = self.vals[0].shape[:-2] + subslices = [] + + for i in range(i_min, i_max+1): + lines = [] + subsubslices = [] + for j in range(j_min, j_max+1): + si, sj = slices[i][j] + B = self.Block(i,j) + if B is None: + B = np.zeros(S + (len_slice(si), len_slice(sj)), dtype = self.dtype) + lines += [B] + subsubslices+=[[si, sj]] + mat += [lines] + subslices+=[subsubslices] + + sub_mat = np.block(mat) + e,v = EIG(sub_mat) + if as_dense: + return e, v, slices[i_min][i_min][0].start, slices[i_max][i_max][0].stop-1 + + + def zero_slices(inp_slices): + new_slices = [] + zero = inp_slices[0][0][0].start,inp_slices[0][0][1].start + for rows in inp_slices: + new_cols = [] + for cols in rows: + si, sj = cols + sin = slice(si.start - zero[0], si.stop - zero[0]) + sjn = slice(sj.start - zero[1], sj.stop - zero[1]) + 
new_cols += [ [sin, sjn] ] + new_slices += [ new_cols ] + return new_slices + + zeroed_slices = zero_slices(subslices) + new_idx = [] + new_blocks = [] + new_idx_diag = [] + new_blocks_diag = [] + + for i in range(i_max - i_min +1): + for j in range(j_max - j_min +1): + new_idx += [[i + i_min,j + j_min]] + si,sj = zeroed_slices[i][j] + new_blocks += [v[..., si,sj]] + if i == j: + new_idx_diag += [[i + i_min,j + j_min]] + nvals = si.stop - si.start + m = np.zeros(S + (nvals , nvals), dtype = e.dtype) + midx = np.arange(si.stop - si.start) + m[..., midx,midx] = e[..., si] + new_blocks_diag+= [m.copy()] + return block_sparse(new_idx_diag, new_blocks_diag, self.Block_shape, E_grid = self.E_grid), block_sparse(new_idx , new_blocks , self.Block_shape, E_grid = self.E_grid) + + def sqrt(self, slices): + print('using scipy.linalg.sqrtm + for loops, pretty slow') + # meant for taking squareroots of scattering matrices + # lazily implemented using scipy.linalg.sqrtm + + nv = self.num_vect_inds + idx,jdx = np.where(self.is_zero != 0) + + i_min, i_max = idx.min(), idx.max() + j_min, j_max = jdx.min(), jdx.max() + + mat = [] + def len_slice(s): + return s.stop - s.start + + S = self.vals[0].shape[:-2] + subslices = [] + + for i in range(i_min, i_max+1): + lines = [] + subsubslices = [] + for j in range(j_min, j_max+1): + si, sj = slices[i][j] + B = self.Block(i,j) + if B is None: + B = np.zeros(S + (len_slice(si), len_slice(sj)), dtype = self.dtype) + lines += [B] + subsubslices+=[[si, sj]] + mat += [lines] + subslices+=[subsubslices] + + sub_mat = np.block(mat) + sqrt = sqrtm_on_KLMij(sub_mat) + + def zero_slices(inp_slices): + new_slices = [] + zero = inp_slices[0][0][0].start,inp_slices[0][0][1].start + for rows in inp_slices: + new_cols = [] + for cols in rows: + si, sj = cols + sin = slice(si.start - zero[0], si.stop - zero[0]) + sjn = slice(sj.start - zero[1], sj.stop - zero[1]) + new_cols += [ [sin, sjn] ] + new_slices += [ new_cols ] + return new_slices + + 
zeroed_slices = zero_slices(subslices) + new_idx = [] + new_blocks = [] + for i in range(i_max - i_min +1): + for j in range(j_max - j_min +1): + new_idx += [[i + i_min,j + j_min]] + si,sj = zeroed_slices[i][j] + new_blocks += [sqrt[..., si,sj]] + + return block_sparse(new_idx, new_blocks, self.Block_shape, E_grid = self.E_grid) + + def scalar_add(self, s): + return block_sparse(self.inds.copy(), + [v + s for v in self.vals], + self.Block_shape, E_grid = self.E_grid) + def scalar_multiply(self, s): + return block_sparse(self.inds.copy(), + [v * s for v in self.vals], + self.Block_shape, E_grid = self.E_grid) + +def block_TRACE(A): + n=A.is_zero.shape[0] + return sum([np.trace(A.Block(i,i),axis1=-1,axis2=-2) for i in range(n) if A.Block(i,i) is not None]) + +def block_SUMALLMATRIXINDECIES(A): + I,J = Wh(A.is_zero>0) + n_ind = len(I) + if n_ind==0: + return 0 + else: + S=[] + for tæl in range(n_ind): + i,j = I[tæl],J[tæl] + S+=[np.sum(A.Block(i,j),axis=(-1,-2))] + return sum(S) + +def block_SUMALL(A): + return np.sum(block_SUMALLMATRIXINDECIES(A)) + +def block_TRACEPROD(A1,A2): + Res = block_SUMALLMATRIXINDECIES(block_MULELEMENTWISE_TRANSPOSE_LAST(A1, A2)) + return Res + +def block_MATMUL(A1,A2): + assert A1.Block_shape[1]==A2.Block_shape[0] + Prod_pat = A1.is_zero.dot(A2.is_zero) + I,J = Wh(Prod_pat>0) + Res_inds = [] + Res_vals = [] + n_ind =len(I) + for tæl in range(n_ind): + i,j = I[tæl],J[tæl] + k1 = Wh(A1.is_zero[i,:]==1)[0] + k2 = Wh(A2.is_zero[:,j]==1)[0] + K=np.intersect1d(k1,k2) + if len(K)>0: + Res_inds+=[[i,j]] + # First = MM(A1.Block(i,K),A2.Block(K,j)).sum(axis = 0) + First = MM(A1.Block(i,K[0]),A2.Block(K[0],j)) + for k in K[1:]: + First += MM(A1.Block(i,k),A2.Block(k,j)) + Res_vals += [First] + return block_sparse(Res_inds.copy(),Res_vals.copy(),(A1.is_zero.shape[0],A2.is_zero.shape[1])) + + +def flatten(x): + result = [] + for el in x: + if hasattr(el, "__iter__") and not isinstance(el, str): + result.extend(flatten(el)) + else: + result.append(el) + 
return result + +def block_ADD(A1,A2): + assert A1.Block_shape==A2.Block_shape + assert A1.dtype==A2.dtype + #assert A1.num_vect_inds==A2.num_vect_inds + + Sum_pat = A1.is_zero + A2.is_zero + I,J = Wh(Sum_pat>0) + Res_inds = [] + Res_vals = [] + n_ind =len(I) + for tæl in range(n_ind): + i,j = I[tæl],J[tæl] + Res_inds+=[[i,j]] + if A1.Block(i,j) is not None and A2.Block(i,j) is not None: + Res_vals += [A1.Block(i,j)+A2.Block(i,j)] + if A1.Block(i,j) is not None and A2.Block(i,j) is None: + Res_vals += [A1.Block(i,j) ] + if A1.Block(i,j) is None and A2.Block(i,j) is not None: + Res_vals += [A2.Block(i,j) ] + + return block_sparse(Res_inds.copy(),Res_vals.copy(),(A1.is_zero.shape[0],A1.is_zero.shape[1])) + +def block_SUBTRACT(A1,A2): + assert A1.Block_shape==A2.Block_shape + + Sum_pat = A1.is_zero + A2.is_zero + I,J = Wh(Sum_pat>0) + Res_inds = [] + Res_vals = [] + n_ind =len(I) + for tæl in range(n_ind): + i,j = I[tæl],J[tæl] + Res_inds+=[[i,j]] + if A1.Block(i,j) is not None and A2.Block(i,j) is not None: + Res_vals += [ A1.Block(i,j) - A2.Block(i,j)] + if A1.Block(i,j) is not None and A2.Block(i,j) is None: + Res_vals += [ A1.Block(i,j) ] + if A1.Block(i,j) is None and A2.Block(i,j) is not None: + Res_vals += [-A2.Block(i,j) ] + + return block_sparse(Res_inds.copy(),Res_vals.copy(),(A1.is_zero.shape[0],A1.is_zero.shape[1])) + +def block_MULELEMENTWISE(A1,A2): + assert A1.Block_shape==A2.Block_shape + Mul_pat = A1.is_zero * A2.is_zero + I,J = Wh(Mul_pat>0) + Res_inds = [] + Res_vals = [] + n_ind =len(I) + for tæl in range(n_ind): + i,j = I[tæl],J[tæl] + Res_inds+=[[i,j]] + Res_vals+=[A1.Block(i,j)*A2.Block(i,j)] + return block_sparse(Res_inds.copy(),Res_vals.copy(),A1.is_zero.shape) + +def block_MULELEMENTWISE_TRANSPOSE_LAST(A1,A2): + assert A1.Block_shape==A2.Block_shape + Mul_pat = A1.is_zero * A2.is_zero.T + I,J = Wh(Mul_pat>0) + Res_inds = [] + Res_vals = [] + n_ind =len(I) + for tæl in range(n_ind): + i,j = I[tæl],J[tæl] + Res_inds+=[[i,j]] + 
def block_MULELEMENTWISE_interpolated(A1, A2, Ei1, Ei2):
    """Element-wise (Hadamard) product of two block matrices, with each
    operand first interpolated onto its requested energy points
    (``Ei1`` for ``A1``, ``Ei2`` for ``A2``).
    """
    assert A1.Block_shape == A2.Block_shape
    assert A1.dtype == A2.dtype
    #assert A1.num_vect_inds==A2.num_vect_inds
    assert A1.E_grid is not None
    assert A2.E_grid is not None

    F1, IND1 = Interpolate(A1.E_grid, Ei1)
    F2, IND2 = Interpolate(A2.E_grid, Ei2)

    # Only blocks present in BOTH operands survive an element-wise product.
    rows, cols = Wh(A1.is_zero * A2.is_zero > 0)
    out_inds, out_vals = [], []
    for i, j in zip(rows, cols):
        out_inds.append([i, j])
        out_vals.append(Interpolate_block(A1.Block(i, j), F1, IND1)
                        * Interpolate_block(A2.Block(i, j), F2, IND2))

    return block_sparse(out_inds, out_vals, A1.is_zero.shape)
def block_MATMUL_interpolated(A1, A2, Ei1, Ei2):
    """Block matrix product ``A1 @ A2`` with both operands interpolated
    onto their requested energy points (``Ei1`` for ``A1``, ``Ei2`` for
    ``A2``) before multiplying.
    """
    assert A1.dtype == A2.dtype
    assert A1.Block_shape[1] == A2.Block_shape[0]
    assert A1.E_grid is not None
    assert A2.E_grid is not None

    F1, IND1 = Interpolate(A1.E_grid, Ei1)
    F2, IND2 = Interpolate(A2.E_grid, Ei2)

    out_inds, out_vals = [], []
    # Candidate output blocks: wherever the boolean patterns' product is nonzero.
    rows, cols = Wh(A1.is_zero.dot(A2.is_zero) > 0)
    for i, j in zip(rows, cols):
        # Inner block indices where both factors actually store a block.
        K = np.intersect1d(Wh(A1.is_zero[i, :] == 1)[0],
                           Wh(A2.is_zero[:, j] == 1)[0])
        if len(K) == 0:
            continue
        acc = MM(Interpolate_block(A1.Block(i, K[0]), F1, IND1),
                 Interpolate_block(A2.Block(K[0], j), F2, IND2))
        for k in K[1:]:
            acc = acc + MM(Interpolate_block(A1.Block(i, k), F1, IND1),
                           Interpolate_block(A2.Block(k, j), F2, IND2))
        out_inds.append([i, j])
        out_vals.append(acc)

    return block_sparse(out_inds, out_vals,
                        (A1.is_zero.shape[0], A2.is_zero.shape[1]))
Transpose(A.Block(j,K[0])).conj(),F2,IND2 + ) + ) + + for k in K[1:]: + First += MM( + Interpolate_block(A.Block(i,k),F1,IND1), + Interpolate_block( + Transpose(A.Block(j,k)).conj(),F2,IND2 + ) + ) + + Res_vals += [First] + return block_sparse(Res_inds.copy(),Res_vals.copy(),(A.is_zero.shape[0],A.is_zero.shape[0])) + +def block_ADD_interpolated(A1,A2,Ei1,Ei2): + assert A1.Block_shape==A2.Block_shape + assert A1.dtype==A2.dtype + # assert A1.num_vect_inds==A2.num_vect_inds + + assert not isinstance(A1.E_grid,type(None)) + assert not isinstance(A2.E_grid,type(None)) + + F1,IND1 = Interpolate(A1.E_grid,Ei1) + F2,IND2 = Interpolate(A2.E_grid,Ei2) + + Sum_pat = A1.is_zero + A2.is_zero + I,J = Wh(Sum_pat>0) + Res_inds = [] + Res_vals = [] + n_ind =len(I) + for tæl in range(n_ind): + i,j = I[tæl],J[tæl] + Res_inds+=[[i,j]] + if A1.Block(i,j) is not None and A2.Block(i,j) is not None: + Res_vals += [Interpolate_block(A1.Block(i,j),F1,IND1) + Interpolate_block(A2.Block(i,j),F2,IND2)] + if A1.Block(i,j) is not None and A2.Block(i,j) is None: + Res_vals += [Interpolate_block(A1.Block(i,j),F1,IND1) ] + if A1.Block(i,j) is None and A2.Block(i,j) is not None: + Res_vals += [Interpolate_block(A2.Block(i,j),F2,IND2) ] + return block_sparse(Res_inds.copy(),Res_vals.copy(),(A1.is_zero.shape[0],A1.is_zero.shape[1])) + +def block_SUBTRACT_interpolated(A1,A2,Ei1,Ei2): + assert A1.Block_shape==A2.Block_shape + assert A1.dtype==A2.dtype + # assert A1.num_vect_inds==A2.num_vect_inds + + assert not isinstance(A1.E_grid,type(None)) + assert not isinstance(A2.E_grid,type(None)) + + F1,IND1 = Interpolate(A1.E_grid,Ei1) + F2,IND2 = Interpolate(A2.E_grid,Ei2) + + Sum_pat = A1.is_zero + A2.is_zero + I,J = Wh(Sum_pat>0) + Res_inds = [] + Res_vals = [] + n_ind =len(I) + for tæl in range(n_ind): + i,j = I[tæl],J[tæl] + Res_inds+=[[i,j]] + if A1.Block(i,j) is not None and A2.Block(i,j) is not None: + Res_vals += [ Interpolate_block(A1.Block(i,j),F1,IND1)-Interpolate_block(A2.Block(i,j),F2,IND2)] + 
if A1.Block(i,j) is not None and A2.Block(i,j) is None: + Res_vals += [ Interpolate_block(A1.Block(i,j),F1,IND1) ] + if A1.Block(i,j) is None and A2.Block(i,j) is not None: + Res_vals += [-Interpolate_block(A2.Block(i,j),F2,IND2) ] + return block_sparse(Res_inds.copy(),Res_vals.copy(),(A1.is_zero.shape[0],A1.is_zero.shape[1])) + +def block_TRACE_different_bs(A,a,S,s): + #Matrix with more grainy block structure goes on the right.... + if 'block_td' in A.__str__(): + Res = np.zeros( A.Al[0][..., 0, 0].shape, dtype = A.dtype ) + else: + Res = np.zeros( A.vals[0][..., 0, 0].shape, dtype = A.dtype ) + assert S[-1][-1][0].stop == s[-1][-1][0].stop + assert S[-1][-1][1].stop == s[-1][-1][1].stop + + idx_A = np.where(A.is_zero==1) + idx_a = np.where(a.is_zero==1) + used_idx = [] + for tæller in range(len(idx_A[0])): + I,J = idx_A[0][tæller], idx_A[1][tæller] + sA0, sA1 = S[I][J][0], S[I][J][1] + z0, z1 = sA0.start, sA1.start + sA0, sA1= slice(0, sA0.stop - z0), slice(0, sA1.stop - z1) + B = A.Block(I,J) + for rellæt in range(len(idx_a[0])): + #Tr AB = sum(A .* B.T) + j,i = idx_a[0][rellæt], idx_a[1][rellæt] + sa0, sa1 = s[i][j][0], s[i][j][1] + sa0, sa1 = slice( sa1.start - z0, sa1.stop - z0), slice( sa0.start - z1, sa0.stop - z1) + b = Transpose(a.Block(i,j)) + if sA0.start<= sa0.start < sA0.stop and sA0.start<= sa0.stop <= sA0.stop: + if sA1.start<= sa1.start < sA1.stop and sA1.start<= sa1.stop <= sA1.stop: + # print(sa0,sa1, sA0, sA1) + if [j,i] not in used_idx: + Res += np.sum(B[..., sa0,sa1] * b, axis = (-1,-2) ) + used_idx += [[j,i]] + + return Res + +@jit +def Interp(E0,E): + assert E.min()>=E0.min() + assert E.max()< E0.max() + F = np.zeros((len(E),2)) + inds = np.zeros((len(E),2)) + ne=len(E) + ne0=len(E0) + for j in range(ne): + e=E[j].real + for i in range(ne0-1): + if E0[i]<=eemax: begin = False + if begin: cols+=[i] + + cols = np.array(cols) + cols = np.unique(cols) + mask = np.diag(np.ones(n_diags)).astype(np.int32) + mask[:,cols] = 1 + return 
Blocksparse2Numpy(iGreens.Invert(mask), iGreens.all_slices)[:,ipiv, :][:,:,ipiv] + + + elif mode == 'Full': + G = iGreens.Invert(BW='Upper') + G.Symmetric = 'Set this to whatever, the code checks if the G object has this attribute, not its value' + return Blocksparse2Numpy(G, iGreens.all_slices)[:,ipiv, :][:,:,ipiv] + elif mode == 'nonsymmetric': + G = iGreens.Invert(BW='all') + return Blocksparse2Numpy(G, iGreens.all_slices)[:,ipiv, :][:,:,ipiv] def _nested_list(*args): if len(args) == 0: @@ -43,6 +201,16 @@ def _nested_list(*args): return l +"""def _choose_G(*args, **kwargs): + if "tbt" not in kwargs: + _G = _G_dens(*args) + else: + tbt = kwargs["tbt"] + Ov = kwargs["Ov"] if "Ov" in kwargs else None + Alloced_G = kwargs["Alloced_G"] if "Alloced_G" in kwargs else None + def _G(*args): + return _G_btd(*args, tbt=tbt, Ov=Ov, Alloced_G=Alloced_G)""" + class NEGF: r""" This class creates the open quantum system object for a N-terminal device @@ -78,7 +246,6 @@ class NEGF: ----- This class has to be generalized to non-orthogonal basis """ - def __init__(self, Hdev, elec_SE, elec_idx, CC=None, V=0, **kwargs): """ Initialize NEGF class """ @@ -87,6 +254,19 @@ def __init__(self, Hdev, elec_SE, elec_idx, CC=None, V=0, **kwargs): self.kT = Hdev.kT self.eta = 0.1 + if "tbt" in kwargs: + self.tbt = kwargs["tbt"] + else: + self.tbt = None + if "Ov" in kwargs: + self.Ov = kwargs["Ov"] + else: + self.Ov = None + if "Alloced_G" in kwargs: + self.Alloced_G = kwargs["Alloced_G"] + else: + self.Alloced_G = None + # Immediately retrieve the distribution dist = sisl.get_distribution('fermi_dirac', smearing=self.kT) @@ -167,10 +347,10 @@ def convert2SelfEnergy(elec_SE, mu): # spin, electrode self._ef_SE = _nested_list(Hdev.spin_size, len(self.elec_SE)) - # spin, EQ-contour, energy, electrode - self._cc_eq_SE = _nested_list(Hdev.spin_size, *self.CC_eq.shape, len(self.elec_SE)) - # spin, energy, electrode - self._cc_neq_SE = _nested_list(Hdev.spin_size, self.CC_neq.shape[0], 
len(self.elec_SE)) + # spin, EQ-contour, electrode, energy + self._cc_eq_SE = _nested_list(Hdev.spin_size, self.CC_eq.shape[0], len(self.elec_SE), self.CC_eq.shape[1]) + # spin, electrode, energy + self._cc_neq_SE = _nested_list(Hdev.spin_size, len(self.elec_SE), self.CC_neq.shape[0]) kw = {} for i, se in enumerate(self.elec_SE): @@ -184,14 +364,14 @@ def convert2SelfEnergy(elec_SE, mu): for cc_eq_i, CC_eq in enumerate(self.CC_eq): for ic, cc in enumerate(CC_eq): # Do it also for each point in the CC, for all EQ CC - self._cc_eq_SE[spin][cc_eq_i][ic][i] = se.self_energy(cc, **kw) - + self._cc_eq_SE[spin][cc_eq_i][i][ic] = se.self_energy(cc, **kw) if self.NEQ: for ic, cc in enumerate(self.CC_neq): # And for each point in the Neq CC - self._cc_neq_SE[spin][ic][i] = se.self_energy(cc, **kw) + self._cc_neq_SE[spin][i][ic] = se.self_energy(cc, **kw) + - def calc_n_open(self, H, q, qtol=1e-5): + def calc_n_open(self, H, q, qtol=1e-5, Nblocks=5): """ Method to compute the spin densities from the non-equilibrium Green's function @@ -204,6 +384,8 @@ def calc_n_open(self, H, q, qtol=1e-5): qtol: float, optional tolerance to which the charge is going to be converged in the internal loop that finds the potential of the device (i.e. that makes the device neutrally charged) + Nblocks: int, optional + number of blocks in which the energy contour will be split to obtain the Green's matrix (to obtain the NEQ integrals) Returns ------- @@ -212,8 +394,11 @@ def calc_n_open(self, H, q, qtol=1e-5): Etot: float total energy """ + form = 'csr' if self.tbt is not None else 'array' + # ensure scalar, for open systems one cannot impose a spin-charge # This spin-charge would be dependent on the system size + q = np.asarray(q).sum() # Create short-hands @@ -262,12 +447,12 @@ def calc_n_open(self, H, q, qtol=1e-5): f = 0. 
for spin in range(H.spin_size): if H.spin_size == 2: - HC = H.H.Hk(spin=spin, format='array') + HC = H.H.Hk(spin=spin, format=form) else: - HC = H.H.Hk(format='array') - cc = Ef + 1j * self.eta + HC = H.H.Hk(format=form) - GF = _G(cc, HC, self.elec_idx, ef_SE[spin]) + GF = _G([Ef + 1j * self.eta], HC, self.elec_idx, ef_SE[spin], + tbt=self.tbt, Ov=self.Ov, alloced_G=self.Alloced_G) # Now we need to calculate the new Fermi level based on the # difference in charge and by estimating the current Fermi level @@ -276,7 +461,7 @@ def calc_n_open(self, H, q, qtol=1e-5): # F(x) - F(0) = arctan(x) / pi = dq # In our case we *know* that 0.5 = - Im[Tr(Gf)] / \pi # and consider this a pre-factor - f -= np.trace(GF).imag / _pi + f -= GF.sum(axis=1).imag / _pi # calculate fractional change f = dq / f @@ -289,18 +474,16 @@ def calc_n_open(self, H, q, qtol=1e-5): Etot = 0. for spin in range(H.spin_size): if H.spin_size==2: - HC = H.H.Hk(spin=spin, format='array') + HC = H.H.Hk(spin=spin, format=form) else: - HC = H.H.Hk(format='array') + HC = H.H.Hk(format=form) D = np.zeros([len(self.CC_eq), no], dtype=np.complex128) if self.NEQ: # Correct Density matrix with Non-equilibrium integrals - Delta, w = self.Delta(HC, Ef, spin=spin) - # Store only diagonal - w = np.diag(w) + Delta, w = self.Delta(HC, Ef, spin=spin, Nblocks=Nblocks) # Transfer Delta to D - D[0, :] = np.diag(Delta[1]) # Correction to left: Delta_R - D[1, :] = np.diag(Delta[0]) # Correction to right: Delta_L + D[0, :] = Delta[1] # Correction to left: Delta_R + D[1, :] = Delta[0] # Correction to right: Delta_L # TODO We need to also calculate the total energy for NEQ # this should probably be done in the Delta method del Delta @@ -310,20 +493,23 @@ def calc_n_open(self, H, q, qtol=1e-5): # Loop over all eq. 
Contours for cc_eq_i, CC in enumerate(self.CC_eq): - for ic, [cc, wi] in enumerate(zip(CC + Ef, self.w_eq)): - - GF = _G(cc, HC, self.elec_idx, cc_eq_SE[spin][cc_eq_i][ic]) - - # Greens function evaluated at each point of the CC multiplied by the weight - Gf_wi = - np.diag(GF) * wi - D[cc_eq_i] += Gf_wi.imag - - # Integrate density of states to obtain the total energy - # For the non equilibrium energy maybe we could obtain it as in PRL 70, 14 (1993) - if cc_eq_i == 0: - Etot += ((w * Gf_wi).sum() * cc).imag - else: - Etot += (((1 - w) * Gf_wi).sum() * cc).imag + cc = CC + Ef + self_energy = cc_eq_SE[spin][cc_eq_i] + GF = _G(cc, HC, self.elec_idx, self_energy, mode='DOS', + tbt=self.tbt, Ov=self.Ov, alloced_G=self.Alloced_G) + + # Greens function evaluated at each point of the CC multiplied by the weight + # Each row is the diagonal of Gf(e) multiplied by the weight + Gf_wi = - GF * self.w_eq.reshape(-1,1) + D[cc_eq_i] = Gf_wi.imag.sum(axis=0) # sum elements of each row to evaluate integral over energy + + # Integrate density of states to obtain the total energy + # For the non equilibrium energy maybe we could obtain it as in PRL 70, 14 (1993) + if cc_eq_i == 0: + # Sum over spin components + Etot += ((w * Gf_wi).sum(axis=1) * cc).sum().imag + else: + Etot += (((1 - w) * Gf_wi).sum(axis=1) * cc).sum().imag if self.NEQ: D = w * D[0] + (1 - w) * D[1] @@ -342,7 +528,7 @@ def calc_n_open(self, H, q, qtol=1e-5): # multiply Etot by 2 for spin degeneracy return ni, (2./H.spin_size)*Etot - def Delta(self, HC, Ef, spin=0): + def Delta(self, HC, Ef, spin=0, Nblocks=3): """ Finds the non-equilibrium integrals to correct the left and right equilibrium integrals @@ -354,6 +540,9 @@ def Delta(self, HC, Ef, spin=0): Potential of the device spin: int spin index (0=up, 1=dn) + Nblocks: int, optional + number of blocks in which the energy contour will be split to obtain the Green's matrix, this number should be larger + for a relative large number of orbitals Returns ------- @@ 
-361,23 +550,23 @@ def Delta(self, HC, Ef, spin=0): weight """ def spectral(G, self_energy): - # Use self-energy of elec, now the matrix will have dimension (Nelec, Nelec) - Gamma = 1j * (self_energy - np.conjugate(self_energy.T)) - # Product of (Ndev, Nelec) x (Nelec, Nelec) x (Nelec, Ndev) - return np.dot(G, np.dot(Gamma, np.conjugate(G.T))) + # Use self-energy of elec, now the matrix will have dimension (E, Nelec, Nelec) + Gamma = 1j * (self_energy - np.conjugate(np.transpose(self_energy, axes=[0,2,1]))) + # Product of (E, Ndev, Nelec) x (E, Nelec, Nelec) x (E, Nelec, Ndev) -> (E, Ndev, Ndev) + return einsum('ijk, ikm, imj -> ij', G, Gamma, np.conjugate(np.transpose(G, axes=[0,2,1]))) no = len(HC) - Delta = np.zeros([2, no, no], dtype=np.complex128) + Delta = np.zeros([2, no], dtype=np.complex128) cc_neq_SE = self._cc_neq_SE[spin] - for ic, cc in enumerate(self.CC_neq + Ef): - - GF = _G(cc, HC, self.elec_idx, cc_neq_SE[ic]) - - # Elec (0, 1) are (left, right) - # only do for the first two! - for i, SE in enumerate(cc_neq_SE[ic][:2]): - Delta[i] += spectral(GF[:, self.elec_idx[i].ravel()], SE) * self.w_neq[i, ic] + # Elec (0, 1) are (left, right) + # only do for the first two! + for i in range(2): + for ic, CC in enumerate(np.array_split((self.CC_neq + Ef), Nblocks)): + GF = _G(CC, HC, self.elec_idx, cc_neq_SE, mode='Full') + A = spectral(GF[:, :, self.elec_idx[i].ravel()], np.array_split(np.array(cc_neq_SE)[:, i], Nblocks)[ic]) + # Build Delta for each electrode + Delta[i] += einsum('i, ij -> j', np.array_split(self.w_neq[i], Nblocks)[ic], A) # Firstly implement it for two terminals following PRB 65 165401 (2002) # then we can think of implementing it for N terminals as in Com. Phys. Comm. 
212 8-24 (2017) @@ -419,9 +608,9 @@ def DOS(self, H, E, spin=[0, 1], eta=0.01): # Append all the self-energies for the electrodes at each energy point SE = [se.self_energy(e, spin=ispin) for se in self.elec_SE] - GF = _G(e + 1j * eta, HC, self.elec_idx, SE) + GF = _G([e + 1j * eta], HC, self.elec_idx, SE) - dos[i] -= np.trace(GF).imag + dos[i] -= GF.sum().imag return dos / np.pi @@ -459,7 +648,7 @@ def PDOS(self, H, E, spin=(0, 1), eta=0.01): HC = H.H.Hk(spin=ispin, format='array') for i, e in enumerate(E): SE = [se.self_energy(e, spin=ispin) for se in self.elec_SE] - GF = _G(e + 1j * eta, HC, self.elec_idx, SE) - ldos[i] -= GF.diagonal().imag + GF = _G([e + 1j * eta], HC, self.elec_idx, SE) + ldos[i] -= GF.imag return ldos / np.pi diff --git a/tests/Test_BTD3.py b/tests/Test_BTD3.py new file mode 100644 index 00000000..bda88406 --- /dev/null +++ b/tests/Test_BTD3.py @@ -0,0 +1,189 @@ +import sisl +import numpy as np +import sys +import matplotlib.pyplot as plt +from hubbard import HubbardHamiltonian, density, NEGF, sp2, plot +from scipy import sparse as sp +import time +from ase.visualize import view +# Set U for the whole calculation +U = 3.0 +kT = 0.025 + +class dummy_class: + def __init__(self, pivot, btd): + self._piv = pivot + self._btd = btd + lpivot = list(pivot) + self._ipiv = np.array([lpivot.index(i) for i in range(len(pivot))]) + def pivot(self): return self._piv + def ipivot(self): return self._ipiv + def btd(self): return self._btd + +# Build zigzag GNR + +tx, ty = 400,12 +useBTD = True + +ZGNR = sisl.geom.zgnr(ty) + +# and 3NN TB Hamiltonian +H_elec = sp2(ZGNR, t1=2.7, t2=0.2, t3=0.18)#.tile(ty, 1) + +# Hubbard Hamiltonian of elecs +MFH_elec = HubbardHamiltonian(H_elec, U=U, nkpt=[102, 1, 1], kT=kT) +# Initialize spin densities +MFH_elec.set_polarization([0], dn=[-1]) # Ensure we break symmetry +# Converge Electrode Hamiltonians +#dn = MFH_elec.converge(density.calc_n, mixer=sisl.mixing.PulayMixer(weight=.7, history=7), tol=1e-10) + +dist = 
sisl.get_distribution('fermi_dirac', smearing=kT) +Ef_elec = MFH_elec.H.fermi_level(MFH_elec.mp, q=MFH_elec.q, distribution=dist) +print("Electrode Ef = ", Ef_elec) +# Shift each electrode with its Fermi-level and write it to netcdf file +MFH_elec.H.shift(-Ef_elec) +MFH_elec.H.write('MFH_elec.nc') + +# Central region is a repetition of the electrodes without PBC +HC = H_elec.tile(tx, axis=0) +HC.set_nsc([1, 1, 1]) +# The pristine Block partitioning is easy in this case + +#We place a hole in the center of the device +nHoles = 46 + +step = 10 +Cl = np.array([HC.center() + np.array([step*i,0,0]) for i in range(-nHoles//2+1, nHoles//2+1) + ]) + +Cl = Cl.reshape(nHoles,3) + +Rl = np.array([3.0, + ]*nHoles) + +def Holes(r): + Di = np.linalg.norm(r - Cl, axis = 1) + if (Di RUN.out') + +tbt_up = sisl.get_sile('device.TBT_UP.nc') +tbt_dn = sisl.get_sile('device.TBT_DN.nc') + +p = plot.Plot() +p.axes.plot(tbt_up.E, tbt_up.transmission(0, 1), label=r'$\sigma=\uparrow$') +p.axes.plot(tbt_dn.E, tbt_dn.transmission(0, 1), label=r'$\sigma=\downarrow$') +p.axes.legend() +p.set_xlim(-10, 10) +p.set_xlabel('Energy [eV]') +p.set_ylabel('Transmission [a.u.]') +p.savefig('transmission.pdf') + + diff --git a/tests/test-BTD.py b/tests/test-BTD.py new file mode 100644 index 00000000..1f485cd3 --- /dev/null +++ b/tests/test-BTD.py @@ -0,0 +1,146 @@ +import sisl +import numpy as np +import sys +import matplotlib.pyplot as plt +from hubbard import HubbardHamiltonian, density, NEGF, sp2, plot +from scipy import sparse as sp +import time + +# Set U for the whole calculation +U = 3.0 +kT = 0.025 + +class dummy_class: + def __init__(self, pivot, btd): + self._piv = pivot + self._btd = btd + lpivot = list(pivot) + self._ipiv = np.array([lpivot.index(i) for i in range(len(pivot))]) + def pivot(self): return self._piv + def ipivot(self): return self._ipiv + def btd(self): return self._btd + +# Build zigzag GNR + +tx, ty = 20,10 + +ZGNR = sisl.geom.zgnr(ty) + +# and 3NN TB Hamiltonian +H_elec = 
sp2(ZGNR, t1=2.7, t2=0.2, t3=0.18)#.tile(ty, 1) + +# Hubbard Hamiltonian of elecs +MFH_elec = HubbardHamiltonian(H_elec, U=U, nkpt=[102, 1, 1], kT=kT) +# Initialize spin densities +MFH_elec.set_polarization([0], dn=[-1]) # Ensure we break symmetry +# Converge Electrode Hamiltonians +#dn = MFH_elec.converge(density.calc_n, mixer=sisl.mixing.PulayMixer(weight=.7, history=7), tol=1e-10) + +dist = sisl.get_distribution('fermi_dirac', smearing=kT) +Ef_elec = MFH_elec.H.fermi_level(MFH_elec.mp, q=MFH_elec.q, distribution=dist) +print("Electrode Ef = ", Ef_elec) +# Shift each electrode with its Fermi-level and write it to netcdf file +MFH_elec.H.shift(-Ef_elec) +MFH_elec.H.write('MFH_elec.nc') + +# Central region is a repetition of the electrodes without PBC +HC = H_elec.tile(tx, axis=0) +HC.set_nsc([1, 1, 1]) +# The pristine Block partitioning is easy in this case +Part = [0] +for i in range(tx): + Part += [2 * ty] +#pick out the needed part +Part = Part[1:] +#We place a hole in the center of the device +C = HC.center() +R = 6 +# indices to be removed +ridx = [i for i in range(HC.na) if np.linalg.norm(HC.xyz[i] - C) RUN.out') + +tbt_up = sisl.get_sile('device.TBT_UP.nc') +tbt_dn = sisl.get_sile('device.TBT_DN.nc') + +p = plot.Plot() +p.axes.plot(tbt_up.E, tbt_up.transmission(0, 1), label=r'$\sigma=\uparrow$') +p.axes.plot(tbt_dn.E, tbt_dn.transmission(0, 1), label=r'$\sigma=\downarrow$') +p.axes.legend() +p.set_xlim(-10, 10) +p.set_xlabel('Energy [eV]') +p.set_ylabel('Transmission [a.u.]') +p.savefig('transmission.pdf') + diff --git a/tests/test-BTD_2.py b/tests/test-BTD_2.py new file mode 100644 index 00000000..e69dce51 --- /dev/null +++ b/tests/test-BTD_2.py @@ -0,0 +1,169 @@ +import sisl +import numpy as np +import sys +import matplotlib.pyplot as plt +from hubbard import HubbardHamiltonian, density, NEGF, sp2, plot +from scipy import sparse as sp +import time +from ase.visualize import view +# Set U for the whole calculation +U = 3.0 +kT = 0.025 + +class dummy_class: 
+ def __init__(self, pivot, btd): + self._piv = pivot + self._btd = btd + lpivot = list(pivot) + self._ipiv = np.array([lpivot.index(i) for i in range(len(pivot))]) + def pivot(self): return self._piv + def ipivot(self): return self._ipiv + def btd(self): return self._btd + +# Build zigzag GNR + +tx, ty = 200,6 +useBTD = True + +ZGNR = sisl.geom.zgnr(ty) + +# and 3NN TB Hamiltonian +H_elec = sp2(ZGNR, t1=2.7, t2=0.2, t3=0.18)#.tile(ty, 1) + +# Hubbard Hamiltonian of elecs +MFH_elec = HubbardHamiltonian(H_elec, U=U, nkpt=[102, 1, 1], kT=kT) +# Initialize spin densities +MFH_elec.set_polarization([0], dn=[-1]) # Ensure we break symmetry +# Converge Electrode Hamiltonians +#dn = MFH_elec.converge(density.calc_n, mixer=sisl.mixing.PulayMixer(weight=.7, history=7), tol=1e-10) + +dist = sisl.get_distribution('fermi_dirac', smearing=kT) +Ef_elec = MFH_elec.H.fermi_level(MFH_elec.mp, q=MFH_elec.q, distribution=dist) +print("Electrode Ef = ", Ef_elec) +# Shift each electrode with its Fermi-level and write it to netcdf file +MFH_elec.H.shift(-Ef_elec) +MFH_elec.H.write('MFH_elec.nc') + +# Central region is a repetition of the electrodes without PBC +HC = H_elec.tile(tx, axis=0) +HC.set_nsc([1, 1, 1]) +# The pristine Block partitioning is easy in this case +Part = [] +for i in range(tx): + Part += [2 * ty] + +#We place a hole in the center of the device +nHoles = 46 + +step = 10 +Cl = np.array([HC.center() + np.array([step*i,0,0]) for i in range(-nHoles//2+1, nHoles//2+1) + ]) + +Cl = Cl.reshape(nHoles,3) + +Rl = np.array([3.0, + ]*nHoles) + +def Holes(r): + Di = np.linalg.norm(r - Cl, axis = 1) + if (Di RUN.out') + +tbt_up = sisl.get_sile('device.TBT_UP.nc') +tbt_dn = sisl.get_sile('device.TBT_DN.nc') + +p = plot.Plot() +p.axes.plot(tbt_up.E, tbt_up.transmission(0, 1), label=r'$\sigma=\uparrow$') +p.axes.plot(tbt_dn.E, tbt_dn.transmission(0, 1), label=r'$\sigma=\downarrow$') +p.axes.legend() +p.set_xlim(-10, 10) +p.set_xlabel('Energy [eV]') +p.set_ylabel('Transmission 
[a.u.]') +p.savefig('transmission.pdf') + diff --git a/tests/test-transmission-open.py b/tests/test-transmission-open.py index b6fdc422..744f8297 100644 --- a/tests/test-transmission-open.py +++ b/tests/test-transmission-open.py @@ -3,17 +3,33 @@ import sys import matplotlib.pyplot as plt from hubbard import HubbardHamiltonian, density, NEGF, sp2, plot +from scipy import sparse as sp +import time # Set U for the whole calculation U = 3.0 kT = 0.025 +class dummy_class: + def __init__(self, pivot, btd): + self._piv = pivot + self._btd = btd + lpivot = list(pivot) + self._ipiv = np.array([lpivot.index(i) for i in range(len(pivot))]) + def pivot(self): return self._piv + def ipivot(self): return self._ipiv + def btd(self): return self._btd + + + + # Build zigzag GNR ZGNR = sisl.geom.zgnr(2) # and 3NN TB Hamiltonian H_elec = sp2(ZGNR, t1=2.7, t2=0.2, t3=0.18) + # Hubbard Hamiltonian of elecs MFH_elec = HubbardHamiltonian(H_elec, U=U, nkpt=[102, 1, 1], kT=kT) # Initialize spin densities @@ -29,22 +45,37 @@ MFH_elec.H.write('MFH_elec.nc') # Central region is a repetition of the electrodes without PBC -HC = H_elec.tile(3, axis=0) +Ntile = 20 +HC = H_elec.tile(Ntile, axis=0) HC.set_nsc([1, 1, 1]) +no = HC.geometry.no +piv = np.arange(no) +btd = np.array([8] * (Ntile//2)) +tbt =dummy_class(piv, btd) +Ov = sp.identity(no) + # Map electrodes in the device region elec_indx = [range(len(H_elec)), range(-len(H_elec), 0)] # MFH object -MFH_HC = HubbardHamiltonian(HC.H, n=np.tile(MFH_elec.n, 3), U=U, kT=kT) +MFH_HC = HubbardHamiltonian(HC.H, n=np.tile(MFH_elec.n, Ntile), U=U, kT=kT) + # First create NEGF object -negf = NEGF(MFH_HC, [(MFH_elec, '-A'), (MFH_elec, '+A')], elec_indx) +negf = NEGF(MFH_HC, [(MFH_elec, '-A'), (MFH_elec, '+A')], elec_indx, + tbt = tbt, Ov = Ov + ) + +time_start = time.time() + # Converge using Green's function method to obtain the densities dn = MFH_HC.converge(negf.calc_n_open, steps=1, mixer=sisl.mixing.PulayMixer(weight=.1), tol=0.1) dn = 
MFH_HC.converge(negf.calc_n_open, steps=1, mixer=sisl.mixing.PulayMixer(weight=1., history=7), tol=1e-6, print_info=True) print('Nup, Ndn: ', MFH_HC.n.sum(axis=1)) +print("time:", time.time()-time_start) + # Shift device with its Fermi level and write nc file MFH_HC.H.write('MFH_HC.nc', Ef=negf.Ef)