diff --git a/activelearning/al_conf_cycle.py b/activelearning/al_conf_cycle.py index 4308dc5..a8bbaee 100644 --- a/activelearning/al_conf_cycle.py +++ b/activelearning/al_conf_cycle.py @@ -22,6 +22,8 @@ h5stor = root_dir + 'h5files/'# h5store location +strucsfolder = root_dir + 'strucs/'# strucs location + optlfile = root_dir + 'optimized_input_files.dat' #Comet @@ -73,6 +75,10 @@ 'sig' : M, } +strucsparams = {'N': 40, # number of maximum structures to select before QBC from each XYZ file + 'sig': M, + } + mdsparams = {'N': 2, # trajectories to run 'T1': 300, 'T2': 1000, @@ -91,7 +97,7 @@ 'tsfiles': ['/home/jsmith48/scratch/auto_rxn_al/rxns/'], 'nmfile':None, #path to gaussian log file containing the data 'nm':0, #id of normal mode - 'perc':0, #Move the molecules initial coordiantes along the mode by this amount. Negative numbers are ok. + 'perc':0, #Move the molecules initial coordinates along the mode by this amount. Negative numbers are ok. } dhparams = { 'Nmol': 100, @@ -106,9 +112,9 @@ dmrparams = {#'mdselect' : [(400,0),(60,2),(40,3),(5,4)], 'mdselect' : [(10,0), (1,11)], 'N' : 20, - 'T' : 400.0, # running temp + 'T' : 400.0, # running temp 'L' : 25.0, # box length - 'V' : 0.04, # Random init velocities + 'V' : 0.04, # Random init velocities 'dt' : 0.5, # MD time step 'Nm' : 140, # Molecules to embed #'Nm' : 160, # Molecules to embed @@ -186,10 +192,11 @@ os.mkdir(root_dir + datdir + str(i+1).zfill(2)) ## Run active learning sampling ## - acs = alt.alconformationalsampler(ldtdir, datdir + str(i+1).zfill(2), optlfile, fpatoms, netdict) + acs = alt.alconformationalsampler(ldtdir, datdir + str(i+1).zfill(2), optlfile, strucsfolder, fpatoms, netdict) #acs.run_sampling_cluster(gcmddict, GPU) #acs.run_sampling_dimer(dmrparams, GPU) #acs.run_sampling_nms(nmsparams, GPU) + #acs.run_sampling_strucs(strucsparams, GPU) #acs.run_sampling_md(mdsparams, perc=0.5, gpus=GPU) #acs.run_sampling_TS(tsparams, gpus=[2]) acs.run_sampling_dhl(dhparams, gpus=GPU+GPU) @@ -199,5 
+206,3 @@ ## Submit jobs, return and pack data ast.generateQMdata(hostname, username, swkdir, ldtdir, datdir + str(i+1).zfill(2), h5stor, mae, jtime) - - diff --git a/activelearning/al_ions_cycle.py b/activelearning/al_ions_cycle.py index 43406e8..e85486e 100644 --- a/activelearning/al_ions_cycle.py +++ b/activelearning/al_ions_cycle.py @@ -20,6 +20,8 @@ h5stor = root_dir + 'h5files/'# h5store location +strucsfolder = root_dir + 'strucs/'# strucs locations + optlfile = root_dir + 'optimized_input_files.dat' #Comet @@ -58,10 +60,59 @@ root = '/home/jsmith48/scratch/auto_ion_al' wkdir = root+'/modelions/ANI-AL-smallions/' -iptfile = root+'/modelions/inputtrain.ipt' +#iptfile = root+'/modelions/inputtrain.ipt' saefile = root+'/modelions/sae_linfit.dat' cstfile = root+'/modelions/rHCNOSFCl-5.2R_16-3.8A_a4-8.params' +ipt = alt.anitrainerinputdesigner() + +ipt.set_parameter('atomEnergyFile','sae_linfit.dat') +ipt.set_parameter('sflparamsfile','rHCNOSFCl-5.2R_16-3.8A_a4-8.params') +ipt.set_parameter('eta','0.001') +ipt.set_parameter('energy','1') +ipt.set_parameter('force','0') +ipt.set_parameter('fmult','1.0') +ipt.set_parameter('feps','0.001') +ipt.set_parameter('dipole','0') +ipt.set_parameter('charge','0') +ipt.set_parameter('cdweight','2.0') +ipt.set_parameter('tolr','100') +ipt.set_parameter('tbtchsz','2560') +ipt.set_parameter('vbtchsz','2560') +ipt.set_parameter('nkde','2') + + +# Set network layers +ipt.add_layer('H',{"nodes":320,"activation":9,"type":0,"l2norm":1,"l2valu":5.000e-3}) +ipt.add_layer('H',{"nodes":64 ,"activation":9,"type":0,"l2norm":1,"l2valu":1.000e-6}) +ipt.add_layer('H',{"nodes":128,"activation":9,"type":0,"l2norm":1,"l2valu":1.000e-6}) + +ipt.add_layer('C',{"nodes":288,"activation":9,"type":0,"l2norm":1,"l2valu":5.000e-3}) +ipt.add_layer('C',{"nodes":96 ,"activation":9,"type":0,"l2norm":1,"l2valu":1.000e-6}) +ipt.add_layer('C',{"nodes":128,"activation":9,"type":0,"l2norm":1,"l2valu":1.000e-6}) + 
+ipt.add_layer('N',{"nodes":256,"activation":9,"type":0,"l2norm":1,"l2valu":5.000e-3}) +ipt.add_layer('N',{"nodes":48 ,"activation":9,"type":0,"l2norm":1,"l2valu":1.000e-6}) +ipt.add_layer('N',{"nodes":96 ,"activation":9,"type":0,"l2norm":1,"l2valu":1.000e-6}) + +ipt.add_layer('O',{"nodes":256,"activation":9,"type":0,"l2norm":1,"l2valu":5.000e-3}) +ipt.add_layer('O',{"nodes":48 ,"activation":9,"type":0,"l2norm":1,"l2valu":1.000e-6}) +ipt.add_layer('O',{"nodes":48 ,"activation":9,"type":0,"l2norm":1,"l2valu":1.000e-6}) + +ipt.add_layer('S',{"nodes":192,"activation":9,"type":0,"l2norm":1,"l2valu":5.000e-3}) +ipt.add_layer('S',{"nodes":48 ,"activation":9,"type":0,"l2norm":1,"l2valu":1.000e-6}) +ipt.add_layer('S',{"nodes":80 ,"activation":9,"type":0,"l2norm":1,"l2valu":1.000e-6}) + +ipt.add_layer('F',{"nodes":192,"activation":9,"type":0,"l2norm":1,"l2valu":5.000e-3}) +ipt.add_layer('F',{"nodes":48 ,"activation":9,"type":0,"l2norm":1,"l2valu":1.000e-6}) +ipt.add_layer('F',{"nodes":80 ,"activation":9,"type":0,"l2norm":1,"l2valu":1.000e-6}) + +ipt.add_layer('Cl',{"nodes":192,"activation":9,"type":0,"l2norm":1,"l2valu":5.000e-3}) +ipt.add_layer('Cl',{"nodes":48 ,"activation":9,"type":0,"l2norm":1,"l2valu":1.000e-6}) +ipt.add_layer('Cl',{"nodes":80 ,"activation":9,"type":0,"l2norm":1,"l2valu":1.000e-6}) + +ipt.print_layer_parameters() + #-----------0--------- # Training varibles @@ -74,6 +125,10 @@ 'sig' : M, } +strucsparams = {'N': 40, # number of maximum structures to select before QBC from each XYZ file + 'sig': M, + } + mdsparams = {'N': 1, # trajectories to run 'T1': 300, 'T2': 1000, @@ -92,7 +147,7 @@ 'tsfiles': ['/home/jsmith48/scratch/auto_rxn_al/rxns/'], 'nmfile':None, #path to gaussian log file containing the data 'nm':0, #id of normal mode - 'perc':0, #Move the molecules initial coordiantes along the mode by this amount. Negative numbers are ok. + 'perc':0, #Move the molecules initial coordinates along the mode by this amount. Negative numbers are ok. 
} dhparams = { 'Nmol': 250, @@ -109,9 +164,9 @@ #'mdselect' : [(10,0), (1,11)], #'N' : 20, 'maxNa' : 15, # Largest molecule to consider (for dimers max size is 2x maxNa) - 'T' : 400.0, # running temp + 'T' : 400.0, # running temp 'L' : 25.0, # box length - 'V' : 0.04, # Random init velocities + 'V' : 0.04, # Random init velocities 'dt' : 0.25, # MD time step 'Nm' : 100, # Molecules to embed #'Nm' : 160, # Molecules to embed @@ -144,7 +199,7 @@ ### BEGIN CONFORMATIONAL REFINEMENT LOOP HERE ### #N = [20] -N = [1] +N = [2] for i in N: #netdir = wkdir+'ANI-1x-RXN-0000.00'+str(i).zfill(2)+'/' @@ -156,9 +211,9 @@ nnfprefix = netdir + 'train' - netdict = {'iptfile' : iptfile, - 'cnstfile' : cstfile, + netdict = {'cnstfile' : cstfile, 'saefile': saefile, + 'iptsize': 1008, 'nnfprefix': netdir+'train', 'aevsize': aevsize, 'num_nets': Nnets, @@ -166,23 +221,22 @@ } ## Train the ensemble ## - aln = att.alaniensembletrainer(netdir, netdict, h5stor, Nnets) - aln.build_strided_training_cache(Nblock,Nbvald,Nbtest,False) + aln = att.alaniensembletrainer(netdir, netdict, ipt, h5stor, Nnets) + aln.build_strided_training_cache(Nblock,Nbvald,Nbtest,Ekey='energy',forces=False,dipole=False,rmhighe=True) aln.train_ensemble(GPU) if not os.path.exists(root_dir + datdir + str(i+1).zfill(2)): os.mkdir(root_dir + datdir + str(i+1).zfill(2)) ## Run active learning sampling ## - acs = alt.alconformationalsampler(ldtdir, datdir + str(i+1).zfill(2), optlfile, fpatoms+['H'], netdict) + acs = alt.alconformationalsampler(ldtdir, datdir + str(i+1).zfill(2), optlfile, strucsfolder, fpatoms+['H'], netdict) #acs.run_sampling_cluster(gcmddict, GPU) #acs.run_sampling_dimer(dmrparams, GPU) acs.run_sampling_nms(nmsparams, GPU) - #acs.run_sampling_md(mdsparams, perc=0.25, gpus=GPU+GPU+GPU) + #acs.run_sampling_strucs(strucsparams, GPU) + acs.run_sampling_md(mdsparams, perc=0.25, gpus=GPU+GPU+GPU) #acs.run_sampling_dhl(dhparams, gpus=GPU+GPU) #acs.run_sampling_TS(tsparams, gpus=GPU) ## Submit jobs, return and 
pack data ast.generateQMdata(hostname, username, swkdir, ldtdir, datdir + str(i+1).zfill(2), h5stor, mae, jtime,max_jobs=50) - - diff --git a/lib/anialtools.py b/lib/anialtools.py index 980e6c3..1e364be 100644 --- a/lib/anialtools.py +++ b/lib/anialtools.py @@ -26,7 +26,7 @@ class alconformationalsampler(): # Constructor - def __init__(self, ldtdir, datdir, optlfile, fpatoms, netdict): + def __init__(self, ldtdir, datdir, optlfile, strucsfolder, fpatoms, netdict): self.ldtdir = ldtdir # local working dir self.datdir = datdir # working data dir self.cdir = ldtdir+datdir+'/confs/' # confs store dir (the data gen code looks here for conformations to run QM on) @@ -35,6 +35,8 @@ def __init__(self, ldtdir, datdir, optlfile, fpatoms, netdict): self.optlfile = optlfile # Optimized molecules store path file + self.strucsfolder = strucsfolder # Structures folder + self.idir = [f for f in open(optlfile).read().split('\n') if f != ''] # read and store the paths to the opt files # create cdir if it does not exist @@ -57,6 +59,15 @@ def run_sampling_nms(self, nmsparams, gpus=[0]): p.start() p.join() + # Runs structural sampling (single GPU only currently) + def run_sampling_strucs(self, strucsparams, gpus=[0]): + print('Running Structural sampling...') + p = Process(target=self.structural_sampling, args=(strucsparams['N'], + strucsparams['sig'], + gpus[0])) + p.start() + p.join() + # Run MD sampling on N GPUs. 
This code will automatically run 2 mds per GPU for better utilization def run_sampling_md(self, mdparams, perc=1.0, gpus=[0]): md_work = [] @@ -75,7 +86,7 @@ def run_sampling_md(self, mdparams, perc=1.0, gpus=[0]): np.random.shuffle(md_work) md_work = md_work[0:int(perc*md_work.size)] md_work = np.array_split(md_work,len(gpus2)) - + proc = [] for i,(md,g) in enumerate(zip(md_work,gpus2)): proc.append(Process(target=self.mol_dyn_sampling, args=(md,i, @@ -204,7 +215,6 @@ def normal_mode_sampling(self, T, Ngen, Nkep, maxd, sig, gpuid): sigma = anicv.compute_stddev_conformations(conformers,spc) sid = np.where( sigma > sig )[0] - Nt += sigma.size Nk += sid.size if 100.0*sid.size/float(Ngen) > 0: @@ -227,6 +237,91 @@ def normal_mode_sampling(self, T, Ngen, Nkep, maxd, sig, gpuid): #print('\nGrand Total:', Nkt, 'of', Ntt,'percent:',"{:.2f}".format(100.0*Nkt/Ntt), 'Kept',Nkp) of.close() + # Structural sampler function + def structural_sampling(self, N, sig, gpuid): + of = open(self.ldtdir + self.datdir + '/info_data_strucs.nfo', 'w') + + aevsize = self.netdict['aevsize'] + + anicv = aat.anicrossvalidationconformer(self.netdict['cnstfile'], + self.netdict['saefile'], + self.netdict['nnfprefix'], + self.netdict['num_nets'], + [gpuid], False) + + dc = aat.diverseconformers(self.netdict['cnstfile'], + self.netdict['saefile'], + self.netdict['nnfprefix']+'0/networks/', + aevsize, + gpuid, False) + + files = os.listdir(self.strucsfolder) + files.sort() + + Nkt = 0 + Ntt = 0 + cnt = 0 + for fi,f in enumerate(files): + print(f) + fil = open(self.strucsfolder+f,'r') + lines = fil.readlines() + fil.close() + nlines = len(lines) + # Reading all conformations + nat = int(lines[0]) + nconfs=int(round(len(lines)/(nat+2))) + crds=[] + for conf in range(nconfs): + crd =[] + if (conf==0): + if (not re.search("Charge:",lines[1]) or not re.search("Mul:",lines[1])): + raise ValueError('Error: the first comment line in %s must have charge and multiplicity. 
Please add something like " Charge: 0 Mul: 1 "'%(self.strucsfolder+f)) + chg = lines[1].split("Charge:")[1].split()[0] + mul = lines[1].split("Mul:")[1].split()[0] + spc = [] + for i in range(nat): + var = lines[conf*(nat+2)+2+i].split() + spc.append(var[0]) + crd.append([float(var[1]),float(var[2]),float(var[3])]) + else: + for i in range(nat): + var = lines[conf*(nat+2)+2+i].split() + crd.append([float(var[1]),float(var[2]),float(var[3])]) + crds.append(crd) + # Select up to N random structures, if needed + if (nconfs>N): + list=[] + for i in range(N): + num=np.random.random_integers(0,nconfs-1) + while(num in list): + num=num=np.random.random_integers(0,nconfs-1) + list.append(num) + ncrds=[] + for i in sorted(list): + ncrds.append(crds[i]) + del crds + crds=ncrds + del ncrds + # Converting list to numpy array + crds=np.asarray(crds, dtype=np.float32) + # Filter by QBC + sigma = anicv.compute_stddev_conformations(crds,spc) + sid = np.where( sigma > sig )[0] + + Ntt += sigma.size + Nkt += sid.size + of.write(str(cnt+1)+' of '+str(len(files))+') file: '+ str(self.strucsfolder+f) +'\n') + of.write(' -Total: '+str(sid.size)+' of '+str(sigma.size)+' percent: '+"{:.2f}".format(100.0*sid.size/sigma.size)+'\n') + of.flush() + if sid.size > 0: + cfn = f.split('.')[0]+'_strucs.xyz' + cmts = [' '+chg+' '+mul for c in range(sid.size)] + hdt.writexyzfilewc(self.cdir+cfn,crds[sid],spc,cmts) + cnt += 1 + + of.write('\nGrand Total: '+ str(Nkt)+ ' of '+ str(Ntt)+' percent: '+"{:.2f}".format(100.0*Nkt/Ntt)+'\n') + of.close() + # MD Sampling function def mol_dyn_sampling(self,md_work, i, N, T1, T2, dt, Nc, Ns, sig, gpuid): activ = aat.moldynactivelearning(self.netdict['cnstfile'], @@ -246,6 +341,13 @@ def mol_dyn_sampling(self,md_work, i, N, T1, T2, dt, Nc, Ns, sig, gpuid): #print(di, ') Working on', id, '...') S = data["species"] + if "charge" in data and "multip" in data: + chg = data["charge"] + mlt = data["multip"] + else: + chg = "0" + mlt = "1" + # Set mols 
activ.setmol(data["coordinates"], S) @@ -261,7 +363,7 @@ def mol_dyn_sampling(self,md_work, i, N, T1, T2, dt, Nc, Ns, sig, gpuid): #print(nfo) if X.size > 0: - hdt.writexyzfile(self.cdir + 'mds_' + m.split('.')[0] + '_' + str(i).zfill(2) + str(di).zfill(4) + '.xyz', X, S) + hdt.writexyzfilewc(self.cdir + 'mds_' + m.split('.')[0] + '_' + str(i).zfill(2) + str(di).zfill(4) + '.xyz', X, S, ' '+chg+' '+mlt) difo.write('Complete mean fail time: ' + "{:.2f}".format(ftme_t / float(Nmol)) + '\n') print(Nmol) del activ @@ -280,7 +382,7 @@ def dimer_sampling(self, tid, Nr, dparam, gpuid): Nm = dparam['Nm'] Ni = dparam['Ni'] Ns = dparam['Ns'] - + mols = [] difo = open(self.ldtdir + self.datdir + '/info_data_mddimer-'+str(tid)+'.nfo', 'w') for di,id in enumerate(dparam['mdselect']): @@ -290,26 +392,26 @@ def dimer_sampling(self, tid, Nr, dparam, gpuid): dnfo = str(di) + ' of ' + str(len(dparam['mdselect'])) + ') dir: ' + str(self.idir[id[1]]) + ' Selecting: '+str(id[0]*len(files)) #print(dnfo) difo.write(dnfo+'\n') - + for i in range(id[0]): for n,m in enumerate(files): data = hdt.read_rcdb_coordsandnm(self.idir[id[1]]+m) if len(data['species']) < maxNa: mols.append(data) - dgen = pmf.dimergenerator(self.netdict['cnstfile'], - self.netdict['saefile'], - self.netdict['nnfprefix'], - self.netdict['num_nets'], + dgen = pmf.dimergenerator(self.netdict['cnstfile'], + self.netdict['saefile'], + self.netdict['nnfprefix'], + self.netdict['num_nets'], mols, gpuid) difo.write('Beginning dimer generation...\n') - + Nt = 0 Nd = 0 for i in range(Nr): dgen.init_dynamics(Nm, V, L, dt, T) - + for j in range(Ns): if j != 0: dgen.run_dynamics(Ni) @@ -322,7 +424,7 @@ def dimer_sampling(self, tid, Nr, dparam, gpuid): Nt += dgen.Nt Nd += dgen.Nd - + #print('Step (',tid,',',i,') [', str(dgen.Nd), '/', str(dgen.Nt),'] generated ',len(dgen.frag_list), 'dimers...') #difo.write('Step ('+str(i)+') ['+ str(dgen.Nd)+ '/'+ str(dgen.Nt)+'] generated '+str(len(dgen.frag_list))+'dimers...\n') if max_sig > 
3.0*sig: @@ -376,7 +478,7 @@ def run_sampling_TS(self, tsparams, gpus=[0], perc=1.0): np.random.shuffle(TS_infiles) TS_infiles = TS_infiles[0:int(perc*len(TS_infiles))] TS_infiles = np.array_split(TS_infiles,len(gpus2)) - + proc = [] for i,g in enumerate(gpus2): proc.append(Process(target=self.TS_sampling, args=(i, TS_infiles[i], tsparams, g))) @@ -481,7 +583,7 @@ def run_sampling_pDynTS(self, pdynparams, gpus=0): print('Finished pDynamo sampling.') - def pDyn_QMsampling(self, pdynparams, gpuid): + def pDyn_QMsampling(self, pdynparams, gpuid): #Call subproc_pDyn class in pyaniasetools as activ activ = aat.subproc_pDyn(self.netdict['cnstfile'], self.netdict['saefile'], @@ -500,7 +602,7 @@ def pDyn_QMsampling(self, pdynparams, gpuid): indir=pdynparams['indir'] #path to save XYZ files of IRC points to check stddev XYZfile=pdynparams['XYZfile'] #XYZ file with high standard deviations structures l_val=pdynparams['l_val'] #Ri --> randomly perturb in the interval [+x,-x] - h_val=pdynparams['h_val'] + h_val=pdynparams['h_val'] n_points=pdynparams['n_points'] #Number of points along IRC (forward+backward+1 for TS) sig=pdynparams['sig'] N=pdynparams['N'] @@ -521,7 +623,7 @@ def pDyn_QMsampling(self, pdynparams, gpuid): chk_TS = activ.subprocess_cmd(sbproc_cmdTS, False, logfile_TS) if chk_TS == 0: chk_IRC = activ.subprocess_cmd(sbproc_cmdIRC, False, logfile_IRC) - + # ----------------------- Save points along ANI IRC ------------------------ IRCfils=os.listdir(IRCdir) IRCfils.sort() @@ -530,16 +632,16 @@ def pDyn_QMsampling(self, pdynparams, gpuid): activ.getIRCpoints_toXYZ(n_points, IRCdir+f, f, indir) infils=os.listdir(indir) infils.sort() - + # ------ Check for high standard deviation structures and get vib modes ----- for f in infils: stdev = activ.check_stddev(indir+f, sig) if stdev > sig: #if stddev is high then get modes for that point nmc = activ.get_nm(indir+f) #save modes in memory for later use - + activ.write_nm_xyz(XYZfile) #writes all the structures with high 
standard deviations to xyz file - # ----------------------------- Read XYZ for NM ----------------------------- + # ----------------------------- Read XYZ for NM ----------------------------- X, spc, Na, C = hdt.readxyz3(XYZfile) # --------- NMS for XYZs with high stddev -------- @@ -553,7 +655,7 @@ def pDyn_QMsampling(self, pdynparams, gpuid): gen_crd[k] = gen.get_random_structure() hdt.writexyzfile(self.cdir + 'nms_TS%i.xyz' %N, gen_crd, spc[i]) - + del activ @@ -895,11 +997,11 @@ def build_strided_training_cache(self,Nblocks,Nvalid,Ntest,build_test=True, buil S = data['species'] E = data['energies'] X = data['coordinates'] - + Esae = hdt.compute_sae(self.netdict['saefile'], S) - + dE.append((E-Esae)/np.sqrt(len(S))) - + dE = np.concatenate(dE) cidx = np.where(np.abs(dE) < 15.0) std = np.abs(dE[cidx]).std() diff --git a/lib/nmstools.py b/lib/nmstools.py index 634a1d9..e861aa1 100644 --- a/lib/nmstools.py +++ b/lib/nmstools.py @@ -27,15 +27,15 @@ def __getCharge__(self,type): elif type is 'C': return 6.0 elif type is 'N': - return 6.0 + return 7.0 elif type is 'O': - return 6.0 + return 8.0 elif type is 'F': - return 6.0 + return 9.0 elif type is 'S': - return 6.0 + return 16.0 elif type == 'Cl': - return 6.0 + return 17.0 else: print('Unknown atom type! 
',type) exit(1) @@ -62,19 +62,16 @@ def __check_distance_from_eq__(self,rxyz): # Generate a structure def __genrandomstruct__(self): - rdt = np.random.random(self.Nf+1) - rdt[0] = 0.0 + rdt = np.random.random(self.Nf) norm = np.random.random(1)[0] - rdt = norm*np.sort(rdt) - rdt[self.Nf] = norm oxyz = self.xyz.copy() for i in range(self.Nf): Ki = mDynetoMet * self.fcc[i] - ci = rdt[i+1]-rdt[i] + ci = norm*rdt[i]/np.sum(rdt) Sn = -1.0 if np.random.binomial(1,0.5,1) else 1.0 - Ri = Sn * MtoA * np.sqrt((3.0 * ci * Kb * float(self.Nf) * self.T)/(Ki)) + Ri = Sn * MtoA * np.sqrt((ci * Kb * float(self.Nf) * self.T)/(Ki)) Ri = min([Ri,self.maxd]) oxyz = oxyz + Ri * self.nmo[i] return oxyz @@ -131,6 +128,12 @@ def __getCharge__(self,type): return 7.0 elif type is 'O': return 8.0 + elif type is 'F': + return 9.0 + elif type is 'S': + return 16.0 + elif type == 'Cl': + return 17.0 else: print('Unknown atom type! ',type) exit(1) @@ -157,7 +160,7 @@ def __check_distance_from_eq__(self,rxyz): def __genrandomstruct__(self): ("generating randomstruct") oxyz = self.xyz.copy() - Ri = np.random.uniform(low=self.l_val, high=self.h_val, size=None) + Ri = np.random.uniform(low=self.l_val, high=self.h_val, size=None) oxyz = oxyz + Ri * self.nmo return oxyz