From f0fa205f2dd72f3b74555cd7e5c5fe3af087d7c6 Mon Sep 17 00:00:00 2001 From: James McClung Date: Mon, 22 Dec 2025 12:43:05 -0500 Subject: [PATCH 01/12] psc: rm debugging ifdefs --- src/include/psc.hxx | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/include/psc.hxx b/src/include/psc.hxx index 0a51e1183..0ac924de7 100644 --- a/src/include/psc.hxx +++ b/src/include/psc.hxx @@ -521,10 +521,8 @@ struct Psc // === field propagation E^{n+1/2} -> E^{n+3/2} mpi_printf(comm, "***** Push fields E\n"); prof_start(pr_bndf); -#if 1 bndf_.fill_ghosts_H(mflds_); bnd_.fill_ghosts(mflds_, HX, HX + 3); -#endif // === external current this->ext_current_(grid(), mflds_); @@ -538,12 +536,10 @@ struct Psc pushf_.push_E(mflds_, 1., Dim{}); prof_stop(pr_push_flds); -#if 1 prof_restart(pr_bndf); bndf_.fill_ghosts_E(mflds_); bnd_.fill_ghosts(mflds_, EX, EX + 3); prof_stop(pr_bndf); -#endif // state is now: x^{n+3/2}, p^{n+1}, E^{n+3/2}, B^{n+1} // === field propagation B^{n+1} -> B^{n+3/2} @@ -552,13 +548,11 @@ struct Psc pushf_.push_H(mflds_, .5, Dim{}); prof_stop(pr_push_flds); -#if 1 prof_start(pr_bndf); bndf_.fill_ghosts_H(mflds_); bnd_.fill_ghosts(mflds_, HX, HX + 3); prof_stop(pr_bndf); // state is now: x^{n+3/2}, p^{n+1}, E^{n+3/2}, B^{n+3/2} -#endif if (checks_.continuity.should_do_check(timestep)) { prof_restart(pr_checks); From 021641416060a6be17399cd1fcdbf68f6bbe7961 Mon Sep 17 00:00:00 2001 From: James McClung Date: Mon, 22 Dec 2025 13:03:04 -0500 Subject: [PATCH 02/12] psc: more thorough printing --- src/include/psc.hxx | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/src/include/psc.hxx b/src/include/psc.hxx index 0ac924de7..c7e41f1e5 100644 --- a/src/include/psc.hxx +++ b/src/include/psc.hxx @@ -488,14 +488,14 @@ struct Psc } if (checks_.continuity.should_do_check(timestep)) { - mpi_printf(comm, "***** Checking continuity...\n"); + mpi_printf(comm, "***** Checking continuity (1 of 2)...\n"); prof_start(pr_checks); checks_.continuity.before_particle_push(mprts_); prof_stop(pr_checks); } // === particle propagation p^{n} -> p^{n+1}, x^{n+1/2} -> x^{n+3/2} - mpi_printf(comm, "***** Pushing particles...\n"); + mpi_printf(comm, "***** Push particles...\n"); prof_start(pr_push_prts); pushp_.push_mprts(mprts_, mflds_); prof_stop(pr_push_prts); @@ -507,7 +507,7 @@ struct Psc prof_stop(pr_inject_prts); // === field propagation B^{n+1/2} -> B^{n+1} - mpi_printf(comm, "***** Pushing B...\n"); + mpi_printf(comm, "***** Push fields B (1 of 2)...\n"); prof_start(pr_push_flds); pushf_.push_H(mflds_, .5, Dim{}); prof_stop(pr_push_flds); @@ -519,7 +519,7 @@ struct Psc prof_stop(pr_bndp); // === field propagation E^{n+1/2} -> E^{n+3/2} - mpi_printf(comm, "***** Push fields E\n"); + mpi_printf(comm, "***** Bnd fields B (1 of 2), J...\n"); prof_start(pr_bndf); bndf_.fill_ghosts_H(mflds_); bnd_.fill_ghosts(mflds_, HX, HX + 3); @@ -532,10 +532,12 @@ struct Psc bnd_.fill_ghosts(mflds_, JXI, JXI + 3); prof_stop(pr_bndf); + mpi_printf(comm, "***** Push fields E...\n"); prof_restart(pr_push_flds); pushf_.push_E(mflds_, 1., Dim{}); prof_stop(pr_push_flds); + mpi_printf(comm, "***** Bnd fields E...\n"); prof_restart(pr_bndf); bndf_.fill_ghosts_E(mflds_); bnd_.fill_ghosts(mflds_, EX, EX + 3); @@ -543,11 +545,12 @@ struct Psc // state is now: x^{n+3/2}, p^{n+1}, E^{n+3/2}, B^{n+1} // === field propagation B^{n+1} -> B^{n+3/2} - mpi_printf(comm, "***** Push fields B\n"); + mpi_printf(comm, "***** Push fields B (2 of 2)...\n"); prof_restart(pr_push_flds); pushf_.push_H(mflds_, .5, Dim{}); 
prof_stop(pr_push_flds); + mpi_printf(comm, "***** Bnd fields B (2 of 2)...\n"); prof_start(pr_bndf); bndf_.fill_ghosts_H(mflds_); bnd_.fill_ghosts(mflds_, HX, HX + 3); @@ -555,6 +558,7 @@ struct Psc // state is now: x^{n+3/2}, p^{n+1}, E^{n+3/2}, B^{n+3/2} if (checks_.continuity.should_do_check(timestep)) { + mpi_printf(comm, "***** Checking continuity (2 of 2)...\n"); prof_restart(pr_checks); checks_.continuity.after_particle_push(mprts_, mflds_); prof_stop(pr_checks); @@ -571,6 +575,7 @@ struct Psc } if (checks_.gauss.should_do_check(timestep)) { + mpi_printf(comm, "***** Checking gauss...\n"); prof_restart(pr_checks); checks_.gauss(mprts_, mflds_); prof_stop(pr_checks); From 4257d67722161841d85b6f62440bc0ed3a317c85 Mon Sep 17 00:00:00 2001 From: James McClung Date: Mon, 22 Dec 2025 13:07:04 -0500 Subject: [PATCH 03/12] psc: mv order of bnd J --- src/include/psc.hxx | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/src/include/psc.hxx b/src/include/psc.hxx index c7e41f1e5..42cf81676 100644 --- a/src/include/psc.hxx +++ b/src/include/psc.hxx @@ -506,24 +506,13 @@ struct Psc inject_particles_(mprts_, mflds_); prof_stop(pr_inject_prts); - // === field propagation B^{n+1/2} -> B^{n+1} - mpi_printf(comm, "***** Push fields B (1 of 2)...\n"); - prof_start(pr_push_flds); - pushf_.push_H(mflds_, .5, Dim{}); - prof_stop(pr_push_flds); - // state is now: x^{n+3/2}, p^{n+1}, E^{n+1/2}, B^{n+1}, j^{n+1} - mpi_printf(comm, "***** Bnd particles...\n"); prof_start(pr_bndp); bndp_(mprts_); prof_stop(pr_bndp); - // === field propagation E^{n+1/2} -> E^{n+3/2} - mpi_printf(comm, "***** Bnd fields B (1 of 2), J...\n"); + mpi_printf(comm, "***** Bnd fields J...\n"); prof_start(pr_bndf); - bndf_.fill_ghosts_H(mflds_); - bnd_.fill_ghosts(mflds_, HX, HX + 3); - // === external current this->ext_current_(grid(), mflds_); @@ -532,6 +521,20 @@ struct Psc bnd_.fill_ghosts(mflds_, JXI, JXI + 3); prof_stop(pr_bndf); + // === field propagation B^{n+1/2} -> B^{n+1} + mpi_printf(comm, "***** Push fields B (1 of 2)...\n"); + prof_start(pr_push_flds); + pushf_.push_H(mflds_, .5, Dim{}); + prof_stop(pr_push_flds); + // state is now: x^{n+3/2}, p^{n+1}, E^{n+1/2}, B^{n+1}, j^{n+1} + + // === field propagation E^{n+1/2} -> E^{n+3/2} + mpi_printf(comm, "***** Bnd fields B (1 of 2)...\n"); + prof_start(pr_bndf); + bndf_.fill_ghosts_H(mflds_); + bnd_.fill_ghosts(mflds_, HX, HX + 3); + prof_stop(pr_bndf); + mpi_printf(comm, "***** Push fields E...\n"); prof_restart(pr_push_flds); pushf_.push_E(mflds_, 1., Dim{}); From 844520d9c5adacf39c41598cda5e830f94db0f34 Mon Sep 17 00:00:00 2001 From: James McClung Date: Mon, 22 Dec 2025 13:08:29 -0500 Subject: [PATCH 04/12] psc: fix comment order --- src/include/psc.hxx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/include/psc.hxx b/src/include/psc.hxx index 42cf81676..81bc87837 100644 --- a/src/include/psc.hxx +++ b/src/include/psc.hxx @@ -499,7 +499,6 @@ struct Psc prof_start(pr_push_prts); pushp_.push_mprts(mprts_, mflds_); prof_stop(pr_push_prts); - // state is now: x^{n+3/2}, p^{n+1}, E^{n+1/2}, B^{n+1/2}, j^{n+1} // === particle injection prof_start(pr_inject_prts); @@ -520,21 +519,22 @@ struct Psc bnd_.add_ghosts(mflds_, JXI, JXI + 3); bnd_.fill_ghosts(mflds_, JXI, JXI + 3); prof_stop(pr_bndf); + // state is now: x^{n+3/2}, p^{n+1}, E^{n+1/2}, B^{n+1/2}, j^{n+1} // === field propagation B^{n+1/2} -> B^{n+1} mpi_printf(comm, "***** Push fields B (1 of 2)...\n"); prof_start(pr_push_flds); 
pushf_.push_H(mflds_, .5, Dim{}); prof_stop(pr_push_flds); - // state is now: x^{n+3/2}, p^{n+1}, E^{n+1/2}, B^{n+1}, j^{n+1} - // === field propagation E^{n+1/2} -> E^{n+3/2} mpi_printf(comm, "***** Bnd fields B (1 of 2)...\n"); prof_start(pr_bndf); bndf_.fill_ghosts_H(mflds_); bnd_.fill_ghosts(mflds_, HX, HX + 3); prof_stop(pr_bndf); + // state is now: x^{n+3/2}, p^{n+1}, E^{n+1/2}, B^{n+1}, j^{n+1} + // === field propagation E^{n+1/2} -> E^{n+3/2} mpi_printf(comm, "***** Push fields E...\n"); prof_restart(pr_push_flds); pushf_.push_E(mflds_, 1., Dim{}); From 92af4281948e4943d0c69ee4226b1ec4af3e5dad Mon Sep 17 00:00:00 2001 From: James McClung Date: Mon, 22 Dec 2025 13:11:24 -0500 Subject: [PATCH 05/12] psc: +pr_external_current --- src/include/psc.hxx | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/include/psc.hxx b/src/include/psc.hxx index 81bc87837..4f8b14faf 100644 --- a/src/include/psc.hxx +++ b/src/include/psc.hxx @@ -446,12 +446,13 @@ struct Psc using Dim = typename PscConfig::Dim; static int pr_sort, pr_collision, pr_checks, pr_push_prts, pr_push_flds, - pr_bndp, pr_bndf, pr_marder, pr_inject_prts; + pr_bndp, pr_bndf, pr_marder, pr_inject_prts, pr_external_current; if (!pr_sort) { pr_sort = prof_register("step_sort", 1., 0, 0); pr_collision = prof_register("step_collision", 1., 0, 0); pr_push_prts = prof_register("step_push_prts", 1., 0, 0); pr_inject_prts = prof_register("step_inject_prts", 1., 0, 0); + pr_external_current = prof_register("step_external_current", 1., 0, 0); pr_push_flds = prof_register("step_push_flds", 1., 0, 0); pr_bndp = prof_register("step_bnd_prts", 1., 0, 0); pr_bndf = prof_register("step_bnd_flds", 1., 0, 0); @@ -505,6 +506,11 @@ struct Psc inject_particles_(mprts_, mflds_); prof_stop(pr_inject_prts); + // === external current + prof_start(pr_external_current); + this->ext_current_(grid(), mflds_); + prof_stop(pr_external_current); + mpi_printf(comm, "***** Bnd particles...\n"); prof_start(pr_bndp); bndp_(mprts_); @@ -512,9 +518,6 @@ struct Psc mpi_printf(comm, "***** Bnd fields J...\n"); prof_start(pr_bndf); - // === external current - this->ext_current_(grid(), mflds_); - bndf_.add_ghosts_J(mflds_); bnd_.add_ghosts(mflds_, JXI, JXI + 3); bnd_.fill_ghosts(mflds_, JXI, JXI + 3); From c2d01bb381370d6698cb68e23cf8c895fd41eb69 Mon Sep 17 00:00:00 2001 From: James McClung Date: Mon, 22 Dec 2025 13:23:42 -0500 Subject: [PATCH 06/12] psc: -VPIC --- src/include/psc.hxx | 428 +------------------------------------------- 1 file changed, 9 insertions(+), 419 deletions(-) diff --git a/src/include/psc.hxx b/src/include/psc.hxx index 4f8b14faf..a81733fed 100644 --- a/src/include/psc.hxx +++ b/src/include/psc.hxx @@ -22,40 +22,7 @@ #include -#ifdef VPIC -#include "../libpsc/vpic/vpic_iface.h" -#endif - -#ifndef VPIC struct MaterialList; -#endif - -#ifdef VPIC - -// FIXME, global variables are bad... 
- -using VpicConfig = VpicConfigPsc; -using Mparticles = typename VpicConfig::Mparticles; -using MfieldsState = typename VpicConfig::MfieldsState; -using MfieldsHydro = MfieldsHydroQ; -using MfieldsInterpolator = typename VpicConfig::MfieldsInterpolator; -using MfieldsAccumulator = typename VpicConfig::MfieldsAccumulator; -using Grid = typename MfieldsState::Grid; -using ParticleBcList = typename Mparticles::ParticleBcList; -using MaterialList = typename MfieldsState::MaterialList; -using Material = typename MaterialList::Material; -using OutputHydro = OutputHydroQ; -using DiagMixin = - NoneDiagMixin; - -Grid* vgrid; -std::unique_ptr hydro; -std::unique_ptr interpolator; -std::unique_ptr accumulator; -ParticleBcList particle_bc_list; -DiagMixin diag_mixin; - -#endif // ====================================================================== @@ -150,10 +117,6 @@ struct Psc using BndParticles = typename PscConfig::BndParticles; using Dim = typename PscConfig::Dim; -#ifdef VPIC - using AccumulateOps = typename PushParticles::AccumulateOps; -#endif - // ---------------------------------------------------------------------- // ctor @@ -219,11 +182,7 @@ struct Psc void initialize() { -#ifdef VPIC - initialize_vpic(); -#else initialize_default(); -#endif bndf_.fill_ghosts_H(mflds_); bnd_.fill_ghosts(mflds_, HX, HX + 3); @@ -273,10 +232,6 @@ struct Psc step(); grid_->timestep_++; // FIXME, too hacky -#ifdef VPIC - vgrid->step++; - assert(vgrid->step == grid().timestep()); -#endif diagnostics(); @@ -314,130 +269,6 @@ struct Psc d, h, m, s); } -#ifdef VPIC - // ---------------------------------------------------------------------- - // step_vpic - - void step_vpic() - { - static int pr_sort, pr_collision, pr_checks, pr_push_prts, pr_push_flds, - pr_bndp, pr_bndf, pr_marder, pr_inject, pr_heating; - if (!pr_sort) { - pr_sort = prof_register("step_sort", 1., 0, 0); - pr_collision = prof_register("step_collision", 1., 0, 0); - pr_push_prts = prof_register("step_push_prts", 1., 0, 0); - pr_push_flds = prof_register("step_push_flds", 1., 0, 0); - pr_bndp = prof_register("step_bnd_prts", 1., 0, 0); - pr_bndf = prof_register("step_bnd_flds", 1., 0, 0); - pr_checks = prof_register("step_checks", 1., 0, 0); - pr_marder = prof_register("step_marder", 1., 0, 0); - pr_inject = prof_register("step_inject", 1., 0, 0); - pr_heating = prof_register("step_heating", 1., 0, 0); - } - - MPI_Comm comm = grid().comm(); - - // x^{n+1/2}, p^{n}, E^{n+1/2}, B^{n+1/2} - - int timestep = grid().timestep(); - - if (p_.balance_interval > 0 && timestep % p_.balance_interval == 0) { - balance_(grid_, mprts_); - } - - // prof_start(pr_time_step_no_comm); - // prof_stop(pr_time_step_no_comm); // actual measurements are done w/ - // restart - - if (p_.sort_interval > 0 && timestep % p_.sort_interval == 0) { - // mpi_printf(comm, "***** Sorting...\n"); - prof_start(pr_sort); - sort_(mprts_); - prof_stop(pr_sort); - } - - if (collision_.interval() > 0 && timestep % collision_.interval() == 0) { - mpi_printf(comm, "***** Performing collisions...\n"); - prof_start(pr_collision); - collision_(mprts_); - prof_stop(pr_collision); - } - - // psc_bnd_particles_open_calc_moments(psc_->bnd_particles, - // psc_->particles); - - checks_.continuity_before_particle_push(mprts_); - - // === particle propagation p^{n} -> p^{n+1}, x^{n+1/2} -> x^{n+3/2} - prof_start(pr_push_prts); - pushp_.push_mprts(mprts_, mflds_, *interpolator, *accumulator, - particle_bc_list, num_comm_round); - prof_stop(pr_push_prts); - // state is now: x^{n+3/2}, p^{n+1}, 
E^{n+1/2}, B^{n+1/2}, j^{n+1} - - // field propagation B^{n+1/2} -> B^{n+1} - pushf_.push_H(mflds_, .5); - // x^{n+3/2}, p^{n+1}, E^{n+1/2}, B^{n+1}, j^{n+1} - - prof_start(pr_bndp); - bndp_(mprts_); - prof_stop(pr_bndp); - - // field propagation E^{n+1/2} -> E^{n+3/2} - - // fill ghosts for H - bndf_.fill_ghosts_H(mflds_); - bnd_.fill_ghosts(mflds_, HX, HX + 3); - - // add and fill ghost for J - bndf_.add_ghosts_J(mflds_); - bnd_.add_ghosts(mflds_, JXI, JXI + 3); - bnd_.fill_ghosts(mflds_, JXI, JXI + 3); - - // push E - pushf_.push_E(mflds_, 1.); - - bndf_.fill_ghosts_E(mflds_); - // if (pushf_->variant == 0) { - bnd_.fill_ghosts(mflds_, EX, EX + 3); - //} - // x^{n+3/2}, p^{n+1}, E^{n+3/2}, B^{n+1} - - // field propagation B^{n+1} -> B^{n+3/2} - // if (pushf_->variant == 0) { - bndf_.fill_ghosts_E(mflds_); - bnd_.fill_ghosts(mflds_, EX, EX + 3); - // } - - // push H - pushf_.push_H(mflds_, .5); - - bndf_.fill_ghosts_H(mflds_); - // if (pushf_->variant == 0) { - bnd_.fill_ghosts(mflds_, HX, HX + 3); - //} - // x^{n+3/2}, p^{n+1}, E^{n+3/2}, B^{n+3/2} - - checks_.continuity_after_particle_push(mprts_, mflds_); - - // E at t^{n+3/2}, particles at t^{n+3/2} - // B at t^{n+3/2} (Note: that is not it's natural time, - // but div B should be == 0 at any time...) - if (p_.marder_interval > 0 && timestep % p_.marder_interval == 0) { - // mpi_printf(comm, "***** Performing Marder correction...\n"); - prof_start(pr_marder); - marder_(mflds_, mprts_); - prof_stop(pr_marder); - } - - checks_.gauss(mprts_, mflds_); - -#ifdef VPIC - pushp_.load_interpolator(mprts_, mflds_, *interpolator); -#endif - } -#endif - // ---------------------------------------------------------------------- // step_psc @@ -590,14 +421,7 @@ struct Psc // psc_push_particles_prep(psc->push_particles, psc->particles, psc->flds); } - void step() - { -#ifdef VPIC - step_vpic(); -#else - step_psc(); -#endif - } + void step() { step_psc(); } private: // ---------------------------------------------------------------------- @@ -622,7 +446,6 @@ private: } } -#ifndef VPIC // ---------------------------------------------------------------------- // initialize_default @@ -632,62 +455,6 @@ private: // checks_.gauss(mprts_, mflds_); } -#endif - -#ifdef VPIC - - // ---------------------------------------------------------------------- - // initialize_vpic - - void initialize_vpic() - { - MPI_Comm comm = grid().comm(); - - // Do some consistency checks on user initialized fields - - mpi_printf(comm, "Checking interdomain synchronization\n"); - double err = marder_.synchronize_tang_e_norm_b(mflds_); - mpi_printf(comm, "Error = %g (arb units)\n", err); - - mpi_printf(comm, "Checking magnetic field divergence\n"); - marder_.compute_div_b_err(mflds_); - err = marder_.compute_rms_div_b_err(mflds_); - mpi_printf(comm, "RMS error = %e (charge/volume)\n", err); - marder_.clean_div_b(mflds_); - - // Load fields not initialized by the user - - mpi_printf(comm, "Initializing radiation damping fields\n"); - TIC AccumulateOps::compute_curl_b(mflds_); - TOC(compute_curl_b, 1); - - mpi_printf(comm, "Initializing bound charge density\n"); - marder_.clear_rhof(mflds_); - marder_.accumulate_rho_p(mprts_, mflds_); - marder_.synchronize_rho(mflds_); - TIC AccumulateOps::compute_rhob(mflds_); - TOC(compute_rhob, 1); - - // Internal sanity checks - - mpi_printf(comm, "Checking electric field divergence\n"); - marder_.compute_div_e_err(mflds_); - err = marder_.compute_rms_div_e_err(mflds_); - mpi_printf(comm, "RMS error = %e (charge/volume)\n", err); - 
marder_.clean_div_e(mflds_); - - mpi_printf(comm, "Rechecking interdomain synchronization\n"); - err = marder_.synchronize_tang_e_norm_b(mflds_); - mpi_printf(comm, "Error = %e (arb units)\n", err); - - mpi_printf(comm, "Uncentering particles\n"); - if (!mprts_.empty()) { - pushp_.load_interpolator(mprts_, mflds_, *interpolator); - pushp_.uncenter(mprts_, *interpolator); - } - } - -#endif // ---------------------------------------------------------------------- // diagnostics @@ -699,13 +466,6 @@ private: void print_status() { -#ifdef VPIC -#ifdef USE_VPIC - int rank; - MPI_Comm_rank(MPI_COMM_WORLD, &rank); - update_profile(rank == 0); -#endif -#endif psc_stats_log(grid().timestep()); print_profiling(); } @@ -858,214 +618,44 @@ void setupParticles(SetupParticles& setup_particles, Mparticles& mprts, void define_periodic_grid(const double xl[3], const double xh[3], const int gdims[3], const int np[3]) -{ -#ifdef VPIC - // SimulationMixin::setTopology(np[0], np[1], np[2]); FIXME, needed for - // vpic_simulation, I believe only because this info is written out in - // diagnostics_run - vgrid->partition_periodic_box(xl, xh, gdims, Int3::fromPointer(np)); -#endif -} +{} // ---------------------------------------------------------------------- // set_domain_field_bc -void set_domain_field_bc(Int3 bnd, int bc) -{ -#ifdef VPIC - int boundary = BOUNDARY(bnd[0], bnd[1], bnd[2]); - int fbc; - switch (bc) { - case BND_FLD_CONDUCTING_WALL: fbc = Grid::pec_fields; break; - case BND_FLD_ABSORBING: fbc = Grid::absorb_fields; break; - default: assert(0); - } - vgrid->set_fbc(boundary, fbc); -#endif -} +void set_domain_field_bc(Int3 bnd, int bc) {} // ---------------------------------------------------------------------- // set_domain_particle_bc -void set_domain_particle_bc(Int3 bnd, int bc) -{ -#ifdef VPIC - int boundary = BOUNDARY(bnd[0], bnd[1], bnd[2]); - int pbc; - switch (bc) { - case BND_PRT_REFLECTING: pbc = Grid::reflect_particles; break; - case BND_PRT_ABSORBING: pbc = Grid::absorb_particles; break; - default: assert(0); - } - vgrid->set_pbc(boundary, pbc); -#endif -} +void set_domain_particle_bc(Int3 bnd, int bc) {} -void grid_setup_communication() -{ -#ifdef VPIC - assert(vgrid->nx && vgrid->ny && vgrid->ny); - - // Pre-size communications buffers. This is done to get most memory - // allocation over with before the simulation starts running - // FIXME, this isn't a great place. First, we shouldn't call mp - // functions (semi-)directly. 2nd, whether we need these buffers depends - // on b.c., which aren't yet known. - - // FIXME, this really isn't a good place to do this, as it requires layer - // breaking knowledge of which communication will need the largest - // buffers... 
- int nx1 = vgrid->nx + 1, ny1 = vgrid->ny + 1, nz1 = vgrid->nz + 1; - vgrid->mp_size_recv_buffer( - BOUNDARY(-1, 0, 0), ny1 * nz1 * sizeof(typename MfieldsHydro::Element)); - vgrid->mp_size_recv_buffer( - BOUNDARY(1, 0, 0), ny1 * nz1 * sizeof(typename MfieldsHydro::Element)); - vgrid->mp_size_recv_buffer( - BOUNDARY(0, -1, 0), nz1 * nx1 * sizeof(typename MfieldsHydro::Element)); - vgrid->mp_size_recv_buffer( - BOUNDARY(0, 1, 0), nz1 * nx1 * sizeof(typename MfieldsHydro::Element)); - vgrid->mp_size_recv_buffer( - BOUNDARY(0, 0, -1), nx1 * ny1 * sizeof(typename MfieldsHydro::Element)); - vgrid->mp_size_recv_buffer( - BOUNDARY(0, 0, 1), nx1 * ny1 * sizeof(typename MfieldsHydro::Element)); - - vgrid->mp_size_send_buffer( - BOUNDARY(-1, 0, 0), ny1 * nz1 * sizeof(typename MfieldsHydro::Element)); - vgrid->mp_size_send_buffer( - BOUNDARY(1, 0, 0), ny1 * nz1 * sizeof(typename MfieldsHydro::Element)); - vgrid->mp_size_send_buffer( - BOUNDARY(0, -1, 0), nz1 * nx1 * sizeof(typename MfieldsHydro::Element)); - vgrid->mp_size_send_buffer( - BOUNDARY(0, 1, 0), nz1 * nx1 * sizeof(typename MfieldsHydro::Element)); - vgrid->mp_size_send_buffer( - BOUNDARY(0, 0, -1), nx1 * ny1 * sizeof(typename MfieldsHydro::Element)); - vgrid->mp_size_send_buffer( - BOUNDARY(0, 0, 1), nx1 * ny1 * sizeof(typename MfieldsHydro::Element)); -#endif -} +void grid_setup_communication() {} // ---------------------------------------------------------------------- // vpic_define_grid -void vpic_define_grid(const Grid_t& grid) -{ -#ifdef VPIC - auto domain = grid.domain; - auto bc = grid.bc; - auto dt = grid.dt; - - vgrid = Grid::create(); - vgrid->setup(domain.dx, dt, grid.norm.cc, grid.norm.eps0); - - // define the grid - define_periodic_grid(domain.corner, domain.corner + domain.length, - domain.gdims, domain.np); - - // set field boundary conditions - for (int p = 0; p < grid.n_patches(); p++) { - assert(p == 0); - for (int d = 0; d < 3; d++) { - bool lo = grid.atBoundaryLo(p, d); - bool hi = grid.atBoundaryHi(p, d); - - if (lo && bc.fld_lo[d] != BND_FLD_PERIODIC) { - Int3 bnd = {0, 0, 0}; - bnd[d] = -1; - set_domain_field_bc(bnd, bc.fld_lo[d]); - } - - if (hi && bc.fld_hi[d] != BND_FLD_PERIODIC) { - Int3 bnd = {0, 0, 0}; - bnd[d] = 1; - set_domain_field_bc(bnd, bc.fld_hi[d]); - } - } - } - - // set particle boundary conditions - for (int p = 0; p < grid.n_patches(); p++) { - assert(p == 0); - for (int d = 0; d < 3; d++) { - bool lo = grid.atBoundaryLo(p, d); - bool hi = grid.atBoundaryHi(p, d); - - if (lo && bc.prt_lo[d] != BND_PRT_PERIODIC) { - Int3 bnd = {0, 0, 0}; - bnd[d] = -1; - set_domain_particle_bc(bnd, bc.prt_lo[d]); - } - - if (hi && bc.prt_hi[d] != BND_PRT_PERIODIC) { - Int3 bnd = {0, 0, 0}; - bnd[d] = 1; - set_domain_particle_bc(bnd, bc.prt_hi[d]); - } - } - } - - grid_setup_communication(); -#endif -} +void vpic_define_grid(const Grid_t& grid) {} // ---------------------------------------------------------------------- // vpic_define_material -#ifdef VPIC -static Material* vpic_define_material(MaterialList& material_list, - const char* name, double eps, - double mu = 1., double sigma = 0., - double zeta = 0.) -{ - auto m = MaterialList::create(name, eps, eps, eps, mu, mu, mu, sigma, sigma, - sigma, zeta, zeta, zeta); - return material_list.append(m); -} -#else static void vpic_define_material(MaterialList& material_list, const char* name, double eps, double mu = 1., double sigma = 0., double zeta = 0.) 
{} -#endif // ---------------------------------------------------------------------- // vpic_define_fields -void vpic_define_fields(const Grid_t& grid) -{ -#ifdef VPIC - hydro.reset(new MfieldsHydro{grid, vgrid}); - interpolator.reset(new MfieldsInterpolator{vgrid}); - accumulator.reset(new MfieldsAccumulator{vgrid}); -#endif -} +void vpic_define_fields(const Grid_t& grid) {} // ---------------------------------------------------------------------- // vpic_create_diagnotics -void vpic_create_diagnostics(int interval) -{ -#ifdef VPIC - diag_mixin.diagnostics_init(interval); -#endif -} +void vpic_create_diagnostics(int interval) {} // ---------------------------------------------------------------------- // vpic_setup_diagnostics -void vpic_setup_diagnostics() -{ -#ifdef VPIC - diag_mixin.diagnostics_setup(); -#endif -} - -#ifdef VPIC -// ---------------------------------------------------------------------- -// vpic_run_diagnostics - -void vpic_run_diagnostics(Mparticles& mprts, MfieldsState& mflds) -{ - diag_mixin.diagnostics_run(mprts, mflds, *interpolator, *hydro, - mprts.grid().domain.np); -} -#endif +void vpic_setup_diagnostics() {} From b3942386248937ed6bde4c08a2c3b809ef61c239 Mon Sep 17 00:00:00 2001 From: James McClung Date: Mon, 22 Dec 2025 13:35:33 -0500 Subject: [PATCH 07/12] psc: -step_psc, just step --- src/include/psc.hxx | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/include/psc.hxx b/src/include/psc.hxx index a81733fed..d18c65fea 100644 --- a/src/include/psc.hxx +++ b/src/include/psc.hxx @@ -270,9 +270,9 @@ struct Psc } // ---------------------------------------------------------------------- - // step_psc + // step - void step_psc() + void step() { using Dim = typename PscConfig::Dim; @@ -421,8 +421,6 @@ struct Psc // psc_push_particles_prep(psc->push_particles, psc->particles, psc->flds); } - void step() { step_psc(); } - private: // ---------------------------------------------------------------------- // print_profiling From 931733cd3d811be9639b94e9a07f161d77d5af2e Mon Sep 17 00:00:00 2001 From: James McClung Date: Mon, 22 Dec 2025 13:31:38 -0500 Subject: [PATCH 08/12] psc: inline initialize_default --- src/include/psc.hxx | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/src/include/psc.hxx b/src/include/psc.hxx index d18c65fea..52b23817d 100644 --- a/src/include/psc.hxx +++ b/src/include/psc.hxx @@ -182,7 +182,6 @@ struct Psc void initialize() { - initialize_default(); bndf_.fill_ghosts_H(mflds_); bnd_.fill_ghosts(mflds_, HX, HX + 3); @@ -191,6 +190,12 @@ struct Psc bndf_.fill_ghosts_E(mflds_); bnd_.fill_ghosts(mflds_, EX, EX + 3); + // FIXME: do a half-step on p to bring it to its natural time, + // p^{n+1/2} -> p^{n+1} + // pushp_.stagger(mprts, mflds); + + // checks_.gauss(mprts_, mflds_); + // initial output / stats mpi_printf(grid().comm(), "Performing initial diagnostics.\n"); diagnostics(); @@ -444,16 +449,6 @@ private: } } - // ---------------------------------------------------------------------- - // initialize_default - - void initialize_default() - { - // pushp_.stagger(mprts, mflds); FIXME, vpic does it - - // checks_.gauss(mprts_, mflds_); - } - // ---------------------------------------------------------------------- // diagnostics From f016588254a9255b3acf3d62e6aa96c3446f1ad2 Mon Sep 17 00:00:00 2001 From: James McClung Date: Mon, 22 Dec 2025 13:42:45 -0500 Subject: [PATCH 09/12] psc: actually check gauss for ICs --- src/include/psc.hxx | 5 ++++- 1 file changed, 4 insertions(+), 1 
deletion(-) diff --git a/src/include/psc.hxx b/src/include/psc.hxx index 52b23817d..a9ce6ddf1 100644 --- a/src/include/psc.hxx +++ b/src/include/psc.hxx @@ -194,7 +194,10 @@ struct Psc // p^{n+1/2} -> p^{n+1} // pushp_.stagger(mprts, mflds); - // checks_.gauss(mprts_, mflds_); + if (checks_.gauss.should_do_check(0)) { + mpi_printf(grid().comm(), "Checking gauss.\n"); + checks_.gauss(mprts_, mflds_); + } // initial output / stats mpi_printf(grid().comm(), "Performing initial diagnostics.\n"); From 6c1fda7eb8be079bc8f31e3bc1e89910b1e23eaf Mon Sep 17 00:00:00 2001 From: James McClung Date: Mon, 22 Dec 2025 14:08:51 -0500 Subject: [PATCH 10/12] psc; *: update timestep in step Certain actions are now performed 1 step sooner, which in general means they aren't performed before or after the first time step. Actions that should be performed before the first time step should explicitly be done in initialize(). --- src/include/psc.hxx | 3 ++- src/libpsc/tests/test_boundary_injector.cxx | 6 +++--- src/libpsc/tests/test_reflective_bcs_integration.cxx | 4 ++-- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/src/include/psc.hxx b/src/include/psc.hxx index a9ce6ddf1..3ebc8c4a2 100644 --- a/src/include/psc.hxx +++ b/src/include/psc.hxx @@ -239,7 +239,6 @@ struct Psc // pr_time_step_no_comm); // actual measurements are done w/ restart step(); - grid_->timestep_++; // FIXME, too hacky diagnostics(); @@ -301,6 +300,8 @@ struct Psc // state is at: x^{n+1/2}, p^{n}, E^{n+1/2}, B^{n+1/2} MPI_Comm comm = grid().comm(); + // === time propagation t^{n+1/2} -> t^{n+3/2} + grid_->timestep_++; int timestep = grid().timestep(); #ifdef USE_CUDA diff --git a/src/libpsc/tests/test_boundary_injector.cxx b/src/libpsc/tests/test_boundary_injector.cxx index 002930265..44e540fc1 100644 --- a/src/libpsc/tests/test_boundary_injector.cxx +++ b/src/libpsc/tests/test_boundary_injector.cxx @@ -158,7 +158,7 @@ TEST(BoundaryInjectorTest, Integration1Particle) ASSERT_EQ(prts.size(), 0); - for (; grid.timestep_ < psc_params.nmax; grid.timestep_++) { + for (; grid.timestep_ < psc_params.nmax;) { psc.step(); EXPECT_LT(checks.continuity.last_max_err, checks.continuity.err_threshold); @@ -221,7 +221,7 @@ TEST(BoundaryInjectorTest, IntegrationManyParticles) ASSERT_EQ(prts.size(), 0); - for (; grid.timestep_ < psc_params.nmax; grid.timestep_++) { + for (; grid.timestep_ < psc_params.nmax;) { psc.step(); EXPECT_LT(checks.continuity.last_max_err, checks.continuity.err_threshold); @@ -289,7 +289,7 @@ TEST(BoundaryInjectorTest, IntegrationManySpecies) ASSERT_EQ(prts.size(), 0); - for (; grid.timestep_ < psc_params.nmax; grid.timestep_++) { + for (; grid.timestep_ < psc_params.nmax;) { psc.step(); EXPECT_LT(checks.continuity.last_max_err, checks.continuity.err_threshold); diff --git a/src/libpsc/tests/test_reflective_bcs_integration.cxx b/src/libpsc/tests/test_reflective_bcs_integration.cxx index b2cefb3ab..5068be256 100644 --- a/src/libpsc/tests/test_reflective_bcs_integration.cxx +++ b/src/libpsc/tests/test_reflective_bcs_integration.cxx @@ -141,7 +141,7 @@ TEST(ReflectiveBcsTest, IntegrationY) bool about_to_reflect = false; bool reflected = false; - for (; grid.timestep_ < psc_params.nmax; grid.timestep_++) { + for (; grid.timestep_ < psc_params.nmax;) { about_to_reflect = prts[0].x()[1] < grid.domain.dx[1] && prts[0].u()[1] < 0.0; @@ -228,7 +228,7 @@ TEST(ReflectiveBcsTest, IntegrationZ) bool about_to_reflect = false; bool reflected = false; - for (; grid.timestep_ < psc_params.nmax; grid.timestep_++) { + for (; grid.timestep_ < 
psc_params.nmax;) { about_to_reflect = prts[0].x()[2] < grid.domain.dx[2] && prts[0].u()[2] < 0.0; From 9571b3ffdfc9459256a2fb0dc6819d7d4b95329b Mon Sep 17 00:00:00 2001 From: James McClung Date: Mon, 22 Dec 2025 14:42:33 -0500 Subject: [PATCH 11/12] psc: fix profs --- src/include/psc.hxx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/include/psc.hxx b/src/include/psc.hxx index 3ebc8c4a2..4b8a567f0 100644 --- a/src/include/psc.hxx +++ b/src/include/psc.hxx @@ -371,7 +371,7 @@ struct Psc prof_stop(pr_push_flds); mpi_printf(comm, "***** Bnd fields B (1 of 2)...\n"); - prof_start(pr_bndf); + prof_restart(pr_bndf); bndf_.fill_ghosts_H(mflds_); bnd_.fill_ghosts(mflds_, HX, HX + 3); prof_stop(pr_bndf); @@ -397,7 +397,7 @@ struct Psc prof_stop(pr_push_flds); mpi_printf(comm, "***** Bnd fields B (2 of 2)...\n"); - prof_start(pr_bndf); + prof_restart(pr_bndf); bndf_.fill_ghosts_H(mflds_); bnd_.fill_ghosts(mflds_, HX, HX + 3); prof_stop(pr_bndf); From 07056cd63ad5deaffa07b47debaf09584a888fe9 Mon Sep 17 00:00:00 2001 From: James McClung Date: Mon, 29 Dec 2025 09:23:10 -0500 Subject: [PATCH 12/12] -psc_harris_xz --- src/CMakeLists.txt | 3 +- src/psc_harris_xz.cxx | 932 ------------------------------------------ 2 files changed, 1 insertion(+), 934 deletions(-) delete mode 100644 src/psc_harris_xz.cxx diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index de0e7a3ec..3c42b99f7 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -16,7 +16,6 @@ add_psc_executable(psc_bubble_yz) add_psc_executable(psc_flatfoil_yz) add_psc_executable(psc_flatfoil_yz_small) add_psc_executable(psc_whistler) -add_psc_executable(psc_harris_xz) add_psc_executable(psc_harris_yz) add_psc_executable(psc_2d_shock) add_psc_executable(psc_radiation) @@ -30,6 +29,6 @@ if(NOT USE_CUDA) endif() install( - TARGETS psc_bubble_yz psc_flatfoil_yz psc_whistler psc_harris_xz psc_shock + TARGETS psc_bubble_yz psc_flatfoil_yz psc_whistler psc_shock RUNTIME DESTINATION bin ) diff --git a/src/psc_harris_xz.cxx b/src/psc_harris_xz.cxx deleted file mode 100644 index b8e65c11f..000000000 --- a/src/psc_harris_xz.cxx +++ /dev/null @@ -1,932 +0,0 @@ - -#define VPIC 1 - -#include -#include -#include - -#include "../libpsc/vpic/fields_item_vpic.hxx" -#include "../libpsc/vpic/setup_fields_vpic.hxx" -#include "OutputFieldsDefault.h" -#include "psc_config.hxx" - -#include "rngpool_iface.h" - -extern Grid* vgrid; // FIXME - -static RngPool* rngpool; // FIXME, should be member (of struct psc, really) - -// FIXME, helper should go somewhere... 
- -static inline double trunc_granular(double a, double b) -{ - return b * (int)(a / b); -} - -// ====================================================================== -// PSC configuration -// -// This sets up compile-time configuration for the code, in particular -// what data structures and algorithms to use -// -// EDIT to change order / floating point type / cuda / 2d/3d - -#ifdef VPIC -#ifdef DO_VPIC -using PscConfig = PscConfigVpicWrap; -#else -using PscConfig = PscConfigVpicPsc; -#endif -#else -using PscConfig = PscConfig1vbecSingle; -#endif - -// ---------------------------------------------------------------------- - -using MfieldsState = typename PscConfig::MfieldsState; -#ifdef VPIC -using MaterialList = typename MfieldsState::MaterialList; -#endif -using Mparticles = typename PscConfig::Mparticles; -using Balance = typename PscConfig::Balance; -using Collision = typename PscConfig::Collision; -using Checks = typename PscConfig::Checks; -using Marder = typename PscConfig::Marder; -using OutputParticles = PscConfig::OutputParticles; - -// FIXME! -MfieldsC evalMfields(const MfieldsState& _exp) -{ - auto& exp = const_cast(_exp); - MfieldsC mflds{exp.grid(), exp.n_comps(), exp.ibn()}; - - for (int p = 0; p < mflds.n_patches(); p++) { - auto flds = make_Fields3d(mflds[p]); - auto _exp = make_Fields3d(exp[p]); - for (int m = 0; m < exp.n_comps(); m++) { - mflds.Foreach_3d(0, 0, [&](int i, int j, int k) { - flds(m, i, j, k) = _exp(m, i, j, k); - }); - } - } - return mflds; -} - -// ====================================================================== -// PscHarrisParams - -struct PscHarrisParams -{ - double L_di; // Sheet thickness / ion inertial length - double Ti_Te; // Ion temperature / electron temperature - double nb_n0; // background plasma density - double Tbe_Te; // Ratio of background T_e to Harris T_e - double Tbi_Ti; // Ratio of background T_i to Harris T_i - - double bg; // Guide field - double theta; - - double Lpert_Lx; // wavelength of perturbation in terms of Lx - double dbz_b0; // perturbation in Bz relative to B0 - double nppc; // Average number of macro particle per cell per species - bool open_bc_x; // Flag to signal we want to do open boundary condition in x - bool - driven_bc_z; // Flag to signal we want to do driven boundary condition in z - - // FIXME, not really harris-specific - double wpedt_max; - - double wpe_wce; // electron plasma freq / electron cyclotron freq - double mi_me; // Ion mass / electron mass - - double Lx_di, Ly_di, Lz_di; // Size of box in d_i - - int ion_sort_interval; - int electron_sort_interval; - - double taui; // simulation wci's to run - double t_intervali; // output interval in terms of 1/wci - double output_particle_interval; // particle output interval in terms of 1/wci - - double overalloc; // Overallocation factor (> 1) for particle arrays - - Int3 gdims; - Int3 np; -}; - -// ====================================================================== -// Global parameters -// -// I'm not a big fan of global parameters, but they're only for -// this particular case and they help make things simpler. - -// An "anonymous namespace" makes these variables visible in this source file -// only -namespace -{ - -// Parameters specific to this case. 
They don't really need to be collected in a -// struct, but maybe it's nice that they are -PscHarrisParams g; - -std::string read_checkpoint_filename; - -// This is a set of generic PSC params (see include/psc.hxx), -// like number of steps to run, etc, which also should be set by the case -PscParams psc_params; - -} // namespace - -// ====================================================================== -// setupHarrisParams() - -void setupHarrisParams() -{ - g.wpedt_max = .36; - g.wpe_wce = 2.; - g.mi_me = 25.; - - g.Lx_di = 40.; - g.Ly_di = 1.; - g.Lz_di = 10.; - - g.electron_sort_interval = 25; - g.ion_sort_interval = 25; - - g.taui = 40.; - g.t_intervali = 1.; - g.output_particle_interval = 10.; - - g.overalloc = 2.; - - g.gdims = {512, 1, 128}; - g.np = {4, 1, 1}; - - g.L_di = .5; - g.Ti_Te = 5.; - g.nb_n0 = .05; - g.Tbe_Te = .333; - g.Tbi_Ti = .333; - - g.bg = 0.; - g.theta = 0.; - - g.Lpert_Lx = 1.; - g.dbz_b0 = .03; - g.nppc = 100; - g.open_bc_x = false; - g.driven_bc_z = false; -} - -// ====================================================================== -// globals_physics -// -// FIXME rename / merge? - -struct globals_physics -{ - double ec; - double me; - double c; - double eps0; - double de; - - double mi; - double di; - double wpe; - double wpi; - double wce; - double wci; - - // calculated - double b0; // B0 - double n0; - double v_A; - double rhoi_L; - double Lx, Ly, Lz; // size of box - double L; // Harris sheet thickness - double Lpert; // wavelength of perturbation - double dbx; // Perturbation in Bz relative to Bo (Only change here) - double dbz; // Set Bx perturbation so that div(B) = 0 - double tanhf; - - double Ne; // Total number of macro electrons - double Ne_sheet; // Number of macro electrons in Harris sheet - double weight_s; // Charge per macro electron - double vthe; // Electron thermal velocity - double vthi; // Ion thermal velocity - double vdre; // Electron drift velocity - double vdri; // Ion drift velocity - double gdri; // gamma of ion drift frame - double gdre; // gamma of electron drift frame - double udri; // 4-velocity of ion drift frame - double udre; // 4-velocity of electron drift frame - - double Ne_back; // Number of macro electrons in background - double weight_b; // Charge per macro electron - double vtheb; // normalized background e thermal vel. - double vthib; // normalized background ion thermal vel. - - int n_global_patches; - - // ---------------------------------------------------------------------- - // ctor - - // FIXME, do we want to keep this? - globals_physics() {} - - globals_physics(const PscHarrisParams& p) - { - assert(p.np[2] <= 2); // For load balance, keep "1" or "2" for Harris sheet - - // FIXME, the general normalization stuff should be shared somehow - - // use natural PIC units - ec = 1; // Charge normalization - me = 1; // Mass normalization - c = 1; // Speed of light - de = 1; // Length normalization (electron inertial length) - eps0 = 1; // Permittivity of space - - // derived quantities - mi = me * p.mi_me; // Ion mass - - double Te = - me * sqr(c) / - (2. * eps0 * sqr(p.wpe_wce) * (1. + p.Ti_Te)); // Electron temperature - double Ti = Te * p.Ti_Te; // Ion temperature - vthe = sqrt(Te / me); // Electron thermal velocity - vthi = sqrt(Ti / mi); // Ion thermal velocity - vtheb = sqrt(p.Tbe_Te * Te / me); // normalized background e thermal vel. - vthib = sqrt(p.Tbi_Ti * Ti / mi); // normalized background ion thermal vel. - wci = 1. 
/ (p.mi_me * p.wpe_wce); // Ion cyclotron frequency - wce = wci * p.mi_me; // Electron cyclotron freqeuncy - wpe = wce * p.wpe_wce; // electron plasma frequency - wpi = wpe / sqrt(p.mi_me); // ion plasma frequency - di = c / wpi; // ion inertial length - L = p.L_di * di; // Harris sheet thickness - rhoi_L = sqrt(p.Ti_Te / (1. + p.Ti_Te)) / p.L_di; - v_A = (wci / wpi) / sqrt(p.nb_n0); // based on nb - - Lx = p.Lx_di * di; // size of box in x dimension - Ly = p.Ly_di * di; // size of box in y dimension - Lz = p.Lz_di * di; // size of box in z dimension - - b0 = me * c * wce / ec; // Asymptotic magnetic field strength - n0 = me * eps0 * wpe * wpe / (ec * ec); // Peak electron (ion) density - vdri = 2 * c * Ti / (ec * b0 * L); // Ion drift velocity - vdre = -vdri / (p.Ti_Te); // electron drift velocity - - n_global_patches = p.np[0] * p.np[1] * p.np[2]; - double Npe_sheet = - 2 * n0 * Lx * Ly * L * tanh(0.5 * Lz / L); // N physical e's in sheet - double Npe_back = p.nb_n0 * n0 * Ly * Lz * Lx; // N physical e's in backgrnd - double Npe = Npe_sheet + Npe_back; - Ne = p.nppc * p.gdims[0] * p.gdims[1] * - p.gdims[2]; // total macro electrons in box - Ne_sheet = Ne * Npe_sheet / Npe; - Ne_back = Ne * Npe_back / Npe; - Ne_sheet = trunc_granular( - Ne_sheet, n_global_patches); // Make it divisible by # subdomains - Ne_back = trunc_granular( - Ne_back, n_global_patches); // Make it divisible by # subdomains - Ne = Ne_sheet + Ne_back; - weight_s = ec * Npe_sheet / Ne_sheet; // Charge per macro electron - weight_b = ec * Npe_back / Ne_back; // Charge per macro electron - - gdri = 1. / sqrt(1. - sqr(vdri) / sqr(c)); // gamma of ion drift frame - gdre = 1. / sqrt(1. - sqr(vdre) / sqr(c)); // gamma of electron drift frame - udri = vdri * gdri; // 4-velocity of ion drift frame - udre = vdre * gdre; // 4-velocity of electron drift frame - tanhf = tanh(0.5 * Lz / L); - Lpert = p.Lpert_Lx * Lx; // wavelength of perturbation - dbz = p.dbz_b0 * b0; // Perturbation in Bz relative to Bo (Only change here) - dbx = -dbz * Lpert / (2. * Lz); // Set Bx perturbation so that div(B) = 0 - } -}; - -globals_physics phys; - -// ====================================================================== -// setupGrid -// -// This helper function is responsible for setting up the "Grid", -// which is really more than just the domain and its decomposition, it -// also encompasses PC normalization parameters, information about the -// particle kinds, etc. 
- -Grid_t* setupGrid() -{ - auto comm = MPI_COMM_WORLD; - - // --- set up domain - - auto domain = Grid_t::Domain{g.gdims, - {phys.Lx, phys.Ly, phys.Lz}, - {0., -.5 * phys.Ly, -.5 * phys.Lz}, - g.np}; - - mpi_printf(comm, "Conducting fields on Z-boundaries\n"); - mpi_printf(comm, "Reflect particles on Z-boundaries\n"); - auto bc = - psc::grid::BC{{BND_FLD_PERIODIC, BND_FLD_PERIODIC, BND_FLD_CONDUCTING_WALL}, - {BND_FLD_PERIODIC, BND_FLD_PERIODIC, BND_FLD_CONDUCTING_WALL}, - {BND_PRT_PERIODIC, BND_PRT_PERIODIC, BND_PRT_REFLECTING}, - {BND_PRT_PERIODIC, BND_PRT_PERIODIC, BND_PRT_REFLECTING}}; - if (g.open_bc_x) { - mpi_printf(comm, "Absorbing fields on X-boundaries\n"); - bc.fld_lo[0] = BND_FLD_ABSORBING; - bc.fld_hi[0] = BND_FLD_ABSORBING; - mpi_printf(comm, "Absorb particles on X-boundaries\n"); - bc.prt_lo[1] = BND_PRT_ABSORBING; - bc.prt_hi[1] = BND_PRT_ABSORBING; - } - - if (g.driven_bc_z) { - mpi_printf(comm, "Absorb particles on Z-boundaries\n"); - bc.prt_lo[2] = BND_PRT_ABSORBING; - bc.prt_hi[2] = BND_PRT_ABSORBING; - } - - auto kinds = Grid_t::Kinds(NR_KINDS); - kinds[KIND_ELECTRON] = {-phys.ec, phys.me, "e"}; - kinds[KIND_ION] = {phys.ec, phys.mi, "i"}; - - // determine the time step - double dg = courant_length(domain); - double dt = psc_params.cfl * dg / phys.c; // courant limited time step - if (phys.wpe * dt > g.wpedt_max) { - dt = - g.wpedt_max / phys.wpe; // override timestep if plasma frequency limited - } - - assert(phys.c == 1. && phys.eps0 == 1.); - auto norm_params = Grid_t::NormalizationParams::dimensionless(); - norm_params.nicell = 1; - auto norm = Grid_t::Normalization{norm_params}; - -#ifdef VPIC - Int3 ibn = {1, 1, 1}; -#else - int n_ghosts = 2; - Int3 ibn = n_ghosts * Dim::get_noninvariant_mask(); -#endif - - auto grid_ptr = new Grid_t{domain, bc, kinds, norm, dt, -1, ibn}; - vpic_define_grid(*grid_ptr); - return grid_ptr; -} - -// ---------------------------------------------------------------------- -// setupMaterials - -void setupMaterials(MaterialList& material_list) -{ - MPI_Comm comm = MPI_COMM_WORLD; - - mpi_printf(comm, "Setting up materials.\n"); - - // -- set up MaterialList - vpic_define_material(material_list, "vacuum", 1., 1., 0., 0.); -#if 0 - struct material *resistive = - vpic_define_material(material_list, "resistive", 1., 1., 1., 0.); -#endif - - // Note: define_material defaults to isotropic materials with mu=1,sigma=0 - // Tensor electronic, magnetic and conductive materials are supported - // though. See "shapes" for how to define them and assign them to regions. - // Also, space is initially filled with the first material defined. 
-} - -// ---------------------------------------------------------------------- -// vpic_setup_species -// -// FIXME, half-redundant to the PSC species setup - -void vpic_setup_species(Mparticles& mprts) -{ - mpi_printf(mprts.grid().comm(), "Setting up species.\n"); - double nmax = g.overalloc * phys.Ne / phys.n_global_patches; - double nmovers = .1 * nmax; - double sort_method = 1; // 0=in place and 1=out of place - - mprts.define_species("electron", -phys.ec, phys.me, nmax, nmovers, - g.electron_sort_interval, sort_method); - mprts.define_species("ion", phys.ec, phys.mi, nmax, nmovers, - g.ion_sort_interval, sort_method); -} - -// ---------------------------------------------------------------------- -// setup_log - -void setup_log(const Grid_t& grid) -{ - MPI_Comm comm = grid.comm(); - - mpi_printf(comm, "***********************************************\n"); - mpi_printf(comm, "* Topology: %d x %d x %d\n", g.np[0], g.np[1], g.np[2]); - mpi_printf(comm, "tanhf = %g\n", phys.tanhf); - mpi_printf(comm, "L_di = %g\n", g.L_di); - mpi_printf(comm, "rhoi/L = %g\n", phys.rhoi_L); - mpi_printf(comm, "Ti/Te = %g\n", g.Ti_Te); - mpi_printf(comm, "nb/n0 = %g\n", g.nb_n0); - mpi_printf(comm, "wpe/wce = %g\n", g.wpe_wce); - mpi_printf(comm, "mi/me = %g\n", g.mi_me); - mpi_printf(comm, "theta = %g\n", g.theta); - mpi_printf(comm, "Lpert/Lx = %g\n", g.Lpert_Lx); - mpi_printf(comm, "dbz/b0 = %g\n", g.dbz_b0); - mpi_printf(comm, "taui = %g\n", g.taui); - mpi_printf(comm, "t_intervali = %g\n", g.t_intervali); - mpi_printf(comm, "num_step = %d\n", psc_params.nmax); - mpi_printf(comm, "Lx/di = %g\n", phys.Lx / phys.di); - mpi_printf(comm, "Lx/de = %g\n", phys.Lx / phys.de); - mpi_printf(comm, "Ly/di = %g\n", phys.Ly / phys.di); - mpi_printf(comm, "Ly/de = %g\n", phys.Ly / phys.de); - mpi_printf(comm, "Lz/di = %g\n", phys.Lz / phys.di); - mpi_printf(comm, "Lz/de = %g\n", phys.Lz / phys.de); - mpi_printf(comm, "nx = %d\n", g.gdims[0]); - mpi_printf(comm, "ny = %d\n", g.gdims[1]); - mpi_printf(comm, "nz = %d\n", g.gdims[2]); - mpi_printf(comm, "n_global_patches = %d\n", phys.n_global_patches); - mpi_printf(comm, "nppc = %g\n", g.nppc); - mpi_printf(comm, "b0 = %g\n", phys.b0); - mpi_printf(comm, "v_A (based on nb) = %g\n", phys.v_A); - mpi_printf(comm, "di = %g\n", phys.di); - mpi_printf(comm, "Ne = %g\n", phys.Ne); - mpi_printf(comm, "Ne_sheet = %g\n", phys.Ne_sheet); - mpi_printf(comm, "Ne_back = %g\n", phys.Ne_back); - mpi_printf(comm, "total # of particles = %g\n", 2 * phys.Ne); - mpi_printf(comm, "dt*wpe = %g\n", phys.wpe * grid.dt); - mpi_printf(comm, "dt*wce = %g\n", phys.wce * grid.dt); - mpi_printf(comm, "dt*wci = %g\n", phys.wci * grid.dt); - mpi_printf(comm, "dx/de = %g\n", phys.Lx / (phys.de * g.gdims[0])); - mpi_printf(comm, "dy/de = %g\n", phys.Ly / (phys.de * g.gdims[1])); - mpi_printf(comm, "dz/de = %g\n", phys.Lz / (phys.de * g.gdims[2])); - mpi_printf(comm, "dx/rhoi = %g\n", - (phys.Lx / g.gdims[0]) / (phys.vthi / phys.wci)); - mpi_printf(comm, "dx/rhoe = %g\n", - (phys.Lx / g.gdims[0]) / (phys.vthe / phys.wce)); - mpi_printf(comm, "L/debye = %g\n", phys.L / (phys.vthe / phys.wpe)); - mpi_printf(comm, "dx/debye = %g\n", - (phys.Lx / g.gdims[0]) / (phys.vthe / phys.wpe)); - mpi_printf(comm, "n0 = %g\n", phys.n0); - mpi_printf(comm, "vthi/c = %g\n", phys.vthi / phys.c); - mpi_printf(comm, "vthe/c = %g\n", phys.vthe / phys.c); - mpi_printf(comm, "vdri/c = %g\n", phys.vdri / phys.c); - mpi_printf(comm, "vdre/c = %g\n", phys.vdre / phys.c); - mpi_printf(comm, "Open BC in x? 
= %d\n", g.open_bc_x); - mpi_printf(comm, "Driven BC in z? = %d\n", g.driven_bc_z); -} - -// ====================================================================== -// Diagnostics - -class Diagnostics -{ -public: - Diagnostics(Grid_t& grid, - OutputFields& outf, - OutputParticles& outp, DiagEnergies& oute) - : outf_{outf}, - outp_{outp}, - oute_{oute}, - outf_state_(), - outf_hydro_(grid), - mflds_acc_state_(grid, outf_state_.n_comps(), {1, 1, 1}), - mflds_acc_hydro_(grid, outf_hydro_.n_comps(), {1, 1, 1}) - { - io_pfd_.open("pfd"); - io_tfd_.open("tfd"); - } - - void operator()(Mparticles& mprts, MfieldsState& mflds) - { - vpic_run_diagnostics(mprts, mflds); - - const auto& grid = mprts.grid(); - MPI_Comm comm = grid.comm(); - - int timestep = grid.timestep(); - if (outf_.fields.pfield.out_interval > 0 && - timestep % outf_.fields.pfield.out_interval == 0) { - mpi_printf(comm, "***** Writing PFD output\n"); - io_pfd_.begin_step(grid); - - { - auto result = outf_state_(mflds); - io_pfd_.write(adapt(evalMfields(result.mflds)), grid, result.name, - result.comp_names); - } - - { - auto result = outf_hydro_(mprts, *hydro, *interpolator); - io_pfd_.write(adapt(result.mflds), grid, result.name, - result.comp_names); - } - - io_pfd_.end_step(); - } - - if (outf_.fields.tfield.out_interval > 0) { - auto result_state = outf_state_(mflds); - - for (int p = 0; p < mflds_acc_state_.n_patches(); p++) { - auto flds_acc_state = make_Fields3d(mflds_acc_state_[p]); - auto flds_state = make_Fields3d(result_state.mflds[p]); - for (int m = 0; m < mflds_acc_state_.n_comps(); m++) { - mflds_acc_state_.grid().Foreach_3d(0, 0, [&](int i, int j, int k) { - flds_acc_state(m, i, j, k) += flds_state(m, i, j, k); - }); - } - } - - auto result_hydro = outf_hydro_(mprts, *hydro, *interpolator); - for (int p = 0; p < mflds_acc_hydro_.n_patches(); p++) { - auto flds_acc_hydro = make_Fields3d(mflds_acc_hydro_[p]); - auto flds_hydro = make_Fields3d(result_hydro.mflds[p]); - for (int m = 0; m < mflds_acc_hydro_.n_comps(); m++) { - mflds_acc_hydro_.grid().Foreach_3d(0, 0, [&](int i, int j, int k) { - flds_acc_hydro(m, i, j, k) += flds_hydro(m, i, j, k); - }); - } - } - - n_accum_++; - - if (timestep % outf_.fields.tfield.out_interval == 0) { - mpi_printf(comm, "***** Writing TFD output\n"); - io_tfd_.begin_step(grid); - mflds_acc_state_.storage() = - (1. / n_accum_) * mflds_acc_state_.storage(); - io_tfd_.write(adapt(mflds_acc_state_), grid, result_state.name, - result_state.comp_names); - mflds_acc_state_.storage().view() = 0.; - - mflds_acc_hydro_.storage() = - (1. 
/ n_accum_) * mflds_acc_hydro_.storage(); - io_tfd_.write(adapt(mflds_acc_hydro_), grid, result_hydro.name, - result_hydro.comp_names); - mflds_acc_hydro_.storage().view() = 0.; - - io_tfd_.end_step(); - - n_accum_ = 0; - } - } - - psc_stats_start(st_time_output); - outp_(mprts); - psc_stats_stop(st_time_output); - -#ifndef VPIC - oute_(mprts, mflds); -#endif - } - -private: - WriterMRC io_pfd_; - WriterMRC io_tfd_; - OutputFields& outf_; - OutputFieldsVpic outf_state_; - OutputHydro outf_hydro_; - OutputParticles& outp_; - DiagEnergies& oute_; - MfieldsSingle mflds_acc_state_; - MfieldsSingle mflds_acc_hydro_; - int n_accum_ = 0; -}; - -// ---------------------------------------------------------------------- -// setup_particles -// -// set particles x^{n+1/2}, p^{n+1/2} - -void setup_particles(Mparticles& mprts, - std::vector& nr_particles_by_patch, bool count_only) -{ - const auto& grid = mprts.grid(); - MPI_Comm comm = grid.comm(); - - double cs = cos(g.theta), sn = sin(g.theta); - double Ne_sheet = phys.Ne_sheet, vthe = phys.vthe, vthi = phys.vthi; - int n_global_patches = phys.n_global_patches; - double weight_s = phys.weight_s; - double tanhf = phys.tanhf, L = phys.L; - double gdre = phys.gdre, udre = phys.udre, gdri = phys.gdri, udri = phys.udri; - double Ne_back = phys.Ne_back, vtheb = phys.vtheb, vthib = phys.vthib; - double weight_b = phys.weight_b; - - if (count_only) { - for (int p = 0; p < grid.n_patches(); p++) { - nr_particles_by_patch[p] = - 2 * (Ne_sheet / n_global_patches + Ne_back / n_global_patches); - } - return; - } - - // LOAD PARTICLES - - mpi_printf(comm, "Loading particles\n"); - - // Do a fast load of the particles - - rngpool = - RngPool_create(); // FIXME, should be part of ctor (of struct psc, really) - - int rank; - MPI_Comm_rank(comm, &rank); - RngPool_seed(rngpool, rank); - Rng* rng = RngPool_get(rngpool, 0); - - assert(grid.n_patches() > 0); - const Grid_t::Patch& patch = grid.patches[0]; - double xmin = patch.xb[0], xmax = patch.xe[0]; - double ymin = patch.xb[1], ymax = patch.xe[1]; - double zmin = patch.xb[2], zmax = patch.xe[2]; - - // Load Harris population - - { - auto inj = mprts.injector(); - auto injector = inj[0]; - - mpi_printf(comm, "-> Main Harris Sheet\n"); - - for (int64_t n = 0; n < Ne_sheet / n_global_patches; n++) { - double x, y, z, ux, uy, uz, d0; - - do { - z = L * atanh(Rng_uniform(rng, -1., 1.) 
* tanhf); - } while (z <= zmin || z >= zmax); - x = Rng_uniform(rng, xmin, xmax); - y = Rng_uniform(rng, ymin, ymax); - - // inject_particles() will return an error for particles not on this - // node and will not inject particle locally - - ux = Rng_normal(rng, 0, vthe); - uy = Rng_normal(rng, 0, vthe); - uz = Rng_normal(rng, 0, vthe); - d0 = gdre * uy + sqrt(ux * ux + uy * uy + uz * uz + 1) * udre; - uy = d0 * cs - ux * sn; - ux = d0 * sn + ux * cs; - - injector.reweight(psc::particle::Inject{ - {x, y, z}, {ux, uy, uz}, weight_s, KIND_ELECTRON}); - - ux = Rng_normal(rng, 0, vthi); - uy = Rng_normal(rng, 0, vthi); - uz = Rng_normal(rng, 0, vthi); - d0 = gdri * uy + sqrt(ux * ux + uy * uy + uz * uz + 1) * udri; - uy = d0 * cs - ux * sn; - ux = d0 * sn + ux * cs; - - injector.reweight( - psc::particle::Inject{{x, y, z}, {ux, uy, uz}, weight_s, KIND_ION}); - } - - mpi_printf(comm, "-> Background Population\n"); - - for (int64_t n = 0; n < Ne_back / n_global_patches; n++) { - Double3 pos{Rng_uniform(rng, xmin, xmax), Rng_uniform(rng, ymin, ymax), - Rng_uniform(rng, zmin, zmax)}; - - Double3 u{Rng_normal(rng, 0, vtheb), Rng_normal(rng, 0, vtheb), - Rng_normal(rng, 0, vtheb)}; - injector.reweight(psc::particle::Inject{pos, u, weight_b, KIND_ELECTRON}); - - u = {Rng_normal(rng, 0, vthib), Rng_normal(rng, 0, vthib), - Rng_normal(rng, 0, vthib)}; - injector.reweight(psc::particle::Inject{pos, u, weight_b, KIND_ION}); - } - } - mpi_printf(comm, "Finished loading particles\n"); -} - -// ====================================================================== -// initializeParticles - -void initializeParticles(Balance& balance, Grid_t*& grid_ptr, Mparticles& mprts) -{ - auto comm = grid_ptr->comm(); - - mpi_printf(comm, "**** Partitioning...\n"); - std::vector n_prts_by_patch(mprts.n_patches()); - setup_particles(mprts, n_prts_by_patch, true); - - balance.initial(grid_ptr, n_prts_by_patch); - mprts.reset(*grid_ptr); - - mpi_printf(comm, "**** Setting up particles...\n"); - mprts.reserve_all(n_prts_by_patch); - setup_particles(mprts, n_prts_by_patch, false); -} - -// ====================================================================== -// initializeFields - -void initializeFields(MfieldsState& mflds) -{ - double b0 = phys.b0, dbx = phys.dbx, dbz = phys.dbz; - double L = phys.L, Lx = phys.Lx, Lz = phys.Lz, Lpert = phys.Lpert; - double cs = cos(g.theta), sn = sin(g.theta); - - setupFields(mflds, [&](int m, double crd[3]) { - double x = crd[0], z = crd[2]; - - switch (m) { - case HX: - return cs * b0 * tanh(z / L) + - dbx * cos(2. 
* M_PI * (x - .5 * Lx) / Lpert) * - sin(M_PI * z / Lz); - - case HY: return -sn * b0 * tanh(z / L) + b0 * g.bg; - - case HZ: - return dbz * cos(M_PI * z / Lz) * - sin(2.0 * M_PI * (x - 0.5 * Lx) / Lpert); - - case JYI: return 0.; // FIXME - - default: return 0.; - } - }); -} - -// ====================================================================== -// run - -void run() -{ - auto comm = MPI_COMM_WORLD; - - mpi_printf(comm, "*** Setting up simulation\n"); - - setupHarrisParams(); - phys = globals_physics{g}; - - psc_params.cfl = 0.99; - psc_params.stats_every = 100; - - // ---------------------------------------------------------------------- - // Set up grid, state fields, particles - - auto grid_ptr = setupGrid(); - auto& grid = *grid_ptr; - - psc_params.nmax = - int(g.taui / (phys.wci * grid.dt)); // number of steps from taui - - // --- create Simulation -#if 0 - // set high level VPIC simulation parameters - // FIXME, will be unneeded eventually - setParams(psc_params.nmax, psc_params.stats_every, - psc_params.stats_every / 2, psc_params.stats_every / 2, - psc_params.stats_every / 2); -#endif - - // --- setup field data structure -#ifdef VPIC - // --- setup materials - MaterialList material_list; - setupMaterials(material_list); - - // FIXME, mv assert into MfieldsState ctor - assert(!material_list.empty()); - - double damp = 0.; - MfieldsState mflds{grid, vgrid, material_list, damp}; - vpic_define_fields(grid); -#else - MfieldsState mflds{grid}; -#endif - - mpi_printf(comm, "*** Finalizing Field Advance\n"); -#if 0 - assert(grid.nr_patches() > 0); - Simulation_set_region_resistive_harris(sub->sim, &sub->prm, phys, psc_->patch[0].dx, - 0., resistive); -#endif - - /// --- setup particle data structure -#ifdef VPIC - Mparticles mprts{grid, vgrid}; - vpic_setup_species(mprts); -#else - Mparticles mprts{grid}; -#endif - - // -- Balance - psc_params.balance_interval = 0; - Balance balance{}; - - // -- Sort - // FIXME: the "vpic" sort actually keeps track of per-species sorting - // intervals internally, so it needs to be called every step -#ifdef VPIC - psc_params.sort_interval = 1; -#endif - - // -- Collision - int collision_interval = 0; - double collision_nu = .1; // FIXME, != 0 needed to avoid crash - Collision collision{grid, collision_interval, collision_nu}; - - // -- Checks - ChecksParams checks_params{}; - Checks checks{grid, comm, checks_params}; - - // -- Marder correction - // FIXME, these are ignored for vpic (?) - double marder_diffusion = 0.9; - int marder_loop = 3; - bool marder_dump = false; - // FIXME, how do we make sure that we don't forget to set this? 
- // (maybe make it part of the Marder object, or provide a base class - // interface define_marder() that takes the object and the interval -#ifdef VPIC - psc_params.marder_interval = 1; -#else - psc_params.marder_interval = 0; -#endif -#if 0 - // FIXME, marder "vpic" manages its own cleaning intervals - psc_marder_set_param_int(psc_->marder, "every_step", 1); - psc_marder_set_param_int(psc_->marder, "clean_div_e_interval", 50); - psc_marder_set_param_int(psc_->marder, "clean_div_b_interval", 50); - psc_marder_set_param_int(psc_->marder, "sync_shared_interval", 50); - psc_marder_set_param_int(psc_->marder, "num_div_e_round", 2); - psc_marder_set_param_int(psc_->marder, "num_div_b_round", 2); -#endif - - Marder marder(grid, marder_diffusion, marder_loop, marder_dump); - - // -- output fields - OutputFieldsParams outf_params; - double output_field_interval = .1; - outf_params.fields.pfield.out_interval = 100; - // int((output_field_interval / (phys.wci * grid.dt))); - outf_params.fields.tfield.out_interval = -1; - // int((output_field_interval / (phys.wci * grid.dt))); - OutputFields outf{grid, outf_params}; - - OutputParticlesParams outp_params{}; - outp_params.every_step = - int((g.output_particle_interval / (phys.wci * grid.dt))); - outp_params.data_dir = "."; - outp_params.basename = "prt"; - outp_params.lo = {192, 0, 48}; - outp_params.hi = {320, 0, 80}; - OutputParticles outp{grid, outp_params}; - - int oute_interval = 100; - DiagEnergies oute{grid.comm(), oute_interval}; - - Diagnostics diagnostics{grid, outf, outp, oute}; - - // --- - - int interval = int(g.t_intervali / (phys.wci * grid.dt)); - vpic_create_diagnostics(interval); - vpic_setup_diagnostics(); - setup_log(grid); - - mpi_printf(comm, "*** Finished with user-specified initialization ***\n"); - - // ---------------------------------------------------------------------- - - initializeParticles(balance, grid_ptr, mprts); - initializeFields(mflds); - - // ---------------------------------------------------------------------- - // hand off to PscIntegrator to run the simulation - - auto psc = - makePscIntegrator(psc_params, *grid_ptr, mflds, mprts, balance, - collision, checks, marder, diagnostics); - -#if 0 - // FIXME, checkpoint reading should be moved to before the integrator - if (!read_checkpoint_filename.empty()) { - mpi_printf(MPI_COMM_WORLD, "**** Reading checkpoint...\n"); - psc.read_checkpoint(read_checkpoint_filename); - } -#endif - - psc.integrate(); -} - -// ====================================================================== -// main - -int main(int argc, char** argv) -{ - psc_init(argc, argv); - - run(); - - psc_finalize(); - return 0; -}