diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 331c9569ff5..f467b2aee33 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -23,6 +23,7 @@ /src/FEP/ @agiliopadua /src/GPU/ @ndtrung81 /src/GRANULAR/ @jtclemm @dsbolin +/src/GRAPHICS/ @akohlmey /src/INTEL/ @wmbrownintel /src/KIM/ @ellio167 /src/KOKKOS/ @stanmoore1 @@ -60,10 +61,12 @@ /src/MANYBODY/pair_vashishta_table.* @andeplane /src/MANYBODY/pair_atm.* @sergeylishchuk /src/MANYBODY/pair_nb3b_screened.* @flodesani +/src/OPENMP/fix_nvt_sllod_omp.* @akohlmey @jtclemm @sjplimp @athomps /src/REPLICA/*_grem.* @dstelter92 /src/EXTRA-COMMAND/geturl.* @akohlmey /src/EXTRA-COMMAND/group_ndx.* @akohlmey /src/EXTRA-COMMAND/ndx_group.* @akohlmey +/src/EXTRA-COMPUTE/compute_hbond_local.* @akohlmey /src/EXTRA-COMPUTE/compute_stress_mop*.* @RomainVermorel /src/EXTRA-COMPUTE/compute_born_matrix.* @Bibobu @athomps /src/EXTRA-DUMP/dump_extxyz.* @fxcoudert @akohlmey @@ -148,6 +151,9 @@ /src/verlet.* @sjplimp @stanmoore1 /src/math_eigen_impl.h @jewettaij /src/fix_press_langevin.* @Bibobu +/src/fix_nvt_sllod.* @jtclemm @sjplimp @athomps +/src/compute_temp_deform.* @jtclemm @sjplimp @athomps +/src/fix_deform.* @jtclemm @sjplimp @athomps # tools /tools/coding_standard/ @akohlmey diff --git a/doc/src/Packages_details.rst b/doc/src/Packages_details.rst index 71a8c033a84..c21e14cb6c8 100644 --- a/doc/src/Packages_details.rst +++ b/doc/src/Packages_details.rst @@ -518,6 +518,7 @@ the :doc:`Build extras ` page. **Supporting info:** * ``src/COLVARS``: filenames -> commands +* https://colvars.github.io/master/colvars-refman-lammps.html * `doc/PDF/colvars-refman-lammps.pdf `_ * ``src/COLVARS/README`` * ``lib/colvars/README`` @@ -1092,9 +1093,15 @@ of regions scripted graphics in VMD. 
* :doc:`fix graphics/arrows ` * :doc:`fix graphics/isosurface ` * :doc:`fix graphics/labels `, +* :doc:`fix graphics/lines `, * :doc:`fix graphics/objects `, * :doc:`fix graphics/periodic `, * :doc:`region2vmd ` +* https://www.youtube.com/watch?v=9HEsGaOsdik +* https://www.youtube.com/watch?v=f4hfPs7aCmI +* https://www.youtube.com/shorts/1QEjIITapwQ +* https://www.youtube.com/shorts/OYn_VVodnIg +* https://www.youtube.com/shorts/4Cm5p0SfgNU ---------- diff --git a/doc/src/Run_options.rst b/doc/src/Run_options.rst index 546658a6448..78690d3e93c 100644 --- a/doc/src/Run_options.rst +++ b/doc/src/Run_options.rst @@ -67,18 +67,19 @@ used. **-in file** -Specify a file to use as an input script. This is an optional but -recommended switch when running LAMMPS in one-partition mode. If it -is not specified, LAMMPS reads its script from standard input, typically -from a script via I/O redirection; e.g. lmp_linux < in.run. -With many MPI implementations I/O redirection also works in parallel, -but using the -in flag will always work. - -Note that this is a required switch when running LAMMPS in -multi-partition mode, since multiple processors cannot all read from -stdin concurrently. The file name may be "none" for starting -multi-partition calculations without reading an initial input file -from the library interface. +Specify a file to use as an input script. This is currently an optional +but recommended switch when running LAMMPS in the default one-partition +mode. If it is not specified, LAMMPS reads its script from standard +input, typically from a script via I/O redirection; e.g. ``lmp_linux < +in.run``. With many MPI implementations (but not all of them), I/O +redirection also works in parallel, but using the ``-in`` flag will +*always* work. + +This is a **required** switch when running LAMMPS in multi-partition +mode (see below), since multiple pools of MPI processes cannot all read +from standard input concurrently. 
The file name may be "none" for +starting multi-partition calculations without reading an initial input +file when using the library interface. ---------- @@ -87,12 +88,12 @@ from the library interface. **-kokkos on/off keyword/value ...** Explicitly enable or disable KOKKOS support, as provided by the KOKKOS -package. Even if LAMMPS is built with this package, as described -in the :doc:`the KOKKOS package page `, this switch must be set to enable -running with KOKKOS-enabled styles the package provides. If the -switch is not set (the default), LAMMPS will operate as if the KOKKOS -package were not installed; i.e. you can run standard LAMMPS or with -the GPU or OPENMP packages, for testing or benchmarking purposes. +package. Even if LAMMPS is built with this package, as described in the +:doc:`the KOKKOS package page `, this switch must be set +to enable running with KOKKOS-enabled styles the package provides. If +the switch is not set (the default), LAMMPS will operate as if the +KOKKOS package were not installed; i.e. you can run standard LAMMPS or +with the GPU or OPENMP packages, for testing or benchmarking purposes. Additional optional keyword/value pairs can be specified which determine how Kokkos will use the underlying hardware on your platform. These @@ -210,13 +211,15 @@ how to launch LAMMPS in MDI client/server mode please refer to the If used, this must be the first command-line argument after the LAMMPS executable name. It is only used when LAMMPS is launched by an mpirun -command which also launches another executable(s) at the same time. -(The other executable could be LAMMPS as well.) The color is an -integer value which should be different for each executable (another -application may set this value in a different way). LAMMPS and the -other executable(s) perform an MPI_Comm_split() with their own colors -to shrink the MPI_COMM_WORLD communication to be the subset of -processors they are actually running on. 
+command which also launches another executable(s) at the same time (The +other executable could be LAMMPS as well.). The *color* is an integer +value which should be different for each executable (another application +may set this value in a different way). LAMMPS and the other +executable(s) perform an `MPI_Comm_split() +`_ +with their own colors to replace the ``MPI_COMM_WORLD`` communicator +with a new communicator using the subset of MPI processes they are +actually running on. ---------- @@ -291,23 +294,33 @@ having to edit an input script. **-partition 8x2 4 5 ...** -Invoke LAMMPS in multi-partition mode. When LAMMPS is run on P -processors and this switch is not used, LAMMPS runs in one partition, -i.e. all P processors run a single simulation. If this switch is -used, the P processors are split into separate partitions and each -partition runs its own simulation. The arguments to the switch -specify the number of processors in each partition. Arguments of the -form MxN mean M partitions, each with N processors. Arguments of the -form N mean a single partition with N processors. The sum of -processors in all partitions must equal P. Thus the command -``-partition 8x2 4 5`` has 10 partitions and runs on a total of 25 -processors. +Invoke LAMMPS in multi-partition mode. When LAMMPS is run on *P* MPI +processes and this switch is not used, LAMMPS runs in *one partition*, +i.e. all *P* MPI processes run a single simulation with the same +settings. If this switch *is* used, the *P* MPI processes are split +into separate partitions and each partition runs its own simulation. +The arguments to the switch specify the number of MPI processes in each +partition. Arguments of the form *MxN* mean *M* partitions, each with +*N* MPI processes. Arguments of the form *N* mean a single partition +with *N* MPI processes. The sum of MPI processes in all partitions must +equal *P*. 
Thus the command ``-partition 8x2 4 5`` has 10 partitions +(eight with 2 MPI processes, one with 4 and one with 5) and runs on a +total of 25 MPI processes. Running with multiple partitions can be useful for running :doc:`multi-replica simulations `, where each replica -runs on one or a few processors. Note that with MPI installed on a -machine (e.g. your desktop), you can run on more (virtual) processors -than you have physical processors. +runs on one or a few MPI processes. + +.. note:: + + With MPI installed on a standalone machine (e.g. your desktop or + laptop), you can run on more (virtual) MPI processes than you have + physical processors (for testing purposes), but some MPI + implementations (for instance `OpenMPI `_) + may require an additional command line flag to enable this so-called + oversubscription. You may also have to disable `processor affinity + `_ or else the + performance may be exceptionally bad when oversubscribing processors. To run multiple independent simulations from one input script, using multiple partitions, see the :doc:`Howto multiple ` @@ -320,15 +333,15 @@ in this context. **-plog file** -Specify the base name for the partition log files, so partition N -writes log information to file.N. If file is none, then no partition -log files are created. This overrides the filename specified in the --log command-line option. This option is useful when working with -large numbers of partitions, allowing the partition log files to be -suppressed (-plog none) or placed in a subdirectory (-plog -replica_files/log.lammps) If this option is not used the log file for -partition N is log.lammps.N or whatever is specified by the -log -command-line option. +Specify the base name for the partition log files, so partition *N* +writes log information to ``file.N``. If *file* is *none*, then no +partition log files are created. This overrides the filename specified +in the *-log* command-line option. 
This option is useful when working +with large numbers of partitions, allowing the partition log files to be +suppressed (``-plog none``) or placed in a subdirectory (``-plog +replica_files/log.lammps``). If this option is not used, the log file +for partition *N* is ``log.lammps.N`` or whatever is specified by the +*-log* command-line option. ---------- @@ -336,15 +349,15 @@ command-line option. **-pscreen file** -Specify the base name for the partition screen file, so partition N -writes screen information to file.N. If file is "none", then no +Specify the base name for the partition screen file, so partition *N* +writes screen information to ``file.N``. If *file* is *none*, then no partition screen files are created. This overrides the filename -specified in the -screen command-line option. This option is useful +specified in the *-screen* command-line option. This option is useful when working with large numbers of partitions, allowing the partition -screen files to be suppressed (-pscreen none) or placed in a -subdirectory (-pscreen replica_files/screen). If this option is not -used the screen file for partition N is screen.N or whatever is -specified by the -screen command-line option. +screen files to be suppressed (``-pscreen none``) or placed in a +subdirectory (``-pscreen replica_files/screen``). If this option is not +used, the screen file for partition *N* is ``screen.N`` or whatever is +specified by the *-screen* command-line option. ---------- @@ -359,28 +372,29 @@ This option has 2 forms: -reorder nth N -reorder custom filename -Reorder the processors in the MPI communicator used to instantiate -LAMMPS, in one of several ways. The original MPI communicator ranks -all P processors from 0 to P-1. The mapping of these ranks to -physical processors is done by MPI before LAMMPS begins. It may be -useful in some cases to alter the rank order. E.g. to ensure that -cores within each node are ranked in a desired order. 
Or when using -the :doc:`run_style verlet/split ` command with 2 partitions -to ensure that a specific Kspace processor (in the second partition) is -matched up with a specific set of processors in the first partition. -See the :doc:`General tips ` page for more details. - -If the keyword *nth* is used with a setting *N*, then it means every -Nth processor will be moved to the end of the ranking. This is useful +Reorder the ranks in the MPI communicator used to instantiate LAMMPS, in +one of several ways. The original MPI communicator ranks all *P* MPI +processes from *0* to *P-1*. The mapping of these ranks to physical +processors is done by the MPI library before LAMMPS begins. It may be +useful in some cases to alter the order of the ranks, for example to +ensure that cores within each node are ranked in a desired order. Or when using the :doc:`run_style verlet/split ` command with 2 -partitions via the -partition command-line switch. The first set of +partitions to ensure that a specific Kspace processor (in the second +partition) is matched up with a specific set of processors in the first +partition. See the :doc:`General tips ` page for more +details. + +If the keyword *nth* is used with a setting *N*, then it means every Nth +processor will be moved to the end of the ranking. This is useful when +using the :doc:`run_style verlet/split ` command with 2 +partitions via the *-partition* command-line switch. The first set of processors will be in the first partition, the second set in the second -partition. The -reorder command-line switch can alter this so that -the first N procs in the first partition and one proc in the second partition -will be ordered consecutively, e.g. as the cores on one physical node. -This can boost performance. For example, if you use ``-reorder nth 4`` -and ``-partition 9 3`` and you are running on 12 processors, the -processors will be reordered from +partition. 
The *-reorder* command-line switch can alter this so that +the first *N* MPI processes in the first partition and one MPI process +in the second partition will be ordered consecutively, e.g. as the cores +on one physical node. This can boost performance. For example, if you +use ``-reorder nth 4`` and ``-partition 9 3`` and you are running on 12 +processors, the MPI process ranks will be reordered from .. parsed-literal:: @@ -406,30 +420,30 @@ If the keyword is *custom*, then a file that specifies a permutation of the processor ranks is also specified. The format of the reorder file is as follows. Any number of initial blank or comment lines (starting with a "#" character) can be present. These should be -followed by P lines of the form: +followed by *P* lines of the form: .. parsed-literal:: I J -where P is the number of processors LAMMPS was launched with. Note +where *P* is the number of processors LAMMPS was launched with. Note that if running in multi-partition mode (see the -partition switch -above) P is the total number of processors in all partitions. The I -and J values describe a permutation of the P processors. Every I and -J should be values from 0 to P-1 inclusive. In the set of P I values, -every proc ID should appear exactly once. Ditto for the set of P J -values. A single I,J pairing means that the physical processor with -rank I in the original MPI communicator will have rank J in the -reordered communicator. +above) *P* is the total number of MPI processes in all partitions. The +*I* and *J* values describe a permutation of the *P* MPI process ranks. +Every *I* and *J* should be values from *0* to *P-1* inclusive. In the +set of *P* *I* values, every MPI rank ID should appear exactly once. +Ditto for the set of *P* *J* values. A single *I*, *J* pairing means +that the physical processor with MPI rank *I* in the original MPI +communicator will have rank *J* in the reordered MPI communicator. 
Note that rank ordering can also be specified by many MPI implementations, either by environment variables that specify how to -order physical processors, or by config files that specify what -physical processors to assign to each MPI rank. The -reorder switch -simply gives you a portable way to do this without relying on MPI -itself. See the :doc:`processors file ` command for how -to output info on the final assignment of physical processors to -the LAMMPS simulation domain. +order physical processors, or by config files that specify what physical +processors to assign to each MPI rank. The *-reorder* switch simply +gives you a portable way to do this without relying on MPI itself. See +the :doc:`processors file ` command for how to output info +on the final assignment of physical processors to the LAMMPS simulation +domain. ---------- diff --git a/doc/src/Speed_kokkos.rst b/doc/src/Speed_kokkos.rst index a6dc6f30bff..80dfa51eae1 100644 --- a/doc/src/Speed_kokkos.rst +++ b/doc/src/Speed_kokkos.rst @@ -18,7 +18,7 @@ package was developed primarily by Christian Trott (Sandia) and Stan Moore (Sandia) with contributions of various styles by others, including Sikandar Mashayak (UIUC), Ray Shan (Sandia), and Dan Ibanez (Sandia). For more information on developing using Kokkos abstractions -see the `Kokkos Wiki `_. +see the `Kokkos Wiki `_. .. note:: diff --git a/doc/src/Tools.rst b/doc/src/Tools.rst index 06203860dc9..c7ae09a0d2d 100644 --- a/doc/src/Tools.rst +++ b/doc/src/Tools.rst @@ -859,7 +859,7 @@ with those in the provided log file with the same number of processors in the same subdirectory. If the differences between the actual and reference values are within specified tolerances, the test is considered passed. 
For each test batch, that is, a set of example input scripts, -the mpirun command, the LAMMPS command-line arguments, and the +the ``mpirun`` command, the LAMMPS command-line arguments, and the tolerances for individual thermo quantities can be specified in a configuration file in YAML format. diff --git a/doc/src/fix_graphics_labels.rst b/doc/src/fix_graphics_labels.rst index e8392ceb1c7..cb24dd63341 100644 --- a/doc/src/fix_graphics_labels.rst +++ b/doc/src/fix_graphics_labels.rst @@ -425,7 +425,7 @@ the generated plot into the visualization of the atom. --------- Restart, fix_modify, output, run start/stop, minimize info -========================================================== +"""""""""""""""""""""""""""""""""""""""""""""""""""""""""" No information about this fix is written to :doc:`binary restart files `. diff --git a/doc/src/fix_graphics_lines.rst b/doc/src/fix_graphics_lines.rst index 28f9efe96a8..5fde8b5d976 100644 --- a/doc/src/fix_graphics_lines.rst +++ b/doc/src/fix_graphics_lines.rst @@ -111,7 +111,7 @@ example input after adjusting its :doc:`create_box ` and Restart, fix_modify, output, run start/stop, minimize info -========================================================== +"""""""""""""""""""""""""""""""""""""""""""""""""""""""""" This fix writes its current status to :doc:`binary restart files `. See the :doc:`read_restart ` command for info diff --git a/doc/src/fix_graphics_periodic.rst b/doc/src/fix_graphics_periodic.rst index 07fbbfe7ae1..11bbc579898 100644 --- a/doc/src/fix_graphics_periodic.rst +++ b/doc/src/fix_graphics_periodic.rst @@ -91,7 +91,7 @@ diameter relative to the automatically chosen one. In most use cases a value of 0.0 is probably the desired choice. Restart, fix_modify, output, run start/stop, minimize info -========================================================== +"""""""""""""""""""""""""""""""""""""""""""""""""""""""""" No information about this fix is written to :doc:`binary restart files `. 
diff --git a/doc/src/partition.rst b/doc/src/partition.rst index 2726102e05b..b4217bd718c 100644 --- a/doc/src/partition.rst +++ b/doc/src/partition.rst @@ -27,41 +27,42 @@ Examples Description """"""""""" -This command invokes the specified command on a subset of the -partitions of processors you have defined via the :doc:`-partition command-line switch `. +This command invokes the specified command on a subset of the partitions +of MPI processes you have defined via the :doc:`-partition command-line +switch `. -Normally, every input script command in your script is invoked by -every partition. This behavior can be modified by defining world- or +Normally, every input script command in your script is invoked by every +partition. This behavior can be modified by defining world- or universe-style :doc:`variables ` that have different values -for each partition. This mechanism can be used to cause your script -to jump to different input script files on different partitions, if -such a variable is used in a :doc:`jump ` command. +for each partition. This mechanism can be used to cause your script to +jump to different input script files on different partitions, if such a +variable is used in a :doc:`jump ` command. -The "partition" command is another mechanism for having as input -script operate differently on different partitions. It is basically a -prefix on any LAMMPS command. The command will only be invoked on -the partition(s) specified by the *style* and *N* arguments. +The "partition" command is another mechanism for having as input script +operate differently on different partitions. It is basically a prefix +on any LAMMPS command. The command will only be invoked on the +partition(s) specified by the *style* and *N* arguments. If the *style* is *yes*, the command will be invoked on any partition -which matches the *N* argument. If the *style* is *no* the command -will be invoked on all the partitions which do not match the Np -argument. 
- -Partitions are numbered from 1 to Np, where Np is the number of -partitions specified by the :doc:`-partition command-line switch `. - -*N* can be specified in one of two ways. An explicit numeric value -can be used, as in the first example above. Or a wild-card asterisk can -be used to span a range of partition numbers. This takes the form "\*" -or "\*n" or "n\*" or "m\*n". An asterisk with no numeric values means -all partitions from 1 to Np. A leading asterisk means all partitions -from 1 to n (inclusive). A trailing asterisk means all partitions -from n to Np (inclusive). A middle asterisk means all partitions from -m to n (inclusive). +which matches the *N* argument. If the *style* is *no* the command will +be invoked on all the partitions which do not match the *Np* argument. + +Partitions are numbered from 1 to *Np*, where *Np* is the number of +partitions specified by the :doc:`-partition command-line switch +`. + +*N* can be specified in one of two ways. An explicit numeric value can +be used, as in the first example above. Or a wild-card asterisk can be +used to span a range of partition numbers. This takes the form "\*" or +"\*n" or "n\*" or "m\*n". An asterisk with no numeric values means all +partitions from 1 to *Np*. A leading asterisk means all partitions from +1 to n (inclusive). A trailing asterisk means all partitions from n to +*Np* (inclusive). A middle asterisk means all partitions from m to n +(inclusive). This command can be useful for the "run_style verlet/split" command which imposed requirements on how the :doc:`processors ` -command lays out a 3d grid of processors in each of 2 partitions. +command lays out a 3d grid of MPI processes in each of 2 partitions. Restrictions """""""""""" diff --git a/doc/src/processors.rst b/doc/src/processors.rst index f9097903047..399af992a94 100644 --- a/doc/src/processors.rst +++ b/doc/src/processors.rst @@ -10,7 +10,7 @@ Syntax processors Px Py Pz keyword args ... 
-* Px,Py,Pz = # of processors in each dimension of 3d grid overlaying the simulation domain +* Px,Py,Pz = # of MPI processes in each dimension of 3d grid overlaying the simulation domain * zero or more keyword/arg pairs may be appended * keyword = *grid* or *map* or *part* or *file* @@ -28,16 +28,16 @@ Syntax *numa_nodes* arg = Nn Nn = number of numa domains per node *map* arg = *cart* or *cart/reorder* or *xyz* or *xzy* or *yxz* or *yzx* or *zxy* or *zyx* - cart = use MPI_Cart() methods to map processors to 3d grid with reorder = 0 - cart/reorder = use MPI_Cart() methods to map processors to 3d grid with reorder = 1 - xyz,xzy,yxz,yzx,zxy,zyx = map processors to 3d grid in IJK ordering + cart = use MPI_Cart() methods to map MPI processes to 3d grid with reorder = 0 + cart/reorder = use MPI_Cart() methods to map MPI processes to 3d grid with reorder = 1 + xyz,xzy,yxz,yzx,zxy,zyx = map MPI processes to 3d grid in IJK ordering *part* args = Psend Precv cstyle Psend = partition # (1 to Np) which will send its processor layout Precv = partition # (1 to Np) which will recv the processor layout cstyle = *multiple* *multiple* = Psend grid will be multiple of Precv grid in each dimension *file* arg = outfile - outfile = name of file to write 3d grid of processors to + outfile = name of file to write 3d grid of MPI process ranks to Examples """""""" @@ -55,41 +55,43 @@ Examples Description """"""""""" -Specify how processors are mapped as a regular 3d grid to the global -simulation box. The mapping involves 2 steps. First if there are P -processors it means choosing a factorization P = Px by Py by Pz so -that there are Px processors in the x dimension, and similarly for the -y and z dimensions. Second, the P processors are mapped to the -regular 3d grid. The arguments to this command control each of these -2 steps. +Specify how MPI processes are mapped as a regular 3d grid to the global +simulation box. The mapping involves 2 steps. 
First if there are *P* +MPI processes, it means choosing a factorization *P* = *Px* by *Py* by +*Pz* so that there are *Px* MPI processes in the x dimension, and +similarly for the y and z dimensions. Second, the *P* MPI processes are +mapped to the regular 3d grid. The arguments to this command control +each of these 2 steps. -The Px, Py, Pz parameters affect the factorization. Any of the 3 +The *Px*, *Py*, *Pz* parameters affect the factorization. Any of the 3 parameters can be specified with an asterisk "\*", which means LAMMPS -will choose the number of processors in that dimension of the grid. +will choose the number of MPI processes in that dimension of the grid. It will do this based on the size and shape of the global simulation -box so as to minimize the surface-to-volume ratio of each processor's +box so as to minimize the surface-to-volume ratio of each MPI rank's subdomain. -Choosing explicit values for Px or Py or Pz can be used to override -the default manner in which LAMMPS will create the regular 3d grid of -processors, if it is known to be sub-optimal for a particular problem. -E.g. a problem where the extent of atoms will change dramatically in a -particular dimension over the course of the simulation. +Choosing explicit values for *Px* or *Py* or *Pz* can be used to +override the default manner in which LAMMPS will create the regular 3d +grid of MPI processes, if it is known to be sub-optimal for a particular +problem. E.g. a problem where the extent of atoms will change +dramatically in a particular dimension over the course of the +simulation. -The product of Px, Py, Pz must equal P, the total # of processors -LAMMPS is running on. For a :doc:`2d simulation `, Pz must -equal 1. +The product of *Px*, *Py*, *Pz* must equal *P*, the total # of MPI +processes LAMMPS is running on. For a :doc:`2d simulation `, +*Pz* must equal 1. 
-Note that if you run on a prime number of processors P, then a grid -such as 1 x P x 1 will be required, which may incur extra +Note that if you run on a prime number of MPI processes *P*, then a grid +such as 1 x *P* x 1 will be required, which may incur extra communication costs due to the high surface area of each processor's subdomain. -Also note that if multiple partitions are being used then P is the -number of processors in this partition; see the :doc:`-partition command-line switch ` page for details. Also note -that you can prefix the processors command with the -:doc:`partition ` command to easily specify different -Px,Py,Pz values for different partitions. +Also note that if multiple partitions are being used then *P* is the +number of MPI processes in this partition; see the :doc:`-partition +command-line switch ` page for details. Also note that you +can prefix the *processors* command with the :doc:`partition +` command to easily specify different *Px*, *Py*, *Pz* values +for different partitions. You can use the :doc:`partition ` command to specify different processor grids for different partitions, e.g. @@ -112,27 +114,27 @@ different processor grids for different partitions, e.g. If load-balancing is never invoked via the :doc:`balance ` or :doc:`fix balance ` commands, then the initial regular grid will persist for all simulations. If balancing is performed, some of -the methods invoked by those commands retain the logical topology of -the initial 3d grid, and the mapping of processors to the grid -specified by the processors command. However the grid spacings in -different dimensions may change, so that processors own subdomains of +the methods invoked by those commands retain the logical topology of the +initial 3d grid, and the mapping of MPI processes to the grid specified +by the *processors* command. However the grid spacings in different +dimensions may change, so that MPI processes would own subdomains of different sizes. 
If the :doc:`comm_style tiled ` command is -used, methods invoked by the balancing commands may discard the 3d -grid of processors and tile the simulation domain with subdomains of +used, methods invoked by the balancing commands may discard the 3d grid +of MPI processes and tile the simulation domain with subdomains of different sizes and shapes which no longer have a logical 3d connectivity. If that occurs, all the information specified by the -processors command is ignored. +*processors* command is ignored. ---------- -The *grid* keyword affects the factorization of P into Px,Py,Pz and it -can also affect how the P processor IDs are mapped to the 3d grid of -processors. +The *grid* keyword affects the factorization of *P* into *Px*, *Py*, +*Pz* and it can also affect how the *P* MPI ranks are mapped to the 3d +grid of MPI processes. -The *onelevel* style creates a 3d grid that is compatible with the -Px,Py,Pz settings, and which minimizes the surface-to-volume ratio of +The *onelevel* style creates a 3d grid that is compatible with the *Px*, +*Py*, *Pz* settings, and which minimizes the surface-to-volume ratio of each processor's subdomain, as described above. The mapping of -processors to the grid is determined by the *map* keyword setting. +MPI processes to the grid is determined by the *map* keyword setting. The *twolevel* style can be used on machines with multicore nodes to minimize off-node communication. It ensures that contiguous @@ -145,42 +147,44 @@ The *Cx*, *Cy*, *Cz* settings are similar to the *Px*, *Py*, *Pz* settings, only their product should equal *Nc*\ . Any of the 3 parameters can be specified with an asterisk "\*", which means LAMMPS will choose the number of cores in that dimension of the node's -sub-grid. As with Px,Py,Pz, it will do this based on the size and -shape of the global simulation box so as to minimize the +sub-grid. 
As with *Px*, *Py*, *Pz*, it will do this based on the size +and shape of the global simulation box so as to minimize the surface-to-volume ratio of each processor's subdomain. .. note:: - For the *twolevel* style to work correctly, it assumes the MPI - ranks of processors LAMMPS is running on are ordered by core and then - by node. E.g. if you are running on 2 quad-core nodes, for a total of - 8 processors, then it assumes processors 0,1,2,3 are on node 1, and - processors 4,5,6,7 are on node 2. This is the default rank ordering - for most MPI implementations, but some MPIs provide options for this - ordering, e.g. via environment variable settings. + For the *twolevel* style to work correctly, it assumes the ranks of + the MPI processes that LAMMPS is running on are ordered by core and + then by node. E.g. if you are running on 2 quad-core nodes, for a + total of 8 MPI processes, then it assumes ranks 0, 1, 2, and 3 are on + node 1, and ranks 4, 5, 6, and 7 are on node 2. This is the default + rank ordering for most MPI implementations, but some MPI libraries + provide options for customizing this ordering, e.g. via environment + variable settings. The *numa* style operates similar to the *twolevel* keyword except that it auto-detects which cores are running on which nodes. It will also -subdivide the cores into numa domains. Currently, the number of numa -domains is not auto-detected and must be specified using the -*numa_nodes* keyword; otherwise, the default value is used. The *numa* -style uses a different algorithm than the *twolevel* keyword for doing -the two-level factorization of the simulation box into a 3d processor -grid to minimize off-node communication and communication across numa -domains. It does its own MPI-based mapping of nodes and cores to the -regular 3d grid. Thus it may produce a different layout of the -processors than the *twolevel* options. +subdivide the cores into `NUMA domains +`_. 
Currently, +the number of NUMA domains is not auto-detected and must be specified +using the *numa_nodes* keyword. Otherwise, the default value is used. +The *numa* style uses a different algorithm than the *twolevel* keyword +for doing the two-level factorization of the simulation box into a 3d +processor grid to minimize off-node communication and communication +across NUMA domains. It does its own MPI-based mapping of nodes and +cores to the regular 3d grid. Thus it may produce a different layout of +the MPI ranks than the *twolevel* options. The *numa* style will give an error if the number of MPI processes is -not divisible by the number of cores used per node, or any of the Px -or Py or Pz values is greater than 1. +not divisible by the number of cores used per node, or any of the *Px* +or *Py* or *Pz* values is greater than 1. .. note:: - Unlike the *twolevel* style, the *numa* style does not require - any particular ordering of MPI ranks in order to work correctly. This - is because it auto-detects which processes are running on which nodes. - However, it assumes that the lowest ranks are in the first numa + Unlike the *twolevel* style, the *numa* style does not require any + particular ordering of MPI ranks in order to work correctly. This is + because it auto-detects which processes are running on which nodes. + However, it assumes that the lowest ranks are in the first NUMA domain, and so forth. MPI rank orderings that do not preserve this property might result in more intra-node communication between CPUs. @@ -194,26 +198,27 @@ The first non-blank, non-comment line should have .. parsed-literal:: - Px Py Py + Px Py Pz These must be compatible with the total number of processors -and the Px, Py, Pz settings of the processors command. +and the *Px*, *Py*, *Pz* settings of the processors command. This line should be immediately followed by -P = Px\*Py\*Pz lines of the form: +P = *Px* \* *Py* \* *Pz* lines of the form: .. 
parsed-literal:: ID I J K -where ID is a processor ID (from 0 to P-1) and I,J,K are the -processors location in the 3d grid. I must be a number from 1 to Px -(inclusive) and similarly for J and K. The P lines can be listed in -any order, but no processor ID should appear more than once. +where ID is an MPI process rank (from 0 to P-1) and *I*, *J*, *K* are +the MPI rank's location in the 3d grid. *I* must be a number from 1 to +*Px* (inclusive) and similarly for *J* and *Py* or *K* and *Pz*. The +*P* lines can be listed in any order, but no MPI rank should appear more +than once. ---------- -The *numa_nodes* keyword is used to specify the number of numa domains +The *numa_nodes* keyword is used to specify the number of NUMA domains per node. It is currently only used by the *numa* style for two-level factorization to reduce the amount of MPI communications between CPUs. A good setting for this will typically be equal to the number of CPU @@ -221,27 +226,35 @@ sockets per node. ---------- -The *map* keyword affects how the P processor IDs (from 0 to P-1) are -mapped to the 3d grid of processors. It is only used by the +The *map* keyword affects how the *P* MPI process ranks (from 0 to P-1) +are mapped to the 3d grid of MPI processes. It is only used by the *onelevel* and *twolevel* grid settings. The *cart* style uses the family of MPI Cartesian functions to perform -the mapping, namely MPI_Cart_create(), MPI_Cart_get(), -MPI_Cart_shift(), and MPI_Cart_rank(). It invokes the -MPI_Cart_create() function with its reorder flag = 0, so that MPI is -not free to reorder the processors. +the mapping, namely `MPI_Cart_create() +`_, +`MPI_Cart_get() +`_, +`MPI_Cart_shift() +`_, +and `MPI_Cart_rank() +`_. +It invokes the `MPI_Cart_create() +`_ +function with its reorder flag = 0, so that MPI is not free to reorder +the processors. 
The *cart/reorder* style does the same thing as the *cart* style
except it sets the reorder flag to 1, so that MPI can reorder
processors if it desires.

-The *xyz*, *xzy*, *yxz*, *yzx*, *zxy*, and *zyx* styles are all
-similar. If the style is IJK, then it maps the P processors to the
-grid so that the processor ID in the I direction varies fastest, the
-processor ID in the J direction varies next fastest, and the processor
-ID in the K direction varies slowest. For example, if you select
-style *xyz* and you have a 2x2x2 grid of 8 processors, the assignments
-of the 8 octants of the simulation domain will be:
+The *xyz*, *xzy*, *yxz*, *yzx*, *zxy*, and *zyx* styles are all similar.
+If the style is IJK, then it maps the *P* processors to the grid so that
+the MPI rank in the *I* direction varies fastest, the MPI rank in the
+*J* direction varies next fastest, and the MPI rank in the *K* direction
+varies slowest. For example, if you select style *xyz* and you have a
+2x2x2 grid of 8 processors, the assignments of the 8 octants of the
+simulation domain will be:

 .. parsed-literal::

@@ -255,13 +268,15 @@ of the 8 octants of the simulation domain will be:

    proc 7 = hi x, hi y, hi z octant

 Note that, in principle, an MPI implementation on a particular machine
-should be aware of both the machine's network topology and the
-specific subset of processors and nodes that were assigned to your
-simulation. Thus its MPI_Cart calls can optimize the assignment of
-MPI processes to the 3d grid to minimize communication costs. In
-practice, however, few if any MPI implementations actually do this.
-So it is likely that the *cart* and *cart/reorder* styles simply give
-the same result as one of the IJK styles.
+should be aware of both the machine's network topology and the specific
+subset of processors and nodes that were assigned to your simulation.
+Thus its `MPI_Cart()
+`_
+calls can optimize the assignment of MPI processes to the 3d grid to
+minimize communication costs. 
In practice, however, few if any MPI +implementations actually do this. So it is likely that the *cart* and +*cart/reorder* styles simply give the same result as one of the *IJK* +styles. Also note, that for the *twolevel* grid style, the *map* setting is used to first map the nodes to the 3d grid, then again to the cores @@ -270,23 +285,24 @@ styles are not supported, so an *xyz* style is used in their place. ---------- -The *part* keyword affects the factorization of P into Px,Py,Pz. +The *part* keyword affects the factorization of *P* into *Px*, *Py*, *Pz*. It can be useful when running in multi-partition mode, e.g. with the :doc:`run_style verlet/split ` command. It specifies a -dependency between a sending partition *Psend* and a receiving -partition *Precv* which is enforced when each is setting up their own -mapping of their processors to the simulation box. Each of *Psend* -and *Precv* must be integers from 1 to Np, where Np is the number of -partitions you have defined via the :doc:`-partition command-line switch `. - -A "dependency" means that the sending partition will create its -regular 3d grid as Px by Py by Pz and after it has done this, it will -send the Px,Py,Pz values to the receiving partition. The receiving +dependency between a sending partition *Psend* and a receiving partition +*Precv* which is enforced when each is setting up their own mapping of +their processors to the simulation box. Each of *Psend* and *Precv* +must be integers from 1 to *Np*, where *Np* is the number of partitions +you have defined via the :doc:`-partition command-line switch +`. + +A "dependency" means that the sending partition will create its regular +3d grid as *Px* by *Py* by *Pz* and after it has done this, it will send +the *Px*, *Py*, *Pz* values to the receiving partition. The receiving partition will wait to receive these values before creating its own -regular 3d grid and will use the sender's Px,Py,Pz values as a -constraint. 
The nature of the constraint is determined by the -*cstyle* argument. +regular 3d grid and will use the sender's *Px*, *Py*, *Pz* values as a +constraint. The nature of the constraint is determined by the *cstyle* +argument. For a *cstyle* of *multiple*, each dimension of the sender's processor grid is required to be an integer multiple of the corresponding @@ -294,29 +310,28 @@ dimension in the receiver's processor grid. This is a requirement of the :doc:`run_style verlet/split ` command. For example, assume the sending partition creates a 4x6x10 grid = 240 -processor grid. If the receiving partition is running on 80 -processors, it could create a 4x2x10 grid, but it will not create a -2x4x10 grid, since in the y-dimension, 6 is not an integer multiple of -4. +processor grid. If the receiving partition is running on 80 processors, +it could create a 4x2x10 grid, but it will not create a 2x4x10 grid, +since in the y-dimension, 6 is not an integer multiple of 4. .. note:: If you use the :doc:`partition ` command to invoke different "processors" commands on different partitions, and you also - use the *part* keyword, then you must ensure that both the sending and - receiving partitions invoke the "processors" command that connects the - 2 partitions via the *part* keyword. LAMMPS cannot easily check for - this, but your simulation will likely hang in its setup phase if this - error has been made. + use the *part* keyword, then you must ensure that both the sending + and receiving partitions invoke the "processors" command that + connects the 2 partitions via the *part* keyword. LAMMPS cannot + easily check for this, but your simulation will likely hang in its + setup phase if this error has been made. 


----------

-The *file* keyword writes the mapping of the factorization of P
-processors and their mapping to the 3d grid to the specified file
+The *file* keyword writes the mapping of the factorization of *P* MPI
+processes and their mapping to the 3d grid to the specified file
 *outfile*\ . This is useful to check that you assigned physical
-processors in the manner you desired, which can be tricky to figure
-out, especially when running on multiple partitions or on, a multicore
-machine or when the processor ranks were reordered by use of the
+processors in the manner you desired, which can be tricky to figure out,
+especially when running on multiple partitions or on a multicore
+machine or when the MPI ranks were reordered by use of the
 :doc:`-reorder command-line switch ` or due to use of
 MPI-specific launch options such as a config file.

@@ -327,21 +342,22 @@ one-line per processor in this format:

    world-ID universe-ID original-ID: I J K: name

-The IDs are the processor's rank in this simulation (the world), the
-universe (of multiple simulations), and the original MPI communicator
-used to instantiate LAMMPS, respectively. The world and universe IDs
-will only be different if you are running on more than one partition;
-see the :doc:`-partition command-line switch `. The
-universe and original IDs will only be different if you used the
-:doc:`-reorder command-line switch ` to reorder the
-processors differently than their rank in the original communicator
-LAMMPS was instantiated with.
-
-I,J,K are the indices of the processor in the regular 3d grid, each
-from 1 to Nd, where Nd is the number of processors in that dimension
-of the grid.
-
-The *name* is what is returned by a call to MPI_Get_processor_name()
+The IDs are the MPI ranks in this simulation (the world), the universe
+(of multiple simulations), and the original MPI communicator used to
+instantiate LAMMPS, respectively. 
The world and universe IDs will only +be different if you are running on more than one partition; see the +:doc:`-partition command-line switch `. The universe and +original IDs will only be different if you used the :doc:`-reorder +command-line switch ` to reorder the MPI processes +differently than their rank in the original communicator LAMMPS was +instantiated with. + +*I*, *J*, *K* are the indices of the processor in the regular 3d grid, +each from 1 to *Nd*, where *Nd* is the number of MPI processes in that +dimension of the grid. + +The *name* is what is returned by a call to `MPI_Get_processor_name() +`_ and should represent an identifier relevant to the physical processors in your machine. Note that depending on the MPI implementation, multiple cores can have the same *name*\ . @@ -351,9 +367,9 @@ multiple cores can have the same *name*\ . Restrictions """""""""""" -This command cannot be used after the simulation box is defined by a +This command cannot be used *after* the simulation box is defined by a :doc:`read_data ` or :doc:`create_box ` command. -It can be used before a restart file is read to change the 3d +It can be used *before* a restart file is read to change the 3d processor grid from what is specified in the restart file. The *grid numa* keyword only currently works with the *map cart* @@ -370,5 +386,5 @@ Related commands Default """"""" -The option defaults are Px Py Pz = \* \* \*, grid = onelevel, map = +The option defaults are *Px* *Py* *Pz* = \* \* \*, grid = onelevel, map = cart, and numa_nodes = 2. diff --git a/doc/src/temper.rst b/doc/src/temper.rst index d8bbcb959ba..8157072f667 100644 --- a/doc/src/temper.rst +++ b/doc/src/temper.rst @@ -32,12 +32,13 @@ Description Run a parallel tempering or replica exchange simulation using multiple replicas (ensembles) of a system. Two or more replicas must be used. -Each replica runs on a partition of one or more processors. 
Processor
-partitions are defined at run-time using the :doc:`-partition command-line switch `. Note that if you have MPI installed, you
-can run a multi-replica simulation with more replicas (partitions)
-than you have physical processors, e.g you can run a 10-replica
-simulation on one or two processors. You will simply not get the
-performance speed-up you would see with one or more physical
+Each replica runs on a partition of one or more MPI processes. Processor
+partitions are defined at run-time using the :doc:`-partition
+command-line switch `. Note that if you have MPI
+installed, you can run a multi-replica simulation with more replicas
+(partitions) than you have physical processors, e.g. you can run a
+10-replica simulation on one or two processors. You will simply not get
+the performance speed-up you would see with one or more physical
 processors per replica. See the :doc:`Howto replica `
 doc page for further discussion.

@@ -68,14 +69,16 @@ time. Each attempted swap of temperatures is either accepted or
 rejected based on a Boltzmann-weighted Metropolis criterion which uses
 *seed2* in the random number generator.

-As a tempering run proceeds, multiple log files and screen output
-files are created, one per replica. By default these files are named
-log.lammps.M and screen.M where M is the replica number from 0 to N-1,
-with N = # of replicas. See the :doc:`-log and -screen command-line swiches ` for info on how to change these names.
+As a tempering run proceeds, multiple log files and screen output files
+are created, one per replica. By default these files are named
+``log.lammps.M`` and ``screen.M`` where *M* is the replica number from 0
+to *N*-1, with *N* = # of replicas. See the :doc:`-log and -screen
+command-line switches ` for info on how to change these
+names.

-The main screen and log file (log.lammps) will list information about
-which temperature is assigned to each replica at each thermodynamic
-output timestep. E.g. 
for a simulation with 16 replicas: +The main screen and log file (``log.lammps``) will list information +about which temperature is assigned to each replica at each +thermodynamic output timestep. E.g. for a simulation with 16 replicas: .. parsed-literal:: @@ -89,15 +92,15 @@ output timestep. E.g. for a simulation with 16 replicas: 2500 2 1 3 0 6 4 5 7 11 8 9 10 12 14 13 15 ... -The column headings T0 to TN-1 mean which temperature is currently -assigned to the replica 0 to N-1. Thus the columns represent replicas +The column headings *T0* to *TN-1* mean which temperature is currently +assigned to the replica 0 to *N*-1. Thus the columns represent replicas and the value in each column is its temperature (also numbered 0 to -N-1). For example, a 0 in the fourth column (column T3, step 2500) means -that the fourth replica is assigned temperature 0, i.e. the lowest +N-1). For example, a 0 in the fourth column (column *T3*, step 2500) +means that the fourth replica is assigned temperature 0, i.e. the lowest temperature. You can verify this time sequence of temperature -assignments for the Nth replica by comparing the Nth column of screen -output to the thermodynamic data in the corresponding log.lammps.N or -screen.N files as time proceeds. +assignments for the *N*\ th replica by comparing the *N*\ th column of +screen output to the thermodynamic data in the corresponding +``log.lammps.N`` or ``screen.N`` files as time proceeds. You can have each replica create its own dump file in the following manner: diff --git a/doc/src/temper_grem.rst b/doc/src/temper_grem.rst index 0963ea98c1f..7166e95316a 100644 --- a/doc/src/temper_grem.rst +++ b/doc/src/temper_grem.rst @@ -34,13 +34,13 @@ Run a parallel tempering or replica exchange simulation in LAMMPS partition mode using multiple generalized replicas (ensembles) of a system defined by :doc:`fix grem `, which stands for the generalized replica exchange method (gREM) originally developed by -:ref:`(Kim) `. 
It uses non-Boltzmann ensembles to sample over first
-order phase transitions. The is done by defining replicas with an
-enthalpy dependent effective temperature
+:ref:`(Kim) `. It uses non-Boltzmann ensembles to sample
+over first order phase transitions. This is done by defining replicas
+with an enthalpy dependent effective temperature.

 Two or more replicas must be used. See the :doc:`temper `
 command for an explanation of how to run replicas on multiple
-partitions of one or more processors.
+partitions of one or more MPI processes.

 This command is a modification of the :doc:`temper ` command
 and has the same dependencies, restraints, and input variables which are
diff --git a/doc/src/thermo_modify.rst b/doc/src/thermo_modify.rst
index a9c52b8a5cb..48c2515e515 100644
--- a/doc/src/thermo_modify.rst
+++ b/doc/src/thermo_modify.rst
@@ -93,20 +93,20 @@ certain whether they are an indication of an error.

 Some warning messages are printed during a run (or immediately before)
 each time a specific MPI rank encounters the issue (e.g., bonds that are
-stretched too far or dihedrals in extreme configurations). These number
+stretched too far or dihedrals in extreme configurations). The number
 of these can quickly blow up the size of the log file and screen output.
-Thus, a limit of 100 warning messages is applied by default. The warning
-count is applied to the entire input unless reset with a ``thermo_modify
-warn reset`` command. If there are more warnings than the limit, LAMMPS
-will print one final warning that it will not print any additional
-warning messages.
+A limit of 100 warning messages is therefore applied by default. The
+warning count is applied to the entire input file, unless it is reset
+with a ``thermo_modify warn reset`` command. If there are more warnings
+than the limit, LAMMPS will print one final warning that it will not
+print any additional warning messages.

.. 
note:: The warning limit is enforced on either the per-processor count or - the total count across all processors. For efficiency reasons, + the total count across all MPI processes. For efficiency reasons, however, the total count is only updated at steps with thermodynamic - output. Thus when running on a large number of processors in + output. Thus when running on a large number of MPI processes in parallel, the total number of warnings printed can be significantly larger than the given limit. diff --git a/doc/src/timer.rst b/doc/src/timer.rst index ea411c20f38..e809c85f884 100644 --- a/doc/src/timer.rst +++ b/doc/src/timer.rst @@ -40,25 +40,26 @@ Multiple keywords can be specified with the *timer* command. For keywords that are mutually exclusive, the last one specified takes precedence. -During a simulation run LAMMPS collects information about how much -time is spent in different sections of the code and thus can provide +During a simulation run LAMMPS collects information about how much time +is spent in different sections of the code and thus can provide information for determining performance and load imbalance problems. This can be done at different levels of detail and accuracy. For more -information about the timing output, see the :doc:`Run output ` doc page. +information about the timing output, see the :doc:`Run output +` doc page. The *off* setting will turn all time measurements off. The *loop* setting will only measure the total time for a run and not collect any detailed per section information. With the *normal* setting, timing information for portions of the timestep (pairwise calculations, neighbor list construction, output, etc) are collected as well as -information about load imbalances for those sections across -processors. The *full* setting adds information about CPU -utilization and thread utilization, when multi-threading is enabled. +information about load imbalances for those sections across MPI +processes. 
The *full* setting adds information about CPU utilization +and thread utilization, when multi-threading is enabled. With the *sync* setting, all MPI tasks are synchronized at each timer call which measures load imbalance for each section more accurately, though it can also slow down the simulation by prohibiting overlapping -independent computations on different MPI ranks Using the *nosync* +independent computations on different MPI ranks. Using the *nosync* setting (which is the default) turns this synchronization off. With the *timeout* keyword a wall time limit can be imposed, that @@ -66,52 +67,52 @@ affects the :doc:`run ` and :doc:`minimize ` commands. This can be convenient when calculations have to comply with execution time limits, e.g. when running under a batch system when you want to maximize the utilization of the batch time slot, especially for runs -where the time per timestep varies much and thus it becomes difficult -to predict how many steps a simulation can perform for a given wall time -limit. This also applies for difficult to converge minimizations. -The timeout *elapse* value should be somewhat smaller than the maximum -wall time requested from the batch system, as there is usually -some overhead to launch jobs, and it is advisable to write -out a restart after terminating a run due to a timeout. - -The timeout timer starts when the command is issued. When the time -limit is reached, the run or energy minimization will exit on the -next step or iteration that is a multiple of the *Ncheck* value -which can be set with the *every* keyword. Default is checking -every 10 steps. After the timer timeout has expired all subsequent -run or minimize commands in the input script will be skipped. 
-The remaining time or timer status can be accessed with the -:doc:`thermo ` variable *timeremain*, which will be -zero, if the timeout is inactive (default setting), it will be -negative, if the timeout time is expired and positive if there -is time remaining and in this case the value of the variable are -the number of seconds remaining. - -When the *timeout* keyword is used a second time, the timer is -restarted with a new time limit. The timeout *elapse* value can -be specified as *off* or *unlimited* to impose a no timeout condition -(which is the default). The *elapse* setting can be specified as -a single number for seconds, two numbers separated by a colon (MM:SS) -for minutes and seconds, or as three numbers separated by colons for -hours, minutes, and seconds (H:MM:SS). +where the time per timestep varies much and thus it becomes difficult to +predict how many steps a simulation can perform for a given wall time +limit. This also applies for difficult to converge minimizations. The +timeout *elapse* value should be somewhat smaller than the maximum wall +time requested from the batch system, as there is usually some overhead +to launch jobs, and it is advisable to write out a restart after +terminating a run due to a timeout. + +The timeout timer starts when the command is issued. When the time limit +is reached, the run or energy minimization will exit on the next step or +iteration that is a multiple of the *Ncheck* value which can be set with +the *every* keyword. Default is checking every 10 steps. After the timer +timeout has expired all subsequent run or minimize commands in the input +script will be skipped. The remaining time or timer status can be +accessed with the :doc:`thermo ` variable *timeremain*, +which will be zero, if the timeout is inactive (default setting), it +will be negative, if the timeout time is expired and positive if there +is time remaining and in this case the value of the variable are the +number of seconds remaining. 
+
+When the *timeout* keyword is used a second time, the timer is restarted
+with a new time limit. The timeout *elapse* value can be specified as
+*off* or *unlimited* to impose a no timeout condition (which is the
+default). The *elapse* setting can be specified as a single number for
+seconds, two numbers separated by a colon (MM:SS) for minutes and
+seconds, or as three numbers separated by colons for hours, minutes, and
+seconds (H:MM:SS).

 The *every* keyword sets how frequently during a run or energy
 minimization the wall clock will be checked. This check count applies
-to the outer iterations or time steps during minimizations or :doc:`r-RESPA runs `, respectively. Checking for timeout too often,
-can slow a calculation down. Checking too infrequently can make the
-timeout measurement less accurate, with the run being stopped later
+to the outer iterations or time steps during minimizations or
+:doc:`r-RESPA runs `, respectively. Checking for timeout too
+often can slow a calculation down. Checking too infrequently can make
+the timeout measurement less accurate, with the run being stopped later
 than desired.

 .. note::

-   Using the *full* and *sync* options provides the most detailed
-   and accurate timing information, but can also have a negative
-   performance impact due to the overhead of the many required system
-   calls.  It is thus recommended to use these settings only when testing
-   tests to identify performance bottlenecks.  For calculations with few
-   atoms or a very large number of processors, even the *normal* setting
-   can have a measurable negative performance impact.  In those cases you
-   can just use the *loop* or *off* setting.
+   Using the *full* and *sync* options provides the most detailed and
+   accurate timing information, but can also have a negative performance
+   impact due to the overhead of the many required system calls. It is
+   thus recommended to use these settings only when testing to
+   identify performance bottlenecks. 
For calculations with few atoms or + a very large number of MPI processes, even the *normal* setting can + have a measurable negative performance impact. In those cases you can + just use the *loop* or *off* setting. Restrictions """""""""""" diff --git a/doc/src/variable.rst b/doc/src/variable.rst index 4c6e2b12b3c..15207340892 100644 --- a/doc/src/variable.rst +++ b/doc/src/variable.rst @@ -43,7 +43,7 @@ Syntax N = integer size of loop pad = all values will be same length, e.g. 001, 002, ..., 100 *universe* args = one or more strings - *world* args = one string for each partition of processors + *world* args = one string for each partition of MPI processes *equal* or *vector* or *atom* args = one formula containing numbers, thermo keywords, math operations, built-in functions, atom values and vectors, compute/fix/variable references @@ -388,7 +388,7 @@ by the :doc:`shell ` command. For the *index* style, one or more strings are specified. Initially, the first string is assigned to the variable. Each time a :doc:`next ` command is used with the variable name, the next -string is assigned. All processors assign the same string to the +string is assigned. All MPI processes assign the same string to the variable. Index-style variables with a single string value can also be set by @@ -425,7 +425,7 @@ is specified. This allows generation of a long list of runs (e.g. 1000) without having to list N strings in the input script. Initially, the string "1" is assigned to the variable. Each time a :doc:`next ` command is used with the variable name, the next -string ("2", "3", etc) is assigned. All processors assign the same +string ("2", "3", etc) is assigned. All MPI processes assign the same string to the variable. The *loop* style can also be specified with two arguments N1 and N2. In this case the loop runs from N1 to N2 inclusive, and the string N1 is initially assigned to the variable. @@ -508,7 +508,7 @@ For the *world* style, one or more strings are specified. 
There must be one string for each processor partition or "world". LAMMPS can be run with multiple partitions via the :doc:`-partition command-line switch `. This variable command assigns one string to -each world. All processors in the world are assigned the same string. +each world. All MPI processes in the world are assigned the same string. The next command cannot be used with equal-style variables, since there is only one value per world. This style of variable is useful when you wish to run different simulations on different partitions, or @@ -1074,7 +1074,7 @@ the LAMMPS executable and the running simulation via calling the :cpp:func:`lammps_extract_setting` library function. For example, the number of processors (MPI ranks) being used by the simulation or the MPI process ID (for this processor) can be queried, or the number of atom -types, bond types and so on. For the full list of available keywords +types, bond types and so on. For the full list of available keywords *name* and their meaning, see the documentation for extract_setting() via the link in this paragraph. diff --git a/doc/src/velocity.rst b/doc/src/velocity.rst index 48b0526df96..3ab3bbf1bf3 100644 --- a/doc/src/velocity.rst +++ b/doc/src/velocity.rst @@ -186,35 +186,37 @@ specifying the ID of a :doc:`compute temp/ramp ` or The *loop* keyword is used by *create* in the following ways. -If loop = all, then each processor loops over all atoms in the -simulation to create velocities, but only stores velocities for atoms -it owns. This can be a slow loop for a large simulation. If atoms -were read from a data file, the velocity assigned to a particular atom -will be the same, independent of how many processors are being used. -This will not be the case if atoms were created using the -:doc:`create_atoms ` command, since atom IDs will likely -be assigned to atoms differently. 
- -If loop = local, then each processor loops over only its atoms to +If *loop = all*, then each processor loops over all atoms in the +simulation to create velocities, but only stores velocities for atoms it +owns. This can be a slow loop for a large simulation. If atoms were +read from a data file, the velocity assigned to a particular atom will +be the same, independent of how many MPI processes are being used. This +will not be the case if atoms were created using the :doc:`create_atoms +` command, since atom IDs will likely be assigned to atoms +differently. + +If *loop = local*, then each processor loops over only its atoms to produce velocities. The random number seed is adjusted to give a -different set of velocities on each processor. This is a fast loop, -but the velocity assigned to a particular atom will depend on which +different set of velocities on each processor. This is a fast loop, but +the velocity assigned to a particular atom will depend on which processor owns it. Thus the results will always be different when a -simulation is run on a different number of processors. +simulation is run on a different number of MPI processes. -If loop = geom, then each processor loops over only its atoms. For +If *loop = geom*, then each processor loops over only its atoms. For each atom a unique random number seed is created, based on the atom's xyz coordinates. A velocity is generated using that seed. This is a fast loop and the velocity assigned to a particular atom will be the -same, independent of how many processors are used. However, the set +same, independent of how many MPI processes are used. However, the set of generated velocities may be more correlated than if the *all* or *local* keywords are used. -Note that the *loop geom* keyword will not necessarily assign -identical velocities for two simulations run on different machines. 
-This is because the computations based on xyz coordinates are -sensitive to tiny differences in the double-precision value for a -coordinate as stored on a particular machine. +.. note:: + + The *loop geom* keyword will not necessarily assign identical + velocities for two simulations run on different machines. This is + because the computations based on xyz coordinates are sensitive to + tiny differences in the double-precision value for a coordinate as + stored on a particular machine. ---------- diff --git a/doc/src/write_restart.rst b/doc/src/write_restart.rst index 6205f24faff..837a557723e 100644 --- a/doc/src/write_restart.rst +++ b/doc/src/write_restart.rst @@ -17,9 +17,9 @@ Syntax .. parsed-literal:: *fileper* arg = Np - Np = write one file for every this many processors + Np = write one file for every this many MPI processes *nfile* arg = Nf - Nf = write this many files, one from each of Nf processors + Nf = write this many files, one from each of Nf MPI processes Examples """""""" @@ -41,16 +41,16 @@ wish to write out a single current restart file. Similar to :doc:`dump ` files, the restart filename can contain two wild-card characters. If a "\*" appears in the filename, it is -replaced with the current timestep value. If a "%" character appears -in the filename, then one file is written by each processor and the -"%" character is replaced with the processor ID from 0 to P-1. An +replaced with the current timestep value. If a "%" character appears in +the filename, then one file is written by each processor and the "%" +character is replaced with the MPI rank ID from 0 to *P*-1. An additional file with the "%" replaced by "base" is also written, which contains global information. For example, the files written for -filename restart.% would be restart.base, restart.0, restart.1, ... -restart.P-1. This creates smaller files and can be a fast mode of -output and subsequent input on parallel machines that support parallel -I/O. 
The optional *fileper* and *nfile* keywords discussed below can -alter the number of files written. +``filename restart.%`` would be ``restart.base``, ``restart.0``, +``restart.1``, ... ``restart.P-1``. This creates smaller files and can +be a fast mode of output and subsequent input on parallel machines that +support parallel I/O. The optional *fileper* and *nfile* keywords +discussed below can alter the number of files written. Restart files can be read by a :doc:`read_restart ` command to restart a simulation from a particular state. Because the @@ -75,22 +75,23 @@ switch ` to convert a restart file to a data file. ---------- The optional *nfile* or *fileper* keywords can be used in conjunction -with the "%" wildcard character in the specified restart file name. -As explained above, the "%" character causes the restart file to be -written in pieces, one piece for each of P processors. By default P = -the number of processors the simulation is running on. The *nfile* or -*fileper* keyword can be used to set P to a smaller value, which can -be more efficient when running on a large number of processors. - -The *nfile* keyword sets P to the specified Nf value. For example, if -Nf = 4, and the simulation is running on 100 processors, 4 files will -be written, by processors 0,25,50,75. Each will collect information -from itself and the next 24 processors and write it to a restart file. - -For the *fileper* keyword, the specified value of Np means write one -file for every Np processors. For example, if Np = 4, every fourth -processor (0,4,8,12,etc) will collect information from itself and the -next 3 processors and write it to a restart file. +with the "%" wildcard character in the specified restart file name. As +explained above, the "%" character causes the restart file to be written +in pieces, one piece for each of *P* MPI processes. By default *P* = +the number of MPI processes the simulation is running on. 
The *nfile* +or *fileper* keyword can be used to set *P* to a smaller value, which +can be more efficient when running on a large number of MPI processes. + +The *nfile* keyword sets *P* to the specified *Nf* value. For example, +if *Nf* = 4, and the simulation is running on 100 MPI processes, 4 files +will be written, by MPI ranks 0, 25, 50, and 75. Each will collect +information from itself and the next 24 MPI processes and write it to a +restart file. + +For the *fileper* keyword, the specified value of *Np* means write one +file for every *Np* MPI processes. For example, if *Np* = 4, every +fourth MPI rank (0, 4, 8, 12, *etc.*) will collect information from +itself and the next 3 MPI processes and write it to a restart file. ---------- @@ -100,7 +101,7 @@ Restrictions This command requires inter-processor communication to migrate atoms before the restart file is written. This means that your system must be ready to perform a simulation before using this command (force -fields setup, atom masses initialized, etc). +fields setup, atom masses initialized, *etc.*). Related commands """""""""""""""" diff --git a/doc/utils/sphinx-config/conf.py.in b/doc/utils/sphinx-config/conf.py.in index 57d052f227a..09649c390c7 100644 --- a/doc/utils/sphinx-config/conf.py.in +++ b/doc/utils/sphinx-config/conf.py.in @@ -241,16 +241,16 @@ html_favicon = '_static/lammps.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static',] +html_static_path = ['_static'] # These paths are either relative to html_static_path # or fully qualified paths (eg. https://...) -html_css_files = ['css/lammps.css',] +html_css_files = ['css/lammps.css'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. 
These files are copied # directly to the root of the documentation. -html_extra_path = ['_extra',] +html_extra_path = ['_extra'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. @@ -477,7 +477,9 @@ man_pages = [ # strip off LAMMPS_NS:: from index entries cpp_index_common_prefix = [ 'LAMMPS_NS::', - '_LMP_STYLE_CONST::', '_LMP_TYPE_CONST::', '_LMP_VAR_CONST::', + '_LMP_STYLE_CONST::', + '_LMP_TYPE_CONST::', + '_LMP_VAR_CONST::' ] # -- Options for Texinfo output ------------------------------------------- @@ -488,7 +490,7 @@ cpp_index_common_prefix = [ texinfo_documents = [ ('Manual', 'LAMMPS', 'LAMMPS Documentation', 'LAMMPS', 'One line description of project.', - 'Miscellaneous'), + 'Miscellaneous') ] # Documents to append as an appendix to all manuals. diff --git a/doc/utils/sphinx-config/false_positives.txt b/doc/utils/sphinx-config/false_positives.txt index f7a20a212e1..41238197591 100644 --- a/doc/utils/sphinx-config/false_positives.txt +++ b/doc/utils/sphinx-config/false_positives.txt @@ -2941,6 +2941,7 @@ outputss Ouyang overdamped overlayed +oversubscription Ovito oxdna oxDNA diff --git a/examples/GRAPHICS/in.peptide-hbonds b/examples/GRAPHICS/in.peptide-hbonds index 4177487ddaa..228f54e671c 100644 --- a/examples/GRAPHICS/in.peptide-hbonds +++ b/examples/GRAPHICS/in.peptide-hbonds @@ -32,7 +32,7 @@ region water ellipsoid v_comx v_comy v_comz 7.0 8.0 16.0 group viz dynamic all region water include molecule # define groups of donor and acceptor atoms for peptide and water -group pdonor type 5 +group pdonor type 5 9 group wdonor type 13 group pacceptor type 3 5 9 12 group wacceptor type 13 diff --git a/lib/linalg/dlasq2.cpp b/lib/linalg/dlasq2.cpp index 37bc9639759..d0413f1e947 100644 --- a/lib/linalg/dlasq2.cpp +++ b/lib/linalg/dlasq2.cpp @@ -105,7 +105,7 @@ int dlasq2_(integer *n, doublereal *z__, integer *info) zmax = 0.; d__ = 0.; e = 0.; - i__1 = *n - 1 << 1; + i__1 = (*n - 1) << 1; for (k = 
1; k <= i__1; k += 2) { if (z__[k] < 0.) { *info = -(k + 200); diff --git a/lib/linalg/dstedc.cpp b/lib/linalg/dstedc.cpp index 1d90d78d6bf..d0e10eeafc2 100644 --- a/lib/linalg/dstedc.cpp +++ b/lib/linalg/dstedc.cpp @@ -82,7 +82,7 @@ int dstedc_(char *compz, integer *n, doublereal *d__, doublereal *e, doublereal lwmin = 1; } else if (*n <= smlsiz) { liwmin = 1; - lwmin = *n - 1 << 1; + lwmin = (*n - 1) << 1; } else { lgn = (integer)(log((doublereal)(*n)) / log(2.)); if (pow_lmp_ii(&c__2, &lgn) < *n) { diff --git a/lib/linalg/zstedc.cpp b/lib/linalg/zstedc.cpp index 5685b5eb2fe..01755b8e520 100644 --- a/lib/linalg/zstedc.cpp +++ b/lib/linalg/zstedc.cpp @@ -87,7 +87,7 @@ int zstedc_(char *compz, integer *n, doublereal *d__, doublereal *e, doublecompl } else if (*n <= smlsiz) { lwmin = 1; liwmin = 1; - lrwmin = *n - 1 << 1; + lrwmin = (*n - 1) << 1; } else if (icompz == 1) { lgn = (integer)(log((doublereal)(*n)) / log(2.)); if (pow_lmp_ii(&c__2, &lgn) < *n) { diff --git a/src/APIP/fix_lambda_la_csp_apip.cpp b/src/APIP/fix_lambda_la_csp_apip.cpp index b12604d3858..132f3b3bc97 100644 --- a/src/APIP/fix_lambda_la_csp_apip.cpp +++ b/src/APIP/fix_lambda_la_csp_apip.cpp @@ -41,9 +41,9 @@ using namespace MathConst; /* ---------------------------------------------------------------------- */ FixLambdaLACSPAPIP::FixLambdaLACSPAPIP(LAMMPS *lmp, int narg, char **arg) : - Fix(lmp, narg, arg), ngh_pairs(nullptr), list(nullptr), distsq(nullptr), nearest(nullptr), - fixstore_la_avg(nullptr), fixstore_la_inp(nullptr), fixstore_la_norm(nullptr), - fixstore_pairs(nullptr), f_lambda(nullptr), prefactor1(nullptr), prefactor2(nullptr) + Fix(lmp, narg, arg), ngh_pairs(nullptr), f_lambda(nullptr), distsq(nullptr), nearest(nullptr), + list(nullptr), fixstore_pairs(nullptr), fixstore_la_avg(nullptr), fixstore_la_inp(nullptr), + fixstore_la_norm(nullptr), prefactor1(nullptr), prefactor2(nullptr) { comm_reverse = 2; comm_forward = 2; @@ -316,7 +316,7 @@ void FixLambdaLACSPAPIP::post_neighbor() 
* Compute lambda, csp, csp_avg, csp_norm for all local atoms. */ -void FixLambdaLACSPAPIP::setup_pre_force(int vflag) +void FixLambdaLACSPAPIP::setup_pre_force(int /*vflag*/) { if (!const_ngh_flag || !tags_stored) pre_force_dyn_pairs(); @@ -743,7 +743,7 @@ void FixLambdaLACSPAPIP::calculate_forces(int vflag) int i, j, ii, jj, inum, jnum, i_pair, i1, i2, i3; int *ilist, *jlist, *numneigh, **firstneigh, *mask; double **x, **f, *lambda, *csp, *csp_avg, *csp_norm, *e_fast, *e_precise; - double xtmp, ytmp, ztmp, lambdatmp, fpair, delx, dely, delz, r, rsq, cspavgtmp, prefactortmp, + double xtmp, ytmp, ztmp, fpair, delx, dely, delz, r, rsq, cspavgtmp, prefactortmp, delx1, dely1, delz1, delx2, dely2, delz2, tmp, ftmp[3]; int nlocal = atom->nlocal; @@ -809,8 +809,6 @@ void FixLambdaLACSPAPIP::calculate_forces(int vflag) for (ii = 0; ii < inum; ii++) { i = ilist[ii]; - lambdatmp = lambda[i]; - prefactortmp = prefactor1[i]; xtmp = x[i][0]; ytmp = x[i][1]; diff --git a/src/EFF/compute_temp_deform_eff.cpp b/src/EFF/compute_temp_deform_eff.cpp index a184e91c798..ed3e18b69c4 100644 --- a/src/EFF/compute_temp_deform_eff.cpp +++ b/src/EFF/compute_temp_deform_eff.cpp @@ -16,47 +16,48 @@ Contributing author: Andres Jaramillo-Botero (Caltech) ------------------------------------------------------------------------- */ - -#include - #include "compute_temp_deform_eff.h" -#include "domain.h" + #include "atom.h" -#include "update.h" -#include "force.h" -#include "math_extra.h" -#include "modify.h" -#include "fix.h" +#include "comm.h" +#include "domain.h" +#include "error.h" #include "fix_deform.h" #include "fix_nh.h" +#include "force.h" #include "group.h" -#include "comm.h" +#include "math_extra.h" #include "memory.h" -#include "error.h" +#include "modify.h" +#include "update.h" +#include using namespace LAMMPS_NS; -enum{NOBIAS,BIAS}; +enum { NOBIAS, BIAS }; /* ---------------------------------------------------------------------- */ ComputeTempDeformEff::ComputeTempDeformEff(LAMMPS *lmp, 
int narg, char **arg) : - Compute(lmp, narg, arg) + Compute(lmp, narg, arg), temperature(nullptr), id_temp(nullptr) { tcompute_eff = 0; tcomputeflag = 1; - for (int iarg = 3; iarg < narg; ++iarg) { - if (strcmp(arg[iarg], "temp")==0) { - ++iarg; - if (iarg >= narg) utils::missing_cmd_args(FLERR, fmt::format("compute {} temp", style), error); - id_temp = utils::strdup(arg[iarg]); + int iarg = 3; + while (iarg < narg) { + if (strcmp(arg[iarg], "temp") == 0) { + if (iarg + 2 > narg) + utils::missing_cmd_args(FLERR, fmt::format("compute {} temp", style), error); + delete[] id_temp; + id_temp = utils::strdup(arg[iarg + 1]); tcomputeflag = 0; - } else error->all(FLERR, "Unknown compute {} keyword: {}", style, arg[iarg]); + iarg += 2; + } else error->all(FLERR, iarg, "Unknown compute {} keyword: {}", style, arg[iarg]); } if (!atom->electron_flag) - error->all(FLERR,"Compute temp/deform/eff requires atom style electron"); + error->all(FLERR, 2, "Compute {} requires atom style electron", style); scalar_flag = vector_flag = 1; size_vector = 6; @@ -87,7 +88,7 @@ ComputeTempDeformEff::~ComputeTempDeformEff() // delete temperature compute if created by this compute if (tcomputeflag) modify->delete_compute(id_temp); - delete [] id_temp; + delete[] id_temp; } /* ---------------------------------------------------------------------- */ @@ -98,20 +99,27 @@ void ComputeTempDeformEff::init() auto fixes = modify->get_fix_by_style("^deform"); if (fixes.size() > 0) { - if ((dynamic_cast(fixes[0]))->remapflag == Domain::X_REMAP && comm->me == 0) + auto *f = dynamic_cast(fixes[0]); + if (f && f->remapflag == Domain::X_REMAP && comm->me == 0) error->warning(FLERR, "Using compute {} with inconsistent fix deform remap option", style); - } else - error->warning(FLERR, "Using compute {} with no fix deform defined", style); + } else { + if (comm->me == 0) + error->warning(FLERR, "Using compute {} with no fix deform defined", style); + } // check internal temperature compute temperature = 
modify->get_compute_by_id(id_temp); if (!temperature) - error->all(FLERR,"Temperature ID {} for compute {} does not exist", id_temp, style); + error->all(FLERR, Error::NOLASTLINE, + "Temperature ID {} for compute {} does not exist", id_temp, style); if (temperature->tempflag == 0) - error->all(FLERR,"Compute {} temperature ID {} does not compute temperature", style, id_temp); + error->all(FLERR, Error::NOLASTLINE, + "Compute {} temperature ID {} does not compute temperature", style, id_temp); if (temperature->igroup != igroup) - error->all(FLERR,"Group of temperature compute with ID {} for compute {} does not match", id_temp, style); + error->all(FLERR, Error::NOLASTLINE, + "Group of temperature compute with ID {} for compute {} does not match", + id_temp, style); // Flag if internal temperature compute is not an eff compute @@ -122,10 +130,13 @@ void ComputeTempDeformEff::init() // avoid possibility of self-referential loop if (utils::strmatch(temperature->style, "^temp/deform")) - error->all(FLERR,"Compute {} internal temperature compute cannot be of style temp/deform", style); + error->all(FLERR, Error::NOLASTLINE, + "Compute {} internal temperature compute cannot be of style temp/deform", style); - if (temperature->tempbias) which = BIAS; - else which = NOBIAS; + if (temperature->tempbias) + which = BIAS; + else + which = NOBIAS; // make sure internal temperature compute is called first @@ -431,20 +442,20 @@ double ComputeTempDeformEff::memory_usage() /* ---------------------------------------------------------------------- */ -int ComputeTempDeformEff::modify_param(int narg, char **arg) { - if (strcmp(arg[0],"temp") == 0) { - if (narg < 2) error->all(FLERR,"Illegal compute_modify command"); +int ComputeTempDeformEff::modify_param(int narg, char **arg) +{ + if (strcmp(arg[0], "temp") == 0) { + if (narg < 2) utils::missing_cmd_args(FLERR,"compute_modify temp/deform/eff", error); if (tcomputeflag) modify->delete_compute(id_temp); - delete [] id_temp; + delete[] 
id_temp; tcomputeflag = 0; id_temp = utils::strdup(arg[1]); return 2; - - } else if (strcmp(arg[0],"extra/dof") == 0) { + } else if (strcmp(arg[0], "extra/dof") == 0) { // Can't set extra/dof of internal temp compute directly, // so pass through the modify call temperature->modify_params(MIN(narg, 2), arg); - } else if (strcmp(arg[0],"dynamic/dof") == 0) { + } else if (strcmp(arg[0], "dynamic/dof") == 0) { // Can't set dynamic_user flag of internal temp compute directly, // so pass through the modify call temperature->modify_params(MIN(narg, 2), arg); diff --git a/src/EFF/fix_nvt_sllod_eff.cpp b/src/EFF/fix_nvt_sllod_eff.cpp index ecd8f019c66..9476c48178c 100644 --- a/src/EFF/fix_nvt_sllod_eff.cpp +++ b/src/EFF/fix_nvt_sllod_eff.cpp @@ -1,8 +1,8 @@ // clang-format off /* ---------------------------------------------------------------------- LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator - https://www.lammps.org/, Sandia National Laboratories - LAMMPS development team: developers@lammps.org + https://www.lammps.org/ + LAMMPS development team: developers@lammps.org, Sandia National Laboratories Copyright (2003) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, the U.S. 
Government retains @@ -16,11 +16,9 @@ #include "atom.h" #include "comm.h" -#include "compute.h" #include "compute_temp_deform_eff.h" #include "domain.h" #include "error.h" -#include "fix.h" #include "fix_deform.h" #include "group.h" #include "math_extra.h" @@ -38,9 +36,9 @@ FixNVTSllodEff::FixNVTSllodEff(LAMMPS *lmp, int narg, char **arg) : FixNHEff(lmp, narg, arg) { if (!tstat_flag) - error->all(FLERR,"Temperature control must be used with fix nvt/sllod/eff"); + error->all(FLERR, 2, "Temperature control must be used with fix nvt/sllod/eff"); if (pstat_flag) - error->all(FLERR,"Pressure control can not be used with fix nvt/sllod/eff"); + error->all(FLERR, 2, "Pressure control can not be used with fix nvt/sllod/eff"); // default values @@ -107,25 +105,30 @@ void FixNVTSllodEff::init() if (integrator == LEGACY) { nondeformbias = 1; if (kick_flag) - error->all(FLERR, "fix {} with peculiar=no and kick=yes requires temperature bias " + error->all(FLERR, Error::NOLASTLINE, + "fix {} with peculiar=no and kick=yes requires temperature bias " "to be calculated by compute temp/deform/eff", style); } else if (!peculiar_flag) { - error->all(FLERR,"Fix {} used with lab-frame velocity and non-deform " - "temperature bias. For non-deform biases, either set peculiar = yes " - "or pass an explicit temp/deform/eff with an extra bias", style); + error->all(FLERR, Error::NOLASTLINE, "Fix {} used with lab-frame velocity and non-deform " + "temperature bias. For non-deform biases, either set peculiar = yes " + "or pass an explicit temp/deform/eff with an extra bias", style); } } // check fix deform remap settings auto deform = modify->get_fix_by_style("^deform"); - if (deform.size() < 1) error->all(FLERR,"Using fix {} with no fix deform defined", style); + if (deform.size() < 1) + error->all(FLERR, Error::NOLASTLINE, "Using fix {} with no fix deform defined", style); for (auto &ifix : deform) { auto *f = dynamic_cast(ifix); + // not compatible with fix deform. ignore. 
+ if (!f) continue; if ((peculiar_flag && f->remapflag != Domain::NO_REMAP) || (!peculiar_flag && f->remapflag != Domain::V_REMAP)) - error->all(FLERR,"Using fix {} with inconsistent fix {} remap option", style, f->style); + error->all(FLERR, Error::NOLASTLINE, + "Using fix {} with inconsistent fix {} remap option", style, f->style); if (kick_flag) { // apply initial kick if velocity stored in lab frame @@ -134,11 +137,11 @@ void FixNVTSllodEff::init() if (!peculiar_flag) { f->init(); if (comm->me == 0) utils::logmesg(lmp, "fix {} applying velocity profile kick.\n", style); - dynamic_cast(temperature)->apply_deform_bias_all(); + auto *f2 = dynamic_cast(temperature); + if (f2) f2->apply_deform_bias_all(); kick_flag = 0; } else if (comm->me == 0) { - error->warning(FLERR,"fix {} using peculiar frame velocity. " - "Ignoring kick flag.", style); + error->warning(FLERR,"fix {} using peculiar frame velocity. Ignoring kick flag.", style); } } diff --git a/src/GPU/pair_born_coul_long_cs_gpu.cpp b/src/GPU/pair_born_coul_long_cs_gpu.cpp index 8d6c0a8f2b0..5a1c498a67f 100644 --- a/src/GPU/pair_born_coul_long_cs_gpu.cpp +++ b/src/GPU/pair_born_coul_long_cs_gpu.cpp @@ -22,6 +22,7 @@ #include "error.h" #include "force.h" #include "gpu_extra.h" +#include "info.h" #include "kspace.h" #include "math_const.h" #include "neigh_list.h" @@ -74,6 +75,9 @@ double bornclcs_gpu_bytes(); PairBornCoulLongCSGPU::PairBornCoulLongCSGPU(LAMMPS *lmp) : PairBornCoulLongCS(lmp), gpu_mode(GPU_FORCE) { + if (Info::has_accelerator_feature("GPU","precision","single")) + error->all(FLERR,"Pair style born/coul/long/cs/gpu does not support single precision GPU mode"); + respa_enable = 0; reinitflag = 0; cpu_time = 0.0; diff --git a/src/GRAPHICS/dump_image.cpp b/src/GRAPHICS/dump_image.cpp index a709b079c68..e943e11b478 100644 --- a/src/GRAPHICS/dump_image.cpp +++ b/src/GRAPHICS/dump_image.cpp @@ -1689,7 +1689,7 @@ void DumpImage::create_image() vec3{objarray[i][4], objarray[i][5], objarray[i][6]}, color, 
opacity); } else if (objvec[i] == Graphics::PIXMAP) { // get pointer to pixmap buffer and get background transparency color - const auto *pixmap = (const unsigned char *) ubuf(objarray[i][6]).i; + const auto *pixmap = (const unsigned char *) ubuf(objarray[i][6]).i; // NOLINT double transcolor[3] = {objarray[i][7], objarray[i][8], objarray[i][9]}; if (iobj.flag1 == 0.0) // coordinates are in box coordinates image->draw_pixmap(&objarray[i][1], (int) objarray[i][4], (int) objarray[i][5], pixmap, diff --git a/src/KOKKOS/atom_vec_ellipsoid_kokkos.cpp b/src/KOKKOS/atom_vec_ellipsoid_kokkos.cpp index b39ff2aaad7..299e2a23ba0 100644 --- a/src/KOKKOS/atom_vec_ellipsoid_kokkos.cpp +++ b/src/KOKKOS/atom_vec_ellipsoid_kokkos.cpp @@ -33,8 +33,8 @@ using namespace MathConst; /* ---------------------------------------------------------------------- */ -AtomVecEllipsoidKokkos::AtomVecEllipsoidKokkos(LAMMPS *lmp) : AtomVec(lmp), -AtomVecKokkos(lmp), AtomVecEllipsoid(lmp) +AtomVecEllipsoidKokkos::AtomVecEllipsoidKokkos(LAMMPS *lmp) : + AtomVec(lmp), AtomVecKokkos(lmp), AtomVecEllipsoid(lmp), torque(nullptr) { size_exchange_bonus = 8; datamask_bonus = ELLIPSOID_MASK|BONUS_MASK; @@ -42,7 +42,7 @@ AtomVecKokkos(lmp), AtomVecEllipsoid(lmp) k_nghost_bonus = DAT::tdual_int_scalar("atomEllipKK:k_nghost_bonus"); k_nlocal_bonus = DAT::tdual_int_scalar("atomEllipKK:k_nlocal_bonus"); - if (sizeof(KK_FLOAT) != sizeof(double)) + if (((sizeof(KK_FLOAT) != sizeof(double))) && (comm->me == 0)) error->warning(FLERR,"AtomVecEllipsoidKokkos does not (yet) fully support " "KK_FLOAT within bonus struct data (shape, quat). 
Using double for these fields."); } diff --git a/src/MACHDYN/pair_smd_triangulated_surface.cpp b/src/MACHDYN/pair_smd_triangulated_surface.cpp index cded66e6445..87551b992cb 100644 --- a/src/MACHDYN/pair_smd_triangulated_surface.cpp +++ b/src/MACHDYN/pair_smd_triangulated_surface.cpp @@ -1,4 +1,3 @@ -// clang-format off /* ---------------------------------------------------------------------- * * *** Smooth Mach Dynamics *** @@ -39,319 +38,301 @@ #include "neigh_list.h" #include "neighbor.h" +#include +#include #include #include -#include -using namespace std; using namespace LAMMPS_NS; using namespace Eigen; /* ---------------------------------------------------------------------- */ -PairTriSurf::PairTriSurf(LAMMPS *lmp) : - Pair(lmp) { +PairTriSurf::PairTriSurf(LAMMPS *lmp) : Pair(lmp) +{ - onerad_dynamic = onerad_frozen = maxrad_dynamic = maxrad_frozen = nullptr; - bulkmodulus = nullptr; - kn = nullptr; - scale = 1.0; + onerad_dynamic = onerad_frozen = maxrad_dynamic = maxrad_frozen = nullptr; + bulkmodulus = nullptr; + kn = nullptr; + scale = 1.0; } /* ---------------------------------------------------------------------- */ -PairTriSurf::~PairTriSurf() { +PairTriSurf::~PairTriSurf() +{ - if (allocated) { - memory->destroy(setflag); - memory->destroy(cutsq); - memory->destroy(bulkmodulus); - memory->destroy(kn); + if (allocated) { + memory->destroy(setflag); + memory->destroy(cutsq); + memory->destroy(bulkmodulus); + memory->destroy(kn); - delete[] onerad_dynamic; - delete[] onerad_frozen; - delete[] maxrad_dynamic; - delete[] maxrad_frozen; - } + delete[] onerad_dynamic; + delete[] onerad_frozen; + delete[] maxrad_dynamic; + delete[] maxrad_frozen; + } } /* ---------------------------------------------------------------------- */ -void PairTriSurf::compute(int eflag, int vflag) { - int i, j, ii, jj, inum, jnum, itype, jtype; - double rsq, r, evdwl, fpair; - int *ilist, *jlist, *numneigh, **firstneigh; - double rcut, r_geom, delta, r_tri, r_particle, 
touch_distance, dt_crit; - int tri, particle; - Vector3d normal, x1, x2, x3, x4, x13, x23, x43, w, cp, x4cp, vnew, v_old; - ; - Vector3d xi, x_center, dx; - Matrix2d C; - Vector2d w2d, rhs; - - evdwl = 0.0; - ev_init(eflag, vflag); - - tagint *mol = atom->molecule; - double **f = atom->f; - double **smd_data_9 = atom->smd_data_9; - double **x = atom->x; - double **x0 = atom->x0; - double **v = atom->v; - double *rmass = atom->rmass; - int *type = atom->type; - int nlocal = atom->nlocal; - double *radius = atom->contact_radius; - double rcutSq; - Vector3d offset; - - int newton_pair = force->newton_pair; - int periodic = (domain->xperiodic || domain->yperiodic || domain->zperiodic); - - inum = list->inum; - ilist = list->ilist; - numneigh = list->numneigh; - firstneigh = list->firstneigh; - - int max_neighs = 0; - stable_time_increment = 1.0e22; - - // loop over neighbors of my atoms using a half neighbor list - for (ii = 0; ii < inum; ii++) { - i = ilist[ii]; - itype = type[i]; - jlist = firstneigh[i]; - jnum = numneigh[i]; - max_neighs = MAX(max_neighs, jnum); - - for (jj = 0; jj < jnum; jj++) { - j = jlist[jj]; - - j &= NEIGHMASK; - - jtype = type[j]; - - /* +void PairTriSurf::compute(int eflag, int vflag) +{ + int i, j, ii, jj, inum, jnum, itype, jtype; + double rsq, r, evdwl, fpair; + int *ilist, *jlist, *numneigh, **firstneigh; + double rcut, r_geom, delta, r_tri, r_particle, touch_distance, dt_crit; + int tri, particle; + Vector3d normal, x1, x2, x3, x4, x13, x23, x43, w, cp, x4cp, vnew, v_old; + ; + Vector3d xi, x_center, dx; + Matrix2d C; + Vector2d w2d, rhs; + + evdwl = 0.0; + ev_init(eflag, vflag); + + tagint *mol = atom->molecule; + double **f = atom->f; + double **smd_data_9 = atom->smd_data_9; + double **x = atom->x; + double **x0 = atom->x0; + double **v = atom->v; + double *rmass = atom->rmass; + int *type = atom->type; + int nlocal = atom->nlocal; + double *radius = atom->contact_radius; + double rcutSq; + Vector3d offset; + + int newton_pair = 
force->newton_pair; + int periodic = (domain->xperiodic || domain->yperiodic || domain->zperiodic); + + inum = list->inum; + ilist = list->ilist; + numneigh = list->numneigh; + firstneigh = list->firstneigh; + + int max_neighs = 0; + stable_time_increment = 1.0e22; + + // loop over neighbors of my atoms using a half neighbor list + for (ii = 0; ii < inum; ii++) { + i = ilist[ii]; + itype = type[i]; + jlist = firstneigh[i]; + jnum = numneigh[i]; + max_neighs = MAX(max_neighs, jnum); + + for (jj = 0; jj < jnum; jj++) { + j = jlist[jj]; + + j &= NEIGHMASK; + + jtype = type[j]; + + /* * decide which one of i, j is triangle and which is particle */ - if ((mol[i] < 65535) && (mol[j] >= 65535)) { - particle = i; - tri = j; - } else if ((mol[j] < 65535) && (mol[i] >= 65535)) { - particle = j; - tri = i; - } else { - error->one(FLERR, "unknown case"); - } - - //x_center << x[tri][0], x[tri][1], x[tri][2]; // center of triangle - x_center(0) = x[tri][0]; - x_center(1) = x[tri][1]; - x_center(2) = x[tri][2]; - //x4 << x[particle][0], x[particle][1], x[particle][2]; - x4(0) = x[particle][0]; - x4(1) = x[particle][1]; - x4(2) = x[particle][2]; - dx = x_center - x4; // - if (periodic) { - domain->minimum_image(FLERR, dx(0), dx(1), dx(2)); - } - rsq = dx.squaredNorm(); - - r_tri = scale * radius[tri]; - r_particle = scale * radius[particle]; - rcut = r_tri + r_particle; - rcutSq = rcut * rcut; - - //printf("type i=%d, type j=%d, r=%f, ri=%f, rj=%f\n", itype, jtype, sqrt(rsq), ri, rj); - - if (rsq < rcutSq) { - - /* + if ((mol[i] < 65535) && (mol[j] >= 65535)) { + particle = i; + tri = j; + } else if ((mol[j] < 65535) && (mol[i] >= 65535)) { + particle = j; + tri = i; + } else { + error->one(FLERR, "unknown case"); + } + + x_center(0) = x[tri][0]; + x_center(1) = x[tri][1]; + x_center(2) = x[tri][2]; + + x4(0) = x[particle][0]; + x4(1) = x[particle][1]; + x4(2) = x[particle][2]; + dx = x_center - x4; // + if (periodic) { domain->minimum_image(FLERR, dx(0), dx(1), dx(2)); } + rsq = 
dx.squaredNorm(); + + r_tri = scale * radius[tri]; + r_particle = scale * radius[particle]; + rcut = r_tri + r_particle; + rcutSq = rcut * rcut; + + if (rsq < rcutSq) { + + /* * gather triangle information */ - normal(0) = x0[tri][0]; - normal(1) = x0[tri][1]; - normal(2) = x0[tri][2]; + normal(0) = x0[tri][0]; + normal(1) = x0[tri][1]; + normal(2) = x0[tri][2]; - /* + /* * distance check: is particle closer than its radius to the triangle plane? */ - if (fabs(dx.dot(normal)) < radius[particle]) { - /* + if (fabs(dx.dot(normal)) < radius[particle]) { + /* * get other two triangle vertices */ - x1(0) = smd_data_9[tri][0]; - x1(1) = smd_data_9[tri][1]; - x1(2) = smd_data_9[tri][2]; - x2(0) = smd_data_9[tri][3]; - x2(1) = smd_data_9[tri][4]; - x2(2) = smd_data_9[tri][5]; - x3(0) = smd_data_9[tri][6]; - x3(1) = smd_data_9[tri][7]; - x3(2) = smd_data_9[tri][8]; - - PointTriangleDistance(x4, x1, x2, x3, cp, r); - - /* + x1(0) = smd_data_9[tri][0]; + x1(1) = smd_data_9[tri][1]; + x1(2) = smd_data_9[tri][2]; + x2(0) = smd_data_9[tri][3]; + x2(1) = smd_data_9[tri][4]; + x2(2) = smd_data_9[tri][5]; + x3(0) = smd_data_9[tri][6]; + x3(1) = smd_data_9[tri][7]; + x3(2) = smd_data_9[tri][8]; + + PointTriangleDistance(x4, x1, x2, x3, cp, r); + + /* * distance to closest point */ - x4cp = x4 - cp; + x4cp = x4 - cp; - /* + /* * flip normal to point in direction of x4cp */ - if (x4cp.dot(normal) < 0.0) { - normal *= -1.0; - } + if (x4cp.dot(normal) < 0.0) { normal *= -1.0; } - /* + /* * penalty force pushes particle away from triangle */ - if (r < 1.0 * radius[particle]) { - - delta = radius[particle] - r; // overlap distance - r_geom = radius[particle]; - fpair = 1.066666667e0 * bulkmodulus[itype][jtype] * delta * sqrt(delta * r_geom); - dt_crit = 3.14 * sqrt(rmass[particle] / (fpair / delta)); - stable_time_increment = MIN(stable_time_increment, dt_crit); - - evdwl = r * fpair * 0.4e0 * delta; // GCG 25 April: this expression conserves total energy - - fpair /= (r + 1.0e-2 * 
radius[particle]); // divide by r + softening and multiply with non-normalized distance vector - - if (particle < nlocal) { - f[particle][0] += x4cp(0) * fpair; - f[particle][1] += x4cp(1) * fpair; - f[particle][2] += x4cp(2) * fpair; - } - - if (tri < nlocal) { - f[tri][0] -= x4cp(0) * fpair; - f[tri][1] -= x4cp(1) * fpair; - f[tri][2] -= x4cp(2) * fpair; - } - - if (evflag) { - ev_tally(i, j, nlocal, newton_pair, evdwl, 0.0, fpair, x4cp(0), x4cp(1), x4cp(2)); - } - - } - - /* + if (r < 1.0 * radius[particle]) { + + delta = radius[particle] - r; // overlap distance + r_geom = radius[particle]; + fpair = 1.066666667e0 * bulkmodulus[itype][jtype] * delta * sqrt(delta * r_geom); + dt_crit = 3.14 * sqrt(rmass[particle] / (fpair / delta)); + stable_time_increment = MIN(stable_time_increment, dt_crit); + + evdwl = r * fpair * 0.4e0 * + delta; // GCG 25 April: this expression conserves total energy + + fpair /= + (r + + 1.0e-2 * + radius + [particle]); // divide by r + softening and multiply with non-normalized distance vector + + if (particle < nlocal) { + f[particle][0] += x4cp(0) * fpair; + f[particle][1] += x4cp(1) * fpair; + f[particle][2] += x4cp(2) * fpair; + } + + if (tri < nlocal) { + f[tri][0] -= x4cp(0) * fpair; + f[tri][1] -= x4cp(1) * fpair; + f[tri][2] -= x4cp(2) * fpair; + } + + if (evflag) { + ev_tally(i, j, nlocal, newton_pair, evdwl, 0.0, fpair, x4cp(0), x4cp(1), x4cp(2)); + } + } + + /* * if particle comes too close to triangle, reflect its velocity and explicitly move it away */ - touch_distance = 1.0 * radius[particle]; - if (r < touch_distance) { + touch_distance = 1.0 * radius[particle]; + if (r < touch_distance) { - /* + /* * reflect velocity if it points toward triangle */ - normal = x4cp / r; - - //v_old << v[particle][0], v[particle][1], v[particle][2]; - v_old(0) = v[particle][0]; - v_old(1) = v[particle][1]; - v_old(2) = v[particle][2]; - if (v_old.dot(normal) < 0.0) { - //printf("flipping velocity\n"); - vnew = 1.0 * (-2.0 * 
v_old.dot(normal) * normal + v_old); - v[particle][0] = vnew(0); - v[particle][1] = vnew(1); - v[particle][2] = vnew(2); - } - - //printf("moving particle on top of triangle\n"); - x[particle][0] = cp(0) + touch_distance * normal(0); - x[particle][1] = cp(1) + touch_distance * normal(1); - x[particle][2] = cp(2) + touch_distance * normal(2); - } - - } - } - } + normal = x4cp / r; + + v_old(0) = v[particle][0]; + v_old(1) = v[particle][1]; + v_old(2) = v[particle][2]; + if (v_old.dot(normal) < 0.0) { + vnew = 1.0 * (-2.0 * v_old.dot(normal) * normal + v_old); + v[particle][0] = vnew(0); + v[particle][1] = vnew(1); + v[particle][2] = vnew(2); + } + + x[particle][0] = cp(0) + touch_distance * normal(0); + x[particle][1] = cp(1) + touch_distance * normal(1); + x[particle][2] = cp(2) + touch_distance * normal(2); + } } - -// int max_neighs_all = 0; -// MPI_Allreduce(&max_neighs, &max_neighs_all, 1, MPI_INT, MPI_MAX, world); -// if (comm->me == 0) { -// printf("max. neighs in tri pair is %d\n", max_neighs_all); -// } -// -// double stable_time_increment_all = 0.0; -// MPI_Allreduce(&stable_time_increment, &stable_time_increment_all, 1, MPI_DOUBLE, MPI_MIN, world); -// if (comm->me == 0) { -// printf("stable time step tri pair is %f\n", stable_time_increment_all); -// } + } + } + } } /* ---------------------------------------------------------------------- allocate all arrays ------------------------------------------------------------------------- */ -void PairTriSurf::allocate() { - allocated = 1; - int n = atom->ntypes; +void PairTriSurf::allocate() +{ + allocated = 1; + int n = atom->ntypes; - memory->create(setflag, n + 1, n + 1, "pair:setflag"); - for (int i = 1; i <= n; i++) - for (int j = i; j <= n; j++) - setflag[i][j] = 0; + memory->create(setflag, n + 1, n + 1, "pair:setflag"); + for (int i = 1; i <= n; i++) + for (int j = i; j <= n; j++) setflag[i][j] = 0; - memory->create(bulkmodulus, n + 1, n + 1, "pair:kspring"); - memory->create(kn, n + 1, n + 1, 
"pair:kn"); + memory->create(bulkmodulus, n + 1, n + 1, "pair:kspring"); + memory->create(kn, n + 1, n + 1, "pair:kn"); - memory->create(cutsq, n + 1, n + 1, "pair:cutsq"); // always needs to be allocated, even with granular neighborlist + memory->create(cutsq, n + 1, n + 1, + "pair:cutsq"); // always needs to be allocated, even with granular neighborlist - onerad_dynamic = new double[n + 1]; - onerad_frozen = new double[n + 1]; - maxrad_dynamic = new double[n + 1]; - maxrad_frozen = new double[n + 1]; + onerad_dynamic = new double[n + 1]; + onerad_frozen = new double[n + 1]; + maxrad_dynamic = new double[n + 1]; + maxrad_frozen = new double[n + 1]; } /* ---------------------------------------------------------------------- global settings ------------------------------------------------------------------------- */ -void PairTriSurf::settings(int narg, char **arg) { - if (narg != 1) - error->all(FLERR, "Illegal number of args for pair_style smd/tri_surface"); - - scale = utils::numeric(FLERR, arg[0],false,lmp); - if (comm->me == 0) { - printf("\n>>========>>========>>========>>========>>========>>========>>========>>========\n"); - printf("SMD/TRI_SURFACE CONTACT SETTINGS:\n"); - printf("... effective contact radius is scaled by %f\n", scale); - printf(">>========>>========>>========>>========>>========>>========>>========>>========\n"); - } +void PairTriSurf::settings(int narg, char **arg) +{ + if (narg != 1) error->all(FLERR, "Illegal number of args for pair_style smd/tri_surface"); + scale = utils::numeric(FLERR, arg[0], false, lmp); + if (comm->me == 0) { + printf("\n>>========>>========>>========>>========>>========>>========>>========>>========\n"); + printf("SMD/TRI_SURFACE CONTACT SETTINGS:\n"); + printf("... 
effective contact radius is scaled by %f\n", scale); + printf(">>========>>========>>========>>========>>========>>========>>========>>========\n"); + } } /* ---------------------------------------------------------------------- set coeffs for one or more type pairs ------------------------------------------------------------------------- */ -void PairTriSurf::coeff(int narg, char **arg) { - if (narg != 3) - error->all(FLERR, "Incorrect args for pair coefficients" + utils::errorurl(21)); - if (!allocated) - allocate(); +void PairTriSurf::coeff(int narg, char **arg) +{ + if (narg != 3) error->all(FLERR, "Incorrect args for pair coefficients" + utils::errorurl(21)); + if (!allocated) allocate(); int ilo, ihi, jlo, jhi; - utils::bounds(FLERR,arg[0], 1,atom->ntypes, ilo, ihi, error); - utils::bounds(FLERR,arg[1], 1,atom->ntypes, jlo, jhi, error); + utils::bounds(FLERR, arg[0], 1, atom->ntypes, ilo, ihi, error); + utils::bounds(FLERR, arg[1], 1, atom->ntypes, jlo, jhi, error); - double bulkmodulus_one = utils::numeric(FLERR,arg[2],false,lmp); + double bulkmodulus_one = utils::numeric(FLERR, arg[2], false, lmp); // set short-range force constant double kn_one = 0.0; if (domain->dimension == 3) { - kn_one = (16. / 15.) * bulkmodulus_one; //assuming poisson ratio = 1/4 for 3d + kn_one = (16. / 15.) * bulkmodulus_one; //assuming poisson ratio = 1/4 for 3d } else { - kn_one = 0.251856195 * (2. / 3.) * bulkmodulus_one; //assuming poisson ratio = 1/3 for 2d + kn_one = 0.251856195 * (2. / 3.) 
* bulkmodulus_one; //assuming poisson ratio = 1/3 for 2d } int count = 0; @@ -364,18 +345,17 @@ void PairTriSurf::coeff(int narg, char **arg) { } } - if (count == 0) - error->all(FLERR, "Incorrect args for pair coefficients" + utils::errorurl(21)); + if (count == 0) error->all(FLERR, "Incorrect args for pair coefficients" + utils::errorurl(21)); } /* ---------------------------------------------------------------------- init for one type pair i,j and corresponding j,i ------------------------------------------------------------------------- */ -double PairTriSurf::init_one(int i, int j) { +double PairTriSurf::init_one(int i, int j) +{ - if (!allocated) - allocate(); + if (!allocated) allocate(); if (setflag[i][j] == 0) error->all(FLERR, Error::NOLASTLINE, @@ -400,32 +380,32 @@ double PairTriSurf::init_one(int i, int j) { init specific to this pair style ------------------------------------------------------------------------- */ -void PairTriSurf::init_style() { - int i; +void PairTriSurf::init_style() +{ + int i; - // error checks + // error checks - if (!atom->contact_radius_flag) - error->all(FLERR, "Pair style smd/smd/tri_surface requires atom style with contact_radius"); + if (!atom->contact_radius_flag) + error->all(FLERR, "Pair style smd/smd/tri_surface requires atom style with contact_radius"); - neighbor->add_request(this, NeighConst::REQ_SIZE); + neighbor->add_request(this, NeighConst::REQ_SIZE); - // set maxrad_dynamic and maxrad_frozen for each type - // include future Fix pour particles as dynamic + // set maxrad_dynamic and maxrad_frozen for each type + // include future Fix pour particles as dynamic - for (i = 1; i <= atom->ntypes; i++) - onerad_dynamic[i] = onerad_frozen[i] = 0.0; + for (i = 1; i <= atom->ntypes; i++) onerad_dynamic[i] = onerad_frozen[i] = 0.0; - double *radius = atom->radius; - int *type = atom->type; - int nlocal = atom->nlocal; + double *radius = atom->radius; + int *type = atom->type; + int nlocal = atom->nlocal; - for (i = 0; i 
< nlocal; i++) { - onerad_dynamic[type[i]] = MAX(onerad_dynamic[type[i]], radius[i]); - } + for (i = 0; i < nlocal; i++) { + onerad_dynamic[type[i]] = MAX(onerad_dynamic[type[i]], radius[i]); + } - MPI_Allreduce(&onerad_dynamic[1], &maxrad_dynamic[1], atom->ntypes, MPI_DOUBLE, MPI_MAX, world); - MPI_Allreduce(&onerad_frozen[1], &maxrad_frozen[1], atom->ntypes, MPI_DOUBLE, MPI_MAX, world); + MPI_Allreduce(&onerad_dynamic[1], &maxrad_dynamic[1], atom->ntypes, MPI_DOUBLE, MPI_MAX, world); + MPI_Allreduce(&onerad_frozen[1], &maxrad_frozen[1], atom->ntypes, MPI_DOUBLE, MPI_MAX, world); } /* ---------------------------------------------------------------------- @@ -433,18 +413,19 @@ void PairTriSurf::init_style() { optional granular history list ------------------------------------------------------------------------- */ -void PairTriSurf::init_list(int id, NeighList *ptr) { - if (id == 0) - list = ptr; +void PairTriSurf::init_list(int id, NeighList *ptr) +{ + if (id == 0) list = ptr; } /* ---------------------------------------------------------------------- memory usage of local atom-based arrays ------------------------------------------------------------------------- */ -double PairTriSurf::memory_usage() { +double PairTriSurf::memory_usage() +{ - return 0.0; + return 0.0; } /* @@ -733,97 +714,87 @@ double PairTriSurf::memory_usage() { % https://www.geometrictools.com/Documentation/DistancePoint3Triangle3.pdf */ -void PairTriSurf::PointTriangleDistance(const Vector3d& sourcePosition, const Vector3d& TRI0, const Vector3d& TRI1, - const Vector3d& TRI2, Vector3d &CP, double &dist) { - - Vector3d edge0 = TRI1 - TRI0; - Vector3d edge1 = TRI2 - TRI0; - Vector3d v0 = TRI0 - sourcePosition; - - double a = edge0.dot(edge0); - double b = edge0.dot(edge1); - double c = edge1.dot(edge1); - double d = edge0.dot(v0); - double e = edge1.dot(v0); - - double det = a * c - b * b; - double s = b * e - c * d; - double t = b * d - a * e; - - if (s + t < det) { - if (s < 0.f) { - if (t < 
0.f) { - if (d < 0.f) { - s = clamp(-d / a, 0.f, 1.f); - t = 0.f; - } else { - s = 0.f; - t = clamp(-e / c, 0.f, 1.f); - } - } else { - s = 0.f; - t = clamp(-e / c, 0.f, 1.f); - } - } else if (t < 0.f) { - s = clamp(-d / a, 0.f, 1.f); - t = 0.f; - } else { - float invDet = 1.f / det; - s *= invDet; - t *= invDet; - } +void PairTriSurf::PointTriangleDistance(const Vector3d &sourcePosition, const Vector3d &TRI0, + const Vector3d &TRI1, const Vector3d &TRI2, Vector3d &CP, + double &dist) +{ + + Vector3d edge0 = TRI1 - TRI0; + Vector3d edge1 = TRI2 - TRI0; + Vector3d v0 = TRI0 - sourcePosition; + + double a = edge0.dot(edge0); + double b = edge0.dot(edge1); + double c = edge1.dot(edge1); + double d = edge0.dot(v0); + double e = edge1.dot(v0); + + double det = a * c - b * b; + double s = b * e - c * d; + double t = b * d - a * e; + + if (s + t < det) { + if (s < 0.0) { + if (t < 0.0) { + if (d < 0.0) { + s = std::clamp(-d / a, 0.0, 1.0); + t = 0.0; } else { - if (s < 0.f) { - float tmp0 = b + d; - float tmp1 = c + e; - if (tmp1 > tmp0) { - float numer = tmp1 - tmp0; - float denom = a - 2 * b + c; - s = clamp(numer / denom, 0.f, 1.f); - t = 1 - s; - } else { - t = clamp(-e / c, 0.f, 1.f); - s = 0.f; - } - } else if (t < 0.f) { - if (a + d > b + e) { - float numer = c + e - b - d; - float denom = a - 2 * b + c; - s = clamp(numer / denom, 0.f, 1.f); - t = 1 - s; - } else { - s = clamp(-e / c, 0.f, 1.f); - t = 0.f; - } - } else { - float numer = c + e - b - d; - float denom = a - 2 * b + c; - s = clamp(numer / denom, 0.f, 1.f); - t = 1.f - s; - } + s = 0.0; + t = std::clamp(-e / c, 0.0, 1.0); } + } else { + s = 0.0; + t = std::clamp(-e / c, 0.0, 1.0); + } + } else if (t < 0.0) { + s = std::clamp(-d / a, 0.0, 1.0); + t = 0.0; + } else { + double invDet = 1.0 / det; + s *= invDet; + t *= invDet; + } + } else { + if (s < 0.0) { + double tmp0 = b + d; + double tmp1 = c + e; + if (tmp1 > tmp0) { + double numer = tmp1 - tmp0; + double denom = a - 2 * b + c; + s = std::clamp(numer 
/ denom, 0.0, 1.0); + t = 1 - s; + } else { + t = std::clamp(-e / c, 0.0, 1.0); + s = 0.0; + } + } else if (t < 0.0) { + if (a + d > b + e) { + double numer = c + e - b - d; + double denom = a - 2 * b + c; + s = std::clamp(numer / denom, 0.0, 1.0); + t = 1 - s; + } else { + s = std::clamp(-e / c, 0.0, 1.0); + t = 0.0; + } + } else { + double numer = c + e - b - d; + double denom = a - 2 * b + c; + s = std::clamp(numer / denom, 0.0, 1.0); + t = 1.0 - s; + } + } - CP = TRI0 + s * edge0 + t * edge1; - dist = (CP - sourcePosition).norm(); - -} - -double PairTriSurf::clamp(const double a, const double min, const double max) { - if (a < min) { - return min; - } else if (a > max) { - return max; - } else { - return a; - } + CP = TRI0 + s * edge0 + t * edge1; + dist = (CP - sourcePosition).norm(); } -void *PairTriSurf::extract(const char *str, int &/*i*/) { - //printf("in PairTriSurf::extract\n"); - if (strcmp(str, "smd/tri_surface/stable_time_increment_ptr") == 0) { - return (void *) &stable_time_increment; - } - - return nullptr; +void *PairTriSurf::extract(const char *str, int & /*i*/) +{ + if (strcmp(str, "smd/tri_surface/stable_time_increment_ptr") == 0) { + return (void *) &stable_time_increment; + } + return nullptr; } diff --git a/src/MACHDYN/pair_smd_triangulated_surface.h b/src/MACHDYN/pair_smd_triangulated_surface.h index ae7e8198be3..99b6cd1cbba 100644 --- a/src/MACHDYN/pair_smd_triangulated_surface.h +++ b/src/MACHDYN/pair_smd_triangulated_surface.h @@ -50,7 +50,6 @@ class PairTriSurf : public Pair { void PointTriangleDistance(const Eigen::Vector3d &P, const Eigen::Vector3d &TRI1, const Eigen::Vector3d &TRI2, const Eigen::Vector3d &TRI3, Eigen::Vector3d &CP, double &dist); - double clamp(const double a, const double min, const double max); void *extract(const char *, int &) override; protected: diff --git a/src/ML-RANN/pair_rann.cpp b/src/ML-RANN/pair_rann.cpp index cdb6fb6191f..3f290acc67b 100644 --- a/src/ML-RANN/pair_rann.cpp +++ b/src/ML-RANN/pair_rann.cpp 
@@ -438,7 +438,7 @@ void PairRANN::read_atom_types(std::vector line,char *filename,int void PairRANN::read_mass(const std::vector &line1, const std::vector &line2, const char *filename,int linenum) { if (nelements == -1)error->one(filename,linenum-1,"atom types must be defined before mass in potential file."); for (int i=0;i line,std::vector l int i; if (nelements == -1)error->one(filename,linenum-1,"atom types must be defined before fingerprints per element in potential file."); for (i=0;i line,std::vector line,std::vec for (i=1;i<=n_body_type;i++) { found = false; for (j=0;j line,std::vec for (j=0;jatomtypes[j]!=atomtypes[j]) {break;} if (j==n_body_type-1) { - if (line[nwords-3].compare(fingerprints[i][k]->style)==0 && utils::inumeric(filename,linenum,line[nwords-2],true,lmp)==fingerprints[i][k]->id) { + if ((line[nwords-3] == fingerprints[i][k]->style) && + (utils::inumeric(filename,linenum,line[nwords-2],true,lmp)==fingerprints[i][k]->id)) { found=true; i1 = k; break; @@ -540,7 +541,7 @@ void PairRANN::read_network_layers(std::vector line,std::vectorone(filename,linenum-1,"atom types must be defined before network layers in potential file."); for (i=0;ione(filename,linenum,"invalid number of network layers"); delete[] net[i].dimensions; @@ -567,8 +568,9 @@ void PairRANN::read_network_layers(std::vector line,std::vector line,std::vector line1,char *filename,int linenum) { int i; for (i=0;ione(filename,linenum-1,"networklayers for each atom type must be defined before the corresponding layer sizes."); + if (line[1] == elements[i]) { + if (net[i].layers == 0) + error->one(filename,linenum-1,"networklayers for each atom type must be defined before the corresponding layer sizes."); int j = utils::inumeric(filename,linenum,line[2],true,lmp); if (j>=net[i].layers || j<0) {error->one(filename,linenum,"invalid layer in layer size definition");}; net[i].dimensions[j]= utils::inumeric(filename,linenum,line1[0],true,lmp); @@ -584,7 +586,7 @@ void 
PairRANN::read_weight(std::vector line,std::vectorone(filename,*linenum-1,"networklayers must be defined before weights."); i=utils::inumeric(filename,*linenum,line[2],true,lmp); if (i>=net[l].layers || i<0)error->one(filename,*linenum-1,"invalid weight layer"); @@ -619,7 +621,7 @@ void PairRANN::read_bias(std::vector line,std::vector char linetemp[MAXLINE] = {'\0'}; char *ptr; for (l=0;lone(filename,*linenum-1,"networklayers must be defined before biases."); i=utils::inumeric(filename,*linenum,line[2],true,lmp); if (i>=net[l].layers || i<0)error->one(filename,*linenum-1,"invalid bias layer"); @@ -644,7 +646,7 @@ void PairRANN::read_bias(std::vector line,std::vector void PairRANN::read_activation_functions(std::vector line,std::vector line1,char *filename,int linenum) { int i,l; for (l=0;lone(filename,linenum-1,"networklayers must be defined before activation functions."); i = strtol(line[2].c_str(),nullptr,10); if (i>=net[l].layers || i<0)error->one(filename,linenum-1,"invalid activation layer"); @@ -660,14 +662,15 @@ void PairRANN::read_screening(std::vector line,std::vectorone(filename,linenum-1,"atom types must be defined before fingerprints in potential file."); + if (nelements == -1) + error->one(filename,linenum-1,"atom types must be defined before fingerprints in potential file."); if (nwords!=5)error->one(filename,linenum-1,"invalid screening command"); int n_body_type = 3; atomtypes = new int[n_body_type]; for (i=1;i<=n_body_type;i++) { found = false; for (j=0;j line,std::vectorone(filename,linenum-1,"unrecognized screening keyword"); diff --git a/src/ML-RANN/rann_fingerprint_bond.cpp b/src/ML-RANN/rann_fingerprint_bond.cpp index 412e7070738..7a47cff1717 100644 --- a/src/ML-RANN/rann_fingerprint_bond.cpp +++ b/src/ML-RANN/rann_fingerprint_bond.cpp @@ -78,26 +78,21 @@ Fingerprint_bond::~Fingerprint_bond() { bool Fingerprint_bond::parse_values(std::string constant,std::vector line1) { int nwords,l; nwords=line1.size(); - if (constant.compare("re")==0) { + 
if (constant == "re") { re = strtod(line1[0].c_str(),nullptr); - } - else if (constant.compare("rc")==0) { + } else if (constant == "rc") { rc = strtod(line1[0].c_str(),nullptr); - } - else if (constant.compare("alphak")==0) { + } else if (constant == "alphak") { delete[] alpha_k; alpha_k = new double[nwords]; for (l=0;lerrorf(FLERR,"Undefined value for bond power"); diff --git a/src/ML-RANN/rann_fingerprint_bondscreened.cpp b/src/ML-RANN/rann_fingerprint_bondscreened.cpp index 018bbc6df1e..10cf85663a4 100644 --- a/src/ML-RANN/rann_fingerprint_bondscreened.cpp +++ b/src/ML-RANN/rann_fingerprint_bondscreened.cpp @@ -79,26 +79,21 @@ Fingerprint_bondscreened::~Fingerprint_bondscreened() { bool Fingerprint_bondscreened::parse_values(std::string constant,std::vector line1) { int nwords,l; nwords=line1.size(); - if (constant.compare("re")==0) { + if (constant == "re") { re = strtod(line1[0].c_str(),nullptr); - } - else if (constant.compare("rc")==0) { + } else if (constant == "rc") { rc = strtod(line1[0].c_str(),nullptr); - } - else if (constant.compare("alphak")==0) { + } else if (constant == "alphak") { delete[] alpha_k; alpha_k = new double[nwords]; for (l=0;lerrorf(FLERR,"Undefined value for bond power"); diff --git a/src/ML-RANN/rann_fingerprint_bondscreenedspin.cpp b/src/ML-RANN/rann_fingerprint_bondscreenedspin.cpp index 26fa46b4ebc..fab04b92187 100644 --- a/src/ML-RANN/rann_fingerprint_bondscreenedspin.cpp +++ b/src/ML-RANN/rann_fingerprint_bondscreenedspin.cpp @@ -80,26 +80,21 @@ Fingerprint_bondscreenedspin::~Fingerprint_bondscreenedspin() { bool Fingerprint_bondscreenedspin::parse_values(std::string constant,std::vector line1) { int nwords,l; nwords=line1.size(); - if (constant.compare("re")==0) { + if (constant == "re") { re = strtod(line1[0].c_str(),nullptr); - } - else if (constant.compare("rc")==0) { + } else if (constant == "rc") { rc = strtod(line1[0].c_str(),nullptr); - } - else if (constant.compare("alphak")==0) { + } else if (constant == "alphak") { 
delete[] alpha_k; alpha_k = new double[nwords]; for (l=0;lerrorf(FLERR,"Undefined value for bond power"); diff --git a/src/ML-RANN/rann_fingerprint_bondspin.cpp b/src/ML-RANN/rann_fingerprint_bondspin.cpp index 5ecd37b89c6..15609454d58 100644 --- a/src/ML-RANN/rann_fingerprint_bondspin.cpp +++ b/src/ML-RANN/rann_fingerprint_bondspin.cpp @@ -79,26 +79,21 @@ Fingerprint_bondspin::~Fingerprint_bondspin() { bool Fingerprint_bondspin::parse_values(std::string constant,std::vector line1) { int nwords,l; nwords=line1.size(); - if (constant.compare("re")==0) { + if (constant == "re") { re = strtod(line1[0].c_str(),nullptr); - } - else if (constant.compare("rc")==0) { + } else if (constant == "rc") { rc = strtod(line1[0].c_str(),nullptr); - } - else if (constant.compare("alphak")==0) { + } else if (constant == "alphak") { delete[] alpha_k; alpha_k = new double[nwords]; for (l=0;lerrorf(FLERR,"Undefined value for bond power"); diff --git a/src/ML-RANN/rann_fingerprint_radial.cpp b/src/ML-RANN/rann_fingerprint_radial.cpp index e6936c5ee75..fbc0a07bdab 100644 --- a/src/ML-RANN/rann_fingerprint_radial.cpp +++ b/src/ML-RANN/rann_fingerprint_radial.cpp @@ -66,29 +66,23 @@ Fingerprint_radial::~Fingerprint_radial() bool Fingerprint_radial::parse_values(std::string constant,std::vector line1) { int l; int nwords=line1.size(); - if (constant.compare("re")==0) { + if (constant == "re") { re = strtod(line1[0].c_str(),nullptr); - } - else if (constant.compare("rc")==0) { + } else if (constant == "rc") { rc = strtod(line1[0].c_str(),nullptr); - } - else if (constant.compare("alpha")==0) { + } else if (constant == "alpha") { delete[] alpha; alpha = new double[nwords]; for (l=0;lerrorf(FLERR,"Undefined value for radial power"); + } else pair->errorf(FLERR,"Undefined value for radial power"); //code will run with default o=0 if o is never specified. All other values must be defined in potential file. 
if (re!=0 && rc!=0 && alpha!=nullptr && dr!=0 && nmax!=0)return true; return false; diff --git a/src/ML-RANN/rann_fingerprint_radialscreened.cpp b/src/ML-RANN/rann_fingerprint_radialscreened.cpp index 959078dae71..bcae3688a7e 100644 --- a/src/ML-RANN/rann_fingerprint_radialscreened.cpp +++ b/src/ML-RANN/rann_fingerprint_radialscreened.cpp @@ -67,29 +67,23 @@ Fingerprint_radialscreened::~Fingerprint_radialscreened() bool Fingerprint_radialscreened::parse_values(std::string constant,std::vector line1) { int l; int nwords=line1.size(); - if (constant.compare("re")==0) { + if (constant == "re") { re = strtod(line1[0].c_str(),nullptr); - } - else if (constant.compare("rc")==0) { + } else if (constant == "rc") { rc = strtod(line1[0].c_str(),nullptr); - } - else if (constant.compare("alpha")==0) { + } else if (constant == "alpha") { delete[] alpha; alpha = new double[nwords]; for (l=0;lerrorf(FLERR,"Undefined value for radial power"); + } else pair->errorf(FLERR,"Undefined value for radial power"); //code will run with default o=0 if o is never specified. All other values must be defined in potential file. 
if (re!=0 && rc!=0 && alpha!=nullptr && dr!=0 && nmax!=0)return true; return false; diff --git a/src/ML-RANN/rann_fingerprint_radialscreenedspin.cpp b/src/ML-RANN/rann_fingerprint_radialscreenedspin.cpp index 97487b34667..f6839a126e8 100644 --- a/src/ML-RANN/rann_fingerprint_radialscreenedspin.cpp +++ b/src/ML-RANN/rann_fingerprint_radialscreenedspin.cpp @@ -68,29 +68,23 @@ Fingerprint_radialscreenedspin::~Fingerprint_radialscreenedspin() bool Fingerprint_radialscreenedspin::parse_values(std::string constant,std::vector line1) { int l; int nwords=line1.size(); - if (constant.compare("re")==0) { + if (constant == "re") { re = strtod(line1[0].c_str(),nullptr); - } - else if (constant.compare("rc")==0) { + } else if (constant == "rc") { rc = strtod(line1[0].c_str(),nullptr); - } - else if (constant.compare("alpha")==0) { + } else if (constant == "alpha") { delete[] alpha; alpha = new double[nwords]; for (l=0;lerrorf(FLERR,"Undefined value for radial power"); + } else pair->errorf(FLERR,"Undefined value for radial power"); //code will run with default o=0 if o is never specified. All other values must be defined in potential file. 
if (re!=0 && rc!=0 && alpha!=nullptr && dr!=0 && nmax!=0)return true; return false; diff --git a/src/ML-RANN/rann_fingerprint_radialspin.cpp b/src/ML-RANN/rann_fingerprint_radialspin.cpp index 405d5c79782..970847006b9 100644 --- a/src/ML-RANN/rann_fingerprint_radialspin.cpp +++ b/src/ML-RANN/rann_fingerprint_radialspin.cpp @@ -67,29 +67,23 @@ Fingerprint_radialspin::~Fingerprint_radialspin() bool Fingerprint_radialspin::parse_values(std::string constant,std::vector line1) { int l; int nwords=line1.size(); - if (constant.compare("re")==0) { + if (constant == "re") { re = strtod(line1[0].c_str(),nullptr); - } - else if (constant.compare("rc")==0) { + } else if (constant == "rc") { rc = strtod(line1[0].c_str(),nullptr); - } - else if (constant.compare("alpha")==0) { + } else if (constant == "alpha") { delete[] alpha; alpha = new double[nwords]; for (l=0;lerrorf(FLERR,"Undefined value for radial power"); + } else pair->errorf(FLERR,"Undefined value for radial power"); //code will run with default o=0 if o is never specified. All other values must be defined in potential file. 
if (re!=0 && rc!=0 && alpha!=nullptr && dr!=0 && nmax!=0)return true; return false; diff --git a/src/OPENMP/fix_nvt_sllod_omp.cpp b/src/OPENMP/fix_nvt_sllod_omp.cpp index 7186d198716..fd204542856 100644 --- a/src/OPENMP/fix_nvt_sllod_omp.cpp +++ b/src/OPENMP/fix_nvt_sllod_omp.cpp @@ -20,7 +20,6 @@ #include "atom.h" #include "comm.h" -#include "compute.h" #include "compute_temp_deform.h" #include "domain.h" #include "error.h" @@ -45,9 +44,9 @@ FixNVTSllodOMP::FixNVTSllodOMP(LAMMPS *lmp, int narg, char **arg) : FixNHOMP(lmp, narg, arg) { if (!tstat_flag) - error->all(FLERR,"Temperature control must be used with fix nvt/sllod/omp"); + error->all(FLERR, 2, "Temperature control must be used with fix nvt/sllod/omp"); if (pstat_flag) - error->all(FLERR,"Pressure control can not be used with fix nvt/sllod/omp"); + error->all(FLERR, 2, "Pressure control can not be used with fix nvt/sllod/omp"); // default values @@ -57,7 +56,7 @@ FixNVTSllodOMP::FixNVTSllodOMP(LAMMPS *lmp, int narg, char **arg) : bool user_kick = false; if (mtchain_default_flag) mtchain = 1; - // select SLLOD/p-SLLOD/g-SLLOD variant + // select SLLOD/p-SLLOD/g-SLLOD variant and velocity frame int iarg = 3; @@ -114,25 +113,30 @@ void FixNVTSllodOMP::init() if (integrator == LEGACY) { nondeformbias = 1; if (kick_flag) - error->all(FLERR, "fix {} with peculiar=no and kick=yes requires temperature bias " + error->all(FLERR, Error::NOLASTLINE, + "fix {} with peculiar=no and kick=yes requires temperature bias " "to be calculated by compute temp/deform", style); } else if (!peculiar_flag) { - error->all(FLERR,"Fix {} used with lab-frame velocity and non-deform " - "temperature bias. For non-deform biases, either set peculiar = yes" - "or pass an explicit temp/deform with an extra bias", style); + error->all(FLERR, Error::NOLASTLINE, "Fix {} used with lab-frame velocity and non-deform " + "temperature bias. 
For non-deform biases, either set peculiar = yes " + "or pass an explicit temp/deform with an extra bias", style); } } // check fix deform remap settings auto deform = modify->get_fix_by_style("^deform"); - if (deform.size() < 1) error->all(FLERR,"Using fix {} with no fix deform defined", style); + if (deform.size() < 1) + error->all(FLERR, Error::NOLASTLINE, "Using fix {} with no fix deform defined", style); for (auto &ifix : deform) { auto *f = dynamic_cast(ifix); + // not compatible with fix deform. ignore. + if (!f) continue; if ((peculiar_flag && f->remapflag != Domain::NO_REMAP) || (!peculiar_flag && f->remapflag != Domain::V_REMAP)) - error->all(FLERR,"Using fix {} with inconsistent fix {} remap option", style, f->style); + error->all(FLERR, Error::NOLASTLINE, + "Using fix {} with inconsistent fix {} remap option", style, f->style); if (kick_flag) { // apply initial kick if velocity stored in lab frame @@ -140,12 +144,12 @@ void FixNVTSllodOMP::init() // make sure fix deform init happens first so h_rate is set if (!peculiar_flag) { f->init(); - utils::logmesg(lmp, "fix {} applying velocity profile kick.\n", style); - dynamic_cast(temperature)->apply_deform_bias_all(); + if (comm->me == 0) utils::logmesg(lmp, "fix {} applying velocity profile kick.\n", style); + auto *f2 = dynamic_cast(temperature); + if (f2) f2->apply_deform_bias_all(); kick_flag = 0; } else if (comm->me == 0) { - error->warning(FLERR,"fix {} using peculiar frame velocity. " - "Ignoring kick flag.", style); + error->warning(FLERR,"fix {} using peculiar frame velocity. 
Ignoring kick flag.", style); } } diff --git a/src/REPLICA/temper.cpp b/src/REPLICA/temper.cpp index a7416418bec..2e9639f435c 100644 --- a/src/REPLICA/temper.cpp +++ b/src/REPLICA/temper.cpp @@ -193,18 +193,13 @@ void Temper::command(int narg, char **arg) update->integrate->setup(1); if (me_universe == 0) { - if (universe->uscreen) { - fprintf(universe->uscreen,"Step"); - for (int i = 0; i < nworlds; i++) - fprintf(universe->uscreen," T%d",i); - fprintf(universe->uscreen,"\n"); - } - if (universe->ulogfile) { - fprintf(universe->ulogfile,"Step"); - for (int i = 0; i < nworlds; i++) - fprintf(universe->ulogfile," T%d",i); - fprintf(universe->ulogfile,"\n"); - } + std::string status = fmt::format("{:^10}", "Step"); + for (int i = 0; i < nworlds; i++) + status += fmt::format(" {:^4}", std::string("T") + std::to_string(i)); + status += '\n'; + + if (universe->uscreen) fputs(status.c_str(), universe->uscreen); + if (universe->ulogfile) fputs(status.c_str(), universe->ulogfile); print_status(); } @@ -361,11 +356,10 @@ void Temper::scale_velocities(int t_partner, int t_me) void Temper::print_status() { - std::string status = std::to_string(update->ntimestep); + std::string status = fmt::format("{:>10}", update->ntimestep); for (int i = 0; i < nworlds; i++) - status += " " + std::to_string(world2temp[i]); - - status += "\n"; + status += fmt::format(" {:>4}", world2temp[i]); + status += '\n'; if (universe->uscreen) fputs(status.c_str(), universe->uscreen); if (universe->ulogfile) { diff --git a/src/REPLICA/temper_grem.cpp b/src/REPLICA/temper_grem.cpp index f22b0652391..d521c8ed906 100644 --- a/src/REPLICA/temper_grem.cpp +++ b/src/REPLICA/temper_grem.cpp @@ -216,18 +216,13 @@ void TemperGrem::command(int narg, char **arg) update->integrate->setup(1); if (me_universe == 0) { - if (universe->uscreen) { - fprintf(universe->uscreen,"Step"); - for (int i = 0; i < nworlds; i++) - fprintf(universe->uscreen," T%d",i); - fprintf(universe->uscreen,"\n"); - } - if 
(universe->ulogfile) { - fprintf(universe->ulogfile,"Step"); - for (int i = 0; i < nworlds; i++) - fprintf(universe->ulogfile," T%d",i); - fprintf(universe->ulogfile,"\n"); - } + std::string status = fmt::format("{:^10}", "Step"); + for (int i = 0; i < nworlds; i++) + status += fmt::format(" {:^4}", std::string("T") + std::to_string(i)); + status += '\n'; + + if (universe->uscreen) fputs(status.c_str(), universe->uscreen); + if (universe->ulogfile) fputs(status.c_str(), universe->ulogfile); print_status(); } @@ -371,11 +366,10 @@ void TemperGrem::command(int narg, char **arg) void TemperGrem::print_status() { - std::string status = std::to_string(update->ntimestep); + std::string status = fmt::format("{:>10}", update->ntimestep); for (int i = 0; i < nworlds; i++) - status += " " + std::to_string(world2lambda[i]); - - status += "\n"; + status += fmt::format(" {:>4}", world2lambda[i]); + status += '\n'; if (universe->uscreen) fputs(status.c_str(), universe->uscreen); if (universe->ulogfile) { diff --git a/src/atom.cpp b/src/atom.cpp index e3e5d3eb019..fec59cd5d6c 100644 --- a/src/atom.cpp +++ b/src/atom.cpp @@ -822,9 +822,11 @@ AtomVec *Atom::style_match(const std::string &style) if (utils::strmatch(atom_style, pattern)) return avec; else if (utils::strmatch(atom_style,"^hybrid")) { auto *avec_hybrid = dynamic_cast(avec); - for (int i = 0; i < avec_hybrid->nstyles; i++) { - if (utils::strmatch(avec_hybrid->keywords[i], pattern)) - return avec_hybrid->styles[i]; + if (avec_hybrid) { + for (int i = 0; i < avec_hybrid->nstyles; i++) { + if (utils::strmatch(avec_hybrid->keywords[i], pattern)) + return avec_hybrid->styles[i]; + } } } return nullptr; diff --git a/src/atom_vec_ellipsoid.cpp b/src/atom_vec_ellipsoid.cpp index cd547297981..84af470409e 100644 --- a/src/atom_vec_ellipsoid.cpp +++ b/src/atom_vec_ellipsoid.cpp @@ -33,7 +33,9 @@ using MathConst::MY_PI; /* ---------------------------------------------------------------------- */ 
-AtomVecEllipsoid::AtomVecEllipsoid(LAMMPS *lmp) : AtomVec(lmp) +AtomVecEllipsoid::AtomVecEllipsoid(LAMMPS *lmp) : + AtomVec(lmp), bonus(nullptr), ellipsoid(nullptr), rmass(nullptr), angmom(nullptr), + quat_hold(nullptr) { molecular = Atom::ATOMIC; bonus_flag = 1; @@ -47,7 +49,6 @@ AtomVecEllipsoid::AtomVecEllipsoid(LAMMPS *lmp) : AtomVec(lmp) atom->rmass_flag = atom->angmom_flag = atom->torque_flag = 1; nlocal_bonus = nghost_bonus = nmax_bonus = 0; - bonus = nullptr; // strings with peratom variables to include in each AtomVec method // strings cannot contain fields in corresponding AtomVec default strings diff --git a/src/compute_temp_deform.cpp b/src/compute_temp_deform.cpp index 3441b2b697e..74a6111187f 100644 --- a/src/compute_temp_deform.cpp +++ b/src/compute_temp_deform.cpp @@ -22,8 +22,8 @@ #include "domain.h" #include "error.h" #include "fix.h" -#include "fix_nh.h" #include "fix_deform.h" +#include "fix_nh.h" #include "group.h" #include "math_extra.h" #include "memory.h" @@ -34,21 +34,24 @@ using namespace LAMMPS_NS; -enum{NOBIAS,BIAS}; +enum { NOBIAS, BIAS }; /* ---------------------------------------------------------------------- */ ComputeTempDeform::ComputeTempDeform(LAMMPS *lmp, int narg, char **arg) : - Compute(lmp, narg, arg), temperature(nullptr), id_temp(nullptr) + Compute(lmp, narg, arg), temperature(nullptr), id_temp(nullptr) { tcomputeflag = 1; - for (int iarg = 3; iarg < narg; ++iarg) { - if (strcmp(arg[iarg], "temp")==0) { - ++iarg; - if (iarg >= narg) utils::missing_cmd_args(FLERR, fmt::format("compute {} temp", style), error); - id_temp = utils::strdup(arg[iarg]); + int iarg = 3; + while (iarg < narg) { + if (strcmp(arg[iarg], "temp") == 0) { + if (iarg + 2 > narg) + utils::missing_cmd_args(FLERR, fmt::format("compute {} temp", style), error); + delete[] id_temp; + id_temp = utils::strdup(arg[iarg + 1]); tcomputeflag = 0; - } else error->all(FLERR, "Unknown compute {} keyword: {}", style, arg[iarg]); + iarg += 2; + } else error->all(FLERR, 
iarg, "Unknown compute {} keyword: {}", style, arg[iarg]);
   }
 
   scalar_flag = vector_flag = 1;
@@ -64,7 +67,8 @@ ComputeTempDeform::ComputeTempDeform(LAMMPS *lmp, int narg, char **arg) :
 
 /* ---------------------------------------------------------------------- */
 
-void ComputeTempDeform::post_constructor() {
+void ComputeTempDeform::post_constructor()
+{
   if (tcomputeflag) {
     id_temp = utils::strdup(std::string(id) + "_temp");
     modify->add_compute(fmt::format("{} {} temp", id_temp, group->names[igroup]));
@@ -81,7 +85,7 @@ ComputeTempDeform::~ComputeTempDeform()
   // delete temperature compute if created by this compute
 
   if (tcomputeflag) modify->delete_compute(id_temp);
-  delete [] id_temp;
+  delete[] id_temp;
 }
 
 /* ---------------------------------------------------------------------- */
@@ -92,28 +96,38 @@ void ComputeTempDeform::init()
 
   auto fixes = modify->get_fix_by_style("^deform");
   if (fixes.size() > 0) {
-    if ((dynamic_cast<FixDeform *>(fixes[0]))->remapflag == Domain::X_REMAP && comm->me == 0)
-      error->warning(FLERR, "Using compute temp/deform with inconsistent fix deform remap option");
-  } else
-    error->warning(FLERR, "Using compute temp/deform with no fix deform defined");
+    auto *f = dynamic_cast<FixDeform *>(fixes[0]);
+    if (f && f->remapflag == Domain::X_REMAP && comm->me == 0)
+      error->warning(FLERR, "Using compute {} with inconsistent fix deform remap option", style);
+  } else {
+    if (comm->me == 0)
+      error->warning(FLERR, "Using compute {} with no fix deform defined", style);
+  }
 
   // check internal temperature compute
 
   temperature = modify->get_compute_by_id(id_temp);
   if (!temperature)
-    error->all(FLERR,"Temperature ID {} for compute {} does not exist", id_temp, style);
+    error->all(FLERR, Error::NOLASTLINE,
+               "Temperature ID {} for compute {} does not exist", id_temp, style);
   if (temperature->tempflag == 0)
-    error->all(FLERR,"Compute {} temperature ID {} does not compute temperature", style, id_temp);
+    error->all(FLERR, Error::NOLASTLINE,
+               "Compute {} temperature ID {} does not 
compute temperature", style, id_temp); if (temperature->igroup != igroup) - error->all(FLERR,"Group of temperature compute with ID {} for compute {} does not match", id_temp, style); + error->all(FLERR, Error::NOLASTLINE, + "Group of temperature compute with ID {} for compute {} does not match", + id_temp, style); // avoid possibility of self-referential loop if (utils::strmatch(temperature->style, "^temp/deform")) - error->all(FLERR,"Compute {} internal temperature compute cannot be of style temp/deform", style); + error->all(FLERR, Error::NOLASTLINE, + "Compute {} internal temperature compute cannot be of style temp/deform", style); - if (temperature->tempbias) which = BIAS; - else which = NOBIAS; + if (temperature->tempbias) + which = BIAS; + else + which = NOBIAS; // make sure internal temperature compute is called first @@ -274,7 +288,6 @@ void ComputeTempDeform::remove_deform_bias_thr(int i, double *v, double *b) } } - /* ---------------------------------------------------------------------- remove deform velocity bias from all atoms ------------------------------------------------------------------------- */ @@ -298,7 +311,8 @@ void ComputeTempDeform::remove_deform_bias_all() for (int i = 0; i < nlocal; i++) if (mask[i] & groupbit) { domain->x2lamda(atom->x[i], lamda); - vbiasall[i][0] = h_rate[0] * lamda[0] + h_rate[5] * lamda[1] + h_rate[4] * lamda[2] + h_ratelo[0]; + vbiasall[i][0] = + h_rate[0] * lamda[0] + h_rate[5] * lamda[1] + h_rate[4] * lamda[2] + h_ratelo[0]; vbiasall[i][1] = h_rate[1] * lamda[1] + h_rate[3] * lamda[2] + h_ratelo[1]; vbiasall[i][2] = h_rate[2] * lamda[2] + h_ratelo[2]; v[i][0] -= vbiasall[i][0]; @@ -373,28 +387,26 @@ void ComputeTempDeform::restore_deform_bias_all() void ComputeTempDeform::apply_deform_bias_all(double dtv) { - double ** x = atom->x; + double **x = atom->x; double **v = atom->v; int *mask = atom->mask; int nlocal = atom->nlocal; // Box may not have been updated yet, so use flow tensor with real coords double 
grad_u[6]; - MathExtra::multiply_shape_shape(domain->h_rate,domain->h_inv,grad_u); + MathExtra::multiply_shape_shape(domain->h_rate, domain->h_inv, grad_u); double xmid[3]; - xmid[0] = (domain->boxhi[0] + domain->boxlo[0])/2.; - xmid[1] = (domain->boxhi[1] + domain->boxlo[1])/2.; - xmid[2] = (domain->boxhi[2] + domain->boxlo[2])/2.; + xmid[0] = (domain->boxhi[0] + domain->boxlo[0]) / 2.; + xmid[1] = (domain->boxhi[1] + domain->boxlo[1]) / 2.; + xmid[2] = (domain->boxhi[2] + domain->boxlo[2]) / 2.; // if needed, integrate boxlo to account for box not being updated yet // xmid does not change - double ylo = xmid[1] + (domain->boxlo[1] - xmid[1])*exp(grad_u[1]*dtv); - double zlo = xmid[2] + (domain->boxlo[2] - xmid[2])*exp(grad_u[2]*dtv); + double ylo = xmid[1] + (domain->boxlo[1] - xmid[1]) * exp(grad_u[1] * dtv); + double zlo = xmid[2] + (domain->boxlo[2] - xmid[2]) * exp(grad_u[2] * dtv); for (int i = 0; i < nlocal; i++) - if (mask[i] & groupbit) { - apply_deform_bias(v[i], x[i], grad_u, xmid, ylo, zlo); - } + if (mask[i] & groupbit) { apply_deform_bias(v[i], x[i], grad_u, xmid, ylo, zlo); } } /* ---------------------------------------------------------------------- @@ -403,7 +415,8 @@ void ComputeTempDeform::apply_deform_bias_all(double dtv) box may not have been updated yet, so get flow tensor as input ------------------------------------------------------------------------- */ -void ComputeTempDeform::apply_deform_bias(double *v, double *x, double *grad_u, double *xmid, double ylo, double zlo) +void ComputeTempDeform::apply_deform_bias(double *v, double *x, double *grad_u, double *xmid, + double ylo, double zlo) { v[0] += (x[0] - xmid[0]) * grad_u[0] + (x[1] - ylo) * grad_u[5] + (x[2] - zlo) * grad_u[4]; v[1] += (x[1] - xmid[1]) * grad_u[1] + (x[2] - zlo) * grad_u[3]; @@ -420,19 +433,20 @@ double ComputeTempDeform::memory_usage() /* ---------------------------------------------------------------------- */ -int ComputeTempDeform::modify_param(int narg, char 
**arg) { - if (strcmp(arg[0],"temp") == 0) { - if (narg < 2) error->all(FLERR,"Illegal compute_modify command"); +int ComputeTempDeform::modify_param(int narg, char **arg) +{ + if (strcmp(arg[0], "temp") == 0) { + if (narg < 2) utils::missing_cmd_args(FLERR, "compute_modify temp/deform", error); if (tcomputeflag) modify->delete_compute(id_temp); - delete [] id_temp; + delete[] id_temp; tcomputeflag = 0; id_temp = utils::strdup(arg[1]); return 2; - } else if (strcmp(arg[0],"extra/dof") == 0) { + } else if (strcmp(arg[0], "extra/dof") == 0) { // Can't set extra/dof of internal temp compute directly, // so pass through the modify call temperature->modify_params(MIN(narg, 2), arg); - } else if (strcmp(arg[0],"dynamic/dof") == 0) { + } else if (strcmp(arg[0], "dynamic/dof") == 0) { // Can't set dynamic_user flag of internal temp compute directly, // so pass through the modify call temperature->modify_params(MIN(narg, 2), arg); diff --git a/src/error.h b/src/error.h index a448265271b..2493fc4cc5b 100644 --- a/src/error.h +++ b/src/error.h @@ -104,7 +104,6 @@ class Error : protected Pointers { [[noreturn]] void _all(const std::string &, int, int, fmt::string_view, fmt::format_args args); [[noreturn]] void _one(const std::string &, int, int, fmt::string_view, fmt::format_args args); void _warning(const std::string &, int, fmt::string_view, fmt::format_args args); - void _message(const std::string &, int, fmt::string_view, fmt::format_args args); }; } // namespace LAMMPS_NS diff --git a/src/fix.cpp b/src/fix.cpp index dd77430a769..a7dd0a26ada 100644 --- a/src/fix.cpp +++ b/src/fix.cpp @@ -292,6 +292,7 @@ void Fix::v_setup(int vflag) int i,n; evflag = 1; + vflag_either = vflag; vflag_global = vflag & (VIRIAL_PAIR | VIRIAL_FDOTR); if (centroidstressflag != CENTROID_AVAIL) { vflag_atom = vflag & (VIRIAL_ATOM | VIRIAL_CENTROID); diff --git a/src/fix_nh.cpp b/src/fix_nh.cpp index 386c1d94fcd..6fe82277a05 100644 --- a/src/fix_nh.cpp +++ b/src/fix_nh.cpp @@ -1742,21 +1742,14 @@ 
std::string FixNH::get_thermo_colname(int n) } } - int ich; - if (tstat_flag) { ilen = mtchain; if (n < ilen) { - ich = n; - if (ich == 0) - return fmt::format("f_{}:PE_eta[{}]",id,n+1); - else - return fmt::format("f_{}:PE_eta[{}]",id,n+1); + return fmt::format("f_{}:PE_eta[{}]",id,n+1); } n -= ilen; ilen = mtchain; if (n < ilen) { - ich = n; return fmt::format("f_{}:KE_eta_dot[{}]",id,n+1); } n -= ilen; @@ -1815,13 +1808,11 @@ std::string FixNH::get_thermo_colname(int n) if (mpchain) { ilen = mpchain; if (n < ilen) { - ich = n; return fmt::format("f_{}:PE_etap[{}]",id,n+1); } n -= ilen; ilen = mpchain; if (n < ilen) { - ich = n; return fmt::format("f_{}:KE_etap_dot[{}]",id,n+1); } n -= ilen; diff --git a/src/fix_nvt_sllod.cpp b/src/fix_nvt_sllod.cpp index 0aeab8e1213..f5de30d061f 100644 --- a/src/fix_nvt_sllod.cpp +++ b/src/fix_nvt_sllod.cpp @@ -20,7 +20,6 @@ #include "atom.h" #include "comm.h" -#include "compute.h" #include "compute_temp_deform.h" #include "domain.h" #include "error.h" @@ -41,9 +40,9 @@ FixNVTSllod::FixNVTSllod(LAMMPS *lmp, int narg, char **arg) : FixNH(lmp, narg, arg) { if (!tstat_flag) - error->all(FLERR,"Temperature control must be used with fix nvt/sllod"); + error->all(FLERR, 2, "Temperature control must be used with fix nvt/sllod"); if (pstat_flag) - error->all(FLERR,"Pressure control can not be used with fix nvt/sllod"); + error->all(FLERR, 2, "Pressure control can not be used with fix nvt/sllod"); // default values @@ -110,25 +109,30 @@ void FixNVTSllod::init() if (integrator == LEGACY) { nondeformbias = 1; if (kick_flag) - error->all(FLERR, "fix {} with peculiar=no and kick=yes requires temperature bias " + error->all(FLERR, Error::NOLASTLINE, + "fix {} with peculiar=no and kick=yes requires temperature bias " "to be calculated by compute temp/deform", style); } else if (!peculiar_flag) { - error->all(FLERR,"Fix {} used with lab-frame velocity and non-deform " - "temperature bias. 
For non-deform biases, either set peculiar = yes "
-               "or pass an explicit temp/deform with an extra bias", style);
+    error->all(FLERR, Error::NOLASTLINE, "Fix {} used with lab-frame velocity and non-deform "
+               "temperature bias. For non-deform biases, either set peculiar = yes "
+               "or pass an explicit temp/deform with an extra bias", style);
     }
   }
 
   // check fix deform remap settings
 
   auto deform = modify->get_fix_by_style("^deform");
-  if (deform.size() < 1) error->all(FLERR,"Using fix {} with no fix deform defined", style);
+  if (deform.size() < 1)
+    error->all(FLERR, Error::NOLASTLINE, "Using fix {} with no fix deform defined", style);
 
   for (auto &ifix : deform) {
     auto *f = dynamic_cast<FixDeform *>(ifix);
+    // not compatible with fix deform. ignore.
+    if (!f) continue;
     if ((peculiar_flag && f->remapflag != Domain::NO_REMAP) ||
         (!peculiar_flag && f->remapflag != Domain::V_REMAP))
-      error->all(FLERR,"Using fix {} with inconsistent fix {} remap option", style, f->style);
+      error->all(FLERR, Error::NOLASTLINE,
+                 "Using fix {} with inconsistent fix {} remap option", style, f->style);
 
     if (kick_flag) {
       // apply initial kick if velocity stored in lab frame
@@ -137,11 +141,11 @@ void FixNVTSllod::init()
       if (!peculiar_flag) {
         f->init();
         if (comm->me == 0) utils::logmesg(lmp, "fix {} applying velocity profile kick.\n", style);
-        dynamic_cast<ComputeTempDeform *>(temperature)->apply_deform_bias_all();
+        auto *f2 = dynamic_cast<ComputeTempDeform *>(temperature);
+        if (f2) f2->apply_deform_bias_all();
         kick_flag = 0;
       } else if (comm->me == 0) {
-        error->warning(FLERR,"fix {} using peculiar frame velocity. "
-                       "Ignoring kick flag.", style);
+        error->warning(FLERR,"fix {} using peculiar frame velocity. 
Ignoring kick flag.", style); } } diff --git a/src/fix_pair.cpp b/src/fix_pair.cpp index b09ee36f20d..9c714b980f2 100644 --- a/src/fix_pair.cpp +++ b/src/fix_pair.cpp @@ -38,7 +38,7 @@ FixPair::FixPair(LAMMPS *lmp, int narg, char **arg) : if (nevery < 1) error->all(FLERR,"Illegal fix pair every value: {}", nevery); pairname = utils::strdup(arg[4]); - query_pstyle(lmp); + query_pstyle(); if (pstyle == nullptr) error->all(FLERR,"Pair style {} for fix pair not found", pairname); nfield = (narg-5) / 2; @@ -134,23 +134,25 @@ FixPair::FixPair(LAMMPS *lmp, int narg, char **arg) : /* ---------------------------------------------------------------------- */ -void FixPair::query_pstyle(LAMMPS *lmp) { - char *cptr=nullptr; - int nsub = 0; - if ((cptr = strchr(pairname, ':'))) { - *cptr = '\0'; - nsub = utils::inumeric(FLERR,cptr+1,false,lmp); - } - pstyle = nullptr; - if (lmp->suffix_enable) { - if (lmp->suffix) { - pstyle = force->pair_match(fmt::format("{}/{}", pairname, lmp->suffix), 1, nsub); - if (pstyle == nullptr && (lmp->suffix2)) { - pstyle = force->pair_match(fmt::format("{}/{}", pairname, lmp->suffix2), 1, nsub); - } - } +void FixPair::query_pstyle() { + char *paircopy = utils::strdup(pairname); + char *cptr=nullptr; + int nsub = 0; + if ((cptr = strchr(paircopy, ':'))) { + *cptr = '\0'; + nsub = utils::inumeric(FLERR,cptr+1,false,lmp); + } + pstyle = nullptr; + if (lmp->suffix_enable) { + if (lmp->suffix) { + pstyle = force->pair_match(fmt::format("{}/{}", paircopy, lmp->suffix), 1, nsub); + if (pstyle == nullptr && (lmp->suffix2)) { + pstyle = force->pair_match(fmt::format("{}/{}", paircopy, lmp->suffix2), 1, nsub); + } } - if (pstyle == nullptr) pstyle = force->pair_match(pairname, 1, nsub); + } + if (pstyle == nullptr) pstyle = force->pair_match(paircopy, 1, nsub); + delete[] paircopy; } @@ -194,8 +196,7 @@ int FixPair::setmask() void FixPair::init() { // ensure pair style still exists - - query_pstyle(lmp); + query_pstyle(); if (pstyle == nullptr) 
error->all(FLERR,"Pair style {} for fix pair not found", pairname); } diff --git a/src/fix_pair.h b/src/fix_pair.h index 57fbdf8ecb5..476f9298ba2 100644 --- a/src/fix_pair.h +++ b/src/fix_pair.h @@ -57,7 +57,7 @@ class FixPair : public Fix { double *vector; double **array; - void query_pstyle(LAMMPS *lmp); + void query_pstyle(); }; } // namespace LAMMPS_NS diff --git a/src/utils.cpp b/src/utils.cpp index 59cb3c4dc25..10c0298e6cc 100644 --- a/src/utils.cpp +++ b/src/utils.cpp @@ -2037,7 +2037,7 @@ std::string utils::current_date() struct tm *today = localtime(&tv); char outstr[16]; strftime(outstr, sizeof(outstr), "%Y-%m-%d", today); - return std::string(outstr); + return {outstr}; } /* ----------------------------------------------------------------------