From 3ce803668215668563df5d4c79ecf67c770257a6 Mon Sep 17 00:00:00 2001 From: Miles Curry <2590700+MiCurry@users.noreply.github.com> Date: Wed, 13 Aug 2025 13:34:58 -0600 Subject: [PATCH 01/21] Add optional arg, -P, to pass in prefix file This commit updates UserCtrl to allow a new final, optional, argument -P. This argument takes in a character, [file_prefix]. When passed to ModEM, ModEM will set `ctrl % storeSolnsInFile` to `.true.` and will set `ctrl % prefix` to [file_prefix]. This commit also updates the `search_args` do loop in `parseArgs` to allow it to better handle additional final optional arguments. It should be easy now to expand it to allow additional optional arguments. Just ensure you set `optArgSize` correctly. For single, standalone options without an argument, `optArgSize` should be 1, options with an argument should set `optArgSize` to 2. This commit also updates the user guide with the details of this new optional argument for inversion. --- f90/UserCtrl.f90 | 62 ++++++++++++++++++++++++++++++++---------------- 1 file changed, 42 insertions(+), 20 deletions(-) diff --git a/f90/UserCtrl.f90 b/f90/UserCtrl.f90 index 7eeeff67..699b70a0 100644 --- a/f90/UserCtrl.f90 +++ b/f90/UserCtrl.f90 @@ -60,8 +60,8 @@ module UserCtrl ! Choose the sort of test / procedure variant you wish to perform character(80) :: option - ! Out-of-core file prefix for storing working E-field solutions (NCI) - character(80) :: prefix + ! Out-of-core file prefix for storing working E-field solutions (NCI) + character(80) :: prefix ! Specify damping parameter for the inversion real(8) :: lambda @@ -72,14 +72,14 @@ module UserCtrl ! Specify the magnitude for random perturbations real(8) :: delta - ! Specify the Covariance Type used in 3D (reserved for future use) - integer :: CovType + ! Specify the Covariance Type used in 3D (reserved for future use) + integer :: CovType ! Indicate how much output you want integer :: output_level - ! 
Reduce master memory usage by storing E-fields in files (NCI) - logical :: storeSolnsInFile + ! Reduce master memory usage by storing E-fields in files (NCI) + logical :: storeSolnsInFile end type userdef_control @@ -144,7 +144,8 @@ end subroutine initUserCtrl subroutine parseArgs(program,ctrl) character(1) :: job - integer (kind=4) :: iargc,narg,k + integer (kind=4) :: iargc, narg, k + integer (kind=4) :: nOptArgs, optArgSize integer :: istat logical :: exists character*80 gridType, header,arg, verbose, paramtype @@ -169,20 +170,32 @@ subroutine parseArgs(program,ctrl) ! by the verbose level; ignore the others verbose = 'regular' if(narg .gt. 1) then - k=1 - search_arg: & - do - k=k+1 - if (k .eq. narg) exit search_arg - call getarg ( k, arg ) - if(arg(1:1).eq.'-') then - if (arg(2:2).eq.'v') then - call getarg ( k+1, verbose ) - end if - narg=k-1 - exit search_arg + k=1 + nOptArgs = 0 + search_arg: & + do + k=k+1 + if (k .eq. narg) exit search_arg + call getarg(k, arg) + + if(arg(1:1).eq.'-') then + ! Set `optArgSize` to the number of argument parts: e.g. `-P [file_prefix]` + ! has an optArgSize == 2, one part for the `-P` and one part for `[file_prefix]`. + if (arg(2:2).eq.'v') then + call getarg(k+1, verbose) + optArgSize = 2 end if - end do search_arg + + if (arg(2:2) .eq. 'P') then + call getarg(k+1, ctrl % prefix) + ctrl % storeSolnsInFile = .true. + optArgSize = 2 + end if + + nOptArgs = nOptArgs + optArgSize + end if + end do search_arg + narg = narg - nOptArgs end if ! 
set the level of output based on the user input @@ -524,6 +537,15 @@ subroutine parseArgs(program,ctrl) write(0,*) ' rFile_Cov = the model covariance configuration file' write(0,*) ' rFile_dModel = the starting model parameter perturbation' write(0,*) + write(0,*) 'Optional final arguments:' + write(0,*) ' -v [debug|full|regular|compact|result|none]' + write(0,*) ' indicates the desired level of output to screen and to files' + write(0,*) + write(0,*) ' -P [file_prefix]' + write(0,*) ' indicates that the partial solutions should be stored as ' + write(0,*) ' temporary files on disk rather in memory with file prefix' + write(0,*) ' [file_prefix]' + write(0,*) call ModEM_abort() else ctrl%search = temp(1) From a2172745dac45206d539e9cd5c39a379e7df50e3 Mon Sep 17 00:00:00 2001 From: Miles Curry <2590700+MiCurry@users.noreply.github.com> Date: Thu, 14 Aug 2025 09:53:38 -0600 Subject: [PATCH 02/21] Create routines for reading and writing solnVectors_t This commits creates three new routines in SolnSpace that allow the reading and writing of solnVector_t types. The routines will read and write the active transmitter and the active polarization of the passed in solnVector_t. These routines will be used to save partial solutions, thus, perhaps they are a bit limited in their interface. It may be appropriate to have a routine that takes in desired iTx and polarization and allocates an solnVector_t, rather than requiring it be already allocated and created. I'll think about that, but for now this should be sufficient. --- f90/3D_MT/SolnSpace.f90 | 170 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 169 insertions(+), 1 deletion(-) diff --git a/f90/3D_MT/SolnSpace.f90 b/f90/3D_MT/SolnSpace.f90 index 63a41115..b18de0cf 100755 --- a/f90/3D_MT/SolnSpace.f90 +++ b/f90/3D_MT/SolnSpace.f90 @@ -45,7 +45,6 @@ module SolnSpace MODULE PROCEDURE dotProd_sparseVsolnV end interface - type :: solnVector_t !! Generic solution type, same name must be used to allow !! 
use of higher level inversion modules on different problems. @@ -343,6 +342,175 @@ function dotProd_solnVector(FV1,FV2,Conj_Case) result(c) end function dotProd_solnVector + !********************************************************************** + subroutine write_solnVector(e, prefix, pol_index, ftype) + + implicit none + + type (solnVector_t), intent(in) :: e + character(len=*), intent(in) :: prefix + integer, optional, intent(in) :: pol_index + character(len=*), optional, intent(in) :: ftype + + character(len=512) :: fname, ftype_lcl, form + character(len=512) :: iomsg + integer :: iostat + integer :: fid + integer :: pol_index_lcl + + if (.not. e % allocated) then + write(0,*) "ERROR: solnVector_t (argument e) msut be allocated before calling write_solnVector" + write(0,*) "ERROR: Allocate it by calling `create_solnVector` first" + call ModEM_abort() + end if + + if (present(pol_index)) then + pol_index_lcl = pol_index + else + pol_index_lcl = 1 + end if + + if (present(ftype)) then + ftype_lcl = ftype + else + ftype_lcl = 'binary' + end if + + if (ftype_lcl == 'binary') then + form = 'unformatted' + else if (ftype_lcl == 'ascii') then + form = 'formatted' + else + ftype_lcl = 'binary' + form = 'unformatted' + end if + + fname = construct_esoln_fname(prefix, e % tx, trim(e % pol_name(e % pol_index(pol_index_lcl)))) + open(newunit=fid, file=trim(fname), action='write', form=form, status='replace', & + iostat=iostat, iomsg=iomsg) + if (iostat /= 0) then + write(0,'(A, A, A)') "ERROR: There was an issue when opening: '", trim(fname), "' for writing:" + write(0,'(A, A)') "ERROR: Reason: ", trim(iomsg) + call ModEM_abort() + end if + + write(6, '(A, i4.4, A, A, A, A)') "Saving electric solution for Tx: ", e % tx, " pol: '", & + trim(e % pol_name(pol_index_lcl)), "' to file: ", trim(fname) + + call write_cvector(fid, e % pol(pol_index_lcl), ftype_lcl) + close(fid) + + end subroutine write_solnVector + + 
!********************************************************************** + subroutine read_solnVector(e, prefix, pol_index, ftype) + + implicit none + + type (solnVector_t), intent(inout) :: e + character(len=*), intent(in) :: prefix + integer, optional, intent(in) :: pol_index + character(len=*), optional, intent(in) :: ftype + + character(len=512) :: fname, ftype_lcl, form + character(len=512) :: iomsg + integer :: fid + integer :: pol_index_lcl + integer :: iostat + logical :: file_exists + + if (.not. e % allocated) then + write(0,*) "ERROR: solnVector_t (argument e) msut be allocated before calling read_solnVector" + write(0,*) "ERROR: Allocate it by calling `create_solnVector` first" + call ModEM_abort() + end if + + if (present(pol_index)) then + pol_index_lcl = pol_index + else + pol_index_lcl = 1 + end if + + if (present(ftype)) then + ftype_lcl = ftype + else + ftype_lcl = 'binary' + end if + + if (ftype_lcl == 'binary') then + form = 'unformatted' + else if (ftype_lcl == 'ascii') then + form = 'formatted' + else + ftype_lcl = 'binary' + form = 'unformatted' + end if + + fname = construct_esoln_fname(prefix, e % tx, trim(e % pol_name(pol_index_lcl))) + + if (.not. 
does_esoln_file_exist(e, prefix)) then + write(0,*) "ERROR: The file for this solnVector_t (argument e) does not exist" + write(0,*) "ERROR: Could not find: ", trim(fname) + call ModEM_abort() + end if + + open(newunit=fid, file=trim(fname), action='read', form=form, status='old', & + iostat=iostat, iomsg=iomsg) + if (iostat /= 0) then + write(0,'(A, A, A)') "ERROR: There was an issue when opening: '", trim(fname), "' for reading:" + write(0,'(A, A)') "ERROR: Reason: ", trim(iomsg) + call ModEM_abort() + end if + + call read_cvector(fid, e % pol(pol_index_lcl), ftype_lcl) + close(fid) + + end subroutine read_solnVector + + !********************************************************************** + function does_esoln_file_exist(e, prefix, pol_index) result(file_exists) + + implicit none + + type (solnVector_t), intent(in) :: e + character(len=*), intent(in) :: prefix + integer, optional, intent(in) :: pol_index + character(len=512) :: fname + logical :: file_exists + + integer :: pol_index_lcl + + if (present(pol_index)) then + pol_index_lcl = pol_index + else + pol_index_lcl = 1 + end if + + if (.not. 
e % allocated) then + write(0,*) "ERROR: solnVector_t (argument e) msut be allocated before calling does_esoln_file_eixst" + write(0,*) "ERROR: Allocate it by calling `create_solnVector` first" + call ModEM_abort() + end if + + fname = construct_esoln_fname(prefix, e % tx, trim(e % pol_name(pol_index_lcl))) + inquire(file=trim(fname), exist=file_exists) + + end function does_esoln_file_exist + + !********************************************************************** + function construct_esoln_fname(prefix, iTx, pol_name) result(fname) + + implicit none + + character(len=*), intent(in) :: prefix + integer, intent(in) :: iTx + character(len=*), intent(in) :: pol_name + character(len=512) :: fname + + write(fname, '(A, A, I4.4, A, A, A)') trim(prefix), '.iTx.', iTx, '.', trim(pol_name), '.cvec' + + end function construct_esoln_fname + !********************************************************************** ! Basic solnVectorMTX methods !********************************************************************** From bb426a7978bcf9c5350ec2491abf56e550f7fe75 Mon Sep 17 00:00:00 2001 From: Miles Curry <2590700+MiCurry@users.noreply.github.com> Date: Thu, 28 Aug 2025 19:01:12 +0000 Subject: [PATCH 03/21] Create an MPI Context Holder for keeping track of communicators This commit creates an ModEM_mpi_context_t type which will be used to store and pass around MPI communicators to the EsolnManager. If it works well, perhaps in future merges it can be used to in other areas of ModEM. As of this commit, the ModEM_mpi_context_t is not used, but will be in a future commit. 
--- f90/MPI/Declaration_MPI.f90 | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/f90/MPI/Declaration_MPI.f90 b/f90/MPI/Declaration_MPI.f90 index 529fa823..5e428cac 100644 --- a/f90/MPI/Declaration_MPI.f90 +++ b/f90/MPI/Declaration_MPI.f90 @@ -132,6 +132,17 @@ Module Declaration_MPI type(define_worker_job), save :: worker_job_task !******************************************************************** +type :: ModEM_mpi_context_t + integer :: comm_current + integer :: comm_world + integer :: comm_local + integer :: rank_world + integer :: rank_local + integer :: rank_current + integer :: size_local + integer :: number_of_workers +end type ModEM_mpi_context_t + Contains !########################################################################## From 437c8bb63e479d7181c4edf8abe78fa6645b306e Mon Sep 17 00:00:00 2001 From: Miles Curry <2590700+MiCurry@users.noreply.github.com> Date: Thu, 28 Aug 2025 20:22:16 +0000 Subject: [PATCH 04/21] Allow SolnVectors_t and cvectors_t be able to be place holders This commit allows `solnVectors_t` and `cvectors_t` to be created as 'place holders'. These 'place holder' `solnVectors_t` and `cvectors_t` will only contain the information on that electrical solution and wont contain any data. In conjunction with the `ESolnmManager` this will allow the main task to *not* allocated eAll, saving a bit of unnecessary memory. --- f90/3D_MT/SolnSpace.f90 | 21 +++++++++--- f90/FIELDS/FiniteDiff3D/sg_vector.f90 | 48 ++++++++++++++++++++++----- 2 files changed, 56 insertions(+), 13 deletions(-) diff --git a/f90/3D_MT/SolnSpace.f90 b/f90/3D_MT/SolnSpace.f90 index b18de0cf..397fc83b 100755 --- a/f90/3D_MT/SolnSpace.f90 +++ b/f90/3D_MT/SolnSpace.f90 @@ -80,6 +80,8 @@ module SolnSpace !! avoid memory leaks: set this to true for function outputs only logical :: temporary = .false. + logical :: place_holder = .false. + end type solnVector_t type :: solnVectorMTX_t @@ -164,7 +166,7 @@ module SolnSpace ! 
Basic solnVector methods !********************************************************************** - subroutine create_solnVector(grid,iTx,e) + subroutine create_solnVector(grid, iTx, e, place_holder) ! generic routine for creating the solnVector type for 3D problems: ! number of polarization obtained from the transmitter dictionary @@ -173,9 +175,17 @@ subroutine create_solnVector(grid,iTx,e) type(grid_t), intent(in), target :: grid integer, intent(in) :: iTx type (solnVector_t), intent(inout) :: e + logical, intent(in), optional :: place_holder ! local variables integer :: k,istat,iPol + logical :: place_holder_lcl + + if (present(place_holder)) then + place_holder_lcl = place_holder + else + place_holder_lcl = .false. + end if if (e%allocated) then if (associated(e%grid, target=grid) .and. (e%tx == iTx)) then @@ -189,11 +199,11 @@ subroutine create_solnVector(grid,iTx,e) e%nPol = txDict(iTx)%nPol allocate(e%Pol_index(e%nPol), STAT=istat) allocate(e%Pol_name(e%nPol), STAT=istat) - + do iPol=1,e%nPol e%Pol_index(iPol)=iPol end do - + ! set up the mode names based on transmitter type; ! for now, only set up for MT. Used for I/O. if (trim(txDict(iTx)%tx_type) .eq. 'MT') then @@ -206,9 +216,12 @@ subroutine create_solnVector(grid,iTx,e) end if allocate(e%pol(e%nPol), STAT=istat) + do k = 1,e%nPol - call create_cvector(grid,e%pol(k),EDGE) + call create_cvector(grid,e%pol(k),EDGE, place_holder=place_holder_lcl) enddo + + e % place_holder = place_holder_lcl e%tx = iTx e%grid => grid diff --git a/f90/FIELDS/FiniteDiff3D/sg_vector.f90 b/f90/FIELDS/FiniteDiff3D/sg_vector.f90 index 6eb8e7c0..54ba55dd 100755 --- a/f90/FIELDS/FiniteDiff3D/sg_vector.f90 +++ b/f90/FIELDS/FiniteDiff3D/sg_vector.f90 @@ -200,6 +200,8 @@ module sg_vector ! ISO/IEC 15581 - the "allocatable array extension") logical :: temporary = .false. + logical :: place_holder = .false. + ! pointer to parent grid type (grid_t), pointer :: grid @@ -415,16 +417,24 @@ end subroutine create_rvector ! create_rvector ! 
using grid definition in structure "grid" ; ! allocates memory in x,y,z component arrays ! gridType is a character string to describe intended usage - subroutine create_cvector(igrid, E, gridType) + subroutine create_cvector(igrid, E, gridType, place_holder) implicit none type(grid_t), target, intent(in) :: igrid ! the grid for which an edge/ face node field is being initialized type (cvector), intent(inout) :: E + logical, intent(in), optional :: place_holder integer :: status,nx,ny,nz character (len=80), intent(in) :: gridType + logical :: place_holder_lcl + + if (present(place_holder)) then + place_holder_lcl = place_holder + else + place_holder_lcl = .false. + end if ! First deallocate anything, that's allocated call deall_cvector(E) @@ -443,6 +453,17 @@ subroutine create_cvector(igrid, E, gridType) ! gridType E%gridType = gridType + E%place_holder = place_holder_lcl + + ! Initialize everything to null + E % x => null() + E % y => null() + E % z => null() + + if (E % place_holder .eqv. .true.) then + ! Don't allocate anything for place holders + return + end if ! allocate memory for x,y,z ; ! E%allocated will be true if all allocations succeed @@ -1003,7 +1024,7 @@ subroutine copy_cvector(E2, E1) integer :: status ! check to see if RHS (E1) is active (allocated) - if(.not.E1%allocated) then + if(.not.E1%allocated .and. .not. E1 % place_holder) then write(0,*) 'RHS not allocated yet for copy_cvector' else @@ -1012,11 +1033,15 @@ subroutine copy_cvector(E2, E1) if (E1%gridType == E2%gridType) then ! just copy components - E2%x = E1%x - E2%y = E1%y - E2%z = E1%z + if (.not. E1 % place_holder) then + E2%x = E1%x + E2%y = E1%y + E2%z = E1%z + end if + E2%gridType = E1%gridType E2%grid => E1%grid + E2%place_holder = E1%place_holder else write (0, *) 'not compatible usage for copy_cvector' @@ -1030,13 +1055,18 @@ subroutine copy_cvector(E2, E1) end if ! then allocate E2 as correct size ... 
- Call create_cvector(E1%grid, E2, E1%gridType) + Call create_cvector(E1%grid, E2, E1%gridType, place_holder=E1 % place_holder) ! .... and copy E1 - E2%x = E1%x - E2%y = E1%y - E2%z = E1%z + + if (.not. E1 % place_holder) then + E2%x = E1%x + E2%y = E1%y + E2%z = E1%z + end if + E2%gridType = E1%gridType E2%grid => E1%grid + E2%place_holder = E1%place_holder end if From 22930e5b833fc32f45a493da73ff45c0f0b98229 Mon Sep 17 00:00:00 2001 From: Miles Curry <2590700+MiCurry@users.noreply.github.com> Date: Mon, 2 Mar 2026 17:20:09 -0700 Subject: [PATCH 05/21] Add in ESolnManager module to ModEM This commit simply adds the ESolnManager to modem code base and adds it to the CMakeLists.txt. The ESolnManager will serve as a manager of electric and adjoint fields for the main task and the worker tasks. Depending on the arguments passed in, it will either have the worker tasks communicate the electric and adjoin fields via writing and reading them to disk, or it will send the fields to and from the main task using MPI (the current ModEM implementation). Perhaps in a future commit the ESolnManager will be responsible for allocating and deallocating the solnVectors_t. --- f90/3D_MT/CMakeLists.txt | 1 + f90/3D_MT/ESolnManager.f90 | 598 +++++++++++++++++++++++++++++++++++++ 2 files changed, 599 insertions(+) create mode 100644 f90/3D_MT/ESolnManager.f90 diff --git a/f90/3D_MT/CMakeLists.txt b/f90/3D_MT/CMakeLists.txt index 0911f4a6..64db53c6 100644 --- a/f90/3D_MT/CMakeLists.txt +++ b/f90/3D_MT/CMakeLists.txt @@ -24,6 +24,7 @@ target_sources(${MODEM_EXE} #PRIVATE DataIO_HDF5.f90 PRIVATE DataIO.f90 PRIVATE EMfieldInterp.f90 + PRIVATE ESolnManager.f90 PRIVATE ForwardSolver.f90 PRIVATE GridCalc.f90 #PRIVATE GridCalcS.f90 diff --git a/f90/3D_MT/ESolnManager.f90 b/f90/3D_MT/ESolnManager.f90 new file mode 100644 index 00000000..e4a6a5d1 --- /dev/null +++ b/f90/3D_MT/ESolnManager.f90 @@ -0,0 +1,598 @@ +! EsolnManager Module +! ==================== +! +! 
The EsolnManager module is a module that helps with transferring electric +! and adjoint fields to and from worker tasks. It does so in two ways: +! +! 1. MPI Transferring - Original ModEM MPI Behavior +! +! If specified, main tasks and worker tasks will communicate their +! electric fields/adjoint solutions to each other using MPI send and +! receives. +! +! This will cause the main task to allocate space for eAll. +! +! 2. Saving Electric fields/Adjoin Solutions to File +! +! If specified, the worker tasks will read and write out electric fields +! when they are told to access them by the main task rather than communicating +! the electric fields back to the main task. +! +! +! How to use this Module: +! +! To use this module, you will want to first call EsMgr_init with either: +! +! save_in_file = .true. or .false. depending on how you wish electric fields to +! be saved. +! +! Regardless of value of save_in_file, you will need to call EsMgr_get and EsMgr_save +! in order to complete a 'transaction'. +! +! With save_in_file = .false., EsMgr_get will call a MPI_Recv and EsMgr_save will call +! a corresponding MPI_Send +! +! With save_in_file = .ture., the worker tasks will write out their e fields to disk via +! EsMgr_save and will read e fields using EsMgr_get. The main task will still need to call +! EsMgr_get/EsMgr_save as the worker task will communicate to the main task when it is done +! reading/writing. 
+ +module ESolnManager + + use mpi + use Declaration_MPI + use GridDef + use SolnSpace + use utilities + + implicit None + + private + + type (ModEM_mpi_context_t), pointer :: EsMgr_ctx + + character(len=*), parameter :: FTYPE_ASCII = "ascii" + character(len=*), parameter :: FTYPE_BINARY = "binary" + + type (grid_t), pointer :: EsMgr_grid => null() + character(len=25) :: EsMgr_ftype + character(len=256) :: EsMgr_prefix + logical :: EsMgr_save_in_file + + character, pointer, dimension(:) :: esmgr_holder => null() + integer :: esmgr_holder_bytes + logical :: esmgr_holder_allocated + + public :: FTYPE_ASCII, FTYPE_BINARY + public :: EsMgr_init + public :: EsMgr_create_solnVectorMTX, EsMgr_create_e + public :: EsMgr_get + public :: EsMgr_save + public :: EsMgr_save_in_file + +contains + + ! EsMgr_init - Initializes the ESolnManager sets several module variables + ! and determines if the Esoln files should be saved or not. The main task and all + ! and worker tasks must call this before calling any other subroutines in this + ! module. + ! + ! Arguments: + ! * grid - grid_t target + ! The grid + ! * context - ModEM_mpi_context_t target + ! An initalized ModEM_mpi_context_t which the ESolnManager should use + ! when doing MPI communication routines. + ! * save_in_file - logical - optional - Default: .false. + ! If save_in_file is present and .true., the electric fields will be saved in a file + ! rather than passed from the main task and worker tasks via MPI. + ! + ! If .false. or not present, the electric fields will be passed from the main and + ! worker task via MPI. + ! * prefix - character - optional - Default: 'esoln' + ! A character string prefix which to label the electric field/adjoint solution + ! filenames if they are written out to disk. + ! + ! If save_in_file is .false. or not present, prefix has no effect. + ! * ftype - character - optional - Default: 'FTYPE_BINARY' + ! Determines the type of file the cvectors should be written as if `save_in_file` + ! 
is true. + ! + ! Options are above in this module and are: + ! * FTYPE_ASCII - ASCII representation + ! * FTYPE_BINARY - Fortran unformatted binary + subroutine EsMgr_init(grid, context, save_in_file, prefix, ftype) + + implicit none + + type (grid_t), target, intent(in) :: grid + type (ModEM_mpi_context_t), target, intent(in) :: context + logical, intent(in), optional :: save_in_file + character(len=*), intent(in), optional :: prefix + character(len=*), intent(in), optional :: ftype + + logical :: save_in_file_lcl + character(len=256) :: prefix_lcl + character(len=256) :: ftype_lcl + + EsMgr_ctx => context + + if (present(save_in_file)) then + save_in_file_lcl = save_in_file + else + save_in_file_lcl = .false. + end if + + if ( .not. (save_in_file_lcl .and. present(prefix)) ) then + if (EsMgr_ctx % rank_world == 0) then + write(0,*) "Warning: Argument 'prefix' was passed, but 'save_in_file' was not present" + write(0,*) "Warning: 'prefix' will not have an effect. Set 'save_in_file' to true to save" + write(0,*) "Warning: esolns in files" + end if + end if + + if ( .not. (save_in_file_lcl .and. present(ftype)) ) then + if (EsMgr_ctx % rank_world == 0) then + write(0,*) "Warning: Argument 'ftype' was passed, but 'save_in_file' was not present" + write(0,*) "Warning: 'ftype' will not have an effect. 
Set 'save_in_file' to true when calling" + write(0,*) "WARNING: EsMgr_init to save esolns in files" + end if + end if + + if (present(prefix)) then + prefix_lcl = prefix + else + prefix_lcl = "esoln" + end if + + if (present(ftype)) then + ftype_lcl = ftype + else + ftype_lcl = FTYPE_ASCII + end if + + select case (ftype_lcl) + case (FTYPE_ASCII : FTYPE_BINARY) + case DEFAULT + write(0,*) "ERROR: ", trim(ftype_lcl), " is not a valid file type for Esoln" + write(0,*) "ERROR: Valid options are: [", trim(FTYPE_ASCII), " | ", trim(FTYPE_BINARY), "]" + call ModEM_abort() + end select + + EsMgr_grid => grid + EsMgr_save_in_file = save_in_file_lcl + EsMgr_prefix = prefix_lcl + EsMgr_ftype = ftype_lcl + + call MPI_Barrier(MPI_COMM_WORLD, ierr) + + end subroutine EsMgr_init + + ! EsMgr_create_eAll + ! + ! Convenience function to initialize an solnVectorMTX_t with solnVectors. + ! + ! If EsMgr_init was called with save_in_file=.true., then the solnVectors_t + ! created in this subroutine will be created as 'place_holders' and their + ! pol cvectors variables will also be created as 'place_holders'. These + ! cvectors will not have their x, y, z arrays will not be allocated. + ! + ! However, if save_in_file=.false. then these arrays will be allocated. + ! + ! Note: EsMgr_init should be called before calling this function. + ! + ! Arguments: + ! eAll - solnVectorMTX_T + ! The solnVectorMTX_t to hold eAll. See description above. + ! nTx - integer + ! The number of transmitters to create for the SolnVectorMTX_t + ! grid - grid_t target + ! The grid that should be associated with this SolnVectorMTX_t + ! place_holder - logical - Defaults to save_in_file passed in during init + ! Overrides the default action described above. + ! 
+ subroutine EsMgr_create_solnVectorMTX(eAll, nTx, grid, place_holder) + + implicit none + + type (solnVectorMTX_t), intent(inout) :: eAll + integer, intent(in) :: nTx + type(grid_t), intent(in), target :: grid + logical, intent(in), optional :: place_holder + + integer :: iTx + type(solnVector_t) :: e + logical :: place_holder_lcl + + if (present(place_holder)) then + place_holder_lcl = place_holder + else + place_holder_lcl = EsMgr_save_in_file + end if + + call create_solnVectorMTX(nTx, eAll) + + do iTx = 1, nTx + call create_solnVector(grid, iTx, eall % solns(iTx), place_holder=place_holder_lcl) + end do + + end subroutine EsMgr_create_solnVectorMTX + + ! EsMgr_create_e + ! + ! Convenience function to create a single solnVector_t. + ! + ! If EsMgr_init was called with save_in_file=.true., then the solnVector_t + ! created in this subroutine by the main task will be created as 'place_holders' + ! and their pol cvectors variables will also be created as 'place_holders'. + ! These cvectors will not have their x, y, z arrays will not be allocated. + ! + ! If save_in_file=.true. and this routine is called by a worker task, then + ! the this will routine will completly allocate the cvectors associated with + ! the solnVector_t. + ! + ! However, if save_in_file=.false. then these arrays will be allocated. + ! + ! Note: EsMgr_init should be called before calling this function. + ! + ! Arguments: + ! e - solnVector_t + ! The solnVector_t that will be created + ! iTx - integer + ! The transmitter number to associate with this solnVector_t + ! place_holder - logical - Defaults to save_in_file passed in during init + ! Overides the default action described above. + ! 
+ subroutine EsMgr_create_e(e, iTx, place_holder) + + implicit none + + type (solnVector_t), intent(inout) :: e + integer, intent(in) :: iTx + logical, intent(in), optional :: place_holder + + logical :: place_holder_lcl + + if (present(place_holder)) then + place_holder_lcl = place_holder + else + if (EsMgr_ctx % rank_current == 0) then + place_holder_lcl = EsMgr_save_in_file + else + ! This is a worker task - Don't make this a place holder + place_holder_lcl = .false. + end if + end if + + call create_solnVector(EsMgr_grid, iTx, e, place_holder=place_holder_lcl) + + end subroutine EsMgr_create_e + + ! EsMgr_get + ! + ! Populate e with the transmitter iTx and polarization pol_index + ! by either via MPI or by reading a file. + ! + ! EsMgr_save_in_file == .false. + ! ====================== + ! + ! If EsMgr_save_in_file is .false., then worker_tasks that call this + ! routine will receive e from the main task (That has called EsMgr_save). + ! + ! When a Main task calls this routine with EsMgr_save_in_file == .false., + ! then the main task will receive e from the worker task specified in + ! the from argument. + ! + ! EsMgr_save_in_file == .true. + ! ====================== + ! + ! If the EsolnManager was initialized with save_in_file=.true., then + ! worker_tasks that call this function will read the corresponding + ! file that contains the data for the pol_index for iTx. After reading + ! the file, the worker task will send a message to the main task + ! indicating that it has successfully read the file. + ! + ! Main Tasks: + ! + ! If EsMgr_save_in_file is .true., then the main task will wait for + ! the from task to finish writing it's solnVector_t in EsMgr_save. + ! + ! Note: EsMgr_init should be called before calling this function. + ! + ! Arguments: + ! e - solnVector_t + ! The solnVector_t to read the polarization pol_index of iTx into + ! iTx - integer + ! The transmitter number to read + ! pol_index - integer - optional + ! The polarization to read + ! 
from - integer - optional + ! The MPI task to receive the solnVector_t from if EsMgr_save_in_file is .false. + ! prefix - character - optional + ! The additional prefix that was used to write the file (e.g. 'JmultT', 'Trial') + ! + subroutine EsMgr_get(e, iTx, pol_index, from, prefix) + + implicit none + + type (solnVector_t), intent(inout) :: e + integer, intent(in) :: iTx + integer, intent(in), optional :: pol_index + integer, intent(in), optional :: from + character(len=*), intent(in), optional :: prefix + + e % tx = iTx + + ! If we are reading and writing files, do nothing + if (EsMgr_save_in_file .and. EsMgr_ctx % rank_world == 0) then + call wait_on_task(from) + return + end if + + if (EsMgr_save_in_file .and. EsMgr_ctx % rank_world /= 0) then + call read_esoln_from_file(e, iTx, pol_index, prefix=prefix) + return + end if + + call EsMgr_recv_e(e, from) + + end subroutine EsMgr_get + + ! EsMgr_save + ! + ! Save e by either writing it to disk or sending it to the main task. + ! + ! EsMgr_save_in_file = .false. + ! ============================= + ! + ! If EsMgr_save_in_file is .false., then a calling task will send e to the task + ! specified in the to argument. + ! + ! To receive this e, a task will need to call EsMgr_get with the correspond from. + ! + ! EsMgr_save_in_file = .true. + ! ============================= + ! + ! If EsMgr_save_in_file is .true., then when a worker task that calls this + ! subroutine will write out e to a corresponding file. The worker task will then + ! communicate to the main task to declare that it is done writing. + ! + ! If the main task calls this routine with EsMgr_save_in_file == .true., then + ! it will return immediately as no work is needed. + ! + ! + ! Arguments + ! e - solnVector_t + ! The solnVector_t to save + ! to - integer - optional + ! The task to send e to + ! prefix - character - optional + ! If present, append prefix to the electric field solutions. Useful when + ! 
needing to label JmultT or trail electric fields. + ! + subroutine EsMgr_save(e, to, prefix) + + implicit none + + type (solnVector_t), intent(inout) :: e + integer, intent(in), optional :: to + character(len=*), intent(in), optional :: prefix + + if (EsMgr_save_in_file .and. EsMgr_ctx % rank_world == 0) then + return + end if + + if (EsMgr_save_in_file .and. .not. EsMgr_ctx % rank_world == 0) then + call EsMgr_write_to_file(e, prefix) + call communicate_file_done_writing() + return + end if + + call EsMgr_send_e(e, to) + + end subroutine EsMgr_save + + subroutine communicate_file_done_writing() + + implicit none + + integer :: dummy_buffer + integer :: tag + integer :: count + integer :: to + + count = 0 + tag = 0 + to = 0 + + call MPI_Send(dummy_buffer, count, MPI_INTEGER, to, tag, EsMgr_ctx % comm_current, ierr) + + end subroutine communicate_file_done_writing + + subroutine wait_on_task(task) + + implicit none + + integer, intent(in) :: task + + integer :: dummy_buff + integer :: count + integer :: tag + + count = 0 + tag = 0 + + call MPI_Recv(dummy_buff, count, MPI_INTEGER, task, tag, EsMgr_ctx % comm_current, MPI_STATUS_IGNORE, ierr) + + end subroutine wait_on_task + + subroutine EsMgr_write_to_file(e, prefix, iPol, ftype) + + implicit none + + type (solnVector_t), intent(in) :: e + character(len=*), intent(in), optional :: prefix + integer, intent(in), optional :: iPol + character(len=*), optional, intent(in) :: ftype + + integer :: iPol_lcl + character(len=256) :: prefix_lcl + character(len=256) :: ftype_lcl + + if (present(iPol)) then + iPol_lcl = iPol + else + iPol_lcl = 1 + end if + + if (present(prefix)) then + prefix_lcl = prefix + else + prefix_lcl = "" + end if + + if (present(ftype)) then + ftype_lcl = ftype + else + ftype_lcl = EsMgr_ftype + endif + + call write_solnVector(e, trim(EsMgr_prefix)//trim(prefix_lcl), ftype=ftype_lcl, pol_index=iPol_lcl) + + end subroutine EsMgr_write_to_file + + subroutine read_esoln_from_file(e, iTx, iPol, prefix) + + 
implicit none + + type (solnVector_t), intent(inout) :: e + integer, intent(in) :: iTx + integer, intent(in), optional :: iPol + character(len=*), intent(in), optional :: prefix + + integer :: iPol_lcl + character(len=256) :: prefix_lcl + + if (present(iPol)) then + iPol_lcl = iPol + else + iPol_lcl = 1 + end if + + if (present(prefix)) then + prefix_lcl = prefix + else + prefix_lcl = "" + end if + + e % tx = iTx + call read_solnVector(e, trim(EsMgr_prefix)//trim(prefix_lcl), ftype=EsMgr_ftype, pol_index=iPol_lcl) + + end subroutine read_esoln_from_file + + subroutine EsMgr_send_e(e, to) + + implicit none + + type (solnVector_t), intent(inout) :: e + integer, intent(in) :: to + integer :: tag + character, pointer, dimension(:) :: e_packed => null() + + if (to == 0) then + tag = FROM_WORKER + else + tag = FROM_MASTER + end if + + call int_create_e_param_place_holder(e) + call int_Pack_packed_vec(e) + call MPI_Send(esmgr_holder, esmgr_holder_bytes, MPI_PACKED, to, tag, EsMgr_ctx % comm_current, ierr) + + end subroutine EsMgr_send_e + + subroutine EsMgr_recv_e(e, from) + + implicit none + + type (solnVector_t), intent(inout) :: e + integer, intent(in) :: from + integer :: tag + + if (from == 0) then + tag = FROM_MASTER + else + tag = FROM_WORKER + end if + + call int_create_e_param_place_holder(e) + call MPI_Recv(esmgr_holder, esmgr_holder_bytes, MPI_PACKED, from, tag, EsMgr_ctx % comm_current, STATUS, ierr) + call int_Unpack_packed_vec(e) + + end subroutine EsMgr_recv_e + + subroutine int_create_e_param_place_holder(e) + + implicit none + type(solnVector_t), intent(in) :: e + integer :: Ex_size,Ey_size,Ez_size,Nbytes1,Nbytes2,Nbytes3,Nbytes4 + + if (esmgr_holder_allocated) then + return + end if + + Ex_size=size(e%pol(1)%x) + CALL MPI_PACK_SIZE(Ex_size, MPI_DOUBLE_COMPLEX, MPI_COMM_WORLD, Nbytes1, ierr) + Ey_size=size(e%pol(1)%y) + CALL MPI_PACK_SIZE(Ey_size, MPI_DOUBLE_COMPLEX, MPI_COMM_WORLD, Nbytes2, ierr) + Ez_size=size(e%pol(1)%z) + CALL MPI_PACK_SIZE(Ez_size, 
MPI_DOUBLE_COMPLEX, MPI_COMM_WORLD, Nbytes3, ierr) + CALL MPI_PACK_SIZE(1, MPI_INTEGER, MPI_COMM_WORLD, Nbytes4, ierr) + esmgr_holder_bytes=((Nbytes1+Nbytes2+Nbytes3+Nbytes4))+1 + + allocate(esmgr_holder(esmgr_holder_bytes)) + esmgr_holder_allocated = .true. + + end subroutine int_create_e_param_place_holder + + subroutine int_pack_packed_vec(e) + + implicit none + + type(solnVector_t), intent(in) :: e + integer index,Ex_size,Ey_size,Ez_size + + Ex_size=size(e%pol(1)%x) + Ey_size=size(e%pol(1)%y) + Ez_size=size(e%pol(1)%z) + index=1 + + call MPI_Pack(e%pol(which_pol)%x(1,1,1),Ex_size, MPI_DOUBLE_COMPLEX, esmgr_holder, esmgr_holder_bytes, index, MPI_COMM_WORLD, ierr) + call MPI_Pack(e%pol(which_pol)%y(1,1,1),Ey_size, MPI_DOUBLE_COMPLEX, esmgr_holder, esmgr_holder_bytes, index, MPI_COMM_WORLD, ierr) + call MPI_Pack(e%pol(which_pol)%z(1,1,1),Ez_size, MPI_DOUBLE_COMPLEX, esmgr_holder, esmgr_holder_bytes, index, MPI_COMM_WORLD, ierr) + call MPI_Pack(e%tx, 1, MPI_INTEGER, esmgr_holder, esmgr_holder_bytes, index, MPI_COMM_WORLD, ierr) + + end subroutine int_pack_packed_vec + + + subroutine int_unpack_packed_vec(e) + + implicit none + + type(solnVector_t), intent(inout) :: e + + integer index,Ex_size,Ey_size,Ez_size + + Ex_size=size(e%pol(1)%x) + Ey_size=size(e%pol(1)%y) + Ez_size=size(e%pol(1)%z) + index=1 + + call MPI_Unpack(esmgr_holder, esmgr_holder_bytes, index, e%pol(which_pol)%x(1,1,1),Ex_size, MPI_DOUBLE_COMPLEX,MPI_COMM_WORLD, ierr) + call MPI_Unpack(esmgr_holder, esmgr_holder_bytes, index, e%pol(which_pol)%y(1,1,1),Ey_size, MPI_DOUBLE_COMPLEX,MPI_COMM_WORLD, ierr) + call MPI_Unpack(esmgr_holder, esmgr_holder_bytes, index, e%pol(which_pol)%z(1,1,1),Ez_size, MPI_DOUBLE_COMPLEX,MPI_COMM_WORLD, ierr) + call MPI_Unpack(esmgr_holder, esmgr_holder_bytes, index, e%tx,1, MPI_INTEGER,MPI_COMM_WORLD, ierr) + end subroutine int_unpack_packed_vec + + +end module ESolnManager From f58c3c28cae08ef66db6f73505cdb70f37cc3a89 Mon Sep 17 00:00:00 2001 From: Miles Curry 
<2590700+MiCurry@users.noreply.github.com> Date: Tue, 3 Mar 2026 15:34:34 -0700 Subject: [PATCH 06/21] Add in error checking when writing and reading cvectors This commit adds error checking when reading and writing cvectors. Now that these routines will be used more often, it will be beneficial to ensure that these routines complete successfully. --- f90/FIELDS/FiniteDiff3D/sg_vector.f90 | 65 ++++++++++++++++++++++++--- 1 file changed, 58 insertions(+), 7 deletions(-) diff --git a/f90/FIELDS/FiniteDiff3D/sg_vector.f90 b/f90/FIELDS/FiniteDiff3D/sg_vector.f90 index 54ba55dd..60294ea3 100755 --- a/f90/FIELDS/FiniteDiff3D/sg_vector.f90 +++ b/f90/FIELDS/FiniteDiff3D/sg_vector.f90 @@ -675,6 +675,7 @@ subroutine write_cvector(fid, E, ftype) complex (kind(E%x)), allocatable, dimension(:,:,:) :: x, y, z logical :: ok, hasname, binary character(80) :: fname, isbinary + character(512) :: iomsg if(.not. E%allocated) then write(0, *) 'cvector must be allocated before call to write_cvector' @@ -700,10 +701,30 @@ subroutine write_cvector(fid, E, ftype) ! 
write binary to unformatted files if (binary) then - write(fid) E%nx,E%ny,E%nz,E%gridType - write(fid) E%x - write(fid) E%y - write(fid) E%z + write(fid, iostat=istat, iomsg=iomsg) E%nx,E%ny,E%nz,E%gridType + if (istat /= 0) then + write(0,*) "ERROR: There was an writing file '", trim(fname), "' header" + write(0,*) "ERROR: Reason: ", trim(iomsg) + call ModEM_abort() + end if + write(fid, iostat=istat, iomsg=iomsg) E%x + if (istat /= 0) then + write(0,*) "ERROR: There was an writing file '", trim(fname), "' E % x" + write(0,*) "ERROR: Reason: ", trim(iomsg) + call ModEM_abort() + end if + write(fid, iostat=istat, iomsg=iomsg) E%y + if (istat /= 0) then + write(0,*) "ERROR: There was an writing file '", trim(fname), "' E % y" + write(0,*) "ERROR: Reason: ", trim(iomsg) + call ModEM_abort() + end if + write(fid, iostat=istat, iomsg=iomsg) E%z + if (istat /= 0) then + write(0,*) "ERROR: There was an writing file '", trim(fname), "' E % z" + write(0,*) "ERROR: Reason: ", trim(iomsg) + call ModEM_abort() + end if return end if @@ -869,6 +890,7 @@ subroutine read_cvector(fid, E, ftype) complex (kind(E%x)), allocatable, dimension(:,:,:) :: x, y, z logical :: ok, hasname, binary character(80) :: fname, isbinary + character(512) :: iomsg if (.not. present(ftype)) then binary = .false. @@ -908,9 +930,38 @@ subroutine read_cvector(fid, E, ftype) if (binary) then ! read binary from unformatted files - read(fid) E%x - read(fid) E%y - read(fid) E%z + read(fid, iostat=istat, iomsg=iomsg) E%x + if (istat < 0) then + write(1, '(A, A, A)') "ERROR: EOF reached when reading E % x in file: ", trim(fname), ' aborting!' + write(0,*) 'ERROR: Error read iostat code:', istat + call ModEM_abort() + else if (istat > 0) then + write(0, '(A, A, A)') "ERROR: Erorr when reading E % x: ", trim(fname), ' aborting!' 
+ write(0, '(A, i4.4, A, A)') 'ERROR: Error read iostat code:', istat, ' Message: ', trim(iomsg) + call ModEM_abort() + end if + + read(fid, iostat=istat, iomsg=iomsg) E%y + if (istat < 0) then + write(0, '(A, A, A)') "ERROR: EOF reached when reading E % y in file: ", trim(fname), ' aborting!' + write(0, '(A, i4.4, A, A)') 'ERROR: Error read iostat code:', istat, ' Message: ', trim(iomsg) + call ModEM_abort() + else if (istat > 0) then + write(0, '(A, A, A)') "ERROR: Erorr when reading E % y: ", trim(fname), ' aborting!' + write(0, '(A, i4.4, A, A)') 'ERROR: Error read iostat code:', istat, ' Message: ', trim(iomsg) + call ModEM_abort() + end if + + read(fid, iostat=istat, iomsg=iomsg) E%z + if (istat < 0) then + write(0, '(A, A, A)') "ERROR: EOF reached when reading E % z in file: ", trim(fname), ' aborting!' + write(0, '(A, i4.4, A, A)') 'ERROR: Error read iostat code:', istat, ' Message: ', trim(iomsg) + call ModEM_abort() + else if (istat > 0) then + write(0, '(A, A, A)') "ERROR: Erorr when reading E % z: ", trim(fname), ' aborting!' + write(0, '(A, i4.4, A, A)') 'ERROR: Error read iostat code:', istat, ' Message: ', trim(iomsg) + call ModEM_abort() + end if return end if From b13f718dce61e857b96c566e0053f92d69b27807 Mon Sep 17 00:00:00 2001 From: Miles Curry <2590700+MiCurry@users.noreply.github.com> Date: Tue, 3 Mar 2026 15:55:27 -0700 Subject: [PATCH 07/21] Add a logical 'trial' to define_worker_job to signal trial computations This commit adds in a 'trial' logical to the define_worker_job type. This logical will be used in future commits to signify to worker tasks that a trial is being calculated. This will allow the EsolnManager the ability to save files with an appropriate filename (e.g. labeling them as 'trial'). 
--- f90/MPI/Declaration_MPI.f90 | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/f90/MPI/Declaration_MPI.f90 b/f90/MPI/Declaration_MPI.f90 index 5e428cac..fda67e7e 100644 --- a/f90/MPI/Declaration_MPI.f90 +++ b/f90/MPI/Declaration_MPI.f90 @@ -126,6 +126,7 @@ Module Declaration_MPI logical :: keep_E_soln=.false. logical :: several_Tx=.false. logical :: create_your_own_e0=.false. + logical :: trial=.false. ! 2022.10.06, Liu Zhongyin, add iSite storing the site index in rx of dataBlock_t Integer :: iSite end type define_worker_job @@ -148,20 +149,18 @@ Module Declaration_MPI !########################################################################## subroutine create_worker_job_task_place_holder - implicit none - integer index,Nbytes1,Nbytes2,Nbytes3 + implicit none + integer index,Nbytes1,Nbytes2,Nbytes3 - CALL MPI_PACK_SIZE(80, MPI_CHARACTER, MPI_COMM_WORLD, Nbytes1, ierr) - ! 2019.05.10, Liu Zhongyin, replace 6 with 7, for the iSite - ! CALL MPI_PACK_SIZE(6, MPI_INTEGER, MPI_COMM_WORLD, Nbytes2, ierr) - CALL MPI_PACK_SIZE(7, MPI_INTEGER, MPI_COMM_WORLD, Nbytes2, ierr) - CALL MPI_PACK_SIZE(3, MPI_LOGICAL, MPI_COMM_WORLD, Nbytes3, ierr) + CALL MPI_PACK_SIZE(80, MPI_CHARACTER, MPI_COMM_WORLD, Nbytes1, ierr) + CALL MPI_PACK_SIZE(7, MPI_INTEGER, MPI_COMM_WORLD, Nbytes2, ierr) + CALL MPI_PACK_SIZE(4, MPI_LOGICAL, MPI_COMM_WORLD, Nbytes3, ierr) - Nbytes=(Nbytes1+Nbytes2+Nbytes3)+1 + Nbytes=(Nbytes1+Nbytes2+Nbytes3)+1 - if(.not. associated(worker_job_package)) then - allocate(worker_job_package(Nbytes)) - end if + if(.not. 
associated(worker_job_package)) then + allocate(worker_job_package(Nbytes)) + end if end subroutine create_worker_job_task_place_holder !******************************************************************************* @@ -183,6 +182,7 @@ subroutine Pack_worker_job_task call MPI_Pack(worker_job_task%keep_E_soln,1, MPI_LOGICAL, worker_job_package, Nbytes, index, MPI_COMM_WORLD, ierr) call MPI_Pack(worker_job_task%several_Tx,1, MPI_LOGICAL, worker_job_package, Nbytes, index, MPI_COMM_WORLD, ierr) call MPI_Pack(worker_job_task%create_your_own_e0,1, MPI_LOGICAL, worker_job_package, Nbytes, index, MPI_COMM_WORLD, ierr) + call MPI_Pack(worker_job_task%trial,1, MPI_LOGICAL, worker_job_package, Nbytes, index, MPI_COMM_WORLD, ierr) ! 2019.05.08, Liu Zhongyin, add iSite for rx in dataBlock_t call MPI_Pack(worker_job_task%iSite, 1, MPI_INTEGER, worker_job_package, Nbytes, index, MPI_COMM_WORLD, ierr) @@ -205,6 +205,7 @@ subroutine Unpack_worker_job_task call MPI_Unpack(worker_job_package, Nbytes, index, worker_job_task%keep_E_soln,1, MPI_LOGICAL,MPI_COMM_WORLD, ierr) call MPI_Unpack(worker_job_package, Nbytes, index, worker_job_task%several_Tx,1, MPI_LOGICAL,MPI_COMM_WORLD, ierr) call MPI_Unpack(worker_job_package, Nbytes, index, worker_job_task%create_your_own_e0,1, MPI_LOGICAL,MPI_COMM_WORLD, ierr) + call MPI_Unpack(worker_job_package, Nbytes, index, worker_job_task%trial,1, MPI_LOGICAL,MPI_COMM_WORLD, ierr) ! 2019.05.08, Liu Zhongyin, add iSite for rx in dataBlock_t call MPI_Unpack(worker_job_package, Nbytes, index, worker_job_task%iSite, 1, MPI_INTEGER,MPI_COMM_WORLD, ierr) From 68693e4571fdb5a25765893fef58efd2c0a7c1e0 Mon Sep 17 00:00:00 2001 From: Miles Curry <2590700+MiCurry@users.noreply.github.com> Date: Tue, 3 Mar 2026 17:03:49 -0700 Subject: [PATCH 08/21] Add in PQMult and DataResp routines to Main_MPI.f90 This commit adds in the PQMult and DataResp Master and worker jobs. These routines will be used with the ESolnManager implementation. 
It also updates create_data_vec_place_holder and associated routines so that they can take a range of data instead of just one. --- f90/3D_MT/Sub_MPI.f90 | 12 ++ f90/MPI/Main_MPI.f90 | 365 ++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 366 insertions(+), 11 deletions(-) diff --git a/f90/3D_MT/Sub_MPI.f90 b/f90/3D_MT/Sub_MPI.f90 index facdd40c..ba52c6fe 100644 --- a/f90/3D_MT/Sub_MPI.f90 +++ b/f90/3D_MT/Sub_MPI.f90 @@ -71,6 +71,18 @@ subroutine get_nPol_MPI(emsoln) nPol_MPI= emsoln%nPol end subroutine get_nPol_MPI + +function get_nPol(iPer) result(nPol) + + implicit none + + integer, intent(in) :: iPer + integer :: nPol + nPol = 0 + + nPol = txDict(iPer) % nPol + +end function get_nPol !**************************************************************************** subroutine count_number_of_messages_to_RECV(eAll1) diff --git a/f90/MPI/Main_MPI.f90 b/f90/MPI/Main_MPI.f90 index e9adf8d6..0cd3d362 100644 --- a/f90/MPI/Main_MPI.f90 +++ b/f90/MPI/Main_MPI.f90 @@ -18,6 +18,8 @@ module Main_MPI use Declaration_MPI use Sub_MPI + + use EsolnManager ! use ioascii implicit none @@ -684,6 +686,79 @@ Subroutine Master_job_fwdPred(sigma,d1,eAll,comm) end subroutine Master_job_fwdPred +!---------------------------------------------------------------------------- +!########################## Master_job_DataResp ############################ +subroutine Master_job_DataResp(nTx, sigma, d, trial) + + implicit none + + integer, intent(in) :: nTx + type (modelParam_t), intent(in) :: sigma + type (dataVectorMTX_t), intent(inout) :: d + logical, intent(in), optional :: trial + + character (len=*), parameter :: JOB_NAME = "DATARESP" + + integer :: dest, nTasks, remainder, iTx + integer :: iTx_min, iTx_max, i, j, k + logical :: trial_lcl + + if (present(trial)) then + trial_lcl = trial + else + trial_lcl = .false. 
+ endif + + call create_worker_job_task_place_holder + + nTasks = nTx / number_of_workers + remainder = modulo(nTx, number_of_Workers) + iTx_max = 0 + + do dest = 1, number_of_workers + iTx_min = iTx_max + 1 + iTx_max = iTx_min + nTasks - 1 + + if (remainder > 0) then + iTx_max = iTx_max + 1 + remainder = remainder - 1 + end if + + if ( iTx_max >= iTx_min ) then + worker_job_task % what_to_do = trim(job_name) + worker_job_task % per_index = iTx_min + worker_job_task % pol_index = iTx_max + worker_job_task % trial = trial_lcl + + call Pack_worker_job_task + call MPI_Send(worker_job_package, Nbytes, MPI_PACKED, dest, FROM_MASTER, MPI_COMM_WORLD, ierr) + write(ioMPI, '(a10,a16,i5,a8,i5,a11,i5)') trim(job_name), ': Send Per from ', iTx_min, ' to', iTx_max, ' to ', dest + end if + end do + + remainder = modulo(nTx, number_of_workers) + iTx_max = 0 + + do dest = 1, number_of_workers + iTx_min = iTx_max + 1 + iTx_max = iTx_min + nTasks - 1 + + if (remainder > 0) then + iTx_max = iTx_max + 1 + remainder = remainder - 1 + end if + + if (iTx_max >= iTx_min) then + call create_data_vec_place_holder(d, start_iTx=iTx_min, end_iTx=iTx_max) + call MPI_Recv(data_para_vec, Nbytes, MPI_PACKED, dest, FROM_WORKER, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ierr) + call UnPack_data_para_vec(d, start_iTx=iTx_min, end_iTx=iTx_max) + end if + + + end do + +end subroutine Master_job_DataResp + +!######################## Master_job_PQMult ############################## +subroutine Master_job_PQMult(nTx, sigma, dsigma, use_starting_guess) + + implicit none + + integer, intent(in) :: nTx + type (modelParam_t), intent(in) :: sigma + type (modelParam_t), intent(inout) :: dsigma + logical, intent(in), optional :: use_starting_guess + + character(len=*), parameter :: job_name = "PQMULT" + type(modelParam_t), 
dimension(nTx) :: dsigma_recv + + integer :: dest, nTasks, remainder, iTx + integer :: iTx_min, iTx_max, i, j, k + logical :: flag + + logical, dimension(number_of_workers) :: task_is_working + logical, dimension(nTx) :: transmitters_processing, transmitters_done + logical :: sending + logical :: use_starting_guess_lcl + + if (present(use_starting_guess)) then + use_starting_guess_lcl = use_starting_guess + else + use_starting_guess_lcl = .false. + end if + + sending = .true. + + task_is_working(:) = .false. + transmitters_processing(:) = .false. + transmitters_done(:) = .false. + + call zero(dsigma) + + do while(sending) + ! Send out jobs + do iTx = 1, nTx + + if (transmitters_processing(iTx) .or. transmitters_done(iTx)) then + cycle + end if + + do dest = 1, number_of_workers + if (task_is_working(dest)) then + cycle + end if + + ! Send the task the job + worker_job_task % what_to_do=trim(job_name) + worker_job_task % per_index = iTx + worker_job_task % pol_index = -1 + worker_job_task % trial = use_starting_guess_lcl + + call create_worker_job_task_place_holder + call Pack_worker_job_task + call MPI_Send(worker_job_package, Nbytes, MPI_PACKED, dest, FROM_MASTER, MPI_COMM_WORLD, ierr) + transmitters_processing(iTx) = .true. + task_is_working(dest) = .true. + exit + end do + end do + + ! Recv any jobs + ! See if anyone is sending us a message... + call MPI_Iprobe(MPI_ANY_SOURCE, FROM_WORKER, MPI_COMM_WORLD, flag, MPI_STATUS_IGNORE, ierr) + + if (flag) then ! 
Someone is sending us a message + call create_worker_job_task_place_holder + call MPI_Recv(worker_job_package, Nbytes, MPI_PACKED, MPI_ANY_SOURCE, FROM_WORKER, MPI_COMM_WORLD, STATUS, ierr) + call Unpack_worker_job_task + + dest = worker_job_task % taskid + iTx = worker_job_task % per_index + + dsigma_recv(iTx) = sigma + call zero(dsigma_recv(iTx)) + + call create_model_param_place_holder(dsigma_recv(iTx)) + call MPI_Recv(sigma_para_vec, Nbytes, MPI_PACKED, dest, FROM_WORKER, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ierr) + call Unpack_model_para_values(dsigma_recv(iTx)) + + transmitters_done(iTx) = .true. + task_is_working(dest) = .false. + end if + + if (all(transmitters_done(:))) then + sending = .false. + exit + end if + end do + + do iTx = 1, nTx + call linComb_modelParam(ONE, dsigma, ONE, dsigma_recv(iTx), dsigma) + call deall_modelParam(dsigma_recv(iTx)) + end do + +end subroutine Master_job_PQMult !######################## Master_job_Jmult ############################## Subroutine Master_job_Jmult(mHat,m,d,eAll,comm) @@ -1822,8 +1996,9 @@ Subroutine Worker_job(sigma,d) ! Local type(modelParam_t) :: delSigma type(userdef_control) :: ctrl - Integer :: nTx,m_dimension,ndata - Integer :: itx, ndt, dt, dt_index + Integer :: nTx,m_dimension,ndata, start_iTx, end_iTx + Integer :: itx, ndt, dt, dt_index, iDt + Integer :: j Integer :: iper,ipol,i,des_index Integer :: per_index,per_index_pre @@ -1847,6 +2022,11 @@ Subroutine Worker_job(sigma,d) ! 2019.05.08, Liu Zhongyin, add isite for rx in dataBlock_t integer :: isite + ! PQMult + type (modelParam_t) :: dsigma_temp, dsigma_send, Qcomb + logical :: trial + + ! time DOUBLE PRECISION :: time_passed, now, just @@ -1978,6 +2158,105 @@ Subroutine Worker_job(sigma,d) time_passed = now - previous_time previous_time = now + elseif (trim(worker_job_task%what_to_do) .eq. 
'DATARESP') then + + start_iTx = worker_job_task % per_index + end_iTx = worker_job_task % pol_index + + worker_job_task % taskid = taskid + + call zero_solnvector(e0) + call zero_solnvector(e) + + call EsMgr_create_e(e, per_index) + + do per_index = start_iTx, end_iTx + call zero_solnvector(e0) + e0 % tx = per_index + e % tx = per_index + + do pol_index = 1, get_nPol(per_index) + + if (worker_job_task % trial) then + call EsMgr_get(e0, e0 % tx, pol_index=pol_index, prefix='.trial') + else + call EsMgr_get(e0, e0 % tx, pol_index=pol_index) + endif + end do + + do i = 1, d % d(per_index) % nDt + d % d(per_index) % data(i) % errorBar = .false. + iDt = d % d(per_index) % data(i) % dataType + + do j = 1, d % d(per_index) % data(i) % nSite + call dataResp(e0, sigma, iDt, d % d(per_index) % data(i) % rx(j), & + d % d(per_index) % data(i) % value(:,j), & + d % d(per_index) % data(i) % orient(j)) + + end do + end do + end do + + call create_data_vec_place_holder(d, start_iTx=start_iTx, end_iTx=end_iTx) + call Pack_data_para_vec(d, start_iTx=start_iTx, end_iTx=end_iTx) + call MPI_Send(data_para_vec, NBytes, MPI_PACKED, 0, FROM_WORKER, MPI_COMM_WORLD, ierr) + deallocate(data_para_vec) + data_para_vec => null() + + elseif (trim(worker_job_task%what_to_do) .eq. 
'PQMULT') then + + per_index = worker_job_task % per_index + worker_job_task % taskid = taskid + trial=worker_job_task%trial + + call create_solnVector(grid, 1, e0) + call create_solnVector(grid, 1, e) + + call zero_solnvector(e0) + call zero_solnvector(e) + + dsigma_temp = sigma + Qcomb = sigma + dsigma_send = sigma + + call zero(dsigma_temp) + call zero(dsigma_send) + call zero(QComb) + + e0 % tx = per_index + e % tx = per_index + + do pol_index = 1, get_nPol(per_index) + if (trial) then + call EsMgr_get(e0, e0 % tx, pol_index=pol_index, prefix='.trial') + call EsMgr_get(e, e % tx, pol_index=pol_index, prefix='.JmultT') + else + call EsMgr_get(e0, e0 % tx, pol_index=pol_index) + call EsMgr_get(e, e % tx, pol_index=pol_index, prefix='.JmultT') + end if + end do + + call PmultT(e0, sigma, e, dsigma_temp) + call QmultT(e0, sigma, d%d(per_index), Qcomb) + call scMultAdd(ONE, Qcomb, dsigma_temp) + + ! Send to Main that we are done + call create_worker_job_task_place_holder + worker_job_task % what_to_do = 'PQMULT-RESP' + worker_job_task % taskid = rank_current + worker_job_task % per_index = per_index + call Pack_worker_job_task + call MPI_Send(worker_job_package, Nbytes, MPI_PACKED, 0, & + FROM_WORKER, comm_current, ierr) + + call create_model_param_place_holder(dsigma_temp) + call pack_model_para_values(dsigma_temp) + call MPI_Send(sigma_para_vec, NBytes, MPI_PACKED, 0, FROM_WORKER, MPI_COMM_WORLD, ierr) + call deall(QComb) + call deall(dsigma_temp) + call deall(dsigma_send) + + elseif (trim(worker_job_task%what_to_do) .eq. 'COMPUTE_J') then ! compute (explicit) J if ((size_local.gt.1).and.(para_method.gt.0).and. 
& @@ -2626,14 +2905,39 @@ End Subroutine Worker_job !****************************************************************************** -subroutine create_data_vec_place_holder(d) +subroutine create_data_vec_place_holder(d, start_iTx, end_iTx) implicit none integer Nbytes1,Nbytes2,ndata,iper,ndt,sum1,sum2 type(dataVectorMTX_t), intent(in) :: d + integer, optional, intent(in) :: start_iTx + integer, optional, intent(in) :: end_iTx + integer :: start_iTx_lcl + integer :: end_iTx_lcl + + ! .neqv. here is the exclusive Or + if (present(start_iTx) .neqv. present(end_iTx)) then + write(0,*) "ERROR: In create_data_vec_place_holder - Either 'start_iTx' or 'end_iTx' was specified, but not the other" + write(0,*) "ERROR: Please either: Specify BOTH, or, specify NONE" + call ModEM_Abort() + end if + + if (present(start_iTx)) then + start_iTx_lcl = start_iTx + else + start_iTx_lcl = 1 + end if + + if (present(end_iTx)) then + end_iTx_lcl = end_iTx + else + end_iTx_lcl = d % nTx + end if + sum1=0 sum2=0 - do iper=1,d%nTx + + do iper = start_iTx_lcl, end_iTX_lcl do ndt=1,d%d(iper)%ndt ndata=size(d%d(iper)%data(ndt)%value) CALL MPI_PACK_SIZE(ndata, MPI_DOUBLE_PRECISION, & @@ -2651,14 +2955,30 @@ subroutine create_data_vec_place_holder(d) end subroutine create_data_vec_place_holder !****************************************************************************** - subroutine Pack_data_para_vec(d) +subroutine Pack_data_para_vec(d, start_iTx, end_iTx) implicit none type(dataVectorMTX_t), intent(in) :: d + integer, optional, intent(in) :: start_iTx + integer, optional, intent(in) :: end_iTx + integer :: start_iTx_lcl, end_iTx_lcl integer index integer ndata,iper,ndt + + if (present(start_iTx)) then + start_iTx_lcl = start_iTx + else + start_iTx_lcl = 1 + end if + + if (present(end_iTx)) then + end_iTx_lcl = end_iTx + else + end_iTx_lcl = d % nTx + end if + index=1 - do iper=1,d%nTx + do iper=start_iTx_lcl, end_iTx_lcl do ndt=1,d%d(iper)%ndt ndata=size(d%d(iper)%data(ndt)%value) call 
MPI_Pack(d%d(iper)%data(ndt)%value(1,1),ndata, & @@ -2676,15 +2996,37 @@ subroutine Pack_data_para_vec(d) end subroutine Pack_data_para_vec !****************************************************************************** -subroutine UnPack_data_para_vec(d) +subroutine UnPack_data_para_vec(d, start_iTx, end_iTx) implicit none type(dataVectorMTX_t), intent(inout) :: d + integer, optional, intent(in) :: start_iTx + integer, optional, intent(in) :: end_iTx ! Local - integer index - integer ndata,iper,ndt + integer :: start_iTx_lcl, end_iTx_lcl + integer :: index + integer :: ndata,iper,ndt + + ! .neqv. here is the exclusive Or + if (present(start_iTx) .neqv. present(end_iTx)) then + write(0,*) "ERROR: In UnPack_Data_para_vec - Either 'start_iTx' or 'end_iTx' was specified, but not the other" + write(0,*) "ERROR: Please either: Specify BOTH, or, specify NONE" + call ModEM_Abort() + end if + + if (present(start_iTx)) then + start_iTx_lcl = start_iTx + else + start_iTx_lcl = 1 + end if + + if (present(end_iTx)) then + end_iTx_lcl = end_iTx + else + end_iTx_lcl = d % nTx + end if index=1 - do iper=1,d%nTx + do iper=start_iTx_lcl, end_iTx_lcl do ndt=1,d%d(iper)%ndt ndata=size(d%d(iper)%data(ndt)%value) call MPI_Unpack(data_para_vec, Nbytes, index, & @@ -2703,7 +3045,8 @@ subroutine UnPack_data_para_vec(d) end do if (associated(data_para_vec)) then - deallocate (data_para_vec) + deallocate(data_para_vec) + data_para_vec => null() end if end subroutine UnPack_data_para_vec From ba2f56ac80c346dd66a45e83a8dc4d376935d3b1 Mon Sep 17 00:00:00 2001 From: Miles Curry <2590700+MiCurry@users.noreply.github.com> Date: Tue, 3 Mar 2026 17:19:36 -0700 Subject: [PATCH 09/21] Initialize ModEM Context for use This commit initializes the ModEM context in Constructor_MPI. 
--- f90/MPI/Declaration_MPI.f90 | 2 ++ f90/MPI/Main_MPI.f90 | 8 ++++++++ 2 files changed, 10 insertions(+) diff --git a/f90/MPI/Declaration_MPI.f90 b/f90/MPI/Declaration_MPI.f90 index fda67e7e..d9a6c4f6 100644 --- a/f90/MPI/Declaration_MPI.f90 +++ b/f90/MPI/Declaration_MPI.f90 @@ -144,6 +144,8 @@ Module Declaration_MPI integer :: number_of_workers end type ModEM_mpi_context_t +type (ModEM_mpi_context_t), target, save :: modem_ctx + Contains !########################################################################## diff --git a/f90/MPI/Main_MPI.f90 b/f90/MPI/Main_MPI.f90 index 0cd3d362..bbc4ff6a 100644 --- a/f90/MPI/Main_MPI.f90 +++ b/f90/MPI/Main_MPI.f90 @@ -81,6 +81,14 @@ Subroutine Constructor_MPI size_gpu = 0 ! tic-toc previous_time = MPI_Wtime() + + modem_ctx % comm_world = comm_world + modem_ctx % comm_local = comm_local + modem_ctx % rank_world = rank_world + modem_ctx % rank_local = rank_local + modem_ctx % size_local = size_local + modem_ctx % number_of_workers = number_of_workers + End Subroutine Constructor_MPI !---------------------------------------------------------------------------- From e3398c0407e9cfef4210459219062c90708b2ba0 Mon Sep 17 00:00:00 2001 From: Miles Curry <2590700+MiCurry@users.noreply.github.com> Date: Wed, 4 Mar 2026 11:06:45 -0700 Subject: [PATCH 10/21] Add flag in func and gradient to signal trial or to use starting guess This commit adds a new flag to func and to gradient. The flag in func can be used to signify to the worker tasks that a trial forward is being calculated. The worker_job in the future will use this to write out it's electric field to a file with the appropriate prefix. When we use the initial guess in the line search we will also need to tell the JMultT calculations to read the trial electric field, rather than the computations of the line search. Thus, we need to tell the gradient, and JmultT to use the starting guess. Worker jobs will then be able to know if they should read from the trial file or not. 
--- f90/INV/INVcore.f90 | 24 +++++++++++++++++++----- f90/INV/NLCG.f90 | 4 ++-- f90/MPI/Main_MPI.f90 | 44 ++++++++++++++++++++++++++++++++++++-------- 3 files changed, 57 insertions(+), 15 deletions(-) diff --git a/f90/INV/INVcore.f90 b/f90/INV/INVcore.f90 index 40269338..f792b749 100755 --- a/f90/INV/INVcore.f90 +++ b/f90/INV/INVcore.f90 @@ -62,7 +62,7 @@ end subroutine printf !********************************************************************** - subroutine func(lambda,d,m0,mHat,F,mNorm,dHat,eAll,RMS) + subroutine func(lambda,d,m0,mHat,F,mNorm,dHat,eAll,RMS,trial) ! Compute the full penalty functional F ! Also output the predicted data and the EM solution @@ -76,12 +76,20 @@ subroutine func(lambda,d,m0,mHat,F,mNorm,dHat,eAll,RMS) type(dataVectorMTX_t), optional, intent(inout) :: dHat type(solnVectorMTX_t), optional, intent(inout) :: eAll real(kind=prec), optional, intent(out) :: RMS + logical, optional, intent(in) :: trial ! local variables type(dataVectorMTX_t) :: res,Nres type(modelParam_t) :: m,JTd real(kind=prec) :: SS integer :: Ndata, Nmodel + logical :: trial_lcl + + if (present(trial)) then + trial_lcl = trial + else + trial_lcl = .false. + end if ! compute the smoothed model parameter vector call CmSqrtMult(mHat,m) @@ -96,7 +104,7 @@ subroutine func(lambda,d,m0,mHat,F,mNorm,dHat,eAll,RMS) ! also sets up forward solutions for all transmitters in eAll ! (which is created on the fly if it doesn't exist) #ifdef MPI - call Master_Job_fwdPred(m,dHat,eAll) + call Master_Job_fwdPred(m,dHat,eAll,trial=trial_lcl) #else call fwdPred(m,dHat,eAll) #endif @@ -138,7 +146,7 @@ subroutine func(lambda,d,m0,mHat,F,mNorm,dHat,eAll,RMS) end subroutine func !********************************************************************** - subroutine gradient(lambda,d,m0,mHat,grad,dHat,eAll) + subroutine gradient(lambda,d,m0,mHat,grad,dHat,eAll,use_starting_guess) ! Computes the gradient of the penalty functional, ! 
using EM solution (eAll) and the predicted data (dHat) @@ -157,13 +165,19 @@ subroutine gradient(lambda,d,m0,mHat,grad,dHat,eAll) type(modelParam_t), intent(inout) :: grad type(dataVectorMTX_t), intent(inout) :: dHat type(solnVectorMTX_t), intent(inout) :: eAll + logical, intent(in), optional :: use_starting_guess ! local variables real(kind=prec) :: Ndata,Nmodel type(dataVectorMTX_t) :: res type(modelParam_t) :: m,JTd,CmJTd + logical :: use_starting_guess_lcl - ! integer :: j, Ny, NzEarth + if (present(use_starting_guess)) then + use_starting_guess_lcl = use_starting_guess + else + use_starting_guess_lcl = .false. + end if ! compute the smoothed model parameter vector call CmSqrtMult(mHat,m) @@ -180,7 +194,7 @@ subroutine gradient(lambda,d,m0,mHat,grad,dHat,eAll) ! multiply by J^T call CdInvMult(res) #ifdef MPI - call Master_job_JmultT(m,res,JTd,eAll) + call Master_job_JmultT(m,res,JTd,eAll,use_starting_guess=use_starting_guess_lcl) #else call JmultT(m,res,JTd,eAll) #endif diff --git a/f90/INV/NLCG.f90 b/f90/INV/NLCG.f90 index bb367757..1e5c7de3 100755 --- a/f90/INV/NLCG.f90 +++ b/f90/INV/NLCG.f90 @@ -794,7 +794,7 @@ subroutine lineSearchCubic(lambda,d,m0,h,alpha,mHat,f,grad, & ! compute the trial mHat, f, dHat, eAll, rms mHat_1 = mHat_0 call linComb(ONE,mHat_0,alpha_1,h,mHat_1) - call func(lambda,d,m0,mHat_1,f_1,mNorm_1,dHat_1,eAll_1,rms_1) + call func(lambda,d,m0,mHat_1,f_1,mNorm_1,dHat_1,eAll_1,rms_1, trial=.true.) 
call printf('STARTLS',lambda,alpha,f_1,mNorm_1,rms_1) call printf('STARTLS',lambda,alpha,f_1,mNorm_1,rms_1,logFile) niter = niter + 1 @@ -859,7 +859,7 @@ subroutine lineSearchCubic(lambda,d,m0,h,alpha,mHat,f,grad, & call printf('RELAX',lambda,gamma*alpha,f,mNorm,rms,logFile) end if - call gradient(lambda,d,m0,mHat,grad,dHat,eAll) + call gradient(lambda,d,m0,mHat,grad,dHat,eAll,use_starting_guess=starting_guess) write(*,'(a60)') 'Sufficient decrease condition satisfied, exiting line search' write(ioLog,'(a60)') 'Sufficient decrease condition satisfied, exiting line search' call deall_dataVectorMTX(dHat_1) diff --git a/f90/MPI/Main_MPI.f90 b/f90/MPI/Main_MPI.f90 index bbc4ff6a..32b7bd27 100644 --- a/f90/MPI/Main_MPI.f90 +++ b/f90/MPI/Main_MPI.f90 @@ -616,13 +616,14 @@ end subroutine Master_job_Regroup !---------------------------------------------------------------------------- !########################## Master_job_fwdPred ############################ -Subroutine Master_job_fwdPred(sigma,d1,eAll,comm) +Subroutine Master_job_fwdPred(sigma,d1,eAll,comm,trial) implicit none type(modelParam_t), intent(in) :: sigma type(dataVectorMTX_t), intent(inout) :: d1 type(solnVectorMTX_t), intent(inout) :: eAll - integer, intent(inout),optional :: comm + integer, intent(inout), optional :: comm + logical, intent(in), optional :: trial integer nTx @@ -630,6 +631,7 @@ Subroutine Master_job_fwdPred(sigma,d1,eAll,comm) Integer :: iper, comm_current Integer :: per_index,pol_index,stn_index,iTx,i,iDt,j character(80) :: job_name + logical :: trial_lcl ! nTX is number of transmitters; nTx = d1%nTx @@ -649,6 +651,12 @@ Subroutine Master_job_fwdPred(sigma,d1,eAll,comm) end if end if + if (present(trial)) then + trial_lcl = trial + else + trial_lcl = .false. + end if + ! First, distribute the current model to all workers call Master_job_Distribute_Model(sigma) ! 
call Master_job_Distribute_Data(d1) @@ -662,7 +670,7 @@ Subroutine Master_job_fwdPred(sigma,d1,eAll,comm) end do end if job_name= 'FORWARD' - call Master_job_Distribute_Taskes(job_name,nTx,sigma,eAll,comm_current) + call Master_job_Distribute_Taskes(job_name,nTx,sigma,eAll,comm_current, trial=trial_lcl) ! Initialize only those grid elements on the master that are used in ! EMfieldInterp @@ -1017,7 +1025,7 @@ end subroutine Master_job_calcJ !######################### Master_job_JmultT ############################ -Subroutine Master_job_JmultT(sigma,d,dsigma,eAll,s_hat,comm) +Subroutine Master_job_JmultT(sigma,d,dsigma,eAll,s_hat,comm,use_starting_guess) implicit none type(modelParam_t), intent(in) :: sigma @@ -1025,7 +1033,8 @@ Subroutine Master_job_JmultT(sigma,d,dsigma,eAll,s_hat,comm) type(modelParam_t), intent(Out) :: dsigma type(solnVectorMTX_t), intent(in), optional :: eAll type(modelParam_t),intent(inout),pointer,dimension(:), optional :: s_hat - integer, intent(in),optional :: comm + integer, intent(in), optional :: comm + logical, intent(in), optional :: use_starting_guess ! Local type(modelParam_t) :: dsigma_temp @@ -1039,6 +1048,7 @@ Subroutine Master_job_JmultT(sigma,d,dsigma,eAll,s_hat,comm) Integer :: per_index,pol_index,stn_index character(80) :: job_name,file_name Integer :: comm_current + logical :: use_starting_guess_lcl savedSolns = present(eAll) returne_m_vectors= present(s_hat) @@ -1056,6 +1066,13 @@ Subroutine Master_job_JmultT(sigma,d,dsigma,eAll,s_hat,comm) comm_current = comm_leader endif end if + + if (present(use_starting_guess)) then + use_starting_guess_lcl = use_starting_guess + else + use_starting_guess_lcl = .false. + end if + ! nTX is number of transmitters; nTx = d%nTx if(.not. 
eAll_temp%allocated) then @@ -1099,7 +1116,7 @@ Subroutine Master_job_JmultT(sigma,d,dsigma,eAll,s_hat,comm) call zero(Qcomb) job_name= 'JmultT' call Master_job_Distribute_Taskes(job_name,nTx,sigma,eAll_out, & - & comm_current, eAll_temp) + & comm_current, eAll_temp, trial=use_starting_guess_lcl) file_name='e0.soln' !call write_solnVectorMTX(10,file_name,eAll_temp) @@ -1717,7 +1734,7 @@ end Subroutine Master_job_Stop_MESSAGE !********************** Master Distribute Tasks ***************************** subroutine Master_job_Distribute_Taskes(job_name,nTx,sigma,eAll_out, & - & comm, eAll_in) + & comm, eAll_in, trial) implicit none character(80) , intent(in) :: job_name @@ -1726,11 +1743,12 @@ subroutine Master_job_Distribute_Taskes(job_name,nTx,sigma,eAll_out, & type(solnVectorMTX_t), intent(in), optional :: eAll_in integer, intent(in), optional :: comm type(solnVectorMTX_t), intent(inout), optional :: eAll_out + logical, intent(in), optional :: trial !Local Integer :: iper,ipol,ipol1,ijob,total_jobs Integer :: bcounter, tcounter, robin Integer :: per_index,pol_index,des_index - logical :: keep_soln,savedSolns,ascend + logical :: keep_soln,savedSolns,ascend,trial_lcl integer :: comm_current, size_current double precision :: now, just, time_passed @@ -1745,6 +1763,13 @@ subroutine Master_job_Distribute_Taskes(job_name,nTx,sigma,eAll_out, & else comm_current = comm_world end if + + if (present(trial)) then + trial_lcl = trial + else + trial_lcl = .false. + end if + call get_nPol_MPI(eAll_out%solns(1)) if (rank_local.eq.-1) then ! first run! ! run initial regroup -- note this requires the comm to be @@ -1759,6 +1784,7 @@ subroutine Master_job_Distribute_Taskes(job_name,nTx,sigma,eAll_out, & call MPI_COMM_SIZE( comm_current, size_current, ierr ) who = 0 worker_job_task%what_to_do=trim(job_name) + worker_job_task%trial=trial_lcl call count_number_of_messages_to_RECV(eAll_out) total_jobs = answers_to_receive ! 
counter to locate the task @@ -1791,6 +1817,7 @@ subroutine Master_job_Distribute_Taskes(job_name,nTx,sigma,eAll_out, & end if worker_job_task%per_index = per_index worker_job_task%pol_index = pol_index + worker_job_task%trial = trial_lcl call create_worker_job_task_place_holder call Pack_worker_job_task call MPI_SEND(worker_job_package,Nbytes, MPI_PACKED, who, & @@ -1886,6 +1913,7 @@ subroutine Master_job_Distribute_Taskes(job_name,nTx,sigma,eAll_out, & worker_job_task%per_index= per_index worker_job_task%pol_index= pol_index worker_job_task%what_to_do= trim(job_name) + worker_job_task%trial=trial_lcl call create_worker_job_task_place_holder call Pack_worker_job_task From 61c01a4f44fa2f5fbbff177c3f42431091971c0e Mon Sep 17 00:00:00 2001 From: Miles Curry <2590700+MiCurry@users.noreply.github.com> Date: Wed, 4 Mar 2026 13:24:47 -0700 Subject: [PATCH 11/21] Implement ESolnManager into ModEM MF This commit finally uses the ESolnManager into ModEM. As of this commit it generates bit-for-bit results when running with or without saving the electric field solutions. --- f90/MPI/Main_MPI.f90 | 114 +++++++++++++++++++++---------------------- f90/Mod3DMT.f90 | 8 ++- 2 files changed, 62 insertions(+), 60 deletions(-) diff --git a/f90/MPI/Main_MPI.f90 b/f90/MPI/Main_MPI.f90 index 32b7bd27..a8076ea8 100644 --- a/f90/MPI/Main_MPI.f90 +++ b/f90/MPI/Main_MPI.f90 @@ -651,6 +651,8 @@ Subroutine Master_job_fwdPred(sigma,d1,eAll,comm,trial) end if end if + modem_ctx % comm_current = comm_current + if (present(trial)) then trial_lcl = trial else @@ -659,16 +661,12 @@ Subroutine Master_job_fwdPred(sigma,d1,eAll,comm,trial) ! First, distribute the current model to all workers call Master_job_Distribute_Model(sigma) + ! call Master_job_Distribute_Data(d1) if(.not. eAll%allocated) then - ! call deall(eAll) - ! 
end if - call create_solnVectorMTX(d1%nTx,eAll) - do iTx=1,nTx - call create_solnVector(grid,iTx,e0) - call copy_solnVector(eAll%solns(iTx),e0) - end do + call EsMgr_create_solnVectorMTX(eAll, d1 % nTx, grid=grid) end if + job_name= 'FORWARD' call Master_job_Distribute_Taskes(job_name,nTx,sigma,eAll,comm_current, trial=trial_lcl) @@ -681,16 +679,20 @@ Subroutine Master_job_fwdPred(sigma,d1,eAll,comm,trial) Call FaceArea(grid, S_F) ! Compute the model Responces - do iTx=1,nTx - do i = 1,d1%d(iTx)%nDt - d1%d(iTx)%data(i)%errorBar = .false. - iDt = d1%d(iTx)%data(i)%dataType - do j = 1,d1%d(iTx)%data(i)%nSite - call dataResp(eAll%solns(iTx),sigma,iDt,d1%d(iTx)%data(i)%rx(j),d1%d(iTx)%data(i)%value(:,j), & - d1%d(iTx)%data(i)%orient(j)) + if (EsMgr_save_in_file) then + call Master_job_DataResp(nTx, sigma, d1, trial_lcl) + else + do iTx=1,nTx + do i = 1,d1%d(iTx)%nDt + d1%d(iTx)%data(i)%errorBar = .false. + iDt = d1%d(iTx)%data(i)%dataType + do j = 1,d1%d(iTx)%data(i)%nSite + call dataResp(eAll%solns(iTx),sigma,iDt,d1%d(iTx)%data(i)%rx(j),d1%d(iTx)%data(i)%value(:,j), & + d1%d(iTx)%data(i)%orient(j)) + end do end do end do - end do + end if ! clean up the grid elements stored in GridCalc on the master node call deall_rvector(l_E) call deall_rvector(S_F) @@ -1067,6 +1069,8 @@ Subroutine Master_job_JmultT(sigma,d,dsigma,eAll,s_hat,comm,use_starting_guess) endif end if + modem_ctx % comm_current = comm_current + if (present(use_starting_guess)) then use_starting_guess_lcl = use_starting_guess else @@ -1093,7 +1097,6 @@ Subroutine Master_job_JmultT(sigma,d,dsigma,eAll,s_hat,comm,use_starting_guess) end do end if - if (returne_m_vectors) then if (.not. 
associated(s_hat)) then allocate(s_hat(nTx)) @@ -1123,18 +1126,20 @@ Subroutine Master_job_JmultT(sigma,d,dsigma,eAll,s_hat,comm,use_starting_guess) file_name='e.soln' !call write_solnVectorMTX(20,file_name,eAll_out) - do iper=1,nTx - !e0=eAll%solns(iper) - !e =eAll_out%solns(iper) - call PmultT(eAll_temp%solns(iper),sigma,eAll_out%solns(iper) & - & ,dsigma_temp) - call QmultT(eAll_temp%solns(iper),sigma,d%d(iper),Qcomb) - call scMultAdd(ONE,Qcomb,dsigma_temp) - if (returne_m_vectors) then - s_hat(iper)=dsigma_temp - end if - call linComb_modelParam(ONE,dsigma,ONE,dsigma_temp,dsigma) - end do + if (EsMgr_save_in_file) then + call Master_job_PQMult(nTx, sigma, dsigma, use_starting_guess=use_starting_guess_lcl) + else + do iper=1,nTx + call PmultT(eAll_temp%solns(iper),sigma,eAll_out%solns(iper) & + & ,dsigma_temp) + call QmultT(eAll_temp%solns(iper),sigma,d%d(iper),Qcomb) + call scMultAdd(ONE,Qcomb,dsigma_temp) + if (returne_m_vectors) then + s_hat(iper)=dsigma_temp + end if + call linComb_modelParam(ONE,dsigma,ONE,dsigma_temp,dsigma) + end do + end if endtime=MPI_Wtime() time_used = endtime-starttime @@ -1832,10 +1837,7 @@ subroutine Master_job_Distribute_Taskes(job_name,nTx,sigma,eAll_out, & which_per=per_index do ipol1=1,nPol_MPI which_pol=ipol1 - call create_e_param_place_holder(eAll_in%solns(which_per)) - call Pack_e_para_vec(eAll_in%solns(which_per)) - call MPI_SEND(e_para_vec, Nbytes, MPI_PACKED, who, & - & FROM_MASTER, comm_current, ierr) + call EsMgr_save(eAll_in % solns(which_per), to=who) end do end if write(ioMPI,'(a10,a16,i5,a8,i5,a11,i5)')trim(job_name), & @@ -1861,14 +1863,8 @@ subroutine Master_job_Distribute_Taskes(job_name,nTx,sigma,eAll_out, & who=worker_job_task%taskid which_per=worker_job_task%per_index which_pol=worker_job_task%pol_index - - call create_e_param_place_holder(eAll_out%solns(which_per)) - call MPI_RECV(e_para_vec, Nbytes, MPI_PACKED, who,FROM_WORKER, & - & comm_current, STATUS, ierr) - ! 
call get_nPol_MPI(eAll_out%solns(which_per)) - ! if (nPol_MPI==1) which_pol=1 - call Unpack_e_para_vec(eAll_out%solns(which_per)) + call EsMgr_get(eAll_out % solns(which_per), which_pol, 1, from=who) write(ioMPI,'(a10,a16,i5,a8,i5,a11,i5)')trim(job_name) , & & ': Receive Per # ',which_per ,' and Pol # ', which_pol ,' from ',& @@ -1926,11 +1922,7 @@ subroutine Master_job_Distribute_Taskes(job_name,nTx,sigma,eAll_out, & call get_nPol_MPI(eAll_out%solns(per_index)) do ipol1=1,nPol_MPI which_pol=ipol1 - call create_e_param_place_holder(eAll_in%solns( & - & which_per)) - call Pack_e_para_vec(eAll_in%solns(which_per)) - call MPI_SEND(e_para_vec, Nbytes, MPI_PACKED, & - & who, FROM_MASTER, comm_current, ierr) + call EsMgr_save(eAll_in % solns(which_per), who) end do end if write(ioMPI,'(a10,a16,i5,a8,i5,a11,i5)')trim(job_name), & @@ -2121,6 +2113,11 @@ Subroutine Worker_job(sigma,d) write(6,'(a12,a12,a30,a16,i5)') node_info,' MPI TASK [', & & trim(worker_job_task%what_to_do),'] received from ', & & STATUS(MPI_SOURCE) + + modem_ctx % comm_current = comm_current + modem_ctx % rank_current = rank_current + trial = worker_job_task % trial + ! for debug ! write(6,*) 'source = ', MPI_SOURCE ! write(6,*) 'tag = ', MPI_TAG @@ -2180,13 +2177,13 @@ Subroutine Worker_job(sigma,d) call Pack_worker_job_task call MPI_SEND(worker_job_package,Nbytes, MPI_PACKED,0, & & FROM_WORKER, comm_current, ierr) - ! Create e0_temp package (one Period and one Polarization) - ! and send it to the master + ! Use EsMgr_save to send e0 back to main task or save it to disk which_pol=1 - call create_e_param_place_holder(e0) - call Pack_e_para_vec(e0) - call MPI_SEND(e_para_vec, Nbytes, MPI_PACKED, 0, & - & FROM_WORKER, comm_current, ierr) + if (trial) then + call EsMgr_save(e0, to=0, prefix='.trial') + else + call EsMgr_save(e0, to=0) + end if end if ! so long! 
call reset_e_soln(e0) @@ -2214,8 +2211,10 @@ Subroutine Worker_job(sigma,d) do pol_index = 1, get_nPol(per_index) if (worker_job_task % trial) then + write(0,*) "Reading the trial" call EsMgr_get(e0, e0 % tx, pol_index=pol_index, prefix='.trial') else + write(0,*) "Not reading the trial" call EsMgr_get(e0, e0 % tx, pol_index=pol_index) endif end do @@ -2485,10 +2484,11 @@ Subroutine Worker_job(sigma,d) & ' Start Receiving ' , orginal_nPol, ' from Master' do ipol=1,nPol_MPI which_pol=ipol - call create_e_param_place_holder(e0) - call MPI_RECV(e_para_vec, Nbytes, MPI_PACKED, 0, & - & FROM_MASTER,comm_current, STATUS, ierr) - call Unpack_e_para_vec(e0) + if (worker_job_task % trial) then + call EsMgr_get(e0, per_index, pol_index=ipol, from=0, prefix='.trial') + else + call EsMgr_get(e0, per_index, pol_index=ipol, from=0) + end if end do call initSolverWithOutE0(per_index,sigma,grid,size_local,& e,comb) @@ -2532,11 +2532,7 @@ Subroutine Worker_job(sigma,d) call MPI_SEND(worker_job_package,Nbytes, MPI_PACKED,0, & & FROM_WORKER, comm_current, ierr) which_pol=1 - call create_e_param_place_holder(e) - call Pack_e_para_vec(e) - call MPI_SEND(e_para_vec, Nbytes, MPI_PACKED, 0, & - & FROM_WORKER, comm_current, ierr) - !deallocate(e_para_vec,worker_job_package) + call EsMgr_save(e, to=0, prefix=".JmultT") end if ! hasta la vista! now = MPI_Wtime() diff --git a/f90/Mod3DMT.f90 b/f90/Mod3DMT.f90 index 1c46b911..fa6af649 100755 --- a/f90/Mod3DMT.f90 +++ b/f90/Mod3DMT.f90 @@ -62,7 +62,13 @@ program Mod3DMT call initGlobalData(cUserDef) ! set the grid for the numerical computations #ifdef MPI - call setGrid_MPI(grid) + call setGrid_MPI(grid) + + call EsMgr_init(grid, context=modem_ctx, & + save_in_file=cUserDef % storeSolnsInFile, & + prefix=cUserDef % prefix, & + ftype=FTYPE_BINARY) + ! Check if a large grid file with E field is defined: ! NOTE: right now both grids share the same transmitters. ! 
This why, reading and setting the large grid and its E solution comes after setting the trasnmitters Dictionary. From 593076d3610d5a10abb08974f06d8d76aac65874 Mon Sep 17 00:00:00 2001 From: Miles Curry <2590700+MiCurry@users.noreply.github.com> Date: Mon, 9 Mar 2026 09:59:07 -0600 Subject: [PATCH 12/21] Allow ESolnManager to work with SP2 FG This commit updates the Main_MPI code that was added in previous commits to allow the new MPI code associated with the ESolnManager to work with SP2 FG. To do this, it updates the DataResp and the PQMult to not send their MPI send or receives on the MPI_COMM_WORLD but on either comm_world or comm_leader depending on the para_method. For both the DATARESP and the JQMULT, it performs both by sending the work to the leader tasks. Because these tasks are rather simple and fast, the leader tasks do not send the worker tasks anything. When running with FG on equal processor counts with or without -P gives bit-for-bit result. It appears that the running with FG with different processor counts does not give bit-for-bit results, this may be an issue before this task. Running without FG at different processor counts gives the same results, but there are difference when running with and without FG. I'll open and investigate that issue further. --- f90/MPI/Main_MPI.f90 | 66 ++++++++++++++++++++++++++++++-------------- 1 file changed, 45 insertions(+), 21 deletions(-) diff --git a/f90/MPI/Main_MPI.f90 b/f90/MPI/Main_MPI.f90 index a8076ea8..ec731661 100644 --- a/f90/MPI/Main_MPI.f90 +++ b/f90/MPI/Main_MPI.f90 @@ -680,7 +680,7 @@ Subroutine Master_job_fwdPred(sigma,d1,eAll,comm,trial) ! 
Compute the model Responces if (EsMgr_save_in_file) then - call Master_job_DataResp(nTx, sigma, d1, trial_lcl) + call Master_job_DataResp(nTx, sigma, d1, trial_lcl, comm_leader) else do iTx=1,nTx do i = 1,d1%d(iTx)%nDt @@ -706,7 +706,7 @@ end subroutine Master_job_fwdPred !---------------------------------------------------------------------------- !########################## Master_job_DataResp ############################ -subroutine Master_job_DataResp(nTx, sigma, d, trial) +subroutine Master_job_DataResp(nTx, sigma, d, trial, comm) implicit none @@ -714,12 +714,14 @@ subroutine Master_job_DataResp(nTx, sigma, d, trial) type (modelParam_t), intent(in) :: sigma type (dataVectorMTX_t), intent(inout) :: d logical, intent(in), optional :: trial + integer, intent(in), optional :: comm character (len=*), parameter :: JOB_NAME = "DATARESP" integer :: dest, nTasks, remainder, iTx integer :: iTx_min, iTx_max, i, j, k logical :: trial_lcl + integer :: comm_current if (present(trial)) then trial_lcl = trial @@ -727,13 +729,21 @@ subroutine Master_job_DataResp(nTx, sigma, d, trial) trial_lcl = .false. 
endif + if (para_method.eq.0) then + comm_current = comm_world + else + comm_current = comm_leader + end if + + modem_ctx % comm_current = comm + call create_worker_job_task_place_holder - nTasks = nTx / number_of_workers - remainder = modulo(nTx, number_of_Workers) + nTasks = nTx / size_leader + remainder = modulo(nTx, size_leader) iTx_max = 0 - do dest = 1, number_of_workers + do dest = 1, size_leader iTx_min = iTx_max + 1 iTx_max = iTx_min + nTasks - 1 @@ -749,15 +759,15 @@ subroutine Master_job_DataResp(nTx, sigma, d, trial) worker_job_task % trial = trial_lcl call Pack_worker_job_task - call MPI_Send(worker_job_package, Nbytes, MPI_PACKED, dest, FROM_MASTER, MPI_COMM_WORLD, ierr) + call MPI_Send(worker_job_package, Nbytes, MPI_PACKED, dest, FROM_MASTER, comm_current, ierr) write(ioMPI, '(a10,a16,i5,a8,i5,a11,i5)') trim(job_name), ': Send Per from ', iTx_min, ' to', iTx_max, ' to ', dest end if end do - remainder = modulo(nTx, number_of_workers) + remainder = modulo(nTx, size_leader) iTx_max = 0 - do dest = 1, number_of_workers + do dest = 1, size_leader iTx_min = iTx_max + 1 iTx_max = iTx_min + nTasks - 1 @@ -768,11 +778,9 @@ subroutine Master_job_DataResp(nTx, sigma, d, trial) if (iTx_max >= iTx_min) then call create_data_vec_place_holder(d, start_iTx=iTx_min, end_iTx=iTx_max) - call MPI_Recv(data_para_vec, Nbytes, MPI_PACKED, dest, FROM_WORKER, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ierr) + call MPI_Recv(data_para_vec, Nbytes, MPI_PACKED, dest, FROM_WORKER, comm_current, MPI_STATUS_IGNORE, ierr) call UnPack_data_para_vec(d, start_iTx=iTx_min, end_iTx=iTx_max) end if - - end do end subroutine Master_job_DataResp @@ -1176,6 +1184,7 @@ subroutine Master_job_PQMult(nTx, sigma, dsigma, use_starting_guess) integer :: dest, nTasks, remainder, iTx integer :: iTx_min, iTx_max, i, j, k logical :: flag + integer :: comm_current logical, dimension(number_of_workers) :: task_is_working logical, dimension(nTx) :: transmitters_processing, transmitters_done @@ -1188,6 +1197,14 
@@ subroutine Master_job_PQMult(nTx, sigma, dsigma, use_starting_guess) use_starting_guess_lcl = .false. end if + if (para_method.eq.0) then + comm_current = comm_world + else + comm_current = comm_leader + end if + + modem_ctx % comm_current = comm_current + sending = .true. task_is_working(:) = .false. @@ -1204,7 +1221,7 @@ subroutine Master_job_PQMult(nTx, sigma, dsigma, use_starting_guess) cycle end if - do dest = 1, number_of_workers + do dest = 1, size_leader if (task_is_working(dest)) then cycle end if @@ -1217,7 +1234,7 @@ subroutine Master_job_PQMult(nTx, sigma, dsigma, use_starting_guess) call create_worker_job_task_place_holder call Pack_worker_job_task - call MPI_Send(worker_job_package, Nbytes, MPI_PACKED, dest, FROM_MASTER, MPI_COMM_WORLD, ierr) + call MPI_Send(worker_job_package, Nbytes, MPI_PACKED, dest, FROM_MASTER, comm_current, ierr) transmitters_processing(iTx) = .true. task_is_working(dest) = .true. exit @@ -1226,11 +1243,11 @@ subroutine Master_job_PQMult(nTx, sigma, dsigma, use_starting_guess) ! Recv any jobs ! See if anyone is sending us a message... - call MPI_Iprobe(MPI_ANY_SOURCE, FROM_WORKER, MPI_COMM_WORLD, flag, MPI_STATUS_IGNORE, ierr) + call MPI_Iprobe(MPI_ANY_SOURCE, FROM_WORKER, comm_current, flag, MPI_STATUS_IGNORE, ierr) if (flag) then ! 
Someone is sending us a message call create_worker_job_task_place_holder - call MPI_Recv(worker_job_package, Nbytes, MPI_PACKED, MPI_ANY_SOURCE, FROM_WORKER, MPI_COMM_WORLD, STATUS, ierr) + call MPI_Recv(worker_job_package, Nbytes, MPI_PACKED, MPI_ANY_SOURCE, FROM_WORKER, comm_current, STATUS, ierr) call Unpack_worker_job_task dest = worker_job_task % taskid @@ -1240,7 +1257,7 @@ subroutine Master_job_PQMult(nTx, sigma, dsigma, use_starting_guess) call zero(dsigma_recv(iTx)) call create_model_param_place_holder(dsigma_recv(iTx)) - call MPI_Recv(sigma_para_vec, Nbytes, MPI_PACKED, dest, FROM_WORKER, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ierr) + call MPI_Recv(sigma_para_vec, Nbytes, MPI_PACKED, dest, FROM_WORKER, comm_current, MPI_STATUS_IGNORE, ierr) call Unpack_model_para_values(dsigma_recv(iTx)) transmitters_done(iTx) = .true. @@ -1775,6 +1792,7 @@ subroutine Master_job_Distribute_Taskes(job_name,nTx,sigma,eAll_out, & trial_lcl = .false. end if + call get_nPol_MPI(eAll_out%solns(1)) if (rank_local.eq.-1) then ! first run! ! 
run initial regroup -- note this requires the comm to be @@ -1788,6 +1806,7 @@ subroutine Master_job_Distribute_Taskes(job_name,nTx,sigma,eAll_out, & endif call MPI_COMM_SIZE( comm_current, size_current, ierr ) who = 0 + worker_job_task%what_to_do=trim(job_name) worker_job_task%trial=trial_lcl call count_number_of_messages_to_RECV(eAll_out) @@ -1864,6 +1883,7 @@ subroutine Master_job_Distribute_Taskes(job_name,nTx,sigma,eAll_out, & which_per=worker_job_task%per_index which_pol=worker_job_task%pol_index + modem_ctx % comm_current = comm_current call EsMgr_get(eAll_out % solns(which_per), which_pol, 1, from=who) write(ioMPI,'(a10,a16,i5,a8,i5,a11,i5)')trim(job_name) , & @@ -1950,6 +1970,7 @@ subroutine Master_job_Distribute_Taskes(job_name,nTx,sigma,eAll_out, & end subroutine Master_job_Distribute_Taskes + !################## find next job -- from back or front ################## Subroutine find_next_job(nTx,total_jobs,counter,fromhead, eAll_out, & & per_index,pol_index) @@ -2054,8 +2075,6 @@ Subroutine Worker_job(sigma,d) type (modelParam_t) :: dsigma_temp, dsigma_send, Qcomb logical :: trial - - ! time DOUBLE PRECISION :: time_passed, now, just DOUBLE PRECISION, pointer,dimension(:) :: time_buff @@ -2127,9 +2146,14 @@ Subroutine Worker_job(sigma,d) ! '; several TX = ',worker_job_task%several_Tx,']' if (trim(worker_job_task%what_to_do) .eq. 'FORWARD') then + ! forward modelling per_index=worker_job_task%per_index pol_index=worker_job_task%pol_index + + modem_ctx % comm_current = comm_current + modem_ctx % rank_current = rank_current + if ((size_local.gt.1).and.(para_method.gt.0).and. & & (rank_local.eq.0)) then ! 
group leader passing the command to workers @@ -2195,7 +2219,6 @@ Subroutine Worker_job(sigma,d) start_iTx = worker_job_task % per_index end_iTx = worker_job_task % pol_index - worker_job_task % taskid = taskid call zero_solnvector(e0) @@ -2234,7 +2257,7 @@ Subroutine Worker_job(sigma,d) call create_data_vec_place_holder(d, start_iTx=start_iTx, end_iTx=end_iTx) call Pack_data_para_vec(d, start_iTx=start_iTx, end_iTx=end_iTx) - call MPI_Send(data_para_vec, NBytes, MPI_PACKED, 0, FROM_WORKER, MPI_COMM_WORLD, ierr) + call MPI_Send(data_para_vec, NBytes, MPI_PACKED, 0, FROM_WORKER, comm_current, ierr) deallocate(data_para_vec) data_para_vec => null() @@ -2286,7 +2309,7 @@ Subroutine Worker_job(sigma,d) call create_model_param_place_holder(dsigma_temp) call pack_model_para_values(dsigma_temp) - call MPI_Send(sigma_para_vec, NBytes, MPI_PACKED, 0, FROM_WORKER, MPI_COMM_WORLD, ierr) + call MPI_Send(sigma_para_vec, NBytes, MPI_PACKED, 0, FROM_WORKER, comm_current, ierr) call deall(QComb) call deall(dsigma_temp) call deall(dsigma_send) @@ -2764,6 +2787,7 @@ Subroutine Worker_job(sigma,d) call Pack_worker_job_task call MPI_SEND(worker_job_package,Nbytes, MPI_PACKED,0, & & FROM_WORKER, comm_current, ierr) + elseif (trim(worker_job_task%what_to_do) .eq. 'REGROUP') then ! calculate the time between two regroup events if ((size_local.gt.1).and.(para_method.gt.0).and. & From 87d58670f2430d5d796d5e7874a71502f5bc6876 Mon Sep 17 00:00:00 2001 From: Miles Curry <2590700+MiCurry@users.noreply.github.com> Date: Thu, 12 Mar 2026 14:51:29 -0600 Subject: [PATCH 13/21] Correct error in DataResp that caused an error with 2, 3, 4 procs The last commit to allow SP2 to work broke the DataResp when using 2, 3 , and 4 procs, and potentially others. The culprit was using size_leader instead of using size_leader - 1 in places. 
--- f90/MPI/Main_MPI.f90 | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/f90/MPI/Main_MPI.f90 b/f90/MPI/Main_MPI.f90 index ec731661..7737613f 100644 --- a/f90/MPI/Main_MPI.f90 +++ b/f90/MPI/Main_MPI.f90 @@ -739,11 +739,11 @@ subroutine Master_job_DataResp(nTx, sigma, d, trial, comm) call create_worker_job_task_place_holder - nTasks = nTx / size_leader - remainder = modulo(nTx, size_leader) + nTasks = nTx / (size_leader - 1) + remainder = modulo(nTx, size_leader - 1) iTx_max = 0 - do dest = 1, size_leader + do dest = 1, size_leader - 1 iTx_min = iTx_max + 1 iTx_max = iTx_min + nTasks - 1 @@ -764,7 +764,7 @@ subroutine Master_job_DataResp(nTx, sigma, d, trial, comm) end if end do - remainder = modulo(nTx, size_leader) + remainder = modulo(nTx, size_leader - 1) iTx_max = 0 do dest = 1, size_leader From 5dfdd81ec43f4e3c8631c9520ec53ddd14b243b9 Mon Sep 17 00:00:00 2001 From: Miles Curry <2590700+MiCurry@users.noreply.github.com> Date: Thu, 12 Mar 2026 15:01:09 -0600 Subject: [PATCH 14/21] Remove comm input from Master_job_DataResp --- f90/MPI/Main_MPI.f90 | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/f90/MPI/Main_MPI.f90 b/f90/MPI/Main_MPI.f90 index 7737613f..1f9abaf7 100644 --- a/f90/MPI/Main_MPI.f90 +++ b/f90/MPI/Main_MPI.f90 @@ -680,7 +680,7 @@ Subroutine Master_job_fwdPred(sigma,d1,eAll,comm,trial) ! 
Compute the model Responces if (EsMgr_save_in_file) then - call Master_job_DataResp(nTx, sigma, d1, trial_lcl, comm_leader) + call Master_job_DataResp(nTx, sigma, d1, trial_lcl) else do iTx=1,nTx do i = 1,d1%d(iTx)%nDt @@ -706,7 +706,7 @@ end subroutine Master_job_fwdPred !---------------------------------------------------------------------------- !########################## Master_job_DataResp ############################ -subroutine Master_job_DataResp(nTx, sigma, d, trial, comm) +subroutine Master_job_DataResp(nTx, sigma, d, trial) implicit none @@ -714,7 +714,6 @@ subroutine Master_job_DataResp(nTx, sigma, d, trial, comm) type (modelParam_t), intent(in) :: sigma type (dataVectorMTX_t), intent(inout) :: d logical, intent(in), optional :: trial - integer, intent(in), optional :: comm character (len=*), parameter :: JOB_NAME = "DATARESP" @@ -735,7 +734,7 @@ subroutine Master_job_DataResp(nTx, sigma, d, trial, comm) comm_current = comm_leader end if - modem_ctx % comm_current = comm + modem_ctx % comm_current = comm_current call create_worker_job_task_place_holder From a6c8f02aa77d4b2190fce89f58b92f5b490f2af5 Mon Sep 17 00:00:00 2001 From: Miles Curry <2590700+MiCurry@users.noreply.github.com> Date: Fri, 20 Mar 2026 10:18:41 -0600 Subject: [PATCH 15/21] Actually fix error with 2,3,4 procs --- f90/MPI/Main_MPI.f90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/f90/MPI/Main_MPI.f90 b/f90/MPI/Main_MPI.f90 index 1f9abaf7..05dee30a 100644 --- a/f90/MPI/Main_MPI.f90 +++ b/f90/MPI/Main_MPI.f90 @@ -766,7 +766,7 @@ subroutine Master_job_DataResp(nTx, sigma, d, trial) remainder = modulo(nTx, size_leader - 1) iTx_max = 0 - do dest = 1, size_leader + do dest = 1, size_leader - 1 iTx_min = iTx_max + 1 iTx_max = iTx_min + nTasks - 1 @@ -1220,7 +1220,7 @@ subroutine Master_job_PQMult(nTx, sigma, dsigma, use_starting_guess) cycle end if - do dest = 1, size_leader + do dest = 1, size_leader - 1 if (task_is_working(dest)) then cycle end if From 
1a7f459c4c8784f45b7bb0230527569166f35cb6 Mon Sep 17 00:00:00 2001 From: Miles Curry <2590700+MiCurry@users.noreply.github.com> Date: Fri, 20 Mar 2026 15:38:22 -0600 Subject: [PATCH 16/21] Use size_current instead of size_leader --- f90/MPI/Main_MPI.f90 | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/f90/MPI/Main_MPI.f90 b/f90/MPI/Main_MPI.f90 index 05dee30a..34f998f1 100644 --- a/f90/MPI/Main_MPI.f90 +++ b/f90/MPI/Main_MPI.f90 @@ -721,6 +721,7 @@ subroutine Master_job_DataResp(nTx, sigma, d, trial) integer :: iTx_min, iTx_max, i, j, k logical :: trial_lcl integer :: comm_current + integer :: size_current if (present(trial)) then trial_lcl = trial @@ -735,6 +736,7 @@ subroutine Master_job_DataResp(nTx, sigma, d, trial) end if modem_ctx % comm_current = comm_current + call MPI_COMM_SIZE( comm_current, size_current, ierr ) call create_worker_job_task_place_holder @@ -742,7 +744,7 @@ subroutine Master_job_DataResp(nTx, sigma, d, trial) remainder = modulo(nTx, size_leader - 1) iTx_max = 0 - do dest = 1, size_leader - 1 + do dest = 1, size_current - 1 iTx_min = iTx_max + 1 iTx_max = iTx_min + nTasks - 1 @@ -763,10 +765,10 @@ subroutine Master_job_DataResp(nTx, sigma, d, trial) end if end do - remainder = modulo(nTx, size_leader - 1) + remainder = modulo(nTx, size_current - 1) iTx_max = 0 - do dest = 1, size_leader - 1 + do dest = 1, size_current - 1 iTx_min = iTx_max + 1 iTx_max = iTx_min + nTasks - 1 @@ -1183,7 +1185,7 @@ subroutine Master_job_PQMult(nTx, sigma, dsigma, use_starting_guess) integer :: dest, nTasks, remainder, iTx integer :: iTx_min, iTx_max, i, j, k logical :: flag - integer :: comm_current + integer :: comm_current, size_current logical, dimension(number_of_workers) :: task_is_working logical, dimension(nTx) :: transmitters_processing, transmitters_done @@ -1203,6 +1205,7 @@ subroutine Master_job_PQMult(nTx, sigma, dsigma, use_starting_guess) end if modem_ctx % comm_current = comm_current + call MPI_COMM_SIZE( 
comm_current, size_current, ierr ) sending = .true. @@ -1220,7 +1223,7 @@ subroutine Master_job_PQMult(nTx, sigma, dsigma, use_starting_guess) cycle end if - do dest = 1, size_leader - 1 + do dest = 1, size_current - 1 if (task_is_working(dest)) then cycle end if @@ -1243,7 +1246,6 @@ subroutine Master_job_PQMult(nTx, sigma, dsigma, use_starting_guess) ! Recv any jobs ! See if anyone is sending us a message... call MPI_Iprobe(MPI_ANY_SOURCE, FROM_WORKER, comm_current, flag, MPI_STATUS_IGNORE, ierr) - if (flag) then ! Someone is sending us a message call create_worker_job_task_place_holder call MPI_Recv(worker_job_package, Nbytes, MPI_PACKED, MPI_ANY_SOURCE, FROM_WORKER, comm_current, STATUS, ierr) @@ -2249,7 +2251,6 @@ Subroutine Worker_job(sigma,d) call dataResp(e0, sigma, iDt, d % d(per_index) % data(i) % rx(j), & d % d(per_index) % data(i) % value(:,j), & d % d(per_index) % data(i) % orient(j)) - end do end do end do From 301f9bec9b38628fa78a41700657d56245f93f23 Mon Sep 17 00:00:00 2001 From: Miles Curry <2590700+MiCurry@users.noreply.github.com> Date: Mon, 23 Mar 2026 11:14:17 -0600 Subject: [PATCH 17/21] Separate DataResp code into separate functions This commit responds to one of Hao's comments on the PR. It moves the DataResp logic into three new functions. These are: * Master_job_DataResp - Interface function that decides what function to call below if ESolnManager % EsMgr_save_in_file is true or false. * Master_job_DataResp_main_only - The original way to calculate dataresp. Only the main task calculates it. * Master_job_DataResp_IO - The workers compute the dataresp for a range of transmitters and read the electric fields from files. 
--- f90/MPI/Main_MPI.f90 | 120 +++++++++++++++++++++++++++++++++---------- 1 file changed, 93 insertions(+), 27 deletions(-) diff --git a/f90/MPI/Main_MPI.f90 b/f90/MPI/Main_MPI.f90 index 34f998f1..9cecc0ca 100644 --- a/f90/MPI/Main_MPI.f90 +++ b/f90/MPI/Main_MPI.f90 @@ -30,6 +30,9 @@ module Main_MPI type(rhsVector_t) , save, private :: b0,comb type(grid_t), target, save, private :: grid + private Master_job_DataResp_main_only + private Master_job_DataResp_IO + Contains @@ -670,32 +673,9 @@ Subroutine Master_job_fwdPred(sigma,d1,eAll,comm,trial) job_name= 'FORWARD' call Master_job_Distribute_Taskes(job_name,nTx,sigma,eAll,comm_current, trial=trial_lcl) - ! Initialize only those grid elements on the master that are used in - ! EMfieldInterp - ! (obviously a quick patch, needs to be fixed in a major way) - ! A.Kelbert 2018-01-28 - - Call EdgeLength(grid, l_E) - Call FaceArea(grid, S_F) + ! Calculate Data Response + call Master_job_DataResp(nTx, sigma, eAll, d1, trial=trial_lcl) - ! Compute the model Responces - if (EsMgr_save_in_file) then - call Master_job_DataResp(nTx, sigma, d1, trial_lcl) - else - do iTx=1,nTx - do i = 1,d1%d(iTx)%nDt - d1%d(iTx)%data(i)%errorBar = .false. - iDt = d1%d(iTx)%data(i)%dataType - do j = 1,d1%d(iTx)%data(i)%nSite - call dataResp(eAll%solns(iTx),sigma,iDt,d1%d(iTx)%data(i)%rx(j),d1%d(iTx)%data(i)%value(:,j), & - d1%d(iTx)%data(i)%orient(j)) - end do - end do - end do - end if - ! clean up the grid elements stored in GridCalc on the master node - call deall_rvector(l_E) - call deall_rvector(S_F) write(ioMPI,*)'FWD: Finished calculating for (', nTx , ') Transmitters ' endtime=MPI_Wtime() time_used = endtime-starttime @@ -706,7 +686,93 @@ end subroutine Master_job_fwdPred !---------------------------------------------------------------------------- !########################## Master_job_DataResp ############################ -subroutine Master_job_DataResp(nTx, sigma, d, trial) +! 
Compute the data response by either using Master_job_DataResp_main_only +! or Master_job_DataResp_IO: +! +! * Master_job_DataResp_main_only - Runs if EsMgr_save_in_file == .false. +! * Master_job_DataResp_IO - Runs if EsMgr_save_in_file == .true. +! +subroutine Master_job_DataResp(nTx, sigma, eAll, d1, trial) + + implicit none + + integer, intent(in) :: nTx + type(modelParam_t), intent(in) :: sigma + type(solnVectorMTX_t), intent(in) :: eAll + type(dataVectorMTX_t), intent(inout) :: d1 + logical, optional, intent(in) :: trial + + integer :: iTx, i, j + integer :: iDt + logical :: trial_lcl + + if (present(trial)) then + trial_lcl = trial + else + trial_lcl = .false. + end if + + if (EsMgr_save_in_file) then + ! Use the worker jobs to calculate the data response by reading + ! the electric field files + call Master_job_DataResp_IO(nTx, sigma, d1, trial_lcl) + else + ! The main task only computes the data response + call Master_job_DataResp_main_only(nTx, sigma, d1, eAll) + end if + +end subroutine Master_job_DataResp + +! Master_job_DataResp_main_only - Calculate the data response on +! the main task only. The main task will need to have eAll completely +! present. +! +! Run this function if EsMgr_save_in_file == .false. +subroutine Master_job_DataResp_main_only(nTx, sigma, d1, eAll) + + implicit none + + integer, intent(in) :: nTx + type (modelParam_t), intent(in) :: sigma + type (dataVectorMTX_t), intent(inout) :: d1 + type(solnVectorMTX_t), intent(in) :: eAll + + integer :: iTx, i, j + integer :: iDt + logical :: trial_lcl + + ! Initialize only those grid elements on the master that are used in + ! EMfieldInterp + ! (obviously a quick patch, needs to be fixed in a major way) + ! A.Kelbert 2018-01-28 + call EdgeLength(grid, l_E) + call FaceArea(grid, S_F) + + do iTx=1,nTx + do i = 1,d1%d(iTx)%nDt + d1%d(iTx)%data(i)%errorBar = .false. 
+ iDt = d1%d(iTx)%data(i)%dataType + do j = 1,d1%d(iTx)%data(i)%nSite + call dataResp(eAll%solns(iTx),sigma,iDt,d1%d(iTx)%data(i)%rx(j),d1%d(iTx)%data(i)%value(:,j), & + d1%d(iTx)%data(i)%orient(j)) + end do + end do + end do + + ! clean up the grid elements stored in GridCalc on the master node + call deall_rvector(l_E) + call deall_rvector(S_F) + +end subroutine Master_job_DataResp_main_only + +! Master_job_DataResp_IO +! +! Use the worker jobs to calculate the data response by +! reading in the electric fields from files created by the ESolnManager. +! +! This function should only be used if EsMgr_init was initialized with +! `save_in_file = .true.`/EsMgr_save_in_file = .true.. +subroutine Master_job_DataResp_IO(nTx, sigma, d, trial) implicit none @@ -784,7 +850,7 @@ subroutine Master_job_DataResp(nTx, sigma, d, trial) end if end do -end subroutine Master_job_DataResp +end subroutine Master_job_DataResp_IO !######################### Master_job_Compute_J ########################## From 4617773c6520fac3cbe6aa565e588d8685649f19 Mon Sep 17 00:00:00 2001 From: Miles Curry <2590700+MiCurry@users.noreply.github.com> Date: Mon, 23 Mar 2026 14:08:23 -0600 Subject: [PATCH 18/21] Add separate functions for PQMult routines This commit separates out the PQMult logic into three new functions: * Master_job_PQMult - Interface function that decides what function to call if EsMgr_save_in_file is either .true. or .false. * Master_job_PQMult_main_only - The original way that PQMult was calculated. It only calculates on the main task. * Master_job_PQMult_IO - The workers compute PQMult by reading in the electric fields and the adjoint fields. --- f90/MPI/Main_MPI.f90 | 119 ++++++++++++++++++++++++++++++++++------- 1 file changed, 100 insertions(+), 19 deletions(-) diff --git a/f90/MPI/Main_MPI.f90 b/f90/MPI/Main_MPI.f90 index 9cecc0ca..cf143f11 100644 --- a/f90/MPI/Main_MPI.f90 +++ b/f90/MPI/Main_MPI.f90 @@ -727,7 +727,7 @@ end subroutine Master_job_DataResp ! the main task only. 
The main task will need to have eAll completly ! present. ! -! Run this function is EsMgr_save_in_file == .false. +! Run this function if EsMgr_save_in_file == .false. subroutine Master_job_DataResp_main_only(nTx, sigma, d1, eAll) implicit none @@ -1201,20 +1201,8 @@ Subroutine Master_job_JmultT(sigma,d,dsigma,eAll,s_hat,comm,use_starting_guess) file_name='e.soln' !call write_solnVectorMTX(20,file_name,eAll_out) - if (EsMgr_save_in_file) then - call Master_job_PQMult(nTx, sigma, dsigma, use_starting_guess=use_starting_guess_lcl) - else - do iper=1,nTx - call PmultT(eAll_temp%solns(iper),sigma,eAll_out%solns(iper) & - & ,dsigma_temp) - call QmultT(eAll_temp%solns(iper),sigma,d%d(iper),Qcomb) - call scMultAdd(ONE,Qcomb,dsigma_temp) - if (returne_m_vectors) then - s_hat(iper)=dsigma_temp - end if - call linComb_modelParam(ONE,dsigma,ONE,dsigma_temp,dsigma) - end do - end if + ! Calculate PQMult + call Master_job_PQMult(nTx, d, eAll_temp, eAll_out, sigma, dsigma, s_hat, use_starting_guess=use_starting_guess_lcl) endtime=MPI_Wtime() time_used = endtime-starttime @@ -1235,12 +1223,104 @@ Subroutine Master_job_JmultT(sigma,d,dsigma,eAll,s_hat,comm,use_starting_guess) end Subroutine Master_job_JmultT -!######################## Master_job_PQMult ############################## -subroutine Master_job_PQMult(nTx, sigma, dsigma, use_starting_guess) +!########################## Master_job_PQMult ############################ +! Compute the model response by either using Master_job_PQMult_main_only +! or Master_job_PQMult_IO: +! +! * Master_job_PQMult_main_only - Runs if EsMgr_save_in_file == .false. +! * Master_job_PQMult_IO - Runs if EsMgr_save_in_file == .true. +! 
+subroutine Master_job_PQMult(nTx, d, eAll_temp, eAll_out, sigma, dsigma, s_hat, use_starting_guess) implicit none -; integer, intent(in) :: nTx + integer, intent(in) :: nTx + type(dataVectorMTX_t), intent(in) :: d + type(solnVectorMTX_t), intent(in) :: eAll_out + type(solnVectorMTX_t), intent(in) :: eAll_temp + type (modelParam_t), intent(in) :: sigma + type (modelParam_t), intent(inout) :: dsigma + type(modelParam_t),intent(inout),pointer,dimension(:), optional :: s_hat + logical, optional, intent(in) :: use_starting_guess + + logical :: use_starting_guess_lcl + + if (present(use_starting_guess)) then + use_starting_guess_lcl = use_starting_guess + else + use_starting_guess_lcl = .false. + end if + + if (EsMgr_save_in_file) then + call Master_job_PQMult_IO(nTx, sigma, dsigma, use_starting_guess=use_starting_guess_lcl) + else + call Master_job_PQMult_main_only(nTx, d, eAll_temp, eAll_out, sigma, dsigma, s_hat) + end if + +end subroutine Master_job_PQMult + + +! Master_job_PQMult_main_only - Calculate the model response on +! the main task only. The main task will need to have eAll_temp set to eAll. +! +! Run this function if EsMgr_save_in_file == .false. 
+subroutine Master_job_PQMult_main_only(nTx, d, eAll_temp, eAll_out, sigma, dsigma, s_hat) + + implicit none + + integer, intent(in) :: nTx + type(dataVectorMTX_t), intent(in) :: d + type(solnVectorMTX_t), intent(in) :: eAll_out + type(solnVectorMTX_t), intent(in) :: eAll_temp + type (modelParam_t), intent(in) :: sigma + type (modelParam_t), intent(inout) :: dsigma + type(modelParam_t),intent(inout),pointer,dimension(:), optional :: s_hat + + type(modelParam_t) :: dsigma_temp + type(modelParam_t) :: Qcomb + type(dataVectorMTX_t) :: d_temp + logical :: returne_m_vectors + integer :: iper + + returne_m_vectors = present(s_hat) + + dsigma_temp = sigma + dsigma = sigma + Qcomb = sigma + + call zero(dsigma_temp) + call zero(dsigma) + call zero(Qcomb) + + do iper=1,nTx + call PmultT(eAll_temp%solns(iper),sigma,eAll_out%solns(iper), dsigma_temp) + call QmultT(eAll_temp%solns(iper),sigma,d%d(iper),Qcomb) + call scMultAdd(ONE,Qcomb,dsigma_temp) + if (returne_m_vectors) then + s_hat(iper)=dsigma_temp + end if + call linComb_modelParam(ONE,dsigma,ONE,dsigma_temp,dsigma) + end do + + call deall_modelParam(dsigma_temp) + call deall_modelParam(Qcomb) + call deall_dataVectorMTX(d_temp) + +end subroutine Master_job_PQMult_main_only + +! Master_job_PQMult_IO +! +! Use the worker jobs to calculate the model response by +! reading in the electric fields from files created by the ESolnManager and +! the adjoint files. +! +! This function should only be used if EsMgr_init was initialized with +! `save_in_file = .true.`/EsMgr_save_in_file = .true.. 
+subroutine Master_job_PQMult_IO(nTx, sigma, dsigma, use_starting_guess) + + implicit none + + integer, intent(in) :: nTx type (modelParam_t), intent(in) :: sigma type (modelParam_t), intent(inout) :: dsigma logical, intent(in), optional :: use_starting_guess @@ -1342,7 +1422,8 @@ subroutine Master_job_PQMult(nTx, sigma, dsigma, use_starting_guess) call deall_modelParam(dsigma_recv(iTx)) end do -end subroutine Master_job_PQMult +end subroutine Master_job_PQMult_IO + !######################## Master_job_Jmult ############################## Subroutine Master_job_Jmult(mHat,m,d,eAll,comm) From 2c85c3147969f256b17e71ebddf92d8ac0e7f4cd Mon Sep 17 00:00:00 2001 From: Miles Curry <2590700+MiCurry@users.noreply.github.com> Date: Mon, 23 Mar 2026 14:54:27 -0600 Subject: [PATCH 19/21] Add timers around PQMult and DataResp --- f90/MPI/Main_MPI.f90 | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/f90/MPI/Main_MPI.f90 b/f90/MPI/Main_MPI.f90 index cf143f11..2564f64b 100644 --- a/f90/MPI/Main_MPI.f90 +++ b/f90/MPI/Main_MPI.f90 @@ -20,6 +20,7 @@ module Main_MPI use Sub_MPI use EsolnManager + use ModEM_timers ! use ioascii implicit none @@ -706,6 +707,9 @@ subroutine Master_job_DataResp(nTx, sigma, eAll, d1, trial) integer :: iDt logical :: trial_lcl + call ModEM_timers_create('DataResp') + call ModEM_timers_start('DataResp') + if (present(trial)) then trial_lcl = trial else @@ -721,6 +725,8 @@ subroutine Master_job_DataResp(nTx, sigma, eAll, d1, trial) call Master_job_DataResp_main_only(nTx, sigma, d1, eAll) end if + call ModEM_timers_stop('DataResp') + end subroutine Master_job_DataResp ! 
Master_job_DataResp_main_only - Calculate the data response on @@ -1245,6 +1251,9 @@ subroutine Master_job_PQMult(nTx, d, eAll_temp, eAll_out, sigma, dsigma, s_hat, logical :: use_starting_guess_lcl + call ModEM_timers_create('PQMult') + call ModEM_timers_start('PQMult') + if (present(use_starting_guess)) then use_starting_guess_lcl = use_starting_guess else @@ -1257,6 +1266,8 @@ subroutine Master_job_PQMult(nTx, d, eAll_temp, eAll_out, sigma, dsigma, s_hat, call Master_job_PQMult_main_only(nTx, d, eAll_temp, eAll_out, sigma, dsigma, s_hat) end if + call ModEM_timers_stop('PQMult') + end subroutine Master_job_PQMult From eba6a9cb8142b15f711c958b1ffb18378ad25dce Mon Sep 17 00:00:00 2001 From: Miles Curry <2590700+MiCurry@users.noreply.github.com> Date: Mon, 30 Mar 2026 23:10:35 -0600 Subject: [PATCH 20/21] Add ability to pass labels to esoln files This commit updates ModEM to allow the ability for labels to be passed to the esoln manager from either NLCG or LBFGS. First, this commit updates the ESolnManager so that EsMgr_save and EsMgr_get take in a label, a character string to label the current line search equation, and a field type, to label either JmultT or electric fields (FWD) solnVectors. The worker_job_task is updated to pass a new variable called label, which can be used by worker tasks to correctly label their calculations (specifically with the EsolnManager). The Main_MPI routines, the INVCore routines are updated to pass through labels, and then the line searches of both LBFGS and NLCG have been updated to use a label when calling either func or gradient. The labels represent the location of the line search, e.g. the trial, the minimum etc. In this commit I have labeled these as F_1, for the trial, and F, for the non trial. Depending on the line search, we label our forwards calculations as F_1 or F and then use these in the gradient calculation later on. These labels allow the ESolnManager to write out its fields accordingly. 
--- f90/3D_MT/ESolnManager.f90 | 149 +++++++++++++++++++++-------- f90/INV/INVcore.f90 | 32 +++---- f90/INV/LBFGS.f90 | 119 +++++++++++++---------- f90/INV/NLCG.f90 | 139 +++++++++++++++------------ f90/MPI/Declaration_MPI.f90 | 15 +-- f90/MPI/Main_MPI.f90 | 183 +++++++++++++++++------------------- 6 files changed, 366 insertions(+), 271 deletions(-) diff --git a/f90/3D_MT/ESolnManager.f90 b/f90/3D_MT/ESolnManager.f90 index e4a6a5d1..494b60da 100644 --- a/f90/3D_MT/ESolnManager.f90 +++ b/f90/3D_MT/ESolnManager.f90 @@ -53,7 +53,10 @@ module ESolnManager character(len=*), parameter :: FTYPE_ASCII = "ascii" character(len=*), parameter :: FTYPE_BINARY = "binary" - + + character(len=*), parameter :: E_FIELD_TYPE_FWD = "FWD" + character(len=*), parameter :: E_FIELD_TYPE_JMULTT = "JmultT" + type (grid_t), pointer :: EsMgr_grid => null() character(len=25) :: EsMgr_ftype character(len=256) :: EsMgr_prefix @@ -64,8 +67,9 @@ module ESolnManager logical :: esmgr_holder_allocated public :: FTYPE_ASCII, FTYPE_BINARY + public :: E_FIELD_TYPE_FWD, E_FIELD_TYPE_JMULTT public :: EsMgr_init - public :: EsMgr_create_solnVectorMTX, EsMgr_create_e + public :: EsMgr_create_solnVectorMTX, EsMgr_create_e public :: EsMgr_get public :: EsMgr_save public :: EsMgr_save_in_file @@ -268,35 +272,86 @@ subroutine EsMgr_create_e(e, iTx, place_holder) end subroutine EsMgr_create_e + ! create_prefix - Generate a prefix for a solnVector file name + ! + ! This routine will create a prefix that can be used to name the + ! solnVector file. + ! + ! The prefix will always start with EsMgr_prefix, which was + ! passed in during the init call, the rest will be in one of + ! depending on which optional arguments are present or not: + ! + ! - $EsMgr_prefix.$E_field_type.$label + ! - $EsMgr_prefix.$E_field_type + ! - $EsMgr_prefix.$label + ! - $EsMgr_prefix + ! 
+ subroutine create_prefix(prefix, E_field_type, label) + + implicit none + + character(len=*), intent(out) :: prefix + character(len=*), intent(in), optional :: E_field_type + character(len=*), intent(in), optional :: label + + character(len=512) :: PREFIX_FNAME_FORMAT + logical :: use_label + + use_label = .false. + + if (present(label)) then + use_label = trim(label) /= "NULL" .and. len_trim(label) /= 0 + end if + + write(0,*) 'LABEL: ', use_label, trim(label) + + if (present(E_field_type) .and. use_label) then + PREFIX_FNAME_FORMAT = '(A, A, A, A, A)' + write(prefix, PREFIX_FNAME_FORMAT) trim(EsMgr_prefix), ".", trim(E_field_type), ".", trim(label) + else if (present(E_field_type) .and. .not. use_label) then + PREFIX_FNAME_FORMAT = '(A, A, A)' + write(prefix, PREFIX_FNAME_FORMAT) trim(EsMgr_prefix), ".", trim(E_field_type) + else if (.not. present(E_field_type) .and. use_label) then + PREFIX_FNAME_FORMAT = '(A, A, A)' + write(prefix, PREFIX_FNAME_FORMAT) trim(EsMgr_prefix), ".", trim(label) + else + PREFIX_FNAME_FORMAT = '(A, A)' + write(prefix, PREFIX_FNAME_FORMAT) trim(EsMgr_prefix) + end if + + end subroutine create_prefix + ! EsMgr_get ! ! Populate e with the transmitter iTx and polarization pol_index - ! by either via MPI or by reading a file. + ! via MPI or by reading a file. ! ! EsMgr_save_in_file == .false. ! ====================== - ! - ! If EsMgr_save_in_file is .false., then worker_tasks that call this - ! routine will receive e from the main task (That has called EsMgr_save). - ! - ! When a Main task calls this routine with EsMgr_save_in_file == .false., - ! then the main task will receive e from the worker task specified in - ! the from argument. + ! + ! If `EsMgr_save_in_file == .false.`, then a task will receive the + ! solnVector via MPI from the MPI tasks specified in the `from`, + ! argument. + ! + ! Note: The task in the `from` argument must call EsMgr_save + ! with a corosponding task number in the `to` argument. ! ! 
EsMgr_save_in_file == .true. ! ====================== ! - ! If the EsolnManager was initialized with save_in_file=.true., then - ! worker_tasks that call this function will read the corresponding - ! file that contains the data for the pol_index for iTx. After reading - ! the file, the worker task will send a message to the main task - ! indicating that it has successfully read the file. + ! If the EsolnManager was initialized with `EsMgr_save_in_file == .true.`, + ! then worker_tasks that call this function will read the corresponding + ! file that matches the requested arguments. + ! + ! After reading, the worker task will communicate with the main task + ! to indicate that it is done reading. ! ! Main Tasks: ! - ! If EsMgr_save_in_file is .true., then the main task will wait for - ! the from task to finish writing it's solnVector_t in EsMgr_save. - ! + ! If EsMgr_save_in_file is .true., then the main task will wait for + ! the task speciried in from to finish writing it's solnVector_t + ! that it made in ESoln_save. + ! ! Note: EsMgr_init should be called before calling this function. ! ! Arguments: @@ -306,12 +361,16 @@ end subroutine EsMgr_create_e ! The transmitter number to read ! pol_index - integer - optional ! The polarization to read - ! from - integer - optional + ! from - integer - optional ! The MPI task to receive the solnVector_t from if EsMgr_save_in_file is .false. - ! prefix - character - optional - ! The additional prefix that was used to write the file (e.g. 'JmultT', 'Trial') - ! - subroutine EsMgr_get(e, iTx, pol_index, from, prefix) + ! E_field_type - character - optional + ! The type of the electric field that the SolnVector was saved with, + ! normally this is either E_FIELD_TYPE_FWD or E_FIELD_TYPE_JMULTT. + ! label - character - optional + ! The additonal label that was used to save this SolnVector. Normally + ! this is used to label the specific line search calulation (e.g. + ! trial, fwd01, fwd02 etc.). 
+ subroutine EsMgr_get(e, iTx, pol_index, from, E_field_type, label) implicit none @@ -319,7 +378,9 @@ subroutine EsMgr_get(e, iTx, pol_index, from, prefix) integer, intent(in) :: iTx integer, intent(in), optional :: pol_index integer, intent(in), optional :: from - character(len=*), intent(in), optional :: prefix + character(len=*), intent(in), optional :: E_field_type + character(len=*), intent(in), optional :: label + character(len=512) :: prefix e % tx = iTx @@ -330,6 +391,7 @@ subroutine EsMgr_get(e, iTx, pol_index, from, prefix) end if if (EsMgr_save_in_file .and. EsMgr_ctx % rank_world /= 0) then + call create_prefix(prefix, E_field_type, label) call read_esoln_from_file(e, iTx, pol_index, prefix=prefix) return end if @@ -340,15 +402,15 @@ end subroutine EsMgr_get ! EsMgr_save ! - ! Save e by either writing it to disk or sending it to the main task. + ! Save `e` by either writing it to disk or sending it to the main task via MPI. ! ! EsMgr_save_in_file = .false. ! ============================= ! - ! If EsMgr_save_in_file is .false., then a calling task will send e to the task - ! specified in the to argument. + ! If `EsMgr_save_in_file == .false.`, then a calling task will send `e` to the task + ! specified in the `to` argument. ! - ! To receive this e, a task will need to call EsMgr_get with the correspond from. + ! To receive this `e`, a task will need to call EsMgr_get with the correspond from. ! ! EsMgr_save_in_file = .true. ! ============================= @@ -360,29 +422,38 @@ end subroutine EsMgr_get ! If the main task calls this routine with EsMgr_save_in_file == .true., then ! it will return immediately as no work is needed. ! - ! ! Arguments ! e - solnVector_t ! The solnVector_t to save ! to - integer - optional ! The task to send e to - ! prefix - character - optional - ! If present, append prefix to the electric field solutions. Useful when - ! needing to label JmultT or trail electric fields. - ! - subroutine EsMgr_save(e, to, prefix) + ! 
E_field_type - character - optional + ! The type of the electric field, normally this is either E_FIELD_TYPE_FWD + ! or E_FIELD_TYPE_JMULTT, but could be anything. However, the same E_field_type + ! will need to be used in EsMgr_get to read the same solnVector. + ! label - character - optional + ! An additional label to use to label the solnVector file (if + ! EsMgr_save_in_file == .true.). Normally, this is used to label the specific + ! line search calculations (e.g. trial, fwd01, fwd02, etc.). Again, the same + ! label will need to be specified when calling the corresponding EsMgr_get. + ! + subroutine EsMgr_save(e, to, E_field_type, label) implicit none type (solnVector_t), intent(inout) :: e integer, intent(in), optional :: to - character(len=*), intent(in), optional :: prefix + character(len=*), intent(in), optional :: E_field_type + character(len=*), intent(in), optional :: label + + character(len=512) :: prefix if (EsMgr_save_in_file .and. EsMgr_ctx % rank_world == 0) then return end if if (EsMgr_save_in_file .and. .not. 
EsMgr_ctx % rank_world == 0) then + call create_prefix(prefix, E_field_type, label) call EsMgr_write_to_file(e, prefix) call communicate_file_done_writing() return @@ -448,7 +519,7 @@ subroutine EsMgr_write_to_file(e, prefix, iPol, ftype) if (present(prefix)) then prefix_lcl = prefix else - prefix_lcl = "" + prefix_lcl = "esoln" end if if (present(ftype)) then @@ -457,7 +528,7 @@ subroutine EsMgr_write_to_file(e, prefix, iPol, ftype) ftype_lcl = EsMgr_ftype endif - call write_solnVector(e, trim(EsMgr_prefix)//trim(prefix_lcl), ftype=ftype_lcl, pol_index=iPol_lcl) + call write_solnVector(e, trim(prefix_lcl), ftype=ftype_lcl, pol_index=iPol_lcl) end subroutine EsMgr_write_to_file @@ -482,11 +553,11 @@ subroutine read_esoln_from_file(e, iTx, iPol, prefix) if (present(prefix)) then prefix_lcl = prefix else - prefix_lcl = "" + prefix_lcl = "esoln" end if e % tx = iTx - call read_solnVector(e, trim(EsMgr_prefix)//trim(prefix_lcl), ftype=EsMgr_ftype, pol_index=iPol_lcl) + call read_solnVector(e, trim(prefix_lcl), ftype=EsMgr_ftype, pol_index=iPol_lcl) end subroutine read_esoln_from_file diff --git a/f90/INV/INVcore.f90 b/f90/INV/INVcore.f90 index f792b749..78a0b429 100755 --- a/f90/INV/INVcore.f90 +++ b/f90/INV/INVcore.f90 @@ -62,7 +62,7 @@ end subroutine printf !********************************************************************** - subroutine func(lambda,d,m0,mHat,F,mNorm,dHat,eAll,RMS,trial) + subroutine func(lambda,d,m0,mHat,F,mNorm,dHat,eAll,RMS,label) ! Compute the full penalty functional F ! Also output the predicted data and the EM solution @@ -76,20 +76,20 @@ subroutine func(lambda,d,m0,mHat,F,mNorm,dHat,eAll,RMS,trial) type(dataVectorMTX_t), optional, intent(inout) :: dHat type(solnVectorMTX_t), optional, intent(inout) :: eAll real(kind=prec), optional, intent(out) :: RMS - logical, optional, intent(in) :: trial + character(len=*), optional, intent(in) :: label ! 
local variables type(dataVectorMTX_t) :: res,Nres type(modelParam_t) :: m,JTd real(kind=prec) :: SS integer :: Ndata, Nmodel - logical :: trial_lcl + character(len=80) :: label_lcl - if (present(trial)) then - trial_lcl = trial - else - trial_lcl = .false. - end if + if (present(label)) then + label_lcl = label + else + label_lcl = "NULL" + end if ! compute the smoothed model parameter vector call CmSqrtMult(mHat,m) @@ -104,7 +104,7 @@ subroutine func(lambda,d,m0,mHat,F,mNorm,dHat,eAll,RMS,trial) ! also sets up forward solutions for all transmitters in eAll ! (which is created on the fly if it doesn't exist) #ifdef MPI - call Master_Job_fwdPred(m,dHat,eAll,trial=trial_lcl) + call Master_Job_fwdPred(m,dHat,eAll,label=label_lcl) #else call fwdPred(m,dHat,eAll) #endif @@ -146,7 +146,7 @@ subroutine func(lambda,d,m0,mHat,F,mNorm,dHat,eAll,RMS,trial) end subroutine func !********************************************************************** - subroutine gradient(lambda,d,m0,mHat,grad,dHat,eAll,use_starting_guess) + subroutine gradient(lambda,d,m0,mHat,grad,dHat,eAll,label) ! Computes the gradient of the penalty functional, ! using EM solution (eAll) and the predicted data (dHat) @@ -165,18 +165,18 @@ subroutine gradient(lambda,d,m0,mHat,grad,dHat,eAll,use_starting_guess) type(modelParam_t), intent(inout) :: grad type(dataVectorMTX_t), intent(inout) :: dHat type(solnVectorMTX_t), intent(inout) :: eAll - logical, intent(in), optional :: use_starting_guess + character(len=*), intent(in), optional :: label ! local variables real(kind=prec) :: Ndata,Nmodel type(dataVectorMTX_t) :: res type(modelParam_t) :: m,JTd,CmJTd - logical :: use_starting_guess_lcl + character(len=80) :: label_lcl - if (present(use_starting_guess)) then - use_starting_guess_lcl = use_starting_guess + if (present(label)) then + label_lcl = label else - use_starting_guess_lcl = .false. + label_lcl = "NULL" end if ! 
compute the smoothed model parameter vector @@ -194,7 +194,7 @@ subroutine gradient(lambda,d,m0,mHat,grad,dHat,eAll,use_starting_guess) ! multiply by J^T call CdInvMult(res) #ifdef MPI - call Master_job_JmultT(m,res,JTd,eAll,use_starting_guess=use_starting_guess_lcl) + call Master_job_JmultT(m,res,JTd,eAll,label=label) #else call JmultT(m,res,JTd,eAll) #endif diff --git a/f90/INV/LBFGS.f90 b/f90/INV/LBFGS.f90 index f6f7b11b..c9f005d9 100644 --- a/f90/INV/LBFGS.f90 +++ b/f90/INV/LBFGS.f90 @@ -64,6 +64,10 @@ module LBFGS type(LBFGSiterControl_t), private, save :: iterControl type(LBFGSiterCache_t), private, save :: iterCache + character(len=80), parameter :: STARTUP_LABEL = 'STARTUP' + character(len=80), parameter :: TRIAL_FWD_F_1_LABEL = 'F_1' + character(len=80), parameter :: FWD_F_LABEL = 'F' + Contains !********************************************************************** @@ -604,6 +608,7 @@ subroutine lineSearchQuadratic(lambda,d,m0,h,alpha,mHat,f,grad, & type(dataVectorMTX_t) :: dHat_1 type(solnVectorMTX_t) :: eAll_1 character(100) :: logFile + character(80) :: label ! parameters c = iterControl%c @@ -642,7 +647,7 @@ subroutine lineSearchQuadratic(lambda,d,m0,h,alpha,mHat,f,grad, & call linComb(ONE,mHat_0,alpha_1,h,mHat_1) ! compute the penalty functional and predicted data at mHat_1 - call func(lambda,d,m0,mHat_1,f_1,mNorm_1,dHat_1,eAll_1,rms_1) + call func(lambda,d,m0,mHat_1,f_1,mNorm_1,dHat_1,eAll_1,rms_1,label=TRIAL_FWD_F_1_LABEL) call printf('STARTLS',lambda,alpha,f_1,mNorm_1,rms_1) call printf('STARTLS',lambda,alpha,f_1,mNorm_1,rms_1,logFile) niter = niter + 1 @@ -671,7 +676,7 @@ subroutine lineSearchQuadratic(lambda,d,m0,h,alpha,mHat,f,grad, & ! alpha = alpha_i/TWO ! reset alpha to ensure progress ! 
end if call linComb(ONE,mHat_0,alpha,h,mHat) - call func(lambda,d,m0,mHat,f,mNorm,dHat,eAll,rms) + call func(lambda,d,m0,mHat,f,mNorm,dHat,eAll,rms,label=FWD_F_LABEL) call printf('QUADLS',lambda,alpha,f,mNorm,rms) call printf('QUADLS',lambda,alpha,f,mNorm,rms,logFile) niter = niter + 1 @@ -701,22 +706,25 @@ subroutine lineSearchQuadratic(lambda,d,m0,h,alpha,mHat,f,grad, & end if if (starting_guess) then - alpha = alpha_1 - dHat = dHat_1 - eAll = eAll_1 - mHat = mHat_1 - rms = rms_1 - f = f_1 + label=TRIAL_FWD_F_1_LABEL + alpha = alpha_1 + dHat = dHat_1 + eAll = eAll_1 + mHat = mHat_1 + rms = rms_1 + f = f_1 + else + label=FWD_F_LABEL end if ! compute gradient of the full penalty functional and exit if (relaxation) then - call linComb(ONE,mHat_0,gamma*alpha,h,mHat) - call func(lambda,d,m0,mHat,f,mNorm,dHat,eAll,rms) - call printf('RELAX',lambda,gamma*alpha,f,mNorm,rms) - call printf('RELAX',lambda,gamma*alpha,f,mNorm,rms,logFile) - end if - call gradient(lambda,d,m0,mHat,grad,dHat,eAll) + call linComb(ONE,mHat_0,gamma*alpha,h,mHat) + call func(lambda,d,m0,mHat,f,mNorm,dHat,eAll,rms) + call printf('RELAX',lambda,gamma*alpha,f,mNorm,rms) + call printf('RELAX',lambda,gamma*alpha,f,mNorm,rms,logFile) + end if + call gradient(lambda,d,m0,mHat,grad,dHat,eAll,label=label) write(*,'(a39)') 'Gradient computed, line search finished' write(ioLog,'(a39)') 'Gradient computed, line search finished' @@ -798,6 +806,7 @@ subroutine lineSearchCubic(lambda,d,m0,h,alpha,mHat,f,grad, & type(dataVectorMTX_t) :: dHat_1 type(solnVectorMTX_t) :: eAll_1 character(100) :: logFile + character(80) :: label ! parameters c = iterControl%c @@ -832,7 +841,7 @@ subroutine lineSearchCubic(lambda,d,m0,h,alpha,mHat,f,grad, & ! 
compute the trial mHat, f, dHat, eAll, rms mHat_1 = mHat_0 call linComb(ONE,mHat_0,alpha_1,h,mHat_1) - call func(lambda,d,m0,mHat_1,f_1,mNorm_1,dHat_1,eAll_1,rms_1) + call func(lambda,d,m0,mHat_1,f_1,mNorm_1,dHat_1,eAll_1,rms_1,label=TRIAL_FWD_F_1_LABEL) call printf('STARTLS',lambda,alpha,f_1,mNorm_1,rms_1) call printf('STARTLS',lambda,alpha,f_1,mNorm_1,rms_1,logFile) niter = niter + 1 @@ -860,7 +869,7 @@ subroutine lineSearchCubic(lambda,d,m0,h,alpha,mHat,f,grad, & call printf('RELAX',lambda,gamma*alpha,f,mNorm,rms) call printf('RELAX',lambda,gamma*alpha,f,mNorm,rms,logFile) end if - call gradient(lambda,d,m0,mHat,grad,dHat,eAll) + call gradient(lambda,d,m0,mHat,grad,dHat,eAll,label=TRIAL_FWD_F_1_LABEL) write(*,'(a45)') 'Quadratic has no minimum, exiting line search' write(ioLog,'(a45)') 'Quadratic has no minimum, exiting line search' call deall_dataVectorMTX(dHat_1) @@ -873,21 +882,24 @@ subroutine lineSearchCubic(lambda,d,m0,h,alpha,mHat,f,grad, & ! otherwise compute the functional at the minimizer of the quadratic alpha = - b/(TWO*a) call linComb(ONE,mHat_0,alpha,h,mHat) - call func(lambda,d,m0,mHat,f,mNorm,dHat,eAll,rms) + call func(lambda,d,m0,mHat,f,mNorm,dHat,eAll,rms,label=FWD_F_LABEL) call printf('QUADLS',lambda,alpha,f,mNorm,rms) call printf('QUADLS',lambda,alpha,f,mNorm,rms,logFile) niter = niter + 1 ! check whether the solution satisfies the sufficient decrease condition if (f < f_0 + c * alpha * g_0) then ! if the initial guess was better than what we found, take it - if (f_1 < f) then - starting_guess = .true. - alpha = alpha_1 - dHat = dHat_1 - eAll = eAll_1 - mHat = mHat_1 - rms = rms_1 - f = f_1 + if (f_1 < f) then + label = TRIAL_FWD_F_1_LABEL + starting_guess = .true. + alpha = alpha_1 + dHat = dHat_1 + eAll = eAll_1 + mHat = mHat_1 + rms = rms_1 + f = f_1 + else + label = FWD_F_LABEL end if ! 
compute the gradient and exit if (relaxation) then @@ -897,7 +909,7 @@ subroutine lineSearchCubic(lambda,d,m0,h,alpha,mHat,f,grad, & call printf('RELAX',lambda,gamma*alpha,f,mNorm,rms,logFile) end if - call gradient(lambda,d,m0,mHat,grad,dHat,eAll) + call gradient(lambda,d,m0,mHat,grad,dHat,eAll,label=label) write(*,'(a60)') 'Sufficient decrease condition satisfied, exiting line search' write(ioLog,'(a60)') 'Sufficient decrease condition satisfied, exiting line search' call deall_dataVectorMTX(dHat_1) @@ -939,7 +951,7 @@ subroutine lineSearchCubic(lambda,d,m0,h,alpha,mHat,f,grad, & ! end if ! compute the penalty functional call linComb(ONE,mHat_0,alpha,h,mHat) - call func(lambda,d,m0,mHat,f,mNorm,dHat,eAll,rms) + call func(lambda,d,m0,mHat,f,mNorm,dHat,eAll,rms,label=FWD_F_LABEL) call printf('CUBICLS',lambda,alpha,f,mNorm,rms) call printf('CUBICLS',lambda,alpha,f,mNorm,rms,logFile) niter = niter + 1 @@ -967,12 +979,15 @@ subroutine lineSearchCubic(lambda,d,m0,h,alpha,mHat,f,grad, & ! if the initial guess was better than what we found, take it if (starting_guess) then - alpha = alpha_1 - dHat = dHat_1 - eAll = eAll_1 - mHat = mHat_1 - rms = rms_1 - f = f_1 + label = TRIAL_FWD_F_1_LABEL + alpha = alpha_1 + dHat = dHat_1 + eAll = eAll_1 + mHat = mHat_1 + rms = rms_1 + f = f_1 + else + label = FWD_F_LABEL end if ! compute gradient of the full penalty functional and exit @@ -982,7 +997,7 @@ subroutine lineSearchCubic(lambda,d,m0,h,alpha,mHat,f,grad, & call printf('RELAX',lambda,gamma*alpha,f,mNorm,rms) call printf('RELAX',lambda,gamma*alpha,f,mNorm,rms,logFile) end if - call gradient(lambda,d,m0,mHat,grad,dHat,eAll) + call gradient(lambda,d,m0,mHat,grad,dHat,eAll,label=label) write(*,'(a39)') 'Gradient computed, line search finished' write(ioLog,'(a39)') 'Gradient computed, line search finished' @@ -1132,7 +1147,7 @@ subroutine lineSearchWolfe(lambda,d,m0,h,alpha,mHat,f,grad, & mHat_1 = mHat_0 ! 
mHat_1 = mHat_0 + dir*step call linComb(ONE,mHat_0,alpha_1,h,mHat_1) - call func(lambda,d,m0,mHat_1,f_1,mNorm_1,dHat_1,eAll_1,rms_1) + call func(lambda,d,m0,mHat_1,f_1,mNorm_1,dHat_1,eAll_1,rms_1,label=TRIAL_FWD_F_1_LABEL) call printf('STARTLS',lambda,alpha_1,f_1,mNorm_1,rms_1) call printf('STARTLS',lambda,alpha_1,f_1,mNorm_1,rms_1,logFile) niter = niter + 1 @@ -1161,7 +1176,7 @@ subroutine lineSearchWolfe(lambda,d,m0,h,alpha,mHat,f,grad, & rms = rms_1 f = f_1 ! compute the gradient and exit - call gradient(lambda,d,m0,mHat,grad,dHat,eAll) + call gradient(lambda,d,m0,mHat,grad,dHat,eAll,label=TRIAL_FWD_F_1_LABEL) write(*,'(a45)') 'Quadratic has no minimum, exiting line search' write(ioLog,'(a45)') 'Quadratic has no minimum, exiting line search' if (present(flag)) then @@ -1175,7 +1190,7 @@ subroutine lineSearchWolfe(lambda,d,m0,h,alpha,mHat,f,grad, & end if ! otherwise compute the functional at the minimizer of the quadratic call linComb(ONE,mHat_0,alpha,h,mHat) - call func(lambda,d,m0,mHat,f,mNorm,dHat,eAll,rms) + call func(lambda,d,m0,mHat,f,mNorm,dHat,eAll,rms,label=FWD_F_LABEL) call printf('QUADLS',lambda,alpha,f,mNorm,rms) call printf('QUADLS',lambda,alpha,f,mNorm,rms,logFile) niter = niter + 1 @@ -1183,7 +1198,7 @@ subroutine lineSearchWolfe(lambda,d,m0,h,alpha,mHat,f,grad, & ! Strong Wolfe's condition needs the gradient ! well, we are going to calculate it anyway - so why don't we do it now? if (f <= f_1) then - call gradient(lambda,d,m0,mHat,grad,dHat,eAll) + call gradient(lambda,d,m0,mHat,grad,dHat,eAll,label=FWD_F_LABEL) g_1 = dotProd(grad, h) write(*,'(a29,es12.5)',advance='no') ' GRAD: computed, with g0=',g_0 write(*,'(a4,es12.5)') ' g1=',g_1 @@ -1216,7 +1231,7 @@ subroutine lineSearchWolfe(lambda,d,m0,h,alpha,mHat,f,grad, & else if (f_1 < f_0) then! is the initial making any progress? ! 
Test if the initial guess is good for Strong Wolfe condition - call gradient(lambda,d,m0,mHat_1,grad,dHat_1,eAll_1) + call gradient(lambda,d,m0,mHat_1,grad,dHat_1,eAll_1,label=TRIAL_FWD_F_1_LABEL) g_1 = dotProd(grad, h) write(*,'(a29,es12.5)',advance='no') ' GRAD: computed, with g0=',g_0 write(*,'(a4,es12.5)') ' g1=',g_1 @@ -1317,12 +1332,12 @@ subroutine lineSearchWolfe(lambda,d,m0,h,alpha,mHat,f,grad, & ! end if ! compute the penalty functional call linComb(ONE,mHat_0,alpha,h,mHat) - call func(lambda,d,m0,mHat,f,mNorm,dHat,eAll,rms) + call func(lambda,d,m0,mHat,f,mNorm,dHat,eAll,rms,label=FWD_F_LABEL) call printf('CUBICLS',lambda,alpha,f,mNorm,rms) call printf('CUBICLS',lambda,alpha,f,mNorm,rms,logFile) niter = niter + 1 ibracket = ibracket + 1 - call gradient(lambda,d,m0,mHat,grad,dHat,eAll) + call gradient(lambda,d,m0,mHat,grad,dHat,eAll,label=FWD_F_LABEL) g_1 = dotProd(grad, h) write(*,'(a29,es12.5)',advance='no') ' GRAD: computed, with g0=',g_0 write(*,'(a4,es12.5)') ' g1=',g_1 @@ -1367,7 +1382,7 @@ subroutine lineSearchWolfe(lambda,d,m0,h,alpha,mHat,f,grad, & starting_guess = .false. else starting_guess = .true. - call gradient(lambda,d,m0,mHat,grad,dHat,eAll) + call gradient(lambda,d,m0,mHat,grad,dHat,eAll,label=TRIAL_FWD_F_1_LABEL) endif exit endif @@ -1382,7 +1397,7 @@ subroutine lineSearchWolfe(lambda,d,m0,h,alpha,mHat,f,grad, & starting_guess = .false. else starting_guess = .true. - call gradient(lambda,d,m0,mHat_1,grad,dHat_1,eAll_1) + call gradient(lambda,d,m0,mHat_1,grad,dHat_1,eAll_1,label=TRIAL_FWD_F_1_LABEL) endif exit endif @@ -1538,7 +1553,7 @@ subroutine lineSearchWolfe2(lambda,d,m0,h,alpha,mHat,f,grad, & ! mHat_1 = mHat_0 + dir*step call linComb(ONE,mHat_0,alpha_1,h,mHat_1) ! 
compute the trial m, f, dHat, eAll, rms - call func(lambda,d,m0,mHat_1,f_1,mNorm,dHat_1,eAll_1,rms_1) + call func(lambda,d,m0,mHat_1,f_1,mNorm,dHat_1,eAll_1,rms_1,label=trim(TRIAL_FWD_F_1_LABEL)) call printf('STARTLS',lambda,alpha_1,f_1,mNorm,rms_1) call printf('STARTLS',lambda,alpha_1,f_1,mNorm,rms_1,logFile) niter = niter + 1 @@ -1552,7 +1567,7 @@ subroutine lineSearchWolfe2(lambda,d,m0,h,alpha,mHat,f,grad, & call ModEM_abort() end if ! calculate the gradient at the first guess - call gradient(lambda,d,m0,mHat_1,grad,dHat_1,eAll_1) + call gradient(lambda,d,m0,mHat_1,grad,dHat_1,eAll_1,label=trim(TRIAL_FWD_F_1_LABEL)) g_1 = dotProd(grad, h) grad_1 = grad write(*,'(a29,es12.5)',advance='no') & @@ -1678,12 +1693,12 @@ subroutine lineSearchWolfe2(lambda,d,m0,h,alpha,mHat,f,grad, & gPrev = g ! evaluate the function and derivetive at the new alpha call linComb(ONE,mHat_0,alpha,h,mHat) - call func(lambda,d,m0,mHat,f,mNorm,dHat,eAll,rms) + call func(lambda,d,m0,mHat,f,mNorm,dHat,eAll,rms,label=trim(FWD_F_LABEL)) call printf('CUBICLS',lambda,alpha,f,mNorm,rms) call printf('CUBICLS',lambda,alpha,f,mNorm,rms,logFile) niter = niter + 1 nbracket = nbracket +1 - call gradient(lambda,d,m0,mHat,grad,dHat,eAll) + call gradient(lambda,d,m0,mHat,grad,dHat,eAll,label=trim(FWD_F_LABEL)) g = dotProd(grad, h) if (f.lt.f_1) then alpha_1 = alpha @@ -1744,7 +1759,7 @@ subroutine lineSearchWolfe2(lambda,d,m0,h,alpha,mHat,f,grad, & alpha = alphaNext ! compute the penalty functional call linComb(ONE,mHat_0,alpha,h,mHat) - call func(lambda,d,m0,mHat,f,mNorm,dHat,eAll,rms) + call func(lambda,d,m0,mHat,f,mNorm,dHat,eAll,rms,label=FWD_F_LABEL) call printf('QUADLS',lambda,alpha,f,mNorm,rms) call printf('QUADLS',lambda,alpha,f,mNorm,rms,logFile) else if ((istrapped .eq. 2).and.(nbracket.eq.0)) then ! try quadratic @@ -1754,7 +1769,7 @@ subroutine lineSearchWolfe2(lambda,d,m0,h,alpha,mHat,f,grad, & alpha = alphaNext ! 
compute the penalty functional call linComb(ONE,mHat_0,alpha,h,mHat) - call func(lambda,d,m0,mHat,f,mNorm,dHat,eAll,rms) + call func(lambda,d,m0,mHat,f,mNorm,dHat,eAll,rms,label=FWD_F_LABEL) call printf('QUADLS',lambda,alpha,f,mNorm,rms) call printf('QUADLS',lambda,alpha,f,mNorm,rms,logFile) else if ((istrapped .eq. -1).and.(nbracket.eq.0)) then ! jump @@ -1764,7 +1779,7 @@ subroutine lineSearchWolfe2(lambda,d,m0,h,alpha,mHat,f,grad, & alpha = alphaNext ! compute the penalty functional call linComb(ONE,mHat_0,alpha,h,mHat) - call func(lambda,d,m0,mHat,f,mNorm,dHat,eAll,rms) + call func(lambda,d,m0,mHat,f,mNorm,dHat,eAll,rms,label=FWD_F_LABEL) call printf('QUADLS',lambda,alpha,f,mNorm,rms) call printf('QUADLS',lambda,alpha,f,mNorm,rms,logFile) else ! cubic @@ -1774,7 +1789,7 @@ subroutine lineSearchWolfe2(lambda,d,m0,h,alpha,mHat,f,grad, & alpha = alphaNext ! compute the penalty functional call linComb(ONE,mHat_0,alpha,h,mHat) - call func(lambda,d,m0,mHat,f,mNorm,dHat,eAll,rms) + call func(lambda,d,m0,mHat,f,mNorm,dHat,eAll,rms,label=FWD_F_LABEL) call printf('CUBICLS',lambda,alpha,f,mNorm,rms) call printf('CUBICLS',lambda,alpha,f,mNorm,rms,logFile) endif @@ -1784,7 +1799,7 @@ subroutine lineSearchWolfe2(lambda,d,m0,h,alpha,mHat,f,grad, & fPrev = f gPrev = g !calculatie gradient to test the Wolfe condition - call gradient(lambda,d,m0,mHat,grad,dHat,eAll) + call gradient(lambda,d,m0,mHat,grad,dHat,eAll,label=FWD_F_LABEL) g = dotProd(grad, h) if (f.lt.f_1) then alpha_1 = alpha diff --git a/f90/INV/NLCG.f90 b/f90/INV/NLCG.f90 index 1e5c7de3..563f60ee 100755 --- a/f90/INV/NLCG.f90 +++ b/f90/INV/NLCG.f90 @@ -53,6 +53,10 @@ module NLCG type(NLCGiterControl_t), private, save :: iterControl + character(len=*), parameter :: STARTUP_LABEL = 'STARTUP' + character(len=*), parameter :: TRIAL_FWD_F_1_LABEL = 'F_1' + character(len=*), parameter :: FWD_F_LABEL = 'F' + Contains !********************************************************************** @@ -284,7 +288,6 @@ subroutine 
NLCGsolver(d,lambda,m0,m,fname) write(ioLog,'(a41,es8.1)') 'The initial damping parameter lambda is ',lambda write(ioLog,'(a55,f12.6)') 'The initial line search step size (in model units) is ',startdm - ! starting from the prior hardcoded by setting mHat = 0 and m = m0 ! m = m0 ! mHat = m0 @@ -559,14 +562,15 @@ subroutine lineSearchQuadratic(lambda,d,m0,h,alpha,mHat,f,grad, & ! local variables real(kind=prec) :: alpha_1,alpha_i,mNorm - logical :: starting_guess - logical :: relaxation + logical :: starting_guess + logical :: relaxation real(kind=prec) :: eps,k,c,a,b real(kind=prec) :: g_0,f_0,f_1,f_i,rms_1,mNorm_1 - type(modelParam_t) :: mHat_0,mHat_1 - type(dataVectorMTX_t) :: dHat_1 - type(solnVectorMTX_t) :: eAll_1 - character(100) :: logFile + type(modelParam_t) :: mHat_0,mHat_1 + type(dataVectorMTX_t) :: dHat_1 + type(solnVectorMTX_t) :: eAll_1 + character(100) :: logFile + character(len=80) :: label ! parameters c = iterControl%c @@ -601,11 +605,11 @@ subroutine lineSearchQuadratic(lambda,d,m0,h,alpha,mHat,f,grad, & ! initialize mHat_1 = mHat_0 - ! compute the trial parameter mHat_1 + ! compute the parameter mHat_1 call linComb(ONE,mHat_0,alpha_1,h,mHat_1) ! compute the penalty functional and predicted data at mHat_1 - call func(lambda,d,m0,mHat_1,f_1,mNorm_1,dHat_1,eAll_1,rms_1) + call func(lambda,d,m0,mHat_1,f_1,mNorm_1,dHat_1,eAll_1,rms_1,label=TRIAL_FWD_F_1_LABEL) call printf('STARTLS',lambda,alpha,f_1,mNorm_1,rms_1) call printf('STARTLS',lambda,alpha,f_1,mNorm_1,rms_1,logFile) niter = niter + 1 @@ -634,7 +638,7 @@ subroutine lineSearchQuadratic(lambda,d,m0,h,alpha,mHat,f,grad, & ! alpha = alpha_i/TWO ! reset alpha to ensure progress ! 
end if call linComb(ONE,mHat_0,alpha,h,mHat) - call func(lambda,d,m0,mHat,f,mNorm,dHat,eAll,rms) + call func(lambda,d,m0,mHat,f,mNorm,dHat,eAll,rms,label=FWD_F_LABEL) call printf('QUADLS',lambda,alpha,f,mNorm,rms) call printf('QUADLS',lambda,alpha,f,mNorm,rms,logFile) niter = niter + 1 @@ -664,12 +668,15 @@ subroutine lineSearchQuadratic(lambda,d,m0,h,alpha,mHat,f,grad, & end if if (starting_guess) then - alpha = alpha_1 - dHat = dHat_1 - eAll = eAll_1 - mHat = mHat_1 - rms = rms_1 - f = f_1 + alpha = alpha_1 + dHat = dHat_1 + eAll = eAll_1 + mHat = mHat_1 + label = TRIAL_FWD_F_1_LABEL + rms = rms_1 + f = f_1 + else + label=FWD_F_LABEL end if ! compute gradient of the full penalty functional and exit @@ -679,7 +686,7 @@ subroutine lineSearchQuadratic(lambda,d,m0,h,alpha,mHat,f,grad, & call printf('RELAX',lambda,gamma*alpha,f,mNorm,rms) call printf('RELAX',lambda,gamma*alpha,f,mNorm,rms,logFile) end if - call gradient(lambda,d,m0,mHat,grad,dHat,eAll) + call gradient(lambda,d,m0,mHat,grad,dHat,eAll, label=label) write(*,'(a39)') 'Gradient computed, line search finished' write(ioLog,'(a39)') 'Gradient computed, line search finished' @@ -752,14 +759,15 @@ subroutine lineSearchCubic(lambda,d,m0,h,alpha,mHat,f,grad, & ! local variables real(kind=prec) :: alpha_1,alpha_i,alpha_j,mNorm - logical :: starting_guess - logical :: relaxation + logical :: starting_guess + logical :: relaxation real(kind=prec) :: eps,k,c,a,b,q1,q2,q3 real(kind=prec) :: g_0,f_0,f_1,f_i,f_j,rms_1,mNorm_1 - type(modelParam_t) :: mHat_0,mHat_1 - type(dataVectorMTX_t) :: dHat_1 - type(solnVectorMTX_t) :: eAll_1 - character(100) :: logFile + type(modelParam_t) :: mHat_0,mHat_1 + type(dataVectorMTX_t) :: dHat_1 + type(solnVectorMTX_t) :: eAll_1 + character(100) :: logFile + character(len=80) :: label ! parameters c = iterControl%c @@ -791,10 +799,10 @@ subroutine lineSearchCubic(lambda,d,m0,h,alpha,mHat,f,grad, & relaxation = .false. end if - ! compute the trial mHat, f, dHat, eAll, rms + ! 
compute the mHat, f, dHat, eAll, rms mHat_1 = mHat_0 call linComb(ONE,mHat_0,alpha_1,h,mHat_1) - call func(lambda,d,m0,mHat_1,f_1,mNorm_1,dHat_1,eAll_1,rms_1, trial=.true.) + call func(lambda,d,m0,mHat_1,f_1,mNorm_1,dHat_1,eAll_1,rms_1, label=TRIAL_FWD_F_1_LABEL) call printf('STARTLS',lambda,alpha,f_1,mNorm_1,rms_1) call printf('STARTLS',lambda,alpha,f_1,mNorm_1,rms_1,logFile) niter = niter + 1 @@ -807,22 +815,26 @@ subroutine lineSearchCubic(lambda,d,m0,h,alpha,mHat,f,grad, & a = (f_1 - f_0 - g_0*alpha_1)/(alpha_1**2) b = g_0 ! if the curvature is -ve, there is no minimum; take the initial guess + label = FWD_F_LABEL if (a < 0) then - starting_guess = .true. - alpha = alpha_1 - dHat = dHat_1 + write(0,*) " USE STARTING UGESS" + stop + starting_guess = .true. + label = TRIAL_FWD_F_1_LABEL + alpha = alpha_1 + dHat = dHat_1 eAll = eAll_1 - mHat = mHat_1 - rms = rms_1 - f = f_1 + mHat = mHat_1 + rms = rms_1 + f = f_1 ! compute the gradient and exit if (relaxation) then call linComb(ONE,mHat_0,gamma*alpha,h,mHat) - call func(lambda,d,m0,mHat,f,mNorm,dHat,eAll,rms) + call func(lambda,d,m0,mHat,f,mNorm,dHat,eAll,rms) call printf('RELAX',lambda,gamma*alpha,f,mNorm,rms) call printf('RELAX',lambda,gamma*alpha,f,mNorm,rms,logFile) end if - call gradient(lambda,d,m0,mHat,grad,dHat,eAll) + call gradient(lambda,d,m0,mHat,grad,dHat,eAll,label=label) write(*,'(a45)') 'Quadratic has no minimum, exiting line search' write(ioLog,'(a45)') 'Quadratic has no minimum, exiting line search' call deall_dataVectorMTX(dHat_1) @@ -835,22 +847,26 @@ subroutine lineSearchCubic(lambda,d,m0,h,alpha,mHat,f,grad, & ! otherwise compute the functional at the minimizer of the quadratic alpha = - b/(TWO*a) call linComb(ONE,mHat_0,alpha,h,mHat) - call func(lambda,d,m0,mHat,f,mNorm,dHat,eAll,rms) + call func(lambda,d,m0,mHat,f,mNorm,dHat,eAll,rms,label=FWD_F_LABEL) call printf('QUADLS',lambda,alpha,f,mNorm,rms) call printf('QUADLS',lambda,alpha,f,mNorm,rms,logFile) niter = niter + 1 ! 
check whether the solution satisfies the sufficient decrease condition if (f < f_0 + c * alpha * g_0) then ! if the initial guess was better than what we found, take it - if (f_1 < f) then - starting_guess = .true. - alpha = alpha_1 - dHat = dHat_1 - eAll = eAll_1 - mHat = mHat_1 - rms = rms_1 - f = f_1 + if (f_1 < f) then + starting_guess = .true. + label = TRIAL_FWD_F_1_LABEL + alpha = alpha_1 + dHat = dHat_1 + eAll = eAll_1 + mHat = mHat_1 + rms = rms_1 + f = f_1 + else + label=FWD_F_LABEL end if + ! compute the gradient and exit if (relaxation) then call linComb(ONE,mHat_0,gamma*alpha,h,mHat) @@ -859,7 +875,7 @@ subroutine lineSearchCubic(lambda,d,m0,h,alpha,mHat,f,grad, & call printf('RELAX',lambda,gamma*alpha,f,mNorm,rms,logFile) end if - call gradient(lambda,d,m0,mHat,grad,dHat,eAll,use_starting_guess=starting_guess) + call gradient(lambda,d,m0,mHat,grad,dHat,eAll,label=label) write(*,'(a60)') 'Sufficient decrease condition satisfied, exiting line search' write(ioLog,'(a60)') 'Sufficient decrease condition satisfied, exiting line search' call deall_dataVectorMTX(dHat_1) @@ -901,7 +917,7 @@ subroutine lineSearchCubic(lambda,d,m0,h,alpha,mHat,f,grad, & ! end if ! compute the penalty functional call linComb(ONE,mHat_0,alpha,h,mHat) - call func(lambda,d,m0,mHat,f,mNorm,dHat,eAll,rms) + call func(lambda,d,m0,mHat,f,mNorm,dHat,eAll,rms,label=FWD_F_LABEL) call printf('CUBICLS',lambda,alpha,f,mNorm,rms) call printf('CUBICLS',lambda,alpha,f,mNorm,rms,logFile) niter = niter + 1 @@ -929,12 +945,15 @@ subroutine lineSearchCubic(lambda,d,m0,h,alpha,mHat,f,grad, & ! if the initial guess was better than what we found, take it if (starting_guess) then - alpha = alpha_1 - dHat = dHat_1 - eAll = eAll_1 - mHat = mHat_1 - rms = rms_1 - f = f_1 + alpha = alpha_1 + dHat = dHat_1 + eAll = eAll_1 + mHat = mHat_1 + rms = rms_1 + f = f_1 + label=TRIAL_FWD_F_1_LABEL + else + label=FWD_F_LABEL end if ! 
compute gradient of the full penalty functional and exit @@ -944,7 +963,7 @@ subroutine lineSearchCubic(lambda,d,m0,h,alpha,mHat,f,grad, & call printf('RELAX',lambda,gamma*alpha,f,mNorm,rms) call printf('RELAX',lambda,gamma*alpha,f,mNorm,rms,logFile) end if - call gradient(lambda,d,m0,mHat,grad,dHat,eAll) + call gradient(lambda,d,m0,mHat,grad,dHat,eAll,label=label) write(*,'(a39)') 'Gradient computed, line search finished' write(ioLog,'(a39)') 'Gradient computed, line search finished' @@ -1092,11 +1111,11 @@ subroutine lineSearchWolfe(lambda,d,m0,h,alpha,mHat,f,grad, & ! alpha_1 is the initial step size, which is set in LBFGS alpha_1 = alpha - ! compute the trial mHat, f, dHat, eAll, rms + ! compute the mHat, f, dHat, eAll, rms mHat_1 = mHat_0 ! mHat_1 = mHat_0 + dir*step call linComb(ONE,mHat_0,alpha_1,h,mHat_1) - call func(lambda,d,m0,mHat_1,f_1,mNorm_1,dHat_1,eAll_1,rms_1) + call func(lambda,d,m0,mHat_1,f_1,mNorm_1,dHat_1,eAll_1,rms_1,label=TRIAL_FWD_F_1_LABEL) call printf('STARTLS',lambda,alpha_1,f_1,mNorm_1,rms_1) call printf('STARTLS',lambda,alpha_1,f_1,mNorm_1,rms_1,logFile) niter = niter + 1 @@ -1136,7 +1155,7 @@ subroutine lineSearchWolfe(lambda,d,m0,h,alpha,mHat,f,grad, & end if ! otherwise compute the functional at the minimizer of the quadratic call linComb(ONE,mHat_0,alpha,h,mHat) - call func(lambda,d,m0,mHat,f,mNorm,dHat,eAll,rms) + call func(lambda,d,m0,mHat,f,mNorm,dHat,eAll,rms,label=FWD_F_LABEL) call printf('QUADLS',lambda,alpha,f,mNorm,rms) call printf('QUADLS',lambda,alpha,f,mNorm,rms,logFile) niter = niter + 1 @@ -1144,7 +1163,7 @@ subroutine lineSearchWolfe(lambda,d,m0,h,alpha,mHat,f,grad, & ! Strong Wolfe's condition needs the gradient ! well, we are going to calculate it anyway - so why don't we do it now? 
if (f <= f_1) then - call gradient(lambda,d,m0,mHat,grad,dHat,eAll) + call gradient(lambda,d,m0,mHat,grad,dHat,eAll,label=FWD_F_LABEL) g_1 = dotProd(grad, h) write(*,'(a29,es12.5)',advance='no') ' GRAD: computed, with g0=',g_0 write(*,'(a4,es12.5)') ' g1=',g_1 @@ -1174,7 +1193,7 @@ subroutine lineSearchWolfe(lambda,d,m0,h,alpha,mHat,f,grad, & else if (f_1 < f_0) then! is the initial making any progress? ! Test if the initial guess is good for Strong Wolfe condition - call gradient(lambda,d,m0,mHat_1,grad,dHat_1,eAll_1) + call gradient(lambda,d,m0,mHat_1,grad,dHat_1,eAll_1,label=TRIAL_FWD_F_1_LABEL) g_1 = dotProd(grad, h) write(*,'(a29,es12.5)',advance='no') ' GRAD: computed, with g0=',g_0 write(*,'(a4,es12.5)') ' g1=',g_1 @@ -1262,12 +1281,12 @@ subroutine lineSearchWolfe(lambda,d,m0,h,alpha,mHat,f,grad, & endif ! compute the penalty functional call linComb(ONE,mHat_0,alpha,h,mHat) - call func(lambda,d,m0,mHat,f,mNorm,dHat,eAll,rms) + call func(lambda,d,m0,mHat,f,mNorm,dHat,eAll,rms,label=FWD_F_LABEL) call printf('CUBICLS',lambda,alpha,f,mNorm,rms) call printf('CUBICLS',lambda,alpha,f,mNorm,rms,logFile) niter = niter + 1 ibracket = ibracket + 1 - call gradient(lambda,d,m0,mHat,grad,dHat,eAll) + call gradient(lambda,d,m0,mHat,grad,dHat,eAll,label=FWD_F_LABEL) g_1 = dotProd(grad, h) write(*,'(a29,es12.5)',advance='no') ' GRAD: computed, with g0=',g_0 write(*,'(a4,es12.5)') ' g1=',g_1 @@ -1306,7 +1325,7 @@ subroutine lineSearchWolfe(lambda,d,m0,h,alpha,mHat,f,grad, & starting_guess = .false. else starting_guess = .true. - call gradient(lambda,d,m0,mHat_1,grad,dHat_1,eAll_1) + call gradient(lambda,d,m0,mHat_1,grad,dHat_1,eAll_1,label=TRIAL_FWD_F_1_LABEL) endif exit endif @@ -1318,7 +1337,7 @@ subroutine lineSearchWolfe(lambda,d,m0,h,alpha,mHat,f,grad, & starting_guess = .false. else starting_guess = .true. 
- call gradient(lambda,d,m0,mHat_1,grad,dHat_1,eAll_1) + call gradient(lambda,d,m0,mHat_1,grad,dHat_1,eAll_1,label=TRIAL_FWD_F_1_LABEL) endif exit endif diff --git a/f90/MPI/Declaration_MPI.f90 b/f90/MPI/Declaration_MPI.f90 index d9a6c4f6..027df5f6 100644 --- a/f90/MPI/Declaration_MPI.f90 +++ b/f90/MPI/Declaration_MPI.f90 @@ -121,12 +121,12 @@ Module Declaration_MPI type :: define_worker_job SEQUENCE character*80 :: what_to_do='NOTHING' + character*80 :: label='NOTHING' Integer :: per_index,Stn_index,pol_index,data_type_index,data_type Integer :: taskid logical :: keep_E_soln=.false. logical :: several_Tx=.false. logical :: create_your_own_e0=.false. - logical :: trial=.false. ! 2022.10.06, Liu Zhongyin, add iSite storing the site index in rx of dataBlock_t Integer :: iSite end type define_worker_job @@ -152,13 +152,14 @@ Module Declaration_MPI subroutine create_worker_job_task_place_holder implicit none - integer index,Nbytes1,Nbytes2,Nbytes3 + integer index,Nbytes1,Nbytes2,Nbytes3,NBytes4 CALL MPI_PACK_SIZE(80, MPI_CHARACTER, MPI_COMM_WORLD, Nbytes1, ierr) - CALL MPI_PACK_SIZE(7, MPI_INTEGER, MPI_COMM_WORLD, Nbytes2, ierr) - CALL MPI_PACK_SIZE(4, MPI_LOGICAL, MPI_COMM_WORLD, Nbytes3, ierr) + CALL MPI_PACK_SIZE(80, MPI_CHARACTER, MPI_COMM_WORLD, Nbytes2, ierr) + CALL MPI_PACK_SIZE(7, MPI_INTEGER, MPI_COMM_WORLD, Nbytes3, ierr) + CALL MPI_PACK_SIZE(3, MPI_LOGICAL, MPI_COMM_WORLD, Nbytes4, ierr) - Nbytes=(Nbytes1+Nbytes2+Nbytes3)+1 + Nbytes=(Nbytes1+Nbytes2+Nbytes3+NBytes4)+1 if(.not. 
associated(worker_job_package)) then allocate(worker_job_package(Nbytes)) @@ -174,6 +175,7 @@ subroutine Pack_worker_job_task index=1 call MPI_Pack(worker_job_task%what_to_do,80, MPI_CHARACTER, worker_job_package, Nbytes, index, MPI_COMM_WORLD, ierr) + call MPI_Pack(worker_job_task%label,80, MPI_CHARACTER, worker_job_package, Nbytes, index, MPI_COMM_WORLD, ierr) call MPI_Pack(worker_job_task%per_index ,1 , MPI_INTEGER , worker_job_package, Nbytes, index, MPI_COMM_WORLD, ierr) call MPI_Pack(worker_job_task%Stn_index ,1 , MPI_INTEGER , worker_job_package, Nbytes, index, MPI_COMM_WORLD, ierr) call MPI_Pack(worker_job_task%pol_index ,1 , MPI_INTEGER , worker_job_package, Nbytes, index, MPI_COMM_WORLD, ierr) @@ -184,7 +186,6 @@ subroutine Pack_worker_job_task call MPI_Pack(worker_job_task%keep_E_soln,1, MPI_LOGICAL, worker_job_package, Nbytes, index, MPI_COMM_WORLD, ierr) call MPI_Pack(worker_job_task%several_Tx,1, MPI_LOGICAL, worker_job_package, Nbytes, index, MPI_COMM_WORLD, ierr) call MPI_Pack(worker_job_task%create_your_own_e0,1, MPI_LOGICAL, worker_job_package, Nbytes, index, MPI_COMM_WORLD, ierr) - call MPI_Pack(worker_job_task%trial,1, MPI_LOGICAL, worker_job_package, Nbytes, index, MPI_COMM_WORLD, ierr) ! 
2019.05.08, Liu Zhongyin, add iSite for rx in dataBlock_t call MPI_Pack(worker_job_task%iSite, 1, MPI_INTEGER, worker_job_package, Nbytes, index, MPI_COMM_WORLD, ierr) @@ -196,6 +197,7 @@ subroutine Unpack_worker_job_task integer index index=1 call MPI_Unpack(worker_job_package, Nbytes, index, worker_job_task%what_to_do,80, MPI_CHARACTER,MPI_COMM_WORLD, ierr) + call MPI_Unpack(worker_job_package, Nbytes, index, worker_job_task%label,80, MPI_CHARACTER,MPI_COMM_WORLD, ierr) call MPI_Unpack(worker_job_package, Nbytes, index, worker_job_task%per_index ,1 , MPI_INTEGER,MPI_COMM_WORLD, ierr) call MPI_Unpack(worker_job_package, Nbytes, index, worker_job_task%Stn_index ,1 , MPI_INTEGER,MPI_COMM_WORLD, ierr) @@ -207,7 +209,6 @@ subroutine Unpack_worker_job_task call MPI_Unpack(worker_job_package, Nbytes, index, worker_job_task%keep_E_soln,1, MPI_LOGICAL,MPI_COMM_WORLD, ierr) call MPI_Unpack(worker_job_package, Nbytes, index, worker_job_task%several_Tx,1, MPI_LOGICAL,MPI_COMM_WORLD, ierr) call MPI_Unpack(worker_job_package, Nbytes, index, worker_job_task%create_your_own_e0,1, MPI_LOGICAL,MPI_COMM_WORLD, ierr) - call MPI_Unpack(worker_job_package, Nbytes, index, worker_job_task%trial,1, MPI_LOGICAL,MPI_COMM_WORLD, ierr) ! 
2019.05.08, Liu Zhongyin, add iSite for rx in dataBlock_t call MPI_Unpack(worker_job_package, Nbytes, index, worker_job_task%iSite, 1, MPI_INTEGER,MPI_COMM_WORLD, ierr) diff --git a/f90/MPI/Main_MPI.f90 b/f90/MPI/Main_MPI.f90 index 2564f64b..4bcb7b01 100644 --- a/f90/MPI/Main_MPI.f90 +++ b/f90/MPI/Main_MPI.f90 @@ -620,14 +620,14 @@ end subroutine Master_job_Regroup !---------------------------------------------------------------------------- !########################## Master_job_fwdPred ############################ -Subroutine Master_job_fwdPred(sigma,d1,eAll,comm,trial) +Subroutine Master_job_fwdPred(sigma,d1,eAll,comm,label) implicit none - type(modelParam_t), intent(in) :: sigma - type(dataVectorMTX_t), intent(inout) :: d1 - type(solnVectorMTX_t), intent(inout) :: eAll - integer, intent(inout), optional :: comm - logical, intent(in), optional :: trial + type(modelParam_t), intent(in) :: sigma + type(dataVectorMTX_t), intent(inout) :: d1 + type(solnVectorMTX_t), intent(inout) :: eAll + integer, intent(inout), optional :: comm + character(len=*), intent(in), optional :: label integer nTx @@ -635,7 +635,13 @@ Subroutine Master_job_fwdPred(sigma,d1,eAll,comm,trial) Integer :: iper, comm_current Integer :: per_index,pol_index,stn_index,iTx,i,iDt,j character(80) :: job_name - logical :: trial_lcl + character(len=80) :: label_lcl + + if (present(label)) then + label_lcl = label + else + label_lcl = 'NULL' + end if ! nTX is number of transmitters; nTx = d1%nTx @@ -657,12 +663,6 @@ Subroutine Master_job_fwdPred(sigma,d1,eAll,comm,trial) modem_ctx % comm_current = comm_current - if (present(trial)) then - trial_lcl = trial - else - trial_lcl = .false. - end if - ! 
First, distribute the current model to all workers call Master_job_Distribute_Model(sigma) @@ -672,10 +672,12 @@ Subroutine Master_job_fwdPred(sigma,d1,eAll,comm,trial) end if job_name= 'FORWARD' - call Master_job_Distribute_Taskes(job_name,nTx,sigma,eAll,comm_current, trial=trial_lcl) + write(0,*) "FORWARD LABEL: ", trim(label_lcl) + call Master_job_Distribute_Taskes(job_name,nTx,sigma,eAll,comm_current, label=label_lcl) ! Calculate Data Response - call Master_job_DataResp(nTx, sigma, eAll, d1, trial=trial_lcl) + write(0,*) "DATARESP LABEL: ", trim(label_lcl) + call Master_job_DataResp(nTx, sigma, eAll, d1, label=label_lcl) write(ioMPI,*)'FWD: Finished calculating for (', nTx , ') Transmitters ' endtime=MPI_Wtime() @@ -693,7 +695,7 @@ end subroutine Master_job_fwdPred ! * Master_job_DataResp_main_only - Runs if EsMgr_save_in_file == .false. ! * Master_job_DataResp_IO - Runs if EsMgr_save_in_file == .true. ! -subroutine Master_job_DataResp(nTx, sigma, eAll, d1, trial) +subroutine Master_job_DataResp(nTx, sigma, eAll, d1, label) implicit none @@ -701,25 +703,26 @@ subroutine Master_job_DataResp(nTx, sigma, eAll, d1, trial) type(modelParam_t), intent(in) :: sigma type(solnVectorMTX_t), intent(in) :: eAll type(dataVectorMTX_t), intent(inout) :: d1 - logical, optional, intent(in) :: trial + character(len=*), intent(in), optional :: label integer :: iTx, i, j integer :: iDt - logical :: trial_lcl - call ModEM_timers_create('DataResp') - call ModEM_timers_start('DataResp') + character(len=80) :: label_lcl - if (present(trial)) then - trial_lcl = trial + if (present(label)) then + label_lcl = label else - trial_lcl = .false. + label_lcl = "NULL" end if + call ModEM_timers_create('DataResp') + call ModEM_timers_start('DataResp') + if (EsMgr_save_in_file) then ! Use the worker jobs to calculate the data resposne by reading ! 
the electric field files - call Master_job_DataResp_IO(nTx, sigma, d1, trial_lcl) + call Master_job_DataResp_IO(nTx, sigma, d1, label=trim(label_lcl)) else ! The main task only computes the data response call Master_job_DataResp_main_only(nTx, sigma, d1, eAll) @@ -778,27 +781,27 @@ end subroutine Master_job_DataResp_main_only ! ! This function should only be used if EsMgr_init was initalized with ! `save_in_file = .true.`/EsMgr_save_in_file = .true.. -subroutine Master_job_DataResp_IO(nTx, sigma, d, trial) +subroutine Master_job_DataResp_IO(nTx, sigma, d, label) implicit none integer, intent(in) :: nTx type (modelParam_t), intent(in) :: sigma type (dataVectorMTX_t), intent(inout) :: d - logical, intent(in), optional :: trial + character(len=*), intent(in), optional :: label character (len=*), parameter :: JOB_NAME = "DATARESP" integer :: dest, nTasks, remainder, iTx integer :: iTx_min, iTx_max, i, j, k - logical :: trial_lcl integer :: comm_current integer :: size_current + character(len=80) :: label_lcl - if (present(trial)) then - trial_lcl = trial + if (present(label)) then + label_lcl = label else - trial_lcl = .false. 
+ label_lcl = trim('NULL') endif if (para_method.eq.0) then @@ -829,7 +832,7 @@ subroutine Master_job_DataResp_IO(nTx, sigma, d, trial) worker_job_task % what_to_do = trim(job_name) worker_job_task % per_index = iTx_min worker_job_task % pol_index = iTx_max - worker_job_task % trial = trial_lcl + worker_job_task % label = trim(label_lcl) call Pack_worker_job_task call MPI_Send(worker_job_package, Nbytes, MPI_PACKED, dest, FROM_MASTER, comm_current, ierr) @@ -1108,7 +1111,7 @@ end subroutine Master_job_calcJ !######################### Master_job_JmultT ############################ -Subroutine Master_job_JmultT(sigma,d,dsigma,eAll,s_hat,comm,use_starting_guess) +Subroutine Master_job_JmultT(sigma,d,dsigma,eAll,s_hat,comm,label) implicit none type(modelParam_t), intent(in) :: sigma @@ -1117,7 +1120,7 @@ Subroutine Master_job_JmultT(sigma,d,dsigma,eAll,s_hat,comm,use_starting_guess) type(solnVectorMTX_t), intent(in), optional :: eAll type(modelParam_t),intent(inout),pointer,dimension(:), optional :: s_hat integer, intent(in), optional :: comm - logical, intent(in), optional :: use_starting_guess + character(len=*), intent(in), optional :: label ! Local type(modelParam_t) :: dsigma_temp @@ -1131,7 +1134,14 @@ Subroutine Master_job_JmultT(sigma,d,dsigma,eAll,s_hat,comm,use_starting_guess) Integer :: per_index,pol_index,stn_index character(80) :: job_name,file_name Integer :: comm_current - logical :: use_starting_guess_lcl + character(80) :: label_lcl + + + if (present(label)) then + label_lcl = label + else + label_lcl = 'NULL' + end if savedSolns = present(eAll) returne_m_vectors= present(s_hat) @@ -1152,12 +1162,6 @@ Subroutine Master_job_JmultT(sigma,d,dsigma,eAll,s_hat,comm,use_starting_guess) modem_ctx % comm_current = comm_current - if (present(use_starting_guess)) then - use_starting_guess_lcl = use_starting_guess - else - use_starting_guess_lcl = .false. - end if - ! nTX is number of transmitters; nTx = d%nTx if(.not. 
eAll_temp%allocated) then @@ -1200,7 +1204,7 @@ Subroutine Master_job_JmultT(sigma,d,dsigma,eAll,s_hat,comm,use_starting_guess) call zero(Qcomb) job_name= 'JmultT' call Master_job_Distribute_Taskes(job_name,nTx,sigma,eAll_out, & - & comm_current, eAll_temp, trial=use_starting_guess_lcl) + & comm_current, eAll_temp, label=label_lcl) file_name='e0.soln' !call write_solnVectorMTX(10,file_name,eAll_temp) @@ -1208,7 +1212,7 @@ Subroutine Master_job_JmultT(sigma,d,dsigma,eAll,s_hat,comm,use_starting_guess) !call write_solnVectorMTX(20,file_name,eAll_out) ! Calculate PQMult - call Master_job_PQMult(nTx, d, eAll_temp, eAll_out, sigma, dsigma, s_hat, use_starting_guess=use_starting_guess_lcl) + call Master_job_PQMult(nTx, d, eAll_temp, eAll_out, sigma, dsigma, s_hat, label=label_lcl) endtime=MPI_Wtime() time_used = endtime-starttime @@ -1236,7 +1240,7 @@ end Subroutine Master_job_JmultT ! * Master_job_PQMult_main_only - Runs if EsMgr_save_in_file == .false. ! * Master_job_PQMult_IO - Runs if EsMgr_save_in_file == .true. ! -subroutine Master_job_PQMult(nTx, d, eAll_temp, eAll_out, sigma, dsigma, s_hat, use_starting_guess) +subroutine Master_job_PQMult(nTx, d, eAll_temp, eAll_out, sigma, dsigma, s_hat, label) implicit none @@ -1247,21 +1251,20 @@ subroutine Master_job_PQMult(nTx, d, eAll_temp, eAll_out, sigma, dsigma, s_hat, type (modelParam_t), intent(in) :: sigma type (modelParam_t), intent(inout) :: dsigma type(modelParam_t),intent(inout),pointer,dimension(:), optional :: s_hat - logical, optional, intent(in) :: use_starting_guess + character(len=*), intent(in), optional :: label + character(len=80) :: label_lcl - logical :: use_starting_guess_lcl + if (present(label)) then + label_lcl = label + else + label_lcl = 'NULL' + end if call ModEM_timers_create('PQMult') call ModEM_timers_start('PQMult') - if (present(use_starting_guess)) then - use_starting_guess_lcl = use_starting_guess - else - use_starting_guess_lcl = .false. 
- end if - if (EsMgr_save_in_file) then - call Master_job_PQMult_IO(nTx, sigma, dsigma, use_starting_guess=use_starting_guess_lcl) + call Master_job_PQMult_IO(nTx, sigma, dsigma, label=label_lcl) else call Master_job_PQMult_main_only(nTx, d, eAll_temp, eAll_out, sigma, dsigma, s_hat) end if @@ -1327,17 +1330,18 @@ end subroutine Master_job_PQMult_main_only ! ! This function should only be used if EsMgr_init was initalized with ! `save_in_file = .true.`/EsMgr_save_in_file = .true.. -subroutine Master_job_PQMult_IO(nTx, sigma, dsigma, use_starting_guess) +subroutine Master_job_PQMult_IO(nTx, sigma, dsigma, label) implicit none integer, intent(in) :: nTx type (modelParam_t), intent(in) :: sigma type (modelParam_t), intent(inout) :: dsigma - logical, intent(in), optional :: use_starting_guess + character(len=*), intent(in), optional :: label character(len=*), parameter :: job_name = "PQMULT" type(modelParam_t), dimension(nTx) :: dsigma_recv + character(len=80) :: label_lcl integer :: dest, nTasks, remainder, iTx integer :: iTx_min, iTx_max, i, j, k @@ -1347,12 +1351,11 @@ subroutine Master_job_PQMult_IO(nTx, sigma, dsigma, use_starting_guess) logical, dimension(number_of_workers) :: task_is_working logical, dimension(nTx) :: transmitters_processing, transmitters_done logical :: sending - logical :: use_starting_guess_lcl - if (present(use_starting_guess)) then - use_starting_guess_lcl = use_starting_guess + if (present(label)) then + label_lcl = label else - use_starting_guess_lcl = .false. 
+ label_lcl = 'NULL' end if if (para_method.eq.0) then @@ -1389,7 +1392,7 @@ subroutine Master_job_PQMult_IO(nTx, sigma, dsigma, use_starting_guess) worker_job_task % what_to_do=trim(job_name) worker_job_task % per_index = iTx worker_job_task % pol_index = -1 - worker_job_task % trial = use_starting_guess_lcl + worker_job_task % label = trim(label_lcl) call create_worker_job_task_place_holder call Pack_worker_job_task @@ -1915,7 +1918,7 @@ end Subroutine Master_job_Stop_MESSAGE !********************** Master Distribute Tasks ***************************** subroutine Master_job_Distribute_Taskes(job_name,nTx,sigma,eAll_out, & - & comm, eAll_in, trial) + & comm, eAll_in, label) implicit none character(80) , intent(in) :: job_name @@ -1924,7 +1927,7 @@ subroutine Master_job_Distribute_Taskes(job_name,nTx,sigma,eAll_out, & type(solnVectorMTX_t), intent(in), optional :: eAll_in integer, intent(in), optional :: comm type(solnVectorMTX_t), intent(inout), optional :: eAll_out - logical, intent(in), optional :: trial + character(len=*), intent(in), optional :: label !Local Integer :: iper,ipol,ipol1,ijob,total_jobs Integer :: bcounter, tcounter, robin @@ -1932,6 +1935,7 @@ subroutine Master_job_Distribute_Taskes(job_name,nTx,sigma,eAll_out, & logical :: keep_soln,savedSolns,ascend,trial_lcl integer :: comm_current, size_current double precision :: now, just, time_passed + character(len=80) :: label_lcl savedSolns = present(eAll_in) ! over-ride the default communicator, if needed @@ -1945,10 +1949,10 @@ subroutine Master_job_Distribute_Taskes(job_name,nTx,sigma,eAll_out, & comm_current = comm_world end if - if (present(trial)) then - trial_lcl = trial + if (present(label)) then + label_lcl = label else - trial_lcl = .false. 
+ label_lcl = 'NULL' end if @@ -1967,7 +1971,6 @@ subroutine Master_job_Distribute_Taskes(job_name,nTx,sigma,eAll_out, & who = 0 worker_job_task%what_to_do=trim(job_name) - worker_job_task%trial=trial_lcl call count_number_of_messages_to_RECV(eAll_out) total_jobs = answers_to_receive ! counter to locate the task @@ -2000,7 +2003,8 @@ subroutine Master_job_Distribute_Taskes(job_name,nTx,sigma,eAll_out, & end if worker_job_task%per_index = per_index worker_job_task%pol_index = pol_index - worker_job_task%trial = trial_lcl + worker_job_task%label=trim(label_lcl) + call create_worker_job_task_place_holder call Pack_worker_job_task call MPI_SEND(worker_job_package,Nbytes, MPI_PACKED, who, & @@ -2087,8 +2091,8 @@ subroutine Master_job_Distribute_Taskes(job_name,nTx,sigma,eAll_out, & worker_job_task%per_index= per_index worker_job_task%pol_index= pol_index - worker_job_task%what_to_do= trim(job_name) - worker_job_task%trial=trial_lcl + worker_job_task%what_to_do= trim(job_name) + worker_job_task%label = trim(label_lcl) call create_worker_job_task_place_holder call Pack_worker_job_task @@ -2232,7 +2236,6 @@ Subroutine Worker_job(sigma,d) ! PQMult type (modelParam_t) :: dsigma_temp, dsigma_send, Qcomb - logical :: trial ! time DOUBLE PRECISION :: time_passed, now, just @@ -2294,7 +2297,6 @@ Subroutine Worker_job(sigma,d) modem_ctx % comm_current = comm_current modem_ctx % rank_current = rank_current - trial = worker_job_task % trial ! for debug ! write(6,*) 'source = ', MPI_SOURCE @@ -2362,11 +2364,10 @@ Subroutine Worker_job(sigma,d) & FROM_WORKER, comm_current, ierr) ! Use EsMgr_save to send e0 back to main task or save it to disk which_pol=1 - if (trial) then - call EsMgr_save(e0, to=0, prefix='.trial') - else - call EsMgr_save(e0, to=0) - end if + + write(0,*) "Worker label: ", trim(worker_job_task % label) + + call EsMgr_save(e0, to=0, E_field_type='fwd', label=worker_job_task % label) end if ! so long! 
call reset_e_soln(e0) @@ -2391,14 +2392,7 @@ Subroutine Worker_job(sigma,d) e % tx = per_index do pol_index = 1, get_nPol(per_index) - - if (worker_job_task % trial) then - write(0,*) "Reading the trial" - call EsMgr_get(e0, e0 % tx, pol_index=pol_index, prefix='.trial') - else - write(0,*) "Not reading the trial" - call EsMgr_get(e0, e0 % tx, pol_index=pol_index) - endif + call EsMgr_get(e0, e0 % tx, pol_index=pol_index, E_field_type='fwd', label=worker_job_task % label) end do do i = 1, d % d(per_index) % nDt @@ -2423,7 +2417,7 @@ Subroutine Worker_job(sigma,d) per_index = worker_job_task % per_index worker_job_task % taskid = taskid - trial=worker_job_task%trial + call create_solnVector(grid, 1, e0) call create_solnVector(grid, 1, e) @@ -2443,13 +2437,8 @@ Subroutine Worker_job(sigma,d) e % tx = per_index do pol_index = 1, get_nPol(per_index) - if (trial) then - call EsMgr_get(e0, e0 % tx, pol_index=pol_index, prefix='.trial') - call EsMgr_get(e, e % tx, pol_index=pol_index, prefix='.JmultT') - else - call EsMgr_get(e0, e0 % tx, pol_index=pol_index) - call EsMgr_get(e, e % tx, pol_index=pol_index, prefix='.JmultT') - end if + call EsMgr_get(e0, e0 % tx, pol_index=pol_index, E_field_type='fwd', label=trim(worker_job_task % label)) + call EsMgr_get(e, e % tx, pol_index=pol_index, E_field_type='JmultT', label=trim(worker_job_task % label)) end do call PmultT(e0, sigma, e, dsigma_temp) @@ -2646,6 +2635,8 @@ Subroutine Worker_job(sigma,d) ! calculate JmultT (a.k.a. adjoint) per_index=worker_job_task%per_index pol_index=worker_job_task%pol_index + + write(0,*) "LABEL FOR JMULTT", trim(Worker_job_task % label) if ((size_local.gt.1).and.(para_method.gt.0).and. & & (rank_local.eq.0)) then ! 
group leader passing the command to workers @@ -2665,11 +2656,9 @@ Subroutine Worker_job(sigma,d) & ' Start Receiving ' , orginal_nPol, ' from Master' do ipol=1,nPol_MPI which_pol=ipol - if (worker_job_task % trial) then - call EsMgr_get(e0, per_index, pol_index=ipol, from=0, prefix='.trial') - else - call EsMgr_get(e0, per_index, pol_index=ipol, from=0) - end if + call EsMgr_get(e0, per_index, pol_index=ipol, from=0, & + E_field_type='fwd', & + label=worker_job_task % label) end do call initSolverWithOutE0(per_index,sigma,grid,size_local,& e,comb) @@ -2713,7 +2702,7 @@ Subroutine Worker_job(sigma,d) call MPI_SEND(worker_job_package,Nbytes, MPI_PACKED,0, & & FROM_WORKER, comm_current, ierr) which_pol=1 - call EsMgr_save(e, to=0, prefix=".JmultT") + call EsMgr_save(e, to=0, E_field_type="JmultT", label=worker_job_task % label) end if ! hasta la vista! now = MPI_Wtime() From 91df140ee7667d9aaf24f982132e7aacd2b5ab0d Mon Sep 17 00:00:00 2001 From: Miles Curry <2590700+MiCurry@users.noreply.github.com> Date: Tue, 31 Mar 2026 09:50:25 -0600 Subject: [PATCH 21/21] Remove debug print statements --- f90/3D_MT/ESolnManager.f90 | 2 -- f90/MPI/Main_MPI.f90 | 5 ----- 2 files changed, 7 deletions(-) diff --git a/f90/3D_MT/ESolnManager.f90 b/f90/3D_MT/ESolnManager.f90 index 494b60da..9e38f962 100644 --- a/f90/3D_MT/ESolnManager.f90 +++ b/f90/3D_MT/ESolnManager.f90 @@ -303,8 +303,6 @@ subroutine create_prefix(prefix, E_field_type, label) use_label = trim(label) /= "NULL" .and. len_trim(label) /= 0 end if - write(0,*) 'LABEL: ', use_label, trim(label) - if (present(E_field_type) .and. 
use_label) then PREFIX_FNAME_FORMAT = '(A, A, A, A, A)' write(prefix, PREFIX_FNAME_FORMAT) trim(EsMgr_prefix), ".", trim(E_field_type), ".", trim(label) diff --git a/f90/MPI/Main_MPI.f90 b/f90/MPI/Main_MPI.f90 index 4bcb7b01..100a1376 100644 --- a/f90/MPI/Main_MPI.f90 +++ b/f90/MPI/Main_MPI.f90 @@ -672,11 +672,9 @@ Subroutine Master_job_fwdPred(sigma,d1,eAll,comm,label) end if job_name= 'FORWARD' - write(0,*) "FORWARD LABEL: ", trim(label_lcl) call Master_job_Distribute_Taskes(job_name,nTx,sigma,eAll,comm_current, label=label_lcl) ! Calculate Data Response - write(0,*) "DATARESP LABEL: ", trim(label_lcl) call Master_job_DataResp(nTx, sigma, eAll, d1, label=label_lcl) write(ioMPI,*)'FWD: Finished calculating for (', nTx , ') Transmitters ' @@ -2365,8 +2363,6 @@ Subroutine Worker_job(sigma,d) ! Use EsMgr_save to send e0 back to main task or save it to disk which_pol=1 - write(0,*) "Worker label: ", trim(worker_job_task % label) - call EsMgr_save(e0, to=0, E_field_type='fwd', label=worker_job_task % label) end if ! so long! @@ -2636,7 +2632,6 @@ Subroutine Worker_job(sigma,d) per_index=worker_job_task%per_index pol_index=worker_job_task%pol_index - write(0,*) "LABEL FOR JMULTT", trim(Worker_job_task % label) if ((size_local.gt.1).and.(para_method.gt.0).and. & & (rank_local.eq.0)) then ! group leader passing the command to workers