From 812dcd3462852c1ffbdaf1e6c17343e9c80b02fb Mon Sep 17 00:00:00 2001 From: Miles Curry <2590700+MiCurry@users.noreply.github.com> Date: Wed, 11 Mar 2026 16:10:06 -0600 Subject: [PATCH] Add MPI routine to tell workers when a new iteration starts This commit adds a new routine to Main_MPI that allows the main task to communicate to workers that a new inversion iteration is starting. It currently is not called anywhere in ModEM. The workers themselves keep track of the iteration number. This routine is intended to provide a space where the main task and all workers can coordinate specific tasks. Specifically, this will give us the ability to call ModEM_memory_get_all to record the total memory usage across all tasks. --- f90/MPI/Main_MPI.f90 | 42 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 41 insertions(+), 1 deletion(-) diff --git a/f90/MPI/Main_MPI.f90 b/f90/MPI/Main_MPI.f90 index e9adf8d6..625d3688 100644 --- a/f90/MPI/Main_MPI.f90 +++ b/f90/MPI/Main_MPI.f90 @@ -1455,6 +1455,40 @@ Subroutine Master_job_Distribute_userdef_control(ctrl, comm) end Subroutine Master_job_Distribute_userdef_control + +subroutine Master_job_send_inv_iteration(iteration_num, comm) + + implicit none + + integer, intent(in) :: iteration_num + integer, intent(in), optional :: comm + integer :: task, size_current, comm_current + + if (present(comm)) then + if (comm .eq. 
MPI_COMM_NULL) then + comm_current = comm_world + else + comm_current = comm + endif + else + comm_current = comm_world + end if + call MPI_COMM_SIZE( comm_current, size_current, ierr ) + + worker_job_task % what_to_do = 'ITER_NUM' + worker_job_task % per_index = -1 + worker_job_task % pol_index = -1 + call create_worker_job_task_place_holder + call Pack_worker_job_task + + do task = 1, size_current - 1 + call MPI_SEND(worker_job_package, Nbytes, MPI_PACKED, task, FROM_MASTER, comm_current, ierr) + end do + + write(0,*) "New Iteration: ", iteration_num + +end subroutine Master_job_send_inv_iteration + !######################### Master_job_Clean Memory ########################## Subroutine Master_job_Clean_Memory(comm) @@ -1827,7 +1861,7 @@ Subroutine Worker_job(sigma,d) Integer :: iper,ipol,i,des_index Integer :: per_index,per_index_pre - Integer :: pol_index, stn_index + Integer :: pol_index, stn_index, iteration_number Integer :: eAll_vec_size Integer :: comm_current, rank_current Integer :: cpu_only_ranks @@ -1854,6 +1888,7 @@ Subroutine Worker_job(sigma,d) nTx=d%nTx recv_loop=0 + iteration_number = 0 previous_message='' write(node_info,'(a5,i3.3,a4)') 'node[',taskid,']: ' @@ -2565,6 +2600,11 @@ Subroutine Worker_job(sigma,d) if (associated(group_sizes)) then deallocate(group_sizes) endif + elseif (trim(worker_job_task%what_to_do) .eq. 'ITER_NUM' ) then + + iteration_number = iteration_number + 1 + write(0,*) "New Iteration: ", iteration_number + elseif (trim(worker_job_task%what_to_do) .eq. 'STOP' ) then ! clear all the temp packages and stop if (associated(sigma_para_vec)) then