result_irecv Subroutine

public subroutine result_irecv(result, comm, source, tag, req)

Receive a calculation result over MPI. The SCF energy is received non-blocking; all other components are received with blocking calls.

Arguments

Type | Intent | Optional | Attributes | Name
type(calculation_result_t), intent(inout) :: result
type(comm_t), intent(in) :: comm
integer, intent(in) :: source
integer, intent(in) :: tag
type(request_t), intent(out) :: req

Calls

result_irecv calls: irecv, recv

Called by

result_irecv is called by: global_coordinator, gmbe_pie_coordinator, and node_coordinator. These in turn are reached from gmbe_context_t%gmbe_run_distributed (via gmbe_pie_coordinator and the node_coordinator interface) and mbe_context_t%mbe_run_distributed (via the global_coordinator and node_coordinator interfaces).

Variables

Type Visibility Attributes Name Initial
type(MPI_Status), private :: status

Source Code

   subroutine result_irecv(result, comm, source, tag, req)
      !! Receive a calculation result over MPI.
      !! The SCF energy transfer is posted non-blocking and its completion is
      !! tracked through the returned request handle `req`; every remaining
      !! component is transferred with blocking receives so that only a single
      !! request handle is needed.
      type(calculation_result_t), intent(inout) :: result
      type(comm_t), intent(in) :: comm
      integer, intent(in) :: source
      integer, intent(in) :: tag
      type(request_t), intent(out) :: req
      type(MPI_Status) :: recv_status

      ! SCF energy: posted non-blocking; the caller completes it via `req`
      call irecv(comm, result%energy%scf, source, tag, req)

      ! Correlation energy components, received blocking in a fixed order
      ! that must mirror the sender's send order
      call recv(comm, result%energy%mp2%ss, source, tag, recv_status)
      call recv(comm, result%energy%mp2%os, source, tag, recv_status)
      call recv(comm, result%energy%cc%singles, source, tag, recv_status)
      call recv(comm, result%energy%cc%doubles, source, tag, recv_status)
      call recv(comm, result%energy%cc%triples, source, tag, recv_status)
      result%has_energy = .true.

      ! Fragment metadata
      call recv(comm, result%distance, source, tag, recv_status)

      ! Optional payloads: each is announced by a logical flag, and the
      ! corresponding array is received only when its flag is true.
      ! The MPI wrapper allocates each allocatable array on receipt.
      call recv(comm, result%has_gradient, source, tag, recv_status)
      if (result%has_gradient) then
         call recv(comm, result%gradient, source, tag, recv_status)
      end if

      call recv(comm, result%has_hessian, source, tag, recv_status)
      if (result%has_hessian) then
         call recv(comm, result%hessian, source, tag, recv_status)
      end if

      call recv(comm, result%has_dipole, source, tag, recv_status)
      if (result%has_dipole) then
         call recv(comm, result%dipole, source, tag, recv_status)
      end if

      call recv(comm, result%has_dipole_derivatives, source, tag, recv_status)
      if (result%has_dipole_derivatives) then
         call recv(comm, result%dipole_derivatives, source, tag, recv_status)
      end if
   end subroutine result_irecv