handle_node_results_to_batch Subroutine

public subroutine handle_node_results_to_batch(world_comm, batch_count, batch_ids, batch_results, results_received)

Drain pending node-level results and append them to the outbound batch.

Arguments

Type | Intent | Optional | Attributes | Name
type(comm_t), intent(in) :: world_comm
integer(kind=int32), intent(inout) :: batch_count
integer(kind=int64), intent(inout) :: batch_ids(:)
type(calculation_result_t), intent(inout) :: batch_results(:)
integer(kind=int64), intent(inout), optional :: results_received

Calls

handle_node_results_to_batch calls: abort_comm, error, iprobe, irecv, to_char, append_result_to_batch, error_t%error_get_message, flush_group_results, calculation_result_t%result_destroy, and result_irecv. In turn: flush_group_results calls isend, result_isend, and calculation_result_t%result_destroy; result_irecv calls irecv and recv; result_isend calls isend and send; result_destroy calls calculation_result_t%result_reset, which calls energy_t%energy_reset and error_t%error_clear; energy_reset calls mp2_energy_t%mp2_reset.

Called by

handle_node_results_to_batch is called by gmbe_group_global_coordinator and group_global_coordinator_impl. gmbe_group_global_coordinator is called by gmbe_context_t%gmbe_run_distributed. group_global_coordinator_impl is called by node_coordinator_impl, which is invoked via node_coordinator through the node_coordinator interface; that interface is used by both gmbe_context_t%gmbe_run_distributed and mbe_context_t%mbe_run_distributed.

Variables

Type Visibility Attributes Name Initial
logical, private :: has_pending
integer(kind=int64), private :: item_idx
type(calculation_result_t), private :: node_result
type(request_t), private :: req
type(MPI_Status), private :: status

Source Code

   subroutine handle_node_results_to_batch(world_comm, batch_count, batch_ids, batch_results, results_received)
      !! Receive every currently pending node-level result and fold it into the
      !! outgoing batch, flushing the batch to the group whenever it fills up.
      !! Aborts the communicator if any received result carries an error.
      type(comm_t), intent(in) :: world_comm
      integer(int32), intent(inout) :: batch_count
      integer(int64), intent(inout) :: batch_ids(:)
      type(calculation_result_t), intent(inout) :: batch_results(:)
      integer(int64), intent(inout), optional :: results_received
         !! Running tally of results drained; incremented once per message when present.

      logical :: message_waiting
      integer(int64) :: incoming_id
      type(request_t) :: pending_req
      type(MPI_Status) :: probe_status
      type(calculation_result_t) :: incoming_result

      drain: do
         ! Non-blocking probe: stop as soon as no further result message is queued.
         call iprobe(world_comm, MPI_ANY_SOURCE, TAG_NODE_SCALAR_RESULT, message_waiting, probe_status)
         if (.not. message_waiting) exit drain

         ! The sender transmits the item index first, then the result payload;
         ! receive both from the probed source in that exact order.
         call irecv(world_comm, incoming_id, probe_status%MPI_SOURCE, TAG_NODE_SCALAR_RESULT, pending_req)
         call wait(pending_req)
         call result_irecv(incoming_result, world_comm, probe_status%MPI_SOURCE, TAG_NODE_SCALAR_RESULT, pending_req)
         call wait(pending_req)

         ! A failed item is unrecoverable: log it and tear down the communicator.
         if (incoming_result%has_error) then
            call logger%error("Item "//to_char(incoming_id)//" calculation failed: "// &
                              incoming_result%error%get_message())
            call abort_comm(world_comm, 1)
         end if

         ! Make room before appending, then flush eagerly if the append filled the batch.
         if (batch_count >= size(batch_ids)) then
            call flush_group_results(world_comm, batch_count, batch_ids, batch_results)
         end if
         call append_result_to_batch(incoming_id, incoming_result, batch_count, batch_ids, batch_results)
         if (present(results_received)) results_received = results_received + 1_int64
         if (batch_count >= size(batch_ids)) then
            call flush_group_results(world_comm, batch_count, batch_ids, batch_results)
         end if

         call incoming_result%destroy()
      end do drain
   end subroutine handle_node_results_to_batch