## `node_worker_impl` — internal implementation of `node_worker` with typed context
### Arguments

| Type | Intent | Optional | Attributes | Name |
|---|---|---|---|---|
| class(many_body_expansion_t) | intent(in) | | | ctx |
### Local Variables

| Type | Visibility | Attributes | Name | Initial | Description |
|---|---|---|---|---|---|
| integer(kind=int32) | private | | dummy_msg | | |
| type(error_t) | private | | error | | |
| integer(kind=int64) | private | | fragment_idx | | |
| integer(kind=int32) | private | allocatable | fragment_indices(:) | | |
| integer(kind=int32) | private | | fragment_size | | |
| integer(kind=int32) | private | | fragment_type | | 0 = monomer (indices), 1 = intersection (atom list) |
| type(physical_fragment_t) | private | | phys_frag | | |
| type(request_t) | private | | req | | |
| type(calculation_result_t) | private | | result | | |
| type(MPI_Status) | private | | status | | |
subroutine node_worker_impl(ctx)
   !! Internal implementation of node_worker with typed context.
   !!
   !! Worker side of a coordinator/worker fragment-distribution protocol:
   !! in a loop, announce readiness to rank 0 on the node communicator,
   !! receive a fragment description, compute it via do_fragment_work, and
   !! send the result back — until a TAG_WORKER_FINISH message arrives.
   !! On any unrecoverable error the whole job is aborted via abort_comm.
   use mqc_error, only: error_t
   use mqc_many_body_expansion, only: many_body_expansion_t
   class(many_body_expansion_t), intent(in) :: ctx
   integer(int64) :: fragment_idx
   integer(int32) :: fragment_size, dummy_msg
   integer(int32) :: fragment_type !! 0 = monomer (indices), 1 = intersection (atom list)
   integer(int32), allocatable :: fragment_indices(:)
   type(calculation_result_t) :: result
   type(MPI_Status) :: status
   type(physical_fragment_t) :: phys_frag
   type(error_t) :: error
   ! MPI request handles for non-blocking operations
   type(request_t) :: req

   ! Payload of the work-request message; rank 0 (the node coordinator)
   ! only cares that the message arrived, not about its value.
   dummy_msg = 0
   do
      ! Tell the coordinator (rank 0 on node_comm) that this worker is idle.
      call isend(ctx%resources%mpi_comms%node_comm, dummy_msg, 0, TAG_WORKER_REQUEST, req)
      call wait(req)
      ! Receive the reply with MPI_ANY_TAG: the tag (inspected below via
      ! status%MPI_TAG) tells us whether this is work or a shutdown signal.
      ! For TAG_WORKER_FINISH the fragment_idx payload is ignored.
      call irecv(ctx%resources%mpi_comms%node_comm, fragment_idx, 0, MPI_ANY_TAG, req)
      call wait(req, status)
      select case (status%MPI_TAG)
      case (TAG_WORKER_FRAGMENT)
         ! Receive fragment type (0 = monomer indices, 1 = intersection atom list)
         call irecv(ctx%resources%mpi_comms%node_comm, fragment_type, 0, TAG_WORKER_FRAGMENT, req)
         call wait(req)
         ! Receive the length of the index array that follows.
         call irecv(ctx%resources%mpi_comms%node_comm, fragment_size, 0, TAG_WORKER_FRAGMENT, req)
         call wait(req)
         ! Note: must use blocking recv for allocatable arrays since size is unknown
         allocate (fragment_indices(fragment_size))
         call recv(ctx%resources%mpi_comms%node_comm, fragment_indices, 0, TAG_WORKER_FRAGMENT, status)
         ! Build physical fragment based on type
         if (ctx%has_geometry()) then
            if (fragment_type == 0) then
               ! Monomer: fragment_indices are monomer indices
               call build_fragment_from_indices(ctx%sys_geom, fragment_indices, phys_frag, error, ctx%sys_geom%bonds)
            else
               ! Intersection: fragment_indices are atom indices
               call build_fragment_from_atom_list(ctx%sys_geom, fragment_indices, fragment_size, &
                                                  phys_frag, error, ctx%sys_geom%bonds)
            end if
            if (error%has_error()) then
               ! Fragment construction failed: log the full trace and abort the
               ! entire MPI job — there is no per-fragment recovery path here.
               call logger%error(error%get_full_trace())
               call abort_comm(ctx%resources%mpi_comms%world_comm, 1)
            end if
            ! Process the chemistry fragment with physical geometry
            call do_fragment_work(fragment_idx, result, ctx%method_config, phys_frag, ctx%calc_type, &
                                  ctx%resources%mpi_comms%world_comm)
            call phys_frag%destroy()
         else
            ! Process without physical geometry (old behavior)
            call do_fragment_work(fragment_idx, result, ctx%method_config, &
                                  calc_type=ctx%calc_type, world_comm=ctx%resources%mpi_comms%world_comm)
         end if
         ! Send result back to coordinator
         call result_isend(result, ctx%resources%mpi_comms%node_comm, 0, TAG_WORKER_SCALAR_RESULT, req)
         call wait(req)
         ! Clean up result
         call result%destroy()
         ! Must free here: the next iteration allocates a (possibly different-
         ! sized) buffer for the next fragment.
         deallocate (fragment_indices)
      case (TAG_WORKER_FINISH)
         ! Coordinator has no more work: leave the request loop and return.
         exit
      case default
         ! Unexpected MPI tag - this should not happen in normal operation
         call logger%error("Worker received unexpected MPI tag: "//to_char(status%MPI_TAG))
         call logger%error("Expected TAG_WORKER_FRAGMENT or TAG_WORKER_FINISH")
         call abort_comm(ctx%resources%mpi_comms%world_comm, 1)
      end select
   end do
end subroutine node_worker_impl