Compute Hessian for unfragmented system using MPI distribution
Uses a dynamic work queue approach: workers request displacement indices from rank 0, compute gradients, and send results back. This provides better load balancing than static work distribution.
| Type | Intent | Optional | Attributes | Name | Description |
|---|---|---|---|---|---|
| type(comm_t), | intent(in) | | :: | world_comm | |
| type(system_geometry_t), | intent(in) | | :: | sys_geom | |
| type(driver_config_t), | intent(in) | | :: | config | Driver configuration |
| type(json_output_data_t), | intent(out), | optional | :: | json_data | JSON output data |
| Type | Visibility | Attributes | Name | Initial |
|---|---|---|---|---|
| integer, | private | :: | my_rank | |
| integer, | private | :: | n_ranks | |
```fortran
module subroutine distributed_unfragmented_hessian(world_comm, sys_geom, config, json_data)
    !! Compute Hessian for unfragmented system using MPI distribution
    !!
    !! Uses a dynamic work queue approach: workers request displacement indices
    !! from rank 0, compute gradients, and send results back. This provides
    !! better load balancing than static work distribution.
    use mqc_json_output_types, only: json_output_data_t

    type(comm_t), intent(in) :: world_comm
    type(system_geometry_t), intent(in) :: sys_geom
    type(driver_config_t), intent(in) :: config
        !! Driver configuration
    type(json_output_data_t), intent(out), optional :: json_data
        !! JSON output data

    integer :: my_rank, n_ranks

    my_rank = world_comm%rank()
    n_ranks = world_comm%size()

    if (my_rank == 0) then
        ! Rank 0 is the coordinator
        call hessian_coordinator(world_comm, sys_geom, config, json_data)
    else
        ! Other ranks are workers
        call hessian_worker(world_comm, sys_geom, config)
    end if

    ! Synchronize all ranks before returning
    call world_comm%barrier()

end subroutine distributed_unfragmented_hessian
```
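The work-queue protocol itself lives in `hessian_coordinator` and `hessian_worker`, which are not shown here. The sketch below illustrates the general pattern the description refers to, written against the standard `mpi_f08` module rather than the `comm_t` wrapper. The message tags, the stop sentinel (index 0), the scalar result payload, and the `n_displacements` count are all illustrative assumptions, not the actual protocol used by this library.

```fortran
! Hypothetical sketch of a dynamic work queue for finite-difference gradients.
! Not the library's code: tags, the stop sentinel, and the scalar result
! payload are assumptions made for illustration. Requires at least 2 ranks.
program work_queue_sketch
    use mpi_f08
    implicit none

    integer, parameter :: tag_request = 1, tag_work = 2, tag_result = 3
    integer, parameter :: n_displacements = 12   ! assumed number of displaced geometries
    integer :: my_rank, n_ranks, next_index, idx, n_results, n_stopped
    integer :: worker, dummy, stop_signal
    real(8) :: result
    type(MPI_Status) :: status

    call MPI_Init()
    call MPI_Comm_rank(MPI_COMM_WORLD, my_rank)
    call MPI_Comm_size(MPI_COMM_WORLD, n_ranks)

    if (my_rank == 0) then
        ! Coordinator: hand out displacement indices on demand and collect results.
        next_index  = 1
        n_results   = 0
        n_stopped   = 0
        stop_signal = 0
        do while (n_results < n_displacements .or. n_stopped < n_ranks - 1)
            call MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, status)
            worker = status%MPI_SOURCE
            if (status%MPI_TAG == tag_request) then
                call MPI_Recv(dummy, 1, MPI_INTEGER, worker, tag_request, &
                              MPI_COMM_WORLD, MPI_STATUS_IGNORE)
                if (next_index <= n_displacements) then
                    call MPI_Send(next_index, 1, MPI_INTEGER, worker, tag_work, MPI_COMM_WORLD)
                    next_index = next_index + 1
                else
                    ! No work left: reply with the stop sentinel (index 0).
                    call MPI_Send(stop_signal, 1, MPI_INTEGER, worker, tag_work, MPI_COMM_WORLD)
                    n_stopped = n_stopped + 1
                end if
            else
                ! A finished gradient; a real coordinator would pack the displacement
                ! index with the gradient and accumulate it into the Hessian.
                call MPI_Recv(result, 1, MPI_DOUBLE_PRECISION, worker, tag_result, &
                              MPI_COMM_WORLD, MPI_STATUS_IGNORE)
                n_results = n_results + 1
            end if
        end do
    else
        ! Worker: keep asking for displacement indices until told to stop.
        do
            call MPI_Send(my_rank, 1, MPI_INTEGER, 0, tag_request, MPI_COMM_WORLD)
            call MPI_Recv(idx, 1, MPI_INTEGER, 0, tag_work, MPI_COMM_WORLD, MPI_STATUS_IGNORE)
            if (idx == 0) exit
            result = real(idx, 8)   ! placeholder for computing the gradient at displacement idx
            call MPI_Send(result, 1, MPI_DOUBLE_PRECISION, 0, tag_result, MPI_COMM_WORLD)
        end do
    end if

    call MPI_Barrier(MPI_COMM_WORLD)
    call MPI_Finalize()
end program work_queue_sketch
```

In this sketch the stop sentinel is only sent in reply to a request, so the coordinator never blocks on a worker that is still computing; that request-driven dispatch is what lets expensive and cheap gradient evaluations interleave, which is the load-balancing advantage over a static split of the displacement list.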