global_coordinator_impl Subroutine

subroutine global_coordinator_impl(ctx, json_data)

Uses

  • mqc_json_output_types
  • mqc_many_body_expansion
  • mqc_result_types

Internal implementation of global_coordinator with typed context

Arguments

Type Intent Optional Attributes Name
type(mbe_context_t), intent(in) :: ctx
type(json_output_data_t), intent(out), optional :: json_data  !! JSON output data
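
A minimal calling sketch, assuming ctx has already been populated by the caller (fragment list, MPI resources, calculation type) and that the generic global_coordinator interface is accessible in the caller's scope; only the final call corresponds to this page, the setup names are placeholders:

      use mqc_many_body_expansion, only: mbe_context_t
      use mqc_json_output_types, only: json_output_data_t

      type(mbe_context_t) :: ctx
      type(json_output_data_t) :: json_data

      ! ... ctx is assumed to be fully initialized elsewhere ...
      call global_coordinator(ctx, json_data)   ! dispatches to global_coordinator_impl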


Calls

  • compute_mbe, send_group_assignment_matrix, abort_comm, to_char
  • handle_group_results, handle_node_results, handle_group_node_requests,
    handle_local_worker_results, handle_local_worker_requests_group
  • queue_init_from_list, queue_is_empty, queue_destroy
  • timer methods: start, get_elapsed_time
  • logger methods: verbose, info, error
  • mbe_result_t methods: allocate_dipole, allocate_gradient, allocate_hessian, destroy
  • mbe_base_has_geometry (ctx%has_geometry)

Called by

  • global_coordinator (module procedure behind the global_coordinator interface)
  • mbe_context_t%mbe_run_distributed, via the global_coordinator interface

Variables

Type Visibility Attributes Name Initial
integer(kind=int32), private :: calc_type_local
integer(kind=int64), private :: chunk_id
integer(kind=int64), private :: chunk_size
type(timer_type), private :: coord_timer
integer(kind=int64), private :: fragment_idx
integer, private :: group0_done
integer, private :: group0_finished_nodes
integer(kind=int64), private, allocatable :: group0_fragment_ids(:)
integer, private :: group0_node_count
integer, private, allocatable :: group0_polymers(:,:)
type(queue_t), private :: group0_queue
integer(kind=int64), private, allocatable :: group_counts(:)
integer, private :: group_done_count
integer(kind=int64), private, allocatable :: group_fill(:)
integer, private :: group_id
integer, private, allocatable :: group_leader_by_group(:)
integer, private, allocatable :: group_node_counts(:)
type(group_shard_t), private, allocatable :: group_shards(:)
integer, private :: i
integer, private :: local_finished_workers
integer, private :: local_node_done
integer, private :: n_cols
type(request_t), private :: req
type(calculation_result_t), private, allocatable :: results(:)
integer(kind=int64), private :: results_received
integer(kind=int64), private :: worker_fragment_map(ctx%resources%mpi_comms%node_comm%size())

Derived Types

type :: group_shard_t

Components

Type Visibility Attributes Name Initial
integer(kind=int64), public, allocatable :: fragment_ids(:)
integer, public, allocatable :: polymers(:,:)

Source Code

   subroutine global_coordinator_impl(ctx, json_data)
      !! Internal implementation of global_coordinator with typed context
      use mqc_json_output_types, only: json_output_data_t
      use mqc_many_body_expansion, only: mbe_context_t
      type(mbe_context_t), intent(in) :: ctx
      type(json_output_data_t), intent(out), optional :: json_data  !! JSON output data

      type :: group_shard_t
         integer(int64), allocatable :: fragment_ids(:)
         integer, allocatable :: polymers(:, :)
      end type group_shard_t

      type(timer_type) :: coord_timer
      integer(int64) :: results_received
      integer :: group_done_count
      integer :: group0_node_count
      integer :: group0_finished_nodes
      integer :: group_id
      integer :: i
      integer :: local_finished_workers
      integer :: group0_done
      integer :: local_node_done
      integer(int32) :: calc_type_local

      ! Storage for results
      type(calculation_result_t), allocatable :: results(:)
      integer(int64) :: worker_fragment_map(ctx%resources%mpi_comms%node_comm%size())
      type(queue_t) :: group0_queue
      integer(int64), allocatable :: group0_fragment_ids(:)
      integer, allocatable :: group0_polymers(:, :)

      integer(int64) :: fragment_idx
      integer(int64) :: chunk_id, chunk_size
      integer(int64), allocatable :: group_counts(:)
      integer(int64), allocatable :: group_fill(:)
      integer, allocatable :: group_leader_by_group(:)
      integer, allocatable :: group_node_counts(:)
      integer :: n_cols
      type(group_shard_t), allocatable :: group_shards(:)

      ! MPI request handles for non-blocking operations
      type(request_t) :: req

      calc_type_local = ctx%calc_type

      results_received = 0_int64
      group_done_count = 0
      group0_finished_nodes = 0
      local_finished_workers = 0
      group0_done = 0
      local_node_done = 0

      ! Allocate storage for results
      allocate (results(ctx%total_fragments))
      worker_fragment_map = 0

      call logger%verbose("Super-global coordinator starting with "//to_char(ctx%total_fragments)// &
                          " fragments for "//to_char(ctx%num_nodes)//" nodes and "// &
                          to_char(ctx%global_groups)//" groups")

      ! Build group leader map and node counts
      allocate (group_leader_by_group(ctx%global_groups))
      group_leader_by_group = -1
      allocate (group_node_counts(ctx%global_groups))
      group_node_counts = 0
      do i = 1, size(ctx%node_leader_ranks)
         group_id = ctx%group_ids(i)
         group_node_counts(group_id) = group_node_counts(group_id) + 1
         if (group_leader_by_group(group_id) == -1) then
            group_leader_by_group(group_id) = ctx%node_leader_ranks(i)
         end if
      end do
      group0_node_count = group_node_counts(1)
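      ! Group IDs are 1-based here, so index 1 holds the coordinator's own group
      ! (referred to as "group 0" in the local variable names).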

      ! Partition fragments into group shards (chunked round-robin)
      allocate (group_counts(ctx%global_groups))
      group_counts = 0_int64
      if (ctx%total_fragments > 0) then
         chunk_size = max(1_int64, ctx%total_fragments/int(ctx%global_groups, int64))
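         ! Chunked round-robin: consecutive fragments form chunks of roughly
         ! total_fragments/global_groups, and the chunks cycle over the groups.
         ! Example: 10 fragments over 3 groups gives chunk_size = 3, so group 1
         ! receives fragments 1-3 and 10, group 2 gets 4-6, group 3 gets 7-9.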
         do fragment_idx = 1_int64, ctx%total_fragments
            chunk_id = (fragment_idx - 1_int64)/chunk_size + 1_int64
            group_id = int(mod(chunk_id - 1_int64, int(ctx%global_groups, int64)) + 1_int64)
            group_counts(group_id) = group_counts(group_id) + 1_int64
         end do
      end if

      allocate (group_shards(ctx%global_groups))
      allocate (group_fill(ctx%global_groups))
      group_fill = 0_int64
      n_cols = size(ctx%polymers, 2)
      do i = 1, ctx%global_groups
         if (group_counts(i) > 0_int64) then
            allocate (group_shards(i)%fragment_ids(group_counts(i)))
            allocate (group_shards(i)%polymers(group_counts(i), n_cols))
         end if
      end do

      if (ctx%total_fragments > 0) then
         do fragment_idx = 1_int64, ctx%total_fragments
            chunk_id = (fragment_idx - 1_int64)/chunk_size + 1_int64
            group_id = int(mod(chunk_id - 1_int64, int(ctx%global_groups, int64)) + 1_int64)
            group_fill(group_id) = group_fill(group_id) + 1_int64
            group_shards(group_id)%fragment_ids(group_fill(group_id)) = fragment_idx
            group_shards(group_id)%polymers(group_fill(group_id), :) = ctx%polymers(fragment_idx, :)
         end do
      end if

      ! Dispatch shards to group globals
      do i = 1, ctx%global_groups
         if (group_leader_by_group(i) == 0) then
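            ! A leader rank of 0 means this coordinator leads the group itself:
            ! keep the shard locally instead of sending it over MPI.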
            if (allocated(group_shards(i)%fragment_ids)) then
               call move_alloc(group_shards(i)%fragment_ids, group0_fragment_ids)
               call move_alloc(group_shards(i)%polymers, group0_polymers)
            else
               allocate (group0_fragment_ids(0))
               allocate (group0_polymers(0, n_cols))
            end if
         else if (group_leader_by_group(i) > 0) then
            call send_group_assignment_matrix(ctx%resources%mpi_comms%world_comm, group_leader_by_group(i), &
                                              group_shards(i)%fragment_ids, group_shards(i)%polymers)
         end if
         if (allocated(group_shards(i)%fragment_ids)) deallocate (group_shards(i)%fragment_ids)
         if (allocated(group_shards(i)%polymers)) deallocate (group_shards(i)%polymers)
      end do
      deallocate (group_shards)
      deallocate (group_counts)
      deallocate (group_fill)

      ! Initialize local group queue (group 0)
      if (.not. allocated(group0_fragment_ids)) then
         allocate (group0_fragment_ids(0))
         allocate (group0_polymers(0, n_cols))
      end if
      block
         integer(int64), allocatable :: temp_ids(:)
         integer(int64) :: idx

         if (size(group0_fragment_ids) > 0) then
            ! Queue stores local indices (1..N) into group0_fragment_ids/polymers.
            allocate (temp_ids(size(group0_fragment_ids)))
            do idx = 1_int64, size(group0_fragment_ids, kind=int64)
               temp_ids(idx) = idx
            end do
            call queue_init_from_list(group0_queue, temp_ids)
            deallocate (temp_ids)
         else
            group0_queue%count = 0_int64
            group0_queue%head = 1_int64
         end if
      end block

      call coord_timer%start()
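      ! Event loop: keep polling until every group has reported completion and
      ! every fragment result has been collected.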
      do while (group_done_count < ctx%global_groups .or. results_received < ctx%total_fragments)

         ! PRIORITY 1: Receive batched results from group globals
         call handle_group_results(ctx%resources%mpi_comms%world_comm, results, results_received, &
                                   ctx%total_fragments, coord_timer, group_done_count, "fragment")

         ! PRIORITY 2: Check for incoming results from local workers
         if (ctx%resources%mpi_comms%node_comm%size() > 1) then
            call handle_local_worker_results(ctx, worker_fragment_map, results, results_received, coord_timer)
         end if

         ! PRIORITY 3: Check for incoming results from node coordinators (group 0 only)
         call handle_node_results(ctx, results, results_received, coord_timer)

         ! PRIORITY 4: Remote node coordinator requests for group 0
         call handle_group_node_requests(ctx, group0_queue, group0_fragment_ids, group0_polymers, group0_finished_nodes)

         ! PRIORITY 5: Local workers (shared memory) - send new work for group 0
         if (ctx%resources%mpi_comms%node_comm%size() > 1 .and. &
             local_finished_workers < ctx%resources%mpi_comms%node_comm%size() - 1) then
            call handle_local_worker_requests_group(ctx, group0_queue, group0_fragment_ids, group0_polymers, &
                                                    worker_fragment_map, local_finished_workers)
         end if

         ! Mark local node completion once all local workers are finished and queue is empty
         if (local_node_done == 0) then
            if (queue_is_empty(group0_queue) .and. &
                (ctx%resources%mpi_comms%node_comm%size() == 1 .or. &
                 local_finished_workers >= ctx%resources%mpi_comms%node_comm%size() - 1)) then
               local_node_done = 1
               group0_finished_nodes = group0_finished_nodes + 1
            end if
         end if

         if (group0_done == 0) then
            if (group0_finished_nodes >= group0_node_count) then
               group0_done = 1
               group_done_count = group_done_count + 1
            end if
         end if
      end do

      call logger%verbose("Super-global coordinator finished all fragments")
      call coord_timer%stop()
      call logger%info("Time to evaluate all fragments "//to_char(coord_timer%get_elapsed_time())//" s")
      block
         use mqc_result_types, only: mbe_result_t
         type(mbe_result_t) :: mbe_result

         ! Compute the many-body expansion
         call logger%info(" ")
         call logger%info("Computing Many-Body Expansion (MBE)...")
         call coord_timer%start()

         ! Allocate mbe_result components based on calc_type
         call mbe_result%allocate_dipole()  ! Always compute dipole
         if (calc_type_local == CALC_TYPE_HESSIAN) then
            if (.not. ctx%has_geometry()) then
               call logger%error("sys_geom required for Hessian calculation in global_coordinator")
               call abort_comm(ctx%resources%mpi_comms%world_comm, 1)
            end if
            call mbe_result%allocate_gradient(ctx%sys_geom%total_atoms)
            call mbe_result%allocate_hessian(ctx%sys_geom%total_atoms)
         else if (calc_type_local == CALC_TYPE_GRADIENT) then
            if (.not. ctx%has_geometry()) then
               call logger%error("sys_geom required for gradient calculation in global_coordinator")
               call abort_comm(ctx%resources%mpi_comms%world_comm, 1)
            end if
            call mbe_result%allocate_gradient(ctx%sys_geom%total_atoms)
         end if

         call compute_mbe(ctx%polymers, ctx%total_fragments, ctx%max_level, results, mbe_result, &
                          ctx%sys_geom, ctx%resources%mpi_comms%world_comm, json_data)
         call mbe_result%destroy()

         call coord_timer%stop()
         call logger%info("Time to compute MBE "//to_char(coord_timer%get_elapsed_time())//" s")

      end block

      ! Cleanup
      call queue_destroy(group0_queue)
      if (allocated(group0_fragment_ids)) deallocate (group0_fragment_ids)
      if (allocated(group0_polymers)) deallocate (group0_polymers)
      if (allocated(group_leader_by_group)) deallocate (group_leader_by_group)
      if (allocated(group_node_counts)) deallocate (group_node_counts)
      deallocate (results)
   end subroutine global_coordinator_impl