global_coordinator Module Subroutine

module subroutine global_coordinator(resources, total_fragments, polymers, max_level, node_leader_ranks, num_nodes, sys_geom, method_config, calc_type, json_data)

Uses

  • mqc_json_output_types
  • mqc_result_types

Global coordinator that distributes fragments to the node coordinators; for a single-node calculation it also acts as the node coordinator. total_fragments is declared int64 so that fragment counts too large for int32 do not overflow. Bond connectivity is accessed via sys_geom%bonds.
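
The int64 requirement is easy to motivate with a quick count. The sketch below is illustrative only and not part of the library; it assumes total_fragments counts every k-mer up to max_level, which this page does not state explicitly:

   program fragment_count_sketch
      !! Illustrative only: shows why a combinatorial fragment count needs int64.
      use, intrinsic :: iso_fortran_env, only: int64
      implicit none
      integer(int64) :: n_monomers, total_fragments, term
      integer :: k, j, max_level

      n_monomers = 3000_int64   ! hypothetical system size
      max_level  = 3            ! hypothetical MBE truncation
      total_fragments = 0_int64
      do k = 1, max_level
         term = 1_int64
         do j = 0, k - 1
            term = term*(n_monomers - j)/(j + 1)   ! running binomial C(n_monomers, j+1), exact in integers
         end do
         total_fragments = total_fragments + term
      end do
      ! C(3000,1) + C(3000,2) + C(3000,3) = 4 500 002 500, which already exceeds huge(0_int32).
      print *, total_fragments
   end program fragment_count_sketch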

Arguments

  type(resources_t),        intent(in)            :: resources
  integer(kind=int64),      intent(in)            :: total_fragments
  integer,                  intent(in)            :: polymers(:,:)
  integer,                  intent(in)            :: max_level
  integer,                  intent(in)            :: node_leader_ranks(:)
  integer,                  intent(in)            :: num_nodes
  type(system_geometry_t),  intent(in),  optional :: sys_geom
  type(method_config_t),    intent(in)            :: method_config   !! Method configuration
  integer(kind=int32),      intent(in)            :: calc_type
  type(json_output_data_t), intent(out), optional :: json_data       !! JSON output data
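
For reference, a caller-side sketch of the interface above. The variable names and the surrounding setup are illustrative assumptions, not code taken from the actual caller (mbe_context_t%mbe_run_distributed); sys_geom and json_data are passed here only to show the optional arguments.

   ! Hypothetical caller-side sketch; argument types are those documented above.
   type(resources_t)        :: resources
   type(method_config_t)    :: method_config
   type(system_geometry_t)  :: sys_geom
   type(json_output_data_t) :: json_data
   integer, allocatable     :: polymers(:, :), node_leader_ranks(:)
   integer(int64)           :: total_fragments
   integer                  :: max_level, num_nodes
   integer(int32)           :: calc_type

   ! ... build resources, polymers, node_leader_ranks, etc. (omitted) ...

   ! sys_geom must be present when calc_type requests a gradient or Hessian
   ! (see the source below); json_data is an optional output.
   call global_coordinator(resources, total_fragments, polymers, max_level, &
                           node_leader_ranks, num_nodes, sys_geom=sys_geom, &
                           method_config=method_config, calc_type=calc_type, &
                           json_data=json_data)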


Calls

  • abort_comm, allocate_dipole, allocate_gradient, allocate_hessian, compute_mbe,
    debug, destroy, error, error_t%error_get_message, get_elapsed_time, info,
    iprobe, irecv, isend, result_irecv, send_fragment_to_node,
    send_fragment_to_worker, start, to_char, verbose

Called by

  • interface global_coordinator, which is called by mbe_context_t%mbe_run_distributed

Variables

integer(kind=int32), private :: calc_type_local
type(timer_type), private :: coord_timer
integer(kind=int64), private :: current_fragment
integer, private :: dummy_msg
integer, private :: finished_nodes
integer(kind=int64), private :: fragment_idx
logical, private :: handling_local_workers
logical, private :: has_pending
integer, private :: local_dummy
integer, private :: local_finished_workers
type(MPI_Status), private :: local_status
type(request_t), private :: req
integer, private :: request_source
type(calculation_result_t), private, allocatable :: results(:)
integer(kind=int64), private :: results_received
type(MPI_Status), private :: status
integer(kind=int64), private :: worker_fragment_map(resources%mpi_comms%node_comm%size())
integer, private :: worker_source

Source Code

   module subroutine global_coordinator(resources, total_fragments, polymers, max_level, &
                                        node_leader_ranks, num_nodes, sys_geom, method_config, calc_type, json_data)
      !! Global coordinator that distributes fragments to the node coordinators;
      !! for a single-node calculation it also acts as the node coordinator.
      !! total_fragments is int64 so that large fragment counts do not overflow int32.
      !! Bond connectivity is accessed via sys_geom%bonds.
      use mqc_json_output_types, only: json_output_data_t
      type(resources_t), intent(in) :: resources
      integer(int64), intent(in) :: total_fragments
      integer, intent(in) :: max_level, num_nodes
      integer, intent(in) :: polymers(:, :), node_leader_ranks(:)
      type(system_geometry_t), intent(in), optional :: sys_geom
      type(method_config_t), intent(in) :: method_config  !! Method configuration
      integer(int32), intent(in) :: calc_type
      type(json_output_data_t), intent(out), optional :: json_data  !! JSON output data

      type(timer_type) :: coord_timer
      integer(int64) :: current_fragment, results_received
      integer :: finished_nodes
      integer :: request_source, dummy_msg
      integer(int64) :: fragment_idx
      type(MPI_Status) :: status, local_status
      logical :: handling_local_workers
      logical :: has_pending
      integer(int32) :: calc_type_local

      ! For local workers
      integer :: local_finished_workers, local_dummy

      ! Storage for results
      type(calculation_result_t), allocatable :: results(:)
      integer(int64) :: worker_fragment_map(resources%mpi_comms%node_comm%size())
      integer :: worker_source

      ! MPI request handles for non-blocking operations
      type(request_t) :: req

      calc_type_local = calc_type

      current_fragment = total_fragments
      finished_nodes = 0
      local_finished_workers = 0
      handling_local_workers = (resources%mpi_comms%node_comm%size() > 1)
      results_received = 0_int64

      ! Allocate storage for results
      allocate (results(total_fragments))
      worker_fragment_map = 0

      call logger%verbose("Global coordinator starting with "//to_char(total_fragments)// &
                          " fragments for "//to_char(num_nodes)//" nodes")

      call coord_timer%start()
      do while (finished_nodes < num_nodes)

         ! PRIORITY 1: Check for incoming results from local workers
         ! This MUST be checked before sending new work to avoid race conditions
         if (handling_local_workers) then
            ! Keep checking for results until there are none pending
            do
           call iprobe(resources%mpi_comms%node_comm, MPI_ANY_SOURCE, TAG_WORKER_SCALAR_RESULT, has_pending, local_status)
               if (.not. has_pending) exit

               worker_source = local_status%MPI_SOURCE

               ! Safety check: worker should have a fragment assigned
               if (worker_fragment_map(worker_source) == 0) then
                  call logger%error("Received result from worker "//to_char(worker_source)// &
                                    " but no fragment was assigned!")
                  call abort_comm(resources%mpi_comms%world_comm, 1)
               end if

               ! Receive result and store it using the fragment index for this worker
            call result_irecv(results(worker_fragment_map(worker_source)), resources%mpi_comms%node_comm, worker_source, &
                                 TAG_WORKER_SCALAR_RESULT, req)
               call wait(req)

               ! Check for calculation errors from worker
               if (results(worker_fragment_map(worker_source))%has_error) then
                  call logger%error("Fragment "//to_char(worker_fragment_map(worker_source))// &
                                    " calculation failed: "// &
                                    results(worker_fragment_map(worker_source))%error%get_message())
                  call abort_comm(resources%mpi_comms%world_comm, 1)
               end if

               ! Clear the mapping since we've received the result
               worker_fragment_map(worker_source) = 0
               results_received = results_received + 1
               if (mod(results_received, max(1_int64, total_fragments/10)) == 0 .or. &
                   results_received == total_fragments) then
                  call logger%info("  Processed "//to_char(results_received)//"/"// &
                                   to_char(total_fragments)//" fragments ["// &
                                   to_char(coord_timer%get_elapsed_time())//" s]")
               end if
            end do
         end if

         ! PRIORITY 1b: Check for incoming results from remote node coordinators
         do
            call iprobe(resources%mpi_comms%world_comm, MPI_ANY_SOURCE, TAG_NODE_SCALAR_RESULT, has_pending, status)
            if (.not. has_pending) exit

            ! Receive fragment index and result from node coordinator
            ! TODO: serialize the data for better performance
            call irecv(resources%mpi_comms%world_comm, fragment_idx, status%MPI_SOURCE, TAG_NODE_SCALAR_RESULT, req)
            call wait(req)
  call result_irecv(results(fragment_idx), resources%mpi_comms%world_comm, status%MPI_SOURCE, TAG_NODE_SCALAR_RESULT, req)
            call wait(req)

            ! Check for calculation errors from node coordinator
            if (results(fragment_idx)%has_error) then
               call logger%error("Fragment "//to_char(fragment_idx)//" calculation failed: "// &
                                 results(fragment_idx)%error%get_message())
               call abort_comm(resources%mpi_comms%world_comm, 1)
            end if

            results_received = results_received + 1
            if (mod(results_received, max(1_int64, total_fragments/10)) == 0 .or. &
                results_received == total_fragments) then
               call logger%info("  Processed "//to_char(results_received)//"/"// &
                                to_char(total_fragments)//" fragments ["// &
                                to_char(coord_timer%get_elapsed_time())//" s]")
            end if
         end do

         ! PRIORITY 2: Remote node coordinator requests
         call iprobe(resources%mpi_comms%world_comm, MPI_ANY_SOURCE, TAG_NODE_REQUEST, has_pending, status)
         if (has_pending) then
            call irecv(resources%mpi_comms%world_comm, dummy_msg, status%MPI_SOURCE, TAG_NODE_REQUEST, req)
            call wait(req)
            request_source = status%MPI_SOURCE

            if (current_fragment >= 1) then
               call send_fragment_to_node(resources%mpi_comms%world_comm, current_fragment, polymers, request_source)
               current_fragment = current_fragment - 1
            else
               call isend(resources%mpi_comms%world_comm, -1, request_source, TAG_NODE_FINISH, req)
               call wait(req)
               finished_nodes = finished_nodes + 1
            end if
         end if

         ! PRIORITY 3: Local workers (shared memory) - send new work
         if (handling_local_workers .and. local_finished_workers < resources%mpi_comms%node_comm%size() - 1) then
            call iprobe(resources%mpi_comms%node_comm, MPI_ANY_SOURCE, TAG_WORKER_REQUEST, has_pending, local_status)
            if (has_pending) then
               ! Only process work request if this worker doesn't have pending results
               if (worker_fragment_map(local_status%MPI_SOURCE) == 0) then
                  call irecv(resources%mpi_comms%node_comm, local_dummy, local_status%MPI_SOURCE, TAG_WORKER_REQUEST, req)
                  call wait(req)

                  if (current_fragment >= 1) then
                     call send_fragment_to_worker(resources%mpi_comms%node_comm, current_fragment, polymers, &
                                                  local_status%MPI_SOURCE)
                     ! Track which fragment was sent to this worker
                     worker_fragment_map(local_status%MPI_SOURCE) = current_fragment
                     current_fragment = current_fragment - 1
                  else
                     call isend(resources%mpi_comms%node_comm, -1, local_status%MPI_SOURCE, TAG_WORKER_FINISH, req)
                     call wait(req)
                     local_finished_workers = local_finished_workers + 1
                  end if
               end if
               ! If worker still has pending results, skip the work request
               ! It will be processed on the next iteration after results are received
            end if
         end if

         ! Finalize local worker completion
         if (handling_local_workers .and. local_finished_workers >= resources%mpi_comms%node_comm%size() - 1 &
             .and. results_received >= total_fragments) then
            handling_local_workers = .false.
            if (num_nodes == 1) then
               finished_nodes = finished_nodes + 1
               call logger%debug("Manually incremented finished_nodes for self")
            else
               finished_nodes = finished_nodes + 1
               call logger%verbose("Global coordinator finished local workers")
            end if
         end if
      end do

      call logger%verbose("Global coordinator finished all fragments")
      call coord_timer%stop()
      call logger%info("Time to evaluate all fragments "//to_char(coord_timer%get_elapsed_time())//" s")
      block
         use mqc_result_types, only: mbe_result_t
         type(mbe_result_t) :: mbe_result

         ! Compute the many-body expansion
         call logger%info(" ")
         call logger%info("Computing Many-Body Expansion (MBE)...")
         call coord_timer%start()

         ! Allocate mbe_result components based on calc_type
         call mbe_result%allocate_dipole()  ! Always compute dipole
         if (calc_type_local == CALC_TYPE_HESSIAN) then
            if (.not. present(sys_geom)) then
               call logger%error("sys_geom required for Hessian calculation in global_coordinator")
               call abort_comm(resources%mpi_comms%world_comm, 1)
            end if
            call mbe_result%allocate_gradient(sys_geom%total_atoms)
            call mbe_result%allocate_hessian(sys_geom%total_atoms)
         else if (calc_type_local == CALC_TYPE_GRADIENT) then
            if (.not. present(sys_geom)) then
               call logger%error("sys_geom required for gradient calculation in global_coordinator")
               call abort_comm(resources%mpi_comms%world_comm, 1)
            end if
            call mbe_result%allocate_gradient(sys_geom%total_atoms)
         end if

         call compute_mbe(polymers, total_fragments, max_level, results, mbe_result, &
                          sys_geom, resources%mpi_comms%world_comm, json_data)
         call mbe_result%destroy()

         call coord_timer%stop()
         call logger%info("Time to compute MBE "//to_char(coord_timer%get_elapsed_time())//" s")

      end block

      ! Cleanup
      deallocate (results)
   end subroutine global_coordinator