Handle fragmented calculation (nlevel > 0)
Generates fragments, distributes work across MPI processes organized into nodes, and coordinates the many-body expansion calculation using hierarchical parallelism. If allow_overlapping_fragments is true, the generalized many-body expansion (GMBE) with an inclusion-exclusion intersection correction is used instead of the standard MBE.
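For orientation, the fragment count expected by the standard MBE path follows from simple combinatorics: every combination of up to max_level monomers becomes a fragment, so the total is the sum of binomial coefficients C(n_monomers, k) for k = 1..max_level. The sketch below only illustrates that count; it is independent of the project's get_nfrags routine, whose implementation is not shown here and may differ in detail.

```fortran
! Illustrative sketch only: expected MBE fragment count as a sum of binomials.
! This mirrors what get_nfrags presumably computes; the real routine may
! differ (e.g. overflow handling), so treat this as a reference, not project code.
program expected_fragment_count
   use iso_fortran_env, only: int64
   implicit none
   integer :: n_monomers, max_level, k
   integer(int64) :: total, c

   n_monomers = 10
   max_level = 3

   total = 0_int64
   c = 1_int64   ! C(n, 0)
   do k = 1, max_level
      ! Running binomial: C(n, k) = C(n, k-1)*(n - k + 1)/k, exact in integers
      c = c*int(n_monomers - k + 1, int64)/int(k, int64)
      total = total + c
   end do

   ! For 10 monomers at max_level 3: 10 + 45 + 120 = 175 fragments
   print *, "expected fragments:", total
end program expected_fragment_count
```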
| Type | Intent | Optional | Attributes | Name | Description |
|---|---|---|---|---|---|
| type(resources_t) | intent(in) |  | target | resources | Resources container (MPI comms, etc.) |
| type(driver_config_t) | intent(in) |  |  | config | Driver configuration (includes method_config, calc_type, etc.) |
| type(system_geometry_t) | intent(in) |  |  | sys_geom | System geometry and fragment info |
| type(bond_t) | intent(in) | optional |  | bonds(:) | Bond connectivity information |
| type(json_output_data_t) | intent(out) | optional |  | json_data | JSON output data |
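A minimal calling sketch, assuming the resources, configuration, and geometry objects have already been constructed by the surrounding driver (their setup is not shown and is only assumed here):

```fortran
! Illustrative only: how a driver might invoke run_fragmented_calculation
! once resources (MPI comms), config (nlevel, cutoffs, ...), and sys_geom
! (monomer definitions) have been built. Their construction is assumed.
type(resources_t), target :: resources
type(driver_config_t)     :: config
type(system_geometry_t)   :: sys_geom
type(bond_t), allocatable :: bonds(:)
type(json_output_data_t)  :: json_data

! ... initialize resources, config, sys_geom, and optionally bonds ...

! bonds and json_data are optional; pass them by keyword when needed
call run_fragmented_calculation(resources, config, sys_geom, &
                                bonds=bonds, json_data=json_data)
```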
| Type | Visibility | Attributes | Name | Initial | Description |
|---|---|---|---|---|---|
| integer | private | allocatable | all_node_leader_ranks(:) |  | Node leader status for all ranks |
| logical | private |  | allow_overlapping_fragments |  | Use GMBE for overlapping fragments |
| class(many_body_expansion_t) | private | allocatable | expansion |  |  |
| integer | private |  | global_node_rank |  | Global rank if this process leads a node, -1 otherwise |
| integer | private |  | i |  | Loop counters |
| integer | private |  | j |  | Loop counters |
| integer | private |  | max_intersection_level |  | Maximum k-way intersection depth for GMBE |
| integer | private |  | max_level |  | Maximum fragment level for MBE |
| integer | private | allocatable | monomers(:) |  | Temporary monomer list for fragment generation |
| integer(kind=int64) | private |  | n_expected_frags |  | Expected number of fragments based on combinatorics (int64 to handle large systems) |
| integer(kind=int64) | private |  | n_pie_terms |  | Number of unique PIE terms |
| integer | private |  | n_primaries |  | Number of primary polymers |
| integer(kind=int64) | private |  | n_primaries_i64 |  | For binomial calculation |
| integer(kind=int64) | private |  | n_rows |  | Number of rows needed for polymers array (int64 to handle large systems) |
| integer | private | allocatable | node_leader_ranks(:) |  | Ranks of processes that lead each node |
| integer | private |  | num_nodes |  | Number of compute nodes |
| integer | private | allocatable | pie_atom_sets(:,:) |  | Unique atom sets (max_atoms, n_pie_terms) |
| integer | private | allocatable | pie_coefficients(:) |  | PIE coefficient for each term |
| type(error_t) | private |  | pie_error |  | Error from PIE enumeration |
| integer | private | allocatable | polymers(:,:) |  | Fragment composition array (fragment, monomer_indices) |
| integer(kind=int64) | private |  | total_fragments |  | Total number of fragments generated (int64 to handle large systems) |
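The pie_coefficients, pie_atom_sets, and n_pie_terms variables above carry the principle-of-inclusion-exclusion (PIE) data for GMBE: each unique subsystem contributes its energy weighted by an integer coefficient, so two overlapping primaries A and B contribute E(A) + E(B) - E(A ∩ B). A self-contained toy with made-up numbers; the actual assembly happens inside the gmbe_context_t methods, which are not shown here:

```fortran
! Toy illustration (not project code): the inclusion-exclusion weighting
! behind GMBE. Primaries A = {1,2} and B = {2,3} overlap on monomer 2,
! so the intersection {2} enters with coefficient -1.
program gmbe_pie_toy
   implicit none
   integer, parameter :: dp = kind(1.0d0)
   ! Hypothetical subsystem energies for A, B, and the intersection (made-up numbers)
   real(dp) :: subsystem_energy(3) = [-2.0_dp, -2.1_dp, -0.9_dp]
   integer  :: pie_coefficients(3) = [1, 1, -1]
   real(dp) :: e_total
   integer  :: i

   e_total = 0.0_dp
   do i = 1, size(pie_coefficients)
      e_total = e_total + real(pie_coefficients(i), dp)*subsystem_energy(i)
   end do
   print *, "GMBE-style total:", e_total   ! -2.0 - 2.1 + 0.9 = -3.2
end program gmbe_pie_toy
```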
```fortran
subroutine run_fragmented_calculation(resources, config, sys_geom, bonds, json_data)
   !! Handle fragmented calculation (nlevel > 0)
   !!
   !! Generates fragments, distributes work across MPI processes organized in nodes,
   !! and coordinates many-body expansion calculation using hierarchical parallelism.
   !! If allow_overlapping_fragments=true, uses GMBE with intersection correction.
   type(resources_t), intent(in), target :: resources
      !! Resources container (MPI comms, etc.)
   type(driver_config_t), intent(in) :: config
      !! Driver configuration (includes method_config, calc_type, etc.)
   type(system_geometry_t), intent(in) :: sys_geom
      !! System geometry and fragment info
   type(bond_t), intent(in), optional :: bonds(:)
      !! Bond connectivity information
   type(json_output_data_t), intent(out), optional :: json_data
      !! JSON output data

   ! Local variables extracted from config for readability
   integer :: max_level
      !! Maximum fragment level for MBE
   logical :: allow_overlapping_fragments
      !! Use GMBE for overlapping fragments
   integer :: max_intersection_level
      !! Maximum k-way intersection depth for GMBE
   integer(int64) :: total_fragments
      !! Total number of fragments generated (int64 to handle large systems)
   integer, allocatable :: polymers(:, :)
      !! Fragment composition array (fragment, monomer_indices)
   integer :: num_nodes
      !! Number of compute nodes
   integer :: i, j
      !! Loop counters
   integer, allocatable :: node_leader_ranks(:)
      !! Ranks of processes that lead each node
   integer, allocatable :: monomers(:)
      !! Temporary monomer list for fragment generation
   integer(int64) :: n_expected_frags
      !! Expected number of fragments based on combinatorics (int64 to handle large systems)
   integer(int64) :: n_rows
      !! Number of rows needed for polymers array (int64 to handle large systems)
   integer :: global_node_rank
      !! Global rank if this process leads a node, -1 otherwise
   integer, allocatable :: all_node_leader_ranks(:)
      !! Node leader status for all ranks

   ! Polymorphic expansion context for unified MBE/GMBE handling
   class(many_body_expansion_t), allocatable :: expansion

   ! GMBE PIE-based variables
   integer :: n_primaries
      !! Number of primary polymers
   integer(int64) :: n_primaries_i64
      !! For binomial calculation
   integer, allocatable :: pie_atom_sets(:, :)
      !! Unique atom sets (max_atoms, n_pie_terms)
   integer, allocatable :: pie_coefficients(:)
      !! PIE coefficient for each term
   integer(int64) :: n_pie_terms
      !! Number of unique PIE terms
   type(error_t) :: pie_error
      !! Error from PIE enumeration

   ! Extract values from config for readability
   max_level = config%nlevel
   allow_overlapping_fragments = config%allow_overlapping_fragments
   max_intersection_level = config%max_intersection_level

   ! Generate fragments
   if (resources%mpi_comms%world_comm%rank() == 0) then
      if (allow_overlapping_fragments) then
         ! GMBE mode: PIE-based inclusion-exclusion
         ! GMBE(1): primaries are monomers
         ! GMBE(N): primaries are N-mers (e.g., dimers for N=2)

         ! Generate primaries
         if (max_level == 1) then
            ! GMBE(1): primaries are base monomers
            n_primaries = sys_geom%n_monomers
            allocate (polymers(n_primaries, 1))
            do i = 1, n_primaries
               polymers(i, 1) = i
            end do
         else
            ! GMBE(N): primaries are all C(M, N) N-tuples
            n_primaries_i64 = binomial(sys_geom%n_monomers, max_level)
            n_primaries = int(n_primaries_i64)
            allocate (monomers(sys_geom%n_monomers))
            allocate (polymers(n_primaries, max_level))
            polymers = 0
            call create_monomer_list(monomers)
            total_fragments = 0_int64
            call combine(monomers, sys_geom%n_monomers, max_level, polymers, total_fragments)
            n_primaries = int(total_fragments)
            deallocate (monomers)

            ! Apply distance-based screening to primaries if cutoffs are provided
            if (max_level > 1) then
               ! Only screen if primaries are n-mers (not for GMBE(1) where primaries are monomers)
               total_fragments = int(n_primaries, int64)
               call apply_distance_screening(polymers, total_fragments, sys_geom, config, max_level)
               n_primaries = int(total_fragments)
            end if

            ! Sort primaries by size (largest first)
            ! TODO: Currently disabled - see comment in MBE section above
            ! total_fragments = int(n_primaries, int64)
            call sort_fragments_by_size(polymers, total_fragments, max_level)
         end if

         call logger%info("Generated "//to_char(n_primaries)//" primary "//to_char(max_level)//"-mers for GMBE("// &
                          to_char(max_level)//")")

         ! Use DFS to enumerate PIE terms with coefficients
         call gmbe_enumerate_pie_terms(sys_geom, polymers, n_primaries, max_level, max_intersection_level, &
                                       pie_atom_sets, pie_coefficients, n_pie_terms, pie_error)
         if (pie_error%has_error()) then
            call logger%error("GMBE PIE enumeration failed: "//pie_error%get_message())
            call abort_comm(resources%mpi_comms%world_comm, 1)
         end if

         call logger%info("GMBE PIE enumeration complete: "//to_char(n_pie_terms)//" unique subsystems to evaluate")

         ! For now: total_fragments = n_pie_terms (each PIE term is a subsystem to evaluate)
         total_fragments = n_pie_terms
      else
         ! Standard MBE mode
         ! Calculate expected number of fragments
         n_expected_frags = get_nfrags(sys_geom%n_monomers, max_level)
         n_rows = n_expected_frags

         ! Allocate monomer list and polymers array
         allocate (monomers(sys_geom%n_monomers))
         allocate (polymers(n_rows, max_level))
         polymers = 0

         ! Create monomer list [1, 2, 3, ..., n_monomers]
         call create_monomer_list(monomers)

         ! Generate all fragments (includes monomers in polymers array)
         total_fragments = 0_int64

         ! First add monomers
         do i = 1, sys_geom%n_monomers
            total_fragments = total_fragments + 1_int64
            polymers(total_fragments, 1) = i
         end do

         ! Then add n-mers for n >= 2
         call generate_fragment_list(monomers, max_level, polymers, total_fragments)
         deallocate (monomers)

         ! Apply distance-based screening if cutoffs are provided
         call apply_distance_screening(polymers, total_fragments, sys_geom, config, max_level)

         ! Sort fragments by size (largest first) for better load balancing
         ! TODO: Currently disabled - MBE assembly is now order-independent (uses nested loops),
         ! but sorting still causes "Subset not found" errors in real validation cases.
         ! Unit tests pass with arbitrary order, so there may be an issue with the hash table
         ! or fragment generation in production code. Needs investigation.
         call sort_fragments_by_size(polymers, total_fragments, max_level)

         call logger%info("Generated fragments:")
         call logger%info(" Total fragments: "//to_char(total_fragments))
         call logger%info(" Max level: "//to_char(max_level))
      end if
   end if

   ! Broadcast total_fragments to all ranks
   call bcast(resources%mpi_comms%world_comm, total_fragments, 1, 0)

   ! Determine node leaders
   global_node_rank = -1
   if (resources%mpi_comms%node_comm%rank() == 0) global_node_rank = resources%mpi_comms%world_comm%rank()

   allocate (all_node_leader_ranks(resources%mpi_comms%world_comm%size()))
   call allgather(resources%mpi_comms%world_comm, global_node_rank, all_node_leader_ranks)

   num_nodes = count(all_node_leader_ranks /= -1)
   if (resources%mpi_comms%world_comm%rank() == 0) then
      call logger%info("Running with "//to_char(num_nodes)//" node(s)")
   end if

   allocate (node_leader_ranks(num_nodes))
   i = 0
   do j = 1, resources%mpi_comms%world_comm%size()
      if (all_node_leader_ranks(j) /= -1) then
         i = i + 1
         node_leader_ranks(i) = all_node_leader_ranks(j)
      end if
   end do
   deallocate (all_node_leader_ranks)

   ! Build polymorphic expansion context
   if (allow_overlapping_fragments) then
      ! GMBE: allocate gmbe_context_t
      allocate (gmbe_context_t :: expansion)
      select type (expansion)
      type is (gmbe_context_t)
         call expansion%init(config%method_config, config%calc_type)
         allocate (expansion%sys_geom, source=sys_geom)
         if (present(bonds)) then
            allocate (expansion%sys_geom%bonds, source=bonds)
         end if
         expansion%n_pie_terms = n_pie_terms
         if (resources%mpi_comms%world_comm%rank() == 0) then
            allocate (expansion%pie_atom_sets, source=pie_atom_sets)
            allocate (expansion%pie_coefficients, source=pie_coefficients)
         end if
         expansion%resources => resources
         expansion%node_leader_ranks = node_leader_ranks
         expansion%num_nodes = num_nodes
      end select
   else
      ! Standard MBE: allocate mbe_context_t
      allocate (mbe_context_t :: expansion)
      select type (expansion)
      type is (mbe_context_t)
         call expansion%init(config%method_config, config%calc_type)
         allocate (expansion%sys_geom, source=sys_geom)
         if (present(bonds)) then
            allocate (expansion%sys_geom%bonds, source=bonds)
         end if
         expansion%total_fragments = total_fragments
         if (resources%mpi_comms%world_comm%rank() == 0) then
            allocate (expansion%polymers, source=polymers)
         end if
         expansion%max_level = max_level
         expansion%resources => resources
         expansion%node_leader_ranks = node_leader_ranks
         expansion%num_nodes = num_nodes
      end select
   end if

   ! Execute calculation using polymorphic dispatch
   if (resources%mpi_comms%world_comm%size() == 1) then
      call logger%info("Running in serial mode (single MPI rank)")
      call expansion%run_serial(json_data)
   else
      call expansion%run_distributed(json_data)
   end if

   ! Clean up expansion context
   select type (expansion)
   type is (mbe_context_t)
      call expansion%destroy()
   type is (gmbe_context_t)
      call expansion%destroy()
   end select
   deallocate (expansion)

   ! Cleanup
   if (resources%mpi_comms%world_comm%rank() == 0) then
      if (allocated(polymers)) deallocate (polymers)
      if (allocated(node_leader_ranks)) deallocate (node_leader_ranks)
      if (allocated(pie_atom_sets)) deallocate (pie_atom_sets)
      if (allocated(pie_coefficients)) deallocate (pie_coefficients)
   end if
end subroutine run_fragmented_calculation
```
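The "Determine node leaders" block above gathers, from every process, either its world rank (if it is rank 0 of its node communicator) or -1, then compacts the non-negative entries into node_leader_ranks. The same pattern written against plain mpi_f08, rather than the project's allgather/comm wrappers, looks roughly like the following self-contained sketch (all names below are illustrative, not the project's API):

```fortran
! Illustration of the node-leader pattern with plain mpi_f08 (the project
! uses its own comm wrappers; this is the same idea in standard MPI).
program node_leader_demo
   use mpi_f08
   implicit none
   type(MPI_Comm) :: node_comm
   integer :: world_rank, world_size, node_rank
   integer :: my_leader, num_nodes
   integer, allocatable :: all_leaders(:)

   call MPI_Init()
   call MPI_Comm_rank(MPI_COMM_WORLD, world_rank)
   call MPI_Comm_size(MPI_COMM_WORLD, world_size)

   ! Split by shared-memory node; rank 0 of each node_comm is that node's leader
   call MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0, &
                            MPI_INFO_NULL, node_comm)
   call MPI_Comm_rank(node_comm, node_rank)

   ! Each process contributes its world rank if it leads a node, else -1
   my_leader = -1
   if (node_rank == 0) my_leader = world_rank

   allocate (all_leaders(world_size))
   call MPI_Allgather(my_leader, 1, MPI_INTEGER, all_leaders, 1, MPI_INTEGER, &
                      MPI_COMM_WORLD)

   num_nodes = count(all_leaders /= -1)
   if (world_rank == 0) print *, "node leaders:", pack(all_leaders, all_leaders /= -1)

   call MPI_Finalize()
end program node_leader_demo
```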