diff --git a/src/mesh/Connectivity.hpp b/src/mesh/Connectivity.hpp
index 24d9ef656f0cdd0746db0ee2f27881ce5244c8b0..aa1a471d6e84b554c1715ca7eecc7a2361744b07 100644
--- a/src/mesh/Connectivity.hpp
+++ b/src/mesh/Connectivity.hpp
@@ -19,7 +19,6 @@
 #include <utils/PugsTraits.hpp>
 #include <utils/PugsUtils.hpp>

-#include <algorithm>
 #include <iostream>
 #include <set>
 #include <vector>
diff --git a/src/utils/CMakeLists.txt b/src/utils/CMakeLists.txt
index 333d37768f20fa91f47c342a64e809f526142095..bc66a97faf800dc0a10be3165f8d4f2c94059010 100644
--- a/src/utils/CMakeLists.txt
+++ b/src/utils/CMakeLists.txt
@@ -19,6 +19,7 @@ add_library(
   PartitionerOptions.cpp
   PETScWrapper.cpp
   PluginsLoader.cpp
+  PTScotchPartitioner.cpp
   PugsUtils.cpp
   RandomEngine.cpp
   ReproducibleSumManager.cpp
diff --git a/src/utils/PTScotchPartitioner.cpp b/src/utils/PTScotchPartitioner.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e86acfa4494c437dfbe8886a83d6ecbf01f514f2
--- /dev/null
+++ b/src/utils/PTScotchPartitioner.cpp
@@ -0,0 +1,110 @@
+#include <utils/PTScotchPartitioner.hpp>
+
+#include <utils/pugs_config.hpp>
+
+#ifdef PUGS_HAS_PTSCOTCH
+
+#include <utils/Exceptions.hpp>
+#include <utils/Messenger.hpp>
+
+#include <ptscotch.h>
+
+#include <iostream>
+#include <vector>
+
+Array<int>
+PTScotchPartitioner::partition(const CRSGraph& graph)
+{
+  std::cout << "Partitioning graph into " << rang::style::bold << parallel::size() << rang::style::reset
+            << " parts using " << rang::fgB::green << "PTScotch" << rang::fg::reset << '\n';
+
+  MPI_Group world_group;
+  MPI_Comm_group(parallel::Messenger::getInstance().comm(), &world_group);
+
+  MPI_Group mesh_group;
+  std::vector<int> group_ranks = [&]() {
+    Array<int> graph_node_owners = parallel::allGather(static_cast<int>(graph.numberOfNodes()));
+    std::vector<int> grp_ranks;
+    grp_ranks.reserve(graph_node_owners.size());
+    for (size_t i = 0; i < graph_node_owners.size(); ++i) {
+      if (graph_node_owners[i] > 0) {
+        grp_ranks.push_back(i);
+      }
+    }
+    return grp_ranks;
+  }();
+
+  MPI_Group_incl(world_group, group_ranks.size(), &(group_ranks[0]), &mesh_group);
+
+  MPI_Comm partitioning_comm;
+  MPI_Comm_create_group(parallel::Messenger::getInstance().comm(), mesh_group, 1, &partitioning_comm);
+
+  Array<int> partition;
+  if (graph.numberOfNodes() > 0) {
+    SCOTCH_Strat scotch_strategy;
+    SCOTCH_Dgraph scotch_graph;
+
+    SCOTCH_stratInit(&scotch_strategy);   // use default strategy
+    SCOTCH_dgraphInit(&scotch_graph, partitioning_comm);
+
+    const Array<const int>& entries   = graph.entries();
+    const Array<const int>& neighbors = graph.neighbors();
+
+    static_assert(std::is_same_v<int, SCOTCH_Num>);
+
+    SCOTCH_Num* entries_ptr   = const_cast<int*>(&(entries[0]));
+    SCOTCH_Num* neighbors_ptr = const_cast<int*>(&(neighbors[0]));
+
+    if (SCOTCH_dgraphBuild(&scotch_graph,
+                           0,                          // baseval: 0 for C-like arrays
+                           graph.numberOfNodes(),      // vertlocnbr: local number of nodes
+                           graph.numberOfNodes(),      // vertlocmax
+                           entries_ptr,                // vertloctab: CRS offsets
+                           nullptr,                    // vendloctab: compact storage
+                           nullptr,                    // veloloctab: no node weights
+                           nullptr,                    // vlblloctab: optional local node label array
+                           graph.neighbors().size(),   // edgelocnbr
+                           graph.neighbors().size(),   // edgelocsiz
+                           neighbors_ptr,              // edgeloctab: neighbor indices
+                           nullptr,                    // edgegsttab
+                           nullptr) != 0) {            // edloloctab: no edge weights
+      // LCOV_EXCL_START
+      throw UnexpectedError("PTScotch invalid graph");
+      // LCOV_EXCL_STOP
+    }
+
+    partition = Array<int>(graph.numberOfNodes());
+
+    SCOTCH_Num* partition_ptr = static_cast<SCOTCH_Num*>(&(partition[0]));
+
+    if (SCOTCH_dgraphPart(&scotch_graph,       //
+                          parallel::size(),    // number of parts
+                          &scotch_strategy,    //
+                          partition_ptr) != 0) {
+      // LCOV_EXCL_START
+      throw UnexpectedError("PTScotch Error");
+      // LCOV_EXCL_STOP
+    }
+
+
+    SCOTCH_dgraphExit(&scotch_graph);
+    SCOTCH_stratExit(&scotch_strategy);
+  }
+
+  MPI_Comm_free(&partitioning_comm);
+  MPI_Group_free(&mesh_group);
+  MPI_Group_free(&world_group);
+
+  return partition;
+}
+
+#else   // PUGS_HAS_PTSCOTCH
+
+Array<int>
+PTScotchPartitioner::partition(const CRSGraph& graph)
+{
+  Array<int> partition{graph.entries().size() - 1};
+  partition.fill(0);
+  return partition;
+}
+
+#endif   // PUGS_HAS_PTSCOTCH
diff --git a/src/utils/PTScotchPartitioner.hpp b/src/utils/PTScotchPartitioner.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..cefb0a1df2e1c39966d6c8ed0eaffe655a836df0
--- /dev/null
+++ b/src/utils/PTScotchPartitioner.hpp
@@ -0,0 +1,19 @@
+#ifndef PT_SCOTCH_PARTITIONER_HPP
+#define PT_SCOTCH_PARTITIONER_HPP
+
+#include <utils/Array.hpp>
+#include <utils/CRSGraph.hpp>
+
+class PTScotchPartitioner
+{
+ private:
+  friend class Partitioner;
+
+  // Forbids direct calls
+  static Array<int> partition(const CRSGraph& graph);
+
+ public:
+  PTScotchPartitioner() = delete;
+};
+
+#endif   // PT_SCOTCH_PARTITIONER_HPP
diff --git a/src/utils/ParMETISPartitioner.cpp b/src/utils/ParMETISPartitioner.cpp
index 93018f44e10f94f5d88f2987e618336caf494f27..47864de1fd3337bc6e193ad3453ad8d6c75eb797 100644
--- a/src/utils/ParMETISPartitioner.cpp
+++ b/src/utils/ParMETISPartitioner.cpp
@@ -18,16 +18,6 @@ ParMETISPartitioner::partition(const CRSGraph& graph)
   std::cout << "Partitioning graph into " << rang::style::bold << parallel::size() << rang::style::reset
             << " parts using " << rang::fgB::green << "ParMETIS" << rang::fg::reset << '\n';

-  int wgtflag = 0;
-  int numflag = 0;
-  int ncon    = 1;
-  int npart   = parallel::size();
-  std::vector<float> tpwgts(npart, 1. / npart);
-
-  std::vector<float> ubvec{1.05};
-  std::vector<int> options{1, 0, 0};
-  int edgecut = 0;
-
   MPI_Group world_group;
   MPI_Comm_group(parallel::Messenger::getInstance().comm(), &world_group);

@@ -46,13 +36,23 @@ ParMETISPartitioner::partition(const CRSGraph& graph)

   MPI_Group_incl(world_group, group_ranks.size(), &(group_ranks[0]), &mesh_group);

-  MPI_Comm parmetis_comm;
-  MPI_Comm_create_group(parallel::Messenger::getInstance().comm(), mesh_group, 1, &parmetis_comm);
-
-  int local_number_of_nodes = graph.numberOfNodes();
+  MPI_Comm partitioning_comm;
+  MPI_Comm_create_group(parallel::Messenger::getInstance().comm(), mesh_group, 1, &partitioning_comm);

   Array<int> partition;
   if (graph.numberOfNodes() > 0) {
+    int wgtflag = 0;
+    int numflag = 0;
+    int ncon    = 1;
+    int npart   = parallel::size();
+    std::vector<float> tpwgts(npart, 1. / npart);
+
+    std::vector<float> ubvec{1.05};
+    std::vector<int> options{1, 0, 0};
+    int edgecut = 0;
+
+    int local_number_of_nodes = graph.numberOfNodes();
+
     partition = Array<int>(local_number_of_nodes);

     std::vector<int> vtxdist{0, local_number_of_nodes};
@@ -64,16 +64,15 @@ ParMETISPartitioner::partition(const CRSGraph& graph)

     int result =
       ParMETIS_V3_PartKway(&(vtxdist[0]), entries_ptr, neighbors_ptr, NULL, NULL, &wgtflag, &numflag, &ncon, &npart,
-                           &(tpwgts[0]), &(ubvec[0]), &(options[0]), &edgecut, &(partition[0]), &parmetis_comm);
+                           &(tpwgts[0]), &(ubvec[0]), &(options[0]), &edgecut, &(partition[0]), &partitioning_comm);

     // LCOV_EXCL_START
     if (result == METIS_ERROR) {
       throw UnexpectedError("Metis Error");
     }
     // LCOV_EXCL_STOP
-
-    MPI_Comm_free(&parmetis_comm);
   }
+  MPI_Comm_free(&partitioning_comm);

   MPI_Group_free(&mesh_group);
   MPI_Group_free(&world_group);
diff --git a/src/utils/Partitioner.cpp b/src/utils/Partitioner.cpp
index 4c7145cf247c3e67bb5e2df802cf4c64e2050d66..4f53d322f5e52c130122ccc3966270e7220d34d7 100644
--- a/src/utils/Partitioner.cpp
+++ b/src/utils/Partitioner.cpp
@@ -1,5 +1,6 @@
 #include <utils/Partitioner.hpp>

+#include <utils/PTScotchPartitioner.hpp>
 #include <utils/ParMETISPartitioner.hpp>

 Partitioner::Partitioner(const PartitionerOptions& options) : m_partitioner_options{options} {}
@@ -12,7 +13,7 @@ Partitioner::partition(const CRSGraph& graph)
 {
   switch (m_partitioner_options.library()) {
   case PartitionerLibrary::parmetis: {
     return ParMETISPartitioner::partition(graph);
   }
   case PartitionerLibrary::ptscotch: {
-    throw NotImplementedError("PTScotch");
+    return PTScotchPartitioner::partition(graph);
   }
   default: {
     throw UnexpectedError("invalid partition library");
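
Note (not part of the patch): the new PTScotchPartitioner.cpp follows the usual PT-Scotch distributed-partitioning sequence, SCOTCH_dgraphInit, SCOTCH_dgraphBuild on the local CRS arrays, SCOTCH_dgraphPart with a default strategy, then SCOTCH_dgraphExit, run on a communicator restricted to the ranks that own graph nodes. The sketch below is a minimal standalone illustration of that same call sequence on a toy distributed graph (two vertices per rank joined by one edge); the toy graph, the error handling and the printed output are illustrative assumptions, not pugs code. It assumes MPI and PT-Scotch are installed; build with an MPI C++ wrapper and link against the PT-Scotch/Scotch libraries (exact flags depend on the installation).

// Standalone sketch (not part of the patch): PT-Scotch call sequence on a toy
// graph where each rank owns two vertices joined by one edge.
#include <mpi.h>

#include <cstdio>   // some scotch.h versions expect stdio declarations first
#include <ptscotch.h>

#include <vector>

int
main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);

  int rank = 0;
  int size = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  // Local CRS arrays: vertices 2*rank and 2*rank+1 are neighbors of each other.
  // With a null vertex label array, PT-Scotch numbers vertices globally in
  // ascending rank order, so these are also the implicit global indices.
  std::vector<SCOTCH_Num> vertloctab{0, 1, 2};
  std::vector<SCOTCH_Num> edgeloctab{2 * rank + 1, 2 * rank};

  SCOTCH_Dgraph dgraph;
  SCOTCH_dgraphInit(&dgraph, MPI_COMM_WORLD);

  if (SCOTCH_dgraphBuild(&dgraph,
                         0,                   // baseval: C-style numbering
                         2,                   // vertlocnbr
                         2,                   // vertlocmax
                         vertloctab.data(),   // vertloctab: CRS offsets
                         nullptr,             // vendloctab: compact storage
                         nullptr,             // veloloctab: no vertex weights
                         nullptr,             // vlblloctab: implicit labels
                         2,                   // edgelocnbr
                         2,                   // edgelocsiz
                         edgeloctab.data(),   // edgeloctab: global neighbor ids
                         nullptr,             // edgegsttab
                         nullptr) != 0) {     // edloloctab: no edge weights
    std::fprintf(stderr, "SCOTCH_dgraphBuild failed\n");
    MPI_Abort(MPI_COMM_WORLD, 1);
  }

  SCOTCH_Strat strategy;
  SCOTCH_stratInit(&strategy);   // default partitioning strategy

  std::vector<SCOTCH_Num> part(2);
  if (SCOTCH_dgraphPart(&dgraph, size, &strategy, part.data()) != 0) {
    std::fprintf(stderr, "SCOTCH_dgraphPart failed\n");
    MPI_Abort(MPI_COMM_WORLD, 1);
  }

  std::printf("rank %d: vertex %d -> part %d, vertex %d -> part %d\n", rank, 2 * rank, static_cast<int>(part[0]),
              2 * rank + 1, static_cast<int>(part[1]));

  SCOTCH_stratExit(&strategy);
  SCOTCH_dgraphExit(&dgraph);

  MPI_Finalize();
  return 0;
}

As in PTScotchPartitioner::partition above, the part array is indexed by local vertex and receives the target part number of each locally owned vertex; the patch applies the same sequence to the mesh connectivity CRSGraph and frees the group-restricted communicator afterwards.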