Commit 1e461806 authored by Stéphane Del Pino

Plug PTScotch partitioner

parent 31152b0f
Merge request !202: Prepare PTScotch integration
@@ -19,7 +19,6 @@
 #include <utils/PugsTraits.hpp>
 #include <utils/PugsUtils.hpp>

-#include <algorithm>
 #include <iostream>
 #include <set>
 #include <vector>
@@ -19,6 +19,7 @@ add_library(
   PartitionerOptions.cpp
   PETScWrapper.cpp
   PluginsLoader.cpp
+  PTScotchPartitioner.cpp
   PugsUtils.cpp
   RandomEngine.cpp
   ReproducibleSumManager.cpp
PTScotchPartitioner.cpp (new file)

#include <utils/PTScotchPartitioner.hpp>

#include <utils/pugs_config.hpp>

#ifdef PUGS_HAS_PTSCOTCH

#include <utils/Exceptions.hpp>
#include <utils/Messenger.hpp>

#include <ptscotch.h>

#include <iostream>
#include <vector>

Array<int>
PTScotchPartitioner::partition(const CRSGraph& graph)
{
  std::cout << "Partitioning graph into " << rang::style::bold << parallel::size() << rang::style::reset
            << " parts using " << rang::fgB::green << "PTScotch" << rang::fg::reset << '\n';

  MPI_Group world_group;
  MPI_Comm_group(parallel::Messenger::getInstance().comm(), &world_group);

  // Build the sub-group of ranks that actually own graph nodes
  MPI_Group mesh_group;
  std::vector<int> group_ranks = [&]() {
    Array<int> graph_node_owners = parallel::allGather(static_cast<int>(graph.numberOfNodes()));
    std::vector<int> grp_ranks;
    grp_ranks.reserve(graph_node_owners.size());
    for (size_t i = 0; i < graph_node_owners.size(); ++i) {
      if (graph_node_owners[i] > 0) {
        grp_ranks.push_back(i);
      }
    }
    return grp_ranks;
  }();

  MPI_Group_incl(world_group, group_ranks.size(), &(group_ranks[0]), &mesh_group);

  MPI_Comm partitioning_comm;
  MPI_Comm_create_group(parallel::Messenger::getInstance().comm(), mesh_group, 1, &partitioning_comm);

  Array<int> partition;
  if (graph.numberOfNodes() > 0) {
    SCOTCH_Strat scotch_strategy;
    SCOTCH_Dgraph scotch_graph;

    SCOTCH_stratInit(&scotch_strategy);   // use default strategy
    SCOTCH_dgraphInit(&scotch_graph, partitioning_comm);

    const Array<const int>& entries = graph.entries();
    const Array<const int>& neighbors = graph.neighbors();

    static_assert(std::is_same_v<int, SCOTCH_Num>);

    SCOTCH_Num* entries_ptr = const_cast<int*>(&(entries[0]));
    SCOTCH_Num* neighbors_ptr = const_cast<int*>(&(neighbors[0]));

    if (SCOTCH_dgraphBuild(&scotch_graph,
                           0,                        // baseval: 0 for C-like arrays
                           graph.numberOfNodes(),    // vertlocnbr
                           graph.numberOfNodes(),    // vertlocmax
                           entries_ptr,              // vertloctab (CRS row offsets)
                           nullptr,                  // vendloctab
                           nullptr,                  // veloloctab (vertex weights)
                           nullptr,                  // vlblloctab: optional local node label array
                           graph.neighbors().size(), // edgelocnbr
                           graph.neighbors().size(), // edgelocsiz
                           neighbors_ptr,            // edgeloctab (CRS column indices)
                           nullptr,                  // edgegsttab
                           nullptr) != 0) {          // edloloctab (edge weights)
      // LCOV_EXCL_START
      throw UnexpectedError("PTScotch invalid graph");
      // LCOV_EXCL_STOP
    }

    partition = Array<int>(graph.numberOfNodes());
    SCOTCH_Num* partition_ptr = static_cast<SCOTCH_Num*>(&(partition[0]));

    if (SCOTCH_dgraphPart(&scotch_graph,    //
                          parallel::size(), // number of parts
                          &scotch_strategy, //
                          partition_ptr) != 0) {
      // LCOV_EXCL_START
      throw UnexpectedError("PTScotch Error");
      // LCOV_EXCL_STOP
    }

    SCOTCH_dgraphExit(&scotch_graph);
    SCOTCH_stratExit(&scotch_strategy);
  }

  MPI_Comm_free(&partitioning_comm);
  MPI_Group_free(&mesh_group);
  MPI_Group_free(&world_group);

  return partition;
}

#else   // PUGS_HAS_PTSCOTCH

Array<int>
PTScotchPartitioner::partition(const CRSGraph& graph)
{
  // Serial fallback: every node goes to part 0
  Array<int> partition{graph.entries().size() - 1};
  partition.fill(0);
  return partition;
}

#endif   // PUGS_HAS_PTSCOTCH
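
Note: the two arrays handed to SCOTCH_dgraphBuild follow the usual compressed-row (CRS) adjacency layout: entries holds, for each local node, the offset of its neighbor list inside neighbors. A minimal standalone sketch of that layout for the path graph 0-1-2-3, using plain std::vector instead of the pugs Array/CRSGraph types (those substitutions are assumptions made for illustration only):

#include <cassert>
#include <vector>

int
main()
{
  // CRS adjacency of the path graph 0-1-2-3 (undirected, so each edge is stored twice).
  // entries[i] .. entries[i+1] delimits the neighbor list of node i inside `neighbors`.
  std::vector<int> entries   = {0, 1, 3, 5, 6};
  std::vector<int> neighbors = {1, 0, 2, 1, 3, 2};

  const int number_of_nodes = entries.size() - 1;   // plays the role of graph.numberOfNodes()
  assert(number_of_nodes == 4);

  // entries and neighbors are exactly the roles of vertloctab and edgeloctab
  // in the SCOTCH_dgraphBuild call above, with baseval = 0.
  for (int i = 0; i < number_of_nodes; ++i) {
    for (int k = entries[i]; k < entries[i + 1]; ++k) {
      assert(neighbors[k] != i);   // no self-loops in the adjacency
    }
  }
}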
PTScotchPartitioner.hpp (new file)

#ifndef PT_SCOTCH_PARTITIONER_HPP
#define PT_SCOTCH_PARTITIONER_HPP

#include <utils/Array.hpp>
#include <utils/CRSGraph.hpp>

class PTScotchPartitioner
{
 private:
  friend class Partitioner;

  // Forbids direct calls
  static Array<int> partition(const CRSGraph& graph);

 public:
  PTScotchPartitioner() = delete;
};

#endif   // PT_SCOTCH_PARTITIONER_HPP
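
The header uses a small access-control idiom: the constructor is deleted and partition() is private, so the befriended Partitioner is the only possible caller. A self-contained sketch of the same idiom (the class and function names below are illustrative, not part of pugs):

#include <iostream>

class Backend   // stands in for PTScotchPartitioner
{
 private:
  friend class Facade;   // stands in for Partitioner

  // Private and static: reachable only through the friend class
  static int
  work()
  {
    return 42;
  }

 public:
  Backend() = delete;   // no instances either
};

class Facade
{
 public:
  static int
  run()
  {
    return Backend::work();   // OK: Facade is a friend
  }
};

int
main()
{
  std::cout << Facade::run() << '\n';   // prints 42
  // Backend::work();                   // would not compile: 'work' is private
}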
ParMETISPartitioner.cpp
@@ -18,16 +18,6 @@ ParMETISPartitioner::partition(const CRSGraph& graph)
   std::cout << "Partitioning graph into " << rang::style::bold << parallel::size() << rang::style::reset
             << " parts using " << rang::fgB::green << "ParMETIS" << rang::fg::reset << '\n';

-  int wgtflag = 0;
-  int numflag = 0;
-  int ncon = 1;
-  int npart = parallel::size();
-  std::vector<float> tpwgts(npart, 1. / npart);
-  std::vector<float> ubvec{1.05};
-  std::vector<int> options{1, 0, 0};
-  int edgecut = 0;
-
   MPI_Group world_group;
   MPI_Comm_group(parallel::Messenger::getInstance().comm(), &world_group);
@@ -46,13 +36,23 @@ ParMETISPartitioner::partition(const CRSGraph& graph)
   MPI_Group_incl(world_group, group_ranks.size(), &(group_ranks[0]), &mesh_group);

-  MPI_Comm parmetis_comm;
-  MPI_Comm_create_group(parallel::Messenger::getInstance().comm(), mesh_group, 1, &parmetis_comm);
-
-  int local_number_of_nodes = graph.numberOfNodes();
+  MPI_Comm partitioning_comm;
+  MPI_Comm_create_group(parallel::Messenger::getInstance().comm(), mesh_group, 1, &partitioning_comm);

   Array<int> partition;
   if (graph.numberOfNodes() > 0) {
+    int wgtflag = 0;
+    int numflag = 0;
+    int ncon = 1;
+    int npart = parallel::size();
+    std::vector<float> tpwgts(npart, 1. / npart);
+    std::vector<float> ubvec{1.05};
+    std::vector<int> options{1, 0, 0};
+    int edgecut = 0;
+
+    int local_number_of_nodes = graph.numberOfNodes();
+
     partition = Array<int>(local_number_of_nodes);

     std::vector<int> vtxdist{0, local_number_of_nodes};
@@ -64,16 +64,15 @@ ParMETISPartitioner::partition(const CRSGraph& graph)
     int result =
       ParMETIS_V3_PartKway(&(vtxdist[0]), entries_ptr, neighbors_ptr, NULL, NULL, &wgtflag, &numflag, &ncon, &npart,
-                           &(tpwgts[0]), &(ubvec[0]), &(options[0]), &edgecut, &(partition[0]), &parmetis_comm);
+                           &(tpwgts[0]), &(ubvec[0]), &(options[0]), &edgecut, &(partition[0]), &partitioning_comm);

     // LCOV_EXCL_START
     if (result == METIS_ERROR) {
       throw UnexpectedError("Metis Error");
     }
     // LCOV_EXCL_STOP
-
-    MPI_Comm_free(&parmetis_comm);
   }

+  MPI_Comm_free(&partitioning_comm);
+
   MPI_Group_free(&mesh_group);
   MPI_Group_free(&world_group);
Partitioner.cpp
 #include <utils/Partitioner.hpp>
+#include <utils/PTScotchPartitioner.hpp>
 #include <utils/ParMETISPartitioner.hpp>

 Partitioner::Partitioner(const PartitionerOptions& options) : m_partitioner_options{options} {}

@@ -12,7 +13,7 @@ Partitioner::partition(const CRSGraph& graph)
       return ParMETISPartitioner::partition(graph);
     }
     case PartitionerLibrary::ptscotch: {
-      throw NotImplementedError("PTScotch");
+      return PTScotchPartitioner::partition(graph);
     }
     default: {
       throw UnexpectedError("invalid partition library");
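
With this change, PartitionerLibrary::ptscotch is dispatched like the other backends instead of throwing NotImplementedError. A hedged usage sketch follows; the PartitionerOptions header path, the library() accessor, and the helper function name are assumptions made for illustration, while the Partitioner constructor and partition() call match this diff:

#include <utils/CRSGraph.hpp>
#include <utils/Partitioner.hpp>
#include <utils/PartitionerOptions.hpp>   // assumed header for PartitionerOptions / PartitionerLibrary

Array<int>
partitionWithPTScotch(const CRSGraph& graph)   // illustrative helper, not part of pugs
{
  PartitionerOptions options;
  options.library() = PartitionerLibrary::ptscotch;   // hypothetical accessor: select the PT-Scotch backend

  Partitioner partitioner{options};
  return partitioner.partition(graph);   // dispatches to PTScotchPartitioner::partition
}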