From ee9030ecd04bdc7fa1ff10aa93f02c37eb810bbe Mon Sep 17 00:00:00 2001
From: Stephane Del Pino <stephane.delpino44@gmail.com>
Date: Wed, 24 Oct 2018 12:47:24 +0200
Subject: [PATCH] Add the parallel namespace to isolate parallel methods

---
 src/mesh/GmshReader.cpp      | 37 +++++++-------
 src/utils/Messenger.cpp      |  5 ++
 src/utils/Messenger.hpp      |  7 ++-
 src/utils/Partitioner.cpp    |  6 +--
 src/utils/PastisUtils.cpp    |  6 +--
 tests/mpi_test_Messenger.cpp | 95 +++++++++++++++++++-----------------
 tests/mpi_test_main.cpp      |  6 +--
 7 files changed, 88 insertions(+), 74 deletions(-)

diff --git a/src/mesh/GmshReader.cpp b/src/mesh/GmshReader.cpp
index 8b71419c6..0526eb869 100644
--- a/src/mesh/GmshReader.cpp
+++ b/src/mesh/GmshReader.cpp
@@ -164,13 +164,13 @@ class MeshDispatcher
     Partitioner P;
     Array<int> cell_new_owner = P.partition(mesh_graph);
 
-    std::vector<std::vector<CellId>> cell_vector_to_send_by_proc(commSize());
+    std::vector<std::vector<CellId>> cell_vector_to_send_by_proc(parallel::commSize());
     for (size_t i=0; i<cell_new_owner.size(); ++i) {
       cell_vector_to_send_by_proc[cell_new_owner[i]].push_back(CellId{i});
     }
 
-    std::vector<Array<const CellId>> cell_list_to_send_by_proc(commSize());
-    for (size_t i=0; i<commSize(); ++i) {
+    std::vector<Array<const CellId>> cell_list_to_send_by_proc(parallel::commSize());
+    for (size_t i=0; i<parallel::commSize(); ++i) {
       cell_list_to_send_by_proc[i] = convert_to_array(cell_vector_to_send_by_proc[i]);
     }
 
@@ -179,8 +179,8 @@
   Array<int> _buildNbCellToSend()
   {
-    Array<int> nb_cell_to_send_by_proc(commSize());
-    for (size_t i=0; i<commSize(); ++i) {
+    Array<int> nb_cell_to_send_by_proc(parallel::commSize());
+    for (size_t i=0; i<parallel::commSize(); ++i) {
       nb_cell_to_send_by_proc[i] = m_cell_list_to_send_by_proc[i].size();
     }
     return nb_cell_to_send_by_proc;
   }
@@ -193,8 +193,8 @@
   exchange(const CellValue<DataType>& cell_value) const
   {
     using MutableDataType = std::remove_const_t<DataType>;
-    std::vector<Array<DataType>> cell_value_to_send_by_proc(commSize());
-    for (size_t i=0; i<commSize(); ++i) {
+    std::vector<Array<DataType>> cell_value_to_send_by_proc(parallel::commSize());
+    for (size_t i=0; i<parallel::commSize(); ++i) {
       const Array<const CellId>& cell_list = m_cell_list_to_send_by_proc[i];
       Array<MutableDataType> cell_value_list(cell_list.size());
       parallel_for (cell_list.size(), PASTIS_LAMBDA(const CellId& cell_id) {
@@ -203,12 +203,12 @@
       cell_value_to_send_by_proc[i] = cell_value_list;
     }
 
-    std::vector<Array<MutableDataType>> recv_cell_value_by_proc(commSize());
-    for (size_t i=0; i<commSize(); ++i) {
+    std::vector<Array<MutableDataType>> recv_cell_value_by_proc(parallel::commSize());
+    for (size_t i=0; i<parallel::commSize(); ++i) {
       recv_cell_value_by_proc[i] = Array<MutableDataType>(m_nb_cell_to_recv_by_proc[i]);
     }
 
-    ::exchange(cell_value_to_send_by_proc, recv_cell_value_by_proc);
+    parallel::exchange(cell_value_to_send_by_proc, recv_cell_value_by_proc);
 
     return recv_cell_value_by_proc;
   }
@@ -222,7 +222,7 @@ class MeshDispatcher
     : m_mesh(mesh),
       m_cell_list_to_send_by_proc(_buildCellListToSend()),
       m_nb_cell_to_send_by_proc(_buildNbCellToSend()),
-      m_nb_cell_to_recv_by_proc(allToAll(m_nb_cell_to_send_by_proc))
+      m_nb_cell_to_recv_by_proc(parallel::allToAll(m_nb_cell_to_send_by_proc))
   {
     ;
   }
@@ -261,10 +261,10 @@ void GmshReader::_dispatch()
   std::vector<Array<CellType>> recv_cell_type_by_proc
     = dispatcher.exchange(mesh.connectivity().cellType());
 
-  for (int i_rank=0; i_rank < commSize(); ++i_rank) {
-    if (commRank() == i_rank) {
+  for (int i_rank=0; i_rank < parallel::commSize(); ++i_rank) {
+    if (parallel::commRank() == i_rank) {
       std::cout << "----- rank=" << i_rank << " -----\n";
-      for (int j_rank=0; j_rank < commSize(); ++j_rank) {
+      for (int j_rank=0; j_rank < parallel::commSize(); ++j_rank) {
         std::cout << "recv from " << j_rank << ':';
         for (size_t i=0; i<recv_cell_number_by_proc[j_rank].size(); ++i) {
           std::cout << ' ' << recv_cell_number_by_proc[j_rank][i] << '[' << name(recv_cell_type_by_proc[j_rank][i])<< "]:" << recv_cell_center_by_proc[j_rank][i];
@@ -274,8 +274,7 @@ void GmshReader::_dispatch()
     }
   }
 
-
-  Messenger::destroy();
+  parallel::Messenger::destroy();
   std::exit(0);
 }
 
@@ -283,7 +282,7 @@
 GmshReader::GmshReader(const std::string& filename)
   : m_filename(filename)
 {
-  if (commRank() == commSize()-1) {
+  if (parallel::commRank() == parallel::commSize()-1) {
     try {
       m_fin.open(m_filename);
       if (not m_fin) {
@@ -444,7 +443,7 @@ GmshReader::GmshReader(const std::string& filename)
     }
   }
 
-  if (commSize() > 1) {
+  if (parallel::commSize() > 1) {
     pout() << "Sequential mesh read! Need to be dispatched\n" << std::flush;
 
     const int mesh_dimension
@@ -454,7 +453,7 @@
       mesh_dimension = m_mesh->meshDimension();
     }
 
-    Array<int> dimensions = allGather(mesh_dimension);
+    Array<int> dimensions = parallel::allGather(mesh_dimension);
     std::set<int> dimension_set;
     for (size_t i=0; i<dimensions.size(); ++i) {
       const int i_dimension = dimensions[i];
diff --git a/src/utils/Messenger.cpp b/src/utils/Messenger.cpp
index 6c9a940e9..e5e9668c2 100644
--- a/src/utils/Messenger.cpp
+++ b/src/utils/Messenger.cpp
@@ -1,6 +1,9 @@
 #include <Messenger.hpp>
 #include <PastisOStream.hpp>
 
+namespace parallel
+{
+
 Messenger* Messenger::m_instance = nullptr;
 
 void Messenger::create(int& argc, char* argv[])
@@ -51,3 +54,5 @@ void Messenger::barrier() const
   MPI_Barrier(MPI_COMM_WORLD);
 #endif // PASTIS_HAS_MPI
 }
+
+} // namespace parallel
diff --git a/src/utils/Messenger.hpp b/src/utils/Messenger.hpp
index 32dd632bc..b27a952c6 100644
--- a/src/utils/Messenger.hpp
+++ b/src/utils/Messenger.hpp
@@ -14,6 +14,9 @@
 #include <mpi.h>
 #endif // PASTIS_HAS_MPI
 
+namespace parallel
+{
+
 class Messenger
 {
  private:
@@ -562,7 +565,7 @@ void exchange(const std::vector<Array<SendDataType>>& sent_array_list,
   static_assert(not std::is_const_v<RecvDataType>,
                 "receive data type cannot be const");
 
-   messenger().exchange(sent_array_list, recv_array_list);
+  messenger().exchange(sent_array_list, recv_array_list);
 }
 
 #ifdef PASTIS_HAS_MPI
@@ -617,4 +620,6 @@ Messenger::helper::mpiType<bool>() {return MPI_CXX_BOOL; }
 
 #endif // PASTIS_HAS_MPI
 
+} // namespace parallel
+
 #endif // MESSENGER_HPP
diff --git a/src/utils/Partitioner.cpp b/src/utils/Partitioner.cpp
index 29451cc14..83322adf0 100644
--- a/src/utils/Partitioner.cpp
+++ b/src/utils/Partitioner.cpp
@@ -17,13 +17,13 @@
 Array<int> Partitioner::partition(const CSRGraph& graph)
 {
   pout() << "Partitioning graph into "
-         << rang::style::bold << commSize() << rang::style::reset
+         << rang::style::bold << parallel::commSize() << rang::style::reset
          << " parts\n";
 
   int wgtflag = 0;
   int numflag = 0;
   int ncon = 1;
-  int npart= commSize();
+  int npart= parallel::commSize();
   std::vector<float> tpwgts(npart, 1./npart);
 
   std::vector<float> ubvec{1.05};
@@ -38,7 +38,7 @@ Array<int> Partitioner::partition(const CSRGraph& graph)
   std::vector<int> group_ranks
     = [&]() {
         Array<int> graph_node_owners
-          = allGather(static_cast<int>(graph.numberOfNodes()));
+          = parallel::allGather(static_cast<int>(graph.numberOfNodes()));
         std::vector<int> group_ranks;
         group_ranks.reserve(graph_node_owners.size());
         for (size_t i=0; i<graph_node_owners.size(); ++i) {
diff --git a/src/utils/PastisUtils.cpp b/src/utils/PastisUtils.cpp
index 66aa50b49..4a22dec2b 100644
--- a/src/utils/PastisUtils.cpp
+++ b/src/utils/PastisUtils.cpp
@@ -18,7 +18,7 @@
 
 std::string initialize(int& argc, char* argv[])
 {
-  Messenger::create(argc, argv);
+  parallel::Messenger::create(argc, argv);
 
   long unsigned number = 10;
   std::string filename;
@@ -78,7 +78,7 @@ std::string initialize(int& argc, char* argv[])
 
   try {
     app.parse(argc, argv);
   } catch (const CLI::ParseError &e) {
-    Messenger::destroy();
+    parallel::Messenger::destroy();
     std::exit(app.exit(e, pout(), perr()));
   }
@@ -107,5 +107,5 @@ std::string initialize(int& argc, char* argv[])
 void finalize()
 {
   Kokkos::finalize();
-  Messenger::destroy();
+  parallel::Messenger::destroy();
 }
diff --git a/tests/mpi_test_Messenger.cpp b/tests/mpi_test_Messenger.cpp
index 66e247d99..2837e0830 100644
--- a/tests/mpi_test_Messenger.cpp
+++ b/tests/mpi_test_Messenger.cpp
@@ -38,11 +38,11 @@ struct tri_int
 template <typename T>
 void test_allToAll()
 {
-  Array<T> data_array(commSize());
+  Array<T> data_array(parallel::commSize());
   for (size_t i=0; i< data_array.size(); ++i) {
-    data_array[i] = commRank();
+    data_array[i] = parallel::commRank();
   }
-  auto exchanged_array = allToAll(data_array);
+  auto exchanged_array = parallel::allToAll(data_array);
 
   for (size_t i=0; i< data_array.size(); ++i) {
     REQUIRE(exchanged_array[i] == i);
@@ -52,11 +52,11 @@
 template <>
 void test_allToAll<bool>()
 {
-  Array<bool> data_array(commSize());
+  Array<bool> data_array(parallel::commSize());
   for (size_t i=0; i< data_array.size(); ++i) {
-    data_array[i] = ((commRank()%2)==0);
+    data_array[i] = ((parallel::commRank()%2)==0);
   }
-  auto exchanged_array = allToAll(data_array);
+  auto exchanged_array = parallel::allToAll(data_array);
 
   for (size_t i=0; i< data_array.size(); ++i) {
     REQUIRE(exchanged_array[i] == ((i%2)==0));
@@ -66,12 +66,12 @@
 template <>
 void test_allToAll<tri_int>()
 {
-  Array<tri_int> data_array(commSize());
+  Array<tri_int> data_array(parallel::commSize());
   for (size_t i=0; i< data_array.size(); ++i) {
-    const int val = 1+commRank();
+    const int val = 1+parallel::commRank();
     data_array[i] = tri_int{val, 2*val, val+3 };
   }
-  auto exchanged_array = allToAll(data_array);
+  auto exchanged_array = parallel::allToAll(data_array);
 
   for (size_t i=0; i< data_array.size(); ++i) {
     const int val = 1+i;
@@ -86,19 +86,19 @@ TEST_CASE("Messenger", "[mpi]") {
   SECTION("communication info") {
     int rank=0;
     IF_MPI(MPI_Comm_rank(MPI_COMM_WORLD, &rank));
-    REQUIRE(rank == commRank());
+    REQUIRE(rank == parallel::commRank());
 
     int size=1;
    IF_MPI(MPI_Comm_size(MPI_COMM_WORLD, &size));
-    REQUIRE(size == commSize());
+    REQUIRE(size == parallel::commSize());
   }
 
   SECTION("reduction") {
-    const int min_value = allReduceMin(commRank()+3);
+    const int min_value = parallel::allReduceMin(parallel::commRank()+3);
     REQUIRE(min_value ==3);
 
-    const int max_value = allReduceMax(commRank()+3);
-    REQUIRE(max_value == ((commSize()-1) + 3));
+    const int max_value = parallel::allReduceMax(parallel::commRank()+3);
+    REQUIRE(max_value == ((parallel::commSize()-1) + 3));
   }
 
   SECTION("all to all") {
@@ -134,11 +134,11 @@
 
 #ifndef NDEBUG
     SECTION("checking invalid all to all") {
-      Array<int> invalid_all_to_all(commSize()+1);
-      REQUIRE_THROWS_AS(allToAll(invalid_all_to_all), AssertError);
+      Array<int> invalid_all_to_all(parallel::commSize()+1);
+      REQUIRE_THROWS_AS(parallel::allToAll(invalid_all_to_all), AssertError);
 
-      Array<int> different_size_all_to_all(commSize()*(commRank()+1));
-      REQUIRE_THROWS_AS(allToAll(different_size_all_to_all), AssertError);
+      Array<int> different_size_all_to_all(parallel::commSize()*(parallel::commRank()+1));
+      REQUIRE_THROWS_AS(parallel::allToAll(different_size_all_to_all), AssertError);
     }
 #endif // NDEBUG
   }
@@ -146,22 +146,24 @@
   SECTION("broadcast value") {
     { // simple type
-      int value{(3+commRank())*2};
-      broadcast(value, 0);
+      int value{(3+parallel::commRank())*2};
+      parallel::broadcast(value, 0);
 
       REQUIRE(value == 6);
     }
 
     { // trivial simple type
-      mpi_check::integer value{(3+commRank())*2};
-      broadcast(value, 0);
+      mpi_check::integer value{(3+parallel::commRank())*2};
+      parallel::broadcast(value, 0);
 
       REQUIRE((value == 6));
     }
 
     { // compound trivial type
-      mpi_check::tri_int value{(3+commRank())*2, 2+commRank(), 4-commRank()};
-      broadcast(value, 0);
+      mpi_check::tri_int value{(3+parallel::commRank())*2,
+                               2+parallel::commRank(),
+                               4-parallel::commRank()};
+      parallel::broadcast(value, 0);
 
       REQUIRE((value == mpi_check::tri_int{6,2,4}));
     }
   }
@@ -170,30 +172,30 @@
   SECTION("broadcast array") {
     { // simple type
       Array<int> array(3);
-      array[0] = (3+commRank())*2;
-      array[1] = 2+commRank();
-      array[2] = 4-commRank();
-      broadcast(array, 0);
+      array[0] = (3+parallel::commRank())*2;
+      array[1] = 2+parallel::commRank();
+      array[2] = 4-parallel::commRank();
+      parallel::broadcast(array, 0);
 
       REQUIRE(((array[0]==6) and (array[1]==2) and (array[2]==4)));
     }
 
     { // trivial simple type
       Array<mpi_check::integer> array(3);
-      array[0] = (3+commRank())*2;
-      array[1] = 2+commRank();
-      array[2] = 4-commRank();
-      broadcast(array, 0);
+      array[0] = (3+parallel::commRank())*2;
+      array[1] = 2+parallel::commRank();
+      array[2] = 4-parallel::commRank();
+      parallel::broadcast(array, 0);
 
       REQUIRE(((array[0]==6) and (array[1]==2) and (array[2]==4)));
     }
 
     { // compound trivial type
       Array<mpi_check::tri_int> array(3);
-      array[0] = mpi_check::tri_int{(3+commRank())*2, 2+commRank(), 4-commRank()};
-      array[1] = mpi_check::tri_int{(2+commRank())*4, 3+commRank(), 1-commRank()};
-      array[2] = mpi_check::tri_int{(5+commRank()), -3+commRank(), commRank()};
-      broadcast(array, 0);
+      array[0] = mpi_check::tri_int{(3+parallel::commRank())*2, 2+parallel::commRank(), 4-parallel::commRank()};
+      array[1] = mpi_check::tri_int{(2+parallel::commRank())*4, 3+parallel::commRank(), 1-parallel::commRank()};
+      array[2] = mpi_check::tri_int{(5+parallel::commRank()), -3+parallel::commRank(), parallel::commRank()};
+      parallel::broadcast(array, 0);
       REQUIRE(((array[0] == mpi_check::tri_int{6, 2,4}) and
                (array[1] == mpi_check::tri_int{8, 3,1}) and
                (array[2] == mpi_check::tri_int{5,-3,0})));
@@ -203,9 +205,9 @@ SECTION("all gather value") {
     { // simple type
-      int value{(3+commRank())*2};
-      Array<int> gather_array = allGather(value);
-      REQUIRE(gather_array.size() == commSize());
+      int value{(3+parallel::commRank())*2};
+      Array<int> gather_array = parallel::allGather(value);
+      REQUIRE(gather_array.size() == parallel::commSize());
 
       for (size_t i=0; i<gather_array.size(); ++i) {
         REQUIRE((gather_array[i] == (3+i)*2));
       }
     }
@@ -214,9 +216,9 @@
 
     { // trivial simple type
-      mpi_check::integer value{(3+commRank())*2};
-      Array<mpi_check::integer> gather_array = allGather(value);
-      REQUIRE(gather_array.size() == commSize());
+      mpi_check::integer value{(3+parallel::commRank())*2};
+      Array<mpi_check::integer> gather_array = parallel::allGather(value);
+      REQUIRE(gather_array.size() == parallel::commSize());
 
       for (size_t i=0; i<gather_array.size(); ++i) {
         REQUIRE((gather_array[i] == (3+i)*2));
       }
@@ -225,10 +227,13 @@
     }
 
     { // compound trivial type
-      mpi_check::tri_int value{(3+commRank())*2, 2+commRank(), 4-commRank()};
-      Array<mpi_check::tri_int> gather_array = allGather(value);
-      REQUIRE(gather_array.size() == commSize());
+      mpi_check::tri_int value{(3+parallel::commRank())*2,
+                               2+parallel::commRank(),
+                               4-parallel::commRank()};
+      Array<mpi_check::tri_int> gather_array
+        = parallel::allGather(value);
+      REQUIRE(gather_array.size() == parallel::commSize());
 
       for (size_t i=0; i<gather_array.size(); ++i) {
         mpi_check::tri_int expected_value{static_cast<int>((3+i)*2),
                                           static_cast<int>(2+i),
diff --git a/tests/mpi_test_main.cpp b/tests/mpi_test_main.cpp
index b370c1f82..7607f79a8 100644
--- a/tests/mpi_test_main.cpp
+++ b/tests/mpi_test_main.cpp
@@ -8,17 +8,17 @@
 int main( int argc, char* argv[] )
 {
-  Messenger::create(argc, argv);
+  parallel::Messenger::create(argc, argv);
 
   Kokkos::initialize({4,-1,-1,true});
 
-  if (commRank() != 0) {
+  if (parallel::commRank() != 0) {
     setenv("GCOV_PREFIX", "/dev/null", 1);
   }
 
   int result = Catch::Session().run( argc, argv );
 
   Kokkos::finalize();
-  Messenger::destroy();
+  parallel::Messenger::destroy();
 
   return result;
 }
-- 
GitLab
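
Usage note (illustrative sketch, not part of the patch): after this change,
every free communication helper (commRank, commSize, allToAll, allGather,
allReduceMin/Max, broadcast, exchange) and the Messenger singleton must be
qualified with parallel::. A minimal caller is sketched below, assuming only
the Messenger.hpp header touched by this patch and a standard MPI launch;
this main() is hypothetical and does not come from the repository.

    #include <Messenger.hpp>
    #include <iostream>

    int main(int argc, char* argv[])
    {
      // Set up the communication layer (MPI when PASTIS_HAS_MPI is defined).
      parallel::Messenger::create(argc, argv);

      // The former free functions now live in the parallel namespace.
      int value = (parallel::commRank() == 0) ? 42 : 0;
      parallel::broadcast(value, 0);   // after this, value == 42 on every rank

      std::cout << "rank " << parallel::commRank()
                << '/' << parallel::commSize()
                << ": value=" << value << '\n';

      // Tear down the communication layer before exiting.
      parallel::Messenger::destroy();
      return 0;
    }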