From 19a9b71771396be640c18df8af1d675bb8354091 Mon Sep 17 00:00:00 2001
From: Stephane Del Pino <stephane.delpino44@gmail.com>
Date: Thu, 25 Oct 2018 15:01:54 +0200
Subject: [PATCH] Rename parallel::commSize() and parallel::commRank()

parallel::commSize() -> parallel::size()
parallel::commRank() -> parallel::rank()
---
 src/mesh/GmshReader.cpp      |  28 +++----
 src/utils/Messenger.cpp      |  14 +++-
 src/utils/Messenger.hpp      |  14 ++--
 src/utils/Partitioner.cpp    |   4 +-
 tests/mpi_test_Messenger.cpp | 142 ++++++++++++++++++-----------------
 tests/mpi_test_main.cpp      |   2 +-
 6 files changed, 109 insertions(+), 95 deletions(-)

diff --git a/src/mesh/GmshReader.cpp b/src/mesh/GmshReader.cpp
index 0526eb869..d4167505e 100644
--- a/src/mesh/GmshReader.cpp
+++ b/src/mesh/GmshReader.cpp
@@ -164,13 +164,13 @@ class MeshDispatcher
     Partitioner P;
     Array<int> cell_new_owner = P.partition(mesh_graph);
 
-    std::vector<std::vector<CellId>> cell_vector_to_send_by_proc(parallel::commSize());
+    std::vector<std::vector<CellId>> cell_vector_to_send_by_proc(parallel::size());
     for (size_t i=0; i<cell_new_owner.size(); ++i) {
       cell_vector_to_send_by_proc[cell_new_owner[i]].push_back(CellId{i});
     }
 
-    std::vector<Array<const CellId>> cell_list_to_send_by_proc(parallel::commSize());
-    for (size_t i=0; i<parallel::commSize(); ++i) {
+    std::vector<Array<const CellId>> cell_list_to_send_by_proc(parallel::size());
+    for (size_t i=0; i<parallel::size(); ++i) {
       cell_list_to_send_by_proc[i] = convert_to_array(cell_vector_to_send_by_proc[i]);
     }
 
@@ -179,8 +179,8 @@ class MeshDispatcher
 
   Array<int> _buildNbCellToSend()
   {
-    Array<int> nb_cell_to_send_by_proc(parallel::commSize());
-    for (size_t i=0; i<parallel::commSize(); ++i) {
+    Array<int> nb_cell_to_send_by_proc(parallel::size());
+    for (size_t i=0; i<parallel::size(); ++i) {
       nb_cell_to_send_by_proc[i] = m_cell_list_to_send_by_proc[i].size();
     }
     return nb_cell_to_send_by_proc;
@@ -193,8 +193,8 @@ class MeshDispatcher
   exchange(const CellValue<DataType>& cell_value) const
   {
     using MutableDataType = std::remove_const_t<DataType>;
-    std::vector<Array<DataType>> cell_value_to_send_by_proc(parallel::commSize());
-    for (size_t i=0; i<parallel::commSize(); ++i) {
+    std::vector<Array<DataType>> cell_value_to_send_by_proc(parallel::size());
+    for (size_t i=0; i<parallel::size(); ++i) {
       const Array<const CellId>& cell_list = m_cell_list_to_send_by_proc[i];
       Array<MutableDataType> cell_value_list(cell_list.size());
       parallel_for (cell_list.size(), PASTIS_LAMBDA(const CellId& cell_id) {
@@ -203,8 +203,8 @@ class MeshDispatcher
       cell_value_to_send_by_proc[i] = cell_value_list;
     }
 
-    std::vector<Array<MutableDataType>> recv_cell_value_by_proc(parallel::commSize());
-    for (size_t i=0; i<parallel::commSize(); ++i) {
+    std::vector<Array<MutableDataType>> recv_cell_value_by_proc(parallel::size());
+    for (size_t i=0; i<parallel::size(); ++i) {
       recv_cell_value_by_proc[i] = Array<MutableDataType>(m_nb_cell_to_recv_by_proc[i]);
     }
 
@@ -261,10 +261,10 @@ void GmshReader::_dispatch()
   std::vector<Array<CellType>> recv_cell_type_by_proc
     = dispatcher.exchange(mesh.connectivity().cellType());
 
-  for (int i_rank=0; i_rank < parallel::commSize(); ++i_rank) {
-    if (parallel::commRank() == i_rank) {
+  for (int i_rank=0; i_rank < parallel::size(); ++i_rank) {
+    if (parallel::rank() == i_rank) {
       std::cout << "----- rank=" << i_rank << " -----\n";
-      for (int j_rank=0; j_rank < parallel::commSize(); ++j_rank) {
+      for (int j_rank=0; j_rank < parallel::size(); ++j_rank) {
         std::cout << "recv from " << j_rank << ':';
         for (size_t i=0; i<recv_cell_number_by_proc[j_rank].size(); ++i) {
           std::cout << ' ' << recv_cell_number_by_proc[j_rank][i]
                     << '[' << name(recv_cell_type_by_proc[j_rank][i])<< "]:"
                     << recv_cell_center_by_proc[j_rank][i];
@@ -282,7 +282,7 @@ void GmshReader::_dispatch()
 GmshReader::GmshReader(const std::string& filename)
   : m_filename(filename)
 {
-  if (parallel::commRank() == parallel::commSize()-1) {
+  if (parallel::rank() == parallel::size()-1) {
     try {
       m_fin.open(m_filename);
       if (not m_fin) {
@@ -443,7 +443,7 @@ GmshReader::GmshReader(const std::string& filename)
     }
   }
 
-  if (parallel::commSize() > 1) {
+  if (parallel::size() > 1) {
     pout() << "Sequential mesh read! Need to be dispatched\n" << std::flush;
 
     const int mesh_dimension
diff --git a/src/utils/Messenger.cpp b/src/utils/Messenger.cpp
index b4de28efb..2bf82efac 100644
--- a/src/utils/Messenger.cpp
+++ b/src/utils/Messenger.cpp
@@ -30,8 +30,18 @@ Messenger(int& argc, char* argv[])
 {
 #ifdef PASTIS_HAS_MPI
   MPI_Init(&argc, &argv);
-  MPI_Comm_rank(MPI_COMM_WORLD, &m_rank);
-  MPI_Comm_size(MPI_COMM_WORLD, &m_size);
+
+  m_rank = [] () {
+    int rank;
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+    return rank;
+  } ();
+
+  m_size = [] () {
+    int size=0;
+    MPI_Comm_size(MPI_COMM_WORLD, &size);
+    return size;
+  } ();
 
   if (m_rank != 0) {
     // LCOV_EXCL_START
diff --git a/src/utils/Messenger.hpp b/src/utils/Messenger.hpp
index 06ee020f1..1702d8642 100644
--- a/src/utils/Messenger.hpp
+++ b/src/utils/Messenger.hpp
@@ -83,8 +83,8 @@ class Messenger
   static Messenger* m_instance;
   Messenger(int& argc, char* argv[]);
 
-  int m_rank{0};
-  int m_size{1};
+  size_t m_rank{0};
+  size_t m_size{1};
 
   template <typename DataType>
   void _allGather(const DataType& data,
@@ -274,13 +274,13 @@ class Messenger
   }
 
   PASTIS_INLINE
-  const int& rank() const
+  const size_t& rank() const
   {
     return m_rank;
   }
 
   PASTIS_INLINE
-  const int& size() const
+  const size_t& size() const
   {
     return m_size;
   }
@@ -502,16 +502,14 @@ const Messenger& messenger()
   return Messenger::getInstance();
 }
 
-[[deprecated("use better name")]]
 PASTIS_INLINE
-const int& commRank()
+const size_t& rank()
 {
   return messenger().rank();
 }
 
-[[deprecated("use better name")]]
 PASTIS_INLINE
-const int& commSize()
+const size_t& size()
 {
   return messenger().size();
 }
diff --git a/src/utils/Partitioner.cpp b/src/utils/Partitioner.cpp
index 83322adf0..77839a25c 100644
--- a/src/utils/Partitioner.cpp
+++ b/src/utils/Partitioner.cpp
@@ -17,13 +17,13 @@ Array<int> Partitioner::partition(const CSRGraph& graph)
 {
 
   pout() << "Partitioning graph into "
-         << rang::style::bold << parallel::commSize() << rang::style::reset
+         << rang::style::bold << parallel::size() << rang::style::reset
          << " parts\n";
 
   int wgtflag = 0;
   int numflag = 0;
   int ncon = 1;
-  int npart= parallel::commSize();
+  int npart= parallel::size();
   std::vector<float> tpwgts(npart, 1./npart);
 
   std::vector<float> ubvec{1.05};
diff --git a/tests/mpi_test_Messenger.cpp b/tests/mpi_test_Messenger.cpp
index 081ab8ef0..ecad3949c 100644
--- a/tests/mpi_test_Messenger.cpp
+++ b/tests/mpi_test_Messenger.cpp
@@ -38,9 +38,9 @@ struct tri_int
 template <typename T>
 void test_allToAll()
 {
-  Array<T> data_array(parallel::commSize());
+  Array<T> data_array(parallel::size());
   for (size_t i=0; i< data_array.size(); ++i) {
-    data_array[i] = parallel::commRank();
+    data_array[i] = parallel::rank();
   }
 
   auto exchanged_array = parallel::allToAll(data_array);
@@ -52,9 +52,9 @@ void test_allToAll()
 template <>
 void test_allToAll<bool>()
 {
-  Array<bool> data_array(parallel::commSize());
+  Array<bool> data_array(parallel::size());
   for (size_t i=0; i< data_array.size(); ++i) {
-    data_array[i] = ((parallel::commRank()%2)==0);
+    data_array[i] = ((parallel::rank()%2)==0);
   }
 
   auto exchanged_array = parallel::allToAll(data_array);
@@ -66,9 +66,9 @@ void test_allToAll<bool>()
 template <>
 void test_allToAll<tri_int>()
 {
-  Array<tri_int> data_array(parallel::commSize());
+  Array<tri_int> data_array(parallel::size());
   for (size_t i=0; i< data_array.size(); ++i) {
-    const int val = 1+parallel::commRank();
+    const int val = 1+parallel::rank();
     data_array[i] = tri_int{val, 2*val, val+3 };
   }
 
  auto exchanged_array = parallel::allToAll(data_array);
@@ -86,19 +86,19 @@ TEST_CASE("Messenger", "[mpi]") {
   SECTION("communication info") {
     int rank=0;
     IF_MPI(MPI_Comm_rank(MPI_COMM_WORLD, &rank));
-    REQUIRE(rank == parallel::commRank());
+    REQUIRE(rank == parallel::rank());
 
     int size=1;
     IF_MPI(MPI_Comm_size(MPI_COMM_WORLD, &size));
-    REQUIRE(size == parallel::commSize());
+    REQUIRE(size == parallel::size());
   }
 
   SECTION("reduction") {
-    const int min_value = parallel::allReduceMin(parallel::commRank()+3);
+    const int min_value = parallel::allReduceMin(parallel::rank()+3);
     REQUIRE(min_value ==3);
 
-    const int max_value = parallel::allReduceMax(parallel::commRank()+3);
-    REQUIRE(max_value == ((parallel::commSize()-1) + 3));
+    const int max_value = parallel::allReduceMax(parallel::rank()+3);
+    REQUIRE(max_value == ((parallel::size()-1) + 3));
   }
 
   SECTION("all to all") {
@@ -134,11 +134,11 @@ TEST_CASE("Messenger", "[mpi]") {
 
 #ifndef NDEBUG
   SECTION("checking invalid all to all") {
-    if (parallel::commSize() > 1) {
-      Array<int> invalid_all_to_all(parallel::commSize()+1);
+    if (parallel::size() > 1) {
+      Array<int> invalid_all_to_all(parallel::size()+1);
       REQUIRE_THROWS_AS(parallel::allToAll(invalid_all_to_all), AssertError);
 
-      Array<int> different_size_all_to_all(parallel::commSize()*(parallel::commRank()+1));
+      Array<int> different_size_all_to_all(parallel::size()*(parallel::rank()+1));
       REQUIRE_THROWS_AS(parallel::allToAll(different_size_all_to_all), AssertError);
     }
   }
@@ -148,23 +148,23 @@ TEST_CASE("Messenger", "[mpi]") {
   SECTION("broadcast value") {
     {
       // simple type
-      int value{(3+parallel::commRank())*2};
+      size_t value{(3+parallel::rank())*2};
       parallel::broadcast(value, 0);
       REQUIRE(value == 6);
     }
 
     {
       // trivial simple type
-      mpi_check::integer value{(3+parallel::commRank())*2};
+      mpi_check::integer value{static_cast<int>((3+parallel::rank())*2)};
       parallel::broadcast(value, 0);
       REQUIRE((value == 6));
     }
 
     {
       // compound trivial type
-      mpi_check::tri_int value{(3+parallel::commRank())*2,
-                               2+parallel::commRank(),
-                               4-parallel::commRank()};
+      mpi_check::tri_int value{static_cast<int>((3+parallel::rank())*2),
+                               static_cast<int>(2+parallel::rank()),
+                               static_cast<int>(4-parallel::rank())};
       parallel::broadcast(value, 0);
       REQUIRE((value == mpi_check::tri_int{6,2,4}));
     }
@@ -173,10 +173,10 @@ TEST_CASE("Messenger", "[mpi]") {
   SECTION("broadcast array") {
     {
      // simple type
-     Array<int> array(3);
-     array[0] = (3+parallel::commRank())*2;
-     array[1] = 2+parallel::commRank();
-     array[2] = 4-parallel::commRank();
+     Array<size_t> array(3);
+     array[0] = (3+parallel::rank())*2;
+     array[1] = 2+parallel::rank();
+     array[2] = 4-parallel::rank();
      parallel::broadcast(array, 0);
      REQUIRE(((array[0]==6) and (array[1]==2) and (array[2]==4)));
    }
@@ -184,9 +184,9 @@ TEST_CASE("Messenger", "[mpi]") {
    {
      // trivial simple type
      Array<mpi_check::integer> array(3);
-     array[0] = (3+parallel::commRank())*2;
-     array[1] = 2+parallel::commRank();
-     array[2] = 4-parallel::commRank();
+     array[0] = static_cast<int>((3+parallel::rank())*2);
+     array[1] = static_cast<int>(2+parallel::rank());
+     array[2] = static_cast<int>(4-parallel::rank());
      parallel::broadcast(array, 0);
      REQUIRE(((array[0]==6) and (array[1]==2) and (array[2]==4)));
    }
@@ -194,9 +194,15 @@ TEST_CASE("Messenger", "[mpi]") {
    {
      // compound trivial type
      Array<mpi_check::tri_int> array(3);
-     array[0] = mpi_check::tri_int{(3+parallel::commRank())*2, 2+parallel::commRank(), 4-parallel::commRank()};
-     array[1] = mpi_check::tri_int{(2+parallel::commRank())*4, 3+parallel::commRank(), 1-parallel::commRank()};
-     array[2] = mpi_check::tri_int{(5+parallel::commRank()), -3+parallel::commRank(), parallel::commRank()};
+     array[0] = mpi_check::tri_int{static_cast<int>((3+parallel::rank())*2),
+                                   static_cast<int>(2+parallel::rank()),
+                                   static_cast<int>(4-parallel::rank())};
+     array[1] = mpi_check::tri_int{static_cast<int>((2+parallel::rank())*4),
+                                   static_cast<int>(3+parallel::rank()),
+                                   static_cast<int>(1-parallel::rank())};
+     array[2] = mpi_check::tri_int{static_cast<int>((5+parallel::rank())),
+                                   static_cast<int>(-3+parallel::rank()),
+                                   static_cast<int>(parallel::rank())};
      parallel::broadcast(array, 0);
      REQUIRE(((array[0] == mpi_check::tri_int{6, 2,4}) and
              (array[1] == mpi_check::tri_int{8, 3,1}) and
@@ -207,9 +213,9 @@ TEST_CASE("Messenger", "[mpi]") {
  SECTION("all gather value") {
    {
      // simple type
-     int value{(3+parallel::commRank())*2};
-     Array<int> gather_array = parallel::allGather(value);
-     REQUIRE(gather_array.size() == parallel::commSize());
+     size_t value{(3+parallel::rank())*2};
+     Array<size_t> gather_array = parallel::allGather(value);
+     REQUIRE(gather_array.size() == parallel::size());
 
      for (size_t i=0; i<gather_array.size(); ++i) {
        REQUIRE((gather_array[i] == (3+i)*2));
@@ -218,9 +224,9 @@ TEST_CASE("Messenger", "[mpi]") {
 
    {
      // trivial simple type
-     mpi_check::integer value{(3+parallel::commRank())*2};
+     mpi_check::integer value{static_cast<int>((3+parallel::rank())*2)};
      Array<mpi_check::integer> gather_array = parallel::allGather(value);
-     REQUIRE(gather_array.size() == parallel::commSize());
+     REQUIRE(gather_array.size() == parallel::size());
 
      for (size_t i=0; i<gather_array.size(); ++i) {
        REQUIRE((gather_array[i] == (3+i)*2));
@@ -229,13 +235,13 @@ TEST_CASE("Messenger", "[mpi]") {
 
    {
      // compound trivial type
-     mpi_check::tri_int value{(3+parallel::commRank())*2,
-                              2+parallel::commRank(),
-                              4-parallel::commRank()};
+     mpi_check::tri_int value{static_cast<int>((3+parallel::rank())*2),
+                              static_cast<int>(2+parallel::rank()),
+                              static_cast<int>(4-parallel::rank())};
 
      Array<mpi_check::tri_int> gather_array = parallel::allGather(value);
-     REQUIRE(gather_array.size() == parallel::commSize());
+     REQUIRE(gather_array.size() == parallel::size());
 
      for (size_t i=0; i<gather_array.size(); ++i) {
        mpi_check::tri_int expected_value{static_cast<int>((3+i)*2),
                                          static_cast<int>(2+i),
@@ -250,10 +256,10 @@ TEST_CASE("Messenger", "[mpi]") {
      // simple type
      Array<int> array(3);
      for (size_t i=0; i<array.size(); ++i) {
-       array[i] = (3+parallel::commRank())*2+i;
+       array[i] = (3+parallel::rank())*2+i;
      }
      Array<int> gather_array = parallel::allGather(array);
-     REQUIRE(gather_array.size() == array.size()*parallel::commSize());
+     REQUIRE(gather_array.size() == array.size()*parallel::size());
 
      for (size_t i=0; i<gather_array.size(); ++i) {
        REQUIRE((gather_array[i] == (3+i/array.size())*2+(i%array.size())));
@@ -264,10 +270,10 @@ TEST_CASE("Messenger", "[mpi]") {
      // trivial simple type
      Array<mpi_check::integer> array(3);
      for (size_t i=0; i<array.size(); ++i) {
-       array[i] = (3+parallel::commRank())*2+i;
+       array[i] = (3+parallel::rank())*2+i;
      }
      Array<mpi_check::integer> gather_array = parallel::allGather(array);
-     REQUIRE(gather_array.size() == array.size()*parallel::commSize());
+     REQUIRE(gather_array.size() == array.size()*parallel::size());
 
      for (size_t i=0; i<gather_array.size(); ++i) {
        REQUIRE((gather_array[i] == (3+i/array.size())*2+(i%array.size())));
@@ -278,14 +284,14 @@ TEST_CASE("Messenger", "[mpi]") {
      // compound trivial type
      Array<mpi_check::tri_int> array(3);
      for (size_t i=0; i<array.size(); ++i) {
-       array[i] = mpi_check::tri_int{static_cast<int>((3+parallel::commRank())*2),
-                                     static_cast<int>(2+parallel::commRank()+i),
-                                     static_cast<int>(4-parallel::commRank()-i)};
+       array[i] = mpi_check::tri_int{static_cast<int>((3+parallel::rank())*2),
+                                     static_cast<int>(2+parallel::rank()+i),
+                                     static_cast<int>(4-parallel::rank()-i)};
      }
 
      Array<mpi_check::tri_int> gather_array = parallel::allGather(array);
-     REQUIRE(gather_array.size() == array.size()*parallel::commSize());
+     REQUIRE(gather_array.size() == array.size()*parallel::size());
 
      for (size_t i=0; i<gather_array.size(); ++i) {
        mpi_check::tri_int expected_value{static_cast<int>((3+i/array.size())*2),
                                          static_cast<int>(2+i/array.size()+(i%array.size())),
@@ -297,22 +303,22 @@ TEST_CASE("Messenger", "[mpi]") {
  SECTION("all array exchanges") {
    {
      // simple type
-     std::vector<Array<const int>> send_array_list(parallel::commSize());
+     std::vector<Array<const int>> send_array_list(parallel::size());
      for (size_t i=0; i<send_array_list.size(); ++i) {
        Array<int> send_array(i+1);
        for (size_t j=0; j<send_array.size(); ++j) {
-         send_array[j] = (parallel::commRank()+1)*j;
+         send_array[j] = (parallel::rank()+1)*j;
        }
        send_array_list[i] = send_array;
      }
 
-     std::vector<Array<int>> recv_array_list(parallel::commSize());
+     std::vector<Array<int>> recv_array_list(parallel::size());
      for (size_t i=0; i<recv_array_list.size(); ++i) {
-       recv_array_list[i] = Array<int>(parallel::commRank()+1);
+       recv_array_list[i] = Array<int>(parallel::rank()+1);
      }
      parallel::exchange(send_array_list, recv_array_list);
 
-     for (size_t i=0; i<parallel::commSize(); ++i) {
+     for (size_t i=0; i<parallel::size(); ++i) {
        const Array<const int> recv_array = recv_array_list[i];
        for (size_t j=0; j<recv_array.size(); ++j) {
          REQUIRE(recv_array[j] == (i+1)*j);
@@ -321,22 +327,22 @@ TEST_CASE("Messenger", "[mpi]") {
 
    {
      // trivial simple type
-     std::vector<Array<mpi_check::integer>> send_array_list(parallel::commSize());
+     std::vector<Array<mpi_check::integer>> send_array_list(parallel::size());
      for (size_t i=0; i<send_array_list.size(); ++i) {
        Array<mpi_check::integer> send_array(i+1);
        for (size_t j=0; j<send_array.size(); ++j) {
-         send_array[j] = static_cast<int>((parallel::commRank()+1)*j);
+         send_array[j] = static_cast<int>((parallel::rank()+1)*j);
        }
        send_array_list[i] = send_array;
      }
 
-     std::vector<Array<mpi_check::integer>> recv_array_list(parallel::commSize());
+     std::vector<Array<mpi_check::integer>> recv_array_list(parallel::size());
      for (size_t i=0; i<recv_array_list.size(); ++i) {
-       recv_array_list[i] = Array<mpi_check::integer>(parallel::commRank()+1);
+       recv_array_list[i] = Array<mpi_check::integer>(parallel::rank()+1);
      }
      parallel::exchange(send_array_list, recv_array_list);
 
-     for (size_t i=0; i<parallel::commSize(); ++i) {
+     for (size_t i=0; i<parallel::size(); ++i) {
        const Array<const mpi_check::integer> recv_array = recv_array_list[i];
        for (size_t j=0; j<recv_array.size(); ++j) {
          REQUIRE(recv_array[j] == (i+1)*j);
@@ -346,24 +352,24 @@ TEST_CASE("Messenger", "[mpi]") {
 
    {
      // compound trivial type
-     std::vector<Array<mpi_check::tri_int>> send_array_list(parallel::commSize());
+     std::vector<Array<mpi_check::tri_int>> send_array_list(parallel::size());
      for (size_t i=0; i<send_array_list.size(); ++i) {
        Array<mpi_check::tri_int> send_array(i+1);
        for (size_t j=0; j<send_array.size(); ++j) {
-         send_array[j] = mpi_check::tri_int{static_cast<int>((parallel::commRank()+1)*j),
-                                            static_cast<int>(parallel::commRank()),
+         send_array[j] = mpi_check::tri_int{static_cast<int>((parallel::rank()+1)*j),
+                                            static_cast<int>(parallel::rank()),
                                             static_cast<int>(j)};
        }
        send_array_list[i] = send_array;
      }
 
-     std::vector<Array<mpi_check::tri_int>> recv_array_list(parallel::commSize());
+     std::vector<Array<mpi_check::tri_int>> recv_array_list(parallel::size());
      for (size_t i=0; i<recv_array_list.size(); ++i) {
-       recv_array_list[i] = Array<mpi_check::tri_int>(parallel::commRank()+1);
+       recv_array_list[i] = Array<mpi_check::tri_int>(parallel::rank()+1);
      }
      parallel::exchange(send_array_list, recv_array_list);
 
-     for (size_t i=0; i<parallel::commSize(); ++i) {
+     for (size_t i=0; i<parallel::size(); ++i) {
        const Array<const mpi_check::tri_int> recv_array = recv_array_list[i];
        for (size_t j=0; j<recv_array.size(); ++j) {
          mpi_check::tri_int expected_value{static_cast<int>((i+1)*j),
@@ -377,22 +383,22 @@ TEST_CASE("Messenger", "[mpi]") {
 
 #ifndef NDEBUG
  SECTION("checking all array exchange invalid sizes") {
-   std::vector<Array<const int>> send_array_list(parallel::commSize());
+   std::vector<Array<const int>> send_array_list(parallel::size());
    for (size_t i=0; i<send_array_list.size(); ++i) {
      Array<int> send_array(i+1);
-     send_array.fill(parallel::commRank());
+     send_array.fill(parallel::rank());
      send_array_list[i] = send_array;
    }
 
-   std::vector<Array<int>> recv_array_list(parallel::commSize());
+   std::vector<Array<int>> recv_array_list(parallel::size());
    REQUIRE_THROWS_AS(parallel::exchange(send_array_list, recv_array_list),
                      AssertError);
  }
 #endif // NDEBUG
 
  SECTION("checking barrier") {
-   for (size_t i=0; i<parallel::commSize(); ++i) {
-     if (i==parallel::commRank()) {
+   for (size_t i=0; i<parallel::size(); ++i) {
+     if (i==parallel::rank()) {
        std::ofstream file;
        if (i==0) {
          file.open("barrier_test", std::ios_base::out);
@@ -414,7 +420,7 @@ TEST_CASE("Messenger", "[mpi]") {
        number_list.push_back(value);
      }
    }
-   REQUIRE(number_list.size() == parallel::commSize());
+   REQUIRE(number_list.size() == parallel::size());
    for (size_t i=0; i<number_list.size(); ++i) {
      REQUIRE(number_list[i] == i);
    }
diff --git a/tests/mpi_test_main.cpp b/tests/mpi_test_main.cpp
index 7607f79a8..c3b8593f3 100644
--- a/tests/mpi_test_main.cpp
+++ b/tests/mpi_test_main.cpp
@@ -11,7 +11,7 @@ int main( int argc, char* argv[] )
   parallel::Messenger::create(argc, argv);
   Kokkos::initialize({4,-1,-1,true});
 
-  if (parallel::commRank() != 0) {
+  if (parallel::rank() != 0) {
     setenv("GCOV_PREFIX", "/dev/null", 1);
   }
 
-- 
GitLab
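
Postscript: for readers migrating code, here is a minimal usage sketch of the renamed API. It is an illustration appended after the patch, not part of the commit: the main() scaffold, the include path and the printed message are assumptions, start-up follows tests/mpi_test_main.cpp, and only parallel::Messenger::create(), parallel::rank(), parallel::size() and parallel::allReduceMax() appear in code touched by this patch. Note that rank() and size() now return const size_t& rather than const int&, which is why several tests above gained static_cast<int> conversions.

#include <Messenger.hpp>  // include path assumed; the header lives in src/utils/

#include <iostream>

int main(int argc, char* argv[])
{
  // Start-up as in tests/mpi_test_main.cpp; MPI is initialized here
  // when the build defines PASTIS_HAS_MPI.
  parallel::Messenger::create(argc, argv);

  // Before this patch: parallel::commRank() / parallel::commSize()
  // After this patch:  parallel::rank()     / parallel::size()
  const size_t rank = parallel::rank();  // rank of the calling process
  const size_t size = parallel::size();  // number of processes

  // A reduction over all ranks, as exercised in tests/mpi_test_Messenger.cpp.
  const size_t max_rank = parallel::allReduceMax(rank);

  if (rank == 0) {
    std::cout << "running on " << size << " processes, highest rank is "
              << max_rank << '\n';
  }

  // Shutdown/finalization is omitted here; the real code handles it elsewhere.
  return 0;
}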