diff --git a/src/algebra/CRSMatrix.hpp b/src/algebra/CRSMatrix.hpp index c2df50e454fa883bf402c56ac81f8617cf9b7742..413466b0a065134006ff2d06805fc1dc47f12806 100644 --- a/src/algebra/CRSMatrix.hpp +++ b/src/algebra/CRSMatrix.hpp @@ -190,7 +190,7 @@ class CRSMatrix Vector<MutableDataType> Ax(m_nb_rows); parallel_for( - m_nb_rows, PUGS_LAMBDA(const IndexType& i_row) { + m_nb_rows, PUGS_CLASS_LAMBDA(const IndexType& i_row) { const auto row_begin = m_row_map[i_row]; const auto row_end = m_row_map[i_row + 1]; MutableDataType sum{0}; diff --git a/src/algebra/SmallMatrix.hpp b/src/algebra/SmallMatrix.hpp index b8d3b81b5fad0ac65521a94aa4f94183f4cff333..03d69f329dbfc9592d5eed02e6fad66885df7de6 100644 --- a/src/algebra/SmallMatrix.hpp +++ b/src/algebra/SmallMatrix.hpp @@ -31,17 +31,20 @@ class [[nodiscard]] SmallMatrix // LCOV_EXCL_LINE public: PUGS_INLINE - bool isSquare() const noexcept + bool + isSquare() const noexcept { return m_nb_rows == m_nb_columns; } - friend PUGS_INLINE SmallMatrix<std::remove_const_t<DataType>> copy(const SmallMatrix& A) noexcept + friend PUGS_INLINE SmallMatrix<std::remove_const_t<DataType>> + copy(const SmallMatrix& A) noexcept { return SmallMatrix<std::remove_const_t<DataType>>{A.m_nb_rows, A.m_nb_columns, copy(A.m_values)}; } - friend PUGS_INLINE SmallMatrix<std::remove_const_t<DataType>> transpose(const SmallMatrix& A) + friend PUGS_INLINE SmallMatrix<std::remove_const_t<DataType>> + transpose(const SmallMatrix& A) { SmallMatrix<std::remove_const_t<DataType>> A_transpose{A.m_nb_columns, A.m_nb_rows}; for (size_t i = 0; i < A.m_nb_rows; ++i) { @@ -52,14 +55,16 @@ class [[nodiscard]] SmallMatrix // LCOV_EXCL_LINE return A_transpose; } - friend PUGS_INLINE SmallMatrix operator*(const DataType& a, const SmallMatrix& A) + friend PUGS_INLINE SmallMatrix + operator*(const DataType& a, const SmallMatrix& A) { SmallMatrix<std::remove_const_t<DataType>> aA = copy(A); return aA *= a; } template <typename DataType2> - PUGS_INLINE SmallVector<std::remove_const_t<DataType>> operator*(const SmallVector<DataType2>& x) const + PUGS_INLINE SmallVector<std::remove_const_t<DataType>> + operator*(const SmallVector<DataType2>& x) const { static_assert(std::is_same_v<std::remove_const_t<DataType>, std::remove_const_t<DataType2>>, "incompatible data types"); @@ -77,7 +82,8 @@ class [[nodiscard]] SmallMatrix // LCOV_EXCL_LINE } template <typename DataType2> - PUGS_INLINE SmallMatrix<std::remove_const_t<DataType>> operator*(const SmallMatrix<DataType2>& B) const + PUGS_INLINE SmallMatrix<std::remove_const_t<DataType>> + operator*(const SmallMatrix<DataType2>& B) const { static_assert(std::is_same_v<std::remove_const_t<DataType>, std::remove_const_t<DataType2>>, "incompatible data types"); @@ -98,22 +104,25 @@ class [[nodiscard]] SmallMatrix // LCOV_EXCL_LINE } template <typename DataType2> - PUGS_INLINE SmallMatrix& operator/=(const DataType2& a) + PUGS_INLINE SmallMatrix& + operator/=(const DataType2& a) { const auto inv_a = 1. 
/ a; return (*this) *= inv_a; } template <typename DataType2> - PUGS_INLINE SmallMatrix& operator*=(const DataType2& a) + PUGS_INLINE SmallMatrix& + operator*=(const DataType2& a) { parallel_for( - m_values.size(), PUGS_LAMBDA(index_type i) { m_values[i] *= a; }); + m_values.size(), PUGS_CLASS_LAMBDA(index_type i) { m_values[i] *= a; }); return *this; } template <typename DataType2> - PUGS_INLINE SmallMatrix& operator-=(const SmallMatrix<DataType2>& B) + PUGS_INLINE SmallMatrix& + operator-=(const SmallMatrix<DataType2>& B) { static_assert(std::is_same_v<std::remove_const_t<DataType>, std::remove_const_t<DataType2>>, "incompatible data types"); @@ -121,12 +130,13 @@ class [[nodiscard]] SmallMatrix // LCOV_EXCL_LINE "cannot substract matrix: incompatible sizes"); parallel_for( - m_values.size(), PUGS_LAMBDA(index_type i) { m_values[i] -= B.m_values[i]; }); + m_values.size(), PUGS_CLASS_LAMBDA(index_type i) { m_values[i] -= B.m_values[i]; }); return *this; } template <typename DataType2> - PUGS_INLINE SmallMatrix& operator+=(const SmallMatrix<DataType2>& B) + PUGS_INLINE SmallMatrix& + operator+=(const SmallMatrix<DataType2>& B) { static_assert(std::is_same_v<std::remove_const_t<DataType>, std::remove_const_t<DataType2>>, "incompatible data types"); @@ -134,12 +144,13 @@ class [[nodiscard]] SmallMatrix // LCOV_EXCL_LINE "cannot add matrix: incompatible sizes"); parallel_for( - m_values.size(), PUGS_LAMBDA(index_type i) { m_values[i] += B.m_values[i]; }); + m_values.size(), PUGS_CLASS_LAMBDA(index_type i) { m_values[i] += B.m_values[i]; }); return *this; } template <typename DataType2> - PUGS_INLINE SmallMatrix<std::remove_const_t<DataType>> operator+(const SmallMatrix<DataType2>& B) const + PUGS_INLINE SmallMatrix<std::remove_const_t<DataType>> + operator+(const SmallMatrix<DataType2>& B) const { static_assert(std::is_same_v<std::remove_const_t<DataType>, std::remove_const_t<DataType2>>, "incompatible data types"); @@ -149,13 +160,14 @@ class [[nodiscard]] SmallMatrix // LCOV_EXCL_LINE SmallMatrix<std::remove_const_t<DataType>> sum{B.numberOfRows(), B.numberOfColumns()}; parallel_for( - m_values.size(), PUGS_LAMBDA(index_type i) { sum.m_values[i] = m_values[i] + B.m_values[i]; }); + m_values.size(), PUGS_CLASS_LAMBDA(index_type i) { sum.m_values[i] = m_values[i] + B.m_values[i]; }); return sum; } template <typename DataType2> - PUGS_INLINE SmallMatrix<std::remove_const_t<DataType>> operator-(const SmallMatrix<DataType2>& B) const + PUGS_INLINE SmallMatrix<std::remove_const_t<DataType>> + operator-(const SmallMatrix<DataType2>& B) const { static_assert(std::is_same_v<std::remove_const_t<DataType>, std::remove_const_t<DataType2>>, "incompatible data types"); @@ -165,53 +177,60 @@ class [[nodiscard]] SmallMatrix // LCOV_EXCL_LINE SmallMatrix<std::remove_const_t<DataType>> difference{B.numberOfRows(), B.numberOfColumns()}; parallel_for( - m_values.size(), PUGS_LAMBDA(index_type i) { difference.m_values[i] = m_values[i] - B.m_values[i]; }); + m_values.size(), PUGS_CLASS_LAMBDA(index_type i) { difference.m_values[i] = m_values[i] - B.m_values[i]; }); return difference; } PUGS_INLINE - DataType& operator()(index_type i, index_type j) const noexcept(NO_ASSERT) + DataType& + operator()(index_type i, index_type j) const noexcept(NO_ASSERT) { Assert(i < m_nb_rows and j < m_nb_columns, "cannot access element: invalid indices"); return m_values[i * m_nb_columns + j]; } PUGS_INLINE - size_t numberOfRows() const noexcept + size_t + numberOfRows() const noexcept { return m_nb_rows; } PUGS_INLINE - size_t 
numberOfColumns() const noexcept + size_t + numberOfColumns() const noexcept { return m_nb_columns; } - PUGS_INLINE void fill(const DataType& value) noexcept + PUGS_INLINE void + fill(const DataType& value) noexcept { m_values.fill(value); } - PUGS_INLINE SmallMatrix& operator=(ZeroType) noexcept + PUGS_INLINE SmallMatrix& + operator=(ZeroType) noexcept { m_values.fill(0); return *this; } - PUGS_INLINE SmallMatrix& operator=(IdentityType) noexcept(NO_ASSERT) + PUGS_INLINE SmallMatrix& + operator=(IdentityType) noexcept(NO_ASSERT) { Assert(m_nb_rows == m_nb_columns, "identity must be a square matrix"); m_values.fill(0); parallel_for( - m_nb_rows, PUGS_LAMBDA(const index_type i) { m_values[i * m_nb_rows + i] = 1; }); + m_nb_rows, PUGS_CLASS_LAMBDA(const index_type i) { m_values[i * m_nb_rows + i] = 1; }); return *this; } template <typename DataType2> - PUGS_INLINE SmallMatrix& operator=(const SmallMatrix<DataType2>& A) noexcept + PUGS_INLINE SmallMatrix& + operator=(const SmallMatrix<DataType2>& A) noexcept { // ensures that DataType is the same as source DataType2 static_assert(std::is_same<std::remove_const_t<DataType>, std::remove_const_t<DataType2>>(), @@ -232,7 +251,8 @@ class [[nodiscard]] SmallMatrix // LCOV_EXCL_LINE PUGS_INLINE SmallMatrix& operator=(SmallMatrix&&) = default; - friend std::ostream& operator<<(std::ostream& os, const SmallMatrix& A) + friend std::ostream& + operator<<(std::ostream& os, const SmallMatrix& A) { for (size_t i = 0; i < A.numberOfRows(); ++i) { os << i << '|'; @@ -259,7 +279,7 @@ class [[nodiscard]] SmallMatrix // LCOV_EXCL_LINE SmallMatrix(const SmallMatrix&) = default; - SmallMatrix(SmallMatrix &&) = default; + SmallMatrix(SmallMatrix&&) = default; explicit SmallMatrix(size_t nb_rows, size_t nb_columns, const SmallArray<DataType>& values) : m_nb_rows{nb_rows}, m_nb_columns{nb_columns}, m_values{values} diff --git a/src/algebra/SmallVector.hpp b/src/algebra/SmallVector.hpp index d4a0bf84268bdf1d164cf6216153117adafaab56..296b8be71ce8b20346174ce40f41edeb78f1b1c2 100644 --- a/src/algebra/SmallVector.hpp +++ b/src/algebra/SmallVector.hpp @@ -58,7 +58,7 @@ class SmallVector // LCOV_EXCL_LINE { SmallVector<std::remove_const_t<DataType>> opposite(this->size()); parallel_for( - opposite.size(), PUGS_LAMBDA(index_type i) { opposite.m_values[i] = -m_values[i]; }); + opposite.size(), PUGS_CLASS_LAMBDA(index_type i) { opposite.m_values[i] = -m_values[i]; }); return opposite; } @@ -98,7 +98,7 @@ class SmallVector // LCOV_EXCL_LINE operator*=(const DataType2& a) { parallel_for( - this->size(), PUGS_LAMBDA(index_type i) { m_values[i] *= a; }); + this->size(), PUGS_CLASS_LAMBDA(index_type i) { m_values[i] *= a; }); return *this; } @@ -109,7 +109,7 @@ class SmallVector // LCOV_EXCL_LINE Assert(this->size() == y.size(), "cannot substract vector: incompatible sizes"); parallel_for( - this->size(), PUGS_LAMBDA(index_type i) { m_values[i] -= y[i]; }); + this->size(), PUGS_CLASS_LAMBDA(index_type i) { m_values[i] -= y[i]; }); return *this; } @@ -121,7 +121,7 @@ class SmallVector // LCOV_EXCL_LINE Assert(this->size() == y.size(), "cannot add vector: incompatible sizes"); parallel_for( - this->size(), PUGS_LAMBDA(index_type i) { m_values[i] += y[i]; }); + this->size(), PUGS_CLASS_LAMBDA(index_type i) { m_values[i] += y[i]; }); return *this; } @@ -134,7 +134,7 @@ class SmallVector // LCOV_EXCL_LINE SmallVector<std::remove_const_t<DataType>> sum{y.size()}; parallel_for( - this->size(), PUGS_LAMBDA(index_type i) { sum.m_values[i] = m_values[i] + y[i]; }); + this->size(), 
PUGS_CLASS_LAMBDA(index_type i) { sum.m_values[i] = m_values[i] + y[i]; }); return sum; } @@ -147,7 +147,7 @@ class SmallVector // LCOV_EXCL_LINE SmallVector<std::remove_const_t<DataType>> sum{y.size()}; parallel_for( - this->size(), PUGS_LAMBDA(index_type i) { sum.m_values[i] = m_values[i] - y[i]; }); + this->size(), PUGS_CLASS_LAMBDA(index_type i) { sum.m_values[i] = m_values[i] - y[i]; }); return sum; } diff --git a/src/algebra/Vector.hpp b/src/algebra/Vector.hpp index 749a795a489f12a671f4ecfd452104702ba68147..e034429e94ea732cf22fd39c56d2826faf8322d5 100644 --- a/src/algebra/Vector.hpp +++ b/src/algebra/Vector.hpp @@ -59,7 +59,7 @@ class Vector // LCOV_EXCL_LINE { Vector<std::remove_const_t<DataType>> opposite(this->size()); parallel_for( - opposite.size(), PUGS_LAMBDA(index_type i) { opposite.m_values[i] = -m_values[i]; }); + opposite.size(), PUGS_CLASS_LAMBDA(index_type i) { opposite.m_values[i] = -m_values[i]; }); return opposite; } @@ -98,7 +98,7 @@ class Vector // LCOV_EXCL_LINE operator*=(const DataType2& a) { parallel_for( - this->size(), PUGS_LAMBDA(index_type i) { m_values[i] *= a; }); + this->size(), PUGS_CLASS_LAMBDA(index_type i) { m_values[i] *= a; }); return *this; } @@ -109,7 +109,7 @@ class Vector // LCOV_EXCL_LINE Assert(this->size() == y.size(), "cannot substract vector: incompatible sizes"); parallel_for( - this->size(), PUGS_LAMBDA(index_type i) { m_values[i] -= y[i]; }); + this->size(), PUGS_CLASS_LAMBDA(index_type i) { m_values[i] -= y[i]; }); return *this; } @@ -121,7 +121,7 @@ class Vector // LCOV_EXCL_LINE Assert(this->size() == y.size(), "cannot add vector: incompatible sizes"); parallel_for( - this->size(), PUGS_LAMBDA(index_type i) { m_values[i] += y[i]; }); + this->size(), PUGS_CLASS_LAMBDA(index_type i) { m_values[i] += y[i]; }); return *this; } @@ -134,7 +134,7 @@ class Vector // LCOV_EXCL_LINE Vector<std::remove_const_t<DataType>> sum{y.size()}; parallel_for( - this->size(), PUGS_LAMBDA(index_type i) { sum.m_values[i] = m_values[i] + y[i]; }); + this->size(), PUGS_CLASS_LAMBDA(index_type i) { sum.m_values[i] = m_values[i] + y[i]; }); return sum; } @@ -147,7 +147,7 @@ class Vector // LCOV_EXCL_LINE Vector<std::remove_const_t<DataType>> sum{y.size()}; parallel_for( - this->size(), PUGS_LAMBDA(index_type i) { sum.m_values[i] = m_values[i] - y[i]; }); + this->size(), PUGS_CLASS_LAMBDA(index_type i) { sum.m_values[i] = m_values[i] - y[i]; }); return sum; } diff --git a/src/dev/ParallelChecker.hpp b/src/dev/ParallelChecker.hpp index e37729292e46212dec5257fa5681e83a2e591f57..3c5bcc7146e95c45ae06b5356c6471c5cffeaf5b 100644 --- a/src/dev/ParallelChecker.hpp +++ b/src/dev/ParallelChecker.hpp @@ -222,6 +222,31 @@ class ParallelChecker } } + template <ItemType item_type, ItemType sub_item_type> + Array<const typename ConnectivityMatrix::IndexType> + _getSubItemValues(const std::shared_ptr<const IConnectivity>& i_connectivity) + { + switch (i_connectivity->dimension()) { + case 1: { + const Connectivity<1>& connectivity = dynamic_cast<const Connectivity<1>&>(*i_connectivity); + return connectivity.getMatrix(item_type, sub_item_type).values(); + } + case 2: { + const Connectivity<2>& connectivity = dynamic_cast<const Connectivity<2>&>(*i_connectivity); + return connectivity.getMatrix(item_type, sub_item_type).values(); + } + case 3: { + const Connectivity<3>& connectivity = dynamic_cast<const Connectivity<3>&>(*i_connectivity); + return connectivity.getMatrix(item_type, sub_item_type).values(); + } + // LCOV_EXCL_START + default: { + throw UnexpectedError("unexpected 
connectivity dimension"); + } + // LCOV_EXCL_STOP + } + } + template <ItemType item_type> Array<const int> _getItemOwner(const std::shared_ptr<const IConnectivity>& i_connectivity) const @@ -301,14 +326,14 @@ class ParallelChecker } HighFive::DataSet item_numbers = file.getDataSet(item_number_path + "/numbers"); - group.createHardLink("numbers", item_numbers); + group.createHardLink(std::string(itemName(item_type)) + "_numbers", item_numbers); } template <typename ItemOfItem> void - _writeSubItemRowsMap(const std::shared_ptr<const IConnectivity> i_connectivity, - HighFive::File file, - HighFive::Group group) + _writeSubItemMatrix(const std::shared_ptr<const IConnectivity> i_connectivity, + HighFive::File file, + HighFive::Group group) { constexpr ItemType item_type = ItemOfItem::item_type; constexpr ItemType sub_item_type = ItemOfItem::sub_item_type; @@ -321,10 +346,14 @@ class ParallelChecker HighFive::Group subitem_of_item_row_map_group = file.createGroup(subitem_of_item_row_map_path); this->_writeArray(subitem_of_item_row_map_group, "rows_map", this->_getSubItemRowsMap<item_type, sub_item_type>(i_connectivity)); + this->_writeArray(subitem_of_item_row_map_group, "sub_item_index", + this->_getSubItemValues<item_type, sub_item_type>(i_connectivity)); } HighFive::DataSet subitem_of_item_row_map = file.getDataSet(subitem_of_item_row_map_path + "/rows_map"); group.createHardLink("rows_map", subitem_of_item_row_map); + HighFive::DataSet subitem_of_item_index = file.getDataSet(subitem_of_item_row_map_path + "/sub_item_index"); + group.createHardLink("sub_item_index", subitem_of_item_index); } template <typename DataType, ItemType item_type> @@ -362,7 +391,10 @@ class ParallelChecker << rang::fg::reset << ")\n"; is_comparable = false; } - std::vector reference_data_shape = group.getDataSet(reference_name).getSpace().getDimensions(); + + auto data_group = group.getGroup("data"); + + std::vector reference_data_shape = data_group.getDataSet(reference_name).getSpace().getDimensions(); if (reference_data_shape.size() != data_shape.size()) { std::cout << rang::fg::cyan << " | " << rang::fgB::red << "different data shape kind: reference (" << rang::fgB::yellow << reference_data_shape.size() << "d array" << rang::fgB::red << ") / target (" @@ -484,7 +516,9 @@ class ParallelChecker group.createAttribute("item_type", std::string{itemName(item_type)}); group.createAttribute("data_type", demangle<DataType>()); - this->_writeArray(group, name, item_value.arrayView()); + auto data_group = group.createGroup("data"); + + this->_writeArray(data_group, name, item_value.arrayView()); this->_writeItemNumbers<item_type>(i_connectivity, file, group); @@ -523,7 +557,8 @@ class ParallelChecker group.createAttribute("item_type", std::string{itemName(item_type)}); group.createAttribute("data_type", demangle<DataType>()); - this->_writeTable(group, name, item_array.tableView()); + auto data_group = group.createGroup("data"); + this->_writeTable(data_group, name, item_array.tableView()); this->_writeItemNumbers<item_type>(i_connectivity, file, group); @@ -567,10 +602,12 @@ class ParallelChecker group.createAttribute("data_type", demangle<DataType>()); - this->_writeArray(group, name, subitem_value_per_item.arrayView()); + auto data_group = group.createGroup("data"); + this->_writeArray(data_group, name, subitem_value_per_item.arrayView()); this->_writeItemNumbers<item_type>(i_connectivity, file, group); - this->_writeSubItemRowsMap<ItemOfItem>(i_connectivity, file, group); + 
this->_writeItemNumbers<sub_item_type>(i_connectivity, file, group); + this->_writeSubItemMatrix<ItemOfItem>(i_connectivity, file, group); ++m_tag; @@ -612,10 +649,12 @@ class ParallelChecker group.createAttribute("data_type", demangle<DataType>()); - this->_writeTable(group, name, subitem_value_per_item.tableView()); + auto data_group = group.createGroup("data"); + this->_writeTable(data_group, name, subitem_value_per_item.tableView()); this->_writeItemNumbers<item_type>(i_connectivity, file, group); - this->_writeSubItemRowsMap<ItemOfItem>(i_connectivity, file, group); + this->_writeItemNumbers<sub_item_type>(i_connectivity, file, group); + this->_writeSubItemMatrix<ItemOfItem>(i_connectivity, file, group); ++m_tag; @@ -650,8 +689,12 @@ class ParallelChecker std::vector<size_t>{item_value.numberOfItems()}, i_connectivity, group); - Array<const int> reference_item_numbers = this->_readArray<int>(group, "numbers"); - Array<const DataType> reference_item_value = this->_readArray<DataType>(group, reference_name); + Array<const int> reference_item_numbers = + this->_readArray<int>(group, std::string{itemName(item_type)} + "_numbers"); + + auto data_group = group.getGroup("data"); + + Array<const DataType> reference_item_value = this->_readArray<DataType>(data_group, reference_name); Array<const int> item_numbers = this->_getItemNumber<item_type>(i_connectivity); @@ -776,8 +819,12 @@ class ParallelChecker item_array.sizeOfArrays()}, i_connectivity, group); - Array<const int> reference_item_numbers = this->_readArray<int>(group, "numbers"); - Table<const DataType> reference_item_array = this->_readTable<DataType>(group, reference_name); + Array<const int> reference_item_numbers = + this->_readArray<int>(group, std::string{itemName(item_type)} + "_numbers"); + + auto data_group = group.getGroup("data"); + + Table<const DataType> reference_item_array = this->_readTable<DataType>(data_group, reference_name); Array<const int> item_numbers = this->_getItemNumber<item_type>(i_connectivity); @@ -910,12 +957,19 @@ class ParallelChecker constexpr ItemType sub_item_type = ItemOfItem::sub_item_type; using IndexType = typename ConnectivityMatrix::IndexType; - Array<const int> reference_item_numbers = this->_readArray<int>(group, "numbers"); + Array<const int> reference_item_numbers = + this->_readArray<int>(group, std::string{itemName(item_type)} + "_numbers"); + Array<const int> reference_sub_item_numbers = + this->_readArray<int>(group, std::string{itemName(sub_item_type)} + "_numbers"); Array<const IndexType> reference_subitem_rows_map = this->_readArray<IndexType>(group, "rows_map"); + Array<const IndexType> reference_subitem_index = this->_readArray<IndexType>(group, "sub_item_index"); + + auto data_group = group.getGroup("data"); - Array<const DataType> reference_subitem_value_per_item = this->_readArray<DataType>(group, reference_name); + Array<const DataType> reference_subitem_value_per_item = this->_readArray<DataType>(data_group, reference_name); Array<const int> item_numbers = this->_getItemNumber<item_type>(i_connectivity); + Array<const int> sub_item_numbers = this->_getItemNumber<sub_item_type>(i_connectivity); Array<const IndexType> sub_item_row_map = this->_getSubItemRowsMap<item_type, sub_item_type>(i_connectivity); using ItemId = ItemIdT<item_type>; @@ -949,6 +1003,8 @@ class ParallelChecker } this->_checkGlobalNumberOfItems<item_type>(i_connectivity, reference_item_numbers.size()); + auto item_to_sub_item_matrix = i_connectivity->getMatrix(item_type, sub_item_type); + Array<const int> 
owner = this->_getItemOwner<item_type>(i_connectivity); bool has_own_differences = false; @@ -960,12 +1016,29 @@ class ParallelChecker const size_t index_end_in_reference = reference_subitem_rows_map[reference_item_index + 1]; bool item_is_same = true; - if ((index_end_in_reference - index_begin_in_reference) != subitem_value_per_item[item_id].size()) { - item_is_same = false; + + if ((item_type > sub_item_type) or (static_cast<size_t>(owner[item_id]) == parallel::rank())) { + if ((index_end_in_reference - index_begin_in_reference) != subitem_value_per_item[item_id].size()) { + item_is_same = false; + } else { + for (size_t i_sub_item = 0; i_sub_item < subitem_value_per_item[item_id].size(); ++i_sub_item) { + if (reference_subitem_value_per_item[index_begin_in_reference + i_sub_item] != + subitem_value_per_item[item_id][i_sub_item]) { + item_is_same = false; + } + } + } } else { - for (size_t i_sub_item = 0; i_sub_item < subitem_value_per_item[item_id].size(); ++i_sub_item) { - if (reference_subitem_value_per_item[index_begin_in_reference + i_sub_item] != - subitem_value_per_item[item_id][i_sub_item]) { + std::map<int, size_t> sub_item_number_to_index; + for (size_t index = index_begin_in_reference; index < index_end_in_reference; ++index) { + sub_item_number_to_index[reference_sub_item_numbers[reference_subitem_index[index]]] = index; + } + + auto sub_item_list = item_to_sub_item_matrix[item_id]; + for (size_t i_sub_item = 0; i_sub_item < sub_item_list.size(); ++i_sub_item) { + auto sub_item_id = sub_item_list[i_sub_item]; + auto index = sub_item_number_to_index.at(sub_item_numbers[sub_item_id]); + if (reference_subitem_value_per_item[index] != subitem_value_per_item[item_id][i_sub_item]) { item_is_same = false; } } @@ -1077,12 +1150,19 @@ class ParallelChecker constexpr ItemType sub_item_type = ItemOfItem::sub_item_type; using IndexType = typename ConnectivityMatrix::IndexType; - Array<const int> reference_item_numbers = this->_readArray<int>(group, "numbers"); + Array<const int> reference_item_numbers = + this->_readArray<int>(group, std::string{itemName(item_type)} + "_numbers"); + Array<const int> reference_sub_item_numbers = + this->_readArray<int>(group, std::string{itemName(sub_item_type)} + "_numbers"); Array<const IndexType> reference_subitem_rows_map = this->_readArray<IndexType>(group, "rows_map"); + Array<const IndexType> reference_subitem_index = this->_readArray<IndexType>(group, "sub_item_index"); + + auto data_group = group.getGroup("data"); - Table<const DataType> reference_subitem_array_per_item = this->_readTable<DataType>(group, reference_name); + Table<const DataType> reference_subitem_array_per_item = this->_readTable<DataType>(data_group, reference_name); Array<const int> item_numbers = this->_getItemNumber<item_type>(i_connectivity); + Array<const int> sub_item_numbers = this->_getItemNumber<sub_item_type>(i_connectivity); Array<const IndexType> sub_item_row_map = this->_getSubItemRowsMap<item_type, sub_item_type>(i_connectivity); using ItemId = ItemIdT<item_type>; @@ -1116,6 +1196,8 @@ class ParallelChecker } this->_checkGlobalNumberOfItems<item_type>(i_connectivity, reference_item_numbers.size()); + auto item_to_sub_item_matrix = i_connectivity->getMatrix(item_type, sub_item_type); + Array<const int> owner = this->_getItemOwner<item_type>(i_connectivity); bool has_own_differences = false; @@ -1127,13 +1209,32 @@ class ParallelChecker const size_t index_end_in_reference = reference_subitem_rows_map[reference_item_index + 1]; bool item_is_same = true; - if 
((index_end_in_reference - index_begin_in_reference) != subitem_array_per_item[item_id].numberOfRows()) { - item_is_same = false; + + if ((item_type > sub_item_type) or (static_cast<size_t>(owner[item_id]) == parallel::rank())) { + if ((index_end_in_reference - index_begin_in_reference) != subitem_array_per_item[item_id].numberOfRows()) { + item_is_same = false; + } else { + for (size_t i_sub_item = 0; i_sub_item < subitem_array_per_item[item_id].numberOfRows(); ++i_sub_item) { + for (size_t i = 0; i < subitem_array_per_item.sizeOfArrays(); ++i) { + if (reference_subitem_array_per_item[index_begin_in_reference + i_sub_item][i] != + subitem_array_per_item[item_id][i_sub_item][i]) { + item_is_same = false; + } + } + } + } } else { - for (size_t i_sub_item = 0; i_sub_item < subitem_array_per_item[item_id].numberOfRows(); ++i_sub_item) { + std::map<int, size_t> sub_item_number_to_index; + for (size_t index = index_begin_in_reference; index < index_end_in_reference; ++index) { + sub_item_number_to_index[reference_sub_item_numbers[reference_subitem_index[index]]] = index; + } + + auto sub_item_list = item_to_sub_item_matrix[item_id]; + for (size_t i_sub_item = 0; i_sub_item < sub_item_list.size(); ++i_sub_item) { + auto sub_item_id = sub_item_list[i_sub_item]; + auto index = sub_item_number_to_index.at(sub_item_numbers[sub_item_id]); for (size_t i = 0; i < subitem_array_per_item.sizeOfArrays(); ++i) { - if (reference_subitem_array_per_item[index_begin_in_reference + i_sub_item][i] != - subitem_array_per_item[item_id][i_sub_item][i]) { + if (reference_subitem_array_per_item[index][i] != subitem_array_per_item[item_id][i_sub_item][i]) { item_is_same = false; } } diff --git a/src/language/utils/BuiltinFunctionEmbedder.hpp b/src/language/utils/BuiltinFunctionEmbedder.hpp index 251c39e6d5eb5dc46423e75dedea28947d1a29be..da9ab46a32a2f0570f6f685d7aad35dac886f461 100644 --- a/src/language/utils/BuiltinFunctionEmbedder.hpp +++ b/src/language/utils/BuiltinFunctionEmbedder.hpp @@ -106,7 +106,8 @@ class BuiltinFunctionEmbedderBase<FX(Args...)> : public IBuiltinFunctionEmbedder } template <size_t... I> - PUGS_INLINE std::vector<std::shared_ptr<const ASTNodeDataType>> _getCompoundDataTypes(std::index_sequence<I...>) const + PUGS_INLINE std::vector<std::shared_ptr<const ASTNodeDataType>> + _getCompoundDataTypes(std::index_sequence<I...>) const { std::vector<std::shared_ptr<const ASTNodeDataType>> compound_type_list; (compound_type_list.push_back(std::make_shared<ASTNodeDataType>(this->_getOneElementDataType<FX, I>())), ...); @@ -174,8 +175,8 @@ class BuiltinFunctionEmbedder }; template <typename T> -inline constexpr bool is_const_ref_or_non_ref = (std::is_const_v<T> and std::is_lvalue_reference_v<T>) or - (not std::is_lvalue_reference_v<T>); +inline constexpr bool is_const_ref_or_non_ref = + (std::is_const_v<T> and std::is_lvalue_reference_v<T>) or (not std::is_lvalue_reference_v<T>); template <typename FX, typename... Args> class BuiltinFunctionEmbedder<FX(Args...)> : public BuiltinFunctionEmbedderBase<FX(Args...)> @@ -283,7 +284,8 @@ class BuiltinFunctionEmbedder<FX(Args...)> : public BuiltinFunctionEmbedderBase< } template <size_t... 
I> - PUGS_INLINE std::vector<ASTNodeDataType> _getParameterDataTypes(std::index_sequence<I...>) const + PUGS_INLINE std::vector<ASTNodeDataType> + _getParameterDataTypes(std::index_sequence<I...>) const { std::vector<ASTNodeDataType> parameter_type_list; (parameter_type_list.push_back(this->template _getOneElementDataType<ArgsTuple, I>()), ...); @@ -298,9 +300,8 @@ class BuiltinFunctionEmbedder<FX(Args...)> : public BuiltinFunctionEmbedderBase< std::vector<DataVariant> vector_result; vector_result.reserve(std::tuple_size_v<decltype(tuple_result)>); - std:: - apply([&](auto&&... result) { ((vector_result.emplace_back(this->template _resultToDataVariant(result))), ...); }, - tuple_result); + std::apply([&](auto&&... result) { ((vector_result.emplace_back(this->_resultToDataVariant(result))), ...); }, + tuple_result); return AggregateDataVariant{std::move(vector_result)}; } @@ -338,7 +339,7 @@ class BuiltinFunctionEmbedder<FX(Args...)> : public BuiltinFunctionEmbedderBase< std::apply(m_f, t); return {}; } else { - return this->template _createHandler(std::apply(m_f, t)); + return this->_createHandler(std::apply(m_f, t)); } } @@ -375,9 +376,8 @@ class BuiltinFunctionEmbedder<FX(void)> : public BuiltinFunctionEmbedderBase<FX( std::vector<DataVariant> vector_result; vector_result.reserve(std::tuple_size_v<decltype(tuple_result)>); - std:: - apply([&](auto&&... result) { ((vector_result.emplace_back(this->template _resultToDataVariant(result))), ...); }, - tuple_result); + std::apply([&](auto&&... result) { ((vector_result.emplace_back(this->_resultToDataVariant(result))), ...); }, + tuple_result); return AggregateDataVariant{std::move(vector_result)}; } @@ -407,7 +407,7 @@ class BuiltinFunctionEmbedder<FX(void)> : public BuiltinFunctionEmbedderBase<FX( m_f(); return {}; } else { - return EmbeddedData(this->template _createHandler(m_f())); + return EmbeddedData(this->_createHandler(m_f())); } } diff --git a/src/mesh/Connectivity.cpp b/src/mesh/Connectivity.cpp index bd1e0d85b6f94d1d2d00326857853f4da75eacfe..65a8804950abad22fe2205569217cd8ff162b043 100644 --- a/src/mesh/Connectivity.cpp +++ b/src/mesh/Connectivity.cpp @@ -60,7 +60,7 @@ Connectivity<Dimension>::_buildFrom(const ConnectivityDescriptor& descriptor) const int rank = parallel::rank(); WeakCellValue<bool> cell_is_owned(*this); parallel_for( - this->numberOfCells(), PUGS_LAMBDA(CellId j) { cell_is_owned[j] = (m_cell_owner[j] == rank); }); + this->numberOfCells(), PUGS_CLASS_LAMBDA(CellId j) { cell_is_owned[j] = (m_cell_owner[j] == rank); }); m_cell_is_owned = cell_is_owned; } @@ -71,7 +71,7 @@ Connectivity<Dimension>::_buildFrom(const ConnectivityDescriptor& descriptor) const int rank = parallel::rank(); WeakNodeValue<bool> node_is_owned(*this, node_is_owned_array); parallel_for( - this->numberOfNodes(), PUGS_LAMBDA(NodeId r) { node_is_owned[r] = (m_node_owner[r] == rank); }); + this->numberOfNodes(), PUGS_CLASS_LAMBDA(NodeId r) { node_is_owned[r] = (m_node_owner[r] == rank); }); m_node_is_owned = node_is_owned; } @@ -122,7 +122,7 @@ Connectivity<Dimension>::_buildFrom(const ConnectivityDescriptor& descriptor) const int rank = parallel::rank(); WeakFaceValue<bool> face_is_owned(*this, face_is_owned_array); parallel_for( - this->numberOfFaces(), PUGS_LAMBDA(FaceId l) { face_is_owned[l] = (m_face_owner[l] == rank); }); + this->numberOfFaces(), PUGS_CLASS_LAMBDA(FaceId l) { face_is_owned[l] = (m_face_owner[l] == rank); }); m_face_is_owned = face_is_owned; } @@ -163,7 +163,7 @@ Connectivity<Dimension>::_buildFrom(const 
ConnectivityDescriptor& descriptor) const int rank = parallel::rank(); WeakEdgeValue<bool> edge_is_owned(*this); parallel_for( - this->numberOfEdges(), PUGS_LAMBDA(EdgeId e) { edge_is_owned[e] = (m_edge_owner[e] == rank); }); + this->numberOfEdges(), PUGS_CLASS_LAMBDA(EdgeId e) { edge_is_owned[e] = (m_edge_owner[e] == rank); }); m_edge_is_owned = edge_is_owned; } diff --git a/src/mesh/ItemValue.hpp b/src/mesh/ItemValue.hpp index da6bb88eb5a9d92dda08ee7ed535aae104639d95..077e97358ed1b71e9f15cbfa48533a2db3602499 100644 --- a/src/mesh/ItemValue.hpp +++ b/src/mesh/ItemValue.hpp @@ -69,7 +69,7 @@ class ItemValue copy_to(const ItemValue<DataType, item_type, ConnectivityPtr>& source, const ItemValue<std::remove_const_t<DataType>, item_type, ConnectivityPtr2>& destination) { - Assert(destination.connectivity_ptr() == source.connectivity_ptr(), "different connectivities"); + Assert(destination.m_connectivity_ptr->id() == source.m_connectivity_ptr->id(), "different connectivities"); copy_to(source.m_values, destination.m_values); } diff --git a/src/mesh/MeshData.hpp b/src/mesh/MeshData.hpp index 4fb87bc5ad4aee87c7dd92e9232ec4508145a14f..1dc4498b3a6ddb9c3c9ff42af4de4a32c39271d1 100644 --- a/src/mesh/MeshData.hpp +++ b/src/mesh/MeshData.hpp @@ -257,7 +257,7 @@ class MeshData<Mesh<Dimension>> CellValue<Rd> cell_centroid{m_mesh.connectivity()}; parallel_for( - m_mesh.numberOfCells(), PUGS_LAMBDA(CellId j) { + m_mesh.numberOfCells(), PUGS_CLASS_LAMBDA(CellId j) { const Rd xj = m_cell_iso_barycenter[j]; const auto& cell_faces = cell_to_face_matrix[j]; diff --git a/src/mesh/PrimalToDiamondDualConnectivityDataMapper.hpp b/src/mesh/PrimalToDiamondDualConnectivityDataMapper.hpp index a803732dbb59be501f2a6ae995c57505a6dc4b0a..7412a72a6e3aa63290421f823354bceef6f9f1ac 100644 --- a/src/mesh/PrimalToDiamondDualConnectivityDataMapper.hpp +++ b/src/mesh/PrimalToDiamondDualConnectivityDataMapper.hpp @@ -41,14 +41,14 @@ class PrimalToDiamondDualConnectivityDataMapper : public IPrimalToDualConnectivi "unexpected connectivity for dual NodeValue"); parallel_for( - m_primal_node_to_dual_node_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_node_to_dual_node_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_node_id, dual_node_id] = m_primal_node_to_dual_node_map[i]; dual_node_value[dual_node_id] = primal_node_value[primal_node_id]; }); parallel_for( - m_primal_cell_to_dual_node_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_cell_to_dual_node_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_cell_id, dual_node_id] = m_primal_cell_to_dual_node_map[i]; dual_node_value[dual_node_id] = primal_cell_value[primal_cell_id]; }); @@ -73,14 +73,14 @@ class PrimalToDiamondDualConnectivityDataMapper : public IPrimalToDualConnectivi "unexpected connectivity for dual NodeValue"); parallel_for( - m_primal_node_to_dual_node_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_node_to_dual_node_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_node_id, dual_node_id] = m_primal_node_to_dual_node_map[i]; primal_node_value[primal_node_id] = dual_node_value[dual_node_id]; }); parallel_for( - m_primal_cell_to_dual_node_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_cell_to_dual_node_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_cell_id, dual_node_id] = m_primal_cell_to_dual_node_map[i]; primal_cell_value[primal_cell_id] = dual_node_value[dual_node_id]; }); @@ -105,7 +105,7 @@ class PrimalToDiamondDualConnectivityDataMapper : public IPrimalToDualConnectivi using OriginFaceId = ItemIdT<origin_face_type>; 
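
Note on the PUGS_LAMBDA -> PUGS_CLASS_LAMBDA substitutions in this and the surrounding hunks: they go together with the PugsMacros.hpp change further down, where PUGS_CLASS_LAMBDA becomes [ =, this ]. A minimal standalone sketch (hypothetical class, not PUGS code, compiled as C++20) of why lambdas defined inside member functions that touch data members want that explicit capture:

    // Illustration only: since C++20 the implicit capture of `this` through [=]
    // is deprecated, so member-function lambdas spell it out as [=, this].
    #include <cstddef>
    #include <vector>

    class Scaler
    {
     private:
      std::vector<double> m_values{1., 2., 3.};

     public:
      void
      scale(double a)
      {
        // [=] alone would still compile, but relies on the deprecated implicit
        // `this` capture; [=, this] is the explicit C++20 spelling.
        auto body = [=, this](std::size_t i) { m_values[i] *= a; };
        for (std::size_t i = 0; i < m_values.size(); ++i) {
          body(i);
        }
      }
    };

    int
    main()
    {
      Scaler{}.scale(2.);
      return 0;
    }
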
parallel_for( - m_primal_face_to_dual_cell_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_face_to_dual_cell_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_face_id, dual_cell_id] = m_primal_face_to_dual_cell_map[i]; const OriginFaceId origin_face_id = static_cast<typename OriginFaceId::base_type>(primal_face_id); @@ -134,7 +134,7 @@ class PrimalToDiamondDualConnectivityDataMapper : public IPrimalToDualConnectivi using DestinationFaceId = ItemIdT<destination_face_type>; parallel_for( - m_primal_face_to_dual_cell_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_face_to_dual_cell_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_face_id, dual_cell_id] = m_primal_face_to_dual_cell_map[i]; const DestinationFaceId destination_face_id = diff --git a/src/mesh/PrimalToDual1DConnectivityDataMapper.hpp b/src/mesh/PrimalToDual1DConnectivityDataMapper.hpp index 22570810227cbff09c1a72c680f5d34c0f522a44..052559ef98fd5ed23db17237c44a725eb4753cbb 100644 --- a/src/mesh/PrimalToDual1DConnectivityDataMapper.hpp +++ b/src/mesh/PrimalToDual1DConnectivityDataMapper.hpp @@ -47,7 +47,7 @@ class PrimalToDual1DConnectivityDataMapper : public IPrimalToDualConnectivityDat using DestinationNodeId = ItemIdT<destination_node_type>; parallel_for( - m_primal_node_to_dual_node_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_node_to_dual_node_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_node_id, dual_node_id] = m_primal_node_to_dual_node_map[i]; DestinationNodeId destination_node_id = static_cast<typename DestinationNodeId::base_type>(dual_node_id); @@ -57,7 +57,7 @@ class PrimalToDual1DConnectivityDataMapper : public IPrimalToDualConnectivityDat }); parallel_for( - m_primal_cell_to_dual_node_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_cell_to_dual_node_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_cell_id, dual_node_id] = m_primal_cell_to_dual_node_map[i]; DestinationNodeId destination_node_id = static_cast<typename DestinationNodeId::base_type>(dual_node_id); @@ -95,7 +95,7 @@ class PrimalToDual1DConnectivityDataMapper : public IPrimalToDualConnectivityDat using DestinationNodeId = ItemIdT<destination_node_type>; parallel_for( - m_primal_node_to_dual_node_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_node_to_dual_node_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_node_id, dual_node_id] = m_primal_node_to_dual_node_map[i]; DestinationNodeId destination_node_id = static_cast<typename DestinationNodeId::base_type>(primal_node_id); @@ -105,7 +105,7 @@ class PrimalToDual1DConnectivityDataMapper : public IPrimalToDualConnectivityDat }); parallel_for( - m_primal_cell_to_dual_node_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_cell_to_dual_node_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_cell_id, dual_node_id] = m_primal_cell_to_dual_node_map[i]; OriginNodeId origin_node_id = static_cast<typename OriginNodeId::base_type>(dual_node_id); @@ -131,7 +131,7 @@ class PrimalToDual1DConnectivityDataMapper : public IPrimalToDualConnectivityDat using OriginNodeId = ItemIdT<origin_node_type>; parallel_for( - m_primal_node_to_dual_cell_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_node_to_dual_cell_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_node_id, dual_cell_id] = m_primal_node_to_dual_cell_map[i]; const OriginNodeId origin_node_id = static_cast<typename OriginNodeId::base_type>(primal_node_id); @@ -158,7 +158,7 @@ class PrimalToDual1DConnectivityDataMapper : public IPrimalToDualConnectivityDat "unexpected connectivity for dual 
CellValue"); parallel_for( - m_primal_node_to_dual_cell_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_node_to_dual_cell_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_node_id, dual_cell_id] = m_primal_node_to_dual_cell_map[i]; const DestinationNodeId destination_node_id = diff --git a/src/mesh/PrimalToMedianDualConnectivityDataMapper.hpp b/src/mesh/PrimalToMedianDualConnectivityDataMapper.hpp index 366508eb3e6b09026529b9ff75b9d822e98e4b1f..22628812eea19e29ca9c6f22b88dac991f2a18a2 100644 --- a/src/mesh/PrimalToMedianDualConnectivityDataMapper.hpp +++ b/src/mesh/PrimalToMedianDualConnectivityDataMapper.hpp @@ -53,7 +53,7 @@ class PrimalToMedianDualConnectivityDataMapper : public IPrimalToDualConnectivit "unexpected connectivity for dual NodeValue"); parallel_for( - m_primal_boundary_node_to_dual_node_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_boundary_node_to_dual_node_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_node_id, dual_node_id] = m_primal_boundary_node_to_dual_node_map[i]; dual_node_value[dual_node_id] = primal_node_value[primal_node_id]; @@ -62,7 +62,7 @@ class PrimalToMedianDualConnectivityDataMapper : public IPrimalToDualConnectivit using OriginFaceId = ItemIdT<origin_face_type>; parallel_for( - m_primal_face_to_dual_node_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_face_to_dual_node_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_face_id, dual_node_id] = m_primal_face_to_dual_node_map[i]; const OriginFaceId origin_face_id = static_cast<typename OriginFaceId::base_type>(primal_face_id); @@ -71,7 +71,7 @@ class PrimalToMedianDualConnectivityDataMapper : public IPrimalToDualConnectivit }); parallel_for( - m_primal_cell_to_dual_node_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_cell_to_dual_node_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_cell_id, dual_node_id] = m_primal_cell_to_dual_node_map[i]; dual_node_value[dual_node_id] = primal_cell_value[primal_cell_id]; @@ -110,7 +110,7 @@ class PrimalToMedianDualConnectivityDataMapper : public IPrimalToDualConnectivit "unexpected connectivity for dual NodeValue"); parallel_for( - m_primal_boundary_node_to_dual_node_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_boundary_node_to_dual_node_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_node_id, dual_node_id] = m_primal_boundary_node_to_dual_node_map[i]; primal_node_value[primal_node_id] = dual_node_value[dual_node_id]; @@ -119,7 +119,7 @@ class PrimalToMedianDualConnectivityDataMapper : public IPrimalToDualConnectivit using DestinationFaceId = ItemIdT<destination_face_type>; parallel_for( - m_primal_face_to_dual_node_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_face_to_dual_node_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_face_id, dual_node_id] = m_primal_face_to_dual_node_map[i]; const DestinationFaceId destination_face_id = @@ -129,7 +129,7 @@ class PrimalToMedianDualConnectivityDataMapper : public IPrimalToDualConnectivit }); parallel_for( - m_primal_cell_to_dual_node_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_cell_to_dual_node_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_cell_id, dual_node_id] = m_primal_cell_to_dual_node_map[i]; primal_cell_value[primal_cell_id] = dual_node_value[dual_node_id]; @@ -150,7 +150,7 @@ class PrimalToMedianDualConnectivityDataMapper : public IPrimalToDualConnectivit "unexpected connectivity for dual CellValue"); parallel_for( - m_primal_node_to_dual_cell_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_node_to_dual_cell_map.size(), 
PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_node_id, dual_cell_id] = m_primal_node_to_dual_cell_map[i]; dual_cell_value[dual_cell_id] = primal_node_value[primal_node_id]; @@ -171,7 +171,7 @@ class PrimalToMedianDualConnectivityDataMapper : public IPrimalToDualConnectivit "unexpected connectivity for dual CellValue"); parallel_for( - m_primal_node_to_dual_cell_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_node_to_dual_cell_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_face_id, dual_cell_id] = m_primal_node_to_dual_cell_map[i]; primal_node_value[primal_face_id] = dual_cell_value[dual_cell_id]; diff --git a/src/output/GnuplotWriter.cpp b/src/output/GnuplotWriter.cpp index 4b249aa2e5f8bd11a6251f9edac463d69b8562df..a63f1f7a654de6b526584b843379ab6ed25c6aa1 100644 --- a/src/output/GnuplotWriter.cpp +++ b/src/output/GnuplotWriter.cpp @@ -188,12 +188,12 @@ GnuplotWriter::_write(const MeshType& mesh, for (const auto& [name, item_data_variant] : output_named_item_data_set) { std::visit( - [&, name = name](auto&& item_data) { + [&, var_name = name](auto&& item_data) { using ItemDataType = std::decay_t<decltype(item_data)>; if (ItemDataType::item_t == ItemType::face) { std::ostringstream error_msg; - error_msg << "gnuplot_writer does not support face data, cannot save variable \"" << rang::fgB::yellow << name - << rang::fg::reset << '"'; + error_msg << "gnuplot_writer does not support face data, cannot save variable \"" << rang::fgB::yellow + << var_name << rang::fg::reset << '"'; throw NormalError(error_msg.str()); } }, @@ -202,12 +202,12 @@ GnuplotWriter::_write(const MeshType& mesh, for (const auto& [name, item_data_variant] : output_named_item_data_set) { std::visit( - [&, name = name](auto&& item_data) { + [&, var_name = name](auto&& item_data) { using ItemDataType = std::decay_t<decltype(item_data)>; if (ItemDataType::item_t == ItemType::edge) { std::ostringstream error_msg; - error_msg << "gnuplot_writer does not support edge data, cannot save variable \"" << rang::fgB::yellow << name - << rang::fg::reset << '"'; + error_msg << "gnuplot_writer does not support edge data, cannot save variable \"" << rang::fgB::yellow + << var_name << rang::fg::reset << '"'; throw NormalError(error_msg.str()); } }, diff --git a/src/output/GnuplotWriter1D.cpp b/src/output/GnuplotWriter1D.cpp index 10aafe1f5fbd478a8aa0d39b86ecd5f76b8aa64f..1f6bd91f6d54e93054f77ab95b6b54a08e4447ef 100644 --- a/src/output/GnuplotWriter1D.cpp +++ b/src/output/GnuplotWriter1D.cpp @@ -183,11 +183,11 @@ GnuplotWriter1D::_write(const MeshType& mesh, for (const auto& [name, item_data_variant] : output_named_item_data_set) { std::visit( - [&, name = name](auto&& item_data) { + [&, var_name = name](auto&& item_data) { if (this->_is_face_data(item_data)) { std::ostringstream error_msg; error_msg << "gnuplot_1d_writer does not support face data, cannot save variable \"" << rang::fgB::yellow - << name << rang::fg::reset << '"'; + << var_name << rang::fg::reset << '"'; throw NormalError(error_msg.str()); } }, @@ -196,11 +196,11 @@ GnuplotWriter1D::_write(const MeshType& mesh, for (const auto& [name, item_data_variant] : output_named_item_data_set) { std::visit( - [&, name = name](auto&& item_data) { + [&, var_name = name](auto&& item_data) { if (this->_is_edge_data(item_data)) { std::ostringstream error_msg; error_msg << "gnuplot_1d_writer does not support edge data, cannot save variable \"" << rang::fgB::yellow - << name << rang::fg::reset << '"'; + << var_name << rang::fg::reset << '"'; throw NormalError(error_msg.str()); } 
}, @@ -216,6 +216,8 @@ GnuplotWriter1D::_write(const MeshType& mesh, throw NormalError("cannot store both node and cell data in the same gnuplot file"); } + createDirectoryIfNeeded(_getFilename()); + std::ofstream fout; createDirectoryIfNeeded(m_base_filename); diff --git a/src/output/GnuplotWriterRaw.cpp b/src/output/GnuplotWriterRaw.cpp index ac93ba0029f3d73583c98d62c85beb7901f3569f..e04a9c797de2525359737c93699425fc252db4fe 100644 --- a/src/output/GnuplotWriterRaw.cpp +++ b/src/output/GnuplotWriterRaw.cpp @@ -163,11 +163,11 @@ GnuplotWriterRaw::_write(const MeshType& mesh, for (const auto& [name, item_data_variant] : output_named_item_data_set) { std::visit( - [&, name = name](auto&& item_data) { + [&, var_name = name](auto&& item_data) { if (this->_is_face_data(item_data)) { std::ostringstream error_msg; error_msg << "gnuplot_raw_writer does not support face data, cannot save variable \"" << rang::fgB::yellow - << name << rang::fg::reset << '"'; + << var_name << rang::fg::reset << '"'; throw NormalError(error_msg.str()); } }, @@ -176,11 +176,11 @@ GnuplotWriterRaw::_write(const MeshType& mesh, for (const auto& [name, item_data_variant] : output_named_item_data_set) { std::visit( - [&, name = name](auto&& item_data) { + [&, var_name = name](auto&& item_data) { if (this->_is_edge_data(item_data)) { std::ostringstream error_msg; error_msg << "gnuplot_1d_writer does not support edge data, cannot save variable \"" << rang::fgB::yellow - << name << rang::fg::reset << '"'; + << var_name << rang::fg::reset << '"'; throw NormalError(error_msg.str()); } }, @@ -196,6 +196,8 @@ GnuplotWriterRaw::_write(const MeshType& mesh, throw NormalError("cannot store both node and cell data in the same gnuplot file"); } + createDirectoryIfNeeded(_getFilename()); + std::ofstream fout; if (parallel::rank() == 0) { diff --git a/src/output/VTKWriter.cpp b/src/output/VTKWriter.cpp index 3a7f439429a49ac5e7da27ddf6b7e92e2069d1ed..64d35434462e5980a11b69ea2257ce779b3ec581 100644 --- a/src/output/VTKWriter.cpp +++ b/src/output/VTKWriter.cpp @@ -396,8 +396,9 @@ VTKWriter::_write(const MeshType& mesh, << "\">\n"; fout << "<CellData>\n"; for (const auto& [name, item_value_variant] : output_named_item_data_set) { - std::visit([&, name = name]( - auto&& item_value) { return this->_write_cell_data(fout, name, item_value, serialize_data_list); }, + std::visit([&, var_name = name]( + auto&& + item_value) { return this->_write_cell_data(fout, var_name, item_value, serialize_data_list); }, item_value_variant); } if (parallel::size() > 1) { @@ -412,8 +413,9 @@ VTKWriter::_write(const MeshType& mesh, fout << "</CellData>\n"; fout << "<PointData>\n"; for (const auto& [name, item_value_variant] : output_named_item_data_set) { - std::visit([&, name = name]( - auto&& item_value) { return this->_write_node_data(fout, name, item_value, serialize_data_list); }, + std::visit([&, var_name = name]( + auto&& + item_value) { return this->_write_node_data(fout, var_name, item_value, serialize_data_list); }, item_value_variant); } fout << "</PointData>\n"; @@ -622,18 +624,18 @@ VTKWriter::_write(const MeshType& mesh, for (const auto& [name, item_value_variant] : output_named_item_data_set) { std::visit( - [&, name = name](auto&& item_value) { + [&, var_name = name](auto&& item_value) { using ItemValueType = std::decay_t<decltype(item_value)>; if constexpr (ItemValueType::item_t == ItemType::edge) { std::ostringstream error_msg; - error_msg << "VTK format does not support edge data, cannot save variable \"" << rang::fgB::yellow << name - << 
rang::fg::reset << '"'; + error_msg << "VTK format does not support edge data, cannot save variable \"" << rang::fgB::yellow + << var_name << rang::fg::reset << '"'; throw NormalError(error_msg.str()); } if constexpr (ItemValueType::item_t == ItemType::face) { std::ostringstream error_msg; - error_msg << "VTK format does not support face data, cannot save variable \"" << rang::fgB::yellow << name - << rang::fg::reset << '"'; + error_msg << "VTK format does not support face data, cannot save variable \"" << rang::fgB::yellow + << var_name << rang::fg::reset << '"'; throw NormalError(error_msg.str()); } }, @@ -642,14 +644,14 @@ VTKWriter::_write(const MeshType& mesh, fout << "<PPointData>\n"; for (const auto& [name, item_value_variant] : output_named_item_data_set) { - std::visit([&, name = name](auto&& item_value) { return this->_write_node_pvtu(fout, name, item_value); }, + std::visit([&, var_name = name](auto&& item_value) { return this->_write_node_pvtu(fout, var_name, item_value); }, item_value_variant); } fout << "</PPointData>\n"; fout << "<PCellData>\n"; for (const auto& [name, item_value_variant] : output_named_item_data_set) { - std::visit([&, name = name](auto&& item_value) { return this->_write_cell_pvtu(fout, name, item_value); }, + std::visit([&, var_name = name](auto&& item_value) { return this->_write_cell_pvtu(fout, var_name, item_value); }, item_value_variant); } if (parallel::size() > 1) { diff --git a/src/scheme/DiscreteFunctionVectorIntegrator.cpp b/src/scheme/DiscreteFunctionVectorIntegrator.cpp index fccc6f79db41dd61cd6feea80baae7c8021487d3..0fa08f21498154a2023875548bcd763c5c6a8255 100644 --- a/src/scheme/DiscreteFunctionVectorIntegrator.cpp +++ b/src/scheme/DiscreteFunctionVectorIntegrator.cpp @@ -47,9 +47,8 @@ DiscreteFunctionVectorIntegrator::_integrateOnZoneList() const } Table<const DataType> table = - IntegrateCellArray<DataType(TinyVector<Dimension>)>::template integrate(m_function_id_list, - *m_quadrature_descriptor, *p_mesh, - zone_cell_list); + IntegrateCellArray<DataType(TinyVector<Dimension>)>::integrate(m_function_id_list, *m_quadrature_descriptor, + *p_mesh, zone_cell_list); CellArray<DataType> cell_array{p_mesh->connectivity(), m_function_id_list.size()}; if constexpr (is_tiny_vector_v<DataType> or is_tiny_matrix_v<DataType>) { @@ -78,9 +77,8 @@ DiscreteFunctionVectorIntegrator::_integrateGlobally() const constexpr size_t Dimension = MeshType::Dimension; return DiscreteFunctionP0Vector< - DataType>(m_mesh, - IntegrateCellArray<DataType(TinyVector<Dimension>)>::template integrate(m_function_id_list, - *m_quadrature_descriptor, *mesh)); + DataType>(m_mesh, IntegrateCellArray<DataType(TinyVector<Dimension>)>::integrate(m_function_id_list, + *m_quadrature_descriptor, *mesh)); } template <MeshConcept MeshType, typename DataType> diff --git a/src/scheme/FluxingAdvectionSolver.cpp b/src/scheme/FluxingAdvectionSolver.cpp index a2ebb2b256a3957169cf52457fd87a3d3236e8f3..1dec92a7dcc8e5e0c281d5d0aaa33bce9872b128 100644 --- a/src/scheme/FluxingAdvectionSolver.cpp +++ b/src/scheme/FluxingAdvectionSolver.cpp @@ -410,7 +410,7 @@ FluxingAdvectionSolver<MeshType>::_computeCycleNumber(FaceValue<double> fluxing_ total_negative_flux.fill(0); parallel_for( - m_old_mesh->numberOfCells(), PUGS_LAMBDA(CellId cell_id) { + m_old_mesh->numberOfCells(), PUGS_CLASS_LAMBDA(CellId cell_id) { const auto& cell_to_face = cell_to_face_matrix[cell_id]; for (size_t i_face = 0; i_face < cell_to_face.size(); ++i_face) { FaceId face_id = cell_to_face[i_face]; @@ -486,7 +486,7 @@ 
FluxingAdvectionSolver<MeshType>::_remapOne(const CellValue<const double>& step_
   // First we deal with inner faces
   parallel_for(
-    m_new_mesh->numberOfCells(), PUGS_LAMBDA(CellId cell_id) {
+    m_new_mesh->numberOfCells(), PUGS_CLASS_LAMBDA(CellId cell_id) {
       const auto& cell_to_face = cell_to_face_matrix[cell_id];
       for (size_t i_face = 0; i_face < cell_to_face.size(); ++i_face) {
         const FaceId face_id = cell_to_face[i_face];
@@ -653,7 +653,7 @@ FluxingAdvectionSolver<MeshType>::_remapAllQuantities()
   }
 
   parallel_for(
-    m_new_mesh->numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
+    m_new_mesh->numberOfCells(), PUGS_CLASS_LAMBDA(const CellId cell_id) {
       const auto& cell_to_face = cell_to_face_matrix[cell_id];
       for (size_t i_face = 0; i_face < cell_to_face.size(); ++i_face) {
         const FaceId face_id = cell_to_face[i_face];
diff --git a/src/utils/PugsMacros.hpp b/src/utils/PugsMacros.hpp
index 7248bad4b843106b02d1fa5a48cee46703f932ba..541646748a976d8b518b7ff006c76691850ed346 100644
--- a/src/utils/PugsMacros.hpp
+++ b/src/utils/PugsMacros.hpp
@@ -9,7 +9,7 @@
 #define PUGS_FORCEINLINE KOKKOS_FORCEINLINE_FUNCTION
 
 #define PUGS_LAMBDA KOKKOS_LAMBDA
-#define PUGS_CLASS_LAMBDA KOKKOS_CLASS_LAMBDA
+#define PUGS_CLASS_LAMBDA [ =, this ]
 
 // Sets macro to ignore unknown pragma
diff --git a/tests/test_ParallelChecker_read.cpp b/tests/test_ParallelChecker_read.cpp
index 12a65e475ea0c254d95458e880da9f148aba6fa9..70045c2844cefeed0a69ca1bb058d541c8b5936a 100644
--- a/tests/test_ParallelChecker_read.cpp
+++ b/tests/test_ParallelChecker_read.cpp
@@ -201,6 +201,116 @@ TEST_CASE("ParallelChecker_read", "[dev]")
     }
   };
 
+  auto get_subitem_index = [&get_item_numbers]<typename ConnectivityT>(const ConnectivityT& connectivity,
+                                                                       ItemType item_type, ItemType subitem_type)
+    -> Array<const typename ConnectivityMatrix::IndexType> {
+    auto item_to_subitem_matrix = connectivity.getMatrix(item_type, subitem_type);
+
+    Array sub_item_index = item_to_subitem_matrix.values();
+    Array rows_map = connectivity.getMatrix(item_type, subitem_type).rowsMap();
+
+    if (parallel::size() == 1) {
+      return sub_item_index;
+    } else {
+      Array<const bool> is_owned;
+      switch (item_type) {
+      case ItemType::cell: {
+        is_owned = connectivity.cellIsOwned().arrayView();
+        break;
+      }
+      case ItemType::face: {
+        is_owned = connectivity.faceIsOwned().arrayView();
+        break;
+      }
+      case ItemType::edge: {
+        is_owned = connectivity.edgeIsOwned().arrayView();
+        break;
+      }
+      case ItemType::node: {
+        is_owned = connectivity.nodeIsOwned().arrayView();
+        break;
+      }
+      }
+
+      Array<const int> subitem_numbers;
+      switch (subitem_type) {
+      case ItemType::cell: {
+        subitem_numbers = connectivity.cellNumber().arrayView();
+        break;
+      }
+      case ItemType::face: {
+        subitem_numbers = connectivity.faceNumber().arrayView();
+        break;
+      }
+      case ItemType::edge: {
+        subitem_numbers = connectivity.edgeNumber().arrayView();
+        break;
+      }
+      case ItemType::node: {
+        subitem_numbers = connectivity.nodeNumber().arrayView();
+        break;
+      }
+      }
+
+      Array<size_t> nb_subitem_per_item(rows_map.size() - 1);
+
+      for (size_t i = 0; i < nb_subitem_per_item.size(); ++i) {
+        nb_subitem_per_item[i] = rows_map[i + 1] - rows_map[i];
+      }
+
+      const size_t nb_local_item = [is_owned]() {
+        size_t count = 0;
+        for (size_t i = 0; i < is_owned.size(); ++i) {
+          count += is_owned[i];
+        }
+        return count;
+      }();
+
+      Array<int> global_subitem_numbers;
+      {
+        Array<size_t> owned_nb_subitem_per_item{nb_local_item};
+        for (size_t i = 0, l = 0; i < is_owned.size(); ++i) {
+          if (is_owned[i]) {
+            owned_nb_subitem_per_item[l++] = nb_subitem_per_item[i];
+          }
+        }
+
+        size_t owned_nb_subitem = sum(owned_nb_subitem_per_item);
+
+        Array<int> owned_subitem_numbers{owned_nb_subitem};
+
+        size_t l = 0;
+        for (size_t item_id = 0; item_id < item_to_subitem_matrix.numberOfRows(); ++item_id) {
+          if (is_owned[item_id]) {
+            for (size_t i = 0; i < item_to_subitem_matrix[item_id].size(); ++i) {
+              owned_subitem_numbers[l++] = subitem_numbers[item_to_subitem_matrix[item_id][i]];
+            }
+          }
+        }
+
+        global_subitem_numbers = parallel::allGatherVariable(owned_subitem_numbers);
+      }
+
+      Array sub_item_numbers = get_item_numbers(connectivity, subitem_type);
+
+      std::map<typename ConnectivityMatrix::IndexType, int> number_to_id_map;
+      {
+        for (size_t i = 0; i < sub_item_numbers.size(); ++i) {
+          number_to_id_map[sub_item_numbers[i]] = i;
+        }
+        REQUIRE(sub_item_numbers.size() == number_to_id_map.size());
+      }
+
+      Array<typename ConnectivityMatrix::IndexType> global_values{global_subitem_numbers.size()};
+
+      for (size_t i = 0; i < global_subitem_numbers.size(); ++i) {
+        global_values[i] = number_to_id_map.at(global_subitem_numbers[i]);
+      }
+
+      return global_values;
+    }
+  };
+
   SECTION("check parallel write implementation")
   {
     if (parallel::size() == 1) {
@@ -227,21 +337,21 @@ TEST_CASE("ParallelChecker_read", "[dev]")
 
       SourceLocation source_location;
 
-      Array numbers = get_item_numbers(connectivity, ItemType::cell);
+      Array cell_numbers = get_item_numbers(connectivity, ItemType::cell);
 
       int tag = 12;
 
       if (parallel::rank() == 0) {
         HighFive::File file(filename, HighFive::File::Overwrite);
         HighFive::Group group = file.createGroup("/values/" + std::to_string(tag));
-        group.createDataSet<int>("numbers", HighFive::DataSpace{std::vector<size_t>{numbers.size()}})
-          .write_raw<int>(&(numbers[0]));
+        group.createDataSet<int>("cell_numbers", HighFive::DataSpace{std::vector<size_t>{cell_numbers.size()}})
+          .write_raw<int>(&(cell_numbers[0]));
 
-        Array<double> values{numbers.size()};
-        for (size_t i = 0; i < numbers.size(); ++i) {
-          values[i] = std::sin(numbers[i]);
+        Array<double> values{cell_numbers.size()};
+        for (size_t i = 0; i < cell_numbers.size(); ++i) {
+          values[i] = std::sin(cell_numbers[i]);
         }
-        group.createDataSet<double>(name, HighFive::DataSpace{std::vector<size_t>{numbers.size()}})
+        group.createDataSet<double>("data/" + name, HighFive::DataSpace{std::vector<size_t>{cell_numbers.size()}})
           .write_raw<double>(&(values[0]));
 
         group.createAttribute("filename", source_location.filename());
@@ -412,7 +522,7 @@ TEST_CASE("ParallelChecker_read", "[dev]")
 
         HighFive::File file(filename, HighFive::File::Overwrite);
         HighFive::Group group = file.createGroup("/values/" + std::to_string(tag));
-        group.createDataSet<int>("numbers", HighFive::DataSpace{std::vector<size_t>{numbers.size()}})
+        group.createDataSet<int>("cell_numbers", HighFive::DataSpace{std::vector<size_t>{numbers.size()}})
           .write_raw<int>(&(numbers[0]));
 
         Table<double> double_table{numbers.size(), 2};
@@ -422,8 +532,9 @@ TEST_CASE("ParallelChecker_read", "[dev]")
           }
         }
         group
-          .createDataSet<double>(name, HighFive::DataSpace{std::vector<size_t>{double_table.numberOfRows(),
-                                                                               double_table.numberOfColumns()}})
+          .createDataSet<double>("data/" + name,
+                                 HighFive::DataSpace{
+                                   std::vector<size_t>{double_table.numberOfRows(), double_table.numberOfColumns()}})
           .write_raw<double>(&(double_table(0, 0)));
 
         group.createAttribute("filename", source_location.filename());
@@ -545,7 +656,7 @@ TEST_CASE("ParallelChecker_read", "[dev]")
 
      SourceLocation source_location;
 
-      Array numbers = get_item_numbers(connectivity, ItemType::node);
+      Array node_numbers = get_item_numbers(connectivity, ItemType::node);
 
       using DataType = TinyVector<3>;
 
@@ -554,17 +665,17 @@ TEST_CASE("ParallelChecker_read", "[dev]")
 
         HighFive::File file(filename, HighFive::File::Overwrite);
         HighFive::Group group = file.createGroup("/values/" + std::to_string(tag));
-        group.createDataSet<int>("numbers", HighFive::DataSpace{std::vector<size_t>{numbers.size()}})
-          .write_raw<int>(&(numbers[0]));
+        group.createDataSet<int>("node_numbers", HighFive::DataSpace{std::vector<size_t>{node_numbers.size()}})
+          .write_raw<int>(&(node_numbers[0]));
 
-        Array<DataType> values{numbers.size()};
-        for (size_t i = 0; i < numbers.size(); ++i) {
+        Array<DataType> values{node_numbers.size()};
+        for (size_t i = 0; i < node_numbers.size(); ++i) {
           for (size_t j = 0; j < DataType::Dimension; ++j) {
-            values[i][j] = std::sin(numbers[i] + j);
+            values[i][j] = std::sin(node_numbers[i] + j);
           }
         }
         group
-          .createDataSet(name, HighFive::DataSpace{std::vector<size_t>{numbers.size()}},
+          .createDataSet("data/" + name, HighFive::DataSpace{std::vector<size_t>{node_numbers.size()}},
                          test_TinyVectorDataType<DataType>{})
           .template write_raw<double>(&(values[0][0]), test_TinyVectorDataType<DataType>{});
 
@@ -640,28 +751,28 @@ TEST_CASE("ParallelChecker_read", "[dev]")
 
       using DataType = TinyMatrix<3, 2>;
 
-      Array numbers = get_item_numbers(connectivity, ItemType::face);
+      Array face_numbers = get_item_numbers(connectivity, ItemType::face);
 
       int tag = 12;
 
       if (parallel::rank() == 0) {
         HighFive::File file(filename, HighFive::File::Overwrite);
         HighFive::Group group = file.createGroup("/values/" + std::to_string(tag));
-        group.createDataSet<int>("numbers", HighFive::DataSpace{std::vector<size_t>{numbers.size()}})
-          .write_raw<int>(&(numbers[0]));
+        group.createDataSet<int>("face_numbers", HighFive::DataSpace{std::vector<size_t>{face_numbers.size()}})
+          .write_raw<int>(&(face_numbers[0]));
 
-        Table<DataType> dt_table{numbers.size(), 2};
+        Table<DataType> dt_table{face_numbers.size(), 2};
         for (size_t i = 0; i < dt_table.numberOfRows(); ++i) {
           for (size_t j = 0; j < dt_table.numberOfColumns(); ++j) {
             for (size_t k = 0; k < DataType::NumberOfRows; ++k) {
               for (size_t l = 0; l < DataType::NumberOfColumns; ++l) {
-                dt_table[i][j](k, l) = std::sin(2 * numbers[i] + j + 3 * k + 2 * l);
+                dt_table[i][j](k, l) = std::sin(2 * face_numbers[i] + j + 3 * k + 2 * l);
              }
            }
          }
        }
        group
-          .createDataSet(name,
+          .createDataSet("data/" + name,
                          HighFive::DataSpace{std::vector<size_t>{dt_table.numberOfRows(), dt_table.numberOfColumns()}},
                          test_TinyMatrixDataType<DataType>{})
          .template write_raw<double>(&(dt_table[0][0](0, 0)), test_TinyMatrixDataType<DataType>{});
@@ -789,7 +900,7 @@ TEST_CASE("ParallelChecker_read", "[dev]")
 
      SourceLocation source_location;
 
-      Array numbers = get_item_numbers(connectivity, ItemType::node);
+      Array node_numbers = get_item_numbers(connectivity, ItemType::node);
 
      using DataType = TinyMatrix<2, 3>;
 
@@ -798,19 +909,19 @@ TEST_CASE("ParallelChecker_read", "[dev]")
 
        HighFive::File file(filename, HighFive::File::Overwrite);
        HighFive::Group group = file.createGroup("/values/" + std::to_string(tag));
-        group.createDataSet<int>("numbers", HighFive::DataSpace{std::vector<size_t>{numbers.size()}})
-          .write_raw<int>(&(numbers[0]));
+        group.createDataSet<int>("node_numbers", HighFive::DataSpace{std::vector<size_t>{node_numbers.size()}})
+          .write_raw<int>(&(node_numbers[0]));
 
-        Array<DataType> values{numbers.size()};
-        for (size_t i = 0; i < numbers.size(); ++i) {
+        Array<DataType> values{node_numbers.size()};
+        for (size_t i = 0; i < node_numbers.size(); ++i) {
          for (size_t j = 0; j < DataType::NumberOfRows; ++j) {
            for (size_t k = 0; k < DataType::NumberOfColumns; ++k) {
-              values[i](j, k) = std::sin(numbers[i] + j + 2 * k);
+              values[i](j, k) = std::sin(node_numbers[i] + j + 2 * k);
            }
          }
        }
        group
-          .createDataSet(name, HighFive::DataSpace{std::vector<size_t>{numbers.size()}},
+          .createDataSet("data/" + name, HighFive::DataSpace{std::vector<size_t>{node_numbers.size()}},
                          test_TinyMatrixDataType<DataType>{})
          .template write_raw<double>(&(values[0](0, 0)), test_TinyMatrixDataType<DataType>{});
 
@@ -888,26 +999,26 @@ TEST_CASE("ParallelChecker_read", "[dev]")
 
      using DataType = TinyVector<2>;
 
-      Array numbers = get_item_numbers(connectivity, ItemType::face);
+      Array face_numbers = get_item_numbers(connectivity, ItemType::face);
 
      int tag = 7;
 
      if (parallel::rank() == 0) {
        HighFive::File file(filename, HighFive::File::Overwrite);
        HighFive::Group group = file.createGroup("/values/" + std::to_string(tag));
-        group.createDataSet<int>("numbers", HighFive::DataSpace{std::vector<size_t>{numbers.size()}})
-          .write_raw<int>(&(numbers[0]));
+        group.createDataSet<int>("face_numbers", HighFive::DataSpace{std::vector<size_t>{face_numbers.size()}})
+          .write_raw<int>(&(face_numbers[0]));
 
-        Table<DataType> dt_table{numbers.size(), 2};
+        Table<DataType> dt_table{face_numbers.size(), 2};
        for (size_t i = 0; i < dt_table.numberOfRows(); ++i) {
          for (size_t j = 0; j < dt_table.numberOfColumns(); ++j) {
            for (size_t k = 0; k < DataType::Dimension; ++k) {
-              dt_table[i][j][k] = std::sin(2 * numbers[i] + j + 3 * k);
+              dt_table[i][j][k] = std::sin(2 * face_numbers[i] + j + 3 * k);
            }
          }
        }
        group
-          .createDataSet(name,
+          .createDataSet("data/" + name,
                          HighFive::DataSpace{std::vector<size_t>{dt_table.numberOfRows(), dt_table.numberOfColumns()}},
                          test_TinyVectorDataType<DataType>{})
          .template write_raw<double>(&(dt_table[0][0][0]), test_TinyVectorDataType<DataType>{});
 
@@ -1034,31 +1145,41 @@ TEST_CASE("ParallelChecker_read", "[dev]")
      const std::string name = "sin";
      SourceLocation source_location;
 
-      Array numbers = get_item_numbers(connectivity, ItemType::node);
+      Array node_numbers = get_item_numbers(connectivity, ItemType::node);
+      Array cell_numbers = get_item_numbers(connectivity, ItemType::cell);
 
      Array<const typename ConnectivityMatrix::IndexType> rows_map =
        get_subitem_rows_map(connectivity, ItemType::node, ItemType::cell);
+      Array<const typename ConnectivityMatrix::IndexType> sub_item_index =
+        get_subitem_index(connectivity, ItemType::node, ItemType::cell);
 
      int tag = 6;
 
      if (parallel::rank() == 0) {
        HighFive::File file(filename, HighFive::File::Overwrite);
        HighFive::Group group = file.createGroup("/values/" + std::to_string(tag));
-        group.createDataSet<int>("numbers", HighFive::DataSpace{std::vector<size_t>{numbers.size()}})
-          .write_raw<int>(&(numbers[0]));
+        group.createDataSet<int>("node_numbers", HighFive::DataSpace{std::vector<size_t>{node_numbers.size()}})
+          .write_raw<int>(&(node_numbers[0]));
+        group.createDataSet<int>("cell_numbers", HighFive::DataSpace{std::vector<size_t>{cell_numbers.size()}})
+          .write_raw<int>(&(cell_numbers[0]));
        group
          .createDataSet<typename ConnectivityMatrix::IndexType>("rows_map", HighFive::DataSpace{std::vector<size_t>{
                                                                                rows_map.size()}})
          .write_raw<typename ConnectivityMatrix::IndexType>(&(rows_map[0]));
+        group
+          .createDataSet<typename ConnectivityMatrix::IndexType>("sub_item_index",
+                                                                 HighFive::DataSpace{
+                                                                   std::vector<size_t>{sub_item_index.size()}})
+          .write_raw<typename ConnectivityMatrix::IndexType>(&(sub_item_index[0]));
 
        Array<double> values{rows_map[rows_map.size() - 1]};
-        for (size_t i = 0; i < numbers.size(); ++i) {
+        for (size_t i = 0; i < node_numbers.size(); ++i) {
          for (size_t i_row = rows_map[i]; i_row < rows_map[i + 1]; ++i_row) {
            const size_t j = i_row - rows_map[i];
-            values[i_row] = std::sin(numbers[i] + 2 * j);
+            values[i_row] = std::sin(node_numbers[i] + 2 * j);
          }
        }
-        group.createDataSet<double>(name, HighFive::DataSpace{std::vector<size_t>{values.size()}})
+        group.createDataSet<double>("data/" + name, HighFive::DataSpace{std::vector<size_t>{values.size()}})
          .write_raw<double>(&(values[0]));
 
        group.createAttribute("filename", source_location.filename());
@@ -1242,34 +1363,45 @@ TEST_CASE("ParallelChecker_read", "[dev]")
 
      SourceLocation source_location;
 
-      Array numbers = get_item_numbers(connectivity, ItemType::cell);
+      Array cell_numbers = get_item_numbers(connectivity, ItemType::cell);
+      Array node_numbers = get_item_numbers(connectivity, ItemType::node);
 
      Array<const typename ConnectivityMatrix::IndexType> rows_map =
        get_subitem_rows_map(connectivity, ItemType::cell, ItemType::node);
+      Array<const typename ConnectivityMatrix::IndexType> sub_item_index =
+        get_subitem_index(connectivity, ItemType::cell, ItemType::node);
 
      int tag = 12;
 
      if (parallel::rank() == 0) {
        HighFive::File file(filename, HighFive::File::Overwrite);
        HighFive::Group group = file.createGroup("/values/" + std::to_string(tag));
-        group.createDataSet<int>("numbers", HighFive::DataSpace{std::vector<size_t>{numbers.size()}})
-          .write_raw<int>(&(numbers[0]));
+        group.createDataSet<int>("cell_numbers", HighFive::DataSpace{std::vector<size_t>{cell_numbers.size()}})
+          .write_raw<int>(&(cell_numbers[0]));
+        group.createDataSet<int>("node_numbers", HighFive::DataSpace{std::vector<size_t>{node_numbers.size()}})
+          .write_raw<int>(&(node_numbers[0]));
        group
          .createDataSet<typename ConnectivityMatrix::IndexType>("rows_map", HighFive::DataSpace{std::vector<size_t>{
                                                                                rows_map.size()}})
          .write_raw<typename ConnectivityMatrix::IndexType>(&(rows_map[0]));
+        group
+          .createDataSet<typename ConnectivityMatrix::IndexType>("sub_item_index",
+                                                                 HighFive::DataSpace{
+                                                                   std::vector<size_t>{sub_item_index.size()}})
+          .write_raw<typename ConnectivityMatrix::IndexType>(&(sub_item_index[0]));
 
        Table<double> double_table{rows_map[rows_map.size() - 1], 2};
-        for (size_t i = 0; i < numbers.size(); ++i) {
+        for (size_t i = 0; i < cell_numbers.size(); ++i) {
          for (size_t i_row = rows_map[i]; i_row < rows_map[i + 1]; ++i_row) {
            const size_t j = i_row - rows_map[i];
            for (size_t k = 0; k < 2; ++k) {
-              double_table[i_row][k] = std::sin(2 * numbers[i] + (1 + k) * j);
+              double_table[i_row][k] = std::sin(2 * cell_numbers[i] + (1 + k) * j);
            }
          }
        }
        group
-          .createDataSet<double>(name, HighFive::DataSpace{std::vector<size_t>{double_table.numberOfRows(),
-                                                                               double_table.numberOfColumns()}})
+          .createDataSet<double>("data/" + name,
+                                 HighFive::DataSpace{
+                                   std::vector<size_t>{double_table.numberOfRows(), double_table.numberOfColumns()}})
          .write_raw<double>(&(double_table(0, 0)));
 
        group.createAttribute("filename", source_location.filename());
@@ -1398,28 +1530,28 @@ TEST_CASE("ParallelChecker_read", "[dev]")
 
      using DataType = TinyMatrix<3, 2>;
 
-      Array numbers = get_item_numbers(connectivity, ItemType::face);
+      Array face_numbers = get_item_numbers(connectivity, ItemType::face);
 
      int tag = 12;
 
      if (parallel::rank() == 0) {
        HighFive::File file(filename, HighFive::File::Overwrite);
        HighFive::Group group = file.createGroup("/values/" + std::to_string(tag));
-        group.createDataSet<int>("numbers", HighFive::DataSpace{std::vector<size_t>{numbers.size()}})
-          .write_raw<int>(&(numbers[0]));
+        group.createDataSet<int>("face_numbers", HighFive::DataSpace{std::vector<size_t>{face_numbers.size()}})
+          .write_raw<int>(&(face_numbers[0]));
 
-        Table<DataType> dt_table{numbers.size(), 2};
+        Table<DataType> dt_table{face_numbers.size(), 2};
        for (size_t i = 0; i < dt_table.numberOfRows(); ++i) {
          for (size_t j = 0; j < dt_table.numberOfColumns(); ++j) {
            for (size_t k = 0; k < DataType::NumberOfRows; ++k) {
              for (size_t l = 0; l < DataType::NumberOfColumns; ++l) {
-                dt_table[i][j](k, l) = std::sin(2 * numbers[i] + j + 3 * k + 2 * l);
+                dt_table[i][j](k, l) = std::sin(2 * face_numbers[i] + j + 3 * k + 2 * l);
              }
            }
          }
        }
        group
-          .createDataSet(name,
+          .createDataSet("data/" + name,
                          HighFive::DataSpace{std::vector<size_t>{dt_table.numberOfRows(), dt_table.numberOfColumns()}},
                          test_TinyMatrixDataType<DataType>{})
          .template write_raw<double>(&(dt_table[0][0](0, 0)), test_TinyMatrixDataType<DataType>{});
 
@@ -1546,31 +1678,41 @@ TEST_CASE("ParallelChecker_read", "[dev]")
      const std::string name = "sin";
      SourceLocation source_location;
 
-      Array numbers = get_item_numbers(connectivity, ItemType::node);
+      Array node_numbers = get_item_numbers(connectivity, ItemType::node);
+      Array cell_numbers = get_item_numbers(connectivity, ItemType::cell);
 
      Array<const typename ConnectivityMatrix::IndexType> rows_map =
        get_subitem_rows_map(connectivity, ItemType::node, ItemType::cell);
+      Array<const typename ConnectivityMatrix::IndexType> sub_item_index =
+        get_subitem_index(connectivity, ItemType::node, ItemType::cell);
 
      int tag = 6;
 
      if (parallel::rank() == 0) {
        HighFive::File file(filename, HighFive::File::Overwrite);
        HighFive::Group group = file.createGroup("/values/" + std::to_string(tag));
-        group.createDataSet<int>("numbers", HighFive::DataSpace{std::vector<size_t>{numbers.size()}})
-          .write_raw<int>(&(numbers[0]));
+        group.createDataSet<int>("node_numbers", HighFive::DataSpace{std::vector<size_t>{node_numbers.size()}})
+          .write_raw<int>(&(node_numbers[0]));
+        group.createDataSet<int>("cell_numbers", HighFive::DataSpace{std::vector<size_t>{cell_numbers.size()}})
+          .write_raw<int>(&(cell_numbers[0]));
        group
          .createDataSet<typename ConnectivityMatrix::IndexType>("rows_map", HighFive::DataSpace{std::vector<size_t>{
                                                                                rows_map.size()}})
          .write_raw<typename ConnectivityMatrix::IndexType>(&(rows_map[0]));
+        group
+          .createDataSet<typename ConnectivityMatrix::IndexType>("sub_item_index",
+                                                                 HighFive::DataSpace{
+                                                                   std::vector<size_t>{sub_item_index.size()}})
+          .write_raw<typename ConnectivityMatrix::IndexType>(&(sub_item_index[0]));
 
        Array<double> values{rows_map[rows_map.size() - 1]};
-        for (size_t i = 0; i < numbers.size(); ++i) {
+        for (size_t i = 0; i < node_numbers.size(); ++i) {
          for (size_t i_row = rows_map[i]; i_row < rows_map[i + 1]; ++i_row) {
            const size_t j = i_row - rows_map[i];
-            values[i_row] = std::sin(numbers[i] + 2 * j);
+            values[i_row] = std::sin(node_numbers[i] + 2 * j);
          }
        }
-        group.createDataSet<double>(name, HighFive::DataSpace{std::vector<size_t>{values.size()}})
+        group.createDataSet<double>("data/" + name, HighFive::DataSpace{std::vector<size_t>{values.size()}})
          .write_raw<double>(&(values[0]));
 
        group.createAttribute("filename", source_location.filename());
@@ -1754,26 +1896,26 @@ TEST_CASE("ParallelChecker_read", "[dev]")
 
      using DataType = TinyVector<2>;
 
-      Array numbers = get_item_numbers(connectivity, ItemType::face);
+      Array face_numbers = get_item_numbers(connectivity, ItemType::face);
 
      int tag = 7;
 
      if (parallel::rank() == 0) {
        HighFive::File file(filename, HighFive::File::Overwrite);
        HighFive::Group group = file.createGroup("/values/" + std::to_string(tag));
-        group.createDataSet<int>("numbers", HighFive::DataSpace{std::vector<size_t>{numbers.size()}})
-          .write_raw<int>(&(numbers[0]));
+        group.createDataSet<int>("face_numbers", HighFive::DataSpace{std::vector<size_t>{face_numbers.size()}})
+          .write_raw<int>(&(face_numbers[0]));
 
-        Table<DataType> dt_table{numbers.size(), 2};
+        Table<DataType> dt_table{face_numbers.size(), 2};
        for (size_t i = 0; i < dt_table.numberOfRows(); ++i) {
          for (size_t j = 0; j < dt_table.numberOfColumns(); ++j) {
            for (size_t k = 0; k < DataType::Dimension; ++k) {
-              dt_table[i][j][k] = std::sin(2 * numbers[i] + j + 3 * k);
+              dt_table[i][j][k] = std::sin(2 * face_numbers[i] + j + 3 * k);
            }
          }
        }
        group
-          .createDataSet(name,
+          .createDataSet("data/" + name,
                          HighFive::DataSpace{std::vector<size_t>{dt_table.numberOfRows(), dt_table.numberOfColumns()}},
                          test_TinyVectorDataType<DataType>{})
          .template write_raw<double>(&(dt_table[0][0][0]), test_TinyVectorDataType<DataType>{});
 
@@ -1898,34 +2040,45 @@ TEST_CASE("ParallelChecker_read", "[dev]")
 
      SourceLocation source_location;
 
-      Array numbers = get_item_numbers(connectivity, ItemType::cell);
+      Array cell_numbers = get_item_numbers(connectivity, ItemType::cell);
+      Array node_numbers = get_item_numbers(connectivity, ItemType::node);
 
      Array<const typename ConnectivityMatrix::IndexType> rows_map =
        get_subitem_rows_map(connectivity, ItemType::cell, ItemType::node);
+      Array<const typename ConnectivityMatrix::IndexType> sub_item_index =
+        get_subitem_index(connectivity, ItemType::cell, ItemType::node);
 
      int tag = 12;
 
      if (parallel::rank() == 0) {
        HighFive::File file(filename, HighFive::File::Overwrite);
        HighFive::Group group = file.createGroup("/values/" + std::to_string(tag));
-        group.createDataSet<int>("numbers", HighFive::DataSpace{std::vector<size_t>{numbers.size()}})
-          .write_raw<int>(&(numbers[0]));
+        group.createDataSet<int>("cell_numbers", HighFive::DataSpace{std::vector<size_t>{cell_numbers.size()}})
+          .write_raw<int>(&(cell_numbers[0]));
+        group.createDataSet<int>("node_numbers", HighFive::DataSpace{std::vector<size_t>{node_numbers.size()}})
+          .write_raw<int>(&(node_numbers[0]));
        group
          .createDataSet<typename ConnectivityMatrix::IndexType>("rows_map", HighFive::DataSpace{std::vector<size_t>{
                                                                                rows_map.size()}})
          .write_raw<typename ConnectivityMatrix::IndexType>(&(rows_map[0]));
+        group
+          .createDataSet<typename ConnectivityMatrix::IndexType>("sub_item_index",
+                                                                 HighFive::DataSpace{
+                                                                   std::vector<size_t>{sub_item_index.size()}})
+          .write_raw<typename ConnectivityMatrix::IndexType>(&(sub_item_index[0]));
 
        Table<double> double_table{rows_map[rows_map.size() - 1], 2};
-        for (size_t i = 0; i < numbers.size(); ++i) {
+        for (size_t i = 0; i < cell_numbers.size(); ++i) {
          for (size_t i_row = rows_map[i]; i_row < rows_map[i + 1]; ++i_row) {
            const size_t j = i_row - rows_map[i];
            for (size_t k = 0; k < 2; ++k) {
-              double_table[i_row][k] = std::sin(2 * numbers[i] + (1 + k) * j);
+              double_table[i_row][k] = std::sin(2 * cell_numbers[i] + (1 + k) * j);
            }
          }
        }
        group
-          .createDataSet<double>(name, HighFive::DataSpace{std::vector<size_t>{double_table.numberOfRows(),
-                                                                               double_table.numberOfColumns()}})
+          .createDataSet<double>("data/" + name,
+                                 HighFive::DataSpace{
+                                   std::vector<size_t>{double_table.numberOfRows(), double_table.numberOfColumns()}})
          .write_raw<double>(&(double_table(0, 0)));
 
        group.createAttribute("filename", source_location.filename());
diff --git a/tests/test_ParallelChecker_write.cpp b/tests/test_ParallelChecker_write.cpp
index 8ca7ceb50cdd2d2e8c1d4f3f5a2fd2669d9d9806..e1463d00b1b1469db4a5ecdb595f559de4af3b6f 100644
--- a/tests/test_ParallelChecker_write.cpp
+++ b/tests/test_ParallelChecker_write.cpp
@@ -108,8 +108,8 @@ TEST_CASE("ParallelChecker_write", "[dev]")
        HighFive::Group group_var0 = file.getGroup("values/" + std::to_string(tag));
 
        REQUIRE(group_var0.getNumberObjects() == 2);
-        REQUIRE(group_var0.exist("numbers"));
-        REQUIRE(group_var0.exist(var_name));
+        REQUIRE(group_var0.exist(std::string{itemName(item_type)} + "_numbers"));
+        REQUIRE(group_var0.exist("data/" + var_name));
 
        REQUIRE(group_var0.getNumberAttributes() == 7);
        REQUIRE(group_var0.hasAttribute("filename"));
@@ -366,11 +366,13 @@ TEST_CASE("ParallelChecker_write", "[dev]")
        HighFive::File file(ParallelChecker::instance().filename(), HighFive::File::ReadOnly);
        HighFive::Group group_var0 = file.getGroup("values/" + std::to_string(tag));
 
-        REQUIRE(group_var0.getNumberObjects() == 3);
+        REQUIRE(group_var0.getNumberObjects() == 5);
 
-        REQUIRE(group_var0.exist("numbers"));
+        REQUIRE(group_var0.exist(std::string{itemName(item_type)} + "_numbers"));
+        REQUIRE(group_var0.exist(std::string{itemName(sub_item_type)} + "_numbers"));
+        REQUIRE(group_var0.exist("sub_item_index"));
        REQUIRE(group_var0.exist("rows_map"));
-        REQUIRE(group_var0.exist(var_name));
+        REQUIRE(group_var0.exist("data/" + var_name));
 
        REQUIRE(group_var0.getNumberAttributes() == 8);
        REQUIRE(group_var0.hasAttribute("filename"));