diff --git a/src/algebra/CRSMatrix.hpp b/src/algebra/CRSMatrix.hpp index c2df50e454fa883bf402c56ac81f8617cf9b7742..413466b0a065134006ff2d06805fc1dc47f12806 100644 --- a/src/algebra/CRSMatrix.hpp +++ b/src/algebra/CRSMatrix.hpp @@ -190,7 +190,7 @@ class CRSMatrix Vector<MutableDataType> Ax(m_nb_rows); parallel_for( - m_nb_rows, PUGS_LAMBDA(const IndexType& i_row) { + m_nb_rows, PUGS_CLASS_LAMBDA(const IndexType& i_row) { const auto row_begin = m_row_map[i_row]; const auto row_end = m_row_map[i_row + 1]; MutableDataType sum{0}; diff --git a/src/algebra/SmallMatrix.hpp b/src/algebra/SmallMatrix.hpp index b8d3b81b5fad0ac65521a94aa4f94183f4cff333..03d69f329dbfc9592d5eed02e6fad66885df7de6 100644 --- a/src/algebra/SmallMatrix.hpp +++ b/src/algebra/SmallMatrix.hpp @@ -31,17 +31,20 @@ class [[nodiscard]] SmallMatrix // LCOV_EXCL_LINE public: PUGS_INLINE - bool isSquare() const noexcept + bool + isSquare() const noexcept { return m_nb_rows == m_nb_columns; } - friend PUGS_INLINE SmallMatrix<std::remove_const_t<DataType>> copy(const SmallMatrix& A) noexcept + friend PUGS_INLINE SmallMatrix<std::remove_const_t<DataType>> + copy(const SmallMatrix& A) noexcept { return SmallMatrix<std::remove_const_t<DataType>>{A.m_nb_rows, A.m_nb_columns, copy(A.m_values)}; } - friend PUGS_INLINE SmallMatrix<std::remove_const_t<DataType>> transpose(const SmallMatrix& A) + friend PUGS_INLINE SmallMatrix<std::remove_const_t<DataType>> + transpose(const SmallMatrix& A) { SmallMatrix<std::remove_const_t<DataType>> A_transpose{A.m_nb_columns, A.m_nb_rows}; for (size_t i = 0; i < A.m_nb_rows; ++i) { @@ -52,14 +55,16 @@ class [[nodiscard]] SmallMatrix // LCOV_EXCL_LINE return A_transpose; } - friend PUGS_INLINE SmallMatrix operator*(const DataType& a, const SmallMatrix& A) + friend PUGS_INLINE SmallMatrix + operator*(const DataType& a, const SmallMatrix& A) { SmallMatrix<std::remove_const_t<DataType>> aA = copy(A); return aA *= a; } template <typename DataType2> - PUGS_INLINE SmallVector<std::remove_const_t<DataType>> operator*(const SmallVector<DataType2>& x) const + PUGS_INLINE SmallVector<std::remove_const_t<DataType>> + operator*(const SmallVector<DataType2>& x) const { static_assert(std::is_same_v<std::remove_const_t<DataType>, std::remove_const_t<DataType2>>, "incompatible data types"); @@ -77,7 +82,8 @@ class [[nodiscard]] SmallMatrix // LCOV_EXCL_LINE } template <typename DataType2> - PUGS_INLINE SmallMatrix<std::remove_const_t<DataType>> operator*(const SmallMatrix<DataType2>& B) const + PUGS_INLINE SmallMatrix<std::remove_const_t<DataType>> + operator*(const SmallMatrix<DataType2>& B) const { static_assert(std::is_same_v<std::remove_const_t<DataType>, std::remove_const_t<DataType2>>, "incompatible data types"); @@ -98,22 +104,25 @@ class [[nodiscard]] SmallMatrix // LCOV_EXCL_LINE } template <typename DataType2> - PUGS_INLINE SmallMatrix& operator/=(const DataType2& a) + PUGS_INLINE SmallMatrix& + operator/=(const DataType2& a) { const auto inv_a = 1. 
/ a; return (*this) *= inv_a; } template <typename DataType2> - PUGS_INLINE SmallMatrix& operator*=(const DataType2& a) + PUGS_INLINE SmallMatrix& + operator*=(const DataType2& a) { parallel_for( - m_values.size(), PUGS_LAMBDA(index_type i) { m_values[i] *= a; }); + m_values.size(), PUGS_CLASS_LAMBDA(index_type i) { m_values[i] *= a; }); return *this; } template <typename DataType2> - PUGS_INLINE SmallMatrix& operator-=(const SmallMatrix<DataType2>& B) + PUGS_INLINE SmallMatrix& + operator-=(const SmallMatrix<DataType2>& B) { static_assert(std::is_same_v<std::remove_const_t<DataType>, std::remove_const_t<DataType2>>, "incompatible data types"); @@ -121,12 +130,13 @@ class [[nodiscard]] SmallMatrix // LCOV_EXCL_LINE "cannot substract matrix: incompatible sizes"); parallel_for( - m_values.size(), PUGS_LAMBDA(index_type i) { m_values[i] -= B.m_values[i]; }); + m_values.size(), PUGS_CLASS_LAMBDA(index_type i) { m_values[i] -= B.m_values[i]; }); return *this; } template <typename DataType2> - PUGS_INLINE SmallMatrix& operator+=(const SmallMatrix<DataType2>& B) + PUGS_INLINE SmallMatrix& + operator+=(const SmallMatrix<DataType2>& B) { static_assert(std::is_same_v<std::remove_const_t<DataType>, std::remove_const_t<DataType2>>, "incompatible data types"); @@ -134,12 +144,13 @@ class [[nodiscard]] SmallMatrix // LCOV_EXCL_LINE "cannot add matrix: incompatible sizes"); parallel_for( - m_values.size(), PUGS_LAMBDA(index_type i) { m_values[i] += B.m_values[i]; }); + m_values.size(), PUGS_CLASS_LAMBDA(index_type i) { m_values[i] += B.m_values[i]; }); return *this; } template <typename DataType2> - PUGS_INLINE SmallMatrix<std::remove_const_t<DataType>> operator+(const SmallMatrix<DataType2>& B) const + PUGS_INLINE SmallMatrix<std::remove_const_t<DataType>> + operator+(const SmallMatrix<DataType2>& B) const { static_assert(std::is_same_v<std::remove_const_t<DataType>, std::remove_const_t<DataType2>>, "incompatible data types"); @@ -149,13 +160,14 @@ class [[nodiscard]] SmallMatrix // LCOV_EXCL_LINE SmallMatrix<std::remove_const_t<DataType>> sum{B.numberOfRows(), B.numberOfColumns()}; parallel_for( - m_values.size(), PUGS_LAMBDA(index_type i) { sum.m_values[i] = m_values[i] + B.m_values[i]; }); + m_values.size(), PUGS_CLASS_LAMBDA(index_type i) { sum.m_values[i] = m_values[i] + B.m_values[i]; }); return sum; } template <typename DataType2> - PUGS_INLINE SmallMatrix<std::remove_const_t<DataType>> operator-(const SmallMatrix<DataType2>& B) const + PUGS_INLINE SmallMatrix<std::remove_const_t<DataType>> + operator-(const SmallMatrix<DataType2>& B) const { static_assert(std::is_same_v<std::remove_const_t<DataType>, std::remove_const_t<DataType2>>, "incompatible data types"); @@ -165,53 +177,60 @@ class [[nodiscard]] SmallMatrix // LCOV_EXCL_LINE SmallMatrix<std::remove_const_t<DataType>> difference{B.numberOfRows(), B.numberOfColumns()}; parallel_for( - m_values.size(), PUGS_LAMBDA(index_type i) { difference.m_values[i] = m_values[i] - B.m_values[i]; }); + m_values.size(), PUGS_CLASS_LAMBDA(index_type i) { difference.m_values[i] = m_values[i] - B.m_values[i]; }); return difference; } PUGS_INLINE - DataType& operator()(index_type i, index_type j) const noexcept(NO_ASSERT) + DataType& + operator()(index_type i, index_type j) const noexcept(NO_ASSERT) { Assert(i < m_nb_rows and j < m_nb_columns, "cannot access element: invalid indices"); return m_values[i * m_nb_columns + j]; } PUGS_INLINE - size_t numberOfRows() const noexcept + size_t + numberOfRows() const noexcept { return m_nb_rows; } PUGS_INLINE - size_t 
numberOfColumns() const noexcept + size_t + numberOfColumns() const noexcept { return m_nb_columns; } - PUGS_INLINE void fill(const DataType& value) noexcept + PUGS_INLINE void + fill(const DataType& value) noexcept { m_values.fill(value); } - PUGS_INLINE SmallMatrix& operator=(ZeroType) noexcept + PUGS_INLINE SmallMatrix& + operator=(ZeroType) noexcept { m_values.fill(0); return *this; } - PUGS_INLINE SmallMatrix& operator=(IdentityType) noexcept(NO_ASSERT) + PUGS_INLINE SmallMatrix& + operator=(IdentityType) noexcept(NO_ASSERT) { Assert(m_nb_rows == m_nb_columns, "identity must be a square matrix"); m_values.fill(0); parallel_for( - m_nb_rows, PUGS_LAMBDA(const index_type i) { m_values[i * m_nb_rows + i] = 1; }); + m_nb_rows, PUGS_CLASS_LAMBDA(const index_type i) { m_values[i * m_nb_rows + i] = 1; }); return *this; } template <typename DataType2> - PUGS_INLINE SmallMatrix& operator=(const SmallMatrix<DataType2>& A) noexcept + PUGS_INLINE SmallMatrix& + operator=(const SmallMatrix<DataType2>& A) noexcept { // ensures that DataType is the same as source DataType2 static_assert(std::is_same<std::remove_const_t<DataType>, std::remove_const_t<DataType2>>(), @@ -232,7 +251,8 @@ class [[nodiscard]] SmallMatrix // LCOV_EXCL_LINE PUGS_INLINE SmallMatrix& operator=(SmallMatrix&&) = default; - friend std::ostream& operator<<(std::ostream& os, const SmallMatrix& A) + friend std::ostream& + operator<<(std::ostream& os, const SmallMatrix& A) { for (size_t i = 0; i < A.numberOfRows(); ++i) { os << i << '|'; @@ -259,7 +279,7 @@ class [[nodiscard]] SmallMatrix // LCOV_EXCL_LINE SmallMatrix(const SmallMatrix&) = default; - SmallMatrix(SmallMatrix &&) = default; + SmallMatrix(SmallMatrix&&) = default; explicit SmallMatrix(size_t nb_rows, size_t nb_columns, const SmallArray<DataType>& values) : m_nb_rows{nb_rows}, m_nb_columns{nb_columns}, m_values{values} diff --git a/src/algebra/SmallVector.hpp b/src/algebra/SmallVector.hpp index d4a0bf84268bdf1d164cf6216153117adafaab56..296b8be71ce8b20346174ce40f41edeb78f1b1c2 100644 --- a/src/algebra/SmallVector.hpp +++ b/src/algebra/SmallVector.hpp @@ -58,7 +58,7 @@ class SmallVector // LCOV_EXCL_LINE { SmallVector<std::remove_const_t<DataType>> opposite(this->size()); parallel_for( - opposite.size(), PUGS_LAMBDA(index_type i) { opposite.m_values[i] = -m_values[i]; }); + opposite.size(), PUGS_CLASS_LAMBDA(index_type i) { opposite.m_values[i] = -m_values[i]; }); return opposite; } @@ -98,7 +98,7 @@ class SmallVector // LCOV_EXCL_LINE operator*=(const DataType2& a) { parallel_for( - this->size(), PUGS_LAMBDA(index_type i) { m_values[i] *= a; }); + this->size(), PUGS_CLASS_LAMBDA(index_type i) { m_values[i] *= a; }); return *this; } @@ -109,7 +109,7 @@ class SmallVector // LCOV_EXCL_LINE Assert(this->size() == y.size(), "cannot substract vector: incompatible sizes"); parallel_for( - this->size(), PUGS_LAMBDA(index_type i) { m_values[i] -= y[i]; }); + this->size(), PUGS_CLASS_LAMBDA(index_type i) { m_values[i] -= y[i]; }); return *this; } @@ -121,7 +121,7 @@ class SmallVector // LCOV_EXCL_LINE Assert(this->size() == y.size(), "cannot add vector: incompatible sizes"); parallel_for( - this->size(), PUGS_LAMBDA(index_type i) { m_values[i] += y[i]; }); + this->size(), PUGS_CLASS_LAMBDA(index_type i) { m_values[i] += y[i]; }); return *this; } @@ -134,7 +134,7 @@ class SmallVector // LCOV_EXCL_LINE SmallVector<std::remove_const_t<DataType>> sum{y.size()}; parallel_for( - this->size(), PUGS_LAMBDA(index_type i) { sum.m_values[i] = m_values[i] + y[i]; }); + this->size(), 
PUGS_CLASS_LAMBDA(index_type i) { sum.m_values[i] = m_values[i] + y[i]; }); return sum; } @@ -147,7 +147,7 @@ class SmallVector // LCOV_EXCL_LINE SmallVector<std::remove_const_t<DataType>> sum{y.size()}; parallel_for( - this->size(), PUGS_LAMBDA(index_type i) { sum.m_values[i] = m_values[i] - y[i]; }); + this->size(), PUGS_CLASS_LAMBDA(index_type i) { sum.m_values[i] = m_values[i] - y[i]; }); return sum; } diff --git a/src/algebra/Vector.hpp b/src/algebra/Vector.hpp index 749a795a489f12a671f4ecfd452104702ba68147..e034429e94ea732cf22fd39c56d2826faf8322d5 100644 --- a/src/algebra/Vector.hpp +++ b/src/algebra/Vector.hpp @@ -59,7 +59,7 @@ class Vector // LCOV_EXCL_LINE { Vector<std::remove_const_t<DataType>> opposite(this->size()); parallel_for( - opposite.size(), PUGS_LAMBDA(index_type i) { opposite.m_values[i] = -m_values[i]; }); + opposite.size(), PUGS_CLASS_LAMBDA(index_type i) { opposite.m_values[i] = -m_values[i]; }); return opposite; } @@ -98,7 +98,7 @@ class Vector // LCOV_EXCL_LINE operator*=(const DataType2& a) { parallel_for( - this->size(), PUGS_LAMBDA(index_type i) { m_values[i] *= a; }); + this->size(), PUGS_CLASS_LAMBDA(index_type i) { m_values[i] *= a; }); return *this; } @@ -109,7 +109,7 @@ class Vector // LCOV_EXCL_LINE Assert(this->size() == y.size(), "cannot substract vector: incompatible sizes"); parallel_for( - this->size(), PUGS_LAMBDA(index_type i) { m_values[i] -= y[i]; }); + this->size(), PUGS_CLASS_LAMBDA(index_type i) { m_values[i] -= y[i]; }); return *this; } @@ -121,7 +121,7 @@ class Vector // LCOV_EXCL_LINE Assert(this->size() == y.size(), "cannot add vector: incompatible sizes"); parallel_for( - this->size(), PUGS_LAMBDA(index_type i) { m_values[i] += y[i]; }); + this->size(), PUGS_CLASS_LAMBDA(index_type i) { m_values[i] += y[i]; }); return *this; } @@ -134,7 +134,7 @@ class Vector // LCOV_EXCL_LINE Vector<std::remove_const_t<DataType>> sum{y.size()}; parallel_for( - this->size(), PUGS_LAMBDA(index_type i) { sum.m_values[i] = m_values[i] + y[i]; }); + this->size(), PUGS_CLASS_LAMBDA(index_type i) { sum.m_values[i] = m_values[i] + y[i]; }); return sum; } @@ -147,7 +147,7 @@ class Vector // LCOV_EXCL_LINE Vector<std::remove_const_t<DataType>> sum{y.size()}; parallel_for( - this->size(), PUGS_LAMBDA(index_type i) { sum.m_values[i] = m_values[i] - y[i]; }); + this->size(), PUGS_CLASS_LAMBDA(index_type i) { sum.m_values[i] = m_values[i] - y[i]; }); return sum; } diff --git a/src/language/utils/BuiltinFunctionEmbedder.hpp b/src/language/utils/BuiltinFunctionEmbedder.hpp index 251c39e6d5eb5dc46423e75dedea28947d1a29be..da9ab46a32a2f0570f6f685d7aad35dac886f461 100644 --- a/src/language/utils/BuiltinFunctionEmbedder.hpp +++ b/src/language/utils/BuiltinFunctionEmbedder.hpp @@ -106,7 +106,8 @@ class BuiltinFunctionEmbedderBase<FX(Args...)> : public IBuiltinFunctionEmbedder } template <size_t... 
I> - PUGS_INLINE std::vector<std::shared_ptr<const ASTNodeDataType>> _getCompoundDataTypes(std::index_sequence<I...>) const + PUGS_INLINE std::vector<std::shared_ptr<const ASTNodeDataType>> + _getCompoundDataTypes(std::index_sequence<I...>) const { std::vector<std::shared_ptr<const ASTNodeDataType>> compound_type_list; (compound_type_list.push_back(std::make_shared<ASTNodeDataType>(this->_getOneElementDataType<FX, I>())), ...); @@ -174,8 +175,8 @@ class BuiltinFunctionEmbedder }; template <typename T> -inline constexpr bool is_const_ref_or_non_ref = (std::is_const_v<T> and std::is_lvalue_reference_v<T>) or - (not std::is_lvalue_reference_v<T>); +inline constexpr bool is_const_ref_or_non_ref = + (std::is_const_v<T> and std::is_lvalue_reference_v<T>) or (not std::is_lvalue_reference_v<T>); template <typename FX, typename... Args> class BuiltinFunctionEmbedder<FX(Args...)> : public BuiltinFunctionEmbedderBase<FX(Args...)> @@ -283,7 +284,8 @@ class BuiltinFunctionEmbedder<FX(Args...)> : public BuiltinFunctionEmbedderBase< } template <size_t... I> - PUGS_INLINE std::vector<ASTNodeDataType> _getParameterDataTypes(std::index_sequence<I...>) const + PUGS_INLINE std::vector<ASTNodeDataType> + _getParameterDataTypes(std::index_sequence<I...>) const { std::vector<ASTNodeDataType> parameter_type_list; (parameter_type_list.push_back(this->template _getOneElementDataType<ArgsTuple, I>()), ...); @@ -298,9 +300,8 @@ class BuiltinFunctionEmbedder<FX(Args...)> : public BuiltinFunctionEmbedderBase< std::vector<DataVariant> vector_result; vector_result.reserve(std::tuple_size_v<decltype(tuple_result)>); - std:: - apply([&](auto&&... result) { ((vector_result.emplace_back(this->template _resultToDataVariant(result))), ...); }, - tuple_result); + std::apply([&](auto&&... result) { ((vector_result.emplace_back(this->_resultToDataVariant(result))), ...); }, + tuple_result); return AggregateDataVariant{std::move(vector_result)}; } @@ -338,7 +339,7 @@ class BuiltinFunctionEmbedder<FX(Args...)> : public BuiltinFunctionEmbedderBase< std::apply(m_f, t); return {}; } else { - return this->template _createHandler(std::apply(m_f, t)); + return this->_createHandler(std::apply(m_f, t)); } } @@ -375,9 +376,8 @@ class BuiltinFunctionEmbedder<FX(void)> : public BuiltinFunctionEmbedderBase<FX( std::vector<DataVariant> vector_result; vector_result.reserve(std::tuple_size_v<decltype(tuple_result)>); - std:: - apply([&](auto&&... result) { ((vector_result.emplace_back(this->template _resultToDataVariant(result))), ...); }, - tuple_result); + std::apply([&](auto&&... 
result) { ((vector_result.emplace_back(this->_resultToDataVariant(result))), ...); }, + tuple_result); return AggregateDataVariant{std::move(vector_result)}; } @@ -407,7 +407,7 @@ class BuiltinFunctionEmbedder<FX(void)> : public BuiltinFunctionEmbedderBase<FX( m_f(); return {}; } else { - return EmbeddedData(this->template _createHandler(m_f())); + return EmbeddedData(this->_createHandler(m_f())); } } diff --git a/src/mesh/Connectivity.cpp b/src/mesh/Connectivity.cpp index bd1e0d85b6f94d1d2d00326857853f4da75eacfe..65a8804950abad22fe2205569217cd8ff162b043 100644 --- a/src/mesh/Connectivity.cpp +++ b/src/mesh/Connectivity.cpp @@ -60,7 +60,7 @@ Connectivity<Dimension>::_buildFrom(const ConnectivityDescriptor& descriptor) const int rank = parallel::rank(); WeakCellValue<bool> cell_is_owned(*this); parallel_for( - this->numberOfCells(), PUGS_LAMBDA(CellId j) { cell_is_owned[j] = (m_cell_owner[j] == rank); }); + this->numberOfCells(), PUGS_CLASS_LAMBDA(CellId j) { cell_is_owned[j] = (m_cell_owner[j] == rank); }); m_cell_is_owned = cell_is_owned; } @@ -71,7 +71,7 @@ Connectivity<Dimension>::_buildFrom(const ConnectivityDescriptor& descriptor) const int rank = parallel::rank(); WeakNodeValue<bool> node_is_owned(*this, node_is_owned_array); parallel_for( - this->numberOfNodes(), PUGS_LAMBDA(NodeId r) { node_is_owned[r] = (m_node_owner[r] == rank); }); + this->numberOfNodes(), PUGS_CLASS_LAMBDA(NodeId r) { node_is_owned[r] = (m_node_owner[r] == rank); }); m_node_is_owned = node_is_owned; } @@ -122,7 +122,7 @@ Connectivity<Dimension>::_buildFrom(const ConnectivityDescriptor& descriptor) const int rank = parallel::rank(); WeakFaceValue<bool> face_is_owned(*this, face_is_owned_array); parallel_for( - this->numberOfFaces(), PUGS_LAMBDA(FaceId l) { face_is_owned[l] = (m_face_owner[l] == rank); }); + this->numberOfFaces(), PUGS_CLASS_LAMBDA(FaceId l) { face_is_owned[l] = (m_face_owner[l] == rank); }); m_face_is_owned = face_is_owned; } @@ -163,7 +163,7 @@ Connectivity<Dimension>::_buildFrom(const ConnectivityDescriptor& descriptor) const int rank = parallel::rank(); WeakEdgeValue<bool> edge_is_owned(*this); parallel_for( - this->numberOfEdges(), PUGS_LAMBDA(EdgeId e) { edge_is_owned[e] = (m_edge_owner[e] == rank); }); + this->numberOfEdges(), PUGS_CLASS_LAMBDA(EdgeId e) { edge_is_owned[e] = (m_edge_owner[e] == rank); }); m_edge_is_owned = edge_is_owned; } diff --git a/src/mesh/ItemValue.hpp b/src/mesh/ItemValue.hpp index da6bb88eb5a9d92dda08ee7ed535aae104639d95..077e97358ed1b71e9f15cbfa48533a2db3602499 100644 --- a/src/mesh/ItemValue.hpp +++ b/src/mesh/ItemValue.hpp @@ -69,7 +69,7 @@ class ItemValue copy_to(const ItemValue<DataType, item_type, ConnectivityPtr>& source, const ItemValue<std::remove_const_t<DataType>, item_type, ConnectivityPtr2>& destination) { - Assert(destination.connectivity_ptr() == source.connectivity_ptr(), "different connectivities"); + Assert(destination.m_connectivity_ptr->id() == source.m_connectivity_ptr->id(), "different connectivities"); copy_to(source.m_values, destination.m_values); } diff --git a/src/mesh/MeshData.hpp b/src/mesh/MeshData.hpp index 4fb87bc5ad4aee87c7dd92e9232ec4508145a14f..1dc4498b3a6ddb9c3c9ff42af4de4a32c39271d1 100644 --- a/src/mesh/MeshData.hpp +++ b/src/mesh/MeshData.hpp @@ -257,7 +257,7 @@ class MeshData<Mesh<Dimension>> CellValue<Rd> cell_centroid{m_mesh.connectivity()}; parallel_for( - m_mesh.numberOfCells(), PUGS_LAMBDA(CellId j) { + m_mesh.numberOfCells(), PUGS_CLASS_LAMBDA(CellId j) { const Rd xj = m_cell_iso_barycenter[j]; const auto& cell_faces = 
cell_to_face_matrix[j]; diff --git a/src/mesh/PrimalToDiamondDualConnectivityDataMapper.hpp b/src/mesh/PrimalToDiamondDualConnectivityDataMapper.hpp index a803732dbb59be501f2a6ae995c57505a6dc4b0a..7412a72a6e3aa63290421f823354bceef6f9f1ac 100644 --- a/src/mesh/PrimalToDiamondDualConnectivityDataMapper.hpp +++ b/src/mesh/PrimalToDiamondDualConnectivityDataMapper.hpp @@ -41,14 +41,14 @@ class PrimalToDiamondDualConnectivityDataMapper : public IPrimalToDualConnectivi "unexpected connectivity for dual NodeValue"); parallel_for( - m_primal_node_to_dual_node_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_node_to_dual_node_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_node_id, dual_node_id] = m_primal_node_to_dual_node_map[i]; dual_node_value[dual_node_id] = primal_node_value[primal_node_id]; }); parallel_for( - m_primal_cell_to_dual_node_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_cell_to_dual_node_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_cell_id, dual_node_id] = m_primal_cell_to_dual_node_map[i]; dual_node_value[dual_node_id] = primal_cell_value[primal_cell_id]; }); @@ -73,14 +73,14 @@ class PrimalToDiamondDualConnectivityDataMapper : public IPrimalToDualConnectivi "unexpected connectivity for dual NodeValue"); parallel_for( - m_primal_node_to_dual_node_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_node_to_dual_node_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_node_id, dual_node_id] = m_primal_node_to_dual_node_map[i]; primal_node_value[primal_node_id] = dual_node_value[dual_node_id]; }); parallel_for( - m_primal_cell_to_dual_node_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_cell_to_dual_node_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_cell_id, dual_node_id] = m_primal_cell_to_dual_node_map[i]; primal_cell_value[primal_cell_id] = dual_node_value[dual_node_id]; }); @@ -105,7 +105,7 @@ class PrimalToDiamondDualConnectivityDataMapper : public IPrimalToDualConnectivi using OriginFaceId = ItemIdT<origin_face_type>; parallel_for( - m_primal_face_to_dual_cell_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_face_to_dual_cell_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_face_id, dual_cell_id] = m_primal_face_to_dual_cell_map[i]; const OriginFaceId origin_face_id = static_cast<typename OriginFaceId::base_type>(primal_face_id); @@ -134,7 +134,7 @@ class PrimalToDiamondDualConnectivityDataMapper : public IPrimalToDualConnectivi using DestinationFaceId = ItemIdT<destination_face_type>; parallel_for( - m_primal_face_to_dual_cell_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_face_to_dual_cell_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_face_id, dual_cell_id] = m_primal_face_to_dual_cell_map[i]; const DestinationFaceId destination_face_id = diff --git a/src/mesh/PrimalToDual1DConnectivityDataMapper.hpp b/src/mesh/PrimalToDual1DConnectivityDataMapper.hpp index 22570810227cbff09c1a72c680f5d34c0f522a44..052559ef98fd5ed23db17237c44a725eb4753cbb 100644 --- a/src/mesh/PrimalToDual1DConnectivityDataMapper.hpp +++ b/src/mesh/PrimalToDual1DConnectivityDataMapper.hpp @@ -47,7 +47,7 @@ class PrimalToDual1DConnectivityDataMapper : public IPrimalToDualConnectivityDat using DestinationNodeId = ItemIdT<destination_node_type>; parallel_for( - m_primal_node_to_dual_node_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_node_to_dual_node_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_node_id, dual_node_id] = m_primal_node_to_dual_node_map[i]; DestinationNodeId destination_node_id = static_cast<typename 
DestinationNodeId::base_type>(dual_node_id); @@ -57,7 +57,7 @@ class PrimalToDual1DConnectivityDataMapper : public IPrimalToDualConnectivityDat }); parallel_for( - m_primal_cell_to_dual_node_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_cell_to_dual_node_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_cell_id, dual_node_id] = m_primal_cell_to_dual_node_map[i]; DestinationNodeId destination_node_id = static_cast<typename DestinationNodeId::base_type>(dual_node_id); @@ -95,7 +95,7 @@ class PrimalToDual1DConnectivityDataMapper : public IPrimalToDualConnectivityDat using DestinationNodeId = ItemIdT<destination_node_type>; parallel_for( - m_primal_node_to_dual_node_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_node_to_dual_node_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_node_id, dual_node_id] = m_primal_node_to_dual_node_map[i]; DestinationNodeId destination_node_id = static_cast<typename DestinationNodeId::base_type>(primal_node_id); @@ -105,7 +105,7 @@ class PrimalToDual1DConnectivityDataMapper : public IPrimalToDualConnectivityDat }); parallel_for( - m_primal_cell_to_dual_node_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_cell_to_dual_node_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_cell_id, dual_node_id] = m_primal_cell_to_dual_node_map[i]; OriginNodeId origin_node_id = static_cast<typename OriginNodeId::base_type>(dual_node_id); @@ -131,7 +131,7 @@ class PrimalToDual1DConnectivityDataMapper : public IPrimalToDualConnectivityDat using OriginNodeId = ItemIdT<origin_node_type>; parallel_for( - m_primal_node_to_dual_cell_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_node_to_dual_cell_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_node_id, dual_cell_id] = m_primal_node_to_dual_cell_map[i]; const OriginNodeId origin_node_id = static_cast<typename OriginNodeId::base_type>(primal_node_id); @@ -158,7 +158,7 @@ class PrimalToDual1DConnectivityDataMapper : public IPrimalToDualConnectivityDat "unexpected connectivity for dual CellValue"); parallel_for( - m_primal_node_to_dual_cell_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_node_to_dual_cell_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_node_id, dual_cell_id] = m_primal_node_to_dual_cell_map[i]; const DestinationNodeId destination_node_id = diff --git a/src/mesh/PrimalToMedianDualConnectivityDataMapper.hpp b/src/mesh/PrimalToMedianDualConnectivityDataMapper.hpp index 366508eb3e6b09026529b9ff75b9d822e98e4b1f..22628812eea19e29ca9c6f22b88dac991f2a18a2 100644 --- a/src/mesh/PrimalToMedianDualConnectivityDataMapper.hpp +++ b/src/mesh/PrimalToMedianDualConnectivityDataMapper.hpp @@ -53,7 +53,7 @@ class PrimalToMedianDualConnectivityDataMapper : public IPrimalToDualConnectivit "unexpected connectivity for dual NodeValue"); parallel_for( - m_primal_boundary_node_to_dual_node_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_boundary_node_to_dual_node_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_node_id, dual_node_id] = m_primal_boundary_node_to_dual_node_map[i]; dual_node_value[dual_node_id] = primal_node_value[primal_node_id]; @@ -62,7 +62,7 @@ class PrimalToMedianDualConnectivityDataMapper : public IPrimalToDualConnectivit using OriginFaceId = ItemIdT<origin_face_type>; parallel_for( - m_primal_face_to_dual_node_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_face_to_dual_node_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_face_id, dual_node_id] = m_primal_face_to_dual_node_map[i]; const OriginFaceId origin_face_id = static_cast<typename 
OriginFaceId::base_type>(primal_face_id); @@ -71,7 +71,7 @@ class PrimalToMedianDualConnectivityDataMapper : public IPrimalToDualConnectivit }); parallel_for( - m_primal_cell_to_dual_node_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_cell_to_dual_node_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_cell_id, dual_node_id] = m_primal_cell_to_dual_node_map[i]; dual_node_value[dual_node_id] = primal_cell_value[primal_cell_id]; @@ -110,7 +110,7 @@ class PrimalToMedianDualConnectivityDataMapper : public IPrimalToDualConnectivit "unexpected connectivity for dual NodeValue"); parallel_for( - m_primal_boundary_node_to_dual_node_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_boundary_node_to_dual_node_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_node_id, dual_node_id] = m_primal_boundary_node_to_dual_node_map[i]; primal_node_value[primal_node_id] = dual_node_value[dual_node_id]; @@ -119,7 +119,7 @@ class PrimalToMedianDualConnectivityDataMapper : public IPrimalToDualConnectivit using DestinationFaceId = ItemIdT<destination_face_type>; parallel_for( - m_primal_face_to_dual_node_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_face_to_dual_node_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_face_id, dual_node_id] = m_primal_face_to_dual_node_map[i]; const DestinationFaceId destination_face_id = @@ -129,7 +129,7 @@ class PrimalToMedianDualConnectivityDataMapper : public IPrimalToDualConnectivit }); parallel_for( - m_primal_cell_to_dual_node_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_cell_to_dual_node_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_cell_id, dual_node_id] = m_primal_cell_to_dual_node_map[i]; primal_cell_value[primal_cell_id] = dual_node_value[dual_node_id]; @@ -150,7 +150,7 @@ class PrimalToMedianDualConnectivityDataMapper : public IPrimalToDualConnectivit "unexpected connectivity for dual CellValue"); parallel_for( - m_primal_node_to_dual_cell_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_node_to_dual_cell_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_node_id, dual_cell_id] = m_primal_node_to_dual_cell_map[i]; dual_cell_value[dual_cell_id] = primal_node_value[primal_node_id]; @@ -171,7 +171,7 @@ class PrimalToMedianDualConnectivityDataMapper : public IPrimalToDualConnectivit "unexpected connectivity for dual CellValue"); parallel_for( - m_primal_node_to_dual_cell_map.size(), PUGS_LAMBDA(size_t i) { + m_primal_node_to_dual_cell_map.size(), PUGS_CLASS_LAMBDA(size_t i) { const auto [primal_face_id, dual_cell_id] = m_primal_node_to_dual_cell_map[i]; primal_node_value[primal_face_id] = dual_cell_value[dual_cell_id]; diff --git a/src/output/GnuplotWriter.cpp b/src/output/GnuplotWriter.cpp index 83e2d8effaae57605d0bcf6f09f0a521c6524ff7..5bc70494c2d6dab1f6d0d243acc60ad229c07b1c 100644 --- a/src/output/GnuplotWriter.cpp +++ b/src/output/GnuplotWriter.cpp @@ -188,12 +188,12 @@ GnuplotWriter::_write(const MeshType& mesh, for (const auto& [name, item_data_variant] : output_named_item_data_set) { std::visit( - [&, name = name](auto&& item_data) { + [&, var_name = name](auto&& item_data) { using ItemDataType = std::decay_t<decltype(item_data)>; if (ItemDataType::item_t == ItemType::face) { std::ostringstream error_msg; - error_msg << "gnuplot_writer does not support face data, cannot save variable \"" << rang::fgB::yellow << name - << rang::fg::reset << '"'; + error_msg << "gnuplot_writer does not support face data, cannot save variable \"" << rang::fgB::yellow + << var_name << rang::fg::reset << '"'; throw 
NormalError(error_msg.str()); } }, @@ -202,12 +202,12 @@ GnuplotWriter::_write(const MeshType& mesh, for (const auto& [name, item_data_variant] : output_named_item_data_set) { std::visit( - [&, name = name](auto&& item_data) { + [&, var_name = name](auto&& item_data) { using ItemDataType = std::decay_t<decltype(item_data)>; if (ItemDataType::item_t == ItemType::edge) { std::ostringstream error_msg; - error_msg << "gnuplot_writer does not support edge data, cannot save variable \"" << rang::fgB::yellow << name - << rang::fg::reset << '"'; + error_msg << "gnuplot_writer does not support edge data, cannot save variable \"" << rang::fgB::yellow + << var_name << rang::fg::reset << '"'; throw NormalError(error_msg.str()); } }, diff --git a/src/output/GnuplotWriter1D.cpp b/src/output/GnuplotWriter1D.cpp index 799a8d51d7dceafeea7f3555cbd7d73088d116c0..bc85f5e4caabb07b838c3cb48cd206d1ab271ebc 100644 --- a/src/output/GnuplotWriter1D.cpp +++ b/src/output/GnuplotWriter1D.cpp @@ -183,11 +183,11 @@ GnuplotWriter1D::_write(const MeshType& mesh, for (const auto& [name, item_data_variant] : output_named_item_data_set) { std::visit( - [&, name = name](auto&& item_data) { + [&, var_name = name](auto&& item_data) { if (this->_is_face_data(item_data)) { std::ostringstream error_msg; error_msg << "gnuplot_1d_writer does not support face data, cannot save variable \"" << rang::fgB::yellow - << name << rang::fg::reset << '"'; + << var_name << rang::fg::reset << '"'; throw NormalError(error_msg.str()); } }, @@ -196,11 +196,11 @@ GnuplotWriter1D::_write(const MeshType& mesh, for (const auto& [name, item_data_variant] : output_named_item_data_set) { std::visit( - [&, name = name](auto&& item_data) { + [&, var_name = name](auto&& item_data) { if (this->_is_edge_data(item_data)) { std::ostringstream error_msg; error_msg << "gnuplot_1d_writer does not support edge data, cannot save variable \"" << rang::fgB::yellow - << name << rang::fg::reset << '"'; + << var_name << rang::fg::reset << '"'; throw NormalError(error_msg.str()); } }, diff --git a/src/output/GnuplotWriterRaw.cpp b/src/output/GnuplotWriterRaw.cpp index ac93ba0029f3d73583c98d62c85beb7901f3569f..9e266851e2f57f9d4e1a4c106eb53b09690e0fee 100644 --- a/src/output/GnuplotWriterRaw.cpp +++ b/src/output/GnuplotWriterRaw.cpp @@ -163,11 +163,11 @@ GnuplotWriterRaw::_write(const MeshType& mesh, for (const auto& [name, item_data_variant] : output_named_item_data_set) { std::visit( - [&, name = name](auto&& item_data) { + [&, var_name = name](auto&& item_data) { if (this->_is_face_data(item_data)) { std::ostringstream error_msg; error_msg << "gnuplot_raw_writer does not support face data, cannot save variable \"" << rang::fgB::yellow - << name << rang::fg::reset << '"'; + << var_name << rang::fg::reset << '"'; throw NormalError(error_msg.str()); } }, @@ -176,11 +176,11 @@ GnuplotWriterRaw::_write(const MeshType& mesh, for (const auto& [name, item_data_variant] : output_named_item_data_set) { std::visit( - [&, name = name](auto&& item_data) { + [&, var_name = name](auto&& item_data) { if (this->_is_edge_data(item_data)) { std::ostringstream error_msg; error_msg << "gnuplot_1d_writer does not support edge data, cannot save variable \"" << rang::fgB::yellow - << name << rang::fg::reset << '"'; + << var_name << rang::fg::reset << '"'; throw NormalError(error_msg.str()); } }, diff --git a/src/output/VTKWriter.cpp b/src/output/VTKWriter.cpp index 3a7f439429a49ac5e7da27ddf6b7e92e2069d1ed..64d35434462e5980a11b69ea2257ce779b3ec581 100644 --- a/src/output/VTKWriter.cpp +++ 
b/src/output/VTKWriter.cpp @@ -396,8 +396,9 @@ VTKWriter::_write(const MeshType& mesh, << "\">\n"; fout << "<CellData>\n"; for (const auto& [name, item_value_variant] : output_named_item_data_set) { - std::visit([&, name = name]( - auto&& item_value) { return this->_write_cell_data(fout, name, item_value, serialize_data_list); }, + std::visit([&, var_name = name]( + auto&& + item_value) { return this->_write_cell_data(fout, var_name, item_value, serialize_data_list); }, item_value_variant); } if (parallel::size() > 1) { @@ -412,8 +413,9 @@ VTKWriter::_write(const MeshType& mesh, fout << "</CellData>\n"; fout << "<PointData>\n"; for (const auto& [name, item_value_variant] : output_named_item_data_set) { - std::visit([&, name = name]( - auto&& item_value) { return this->_write_node_data(fout, name, item_value, serialize_data_list); }, + std::visit([&, var_name = name]( + auto&& + item_value) { return this->_write_node_data(fout, var_name, item_value, serialize_data_list); }, item_value_variant); } fout << "</PointData>\n"; @@ -622,18 +624,18 @@ VTKWriter::_write(const MeshType& mesh, for (const auto& [name, item_value_variant] : output_named_item_data_set) { std::visit( - [&, name = name](auto&& item_value) { + [&, var_name = name](auto&& item_value) { using ItemValueType = std::decay_t<decltype(item_value)>; if constexpr (ItemValueType::item_t == ItemType::edge) { std::ostringstream error_msg; - error_msg << "VTK format does not support edge data, cannot save variable \"" << rang::fgB::yellow << name - << rang::fg::reset << '"'; + error_msg << "VTK format does not support edge data, cannot save variable \"" << rang::fgB::yellow + << var_name << rang::fg::reset << '"'; throw NormalError(error_msg.str()); } if constexpr (ItemValueType::item_t == ItemType::face) { std::ostringstream error_msg; - error_msg << "VTK format does not support face data, cannot save variable \"" << rang::fgB::yellow << name - << rang::fg::reset << '"'; + error_msg << "VTK format does not support face data, cannot save variable \"" << rang::fgB::yellow + << var_name << rang::fg::reset << '"'; throw NormalError(error_msg.str()); } }, @@ -642,14 +644,14 @@ VTKWriter::_write(const MeshType& mesh, fout << "<PPointData>\n"; for (const auto& [name, item_value_variant] : output_named_item_data_set) { - std::visit([&, name = name](auto&& item_value) { return this->_write_node_pvtu(fout, name, item_value); }, + std::visit([&, var_name = name](auto&& item_value) { return this->_write_node_pvtu(fout, var_name, item_value); }, item_value_variant); } fout << "</PPointData>\n"; fout << "<PCellData>\n"; for (const auto& [name, item_value_variant] : output_named_item_data_set) { - std::visit([&, name = name](auto&& item_value) { return this->_write_cell_pvtu(fout, name, item_value); }, + std::visit([&, var_name = name](auto&& item_value) { return this->_write_cell_pvtu(fout, var_name, item_value); }, item_value_variant); } if (parallel::size() > 1) { diff --git a/src/scheme/DiscreteFunctionVectorIntegrator.cpp b/src/scheme/DiscreteFunctionVectorIntegrator.cpp index fccc6f79db41dd61cd6feea80baae7c8021487d3..0fa08f21498154a2023875548bcd763c5c6a8255 100644 --- a/src/scheme/DiscreteFunctionVectorIntegrator.cpp +++ b/src/scheme/DiscreteFunctionVectorIntegrator.cpp @@ -47,9 +47,8 @@ DiscreteFunctionVectorIntegrator::_integrateOnZoneList() const } Table<const DataType> table = - IntegrateCellArray<DataType(TinyVector<Dimension>)>::template integrate(m_function_id_list, - *m_quadrature_descriptor, *p_mesh, - zone_cell_list); + 
      IntegrateCellArray<DataType(TinyVector<Dimension>)>::integrate(m_function_id_list, *m_quadrature_descriptor,
+                                                                     *p_mesh, zone_cell_list);
 
     CellArray<DataType> cell_array{p_mesh->connectivity(), m_function_id_list.size()};
     if constexpr (is_tiny_vector_v<DataType> or is_tiny_matrix_v<DataType>) {
@@ -78,9 +77,8 @@ DiscreteFunctionVectorIntegrator::_integrateGlobally() const
     constexpr size_t Dimension = MeshType::Dimension;
 
     return DiscreteFunctionP0Vector<
-      DataType>(m_mesh,
-                IntegrateCellArray<DataType(TinyVector<Dimension>)>::template integrate(m_function_id_list,
-                                                                                        *m_quadrature_descriptor, *mesh));
+      DataType>(m_mesh, IntegrateCellArray<DataType(TinyVector<Dimension>)>::integrate(m_function_id_list,
+                                                                                       *m_quadrature_descriptor, *mesh));
   }
 
 template <MeshConcept MeshType, typename DataType>
diff --git a/src/scheme/FluxingAdvectionSolver.cpp b/src/scheme/FluxingAdvectionSolver.cpp
index a2ebb2b256a3957169cf52457fd87a3d3236e8f3..1dec92a7dcc8e5e0c281d5d0aaa33bce9872b128 100644
--- a/src/scheme/FluxingAdvectionSolver.cpp
+++ b/src/scheme/FluxingAdvectionSolver.cpp
@@ -410,7 +410,7 @@ FluxingAdvectionSolver<MeshType>::_computeCycleNumber(FaceValue<double> fluxing_
   total_negative_flux.fill(0);
 
   parallel_for(
-    m_old_mesh->numberOfCells(), PUGS_LAMBDA(CellId cell_id) {
+    m_old_mesh->numberOfCells(), PUGS_CLASS_LAMBDA(CellId cell_id) {
       const auto& cell_to_face = cell_to_face_matrix[cell_id];
       for (size_t i_face = 0; i_face < cell_to_face.size(); ++i_face) {
         FaceId face_id = cell_to_face[i_face];
@@ -486,7 +486,7 @@ FluxingAdvectionSolver<MeshType>::_remapOne(const CellValue<const double>& step_
 
   // First we deal with inner faces
   parallel_for(
-    m_new_mesh->numberOfCells(), PUGS_LAMBDA(CellId cell_id) {
+    m_new_mesh->numberOfCells(), PUGS_CLASS_LAMBDA(CellId cell_id) {
       const auto& cell_to_face = cell_to_face_matrix[cell_id];
       for (size_t i_face = 0; i_face < cell_to_face.size(); ++i_face) {
         const FaceId face_id = cell_to_face[i_face];
@@ -653,7 +653,7 @@ FluxingAdvectionSolver<MeshType>::_remapAllQuantities()
   }
 
   parallel_for(
-    m_new_mesh->numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
+    m_new_mesh->numberOfCells(), PUGS_CLASS_LAMBDA(const CellId cell_id) {
       const auto& cell_to_face = cell_to_face_matrix[cell_id];
       for (size_t i_face = 0; i_face < cell_to_face.size(); ++i_face) {
         const FaceId face_id = cell_to_face[i_face];
diff --git a/src/utils/PugsMacros.hpp b/src/utils/PugsMacros.hpp
index 7248bad4b843106b02d1fa5a48cee46703f932ba..541646748a976d8b518b7ff006c76691850ed346 100644
--- a/src/utils/PugsMacros.hpp
+++ b/src/utils/PugsMacros.hpp
@@ -9,7 +9,7 @@
 #define PUGS_FORCEINLINE KOKKOS_FORCEINLINE_FUNCTION
 
 #define PUGS_LAMBDA KOKKOS_LAMBDA
-#define PUGS_CLASS_LAMBDA KOKKOS_CLASS_LAMBDA
+#define PUGS_CLASS_LAMBDA [ =, this ]
 
 // Sets macro to ignore unknown pragma
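
The driver for most of these hunks is the last one: PUGS_CLASS_LAMBDA is now spelled `[ =, this ]`, and every `parallel_for` body that touches class members switches from PUGS_LAMBDA (which wraps KOKKOS_LAMBDA, essentially a plain `[=]` capture) to PUGS_CLASS_LAMBDA. Below is a minimal sketch of the underlying C++ rule, not taken from the patch and assuming a C++20 compiler; `Scaler` and its members are illustrative names only.

```cpp
// Minimal sketch (illustrative, not PUGS code): why member-referencing
// kernels want an explicit `this` capture under C++20.
#include <cstddef>
#include <iostream>
#include <vector>

struct Scaler
{
  std::vector<double> m_values{1., 2., 3.};

  void
  scale(double a)
  {
    // With `[=]` alone, `m_values[i]` relies on an *implicit* capture of
    // `this`, which C++20 deprecates. `[=, this]` states the capture
    // explicitly, which is what the new PUGS_CLASS_LAMBDA expands to.
    auto kernel = [=, this](size_t i) { m_values[i] *= a; };

    for (size_t i = 0; i < m_values.size(); ++i) {
      kernel(i);
    }
  }
};

int
main()
{
  Scaler s;
  s.scale(2.);
  std::cout << s.m_values[2] << '\n';   // prints 6
}
```

Capturing the `this` pointer (rather than `*this` by value) also avoids copying the enclosing object into every kernel, which is presumably why the macro is redefined locally instead of keeping KOKKOS_CLASS_LAMBDA.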
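
Similarly, the writer changes (`[&, name = name]` becoming `[&, var_name = name]` in the Gnuplot and VTK writers) only rename the lambda's init-capture so it no longer shadows the `name` structured binding of the surrounding range-for, presumably to silence shadowing warnings. The init-capture itself has to stay, since a structured binding cannot be captured directly before C++20. A small sketch, again with illustrative names and not taken from the patch:

```cpp
// Minimal sketch (illustrative): init-capturing a structured binding under a
// distinct name so the capture does not shadow it.
#include <iostream>
#include <map>
#include <string>

int
main()
{
  std::map<std::string, int> named_data{{"rho", 1}, {"velocity", 2}};

  int visited = 0;
  for (const auto& [name, id] : named_data) {
    // `name` itself cannot appear in the capture list in C++17; the
    // init-capture copies it, and calling the copy `var_name` keeps it from
    // shadowing the structured binding.
    auto log = [&, var_name = name]() {
      ++visited;                       // reached through the `&` default capture
      std::cout << var_name << '\n';   // copy of the structured binding
    };
    log();
  }
  std::cout << visited << " entries\n";
}
```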