diff --git a/doc/userdoc.org b/doc/userdoc.org
index 8901f2d93968259a3f8b8dc3b718d35264673b2c..243fbde6af57b702f741d211bc9cb734ebb73364 100644
--- a/doc/userdoc.org
+++ b/doc/userdoc.org
@@ -2976,6 +2976,20 @@ $\mathbb{R}^{1\times1}$, $\mathbb{R}^{2\times2}$ and $\mathbb{R}^{3\times3}$.
 The output is
 #+RESULTS: trace-examples
 
+The ~doubleDot~ functions compute the double-dot product ($A:B = \mathrm{tr}(A^T
+B)$) of two matrices of $\mathbb{R}^{1\times1}$, $\mathbb{R}^{2\times2}$ and
+$\mathbb{R}^{3\times3}$.
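+For instance, for the $2\times2$ matrices used below, $A:B = 1\cdot3 + 2\cdot6 +
+3\cdot2 + 4\cdot5 = 41$.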
+#+NAME: double-dot-examples
+#+BEGIN_SRC pugs :exports both :results output
+   import math;
+   cout << "doubleDot([[1.2]],[[2.3]]) = " << doubleDot([[1.2]],[[2.3]]) << "\n";
+   cout << "doubleDot([[1,2],[3,4]],[[3,6],[2,5]]) = " << doubleDot([[1,2],[3,4]],[[3,6],[2,5]]) << "\n";
+   cout << "doubleDot([[1,2,3],[4,5,6],[7,8,9]], [[5,2,1],[4,2,8],[2,6,2]]) = "
+        << doubleDot([[1,2,3],[4,5,6],[7,8,9]], [[5,2,1],[4,2,8],[2,6,2]]) << "\n";
+#+END_SRC
+The output is
+#+RESULTS: double-dot-examples
+
 Also, one can compute inverses of $\mathbb{R}^{1\times1}$,
 $\mathbb{R}^{2\times2}$ and $\mathbb{R}^{3\times3}$ matrices using the
 ~inverse~ function set.
@@ -2984,7 +2998,7 @@ $\mathbb{R}^{2\times2}$ and $\mathbb{R}^{3\times3}$ matrices using the
    import math;
    cout << "inverse([[1.2]]) = " << inverse([[1.2]]) << "\n";
    cout << "inverse([[1,2],[3,4]]) = " << inverse([[1,2],[3,4]]) << "\n";
-   cout << "inverse([[3,2,1],[5,6,4],[7,8,9]]) = "
+   cout << "inverse([[3,2,1],[5,6,4],[7,8,9]])\n = "
         << inverse([[3,2,1],[5,6,4],[7,8,9]]) << "\n";
 #+END_SRC
 The output is
@@ -3674,6 +3688,13 @@ This function requires that both arguments are defined on the same
 mesh and have the same data type. The result is a
 $\mathbb{P}_0(\mathbb{R})$ function.
 
+Finally, an equivalent function exists for discrete functions of matrices; it
+applies to $\mathbb{P}_0(\mathbb{R}^{1\times1})$, $\mathbb{P}_0(\mathbb{R}^{2\times2})$ and
+$\mathbb{P}_0(\mathbb{R}^{3\times3})$ data
+- ~doubleDot: Vh*Vh -> Vh~
+Both arguments must be defined on the same mesh and have the same data
+type. The result is a $\mathbb{P}_0(\mathbb{R})$ function.
+
 ****** ~R*Vh -> Vh~ and ~Vh*R -> Vh~
 
 These functions are defined for $\mathbb{P}_0(\mathbb{R})$ data and the
@@ -3717,6 +3738,33 @@ The following functions
 - ~dot: Rˆ3*Vh -> Vh~
 - ~dot: Vh*Rˆ3 -> Vh~
 
+****** ~R^1x1*Vh -> Vh~ and ~Vh*R^1x1 -> Vh~
+
+These functions are defined for $\mathbb{P}_0(\mathbb{R}^{1\times1})$ data and the
+return value is a $\mathbb{P}_0(\mathbb{R})$ function.
+
+The following functions
+- ~doubleDot: Rˆ1x1*Vh -> Vh~
+- ~doubleDot: Vh*Rˆ1x1 -> Vh~
+
+****** ~R^2x2*Vh -> Vh~ and ~Vh*R^2x2 -> Vh~
+
+These functions are defined for $\mathbb{P}_0(\mathbb{R}^{2\times2})$ data and the
+return value is a $\mathbb{P}_0(\mathbb{R})$ function.
+
+The following functions
+- ~doubleDot: Rˆ2x2*Vh -> Vh~
+- ~doubleDot: Vh*Rˆ2x2 -> Vh~
+
+****** ~R^3x3*Vh -> Vh~ and ~Vh*R^3x3 -> Vh~
+
+These functions are defined for $\mathbb{P}_0(\mathbb{R}^{3\times3})$ data and the
+return value is a $\mathbb{P}_0(\mathbb{R})$ function.
+
+The following functions
+- ~doubleDot: Rˆ3x3*Vh -> Vh~
+- ~doubleDot: Vh*Rˆ3x3 -> Vh~
+
 ******  ~Vh -> R~
 
 These functions are defined for $\mathbb{P}_0(\mathbb{R})$ data and the
diff --git a/src/algebra/TinyMatrix.hpp b/src/algebra/TinyMatrix.hpp
index 94590ff656784d018e9ca57991674da773d64568..212e5d948315fa07f4c056cbb863d1b575699ac0 100644
--- a/src/algebra/TinyMatrix.hpp
+++ b/src/algebra/TinyMatrix.hpp
@@ -56,15 +56,13 @@ class [[nodiscard]] TinyMatrix
   }
 
  public:
-  PUGS_INLINE
-  constexpr bool
+  [[nodiscard]] PUGS_INLINE constexpr bool
   isSquare() const noexcept
   {
     return M == N;
   }
 
-  PUGS_INLINE
-  constexpr friend TinyMatrix<N, M, T>
+  [[nodiscard]] PUGS_INLINE constexpr friend TinyMatrix<N, M, T>
   transpose(const TinyMatrix& A)
   {
     TinyMatrix<N, M, T> tA;
@@ -76,36 +74,31 @@ class [[nodiscard]] TinyMatrix
     return tA;
   }
 
-  PUGS_INLINE
-  constexpr size_t
+  [[nodiscard]] PUGS_INLINE constexpr size_t
   dimension() const
   {
     return M * N;
   }
 
-  PUGS_INLINE
-  constexpr size_t
+  [[nodiscard]] PUGS_INLINE constexpr size_t
   numberOfValues() const
   {
     return this->dimension();
   }
 
-  PUGS_INLINE
-  constexpr size_t
+  [[nodiscard]] PUGS_INLINE constexpr size_t
   numberOfRows() const
   {
     return M;
   }
 
-  PUGS_INLINE
-  constexpr size_t
+  [[nodiscard]] PUGS_INLINE constexpr size_t
   numberOfColumns() const
   {
     return N;
   }
 
-  PUGS_INLINE
-  constexpr TinyMatrix
+  PUGS_INLINE constexpr TinyMatrix
   operator-() const
   {
     TinyMatrix opposite;
@@ -140,6 +133,16 @@ class [[nodiscard]] TinyMatrix
     return *this;
   }
 
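+  // Double-dot (Frobenius inner) product: doubleDot(A, B) = tr(A^T B), i.e. the sum of the
+  // component-wise products A(i,j) * B(i,j).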
+  [[nodiscard]] PUGS_INLINE constexpr friend T
+  doubleDot(const TinyMatrix& A, const TinyMatrix& B)
+  {
+    T t = A.m_values[0] * B.m_values[0];
+    for (size_t i = 1; i < M * N; ++i) {
+      t += A.m_values[i] * B.m_values[i];
+    }
+    return t;
+  }
+
   template <size_t P>
   PUGS_INLINE constexpr TinyMatrix<M, P, T>
   operator*(const TinyMatrix<N, P, T>& B) const
@@ -194,8 +197,7 @@ class [[nodiscard]] TinyMatrix
     return os;
   }
 
-  PUGS_INLINE
-  constexpr bool
+  [[nodiscard]] PUGS_INLINE constexpr bool
   operator==(const TinyMatrix& A) const
   {
     for (size_t i = 0; i < M * N; ++i) {
@@ -205,8 +207,7 @@ class [[nodiscard]] TinyMatrix
     return true;
   }
 
-  PUGS_INLINE
-  constexpr bool
+  [[nodiscard]] PUGS_INLINE constexpr bool
   operator!=(const TinyMatrix& A) const
   {
     return not this->operator==(A);
@@ -272,24 +273,21 @@ class [[nodiscard]] TinyMatrix
     return *this;
   }
 
-  PUGS_INLINE
-  constexpr T&
+  [[nodiscard]] PUGS_INLINE constexpr T&
   operator()(size_t i, size_t j) noexcept(NO_ASSERT)
   {
     Assert((i < M) and (j < N));
     return m_values[_index(i, j)];
   }
 
-  PUGS_INLINE
-  constexpr const T&
+  [[nodiscard]] PUGS_INLINE constexpr const T&
   operator()(size_t i, size_t j) const noexcept(NO_ASSERT)
   {
     Assert((i < M) and (j < N));
     return m_values[_index(i, j)];
   }
 
-  PUGS_INLINE
-  constexpr TinyMatrix&
+  PUGS_INLINE constexpr TinyMatrix&
   operator=(ZeroType) noexcept
   {
     static_assert(std::is_arithmetic<T>(), "Cannot assign 'zero' value for non-arithmetic types");
@@ -377,7 +375,7 @@ class [[nodiscard]] TinyMatrix
 };
 
 template <size_t M, size_t N, typename T>
-PUGS_INLINE constexpr TinyMatrix<M, N, T>
+[[nodiscard]] PUGS_INLINE constexpr TinyMatrix<M, N, T>
 tensorProduct(const TinyVector<M, T>& x, const TinyVector<N, T>& y)
 {
   TinyMatrix<M, N, T> A;
@@ -390,7 +388,7 @@ tensorProduct(const TinyVector<M, T>& x, const TinyVector<N, T>& y)
 }
 
 template <size_t N, typename T>
-PUGS_INLINE constexpr T
+[[nodiscard]] PUGS_INLINE constexpr T
 det(const TinyMatrix<N, N, T>& A)
 {
   static_assert(std::is_arithmetic<T>::value, "determinant is not defined for non-arithmetic types");
@@ -441,7 +439,7 @@ det(const TinyMatrix<N, N, T>& A)
 }
 
 template <typename T>
-PUGS_INLINE constexpr T
+[[nodiscard]] PUGS_INLINE constexpr T
 det(const TinyMatrix<1, 1, T>& A)
 {
   static_assert(std::is_arithmetic<T>::value, "determinent is not defined for non-arithmetic types");
@@ -449,7 +447,7 @@ det(const TinyMatrix<1, 1, T>& A)
 }
 
 template <typename T>
-PUGS_INLINE constexpr T
+[[nodiscard]] PUGS_INLINE constexpr T
 det(const TinyMatrix<2, 2, T>& A)
 {
   static_assert(std::is_arithmetic<T>::value, "determinent is not defined for non-arithmetic types");
@@ -457,7 +455,7 @@ det(const TinyMatrix<2, 2, T>& A)
 }
 
 template <typename T>
-PUGS_INLINE constexpr T
+[[nodiscard]] PUGS_INLINE constexpr T
 det(const TinyMatrix<3, 3, T>& A)
 {
   static_assert(std::is_arithmetic<T>::value, "determinent is not defined for non-arithmetic types");
@@ -466,7 +464,7 @@ det(const TinyMatrix<3, 3, T>& A)
 }
 
 template <size_t M, size_t N, typename T>
-PUGS_INLINE constexpr TinyMatrix<M - 1, N - 1, T>
+[[nodiscard]] PUGS_INLINE constexpr TinyMatrix<M - 1, N - 1, T>
 getMinor(const TinyMatrix<M, N, T>& A, size_t I, size_t J)
 {
   static_assert(M >= 2 and N >= 2, "minor calculation requires at least 2x2 matrices");
@@ -492,7 +490,7 @@ getMinor(const TinyMatrix<M, N, T>& A, size_t I, size_t J)
 }
 
 template <size_t N, typename T>
-PUGS_INLINE T
+[[nodiscard]] PUGS_INLINE T
 trace(const TinyMatrix<N, N, T>& A)
 {
   static_assert(std::is_arithmetic<T>::value, "trace is not defined for non-arithmetic types");
@@ -504,11 +502,21 @@ trace(const TinyMatrix<N, N, T>& A)
   return t;
 }
 
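+// Frobenius norm of a matrix: sqrt(doubleDot(A, A)). Restricted to floating-point value types
+// so that std::sqrt applies; the return type follows std::sqrt's result for T.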
+template <size_t M, size_t N, typename T>
+[[nodiscard]] PUGS_INLINE constexpr decltype(std::sqrt(std::declval<T>()))
+frobeniusNorm(const TinyMatrix<M, N, T>& A)
+{
+  static_assert(std::is_arithmetic<T>::value, "norm is not defined for non-arithmetic types");
+  static_assert(std::is_floating_point<T>::value, "Frobenius norm is defined for floating point types only");
+
+  return std::sqrt(doubleDot(A, A));
+}
+
 template <size_t N, typename T>
-PUGS_INLINE constexpr TinyMatrix<N, N, T> inverse(const TinyMatrix<N, N, T>& A);
+[[nodiscard]] PUGS_INLINE constexpr TinyMatrix<N, N, T> inverse(const TinyMatrix<N, N, T>& A);
 
 template <typename T>
-PUGS_INLINE constexpr TinyMatrix<1, 1, T>
+[[nodiscard]] PUGS_INLINE constexpr TinyMatrix<1, 1, T>
 inverse(const TinyMatrix<1, 1, T>& A)
 {
   static_assert(std::is_arithmetic<T>::value, "inverse is not defined for non-arithmetic types");
@@ -519,7 +527,7 @@ inverse(const TinyMatrix<1, 1, T>& A)
 }
 
 template <size_t N, typename T>
-PUGS_INLINE constexpr T
+[[nodiscard]] PUGS_INLINE constexpr T
 cofactor(const TinyMatrix<N, N, T>& A, size_t i, size_t j)
 {
   static_assert(std::is_arithmetic<T>::value, "cofactor is not defined for non-arithmetic types");
@@ -529,7 +537,7 @@ cofactor(const TinyMatrix<N, N, T>& A, size_t i, size_t j)
 }
 
 template <typename T>
-PUGS_INLINE constexpr TinyMatrix<2, 2, T>
+[[nodiscard]] PUGS_INLINE constexpr TinyMatrix<2, 2, T>
 inverse(const TinyMatrix<2, 2, T>& A)
 {
   static_assert(std::is_arithmetic<T>::value, "inverse is not defined for non-arithmetic types");
@@ -543,7 +551,7 @@ inverse(const TinyMatrix<2, 2, T>& A)
 }
 
 template <typename T>
-PUGS_INLINE constexpr TinyMatrix<3, 3, T>
+[[nodiscard]] PUGS_INLINE constexpr TinyMatrix<3, 3, T>
 inverse(const TinyMatrix<3, 3, T>& A)
 {
   static_assert(std::is_arithmetic<T>::value, "inverse is not defined for non-arithmetic types");
diff --git a/src/algebra/TinyVector.hpp b/src/algebra/TinyVector.hpp
index 2bc9c79c36ebe2caef6cf63ac6dc653d3a8b0bcb..be0efac46ede1211f1519d0e5b9760bcfa20b72d 100644
--- a/src/algebra/TinyVector.hpp
+++ b/src/algebra/TinyVector.hpp
@@ -33,8 +33,7 @@ class [[nodiscard]] TinyVector
   }
 
  public:
-  PUGS_INLINE
-  constexpr TinyVector
+  [[nodiscard]] PUGS_INLINE constexpr TinyVector
   operator-() const
   {
     TinyVector opposite;
@@ -249,7 +248,7 @@ class [[nodiscard]] TinyVector
 };
 
 template <size_t N, typename T>
-[[nodiscard]] PUGS_INLINE constexpr T
+[[nodiscard]] PUGS_INLINE constexpr decltype(std::sqrt(std::declval<T>()))
 l2Norm(const TinyVector<N, T>& x)
 {
   static_assert(std::is_arithmetic<T>(), "Cannot compute L2 norm for non-arithmetic types");
diff --git a/src/language/modules/MathModule.cpp b/src/language/modules/MathModule.cpp
index 5f5199af2d850edc93dc961e00dbe4a4ead58116..19a35a1529b1b65310c3c51d69fe4c5d02dc4d8a 100644
--- a/src/language/modules/MathModule.cpp
+++ b/src/language/modules/MathModule.cpp
@@ -71,6 +71,18 @@ MathModule::MathModule()
                               return dot(x, y);
                             }));
 
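+  // Register doubleDot for the three square matrix types of the language: R^1x1, R^2x2 and R^3x3.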
+  this->_addBuiltinFunction("doubleDot", std::function([](const TinyMatrix<1> A, const TinyMatrix<1> B) -> double {
+                              return doubleDot(A, B);
+                            }));
+
+  this->_addBuiltinFunction("doubleDot", std::function([](const TinyMatrix<2> A, const TinyMatrix<2> B) -> double {
+                              return doubleDot(A, B);
+                            }));
+
+  this->_addBuiltinFunction("doubleDot", std::function([](const TinyMatrix<3>& A, const TinyMatrix<3>& B) -> double {
+                              return doubleDot(A, B);
+                            }));
+
   this->_addBuiltinFunction("tensorProduct",
                             std::function([](const TinyVector<1> x, const TinyVector<1> y) -> TinyMatrix<1> {
                               return tensorProduct(x, y);
diff --git a/src/language/utils/EmbeddedDiscreteFunctionMathFunctions.cpp b/src/language/utils/EmbeddedDiscreteFunctionMathFunctions.cpp
index 961cc0e4092ba57c001866a52d7aed113e6b0f5d..5d000813aeff1e670a2f547a69827a679cbfa699 100644
--- a/src/language/utils/EmbeddedDiscreteFunctionMathFunctions.cpp
+++ b/src/language/utils/EmbeddedDiscreteFunctionMathFunctions.cpp
@@ -315,6 +315,107 @@ template std::shared_ptr<const DiscreteFunctionVariant> dot(const TinyVector<2>&
 template std::shared_ptr<const DiscreteFunctionVariant> dot(const TinyVector<3>&,
                                                             const std::shared_ptr<const DiscreteFunctionVariant>&);
 
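+// doubleDot of two discrete functions: both operands must be matrix-valued P0 functions of the
+// same type, defined on the same mesh; the result is a scalar P0 function.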
+std::shared_ptr<const DiscreteFunctionVariant>
+doubleDot(const std::shared_ptr<const DiscreteFunctionVariant>& f_v,
+          const std::shared_ptr<const DiscreteFunctionVariant>& g_v)
+{
+  if (not hasSameMesh({f_v, g_v})) {
+    throw NormalError("operands are defined on different meshes");
+  }
+
+  return std::visit(
+    [&](auto&& f, auto&& g) -> std::shared_ptr<DiscreteFunctionVariant> {
+      using TypeOfF = std::decay_t<decltype(f)>;
+      using TypeOfG = std::decay_t<decltype(g)>;
+      if constexpr (not std::is_same_v<TypeOfF, TypeOfG>) {
+        throw NormalError(EmbeddedDiscreteFunctionUtils::incompatibleOperandTypes(f, g));
+      } else {
+        using DataType = std::decay_t<typename TypeOfF::data_type>;
+        if constexpr (is_discrete_function_P0_v<TypeOfF>) {
+          if constexpr (is_tiny_matrix_v<DataType>) {
+            return std::make_shared<DiscreteFunctionVariant>(doubleDot(f, g));
+          } else {
+            throw NormalError(EmbeddedDiscreteFunctionUtils::invalidOperandType(f));
+          }
+        } else {
+          throw NormalError(EmbeddedDiscreteFunctionUtils::invalidOperandType(f));
+        }
+      }
+    },
+    f_v->discreteFunction(), g_v->discreteFunction());
+}
+
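+// doubleDot of a discrete function with a constant matrix: f must be a matrix-valued P0 function
+// whose data type matches TinyMatrix<MatrixDimension>.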
+template <size_t MatrixDimension>
+std::shared_ptr<const DiscreteFunctionVariant>
+doubleDot(const std::shared_ptr<const DiscreteFunctionVariant>& f, const TinyMatrix<MatrixDimension>& a)
+{
+  return std::visit(
+    [&](auto&& discrete_function0) -> std::shared_ptr<DiscreteFunctionVariant> {
+      using DiscreteFunction0Type = std::decay_t<decltype(discrete_function0)>;
+      if constexpr (is_discrete_function_P0_v<DiscreteFunction0Type>) {
+        using DataType = std::decay_t<typename DiscreteFunction0Type::data_type>;
+        if constexpr (is_tiny_matrix_v<DataType>) {
+          if constexpr (std::is_same_v<DataType, TinyMatrix<MatrixDimension>>) {
+            return std::make_shared<DiscreteFunctionVariant>(doubleDot(discrete_function0, a));
+          } else {
+            throw NormalError(EmbeddedDiscreteFunctionUtils::incompatibleOperandTypes(f, a));
+          }
+        } else {
+          throw NormalError(EmbeddedDiscreteFunctionUtils::invalidOperandType(f));
+        }
+      } else {
+        throw NormalError(EmbeddedDiscreteFunctionUtils::incompatibleOperandTypes(f, a));
+      }
+    },
+    f->discreteFunction());
+}
+
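+// Symmetric overload: the constant matrix is the left operand.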
+template <size_t MatrixDimension>
+std::shared_ptr<const DiscreteFunctionVariant>
+doubleDot(const TinyMatrix<MatrixDimension>& a, const std::shared_ptr<const DiscreteFunctionVariant>& f)
+{
+  return std::visit(
+    [&](auto&& discrete_function0) -> std::shared_ptr<DiscreteFunctionVariant> {
+      using DiscreteFunction0Type = std::decay_t<decltype(discrete_function0)>;
+      if constexpr (is_discrete_function_P0_v<DiscreteFunction0Type>) {
+        using DataType = std::decay_t<typename DiscreteFunction0Type::data_type>;
+        if constexpr (is_tiny_matrix_v<DataType>) {
+          if constexpr (std::is_same_v<DataType, TinyMatrix<MatrixDimension>>) {
+            return std::make_shared<DiscreteFunctionVariant>(doubleDot(a, discrete_function0));
+          } else {
+            throw NormalError(EmbeddedDiscreteFunctionUtils::incompatibleOperandTypes(a, f));
+          }
+        } else {
+          throw NormalError(EmbeddedDiscreteFunctionUtils::invalidOperandType(f));
+        }
+      } else {
+        throw NormalError(EmbeddedDiscreteFunctionUtils::incompatibleOperandTypes(a, f));
+      }
+    },
+    f->discreteFunction());
+}
+
+template std::shared_ptr<const DiscreteFunctionVariant> doubleDot(const std::shared_ptr<const DiscreteFunctionVariant>&,
+                                                                  const TinyMatrix<1>&);
+
+template std::shared_ptr<const DiscreteFunctionVariant> doubleDot(const std::shared_ptr<const DiscreteFunctionVariant>&,
+                                                                  const TinyMatrix<2>&);
+
+template std::shared_ptr<const DiscreteFunctionVariant> doubleDot(const std::shared_ptr<const DiscreteFunctionVariant>&,
+                                                                  const TinyMatrix<3>&);
+
+template std::shared_ptr<const DiscreteFunctionVariant> doubleDot(
+  const TinyMatrix<1>&,
+  const std::shared_ptr<const DiscreteFunctionVariant>&);
+
+template std::shared_ptr<const DiscreteFunctionVariant> doubleDot(
+  const TinyMatrix<2>&,
+  const std::shared_ptr<const DiscreteFunctionVariant>&);
+
+template std::shared_ptr<const DiscreteFunctionVariant> doubleDot(
+  const TinyMatrix<3>&,
+  const std::shared_ptr<const DiscreteFunctionVariant>&);
+
 std::shared_ptr<const DiscreteFunctionVariant>
 tensorProduct(const std::shared_ptr<const DiscreteFunctionVariant>& f_v,
               const std::shared_ptr<const DiscreteFunctionVariant>& g_v)
diff --git a/src/language/utils/EmbeddedDiscreteFunctionMathFunctions.hpp b/src/language/utils/EmbeddedDiscreteFunctionMathFunctions.hpp
index bcda26113a4fac6d72e7b54edfe0f0e8d8d74b2d..35ad4f38a03b1f0dfc9962877fe169c8c3265f71 100644
--- a/src/language/utils/EmbeddedDiscreteFunctionMathFunctions.hpp
+++ b/src/language/utils/EmbeddedDiscreteFunctionMathFunctions.hpp
@@ -69,6 +69,17 @@ template <size_t VectorDimension>
 std::shared_ptr<const DiscreteFunctionVariant> dot(const TinyVector<VectorDimension>&,
                                                    const std::shared_ptr<const DiscreteFunctionVariant>&);
 
+std::shared_ptr<const DiscreteFunctionVariant> doubleDot(const std::shared_ptr<const DiscreteFunctionVariant>&,
+                                                         const std::shared_ptr<const DiscreteFunctionVariant>&);
+
+template <size_t SquareMatrixNbRow>
+std::shared_ptr<const DiscreteFunctionVariant> doubleDot(const std::shared_ptr<const DiscreteFunctionVariant>&,
+                                                         const TinyMatrix<SquareMatrixNbRow>&);
+
+template <size_t SquareMatrixNbRow>
+std::shared_ptr<const DiscreteFunctionVariant> doubleDot(const TinyMatrix<SquareMatrixNbRow>&,
+                                                         const std::shared_ptr<const DiscreteFunctionVariant>&);
+
 std::shared_ptr<const DiscreteFunctionVariant> tensorProduct(const std::shared_ptr<const DiscreteFunctionVariant>&,
                                                              const std::shared_ptr<const DiscreteFunctionVariant>&);
 
diff --git a/src/scheme/DiscreteFunctionP0.hpp b/src/scheme/DiscreteFunctionP0.hpp
index ea0ec56aeeebb28e4af381ae352a20c9abd43bd4..ea53974e85cc65ecad0a99cb1f12333b83bf8518 100644
--- a/src/scheme/DiscreteFunctionP0.hpp
+++ b/src/scheme/DiscreteFunctionP0.hpp
@@ -606,6 +606,44 @@ class DiscreteFunctionP0
     return result;
   }
 
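+  // Cell-wise double-dot products for matrix-valued P0 functions; each overload returns a
+  // scalar-valued (double) P0 function on the same mesh.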
+  PUGS_INLINE friend DiscreteFunctionP0<double>
+  doubleDot(const DiscreteFunctionP0& f, const DiscreteFunctionP0& g)
+  {
+    static_assert(is_tiny_matrix_v<std::decay_t<DataType>>);
+    Assert(f.m_cell_values.isBuilt() and g.m_cell_values.isBuilt());
+    Assert(f.m_mesh_v->id() == g.m_mesh_v->id());
+    DiscreteFunctionP0<double> result{f.m_mesh_v};
+    parallel_for(
+      f.m_mesh_v->numberOfCells(),
+      PUGS_LAMBDA(CellId cell_id) { result[cell_id] = doubleDot(f[cell_id], g[cell_id]); });
+
+    return result;
+  }
+
+  PUGS_INLINE friend DiscreteFunctionP0<double>
+  doubleDot(const DiscreteFunctionP0& f, const DataType& a)
+  {
+    static_assert(is_tiny_matrix_v<std::decay_t<DataType>>);
+    Assert(f.m_cell_values.isBuilt());
+    DiscreteFunctionP0<double> result{f.m_mesh_v};
+    parallel_for(
+      f.m_mesh_v->numberOfCells(), PUGS_LAMBDA(CellId cell_id) { result[cell_id] = doubleDot(f[cell_id], a); });
+
+    return result;
+  }
+
+  PUGS_INLINE friend DiscreteFunctionP0<double>
+  doubleDot(const DataType& a, const DiscreteFunctionP0& f)
+  {
+    static_assert(is_tiny_matrix_v<std::decay_t<DataType>>);
+    Assert(f.m_cell_values.isBuilt());
+    DiscreteFunctionP0<double> result{f.m_mesh_v};
+    parallel_for(
+      f.m_mesh_v->numberOfCells(), PUGS_LAMBDA(CellId cell_id) { result[cell_id] = doubleDot(a, f[cell_id]); });
+
+    return result;
+  }
+
   template <typename LHSDataType>
   PUGS_INLINE friend DiscreteFunctionP0<decltype(tensorProduct(LHSDataType{}, DataType{}))>
   tensorProduct(const LHSDataType& u, const DiscreteFunctionP0& v)
diff --git a/tests/test_BuiltinFunctionProcessor.cpp b/tests/test_BuiltinFunctionProcessor.cpp
index 3983080979ec88397ec8e81786a1a9376e1b9e74..ca5a79339a99ac55a9d0bbd36340d570eab3b7e5 100644
--- a/tests/test_BuiltinFunctionProcessor.cpp
+++ b/tests/test_BuiltinFunctionProcessor.cpp
@@ -365,7 +365,43 @@ let s:R, s = dot(x,y);
       CHECK_BUILTIN_FUNCTION_EVALUATION_RESULT(data, "s", dot(TinyVector<3>{-2, 3, 4}, TinyVector<3>{4, 3, 5}));
     }
 
-    {   // dot
+    {   // double-dot
+      tested_function_set.insert("doubleDot:R^1x1*R^1x1");
+      std::string_view data = R"(
+import math;
+let A1:R^1x1, A1 = [[-2]];
+let A2:R^1x1, A2 = [[4]];
+let s:R, s = doubleDot(A1,A2);
+)";
+      CHECK_BUILTIN_FUNCTION_EVALUATION_RESULT(data, "s", double{-2 * 4});
+    }
+
+    {   // double-dot
+      tested_function_set.insert("doubleDot:R^2x2*R^2x2");
+      std::string_view data = R"(
+import math;
+let A1:R^2x2, A1 = [[-2, 3],[5,-2]];
+let A2:R^2x2, A2 = [[4, 3],[7,3]];
+let s:R, s = doubleDot(A1,A2);
+)";
+      CHECK_BUILTIN_FUNCTION_EVALUATION_RESULT(data, "s",
+                                               doubleDot(TinyMatrix<2>{-2, 3, 5, -2}, TinyMatrix<2>{4, 3, 7, 3}));
+    }
+
+    {   // double-dot
+      tested_function_set.insert("doubleDot:R^3x3*R^3x3");
+      std::string_view data = R"(
+import math;
+let A1:R^3x3, A1 = [[-2, 3, 4],[1,2,3],[6,3,2]];
+let A2:R^3x3, A2 = [[4, 3, 5],[2, 3, 1],[2, 6, 1]];
+let s:R, s = doubleDot(A1,A2);
+)";
+      CHECK_BUILTIN_FUNCTION_EVALUATION_RESULT(data, "s",
+                                               doubleDot(TinyMatrix<3>{-2, 3, 4, 1, 2, 3, 6, 3, 2},
+                                                         TinyMatrix<3>{4, 3, 5, 2, 3, 1, 2, 6, 1}));
+    }
+
+    {   // tensor product
       tested_function_set.insert("tensorProduct:R^1*R^1");
       std::string_view data = R"(
 import math;
@@ -376,7 +412,7 @@ let s:R^1x1, s = tensorProduct(x,y);
       CHECK_BUILTIN_FUNCTION_EVALUATION_RESULT(data, "s", TinyMatrix<1>{-2 * 4});
     }
 
-    {   // dot
+    {   // tensor product
       tested_function_set.insert("tensorProduct:R^2*R^2");
       std::string_view data = R"(
 import math;
@@ -387,7 +423,7 @@ let s:R^2x2, s = tensorProduct(x,y);
       CHECK_BUILTIN_FUNCTION_EVALUATION_RESULT(data, "s", tensorProduct(TinyVector<2>{-2, 3}, TinyVector<2>{4, 3}));
     }
 
-    {   // dot
+    {   // tensor product
       tested_function_set.insert("tensorProduct:R^3*R^3");
       std::string_view data = R"(
 import math;
diff --git a/tests/test_DiscreteFunctionP0.cpp b/tests/test_DiscreteFunctionP0.cpp
index 14c96b0db712daa220951f407e66eb1f774f126e..7eb264566b916944c1800ca27b2e35a88d656561 100644
--- a/tests/test_DiscreteFunctionP0.cpp
+++ b/tests/test_DiscreteFunctionP0.cpp
@@ -2729,6 +2729,53 @@ TEST_CASE("DiscreteFunctionP0", "[scheme]")
             CHECK_STD_BINARY_MATH_FUNCTION_WITH_LHS_VALUE(u, vh, dot);
           }
 
+          SECTION("doubleDot(Ah,Bh)")
+          {
+            DiscreteFunctionP0<TinyMatrix<2>> Ah{mesh};
+            parallel_for(
+              mesh->numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
+                const double x = xj[cell_id][0];
+                Ah[cell_id]    = TinyMatrix<2>{x + 1, 2 * x - 3, 3 - x, 3 * x - 2};
+              });
+
+            DiscreteFunctionP0<TinyMatrix<2>> Bh{mesh};
+            parallel_for(
+              mesh->numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
+                const double x = xj[cell_id][0];
+                Bh[cell_id]    = TinyMatrix<2>{2.3 * x, 1 - x, 2 * x, 1.5 * x};
+              });
+
+            CHECK_STD_BINARY_MATH_FUNCTION(Ah, Bh, doubleDot);
+          }
+
+          SECTION("doubleDot(Ah,B)")
+          {
+            DiscreteFunctionP0<TinyMatrix<2>> Ah{mesh};
+            parallel_for(
+              mesh->numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
+                const double x = xj[cell_id][0];
+                Ah[cell_id]    = TinyMatrix<2>{x + 1, 2 * x - 3, 3 - x, 3 * x - 2};
+              });
+
+            const TinyMatrix<2> B{1, 2, 3, 4};
+
+            CHECK_STD_BINARY_MATH_FUNCTION_WITH_RHS_VALUE(Ah, B, doubleDot);
+          }
+
+          SECTION("doubleDot(A,Bh)")
+          {
+            const TinyMatrix<2> A{3, -2, 4, 5};
+
+            DiscreteFunctionP0<TinyMatrix<2>> Bh{mesh};
+            parallel_for(
+              mesh->numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
+                const double x = xj[cell_id][0];
+                Bh[cell_id]    = TinyMatrix<2>{2.3 * x, 1 - x, 2 * x, -x};
+              });
+
+            CHECK_STD_BINARY_MATH_FUNCTION_WITH_LHS_VALUE(A, Bh, doubleDot);
+          }
+
           SECTION("tensorProduct(uh,hv)")
           {
             DiscreteFunctionP0<TinyVector<2>> uh{mesh};
@@ -3118,6 +3165,53 @@ TEST_CASE("DiscreteFunctionP0", "[scheme]")
             CHECK_STD_BINARY_MATH_FUNCTION_WITH_LHS_VALUE(u, vh, dot);
           }
 
+          SECTION("doubleDot(Ah,Bh)")
+          {
+            DiscreteFunctionP0<TinyMatrix<2>> Ah{mesh};
+            parallel_for(
+              mesh->numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
+                const double x = xj[cell_id][0];
+                Ah[cell_id]    = TinyMatrix<2>{x + 1, 2 * x - 3, 3 - x, 3 * x - 2};
+              });
+
+            DiscreteFunctionP0<TinyMatrix<2>> Bh{mesh};
+            parallel_for(
+              mesh->numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
+                const double x = xj[cell_id][0];
+                Bh[cell_id]    = TinyMatrix<2>{2.3 * x, 1 - x, 2 * x, 1.5 * x};
+              });
+
+            CHECK_STD_BINARY_MATH_FUNCTION(Ah, Bh, doubleDot);
+          }
+
+          SECTION("doubleDot(Ah,B)")
+          {
+            DiscreteFunctionP0<TinyMatrix<2>> Ah{mesh};
+            parallel_for(
+              mesh->numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
+                const double x = xj[cell_id][0];
+                Ah[cell_id]    = TinyMatrix<2>{x + 1, 2 * x - 3, 3 - x, 3 * x - 2};
+              });
+
+            const TinyMatrix<2> B{1, 2, 3, 4};
+
+            CHECK_STD_BINARY_MATH_FUNCTION_WITH_RHS_VALUE(Ah, B, doubleDot);
+          }
+
+          SECTION("doubleDot(A,Bh)")
+          {
+            const TinyMatrix<2> A{3, -2, 4, 5};
+
+            DiscreteFunctionP0<TinyMatrix<2>> Bh{mesh};
+            parallel_for(
+              mesh->numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
+                const double x = xj[cell_id][0];
+                Bh[cell_id]    = TinyMatrix<2>{2.3 * x, 1 - x, 2 * x, -x};
+              });
+
+            CHECK_STD_BINARY_MATH_FUNCTION_WITH_LHS_VALUE(A, Bh, doubleDot);
+          }
+
           SECTION("scalar sum")
           {
             const CellValue<const double> cell_value = positive_function.cellValues();
@@ -3460,6 +3554,53 @@ TEST_CASE("DiscreteFunctionP0", "[scheme]")
             CHECK_STD_BINARY_MATH_FUNCTION_WITH_LHS_VALUE(u, vh, dot);
           }
 
+          SECTION("doubleDot(Ah,Bh)")
+          {
+            DiscreteFunctionP0<TinyMatrix<2>> Ah{mesh};
+            parallel_for(
+              mesh->numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
+                const double x = xj[cell_id][0];
+                Ah[cell_id]    = TinyMatrix<2>{x + 1, 2 * x - 3, 3 - x, 3 * x - 2};
+              });
+
+            DiscreteFunctionP0<TinyMatrix<2>> Bh{mesh};
+            parallel_for(
+              mesh->numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
+                const double x = xj[cell_id][0];
+                Bh[cell_id]    = TinyMatrix<2>{2.3 * x, 1 - x, 2 * x, 1.5 * x};
+              });
+
+            CHECK_STD_BINARY_MATH_FUNCTION(Ah, Bh, doubleDot);
+          }
+
+          SECTION("doubleDot(Ah,B)")
+          {
+            DiscreteFunctionP0<TinyMatrix<2>> Ah{mesh};
+            parallel_for(
+              mesh->numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
+                const double x = xj[cell_id][0];
+                Ah[cell_id]    = TinyMatrix<2>{x + 1, 2 * x - 3, 3 - x, 3 * x - 2};
+              });
+
+            const TinyMatrix<2> B{1, 2, 3, 4};
+
+            CHECK_STD_BINARY_MATH_FUNCTION_WITH_RHS_VALUE(Ah, B, doubleDot);
+          }
+
+          SECTION("doubleDot(A,Bh)")
+          {
+            const TinyMatrix<2> A{3, -2, 4, 5};
+
+            DiscreteFunctionP0<TinyMatrix<2>> Bh{mesh};
+            parallel_for(
+              mesh->numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
+                const double x = xj[cell_id][0];
+                Bh[cell_id]    = TinyMatrix<2>{2.3 * x, 1 - x, 2 * x, -x};
+              });
+
+            CHECK_STD_BINARY_MATH_FUNCTION_WITH_LHS_VALUE(A, Bh, doubleDot);
+          }
+
           SECTION("det(Ah)")
           {
             DiscreteFunctionP0<TinyMatrix<2>> Ah{mesh};
diff --git a/tests/test_EmbeddedDiscreteFunctionMathFunctions1D.cpp b/tests/test_EmbeddedDiscreteFunctionMathFunctions1D.cpp
index 41f3c0d2233b1884051f0eafd5df637ce37ed7e0..8b0b3fbb7b2475ba885434331607977badc2ce1c 100644
--- a/tests/test_EmbeddedDiscreteFunctionMathFunctions1D.cpp
+++ b/tests/test_EmbeddedDiscreteFunctionMathFunctions1D.cpp
@@ -171,6 +171,18 @@ TEST_CASE("EmbeddedDiscreteFunctionVariantMathFunctions1D", "[scheme]")
         return std::make_shared<const DiscreteFunctionVariant>(DiscreteFunctionR1x1(mesh, uj));
       }();
 
+      std::shared_ptr p_other_mesh_R1x1_u = std::make_shared<const DiscreteFunctionVariant>(
+        DiscreteFunctionR1x1(other_mesh, p_R1x1_u->get<DiscreteFunctionR1x1>().cellValues()));
+
+      std::shared_ptr p_R1x1_v = [=] {
+        CellValue<TinyMatrix<1>> vj{mesh->connectivity()};
+        parallel_for(
+          vj.numberOfItems(),
+          PUGS_LAMBDA(const CellId cell_id) { vj[cell_id] = TinyMatrix<1>{3 * xj[cell_id][0] - 2}; });
+
+        return std::make_shared<const DiscreteFunctionVariant>(DiscreteFunctionR1x1(mesh, vj));
+      }();
+
       std::shared_ptr p_R2x2_u = [=] {
         CellValue<TinyMatrix<2>> uj{mesh->connectivity()};
         parallel_for(
@@ -184,6 +196,22 @@ TEST_CASE("EmbeddedDiscreteFunctionVariantMathFunctions1D", "[scheme]")
         return std::make_shared<const DiscreteFunctionVariant>(DiscreteFunctionR2x2(mesh, uj));
       }();
 
+      std::shared_ptr p_other_mesh_R2x2_u = std::make_shared<const DiscreteFunctionVariant>(
+        DiscreteFunctionR2x2(other_mesh, p_R2x2_u->get<DiscreteFunctionR2x2>().cellValues()));
+
+      std::shared_ptr p_R2x2_v = [=] {
+        CellValue<TinyMatrix<2>> vj{mesh->connectivity()};
+        parallel_for(
+          vj.numberOfItems(), PUGS_LAMBDA(const CellId cell_id) {
+            const TinyVector<2> x = to_2d(xj[cell_id]);
+
+            vj[cell_id] = TinyMatrix<2>{3 * x[0] + 2, 2 - x[0],   //
+                                        5 * x[1], x[1]};
+          });
+
+        return std::make_shared<const DiscreteFunctionVariant>(DiscreteFunctionR2x2(mesh, vj));
+      }();
+
       std::shared_ptr p_R3x3_u = [=] {
         CellValue<TinyMatrix<3>> uj{mesh->connectivity()};
         parallel_for(
@@ -198,6 +226,25 @@ TEST_CASE("EmbeddedDiscreteFunctionVariantMathFunctions1D", "[scheme]")
         return std::make_shared<const DiscreteFunctionVariant>(DiscreteFunctionR3x3(mesh, uj));
       }();
 
+      std::shared_ptr p_other_mesh_R3x3_u = std::make_shared<const DiscreteFunctionVariant>(
+        DiscreteFunctionR3x3(other_mesh, p_R3x3_u->get<DiscreteFunctionR3x3>().cellValues()));
+
+      std::shared_ptr p_R3x3_v = [=] {
+        CellValue<TinyMatrix<3>> vj{mesh->connectivity()};
+        parallel_for(
+          vj.numberOfItems(), PUGS_LAMBDA(const CellId cell_id) {
+            const TinyVector<3> x = to_3d(xj[cell_id]);
+
+            vj[cell_id] = TinyMatrix<3>{3 * x[0] + 1, 2 - x[1], 2,
+                                        //
+                                        2 * x[0], x[2], x[1] - x[2],
+                                        //
+                                        2 * x[1] - x[2], x[1] - 2 * x[2], x[0] - x[2]};
+          });
+
+        return std::make_shared<const DiscreteFunctionVariant>(DiscreteFunctionR3x3(mesh, vj));
+      }();
+
       std::shared_ptr p_Vector3_u = [=] {
         CellArray<double> uj_vector{mesh->connectivity(), 3};
         parallel_for(
@@ -502,6 +549,30 @@ TEST_CASE("EmbeddedDiscreteFunctionVariantMathFunctions1D", "[scheme]")
         REQUIRE_THROWS_WITH(dot(p_Vector3_u, p_Vector2_w), "error: operands have different dimension");
       }
 
+      SECTION("doubleDot Vh*Vh -> Vh")
+      {
+        CHECK_EMBEDDED_VH2_TO_VH_FUNCTION_EVALUATION(p_R1x1_u, p_R1x1_v, doubleDot,   //
+                                                     DiscreteFunctionR1x1, DiscreteFunctionR1x1, DiscreteFunctionR);
+        CHECK_EMBEDDED_VH2_TO_VH_FUNCTION_EVALUATION(p_R2x2_u, p_R2x2_v, doubleDot,   //
+                                                     DiscreteFunctionR2x2, DiscreteFunctionR2x2, DiscreteFunctionR);
+        CHECK_EMBEDDED_VH2_TO_VH_FUNCTION_EVALUATION(p_R3x3_u, p_R3x3_v, doubleDot,   //
+                                                     DiscreteFunctionR3x3, DiscreteFunctionR3x3, DiscreteFunctionR);
+
+        REQUIRE_THROWS_WITH(doubleDot(p_R1x1_u, p_other_mesh_R1x1_u),
+                            "error: operands are defined on different meshes");
+        REQUIRE_THROWS_WITH(doubleDot(p_R2x2_u, p_other_mesh_R2x2_u),
+                            "error: operands are defined on different meshes");
+        REQUIRE_THROWS_WITH(doubleDot(p_R3x3_u, p_other_mesh_R3x3_u),
+                            "error: operands are defined on different meshes");
+        REQUIRE_THROWS_WITH(doubleDot(p_R1x1_u, p_R3x3_u),
+                            "error: incompatible operand types Vh(P0:R^1x1) and Vh(P0:R^3x3)");
+        REQUIRE_THROWS_WITH(doubleDot(p_u, p_u), "error: invalid operand type Vh(P0:R)");
+        REQUIRE_THROWS_WITH(doubleDot(p_R1_u, p_R1_u), "error: invalid operand type Vh(P0:R^1)");
+        REQUIRE_THROWS_WITH(doubleDot(p_R2_u, p_R2_u), "error: invalid operand type Vh(P0:R^2)");
+        REQUIRE_THROWS_WITH(doubleDot(p_R3_u, p_R3_u), "error: invalid operand type Vh(P0:R^3)");
+        REQUIRE_THROWS_WITH(doubleDot(p_Vector3_u, p_Vector2_w), "error: invalid operand type Vh(P0Vector:R)");
+      }
+
       SECTION("tensorProduct Vh*Vh -> Vh")
       {
         CHECK_EMBEDDED_VH2_TO_VH_FUNCTION_EVALUATION(p_R1_u, p_R1_v, tensorProduct,   //
diff --git a/tests/test_MathModule.cpp b/tests/test_MathModule.cpp
index 89a480e1ac4993b3cdbbd6702f5e2130a1872ec0..54befcd82fd4804262c27225a1eca28ba9824b37 100644
--- a/tests/test_MathModule.cpp
+++ b/tests/test_MathModule.cpp
@@ -13,7 +13,7 @@ TEST_CASE("MathModule", "[language]")
   MathModule math_module;
   const auto& name_builtin_function = math_module.getNameBuiltinFunctionMap();
 
-  REQUIRE(name_builtin_function.size() == 45);
+  REQUIRE(name_builtin_function.size() == 48);
 
   SECTION("Z -> N")
   {
@@ -458,6 +458,69 @@ TEST_CASE("MathModule", "[language]")
     }
   }
 
+  SECTION("(R^dxd, R^dxd) -> double")
+  {
+    SECTION("doubleDot:R^1x1*R^1x1")
+    {
+      TinyMatrix<1> arg0{3};
+      TinyMatrix<1> arg1{2};
+
+      DataVariant arg0_variant = arg0;
+      DataVariant arg1_variant = arg1;
+
+      auto i_function = name_builtin_function.find("doubleDot:R^1x1*R^1x1");
+      REQUIRE(i_function != name_builtin_function.end());
+
+      IBuiltinFunctionEmbedder& function_embedder = *i_function->second;
+      DataVariant result_variant                  = function_embedder.apply({arg0_variant, arg1_variant});
+
+      const double result = doubleDot(arg0, arg1);
+      REQUIRE(std::get<double>(result_variant) == result);
+    }
+
+    SECTION("doubleDot:R^2x2*R^2x2")
+    {
+      TinyMatrix<2> arg0{+3, +2,   //
+                         -1, +4};
+      TinyMatrix<2> arg1{-2, -5,   //
+                         +7, +1.3};
+
+      DataVariant arg0_variant = arg0;
+      DataVariant arg1_variant = arg1;
+
+      auto i_function = name_builtin_function.find("doubleDot:R^2x2*R^2x2");
+      REQUIRE(i_function != name_builtin_function.end());
+
+      IBuiltinFunctionEmbedder& function_embedder = *i_function->second;
+      DataVariant result_variant                  = function_embedder.apply({arg0_variant, arg1_variant});
+
+      const double result = doubleDot(arg0, arg1);
+      REQUIRE(std::get<double>(result_variant) == result);
+    }
+
+    SECTION("doubleDot:R^3x3*R^3x3")
+    {
+      TinyMatrix<3> arg0{+3, +2, +4,   //
+                         -1, +3, -6,   //
+                         +2, +5, +1};
+      TinyMatrix<3> arg1{-2, +5, +2,   //
+                         +1, +7, -2,   //
+                         +7, -1, +3};
+
+      DataVariant arg0_variant = arg0;
+      DataVariant arg1_variant = arg1;
+
+      auto i_function = name_builtin_function.find("doubleDot:R^3x3*R^3x3");
+      REQUIRE(i_function != name_builtin_function.end());
+
+      IBuiltinFunctionEmbedder& function_embedder = *i_function->second;
+      DataVariant result_variant                  = function_embedder.apply({arg0_variant, arg1_variant});
+
+      const double result = doubleDot(arg0, arg1);
+      REQUIRE(std::get<double>(result_variant) == result);
+    }
+  }
+
   SECTION("(R^d, R^d) -> R^dxd")
   {
     SECTION("tensorProduct:R^1*R^1")
diff --git a/tests/test_TinyMatrix.cpp b/tests/test_TinyMatrix.cpp
index 80159a021b54ebb162d339be2bbd5195aec815a0..58a96f3acdf690653a4450f6e927febaf027a385 100644
--- a/tests/test_TinyMatrix.cpp
+++ b/tests/test_TinyMatrix.cpp
@@ -209,6 +209,28 @@ TEST_CASE("TinyMatrix", "[algebra]")
     REQUIRE(trace(TinyMatrix<4>(1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 2, 0, 0, 2, 2)) == 1 + 0 + 1 + 2);
   }
 
+  SECTION("checking for doubleDot calculations")
+  {
+    REQUIRE(doubleDot(TinyMatrix<1, 1, int>(6), TinyMatrix<1, 1, int>(4)) == 24);
+    REQUIRE(doubleDot(TinyMatrix<2, 2>(5, 1, -3, 6), TinyMatrix<2, 2>(-2, 3, -5, 1)) ==
+            Catch::Approx(-10 + 3 + 15 + 6).epsilon(1E-14));
+    REQUIRE(doubleDot(TinyMatrix<4>(1, 2.3, 7, -6.2, 3, 4, 9, 1, 4.1, 5, 2, -3, 2, 27, 3, 17.5),
+                      TinyMatrix<4>(1, 2.3, 7, -6.2, 3, 4, 9, 1, 4.1, 5, 2, -3, 2, 27, 3, 17.5)) ==
+            Catch::Approx(1 + 2.3 * 2.3 + 7 * 7 + 6.2 * 6.2 + 9 + 16 + 81 + 1 + 4.1 * 4.1 + 25 + 4 + 9 + 4 + 27 * 27 +
+                          9 + 17.5 * 17.5)
+              .epsilon(1E-14));
+  }
+
+  SECTION("checking for norm calculations")
+  {
+    REQUIRE(frobeniusNorm(TinyMatrix<1, 1>(6)) == Catch::Approx(6));
+    REQUIRE(frobeniusNorm(TinyMatrix<2, 2>(5, 1, -3, 6)) == Catch::Approx(std::sqrt(25 + 1 + 9 + 36)).epsilon(1E-14));
+    REQUIRE(frobeniusNorm(TinyMatrix<4>(1, 2.3, 7, -6.2, 3, 4, 9, 1, 4.1, 5, 2, -3, 2, 27, 3, 17.5)) ==
+            Catch::Approx(std::sqrt(1 + 2.3 * 2.3 + 7 * 7 + 6.2 * 6.2 + 9 + 16 + 81 + 1 + 4.1 * 4.1 + 25 + 4 + 9 + 4 +
+                                    27 * 27 + 9 + 17.5 * 17.5))
+              .epsilon(1E-14));
+  }
+
   SECTION("checking for inverse calculations")
   {
     {