diff --git a/src/mesh/Synchronizer.hpp b/src/mesh/Synchronizer.hpp
index 63e4363ef7dc1b360ca75626ef01e31dcb0728b7..ed385f838235f2e70465029e23bfba3dda57d277 100644
--- a/src/mesh/Synchronizer.hpp
+++ b/src/mesh/Synchronizer.hpp
@@ -11,9 +11,8 @@
 
 #include <utils/pugs_config.hpp>
 
-#include <iostream>
-#include <map>
 #include <memory>
+#include <unordered_map>
 
 #ifdef PUGS_HAS_MPI
 
@@ -23,182 +22,364 @@ class Synchronizer
   template <ItemType item_type>
   using ExchangeItemTypeInfo = std::vector<Array<const ItemIdT<item_type>>>;
 
-  std::unique_ptr<ExchangeItemTypeInfo<ItemType::cell>> m_requested_cell_info;
-  std::unique_ptr<ExchangeItemTypeInfo<ItemType::cell>> m_provided_cell_info;
+  using ExchangeItemInfoRepository = std::tuple<std::unique_ptr<ExchangeItemTypeInfo<ItemType::node>>,
+                                                std::unique_ptr<ExchangeItemTypeInfo<ItemType::edge>>,
+                                                std::unique_ptr<ExchangeItemTypeInfo<ItemType::face>>,
+                                                std::unique_ptr<ExchangeItemTypeInfo<ItemType::cell>>>;
 
-  std::unique_ptr<ExchangeItemTypeInfo<ItemType::face>> m_requested_face_info;
-  std::unique_ptr<ExchangeItemTypeInfo<ItemType::face>> m_provided_face_info;
+  ExchangeItemInfoRepository m_requested_item_info_list;
+  ExchangeItemInfoRepository m_provided_item_info_list;
 
-  std::unique_ptr<ExchangeItemTypeInfo<ItemType::edge>> m_requested_edge_info;
-  std::unique_ptr<ExchangeItemTypeInfo<ItemType::edge>> m_provided_edge_info;
+  // here 12 is the maximum number (3d) of sub item of item
+  // configurations (cell->edge, face->node...)
+  using ExchangeSubItemPerItemTotalSizeList = std::array<std::unique_ptr<std::vector<size_t>>, 12>;
+  using SubItemPerItemProvidedList          = std::array<std::unique_ptr<std::vector<Array<const size_t>>>, 12>;
+  using NumberOfSubItemPerItemProvidedList  = std::array<std::unique_ptr<std::vector<Array<const size_t>>>, 12>;
 
-  std::unique_ptr<ExchangeItemTypeInfo<ItemType::node>> m_requested_node_info;
-  std::unique_ptr<ExchangeItemTypeInfo<ItemType::node>> m_provided_node_info;
+  ExchangeSubItemPerItemTotalSizeList m_sub_item_per_item_requested_total_size_list;
+  ExchangeSubItemPerItemTotalSizeList m_sub_item_per_item_provided_total_size_list;
+  SubItemPerItemProvidedList m_sub_item_per_item_provided_list;
+  NumberOfSubItemPerItemProvidedList m_number_of_sub_item_per_item_provided_list;
 
-  using ExchangeSubItemPerItemSize = std::vector<std::map<std::pair<ItemType, ItemType>, size_t>>;
+  template <typename ConnectivityType, ItemType item_type>
+  void
+  _buildSynchronizeInfoIfNeeded(const ConnectivityType& connectivity)
+  {
+    const auto& item_owner = connectivity.template owner<item_type>();
+    using ItemId           = ItemIdT<item_type>;
 
-  ExchangeSubItemPerItemSize m_sub_item_per_item_requested_size_list;
-  ExchangeSubItemPerItemSize m_sub_item_per_item_provided_size_list;
+    auto& p_requested_item_info = std::get<static_cast<int>(item_type)>(m_requested_item_info_list);
+    auto& p_provided_item_info  = std::get<static_cast<int>(item_type)>(m_provided_item_info_list);
 
-  template <ItemType item_type>
-  PUGS_INLINE constexpr auto&
-  _getRequestedItemInfo()
-  {
-    if constexpr (item_type == ItemType::cell) {
-      return m_requested_cell_info;
-    } else if constexpr (item_type == ItemType::face) {
-      return m_requested_face_info;
-    } else if constexpr (item_type == ItemType::edge) {
-      return m_requested_edge_info;
-    } else if constexpr (item_type == ItemType::node) {
-      return m_requested_node_info;
+    Assert(static_cast<bool>(p_provided_item_info) == static_cast<bool>(p_requested_item_info));
+
+    if (not p_provided_item_info) {
+      p_requested_item_info = [&]() {
+        std::vector<std::vector<ItemId>> requested_item_vector_info(parallel::size());
+        for (ItemId item_id = 0; item_id < item_owner.numberOfItems(); ++item_id) {
+          if (const size_t owner = item_owner[item_id]; owner != parallel::rank()) {
+            requested_item_vector_info[owner].emplace_back(item_id);
+          }
+        }
+        ExchangeItemTypeInfo<item_type> requested_item_info(parallel::size());
+        for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+          const auto& requested_item_vector = requested_item_vector_info[i_rank];
+          requested_item_info[i_rank]       = convert_to_array(requested_item_vector);
+        }
+        return std::make_unique<ExchangeItemTypeInfo<item_type>>(std::move(requested_item_info));
+      }();
+
+      auto& requested_item_info = *p_requested_item_info;
+
+      Array<unsigned int> local_number_of_requested_values(parallel::size());
+      for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+        local_number_of_requested_values[i_rank] = requested_item_info[i_rank].size();
+      }
+
+      Array<unsigned int> local_number_of_values_to_send = parallel::allToAll(local_number_of_requested_values);
+
+      std::vector<Array<const int>> requested_item_number_list_by_proc(parallel::size());
+      const auto& item_number = connectivity.template number<item_type>();
+      for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+        const auto& requested_item_info_from_rank = requested_item_info[i_rank];
+        Array<int> item_number_list{requested_item_info_from_rank.size()};
+        parallel_for(
+          requested_item_info_from_rank.size(), PUGS_LAMBDA(size_t i_item) {
+            item_number_list[i_item] = item_number[requested_item_info_from_rank[i_item]];
+          });
+        requested_item_number_list_by_proc[i_rank] = item_number_list;
+      }
+
+      std::vector<Array<int>> provided_item_number_list_by_rank(parallel::size());
+      for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+        provided_item_number_list_by_rank[i_rank] = Array<int>{local_number_of_values_to_send[i_rank]};
+      }
+
+      parallel::exchange(requested_item_number_list_by_proc, provided_item_number_list_by_rank);
+
+      std::unordered_map<int, ItemId> item_number_to_id_correspondance;
+      for (ItemId item_id = 0; item_id < item_number.numberOfItems(); ++item_id) {
+        item_number_to_id_correspondance[item_number[item_id]] = item_id;
+      }
+
+      p_provided_item_info = [&]() {
+        ExchangeItemTypeInfo<item_type> provided_item_info(parallel::size());
+        for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+          Array<ItemId> provided_item_id_to_rank{local_number_of_values_to_send[i_rank]};
+          const Array<int>& provided_item_number_to_rank = provided_item_number_list_by_rank[i_rank];
+          for (size_t i = 0; i < provided_item_number_to_rank.size(); ++i) {
+            provided_item_id_to_rank[i] =
+              item_number_to_id_correspondance.find(provided_item_number_to_rank[i])->second;
+          }
+          provided_item_info[i_rank] = provided_item_id_to_rank;
+        }
+        return std::make_unique<ExchangeItemTypeInfo<item_type>>(provided_item_info);
+      }();
     }
   }
 
-  template <ItemType item_type>
+  template <typename ConnectivityType, ItemType item_type>
   PUGS_INLINE constexpr auto&
-  _getProvidedItemInfo()
+  _getRequestedItemInfo(const ConnectivityType& connectivity)
   {
-    if constexpr (item_type == ItemType::cell) {
-      return m_provided_cell_info;
-    } else if constexpr (item_type == ItemType::face) {
-      return m_provided_face_info;
-    } else if constexpr (item_type == ItemType::edge) {
-      return m_provided_edge_info;
-    } else if constexpr (item_type == ItemType::node) {
-      return m_provided_node_info;
-    }
+    this->_buildSynchronizeInfoIfNeeded<ConnectivityType, item_type>(connectivity);
+    return *std::get<static_cast<int>(item_type)>(m_requested_item_info_list);
   }
 
   template <typename ConnectivityType, ItemType item_type>
-  void
-  _buildSynchronizeInfo(const ConnectivityType& connectivity)
+  PUGS_INLINE constexpr auto&
+  _getProvidedItemInfo(const ConnectivityType& connectivity)
   {
-    const auto& item_owner = connectivity.template owner<item_type>();
-    using ItemId           = ItemIdT<item_type>;
+    this->_buildSynchronizeInfoIfNeeded<ConnectivityType, item_type>(connectivity);
+    return *std::get<static_cast<int>(item_type)>(m_provided_item_info_list);
+  }
 
-    auto& p_requested_item_info = this->_getRequestedItemInfo<item_type>();
-    p_requested_item_info       = [&]() {
-      std::vector<std::vector<ItemId>> requested_item_vector_info(parallel::size());
-      for (ItemId item_id = 0; item_id < item_owner.numberOfItems(); ++item_id) {
-        if (const size_t owner = item_owner[item_id]; owner != parallel::rank()) {
-          requested_item_vector_info[owner].emplace_back(item_id);
-        }
-      }
-      ExchangeItemTypeInfo<item_type> requested_item_info(parallel::size());
+  template <ItemType item_type, ItemType sub_item_type, typename ConnectivityType>
+  PUGS_INLINE const std::vector<size_t>&
+  _getSubItemPerItemRequestedTotalSize(const ConnectivityType& connectivity)
+  {
+    auto& p_sub_item_per_item_requested_total_size =
+      m_sub_item_per_item_requested_total_size_list[item_of_item_type_index<sub_item_type, item_type>];
+    if (not p_sub_item_per_item_requested_total_size) {
+      std::vector<size_t> sub_item_per_item_requested_total_size(parallel::size());
+      const auto& requested_item_info = this->_getRequestedItemInfo<ConnectivityType, item_type>(connectivity);
       for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
-        const auto& requested_item_vector = requested_item_vector_info[i_rank];
-        requested_item_info[i_rank]       = convert_to_array(requested_item_vector);
-      }
-      return std::make_unique<ExchangeItemTypeInfo<item_type>>(std::move(requested_item_info));
-    }();
+        const auto& requested_item_info_from_rank = requested_item_info[i_rank];
 
-    auto& requested_item_info = *p_requested_item_info;
+        const auto& item_to_item_matrix = connectivity.template getItemToItemMatrix<item_type, sub_item_type>();
 
-    Array<unsigned int> local_number_of_requested_values(parallel::size());
-    for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
-      local_number_of_requested_values[i_rank] = requested_item_info[i_rank].size();
+        size_t count = 0;
+        for (size_t i = 0; i < requested_item_info_from_rank.size(); ++i) {
+          count += item_to_item_matrix[requested_item_info_from_rank[i]].size();
+        }
+
+        sub_item_per_item_requested_total_size[i_rank] = count;
+      }
+      p_sub_item_per_item_requested_total_size =
+        std::make_unique<std::vector<size_t>>(std::move(sub_item_per_item_requested_total_size));
     }
 
-    Array<unsigned int> local_number_of_values_to_send = parallel::allToAll(local_number_of_requested_values);
+    return (*p_sub_item_per_item_requested_total_size);
+  }
 
-    std::vector<Array<const int>> requested_item_number_list_by_proc(parallel::size());
-    const auto& item_number = connectivity.template number<item_type>();
-    for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
-      const auto& requested_item_info_from_rank = requested_item_info[i_rank];
-      Array<int> item_number_list{requested_item_info_from_rank.size()};
-      parallel_for(
-        requested_item_info_from_rank.size(),
-        PUGS_LAMBDA(size_t i_item) { item_number_list[i_item] = item_number[requested_item_info_from_rank[i_item]]; });
-      requested_item_number_list_by_proc[i_rank] = item_number_list;
-    }
+  template <ItemType item_type, ItemType sub_item_type, typename ConnectivityType>
+  PUGS_INLINE const std::vector<size_t>&
+  _getSubItemPerItemProvidedTotalSize(const ConnectivityType& connectivity)
+  {
+    static_assert(item_type != sub_item_type);
 
-    std::vector<Array<int>> provided_item_number_list_by_rank(parallel::size());
-    for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
-      provided_item_number_list_by_rank[i_rank] = Array<int>{local_number_of_values_to_send[i_rank]};
-    }
+    auto& p_sub_item_per_item_provided_total_size =
+      m_sub_item_per_item_provided_total_size_list[item_of_item_type_index<sub_item_type, item_type>];
 
-    parallel::exchange(requested_item_number_list_by_proc, provided_item_number_list_by_rank);
+    if (not p_sub_item_per_item_provided_total_size) {
+      if constexpr (ItemTypeId<ConnectivityType::Dimension>::dimension(item_type) >
+                    ItemTypeId<ConnectivityType::Dimension>::dimension(sub_item_type)) {
+        std::vector<size_t> sub_item_per_item_provided_total_size(parallel::size());
+        const auto& provided_item_info = this->_getProvidedItemInfo<ConnectivityType, item_type>(connectivity);
+        for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+          const auto& provided_item_info_from_rank = provided_item_info[i_rank];
 
-    std::map<int, ItemId> item_number_to_id_correspondance;
-    for (ItemId item_id = 0; item_id < item_number.numberOfItems(); ++item_id) {
-      item_number_to_id_correspondance[item_number[item_id]] = item_id;
-    }
+          const auto& item_to_item_matrix = connectivity.template getItemToItemMatrix<item_type, sub_item_type>();
 
-    auto& p_provided_item_info = this->_getProvidedItemInfo<item_type>();
-    p_provided_item_info       = [&]() {
-      ExchangeItemTypeInfo<item_type> provided_item_info(parallel::size());
-      for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
-        Array<ItemId> provided_item_id_to_rank{local_number_of_values_to_send[i_rank]};
-        const Array<int>& provided_item_number_to_rank = provided_item_number_list_by_rank[i_rank];
-        for (size_t i = 0; i < provided_item_number_to_rank.size(); ++i) {
-          provided_item_id_to_rank[i] = item_number_to_id_correspondance.find(provided_item_number_to_rank[i])->second;
+          size_t count = 0;
+          for (size_t i = 0; i < provided_item_info_from_rank.size(); ++i) {
+            count += item_to_item_matrix[provided_item_info_from_rank[i]].size();
+          }
+
+          sub_item_per_item_provided_total_size[i_rank] = count;
+        }
+        p_sub_item_per_item_provided_total_size =
+          std::make_unique<std::vector<size_t>>(std::move(sub_item_per_item_provided_total_size));
+      } else {
+        std::vector<size_t> sub_item_per_item_provided_total_size(parallel::size());
+
+        const auto& sub_item_required_total_size =
+          _getSubItemPerItemRequestedTotalSize<item_type, sub_item_type>(connectivity);
+        const auto& requested_item_info = this->_getRequestedItemInfo<ConnectivityType, item_type>(connectivity);
+
+        std::vector<Array<size_t>> sub_item_required_total_size_exchange(parallel::size());
+        for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+          Assert((sub_item_required_total_size[i_rank] == 0) xor (requested_item_info[i_rank].size() > 0),
+                 "unexpected sub_item size info");
+          if (requested_item_info[i_rank].size() > 0) {
+            Array<size_t> size_0d_array(1);
+            size_0d_array[0]                              = sub_item_required_total_size[i_rank];
+            sub_item_required_total_size_exchange[i_rank] = size_0d_array;
+          }
+        }
+
+        const auto& provided_item_info = this->_getProvidedItemInfo<ConnectivityType, item_type>(connectivity);
+        std::vector<Array<size_t>> sub_item_provided_total_size_exchange(parallel::size());
+        for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+          if (provided_item_info[i_rank].size() > 0) {
+            Array<size_t> size_0d_array(1);
+            sub_item_provided_total_size_exchange[i_rank] = size_0d_array;
+          }
         }
-        provided_item_info[i_rank] = provided_item_id_to_rank;
+
+        parallel::exchange(sub_item_required_total_size_exchange, sub_item_provided_total_size_exchange);
+
+        for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+          if (sub_item_provided_total_size_exchange[i_rank].size() > 0) {
+            sub_item_per_item_provided_total_size[i_rank] = sub_item_provided_total_size_exchange[i_rank][0];
+          }
+        }
+
+        p_sub_item_per_item_provided_total_size =
+          std::make_unique<std::vector<size_t>>(std::move(sub_item_per_item_provided_total_size));
       }
-      return std::make_unique<ExchangeItemTypeInfo<item_type>>(provided_item_info);
-    }();
+    }
 
-    m_sub_item_per_item_provided_size_list.resize(parallel::size());
-    m_sub_item_per_item_requested_size_list.resize(parallel::size());
+    return (*p_sub_item_per_item_provided_total_size);
   }
 
-  template <ItemType item_type, ItemType sub_item_type, size_t Dimension>
-  PUGS_INLINE size_t
-  _getSubItemPerItemRequestedSize(const Connectivity<Dimension>& connectivity, const size_t i_rank)
+  template <ItemType item_type, ItemType sub_item_type, typename ConnectivityType>
+  PUGS_INLINE const std::vector<Array<const size_t>>&
+  _getNumberOfSubItemPerItemProvidedList(const ConnectivityType& connectivity)
   {
-    Assert(m_sub_item_per_item_requested_size_list.size() == parallel::size());
+    static_assert(ItemTypeId<ConnectivityType::Dimension>::dimension(sub_item_type) >
+                    ItemTypeId<ConnectivityType::Dimension>::dimension(item_type),
+                  "should not be called if dimension of sub item is lower than item");
 
-    auto key = std::make_pair(item_type, sub_item_type);
-    if (auto i_size_list = m_sub_item_per_item_requested_size_list[i_rank].find(key);
-        i_size_list != m_sub_item_per_item_requested_size_list[i_rank].end()) {
-      return i_size_list->second;
-    } else {
-      const auto& p_requested_item_info = this->_getRequestedItemInfo<item_type>();
+    auto& p_number_of_sub_item_per_item_provided_list =
+      m_number_of_sub_item_per_item_provided_list[item_of_item_type_index<sub_item_type, item_type>];
 
-      Assert(static_cast<bool>(p_requested_item_info) == true,
-             "this function should be called after calculation of exchange info");
-      const auto& requested_item_info_from_rank = (*p_requested_item_info)[i_rank];
+    if (not p_number_of_sub_item_per_item_provided_list) {
+      using ItemId = ItemIdT<item_type>;
 
       const auto& item_to_item_matrix = connectivity.template getItemToItemMatrix<item_type, sub_item_type>();
+      const auto number_of_sub_item   = connectivity.template number<sub_item_type>();
 
-      size_t count = 0;
-      for (size_t i = 0; i < requested_item_info_from_rank.size(); ++i) {
-        count += item_to_item_matrix[requested_item_info_from_rank[i]].size();
+      const auto& requested_item_info = this->_getRequestedItemInfo<ConnectivityType, item_type>(connectivity);
+
+      std::vector<Array<size_t>> number_of_sub_item_per_item_required_exchange(parallel::size());
+      for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+        const auto& requested_item_info_from_rank = requested_item_info[i_rank];
+        if (requested_item_info_from_rank.size() > 0) {
+          Array<size_t> number_of_sub_item_per_item(requested_item_info_from_rank.size());
+
+          size_t count = 0;
+          for (size_t i_item = 0; i_item < requested_item_info_from_rank.size(); ++i_item) {
+            const ItemId item_id                 = requested_item_info_from_rank[i_item];
+            number_of_sub_item_per_item[count++] = item_to_item_matrix[item_id].size();
+          }
+          number_of_sub_item_per_item_required_exchange[i_rank] = number_of_sub_item_per_item;
+        }
       }
 
-      m_sub_item_per_item_requested_size_list[i_rank][key] = count;
-      return count;
+      const auto& provided_item_info = this->_getProvidedItemInfo<ConnectivityType, item_type>(connectivity);
+      std::vector<Array<size_t>> number_of_sub_item_per_item_provided_exchange(parallel::size());
+      for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+        if (provided_item_info[i_rank].size() > 0) {
+          number_of_sub_item_per_item_provided_exchange[i_rank] = Array<size_t>{provided_item_info[i_rank].size()};
+        }
+      }
+
+      parallel::exchange(number_of_sub_item_per_item_required_exchange, number_of_sub_item_per_item_provided_exchange);
+
+      std::vector<Array<const size_t>> number_of_sub_item_per_item_provided_list(parallel::size());
+      for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+        number_of_sub_item_per_item_provided_list[i_rank] = number_of_sub_item_per_item_provided_exchange[i_rank];
+      }
+      p_number_of_sub_item_per_item_provided_list =
+        std::make_unique<std::vector<Array<const size_t>>>(std::move(number_of_sub_item_per_item_provided_list));
     }
+    return *p_number_of_sub_item_per_item_provided_list;
   }
 
-  template <ItemType item_type, ItemType sub_item_type, size_t Dimension>
-  PUGS_INLINE size_t
-  _getSubItemPerItemProvidedSize(const Connectivity<Dimension>& connectivity, const size_t i_rank)
+  template <ItemType item_type, ItemType sub_item_type, typename ConnectivityType>
+  PUGS_INLINE const std::vector<Array<const size_t>>&
+  _getSubItemPerItemProvidedList(const ConnectivityType& connectivity)
   {
-    Assert(m_sub_item_per_item_provided_size_list.size() == parallel::size());
-
-    auto key = std::make_pair(item_type, sub_item_type);
-    if (auto i_size_list = m_sub_item_per_item_provided_size_list[i_rank].find(key);
-        i_size_list != m_sub_item_per_item_provided_size_list[i_rank].end()) {
-      return i_size_list->second;
-    } else {
-      const auto& p_provided_item_info = this->_getProvidedItemInfo<item_type>();
+    static_assert(ItemTypeId<ConnectivityType::Dimension>::dimension(sub_item_type) >
+                    ItemTypeId<ConnectivityType::Dimension>::dimension(item_type),
+                  "should not be called if dimension of sub item is lower than item");
+    auto& p_sub_item_per_item_provided_list =
+      m_sub_item_per_item_provided_list[item_of_item_type_index<sub_item_type, item_type>];
 
-      Assert(static_cast<bool>(p_provided_item_info) == true,
-             "this function should be called after calculation of exchange info");
-      const auto& provided_item_info_from_rank = (*p_provided_item_info)[i_rank];
+    if (not p_sub_item_per_item_provided_list) {
+      using ItemId = ItemIdT<item_type>;
 
       const auto& item_to_item_matrix = connectivity.template getItemToItemMatrix<item_type, sub_item_type>();
+      const auto number_of_sub_item   = connectivity.template number<sub_item_type>();
+
+      const auto& sub_item_required_size = _getSubItemPerItemRequestedTotalSize<item_type, sub_item_type>(connectivity);
+      const auto& requested_item_info    = this->_getRequestedItemInfo<ConnectivityType, item_type>(connectivity);
 
-      size_t count = 0;
-      for (size_t i = 0; i < provided_item_info_from_rank.size(); ++i) {
-        count += item_to_item_matrix[provided_item_info_from_rank[i]].size();
+      std::vector<Array<int>> sub_item_per_item_required_numbers_exchange(parallel::size());
+      for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+        Assert((sub_item_required_size[i_rank] == 0) xor (requested_item_info[i_rank].size() > 0),
+               "unexpected sub_item size info");
+        if (requested_item_info[i_rank].size() > 0) {
+          Array<int> sub_item_numbers(sub_item_required_size[i_rank]);
+
+          size_t count                              = 0;
+          const auto& requested_item_info_from_rank = requested_item_info[i_rank];
+          for (size_t i_item = 0; i_item < requested_item_info_from_rank.size(); ++i_item) {
+            const ItemId item_id = requested_item_info_from_rank[i_item];
+            auto item_sub_items  = item_to_item_matrix[item_id];
+            for (size_t i_sub_item = 0; i_sub_item < item_sub_items.size(); ++i_sub_item) {
+              sub_item_numbers[count++] = number_of_sub_item[item_sub_items[i_sub_item]];
+            }
+          }
+          Assert(count == sub_item_numbers.size());
+          sub_item_per_item_required_numbers_exchange[i_rank] = sub_item_numbers;
+        }
       }
 
-      m_sub_item_per_item_provided_size_list[i_rank][key] = count;
-      return count;
+      const auto& provided_item_info     = this->_getProvidedItemInfo<ConnectivityType, item_type>(connectivity);
+      const auto& sub_item_provided_size = _getSubItemPerItemProvidedTotalSize<item_type, sub_item_type>(connectivity);
+      std::vector<Array<int>> sub_item_per_item_provided_numbers_exchange(parallel::size());
+      for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+        if (provided_item_info[i_rank].size() > 0) {
+          sub_item_per_item_provided_numbers_exchange[i_rank] = Array<int>{sub_item_provided_size[i_rank]};
+        }
+      }
+
+      parallel::exchange(sub_item_per_item_required_numbers_exchange, sub_item_per_item_provided_numbers_exchange);
+
+      const auto& number_of_sub_item_per_item_provided_list =
+        this->_getNumberOfSubItemPerItemProvidedList<item_type, sub_item_type>(connectivity);
+
+      std::vector<Array<const size_t>> sub_item_per_item_provided_list(parallel::size());
+      for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+        if (provided_item_info[i_rank].size() > 0) {
+          const auto& sub_item_numbers            = sub_item_per_item_provided_numbers_exchange[i_rank];
+          const auto& number_of_sub_item_per_item = number_of_sub_item_per_item_provided_list[i_rank];
+          Array<size_t> sub_item_list{sub_item_provided_size[i_rank]};
+          size_t count = 0;
+
+          const auto& provided_item_info_to_rank = provided_item_info[i_rank];
+          for (size_t i_item = 0; i_item < provided_item_info_to_rank.size(); ++i_item) {
+            const ItemId item_id = provided_item_info_to_rank[i_item];
+            auto item_sub_items  = item_to_item_matrix[item_id];
+            bool found           = false;
+            for (size_t i_sub_item = 0, i_requied_sub_item = 0; i_sub_item < item_sub_items.size(); ++i_sub_item) {
+              found      = false;
+              int number = sub_item_numbers[count];
+              if (number == number_of_sub_item[item_sub_items[i_sub_item]]) {
+                found                = true;
+                sub_item_list[count] = i_sub_item;
+                i_requied_sub_item++;
+                count++;
+                if (i_requied_sub_item == number_of_sub_item_per_item[i_item]) {
+                  break;
+                }
+              }
+            }
+            Assert(found, "something wierd occured");
+          }
+
+          Assert(count == sub_item_list.size());
+          sub_item_per_item_provided_list[i_rank] = sub_item_list;
+        }
+      }
+
+      p_sub_item_per_item_provided_list =
+        std::make_unique<std::vector<Array<const size_t>>>(std::move(sub_item_per_item_provided_list));
     }
+
+    return (*p_sub_item_per_item_provided_list);
   }
 
   template <typename ConnectivityType, typename DataType, ItemType item_type, typename ConnectivityPtr>
@@ -209,19 +390,8 @@ class Synchronizer
 
     using ItemId = ItemIdT<item_type>;
 
-    const auto& p_provided_item_info  = this->_getProvidedItemInfo<item_type>();
-    const auto& p_requested_item_info = this->_getRequestedItemInfo<item_type>();
-
-    Assert(static_cast<bool>(p_provided_item_info) == static_cast<bool>(p_requested_item_info));
-
-    if (not p_provided_item_info) {
-      this->_buildSynchronizeInfo<ConnectivityType, item_type>(connectivity);
-    }
-
-    const auto& provided_item_info  = *p_provided_item_info;
-    const auto& requested_item_info = *p_requested_item_info;
-
-    Assert(requested_item_info.size() == provided_item_info.size());
+    const auto& provided_item_info  = this->_getProvidedItemInfo<ConnectivityType, item_type>(connectivity);
+    const auto& requested_item_info = this->_getRequestedItemInfo<ConnectivityType, item_type>(connectivity);
 
     std::vector<Array<const DataType>> provided_data_list(parallel::size());
     for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
@@ -258,20 +428,10 @@ class Synchronizer
 
     using ItemId = ItemIdT<item_type>;
 
-    const auto& p_provided_item_info  = this->_getProvidedItemInfo<item_type>();
-    const auto& p_requested_item_info = this->_getRequestedItemInfo<item_type>();
-
-    Assert(static_cast<bool>(p_provided_item_info) == static_cast<bool>(p_requested_item_info));
-
-    if (not p_provided_item_info) {
-      this->_buildSynchronizeInfo<ConnectivityType, item_type>(connectivity);
-    }
-
-    const auto& provided_item_info  = *p_provided_item_info;
-    const auto& requested_item_info = *p_requested_item_info;
+    const auto& provided_item_info  = this->_getProvidedItemInfo<ConnectivityType, item_type>(connectivity);
+    const auto& requested_item_info = this->_getRequestedItemInfo<ConnectivityType, item_type>(connectivity);
 
     Assert(requested_item_info.size() == provided_item_info.size());
-
     const size_t size_of_arrays = item_array.sizeOfArrays();
 
     std::vector<Array<const DataType>> provided_data_list(parallel::size());
@@ -316,34 +476,27 @@ class Synchronizer
   _synchronize(const ConnectivityType& connectivity,
                SubItemValuePerItem<DataType, ItemOfItem, ConnectivityPtr>& sub_item_value_per_item)
   {
+    static_assert(ItemOfItem::item_type != ItemOfItem::sub_item_type);
     static_assert(not std::is_abstract_v<ConnectivityType>, "_synchronize must be called on a concrete connectivity");
-    if constexpr (ItemTypeId<ConnectivityType::Dimension>::dimension(ItemOfItem::item_type) >
-                  ItemTypeId<ConnectivityType::Dimension>::dimension(ItemOfItem::sub_item_type)) {
-      constexpr ItemType item_type     = ItemOfItem::item_type;
-      constexpr ItemType sub_item_type = ItemOfItem::sub_item_type;
 
-      using ItemId = ItemIdT<item_type>;
-
-      const auto& p_provided_item_info  = this->_getProvidedItemInfo<item_type>();
-      const auto& p_requested_item_info = this->_getRequestedItemInfo<item_type>();
+    constexpr ItemType item_type     = ItemOfItem::item_type;
+    constexpr ItemType sub_item_type = ItemOfItem::sub_item_type;
 
-      Assert(static_cast<bool>(p_provided_item_info) == static_cast<bool>(p_requested_item_info));
+    using ItemId = ItemIdT<item_type>;
 
-      if (not p_provided_item_info) {
-        this->_buildSynchronizeInfo<ConnectivityType, item_type>(connectivity);
-      }
+    const auto& provided_item_info  = this->_getProvidedItemInfo<ConnectivityType, item_type>(connectivity);
+    const auto& requested_item_info = this->_getRequestedItemInfo<ConnectivityType, item_type>(connectivity);
 
-      const auto& provided_item_info  = *p_provided_item_info;
-      const auto& requested_item_info = *p_requested_item_info;
+    const auto& sub_item_per_item_provided_size =
+      _getSubItemPerItemProvidedTotalSize<item_type, sub_item_type>(connectivity);
 
-      Assert(requested_item_info.size() == provided_item_info.size());
+    std::vector<Array<const DataType>> provided_data_list(parallel::size());
 
-      std::vector<Array<const DataType>> provided_data_list(parallel::size());
+    if constexpr (ItemTypeId<ConnectivityType::Dimension>::dimension(ItemOfItem::item_type) >
+                  ItemTypeId<ConnectivityType::Dimension>::dimension(ItemOfItem::sub_item_type)) {
       for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
         const Array<const ItemId>& provided_item_info_to_rank = provided_item_info[i_rank];
-        const size_t send_size = _getSubItemPerItemProvidedSize<item_type, sub_item_type>(connectivity, i_rank);
-
-        Array<DataType> provided_data{send_size};
+        Array<DataType> provided_data{sub_item_per_item_provided_size[i_rank]};
         size_t index = 0;
         for (size_t i = 0; i < provided_item_info_to_rank.size(); ++i) {
           const ItemId item_id   = provided_item_info_to_rank[i];
@@ -354,32 +507,54 @@ class Synchronizer
         }
         provided_data_list[i_rank] = provided_data;
       }
+    } else if constexpr (ItemTypeId<ConnectivityType::Dimension>::dimension(ItemOfItem::item_type) <
+                         ItemTypeId<ConnectivityType::Dimension>::dimension(ItemOfItem::sub_item_type)) {
+      const auto& number_of_sub_item_per_item_provided_list =
+        this->_getNumberOfSubItemPerItemProvidedList<item_type, sub_item_type>(connectivity);
+      const auto& sub_item_per_item_provided_list =
+        this->_getSubItemPerItemProvidedList<item_type, sub_item_type>(connectivity);
 
-      std::vector<Array<DataType>> requested_data_list(parallel::size());
-      for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
-        const size_t recv_size      = _getSubItemPerItemRequestedSize<item_type, sub_item_type>(connectivity, i_rank);
-        requested_data_list[i_rank] = Array<DataType>{recv_size};
-      }
-
-      parallel::exchange(provided_data_list, requested_data_list);
       for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
-        const auto& requested_item_info_from_rank = requested_item_info[i_rank];
-        const auto& requested_data                = requested_data_list[i_rank];
+        const Array<const ItemId>& provided_item_info_to_rank              = provided_item_info[i_rank];
+        const Array<const size_t>& sub_item_per_item_provided_list_to_rank = sub_item_per_item_provided_list[i_rank];
+        const Array<const size_t>& number_of_sub_item_per_item_provided_list_to_rank =
+          number_of_sub_item_per_item_provided_list[i_rank];
 
+        Array<DataType> provided_data{sub_item_per_item_provided_size[i_rank]};
         size_t index = 0;
-        for (size_t i = 0; i < requested_item_info_from_rank.size(); ++i) {
-          const ItemId item_id   = requested_item_info_from_rank[i];
+        for (size_t i = 0; i < provided_item_info_to_rank.size(); ++i) {
+          const ItemId item_id   = provided_item_info_to_rank[i];
           const auto item_values = sub_item_value_per_item.itemArray(item_id);
-          for (size_t j = 0; j < item_values.size(); ++j) {
-            item_values[j] = requested_data[index++];
+          for (size_t j = 0; j < number_of_sub_item_per_item_provided_list_to_rank[i]; ++j, ++index) {
+            provided_data[index] = item_values[sub_item_per_item_provided_list_to_rank[index]];
           }
         }
+        provided_data_list[i_rank] = provided_data;
+      }
+    }
+
+    const auto& sub_item_per_item_requested_size =
+      _getSubItemPerItemRequestedTotalSize<item_type, sub_item_type>(connectivity);
+
+    std::vector<Array<DataType>> requested_data_list(parallel::size());
+    for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+      requested_data_list[i_rank] = Array<DataType>{sub_item_per_item_requested_size[i_rank]};
+    }
+
+    parallel::exchange(provided_data_list, requested_data_list);
+
+    for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+      const auto& requested_item_info_from_rank = requested_item_info[i_rank];
+      const auto& requested_data                = requested_data_list[i_rank];
+
+      size_t index = 0;
+      for (size_t i = 0; i < requested_item_info_from_rank.size(); ++i) {
+        const ItemId item_id       = requested_item_info_from_rank[i];
+        const auto sub_item_values = sub_item_value_per_item.itemArray(item_id);
+        for (size_t j = 0; j < sub_item_values.size(); ++j) {
+          sub_item_values[j] = requested_data[index++];
+        }
       }
-    } else {
-      std::ostringstream os;
-      os << "synchronization requires sub-item type (" << itemName(ItemOfItem::sub_item_type)
-         << ") to be of lower dimension than item (" << itemName(ItemOfItem::item_type) << ")";
-      throw UnexpectedError(os.str());
     }
   }
 
@@ -388,34 +563,28 @@ class Synchronizer
   _synchronize(const ConnectivityType& connectivity,
                SubItemArrayPerItem<DataType, ItemOfItem, ConnectivityPtr>& sub_item_array_per_item)
   {
+    static_assert(ItemOfItem::item_type != ItemOfItem::sub_item_type);
     static_assert(not std::is_abstract_v<ConnectivityType>, "_synchronize must be called on a concrete connectivity");
-    if constexpr (ItemTypeId<ConnectivityType::Dimension>::dimension(ItemOfItem::item_type) >
-                  ItemTypeId<ConnectivityType::Dimension>::dimension(ItemOfItem::sub_item_type)) {
-      constexpr ItemType item_type     = ItemOfItem::item_type;
-      constexpr ItemType sub_item_type = ItemOfItem::sub_item_type;
-
-      using ItemId = ItemIdT<item_type>;
 
-      const auto& p_provided_item_info  = this->_getProvidedItemInfo<item_type>();
-      const auto& p_requested_item_info = this->_getRequestedItemInfo<item_type>();
+    constexpr ItemType item_type     = ItemOfItem::item_type;
+    constexpr ItemType sub_item_type = ItemOfItem::sub_item_type;
 
-      Assert(static_cast<bool>(p_provided_item_info) == static_cast<bool>(p_requested_item_info));
+    using ItemId = ItemIdT<item_type>;
 
-      if (not p_provided_item_info) {
-        this->_buildSynchronizeInfo<ConnectivityType, item_type>(connectivity);
-      }
+    const auto& provided_item_info  = this->_getProvidedItemInfo<ConnectivityType, item_type>(connectivity);
+    const auto& requested_item_info = this->_getRequestedItemInfo<ConnectivityType, item_type>(connectivity);
 
-      const auto& provided_item_info  = *p_provided_item_info;
-      const auto& requested_item_info = *p_requested_item_info;
+    const auto& sub_item_per_item_provided_size =
+      _getSubItemPerItemProvidedTotalSize<item_type, sub_item_type>(connectivity);
 
-      Assert(requested_item_info.size() == provided_item_info.size());
+    std::vector<Array<const DataType>> provided_data_list(parallel::size());
 
-      std::vector<Array<const DataType>> provided_data_list(parallel::size());
+    if constexpr (ItemTypeId<ConnectivityType::Dimension>::dimension(ItemOfItem::item_type) >
+                  ItemTypeId<ConnectivityType::Dimension>::dimension(ItemOfItem::sub_item_type)) {
       for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
         const Array<const ItemId>& provided_item_info_to_rank = provided_item_info[i_rank];
-        const size_t send_size = _getSubItemPerItemProvidedSize<item_type, sub_item_type>(connectivity, i_rank);
 
-        Array<DataType> provided_data{send_size * sub_item_array_per_item.sizeOfArrays()};
+        Array<DataType> provided_data{sub_item_per_item_provided_size[i_rank] * sub_item_array_per_item.sizeOfArrays()};
         size_t index = 0;
         for (size_t i = 0; i < provided_item_info_to_rank.size(); ++i) {
           const ItemId item_id  = provided_item_info_to_rank[i];
@@ -429,35 +598,62 @@ class Synchronizer
         }
         provided_data_list[i_rank] = provided_data;
       }
+    } else if constexpr (ItemTypeId<ConnectivityType::Dimension>::dimension(ItemOfItem::item_type) <
+                         ItemTypeId<ConnectivityType::Dimension>::dimension(ItemOfItem::sub_item_type)) {
+      const auto& number_of_sub_item_per_item_provided_list =
+        this->_getNumberOfSubItemPerItemProvidedList<item_type, sub_item_type>(connectivity);
+      const auto& sub_item_per_item_provided_list =
+        this->_getSubItemPerItemProvidedList<item_type, sub_item_type>(connectivity);
 
-      std::vector<Array<DataType>> requested_data_list(parallel::size());
-      for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
-        const size_t recv_size      = _getSubItemPerItemRequestedSize<item_type, sub_item_type>(connectivity, i_rank);
-        requested_data_list[i_rank] = Array<DataType>{recv_size * sub_item_array_per_item.sizeOfArrays()};
-      }
-
-      parallel::exchange(provided_data_list, requested_data_list);
       for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
-        const auto& requested_item_info_from_rank = requested_item_info[i_rank];
-        const auto& requested_data                = requested_data_list[i_rank];
+        const Array<const ItemId>& provided_item_info_to_rank              = provided_item_info[i_rank];
+        const Array<const size_t>& sub_item_per_item_provided_list_to_rank = sub_item_per_item_provided_list[i_rank];
+        const Array<const size_t>& number_of_sub_item_per_item_provided_list_to_rank =
+          number_of_sub_item_per_item_provided_list[i_rank];
 
+        Array<DataType> provided_data{sub_item_per_item_provided_size[i_rank] * sub_item_array_per_item.sizeOfArrays()};
         size_t index = 0;
-        for (size_t i = 0; i < requested_item_info_from_rank.size(); ++i) {
-          const ItemId item_id  = requested_item_info_from_rank[i];
+        for (size_t i = 0; i < provided_item_info_to_rank.size(); ++i) {
+          const ItemId item_id  = provided_item_info_to_rank[i];
           const auto item_table = sub_item_array_per_item.itemTable(item_id);
-          for (size_t j = 0; j < item_table.numberOfRows(); ++j) {
+          for (size_t j = 0; j < number_of_sub_item_per_item_provided_list_to_rank[i]; ++j, ++index) {
             Assert(item_table.numberOfColumns() == sub_item_array_per_item.sizeOfArrays());
             for (size_t k = 0; k < sub_item_array_per_item.sizeOfArrays(); ++k) {
-              item_table(j, k) = requested_data[index++];
+              provided_data[sub_item_array_per_item.sizeOfArrays() * index + k] =
+                item_table(sub_item_per_item_provided_list_to_rank[index], k);
             }
           }
         }
+        Assert(index == sub_item_per_item_provided_list_to_rank.size());
+        provided_data_list[i_rank] = provided_data;
+      }
+    }
+
+    const auto& sub_item_per_item_requested_size =
+      _getSubItemPerItemRequestedTotalSize<item_type, sub_item_type>(connectivity);
+
+    std::vector<Array<DataType>> requested_data_list(parallel::size());
+    for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+      requested_data_list[i_rank] =
+        Array<DataType>{sub_item_per_item_requested_size[i_rank] * sub_item_array_per_item.sizeOfArrays()};
+    }
+
+    parallel::exchange(provided_data_list, requested_data_list);
+    for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+      const auto& requested_item_info_from_rank = requested_item_info[i_rank];
+      const auto& requested_data                = requested_data_list[i_rank];
+
+      size_t index = 0;
+      for (size_t i = 0; i < requested_item_info_from_rank.size(); ++i) {
+        const ItemId item_id  = requested_item_info_from_rank[i];
+        const auto item_table = sub_item_array_per_item.itemTable(item_id);
+        for (size_t j = 0; j < item_table.numberOfRows(); ++j) {
+          Assert(item_table.numberOfColumns() == sub_item_array_per_item.sizeOfArrays());
+          for (size_t k = 0; k < sub_item_array_per_item.sizeOfArrays(); ++k) {
+            item_table(j, k) = requested_data[index++];
+          }
+        }
       }
-    } else {
-      std::ostringstream os;
-      os << "synchronization requires sub-item type (" << itemName(ItemOfItem::sub_item_type)
-         << ") to be of lower dimension than item (" << itemName(ItemOfItem::item_type) << ")";
-      throw UnexpectedError(os.str());
     }
   }
 
@@ -466,6 +662,7 @@ class Synchronizer
   PUGS_INLINE void
   synchronize(ItemValue<DataType, item_type, ConnectivityPtr>& item_value)
   {
+    static_assert(not std::is_const_v<DataType>, "cannot synchronize ItemValue of const data");
     Assert(item_value.connectivity_ptr().use_count() > 0, "No connectivity is associated to this ItemValue");
     const IConnectivity& connectivity = *item_value.connectivity_ptr();
 
@@ -494,6 +691,7 @@ class Synchronizer
   PUGS_INLINE void
   synchronize(ItemArray<DataType, item_type, ConnectivityPtr>& item_array)
   {
+    static_assert(not std::is_const_v<DataType>, "cannot synchronize ItemArray of const data");
     Assert(item_array.connectivity_ptr().use_count() > 0, "No connectivity is associated to this ItemArray");
     const IConnectivity& connectivity = *item_array.connectivity_ptr();
 
@@ -522,6 +720,7 @@ class Synchronizer
   PUGS_INLINE void
   synchronize(SubItemValuePerItem<DataType, ItemOfItem, ConnectivityPtr>& sub_item_value_per_item)
   {
+    static_assert(not std::is_const_v<DataType>, "cannot synchronize SubItemValuePerItem of const data");
     Assert(sub_item_value_per_item.connectivity_ptr().use_count() > 0,
            "No connectivity is associated to this SubItemValuePerItem");
 
@@ -552,6 +751,7 @@ class Synchronizer
   PUGS_INLINE void
   synchronize(SubItemArrayPerItem<DataType, ItemOfItem, ConnectivityPtr>& sub_item_value_per_item)
   {
+    static_assert(not std::is_const_v<DataType>, "cannot synchronize SubItemArrayPerItem of const data");
     Assert(sub_item_value_per_item.connectivity_ptr().use_count() > 0,
            "No connectivity is associated to this SubItemValuePerItem");
 
@@ -600,6 +800,7 @@ class Synchronizer
   PUGS_INLINE void
   synchronize(ItemValue<DataType, item_type, ConnectivityPtr>& item_value)
   {
+    static_assert(not std::is_const_v<DataType>, "cannot synchronize ItemValue of const data");
     Assert(item_value.connectivity_ptr().use_count() > 0, "No connectivity is associated to this ItemValue");
   }
 
@@ -607,6 +808,7 @@ class Synchronizer
   PUGS_INLINE void
   synchronize(ItemArray<DataType, item_type, ConnectivityPtr>& item_value)
   {
+    static_assert(not std::is_const_v<DataType>, "cannot synchronize ItemArray of const data");
     Assert(item_value.connectivity_ptr().use_count() > 0, "No connectivity is associated to this ItemValue");
   }
 
@@ -614,96 +816,18 @@ class Synchronizer
   PUGS_INLINE void
   synchronize(SubItemValuePerItem<DataType, ItemOfItem, ConnectivityPtr>& sub_item_value_per_item)
   {
+    static_assert(not std::is_const_v<DataType>, "cannot synchronize SubItemValuePerItem of const data");
     Assert(sub_item_value_per_item.connectivity_ptr().use_count() > 0,
            "No connectivity is associated to this SubItemValuePerItem");
-
-    const IConnectivity& connectivity = *sub_item_value_per_item.connectivity_ptr();
-
-    switch (connectivity.dimension()) {
-    case 1: {
-      if constexpr (ItemTypeId<1>::dimension(ItemOfItem::item_type) <=
-                    ItemTypeId<1>::dimension(ItemOfItem::sub_item_type)) {
-        std::ostringstream os;
-        os << "synchronization requires sub-item type (" << itemName(ItemOfItem::sub_item_type)
-           << ") to be of lower dimension than item (" << itemName(ItemOfItem::item_type) << ")";
-        throw UnexpectedError(os.str());
-      }
-      break;
-    }
-    case 2: {
-      if constexpr (ItemTypeId<2>::dimension(ItemOfItem::item_type) <=
-                    ItemTypeId<2>::dimension(ItemOfItem::sub_item_type)) {
-        std::ostringstream os;
-        os << "synchronization requires sub-item type (" << itemName(ItemOfItem::sub_item_type)
-           << ") to be of lower dimension than item (" << itemName(ItemOfItem::item_type) << ")";
-        throw UnexpectedError(os.str());
-      }
-      break;
-    }
-    case 3: {
-      if constexpr (ItemTypeId<3>::dimension(ItemOfItem::item_type) <=
-                    ItemTypeId<3>::dimension(ItemOfItem::sub_item_type)) {
-        std::ostringstream os;
-        os << "synchronization requires sub-item type (" << itemName(ItemOfItem::sub_item_type)
-           << ") to be of lower dimension than item (" << itemName(ItemOfItem::item_type) << ")";
-        throw UnexpectedError(os.str());
-      }
-      break;
-    }
-      // LCOV_EXCL_START
-    default: {
-      throw UnexpectedError("unexpected dimension");
-    }
-      // LCOV_EXCL_STOP
-    }
   }
 
   template <typename DataType, typename ItemOfItem, typename ConnectivityPtr>
   PUGS_INLINE void
   synchronize(SubItemArrayPerItem<DataType, ItemOfItem, ConnectivityPtr>& sub_item_array_per_item)
   {
+    static_assert(not std::is_const_v<DataType>, "cannot synchronize SubItemArrayPerItem of const data");
     Assert(sub_item_array_per_item.connectivity_ptr().use_count() > 0,
            "No connectivity is associated to this SubItemArrayPerItem");
-
-    const IConnectivity& connectivity = *sub_item_array_per_item.connectivity_ptr();
-
-    switch (connectivity.dimension()) {
-    case 1: {
-      if constexpr (ItemTypeId<1>::dimension(ItemOfItem::item_type) <=
-                    ItemTypeId<1>::dimension(ItemOfItem::sub_item_type)) {
-        std::ostringstream os;
-        os << "synchronization requires sub-item type (" << itemName(ItemOfItem::sub_item_type)
-           << ") to be of lower dimension than item (" << itemName(ItemOfItem::item_type) << ")";
-        throw UnexpectedError(os.str());
-      }
-      break;
-    }
-    case 2: {
-      if constexpr (ItemTypeId<2>::dimension(ItemOfItem::item_type) <=
-                    ItemTypeId<2>::dimension(ItemOfItem::sub_item_type)) {
-        std::ostringstream os;
-        os << "synchronization requires sub-item type (" << itemName(ItemOfItem::sub_item_type)
-           << ") to be of lower dimension than item (" << itemName(ItemOfItem::item_type) << ")";
-        throw UnexpectedError(os.str());
-      }
-      break;
-    }
-    case 3: {
-      if constexpr (ItemTypeId<3>::dimension(ItemOfItem::item_type) <=
-                    ItemTypeId<3>::dimension(ItemOfItem::sub_item_type)) {
-        std::ostringstream os;
-        os << "synchronization requires sub-item type (" << itemName(ItemOfItem::sub_item_type)
-           << ") to be of lower dimension than item (" << itemName(ItemOfItem::item_type) << ")";
-        throw UnexpectedError(os.str());
-      }
-      break;
-    }
-      // LCOV_EXCL_START
-    default: {
-      throw UnexpectedError("unexpected dimension");
-    }
-      // LCOV_EXCL_STOP
-    }
   }
 
   Synchronizer(const Synchronizer&) = delete;
diff --git a/tests/test_Synchronizer.cpp b/tests/test_Synchronizer.cpp
index 54a065b475c757bdf8e108e01d1b8f859daa262d..fcad9e414d4f5d3c88a6c50e41c1108cf996a13b 100644
--- a/tests/test_Synchronizer.cpp
+++ b/tests/test_Synchronizer.cpp
@@ -914,32 +914,147 @@ TEST_CASE("Synchronizer", "[mesh]")
         }
       }
 
-      SECTION("forbidden synchronization")
+      SECTION("synchonize CellValuePerNode")
       {
+        const auto node_is_owned = connectivity.nodeIsOwned();
+        const auto node_number   = connectivity.nodeNumber();
+        const auto cell_number   = connectivity.cellNumber();
+
+        const auto node_to_cell_matrix = connectivity.nodeToCellMatrix();
+
+        CellValuePerNode<int> cell_value_per_node_ref{connectivity};
+
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < cell_value_per_node_ref.numberOfSubValues(node_id); ++j) {
+              cell_value_per_node_ref(node_id, j) =   // node_owner[node_id] +
+                node_number[node_id] + 100 * cell_number[node_to_cell_matrix[node_id][j]];
+            }
+          });
+
+        CellValuePerNode<int> cell_value_per_node{connectivity};
+        cell_value_per_node.fill(0);
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < cell_value_per_node.numberOfSubValues(node_id); ++j) {
+              if (node_is_owned[node_id]) {
+                cell_value_per_node(node_id, j) =
+                  node_number[node_id] + 100 * cell_number[node_to_cell_matrix[node_id][j]];
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_value(cell_value_per_node, cell_value_per_node_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(cell_value_per_node);
+        REQUIRE(is_same_item_value(cell_value_per_node, cell_value_per_node_ref));
+
+        // Check that exchange sizes are correctly stored (require
+        // lines to be covered)
+        if (parallel::size() > 1) {
+          reset_ghost_values(cell_value_per_node, connectivity.nodeOwner(), 0);
+          REQUIRE(not is_same_item_value(cell_value_per_node, cell_value_per_node_ref));
+          synchronizer.synchronize(cell_value_per_node);
+          REQUIRE(is_same_item_value(cell_value_per_node, cell_value_per_node_ref));
+        }
+      }
+
+      SECTION("synchonize CellValuePerEdge")
+      {
+        const auto edge_is_owned = connectivity.edgeIsOwned();
+        const auto edge_number   = connectivity.edgeNumber();
+        const auto cell_number   = connectivity.cellNumber();
+
+        const auto edge_to_cell_matrix = connectivity.edgeToCellMatrix();
+
+        CellValuePerEdge<int> cell_value_per_edge_ref{connectivity};
+
+        parallel_for(
+          connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
+            for (size_t j = 0; j < cell_value_per_edge_ref.numberOfSubValues(edge_id); ++j) {
+              cell_value_per_edge_ref(edge_id, j) =   // edge_owner[edge_id] +
+                edge_number[edge_id] + 100 * cell_number[edge_to_cell_matrix[edge_id][j]];
+            }
+          });
+
+        CellValuePerEdge<int> cell_value_per_edge{connectivity};
+        cell_value_per_edge.fill(0);
+        parallel_for(
+          connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
+            for (size_t j = 0; j < cell_value_per_edge.numberOfSubValues(edge_id); ++j) {
+              if (edge_is_owned[edge_id]) {
+                cell_value_per_edge(edge_id, j) =
+                  edge_number[edge_id] + 100 * cell_number[edge_to_cell_matrix[edge_id][j]];
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_value(cell_value_per_edge, cell_value_per_edge_ref));
+        }
+
         Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(cell_value_per_edge);
+        REQUIRE(is_same_item_value(cell_value_per_edge, cell_value_per_edge_ref));
 
-        SECTION("CellValuePerNode")
-        {
-          CellValuePerNode<int> cell_value_per_node{connectivity};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_value_per_node),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (node)");
+        // Check that exchange sizes are correctly stored (require
+        // lines to be covered)
+        if (parallel::size() > 1) {
+          reset_ghost_values(cell_value_per_edge, connectivity.edgeOwner(), 0);
+          REQUIRE(not is_same_item_value(cell_value_per_edge, cell_value_per_edge_ref));
+          synchronizer.synchronize(cell_value_per_edge);
+          REQUIRE(is_same_item_value(cell_value_per_edge, cell_value_per_edge_ref));
         }
+      }
+
+      SECTION("synchonize CellValuePerFace")
+      {
+        const auto face_is_owned = connectivity.faceIsOwned();
+        const auto face_number   = connectivity.faceNumber();
+        const auto cell_number   = connectivity.cellNumber();
+
+        const auto face_to_cell_matrix = connectivity.faceToCellMatrix();
+
+        CellValuePerFace<int> cell_value_per_face_ref{connectivity};
+
+        parallel_for(
+          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
+            for (size_t j = 0; j < cell_value_per_face_ref.numberOfSubValues(face_id); ++j) {
+              cell_value_per_face_ref(face_id, j) =   // face_owner[face_id] +
+                face_number[face_id] + 100 * cell_number[face_to_cell_matrix[face_id][j]];
+            }
+          });
 
-        SECTION("CellValuePerEdge")
-        {
-          CellValuePerEdge<int> cell_value_per_edge{connectivity};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_value_per_edge),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (edge)");
+        CellValuePerFace<int> cell_value_per_face{connectivity};
+        cell_value_per_face.fill(0);
+        parallel_for(
+          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
+            for (size_t j = 0; j < cell_value_per_face.numberOfSubValues(face_id); ++j) {
+              if (face_is_owned[face_id]) {
+                cell_value_per_face(face_id, j) =
+                  face_number[face_id] + 100 * cell_number[face_to_cell_matrix[face_id][j]];
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_value(cell_value_per_face, cell_value_per_face_ref));
         }
 
-        SECTION("CellValuePerFace")
-        {
-          CellValuePerFace<int> cell_value_per_face{connectivity};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_value_per_face),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (face)");
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(cell_value_per_face);
+        REQUIRE(is_same_item_value(cell_value_per_face, cell_value_per_face_ref));
+
+        // Check that exchange sizes are correctly stored (require
+        // lines to be covered)
+        if (parallel::size() > 1) {
+          reset_ghost_values(cell_value_per_face, connectivity.faceOwner(), 0);
+          REQUIRE(not is_same_item_value(cell_value_per_face, cell_value_per_face_ref));
+          synchronizer.synchronize(cell_value_per_face);
+          REQUIRE(is_same_item_value(cell_value_per_face, cell_value_per_face_ref));
         }
       }
     }
@@ -1156,48 +1271,243 @@ TEST_CASE("Synchronizer", "[mesh]")
         }
       }
 
-      SECTION("forbidden synchronization")
+      SECTION("synchonize CellValuePerNode")
+      {
+        const auto node_is_owned = connectivity.nodeIsOwned();
+        const auto node_number   = connectivity.nodeNumber();
+        const auto cell_number   = connectivity.cellNumber();
+
+        const auto node_to_cell_matrix = connectivity.nodeToCellMatrix();
+
+        CellValuePerNode<int> cell_value_per_node_ref{connectivity};
+
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < cell_value_per_node_ref.numberOfSubValues(node_id); ++j) {
+              cell_value_per_node_ref(node_id, j) =   // node_owner[node_id] +
+                node_number[node_id] + 100 * cell_number[node_to_cell_matrix[node_id][j]];
+            }
+          });
+
+        CellValuePerNode<int> cell_value_per_node{connectivity};
+        cell_value_per_node.fill(0);
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < cell_value_per_node.numberOfSubValues(node_id); ++j) {
+              if (node_is_owned[node_id]) {
+                cell_value_per_node(node_id, j) =
+                  node_number[node_id] + 100 * cell_number[node_to_cell_matrix[node_id][j]];
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_value(cell_value_per_node, cell_value_per_node_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(cell_value_per_node);
+        REQUIRE(is_same_item_value(cell_value_per_node, cell_value_per_node_ref));
+
+        // Check that exchange sizes are correctly stored (require
+        // lines to be covered)
+        if (parallel::size() > 1) {
+          reset_ghost_values(cell_value_per_node, connectivity.nodeOwner(), 0);
+          REQUIRE(not is_same_item_value(cell_value_per_node, cell_value_per_node_ref));
+          synchronizer.synchronize(cell_value_per_node);
+          REQUIRE(is_same_item_value(cell_value_per_node, cell_value_per_node_ref));
+        }
+      }
+
+      SECTION("synchonize CellValuePerEdge")
+      {
+        const auto edge_is_owned = connectivity.edgeIsOwned();
+        const auto edge_number   = connectivity.edgeNumber();
+        const auto cell_number   = connectivity.cellNumber();
+
+        const auto edge_to_cell_matrix = connectivity.edgeToCellMatrix();
+
+        CellValuePerEdge<int> cell_value_per_edge_ref{connectivity};
+
+        parallel_for(
+          connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
+            for (size_t j = 0; j < cell_value_per_edge_ref.numberOfSubValues(edge_id); ++j) {
+              cell_value_per_edge_ref(edge_id, j) =   // edge_owner[edge_id] +
+                edge_number[edge_id] + 100 * cell_number[edge_to_cell_matrix[edge_id][j]];
+            }
+          });
+
+        CellValuePerEdge<int> cell_value_per_edge{connectivity};
+        cell_value_per_edge.fill(0);
+        parallel_for(
+          connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
+            for (size_t j = 0; j < cell_value_per_edge.numberOfSubValues(edge_id); ++j) {
+              if (edge_is_owned[edge_id]) {
+                cell_value_per_edge(edge_id, j) =
+                  edge_number[edge_id] + 100 * cell_number[edge_to_cell_matrix[edge_id][j]];
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_value(cell_value_per_edge, cell_value_per_edge_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(cell_value_per_edge);
+        REQUIRE(is_same_item_value(cell_value_per_edge, cell_value_per_edge_ref));
+
+        // Check that exchange sizes are correctly stored (require
+        // lines to be covered)
+        if (parallel::size() > 1) {
+          reset_ghost_values(cell_value_per_edge, connectivity.edgeOwner(), 0);
+          REQUIRE(not is_same_item_value(cell_value_per_edge, cell_value_per_edge_ref));
+          synchronizer.synchronize(cell_value_per_edge);
+          REQUIRE(is_same_item_value(cell_value_per_edge, cell_value_per_edge_ref));
+        }
+      }
+
+      SECTION("synchonize CellValuePerFace")
       {
+        const auto face_is_owned = connectivity.faceIsOwned();
+        const auto face_number   = connectivity.faceNumber();
+        const auto cell_number   = connectivity.cellNumber();
+
+        const auto face_to_cell_matrix = connectivity.faceToCellMatrix();
+
+        CellValuePerFace<int> cell_value_per_face_ref{connectivity};
+
+        parallel_for(
+          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
+            for (size_t j = 0; j < cell_value_per_face_ref.numberOfSubValues(face_id); ++j) {
+              cell_value_per_face_ref(face_id, j) =   // face_owner[face_id] +
+                face_number[face_id] + 100 * cell_number[face_to_cell_matrix[face_id][j]];
+            }
+          });
+
+        CellValuePerFace<int> cell_value_per_face{connectivity};
+        cell_value_per_face.fill(0);
+        parallel_for(
+          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
+            for (size_t j = 0; j < cell_value_per_face.numberOfSubValues(face_id); ++j) {
+              if (face_is_owned[face_id]) {
+                cell_value_per_face(face_id, j) =
+                  face_number[face_id] + 100 * cell_number[face_to_cell_matrix[face_id][j]];
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_value(cell_value_per_face, cell_value_per_face_ref));
+        }
+
         Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(cell_value_per_face);
+        REQUIRE(is_same_item_value(cell_value_per_face, cell_value_per_face_ref));
 
-        SECTION("CellValuePerNode")
-        {
-          CellValuePerNode<int> cell_value_per_node{connectivity};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_value_per_node),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (node)");
+        // Check that exchange sizes are correctly stored (require
+        // lines to be covered)
+        if (parallel::size() > 1) {
+          reset_ghost_values(cell_value_per_face, connectivity.faceOwner(), 0);
+          REQUIRE(not is_same_item_value(cell_value_per_face, cell_value_per_face_ref));
+          synchronizer.synchronize(cell_value_per_face);
+          REQUIRE(is_same_item_value(cell_value_per_face, cell_value_per_face_ref));
         }
+      }
+
+      SECTION("synchonize FaceValuePerNode")
+      {
+        const auto node_is_owned = connectivity.nodeIsOwned();
+        const auto node_number   = connectivity.nodeNumber();
+        const auto face_number   = connectivity.faceNumber();
+
+        const auto node_to_face_matrix = connectivity.nodeToFaceMatrix();
 
-        SECTION("CellValuePerEdge")
-        {
-          CellValuePerEdge<int> cell_value_per_edge{connectivity};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_value_per_edge),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (edge)");
+        FaceValuePerNode<int> face_value_per_node_ref{connectivity};
+
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < face_value_per_node_ref.numberOfSubValues(node_id); ++j) {
+              face_value_per_node_ref(node_id, j) =   // node_owner[node_id] +
+                node_number[node_id] + 100 * face_number[node_to_face_matrix[node_id][j]];
+            }
+          });
+
+        FaceValuePerNode<int> face_value_per_node{connectivity};
+        face_value_per_node.fill(0);
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < face_value_per_node.numberOfSubValues(node_id); ++j) {
+              if (node_is_owned[node_id]) {
+                face_value_per_node(node_id, j) =
+                  node_number[node_id] + 100 * face_number[node_to_face_matrix[node_id][j]];
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_value(face_value_per_node, face_value_per_node_ref));
         }
 
-        SECTION("CellValuePerFace")
-        {
-          CellValuePerFace<int> cell_value_per_face{connectivity};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_value_per_face),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (face)");
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(face_value_per_node);
+        REQUIRE(is_same_item_value(face_value_per_node, face_value_per_node_ref));
+
+        // Check that exchange sizes are correctly stored (require
+        // lines to be covered)
+        if (parallel::size() > 1) {
+          reset_ghost_values(face_value_per_node, connectivity.nodeOwner(), 0);
+          REQUIRE(not is_same_item_value(face_value_per_node, face_value_per_node_ref));
+          synchronizer.synchronize(face_value_per_node);
+          REQUIRE(is_same_item_value(face_value_per_node, face_value_per_node_ref));
         }
+      }
+
+      SECTION("synchonize EdgeValuePerNode")
+      {
+        const auto node_is_owned = connectivity.nodeIsOwned();
+        const auto node_number   = connectivity.nodeNumber();
+        const auto edge_number   = connectivity.edgeNumber();
+
+        const auto node_to_edge_matrix = connectivity.nodeToEdgeMatrix();
+
+        EdgeValuePerNode<int> edge_value_per_node_ref{connectivity};
+
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < edge_value_per_node_ref.numberOfSubValues(node_id); ++j) {
+              edge_value_per_node_ref(node_id, j) =   // node_owner[node_id] +
+                node_number[node_id] + 100 * edge_number[node_to_edge_matrix[node_id][j]];
+            }
+          });
+
+        EdgeValuePerNode<int> edge_value_per_node{connectivity};
+        edge_value_per_node.fill(0);
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < edge_value_per_node.numberOfSubValues(node_id); ++j) {
+              if (node_is_owned[node_id]) {
+                edge_value_per_node(node_id, j) =
+                  node_number[node_id] + 100 * edge_number[node_to_edge_matrix[node_id][j]];
+              }
+            }
+          });
 
-        SECTION("FaceValuePerNode")
-        {
-          FaceValuePerNode<int> face_value_per_node{connectivity};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(face_value_per_node),
-                              "unexpected error: synchronization requires sub-item type (face) to be of lower "
-                              "dimension than item (node)");
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_value(edge_value_per_node, edge_value_per_node_ref));
         }
 
-        SECTION("EdgeValuePerNode")
-        {
-          EdgeValuePerNode<int> edge_value_per_node{connectivity};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(edge_value_per_node),
-                              "unexpected error: synchronization requires sub-item type (edge) to be of lower "
-                              "dimension than item (node)");
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(edge_value_per_node);
+        REQUIRE(is_same_item_value(edge_value_per_node, edge_value_per_node_ref));
+
+        // Check that exchange sizes are correctly stored (require
+        // lines to be covered)
+        if (parallel::size() > 1) {
+          reset_ghost_values(edge_value_per_node, connectivity.nodeOwner(), 0);
+          REQUIRE(not is_same_item_value(edge_value_per_node, edge_value_per_node_ref));
+          synchronizer.synchronize(edge_value_per_node);
+          REQUIRE(is_same_item_value(edge_value_per_node, edge_value_per_node_ref));
         }
       }
     }
@@ -1455,267 +1765,335 @@ TEST_CASE("Synchronizer", "[mesh]")
         }
       }
 
-      SECTION("forbidden synchronization")
+      SECTION("synchonize CellValuePerNode")
       {
-        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
-
-        SECTION("CellValuePerNode")
-        {
-          CellValuePerNode<int> cell_value_per_node{connectivity};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_value_per_node),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (node)");
-        }
+        const auto node_is_owned = connectivity.nodeIsOwned();
+        const auto node_number   = connectivity.nodeNumber();
+        const auto cell_number   = connectivity.cellNumber();
 
-        SECTION("CellValuePerEdge")
-        {
-          CellValuePerEdge<int> cell_value_per_edge{connectivity};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_value_per_edge),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (edge)");
-        }
+        const auto node_to_cell_matrix = connectivity.nodeToCellMatrix();
 
-        SECTION("CellValuePerFace")
-        {
-          CellValuePerFace<int> cell_value_per_face{connectivity};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_value_per_face),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (face)");
-        }
+        CellValuePerNode<int> cell_value_per_node_ref{connectivity};
 
-        SECTION("FaceValuePerNode")
-        {
-          FaceValuePerNode<int> face_value_per_node{connectivity};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(face_value_per_node),
-                              "unexpected error: synchronization requires sub-item type (face) to be of lower "
-                              "dimension than item (node)");
-        }
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < cell_value_per_node_ref.numberOfSubValues(node_id); ++j) {
+              cell_value_per_node_ref(node_id, j) =   // node_owner[node_id] +
+                node_number[node_id] + 100 * cell_number[node_to_cell_matrix[node_id][j]];
+            }
+          });
 
-        SECTION("FaceValuePerEdge")
-        {
-          FaceValuePerEdge<int> face_value_per_edge{connectivity};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(face_value_per_edge),
-                              "unexpected error: synchronization requires sub-item type (face) to be of lower "
-                              "dimension than item (edge)");
-        }
+        CellValuePerNode<int> cell_value_per_node{connectivity};
+        cell_value_per_node.fill(0);
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < cell_value_per_node.numberOfSubValues(node_id); ++j) {
+              if (node_is_owned[node_id]) {
+                cell_value_per_node(node_id, j) =
+                  node_number[node_id] + 100 * cell_number[node_to_cell_matrix[node_id][j]];
+              }
+            }
+          });
 
-        SECTION("EdgeValuePerNode")
-        {
-          EdgeValuePerNode<int> edge_value_per_node{connectivity};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(edge_value_per_node),
-                              "unexpected error: synchronization requires sub-item type (edge) to be of lower "
-                              "dimension than item (node)");
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_value(cell_value_per_node, cell_value_per_node_ref));
         }
-      }
-    }
-  }
 
-  SECTION("SubItemArrayPerItem")
-  {
-    auto is_same_item_array = [](auto a, auto b) {
-      using IndexT = typename decltype(a)::index_type;
-      bool is_same = true;
-      for (IndexT i_item = 0; i_item < a.numberOfItems(); ++i_item) {
-        for (size_t l = 0; l < a.numberOfSubArrays(i_item); ++l) {
-          for (size_t k = 0; k < a.sizeOfArrays(); ++k) {
-            is_same &= (a(i_item, l)[k] == b(i_item, l)[k]);
-          }
-        }
-      }
-      return parallel::allReduceAnd(is_same);
-    };
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(cell_value_per_node);
+        REQUIRE(is_same_item_value(cell_value_per_node, cell_value_per_node_ref));
 
-    auto reset_ghost_arrays = [](auto sub_item_array_per_item, auto item_owner, auto value) {
-      using IndexT = typename decltype(sub_item_array_per_item)::index_type;
-      static_assert(std::is_same_v<typename decltype(sub_item_array_per_item)::index_type,
-                                   typename decltype(item_owner)::index_type>);
-      for (IndexT i_item = 0; i_item < sub_item_array_per_item.numberOfItems(); ++i_item) {
-        if (item_owner[i_item] != static_cast<int>(parallel::rank())) {
-          for (size_t l = 0; l < sub_item_array_per_item.numberOfSubArrays(i_item); ++l) {
-            sub_item_array_per_item(i_item, l).fill(value);
-          }
+        // Check that exchange sizes are correctly stored (require
+        // lines to be covered)
+        if (parallel::size() > 1) {
+          reset_ghost_values(cell_value_per_node, connectivity.nodeOwner(), 0);
+          REQUIRE(not is_same_item_value(cell_value_per_node, cell_value_per_node_ref));
+          synchronizer.synchronize(cell_value_per_node);
+          REQUIRE(is_same_item_value(cell_value_per_node, cell_value_per_node_ref));
         }
       }
-    };
 
-    SECTION("1D")
-    {
-      constexpr size_t Dimension = 1;
-      using ConnectivityType     = Connectivity<Dimension>;
-
-      const ConnectivityType& connectivity = MeshDataBaseForTests::get().unordered1DMesh()->connectivity();
-
-      SECTION("synchonize NodeArrayPerCell")
+      SECTION("synchonize CellValuePerEdge")
       {
-        const auto cell_owner  = connectivity.cellOwner();
-        const auto cell_number = connectivity.cellNumber();
+        const auto edge_is_owned = connectivity.edgeIsOwned();
+        const auto edge_number   = connectivity.edgeNumber();
+        const auto cell_number   = connectivity.cellNumber();
 
-        NodeArrayPerCell<int> node_array_per_cell_ref{connectivity, 3};
+        const auto edge_to_cell_matrix = connectivity.edgeToCellMatrix();
+
+        CellValuePerEdge<int> cell_value_per_edge_ref{connectivity};
 
         parallel_for(
-          connectivity.numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
-            for (size_t j = 0; j < node_array_per_cell_ref.numberOfSubArrays(cell_id); ++j) {
-              for (size_t k = 0; k < node_array_per_cell_ref.sizeOfArrays(); ++k) {
-                node_array_per_cell_ref(cell_id, j)[k] = cell_owner[cell_id] + cell_number[cell_id] + j + 2 * k;
-              }
+          connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
+            for (size_t j = 0; j < cell_value_per_edge_ref.numberOfSubValues(edge_id); ++j) {
+              cell_value_per_edge_ref(edge_id, j) =   // edge_owner[edge_id] +
+                edge_number[edge_id] + 100 * cell_number[edge_to_cell_matrix[edge_id][j]];
             }
           });
 
-        NodeArrayPerCell<int> node_array_per_cell{connectivity, 3};
+        CellValuePerEdge<int> cell_value_per_edge{connectivity};
+        cell_value_per_edge.fill(0);
         parallel_for(
-          connectivity.numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
-            for (size_t j = 0; j < node_array_per_cell_ref.numberOfSubArrays(cell_id); ++j) {
-              for (size_t k = 0; k < node_array_per_cell_ref.sizeOfArrays(); ++k) {
-                node_array_per_cell(cell_id, j)[k] = parallel::rank() + cell_number[cell_id] + j + 2 * k;
+          connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
+            for (size_t j = 0; j < cell_value_per_edge.numberOfSubValues(edge_id); ++j) {
+              if (edge_is_owned[edge_id]) {
+                cell_value_per_edge(edge_id, j) =
+                  edge_number[edge_id] + 100 * cell_number[edge_to_cell_matrix[edge_id][j]];
               }
             }
           });
 
         if (parallel::size() > 1) {
-          REQUIRE(not is_same_item_array(node_array_per_cell, node_array_per_cell_ref));
+          REQUIRE(not is_same_item_value(cell_value_per_edge, cell_value_per_edge_ref));
         }
 
         Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
-        synchronizer.synchronize(node_array_per_cell);
-
-        REQUIRE(is_same_item_array(node_array_per_cell, node_array_per_cell_ref));
+        synchronizer.synchronize(cell_value_per_edge);
+        REQUIRE(is_same_item_value(cell_value_per_edge, cell_value_per_edge_ref));
 
         // Check that exchange sizes are correctly stored (require
         // lines to be covered)
         if (parallel::size() > 1) {
-          reset_ghost_arrays(node_array_per_cell, cell_owner, 0);
-          REQUIRE(not is_same_item_array(node_array_per_cell, node_array_per_cell_ref));
-          synchronizer.synchronize(node_array_per_cell);
-          REQUIRE(is_same_item_array(node_array_per_cell, node_array_per_cell_ref));
+          reset_ghost_values(cell_value_per_edge, connectivity.edgeOwner(), 0);
+          REQUIRE(not is_same_item_value(cell_value_per_edge, cell_value_per_edge_ref));
+          synchronizer.synchronize(cell_value_per_edge);
+          REQUIRE(is_same_item_value(cell_value_per_edge, cell_value_per_edge_ref));
         }
       }
 
-      SECTION("synchonize EdgeArrayPerCell")
+      SECTION("synchonize CellValuePerFace")
       {
-        const auto cell_owner  = connectivity.cellOwner();
-        const auto cell_number = connectivity.cellNumber();
+        const auto face_is_owned = connectivity.faceIsOwned();
+        const auto face_number   = connectivity.faceNumber();
+        const auto cell_number   = connectivity.cellNumber();
 
-        EdgeArrayPerCell<int> edge_array_per_cell_ref{connectivity, 3};
+        const auto face_to_cell_matrix = connectivity.faceToCellMatrix();
+
+        CellValuePerFace<int> cell_value_per_face_ref{connectivity};
 
         parallel_for(
-          connectivity.numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
-            for (size_t j = 0; j < edge_array_per_cell_ref.numberOfSubArrays(cell_id); ++j) {
-              for (size_t k = 0; k < edge_array_per_cell_ref.sizeOfArrays(); ++k) {
-                edge_array_per_cell_ref(cell_id, j)[k] = cell_owner[cell_id] + cell_number[cell_id] + j + 2 * k;
-              }
+          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
+            for (size_t j = 0; j < cell_value_per_face_ref.numberOfSubValues(face_id); ++j) {
+              cell_value_per_face_ref(face_id, j) =   // face_owner[face_id] +
+                face_number[face_id] + 100 * cell_number[face_to_cell_matrix[face_id][j]];
             }
           });
 
-        EdgeArrayPerCell<int> edge_array_per_cell{connectivity, 3};
+        CellValuePerFace<int> cell_value_per_face{connectivity};
+        cell_value_per_face.fill(0);
         parallel_for(
-          connectivity.numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
-            for (size_t j = 0; j < edge_array_per_cell_ref.numberOfSubArrays(cell_id); ++j) {
-              for (size_t k = 0; k < edge_array_per_cell_ref.sizeOfArrays(); ++k) {
-                edge_array_per_cell(cell_id, j)[k] = parallel::rank() + cell_number[cell_id] + j + 2 * k;
+          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
+            for (size_t j = 0; j < cell_value_per_face.numberOfSubValues(face_id); ++j) {
+              if (face_is_owned[face_id]) {
+                cell_value_per_face(face_id, j) =
+                  face_number[face_id] + 100 * cell_number[face_to_cell_matrix[face_id][j]];
               }
             }
           });
 
         if (parallel::size() > 1) {
-          REQUIRE(not is_same_item_array(edge_array_per_cell, edge_array_per_cell_ref));
+          REQUIRE(not is_same_item_value(cell_value_per_face, cell_value_per_face_ref));
         }
 
         Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
-        synchronizer.synchronize(edge_array_per_cell);
-
-        REQUIRE(is_same_item_array(edge_array_per_cell, edge_array_per_cell_ref));
+        synchronizer.synchronize(cell_value_per_face);
+        REQUIRE(is_same_item_value(cell_value_per_face, cell_value_per_face_ref));
 
         // Check that exchange sizes are correctly stored (require
         // lines to be covered)
         if (parallel::size() > 1) {
-          reset_ghost_arrays(edge_array_per_cell, cell_owner, 0);
-          REQUIRE(not is_same_item_array(edge_array_per_cell, edge_array_per_cell_ref));
-          synchronizer.synchronize(edge_array_per_cell);
-          REQUIRE(is_same_item_array(edge_array_per_cell, edge_array_per_cell_ref));
+          reset_ghost_values(cell_value_per_face, connectivity.faceOwner(), 0);
+          REQUIRE(not is_same_item_value(cell_value_per_face, cell_value_per_face_ref));
+          synchronizer.synchronize(cell_value_per_face);
+          REQUIRE(is_same_item_value(cell_value_per_face, cell_value_per_face_ref));
         }
       }
 
-      SECTION("synchonize FaceArrayPerCell")
+      SECTION("synchonize FaceValuePerNode")
       {
-        const auto cell_owner  = connectivity.cellOwner();
-        const auto cell_number = connectivity.cellNumber();
+        const auto node_is_owned = connectivity.nodeIsOwned();
+        const auto node_number   = connectivity.nodeNumber();
+        const auto face_number   = connectivity.faceNumber();
 
-        FaceArrayPerCell<int> face_array_per_cell_ref{connectivity, 3};
+        const auto node_to_face_matrix = connectivity.nodeToFaceMatrix();
+
+        FaceValuePerNode<int> face_value_per_node_ref{connectivity};
 
         parallel_for(
-          connectivity.numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
-            for (size_t j = 0; j < face_array_per_cell_ref.numberOfSubArrays(cell_id); ++j) {
-              for (size_t k = 0; k < face_array_per_cell_ref.sizeOfArrays(); ++k) {
-                face_array_per_cell_ref(cell_id, j)[k] = cell_owner[cell_id] + cell_number[cell_id] + j + 2 * k;
-              }
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < face_value_per_node_ref.numberOfSubValues(node_id); ++j) {
+              face_value_per_node_ref(node_id, j) =   // node_owner[node_id] +
+                node_number[node_id] + 100 * face_number[node_to_face_matrix[node_id][j]];
             }
           });
 
-        FaceArrayPerCell<int> face_array_per_cell{connectivity, 3};
+        FaceValuePerNode<int> face_value_per_node{connectivity};
+        face_value_per_node.fill(0);
         parallel_for(
-          connectivity.numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
-            for (size_t j = 0; j < face_array_per_cell_ref.numberOfSubArrays(cell_id); ++j) {
-              for (size_t k = 0; k < face_array_per_cell_ref.sizeOfArrays(); ++k) {
-                face_array_per_cell(cell_id, j)[k] = parallel::rank() + cell_number[cell_id] + j + 2 * k;
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < face_value_per_node.numberOfSubValues(node_id); ++j) {
+              if (node_is_owned[node_id]) {
+                face_value_per_node(node_id, j) =
+                  node_number[node_id] + 100 * face_number[node_to_face_matrix[node_id][j]];
               }
             }
           });
 
         if (parallel::size() > 1) {
-          REQUIRE(not is_same_item_array(face_array_per_cell, face_array_per_cell_ref));
+          REQUIRE(not is_same_item_value(face_value_per_node, face_value_per_node_ref));
         }
 
         Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
-        synchronizer.synchronize(face_array_per_cell);
-
-        REQUIRE(is_same_item_array(face_array_per_cell, face_array_per_cell_ref));
+        synchronizer.synchronize(face_value_per_node);
+        REQUIRE(is_same_item_value(face_value_per_node, face_value_per_node_ref));
 
         // Check that exchange sizes are correctly stored (require
         // lines to be covered)
         if (parallel::size() > 1) {
-          reset_ghost_arrays(face_array_per_cell, cell_owner, 0);
-          REQUIRE(not is_same_item_array(face_array_per_cell, face_array_per_cell_ref));
-          synchronizer.synchronize(face_array_per_cell);
-          REQUIRE(is_same_item_array(face_array_per_cell, face_array_per_cell_ref));
+          reset_ghost_values(face_value_per_node, connectivity.nodeOwner(), 0);
+          REQUIRE(not is_same_item_value(face_value_per_node, face_value_per_node_ref));
+          synchronizer.synchronize(face_value_per_node);
+          REQUIRE(is_same_item_value(face_value_per_node, face_value_per_node_ref));
         }
       }
 
-      SECTION("forbidden synchronization")
+      SECTION("synchonize FaceValuePerEdge")
       {
+        const auto edge_is_owned = connectivity.edgeIsOwned();
+        const auto edge_number   = connectivity.edgeNumber();
+        const auto face_number   = connectivity.faceNumber();
+
+        const auto edge_to_face_matrix = connectivity.edgeToFaceMatrix();
+
+        FaceValuePerEdge<int> face_value_per_edge_ref{connectivity};
+
+        parallel_for(
+          connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
+            for (size_t j = 0; j < face_value_per_edge_ref.numberOfSubValues(edge_id); ++j) {
+              face_value_per_edge_ref(edge_id, j) =   // edge_owner[edge_id] +
+                edge_number[edge_id] + 100 * face_number[edge_to_face_matrix[edge_id][j]];
+            }
+          });
+
+        FaceValuePerEdge<int> face_value_per_edge{connectivity};
+        face_value_per_edge.fill(0);
+        parallel_for(
+          connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
+            for (size_t j = 0; j < face_value_per_edge.numberOfSubValues(edge_id); ++j) {
+              if (edge_is_owned[edge_id]) {
+                face_value_per_edge(edge_id, j) =
+                  edge_number[edge_id] + 100 * face_number[edge_to_face_matrix[edge_id][j]];
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_value(face_value_per_edge, face_value_per_edge_ref));
+        }
+
         Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(face_value_per_edge);
+        REQUIRE(is_same_item_value(face_value_per_edge, face_value_per_edge_ref));
 
-        SECTION("CellArrayPerNode")
-        {
-          CellArrayPerNode<int> cell_array_per_node{connectivity, 3};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_array_per_node),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (node)");
+        // Check that exchange sizes are correctly stored (require
+        // lines to be covered)
+        if (parallel::size() > 1) {
+          reset_ghost_values(face_value_per_edge, connectivity.edgeOwner(), 0);
+          REQUIRE(not is_same_item_value(face_value_per_edge, face_value_per_edge_ref));
+          synchronizer.synchronize(face_value_per_edge);
+          REQUIRE(is_same_item_value(face_value_per_edge, face_value_per_edge_ref));
         }
+      }
+
+      SECTION("synchonize EdgeValuePerNode")
+      {
+        const auto node_is_owned = connectivity.nodeIsOwned();
+        const auto node_number   = connectivity.nodeNumber();
+        const auto edge_number   = connectivity.edgeNumber();
+
+        const auto node_to_edge_matrix = connectivity.nodeToEdgeMatrix();
+
+        EdgeValuePerNode<int> edge_value_per_node_ref{connectivity};
+
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < edge_value_per_node_ref.numberOfSubValues(node_id); ++j) {
+              edge_value_per_node_ref(node_id, j) =   // node_owner[node_id] +
+                node_number[node_id] + 100 * edge_number[node_to_edge_matrix[node_id][j]];
+            }
+          });
 
-        SECTION("CellArrayPerEdge")
-        {
-          CellArrayPerEdge<int> cell_array_per_edge{connectivity, 3};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_array_per_edge),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (edge)");
+        EdgeValuePerNode<int> edge_value_per_node{connectivity};
+        edge_value_per_node.fill(0);
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < edge_value_per_node.numberOfSubValues(node_id); ++j) {
+              if (node_is_owned[node_id]) {
+                edge_value_per_node(node_id, j) =
+                  node_number[node_id] + 100 * edge_number[node_to_edge_matrix[node_id][j]];
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_value(edge_value_per_node, edge_value_per_node_ref));
         }
 
-        SECTION("CellArrayPerFace")
-        {
-          CellArrayPerFace<int> cell_array_per_face{connectivity, 3};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_array_per_face),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (face)");
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(edge_value_per_node);
+        REQUIRE(is_same_item_value(edge_value_per_node, edge_value_per_node_ref));
+
+        // Check that exchange sizes are correctly stored (require
+        // lines to be covered)
+        if (parallel::size() > 1) {
+          reset_ghost_values(edge_value_per_node, connectivity.nodeOwner(), 0);
+          REQUIRE(not is_same_item_value(edge_value_per_node, edge_value_per_node_ref));
+          synchronizer.synchronize(edge_value_per_node);
+          REQUIRE(is_same_item_value(edge_value_per_node, edge_value_per_node_ref));
         }
       }
     }
+  }
 
-    SECTION("2D")
+  SECTION("SubItemArrayPerItem")
+  {
+    auto is_same_item_array = [](auto a, auto b) {
+      using IndexT = typename decltype(a)::index_type;
+      bool is_same = true;
+      for (IndexT i_item = 0; i_item < a.numberOfItems(); ++i_item) {
+        for (size_t l = 0; l < a.numberOfSubArrays(i_item); ++l) {
+          for (size_t k = 0; k < a.sizeOfArrays(); ++k) {
+            is_same &= (a(i_item, l)[k] == b(i_item, l)[k]);
+
+            if (a(i_item, l)[k] != b(i_item, l)[k]) {
+              std::cout << i_item << ":" << l << " a[" << k << "]=" << a(i_item, l)[k] << " b[" << k
+                        << "]=" << b(i_item, l)[k] << '\n';
+            }
+          }
+        }
+      }
+      return parallel::allReduceAnd(is_same);
+    };
+
+    auto reset_ghost_arrays = [](auto sub_item_array_per_item, auto item_owner, auto value) {
+      using IndexT = typename decltype(sub_item_array_per_item)::index_type;
+      static_assert(std::is_same_v<typename decltype(sub_item_array_per_item)::index_type,
+                                   typename decltype(item_owner)::index_type>);
+      for (IndexT i_item = 0; i_item < sub_item_array_per_item.numberOfItems(); ++i_item) {
+        if (item_owner[i_item] != static_cast<int>(parallel::rank())) {
+          for (size_t l = 0; l < sub_item_array_per_item.numberOfSubArrays(i_item); ++l) {
+            sub_item_array_per_item(i_item, l).fill(value);
+          }
+        }
+      }
+    };
+
+    SECTION("1D")
     {
-      constexpr size_t Dimension = 2;
+      constexpr size_t Dimension = 1;
       using ConnectivityType     = Connectivity<Dimension>;
 
-      const ConnectivityType& connectivity = MeshDataBaseForTests::get().hybrid2DMesh()->connectivity();
+      const ConnectivityType& connectivity = MeshDataBaseForTests::get().unordered1DMesh()->connectivity();
 
       SECTION("synchonize NodeArrayPerCell")
       {
@@ -1852,148 +2230,175 @@ TEST_CASE("Synchronizer", "[mesh]")
         }
       }
 
-      SECTION("synchonize NodeArrayPerFace")
+      SECTION("synchonize CellArrayPerNode")
       {
-        const auto face_owner  = connectivity.faceOwner();
-        const auto face_number = connectivity.faceNumber();
+        const auto node_is_owned = connectivity.nodeIsOwned();
+        const auto node_number   = connectivity.nodeNumber();
+        const auto cell_number   = connectivity.cellNumber();
 
-        NodeArrayPerFace<int> node_array_per_face_ref{connectivity, 3};
+        const auto node_to_cell_matrix = connectivity.nodeToCellMatrix();
+
+        CellArrayPerNode<int> cell_array_per_node_ref{connectivity, 3};
 
         parallel_for(
-          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
-            for (size_t j = 0; j < node_array_per_face_ref.numberOfSubArrays(face_id); ++j) {
-              for (size_t k = 0; k < node_array_per_face_ref.sizeOfArrays(); ++k) {
-                node_array_per_face_ref(face_id, j)[k] = face_owner[face_id] + face_number[face_id] + j + 2 * k;
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < cell_array_per_node_ref.numberOfSubArrays(node_id); ++j) {
+              for (size_t k = 0; k < cell_array_per_node_ref.sizeOfArrays(); ++k) {
+                cell_array_per_node_ref(node_id, j)[k] =
+                  node_number[node_id] + 100 * cell_number[node_to_cell_matrix[node_id][j]] + 2 * k;
               }
             }
           });
 
-        NodeArrayPerFace<int> node_array_per_face{connectivity, 3};
+        CellArrayPerNode<int> cell_array_per_node{connectivity, 3};
+        cell_array_per_node.fill(-1);
         parallel_for(
-          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
-            for (size_t j = 0; j < node_array_per_face_ref.numberOfSubArrays(face_id); ++j) {
-              for (size_t k = 0; k < node_array_per_face_ref.sizeOfArrays(); ++k) {
-                node_array_per_face(face_id, j)[k] = parallel::rank() + face_number[face_id] + j + 2 * k;
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            if (node_is_owned[node_id]) {
+              for (size_t j = 0; j < cell_array_per_node.numberOfSubArrays(node_id); ++j) {
+                for (size_t k = 0; k < cell_array_per_node.sizeOfArrays(); ++k) {
+                  cell_array_per_node(node_id, j)[k] =
+                    node_number[node_id] + 100 * cell_number[node_to_cell_matrix[node_id][j]] + 2 * k;
+                }
               }
             }
           });
 
         if (parallel::size() > 1) {
-          REQUIRE(not is_same_item_array(node_array_per_face, node_array_per_face_ref));
+          REQUIRE(not is_same_item_array(cell_array_per_node, cell_array_per_node_ref));
         }
 
         Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
-        synchronizer.synchronize(node_array_per_face);
+        synchronizer.synchronize(cell_array_per_node);
 
-        REQUIRE(is_same_item_array(node_array_per_face, node_array_per_face_ref));
+        REQUIRE(is_same_item_array(cell_array_per_node, cell_array_per_node_ref));
 
         // Check that exchange sizes are correctly stored (require
         // lines to be covered)
         if (parallel::size() > 1) {
-          reset_ghost_arrays(node_array_per_face, face_owner, 0);
-          REQUIRE(not is_same_item_array(node_array_per_face, node_array_per_face_ref));
-          synchronizer.synchronize(node_array_per_face);
-          REQUIRE(is_same_item_array(node_array_per_face, node_array_per_face_ref));
+          const auto& node_owner = connectivity.nodeOwner();
+          reset_ghost_arrays(cell_array_per_node, node_owner, 0);
+          REQUIRE(not is_same_item_array(cell_array_per_node, cell_array_per_node_ref));
+          synchronizer.synchronize(cell_array_per_node);
+          REQUIRE(is_same_item_array(cell_array_per_node, cell_array_per_node_ref));
         }
       }
 
-      SECTION("synchonize NodeArrayPerEdge")
+      SECTION("synchonize CellArrayPerEdge")
       {
-        const auto edge_owner  = connectivity.edgeOwner();
-        const auto edge_number = connectivity.edgeNumber();
+        const auto edge_is_owned = connectivity.edgeIsOwned();
+        const auto edge_number   = connectivity.edgeNumber();
+        const auto cell_number   = connectivity.cellNumber();
 
-        NodeArrayPerEdge<int> node_array_per_edge_ref{connectivity, 3};
+        const auto edge_to_cell_matrix = connectivity.edgeToCellMatrix();
+
+        CellArrayPerEdge<int> cell_array_per_edge_ref{connectivity, 3};
 
         parallel_for(
           connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
-            for (size_t j = 0; j < node_array_per_edge_ref.numberOfSubArrays(edge_id); ++j) {
-              for (size_t k = 0; k < node_array_per_edge_ref.sizeOfArrays(); ++k) {
-                node_array_per_edge_ref(edge_id, j)[k] = edge_owner[edge_id] + edge_number[edge_id] + j + 2 * k;
+            for (size_t j = 0; j < cell_array_per_edge_ref.numberOfSubArrays(edge_id); ++j) {
+              for (size_t k = 0; k < cell_array_per_edge_ref.sizeOfArrays(); ++k) {
+                cell_array_per_edge_ref(edge_id, j)[k] =
+                  edge_number[edge_id] + 100 * cell_number[edge_to_cell_matrix[edge_id][j]] + 2 * k;
               }
             }
           });
 
-        NodeArrayPerEdge<int> node_array_per_edge{connectivity, 3};
+        CellArrayPerEdge<int> cell_array_per_edge{connectivity, 3};
+        cell_array_per_edge.fill(-1);
         parallel_for(
           connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
-            for (size_t j = 0; j < node_array_per_edge_ref.numberOfSubArrays(edge_id); ++j) {
-              for (size_t k = 0; k < node_array_per_edge_ref.sizeOfArrays(); ++k) {
-                node_array_per_edge(edge_id, j)[k] = parallel::rank() + edge_number[edge_id] + j + 2 * k;
+            if (edge_is_owned[edge_id]) {
+              for (size_t j = 0; j < cell_array_per_edge.numberOfSubArrays(edge_id); ++j) {
+                for (size_t k = 0; k < cell_array_per_edge.sizeOfArrays(); ++k) {
+                  cell_array_per_edge(edge_id, j)[k] =
+                    edge_number[edge_id] + 100 * cell_number[edge_to_cell_matrix[edge_id][j]] + 2 * k;
+                }
               }
             }
           });
 
         if (parallel::size() > 1) {
-          REQUIRE(not is_same_item_array(node_array_per_edge, node_array_per_edge_ref));
+          REQUIRE(not is_same_item_array(cell_array_per_edge, cell_array_per_edge_ref));
         }
 
         Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
-        synchronizer.synchronize(node_array_per_edge);
+        synchronizer.synchronize(cell_array_per_edge);
 
-        REQUIRE(is_same_item_array(node_array_per_edge, node_array_per_edge_ref));
+        REQUIRE(is_same_item_array(cell_array_per_edge, cell_array_per_edge_ref));
 
         // Check that exchange sizes are correctly stored (require
         // lines to be covered)
         if (parallel::size() > 1) {
-          reset_ghost_arrays(node_array_per_edge, edge_owner, 0);
-          REQUIRE(not is_same_item_array(node_array_per_edge, node_array_per_edge_ref));
-          synchronizer.synchronize(node_array_per_edge);
-          REQUIRE(is_same_item_array(node_array_per_edge, node_array_per_edge_ref));
+          const auto& edge_owner = connectivity.edgeOwner();
+          reset_ghost_arrays(cell_array_per_edge, edge_owner, 0);
+          REQUIRE(not is_same_item_array(cell_array_per_edge, cell_array_per_edge_ref));
+          synchronizer.synchronize(cell_array_per_edge);
+          REQUIRE(is_same_item_array(cell_array_per_edge, cell_array_per_edge_ref));
         }
       }
 
-      SECTION("forbidden synchronization")
+      SECTION("synchonize CellArrayPerFace")
       {
-        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        const auto face_is_owned = connectivity.faceIsOwned();
+        const auto face_number   = connectivity.faceNumber();
+        const auto cell_number   = connectivity.cellNumber();
 
-        SECTION("CellArrayPerNode")
-        {
-          CellArrayPerNode<int> cell_array_per_node{connectivity, 3};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_array_per_node),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (node)");
-        }
+        const auto face_to_cell_matrix = connectivity.faceToCellMatrix();
 
-        SECTION("CellArrayPerEdge")
-        {
-          CellArrayPerEdge<int> cell_array_per_edge{connectivity, 3};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_array_per_edge),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (edge)");
-        }
+        CellArrayPerFace<int> cell_array_per_face_ref{connectivity, 3};
 
-        SECTION("CellArrayPerFace")
-        {
-          CellArrayPerFace<int> cell_array_per_face{connectivity, 3};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_array_per_face),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (face)");
-        }
+        parallel_for(
+          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
+            for (size_t j = 0; j < cell_array_per_face_ref.numberOfSubArrays(face_id); ++j) {
+              for (size_t k = 0; k < cell_array_per_face_ref.sizeOfArrays(); ++k) {
+                cell_array_per_face_ref(face_id, j)[k] =
+                  face_number[face_id] + 100 * cell_number[face_to_cell_matrix[face_id][j]] + 2 * k;
+              }
+            }
+          });
+
+        CellArrayPerFace<int> cell_array_per_face{connectivity, 3};
+        cell_array_per_face.fill(-1);
+        parallel_for(
+          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
+            if (face_is_owned[face_id]) {
+              for (size_t j = 0; j < cell_array_per_face.numberOfSubArrays(face_id); ++j) {
+                for (size_t k = 0; k < cell_array_per_face.sizeOfArrays(); ++k) {
+                  cell_array_per_face(face_id, j)[k] =
+                    face_number[face_id] + 100 * cell_number[face_to_cell_matrix[face_id][j]] + 2 * k;
+                }
+              }
+            }
+          });
 
-        SECTION("FaceArrayPerNode")
-        {
-          FaceArrayPerNode<int> face_array_per_node{connectivity, 3};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(face_array_per_node),
-                              "unexpected error: synchronization requires sub-item type (face) to be of lower "
-                              "dimension than item (node)");
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(cell_array_per_face, cell_array_per_face_ref));
         }
 
-        SECTION("EdgeArrayPerNode")
-        {
-          EdgeArrayPerNode<int> edge_array_per_node{connectivity, 3};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(edge_array_per_node),
-                              "unexpected error: synchronization requires sub-item type (edge) to be of lower "
-                              "dimension than item (node)");
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(cell_array_per_face);
+
+        REQUIRE(is_same_item_array(cell_array_per_face, cell_array_per_face_ref));
+
+        // Check that exchange sizes are correctly stored (require
+        // lines to be covered)
+        if (parallel::size() > 1) {
+          const auto& face_owner = connectivity.faceOwner();
+          reset_ghost_arrays(cell_array_per_face, face_owner, 0);
+          REQUIRE(not is_same_item_array(cell_array_per_face, cell_array_per_face_ref));
+          synchronizer.synchronize(cell_array_per_face);
+          REQUIRE(is_same_item_array(cell_array_per_face, cell_array_per_face_ref));
         }
       }
     }
 
-    SECTION("3D")
+    SECTION("2D")
     {
-      constexpr size_t Dimension = 3;
+      constexpr size_t Dimension = 2;
       using ConnectivityType     = Connectivity<Dimension>;
 
-      const ConnectivityType& connectivity = MeshDataBaseForTests::get().hybrid3DMesh()->connectivity();
+      const ConnectivityType& connectivity = MeshDataBaseForTests::get().hybrid2DMesh()->connectivity();
 
       SECTION("synchonize NodeArrayPerCell")
       {
@@ -2175,51 +2580,6 @@ TEST_CASE("Synchronizer", "[mesh]")
         }
       }
 
-      SECTION("synchonize EdgeArrayPerFace")
-      {
-        const auto face_owner  = connectivity.faceOwner();
-        const auto face_number = connectivity.faceNumber();
-
-        EdgeArrayPerFace<int> edge_array_per_face_ref{connectivity, 3};
-
-        parallel_for(
-          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
-            for (size_t j = 0; j < edge_array_per_face_ref.numberOfSubArrays(face_id); ++j) {
-              for (size_t k = 0; k < edge_array_per_face_ref.sizeOfArrays(); ++k) {
-                edge_array_per_face_ref(face_id, j)[k] = face_owner[face_id] + face_number[face_id] + j + 2 * k;
-              }
-            }
-          });
-
-        EdgeArrayPerFace<int> edge_array_per_face{connectivity, 3};
-        parallel_for(
-          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
-            for (size_t j = 0; j < edge_array_per_face_ref.numberOfSubArrays(face_id); ++j) {
-              for (size_t k = 0; k < edge_array_per_face_ref.sizeOfArrays(); ++k) {
-                edge_array_per_face(face_id, j)[k] = parallel::rank() + face_number[face_id] + j + 2 * k;
-              }
-            }
-          });
-
-        if (parallel::size() > 1) {
-          REQUIRE(not is_same_item_array(edge_array_per_face, edge_array_per_face_ref));
-        }
-
-        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
-        synchronizer.synchronize(edge_array_per_face);
-
-        REQUIRE(is_same_item_array(edge_array_per_face, edge_array_per_face_ref));
-
-        // Check that exchange sizes are correctly stored (require
-        // lines to be covered)
-        if (parallel::size() > 1) {
-          reset_ghost_arrays(edge_array_per_face, face_owner, 0);
-          REQUIRE(not is_same_item_array(edge_array_per_face, edge_array_per_face_ref));
-          synchronizer.synchronize(edge_array_per_face);
-          REQUIRE(is_same_item_array(edge_array_per_face, edge_array_per_face_ref));
-        }
-      }
-
       SECTION("synchonize NodeArrayPerEdge")
       {
         const auto edge_owner  = connectivity.edgeOwner();
@@ -2265,56 +2625,875 @@ TEST_CASE("Synchronizer", "[mesh]")
         }
       }
 
-      SECTION("forbidden synchronization")
+      SECTION("synchonize CellArrayPerNode")
       {
-        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        const auto node_is_owned = connectivity.nodeIsOwned();
+        const auto node_number   = connectivity.nodeNumber();
+        const auto cell_number   = connectivity.cellNumber();
 
-        SECTION("CellArrayPerNode")
-        {
-          CellArrayPerNode<int> cell_array_per_node{connectivity, 3};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_array_per_node),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (node)");
-        }
+        const auto node_to_cell_matrix = connectivity.nodeToCellMatrix();
 
-        SECTION("CellArrayPerEdge")
-        {
-          CellArrayPerEdge<int> cell_array_per_edge{connectivity, 3};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_array_per_edge),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (edge)");
-        }
+        CellArrayPerNode<int> cell_array_per_node_ref{connectivity, 3};
 
-        SECTION("CellArrayPerFace")
-        {
-          CellArrayPerFace<int> cell_array_per_face{connectivity, 3};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_array_per_face),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (face)");
-        }
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < cell_array_per_node_ref.numberOfSubArrays(node_id); ++j) {
+              for (size_t k = 0; k < cell_array_per_node_ref.sizeOfArrays(); ++k) {
+                cell_array_per_node_ref(node_id, j)[k] =
+                  node_number[node_id] + 100 * cell_number[node_to_cell_matrix[node_id][j]] + 2 * k;
+              }
+            }
+          });
+
+        CellArrayPerNode<int> cell_array_per_node{connectivity, 3};
+        cell_array_per_node.fill(-1);
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            if (node_is_owned[node_id]) {
+              for (size_t j = 0; j < cell_array_per_node.numberOfSubArrays(node_id); ++j) {
+                for (size_t k = 0; k < cell_array_per_node.sizeOfArrays(); ++k) {
+                  cell_array_per_node(node_id, j)[k] =
+                    node_number[node_id] + 100 * cell_number[node_to_cell_matrix[node_id][j]] + 2 * k;
+                }
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(cell_array_per_node, cell_array_per_node_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(cell_array_per_node);
+
+        REQUIRE(is_same_item_array(cell_array_per_node, cell_array_per_node_ref));
+
+        // Check that exchange sizes are correctly stored (require
+        // lines to be covered)
+        if (parallel::size() > 1) {
+          const auto& node_owner = connectivity.nodeOwner();
+          reset_ghost_arrays(cell_array_per_node, node_owner, 0);
+          REQUIRE(not is_same_item_array(cell_array_per_node, cell_array_per_node_ref));
+          synchronizer.synchronize(cell_array_per_node);
+          REQUIRE(is_same_item_array(cell_array_per_node, cell_array_per_node_ref));
+        }
+      }
+
+      SECTION("synchonize CellArrayPerEdge")
+      {
+        const auto edge_is_owned = connectivity.edgeIsOwned();
+        const auto edge_number   = connectivity.edgeNumber();
+        const auto cell_number   = connectivity.cellNumber();
+
+        const auto edge_to_cell_matrix = connectivity.edgeToCellMatrix();
+
+        CellArrayPerEdge<int> cell_array_per_edge_ref{connectivity, 3};
+
+        parallel_for(
+          connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
+            for (size_t j = 0; j < cell_array_per_edge_ref.numberOfSubArrays(edge_id); ++j) {
+              for (size_t k = 0; k < cell_array_per_edge_ref.sizeOfArrays(); ++k) {
+                cell_array_per_edge_ref(edge_id, j)[k] =
+                  edge_number[edge_id] + 100 * cell_number[edge_to_cell_matrix[edge_id][j]] + 2 * k;
+              }
+            }
+          });
+
+        CellArrayPerEdge<int> cell_array_per_edge{connectivity, 3};
+        cell_array_per_edge.fill(-1);
+        parallel_for(
+          connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
+            if (edge_is_owned[edge_id]) {
+              for (size_t j = 0; j < cell_array_per_edge.numberOfSubArrays(edge_id); ++j) {
+                for (size_t k = 0; k < cell_array_per_edge.sizeOfArrays(); ++k) {
+                  cell_array_per_edge(edge_id, j)[k] =
+                    edge_number[edge_id] + 100 * cell_number[edge_to_cell_matrix[edge_id][j]] + 2 * k;
+                }
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(cell_array_per_edge, cell_array_per_edge_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(cell_array_per_edge);
+
+        REQUIRE(is_same_item_array(cell_array_per_edge, cell_array_per_edge_ref));
+
+        // Check that exchange sizes are correctly stored (require
+        // lines to be covered)
+        if (parallel::size() > 1) {
+          const auto& edge_owner = connectivity.edgeOwner();
+          reset_ghost_arrays(cell_array_per_edge, edge_owner, 0);
+          REQUIRE(not is_same_item_array(cell_array_per_edge, cell_array_per_edge_ref));
+          synchronizer.synchronize(cell_array_per_edge);
+          REQUIRE(is_same_item_array(cell_array_per_edge, cell_array_per_edge_ref));
+        }
+      }
+
+      SECTION("synchonize CellArrayPerFace")
+      {
+        const auto face_is_owned = connectivity.faceIsOwned();
+        const auto face_number   = connectivity.faceNumber();
+        const auto cell_number   = connectivity.cellNumber();
+
+        const auto face_to_cell_matrix = connectivity.faceToCellMatrix();
+
+        CellArrayPerFace<int> cell_array_per_face_ref{connectivity, 3};
+
+        parallel_for(
+          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
+            for (size_t j = 0; j < cell_array_per_face_ref.numberOfSubArrays(face_id); ++j) {
+              for (size_t k = 0; k < cell_array_per_face_ref.sizeOfArrays(); ++k) {
+                cell_array_per_face_ref(face_id, j)[k] =
+                  face_number[face_id] + 100 * cell_number[face_to_cell_matrix[face_id][j]] + 2 * k;
+              }
+            }
+          });
+
+        CellArrayPerFace<int> cell_array_per_face{connectivity, 3};
+        cell_array_per_face.fill(-1);
+        parallel_for(
+          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
+            if (face_is_owned[face_id]) {
+              for (size_t j = 0; j < cell_array_per_face.numberOfSubArrays(face_id); ++j) {
+                for (size_t k = 0; k < cell_array_per_face.sizeOfArrays(); ++k) {
+                  cell_array_per_face(face_id, j)[k] =
+                    face_number[face_id] + 100 * cell_number[face_to_cell_matrix[face_id][j]] + 2 * k;
+                }
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(cell_array_per_face, cell_array_per_face_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(cell_array_per_face);
+
+        REQUIRE(is_same_item_array(cell_array_per_face, cell_array_per_face_ref));
+
+        // Check that exchange sizes are correctly stored (require
+        // lines to be covered)
+        if (parallel::size() > 1) {
+          const auto& face_owner = connectivity.faceOwner();
+          reset_ghost_arrays(cell_array_per_face, face_owner, 0);
+          REQUIRE(not is_same_item_array(cell_array_per_face, cell_array_per_face_ref));
+          synchronizer.synchronize(cell_array_per_face);
+          REQUIRE(is_same_item_array(cell_array_per_face, cell_array_per_face_ref));
+        }
+      }
+
+      SECTION("synchonize FaceArrayPerNode")
+      {
+        const auto node_is_owned = connectivity.nodeIsOwned();
+        const auto node_number   = connectivity.nodeNumber();
+        const auto face_number   = connectivity.faceNumber();
+
+        const auto node_to_face_matrix = connectivity.nodeToFaceMatrix();
+
+        FaceArrayPerNode<int> face_array_per_node_ref{connectivity, 3};
+
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < face_array_per_node_ref.numberOfSubArrays(node_id); ++j) {
+              for (size_t k = 0; k < face_array_per_node_ref.sizeOfArrays(); ++k) {
+                face_array_per_node_ref(node_id, j)[k] =
+                  node_number[node_id] + 100 * face_number[node_to_face_matrix[node_id][j]] + 2 * k;
+              }
+            }
+          });
+
+        FaceArrayPerNode<int> face_array_per_node{connectivity, 3};
+        face_array_per_node.fill(-1);
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            if (node_is_owned[node_id]) {
+              for (size_t j = 0; j < face_array_per_node.numberOfSubArrays(node_id); ++j) {
+                for (size_t k = 0; k < face_array_per_node.sizeOfArrays(); ++k) {
+                  face_array_per_node(node_id, j)[k] =
+                    node_number[node_id] + 100 * face_number[node_to_face_matrix[node_id][j]] + 2 * k;
+                }
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(face_array_per_node, face_array_per_node_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(face_array_per_node);
+
+        REQUIRE(is_same_item_array(face_array_per_node, face_array_per_node_ref));
+
+        // Check that exchange sizes are correctly stored (require
+        // lines to be covered)
+        if (parallel::size() > 1) {
+          const auto& node_owner = connectivity.nodeOwner();
+          reset_ghost_arrays(face_array_per_node, node_owner, 0);
+          REQUIRE(not is_same_item_array(face_array_per_node, face_array_per_node_ref));
+          synchronizer.synchronize(face_array_per_node);
+          REQUIRE(is_same_item_array(face_array_per_node, face_array_per_node_ref));
+        }
+      }
+
+      SECTION("synchonize EdgeArrayPerNode")
+      {
+        const auto node_is_owned = connectivity.nodeIsOwned();
+        const auto node_number   = connectivity.nodeNumber();
+        const auto edge_number   = connectivity.edgeNumber();
+
+        const auto node_to_edge_matrix = connectivity.nodeToEdgeMatrix();
+
+        EdgeArrayPerNode<int> edge_array_per_node_ref{connectivity, 3};
+
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < edge_array_per_node_ref.numberOfSubArrays(node_id); ++j) {
+              for (size_t k = 0; k < edge_array_per_node_ref.sizeOfArrays(); ++k) {
+                edge_array_per_node_ref(node_id, j)[k] =
+                  node_number[node_id] + 100 * edge_number[node_to_edge_matrix[node_id][j]] + 2 * k;
+              }
+            }
+          });
+
+        EdgeArrayPerNode<int> edge_array_per_node{connectivity, 3};
+        edge_array_per_node.fill(-1);
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            if (node_is_owned[node_id]) {
+              for (size_t j = 0; j < edge_array_per_node.numberOfSubArrays(node_id); ++j) {
+                for (size_t k = 0; k < edge_array_per_node.sizeOfArrays(); ++k) {
+                  edge_array_per_node(node_id, j)[k] =
+                    node_number[node_id] + 100 * edge_number[node_to_edge_matrix[node_id][j]] + 2 * k;
+                }
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(edge_array_per_node, edge_array_per_node_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(edge_array_per_node);
+
+        REQUIRE(is_same_item_array(edge_array_per_node, edge_array_per_node_ref));
+
+        // Check that exchange sizes are correctly stored (require
+        // lines to be covered)
+        if (parallel::size() > 1) {
+          const auto& node_owner = connectivity.nodeOwner();
+          reset_ghost_arrays(edge_array_per_node, node_owner, 0);
+          REQUIRE(not is_same_item_array(edge_array_per_node, edge_array_per_node_ref));
+          synchronizer.synchronize(edge_array_per_node);
+          REQUIRE(is_same_item_array(edge_array_per_node, edge_array_per_node_ref));
+        }
+      }
+    }
+
+    SECTION("3D")
+    {
+      constexpr size_t Dimension = 3;
+      using ConnectivityType     = Connectivity<Dimension>;
+
+      const ConnectivityType& connectivity = MeshDataBaseForTests::get().hybrid3DMesh()->connectivity();
+
+      SECTION("synchonize NodeArrayPerCell")
+      {
+        const auto cell_owner  = connectivity.cellOwner();
+        const auto cell_number = connectivity.cellNumber();
+
+        NodeArrayPerCell<int> node_array_per_cell_ref{connectivity, 3};
+
+        parallel_for(
+          connectivity.numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
+            for (size_t j = 0; j < node_array_per_cell_ref.numberOfSubArrays(cell_id); ++j) {
+              for (size_t k = 0; k < node_array_per_cell_ref.sizeOfArrays(); ++k) {
+                node_array_per_cell_ref(cell_id, j)[k] = cell_owner[cell_id] + cell_number[cell_id] + j + 2 * k;
+              }
+            }
+          });
+
+        NodeArrayPerCell<int> node_array_per_cell{connectivity, 3};
+        parallel_for(
+          connectivity.numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
+            for (size_t j = 0; j < node_array_per_cell_ref.numberOfSubArrays(cell_id); ++j) {
+              for (size_t k = 0; k < node_array_per_cell_ref.sizeOfArrays(); ++k) {
+                node_array_per_cell(cell_id, j)[k] = parallel::rank() + cell_number[cell_id] + j + 2 * k;
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(node_array_per_cell, node_array_per_cell_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(node_array_per_cell);
+
+        REQUIRE(is_same_item_array(node_array_per_cell, node_array_per_cell_ref));
+
+        // Check that exchange sizes are correctly stored (require
+        // lines to be covered)
+        if (parallel::size() > 1) {
+          reset_ghost_arrays(node_array_per_cell, cell_owner, 0);
+          REQUIRE(not is_same_item_array(node_array_per_cell, node_array_per_cell_ref));
+          synchronizer.synchronize(node_array_per_cell);
+          REQUIRE(is_same_item_array(node_array_per_cell, node_array_per_cell_ref));
+        }
+      }
+
+      SECTION("synchonize EdgeArrayPerCell")
+      {
+        const auto cell_owner  = connectivity.cellOwner();
+        const auto cell_number = connectivity.cellNumber();
+
+        EdgeArrayPerCell<int> edge_array_per_cell_ref{connectivity, 3};
+
+        parallel_for(
+          connectivity.numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
+            for (size_t j = 0; j < edge_array_per_cell_ref.numberOfSubArrays(cell_id); ++j) {
+              for (size_t k = 0; k < edge_array_per_cell_ref.sizeOfArrays(); ++k) {
+                edge_array_per_cell_ref(cell_id, j)[k] = cell_owner[cell_id] + cell_number[cell_id] + j + 2 * k;
+              }
+            }
+          });
+
+        EdgeArrayPerCell<int> edge_array_per_cell{connectivity, 3};
+        parallel_for(
+          connectivity.numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
+            for (size_t j = 0; j < edge_array_per_cell_ref.numberOfSubArrays(cell_id); ++j) {
+              for (size_t k = 0; k < edge_array_per_cell_ref.sizeOfArrays(); ++k) {
+                edge_array_per_cell(cell_id, j)[k] = parallel::rank() + cell_number[cell_id] + j + 2 * k;
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(edge_array_per_cell, edge_array_per_cell_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(edge_array_per_cell);
+
+        REQUIRE(is_same_item_array(edge_array_per_cell, edge_array_per_cell_ref));
+
+        // Check that exchange sizes are correctly stored (require
+        // lines to be covered)
+        if (parallel::size() > 1) {
+          reset_ghost_arrays(edge_array_per_cell, cell_owner, 0);
+          REQUIRE(not is_same_item_array(edge_array_per_cell, edge_array_per_cell_ref));
+          synchronizer.synchronize(edge_array_per_cell);
+          REQUIRE(is_same_item_array(edge_array_per_cell, edge_array_per_cell_ref));
+        }
+      }
+
+      SECTION("synchonize FaceArrayPerCell")
+      {
+        const auto cell_owner  = connectivity.cellOwner();
+        const auto cell_number = connectivity.cellNumber();
+
+        FaceArrayPerCell<int> face_array_per_cell_ref{connectivity, 3};
+
+        parallel_for(
+          connectivity.numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
+            for (size_t j = 0; j < face_array_per_cell_ref.numberOfSubArrays(cell_id); ++j) {
+              for (size_t k = 0; k < face_array_per_cell_ref.sizeOfArrays(); ++k) {
+                face_array_per_cell_ref(cell_id, j)[k] = cell_owner[cell_id] + cell_number[cell_id] + j + 2 * k;
+              }
+            }
+          });
+
+        FaceArrayPerCell<int> face_array_per_cell{connectivity, 3};
+        parallel_for(
+          connectivity.numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
+            for (size_t j = 0; j < face_array_per_cell_ref.numberOfSubArrays(cell_id); ++j) {
+              for (size_t k = 0; k < face_array_per_cell_ref.sizeOfArrays(); ++k) {
+                face_array_per_cell(cell_id, j)[k] = parallel::rank() + cell_number[cell_id] + j + 2 * k;
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(face_array_per_cell, face_array_per_cell_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(face_array_per_cell);
+
+        REQUIRE(is_same_item_array(face_array_per_cell, face_array_per_cell_ref));
+
+        // Check that exchange sizes are correctly stored (require
+        // lines to be covered)
+        if (parallel::size() > 1) {
+          reset_ghost_arrays(face_array_per_cell, cell_owner, 0);
+          REQUIRE(not is_same_item_array(face_array_per_cell, face_array_per_cell_ref));
+          synchronizer.synchronize(face_array_per_cell);
+          REQUIRE(is_same_item_array(face_array_per_cell, face_array_per_cell_ref));
+        }
+      }
+
+      SECTION("synchonize NodeArrayPerFace")
+      {
+        const auto face_owner  = connectivity.faceOwner();
+        const auto face_number = connectivity.faceNumber();
+
+        NodeArrayPerFace<int> node_array_per_face_ref{connectivity, 3};
+
+        parallel_for(
+          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
+            for (size_t j = 0; j < node_array_per_face_ref.numberOfSubArrays(face_id); ++j) {
+              for (size_t k = 0; k < node_array_per_face_ref.sizeOfArrays(); ++k) {
+                node_array_per_face_ref(face_id, j)[k] = face_owner[face_id] + face_number[face_id] + j + 2 * k;
+              }
+            }
+          });
+
+        NodeArrayPerFace<int> node_array_per_face{connectivity, 3};
+        parallel_for(
+          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
+            for (size_t j = 0; j < node_array_per_face_ref.numberOfSubArrays(face_id); ++j) {
+              for (size_t k = 0; k < node_array_per_face_ref.sizeOfArrays(); ++k) {
+                node_array_per_face(face_id, j)[k] = parallel::rank() + face_number[face_id] + j + 2 * k;
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(node_array_per_face, node_array_per_face_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(node_array_per_face);
+
+        REQUIRE(is_same_item_array(node_array_per_face, node_array_per_face_ref));
+
+        // Check that exchange sizes are correctly stored (require
+        // lines to be covered)
+        if (parallel::size() > 1) {
+          reset_ghost_arrays(node_array_per_face, face_owner, 0);
+          REQUIRE(not is_same_item_array(node_array_per_face, node_array_per_face_ref));
+          synchronizer.synchronize(node_array_per_face);
+          REQUIRE(is_same_item_array(node_array_per_face, node_array_per_face_ref));
+        }
+      }
+
+      SECTION("synchonize EdgeArrayPerFace")
+      {
+        const auto face_owner  = connectivity.faceOwner();
+        const auto face_number = connectivity.faceNumber();
+
+        EdgeArrayPerFace<int> edge_array_per_face_ref{connectivity, 3};
+
+        parallel_for(
+          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
+            for (size_t j = 0; j < edge_array_per_face_ref.numberOfSubArrays(face_id); ++j) {
+              for (size_t k = 0; k < edge_array_per_face_ref.sizeOfArrays(); ++k) {
+                edge_array_per_face_ref(face_id, j)[k] = face_owner[face_id] + face_number[face_id] + j + 2 * k;
+              }
+            }
+          });
+
+        EdgeArrayPerFace<int> edge_array_per_face{connectivity, 3};
+        parallel_for(
+          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
+            for (size_t j = 0; j < edge_array_per_face_ref.numberOfSubArrays(face_id); ++j) {
+              for (size_t k = 0; k < edge_array_per_face_ref.sizeOfArrays(); ++k) {
+                edge_array_per_face(face_id, j)[k] = parallel::rank() + face_number[face_id] + j + 2 * k;
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(edge_array_per_face, edge_array_per_face_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(edge_array_per_face);
+
+        REQUIRE(is_same_item_array(edge_array_per_face, edge_array_per_face_ref));
+
+        // Check that exchange sizes are correctly stored (require
+        // lines to be covered)
+        if (parallel::size() > 1) {
+          reset_ghost_arrays(edge_array_per_face, face_owner, 0);
+          REQUIRE(not is_same_item_array(edge_array_per_face, edge_array_per_face_ref));
+          synchronizer.synchronize(edge_array_per_face);
+          REQUIRE(is_same_item_array(edge_array_per_face, edge_array_per_face_ref));
+        }
+      }
+
+      SECTION("synchonize NodeArrayPerEdge")
+      {
+        const auto edge_owner  = connectivity.edgeOwner();
+        const auto edge_number = connectivity.edgeNumber();
+
+        NodeArrayPerEdge<int> node_array_per_edge_ref{connectivity, 3};
+
+        parallel_for(
+          connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
+            for (size_t j = 0; j < node_array_per_edge_ref.numberOfSubArrays(edge_id); ++j) {
+              for (size_t k = 0; k < node_array_per_edge_ref.sizeOfArrays(); ++k) {
+                node_array_per_edge_ref(edge_id, j)[k] = edge_owner[edge_id] + edge_number[edge_id] + j + 2 * k;
+              }
+            }
+          });
+
+        NodeArrayPerEdge<int> node_array_per_edge{connectivity, 3};
+        parallel_for(
+          connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
+            for (size_t j = 0; j < node_array_per_edge_ref.numberOfSubArrays(edge_id); ++j) {
+              for (size_t k = 0; k < node_array_per_edge_ref.sizeOfArrays(); ++k) {
+                node_array_per_edge(edge_id, j)[k] = parallel::rank() + edge_number[edge_id] + j + 2 * k;
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(node_array_per_edge, node_array_per_edge_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(node_array_per_edge);
+
+        REQUIRE(is_same_item_array(node_array_per_edge, node_array_per_edge_ref));
+
+        // Check that exchange sizes are correctly stored (require
+        // lines to be covered)
+        if (parallel::size() > 1) {
+          reset_ghost_arrays(node_array_per_edge, edge_owner, 0);
+          REQUIRE(not is_same_item_array(node_array_per_edge, node_array_per_edge_ref));
+          synchronizer.synchronize(node_array_per_edge);
+          REQUIRE(is_same_item_array(node_array_per_edge, node_array_per_edge_ref));
+        }
+      }
+
+      SECTION("synchonize CellArrayPerNode")
+      {
+        const auto node_is_owned = connectivity.nodeIsOwned();
+        const auto node_number   = connectivity.nodeNumber();
+        const auto cell_number   = connectivity.cellNumber();
+
+        const auto node_to_cell_matrix = connectivity.nodeToCellMatrix();
+
+        CellArrayPerNode<int> cell_array_per_node_ref{connectivity, 3};
+
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < cell_array_per_node_ref.numberOfSubArrays(node_id); ++j) {
+              for (size_t k = 0; k < cell_array_per_node_ref.sizeOfArrays(); ++k) {
+                cell_array_per_node_ref(node_id, j)[k] =
+                  node_number[node_id] + 100 * cell_number[node_to_cell_matrix[node_id][j]] + 2 * k;
+              }
+            }
+          });
+
+        CellArrayPerNode<int> cell_array_per_node{connectivity, 3};
+        cell_array_per_node.fill(-1);
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            if (node_is_owned[node_id]) {
+              for (size_t j = 0; j < cell_array_per_node.numberOfSubArrays(node_id); ++j) {
+                for (size_t k = 0; k < cell_array_per_node.sizeOfArrays(); ++k) {
+                  cell_array_per_node(node_id, j)[k] =
+                    node_number[node_id] + 100 * cell_number[node_to_cell_matrix[node_id][j]] + 2 * k;
+                }
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(cell_array_per_node, cell_array_per_node_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(cell_array_per_node);
+
+        REQUIRE(is_same_item_array(cell_array_per_node, cell_array_per_node_ref));
+
+        // Check that exchange sizes are correctly stored (require
+        // lines to be covered)
+        if (parallel::size() > 1) {
+          const auto& node_owner = connectivity.nodeOwner();
+          reset_ghost_arrays(cell_array_per_node, node_owner, 0);
+          REQUIRE(not is_same_item_array(cell_array_per_node, cell_array_per_node_ref));
+          synchronizer.synchronize(cell_array_per_node);
+          REQUIRE(is_same_item_array(cell_array_per_node, cell_array_per_node_ref));
+        }
+      }
+
+      SECTION("synchonize CellArrayPerEdge")
+      {
+        const auto edge_is_owned = connectivity.edgeIsOwned();
+        const auto edge_number   = connectivity.edgeNumber();
+        const auto cell_number   = connectivity.cellNumber();
+
+        const auto edge_to_cell_matrix = connectivity.edgeToCellMatrix();
+
+        CellArrayPerEdge<int> cell_array_per_edge_ref{connectivity, 3};
+
+        parallel_for(
+          connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
+            for (size_t j = 0; j < cell_array_per_edge_ref.numberOfSubArrays(edge_id); ++j) {
+              for (size_t k = 0; k < cell_array_per_edge_ref.sizeOfArrays(); ++k) {
+                cell_array_per_edge_ref(edge_id, j)[k] =
+                  edge_number[edge_id] + 100 * cell_number[edge_to_cell_matrix[edge_id][j]] + 2 * k;
+              }
+            }
+          });
+
+        CellArrayPerEdge<int> cell_array_per_edge{connectivity, 3};
+        cell_array_per_edge.fill(-1);
+        parallel_for(
+          connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
+            if (edge_is_owned[edge_id]) {
+              for (size_t j = 0; j < cell_array_per_edge.numberOfSubArrays(edge_id); ++j) {
+                for (size_t k = 0; k < cell_array_per_edge.sizeOfArrays(); ++k) {
+                  cell_array_per_edge(edge_id, j)[k] =
+                    edge_number[edge_id] + 100 * cell_number[edge_to_cell_matrix[edge_id][j]] + 2 * k;
+                }
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(cell_array_per_edge, cell_array_per_edge_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(cell_array_per_edge);
+
+        REQUIRE(is_same_item_array(cell_array_per_edge, cell_array_per_edge_ref));
+
+        // Check that exchange sizes are correctly stored (require
+        // lines to be covered)
+        if (parallel::size() > 1) {
+          const auto& edge_owner = connectivity.edgeOwner();
+          reset_ghost_arrays(cell_array_per_edge, edge_owner, 0);
+          REQUIRE(not is_same_item_array(cell_array_per_edge, cell_array_per_edge_ref));
+          synchronizer.synchronize(cell_array_per_edge);
+          REQUIRE(is_same_item_array(cell_array_per_edge, cell_array_per_edge_ref));
+        }
+      }
+
+      SECTION("synchonize CellArrayPerFace")
+      {
+        const auto face_is_owned = connectivity.faceIsOwned();
+        const auto face_number   = connectivity.faceNumber();
+        const auto cell_number   = connectivity.cellNumber();
+
+        const auto face_to_cell_matrix = connectivity.faceToCellMatrix();
+
+        CellArrayPerFace<int> cell_array_per_face_ref{connectivity, 3};
+
+        parallel_for(
+          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
+            for (size_t j = 0; j < cell_array_per_face_ref.numberOfSubArrays(face_id); ++j) {
+              for (size_t k = 0; k < cell_array_per_face_ref.sizeOfArrays(); ++k) {
+                cell_array_per_face_ref(face_id, j)[k] =
+                  face_number[face_id] + 100 * cell_number[face_to_cell_matrix[face_id][j]] + 2 * k;
+              }
+            }
+          });
+
+        CellArrayPerFace<int> cell_array_per_face{connectivity, 3};
+        cell_array_per_face.fill(-1);
+        parallel_for(
+          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
+            if (face_is_owned[face_id]) {
+              for (size_t j = 0; j < cell_array_per_face.numberOfSubArrays(face_id); ++j) {
+                for (size_t k = 0; k < cell_array_per_face.sizeOfArrays(); ++k) {
+                  cell_array_per_face(face_id, j)[k] =
+                    face_number[face_id] + 100 * cell_number[face_to_cell_matrix[face_id][j]] + 2 * k;
+                }
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(cell_array_per_face, cell_array_per_face_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(cell_array_per_face);
+
+        REQUIRE(is_same_item_array(cell_array_per_face, cell_array_per_face_ref));
+
+        // Check that exchange sizes are correctly stored (require
+        // lines to be covered)
+        if (parallel::size() > 1) {
+          const auto& face_owner = connectivity.faceOwner();
+          reset_ghost_arrays(cell_array_per_face, face_owner, 0);
+          REQUIRE(not is_same_item_array(cell_array_per_face, cell_array_per_face_ref));
+          synchronizer.synchronize(cell_array_per_face);
+          REQUIRE(is_same_item_array(cell_array_per_face, cell_array_per_face_ref));
+        }
+      }
+
+      SECTION("synchonize FaceArrayPerNode")
+      {
+        const auto node_is_owned = connectivity.nodeIsOwned();
+        const auto node_number   = connectivity.nodeNumber();
+        const auto face_number   = connectivity.faceNumber();
+
+        const auto node_to_face_matrix = connectivity.nodeToFaceMatrix();
+
+        FaceArrayPerNode<int> face_array_per_node_ref{connectivity, 3};
+
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < face_array_per_node_ref.numberOfSubArrays(node_id); ++j) {
+              for (size_t k = 0; k < face_array_per_node_ref.sizeOfArrays(); ++k) {
+                face_array_per_node_ref(node_id, j)[k] =
+                  node_number[node_id] + 100 * face_number[node_to_face_matrix[node_id][j]] + 2 * k;
+              }
+            }
+          });
+
+        FaceArrayPerNode<int> face_array_per_node{connectivity, 3};
+        face_array_per_node.fill(-1);
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            if (node_is_owned[node_id]) {
+              for (size_t j = 0; j < face_array_per_node.numberOfSubArrays(node_id); ++j) {
+                for (size_t k = 0; k < face_array_per_node.sizeOfArrays(); ++k) {
+                  face_array_per_node(node_id, j)[k] =
+                    node_number[node_id] + 100 * face_number[node_to_face_matrix[node_id][j]] + 2 * k;
+                }
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(face_array_per_node, face_array_per_node_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(face_array_per_node);
+
+        REQUIRE(is_same_item_array(face_array_per_node, face_array_per_node_ref));
+
+        // Check that exchange sizes are correctly stored (require
+        // lines to be covered)
+        if (parallel::size() > 1) {
+          const auto& node_owner = connectivity.nodeOwner();
+          reset_ghost_arrays(face_array_per_node, node_owner, 0);
+          REQUIRE(not is_same_item_array(face_array_per_node, face_array_per_node_ref));
+          synchronizer.synchronize(face_array_per_node);
+          REQUIRE(is_same_item_array(face_array_per_node, face_array_per_node_ref));
+        }
+      }
+
+      SECTION("synchonize FaceArrayPerEdge")
+      {
+        const auto edge_is_owned = connectivity.edgeIsOwned();
+        const auto edge_number   = connectivity.edgeNumber();
+        const auto face_number   = connectivity.faceNumber();
+
+        const auto edge_to_face_matrix = connectivity.edgeToFaceMatrix();
+
+        FaceArrayPerEdge<int> face_array_per_edge_ref{connectivity, 3};
+
+        parallel_for(
+          connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
+            for (size_t j = 0; j < face_array_per_edge_ref.numberOfSubArrays(edge_id); ++j) {
+              for (size_t k = 0; k < face_array_per_edge_ref.sizeOfArrays(); ++k) {
+                face_array_per_edge_ref(edge_id, j)[k] =
+                  edge_number[edge_id] + 100 * face_number[edge_to_face_matrix[edge_id][j]] + 2 * k;
+              }
+            }
+          });
+
+        FaceArrayPerEdge<int> face_array_per_edge{connectivity, 3};
+        face_array_per_edge.fill(-1);
+        parallel_for(
+          connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
+            if (edge_is_owned[edge_id]) {
+              for (size_t j = 0; j < face_array_per_edge.numberOfSubArrays(edge_id); ++j) {
+                for (size_t k = 0; k < face_array_per_edge.sizeOfArrays(); ++k) {
+                  face_array_per_edge(edge_id, j)[k] =
+                    edge_number[edge_id] + 100 * face_number[edge_to_face_matrix[edge_id][j]] + 2 * k;
+                }
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(face_array_per_edge, face_array_per_edge_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(face_array_per_edge);
 
-        SECTION("FaceArrayPerNode")
-        {
-          FaceArrayPerNode<int> face_array_per_node{connectivity, 3};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(face_array_per_node),
-                              "unexpected error: synchronization requires sub-item type (face) to be of lower "
-                              "dimension than item (node)");
+        REQUIRE(is_same_item_array(face_array_per_edge, face_array_per_edge_ref));
+
+        // Check that exchange sizes are correctly stored (require
+        // lines to be covered)
+        if (parallel::size() > 1) {
+          const auto& edge_owner = connectivity.edgeOwner();
+          reset_ghost_arrays(face_array_per_edge, edge_owner, 0);
+          REQUIRE(not is_same_item_array(face_array_per_edge, face_array_per_edge_ref));
+          synchronizer.synchronize(face_array_per_edge);
+          REQUIRE(is_same_item_array(face_array_per_edge, face_array_per_edge_ref));
         }
+      }
+
+      SECTION("synchonize EdgeArrayPerNode")
+      {
+        const auto node_is_owned = connectivity.nodeIsOwned();
+        const auto node_number   = connectivity.nodeNumber();
+        const auto edge_number   = connectivity.edgeNumber();
+
+        const auto node_to_edge_matrix = connectivity.nodeToEdgeMatrix();
+
+        EdgeArrayPerNode<int> edge_array_per_node_ref{connectivity, 3};
+
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < edge_array_per_node_ref.numberOfSubArrays(node_id); ++j) {
+              for (size_t k = 0; k < edge_array_per_node_ref.sizeOfArrays(); ++k) {
+                edge_array_per_node_ref(node_id, j)[k] =
+                  node_number[node_id] + 100 * edge_number[node_to_edge_matrix[node_id][j]] + 2 * k;
+              }
+            }
+          });
+
+        EdgeArrayPerNode<int> edge_array_per_node{connectivity, 3};
+        edge_array_per_node.fill(-1);
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            if (node_is_owned[node_id]) {
+              for (size_t j = 0; j < edge_array_per_node.numberOfSubArrays(node_id); ++j) {
+                for (size_t k = 0; k < edge_array_per_node.sizeOfArrays(); ++k) {
+                  edge_array_per_node(node_id, j)[k] =
+                    node_number[node_id] + 100 * edge_number[node_to_edge_matrix[node_id][j]] + 2 * k;
+                }
+              }
+            }
+          });
 
-        SECTION("FaceArrayPerEdge")
-        {
-          FaceArrayPerEdge<int> face_array_per_edge{connectivity, 3};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(face_array_per_edge),
-                              "unexpected error: synchronization requires sub-item type (face) to be of lower "
-                              "dimension than item (edge)");
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(edge_array_per_node, edge_array_per_node_ref));
         }
 
-        SECTION("EdgeArrayPerNode")
-        {
-          EdgeArrayPerNode<int> edge_array_per_node{connectivity, 3};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(edge_array_per_node),
-                              "unexpected error: synchronization requires sub-item type (edge) to be of lower "
-                              "dimension than item (node)");
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(edge_array_per_node);
+
+        REQUIRE(is_same_item_array(edge_array_per_node, edge_array_per_node_ref));
+
+        // Check that exchange sizes are correctly stored (require
+        // lines to be covered)
+        if (parallel::size() > 1) {
+          const auto& node_owner = connectivity.nodeOwner();
+          reset_ghost_arrays(edge_array_per_node, node_owner, 0);
+          REQUIRE(not is_same_item_array(edge_array_per_node, edge_array_per_node_ref));
+          synchronizer.synchronize(edge_array_per_node);
+          REQUIRE(is_same_item_array(edge_array_per_node, edge_array_per_node_ref));
         }
       }
     }