diff --git a/CMakeLists.txt b/CMakeLists.txt
index 93eb3fa005eda92b4884d61cfa8963fc237b5354..560dd74c04bf5f76dee000a9a8883276ff99bdfd 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -105,12 +105,6 @@ elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
   set(PUGS_CXX_FLAGS "${PUGS_CXX_FLAGS} -Wsign-compare -Wunused -Wunused-member-function -Wunused-private-field")
 endif()
 
-# if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
-#   if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "9.0.0")
-#     set(PUGS_STD_LINK_FLAGS "-lstdc++fs")
-#   endif()
-# endif()
-
 #------------------------------------------------------
 
 include (TestBigEndian)
@@ -178,7 +172,7 @@ else()
 endif()
 
 if (${PETSC_FOUND})
-  include_directories(SYSTEM ${PETSC_INCLUDE_DIRS})
+  include_directories(SYSTEM "${PETSC_INCLUDE_DIRS}")
 else()
   if (PUGS_ENABLE_PETSC MATCHES "^ON$")
     message(FATAL_ERROR "Could not find PETSc!")
@@ -206,7 +200,7 @@ else()
 endif()
 
 if (${SLEPC_FOUND})
-  include_directories(SYSTEM ${SLEPC_INCLUDE_DIRS})
+  include_directories(SYSTEM "${SLEPC_INCLUDE_DIRS}")
 else()
   if (PUGS_ENABLE_SLEPC MATCHES "^ON$")
     message(FATAL_ERROR "Could not find SLEPc!")
@@ -217,13 +211,45 @@ endif()
 
 if (${MPI_FOUND})
   set(PUGS_CXX_FLAGS "${PUGS_CXX_FLAGS} ${MPI_CXX_COMPILER_FLAGS}")
-  include_directories(SYSTEM ${MPI_CXX_INCLUDE_DIRS})
+  include_directories(SYSTEM "${MPI_CXX_INCLUDE_DIRS}")
 elseif(PUGS_ENABLE_MPI STREQUAL "ON")
   message(FATAL_ERROR "Could not find MPI library while requested")
 endif()
 
 set(PUGS_HAS_MPI ${MPI_FOUND})
 
+#------------------------------------------------------
+# search for HDF5
+
+set(PUGS_ENABLE_HDF5 AUTO CACHE STRING
+  "Choose one of: AUTO ON OFF")
+
+if (PUGS_ENABLE_HDF5 MATCHES "^(AUTO|ON)$")
+  # May be risky (run make to show the pugs build options)
+  find_package(HDF5)
+  if (HDF5_FOUND)
+    # HighFive
+    set(HIGHFIVE_USE_BOOST  OFF)   # no Boost
+    set(HIGHFIVE_BUILD_DOCS OFF)   # no doc
+    set(HIGHFIVE_UNIT_TESTS OFF)   # no unit tests
+    set(HIGHFIVE_EXAMPLES OFF)     # no examples
+    set(HIGHFIVE_PARALLEL_HDF5 ON) # activate parallel HDF5
+    add_subdirectory(${PUGS_SOURCE_DIR}/packages/HighFive/)
+    set(HIGHFIVE_TARGET HighFive)
+  endif()
+  set(PUGS_HAS_HDF5 ${HDF5_FOUND})
+else()
+  unset(HIGHFIVE_TARGET)
+  unset(PUGS_HAS_HDF5)
+endif()
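+
+# Illustrative sketch (not part of the build): downstream CMake code can guard
+# HDF5-dependent pieces on PUGS_HAS_HDF5 and link the HIGHFIVE_TARGET variable,
+# which is unset when HDF5 support is disabled or HDF5 was not found, e.g.
+#   if (PUGS_HAS_HDF5)
+#     target_link_libraries(my_target ${HIGHFIVE_TARGET})  # my_target is hypothetical
+#   endif()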
+
 #------------------------------------------------------
 # search for clang-format
 
@@ -283,11 +303,11 @@ endif()
 add_subdirectory("${PUGS_SOURCE_DIR}/packages/kokkos")
 
 # set as SYSTEM for static analysis
-include_directories(SYSTEM ${KOKKOS_SOURCE_DIR}/core/src)
-include_directories(SYSTEM ${KOKKOS_SOURCE_DIR}/containers/src)
-include_directories(SYSTEM ${KOKKOS_SOURCE_DIR}/tpls/desul/include)
-include_directories(SYSTEM ${KOKKOS_BINARY_DIR}/core/src)
-include_directories(SYSTEM ${KOKKOS_BINARY_DIR})
+include_directories(SYSTEM "${KOKKOS_SOURCE_DIR}/core/src")
+include_directories(SYSTEM "${KOKKOS_SOURCE_DIR}/containers/src")
+include_directories(SYSTEM "${KOKKOS_SOURCE_DIR}/tpls/desul/include")
+include_directories(SYSTEM "${KOKKOS_BINARY_DIR}/core/src")
+include_directories(SYSTEM "${KOKKOS_BINARY_DIR}")
 
 set(PUGS_BUILD_KOKKOS_DEVICES "")
 if(${Kokkos_ENABLE_PTHREAD})
@@ -339,18 +359,18 @@ endif()
 #------------------------------------------------------
 
 # Rang (colors? Useless thus necessary!)
-include_directories(${PUGS_SOURCE_DIR}/packages/rang/include)
+include_directories(SYSTEM "${PUGS_SOURCE_DIR}/packages/rang/include")
 
 # CLI11
-include_directories(${PUGS_SOURCE_DIR}/packages/CLI11/include)
+include_directories(SYSTEM "${PUGS_SOURCE_DIR}/packages/CLI11/include")
 
 # PEGTL
-include_directories(SYSTEM ${PUGS_SOURCE_DIR}/packages/PEGTL/include/tao)
+include_directories(SYSTEM "${PUGS_SOURCE_DIR}/packages/PEGTL/include/tao")
 
 # Pugs src
-add_subdirectory(${PUGS_SOURCE_DIR}/src)
-include_directories(${PUGS_SOURCE_DIR}/src)
-include_directories(${PUGS_BINARY_DIR}/src)
+add_subdirectory("${PUGS_SOURCE_DIR}/src")
+include_directories("${PUGS_SOURCE_DIR}/src")
+include_directories("${PUGS_BINARY_DIR}/src")
 
 # Pugs tests
 set(CATCH_MODULE_PATH "${PUGS_SOURCE_DIR}/packages/Catch2")
@@ -577,6 +597,7 @@ target_link_libraries(
   PugsMesh
   PugsAlgebra
   PugsAnalysis
+  PugsDev
   PugsUtils
   PugsLanguage
   PugsLanguageAST
@@ -596,6 +617,7 @@ target_link_libraries(
   ${KOKKOS_CXX_FLAGS}
   ${OPENMP_LINK_FLAGS}
   ${PUGS_STD_LINK_FLAGS}
+  ${HIGHFIVE_TARGET}
   stdc++fs
   )
 
@@ -613,6 +635,7 @@ install(TARGETS
   PugsMesh
   PugsAlgebra
   PugsAnalysis
+  PugsDev
   PugsUtils
   PugsLanguage
   PugsLanguageAST
@@ -674,6 +697,16 @@ else()
   endif()
 endif()
 
+if (HDF5_FOUND)
+  message(" HDF5: ${HDF5_VERSION} parallel: ${HDF5_IS_PARALLEL}")
+else()
+  if (PUGS_ENABLE_HDF5 MATCHES "^(AUTO|ON)$")
+    message(" HDF5: not found!")
+  else()
+    message(" HDF5: explicitly deactivated!")
+  endif()
+endif()
+
 message("----------- utilities ----------")
 
 if(CLANG_FORMAT)
diff --git a/cmake/PugsDoc.cmake b/cmake/PugsDoc.cmake
index b067a5cc828ec0c245e8d51c0bda46a96dcce702..2be770b3a3163c6ea490413b0f73a1f4f828c82f 100644
--- a/cmake/PugsDoc.cmake
+++ b/cmake/PugsDoc.cmake
@@ -97,7 +97,20 @@ if (EMACS AND GNUPLOT_FOUND AND GMSH)
       COMMENT "Building user documentation in doc/userdoc.pdf"
       VERBATIM)
 
-    add_custom_target(userdoc-pdf DEPENDS pugsdoc-dir "${PUGS_BINARY_DIR}/doc/userdoc.pdf" )
+    configure_file("${PUGS_SOURCE_DIR}/doc/build-userdoc-pdf.sh.in"
+      "${PUGS_BINARY_DIR}/doc/build-userdoc-pdf.sh"
+      FILE_PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE
+      @ONLY)
+
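+    # mark the configured helper script as generated and not to be compiled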
+    set_source_files_properties(
+      "${PUGS_BINARY_DIR}/doc/build-userdoc-pdf.sh"
+      PROPERTIES
+      GENERATED TRUE
+      HEADER_FILE_ONLY TRUE
+    )
+
+    add_custom_target(userdoc-pdf DEPENDS pugsdoc-dir "${PUGS_BINARY_DIR}/doc/userdoc.pdf" "${PUGS_BINARY_DIR}/doc/build-userdoc-pdf.sh")
 
     add_dependencies(userdoc userdoc-pdf)
 
diff --git a/doc/build-userdoc-pdf.sh.in b/doc/build-userdoc-pdf.sh.in
new file mode 100644
index 0000000000000000000000000000000000000000..9445e7e75cbb62486a4aa3647fa41a171adb7087
--- /dev/null
+++ b/doc/build-userdoc-pdf.sh.in
@@ -0,0 +1,6 @@
+#! /usr/bin/env bash
+
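+# pdflatex is run three times so that cross-references and the table of contents stabilize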
+@PDFLATEX_COMPILER@ -shell-escape -interaction nonstopmode userdoc
+@PDFLATEX_COMPILER@ -shell-escape -interaction nonstopmode userdoc
+@PDFLATEX_COMPILER@ -shell-escape -interaction nonstopmode userdoc
diff --git a/doc/lisp/build-doc-config.el b/doc/lisp/build-doc-config.el
index 1da3a5c9fecf3540dfd701380a340722379642cf..6f06400a6dc1176370550342ea7bfb04ac976c11 100644
--- a/doc/lisp/build-doc-config.el
+++ b/doc/lisp/build-doc-config.el
@@ -64,9 +64,7 @@
 (setq org-latex-listings 'minted
       org-latex-packages-alist '(("" "minted"))
       org-latex-pdf-process
-      '("cd ${PUGS_BINARY_DIR}/doc; pdflatex -shell-escape -interaction nonstopmode -output-directory %o %f"
-        "cd ${PUGS_BINARY_DIR}/doc; pdflatex -shell-escape -interaction nonstopmode -output-directory %o %f"
-        "cd ${PUGS_BINARY_DIR}/doc; pdflatex -shell-escape -interaction nonstopmode -output-directory %o %f"))
+      '("cd ${PUGS_BINARY_DIR}/doc; ./build-userdoc-pdf.sh"))
 
 (setq python-indent-guess-indent-offset-verbose nil)
 
diff --git a/doc/lisp/share/ob-pugs-error.el b/doc/lisp/share/ob-pugs-error.el
index 1e4680cbcc71729be805622081b37dfad6176d9c..e2228f9f4cc3ca0a0fcb65d32d8c2f8780419229 100644
--- a/doc/lisp/share/ob-pugs-error.el
+++ b/doc/lisp/share/ob-pugs-error.el
@@ -131,7 +131,7 @@
     (with-temp-file in-file
       (insert body))
     (org-babel-eval
-		(format "${PUGS} --no-preamble --no-color %s 2>&1 | sed 's@/.*\.pgs:@test.pgs:@'"
+		(format "${PUGS} --no-exec-stat --no-preamble --no-color --threads=1 %s 2>&1 | sed 's@/.*\.pgs:@test.pgs:@'"
 			(org-babel-process-file-name in-file))
 		"")))
 
diff --git a/doc/lisp/share/ob-pugs.el b/doc/lisp/share/ob-pugs.el
index 6c3d72956d519bea339b63cb883c57f9bf1cd000..fb346b80b68f95a10b214192a99c8a1e1d8f4a48 100644
--- a/doc/lisp/share/ob-pugs.el
+++ b/doc/lisp/share/ob-pugs.el
@@ -130,7 +130,7 @@
     (with-temp-file in-file
       (insert body))
     (org-babel-eval
-		(format "${PUGS} --no-preamble --no-color %s"
+		(format "${PUGS} --no-exec-stat --no-preamble --no-color --threads=1 %s"
 			(org-babel-process-file-name in-file))
 		"")))
 
diff --git a/doc/userdoc.org b/doc/userdoc.org
index 490db865223c800e05ee0ee31b52bd86486f8872..b50768b072d372e1d462508343aaed9e2acff75b 100644
--- a/doc/userdoc.org
+++ b/doc/userdoc.org
@@ -391,8 +391,8 @@ answer a specific need. It must not be done /because it is possible to
 do it/!
 
 #+begin_verse
-When designing a language, the difficulty is not to offer new functionalities,\\
-it is generally to decide not to offer them.\\
+When designing a language, the difficulty is not to offer new functionalities,
+it is generally to decide not to offer them.
 --- Bjarne Stroustrup, C++ conference 2021.
 #+end_verse
 
@@ -1390,7 +1390,7 @@ they follow a few rules.
   When comparing a boolean value (type ~B~) with another scalar value
   type (~N~, ~Z~ or ~R~), the value ~true~ is interpreted as $1$ and the value
   ~false~ as $0$.
-\\
+  \\
   For vector and matrix basic types, the only allowed operators are ~==~
   and ~!=~.
   #+begin_src latex :results drawer :exports results
@@ -1406,7 +1406,7 @@ they follow a few rules.
       \right.
     \end{equation*}
   #+end_src
-\\
+
   This is also the case for ~string~ values: only allowed operators are
   ~==~ and ~!=~.
   #+begin_src latex :results drawer :exports results
@@ -2667,6 +2670,64 @@ Gets the abstract syntax tree associated to a user function into a
 Saves the AST of the script into the file (whose name is given as
 argument) using the dot format.
 
+***** Parallel checker utilities
+
+When developing parallel applications, it is generally useful to be
+able to check that results are reproducible bit-for-bit, whatever the
+number of processes or threads. Such a mechanism is provided by ~pugs~.
+
+It works as follows. One first runs the code to build a reference
+(for now the reference cannot be built using ~MPI~ parallelism); then
+a second run compares the obtained results to the reference ones. If
+a ghost value differs, a warning is printed stating that the data is
+not synchronized (this may not be a mistake); if a non-ghost value
+differs, the execution stops, indicating an error.
+
+Parallel checking points can be placed directly in the source code
+(see developer documentation), but for convenience, one can also check
+parallelism directly in the scripting language.
+
+The provided functions are
+- ~parallel_check: Vh*string -> void~
+- ~parallel_check: item_array*string -> void~
+- ~parallel_check: item_value*string -> void~
+- ~parallel_check: sub_item_array*string -> void~
+- ~parallel_check: sub_item_value*string -> void~
+
+They allow one to check parallelism for ~Vh~ functions and for
+~item_value~, ~item_array~, ~sub_item_value~ or ~sub_item_array~
+variables. These types are discussed below when describing the ~mesh~
+and ~scheme~ modules (see sections [[mesh]] and [[scheme]]). The second
+argument is a string used as a tag to ease the reading of the output.
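+
+For instance (an illustrative sketch: ~uh~ is assumed to be a ~Vh~
+function built with the ~scheme~ module described below), one can write
+#+BEGIN_SRC pugs :exports source
+parallel_check(uh, "uh after scheme step");
+#+END_SRC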
+
+To create the reference, one launches the code as
+#+BEGIN_SRC shell :exports source
+./pugs example.pgs --parallel-checker-file="/tmp/my-ref.h5"
+#+END_SRC
+Observe that the option ~--parallel-checker-file="/tmp/my-ref.h5"~ is
+not mandatory: if it is not specified, the reference is created in
+the current directory in the file ~parallel_checker.h5~. Also observe
+that this command runs the code sequentially (in the sense that there
+is no message passing); in that case the parallel checker runs in
+write mode automatically. To force the read mode (for comparison),
+one can use the ~--parallel-checker-mode=read~ option
+#+BEGIN_SRC shell :exports source
+./pugs example.pgs \
+  --parallel-checker-file="/tmp/my-ref.h5" \
+  --parallel-checker-mode=read
+#+END_SRC
+Running the code in parallel automatically triggers the
+read/comparison mode.
+#+BEGIN_SRC shell :exports source
+mpirun -n 3 pugs example.pgs --parallel-checker-file="/tmp/my-ref.h5"
+#+END_SRC
+
 *** The ~math~ module
 
 The ~math~ module is a small utility module that provides a set of
@@ -2765,7 +2817,7 @@ mathematically ${x^y}^z = x^{(y^z)}$, many softwares treat it (by mistake)
 as ${(x^y)}^z$. Thus, using the ~pow~ function avoids any confusion.
 #+END_note
 
-*** The ~mesh~ module
+*** The ~mesh~ module<<mesh>>
 
 This is an important module. It provides mesh utility tools.
 #+NAME: get-module-info-mesh
@@ -3136,7 +3188,6 @@ available in parallel
 
 ***** Item types
 
-\\
 The following functions are used to designate a specific ~item_type~
 - ~cell: void -> item_type~
 - ~face: void -> item_type~
diff --git a/packages/HighFive/.clang-format b/packages/HighFive/.clang-format
new file mode 100644
index 0000000000000000000000000000000000000000..8a59ff4a312d335b35607d9fb972f5ca553772f9
--- /dev/null
+++ b/packages/HighFive/.clang-format
@@ -0,0 +1,93 @@
+---
+AccessModifierOffset: -2
+AlignAfterOpenBracket: Align
+AlignConsecutiveAssignments: false
+AlignConsecutiveDeclarations: false
+AlignConsecutiveMacros: true
+AlignEscapedNewlinesLeft: true
+AlignOperands: true
+AlignTrailingComments: true
+AllowAllParametersOfDeclarationOnNextLine: false
+AllowShortBlocksOnASingleLine: false
+AllowShortCaseLabelsOnASingleLine: false
+AllowShortFunctionsOnASingleLine: Empty
+AllowShortIfStatementsOnASingleLine: false
+AllowShortLoopsOnASingleLine: false
+AlwaysBreakBeforeMultilineStrings: true
+AlwaysBreakTemplateDeclarations: true
+BasedOnStyle: WebKit
+BinPackArguments: false
+BinPackParameters: false
+BraceWrapping:
+  AfterClass: false
+  AfterControlStatement: false
+  AfterEnum: false
+  AfterExternBlock: false
+  AfterFunction: false
+  AfterNamespace: false
+  AfterStruct: false
+  AfterUnion: false
+  BeforeCatch: false
+  BeforeElse: false
+BreakBeforeBraces: Custom
+BreakBeforeBinaryOperators: false
+BreakBeforeTernaryOperators: true
+BreakConstructorInitializersBeforeComma: true
+BreakStringLiterals: true
+ColumnLimit: 100
+CommentPragmas:  '^\\.+'
+ConstructorInitializerAllOnOneLineOrOnePerLine: false
+ConstructorInitializerIndentWidth: 4
+ContinuationIndentWidth: 4
+Cpp11BracedListStyle: true
+DerivePointerAlignment: false
+DerivePointerBinding: true
+ExperimentalAutoDetectBinPacking: false
+FixNamespaceComments: true
+ForEachMacros:   [ foreach, Q_FOREACH, BOOST_FOREACH ]
+IncludeCategories:
+  - Regex:           '^"(llvm|llvm-c|clang|clang-c)/'
+    Priority:        2
+  - Regex:           '^(<|"(gtest|isl|json)/)'
+    Priority:        3
+  - Regex:           '.*'
+    Priority:        1
+IncludeIsMainRegex: '$'
+IndentCaseLabels: false
+IndentWidth:     4
+IndentWrappedFunctionNames: false
+KeepEmptyLinesAtTheStartOfBlocks: false
+Language: Cpp
+MaxEmptyLinesToKeep: 2
+NamespaceIndentation: None
+PenaltyBreakAssignment: 40
+PenaltyBreakBeforeFirstCallParameter: 100
+PenaltyBreakComment: 60
+PenaltyBreakFirstLessLess: 120
+PenaltyBreakString: 1000
+PenaltyExcessCharacter: 1000000
+PenaltyReturnTypeOnItsOwnLine: 200
+PointerAlignment: Left
+PointerBindsToType: true
+ReflowComments:  true
+SortIncludes: false
+SpaceAfterCStyleCast: true
+SpaceAfterTemplateKeyword: true
+SpaceBeforeAssignmentOperators: true
+SpaceBeforeCpp11BracedList: false
+SpaceBeforeCtorInitializerColon: false
+SpaceBeforeInheritanceColon: false
+SpaceBeforeParens: ControlStatements
+SpaceBeforeRangeBasedForLoopColon: false
+SpaceInEmptyBlock: false
+SpaceInEmptyParentheses: false
+SpacesBeforeTrailingComments: 2
+SpacesInAngles: false # '< ' style
+SpacesInContainerLiterals: false
+SpacesInCStyleCastParentheses: false
+SpacesInParentheses: false # '(' style
+SpacesInSquareBrackets: false
+Standard: c++14
+TabWidth: 4
+UseTab: Never
+...
diff --git a/packages/HighFive/.git-blame-ignore-revs b/packages/HighFive/.git-blame-ignore-revs
new file mode 100644
index 0000000000000000000000000000000000000000..a7a0c41e2179015394175fe081caee3e7316c7ef
--- /dev/null
+++ b/packages/HighFive/.git-blame-ignore-revs
@@ -0,0 +1,2 @@
+# clang-format the whole repository
+6fc243144b6a7802bf29d3fbb2c028051a71ca28
diff --git a/packages/HighFive/.github/ISSUE_TEMPLATE/bug_report.md b/packages/HighFive/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 0000000000000000000000000000000000000000..2558a56c406bc2f7bd2bebb6f71a7b8a0f156049
--- /dev/null
+++ b/packages/HighFive/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,31 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+**Describe the bug**
+A clear and concise description of what the bug is.
+
+**To Reproduce**
+Steps to reproduce the behavior:
+1. Go to '...'
+2. Click on '....'
+3. Scroll down to '....'
+4. See error
+
+**Expected behavior**
+A clear and concise description of what you expected to happen.
+
+**Stacktrace**
+If applicable, add a stacktrace and error messages to help explain your problem.
+
+**Desktop (please complete the following information):**
+ - OS: [e.g. ubuntu 20.10, macos 10.15]
+ - Version [e.g. master branch]
+
+**Additional context**
+Add any other context about the problem here.
diff --git a/packages/HighFive/.github/ISSUE_TEMPLATE/config.yml b/packages/HighFive/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 0000000000000000000000000000000000000000..1d9540b00b224d6c07e2a20159108348107c69dd
--- /dev/null
+++ b/packages/HighFive/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,5 @@
+blank_issues_enabled: true
+contact_links:
+  - name: HighFive Community Support
+    url: https://github.com/BlueBrain/HighFive/discussions
+    about: Please ask and answer questions here.
diff --git a/packages/HighFive/.github/ISSUE_TEMPLATE/feature_request.md b/packages/HighFive/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 0000000000000000000000000000000000000000..4ead48053da37e38f6d9dce4a07d6824968ee963
--- /dev/null
+++ b/packages/HighFive/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,20 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context about the feature request here.
diff --git a/packages/HighFive/.github/build.sh b/packages/HighFive/.github/build.sh
new file mode 100644
index 0000000000000000000000000000000000000000..4cdad7e3ffdeb5c21a6ef7edb8173c2398f5158d
--- /dev/null
+++ b/packages/HighFive/.github/build.sh
@@ -0,0 +1,11 @@
+# Build env
+
+[ "$CC" ] && $CC --version
+cmake --version
+set -x
+export HIGHFIVE_BUILD=$GITHUB_WORKSPACE/build
+cmake -B $HIGHFIVE_BUILD -S $GITHUB_WORKSPACE \
+  -DCMAKE_BUILD_TYPE=$BUILD_TYPE \
+  -DCMAKE_INSTALL_PREFIX=$INSTALL_DIR \
+  "${CMAKE_OPTIONS[@]}"
+cmake --build $HIGHFIVE_BUILD --config $BUILD_TYPE --parallel 2 --verbose
diff --git a/packages/HighFive/.github/pull_request_template.md b/packages/HighFive/.github/pull_request_template.md
new file mode 100644
index 0000000000000000000000000000000000000000..58abc6c608ca3452deb51b29f4962e33886c1a0c
--- /dev/null
+++ b/packages/HighFive/.github/pull_request_template.md
@@ -0,0 +1,25 @@
+**Description**
+
+Please include a summary of the change and which issue is fixed or which feature is added.
+
+- [ ] Issue 1 fixed
+- [ ] Issue 2 fixed
+- [ ] Feature 1 added
+- [ ] Feature 2 added
+
+Fixes #(issue)
+
+**How to test this?**
+
+Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce if there is no integration test added with this PR. Please also list any relevant details for your test configuration
+
+```bash
+cmake ..
+make -j8
+make test
+```
+
+**Test System**
+ - OS: [e.g. Ubuntu 20.04]
+ - Compiler: [e.g. clang 12.0.0]
+ - Dependency versions: [e.g. hdf5 1.12]
diff --git a/packages/HighFive/.github/run_examples.sh b/packages/HighFive/.github/run_examples.sh
new file mode 100755
index 0000000000000000000000000000000000000000..77861f38846c279d551ec355b481892b34d4f89a
--- /dev/null
+++ b/packages/HighFive/.github/run_examples.sh
@@ -0,0 +1,20 @@
+#! /usr/bin/env bash
+
+set -e
+
+if [[ $# -eq 0 ]]
+then
+  examples_dir="."
+elif [[ $# -eq 1 ]]
+then
+  examples_dir="$1"
+else
+  echo "Usage: $0 [EXAMPLES_DIR]"
+  exit -1
+fi
+
+for f in "${examples_dir}"/*_bin
+do
+  echo "-- ${f}"
+  "${f}"
+done
diff --git a/packages/HighFive/.github/workflows/check_doxygen_awesome_version.yml b/packages/HighFive/.github/workflows/check_doxygen_awesome_version.yml
new file mode 100644
index 0000000000000000000000000000000000000000..233577ef862b841eb07cbd38c61f4d25a29fe1c8
--- /dev/null
+++ b/packages/HighFive/.github/workflows/check_doxygen_awesome_version.yml
@@ -0,0 +1,49 @@
+name: Auto-update doxygen-awesome
+
+on:
+  workflow_dispatch:
+  schedule:
+  - cron: 0 2 * * 1
+
+jobs:
+  check-for-updates:
+    runs-on: ubuntu-latest
+    env:
+      GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+    steps:
+      - uses: actions/checkout@v2
+      - name: Open PR if doxygen-awesome outdated.
+        run: |
+
+          VERSION=$(doc/doxygen-awesome-css/update_doxygen_awesome.sh "$(mktemp -d)")
+          BRANCH=update-doxygen-awesome-${VERSION}
+          COMMIT_MESSAGE="Update doxygen-awesome to ${VERSION}"
+
+          # NOTE: In a later runs of CI we will search for PR with this exact
+          #       title. Only if no such PR exists will the script create a
+          #       new PR.
+          PR_TITLE="[docs] Update doxygen-awesome to ${VERSION}"
+
+          if [[ -z "$(git status --porcelain)" ]]
+          then
+              echo "No differences detected: doxygen-awesome is up-to-date."
+              exit 0
+          fi
+
+          if [[ -z "$(gh pr list --state all --search "${PR_TITLE}")" ]]
+          then
+
+              git checkout -b $BRANCH
+              git config user.name github-actions
+              git config user.email github-actions@github.com
+              git commit -a -m "${COMMIT_MESSAGE}"
+
+              git push -u origin ${BRANCH}
+              gh pr create \
+                  --title "${PR_TITLE}" \
+                  --body "This PR was generated by a Github Actions workflow."
+
+          else
+              echo "Old PR detected: didn't create a new one."
+          fi
diff --git a/packages/HighFive/.github/workflows/ci.yml b/packages/HighFive/.github/workflows/ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e7f5fca10b9bf26ceb00276ec3dc749e11b0b94c
--- /dev/null
+++ b/packages/HighFive/.github/workflows/ci.yml
@@ -0,0 +1,351 @@
+name: HighFive_CI
+
+concurrency:
+  group: ${{ github.workflow }}#${{ github.ref }}
+  cancel-in-progress: true
+
+on:
+  push:
+    branches:
+      - ci_test
+      - release/**
+  pull_request:
+    branches:
+      - master
+      - release/**
+    paths-ignore:
+      - '**.md'
+      - '**.rst'
+      - 'doc/**'
+
+env:
+  HOMEBREW_NO_AUTO_UPDATE: 1  # for reproducibility, don't auto-update
+  BUILD_TYPE: RelWithDebInfo
+  INSTALL_DIR: install
+
+jobs:
+
+  # Job testing compiling on several Ubuntu systems + MPI
+  # =========================================================
+  #
+  # For 20.04:  bare and activate Boost, OpenCV
+  # For latest: activate Boost, Eigen, OpenCV, with Ninja
+  #
+  # XTensor tests are run for conda/mamba and MacOS
+  Linux_MPI:
+    runs-on: ${{matrix.config.os}}
+    name: Linux_MPI (${{toJson(matrix.config)}})
+    strategy:
+      matrix:
+        include:
+          - config:
+              os: ubuntu-20.04
+              pkgs: ''
+              flags: '-DHIGHFIVE_USE_BOOST:Bool=OFF'
+          - config:
+              os: ubuntu-20.04
+              pkgs: 'libboost-all-dev libopencv-dev'
+              flags: '-DHIGHFIVE_USE_OPENCV:Bool=ON -GNinja'
+          - config:
+              os: ubuntu-latest
+              pkgs: 'libboost-all-dev libeigen3-dev libopencv-dev'
+              flags: '-DHIGHFIVE_USE_EIGEN:Bool=ON -DHIGHFIVE_USE_OPENCV:Bool=ON -GNinja'
+          - config:
+              os: ubuntu-20.04
+              pkgs: 'libboost-all-dev'
+              flags: '-DCMAKE_CXX_STANDARD=17'
+          - config:
+              os: ubuntu-22.04
+              flags: '-DHIGHFIVE_USE_BOOST=Off -DCMAKE_CXX_STANDARD=20'
+
+    steps:
+    - uses: actions/checkout@v3
+      with:
+        submodules: true
+
+    - name: "Setup MPI"
+      uses: mpi4py/setup-mpi@v1
+      with:
+        mpi: openmpi
+
+    - name: "Install libraries"
+      run: |
+        sudo apt-get -qq update
+        sudo apt-get -qq install libhdf5-openmpi-dev libsz2 ninja-build ${{ matrix.config.pkgs }}
+
+    - name: Build
+      run: |
+        CMAKE_OPTIONS=(-DHIGHFIVE_PARALLEL_HDF5:BOOL=ON ${{ matrix.config.flags }})
+        source $GITHUB_WORKSPACE/.github/build.sh
+
+    - name: Test
+      working-directory: ${{github.workspace}}/build
+      run: ctest -j2 --output-on-failure -C $BUILD_TYPE
+
+
+  # Job testing several versions of hdf5
+  # ===================================================
+  Linux_HDF5_Versions:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        hdf5_version : [ hdf5-1_8_23, hdf5-1_10_11, hdf5-1_12_2, hdf5-1_14_3 ]
+
+    steps:
+    - uses: actions/checkout@v3
+      with:
+        submodules: true
+    - name: "Install libraries"
+      run: |
+        sudo apt-get -qq update
+        sudo apt-get -qq install ninja-build libsz2 zlib1g-dev libboost-all-dev
+
+    - name: Build HDF5
+      run: |
+        wget https://github.com/HDFGroup/hdf5/archive/refs/tags/${{ matrix.hdf5_version }}.tar.gz --output-document hdf5.tar.gz
+        tar xf hdf5.tar.gz
+        mkdir -p hdf5-${{ matrix.hdf5_version }}/BUILD && cd hdf5-${{ matrix.hdf5_version }}/BUILD
+        cmake .. -DCMAKE_BUILD_TYPE=Release -GNinja -DCMAKE_INSTALL_PREFIX=$HOME/${{ matrix.hdf5_version }} -DHDF5_ENABLE_Z_LIB_SUPPORT=ON -DUSE_LIBAEC=ON -DHDF5_BUILD_EXAMPLES=OFF -DBUILD_STATIC_LIBS=OFF -DBUILD_TESTING=OFF
+        ninja && ninja install
+
+    - name: Build
+      run: |
+        CMAKE_OPTIONS=(
+          -GNinja
+          -DHDF5_ROOT=$HOME/${{ matrix.hdf5_version }}
+        )
+        source $GITHUB_WORKSPACE/.github/build.sh
+
+    - name: Test
+      working-directory: ${{github.workspace}}/build
+      run: ctest -j2 --output-on-failure -C $BUILD_TYPE
+
+
+    - name: Examples
+      working-directory: ${{github.workspace}}/build/src/examples
+      run: $GITHUB_WORKSPACE/.github/run_examples.sh
+
+  # Job testing several compilers on a stable Linux
+  # ====================================================
+  Linux_Compilers:
+    runs-on: ubuntu-20.04
+    strategy:
+      matrix:
+        env: [
+          {CC: clang-10, CXX: clang++-10},
+          {CC: clang-12, CXX: clang++-12},
+          {CC: gcc-9, CXX: g++-9},
+          {CC: gcc-10, CXX: g++-10},
+        ]
+
+    steps:
+    - uses: actions/checkout@v3
+      with:
+        submodules: true
+
+    - name: "Install libraries"
+      run: |
+        sudo apt-get -qq update
+        sudo apt-get -qq install libboost-all-dev libhdf5-dev libsz2 ninja-build
+
+    - name: Build
+      env: ${{matrix.env}}
+      run: |
+        CMAKE_OPTIONS=(-GNinja)
+        source $GITHUB_WORKSPACE/.github/build.sh
+
+    - name: Test
+      working-directory: ${{github.workspace}}/build
+      run: ctest -j2 --output-on-failure -C $BUILD_TYPE
+
+    - name: Examples
+      working-directory: ${{github.workspace}}/build/src/examples
+      run: $GITHUB_WORKSPACE/.github/run_examples.sh
+
+  # Job running unit-test with sanitizers
+  # =====================================
+  Linux_Sanitizers:
+    runs-on: ubuntu-20.04
+    strategy:
+      matrix:
+        env: [
+          {CC: clang-12, CXX: clang++-12, HIGHFIVE_SANITIZER: address},
+          {CC: clang-12, CXX: clang++-12, HIGHFIVE_SANITIZER: undefined},
+          {CC: gcc-10, CXX: g++-10, HIGHFIVE_GLIBCXX_ASSERTIONS: On},
+        ]
+
+    steps:
+    - uses: actions/checkout@v3
+      with:
+        submodules: true
+
+    - name: "Install libraries"
+      run: |
+        sudo apt-get -qq update
+        sudo apt-get -qq install libboost-all-dev libeigen3-dev libhdf5-dev libsz2 ninja-build
+
+    - name: Build
+      env: ${{matrix.env}}
+      run: |
+        CMAKE_OPTIONS=(
+          -GNinja
+          -DHIGHFIVE_USE_BOOST:BOOL=ON
+          -DHIGHFIVE_USE_EIGEN:BOOL=ON
+          -DHIGHFIVE_BUILD_DOCS:BOOL=FALSE
+          -DHIGHFIVE_GLIBCXX_ASSERTIONS=${HIGHFIVE_GLIBCXX_ASSERTIONS:-OFF}
+          -DHIGHFIVE_SANITIZER=${HIGHFIVE_SANITIZER:-OFF}
+        )
+        source $GITHUB_WORKSPACE/.github/build.sh
+
+    - name: Test
+      working-directory: ${{github.workspace}}/build
+      run: ctest -j2 --output-on-failure -C $BUILD_TYPE
+
+    - name: Examples
+      working-directory: ${{github.workspace}}/build/src/examples
+      run: $GITHUB_WORKSPACE/.github/run_examples.sh
+
+
+  # Job to check using HighFive from other CMake projects
+  # =====================================================
+  CMake_Project:
+    runs-on: ubuntu-20.04
+    strategy:
+      matrix:
+        parallelism: [ serial, parallel ]
+    steps:
+    - uses: actions/checkout@v3
+      with:
+        submodules: true
+
+    - name: "Update Ubuntu"
+      run: |
+        sudo apt-get -qq update
+
+    - name: "Install common libraries"
+      run: |
+        sudo apt-get -qq install libboost-all-dev libsz2 ninja-build
+
+    - name: "Install serial HDF5"
+      if: matrix.parallelism == 'serial'
+      run: |
+        sudo apt-get -qq install libhdf5-dev
+
+
+    - name: "Install parallel HDF5"
+      if: matrix.parallelism == 'parallel'
+      run: |
+        sudo apt-get -qq install libhdf5-openmpi-dev
+
+    - name: "CMake Project Integration"
+      run: bash tests/test_project_integration.sh
+
+
+  # Job checking the benchmarks work
+  # ================================
+  Benchmarks:
+    runs-on: ubuntu-20.04
+    steps:
+    - uses: actions/checkout@v3
+      with:
+        submodules: true
+
+    - name: "Install libraries"
+      run: |
+        sudo apt -y update
+        sudo apt -y install --no-install-recommends libhdf5-dev pkg-config
+
+    - name: "Build benchmarks"
+      working-directory: ${{github.workspace}}/src/benchmarks
+      run: make
+
+    - run: time ${{github.workspace}}/src/benchmarks/highfive_bench
+
+
+  # Job testing in OSX
+  # ==================
+  OSX:
+    runs-on: ${{matrix.os}}
+    strategy:
+      matrix:
+        os: [ "macOS-12" ]
+        cxxstd: ["14", "17", "20"]
+
+    steps:
+    - uses: actions/checkout@v3
+      with:
+        submodules: true
+
+    - name: "Install libraries (OSX)"
+      run: brew install boost eigen hdf5 ninja xtensor
+
+    - name: Build
+      run: |
+        CMAKE_OPTIONS=(
+          -GNinja
+          -DHIGHFIVE_USE_BOOST:BOOL=ON
+          -DHIGHFIVE_USE_EIGEN:BOOL=ON
+          -DHIGHFIVE_USE_XTENSOR:BOOL=ON
+          -DHIGHFIVE_BUILD_DOCS:BOOL=FALSE
+          -DHIGHFIVE_TEST_SINGLE_INCLUDES=ON
+          -DCMAKE_CXX_FLAGS="-coverage -O0"
+          -DCMAKE_CXX_STANDARD=${{matrix.cxxstd}}
+        )
+        source $GITHUB_WORKSPACE/.github/build.sh
+
+    - name: Test
+      working-directory: ${{github.workspace}}/build
+      run: ctest -j2 --output-on-failure -C $BUILD_TYPE
+
+    - name: Examples
+      working-directory: ${{github.workspace}}/build/src/examples
+      run: $GITHUB_WORKSPACE/.github/run_examples.sh
+
+
+  # Job testing in Windows
+  # ======================
+  Windows:
+    runs-on:  ${{matrix.os}}
+    strategy:
+      matrix:
+        os: [ "windows-2022"]
+        vs-toolset: [ "v141", "v143" ]
+        cxxstd: ["14", "17", "20"]
+
+        include:
+          - os: "windows-2019"
+            vs-toolset: "v142"
+            cxxstd: "14"
+
+          - os: "windows-2019"
+            vs-toolset: "v142"
+            cxxstd: "17"
+
+    steps:
+    - uses: actions/checkout@v3
+      with:
+        submodules: true
+
+    - uses: mamba-org/setup-micromamba@v1
+      with:
+        environment-file: doc/environment.yaml
+        environment-name: win-test
+
+    - name: Build
+      shell: bash -l {0}
+      run: |
+        CMAKE_OPTIONS=(
+          -T ${{matrix.vs-toolset}}
+          -DCMAKE_CXX_STANDARD=${{matrix.cxxstd}}
+          -DHIGHFIVE_UNIT_TESTS=ON
+          -DHIGHFIVE_USE_BOOST:BOOL=ON
+          -DHIGHFIVE_USE_EIGEN:BOOL=ON
+          -DHIGHFIVE_USE_XTENSOR:BOOL=ON
+          -DHIGHFIVE_TEST_SINGLE_INCLUDES=ON
+        )
+        source $GITHUB_WORKSPACE/.github/build.sh
+
+    - name: Test
+      working-directory: ${{github.workspace}}/build
+      shell: bash -l {0}
+      run: ctest -j2 --output-on-failure -C $BUILD_TYPE
diff --git a/packages/HighFive/.github/workflows/clang_format.yml b/packages/HighFive/.github/workflows/clang_format.yml
new file mode 100644
index 0000000000000000000000000000000000000000..56f2fd8d53b78f0d709ae0ccc937207f1ba57083
--- /dev/null
+++ b/packages/HighFive/.github/workflows/clang_format.yml
@@ -0,0 +1,39 @@
+name: ClangFormat
+
+concurrency:
+  group: ${{ github.workflow }}#${{ github.ref }}
+  cancel-in-progress: true
+
+on:
+  pull_request:
+    branches:
+      - master
+    paths-ignore:
+      - '**.md'
+      - '**.rst'
+      - 'doc/**'
+
+jobs:
+  Code_Format:
+    runs-on: ubuntu-latest
+
+    steps:
+    - uses: actions/checkout@v3
+
+    - name: Run clang-format
+      run: |
+          clang-format --version
+          for i in $(git ls-files | grep ".[ch]pp$");
+          do
+            clang-format -i "$i" > /dev/null 2>&1;
+          done
+          modified_files=$(git diff --name-only)
+          if [ -n "$modified_files" ];
+          then
+              echo "Some files are not well formatted:"
+              echo $modified_files
+              echo ""
+              echo "The diff is:"
+              git diff
+              exit 1
+          fi
diff --git a/packages/HighFive/.github/workflows/coverage.yml b/packages/HighFive/.github/workflows/coverage.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b3f4a212bcc442cef74ade505681ff3a1e61ac28
--- /dev/null
+++ b/packages/HighFive/.github/workflows/coverage.yml
@@ -0,0 +1,68 @@
+name: Coverage
+
+concurrency:
+  group: ${{ github.workflow }}#${{ github.ref }}
+  cancel-in-progress: true
+
+on:
+  push:
+    branches:
+      - master
+      - ci_test
+      - release/**
+  pull_request:
+    branches:
+      - master
+      - release/**
+    paths-ignore:
+      - '**.md'
+      - '**.rst'
+      - 'doc/**'
+
+env:
+  BUILD_TYPE: RelWithDebInfo
+  INSTALL_DIR: install
+
+jobs:
+  # Job producing code coverage report
+  # ==================================
+  Code_coverage:
+    # When the ubuntu version is upgraded, check whether xtensor is now available
+    runs-on: ubuntu-20.04
+
+    steps:
+    - name: "Install libraries"
+      run: |
+        sudo apt-get update
+        sudo apt-get install lcov libboost-all-dev libhdf5-dev libeigen3-dev libopencv-dev libsz2 ninja-build
+
+    - uses: actions/checkout@v3
+      with:
+        fetch-depth: 2
+        submodules: true
+
+    - name: Build for code coverage
+      run: |
+        CMAKE_OPTIONS=(
+          -GNinja
+          -DHIGHFIVE_USE_BOOST:BOOL=ON
+          -DHIGHFIVE_USE_EIGEN:BOOL=ON
+          -DHIGHFIVE_USE_OPENCV:BOOL=ON
+          #-DHIGHFIVE_USE_XTENSOR:BOOL=ON
+          -DHIGHFIVE_TEST_SINGLE_INCLUDES=ON
+          -DHIGHFIVE_BUILD_DOCS:BOOL=FALSE
+          -DCMAKE_CXX_FLAGS="-coverage -O0"
+        )
+        source $GITHUB_WORKSPACE/.github/build.sh
+    - name: Test for code coverage
+      run: |
+        lcov --capture  --initial --directory . --no-external --output-file build/coverage-base.info
+        (cd build; cmake --build . --target test)
+        lcov --capture  --directory . --no-external --output-file build/coverage-run.info
+        (cd build; lcov --add-tracefile coverage-base.info --add-tracefile coverage-run.info --output-file coverage-combined.info)
+    - uses: codecov/codecov-action@v3
+      with:
+        files: ./build/coverage-combined.info
+        fail_ci_if_error: false
+        verbose: true
+        token: ${{ secrets.CODECOV_TOKEN }}
diff --git a/packages/HighFive/.github/workflows/gh-pages.yml b/packages/HighFive/.github/workflows/gh-pages.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2032f91abc042c9704b420b021e72abbf52241f3
--- /dev/null
+++ b/packages/HighFive/.github/workflows/gh-pages.yml
@@ -0,0 +1,50 @@
+name: gh-pages
+
+on:
+  push:
+    branches:
+      - master
+  pull_request:
+    branches:
+      - master
+
+jobs:
+
+  publish:
+
+    runs-on: ubuntu-latest
+
+    defaults:
+      run:
+        shell: bash -l {0}
+
+    steps:
+
+    - uses: actions/checkout@v3
+      with:
+        submodules: 'recursive'
+
+    - uses: mamba-org/setup-micromamba@v1
+      with:
+        environment-file: doc/environment.yaml
+        environment-name: doc-build
+
+    - name: Run doxygen
+      run: |
+        CMAKE_OPTIONS=(
+          -DHIGHFIVE_UNIT_TESTS=OFF
+        )
+        cmake -B build -S . "${CMAKE_OPTIONS[@]}"
+        cmake --build build --target doc
+        cp -r doc/poster build/doc/html/
+
+    - name: Deploy to GitHub Pages
+      if: ${{ success() && github.ref == 'refs/heads/master' && github.event_name == 'push' }}
+      uses: crazy-max/ghaction-github-pages@v2
+      with:
+        target_branch: gh-pages
+        build_dir: build/doc/html
+        jekyll: false
+        keep_history: false
+      env:
+        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/packages/HighFive/.github/workflows/integration_trigger.yml b/packages/HighFive/.github/workflows/integration_trigger.yml
new file mode 100644
index 0000000000000000000000000000000000000000..3fe65f329f89f07225b486e643579d88747014f9
--- /dev/null
+++ b/packages/HighFive/.github/workflows/integration_trigger.yml
@@ -0,0 +1,15 @@
+name: Integration Test Trigger
+on:
+  pull_request:
+    types: [closed]
+jobs:
+  merge-PR:
+    if: github.event.pull_request.merged == true
+    runs-on: ubuntu-latest
+    steps:
+      - name: Trigger integration tests on BlueBrain/HighFive-testing
+        run: |
+          curl -X POST https://api.github.com/repos/BlueBrain/HighFive-testing/dispatches \
+          -H 'Accept: application/vnd.github.everest-preview+json' \
+          -u ${{ secrets.ACCESS_TOKEN }} \
+          --data '{"event_type": "merge", "client_payload": { "repository": "'"$GITHUB_REPOSITORY"'" }}'
diff --git a/packages/HighFive/.github/workflows/version_file.yml b/packages/HighFive/.github/workflows/version_file.yml
new file mode 100644
index 0000000000000000000000000000000000000000..816137e955fe920c432cf985cbd3a43fa2e924ff
--- /dev/null
+++ b/packages/HighFive/.github/workflows/version_file.yml
@@ -0,0 +1,36 @@
+name: HighFive Check Version File
+
+on:
+  push:
+    branches:
+      - ci_test
+      - release/**
+  pull_request:
+    branches:
+      - master
+      - release/**
+
+jobs:
+  CheckVersion:
+    runs-on: ubuntu-latest
+
+    steps:
+    - uses: actions/checkout@v3
+      with:
+        submodules: true
+
+    - name: "Install libraries"
+      run: |
+        sudo apt-get -qq update
+        sudo apt-get -qq install libhdf5-dev ninja-build
+
+    - name: Build
+      run: |
+        # Will trigger `configure_file` for H5Version.hpp.
+        cmake -DHIGHFIVE_USE_BOOST=Off -B build .
+
+    - name: Test
+      run: |
+        # Check that the file hasn't changed, i.e. was updated
+        # after changing the version number.
+        ! git status | grep include/highfive/H5Version.hpp
diff --git a/packages/HighFive/.gitignore b/packages/HighFive/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..75c4e044b835537c2b9069e5c401af9d8d99d48a
--- /dev/null
+++ b/packages/HighFive/.gitignore
@@ -0,0 +1,5 @@
+build
+tests/test_project
+.idea
+
+.vs/
diff --git a/packages/HighFive/.gitmodules b/packages/HighFive/.gitmodules
new file mode 100644
index 0000000000000000000000000000000000000000..615f78fe150bf80a0a4eeaec54417e2c218fc22b
--- /dev/null
+++ b/packages/HighFive/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "deps/catch2"]
+	path = deps/catch2
+	url = https://github.com/catchorg/Catch2.git
diff --git a/packages/HighFive/.gitrepo b/packages/HighFive/.gitrepo
new file mode 100644
index 0000000000000000000000000000000000000000..14b8f5fba715beda2d02da0beb4c2e267ac16373
--- /dev/null
+++ b/packages/HighFive/.gitrepo
@@ -0,0 +1,12 @@
+; DO NOT EDIT (unless you know what you are doing)
+;
+; This subdirectory is a git "subrepo", and this file is maintained by the
+; git-subrepo command. See https://github.com/ingydotnet/git-subrepo#readme
+;
+[subrepo]
+	remote = git@github.com:BlueBrain/HighFive.git
+	branch = master
+	commit = 88fcc898970f93a63be8e25760d6b9f33589690f
+	parent = 88cd46aaec511360705df0fc7e577098877862d0
+	method = merge
+	cmdver = 0.4.6
diff --git a/packages/HighFive/.travis.yml b/packages/HighFive/.travis.yml
new file mode 100644
index 0000000000000000000000000000000000000000..bc5d34081016e68a344a5eb34ee671075ae8f22e
--- /dev/null
+++ b/packages/HighFive/.travis.yml
@@ -0,0 +1,138 @@
+# Adapted from various sources, including:
+# - Louis Dionne's Hana: https://github.com/ldionne/hana
+# - Paul Fultz II's FIT: https://github.com/pfultz2/Fit
+# - Eric Niebler's range-v3: https://github.com/ericniebler/range-v3
+# - Gabi Melman spdlog: https://github.com/gabime/spdlog
+
+sudo: required
+language: cpp
+
+addons: &gcc7
+  apt:
+    packages:
+      - g++-7
+      - libboost-all-dev
+      - libhdf5-openmpi-dev
+      - libeigen3-dev
+      - ninja-build
+    sources:
+      - ubuntu-toolchain-r-test
+
+matrix:
+  include:
+    # Older linux (trusty) with default gcc
+    # Install serial hdf5 + build serial
+    - os: linux
+      dist: trusty
+      env:
+        - HIGHFIVE_USE_XTENSOR=False
+        - HIGHFIVE_USE_OPENCV=False
+        - HIGHFIVE_PARALLEL_HDF5=False
+        - IS_BASE_ENVIRON=1
+      addons:
+        apt:
+          packages:
+            - libboost-all-dev
+            - libeigen3-dev
+            - libhdf5-serial-dev
+            - ninja-build
+
+    # Linux gcc-7
+    # Install parallel hdf5 + build parallel
+    - os: linux
+      dist: xenial
+      env:
+        - GCC_VERSION=7
+        - HIGHFIVE_USE_XTENSOR=True
+        - HIGHFIVE_USE_OPENCV=False
+        - HIGHFIVE_PARALLEL_HDF5=True
+      addons: *gcc7
+
+    # Mac OSX XCode 10
+    - os: osx
+      osx_image: xcode10.3
+      env:
+        - HIGHFIVE_USE_XTENSOR=True
+        - HIGHFIVE_USE_OPENCV=True
+        - HIGHFIVE_PARALLEL_HDF5=False
+
+    # Windows
+    - os: windows
+      env:
+        - HIGHFIVE_USE_XTENSOR=True
+        - HIGHFIVE_USE_OPENCV=True
+        - HIGHFIVE_PARALLEL_HDF5=False
+
+env:
+  global:
+    - MINCONDA_VERSION="latest"
+    - MINCONDA_LINUX="Linux-x86_64"
+    - MINCONDA_OSX="MacOSX-x86_64"
+
+install:
+  - export HOMEBREW_NO_AUTO_UPDATE=1  # for reproducibility, don't auto-update
+
+  - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
+      MINCONDA_OS=$MINCONDA_LINUX;
+    elif [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
+      if [ "$BREW_USE_LATEST" ]; then
+        brew update;
+        brew install hdf5; brew upgrade hdf5;
+      fi;
+      brew install boost hdf5 eigen ninja;
+      MINCONDA_OS=$MINCONDA_OSX;
+    fi
+
+  - if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then
+      export CMAKE_GENERATOR="Visual Studio 15 2017 Win64" ;
+      export TESTS_TARGET="RUN_TESTS";
+      choco install --yes miniconda3 ;
+      source C:/Tools/miniconda3/Scripts/activate ;
+    else
+      export CMAKE_GENERATOR="Ninja" ;
+      export TESTS_TARGET="test";
+      wget "http://repo.continuum.io/miniconda/Miniconda3-$MINCONDA_VERSION-$MINCONDA_OS.sh" -O miniconda.sh;
+      bash miniconda.sh -b -p $HOME/miniconda ;
+      source $HOME/miniconda/bin/activate;
+      hash -r ;
+    fi
+  - conda config --set always_yes yes --set changeps1 no
+  - conda update -q conda
+  - conda install -c conda-forge mamba
+  - if [[ "$HIGHFIVE_USE_XTENSOR" == "True" ]]; then
+      mamba install -c conda-forge xtl xsimd xtensor;
+    fi
+  - if [[ "$HIGHFIVE_USE_OPENCV" == "True" ]]; then
+      mamba install -c conda-forge libopencv opencv;
+    fi
+  - if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then
+      mamba install -c conda-forge boost-cpp hdf5 eigen;
+    fi
+
+before_script:
+  - if [ -n "$GCC_VERSION" ]; then export CXX="g++-${GCC_VERSION}" CC="gcc-${GCC_VERSION}"; fi
+  - if [ -n "$CLANG_VERSION" ]; then export CXX="clang++-${CLANG_VERSION}" CC="clang-${CLANG_VERSION}"; fi
+  - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then export CXX="clang++" CC="clang"; fi
+  - which $CXX
+  - which $CC
+  - $CXX --version
+  - cmake --version
+
+script:
+  - cd ${TRAVIS_BUILD_DIR}
+  - mkdir -p build && pushd build
+  - >
+    cmake --warn-uninitialized --debug-output
+    -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON
+    -DHIGHFIVE_TEST_SINGLE_INCLUDES:BOOL=ON
+    -DHIGHFIVE_PARALLEL_HDF5:BOOL=${HIGHFIVE_PARALLEL_HDF5}
+    -DHIGHFIVE_USE_EIGEN:BOOL=ON
+    -DHIGHFIVE_USE_XTENSOR:BOOL=${HIGHFIVE_USE_XTENSOR}
+    -DHIGHFIVE_USE_OPENCV:BOOL=${HIGHFIVE_USE_OPENCV}
+    -G "${CMAKE_GENERATOR}" ../
+  - cmake --build .
+  - CTEST_OUTPUT_ON_FAILURE=1 cmake --build . --target ${TESTS_TARGET}
+  - popd
+  - if [ $IS_BASE_ENVIRON ]; then
+      bash tests/test_project_integration.sh;
+    fi
diff --git a/packages/HighFive/AUTHORS.txt b/packages/HighFive/AUTHORS.txt
new file mode 100644
index 0000000000000000000000000000000000000000..78f573a9a6abfe5db14e7ea8921b89e80bb78d21
--- /dev/null
+++ b/packages/HighFive/AUTHORS.txt
@@ -0,0 +1,58 @@
+Adrien Devresse 
+Alexandru Săvulescu
+Ali Can Demiralp
+Angelos Plastropoulos
+@antonysigma
+Chris Byrohl
+Chris De Grendele
+@contre
+Daniel Nachbaur
+Dmitri Bichko
+@eudoxos
+Fernando L. Pereira
+@guoxy
+Haoran Ni
+Henry Schreiner
+@hn-sl
+Hunter Belanger
+@JaWSnl
+Jia Li
+John W. Peterson
+Jonas Karlsson
+Jorge Blanco Alonso
+Kerim Khemraev
+Luc Grosheintz
+Marian Heil
+Mario Emmenlauer
+Mark Bicknell
+Mathieu Bernard
+Matthias Wolf
+Maximilian Nöthe
+@Mightrider
+Mike DePalatis
+Mike Gevaert
+Moritz Koenemann
+Nico Jahn
+Nicolas Cornu (maintainer)
+Omar Awile
+Pablo Toharia
+Philip Deegan
+Philipp Gloor
+Pramod Kumbhar
+@Quark-X10
+Richard Shaw
+Rick Nitsche
+Rob Latham
+Sergio Botelh
+Sergio Rivas-Gomez
+@spacescientist
+Taiguara Tupinambás
+@timocafe
+Tino Wagner
+Tobias Klauser
+Tom de Geus
+Tom Vander Aa
+Torsten Reuschel
+Tristan Carel
+Wolf Vollprecht
+Y. Yang
diff --git a/packages/HighFive/CHANGELOG.md b/packages/HighFive/CHANGELOG.md
new file mode 100644
index 0000000000000000000000000000000000000000..9a8cd86139f80bc1523e6f0e33fc049316a0f830
--- /dev/null
+++ b/packages/HighFive/CHANGELOG.md
@@ -0,0 +1,278 @@
+# Changes
+## Version 2.8.0 - 2023-11-02
+### Important Change
+    - `Eigen::Matrix` is (by default) stored with column-major index ordering. Under
+      certain conditions `Eigen::Matrix` was written and read as row-major.
+      Due to code duplication H5Easy isn't affected by this bug. Starting
+      `2.8.0` HighFive will now throw an exception whenever prior versions would
+      have read with incorrect assumptions about the index ordering. (#731)
+
+### New Features
+    - Improve reading and writing `std::string` as fixed and variable length HDF5 strings (#744).
+    - Implement creation of hard links (#765). Thanks to @Quark-X10.
+    - Get the size of a file and the amount of tracked unused space (#764). Thanks to @Quark-X10.
+    - `class DataType` has a new ctor to open a committed `DataType` (#796). Thanks to @Quark-X10.
+    - Allow user-specified `mem_space` for hyperslabs. (#740)
+    - New properties: `AttributePhaseChange`. (#785)
+    - New options to link against HDF5 statically (#823). Thanks @HunterBelanger.
+    - Add support for `std::complex<integral_type>` valid with C++23 (#828). Thanks @unbtorsten.
+    - Add a top-level header to include all components (#818).
+
+### Improvements
+    - Add concept checks to `Property` if C++20 for better errors (#811). Thanks @antonysigma.
+    - Add parallel HDF5 test in CI (#760).
+    - Simplify github workflow (#761).
+    - Move inspectors in their own file to be able to better implements strings (#759).
+
+### Bug Fix
+    - Fix vector constructor ambiguity in H5DataType.hpp (#775). Thanks to @hn-sl.
+    - `getElementCount()` fixed. (#787)
+    - Remove leak when calling dtor of `CompoundType`. (#798)
+
+## Version 2.7.1 - 2023-04-04
+### Bug Fix
+    - Revert removing `#include "H5FileDriver.hpp"` from `H5File.hpp` (#711).
+    - Change relative import to "../H5Utility.hpp" (#726).
+    - Fix nameclash with macros on Windows (#717 #722 #723).
+    - Add workaround for MSVC bug (#728).
+    - Don't downgrade the requested C++ standard (#729).
+
+## Version 2.7.0 - 2023-03-31
+### New Features
+    - Properties can now be read (#684).
+    - Adding a property for LinkCreationOrder (#683).
+    - Adding a logging infrastructure (#690).
+    - Support of bool in the way of h5py (#654).
+    - Support `std::bool` in C++17 mode (#698).
+
+### Improvements
+    - Catch2 move to v3 (#655).
+
+### Bug Fix
+    - To avoid build failure in certain circumstances, user can not set `Boost_NO_BOOST_CMAKE` (#687).
+    - Fix leak when reading variable length strings (#660).
+    - Use `H5free_memory` instead of `free` in error handler (#665). Thanks to Moritz Koenemann.
+    - Fix a bug with old GCC due to templated friend classes (#688).
+    - Fix regression in broadcasting support (#697).
+    - Fix bug related to zero-length datasets (#702).
+
+## Version 2.6.2 - 2022-11-10
+### Bug Fix
+    - Allow CMake to use Config mode to find HDF5.
+
+## Version 2.6.1 - 2022-11-08
+### Bug Fix
+    - Version bump in `CMakeLists.txt`.
+
+## Version 2.6.0 - 2022-11-08
+### New Features
+    - Enable page buffered reading (#639).
+
+### Improvements
+    - Warn when detecting lossy reads or write of floating point data (#636).
+
+## Version 2.5.1 - 2022-11-07
+### Bug Fix
+    - Fix missing `inline` for collective metadata properties.
+
+## Version 2.5.0 - 2022-11-03
+### New Features
+    - Enable collective MPI IO using the Data Transfer Property (#623). Thanks to Rob Latham.
+    - Add a support for half-precision (16-bit) floating-point based on the Half library (http://half.sourceforge.net) (#587). Thanks to Sergio Botelh.
+    - Enable choosing the allocation time of datasets (#627).
+    - Add possibility to get and set file space strategy. For page allocated files wrap the API to set/retrieve the page size (#618).
+    - Add API for getting Access and Create property lists of HighFive objects (#629).
+    - Let users configure metadata reads and writes at file level (#624). Thanks to Rob Latham.
+
+### Improvements
+    - MPIOFileDriver is now deprecated. Use FileAccessProps (#622).
+    - Support of block argument in API (#584).
+    - Serialization of types is now automagic and so recursive (#586).
+    - Add an argument to specify File Create Properties in the File class constructor (#626).
+
+### Bug Fixes
+    - Padding of Compound Types (#581).
+    - Compilation with Visual Studio with C++17 or later (#578). Thanks to Mark Bicknell.
+    - Avoid leaking when printing stack for error (#583).
+
+## Version 2.4.1 - 2022-05-11
+### New Features
+    - Support `std::complex`. Thanks to Philipp.
+
+### Improvements
+    - Improve EnumType/CompoundType
+    - Revert quirky behaviour of `select(const HyperSlab&)`.
+    - All `get_name` functions takes `size_t` and not `hsize_t`.
+    - Remove nix recipes.
+
+### Bug Fixes
+    - Computation of padding.
+    - Related to `0` being an invalid hid but not equal to `H5I_INVALID_HID`.
+
+## Version 2.4.0 - 2022-04-05
+### New Features
+    - Construct a compound type from an already existing hid (#469). Thanks to Maximilian Nöthe.
+    - Add support for long double (#494)
+    - Add support for H5Pset_libver_bounds and H5Pset_meta_block_size support (#500)
+    - New interface to select complex hyperslabs, irregular hyperslabs are limited to/from 1D array (#538 and #545)
+### Improvements
+    - Use inline where it is needed, otherwise some code can lead to "multiple definition" (#516). Thanks to Chris Byrohl.
+    - Use Catch2 instead of boost for tests, reduces dependencies (#521)
+    - CI reworked to test external libraries more thoroughly (boost, eigen, xtensor) (#536)
+### Bug Fixes
+    - Better support of const types (#460). Thanks to Philip Deegan.
+    - Vector of size zero was previously lead to UB (#502). Thanks to Haoran Ni.
+    - Use H5T_NATIVE_SCHAR instead of H5T_NATIVE_CHAR for "signed char" (#518)
+
+## Version 2.3.1 - 2021-08-04
+### Improvements
+    - Clean cmake files from old code (#465)
+    - Adding path to type warning message (#471)
+    - Adding compound types example, w dataset and attr (#467)
+
+### Bug Fixes
+    - Resolve an issue where padding of nested compound types were being calculated incorrectly (#461) (#468)
+    - GHA: drop previous runs (#462)
+
+## Version 2.3 - 2021-05-07
+### New Features:
+    - Add SZIP support (#435)
+    - Add option *parents* to createDataSet (#425)
+    - Implementing getting the filename dynamically (#424)
+    - Ability to create soft and external links (#421)
+    - Generalizing getPath and adding getFile as PathTraits (#417)
+
+### Improvements:
+    - Unified reading/writing attributes and datasets (#450)
+    - Old compilers have been removed from docker image (#430)
+    - Cleaning up and improving property lists (#429)
+    - An example using hdf5 references (#396) (#397)
+    - Add all property lists alias for completeness (#427)
+    - Add property CreateIntermediateGroup (#423)
+    - Add code coverage through codecov.io (#420)
+    - Introducing GitHub Actions CI (#416)
+    - Create issue and PR templates (#412)
+    - Initialize SilenceHDF5 to true in _exist (#411)
+    - Generalizing xtensor API (#407)
+    - Minor doc updates (#409)
+    - Fixing minor error in GH Action (#408)
+    - Uploading docs to gh-pages using GitHub Actions (#403)
+    - Various minor documentation updates (#405)
+    - optional documentation building in cmake (#377)
+    - From can be automatic now (#384)
+    - get_dim_vector in inspector (#383)
+    - Put type_of_array in inspector (#382)
+    - Move array_dims in the future manipulator (#381)
+    - Unify interface of H5Attribute with H5Slice_traits (#378)
+    - Use std::move in NRVO depending of version of GCC (#375)
+    - Fixed typo '-DD' to '-D' in 'Dependencies'. (#371)
+    - Changing date format (#364)
+
+### Bug fixes:
+    - Fix use before initialization (#414)
+    - Adding CMake include guard (#389)
+
+## Version 2.2.2 - 2020-07-30
+### New Features:
+    - [H5Easy] Adding OpenCV support (#343)
+    - [H5Easy] Enabling compression & Adding attributes (#337)
+    - Adding missing function to H5Attribute (#337) 
+    - Add methods to retrieve Node paths or Dataset names and rename objects (#346)
+    - Add a file with the current version number of HighFive (#349)
+
+### Improvements
+    - [H5Easy] Updating error message dump (#335)
+    - [H5Easy] Switching implementation to partial specialization based on static dispatch (#327)
+    - Simplifying imports, new policy (#324)
+
+## Version 2.2.1 - 2020-04-28
+### Improvements
+    - Add a mechanism to not include target HighFive several times (#336)
+    - Fix SilenceHDF5 initialization for NodeTraits (#333)
+
+## Version 2.2 - 2020-03-23
+### New Features:
+    - Compound Types: API to register and read/write structs (#78). Thanks to Richard Shaw.
+    - Fixed-length strings. API via char[] and `FixedLenStringArray`(#277)
+    - Enum data types (#297)
+    - Datasets of HDF5 References. Support to dereference groups and datasets (#306)
+    - Objects (hard/soft link) can now be deleted with `unlink` (#284). Thanks to Tom Vander Aa.
+    - Attributes can be deleted with `deleteAttribute` (#239)
+
+### Improvements:
+    - `Attribute`s (metadata) now support additional types (#298)
+    - H5Easy: Reworked for compatibility with `Eigen::ref` and `Eigen::Map` (#291, #293)
+    - Hdf5 1.12 compatibility: working `Object::getInfo` and marking getAddress deprecated (#311)
+    - Strict compatibility with CMake 3.1 and C++11 (#304)
+    - CMake: Dependencies may be re-detected on FindPackage, fixed export targets and added integration tests (#255, #304, #312, #317)
+    - Support for array of `Eigen::Matrix` (#258)
+    - Selection: `ElementSet` working for N-dimensions (#247)
+
+### Bug Fixes:
+    - Shortcut syntax with c arrays (#273)
+    - Compatibility with MSVC (Exception messages #263 and avoid throwing in `exist` check #308)
+
+## Version 2.1 - 2019-10-30
+### New Features:
+    - Inspection: API to get the type of links/objects and datasets data-types (#221)
+    - H5Easy: API for simple import/export to Eigen and xtensor (#141)
+    - Support for chunk and deflate configuration at dataset creation/open (#125). Added generic RawPropertyLists. (#157)
+    - Recursive `createGroup` and `exist` (#152)
+    - Shortcut syntax: ability to create a filled dataset in a single line (#130)
+    - DataSet now accepts `std::complex` and `std::array`'s (#128, #129)
+
+### Improvements:
+    - Improved compatibility with MSVC and ICC compilers
+    - CMake build system: modernized, create exported targets, better messages, etc.
+    - Building and publishing documentation: https://bluebrain.github.io/HighFive/
+    - Several others; see #231
+
+### Bug Fixes:
+    - Fixed header dependencies. They are now all include-able (#225)
+    - Fixed read/write of N-Dimensional data as nested vectors (#191)
+    - Fixed data broadcasting for reading (#136)
+
+## Version 2.0 - 2018-07-19
+    - First version with C++11 enforcement
+    - Support for property list
+    - Support for Chunking
+    - Support for Compression / Deflate
+    - Fix: missing move constructor for properties
+    - Fix: typo in MPI IO driver
+    - Fix: several typo fixes
+    - Fix: Add missing include
+
+## Version 1.5 - 2018-01-06
+    - SliceTraits::read split in two overloads, the first one for plain C arrays
+      and the second one for other types.
+    - Add support for complex numbers
+    - Add exist() method to the API
+    - Will be last release before 2.0 and enforcement of C++11
+
+## Version 1.4 - 2017-08-25
+    - Support id selection for the `select` function
+    - Support STL containers of const elements
+    - Support scalar values and strings management
+    - Fix attribute assignment issue #40
+    - Fix Object assignment operator missing unref (possible memory leak)
+    - Introduce SilenceHDF5 for HDF5 error report
+    - Fix a unit test issue with SilenceHDF5
+
+## Version 1.3 - 2017-06-21
+    - Minor fixes
+
+## Version 1.2 - 2017-04-03
+    - Add Attribute support for Dataset
+    - Extend testing of Attribute support
+    - Fix issue related to multiple definitions in default driver
+    - Add more examples about attribute support
+
+## Version 1.1 - 2017-03-23
+    - Add support and examples for Parallel HDF5
+    - Initial implementation for H5 Properties
+    - Support for Attributes
+    - Improve documentation
+    - Add example for boost.Ublas matrix support
+
+## Version 1.0 - Init
+    - Initial release
diff --git a/packages/HighFive/CMake/HighFiveConfig.cmake.in b/packages/HighFive/CMake/HighFiveConfig.cmake.in
new file mode 100644
index 0000000000000000000000000000000000000000..464a645d038f8221583fd5019eb81f05662fdef4
--- /dev/null
+++ b/packages/HighFive/CMake/HighFiveConfig.cmake.in
@@ -0,0 +1,74 @@
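+# Append each INTERFACE usage requirement of `source` onto `target` via
+# generator expressions, so the recreated HighFive target exposes the same
+# compile definitions, include directories and link libraries.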
+function(copy_interface_properties target source)
+  foreach(prop
+          INTERFACE_COMPILE_DEFINITIONS
+          INTERFACE_COMPILE_FEATURES
+          INTERFACE_COMPILE_OPTIONS
+          INTERFACE_INCLUDE_DIRECTORIES
+          INTERFACE_LINK_LIBRARIES
+          INTERFACE_SOURCES
+          INTERFACE_SYSTEM_INCLUDE_DIRECTORIES)
+    set_property(TARGET ${target} APPEND PROPERTY ${prop} $<TARGET_PROPERTY:${source},${prop}>)
+  endforeach()
+endfunction()
+
+if(TARGET HighFive)
+    return()
+endif()
+
+@PACKAGE_INIT@
+
+# Get HighFive targets
+include("${CMAKE_CURRENT_LIST_DIR}/HighFiveTargets.cmake")
+
+# Recreate combined HighFive
+add_library(HighFive INTERFACE IMPORTED)
+set_property(TARGET HighFive APPEND PROPERTY INTERFACE_COMPILE_DEFINITIONS MPI_NO_CPPBIND)  # No c++ bindings
+
+# Ensure we activate required C++ std
+if(NOT DEFINED CMAKE_CXX_STANDARD)
+  if(CMAKE_VERSION VERSION_LESS 3.8)
+    message(WARNING "HighFive requires minimum C++11. (C++14 for XTensor) \
+        You may need to set CMAKE_CXX_STANDARD in you project")
+  else()
+    # A client request for a higher std overrides this
+    target_compile_features(HighFive INTERFACE cxx_std_11)
+  endif()
+endif()
+
+# If the user sets this flag, all dependencies are preserved.
+# Useful in central deployments where dependencies are not re-detected later
+set(HIGHFIVE_USE_INSTALL_DEPS @HIGHFIVE_USE_INSTALL_DEPS@ CACHE BOOL "Use original Highfive dependencies")
+if(HIGHFIVE_USE_INSTALL_DEPS)
+  # If enabled in the deploy config, request c++14
+  if(@HIGHFIVE_USE_XTENSOR@ AND NOT CMAKE_VERSION VERSION_LESS 3.8)
+    set_property(TARGET HighFive APPEND PROPERTY INTERFACE_COMPILE_FEATURES cxx_std_14)
+  endif()
+  message(STATUS "HIGHFIVE @PROJECT_VERSION@: Using original dependencies (HIGHFIVE_USE_INSTALL_DEPS=YES)")
+  copy_interface_properties(HighFive HighFive_HighFive)
+  return()
+endif()
+
+# When not using the pre-built dependencies, give user options
+if(DEFINED HIGHFIVE_USE_BOOST)
+  set(HIGHFIVE_USE_BOOST ${HIGHFIVE_USE_BOOST} CACHE BOOL "Enable Boost Support")
+else()
+  set(HIGHFIVE_USE_BOOST @HIGHFIVE_USE_BOOST@ CACHE BOOL "Enable Boost Support")
+endif()
+set(HIGHFIVE_USE_EIGEN "${HIGHFIVE_USE_EIGEN}" CACHE BOOL "Enable Eigen testing")
+set(HIGHFIVE_USE_XTENSOR "${HIGHFIVE_USE_XTENSOR}" CACHE BOOL "Enable xtensor testing")
+set(HIGHFIVE_PARALLEL_HDF5 @HIGHFIVE_PARALLEL_HDF5@ CACHE BOOL "Enable Parallel HDF5 support")
+option(HIGHFIVE_VERBOSE "Enable verbose logging" @HIGHFIVE_VERBOSE@)
+
+if(HIGHFIVE_USE_XTENSOR AND NOT CMAKE_VERSION VERSION_LESS 3.8)
+  set_property(TARGET HighFive APPEND PROPERTY INTERFACE_COMPILE_FEATURES cxx_std_14)
+endif()
+
+if(NOT HighFive_FIND_QUIETLY)
+  message(STATUS "HIGHFIVE @PROJECT_VERSION@: (Re)Detecting Highfive dependencies (HIGHFIVE_USE_INSTALL_DEPS=NO)")
+endif()
+include("${CMAKE_CURRENT_LIST_DIR}/HighFiveTargetDeps.cmake")
+foreach(dependency HighFive_libheaders libdeps)
+    copy_interface_properties(HighFive ${dependency})
+endforeach()
+
+check_required_components(HighFive)
diff --git a/packages/HighFive/CMake/HighFiveTargetDeps.cmake b/packages/HighFive/CMake/HighFiveTargetDeps.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..919b53544ea198e040d86d5620bb05f05ab5a7f2
--- /dev/null
+++ b/packages/HighFive/CMake/HighFiveTargetDeps.cmake
@@ -0,0 +1,122 @@
+# Link against target system libs
+# -------------------------------
+
+if(NOT TARGET libdeps)
+
+  # Separate target so that dependencies can be re-detected on each build
+  add_library(libdeps INTERFACE)
+
+  if(HIGHFIVE_VERBOSE)
+    target_compile_definitions(libdeps INTERFACE -DHIGHFIVE_LOG_LEVEL=0)
+  endif()
+
+  if(HIGHFIVE_GLIBCXX_ASSERTIONS)
+    target_compile_definitions(libdeps INTERFACE -D_GLIBCXX_ASSERTIONS)
+  endif()
+
+  if(HIGHFIVE_HAS_FRIEND_DECLARATIONS)
+    target_compile_definitions(libdeps INTERFACE -DHIGHFIVE_HAS_FRIEND_DECLARATIONS=1)
+  endif()
+
+  if(HIGHFIVE_SANITIZER)
+    target_compile_options(libdeps INTERFACE -fsanitize=${HIGHFIVE_SANITIZER})
+    target_link_options(libdeps INTERFACE -fsanitize=${HIGHFIVE_SANITIZER})
+  endif()
+
+  # HDF5
+  if(NOT DEFINED HDF5_C_LIBRARIES)
+    set(HDF5_PREFER_PARALLEL ${HIGHFIVE_PARALLEL_HDF5})
+    set(HDF5_USE_STATIC_LIBRARIES ${HIGHFIVE_STATIC_HDF5})
+    find_package(HDF5 REQUIRED)
+  endif()
+
+  if(HIGHFIVE_PARALLEL_HDF5 AND NOT HDF5_IS_PARALLEL)
+    message(WARNING "Parallel HDF5 requested but libhdf5 doesnt support it")
+  endif()
+
+  target_include_directories(libdeps SYSTEM INTERFACE ${HDF5_INCLUDE_DIRS})
+  target_link_libraries(libdeps INTERFACE ${HDF5_LIBRARIES})
+  target_compile_definitions(libdeps INTERFACE ${HDF5_DEFINITIONS})
+  target_compile_definitions(libdeps INTERFACE HIGHFIVE_HAS_CONCEPTS=$<BOOL:${HIGHFIVE_HAS_CONCEPTS}>)
+
+
+  # Boost
+  if(HIGHFIVE_USE_BOOST)
+    if(NOT DEFINED Boost_NO_BOOST_CMAKE)
+      # HighFive deactivated finding Boost via Boost's own CMake files
+      # in Oct 2016 (commit '25627b085'). Likely to appease one cluster.
+      # Boost's CMake support has since improved and likely this setting
+      # isn't needed anymore. It is kept for backwards compatibility.
+      # However, a rework of HighFive's CMake code should consider removing
+      # this default. Hard coding this to true has been reported to cause
+      # build failures.
+      set(Boost_NO_BOOST_CMAKE TRUE)
+    endif()
+    find_package(Boost REQUIRED COMPONENTS system serialization)
+    # Don't use imported targets yet; not available before CMake 3.5
+    target_include_directories(libdeps SYSTEM INTERFACE ${Boost_INCLUDE_DIR})
+    target_compile_definitions(libdeps INTERFACE BOOST_ALL_NO_LIB H5_USE_BOOST)
+  endif()
+
+  # Half
+  if(HIGHFIVE_USE_HALF_FLOAT)
+    find_file(FOUND_HALF half.hpp)
+    if (NOT FOUND_HALF)
+      message(FATAL_ERROR "Half-precision floating-point support requested but file half.hpp not found")
+    endif()
+    target_compile_definitions(libdeps INTERFACE H5_USE_HALF_FLOAT)
+  endif()
+
+  # Eigen
+  if(HIGHFIVE_USE_EIGEN)
+    if (NOT EIGEN3_INCLUDE_DIRS)
+      find_package(Eigen3 NO_MODULE)
+      if(Eigen3_FOUND)
+        message(STATUS "Found Eigen ${Eigen3_VERSION}: ${EIGEN3_INCLUDE_DIRS}")
+      else()
+        find_package(PkgConfig)
+        pkg_check_modules(EIGEN3 REQUIRED eigen3)
+      endif()
+    endif()
+    if (NOT EIGEN3_INCLUDE_DIRS)
+      message(FATAL_ERROR "Eigen was requested but could not be found")
+    endif()
+    target_include_directories(libdeps SYSTEM INTERFACE ${EIGEN3_INCLUDE_DIRS})
+    target_compile_definitions(libdeps INTERFACE H5_USE_EIGEN)
+  endif()
+
+  # xtensor
+  if(HIGHFIVE_USE_XTENSOR)
+    if (NOT xtensor_INCLUDE_DIRS)
+      find_package(xtensor REQUIRED)
+    endif()
+    if (NOT xtl_INCLUDE_DIRS)
+      find_package(xtl REQUIRED)
+    endif()
+    target_include_directories(libdeps SYSTEM INTERFACE ${xtensor_INCLUDE_DIRS} ${xtl_INCLUDE_DIRS})
+    target_compile_definitions(libdeps INTERFACE H5_USE_XTENSOR)
+  endif()
+
+  # OpenCV
+  if(HIGHFIVE_USE_OPENCV)
+    if (NOT OpenCV_INCLUDE_DIRS)
+      find_package(OpenCV REQUIRED)
+    endif()
+    target_include_directories(libdeps SYSTEM INTERFACE ${OpenCV_INCLUDE_DIRS})
+    target_link_libraries(libdeps INTERFACE ${OpenCV_LIBS})
+    target_compile_definitions(libdeps INTERFACE H5_USE_OPENCV)
+  endif()
+
+  # MPI
+  if(HIGHFIVE_PARALLEL_HDF5 OR HDF5_IS_PARALLEL)
+    find_package(MPI REQUIRED)
+    target_include_directories(libdeps SYSTEM INTERFACE ${MPI_CXX_INCLUDE_PATH})
+    target_link_libraries(libdeps INTERFACE ${MPI_CXX_LIBRARIES})
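+    # CMake < 3.13 has no target_link_options(); pass the raw link flags
+    # through target_link_libraries() instead. The SHELL: prefix makes CMake
+    # split the flag string like a shell would.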
+    if(CMAKE_VERSION VERSION_LESS 3.13)
+      target_link_libraries(libdeps INTERFACE ${MPI_CXX_LINK_FLAGS})
+    else()
+      target_link_options(libdeps INTERFACE "SHELL:${MPI_CXX_LINK_FLAGS}")
+    endif()
+  endif()
+
+endif()
diff --git a/packages/HighFive/CMake/HighFiveTargetExport.cmake b/packages/HighFive/CMake/HighFiveTargetExport.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..9906f39513c385e9dba2dca5194453fe9b2eec16
--- /dev/null
+++ b/packages/HighFive/CMake/HighFiveTargetExport.cmake
@@ -0,0 +1,48 @@
+
+# Define the HighFive INTERFACE library
+add_library(libheaders INTERFACE)
+
+target_include_directories(libheaders INTERFACE
+  "$<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include>"
+  "$<INSTALL_INTERFACE:include>")
+
+# Combined HighFive
+add_library(HighFive INTERFACE)
+target_compile_definitions(HighFive INTERFACE MPI_NO_CPPBIND)  # No c++ bindings
+target_link_libraries(HighFive INTERFACE libheaders libdeps)
+
+
+# Generate ${PROJECT_NAME}Config.cmake
+
+include(CMakePackageConfigHelpers)
+configure_package_config_file(${CMAKE_CURRENT_LIST_DIR}/HighFiveConfig.cmake.in
+  ${PROJECT_BINARY_DIR}/${PROJECT_NAME}Config.cmake
+  INSTALL_DESTINATION share/${PROJECT_NAME}/CMake)
+
+write_basic_package_version_file(
+    ${PROJECT_NAME}ConfigVersion.cmake
+    VERSION ${PROJECT_VERSION}
+    COMPATIBILITY AnyNewerVersion)
+
+install(FILES
+    CMake/HighFiveTargetDeps.cmake
+    ${PROJECT_BINARY_DIR}/${PROJECT_NAME}Config.cmake
+    ${PROJECT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake
+  DESTINATION share/${PROJECT_NAME}/CMake)
+
+
+# Provides IMPORTED targets when using this project from build/install trees.
+
+# Specify targets to include in the HighFive Exports
+install(TARGETS HighFive libheaders libdeps
+        EXPORT HighFiveTargets)
+
+# Generate & install the Export for the INSTALL_INTERFACE
+install(EXPORT HighFiveTargets
+        NAMESPACE HighFive_
+        FILE HighFiveTargets.cmake
+        DESTINATION share/${PROJECT_NAME}/CMake)
+
+# Generate the Export for the BUILD_INTERFACE (hardly used)
+export(EXPORT HighFiveTargets
+       FILE "${PROJECT_BINARY_DIR}/HighFiveTargets.cmake")
diff --git a/packages/HighFive/CMake/HighFiveWarnings.cmake b/packages/HighFive/CMake/HighFiveWarnings.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..8e8ec22019dd3d8f9d46668218db2c7e39994539
--- /dev/null
+++ b/packages/HighFive/CMake/HighFiveWarnings.cmake
@@ -0,0 +1,36 @@
+if(TARGET HighFiveWarnings)
+    # Allow multiple `include(HighFiveWarnings)`, which would
+    # attempt to redefine `HighFiveWarnings` and fail without
+    # this check.
+    return()
+endif()
+
+add_library(HighFiveWarnings INTERFACE)
+
+if(CMAKE_CXX_COMPILER_ID MATCHES "Clang"
+   OR CMAKE_CXX_COMPILER_ID MATCHES "GNU"
+   OR CMAKE_CXX_COMPILER_ID MATCHES "Intel")
+
+    target_compile_options(HighFiveWarnings
+        INTERFACE
+            -Wall
+            -Wextra
+            -Wshadow
+            -Wnon-virtual-dtor
+            -Wunused
+            -Woverloaded-virtual
+            -Wformat=2
+            -Wconversion
+            -Wsign-conversion
+            -Wno-error=deprecated-declarations
+    )
+
+    if(NOT CMAKE_CXX_COMPILER_ID MATCHES "Intel")
+        target_compile_options(HighFiveWarnings
+            INTERFACE
+                -Wpedantic
+                -Wcast-align
+                -Wdouble-promotion
+        )
+    endif()
+endif()
diff --git a/packages/HighFive/CMake/config/TestHelpers.cmake b/packages/HighFive/CMake/config/TestHelpers.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..f3ca1cb74108a6a348f1c9739e83042c56096787
--- /dev/null
+++ b/packages/HighFive/CMake/config/TestHelpers.cmake
@@ -0,0 +1,113 @@
+# TestHelpers.cmake
+#
+# Set of convenience functions for unit testing with CMake
+#
+# License: BSD 3
+#
+# Copyright (c) 2016, Adrien Devresse
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+
+
+##
+# enable or disable detection of SLURM and MPIEXEC
+option(AUTO_TEST_WITH_SLURM "automatically add srun as test prefix in a SLURM environment" TRUE)
+option(AUTO_TEST_WITH_MPIEXEC "automatically add mpiexec as test prefix in an MPICH2/OpenMPI environment" TRUE)
+
+###
+##
+## Basic SLURM support
+## The prefix "srun" is added to every test in a SLURM environment.
+## For a SLURM test execution, simply run "salloc [your_exec_parameters] ctest"
+##
+##
+if(AUTO_TEST_WITH_SLURM)
+    if(NOT DEFINED SLURM_SRUN_COMMAND)
+        find_program(SLURM_SRUN_COMMAND
+                       NAMES "srun"
+                       HINTS "${SLURM_ROOT}/bin")
+    endif()
+    
+    if(SLURM_SRUN_COMMAND)
+        set(TEST_EXEC_PREFIX_DEFAULT "${SLURM_SRUN_COMMAND}")
+        set(TEST_MPI_EXEC_PREFIX_DEFAULT "${SLURM_SRUN_COMMAND}")
+        set(TEST_MPI_EXEC_BIN_DEFAULT "${SLURM_SRUN_COMMAND}")
+        set(TEST_WITH_SLURM ON)
+        message(STATUS " - AUTO_TEST_WITH_SLURM with slurm cmd ${TEST_EXEC_PREFIX_DEFAULT} ")
+        message(STATUS "  -- set test execution prefix to ${TEST_EXEC_PREFIX_DEFAULT} ")
+        message(STATUS "  -- set MPI test execution prefix to ${TEST_MPI_EXEC_PREFIX_DEFAULT} ")
+    endif()
+
+endif()
+
+###
+## Basic MPIExec support: simply forwards mpiexec as the test prefix
+## 
+if(AUTO_TEST_WITH_MPIEXEC AND NOT TEST_WITH_SLURM)
+
+   if(NOT DEFINED MPIEXEC)
+        find_program(MPIEXEC
+                     NAMES "mpiexec"
+                     HINTS "${MPI_ROOT}/bin")
+   endif()
+
+
+   if(MPIEXEC)
+        set(TEST_MPI_EXEC_PREFIX_DEFAULT "${MPIEXEC}")
+        set(TEST_MPI_EXEC_BIN_DEFAULT "${MPIEXEC}")
+        set(TEST_WITH_MPIEXEC ON)
+        message(STATUS " - AUTO_TEST_WITH_MPIEXEC cmd ${MPIEXEC} ")
+        message(STATUS "  -- set MPI test execution prefix to ${TEST_MPI_EXEC_PREFIX_DEFAULT} ")
+
+   endif()
+
+endif()
+
+
+
+###
+##  MPI executor program path without arguments used for testing.
+##  default: srun or mpiexec if found
+##
+set(TEST_MPI_EXEC_BIN "${TEST_MPI_EXEC_BIN_DEFAULT}" CACHE STRING "path of the MPI executor (mpiexec, mpirun) for test execution")
+
+
+
+###
+## Test execution prefix. Override this variable for any execution prefix required in clustered environment
+## 
+## To manually specify a command with arguments, e.g. -DTEST_EXEC_PREFIX="/var/empty/bin/srun;-n;-4" for an srun execution
+## with 4 nodes
+##
+## default: srun if found
+##
+set(TEST_EXEC_PREFIX "${TEST_EXEC_PREFIX_DEFAULT}" CACHE STRING "prefix command for the test executions")
+
+
+
+###
+## Test execution prefix specific for MPI programs.
+## 
+## To manually specify a command with arguments, use the CMake list syntax, e.g. -DTEST_EXEC_PREFIX="/var/empty/bin/mpiexec;-n;-4" for an MPI execution
+## with 4 nodes
+##
+## default: srun or mpiexec if found
+##
+set(TEST_MPI_EXEC_PREFIX "${TEST_MPI_EXEC_PREFIX_DEFAULT}" CACHE STRING "prefix command for the MPI test executions")
+
+
+
+
+
+
+
diff --git a/packages/HighFive/CMakeLists.txt b/packages/HighFive/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..af274d9e25ea0ba40cfbd0407531db449be95f67
--- /dev/null
+++ b/packages/HighFive/CMakeLists.txt
@@ -0,0 +1,144 @@
+cmake_minimum_required(VERSION 3.1)
+if(${CMAKE_VERSION} VERSION_LESS 3.13)
+  cmake_policy(VERSION ${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION})
+else()
+  cmake_policy(VERSION 3.13)
+endif()
+
+project(HighFive VERSION 2.8.0)
+
+configure_file(${CMAKE_CURRENT_SOURCE_DIR}/include/highfive/H5Version.hpp.in
+               ${CMAKE_CURRENT_SOURCE_DIR}/include/highfive/H5Version.hpp)
+# INCLUDES
+list(APPEND CMAKE_MODULE_PATH
+  ${CMAKE_CURRENT_SOURCE_DIR}/CMake
+  ${CMAKE_CURRENT_SOURCE_DIR}/CMake/config)
+
+# OPTIONS
+# Compatibility within Highfive 2.x series
+set(USE_BOOST ON CACHE BOOL "Enable Boost Support")
+set(USE_EIGEN OFF CACHE BOOL "Enable Eigen testing")
+set(USE_XTENSOR OFF CACHE BOOL "Enable xtensor testing")
+set(USE_OPENCV OFF CACHE BOOL "Enable OpenCV testing")
+mark_as_advanced(USE_BOOST USE_EIGEN USE_XTENSOR)
+
+set(HIGHFIVE_UNIT_TESTS AUTO CACHE STRING "Enable unit tests (requires Catch2 to be present)")
+set_property(CACHE HIGHFIVE_UNIT_TESTS PROPERTY STRINGS AUTO ON OFF)
+
+option(HIGHFIVE_USE_BOOST "Enable Boost Support" ${USE_BOOST})
+option(HIGHFIVE_USE_HALF_FLOAT "Enable half-precision floats" ${USE_HALF_FLOAT})
+option(HIGHFIVE_USE_EIGEN "Enable Eigen testing" ${USE_EIGEN})
+option(HIGHFIVE_USE_OPENCV "Enable OpenCV testing" ${USE_OPENCV})
+option(HIGHFIVE_USE_XTENSOR "Enable xtensor testing" ${USE_XTENSOR})
+option(HIGHFIVE_EXAMPLES "Compile examples" ON)
+option(HIGHFIVE_PARALLEL_HDF5 "Enable Parallel HDF5 support" OFF)
+option(HIGHFIVE_STATIC_HDF5 "Statically link to HDF5 library" OFF)
+option(HIGHFIVE_BUILD_DOCS "Enable documentation building" ON)
+option(HIGHFIVE_VERBOSE "Set logging level to verbose." OFF)
+option(HIGHFIVE_GLIBCXX_ASSERTIONS "Enable bounds check for STL." OFF)
+option(HIGHFIVE_HAS_CONCEPTS "Print readable compiler errors w/ C++20 concepts" ON)
+
+# Controls if HighFive classes are friends of each other.
+#
+# There are two compiler bugs that require incompatible choices. The
+# GCC compiler bug [1] prevents us from writing:
+#
+#     template<class D>
+#     friend class NodeTraits<D>;
+#
+# While a MSVC compiler bug [2] complains that it can't access a
+# protected constructor, e.g., `HighFive::Object::Object`.
+#
+# Starting with `2.7.0` these friend declarations don't matter
+# anymore. It's merely a means of appeasing a compiler.
+#
+# The values of `HIGHFIVE_HAS_FRIEND_DECLARATIONS` are:
+#   - undefined, i.e. the macro is not set;
+#   - `0`, which implies not adding the friend declarations;
+#   - any non-zero integer, e.g. `1`, to add the friend declarations.
+#
+# Not defining the macro implies that it'll be set to `1` if MSVC is
+# detected (or other compilers requiring the friend declarations).
+#
+# [1]: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=52625
+# [2]: https://developercommunity.visualstudio.com/t/MSVC-compiler-improperly-implements-N489/1516410
+option(HIGHFIVE_HAS_FRIEND_DECLARATIONS "Enable additional friend declarations. Certain compilers require this ON, others OFF." OFF)
+mark_as_advanced(HIGHFIVE_HAS_FRIEND_DECLARATIONS)
+
+set(HIGHFIVE_SANITIZER OFF CACHE STRING "Enable a group of sanitizers, requires compiler support. Supported: 'address' and 'undefined'.")
+mark_as_advanced(HIGHFIVE_SANITIZER)
+
+# In deployments we probably don't want/can't have dynamic dependencies
+option(HIGHFIVE_USE_INSTALL_DEPS "End applications use the dependencies detected here by default" OFF)
+mark_as_advanced(HIGHFIVE_USE_INSTALL_DEPS)
+
+
+# Check compiler cxx_std requirements
+# -----------------------------------
+
+if(CMAKE_CXX_STANDARD EQUAL 98)
+    message(FATAL_ERROR "HighFive needs to be compiled with at least C++11")
+endif()
+
+if(NOT DEFINED CMAKE_CXX_STANDARD)
+    set(CMAKE_CXX_STANDARD 11)
+    set(CMAKE_CXX_STANDARD_REQUIRED ON)
+    set(CMAKE_CXX_EXTENSIONS OFF)
+endif()
+
+if(HIGHFIVE_USE_XTENSOR AND CMAKE_CXX_STANDARD LESS 14)
+    set(CMAKE_CXX_STANDARD 14)
+    set(CMAKE_CXX_STANDARD_REQUIRED ON)
+endif()
+
+add_compile_definitions(HIGHFIVE_CXX_STD=${CMAKE_CXX_STANDARD})
+
+# Search dependencies (hdf5, boost, eigen, xtensor, mpi) and build target libdeps
+include(${PROJECT_SOURCE_DIR}/CMake/HighFiveTargetDeps.cmake)
+
+# Set up HighFive for use in third-party projects via exports; creates the HighFive target
+include(${PROJECT_SOURCE_DIR}/CMake/HighFiveTargetExport.cmake)
+
+# Installation of headers (HighFive is header-only)
+install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/include/
+  DESTINATION "include"
+  PATTERN "*.in" EXCLUDE)
+
+# Preparing local building (tests, examples)
+# ------------------------------------------
+
+# Disable tests when HighFive is built as a sub-project
+if (NOT CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR)
+  if(HIGHFIVE_UNIT_TESTS AND NOT HighFive_FIND_QUIETLY)
+    message(WARNING "Unit tests have been DISABLED.")
+  endif()
+  set(HIGHFIVE_UNIT_TESTS FALSE)
+endif()
+
+if(HIGHFIVE_UNIT_TESTS)
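+  # Prefer the bundled Catch2 submodule when present; otherwise fall back to find_package(Catch2)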
+  if(EXISTS ${CMAKE_CURRENT_LIST_DIR}/deps/catch2/CMakeLists.txt)
+    add_subdirectory(deps/catch2 EXCLUDE_FROM_ALL)
+    list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/deps/catch2/contrib)
+  else()
+    find_package(Catch2)
+    if(NOT HIGHFIVE_UNIT_TESTS STREQUAL "AUTO" AND HIGHFIVE_UNIT_TESTS AND NOT Catch2_FOUND)
+      message(FATAL_ERROR "Please provide a Catch2 installation or clone the submodule")
+    elseif(NOT Catch2_FOUND)
+      message(WARNING "No Catch2 installation was found; Disabling unit tests.")
+      set(HIGHFIVE_UNIT_TESTS OFF)
+    endif()
+  endif()
+endif()
+
+if(HIGHFIVE_EXAMPLES)
+  add_subdirectory(src/examples)
+endif()
+
+if(HIGHFIVE_UNIT_TESTS)
+  enable_testing()
+  add_subdirectory(tests/unit)
+endif()
+
+if(HIGHFIVE_BUILD_DOCS)
+  add_subdirectory(doc)
+endif()
diff --git a/packages/HighFive/LICENSE b/packages/HighFive/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..bc1edcab2e3c7c8dcb721591a321af4551a245c0
--- /dev/null
+++ b/packages/HighFive/LICENSE
@@ -0,0 +1,25 @@
+Boost Software License - Version 1.0 - August 17th, 2003
+
+Permission is hereby granted, free of charge, to any person or organization
+obtaining a copy of the software and accompanying documentation covered by
+this license (the "Software") to use, reproduce, display, distribute,
+execute, and transmit the Software, and to prepare derivative works of the
+Software, and to permit third-parties to whom the Software is furnished to
+do so, all subject to the following:
+
+The copyright notices in the Software and this entire statement, including
+the above license grant, this restriction and the following disclaimer,
+must be included in all copies of the Software, in whole or in part, and
+all derivative works of the Software, unless such copies or derivative
+works are solely in the form of machine-executable object code generated by
+a source language processor.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
+SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
+FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
+
+
diff --git a/packages/HighFive/README.md b/packages/HighFive/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..3ea0680157cdc72040a39c68c2f4ebe2d1569ca1
--- /dev/null
+++ b/packages/HighFive/README.md
@@ -0,0 +1,263 @@
+# HighFive - HDF5 header-only C++ Library
+
+[![Doxygen -> gh-pages](https://github.com/BlueBrain/HighFive/workflows/gh-pages/badge.svg)](https://BlueBrain.github.io/HighFive)
+[![codecov](https://codecov.io/gh/BlueBrain/HighFive/branch/master/graph/badge.svg?token=UBKxHEn7RS)](https://codecov.io/gh/BlueBrain/HighFive)
+[![HighFive_Integration_tests](https://github.com/BlueBrain/HighFive-testing/actions/workflows/integration.yml/badge.svg)](https://github.com/BlueBrain/HighFive-testing/actions/workflows/integration.yml)
+
+Documentation: https://bluebrain.github.io/HighFive/
+
+## Brief
+
+HighFive is a modern header-only C++11 friendly interface for libhdf5.
+
+HighFive supports STL vector/string, Boost::UBLAS, Boost::Multi-array and Xtensor. It handles C++ from/to HDF5 with automatic type mapping.
+HighFive does not require additional libraries (see dependencies).
+
+It integrates nicely with other CMake projects by defining (and exporting) a HighFive target.
+
+### Design
+- Simple C++-ish minimalist interface
+- No dependency other than libhdf5
+- Zero overhead
+- Supports C++11
+
+### Feature support
+- create/read/write files, datasets, attributes, groups, dataspaces.
+- automatic memory management / ref counting
+- automatic conversion of `std::vector` and nested `std::vector` from/to any dataset with basic types
+- automatic conversion of `std::string` to/from variable length string dataset
+- selection() / slice support
+- parallel Read/Write operations from several nodes with Parallel HDF5
+- Advanced types: Compound, Enum, Arrays of Fixed-length strings, References
+- half-precision (16-bit) floating-point datasets
+- `std::byte` in C++17 mode (with `-DCMAKE_CXX_STANDARD=17` or higher)
+- etc... (see [ChangeLog](./CHANGELOG.md))
+
+### Dependencies
+- hdf5 (dev)
+- hdf5-mpi (optional, opt-in with -D*HIGHFIVE_PARALLEL_HDF5*=ON)
+- boost >= 1.41 (recommended, opt-out with -D*HIGHFIVE_USE_BOOST*=OFF)
+- eigen3 (optional, opt-in with -D*HIGHFIVE_USE_EIGEN*=ON)
+- xtensor (optional, opt-in with -D*HIGHFIVE_USE_XTENSOR*=ON)
+- half (optional, opt-in with -D*HIGHFIVE_USE_HALF_FLOAT*=ON)
+
+### Known flaws
+- HighFive is not thread-safe. At best it has the same limitations as the HDF5 library. However, HighFive objects modify their members without protecting these writes. Users have reported that HighFive is not thread-safe even when using the threadsafe HDF5 library, e.g., https://github.com/BlueBrain/HighFive/discussions/675.
+- Eigen support in core HighFive is broken. See https://github.com/BlueBrain/HighFive/issues/532. H5Easy is not
+  affected.
+- Support for fixed-length strings isn't ideal.
+
+
+## Examples
+
+#### Write a std::vector<int> to a 1D HDF5 dataset and read it back
+
+```c++
+#include <highfive/highfive.hpp>
+
+using namespace HighFive;
+
+std::string filename = "/tmp/new_file.h5";
+
+{
+    // We create an empty HDF5 file by truncating an existing
+    // file if required:
+    File file(filename, File::Truncate);
+
+    std::vector<int> data(50, 1);
+    file.createDataSet("grp/data", data);
+}
+
+{
+    // We open the file as read-only:
+    File file(filename, File::ReadOnly);
+    auto dataset = file.getDataSet("grp/data");
+
+    // Read back, allocating a new vector:
+    auto data = dataset.read<std::vector<int>>();
+
+    // Because `data` has the correct size, this will
+    // not cause `data` to be reallocated:
+    dataset.read(data);
+}
+```
+
+**Note:** `H5File.hpp` is the top-level header of HighFive core and should always be included.
+
+**Note:** For advanced use cases the dataset can be created without immediately
+writing to it. This is common in MPI-IO related patterns, or when growing a
+dataset over the course of a simulation; see the sketch below.
+
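+A minimal sketch of this pattern (file name and sizes are illustrative): the
+dataset is created with an explicit element type and dataspace first, and
+written once the data is available.
+
+```c++
+#include <vector>
+
+#include <highfive/highfive.hpp>
+
+using namespace HighFive;
+
+int main() {
+    File file("/tmp/new_file.h5", File::Truncate);
+
+    // Create the dataset up front: 50 integers, no data written yet.
+    auto dataset = file.createDataSet<int>("grp/data", DataSpace(std::vector<size_t>{50}));
+
+    // ... later, e.g. after an MPI exchange or a simulation step:
+    std::vector<int> data(50, 1);
+    dataset.write(data);
+}
+```
+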
+#### Write a 2 dimensional C double float array to a 2D HDF5 dataset
+
+See [create_dataset_double.cpp](https://github.com/BlueBrain/HighFive/blob/master/src/examples/create_dataset_double.cpp)
+
+#### Write and read a matrix of double float (boost::ublas) to a 2D HDF5 dataset
+
+See [boost_ublas_double.cpp](https://github.com/BlueBrain/HighFive/blob/master/src/examples/boost_ublas_double.cpp)
+
+#### Write and read a subset of a 2D double dataset
+
+See [select_partial_dataset_cpp11.cpp](https://github.com/BlueBrain/HighFive/blob/master/src/examples/select_partial_dataset_cpp11.cpp)
+
+#### Create, write and list HDF5 attributes
+
+See [create_attribute_string_integer.cpp](https://github.com/BlueBrain/HighFive/blob/master/src/examples/create_attribute_string_integer.cpp)
+
+#### And others
+
+See [src/examples/](https://github.com/BlueBrain/HighFive/blob/master/src/examples/) subdirectory for more info.
+
+
+### H5Easy
+
+For several 'standard' use cases the [highfive/H5Easy.hpp](include/highfive/H5Easy.hpp) interface is available. It allows:
+
+* Reading/writing in a single line of:
+
+    - scalars (to/from an extendible DataSet),
+    - strings,
+    - vectors (of standard types),
+    - [Eigen::Matrix](http://eigen.tuxfamily.org) (optional, enable CMake option `HIGHFIVE_USE_EIGEN`),
+    - [xt::xarray](https://github.com/QuantStack/xtensor) and [xt::xtensor](https://github.com/QuantStack/xtensor)
+      (optional, enable CMake option `HIGHFIVE_USE_XTENSOR`),
+    - [cv::Mat_](https://docs.opencv.org/master/df/dfc/classcv_1_1Mat__.html)
+      (optional, enable CMake option `HIGHFIVE_USE_OPENCV`).
+
+* Getting in a single line:
+
+    - the size of a DataSet,
+    - the shape of a DataSet.
+
+#### Example
+
+```cpp
+#include <highfive/H5Easy.hpp>
+
+int main() {
+    H5Easy::File file("example.h5", H5Easy::File::Overwrite);
+
+    int A = ...;
+    H5Easy::dump(file, "/path/to/A", A);
+
+    A = H5Easy::load<int>(file, "/path/to/A");
+}
+```
+
+where the `int` type of this example can be replaced by any of the above types. See [easy_load_dump.cpp](src/examples/easy_load_dump.cpp) for more details.
+
+**Note:** Classes such as `H5Easy::File` are just short for the regular `HighFive` classes (in this case `HighFive::File`). They can thus be used interchangeably.
+
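+A short sketch of that interchangeability (path and value are illustrative):
+
+```c++
+#include <highfive/H5Easy.hpp>
+
+int main() {
+    H5Easy::File file("example.h5", H5Easy::File::Overwrite);
+    H5Easy::dump(file, "/path/to/A", 42);
+
+    // The same object also accepts core HighFive calls:
+    auto dataset = file.getDataSet("/path/to/A");
+    auto A = dataset.read<int>();  // A == 42
+}
+```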
+
+## CMake integration
+There are two common paths for integrating HighFive into a CMake-based project.
+The first is to "vendor" HighFive, the second is to install HighFive as a
+normal C++ library. Due to how HighFive's CMake code works, the third
+"Bailout Approach" is sometimes needed.
+
+### Vendoring HighFive
+
+In this approach the HighFive sources are included in a subdirectory of the
+project (typically as a git submodule), for example in `third_party/HighFive`.
+
+The project's `CMakeLists.txt` adds the following lines:
+```cmake
+add_executable(foo foo.cpp)
+
+# You might want to turn off Boost support:
+if(NOT DEFINED HIGHFIVE_USE_BOOST)
+  set(HIGHFIVE_USE_BOOST Off)
+endif()
+
+# Include the subdirectory and use the target HighFive.
+add_subdirectory(third_party/HighFive)
+target_link_libraries(foo HighFive)
+```
+
+**Note:** `add_subdirectory(third_party/HighFive)` will search and "link" HDF5
+and optional dependencies such as Boost.
+
+### Regular Installation of HighFive
+
+Alternatively you can install HighFive once and use it in several projects via
+`find_package()`. First, clone the sources:
+```bash
+git clone --recursive https://github.com/BlueBrain/HighFive.git HighFive-src
+```
+By default CMake will install system-wide, which is likely not appropriate. The
+instructions below allow users to select a custom path where HighFive will be
+installed, e.g. `HIGHFIVE_INSTALL_PREFIX=${HOME}/third_party/HighFive` or some
+other location. The CMake invocations would be:
+```bash
+cmake -DHIGHFIVE_EXAMPLES=Off \
+      -DHIGHFIVE_USE_BOOST=Off \
+      -DHIGHFIVE_UNIT_TESTS=Off \
+      -DCMAKE_INSTALL_PREFIX=${HIGHFIVE_INSTALL_PREFIX} \
+      -B HighFive-src/build \
+      HighFive-src
+
+cmake --build HighFive-src/build
+cmake --install HighFive-src/build
+```
+This will install (i.e. copy) the headers to
+`${HIGHFIVE_INSTALL_PREFIX}/include` and some CMake files into an appropriate
+subfolder of `${HIGHFIVE_INSTALL_PREFIX}`.
+
+The project's `CMakeLists.txt` should add the following:
+```cmake
+# ...
+add_executable(foo foo.cpp)
+
+find_package(HighFive REQUIRED)
+target_link_libraries(foo HighFive)
+```
+
+**Note:** If HighFive hasn't been installed in a default location, CMake needs
+to be told where to find it. This can be done by adding
+`-DCMAKE_PREFIX_PATH=${HIGHFIVE_INSTALL_PREFIX}` to the CMake command for
+building the project using HighFive. The variable `CMAKE_PREFIX_PATH` is a
+semicolon-separated (`;`) list of directories.
+
+**Note:** `find_package(HighFive)` will search and "link" HDF5 and optional
+dependencies such as Boost.
+
+### The Bailout Approach
+Since both `add_subdirectory` and `find_package` will trigger finding HDF5 and
+other optional dependencies of HighFive as well as the `target_link_libraries`
+code for "linking" with the dependencies, things can go wrong.
+
+Fortunately, HighFive is a header-only library and all that's needed is the
+headers, preferably the versions obtained by installing HighFive, since those
+include `H5Version.hpp`. Let's assume they've been copied to
+`third_party/HighFive`. Then one could create a target:
+
+```cmake
+add_library(HighFive INTERFACE)
+target_include_directories(HighFive INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}/third_party/HighFive/include)
+
+add_executable(foo foo.cpp)
+target_link_libraries(foo HighFive)
+```
+
+One known case where this is required is when vendoring the optional
+dependencies of HighFive.
+
+# Questions?
+
+Do you have questions on how to use HighFive? Would you like to share an interesting example or
+discuss HighFive features? Head over to the [Discussions](https://github.com/BlueBrain/HighFive/discussions)
+forum and join the community.
+
+For bugs and issues please use [Issues](https://github.com/BlueBrain/HighFive/issues).
+
+# Funding & Acknowledgment
+ 
+The development of this software was supported by funding to the Blue Brain Project, a research center of the École polytechnique fédérale de Lausanne (EPFL), from the Swiss government's ETH Board of the Swiss Federal Institutes of Technology.
+ 
+Copyright © 2015-2022 Blue Brain Project/EPFL
+
+
+### License
+
+Boost Software License 1.0
diff --git a/packages/HighFive/codecov.yml b/packages/HighFive/codecov.yml
new file mode 100644
index 0000000000000000000000000000000000000000..bfdc9877d9acc165c0342befa33f2e8f769538cf
--- /dev/null
+++ b/packages/HighFive/codecov.yml
@@ -0,0 +1,8 @@
+coverage:
+  status:
+    project:
+      default:
+        informational: true
+    patch:
+      default:
+        informational: true
diff --git a/packages/HighFive/deps/catch2 b/packages/HighFive/deps/catch2
new file mode 160000
index 0000000000000000000000000000000000000000..3f0283de7a9c43200033da996ff9093be3ac84dc
--- /dev/null
+++ b/packages/HighFive/deps/catch2
@@ -0,0 +1 @@
+Subproject commit 3f0283de7a9c43200033da996ff9093be3ac84dc
diff --git a/packages/HighFive/doc/CMakeLists.txt b/packages/HighFive/doc/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2538af62d2b336c526b927ecd7fe3b77183a79e5
--- /dev/null
+++ b/packages/HighFive/doc/CMakeLists.txt
@@ -0,0 +1,7 @@
+find_package(Doxygen)
+if(Doxygen_FOUND)
+    configure_file(${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile @ONLY)
+    add_custom_target(doc COMMAND Doxygen::doxygen ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile)
+else()
+    message(STATUS "  Documentation (doc) cannot be built since Doxygen is not available.")
+endif()
diff --git a/packages/HighFive/doc/Doxyfile b/packages/HighFive/doc/Doxyfile
new file mode 100644
index 0000000000000000000000000000000000000000..6ebc393ec29919bda054ca1ebf962c23a1f1ceff
--- /dev/null
+++ b/packages/HighFive/doc/Doxyfile
@@ -0,0 +1,2621 @@
+# Doxyfile 1.9.1
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project.
+#
+# All text after a double hash (##) is considered a comment and is placed in
+# front of the TAG it is preceding.
+#
+# All text after a single hash (#) is considered a comment and will be ignored.
+# The format is:
+# TAG = value [value, ...]
+# For lists, items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (\" \").
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the configuration
+# file that follow. The default is UTF-8 which is also the encoding used for all
+# text before the first occurrence of this tag. Doxygen uses libiconv (or the
+# iconv built into libc) for the transcoding. See
+# https://www.gnu.org/software/libiconv/ for the list of possible encodings.
+# The default value is: UTF-8.
+
+DOXYFILE_ENCODING      = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
+# double-quotes, unless you are using Doxywizard) that should identify the
+# project for which the documentation is generated. This name is used in the
+# title of most generated pages and in a few other places.
+# The default value is: My Project.
+
+PROJECT_NAME           = HighFive
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
+# could be handy for archiving the generated documentation or if some version
+# control system is used.
+
+PROJECT_NUMBER         = @PROJECT_VERSION@
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description
+# for a project that appears at the top of each page and should give viewer a
+# quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF          = "HighFive - Header-only C++ HDF5 interface"
+
+# With the PROJECT_LOGO tag one can specify a logo or an icon that is included
+# in the documentation. The maximum height of the logo should not exceed 55
+# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy
+# the logo to the output directory.
+
+PROJECT_LOGO           =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
+# into which the generated documentation will be written. If a relative path is
+# entered, it will be relative to the location where doxygen was started. If
+# left blank the current directory will be used.
+
+OUTPUT_DIRECTORY       =
+
+# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub-
+# directories (in 2 levels) under the output directory of each output format and
+# will distribute the generated files over these directories. Enabling this
+# option can be useful when feeding doxygen a huge amount of source files, where
+# putting all generated files in the same directory would otherwise cause
+# performance problems for the file system.
+# The default value is: NO.
+
+CREATE_SUBDIRS         = NO
+
+# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
+# characters to appear in the names of generated files. If set to NO, non-ASCII
+# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
+# U+3044.
+# The default value is: NO.
+
+ALLOW_UNICODE_NAMES    = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
+# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
+# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
+# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
+# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
+# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
+# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
+# Ukrainian and Vietnamese.
+# The default value is: English.
+
+OUTPUT_LANGUAGE        = English
+
+# The OUTPUT_TEXT_DIRECTION tag is used to specify the direction in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all generated output in the proper direction.
+# Possible values are: None, LTR, RTL and Context.
+# The default value is: None.
+
+OUTPUT_TEXT_DIRECTION  = None
+
+# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member
+# descriptions after the members that are listed in the file and class
+# documentation (similar to Javadoc). Set to NO to disable this.
+# The default value is: YES.
+
+BRIEF_MEMBER_DESC      = YES
+
+# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief
+# description of a member or function before the detailed description
+#
+# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+# The default value is: YES.
+
+REPEAT_BRIEF           = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator that is
+# used to form the text in various listings. Each string in this list, if found
+# as the leading text of the brief description, will be stripped from the text
+# and the result, after processing the whole list, is used as the annotated
+# text. Otherwise, the brief description is used as-is. If left blank, the
+# following values are used ($name is automatically replaced with the name of
+# the entity): The $name class, The $name widget, The $name file, is, provides,
+# specifies, contains, represents, a, an and the.
+
+ABBREVIATE_BRIEF       = "The $name class" \
+                         "The $name widget" \
+                         "The $name file" \
+                         is \
+                         provides \
+                         specifies \
+                         contains \
+                         represents \
+                         a \
+                         an \
+                         the
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# doxygen will generate a detailed section even if there is only a brief
+# description.
+# The default value is: NO.
+
+ALWAYS_DETAILED_SEC    = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+# The default value is: NO.
+
+INLINE_INHERITED_MEMB  = NO
+
+# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path
+# before files name in the file list and in the header files. If set to NO the
+# shortest path that makes the file name unique will be used
+# The default value is: YES.
+
+FULL_PATH_NAMES        = YES
+
+# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
+# Stripping is only done if one of the specified strings matches the left-hand
+# part of the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the path to
+# strip.
+#
+# Note that you can specify absolute paths here, but also relative paths, which
+# will be relative from the directory where doxygen is started.
+# This tag requires that the tag FULL_PATH_NAMES is set to YES.
+
+STRIP_FROM_PATH        = @CMAKE_CURRENT_SOURCE_DIR@/../include
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
+# path mentioned in the documentation of a class, which tells the reader which
+# header file to include in order to use a class. If left blank only the name of
+# the header file containing the class definition is used. Otherwise one should
+# specify the list of include paths that are normally passed to the compiler
+# using the -I flag.
+
+STRIP_FROM_INC_PATH    =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
+# less readable) file names. This can be useful is your file systems doesn't
+# support long names like on DOS, Mac, or CD-ROM.
+# The default value is: NO.
+
+SHORT_NAMES            = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
+# first line (until the first dot) of a Javadoc-style comment as the brief
+# description. If set to NO, the Javadoc-style will behave just like regular Qt-
+# style comments (thus requiring an explicit @brief command for a brief
+# description.)
+# The default value is: NO.
+
+JAVADOC_AUTOBRIEF      = NO
+
+# If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line
+# such as
+# /***************
+# as being the beginning of a Javadoc-style comment "banner". If set to NO, the
+# Javadoc-style will behave just like regular comments and it will not be
+# interpreted by doxygen.
+# The default value is: NO.
+
+JAVADOC_BANNER         = NO
+
+# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
+# line (until the first dot) of a Qt-style comment as the brief description. If
+# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
+# requiring an explicit \brief command for a brief description.)
+# The default value is: NO.
+
+QT_AUTOBRIEF           = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
+# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
+# a brief description. This used to be the default behavior. The new default is
+# to treat a multi-line C++ comment block as a detailed description. Set this
+# tag to YES if you prefer the old behavior instead.
+#
+# Note that setting this tag to YES also means that rational rose comments are
+# not recognized any more.
+# The default value is: NO.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# By default Python docstrings are displayed as preformatted text and doxygen's
+# special commands cannot be used. By setting PYTHON_DOCSTRING to NO the
+# doxygen's special commands can be used and the contents of the docstring
+# documentation blocks is shown as doxygen documentation.
+# The default value is: YES.
+
+PYTHON_DOCSTRING       = YES
+
+# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
+# documentation from any documented member that it re-implements.
+# The default value is: YES.
+
+INHERIT_DOCS           = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new
+# page for each member. If set to NO, the documentation of a member will be part
+# of the file/class/namespace that contains it.
+# The default value is: NO.
+
+SEPARATE_MEMBER_PAGES  = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
+# uses this value to replace tabs by spaces in code fragments.
+# Minimum value: 1, maximum value: 16, default value: 4.
+
+TAB_SIZE               = 4
+
+# This tag can be used to specify a number of aliases that act as commands in
+# the documentation. An alias has the form:
+# name=value
+# For example adding
+# "sideeffect=@par Side Effects:\n"
+# will allow you to put the command \sideeffect (or @sideeffect) in the
+# documentation, which will result in a user-defined paragraph with heading
+# "Side Effects:". You can put \n's in the value part of an alias to insert
+# newlines (in the resulting output). You can put ^^ in the value part of an
+# alias to insert a newline as if a physical newline was in the original file.
+# When you need a literal { or } or , in the value part of an alias you have to
+# escape them by means of a backslash (\), this can lead to conflicts with the
+# commands \{ and \} for these it is advised to use the version @{ and @} or use
+# a double escape (\\{ and \\})
+
+ALIASES                =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
+# only. Doxygen will then generate output that is more tailored for C. For
+# instance, some of the names that are used will be different. The list of all
+# members will be omitted, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_FOR_C  = NO
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
+# Python sources only. Doxygen will then generate output that is more tailored
+# for that language. For instance, namespaces will be presented as packages,
+# qualified scopes will look different, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_JAVA   = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources. Doxygen will then generate output that is tailored for Fortran.
+# The default value is: NO.
+
+OPTIMIZE_FOR_FORTRAN   = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for VHDL.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_VHDL   = NO
+
+# Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice
+# sources only. Doxygen will then generate output that is more tailored for that
+# language. For instance, namespaces will be presented as modules, types will be
+# separated into more groups, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_SLICE  = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given
+# extension. Doxygen has a built-in mapping, but you can override or extend it
+# using this tag. The format is ext=language, where ext is a file extension, and
+# language is one of the parsers supported by doxygen: IDL, Java, JavaScript,
+# Csharp (C#), C, C++, D, PHP, md (Markdown), Objective-C, Python, Slice, VHDL,
+# Fortran (fixed format Fortran: FortranFixed, free formatted Fortran:
+# FortranFree, unknown formatted Fortran: Fortran. In the latter case the parser
+# tries to guess whether the code is fixed or free formatted code, this is the
+# default for Fortran type files). For instance to make doxygen treat .inc files
+# as Fortran files (default is PHP), and .f files as C (default is Fortran),
+# use: inc=Fortran f=C.
+#
+# Note: For files without extension you can use no_extension as a placeholder.
+#
+# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
+# the files are not read by doxygen. When specifying no_extension you should add
+# * to the FILE_PATTERNS.
+#
+# Note see also the list of default file extension mappings.
+
+EXTENSION_MAPPING      =
+
+# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
+# according to the Markdown format, which allows for more readable
+# documentation. See https://daringfireball.net/projects/markdown/ for details.
+# The output of markdown processing is further processed by doxygen, so you can
+# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
+# case of backward compatibility issues.
+# The default value is: YES.
+
+MARKDOWN_SUPPORT       = YES
+
+# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up
+# to that level are automatically included in the table of contents, even if
+# they do not have an id attribute.
+# Note: This feature currently applies only to Markdown headings.
+# Minimum value: 0, maximum value: 99, default value: 5.
+# This tag requires that the tag MARKDOWN_SUPPORT is set to YES.
+
+TOC_INCLUDE_HEADINGS   = 0
+
+# When enabled doxygen tries to link words that correspond to documented
+# classes, or namespaces to their corresponding documentation. Such a link can
+# be prevented in individual cases by putting a % sign in front of the word or
+# globally by setting AUTOLINK_SUPPORT to NO.
+# The default value is: YES.
+
+AUTOLINK_SUPPORT       = YES
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should set this
+# tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string);
+# versus func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+# The default value is: NO.
+
+BUILTIN_STL_SUPPORT    = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+# The default value is: NO.
+
+CPP_CLI_SUPPORT        = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
+# https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen
+# will parse them like normal C++ but will assume all classes use public instead
+# of private inheritance when no explicit protection keyword is present.
+# The default value is: NO.
+
+SIP_SUPPORT            = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate
+# getter and setter methods for a property. Setting this option to YES will make
+# doxygen to replace the get and set methods by a property in the documentation.
+# This will only work if the methods are indeed getting or setting a simple
+# type. If this is not the case, or you want to show the methods anyway, you
+# should set this option to NO.
+# The default value is: YES.
+
+IDL_PROPERTY_SUPPORT   = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+# The default value is: NO.
+
+DISTRIBUTE_GROUP_DOC   = NO
+
+# If one adds a struct or class to a group and this option is enabled, then also
+# any nested class or struct is added to the same group. By default this option
+# is disabled and one has to add nested compounds explicitly via \ingroup.
+# The default value is: NO.
+
+GROUP_NESTED_COMPOUNDS = NO
+
+# Set the SUBGROUPING tag to YES to allow class member groups of the same type
+# (for instance a group of public functions) to be put as a subgroup of that
+# type (e.g. under the Public Functions section). Set it to NO to prevent
+# subgrouping. Alternatively, this can be done per class using the
+# \nosubgrouping command.
+# The default value is: YES.
+
+SUBGROUPING            = YES
+
+# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
+# are shown inside the group in which they are included (e.g. using \ingroup)
+# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
+# and RTF).
+#
+# Note that this feature does not work in combination with
+# SEPARATE_MEMBER_PAGES.
+# The default value is: NO.
+
+INLINE_GROUPED_CLASSES = NO
+
+# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
+# with only public data fields or simple typedef fields will be shown inline in
+# the documentation of the scope in which they are defined (i.e. file,
+# namespace, or group documentation), provided this scope is documented. If set
+# to NO, structs, classes, and unions are shown on a separate page (for HTML and
+# Man pages) or section (for LaTeX and RTF).
+# The default value is: NO.
+
+INLINE_SIMPLE_STRUCTS  = NO
+
+# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
+# enum is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class, and the struct will be named TypeS. This can typically be
+# useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+# The default value is: NO.
+
+TYPEDEF_HIDES_STRUCT   = NO
+
+# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
+# cache is used to resolve symbols given their name and scope. Since this can be
+# an expensive process and often the same symbol appears multiple times in the
+# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
+# doxygen will become slower. If the cache is too large, memory is wasted. The
+# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
+# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
+# symbols. At the end of a run doxygen will report the cache usage and suggest
+# the optimal cache size from a speed point of view.
+# Minimum value: 0, maximum value: 9, default value: 0.
+
+LOOKUP_CACHE_SIZE      = 0
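+
+# For example (a hypothetical setting, not used here): LOOKUP_CACHE_SIZE = 3
+# would give a cache of 2^(16+3) = 2^19 = 524288 symbols.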
+
+# The NUM_PROC_THREADS tag specifies the number of threads doxygen is allowed
+# to use during processing. When set to 0 doxygen will base this on the number
+# of cores available in the system. You can set it explicitly to a value larger
+# than 0 to get more control over the balance between CPU load and processing
+# speed. At this moment only the input processing can be done using multiple
+# threads. Since this is still an experimental feature the default is set to 1,
+# which effectively disables parallel processing. Please report any issues you
+# encounter. Generating dot graphs in parallel is controlled by the
+# DOT_NUM_THREADS setting.
+# Minimum value: 0, maximum value: 32, default value: 1.
+
+NUM_PROC_THREADS       = 1
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in
+# documentation are documented, even if no documentation was available. Private
+# class members and static file members will be hidden unless the
+# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
+# Note: This will also disable the warnings about undocumented members that are
+# normally produced when WARNINGS is set to YES.
+# The default value is: NO.
+
+EXTRACT_ALL            = YES
+
+# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will
+# be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PRIVATE        = NO
+
+# If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual
+# methods of a class will be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PRIV_VIRTUAL   = NO
+
+# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
+# scope will be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PACKAGE        = NO
+
+# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be
+# included in the documentation.
+# The default value is: NO.
+
+EXTRACT_STATIC         = NO
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined
+# locally in source files will be included in the documentation. If set to NO,
+# only classes defined in header files are included. Does not have any effect
+# for Java sources.
+# The default value is: YES.
+
+EXTRACT_LOCAL_CLASSES  = YES
+
+# This flag is only useful for Objective-C code. If set to YES, local methods,
+# which are defined in the implementation section but not in the interface are
+# included in the documentation. If set to NO, only methods in the interface are
+# included.
+# The default value is: NO.
+
+EXTRACT_LOCAL_METHODS  = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base name of
+# the file that contains the anonymous namespace. By default anonymous
+# namespaces are hidden.
+# The default value is: NO.
+
+EXTRACT_ANON_NSPACES   = NO
+
+# If this flag is set to YES, the name of an unnamed parameter in a declaration
+# will be determined by the corresponding definition. By default unnamed
+# parameters remain unnamed in the output.
+# The default value is: YES.
+
+RESOLVE_UNNAMED_PARAMS = YES
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
+# undocumented members inside documented classes or files. If set to NO these
+# members will be included in the various overviews, but no documentation
+# section is generated. This option has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_MEMBERS     = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy. If set
+# to NO, these classes will be included in the various overviews. This option
+# has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_CLASSES     = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
+# declarations. If set to NO, these declarations will be included in the
+# documentation.
+# The default value is: NO.
+
+HIDE_FRIEND_COMPOUNDS  = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
+# documentation blocks found inside the body of a function. If set to NO, these
+# blocks will be appended to the function's detailed documentation block.
+# The default value is: NO.
+
+HIDE_IN_BODY_DOCS      = NO
+
+# The INTERNAL_DOCS tag determines if documentation that is typed after a
+# \internal command is included. If the tag is set to NO then the documentation
+# will be excluded. Set it to YES to include the internal documentation.
+# The default value is: NO.
+
+INTERNAL_DOCS          = NO
+
+# With the correct setting of option CASE_SENSE_NAMES doxygen will better be
+# able to match the capabilities of the underlying filesystem. In case the
+# filesystem is case sensitive (i.e. it supports files in the same directory
+# whose names only differ in casing), the option must be set to YES to properly
+# deal with such files in case they appear in the input. For filesystems that
+# are not case sensitive the option should be set to NO to properly deal with
+# output files written for symbols that only differ in casing, such as for two
+# classes, one named CLASS and the other named Class, and to also support
+# references to files without having to specify the exact matching casing. On
+# Windows (including Cygwin) and MacOS, users should typically set this option
+# to NO, whereas on Linux or other Unix flavors it should typically be set to
+# YES.
+# The default value is: system dependent.
+
+CASE_SENSE_NAMES       = NO
+
+# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
+# their full class and namespace scopes in the documentation. If set to YES, the
+# scope will be hidden.
+# The default value is: NO.
+
+HIDE_SCOPE_NAMES       = NO
+
+# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will
+# append additional text to a page's title, such as Class Reference. If set to
+# YES the compound reference will be hidden.
+# The default value is: NO.
+
+HIDE_COMPOUND_REFERENCE= NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
+# the files that are included by a file in the documentation of that file.
+# The default value is: YES.
+
+SHOW_INCLUDE_FILES     = YES
+
+# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
+# grouped member an include statement to the documentation, telling the reader
+# which file to include in order to use the member.
+# The default value is: NO.
+
+SHOW_GROUPED_MEMB_INC  = NO
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
+# files with double quotes in the documentation rather than with sharp brackets.
+# The default value is: NO.
+
+FORCE_LOCAL_INCLUDES   = NO
+
+# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
+# documentation for inline members.
+# The default value is: YES.
+
+INLINE_INFO            = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
+# (detailed) documentation of file and class members alphabetically by member
+# name. If set to NO, the members will appear in declaration order.
+# The default value is: YES.
+
+SORT_MEMBER_DOCS       = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
+# descriptions of file, namespace and class members alphabetically by member
+# name. If set to NO, the members will appear in declaration order. Note that
+# this will also influence the order of the classes in the class list.
+# The default value is: NO.
+
+SORT_BRIEF_DOCS        = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
+# (brief and detailed) documentation of class members so that constructors and
+# destructors are listed first. If set to NO the constructors will appear in the
+# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
+# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
+# member documentation.
+# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
+# detailed member documentation.
+# The default value is: NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
+# of group names into alphabetical order. If set to NO the group names will
+# appear in their defined order.
+# The default value is: NO.
+
+SORT_GROUP_NAMES       = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
+# fully-qualified names, including namespaces. If set to NO, the class list will
+# be sorted only by class name, not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the alphabetical
+# list.
+# The default value is: NO.
+
+SORT_BY_SCOPE_NAME     = NO
+
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
+# type resolution of all parameters of a function, it will reject a match between
+# the prototype and the implementation of a member function even if there is
+# only one candidate or it is obvious which candidate to choose by doing a
+# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
+# accept a match between prototype and implementation in such cases.
+# The default value is: NO.
+
+STRICT_PROTO_MATCHING  = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo
+# list. This list is created by putting \todo commands in the documentation.
+# The default value is: YES.
+
+GENERATE_TODOLIST      = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test
+# list. This list is created by putting \test commands in the documentation.
+# The default value is: YES.
+
+GENERATE_TESTLIST      = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
+# list. This list is created by putting \bug commands in the documentation.
+# The default value is: YES.
+
+GENERATE_BUGLIST       = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
+# the deprecated list. This list is created by putting \deprecated commands in
+# the documentation.
+# The default value is: YES.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional documentation
+# sections, marked by \if <section_label> ... \endif and \cond <section_label>
+# ... \endcond blocks.
+
+ENABLED_SECTIONS       =
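+
+# As an illustration (with a hypothetical label), a block such as
+#   /// \if internal ... \endif
+# is only included in the output when this tag lists that label, e.g.
+#   ENABLED_SECTIONS = internal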
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
+# initial value of a variable or macro / define can have for it to appear in the
+# documentation. If the initializer consists of more lines than specified here
+# it will be hidden. Use a value of 0 to hide initializers completely. The
+# appearance of the value of individual variables and macros / defines can be
+# controlled using \showinitializer or \hideinitializer command in the
+# documentation regardless of this setting.
+# Minimum value: 0, maximum value: 10000, default value: 30.
+
+MAX_INITIALIZER_LINES  = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
+# the bottom of the documentation of classes and structs. If set to YES, the
+# list will mention the files that were used to generate the documentation.
+# The default value is: YES.
+
+SHOW_USED_FILES        = YES
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
+# will remove the Files entry from the Quick Index and from the Folder Tree View
+# (if specified).
+# The default value is: YES.
+
+SHOW_FILES             = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
+# page. This will remove the Namespaces entry from the Quick Index and from the
+# Folder Tree View (if specified).
+# The default value is: YES.
+
+SHOW_NAMESPACES        = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output is used as
+# the file version. For an example see the documentation.
+
+FILE_VERSION_FILTER    =
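+
+# A hypothetical example for a git checkout: doxygen appends the file name to
+# the command, so
+#   FILE_VERSION_FILTER = "git log -1 --pretty=format:%h"
+# would report the abbreviated hash of the last commit touching each file.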
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option. You can
+# optionally specify a file name after the option, if omitted DoxygenLayout.xml
+# will be used as the name of the layout file.
+#
+# Note that if you run doxygen from a directory containing a file called
+# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
+# tag is left empty.
+
+LAYOUT_FILE            = @CMAKE_CURRENT_SOURCE_DIR@/DoxygenLayout.xml
+
+# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
+# the reference definitions. This must be a list of .bib files. The .bib
+# extension is automatically appended if omitted. This requires the bibtex tool
+# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info.
+# For LaTeX the style of the bibliography can be controlled using
+# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
+# search path. See also \cite for info how to create references.
+
+CITE_BIB_FILES         =
+
+#---------------------------------------------------------------------------
+# Configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated to
+# standard output by doxygen. If QUIET is set to YES this implies that the
+# messages are off.
+# The default value is: NO.
+
+QUIET                  = YES
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
+# this implies that the warnings are on.
+#
+# Tip: Turn warnings on while writing the documentation.
+# The default value is: YES.
+
+WARNINGS               = YES
+
+# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate
+# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
+# will automatically be disabled.
+# The default value is: YES.
+
+WARN_IF_UNDOCUMENTED   = YES
+
+# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some parameters
+# in a documented function, or documenting parameters that don't exist or using
+# markup commands wrongly.
+# The default value is: YES.
+
+WARN_IF_DOC_ERROR      = YES
+
+# The WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
+# are documented, but have no documentation for their parameters or return
+# value. If set to NO, doxygen will only warn about wrong or incomplete
+# parameter documentation, but not about the absence of documentation. If
+# EXTRACT_ALL is set to YES then this flag will automatically be disabled.
+# The default value is: NO.
+
+WARN_NO_PARAMDOC       = NO
+
+# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when
+# a warning is encountered. If the WARN_AS_ERROR tag is set to FAIL_ON_WARNINGS
+# then doxygen will continue running as if WARN_AS_ERROR tag is set to NO, but
+# at the end of the doxygen process doxygen will return with a non-zero status.
+# Possible values are: NO, YES and FAIL_ON_WARNINGS.
+# The default value is: NO.
+
+WARN_AS_ERROR          = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that doxygen
+# can produce. The string should contain the $file, $line, and $text tags, which
+# will be replaced by the file and line number from which the warning originated
+# and the warning text. Optionally the format may contain $version, which will
+# be replaced by the version of the file (if it could be obtained via
+# FILE_VERSION_FILTER).
+# The default value is: $file:$line: $text.
+
+WARN_FORMAT            = "$file:$line: $text"
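+
+# With the format above, a warning is printed as the file name and line number
+# followed by the warning text, e.g. (hypothetical location)
+#   src/main.cpp:42: <warning text>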
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning and error
+# messages should be written. If left blank the output is written to standard
+# error (stderr).
+
+WARN_LOGFILE           =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag is used to specify the files and/or directories that contain
+# documented source files. You may enter file names like myfile.cpp or
+# directories like /usr/src/myproject. Separate the files or directories with
+# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING.
+# Note: If this tag is empty the current directory is searched.
+
+INPUT                  = @CMAKE_CURRENT_SOURCE_DIR@/../include \
+                         @CMAKE_CURRENT_SOURCE_DIR@/installation.md \
+                         @CMAKE_CURRENT_SOURCE_DIR@/developer_guide.md \
+                         @CMAKE_CURRENT_SOURCE_DIR@/../CHANGELOG.md \
+                         @CMAKE_CURRENT_SOURCE_DIR@/../README.md
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
+# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
+# documentation (see:
+# https://www.gnu.org/software/libiconv/) for the list of possible encodings.
+# The default value is: UTF-8.
+
+INPUT_ENCODING         = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
+# *.h) to filter out the source-files in the directories.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# read by doxygen.
+#
+# Note the list of default checked file patterns might differ from the list of
+# default file extension mappings.
+#
+# If left blank the following patterns are tested: *.c, *.cc, *.cxx, *.cpp,
+# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h,
+# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc,
+# *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C comment),
+# *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd, *.vhdl,
+# *.ucf, *.qsf and *.ice.
+
+FILE_PATTERNS          = *.c \
+                         *.cc \
+                         *.cxx \
+                         *.cpp \
+                         *.c++ \
+                         *.java \
+                         *.ii \
+                         *.ixx \
+                         *.ipp \
+                         *.i++ \
+                         *.inl \
+                         *.idl \
+                         *.ddl \
+                         *.odl \
+                         *.h \
+                         *.hh \
+                         *.hxx \
+                         *.hpp \
+                         *.h++ \
+                         *.cs \
+                         *.d \
+                         *.php \
+                         *.php4 \
+                         *.php5 \
+                         *.phtml \
+                         *.inc \
+                         *.m \
+                         *.markdown \
+                         *.md \
+                         *.mm \
+                         *.dox \
+                         *.py \
+                         *.pyw \
+                         *.f90 \
+                         *.f95 \
+                         *.f03 \
+                         *.f08 \
+                         *.f \
+                         *.for \
+                         *.tcl \
+                         *.vhd \
+                         *.vhdl \
+                         *.ucf \
+                         *.qsf
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories should
+# be searched for input files as well.
+# The default value is: NO.
+
+RECURSIVE              = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should be
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+#
+# Note that relative paths are relative to the directory from which doxygen is
+# run.
+
+EXCLUDE                =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
+# from the input.
+# The default value is: NO.
+
+EXCLUDE_SYMLINKS       = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories.
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories for example use the pattern */test/*
+
+EXCLUDE_PATTERNS       =
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories use the pattern */test/*
+
+EXCLUDE_SYMBOLS        = *detail*
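+
+# The pattern above keeps implementation helpers out of the output: any symbol
+# whose qualified name contains "detail" (e.g. a hypothetical
+# SomeNamespace::detail::Helper) is excluded.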
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or directories
+# that contain example code fragments that are included (see the \include
+# command).
+
+EXAMPLE_PATH           =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank all
+# files are included.
+
+EXAMPLE_PATTERNS       = *
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude commands
+# irrespective of the value of the RECURSIVE tag.
+# The default value is: NO.
+
+EXAMPLE_RECURSIVE      = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
+
+IMAGE_PATH             =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command:
+#
+# <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# properly processed by doxygen.
+
+INPUT_FILTER           =
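+
+# A hypothetical example: stripping Windows line endings before parsing with
+#   INPUT_FILTER = "sed -e 's/\r$//'"
+# (GNU sed); sed reads the file name passed by doxygen and writes the filtered
+# source to standard output.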
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form: pattern=filter
+# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
+# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
+# patterns match the file name, INPUT_FILTER is applied.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# properly processed by doxygen.
+
+FILTER_PATTERNS        =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will also be used to filter the input files that are used for
+# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
+# The default value is: NO.
+
+FILTER_SOURCE_FILES    = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERNS (if any) and
+# it is also possible to disable source filtering for a specific pattern using
+# *.ext= (so without naming a filter).
+# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
+
+FILTER_SOURCE_PATTERNS =
+
+# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
+# is part of the input, its contents will be placed on the main page
+# (index.html). This can be useful if you have a project on, for instance, GitHub
+# and want to reuse the introduction page also for the doxygen output.
+
+USE_MDFILE_AS_MAINPAGE = @CMAKE_CURRENT_SOURCE_DIR@/../README.md
+
+#---------------------------------------------------------------------------
+# Configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
+# generated. Documented entities will be cross-referenced with these sources.
+#
+# Note: To get rid of all source code in the generated output, make sure that
+# also VERBATIM_HEADERS is set to NO.
+# The default value is: NO.
+
+SOURCE_BROWSER         = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body of functions,
+# classes and enums directly into the documentation.
+# The default value is: NO.
+
+INLINE_SOURCES         = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
+# special comment blocks from generated source code fragments. Normal C, C++ and
+# Fortran comments will always remain visible.
+# The default value is: YES.
+
+STRIP_CODE_COMMENTS    = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
+# entity all documented functions referencing it will be listed.
+# The default value is: NO.
+
+REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES then for each documented function
+# all documented entities called/used by that function will be listed.
+# The default value is: NO.
+
+REFERENCES_RELATION    = NO
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
+# to YES then the hyperlinks from functions in REFERENCES_RELATION and
+# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
+# link to the documentation.
+# The default value is: YES.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
+# source code will show a tooltip with additional information such as prototype,
+# brief description and links to the definition and documentation. Since this
+# will make the HTML file larger and loading of large files a bit slower, you
+# can opt to disable this feature.
+# The default value is: YES.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+SOURCE_TOOLTIPS        = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code will
+# point to the HTML generated by the htags(1) tool instead of doxygen's built-in
+# source browser. The htags tool is part of GNU's global source tagging system
+# (see https://www.gnu.org/software/global/global.html). You will need version
+# 4.8.6 or higher.
+#
+# To use it do the following:
+# - Install the latest version of global
+# - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file
+# - Make sure the INPUT points to the root of the source tree
+# - Run doxygen as normal
+#
+# Doxygen will invoke htags (and that will in turn invoke gtags), so these
+# tools must be available from the command line (i.e. in the search path).
+#
+# The result: instead of the source browser generated by doxygen, the links to
+# source code will now point to the output of htags.
+# The default value is: NO.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+USE_HTAGS              = NO
+
+# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
+# verbatim copy of the header file for each class for which an include is
+# specified. Set to NO to disable this.
+# See also: Section \class.
+# The default value is: YES.
+
+VERBATIM_HEADERS       = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
+# compounds will be generated. Enable this if the project contains a lot of
+# classes, structs, unions or interfaces.
+# The default value is: YES.
+
+ALPHABETICAL_INDEX     = YES
+
+# In case all classes in a project start with a common prefix, all classes will
+# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
+# can be used to specify a prefix (or a list of prefixes) that should be ignored
+# while generating the index headers.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+IGNORE_PREFIX          =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output.
+# The default value is: YES.
+
+GENERATE_HTML          = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_OUTPUT            = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
+# generated HTML page (for example: .htm, .php, .asp).
+# The default value is: .html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FILE_EXTENSION    = .html
+
+# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
+# each generated HTML page. If the tag is left blank doxygen will generate a
+# standard header.
+#
+# To get valid HTML, the header file must include any scripts and style sheets
+# that doxygen needs, which depend on the configuration options used (e.g.
+# the setting GENERATE_TREEVIEW). It is highly recommended to start with a
+# default header using
+# doxygen -w html new_header.html new_footer.html new_stylesheet.css
+# YourConfigFile
+# and then modify the file new_header.html. See also section "Doxygen usage"
+# for information on how to generate the default header that doxygen normally
+# uses.
+# Note: The header is subject to change so you typically have to regenerate the
+# default header when upgrading to a newer version of doxygen. For a description
+# of the possible markers and block names see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_HEADER            =
+
+# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
+# generated HTML page. If the tag is left blank doxygen will generate a standard
+# footer. See HTML_HEADER for more information on how to generate a default
+# footer and what special commands can be used inside the footer. See also
+# section "Doxygen usage" for information on how to generate the default footer
+# that doxygen normally uses.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FOOTER            =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
+# sheet that is used by each HTML page. It can be used to fine-tune the look of
+# the HTML output. If left blank doxygen will generate a default style sheet.
+# See also section "Doxygen usage" for information on how to generate the style
+# sheet that doxygen normally uses.
+# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
+# it is more robust and this tag (HTML_STYLESHEET) will in the future become
+# obsolete.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_STYLESHEET        =
+
+# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# cascading style sheets that are included after the standard style sheets
+# created by doxygen. Using this option one can overrule certain style aspects.
+# This is preferred over using HTML_STYLESHEET since it does not replace the
+# standard style sheet and is therefore more robust against future updates.
+# Doxygen will copy the style sheet files to the output directory.
+# Note: The order of the extra style sheet files is of importance (e.g. the last
+# style sheet in the list overrules the setting of the previous ones in the
+# list). For an example see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_STYLESHEET  = @CMAKE_CURRENT_SOURCE_DIR@/doxygen-awesome-css/doxygen-awesome.css
+
+# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the HTML output directory. Note
+# that these files will be copied to the base HTML output directory. Use the
+# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
+# files will be copied as-is; there are no commands or markers available.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_FILES       =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
+# will adjust the colors in the style sheet and background images according to
+# this color. Hue is specified as an angle on a colorwheel, see
+# https://en.wikipedia.org/wiki/Hue for more information. For instance the value
+# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 is
+# purple, and 360 is red again.
+# Minimum value: 0, maximum value: 359, default value: 220.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_HUE    = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
+# in the HTML output. For a value of 0 the output will use grayscales only. A
+# value of 255 will produce the most vivid colors.
+# Minimum value: 0, maximum value: 255, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_SAT    = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
+# luminance component of the colors in the HTML output. Values below 100
+# gradually make the output lighter, whereas values above 100 make the output
+# darker. The value divided by 100 is the actual gamma applied, so 80 represents
+# a gamma of 0.8, the value 220 represents a gamma of 2.2, and 100 does not
+# change the gamma.
+# Minimum value: 40, maximum value: 240, default value: 80.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_GAMMA  = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting this
+# to YES can help to show when doxygen was last run and thus if the
+# documentation is up to date.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_TIMESTAMP         = NO
+
+# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML
+# documentation will contain a main index with vertical navigation menus that
+# are dynamically created via JavaScript. If disabled, the navigation index will
+# consist of multiple levels of tabs that are statically embedded in every HTML
+# page. Disable this option to support browsers that do not have JavaScript,
+# like the Qt help browser.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_DYNAMIC_MENUS     = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_DYNAMIC_SECTIONS  = NO
+
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
+# shown in the various tree structured indices initially; the user can expand
+# and collapse entries dynamically later on. Doxygen will expand the tree to
+# such a level that at most the specified number of entries are visible (unless
+# a fully collapsed tree already exceeds this amount). So setting the number of
+# entries to 1 will produce a fully collapsed tree by default. 0 is a special
+# value representing an infinite number of entries and will result in a fully
+# expanded tree by default.
+# Minimum value: 0, maximum value: 9999, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_INDEX_NUM_ENTRIES = 100
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files will be
+# generated that can be used as input for Apple's Xcode 3 integrated development
+# environment (see:
+# https://developer.apple.com/xcode/), introduced with OSX 10.5 (Leopard). To
+# create a documentation set, doxygen will generate a Makefile in the HTML
+# output directory. Running make will produce the docset in that directory and
+# running make install will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
+# startup. See
+# https://developer.apple.com/library/archive/featuredarticles/DoxygenXcode/_index.html
+# for more information.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_DOCSET        = NO
+
+# This tag determines the name of the docset feed. A documentation feed provides
+# an umbrella under which multiple documentation sets from a single provider
+# (such as a company or product suite) can be grouped.
+# The default value is: Doxygen generated docs.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_FEEDNAME        = "Doxygen generated docs"
+
+# This tag specifies a string that should uniquely identify the documentation
+# set bundle. This should be a reverse domain-name style string, e.g.
+# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_BUNDLE_ID       = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+# The default value is: org.doxygen.Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_ID    = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+# The default value is: Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_NAME  = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
+# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
+# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
+# (see:
+# https://www.microsoft.com/en-us/download/details.aspx?id=21138) on Windows.
+#
+# The HTML Help Workshop contains a compiler that can convert all HTML output
+# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
+# files are now used as the Windows 98 help format, and will replace the old
+# Windows help format (.hlp) on all Windows platforms in the future. Compressed
+# HTML files also contain an index, a table of contents, and you can search for
+# words in the documentation. The HTML workshop also contains a viewer for
+# compressed HTML files.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_HTMLHELP      = NO
+
+# The CHM_FILE tag can be used to specify the file name of the resulting .chm
+# file. You can add a path in front of the file if the result should not be
+# written to the html output directory.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_FILE               =
+
+# The HHC_LOCATION tag can be used to specify the location (absolute path
+# including file name) of the HTML help compiler (hhc.exe). If non-empty,
+# doxygen will try to run the HTML help compiler on the generated index.hhp.
+# The file has to be specified with full path.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+HHC_LOCATION           =
+
+# The GENERATE_CHI flag controls whether a separate .chi index file is generated
+# (YES) or included in the main .chm file (NO).
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+GENERATE_CHI           = NO
+
+# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc)
+# and project file content.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_INDEX_ENCODING     =
+
+# The BINARY_TOC flag controls whether a binary table of contents is generated
+# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it
+# enables the Previous and Next buttons.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+BINARY_TOC             = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members to
+# the table of contents of the HTML help documentation and to the tree view.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+TOC_EXPAND             = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
+# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
+# (.qch) of the generated HTML documentation.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_QHP           = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
+# the file name of the resulting .qch file. The path specified is relative to
+# the HTML output folder.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QCH_FILE               =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
+# Project output. For more information please see Qt Help Project / Namespace
+# (see:
+# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace).
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_NAMESPACE          = org.doxygen.Project
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
+# Help Project output. For more information please see Qt Help Project / Virtual
+# Folders (see:
+# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-folders).
+# The default value is: doc.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_VIRTUAL_FOLDER     = doc
+
+# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
+# filter to add. For more information please see Qt Help Project / Custom
+# Filters (see:
+# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_NAME   =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see Qt Help Project / Custom
+# Filters (see:
+# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_ATTRS  =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's filter section matches. Qt Help Project / Filter Attributes (see:
+# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_SECT_FILTER_ATTRS  =
+
+# The QHG_LOCATION tag can be used to specify the location (absolute path
+# including file name) of Qt's qhelpgenerator. If non-empty doxygen will try to
+# run qhelpgenerator on the generated .qhp file.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHG_LOCATION           =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
+# generated that, together with the HTML files, form an Eclipse help plugin. To
+# install this plugin and make it available under the help contents menu in
+# Eclipse, the contents of the directory containing the HTML and XML files needs
+# to be copied into the plugins directory of eclipse. The name of the directory
+# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
+# After copying Eclipse needs to be restarted before the help appears.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_ECLIPSEHELP   = NO
+
+# A unique identifier for the Eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have this
+# name. Each documentation set should have its own identifier.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
+
+ECLIPSE_DOC_ID         = org.doxygen.Project
+
+# If you want full control over the layout of the generated HTML pages it might
+# be necessary to disable the index and replace it with your own. The
+# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at the
+# top of each HTML page. A value of NO enables the index and the value YES
+# disables it. Since the tabs in the index contain the same information as the
+# navigation tree, you can set this option to YES if you also set
+# GENERATE_TREEVIEW to YES.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+DISABLE_INDEX          = NO
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information. If the tag
+# value is set to YES, a side panel will be generated containing a tree-like
+# index structure (just like the one that is generated for HTML Help). For this
+# to work a browser that supports JavaScript, DHTML, CSS and frames is required
+# (i.e. any modern browser). Windows users are probably better off using the
+# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
+# further fine-tune the look of the index. As an example, the default style
+# sheet generated by doxygen has an example that shows how to put an image at
+# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
+# the same information as the tab index, you could consider setting
+# DISABLE_INDEX to YES when enabling this option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_TREEVIEW      = YES
+
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
+# doxygen will group on one line in the generated HTML documentation.
+#
+# Note that a value of 0 will completely suppress the enum values from appearing
+# in the overview section.
+# Minimum value: 0, maximum value: 20, default value: 4.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+ENUM_VALUES_PER_LINE   = 4
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
+# to set the initial width (in pixels) of the frame in which the tree is shown.
+# Minimum value: 0, maximum value: 1500, default value: 250.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+TREEVIEW_WIDTH         = 250
+
+# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to
+# external symbols imported via tag files in a separate window.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+EXT_LINKS_IN_WINDOW    = NO
+
+# If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg
+# tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see
+# https://inkscape.org) to generate formulas as SVG images instead of PNGs for
+# the HTML output. These images will generally look nicer at scaled resolutions.
+# Possible values are: png (the default) and svg (looks nicer but requires the
+# pdf2svg or inkscape tool).
+# The default value is: png.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FORMULA_FORMAT    = png
+
+# Use this tag to change the font size of LaTeX formulas included as images in
+# the HTML documentation. When you change the font size after a successful
+# doxygen run you need to manually remove any form_*.png images from the HTML
+# output directory to force them to be regenerated.
+# Minimum value: 8, maximum value: 50, default value: 10.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_FONTSIZE       = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are not
+# supported properly for IE 6.0, but are supported on all modern browsers.
+#
+# Note that when changing this option you need to delete any form_*.png files in
+# the HTML output directory before the changes have effect.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_TRANSPARENT    = YES
+
+# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands
+# to create new LaTeX commands to be used in formulas as building blocks. See
+# the section "Including formulas" for details.
+
+FORMULA_MACROFILE      =
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# https://www.mathjax.org) which uses client side JavaScript for the rendering
+# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want formulas to look prettier in the HTML output. When
+# enabled you may also need to install MathJax separately and configure the path
+# to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+USE_MATHJAX            = NO
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. See the MathJax site (see:
+# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details.
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility), NativeMML (i.e. MathML) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT         = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. However, it is strongly recommended to install a local copy of
+# MathJax from https://www.mathjax.org before deployment.
+# The default value is: https://cdn.jsdelivr.net/npm/mathjax@2.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_RELPATH        = https://cdn.jsdelivr.net/npm/mathjax@2
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. For example
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_EXTENSIONS     =
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with pieces of
+# JavaScript code that will be run on startup of MathJax. See the MathJax site
+# (see:
+# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_CODEFILE       =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow, in which
+# case enabling SERVER_BASED_SEARCH may provide a better solution. It is
+# possible to search using the keyboard; to jump to the search box use
+# <access key> + S
+# (what the <access key> is depends on the OS and browser, but it is typically
+# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
+# key> to jump into the search results window, the results can be navigated
+# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
+# the search. The filter options can be selected when the cursor is inside the
+# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
+# to select a filter and <Enter> or <escape> to activate or cancel the filter
+# option.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+SEARCHENGINE           = YES
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a web server instead of a web client using JavaScript. There
+# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
+# setting. When disabled, doxygen will generate a PHP script for searching and
+# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
+# and searching needs to be provided by external tools. See the section
+# "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SERVER_BASED_SEARCH    = NO
+
+# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
+# script for searching. Instead the search results are written to an XML file
+# which needs to be processed by an external indexer. Doxygen will invoke an
+# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
+# search results.
+#
+# Doxygen ships with an example indexer (doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see:
+# https://xapian.org/).
+#
+# See the section "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH        = NO
+
+# The SEARCHENGINE_URL should point to a search engine hosted by a web server
+# which will return the search results when EXTERNAL_SEARCH is enabled.
+#
+# Doxygen ships with an example indexer (doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see:
+# https://xapian.org/). See the section "External Indexing and Searching" for
+# details.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHENGINE_URL       =
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
+# search data is written to a file for indexing by an external tool. With the
+# SEARCHDATA_FILE tag the name of this file can be specified.
+# The default file is: searchdata.xml.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHDATA_FILE        = searchdata.xml
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
+# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
+# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
+# projects and redirect the results back to the right project.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH_ID     =
+
+# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
+# projects other than the one defined by this configuration file, but that are
+# all added to the same external search index. Each project needs to have a
+# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id of
+# each project to a relative location where the documentation can be found.
+# The format is:
+# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTRA_SEARCH_MAPPINGS  =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output.
+# The default value is: YES.
+
+GENERATE_LATEX         = NO
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_OUTPUT           = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked.
+#
+# Note that when not enabling USE_PDFLATEX the default is latex; when enabling
+# USE_PDFLATEX the default is pdflatex, and if latex is chosen in the latter
+# case it is overridden by pdflatex. For specific output languages the default
+# may have been set differently; this depends on the implementation of the
+# output language.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_CMD_NAME         = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
+# index for LaTeX.
+# Note: This tag is used in the Makefile / make.bat.
+# See also: LATEX_MAKEINDEX_CMD for the part in the generated output file
+# (.tex).
+# The default file is: makeindex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+MAKEINDEX_CMD_NAME     = makeindex
+
+# The LATEX_MAKEINDEX_CMD tag can be used to specify the command name to
+# generate index for LaTeX. In case there is no backslash (\) as first character
+# it will be automatically added in the LaTeX code.
+# Note: This tag is used in the generated output file (.tex).
+# See also: MAKEINDEX_CMD_NAME for the part in the Makefile / make.bat.
+# The default value is: makeindex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_MAKEINDEX_CMD    = makeindex
+
+# If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+COMPACT_LATEX          = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used by the
+# printer.
+# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
+# 14 inches) and executive (7.25 x 10.5 inches).
+# The default value is: a4.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PAPER_TYPE             = a4
+
+# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
+# that should be included in the LaTeX output. The package can be specified just
+# by its name or with the correct syntax as to be used with the LaTeX
+# \usepackage command. To get the times font for instance you can specify:
+# EXTRA_PACKAGES=times or EXTRA_PACKAGES={times}
+# To use the option intlimits with the amsmath package you can specify:
+# EXTRA_PACKAGES=[intlimits]{amsmath}
+# If left blank no extra packages will be included.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+EXTRA_PACKAGES         =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
+# generated LaTeX document. The header should contain everything until the first
+# chapter. If it is left blank doxygen will generate a standard header. See
+# section "Doxygen usage" for information on how to let doxygen write the
+# default header to a separate file.
+#
+# Note: Only use a user-defined header if you know what you are doing! The
+# following commands have a special meaning inside the header: $title,
+# $datetime, $date, $doxygenversion, $projectname, $projectnumber,
+# $projectbrief, $projectlogo. Doxygen will replace $title with the empty
+# string; for the replacement values of the other commands the user is
+# referred to HTML_HEADER.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HEADER           =
+
+# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
+# generated LaTeX document. The footer should contain everything after the last
+# chapter. If it is left blank doxygen will generate a standard footer. See
+# LATEX_HEADER for more information on how to generate a default footer and what
+# special commands can be used inside the footer.
+#
+# Note: Only use a user-defined footer if you know what you are doing!
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_FOOTER           =
+
+# The LATEX_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# LaTeX style sheets that are included after the standard style sheets created
+# by doxygen. Using this option one can overrule certain style aspects. Doxygen
+# will copy the style sheet files to the output directory.
+# Note: The order of the extra style sheet files is of importance (e.g. the last
+# style sheet in the list overrules the setting of the previous ones in the
+# list).
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_STYLESHEET =
+
+# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the LATEX_OUTPUT output
+# directory. Note that the files will be copied as-is; there are no commands or
+# markers available.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_FILES      =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
+# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
+# contain links (just like the HTML output) instead of page references. This
+# makes the output suitable for online browsing using a PDF viewer.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PDF_HYPERLINKS         = YES
+
+# If the USE_PDFLATEX tag is set to YES, doxygen will use the engine as
+# specified with LATEX_CMD_NAME to generate the PDF file directly from the LaTeX
+# files. Set this option to YES to get higher quality PDF documentation.
+#
+# See also section LATEX_CMD_NAME for selecting the engine.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+USE_PDFLATEX           = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep running
+# if errors occur, instead of asking the user for help. This option is also used
+# when generating formulas in HTML.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BATCHMODE        = NO
+
+# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
+# index chapters (such as File Index, Compound Index, etc.) in the output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HIDE_INDICES     = NO
+
+# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
+# code with syntax highlighting in the LaTeX output.
+#
+# Note that which sources are shown also depends on other settings such as
+# SOURCE_BROWSER.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_SOURCE_CODE      = NO
+
+# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
+# bibliography, e.g. plainnat, or ieeetr. See
+# https://en.wikipedia.org/wiki/BibTeX and \cite for more info.
+# The default value is: plain.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BIB_STYLE        = plain
+
+# If the LATEX_TIMESTAMP tag is set to YES then the footer of each generated
+# page will contain the date and time when the page was generated. Setting this
+# to NO can help when comparing the output of multiple runs.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_TIMESTAMP        = NO
+
+# The LATEX_EMOJI_DIRECTORY tag is used to specify the (relative or absolute)
+# path from which the emoji images will be read. If a relative path is entered,
+# it will be relative to the LATEX_OUTPUT directory. If left blank the
+# LATEX_OUTPUT directory will be used.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EMOJI_DIRECTORY  =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES, doxygen will generate RTF output. The
+# RTF output is optimized for Word 97 and may not look too pretty with other RTF
+# readers/editors.
+# The default value is: NO.
+
+GENERATE_RTF           = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: rtf.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_OUTPUT             = rtf
+
+# If the COMPACT_RTF tag is set to YES, doxygen generates more compact RTF
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+COMPACT_RTF            = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
+# contain hyperlink fields. The RTF file will contain links (just like the HTML
+# output) instead of page references. This makes the output suitable for online
+# browsing using Word or some other Word compatible readers that support those
+# fields.
+#
+# Note: WordPad (write) and others do not support links.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_HYPERLINKS         = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's
+# configuration file, i.e. a series of assignments. You only have to provide
+# replacements, missing definitions are set to their default value.
+#
+# See also section "Doxygen usage" for information on how to generate the
+# default style sheet that doxygen normally uses.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_STYLESHEET_FILE    =
+
+# Set optional variables used in the generation of an RTF document. Syntax is
+# similar to doxygen's configuration file. A template extensions file can be
+# generated using doxygen -e rtf extensionFile.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_EXTENSIONS_FILE    =
+
+# If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code
+# with syntax highlighting in the RTF output.
+#
+# Note that which sources are shown also depends on other settings such as
+# SOURCE_BROWSER.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_SOURCE_CODE        = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES, doxygen will generate man pages for
+# classes and files.
+# The default value is: NO.
+
+GENERATE_MAN           = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it. A directory man3 will be created inside the directory specified by
+# MAN_OUTPUT.
+# The default directory is: man.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_OUTPUT             = man
+
+# The MAN_EXTENSION tag determines the extension that is added to the generated
+# man pages. In case the manual section does not start with a number, the number
+# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
+# optional.
+# The default value is: .3.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_EXTENSION          = .3
+
+# The MAN_SUBDIR tag determines the name of the directory created within
+# MAN_OUTPUT in which the man pages are placed. It defaults to man followed
+# by MAN_EXTENSION with the initial . removed.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_SUBDIR             =
+
+# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
+# will generate one additional man file for each entity documented in the real
+# man page(s). These additional files only source the real man page, but without
+# them the man command would be unable to find the correct page.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_LINKS              = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES, doxygen will generate an XML file that
+# captures the structure of the code including all documentation.
+# The default value is: NO.
+
+GENERATE_XML           = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: xml.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_OUTPUT             = xml
+
+# If the XML_PROGRAMLISTING tag is set to YES, doxygen will dump the program
+# listings (including syntax highlighting and cross-referencing information) to
+# the XML output. Note that enabling this will significantly increase the size
+# of the XML output.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_PROGRAMLISTING     = YES
+
+# If the XML_NS_MEMB_FILE_SCOPE tag is set to YES, doxygen will include
+# namespace members in file scope as well, matching the HTML output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_NS_MEMB_FILE_SCOPE = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the DOCBOOK output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_DOCBOOK tag is set to YES, doxygen will generate Docbook files
+# that can be used to generate PDF.
+# The default value is: NO.
+
+GENERATE_DOCBOOK       = NO
+
+# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
+# front of it.
+# The default directory is: docbook.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_OUTPUT         = docbook
+
+# If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the
+# program listings (including syntax highlighting and cross-referencing
+# information) in the DOCBOOK output. Note that enabling this will significantly
+# increase the size of the DOCBOOK output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_PROGRAMLISTING = NO
+
+#---------------------------------------------------------------------------
+# Configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an
+# AutoGen Definitions (see http://autogen.sourceforge.net/) file that captures
+# the structure of the code including all documentation. Note that this feature
+# is still experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_AUTOGEN_DEF   = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES, doxygen will generate a Perl module
+# file that captures the structure of the code including all documentation.
+#
+# Note that this feature is still experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_PERLMOD       = NO
+
+# If the PERLMOD_LATEX tag is set to YES, doxygen will generate the necessary
+# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
+# output from the Perl module output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_LATEX          = NO
+
+# If the PERLMOD_PRETTY tag is set to YES, the Perl module output will be nicely
+# formatted so it can be parsed by a human reader. This is useful if you want to
+# understand what is going on. On the other hand, if this tag is set to NO, the
+# size of the Perl module output will be much smaller and Perl will parse it
+# just the same.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_PRETTY         = YES
+
+# The names of the make variables in the generated doxyrules.make file are
+# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
+# so different doxyrules.make files included by the same Makefile don't
+# overwrite each other's variables.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES, doxygen will evaluate all
+# C-preprocessor directives found in the sources and include files.
+# The default value is: YES.
+
+ENABLE_PREPROCESSING   = YES
+
+# If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names
+# in the source code. If set to NO, only conditional compilation will be
+# performed. Macro expansion can be done in a controlled way by setting
+# EXPAND_ONLY_PREDEF to YES.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+MACRO_EXPANSION        = YES
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
+# the macro expansion is limited to the macros specified with the PREDEFINED and
+# EXPAND_AS_DEFINED tags.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_ONLY_PREDEF     = YES
+
+# If the SEARCH_INCLUDES tag is set to YES, the include files in the
+# INCLUDE_PATH will be searched if a #include is found.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SEARCH_INCLUDES        = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by the
+# preprocessor.
+# This tag requires that the tag SEARCH_INCLUDES is set to YES.
+
+INCLUDE_PATH           =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will be
+# used.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+INCLUDE_FILE_PATTERNS  =
+
+# The PREDEFINED tag can be used to specify one or more macro names that are
+# defined before the preprocessor is started (similar to the -D option of e.g.
+# gcc). The argument of the tag is a list of macros of the form: name or
+# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
+# is assumed. To prevent a macro definition from being undefined via #undef or
+# recursively expanded use the := operator instead of the = operator.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+PREDEFINED             = H5_HAVE_PARALLEL H5_USE_EIGEN H5_USE_BOOST H5_USE_OPENCV H5_USE_XTENSOR H5_USE_HALF_FLOAT H5_DEPRECATED(x):=
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
+# tag can be used to specify a list of macro names that should be expanded. The
+# macro definition that is found in the sources will be used. Use the PREDEFINED
+# tag if you want to use a different macro definition that overrules the
+# definition found in the source code.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_AS_DEFINED      =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
+# remove all references to function-like macros that are alone on a line, have
+# an all uppercase name, and do not end with a semicolon. Such function macros
+# are typically used for boiler-plate code, and will confuse the parser if not
+# removed.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SKIP_FUNCTION_MACROS   = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES tag can be used to specify one or more tag files. For each tag
+# file the location of the external documentation should be added. The format of
+# a tag file without this location is as follows:
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where loc1 and loc2 can be relative or absolute paths or URLs. See the
+# section "Linking to external documentation" for more information about the use
+# of tag files.
+# Note: Each tag file must have a unique name (where the name does NOT include
+# the path). If a tag file is not located in the directory in which doxygen is
+# run, you must also specify the path to the tagfile here.
+
+TAGFILES               =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
+# tag file that is based on the input files it reads. See section "Linking to
+# external documentation" for more information about the usage of tag files.
+
+GENERATE_TAGFILE       =
+
+# If the ALLEXTERNALS tag is set to YES, all external class will be listed in
+# the class index. If set to NO, only the inherited external classes will be
+# listed.
+# The default value is: NO.
+
+ALLEXTERNALS           = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES, all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will be
+# listed.
+# The default value is: YES.
+
+EXTERNAL_GROUPS        = YES
+
+# If the EXTERNAL_PAGES tag is set to YES, all external pages will be listed in
+# the related pages index. If set to NO, only the current project's pages will
+# be listed.
+# The default value is: YES.
+
+EXTERNAL_PAGES         = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES, doxygen will generate a class diagram
+# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
+# NO turns the diagrams off. Note that this option also works with HAVE_DOT
+# disabled, but it is recommended to install and use dot, since it yields more
+# powerful graphs.
+# The default value is: YES.
+
+CLASS_DIAGRAMS         = YES
+
+# You can include diagrams made with dia in doxygen documentation. Doxygen will
+# then run dia to produce the diagram and insert it in the documentation. The
+# DIA_PATH tag allows you to specify the directory where the dia binary resides.
+# If left empty dia is assumed to be found in the default search path.
+
+DIA_PATH               =
+
+# If set to YES the inheritance and collaboration graphs will hide inheritance
+# and usage relations if the target is undocumented or is not a class.
+# The default value is: YES.
+
+HIDE_UNDOC_RELATIONS   = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz (see:
+# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
+# Bell Labs. The other options in this section have no effect if this option is
+# set to NO
+# The default value is: NO.
+
+HAVE_DOT               = YES
+
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
+# to run in parallel. When set to 0 doxygen will base this on the number of
+# processors available in the system. You can set it explicitly to a value
+# larger than 0 to get control over the balance between CPU load and processing
+# speed.
+# Minimum value: 0, maximum value: 32, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_NUM_THREADS        = 0
+
+# When you want a differently looking font in the dot files that doxygen
+# generates you can specify the font name using DOT_FONTNAME. You need to make
+# sure dot is able to find the font, which can be done by putting it in a
+# standard location or by setting the DOTFONTPATH environment variable or by
+# setting DOT_FONTPATH to the directory containing the font.
+# The default value is: Helvetica.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTNAME           = Helvetica
+
+# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
+# dot graphs.
+# Minimum value: 4, maximum value: 24, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTSIZE           = 10
+
+# By default doxygen will tell dot to use the default font as specified with
+# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
+# the path where dot can find it using this tag.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTPATH           =
+
+# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
+# each documented class showing the direct and indirect inheritance relations.
+# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CLASS_GRAPH            = YES
+
+# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
+# graph for each documented class showing the direct and indirect implementation
+# dependencies (inheritance, containment, and class references variables) of the
+# class with other documented classes.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+COLLABORATION_GRAPH    = YES
+
+# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
+# groups, showing the direct groups dependencies.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GROUP_GRAPHS           = YES
+
+# If the UML_LOOK tag is set to YES, doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+UML_LOOK               = NO
+
+# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
+# class node. If there are many fields or methods and many nodes the graph may
+# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
+# number of items for each type to make the size more manageable. Set this to 0
+# for no limit. Note that the threshold may be exceeded by 50% before the limit
+# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
+# but if the number exceeds 15, the total amount of fields shown is limited to
+# 10.
+# Minimum value: 0, maximum value: 100, default value: 10.
+# This tag requires that the tag UML_LOOK is set to YES.
+
+UML_LIMIT_NUM_FIELDS   = 10
+
+# If the DOT_UML_DETAILS tag is set to NO, doxygen will show attributes and
+# methods without types and arguments in the UML graphs. If the DOT_UML_DETAILS
+# tag is set to YES, doxygen will add type and arguments for attributes and
+# methods in the UML graphs. If the DOT_UML_DETAILS tag is set to NONE, doxygen
+# will not generate fields with class member information in the UML graphs. The
+# class diagrams will look similar to the default class diagrams but using UML
+# notation for the relationships.
+# Possible values are: NO, YES and NONE.
+# The default value is: NO.
+# This tag requires that the tag UML_LOOK is set to YES.
+
+DOT_UML_DETAILS        = NO
+
+# The DOT_WRAP_THRESHOLD tag can be used to set the maximum number of characters
+# to display on a single line. If the actual line length exceeds this threshold
+# significantly it will be wrapped across multiple lines. Some heuristics are
+# applied to avoid ugly line breaks.
+# Minimum value: 0, maximum value: 1000, default value: 17.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_WRAP_THRESHOLD     = 17
+
+# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
+# collaboration graphs will show the relations between templates and their
+# instances.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+TEMPLATE_RELATIONS     = NO
+
+# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
+# YES then doxygen will generate a graph for each documented file showing the
+# direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDE_GRAPH          = YES
+
+# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
+# set to YES then doxygen will generate a graph for each documented file showing
+# the direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDED_BY_GRAPH      = YES
+
+# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable call graphs for selected
+# functions only using the \callgraph command. Disabling a call graph can be
+# accomplished by means of the command \hidecallgraph.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALL_GRAPH             = NO
+
+# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable caller graphs for selected
+# functions only using the \callergraph command. Disabling a caller graph can be
+# accomplished by means of the command \hidecallergraph.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALLER_GRAPH           = NO
+
+# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will show a
+# graphical hierarchy of all classes instead of a textual one.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GRAPHICAL_HIERARCHY    = YES
+
+# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
+# dependencies a directory has on other directories in a graphical way. The
+# dependency relations are determined by the #include relations between the
+# files in the directories.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DIRECTORY_GRAPH        = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. For an explanation of the image formats see the section
+# output formats in the documentation of the dot tool (Graphviz (see:
+# http://www.graphviz.org/)).
+# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
+# to make the SVG files visible in IE 9+ (other browsers do not have this
+# requirement).
+# Possible values are: png, jpg, gif, svg, png:gd, png:gd:gd, png:cairo,
+# png:cairo:gd, png:cairo:cairo, png:cairo:gdiplus, png:gdiplus and
+# png:gdiplus:gdiplus.
+# The default value is: png.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_IMAGE_FORMAT       = png
+
+# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
+# enable generation of interactive SVG images that allow zooming and panning.
+#
+# Note that this requires a modern browser other than Internet Explorer. Tested
+# and working are Firefox, Chrome, Safari, and Opera.
+# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
+# the SVG files visible. Older versions of IE do not have SVG support.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INTERACTIVE_SVG        = NO
+
+# The DOT_PATH tag can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_PATH               =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the \dotfile
+# command).
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOTFILE_DIRS           =
+
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the \mscfile
+# command).
+
+MSCFILE_DIRS           =
+
+# The DIAFILE_DIRS tag can be used to specify one or more directories that
+# contain dia files that are included in the documentation (see the \diafile
+# command).
+
+DIAFILE_DIRS           =
+
+# When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the
+# path where java can find the plantuml.jar file. If left blank, it is assumed
+# PlantUML is not used or called during a preprocessing step. Doxygen will
+# generate a warning when it encounters a \startuml command in this case and
+# will not generate output for the diagram.
+
+PLANTUML_JAR_PATH      =
+
+# When using plantuml, the PLANTUML_CFG_FILE tag can be used to specify a
+# configuration file for plantuml.
+
+PLANTUML_CFG_FILE      =
+
+# When using plantuml, the specified paths are searched for files specified by
+# the !include statement in a plantuml block.
+
+PLANTUML_INCLUDE_PATH  =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
+# that will be shown in the graph. If the number of nodes in a graph becomes
+# larger than this value, doxygen will truncate the graph, which is visualized
+# by representing a node as a red box. Note that doxygen if the number of direct
+# children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note that
+# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+# Minimum value: 0, maximum value: 10000, default value: 50.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_GRAPH_MAX_NODES    = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
+# generated by dot. A depth value of 3 means that only nodes reachable from the
+# root by following a path via at most 3 edges will be shown. Nodes that lie
+# further from the root node will be omitted. Note that setting this option to 1
+# or 2 may greatly reduce the computation time needed for large code bases. Also
+# note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+# Minimum value: 0, maximum value: 1000, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+MAX_DOT_GRAPH_DEPTH    = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not seem
+# to support this out of the box.
+#
+# Warning: Depending on the platform used, enabling this option may lead to
+# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
+# read).
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_TRANSPARENT        = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10) support
+# this, this feature is disabled by default.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_MULTI_TARGETS      = NO
+
+# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
+# explaining the meaning of the various boxes and arrows in the dot generated
+# graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GENERATE_LEGEND        = YES
+
+# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate
+# files that are used to generate the various graphs.
+#
+# Note: This setting is not only used for dot files but also for msc and
+# plantuml temporary files.
+# The default value is: YES.
+
+DOT_CLEANUP            = YES
diff --git a/packages/HighFive/doc/DoxygenLayout.xml b/packages/HighFive/doc/DoxygenLayout.xml
new file mode 100644
index 0000000000000000000000000000000000000000..8fee2ee56a2e85e58efa9dcb7bb67b1a1425a23b
--- /dev/null
+++ b/packages/HighFive/doc/DoxygenLayout.xml
@@ -0,0 +1,241 @@
+<doxygenlayout version="1.0">
+  <!-- Generated by doxygen 1.9.3 -->
+  <!-- Navigation index tabs for HTML output -->
+  <navindex>
+    <tab type="mainpage" visible="yes" title=""/>
+    <tab type="pages" visible="yes" title="" intro=""/>
+    <tab type="modules" visible="yes" title="" intro=""/>
+    <tab type="namespaces" visible="yes" title="">
+      <tab type="namespacelist" visible="yes" title="" intro=""/>
+      <tab type="namespacemembers" visible="yes" title="" intro=""/>
+    </tab>
+    <tab type="concepts" visible="yes" title="">
+    </tab>
+    <tab type="interfaces" visible="yes" title="">
+      <tab type="interfacelist" visible="yes" title="" intro=""/>
+      <tab type="interfaceindex" visible="$ALPHABETICAL_INDEX" title=""/> 
+      <tab type="interfacehierarchy" visible="yes" title="" intro=""/>
+    </tab>
+    <tab type="classes" visible="yes" title="">
+      <tab type="classlist" visible="yes" title="" intro=""/>
+      <tab type="classindex" visible="$ALPHABETICAL_INDEX" title=""/> 
+      <tab type="hierarchy" visible="yes" title="" intro=""/>
+      <tab type="classmembers" visible="yes" title="" intro=""/>
+    </tab>
+    <tab type="structs" visible="yes" title="">
+      <tab type="structlist" visible="yes" title="" intro=""/>
+      <tab type="structindex" visible="$ALPHABETICAL_INDEX" title=""/> 
+    </tab>
+    <tab type="exceptions" visible="yes" title="">
+      <tab type="exceptionlist" visible="yes" title="" intro=""/>
+      <tab type="exceptionindex" visible="$ALPHABETICAL_INDEX" title=""/> 
+      <tab type="exceptionhierarchy" visible="yes" title="" intro=""/>
+    </tab>
+    <tab type="files" visible="yes" title="">
+      <tab type="filelist" visible="yes" title="" intro=""/>
+      <tab type="globals" visible="yes" title="" intro=""/>
+    </tab>
+    <tab type="examples" visible="yes" title="" intro=""/>  
+    <tab type="user" visible="yes" url='https://github.com/BlueBrain/HighFive' title="Github Page" intro=""/>
+  </navindex>
+
+  <!-- Layout definition for a class page -->
+  <class>
+    <briefdescription visible="yes"/>
+    <includes visible="$SHOW_HEADERFILE"/>
+    <inheritancegraph visible="$CLASS_GRAPH"/>
+    <collaborationgraph visible="$COLLABORATION_GRAPH"/>
+    <memberdecl>
+      <nestedclasses visible="yes" title=""/>
+      <publictypes title=""/>
+      <services title=""/>
+      <interfaces title=""/>
+      <publicslots title=""/>
+      <signals title=""/>
+      <publicmethods title=""/>
+      <publicstaticmethods title=""/>
+      <publicattributes title=""/>
+      <publicstaticattributes title=""/>
+      <protectedtypes title=""/>
+      <protectedslots title=""/>
+      <protectedmethods title=""/>
+      <protectedstaticmethods title=""/>
+      <protectedattributes title=""/>
+      <protectedstaticattributes title=""/>
+      <packagetypes title=""/>
+      <packagemethods title=""/>
+      <packagestaticmethods title=""/>
+      <packageattributes title=""/>
+      <packagestaticattributes title=""/>
+      <properties title=""/>
+      <events title=""/>
+      <privatetypes title=""/>
+      <privateslots title=""/>
+      <privatemethods title=""/>
+      <privatestaticmethods title=""/>
+      <privateattributes title=""/>
+      <privatestaticattributes title=""/>
+      <friends title=""/>
+      <related title="" subtitle=""/>
+      <membergroups visible="yes"/>
+    </memberdecl>
+    <detaileddescription title=""/>
+    <memberdef>
+      <inlineclasses title=""/>
+      <typedefs title=""/>
+      <enums title=""/>
+      <services title=""/>
+      <interfaces title=""/>
+      <constructors title=""/>
+      <functions title=""/>
+      <related title=""/>
+      <variables title=""/>
+      <properties title=""/>
+      <events title=""/>
+    </memberdef>
+    <allmemberslink visible="yes"/>
+    <usedfiles visible="$SHOW_USED_FILES"/>
+    <authorsection visible="yes"/>
+  </class>
+
+  <!-- Layout definition for a namespace page -->
+  <namespace>
+    <briefdescription visible="yes"/>
+    <memberdecl>
+      <nestednamespaces visible="yes" title=""/>
+      <constantgroups visible="yes" title=""/>
+      <interfaces visible="yes" title=""/>
+      <classes visible="yes" title=""/>
+      <concepts visible="yes" title=""/>
+      <structs visible="yes" title=""/>
+      <exceptions visible="yes" title=""/>
+      <typedefs title=""/>
+      <sequences title=""/>
+      <dictionaries title=""/>
+      <enums title=""/>
+      <functions title=""/>
+      <variables title=""/>
+      <membergroups visible="yes"/>
+    </memberdecl>
+    <detaileddescription title=""/>
+    <memberdef>
+      <inlineclasses title=""/>
+      <typedefs title=""/>
+      <sequences title=""/>
+      <dictionaries title=""/>
+      <enums title=""/>
+      <functions title=""/>
+      <variables title=""/>
+    </memberdef>
+    <authorsection visible="yes"/>
+  </namespace>
+
+  <!-- Layout definition for a concept page -->
+  <concept>
+    <briefdescription visible="yes"/>
+    <includes visible="$SHOW_HEADERFILE"/>
+    <definition visible="yes" title=""/>
+    <detaileddescription title=""/>
+    <authorsection visible="yes"/>
+  </concept>
+
+  <!-- Layout definition for a file page -->
+  <file>
+    <briefdescription visible="yes"/>
+    <includes visible="$SHOW_INCLUDE_FILES"/>
+    <includegraph visible="$INCLUDE_GRAPH"/>
+    <includedbygraph visible="$INCLUDED_BY_GRAPH"/>
+    <sourcelink visible="yes"/>
+    <memberdecl>
+      <interfaces visible="yes" title=""/>
+      <classes visible="yes" title=""/>
+      <structs visible="yes" title=""/>
+      <exceptions visible="yes" title=""/>
+      <namespaces visible="yes" title=""/>
+      <concepts visible="yes" title=""/>
+      <constantgroups visible="yes" title=""/>
+      <defines title=""/>
+      <typedefs title=""/>
+      <sequences title=""/>
+      <dictionaries title=""/>
+      <enums title=""/>
+      <functions title=""/>
+      <variables title=""/>
+      <membergroups visible="yes"/>
+    </memberdecl>
+    <detaileddescription title=""/>
+    <memberdef>
+      <inlineclasses title=""/>
+      <defines title=""/>
+      <typedefs title=""/>
+      <sequences title=""/>
+      <dictionaries title=""/>
+      <enums title=""/>
+      <functions title=""/>
+      <variables title=""/>
+    </memberdef>
+    <authorsection/>
+  </file>
+
+  <!-- Layout definition for a group page -->
+  <group>
+    <briefdescription visible="yes"/>
+    <groupgraph visible="$GROUP_GRAPHS"/>
+    <detaileddescription title=""/>
+    <memberdecl>
+      <nestedgroups visible="yes" title=""/>
+      <dirs visible="yes" title=""/>
+      <files visible="yes" title=""/>
+      <namespaces visible="yes" title=""/>
+      <concepts visible="yes" title=""/>
+      <classes visible="yes" title=""/>
+      <defines title=""/>
+      <typedefs title=""/>
+      <sequences title=""/>
+      <dictionaries title=""/>
+      <enums title=""/>
+      <enumvalues title=""/>
+      <functions title=""/>
+      <variables title=""/>
+      <signals title=""/>
+      <publicslots title=""/>
+      <protectedslots title=""/>
+      <privateslots title=""/>
+      <events title=""/>
+      <properties title=""/>
+      <friends title=""/>
+      <membergroups visible="yes"/>
+    </memberdecl>
+    <memberdef>
+      <pagedocs/>
+      <inlineclasses title=""/>
+      <defines title=""/>
+      <typedefs title=""/>
+      <sequences title=""/>
+      <dictionaries title=""/>
+      <enums title=""/>
+      <enumvalues title=""/>
+      <functions title=""/>
+      <variables title=""/>
+      <signals title=""/>
+      <publicslots title=""/>
+      <protectedslots title=""/>
+      <privateslots title=""/>
+      <events title=""/>
+      <properties title=""/>
+      <friends title=""/>
+    </memberdef>
+    <authorsection visible="yes"/>
+  </group>
+
+  <!-- Layout definition for a directory page -->
+  <directory>
+    <briefdescription visible="yes"/>
+    <directorygraph visible="yes"/>
+    <memberdecl>
+      <dirs visible="yes"/>
+      <files visible="yes"/>
+    </memberdecl>
+    <detaileddescription title=""/>
+  </directory>
+</doxygenlayout>
diff --git a/packages/HighFive/doc/developer_guide.md b/packages/HighFive/doc/developer_guide.md
new file mode 100644
index 0000000000000000000000000000000000000000..3017289b5172bbb443829da0daf6bcaa04413e54
--- /dev/null
+++ b/packages/HighFive/doc/developer_guide.md
@@ -0,0 +1,93 @@
+# Developer Guide
+First clone the repository, remembering the `--recursive` flag:
+```bash
+git clone --recursive git@github.com:BlueBrain/HighFive.git
+```
+If you forgot the flag, you can recover with:
+```bash
+git submodule update --init --recursive
+```
+
+One remark on submodules: each HighFive commit expects that the submodules are
+at a particular commit. The catch is that performing `git checkout` will not
+update the submodules automatically. Hence, sometimes a `git submodule update
+--recursive` might be needed to check out the expected version of the
+submodules.
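+
+For example, after switching to a different commit (the tag name here is just
+illustrative):
+```bash
+git checkout v2.8.0                # some other HighFive commit
+git submodule update --recursive   # sync submodules to what that commit expects
+```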
+
+## Compiling and Running the Tests
+The instructions for compiling with examples and unit-tests are:
+
+```bash
+cmake -B build -DCMAKE_BUILD_TYPE={Debug,Release} .
+cmake --build build --parallel
+ctest --test-dir build
+```
+
+You might want to turn off Boost with `-DHIGHFIVE_USE_BOOST=Off` or turn on
+other optional dependencies.
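+
+For instance, a minimal sketch of a Release configuration with the optional
+Boost integration disabled:
+
+```bash
+cmake -B build -DCMAKE_BUILD_TYPE=Release -DHIGHFIVE_USE_BOOST=Off .
+cmake --build build --parallel
+```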
+
+## Contributing
+There are numerous HDF5 features that haven't been wrapped yet. HighFive is a
+collaborative effort to slowly cover ever larger parts of the HDF5 library.
+The process of contributing is to fork the repository and then create a PR.
+Please ensure that any new API is appropriately documented and covered with
+tests.
+
+### Code formatting
+The project is formatted using clang-format version 12.0.1 and CI will complain
+if a commit isn't formatted accordingly. The `.clang-format` is at the root of
+the git repository. Conveniently, `clang-format` is available via `pip`:
+
+```bash
+python -m venv venv
+source venv/bin/activate
+
+pip install clang-format==12.0.1
+```
+
+The changed lines can be formatted with `git-clang-format`, e.g. to format all lines changed compared to master:
+
+```bash
+git-clang-format master
+```
+(Add `-f` to allow formatting unstaged changes, if you trust it not to
+destroy your changes.)
+
+## Releasing HighFive
+Before releasing a new version perform the following:
+
+* Update `CHANGELOG.md` and `AUTHORS.txt` as required.
+* Update `CMakeLists.txt` and `include/highfive/H5Version.hpp`.
+* Follow semantic versioning when deciding the next version number.
+* Check that
+  [HighFive-testing](https://github.com/BlueBrain/HighFive-testing/actions) ran
+  recently.
+
+At this point there should be a commit on master which will be the release
+candidate. Don't tag it yet.
+
+The next step is to update the [HighFive/spack](https://github.com/BlueBrain/spack)
+recipe such that the proposed version points to the release candidate using the
+SHA of that commit. The recipe will look something like this:
+
+```python
+    # ...
+
+    version("2.8.0", commit="094400f22145bcdcd2726ce72888d9d1c21e7068")
+    version("2.7.1", sha256="25b4c51a94d1e670dc93b9b73f51e79b65d8ff49bcd6e5d5582d5ecd2789a249")
+    version("2.7.0", sha256="8e05672ddf81a59ce014b1d065bd9a8c5034dbd91a5c2578e805ef880afa5907")
+    # ...
+```
+
+Push the changes to the BlueBrain spack repository. This will trigger building
+all BBP dependencies of HighFive, i.e. another integration test. Don't actually
+merge this commit yet.
+
+Now that we know that the integration test ran, and all BBP software can be
+built with the proposed version of HighFive, we can proceed and create the
+release. Once this is done perform a final round of updates:
+
+* Download the archive (`*.tar.gz`) and compute its SHA256, as sketched below.
+* Update BlueBrain Spack recipe to use the archive and not the Git commit.
+* Update the upstream Spack recipe.
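+
+For example, the SHA256 can be computed from the downloaded archive with
+`sha256sum` (the file name here is illustrative):
+
+```bash
+sha256sum HighFive-2.8.0.tar.gz
+```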
+
diff --git a/packages/HighFive/doc/doxygen-awesome-css/doxygen-awesome.css b/packages/HighFive/doc/doxygen-awesome-css/doxygen-awesome.css
new file mode 100644
index 0000000000000000000000000000000000000000..08238977a627938adbae1ea06fb174b4ecaafde3
--- /dev/null
+++ b/packages/HighFive/doc/doxygen-awesome-css/doxygen-awesome.css
@@ -0,0 +1,2530 @@
+/**
+
+Doxygen Awesome
+https://github.com/jothepro/doxygen-awesome-css
+
+MIT License
+
+Copyright (c) 2021 - 2023 jothepro
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+*/
+
+html {
+    /* primary theme color. This will affect the entire website's color scheme: links, arrows, labels, ... */
+    --primary-color: #1779c4;
+    --primary-dark-color: #335c80;
+    --primary-light-color: #70b1e9;
+
+    /* page base colors */
+    --page-background-color: #ffffff;
+    --page-foreground-color: #2f4153;
+    --page-secondary-foreground-color: #6f7e8e;
+
+    /* color for all separators on the website: hr, borders, ... */
+    --separator-color: #dedede;
+
+    /* border radius for all rounded components. Will affect many components, like dropdowns, memitems, codeblocks, ... */
+    --border-radius-large: 8px;
+    --border-radius-small: 4px;
+    --border-radius-medium: 6px;
+
+    /* default spacings. Most components reference these values for spacing, to provide uniform spacing on the page. */
+    --spacing-small: 5px;
+    --spacing-medium: 10px;
+    --spacing-large: 16px;
+
+    /* default box shadow used for raising an element above the normal content. Used in dropdowns, search results, ... */
+    --box-shadow: 0 2px 8px 0 rgba(0,0,0,.075);
+
+    --odd-color: rgba(0,0,0,.028);
+
+    /* font-families. will affect all text on the website
+     * font-family: the normal font for text, headlines, menus
+     * font-family-monospace: used for preformatted text in memtitle, code, fragments
+     */
+    --font-family: -apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen,Ubuntu,Cantarell,Fira Sans,Droid Sans,Helvetica Neue,sans-serif;
+    --font-family-monospace: ui-monospace,SFMono-Regular,SF Mono,Menlo,Consolas,Liberation Mono,monospace;
+
+    /* font sizes */
+    --page-font-size: 15.6px;
+    --navigation-font-size: 14.4px;
+    --toc-font-size: 13.4px;
+    --code-font-size: 14px; /* affects code, fragment */
+    --title-font-size: 22px;
+
+    /* content text properties. These only affect the page content, not the navigation or any other ui elements */
+    --content-line-height: 27px;
+    /* The content is centered and constrained in its width. To make the content fill the whole page, set the variable to auto. */
+    --content-maxwidth: 1050px;
+    --table-line-height: 24px;
+    --toc-sticky-top: var(--spacing-medium);
+    --toc-width: 200px;
+    --toc-max-height: calc(100vh - 2 * var(--spacing-medium) - 85px);
+
+    /* colors for various content boxes: @warning, @note, @deprecated, @bug */
+    --warning-color: #f8d1cc;
+    --warning-color-dark: #b61825;
+    --warning-color-darker: #75070f;
+    --note-color: #faf3d8;
+    --note-color-dark: #f3a600;
+    --note-color-darker: #5f4204;
+    --todo-color: #e4f3ff;
+    --todo-color-dark: #1879C4;
+    --todo-color-darker: #274a5c;
+    --deprecated-color: #ecf0f3;
+    --deprecated-color-dark: #5b6269;
+    --deprecated-color-darker: #43454a;
+    --bug-color: #e4dafd;
+    --bug-color-dark: #5b2bdd;
+    --bug-color-darker: #2a0d72;
+    --invariant-color: #d8f1e3;
+    --invariant-color-dark: #44b86f;
+    --invariant-color-darker: #265532;
+
+    /* blockquote colors */
+    --blockquote-background: #f8f9fa;
+    --blockquote-foreground: #636568;
+
+    /* table colors */
+    --tablehead-background: #f1f1f1;
+    --tablehead-foreground: var(--page-foreground-color);
+
+    /* menu-display: block | none
+     * Visibility of the top navigation on screens >= 768px. On smaller screens the menu is always visible.
+     * `GENERATE_TREEVIEW` MUST be enabled!
+     */
+    --menu-display: block;
+
+    --menu-focus-foreground: var(--page-background-color);
+    --menu-focus-background: var(--primary-color);
+    --menu-selected-background: rgba(0,0,0,.05);
+
+
+    --header-background: var(--page-background-color);
+    --header-foreground: var(--page-foreground-color);
+
+    /* searchbar colors */
+    --searchbar-background: var(--side-nav-background);
+    --searchbar-foreground: var(--page-foreground-color);
+
+    /* searchbar size
+     * (`searchbar-width` is only applied on screens >= 768px;
+     * on smaller screens the searchbar will always fill the entire screen width) */
+    --searchbar-height: 33px;
+    --searchbar-width: 210px;
+    --searchbar-border-radius: var(--searchbar-height);
+
+    /* code block colors */
+    --code-background: #f5f5f5;
+    --code-foreground: var(--page-foreground-color);
+
+    /* fragment colors */
+    --fragment-background: #F8F9FA;
+    --fragment-foreground: #37474F;
+    --fragment-keyword: #bb6bb2;
+    --fragment-keywordtype: #8258b3;
+    --fragment-keywordflow: #d67c3b;
+    --fragment-token: #438a59;
+    --fragment-comment: #969696;
+    --fragment-link: #5383d6;
+    --fragment-preprocessor: #46aaa5;
+    --fragment-linenumber-color: #797979;
+    --fragment-linenumber-background: #f4f4f5;
+    --fragment-linenumber-border: #e3e5e7;
+    --fragment-lineheight: 20px;
+
+    /* sidebar navigation (treeview) colors */
+    --side-nav-background: #fbfbfb;
+    --side-nav-foreground: var(--page-foreground-color);
+    --side-nav-arrow-opacity: 0;
+    --side-nav-arrow-hover-opacity: 0.9;
+
+    --toc-background: var(--side-nav-background);
+    --toc-foreground: var(--side-nav-foreground);
+
+    /* height of an item in any tree / collapsible table */
+    --tree-item-height: 30px;
+
+    --memname-font-size: var(--code-font-size);
+    --memtitle-font-size: 18px;
+
+    --webkit-scrollbar-size: 7px;
+    --webkit-scrollbar-padding: 4px;
+    --webkit-scrollbar-color: var(--separator-color);
+}
+
+@media screen and (max-width: 767px) {
+    html {
+        --page-font-size: 16px;
+        --navigation-font-size: 16px;
+        --toc-font-size: 15px;
+        --code-font-size: 15px; /* affects code, fragment */
+        --title-font-size: 22px;
+    }
+}
+
+@media (prefers-color-scheme: dark) {
+    html:not(.light-mode) {
+        color-scheme: dark;
+
+        --primary-color: #1982d2;
+        --primary-dark-color: #86a9c4;
+        --primary-light-color: #4779ac;
+
+        --box-shadow: 0 2px 8px 0 rgba(0,0,0,.35);
+
+        --odd-color: rgba(100,100,100,.06);
+
+        --menu-selected-background: rgba(0,0,0,.4);
+
+        --page-background-color: #1C1D1F;
+        --page-foreground-color: #d2dbde;
+        --page-secondary-foreground-color: #859399;
+        --separator-color: #38393b;
+        --side-nav-background: #252628;
+
+        --code-background: #2a2c2f;
+
+        --tablehead-background: #2a2c2f;
+
+        --blockquote-background: #222325;
+        --blockquote-foreground: #7e8c92;
+
+        --warning-color: #2e1917;
+        --warning-color-dark: #ad2617;
+        --warning-color-darker: #f5b1aa;
+        --note-color: #3b2e04;
+        --note-color-dark: #f1b602;
+        --note-color-darker: #ceb670;
+        --todo-color: #163750;
+        --todo-color-dark: #1982D2;
+        --todo-color-darker: #dcf0fa;
+        --deprecated-color: #2e323b;
+        --deprecated-color-dark: #738396;
+        --deprecated-color-darker: #abb0bd;
+        --bug-color: #2a2536;
+        --bug-color-dark: #7661b3;
+        --bug-color-darker: #ae9ed6;
+        --invariant-color: #303a35;
+        --invariant-color-dark: #76ce96;
+        --invariant-color-darker: #cceed5;
+
+        --fragment-background: #282c34;
+        --fragment-foreground: #dbe4eb;
+        --fragment-keyword: #cc99cd;
+        --fragment-keywordtype: #ab99cd;
+        --fragment-keywordflow: #e08000;
+        --fragment-token: #7ec699;
+        --fragment-comment: #999999;
+        --fragment-link: #98c0e3;
+        --fragment-preprocessor: #65cabe;
+        --fragment-linenumber-color: #cccccc;
+        --fragment-linenumber-background: #35393c;
+        --fragment-linenumber-border: #1f1f1f;
+    }
+}
+
+/* dark-mode variables are defined twice to support dark mode both with and without doxygen-awesome-darkmode-toggle.js */
+html.dark-mode {
+    color-scheme: dark;
+
+    --primary-color: #1982d2;
+    --primary-dark-color: #86a9c4;
+    --primary-light-color: #4779ac;
+
+    --box-shadow: 0 2px 8px 0 rgba(0,0,0,.30);
+
+    --odd-color: rgba(100,100,100,.06);
+
+    --menu-selected-background: rgba(0,0,0,.4);
+
+    --page-background-color: #1C1D1F;
+    --page-foreground-color: #d2dbde;
+    --page-secondary-foreground-color: #859399;
+    --separator-color: #38393b;
+    --side-nav-background: #252628;
+
+    --code-background: #2a2c2f;
+
+    --tablehead-background: #2a2c2f;
+
+    --blockquote-background: #222325;
+    --blockquote-foreground: #7e8c92;
+
+    --warning-color: #2e1917;
+    --warning-color-dark: #ad2617;
+    --warning-color-darker: #f5b1aa;
+    --note-color: #3b2e04;
+    --note-color-dark: #f1b602;
+    --note-color-darker: #ceb670;
+    --todo-color: #163750;
+    --todo-color-dark: #1982D2;
+    --todo-color-darker: #dcf0fa;
+    --deprecated-color: #2e323b;
+    --deprecated-color-dark: #738396;
+    --deprecated-color-darker: #abb0bd;
+    --bug-color: #2a2536;
+    --bug-color-dark: #7661b3;
+    --bug-color-darker: #ae9ed6;
+    --invariant-color: #303a35;
+    --invariant-color-dark: #76ce96;
+    --invariant-color-darker: #cceed5;
+
+    --fragment-background: #282c34;
+    --fragment-foreground: #dbe4eb;
+    --fragment-keyword: #cc99cd;
+    --fragment-keywordtype: #ab99cd;
+    --fragment-keywordflow: #e08000;
+    --fragment-token: #7ec699;
+    --fragment-comment: #999999;
+    --fragment-link: #98c0e3;
+    --fragment-preprocessor: #65cabe;
+    --fragment-linenumber-color: #cccccc;
+    --fragment-linenumber-background: #35393c;
+    --fragment-linenumber-border: #1f1f1f;
+}
+
+body {
+    color: var(--page-foreground-color);
+    background-color: var(--page-background-color);
+    font-size: var(--page-font-size);
+}
+
+body, table, div, p, dl, #nav-tree .label, .title,
+.sm-dox a, .sm-dox a:hover, .sm-dox a:focus, #projectname,
+.SelectItem, #MSearchField, .navpath li.navelem a,
+.navpath li.navelem a:hover, p.reference, p.definition {
+    font-family: var(--font-family);
+}
+
+h1, h2, h3, h4, h5 {
+    margin-top: .9em;
+    font-weight: 600;
+    line-height: initial;
+}
+
+p, div, table, dl, p.reference, p.definition {
+    font-size: var(--page-font-size);
+}
+
+p.reference, p.definition {
+    color: var(--page-secondary-foreground-color);
+}
+
+a:link, a:visited, a:hover, a:focus, a:active {
+    color: var(--primary-color) !important;
+    font-weight: 500;
+}
+
+a.anchor {
+    scroll-margin-top: var(--spacing-large);
+    display: block;
+}
+
+/*
+ Title and top navigation
+ */
+
+#top {
+    background: var(--header-background);
+    border-bottom: 1px solid var(--separator-color);
+}
+
+@media screen and (min-width: 768px) {
+    #top {
+        display: flex;
+        flex-wrap: wrap;
+        justify-content: space-between;
+        align-items: center;
+    }
+}
+
+#main-nav {
+    flex-grow: 5;
+    padding: var(--spacing-small) var(--spacing-medium);
+}
+
+#titlearea {
+    width: auto;
+    padding: var(--spacing-medium) var(--spacing-large);
+    background: none;
+    color: var(--header-foreground);
+    border-bottom: none;
+}
+
+@media screen and (max-width: 767px) {
+    #titlearea {
+        padding-bottom: var(--spacing-small);
+    }
+}
+
+#titlearea table tbody tr {
+    height: auto !important;
+}
+
+#projectname {
+    font-size: var(--title-font-size);
+    font-weight: 600;
+}
+
+#projectnumber {
+    font-family: inherit;
+    font-size: 60%;
+}
+
+#projectbrief {
+    font-family: inherit;
+    font-size: 80%;
+}
+
+#projectlogo {
+    vertical-align: middle;
+}
+
+#projectlogo img {
+    max-height: calc(var(--title-font-size) * 2);
+    margin-right: var(--spacing-small);
+}
+
+.sm-dox, .tabs, .tabs2, .tabs3 {
+    background: none;
+    padding: 0;
+}
+
+.tabs, .tabs2, .tabs3 {
+    border-bottom: 1px solid var(--separator-color);
+    margin-bottom: -1px;
+}
+
+.main-menu-btn-icon, .main-menu-btn-icon:before, .main-menu-btn-icon:after {
+    background: var(--page-secondary-foreground-color);
+}
+
+@media screen and (max-width: 767px) {
+    .sm-dox a span.sub-arrow {
+        background: var(--code-background);
+    }
+
+    #main-menu a.has-submenu span.sub-arrow {
+        color: var(--page-secondary-foreground-color);
+        border-radius: var(--border-radius-medium);
+    }
+
+    #main-menu a.has-submenu:hover span.sub-arrow {
+        color: var(--page-foreground-color);
+    }
+}
+
+@media screen and (min-width: 768px) {
+    .sm-dox li, .tablist li {
+        display: var(--menu-display);
+    }
+
+    .sm-dox a span.sub-arrow {
+        border-color: var(--header-foreground) transparent transparent transparent;
+    }
+
+    .sm-dox a:hover span.sub-arrow {
+        border-color: var(--menu-focus-foreground) transparent transparent transparent;
+    }
+
+    .sm-dox ul a span.sub-arrow {
+        border-color: transparent transparent transparent var(--page-foreground-color);
+    }
+
+    .sm-dox ul a:hover span.sub-arrow {
+        border-color: transparent transparent transparent var(--menu-focus-foreground);
+    }
+}
+
+.sm-dox ul {
+    background: var(--page-background-color);
+    box-shadow: var(--box-shadow);
+    border: 1px solid var(--separator-color);
+    border-radius: var(--border-radius-medium) !important;
+    padding: var(--spacing-small);
+    animation: ease-out 150ms slideInMenu;
+}
+
+@keyframes slideInMenu {
+    from {
+        opacity: 0;
+        transform: translate(0px, -2px);
+    }
+
+    to {
+        opacity: 1;
+        transform: translate(0px, 0px);
+    }
+}
+
+.sm-dox ul a {
+    color: var(--page-foreground-color) !important;
+    background: var(--page-background-color);
+    font-size: var(--navigation-font-size);
+}
+
+.sm-dox>li>ul:after {
+    border-bottom-color: var(--page-background-color) !important;
+}
+
+.sm-dox>li>ul:before {
+    border-bottom-color: var(--separator-color) !important;
+}
+
+.sm-dox ul a:hover, .sm-dox ul a:active, .sm-dox ul a:focus {
+    font-size: var(--navigation-font-size) !important;
+    color: var(--menu-focus-foreground) !important;
+    text-shadow: none;
+    background-color: var(--menu-focus-background);
+    border-radius: var(--border-radius-small) !important;
+}
+
+.sm-dox a, .sm-dox a:focus, .tablist li, .tablist li a, .tablist li.current a {
+    text-shadow: none;
+    background: transparent;
+    background-image: none !important;
+    color: var(--header-foreground) !important;
+    font-weight: normal;
+    font-size: var(--navigation-font-size);
+    border-radius: var(--border-radius-small) !important;
+}
+
+.sm-dox a:focus {
+    outline: auto;
+}
+
+.sm-dox a:hover, .sm-dox a:active, .tablist li a:hover {
+    text-shadow: none;
+    font-weight: normal;
+    background: var(--menu-focus-background);
+    color: var(--menu-focus-foreground) !important;
+    border-radius: var(--border-radius-small) !important;
+    font-size: var(--navigation-font-size);
+}
+
+.tablist li.current {
+    border-radius: var(--border-radius-small);
+    background: var(--menu-selected-background);
+}
+
+.tablist li {
+    margin: var(--spacing-small) 0 var(--spacing-small) var(--spacing-small);
+}
+
+.tablist a {
+    padding: 0 var(--spacing-large);
+}
+
+
+/*
+ Search box
+ */
+
+#MSearchBox {
+    height: var(--searchbar-height);
+    background: var(--searchbar-background);
+    border-radius: var(--searchbar-border-radius);
+    border: 1px solid var(--separator-color);
+    overflow: hidden;
+    width: var(--searchbar-width);
+    position: relative;
+    box-shadow: none;
+    display: block;
+    margin-top: 0;
+}
+
+/* until Doxygen 1.9.4 */
+.left img#MSearchSelect {
+    left: 0;
+    user-select: none;
+    padding-left: 8px;
+}
+
+/* Doxygen 1.9.5 */
+.left span#MSearchSelect {
+    left: 0;
+    user-select: none;
+    margin-left: 8px;
+    padding: 0;
+}
+
+.left #MSearchSelect[src$=".png"] {
+    padding-left: 0
+}
+
+.SelectionMark {
+    user-select: none;
+}
+
+.tabs .left #MSearchSelect {
+    padding-left: 0;
+}
+
+.tabs #MSearchBox {
+    position: absolute;
+    right: var(--spacing-medium);
+}
+
+@media screen and (max-width: 767px) {
+    .tabs #MSearchBox {
+        position: relative;
+        right: 0;
+        margin-left: var(--spacing-medium);
+        margin-top: 0;
+    }
+}
+
+#MSearchSelectWindow, #MSearchResultsWindow {
+    z-index: 9999;
+}
+
+#MSearchBox.MSearchBoxActive {
+    border-color: var(--primary-color);
+    box-shadow: inset 0 0 0 1px var(--primary-color);
+}
+
+#main-menu > li:last-child {
+    margin-right: 0;
+}
+
+@media screen and (max-width: 767px) {
+    #main-menu > li:last-child {
+        height: 50px;
+    }
+}
+
+#MSearchField {
+    font-size: var(--navigation-font-size);
+    height: calc(var(--searchbar-height) - 2px);
+    background: transparent;
+    width: calc(var(--searchbar-width) - 64px);
+}
+
+.MSearchBoxActive #MSearchField {
+    color: var(--searchbar-foreground);
+}
+
+#MSearchSelect {
+    top: calc(calc(var(--searchbar-height) / 2) - 11px);
+}
+
+#MSearchBox span.left, #MSearchBox span.right {
+    background: none;
+    background-image: none;
+}
+
+#MSearchBox span.right {
+    padding-top: calc(calc(var(--searchbar-height) / 2) - 12px);
+    position: absolute;
+    right: var(--spacing-small);
+}
+
+.tabs #MSearchBox span.right {
+    top: calc(calc(var(--searchbar-height) / 2) - 12px);
+}
+
+@keyframes slideInSearchResults {
+    from {
+        opacity: 0;
+        transform: translate(0, 15px);
+    }
+
+    to {
+        opacity: 1;
+        transform: translate(0, 20px);
+    }
+}
+
+#MSearchResultsWindow {
+    left: auto !important;
+    right: var(--spacing-medium);
+    border-radius: var(--border-radius-large);
+    border: 1px solid var(--separator-color);
+    transform: translate(0, 20px);
+    box-shadow: var(--box-shadow);
+    animation: ease-out 280ms slideInSearchResults;
+    background: var(--page-background-color);
+}
+
+iframe#MSearchResults {
+    margin: 4px;
+}
+
+iframe {
+    color-scheme: normal;
+}
+
+@media (prefers-color-scheme: dark) {
+    html:not(.light-mode) iframe#MSearchResults {
+        filter: invert() hue-rotate(180deg);
+    }
+}
+
+html.dark-mode iframe#MSearchResults {
+    filter: invert() hue-rotate(180deg);
+}
+
+#MSearchResults .SRPage {
+    background-color: transparent;
+}
+
+#MSearchResults .SRPage .SREntry {
+    font-size: 10pt;
+    padding: var(--spacing-small) var(--spacing-medium);
+}
+
+#MSearchSelectWindow {
+    border: 1px solid var(--separator-color);
+    border-radius: var(--border-radius-medium);
+    box-shadow: var(--box-shadow);
+    background: var(--page-background-color);
+    padding-top: var(--spacing-small);
+    padding-bottom: var(--spacing-small);
+}
+
+#MSearchSelectWindow a.SelectItem {
+    font-size: var(--navigation-font-size);
+    line-height: var(--content-line-height);
+    margin: 0 var(--spacing-small);
+    border-radius: var(--border-radius-small);
+    color: var(--page-foreground-color) !important;
+    font-weight: normal;
+}
+
+#MSearchSelectWindow a.SelectItem:hover {
+    background: var(--menu-focus-background);
+    color: var(--menu-focus-foreground) !important;
+}
+
+@media screen and (max-width: 767px) {
+    #MSearchBox {
+        margin-top: var(--spacing-medium);
+        margin-bottom: var(--spacing-medium);
+        width: calc(100vw - 30px);
+    }
+
+    #main-menu > li:last-child {
+        float: none !important;
+    }
+
+    #MSearchField {
+        width: calc(100vw - 110px);
+    }
+
+    @keyframes slideInSearchResultsMobile {
+        from {
+            opacity: 0;
+            transform: translate(0, 15px);
+        }
+
+        to {
+            opacity: 1;
+            transform: translate(0, 20px);
+        }
+    }
+
+    #MSearchResultsWindow {
+        left: var(--spacing-medium) !important;
+        right: var(--spacing-medium);
+        overflow: auto;
+        transform: translate(0, 20px);
+        animation: ease-out 280ms slideInSearchResultsMobile;
+        width: auto !important;
+    }
+
+    /*
+     * Overrides to fix the search box on mobile in Doxygen 1.9.2
+     */
+    label.main-menu-btn ~ #searchBoxPos1 {
+        top: 3px !important;
+        right: 6px !important;
+        left: 45px;
+        display: flex;
+    }
+
+    label.main-menu-btn ~ #searchBoxPos1 > #MSearchBox {
+        margin-top: 0;
+        margin-bottom: 0;
+        flex-grow: 2;
+        float: left;
+    }
+}
+
+/*
+ Tree view
+ */
+
+#side-nav {
+    padding: 0 !important;
+    background: var(--side-nav-background);
+    min-width: 8px;
+    max-width: 50vw;
+}
+
+@media screen and (max-width: 767px) {
+    #side-nav {
+        display: none;
+    }
+
+    #doc-content {
+        margin-left: 0 !important;
+    }
+}
+
+#nav-tree {
+    background: transparent;
+    margin-right: 1px;
+}
+
+#nav-tree .label {
+    font-size: var(--navigation-font-size);
+}
+
+#nav-tree .item {
+    height: var(--tree-item-height);
+    line-height: var(--tree-item-height);
+}
+
+#nav-sync {
+    bottom: 12px;
+    right: 12px;
+    top: auto !important;
+    user-select: none;
+}
+
+#nav-tree .selected {
+    text-shadow: none;
+    background-image: none;
+    background-color: transparent;
+    position: relative;
+}
+
+#nav-tree .selected::after {
+    content: "";
+    position: absolute;
+    top: 1px;
+    bottom: 1px;
+    left: 0;
+    width: 4px;
+    border-radius: 0 var(--border-radius-small) var(--border-radius-small) 0;
+    background: var(--primary-color);
+}
+
+
+#nav-tree a {
+    color: var(--side-nav-foreground) !important;
+    font-weight: normal;
+}
+
+#nav-tree a:focus {
+    outline-style: auto;
+}
+
+#nav-tree .arrow {
+    opacity: var(--side-nav-arrow-opacity);
+}
+
+.arrow {
+    color: inherit;
+    cursor: pointer;
+    font-size: 45%;
+    vertical-align: middle;
+    margin-right: 2px;
+    font-family: serif;
+    height: auto;
+    text-align: right;
+}
+
+#nav-tree div.item:hover .arrow, #nav-tree a:focus .arrow {
+    opacity: var(--side-nav-arrow-hover-opacity);
+}
+
+#nav-tree .selected a {
+    color: var(--primary-color) !important;
+    font-weight: bolder;
+    font-weight: 600;
+}
+
+.ui-resizable-e {
+    width: 4px;
+    background: transparent;
+    box-shadow: inset -1px 0 0 0 var(--separator-color);
+}
+
+/*
+ Contents
+ */
+
+div.header {
+    border-bottom: 1px solid var(--separator-color);
+    background-color: var(--page-background-color);
+    background-image: none;
+}
+
+@media screen and (min-width: 1000px) {
+    #doc-content > div > div.contents,
+    .PageDoc > div.contents {
+        display: flex;
+        flex-direction: row-reverse;
+        flex-wrap: nowrap;
+        align-items: flex-start;
+    }
+
+    div.contents .textblock {
+        min-width: 200px;
+        flex-grow: 1;
+    }
+}
+
+div.contents, div.header .title, div.header .summary {
+    max-width: var(--content-maxwidth);
+}
+
+div.contents, div.header .title  {
+    line-height: initial;
+    margin: calc(var(--spacing-medium) + .2em) auto var(--spacing-medium) auto;
+}
+
+div.header .summary {
+    margin: var(--spacing-medium) auto 0 auto;
+}
+
+div.headertitle {
+    padding: 0;
+}
+
+div.header .title {
+    font-weight: 600;
+    font-size: 225%;
+    padding: var(--spacing-medium) var(--spacing-large);
+    word-break: break-word;
+}
+
+div.header .summary {
+    width: auto;
+    display: block;
+    float: none;
+    padding: 0 var(--spacing-large);
+}
+
+td.memSeparator {
+    border-color: var(--separator-color);
+}
+
+span.mlabel {
+    background: var(--primary-color);
+    border: none;
+    padding: 4px 9px;
+    border-radius: 12px;
+    margin-right: var(--spacing-medium);
+}
+
+span.mlabel:last-of-type {
+    margin-right: 2px;
+}
+
+div.contents {
+    padding: 0 var(--spacing-large);
+}
+
+div.contents p, div.contents li {
+    line-height: var(--content-line-height);
+}
+
+div.contents div.dyncontent {
+    margin: var(--spacing-medium) 0;
+}
+
+@media (prefers-color-scheme: dark) {
+    html:not(.light-mode) div.contents div.dyncontent img,
+    html:not(.light-mode) div.contents center img,
+    html:not(.light-mode) div.contents > table img,
+    html:not(.light-mode) div.contents div.dyncontent iframe,
+    html:not(.light-mode) div.contents center iframe,
+    html:not(.light-mode) div.contents table iframe,
+    html:not(.light-mode) div.contents .dotgraph iframe {
+        filter: brightness(89%) hue-rotate(180deg) invert();
+    }
+}
+
+html.dark-mode div.contents div.dyncontent img,
+html.dark-mode div.contents center img,
+html.dark-mode div.contents > table img,
+html.dark-mode div.contents div.dyncontent iframe,
+html.dark-mode div.contents center iframe,
+html.dark-mode div.contents table iframe,
+html.dark-mode div.contents .dotgraph iframe
+ {
+    filter: brightness(89%) hue-rotate(180deg) invert();
+}
+
+h2.groupheader {
+    border-bottom: 0px;
+    color: var(--page-foreground-color);
+    box-shadow: 
+        100px 0 var(--page-background-color), 
+        -100px 0 var(--page-background-color),
+        100px 0.75px var(--separator-color),
+        -100px 0.75px var(--separator-color),
+        500px 0 var(--page-background-color), 
+        -500px 0 var(--page-background-color),
+        500px 0.75px var(--separator-color),
+        -500px 0.75px var(--separator-color),
+        900px 0 var(--page-background-color), 
+        -900px 0 var(--page-background-color),
+        900px 0.75px var(--separator-color),
+        -900px 0.75px var(--separator-color),
+        1400px 0 var(--page-background-color),
+        -1400px 0 var(--page-background-color), 
+        1400px 0.75px var(--separator-color),
+        -1400px 0.75px var(--separator-color),
+        1900px 0 var(--page-background-color),
+        -1900px 0 var(--page-background-color),
+        1900px 0.75px var(--separator-color),
+        -1900px 0.75px var(--separator-color);
+}
+
+blockquote {
+    margin: 0 var(--spacing-medium) 0 var(--spacing-medium);
+    padding: var(--spacing-small) var(--spacing-large);
+    background: var(--blockquote-background);
+    color: var(--blockquote-foreground);
+    border-left: 0;
+    overflow: visible;
+    border-radius: var(--border-radius-medium);
+    position: relative;
+}
+
+blockquote::before, blockquote::after {
+    font-weight: bold;
+    font-family: serif;
+    font-size: 360%;
+    opacity: .15;
+    position: absolute;
+}
+
+blockquote::before {
+    content: "“";
+    left: -10px;
+    top: 4px;
+}
+
+blockquote::after {
+    content: "”";
+    right: -8px;
+    bottom: -25px;
+}
+
+blockquote p {
+    margin: var(--spacing-small) 0 var(--spacing-medium) 0;
+}
+.paramname {
+    font-weight: 600;
+    color: var(--primary-dark-color);
+}
+
+.paramname > code {
+    border: 0;
+}
+
+table.params .paramname {
+    font-weight: 600;
+    font-family: var(--font-family-monospace);
+    font-size: var(--code-font-size);
+    padding-right: var(--spacing-small);
+    line-height: var(--table-line-height);
+}
+
+h1.glow, h2.glow, h3.glow, h4.glow, h5.glow, h6.glow {
+    text-shadow: 0 0 15px var(--primary-light-color);
+}
+
+.alphachar a {
+    color: var(--page-foreground-color);
+}
+
+.dotgraph {
+    max-width: 100%;
+    overflow-x: scroll;
+}
+
+.dotgraph .caption {
+    position: sticky;
+    left: 0;
+}
+
+/* Wrap Graphviz graphs with the `interactive_dotgraph` class if `INTERACTIVE_SVG = YES` */
+.interactive_dotgraph .dotgraph iframe {
+    max-width: 100%;
+}
+
+/*
+ Table of Contents
+ */
+
+div.contents .toc {
+    max-height: var(--toc-max-height);
+    min-width: var(--toc-width);
+    border: 0;
+    border-left: 1px solid var(--separator-color);
+    border-radius: 0;
+    background-color: transparent;
+    box-shadow: none;
+    position: sticky;
+    top: var(--toc-sticky-top);
+    padding: 0 var(--spacing-large);
+    margin: var(--spacing-small) 0 var(--spacing-large) var(--spacing-large);
+}
+
+div.toc h3 {
+    color: var(--toc-foreground);
+    font-size: var(--navigation-font-size);
+    margin: var(--spacing-large) 0 var(--spacing-medium) 0;
+}
+
+div.toc li {
+    padding: 0;
+    background: none;
+    line-height: var(--toc-font-size);
+    margin: var(--toc-font-size) 0 0 0;
+}
+
+div.toc li::before {
+    display: none;
+}
+
+div.toc ul {
+    margin-top: 0
+}
+
+div.toc li a {
+    font-size: var(--toc-font-size);
+    color: var(--page-foreground-color) !important;
+    text-decoration: none;
+}
+
+div.toc li a:hover, div.toc li a.active {
+    color: var(--primary-color) !important;
+}
+
+div.toc li a.aboveActive {
+    color: var(--page-secondary-foreground-color) !important;
+}
+
+
+@media screen and (max-width: 999px) {
+    div.contents .toc {
+        max-height: 45vh;
+        float: none;
+        width: auto;
+        margin: 0 0 var(--spacing-medium) 0;
+        position: relative;
+        top: 0;
+        border: 1px solid var(--separator-color);
+        border-radius: var(--border-radius-medium);
+        background-color: var(--toc-background);
+        box-shadow: var(--box-shadow);
+    }
+
+    div.contents .toc.interactive {
+        max-height: calc(var(--navigation-font-size) + 2 * var(--spacing-large));
+        overflow: hidden;
+    }
+
+    div.contents .toc > h3 {
+        -webkit-tap-highlight-color: transparent;
+        cursor: pointer;
+        position: sticky;
+        top: 0;
+        background-color: var(--toc-background);
+        margin: 0;
+        padding: var(--spacing-large) 0;
+        display: block;
+    }
+
+    div.contents .toc.interactive > h3::before {
+        content: "";
+        width: 0; 
+        height: 0; 
+        border-left: 4px solid transparent;
+        border-right: 4px solid transparent;
+        border-top: 5px solid var(--primary-color);
+        display: inline-block;
+        margin-right: var(--spacing-small);
+        margin-bottom: calc(var(--navigation-font-size) / 4);
+        transform: rotate(-90deg);
+        transition: transform 0.25s ease-out;
+    }
+
+    div.contents .toc.interactive.open > h3::before {
+        transform: rotate(0deg);
+    }
+
+    div.contents .toc.interactive.open {
+        max-height: 45vh;
+        overflow: auto;
+        transition: max-height 0.2s ease-in-out;
+    }
+
+    div.contents .toc a, div.contents .toc a.active {
+        color: var(--primary-color) !important;
+    }
+
+    div.contents .toc a:hover {
+        text-decoration: underline;
+    }
+}
+
+/*
+ Code & Fragments
+ */
+
+code, div.fragment, pre.fragment {
+    border-radius: var(--border-radius-small);
+    border: 1px solid var(--separator-color);
+    overflow: hidden;
+}
+
+code {
+    display: inline;
+    background: var(--code-background);
+    color: var(--code-foreground);
+    padding: 2px 6px;
+}
+
+div.fragment, pre.fragment {
+    margin: var(--spacing-medium) 0;
+    padding: calc(var(--spacing-large) - (var(--spacing-large) / 6)) var(--spacing-large);
+    background: var(--fragment-background);
+    color: var(--fragment-foreground);
+    overflow-x: auto;
+}
+
+@media screen and (max-width: 767px) {
+    div.fragment, pre.fragment {
+        border-top-right-radius: 0;
+        border-bottom-right-radius: 0;
+        border-right: 0;
+    }
+
+    .contents > div.fragment,
+    .textblock > div.fragment,
+    .textblock > pre.fragment,
+    .contents > .doxygen-awesome-fragment-wrapper > div.fragment,
+    .textblock > .doxygen-awesome-fragment-wrapper > div.fragment,
+    .textblock > .doxygen-awesome-fragment-wrapper > pre.fragment {
+        margin: var(--spacing-medium) calc(0px - var(--spacing-large));
+        border-radius: 0;
+        border-left: 0;
+    }
+
+    .textblock li > .fragment,
+    .textblock li > .doxygen-awesome-fragment-wrapper > .fragment {
+        margin: var(--spacing-medium) calc(0px - var(--spacing-large));
+    }
+
+    .memdoc li > .fragment,
+    .memdoc li > .doxygen-awesome-fragment-wrapper > .fragment {
+        margin: var(--spacing-medium) calc(0px - var(--spacing-medium));
+    }
+
+    .textblock ul, .memdoc ul {
+        overflow: initial;
+    }
+
+    .memdoc > div.fragment,
+    .memdoc > pre.fragment,
+    dl dd > div.fragment,
+    dl dd pre.fragment,
+    .memdoc > .doxygen-awesome-fragment-wrapper > div.fragment,
+    .memdoc > .doxygen-awesome-fragment-wrapper > pre.fragment,
+    dl dd > .doxygen-awesome-fragment-wrapper > div.fragment,
+    dl dd .doxygen-awesome-fragment-wrapper > pre.fragment {
+        margin: var(--spacing-medium) calc(0px - var(--spacing-medium));
+        border-radius: 0;
+        border-left: 0;
+    }
+}
+
+code, code a, pre.fragment, div.fragment, div.fragment .line, div.fragment span, div.fragment .line a, div.fragment .line span {
+    font-family: var(--font-family-monospace);
+    font-size: var(--code-font-size) !important;
+}
+
+div.line:after {
+    margin-right: var(--spacing-medium);
+}
+
+div.fragment .line, pre.fragment {
+    white-space: pre;
+    word-wrap: initial;
+    line-height: var(--fragment-lineheight);
+}
+
+div.fragment span.keyword {
+    color: var(--fragment-keyword);
+}
+
+div.fragment span.keywordtype {
+    color: var(--fragment-keywordtype);
+}
+
+div.fragment span.keywordflow {
+    color: var(--fragment-keywordflow);
+}
+
+div.fragment span.stringliteral {
+    color: var(--fragment-token)
+}
+
+div.fragment span.comment {
+    color: var(--fragment-comment);
+}
+
+div.fragment a.code {
+    color: var(--fragment-link) !important;
+}
+
+div.fragment span.preprocessor {
+    color: var(--fragment-preprocessor);
+}
+
+div.fragment span.lineno {
+    display: inline-block;
+    width: 27px;
+    border-right: none;
+    background: var(--fragment-linenumber-background);
+    color: var(--fragment-linenumber-color);
+}
+
+div.fragment span.lineno a {
+    background: none;
+    color: var(--fragment-link) !important;
+}
+
+div.fragment .line:first-child .lineno {
+    box-shadow: -999999px 0px 0 999999px var(--fragment-linenumber-background), -999998px 0px 0 999999px var(--fragment-linenumber-border);
+}
+
+div.line {
+    border-radius: var(--border-radius-small);
+}
+
+div.line.glow {
+    background-color: var(--primary-light-color);
+    box-shadow: none;
+}
+
+/*
+ dl warning, attention, note, deprecated, bug, ...
+ */
+
+dl.bug dt a, dl.deprecated dt a, dl.todo dt a {
+    font-weight: bold !important;
+}
+
+dl.warning, dl.attention, dl.note, dl.deprecated, dl.bug, dl.invariant, dl.pre, dl.post, dl.todo, dl.remark {
+    padding: var(--spacing-medium);
+    margin: var(--spacing-medium) 0;
+    color: var(--page-background-color);
+    overflow: hidden;
+    margin-left: 0;
+    border-radius: var(--border-radius-small);
+}
+
+dl.section dd {
+    margin-bottom: 2px;
+}
+
+dl.warning, dl.attention {
+    background: var(--warning-color);
+    border-left: 8px solid var(--warning-color-dark);
+    color: var(--warning-color-darker);
+}
+
+dl.warning dt, dl.attention dt {
+    color: var(--warning-color-dark);
+}
+
+dl.note, dl.remark {
+    background: var(--note-color);
+    border-left: 8px solid var(--note-color-dark);
+    color: var(--note-color-darker);
+}
+
+dl.note dt, dl.remark dt {
+    color: var(--note-color-dark);
+}
+
+dl.todo {
+    background: var(--todo-color);
+    border-left: 8px solid var(--todo-color-dark);
+    color: var(--todo-color-darker);
+}
+
+dl.todo dt {
+    color: var(--todo-color-dark);
+}
+
+dl.bug {
+    background: var(--bug-color);
+    border-left: 8px solid var(--bug-color-dark);
+    color: var(--bug-color-darker);
+}
+
+dl.bug dt a {
+    color: var(--bug-color-dark) !important;
+}
+
+dl.deprecated {
+    background: var(--deprecated-color);
+    border-left: 8px solid var(--deprecated-color-dark);
+    color: var(--deprecated-color-darker);
+}
+
+dl.deprecated dt a {
+    color: var(--deprecated-color-dark) !important;
+}
+
+dl.section dd, dl.bug dd, dl.deprecated dd, dl.todo dd {
+    margin-inline-start: 0px;
+}
+
+dl.invariant, dl.pre, dl.post {
+    background: var(--invariant-color);
+    border-left: 8px solid var(--invariant-color-dark);
+    color: var(--invariant-color-darker);
+}
+
+dl.invariant dt, dl.pre dt, dl.post dt {
+    color: var(--invariant-color-dark);
+}
+
+/*
+ memitem
+ */
+
+div.memdoc, div.memproto, h2.memtitle {
+    box-shadow: none;
+    background-image: none;
+    border: none;
+}
+
+div.memdoc {
+    padding: 0 var(--spacing-medium);
+    background: var(--page-background-color);
+}
+
+h2.memtitle, div.memitem {
+    border: 1px solid var(--separator-color);
+    box-shadow: var(--box-shadow);
+}
+
+h2.memtitle {
+    box-shadow: 0px var(--spacing-medium) 0 -1px var(--fragment-background), var(--box-shadow);
+}
+
+div.memitem {
+    transition: none;
+}
+
+div.memproto, h2.memtitle {
+    background: var(--fragment-background);
+}
+
+h2.memtitle {
+    font-weight: 500;
+    font-size: var(--memtitle-font-size);
+    font-family: var(--font-family-monospace);
+    border-bottom: none;
+    border-top-left-radius: var(--border-radius-medium);
+    border-top-right-radius: var(--border-radius-medium);
+    word-break: break-all;
+    position: relative;
+}
+
+h2.memtitle:after {
+    content: "";
+    display: block;
+    background: var(--fragment-background);
+    height: var(--spacing-medium);
+    bottom: calc(0px - var(--spacing-medium));
+    left: 0;
+    right: -14px;
+    position: absolute;
+    border-top-right-radius: var(--border-radius-medium);
+}
+
+h2.memtitle > span.permalink {
+    font-size: inherit;
+}
+
+h2.memtitle > span.permalink > a {
+    text-decoration: none;
+    padding-left: 3px;
+    margin-right: -4px;
+    user-select: none;
+    display: inline-block;
+    margin-top: -6px;
+}
+
+h2.memtitle > span.permalink > a:hover {
+    color: var(--primary-dark-color) !important;
+}
+
+a:target + h2.memtitle, a:target + h2.memtitle + div.memitem {
+    border-color: var(--primary-light-color);
+}
+
+div.memitem {
+    border-top-right-radius: var(--border-radius-medium);
+    border-bottom-right-radius: var(--border-radius-medium);
+    border-bottom-left-radius: var(--border-radius-medium);
+    overflow: hidden;
+    display: block !important;
+}
+
+div.memdoc {
+    border-radius: 0;
+}
+
+div.memproto {
+    border-radius: 0 var(--border-radius-small) 0 0;
+    overflow: auto;
+    border-bottom: 1px solid var(--separator-color);
+    padding: var(--spacing-medium);
+    margin-bottom: -1px;
+}
+
+div.memtitle {
+    border-top-right-radius: var(--border-radius-medium);
+    border-top-left-radius: var(--border-radius-medium);
+}
+
+div.memproto table.memname {
+    font-family: var(--font-family-monospace);
+    color: var(--page-foreground-color);
+    font-size: var(--memname-font-size);
+    text-shadow: none;
+}
+
+div.memproto div.memtemplate {
+    font-family: var(--font-family-monospace);
+    color: var(--primary-dark-color);
+    font-size: var(--memname-font-size);
+    margin-left: 2px;
+    text-shadow: none;
+}
+
+table.mlabels, table.mlabels > tbody {
+    display: block;
+}
+
+td.mlabels-left {
+    width: auto;
+}
+
+td.mlabels-right {
+    margin-top: 3px;
+    position: sticky;
+    left: 0;
+}
+
+table.mlabels > tbody > tr:first-child {
+    display: flex;
+    justify-content: space-between;
+    flex-wrap: wrap;
+}
+
+.memname, .memitem span.mlabels {
+    margin: 0
+}
+
+/*
+ reflist
+ */
+
+dl.reflist {
+    box-shadow: var(--box-shadow);
+    border-radius: var(--border-radius-medium);
+    border: 1px solid var(--separator-color);
+    overflow: hidden;
+    padding: 0;
+}
+
+
+dl.reflist dt, dl.reflist dd {
+    box-shadow: none;
+    text-shadow: none;
+    background-image: none;
+    border: none;
+    padding: 12px;
+}
+
+
+dl.reflist dt {
+    font-weight: 500;
+    border-radius: 0;
+    background: var(--code-background);
+    border-bottom: 1px solid var(--separator-color);
+    color: var(--page-foreground-color)
+}
+
+
+dl.reflist dd {
+    background: none;
+}
+
+/*
+ Table
+ */
+
+.contents table:not(.memberdecls):not(.mlabels):not(.fieldtable):not(.memname),
+.contents table:not(.memberdecls):not(.mlabels):not(.fieldtable):not(.memname) tbody {
+    display: inline-block;
+    max-width: 100%;
+}
+
+.contents > table:not(.memberdecls):not(.mlabels):not(.fieldtable):not(.memname):not(.classindex) {
+    margin-left: calc(0px - var(--spacing-large));
+    margin-right: calc(0px - var(--spacing-large));
+    max-width: calc(100% + 2 * var(--spacing-large));
+}
+
+table.fieldtable,
+table.markdownTable tbody,
+table.doxtable tbody {
+    border: none;
+    margin: var(--spacing-medium) 0;
+    box-shadow: 0 0 0 1px var(--separator-color);
+    border-radius: var(--border-radius-small);
+}
+
+table.markdownTable, table.doxtable, table.fieldtable {
+    padding: 1px;
+}
+
+table.doxtable caption {
+    display: block;
+}
+
+table.fieldtable {
+    border-collapse: collapse;
+    width: 100%;
+}
+
+th.markdownTableHeadLeft,
+th.markdownTableHeadRight,
+th.markdownTableHeadCenter,
+th.markdownTableHeadNone,
+table.doxtable th {
+    background: var(--tablehead-background);
+    color: var(--tablehead-foreground);
+    font-weight: 600;
+    font-size: var(--page-font-size);
+}
+
+th.markdownTableHeadLeft:first-child,
+th.markdownTableHeadRight:first-child,
+th.markdownTableHeadCenter:first-child,
+th.markdownTableHeadNone:first-child,
+table.doxtable tr th:first-child {
+    border-top-left-radius: var(--border-radius-small);
+}
+
+th.markdownTableHeadLeft:last-child,
+th.markdownTableHeadRight:last-child,
+th.markdownTableHeadCenter:last-child,
+th.markdownTableHeadNone:last-child,
+table.doxtable tr th:last-child {
+    border-top-right-radius: var(--border-radius-small);
+}
+
+table.markdownTable td,
+table.markdownTable th,
+table.fieldtable td,
+table.fieldtable th,
+table.doxtable td,
+table.doxtable th {
+    border: 1px solid var(--separator-color);
+    padding: var(--spacing-small) var(--spacing-medium);
+}
+
+table.markdownTable td:last-child,
+table.markdownTable th:last-child,
+table.fieldtable td:last-child,
+table.fieldtable th:last-child,
+table.doxtable td:last-child,
+table.doxtable th:last-child {
+    border-right: none;
+}
+
+table.markdownTable td:first-child,
+table.markdownTable th:first-child,
+table.fieldtable td:first-child,
+table.fieldtable th:first-child,
+table.doxtable td:first-child,
+table.doxtable th:first-child {
+    border-left: none;
+}
+
+table.markdownTable tr:first-child td,
+table.markdownTable tr:first-child th,
+table.fieldtable tr:first-child td,
+table.fieldtable tr:first-child th,
+table.doxtable tr:first-child td,
+table.doxtable tr:first-child th {
+    border-top: none;
+}
+
+table.markdownTable tr:last-child td,
+table.markdownTable tr:last-child th,
+table.fieldtable tr:last-child td,
+table.fieldtable tr:last-child th,
+table.doxtable tr:last-child td,
+table.doxtable tr:last-child th {
+    border-bottom: none;
+}
+
+table.markdownTable tr, table.doxtable tr {
+    border-bottom: 1px solid var(--separator-color);
+}
+
+table.markdownTable tr:last-child, table.doxtable tr:last-child {
+    border-bottom: none;
+}
+
+.full_width_table table:not(.memberdecls):not(.mlabels):not(.fieldtable):not(.memname) {
+    display: block;
+}
+
+.full_width_table table:not(.memberdecls):not(.mlabels):not(.fieldtable):not(.memname) tbody {
+    display: table;
+    width: 100%;
+}
+
+table.fieldtable th {
+    font-size: var(--page-font-size);
+    font-weight: 600;
+    background-image: none;
+    background-color: var(--tablehead-background);
+    color: var(--tablehead-foreground);
+}
+
+table.fieldtable td.fieldtype, .fieldtable td.fieldname, .fieldtable td.fielddoc, .fieldtable th {
+    border-bottom: 1px solid var(--separator-color);
+    border-right: 1px solid var(--separator-color);
+}
+
+table.fieldtable tr:last-child td:first-child {
+    border-bottom-left-radius: var(--border-radius-small);
+}
+
+table.fieldtable tr:last-child td:last-child {
+    border-bottom-right-radius: var(--border-radius-small);
+}
+
+.memberdecls td.glow, .fieldtable tr.glow {
+    background-color: var(--primary-light-color);
+    box-shadow: none;
+}
+
+table.memberdecls {
+    display: block;
+    -webkit-tap-highlight-color: transparent;
+}
+
+table.memberdecls tr[class^='memitem'] {
+    font-family: var(--font-family-monospace);
+    font-size: var(--code-font-size);
+}
+
+table.memberdecls tr[class^='memitem'] .memTemplParams {
+    font-family: var(--font-family-monospace);
+    font-size: var(--code-font-size);
+    color: var(--primary-dark-color);
+    white-space: normal;
+}
+
+table.memberdecls .memItemLeft,
+table.memberdecls .memItemRight,
+table.memberdecls .memTemplItemLeft,
+table.memberdecls .memTemplItemRight,
+table.memberdecls .memTemplParams {
+    transition: none;
+    padding-top: var(--spacing-small);
+    padding-bottom: var(--spacing-small);
+    border-top: 1px solid var(--separator-color);
+    border-bottom: 1px solid var(--separator-color);
+    background-color: var(--fragment-background);
+}
+
+table.memberdecls .memTemplItemLeft,
+table.memberdecls .memTemplItemRight {
+    padding-top: 2px;
+}
+
+table.memberdecls .memTemplParams {
+    border-bottom: 0;
+    border-left: 1px solid var(--separator-color);
+    border-right: 1px solid var(--separator-color);
+    border-radius: var(--border-radius-small) var(--border-radius-small) 0 0;
+    padding-bottom: var(--spacing-small);
+}
+
+table.memberdecls .memTemplItemLeft {
+    border-radius: 0 0 0 var(--border-radius-small);
+    border-left: 1px solid var(--separator-color);
+    border-top: 0;
+}
+
+table.memberdecls .memTemplItemRight {
+    border-radius: 0 0 var(--border-radius-small) 0;
+    border-right: 1px solid var(--separator-color);
+    padding-left: 0;
+    border-top: 0;
+}
+
+table.memberdecls .memItemLeft {
+    border-radius: var(--border-radius-small) 0 0 var(--border-radius-small);
+    border-left: 1px solid var(--separator-color);
+    padding-left: var(--spacing-medium);
+    padding-right: 0;
+}
+
+table.memberdecls .memItemRight {
+    border-radius: 0 var(--border-radius-small) var(--border-radius-small) 0;
+    border-right: 1px solid var(--separator-color);
+    padding-right: var(--spacing-medium);
+    padding-left: 0;
+}
+
+table.memberdecls .mdescLeft, table.memberdecls .mdescRight {
+    background: none;
+    color: var(--page-foreground-color);
+    padding: var(--spacing-small) 0;
+}
+
+table.memberdecls .memItemLeft,
+table.memberdecls .memTemplItemLeft {
+    padding-right: var(--spacing-medium);
+}
+
+table.memberdecls .memSeparator {
+    background: var(--page-background-color);
+    height: var(--spacing-large);
+    border: 0;
+    transition: none;
+}
+
+table.memberdecls .groupheader {
+    margin-bottom: var(--spacing-large);
+}
+
+table.memberdecls .inherit_header td {
+    padding: 0 0 var(--spacing-medium) 0;
+    text-indent: -12px;
+    color: var(--page-secondary-foreground-color);
+}
+
+table.memberdecls img[src="closed.png"],
+table.memberdecls img[src="open.png"],
+div.dynheader img[src="open.png"],
+div.dynheader img[src="closed.png"] {
+    width: 0; 
+    height: 0; 
+    border-left: 4px solid transparent;
+    border-right: 4px solid transparent;
+    border-top: 5px solid var(--primary-color);
+    margin-top: 8px;
+    display: block;
+    float: left;
+    margin-left: -10px;
+    transition: transform 0.25s ease-out;
+}
+
+table.memberdecls img {
+    margin-right: 10px;
+}
+
+table.memberdecls img[src="closed.png"],
+div.dynheader img[src="closed.png"] {
+    transform: rotate(-90deg);
+}
+
+.compoundTemplParams {
+    font-family: var(--font-family-monospace);
+    color: var(--primary-dark-color);
+    font-size: var(--code-font-size);
+}
+
+@media screen and (max-width: 767px) {
+
+    table.memberdecls .memItemLeft,
+    table.memberdecls .memItemRight,
+    table.memberdecls .mdescLeft,
+    table.memberdecls .mdescRight,
+    table.memberdecls .memTemplItemLeft,
+    table.memberdecls .memTemplItemRight,
+    table.memberdecls .memTemplParams {
+        display: block;
+        text-align: left;
+        padding-left: var(--spacing-large);
+        margin: 0 calc(0px - var(--spacing-large)) 0 calc(0px - var(--spacing-large));
+        border-right: none;
+        border-left: none;
+        border-radius: 0;
+        white-space: normal;
+    }
+
+    table.memberdecls .memItemLeft,
+    table.memberdecls .mdescLeft,
+    table.memberdecls .memTemplItemLeft {
+        border-bottom: 0;
+        padding-bottom: 0;
+    }
+
+    table.memberdecls .memTemplItemLeft {
+        padding-top: 0;
+    }
+
+    table.memberdecls .mdescLeft {
+        margin-bottom: calc(0px - var(--page-font-size));
+    }
+
+    table.memberdecls .memItemRight, 
+    table.memberdecls .mdescRight,
+    table.memberdecls .memTemplItemRight {
+        border-top: 0;
+        padding-top: 0;
+        padding-right: var(--spacing-large);
+        overflow-x: auto;
+    }
+
+    table.memberdecls tr[class^='memitem']:not(.inherit) {
+        display: block;
+        width: calc(100vw - 2 * var(--spacing-large));
+    }
+
+    table.memberdecls .mdescRight {
+        color: var(--page-foreground-color);
+    }
+
+    table.memberdecls tr.inherit {
+        visibility: hidden;
+    }
+
+    table.memberdecls tr[style="display: table-row;"] {
+        display: block !important;
+        visibility: visible;
+        width: calc(100vw - 2 * var(--spacing-large));
+        animation: fade .5s;
+    }
+
+    @keyframes fade {
+        0% {
+            opacity: 0;
+            max-height: 0;
+        }
+
+        100% {
+            opacity: 1;
+            max-height: 200px;
+        }
+    }
+}
+
+
+/*
+ Horizontal Rule
+ */
+
+hr {
+    margin-top: var(--spacing-large);
+    margin-bottom: var(--spacing-large);
+    height: 1px;
+    background-color: var(--separator-color);
+    border: 0;
+}
+
+.contents hr {
+    box-shadow: 100px 0 0 var(--separator-color),
+                -100px 0 0 var(--separator-color),
+                500px 0 0 var(--separator-color),
+                -500px 0 0 var(--separator-color),
+                1500px 0 0 var(--separator-color),
+                -1500px 0 0 var(--separator-color),
+                2000px 0 0 var(--separator-color),
+                -2000px 0 0 var(--separator-color);
+}
+
+.contents img, .contents .center, .contents center, .contents div.image object {
+    max-width: 100%;
+    overflow: auto;
+}
+
+@media screen and (max-width: 767px) {
+    .contents .dyncontent > .center, .contents > center {
+        margin-left: calc(0px - var(--spacing-large));
+        margin-right: calc(0px - var(--spacing-large));
+        max-width: calc(100% + 2 * var(--spacing-large));
+    }
+}
+
+/*
+ Directories
+ */
+div.directory {
+    border-top: 1px solid var(--separator-color);
+    border-bottom: 1px solid var(--separator-color);
+    width: auto;
+}
+
+table.directory {
+    font-family: var(--font-family);
+    font-size: var(--page-font-size);
+    font-weight: normal;
+    width: 100%;
+}
+
+table.directory td.entry, table.directory td.desc {
+    padding: calc(var(--spacing-small) / 2) var(--spacing-small);
+    line-height: var(--table-line-height);
+}
+
+table.directory tr.even td:last-child {
+    border-radius: 0 var(--border-radius-small) var(--border-radius-small) 0;
+}
+
+table.directory tr.even td:first-child {
+    border-radius: var(--border-radius-small) 0 0 var(--border-radius-small);
+}
+
+table.directory tr.even:last-child td:last-child {
+    border-radius: 0 var(--border-radius-small) 0 0;
+}
+
+table.directory tr.even:last-child td:first-child {
+    border-radius: var(--border-radius-small) 0 0 0;
+}
+
+table.directory td.desc {
+    min-width: 250px;
+}
+
+table.directory tr.even {
+    background-color: var(--odd-color);
+}
+
+table.directory tr.odd {
+    background-color: transparent;
+}
+
+.icona {
+    width: auto;
+    height: auto;
+    margin: 0 var(--spacing-small);
+}
+
+.icon {
+    background: var(--primary-color);
+    border-radius: var(--border-radius-small);
+    font-size: var(--page-font-size);
+    padding: calc(var(--page-font-size) / 5);
+    line-height: var(--page-font-size);
+    transform: scale(0.8);
+    height: auto;
+    width: var(--page-font-size);
+    user-select: none;
+}
+
+.iconfopen, .icondoc, .iconfclosed {
+    background-position: center;
+    margin-bottom: 0;
+    height: var(--table-line-height);
+}
+
+.icondoc {
+    filter: saturate(0.2);
+}
+
+@media screen and (max-width: 767px) {
+    div.directory {
+        margin-left: calc(0px - var(--spacing-large));
+        margin-right: calc(0px - var(--spacing-large));
+    }
+}
+
+@media (prefers-color-scheme: dark) {
+    html:not(.light-mode) .iconfopen, html:not(.light-mode) .iconfclosed {
+        filter: hue-rotate(180deg) invert();
+    }
+}
+
+html.dark-mode .iconfopen, html.dark-mode .iconfclosed {
+    filter: hue-rotate(180deg) invert();
+}
+
+/*
+ Class list
+ */
+
+.classindex dl.odd {
+    background: var(--odd-color);
+    border-radius: var(--border-radius-small);
+}
+
+.classindex dl.even {
+    background-color: transparent;
+}
+
+/* 
+ Class Index Doxygen 1.8 
+*/
+
+table.classindex {
+    margin-left: 0;
+    margin-right: 0;
+    width: 100%;
+}
+
+table.classindex table div.ah {
+    background-image: none;
+    background-color: initial;
+    border-color: var(--separator-color);
+    color: var(--page-foreground-color);
+    box-shadow: var(--box-shadow);
+    border-radius: var(--border-radius-large);
+    padding: var(--spacing-small);
+}
+
+div.qindex {
+    background-color: var(--odd-color);
+    border-radius: var(--border-radius-small);
+    border: 1px solid var(--separator-color);
+    padding: var(--spacing-small) 0;
+}
+
+/*
+  Footer and nav-path
+ */
+
+#nav-path {
+    width: 100%;
+}
+
+#nav-path ul {
+    background-image: none;
+    background: var(--page-background-color);
+    border: none;
+    border-top: 1px solid var(--separator-color);
+    border-bottom: 1px solid var(--separator-color);
+    border-bottom: 0;
+    box-shadow: 0 0.75px 0 var(--separator-color);
+    font-size: var(--navigation-font-size);
+}
+
+img.footer {
+    width: 60px;
+}
+
+.navpath li.footer {
+    color: var(--page-secondary-foreground-color);
+}
+
+address.footer {
+    color: var(--page-secondary-foreground-color);
+    margin-bottom: var(--spacing-large);
+}
+
+#nav-path li.navelem {
+    background-image: none;
+    display: flex;
+    align-items: center;
+}
+
+.navpath li.navelem a {
+    text-shadow: none;
+    display: inline-block;
+    color: var(--primary-color) !important;
+}
+
+.navpath li.navelem b {
+    color: var(--primary-dark-color);
+    font-weight: 500;
+}
+
+li.navelem {
+    padding: 0;
+    margin-left: -8px;
+}
+
+li.navelem:first-child {
+    margin-left: var(--spacing-large);
+}
+
+li.navelem:first-child:before {
+    display: none;
+}
+
+#nav-path li.navelem:after {
+    content: '';
+    border: 5px solid var(--page-background-color);
+    border-bottom-color: transparent;
+    border-right-color: transparent;
+    border-top-color: transparent;
+    transform: translateY(-1px) scaleY(4.2);
+    z-index: 10;
+    margin-left: 6px;
+}
+
+#nav-path li.navelem:before {
+    content: '';
+    border: 5px solid var(--separator-color);
+    border-bottom-color: transparent;
+    border-right-color: transparent;
+    border-top-color: transparent;
+    transform: translateY(-1px) scaleY(3.2);
+    margin-right: var(--spacing-small);
+}
+
+.navpath li.navelem a:hover {
+    color: var(--primary-color);
+}
+
+/*
+ Scrollbars for Webkit
+*/
+
+#nav-tree::-webkit-scrollbar,
+div.fragment::-webkit-scrollbar,
+pre.fragment::-webkit-scrollbar,
+div.memproto::-webkit-scrollbar,
+.contents center::-webkit-scrollbar,
+.contents .center::-webkit-scrollbar,
+.contents table:not(.memberdecls):not(.mlabels):not(.fieldtable):not(.memname) tbody::-webkit-scrollbar,
+div.contents .toc::-webkit-scrollbar,
+.contents .dotgraph::-webkit-scrollbar,
+.contents .tabs-overview-container::-webkit-scrollbar {
+    background: transparent;
+    width: calc(var(--webkit-scrollbar-size) + var(--webkit-scrollbar-padding) + var(--webkit-scrollbar-padding));
+    height: calc(var(--webkit-scrollbar-size) + var(--webkit-scrollbar-padding) + var(--webkit-scrollbar-padding));
+}
+
+#nav-tree::-webkit-scrollbar-thumb,
+div.fragment::-webkit-scrollbar-thumb,
+pre.fragment::-webkit-scrollbar-thumb,
+div.memproto::-webkit-scrollbar-thumb,
+.contents center::-webkit-scrollbar-thumb,
+.contents .center::-webkit-scrollbar-thumb,
+.contents table:not(.memberdecls):not(.mlabels):not(.fieldtable):not(.memname) tbody::-webkit-scrollbar-thumb,
+div.contents .toc::-webkit-scrollbar-thumb,
+.contents .dotgraph::-webkit-scrollbar-thumb,
+.contents .tabs-overview-container::-webkit-scrollbar-thumb {
+    background-color: transparent;
+    border: var(--webkit-scrollbar-padding) solid transparent;
+    border-radius: calc(var(--webkit-scrollbar-padding) + var(--webkit-scrollbar-padding));
+    background-clip: padding-box;
+}
+
+#nav-tree:hover::-webkit-scrollbar-thumb,
+div.fragment:hover::-webkit-scrollbar-thumb,
+pre.fragment:hover::-webkit-scrollbar-thumb,
+div.memproto:hover::-webkit-scrollbar-thumb,
+.contents center:hover::-webkit-scrollbar-thumb,
+.contents .center:hover::-webkit-scrollbar-thumb,
+.contents table:not(.memberdecls):not(.mlabels):not(.fieldtable):not(.memname) tbody:hover::-webkit-scrollbar-thumb,
+div.contents .toc:hover::-webkit-scrollbar-thumb,
+.contents .dotgraph:hover::-webkit-scrollbar-thumb,
+.contents .tabs-overview-container:hover::-webkit-scrollbar-thumb {
+    background-color: var(--webkit-scrollbar-color);
+}
+
+#nav-tree::-webkit-scrollbar-track,
+div.fragment::-webkit-scrollbar-track,
+pre.fragment::-webkit-scrollbar-track,
+div.memproto::-webkit-scrollbar-track,
+.contents center::-webkit-scrollbar-track,
+.contents .center::-webkit-scrollbar-track,
+.contents table:not(.memberdecls):not(.mlabels):not(.fieldtable):not(.memname) tbody::-webkit-scrollbar-track,
+div.contents .toc::-webkit-scrollbar-track,
+.contents .dotgraph::-webkit-scrollbar-track,
+.contents .tabs-overview-container::-webkit-scrollbar-track {
+    background: transparent;
+}
+
+#nav-tree::-webkit-scrollbar-corner {
+    background-color: var(--side-nav-background);
+}
+
+#nav-tree,
+div.fragment,
+pre.fragment,
+div.memproto,
+.contents center,
+.contents .center,
+.contents table:not(.memberdecls):not(.mlabels):not(.fieldtable):not(.memname) tbody,
+div.contents .toc {
+    overflow-x: auto;
+    overflow-x: overlay;
+}
+
+#nav-tree {
+    overflow-x: auto;
+    overflow-y: auto;
+    overflow-y: overlay;
+}
+
+/*
+ Scrollbars for Firefox
+*/
+
+#nav-tree,
+div.fragment,
+pre.fragment,
+div.memproto,
+.contents center,
+.contents .center,
+.contents table:not(.memberdecls):not(.mlabels):not(.fieldtable):not(.memname) tbody,
+div.contents .toc,
+.contents .dotgraph,
+.contents .tabs-overview-container {
+    scrollbar-width: thin;
+}
+
+/*
+  Optional Dark mode toggle button
+*/
+
+doxygen-awesome-dark-mode-toggle {
+    display: inline-block;
+    margin: 0 0 0 var(--spacing-small);
+    padding: 0;
+    width: var(--searchbar-height);
+    height: var(--searchbar-height);
+    background: none;
+    border: none;
+    border-radius: var(--searchbar-height);
+    vertical-align: middle;
+    text-align: center;
+    line-height: var(--searchbar-height);
+    font-size: 22px;
+    display: flex;
+    align-items: center;
+    justify-content: center;
+    user-select: none;
+    cursor: pointer;
+}
+
+doxygen-awesome-dark-mode-toggle > svg {
+    transition: transform .1s ease-in-out;
+}
+
+doxygen-awesome-dark-mode-toggle:active > svg {
+    transform: scale(.5);
+}
+
+doxygen-awesome-dark-mode-toggle:hover {
+    background-color: rgba(0,0,0,.03);
+}
+
+html.dark-mode doxygen-awesome-dark-mode-toggle:hover {
+    background-color: rgba(0,0,0,.18);
+}
+
+/*
+ Optional fragment copy button
+*/
+.doxygen-awesome-fragment-wrapper {
+    position: relative;
+}
+
+doxygen-awesome-fragment-copy-button {
+    opacity: 0;
+    background: var(--fragment-background);
+    width: 28px;
+    height: 28px;
+    position: absolute;
+    right: calc(var(--spacing-large) - (var(--spacing-large) / 2.5));
+    top: calc(var(--spacing-large) - (var(--spacing-large) / 2.5));
+    border: 1px solid var(--fragment-foreground);
+    cursor: pointer;
+    border-radius: var(--border-radius-small);
+    display: flex;
+    justify-content: center;
+    align-items: center;
+}
+
+.doxygen-awesome-fragment-wrapper:hover doxygen-awesome-fragment-copy-button, doxygen-awesome-fragment-copy-button.success {
+    opacity: .28;
+}
+
+doxygen-awesome-fragment-copy-button:hover, doxygen-awesome-fragment-copy-button.success {
+    opacity: 1 !important;
+}
+
+doxygen-awesome-fragment-copy-button:active:not([class~=success]) svg {
+    transform: scale(.91);
+}
+
+doxygen-awesome-fragment-copy-button svg {
+    fill: var(--fragment-foreground);
+    width: 18px;
+    height: 18px;
+}
+
+doxygen-awesome-fragment-copy-button.success svg {
+    fill: rgb(14, 168, 14);
+}
+
+doxygen-awesome-fragment-copy-button.success {
+    border-color: rgb(14, 168, 14);
+}
+
+@media screen and (max-width: 767px) {
+    .textblock > .doxygen-awesome-fragment-wrapper > doxygen-awesome-fragment-copy-button,
+    .textblock li > .doxygen-awesome-fragment-wrapper > doxygen-awesome-fragment-copy-button,
+    .memdoc li > .doxygen-awesome-fragment-wrapper > doxygen-awesome-fragment-copy-button,
+    .memdoc > .doxygen-awesome-fragment-wrapper > doxygen-awesome-fragment-copy-button,
+    dl dd > .doxygen-awesome-fragment-wrapper > doxygen-awesome-fragment-copy-button {
+        right: 0;
+    }
+}
+
+/*
+ Optional paragraph link button
+*/
+
+a.anchorlink {
+    font-size: 90%;
+    margin-left: var(--spacing-small);
+    color: var(--page-foreground-color) !important;
+    text-decoration: none;
+    opacity: .15;
+    display: none;
+    transition: opacity .1s ease-in-out, color .1s ease-in-out;
+}
+
+a.anchorlink svg {
+    fill: var(--page-foreground-color);
+}
+
+h3 a.anchorlink svg, h4 a.anchorlink svg {
+    margin-bottom: -3px;
+    margin-top: -4px;
+}
+
+a.anchorlink:hover {
+    opacity: .45;
+}
+
+h2:hover a.anchorlink, h1:hover a.anchorlink, h3:hover a.anchorlink, h4:hover a.anchorlink  {
+    display: inline-block;
+}
+
+/*
+ Optional tab feature
+*/
+
+.tabbed {
+    margin: var(--spacing-medium) auto;
+}
+
+.tabbed ul {
+    padding-inline-start: 0px;
+    margin: 0;
+    padding: var(--spacing-small) 0;
+    border-bottom: 1px solid var(--separator-color);
+}
+
+.tabbed li {
+    display: none;
+}
+
+.tabbed li.selected {
+    display: block;
+}
+
+.tabs-overview-container {
+    overflow-x: auto;
+    display: block;
+    overflow-y: visible;
+}
+
+.tabs-overview {
+    border-bottom: 1px solid var(--separator-color);
+    display: flex;
+    flex-direction: row;
+}
+
+.tabs-overview button.tab-button {
+    color: var(--page-foreground-color);
+    margin: 0;
+    border: none;
+    background: transparent;
+    padding: var(--spacing-small) 0;
+    display: inline-block;
+    font-size: var(--page-font-size);
+    cursor: pointer;
+    box-shadow: 0 1px 0 0 var(--separator-color);
+    position: relative;
+}
+
+.tabs-overview button.tab-button .tab-title {
+    float: left;
+    white-space: nowrap;
+    padding: var(--spacing-small) var(--spacing-large);
+    border-radius: var(--border-radius-medium);
+}
+
+.tabs-overview button.tab-button:not(:last-child) .tab-title {
+    box-shadow: 8px 0 0 -7px var(--separator-color);
+}
+
+.tabs-overview button.tab-button:hover .tab-title {
+    background: var(--separator-color);
+    box-shadow: none;
+}
+
+.tabs-overview button.tab-button.active {
+    color: var(--primary-color);
+}
+
+.tabs-overview button.tab-button.active::after {
+    content: '';
+    display: block;
+    position: absolute;
+    left: 0px;
+    bottom: 0;
+    right: 0px;
+    height: 3px;
+    border-radius: var(--border-radius-small) var(--border-radius-small) 0 0;
+    background-color: var(--primary-color);
+}
diff --git a/packages/HighFive/doc/doxygen-awesome-css/update_doxygen_awesome.sh b/packages/HighFive/doc/doxygen-awesome-css/update_doxygen_awesome.sh
new file mode 100755
index 0000000000000000000000000000000000000000..d38c8398deda71ffdbdf4096f0baf914a4051f94
--- /dev/null
+++ b/packages/HighFive/doc/doxygen-awesome-css/update_doxygen_awesome.sh
@@ -0,0 +1,47 @@
+#! /usr/bin/env bash
+
+#
+# Copyright (c), 2022, Blue Brain Project
+#
+# Distributed under the Boost Software License, Version 1.0.
+#   (See accompanying file LICENSE_1_0.txt or copy at
+#         http://www.boost.org/LICENSE_1_0.txt)
+#
+
+set -e
+
+if [[ $# -ne 1 ]]
+then
+  echo "Usage: $0 TMP_DIR"
+  echo ""
+  echo "TMP_DIR must point to a writeable, empty, temporary directory."
+
+  exit 1
+fi
+
+TMP_DIR="$(realpath "$1")"
+DOXYGEN_AWESOME_DIR="$(dirname "$(realpath "$0")")"
+REPO_URL="https://github.com/jothepro/doxygen-awesome-css"
+REPO_DIR="${TMP_DIR}/doxygen-awesome-css"
+
+CONTENT_URL="https://raw.githubusercontent.com/jothepro/doxygen-awesome-css"
+
+mkdir -p "${TMP_DIR}"
+git clone "${REPO_URL}" "${REPO_DIR}" 1>&2
+pushd "${REPO_DIR}" 1>&2
+
+VERSION="$(git tag -l | sed -e '/^v[0-9]*\.[0-9]*\.[0-9]*$/!d' | sort -V | tail -n 1)"
+
+popd 1>&2
+
+if [[ -z "$VERSION" ]]
+then
+  exit 1
+fi
+
+STYLESHEET="doxygen-awesome.css"
+curl "${CONTENT_URL}/${VERSION}/${STYLESHEET}" \
+     --output "${DOXYGEN_AWESOME_DIR}/${STYLESHEET}" \
+     1>&2
+
+echo "${VERSION}"
diff --git a/packages/HighFive/doc/environment.yaml b/packages/HighFive/doc/environment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a08f03e890e11d1b15303b591b0c0694fb0d262a
--- /dev/null
+++ b/packages/HighFive/doc/environment.yaml
@@ -0,0 +1,12 @@
+channels:
+  - conda-forge
+dependencies:
+  - boost-cpp
+  - catch2
+  - cmake
+  - doxygen
+  - eigen
+  - graphviz
+  - hdf5
+  - xtensor
+  - xtl
diff --git a/packages/HighFive/doc/installation.md b/packages/HighFive/doc/installation.md
new file mode 100644
index 0000000000000000000000000000000000000000..41521bba5e57212e7a7cd08b9423c5fcea655ddd
--- /dev/null
+++ b/packages/HighFive/doc/installation.md
@@ -0,0 +1,254 @@
+# Beginners Installation Guide on Linux
+
+These installation instructions are aimed at developers who aren't very
+familiar with installing C/C++ software and using CMake on Linux.
+
+## Obtaining CMake
+You'll need a compiler and CMake. We'll assume that a reasonably modern C++
+compiler is available. Often a sufficiently new version of CMake is also
+present on the system.
+
+If not, there are two options: use the system package manager or use `pip`.
+CMake is improving by leaps and bounds, which means you want a recent version.
+Rather than fighting an older version of CMake, we suggest simply installing
+the newest version via `pip`.
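+
+For example, assuming `pip` is available (the `cmake` package on PyPI ships
+pre-built binaries):
+
+    pip install cmake
+    cmake --version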
+
+## Obtaining HDF5
+First, you need to decide if you need MPI support. The rule of thumb is: if
+you're unsure, then you don't need it. If you need MPI, you must install an
+MPI-enabled version of HDF5. Otherwise pick either one; if something is already
+installed, e.g. because `h5py` was installed as a system package, stick with
+that version.
+
+The options for installing HDF5 are:
+1. Use the system package manager.
+2. On a cluster use modules.
+3. Use [Spack](https://github.com/spack/spack).
+4. Use [Conan](https://conan.io).
+5. Manually install it.
+
+The system package manager will install HDF5 in a default location, where CMake
+will later be able to find it without further help. All the other approaches
+install into a non-default location, and CMake might need help locating HDF5.
+The way one tells CMake where to find HDF5 is through `CMAKE_PREFIX_PATH`,
+e.g.,
+
+    cmake -DCMAKE_PREFIX_PATH="${HDF5_ROOT}" ...
+
+Note that `${HDF5_ROOT}` points to the folder which contains the two folders
+`include` and `lib`.
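+
+For example, the directory layout typically looks something like this (paths
+purely illustrative):
+
+    ${HDF5_ROOT}/include/hdf5.h
+    ${HDF5_ROOT}/lib/libhdf5.so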
+
+### System Package Manager
+The default choice is to use the system package manager to install HDF5.
+One thing to keep an eye out for is that certain Linux distributions don't install
+the headers automatically. Since you're about to develop an application which
+(indirectly) uses HDF5, you need those headers. If the packages are split, the
+development package is often suffixed with `-dev` or `-devel`.
+
+#### Ubuntu
+The package manager is apt. To install the HDF5 C library without MPI support:
+
+    sudo apt-get install libhdf5-dev
+
+For MPI support, install `libhdf5-openmpi-dev` instead.
+
+#### ArchLinux
+On ArchLinux you install
+
+    sudo pacman -S hdf5
+
+or `hdf5-openmpi` for HDF5 with OpenMPI support.
+
+
+### Using Modules
+If you're on a cluster, HDF5 has almost certainly been installed for you.
+Figure out how to use it. This is the preferred solution on clusters. As
+always, the precise instructions depend on the cluster, but something like
+
+    module load hdf5
+
+will probably be part of the solution. Try `module avail` to see what's
+available. Otherwise, you'd need to check the documentation for your cluster.
+Cluster admins like to hide the good stuff, i.e. modern versions, behind
+another package such as `new` or some other mechanism.
+
+You might need to know where HDF5 has been installed. You can find out what a
+module does by typing
+
+    module show hdf5
+
+If it contains something about prepending to `CMAKE_PREFIX_PATH`, then CMake
+should find the correct version automatically after loading the module.
+
+### Using Spack
+If neither of the above works, the next best choice might be Spack. It's a
+package manager for scientific computing. The general idea behind it is to
+avoid dependency issues by compiling a compatible set of everything.
+
+Obtain Spack by cloning their repo:
+
+    git clone https://github.com/spack/spack.git
+
+Activate Spack by sourcing a magic file:
+
+    source spack/share/spack/setup-env.sh
+
+which will put the command `spack` into your `PATH`. Okay, now we're set. The
+first step is to create an environment for your project, which we'll call
+`useful`:
+
+    spack env create useful
+    spack env activate -p useful
+    spack add hdf5
+    spack install --jobs NPROC
+
+If you need MPI support use `hdf5+mpi` instead. The location of the HDF5
+installation is `spack location --install-dir hdf5`.
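+
+For example, to point CMake at that location directly (with the environment
+active):
+
+    cmake -DCMAKE_PREFIX_PATH="$(spack location --install-dir hdf5)" ...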
+
+### Conan
+If Spack doesn't work, you can try Conan, as sketched below.
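+
+This is an untested sketch assuming Conan 2; the package version and options
+are illustrative, so consult the Conan documentation for details. Create a
+`conanfile.txt`:
+
+    [requires]
+    hdf5/1.14.0
+
+    [generators]
+    CMakeDeps
+    CMakeToolchain
+
+Then install the dependencies and point CMake at the generated toolchain file:
+
+    conan install . --output-folder=build --build=missing
+    cmake -DCMAKE_TOOLCHAIN_FILE=build/conan_toolchain.cmake -B build .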
+
+### Manually Install HDF5
+If all else fails, you can try to manually install HDF5. First you need to
+obtain the source code. For example by using `git` or by downloading an archive
+from their webpage.
+
+    git clone https://github.com/HDFGroup/hdf5
+    cd hdf5
+    git checkout hdf5-1_14_0
+
+Now, fingers crossed it'll compile and install:
+
+    cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=../hdf5-v1.14.0 -B build .
+    cmake --build build --parallel [NPROC]
+    cmake --install build
+
+Note that here we picked the installation path (or more precisely prefix) to be
+`../hdf5-v1.14.0`. You might want to install HDF5 somewhere else. This
+installation prefix is also the path you need to give CMake so it's able to
+find HDF5 later on.
+
+### Confirming HDF5 Has Been Installed
+For this purpose we need a dummy file `dummy.cpp` to compile:
+
+    #include <hdf5.h>
+
+    int main() {
+      auto file = H5Fcreate("foo.h5", H5F_ACC_EXCL, H5P_DEFAULT, H5P_DEFAULT);
+      H5Fclose(file);
+      return 0;
+    }
+
+and a `CMakeLists.txt` with the following contents
+
+    cmake_minimum_required(VERSION 3.19)
+    project(Dummy)
+
+    find_package(HDF5 REQUIRED)
+    add_executable(dummy dummy.cpp)
+    target_link_libraries(dummy HDF5::HDF5)
+
+Now run CMake to configure the build system and keep an eye out for a line
+related to HDF5, e.g.
+
+    $ cmake -B build .
+    ...
+    -- Found HDF5: hdf5-shared (found version "1.14.0")
+    ...
+
+Compile and check that it's doing something sensible:
+
+    $ cmake --build build --verbose
+    [ 50%] Building CXX object CMakeFiles/dummy.dir/dummy.cpp.o
+    /usr/bin/c++ ... -isystem ${HDF5_ROOT}/include ... -c ${PWD}/dummy.cpp
+    [100%] Linking CXX executable dummy
+    /usr/bin/c++ ... -o dummy -Wl,-rpath,${HDF5_ROOT}/lib ${HDF5_ROOT}/lib/libhdf5.so.310.0.0 ...
+
+Mostly you're checking that the paths are what you'd expect them to be. If
+this command was successful, chances are high that HDF5 is properly installed
+and you've figured out the correct CMake invocations. If you want, you can run
+the executable:
+
+    build/dummy
+
+which would create an empty file `foo.h5`.
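+
+If you want to look inside, the `h5dump` tool that ships with HDF5 prints the
+structure and contents of an HDF5 file:
+
+    h5dump foo.h5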
+
+## Obtaining HighFive
+
+In principle the same instructions as for HDF5 can be used. However, HighFive
+is much less popular than HDF5, and therefore the system package manager
+likely doesn't know about it, nor does Conan. You're left with Spack and the
+manual process. It seems someone has done the wonderful work of adding HighFive
+to conda-forge, so maybe that's also an option.
+
+### Git Submodules
+This is the well-tested method for "vendoring" HighFive, i.e. including the
+HighFive sources with those of your project, as sketched below.
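+
+A minimal sketch, assuming the submodule lives in `deps/HighFive` (the path is
+just an example):
+
+    git submodule add https://github.com/BlueBrain/HighFive.git deps/HighFive
+    git submodule update --init --recursive
+
+and in your `CMakeLists.txt`:
+
+    add_subdirectory(deps/HighFive)
+    target_link_libraries(dummy HighFive)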
+
+### Spack
+Similarly as for HDF5, you can use Spack to install HighFive:
+
+    spack env activate -p useful
+    spack add highfive
+    spack install --jobs NPROC
+
+Again `spack location --install-dir highfive` will return the path where
+HighFive was installed. Since the Spack recipe of HighFive declares HDF5 as a
+dependency, technically it's not necessary to add `hdf5`; just `highfive` is
+enough.
+
+### Manually Install HighFive
+Just like before, the steps are: clone, configure, compile (essentially a
+no-op for a header-only library), and install. The detailed instructions are
+
+    git clone --recursive https://github.com/BlueBrain/HighFive.git
+    cd HighFive
+    git checkout v2.7.1
+
+If it complains that Catch is missing, you forgot the `--recursive`. To fix
+this, type
+
+    git submodule update --init --recursive
+
+Okay, on to configure, compile and install. The CMake commands are
+
+    cmake -DCMAKE_INSTALL_PREFIX=../highfive-v2.7.1 -DHIGHFIVE_USE_BOOST=Off -B build .
+    cmake --build build --parallel
+    cmake --install build
+
+### Confirming It Works
+We again need a dummy file called `dummy.cpp` with the following contents
+
+    #include <highfive/highfive.hpp>
+
+    int main() {
+      auto file = HighFive::File("foo.h5", HighFive::File::Create);
+      return 0;
+    }
+
+and the following `CMakeLists.txt`:
+
+    cmake_minimum_required(VERSION 3.19)
+    project(UseHighFive)
+
+    find_package(HighFive REQUIRED)
+    add_executable(dummy dummy.cpp)
+    target_link_libraries(dummy HighFive)
+
+The required CMake commands are:
+
+    $ cmake -DCMAKE_PREFIX_PATH="${HDF5_ROOT};${HIGHFIVE_ROOT}" -B build .
+    ...
+    -- HIGHFIVE 2.7.1: (Re)Detecting Highfive dependencies (HIGHFIVE_USE_INSTALL_DEPS=NO)
+    -- Found HDF5: hdf5-shared (found version "1.14.0")
+    ...
+
+    $ cmake --build build --verbose
+    [ 50%] Building CXX object CMakeFiles/dummy.dir/dummy.cpp.o
+    /usr/bin/c++ ... -isystem ${HIGHFIVE_ROOT}/include -isystem ${HDF5_ROOT}/include ... -c dummy.cpp
+    [100%] Linking CXX executable dummy
+    /usr/bin/c++ ... -o dummy -Wl,-rpath,${HDF5_ROOT}/lib ${HDF5_ROOT}/lib/libhdf5.so.310.0.0 ...
+
+Pay attention to the semicolon (not a colon, as elsewhere on Linux) used to
+separate directories in `CMAKE_PREFIX_PATH`. If this worked, you should be set
+to either copy the instructions to your "real" project, or start developing
+the rest of your project.
diff --git a/packages/HighFive/doc/poster/example1_hdf5.cpp b/packages/HighFive/doc/poster/example1_hdf5.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..f551d90c1caceca5da4c80ffe4d2ba703aa12266
--- /dev/null
+++ b/packages/HighFive/doc/poster/example1_hdf5.cpp
@@ -0,0 +1,53 @@
+#include <vector>
+#include "hdf5.h"
+
+void data_io() {
+    hid_t file_id, dset_id, dspace_id, group_id; /* identifiers */
+    herr_t status;
+
+    // Setup dataset dimensions and input data
+    hsize_t dims[1];  // fixed size: a runtime-sized array would be a non-standard VLA
+    dims[0] = 50;
+    std::vector<int> data(50, 1);  // int, matching the integer dataset created below
+
+    // Open a file
+    file_id = H5Fcreate("new_file.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+
+    // Create a group
+    group_id = H5Gcreate2(file_id, "/group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+
+    // Create a dataset
+    dspace_id = H5Screate_simple(1, dims, NULL);
+    dset_id = H5Dcreate2(
+        group_id, "dset1", H5T_STD_I32BE, dspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+
+    // Write the data
+    status = H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data.data());
+
+    // Close dataset after writing
+    status = H5Dclose(dset_id);
+
+    // Retrieve result size and preallocate vector
+    std::vector<int> result;  // int, matching H5T_NATIVE_INT used when reading
+    dset_id = H5Dopen(file_id, "/group/dset1", H5P_DEFAULT);
+    dspace_id = H5Dget_space(dset_id);
+    int ndims = H5Sget_simple_extent_ndims(dspace_id);
+    std::vector<hsize_t> res_dims(ndims);  // runtime size needs a vector, not a VLA
+    status = H5Sget_simple_extent_dims(dspace_id, res_dims.data(), NULL);
+    int res_sz = 1;
+    for (int i = 0; i < ndims; i++) {
+        res_sz *= res_dims[i];
+    }
+    result.resize(res_sz);
+
+    // Read the data
+    status = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, result.data());
+
+    // Close the dataset and group
+    status = H5Dclose(dset_id);
+    status = H5Gclose(group_id);
+
+    // Close the file
+    status = H5Fclose(file_id);
+}
diff --git a/packages/HighFive/doc/poster/example1_highfive.cpp b/packages/HighFive/doc/poster/example1_highfive.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1d3c5148ad569787354e06ed82e60eb4dc54b5b3
--- /dev/null
+++ b/packages/HighFive/doc/poster/example1_highfive.cpp
@@ -0,0 +1,17 @@
+#include <highfive/H5File.hpp>
+
+using HighFive::File;
+
+void write_io() {
+    std::vector<int> d1(50, 1);
+
+    // Open a file
+    File file("tmp.h5", File::ReadWrite | File::Truncate);
+
+    // Create DataSet and write data (short form)
+    file.createDataSet("/group/dset1", d1);
+
+    // Read the data
+    std::vector<int> d1_read;
+    file.getDataSet("/group/dset1").read(d1_read);
+}
diff --git a/packages/HighFive/doc/poster/example3.cpp b/packages/HighFive/doc/poster/example3.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e18fbbf83e4fb015647a70d91857f18902eb0c39
--- /dev/null
+++ b/packages/HighFive/doc/poster/example3.cpp
@@ -0,0 +1,29 @@
+#include <highfive/highfive.hpp>
+
+
+typedef struct {
+    double width;
+    double height;
+} Size2D;
+
+
+HighFive::CompoundType create_compound_Size2D() {
+    return {{"width", HighFive::AtomicType<double>{}}, {"height", HighFive::AtomicType<double>{}}};
+}
+
+HIGHFIVE_REGISTER_TYPE(Size2D, create_compound_Size2D)
+
+int data_io() {
+    const std::string DATASET_NAME("points");
+
+    HighFive::File file("compounds.h5", HighFive::File::Truncate);
+
+    auto t1 = create_compound_Size2D();
+    t1.commit(file, "Size2D");
+
+    std::vector<Size2D> pts = {{1., 2.5}, {3., 4.5}};
+    auto dataset = file.createDataSet(DATASET_NAME, pts);
+
+    auto g1 = file.createGroup("group1");
+    g1.createAttribute(DATASET_NAME, pts);
+
+    return 0;  // data_io is declared to return int
+}
diff --git a/packages/HighFive/doc/poster/example6.cpp b/packages/HighFive/doc/poster/example6.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..41a050570598d4fb1a2dec0aa040eee6d72763d5
--- /dev/null
+++ b/packages/HighFive/doc/poster/example6.cpp
@@ -0,0 +1,49 @@
+#include <iostream>
+
+#include <mpi.h>
+
+#include <highfive/highfive.hpp>
+
+
+int main(int argc, char** argv) {
+    int mpi_rank, mpi_size;
+    const std::string DATASET_NAME("dset");
+
+    // initialize MPI
+    MPI_Init(&argc, &argv);
+    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+    using namespace HighFive;
+    try {
+        // open a new file with the MPI IO driver for parallel Read/Write
+        File file("parallel_highfive.h5",
+                  File::ReadWrite | File::Create | File::Truncate,
+                  MPIOFileDriver(MPI_COMM_WORLD, MPI_INFO_NULL));
+
+        // we define the size of our dataset as
+        //   rows    : total number of MPI ranks
+        //   columns : 2
+        std::vector<size_t> dims(2);
+        dims[0] = std::size_t(mpi_size);
+        dims[1] = 2;
+
+        // Create the dataset
+        DataSet dset = file.createDataSet<double>(DATASET_NAME, DataSpace(dims));
+
+        // Each rank wants to write its own rank number twice
+        // into its associated row
+        int data[1][2] = {{mpi_rank, mpi_rank}};
+
+        // write it into the row associated with this rank
+        dset.select({std::size_t(mpi_rank), 0}, {1, 2}).write(data);
+
+    } catch (const Exception& err) {
+        // catch and print any HDF5 error
+        std::cerr << err.what() << std::endl;
+        MPI_Abort(MPI_COMM_WORLD, 1);
+    }
+
+    MPI_Finalize();
+    return 0;
+}
diff --git a/packages/HighFive/doc/poster/example_boost.cpp b/packages/HighFive/doc/poster/example_boost.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..56b78d074e3b136c193dc4781b9812e9074db88c
--- /dev/null
+++ b/packages/HighFive/doc/poster/example_boost.cpp
@@ -0,0 +1,21 @@
+#include <complex>
+
+#define H5_USE_BOOST 1
+#include <highfive/highfive.hpp>
+
+#include <boost/multi_array.hpp>
+
+using complex_t = std::complex<double>;
+
+void data_io() {
+    boost::multi_array<complex_t, 4> multi_array(boost::extents[3][2][1][1]);
+    std::fill_n(multi_array.origin(), multi_array.num_elements(), 1.0);
+    multi_array[1][1][0][0] = complex_t{1.1, 1.2};
+
+    HighFive::File file("multi_array_complex.h5", HighFive::File::Truncate);
+
+    HighFive::DataSet dataset =
+        file.createDataSet<complex_t>("multi_array", HighFive::DataSpace::From(multi_array));
+
+    dataset.write(multi_array);
+}
diff --git a/packages/HighFive/doc/poster/example_boost_ublas.cpp b/packages/HighFive/doc/poster/example_boost_ublas.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..986a671de57f59d2f0da98c2d1e76efa529bef83
--- /dev/null
+++ b/packages/HighFive/doc/poster/example_boost_ublas.cpp
@@ -0,0 +1,47 @@
+#include <iostream>
+
+#define H5_USE_BOOST 1
+#include <highfive/highfive.hpp>
+
+// In some versions of Boost (starting with 1.64), you have to
+// include the serialization header before ublas
+#include <boost/serialization/vector.hpp>
+
+#include <boost/numeric/ublas/io.hpp>
+#include <boost/numeric/ublas/matrix.hpp>
+
+using namespace HighFive;
+
+void data_io() {
+    const std::string DATASET_NAME("dset");
+    const size_t size_x = 10;
+    const size_t size_y = 10;
+
+    try {
+        typedef boost::numeric::ublas::matrix<double> Matrix;
+
+        // create a 10x10 matrix
+        Matrix mat(size_x, size_y);
+
+        // fill it
+        for (std::size_t i = 0; i < size_x; ++i) {
+            mat(i, i) = static_cast<double>(i);
+        }
+
+        // Create a new HDF5 file
+        File file("boost_ublas.h5", File::ReadWrite | File::Create | File::Truncate);
+
+        DataSet dataset = file.createDataSet<double>(DATASET_NAME, DataSpace::From(mat));
+
+        dataset.write(mat);
+
+        Matrix result;
+        dataset.read(result);
+
+        std::cout << "Matrix result:\n" << result << std::endl;
+
+    } catch (const Exception& err) {
+        // catch and print any HDF5 error
+        std::cerr << err.what() << std::endl;
+    }
+}
diff --git a/packages/HighFive/doc/poster/example_easy_h5py.py b/packages/HighFive/doc/poster/example_easy_h5py.py
new file mode 100644
index 0000000000000000000000000000000000000000..aca093490a28ebb2ae17cdc1c4fdab211d242127
--- /dev/null
+++ b/packages/HighFive/doc/poster/example_easy_h5py.py
@@ -0,0 +1,26 @@
+import h5py
+import numpy as np
+
+A = np.ones([10, 3])
+
+with h5py.File("tmp.h5", "w") as file:
+
+    # write dataset (automatically creates groups if needed)
+    file["/path/to/A"] = A
+
+    # read the dataset's contents into a numpy array
+    B = file["/path/to/A"][...]
+
+    # write attribute
+    file["/path/to/A"].attrs["date"] = "today"
+
+    # read from attribute
+    d = file["/path/to/A"].attrs["date"]
+
+    # create extendible dataset and extend it
+    dset = file.create_dataset("/path/to/extendible", (1,), maxshape=(None,))
+    dset[0] = 0
+
+    for i in range(1, 10):
+        dset.resize((i + 1,))
+        dset[i] = i
diff --git a/packages/HighFive/doc/poster/example_easy_highfive.cpp b/packages/HighFive/doc/poster/example_easy_highfive.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..700056cae978ecc043c60d558927f282ad14b127
--- /dev/null
+++ b/packages/HighFive/doc/poster/example_easy_highfive.cpp
@@ -0,0 +1,29 @@
+#include <xtensor/xarray.hpp>
+
+#include <highfive/H5Easy.hpp>
+
+int main() {
+    xt::xarray<int> A = xt::ones<int>({10, 3});
+
+    // open a file
+    H5Easy::File file("tmp.h5", H5Easy::File::Overwrite);
+
+    // write dataset (automatically creates groups if needed)
+    H5Easy::dump(file, "/path/to/A", A);
+
+    // read from dataset
+    auto B = H5Easy::load<xt::xarray<int>>(file, "/path/to/A");
+
+    // write attribute
+    H5Easy::dumpAttribute(file, "/path/to/A", "date", std::string("today"));
+
+    // read from attribute
+    auto d = H5Easy::loadAttribute<std::string>(file, "/path/to/A", "date");
+
+    // create extendible dataset and extend it
+    for (size_t i = 0; i < 10; ++i) {
+        H5Easy::dump(file, "/path/to/extendible", i, {i});
+    }
+
+    return 0;
+}
diff --git a/packages/HighFive/doc/poster/example_props.cpp b/packages/HighFive/doc/poster/example_props.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0e5b14bde7ce990798d02035fd3c780bd25e0058
--- /dev/null
+++ b/packages/HighFive/doc/poster/example_props.cpp
@@ -0,0 +1,24 @@
+#include <highfive/highfive.hpp>
+
+using namespace HighFive;
+
+int write_data() {
+    FileDriver fdrv;
+
+    fdrv.add(FileVersionBounds(H5F_LIBVER_LATEST, H5F_LIBVER_LATEST));
+    fdrv.add(MetadataBlockSize(10240));
+
+    File file("example2.h5", File::Truncate, fdrv);
+
+    GroupCreateProps props;
+    props.add(EstimatedLinkInfo(1000, 500));
+    auto group = file.createGroup("g", props);
+
+    DataSetCreateProps dsprops;
+    dsprops.add(Chunking(std::vector<hsize_t>{2, 2}));
+    dsprops.add(Deflate(9));
+
+
+    std::vector<int> d1(100000, 1);
+    group.createDataSet("dset1", d1, dsprops);
+
+    return 0;  // write_data is declared to return int
+}
diff --git a/packages/HighFive/doc/poster/examples.js b/packages/HighFive/doc/poster/examples.js
new file mode 100644
index 0000000000000000000000000000000000000000..7870d7ef834af8472654b0bcf0fc559c51f7c775
--- /dev/null
+++ b/packages/HighFive/doc/poster/examples.js
@@ -0,0 +1,37 @@
+
+
+function compile_on_godbolt(example_id) {
+
+  let src = $('#' + example_id).attr("rawsrc");
+
+  let url = `https://godbolt.org/#g:!((g:!((g:!((h:codeEditor,i:(filename:'1',fontScale:14,fontUsePx:'0',j:1,lang:c%2B%2B,source:'${src}'),l:'5',n:'0',o:'C%2B%2B+source+%231',t:'0')),k:50,l:'4',n:'0',o:'',s:0,t:'0'),(g:!((h:compiler,i:(compiler:g112,filters:(b:'0',binary:'1',commentOnly:'0',demangle:'0',directives:'0',execute:'1',intel:'0',libraryCode:'0',trim:'1'),flagsViewOpen:'1',fontScale:14,fontUsePx:'0',j:1,lang:c%2B%2B,libs:!((name:hdf5,ver:'1121'),(name:highfive,ver:trunk)),options:'',selection:(endColumn:1,endLineNumber:1,positionColumn:1,positionLineNumber:1,selectionStartColumn:1,selectionStartLineNumber:1,startColumn:1,startLineNumber:1),source:1,tree:'1'),l:'5',n:'0',o:'x86-64+gcc+11.2+(C%2B%2B,+Editor+%231,+Compiler+%231)',t:'0')),k:50,l:'4',n:'0',o:'',s:0,t:'0')),l:'2',n:'0',o:'',t:'0')),version:4`;
+  window.open(url);
+
+}
+
+function setup_examples() {
+  $(".example").each(function () {
+    let cblock = this;
+    let file = this.id + '.' + this.dataset.lang;
+    $.ajax( {url: file,
+      dataType: 'text',
+      success: function( code ) {
+        let res = hljs.highlight(code, {language: cblock.dataset.lang, ignoreIllegals: true });
+        cblock.innerHTML += res.value;
+        let encoded = encodeURIComponent(code);
+        cblock.setAttribute("rawsrc",encoded);
+      }});
+
+  });
+
+  $(".godbolt").each(function(idx, el) {
+    let example_id = el.id.substring(3);
+    el.addEventListener("click", function () {
+      compile_on_godbolt(example_id);
+    });
+  });
+
+}
+
+
+setup_examples();
diff --git a/packages/HighFive/doc/poster/godbolt.org.ico b/packages/HighFive/doc/poster/godbolt.org.ico
new file mode 100644
index 0000000000000000000000000000000000000000..e3e20daefdb6c78c05e18c1c5888d5d4f8e54528
Binary files /dev/null and b/packages/HighFive/doc/poster/godbolt.org.ico differ
diff --git a/packages/HighFive/doc/poster/index.html b/packages/HighFive/doc/poster/index.html
new file mode 100644
index 0000000000000000000000000000000000000000..12712274445c29204b3ee12a10fe286ff63c938c
--- /dev/null
+++ b/packages/HighFive/doc/poster/index.html
@@ -0,0 +1,264 @@
+<!DOCTYPE html>
+<html lang="en">
+  <head>
+    <meta charset="UTF-8">
+    <title>HighFive: An easy-to-use, header-only C++ library for HDF5</title>
+    <link href="https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-1BmE4kWBq78iYhFldvKuhfTAU6auU8tT94WrHftjDbrCEXSU1oBoqyl2QvZ6jIW3" crossorigin="anonymous">
+    <script src="https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/js/bootstrap.bundle.min.js" integrity="sha384-ka7Sk0Gln4gmtz2MlQnikT1wXgYsOg+OMhuP+IlRH9sENBO0LRn5q+8nbTov4+1p" crossorigin="anonymous"></script>
+
+    <link rel="stylesheet"
+          href="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.5.0/styles/default.min.css">
+    <script
+        src="https://code.jquery.com/jquery-3.6.0.min.js"
+        integrity="sha256-/xUj+3OJU5yExlq6GSYGSHk7tPXikynS7ogEvDej/m4="
+        crossorigin="anonymous"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.5.0/highlight.min.js"></script>
+    <style>
+      body {
+        padding-top: 60px;
+      }
+      @media (max-width: 979px) {
+        body {
+          padding-top: 0px;
+        }
+      }
+      .code {
+        background-color: #f8f9fa;
+        font-size: small;
+      }
+      .godbolt {
+        cursor: pointer;
+      }
+    </style>
+  </head>
+
+  <body>
+    <div class="container">
+      <div class="container">
+        <nav class="navbar fixed-top navbar-expand-lg navbar-light bg-light">
+          <div class="container-fluid">
+            <a class="navbar-brand" href="#">HighFive</a>
+            <ul class="nav justify-content-end">
+              <a class="nav-link text-muted" href="https://github.com/BlueBrain/HighFive"><span class="d-none d-sm-inline">View on GitHub </span><svg version="1.1" width="32" height="32" viewBox="0 0 16 16" class="octicon octicon-mark-github" aria-hidden="true"><path fill-rule="evenodd" d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0 0 16 8c0-4.42-3.58-8-8-8z"></path></svg></a>
+              <li class="nav-item">
+              </li>
+            </ul>
+          </div>
+        </nav>
+      </div>
+      <div class="container">
+        <h1>HighFive: An easy-to-use, header-only C++ library for HDF5</h1>
+
+        <p><i>Adrien Devresse<sup>1</sup>, Omar Awile<sup>1</sup>, Jorge
+          Blanco<sup>1</sup>, Tristan Carel<sup>1</sup>, Nicolas Cornu<sup>1</sup>,
+          Tom de Geus<sup>2</sup>, Luc Grosheintz-Laval<sup>1</sup>, Pramod Kumbhar<sup>1</sup>,
+          Fernando Pereira<sup>1</sup>, Sergio Rivas Gomez<sup>1</sup>, Matthias Wolf<sup>1</sup>,
+          James King<sup>1</sup></i></p>
+      </div>
+      <div class="container">
+        <p>
+        <sup>1</sup> Blue Brain Project, École Polytechnique Fédérale de Lausanne, Switzerland<br/>
+        <sup>2</sup> Physics of Complex Systems Laboratory, École Polytechnique Fédérale de Lausanne,
+        Switzerland</p>
+      </div>
+
+      <div class="container mb-5">
+        <h2>Introduction</h2>
+        <p>The use of portable scientific data formats is vital for managing complex workflows,
+        reliable data storage, knowledge transfer, and long-term maintainability and
+        reproducibility. Hierarchical Data Format (HDF) 5 is considered the de-facto
+        industry standard for this purpose. While the official HDF5 library is versatile and
+        well supported, it only provides a low-level C/C++ interface. The lack of proper
+        high-level C++ abstractions dissuades the use of HDF5 in scientific applications. There are a
+        number of C++ wrapper libraries available. Many, however, are domain-specific,
+        incomplete or not actively maintained.</p>
+        <p>HighFive is an attempt to address these challenges.</p>
+      </div>
+      <div class="container mb-5">
+        <h2>Basic use of HighFive</h2>
+        <p>It is an easy-to-use, modern C++ header-only library that reduces most of the
+        book-keeping overhead required by HDF5. HighFive uses RAII to handle object life-times
+        and automatically handles reference counting on HDF5 objects. The library makes use of
+        C++ templating for automatic type mapping. These features significantly increase
+        programmer productivity and reduce coding bugs.</p>
+        <table class="table">
+          <thead>
+            <tr>
+              <td>HighFive</td>
+              <td>HDF5</td>
+            </tr>
+          </thead>
+          <tbody>
+            <tr>
+              <td>
+                <div class="card w-100">
+                  <div class="code card-body"><img src="godbolt.org.ico" id="gb_example1_highfive" width=32 height=32  class="godbolt float-end img-thumbnail">
+                    <pre class="font-monospace"><div class="example" data-lang="cpp" id="example1_highfive"></div></pre>
+                  </div>
+                </div>
+              </td>
+              <td>
+                <div class="card w-100">
+                  <div class="code card-body"><img src="godbolt.org.ico" id="gb_example1_hdf5" width=32 height=32  class="godbolt float-end img-thumbnail">
+                    <pre class="font-monospace"><div class="example" data-lang="cpp" id="example1_hdf5"></div></pre>
+                  </div>
+                </div>
+              </td>
+            </tr>
+          </tbody>
+        </table>
+      </div>
+
+      <div class="container mb-5">
+        <h2>Support for HDF5 advanced features</h2>
+        <p>Its simplified data management does not come at the cost of HDF5's flexibility:
+        advanced features and tunable parameters are exposed through a simple interface. File
+        version bounds can be read and written to define object compatibility, and the metadata
+        block size can be set. Group properties for compression, chunking and link info
+        estimates can be set and read:</p>
+        <div class="card w-75">
+          <div class="code card-body"><img src="godbolt.org.ico" id="gb_example_props" width=32 height=32  class="godbolt float-end img-thumbnail">
+              <pre class="font-monospace"><div class="example" data-lang="cpp" id="example_props"></div></pre>
+          </div>
+        </div>
+      </div>
+      <div class="container mb-5">
+        <h2>Complex data types</h2>
+        <p>HighFive is built with scientific applications in mind. Besides scalar and simple STL
+        vectors it is possible to map C++ structs to HDF5 compound types and to read and write
+        Boost, Boost ublas, Eigen and XTensor array types. The library is also able to handle
+        combinations of array types (e.g. <code>std::vector&lt;Eigen::Matrix&gt;</code>). This is
+        achieved through various templated converters. Additionally, HighFive supports enums and
+        various string types.</p>
+        <div class="card w-75">
+          <div class="card-header">
+            <ul class="nav nav-tabs card-header-tabs" id="highfive-type-support" role="tablist">
+              <li class="nav-item" role="presentation">
+                <button class="nav-link active" id="compound-tab" data-bs-toggle="tab"
+                                                                  data-bs-target="#compound" type="button" role="tab" aria-controls="compound"
+                                                                                                                      aria-selected="true">Compound types</button>
+              </li>
+              <li class="nav-item" role="presentation">
+                <button class="nav-link" id="boost-tab" data-bs-toggle="tab" data-bs-target="#boost"
+                                                                             type="button" role="tab" aria-controls="boost" aria-selected="false">Boost</button>
+              </li>
+              <li class="nav-item" role="presentation">
+                <button class="nav-link" id="boost-ublas-tab" data-bs-toggle="tab"
+                                                              data-bs-target="#boost-ublas"
+                                                                             type="button"
+                                                                             role="tab"
+                                                                           aria-controls="boost-ublas"
+                                                                           aria-selected="false">Boost
+                ublas</button>
+              </li>
+              <li class="nav-item" role="presentation">
+                <button class="nav-link" id="eigen-tab" data-bs-toggle="tab" data-bs-target="#eigen"
+                                                                             type="button" role="tab" aria-controls="eigen" aria-selected="false">Eigen</button>
+              </li>
+            </ul>
+          </div>
+          <div class="tab-content code card-body" id="highfive-type-support-content">
+            <div class="tab-pane fade show active" id="compound" role="tabpanel" aria-labelledby="compound-tab">
+              <img src="godbolt.org.ico" id="gb_example3" width=32 height=32  class="godbolt float-end img-thumbnail">
+              <pre class="font-monospace"><div class="example" data-lang="cpp" id="example3"></div></pre>
+            </div>
+            <div class="tab-pane fade" id="boost" role="tabpanel" aria-labelledby="boost-tab">
+              <img src="godbolt.org.ico" id="gb_example_boost" width=32 height=32  class="godbolt float-end img-thumbnail">
+              <pre class="font-monospace"><div class="example" data-lang="cpp" id="example_boost"></div></pre>
+            </div>
+            <div class="tab-pane fade" id="boost-ublas" role="tabpanel" aria-labelledby="boost-ublas-tab">
+              <img src="godbolt.org.ico" id="gb_example_boost_ublas" width=32 height=32  class="godbolt float-end img-thumbnail">
+              <pre class="font-monospace"><div class="example" data-lang="cpp" id="example_boost_ublas"></div></pre>
+            </div>
+            <div class="tab-pane fade" id="eigen" role="tabpanel" aria-labelledby="eigen-tab">
+              <img src="godbolt.org.ico" id="gb_example_eigen" width=32 height=32  class="godbolt float-end img-thumbnail">
+              <pre class="font-monospace"><div class="example" data-lang="cpp" id="example_eigen"></div></pre>
+            </div>
+          </div>
+        </div>
+      </div>
+      <div class="container mb-5">
+        <h2>HighFive for parallel applications</h2>
+        <p>With the aim of supporting large-scale scientific applications, we have made an
+        effort to also natively support the HDF5 MPI backend in HighFive. A special
+        <code>MPIOFileDriver</code> is used in the application code to ensure that HDF5 is correctly
+        initialized. No other special API calls are required since all necessary provisions are
+        handled transparently.</p>
+        <div class="card w-75">
+          <div class="code card-body"><img src="godbolt.org.ico" id="gb_example6" width=32 height=32  class="godbolt float-end img-thumbnail">
+            <pre class="font-monospace"><div class="example" data-lang="cpp" data-lang="cpp" id="example6"></div></pre>
+          </div>
+        </div>
+      </div>
+
+      <div class="container mb-5">
+        <h2>H5Easy: one-liners</h2>
+        <p>HighFive also offers the H5Easy API (in its own namespace),
+        in which common operations are one-liners, with a syntax comparable
+        to, for example, h5py for Python.
+        It offers overloads for STL containers, Boost, Eigen, xtensor, and OpenCV.</p>
+        <table class="table">
+          <thead>
+            <tr>
+              <td>H5Easy</td>
+              <td>h5py</td>
+            </tr>
+          </thead>
+          <tbody>
+            <tr>
+              <td>
+                <div class="card w-100">
+                  <div class="code card-body"><img src="godbolt.org.ico" id="gb_example_easy_highfive" width=32 height=32  class="godbolt float-end img-thumbnail">
+                    <pre class="font-monospace"><div class="example" data-lang="cpp" id="example_easy_highfive"></div></pre>
+                  </div>
+                </div>
+              </td>
+              <td>
+                <div class="card w-100">
+                  <div class="code card-body">
+                    <pre class="font-monospace"><div class="example" data-lang="py" id="example_easy_h5py"></div></pre>
+                  </div>
+                </div>
+              </td>
+            </tr>
+          </tbody>
+        </table>
+      </div>
+
+      <div class="container mb-5">
+        <h2>Obtaining and building HighFive</h2>
+        <p>HighFive is developed open-source and can be cloned and forked from
+        <a href="https://github.com/BlueBrain/HighFive">GitHub</a>.
+        It can also be installed from the clone,
+        via <a href="https://spack.readthedocs.io/en/latest/package_list.html#highfive">spack</a>,
+        or via <a href="https://anaconda.org/conda-forge/highfive">conda</a>.
+        It can then be used for example with <code>find_package(HighFive)</code> in CMake.
+        Being a header-only library, HighFive can be used directly as a subfolder in a C++ project,
+        for example by adding it as a submodule.
+        More details can be found in the
+        <a href=""https://github.com/BlueBrain/HighFive/blob/master/README.md>README.md</a> file.
+        </p>
+      </div> <!-- top level container -->
+      <div class="container">
+        <footer class="d-flex flex-wrap justify-content-between align-items-center py-3 my-4 border-top">
+          <div class="col-md-8 d-flex align-items-left">
+            <span class="text-muted">© 2022 Blue Brain Project/EPFL<br>
+              <small>The development of this software was supported by funding to the Blue Brain
+                Project, a research center of the École polytechnique fédérale de Lausanne (EPFL),
+                from the Swiss government's ETH Board of the Swiss Federal Institutes of
+                Technology.</small>
+            </span><br>
+            <span class="text-muted">
+            </span>
+          </div>
+          <div class="col-md-4 justify-content-end list-unstyled d-flex">
+            <span class="text-muted">Boost Software License 1.0</span>
+          </div>
+        </footer>
+      </div>
+
+      <!-- Example code loading -->
+      <script src="examples.js"></script>
+    </div>
+  </body>
+</html>
diff --git a/packages/HighFive/include/highfive/H5Attribute.hpp b/packages/HighFive/include/highfive/H5Attribute.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..810d388ae8ef555ff2417c3e5cf56edef212f003
--- /dev/null
+++ b/packages/HighFive/include/highfive/H5Attribute.hpp
@@ -0,0 +1,266 @@
+/*
+ *  Copyright (c), 2017, Ali Can Demiralp <ali.demiralp@rwth-aachen.de>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include <vector>
+
+#include <H5Apublic.h>
+
+#include "H5DataType.hpp"
+#include "H5Object.hpp"
+#include "bits/H5Friends.hpp"
+#include "bits/H5Path_traits.hpp"
+
+namespace HighFive {
+class DataSpace;
+
+namespace detail {
+
+/// \brief Internal hack to create an `Attribute` from an ID.
+///
+/// WARNING: Creating an Attribute from an ID has implications w.r.t. the lifetime of the object
+///          that got passed via its ID. Using this method carelessly opens up the suite of
+///          issues related to C-style resource management, including the analogs of double
+///          frees, dangling pointers, etc.
+///
+/// NOTE: This is not part of the API and only serves to work around a compiler issue in GCC which
+///       prevents us from using `friend`s instead. This function should only be used for internal
+///       purposes. The problematic construct is:
+///
+///           template<class Derived>
+///           friend class SomeCRTP<Derived>;
+///
+/// \private
+Attribute make_attribute(hid_t hid);
+}  // namespace detail
+
+/// \brief Class representing an Attribute of a DataSet or Group
+///
+/// \sa AnnotateTraits::createAttribute, AnnotateTraits::getAttribute, AnnotateTraits::listAttributeNames, AnnotateTraits::hasAttribute, AnnotateTraits::deleteAttribute for create, get, list, check or delete Attribute
+class Attribute: public Object, public PathTraits<Attribute> {
+  public:
+    const static ObjectType type = ObjectType::Attribute;
+
+    /// \brief Get the name of the current Attribute.
+    /// \code{.cpp}
+    /// auto attr = dset.createAttribute<std::string>("my_attribute", DataSpace::From(string_list));
+    /// std::cout << attr.getName() << std::endl; // Will print "my_attribute"
+    /// \endcode
+    /// \since 2.2.2
+    std::string getName() const;
+
+    /// \brief The number of bytes required to store the attribute in the HDF5 file.
+    /// \code{.cpp}
+    /// size_t size = dset.createAttribute<int>("foo", DataSpace(1, 2)).getStorageSize();
+    /// \endcode
+    /// \since 1.0
+    size_t getStorageSize() const;
+
+    /// \brief Get the DataType of the Attribute.
+    /// \code{.cpp}
+    /// Attribute attr = dset.createAttribute<int>("foo", DataSpace(1, 2));
+    /// auto dtype = attr.getDataType(); // Will be an hdf5 type deduced from int
+    /// \endcode
+    /// \since 1.0
+    DataType getDataType() const;
+
+    /// \brief Get the DataSpace of the current Attribute.
+    /// \code{.cpp}
+    /// Attribute attr = dset.createAttribute<int>("foo", DataSpace(1, 2));
+    /// auto dspace = attr.getSpace(); // This will be a DataSpace of dimension 1 * 2
+    /// \endcode
+    /// \since 1.0
+    DataSpace getSpace() const;
+
+    /// \brief Get the DataSpace of the current Attribute.
+    /// \note This is an alias of getSpace().
+    /// \since 1.0
+    DataSpace getMemSpace() const;
+
+    /// \brief Get the value of the Attribute.
+    /// \code{.cpp}
+    /// Attribute attr = dset.getAttribute("foo");
+    /// // The value will contain what has been written to the attribute
+    /// std::vector<int> value = attr.read<std::vector<int>>();
+    /// \endcode
+    /// \since 2.5.0
+    template <typename T>
+    T read() const;
+
+    /// \brief Get the value of the Attribute in a buffer.
+    ///
+    /// Read the attribute into an existing object. Only available for
+    /// supported types `T`. If `array` has preallocated the correct amount of
+    /// memory, then this routine should not trigger reallocation. Otherwise,
+    /// if supported, the object will be resized.
+    ///
+    /// An exception is raised if the numbers of dimension of the buffer and of
+    /// the attribute are different.
+    ///
+    /// \code{.cpp}
+    /// // Will read into `value` avoiding memory allocation if the dimensions
+    /// // match, i.e. if the attribute `"foo"` has three elements.
+    /// std::vector<int> value(3);
+    /// file.getAttribute("foo").read(value);
+    /// \endcode
+    /// \since 1.0
+    template <typename T>
+    void read(T& array) const;
+
+    /// \brief Read the attribute into a pre-allocated buffer.
+    /// \param array A pointer to the first byte of sufficient pre-allocated memory.
+    /// \param mem_datatype The DataType of the array.
+    ///
+    /// \note This is the shallowest wrapper around `H5Aread`. If possible
+    /// prefer either Attribute::read() const or Attribute::read(T&) const.
+    ///
+    /// \code{.cpp}
+    /// auto attr = file.getAttribute("foo");
+    ///
+    /// // Simulate custom allocation by the application.
+    /// size_t n_elements = attr.getSpace().getElementCount();
+    /// int * ptr = (int*) malloc(n_elements*sizeof(int));
+    ///
+    /// // Read into the pre-allocated memory.
+    /// attr.read(ptr, mem_datatype);
+    /// \endcode
+    /// \since 2.2.2
+    template <typename T>
+    void read(T* array, const DataType& mem_datatype) const;
+
+    /// \brief Read the attribute into a buffer.
+    /// Behaves like Attribute::read(T*, const DataType&) const but
+    /// additionally this overload deduces the memory datatype from `T`.
+    ///
+    /// \param array Pointer to the first byte of pre-allocated memory.
+    ///
+    /// \note If possible prefer either Attribute::read() const or Attribute::read(T&) const.
+    ///
+    /// \code{.cpp}
+    /// auto attr = file.getAttribute("foo");
+    ///
+    /// // Simulate custom allocation by the application.
+    /// size_t n_elements = attr.getSpace().getElementCount();
+    /// int * ptr = (int*) malloc(n_elements*sizeof(int));
+    ///
+    /// // Read into the pre-allocated memory.
+    /// attr.read(ptr);
+    /// \endcode
+    /// \since 2.2.2
+    template <typename T>
+    void read(T* array) const;
+
+    /// \brief Write the value into the Attribute.
+    ///
+    /// Write the value to the attribute. For supported types `T`, this overload
+    /// will write the value to the attribute. The datatype and dataspace are
+    /// deduced automatically. However, since the attribute has already been
+    /// created, the dimensions of `value` must match those of the attribute.
+    ///
+    /// \code{.cpp}
+    /// // Prefer the fused version if creating and writing the attribute
+    /// // at the same time.
+    /// dset.createAttribute("foo", std::vector<int>{1, 2, 3});
+    ///
+    /// // To overwrite the value:
+    /// std::vector<int> value{4, 5, 6};
+    /// dset.getAttribute<int>("foo").write(value);
+    /// \endcode
+    /// \since 1.0
+    template <typename T>
+    void write(const T& value);
+
+    /// \brief Write from a raw pointer.
+    ///
+    /// Values that have been correctly arranged in memory can be written
+    /// directly by passing a raw pointer.
+    ///
+    /// \param buffer Pointer to the first byte of the value.
+    /// \param mem_datatype The DataType of the buffer.
+    ///
+    /// \note This is the shallowest wrapper around `H5Awrite`. It's useful
+    /// if you need full control. If possible prefer Attribute::write.
+    ///
+    /// \code{.cpp}
+    /// Attribute attr = dset.createAttribute<int>("foo", DataSpace(2, 3));
+    ///
+    /// // Simulate the application creating `value` and only exposing access
+    /// // to the raw pointer `ptr`.
+    /// std::vector<std::array<int, 3>> value{{1, 2, 3}, {4, 5, 6}};
+    /// int * ptr = (int*) value.data();
+    ///
+    /// // Simply write the bytes to disk.
+    /// attr.write(ptr, AtomicType<int>());
+    /// \endcode
+    /// \since 2.2.2
+    template <typename T>
+    void write_raw(const T* buffer, const DataType& mem_datatype);
+
+    /// \brief Write from a raw pointer.
+    ///
+    /// Much like Attribute::write_raw(const T*, const DataType&).
+    /// Additionally, this overload attempts to automatically deduce the
+    /// datatype of the buffer. Note that the file datatype is already set.
+    ///
+    /// \param buffer Pointer to the first byte.
+    ///
+    /// \note If possible prefer Attribute::write.
+    ///
+    /// \code{.cpp}
+    /// // Simulate the application creating `value` and only exposing access
+    /// // to the raw pointer `ptr`.
+    /// std::vector<std::array<int, 3>> value{{1, 2, 3}, {4, 5, 6}};
+    /// int * ptr = (int*) value.data();
+    ///
+    /// // Simply write the bytes to disk.
+    /// attr.write(ptr);
+    /// \endcode
+    /// \since 2.2.2
+    template <typename T>
+    void write_raw(const T* buffer);
+
+    /// \brief The create property list used for this attribute.
+    ///
+    /// Some HDF5 properties/settings of an attribute are defined by a
+    /// create property list. This method returns a copy of the create
+    /// property list used during creation of the attribute.
+    ///
+    /// \code{.cpp}
+    /// auto acpl = attr.getCreatePropertyList();
+    ///
+    /// // For example to create another attribute with the same properties.
+    /// file.createAttribute("foo", 42, acpl);
+    /// \endcode
+    /// \since 2.5.0
+    AttributeCreateProps getCreatePropertyList() const {
+        return details::get_plist<AttributeCreateProps>(*this, H5Aget_create_plist);
+    }
+
+    // No empty attributes
+    Attribute() = delete;
+
+  protected:
+    using Object::Object;
+
+  private:
+#if HIGHFIVE_HAS_FRIEND_DECLARATIONS
+    template <typename Derivate>
+    friend class ::HighFive::AnnotateTraits;
+#endif
+
+    friend Attribute detail::make_attribute(hid_t);
+};
+
+namespace detail {
+inline Attribute make_attribute(hid_t hid) {
+    return Attribute(hid);
+}
+}  // namespace detail
+
+}  // namespace HighFive
diff --git a/packages/HighFive/include/highfive/H5DataSet.hpp b/packages/HighFive/include/highfive/H5DataSet.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..0236f06c23054fa9e4d937ca87e78a260511cadc
--- /dev/null
+++ b/packages/HighFive/include/highfive/H5DataSet.hpp
@@ -0,0 +1,116 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include <vector>
+
+#include "H5DataSpace.hpp"
+#include "H5DataType.hpp"
+#include "H5Object.hpp"
+#include "bits/H5_definitions.hpp"
+#include "bits/H5Annotate_traits.hpp"
+#include "bits/H5Slice_traits.hpp"
+#include "bits/H5Path_traits.hpp"
+#include "bits/H5_definitions.hpp"
+
+namespace HighFive {
+
+///
+/// \brief Class representing a dataset.
+///
+class DataSet: public Object,
+               public SliceTraits<DataSet>,
+               public AnnotateTraits<DataSet>,
+               public PathTraits<DataSet> {
+  public:
+    const static ObjectType type = ObjectType::Dataset;
+
+    ///
+    /// \brief getStorageSize
+    /// \return returns the amount of storage allocated for a dataset.
+    ///
+    uint64_t getStorageSize() const;
+
+    ///
+    /// \brief getOffset
+    /// \return returns DataSet address in file
+    ///
+    uint64_t getOffset() const;
+
+    ///
+    /// \brief getDataType
+    /// \return return the datatype associated with this dataset
+    ///
+    DataType getDataType() const;
+
+    ///
+    /// \brief getSpace
+    /// \return return the dataspace associated with this dataset
+    ///
+    DataSpace getSpace() const;
+
+    ///
+    /// \brief getMemSpace
+    /// \return same as getSpace for a DataSet; provided for compatibility
+    /// with the Selection class
+    ///
+    DataSpace getMemSpace() const;
+
+
+    /// \brief Change the size of the dataset
+    ///
+    /// This requires that the dataset was created with chunking, and you
+    /// would generally want to have set larger maxdims when creating it.
+    /// \param dims New size of the dataset
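+    ///
+    /// A brief sketch (names and sizes are illustrative):
+    /// \code{.cpp}
+    /// // `dset` must have been created chunked, with maxdims of at least {20, 20}.
+    /// dset.resize({20, 20});
+    /// \endcode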
+    void resize(const std::vector<size_t>& dims);
+
+
+    /// \brief Get the dimensions of the whole DataSet.
+    ///       This is a shorthand for getSpace().getDimensions()
+    /// \return The shape of the current HighFive::DataSet
+    ///
+    inline std::vector<size_t> getDimensions() const {
+        return getSpace().getDimensions();
+    }
+
+    /// \brief Get the total number of elements in the current dataset.
+    ///       E.g. a 2x2x2 matrix has 8 elements.
+    ///       This is a shorthand for getSpace().getElementCount()
+    /// \return The number of elements in the current HighFive::DataSet
+    ///
+    inline size_t getElementCount() const {
+        return getSpace().getElementCount();
+    }
+
+    /// \brief Get the list of properties for creation of this dataset
+    DataSetCreateProps getCreatePropertyList() const {
+        return details::get_plist<DataSetCreateProps>(*this, H5Dget_create_plist);
+    }
+
+    /// \brief Get the list of properties for access of this dataset
+    DataSetAccessProps getAccessPropertyList() const {
+        return details::get_plist<DataSetAccessProps>(*this, H5Dget_access_plist);
+    }
+
+    /// \deprecated Default constructor creates unsafe uninitialized objects
+    H5_DEPRECATED("Default constructor creates unsafe uninitialized objects")
+    DataSet() = default;
+
+  protected:
+    using Object::Object;  // bring DataSet(hid_t)
+
+    DataSet(Object&& o) noexcept
+        : Object(std::move(o)) {}
+
+    friend class Reference;
+    template <typename Derivate>
+    friend class NodeTraits;
+};
+
+}  // namespace HighFive
diff --git a/packages/HighFive/include/highfive/H5DataSpace.hpp b/packages/HighFive/include/highfive/H5DataSpace.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..95d04dbbbd9052e291483c46d362295ba5c72583
--- /dev/null
+++ b/packages/HighFive/include/highfive/H5DataSpace.hpp
@@ -0,0 +1,243 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include <vector>
+#include <array>
+#include <cstdint>
+#include <type_traits>
+#include <initializer_list>
+
+#include "H5Object.hpp"
+#include "bits/H5_definitions.hpp"
+
+namespace HighFive {
+
+/// \brief Class representing the space (dimensions) of a DataSet
+///
+/// \code{.cpp}
+/// // Create a DataSpace of dimension 1 x 2 x 3
+/// DataSpace dspace(1, 2, 3);
+/// std::cout << dspace.getElementCount() << std::endl; // Print 1 * 2 * 3 = 6
+/// std::cout << dspace.getNumberDimensions() << std::endl; // Print 3
+/// std::vector<size_t> dims = dspace.getDimensions(); // dims is {1, 2, 3}
+/// \endcode
+class DataSpace: public Object {
+  public:
+    const static ObjectType type = ObjectType::DataSpace;
+
+    /// \brief Magic value to specify that a DataSpace can grow without limit.
+    ///
+    /// This value should be used with DataSpace::DataSpace(const std::vector<size_t>& dims, const
+    /// std::vector<size_t>& maxdims);
+    ///
+    /// \since 2.0
+    static const size_t UNLIMITED = SIZE_MAX;
+
+    /// \brief An enum to create scalar and null DataSpace with DataSpace::DataSpace(DataspaceType dtype).
+    ///
+    /// This enum is needed because scalar and null DataSpaces cannot otherwise be
+    /// distinguished by normal constructors: both have a dimension of 0.
+    /// \since 1.3
+    enum DataspaceType {
+        dataspace_scalar,  ///< Value to create scalar DataSpace
+        dataspace_null,    ///< Value to create null DataSpace
+        // simple dataspaces are handled directly via their dimensions
+    };
+
+    /// \brief Create a DataSpace of N-dimensions from a std::vector<size_t>.
+    /// \param dims Dimensions of the new DataSpace
+    ///
+    /// \code{.cpp}
+    /// // Create a DataSpace with 2 dimensions: 1 and 3
+    /// DataSpace(std::vector<size_t>{1, 3});
+    /// \endcode
+    /// \since 1.0
+    explicit DataSpace(const std::vector<size_t>& dims);
+
+    /// \brief Create a DataSpace of N-dimensions from a std::array<size_t, N>.
+    /// \param dims Dimensions of the new DataSpace
+    ///
+    /// \code{.cpp}
+    /// // Create a DataSpace with 2 dimensions: 1 and 3
+    /// DataSpace(std::array<size_t, 2>{1, 3});
+    /// \endcode
+    /// \since 2.3
+    template <size_t N>
+    explicit DataSpace(const std::array<size_t, N>& dims);
+
+    /// \brief Create a DataSpace of N-dimensions from an initializer list.
+    /// \param dims Dimensions of the new DataSpace
+    ///
+    /// \code{.cpp}
+    /// // Create a DataSpace with 2 dimensions: 1 and 3
+    /// DataSpace{1, 3};
+    /// \endcode
+    /// \since 2.1
+    DataSpace(const std::initializer_list<size_t>& dims);
+
+    /// \brief Create a DataSpace of N-dimensions from direct values.
+    /// \param dim1 The first dimension
+    /// \param dims The following dimensions
+    ///
+    /// \code{.cpp}
+    /// // Create a DataSpace with 2 dimensions: 1 and 3
+    /// DataSpace(1, 3);
+    /// \endcode
+    /// \since 2.1
+    template <typename... Args>
+    explicit DataSpace(size_t dim1, Args... dims);
+
+    /// \brief Create a DataSpace from a pair of iterators.
+    /// \param begin The beginning of the container
+    /// \param end The end of the container
+    ///
+    /// \code{.cpp}
+    /// // Create a DataSpace with 2 dimensions: 1 and 3
+    /// std::vector<int> v{1, 3};
+    /// DataSpace(v.begin(), v.end());
+    /// \endcode
+    ///
+    /// \since 2.0
+    // Attention: Explicitly disable DataSpace(int_like, int_like) from trying
+    //            to use this constructor
+    template <typename IT,
+              typename = typename std::enable_if<!std::is_integral<IT>::value, IT>::type>
+    DataSpace(const IT begin, const IT end);
+
+    /// \brief Create a resizable N-dimensional DataSpace.
+    /// \param dims Initial size of dataspace
+    /// \param maxdims Maximum size of the dataspace
+    ///
+    /// \code{.cpp}
+    /// // Create a DataSpace with 2 dimensions: 1 and 3.
+    /// // It can later be resized up to a maximum of 10 x 10
+    /// DataSpace(std::vector<size_t>{1, 3}, std::vector<size_t>{10, 10});
+    /// \endcode
+    ///
+    /// \see UNLIMITED for a DataSpace that can be resized without limit.
+    /// \since 2.0
+    explicit DataSpace(const std::vector<size_t>& dims, const std::vector<size_t>& maxdims);
+
+    /// \brief Create a scalar or a null DataSpace.
+    ///
+    /// This overload enables creating scalar or null data spaces, both have
+    /// dimension 0.
+    ///
+    /// \param space_type The value from the enum
+    ///
+    /// \code{.cpp}
+    /// DataSpace(DataspaceType::dataspace_scalar);
+    /// \endcode
+    ///
+    /// \attention Avoid braced initialization in these cases, i.e.
+    /// \code{.cpp}
+    /// // This is not a scalar dataset:
+    /// DataSpace{DataspaceType::dataspace_scalar};
+    /// \endcode
+    ///
+    /// \since 1.3
+    explicit DataSpace(DataspaceType space_type);
+
+    /// \brief Create a copy of the DataSpace which will have different id.
+    ///
+    /// \code{.cpp}
+    /// DataSpace dspace1(1, 3);
+    /// auto dspace2 = dspace1.clone();
+    /// \endcode
+    ///
+    /// \since 1.0
+    DataSpace clone() const;
+
+    /// \brief Returns the number of dimensions of a DataSpace.
+    /// \code{.cpp}
+    /// DataSpace dspace(1, 3);
+    /// size_t number_of_dim = dspace.getNumberDimensions(); // returns 2
+    /// \endcode
+    /// \since 1.0
+    size_t getNumberDimensions() const;
+
+    /// \brief Returns the size of the dataset in each dimension.
+    ///
+    /// For zero-dimensional datasets (e.g. scalar or null datasets) an empty
+    /// vector is returned.
+    ///
+    /// \code{.cpp}
+    /// DataSpace dspace(1, 3);
+    /// auto dims = dspace.getDimensions(); // returns {1, 3}
+    /// \endcode
+    ///
+    /// \sa DataSpace::getMaxDimensions
+    ///
+    /// \since 1.0
+    std::vector<size_t> getDimensions() const;
+
+    /// \brief Return the number of elements in this DataSpace.
+    ///
+    /// \code{.cpp}
+    /// DataSpace dspace(1, 3);
+    /// size_t elementcount = dspace.getElementCount(); // return 1 x 3 = 3
+    /// \endcode
+    /// \since 2.1
+    size_t getElementCount() const;
+
+    /// \brief Returns the maximum size of the dataset in each dimension.
+    ///
+    /// This is the maximum size a dataset can be extended to, which may be
+    /// different from the current size of the dataset.
+    ///
+    /// \code{.cpp}
+    /// DataSpace dspace(std::vector<size_t>{1, 3}, std::vector<size_t>{UNLIMITED, 10});
+    /// dspace.getMaxDimensions(); // Return {UNLIMITED, 10}
+    /// \endcode
+    ///
+    /// \sa DataSpace::getDimensions
+    /// \since 2.0
+    std::vector<size_t> getMaxDimensions() const;
+
+    /// \brief Automatically deduce the DataSpace from a container/value.
+    ///
+    /// Certain containers and scalar values are fully supported by HighFive.
+    /// For these containers, HighFive can deduce the dimensions from `value`.
+    ///
+    /// \code{.cpp}
+    /// double d = 42.0;
+    /// std::vector<std::vector<int>> v = {{4, 5, 6}, {7, 8, 9}};
+    /// DataSpace::From(v); // A DataSpace of dimensions 2, 3.
+    /// DataSpace::From(d); // A scalar dataspace.
+    /// \endcode
+    ///
+    /// \since 1.0
+    template <typename T>
+    static DataSpace From(const T& value);
+
+    /// \brief Create a DataSpace from a value of type string array.
+    /// \param string_array A C-array of C-strings (null-terminated).
+    ///
+    /// \code{.cpp}
+    /// char string_array[2][10] = {"123456789", "abcdefghi"};
+    /// // dspace is a DataSpace of dimension 2
+    /// auto dspace = DataSpace::FromCharArrayStrings(string_array);
+    /// \endcode
+    /// \since 2.2
+    template <std::size_t N, std::size_t Width>
+    static DataSpace FromCharArrayStrings(const char (&string_array)[N][Width]);
+
+  protected:
+    DataSpace() = default;
+
+    friend class Attribute;
+    friend class File;
+    friend class DataSet;
+};
+
+}  // namespace HighFive
+
+// We include bits right away since DataSpace is user-constructible
+#include "bits/H5Dataspace_misc.hpp"
diff --git a/packages/HighFive/include/highfive/H5DataType.hpp b/packages/HighFive/include/highfive/H5DataType.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..886107961b87e32911702f3aaf3496acc8ce8c26
--- /dev/null
+++ b/packages/HighFive/include/highfive/H5DataType.hpp
@@ -0,0 +1,488 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include <type_traits>
+#include <vector>
+
+#include <H5Tpublic.h>
+
+#include "H5Object.hpp"
+#include "bits/H5Utils.hpp"
+
+#include "bits/string_padding.hpp"
+#include "H5PropertyList.hpp"
+
+namespace HighFive {
+
+
+///
+/// \brief Enum of Fundamental data classes
+///
+enum class DataTypeClass {
+    Time = 1 << 1,
+    Integer = 1 << 2,
+    Float = 1 << 3,
+    String = 1 << 4,
+    BitField = 1 << 5,
+    Opaque = 1 << 6,
+    Compound = 1 << 7,
+    Reference = 1 << 8,
+    Enum = 1 << 9,
+    VarLen = 1 << 10,
+    Array = 1 << 11,
+    Invalid = 0
+};
+
+inline DataTypeClass operator|(DataTypeClass lhs, DataTypeClass rhs) {
+    using T = std::underlying_type<DataTypeClass>::type;
+    return static_cast<DataTypeClass>(static_cast<T>(lhs) | static_cast<T>(rhs));
+}
+
+inline DataTypeClass operator&(DataTypeClass lhs, DataTypeClass rhs) {
+    using T = std::underlying_type<DataTypeClass>::type;
+    return static_cast<DataTypeClass>(static_cast<T>(lhs) & static_cast<T>(rhs));
+}
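+
+// An illustrative sketch: these flag-style operators allow masking a datatype's
+// class, e.g. to test whether a DataType `dt` is numeric:
+//
+//     bool is_numeric = (dt.getClass() & (DataTypeClass::Integer | DataTypeClass::Float)) !=
+//                       DataTypeClass::Invalid;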
+
+class StringType;
+
+///
+/// \brief HDF5 Data Type
+///
+class DataType: public Object {
+  public:
+    bool operator==(const DataType& other) const;
+
+    bool operator!=(const DataType& other) const;
+
+    ///
+    /// \brief Return the fundamental type.
+    ///
+    DataTypeClass getClass() const;
+
+    ///
+    /// \brief Returns the length (in bytes) of this type's elements
+    ///
+    /// Notice that for variable-length sequences the reported size has limited
+    ///   applicability, since it refers to the size of the control structure. For info see
+    ///   https://support.hdfgroup.org/HDF5/doc/RM/RM_H5T.html#Datatype-GetSize
+    size_t getSize() const;
+
+    ///
+    /// \brief Returns a friendly description of the type (e.g. Float32)
+    ///
+    std::string string() const;
+
+    ///
+    /// \brief Returns whether the type is a variable-length string
+    ///
+    bool isVariableStr() const;
+
+    ///
+    /// \brief Returns whether the type is a fixed-length string
+    ///
+    bool isFixedLenStr() const;
+
+    ///
+    /// \brief Returns this datatype as a `StringType`.
+    ///
+    StringType asStringType() const;
+
+    ///
+    /// \brief Check whether the DataType was default constructed.
+    /// Such a value may indicate that the datatype should be auto-detected from a buffer.
+    ///
+    bool empty() const noexcept;
+
+    /// \brief Returns whether the type is a Reference
+    bool isReference() const;
+
+    /// \brief Get the list of properties for creation of this DataType
+    DataTypeCreateProps getCreatePropertyList() const {
+        return details::get_plist<DataTypeCreateProps>(*this, H5Tget_create_plist);
+    }
+
+  protected:
+    using Object::Object;
+
+    friend class Attribute;
+    friend class File;
+    friend class DataSet;
+    friend class CompoundType;
+    template <typename Derivate>
+    friend class NodeTraits;
+};
+
+
+enum class CharacterSet : std::underlying_type<H5T_cset_t>::type {
+    Ascii = H5T_CSET_ASCII,
+    Utf8 = H5T_CSET_UTF8,
+};
+
+class StringType: public DataType {
+  public:
+    ///
+    /// \brief For strings, return the character set.
+    ///
+    CharacterSet getCharacterSet() const;
+
+    ///
+    /// \brief For fixed-length strings, return the padding.
+    ///
+    StringPadding getPadding() const;
+
+  protected:
+    using DataType::DataType;
+    friend class DataType;
+};
+
+class FixedLengthStringType: public StringType {
+  public:
+    ///
+    /// \brief Create a fixed length string datatype.
+    ///
+    /// The string will be `size` bytes long, regardless of whether it's ASCII
+    /// or UTF8. In particular, a string with `n` UTF8 characters may require
+    /// up to `4*n` bytes.
+    ///
+    /// The string padding is subtle; essentially it's just a hint. A
+    /// null-terminated string is guaranteed to have one `'\0'` which marks the
+    /// semantic end of the string. The length of the buffer must be at least
+    /// `size` bytes regardless. HDF5 will read or write `size` bytes,
+    /// irrespective of where the `\0` occurs.
+    ///
+    /// Note that when writing, passing `StringPadding::NullTerminated` is a
+    /// guarantee to the reader that the string contains a `\0`. Therefore, make
+    /// sure that the string really is null-terminated. Otherwise prefer a
+    /// null-padded string, which only states that the buffer is padded with
+    /// zero or more `\0` characters.
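+    ///
+    /// A brief sketch (size and padding are illustrative):
+    /// \code{.cpp}
+    /// // An 8-byte, null-padded, ASCII fixed-length string type.
+    /// FixedLengthStringType str_type(8, StringPadding::NullPadded);
+    /// \endcode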
+    FixedLengthStringType(size_t size,
+                          StringPadding padding,
+                          CharacterSet character_set = CharacterSet::Ascii);
+};
+
+class VariableLengthStringType: public StringType {
+  public:
+    ///
+    /// \brief Create a variable length string HDF5 datatype.
+    ///
+    VariableLengthStringType(CharacterSet character_set = CharacterSet::Ascii);
+};
+
+
+///
+/// \brief Create an HDF5 DataType from a C++ type
+///
+///  Supports only basic data types
+///
+template <typename T>
+class AtomicType: public DataType {
+  public:
+    AtomicType();
+
+    using basic_type = T;
+};
+
+
+///
+/// \brief Create a compound HDF5 datatype
+///
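+/// A minimal usage sketch (the struct and member names are illustrative;
+/// `file` is an open, writable File):
+/// \code{.cpp}
+/// struct Record { int id; double value; };
+/// CompoundType record_type({{"id", AtomicType<int>{}, offsetof(Record, id)},
+///                           {"value", AtomicType<double>{}, offsetof(Record, value)}},
+///                          sizeof(Record));
+/// record_type.commit(file, "Record");
+/// \endcode
+///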
+class CompoundType: public DataType {
+  public:
+    ///
+    /// \brief Use for defining a sub-type of compound type
+    struct member_def {
+        member_def(std::string t_name, DataType t_base_type, size_t t_offset = 0)
+            : name(std::move(t_name))
+            , base_type(std::move(t_base_type))
+            , offset(t_offset) {}
+        std::string name;
+        DataType base_type;
+        size_t offset;
+    };
+
+    CompoundType(const CompoundType& other) = default;
+
+    ///
+    /// \brief Initializes a compound type from a vector of member definitions
+    /// \param t_members
+    /// \param size
+    inline CompoundType(const std::vector<member_def>& t_members, size_t size = 0)
+        : members(t_members) {
+        create(size);
+    }
+    inline CompoundType(std::vector<member_def>&& t_members, size_t size = 0)
+        : members(std::move(t_members)) {
+        create(size);
+    }
+    inline CompoundType(const std::initializer_list<member_def>& t_members, size_t size = 0)
+        : members(t_members) {
+        create(size);
+    }
+
+    ///
+    /// \brief Initializes a compound type from a DataType
+    /// \param type
+    inline CompoundType(DataType&& type)
+        : DataType(type) {
+        if (getClass() != DataTypeClass::Compound) {
+            std::ostringstream ss;
+            ss << "hid " << _hid << " does not refer to a compound data type";
+            throw DataTypeException(ss.str());
+        }
+        int result = H5Tget_nmembers(_hid);
+        if (result < 0) {
+            throw DataTypeException("Could not get members of compound datatype");
+        }
+        size_t n_members = static_cast<size_t>(result);
+        members.reserve(n_members);
+        for (unsigned i = 0; i < n_members; i++) {
+            char* name = H5Tget_member_name(_hid, i);
+            size_t offset = H5Tget_member_offset(_hid, i);
+            hid_t member_hid = H5Tget_member_type(_hid, i);
+            DataType member_type{member_hid};
+            members.emplace_back(std::string(name), member_type, offset);
+            if (H5free_memory(name) < 0) {
+                throw DataTypeException("Could not free names from the compound datatype");
+            }
+        }
+    }
+
+    /// \brief Commit datatype into the given Object
+    /// \param object Location to commit object into
+    /// \param name Name to give the datatype
+    inline void commit(const Object& object, const std::string& name) const;
+
+    /// \brief Get read access to the CompoundType members
+    inline const std::vector<member_def>& getMembers() const noexcept {
+        return members;
+    }
+
+  private:
+    /// A vector of the member_def members of this CompoundType
+    std::vector<member_def> members;
+
+    /// \brief Automatically create the type from the set of members
+    ///        using standard struct alignment.
+    /// \param size Total size of data type
+    void create(size_t size = 0);
+};
+
+///
+/// \brief Create a enum HDF5 datatype
+///
+/// \code{.cpp}
+/// enum class Position {
+///     FIRST = 1,
+///     SECOND = 2,
+/// };
+///
+/// EnumType<Position> create_enum_position() {
+///     return {{"FIRST", Position::FIRST},
+///             {"SECOND", Position::SECOND}};
+/// }
+///
+/// // You have to register the type inside HighFive
+/// HIGHFIVE_REGISTER_TYPE(Position, create_enum_position)
+///
+/// void write_first(HighFive::File& file) {
+///     auto dataset = file.createDataSet("/foo", Position::FIRST);
+/// }
+/// \endcode
+template <typename T>
+class EnumType: public DataType {
+  public:
+    ///
+    /// \brief Use for defining a member of enum type
+    struct member_def {
+        member_def(const std::string& t_name, T t_value)
+            : name(t_name)
+            , value(std::move(t_value)) {}
+        std::string name;
+        T value;
+    };
+
+    EnumType(const EnumType& other) = default;
+
+    EnumType(const std::vector<member_def>& t_members)
+        : members(t_members) {
+        static_assert(std::is_enum<T>::value, "EnumType<T>::create takes only enum");
+        if (members.empty()) {
+            HDF5ErrMapper::ToException<DataTypeException>(
+                "Could not create an enum without members");
+        }
+        create();
+    }
+
+    EnumType(std::initializer_list<member_def> t_members)
+        : EnumType(std::vector<member_def>(t_members)) {}
+
+    /// \brief Commit datatype into the given Object
+    /// \param object Location to commit object into
+    /// \param name Name to give the datatype
+    void commit(const Object& object, const std::string& name) const;
+
+  private:
+    std::vector<member_def> members;
+
+    void create();
+};
+
+
+/// \brief Create a DataType instance representing type T
+template <typename T>
+DataType create_datatype();
+
+
+/// \brief Create a DataType instance representing type T and perform a sanity check on its size
+template <typename T>
+DataType create_and_check_datatype();
+
+
+///
+/// \brief A structure representing a set of fixed-length strings
+///
+/// Although fixed-length string arrays can be created 'raw' without this
+/// structure, it must be used in order to retrieve results efficiently.
+///
+/// \tparam N Size of the string in bytes, including the null character. Note
+///           that all strings must be null-terminated.
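+///
+/// A usage sketch (contents are illustrative; `file` is an open, writable File):
+/// \code{.cpp}
+/// FixedLenStringArray<10> arr{"123456789", "abcdefghi"};
+/// file.createDataSet("strings", arr);
+/// \endcode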
+///
+template <std::size_t N>
+class FixedLenStringArray {
+  public:
+    FixedLenStringArray() = default;
+
+    ///
+    /// \brief Create a FixedLenStringArray from a raw contiguous buffer.
+    ///
+    /// The argument `n_strings` specifies the number of strings.
+    ///
+    FixedLenStringArray(const char array[][N], std::size_t n_strings);
+
+    ///
+    /// \brief Create a FixedLenStringArray from a sequence of strings.
+    ///
+    /// Such a conversion involves a copy; the original vector is not modified.
+    ///
+    explicit FixedLenStringArray(const std::vector<std::string>& vec);
+
+    FixedLenStringArray(const std::string* iter_begin, const std::string* iter_end);
+
+    FixedLenStringArray(const std::initializer_list<std::string>&);
+
+    ///
+    /// \brief Append an std::string to the buffer structure
+    ///
+    void push_back(const std::string&);
+
+    void push_back(const std::array<char, N>&);
+
+    ///
+    /// \brief Retrieve a string from the structure as std::string
+    ///
+    std::string getString(std::size_t index) const;
+
+    // Container interface
+    inline const char* operator[](std::size_t i) const noexcept {
+        return datavec[i].data();
+    }
+    inline const char* at(std::size_t i) const {
+        return datavec.at(i).data();
+    }
+    inline bool empty() const noexcept {
+        return datavec.empty();
+    }
+    inline std::size_t size() const noexcept {
+        return datavec.size();
+    }
+    inline void resize(std::size_t n) {
+        datavec.resize(n);
+    }
+    inline const char* front() const {
+        return datavec.front().data();
+    }
+    inline const char* back() const {
+        return datavec.back().data();
+    }
+    inline char* data() noexcept {
+        return datavec[0].data();
+    }
+    inline const char* data() const noexcept {
+        return datavec[0].data();
+    }
+
+  private:
+    using vector_t = typename std::vector<std::array<char, N>>;
+
+  public:
+    // Use the underlying iterator
+    using iterator = typename vector_t::iterator;
+    using const_iterator = typename vector_t::const_iterator;
+    using reverse_iterator = typename vector_t::reverse_iterator;
+    using const_reverse_iterator = typename vector_t::const_reverse_iterator;
+    using value_type = typename vector_t::value_type;
+
+    inline iterator begin() noexcept {
+        return datavec.begin();
+    }
+    inline iterator end() noexcept {
+        return datavec.end();
+    }
+    inline const_iterator begin() const noexcept {
+        return datavec.begin();
+    }
+    inline const_iterator cbegin() const noexcept {
+        return datavec.cbegin();
+    }
+    inline const_iterator end() const noexcept {
+        return datavec.end();
+    }
+    inline const_iterator cend() const noexcept {
+        return datavec.cend();
+    }
+    inline reverse_iterator rbegin() noexcept {
+        return datavec.rbegin();
+    }
+    inline reverse_iterator rend() noexcept {
+        return datavec.rend();
+    }
+    inline const_reverse_iterator rbegin() const noexcept {
+        return datavec.rbegin();
+    }
+    inline const_reverse_iterator rend() const noexcept {
+        return datavec.rend();
+    }
+
+  private:
+    vector_t datavec;
+};
+
+}  // namespace HighFive
+
+
+/// \brief Macro to extend datatype of HighFive
+///
+/// This macro has to be called outside of any namespace.
+///
+/// \code{.cpp}
+/// enum FooBar { FOO = 1, BAR = 2 };
+/// EnumType<FooBar> create_enum_foobar() {
+///    return EnumType<FooBar>({{"FOO", FooBar::FOO},
+///                             {"BAR", FooBar::BAR}});
+/// }
+/// HIGHFIVE_REGISTER_TYPE(FooBar, create_enum_foobar)
+/// \endcode
+#define HIGHFIVE_REGISTER_TYPE(type, function)                    \
+    template <>                                                   \
+    inline HighFive::DataType HighFive::create_datatype<type>() { \
+        return function();                                        \
+    }
+
+#include "bits/H5DataType_misc.hpp"
diff --git a/packages/HighFive/include/highfive/H5Easy.hpp b/packages/HighFive/include/highfive/H5Easy.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..e793fd8753179263fb235129d717739abeb362fd
--- /dev/null
+++ b/packages/HighFive/include/highfive/H5Easy.hpp
@@ -0,0 +1,400 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+
+/// \brief
+/// Read/dump DataSets or Attributes using a minimalistic syntax.
+/// To this end, the functions are templated, and accept:
+/// - Any type accepted by HighFive
+/// - Eigen objects
+/// - xtensor objects
+/// - OpenCV objects
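+///
+/// A minimal sketch (file and dataset names are illustrative):
+/// \code{.cpp}
+/// H5Easy::File file("example.h5", H5Easy::File::Overwrite);
+/// std::vector<double> measurement(10, 1.0);
+/// H5Easy::dump(file, "/path/to/measurement", measurement);
+/// auto copy = H5Easy::load<std::vector<double>>(file, "/path/to/measurement");
+/// \endcode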
+
+#pragma once
+
+#include <string>
+#include <vector>
+
+// optionally enable xtensor plug-in and load the library
+#ifdef XTENSOR_VERSION_MAJOR
+#ifndef H5_USE_XTENSOR
+#define H5_USE_XTENSOR
+#endif
+#endif
+
+#ifdef H5_USE_XTENSOR
+#include <xtensor/xarray.hpp>
+#include <xtensor/xtensor.hpp>
+#endif
+
+// optionally enable Eigen plug-in and load the library
+#ifdef EIGEN_WORLD_VERSION
+#ifndef H5_USE_EIGEN
+#define H5_USE_EIGEN
+#endif
+#endif
+
+#ifdef H5_USE_EIGEN
+#include <Eigen/Eigen>
+#endif
+
+// optionally enable OpenCV plug-in and load the library
+#ifdef CV_MAJOR_VERSION
+#ifndef H5_USE_OPENCV
+#define H5_USE_OPENCV
+#endif
+#endif
+
+#ifdef H5_USE_OPENCV
+#include <opencv2/opencv.hpp>
+#endif
+
+#include "H5File.hpp"
+
+namespace H5Easy {
+
+using HighFive::AtomicType;
+using HighFive::Attribute;
+using HighFive::Chunking;
+using HighFive::DataSet;
+using HighFive::DataSetCreateProps;
+using HighFive::DataSpace;
+using HighFive::Deflate;
+using HighFive::Exception;
+using HighFive::File;
+using HighFive::ObjectType;
+using HighFive::Shuffle;
+
+///
+/// \brief Write mode for DataSets
+enum class DumpMode {
+    Create = 0,   /*!< Dump only if DataSet does not exist, otherwise throw. */
+    Overwrite = 1 /*!< Create or overwrite if DataSet of correct shape exists, otherwise throw. */
+};
+
+///
+/// \brief Signal to enable/disable automatic flushing after write operations.
+enum class Flush {
+    False = 0, /*!< No automatic flushing. */
+    True = 1   /*!< Automatic flushing. */
+};
+
+///
+/// \brief Signal to set compression level for written DataSets.
+class Compression {
+  public:
+    ///
+    /// \brief Enable compression with the highest compression level (9),
+    /// or disable compression (set compression level to 0).
+    ///
+    /// \param enable ``true`` to enable with highest compression level
+    explicit Compression(bool enable = true);
+
+    ///
+    /// \brief Set compression level.
+    ///
+    /// \param level the compression level
+    template <class T>
+    Compression(T level);
+
+    ///
+    /// \brief Return compression level.
+    inline unsigned get() const;
+
+  private:
+    unsigned m_compression_level;
+};
+
+///
+/// \brief Define options for dumping data.
+///
+/// By default:
+/// - DumpMode::Create
+/// - Flush::True
+/// - Compression: false
+/// - ChunkSize: automatic
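+///
+/// A usage sketch (the file handle and data are assumed to exist):
+/// \code{.cpp}
+/// H5Easy::dump(file, "/path/to/data", data,
+///              H5Easy::DumpOptions(H5Easy::Flush::False, H5Easy::Compression(8)));
+/// \endcode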
+class DumpOptions {
+  public:
+    ///
+    /// \brief Constructor: accept all default settings.
+    DumpOptions() = default;
+
+    ///
+    /// \brief Constructor: overwrite (some of the) defaults.
+    /// \param args any of DumpMode(), Flush(), Compression() in arbitrary number and order.
+    template <class... Args>
+    DumpOptions(Args... args) {
+        set(args...);
+    }
+
+    ///
+    /// \brief Overwrite H5Easy::DumpMode setting.
+    /// \param mode: DumpMode.
+    inline void set(DumpMode mode);
+
+    ///
+    /// \brief Overwrite H5Easy::Flush setting.
+    /// \param mode Flush.
+    inline void set(Flush mode);
+
+    ///
+    /// \brief Overwrite H5Easy::Compression setting.
+    /// \param level Compression.
+    inline void set(const Compression& level);
+
+    ///
+    /// \brief Overwrite any setting(s).
+    /// \param arg any of DumpMode(), Flush(), Compression in arbitrary number and order.
+    /// \param args any of DumpMode(), Flush(), Compression in arbitrary number and order.
+    template <class T, class... Args>
+    inline void set(T arg, Args... args);
+
+    ///
+    /// \brief Set chunk-size. If the input is rank (size) zero, automatic chunking is enabled.
+    /// \param shape Chunk size along each dimension.
+    template <class T>
+    inline void setChunkSize(const std::vector<T>& shape);
+
+    ///
+    /// \brief Set chunk-size. If the input is rank (size) zero, automatic chunking is enabled.
+    /// \param shape Chunk size along each dimension.
+    inline void setChunkSize(std::initializer_list<size_t> shape);
+
+    ///
+    /// \brief Get overwrite-mode.
+    /// \return bool
+    inline bool overwrite() const;
+
+    ///
+    /// \brief Get flush-mode.
+    /// \return bool
+    inline bool flush() const;
+
+    ///
+    /// \brief Get compress-mode.
+    /// \return bool
+    inline bool compress() const;
+
+    ///
+    /// \brief Get compression level.
+    /// \return [0..9]
+    inline unsigned getCompressionLevel() const;
+
+    ///
+    /// \brief Get chunking mode: ``true`` is manually set, ``false`` if chunk-size should be
+    /// computed automatically.
+    /// \return bool
+    inline bool isChunked() const;
+
+    ///
+    /// \brief Get chunk size. Use DumpOptions::isChunked to check if the chunk-size should
+    /// be automatically computed.
+    inline std::vector<hsize_t> getChunkSize() const;
+
+  private:
+    bool m_overwrite = false;
+    bool m_flush = true;
+    unsigned m_compression_level = 0;
+    std::vector<hsize_t> m_chunk_size = {};
+};
+
+///
+/// \brief Get the size of an existing DataSet in an open HDF5 file.
+///
+/// \param file opened file (has to be readable)
+/// \param path path of the DataSet
+///
+/// \return Size of the DataSet
+inline size_t getSize(const File& file, const std::string& path);
+
+///
+/// \brief Get the shape of an existing DataSet in a readable file.
+///
+/// \param file opened file (has to be readable)
+/// \param path Path of the DataSet
+///
+/// \return the shape of the DataSet
+inline std::vector<size_t> getShape(const File& file, const std::string& path);
+
+///
+/// \brief Write object (templated) to a (new) DataSet in an open HDF5 file.
+///
+/// \param file opened file (has to be writeable)
+/// \param path path of the DataSet
+/// \param data the data to write (any supported type)
+/// \param mode write mode
+///
+/// \return The newly created DataSet
+///
+template <class T>
+inline DataSet dump(File& file,
+                    const std::string& path,
+                    const T& data,
+                    DumpMode mode = DumpMode::Create);
+
+///
+/// \brief Write object (templated) to a (new) DataSet in an open HDF5 file.
+///
+/// \param file opened file (has to be writeable)
+/// \param path path of the DataSet
+/// \param data the data to write (any supported type)
+/// \param options dump options
+///
+/// \return The newly created DataSet
+///
+template <class T>
+inline DataSet dump(File& file, const std::string& path, const T& data, const DumpOptions& options);
+
+///
+/// \brief Write a scalar to a (new, extendible) DataSet in an open HDF5 file.
+///
+/// \param file opened file (has to be writeable)
+/// \param path path of the DataSet
+/// \param data the data to write (any supported type)
+/// \param idx the indices to which to write
+///
+/// \return The newly created DataSet
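+///
+/// A brief sketch (path and index are illustrative):
+/// \code{.cpp}
+/// // Write 42.0 to entry {0, 3} of "/scalars", extending the dataset as needed.
+/// H5Easy::dump(file, "/scalars", 42.0, std::vector<size_t>{0, 3});
+/// \endcode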
+///
+template <class T>
+inline DataSet dump(File& file,
+                    const std::string& path,
+                    const T& data,
+                    const std::vector<size_t>& idx);
+
+///
+/// \brief Write a scalar to a (new, extendible) DataSet in an open HDF5 file.
+///
+/// \param file open File (has to be writeable)
+/// \param path path of the DataSet
+/// \param data the data to write (any supported type)
+/// \param idx the indices to which to write
+///
+/// \return The newly created DataSet
+///
+template <class T>
+inline DataSet dump(File& file,
+                    const std::string& path,
+                    const T& data,
+                    const std::initializer_list<size_t>& idx);
+
+///
+/// \brief Write a scalar to a (new, extendible) DataSet in an open HDF5 file.
+///
+/// \param file opened file (has to be writeable)
+/// \param path path of the DataSet
+/// \param data the data to write (any supported type)
+/// \param idx the indices to which to write
+/// \param options dump options
+///
+/// \return The newly created DataSet
+///
+template <class T>
+inline DataSet dump(File& file,
+                    const std::string& path,
+                    const T& data,
+                    const std::vector<size_t>& idx,
+                    const DumpOptions& options);
+
+///
+/// \brief Write a scalar to a (new, extendible) DataSet in an open HDF5 file.
+///
+/// \param file opened file (has to be writeable)
+/// \param path path of the DataSet
+/// \param data the data to write (any supported type)
+/// \param idx the indices to which to write
+/// \param options dump options
+///
+/// \return The newly created DataSet
+///
+template <class T>
+inline DataSet dump(File& file,
+                    const std::string& path,
+                    const T& data,
+                    const std::initializer_list<size_t>& idx,
+                    const DumpOptions& options);
+
+///
+/// \brief Load entry ``{i, j, ...}`` from a DataSet in an open HDF5 file to a scalar.
+///
+/// \param file opened file (has to be readable)
+/// \param path path of the DataSet
+/// \param idx the indices to load
+///
+/// \return The read data
+///
+template <class T>
+inline T load(const File& file, const std::string& path, const std::vector<size_t>& idx);
+
+///
+/// \brief Load a DataSet in an open HDF5 file to an object (templated).
+///
+/// \param file opened file (has to be readable)
+/// \param path path of the DataSet
+///
+/// \return The read data
+///
+template <class T>
+inline T load(const File& file, const std::string& path);
+
+///
+/// \brief Write object (templated) to a (new) Attribute in an open HDF5 file.
+///
+/// \param file opened file (has to be writeable)
+/// \param path path of the DataSet
+/// \param key name of the attribute
+/// \param data the data to write (any supported type)
+/// \param mode write mode
+///
+/// \return The newly created Attribute
+///
+template <class T>
+inline Attribute dumpAttribute(File& file,
+                               const std::string& path,
+                               const std::string& key,
+                               const T& data,
+                               DumpMode mode = DumpMode::Create);
+
+///
+/// \brief Write object (templated) to a (new) Attribute in an open HDF5 file.
+///
+/// \param file opened file (has to be writeable)
+/// \param path path of the DataSet
+/// \param key name of the attribute
+/// \param data the data to write (any supported type)
+/// \param options dump options
+///
+/// \return The newly created Attribute
+///
+template <class T>
+inline Attribute dumpAttribute(File& file,
+                               const std::string& path,
+                               const std::string& key,
+                               const T& data,
+                               const DumpOptions& options);
+
+///
+/// \brief Load an Attribute in an open HDF5 file to an object (templated).
+///
+/// \param file opened file (has to be readable)
+/// \param path path of the DataSet
+/// \param key name of the attribute
+///
+/// \return The read data
+///
+template <class T>
+inline T loadAttribute(const File& file, const std::string& path, const std::string& key);
+
+}  // namespace H5Easy
+
+#include "h5easy_bits/H5Easy_Eigen.hpp"
+#include "h5easy_bits/H5Easy_misc.hpp"
+#include "h5easy_bits/H5Easy_opencv.hpp"
+#include "h5easy_bits/H5Easy_public.hpp"
+#include "h5easy_bits/H5Easy_scalar.hpp"
+#include "h5easy_bits/H5Easy_vector.hpp"
+#include "h5easy_bits/H5Easy_xtensor.hpp"
diff --git a/packages/HighFive/include/highfive/H5Exception.hpp b/packages/HighFive/include/highfive/H5Exception.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..54905aa0feff7245fac61a9cd6b2558585b549e4
--- /dev/null
+++ b/packages/HighFive/include/highfive/H5Exception.hpp
@@ -0,0 +1,164 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include <memory>
+#include <stdexcept>
+#include <string>
+
+#include <H5Ipublic.h>
+
+namespace HighFive {
+
+///
+/// \brief Basic HighFive Exception class
+///
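+/// A typical usage sketch:
+/// \code{.cpp}
+/// try {
+///     HighFive::File file("missing.h5", HighFive::File::ReadOnly);
+/// } catch (const HighFive::FileException& e) {
+///     std::cerr << e.what() << '\n';
+/// }
+/// \endcode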
+///
+class Exception: public std::exception {
+  public:
+    Exception(const std::string& err_msg)
+        : _errmsg(err_msg)
+        , _next()
+        , _err_major(0)
+        , _err_minor(0) {}
+
+    virtual ~Exception() throw() {}
+
+    ///
+    /// \brief get the current exception error message
+    /// \return
+    ///
+    inline const char* what() const throw() override {
+        return _errmsg.c_str();
+    }
+
+    ///
+    /// \brief define the error message
+    /// \param errmsg
+    ///
+    inline virtual void setErrorMsg(const std::string& errmsg) {
+        _errmsg = errmsg;
+    }
+
+    ///
+    /// \brief nextException
+    /// \return pointer to the next exception in the chain, or NULL if none exists
+    ///
+    inline Exception* nextException() const {
+        return _next.get();
+    }
+
+    ///
+    /// \brief HDF5 library error mapper
+    /// \return HDF5 major error number
+    ///
+    inline hid_t getErrMajor() const {
+        return _err_major;
+    }
+
+    ///
+    /// \brief HDF5 library error mapper
+    /// \return HDF5 minor error number
+    ///
+    inline hid_t getErrMinor() const {
+        return _err_minor;
+    }
+
+  protected:
+    std::string _errmsg;
+    std::shared_ptr<Exception> _next;
+    hid_t _err_major, _err_minor;
+
+    friend struct HDF5ErrMapper;
+};
+
+///
+/// \brief Exception specific to HighFive Object interface
+///
+class ObjectException: public Exception {
+  public:
+    ObjectException(const std::string& err_msg)
+        : Exception(err_msg) {}
+};
+
+///
+/// \brief Exception specific to HighFive DataType interface
+///
+class DataTypeException: public Exception {
+  public:
+    DataTypeException(const std::string& err_msg)
+        : Exception(err_msg) {}
+};
+
+///
+/// \brief Exception specific to HighFive File interface
+///
+class FileException: public Exception {
+  public:
+    FileException(const std::string& err_msg)
+        : Exception(err_msg) {}
+};
+
+///
+/// \brief Exception specific to HighFive DataSpace interface
+///
+class DataSpaceException: public Exception {
+  public:
+    DataSpaceException(const std::string& err_msg)
+        : Exception(err_msg) {}
+};
+
+///
+/// \brief Exception specific to HighFive Attribute interface
+///
+class AttributeException: public Exception {
+  public:
+    AttributeException(const std::string& err_msg)
+        : Exception(err_msg) {}
+};
+
+///
+/// \brief Exception specific to HighFive DataSet interface
+///
+class DataSetException: public Exception {
+  public:
+    DataSetException(const std::string& err_msg)
+        : Exception(err_msg) {}
+};
+
+///
+/// \brief Exception specific to HighFive Group interface
+///
+class GroupException: public Exception {
+  public:
+    GroupException(const std::string& err_msg)
+        : Exception(err_msg) {}
+};
+
+///
+/// \brief Exception specific to HighFive Property interface
+///
+class PropertyException: public Exception {
+  public:
+    PropertyException(const std::string& err_msg)
+        : Exception(err_msg) {}
+};
+
+///
+/// \brief Exception specific to HighFive Reference interface
+///
+class ReferenceException: public Exception {
+  public:
+    ReferenceException(const std::string& err_msg)
+        : Exception(err_msg) {}
+};
+}  // namespace HighFive
+
+#include "bits/H5Exception_misc.hpp"
diff --git a/packages/HighFive/include/highfive/H5File.hpp b/packages/HighFive/include/highfive/H5File.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..9b393e5a35ea28411d59a5d29b87e3f31bbbb23d
--- /dev/null
+++ b/packages/HighFive/include/highfive/H5File.hpp
@@ -0,0 +1,141 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include <string>
+
+#include "H5FileDriver.hpp"
+#include "H5Object.hpp"
+#include "H5PropertyList.hpp"
+#include "bits/H5Annotate_traits.hpp"
+#include "bits/H5Node_traits.hpp"
+
+namespace HighFive {
+
+///
+/// \brief File class
+///
+class File: public Object, public NodeTraits<File>, public AnnotateTraits<File> {
+  public:
+    const static ObjectType type = ObjectType::File;
+
+    enum : unsigned {
+        /// Open flag: Read only access
+        ReadOnly = 0x00u,
+        /// Open flag: Read Write access
+        ReadWrite = 0x01u,
+        /// Open flag: Truncate a file if already existing
+        Truncate = 0x02u,
+        /// Open flag: Open will fail if the file already exists
+        Excl = 0x04u,
+        /// Open flag: Open in debug mode
+        Debug = 0x08u,
+        /// Open flag: Create a non-existing file
+        Create = 0x10u,
+        /// Derived open flag: common write mode (=ReadWrite|Create|Truncate)
+        Overwrite = Truncate,
+        /// Derived open flag: Opens RW or exclusively creates
+        OpenOrCreate = ReadWrite | Create
+    };
+
+    ///
+    /// \brief File
+    /// \param filename: filepath of the HDF5 file
+    /// \param openFlags: Open mode / flags (ReadOnly, ReadWrite)
+    /// \param fileAccessProps: the file access properties
+    ///
+    /// Open or create a new HDF5 file
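+    ///
+    /// A short sketch (the file name is illustrative):
+    /// \code{.cpp}
+    /// File f1("data.h5");                  // open read-only
+    /// File f2("data.h5", File::Overwrite); // create, truncating if it exists
+    /// \endcode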
+    explicit File(const std::string& filename,
+                  unsigned openFlags = ReadOnly,
+                  const FileAccessProps& fileAccessProps = FileAccessProps::Default());
+
+    ///
+    /// \brief File
+    /// \param filename: filepath of the HDF5 file
+    /// \param openFlags: Open mode / flags (ReadOnly, ReadWrite)
+    /// \param fileCreateProps: the file create properties
+    /// \param fileAccessProps: the file access properties
+    ///
+    /// Open or create a new HDF5 file
+    File(const std::string& filename,
+         unsigned openFlags,
+         const FileCreateProps& fileCreateProps,
+         const FileAccessProps& fileAccessProps = FileAccessProps::Default());
+
+    ///
+    /// \brief Return the name of the file
+    ///
+    const std::string& getName() const noexcept;
+
+
+    /// \brief Object path of a File is always "/"
+    std::string getPath() const noexcept {
+        return "/";
+    }
+
+    /// \brief Returns the block size for metadata in bytes
+    hsize_t getMetadataBlockSize() const;
+
+    /// \brief Returns the HDF5 version compatibility bounds
+    std::pair<H5F_libver_t, H5F_libver_t> getVersionBounds() const;
+
+#if H5_VERSION_GE(1, 10, 1)
+    /// \brief Returns the HDF5 file space strategy.
+    H5F_fspace_strategy_t getFileSpaceStrategy() const;
+
+    /// \brief Returns the page size, if paged allocation is used.
+    hsize_t getFileSpacePageSize() const;
+#endif
+
+    ///
+    /// \brief flush
+    ///
+    /// Flushes all buffers associated with a file to disk
+    ///
+    void flush();
+
+    /// \brief Get the list of properties for creation of this file
+    FileCreateProps getCreatePropertyList() const {
+        return details::get_plist<FileCreateProps>(*this, H5Fget_create_plist);
+    }
+
+    /// \brief Get the list of properties for access of this file
+    FileAccessProps getAccessPropertyList() const {
+        return details::get_plist<FileAccessProps>(*this, H5Fget_access_plist);
+    }
+
+    /// \brief Get the size of this file in bytes
+    size_t getFileSize() const;
+
+    /// \brief Get the amount of tracked, unused space in bytes.
+    ///
+    /// Note, this is a wrapper for `H5Fget_freespace` and returns the number of
+    /// bytes in the free space manager. This might be different from the total
+    /// amount of unused space in the HDF5 file, since the free space manager
+    /// might not track everything or not track across open-close cycles.
+    size_t getFreeSpace() const;
+
+  protected:
+    File() = default;
+    using Object::Object;
+
+  private:
+    mutable std::string _filename{};
+
+    template <typename>
+    friend class PathTraits;
+};
+
+}  // namespace HighFive
+
+// H5File is the main user constructible -> bring in implementation headers
+#include "bits/H5Annotate_traits_misc.hpp"
+#include "bits/H5File_misc.hpp"
+#include "bits/H5Node_traits_misc.hpp"
+#include "bits/H5Path_traits_misc.hpp"
diff --git a/packages/HighFive/include/highfive/H5FileDriver.hpp b/packages/HighFive/include/highfive/H5FileDriver.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..2cd4813a36112407166a3815828ba0918e4b88b0
--- /dev/null
+++ b/packages/HighFive/include/highfive/H5FileDriver.hpp
@@ -0,0 +1,32 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include "H5PropertyList.hpp"
+#include "bits/H5_definitions.hpp"
+
+namespace HighFive {
+
+/// \brief file driver base concept
+/// \deprecated Use FileAccessProps directly
+class H5_DEPRECATED("Use FileAccessProps directly") FileDriver: public FileAccessProps {};
+
+#ifdef H5_HAVE_PARALLEL
+/// \brief MPIIO Driver for Parallel HDF5
+/// \deprecated Add MPIOFileAccess directly to FileAccessProps
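+///
+/// A sketch of the preferred, non-deprecated equivalent:
+/// \code{.cpp}
+/// FileAccessProps fapl;
+/// fapl.add(MPIOFileAccess(MPI_COMM_WORLD, MPI_INFO_NULL));
+/// File file("data.h5", File::ReadWrite, fapl);
+/// \endcode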
+class H5_DEPRECATED("Add MPIOFileAccess directly to FileAccessProps") MPIOFileDriver
+    : public FileAccessProps {
+  public:
+    inline MPIOFileDriver(MPI_Comm mpi_comm, MPI_Info mpi_info);
+};
+#endif
+
+}  // namespace HighFive
+
+#include "bits/H5FileDriver_misc.hpp"
diff --git a/packages/HighFive/include/highfive/H5Group.hpp b/packages/HighFive/include/highfive/H5Group.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..0a6a4cdae605012f25ca541a9d584f743af1c93b
--- /dev/null
+++ b/packages/HighFive/include/highfive/H5Group.hpp
@@ -0,0 +1,88 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include <H5Gpublic.h>
+
+#include "H5Object.hpp"
+#include "bits/H5Friends.hpp"
+#include "bits/H5_definitions.hpp"
+#include "bits/H5Annotate_traits.hpp"
+#include "bits/H5Node_traits.hpp"
+#include "bits/H5Path_traits.hpp"
+
+namespace HighFive {
+
+namespace detail {
+/// \brief Internal hack to create a `Group` from an ID.
+///
+/// WARNING: Creating a Group from an ID has implications w.r.t. the lifetime of the object
+///          that got passed via its ID. Using this method carelessly opens up the suite of issues
+///          related to C-style resource management, including the analog of double free, dangling
+///          pointers, etc.
+///
+/// NOTE: This is not part of the API and only serves to work around a compiler issue in GCC which
+///       prevents us from using `friend`s instead. This function should only be used for internal
+///       purposes. The problematic construct is:
+///
+///           template<class Derived>
+///           friend class SomeCRTP<Derived>;
+///
+/// \private
+Group make_group(hid_t);
+}  // namespace detail
+
+///
+/// \brief Represents an HDF5 group
+class Group: public Object,
+             public NodeTraits<Group>,
+             public AnnotateTraits<Group>,
+             public PathTraits<Group> {
+  public:
+    const static ObjectType type = ObjectType::Group;
+
+    /// \deprecated Default constructor creates unsafe uninitialized objects
+    H5_DEPRECATED("Default constructor creates unsafe uninitialized objects")
+    Group() = default;
+
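+    /// \brief Return the estimated number of links and the estimated average
+    ///        link name length recorded in this group's creation property list.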
+    std::pair<unsigned int, unsigned int> getEstimatedLinkInfo() const;
+
+    /// \brief Get the list of properties for creation of this group
+    GroupCreateProps getCreatePropertyList() const {
+        return details::get_plist<GroupCreateProps>(*this, H5Gget_create_plist);
+    }
+
+    Group(Object&& o) noexcept
+        : Object(std::move(o)) {}
+
+  protected:
+    using Object::Object;
+
+    friend Group detail::make_group(hid_t);
+    friend class File;
+    friend class Reference;
+#if HIGHFIVE_HAS_FRIEND_DECLARATIONS
+    template <typename Derivate>
+    friend class ::HighFive::NodeTraits;
+#endif
+};
+
+inline std::pair<unsigned int, unsigned int> Group::getEstimatedLinkInfo() const {
+    auto gcpl = getCreatePropertyList();
+    auto eli = EstimatedLinkInfo(gcpl);
+    return std::make_pair(eli.getEntries(), eli.getNameLength());
+}
+
+namespace detail {
+inline Group make_group(hid_t hid) {
+    return Group(hid);
+}
+}  // namespace detail
+
+}  // namespace HighFive
diff --git a/packages/HighFive/include/highfive/H5Object.hpp b/packages/HighFive/include/highfive/H5Object.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..4cf4e7de0882660df7a4ea111b4f6ce390d8cf17
--- /dev/null
+++ b/packages/HighFive/include/highfive/H5Object.hpp
@@ -0,0 +1,155 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include <ctime>
+
+#include <H5Ipublic.h>
+#include <H5Opublic.h>
+
+#include "bits/H5_definitions.hpp"
+#include "bits/H5Friends.hpp"
+
+namespace HighFive {
+
+///
+/// \brief Enum of the types of objects (H5O api)
+///
+enum class ObjectType {
+    File,
+    Group,
+    UserDataType,
+    DataSpace,
+    Dataset,
+    Attribute,
+    Other  // Internal/custom object type
+};
+
+namespace detail {
+/// \brief Internal hack to create an `Object` from an ID.
+///
+/// WARNING: Creating an Object from an ID has implications w.r.t. the lifetime of the object
+///          that got passed via its ID. Using this method carelessly opens up the suite of issues
+///          related to C-style resource management, including the analog of double free, dangling
+///          pointers, etc.
+///
+/// NOTE: This is not part of the API and only serves to work around a compiler issue in GCC which
+///       prevents us from using `friend`s instead. This function should only be used for internal
+///       purposes. The problematic construct is:
+///
+///           template<class Derived>
+///           friend class SomeCRTP<Derived>;
+///
+/// \private
+Object make_object(hid_t hid);
+}  // namespace detail
+
+
+class Object {
+  public:
+    // move constructor, reuse hid
+    Object(Object&& other) noexcept;
+
+    // decrease reference counter
+    ~Object();
+
+    ///
+    /// \brief isValid
+    /// \return true if the current Object is a valid HDF5 object
+    ///
+    bool isValid() const noexcept;
+
+    ///
+    /// \brief getId
+    /// \return internal HDF5 id to the object
+    ///  provided for C API compatibility
+    ///
+    hid_t getId() const noexcept;
+
+    ///
+    /// \brief Retrieve several pieces of information about the current object (address, dates, etc.)
+    ///
+    ObjectInfo getInfo() const;
+
+    ///
+    /// \brief Gets the fundamental type of the object (dataset, group, etc)
+    /// \exception ObjectException when the _hid is negative or the type
+    ///     is custom and not registered yet
+    ///
+    ObjectType getType() const;
+
+    // Check if refer to same object
+    bool operator==(const Object& other) const noexcept {
+        return _hid == other._hid;
+    }
+
+  protected:
+    // empty constructor
+    Object();
+
+    // copy constructor, increase reference counter
+    Object(const Object& other);
+
+    // Init with a low-level object id
+    explicit Object(hid_t);
+
+    // Copy-Assignment operator
+    Object& operator=(const Object& other);
+
+    hid_t _hid;
+
+  private:
+    friend Object detail::make_object(hid_t);
+    friend class Reference;
+    friend class CompoundType;
+
+#if HIGHFIVE_HAS_FRIEND_DECLARATIONS
+    template <typename Derivate>
+    friend class NodeTraits;
+    template <typename Derivate>
+    friend class AnnotateTraits;
+    template <typename Derivate>
+    friend class PathTraits;
+#endif
+};
+
+
+///
+/// \brief A class for accessing HDF5 object info
+///
+class ObjectInfo {
+  public:
+    /// \brief Retrieve the address of the object (within its file)
+    /// \deprecated Deprecated since HighFive 2.2. Soon supporting VOL tokens
+    H5_DEPRECATED("Deprecated since HighFive 2.2. Soon supporting VOL tokens")
+    haddr_t getAddress() const noexcept;
+
+    /// \brief Retrieve the number of references to this object
+    size_t getRefCount() const noexcept;
+
+    /// \brief Retrieve the object's creation time
+    time_t getCreationTime() const noexcept;
+
+    /// \brief Retrieve the object's last modification time
+    time_t getModificationTime() const noexcept;
+
+  protected:
+#if (H5Oget_info_vers < 3)
+    H5O_info_t raw_info;
+#else
+    // Use compat H5O_info1_t while getAddress() is supported (deprecated)
+    H5O_info1_t raw_info;
+#endif
+
+    friend class Object;
+};
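+
+// A minimal usage sketch (assuming `node` is any valid HighFive object derived
+// from Object, e.g. a DataSet or Group obtained elsewhere):
+//
+//     ObjectInfo info = node.getInfo();
+//     size_t n_refs = info.getRefCount();
+//     time_t created = info.getCreationTime();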
+
+}  // namespace HighFive
+
+#include "bits/H5Object_misc.hpp"
diff --git a/packages/HighFive/include/highfive/H5PropertyList.hpp b/packages/HighFive/include/highfive/H5PropertyList.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..53b3c4a1374bdd9184163b209ae27bc84fedb5f3
--- /dev/null
+++ b/packages/HighFive/include/highfive/H5PropertyList.hpp
@@ -0,0 +1,734 @@
+/*
+ *  Copyright (c), 2017-2018, Adrien Devresse <adrien.devresse@epfl.ch>
+ *                            Juan Hernando <juan.hernando@epfl.ch>
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include <vector>
+
+#include <H5Ppublic.h>
+
+// Required by MPIOFileAccess
+#ifdef H5_HAVE_PARALLEL
+#include <H5FDmpi.h>
+#endif
+
+#include "H5Exception.hpp"
+#include "H5Object.hpp"
+
+namespace HighFive {
+
+/// \defgroup PropertyLists Property Lists
+/// HDF5 is configured through what they call property lists. In HDF5 the
+/// process has four steps:
+///
+/// 1. Create a property list. As users we now have an `hid_t` identifying the
+/// property list.
+/// 2. Set properties as desired.
+/// 3. Pass the HID to the HDF5 function to be configured.
+/// 4. Free the property list.
+///
+/// Note that the mental picture is that one creates a settings object, and
+/// then passes those settings to a function such as `H5Dwrite`. In and of
+/// themselves the settings don't change the behaviour of HDF5. Rather, they
+/// only take effect once passed to such a function.
+///
+/// The second aspect is that each property list groups any number of related
+/// settings: e.g. there's one property list for anything related to creating
+/// files and another for accessing files, and likewise for creating and
+/// accessing datasets, etc. Settings that affect creating files must be passed
+/// in a file creation property list, while settings that affect file access
+/// require a file access property list.
+///
+/// In HighFive the `PropertyList` works similarly, in that it's an object
+/// representing the settings, i.e. internally it's just the property list's
+/// HID. Just like in HDF5, one adds the settings to the settings object and
+/// then passes the settings object to the respective method. Example:
+///
+///
+///     // Create an object which contains the setting to
+///     // open files with MPI-IO.
+///     auto fapl = FileAccessProps();
+///     fapl.add(MPIOFileAccess(MPI_COMM_WORLD, MPI_INFO_NULL));
+///
+///     // To open a specific file with MPI-IO, we do:
+///     auto file = File("foo.h5", File::ReadOnly, fapl);
+///
+/// Note that the `MPIOFileAccess` object by itself doesn't affect the
+/// `FileAccessProps`. Rather it needs to be explicitly added to the `fapl`
+/// (the group of file access related settings), and then the `fapl` needs to
+/// be passed to the constructor of `File` for the settings to take effect.
+///
+/// This is important to understand when reading properties. Example:
+///
+///     // Obtain the file access property list:
+///     auto fapl = file.getAccessPropertyList();
+///
+///     // Extracts a copy of the collective MPI-IO metadata settings from
+///     // the group of file access related settings, i.e. the `fapl`:
+///     auto mpio_metadata = MPIOCollectiveMetadata(fapl);
+///
+///     if(mpio_metadata.isCollectiveRead()) {
+///       // something specific if meta data is read collectively.
+///     }
+///
+///     // Careful, this only affects the `mpio_metadata` object, but not the
+///     //  `fapl`, and also not whether `file` uses collective MPI-IO for
+///     // metadata.
+///     mpio_metadata = MPIOCollectiveMetadata(false, false);
+///
+/// @{
+
+///
+/// \brief Types of property lists
+///
+enum class PropertyType : int {
+    OBJECT_CREATE,
+    FILE_CREATE,
+    FILE_ACCESS,
+    DATASET_CREATE,
+    DATASET_ACCESS,
+    DATASET_XFER,
+    GROUP_CREATE,
+    GROUP_ACCESS,
+    DATATYPE_CREATE,
+    DATATYPE_ACCESS,
+    STRING_CREATE,
+    ATTRIBUTE_CREATE,
+    OBJECT_COPY,
+    LINK_CREATE,
+    LINK_ACCESS,
+};
+
+namespace details {
+template <typename T, typename U>
+T get_plist(const U& obj, hid_t (*f)(hid_t)) {
+    auto hid = f(obj.getId());
+    if (hid < 0) {
+        HDF5ErrMapper::ToException<PropertyException>("Unable to get property list");
+    }
+    T t{};
+    t._hid = hid;
+    return t;
+}
+}  // namespace details
+
+///
+/// \brief Base Class for Property lists, providing global default
+class PropertyListBase: public Object {
+  public:
+    PropertyListBase() noexcept;
+
+    static const PropertyListBase& Default() noexcept {
+        static const PropertyListBase plist{};
+        return plist;
+    }
+
+  private:
+    template <typename T, typename U>
+    friend T details::get_plist(const U&, hid_t (*f)(hid_t));
+};
+
+/// \interface PropertyInterface
+/// \brief HDF5 file property object
+///
+/// A property is an object which is expected to have a method with the
+/// following signature `void apply(hid_t hid) const`
+///
+/// \sa Instructions to document C++20 concepts with Doxygen: https://github.com/doxygen/doxygen/issues/2732#issuecomment-509629967
+///
+/// \cond
+#if HIGHFIVE_HAS_CONCEPTS && __cplusplus >= 202002L
+template <typename P>
+concept PropertyInterface = requires(P p, const hid_t hid) {
+    {p.apply(hid)};
+};
+
+#else
+#define PropertyInterface typename
+#endif
+/// \endcond
+
+///
+/// \brief HDF5 property Lists
+///
+template <PropertyType T>
+class PropertyList: public PropertyListBase {
+  public:
+    ///
+    /// \brief return the type of this PropertyList
+    constexpr PropertyType getType() const noexcept {
+        return T;
+    }
+
+    ///
+    /// Add a property to this property list.
+    /// A property is an object which is expected to have a method with the
+    /// following signature: `void apply(hid_t hid) const`.
+    /// \tparam PropertyInterface
+    template <PropertyInterface P>
+    void add(const P& property);
+
+    ///
+    /// Return the Default property type object
+    static const PropertyList<T>& Default() noexcept {
+        return static_cast<const PropertyList<T>&>(PropertyListBase::Default());
+    }
+
+  protected:
+    void _initializeIfNeeded();
+};
+
+using ObjectCreateProps = PropertyList<PropertyType::OBJECT_CREATE>;
+using FileCreateProps = PropertyList<PropertyType::FILE_CREATE>;
+using FileAccessProps = PropertyList<PropertyType::FILE_ACCESS>;
+using DataSetCreateProps = PropertyList<PropertyType::DATASET_CREATE>;
+using DataSetAccessProps = PropertyList<PropertyType::DATASET_ACCESS>;
+using DataTransferProps = PropertyList<PropertyType::DATASET_XFER>;
+using GroupCreateProps = PropertyList<PropertyType::GROUP_CREATE>;
+using GroupAccessProps = PropertyList<PropertyType::GROUP_ACCESS>;
+using DataTypeCreateProps = PropertyList<PropertyType::DATATYPE_CREATE>;
+using DataTypeAccessProps = PropertyList<PropertyType::DATATYPE_ACCESS>;
+using StringCreateProps = PropertyList<PropertyType::STRING_CREATE>;
+using AttributeCreateProps = PropertyList<PropertyType::ATTRIBUTE_CREATE>;
+using ObjectCopyProps = PropertyList<PropertyType::OBJECT_COPY>;
+using LinkCreateProps = PropertyList<PropertyType::LINK_CREATE>;
+using LinkAccessProps = PropertyList<PropertyType::LINK_ACCESS>;
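+
+// A minimal sketch of the general pattern (assuming `FileVersionBounds`, which
+// is declared further below in this header, and a `File` from H5File.hpp):
+//
+//     auto fapl = FileAccessProps();
+//     fapl.add(FileVersionBounds(H5F_LIBVER_V110, H5F_LIBVER_V110));
+//     auto file = File("data.h5", File::ReadWrite | File::Create, fapl);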
+
+///
+/// RawPropertyLists are to be used when advanced HDF5 properties
+/// that are not part of the HighFive API are desired.
+/// Therefore this class is mainly for internal use.
+template <PropertyType T>
+class RawPropertyList: public PropertyList<T> {
+  public:
+    template <typename F, typename... Args>
+    void add(const F& funct, const Args&... args);
+};
+
+#ifdef H5_HAVE_PARALLEL
+///
+/// \brief Configure MPI access for the file
+///
+/// All further modifications to the structure of the file will have to be
+/// done with collective operations
+///
+class MPIOFileAccess {
+  public:
+    MPIOFileAccess(MPI_Comm comm, MPI_Info info);
+
+  private:
+    friend FileAccessProps;
+    void apply(const hid_t list) const;
+
+    MPI_Comm _comm;
+    MPI_Info _info;
+};
+
+///
+/// \brief Use collective MPI-IO for metadata read and write.
+///
+/// See `MPIOCollectiveMetadataRead` and `MPIOCollectiveMetadataWrite`.
+///
+class MPIOCollectiveMetadata {
+  public:
+    explicit MPIOCollectiveMetadata(bool collective = true);
+    explicit MPIOCollectiveMetadata(const FileAccessProps& plist);
+
+    bool isCollectiveRead() const;
+    bool isCollectiveWrite() const;
+
+
+  private:
+    friend FileAccessProps;
+    void apply(hid_t plist) const;
+
+    bool collective_read_;
+    bool collective_write_;
+};
+
+///
+/// \brief Use collective MPI-IO for metadata read?
+///
+/// Note that when used in a file access property list, this will force all reads
+/// of metadata to be collective. HDF5 functions may implicitly perform metadata
+/// reads; these functions would then become collective. A list of functions that
+/// perform metadata reads can be found in the HDF5 documentation, e.g.
+///    https://docs.hdfgroup.org/hdf5/v1_12/group___g_a_c_p_l.html
+///
+/// In HighFive, setting collective metadata reads is (currently) only supported at the file level.
+///
+/// Please also consult upstream documentation of `H5Pset_all_coll_metadata_ops`.
+///
+class MPIOCollectiveMetadataRead {
+  public:
+    explicit MPIOCollectiveMetadataRead(bool collective = true);
+    explicit MPIOCollectiveMetadataRead(const FileAccessProps& plist);
+
+    bool isCollective() const;
+
+  private:
+    friend FileAccessProps;
+    friend MPIOCollectiveMetadata;
+
+    void apply(hid_t plist) const;
+
+    bool collective_;
+};
+
+///
+/// \brief Use collective MPI-IO for metadata write?
+///
+/// In order to keep the in-memory representation of the file structure
+/// consistent across MPI ranks, writing metadata is always a collective
+/// operation, meaning all MPI ranks must participate. Passing this setting
+/// enables the use of MPI-IO collective operations for metadata writes.
+///
+/// Please also consult upstream documentation of `H5Pset_coll_metadata_write`.
+///
+class MPIOCollectiveMetadataWrite {
+  public:
+    explicit MPIOCollectiveMetadataWrite(bool collective = true);
+    explicit MPIOCollectiveMetadataWrite(const FileAccessProps& plist);
+
+    bool isCollective() const;
+
+  private:
+    friend FileAccessProps;
+    friend MPIOCollectiveMetadata;
+
+    void apply(hid_t plist) const;
+
+    bool collective_;
+};
+
+#endif
+
+///
+/// \brief Configure the version bounds for the file
+///
+/// Used to define the compatibility of objects created within HDF5 files,
+/// and affects the format of groups stored in the file.
+///
+/// See also the documentation of \c H5P_SET_LIBVER_BOUNDS in HDF5.
+///
+/// Possible values for \c low and \c high are:
+/// * \c H5F_LIBVER_EARLIEST
+/// * \c H5F_LIBVER_V18
+/// * \c H5F_LIBVER_V110
+/// * \c H5F_LIBVER_NBOUNDS
+/// * \c H5F_LIBVER_LATEST currently defined as \c H5F_LIBVER_V110 within
+///   HDF5
+///
+class FileVersionBounds {
+  public:
+    FileVersionBounds(H5F_libver_t low, H5F_libver_t high);
+    explicit FileVersionBounds(const FileAccessProps& fapl);
+
+    std::pair<H5F_libver_t, H5F_libver_t> getVersion() const;
+
+  private:
+    friend FileAccessProps;
+    void apply(const hid_t list) const;
+
+    H5F_libver_t _low;
+    H5F_libver_t _high;
+};
+
+///
+/// \brief Configure the metadata block size to use when writing to files
+///
+/// \param size Metadata block size in bytes
+///
+class MetadataBlockSize {
+  public:
+    explicit MetadataBlockSize(hsize_t size);
+    explicit MetadataBlockSize(const FileAccessProps& fapl);
+
+    hsize_t getSize() const;
+
+  private:
+    friend FileAccessProps;
+    void apply(const hid_t list) const;
+    hsize_t _size;
+};
+
+#if H5_VERSION_GE(1, 10, 1)
+///
+/// \brief Configure the file space strategy.
+///
+/// See the upstream documentation of `H5Pget_file_space_strategy` for more details. Essentially,
+/// it enables configuring how space is allocated in the file.
+///
+class FileSpaceStrategy {
+  public:
+    ///
+    /// \brief Create a file space strategy property.
+    ///
+    /// \param strategy The HDF5 free space strategy.
+    /// \param persist Whether free-space managers should be persisted across file closing and reopening.
+    /// \param threshold The free-space manager won't track sections smaller than this threshold.
+    FileSpaceStrategy(H5F_fspace_strategy_t strategy, hbool_t persist, hsize_t threshold);
+    explicit FileSpaceStrategy(const FileCreateProps& fcpl);
+
+    H5F_fspace_strategy_t getStrategy() const;
+    hbool_t getPersist() const;
+    hsize_t getThreshold() const;
+
+  private:
+    friend FileCreateProps;
+
+    void apply(const hid_t list) const;
+
+    H5F_fspace_strategy_t _strategy;
+    hbool_t _persist;
+    hsize_t _threshold;
+};
+
+///
+/// \brief Configure the page size for paged allocation.
+///
+/// See the upstream documentation of `H5Pset_file_space_page_size` for more details. Essentially,
+/// it enables configuring the page size when paged allocation is used.
+///
+/// General information about paged allocation can be found in the upstream documentation "RFC: Page
+/// Buffering".
+///
+class FileSpacePageSize {
+  public:
+    ///
+    /// \brief Create a file space page size property.
+    ///
+    /// \param page_size The page size in bytes.
+    explicit FileSpacePageSize(hsize_t page_size);
+    explicit FileSpacePageSize(const FileCreateProps& fcpl);
+
+    hsize_t getPageSize() const;
+
+  private:
+    friend FileCreateProps;
+    void apply(const hid_t list) const;
+
+    hsize_t _page_size;
+};
+
+#ifndef H5_HAVE_PARALLEL
+/// \brief Set size of the page buffer.
+///
+/// Please consult the upstream documentation of `H5Pset_page_buffer_size`
+/// and `H5Pget_page_buffer_size`.
+/// Note that this setting is only valid for page allocated/aggregated
+/// files, i.e. those that have file space strategy "Page".
+///
+/// Tests suggest this doesn't work in the parallel version of the
+/// library. Hence, this isn't available at compile time if the parallel
+/// library was selected.
+class PageBufferSize {
+  public:
+    /// Property to set page buffer sizes.
+    ///
+    /// @param page_buffer_size maximum size of the page buffer in bytes.
+    /// @param min_meta_percent fraction of the page buffer dedicated to meta data, in percent.
+    /// @param min_raw_percent fraction of the page buffer dedicated to raw data, in percent.
+    explicit PageBufferSize(size_t page_buffer_size,
+                            unsigned min_meta_percent = 0,
+                            unsigned min_raw_percent = 0);
+
+    explicit PageBufferSize(const FileAccessProps& fapl);
+
+    size_t getPageBufferSize() const;
+    unsigned getMinMetaPercent() const;
+    unsigned getMinRawPercent() const;
+
+  private:
+    friend FileAccessProps;
+
+    void apply(hid_t list) const;
+
+    size_t _page_buffer_size;
+    unsigned _min_meta;
+    unsigned _min_raw;
+};
+#endif
+#endif
+
+/// \brief Set hints as to how many links to expect and their average length
+/// \implements PropertyInterface
+///
+class EstimatedLinkInfo {
+  public:
+    /// \brief Create a property with the requested parameters.
+    ///
+    /// @param entries The estimated number of links in a group.
+    /// @param length The estimated length of the names of links.
+    explicit EstimatedLinkInfo(unsigned entries, unsigned length);
+
+    explicit EstimatedLinkInfo(const GroupCreateProps& gcpl);
+
+    /// \brief The estimated number of links in a group.
+    unsigned getEntries() const;
+
+    /// \brief The estimated length of the names of links.
+    unsigned getNameLength() const;
+
+  private:
+    friend GroupCreateProps;
+    void apply(hid_t hid) const;
+    unsigned _entries;
+    unsigned _length;
+};
+
+
+/// \implements PropertyInterface
+class Chunking {
+  public:
+    explicit Chunking(const std::vector<hsize_t>& dims);
+    Chunking(const std::initializer_list<hsize_t>& items);
+
+    template <typename... Args>
+    explicit Chunking(hsize_t item, Args... args);
+
+    explicit Chunking(DataSetCreateProps& plist, size_t max_dims = 32);
+
+    const std::vector<hsize_t>& getDimensions() const noexcept;
+
+  private:
+    friend DataSetCreateProps;
+    void apply(hid_t hid) const;
+    std::vector<hsize_t> _dims;
+};
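+
+// A minimal sketch of chunked (and, together with `Deflate` declared below,
+// compressed) dataset creation; `file` is assumed to be an open, writable File:
+//
+//     auto dcpl = DataSetCreateProps();
+//     dcpl.add(Chunking(std::vector<hsize_t>{64, 64}));
+//     dcpl.add(Deflate(9));
+//     auto ds = file.createDataSet<double>("grid", DataSpace(1024, 1024), dcpl);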
+
+/// \implements PropertyInterface
+class Deflate {
+  public:
+    explicit Deflate(unsigned level);
+
+  private:
+    friend DataSetCreateProps;
+    friend GroupCreateProps;
+    void apply(hid_t hid) const;
+    const unsigned _level;
+};
+
+/// \implements PropertyInterface
+class Szip {
+  public:
+    explicit Szip(unsigned options_mask = H5_SZIP_EC_OPTION_MASK,
+                  unsigned pixels_per_block = H5_SZIP_MAX_PIXELS_PER_BLOCK);
+
+    unsigned getOptionsMask() const;
+    unsigned getPixelsPerBlock() const;
+
+  private:
+    friend DataSetCreateProps;
+    void apply(hid_t hid) const;
+    const unsigned _options_mask;
+    const unsigned _pixels_per_block;
+};
+
+/// \implements PropertyInterface
+class Shuffle {
+  public:
+    Shuffle() = default;
+
+  private:
+    friend DataSetCreateProps;
+    void apply(hid_t hid) const;
+};
+
+/// \brief When are datasets allocated?
+///
+/// The precise time of when HDF5 requests space to store the dataset
+/// can be configured. Please consult the upstream documentation of
+/// `H5Pset_alloc_time`.
+/// \implements PropertyInterface
+class AllocationTime {
+  public:
+    explicit AllocationTime(H5D_alloc_time_t alloc_time);
+    explicit AllocationTime(const DataSetCreateProps& dcpl);
+
+    H5D_alloc_time_t getAllocationTime();
+
+  private:
+    friend DataSetCreateProps;
+    void apply(hid_t dcpl) const;
+
+    H5D_alloc_time_t _alloc_time;
+};
+
+/// Dataset access property to control chunk cache configuration.
+/// Do not confuse it with the similar file access property set via `H5Pset_cache`.
+/// \implements PropertyInterface
+class Caching {
+  public:
+    /// See https://support.hdfgroup.org/HDF5/doc/RM/H5P/H5Pset_chunk_cache.html
+    /// for details.
+    Caching(const size_t numSlots,
+            const size_t cacheSize,
+            const double w0 = static_cast<double>(H5D_CHUNK_CACHE_W0_DEFAULT));
+
+    explicit Caching(const DataSetCreateProps& dcpl);
+
+    size_t getNumSlots() const;
+    size_t getCacheSize() const;
+    double getW0() const;
+
+  private:
+    friend DataSetAccessProps;
+    void apply(hid_t hid) const;
+    size_t _numSlots;
+    size_t _cacheSize;
+    double _w0;
+};
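+
+// A minimal sketch (assuming `file` is an open File): tune the chunk cache
+// when opening a dataset by passing dataset access properties:
+//
+//     auto dapl = DataSetAccessProps();
+//     dapl.add(Caching(/* numSlots */ 521, /* cacheSize */ 64 * 1024 * 1024));
+//     auto ds = file.getDataSet("grid", dapl);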
+
+/// \implements PropertyInterface
+class CreateIntermediateGroup {
+  public:
+    explicit CreateIntermediateGroup(bool create = true);
+
+    explicit CreateIntermediateGroup(const ObjectCreateProps& ocpl);
+    explicit CreateIntermediateGroup(const LinkCreateProps& lcpl);
+
+    bool isSet() const;
+
+  protected:
+    void fromPropertyList(hid_t hid);
+
+  private:
+    friend ObjectCreateProps;
+    friend LinkCreateProps;
+    void apply(hid_t hid) const;
+    bool _create;
+};
+
+#ifdef H5_HAVE_PARALLEL
+/// \implements PropertyInterface
+class UseCollectiveIO {
+  public:
+    explicit UseCollectiveIO(bool enable = true);
+
+    explicit UseCollectiveIO(const DataTransferProps& dxpl);
+
+    /// \brief Does the property request collective IO?
+    bool isCollective() const;
+
+  private:
+    friend DataTransferProps;
+    void apply(hid_t hid) const;
+    bool _enable;
+};
+
+
+/// \brief The cause for non-collective I/O.
+///
+/// The cause refers to the most recent I/O with the data transfer property list `dxpl` at the
+/// time this object was created. This object will not update automatically for later data
+/// transfers, i.e. `H5Pget_mpio_no_collective_cause` is called in the constructor, and not when
+/// fetching a value such as `wasCollective`.
+/// \implements PropertyInterface
+class MpioNoCollectiveCause {
+  public:
+    explicit MpioNoCollectiveCause(const DataTransferProps& dxpl);
+
+    /// \brief Was the data transfer collective?
+    bool wasCollective() const;
+
+    /// \brief The local cause for a non-collective I/O.
+    uint32_t getLocalCause() const;
+
+    /// \brief The global cause for a non-collective I/O.
+    uint32_t getGlobalCause() const;
+
+    /// \brief A pair of the local and global cause for non-collective I/O.
+    std::pair<uint32_t, uint32_t> getCause() const;
+
+  private:
+    friend DataTransferProps;
+    uint32_t _local_cause;
+    uint32_t _global_cause;
+};
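+
+// A minimal sketch (assuming `dataset` and `values` exist and the file was
+// opened with MPI-IO): request collective transfer, then check whether the
+// write actually was collective:
+//
+//     auto dxpl = DataTransferProps();
+//     dxpl.add(UseCollectiveIO());
+//     dataset.write(values, dxpl);
+//
+//     auto cause = MpioNoCollectiveCause(dxpl);
+//     if (!cause.wasCollective()) {
+//         // inspect cause.getLocalCause() / cause.getGlobalCause()
+//     }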
+#endif
+
+struct CreationOrder {
+    enum _CreationOrder {
+        Tracked = H5P_CRT_ORDER_TRACKED,
+        Indexed = H5P_CRT_ORDER_INDEXED,
+    };
+};
+
+///
+/// \brief Track and index creation order
+///
+/// Lets the user retrieve objects by creation order instead of by name.
+///
+/// \implements PropertyInterface
+class LinkCreationOrder {
+  public:
+    ///
+    /// \brief Create the property
+    /// \param flags Should be a composition of HighFive::CreationOrder.
+    ///
+    explicit LinkCreationOrder(unsigned flags)
+        : _flags(flags) {}
+
+    explicit LinkCreationOrder(const FileCreateProps& fcpl);
+    explicit LinkCreationOrder(const GroupCreateProps& gcpl);
+
+    unsigned getFlags() const;
+
+  protected:
+    void fromPropertyList(hid_t hid);
+
+  private:
+    friend FileCreateProps;
+    friend GroupCreateProps;
+    void apply(hid_t hid) const;
+    unsigned _flags;
+};
+
+
+///
+/// \brief Set threshold for attribute storage.
+///
+/// HDF5 can store attributes in the object header (compact) or in a B-tree
+/// (dense). This property sets the thresholds at which attributes are moved to
+/// one or the other storage format.
+///
+/// Please refer to the upstream documentation of `H5Pset_attr_phase_change` or
+/// Section 8 (Attributes) in the User Guide, in particular Subsection 8.5.
+///
+/// \implements PropertyInterface
+class AttributePhaseChange {
+  public:
+    ///
+    /// \brief Create the property from the threshold values.
+    ///
+    /// When the number of attributes hits `max_compact`, the attributes are
+    /// moved to dense storage; once the number drops below `min_dense`, they
+    /// are moved back to compact storage.
+    AttributePhaseChange(unsigned max_compact, unsigned min_dense);
+
+    /// \brief Extract threshold values from property list.
+    explicit AttributePhaseChange(const GroupCreateProps& gcpl);
+
+    unsigned max_compact() const;
+    unsigned min_dense() const;
+
+  private:
+    friend GroupCreateProps;
+    void apply(hid_t hid) const;
+
+    unsigned _max_compact;
+    unsigned _min_dense;
+};
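+
+// A minimal sketch (assuming `file` is an open, writable File and that
+// `createGroup` accepts group creation properties):
+//
+//     auto gcpl = GroupCreateProps();
+//     gcpl.add(AttributePhaseChange(16, 8));
+//     auto group = file.createGroup("g", gcpl);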
+
+/// @}
+
+}  // namespace HighFive
+
+#include "bits/H5PropertyList_misc.hpp"
diff --git a/packages/HighFive/include/highfive/H5Reference.hpp b/packages/HighFive/include/highfive/H5Reference.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..38062e987b73dc64954c733f8873da027b8c4dd3
--- /dev/null
+++ b/packages/HighFive/include/highfive/H5Reference.hpp
@@ -0,0 +1,81 @@
+/*
+ *  Copyright (c), 2020, EPFL - Blue Brain Project
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+
+#pragma once
+
+#include <string>
+#include <vector>
+
+#include <H5Ipublic.h>
+#include <H5Rpublic.h>
+
+#include "bits/H5_definitions.hpp"
+
+namespace HighFive {
+
+namespace details {
+template <typename T>
+struct inspector;
+}
+///
+/// \brief An HDF5 (object) reference type
+///
+/// HDF5 object references allow pointing to groups, datasets (and compound types).
+/// They differ from links in that they can themselves be stored as data in, and
+/// retrieved from, datasets in the HDF5 file.
+///
+class Reference {
+  public:
+    /// \brief Create an empty Reference to be initialized later
+    Reference() = default;
+
+    /// \brief Create a Reference to an object residing at a given location
+    ///
+    /// \param location A File or Group where the object being referenced to resides
+    /// \param object A Dataset or Group to be referenced
+    Reference(const Object& location, const Object& object);
+
+    /// \brief Retrieve the Object being referenced by the Reference
+    ///
+    /// \tparam T the appropriate HighFive Container (either DataSet or Group)
+    /// \param location the location where the referenced object is to be found (a File)
+    /// \return the dereferenced Object (either a Group or DataSet)
+    template <typename T>
+    T dereference(const Object& location) const;
+
+    /// \brief Get only the type of the referenced Object
+    ///
+    /// \param location the location where the referenced object is to be found (a File)
+    /// \return the ObjectType of the referenced object
+    ObjectType getType(const Object& location) const;
+
+  protected:
+    /// \brief Create a Reference from a low-level HDF5 object reference
+    inline explicit Reference(const hobj_ref_t h5_ref)
+        : href(h5_ref) {}
+
+    /// \brief Create the low-level reference and store it at refptr
+    ///
+    /// \param refptr Pointer to a memory location where the created HDF5 reference will
+    /// be stored
+    void create_ref(hobj_ref_t* refptr) const;
+
+  private:
+    Object get_ref(const Object& location) const;
+
+    hobj_ref_t href{};
+    std::string obj_name{};
+    hid_t parent_id{};
+
+    friend struct details::inspector<Reference>;
+};
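+
+// A minimal sketch (assuming `file` is an open File containing a dataset
+// "grid"): create a reference and resolve it back to the dataset:
+//
+//     auto ref = Reference(file, file.getDataSet("grid"));
+//     auto ds = ref.dereference<DataSet>(file);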
+
+}  // namespace HighFive
+
+#include "bits/H5Reference_misc.hpp"
diff --git a/packages/HighFive/include/highfive/H5Selection.hpp b/packages/HighFive/include/highfive/H5Selection.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..c00c66d52f99e67d55fee39c4b488a9b2cb110b6
--- /dev/null
+++ b/packages/HighFive/include/highfive/H5Selection.hpp
@@ -0,0 +1,68 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include "H5DataSet.hpp"
+#include "H5DataSpace.hpp"
+#include "bits/H5Slice_traits.hpp"
+#include "bits/H5Friends.hpp"
+
+namespace HighFive {
+
+namespace detail {
+Selection make_selection(const DataSpace&, const DataSpace&, const DataSet&);
+}
+
+///
+/// \brief Selection: represents a view on a slice/part of a dataset
+///
+/// A Selection is valid only if its parent dataset is valid
+///
+class Selection: public SliceTraits<Selection> {
+  public:
+    ///
+    /// \brief getSpace
+    /// \return Dataspace associated with this selection
+    ///
+    DataSpace getSpace() const noexcept;
+
+    ///
+    /// \brief getMemSpace
+    /// \return Dataspace associated with the memory representation of this
+    /// selection
+    ///
+    DataSpace getMemSpace() const noexcept;
+
+    ///
+    /// \brief getDataset
+    /// \return parent dataset of this selection
+    ///
+    DataSet& getDataset() noexcept;
+    const DataSet& getDataset() const noexcept;
+
+    ///
+    /// \brief return the datatype of the selection
+    /// \return the datatype of the selection
+    const DataType getDataType() const;
+
+  protected:
+    Selection(const DataSpace& memspace, const DataSpace& file_space, const DataSet& set);
+
+  private:
+    DataSpace _mem_space, _file_space;
+    DataSet _set;
+
+#if HIGHFIVE_HAS_FRIEND_DECLARATIONS
+    template <typename Derivate>
+    friend class ::HighFive::SliceTraits;
+#endif
+    friend Selection detail::make_selection(const DataSpace&, const DataSpace&, const DataSet&);
+};
+
+}  // namespace HighFive
diff --git a/packages/HighFive/include/highfive/H5Utility.hpp b/packages/HighFive/include/highfive/H5Utility.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..64ac1e5c890b15899a051eed2d3cb9450286fb02
--- /dev/null
+++ b/packages/HighFive/include/highfive/H5Utility.hpp
@@ -0,0 +1,215 @@
+/*
+ *  Copyright (c), 2017, Blue Brain Project - EPFL (CH)
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+
+#pragma once
+
+#include <H5Epublic.h>
+#include <functional>
+#include <string>
+#include <iostream>
+
+#include "bits/H5Friends.hpp"
+
+namespace HighFive {
+
+///
+/// \brief Utility class to disable HDF5 stack printing inside a scope.
+///
+class SilenceHDF5 {
+  public:
+    inline SilenceHDF5(bool enable = true)
+        : _client_data(nullptr) {
+        H5Eget_auto2(H5E_DEFAULT, &_func, &_client_data);
+        if (enable)
+            H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
+    }
+
+    inline ~SilenceHDF5() {
+        H5Eset_auto2(H5E_DEFAULT, _func, _client_data);
+    }
+
+  private:
+    H5E_auto2_t _func;
+    void* _client_data;
+};
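+
+// A minimal sketch: probe for a path without HDF5 printing an error stack on
+// failure (assuming `file` is an open File):
+//
+//     bool found;
+//     {
+//         SilenceHDF5 silencer;  // errors are silenced until end of scope
+//         found = file.exist("maybe/missing/path");
+//     }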
+
+#define HIGHFIVE_LOG_LEVEL_DEBUG 10
+#define HIGHFIVE_LOG_LEVEL_INFO  20
+#define HIGHFIVE_LOG_LEVEL_WARN  30
+#define HIGHFIVE_LOG_LEVEL_ERROR 40
+
+#ifndef HIGHFIVE_LOG_LEVEL
+#define HIGHFIVE_LOG_LEVEL HIGHFIVE_LOG_LEVEL_WARN
+#endif
+
+enum class LogSeverity {
+    Debug = HIGHFIVE_LOG_LEVEL_DEBUG,
+    Info = HIGHFIVE_LOG_LEVEL_INFO,
+    Warn = HIGHFIVE_LOG_LEVEL_WARN,
+    Error = HIGHFIVE_LOG_LEVEL_ERROR
+};
+
+inline std::string to_string(LogSeverity severity) {
+    switch (severity) {
+    case LogSeverity::Debug:
+        return "DEBUG";
+    case LogSeverity::Info:
+        return "INFO";
+    case LogSeverity::Warn:
+        return "WARN";
+    case LogSeverity::Error:
+        return "ERROR";
+    default:
+        return "??";
+    }
+}
+
+/** \brief A logger supporting basic functionality.
+ *
+ * This logger delegates the logging task to a callback. This level of
+ * indirection enables using the default Python logger from C++; or
+ * integrating HighFive into some custom logging solution.
+ *
+ * This class is not intended to be used for logging directly. Rather you should use
+ *   - `HIGHFIVE_LOG_DEBUG{,_IF}`
+ *   - `HIGHFIVE_LOG_INFO{,_IF}`
+ *   - `HIGHFIVE_LOG_WARN{,_IF}`
+ *   - `HIGHFIVE_LOG_ERROR{,_IF}`
+ *
+ * This is intended to be used as a singleton, via `get_global_logger()`.
+ */
+class Logger {
+  public:
+    using callback_type =
+        std::function<void(LogSeverity, const std::string&, const std::string&, int)>;
+
+  public:
+    Logger() = delete;
+    Logger(const Logger&) = delete;
+    Logger(Logger&&) = delete;
+
+    explicit Logger(callback_type cb)
+        : _cb(std::move(cb)) {}
+
+    Logger& operator=(const Logger&) = delete;
+    Logger& operator=(Logger&&) = delete;
+
+    inline void log(LogSeverity severity,
+                    const std::string& message,
+                    const std::string& file,
+                    int line) {
+        _cb(severity, message, file, line);
+    }
+
+    inline void set_logging_callback(callback_type cb) {
+        _cb = std::move(cb);
+    }
+
+  private:
+    callback_type _cb;
+};
+
+inline void default_logging_callback(LogSeverity severity,
+                                     const std::string& message,
+                                     const std::string& file,
+                                     int line) {
+    std::clog << file << ": " << line << " :: " << to_string(severity) << ": " << message << std::endl;
+}
+
+/// \brief Obtain a reference to the logger used by HighFive.
+///
+/// This uses a Meyers singleton, to ensure that the global logger is
+/// initialized with a safe default logger, before it is used.
+///
+/// Note: You probably don't need to call this function explicitly.
+///
+inline Logger& get_global_logger() {
+    static Logger logger(&default_logging_callback);
+    return logger;
+}
+
+/// \brief Sets the callback that's used by the logger.
+inline void register_logging_callback(Logger::callback_type cb) {
+    auto& logger = get_global_logger();
+    logger.set_logging_callback(std::move(cb));
+}
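+
+// A minimal sketch: route HighFive's log messages into a custom sink
+// (`my_log_sink` is a hypothetical function of the host application):
+//
+//     register_logging_callback(
+//         [](LogSeverity severity, const std::string& message,
+//            const std::string& file, int line) {
+//             my_log_sink(to_string(severity), message, file, line);
+//         });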
+
+namespace detail {
+/// \brief Log a `message` with severity `severity`.
+inline void log(LogSeverity severity,
+                const std::string& message,
+                const std::string& file,
+                int line) {
+    auto& logger = get_global_logger();
+    logger.log(severity, message, file, line);
+}
+}  // namespace detail
+
+#if HIGHFIVE_LOG_LEVEL <= HIGHFIVE_LOG_LEVEL_DEBUG
+#define HIGHFIVE_LOG_DEBUG(message) \
+    ::HighFive::detail::log(::HighFive::LogSeverity::Debug, (message), __FILE__, __LINE__);
+
+// Useful for the common pattern: if ...; then log something.
+#define HIGHFIVE_LOG_DEBUG_IF(cond, message) \
+    if ((cond)) {                            \
+        HIGHFIVE_LOG_DEBUG((message));       \
+    }
+
+#else
+#define HIGHFIVE_LOG_DEBUG(message)          ;
+#define HIGHFIVE_LOG_DEBUG_IF(cond, message) ;
+#endif
+
+#if HIGHFIVE_LOG_LEVEL <= HIGHFIVE_LOG_LEVEL_INFO
+#define HIGHFIVE_LOG_INFO(message) \
+    ::HighFive::detail::log(::HighFive::LogSeverity::Info, (message), __FILE__, __LINE__);
+
+// Useful for the common pattern: if ...; then log something.
+#define HIGHFIVE_LOG_INFO_IF(cond, message) \
+    if ((cond)) {                           \
+        HIGHFIVE_LOG_INFO((message));       \
+    }
+
+#else
+#define HIGHFIVE_LOG_INFO(message)          ;
+#define HIGHFIVE_LOG_INFO_IF(cond, message) ;
+#endif
+
+
+#if HIGHFIVE_LOG_LEVEL <= HIGHFIVE_LOG_LEVEL_WARN
+#define HIGHFIVE_LOG_WARN(message) \
+    ::HighFive::detail::log(::HighFive::LogSeverity::Warn, (message), __FILE__, __LINE__);
+
+// Useful for the common pattern: if ...; then log something.
+#define HIGHFIVE_LOG_WARN_IF(cond, message) \
+    if ((cond)) {                           \
+        HIGHFIVE_LOG_WARN((message));       \
+    }
+
+#else
+#define HIGHFIVE_LOG_WARN(message)          ;
+#define HIGHFIVE_LOG_WARN_IF(cond, message) ;
+#endif
+
+#if HIGHFIVE_LOG_LEVEL <= HIGHFIVE_LOG_LEVEL_ERROR
+#define HIGHFIVE_LOG_ERROR(message) \
+    ::HighFive::detail::log(::HighFive::LogSeverity::Error, (message), __FILE__, __LINE__);
+
+// Useful for the common pattern: if ...; then log something.
+#define HIGHFIVE_LOG_ERROR_IF(cond, message) \
+    if ((cond)) {                            \
+        HIGHFIVE_LOG_ERROR((message));       \
+    }
+
+#else
+#define HIGHFIVE_LOG_ERROR(message)          ;
+#define HIGHFIVE_LOG_ERROR_IF(cond, message) ;
+#endif
+
+}  // namespace HighFive
diff --git a/packages/HighFive/include/highfive/H5Version.hpp b/packages/HighFive/include/highfive/H5Version.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..dc238432cb01d651769c663276ea15a94008f3e9
--- /dev/null
+++ b/packages/HighFive/include/highfive/H5Version.hpp
@@ -0,0 +1,33 @@
+/*
+ *  Copyright (c), 2020
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#define HIGHFIVE_VERSION_MAJOR 2
+#define HIGHFIVE_VERSION_MINOR 8
+#define HIGHFIVE_VERSION_PATCH 0
+
+/** \brief Concatenated representation of the HighFive version.
+ *
+ *  \warning The macro `HIGHFIVE_VERSION` by itself isn't valid C/C++.
+ *
+ *  However, it can be stringified with two layers of macros, e.g.,
+ *  \code{.cpp}
+ *  #define STRINGIFY_VALUE(s) STRINGIFY_NAME(s)
+ *  #define STRINGIFY_NAME(s) #s
+ *
+ *  std::cout << STRINGIFY_VALUE(HIGHFIVE_VERSION) << "\n";
+ *  \endcode
+ */
+#define HIGHFIVE_VERSION 2.8.0
+
+/** \brief String representation of the HighFive version.
+ *
+ *  \warning This macro only exists from 2.7.1 onwards.
+ */
+#define HIGHFIVE_VERSION_STRING "2.8.0"
diff --git a/packages/HighFive/include/highfive/H5Version.hpp.in b/packages/HighFive/include/highfive/H5Version.hpp.in
new file mode 100644
index 0000000000000000000000000000000000000000..acddcffd39407ea485e4c629384ef8c572b79586
--- /dev/null
+++ b/packages/HighFive/include/highfive/H5Version.hpp.in
@@ -0,0 +1,33 @@
+/*
+ *  Copyright (c), 2020
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#define HIGHFIVE_VERSION_MAJOR @PROJECT_VERSION_MAJOR@
+#define HIGHFIVE_VERSION_MINOR @PROJECT_VERSION_MINOR@
+#define HIGHFIVE_VERSION_PATCH @PROJECT_VERSION_PATCH@
+
+/** \brief Concatenated representation of the HighFive version.
+ *
+ *  \warning The macro `HIGHFIVE_VERSION` by itself isn't valid C/C++.
+ *
+ *  However, it can be stringified with two layers of macros, e.g.,
+ *  \code{.cpp}
+ *  #define STRINGIFY_VALUE(s) STRINGIFY_NAME(s)
+ *  #define STRINGIFY_NAME(s) #s
+ *
+ *  std::cout << STRINGIFY_VALUE(HIGHFIVE_VERSION) << "\n";
+ *  \endcode
+ */
+#define HIGHFIVE_VERSION @PROJECT_VERSION@
+
+/** \brief String representation of the HighFive version.
+ *
+ *  \warning This macro only exists from 2.7.1 onwards.
+ */
+#define HIGHFIVE_VERSION_STRING "@PROJECT_VERSION@"
diff --git a/packages/HighFive/include/highfive/bits/H5Annotate_traits.hpp b/packages/HighFive/include/highfive/bits/H5Annotate_traits.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..375b535a467134934ca62ba84db281d51b918a35
--- /dev/null
+++ b/packages/HighFive/include/highfive/bits/H5Annotate_traits.hpp
@@ -0,0 +1,81 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include <string>
+
+#include "../H5Attribute.hpp"
+
+namespace HighFive {
+
+template <typename Derivate>
+class AnnotateTraits {
+  public:
+    ///
+    /// \brief create a new attribute with the name attribute_name
+    /// \param attribute_name identifier of the attribute
+    /// \param space Associated \ref DataSpace
+    /// \param type Associated \ref DataType
+    /// \return the attribute object
+    ///
+    Attribute createAttribute(const std::string& attribute_name,
+                              const DataSpace& space,
+                              const DataType& type);
+
+    ///
+    /// \brief createAttribute create a new attribute on the current dataset with
+    /// size specified by space
+    /// \param attribute_name identifier of the attribute
+    /// \param space Associated DataSpace
+    /// \return Attribute Object
+    template <typename Type>
+    Attribute createAttribute(const std::string& attribute_name, const DataSpace& space);
+
+    ///
+    /// \brief createAttribute create a new attribute on the current dataset and
+    /// write to it, inferring the DataSpace from data.
+    /// \param attribute_name identifier of the attribute
+    /// \param data Associated data to write, must support DataSpace::From, see
+    /// \ref DataSpace for more information
+    /// \return Attribute Object
+    ///
+    template <typename T>
+    Attribute createAttribute(const std::string& attribute_name, const T& data);
+
+    ///
+    /// \brief deleteAttribute lets you delete an attribute by its name.
+    /// \param attribute_name identifier of the attribute
+    void deleteAttribute(const std::string& attribute_name);
+
+    ///
+    /// \brief open an existing attribute with the name attribute_name
+    /// \param attribute_name identifier of the attribute
+    /// \return the attribute object
+    Attribute getAttribute(const std::string& attribute_name) const;
+
+    ///
+    /// \brief return the number of attributes of the node / group
+    /// \return number of attributes
+    size_t getNumberAttributes() const;
+
+    ///
+    /// \brief list all attribute names of the node / group
+    /// \return the list of attribute names
+    std::vector<std::string> listAttributeNames() const;
+
+    ///
+    /// \brief check whether an attribute exists
+    /// \return whether the attribute exists
+    bool hasAttribute(const std::string& attr_name) const;
+
+  private:
+    using derivate_type = Derivate;
+};
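+
+// A minimal sketch (assuming `group` is a valid Group, which mixes in
+// AnnotateTraits): create, list, and read back an attribute:
+//
+//     auto attr = group.createAttribute("note", std::string("hello"));
+//     std::vector<std::string> names = group.listAttributeNames();
+//     auto value = group.getAttribute("note").read<std::string>();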
+}  // namespace HighFive
diff --git a/packages/HighFive/include/highfive/bits/H5Annotate_traits_misc.hpp b/packages/HighFive/include/highfive/bits/H5Annotate_traits_misc.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..85d2798fe284f53662f607dc58949dcecfca73fa
--- /dev/null
+++ b/packages/HighFive/include/highfive/bits/H5Annotate_traits_misc.hpp
@@ -0,0 +1,118 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include <string>
+#include <vector>
+
+#include <H5Apublic.h>
+#include <H5Ppublic.h>
+
+#include "H5Attribute_misc.hpp"
+#include "H5Iterables_misc.hpp"
+
+namespace HighFive {
+
+template <typename Derivate>
+inline Attribute AnnotateTraits<Derivate>::createAttribute(const std::string& attribute_name,
+                                                           const DataSpace& space,
+                                                           const DataType& dtype) {
+    auto attr_id = H5Acreate2(static_cast<Derivate*>(this)->getId(),
+                              attribute_name.c_str(),
+                              dtype.getId(),
+                              space.getId(),
+                              H5P_DEFAULT,
+                              H5P_DEFAULT);
+    if (attr_id < 0) {
+        HDF5ErrMapper::ToException<AttributeException>(
+            std::string("Unable to create the attribute \"") + attribute_name + "\":");
+    }
+    return detail::make_attribute(attr_id);
+}
+
+template <typename Derivate>
+template <typename Type>
+inline Attribute AnnotateTraits<Derivate>::createAttribute(const std::string& attribute_name,
+                                                           const DataSpace& space) {
+    return createAttribute(attribute_name, space, create_and_check_datatype<Type>());
+}
+
+template <typename Derivate>
+template <typename T>
+inline Attribute AnnotateTraits<Derivate>::createAttribute(const std::string& attribute_name,
+                                                           const T& data) {
+    Attribute att =
+        createAttribute(attribute_name,
+                        DataSpace::From(data),
+                        create_and_check_datatype<typename details::inspector<T>::base_type>());
+    att.write(data);
+    return att;
+}
+
+template <typename Derivate>
+inline void AnnotateTraits<Derivate>::deleteAttribute(const std::string& attribute_name) {
+    if (H5Adelete(static_cast<const Derivate*>(this)->getId(), attribute_name.c_str()) < 0) {
+        HDF5ErrMapper::ToException<AttributeException>(
+            std::string("Unable to delete attribute \"") + attribute_name + "\":");
+    }
+}
+
+template <typename Derivate>
+inline Attribute AnnotateTraits<Derivate>::getAttribute(const std::string& attribute_name) const {
+    const auto attr_id =
+        H5Aopen(static_cast<const Derivate*>(this)->getId(), attribute_name.c_str(), H5P_DEFAULT);
+    if (attr_id < 0) {
+        HDF5ErrMapper::ToException<AttributeException>(
+            std::string("Unable to open the attribute \"") + attribute_name + "\":");
+    }
+    return detail::make_attribute(attr_id);
+}
+
+template <typename Derivate>
+inline size_t AnnotateTraits<Derivate>::getNumberAttributes() const {
+    int res = H5Aget_num_attrs(static_cast<const Derivate*>(this)->getId());
+    if (res < 0) {
+        HDF5ErrMapper::ToException<AttributeException>(
+            std::string("Unable to count attributes in existing group or file"));
+    }
+    return static_cast<size_t>(res);
+}
+
+template <typename Derivate>
+inline std::vector<std::string> AnnotateTraits<Derivate>::listAttributeNames() const {
+    std::vector<std::string> names;
+    details::HighFiveIterateData iterateData(names);
+
+    size_t num_objs = getNumberAttributes();
+    names.reserve(num_objs);
+
+    if (H5Aiterate2(static_cast<const Derivate*>(this)->getId(),
+                    H5_INDEX_NAME,
+                    H5_ITER_INC,
+                    NULL,
+                    &details::internal_high_five_iterate<H5A_info_t>,
+                    static_cast<void*>(&iterateData)) < 0) {
+        HDF5ErrMapper::ToException<AttributeException>(
+            std::string("Unable to list attributes in group"));
+    }
+
+    return names;
+}
+
+template <typename Derivate>
+inline bool AnnotateTraits<Derivate>::hasAttribute(const std::string& attr_name) const {
+    int res = H5Aexists(static_cast<const Derivate*>(this)->getId(), attr_name.c_str());
+    if (res < 0) {
+        HDF5ErrMapper::ToException<AttributeException>(
+            std::string("Unable to check for attribute in group"));
+    }
+    return res;
+}
+
+}  // namespace HighFive
diff --git a/packages/HighFive/include/highfive/bits/H5Attribute_misc.hpp b/packages/HighFive/include/highfive/bits/H5Attribute_misc.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..6516788297819c9f6e318bd588df3ac668ac63fd
--- /dev/null
+++ b/packages/HighFive/include/highfive/bits/H5Attribute_misc.hpp
@@ -0,0 +1,163 @@
+/*
+ *  Copyright (c), 2017, Ali Can Demiralp <ali.demiralp@rwth-aachen.de>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include <algorithm>
+#include <functional>
+#include <numeric>
+#include <sstream>
+#include <string>
+
+#include <H5Apublic.h>
+#include <H5Ppublic.h>
+
+#include "../H5DataSpace.hpp"
+#include "H5Converter_misc.hpp"
+#include "H5ReadWrite_misc.hpp"
+#include "H5Utils.hpp"
+
+namespace HighFive {
+
+inline std::string Attribute::getName() const {
+    return details::get_name(
+        [&](char* buffer, size_t length) { return H5Aget_name(_hid, length, buffer); });
+}
+
+inline size_t Attribute::getStorageSize() const {
+    return static_cast<size_t>(H5Aget_storage_size(_hid));
+}
+
+inline DataType Attribute::getDataType() const {
+    DataType res;
+    res._hid = H5Aget_type(_hid);
+    return res;
+}
+
+inline DataSpace Attribute::getSpace() const {
+    DataSpace space;
+    if ((space._hid = H5Aget_space(_hid)) < 0) {
+        HDF5ErrMapper::ToException<AttributeException>("Unable to get DataSpace out of Attribute");
+    }
+    return space;
+}
+
+inline DataSpace Attribute::getMemSpace() const {
+    return getSpace();
+}
+
+template <typename T>
+inline T Attribute::read() const {
+    T array;
+    read(array);
+    return array;
+}
+
+template <typename T>
+inline void Attribute::read(T& array) const {
+    const DataSpace& mem_space = getMemSpace();
+    auto file_datatype = getDataType();
+    const details::BufferInfo<T> buffer_info(
+        file_datatype,
+        [this]() -> std::string { return this->getName(); },
+        details::BufferInfo<T>::read);
+
+    if (!details::checkDimensions(mem_space, buffer_info.n_dimensions)) {
+        std::ostringstream ss;
+        ss << "Impossible to read DataSet of dimensions " << mem_space.getNumberDimensions()
+           << " into arrays of dimensions " << buffer_info.n_dimensions;
+        throw DataSpaceException(ss.str());
+    }
+    auto dims = mem_space.getDimensions();
+
+    if (mem_space.getElementCount() == 0) {
+        auto effective_dims = details::squeezeDimensions(dims,
+                                                         details::inspector<T>::recursive_ndim);
+
+        details::inspector<T>::prepare(array, effective_dims);
+        return;
+    }
+
+    auto r = details::data_converter::get_reader<T>(dims, array, file_datatype);
+    read(r.getPointer(), buffer_info.data_type);
+    // re-arrange results
+    r.unserialize(array);
+
+    auto t = buffer_info.data_type;
+    auto c = t.getClass();
+
+    if (c == DataTypeClass::VarLen || t.isVariableStr()) {
+#if H5_VERSION_GE(1, 12, 0)
+        // This function was introduced in 1.12.0
+        (void) H5Treclaim(t.getId(), mem_space.getId(), H5P_DEFAULT, r.getPointer());
+#else
+        // This one is deprecated since 1.12.0
+        (void) H5Dvlen_reclaim(t.getId(), mem_space.getId(), H5P_DEFAULT, r.getPointer());
+#endif
+    }
+}
+
+template <typename T>
+inline void Attribute::read(T* array, const DataType& mem_datatype) const {
+    static_assert(!std::is_const<T>::value,
+                  "read() requires a non-const structure to read data into");
+
+    if (H5Aread(getId(), mem_datatype.getId(), static_cast<void*>(array)) < 0) {
+        HDF5ErrMapper::ToException<AttributeException>("Error during HDF5 Read: ");
+    }
+}
+
+template <typename T>
+inline void Attribute::read(T* array) const {
+    using element_type = typename details::inspector<T>::base_type;
+    const DataType& mem_datatype = create_and_check_datatype<element_type>();
+
+    read(array, mem_datatype);
+}
+
+template <typename T>
+inline void Attribute::write(const T& buffer) {
+    const DataSpace& mem_space = getMemSpace();
+
+    if (mem_space.getElementCount() == 0) {
+        return;
+    }
+
+    auto file_datatype = getDataType();
+
+    const details::BufferInfo<T> buffer_info(
+        file_datatype,
+        [this]() -> std::string { return this->getName(); },
+        details::BufferInfo<T>::write);
+
+    if (!details::checkDimensions(mem_space, buffer_info.n_dimensions)) {
+        std::ostringstream ss;
+        ss << "Impossible to write buffer of dimensions " << buffer_info.n_dimensions
+           << " into dataset of dimensions " << mem_space.getNumberDimensions();
+        throw DataSpaceException(ss.str());
+    }
+    auto w = details::data_converter::serialize<T>(buffer, file_datatype);
+    write_raw(w.getPointer(), buffer_info.data_type);
+}
+
+template <typename T>
+inline void Attribute::write_raw(const T* buffer, const DataType& mem_datatype) {
+    if (H5Awrite(getId(), mem_datatype.getId(), buffer) < 0) {
+        HDF5ErrMapper::ToException<DataSetException>("Error during HDF5 Write: ");
+    }
+}
+
+template <typename T>
+inline void Attribute::write_raw(const T* buffer) {
+    using element_type = typename details::inspector<T>::base_type;
+    const auto& mem_datatype = create_and_check_datatype<element_type>();
+
+    write_raw(buffer, mem_datatype);
+}
+
+}  // namespace HighFive
diff --git a/packages/HighFive/include/highfive/bits/H5Converter_misc.hpp b/packages/HighFive/include/highfive/bits/H5Converter_misc.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..00749d1b6d6157bc745a86005a7a80d6b195fbb3
--- /dev/null
+++ b/packages/HighFive/include/highfive/bits/H5Converter_misc.hpp
@@ -0,0 +1,421 @@
+/*
+ *  Copyright (c) 2022 Blue Brain Project
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include <type_traits>
+
+#include "H5Inspector_misc.hpp"
+#include "../H5DataType.hpp"
+
+namespace HighFive {
+namespace details {
+
+template <class T>
+struct is_std_string {
+    static constexpr bool value =
+        std::is_same<typename inspector<T>::base_type, std::string>::value;
+};
+
+template <class T, class V = void>
+struct enable_shallow_copy
+    : public std::enable_if<!is_std_string<T>::value && inspector<T>::is_trivially_copyable, V> {};
+
+template <class T, class V = void>
+struct enable_deep_copy
+    : public std::enable_if<!is_std_string<T>::value && !inspector<T>::is_trivially_copyable, V> {};
+
+template <class T, class V = void>
+struct enable_string_copy: public std::enable_if<is_std_string<T>::value, V> {};
+
+
+template <typename T, bool IsReadOnly>
+struct ShallowCopyBuffer {
+    using type = unqualified_t<T>;
+    using hdf5_type =
+        typename std::conditional<IsReadOnly,
+                                  typename std::add_const<typename inspector<T>::hdf5_type>::type,
+                                  typename inspector<T>::hdf5_type>::type;
+
+    ShallowCopyBuffer() = delete;
+
+    explicit ShallowCopyBuffer(typename std::conditional<IsReadOnly, const T&, T&>::type val)
+        : ptr(inspector<T>::data(val)) {}
+
+    hdf5_type* getPointer() const {
+        return ptr;
+    }
+
+    hdf5_type* begin() const {
+        return getPointer();
+    }
+
+    void unserialize(T& /* val */) const {
+        /* nothing to do. */
+    }
+
+  private:
+    hdf5_type* ptr;
+};
+
+template <class T>
+struct DeepCopyBuffer {
+    using type = unqualified_t<T>;
+    using hdf5_type = typename inspector<type>::hdf5_type;
+
+    explicit DeepCopyBuffer(const std::vector<size_t>& _dims)
+        : buffer(inspector<T>::getSize(_dims))
+        , dims(_dims) {}
+
+    hdf5_type* getPointer() {
+        return buffer.data();
+    }
+
+    hdf5_type const* getPointer() const {
+        return buffer.data();
+    }
+
+    hdf5_type* begin() {
+        return getPointer();
+    }
+
+    hdf5_type const* begin() const {
+        return getPointer();
+    }
+
+    void unserialize(T& val) const {
+        inspector<type>::unserialize(buffer.data(), dims, val);
+    }
+
+  private:
+    std::vector<hdf5_type> buffer;
+    std::vector<size_t> dims;
+};
+
+enum class BufferMode { Read, Write };
+
+
+///
+/// \brief String length in bytes excluding the `\0`.
+///
+inline size_t char_buffer_size(char const* const str, size_t max_string_length) {
+    for (size_t i = 0; i <= max_string_length; ++i) {
+        if (str[i] == '\0') {
+            return i;
+        }
+    }
+
+    return max_string_length;
+}
+
+
+///
+/// \brief A buffer for reading/writing strings.
+///
+/// A string in HDF5 can be represented as a fixed or variable length string.
+/// The important difference for this buffer is that `H5D{read,write}` expects
+/// different input depending on whether the strings are fixed or variable length.
+/// For fixed length strings, it expects an array of chars, i.e. one string
+/// packed after the other contiguously. While for variable length strings it
+/// expects a list of pointers to the beginning of each string. Variable length
+/// strings must be null-terminated, because that's how their length is
+/// determined.
+///
+/// This buffer hides the difference between fixed and variable length strings
+/// by having internal data structures available for both cases at compile time.
+/// The choice which internal buffer to use is made at runtime.
+///
+/// Consider an HDF5 dataset with N fixed-length strings, each of which is M
+/// characters long. Then the in-memory strings are copied into an internal
+/// buffer of size N*M. If null- or space-padded the buffer should be filled
+/// with the appropriate character. This is important if the in-memory strings
+/// are less than M characters long.
+///
+/// An HDF5 dataset with N variable-length strings (all null-terminated) uses
+/// the internal list of pointers to the beginning of each string. Those
+/// pointers can point to the in-memory strings themselves, if those strings
+/// are known to be null-terminated. Otherwise the in-memory strings are
+/// copied to an internal buffer of null-terminated strings, and the pointers
+/// point to the start of each string in the internal buffer.
+///
+/// This class is responsible for arranging the strings properly before passing
+/// the buffers to HDF5. To keep this class generic, it provides a generic
+/// read/write interface to the internal strings, i.e. a pointer with a size.
+/// For reading from the buffer the proxy is called `StringConstView`. This
+/// proxy object is to be used by the `inspector` to copy from the buffer into
+/// the final destination, e.g. an `std::string`.  Similarly, there's a proxy
+/// object for serializing into the buffer, i.e. the `StringView`. Again the
+/// `inspector` is responsible for obtaining the pointer, size and padding of
+/// the string.
+///
+/// Nomenclature:
+///   - size of a string is the number of bytes required to store the string,
+///     including the null character for null-terminated strings.
+///
+///   - length of a string is the number of bytes without the null character.
+///
+/// Note: both 'length' and 'size' are counted in bytes, not in the number of
+///   symbols or characters, even for UTF8 strings.
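+///
+/// Illustrative example: a dataset of N = 2 fixed-length, null-terminated
+/// strings with string_size M = 4 (hence string_length = 3) holding
+/// {"ab", "cde"} is staged in `fixed_length_buffer` as:
+///
+///     'a' 'b' '\0' '\0' 'c' 'd' 'e' '\0'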
+template <typename T, BufferMode buffer_mode>
+struct StringBuffer {
+    using type = unqualified_t<T>;
+    using hdf5_type = typename inspector<type>::hdf5_type;
+
+    class StringView {
+      public:
+        StringView(StringBuffer<T, buffer_mode>& _buffer, size_t _i)
+            : buffer(_buffer)
+            , i(_i) {}
+
+        ///
+        /// \brief Assign the in-memory string to the buffer.
+        ///
+        /// This method copies the in-memory string to the appropriate
+        /// internal buffer as needed.
+        ///
+        /// The `length` is the length of the string in bytes.
+        void assign(char const* data, size_t length, StringPadding padding) {
+            if (buffer.isVariableLengthString()) {
+                if (padding == StringPadding::NullTerminated) {
+                    buffer.variable_length_pointers[i] = data;
+                } else {
+                    buffer.variable_length_buffer[i] = std::string(data, length);
+                    buffer.variable_length_pointers[i] = buffer.variable_length_buffer[i].data();
+                }
+            } else if (buffer.isFixedLengthString()) {
+                // If the buffer is fixed-length and null-terminated, then
+                // `buffer.string_length` doesn't include the null-character.
+                if (length > buffer.string_length) {
+                    throw std::invalid_argument("String length too big.");
+                }
+
+                memcpy(&buffer.fixed_length_buffer[i * buffer.string_size], data, length);
+            }
+        }
+
+      private:
+        StringBuffer<T, buffer_mode>& buffer;
+        size_t i;
+    };
+
+
+    class StringConstView {
+      public:
+        StringConstView(const StringBuffer<T, buffer_mode>& _buffer, size_t _i)
+            : buffer(_buffer)
+            , i(_i) {}
+
+        /// \brief Pointer to the first byte of the string.
+        ///
+        /// The valid indices for this pointer are: 0, ..., length() - 1.
+        char const* data() const {
+            if (buffer.isVariableLengthString()) {
+                return buffer.variable_length_pointers[i];
+            } else {
+                return &buffer.fixed_length_buffer[i * buffer.string_size];
+            }
+        }
+
+        /// \brief Length of the string in bytes.
+        ///
+        /// Note that for null-terminated strings the "length" doesn't include
+        /// the null character. Hence, if storing this string as a
+        /// null-terminated string, the destination buffer needs to be at least
+        /// `length() + 1` bytes long.
+        size_t length() const {
+            if (buffer.isNullTerminated()) {
+                return char_buffer_size(data(), buffer.string_length);
+            } else {
+                return buffer.string_length;
+            }
+        }
+
+      private:
+        const StringBuffer<T, buffer_mode>& buffer;
+        size_t i;
+    };
+
+
+    class Iterator {
+      public:
+        Iterator(StringBuffer<T, buffer_mode>& _buffer, size_t _pos)
+            : buffer(_buffer)
+            , pos(_pos) {}
+
+        Iterator operator+(size_t n_strings) const {
+            return Iterator(buffer, pos + n_strings);
+        }
+
+        void operator+=(size_t n_strings) {
+            pos += n_strings;
+        }
+
+        StringView operator*() {
+            return StringView(buffer, pos);
+        }
+
+        StringConstView operator*() const {
+            return StringConstView(buffer, pos);
+        }
+
+      private:
+        StringBuffer<T, buffer_mode>& buffer;
+        size_t pos;
+    };
+
+    StringBuffer(std::vector<size_t> _dims, const DataType& _file_datatype)
+        : file_datatype(_file_datatype.asStringType())
+        , padding(file_datatype.getPadding())
+        , string_size(file_datatype.isVariableStr() ? size_t(-1) : file_datatype.getSize())
+        , string_length(string_size - size_t(isNullTerminated()))
+        , dims(_dims) {
+        if (string_size == 0 && isNullTerminated()) {
+            throw DataTypeException(
+                "Fixed-length, null-terminated need at least one byte to store the "
+                "null-character.");
+        }
+
+        auto n_strings = compute_total_size(dims);
+        if (isVariableLengthString()) {
+            variable_length_buffer.resize(n_strings);
+            variable_length_pointers.resize(n_strings);
+        } else {
+            char pad = padding == StringPadding::SpacePadded ? ' ' : '\0';
+            fixed_length_buffer.assign(n_strings * string_size, pad);
+        }
+    }
+
+    bool isVariableLengthString() const {
+        return file_datatype.isVariableStr();
+    }
+
+    bool isFixedLengthString() const {
+        return file_datatype.isFixedLenStr();
+    }
+
+    bool isNullTerminated() const {
+        return file_datatype.getPadding() == StringPadding::NullTerminated;
+    }
+
+
+    void* getPointer() {
+        if (file_datatype.isVariableStr()) {
+            return variable_length_pointers.data();
+        } else {
+            return fixed_length_buffer.data();
+        }
+    }
+
+    Iterator begin() {
+        return Iterator(*this, 0ul);
+    }
+
+    void unserialize(T& val) {
+        inspector<type>::unserialize(begin(), dims, val);
+    }
+
+  private:
+    StringType file_datatype;
+    StringPadding padding;
+    size_t string_size;    // Size of buffer required to store the string.
+                           // Meaningful for fixed length strings only.
+    size_t string_length;  // Semantic length of string.
+    std::vector<size_t> dims;
+
+    std::vector<char> fixed_length_buffer;
+    std::vector<std::string> variable_length_buffer;
+    std::vector<
+        typename std::conditional<buffer_mode == BufferMode::Write, const char, char>::type*>
+        variable_length_pointers;
+};
+
+
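+// The Writer/Reader wrappers below select a buffer strategy at compile time
+// (via SFINAE on enable_shallow_copy / enable_deep_copy / enable_string_copy):
+// pointer pass-through, a contiguous staging buffer, or the string buffer above.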
+template <typename T, typename Enable = void>
+struct Writer;
+
+template <typename T>
+struct Writer<T, typename enable_shallow_copy<T>::type>: public ShallowCopyBuffer<T, true> {
+  private:
+    using super = ShallowCopyBuffer<T, true>;
+
+  public:
+    explicit Writer(const T& val, const DataType& /* file_datatype */)
+        : super(val) {}
+};
+
+template <typename T>
+struct Writer<T, typename enable_deep_copy<T>::type>: public DeepCopyBuffer<T> {
+    explicit Writer(const T& val, const DataType& /* file_datatype */)
+        : DeepCopyBuffer<T>(inspector<T>::getDimensions(val)) {
+        inspector<T>::serialize(val, this->begin());
+    }
+};
+
+template <typename T>
+struct Writer<T, typename enable_string_copy<T>::type>: public StringBuffer<T, BufferMode::Write> {
+    explicit Writer(const T& val, const DataType& _file_datatype)
+        : StringBuffer<T, BufferMode::Write>(inspector<T>::getDimensions(val), _file_datatype) {
+        inspector<T>::serialize(val, this->begin());
+    }
+};
+
+template <typename T, typename Enable = void>
+struct Reader;
+
+template <typename T>
+struct Reader<T, typename enable_shallow_copy<T>::type>: public ShallowCopyBuffer<T, false> {
+  private:
+    using super = ShallowCopyBuffer<T, false>;
+    using type = typename super::type;
+
+  public:
+    Reader(const std::vector<size_t>&, type& val, const DataType& /* file_datatype */)
+        : super(val) {}
+};
+
+template <typename T>
+struct Reader<T, typename enable_deep_copy<T>::type>: public DeepCopyBuffer<T> {
+  private:
+    using super = DeepCopyBuffer<T>;
+    using type = typename super::type;
+
+  public:
+    Reader(const std::vector<size_t>& _dims, type&, const DataType& /* file_datatype */)
+        : super(_dims) {}
+};
+
+
+template <typename T>
+struct Reader<T, typename enable_string_copy<T>::type>: public StringBuffer<T, BufferMode::Write> {
+  public:
+    explicit Reader(const std::vector<size_t>& _dims,
+                    const T& /* val */,
+                    const DataType& _file_datatype)
+        : StringBuffer<T, BufferMode::Write>(_dims, _file_datatype) {}
+};
+
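+// Sketch of the intended use (all names as defined in this file): on the
+// write path, `auto w = data_converter::serialize<T>(val, file_datatype);`
+// stages the data and `w.getPointer()` is handed to `H5Dwrite`; on the read
+// path, `auto r = data_converter::get_reader<T>(dims, val, file_datatype);`
+// provides `r.getPointer()` for `H5Dread`, followed by `r.unserialize(val)`.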
+struct data_converter {
+    template <typename T>
+    static Writer<T> serialize(const typename inspector<T>::type& val,
+                               const DataType& file_datatype) {
+        return Writer<T>(val, file_datatype);
+    }
+
+    template <typename T>
+    static Reader<T> get_reader(const std::vector<size_t>& dims,
+                                T& val,
+                                const DataType& file_datatype) {
+        // TODO Use bufferinfo for recursive_ndim
+        auto effective_dims = details::squeezeDimensions(dims, inspector<T>::recursive_ndim);
+        inspector<T>::prepare(val, effective_dims);
+        return Reader<T>(effective_dims, val, file_datatype);
+    }
+};
+
+}  // namespace details
+}  // namespace HighFive
diff --git a/packages/HighFive/include/highfive/bits/H5DataSet_misc.hpp b/packages/HighFive/include/highfive/bits/H5DataSet_misc.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..4411b4c0d42b40714f7e53269d8018e71577dfcb
--- /dev/null
+++ b/packages/HighFive/include/highfive/bits/H5DataSet_misc.hpp
@@ -0,0 +1,67 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include <algorithm>
+#include <functional>
+#include <numeric>
+#include <sstream>
+#include <string>
+
+#include <H5Dpublic.h>
+#include <H5Ppublic.h>
+
+#include "H5Utils.hpp"
+
+namespace HighFive {
+
+inline uint64_t DataSet::getStorageSize() const {
+    return H5Dget_storage_size(_hid);
+}
+
+inline DataType DataSet::getDataType() const {
+    return DataType(H5Dget_type(_hid));
+}
+
+inline DataSpace DataSet::getSpace() const {
+    DataSpace space;
+    if ((space._hid = H5Dget_space(_hid)) < 0) {
+        HDF5ErrMapper::ToException<DataSetException>("Unable to get DataSpace out of DataSet");
+    }
+    return space;
+}
+
+inline DataSpace DataSet::getMemSpace() const {
+    return getSpace();
+}
+
+inline uint64_t DataSet::getOffset() const {
+    uint64_t addr = H5Dget_offset(_hid);
+    if (addr == HADDR_UNDEF) {
+        HDF5ErrMapper::ToException<DataSetException>("Cannot get offset of DataSet.");
+    }
+    return addr;
+}
+
+inline void DataSet::resize(const std::vector<size_t>& dims) {
+    const size_t numDimensions = getSpace().getDimensions().size();
+    if (dims.size() != numDimensions) {
+        HDF5ErrMapper::ToException<DataSetException>("Invalid dataspace dimensions, got " +
+                                                     std::to_string(dims.size()) + " expected " +
+                                                     std::to_string(numDimensions));
+    }
+
+    std::vector<hsize_t> real_dims(dims.begin(), dims.end());
+
+    if (H5Dset_extent(getId(), real_dims.data()) < 0) {
+        HDF5ErrMapper::ToException<DataSetException>("Could not resize dataset.");
+    }
+}
+
+}  // namespace HighFive
diff --git a/packages/HighFive/include/highfive/bits/H5DataType_misc.hpp b/packages/HighFive/include/highfive/bits/H5DataType_misc.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..8535d617ab443405dbdd88353f52cf4850f8e7d1
--- /dev/null
+++ b/packages/HighFive/include/highfive/bits/H5DataType_misc.hpp
@@ -0,0 +1,600 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include <string>
+#include <complex>
+#include <cstring>
+#if HIGHFIVE_CXX_STD >= 17
+#include <cstddef>
+#endif
+
+#include <H5Ppublic.h>
+#include <H5Tpublic.h>
+
+#ifdef H5_USE_HALF_FLOAT
+#include <half.hpp>
+#endif
+
+#include "H5Inspector_misc.hpp"
+
+namespace HighFive {
+
+namespace detail {
+
+inline hid_t h5t_copy(hid_t original) {
+    auto copy = H5Tcopy(original);
+    if (copy == H5I_INVALID_HID) {
+        HDF5ErrMapper::ToException<DataTypeException>("Error copying datatype.");
+    }
+
+    return copy;
+}
+
+inline hsize_t h5t_get_size(hid_t hid) {
+    hsize_t size = H5Tget_size(hid);
+    if (size == 0) {
+        HDF5ErrMapper::ToException<DataTypeException>("Error getting size of datatype.");
+    }
+
+    return size;
+}
+
+inline H5T_cset_t h5t_get_cset(hid_t hid) {
+    auto cset = H5Tget_cset(hid);
+    if (cset == H5T_CSET_ERROR) {
+        HDF5ErrMapper::ToException<DataTypeException>("Error getting cset of datatype.");
+    }
+
+    return cset;
+}
+
+inline H5T_str_t h5t_get_strpad(hid_t hid) {
+    auto strpad = H5Tget_strpad(hid);
+    if (strpad == H5T_STR_ERROR) {
+        HDF5ErrMapper::ToException<DataTypeException>("Error getting strpad of datatype.");
+    }
+
+    return strpad;
+}
+
+inline void h5t_set_size(hid_t hid, hsize_t size) {
+    if (H5Tset_size(hid, size) < 0) {
+        HDF5ErrMapper::ToException<DataTypeException>("Error setting size of datatype.");
+    }
+}
+
+inline void h5t_set_cset(hid_t hid, H5T_cset_t cset) {
+    if (H5Tset_cset(hid, cset) < 0) {
+        HDF5ErrMapper::ToException<DataTypeException>("Error setting cset of datatype.");
+    }
+}
+
+inline void h5t_set_strpad(hid_t hid, H5T_str_t strpad) {
+    if (H5Tset_strpad(hid, strpad) < 0) {
+        HDF5ErrMapper::ToException<DataTypeException>("Error setting strpad of datatype.");
+    }
+}
+}  // namespace detail
+
+
+namespace {  // unnamed
+inline DataTypeClass convert_type_class(const H5T_class_t& tclass);
+inline std::string type_class_string(DataTypeClass);
+inline hid_t create_string(std::size_t length);
+}  // namespace
+
+inline bool DataType::empty() const noexcept {
+    return _hid == H5I_INVALID_HID;
+}
+
+inline DataTypeClass DataType::getClass() const {
+    return convert_type_class(H5Tget_class(_hid));
+}
+
+inline size_t DataType::getSize() const {
+    return detail::h5t_get_size(_hid);
+}
+
+inline bool DataType::operator==(const DataType& other) const {
+    return (H5Tequal(_hid, other._hid) > 0);
+}
+
+inline bool DataType::operator!=(const DataType& other) const {
+    return !(*this == other);
+}
+
+inline bool DataType::isVariableStr() const {
+    auto var_value = H5Tis_variable_str(_hid);
+    if (var_value < 0) {
+        HDF5ErrMapper::ToException<DataTypeException>("Unable to define datatype size to variable");
+    }
+    return static_cast<bool>(var_value);
+}
+
+inline bool DataType::isFixedLenStr() const {
+    return getClass() == DataTypeClass::String && !isVariableStr();
+}
+
+inline bool DataType::isReference() const {
+    return H5Tequal(_hid, H5T_STD_REF_OBJ) > 0;
+}
+
+inline StringType DataType::asStringType() const {
+    if (getClass() != DataTypeClass::String) {
+        throw DataTypeException("Invalid conversion to StringType.");
+    }
+
+    if (isValid() && H5Iinc_ref(_hid) < 0) {
+        throw ObjectException("Reference counter increase failure");
+    }
+
+    return StringType(_hid);
+}
+
+inline std::string DataType::string() const {
+    return type_class_string(getClass()) + std::to_string(getSize() * 8);
+}
+
+inline StringPadding StringType::getPadding() const {
+    return StringPadding(detail::h5t_get_strpad(_hid));
+}
+
+inline CharacterSet StringType::getCharacterSet() const {
+    return CharacterSet(detail::h5t_get_cset(_hid));
+}
+
+inline FixedLengthStringType::FixedLengthStringType(size_t size,
+                                                    StringPadding padding,
+                                                    CharacterSet character_set) {
+    if (size == 0 && padding == StringPadding::NullTerminated) {
+        throw DataTypeException(
+            "Fixed-length, null-terminated need at least one byte to store the null-character.");
+    }
+
+    _hid = detail::h5t_copy(H5T_C_S1);
+
+    detail::h5t_set_size(_hid, hsize_t(size));
+    detail::h5t_set_cset(_hid, H5T_cset_t(character_set));
+    detail::h5t_set_strpad(_hid, H5T_str_t(padding));
+}
+
+inline VariableLengthStringType::VariableLengthStringType(CharacterSet character_set) {
+    _hid = detail::h5t_copy(H5T_C_S1);
+
+    detail::h5t_set_size(_hid, H5T_VARIABLE);
+    detail::h5t_set_cset(_hid, H5T_cset_t(character_set));
+}
+
+// char mapping
+template <>
+inline AtomicType<char>::AtomicType() {
+    _hid = detail::h5t_copy(H5T_NATIVE_CHAR);
+}
+
+template <>
+inline AtomicType<signed char>::AtomicType() {
+    _hid = detail::h5t_copy(H5T_NATIVE_SCHAR);
+}
+
+template <>
+inline AtomicType<unsigned char>::AtomicType() {
+    _hid = detail::h5t_copy(H5T_NATIVE_UCHAR);
+}
+
+// short mapping
+template <>
+inline AtomicType<short>::AtomicType() {
+    _hid = detail::h5t_copy(H5T_NATIVE_SHORT);
+}
+
+template <>
+inline AtomicType<unsigned short>::AtomicType() {
+    _hid = detail::h5t_copy(H5T_NATIVE_USHORT);
+}
+
+// integer mapping
+template <>
+inline AtomicType<int>::AtomicType() {
+    _hid = detail::h5t_copy(H5T_NATIVE_INT);
+}
+
+template <>
+inline AtomicType<unsigned>::AtomicType() {
+    _hid = detail::h5t_copy(H5T_NATIVE_UINT);
+}
+
+// long mapping
+template <>
+inline AtomicType<long>::AtomicType() {
+    _hid = detail::h5t_copy(H5T_NATIVE_LONG);
+}
+
+template <>
+inline AtomicType<unsigned long>::AtomicType() {
+    _hid = detail::h5t_copy(H5T_NATIVE_ULONG);
+}
+
+// long long mapping
+template <>
+inline AtomicType<long long>::AtomicType() {
+    _hid = detail::h5t_copy(H5T_NATIVE_LLONG);
+}
+
+template <>
+inline AtomicType<unsigned long long>::AtomicType() {
+    _hid = detail::h5t_copy(H5T_NATIVE_ULLONG);
+}
+
+// half-float, float, double and long double mapping
+#ifdef H5_USE_HALF_FLOAT
+using float16_t = half_float::half;
+
+template <>
+inline AtomicType<float16_t>::AtomicType() {
+    _hid = detail::h5t_copy(H5T_NATIVE_FLOAT);
+    // Sign position, exponent position, exponent size, mantissa position, mantissa size
+    H5Tset_fields(_hid, 15, 10, 5, 0, 10);
+    // Total datatype size (in bytes)
+    detail::h5t_set_size(_hid, 2);
+    // Floating point exponent bias
+    H5Tset_ebias(_hid, 15);
+}
+#endif
+
+template <>
+inline AtomicType<float>::AtomicType() {
+    _hid = detail::h5t_copy(H5T_NATIVE_FLOAT);
+}
+
+template <>
+inline AtomicType<double>::AtomicType() {
+    _hid = detail::h5t_copy(H5T_NATIVE_DOUBLE);
+}
+
+template <>
+inline AtomicType<long double>::AtomicType() {
+    _hid = detail::h5t_copy(H5T_NATIVE_LDOUBLE);
+}
+
+// std string
+template <>
+inline AtomicType<std::string>::AtomicType() {
+    _hid = create_string(H5T_VARIABLE);
+}
+
+#if HIGHFIVE_CXX_STD >= 17
+// std byte
+template <>
+inline AtomicType<std::byte>::AtomicType() {
+    _hid = detail::h5t_copy(H5T_NATIVE_B8);
+}
+#endif
+
+// Fixed-Length strings
+// require class specialization templated for the char length
+template <size_t StrLen>
+class AtomicType<char[StrLen]>: public DataType {
+  public:
+    inline AtomicType()
+        : DataType(create_string(StrLen)) {}
+};
+
+template <size_t StrLen>
+class AtomicType<FixedLenStringArray<StrLen>>: public DataType {
+  public:
+    inline AtomicType()
+        : DataType(create_string(StrLen)) {}
+};
+
+template <typename T>
+class AtomicType<std::complex<T>>: public DataType {
+  public:
+    inline AtomicType()
+        : DataType(
+              CompoundType({{"r", create_datatype<T>(), 0}, {"i", create_datatype<T>(), sizeof(T)}},
+                           sizeof(std::complex<T>))) {
+        static_assert(std::is_arithmetic<T>::value,
+                      "std::complex accepts only floating point and integral numbers.");
+    }
+};
+
+// For booleans we follow h5py's convention
+inline EnumType<details::Boolean> create_enum_boolean() {
+    return {{"FALSE", details::Boolean::HighFiveFalse}, {"TRUE", details::Boolean::HighFiveTrue}};
+}
+
+// Other cases not supported. Fail early with a user message
+template <typename T>
+AtomicType<T>::AtomicType() {
+    static_assert(details::inspector<T>::recursive_ndim == 0,
+                  "Atomic types cant be arrays, except for char[] (fixed-length strings)");
+    static_assert(details::inspector<T>::recursive_ndim > 0, "Type not supported");
+}
+
+
+// class FixedLenStringArray<N>
+
+template <std::size_t N>
+inline FixedLenStringArray<N>::FixedLenStringArray(const char array[][N], std::size_t length) {
+    datavec.resize(length);
+    // `array[0]` decays to `const char*`; a raw array has no `.data()` member.
+    std::memcpy(datavec[0].data(), array[0], N * length);
+}
+
+template <std::size_t N>
+inline FixedLenStringArray<N>::FixedLenStringArray(const std::string* iter_begin,
+                                                   const std::string* iter_end) {
+    datavec.reserve(static_cast<std::size_t>(iter_end - iter_begin));
+    for (std::string const* it = iter_begin; it != iter_end; ++it) {
+        push_back(*it);
+    }
+}
+
+template <std::size_t N>
+inline FixedLenStringArray<N>::FixedLenStringArray(const std::vector<std::string>& vec)
+    : FixedLenStringArray(vec.data(), vec.data() + vec.size()) {}
+
+template <std::size_t N>
+inline FixedLenStringArray<N>::FixedLenStringArray(
+    const std::initializer_list<std::string>& init_list)
+    : FixedLenStringArray(init_list.begin(), init_list.end()) {}
+
+template <std::size_t N>
+inline void FixedLenStringArray<N>::push_back(const std::string& src) {
+    datavec.emplace_back();
+    const size_t length = std::min(N - 1, src.length());
+    std::memcpy(datavec.back().data(), src.c_str(), length);
+    datavec.back()[length] = 0;
+}
+
+template <std::size_t N>
+inline void FixedLenStringArray<N>::push_back(const std::array<char, N>& src) {
+    datavec.emplace_back();
+    std::copy(src.begin(), src.end(), datavec.back().data());
+}
+
+template <std::size_t N>
+inline std::string FixedLenStringArray<N>::getString(std::size_t i) const {
+    return std::string(datavec[i].data());
+}
+
+// Internal
+// Reference mapping
+template <>
+inline AtomicType<Reference>::AtomicType() {
+    _hid = detail::h5t_copy(H5T_STD_REF_OBJ);
+}
+
+inline size_t find_first_atomic_member_size(hid_t hid) {
+    // Recursive case: descend into the first member of a compound type
+    if (H5Tget_class(hid) == H5T_COMPOUND) {
+        auto number_of_members = H5Tget_nmembers(hid);
+        if (number_of_members == -1) {
+            throw DataTypeException("Cannot get members of CompoundType with hid: " +
+                                    std::to_string(hid));
+        }
+        if (number_of_members == 0) {
+            throw DataTypeException("No members defined for CompoundType with hid: " +
+                                    std::to_string(hid));
+        }
+
+        auto member_type = H5Tget_member_type(hid, 0);
+        auto size = find_first_atomic_member_size(member_type);
+        H5Tclose(member_type);
+        return size;
+    } else if (H5Tget_class(hid) == H5T_STRING) {
+        return 1;
+    }
+    return detail::h5t_get_size(hid);
+}
+
+// Calculate the padding required to align an element of a struct.
+// For background on alignment see: https://en.cppreference.com/w/cpp/language/object#Alignment
+// The macro is used for two purposes:
+// 1) To pad an element being appended to the structure:
+//    'current_size' is the size of the structure before adding the new element,
+//    'member_size' is the size of the element we want to add.
+// 2) To compute the final padding of the whole structure:
+//    'current_size' is the size of the whole structure without the final padding,
+//    'member_size' is the maximum size over all elements of the struct.
+//
+// The basic formula computes how much must be added to 'current_size' so that it
+// fits 'member_size'. The final padding is computed separately because the end
+// padding must fit the biggest element of the struct.
+//
+// Since we work with `size_t` operands, every intermediate result must stay
+// non-negative.
+#define _H5_STRUCT_PADDING(current_size, member_size)                                \
+    (((member_size) >= (current_size))                                               \
+         ? (((member_size) - (current_size)) % (member_size))                        \
+         : ((((member_size) - (((current_size) - (member_size)) % (member_size)))) % \
+            (member_size)))
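+// Worked example: current_size = 5, member_size = 4 gives
+// (4 - ((5 - 4) % 4)) % 4 = 3, so the next member starts at offset 5 + 3 = 8.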
+
+inline void CompoundType::create(size_t size) {
+    if (size == 0) {
+        size_t current_size = 0, max_atomic_size = 0;
+
+        // Do a first pass to find the total size of the compound datatype
+        for (auto& member: members) {
+            size_t member_size = detail::h5t_get_size(member.base_type.getId());
+
+            if (member_size == 0) {
+                throw DataTypeException("Cannot get size of DataType with hid: " +
+                                        std::to_string(member.base_type.getId()));
+            }
+
+            size_t first_atomic_size = find_first_atomic_member_size(member.base_type.getId());
+
+            // Set the offset of this member within the struct according to the
+            // standard alignment rules. The C++ standard specifies that:
+            // > objects have an alignment requirement of which their size is a multiple
+            member.offset = current_size + _H5_STRUCT_PADDING(current_size, first_atomic_size);
+
+            // Set the current size to the end of the new member
+            current_size = member.offset + member_size;
+
+            // Keep track of the highest atomic member size because it's needed
+            // for the padding of the complete compound type.
+            max_atomic_size = std::max(max_atomic_size, first_atomic_size);
+        }
+
+        size = current_size + _H5_STRUCT_PADDING(current_size, max_atomic_size);
+    }
+
+    // Create the HDF5 type
+    if ((_hid = H5Tcreate(H5T_COMPOUND, size)) < 0) {
+        HDF5ErrMapper::ToException<DataTypeException>("Could not create new compound datatype");
+    }
+
+    // Loop over all the members and insert them into the datatype
+    for (const auto& member: members) {
+        if (H5Tinsert(_hid, member.name.c_str(), member.offset, member.base_type.getId()) < 0) {
+            HDF5ErrMapper::ToException<DataTypeException>("Could not add new member to datatype");
+        }
+    }
+}
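+// e.g. members {char, double} yield offsets 0 and 8 and a total size of 16,
+// matching the usual C++ struct layout.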
+
+#undef _H5_STRUCT_PADDING
+
+inline void CompoundType::commit(const Object& object, const std::string& name) const {
+    H5Tcommit2(object.getId(), name.c_str(), getId(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+}
+
+template <typename T>
+inline void EnumType<T>::create() {
+    // Create the HDF5 type
+    if ((_hid = H5Tenum_create(AtomicType<typename std::underlying_type<T>::type>{}.getId())) < 0) {
+        HDF5ErrMapper::ToException<DataTypeException>("Could not create new enum datatype");
+    }
+
+    // Loop over all the members and insert them into the datatype
+    for (const auto& member: members) {
+        if (H5Tenum_insert(_hid, member.name.c_str(), &(member.value)) < 0) {
+            HDF5ErrMapper::ToException<DataTypeException>(
+                "Could not add new member to this enum datatype");
+        }
+    }
+}
+
+template <typename T>
+inline void EnumType<T>::commit(const Object& object, const std::string& name) const {
+    H5Tcommit2(object.getId(), name.c_str(), getId(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+}
+
+namespace {
+
+inline hid_t create_string(size_t length) {
+    hid_t _hid = detail::h5t_copy(H5T_C_S1);
+    detail::h5t_set_size(_hid, length);
+    detail::h5t_set_cset(_hid, H5T_CSET_UTF8);
+    return _hid;
+}
+
+
+inline DataTypeClass convert_type_class(const H5T_class_t& tclass) {
+    switch (tclass) {
+    case H5T_TIME:
+        return DataTypeClass::Time;
+    case H5T_INTEGER:
+        return DataTypeClass::Integer;
+    case H5T_FLOAT:
+        return DataTypeClass::Float;
+    case H5T_STRING:
+        return DataTypeClass::String;
+    case H5T_BITFIELD:
+        return DataTypeClass::BitField;
+    case H5T_OPAQUE:
+        return DataTypeClass::Opaque;
+    case H5T_COMPOUND:
+        return DataTypeClass::Compound;
+    case H5T_REFERENCE:
+        return DataTypeClass::Reference;
+    case H5T_ENUM:
+        return DataTypeClass::Enum;
+    case H5T_VLEN:
+        return DataTypeClass::VarLen;
+    case H5T_ARRAY:
+        return DataTypeClass::Array;
+    case H5T_NO_CLASS:
+    case H5T_NCLASSES:
+    default:
+        return DataTypeClass::Invalid;
+    }
+}
+
+
+inline std::string type_class_string(DataTypeClass tclass) {
+    switch (tclass) {
+    case DataTypeClass::Time:
+        return "Time";
+    case DataTypeClass::Integer:
+        return "Integer";
+    case DataTypeClass::Float:
+        return "Float";
+    case DataTypeClass::String:
+        return "String";
+    case DataTypeClass::BitField:
+        return "BitField";
+    case DataTypeClass::Opaque:
+        return "Opaque";
+    case DataTypeClass::Compound:
+        return "Compound";
+    case DataTypeClass::Reference:
+        return "Reference";
+    case DataTypeClass::Enum:
+        return "Enum";
+    case DataTypeClass::VarLen:
+        return "Varlen";
+    case DataTypeClass::Array:
+        return "Array";
+    default:
+        return "(Invalid)";
+    }
+}
+
+}  // unnamed namespace
+
+
+/// \brief Create a DataType instance representing type T
+template <typename T>
+inline DataType create_datatype() {
+    return AtomicType<T>();
+}
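+// e.g. `create_datatype<int>()` returns a copy of H5T_NATIVE_INT through the
+// AtomicType<int> specialization above.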
+
+
+/// \brief Create a DataType instance representing type T and perform a sanity check on its size
+template <typename T>
+inline DataType create_and_check_datatype() {
+    DataType t = create_datatype<T>();
+    if (t.empty()) {
+        throw DataTypeException("Type given to create_and_check_datatype is not valid");
+    }
+
+    // Skip check if the base type is a variable length string
+    if (t.isVariableStr()) {
+        return t;
+    }
+
+    // Check that the size of the template type matches the size that HDF5 is
+    // expecting.
+    if (t.isReference() || t.isFixedLenStr()) {
+        return t;
+    }
+    if (sizeof(T) != t.getSize()) {
+        std::ostringstream ss;
+        ss << "Size of array type " << sizeof(T) << " != that of memory datatype " << t.getSize()
+           << std::endl;
+        throw DataTypeException(ss.str());
+    }
+
+    return t;
+}
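+// The final sizeof check guards against, e.g., a CompoundType whose registered
+// size differs from sizeof(T) because of differing padding assumptions.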
+
+}  // namespace HighFive
+HIGHFIVE_REGISTER_TYPE(HighFive::details::Boolean, HighFive::create_enum_boolean)
diff --git a/packages/HighFive/include/highfive/bits/H5Dataspace_misc.hpp b/packages/HighFive/include/highfive/bits/H5Dataspace_misc.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..0fdcacefdb7c5bbcdff0aa5dad6f7d16902d3571
--- /dev/null
+++ b/packages/HighFive/include/highfive/bits/H5Dataspace_misc.hpp
@@ -0,0 +1,154 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include <array>
+#include <initializer_list>
+#include <vector>
+#include <numeric>
+
+#include <H5Spublic.h>
+
+#include "H5Utils.hpp"
+#include "H5Converter_misc.hpp"
+
+namespace HighFive {
+
+inline DataSpace::DataSpace(const std::vector<size_t>& dims)
+    : DataSpace(dims.begin(), dims.end()) {}
+
+template <size_t N>
+inline DataSpace::DataSpace(const std::array<size_t, N>& dims)
+    : DataSpace(dims.begin(), dims.end()) {}
+
+inline DataSpace::DataSpace(const std::initializer_list<size_t>& items)
+    : DataSpace(std::vector<size_t>(items)) {}
+
+template <typename... Args>
+inline DataSpace::DataSpace(size_t dim1, Args... dims)
+    : DataSpace(std::vector<size_t>{dim1, static_cast<size_t>(dims)...}) {}
+
+template <class IT, typename>
+inline DataSpace::DataSpace(const IT begin, const IT end) {
+    std::vector<hsize_t> real_dims(begin, end);
+
+    if ((_hid = H5Screate_simple(int(real_dims.size()), real_dims.data(), NULL)) < 0) {
+        throw DataSpaceException("Impossible to create dataspace");
+    }
+}
+
+inline DataSpace::DataSpace(const std::vector<size_t>& dims, const std::vector<size_t>& maxdims) {
+    if (dims.size() != maxdims.size()) {
+        throw DataSpaceException("dims and maxdims must be the same length.");
+    }
+
+    std::vector<hsize_t> real_dims(dims.begin(), dims.end());
+    std::vector<hsize_t> real_maxdims(maxdims.begin(), maxdims.end());
+
+    // Replace unlimited flag with actual HDF one
+    std::replace(real_maxdims.begin(),
+                 real_maxdims.end(),
+                 static_cast<hsize_t>(DataSpace::UNLIMITED),
+                 H5S_UNLIMITED);
+
+    if ((_hid = H5Screate_simple(int(dims.size()), real_dims.data(), real_maxdims.data())) < 0) {
+        throw DataSpaceException("Impossible to create dataspace");
+    }
+}
+
+inline DataSpace::DataSpace(DataSpace::DataspaceType space_type) {
+    H5S_class_t h5_dataspace_type;
+    switch (space_type) {
+    case DataSpace::dataspace_scalar:
+        h5_dataspace_type = H5S_SCALAR;
+        break;
+    case DataSpace::dataspace_null:
+        h5_dataspace_type = H5S_NULL;
+        break;
+    default:
+        throw DataSpaceException(
+            "Invalid dataspace type: should be "
+            "dataspace_scalar or dataspace_null");
+    }
+
+    if ((_hid = H5Screate(h5_dataspace_type)) < 0) {
+        throw DataSpaceException("Unable to create dataspace");
+    }
+}
+
+inline DataSpace DataSpace::clone() const {
+    DataSpace res;
+    if ((res._hid = H5Scopy(_hid)) < 0) {
+        throw DataSpaceException("Unable to copy dataspace");
+    }
+    return res;
+}
+
+inline size_t DataSpace::getNumberDimensions() const {
+    const int ndim = H5Sget_simple_extent_ndims(_hid);
+    if (ndim < 0) {
+        HDF5ErrMapper::ToException<DataSetException>(
+            "Unable to get dataspace number of dimensions");
+    }
+    return size_t(ndim);
+}
+
+inline std::vector<size_t> DataSpace::getDimensions() const {
+    std::vector<hsize_t> dims(getNumberDimensions());
+    if (!dims.empty()) {
+        if (H5Sget_simple_extent_dims(_hid, dims.data(), NULL) < 0) {
+            HDF5ErrMapper::ToException<DataSetException>("Unable to get dataspace dimensions");
+        }
+    }
+    return details::to_vector_size_t(std::move(dims));
+}
+
+inline size_t DataSpace::getElementCount() const {
+    hssize_t nelements = H5Sget_simple_extent_npoints(_hid);
+    if (nelements < 0) {
+        HDF5ErrMapper::ToException<DataSetException>(
+            "Unable to get number of elements in dataspace");
+    }
+
+    return static_cast<size_t>(nelements);
+}
+
+inline std::vector<size_t> DataSpace::getMaxDimensions() const {
+    std::vector<hsize_t> maxdims(getNumberDimensions());
+    if (H5Sget_simple_extent_dims(_hid, NULL, maxdims.data()) < 0) {
+        HDF5ErrMapper::ToException<DataSetException>("Unable to get dataspace dimensions");
+    }
+
+    std::replace(maxdims.begin(),
+                 maxdims.end(),
+                 H5S_UNLIMITED,
+                 static_cast<hsize_t>(DataSpace::UNLIMITED));
+    return details::to_vector_size_t(maxdims);
+}
+
+template <typename T>
+inline DataSpace DataSpace::From(const T& value) {
+    auto dims = details::inspector<T>::getDimensions(value);
+    return DataSpace(dims);
+}
+
+template <std::size_t N, std::size_t Width>
+inline DataSpace DataSpace::FromCharArrayStrings(const char (&)[N][Width]) {
+    return DataSpace(N);
+}
+
+namespace details {
+
+/// dimension checks @internal
+inline bool checkDimensions(const DataSpace& mem_space, size_t n_dim_requested) {
+    return checkDimensions(mem_space.getDimensions(), n_dim_requested);
+}
+
+}  // namespace details
+}  // namespace HighFive
diff --git a/packages/HighFive/include/highfive/bits/H5Exception_misc.hpp b/packages/HighFive/include/highfive/bits/H5Exception_misc.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..f7382f2c2c5fcc01cd6b064ef6991d200a0ed8f9
--- /dev/null
+++ b/packages/HighFive/include/highfive/bits/H5Exception_misc.hpp
@@ -0,0 +1,62 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include <cstdlib>
+#include <sstream>
+
+#include <H5Epublic.h>
+
+namespace HighFive {
+
+struct HDF5ErrMapper {
+    template <typename ExceptionType>
+    static inline herr_t stackWalk(unsigned n, const H5E_error2_t* err_desc, void* client_data) {
+        auto** e_iter = static_cast<ExceptionType**>(client_data);
+        (void) n;
+
+        const char* major_err = H5Eget_major(err_desc->maj_num);
+        const char* minor_err = H5Eget_minor(err_desc->min_num);
+
+        std::ostringstream oss;
+        oss << '(' << major_err << ") " << minor_err;
+
+        H5free_memory((void*) major_err);
+        H5free_memory((void*) minor_err);
+
+        auto* e = new ExceptionType(oss.str());
+        e->_err_major = err_desc->maj_num;
+        e->_err_minor = err_desc->min_num;
+        (*e_iter)->_next.reset(e);
+        *e_iter = e;
+        return 0;
+    }
+
+    template <typename ExceptionType>
+    [[noreturn]] static inline void ToException(const std::string& prefix_msg) {
+        hid_t err_stack = H5Eget_current_stack();
+        if (err_stack >= 0) {
+            ExceptionType e("");
+            ExceptionType* e_iter = &e;
+
+            H5Ewalk2(err_stack, H5E_WALK_UPWARD, &HDF5ErrMapper::stackWalk<ExceptionType>, &e_iter);
+            H5Eclear2(err_stack);
+
+            const char* next_err_msg = (e.nextException() != NULL) ? (e.nextException()->what())
+                                                                   : ("");
+
+            e.setErrorMsg(prefix_msg + " " + next_err_msg);
+            throw e;
+        }
+        // throw generic error, unrecognized error
+        throw ExceptionType(prefix_msg + ": Unknown HDF5 error");
+    }
+};
+
+}  // namespace HighFive
diff --git a/packages/HighFive/include/highfive/bits/H5FileDriver_misc.hpp b/packages/HighFive/include/highfive/bits/H5FileDriver_misc.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..a6331bd5a9fc86a1ff182db46206fa4805b40a7d
--- /dev/null
+++ b/packages/HighFive/include/highfive/bits/H5FileDriver_misc.hpp
@@ -0,0 +1,20 @@
+/*
+ *  Copyright (c), 2017-2018, Adrien Devresse <adrien.devresse@epfl.ch>
+ *                            Juan Hernando <juan.hernando@epfl.ch>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+namespace HighFive {
+
+#ifdef H5_HAVE_PARALLEL
+inline MPIOFileDriver::MPIOFileDriver(MPI_Comm comm, MPI_Info info) {
+    add(MPIOFileAccess(comm, info));
+}
+#endif
+
+}  // namespace HighFive
diff --git a/packages/HighFive/include/highfive/bits/H5File_misc.hpp b/packages/HighFive/include/highfive/bits/H5File_misc.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..b90792a71218f9890bfcc51dc2cd78810f4c2968
--- /dev/null
+++ b/packages/HighFive/include/highfive/bits/H5File_misc.hpp
@@ -0,0 +1,148 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include <string>
+
+#include <H5Fpublic.h>
+
+#include "../H5Utility.hpp"
+#include "H5Utils.hpp"
+
+namespace HighFive {
+
+namespace {  // unnamed
+
+// libhdf5 uses a preprocessor trick on their oflags
+// we can not declare them constant without a mapper
+inline unsigned convert_open_flag(unsigned openFlags) {
+    unsigned res_open = 0;
+    if (openFlags & File::ReadOnly)
+        res_open |= H5F_ACC_RDONLY;
+    if (openFlags & File::ReadWrite)
+        res_open |= H5F_ACC_RDWR;
+    if (openFlags & File::Create)
+        res_open |= H5F_ACC_CREAT;
+    if (openFlags & File::Truncate)
+        res_open |= H5F_ACC_TRUNC;
+    if (openFlags & File::Excl)
+        res_open |= H5F_ACC_EXCL;
+    return res_open;
+}
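+// e.g. (File::ReadWrite | File::Create) maps to (H5F_ACC_RDWR | H5F_ACC_CREAT).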
+}  // namespace
+
+inline File::File(const std::string& filename,
+                  unsigned openFlags,
+                  const FileAccessProps& fileAccessProps)
+    : File(filename, openFlags, FileCreateProps::Default(), fileAccessProps) {}
+
+
+inline File::File(const std::string& filename,
+                  unsigned openFlags,
+                  const FileCreateProps& fileCreateProps,
+                  const FileAccessProps& fileAccessProps) {
+    openFlags = convert_open_flag(openFlags);
+
+    unsigned createMode = openFlags & (H5F_ACC_TRUNC | H5F_ACC_EXCL);
+    unsigned openMode = openFlags & (H5F_ACC_RDWR | H5F_ACC_RDONLY);
+    bool mustCreate = createMode > 0;
+    bool openOrCreate = (openFlags & H5F_ACC_CREAT) > 0;
+
+    // Opening is the default; it is skipped only if the flags require creation.
+    // If opening fails, creation is attempted when H5F_ACC_CREAT is set.
+    if (!mustCreate) {
+        // Silence open errors if create is allowed
+        std::unique_ptr<SilenceHDF5> silencer;
+        if (openOrCreate)
+            silencer.reset(new SilenceHDF5());
+
+        _hid = H5Fopen(filename.c_str(), openMode, fileAccessProps.getId());
+
+        if (isValid())
+            return;  // Done
+
+        if (openOrCreate) {
+            // Attempt to create the file, making sure we won't clobber an existing one
+            createMode = H5F_ACC_EXCL;
+        } else {
+            HDF5ErrMapper::ToException<FileException>(
+                std::string("Unable to open file " + filename));
+        }
+    }
+
+    auto fcpl = fileCreateProps.getId();
+    auto fapl = fileAccessProps.getId();
+    if ((_hid = H5Fcreate(filename.c_str(), createMode, fcpl, fapl)) < 0) {
+        HDF5ErrMapper::ToException<FileException>(std::string("Unable to create file " + filename));
+    }
+}
+
+inline const std::string& File::getName() const noexcept {
+    if (_filename.empty()) {
+        _filename = details::get_name(
+            [this](char* buffer, size_t length) { return H5Fget_name(getId(), buffer, length); });
+    }
+    return _filename;
+}
+
+inline hsize_t File::getMetadataBlockSize() const {
+    auto fapl = getAccessPropertyList();
+    return MetadataBlockSize(fapl).getSize();
+}
+
+inline std::pair<H5F_libver_t, H5F_libver_t> File::getVersionBounds() const {
+    auto fapl = getAccessPropertyList();
+    auto fileVer = FileVersionBounds(fapl);
+    return fileVer.getVersion();
+}
+
+#if H5_VERSION_GE(1, 10, 1)
+inline H5F_fspace_strategy_t File::getFileSpaceStrategy() const {
+    auto fcpl = getCreatePropertyList();
+    FileSpaceStrategy spaceStrategy(fcpl);
+    return spaceStrategy.getStrategy();
+}
+
+inline hsize_t File::getFileSpacePageSize() const {
+    auto fcpl = getCreatePropertyList();
+
+    if (getFileSpaceStrategy() != H5F_FSPACE_STRATEGY_PAGE) {
+        HDF5ErrMapper::ToException<FileException>(
+            std::string("Cannot obtain page size as paged allocation is not used."));
+    }
+
+    return FileSpacePageSize(fcpl).getPageSize();
+}
+#endif
+
+inline void File::flush() {
+    if (H5Fflush(_hid, H5F_SCOPE_GLOBAL) < 0) {
+        HDF5ErrMapper::ToException<FileException>(std::string("Unable to flush file " + getName()));
+    }
+}
+
+inline size_t File::getFileSize() const {
+    hsize_t sizeValue = 0;
+    if (H5Fget_filesize(_hid, &sizeValue) < 0) {
+        HDF5ErrMapper::ToException<FileException>(
+            std::string("Unable to retrieve size of file " + getName()));
+    }
+    return static_cast<size_t>(sizeValue);
+}
+
+inline size_t File::getFreeSpace() const {
+    hssize_t unusedSize = H5Fget_freespace(_hid);
+    if (unusedSize < 0) {
+        HDF5ErrMapper::ToException<FileException>(
+            std::string("Unable to retrieve unused space of file " + getName()));
+    }
+    return static_cast<size_t>(unusedSize);
+}
+
+}  // namespace HighFive
diff --git a/packages/HighFive/include/highfive/bits/H5Friends.hpp b/packages/HighFive/include/highfive/bits/H5Friends.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..d19125c7f5fa4f59c935bc24441a6eaabb53513a
--- /dev/null
+++ b/packages/HighFive/include/highfive/bits/H5Friends.hpp
@@ -0,0 +1,10 @@
+#pragma once
+
+#ifndef HIGHFIVE_HAS_FRIEND_DECLARATIONS
+#ifdef _MSC_VER
+// This prevents a compiler bug on certain versions of MSVC.
+// Known to fail: Toolset 141.
+// See `CMakeLists.txt` for more information.
+#define HIGHFIVE_HAS_FRIEND_DECLARATIONS 1
+#endif
+#endif
\ No newline at end of file
diff --git a/packages/HighFive/include/highfive/bits/H5Inspector_misc.hpp b/packages/HighFive/include/highfive/bits/H5Inspector_misc.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..05ed6bc3ec0250b18e0ce4311ff710e703a6fca9
--- /dev/null
+++ b/packages/HighFive/include/highfive/bits/H5Inspector_misc.hpp
@@ -0,0 +1,858 @@
+/*
+ *  Copyright (c) 2022 Blue Brain Project
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+
+#pragma once
+
+#include <type_traits>
+#include <cstring>
+#include <cassert>
+#include <vector>
+#include <array>
+#include <string>
+#include <numeric>
+
+#include "../H5Reference.hpp"
+
+#include "string_padding.hpp"
+
+#ifdef H5_USE_BOOST
+#include <boost/multi_array.hpp>
+// starting with Boost 1.64, the serialization header must come before ublas
+#include <boost/serialization/vector.hpp>
+#include <boost/numeric/ublas/matrix.hpp>
+#endif
+#ifdef H5_USE_EIGEN
+#include <Eigen/Eigen>
+#endif
+
+
+namespace HighFive {
+
+namespace details {
+
+inline bool checkDimensions(const std::vector<size_t>& dims, size_t n_dim_requested) {
+    size_t n_dim_actual = dims.size();
+
+    // We allow reading a scalar from shapes like `(1, 1, 1)`.
+    if (n_dim_requested == 0) {
+        if (n_dim_actual == 0ul) {
+            return true;
+        }
+
+        return size_t(std::count(dims.begin(), dims.end(), 1ul)) == n_dim_actual;
+    }
+
+    // For non-scalar datasets, we can squeeze away singleton dimensions, but
+    // we never add any.
+    if (n_dim_actual < n_dim_requested) {
+        return false;
+    }
+
+    // Special case for 1-dimensional arrays, which can squeeze `1`s from either
+    // side simultaneously if needed.
+    if (n_dim_requested == 1ul) {
+        return n_dim_actual >= 1ul &&
+               size_t(std::count(dims.begin(), dims.end(), 1ul)) >= n_dim_actual - 1ul;
+    }
+
+    // All other cases squeeze trailing singleton dimensions only (the leading
+    // dimensions are kept). This avoids unstable behaviour when squeezing
+    // singleton dimensions.
+    size_t n_dim_excess = n_dim_actual - n_dim_requested;
+
+    bool squeeze_back = true;
+    for (size_t i = 1; i <= n_dim_excess; ++i) {
+        if (dims[n_dim_actual - i] != 1) {
+            squeeze_back = false;
+            break;
+        }
+    }
+
+    return squeeze_back;
+}
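+// For example, following the rules above:
+//   checkDimensions({1, 1, 3}, 1) == true    (squeezes to {3})
+//   checkDimensions({3, 1, 2}, 2) == false   (trailing dimension is not 1)
+//   checkDimensions({1, 1}, 0) == true       (all-singleton shape reads as scalar)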
+
+
+inline std::vector<size_t> squeezeDimensions(const std::vector<size_t>& dims,
+                                             size_t n_dim_requested) {
+    auto format_error_message = [&]() -> std::string {
+        return "Can't interpret dims = " + format_vector(dims) + " as " +
+               std::to_string(n_dim_requested) + "-dimensional.";
+    };
+
+    if (n_dim_requested == 0) {
+        if (!checkDimensions(dims, n_dim_requested)) {
+            throw std::invalid_argument(format_error_message());
+        }
+
+        return {1ul};
+    }
+
+    auto n_dim = dims.size();
+    if (n_dim < n_dim_requested) {
+        throw std::invalid_argument(format_error_message());
+    }
+
+    if (n_dim_requested == 1ul) {
+        size_t non_singleton_dim = size_t(-1);
+        for (size_t i = 0; i < n_dim; ++i) {
+            if (dims[i] != 1ul) {
+                if (non_singleton_dim == size_t(-1)) {
+                    non_singleton_dim = i;
+                } else {
+                    throw std::invalid_argument(format_error_message());
+                }
+            }
+        }
+
+        return {dims[std::min(non_singleton_dim, n_dim - 1)]};
+    }
+
+    size_t n_dim_excess = dims.size() - n_dim_requested;
+    for (size_t i = 1; i <= n_dim_excess; ++i) {
+        if (dims[n_dim - i] != 1) {
+            throw std::invalid_argument(format_error_message());
+        }
+    }
+
+    return std::vector<size_t>(dims.begin(),
+                               dims.end() - static_cast<std::ptrdiff_t>(n_dim_excess));
+}
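+// For example: squeezeDimensions({1, 1, 3}, 1) == {3},
+// squeezeDimensions({2, 3, 1}, 2) == {2, 3}, and
+// squeezeDimensions({1, 1}, 0) == {1} (scalar).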
+}  // namespace details
+
+
+inline size_t compute_total_size(const std::vector<size_t>& dims) {
+    return std::accumulate(dims.begin(), dims.end(), size_t{1u}, std::multiplies<size_t>());
+}
+
+template <typename T>
+using unqualified_t = typename std::remove_const<typename std::remove_reference<T>::type>::type;
+
+/*****
+inspector<T> {
+    using type = T
+    // base_type is the base type inside C++ (e.g. std::vector<int> => int)
+    using base_type
+    // hdf5_type is the base type read by HDF5 (a C type) (e.g. std::vector<std::string> => const char*)
+    using hdf5_type
+
+    // Number of dimensions starting from here
+    static constexpr size_t recursive_ndim
+    // Whether the inner type is trivially copyable, for optimisation
+    // If this value is true: data() is mandatory
+    // If this value is false: getSizeVal, getSize, serialize and unserialize are mandatory
+    static constexpr bool is_trivially_copyable
+
+    // Reading:
+    // Allocate the value following dims (should be recursive)
+    static void prepare(type& val, const std::vector<std::size_t> dims)
+    // Return the size of the vector passed to/from hdf5, computed from a vector of dims
+    static size_t getSize(const std::vector<size_t>& dims)
+    // Return a pointer to the first value of val (for reading)
+    static hdf5_type* data(type& val)
+    // Take a serialized vector 'in' and some dims, and copy the values into val (for reading)
+    static void unserialize(const hdf5_type* in, const std::vector<size_t>& dims, type& val)
+
+
+    // Writing:
+    // Return the size of the vector passed to/from hdf5, computed from a value
+    static size_t getSizeVal(const type& val)
+    // Return a pointer to the first value of val
+    static const hdf5_type* data(const type& val)
+    // Serialize val into 'out'
+    static void serialize(const type& val, hdf5_type* out)
+    // Return an array of dimensions of the space needed for writing val
+    static std::vector<size_t> getDimensions(const type& val)
+}
+*****/
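+//
+// For instance, following the definitions below: with
+// T = std::vector<std::vector<int>>, inspector<T>::base_type is int,
+// recursive_ndim is 2, and getDimensions({{1, 2, 3}, {4, 5, 6}}) returns {2, 3}.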
+
+
+namespace details {
+template <typename T>
+struct type_helper {
+    using type = unqualified_t<T>;
+    using base_type = unqualified_t<T>;
+    using hdf5_type = base_type;
+
+    static constexpr size_t ndim = 0;
+    static constexpr size_t recursive_ndim = ndim;
+    static constexpr bool is_trivially_copyable = std::is_trivially_copyable<type>::value;
+
+    static std::vector<size_t> getDimensions(const type& /* val */) {
+        return {};
+    }
+
+    static size_t getSizeVal(const type& val) {
+        return compute_total_size(getDimensions(val));
+    }
+
+    static size_t getSize(const std::vector<size_t>& dims) {
+        return compute_total_size(dims);
+    }
+
+    static void prepare(type& /* val */, const std::vector<size_t>& /* dims */) {}
+
+    static hdf5_type* data(type& val) {
+        static_assert(is_trivially_copyable, "The type is not trivially copyable");
+        return &val;
+    }
+
+    static const hdf5_type* data(const type& val) {
+        static_assert(is_trivially_copyable, "The type is not trivially copyable");
+        return &val;
+    }
+
+    static void serialize(const type& val, hdf5_type* m) {
+        static_assert(is_trivially_copyable, "The type is not trivially copyable");
+        *m = val;
+    }
+
+    static void unserialize(const hdf5_type* vec,
+                            const std::vector<size_t>& /* dims */,
+                            type& val) {
+        static_assert(is_trivially_copyable, "The type is not trivially copyable");
+        val = vec[0];
+    }
+};
+
+template <typename T>
+struct inspector: type_helper<T> {};
+
+enum class Boolean : int8_t {
+    HighFiveFalse = 0,
+    HighFiveTrue = 1,
+};
+
+template <>
+struct inspector<bool>: type_helper<bool> {
+    using base_type = Boolean;
+    using hdf5_type = int8_t;
+
+    static constexpr bool is_trivially_copyable = false;
+
+    static hdf5_type* data(type& /* val */) {
+        throw DataSpaceException("A boolean cannot be read directly.");
+    }
+
+    static const hdf5_type* data(const type& /* val */) {
+        throw DataSpaceException("A boolean cannot be written directly.");
+    }
+
+    static void unserialize(const hdf5_type* vec,
+                            const std::vector<size_t>& /* dims */,
+                            type& val) {
+        val = vec[0] != 0;
+    }
+
+    static void serialize(const type& val, hdf5_type* m) {
+        *m = val ? 1 : 0;
+    }
+};
+
+template <>
+struct inspector<std::string>: type_helper<std::string> {
+    using hdf5_type = const char*;
+
+    static hdf5_type* data(type& /* val */) {
+        throw DataSpaceException("A std::string cannot be read directly.");
+    }
+
+    static const hdf5_type* data(const type& /* val */) {
+        throw DataSpaceException("A std::string cannot be written directly.");
+    }
+
+    template <class It>
+    static void serialize(const type& val, It m) {
+        (*m).assign(val.data(), val.size(), StringPadding::NullTerminated);
+    }
+
+    template <class It>
+    static void unserialize(const It& vec, const std::vector<size_t>& /* dims */, type& val) {
+        const auto& view = *vec;
+        val.assign(view.data(), view.length());
+    }
+};
+
+template <>
+struct inspector<Reference>: type_helper<Reference> {
+    using hdf5_type = hobj_ref_t;
+
+    static constexpr bool is_trivially_copyable = false;
+
+    static hdf5_type* data(type& /* val */) {
+        throw DataSpaceException("A Reference cannot be read directly.");
+    }
+
+    static const hdf5_type* data(const type& /* val */) {
+        throw DataSpaceException("A Reference cannot be written directly.");
+    }
+
+    static void serialize(const type& val, hdf5_type* m) {
+        hobj_ref_t ref;
+        val.create_ref(&ref);
+        *m = ref;
+    }
+
+    static void unserialize(const hdf5_type* vec,
+                            const std::vector<size_t>& /* dims */,
+                            type& val) {
+        val = type{vec[0]};
+    }
+};
+
+template <size_t N>
+struct inspector<FixedLenStringArray<N>> {
+    using type = FixedLenStringArray<N>;
+    using value_type = char*;
+    using base_type = FixedLenStringArray<N>;
+    using hdf5_type = char;
+
+    static constexpr size_t ndim = 1;
+    static constexpr size_t recursive_ndim = ndim;
+    static constexpr bool is_trivially_copyable = false;
+
+    static std::vector<size_t> getDimensions(const type& val) {
+        return std::vector<size_t>{val.size()};
+    }
+
+    static size_t getSizeVal(const type& val) {
+        return N * compute_total_size(getDimensions(val));
+    }
+
+    static size_t getSize(const std::vector<size_t>& dims) {
+        return N * compute_total_size(dims);
+    }
+
+    static void prepare(type& /* val */, const std::vector<size_t>& dims) {
+        if (dims[0] > N) {
+            std::ostringstream os;
+            os << "Size of FixedlenStringArray (" << N << ") is too small for dims (" << dims[0]
+               << ").";
+            throw DataSpaceException(os.str());
+        }
+    }
+
+    static hdf5_type* data(type& val) {
+        return val.data();
+    }
+
+    static const hdf5_type* data(const type& val) {
+        return val.data();
+    }
+
+    static void serialize(const type& val, hdf5_type* m) {
+        for (size_t i = 0; i < val.size(); ++i) {
+            std::memcpy(m + i * N, val[i], N);
+        }
+    }
+
+    static void unserialize(const hdf5_type* vec, const std::vector<size_t>& dims, type& val) {
+        for (size_t i = 0; i < dims[0]; ++i) {
+            std::array<char, N> s;
+            std::memcpy(s.data(), vec + (i * N), N);
+            val.push_back(s);
+        }
+    }
+};
+
+template <typename T>
+struct inspector<std::vector<T>> {
+    using type = std::vector<T>;
+    using value_type = unqualified_t<T>;
+    using base_type = typename inspector<value_type>::base_type;
+    using hdf5_type = typename inspector<value_type>::hdf5_type;
+
+    static constexpr size_t ndim = 1;
+    static constexpr size_t recursive_ndim = ndim + inspector<value_type>::recursive_ndim;
+    static constexpr bool is_trivially_copyable = std::is_trivially_copyable<value_type>::value &&
+                                                  inspector<value_type>::is_trivially_copyable;
+
+    static std::vector<size_t> getDimensions(const type& val) {
+        std::vector<size_t> sizes(recursive_ndim, 1ul);
+        sizes[0] = val.size();
+        if (!val.empty()) {
+            auto s = inspector<value_type>::getDimensions(val[0]);
+            assert(s.size() + ndim == sizes.size());
+            for (size_t i = 0; i < s.size(); ++i) {
+                sizes[i + ndim] = s[i];
+            }
+        }
+        return sizes;
+    }
+
+    static size_t getSizeVal(const type& val) {
+        return compute_total_size(getDimensions(val));
+    }
+
+    static size_t getSize(const std::vector<size_t>& dims) {
+        return compute_total_size(dims);
+    }
+
+    static void prepare(type& val, const std::vector<size_t>& dims) {
+        val.resize(dims[0]);
+        std::vector<size_t> next_dims(dims.begin() + 1, dims.end());
+        for (auto&& e: val) {
+            inspector<value_type>::prepare(e, next_dims);
+        }
+    }
+
+    static hdf5_type* data(type& val) {
+        return inspector<value_type>::data(val[0]);
+    }
+
+    static const hdf5_type* data(const type& val) {
+        return inspector<value_type>::data(val[0]);
+    }
+
+    template <class It>
+    static void serialize(const type& val, It m) {
+        size_t subsize = inspector<value_type>::getSizeVal(val[0]);
+        for (auto&& e: val) {
+            inspector<value_type>::serialize(e, m);
+            m += subsize;
+        }
+    }
+
+    template <class It>
+    static void unserialize(const It& vec_align, const std::vector<size_t>& dims, type& val) {
+        std::vector<size_t> next_dims(dims.begin() + 1, dims.end());
+        size_t next_size = compute_total_size(next_dims);
+        for (size_t i = 0; i < dims[0]; ++i) {
+            inspector<value_type>::unserialize(vec_align + i * next_size, next_dims, val[i]);
+        }
+    }
+};
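+
+// Illustrative sketch (not upstream code): the recursive inspector flattens
+// nested dimensions. Assuming inspector<double>::recursive_ndim == 0:
+//
+//   std::vector<std::vector<double>> v{{1., 2., 3.}, {4., 5., 6.}};
+//   // inspector<decltype(v)>::recursive_ndim   == 2
+//   // inspector<decltype(v)>::getDimensions(v) == {2, 3}
+//   // inspector<decltype(v)>::getSizeVal(v)    == 6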
+
+template <>
+struct inspector<std::vector<bool>> {
+    using type = std::vector<bool>;
+    using value_type = bool;
+    using base_type = Boolean;
+    using hdf5_type = uint8_t;
+
+    static constexpr size_t ndim = 1;
+    static constexpr size_t recursive_ndim = ndim;
+    static constexpr bool is_trivially_copyable = false;
+
+    static std::vector<size_t> getDimensions(const type& val) {
+        std::vector<size_t> sizes{val.size()};
+        return sizes;
+    }
+
+    static size_t getSizeVal(const type& val) {
+        return val.size();
+    }
+
+    static size_t getSize(const std::vector<size_t>& dims) {
+        if (dims.size() > 1) {
+            throw DataSpaceException("std::vector<bool> is only 1 dimension.");
+        }
+        return dims[0];
+    }
+
+    static void prepare(type& val, const std::vector<size_t>& dims) {
+        if (dims.size() > 1) {
+            throw DataSpaceException("std::vector<bool> is only 1 dimension.");
+        }
+        val.resize(dims[0]);
+    }
+
+    static hdf5_type* data(type& /* val */) {
+        throw DataSpaceException("A std::vector<bool> cannot be read directly.");
+    }
+
+    static const hdf5_type* data(const type& /* val */) {
+        throw DataSpaceException("A std::vector<bool> cannot be written directly.");
+    }
+
+    static void serialize(const type& val, hdf5_type* m) {
+        for (size_t i = 0; i < val.size(); ++i) {
+            m[i] = val[i] ? 1 : 0;
+        }
+    }
+
+    static void unserialize(const hdf5_type* vec_align,
+                            const std::vector<size_t>& dims,
+                            type& val) {
+        for (size_t i = 0; i < dims[0]; ++i) {
+            val[i] = vec_align[i] != 0;
+        }
+    }
+};
+
+template <typename T, size_t N>
+struct inspector<std::array<T, N>> {
+    using type = std::array<T, N>;
+    using value_type = unqualified_t<T>;
+    using base_type = typename inspector<value_type>::base_type;
+    using hdf5_type = typename inspector<value_type>::hdf5_type;
+
+    static constexpr size_t ndim = 1;
+    static constexpr size_t recursive_ndim = ndim + inspector<value_type>::recursive_ndim;
+    static constexpr bool is_trivially_copyable = std::is_trivially_copyable<value_type>::value &&
+                                                  sizeof(type) == N * sizeof(T) &&
+                                                  inspector<value_type>::is_trivially_copyable;
+
+    static std::vector<size_t> getDimensions(const type& val) {
+        std::vector<size_t> sizes{N};
+        if (!val.empty()) {
+            auto s = inspector<value_type>::getDimensions(val[0]);
+            sizes.insert(sizes.end(), s.begin(), s.end());
+        }
+        return sizes;
+    }
+
+    static size_t getSizeVal(const type& val) {
+        return compute_total_size(getDimensions(val));
+    }
+
+    static size_t getSize(const std::vector<size_t>& dims) {
+        return compute_total_size(dims);
+    }
+
+    static void prepare(type& /* val */, const std::vector<size_t>& dims) {
+        if (dims[0] > N) {
+            std::ostringstream os;
+            os << "Size of std::array (" << N << ") is too small for dims (" << dims[0] << ").";
+            throw DataSpaceException(os.str());
+        }
+    }
+
+    static hdf5_type* data(type& val) {
+        return inspector<value_type>::data(val[0]);
+    }
+
+    static const hdf5_type* data(const type& val) {
+        return inspector<value_type>::data(val[0]);
+    }
+
+    template <class It>
+    static void serialize(const type& val, It m) {
+        size_t subsize = inspector<value_type>::getSizeVal(val[0]);
+        for (auto& e: val) {
+            inspector<value_type>::serialize(e, m);
+            m += subsize;
+        }
+    }
+
+    template <class It>
+    static void unserialize(const It& vec_align, const std::vector<size_t>& dims, type& val) {
+        if (dims[0] != N) {
+            std::ostringstream os;
+            os << "Impossible to pair DataSet with " << dims[0] << " elements into an array with "
+               << N << " elements.";
+            throw DataSpaceException(os.str());
+        }
+        std::vector<size_t> next_dims(dims.begin() + 1, dims.end());
+        size_t next_size = compute_total_size(next_dims);
+        for (size_t i = 0; i < dims[0]; ++i) {
+            inspector<value_type>::unserialize(vec_align + i * next_size, next_dims, val[i]);
+        }
+    }
+};
+
+// Cannot be used for reading
+template <typename T>
+struct inspector<T*> {
+    using type = T*;
+    using value_type = unqualified_t<T>;
+    using base_type = typename inspector<value_type>::base_type;
+    using hdf5_type = typename inspector<value_type>::hdf5_type;
+
+    static constexpr size_t ndim = 1;
+    static constexpr size_t recursive_ndim = ndim + inspector<value_type>::recursive_ndim;
+    static constexpr bool is_trivially_copyable = std::is_trivially_copyable<value_type>::value &&
+                                                  inspector<value_type>::is_trivially_copyable;
+
+    static size_t getSizeVal(const type& /* val */) {
+        throw DataSpaceException("Not possible to get the size of a T*");
+    }
+
+    static std::vector<size_t> getDimensions(const type& /* val */) {
+        throw DataSpaceException("Not possible to get the dimensions of a T*");
+    }
+
+    static const hdf5_type* data(const type& val) {
+        return reinterpret_cast<const hdf5_type*>(val);
+    }
+
+    /* it works because only T[][][] is currently supported;
+       we will fix it one day */
+    static void serialize(const type& /* val */, hdf5_type* /* m */) {
+        throw DataSpaceException("Not possible to serialize a T*");
+    }
+};
+
+// Cannot be used for reading
+template <typename T, size_t N>
+struct inspector<T[N]> {
+    using type = T[N];
+    using value_type = unqualified_t<T>;
+    using base_type = typename inspector<value_type>::base_type;
+    using hdf5_type = typename inspector<value_type>::hdf5_type;
+
+    static constexpr size_t ndim = 1;
+    static constexpr size_t recursive_ndim = ndim + inspector<value_type>::recursive_ndim;
+    static constexpr bool is_trivially_copyable = std::is_trivially_copyable<value_type>::value &&
+                                                  inspector<value_type>::is_trivially_copyable;
+
+    static size_t getSizeVal(const type& val) {
+        return compute_total_size(getDimensions(val));
+    }
+
+    static std::vector<size_t> getDimensions(const type& val) {
+        std::vector<size_t> sizes{N};
+        if (N > 0) {
+            auto s = inspector<value_type>::getDimensions(val[0]);
+            sizes.insert(sizes.end(), s.begin(), s.end());
+        }
+        return sizes;
+    }
+
+    static const hdf5_type* data(const type& val) {
+        return inspector<value_type>::data(val[0]);
+    }
+
+    /* it works because only T[][][] is currently supported;
+       we will fix it one day */
+    static void serialize(const type& val, hdf5_type* m) {
+        size_t subsize = inspector<value_type>::getSizeVal(val[0]);
+        for (size_t i = 0; i < N; ++i) {
+            inspector<value_type>::serialize(val[i], m + i * subsize);
+        }
+    }
+};
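+
+// Hedged usage note (a sketch, `dset` is a hypothetical DataSet): raw pointers
+// and arrays back writes only, e.g.
+//   double raw[4] = {1., 2., 3., 4.};
+//   dset.write(raw);   // serialized through inspector<double[4]>
+// reading back into `raw` is not supported: these inspectors provide no
+// prepare()/unserialize().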
+
+#ifdef H5_USE_EIGEN
+template <typename T, int M, int N>
+struct inspector<Eigen::Matrix<T, M, N>> {
+    using type = Eigen::Matrix<T, M, N>;
+    using value_type = T;
+    using base_type = typename inspector<value_type>::base_type;
+    using hdf5_type = base_type;
+
+    static constexpr size_t ndim = 2;
+    static constexpr size_t recursive_ndim = ndim + inspector<value_type>::recursive_ndim;
+    static constexpr bool is_trivially_copyable = std::is_trivially_copyable<value_type>::value &&
+                                                  inspector<value_type>::is_trivially_copyable;
+
+
+    static void assert_not_buggy(Eigen::Index nrows, Eigen::Index ncols) {
+        if (nrows > 1 && ncols > 1) {
+            throw std::runtime_error(
+                "HighFive has been broken for Eigen::Matrix. Please check "
+                "https://github.com/BlueBrain/HighFive/issues/532.");
+        }
+    }
+
+    static std::vector<size_t> getDimensions(const type& val) {
+        assert_not_buggy(val.rows(), val.cols());
+
+        std::vector<size_t> sizes{static_cast<size_t>(val.rows()), static_cast<size_t>(val.cols())};
+        auto s = inspector<value_type>::getDimensions(val.data()[0]);
+        sizes.insert(sizes.end(), s.begin(), s.end());
+        return sizes;
+    }
+
+    static size_t getSizeVal(const type& val) {
+        return compute_total_size(getDimensions(val));
+    }
+
+    static size_t getSize(const std::vector<size_t>& dims) {
+        return compute_total_size(dims);
+    }
+
+    static void prepare(type& val, const std::vector<size_t>& dims) {
+        if (dims[0] != static_cast<size_t>(val.rows()) ||
+            dims[1] != static_cast<size_t>(val.cols())) {
+            val.resize(static_cast<typename type::Index>(dims[0]),
+                       static_cast<typename type::Index>(dims[1]));
+        }
+
+        assert_not_buggy(val.rows(), val.cols());
+    }
+
+    static hdf5_type* data(type& val) {
+        assert_not_buggy(val.rows(), val.cols());
+        return inspector<value_type>::data(*val.data());
+    }
+
+    static const hdf5_type* data(const type& val) {
+        assert_not_buggy(val.rows(), val.cols());
+        return inspector<value_type>::data(*val.data());
+    }
+
+    static void serialize(const type& val, hdf5_type* m) {
+        assert_not_buggy(val.rows(), val.cols());
+        std::memcpy(m, val.data(), static_cast<size_t>(val.size()) * sizeof(hdf5_type));
+    }
+
+    static void unserialize(const hdf5_type* vec_align,
+                            const std::vector<size_t>& dims,
+                            type& val) {
+        assert_not_buggy(val.rows(), val.cols());
+        if (dims.size() < 2) {
+            std::ostringstream os;
+            os << "Impossible to pair DataSet with " << dims.size()
+               << " dimensions into an eigen-matrix.";
+            throw DataSpaceException(os.str());
+        }
+        std::memcpy(val.data(), vec_align, compute_total_size(dims) * sizeof(hdf5_type));
+    }
+};
+#endif
+
+#ifdef H5_USE_BOOST
+template <typename T, size_t Dims>
+struct inspector<boost::multi_array<T, Dims>> {
+    using type = boost::multi_array<T, Dims>;
+    using value_type = T;
+    using base_type = typename inspector<value_type>::base_type;
+    using hdf5_type = typename inspector<value_type>::hdf5_type;
+
+    static constexpr size_t ndim = Dims;
+    static constexpr size_t recursive_ndim = ndim + inspector<value_type>::recursive_ndim;
+    static constexpr bool is_trivially_copyable = std::is_trivially_copyable<value_type>::value &&
+                                                  inspector<value_type>::is_trivially_copyable;
+
+    static std::vector<size_t> getDimensions(const type& val) {
+        std::vector<size_t> sizes;
+        for (size_t i = 0; i < ndim; ++i) {
+            sizes.push_back(val.shape()[i]);
+        }
+        auto s = inspector<value_type>::getDimensions(val.data()[0]);
+        sizes.insert(sizes.end(), s.begin(), s.end());
+        return sizes;
+    }
+
+    static size_t getSizeVal(const type& val) {
+        return compute_total_size(getDimensions(val));
+    }
+
+    static size_t getSize(const std::vector<size_t>& dims) {
+        return compute_total_size(dims);
+    }
+
+    static void prepare(type& val, const std::vector<size_t>& dims) {
+        if (dims.size() < ndim) {
+            std::ostringstream os;
+            os << "Only '" << dims.size() << "' given but boost::multi_array is of size '" << ndim
+               << "'.";
+            throw DataSpaceException(os.str());
+        }
+        boost::array<typename type::index, Dims> ext;
+        std::copy(dims.begin(), dims.begin() + ndim, ext.begin());
+        val.resize(ext);
+        std::vector<size_t> next_dims(dims.begin() + Dims, dims.end());
+        std::size_t size = std::accumulate(dims.begin(),
+                                           dims.begin() + Dims,
+                                           std::size_t{1},
+                                           std::multiplies<size_t>());
+        for (size_t i = 0; i < size; ++i) {
+            inspector<value_type>::prepare(*(val.origin() + i), next_dims);
+        }
+    }
+
+    static hdf5_type* data(type& val) {
+        return inspector<value_type>::data(*val.data());
+    }
+
+    static const hdf5_type* data(const type& val) {
+        return inspector<value_type>::data(*val.data());
+    }
+
+    template <class It>
+    static void serialize(const type& val, It m) {
+        size_t size = val.num_elements();
+        size_t subsize = inspector<value_type>::getSizeVal(*val.origin());
+        for (size_t i = 0; i < size; ++i) {
+            inspector<value_type>::serialize(*(val.origin() + i), m + i * subsize);
+        }
+    }
+
+    template <class It>
+    static void unserialize(It vec_align, const std::vector<size_t>& dims, type& val) {
+        std::vector<size_t> next_dims(dims.begin() + ndim, dims.end());
+        size_t subsize = compute_total_size(next_dims);
+        for (size_t i = 0; i < val.num_elements(); ++i) {
+            inspector<value_type>::unserialize(vec_align + i * subsize,
+                                               next_dims,
+                                               *(val.origin() + i));
+        }
+    }
+};
+
+template <typename T>
+struct inspector<boost::numeric::ublas::matrix<T>> {
+    using type = boost::numeric::ublas::matrix<T>;
+    using value_type = unqualified_t<T>;
+    using base_type = typename inspector<value_type>::base_type;
+    using hdf5_type = typename inspector<value_type>::hdf5_type;
+
+    static constexpr size_t ndim = 2;
+    static constexpr size_t recursive_ndim = ndim + inspector<value_type>::recursive_ndim;
+    static constexpr bool is_trivially_copyable = std::is_trivially_copyable<value_type>::value &&
+                                                  inspector<value_type>::is_trivially_copyable;
+
+    static std::vector<size_t> getDimensions(const type& val) {
+        std::vector<size_t> sizes{val.size1(), val.size2()};
+        auto s = inspector<value_type>::getDimensions(val(0, 0));
+        sizes.insert(sizes.end(), s.begin(), s.end());
+        return sizes;
+    }
+
+    static size_t getSizeVal(const type& val) {
+        return compute_total_size(getDimensions(val));
+    }
+
+    static size_t getSize(const std::vector<size_t>& dims) {
+        return compute_total_size(dims);
+    }
+
+    static void prepare(type& val, const std::vector<size_t>& dims) {
+        if (dims.size() < ndim) {
+            std::ostringstream os;
+            os << "Impossible to pair DataSet with " << dims.size() << " dimensions into a " << ndim
+               << " boost::numeric::ublas::matrix";
+            throw DataSpaceException(os.str());
+        }
+        val.resize(dims[0], dims[1], false);
+    }
+
+    static hdf5_type* data(type& val) {
+        return inspector<value_type>::data(val(0, 0));
+    }
+
+    static const hdf5_type* data(const type& val) {
+        return inspector<value_type>::data(val(0, 0));
+    }
+
+    static void serialize(const type& val, hdf5_type* m) {
+        size_t size = val.size1() * val.size2();
+        size_t subsize = inspector<value_type>::getSizeVal(val(0, 0));
+        for (size_t i = 0; i < size; ++i) {
+            inspector<value_type>::serialize(*(&val(0, 0) + i), m + i * subsize);
+        }
+    }
+
+    static void unserialize(const hdf5_type* vec_align,
+                            const std::vector<size_t>& dims,
+                            type& val) {
+        std::vector<size_t> next_dims(dims.begin() + ndim, dims.end());
+        size_t subsize = compute_total_size(next_dims);
+        size_t size = val.size1() * val.size2();
+        for (size_t i = 0; i < size; ++i) {
+            inspector<value_type>::unserialize(vec_align + i * subsize,
+                                               next_dims,
+                                               *(&val(0, 0) + i));
+        }
+    }
+};
+#endif
+
+}  // namespace details
+}  // namespace HighFive
diff --git a/packages/HighFive/include/highfive/bits/H5Iterables_misc.hpp b/packages/HighFive/include/highfive/bits/H5Iterables_misc.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..38ebda9ab5e09eddef1a732e4f53d25a76746a59
--- /dev/null
+++ b/packages/HighFive/include/highfive/bits/H5Iterables_misc.hpp
@@ -0,0 +1,54 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include <exception>
+#include <string>
+#include <vector>
+
+#include <H5Ipublic.h>
+
+namespace HighFive {
+
+namespace details {
+
+// iterator for H5 iterate
+
+struct HighFiveIterateData {
+    inline HighFiveIterateData(std::vector<std::string>& my_names)
+        : names(my_names)
+        , err(nullptr) {}
+
+    std::vector<std::string>& names;
+    std::exception* err;
+
+    inline void throwIfError() {
+        if (err) {
+            throw *err;
+        }
+    }
+};
+
+template <typename InfoType>
+inline herr_t internal_high_five_iterate(hid_t /*id*/,
+                                         const char* name,
+                                         const InfoType* /*info*/,
+                                         void* op_data) {
+    auto* data = static_cast<HighFiveIterateData*>(op_data);
+    try {
+        data->names.emplace_back(name);
+        return 0;
+    } catch (...) {
+        data->err = new ObjectException("Exception during H5Iterate, abort listing");
+    }
+    return -1;
+}
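+
+// Usage sketch mirroring the call in H5Node_traits_misc.hpp (`group_id` is a
+// hypothetical open group hid_t):
+//   std::vector<std::string> names;
+//   HighFiveIterateData data(names);
+//   H5Literate(group_id, H5_INDEX_NAME, H5_ITER_INC, NULL,
+//              &internal_high_five_iterate<H5L_info_t>, &data);
+//   data.throwIfError();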
+
+}  // namespace details
+}  // namespace HighFive
diff --git a/packages/HighFive/include/highfive/bits/H5Node_traits.hpp b/packages/HighFive/include/highfive/bits/H5Node_traits.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..d53d3f0488f1dc8ec07a645d3f0aaec32e65c3b2
--- /dev/null
+++ b/packages/HighFive/include/highfive/bits/H5Node_traits.hpp
@@ -0,0 +1,258 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include <string>
+
+#include "../H5PropertyList.hpp"
+#include "H5_definitions.hpp"
+#include "H5Converter_misc.hpp"
+
+namespace HighFive {
+
+enum class IndexType : std::underlying_type<H5_index_t>::type {
+    NAME = H5_INDEX_NAME,
+    CRT_ORDER = H5_INDEX_CRT_ORDER,
+};
+
+///
+/// \brief NodeTraits: Base class for Group and File
+///
+template <typename Derivate>
+class NodeTraits {
+  public:
+    ///
+    /// \brief createDataSet Create a new dataset in the current file of
+    /// datatype type and of size space
+    /// \param dataset_name identifier of the dataset
+    /// \param space Associated DataSpace, see \ref DataSpace for more information
+    /// \param type Type of Data
+    /// \param createProps A property list with data set creation properties
+    /// \param accessProps A property list with data set access properties
+    /// \param parents Create intermediate groups if needed. Default: true.
+    /// \return DataSet Object
+    DataSet createDataSet(const std::string& dataset_name,
+                          const DataSpace& space,
+                          const DataType& type,
+                          const DataSetCreateProps& createProps = DataSetCreateProps::Default(),
+                          const DataSetAccessProps& accessProps = DataSetAccessProps::Default(),
+                          bool parents = true);
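+
+    // Illustrative call (hypothetical names; a sketch, not a normative example):
+    //   DataSet ds = file.createDataSet("results/values",
+    //                                   DataSpace(std::vector<size_t>{100, 3}),
+    //                                   AtomicType<double>());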
+
+    ///
+    /// \brief createDataSet create a new dataset in the current file with a
+    /// size specified by space
+    /// \param dataset_name identifier of the dataset
+    /// \param space Associated DataSpace, see \ref DataSpace for more information
+    /// \param createProps A property list with data set creation properties
+    /// \param accessProps A property list with data set access properties
+    /// \param parents Create intermediate groups if needed. Default: true.
+    /// \return DataSet Object
+    template <typename T,
+              typename std::enable_if<
+                  std::is_same<typename details::inspector<T>::base_type, details::Boolean>::value,
+                  int>::type* = nullptr>
+    DataSet createDataSet(const std::string& dataset_name,
+                          const DataSpace& space,
+                          const DataSetCreateProps& createProps = DataSetCreateProps::Default(),
+                          const DataSetAccessProps& accessProps = DataSetAccessProps::Default(),
+                          bool parents = true);
+
+    template <typename T,
+              typename std::enable_if<
+                  !std::is_same<typename details::inspector<T>::base_type, details::Boolean>::value,
+                  int>::type* = nullptr>
+    DataSet createDataSet(const std::string& dataset_name,
+                          const DataSpace& space,
+                          const DataSetCreateProps& createProps = DataSetCreateProps::Default(),
+                          const DataSetAccessProps& accessProps = DataSetAccessProps::Default(),
+                          bool parents = true);
+
+    ///
+    /// \brief createDataSet create a new dataset in the current file and
+    /// write to it, inferring the DataSpace from the data.
+    /// \param dataset_name identifier of the dataset
+    /// \param data Associated data, must support DataSpace::From, see
+    /// \ref DataSpace for more information
+    /// \param createProps A property list with data set creation properties
+    /// \param accessProps A property list with data set access properties
+    /// \param parents Create intermediate groups if needed. Default: true.
+    /// \return DataSet Object
+    template <typename T>
+    DataSet createDataSet(const std::string& dataset_name,
+                          const T& data,
+                          const DataSetCreateProps& createProps = DataSetCreateProps::Default(),
+                          const DataSetAccessProps& accessProps = DataSetAccessProps::Default(),
+                          bool parents = true);
+
+
+    template <std::size_t N>
+    DataSet createDataSet(const std::string& dataset_name,
+                          const FixedLenStringArray<N>& data,
+                          const DataSetCreateProps& createProps = DataSetCreateProps::Default(),
+                          const DataSetAccessProps& accessProps = DataSetAccessProps::Default(),
+                          bool parents = true);
+
+    ///
+    /// \brief get an existing dataset in the current file
+    /// \param dataset_name
+    /// \param accessProps property list to configure dataset chunk cache
+    /// \return return the named dataset, or throw exception if not found
+    DataSet getDataSet(const std::string& dataset_name,
+                       const DataSetAccessProps& accessProps = DataSetAccessProps::Default()) const;
+
+    ///
+    /// \brief create a new group, and if needed its intermediate groups
+    /// \param group_name
+    /// \param parents Create intermediate groups if needed. Default: true.
+    /// \return the group object
+    Group createGroup(const std::string& group_name, bool parents = true);
+
+    ///
+    /// \brief create a new group, and if needed its intermediate groups
+    /// \param group_name
+    /// \param createProps A property list with group creation properties
+    /// \param parents Create intermediate groups if needed. Default: true.
+    /// \return the group object
+    Group createGroup(const std::string& group_name,
+                      const GroupCreateProps& createProps,
+                      bool parents = true);
+
+    ///
+    /// \brief open an existing group with the name group_name
+    /// \param group_name
+    /// \return the group object
+    Group getGroup(const std::string& group_name) const;
+
+    ///
+    /// \brief open a committed datatype with the name type_name
+    /// \param type_name
+    /// \return the datatype object
+    DataType getDataType(
+        const std::string& type_name,
+        const DataTypeAccessProps& accessProps = DataTypeAccessProps::Default()) const;
+
+    ///
+    /// \brief return the number of leaf objects of the node / group
+    /// \return number of leaf objects
+    size_t getNumberObjects() const;
+
+    ///
+    /// \brief return the name of the object with the given index
+    /// \return the name of the object
+    std::string getObjectName(size_t index) const;
+
+    ///
+    /// \brief moves an object and its content within an HDF5 file.
+    /// \param src_path relative path of the object to current File/Group
+    /// \param dest_path new relative path of the object to current File/Group
+    /// \param parents Create intermediate groups if needed. Default: true.
+    /// \return boolean that is true if the move was successful
+    bool rename(const std::string& src_path,
+                const std::string& dest_path,
+                bool parents = true) const;
+
+    ///
+    /// \brief list the names of all leaf objects of the node / group
+    /// \param idx_type tells whether the list should be ordered by Name or CreationOrderTime.
+    /// CreationOrderTime can be used only if the file/group has been created with
+    /// the HighFive::LinkCreationTime property.
+    /// \return the list of leaf object names
+    std::vector<std::string> listObjectNames(IndexType idx_type = IndexType::NAME) const;
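+
+    // Illustrative call (hypothetical group `g`):
+    //   for (const auto& name: g.listObjectNames())
+    //       std::cout << name << '\n';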
+
+    ///
+    /// \brief check a dataset or group exists in the current node / group
+    /// \param node_name dataset/group name to check
+    /// \return true if a dataset/group with the associated name exists, or false
+    bool exist(const std::string& node_name) const;
+
+    ///
+    /// \brief unlink the given dataset or group
+    /// \param node_name dataset/group name to unlink
+    void unlink(const std::string& node_name) const;
+
+    ///
+    /// \brief Returns the kind of link of the given name (soft, hard...)
+    /// \param node_name The entry to check, path relative to the current group
+    LinkType getLinkType(const std::string& node_name) const;
+
+    ///
+    /// \brief A shorthand to get the kind of object pointed to (group, dataset, type...)
+    /// \param node_name The entry to check, path relative to the current group
+    ObjectType getObjectType(const std::string& node_name) const;
+
+    ///
+    /// \brief A shorthand to create a soft link to any object which provides `getPath`
+    /// The link will be created with default properties along with the required parent groups
+    template <typename T, typename = decltype(&T::getPath)>
+    void createSoftLink(const std::string& linkName, const T& obj) {
+        static_assert(!std::is_same<T, Attribute>::value,
+                      "hdf5 doesn't support soft links to Attributes");
+        createSoftLink(linkName, obj.getPath());
+    }
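+
+    // Illustrative call (hypothetical objects): given a DataSet `ds` from this
+    // file, `createSoftLink("alias", ds)` resolves the target via ds.getPath().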
+
+    ///
+    /// \brief Creates a soft link
+    /// \param link_name The name of the link
+    /// \param obj_path The target object path
+    /// \param linkCreateProps A Link_Create property list. Note: `parents=true`
+    /// overrides it by adding the intermediate group creation property
+    /// \param linkAccessProps The Link_Access property list
+    /// \param parents Whether parent groups should be created. Default: true
+    void createSoftLink(const std::string& link_name,
+                        const std::string& obj_path,
+                        LinkCreateProps linkCreateProps = LinkCreateProps(),
+                        const LinkAccessProps& linkAccessProps = LinkAccessProps(),
+                        const bool parents = true);
+
+    void createExternalLink(const std::string& link_name,
+                            const std::string& h5_file,
+                            const std::string& obj_path,
+                            LinkCreateProps linkCreateProps = LinkCreateProps(),
+                            const LinkAccessProps& linkAccessProps = LinkAccessProps(),
+                            const bool parents = true);
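+
+    // Illustrative call (hypothetical paths):
+    //   f.createExternalLink("remote", "other.h5", "/grp/dset");
+    // afterwards f.getDataSet("remote") follows the link into other.h5.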
+
+    ///
+    /// \brief Creates a hard link
+    /// \param link_name The name of the link
+    /// \param target_obj The target object
+    /// \param linkCreateProps A Link_Create property list. Note: `parents=true`
+    /// overrides it by adding the intermediate group creation property
+    /// \param linkAccessProps The Link_Access property list
+    /// \param parents Whether parent groups should be created. Default: true
+    template <typename T, typename = decltype(&T::getPath)>
+    void createHardLink(const std::string& link_name,
+                        const T& target_obj,
+                        LinkCreateProps linkCreateProps = LinkCreateProps(),
+                        const LinkAccessProps& linkAccessProps = LinkAccessProps(),
+                        const bool parents = true);
+
+  private:
+    using derivate_type = Derivate;
+
+    // A wrapper over the low-level H5Lexist
+    // It makes behavior consistent among versions and by default transforms
+    // errors to exceptions
+    bool _exist(const std::string& node_name, bool raise_errors = true) const;
+
+    // Opens an arbitrary object to obtain info
+    Object _open(const std::string& node_name,
+                 const DataSetAccessProps& accessProps = DataSetAccessProps::Default()) const;
+};
+
+
+///
+/// \brief The possible types of group entries (link concept)
+///
+enum class LinkType {
+    Hard,
+    Soft,
+    External,
+    Other  // Reserved or User-defined
+};
+
+
+}  // namespace HighFive
diff --git a/packages/HighFive/include/highfive/bits/H5Node_traits_misc.hpp b/packages/HighFive/include/highfive/bits/H5Node_traits_misc.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..2f75ff3111efa3f9c12e94599cf995dfdd0f8ae2
--- /dev/null
+++ b/packages/HighFive/include/highfive/bits/H5Node_traits_misc.hpp
@@ -0,0 +1,412 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include <string>
+#include <vector>
+
+#include <H5Apublic.h>
+#include <H5Dpublic.h>
+#include <H5Fpublic.h>
+#include <H5Gpublic.h>
+#include <H5Ppublic.h>
+#include <H5Tpublic.h>
+
+#include "../H5DataSet.hpp"
+#include "../H5Group.hpp"
+#include "../H5Selection.hpp"
+#include "../H5Utility.hpp"
+#include "H5DataSet_misc.hpp"
+#include "H5Iterables_misc.hpp"
+#include "H5Selection_misc.hpp"
+#include "H5Slice_traits_misc.hpp"
+
+namespace HighFive {
+
+
+template <typename Derivate>
+inline DataSet NodeTraits<Derivate>::createDataSet(const std::string& dataset_name,
+                                                   const DataSpace& space,
+                                                   const DataType& dtype,
+                                                   const DataSetCreateProps& createProps,
+                                                   const DataSetAccessProps& accessProps,
+                                                   bool parents) {
+    LinkCreateProps lcpl;
+    lcpl.add(CreateIntermediateGroup(parents));
+    const auto hid = H5Dcreate2(static_cast<Derivate*>(this)->getId(),
+                                dataset_name.c_str(),
+                                dtype.getId(),
+                                space.getId(),
+                                lcpl.getId(),
+                                createProps.getId(),
+                                accessProps.getId());
+    if (hid < 0) {
+        HDF5ErrMapper::ToException<DataSetException>(
+            std::string("Unable to create the dataset \"") + dataset_name + "\":");
+    }
+    return DataSet(hid);
+}
+
+template <typename Derivate>
+template <typename T,
+          typename std::enable_if<
+              std::is_same<typename details::inspector<T>::base_type, details::Boolean>::value,
+              int>::type*>
+inline DataSet NodeTraits<Derivate>::createDataSet(const std::string& dataset_name,
+                                                   const DataSpace& space,
+                                                   const DataSetCreateProps& createProps,
+                                                   const DataSetAccessProps& accessProps,
+                                                   bool parents) {
+    return createDataSet(dataset_name,
+                         space,
+                         create_and_check_datatype<typename details::inspector<T>::base_type>(),
+                         createProps,
+                         accessProps,
+                         parents);
+}
+
+template <typename Derivate>
+template <typename T,
+          typename std::enable_if<
+              !std::is_same<typename details::inspector<T>::base_type, details::Boolean>::value,
+              int>::type*>
+inline DataSet NodeTraits<Derivate>::createDataSet(const std::string& dataset_name,
+                                                   const DataSpace& space,
+                                                   const DataSetCreateProps& createProps,
+                                                   const DataSetAccessProps& accessProps,
+                                                   bool parents) {
+    return createDataSet(
+        dataset_name, space, create_and_check_datatype<T>(), createProps, accessProps, parents);
+}
+
+template <typename Derivate>
+template <typename T>
+inline DataSet NodeTraits<Derivate>::createDataSet(const std::string& dataset_name,
+                                                   const T& data,
+                                                   const DataSetCreateProps& createProps,
+                                                   const DataSetAccessProps& accessProps,
+                                                   bool parents) {
+    DataSet ds =
+        createDataSet(dataset_name,
+                      DataSpace::From(data),
+                      create_and_check_datatype<typename details::inspector<T>::base_type>(),
+                      createProps,
+                      accessProps,
+                      parents);
+    ds.write(data);
+    return ds;
+}
+
+template <typename Derivate>
+template <std::size_t N>
+inline DataSet NodeTraits<Derivate>::createDataSet(const std::string& dataset_name,
+                                                   const FixedLenStringArray<N>& data,
+                                                   const DataSetCreateProps& createProps,
+                                                   const DataSetAccessProps& accessProps,
+                                                   bool parents) {
+    DataSet ds = createDataSet<char[N]>(
+        dataset_name, DataSpace(data.size()), createProps, accessProps, parents);
+    ds.write(data);
+    return ds;
+}
+
+template <typename Derivate>
+inline DataSet NodeTraits<Derivate>::getDataSet(const std::string& dataset_name,
+                                                const DataSetAccessProps& accessProps) const {
+    const auto hid = H5Dopen2(static_cast<const Derivate*>(this)->getId(),
+                              dataset_name.c_str(),
+                              accessProps.getId());
+    if (hid < 0) {
+        HDF5ErrMapper::ToException<DataSetException>(std::string("Unable to open the dataset \"") +
+                                                     dataset_name + "\":");
+    }
+    return DataSet(hid);
+}
+
+template <typename Derivate>
+inline Group NodeTraits<Derivate>::createGroup(const std::string& group_name, bool parents) {
+    LinkCreateProps lcpl;
+    lcpl.add(CreateIntermediateGroup(parents));
+    const auto hid = H5Gcreate2(static_cast<Derivate*>(this)->getId(),
+                                group_name.c_str(),
+                                lcpl.getId(),
+                                H5P_DEFAULT,
+                                H5P_DEFAULT);
+    if (hid < 0) {
+        HDF5ErrMapper::ToException<GroupException>(std::string("Unable to create the group \"") +
+                                                   group_name + "\":");
+    }
+    return detail::make_group(hid);
+}
+
+template <typename Derivate>
+inline Group NodeTraits<Derivate>::createGroup(const std::string& group_name,
+                                               const GroupCreateProps& createProps,
+                                               bool parents) {
+    LinkCreateProps lcpl;
+    lcpl.add(CreateIntermediateGroup(parents));
+    const auto hid = H5Gcreate2(static_cast<Derivate*>(this)->getId(),
+                                group_name.c_str(),
+                                lcpl.getId(),
+                                createProps.getId(),
+                                H5P_DEFAULT);
+    if (hid < 0) {
+        HDF5ErrMapper::ToException<GroupException>(std::string("Unable to create the group \"") +
+                                                   group_name + "\":");
+    }
+    return detail::make_group(hid);
+}
+
+template <typename Derivate>
+inline Group NodeTraits<Derivate>::getGroup(const std::string& group_name) const {
+    const auto hid =
+        H5Gopen2(static_cast<const Derivate*>(this)->getId(), group_name.c_str(), H5P_DEFAULT);
+    if (hid < 0) {
+        HDF5ErrMapper::ToException<GroupException>(std::string("Unable to open the group \"") +
+                                                   group_name + "\":");
+    }
+    return detail::make_group(hid);
+}
+
+template <typename Derivate>
+inline DataType NodeTraits<Derivate>::getDataType(const std::string& type_name,
+                                                  const DataTypeAccessProps& accessProps) const {
+    const auto hid = H5Topen2(static_cast<const Derivate*>(this)->getId(),
+                              type_name.c_str(),
+                              accessProps.getId());
+    if (hid < 0) {
+        HDF5ErrMapper::ToException<DataTypeException>(
+            std::string("Unable to open the datatype \"") + type_name + "\":");
+    }
+    return DataType(hid);
+}
+
+template <typename Derivate>
+inline size_t NodeTraits<Derivate>::getNumberObjects() const {
+    hsize_t res;
+    if (H5Gget_num_objs(static_cast<const Derivate*>(this)->getId(), &res) < 0) {
+        HDF5ErrMapper::ToException<GroupException>(
+            std::string("Unable to count objects in existing group or file"));
+    }
+    return static_cast<size_t>(res);
+}
+
+template <typename Derivate>
+inline std::string NodeTraits<Derivate>::getObjectName(size_t index) const {
+    return details::get_name([&](char* buffer, size_t length) {
+        return H5Lget_name_by_idx(static_cast<const Derivate*>(this)->getId(),
+                                  ".",
+                                  H5_INDEX_NAME,
+                                  H5_ITER_INC,
+                                  index,
+                                  buffer,
+                                  length,
+                                  H5P_DEFAULT);
+    });
+}
+
+template <typename Derivate>
+inline bool NodeTraits<Derivate>::rename(const std::string& src_path,
+                                         const std::string& dst_path,
+                                         bool parents) const {
+    LinkCreateProps lcpl;
+    lcpl.add(CreateIntermediateGroup(parents));
+    herr_t status = H5Lmove(static_cast<const Derivate*>(this)->getId(),
+                            src_path.c_str(),
+                            static_cast<const Derivate*>(this)->getId(),
+                            dst_path.c_str(),
+                            lcpl.getId(),
+                            H5P_DEFAULT);
+    if (status < 0) {
+        HDF5ErrMapper::ToException<GroupException>(std::string("Unable to move link to \"") +
+                                                   dst_path + "\":");
+        return false;
+    }
+    return true;
+}
+
+template <typename Derivate>
+inline std::vector<std::string> NodeTraits<Derivate>::listObjectNames(IndexType idx_type) const {
+    std::vector<std::string> names;
+    details::HighFiveIterateData iterateData(names);
+
+    size_t num_objs = getNumberObjects();
+    names.reserve(num_objs);
+
+    if (H5Literate(static_cast<const Derivate*>(this)->getId(),
+                   static_cast<H5_index_t>(idx_type),
+                   H5_ITER_INC,
+                   NULL,
+                   &details::internal_high_five_iterate<H5L_info_t>,
+                   static_cast<void*>(&iterateData)) < 0) {
+        HDF5ErrMapper::ToException<GroupException>(std::string("Unable to list objects in group"));
+    }
+
+    return names;
+}
+
+template <typename Derivate>
+inline bool NodeTraits<Derivate>::_exist(const std::string& node_name, bool raise_errors) const {
+    SilenceHDF5 silencer{};
+    const auto val =
+        H5Lexists(static_cast<const Derivate*>(this)->getId(), node_name.c_str(), H5P_DEFAULT);
+    if (val < 0) {
+        if (raise_errors) {
+            HDF5ErrMapper::ToException<GroupException>("Invalid link for exist()");
+        } else {
+            return false;
+        }
+    }
+
+    // The root path always exists, but H5Lexists returns 0 or 1
+    // depending on the version of HDF5, so always return true for it.
+    // We had to call H5Lexists anyway to check that there are no errors.
+    return (node_name == "/") ? true : (val > 0);
+}
+
+template <typename Derivate>
+inline bool NodeTraits<Derivate>::exist(const std::string& group_path) const {
+    // When there are slashes, first check everything is fine
+    // so that subsequent errors are only due to missing intermediate groups
+    if (group_path.find('/') != std::string::npos) {
+        _exist("/");  // Shall not throw under normal circumstances
+        // Unless "/" (already checked), verify path exists (not throwing errors)
+        return (group_path == "/") ? true : _exist(group_path, false);
+    }
+    return _exist(group_path);
+}
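+
+// Hedged usage note: exist() tolerates missing intermediate groups, e.g.
+// (hypothetical file `f`):
+//   if (!f.exist("a/b/c")) {
+//       f.createGroup("a/b/c");  // parents are created by default
+//   }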
+
+
+template <typename Derivate>
+inline void NodeTraits<Derivate>::unlink(const std::string& node_name) const {
+    const herr_t val =
+        H5Ldelete(static_cast<const Derivate*>(this)->getId(), node_name.c_str(), H5P_DEFAULT);
+    if (val < 0) {
+        HDF5ErrMapper::ToException<GroupException>(std::string("Invalid name for unlink() "));
+    }
+}
+
+
+// convert internal link types to enum class.
+// This function is internal, so H5L_TYPE_ERROR shall be handled in the calling context
+static inline LinkType _convert_link_type(const H5L_type_t& ltype) noexcept {
+    switch (ltype) {
+    case H5L_TYPE_HARD:
+        return LinkType::Hard;
+    case H5L_TYPE_SOFT:
+        return LinkType::Soft;
+    case H5L_TYPE_EXTERNAL:
+        return LinkType::External;
+    default:
+        // Other link types are possible but are not recognized by HighFive.
+        // see https://support.hdfgroup.org/HDF5/doc/RM/H5L/H5Lregister.htm
+        return LinkType::Other;
+    }
+}
+
+template <typename Derivate>
+inline LinkType NodeTraits<Derivate>::getLinkType(const std::string& node_name) const {
+    H5L_info_t linkinfo;
+    if (H5Lget_info(static_cast<const Derivate*>(this)->getId(),
+                    node_name.c_str(),
+                    &linkinfo,
+                    H5P_DEFAULT) < 0 ||
+        linkinfo.type == H5L_TYPE_ERROR) {
+        HDF5ErrMapper::ToException<GroupException>(std::string("Unable to obtain info for link ") +
+                                                   node_name);
+    }
+    return _convert_link_type(linkinfo.type);
+}
+
+template <typename Derivate>
+inline ObjectType NodeTraits<Derivate>::getObjectType(const std::string& node_name) const {
+    return _open(node_name).getType();
+}
+
+
+template <typename Derivate>
+inline void NodeTraits<Derivate>::createSoftLink(const std::string& link_name,
+                                                 const std::string& obj_path,
+                                                 LinkCreateProps linkCreateProps,
+                                                 const LinkAccessProps& linkAccessProps,
+                                                 const bool parents) {
+    if (parents) {
+        linkCreateProps.add(CreateIntermediateGroup{});
+    }
+    auto status = H5Lcreate_soft(obj_path.c_str(),
+                                 static_cast<const Derivate*>(this)->getId(),
+                                 link_name.c_str(),
+                                 linkCreateProps.getId(),
+                                 linkAccessProps.getId());
+    if (status < 0) {
+        HDF5ErrMapper::ToException<GroupException>(std::string("Unable to create soft link: "));
+    }
+}
+
+
+template <typename Derivate>
+inline void NodeTraits<Derivate>::createExternalLink(const std::string& link_name,
+                                                     const std::string& h5_file,
+                                                     const std::string& obj_path,
+                                                     LinkCreateProps linkCreateProps,
+                                                     const LinkAccessProps& linkAccessProps,
+                                                     const bool parents) {
+    if (parents) {
+        linkCreateProps.add(CreateIntermediateGroup{});
+    }
+    auto status = H5Lcreate_external(h5_file.c_str(),
+                                     obj_path.c_str(),
+                                     static_cast<const Derivate*>(this)->getId(),
+                                     link_name.c_str(),
+                                     linkCreateProps.getId(),
+                                     linkAccessProps.getId());
+    if (status < 0) {
+        HDF5ErrMapper::ToException<GroupException>(std::string("Unable to create external link: "));
+    }
+}
+
+template <typename Derivate>
+template <typename T, typename>
+inline void NodeTraits<Derivate>::createHardLink(const std::string& link_name,
+                                                 const T& target_obj,
+                                                 LinkCreateProps linkCreateProps,
+                                                 const LinkAccessProps& linkAccessProps,
+                                                 const bool parents) {
+    static_assert(!std::is_same<T, Attribute>::value,
+                  "hdf5 doesn't support hard links to Attributes");
+    if (parents) {
+        linkCreateProps.add(CreateIntermediateGroup{});
+    }
+    auto status = H5Lcreate_hard(target_obj.getId(),
+                                 ".",
+                                 static_cast<const Derivate*>(this)->getId(),
+                                 link_name.c_str(),
+                                 linkCreateProps.getId(),
+                                 linkAccessProps.getId());
+    if (status < 0) {
+        HDF5ErrMapper::ToException<GroupException>(std::string("Unable to create hard link: "));
+    }
+}
+
+
+template <typename Derivate>
+inline Object NodeTraits<Derivate>::_open(const std::string& node_name,
+                                          const DataSetAccessProps& accessProps) const {
+    const auto id = H5Oopen(static_cast<const Derivate*>(this)->getId(),
+                            node_name.c_str(),
+                            accessProps.getId());
+    if (id < 0) {
+        HDF5ErrMapper::ToException<GroupException>(std::string("Unable to open \"") + node_name +
+                                                   "\":");
+    }
+    return detail::make_object(id);
+}
+
+
+}  // namespace HighFive
diff --git a/packages/HighFive/include/highfive/bits/H5Object_misc.hpp b/packages/HighFive/include/highfive/bits/H5Object_misc.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..f477d7fdf387a10932e0a8b127c1a6bb52eb7307
--- /dev/null
+++ b/packages/HighFive/include/highfive/bits/H5Object_misc.hpp
@@ -0,0 +1,123 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include <iostream>
+
+#include "../H5Exception.hpp"
+#include "../H5Utility.hpp"
+
+namespace HighFive {
+namespace detail {
+inline Object make_object(hid_t hid) {
+    return Object(hid);
+}
+}  // namespace detail
+
+
+inline Object::Object()
+    : _hid(H5I_INVALID_HID) {}
+
+inline Object::Object(hid_t hid)
+    : _hid(hid) {}
+
+inline Object::Object(const Object& other)
+    : _hid(other._hid) {
+    if (other.isValid() && H5Iinc_ref(_hid) < 0) {
+        throw ObjectException("Reference counter increase failure");
+    }
+}
+
+inline Object::Object(Object&& other) noexcept
+    : _hid(other._hid) {
+    other._hid = H5I_INVALID_HID;
+}
+
+inline Object& Object::operator=(const Object& other) {
+    if (this != &other) {
+        if (isValid())
+            H5Idec_ref(_hid);
+
+        _hid = other._hid;
+        if (other.isValid() && H5Iinc_ref(_hid) < 0) {
+            throw ObjectException("Reference counter increase failure");
+        }
+    }
+    return *this;
+}
+
+inline Object::~Object() {
+    if (isValid() && H5Idec_ref(_hid) < 0) {
+        HIGHFIVE_LOG_ERROR("HighFive::~Object: reference counter decrease failure");
+    }
+}
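+
+// Hedged illustration of the reference counting implemented above
+// (`some_hid` is a hypothetical valid identifier):
+//   Object a = detail::make_object(some_hid);  // owns one reference
+//   Object b = a;        // copy: H5Iinc_ref, the id is now shared
+//   // each destructor calls H5Idec_ref; the id is released with the last one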
+
+inline bool Object::isValid() const noexcept {
+    return (_hid != H5I_INVALID_HID) && (H5Iis_valid(_hid) > 0);  // negative (error) counts as invalid
+}
+
+inline hid_t Object::getId() const noexcept {
+    return _hid;
+}
+
+static inline ObjectType _convert_object_type(const H5I_type_t& h5type) {
+    switch (h5type) {
+    case H5I_FILE:
+        return ObjectType::File;
+    case H5I_GROUP:
+        return ObjectType::Group;
+    case H5I_DATATYPE:
+        return ObjectType::UserDataType;
+    case H5I_DATASPACE:
+        return ObjectType::DataSpace;
+    case H5I_DATASET:
+        return ObjectType::Dataset;
+    case H5I_ATTR:
+        return ObjectType::Attribute;
+    default:
+        return ObjectType::Other;
+    }
+}
+
+inline ObjectType Object::getType() const {
+    // H5Iget_type is a very lightweight function which extracts the type from the id
+    H5I_type_t h5type;
+    if ((h5type = H5Iget_type(_hid)) == H5I_BADID) {
+        HDF5ErrMapper::ToException<ObjectException>("Invalid hid or object type");
+    }
+    return _convert_object_type(h5type);
+}
+
+inline ObjectInfo Object::getInfo() const {
+    ObjectInfo info;
+#if (H5Oget_info_vers < 3)
+    if (H5Oget_info(_hid, &info.raw_info) < 0) {
+#else
+    if (H5Oget_info1(_hid, &info.raw_info) < 0) {
+#endif
+        HDF5ErrMapper::ToException<ObjectException>("Unable to obtain info for object");
+    }
+    return info;
+}
+
+inline haddr_t ObjectInfo::getAddress() const noexcept {
+    return raw_info.addr;
+}
+inline size_t ObjectInfo::getRefCount() const noexcept {
+    return raw_info.rc;
+}
+inline time_t ObjectInfo::getCreationTime() const noexcept {
+    return raw_info.btime;
+}
+inline time_t ObjectInfo::getModificationTime() const noexcept {
+    return raw_info.mtime;
+}
+
+
+}  // namespace HighFive
diff --git a/packages/HighFive/include/highfive/bits/H5Path_traits.hpp b/packages/HighFive/include/highfive/bits/H5Path_traits.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..46a038c4ffa1f3a9e91ae617fb634b0b668a8c09
--- /dev/null
+++ b/packages/HighFive/include/highfive/bits/H5Path_traits.hpp
@@ -0,0 +1,35 @@
+/*
+ *  Copyright (c), 2020, EPFL - Blue Brain Project
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include "H5_definitions.hpp"
+
+namespace HighFive {
+
+template <typename Derivate>
+class PathTraits {
+  public:
+    PathTraits();
+
+    ///
+    /// \brief return the path to the current object
+    /// \return the path to the object
+    std::string getPath() const;
+
+    ///
+    /// \brief Return a reference to the File object this object belongs
+    /// \return the File object ref
+    File& getFile() const noexcept;
+
+
+  protected:
+    std::shared_ptr<File> _file_obj;  // keep a ref to file so we keep its ref count > 0
+};
+
+}  // namespace HighFive
diff --git a/packages/HighFive/include/highfive/bits/H5Path_traits_misc.hpp b/packages/HighFive/include/highfive/bits/H5Path_traits_misc.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..444e9294bf8f5aea3233b5b4e0005c2ec774b842
--- /dev/null
+++ b/packages/HighFive/include/highfive/bits/H5Path_traits_misc.hpp
@@ -0,0 +1,46 @@
+/*
+ *  Copyright (c), 2020, EPFL - Blue Brain Project
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include <H5Ipublic.h>
+
+#include "H5Utils.hpp"
+#include "H5Path_traits.hpp"
+
+namespace HighFive {
+
+template <typename Derivate>
+inline PathTraits<Derivate>::PathTraits() {
+    static_assert(std::is_same<Derivate, Group>::value || std::is_same<Derivate, DataSet>::value ||
+                      std::is_same<Derivate, Attribute>::value,
+                  "PathTraits can only be applied to Group, DataSet and Attribute");
+    const auto& obj = static_cast<const Derivate&>(*this);
+    if (!obj.isValid()) {
+        return;
+    }
+    const hid_t file_id = H5Iget_file_id(obj.getId());
+    if (file_id < 0) {
+        HDF5ErrMapper::ToException<PropertyException>("getFile(): Could not obtain file of object");
+    }
+    _file_obj.reset(new File(file_id));
+}
+
+template <typename Derivate>
+inline std::string PathTraits<Derivate>::getPath() const {
+    return details::get_name([this](char* buffer, size_t length) {
+        return H5Iget_name(static_cast<const Derivate&>(*this).getId(), buffer, length);
+    });
+}
+
+template <typename Derivate>
+inline File& PathTraits<Derivate>::getFile() const noexcept {
+    return *_file_obj;
+}
+
+}  // namespace HighFive
diff --git a/packages/HighFive/include/highfive/bits/H5PropertyList_misc.hpp b/packages/HighFive/include/highfive/bits/H5PropertyList_misc.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..cef301e53a826215ea58587e2ac4538171c85c9c
--- /dev/null
+++ b/packages/HighFive/include/highfive/bits/H5PropertyList_misc.hpp
@@ -0,0 +1,574 @@
+/*
+ *  Copyright (c), 2017-2018, Adrien Devresse <adrien.devresse@epfl.ch>
+ *                            Juan Hernando <juan.hernando@epfl.ch>
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include <H5Ppublic.h>
+
+namespace HighFive {
+
+namespace {
+inline hid_t convert_plist_type(PropertyType propertyType) {
+    // The H5P_XXX are macros with function calls, so we can't assign
+    // them as the enum values.
+    switch (propertyType) {
+    case PropertyType::OBJECT_CREATE:
+        return H5P_OBJECT_CREATE;
+    case PropertyType::FILE_CREATE:
+        return H5P_FILE_CREATE;
+    case PropertyType::FILE_ACCESS:
+        return H5P_FILE_ACCESS;
+    case PropertyType::DATASET_CREATE:
+        return H5P_DATASET_CREATE;
+    case PropertyType::DATASET_ACCESS:
+        return H5P_DATASET_ACCESS;
+    case PropertyType::DATASET_XFER:
+        return H5P_DATASET_XFER;
+    case PropertyType::GROUP_CREATE:
+        return H5P_GROUP_CREATE;
+    case PropertyType::GROUP_ACCESS:
+        return H5P_GROUP_ACCESS;
+    case PropertyType::DATATYPE_CREATE:
+        return H5P_DATATYPE_CREATE;
+    case PropertyType::DATATYPE_ACCESS:
+        return H5P_DATATYPE_ACCESS;
+    case PropertyType::STRING_CREATE:
+        return H5P_STRING_CREATE;
+    case PropertyType::ATTRIBUTE_CREATE:
+        return H5P_ATTRIBUTE_CREATE;
+    case PropertyType::OBJECT_COPY:
+        return H5P_OBJECT_COPY;
+    case PropertyType::LINK_CREATE:
+        return H5P_LINK_CREATE;
+    case PropertyType::LINK_ACCESS:
+        return H5P_LINK_ACCESS;
+    default:
+        HDF5ErrMapper::ToException<PropertyException>("Unsupported property list type");
+    }
+}
+
+}  // namespace
+
+
+inline PropertyListBase::PropertyListBase() noexcept
+    : Object(H5P_DEFAULT) {}
+
+
+template <PropertyType T>
+inline void PropertyList<T>::_initializeIfNeeded() {
+    if (_hid != H5P_DEFAULT) {
+        return;
+    }
+    if ((_hid = H5Pcreate(convert_plist_type(T))) < 0) {
+        HDF5ErrMapper::ToException<PropertyException>("Unable to create property list");
+    }
+}
+
+template <PropertyType T>
+template <PropertyInterface P>
+inline void PropertyList<T>::add(const P& property) {
+    _initializeIfNeeded();
+    property.apply(_hid);
+}
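+
+// Usage sketch (illustrative): options are gathered into a typed property
+// list and applied lazily once the list is first used, e.g.
+//
+//     FileAccessProps fapl;
+//     fapl.add(FileVersionBounds(H5F_LIBVER_V110, H5F_LIBVER_LATEST));
+//     File file("data.h5", File::ReadWrite | File::Create, fapl);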
+
+template <PropertyType T>
+template <typename F, typename... Args>
+inline void RawPropertyList<T>::add(const F& funct, const Args&... args) {
+    this->_initializeIfNeeded();
+    if (funct(this->_hid, args...) < 0) {
+        HDF5ErrMapper::ToException<PropertyException>("Error setting raw hdf5 property.");
+    }
+}
+
+// Specific options to be added to Property Lists
+#if H5_VERSION_GE(1, 10, 1)
+inline FileSpaceStrategy::FileSpaceStrategy(H5F_fspace_strategy_t strategy,
+                                            hbool_t persist,
+                                            hsize_t threshold)
+    : _strategy(strategy)
+    , _persist(persist)
+    , _threshold(threshold) {}
+
+inline FileSpaceStrategy::FileSpaceStrategy(const FileCreateProps& fcpl) {
+    if (H5Pget_file_space_strategy(fcpl.getId(), &_strategy, &_persist, &_threshold) < 0) {
+        HDF5ErrMapper::ToException<PropertyException>("Unable to get file space strategy");
+    }
+}
+
+inline void FileSpaceStrategy::apply(const hid_t list) const {
+    if (H5Pset_file_space_strategy(list, _strategy, _persist, _threshold) < 0) {
+        HDF5ErrMapper::ToException<PropertyException>("Error setting file space strategy.");
+    }
+}
+
+inline H5F_fspace_strategy_t FileSpaceStrategy::getStrategy() const {
+    return _strategy;
+}
+
+inline hbool_t FileSpaceStrategy::getPersist() const {
+    return _persist;
+}
+
+inline hsize_t FileSpaceStrategy::getThreshold() const {
+    return _threshold;
+}
+
+inline FileSpacePageSize::FileSpacePageSize(hsize_t page_size)
+    : _page_size(page_size) {}
+
+inline void FileSpacePageSize::apply(const hid_t list) const {
+    if (H5Pset_file_space_page_size(list, _page_size) < 0) {
+        HDF5ErrMapper::ToException<PropertyException>("Error setting file space page size.");
+    }
+}
+
+inline FileSpacePageSize::FileSpacePageSize(const FileCreateProps& fcpl) {
+    if (H5Pget_file_space_page_size(fcpl.getId(), &_page_size) < 0) {
+        HDF5ErrMapper::ToException<PropertyException>("Unable to get file space page size");
+    }
+}
+
+inline hsize_t FileSpacePageSize::getPageSize() const {
+    return _page_size;
+}
+
+#ifndef H5_HAVE_PARALLEL
+inline PageBufferSize::PageBufferSize(size_t page_buffer_size,
+                                      unsigned min_meta_percent,
+                                      unsigned min_raw_percent)
+    : _page_buffer_size(page_buffer_size)
+    , _min_meta(min_meta_percent)
+    , _min_raw(min_raw_percent) {}
+
+inline PageBufferSize::PageBufferSize(const FileAccessProps& plist) {
+    if (H5Pget_page_buffer_size(plist.getId(), &_page_buffer_size, &_min_meta, &_min_raw) < 0) {
+        HDF5ErrMapper::ToException<PropertyException>("Error setting page buffer size.");
+    }
+}
+
+inline void PageBufferSize::apply(const hid_t list) const {
+    if (H5Pset_page_buffer_size(list, _page_buffer_size, _min_meta, _min_raw) < 0) {
+        HDF5ErrMapper::ToException<PropertyException>("Error setting page buffer size.");
+    }
+}
+
+inline size_t PageBufferSize::getPageBufferSize() const {
+    return _page_buffer_size;
+}
+
+inline unsigned PageBufferSize::getMinMetaPercent() const {
+    return _min_meta;
+}
+
+inline unsigned PageBufferSize::getMinRawPercent() const {
+    return _min_raw;
+}
+#endif
+#endif
+
+#ifdef H5_HAVE_PARALLEL
+
+inline MPIOFileAccess::MPIOFileAccess(MPI_Comm comm, MPI_Info info)
+    : _comm(comm)
+    , _info(info) {}
+
+inline void MPIOFileAccess::apply(const hid_t list) const {
+    if (H5Pset_fapl_mpio(list, _comm, _info) < 0) {
+        HDF5ErrMapper::ToException<FileException>("Unable to set-up MPIO Driver configuration");
+    }
+}
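+
+// Usage sketch (illustrative; assumes MPI_Init has already been called):
+//
+//     FileAccessProps fapl;
+//     fapl.add(MPIOFileAccess(MPI_COMM_WORLD, MPI_INFO_NULL));
+//     File file("parallel.h5", File::ReadWrite | File::Create, fapl);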
+
+inline void MPIOCollectiveMetadata::apply(const hid_t plist) const {
+    auto read = MPIOCollectiveMetadataRead{collective_read_};
+    auto write = MPIOCollectiveMetadataWrite{collective_write_};
+
+    read.apply(plist);
+    write.apply(plist);
+}
+
+inline MPIOCollectiveMetadata::MPIOCollectiveMetadata(bool collective)
+    : collective_read_(collective)
+    , collective_write_(collective) {}
+
+
+inline MPIOCollectiveMetadata::MPIOCollectiveMetadata(const FileAccessProps& plist)
+    : collective_read_(MPIOCollectiveMetadataRead(plist).isCollective())
+    , collective_write_(MPIOCollectiveMetadataWrite(plist).isCollective()) {}
+
+inline bool MPIOCollectiveMetadata::isCollectiveRead() const {
+    return collective_read_;
+}
+
+inline bool MPIOCollectiveMetadata::isCollectiveWrite() const {
+    return collective_write_;
+}
+
+
+inline void MPIOCollectiveMetadataRead::apply(const hid_t plist) const {
+    if (H5Pset_all_coll_metadata_ops(plist, collective_) < 0) {
+        HDF5ErrMapper::ToException<FileException>("Unable to request collective metadata reads");
+    }
+}
+
+inline bool MPIOCollectiveMetadataRead::isCollective() const {
+    return collective_;
+}
+
+inline MPIOCollectiveMetadataRead::MPIOCollectiveMetadataRead(const FileAccessProps& plist) {
+    if (H5Pget_all_coll_metadata_ops(plist.getId(), &collective_) < 0) {
+        HDF5ErrMapper::ToException<PropertyException>("Error loading MPI metadata read.");
+    }
+}
+
+inline MPIOCollectiveMetadataRead::MPIOCollectiveMetadataRead(bool collective)
+    : collective_(collective) {}
+
+inline void MPIOCollectiveMetadataWrite::apply(const hid_t plist) const {
+    if (H5Pset_coll_metadata_write(plist, collective_) < 0) {
+        HDF5ErrMapper::ToException<FileException>("Unable to request collective metadata writes");
+    }
+}
+
+inline bool MPIOCollectiveMetadataWrite::isCollective() const {
+    return collective_;
+}
+
+inline MPIOCollectiveMetadataWrite::MPIOCollectiveMetadataWrite(const FileAccessProps& plist) {
+    if (H5Pget_coll_metadata_write(plist.getId(), &collective_) < 0) {
+        HDF5ErrMapper::ToException<PropertyException>("Error loading MPI metadata write.");
+    }
+}
+
+inline MPIOCollectiveMetadataWrite::MPIOCollectiveMetadataWrite(bool collective)
+    : collective_(collective) {}
+
+#endif
+
+inline FileVersionBounds::FileVersionBounds(H5F_libver_t low, H5F_libver_t high)
+    : _low(low)
+    , _high(high) {}
+
+inline FileVersionBounds::FileVersionBounds(const FileAccessProps& fapl) {
+    if (H5Pget_libver_bounds(fapl.getId(), &_low, &_high) < 0) {
+        HDF5ErrMapper::ToException<PropertyException>("Unable to access file version bounds");
+    }
+}
+
+inline std::pair<H5F_libver_t, H5F_libver_t> FileVersionBounds::getVersion() const {
+    return std::make_pair(_low, _high);
+}
+
+inline void FileVersionBounds::apply(const hid_t list) const {
+    if (H5Pset_libver_bounds(list, _low, _high) < 0) {
+        HDF5ErrMapper::ToException<PropertyException>("Error setting file version bounds");
+    }
+}
+
+inline MetadataBlockSize::MetadataBlockSize(hsize_t size)
+    : _size(size) {}
+
+inline MetadataBlockSize::MetadataBlockSize(const FileAccessProps& fapl) {
+    if (H5Pget_meta_block_size(fapl.getId(), &_size) < 0) {
+        HDF5ErrMapper::ToException<PropertyException>("Unable to access file metadata block size");
+    }
+}
+
+inline void MetadataBlockSize::apply(const hid_t list) const {
+    if (H5Pset_meta_block_size(list, _size) < 0) {
+        HDF5ErrMapper::ToException<PropertyException>("Error setting metadata block size");
+    }
+}
+
+inline hsize_t MetadataBlockSize::getSize() const {
+    return _size;
+}
+
+inline void EstimatedLinkInfo::apply(const hid_t hid) const {
+    if (H5Pset_est_link_info(hid, _entries, _length) < 0) {
+        HDF5ErrMapper::ToException<PropertyException>("Error setting estimated link info");
+    }
+}
+
+inline EstimatedLinkInfo::EstimatedLinkInfo(unsigned entries, unsigned length)
+    : _entries(entries)
+    , _length(length) {}
+
+inline EstimatedLinkInfo::EstimatedLinkInfo(const GroupCreateProps& gcpl) {
+    if (H5Pget_est_link_info(gcpl.getId(), &_entries, &_length) < 0) {
+        HDF5ErrMapper::ToException<PropertyException>("Unable to access group link size property");
+    }
+}
+
+inline unsigned EstimatedLinkInfo::getEntries() const {
+    return _entries;
+}
+
+inline unsigned EstimatedLinkInfo::getNameLength() const {
+    return _length;
+}
+
+inline void Chunking::apply(const hid_t hid) const {
+    if (H5Pset_chunk(hid, static_cast<int>(_dims.size()), _dims.data()) < 0) {
+        HDF5ErrMapper::ToException<PropertyException>("Error setting chunk property");
+    }
+}
+
+inline Chunking::Chunking(const std::vector<hsize_t>& dims)
+    : _dims(dims) {}
+
+inline Chunking::Chunking(const std::initializer_list<hsize_t>& items)
+    : Chunking(std::vector<hsize_t>{items}) {}
+
+inline Chunking::Chunking(DataSetCreateProps& plist, size_t max_dims)
+    : _dims(max_dims + 1) {
+    auto n_loaded = H5Pget_chunk(plist.getId(), static_cast<int>(_dims.size()), _dims.data());
+    if (n_loaded < 0) {
+        HDF5ErrMapper::ToException<PropertyException>("Error getting chunk size");
+    }
+
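+    // H5Pget_chunk reports the true chunk rank; if it did not fit into the
+    // guessed buffer, retry with a larger guess, otherwise trim to size.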
+    if (n_loaded >= static_cast<int>(_dims.size())) {
+        *this = Chunking(plist, 8 * max_dims);
+    } else {
+        _dims.resize(static_cast<size_t>(n_loaded));
+    }
+}
+
+inline const std::vector<hsize_t>& Chunking::getDimensions() const noexcept {
+    return _dims;
+}
+
+template <typename... Args>
+inline Chunking::Chunking(hsize_t item, Args... args)
+    : Chunking(std::vector<hsize_t>{item, static_cast<hsize_t>(args)...}) {}
+
+inline void Deflate::apply(const hid_t hid) const {
+    if (!H5Zfilter_avail(H5Z_FILTER_DEFLATE) || H5Pset_deflate(hid, _level) < 0) {
+        HDF5ErrMapper::ToException<PropertyException>("Error setting deflate property");
+    }
+}
+
+inline Deflate::Deflate(unsigned int level)
+    : _level(level) {}
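+
+// Usage sketch (illustrative; `file` is an assumed open File): compression
+// requires a chunked layout, so Chunking and Deflate usually go together:
+//
+//     DataSetCreateProps props;
+//     props.add(Chunking{64, 64});   // 64x64 chunks
+//     props.add(Deflate(5));         // gzip level 5
+//     file.createDataSet<double>("grid", DataSpace(std::vector<size_t>{1024, 1024}), props);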
+
+inline void Szip::apply(const hid_t hid) const {
+    if (!H5Zfilter_avail(H5Z_FILTER_SZIP)) {
+        HDF5ErrMapper::ToException<PropertyException>("Error setting szip property");
+    }
+
+    if (H5Pset_szip(hid, _options_mask, _pixels_per_block) < 0) {
+        HDF5ErrMapper::ToException<PropertyException>("Error setting szip property");
+    }
+}
+
+inline Szip::Szip(unsigned int options_mask, unsigned int pixels_per_block)
+    : _options_mask(options_mask)
+    , _pixels_per_block(pixels_per_block) {}
+
+inline unsigned Szip::getOptionsMask() const {
+    return _options_mask;
+}
+
+inline unsigned Szip::getPixelsPerBlock() const {
+    return _pixels_per_block;
+}
+
+inline void Shuffle::apply(const hid_t hid) const {
+    if (!H5Zfilter_avail(H5Z_FILTER_SHUFFLE)) {
+        HDF5ErrMapper::ToException<PropertyException>("Error setting shuffle property");
+    }
+
+    if (H5Pset_shuffle(hid) < 0) {
+        HDF5ErrMapper::ToException<PropertyException>("Error setting shuffle property");
+    }
+}
+
+inline AllocationTime::AllocationTime(H5D_alloc_time_t alloc_time)
+    : _alloc_time(alloc_time) {}
+
+inline AllocationTime::AllocationTime(const DataSetCreateProps& dcpl) {
+    if (H5Pget_alloc_time(dcpl.getId(), &_alloc_time) < 0) {
+        HDF5ErrMapper::ToException<PropertyException>("Error getting allocation time");
+    }
+}
+
+inline void AllocationTime::apply(hid_t dcpl) const {
+    if (H5Pset_alloc_time(dcpl, _alloc_time) < 0) {
+        HDF5ErrMapper::ToException<PropertyException>("Error setting allocation time");
+    }
+}
+
+inline H5D_alloc_time_t AllocationTime::getAllocationTime() {
+    return _alloc_time;
+}
+
+inline Caching::Caching(const DataSetCreateProps& dcpl) {
+    if (H5Pget_chunk_cache(dcpl.getId(), &_numSlots, &_cacheSize, &_w0) < 0) {
+        HDF5ErrMapper::ToException<PropertyException>("Error getting dataset cache parameters");
+    }
+}
+
+inline void Caching::apply(const hid_t hid) const {
+    if (H5Pset_chunk_cache(hid, _numSlots, _cacheSize, _w0) < 0) {
+        HDF5ErrMapper::ToException<PropertyException>("Error setting dataset cache parameters");
+    }
+}
+
+inline Caching::Caching(const size_t numSlots, const size_t cacheSize, const double w0)
+    : _numSlots(numSlots)
+    , _cacheSize(cacheSize)
+    , _w0(w0) {}
+
+inline size_t Caching::getNumSlots() const {
+    return _numSlots;
+}
+
+inline size_t Caching::getCacheSize() const {
+    return _cacheSize;
+}
+
+inline double Caching::getW0() const {
+    return _w0;
+}
+
+inline CreateIntermediateGroup::CreateIntermediateGroup(bool create)
+    : _create(create) {}
+
+inline CreateIntermediateGroup::CreateIntermediateGroup(const ObjectCreateProps& ocpl) {
+    fromPropertyList(ocpl.getId());
+}
+
+
+inline void CreateIntermediateGroup::apply(const hid_t hid) const {
+    if (H5Pset_create_intermediate_group(hid, _create ? 1 : 0) < 0) {
+        HDF5ErrMapper::ToException<PropertyException>(
+            "Error setting property for create intermediate groups");
+    }
+}
+
+inline CreateIntermediateGroup::CreateIntermediateGroup(const LinkCreateProps& lcpl) {
+    fromPropertyList(lcpl.getId());
+}
+
+inline void CreateIntermediateGroup::fromPropertyList(hid_t hid) {
+    unsigned c_bool = 0;
+    if (H5Pget_create_intermediate_group(hid, &c_bool) < 0) {
+        HDF5ErrMapper::ToException<PropertyException>(
+            "Error getting property for create intermediate groups");
+    }
+
+    _create = bool(c_bool);
+}
+
+inline bool CreateIntermediateGroup::isSet() const {
+    return _create;
+}
+
+#ifdef H5_HAVE_PARALLEL
+inline UseCollectiveIO::UseCollectiveIO(bool enable)
+    : _enable(enable) {}
+
+inline void UseCollectiveIO::apply(const hid_t hid) const {
+    if (H5Pset_dxpl_mpio(hid, _enable ? H5FD_MPIO_COLLECTIVE : H5FD_MPIO_INDEPENDENT) < 0) {
+        HDF5ErrMapper::ToException<PropertyException>("Error setting H5Pset_dxpl_mpio.");
+    }
+}
+
+inline UseCollectiveIO::UseCollectiveIO(const DataTransferProps& dxpl) {
+    H5FD_mpio_xfer_t collective;
+
+    if (H5Pget_dxpl_mpio(dxpl.getId(), &collective) < 0) {
+        HDF5ErrMapper::ToException<PropertyException>("Error getting H5Pset_dxpl_mpio.");
+    }
+
+    if (collective != H5FD_MPIO_COLLECTIVE && collective != H5FD_MPIO_INDEPENDENT) {
+        throw std::logic_error("H5Pget_dxpl_mpio returned something strange.");
+    }
+
+    _enable = collective == H5FD_MPIO_COLLECTIVE;
+}
+
+inline bool UseCollectiveIO::isCollective() const {
+    return _enable;
+}
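+
+// Usage sketch (illustrative, parallel builds only; `dset` and `local_part`
+// are assumed): request collective MPI-IO for one transfer:
+//
+//     DataTransferProps dxpl;
+//     dxpl.add(UseCollectiveIO(true));
+//     dset.write(local_part, dxpl);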
+
+inline MpioNoCollectiveCause::MpioNoCollectiveCause(const DataTransferProps& dxpl) {
+    if (H5Pget_mpio_no_collective_cause(dxpl.getId(), &_local_cause, &_global_cause) < 0) {
+        HDF5ErrMapper::ToException<PropertyException>("Failed to check mpio_no_collective_cause.");
+    }
+}
+
+inline bool MpioNoCollectiveCause::wasCollective() const {
+    return _local_cause == 0 && _global_cause == 0;
+}
+
+inline uint32_t MpioNoCollectiveCause::getLocalCause() const {
+    return _local_cause;
+}
+
+inline uint32_t MpioNoCollectiveCause::getGlobalCause() const {
+    return _global_cause;
+}
+
+inline std::pair<uint32_t, uint32_t> MpioNoCollectiveCause::getCause() const {
+    return {_local_cause, _global_cause};
+}
+#endif
+
+inline LinkCreationOrder::LinkCreationOrder(const FileCreateProps& fcpl) {
+    fromPropertyList(fcpl.getId());
+}
+
+inline LinkCreationOrder::LinkCreationOrder(const GroupCreateProps& gcpl) {
+    fromPropertyList(gcpl.getId());
+}
+
+inline unsigned LinkCreationOrder::getFlags() const {
+    return _flags;
+}
+
+inline void LinkCreationOrder::apply(const hid_t hid) const {
+    if (H5Pset_link_creation_order(hid, _flags) < 0) {
+        HDF5ErrMapper::ToException<PropertyException>("Error setting LinkCreationOrder.");
+    }
+}
+
+inline void LinkCreationOrder::fromPropertyList(hid_t hid) {
+    if (H5Pget_link_creation_order(hid, &_flags) < 0) {
+        HDF5ErrMapper::ToException<PropertyException>(
+            "Error getting property for link creation order");
+    }
+}
+
+inline AttributePhaseChange::AttributePhaseChange(unsigned max_compact, unsigned min_dense)
+    : _max_compact(max_compact)
+    , _min_dense(min_dense) {}
+
+inline AttributePhaseChange::AttributePhaseChange(const GroupCreateProps& gcpl) {
+    if (H5Pget_attr_phase_change(gcpl.getId(), &_max_compact, &_min_dense) < 0) {
+        HDF5ErrMapper::ToException<PropertyException>(
+            "Error getting property for attribute phase change");
+    }
+}
+
+inline unsigned AttributePhaseChange::max_compact() const {
+    return _max_compact;
+}
+
+inline unsigned AttributePhaseChange::min_dense() const {
+    return _min_dense;
+}
+
+inline void AttributePhaseChange::apply(hid_t hid) const {
+    if (H5Pset_attr_phase_change(hid, _max_compact, _min_dense) < 0) {
+        HDF5ErrMapper::ToException<PropertyException>(
+            "Error getting property for attribute phase change");
+    }
+}
+
+
+}  // namespace HighFive
diff --git a/packages/HighFive/include/highfive/bits/H5ReadWrite_misc.hpp b/packages/HighFive/include/highfive/bits/H5ReadWrite_misc.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..c8e73617400fa9cec3b727d84fb49f1ac8e74c11
--- /dev/null
+++ b/packages/HighFive/include/highfive/bits/H5ReadWrite_misc.hpp
@@ -0,0 +1,156 @@
+/*
+ *  Copyright (c) 2020 Blue Brain Project
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include <H5Tpublic.h>
+#include "H5Utils.hpp"
+
+namespace HighFive {
+
+namespace details {
+
+template <typename T>
+using unqualified_t = typename std::remove_const<typename std::remove_reference<T>::type>::type;
+
+// Find the type of a potential char array, otherwise void
+template <typename T>
+struct type_char_array {
+    using type = typename std::conditional<
+        std::is_same<typename inspector<T>::base_type, std::string>::value,
+        std::string,
+        void>::type;
+    static constexpr bool is_char_array = false;
+};
+
+template <typename T>
+struct type_char_array<T*> {
+    using type = typename std::conditional<std::is_same<unqualified_t<T>, char>::value,
+                                           char*,
+                                           typename type_char_array<T>::type>::type;
+    static constexpr bool is_char_array = true;
+};
+
+template <typename T, std::size_t N>
+struct type_char_array<T[N]> {
+    using type = typename std::conditional<std::is_same<unqualified_t<T>, char>::value,
+                                           char[N],
+                                           typename type_char_array<T>::type>::type;
+    static constexpr bool is_char_array = true;
+};
+
+template <typename T>
+struct BufferInfo {
+    using type_no_const = typename std::remove_const<T>::type;
+    using elem_type = typename details::inspector<type_no_const>::base_type;
+    using char_array_t = typename details::type_char_array<type_no_const>::type;
+    static constexpr bool is_char_array = details::type_char_array<type_no_const>::is_char_array;
+
+    enum Operation { read, write };
+    const Operation op;
+
+    template <class F>
+    BufferInfo(const DataType& dtype, F getName, Operation _op);
+
+    // member data for info depending on the destination dataset type
+    const bool is_fixed_len_string;
+    const size_t n_dimensions;
+    const DataType data_type;
+};
+
+// details implementation
+template <typename SrcStrT>
+struct string_type_checker {
+    static DataType getDataType(const DataType&, const DataType&);
+};
+
+inline void enforce_ascii_hack(const DataType& dst, const DataType& src) {
+    // Note: constness only refers to constness of the DataType object, which
+    // is just an ID, we can/will change properties of `dst`.
+
+    // TEMP. CHANGE: Ensure that the character set is properly configured to prevent
+    // converter issues on HDF5 <=v1.12.0 when loading ASCII strings first.
+    // See https://github.com/HDFGroup/hdf5/issues/544 for further information.
+    if (H5Tget_cset(src.getId()) == H5T_CSET_ASCII) {
+        H5Tset_cset(dst.getId(), H5T_CSET_ASCII);
+    }
+}
+
+template <>
+struct string_type_checker<void> {
+    inline static DataType getDataType(const DataType& element_type, const DataType& dtype) {
+        if (H5Tget_class(element_type.getId()) == H5T_STRING) {
+            enforce_ascii_hack(element_type, dtype);
+        }
+        return element_type;
+    }
+};
+
+template <>
+struct string_type_checker<std::string> {
+    inline static DataType getDataType(const DataType&, const DataType& file_datatype) {
+        // The StringBuffer ensures that the data is transformed such that it
+        // matches the datatype of the dataset, i.e. `file_datatype` and
+        // `mem_datatype` are the same.
+        return file_datatype;
+    }
+};
+
+template <std::size_t FixedLen>
+struct string_type_checker<char[FixedLen]> {
+    inline static DataType getDataType(const DataType& element_type, const DataType& dtype) {
+        DataType return_type = (dtype.isFixedLenStr()) ? AtomicType<char[FixedLen]>()
+                                                       : element_type;
+        enforce_ascii_hack(return_type, dtype);
+        return return_type;
+    }
+};
+
+template <>
+struct string_type_checker<char*> {
+    inline static DataType getDataType(const DataType&, const DataType& dtype) {
+        if (dtype.isFixedLenStr()) {
+            throw DataSetException("Can't output variable-length to fixed-length strings");
+        }
+        DataType return_type = AtomicType<std::string>();
+        enforce_ascii_hack(return_type, dtype);
+        return return_type;
+    }
+};
+
+template <typename T>
+template <class F>
+BufferInfo<T>::BufferInfo(const DataType& dtype, F getName, Operation _op)
+    : op(_op)
+    , is_fixed_len_string(dtype.isFixedLenStr())
+    // When using fixed-length strings we need to subtract one dimension
+    , n_dimensions(details::inspector<type_no_const>::recursive_ndim -
+                   ((is_fixed_len_string && is_char_array) ? 1 : 0))
+    , data_type(
+          string_type_checker<char_array_t>::getDataType(create_datatype<elem_type>(), dtype)) {
+    // We only warn here; if the types are truly not convertible, an exception will be raised on read/write
+    if (dtype.getClass() != data_type.getClass()) {
+        HIGHFIVE_LOG_WARN(getName() + "\": data and hdf5 dataset have different types: " +
+                          data_type.string() + " -> " + dtype.string());
+    } else if ((dtype.getClass() & data_type.getClass()) == DataTypeClass::Float) {
+        HIGHFIVE_LOG_WARN_IF(
+            (op == read) && (dtype.getSize() > data_type.getSize()),
+            getName() + "\": hdf5 dataset has higher floating point precision than data on read: " +
+                dtype.string() + " -> " + data_type.string());
+
+        HIGHFIVE_LOG_WARN_IF(
+            (op == write) && (dtype.getSize() < data_type.getSize()),
+            getName() +
+                "\": data has higher floating point precision than hdf5 dataset on write: " +
+                data_type.string() + " -> " + dtype.string());
+    }
+}
+
+}  // namespace details
+
+}  // namespace HighFive
diff --git a/packages/HighFive/include/highfive/bits/H5Reference_misc.hpp b/packages/HighFive/include/highfive/bits/H5Reference_misc.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..7c8db36fb6b2482689cd0a73057858427b054fdf
--- /dev/null
+++ b/packages/HighFive/include/highfive/bits/H5Reference_misc.hpp
@@ -0,0 +1,67 @@
+/*
+ *  Copyright (c), 2020, EPFL - Blue Brain Project
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+
+#pragma once
+
+#include <string>
+#include <H5Ppublic.h>
+
+#include "H5Utils.hpp"
+
+#include "../H5Object.hpp"
+
+namespace HighFive {
+
+inline Reference::Reference(const Object& location, const Object& object)
+    : parent_id(location.getId()) {
+    obj_name = details::get_name(
+        [&](char* buffer, size_t length) { return H5Iget_name(object.getId(), buffer, length); });
+}
+
+inline void Reference::create_ref(hobj_ref_t* refptr) const {
+    if (H5Rcreate(refptr, parent_id, obj_name.c_str(), H5R_OBJECT, -1) < 0) {
+        HDF5ErrMapper::ToException<ReferenceException>(
+            std::string("Unable to create the reference for \"") + obj_name + "\":");
+    }
+}
+
+inline ObjectType Reference::getType(const Object& location) const {
+    return get_ref(location).getType();
+}
+
+template <typename T>
+inline T Reference::dereference(const Object& location) const {
+    static_assert(std::is_same<DataSet, T>::value || std::is_same<Group, T>::value,
+                  "We can only (de)reference HighFive::Group or HighFive:DataSet");
+    auto obj = get_ref(location);
+    if (obj.getType() != T::type) {
+        HDF5ErrMapper::ToException<ReferenceException>("Trying to dereference the wrong type");
+    }
+#if defined __GNUC__ && __GNUC__ < 9
+    return std::move(obj);
+#else
+    return obj;
+#endif
+}
+
+inline Object Reference::get_ref(const Object& location) const {
+    hid_t res;
+#if (H5Rdereference_vers == 2)
+    if ((res = H5Rdereference(location.getId(), H5P_DEFAULT, H5R_OBJECT, &href)) < 0) {
+        HDF5ErrMapper::ToException<ReferenceException>("Unable to dereference.");
+    }
+#else
+    if ((res = H5Rdereference(location.getId(), H5R_OBJECT, &href)) < 0) {
+        HDF5ErrMapper::ToException<ReferenceException>("Unable to dereference.");
+    }
+#endif
+    return Object(res);
+}
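+
+// Usage sketch (illustrative; `group` and `dset` are assumed objects in the
+// same file): create a reference and resolve it again:
+//
+//     Reference ref(group, dset);
+//     auto back = ref.dereference<DataSet>(group);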
+
+}  // namespace HighFive
diff --git a/packages/HighFive/include/highfive/bits/H5Selection_misc.hpp b/packages/HighFive/include/highfive/bits/H5Selection_misc.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..c35b7bbf32e318450a5a8ca5ebf18e02f7dacd60
--- /dev/null
+++ b/packages/HighFive/include/highfive/bits/H5Selection_misc.hpp
@@ -0,0 +1,49 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+namespace HighFive {
+
+inline Selection::Selection(const DataSpace& memspace,
+                            const DataSpace& file_space,
+                            const DataSet& set)
+    : _mem_space(memspace)
+    , _file_space(file_space)
+    , _set(set) {}
+
+inline DataSpace Selection::getSpace() const noexcept {
+    return _file_space;
+}
+
+inline DataSpace Selection::getMemSpace() const noexcept {
+    return _mem_space;
+}
+
+inline DataSet& Selection::getDataset() noexcept {
+    return _set;
+}
+
+inline const DataSet& Selection::getDataset() const noexcept {
+    return _set;
+}
+
+// Not only a shortcut but also needed for templated compatibility with DataSet
+inline const DataType Selection::getDataType() const {
+    return _set.getDataType();
+}
+
+namespace detail {
+inline Selection make_selection(const DataSpace& mem_space,
+                                const DataSpace& file_space,
+                                const DataSet& set) {
+    return Selection(mem_space, file_space, set);
+}
+}  // namespace detail
+
+}  // namespace HighFive
diff --git a/packages/HighFive/include/highfive/bits/H5Slice_traits.hpp b/packages/HighFive/include/highfive/bits/H5Slice_traits.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..52c52713f02e73a7254720a0cb9b873f8eab681d
--- /dev/null
+++ b/packages/HighFive/include/highfive/bits/H5Slice_traits.hpp
@@ -0,0 +1,375 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include <cstdlib>
+#include <vector>
+
+#include "H5_definitions.hpp"
+#include "H5Utils.hpp"
+
+#include "../H5PropertyList.hpp"
+
+namespace HighFive {
+
+class ElementSet {
+  public:
+    ///
+    /// \brief Create a list of N-dimensional points for selection.
+    ///
+    /// \param list List of flattened coordinates (e.g. in a 2-dimensional space,
+    /// `ElementSet{1, 2, 3, 4}` creates the points `(1, 2)` and `(3, 4)`).
+    explicit ElementSet(std::initializer_list<std::size_t> list);
+    ///
+    /// \brief Create a list of N-dimensional points for selection.
+    ///
+    /// \param list List of N-dim points.
+    explicit ElementSet(std::initializer_list<std::vector<std::size_t>> list);
+    ///
+    /// \brief Create a list of N-dimensional points for selection.
+    ///
+    /// \param element_ids List of flattened coordinates (e.g. in a 2-dimensional space,
+    /// `ElementSet{1, 2, 3, 4}` creates the points `(1, 2)` and `(3, 4)`).
+    explicit ElementSet(const std::vector<std::size_t>& element_ids);
+    ///
+    /// \brief Create a list of N-dimensional points for selection.
+    ///
+    /// \param element_ids List of N-dim points.
+    explicit ElementSet(const std::vector<std::vector<std::size_t>>& element_ids);
+
+  private:
+    std::vector<std::size_t> _ids;
+
+    template <typename Derivate>
+    friend class SliceTraits;
+};
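+
+// Usage sketch (illustrative; `dset` is an assumed 2D dataset): both forms
+// select the elements (1, 2) and (3, 4):
+//
+//     dset.select(ElementSet{1, 2, 3, 4});
+//     dset.select(ElementSet{{1, 2}, {3, 4}});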
+
+namespace detail {
+
+template <class To, class From>
+inline std::vector<To> convertSizeVector(const std::vector<From>& from) {
+    std::vector<To> to(from.size());
+    std::copy(from.cbegin(), from.cend(), to.begin());
+
+    return to;
+}
+}  // namespace detail
+
+inline std::vector<hsize_t> toHDF5SizeVector(const std::vector<size_t>& from) {
+    return detail::convertSizeVector<hsize_t>(from);
+}
+
+inline std::vector<size_t> toSTLSizeVector(const std::vector<hsize_t>& from) {
+    return detail::convertSizeVector<size_t>(from);
+}
+
+struct RegularHyperSlab {
+    RegularHyperSlab() = default;
+
+    RegularHyperSlab(std::vector<size_t> offset_,
+                     std::vector<size_t> count_ = {},
+                     std::vector<size_t> stride_ = {},
+                     std::vector<size_t> block_ = {})
+        : offset(toHDF5SizeVector(offset_))
+        , count(toHDF5SizeVector(count_))
+        , stride(toHDF5SizeVector(stride_))
+        , block(toHDF5SizeVector(block_)) {}
+
+    static RegularHyperSlab fromHDF5Sizes(std::vector<hsize_t> offset_,
+                                          std::vector<hsize_t> count_ = {},
+                                          std::vector<hsize_t> stride_ = {},
+                                          std::vector<hsize_t> block_ = {}) {
+        RegularHyperSlab slab;
+        slab.offset = offset_;
+        slab.count = count_;
+        slab.stride = stride_;
+        slab.block = block_;
+
+        return slab;
+    }
+
+    size_t rank() const {
+        return std::max(std::max(offset.size(), count.size()),
+                        std::max(stride.size(), block.size()));
+    }
+
+    /// Dimensions when all gaps are removed.
+    std::vector<size_t> packedDims() const {
+        auto n_dims = rank();
+        auto dims = std::vector<size_t>(n_dims, 0);
+
+        for (size_t i = 0; i < n_dims; ++i) {
+            dims[i] = count[i] * (block.empty() ? 1 : block[i]);
+        }
+
+        return dims;
+    }
+
+    std::vector<hsize_t> offset;
+    std::vector<hsize_t> count;
+    std::vector<hsize_t> stride;
+    std::vector<hsize_t> block;
+};
+
+class HyperSlab {
+  public:
+    HyperSlab() {
+        selects.emplace_back(RegularHyperSlab{}, Op::None);
+    }
+
+    explicit HyperSlab(const RegularHyperSlab& sel) {
+        selects.emplace_back(sel, Op::Set);
+    }
+
+    HyperSlab operator|(const RegularHyperSlab& sel) const {
+        auto ret = *this;
+        ret |= sel;
+        return ret;
+    }
+
+    HyperSlab& operator|=(const RegularHyperSlab& sel) {
+        selects.emplace_back(sel, Op::Or);
+        return *this;
+    }
+
+    HyperSlab operator&(const RegularHyperSlab& sel) const {
+        auto ret = *this;
+        ret &= sel;
+        return ret;
+    }
+
+    HyperSlab& operator&=(const RegularHyperSlab& sel) {
+        selects.emplace_back(sel, Op::And);
+        return *this;
+    }
+
+    HyperSlab operator^(const RegularHyperSlab& sel) const {
+        auto ret = *this;
+        ret ^= sel;
+        return ret;
+    }
+
+    HyperSlab& operator^=(const RegularHyperSlab& sel) {
+        selects.emplace_back(sel, Op::Xor);
+        return *this;
+    }
+
+    HyperSlab& notA(const RegularHyperSlab& sel) {
+        selects.emplace_back(sel, Op::NotA);
+        return *this;
+    }
+
+    HyperSlab& notB(const RegularHyperSlab& sel) {
+        selects.emplace_back(sel, Op::NotB);
+        return *this;
+    }
+
+    DataSpace apply(const DataSpace& space_) const {
+        auto space = space_.clone();
+        for (const auto& sel: selects) {
+            if (sel.op == Op::None) {
+                H5Sselect_none(space.getId());
+            } else {
+                auto error_code =
+                    H5Sselect_hyperslab(space.getId(),
+                                        convert(sel.op),
+                                        sel.offset.empty() ? nullptr : sel.offset.data(),
+                                        sel.stride.empty() ? nullptr : sel.stride.data(),
+                                        sel.count.empty() ? nullptr : sel.count.data(),
+                                        sel.block.empty() ? nullptr : sel.block.data());
+
+                if (error_code < 0) {
+                    HDF5ErrMapper::ToException<DataSpaceException>("Unable to select hyperslab");
+                }
+            }
+        }
+        return space;
+    }
+
+  private:
+    enum class Op {
+        Noop,
+        Set,
+        Or,
+        And,
+        Xor,
+        NotB,
+        NotA,
+        Append,
+        Prepend,
+        Invalid,
+        None,
+    };
+
+    H5S_seloper_t convert(Op op) const {
+        switch (op) {
+        case Op::Noop:
+            return H5S_SELECT_NOOP;
+        case Op::Set:
+            return H5S_SELECT_SET;
+        case Op::Or:
+            return H5S_SELECT_OR;
+        case Op::And:
+            return H5S_SELECT_AND;
+        case Op::Xor:
+            return H5S_SELECT_XOR;
+        case Op::NotB:
+            return H5S_SELECT_NOTB;
+        case Op::NotA:
+            return H5S_SELECT_NOTA;
+        case Op::Append:
+            return H5S_SELECT_APPEND;
+        case Op::Prepend:
+            return H5S_SELECT_PREPEND;
+        case Op::Invalid:
+            return H5S_SELECT_INVALID;
+        default:
+            throw DataSpaceException("Invalid HyperSlab operation.");
+        }
+    }
+
+    struct Select_: public RegularHyperSlab {
+        Select_(const RegularHyperSlab& sel, Op op_)
+            : RegularHyperSlab(sel)
+            , op(op_) {}
+
+        Op op;
+    };
+
+    std::vector<Select_> selects;
+};
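+
+// Usage sketch (illustrative; `dset` is an assumed 2D dataset): an irregular
+// selection built as the union of two regular slabs:
+//
+//     HyperSlab slab(RegularHyperSlab({0, 0}, {2, 4}));
+//     slab |= RegularHyperSlab({8, 0}, {2, 4});
+//     auto sel = dset.select(slab);   // read back as 16 packed elements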
+
+template <typename Derivate>
+class SliceTraits {
+  public:
+    ///
+    /// \brief Select a \p hyperslab in the current Slice/Dataset.
+    ///
+    /// HyperSlabs can be either regular or irregular. Irregular hyperslabs are typically generated
+    /// by taking the union of regular hyperslabs. In general, an irregular hyperslab does not fill
+    /// a multi-dimensional array completely, but only a subset of it.
+    ///
+    /// Therefore, the only memspaces supported for general hyperslabs are one-dimensional arrays.
+    Selection select(const HyperSlab& hyperslab) const;
+
+    ///
+    /// \brief Select a \p hyperslab in the current Slice/Dataset.
+    ///
+    /// If the selection can be read into a simple, multi-dimensional dataspace,
+    /// then this overload enables specifying the shape of the memory dataspace
+    /// with `memspace`. Note that "simple" implies no offsets, strides or
+    /// block counts, just the size of the block in each dimension.
+    Selection select(const HyperSlab& hyperslab, const DataSpace& memspace) const;
+
+    ///
+    /// \brief Select a region in the current Slice/Dataset of \p count points at
+    /// \p offset separated by \p stride. If strides are not provided they will
+    /// default to 1 in all dimensions.
+    ///
+    /// The vectors \p offset and \p count must have the same dimensionality.
+    ///
+    Selection select(const std::vector<size_t>& offset,
+                     const std::vector<size_t>& count,
+                     const std::vector<size_t>& stride = {},
+                     const std::vector<size_t>& block = {}) const;
+
+    ///
+    /// \brief Select a set of columns in the last dimension of this dataset.
+    ///
+    /// The column indices must be smaller than the dimension size.
+    ///
+    Selection select(const std::vector<size_t>& columns) const;
+
+    ///
+    /// \brief Select a region in the current Slice/Dataset out of a list of elements.
+    ///
+    Selection select(const ElementSet& elements) const;
+
+    template <typename T>
+    T read(const DataTransferProps& xfer_props = DataTransferProps()) const;
+
+    ///
+    /// Read the entire dataset into a buffer.
+    /// An exception is raised if the number of dimensions of the buffer and
+    /// of the dataset differ.
+    ///
+    /// The array type can be an N-pointer or an N-vector. For plain pointers
+    /// no dimensionality checking is performed; it is the user's
+    /// responsibility to ensure that the right amount of space has been
+    /// allocated.
+    template <typename T>
+    void read(T& array, const DataTransferProps& xfer_props = DataTransferProps()) const;
+
+    ///
+    /// Read the entire dataset into a raw buffer
+    ///
+    /// No dimensionality checks will be performed, it is the user's
+    /// responsibility to ensure that the right amount of space has been
+    /// allocated.
+    /// \param array: A buffer containing enough space for the data
+    /// \param dtype: The type of the data, in case it cannot be automatically guessed
+    /// \param xfer_props: Data Transfer properties
+    template <typename T>
+    void read(T* array,
+              const DataType& dtype,
+              const DataTransferProps& xfer_props = DataTransferProps()) const;
+
+    ///
+    /// Read the entire dataset into a raw buffer
+    ///
+    /// Same as `read(T*, const DataType&, const DataTransferProps&)`. However,
+    /// this overload deduces the HDF5 datatype of the element of `array` from
+    /// `T`. Note, that the file datatype is already fixed.
+    ///
+    /// \param array: A buffer containing enough space for the data
+    /// \param xfer_props: Data Transfer properties
+    template <typename T>
+    void read(T* array, const DataTransferProps& xfer_props = DataTransferProps()) const;
+
+    ///
+    /// Write the entire N-dimensional buffer to this dataset.
+    /// An exception is raised if the number of dimensions of the buffer and
+    /// of the dataset differ.
+    ///
+    /// The array type can be an N-pointer or an N-vector (e.g. int**, a
+    /// two-dimensional integer array).
+    template <typename T>
+    void write(const T& buffer, const DataTransferProps& xfer_props = DataTransferProps());
+
+    ///
+    /// Write from a raw pointer into this dataset.
+    ///
+    /// No dimensionality checks will be performed, it is the user's
+    /// responsibility to ensure that the buffer holds the right amount of
+    /// elements. For n-dimensional matrices the buffer layout follows H5
+    /// default conventions.
+    ///
+    /// Note, this is the shallowest wrapper around `H5Dwrite` and should
+    /// be used if full control is needed. Generally prefer `write`.
+    ///
+    /// \param buffer: A buffer containing the data to be written
+    /// \param dtype: The datatype of `buffer`, i.e. the memory data type.
+    /// \param xfer_props: The HDF5 data transfer properties, e.g. collective MPI-IO.
+    template <typename T>
+    void write_raw(const T* buffer,
+                   const DataType& mem_datatype,
+                   const DataTransferProps& xfer_props = DataTransferProps());
+
+    ///
+    /// Write from a raw pointer into this dataset.
+    ///
+    /// Same as `write_raw(const T*, const DataType&, const DataTransferProps&)`. However, this
+    /// overload attempts to guess the data type of `buffer`, i.e. the memory
+    /// datatype. Note that the file datatype is already fixed.
+    ///
+    template <typename T>
+    void write_raw(const T* buffer, const DataTransferProps& xfer_props = DataTransferProps());
+};
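+
+// Usage sketch (illustrative; `file` is an assumed open File): a typical
+// round-trip through the traits above:
+//
+//     auto dset = file.getDataSet("grid");
+//     auto block = dset.select({0, 0}, {2, 3})
+//                      .read<std::vector<std::vector<double>>>();
+//     dset.select({0, 0}, {2, 3}).write(block);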
+
+}  // namespace HighFive
diff --git a/packages/HighFive/include/highfive/bits/H5Slice_traits_misc.hpp b/packages/HighFive/include/highfive/bits/H5Slice_traits_misc.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..7b07c9abf9406b214e068aadd7c5a34011eb3d60
--- /dev/null
+++ b/packages/HighFive/include/highfive/bits/H5Slice_traits_misc.hpp
@@ -0,0 +1,303 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include <algorithm>
+#include <cassert>
+#include <functional>
+#include <numeric>
+#include <sstream>
+#include <string>
+
+#include <H5Dpublic.h>
+#include <H5Ppublic.h>
+
+#include "H5ReadWrite_misc.hpp"
+#include "H5Converter_misc.hpp"
+
+namespace HighFive {
+
+namespace details {
+
+// map the correct reference to the dataset depending on the layout
+// dataset -> itself
+// subselection -> parent dataset
+inline const DataSet& get_dataset(const Selection& sel) {
+    return sel.getDataset();
+}
+
+inline const DataSet& get_dataset(const DataSet& ds) {
+    return ds;
+}
+
+// map the correct memspace identifier depending on the layout
+// dataset -> entire memspace
+// selection -> resolve space id
+inline hid_t get_memspace_id(const Selection& ptr) {
+    return ptr.getMemSpace().getId();
+}
+
+inline hid_t get_memspace_id(const DataSet&) {
+    return H5S_ALL;
+}
+}  // namespace details
+
+inline ElementSet::ElementSet(std::initializer_list<std::size_t> list)
+    : _ids(list) {}
+
+inline ElementSet::ElementSet(std::initializer_list<std::vector<std::size_t>> list)
+    : ElementSet(std::vector<std::vector<std::size_t>>(list)) {}
+
+inline ElementSet::ElementSet(const std::vector<std::size_t>& element_ids)
+    : _ids(element_ids) {}
+
+inline ElementSet::ElementSet(const std::vector<std::vector<std::size_t>>& element_ids) {
+    for (const auto& vec: element_ids) {
+        std::copy(vec.begin(), vec.end(), std::back_inserter(_ids));
+    }
+}
+
+template <typename Derivate>
+inline Selection SliceTraits<Derivate>::select(const HyperSlab& hyperslab,
+                                               const DataSpace& memspace) const {
+    // Note: the current limitation is that memspace must describe a
+    //       packed memspace.
+    //
+    //       The reason for this is that we're unable to unpack general
+    //       hyperslabs when the memory is not contiguous, e.g.
+    //       `std::vector<std::vector<double>>`.
+    const auto& slice = static_cast<const Derivate&>(*this);
+    auto filespace = hyperslab.apply(slice.getSpace());
+
+    return detail::make_selection(memspace, filespace, details::get_dataset(slice));
+}
+
+template <typename Derivate>
+inline Selection SliceTraits<Derivate>::select(const HyperSlab& hyper_slab) const {
+    const auto& slice = static_cast<const Derivate&>(*this);
+    auto filespace = slice.getSpace();
+    filespace = hyper_slab.apply(filespace);
+
+    auto n_elements = H5Sget_select_npoints(filespace.getId());
+    auto memspace = DataSpace(std::array<size_t, 1>{size_t(n_elements)});
+
+    return detail::make_selection(memspace, filespace, details::get_dataset(slice));
+}
+
+
+template <typename Derivate>
+inline Selection SliceTraits<Derivate>::select(const std::vector<size_t>& offset,
+                                               const std::vector<size_t>& count,
+                                               const std::vector<size_t>& stride,
+                                               const std::vector<size_t>& block) const {
+    auto slab = HyperSlab(RegularHyperSlab(offset, count, stride, block));
+    auto memspace = DataSpace(count);
+    return select(slab, memspace);
+}
+
+template <typename Derivate>
+inline Selection SliceTraits<Derivate>::select(const std::vector<size_t>& columns) const {
+    const auto& slice = static_cast<const Derivate&>(*this);
+    const DataSpace& space = slice.getSpace();
+    std::vector<size_t> dims = space.getDimensions();
+
+    std::vector<size_t> counts = dims;
+    counts.back() = 1;
+
+    std::vector<size_t> offsets(dims.size(), 0);
+
+    HyperSlab slab;
+    for (const auto& column: columns) {
+        offsets.back() = column;
+        slab |= RegularHyperSlab(offsets, counts);
+    }
+
+    std::vector<size_t> memdims = dims;
+    memdims.back() = columns.size();
+
+    return select(slab, DataSpace(memdims));
+}
+
+template <typename Derivate>
+inline Selection SliceTraits<Derivate>::select(const ElementSet& elements) const {
+    const auto& slice = static_cast<const Derivate&>(*this);
+    const hsize_t* data = nullptr;
+    const DataSpace space = slice.getSpace().clone();
+    const std::size_t length = elements._ids.size();
+    if (length % space.getNumberDimensions() != 0) {
+        throw DataSpaceException(
+            "Number of coordinates in elements picking "
+            "should be a multiple of the dimensions.");
+    }
+    const std::size_t num_elements = length / space.getNumberDimensions();
+    std::vector<hsize_t> raw_elements;
+
+    // optimised at compile time:
+    // switch for data conversion on 32-bit platforms
+    if (std::is_same<std::size_t, hsize_t>::value) {
+        // `if constexpr` can't be used, thus a reinterpret_cast is needed.
+        data = reinterpret_cast<const hsize_t*>(&(elements._ids[0]));
+    } else {
+        raw_elements.resize(length);
+        std::copy(elements._ids.begin(), elements._ids.end(), raw_elements.begin());
+        data = raw_elements.data();
+    }
+
+    if (H5Sselect_elements(space.getId(), H5S_SELECT_SET, num_elements, data) < 0) {
+        HDF5ErrMapper::ToException<DataSpaceException>("Unable to select elements");
+    }
+
+    return detail::make_selection(DataSpace(num_elements), space, details::get_dataset(slice));
+}
+
+
+template <typename Derivate>
+template <typename T>
+inline T SliceTraits<Derivate>::read(const DataTransferProps& xfer_props) const {
+    T array;
+    read(array, xfer_props);
+    return array;
+}
+
+
+template <typename Derivate>
+template <typename T>
+inline void SliceTraits<Derivate>::read(T& array, const DataTransferProps& xfer_props) const {
+    const auto& slice = static_cast<const Derivate&>(*this);
+    const DataSpace& mem_space = slice.getMemSpace();
+
+    auto file_datatype = slice.getDataType();
+
+    const details::BufferInfo<T> buffer_info(
+        file_datatype,
+        [&slice]() -> std::string { return details::get_dataset(slice).getPath(); },
+        details::BufferInfo<T>::Operation::read);
+
+    if (!details::checkDimensions(mem_space, buffer_info.n_dimensions)) {
+        std::ostringstream ss;
+        ss << "Impossible to read DataSet of dimensions " << mem_space.getNumberDimensions()
+           << " into arrays of dimensions " << buffer_info.n_dimensions;
+        throw DataSpaceException(ss.str());
+    }
+    auto dims = mem_space.getDimensions();
+
+    if (mem_space.getElementCount() == 0) {
+        auto effective_dims = details::squeezeDimensions(dims,
+                                                         details::inspector<T>::recursive_ndim);
+
+        details::inspector<T>::prepare(array, effective_dims);
+        return;
+    }
+
+    auto r = details::data_converter::get_reader<T>(dims, array, file_datatype);
+    read(r.getPointer(), buffer_info.data_type, xfer_props);
+    // re-arrange results
+    r.unserialize(array);
+
+    auto t = buffer_info.data_type;
+    auto c = t.getClass();
+    if (c == DataTypeClass::VarLen || t.isVariableStr()) {
+#if H5_VERSION_GE(1, 12, 0)
+        // This function was introduced in 1.12.0
+        (void) H5Treclaim(t.getId(), mem_space.getId(), xfer_props.getId(), r.getPointer());
+#else
+        // This one has been deprecated since 1.12.0
+        (void) H5Dvlen_reclaim(t.getId(), mem_space.getId(), xfer_props.getId(), r.getPointer());
+#endif
+    }
+}
+
+
+template <typename Derivate>
+template <typename T>
+inline void SliceTraits<Derivate>::read(T* array,
+                                        const DataType& mem_datatype,
+                                        const DataTransferProps& xfer_props) const {
+    static_assert(!std::is_const<T>::value,
+                  "read() requires a non-const structure to read data into");
+
+    const auto& slice = static_cast<const Derivate&>(*this);
+
+    if (H5Dread(details::get_dataset(slice).getId(),
+                mem_datatype.getId(),
+                details::get_memspace_id(slice),
+                slice.getSpace().getId(),
+                xfer_props.getId(),
+                static_cast<void*>(array)) < 0) {
+        HDF5ErrMapper::ToException<DataSetException>("Error during HDF5 Read.");
+    }
+}
+
+template <typename Derivate>
+template <typename T>
+inline void SliceTraits<Derivate>::read(T* array, const DataTransferProps& xfer_props) const {
+    using element_type = typename details::inspector<T>::base_type;
+    const DataType& mem_datatype = create_and_check_datatype<element_type>();
+
+    read(array, mem_datatype, xfer_props);
+}
+
+
+template <typename Derivate>
+template <typename T>
+inline void SliceTraits<Derivate>::write(const T& buffer, const DataTransferProps& xfer_props) {
+    const auto& slice = static_cast<const Derivate&>(*this);
+    const DataSpace& mem_space = slice.getMemSpace();
+
+    if (mem_space.getElementCount() == 0) {
+        return;
+    }
+
+    auto file_datatype = slice.getDataType();
+
+    const details::BufferInfo<T> buffer_info(
+        file_datatype,
+        [&slice]() -> std::string { return details::get_dataset(slice).getPath(); },
+        details::BufferInfo<T>::Operation::write);
+
+    if (!details::checkDimensions(mem_space, buffer_info.n_dimensions)) {
+        std::ostringstream ss;
+        ss << "Impossible to write buffer of dimensions "
+           << details::format_vector(mem_space.getDimensions())
+           << " into dataset with n = " << buffer_info.n_dimensions << " dimensions.";
+        throw DataSpaceException(ss.str());
+    }
+    auto w = details::data_converter::serialize<T>(buffer, file_datatype);
+    write_raw(w.getPointer(), buffer_info.data_type, xfer_props);
+}
+
+
+template <typename Derivate>
+template <typename T>
+inline void SliceTraits<Derivate>::write_raw(const T* buffer,
+                                             const DataType& mem_datatype,
+                                             const DataTransferProps& xfer_props) {
+    const auto& slice = static_cast<const Derivate&>(*this);
+
+    if (H5Dwrite(details::get_dataset(slice).getId(),
+                 mem_datatype.getId(),
+                 details::get_memspace_id(slice),
+                 slice.getSpace().getId(),
+                 xfer_props.getId(),
+                 static_cast<const void*>(buffer)) < 0) {
+        HDF5ErrMapper::ToException<DataSetException>("Error during HDF5 Write: ");
+    }
+}
+
+template <typename Derivate>
+template <typename T>
+inline void SliceTraits<Derivate>::write_raw(const T* buffer, const DataTransferProps& xfer_props) {
+    using element_type = typename details::inspector<T>::base_type;
+    const auto& mem_datatype = create_and_check_datatype<element_type>();
+
+    write_raw(buffer, mem_datatype, xfer_props);
+}
+
+
+}  // namespace HighFive
diff --git a/packages/HighFive/include/highfive/bits/H5Utils.hpp b/packages/HighFive/include/highfive/bits/H5Utils.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..2d9d24f887174e5719949c7f4a2bb03b52829a94
--- /dev/null
+++ b/packages/HighFive/include/highfive/bits/H5Utils.hpp
@@ -0,0 +1,82 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+// internal utilities functions
+#include <algorithm>
+#include <array>
+#include <cstddef>  // __GLIBCXX__
+#include <exception>
+#include <string>
+#include <type_traits>
+#include <vector>
+#include <sstream>
+
+#include <H5public.h>
+
+#include "../H5Exception.hpp"
+#include "H5Friends.hpp"
+
+namespace HighFive {
+
+// If ever used, recognize dimensions of FixedLenStringArray
+template <std::size_t N>
+class FixedLenStringArray;
+
+namespace details {
+// converter function for hsize_t -> size_t when hsize_t != size_t
+template <typename Size>
+inline std::vector<std::size_t> to_vector_size_t(const std::vector<Size>& vec) {
+    static_assert(std::is_same<Size, std::size_t>::value == false,
+                  " hsize_t != size_t mandatory here");
+    std::vector<size_t> res(vec.size());
+    std::transform(vec.cbegin(), vec.cend(), res.begin(), [](Size e) {
+        return static_cast<size_t>(e);
+    });
+    return res;
+}
+
+// converter function for hsize_t -> size_t when size_t == hsize_t
+inline std::vector<std::size_t> to_vector_size_t(const std::vector<std::size_t>& vec) {
+    return vec;
+}
+
+// read name from a H5 object using the specified function
+template <typename T>
+inline std::string get_name(T fct) {
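+    // Two-pass lookup: try a fixed-size stack buffer first; the callback
+    // returns the real name length, so on overflow retry once with a heap
+    // buffer of exactly the right size.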
+    const size_t maxLength = 255;
+    char buffer[maxLength + 1];
+    ssize_t retcode = fct(buffer, static_cast<hsize_t>(maxLength) + 1);
+    if (retcode < 0) {
+        HDF5ErrMapper::ToException<GroupException>("Error accessing object name");
+    }
+    const size_t length = static_cast<std::size_t>(retcode);
+    if (length <= maxLength) {
+        return std::string(buffer, length);
+    }
+    std::vector<char> bigBuffer(length + 1, 0);
+    fct(bigBuffer.data(), length + 1);
+    return std::string(bigBuffer.data(), length);
+}
+
+template <class Container>
+inline std::string format_vector(const Container& container) {
+    auto sout = std::stringstream{};
+
+    sout << "[ ";
+    for (size_t i = 0; i < container.size(); ++i) {
+        sout << container[i] << (i == container.size() - 1 ? "" : ", ");
+    }
+    sout << "]";
+
+    return sout.str();
+}
+
+}  // namespace details
+}  // namespace HighFive
diff --git a/packages/HighFive/include/highfive/bits/H5_definitions.hpp b/packages/HighFive/include/highfive/bits/H5_definitions.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..746723c8839344ef8cc2dcc55a9170029ce13f47
--- /dev/null
+++ b/packages/HighFive/include/highfive/bits/H5_definitions.hpp
@@ -0,0 +1,50 @@
+#pragma once
+
+#include <cstddef>  // std::size_t, used by the FixedLenStringArray forward declaration
+
+#if defined(__GNUC__) || defined(__clang__)
+#define H5_DEPRECATED(msg) __attribute__((deprecated(#msg)))
+#elif defined(_MSC_VER)
+#define H5_DEPRECATED(msg) __declspec(deprecated(#msg))
+#else
+#pragma message("WARNING: Compiler doesn't support deprecation")
+#define H5_DEPRECATED(msg)
+#endif
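+
+// Usage sketch (hypothetical declaration, for illustration only):
+//   H5_DEPRECATED(Use createDataSet instead) void makeDataSet();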
+
+
+// Forward declarations
+
+namespace HighFive {
+
+enum class LinkType;
+enum class ObjectType;
+enum class PropertyType;
+
+class Attribute;
+class DataSet;
+class DataSpace;
+class DataType;
+class Exception;
+class File;
+class FileDriver;
+class Group;
+class Object;
+class ObjectInfo;
+class Reference;
+class Selection;
+class SilenceHDF5;
+
+template <typename T>
+class AtomicType;
+
+template <typename Derivate>
+class AnnotateTraits;
+
+template <std::size_t N>
+class FixedLenStringArray;
+
+template <typename Derivate>
+class NodeTraits;
+
+template <PropertyType T>
+class PropertyList;
+
+}  // namespace HighFive
diff --git a/packages/HighFive/include/highfive/bits/string_padding.hpp b/packages/HighFive/include/highfive/bits/string_padding.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..e6e6908ddc5e320019bff48fb5cf494c76bb96b0
--- /dev/null
+++ b/packages/HighFive/include/highfive/bits/string_padding.hpp
@@ -0,0 +1,14 @@
+#pragma once
+
+#include <type_traits>  // std::underlying_type
+
+#include <H5Tpublic.h>
+
+namespace HighFive {
+
+enum class StringPadding : std::underlying_type<H5T_str_t>::type {
+    NullTerminated = H5T_STR_NULLTERM,
+    NullPadded = H5T_STR_NULLPAD,
+    SpacePadded = H5T_STR_SPACEPAD
+};
+
+
+}
diff --git a/packages/HighFive/include/highfive/h5easy_bits/H5Easy_Eigen.hpp b/packages/HighFive/include/highfive/h5easy_bits/H5Easy_Eigen.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..5b5d3b9a5f45e72e832eba821e3ab993b2997250
--- /dev/null
+++ b/packages/HighFive/include/highfive/h5easy_bits/H5Easy_Eigen.hpp
@@ -0,0 +1,145 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include "../H5Easy.hpp"
+#include "H5Easy_misc.hpp"
+#include "H5Easy_scalar.hpp"
+
+#ifdef H5_USE_EIGEN
+
+namespace H5Easy {
+
+namespace detail {
+
+template <typename T>
+struct io_impl<T, typename std::enable_if<std::is_base_of<Eigen::DenseBase<T>, T>::value>::type> {
+    // abbreviate row-major <-> col-major conversions
+    template <typename S>
+    struct types {
+        using row_major = Eigen::Ref<
+            const Eigen::Array<typename std::decay<T>::type::Scalar,
+                               std::decay<T>::type::RowsAtCompileTime,
+                               std::decay<T>::type::ColsAtCompileTime,
+                               std::decay<T>::type::ColsAtCompileTime == 1 ? Eigen::ColMajor
+                                                                           : Eigen::RowMajor,
+                               std::decay<T>::type::MaxRowsAtCompileTime,
+                               std::decay<T>::type::MaxColsAtCompileTime>,
+            0,
+            Eigen::InnerStride<1>>;
+
+        using col_major =
+            Eigen::Map<Eigen::Array<typename std::decay<T>::type::Scalar,
+                                    std::decay<T>::type::RowsAtCompileTime,
+                                    std::decay<T>::type::ColsAtCompileTime,
+                                    std::decay<T>::type::ColsAtCompileTime == 1 ? Eigen::ColMajor
+                                                                                : Eigen::RowMajor,
+                                    std::decay<T>::type::MaxRowsAtCompileTime,
+                                    std::decay<T>::type::MaxColsAtCompileTime>>;
+    };
+
+    // return the shape of an Eigen::DenseBase<T> object as a "std::vector<size_t>" of size 1 or 2
+    inline static std::vector<size_t> shape(const T& data) {
+        if (std::decay<T>::type::RowsAtCompileTime == 1) {
+            return {static_cast<size_t>(data.cols())};
+        }
+        if (std::decay<T>::type::ColsAtCompileTime == 1) {
+            return {static_cast<size_t>(data.rows())};
+        }
+        return {static_cast<size_t>(data.rows()), static_cast<size_t>(data.cols())};
+    }
+
+    using EigenIndex = Eigen::DenseIndex;
+
+    // get the shape of a "DataSet" as a size-2 "std::vector<Eigen::Index>"
+    template <class D>
+    inline static std::vector<EigenIndex> shape(const File& file,
+                                                const std::string& path,
+                                                const D& dataset,
+                                                int RowsAtCompileTime) {
+        std::vector<size_t> dims = dataset.getDimensions();
+
+        if (dims.size() == 1 && RowsAtCompileTime == 1) {
+            return std::vector<EigenIndex>{1u, static_cast<EigenIndex>(dims[0])};
+        }
+        if (dims.size() == 1) {
+            return std::vector<EigenIndex>{static_cast<EigenIndex>(dims[0]), 1u};
+        }
+        if (dims.size() == 2) {
+            return std::vector<EigenIndex>{static_cast<EigenIndex>(dims[0]),
+                                           static_cast<EigenIndex>(dims[1])};
+        }
+
+        throw detail::error(file, path, "H5Easy::load: Inconsistent rank");
+    }
+
+    inline static DataSet dump(File& file,
+                               const std::string& path,
+                               const T& data,
+                               const DumpOptions& options) {
+        using row_major_type = typename types<T>::row_major;
+        using value_type = typename std::decay<T>::type::Scalar;
+        row_major_type row_major(data);
+        DataSet dataset = initDataset<value_type>(file, path, shape(data), options);
+        dataset.write_raw(row_major.data());
+        if (options.flush()) {
+            file.flush();
+        }
+        return dataset;
+    }
+
+    inline static T load(const File& file, const std::string& path) {
+        DataSet dataset = file.getDataSet(path);
+        std::vector<typename T::Index> dims = shape(file, path, dataset, T::RowsAtCompileTime);
+        T data(dims[0], dims[1]);
+        dataset.read(data.data());
+        if (data.IsVectorAtCompileTime || data.IsRowMajor) {
+            return data;
+        }
+        using col_major = typename types<T>::col_major;
+        return col_major(data.data(), dims[0], dims[1]);
+    }
+
+    inline static Attribute dumpAttribute(File& file,
+                                          const std::string& path,
+                                          const std::string& key,
+                                          const T& data,
+                                          const DumpOptions& options) {
+        using row_major_type = typename types<T>::row_major;
+        using value_type = typename std::decay<T>::type::Scalar;
+        row_major_type row_major(data);
+        Attribute attribute = initAttribute<value_type>(file, path, key, shape(data), options);
+        attribute.write_raw(row_major.data());
+        if (options.flush()) {
+            file.flush();
+        }
+        return attribute;
+    }
+
+    inline static T loadAttribute(const File& file,
+                                  const std::string& path,
+                                  const std::string& key) {
+        DataSet dataset = file.getDataSet(path);
+        Attribute attribute = dataset.getAttribute(key);
+        DataSpace dataspace = attribute.getSpace();
+        std::vector<typename T::Index> dims = shape(file, path, dataspace, T::RowsAtCompileTime);
+        T data(dims[0], dims[1]);
+        attribute.read(data.data());
+        if (data.IsVectorAtCompileTime || data.IsRowMajor) {
+            return data;
+        }
+        using col_major = typename types<T>::col_major;
+        return col_major(data.data(), dims[0], dims[1]);
+    }
+};
+
+}  // namespace detail
+}  // namespace H5Easy
+
+#endif  // H5_USE_EIGEN
diff --git a/packages/HighFive/include/highfive/h5easy_bits/H5Easy_misc.hpp b/packages/HighFive/include/highfive/h5easy_bits/H5Easy_misc.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..69798b2a4e88131817fc2234dab28edc9d77deaf
--- /dev/null
+++ b/packages/HighFive/include/highfive/h5easy_bits/H5Easy_misc.hpp
@@ -0,0 +1,153 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include "../H5Easy.hpp"
+
+namespace H5Easy {
+
+namespace detail {
+
+// Build an error message and return an "Exception" (not yet thrown).
+inline Exception error(const File& file, const std::string& path, const std::string& message) {
+    std::ostringstream ss;
+    ss << message << std::endl
+       << "Path: " << path << std::endl
+       << "Filename: " << file.getName() << std::endl;
+    return Exception(ss.str());
+}
+
+// Generate specific dump error
+inline Exception dump_error(File& file, const std::string& path) {
+    if (file.getObjectType(path) == ObjectType::Dataset) {
+        return error(file,
+                     path,
+                     "H5Easy: Dataset already exists, dump with H5Easy::DumpMode::Overwrite "
+                     "to overwrite (with an array of the same shape).");
+    } else {
+        return error(
+            file,
+            path,
+            "H5Easy: path exists, but does not correspond to a Dataset. Dump not possible.");
+    }
+}
+
+// get an opened DataSet: nd-array
+template <class T>
+inline DataSet initDataset(File& file,
+                           const std::string& path,
+                           const std::vector<size_t>& shape,
+                           const DumpOptions& options) {
+    if (!file.exist(path)) {
+        if (!options.compress() && !options.isChunked()) {
+            return file.createDataSet<T>(path, DataSpace(shape), {}, {}, true);
+        } else {
+            std::vector<hsize_t> chunks(shape.begin(), shape.end());
+            if (options.isChunked()) {
+                chunks = options.getChunkSize();
+                if (chunks.size() != shape.size()) {
+                    throw error(file, path, "H5Easy::dump: Incorrect rank ChunkSize");
+                }
+            }
+            DataSetCreateProps props;
+            props.add(Chunking(chunks));
+            if (options.compress()) {
+                props.add(Shuffle());
+                props.add(Deflate(options.getCompressionLevel()));
+            }
+            return file.createDataSet<T>(path, DataSpace(shape), props, {}, true);
+        }
+    } else if (options.overwrite() && file.getObjectType(path) == ObjectType::Dataset) {
+        DataSet dataset = file.getDataSet(path);
+        if (dataset.getDimensions() != shape) {
+            throw error(file, path, "H5Easy::dump: Inconsistent dimensions");
+        }
+        return dataset;
+    }
+    throw dump_error(file, path);
+}
+
+// get an opened DataSet: scalar
+template <class T>
+inline DataSet initScalarDataset(File& file,
+                                 const std::string& path,
+                                 const T& data,
+                                 const DumpOptions& options) {
+    if (!file.exist(path)) {
+        return file.createDataSet<T>(path, DataSpace::From(data), {}, {}, true);
+    } else if (options.overwrite() && file.getObjectType(path) == ObjectType::Dataset) {
+        DataSet dataset = file.getDataSet(path);
+        if (dataset.getElementCount() != 1) {
+            throw error(file, path, "H5Easy::dump: Existing field not a scalar");
+        }
+        return dataset;
+    }
+    throw dump_error(file, path);
+}
+
+// get an opened Attribute: nd-array
+template <class T>
+inline Attribute initAttribute(File& file,
+                               const std::string& path,
+                               const std::string& key,
+                               const std::vector<size_t>& shape,
+                               const DumpOptions& options) {
+    if (!file.exist(path)) {
+        throw error(file, path, "H5Easy::dumpAttribute: DataSet does not exist");
+    }
+    if (file.getObjectType(path) != ObjectType::Dataset) {
+        throw error(file, path, "H5Easy::dumpAttribute: path not a DataSet");
+    }
+    DataSet dataset = file.getDataSet(path);
+    if (!dataset.hasAttribute(key)) {
+        return dataset.createAttribute<T>(key, DataSpace(shape));
+    } else if (options.overwrite()) {
+        Attribute attribute = dataset.getAttribute(key);
+        DataSpace dataspace = attribute.getSpace();
+        if (dataspace.getDimensions() != shape) {
+            throw error(file, path, "H5Easy::dumpAttribute: Inconsistent dimensions");
+        }
+        return attribute;
+    }
+    throw error(file,
+                path,
+                "H5Easy: Attribute exists, overwrite with H5Easy::DumpMode::Overwrite.");
+}
+
+// get an opened Attribute: scalar
+template <class T>
+inline Attribute initScalarAttribute(File& file,
+                                     const std::string& path,
+                                     const std::string& key,
+                                     const T& data,
+                                     const DumpOptions& options) {
+    if (!file.exist(path)) {
+        throw error(file, path, "H5Easy::dumpAttribute: DataSet does not exist");
+    }
+    if (file.getObjectType(path) != ObjectType::Dataset) {
+        throw error(file, path, "H5Easy::dumpAttribute: path not a DataSet");
+    }
+    DataSet dataset = file.getDataSet(path);
+    if (!dataset.hasAttribute(key)) {
+        return dataset.createAttribute<T>(key, DataSpace::From(data));
+    } else if (options.overwrite()) {
+        Attribute attribute = dataset.getAttribute(key);
+        DataSpace dataspace = attribute.getSpace();
+        if (dataspace.getElementCount() != 1) {
+            throw error(file, path, "H5Easy::dumpAttribute: Existing field not a scalar");
+        }
+        return attribute;
+    }
+    throw error(file,
+                path,
+                "H5Easy: Attribute exists, overwrite with H5Easy::DumpMode::Overwrite.");
+}
+
+}  // namespace detail
+}  // namespace H5Easy
diff --git a/packages/HighFive/include/highfive/h5easy_bits/H5Easy_opencv.hpp b/packages/HighFive/include/highfive/h5easy_bits/H5Easy_opencv.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..b640cd854362f3bb55af6edf26888d466a2f27ce
--- /dev/null
+++ b/packages/HighFive/include/highfive/h5easy_bits/H5Easy_opencv.hpp
@@ -0,0 +1,100 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include "../H5Easy.hpp"
+#include "H5Easy_misc.hpp"
+#include "H5Easy_scalar.hpp"
+
+#ifdef H5_USE_OPENCV
+
+namespace H5Easy {
+
+namespace detail {
+
+template <class T>
+struct is_opencv: std::false_type {};
+template <class T>
+struct is_opencv<cv::Mat_<T>>: std::true_type {};
+
+template <typename T>
+struct io_impl<T, typename std::enable_if<is_opencv<T>::value>::type> {
+    inline static std::vector<size_t> shape(const T& data) {
+        return std::vector<size_t>{static_cast<size_t>(data.rows), static_cast<size_t>(data.cols)};
+    }
+
+    inline static std::vector<int> shape(const File& file,
+                                         const std::string& path,
+                                         std::vector<size_t> dims) {
+        if (dims.size() == 1) {
+            return std::vector<int>{static_cast<int>(dims[0]), 1};
+        }
+        if (dims.size() == 2) {
+            return std::vector<int>{static_cast<int>(dims[0]), static_cast<int>(dims[1])};
+        }
+
+        throw detail::error(file, path, "H5Easy::load: Inconsistent rank");
+    }
+
+    inline static DataSet dump(File& file,
+                               const std::string& path,
+                               const T& data,
+                               const DumpOptions& options) {
+        using value_type = typename T::value_type;
+        DataSet dataset = initDataset<value_type>(file, path, shape(data), options);
+        std::vector<value_type> v(data.begin(), data.end());
+        dataset.write_raw(v.data());
+        if (options.flush()) {
+            file.flush();
+        }
+        return dataset;
+    }
+
+    inline static T load(const File& file, const std::string& path) {
+        using value_type = typename T::value_type;
+        DataSet dataset = file.getDataSet(path);
+        std::vector<int> dims = shape(file, path, dataset.getDimensions());
+        T data(dims[0], dims[1]);
+        dataset.read(reinterpret_cast<value_type*>(data.data));
+        return data;
+    }
+
+    inline static Attribute dumpAttribute(File& file,
+                                          const std::string& path,
+                                          const std::string& key,
+                                          const T& data,
+                                          const DumpOptions& options) {
+        using value_type = typename T::value_type;
+        Attribute attribute = initAttribute<value_type>(file, path, key, shape(data), options);
+        std::vector<value_type> v(data.begin(), data.end());
+        attribute.write_raw(v.data());
+        if (options.flush()) {
+            file.flush();
+        }
+        return attribute;
+    }
+
+    inline static T loadAttribute(const File& file,
+                                  const std::string& path,
+                                  const std::string& key) {
+        using value_type = typename T::value_type;
+        DataSet dataset = file.getDataSet(path);
+        Attribute attribute = dataset.getAttribute(key);
+        DataSpace dataspace = attribute.getSpace();
+        std::vector<int> dims = shape(file, path, dataspace.getDimensions());
+        T data(dims[0], dims[1]);
+        attribute.read(reinterpret_cast<value_type*>(data.data));
+        return data;
+    }
+};
+
+}  // namespace detail
+}  // namespace H5Easy
+
+#endif  // H5_USE_OPENCV
diff --git a/packages/HighFive/include/highfive/h5easy_bits/H5Easy_public.hpp b/packages/HighFive/include/highfive/h5easy_bits/H5Easy_public.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..2cc55d0f0f007a6a6cf155213ea44c019b280806
--- /dev/null
+++ b/packages/HighFive/include/highfive/h5easy_bits/H5Easy_public.hpp
@@ -0,0 +1,170 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include "../H5Easy.hpp"
+
+namespace H5Easy {
+
+inline Compression::Compression(bool enable) {
+    if (enable) {
+        m_compression_level = 9;
+    } else {
+        m_compression_level = 0;
+    }
+}
+
+template <class T>
+inline Compression::Compression(T level)
+    : m_compression_level(static_cast<unsigned>(level)) {}
+
+inline unsigned Compression::get() const {
+    return m_compression_level;
+}
+
+inline void DumpOptions::set(DumpMode mode) {
+    m_overwrite = static_cast<bool>(mode);
+}
+
+inline void DumpOptions::set(Flush mode) {
+    m_flush = static_cast<bool>(mode);
+}
+
+inline void DumpOptions::set(const Compression& level) {
+    m_compression_level = level.get();
+}
+
+template <class T, class... Args>
+inline void DumpOptions::set(T arg, Args... args) {
+    set(arg);
+    set(args...);
+}
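+
+// e.g. (a sketch, assuming DumpOptions forwards its constructor arguments to set()):
+//   DumpOptions options(DumpMode::Overwrite, Compression(8));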
+
+template <class T>
+inline void DumpOptions::setChunkSize(const std::vector<T>& shape) {
+    m_chunk_size = std::vector<hsize_t>(shape.begin(), shape.end());
+}
+
+inline void DumpOptions::setChunkSize(std::initializer_list<size_t> shape) {
+    m_chunk_size = std::vector<hsize_t>(shape.begin(), shape.end());
+}
+
+inline bool DumpOptions::overwrite() const {
+    return m_overwrite;
+}
+
+inline bool DumpOptions::flush() const {
+    return m_flush;
+}
+
+inline bool DumpOptions::compress() const {
+    return m_compression_level > 0;
+}
+
+inline unsigned DumpOptions::getCompressionLevel() const {
+    return m_compression_level;
+}
+
+inline bool DumpOptions::isChunked() const {
+    return m_chunk_size.size() > 0;
+}
+
+inline std::vector<hsize_t> DumpOptions::getChunkSize() const {
+    return m_chunk_size;
+}
+
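+// A minimal usage sketch of the free functions defined below (file name illustrative):
+//   H5Easy::File file("example.h5", H5Easy::File::Overwrite);
+//   H5Easy::dump(file, "/path/to/A", std::vector<double>{1.0, 2.0});
+//   auto A = H5Easy::load<std::vector<double>>(file, "/path/to/A");
+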
+inline size_t getSize(const File& file, const std::string& path) {
+    return file.getDataSet(path).getElementCount();
+}
+
+inline std::vector<size_t> getShape(const File& file, const std::string& path) {
+    return file.getDataSet(path).getDimensions();
+}
+
+template <class T>
+inline DataSet dump(File& file,
+                    const std::string& path,
+                    const T& data,
+                    const DumpOptions& options) {
+    return detail::io_impl<T>::dump(file, path, data, options);
+}
+
+template <class T>
+inline DataSet dump(File& file, const std::string& path, const T& data, DumpMode mode) {
+    return detail::io_impl<T>::dump(file, path, data, DumpOptions(mode));
+}
+
+template <class T>
+inline DataSet dump(File& file,
+                    const std::string& path,
+                    const T& data,
+                    const std::vector<size_t>& idx,
+                    const DumpOptions& options) {
+    return detail::io_impl<T>::dump_extend(file, path, data, idx, options);
+}
+
+template <class T>
+inline DataSet dump(File& file,
+                    const std::string& path,
+                    const T& data,
+                    const std::initializer_list<size_t>& idx,
+                    const DumpOptions& options) {
+    return detail::io_impl<T>::dump_extend(file, path, data, idx, options);
+}
+
+template <class T>
+inline DataSet dump(File& file,
+                    const std::string& path,
+                    const T& data,
+                    const std::vector<size_t>& idx) {
+    return detail::io_impl<T>::dump_extend(file, path, data, idx, DumpOptions());
+}
+
+template <class T>
+inline DataSet dump(File& file,
+                    const std::string& path,
+                    const T& data,
+                    const std::initializer_list<size_t>& idx) {
+    return detail::io_impl<T>::dump_extend(file, path, data, idx, DumpOptions());
+}
+
+template <class T>
+inline T load(const File& file, const std::string& path, const std::vector<size_t>& idx) {
+    return detail::io_impl<T>::load_part(file, path, idx);
+}
+
+template <class T>
+inline T load(const File& file, const std::string& path) {
+    return detail::io_impl<T>::load(file, path);
+}
+
+template <class T>
+inline Attribute dumpAttribute(File& file,
+                               const std::string& path,
+                               const std::string& key,
+                               const T& data,
+                               DumpMode mode) {
+    return detail::io_impl<T>::dumpAttribute(file, path, key, data, DumpOptions(mode));
+}
+
+template <class T>
+inline Attribute dumpAttribute(File& file,
+                               const std::string& path,
+                               const std::string& key,
+                               const T& data,
+                               const DumpOptions& options) {
+    return detail::io_impl<T>::dumpAttribute(file, path, key, data, options);
+}
+
+template <class T>
+inline T loadAttribute(const File& file, const std::string& path, const std::string& key) {
+    return detail::io_impl<T>::loadAttribute(file, path, key);
+}
+
+}  // namespace H5Easy
diff --git a/packages/HighFive/include/highfive/h5easy_bits/H5Easy_scalar.hpp b/packages/HighFive/include/highfive/h5easy_bits/H5Easy_scalar.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..056d8f2dc5aaa57f9754f94efed25155d7b2341a
--- /dev/null
+++ b/packages/HighFive/include/highfive/h5easy_bits/H5Easy_scalar.hpp
@@ -0,0 +1,132 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include "../H5Easy.hpp"
+#include "H5Easy_misc.hpp"
+
+namespace H5Easy {
+
+namespace detail {
+
+/*
+Base template for partial specialization: the fallback if specialized templates don't match.
+Used e.g. for scalars.
+*/
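+// For example (a sketch, assuming an open H5Easy::File named `file`):
+//   H5Easy::dump(file, "/scalars/pi", 3.14);               // handled by this fallback
+//   double pi = H5Easy::load<double>(file, "/scalars/pi");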
+template <typename T, typename = void>
+struct io_impl {
+    inline static DataSet dump(File& file,
+                               const std::string& path,
+                               const T& data,
+                               const DumpOptions& options) {
+        DataSet dataset = initScalarDataset(file, path, data, options);
+        dataset.write(data);
+        if (options.flush()) {
+            file.flush();
+        }
+        return dataset;
+    }
+
+    inline static T load(const File& file, const std::string& path) {
+        DataSet dataset = file.getDataSet(path);
+        T data;
+        dataset.read(data);
+        return data;
+    }
+
+    inline static Attribute dumpAttribute(File& file,
+                                          const std::string& path,
+                                          const std::string& key,
+                                          const T& data,
+                                          const DumpOptions& options) {
+        Attribute attribute = initScalarAttribute(file, path, key, data, options);
+        attribute.write(data);
+        if (options.flush()) {
+            file.flush();
+        }
+        return attribute;
+    }
+
+    inline static T loadAttribute(const File& file,
+                                  const std::string& path,
+                                  const std::string& key) {
+        DataSet dataset = file.getDataSet(path);
+        Attribute attribute = dataset.getAttribute(key);
+        T data;
+        attribute.read(data);
+        return data;
+    }
+
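+    // Sketch of the intent (index values illustrative): H5Easy::dump(file, path, data, {i, j})
+    // lands here; the dataset is created extendible (or resized) so that index {i, j} fits,
+    // then the scalar is written at that position.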
+    inline static DataSet dump_extend(File& file,
+                                      const std::string& path,
+                                      const T& data,
+                                      const std::vector<size_t>& idx,
+                                      const DumpOptions& options) {
+        std::vector<size_t> ones(idx.size(), 1);
+
+        if (file.exist(path)) {
+            DataSet dataset = file.getDataSet(path);
+            std::vector<size_t> dims = dataset.getDimensions();
+            std::vector<size_t> shape = dims;
+            if (dims.size() != idx.size()) {
+                throw detail::error(
+                    file,
+                    path,
+                    "H5Easy::dump: Dimension of the index and the existing field do not match");
+            }
+            for (size_t i = 0; i < dims.size(); ++i) {
+                shape[i] = std::max(dims[i], idx[i] + 1);
+            }
+            if (shape != dims) {
+                dataset.resize(shape);
+            }
+            dataset.select(idx, ones).write(data);
+            if (options.flush()) {
+                file.flush();
+            }
+            return dataset;
+        }
+
+        std::vector<size_t> shape = idx;
+        const size_t unlim = DataSpace::UNLIMITED;
+        std::vector<size_t> unlim_shape(idx.size(), unlim);
+        std::vector<hsize_t> chunks(idx.size(), 10);
+        if (options.isChunked()) {
+            chunks = options.getChunkSize();
+            if (chunks.size() != idx.size()) {
+                throw error(file, path, "H5Easy::dump: Incorrect dimension ChunkSize");
+            }
+        }
+        for (size_t& i: shape) {
+            i++;
+        }
+        DataSpace dataspace = DataSpace(shape, unlim_shape);
+        DataSetCreateProps props;
+        props.add(Chunking(chunks));
+        DataSet dataset = file.createDataSet(path, dataspace, AtomicType<T>(), props, {}, true);
+        dataset.select(idx, ones).write(data);
+        if (options.flush()) {
+            file.flush();
+        }
+        return dataset;
+    }
+
+    inline static T load_part(const File& file,
+                              const std::string& path,
+                              const std::vector<size_t>& idx) {
+        std::vector<size_t> ones(idx.size(), 1);
+        DataSet dataset = file.getDataSet(path);
+        T data;
+        dataset.select(idx, ones).read(data);
+        return data;
+    }
+};
+
+}  // namespace detail
+}  // namespace H5Easy
diff --git a/packages/HighFive/include/highfive/h5easy_bits/H5Easy_vector.hpp b/packages/HighFive/include/highfive/h5easy_bits/H5Easy_vector.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..4c60f5cfc632d2f77ecd55c219114ad17404241d
--- /dev/null
+++ b/packages/HighFive/include/highfive/h5easy_bits/H5Easy_vector.hpp
@@ -0,0 +1,80 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include "../H5Easy.hpp"
+#include "H5Easy_misc.hpp"
+#include "H5Easy_scalar.hpp"
+
+namespace H5Easy {
+
+namespace detail {
+
+template <class T>
+struct is_vector: std::false_type {};
+template <class T>
+struct is_vector<std::vector<T>>: std::true_type {};
+
+using HighFive::details::inspector;
+
+template <typename T>
+struct io_impl<T, typename std::enable_if<is_vector<T>::value>::type> {
+    inline static DataSet dump(File& file,
+                               const std::string& path,
+                               const T& data,
+                               const DumpOptions& options) {
+        using value_type = typename inspector<T>::base_type;
+        auto dims = inspector<T>::getDimensions(data);
+        DataSet dataset = initDataset<value_type>(file,
+                                                  path,
+                                                  std::vector<size_t>(dims.begin(), dims.end()),
+                                                  options);
+        dataset.write(data);
+        if (options.flush()) {
+            file.flush();
+        }
+        return dataset;
+    }
+
+    inline static T load(const File& file, const std::string& path) {
+        DataSet dataset = file.getDataSet(path);
+        T data;
+        dataset.read(data);
+        return data;
+    }
+
+    inline static Attribute dumpAttribute(File& file,
+                                          const std::string& path,
+                                          const std::string& key,
+                                          const T& data,
+                                          const DumpOptions& options) {
+        using value_type = typename inspector<T>::base_type;
+        auto dims = inspector<T>::getDimensions(data);
+        std::vector<size_t> shape(dims.begin(), dims.end());
+        Attribute attribute = initAttribute<value_type>(file, path, key, shape, options);
+        attribute.write(data);
+        if (options.flush()) {
+            file.flush();
+        }
+        return attribute;
+    }
+
+    inline static T loadAttribute(const File& file,
+                                  const std::string& path,
+                                  const std::string& key) {
+        DataSet dataset = file.getDataSet(path);
+        Attribute attribute = dataset.getAttribute(key);
+        T data;
+        attribute.read(data);
+        return data;
+    }
+};
+
+}  // namespace detail
+}  // namespace H5Easy
diff --git a/packages/HighFive/include/highfive/h5easy_bits/H5Easy_xtensor.hpp b/packages/HighFive/include/highfive/h5easy_bits/H5Easy_xtensor.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..6b0238c4db86c2aa8b2a492825d7b4bc54a3b5d2
--- /dev/null
+++ b/packages/HighFive/include/highfive/h5easy_bits/H5Easy_xtensor.hpp
@@ -0,0 +1,84 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#pragma once
+
+#include "../H5Easy.hpp"
+#include "H5Easy_misc.hpp"
+#include "H5Easy_scalar.hpp"
+
+#ifdef H5_USE_XTENSOR
+
+namespace H5Easy {
+
+namespace detail {
+
+template <typename T>
+struct io_impl<T, typename std::enable_if<xt::is_xexpression<T>::value>::type> {
+    inline static std::vector<size_t> shape(const T& data) {
+        return std::vector<size_t>(data.shape().cbegin(), data.shape().cend());
+    }
+
+    inline static DataSet dump(File& file,
+                               const std::string& path,
+                               const T& data,
+                               const DumpOptions& options) {
+        using value_type = typename std::decay_t<T>::value_type;
+        DataSet dataset = initDataset<value_type>(file, path, shape(data), options);
+        dataset.write_raw(data.data());
+        if (options.flush()) {
+            file.flush();
+        }
+        return dataset;
+    }
+
+    inline static T load(const File& file, const std::string& path) {
+        static_assert(
+            xt::has_data_interface<T>::value,
+            "Cannot load to xt::xfunction or xt::xgenerator, use e.g. xt::xtensor or xt::xarray");
+        DataSet dataset = file.getDataSet(path);
+        std::vector<size_t> dims = dataset.getDimensions();
+        T data = T::from_shape(dims);
+        dataset.read(data.data());
+        return data;
+    }
+
+    inline static Attribute dumpAttribute(File& file,
+                                          const std::string& path,
+                                          const std::string& key,
+                                          const T& data,
+                                          const DumpOptions& options) {
+        using value_type = typename std::decay_t<T>::value_type;
+        Attribute attribute = initAttribute<value_type>(file, path, key, shape(data), options);
+        attribute.write_raw(data.data());
+        if (options.flush()) {
+            file.flush();
+        }
+        return attribute;
+    }
+
+    inline static T loadAttribute(const File& file,
+                                  const std::string& path,
+                                  const std::string& key) {
+        static_assert(
+            xt::has_data_interface<T>::value,
+            "Cannot load to xt::xfunction or xt::xgenerator, use e.g. xt::xtensor or xt::xarray");
+        DataSet dataset = file.getDataSet(path);
+        Attribute attribute = dataset.getAttribute(key);
+        DataSpace dataspace = attribute.getSpace();
+        std::vector<size_t> dims = dataspace.getDimensions();
+        T data = T::from_shape(dims);
+        attribute.read(data.data());
+        return data;
+    }
+};
+
+}  // namespace detail
+}  // namespace H5Easy
+
+#endif  // H5_USE_XTENSOR
diff --git a/packages/HighFive/include/highfive/highfive.hpp b/packages/HighFive/include/highfive/highfive.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..f5e20cae9131ae7e6440f6fa7f16bc8b3430b336
--- /dev/null
+++ b/packages/HighFive/include/highfive/highfive.hpp
@@ -0,0 +1,14 @@
+#pragma once
+
+#include <highfive/H5Attribute.hpp>
+#include <highfive/H5DataSet.hpp>
+#include <highfive/H5DataSpace.hpp>
+#include <highfive/H5DataType.hpp>
+#include <highfive/H5File.hpp>
+#include <highfive/H5FileDriver.hpp>
+#include <highfive/H5Group.hpp>
+#include <highfive/H5PropertyList.hpp>
+#include <highfive/H5Reference.hpp>
+#include <highfive/H5Selection.hpp>
+#include <highfive/H5Utility.hpp>
+#include <highfive/H5Version.hpp>
diff --git a/packages/HighFive/src/benchmarks/Makefile b/packages/HighFive/src/benchmarks/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..fb37fdb6e4f71216063b76406ccef3819e72802f
--- /dev/null
+++ b/packages/HighFive/src/benchmarks/Makefile
@@ -0,0 +1,21 @@
+# This is a minimal Makefile to build the benchmark programs.
+# It was kept as an independent Makefile so that users can trivially change flags.
+#
+# Blue Brain Project - EPFL, 2022
+
+PROGRAMS:=hdf5_bench hdf5_bench_improved highfive_bench
+
+CXX?=g++
+COMPILE_OPTS=-g -O2 -Wall
+CXXFLAGS=-I ../../include/ `pkg-config --libs --cflags hdf5` -std=c++11 ${COMPILE_OPTS}
+
+
+all: $(PROGRAMS)
+
+%: %.cpp $(DEPS)
+	$(CXX) -o $@ $< $(CXXFLAGS)
+
+clean:
+	rm -f ${PROGRAMS}
+
+.PHONY: clean
diff --git a/packages/HighFive/src/benchmarks/README.md b/packages/HighFive/src/benchmarks/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..7d01db79d629b22d6afef3e7e81d6bdb8b27a649
--- /dev/null
+++ b/packages/HighFive/src/benchmarks/README.md
@@ -0,0 +1,17 @@
+# Benchmarking
+
+This folder contains several baseline programs used to benchmark HighFive and assess
+its overhead with respect to programs using the raw HDF5 C API.
+
+It features a straightforward Makefile whose flags can be easily tuned. By default,
+compilation uses `-g -O2` and HDF5 is found via `pkg-config`.
+Additionally, a `run_benchmark.sh` script is provided to measure execution time and
+profile using hpctoolkit.
+
+## Compile
+
+Run `make`. By default it compiles with `-g -O2`, but this is configurable, e.g.
+
+```
+make CXX=clang++ COMPILE_OPTS="-g -O1"
+```
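+
+## Run
+
+A sketch of a typical session (`run_benchmark.sh` profiles with hpctoolkit only when
+given a CLI argument, and expects an `hpctoolkit` environment module):
+
+```
+./run_benchmark.sh       # timings only
+./run_benchmark.sh 1     # timings + hpctoolkit profiles
+```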
diff --git a/packages/HighFive/src/benchmarks/hdf5_bench.cpp b/packages/HighFive/src/benchmarks/hdf5_bench.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..fecb6e0e4ce2b98a2fbdab649543a22abfdd42c9
--- /dev/null
+++ b/packages/HighFive/src/benchmarks/hdf5_bench.cpp
@@ -0,0 +1,59 @@
+#include "hdf5.h"
+#include <iostream>
+#include <stdexcept>
+#include <vector>
+
+#define NROWS      1000000  // 1M
+#define ROW_LENGTH 10
+
+const std::vector<std::vector<int>> data(NROWS, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
+
+int do_iteration() {
+    /* Create a new file using default properties. */
+    hid_t file_id = H5Fcreate("dataset_integer_raw.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+
+    /* Create the data space for the dataset. */
+    hsize_t dims[] = {NROWS, ROW_LENGTH};
+    hid_t dataspace_id = H5Screate_simple(2, dims, NULL);
+
+    // Row memspace
+    hsize_t mem_dims[] = {1, ROW_LENGTH};
+    hid_t memspace_id = H5Screate_simple(2, mem_dims, NULL);
+
+    /* Create the dataset. */
+    hid_t dataset_id = H5Dcreate2(
+        file_id, "/dataset", H5T_NATIVE_INT, dataspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+
+    herr_t status;
+
+    /* Write each row to the dataset. */
+    for (size_t i = 0; i < NROWS; i++) {
+        // File Hyperslabs
+        hsize_t count[] = {1, 10};
+        hsize_t offset[] = {i, 0};
+        status = H5Sselect_hyperslab(dataspace_id, H5S_SELECT_SET, offset, NULL, count, NULL);
+        if (status != 0) {
+            throw(std::runtime_error("H5Sselect_hyperslab failed"));
+        }
+        status = H5Dwrite(
+            dataset_id, H5T_NATIVE_INT, memspace_id, dataspace_id, H5P_DEFAULT, data[i].data());
+        if (status != 0) {
+            throw(std::runtime_error("H5Dwrite failed"));
+        }
+    }
+
+    status = H5Sclose(memspace_id);
+    status |= H5Sclose(dataspace_id);
+    status |= H5Dclose(dataset_id);
+    status |= H5Fclose(file_id);
+    if (status != 0) {
+        std::cerr << "Error while releasing resources" << std::endl;
+    }
+    return status;
+}
+
+int main() {
+    for (int i = 0; i < 200; i++) {
+        do_iteration();
+    }
+}
diff --git a/packages/HighFive/src/benchmarks/hdf5_bench_improved.cpp b/packages/HighFive/src/benchmarks/hdf5_bench_improved.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..cea1ff3819b86a64665ccdf90f09b1aae503e7bc
--- /dev/null
+++ b/packages/HighFive/src/benchmarks/hdf5_bench_improved.cpp
@@ -0,0 +1,62 @@
+#include "hdf5.h"
+#include <iostream>
+#include <sstream>
+#include <stdexcept>
+#include <vector>
+
+#define NROWS      1000000  // 1M
+#define ROW_LENGTH 10
+
+const std::vector<std::vector<int>> data(NROWS, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
+
+inline void check_dimensions(size_t size_vec, size_t size_dataset, size_t dimension) {
+    if (size_vec != size_dataset) {
+        std::ostringstream ss;
+        ss << "Mismatch between vector size (" << size_vec << ") and dataset size ("
+           << size_dataset;
+        ss << ") on dimension " << dimension;
+        throw std::runtime_error(ss.str());
+    }
+}
+
+int do_iteration() {
+    /* Create a new file using default properties. */
+    hid_t file_id =
+        H5Fcreate("dataset_integer_raw_improved.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+
+    /* Create the data space for the dataset. */
+    hsize_t dims[] = {NROWS, ROW_LENGTH};
+    hid_t dataspace_id = H5Screate_simple(2, dims, NULL);
+
+    /* Create the dataset. */
+    hid_t dataset_id = H5Dcreate2(
+        file_id, "/dataset", H5T_NATIVE_INT, dataspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+
+    herr_t status;
+
+    /* It's faster to aggregate all rows into a single contiguous buffer to do fewer IO ops
+     */
+    std::vector<int> data_contig;
+    data_contig.reserve(NROWS * ROW_LENGTH);
+    for (const auto& row: data) {
+        check_dimensions(row.size(), dims[1], 1);
+        data_contig.insert(data_contig.end(), row.begin(), row.end());
+    }
+
+    status =
+        H5Dwrite(dataset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data_contig.data());
+
+    status |= H5Sclose(dataspace_id);
+    status |= H5Dclose(dataset_id);
+    status |= H5Fclose(file_id);
+    if (status != 0) {
+        std::cerr << "Error while releasing resources" << std::endl;
+    }
+    return status;
+}
+
+int main() {
+    for (int i = 0; i < 200; i++) {
+        do_iteration();
+    }
+}
diff --git a/packages/HighFive/src/benchmarks/highfive_bench.cpp b/packages/HighFive/src/benchmarks/highfive_bench.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..bcc7be93ce742701d03e8daa6f21286f838efe9e
--- /dev/null
+++ b/packages/HighFive/src/benchmarks/highfive_bench.cpp
@@ -0,0 +1,15 @@
+#include <highfive/highfive.hpp>
+#include <vector>
+
+const std::vector<std::vector<int>> data(1000000, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
+
+int do_iteration() {
+    HighFive::File("dataset_integer.h5", HighFive::File::Truncate).createDataSet("dataset", data);
+    return 0;
+}
+
+int main() {
+    for (int i = 0; i < 200; i++) {
+        do_iteration();
+    }
+}
diff --git a/packages/HighFive/src/benchmarks/imgs/bench_hdf5_base.png b/packages/HighFive/src/benchmarks/imgs/bench_hdf5_base.png
new file mode 100644
index 0000000000000000000000000000000000000000..b72e3e00dc263c0684c38dfa2d75b72a6c4c82f6
Binary files /dev/null and b/packages/HighFive/src/benchmarks/imgs/bench_hdf5_base.png differ
diff --git a/packages/HighFive/src/benchmarks/imgs/bench_hdf5_improved.png b/packages/HighFive/src/benchmarks/imgs/bench_hdf5_improved.png
new file mode 100644
index 0000000000000000000000000000000000000000..fb49fa01183fec4198727e0bc43aa4770c645650
Binary files /dev/null and b/packages/HighFive/src/benchmarks/imgs/bench_hdf5_improved.png differ
diff --git a/packages/HighFive/src/benchmarks/imgs/bench_highfive.png b/packages/HighFive/src/benchmarks/imgs/bench_highfive.png
new file mode 100644
index 0000000000000000000000000000000000000000..085731d6ad398acc9a12aff8d6e8d0721681ce1c
Binary files /dev/null and b/packages/HighFive/src/benchmarks/imgs/bench_highfive.png differ
diff --git a/packages/HighFive/src/benchmarks/run_benchmark.sh b/packages/HighFive/src/benchmarks/run_benchmark.sh
new file mode 100755
index 0000000000000000000000000000000000000000..18da06497ef1bfcecf57abdf5593070de689a8d8
--- /dev/null
+++ b/packages/HighFive/src/benchmarks/run_benchmark.sh
@@ -0,0 +1,29 @@
+#!/bin/sh
+set -eu
+
+executables="hdf5_bench hdf5_bench_improved highfive_bench"
+
+# Compile all
+make
+
+# Time each benchmark: gives an overview and a possible cache warm-up
+for exe in $executables; do
+    printf '\nRunning %s\n' "$exe"
+    time ./$exe
+done
+
+if [ $# -eq 0 ]; then
+    echo "Not running hpctoolkit. Please provide a CLI argument to proceed"
+    exit 0
+fi
+
+# Profile using hpctoolkit
+module load hpctoolkit
+rm -rf hpctoolkit-* *.hpcstruct
+
+for exe in $executables; do
+    printf '\nProfiling %s\n' "$exe"
+    hpcrun -c f1000000 -e PERF_COUNT_HW_CPU_CYCLES -e REALTIME  ./$exe
+    hpcstruct ./$exe
+    hpcprof -S $exe.hpcstruct -I ./ -I '../../include/*' hpctoolkit-$exe-*
+done
diff --git a/packages/HighFive/src/examples/CMakeLists.txt b/packages/HighFive/src/examples/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8b5f8b0affb8f68478582b66ac4c8ba01c0bb6d3
--- /dev/null
+++ b/packages/HighFive/src/examples/CMakeLists.txt
@@ -0,0 +1,51 @@
+include(HighFiveWarnings)
+
+function(compile_example example_source)
+
+    get_filename_component(example_filename ${example_source} NAME)
+    string(REPLACE ".cpp" "_bin" example_name ${example_filename})
+
+    if(${example_filename} MATCHES ".*eigen.*")
+        if(NOT HIGHFIVE_USE_EIGEN)
+            return()
+        endif()
+    endif()
+
+    if(${example_filename} MATCHES ".*boost.*")
+        if(NOT HIGHFIVE_USE_BOOST)
+            return()
+        endif()
+    endif()
+
+    if(${example_filename} MATCHES ".*parallel_hdf5.*")
+        if(NOT HIGHFIVE_PARALLEL_HDF5)
+            return()
+        endif()
+    endif()
+
+    if(${example_filename} MATCHES ".*half_float.*")
+        if(NOT HIGHFIVE_USE_HALF_FLOAT)
+            return()
+        endif()
+    endif()
+
+    if(${example_name} MATCHES ".*hl_hdf5.*")
+        find_package(HDF5 QUIET COMPONENTS HL NAMES HDF5_HL)
+        if(${HDF5_HL_FOUND})
+            message("HDF5 HL: ${HDF5_HL_LIBRARIES}")
+            add_executable(${example_name} ${example_source})
+            target_link_libraries(${example_name} HighFive HighFiveWarnings ${HDF5_HL_LIBRARIES})
+        endif()
+        return()
+    endif()
+
+    add_executable(${example_name} ${example_source})
+    target_link_libraries(${example_name} HighFive HighFiveWarnings)
+
+endfunction()
+
+file(GLOB list_example "*.cpp")
+
+foreach(example_src ${list_example})
+    compile_example(${example_src})
+endforeach()
diff --git a/packages/HighFive/src/examples/boost_multi_array_2D.cpp b/packages/HighFive/src/examples/boost_multi_array_2D.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..4bec1ec12991391a58549e8fb044294523f4a74b
--- /dev/null
+++ b/packages/HighFive/src/examples/boost_multi_array_2D.cpp
@@ -0,0 +1,40 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#include <iostream>
+
+#undef H5_USE_BOOST
+#define H5_USE_BOOST
+
+#include <boost/multi_array.hpp>
+#include <highfive/highfive.hpp>
+
+using namespace HighFive;
+
+// Create a 2D dataset 10x3 of double with boost multi array
+// and write it to a file.
+int main(void) {
+    const int nx = 10;
+    const int ny = 3;
+
+    boost::multi_array<double, 2> array(boost::extents[nx][ny]);
+
+    for (int i = 0; i < nx; ++i) {
+        for (int j = 0; j < ny; ++j) {
+            array[i][j] = double(j + i * ny);
+        }
+    }
+
+    // We create a new HDF5 file
+    File file("boost_multiarray_example.h5", File::Truncate);
+
+    // let's create our dataset of the size of the boost::multi_array
+    file.createDataSet("dset", array);
+
+    return 0;
+}
diff --git a/packages/HighFive/src/examples/boost_multiarray_complex.cpp b/packages/HighFive/src/examples/boost_multiarray_complex.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..37481db625b5b21d7d13c0ae7da4223c9d7eb327
--- /dev/null
+++ b/packages/HighFive/src/examples/boost_multiarray_complex.cpp
@@ -0,0 +1,30 @@
+/*
+ *  Copyright (c), 2020 Blue Brain Project - EPFL
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#include <algorithm>
+#include <complex>
+
+#undef H5_USE_BOOST
+#define H5_USE_BOOST
+
+#include <highfive/highfive.hpp>
+
+#include <boost/multi_array.hpp>
+
+typedef std::complex<double> complex_t;
+
+int main() {
+    boost::multi_array<complex_t, 4> multi_array(boost::extents[3][2][1][1]);
+    std::fill_n(multi_array.origin(), multi_array.num_elements(), 1.0);
+    multi_array[1][1][0][0] = complex_t{1.1, 1.2};
+
+    HighFive::File file("multi_array_complex.h5", HighFive::File::Truncate);
+    HighFive::DataSet dataset = file.createDataSet("multi_array", multi_array);
+
+    return 0;
+}
diff --git a/packages/HighFive/src/examples/boost_ublas_double.cpp b/packages/HighFive/src/examples/boost_ublas_double.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b025475b9584b2f27c6dbfcd51e3ee402898031a
--- /dev/null
+++ b/packages/HighFive/src/examples/boost_ublas_double.cpp
@@ -0,0 +1,65 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#include <iostream>
+
+#undef H5_USE_BOOST
+#define H5_USE_BOOST
+
+#include <highfive/highfive.hpp>
+
+// In some versions of Boost (starting with 1.64), you have to include the serialization header
+// before ublas
+#include <boost/serialization/vector.hpp>
+
+#include <boost/numeric/ublas/io.hpp>
+#include <boost/numeric/ublas/matrix.hpp>
+
+using namespace HighFive;
+
+const std::string FILE_NAME("boost_ublas_double.h5");
+const std::string DATASET_NAME("dset");
+
+const size_t size_x = 10;
+const size_t size_y = 10;
+
+int main(void) {
+    try {
+        typedef typename boost::numeric::ublas::matrix<double> Matrix;
+
+        // create a 10x10 matrix
+        Matrix mat(size_x, size_y);
+
+        // fill it
+        for (std::size_t i = 0; i < size_x; ++i) {
+            mat(i, i) = static_cast<double>(i);
+        }
+
+        // Create a new HDF5 file
+        File file(FILE_NAME, File::ReadWrite | File::Create | File::Truncate);
+
+        // create a new dataset with the 10x10 Matrix dimension
+        DataSet dataset = file.createDataSet<double>(DATASET_NAME, DataSpace::From(mat));
+
+        // write it
+        dataset.write(mat);
+
+        // now, let's read it back
+        Matrix result;
+        dataset.read(result);
+
+        // print what we read
+        std::cout << "Matrix result:\n" << result << std::endl;
+
+    } catch (Exception& err) {
+        // catch and print any HDF5 error
+        std::cerr << err.what() << std::endl;
+    }
+
+    return 0;  // successfully terminated
+}
diff --git a/packages/HighFive/src/examples/compound_types.cpp b/packages/HighFive/src/examples/compound_types.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..158bc6dc6de6e53314470d1db57bc392bdb765aa
--- /dev/null
+++ b/packages/HighFive/src/examples/compound_types.cpp
@@ -0,0 +1,42 @@
+/*
+ *  Copyright (c), 2021, Blue Brain Project, EPFL
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+
+// Compound datatype test :: May 2021
+// //////////////////////////////////
+
+#include <highfive/highfive.hpp>
+
+
+typedef struct {
+    double width;
+    double height;
+} Size2D;
+
+
+HighFive::CompoundType create_compound_Size2D() {
+    return {{"width", HighFive::create_datatype<double>()},
+            {"height", HighFive::create_datatype<double>()}};
+}
+
+HIGHFIVE_REGISTER_TYPE(Size2D, create_compound_Size2D)
+
+int main() {
+    const std::string dataset_name("dims");
+
+    HighFive::File file("compounds_test.h5", HighFive::File::Truncate);
+
+    auto t1 = create_compound_Size2D();
+    t1.commit(file, "Size2D");
+
+    std::vector<Size2D> dims = {{1., 2.5}, {3., 4.5}};
+    auto dataset = file.createDataSet(dataset_name, dims);
+
+    auto g1 = file.createGroup("group1");
+    g1.createAttribute(dataset_name, dims);
+}
diff --git a/packages/HighFive/src/examples/create_attribute_string_integer.cpp b/packages/HighFive/src/examples/create_attribute_string_integer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..f658457adfe0e0a51534a7e4512ca982c13fae11
--- /dev/null
+++ b/packages/HighFive/src/examples/create_attribute_string_integer.cpp
@@ -0,0 +1,54 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#include <iostream>
+#include <string>
+#include <vector>
+
+#include <highfive/highfive.hpp>
+
+using namespace HighFive;
+
+// Create a dataset, attach attributes to it (and to the file and a group),
+// then list and print the attribute names.
+int main(void) {
+    // Create a new file using the default property lists.
+    File file("create_attribute.h5", File::Truncate);
+
+    // Create a dummy dataset of one single integer
+    DataSet dataset = file.createDataSet("dset", DataSpace(1), create_datatype<int>());
+
+    // Now let's add an attribute to this dataset
+    // This attribute will be named "note"
+    // and have the following content
+    std::string note = "Very important Dataset!";
+
+    // Write in one line of code:
+    dataset.createAttribute<std::string>("note", note);
+
+    // We also add a "version" attribute
+    // that will be an array 1x2 of integer
+    std::vector<int> version{1, 0};
+
+    Attribute v = dataset.createAttribute("version", version);
+
+    // We can also create attributes on the file:
+    file.createAttribute("file_version", 1);
+
+    // and on groups in the file:
+    auto group = file.createGroup("group");
+    group.createAttribute("secret", 123);
+
+    // let's now list the keys of all attributes
+    std::vector<std::string> all_attributes_keys = dataset.listAttributeNames();
+    for (const auto& attr: all_attributes_keys) {
+        std::cout << "attribute: " << attr << std::endl;
+    }
+
+    return 0;
+}
diff --git a/packages/HighFive/src/examples/create_dataset_double.cpp b/packages/HighFive/src/examples/create_dataset_double.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d15fbdc54f196195880347cd56f2e7e6783e453d
--- /dev/null
+++ b/packages/HighFive/src/examples/create_dataset_double.cpp
@@ -0,0 +1,38 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#include <iostream>
+#include <string>
+#include <vector>
+
+#include <highfive/highfive.hpp>
+
+// Create a dataset named "dset" of doubles, size 2x6
+int main(void) {
+    using namespace HighFive;
+
+    // Create a new file using the default property lists. Note that
+    // `File::Truncate` will, if present, truncate the file before opening
+    // it for reading and writing.
+    File file("create_dataset_example.h5", File::Truncate);
+
+    // Define the size of our dataset: 2x6
+    std::vector<size_t> dims{2, 6};
+
+    // Create the dataset
+    DataSet dataset = file.createDataSet<double>("dset", DataSpace(dims));
+
+    double data[2][6] = {{1.1, 2.2, 3.3, 4.4, 5.5, 6.6},
+                         {11.11, 12.12, 13.13, 14.14, 15.15, 16.16}};
+
+    // write it
+    dataset.write(data);
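+
+    // Read-back sketch: HighFive can read a 2D dataset into a nested vector.
+    std::vector<std::vector<double>> read_back;
+    dataset.read(read_back);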
+
+
+    return 0;  // successfully terminated
+}
diff --git a/packages/HighFive/src/examples/create_dataset_half_float.cpp b/packages/HighFive/src/examples/create_dataset_half_float.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2b720cd187036c98392ed7fd2e9518934ae948ce
--- /dev/null
+++ b/packages/HighFive/src/examples/create_dataset_half_float.cpp
@@ -0,0 +1,49 @@
+/*
+ *  Copyright (c), 2022, Blue Brain Project
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+
+#ifdef H5_USE_HALF_FLOAT
+
+#include <iostream>
+#include <string>
+#include <vector>
+
+#include <highfive/highfive.hpp>
+
+const std::string FILE_NAME("create_dataset_half_float_example.h5");
+const std::string DATASET_NAME("dset");
+
+// Create a dataset named "dset", size 4x6, and type float16_t (i.e., 16-bit half-precision
+// floating-point format)
+//
+int main(void) {
+    using namespace HighFive;
+
+    // Create a new file using the default property lists.
+    File file(FILE_NAME, File::ReadWrite | File::Create | File::Truncate);
+
+    // Define the size of our dataset: 4x6
+    std::vector<size_t> dims{4, 6};
+
+    // Create the dataset
+    DataSet dataset = file.createDataSet<float16_t>(DATASET_NAME, DataSpace(dims));
+
+    std::vector<std::vector<float16_t>> data;
+    for (size_t i = 0; i < 4; ++i) {
+        data.emplace_back();
+        for (size_t j = 0; j < 6; ++j)
+            data[i].emplace_back((i + 1) * (j + 1));
+    }
+
+    // write it
+    dataset.write(data);
+
+    return 0;
+}
+
+#else
+
+#include <iostream>
+
+int main() {
+    std::cout << "This example requires HighFive built with half-float support "
+                 "(H5_USE_HALF_FLOAT).\n";
+    return 0;
+}
+#endif
diff --git a/packages/HighFive/src/examples/create_datatype.cpp b/packages/HighFive/src/examples/create_datatype.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0b1a0fb52044c915f185c52d5145f1738d279834
--- /dev/null
+++ b/packages/HighFive/src/examples/create_datatype.cpp
@@ -0,0 +1,101 @@
+#include <iostream>
+
+#include <highfive/highfive.hpp>
+
+using namespace HighFive;
+
+static const std::string FILE_NAME("create_datatype_example.h5");
+static const std::string DATASET_NAME("test_dataset");
+
+
+// Struct representation of custom type (type v below)
+typedef struct {
+    char a;
+    short b;
+    unsigned long long c;
+} csl;
+
+bool operator==(csl x, csl y) {
+    return x.a == y.a && x.b == y.b && x.c == y.c;
+}
+
+bool operator!=(csl x, csl y) {
+    return !(x == y);
+}
+
+// Tell HighFive how to create the HDF5 datatype for this base type by
+// using the HIGHFIVE_REGISTER_TYPE macro
+CompoundType create_compound_csl() {
+    return {{"u1", create_datatype<unsigned char>()},
+            {"u2", create_datatype<short>()},
+            {"u3", create_datatype<unsigned long long>()}};
+}
+HIGHFIVE_REGISTER_TYPE(csl, create_compound_csl)
+
+int main(void) {
+    File file(FILE_NAME, File::ReadWrite | File::Create | File::Truncate);
+
+    // Create a simple compound type with automatic alignment of the
+    // members. For this the type alignment is trivial.
+    std::vector<CompoundType::member_def> t_members(
+        {{"real", create_datatype<int>()}, {"imag", create_datatype<int>()}});
+    CompoundType t(t_members);
+    t.commit(file, "new_type1");
+
+    // Create a complex nested datatype with manual alignment
+    CompoundType u({{"u1", t, 0}, {"u2", t, 9}, {"u3", create_datatype<int>(), 20}}, 26);
+    u.commit(file, "new_type3");
+
+    // Create a more complex type with automatic alignment. For this the
+    // type alignment is more complex.
+    CompoundType v_aligned{{"u1", create_datatype<unsigned char>()},
+                           {"u2", create_datatype<short>()},
+                           {"u3", create_datatype<unsigned long long>()}};
+    // introspect the compound type
+    std::cout << "v_aligned size: " << v_aligned.getSize();
+    for (const auto& member: v_aligned.getMembers()) {
+        std::cout << "  field " << member.name << " offset: " << member.offset << std::endl;
+    }
+
+    v_aligned.commit(file, "new_type2_aligned");
+
+    // Create a more complex type with a fully packed alignment. The
+    // equivalent type is created with a standard struct alignment in the
+    // implementation of HighFive::create_datatype above
+    CompoundType v_packed({{"u1", create_datatype<unsigned char>(), 0},
+                           {"u2", create_datatype<short>(), 1},
+                           {"u3", create_datatype<unsigned long long>(), 3}},
+                          11);
+    v_packed.commit(file, "new_type2_packed");
+
+
+    // Initialise some data
+    std::vector<csl> data;
+    data.push_back({'f', 1, 4});
+    data.push_back({'g', -4, 18});
+
+    // Write the data into the file in a fully packed form
+    DataSet dataset = file.createDataSet(DATASET_NAME, DataSpace(2), v_packed);
+    dataset.write(data);
+
+    file.flush();
+
+    // Read a subset of the data back
+    std::vector<csl> result;
+    dataset.select({0}, {2}).read(result);
+
+    for (size_t i = 0; i < data.size(); ++i) {
+        if (result[i] != data[i]) {
+            std::cout << "result[" << i << "]:" << std::endl;
+            std::cout << "    " << result[i].a << std::endl;
+            std::cout << "    " << result[i].b << std::endl;
+            std::cout << "    " << result[i].c << std::endl;
+            std::cout << "data[" << i << "]:" << std::endl;
+            std::cout << "    " << data[i].a << std::endl;
+            std::cout << "    " << data[i].b << std::endl;
+            std::cout << "    " << data[i].c << std::endl;
+        }
+    }
+
+    return 0;
+}
diff --git a/packages/HighFive/src/examples/create_extensible_dataset.cpp b/packages/HighFive/src/examples/create_extensible_dataset.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..17153cd5740bda666808a2d7f45f7fa19c262235
--- /dev/null
+++ b/packages/HighFive/src/examples/create_extensible_dataset.cpp
@@ -0,0 +1,63 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#include <iostream>
+#include <string>
+#include <vector>
+
+#include <highfive/highfive.hpp>
+
+const std::string FILE_NAME("create_extensible_dataset_example.h5");
+const std::string DATASET_NAME("dset");
+
+// Create an extensible dataset named "dset" of double, initially 4x5, later resized to 4x6
+//
+int main(void) {
+    using namespace HighFive;
+
+    // Create a new file using the default property lists.
+    File file(FILE_NAME, File::ReadWrite | File::Create | File::Truncate);
+
+    // Create a dataspace with initial shape and max shape
+    DataSpace dataspace = DataSpace({4, 5}, {17, DataSpace::UNLIMITED});
+
+    // Use chunking
+    DataSetCreateProps props;
+    props.add(Chunking(std::vector<hsize_t>{2, 2}));
+
+    // Create the dataset
+    DataSet dataset = file.createDataSet(DATASET_NAME, dataspace, create_datatype<double>(), props);
+
+    // Write into the initial part of the dataset
+    double t1[3][1] = {{2.0}, {2.0}, {4.0}};
+    dataset.select({0, 0}, {3, 1}).write(t1);
+
+    // Resize the dataset to a larger size
+    dataset.resize({4, 6});
+
+    // Write into the new part of the dataset
+    double t2[1][3] = {{4.0, 8.0, 6.0}};
+    dataset.select({3, 3}, {1, 3}).write(t2);
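+
+    // Sketch: query the shape after the resize; `DataSet::getDimensions`
+    // returns the current dataspace dimensions.
+    auto current_dims = dataset.getDimensions();
+    std::cout << "shape: " << current_dims[0] << " x " << current_dims[1] << std::endl;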
+
+    // now we read it back
+    std::vector<std::vector<double>> result;
+    dataset.read(result);
+
+    // we print it out and see:
+    // 2 0 0 0 0 0
+    // 2 0 0 0 0 0
+    // 4 0 0 0 0 0
+    // 0 0 0 4 8 6
+    for (auto row: result) {
+        for (auto col: row)
+            std::cout << " " << col;
+        std::cout << std::endl;
+    }
+
+    return 0;
+}
diff --git a/packages/HighFive/src/examples/create_large_attribute.cpp b/packages/HighFive/src/examples/create_large_attribute.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..022d11d87bcfdd8e9ab9c6719ea37a500e837176
--- /dev/null
+++ b/packages/HighFive/src/examples/create_large_attribute.cpp
@@ -0,0 +1,19 @@
+#include <numeric>
+#include <vector>
+
+#include <highfive/highfive.hpp>
+
+int main() {
+    std::vector<double> large_attr(16000, 0.0);
+
+    auto fapl = HighFive::FileAccessProps::Default();
+    fapl.add(HighFive::FileVersionBounds(H5F_LIBVER_LATEST, H5F_LIBVER_LATEST));
+    HighFive::File file("create_large_attribute.h5", HighFive::File::Truncate, fapl);
+    auto gcpl = HighFive::GroupCreateProps::Default();
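+    // Setting `AttributePhaseChange(0, 0)` stores attributes densely from the
+    // start; together with the latest file-version bounds above, this is what
+    // allows attributes larger than the 64 KiB compact-storage limit.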
+    gcpl.add(HighFive::AttributePhaseChange(0, 0));
+
+    auto group = file.createGroup("grp", gcpl);
+    group.createAttribute("attr", large_attr);
+
+    return 0;
+}
diff --git a/packages/HighFive/src/examples/create_page_allocated_files.cpp b/packages/HighFive/src/examples/create_page_allocated_files.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0b4d29dffb3c1a21ca90114b122ef020fd394ae2
--- /dev/null
+++ b/packages/HighFive/src/examples/create_page_allocated_files.cpp
@@ -0,0 +1,72 @@
+/*
+ *  Copyright (c), 2022, Blue Brain Project
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+
+
+#include <iostream>
+#include <string>
+#include <vector>
+
+#include <highfive/highfive.hpp>
+
+// This example requires HDF5 version 1.10.1 or newer.
+#if H5_VERSION_GE(1, 10, 1)
+
+// This example shows how to create an HDF5 file that internally aggregates
+// metadata and raw data into separate pages. The advantage of this approach
+// is that reading a single page, pulls in the metadata for a large chunk of
+// the file.
+//
+// This can be very useful when dealing with many small datasets. Note, this
+// is an optimization. Therefore, you must perform measurements in order to
+// know if this should be used.
+//
+// Internally, it uses two free space managers, one for metadata and one for
+// raw data. When space for data is allocated, the corresponding free space
+// manager is asked to allocate space. It checks whether there is enough space
+// on a partially filled page; if yes, it keeps filling that page; if not, it
+// requests page-aligned space from the file driver as needed. Upstream
+// documentation explains the details well in:
+//
+//    RFC: HDF5 File Space Management: Paged Aggregation
+
+int main() {
+    using namespace HighFive;
+
+    // Create a new file requesting paged allocation.
+    auto create_props = FileCreateProps{};
+
+    // Let's request a page size of 16 kB. This setting should be tuned
+    // in real applications. We'll allow HDF5 to not keep track of
+    // left-over free space of size less than 128 bytes. Finally,
+    // we don't need the free space manager to be stored in the
+    // HDF5 file.
+    size_t pagesize = 16 * 1024;  // Must be tuned.
+    size_t threshold = 128;
+    bool persist = false;
+
+    create_props.add(FileSpaceStrategy(H5F_FSPACE_STRATEGY_PAGE, persist, threshold));
+    create_props.add(FileSpacePageSize(pagesize));
+
+    File file("create_page_allocated_files.h5", File::Truncate, create_props);
+
+    // The `file` (and also the low-level `file.getId()`) behave as normal, i.e.
+    // one can proceed to add content to the file as usual.
+
+    auto data = std::vector<double>{0.0, 1.0, 2.0};
+    file.createDataSet("data", data);
+
+    return 0;
+}
+#else
+#include <iostream>
+int main() {
+    std::cout << "This example can't be run prior to HDF5 1.10.1.\n";
+    return 0;
+}
+#endif
diff --git a/packages/HighFive/src/examples/easy_attribute.cpp b/packages/HighFive/src/examples/easy_attribute.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..87a7f6592c6a684d068de3d3289ac8c7990ca9bb
--- /dev/null
+++ b/packages/HighFive/src/examples/easy_attribute.cpp
@@ -0,0 +1,38 @@
+/// To enable plug-ins, load the relevant libraries BEFORE HighFive. E.g.
+///
+///   #include <xtensor/xtensor.hpp>
+///   #include <Eigen/Eigen>
+///   #include <highfive/H5Easy.hpp>
+///
+/// or ask HighFive to include them. E.g.
+///
+///   #define H5_USE_XTENSOR
+///   #define H5_USE_EIGEN
+///   #include <highfive/H5Easy.hpp>
+///
+
+// optionally enable plug-in xtensor
+#ifdef H5_USE_XTENSOR
+#include <xtensor/xtensor.hpp>
+#endif
+
+// optionally enable plug-in Eigen
+#ifdef H5_USE_EIGEN
+#include <Eigen/Eigen>
+#endif
+
+#include <highfive/H5Easy.hpp>
+
+int main() {
+    H5Easy::File file("example.h5", H5Easy::File::Overwrite);
+
+    std::vector<double> measurement = {1.0, 2.0, 3.0};
+    std::string desc = "This is an important dataset.";
+    double temperature = 1.234;
+
+    H5Easy::dump(file, "/path/to/measurement", measurement);
+    H5Easy::dumpAttribute(file, "/path/to/measurement", "description", desc);
+    H5Easy::dumpAttribute(file, "/path/to/measurement", "temperature", temperature);
+
+    return 0;
+}
diff --git a/packages/HighFive/src/examples/easy_dumpoptions.cpp b/packages/HighFive/src/examples/easy_dumpoptions.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..563374f8c248dec6ab6f793e58623164b3166428
--- /dev/null
+++ b/packages/HighFive/src/examples/easy_dumpoptions.cpp
@@ -0,0 +1,88 @@
+/// To enable plug-ins, load the relevant libraries BEFORE HighFive. E.g.
+///
+///   #include <xtensor/xtensor.hpp>
+///   #include <Eigen/Eigen>
+///   #include <highfive/H5Easy.hpp>
+///
+/// or ask HighFive to include them. E.g.
+///
+///   #define H5_USE_XTENSOR
+///   #define H5_USE_EIGEN
+///   #include <highfive/H5Easy.hpp>
+///
+
+// optionally enable plug-in xtensor
+#ifdef H5_USE_XTENSOR
+#include <xtensor/xtensor.hpp>
+#endif
+
+// optionally enable plug-in Eigen
+#ifdef H5_USE_EIGEN
+#include <Eigen/Eigen>
+#endif
+
+#include <highfive/H5Easy.hpp>
+
+int main() {
+    H5Easy::File file("example.h5", H5Easy::File::Overwrite);
+
+    // plain options
+    {
+        std::vector<double> A = {1.0, 2.0, 3.0};
+
+        H5Easy::dump(file, "/path/to/A", A);
+        H5Easy::dump(file, "/path/to/A", A, H5Easy::DumpMode::Overwrite);
+    }
+
+    // advanced - compression
+    {
+        std::vector<double> B = {1.0, 2.0, 3.0};
+
+        H5Easy::dump(file, "/path/to/B", B, H5Easy::DumpOptions(H5Easy::Compression()));
+
+        H5Easy::dump(file,
+                     "/path/to/B",
+                     B,
+                     H5Easy::DumpOptions(H5Easy::Compression(), H5Easy::DumpMode::Overwrite));
+    }
+
+    // advanced - compression - set compression level
+    {
+        std::vector<double> C = {1.0, 2.0, 3.0};
+
+        H5Easy::dump(file, "/path/to/C", C, H5Easy::DumpOptions(H5Easy::Compression(8)));
+    }
+
+    // advanced - compression - set compression level & chunk size
+    {
+        std::vector<double> D = {1.0, 2.0, 3.0};
+
+        H5Easy::DumpOptions options(H5Easy::Compression(8));
+        options.setChunkSize({3});
+
+        H5Easy::dump(file, "/path/to/D", D, options);
+    }
+
+    // advanced - set chunk size
+    {
+        int E = 10;
+
+        H5Easy::DumpOptions options;
+        options.setChunkSize({100, 100});
+
+        H5Easy::dump(file, "/path/to/E", E, {0, 0}, options);
+        H5Easy::dump(file, "/path/to/E", E, {0, 1}, options);
+        // ...
+    }
+
+    // advanced - no automatic flushing
+    {
+        std::vector<double> F = {1.0, 2.0, 3.0};
+
+        H5Easy::dump(file, "/path/to/F", F, H5Easy::DumpOptions(H5Easy::Flush::False));
+
+        file.flush();
+    }
+
+    return 0;
+}
diff --git a/packages/HighFive/src/examples/easy_load_dump.cpp b/packages/HighFive/src/examples/easy_load_dump.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..82db4aa36f190941d87dfc552aa38792d412fcbb
--- /dev/null
+++ b/packages/HighFive/src/examples/easy_load_dump.cpp
@@ -0,0 +1,102 @@
+/// To enable plug-ins, load the relevant libraries BEFORE HighFive. E.g.
+///
+///   #include <xtensor/xtensor.hpp>
+///   #include <Eigen/Eigen>
+///   #include <highfive/H5Easy.hpp>
+///
+/// or ask HighFive to include them. E.g.
+///
+///   #define H5_USE_XTENSOR
+///   #define H5_USE_EIGEN
+///   #include <highfive/H5Easy.hpp>
+///
+
+// optionally enable plug-in xtensor
+#ifdef H5_USE_XTENSOR
+#include <xtensor/xtensor.hpp>
+#endif
+
+// optionally enable plug-in Eigen
+#ifdef H5_USE_EIGEN
+#include <Eigen/Eigen>
+#endif
+
+#include <highfive/H5Easy.hpp>
+
+int main() {
+    H5Easy::File file("example.h5", H5Easy::File::Overwrite);
+
+    // (over)write and read scalar
+    {
+        int A = 10;
+
+        H5Easy::dump(file, "/path/to/A", A);
+        H5Easy::dump(file, "/path/to/A", A, H5Easy::DumpMode::Overwrite);
+    }
+
+    // (over)write and read std::vector
+    {
+        std::vector<double> B = {1., 2., 3.};
+
+        H5Easy::dump(file, "/path/to/B", B);
+        H5Easy::dump(file, "/path/to/B", B, H5Easy::DumpMode::Overwrite);
+
+        B = H5Easy::load<std::vector<double>>(file, "/path/to/B");
+    }
+
+    // (over)write scalar in (automatically expanding) extendible DataSet,
+    // read item from the DataSet
+    {
+        int C = 10;
+
+        H5Easy::dump(file, "/path/to/C", C, {0});
+        H5Easy::dump(file, "/path/to/C", C, {1});
+        H5Easy::dump(file, "/path/to/C", C, {3});
+
+        C = H5Easy::load<int>(file, "/path/to/C", {0});
+    }
+
+    // get the size/shape of a DataSet
+    {
+        // outputs "size_t"
+        H5Easy::getSize(file, "/path/to/C");
+
+        // outputs "std::vector<size_t>"
+        H5Easy::getShape(file, "/path/to/C");
+    }
+
+#ifdef H5_USE_EIGEN
+    // (over)write and read Eigen::Matrix
+    {
+        // matrix
+        Eigen::MatrixXd D = Eigen::MatrixXd::Random(10, 5);
+
+        H5Easy::dump(file, "/path/to/D", D);
+        H5Easy::dump(file, "/path/to/D", D, H5Easy::DumpMode::Overwrite);
+
+        D = H5Easy::load<Eigen::MatrixXd>(file, "/path/to/D");
+
+
+        Eigen::ArrayXd D2 = Eigen::ArrayXd::Random(30);
+
+        H5Easy::dump(file, "/path/to/D2", D2);
+        H5Easy::dump(file, "/path/to/D2", D2, H5Easy::DumpMode::Overwrite);
+
+        D2 = H5Easy::load<Eigen::ArrayXd>(file, "/path/to/D2");
+    }
+#endif
+
+#ifdef H5_USE_XTENSOR
+    // (over)write and read xt::xtensor (or xt::xarray)
+    {
+        xt::xtensor<size_t, 1> E = xt::arange<size_t>(10);
+
+        H5Easy::dump(file, "/path/to/E", E);
+        H5Easy::dump(file, "/path/to/E", E, H5Easy::DumpMode::Overwrite);
+
+        E = H5Easy::load<xt::xtensor<size_t, 1>>(file, "/path/to/E");
+    }
+#endif
+
+    return 0;
+}
diff --git a/packages/HighFive/src/examples/hl_hdf5_inmemory_files.cpp b/packages/HighFive/src/examples/hl_hdf5_inmemory_files.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..088fd71cc8db6a165cecdde1307212b4f828e18d
--- /dev/null
+++ b/packages/HighFive/src/examples/hl_hdf5_inmemory_files.cpp
@@ -0,0 +1,69 @@
+/*
+ *  Copyright (c), 2022, Blue Brain Project
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#include <cstdint>
+#include <cstdio>
+#include <iostream>
+#include <stdexcept>
+#include <vector>
+
+#include <highfive/highfive.hpp>
+
+#include <hdf5_hl.h>
+
+using namespace HighFive;
+
+class InMemoryFile: public HighFive::File {
+  public:
+    explicit InMemoryFile(std::vector<std::uint8_t> buffer)
+        : _buffer(std::move(buffer)) {
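+        // The flags ask HDF5 neither to copy the image buffer nor to release
+        // it; the buffer therefore must outlive the file handle, which keeping
+        // it as a member guarantees.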
+        _hid = H5LTopen_file_image(_buffer.data(),
+                                   sizeof(_buffer[0]) * _buffer.size(),
+                                   H5LT_FILE_IMAGE_DONT_RELEASE | H5LT_FILE_IMAGE_DONT_COPY);
+    }
+
+  private:
+    std::vector<std::uint8_t> _buffer;
+};
+
+
+// Create an HDF5 file on disk, read it back into a memory buffer,
+// and open that buffer as an in-memory HDF5 file
+int main(void) {
+    const std::string file_name("inmemory_file.h5");
+    const std::string dataset_name("dset");
+
+    auto data = std::vector<double>{1.0, 2.0, 3.0};
+
+    {
+        // We create an HDF5 file.
+        File file(file_name, File::Truncate);
+        file.createDataSet(dataset_name, data);
+    }
+
+    // Simulate having an inmemory file by reading a file
+    // byte-by-byte into RAM.
+    auto buffer = std::vector<std::uint8_t>(1ul << 20);
+    auto file = std::fopen(file_name.c_str(), "rb");
+    auto nread = std::fread(buffer.data(), sizeof(buffer[0]), buffer.size(), file);
+    std::fclose(file);
+    std::cout << "Bytes read: " << nread << "\n";
+
+    // Create a file from a buffer.
+    auto h5 = InMemoryFile(std::move(buffer));
+
+    // Read a dataset as usual.
+    auto read_back = h5.getDataSet(dataset_name).read<std::vector<double>>();
+
+    // Check if the values match.
+    for (size_t i = 0; i < read_back.size(); ++i) {
+        if (read_back[i] != data[i]) {
+            throw std::runtime_error("Values don't match.");
+        } else {
+            std::cout << "read_back[" << i << "] = " << read_back[i] << "\n";
+        }
+    }
+
+    return 0;
+}
diff --git a/packages/HighFive/src/examples/parallel_hdf5_collective_io.cpp b/packages/HighFive/src/examples/parallel_hdf5_collective_io.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..7261b7cf1d8b8a92623ef166bea72156250d9b7b
--- /dev/null
+++ b/packages/HighFive/src/examples/parallel_hdf5_collective_io.cpp
@@ -0,0 +1,118 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse
+ *  Copyright (c), 2022, Blue Brain Project
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#include <array>
+#include <iostream>
+#include <string>
+#include <vector>
+
+#include <mpi.h>
+
+#include <highfive/highfive.hpp>
+
+const std::string file_name("parallel_collective_example.h5");
+const std::string dataset_name("dset");
+
+// Currently, HighFive doesn't wrap retrieving information from property lists.
+// Therefore, one needs to use HDF5 directly. For example, to check whether
+// collective MPI-IO operations were used, one can query the "no collective
+// cause" property; conveniently, this also identifies the reason why a
+// collective MPI call was not used.
+void check_collective_io(const HighFive::DataTransferProps& xfer_props) {
+    auto mnccp = HighFive::MpioNoCollectiveCause(xfer_props);
+    if (mnccp.getLocalCause() || mnccp.getGlobalCause()) {
+        std::cout
+            << "The operation was successful, but couldn't use collective MPI-IO. local cause: "
+            << mnccp.getLocalCause() << " global cause:" << mnccp.getGlobalCause() << std::endl;
+    }
+}
+
+
+// This is an example of how to write HDF5 files when all
+// operations are collective, i.e. all MPI ranks participate in
+// all HDF5 related function calls.
+//
+// If this assumption is met then one can ask HDF5 to use
+// collective MPI-IO operations. This enables MPI-IO to optimize
+// reads and writes.
+//
+// In this example we will create groups, and let every MPI rank
+// write part of a 2D array; and then have all MPI ranks read back
+// a different part of the array.
+int main(int argc, char** argv) {
+    int mpi_rank, mpi_size;
+
+    // initialize MPI
+    MPI_Init(&argc, &argv);
+    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+    using namespace HighFive;
+
+    try {
+        // MPI-IO requires informing HDF5 that we want something other than
+        // the default behaviour. This is done through property lists. We
+        // need a file access property list.
+        FileAccessProps fapl;
+        // We tell HDF5 to use MPI-IO
+        fapl.add(MPIOFileAccess{MPI_COMM_WORLD, MPI_INFO_NULL});
+        // We also specify that we want all meta-data related operations
+        // to use MPI collective operations. This implies that all MPI ranks
+        // in the communicator must participate in any HDF5 operation that
+        // reads or writes metadata. Essentially, this is safe if all MPI ranks
+        // participate in all HDF5 operations.
+        fapl.add(MPIOCollectiveMetadata{});
+
+        // Now we can create the file as usual.
+        File file(file_name, File::Truncate, fapl);
+
+        // We can create a group as usual, but all MPI ranks must participate.
+        auto group = file.createGroup("grp");
+
+        // We define the dataset to have one row per MPI rank and two columns.
+        std::vector<size_t> dims(2);
+        dims[0] = std::size_t(mpi_size);
+        dims[1] = 2ul;
+
+        // Create the dataset; like all metadata operations, this is collective.
+        DataSet dataset = group.createDataSet<double>(dataset_name, DataSpace(dims));
+
+        // Each rank wants to write its own rank number twice into
+        // its associated row
+        auto data = std::array<double, 2>{mpi_rank * 1.0, mpi_rank * 2.0};
+
+        auto xfer_props = DataTransferProps{};
+        xfer_props.add(UseCollectiveIO{});
+
+        // Each MPI rank writes a non-overlapping part of the array.
+        std::vector<size_t> offset{std::size_t(mpi_rank), 0ul};
+        std::vector<size_t> count{1ul, 2ul};
+
+        dataset.select(offset, count).write(data, xfer_props);
+        check_collective_io(xfer_props);
+
+        // Let's ensure that everything has been written to disk.
+        file.flush();
+
+        // We'd like to read back some data. For simplicity, we'll read the
+        // row of the MPI rank above us (wrapping around)
+        offset[0] = (offset[0] + 1ul) % dims[0];
+
+        // MPI ranks don't have to read non-overlapping parts, but in this
+        // example they happen to. Again, all ranks participate in this call.
+        dataset.select(offset, count).read(data, xfer_props);
+        check_collective_io(xfer_props);
+
+    } catch (Exception& err) {
+        // catch and print any HDF5 error
+        std::cerr << err.what() << std::endl;
+        MPI_Abort(MPI_COMM_WORLD, 1);
+    }
+
+    MPI_Finalize();
+    return 0;  // successfully terminated
+}
diff --git a/packages/HighFive/src/examples/parallel_hdf5_independent_io.cpp b/packages/HighFive/src/examples/parallel_hdf5_independent_io.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b43012890c209a05dfd0700333f608a6dd247211
--- /dev/null
+++ b/packages/HighFive/src/examples/parallel_hdf5_independent_io.cpp
@@ -0,0 +1,101 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse
+ *  Copyright (c), 2022, Blue Brain Project
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#include <iostream>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include <mpi.h>
+
+#include <highfive/highfive.hpp>
+
+const std::string file_name("parallel_independent_example.h5");
+
+// This is an example of how to let MPI ranks read independent parts of the
+// HDF5 file.
+int main(int argc, char** argv) {
+    int mpi_rank, mpi_size;
+
+    // initialize MPI
+    MPI_Init(&argc, &argv);
+    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+    using namespace HighFive;
+    try {
+        // We perform a preprocessing step, to create a file:
+        // {
+        //   "g0": { "x": [ 0.0, 0.0, 0.0 ] }
+        //   "g1": { "x": [ 1.0, 2.0, 3.0 ] }
+        //   "g2": { "x": [ 2.0, 4.0, 6.0 ] }
+        //   ...
+        // }
+        if (mpi_rank == 0) {
+            File file(file_name, File::ReadWrite | File::Create | File::Truncate);
+
+            for (int i = 0; i < mpi_size; ++i) {
+                std::stringstream group_name;
+                group_name << "g" << i;
+
+                // Create a group: `f"g{mpi_rank}"`
+                auto group = file.createGroup(group_name.str());
+
+                std::vector<double> x{double(i), 2 * double(i), 3 * double(i)};
+                group.createDataSet("x", x);
+            }
+        }
+
+        // We need to wait for the file to be created, before proceeding with the
+        // actual example.
+        MPI_Barrier(MPI_COMM_WORLD);
+
+        // The example can start!
+        //
+        // Let's inform HDF5 that we want MPI-IO. We need a file access property
+        // list, and request MPI-IO file access.
+        FileAccessProps fapl;
+        fapl.add(MPIOFileAccess{MPI_COMM_WORLD, MPI_INFO_NULL});
+
+        // Do not ask for collective metadata I/O reads. You can consider
+        // asking for collective metadata writes (they must be collective
+        // anyway; otherwise MPI ranks might have differing views of how the
+        // same HDF5 file is internally structured). But here we only read.
+        //
+        // fapl.add(MPIOCollectiveMetadataWrite{});
+
+        // Now we can open the file as usual.
+        File file(file_name, File::ReadOnly, fapl);
+
+        // Note that this operation isn't collective. Each MPI rank is requesting to
+        // open a different group.
+        std::stringstream dataset_name;
+        dataset_name << "g" << mpi_rank << "/x";
+
+        // Again, this isn't collective, since different MPI ranks are reading
+        // from different datasets.
+        auto x = file.getDataSet(dataset_name.str()).read<std::vector<double>>();
+
+        // Let's create some more obviously independent accesses, and explicitly
+        // open the intermediate group:
+        if (mpi_rank % 2 == 0) {
+            std::stringstream other_group_name;
+            other_group_name << "g" << (mpi_rank + 1) % mpi_size;
+            auto other_group = file.getGroup(other_group_name.str());
+
+            auto y = other_group.getDataSet("x").read<std::vector<double>>();
+        }
+    } catch (Exception& err) {
+        // catch and print any HDF5 error
+        std::cerr << err.what() << std::endl;
+        MPI_Abort(MPI_COMM_WORLD, 1);
+    }
+
+    MPI_Finalize();
+    return 0;  // successfully terminated
+}
diff --git a/packages/HighFive/src/examples/read_write_dataset_string.cpp b/packages/HighFive/src/examples/read_write_dataset_string.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2a4c3c491105c8585ec3838fe609279e71b52ba9
--- /dev/null
+++ b/packages/HighFive/src/examples/read_write_dataset_string.cpp
@@ -0,0 +1,51 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#include <iostream>
+#include <string>
+#include <vector>
+
+#include <highfive/highfive.hpp>
+
+using namespace HighFive;
+
+const std::string file_name("create_dataset_string_example.h5");
+const std::string dataset_name("story");
+
+// create a dataset from a vector of strings,
+// read it back and print it
+int main(void) {
+    // Create a new file using the default property lists.
+    File file(file_name, File::ReadWrite | File::Create | File::Truncate);
+
+    std::vector<std::string> string_list;
+    string_list.push_back("Hello World !");
+    string_list.push_back(
+        "This string list is mapped to a dataset of "
+        "variable length string");
+    string_list.push_back("Encoding is done in UTF-8 - 你好 - Здравствуйте!");
+    string_list.push_back("May the force be with you");
+    string_list.push_back("Enjoy !");
+
+    // create a dataset ready to contain strings, sized from the vector
+    // string_list
+    DataSet dataset = file.createDataSet<std::string>(dataset_name, DataSpace::From(string_list));
+
+    // let's write our vector of strings
+    dataset.write(string_list);
+
+    // now we read it back
+    std::vector<std::string> result_string_list;
+    dataset.read(result_string_list);
+
+    for (size_t i = 0; i < result_string_list.size(); ++i) {
+        std::cout << ":" << i << " " << result_string_list[i] << "\n";
+    }
+
+    return 0;  // successfully terminated
+}
diff --git a/packages/HighFive/src/examples/read_write_fixedlen_string.cpp b/packages/HighFive/src/examples/read_write_fixedlen_string.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..60589637ea97e6c526ab8b8c6754026c7191331d
--- /dev/null
+++ b/packages/HighFive/src/examples/read_write_fixedlen_string.cpp
@@ -0,0 +1,42 @@
+/*
+ *  Copyright (c), 2020, Blue Brain Project
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#include <iostream>
+#include <string>
+
+#include <highfive/highfive.hpp>
+
+using namespace HighFive;
+
+// This example shows how compile-time constant strings work.
+//
+// Note that as of version 2.8.0 there is a simpler API for writing
+// `std::string` as fixed-length strings.
+int main() {
+    // Create a new file using the default property lists.
+    File file("create_dataset_string_example.h5", File::Truncate);
+    const char strings_fixed[][16] = {"abcabcabcabcabc", "123123123123123"};
+
+    // create a dataset of two fixed-length strings of 10 characters each
+    file.createDataSet<char[10]>("ds1", DataSpace(2)).write(strings_fixed);
+
+    // Without specific type info this will create an int8 dataset
+    file.createDataSet("ds2", strings_fixed);
+
+    // Now test the new interface type
+    FixedLenStringArray<10> arr{"0000000", "1111111"};
+    auto ds = file.createDataSet("ds3", arr);
+
+    // Read back truncating to 4 chars
+    FixedLenStringArray<4> array_back;
+    ds.read(array_back);
+    std::cout << "First item is '" << array_back[0] << "'\n"
+              << "Second item is '" << array_back[1] << "'\n";
+
+    return 0;
+}
diff --git a/packages/HighFive/src/examples/read_write_raw_ptr.cpp b/packages/HighFive/src/examples/read_write_raw_ptr.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b6cd9eda59e74e842a988f777ca5c95ffb26b702
--- /dev/null
+++ b/packages/HighFive/src/examples/read_write_raw_ptr.cpp
@@ -0,0 +1,73 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse
+ *  Copyright (c), 2022, Blue Brain Project
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#include <iostream>
+#include <string>
+#include <vector>
+
+#include <highfive/highfive.hpp>
+
+const std::string file_name("read_write_raw_ptr.h5");
+const std::string dataset_name("array");
+
+// This create a "multi-dimensional" array. Meaning a pointer with
+// dimensions. The `std::vector<double>` is mearly a convenient way
+// of allocating and releasing memory.
+//
+// Conceptionually this is only a raw pointer with dimensions. The
+// data is store in row-major, aka C-style, without stride, offset
+// or padding.
+std::vector<double> make_array(const std::vector<size_t>& dims) {
+    auto n_elements = dims[0] * dims[1];
+    std::vector<double> nd_array(n_elements, 0.0);
+
+    for (size_t i = 0; i < dims[0]; ++i) {
+        for (size_t j = 0; j < dims[1]; ++j) {
+            nd_array[j + i * dims[1]] = double(j) + 100.0 * double(i);
+        }
+    }
+
+    return nd_array;
+}
+
+int main(void) {
+    using namespace HighFive;
+
+    // Create a new file using the default property lists.
+    File file(file_name, File::ReadWrite | File::Create | File::Truncate);
+
+    // Let's write to file.
+    {
+        std::vector<size_t> dims{3, 5};
+        auto nd_array = make_array(dims);
+
+        // First, create a dataset with the correct dimensions.
+        auto dataset = file.createDataSet<double>(dataset_name, DataSpace(dims));
+
+        // Then write, using the raw pointer.
+        dataset.write_raw(nd_array.data());
+    }
+
+    // Let's read from file.
+    {
+        auto dataset = file.getDataSet(dataset_name);
+
+        // First read the dimensions.
+        auto dims = dataset.getDimensions();
+
+        // Then allocate memory.
+        auto n_elements = dims[0] * dims[1];
+        auto nd_array = std::vector<double>(n_elements);
+
+        // Finally, read into the memory by passing a raw pointer to the library.
+        dataset.read<double>(nd_array.data());
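+
+        // Verification sketch: print the values; they should match the
+        // row-major pattern produced by `make_array` above.
+        for (size_t i = 0; i < dims[0]; ++i) {
+            for (size_t j = 0; j < dims[1]; ++j) {
+                std::cout << nd_array[j + i * dims[1]] << " ";
+            }
+            std::cout << "\n";
+        }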
+    }
+
+    return 0;
+}
diff --git a/packages/HighFive/src/examples/read_write_single_scalar.cpp b/packages/HighFive/src/examples/read_write_single_scalar.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..4b4c6887c96e47bffba798dff3cebfd2ce3cbccd
--- /dev/null
+++ b/packages/HighFive/src/examples/read_write_single_scalar.cpp
@@ -0,0 +1,46 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#include <iostream>
+#include <string>
+#include <vector>
+
+#include <highfive/highfive.hpp>
+
+const std::string file_name("read_write_scalar.h5");
+const std::string dataset_name("single_scalar");
+
+// Create a dataset name "single_scalar"
+// which contains only the perfect integer number "42"
+//
+int main(void) {
+    using namespace HighFive;
+
+    // Create a new file using the default property lists.
+    File file(file_name, File::ReadWrite | File::Create | File::Truncate);
+
+    int perfect_number = 42;
+
+    // Create the dataset
+    DataSet dataset = file.createDataSet<double>(dataset_name, DataSpace::From(perfect_number));
+
+    // write it
+    dataset.write(perfect_number);
+
+    // flush everything
+    file.flush();
+
+    // let's read it back
+    int potentially_perfect_number;
+
+    dataset.read(potentially_perfect_number);
+
+    std::cout << "perfect number: " << potentially_perfect_number << std::endl;
+
+    return 0;  // successfully terminated
+}
diff --git a/packages/HighFive/src/examples/read_write_std_strings.cpp b/packages/HighFive/src/examples/read_write_std_strings.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..7699e0c0c4177da12ccc6d0372c9e42f10691246
--- /dev/null
+++ b/packages/HighFive/src/examples/read_write_std_strings.cpp
@@ -0,0 +1,114 @@
+/*
+ *  Copyright (c), 2023, Blue Brain Project, EPFL
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#include <iostream>
+#include <string>
+#include <vector>
+
+#include <highfive/H5File.hpp>
+#include <highfive/H5DataSet.hpp>
+#include <highfive/H5DataSpace.hpp>
+
+using namespace HighFive;
+
+// This example shows how to write (containers of) `std::string` to a dataset
+// as either fixed- or variable-length HDF5 strings.
+// The feature is available from 2.8.0 onwards.
+int main(void) {
+    auto file = File("read_write_std_string.h5", File::Truncate);
+
+    // A string of length 3 in a buffer of size 4 bytes. We'll use "length" for
+    // the semantic length of the string, i.e. excluding the '\0' character and
+    // "size" to refer to the length of the buffer in which the string is stored.
+    // For null-terminated strings, `size == length + 1`.
+    std::string ascii_string = "foo";
+    auto scalar_dataspace = DataSpace(DataSpace::dataspace_scalar);
+
+    // Just write the string:
+    file.createDataSet("single_automatic", ascii_string);
+
+    // The above results in writing the string as an HDF5 variable length UTF8
+    // string. In HDF5 a variable length string doesn't specify the length of
+    // the string. Variable length strings are always null-terminated.
+    auto variable_stringtype = VariableLengthStringType();
+    file.createDataSet("single_variable", scalar_dataspace, variable_stringtype)
+        .write(ascii_string);
+
+    // HDF5 also has the concept of fixed length string. In fixed length strings
+    // the size of the string, in bytes, is part of the datatype. The HDF5 API
+    // for fixed and variable length strings is distinct. Hence, when writing
+    // string that need to be read by other programs, it can matter if the string
+    // is stored as fixed or variable length.
+    //
+    // Important: The HDF5 string size is the size of the buffer required to
+    // store the string.
+    //
+    // We know that ascii_string requires 4 bytes to store, but want to store
+    // it in fixed length strings of length 8. Additionally, we promise that
+    // the strings are null-terminated. The character set defaults to ASCII.
+    auto fixed_stringtype = FixedLengthStringType(8, StringPadding::NullTerminated);
+    file.createDataSet("single_fixed_nullterm", scalar_dataspace, fixed_stringtype)
+        .write(ascii_string);
+
+    // When reading into an `std::string` it doesn't matter if the HDF5 datatype
+    // is fixed or variable length. HighFive will internally read into a buffer
+    // and then write to the final destination.
+    auto from_variable = file.getDataSet("single_variable").read<std::string>();
+    auto from_fixed = file.getDataSet("single_fixed_nullterm").read<std::string>();
+
+    // Note that because the fixed length string is null-terminated,
+    // `from_fixed.size() == ascii_string.size()` despite it being stored as a string of
+    // length 8.
+    std::cout << "from_variable = '" << from_variable << "' size = " << from_variable.size()
+              << "\n";
+    std::cout << "from_fixed = '" << from_fixed << "' size = " << from_fixed.size() << "\n";
+
+    // Fixed-length strings don't have to be null-terminated. Their length could
+    // be defined simply by the known size of the buffer required to store the
+    // string. To deal with the situation where the string is shorter than the
+    // buffer, one defines a padding character. This must be either the null or
+    // space character. We'll show null-padded; space-padded works the same way,
+    // as sketched just below.
+    auto fixed_nullpad = FixedLengthStringType(8, StringPadding::NullPadded);
+    file.createDataSet("single_fixed_nullpad", scalar_dataspace, fixed_nullpad).write(ascii_string);
+
+    // Note that we only know that the string is padded with nulls; we don't
+    // know whether those nulls were part of the string to begin with. Hence the
+    // full size of the buffer is read into the `std::string`, and the length of
+    // the `std::string` equals the size of the string type.
+    auto from_nullpad = file.getDataSet("single_fixed_nullpad").read<std::string>();
+    std::cout << "from_nullpad = '" << from_nullpad << "' size = " << from_nullpad.size() << "\n";
+
+    // Let's look at UTF8 strings. In HDF5 the size of a string is the size in
+    // bytes of the buffer required to store the string. A UTF8 symbol/character
+    // requires 1 to 4 bytes.
+    //
+    // The 'a' is 1 byte, the 'α' 2 bytes, therefore a total of 3 bytes (same
+    // as `utf8_string.size()`), which, including the null character, fits into
+    // 8 bytes. However, 8 bytes would in general not be enough to store two
+    // UTF8 characters and the null character, which could require up to 9 bytes.
+    std::string utf8_string = "aα";
+    auto fixed_utf8_type =
+        FixedLengthStringType(8, StringPadding::NullTerminated, CharacterSet::Utf8);
+    file.createDataSet("single_fixed_utf8", scalar_dataspace, fixed_utf8_type).write(utf8_string);
+
+    auto from_utf8 = file.getDataSet("single_fixed_utf8").read<std::string>();
+    std::cout << "from_utf8 = '" << from_utf8 << "' size = " << from_utf8.size() << "\n";
+
+    // Finally, containers of `std::string`s work analogously:
+    auto ascii_strings = std::vector<std::string>{"123", "456"};
+    file.createDataSet("multi_fixed_nullterm", DataSpace::From(ascii_strings), fixed_stringtype)
+        .write(ascii_strings);
+
+    auto ascii_strings_from_fixed =
+        file.getDataSet("multi_fixed_nullterm").read<std::vector<std::string>>();
+
+    // In order to see details of how each is stored in the HDF5 file use:
+    //     h5dump read_write_std_string.h5
+
+    return 0;
+}
diff --git a/packages/HighFive/src/examples/read_write_vector_dataset.cpp b/packages/HighFive/src/examples/read_write_vector_dataset.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..9718c1c2bc6d3cb915f6c504dc8d94aabfcccdf9
--- /dev/null
+++ b/packages/HighFive/src/examples/read_write_vector_dataset.cpp
@@ -0,0 +1,63 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#include <iostream>
+#include <string>
+#include <vector>
+
+#include <highfive/highfive.hpp>
+
+using namespace HighFive;
+
+const std::string file_name("dataset_integer.h5");
+const std::string dataset_name("dset");
+const size_t size_dataset = 20;
+
+// create a 1D dataset from a vector of int
+void write_dataset() {
+    // we create a new hdf5 file
+    File file(file_name, File::ReadWrite | File::Create | File::Truncate);
+
+    std::vector<int> data(size_dataset);
+    for (size_t i = 0; i < data.size(); ++i) {
+        data[i] = int(i);
+    }
+
+    // let's create a dataset of native integer with the size of the vector
+    // 'data'
+    DataSet dataset = file.createDataSet<int>(dataset_name, DataSpace::From(data));
+
+    // let's write our vector of int to the HDF5 dataset
+    dataset.write(data);
+}
+
+// read our data back
+void read_dataset() {
+    // we open the existing hdf5 file we created before
+    File file(file_name, File::ReadOnly);
+
+    std::vector<int> read_data;
+
+    // we get the dataset
+    DataSet dataset = file.getDataSet(dataset_name);
+
+    // we convert the hdf5 dataset to a single dimension vector
+    dataset.read(read_data);
+
+    for (size_t i = 0; i < read_data.size(); ++i) {
+        std::cout << read_data[i] << " ";
+    }
+    std::cout << "\n";
+}
+
+int main(void) {
+    write_dataset();
+    read_dataset();
+
+    return 0;
+}
diff --git a/packages/HighFive/src/examples/read_write_vector_dataset_references.cpp b/packages/HighFive/src/examples/read_write_vector_dataset_references.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ca08467682184a6c5244dc63707f097012b05c8b
--- /dev/null
+++ b/packages/HighFive/src/examples/read_write_vector_dataset_references.cpp
@@ -0,0 +1,73 @@
+/*
+ *   Copyright (c), 2021 Blue Brain Project - EPFL
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#include <iostream>
+#include <numeric>
+#include <string>
+#include <vector>
+
+#include <highfive/highfive.hpp>
+
+// create a 1D dataset from a vector of int
+void write_dataset() {
+    // we create a new hdf5 file
+    HighFive::File file("dataset_integer.h5", HighFive::File::Overwrite);
+
+    // we create a new group
+    HighFive::Group group = file.createGroup("a_group");
+
+    std::vector<int> data(20);
+    std::iota(data.begin(), data.end(), 0);
+
+    // let's create a dataset of native integer with the size of the vector
+    // 'data' inside the group
+    auto dataset = group.createDataSet("source_dataset", data);
+
+    // create a reference to the dataset containing the integers
+    HighFive::Reference ref = HighFive::Reference(group, dataset);
+    std::vector<HighFive::Reference> ref_container{ref};
+
+    // in similar fashion, we store as dataset the vector of reference that we want
+    HighFive::DataSet ref_set = group.createDataSet("reference_dataset", ref_container);
+}
+
+// read our data back
+void read_dataset() {
+    // we open the existing hdf5 file we created before
+    HighFive::File file("dataset_integer.h5", HighFive::File::ReadOnly);
+
+    // we load the group
+    HighFive::Group my_group = file.getGroup("a_group");
+
+    // we load the dataset that contains the reference
+    HighFive::DataSet ref_dataset = my_group.getDataSet("reference_dataset");
+
+    // we load the vector of references
+    std::vector<HighFive::Reference> expected_references;
+    ref_dataset.read(expected_references);
+
+    // we use the stored reference and dereference it to gain access in the integers'
+    // dataset
+    HighFive::DataSet expected_dataset = expected_references[0].dereference<HighFive::DataSet>(
+        my_group);
+
+    // as usual, we load the vector with numbers from the extracted dataset
+    std::vector<int> read_data;
+    expected_dataset.read(read_data);
+
+    // and voila, the payload we expected
+    for (int i: read_data) {
+        std::cout << i << " ";
+    }
+}
+
+int main() {
+    write_dataset();
+    read_dataset();
+
+    return 0;
+}
diff --git a/packages/HighFive/src/examples/readme_snippet.cpp b/packages/HighFive/src/examples/readme_snippet.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..160dabce569c7d30dd75d7aee74686f1f339d1c1
--- /dev/null
+++ b/packages/HighFive/src/examples/readme_snippet.cpp
@@ -0,0 +1,32 @@
+#include <highfive/highfive.hpp>
+
+using namespace HighFive;
+
+int main() {
+    std::string filename = "/tmp/new_file.h5";
+
+    {
+        // We create an empty HDF5 file, truncating an existing
+        // file if required:
+        File file(filename, File::Truncate);
+
+        std::vector<int> data(50, 1);
+        file.createDataSet("grp/data", data);
+    }
+
+    {
+        // We open the file as read-only:
+        File file(filename, File::ReadOnly);
+        auto dataset = file.getDataSet("grp/data");
+
+        // Read back, with allocating:
+        auto data = dataset.read<std::vector<int>>();
+
+        // Because `pre_allocated` has the correct size, this will
+        // not cause `pre_allocated` to be reallocated:
+        auto pre_allocated = std::vector<int>(50);
+        dataset.read(pre_allocated);
+    }
+
+    return 0;
+}
diff --git a/packages/HighFive/src/examples/renaming_objects.cpp b/packages/HighFive/src/examples/renaming_objects.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..f0759e52a2374c9e72f63289e0fe2e0cded9c1bc
--- /dev/null
+++ b/packages/HighFive/src/examples/renaming_objects.cpp
@@ -0,0 +1,68 @@
+#include <iostream>
+#include <string>
+
+#include <highfive/highfive.hpp>
+
+using namespace HighFive;
+
+int main(void) {
+    /* We are going to create a group in the root directory, add a
+     * dataset to this group, and attach an attribute to the dataset.
+     * Then we retrieve the paths of the root, the group and the
+     * dataset, and the name of the attribute.
+     * Next we move the dataset, with its attached attribute, to
+     * some destination path.
+     * To check that the dataset object is still valid, we create a
+     * second attribute */
+
+    // Create a new file using the default property lists.
+    File file("names.h5", File::ReadWrite | File::Create | File::Truncate);
+
+    // Create a group
+    Group group = file.createGroup("group");
+
+    // Create a dummy dataset of one single integer
+    DataSet dataset = group.createDataSet("data", DataSpace(1), create_datatype<int>());
+    dataset.write(100);
+
+    // Let's also add an attribute to this dataset
+    std::string string_list("very important DataSet!");
+    Attribute attribute = dataset.createAttribute<std::string>("attribute",
+                                                               DataSpace::From(string_list));
+    attribute.write(string_list);
+
+    // Get path and names
+    std::cout << "root path: " << file.getPath() << std::endl;
+    std::cout << "group path: " << group.getPath() << std::endl;
+    std::cout << "dataset path: " << dataset.getPath() << std::endl;
+    std::cout << "attribute name: " << attribute.getName() << std::endl;
+    std::cout << std::endl;
+
+    // Move dataset with its attribute to another destination path
+    file.rename("/group/data", "/NewGroup/SubGroup/movedData");
+
+    // As you can see, new groups were created as needed to reach the destination path
+    std::cout << "dataset new path: " << dataset.getPath() << std::endl;
+
+    // We can still use moved dataset
+    // Let's create new attribute
+    Attribute attributeNew = dataset.createAttribute<std::string>("attributeNew",
+                                                                  DataSpace::From(string_list));
+    attributeNew.write(string_list);
+    std::cout << "new attribute name: " << attributeNew.getName() << std::endl;
+    std::cout << std::endl;
+
+    // Move the folder with its content to other place
+    file.rename("/NewGroup/SubGroup", "/FinalDestination");
+
+    // Here is the important moment: the old 'dataset' variable still
+    // reports the path from before the move
+    std::cout << "DataSet's path wasn't changed?" << std::endl;
+    std::cout << "dataset path: " << dataset.getPath() << std::endl;
+    std::cout << std::endl;
+
+    // But actually it was moved; we just need to update the variable
+    dataset = file.getDataSet("/FinalDestination/movedData");
+    std::cout << "Actually it was moved we just need to update it!" << std::endl;
+    std::cout << "dataset path: " << dataset.getPath() << std::endl;
+    std::cout << std::endl;
+
+    file.flush();
+}
diff --git a/packages/HighFive/src/examples/select_by_id_dataset_cpp11.cpp b/packages/HighFive/src/examples/select_by_id_dataset_cpp11.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..973c57435fc26ff8a2f8e1c6232ed2c837691cb6
--- /dev/null
+++ b/packages/HighFive/src/examples/select_by_id_dataset_cpp11.cpp
@@ -0,0 +1,67 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#include <functional>
+#include <iostream>
+#include <string>
+#include <vector>
+
+#include <highfive/highfive.hpp>
+
+const std::string file_name("select_partial_string.h5");
+const std::string dataset_name("message");
+
+// Create a dataset name "dset" of double 4x6
+//
+int main(void) {
+    using namespace HighFive;
+
+    // Create a new file using the default property lists.
+    File file(file_name, File::Truncate);
+
+    {
+        // We have a set of string
+        std::vector<std::string> values = {
+            "Cat",
+            "Dog",
+            "Hello",
+            "Tree",
+            "World",
+            "Plane",
+            ", ",
+            "你好",
+            "Tea",
+            "Moon",
+            "صباح جميل",
+            "Spaceship",
+        };
+
+        // let's create a dataset
+        DataSet dataset = file.createDataSet<std::string>(dataset_name, DataSpace::From(values));
+
+        // and write them
+        dataset.write(values);
+    }
+
+    {
+        DataSet dataset = file.getDataSet(dataset_name);
+
+        // now let's read back, cherry-picking the strings we're interested in
+        std::vector<std::string> result;
+        // we select only elements 2, 4, 6, 7, 6 and 10 (indices may repeat)
+        dataset.select(ElementSet({2, 4, 6, 7, 6, 10})).read(result);
+
+        // and display it
+        for (auto i: result) {
+            std::cout << i << " ";
+        }
+        std::cout << "\n";
+    }
+
+    return 0;  // successfully terminated
+}
diff --git a/packages/HighFive/src/examples/select_partial_dataset_cpp11.cpp b/packages/HighFive/src/examples/select_partial_dataset_cpp11.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1e480c1603f460891d6b64b77f9e4d39500cab1c
--- /dev/null
+++ b/packages/HighFive/src/examples/select_partial_dataset_cpp11.cpp
@@ -0,0 +1,49 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#include <functional>
+#include <iostream>
+#include <string>
+#include <vector>
+
+#include <highfive/highfive.hpp>
+
+const std::string file_name("select_partial_example.h5");
+const std::string dataset_name("dset");
+
+// Create a dataset name "dset" of double 4x6
+//
+int main(void) {
+    using namespace HighFive;
+
+    // Create a new file using the default property lists.
+    File file(file_name, File::ReadWrite | File::Create | File::Truncate);
+
+    // we have some example values in a 2D vector 2x5
+    std::vector<std::vector<double>> values = {{1.0, 2.0, 4.0, 8.0, 16.0},
+                                               {32.0, 64.0, 128.0, 256.0, 512.0}};
+
+    // let's create a dataset of this size
+    DataSet dataset = file.createDataSet<double>(dataset_name, DataSpace::From(values));
+    // and write them
+    dataset.write(values);
+
+    // now we read back a 2x2 block starting at offset {0, 2}
+    std::vector<std::vector<double>> result;
+    dataset.select({0, 2}, {2, 2}).read(result);
+
+    // we print out 4 values
+    for (auto i: result) {
+        for (auto j: i) {
+            std::cout << " " << j;
+        }
+        std::cout << "\n";
+    }
+
+    return 0;  // successfully terminated
+}
diff --git a/packages/HighFive/tests/test_dependent_library/CMakeLists.txt b/packages/HighFive/tests/test_dependent_library/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..570dba224e65112c7af578f1337647b98daedc55
--- /dev/null
+++ b/packages/HighFive/tests/test_dependent_library/CMakeLists.txt
@@ -0,0 +1,42 @@
+# This is a sample library to test integration via add_subdirectory and CMakeConfig
+cmake_minimum_required(VERSION 3.1)
+
+project(test_project VERSION 0.1)
+
+if(NOT DEFINED CMAKE_CXX_STANDARD)
+  set(CMAKE_CXX11_STANDARD_COMPILE_OPTION "-std=c++11")  # For some compilers under CMake 3.1
+  set(CMAKE_CXX_STANDARD 11)
+  set(CMAKE_CXX_STANDARD_REQUIRED ON)
+  set(CMAKE_CXX_EXTENSIONS OFF)
+endif()
+
+option(USE_BUNDLED_HIGHFIVE "Use HighFive from the deps folder. Otherwise it must be installed" ON)
+
+if(USE_BUNDLED_HIGHFIVE)
+    add_subdirectory("deps/HighFive" EXCLUDE_FROM_ALL)
+else()
+    find_package(HighFive REQUIRED QUIET)
+endif()
+
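+# Build the same sources once as a shared library (PIC forced ON) and once as
+# a static library (PIC OFF), so that linking against HighFive is exercised in
+# both configurations.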
+add_library(simpleton SHARED "src/simpleton.cpp" "src/otherton.cpp")
+target_include_directories(simpleton
+    PUBLIC
+    $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
+    $<INSTALL_INTERFACE:include>)
+target_link_libraries(simpleton PUBLIC HighFive)
+set_property(TARGET simpleton PROPERTY POSITION_INDEPENDENT_CODE ON)
+
+add_library(otherton STATIC "src/simpleton.cpp" "src/otherton.cpp")
+target_include_directories(otherton
+    PUBLIC
+    $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
+    $<INSTALL_INTERFACE:include>)
+target_link_libraries(otherton PUBLIC HighFive)
+set_property(TARGET otherton PROPERTY POSITION_INDEPENDENT_CODE OFF)
+
+install(
+    TARGETS simpleton otherton
+    EXPORT simpletonTarget
+    DESTINATION lib
+    ARCHIVE DESTINATION lib)
+install(EXPORT simpletonTarget DESTINATION lib)
diff --git a/packages/HighFive/tests/test_dependent_library/deps/.gitignore b/packages/HighFive/tests/test_dependent_library/deps/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..5e7d2734cfc60289debf74293817c0a8f572ff32
--- /dev/null
+++ b/packages/HighFive/tests/test_dependent_library/deps/.gitignore
@@ -0,0 +1,4 @@
+# Ignore everything in this directory
+*
+# Except this file
+!.gitignore
diff --git a/packages/HighFive/tests/test_dependent_library/include/simpleton.hpp b/packages/HighFive/tests/test_dependent_library/include/simpleton.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..b98a09fda04045dbcb72444feff720cbb9db84c4
--- /dev/null
+++ b/packages/HighFive/tests/test_dependent_library/include/simpleton.hpp
@@ -0,0 +1,14 @@
+#ifndef H5_TEST_SIMPLETON_HPP
+#define H5_TEST_SIMPLETON_HPP
+
+// Include all headers here to catch any missing `inline` statements, since
+// they will be included by two different compilation units.
+#include <highfive/highfive.hpp>
+
+// Boost should always be found in this setup
+#include <boost/numeric/ublas/matrix.hpp>
+
+void function(const HighFive::Object& obj);
+void other_function(const boost::numeric::ublas::matrix<double>& m);
+
+#endif
diff --git a/packages/HighFive/tests/test_dependent_library/src/otherton.cpp b/packages/HighFive/tests/test_dependent_library/src/otherton.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3e10a3630ddbec8d5883a67541d0ec4ea89fc15d
--- /dev/null
+++ b/packages/HighFive/tests/test_dependent_library/src/otherton.cpp
@@ -0,0 +1,5 @@
+#include "simpleton.hpp"
+
+void other_function(const boost::numeric::ublas::matrix<double>& m) {
+    (void) (m(0, 0) * 0.0);  // touch the matrix; the void cast silences unused-value warnings
+}
diff --git a/packages/HighFive/tests/test_dependent_library/src/simpleton.cpp b/packages/HighFive/tests/test_dependent_library/src/simpleton.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..12cef5bfc49022af397669d06c40048c223d17b8
--- /dev/null
+++ b/packages/HighFive/tests/test_dependent_library/src/simpleton.cpp
@@ -0,0 +1,9 @@
+#include <stdexcept>
+
+#include "simpleton.hpp"
+
+void function(const HighFive::Object& obj) {
+    if (!obj.isValid()) {
+        throw std::exception();
+    }
+}
diff --git a/packages/HighFive/tests/test_project/CMakeLists.txt b/packages/HighFive/tests/test_project/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1a8ef098ae0f68cd6317eb0f526913d1cb5c2b0b
--- /dev/null
+++ b/packages/HighFive/tests/test_project/CMakeLists.txt
@@ -0,0 +1,25 @@
+# This is a sample project to test integration via add_subdirectory and CMakeConfig
+cmake_minimum_required(VERSION 3.1)
+
+project(test_project VERSION 0.1)
+
+if(NOT DEFINED CMAKE_CXX_STANDARD)
+  set(CMAKE_CXX11_STANDARD_COMPILE_OPTION "-std=c++11")  # For some compilers under CMake 3.1
+  set(CMAKE_CXX_STANDARD 11)
+  set(CMAKE_CXX_STANDARD_REQUIRED ON)
+  set(CMAKE_CXX_EXTENSIONS OFF)
+endif()
+
+option(USE_BUNDLED_HIGHFIVE "Use HighFive from the deps folder. Otherwise it must be installed" ON)
+
+if(USE_BUNDLED_HIGHFIVE)
+    add_subdirectory("deps/HighFive" EXCLUDE_FROM_ALL)
+else()
+    find_package(HighFive REQUIRED)
+endif()
+
+add_executable(read_write_bin "read_write_vector_dataset.cpp")
+target_link_libraries(read_write_bin HighFive)
+
+enable_testing()
+add_test(NAME test_project COMMAND ${CMAKE_CURRENT_BINARY_DIR}/read_write_bin)
diff --git a/packages/HighFive/tests/test_project/deps/.gitignore b/packages/HighFive/tests/test_project/deps/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..5e7d2734cfc60289debf74293817c0a8f572ff32
--- /dev/null
+++ b/packages/HighFive/tests/test_project/deps/.gitignore
@@ -0,0 +1,4 @@
+# Ignore everything in this directory
+*
+# Except this file
+!.gitignore
diff --git a/packages/HighFive/tests/test_project/read_write_vector_dataset.cpp b/packages/HighFive/tests/test_project/read_write_vector_dataset.cpp
new file mode 120000
index 0000000000000000000000000000000000000000..84b5175b888472b478660178c7ec96b5edac3cf9
--- /dev/null
+++ b/packages/HighFive/tests/test_project/read_write_vector_dataset.cpp
@@ -0,0 +1 @@
+../../src/examples/read_write_vector_dataset.cpp
\ No newline at end of file
diff --git a/packages/HighFive/tests/test_project_integration.sh b/packages/HighFive/tests/test_project_integration.sh
new file mode 100644
index 0000000000000000000000000000000000000000..ae88695a0ca1fed8aae2e36276320bf876cb5b4f
--- /dev/null
+++ b/packages/HighFive/tests/test_project_integration.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+set -xeo pipefail
+cd "$( dirname "${BASH_SOURCE[0]}")"  # cd here
+
+BUILDDIR="${PWD}/build"
+ROOT="${PWD}/.."
+TESTDIR="${PWD}"
+
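+# test_install <project> <build flavour> [extra cmake args...]
+# Symlinks this repo into <project>/deps/HighFive, configures and builds the
+# project in a scratch build directory, runs ctest, then removes the symlink.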
+test_install() {
+    local project="${1}"
+    local builddir="${BUILDDIR}/${project}/${2}"
+    shift
+    shift
+    ln -sf ../../.. "${TESTDIR}/${project}/deps/HighFive"
+    rm -rf "${builddir}"
+    mkdir -p "${builddir}"
+    pushd "${builddir}"
+    cmake "${TESTDIR}/${project}" "$@"
+    cmake --build . --verbose
+    ctest
+    popd
+    rm "${TESTDIR}/${project}/deps/HighFive"
+}
+
+rm -rf "${BUILDDIR}/highfive"
+mkdir -p "${BUILDDIR}/highfive"
+pushd "${BUILDDIR}/highfive"
+cmake "${ROOT}" \
+    -DHIGHFIVE_EXAMPLES=OFF \
+    -DHIGHFIVE_UNIT_TESTS=OFF \
+    -DCMAKE_INSTALL_PREFIX="${PWD}/install"
+cmake --build . --target install
+popd
+
+for project in test_project test_dependent_library; do
+    # Case 1. Base case: include subdirectory
+    test_install "${project}" subdir
+
+    # Case 2. Use an install dir and reuse its exported dependency
+    # configuration (HighFive itself was installed above, without tests)
+    test_install "${project}" reuse_deps \
+        -DUSE_BUNDLED_HIGHFIVE=NO \
+        -DHIGHFIVE_USE_INSTALL_DEPS=YES \
+        -DCMAKE_PREFIX_PATH="${BUILDDIR}/highfive/install"
+
+    # Case 3. Re-detect the dependencies instead of reusing the installed ones
+    test_install "${project}" install_new_deps \
+        -DUSE_BUNDLED_HIGHFIVE=NO \
+        -DHIGHFIVE_USE_INSTALL_DEPS=NO \
+        -DCMAKE_PREFIX_PATH="${BUILDDIR}/highfive/install"
+done
diff --git a/packages/HighFive/tests/unit/CMakeLists.txt b/packages/HighFive/tests/unit/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3644d117ce11f7464e8f67fd0d360ff871b115fb
--- /dev/null
+++ b/packages/HighFive/tests/unit/CMakeLists.txt
@@ -0,0 +1,54 @@
+include(CTest)
+include(Catch)
+include(HighFiveWarnings)
+
+if(MSVC)
+  add_definitions(/bigobj)
+endif()
+
+## Base tests
+foreach(test_name tests_high_five_base tests_high_five_multi_dims tests_high_five_easy test_all_types)
+  add_executable(${test_name} "${test_name}.cpp")
+  target_link_libraries(${test_name} HighFive HighFiveWarnings Catch2::Catch2WithMain)
+  catch_discover_tests(${test_name})
+endforeach()
+
+if(HIGHFIVE_PARALLEL_HDF5)
+  include(TestHelpers)
+  set(tests_parallel_src "tests_high_five_parallel.cpp")
+
+  ## parallel MPI tests
+  add_executable(tests_parallel_bin ${tests_parallel_src})
+  target_link_libraries(tests_parallel_bin HighFive HighFiveWarnings Catch2::Catch2)
+
+  # We need to patch in a call to `mpirun` (or equivalent) when running the
+  # parallel tests. Catch2 does not foresee this, so we modify its test
+  # discovery script, and fail if the expected pattern is no longer found.
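+  # (Net effect: each discovered test is registered to run roughly as
+  # "${TEST_MPI_EXEC_PREFIX_DEFAULT} -n 2 <test executable> ...", where the
+  # prefix is presumably an mpiexec-style launcher provided by TestHelpers.)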
+  set(original_catch_script "${_CATCH_DISCOVER_TESTS_SCRIPT}")
+  set(patched_catch_script "${CMAKE_CURRENT_BINARY_DIR}/patched_catch_test_discovery.cmake")
+  file(READ "${original_catch_script}" original_catch_script_contents)
+  string(REGEX REPLACE
+         "(add_command\\(add_test.*TEST_EXECUTOR})"
+         "\\1 ${TEST_MPI_EXEC_PREFIX_DEFAULT} -n 2"
+         modified_catch_script_contents
+         "${original_catch_script_contents}")
+  if(original_catch_script_contents STREQUAL modified_catch_script_contents)
+    message(FATAL_ERROR "Failed to modify Catch2 test execution")
+  endif()
+  file(WRITE "${patched_catch_script}" "${modified_catch_script_contents}")
+  set(_CATCH_DISCOVER_TESTS_SCRIPT "${patched_catch_script}")
+  catch_discover_tests(tests_parallel_bin)
+  set(_CATCH_DISCOVER_TESTS_SCRIPT "${original_catch_script}")
+endif()
+
+option(HIGHFIVE_TEST_SINGLE_INCLUDES "Enable testing single includes" FALSE)
+
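+# Compile one tiny translation unit per public header to check that every
+# header is self-contained; tests_import_public_headers.cpp is configured once
+# per header (presumably substituting the header name via @...@ expansion).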
+if(HIGHFIVE_TEST_SINGLE_INCLUDES)
+    file(GLOB public_headers LIST_DIRECTORIES false RELATIVE ${PROJECT_SOURCE_DIR}/include ${PROJECT_SOURCE_DIR}/include/highfive/*.hpp)
+    foreach(PUBLIC_HEADER ${public_headers})
+        get_filename_component(CLASS_NAME ${PUBLIC_HEADER} NAME_WE)
+        configure_file(tests_import_public_headers.cpp "tests_${CLASS_NAME}.cpp" @ONLY)
+        add_executable("tests_include_${CLASS_NAME}" "${CMAKE_CURRENT_BINARY_DIR}/tests_${CLASS_NAME}.cpp")
+        target_link_libraries("tests_include_${CLASS_NAME}" HighFive HighFiveWarnings)
+    endforeach()
+endif()
diff --git a/packages/HighFive/tests/unit/test_all_types.cpp b/packages/HighFive/tests/unit/test_all_types.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d74579af6b23176cd99bd9436d690e623fdc5f9b
--- /dev/null
+++ b/packages/HighFive/tests/unit/test_all_types.cpp
@@ -0,0 +1,205 @@
+/*
+ *  Copyright (c), 2017-2019, Blue Brain Project - EPFL
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#include <string>
+
+#include <catch2/catch_template_test_macros.hpp>
+
+#include <highfive/highfive.hpp>
+#include "tests_high_five.hpp"
+
+using namespace HighFive;
+
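+// Each test case below round-trips one container/scalar combination through a
+// fresh HDF5 file and checks that the data read back equals what was written.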
+TEMPLATE_TEST_CASE("Scalar in DataSet", "[Types]", bool, std::string) {
+    const std::string FILE_NAME("rw_dataset_" + typeNameHelper<TestType>() + ".h5");
+    const std::string DATASET_NAME("dset");
+    TestType t1{};
+
+    {
+        // Create a new file using the default property lists.
+        File file(FILE_NAME, File::ReadWrite | File::Create | File::Truncate);
+
+        // Create the dataset
+        DataSet dataset =
+            file.createDataSet(DATASET_NAME,
+                               DataSpace(1),
+                               create_datatype<typename details::inspector<TestType>::base_type>());
+
+        // Write into the initial part of the dataset
+        dataset.write(t1);
+    }
+
+    // read it back
+    {
+        File file(FILE_NAME, File::ReadOnly);
+
+        TestType value;
+        DataSet dataset = file.getDataSet("/" + DATASET_NAME);
+        dataset.read(value);
+        CHECK(t1 == value);
+    }
+}
+
+TEMPLATE_PRODUCT_TEST_CASE("Scalar in std::vector", "[Types]", std::vector, (bool, std::string)) {
+    const std::string FILE_NAME("rw_dataset_" + typeNameHelper<TestType>() + ".h5");
+    const std::string DATASET_NAME("dset");
+    TestType t1(5);
+
+    {
+        // Create a new file using the default property lists.
+        File file(FILE_NAME, File::ReadWrite | File::Create | File::Truncate);
+
+        // Create the dataset
+        DataSet dataset = file.createDataSet(
+            DATASET_NAME, {5}, create_datatype<typename details::inspector<TestType>::base_type>());
+
+        // Write into the initial part of the dataset
+        dataset.write(t1);
+    }
+
+    // read it back
+    {
+        File file(FILE_NAME, File::ReadOnly);
+
+        TestType value;
+        DataSet dataset = file.getDataSet("/" + DATASET_NAME);
+        dataset.read(value);
+        CHECK(t1 == value);
+        CHECK(value.size() == 5);
+    }
+}
+
+TEMPLATE_PRODUCT_TEST_CASE("Scalar in std::vector<std::vector>",
+                           "[Types]",
+                           std::vector,
+                           (bool, std::string)) {
+    const std::string FILE_NAME("rw_dataset_vector_" + typeNameHelper<TestType>() + ".h5");
+    const std::string DATASET_NAME("dset");
+    std::vector<TestType> t1(5);
+    for (auto&& e: t1) {
+        e.resize(6);
+    }
+
+    {
+        // Create a new file using the default property lists.
+        File file(FILE_NAME, File::ReadWrite | File::Create | File::Truncate);
+
+        // Create the dataset
+        DataSet dataset = file.createDataSet(
+            DATASET_NAME,
+            {5, 6},
+            create_datatype<typename details::inspector<std::vector<TestType>>::base_type>());
+
+        // Write into the initial part of the dataset
+        dataset.write(t1);
+    }
+
+    // read it back
+    {
+        File file(FILE_NAME, File::ReadOnly);
+
+        std::vector<TestType> value;
+        DataSet dataset = file.getDataSet("/" + DATASET_NAME);
+        dataset.read(value);
+        CHECK(t1 == value);
+        CHECK(value.size() == 5);
+    }
+}
+
+TEMPLATE_TEST_CASE("Scalar in std::array", "[Types]", bool, std::string) {
+    const std::string FILE_NAME("rw_dataset_array_" + typeNameHelper<TestType>() + ".h5");
+    const std::string DATASET_NAME("dset");
+    std::array<TestType, 5> t1{};
+
+    {
+        // Create a new file using the default property lists.
+        File file(FILE_NAME, File::ReadWrite | File::Create | File::Truncate);
+
+        // Create the dataset
+        DataSet dataset = file.createDataSet(
+            DATASET_NAME,
+            {5},
+            create_datatype<typename details::inspector<std::array<TestType, 5>>::base_type>());
+
+        // Write into the initial part of the dataset
+        dataset.write(t1);
+    }
+
+    // read it back
+    {
+        File file(FILE_NAME, File::ReadOnly);
+
+        std::array<TestType, 5> value;
+        DataSet dataset = file.getDataSet("/" + DATASET_NAME);
+        dataset.read(value);
+        CHECK(t1 == value);
+        CHECK(value.size() == 5);
+    }
+}
+
+TEMPLATE_TEST_CASE("Scalar in std::vector<std::array>", "[Types]", bool, std::string) {
+    const std::string FILE_NAME("rw_dataset_vector_array_" + typeNameHelper<TestType>() + ".h5");
+    const std::string DATASET_NAME("dset");
+    std::vector<std::array<TestType, 6>> t1(5);
+
+    {
+        // Create a new file using the default property lists.
+        File file(FILE_NAME, File::ReadWrite | File::Create | File::Truncate);
+
+        // Create the dataset
+        DataSet dataset = file.createDataSet(
+            DATASET_NAME,
+            {5, 6},
+            create_datatype<
+                typename details::inspector<std::vector<std::array<TestType, 6>>>::base_type>());
+
+        // Write into the initial part of the dataset
+        dataset.write(t1);
+    }
+
+    // read it back
+    {
+        File file(FILE_NAME, File::ReadOnly);
+
+        std::vector<std::array<TestType, 6>> value;
+        DataSet dataset = file.getDataSet("/" + DATASET_NAME);
+        dataset.read(value);
+        CHECK(t1 == value);
+        CHECK(value.size() == 5);
+    }
+}
+
+#if HIGHFIVE_CXX_STD >= 17
+TEMPLATE_PRODUCT_TEST_CASE("Scalar in std::vector<std::byte>", "[Types]", std::vector, std::byte) {
+    const std::string FILE_NAME("rw_dataset_vector_" + typeNameHelper<TestType>() + ".h5");
+    const std::string DATASET_NAME("dset");
+    TestType t1(5, std::byte(0xCD));
+
+    {
+        // Create a new file using the default property lists.
+        File file(FILE_NAME, File::ReadWrite | File::Create | File::Truncate);
+
+        // Create the dataset
+        DataSet dataset = file.createDataSet(DATASET_NAME, {5}, create_datatype<std::byte>());
+
+        // Write into the initial part of the dataset
+        dataset.write(t1);
+    }
+
+    // read it back
+    {
+        File file(FILE_NAME, File::ReadOnly);
+
+        TestType value(5, std::byte(0xCD));
+        DataSet dataset = file.getDataSet("/" + DATASET_NAME);
+        dataset.read(value);
+        CHECK(t1 == value);
+        CHECK(value.size() == 5);
+    }
+}
+#endif
diff --git a/packages/HighFive/tests/unit/tests_high_five.hpp b/packages/HighFive/tests/unit/tests_high_five.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..0ebd58c44890e65ddbf4ee149d6c13c6662a15e3
--- /dev/null
+++ b/packages/HighFive/tests/unit/tests_high_five.hpp
@@ -0,0 +1,205 @@
+/*
+ *  Copyright (c), 2017-2019, Blue Brain Project - EPFL
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+
+#include <complex>
+#include <random>
+#include <string>
+#include <vector>
+#include <tuple>
+#include <sstream>
+#include <functional>
+#include <iomanip>
+
+// We don't need Windows-specific functionality. However, to better detect
+// defects caused by its macros, we include this header.
+// The list of identifiers is taken from `Boost::Predef`.
+#if defined(_WIN32) || defined(_WIN64) || defined(__WIN32__) || defined(__TOS_WIN__) || \
+    defined(__WINDOWS__)
+#include <Windows.h>
+#endif
+
+using ldcomplex = std::complex<long double>;
+using dcomplex = std::complex<double>;
+using fcomplex = std::complex<float>;
+
+using base_test_types = std::tuple<int,
+                                   unsigned int,
+                                   long,
+                                   unsigned long,
+                                   unsigned char,
+                                   char,
+                                   float,
+                                   double,
+                                   long long,
+                                   unsigned long long,
+                                   ldcomplex,
+                                   dcomplex,
+                                   fcomplex>;
+
+#ifdef H5_USE_HALF_FLOAT
+#include <half.hpp>
+
+using float16_t = half_float::half;
+using numerical_test_types =
+    decltype(std::tuple_cat(std::declval<base_test_types>(), std::tuple<float16_t>()));
+#else
+using numerical_test_types = base_test_types;
+#endif
+
+using dataset_test_types =
+    std::tuple<int, unsigned int, long, unsigned long, unsigned char, char, float, double>;
+
+
+template <typename T, typename Func>
+void fillVec(std::vector<std::vector<T>>& v, std::vector<size_t> dims, const Func& f) {
+    v.resize(dims[0]);
+    dims.erase(dims.begin());
+    for (auto& subvec: v) {
+        fillVec(subvec, dims, f);
+    }
+}
+
+template <typename T, typename Func>
+void fillVec(std::vector<T>& v, std::vector<size_t> dims, const Func& f) {
+    v.resize(dims[0]);
+    std::generate(v.begin(), v.end(), f);
+}
+
+
+template <typename T>
+bool checkLength(const std::vector<T>& v, std::vector<size_t> dims) {
+    return (dims.size() == 1 && v.size() == dims[0]);
+}
+
+template <typename T>
+bool checkLength(const std::vector<std::vector<T>>& v, std::vector<size_t> dims) {
+    size_t dim0 = dims[0];
+    dims.erase(dims.begin());
+    if (v.size() != dim0) {
+        return false;
+    }
+    return checkLength(v[0], dims);
+}
+
+
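+// Fill a 2D C-style array with generated values. T is deduced as the row type
+// (e.g. double[6]) when passing a 2D array, so table[i][j] selects row i,
+// column j.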
+template <typename T, typename Func>
+void generate2D(T* table, size_t x, size_t y, Func& func) {
+    for (size_t i = 0; i < x; i++) {
+        for (size_t j = 0; j < y; j++) {
+            table[i][j] = func();
+        }
+    }
+}
+
+
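+// Deterministic value generator: returns 0 on the first call, then increases
+// by roughly 1.1 per call (exactly 1 for integral T, since T(1) / T(10)
+// truncates to zero).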
+template <typename T>
+struct ContentGenerate {
+    ContentGenerate()
+        : _init(0)
+        , _inc(T(1) + T(1) / T(10)) {}
+
+    T operator()() {
+        T ret = _init;
+        _init = static_cast<T>(_init + _inc);
+        return ret;
+    }
+
+    T _init, _inc;
+};
+
+// Explicit specializations defined in a header must be `inline` to avoid
+// multiple-definition errors when included from several translation units.
+template <>
+inline ContentGenerate<ldcomplex>::ContentGenerate()
+    : _init(0, 0)
+    , _inc(ldcomplex(1, 1) + ldcomplex(1, 1) / ldcomplex(10)) {}
+
+template <>
+inline ContentGenerate<dcomplex>::ContentGenerate()
+    : _init(0, 0)
+    , _inc(dcomplex(1, 1) + dcomplex(1, 1) / dcomplex(10)) {}
+
+template <>
+inline ContentGenerate<fcomplex>::ContentGenerate()
+    : _init(0, 0)
+    , _inc(fcomplex(1, 1) + fcomplex(1, 1) / fcomplex(10)) {}
+
+template <>
+struct ContentGenerate<char> {
+    ContentGenerate()
+        : _init('a') {}
+
+    char operator()() {
+        char ret = _init;
+        if (++_init >= ('a' + 26))
+            _init = 'a';
+        return ret;
+    }
+
+    char _init;
+};
+
+template <>
+struct ContentGenerate<std::string> {
+    ContentGenerate() {}
+
+    std::string operator()() {
+        ContentGenerate<char> gen;
+        std::string random_string;
+        std::mt19937_64 rgen;
+        rgen.seed(88);  // fixed seed on each call: every generated string is identical (deterministic tests)
+        std::uniform_int_distribution<unsigned> int_dist(0, 1000);
+        const size_t size_string = int_dist(rgen);
+
+        random_string.resize(size_string);
+        std::generate(random_string.begin(), random_string.end(), gen);
+        return random_string;
+    }
+};
+
+
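+// Build a filesystem-friendly identifier from typeid(T).name(); names longer
+// than 64 characters are replaced by their hash so generated file names stay
+// short.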
+template <typename T>
+inline std::string typeNameHelper() {
+    std::string name = typeid(T).name();
+    std::replace(std::begin(name), std::end(name), ' ', '_');
+    std::replace(std::begin(name), std::end(name), '<', '_');
+    std::replace(std::begin(name), std::end(name), '>', '_');
+    std::replace(std::begin(name), std::end(name), ':', '_');
+
+    if (name.size() > 64) {
+        std::stringstream hash;
+        hash << std::hex << std::hash<std::string>{}(name);
+
+        return hash.str();
+    } else {
+        return name;
+    }
+}
+
+
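+// Write `ndvec` to a fresh file, read it straight back into `result`, and
+// return the dataset so callers can run further checks on it.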
+template <typename ElemT, typename DataT>
+inline HighFive::DataSet readWriteDataset(const DataT& ndvec,
+                                          DataT& result,
+                                          const size_t ndims,
+                                          const std::string& struct_t) {
+    using namespace HighFive;
+    const std::string DATASET_NAME("dset");
+
+    std::ostringstream filename;
+    filename << "h5_rw_" << struct_t << "_" << ndims << "d_" << typeNameHelper<ElemT>()
+             << "_test.h5";
+
+    // Create a new file using the default property lists.
+    File file(filename.str(), File::Truncate);
+
+    // Create a dataset with type T points
+    DataSet dataset = file.createDataSet<ElemT>(DATASET_NAME, DataSpace::From(ndvec));
+    dataset.write(ndvec);
+
+    dataset.read(result);
+    return dataset;
+}
diff --git a/packages/HighFive/tests/unit/tests_high_five_base.cpp b/packages/HighFive/tests/unit/tests_high_five_base.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..899170d9328683b2832e592871ff7fac917ea58e
--- /dev/null
+++ b/packages/HighFive/tests/unit/tests_high_five_base.cpp
@@ -0,0 +1,3762 @@
+/*
+ *  Copyright (c), 2017-2019, Blue Brain Project - EPFL
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+#include <algorithm>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>  // memset
+#include <ctime>
+#include <iostream>
+#include <map>
+#include <memory>
+#include <random>
+#include <string>
+#include <typeinfo>
+#include <vector>
+
+#include <catch2/catch_test_macros.hpp>
+#include <catch2/catch_template_test_macros.hpp>
+#include <catch2/matchers/catch_matchers_vector.hpp>
+
+#include <highfive/highfive.hpp>
+#include "tests_high_five.hpp"
+
+using namespace HighFive;
+using Catch::Matchers::Equals;
+
+TEST_CASE("Basic HighFive tests") {
+    const std::string file_name("h5tutr_dset.h5");
+    const std::string dataset_name("dset");
+
+    // Create a new file using the default property lists.
+    File file(file_name, File::ReadWrite | File::Create | File::Truncate);
+
+    CHECK(file.getName() == file_name);
+
+    // Create the data space for the dataset.
+    std::vector<size_t> dims{4, 6};
+
+    DataSpace dataspace(dims);
+
+    // check that the dataset does not exist yet
+    CHECK(!file.exist(dataset_name + "_double"));
+
+    // Create a dataset with double precision floating points
+    DataSet dataset_double =
+        file.createDataSet(dataset_name + "_double", dataspace, AtomicType<double>());
+
+    CHECK(file.getObjectName(0) == dataset_name + "_double");
+
+    {
+        // check that it exists now
+        CHECK(file.exist(dataset_name + "_double"));
+
+        // and also try to recreate it, for the sake of exception testing
+        SilenceHDF5 silencer;
+        CHECK_THROWS_AS(file.createDataSet(dataset_name + "_double",
+                                           dataspace,
+                                           AtomicType<double>()),
+                        DataSetException);
+    }
+
+    DataSet dataset_size_t = file.createDataSet<size_t>(dataset_name + "_size_t", dataspace);
+}
+
+TEST_CASE("Test silent HighFive") {
+    // Setting up a buffer for stderr so we can detect if the stack trace
+    // was disabled
+    fflush(stderr);
+    char buffer[1024];
+    memset(buffer, 0, sizeof(buffer));
+    setvbuf(stderr, buffer, _IOLBF, 1023);
+
+    try {
+        SilenceHDF5 silence;
+        File file("nonexistent", File::ReadOnly);
+    } catch (const FileException&) {
+    }
+    CHECK(buffer[0] == '\0');
+
+    // restore stderr to unbuffered mode, otherwise writing to stderr would
+    // segfault once `buffer` goes out of scope
+    fflush(stderr);
+    setvbuf(stderr, NULL, _IONBF, 0);
+}
+
+TEST_CASE("Test open modes in HighFive") {
+    const std::string file_name("openmodes.h5");
+
+    std::remove(file_name.c_str());
+
+    SilenceHDF5 silencer;
+
+    // Attempting to open a file with only ReadWrite should fail (it won't create one)
+    CHECK_THROWS_AS(File(file_name, File::ReadWrite), FileException);
+
+    // But with the Create flag it should be fine
+    { File file(file_name, File::ReadWrite | File::Create); }
+
+    // But if it's already there and Excl is given, it should fail
+    CHECK_THROWS_AS(File(file_name, File::ReadWrite | File::Excl), FileException);
+    // ReadWrite and Excl flags are fine together (POSIX)
+    std::remove(file_name.c_str());
+    { File file(file_name, File::ReadWrite | File::Excl); }
+    // All three are fine as well (as long as the file does not exist)
+    std::remove(file_name.c_str());
+    { File file(file_name, File::ReadWrite | File::Create | File::Excl); }
+
+    // Just a few combinations are incompatible; these are detected by the HDF5 library
+    CHECK_THROWS_AS(File(file_name, File::Truncate | File::Excl), FileException);
+
+    std::remove(file_name.c_str());
+    CHECK_THROWS_AS(File(file_name, File::Truncate | File::Excl), FileException);
+
+    // But in most cases we will truncate and that should always work
+    { File file(file_name, File::Truncate); }
+    std::remove(file_name.c_str());
+    { File file(file_name, File::Truncate); }
+
+    // Last but not least, defaults should be ok
+    { File file(file_name); }     // ReadOnly
+    { File file(file_name, 0); }  // force empty flags: opens without any flag set
+}
+
+TEST_CASE("Test file version bounds") {
+    const std::string file_name("h5_version_bounds.h5");
+
+    std::remove(file_name.c_str());
+
+    {
+        File file(file_name, File::Truncate);
+        auto bounds = file.getVersionBounds();
+        CHECK(bounds.first == H5F_LIBVER_EARLIEST);
+        CHECK(bounds.second == H5F_LIBVER_LATEST);
+    }
+
+    std::remove(file_name.c_str());
+
+    {
+        FileAccessProps fapl;
+        fapl.add(FileVersionBounds(H5F_LIBVER_LATEST, H5F_LIBVER_LATEST));
+        File file(file_name, File::Truncate, fapl);
+        auto bounds = file.getVersionBounds();
+        CHECK(bounds.first == H5F_LIBVER_LATEST);
+        CHECK(bounds.second == H5F_LIBVER_LATEST);
+    }
+}
+
+#if H5_VERSION_GE(1, 10, 1)
+TEST_CASE("Test file space strategy") {
+    const std::string file_name("h5_file_space_strategy.h5");
+    auto strategies = std::vector<H5F_fspace_strategy_t>{H5F_FSPACE_STRATEGY_FSM_AGGR,
+                                                         H5F_FSPACE_STRATEGY_AGGR,
+                                                         H5F_FSPACE_STRATEGY_PAGE,
+                                                         H5F_FSPACE_STRATEGY_NONE};
+
+    for (const auto& strategy: strategies) {
+        {
+            FileCreateProps create_props;
+            create_props.add(FileSpaceStrategy(strategy, true, 0));
+
+            File file(file_name, File::Truncate, create_props);
+        }
+
+        {
+            File file(file_name, File::ReadOnly);
+            CHECK(file.getFileSpaceStrategy() == strategy);
+        }
+    }
+}
+
+TEST_CASE("Test file space page size") {
+    const std::string file_name("h5_file_space_page_size.h5");
+    hsize_t page_size = 1024;
+    {
+        FileCreateProps create_props;
+        create_props.add(FileSpaceStrategy(H5F_FSPACE_STRATEGY_PAGE, true, 0));
+        create_props.add(FileSpacePageSize(page_size));
+
+        File file(file_name, File::Truncate, create_props);
+    }
+
+    {
+        File file(file_name, File::ReadOnly);
+        CHECK(file.getFileSpacePageSize() == page_size);
+    }
+}
+
+#ifndef H5_HAVE_PARALLEL
+TEST_CASE("Test page buffer size") {
+    const std::string file_name("h5_page_buffer_size.h5");
+    hsize_t page_size = 1024;
+    {
+        FileCreateProps create_props;
+        create_props.add(FileSpaceStrategy(H5F_FSPACE_STRATEGY_PAGE, true, 0));
+        create_props.add(FileSpacePageSize(page_size));
+
+        FileAccessProps access_props;
+        access_props.add(FileVersionBounds(H5F_LIBVER_V110, H5F_LIBVER_V110));
+
+        File file(file_name, File::Truncate, create_props, access_props);
+
+        file.createDataSet("x", std::vector<double>{1.0, 2.0, 3.0});
+    }
+
+    {
+        FileAccessProps access_props;
+        access_props.add(PageBufferSize(1024));
+
+        File file(file_name, File::ReadOnly, access_props);
+
+        auto accesses = std::array<unsigned int, 2>{0, 0};
+        auto hits = std::array<unsigned int, 2>{0, 0};
+        auto misses = std::array<unsigned int, 2>{0, 0};
+        auto evictions = std::array<unsigned int, 2>{0, 0};
+        auto bypasses = std::array<unsigned int, 2>{0, 0};
+
+        auto err = H5Fget_page_buffering_stats(file.getId(),
+                                               accesses.data(),
+                                               hits.data(),
+                                               misses.data(),
+                                               evictions.data(),
+                                               bypasses.data());
+        REQUIRE(err >= 0);
+
+        CHECK(accesses[0] == 0);
+        CHECK(accesses[1] == 0);
+
+        CHECK(bypasses[0] == 0);
+        CHECK(bypasses[1] == 0);
+
+        auto x = file.getDataSet("x").read<std::vector<double>>();
+
+        err = H5Fget_page_buffering_stats(file.getId(),
+                                          accesses.data(),
+                                          hits.data(),
+                                          misses.data(),
+                                          evictions.data(),
+                                          bypasses.data());
+        REQUIRE(err >= 0);
+
+        CHECK(accesses[0] > 0);
+        CHECK(accesses[1] == 1);
+
+        CHECK(bypasses[0] == 0);
+        CHECK(bypasses[1] == 0);
+
+        CHECK(file.getFileSpacePageSize() == page_size);
+    }
+}
+#endif
+#endif
+
+TEST_CASE("Test metadata block size assignment") {
+    const std::string file_name("h5_meta_block_size.h5");
+
+    std::remove(file_name.c_str());
+
+    {
+        File file(file_name, File::Truncate);
+        // Default for HDF5
+        CHECK(file.getMetadataBlockSize() == 2048);
+    }
+
+    std::remove(file_name.c_str());
+
+    {
+        FileAccessProps fapl;
+        fapl.add(MetadataBlockSize(10240));
+        File file(file_name, File::Truncate, fapl);
+        CHECK(file.getMetadataBlockSize() == 10240);
+    }
+}
+
+TEST_CASE("Test group properties") {
+    const std::string file_name("h5_group_properties.h5");
+    FileAccessProps fapl;
+    // When using hdf5 1.10.2 and later, the lower bound may be set to
+    // H5F_LIBVER_V18
+    fapl.add(FileVersionBounds(H5F_LIBVER_LATEST, H5F_LIBVER_LATEST));
+    File file(file_name, File::Truncate, fapl);
+
+    GroupCreateProps props;
+    props.add(EstimatedLinkInfo(10, 60));
+    auto group = file.createGroup("g", props);
+    auto sizes = group.getEstimatedLinkInfo();
+
+    CHECK(sizes.first == 10);
+    CHECK(sizes.second == 60);
+}
+
+TEST_CASE("Test allocation time") {
+    const std::string file_name("h5_dataset_alloc_time.h5");
+    File file(file_name, File::Truncate);
+
+    size_t n_elements = 10;
+    std::vector<double> data(n_elements);
+
+    auto dcpl = DataSetCreateProps{};
+    dcpl.add(AllocationTime(H5D_ALLOC_TIME_EARLY));
+
+    auto dataspace = DataSpace::From(data);
+    auto datatype = create_datatype<decltype(data)::value_type>();
+    auto dataset = file.createDataSet("dset", dataspace, datatype, dcpl);
+
+    auto alloc_size = H5Dget_storage_size(dataset.getId());
+    CHECK(alloc_size == data.size() * sizeof(decltype(data)::value_type));
+}
+
+/*
+ * Test to ensure legacy support: DataSet used to have a default constructor.
+ * However, it is not useful to have a DataSet object that does not actually
+ * refer to a dataset in a file. Hence, the default constructor was
+ * deprecated.
+ * This test ensures that the constructor is not accidentally removed,
+ * since that would break users' code.
+ */
+TEST_CASE("Test default constructors") {
+    const std::string file_name("h5_default_ctors.h5");
+    const std::string dataset_name("dset");
+    File file(file_name, File::Truncate);
+    auto ds = file.createDataSet(dataset_name, std::vector<int>{1, 2, 3, 4, 5});
+
+    DataSet d2;  // expect a deprecation warning, as it constructs an unsafe object
+    // d2.getFile();  // runtime error
+    CHECK(!d2.isValid());
+    d2 = ds;  // copy
+    CHECK(d2.isValid());
+}
+
+TEST_CASE("Test groups and datasets") {
+    const std::string file_name("h5_group_test.h5");
+    const std::string dataset_name("dset");
+    const std::string chunked_dataset_name("chunked_dset");
+    const std::string chunked_dataset_small_name("chunked_dset_small");
+    const std::string group_name_1("/group1");
+    const std::string group_name_2("group2");
+    const std::string group_nested_name("group_nested");
+
+    {
+        // Create a new file using the default property lists.
+        File file(file_name, File::ReadWrite | File::Create | File::Truncate);
+
+        // absolute group
+        file.createGroup(group_name_1);
+        // nested group absolute
+        file.createGroup(group_name_1 + "/" + group_nested_name);
+        // relative group
+        Group g1 = file.createGroup(group_name_2);
+        // relative group
+        Group nested = g1.createGroup(group_nested_name);
+
+        // Create the data space for the dataset.
+        std::vector<size_t> dims{4, 6};
+
+        DataSpace dataspace(dims);
+
+        DataSet dataset_absolute = file.createDataSet(group_name_1 + "/" + group_nested_name + "/" +
+                                                          dataset_name,
+                                                      dataspace,
+                                                      AtomicType<double>());
+
+        DataSet dataset_relative =
+            nested.createDataSet(dataset_name, dataspace, AtomicType<double>());
+
+        DataSetCreateProps goodChunking;
+        goodChunking.add(Chunking(std::vector<hsize_t>{2, 2}));
+        DataSetAccessProps cacheConfig;
+        cacheConfig.add(Caching(13, 1024, 0.5));
+
+        // will fail because exceeds dimensions
+        DataSetCreateProps badChunking0;
+        badChunking0.add(Chunking(std::vector<hsize_t>{10, 10}));
+
+        DataSetCreateProps badChunking1;
+        badChunking1.add(Chunking(std::vector<hsize_t>{1, 1, 1}));
+
+        {
+            SilenceHDF5 silencer;
+            CHECK_THROWS_AS(file.createDataSet(chunked_dataset_name,
+                                               dataspace,
+                                               AtomicType<double>(),
+                                               badChunking0),
+                            DataSetException);
+
+            CHECK_THROWS_AS(file.createDataSet(chunked_dataset_name,
+                                               dataspace,
+                                               AtomicType<double>(),
+                                               badChunking1),
+                            DataSetException);
+        }
+
+        // here we use the other signature
+        DataSet dataset_chunked =
+            file.createDataSet<float>(chunked_dataset_name, dataspace, goodChunking, cacheConfig);
+
+        // Here we resize to smaller than the chunking size
+        DataSet dataset_chunked_small =
+            file.createDataSet<float>(chunked_dataset_small_name, dataspace, goodChunking);
+
+        dataset_chunked_small.resize({1, 1});
+    }
+    // read it back
+    {
+        File file(file_name, File::ReadOnly);
+        Group g1 = file.getGroup(group_name_1);
+        Group g2 = file.getGroup(group_name_2);
+        Group nested_group2 = g2.getGroup(group_nested_name);
+
+        DataSet dataset_absolute = file.getDataSet(group_name_1 + "/" + group_nested_name + "/" +
+                                                   dataset_name);
+        CHECK(4 == dataset_absolute.getSpace().getDimensions()[0]);
+
+        DataSet dataset_relative = nested_group2.getDataSet(dataset_name);
+        CHECK(4 == dataset_relative.getSpace().getDimensions()[0]);
+
+        DataSetAccessProps accessProps;
+        accessProps.add(Caching(13, 1024, 0.5));
+        DataSet dataset_chunked = file.getDataSet(chunked_dataset_name, accessProps);
+        CHECK(4 == dataset_chunked.getSpace().getDimensions()[0]);
+
+        DataSet dataset_chunked_small = file.getDataSet(chunked_dataset_small_name);
+        CHECK(1 == dataset_chunked_small.getSpace().getDimensions()[0]);
+    }
+}
+
+TEST_CASE("FileSpace") {
+    const std::string filename = "filespace.h5";
+    const std::string ds_path = "dataset";
+    const std::vector<int> data{13, 24, 36};
+
+    File file(filename, File::Truncate);
+    file.createDataSet(ds_path, data);
+
+    CHECK(file.getFileSize() > 0);
+}
+
+TEST_CASE("FreeSpace (default)") {
+    const std::string filename = "freespace_default.h5";
+    const std::string ds_path = "dataset";
+    const std::vector<int> data{13, 24, 36};
+
+    {
+        File file(filename, File::Truncate);
+        auto dset = file.createDataSet(ds_path, data);
+    }
+
+    {
+        File file(filename, File::ReadWrite);
+        file.unlink(ds_path);
+        CHECK(file.getFreeSpace() > 0);
+        CHECK(file.getFreeSpace() < file.getFileSize());
+    }
+}
+
+#if H5_VERSION_GE(1, 10, 1)
+TEST_CASE("FreeSpace (tracked)") {
+    const std::string filename = "freespace_tracked.h5";
+    const std::string ds_path = "dataset";
+    const std::vector<int> data{13, 24, 36};
+
+    {
+        FileCreateProps fcp;
+        fcp.add(FileSpaceStrategy(H5F_FSPACE_STRATEGY_FSM_AGGR, true, 0));
+        File file(filename, File::Truncate, fcp);
+        auto dset = file.createDataSet(ds_path, data);
+    }
+
+    {
+        File file(filename, File::ReadWrite);
+        file.unlink(ds_path);
+
+#if H5_VERSION_GE(1, 12, 0)
+        // This fails on 1.10.x but starts working in 1.12.0
+        CHECK(file.getFreeSpace() > 0);
+#endif
+        CHECK(file.getFreeSpace() < file.getFileSize());
+    }
+
+    {
+        File file(filename, File::ReadOnly);
+        CHECK(file.getFreeSpace() > 0);
+        CHECK(file.getFreeSpace() < file.getFileSize());
+    }
+}
+#endif
+
+TEST_CASE("Test extensible datasets") {
+    const std::string file_name("create_extensible_dataset_example.h5");
+    const std::string dataset_name("dset");
+    constexpr long double t1[3][1] = {{2.0l}, {2.0l}, {4.0l}};
+    constexpr long double t2[1][3] = {{4.0l, 8.0l, 6.0l}};
+
+    {
+        // Create a new file using the default property lists.
+        File file(file_name, File::ReadWrite | File::Create | File::Truncate);
+
+        // Create a dataspace with initial shape and max shape
+        DataSpace dataspace = DataSpace({4, 5}, {17, DataSpace::UNLIMITED});
+
+        // Use chunking
+        DataSetCreateProps props;
+        props.add(Chunking(std::vector<hsize_t>{2, 2}));
+
+        // Create the dataset
+        DataSet dataset =
+            file.createDataSet(dataset_name, dataspace, AtomicType<long double>(), props);
+
+        // Write into the initial part of the dataset
+        dataset.select({0, 0}, {3, 1}).write(t1);
+
+        // Resize the dataset to a larger size
+        dataset.resize({4, 6});
+
+        CHECK(4 == dataset.getSpace().getDimensions()[0]);
+        CHECK(6 == dataset.getSpace().getDimensions()[1]);
+
+        // Write into the new part of the dataset
+        dataset.select({3, 3}, {1, 3}).write(t2);
+
+        SilenceHDF5 silencer;
+        // Try resize out of bounds
+        CHECK_THROWS_AS(dataset.resize({18, 1}), DataSetException);
+        // Try resize invalid dimensions
+        CHECK_THROWS_AS(dataset.resize({1, 2, 3}), DataSetException);
+    }
+
+    // read it back
+    {
+        File file(file_name, File::ReadOnly);
+
+        DataSet dataset_absolute = file.getDataSet("/" + dataset_name);
+        const auto dims = dataset_absolute.getSpace().getDimensions();
+        long double values[4][6];
+        dataset_absolute.read(values);
+        CHECK(4 == dims[0]);
+        CHECK(6 == dims[1]);
+
+        CHECK(t1[0][0] == values[0][0]);
+        CHECK(t1[1][0] == values[1][0]);
+        CHECK(t1[2][0] == values[2][0]);
+
+        CHECK(t2[0][0] == values[3][3]);
+        CHECK(t2[0][1] == values[3][4]);
+        CHECK(t2[0][2] == values[3][5]);
+    }
+}
+
+TEST_CASE("Test reference count") {
+    const std::string file_name("h5_ref_count_test.h5");
+    const std::string dataset_name("dset");
+    const std::string group_name_1("/group1");
+    const std::string group_name_2("/group2");
+
+    // Create a new file using the default property lists.
+    File file(file_name, File::ReadWrite | File::Create | File::Truncate);
+
+    std::unique_ptr<DataSet> d1_ptr;
+    std::unique_ptr<Group> g_ptr;
+
+    {
+        // create group
+        Group g1 = file.createGroup(group_name_1);
+
+        // override object
+        g1 = file.createGroup(group_name_2);
+
+        // Create the data space for the dataset.
+        std::vector<size_t> dims = {10, 10};
+
+        DataSpace dataspace(dims);
+
+        DataSet d1 =
+            file.createDataSet(group_name_1 + dataset_name, dataspace, AtomicType<double>());
+
+        double values[10][10] = {{0}};
+        values[5][0] = 1;
+        d1.write(values);
+
+        // force move
+        d1_ptr.reset(new DataSet(std::move(d1)));
+
+        // force copy
+        g_ptr.reset(new Group(g1));
+    }
+    // read it back
+    {
+        DataSet d2(std::move(*d1_ptr));
+        d1_ptr.reset();
+
+        double values[10][10];
+        d2.read(values);
+
+        for (std::size_t i = 0; i < 10; ++i) {
+            for (std::size_t j = 0; j < 10; ++j) {
+                double v = values[i][j];
+
+                if (i == 5 && j == 0) {
+                    REQUIRE(v == 1);
+                } else {
+                    REQUIRE(v == 0);
+                }
+            }
+        }
+
+        // force copy
+        Group g2 = *g_ptr;
+
+        // add a subgroup
+        g2.createGroup("blabla");
+    }
+}
+
+TEST_CASE("Test simple listings") {
+    const std::string file_name("h5_list_test.h5");
+    const std::string group_name_core("group_name");
+    const std::string group_nested_name("/group_nested");
+
+    // Create a new file using the default property lists.
+    File file(file_name, File::ReadWrite | File::Create | File::Truncate);
+
+    {
+        // absolute group
+        for (int i = 0; i < 2; ++i) {
+            std::ostringstream ss;
+            ss << "/" << group_name_core << "_" << i;
+            file.createGroup(ss.str());
+        }
+
+        size_t n_elem = file.getNumberObjects();
+        CHECK(2 == n_elem);
+
+        std::vector<std::string> elems = file.listObjectNames();
+        CHECK(2 == elems.size());
+        std::vector<std::string> reference_elems;
+        for (int i = 0; i < 2; ++i) {
+            std::ostringstream ss;
+            ss << group_name_core << "_" << i;
+            reference_elems.push_back(ss.str());
+        }
+
+        CHECK(elems == reference_elems);
+    }
+
+    {
+        file.createGroup(group_nested_name);
+        Group g_nest = file.getGroup(group_nested_name);
+
+        for (int i = 0; i < 50; ++i) {
+            std::ostringstream ss;
+            ss << group_name_core << "_" << i;
+            g_nest.createGroup(ss.str());
+        }
+
+        size_t n_elem = g_nest.getNumberObjects();
+        CHECK(50 == n_elem);
+
+        std::vector<std::string> elems = g_nest.listObjectNames();
+        CHECK(50 == elems.size());
+        std::vector<std::string> reference_elems;
+
+        for (int i = 0; i < 50; ++i) {
+            std::ostringstream ss;
+            ss << group_name_core << "_" << i;
+            reference_elems.push_back(ss.str());
+        }
+        // there is no guarantee on the order of the hdf5 index, so sort both
+        // lists before comparing
+        std::sort(elems.begin(), elems.end());
+        std::sort(reference_elems.begin(), reference_elems.end());
+
+        CHECK(elems == reference_elems);
+    }
+}
+
+TEST_CASE("Simple test for type equality") {
+    AtomicType<double> d_var;
+    AtomicType<size_t> size_var;
+    AtomicType<double> d_var_test;
+    AtomicType<size_t> size_var_cpy(size_var);
+    AtomicType<int> int_var;
+    AtomicType<unsigned> uint_var;
+
+    // check different type matching
+    CHECK(d_var == d_var_test);
+    CHECK(d_var != size_var);
+
+    // check type copy matching
+    CHECK(size_var_cpy == size_var);
+
+    // check sign change not matching
+    CHECK(int_var != uint_var);
+}
+
+TEST_CASE("StringType") {
+    SECTION("enshrine-defaults") {
+        auto fixed_length = FixedLengthStringType(32, StringPadding::SpacePadded);
+        auto variable_length = VariableLengthStringType();
+
+        REQUIRE(fixed_length.getCharacterSet() == CharacterSet::Ascii);
+        REQUIRE(variable_length.getCharacterSet() == CharacterSet::Ascii);
+    }
+
+    SECTION("fixed-length") {
+        auto fixed_length =
+            FixedLengthStringType(32, StringPadding::SpacePadded, CharacterSet::Utf8);
+        auto string_type = fixed_length.asStringType();
+
+        REQUIRE(string_type.getId() == fixed_length.getId());
+        REQUIRE(string_type.getCharacterSet() == CharacterSet::Utf8);
+        REQUIRE(string_type.getPadding() == StringPadding::SpacePadded);
+        REQUIRE(string_type.getSize() == 32);
+        REQUIRE(!string_type.isVariableStr());
+        REQUIRE(string_type.isFixedLenStr());
+    }
+
+    SECTION("variable-length") {
+        auto variable_length = VariableLengthStringType(CharacterSet::Utf8);
+        auto string_type = variable_length.asStringType();
+
+        REQUIRE(string_type.getId() == variable_length.getId());
+        REQUIRE(string_type.getCharacterSet() == CharacterSet::Utf8);
+        REQUIRE(string_type.isVariableStr());
+        REQUIRE(!string_type.isFixedLenStr());
+    }
+
+    SECTION("atomic") {
+        auto atomic = AtomicType<double>();
+        REQUIRE_THROWS(atomic.asStringType());
+    }
+}
+
+
+TEST_CASE("DataTypeEqualTakeBack") {
+    const std::string file_name("h5tutr_dset.h5");
+    const std::string dataset_name("dset");
+
+    // Create a new file using the default property lists.
+    File file(file_name, File::ReadWrite | File::Create | File::Truncate);
+
+    // Create the data space for the dataset.
+    std::vector<size_t> dims{10, 1};
+
+    DataSpace dataspace(dims);
+
+    // Create a dataset with double precision floating points
+    DataSet dataset = file.createDataSet<size_t>(dataset_name + "_double", dataspace);
+
+    AtomicType<size_t> s;
+    AtomicType<double> d;
+
+    CHECK(s == dataset.getDataType());
+    CHECK(d != dataset.getDataType());
+
+    // Test getAddress and expect deprecation warning
+    auto addr = dataset.getInfo().getAddress();
+    CHECK(addr != 0);
+}
+
+TEST_CASE("DataSpaceTest") {
+    const std::string file_name("h5tutr_space.h5");
+    const std::string dataset_name("dset");
+
+    // Create a new file using the default property lists.
+    File file(file_name, File::ReadWrite | File::Create | File::Truncate);
+
+    // Create the data space for the dataset.
+    DataSpace dataspace{std::vector<size_t>{10, 1}};
+
+    // Create a dataset with size_t type
+    DataSet dataset = file.createDataSet<size_t>(dataset_name, dataspace);
+
+    DataSpace space = dataset.getSpace();
+    DataSpace space2 = dataset.getSpace();
+    auto space3 = space.clone();
+
+    // verify the space ids are different
+    CHECK(space.getId() != space2.getId());
+    CHECK(space.getId() != space3.getId());
+
+    // verify the dimensions are consistent
+    CHECK(space.getDimensions().size() == 2);
+    CHECK(space.getDimensions()[0] == 10);
+    CHECK(space.getDimensions()[1] == 1);
+}
+
+TEST_CASE("DataSpace::getElementCount") {
+    SECTION("null") {
+        auto space = DataSpace(DataSpace::dataspace_null);
+        CHECK(space.getElementCount() == 0);
+    }
+
+    SECTION("scalar") {
+        auto space = DataSpace(DataSpace::dataspace_scalar);
+        CHECK(space.getElementCount() == 1);
+    }
+
+    SECTION("simple, empty (1D)") {
+        auto space = DataSpace(0);
+        CHECK(space.getElementCount() == 0);
+    }
+
+    SECTION("simple, empty (2D)") {
+        auto space = DataSpace(0, 0);
+        CHECK(space.getElementCount() == 0);
+    }
+
+    SECTION("simple, non-empty (2D)") {
+        auto space = DataSpace(2, 3);
+        CHECK(space.getElementCount() == 6);
+    }
+
+    SECTION("FromCharArrayStrings") {
+        char string_array[2][10] = {"123456789", "abcdefghi"};
+        auto space = DataSpace::FromCharArrayStrings(string_array);
+        CHECK(space.getElementCount() == 2);
+    }
+}
+
+TEST_CASE("DataSpaceVectorTest") {
+    // Create 1D shortcut dataspace
+    DataSpace space(7);
+
+    CHECK(space.getDimensions().size() == 1);
+    CHECK(space.getDimensions()[0] == 7);
+
+    // Initializer list (explicit)
+    DataSpace space3({8, 9, 10});
+    auto space3_res = space3.getDimensions();
+    std::vector<size_t> space3_ans{8, 9, 10};
+
+    CHECK(space3_res == space3_ans);
+
+    // Verify 2D works (note that without the {}, this matches the iterator
+    // constructor)
+    DataSpace space2(std::vector<size_t>{3, 4});
+
+    auto space2_res = space2.getDimensions();
+    std::vector<size_t> space2_ans{3, 4};
+
+    CHECK(space2_res == space2_ans);
+}
+
+TEST_CASE("DataSpaceVariadicTest") {
+    // Create 1D shortcut dataspace
+    DataSpace space1{7};
+
+    auto space1_res = space1.getDimensions();
+    std::vector<size_t> space1_ans{7};
+
+    CHECK(space1_res == space1_ans);
+
+    // Initializer list (explicit)
+    DataSpace space3{8, 9, 10};
+
+    auto space3_res = space3.getDimensions();
+    std::vector<size_t> space3_ans{8, 9, 10};
+
+    CHECK(space3_res == space3_ans);
+
+    // Verify 2D works using explicit syntax
+    DataSpace space2{3, 4};
+
+    auto space2_res = space2.getDimensions();
+    std::vector<size_t> space2_ans{3, 4};
+
+    CHECK(space2_res == space2_ans);
+
+    // Verify 2D works using old syntax (this used to match the iterator!)
+    DataSpace space2b(3, 4);
+
+    auto space2b_res = space2b.getDimensions();
+    std::vector<size_t> space2b_ans{3, 4};
+
+    CHECK(space2b_res == space2b_ans);
+}
+
+TEST_CASE("ChunkingConstructorsTest") {
+    Chunking first(1, 2, 3);
+
+    auto first_res = first.getDimensions();
+    std::vector<hsize_t> first_ans{1, 2, 3};
+
+    CHECK(first_res == first_ans);
+
+    Chunking second{1, 2, 3};
+
+    auto second_res = second.getDimensions();
+    std::vector<hsize_t> second_ans{1, 2, 3};
+
+    CHECK(second_res == second_ans);
+
+    Chunking third({1, 2, 3});
+
+    auto third_res = third.getDimensions();
+    std::vector<hsize_t> third_ans{1, 2, 3};
+
+    CHECK(third_res == third_ans);
+}
+
+TEST_CASE("HighFiveReadWriteShortcut") {
+    std::ostringstream filename;
+    filename << "h5_rw_vec_shortcut_test.h5";
+
+    const unsigned x_size = 800;
+    const std::string dataset_name("dset");
+    std::vector<unsigned> vec;
+    vec.resize(x_size);
+    for (unsigned i = 0; i < x_size; i++)
+        vec[i] = i * 2;
+    std::string at_contents("Contents of string");
+    int my_int = 3;
+    std::vector<std::vector<int>> my_nested = {{1, 2}, {3, 4}};
+
+    // Create a new file using the default property lists.
+    File file(filename.str(), File::ReadWrite | File::Create | File::Truncate);
+
+    // Create a dataset with int points
+    DataSet dataset = file.createDataSet(dataset_name, vec);
+    dataset.createAttribute("str", at_contents);
+
+    DataSet ds_int = file.createDataSet("/TmpInt", my_int);
+    DataSet ds_nested = file.createDataSet("/TmpNest", my_nested);
+
+    std::vector<unsigned> result;
+    dataset.read(result);
+    CHECK_THAT(vec, Equals(result));
+
+    std::string read_in;
+    dataset.getAttribute("str").read(read_in);
+    CHECK(read_in == at_contents);
+
+    int out_int = 0;
+    ds_int.read(out_int);
+    CHECK(my_int == out_int);
+
+    decltype(my_nested) out_nested;
+    ds_nested.read(out_nested);
+
+    for (size_t i = 0; i < 2; ++i) {
+        for (size_t j = 0; j < 2; ++j) {
+            REQUIRE(my_nested[i][j] == out_nested[i][j]);
+        }
+    }
+
+    // Plain c arrays. 1D
+    {
+        int int_c_array[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+        DataSet ds_int2 = file.createDataSet("/TmpCArrayInt", int_c_array);
+
+        decltype(int_c_array) int_c_array_out;
+        ds_int2.read(int_c_array_out);
+        for (size_t i = 0; i < 10; ++i) {
+            REQUIRE(int_c_array[i] == int_c_array_out[i]);
+        }
+    }
+
+    // Plain c arrays. 2D
+    {
+        char char_c_2darray[][3] = {"aa", "bb", "cc", "12"};
+        DataSet ds_char2 = file.createDataSet("/TmpCArray2dchar", char_c_2darray);
+
+        decltype(char_c_2darray) char_c_2darray_out;
+        ds_char2.read(char_c_2darray_out);
+        for (size_t i = 0; i < 4; ++i) {
+            for (size_t j = 0; j < 3; ++j) {
+                REQUIRE(char_c_2darray[i][j] == char_c_2darray_out[i][j]);
+            }
+        }
+    }
+}
+
+template <typename T>
+void readWriteAttributeVectorTest() {
+    std::ostringstream filename;
+    filename << "h5_rw_attribute_vec_" << typeNameHelper<T>() << "_test.h5";
+
+    std::srand((unsigned) std::time(0));
+    const size_t x_size = 25;
+    const std::string dataset_name("dset");
+    typename std::vector<T> vec;
+
+    // Create a new file using the default property lists.
+    File file(filename.str(), File::ReadWrite | File::Create | File::Truncate);
+
+    vec.resize(x_size);
+    ContentGenerate<T> generator;
+    std::generate(vec.begin(), vec.end(), generator);
+
+    {
+        // Create a dummy group to annotate with an attribute
+        Group g = file.createGroup("dummy_group");
+
+        // check that no attributes are there
+        std::size_t n = g.getNumberAttributes();
+        CHECK(n == 0);
+
+        std::vector<std::string> all_attribute_names = g.listAttributeNames();
+        CHECK(all_attribute_names.size() == 0);
+        CHECK(!g.hasAttribute("my_attribute"));
+
+        Attribute a1 = g.createAttribute<T>("my_attribute", DataSpace::From(vec));
+        a1.write(vec);
+
+        // check now that we effectively have an attribute listable
+        CHECK(g.getNumberAttributes() == 1);
+        CHECK(g.hasAttribute("my_attribute"));
+
+        all_attribute_names = g.listAttributeNames();
+        CHECK(all_attribute_names.size() == 1);
+        CHECK(all_attribute_names[0] == std::string("my_attribute"));
+
+        // Create the same attribute on a newly created dataset
+        DataSet s = g.createDataSet("dummy_dataset", DataSpace(1), AtomicType<int>());
+
+        Attribute a2 = s.createAttribute<T>("my_attribute_copy", DataSpace::From(vec));
+        a2.write(vec);
+
+        // const data, short-circuit syntax
+        const std::vector<int> v{1, 2, 3};
+        s.createAttribute("version_test", v);
+    }
+
+    {
+        std::vector<T> result1, result2;
+
+        Attribute a1_read = file.getGroup("dummy_group").getAttribute("my_attribute");
+        a1_read.read(result1);
+
+        CHECK(vec.size() == x_size);
+        CHECK(result1.size() == x_size);
+        CHECK(vec == result1);
+
+        Attribute a2_read =
+            file.getDataSet("/dummy_group/dummy_dataset").getAttribute("my_attribute_copy");
+        a2_read.read(result2);
+
+        CHECK(vec.size() == x_size);
+        CHECK(result2.size() == x_size);
+        CHECK(vec == result2);
+
+        std::vector<int> v;  // (declaring this const would trigger a helpful compile error)
+        file.getDataSet("/dummy_group/dummy_dataset").getAttribute("version_test").read(v);
+    }
+
+    // Delete some attributes
+    {
+        // From group
+        auto g = file.getGroup("dummy_group");
+        g.deleteAttribute("my_attribute");
+        auto n = g.getNumberAttributes();
+        CHECK(n == 0);
+
+        // From dataset
+        auto d = file.getDataSet("/dummy_group/dummy_dataset");
+        d.deleteAttribute("my_attribute_copy");
+        n = d.getNumberAttributes();
+        CHECK(n == 1);  // "version_test" is still attached to the dataset
+    }
+}
+
+TEST_CASE("ReadWriteAttributeVectorString") {
+    readWriteAttributeVectorTest<std::string>();
+}
+
+TEMPLATE_LIST_TEST_CASE("ReadWriteAttributeVector", "[template]", dataset_test_types) {
+    readWriteAttributeVectorTest<TestType>();
+}
+
+TEST_CASE("WriteLargeAttribute") {
+    std::vector<double> large_attr(16000, 0.0);
+
+    auto fapl = HighFive::FileAccessProps::Default();
+    fapl.add(HighFive::FileVersionBounds(H5F_LIBVER_LATEST, H5F_LIBVER_LATEST));
+    HighFive::File file("create_large_attribute.h5", HighFive::File::Truncate, fapl);
+    auto gcpl = HighFive::GroupCreateProps::Default();
+    gcpl.add(HighFive::AttributePhaseChange(0, 0));
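+    // Why the property lists above: attributes beyond 64 KiB do not fit in
+    // compact (object-header) storage, so AttributePhaseChange(0, 0) forces dense
+    // attribute storage, which in turn needs the 1.8+ file format requested via
+    // FileVersionBounds. 16000 doubles = 128'000 bytes, well past the limit.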
+
+    auto group = file.createGroup("grp", gcpl);
+    CHECK_NOTHROW(group.createAttribute("attr", large_attr));
+}
+
+TEST_CASE("AttributePhaseChange") {
+    auto fapl = HighFive::FileAccessProps::Default();
+    fapl.add(HighFive::FileVersionBounds(H5F_LIBVER_LATEST, H5F_LIBVER_LATEST));
+    HighFive::File file("attribute_phase_change.h5", HighFive::File::Truncate, fapl);
+
+    auto gcpl = HighFive::GroupCreateProps::Default();
+    gcpl.add(HighFive::AttributePhaseChange(42, 24));
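+    // Per H5Pset_attr_phase_change semantics: storage switches from compact to
+    // dense once more than max_compact (42) attributes exist, and back to compact
+    // when the count drops below min_dense (24).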
+
+    auto group = file.createGroup("grp", gcpl);
+
+    auto actual = AttributePhaseChange(group.getCreatePropertyList());
+    CHECK(actual.min_dense() == 24);
+    CHECK(actual.max_compact() == 42);
+}
+
+TEST_CASE("datasetOffset") {
+    std::string filename = "datasetOffset.h5";
+    std::string dsetname = "dset";
+    const size_t size_dataset = 20;
+
+    File file(filename, File::ReadWrite | File::Create | File::Truncate);
+    std::vector<int> data(size_dataset);
+    DataSet ds = file.createDataSet<int>(dsetname, DataSpace::From(data));
+    ds.write(data);
+    DataSet ds_read = file.getDataSet(dsetname);
+    CHECK(ds_read.getOffset() > 0);
+}
+
+template <typename T>
+void selectionArraySimpleTest() {
+    using Vector = std::vector<T>;
+
+    std::ostringstream filename;
+    filename << "h5_rw_select_test_" << typeNameHelper<T>() << "_test.h5";
+
+    const size_t size_x = 10;
+    const size_t offset_x = 2, count_x = 5;
+
+    const std::string dataset_name("dset");
+
+    Vector values(size_x);
+
+    ContentGenerate<T> generator;
+    std::generate(values.begin(), values.end(), generator);
+
+    // Create a new file using the default property lists.
+    File file(filename.str(), File::ReadWrite | File::Create | File::Truncate);
+
+    DataSet dataset = file.createDataSet<T>(dataset_name, DataSpace::From(values));
+
+    dataset.write(values);
+
+    file.flush();
+
+    // select slice
+    {
+        // read it back
+        Vector result;
+        std::vector<size_t> offset{offset_x};
+        std::vector<size_t> size{count_x};
+
+        Selection slice = dataset.select(offset, size);
+
+        CHECK(slice.getSpace().getDimensions()[0] == size_x);
+        CHECK(slice.getMemSpace().getDimensions()[0] == count_x);
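+        // getSpace() reflects the full file dataspace (size_x), while
+        // getMemSpace() describes the shape of the selection read into memory.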
+
+        slice.read(result);
+
+        CHECK(result.size() == count_x);
+
+        for (size_t i = 0; i < count_x; ++i) {
+            REQUIRE(values[i + offset_x] == result[i]);
+        }
+    }
+
+    // select cherry pick
+    {
+        // read it back
+        Vector result;
+        std::vector<size_t> ids{1, 3, 4, 7};
+
+        Selection slice = dataset.select(ElementSet(ids));
+
+        CHECK(slice.getSpace().getDimensions()[0] == size_x);
+        CHECK(slice.getMemSpace().getDimensions()[0] == ids.size());
+
+        slice.read(result);
+
+        CHECK(result.size() == ids.size());
+
+        for (size_t i = 0; i < ids.size(); ++i) {
+            const std::size_t id = ids[i];
+            REQUIRE(values[id] == result[i]);
+        }
+    }
+}
+
+TEST_CASE("selectionArraySimpleString") {
+    selectionArraySimpleTest<std::string>();
+}
+
+TEMPLATE_LIST_TEST_CASE("selectionArraySimple", "[template]", dataset_test_types) {
+    selectionArraySimpleTest<TestType>();
+}
+
+TEST_CASE("selectionByElementMultiDim") {
+    const std::string file_name("h5_test_selection_multi_dim.h5");
+    // Create a 2-dim dataset
+    File file(file_name, File::ReadWrite | File::Create | File::Truncate);
+    std::vector<size_t> dims{3, 3};
+
+    auto set = file.createDataSet("test", DataSpace(dims), AtomicType<int>());
+    int values[3][3] = {{1, 2, 3}, {4, 5, 6}, {7, 8, 9}};
+    set.write(values);
+
+    {
+        int value;
+        set.select(ElementSet{{1, 1}}).read(value);
+        CHECK(value == 5);
+    }
+
+    {
+        int value[2];
+        set.select(ElementSet{0, 0, 2, 2}).read(value);
+        CHECK(value[0] == 1);
+        CHECK(value[1] == 9);
+    }
+
+    {
+        int value[2];
+        set.select(ElementSet{{0, 1}, {1, 2}}).read(value);
+        CHECK(value[0] == 2);
+        CHECK(value[1] == 6);
+    }
+
+    {
+        SilenceHDF5 silencer;
+        CHECK_THROWS_AS(set.select(ElementSet{0, 1, 2}), DataSpaceException);
+    }
+}
+
+template <typename T>
+void columnSelectionTest() {
+    std::ostringstream filename;
+    filename << "h5_rw_select_column_test_" << typeNameHelper<T>() << "_test.h5";
+
+    const size_t x_size = 10;
+    const size_t y_size = 7;
+
+    const std::string dataset_name("dset");
+
+    T values[x_size][y_size];
+
+    ContentGenerate<T> generator;
+    generate2D(values, x_size, y_size, generator);
+
+    // Create a new file using the default property lists.
+    File file(filename.str(), File::ReadWrite | File::Create | File::Truncate);
+
+    // Create the data space for the dataset.
+    std::vector<size_t> dims{x_size, y_size};
+
+    DataSpace dataspace(dims);
+    // Create a dataset with arbitrary type
+    DataSet dataset = file.createDataSet<T>(dataset_name, dataspace);
+
+    dataset.write(values);
+
+    file.flush();
+
+    std::vector<size_t> columns{1, 3, 5};
+
+    Selection slice = dataset.select(columns);
+    T result[x_size][3];
+    slice.read(result);
+
+    CHECK(slice.getSpace().getDimensions()[0] == x_size);
+    CHECK(slice.getMemSpace().getDimensions()[0] == x_size);
+
+    for (size_t i = 0; i < 3; ++i)
+        for (size_t j = 0; j < x_size; ++j)
+            REQUIRE(result[j][i] == values[j][columns[i]]);
+}
+
+TEMPLATE_LIST_TEST_CASE("columnSelection", "[template]", numerical_test_types) {
+    columnSelectionTest<TestType>();
+}
+
+std::vector<std::array<size_t, 2>> global_indices_2d(const std::vector<size_t>& offset,
+                                                     const std::vector<size_t>& count) {
+    std::vector<std::array<size_t, 2>> indices;
+    indices.reserve(count[0] * count[1]);
+
+    for (size_t i = 0; i < count[0]; ++i) {
+        for (size_t j = 0; j < count[1]; ++j) {
+            indices.push_back({offset[0] + i, offset[1] + j});
+        }
+    }
+
+    return indices;
+}
+
+std::vector<std::array<size_t, 2>> local_indices_2d(const std::vector<size_t>& count) {
+    return global_indices_2d({0ul, 0ul}, count);
+}
+
+std::vector<std::array<size_t, 1>> local_indices_1d(const std::vector<size_t>& count) {
+    std::vector<std::array<size_t, 1>> local_indices;
+    for (size_t i = 0; i < count[0]; ++i) {
+        local_indices.push_back({i});
+    }
+
+    return local_indices;
+}
+
+struct RegularHyperSlabAnswer {
+    static RegularHyperSlabAnswer createRegular(const std::vector<size_t>& offset,
+                                                const std::vector<size_t>& count) {
+        return RegularHyperSlabAnswer{global_indices_2d(offset, count),
+                                      local_indices_1d({count[0] * count[1]})};
+    }
+
+    // These are the selected indices in the
+    // outer (larger) array.
+    std::vector<std::array<size_t, 2>> global_indices;
+
+    // These are the selected indices in the compacted (inner)
+    // array.
+    std::vector<std::array<size_t, 1>> local_indices;
+};
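+// For instance, RegularHyperSlabAnswer::createRegular({1ul, 1ul}, {2ul, 2ul})
+// yields global_indices {{1,1}, {1,2}, {2,1}, {2,2}} in the 2D dataset and,
+// since selections are flattened on read, local_indices {{0}, {1}, {2}, {3}}.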
+
+struct RegularHyperSlabTestData {
+    std::string desc;
+    HyperSlab slab;
+    RegularHyperSlabAnswer answer;
+};
+
+std::vector<RegularHyperSlabTestData> make_regular_hyperslab_test_data() {
+    std::vector<RegularHyperSlabTestData> test_data;
+
+    // The dataset is 10x8; we define the following regular
+    // hyperslabs:
+    //  x----------------x
+    //  |                |
+    //  | x------x   e   |  1
+    //  | |  a   |       |
+    //  x-|------|-------x  3
+    //  | |    x-|-------x  4
+    //  | |    | |  b    |
+    //  | |    c-|-------c  5
+    //  | |    b-|-------b  6
+    //  | |    | |  c    |
+    //  | d----x-d-------x  7
+    //  | |  d   |       |
+    //  | a------a       |  9
+    //  |                |
+    //  ------------------
+    //    1    3 4       8
+
+    std::map<std::string, RegularHyperSlab> slabs;
+
+    slabs["a"] = RegularHyperSlab(/* offset = */ {1ul, 1ul},
+                                  /* count = */ {8ul, 3ul});
+
+    slabs["b"] = RegularHyperSlab(/* offset = */ {4ul, 3ul},
+                                  /* count = */ {2ul, 5ul});
+
+    slabs["c"] = RegularHyperSlab(/* offset = */ {5ul, 3ul},
+                                  /* count = */ {2ul, 5ul});
+
+    slabs["d"] = RegularHyperSlab(/* offset = */ {7ul, 1ul},
+                                  /* count = */ {2ul, 3ul});
+
+    slabs["e"] = RegularHyperSlab(/* offset = */ {0ul, 0ul},
+                                  /* count = */ {3ul, 8ul});
+
+    // Union, regular
+    auto slab_bc_union = HyperSlab(slabs["b"]) | slabs["c"];
+    auto answer_bc_union = RegularHyperSlabAnswer::createRegular({4ul, 3ul}, {3ul, 5ul});
+    test_data.push_back({"b | c", slab_bc_union, answer_bc_union});
+
+    // Intersection, always regular
+    auto slab_ab_cut = HyperSlab(slabs["a"]) & slabs["b"];
+    auto answer_ab_cut = RegularHyperSlabAnswer::createRegular({4ul, 3ul}, {2ul, 1ul});
+    test_data.push_back({"a & b", slab_ab_cut, answer_ab_cut});
+
+    // Intersection, always regular
+    auto slab_bc_cut = HyperSlab(slabs["b"]) & slabs["c"];
+    auto answer_bc_cut = RegularHyperSlabAnswer::createRegular({5ul, 3ul}, {1ul, 5ul});
+    test_data.push_back({"b & c", slab_bc_cut, answer_bc_cut});
+
+    // Xor, regular
+    auto slab_ad_xor = HyperSlab(slabs["a"]) ^ slabs["d"];
+    auto answer_ad_xor = RegularHyperSlabAnswer::createRegular({1ul, 1ul}, {6ul, 3ul});
+    test_data.push_back({"a ^ b", slab_ad_xor, answer_ad_xor});
+
+    // (not b) and c, regular
+    auto slab_bc_nota = HyperSlab(slabs["b"]).notA(slabs["c"]);
+    auto answer_bc_nota = RegularHyperSlabAnswer::createRegular({6ul, 3ul}, {1ul, 5ul});
+    test_data.push_back({"b notA a", slab_bc_nota, answer_bc_nota});
+
+    // c and (not b), regular
+    auto slab_cb_notb = HyperSlab(slabs["c"]).notB(slabs["b"]);
+    auto answer_cb_notb = RegularHyperSlabAnswer::createRegular({6ul, 3ul}, {1ul, 5ul});
+    test_data.push_back({"c notB b", slab_cb_notb, answer_cb_notb});
+
+    return test_data;
+}
+
+template <class T, size_t x_size, size_t y_size>
+File setupHyperSlabFile(T (&values)[x_size][y_size],
+                        const std::string& filename,
+                        const std::string& dataset_name) {
+    ContentGenerate<T> generator;
+    generate2D(values, x_size, y_size, generator);
+
+    // Create a new file using the default property lists.
+    File file(filename, File::ReadWrite | File::Create | File::Truncate);
+
+    // Create the data space for the dataset.
+    std::vector<size_t> dims{x_size, y_size};
+
+    DataSpace dataspace(dims);
+    // Create a dataset with arbitrary type
+    DataSet dataset = file.createDataSet<T>(dataset_name, dataspace);
+
+    dataset.write(values);
+    file.flush();
+
+    return file;
+}
+
+template <typename T>
+void regularHyperSlabSelectionTest() {
+    std::ostringstream filename;
+    filename << "h5_rw_select_regular_hyperslab_test_" << typeNameHelper<T>() << "_test.h5";
+    const std::string dataset_name("dset");
+
+    const size_t x_size = 10;
+    const size_t y_size = 8;
+
+    T values[x_size][y_size];
+
+    auto file = setupHyperSlabFile(values, filename.str(), dataset_name);
+    auto test_cases = make_regular_hyperslab_test_data();
+
+    for (const auto& test_case: test_cases) {
+        SECTION(test_case.desc) {
+            std::vector<T> result;
+
+            file.getDataSet(dataset_name).select(test_case.slab).read(result);
+
+            auto n_selected = test_case.answer.global_indices.size();
+            for (size_t i = 0; i < n_selected; ++i) {
+                const auto ig = test_case.answer.global_indices[i];
+                const auto il = test_case.answer.local_indices[i];
+
+                REQUIRE(result[il[0]] == values[ig[0]][ig[1]]);
+            }
+        }
+    }
+}
+
+TEMPLATE_LIST_TEST_CASE("hyperSlabSelection", "[template]", numerical_test_types) {
+    regularHyperSlabSelectionTest<TestType>();
+}
+
+struct IrregularHyperSlabAnswer {
+    // These are the selected indices in the outer (larger) array.
+    std::vector<std::array<size_t, 2>> global_indices;
+};
+
+struct IrregularHyperSlabTestData {
+    std::string desc;
+    HyperSlab slab;
+    IrregularHyperSlabAnswer answer;
+};
+
+std::vector<IrregularHyperSlabTestData> make_irregular_hyperslab_test_data() {
+    // The dataset is 10x8, with two regular hyperslabs:
+    //  x----------------x
+    //  |                |
+    //  |    bbbb        |
+    //  |    bbbb        |
+    //  |  aaaabb        |
+    //  |  aaaabb        |
+    //  |    bbbb        |
+    //  |    bbbb        |
+    //  |                |
+    //  |                |
+    //  |                |
+    //  |                |
+    //  ------------------
+
+    auto slabs = std::map<std::string, RegularHyperSlab>{};
+    slabs["a"] = RegularHyperSlab{{2ul, 0ul}, {1ul, 2ul}};
+    slabs["b"] = RegularHyperSlab{{1ul, 1ul}, {3ul, 2ul}};
+
+    std::vector<IrregularHyperSlabTestData> test_data;
+
+    // Union, irregular
+    auto slab_ab_union = HyperSlab(slabs["a"]) | slabs["b"];
+    // clang-format off
+    auto answer_ab_union = IrregularHyperSlabAnswer{{
+                    {1ul, 1ul}, {1ul, 2ul},
+        {2ul, 0ul}, {2ul, 1ul}, {2ul, 2ul},
+                    {3ul, 1ul}, {3ul, 2ul}
+    }};
+    // clang-format on
+    test_data.push_back({"a | b", slab_ab_union, answer_ab_union});
+
+    // xor, irregular
+    auto slab_ab_xor = HyperSlab(slabs["a"]) ^ slabs["b"];
+    // clang-format off
+    auto answer_ab_xor = IrregularHyperSlabAnswer{{
+                    {1ul, 1ul}, {1ul, 2ul},
+        {2ul, 0ul},             {2ul, 2ul},
+                    {3ul, 1ul}, {3ul, 2ul}
+    }};
+    // clang-format on
+    test_data.push_back({"a xor b", slab_ab_xor, answer_ab_xor});
+
+    // b and (not a), irregular
+    auto slab_ab_nota = HyperSlab(slabs["a"]).notA(slabs["b"]);
+    // clang-format off
+    auto answer_ab_nota = IrregularHyperSlabAnswer{{
+                    {1ul, 1ul}, {1ul, 2ul},
+                                {2ul, 2ul},
+                    {3ul, 1ul}, {3ul, 2ul}
+    }};
+    // clang-format on
+    test_data.push_back({"a nota b", slab_ab_nota, answer_ab_nota});
+
+    // b and (not a) again, this time via notB
+    auto slab_ba_notb = HyperSlab(slabs["b"]).notB(slabs["a"]);
+    // clang-format off
+    auto answer_ba_notb = IrregularHyperSlabAnswer{{
+                    {1ul, 1ul}, {1ul, 2ul},
+                                {2ul, 2ul},
+                    {3ul, 1ul}, {3ul, 2ul}
+    }};
+    // clang-format on
+    test_data.push_back({"b notb a", slab_ba_notb, answer_ba_notb});
+
+    return test_data;
+}
+
+template <typename T>
+void irregularHyperSlabSelectionReadTest() {
+    std::ostringstream filename;
+    filename << "h5_write_select_irregular_hyperslab_test_" << typeNameHelper<T>() << "_test.h5";
+
+    const std::string dataset_name("dset");
+
+    const size_t x_size = 10;
+    const size_t y_size = 8;
+
+    T values[x_size][y_size];
+    auto file = setupHyperSlabFile(values, filename.str(), dataset_name);
+
+    auto test_cases = make_irregular_hyperslab_test_data();
+
+    for (const auto& test_case: test_cases) {
+        SECTION(test_case.desc) {
+            std::vector<T> result;
+
+            file.getDataSet(dataset_name).select(test_case.slab).read(result);
+
+            auto n_selected = test_case.answer.global_indices.size();
+            for (size_t i = 0; i < n_selected; ++i) {
+                const auto ig = test_case.answer.global_indices[i];
+
+                REQUIRE(result[i] == values[ig[0]][ig[1]]);
+            }
+        }
+    }
+}
+
+TEMPLATE_LIST_TEST_CASE("irregularHyperSlabSelectionRead", "[template]", numerical_test_types) {
+    irregularHyperSlabSelectionReadTest<TestType>();
+}
+
+template <typename T>
+void irregularHyperSlabSelectionWriteTest() {
+    std::ostringstream filename;
+    filename << "h5_write_select_irregular_hyperslab_test_" << typeNameHelper<T>() << "_test.h5";
+
+    const std::string dataset_name("dset");
+
+    const size_t x_size = 10;
+    const size_t y_size = 8;
+
+    T orig_values[x_size][y_size];
+    auto file = setupHyperSlabFile(orig_values, filename.str(), dataset_name);
+
+    auto test_cases = make_irregular_hyperslab_test_data();
+
+    for (const auto& test_case: test_cases) {
+        SECTION(test_case.desc) {
+            auto n_selected = test_case.answer.global_indices.size();
+            std::vector<T> changed_values(n_selected);
+            ContentGenerate<T> gen;
+            std::generate(changed_values.begin(), changed_values.end(), gen);
+
+            file.getDataSet(dataset_name).select(test_case.slab).write(changed_values);
+
+            T overwritten_values[x_size][y_size];
+            file.getDataSet(dataset_name).read(overwritten_values);
+
+            T expected_values[x_size][y_size];
+            for (size_t i = 0; i < x_size; ++i) {
+                for (size_t j = 0; j < y_size; ++j) {
+                    expected_values[i][j] = orig_values[i][j];
+                }
+            }
+
+            for (size_t i = 0; i < n_selected; ++i) {
+                const auto ig = test_case.answer.global_indices[i];
+                expected_values[ig[0]][ig[1]] = changed_values[i];
+            }
+
+            for (size_t i = 0; i < x_size; ++i) {
+                for (size_t j = 0; j < y_size; ++j) {
+                    REQUIRE(expected_values[i][j] == overwritten_values[i][j]);
+                }
+            }
+        }
+    }
+}
+
+TEMPLATE_LIST_TEST_CASE("irregularHyperSlabSelectionWrite", "[template]", std::tuple<int>) {
+    irregularHyperSlabSelectionWriteTest<TestType>();
+}
+
+template <typename T>
+void attribute_scalar_rw() {
+    std::ostringstream filename;
+    filename << "h5_rw_attribute_scalar_rw" << typeNameHelper<T>() << "_test.h5";
+
+    File h5file(filename.str(), File::ReadWrite | File::Create | File::Truncate);
+
+    ContentGenerate<T> generator;
+
+    const T attribute_value(generator());
+
+    Group g = h5file.createGroup("metadata");
+
+    CHECK(!g.hasAttribute("family"));
+
+    // write a scalar attribute
+    {
+        T out(attribute_value);
+        Attribute att = g.createAttribute<T>("family", DataSpace::From(out));
+        att.write(out);
+    }
+
+    h5file.flush();
+
+    // test if attribute exist
+    CHECK(g.hasAttribute("family"));
+
+    // read back a scalar attribute
+    {
+        T res;
+        Attribute att = g.getAttribute("family");
+        att.read(res);
+        CHECK(res == attribute_value);
+    }
+}
+
+TEMPLATE_LIST_TEST_CASE("attribute_scalar_rw_all", "[template]", dataset_test_types) {
+    attribute_scalar_rw<TestType>();
+}
+
+TEST_CASE("attribute_scalar_rw_string") {
+    attribute_scalar_rw<std::string>();
+}
+
+// regression test https://github.com/BlueBrain/HighFive/issues/98
+TEST_CASE("HighFiveOutofDimension") {
+    std::string filename("h5_rw_reg_zero_dim_test.h5");
+
+    const std::string dataset_name("dset");
+
+    {
+        // Create a new file using the default property lists.
+        File file(filename, File::ReadWrite | File::Create | File::Truncate);
+
+        DataSpace d_null(DataSpace::DataspaceType::dataspace_null);
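+        // An H5S_NULL dataspace contains no elements at all (unlike a scalar
+        // dataspace, which holds exactly one), so both rank and dims are empty.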
+
+        DataSet d1 = file.createDataSet<double>(dataset_name, d_null);
+
+        file.flush();
+
+        DataSpace recovered_d1 = d1.getSpace();
+
+        auto ndim = recovered_d1.getNumberDimensions();
+        CHECK(ndim == 0);
+
+        auto dims = recovered_d1.getDimensions();
+        CHECK(dims.size() == 0);
+    }
+}
+
+template <typename T>
+void readWriteShuffleDeflateTest() {
+    std::ostringstream filename;
+    filename << "h5_rw_deflate_" << typeNameHelper<T>() << "_test.h5";
+    const std::string dataset_name("dset");
+    const size_t x_size = 128;
+    const size_t y_size = 32;
+    const size_t x_chunk = 16;
+    const size_t y_chunk = 16;
+
+    const int deflate_level = 9;
+
+    T array[x_size][y_size];
+
+    // write a compressed file
+    {
+        File file(filename.str(), File::ReadWrite | File::Create | File::Truncate);
+
+        // Create the data space for the dataset.
+        std::vector<size_t> dims{x_size, y_size};
+
+        DataSpace dataspace(dims);
+
+        // Use chunking
+        DataSetCreateProps props;
+        props.add(Chunking(std::vector<hsize_t>{x_chunk, y_chunk}));
+
+        // Enable shuffle
+        props.add(Shuffle());
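+        // (The shuffle filter regroups the bytes of each element, e.g. all the
+        // high-order bytes together, which typically improves the deflate ratio
+        // on numeric data.)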
+
+        // Enable deflate
+        props.add(Deflate(deflate_level));
+
+        // Create a dataset with arbitrary type
+        DataSet dataset = file.createDataSet<T>(dataset_name, dataspace, props);
+
+        ContentGenerate<T> generator;
+        generate2D(array, x_size, y_size, generator);
+
+        dataset.write(array);
+
+        file.flush();
+    }
+
+    // read it back
+    {
+        File file_read(filename.str(), File::ReadOnly);
+        DataSet dataset_read = file_read.getDataSet("/" + dataset_name);
+
+        T result[x_size][y_size];
+
+        dataset_read.read(result);
+
+        for (size_t i = 0; i < x_size; ++i) {
+            for (size_t j = 0; j < y_size; ++j) {
+                REQUIRE(result[i][j] == array[i][j]);
+            }
+        }
+    }
+}
+
+TEMPLATE_LIST_TEST_CASE("ReadWriteShuffleDeflate", "[template]", numerical_test_types) {
+    readWriteShuffleDeflateTest<TestType>();
+}
+
+template <typename T>
+void readWriteSzipTest() {
+    std::ostringstream filename;
+    filename << "h5_rw_szip_" << typeNameHelper<T>() << "_test.h5";
+    const std::string dataset_name("dset");
+    const size_t x_size = 128;
+    const size_t y_size = 32;
+    const size_t x_chunk = 8;
+    const size_t y_chunk = 4;
+
+    const int options_mask = H5_SZIP_NN_OPTION_MASK;
+    const int pixels_per_block = 8;
+
+    T array[x_size][y_size];
+
+    // write a compressed file
+    {
+        File file(filename.str(), File::ReadWrite | File::Create | File::Truncate);
+
+        // Create the data space for the dataset.
+        std::vector<size_t> dims{x_size, y_size};
+
+        DataSpace dataspace(dims);
+
+        // Use chunking
+        DataSetCreateProps props;
+        props.add(Chunking(std::vector<hsize_t>{x_chunk, y_chunk}));
+
+        // Enable szip
+        props.add(Szip(options_mask, pixels_per_block));
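+        // H5_SZIP_NN_OPTION_MASK selects nearest-neighbour preprocessing; per the
+        // HDF5 docs, pixels_per_block must be even and at most 32.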
+
+        // Create a dataset with arbitrary type
+        DataSet dataset = file.createDataSet<T>(dataset_name, dataspace, props);
+
+        ContentGenerate<T> generator;
+        generate2D(array, x_size, y_size, generator);
+
+        dataset.write(array);
+
+        file.flush();
+    }
+
+    // read it back
+    {
+        File file_read(filename.str(), File::ReadOnly);
+        DataSet dataset_read = file_read.getDataSet("/" + dataset_name);
+
+        T result[x_size][y_size];
+
+        dataset_read.read(result);
+
+        for (size_t i = 0; i < x_size; ++i) {
+            for (size_t j = 0; j < y_size; ++j) {
+                REQUIRE(result[i][j] == array[i][j]);
+            }
+        }
+    }
+}
+
+TEMPLATE_LIST_TEST_CASE("ReadWriteSzip", "[template]", dataset_test_types) {
+    // SZIP is not consistently available across distributions.
+    if (H5Zfilter_avail(H5Z_FILTER_SZIP)) {
+        readWriteSzipTest<TestType>();
+    } else {
+        CHECK_THROWS_AS(readWriteSzipTest<TestType>(), PropertyException);
+    }
+}
+
+TEST_CASE("CheckDimensions") {
+    // List of dims which can all be one-dimensional.
+    std::vector<std::vector<size_t>> test_cases{
+        {1ul, 3ul}, {3ul, 1ul}, {1ul, 1ul, 3ul}, {3ul, 1ul, 1ul}, {1ul, 3ul, 1ul}};
+
+    for (const auto& dims: test_cases) {
+        auto actual = details::checkDimensions(dims, 1ul);
+
+        INFO("dims = " + details::format_vector(dims) + ", n_dims = 1");
+        CHECK(actual);
+
+        INFO("dims = " + details::format_vector(dims) + ", n_dims = 1");
+        CHECK(!details::checkDimensions(dims, dims.size() + 1));
+    }
+
+    CHECK(details::checkDimensions(std::vector<size_t>{1ul}, 0ul));
+    CHECK(details::checkDimensions(std::vector<size_t>{1ul}, 1ul));
+
+    CHECK(!details::checkDimensions(std::vector<size_t>{0ul}, 0ul));
+    CHECK(!details::checkDimensions(std::vector<size_t>{2ul}, 0ul));
+
+    CHECK(!details::checkDimensions(std::vector<size_t>{1ul, 2ul, 3ul}, 2ul));
+    CHECK(details::checkDimensions(std::vector<size_t>{3ul, 2ul, 1ul}, 2ul));
+
+    CHECK(details::checkDimensions(std::vector<size_t>{1ul, 1ul, 1ul, 1ul}, 1ul));
+
+    CHECK(details::checkDimensions(std::vector<size_t>{}, 0ul));
+    CHECK(!details::checkDimensions(std::vector<size_t>{}, 1ul));
+    CHECK(!details::checkDimensions(std::vector<size_t>{}, 2ul));
+}
+
+
+TEST_CASE("SqueezeDimensions") {
+    SECTION("possible") {
+        // List of test cases: the first number is n_dims, then the input
+        // dimensions, and finally the expected squeezed dimensions.
+        std::vector<std::tuple<size_t, std::vector<size_t>, std::vector<size_t>>> test_cases{
+            {1ul, {3ul, 1ul}, {3ul}},
+
+            {1ul, {1ul, 1ul, 1ul}, {1ul}},
+
+            {1ul, {1ul, 3ul, 1ul}, {3ul}},
+
+            {1ul, {3ul, 1ul, 1ul}, {3ul}},
+            {2ul, {3ul, 1ul, 1ul}, {3ul, 1ul}},
+            {3ul, {3ul, 1ul, 1ul}, {3ul, 1ul, 1ul}},
+
+            {3ul, {2ul, 1ul, 3ul}, {2ul, 1ul, 3ul}}};
+
+        for (const auto& tc: test_cases) {
+            auto n_dim_requested = std::get<0>(tc);
+            auto dims = std::get<1>(tc);
+            auto expected = std::get<2>(tc);
+            auto actual = details::squeezeDimensions(dims, n_dim_requested);
+
+            CHECK(actual == expected);
+        }
+    }
+
+    SECTION("impossible") {
+        // List of test cases: the first number is n_dims, then the input
+        // dimensions; these cannot be squeezed down to n_dims.
+        std::vector<std::tuple<size_t, std::vector<size_t>>> test_cases{{1ul, {1ul, 2ul, 3ul}},
+                                                                        {2ul, {1ul, 2ul, 3ul, 1ul}},
+
+                                                                        {1ul, {2ul, 1ul, 3ul}},
+                                                                        {2ul, {2ul, 1ul, 3ul}}};
+
+        for (const auto& tc: test_cases) {
+            auto n_dim_requested = std::get<0>(tc);
+            auto dims = std::get<1>(tc);
+
+            CHECK_THROWS(details::squeezeDimensions(dims, n_dim_requested));
+        }
+    }
+}
+
+void check_broadcast_1d(HighFive::File& file,
+                        const std::vector<size_t> dims,
+                        const std::string& dataset_name) {
+    // This checks that:
+    //   - we can write 1D array into 2D dataset.
+    //   - we can read 2D dataset into a 1D array.
+    std::vector<double> input_data{5.0, 6.0, 7.0};
+
+
+    DataSpace dataspace(dims);
+    DataSet dataset = file.createDataSet(dataset_name, dataspace, AtomicType<double>());
+
+    dataset.write(input_data);
+
+    {
+        std::vector<double> read_back;
+        dataset.read(read_back);
+
+        CHECK(read_back == input_data);
+    }
+
+    {
+        auto read_back = dataset.read<std::vector<double>>();
+        CHECK(read_back == input_data);
+    }
+}
+
+// Broadcasting is supported
+TEST_CASE("ReadInBroadcastDims") {
+    const std::string file_name("h5_broadcast_dset.h5");
+    const std::string dataset_name("dset");
+
+    // Create a new file using the default property lists.
+    File file(file_name, File::Truncate);
+
+    SECTION("one-dimensional (1, 3)") {
+        check_broadcast_1d(file, {1, 3}, dataset_name + "_a");
+    }
+
+    SECTION("one-dimensional (3, 1)") {
+        check_broadcast_1d(file, {3, 1}, dataset_name + "_b");
+    }
+
+    SECTION("two-dimensional (2, 3, 1)") {
+        std::vector<size_t> dims{2, 3, 1};
+        std::vector<std::vector<double>> input_data_2d{{2.0, 3.0, 4.0}, {10.0, 11.0, 12.0}};
+
+        DataSpace dataspace(dims);
+        DataSet dataset = file.createDataSet(dataset_name + "_c", dataspace, AtomicType<double>());
+
+        dataset.write(input_data_2d);
+
+        auto check = [](const std::vector<std::vector<double>>& lhs,
+                        const std::vector<std::vector<double>>& rhs) {
+            CHECK(lhs.size() == rhs.size());
+            for (size_t i = 0; i < rhs.size(); ++i) {
+                CHECK(lhs[i].size() == rhs[i].size());
+
+                for (size_t j = 0; j < rhs[i].size(); ++j) {
+                    CHECK(lhs[i][j] == rhs[i][j]);
+                }
+            }
+        };
+
+        {
+            std::vector<std::vector<double>> read_back;
+            dataset.read(read_back);
+
+            check(read_back, input_data_2d);
+        }
+
+        {
+            auto read_back = dataset.read<std::vector<std::vector<double>>>();
+            check(read_back, input_data_2d);
+        }
+    }
+
+    SECTION("one-dimensional fixed length string") {
+        std::vector<size_t> dims{1, 1, 2};
+        char input_data[2] = "a";
+
+        DataSpace dataspace(dims);
+        DataSet dataset = file.createDataSet(dataset_name + "_d", dataspace, AtomicType<char>());
+        dataset.write(input_data);
+
+        {
+            char read_back[2];
+            dataset.read(read_back);
+
+            CHECK(read_back[0] == 'a');
+            CHECK(read_back[1] == '\0');
+        }
+    }
+}
+
+
+template <int n_dim>
+struct CreateEmptyVector;
+
+template <>
+struct CreateEmptyVector<1> {
+    using container_type = std::vector<int>;
+
+    static container_type create(const std::vector<size_t>& dims) {
+        return container_type(dims[0], 2);
+    }
+};
+
+template <int n_dim>
+struct CreateEmptyVector {
+    using container_type = std::vector<typename CreateEmptyVector<n_dim - 1>::container_type>;
+
+    static container_type create(const std::vector<size_t>& dims) {
+        auto subdims = std::vector<size_t>(dims.begin() + 1, dims.end());
+        return container_type(dims[0], CreateEmptyVector<n_dim - 1>::create(subdims));
+    }
+};
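+// Example: CreateEmptyVector<2>::create({2, 0}) recurses once and returns a
+// vector holding two empty std::vector<int>, i.e. a container of shape (2, 0).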
+
+#ifdef H5_USE_BOOST
+template <int n_dim>
+struct CreateEmptyBoostMultiArray {
+    using container_type = boost::multi_array<int, static_cast<long unsigned>(n_dim)>;
+
+    static container_type create(const std::vector<size_t>& dims) {
+        auto container = container_type(dims);
+
+        auto raw_data = std::vector<int>(compute_total_size(dims));
+        container.assign(raw_data.begin(), raw_data.end());
+
+        return container;
+    }
+};
+#endif
+
+
+#ifdef H5_USE_EIGEN
+struct CreateEmptyEigenVector {
+    using container_type = Eigen::VectorXi;
+
+    static container_type create(const std::vector<size_t>& dims) {
+        return container_type::Constant(int(dims[0]), 2);
+    }
+};
+
+struct CreateEmptyEigenMatrix {
+    using container_type = Eigen::MatrixXi;
+
+    static container_type create(const std::vector<size_t>& dims) {
+        return container_type::Constant(int(dims[0]), int(dims[1]), 2);
+    }
+};
+#endif
+
+template <class Container>
+void check_empty_dimensions(const Container& container, const std::vector<size_t>& expected_dims) {
+    auto deduced_dims = details::inspector<Container>::getDimensions(container);
+
+    REQUIRE(expected_dims.size() == deduced_dims.size());
+
+    // The dims after hitting the first `0` are finicky. We allow those to be deduced as either `1`
+    // or what the original dims said. The `1` allows broadcasting, the "same as original" enables
+    // statically sized objects, which conceptually have dims, even if there's no object.
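+    // For example, expected_dims {2, 0, 3} accepts deduced_dims {2, 0, 3} as
+    // well as {2, 0, 1}, since the trailing dim follows a 0.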
+    bool allow_one = false;
+    for (size_t i = 0; i < expected_dims.size(); ++i) {
+        REQUIRE(((expected_dims[i] == deduced_dims[i]) || (allow_one && (deduced_dims[i] == 1ul))));
+
+        if (expected_dims[i] == 0) {
+            allow_one = true;
+        }
+    }
+}
+
+template <class CreateContainer>
+void check_empty_dimensions(const std::vector<size_t>& dims) {
+    auto input_data = CreateContainer::create(dims);
+    check_empty_dimensions(input_data, dims);
+}
+
+struct ReadWriteAttribute {
+    template <class Container>
+    static void create(HighFive::File& file, const std::string& name, const Container& container) {
+        file.createAttribute(name, container);
+    }
+
+    static HighFive::Attribute get(HighFive::File& file, const std::string& name) {
+        return file.getAttribute(name);
+    }
+};
+
+struct ReadWriteDataSet {
+    template <class Container>
+    static void create(HighFive::File& file, const std::string& name, const Container& container) {
+        file.createDataSet(name, container);
+    }
+
+    static HighFive::DataSet get(HighFive::File& file, const std::string& name) {
+        return file.getDataSet(name);
+    }
+};
+
+template <class ReadWriteInterface, class CreateContainer>
+void check_empty_read_write_cycle(const std::vector<size_t>& dims) {
+    using container_type = typename CreateContainer::container_type;
+
+    const std::string file_name("h5_empty_attr.h5");
+    const std::string dataset_name("dset");
+    File file(file_name, File::Truncate);
+
+    auto input_data = CreateContainer::create(dims);
+    ReadWriteInterface::create(file, dataset_name, input_data);
+
+    SECTION("read; one-dimensional vector (empty)") {
+        auto output_data = CreateEmptyVector<1>::create({0ul});
+
+        ReadWriteInterface::get(file, dataset_name).read(output_data);
+        check_empty_dimensions(output_data, {0ul});
+    }
+
+    SECTION("read; pre-allocated (empty)") {
+        auto output_data = CreateContainer::create(dims);
+        ReadWriteInterface::get(file, dataset_name).read(output_data);
+
+        check_empty_dimensions(output_data, dims);
+    }
+
+    SECTION("read; pre-allocated (oversized)") {
+        auto oversize_dims = std::vector<size_t>(dims.size(), 2ul);
+        auto output_data = CreateContainer::create(oversize_dims);
+        ReadWriteInterface::get(file, dataset_name).read(output_data);
+
+        check_empty_dimensions(output_data, dims);
+    }
+
+    SECTION("read; auto-allocated") {
+        auto output_data =
+            ReadWriteInterface::get(file, dataset_name).template read<container_type>();
+        check_empty_dimensions(output_data, dims);
+    }
+}
+
+template <class CreateContainer>
+void check_empty_dataset(const std::vector<size_t>& dims) {
+    check_empty_read_write_cycle<ReadWriteDataSet, CreateContainer>(dims);
+}
+
+template <class CreateContainer>
+void check_empty_attribute(const std::vector<size_t>& dims) {
+    check_empty_read_write_cycle<ReadWriteAttribute, CreateContainer>(dims);
+}
+
+template <class CreateContainer>
+void check_empty_everything(const std::vector<size_t>& dims) {
+    SECTION("Empty dimensions") {
+        check_empty_dimensions<CreateContainer>(dims);
+    }
+
+    SECTION("Empty datasets") {
+        check_empty_dataset<CreateContainer>(dims);
+    }
+
+    SECTION("Empty attribute") {
+        check_empty_attribute<CreateContainer>(dims);
+    }
+}
+
+#ifdef H5_USE_EIGEN
+template <int ndim>
+void check_empty_eigen(const std::vector<size_t>&) {}
+
+template <>
+void check_empty_eigen<1>(const std::vector<size_t>& dims) {
+    SECTION("Eigen::Vector") {
+        check_empty_everything<CreateEmptyEigenVector>({dims[0], 1ul});
+    }
+}
+
+template <>
+void check_empty_eigen<2>(const std::vector<size_t>& dims) {
+    SECTION("Eigen::Matrix") {
+        check_empty_everything<CreateEmptyEigenMatrix>(dims);
+    }
+}
+#endif
+
+template <int ndim>
+void check_empty(const std::vector<size_t>& dims) {
+    REQUIRE(dims.size() == ndim);
+
+    SECTION("std::vector") {
+        check_empty_everything<CreateEmptyVector<ndim>>(dims);
+    }
+
+#ifdef H5_USE_BOOST
+    SECTION("boost::multi_array") {
+        check_empty_everything<CreateEmptyBoostMultiArray<ndim>>(dims);
+    }
+#endif
+
+#ifdef H5_USE_EIGEN
+    check_empty_eigen<ndim>(dims);
+#endif
+}
+
+TEST_CASE("Empty arrays") {
+    SECTION("one-dimensional") {
+        check_empty<1>({0ul});
+    }
+
+    SECTION("two-dimensional") {
+        std::vector<std::vector<size_t>> testcases{{0ul, 1ul}, {1ul, 0ul}};
+
+        for (const auto& dims: testcases) {
+            SECTION(details::format_vector(dims)) {
+                check_empty<2>(dims);
+            }
+        }
+    }
+
+    SECTION("three-dimensional") {
+        std::vector<std::vector<size_t>> testcases{{0ul, 1ul, 1ul},
+                                                   {1ul, 1ul, 0ul},
+                                                   {1ul, 0ul, 1ul}};
+
+        for (const auto& dims: testcases) {
+            SECTION(details::format_vector(dims)) {
+                check_empty<3>(dims);
+            }
+        }
+    }
+}
+
+TEST_CASE("HighFiveRecursiveGroups") {
+    const std::string file_name("h5_ds_exist.h5");
+    const std::string group_1("group1");
+    const std::string group_2("group2");
+    const std::string ds_path = group_1 + "/" + group_2;
+    const std::string ds_name = "ds";
+
+    // Create a new file using the default property lists.
+    File file(file_name, File::ReadWrite | File::Create | File::Truncate);
+
+    CHECK(file.getName() == file_name);
+
+    // Without parent creation enabled, creating both groups at once will fail
+    {
+        SilenceHDF5 silencer;
+        CHECK_THROWS_AS(file.createGroup(ds_path, false), std::exception);
+    }
+    Group g2 = file.createGroup(ds_path);
+
+    std::vector<double> some_data{5.0, 6.0, 7.0};
+    g2.createDataSet(ds_name, some_data);
+
+    CHECK(file.exist(group_1));
+
+    Group g1 = file.getGroup(group_1);
+    CHECK(g1.exist(group_2));
+
+    // checks with full path
+    CHECK(file.exist(ds_path));
+    CHECK(file.exist(ds_path + "/" + ds_name));
+
+    // Check with wrong middle path (this used to raise an exception)
+    CHECK(!file.exist(std::string("blabla/group2")));
+
+    // Using root slash
+    CHECK(file.exist(std::string("/") + ds_path));
+
+    // Check unlink with existing group
+    CHECK(g1.exist(group_2));
+    g1.unlink(group_2);
+    CHECK(!g1.exist(group_2));
+
+    // Check unlink with non-existing group
+    {
+        SilenceHDF5 silencer;
+        CHECK_THROWS_AS(g1.unlink("x"), HighFive::GroupException);
+    }
+}
+
+TEST_CASE("HighFiveInspect") {
+    const std::string file_name("group_info.h5");
+    const std::string group_1("group1");
+    const std::string ds_name = "ds";
+
+    // Create a new file using the default property lists.
+    File file(file_name, File::ReadWrite | File::Create | File::Truncate);
+    Group g = file.createGroup(group_1);
+
+    std::vector<double> some_data{5.0, 6.0, 7.0};
+    g.createDataSet(ds_name, some_data);
+
+    CHECK(file.getLinkType(group_1) == LinkType::Hard);
+
+    {
+        SilenceHDF5 silencer;
+        CHECK_THROWS_AS(file.getLinkType("x"), HighFive::GroupException);
+    }
+
+    CHECK(file.getObjectType(group_1) == ObjectType::Group);
+    CHECK(file.getObjectType(group_1 + "/" + ds_name) == ObjectType::Dataset);
+    CHECK(g.getObjectType(ds_name) == ObjectType::Dataset);
+
+    {
+        SilenceHDF5 silencer;
+        CHECK_THROWS_AS(file.getObjectType(ds_name), HighFive::GroupException);
+    }
+
+    // Data type
+    auto ds = g.getDataSet(ds_name);
+    auto dt = ds.getDataType();
+    CHECK(dt.getClass() == DataTypeClass::Float);
+    CHECK(dt.getSize() == 8);
+    CHECK(dt.string() == "Float64");
+
+    // meta
+    CHECK(ds.getType() == ObjectType::Dataset);  // internal
+    CHECK(ds.getInfo().getRefCount() == 1);
+}
+
+TEST_CASE("HighFiveGetPath") {
+    File file("getpath.h5", File::ReadWrite | File::Create | File::Truncate);
+
+    int number = 100;
+    Group group = file.createGroup("group");
+    DataSet dataset = group.createDataSet("data", DataSpace(1), AtomicType<int>());
+    dataset.write(number);
+    std::string string_list("Very important DataSet!");
+    Attribute attribute = dataset.createAttribute<std::string>("attribute",
+                                                               DataSpace::From(string_list));
+    attribute.write(string_list);
+
+    CHECK("/" == file.getPath());
+    CHECK("/group" == group.getPath());
+    CHECK("/group/data" == dataset.getPath());
+    CHECK("attribute" == attribute.getName());
+    CHECK("/group/data" == attribute.getPath());
+
+    CHECK(file == dataset.getFile());
+    CHECK(file == attribute.getFile());
+
+    // Destroy the File object early (the underlying file must stay alive inside the DataSet)
+    std::unique_ptr<File> f2(new File("getpath.h5"));
+    const auto& d2 = f2->getDataSet("/group/data");
+    f2.reset(nullptr);
+    CHECK(d2.getFile().getPath() == "/");
+}
+
+TEST_CASE("HighFiveSoftLinks") {
+    const std::string file_name("softlinks.h5");
+    const std::string ds_path("/hard_link/dataset");
+    const std::string link_path("/soft_link/to_ds");
+    const std::vector<int> data{11, 22, 33};
+
+    {
+        File file(file_name, File::ReadWrite | File::Create | File::Truncate);
+        auto dset = file.createDataSet(ds_path, data);
+        file.createSoftLink(link_path, dset);
+    }
+
+    {
+        File file(file_name, File::ReadWrite);
+        std::vector<int> data_out;
+        file.getDataSet(link_path).read(data_out);
+        CHECK(data == data_out);
+    }
+
+    {
+        const std::string EXTERNAL_LINK_PATH("/external_link/to_ds");
+        File file2("link_external_to.h5", File::ReadWrite | File::Create | File::Truncate);
+        file2.createExternalLink(EXTERNAL_LINK_PATH, file_name, ds_path);
+
+        std::vector<int> data_out;
+        file2.getDataSet(EXTERNAL_LINK_PATH).read(data_out);
+        CHECK(data == data_out);
+    }
+}
+
+TEST_CASE("HighFiveHardLinks Dataset (create intermediate)") {
+    const std::string file_name("hardlinks_dataset_intermiate.h5");
+    const std::string ds_path("/group/dataset");
+    const std::string ds_link_path("/alternate/dataset");
+    const std::vector<int> data{12, 24, 36};
+
+    {
+        File file(file_name, File::Truncate);
+        auto dset = file.createDataSet(ds_path, data);
+        file.createHardLink(ds_link_path, dset);
+        file.unlink(ds_path);
+    }
+
+    {
+        File file(file_name, File::ReadWrite);
+        auto data_out = file.getDataSet(ds_link_path).read<std::vector<int>>();
+        CHECK(data == data_out);
+    }
+}
+
+TEST_CASE("HighFiveHardLinks Dataset (relative paths)") {
+    const std::string file_name("hardlinks_dataset_relative.h5");
+    const std::string ds_path("/group/dataset");
+    const std::string ds_link_path("/alternate/dataset");
+    const std::vector<int> data{12, 24, 36};
+
+    {
+        File file(file_name, File::Truncate);
+        auto dset = file.createDataSet(ds_path, data);
+
+        auto alternate = file.createGroup("/alternate");
+        alternate.createHardLink("dataset", dset);
+        file.unlink(ds_path);
+    }
+
+    {
+        File file(file_name, File::ReadWrite);
+        auto data_out = file.getDataSet(ds_link_path).read<std::vector<int>>();
+        CHECK(data == data_out);
+    }
+}
+
+TEST_CASE("HighFiveHardLinks Group") {
+    const std::string file_name("hardlinks_group.h5");
+    const std::string group_path("/group");
+    const std::string ds_name("dataset");
+    const std::string group_link_path("/alternate");
+    const std::vector<int> data{12, 24, 36};
+
+    {
+        File file(file_name, File::Truncate);
+        auto dset = file.createDataSet(group_path + "/" + ds_name, data);
+        auto group = file.getGroup(group_path);
+        file.createHardLink(group_link_path, group);
+        file.unlink(group_path);
+    }
+
+    {
+        File file(file_name, File::ReadWrite);
+        auto data_out = file.getDataSet(group_link_path + "/" + ds_name).read<std::vector<int>>();
+        CHECK(data == data_out);
+    }
+}
+
+TEST_CASE("HighFiveRename") {
+    File file("h5_rename.h5", File::ReadWrite | File::Create | File::Truncate);
+
+    int number = 100;
+
+    {
+        Group group = file.createGroup("group");
+        DataSet dataset = group.createDataSet("data", DataSpace(1), AtomicType<int>());
+        dataset.write(number);
+        std::string path = dataset.getPath();
+        CHECK("/group/data" == path);
+    }
+
+    file.rename("/group/data", "/new/group/new/data");
+
+    {
+        DataSet dataset = file.getDataSet("/new/group/new/data");
+        std::string path = dataset.getPath();
+        CHECK("/new/group/new/data" == path);
+        int read;
+        dataset.read(read);
+        CHECK(number == read);
+    }
+}
+
+TEST_CASE("HighFiveRenameRelative") {
+    File file("h5_rename_relative.h5", File::ReadWrite | File::Create | File::Truncate);
+    Group group = file.createGroup("group");
+
+    int number = 100;
+
+    {
+        DataSet dataset = group.createDataSet("data", DataSpace(1), AtomicType<int>());
+        dataset.write(number);
+        CHECK("/group/data" == dataset.getPath());
+    }
+
+    group.rename("data", "new_data");
+
+    {
+        DataSet dataset = group.getDataSet("new_data");
+        CHECK("/group/new_data" == dataset.getPath());
+        int read;
+        dataset.read(read);
+        CHECK(number == read);
+    }
+}
+
+TEST_CASE("HighFivePropertyObjects") {
+    const auto& plist1 = FileCreateProps::Default();  // get const-ref, otherwise copies
+    CHECK(plist1.getId() == H5P_DEFAULT);
+    CHECK(!plist1.isValid());  // not valid -> no inc_ref
+    auto plist2 = plist1;      // copy  (from Object)
+    CHECK(plist2.getId() == H5P_DEFAULT);
+
+    // Underlying object is same (singleton holder of H5P_DEFAULT)
+    const auto& other_plist_type = LinkCreateProps::Default();
+    CHECK((void*) &plist1 == (void*) &other_plist_type);
+
+    LinkCreateProps plist_g;
+    CHECK(plist_g.getId() == H5P_DEFAULT);
+    CHECK(!plist_g.isValid());
+
+    plist_g.add(CreateIntermediateGroup());
+    CHECK(plist_g.isValid());
+    auto plist_g2 = plist_g;
+    CHECK(plist_g2.isValid());
+}
+
+TEST_CASE("HighFiveLinkCreationOrderProperty") {
+    {  // For file
+        const std::string file_name("h5_keep_creation_order_file.h5");
+        FileCreateProps keepCreationOrder{};
+        keepCreationOrder.add(LinkCreationOrder(CreationOrder::Tracked | CreationOrder::Indexed));
+
+        File file(file_name, File::ReadWrite | File::Create | File::Truncate, keepCreationOrder);
+        file.createGroup("1");
+        file.createGroup("2");
+        file.createGroup("10");
+
+        CHECK(file.listObjectNames(IndexType::CRT_ORDER) ==
+              std::vector<std::string>{"1", "2", "10"});
+        CHECK(file.listObjectNames(IndexType::NAME) == std::vector<std::string>{"1", "10", "2"});
+
+        auto fcpl = file.getCreatePropertyList();
+        LinkCreationOrder linkCreationOrder(fcpl);
+        CHECK((linkCreationOrder.getFlags() & CreationOrder::Tracked) != 0);
+        CHECK((linkCreationOrder.getFlags() & CreationOrder::Indexed) != 0);
+    }
+    {  // For groups
+        const std::string file_name("h5_keep_creation_order_group.h5");
+        GroupCreateProps keepCreationOrder{};
+        keepCreationOrder.add(LinkCreationOrder(CreationOrder::Tracked | CreationOrder::Indexed));
+
+        File file(file_name, File::ReadWrite | File::Create | File::Truncate);
+        auto group = file.createGroup("group_crt", keepCreationOrder);
+        group.createGroup("1");
+        group.createGroup("2");
+        group.createGroup("10");
+
+        CHECK(group.listObjectNames(IndexType::CRT_ORDER) ==
+              std::vector<std::string>{"1", "2", "10"});
+        CHECK(group.listObjectNames(IndexType::NAME) == std::vector<std::string>{"1", "10", "2"});
+
+        auto group2 = file.createGroup("group_name");
+        group2.createGroup("1");
+        group2.createGroup("2");
+        group2.createGroup("10");
+
+        CHECK(group2.listObjectNames() == std::vector<std::string>{"1", "10", "2"});
+
+        {
+            auto gcpl = group.getCreatePropertyList();
+            LinkCreationOrder linkCreationOrder(gcpl);
+            CHECK((linkCreationOrder.getFlags() & CreationOrder::Tracked) != 0);
+            CHECK((linkCreationOrder.getFlags() & CreationOrder::Indexed) != 0);
+        }
+        {
+            auto gcpl = group2.getCreatePropertyList();
+            LinkCreationOrder linkCreationOrder(gcpl);
+            CHECK((linkCreationOrder.getFlags() & CreationOrder::Tracked) == 0);
+            CHECK((linkCreationOrder.getFlags() & CreationOrder::Indexed) == 0);
+        }
+    }
+}
+
+struct CSL1 {
+    int m1;
+    int m2;
+    int m3;
+};
+
+struct CSL2 {
+    CSL1 csl1;
+};
+
+CompoundType create_compound_csl1() {
+    auto t2 = AtomicType<int>();
+    CompoundType t1({{"m1", AtomicType<int>{}}, {"m2", AtomicType<int>{}}, {"m3", t2}});
+
+    return t1;
+}
+
+CompoundType create_compound_csl2() {
+    CompoundType t1 = create_compound_csl1();
+
+    CompoundType t2({{"csl1", t1}});
+
+    return t2;
+}
+
+HIGHFIVE_REGISTER_TYPE(CSL1, create_compound_csl1)
+HIGHFIVE_REGISTER_TYPE(CSL2, create_compound_csl2)
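+// HIGHFIVE_REGISTER_TYPE specializes create_datatype<T>() with the factory above,
+// so containers of CSL1/CSL2 can be written below without naming the type each time.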
+
+TEST_CASE("HighFiveCompounds") {
+    const std::string file_name("compounds_test.h5");
+    const std::string dataset_name1("/a");
+    const std::string dataset_name2("/b");
+
+    File file(file_name, File::ReadWrite | File::Create | File::Truncate);
+
+    auto t3 = AtomicType<int>();
+    CompoundType t1 = create_compound_csl1();
+    t1.commit(file, "my_type");
+
+    CompoundType t2 = create_compound_csl2();
+    t2.commit(file, "my_type2");
+
+    {  // Not nested
+        auto dataset = file.createDataSet(dataset_name1, DataSpace(2), t1);
+
+        std::vector<CSL1> csl = {{1, 1, 1}, {2, 3, 4}};
+        dataset.write(csl);
+
+        file.flush();
+
+        std::vector<CSL1> result;
+        dataset.select({0}, {2}).read(result);
+
+        CHECK(result.size() == 2);
+        CHECK(result[0].m1 == 1);
+        CHECK(result[0].m2 == 1);
+        CHECK(result[0].m3 == 1);
+        CHECK(result[1].m1 == 2);
+        CHECK(result[1].m2 == 3);
+        CHECK(result[1].m3 == 4);
+    }
+
+    {  // Nested
+        auto dataset = file.createDataSet(dataset_name2, DataSpace(2), t2);
+
+        std::vector<CSL2> csl = {{{1, 1, 1}, {2, 3, 4}}};
+        dataset.write(csl);
+
+        file.flush();
+        std::vector<CSL2> result = {{{1, 1, 1}, {2, 3, 4}}};
+        dataset.select({0}, {2}).read(result);
+
+        CHECK(result.size() == 2);
+        CHECK(result[0].csl1.m1 == 1);
+        CHECK(result[0].csl1.m2 == 1);
+        CHECK(result[0].csl1.m3 == 1);
+        CHECK(result[1].csl1.m1 == 2);
+        CHECK(result[1].csl1.m2 == 3);
+        CHECK(result[1].csl1.m3 == 4);
+    }
+
+    // Test the constructor from hid
+    CompoundType t1_from_hid(t1);
+    CHECK(t1 == t1_from_hid);
+
+    CompoundType t2_from_hid(t2);
+    CHECK(t2 == t2_from_hid);
+
+    // Back from a DataType
+    CHECK_NOTHROW(CompoundType(DataType(t1_from_hid)));
+    CHECK_THROWS(CompoundType(AtomicType<uint32_t>{}));
+}
+
+struct GrandChild {
+    uint32_t gcm1;
+    uint32_t gcm2;
+    uint32_t gcm3;
+};
+
+struct Child {
+    GrandChild grandChild;
+    uint32_t cm1;
+};
+
+struct Parent {
+    uint32_t pm1;
+    Child child;
+};
+
+CompoundType create_compound_GrandChild() {
+    auto t2 = AtomicType<uint32_t>();
+    CompoundType t1({{"gcm1", AtomicType<uint32_t>{}},
+                     {"gcm2", AtomicType<uint32_t>{}},
+                     {
+                         "gcm3",
+                         t2,
+                     }});
+    return t1;
+}
+
+CompoundType create_compound_Child() {
+    auto nestedType = create_compound_GrandChild();
+    return CompoundType{{{
+                             "grandChild",
+                             nestedType,
+                         },
+                         {"cm1", AtomicType<uint32_t>{}}}};
+}
+
+CompoundType create_compound_Parent() {
+    auto nestedType = create_compound_Child();
+    return CompoundType{{{"pm1", AtomicType<uint32_t>{}},
+                         {
+                             "child",
+                             nestedType,
+                         }}};
+}
+
+HIGHFIVE_REGISTER_TYPE(GrandChild, create_compound_GrandChild)
+HIGHFIVE_REGISTER_TYPE(Child, create_compound_Child)
+HIGHFIVE_REGISTER_TYPE(Parent, create_compound_Parent)
+
+TEST_CASE("HighFiveCompoundsNested") {
+    const std::string file_name("nested_compounds_test.h5");
+    const std::string dataset_name("/a");
+
+    {  // Write
+        File file(file_name, File::ReadWrite | File::Create | File::Truncate);
+        auto type = create_compound_Parent();
+
+        auto dataset = file.createDataSet(dataset_name, DataSpace(2), type);
+        CHECK(dataset.getDataType().getSize() == 20);
+
+        std::vector<Parent> csl = {Parent{1, Child{GrandChild{1, 1, 1}, 1}},
+                                   Parent{2, Child{GrandChild{3, 4, 5}, 6}}};
+        dataset.write(csl);
+    }
+
+    {  // Read
+        File file(file_name, File::ReadOnly);
+        std::vector<Parent> result;
+        auto dataset = file.getDataSet(dataset_name);
+        CHECK(dataset.getDataType().getSize() == 20);
+        dataset.select({0}, {2}).read(result);
+
+        CHECK(result.size() == 2);
+        CHECK(result[0].pm1 == 1);
+        CHECK(result[0].child.grandChild.gcm1 == 1);
+        CHECK(result[0].child.grandChild.gcm2 == 1);
+        CHECK(result[0].child.grandChild.gcm3 == 1);
+        CHECK(result[0].child.cm1 == 1);
+        CHECK(result[1].pm1 == 2);
+        CHECK(result[1].child.grandChild.gcm1 == 3);
+        CHECK(result[1].child.grandChild.gcm2 == 4);
+        CHECK(result[1].child.grandChild.gcm3 == 5);
+        CHECK(result[1].child.cm1 == 6);
+    }
+}
+
+template <size_t N>
+struct Record {
+    double d = 3.14;
+    int i = 42;
+    char s[N];
+};
+
+template <size_t N>
+void fill(Record<N>& r) {
+    constexpr char ref[] = "123456789a123456789b123456789c123456789d123456789e123456789f";
+    std::copy(ref, ref + N - 1, r.s);
+    r.s[N - 1] = '\0';
+}
+
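+// Compound type mirroring Record<N>. Note that `decltype(RecN::s)` is
+// char[N], which HighFive treats as a fixed-length string member.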
+template <size_t N>
+CompoundType rec_t() {
+    using RecN = Record<N>;
+    return {{"d", create_datatype<decltype(RecN::d)>()},
+            {"i", create_datatype<decltype(RecN::i)>()},
+            {"s", create_datatype<decltype(RecN::s)>()}};
+}
+
+HIGHFIVE_REGISTER_TYPE(Record<4>, rec_t<4>)
+HIGHFIVE_REGISTER_TYPE(Record<8>, rec_t<8>)
+HIGHFIVE_REGISTER_TYPE(Record<9>, rec_t<9>)
+
+template <size_t N>
+void save(File& f) {
+    const size_t numRec = 2;
+    std::vector<Record<N>> recs(numRec);
+    fill<N>(recs[0]);
+    fill<N>(recs[1]);
+    auto dataset = f.createDataSet<Record<N>>("records" + std::to_string(N), DataSpace::From(recs));
+    dataset.write(recs);
+}
+
+template <size_t N>
+std::string check(File& f) {
+    const size_t numRec = 2;
+    std::vector<Record<N>> recs(numRec);
+    f.getDataSet("records" + std::to_string(N)).read(recs);
+    return std::string(recs[0].s);
+}
+
+TEST_CASE("HighFiveCompoundsSeveralPadding") {
+    const std::string file_name("padded_compounds_test.h5");
+
+    File file(file_name, File::ReadWrite | File::Create | File::Truncate);
+    {  // Write
+        // 4 was chosen because the struct needs no padding:
+        // /* offset      |    size */  type = struct Record<4> {
+        // /*      0      |       8 */    double d;
+        // /*      8      |       4 */    int i;
+        // /*     12      |       4 */    char s[4];
+        // total size (bytes):   16
+        CHECK_NOTHROW(save<4>(file));
+        // 8 was chosen because the struct gets trailing padding:
+        // /* offset      |    size */  type = struct Record<8> {
+        // /*      0      |       8 */    double d;
+        // /*      8      |       4 */    int i;
+        // /*     12      |       8 */    char s[8];
+        // /* XXX  4-byte padding   */
+        // total size (bytes):   24
+        CHECK_NOTHROW(save<8>(file));
+        // 9 was chosen to check an odd-sized member; the struct is still padded:
+        // /* offset      |    size */  type = struct Record<9> {
+        // /*      0      |       8 */    double d;
+        // /*      8      |       4 */    int i;
+        // /*     12      |       9 */    char s[9];
+        // /* XXX  3-byte padding   */
+        // total size (bytes):   24
+        CHECK_NOTHROW(save<9>(file));
+    }
+
+    {  // Read
+        CHECK(check<4>(file) == std::string("123"));
+        CHECK(check<8>(file) == std::string("1234567"));
+        CHECK(check<9>(file) == std::string("12345678"));
+    }
+}
+
+enum Position {
+    highfive_first = 1,
+    highfive_second = 2,
+    highfive_third = 3,
+    highfive_last = -1,
+};
+
+enum class Direction : signed char {
+    Forward = 1,
+    Backward = -1,
+    Left = -2,
+    Right = 2,
+};
+
+// Only needed so the test framework can print Direction values on failure
+std::ostream& operator<<(std::ostream& ost, const Direction& dir) {
+    ost << static_cast<int>(dir);
+    return ost;
+}
+
+EnumType<Position> create_enum_position() {
+    return {{"highfive_first", Position::highfive_first},
+            {"highfive_second", Position::highfive_second},
+            {"highfive_third", Position::highfive_third},
+            {"highfive_last", Position::highfive_last}};
+}
+HIGHFIVE_REGISTER_TYPE(Position, create_enum_position)
+
+EnumType<Direction> create_enum_direction() {
+    return {{"Forward", Direction::Forward},
+            {"Backward", Direction::Backward},
+            {"Left", Direction::Left},
+            {"Right", Direction::Right}};
+}
+HIGHFIVE_REGISTER_TYPE(Direction, create_enum_direction)
+
+TEST_CASE("HighFiveEnum") {
+    const std::string file_name("enum_test.h5");
+    const std::string dataset_name1("/a");
+    const std::string dataset_name2("/b");
+
+    File file(file_name, File::ReadWrite | File::Create | File::Truncate);
+
+    {  // Unscoped enum
+        auto e1 = create_enum_position();
+        e1.commit(file, "Position");
+
+        auto dataset = file.createDataSet(dataset_name1, DataSpace(1), e1);
+        dataset.write(Position::highfive_first);
+
+        file.flush();
+
+        Position result;
+        dataset.select(ElementSet({0})).read(result);
+
+        CHECK(result == Position::highfive_first);
+    }
+
+    {  // Scoped enum
+        auto e1 = create_enum_direction();
+        e1.commit(file, "Direction");
+
+        auto dataset = file.createDataSet(dataset_name2, DataSpace(5), e1);
+        std::vector<Direction> robot_moves({Direction::Backward,
+                                            Direction::Forward,
+                                            Direction::Forward,
+                                            Direction::Left,
+                                            Direction::Left});
+        dataset.write(robot_moves);
+
+        file.flush();
+
+        std::vector<Direction> result;
+        dataset.read(result);
+
+        CHECK(result[0] == Direction::Backward);
+        CHECK(result[1] == Direction::Forward);
+        CHECK(result[2] == Direction::Forward);
+        CHECK(result[3] == Direction::Left);
+        CHECK(result[4] == Direction::Left);
+    }
+}
+
+TEST_CASE("HighFiveReadType") {
+    const std::string file_name("readtype_test.h5");
+    const std::string datatype_name1("my_type");
+    const std::string datatype_name2("position");
+
+    File file(file_name, File::ReadWrite | File::Create | File::Truncate);
+
+    CompoundType t1 = create_compound_csl1();
+    t1.commit(file, datatype_name1);
+
+    CompoundType t2 = file.getDataType(datatype_name1);
+
+    auto t3 = create_enum_position();
+    t3.commit(file, datatype_name2);
+
+    DataType t4 = file.getDataType(datatype_name2);
+
+    CHECK(t2 == t1);
+    CHECK(t4 == t3);
+}
+
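+// Thin adapters exposing a common create/get interface over attributes and
+// datasets, so the string round-trip checks below can be written once and
+// run against both.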
+class ForwardToAttribute {
+  public:
+    ForwardToAttribute(const HighFive::File& file)
+        : _file(file) {}
+
+    template <class T>
+    HighFive::Attribute create(const std::string& name, const T& value) {
+        return _file.createAttribute(name, value);
+    }
+
+    HighFive::Attribute create(const std::string& name,
+                               const HighFive::DataSpace& filespace,
+                               const HighFive::DataType& datatype) {
+        return _file.createAttribute(name, filespace, datatype);
+    }
+
+    HighFive::Attribute get(const std::string& name) {
+        return _file.getAttribute(name);
+    }
+
+  private:
+    HighFive::File _file;
+};
+
+class ForwardToDataSet {
+  public:
+    ForwardToDataSet(const HighFive::File& file)
+        : _file(file) {}
+
+    template <class T>
+    HighFive::DataSet create(const std::string& name, const T& value) {
+        return _file.createDataSet(name, value);
+    }
+
+    HighFive::DataSet create(const std::string& name,
+                             const HighFive::DataSpace& filespace,
+                             const HighFive::DataType& datatype) {
+        return _file.createDataSet(name, filespace, datatype);
+    }
+
+    HighFive::DataSet get(const std::string& name) {
+        return _file.getDataSet(name);
+    }
+
+  private:
+    HighFive::File _file;
+};
+
+template <class Proxy>
+void check_single_string(Proxy proxy, size_t string_length) {
+    auto value = std::string(string_length, 'o');
+    auto dataspace = DataSpace::From(value);
+
+    auto n_chars = value.size() + 1;
+    auto n_chars_overlength = n_chars + 10;
+    auto fixed_length = FixedLengthStringType(n_chars, StringPadding::NullTerminated);
+    auto overlength_nullterm = FixedLengthStringType(n_chars_overlength,
+                                                     StringPadding::NullTerminated);
+    auto overlength_nullpad = FixedLengthStringType(n_chars_overlength, StringPadding::NullPadded);
+    auto overlength_spacepad = FixedLengthStringType(n_chars_overlength,
+                                                     StringPadding::SpacePadded);
+    auto variable_length = VariableLengthStringType();
+
+    SECTION("automatic") {
+        proxy.create("auto", value);
+        REQUIRE(proxy.get("auto").template read<std::string>() == value);
+    }
+
+    SECTION("fixed length") {
+        proxy.create("fixed", dataspace, fixed_length).write(value);
+        REQUIRE(proxy.get("fixed").template read<std::string>() == value);
+    }
+
+    SECTION("overlength null-terminated") {
+        proxy.create("overlength_nullterm", dataspace, overlength_nullterm).write(value);
+        REQUIRE(proxy.get("overlength_nullterm").template read<std::string>() == value);
+    }
+
+    SECTION("overlength null-padded") {
+        proxy.create("overlength_nullpad", dataspace, overlength_nullpad).write(value);
+        auto expected = std::string(n_chars_overlength, '\0');
+        expected.replace(0, value.size(), value.data());
+        REQUIRE(proxy.get("overlength_nullpad").template read<std::string>() == expected);
+    }
+
+    SECTION("overlength space-padded") {
+        proxy.create("overlength_spacepad", dataspace, overlength_spacepad).write(value);
+        auto expected = std::string(n_chars_overlength, ' ');
+        expected.replace(0, value.size(), value.data());
+        REQUIRE(proxy.get("overlength_spacepad").template read<std::string>() == expected);
+    }
+
+    SECTION("variable length") {
+        proxy.create("variable", dataspace, variable_length).write(value);
+        REQUIRE(proxy.get("variable").template read<std::string>() == value);
+    }
+}
+
+template <class Proxy>
+void check_multiple_string(Proxy proxy, size_t string_length) {
+    using value_t = std::vector<std::string>;
+    auto value = value_t{std::string(string_length, 'o'), std::string(string_length, 'x')};
+
+    auto dataspace = DataSpace::From(value);
+
+    auto variable_length = VariableLengthStringType();
+
+    auto check = [](const value_t& actual, const value_t& expected) {
+        REQUIRE(actual.size() == expected.size());
+        for (size_t i = 0; i < actual.size(); ++i) {
+            REQUIRE(actual[i] == expected[i]);
+        }
+    };
+
+    SECTION("automatic") {
+        proxy.create("auto", value);
+        check(proxy.get("auto").template read<value_t>(), value);
+    }
+
+    SECTION("variable length") {
+        proxy.create("variable", dataspace, variable_length).write(value);
+        check(proxy.get("variable").template read<value_t>(), value);
+    }
+
+    auto make_padded_reference = [&](char pad, size_t n) {
+        auto expected = std::vector<std::string>(value.size(), std::string(n, pad));
+        for (size_t i = 0; i < value.size(); ++i) {
+            expected[i].replace(0, value[i].size(), value[i].data());
+        }
+
+        return expected;
+    };
+
+    auto check_fixed_length = [&](const std::string& label, size_t length) {
+        SECTION(label + " null-terminated") {
+            auto datatype = FixedLengthStringType(length + 1, StringPadding::NullTerminated);
+            proxy.create(label + "_nullterm", dataspace, datatype).write(value);
+            check(proxy.get(label + "_nullterm").template read<value_t>(), value);
+        }
+
+        SECTION(label + " null-padded") {
+            auto datatype = FixedLengthStringType(length, StringPadding::NullPadded);
+            proxy.create(label + "_nullpad", dataspace, datatype).write(value);
+            auto expected = make_padded_reference('\0', length);
+            check(proxy.get(label + "_nullpad").template read<value_t>(), expected);
+        }
+
+        SECTION(label + " space-padded") {
+            auto datatype = FixedLengthStringType(length, StringPadding::SpacePadded);
+            proxy.create(label + "_spacepad", dataspace, datatype).write(value);
+            auto expected = make_padded_reference(' ', length);
+            check(proxy.get(label + "_spacepad").template read<value_t>(), expected);
+        }
+    };
+
+    check_fixed_length("onpoint", string_length);
+    check_fixed_length("overlength", string_length + 5);
+
+
+    SECTION("underlength null-terminated") {
+        auto datatype = FixedLengthStringType(string_length, StringPadding::NullTerminated);
+        REQUIRE_THROWS(proxy.create("underlength_nullterm", dataspace, datatype).write(value));
+    }
+
+    SECTION("underlength null-padded") {
+        auto datatype = FixedLengthStringType(string_length - 1, StringPadding::NullPadded);
+        REQUIRE_THROWS(proxy.create("underlength_nullpad", dataspace, datatype).write(value));
+    }
+
+    SECTION("underlength space-padded") {
+        auto datatype = FixedLengthStringType(string_length - 1, StringPadding::SpacePadded);
+        REQUIRE_THROWS(proxy.create("underlength_spacepad", dataspace, datatype).write(value));
+    }
+}
+
+TEST_CASE("HighFiveSTDString (dataset, single, short)") {
+    File file("std_string_dataset_single_short.h5", File::Truncate);
+    check_single_string(ForwardToDataSet(file), 3);
+}
+
+TEST_CASE("HighFiveSTDString (attribute, single, short)") {
+    File file("std_string_attribute_single_short.h5", File::Truncate);
+    check_single_string(ForwardToAttribute(file), 3);
+}
+
+TEST_CASE("HighFiveSTDString (dataset, single, long)") {
+    File file("std_string_dataset_single_long.h5", File::Truncate);
+    check_single_string(ForwardToDataSet(file), 256);
+}
+
+TEST_CASE("HighFiveSTDString (attribute, single, long)") {
+    File file("std_string_attribute_single_long.h5", File::Truncate);
+    check_single_string(ForwardToAttribute(file), 256);
+}
+
+TEST_CASE("HighFiveSTDString (dataset, multiple, short)") {
+    File file("std_string_dataset_multiple_short.h5", File::Truncate);
+    check_multiple_string(ForwardToDataSet(file), 3);
+}
+
+TEST_CASE("HighFiveSTDString (attribute, multiple, short)") {
+    File file("std_string_attribute_multiple_short.h5", File::Truncate);
+    check_multiple_string(ForwardToAttribute(file), 3);
+}
+
+TEST_CASE("HighFiveSTDString (dataset, multiple, long)") {
+    File file("std_string_dataset_multiple_long.h5", File::Truncate);
+    check_multiple_string(ForwardToDataSet(file), 256);
+}
+
+TEST_CASE("HighFiveSTDString (attribute, multiple, long)") {
+    File file("std_string_attribute_multiple_long.h5", File::Truncate);
+    check_multiple_string(ForwardToAttribute(file), 256);
+}
+
+TEST_CASE("HighFiveFixedString") {
+    const std::string file_name("array_atomic_types.h5");
+    const std::string group_1("group1");
+
+    // Create a new file using the default property lists.
+    File file(file_name, File::ReadWrite | File::Create | File::Truncate);
+    char raw_strings[][10] = {"abcd", "1234"};
+
+    /// This will not compile: only char arrays are accepted, so anything else
+    /// (e.g. int[10]) hits a static_assert with a clear error.
+    // file.createDataSet<int[10]>(ds_name, DataSpace(2));
+
+    {  // But char should be fine
+        auto ds = file.createDataSet<char[10]>("ds1", DataSpace(2));
+        CHECK(ds.getDataType().getClass() == DataTypeClass::String);
+        ds.write(raw_strings);
+    }
+
+    {  // Without an explicit string type, a char array is stored as int8 integers
+        auto ds2 = file.createDataSet("ds2", raw_strings);
+        CHECK(ds2.getDataType().getClass() == DataTypeClass::Integer);
+    }
+
+    {  // String truncation happens at the HDF5 level when the dataspace is set up for it
+        auto ds3 = file.createDataSet<char[6]>("ds3", DataSpace::FromCharArrayStrings(raw_strings));
+        ds3.write(raw_strings);
+    }
+
+    {  // Write as raw elements from pointer (with const)
+        const char(*strings_fixed)[10] = raw_strings;
+        // With a bare pointer we don't know how many strings there are -> manual DataSpace
+        file.createDataSet<char[10]>("ds4", DataSpace(2)).write(strings_fixed);
+    }
+
+
+    {  // Can't convert variable-length strings to fixed-length ones
+        const char* buffer[] = {"abcd", "1234"};
+        SilenceHDF5 silencer;
+        CHECK_THROWS_AS(file.createDataSet<char[10]>("ds5", DataSpace(2)).write(buffer),
+                        HighFive::DataSetException);
+    }
+
+    {  // Scalar char strings
+        const char buffer[] = "abcd";
+        file.createDataSet<char[10]>("ds6", DataSpace(1)).write(buffer);
+    }
+
+    {  // Dedicated FixedLenStringArray
+        FixedLenStringArray<10> arr{"0000000", "1111111"};
+
+        // Exercise more of the API: append another string
+        arr.push_back("2222");
+        auto ds = file.createDataSet("ds7", arr);  // Short syntax ok
+
+        // Read back into shorter strings, truncating each one
+        FixedLenStringArray<4> array_back;
+        ds.read(array_back);
+        CHECK(array_back.size() == 3);
+        CHECK(array_back[0] == std::string("000"));
+        CHECK(array_back[1] == std::string("111"));
+        CHECK(array_back[2] == std::string("222"));
+        CHECK(array_back.getString(1) == "111");
+        CHECK(array_back.front() == std::string("000"));
+        CHECK(array_back.back() == std::string("222"));
+        CHECK(array_back.data() == std::string("000"));
+        array_back.data()[0] = 'x';
+        CHECK(array_back.data() == std::string("x00"));
+
+        for (auto& raw_elem: array_back) {
+            raw_elem[1] = 'y';
+        }
+        CHECK(array_back.getString(1) == "1y1");
+        for (auto iter = array_back.cbegin(); iter != array_back.cend(); ++iter) {
+            CHECK((*iter)[1] == 'y');
+        }
+    }
+
+    {
+        // Direct way of writing `std::string` as a fixed length
+        // HDF5 string.
+
+        std::string value = "foo";
+        auto n_chars = value.size() + 1;
+
+        auto datatype = FixedLengthStringType(n_chars, StringPadding::NullTerminated);
+        auto dataspace = DataSpace(1);
+
+        auto ds = file.createDataSet("ds8", dataspace, datatype);
+        ds.write_raw(value.data(), datatype);
+
+        {
+            // Due to missing non-const overload of `data()` until C++17 we'll
+            // read into something else instead (don't forget the '\0').
+            auto expected = std::vector<char>(n_chars, '!');
+            ds.read(expected.data(), datatype);
+
+            CHECK(expected.size() == value.size() + 1);
+            for (size_t i = 0; i < value.size(); ++i) {
+                REQUIRE(expected[i] == value[i]);
+            }
+        }
+
+#if HIGHFIVE_CXX_STD >= 17
+        {
+            auto expected = std::string(value.size(), '-');
+            ds.read(expected.data(), datatype);
+
+            REQUIRE(expected == value);
+        }
+#endif
+    }
+
+    {
+        size_t n_chars = 4;
+        size_t n_strings = 2;
+
+        std::vector<char> value(n_chars * n_strings, '!');
+
+        auto datatype = FixedLengthStringType(n_chars, StringPadding::NullTerminated);
+        auto dataspace = DataSpace(n_strings);
+
+        auto ds = file.createDataSet("ds9", dataspace, datatype);
+        ds.write_raw(value.data(), datatype);
+
+        auto expected = std::vector<char>(value.size(), '-');
+        ds.read(expected.data(), datatype);
+
+        CHECK(expected.size() == value.size());
+        for (size_t i = 0; i < value.size(); ++i) {
+            REQUIRE(expected[i] == value[i]);
+        }
+    }
+}
+
+template <size_t N>
+static void check_fixed_len_string_array_contents(const FixedLenStringArray<N>& array,
+                                                  const std::vector<std::string>& expected) {
+    REQUIRE(array.size() == expected.size());
+
+    for (size_t i = 0; i < array.size(); ++i) {
+        CHECK(array[i] == expected[i]);
+    }
+}
+
+TEST_CASE("HighFiveFixedLenStringArrayStructure") {
+    using fixed_array_t = FixedLenStringArray<10>;
+    // Increment every character of a string stored in a std::array.
+    auto increment_string = [](const fixed_array_t::value_type& arr) {
+        fixed_array_t::value_type output(arr);
+        for (auto& c: output) {
+            if (c == 0) {
+                break;
+            }
+            ++c;
+        }
+        return output;
+    };
+
+    SECTION("create from std::vector (onpoint)") {
+        auto expected = std::vector<std::string>{"000", "111"};
+        auto actual = FixedLenStringArray<4>(expected);
+        check_fixed_len_string_array_contents(actual, expected);
+    }
+
+    SECTION("create from std::vector (oversized)") {
+        auto expected = std::vector<std::string>{"000", "111"};
+        auto actual = FixedLenStringArray<8>(expected);
+        check_fixed_len_string_array_contents(actual, expected);
+    }
+
+    SECTION("create from pointers (onpoint)") {
+        auto expected = std::vector<std::string>{"000", "111"};
+        auto actual = FixedLenStringArray<4>(expected.data(), expected.data() + expected.size());
+        check_fixed_len_string_array_contents(actual, expected);
+    }
+
+    SECTION("create from pointers (oversized)") {
+        auto expected = std::vector<std::string>{"000", "111"};
+        auto actual = FixedLenStringArray<8>(expected.data(), expected.data() + expected.size());
+        check_fixed_len_string_array_contents(actual, expected);
+    }
+
+
+    SECTION("create from std::initializer_list (onpoint)") {
+        auto expected = std::vector<std::string>{"000", "111"};
+        auto actual = FixedLenStringArray<4>{"000", "111"};
+        check_fixed_len_string_array_contents(actual, expected);
+    }
+
+    SECTION("create from std::initializer_list (oversized)") {
+        auto expected = std::vector<std::string>{"000", "111"};
+        auto actual = FixedLenStringArray<8>{"000", "111"};
+        check_fixed_len_string_array_contents(actual, expected);
+    }
+
+    // manipulate FixedLenStringArray with std::copy
+    SECTION("compatible with std::copy") {
+        const fixed_array_t arr1{"0000000", "1111111"};
+        fixed_array_t arr2{"0000000", "1111111"};
+        std::copy(arr1.begin(), arr1.end(), std::back_inserter(arr2));
+        CHECK(arr2.size() == 4);
+    }
+
+    SECTION("compatible with std::transform") {
+        fixed_array_t arr;
+        {
+            const fixed_array_t arr1{"0000000", "1111111"};
+            std::transform(arr1.begin(), arr1.end(), std::back_inserter(arr), increment_string);
+        }
+        CHECK(arr.size() == 2);
+        CHECK(arr[0] == std::string("1111111"));
+        CHECK(arr[1] == std::string("2222222"));
+    }
+
+    SECTION("compatible with std::transform (reverse iterator)") {
+        fixed_array_t arr;
+        {
+            const fixed_array_t arr1{"0000000", "1111111"};
+            std::copy(arr1.rbegin(), arr1.rend(), std::back_inserter(arr));
+        }
+        CHECK(arr.size() == 2);
+        CHECK(arr[0] == std::string("1111111"));
+        CHECK(arr[1] == std::string("0000000"));
+    }
+
+    SECTION("compatible with std::remove_copy_if") {
+        fixed_array_t arr2;
+        {
+            const fixed_array_t arr1{"0000000", "1111111"};
+            std::remove_copy_if(arr1.begin(),
+                                arr1.end(),
+                                std::back_inserter(arr2),
+                                [](const fixed_array_t::value_type& s) {
+                                    return std::strncmp(s.data(), "1111111", 7) == 0;
+                                });
+        }
+        CHECK(arr2.size() == 1);
+        CHECK(arr2[0] == std::string("0000000"));
+    }
+}
+
+TEST_CASE("HighFiveFixedLenStringArrayAttribute") {
+    const std::string file_name("fixed_array_attr.h5");
+    // Create a new file using the default property lists.
+    {
+        File file(file_name, File::ReadWrite | File::Create | File::Truncate);
+        FixedLenStringArray<10> arr{"Hello", "world"};
+        file.createAttribute("str", arr);
+    }
+    // Re-read it
+    {
+        File file(file_name);
+        FixedLenStringArray<8> arr;  // notice the output strings can be smaller
+        file.getAttribute("str").read(arr);
+        CHECK(arr.size() == 2);
+        CHECK(arr[0] == std::string("Hello"));
+        CHECK(arr[1] == std::string("world"));
+    }
+}
+
+TEST_CASE("HighFiveReference") {
+    const std::string file_name("h5_ref_test.h5");
+    const std::string dataset1_name("dset1");
+    const std::string dataset2_name("dset2");
+    const std::string group_name("/group1");
+    const std::string refgroup_name("/group2");
+    const std::string refdataset_name("dset2");
+
+    ContentGenerate<double> generator;
+    std::vector<double> vec1(4);
+    std::vector<double> vec2(4);
+    std::generate(vec1.begin(), vec1.end(), generator);
+    std::generate(vec2.begin(), vec2.end(), generator);
+    {
+        // Create a new file using the default property lists.
+        File file(file_name, File::ReadWrite | File::Create | File::Truncate);
+
+        // create group
+        Group g1 = file.createGroup(group_name);
+
+        // create datasets and write some data
+        DataSet dataset1 = g1.createDataSet(dataset1_name, vec1);
+        DataSet dataset2 = g1.createDataSet(dataset2_name, vec2);
+
+        // create group to hold reference
+        Group refgroup = file.createGroup(refgroup_name);
+
+        // create the references and write them into a new dataset inside refgroup
+        auto references = std::vector<Reference>({{g1, dataset1}, {file, g1}});
+        DataSet ref_ds = refgroup.createDataSet(refdataset_name, references);
+    }
+    // read it back
+    {
+        File file(file_name, File::ReadOnly);
+        Group refgroup = file.getGroup(refgroup_name);
+
+        DataSet refdataset = refgroup.getDataSet(refdataset_name);
+        CHECK(2 == refdataset.getSpace().getDimensions()[0]);
+        auto refs = std::vector<Reference>();
+        refdataset.read(refs);
+        CHECK_THROWS_AS(refs[0].dereference<Group>(file), HighFive::ReferenceException);
+        auto data_ds = refs[0].dereference<DataSet>(file);
+        std::vector<double> rdata;
+        data_ds.read(rdata);
+        for (size_t i = 0; i < rdata.size(); ++i) {
+            CHECK(rdata[i] == vec1[i]);
+        }
+
+        auto group = refs[1].dereference<Group>(file);
+        DataSet data_ds2 = group.getDataSet(dataset2_name);
+        std::vector<double> rdata2;
+        data_ds2.read(rdata2);
+        for (size_t i = 0; i < rdata2.size(); ++i) {
+            CHECK(rdata2[i] == vec2[i]);
+        }
+    }
+}
+
+TEST_CASE("HighFiveReadWriteConsts") {
+    const std::string file_name("3d_dataset_from_flat.h5");
+    const std::string dataset_name("dset");
+    const std::array<std::size_t, 3> DIMS{3, 3, 3};
+    using datatype = int;
+
+    File file(file_name, File::ReadWrite | File::Create | File::Truncate);
+    DataSpace dataspace = DataSpace(DIMS);
+
+    DataSet dataset = file.createDataSet<datatype>(dataset_name, dataspace);
+    std::vector<datatype> const t1(DIMS[0] * DIMS[1] * DIMS[2], 1);
+    auto raw_3d_vec_const = reinterpret_cast<datatype const* const* const*>(t1.data());
+    dataset.write(raw_3d_vec_const);
+
+    std::vector<std::vector<std::vector<datatype>>> result;
+    dataset.read(result);
+    for (const auto& vec2d: result) {
+        for (const auto& vec1d: vec2d) {
+            REQUIRE(vec1d == (std::vector<datatype>{1, 1, 1}));
+        }
+    }
+}
+
+TEST_CASE("HighFiveDataTypeClass") {
+    auto Float = DataTypeClass::Float;
+    auto String = DataTypeClass::String;
+    auto Invalid = DataTypeClass::Invalid;
+
+    CHECK(Float != Invalid);
+
+    CHECK((Float & Float) == Float);
+    CHECK((Float | Float) == Float);
+
+    CHECK((Float & String) == Invalid);
+    CHECK((Float | String) != Invalid);
+
+    CHECK(((Float & String) & Float) == Invalid);
+    CHECK(((Float | String) & Float) == Float);
+    CHECK(((Float | String) & String) == String);
+}
+
+#ifdef H5_USE_EIGEN
+
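+// Round-trip helper: write `vec_input` under a flavored dataset name, read it
+// back into `vec_output`, and compare.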
+template <typename T>
+void test_eigen_vec(File& file, const std::string& test_flavor, const T& vec_input, T& vec_output) {
+    const std::string ds_name = "ds";
+    file.createDataSet(ds_name + test_flavor, vec_input).write(vec_input);
+    file.getDataSet(ds_name + test_flavor).read(vec_output);
+    CHECK(vec_input == vec_output);
+}
+
+TEST_CASE("HighFiveEigen") {
+    const std::string file_name("test_eigen.h5");
+
+    // Create a new file using the default property lists.
+    File file(file_name, File::ReadWrite | File::Create | File::Truncate);
+    std::string ds_name_flavor;
+
+    // std::vector<of vector <of POD>>
+    {
+        ds_name_flavor = "VectorOfVectorOfPOD";
+        std::vector<std::vector<float>> vec_in{{5.0f, 6.0f, 7.0f},
+                                               {5.1f, 6.1f, 7.1f},
+                                               {5.2f, 6.2f, 7.2f}};
+        std::vector<std::vector<float>> vec_out;
+        test_eigen_vec(file, ds_name_flavor, vec_in, vec_out);
+    }
+
+    // std::vector<Eigen::Vector3d>
+    {
+        ds_name_flavor = "VectorOfEigenVector3d";
+        std::vector<Eigen::Vector3d> vec_in{{5.0, 6.0, 7.0}, {7.0, 8.0, 9.0}};
+        std::vector<Eigen::Vector3d> vec_out;
+        test_eigen_vec(file, ds_name_flavor, vec_in, vec_out);
+    }
+
+    // Eigen Vector2d
+    {
+        ds_name_flavor = "EigenVector2d";
+        Eigen::Vector2d vec_in{5.0, 6.0};
+        Eigen::Vector2d vec_out;
+
+        test_eigen_vec(file, ds_name_flavor, vec_in, vec_out);
+    }
+
+    // Eigen Matrix
+    {
+        ds_name_flavor = "EigenMatrix";
+        Eigen::Matrix<double, 3, 3> vec_in;
+        vec_in << 1, 2, 3, 4, 5, 6, 7, 8, 9;
+        Eigen::Matrix<double, 3, 3> vec_out;
+
+        CHECK_THROWS(test_eigen_vec(file, ds_name_flavor, vec_in, vec_out));
+    }
+
+    // Eigen MatrixXd
+    {
+        ds_name_flavor = "EigenMatrixXd";
+        Eigen::MatrixXd vec_in = 100. * Eigen::MatrixXd::Random(20, 5);
+        Eigen::MatrixXd vec_out(20, 5);
+
+        CHECK_THROWS(test_eigen_vec(file, ds_name_flavor, vec_in, vec_out));
+    }
+
+    // std::vector<of EigenMatrixXd>
+    {
+        ds_name_flavor = "VectorEigenMatrixXd";
+
+        Eigen::MatrixXd m1 = 100. * Eigen::MatrixXd::Random(20, 5);
+        Eigen::MatrixXd m2 = 100. * Eigen::MatrixXd::Random(20, 5);
+        std::vector<Eigen::MatrixXd> vec_in;
+        vec_in.push_back(m1);
+        vec_in.push_back(m2);
+        std::vector<Eigen::MatrixXd> vec_out(2, Eigen::MatrixXd::Zero(20, 5));
+
+        CHECK_THROWS(test_eigen_vec(file, ds_name_flavor, vec_in, vec_out));
+    }
+
+#ifdef H5_USE_BOOST
+    // boost::multi_array<of EigenVector3f>
+    {
+        ds_name_flavor = "BMultiEigenVector3f";
+
+        boost::multi_array<Eigen::Vector3f, 3> vec_in(boost::extents[3][2][2]);
+        for (int i = 0; i < 3; ++i) {
+            for (int j = 0; j < 2; ++j) {
+                for (int k = 0; k < 2; ++k) {
+                    vec_in[i][j][k] = Eigen::Vector3f::Random(3);
+                }
+            }
+        }
+        boost::multi_array<Eigen::Vector3f, 3> vec_out(boost::extents[3][2][2]);
+
+        test_eigen_vec(file, ds_name_flavor, vec_in, vec_out);
+    }
+
+    // boost::multi_array<of EigenMatrixXd>
+    {
+        ds_name_flavor = "BMultiEigenMatrixXd";
+
+        boost::multi_array<Eigen::MatrixXd, 3> vec_in(boost::extents[3][2][2]);
+        for (int i = 0; i < 3; ++i) {
+            for (int j = 0; j < 2; ++j) {
+                for (int k = 0; k < 2; ++k) {
+                    vec_in[i][j][k] = Eigen::MatrixXd::Random(3, 3);
+                }
+            }
+        }
+        boost::multi_array<Eigen::MatrixXd, 3> vec_out(boost::extents[3][2][2]);
+        for (int i = 0; i < 3; ++i) {
+            for (int j = 0; j < 2; ++j) {
+                for (int k = 0; k < 2; ++k) {
+                    vec_out[i][j][k] = Eigen::MatrixXd::Zero(3, 3);
+                }
+            }
+        }
+
+        CHECK_THROWS(test_eigen_vec(file, ds_name_flavor, vec_in, vec_out));
+    }
+
+#endif
+}
+#endif
+
+TEST_CASE("Logging") {
+    struct TestLogger {
+        LogSeverity last_log_severity = LogSeverity(11);
+        std::string last_message = "---";
+
+        void operator()(LogSeverity log_severity,
+                        const std::string& message,
+                        const std::string&,
+                        int) {
+            std::cout << "Something got logged: " << message << " " << to_string(log_severity)
+                      << std::endl;
+            last_log_severity = log_severity;
+            last_message = message;
+        }
+    };
+
+    auto test_logger = TestLogger();
+
+    register_logging_callback(
+        [&test_logger](LogSeverity log_severity,
+                       const std::string& message,
+                       const std::string& file,
+                       int line) { test_logger(log_severity, message, file, line); });
+
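+    // Logging is filtered at compile time: a message is only forwarded when
+    // HIGHFIVE_LOG_LEVEL admits its severity, hence the two-sided check.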
+    auto check = [&test_logger](bool should_log, const auto& message, LogSeverity log_severity) {
+        if (should_log && (HIGHFIVE_LOG_LEVEL <= int(log_severity))) {
+            REQUIRE(test_logger.last_message == message);
+            REQUIRE(test_logger.last_log_severity == log_severity);
+        } else {
+            REQUIRE(test_logger.last_message != message);
+            REQUIRE(test_logger.last_log_severity != log_severity);
+        }
+    };
+
+    SECTION("LOG_DEBUG") {
+        auto message = "Debug!";
+        HIGHFIVE_LOG_DEBUG(message);
+        check(true, message, LogSeverity::Debug);
+    }
+
+    SECTION("LOG_DEBUG_IF true") {
+        auto message = "DEBUG_IF true!";
+        HIGHFIVE_LOG_DEBUG_IF(true, message);
+        check(true, message, LogSeverity::Debug);
+    }
+
+    SECTION("LOG_DEBUG_IF false") {
+        auto message = "DEBUG_IF false!";
+        HIGHFIVE_LOG_DEBUG_IF(false, message);
+        check(false, message, LogSeverity::Debug);
+    }
+
+    SECTION("LOG_INFO") {
+        auto message = "Info!";
+        HIGHFIVE_LOG_INFO(message);
+        check(true, message, LogSeverity::Info);
+    }
+
+    SECTION("LOG_INFO_IF true") {
+        auto message = "INFO_IF true!";
+        HIGHFIVE_LOG_INFO_IF(true, message);
+        check(true, message, LogSeverity::Info);
+    }
+
+    SECTION("LOG_INFO_IF false") {
+        auto message = "INFO_IF false!";
+        HIGHFIVE_LOG_INFO_IF(false, message);
+        check(false, message, LogSeverity::Info);
+    }
+
+    SECTION("LOG_WARN") {
+        auto message = "Warn!";
+        HIGHFIVE_LOG_WARN(message);
+        check(true, message, LogSeverity::Warn);
+    }
+
+    SECTION("LOG_WARN_IF true") {
+        auto message = "WARN_IF true!";
+        HIGHFIVE_LOG_WARN_IF(true, message);
+        check(true, message, LogSeverity::Warn);
+    }
+
+    SECTION("LOG_WARN_IF false") {
+        auto message = "WARN_IF false!";
+        HIGHFIVE_LOG_WARN_IF(false, message);
+        check(false, message, LogSeverity::Warn);
+    }
+
+    SECTION("LOG_ERROR") {
+        auto message = "Error!";
+        HIGHFIVE_LOG_ERROR(message);
+        check(true, message, LogSeverity::Error);
+    }
+
+    SECTION("LOG_ERROR_IF true") {
+        auto message = "ERROR_IF true!";
+        HIGHFIVE_LOG_ERROR_IF(true, message);
+        check(true, message, LogSeverity::Error);
+    }
+
+    SECTION("LOG_ERROR_IF false") {
+        auto message = "ERROR_IF false!";
+        HIGHFIVE_LOG_ERROR_IF(false, message);
+        check(false, message, LogSeverity::Error);
+    }
+}
+
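+// Two-level expansion so that the *value* of HIGHFIVE_VERSION, not the macro
+// name itself, gets stringified.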
+#define HIGHFIVE_STRINGIFY_VALUE(s) HIGHFIVE_STRINGIFY_NAME(s)
+#define HIGHFIVE_STRINGIFY_NAME(s)  #s
+
+
+TEST_CASE("Version Numbers") {
+    int major = HIGHFIVE_VERSION_MAJOR;
+    int minor = HIGHFIVE_VERSION_MINOR;
+    int patch = HIGHFIVE_VERSION_PATCH;
+    std::string version = HIGHFIVE_STRINGIFY_VALUE(HIGHFIVE_VERSION);
+
+    auto expected = std::to_string(major) + "." + std::to_string(minor) + "." +
+                    std::to_string(patch);
+
+    CHECK(version == expected);
+    CHECK(HIGHFIVE_VERSION_STRING == expected);
+}
+
+#undef HIGHFIVE_STRINGIFY_VALUE
+#undef HIGHFIVE_STRINGIFY_NAME
diff --git a/packages/HighFive/tests/unit/tests_high_five_easy.cpp b/packages/HighFive/tests/unit/tests_high_five_easy.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e003c323401a9460332883abd618fef10a9a169d
--- /dev/null
+++ b/packages/HighFive/tests/unit/tests_high_five_easy.cpp
@@ -0,0 +1,476 @@
+/*
+ *  Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+
+#include <complex>
+#include <cstdio>
+#include <cstdlib>
+#include <ctime>
+#include <iostream>
+#include <memory>
+#include <random>
+#include <string>
+#include <typeinfo>
+#include <vector>
+
+#include <highfive/H5Easy.hpp>
+
+#ifdef H5_USE_XTENSOR
+#include <xtensor/xrandom.hpp>
+#include <xtensor/xview.hpp>
+#endif
+
+#include <catch2/catch_test_macros.hpp>
+
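+// H5Easy::Compression() defaults to the maximum gzip level (9); an integer
+// argument selects a specific level and Compression(false) disables it.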
+TEST_CASE("H5Easy_Compression") {
+    {
+        H5Easy::DumpOptions options = H5Easy::DumpOptions(H5Easy::Compression());
+        CHECK(options.compress());
+        CHECK(options.getCompressionLevel() == 9);
+    }
+
+    {
+        H5Easy::DumpOptions options(H5Easy::Compression(true));
+        CHECK(options.compress());
+        CHECK(options.getCompressionLevel() == 9);
+    }
+
+    {
+        H5Easy::DumpOptions options(H5Easy::Compression(false));
+        CHECK(!options.compress());
+        CHECK(options.getCompressionLevel() == 0);
+    }
+
+    {
+        H5Easy::DumpOptions options(H5Easy::Compression(8));
+        CHECK(options.compress());
+        CHECK(options.getCompressionLevel() == 8);
+    }
+}
+
+TEST_CASE("H5Easy_scalar") {
+    H5Easy::File file("h5easy_scalar.h5", H5Easy::File::Overwrite);
+
+    double a = 1.2345;
+    int b = 12345;
+    std::string c = "12345";
+    std::complex<double> d = std::complex<double>(1.2345, -5.4321);
+    std::complex<int32_t> e = std::complex<int32_t>(12345, -54321);
+
+    H5Easy::dump(file, "/path/to/a", a);
+    H5Easy::dump(file, "/path/to/b", b);
+    H5Easy::dump(file, "/path/to/c", c);
+    H5Easy::dump(file, "/path/to/c", c, H5Easy::DumpMode::Overwrite);
+    H5Easy::dump(file, "/path/to/d", d);
+    H5Easy::dump(file, "/path/to/e", e);
+
+    double a_r = H5Easy::load<double>(file, "/path/to/a");
+    int b_r = H5Easy::load<int>(file, "/path/to/b");
+    std::string c_r = H5Easy::load<std::string>(file, "/path/to/c");
+    std::complex<double> d_r = H5Easy::load<std::complex<double>>(file, "/path/to/d");
+    std::complex<int32_t> e_r = H5Easy::load<std::complex<int32_t>>(file, "/path/to/e");
+
+    CHECK(a == a_r);
+    CHECK(b == b_r);
+    CHECK(c == c_r);
+    CHECK(d == d_r);
+    CHECK(e == e_r);
+}
+
+TEST_CASE("H5Easy_vector1d") {
+    H5Easy::File file("h5easy_vector1d.h5", H5Easy::File::Overwrite);
+
+    std::vector<size_t> a = {1, 2, 3, 4, 5};
+    std::vector<std::complex<double>> b = {std::complex<double>(1, .1),
+                                           std::complex<double>(2, -.4),
+                                           std::complex<double>(3, .9),
+                                           std::complex<double>(4, -.16),
+                                           std::complex<double>(5, .25)};
+    std::vector<std::complex<int32_t>> c = {std::complex<int32_t>(1, -5),
+                                            std::complex<int32_t>(2, -4),
+                                            std::complex<int32_t>(3, -3),
+                                            std::complex<int32_t>(4, -2),
+                                            std::complex<int32_t>(5, -1)};
+
+    H5Easy::dump(file, "/path/to/a", a);
+    H5Easy::dump(file, "/path/to/b", b);
+    H5Easy::dump(file, "/path/to/c", c);
+
+    std::vector<size_t> a_r = H5Easy::load<std::vector<size_t>>(file, "/path/to/a");
+    std::vector<std::complex<double>> b_r =
+        H5Easy::load<std::vector<std::complex<double>>>(file, "/path/to/b");
+    std::vector<std::complex<int32_t>> c_r =
+        H5Easy::load<std::vector<std::complex<int32_t>>>(file, "/path/to/c");
+
+    CHECK(a == a_r);
+    CHECK(b == b_r);
+    CHECK(c == c_r);
+}
+
+TEST_CASE("H5Easy_vector2d") {
+    H5Easy::File file("h5easy_vector2d.h5", H5Easy::File::Overwrite);
+
+    std::vector<std::vector<size_t>> a({{0, 1}, {2, 3}, {4, 5}});
+
+    H5Easy::dump(file, "/path/to/a", a);
+
+    decltype(a) a_r = H5Easy::load<decltype(a)>(file, "/path/to/a");
+
+    CHECK(a == a_r);
+}
+
+TEST_CASE("H5Easy_vector2d_compression") {
+    H5Easy::File file("h5easy_vector2d_compression.h5", H5Easy::File::Overwrite);
+
+    std::vector<std::vector<size_t>> a({{0, 1}, {2, 3}, {4, 5}});
+
+    H5Easy::dump(file, "/path/to/a", a, H5Easy::DumpOptions(H5Easy::Compression(9)));
+
+    H5Easy::dump(file,
+                 "/path/to/a",
+                 a,
+                 H5Easy::DumpOptions(H5Easy::Compression(), H5Easy::DumpMode::Overwrite));
+
+    decltype(a) a_r = H5Easy::load<decltype(a)>(file, "/path/to/a");
+
+    CHECK(a == a_r);
+}
+
+TEST_CASE("H5Easy_vector3d") {
+    H5Easy::File file("h5easy_vector3d.h5", H5Easy::File::Overwrite);
+
+    using type = std::vector<std::vector<std::vector<size_t>>>;
+
+    type a({{{0, 1}, {2, 3}}, {{4, 5}, {6, 7}}, {{8, 9}, {10, 11}}});
+
+    H5Easy::dump(file, "/path/to/a", a);
+
+    type a_r = H5Easy::load<type>(file, "/path/to/a");
+
+    CHECK(a == a_r);
+}
+
+TEST_CASE("H5Easy_Attribute_scalar") {
+    H5Easy::File file("h5easy_attribute_scalar.h5", H5Easy::File::Overwrite);
+
+    double a = 1.2345;
+    int b = 12345;
+    std::string c = "12345";
+
+    H5Easy::dump(file, "/path/to/a", a);
+    H5Easy::dumpAttribute(file, "/path/to/a", "a", a);
+    H5Easy::dumpAttribute(file, "/path/to/a", "a", a, H5Easy::DumpMode::Overwrite);
+    H5Easy::dumpAttribute(file, "/path/to/a", "b", b);
+    H5Easy::dumpAttribute(file, "/path/to/a", "c", c);
+
+    double a_r = H5Easy::loadAttribute<double>(file, "/path/to/a", "a");
+    int b_r = H5Easy::loadAttribute<int>(file, "/path/to/a", "b");
+    std::string c_r = H5Easy::loadAttribute<std::string>(file, "/path/to/a", "c");
+
+    CHECK(a == a_r);
+    CHECK(b == b_r);
+    CHECK(c == c_r);
+}
+
+#ifdef H5_USE_XTENSOR
+TEST_CASE("H5Easy_extend1d") {
+    H5Easy::File file("h5easy_extend1d.h5", H5Easy::File::Overwrite);
+
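+    // Dumping a scalar at index {i} creates an extendible dataset and grows
+    // it one element per iteration.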
+    for (size_t i = 0; i < 10; ++i) {
+        H5Easy::dump(file, "/path/to/A", i, {i});
+    }
+
+    xt::xarray<size_t> A = xt::arange<size_t>(10);
+
+    xt::xarray<size_t> A_r = H5Easy::load<xt::xarray<size_t>>(file, "/path/to/A");
+
+    size_t Amax = H5Easy::load<size_t>(file, "/path/to/A", {9});
+
+    CHECK(xt::allclose(A, A_r));
+    CHECK(Amax == 9);
+}
+
+TEST_CASE("H5Easy_extend2d") {
+    H5Easy::File file("h5easy_extend2d.h5", H5Easy::File::Overwrite);
+
+    for (size_t i = 0; i < 10; ++i) {
+        for (size_t j = 0; j < 5; ++j) {
+            H5Easy::dump(file, "/path/to/A", i * 5 + j, {i, j});
+        }
+    }
+
+    xt::xarray<size_t> A = xt::arange<size_t>(10 * 5);
+
+    A.reshape({10, 5});
+
+    xt::xarray<size_t> A_r = H5Easy::load<xt::xarray<size_t>>(file, "/path/to/A");
+
+    size_t Amax = H5Easy::load<size_t>(file, "/path/to/A", {9, 4});
+
+    CHECK(xt::allclose(A, A_r));
+    CHECK(Amax == 49);
+}
+
+TEST_CASE("H5Easy_xtensor") {
+    H5Easy::File file("h5easy_xtensor.h5", H5Easy::File::Overwrite);
+
+    xt::xtensor<double, 2> A = 100. * xt::random::randn<double>({20, 5});
+    xt::xtensor<int, 2> B = A;
+
+    H5Easy::dump(file, "/path/to/A", A);
+    H5Easy::dump(file, "/path/to/B", B);
+
+    xt::xtensor<double, 2> A_r = H5Easy::load<xt::xtensor<double, 2>>(file, "/path/to/A");
+    xt::xtensor<int, 2> B_r = H5Easy::load<xt::xtensor<int, 2>>(file, "/path/to/B");
+
+    CHECK(xt::allclose(A, A_r));
+    CHECK(xt::all(xt::equal(B, B_r)));
+}
+
+TEST_CASE("H5Easy_xarray") {
+    H5Easy::File file("h5easy_xarray.h5", H5Easy::File::Overwrite);
+
+    xt::xarray<double> A = 100. * xt::random::randn<double>({20, 5});
+    xt::xarray<int> B = A;
+
+    H5Easy::dump(file, "/path/to/A", A);
+    H5Easy::dump(file, "/path/to/B", B);
+
+    xt::xarray<double> A_r = H5Easy::load<xt::xarray<double>>(file, "/path/to/A");
+    xt::xarray<int> B_r = H5Easy::load<xt::xarray<int>>(file, "/path/to/B");
+
+    CHECK(xt::allclose(A, A_r));
+    CHECK(xt::all(xt::equal(B, B_r)));
+}
+
+TEST_CASE("H5Easy_view") {
+    H5Easy::File file("h5easy_view.h5", H5Easy::File::Overwrite);
+
+    xt::xtensor<double, 2> A = 100. * xt::random::randn<double>({20, 5});
+    auto a = xt::view(A, xt::range(0, 10), xt::range(0, 10));
+
+    H5Easy::dump(file, "/path/to/a", a);
+
+    xt::xtensor<double, 2> a_r = H5Easy::load<xt::xtensor<double, 2>>(file, "/path/to/a");
+
+    CHECK(xt::allclose(a, a_r));
+}
+
+TEST_CASE("H5Easy_xtensor_compress") {
+    H5Easy::File file("h5easy_xtensor_compress.h5", H5Easy::File::Overwrite);
+
+    xt::xtensor<double, 2> A = 100. * xt::random::randn<double>({20, 5});
+    xt::xtensor<int, 2> B = A;
+
+    H5Easy::dump(file, "/path/to/A", A, H5Easy::DumpOptions(H5Easy::Compression()));
+
+    H5Easy::dump(file,
+                 "/path/to/A",
+                 A,
+                 H5Easy::DumpOptions(H5Easy::Compression(), H5Easy::DumpMode::Overwrite));
+
+    H5Easy::dump(file, "/path/to/B", B, H5Easy::DumpOptions(H5Easy::Compression()));
+
+    xt::xtensor<double, 2> A_r = H5Easy::load<xt::xtensor<double, 2>>(file, "/path/to/A");
+    xt::xtensor<int, 2> B_r = H5Easy::load<xt::xtensor<int, 2>>(file, "/path/to/B");
+
+    CHECK(xt::allclose(A, A_r));
+    CHECK(xt::all(xt::equal(B, B_r)));
+}
+
+TEST_CASE("H5Easy_Attribute_xtensor") {
+    H5Easy::File file("h5easy_attribute_xtensor.h5", H5Easy::File::Overwrite);
+
+    xt::xtensor<double, 2> A = 100. * xt::random::randn<double>({20, 5});
+    xt::xtensor<int, 2> B = A;
+
+    H5Easy::dump(file, "/path/to/A", A);
+    H5Easy::dumpAttribute(file, "/path/to/A", "A", A);
+    H5Easy::dumpAttribute(file, "/path/to/A", "B", B);
+
+    xt::xtensor<double, 2> A_r =
+        H5Easy::loadAttribute<xt::xtensor<double, 2>>(file, "/path/to/A", "A");
+    xt::xtensor<int, 2> B_r = H5Easy::loadAttribute<xt::xtensor<int, 2>>(file, "/path/to/A", "B");
+
+    CHECK(xt::allclose(A, A_r));
+    CHECK(xt::all(xt::equal(B, B_r)));
+}
+#endif
+
+#ifdef H5_USE_EIGEN
+TEST_CASE("H5Easy_Eigen_MatrixX") {
+    H5Easy::File file("h5easy_eigen_MatrixX.h5", H5Easy::File::Overwrite);
+
+    Eigen::MatrixXd A = 100. * Eigen::MatrixXd::Random(20, 5);
+    Eigen::MatrixXi B = A.cast<int>();
+
+    H5Easy::dump(file, "/path/to/A", A);
+    H5Easy::dump(file, "/path/to/B", B);
+
+    Eigen::MatrixXd A_r = H5Easy::load<Eigen::MatrixXd>(file, "/path/to/A");
+    Eigen::MatrixXi B_r = H5Easy::load<Eigen::MatrixXi>(file, "/path/to/B");
+
+    CHECK(A.isApprox(A_r));
+    CHECK(B.isApprox(B_r));
+}
+
+TEST_CASE("H5Easy_Eigen_ArrayXX") {
+    H5Easy::File file("h5easy_eigen_ArrayXX.h5", H5Easy::File::Overwrite);
+
+    Eigen::ArrayXXf A = 100. * Eigen::ArrayXXf::Random(20, 5);
+    Eigen::ArrayXXi B = A.cast<int>();
+
+    H5Easy::dump(file, "/path/to/A", A);
+    H5Easy::dump(file, "/path/to/B", B);
+
+    Eigen::ArrayXXf A_r = H5Easy::load<Eigen::MatrixXf>(file, "/path/to/A");
+    Eigen::ArrayXXi B_r = H5Easy::load<Eigen::MatrixXi>(file, "/path/to/B");
+
+    CHECK(A.isApprox(A_r));
+    CHECK(B.isApprox(B_r));
+}
+
+TEST_CASE("H5Easy_Eigen_ArrayX") {
+    H5Easy::File file("h5easy_eigen_ArrayX.h5", H5Easy::File::Overwrite);
+
+    Eigen::ArrayXf A = Eigen::ArrayXf::Random(50);
+    Eigen::ArrayXi B = A.cast<int>();
+
+    H5Easy::dump(file, "/path/to/A", A);
+    H5Easy::dump(file, "/path/to/B", B);
+
+    Eigen::ArrayXf A_r = H5Easy::load<Eigen::ArrayXf>(file, "/path/to/A");
+    Eigen::ArrayXi B_r = H5Easy::load<Eigen::ArrayXi>(file, "/path/to/B");
+
+    CHECK(A.isApprox(A_r));
+    CHECK(B.isApprox(B_r));
+}
+
+
+TEST_CASE("H5Easy_Eigen_VectorX") {
+    H5Easy::File file("h5easy_eigen_VectorX.h5", H5Easy::File::Overwrite);
+
+    Eigen::VectorXd A = 100. * Eigen::VectorXd::Random(20);
+    Eigen::VectorXi B = A.cast<int>();
+
+    H5Easy::dump(file, "/path/to/A", A);
+    H5Easy::dump(file, "/path/to/B", B);
+
+    Eigen::VectorXd A_r = H5Easy::load<Eigen::VectorXd>(file, "/path/to/A");
+    Eigen::VectorXi B_r = H5Easy::load<Eigen::VectorXi>(file, "/path/to/B");
+
+    CHECK(A.isApprox(A_r));
+    CHECK(B.isApprox(B_r));
+}
+
+TEST_CASE("H5Easy_Eigen_MatrixXRowMajor") {
+    typedef Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> MatrixXd;
+    typedef Eigen::Matrix<int, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> MatrixXi;
+
+    H5Easy::File file("H5Easy_Eigen_MatrixXRowMajor.h5", H5Easy::File::Overwrite);
+
+    MatrixXd A = 100. * MatrixXd::Random(20, 5);
+    MatrixXi B = A.cast<int>();
+
+    H5Easy::dump(file, "/path/to/A", A);
+    H5Easy::dump(file, "/path/to/B", B);
+
+    MatrixXd A_r = H5Easy::load<MatrixXd>(file, "/path/to/A");
+    MatrixXi B_r = H5Easy::load<MatrixXi>(file, "/path/to/B");
+
+    CHECK(A.isApprox(A_r));
+    CHECK(B.isApprox(B_r));
+}
+
+TEST_CASE("H5Easy_Eigen_VectorXRowMajor") {
+    typedef Eigen::Matrix<double, 1, Eigen::Dynamic, Eigen::RowMajor> VectorXd;
+    typedef Eigen::Matrix<int, 1, Eigen::Dynamic, Eigen::RowMajor> VectorXi;
+
+    H5Easy::File file("h5easy_eigen_VectorXRowMajor.h5", H5Easy::File::Overwrite);
+
+    VectorXd A = 100. * VectorXd::Random(20);
+    VectorXi B = A.cast<int>();
+
+    H5Easy::dump(file, "/path/to/A", A);
+    H5Easy::dump(file, "/path/to/B", B);
+
+    VectorXd A_r = H5Easy::load<VectorXd>(file, "/path/to/A");
+    VectorXi B_r = H5Easy::load<VectorXi>(file, "/path/to/B");
+
+    CHECK(A.isApprox(A_r));
+    CHECK(B.isApprox(B_r));
+}
+
+TEST_CASE("H5Easy_Eigen_Map") {
+    H5Easy::File file("h5easy_eigen_Map.h5", H5Easy::File::Overwrite);
+
+    std::vector<int> A = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    Eigen::Map<Eigen::VectorXi> mapped_vector(A.data(), static_cast<int>(A.size()));
+
+    H5Easy::dump(file, "/path/to/A", mapped_vector);
+
+    std::vector<int> A_r = H5Easy::load<std::vector<int>>(file, "/path/to/A");
+
+    CHECK(A == A_r);
+}
+
+TEST_CASE("H5Easy_Attribute_Eigen_MatrixX") {
+    H5Easy::File file("h5easy_attribute_eigen_MatrixX.h5", H5Easy::File::Overwrite);
+
+    Eigen::MatrixXd A = 100. * Eigen::MatrixXd::Random(20, 5);
+    Eigen::MatrixXi B = A.cast<int>();
+
+    H5Easy::dump(file, "/path/to/A", A);
+    H5Easy::dumpAttribute(file, "/path/to/A", "A", A);
+    H5Easy::dumpAttribute(file, "/path/to/A", "B", B);
+
+    Eigen::MatrixXd A_r = H5Easy::loadAttribute<Eigen::MatrixXd>(file, "/path/to/A", "A");
+    Eigen::MatrixXi B_r = H5Easy::loadAttribute<Eigen::MatrixXi>(file, "/path/to/A", "B");
+
+    CHECK(A.isApprox(A_r));
+    CHECK(B.isApprox(B_r));
+}
+#endif
+
+#ifdef H5_USE_OPENCV
+TEST_CASE("H5Easy_OpenCV_Mat_") {
+    H5Easy::File file("h5easy_opencv_Mat_.h5", H5Easy::File::Overwrite);
+
+    using T = cv::Mat_<double>;
+
+    T A(3, 4, 0.0);
+    A(0, 0) = 0.0;
+    A(0, 1) = 1.0;
+    A(0, 2) = 2.0;
+    A(0, 3) = 3.0;
+    A(1, 0) = 4.0;
+    A(1, 1) = 5.0;
+    A(1, 2) = 6.0;
+    A(1, 3) = 7.0;
+    A(2, 0) = 8.0;
+    A(2, 1) = 9.0;
+    A(2, 2) = 10.0;
+    A(2, 3) = 11.0;
+
+    H5Easy::dump(file, "/path/to/A", A);
+    H5Easy::dumpAttribute(file, "/path/to/A", "attr", A);
+
+
+    T A_r = H5Easy::load<T>(file, "/path/to/A");
+    T B_r = H5Easy::loadAttribute<T>(file, "/path/to/A", "attr");
+
+    std::vector<double> a(A.begin(), A.end());
+    std::vector<double> a_r(A_r.begin(), A_r.end());
+    std::vector<double> b_r(B_r.begin(), B_r.end());
+
+    CHECK(a == a_r);
+    CHECK(a == b_r);
+}
+#endif
diff --git a/packages/HighFive/tests/unit/tests_high_five_multi_dims.cpp b/packages/HighFive/tests/unit/tests_high_five_multi_dims.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..442f1c9cc5d92ceb3a4ef4729d03a724ab900e8c
--- /dev/null
+++ b/packages/HighFive/tests/unit/tests_high_five_multi_dims.cpp
@@ -0,0 +1,213 @@
+/*
+ *  Copyright (c), 2017-2019, Blue Brain Project - EPFL
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+
+#include <string>
+#include <iostream>
+
+#include <highfive/highfive.hpp>
+
+
+#ifdef H5_USE_BOOST
+#include <boost/multi_array.hpp>
+#endif
+
+#include <catch2/catch_test_macros.hpp>
+#include <catch2/catch_template_test_macros.hpp>
+
+#include "tests_high_five.hpp"
+
+using namespace HighFive;
+
+/// \brief Test for 2D old-style arrays (T array[x][y])
+template <typename T>
+void readWrite2DArrayTest() {
+    std::ostringstream filename;
+    filename << "h5_rw_2d_array_" << typeNameHelper<T>() << "_test.h5";
+    const std::string DATASET_NAME("dset");
+    const size_t x_size = 100;
+    const size_t y_size = 10;
+
+    // Create a new file using the default property lists.
+    File file(filename.str(), File::ReadWrite | File::Create | File::Truncate);
+
+    // Create the data space for the dataset.
+    std::vector<size_t> dims{x_size, y_size};
+
+    DataSpace dataspace(dims);
+
+    // Create a dataset with arbitrary type
+    DataSet dataset = file.createDataSet<T>(DATASET_NAME, dataspace);
+
+    T array[x_size][y_size];
+
+    ContentGenerate<T> generator;
+    generate2D(array, x_size, y_size, generator);
+
+    dataset.write(array);
+
+    T result[x_size][y_size];
+    dataset.read(result);
+
+    for (size_t i = 0; i < x_size; ++i) {
+        for (size_t j = 0; j < y_size; ++j) {
+            CHECK(result[i][j] == array[i][j]);
+        }
+    }
+}
+
+TEMPLATE_LIST_TEST_CASE("ReadWrite2DArray", "[template]", numerical_test_types) {
+    readWrite2DArrayTest<TestType>();
+}
+
+template <typename T>
+void readWriteArrayTest() {
+    const size_t x_size = 200;
+    typename std::array<T, x_size> vec;
+    ContentGenerate<T> generator;
+    std::generate(vec.begin(), vec.end(), generator);
+
+    typename std::array<T, x_size> result;
+    auto dataset = readWriteDataset<T>(vec, result, 1, "std-array");
+
+    CHECK(result == vec);
+
+    typename std::array<T, 1> tooSmall;
+    CHECK_THROWS_AS(dataset.read(tooSmall), DataSpaceException);
+}
+TEMPLATE_LIST_TEST_CASE("readWriteArray", "[template]", numerical_test_types) {
+    readWriteArrayTest<TestType>();
+}
+
+
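+// Generic N-dimensional round trip; fillVec, readWriteDataset and checkLength
+// come from the shared tests_high_five.hpp header.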
+template <typename T, typename VectorSubT>
+void readWriteVectorNDTest(std::vector<VectorSubT>& ndvec, const std::vector<size_t>& dims) {
+    fillVec(ndvec, dims, ContentGenerate<T>());
+
+    std::vector<VectorSubT> result;
+    readWriteDataset<T>(ndvec, result, dims.size(), "vector");
+
+    CHECK(checkLength(result, dims));
+    CHECK(ndvec == result);
+}
+
+TEMPLATE_LIST_TEST_CASE("readWriteSimpleVector", "[template]", numerical_test_types) {
+    std::vector<TestType> vec;
+    readWriteVectorNDTest<TestType>(vec, {50});
+}
+
+TEMPLATE_LIST_TEST_CASE("readWrite2DVector", "[template]", numerical_test_types) {
+    std::vector<std::vector<TestType>> _2dvec;
+    readWriteVectorNDTest<TestType>(_2dvec, {10, 8});
+}
+
+TEMPLATE_LIST_TEST_CASE("readWrite3DVector", "[template]", numerical_test_types) {
+    std::vector<std::vector<std::vector<TestType>>> _3dvec;
+    readWriteVectorNDTest<TestType>(_3dvec, {10, 5, 4});
+}
+
+TEMPLATE_LIST_TEST_CASE("readWrite4DVector", "[template]", numerical_test_types) {
+    std::vector<std::vector<std::vector<std::vector<TestType>>>> _4dvec;
+    readWriteVectorNDTest<TestType>(_4dvec, {5, 4, 3, 2});
+}
+
+TEMPLATE_LIST_TEST_CASE("vector of array", "[template]", numerical_test_types) {
+    std::vector<std::array<TestType, 4>> vec{{1, 2, 3, 4}, {1, 2, 3, 4}};
+    std::vector<std::array<TestType, 4>> result;
+    readWriteDataset<TestType>(vec, result, 2, "vector");
+
+    CHECK(vec.size() == result.size());
+    CHECK(vec[0].size() == result[0].size());
+    CHECK(vec == result);
+}
+
+
+#ifdef H5_USE_BOOST
+
+template <typename T>
+void MultiArray3DTest() {
+    typedef typename boost::multi_array<T, 3> MultiArray;
+
+    std::ostringstream filename;
+    filename << "h5_rw_multiarray_" << typeNameHelper<T>() << "_test.h5";
+
+    const int size_x = 10, size_y = 10, size_z = 10;
+    const std::string DATASET_NAME("dset");
+    MultiArray array(boost::extents[size_x][size_y][size_z]);
+
+    ContentGenerate<T> generator;
+    std::generate(array.data(), array.data() + array.num_elements(), generator);
+
+    // Create a new file using the default property lists.
+    File file(filename.str(), File::ReadWrite | File::Create | File::Truncate);
+
+    DataSet dataset = file.createDataSet<T>(DATASET_NAME, DataSpace::From(array));
+
+    dataset.write(array);
+
+    // read it back
+    MultiArray result;
+
+    dataset.read(result);
+
+    for (long i = 0; i < size_x; ++i) {
+        for (long j = 0; j < size_y; ++j) {
+            for (long k = 0; k < size_z; ++k) {
+                CHECK(array[i][j][k] == result[i][j][k]);
+            }
+        }
+    }
+}
+
+TEMPLATE_LIST_TEST_CASE("MultiArray3D", "[template]", numerical_test_types) {
+    MultiArray3DTest<TestType>();
+}
+
+template <typename T>
+void ublas_matrix_Test() {
+    using Matrix = boost::numeric::ublas::matrix<T>;
+
+    std::ostringstream filename;
+    filename << "h5_rw_multiarray_" << typeNameHelper<T>() << "_test.h5";
+
+    const size_t size_x = 10, size_y = 10;
+    const std::string DATASET_NAME("dset");
+
+    Matrix mat(size_x, size_y);
+
+    ContentGenerate<T> generator;
+    for (std::size_t i = 0; i < mat.size1(); ++i) {
+        for (std::size_t j = 0; j < mat.size2(); ++j) {
+            mat(i, j) = generator();
+        }
+    }
+
+    // Create a new file using the default property lists.
+    File file(filename.str(), File::ReadWrite | File::Create | File::Truncate);
+
+    DataSet dataset = file.createDataSet<T>(DATASET_NAME, DataSpace::From(mat));
+
+    dataset.write(mat);
+
+    // read it back
+    Matrix result;
+
+    dataset.read(result);
+
+    for (size_t i = 0; i < size_x; ++i) {
+        for (size_t j = 0; j < size_y; ++j) {
+            CHECK(mat(i, j) == result(i, j));
+        }
+    }
+}
+
+TEMPLATE_LIST_TEST_CASE("ublas_matrix", "[template]", numerical_test_types) {
+    ublas_matrix_Test<TestType>();
+}
+
+#endif
diff --git a/packages/HighFive/tests/unit/tests_high_five_parallel.cpp b/packages/HighFive/tests/unit/tests_high_five_parallel.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..8b096205e8528df17c6ff03cb68c592c81e0c9bd
--- /dev/null
+++ b/packages/HighFive/tests/unit/tests_high_five_parallel.cpp
@@ -0,0 +1,177 @@
+/*
+ *  Copyright (c), 2017-2019, Blue Brain Project - EPFL
+ *
+ *  Distributed under the Boost Software License, Version 1.0.
+ *    (See accompanying file LICENSE_1_0.txt or copy at
+ *          http://www.boost.org/LICENSE_1_0.txt)
+ *
+ */
+
+#include <cstdlib>
+#include <iostream>
+#include <string>
+#include <typeinfo>
+#include <vector>
+
+#include <catch2/catch_test_macros.hpp>
+#include <catch2/catch_template_test_macros.hpp>
+#include <catch2/catch_session.hpp>
+
+#include <highfive/highfive.hpp>
+#include "tests_high_five.hpp"
+
+using namespace HighFive;
+
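+// RAII guard: initializes MPI before the Catch2 session runs and finalizes it on destruction.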
+struct MpiFixture {
+    MpiFixture(int argc, char** argv) {
+        MPI_Init(&argc, &argv);
+        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+        MPI_Comm_size(MPI_COMM_WORLD, &size);
+    }
+    ~MpiFixture() {
+        MPI_Finalize();
+    }
+
+    int rank;
+    int size;
+};
+
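+// Verify that the preceding transfer really was collective; a non-zero cause means
+// HDF5 fell back to independent I/O.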
+void check_was_collective(const DataTransferProps& xfer_props) {
+    auto mnccp = MpioNoCollectiveCause(xfer_props);
+    CHECK(mnccp.wasCollective());
+    CHECK(mnccp.getLocalCause() == 0);
+    CHECK(mnccp.getGlobalCause() == 0);
+}
+
+template <typename T>
+void selectionArraySimpleTestParallel(File& file) {
+    int mpi_rank, mpi_size;
+    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+    using Vector = std::vector<T>;
+
+    const auto size = static_cast<size_t>(mpi_size);
+    Vector values(size);
+
+    ContentGenerate<T> generator;
+    std::generate(values.begin(), values.end(), generator);
+
+    const std::string d1_name("dset1");
+    DataSet d1 = file.createDataSet<T>(d1_name, DataSpace::From(values));
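+    // Only rank 0 writes dset1; every rank reads it back below through a selection.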
+    if (mpi_rank == 0) {
+        d1.write(values);
+    }
+
+    const std::string d2_name("dset2");
+    DataSet d2 = file.createDataSet<T>(d2_name, DataSpace::From(values));
+
+    auto xfer_props = DataTransferProps{};
+    xfer_props.add(UseCollectiveIO{});
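+    // Request collective MPI-IO for this transfer; check_was_collective() verifies HDF5 honoured it.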
+
+    {
+        auto offset = std::vector<size_t>{static_cast<size_t>(mpi_rank)};
+        auto count = std::vector<size_t>{1ul};
+        auto slice = d2.select(offset, count);
+
+        auto local_values = Vector(count[0]);
+        local_values[0] = values[offset[0]];
+
+        // Write collectively, each MPI rank writes one slab.
+        slice.write(local_values, xfer_props);
+        check_was_collective(xfer_props);
+    }
+
+    file.flush();
+
+    // -- read it back
+    const auto offset = static_cast<size_t>(mpi_rank);
+    const auto count = static_cast<size_t>(mpi_size - mpi_rank);
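+    // Each rank reads a different suffix of the dataset: offset = rank, count = size - rank.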
+
+    auto check_result = [&values, offset, count](const Vector& result) {
+        CHECK(result.size() == count);
+
+        for (size_t i = 0; i < count; ++i) {
+            CHECK(values[i + offset] == result[i]);
+        }
+    };
+
+    auto make_slice = [size, offset, count](DataSet& dataset) {
+        auto slice = dataset.select(std::vector<size_t>{offset}, std::vector<size_t>{count});
+
+        CHECK(slice.getSpace().getDimensions()[0] == size);
+        CHECK(slice.getMemSpace().getDimensions()[0] == count);
+
+        return slice;
+    };
+
+    auto s1 = make_slice(d1);
+    check_result(s1.template read<Vector>());
+
+    auto s2 = make_slice(d2);
+    check_result(s2.template read<Vector>(xfer_props));
+    check_was_collective(xfer_props);
+}
+
+template <typename T>
+void selectionArraySimpleTestParallelDefaultProps() {
+    std::ostringstream filename;
+    filename << "h5_rw_default_props_select_parallel_test_" << typeNameHelper<T>() << "_test.h5";
+
+    // Create a new file with MPI-IO file access (otherwise default properties).
+    auto fapl = FileAccessProps{};
+    fapl.add(MPIOFileAccess(MPI_COMM_WORLD, MPI_INFO_NULL));
+
+    File file(filename.str(), File::ReadWrite | File::Create | File::Truncate, fapl);
+
+    selectionArraySimpleTestParallel<T>(file);
+}
+
+template <typename T>
+void selectionArraySimpleTestParallelCollectiveMDProps() {
+    std::ostringstream filename;
+    filename << "h5_rw_collective_md_props_select_parallel_test_" << typeNameHelper<T>()
+             << "_test.h5";
+
+    // Create a new file with MPI-IO file access and collective metadata I/O.
+    auto fapl = FileAccessProps{};
+    fapl.add(MPIOFileAccess(MPI_COMM_WORLD, MPI_INFO_NULL));
+    fapl.add(MPIOCollectiveMetadata());
+
+    File file(filename.str(), File::ReadWrite | File::Create | File::Truncate, fapl);
+
+    selectionArraySimpleTestParallel<T>(file);
+}
+
+TEMPLATE_LIST_TEST_CASE("mpiSelectionArraySimpleDefaultProps", "[template]", numerical_test_types) {
+    selectionArraySimpleTestParallelDefaultProps<TestType>();
+}
+
+TEMPLATE_LIST_TEST_CASE("mpiSelectionArraySimpleCollectiveMD", "[template]", numerical_test_types) {
+    selectionArraySimpleTestParallelCollectiveMDProps<TestType>();
+}
+
+
+int main(int argc, char* argv[]) {
+    MpiFixture mpi(argc, argv);
+
+    // Capture stdout, following the solutions discussed in
+    // https://stackoverflow.com/questions/58289895/is-it-possible-to-use-catch2-for-testing-an-mpi-code
+    std::stringstream ss;
+    auto cout_buf = std::cout.rdbuf(ss.rdbuf());
+    int result = Catch::Session().run(argc, argv);
+    std::cout.rdbuf(cout_buf);
+
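+    // Ranks other than 0 replay their captured output, highest rank first and
+    // serialized by barriers, but only when their tests failed; rank 0 always reports.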
+    for (int i = mpi.size - 1; i > 0; --i) {
+        MPI_Barrier(MPI_COMM_WORLD);
+        if (i == mpi.rank && ss.str().rfind("All tests passed") == std::string::npos) {
+            std::cout << ss.str();
+        }
+    }
+    MPI_Barrier(MPI_COMM_WORLD);
+    if (mpi.rank == 0) {
+        std::cout << ss.str();
+    }
+
+    return result;
+}
diff --git a/packages/HighFive/tests/unit/tests_import_public_headers.cpp b/packages/HighFive/tests/unit/tests_import_public_headers.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..133d171153e075e830d4edf63e56b81d00c6da4d
--- /dev/null
+++ b/packages/HighFive/tests/unit/tests_import_public_headers.cpp
@@ -0,0 +1,7 @@
+// This is a compilation test to make sure that each public header can be included on its own
+
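+// @PUBLIC_HEADER@ is a placeholder, presumably substituted by CMake (configure_file)
+// once per public header.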
+#include <@PUBLIC_HEADER@>
+
+int main() {
+    return 0;
+}
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 54cae625bc7c899c433bfdf4021ef6242dd5aaf4..eebccac12c3c1c7b80b6b1e13ff81d0e802a8def 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -6,6 +6,9 @@ include_directories(${CMAKE_CURRENT_BINARY_DIR})
 # Pugs utils
 add_subdirectory(utils)
 
+# Pugs dev
+add_subdirectory(dev)
+
 # Pugs language
 add_subdirectory(language)
 
diff --git a/src/algebra/CMakeLists.txt b/src/algebra/CMakeLists.txt
index 6310f48dcf69eb93e2c1178e05d32dbc7fe5afad..74f40290a2f9250cc2296ec62358b44c366ffc85 100644
--- a/src/algebra/CMakeLists.txt
+++ b/src/algebra/CMakeLists.txt
@@ -11,4 +11,5 @@ target_link_libraries(
   PugsAlgebra
   ${PETSC_LIBRARIES}
   ${SLEPC_LIBRARIES}
+  ${HIGHFIVE_TARGET}
 )
diff --git a/src/analysis/CMakeLists.txt b/src/analysis/CMakeLists.txt
index b675ea696e20afb930e0163fc451d72111cf6f0a..4ab57395b9b153ea416299b600c168e8f6e2015c 100644
--- a/src/analysis/CMakeLists.txt
+++ b/src/analysis/CMakeLists.txt
@@ -12,6 +12,11 @@ add_library(
   TetrahedronGaussQuadrature.cpp
   TriangleGaussQuadrature.cpp)
 
+target_link_libraries(
+  PugsAnalysis
+  ${HIGHFIVE_TARGET}
+)
+
 if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
   if((CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL "12.0.0") AND (CMAKE_CXX_COMPILER_VERSION VERSION_LESS "13.0.0"))
     # Deactivated since it produces false positive warning in this file only ...
diff --git a/src/dev/CMakeLists.txt b/src/dev/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a212a76fa67fed236fc0ccce8fe0b114740ff20f
--- /dev/null
+++ b/src/dev/CMakeLists.txt
@@ -0,0 +1,16 @@
+# ------------------- Source files --------------------
+
+add_library(
+  PugsDev
+  ParallelChecker.cpp
+  ParallelCheckerDiscreteFunctionVariant.cpp
+  ParallelCheckerItemArrayVariant.cpp
+  ParallelCheckerItemValueVariant.cpp
+  ParallelCheckerSubItemArrayPerItemVariant.cpp
+  ParallelCheckerSubItemValuePerItemVariant.cpp
+)
+
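+# HIGHFIVE_TARGET expands to nothing when HDF5 support is disabled, so this link is a no-op in that case.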
+target_link_libraries(
+  PugsDev
+  ${HIGHFIVE_TARGET}
+)
diff --git a/src/dev/ParallelChecker.cpp b/src/dev/ParallelChecker.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c052c9ea0bdc76d01833e47710534ec776e9613e
--- /dev/null
+++ b/src/dev/ParallelChecker.cpp
@@ -0,0 +1,18 @@
+#include <dev/ParallelChecker.hpp>
+
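+// Explicit-lifetime singleton: create() and destroy() are expected to bracket the
+// whole run (presumably called from the pugs initialization/finalization code):
+//
+//   ParallelChecker::create();
+//   // ... simulation, parallel_check(...) calls ...
+//   ParallelChecker::destroy();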
+ParallelChecker* ParallelChecker::m_instance = nullptr;
+
+void
+ParallelChecker::create()
+{
+  Assert(ParallelChecker::m_instance == nullptr, "ParallelChecker has already been created");
+  ParallelChecker::m_instance = new ParallelChecker;
+}
+
+void
+ParallelChecker::destroy()
+{
+  Assert(ParallelChecker::m_instance != nullptr, "ParallelChecker has already been destroyed");
+  delete ParallelChecker::m_instance;
+  ParallelChecker::m_instance = nullptr;
+}
diff --git a/src/dev/ParallelChecker.hpp b/src/dev/ParallelChecker.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..97d82ee4bac3badfdc760c973d81454e89102200
--- /dev/null
+++ b/src/dev/ParallelChecker.hpp
@@ -0,0 +1,1473 @@
+#ifndef PARALLEL_CHECKER_HPP
+#define PARALLEL_CHECKER_HPP
+
+#include <utils/pugs_config.hpp>
+#ifdef PUGS_HAS_HDF5
+#include <highfive/highfive.hpp>
+#endif   // PUGS_HAS_HDF5
+
+#include <mesh/Connectivity.hpp>
+#include <mesh/ItemArrayVariant.hpp>
+#include <mesh/ItemValueVariant.hpp>
+#include <mesh/SubItemArrayPerItemVariant.hpp>
+#include <mesh/SubItemValuePerItemVariant.hpp>
+#include <scheme/DiscreteFunctionVariant.hpp>
+#include <utils/Demangle.hpp>
+#include <utils/Filesystem.hpp>
+#include <utils/Messenger.hpp>
+#include <utils/SourceLocation.hpp>
+
+#include <fstream>
+
+template <typename DataType, ItemType item_type, typename ConnectivityPtr>
+void parallel_check(const ItemValue<DataType, item_type, ConnectivityPtr>& item_value,
+                    const std::string& name,
+                    const SourceLocation& source_location = SourceLocation{});
+
+template <typename DataType, ItemType item_type, typename ConnectivityPtr>
+void parallel_check(const ItemArray<DataType, item_type, ConnectivityPtr>& item_array,
+                    const std::string& name,
+                    const SourceLocation& source_location = SourceLocation{});
+
+template <typename DataType, typename ItemOfItem, typename ConnectivityPtr>
+void parallel_check(const SubItemValuePerItem<DataType, ItemOfItem, ConnectivityPtr>& subitem_value_per_item,
+                    const std::string& name,
+                    const SourceLocation& source_location = SourceLocation{});
+
+template <typename DataType, typename ItemOfItem, typename ConnectivityPtr>
+void parallel_check(const SubItemArrayPerItem<DataType, ItemOfItem, ConnectivityPtr>& subitem_array_per_item,
+                    const std::string& name,
+                    const SourceLocation& source_location = SourceLocation{});
+
+class ParallelChecker
+{
+ public:
+  enum class Mode
+  {
+    automatic,   // write when running sequentially, read and compare when running in parallel
+    read,
+    write
+  };
+
+  // to allow special manipulations in tests
+  friend class ParallelCheckerTester;
+
+ private:
+  static ParallelChecker* m_instance;
+
+  Mode m_mode  = Mode::automatic;
+  size_t m_tag = 0;
+
+  std::string m_filename = "parallel_checker.h5";
+  std::string m_path     = "";
+
+  ParallelChecker() = default;
+
+ public:
+  template <typename DataType, ItemType item_type, typename ConnectivityPtr>
+  friend void parallel_check(const ItemValue<DataType, item_type, ConnectivityPtr>& item_value,
+                             const std::string& name,
+                             const SourceLocation& source_location);
+
+  template <typename DataType, ItemType item_type, typename ConnectivityPtr>
+  friend void parallel_check(const ItemArray<DataType, item_type, ConnectivityPtr>& item_array,
+                             const std::string& name,
+                             const SourceLocation& source_location);
+
+  template <typename DataType, typename ItemOfItem, typename ConnectivityPtr>
+  friend void parallel_check(const SubItemValuePerItem<DataType, ItemOfItem, ConnectivityPtr>& subitem_value_per_item,
+                             const std::string& name,
+                             const SourceLocation& source_location);
+
+  template <typename DataType, typename ItemOfItem, typename ConnectivityPtr>
+  friend void parallel_check(const SubItemArrayPerItem<DataType, ItemOfItem, ConnectivityPtr>& subitem_array_per_item,
+                             const std::string& name,
+                             const SourceLocation& source_location);
+
+#ifdef PUGS_HAS_HDF5
+ private:
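+  // Expose TinyVector<Dimension, DataT> to HDF5 as an array datatype of extent {Dimension},
+  // built with H5Tarray_create.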
+  template <typename T>
+  struct TinyVectorDataType;
+
+  template <size_t Dimension, typename DataT>
+  struct TinyVectorDataType<TinyVector<Dimension, DataT>> : public HighFive::DataType
+  {
+    TinyVectorDataType()
+    {
+      hsize_t dim[]     = {Dimension};
+      auto h5_data_type = HighFive::create_datatype<DataT>();
+      _hid              = H5Tarray_create(h5_data_type.getId(), 1, dim);
+    }
+  };
+
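+  // Same idea for TinyMatrix<M, N, DataT>: a two-dimensional {M, N} HDF5 array datatype.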
+  template <typename T>
+  struct TinyMatrixDataType;
+
+  template <size_t M, size_t N, typename DataT>
+  struct TinyMatrixDataType<TinyMatrix<M, N, DataT>> : public HighFive::DataType
+  {
+    TinyMatrixDataType()
+    {
+      hsize_t dim[]     = {M, N};
+      auto h5_data_type = HighFive::create_datatype<DataT>();
+      _hid              = H5Tarray_create(h5_data_type.getId(), 2, dim);
+    }
+  };
+
+  HighFive::File
+  _createOrOpenFileRW() const
+  {
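+    // The first tag creates (or truncates) the checker file; subsequent tags reopen it and append.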
+    if (m_tag == 0) {
+      createDirectoryIfNeeded(m_filename);
+      return HighFive::File{m_filename, HighFive::File::Truncate};
+    } else {
+      return HighFive::File{m_filename, HighFive::File::ReadWrite};
+    }
+  }
+
+  void
+  _printHeader(const std::string& name, const SourceLocation& source_location) const
+  {
+    std::cout << rang::fg::cyan << " - " << rang::fgB::cyan << "parallel checker" << rang::fg::cyan << " for \""
+              << rang::fgB::magenta << name << rang::fg::cyan << "\" tag " << rang::fgB::blue << m_tag
+              << rang::fg::reset << '\n';
+    std::cout << rang::fg::cyan << " | from " << rang::fgB::blue << source_location.filename() << rang::fg::reset << ':'
+              << rang::style::bold << source_location.line() << rang::style::reset << '\n';
+  }
+
+  template <typename DataType>
+  void
+  _writeArray(HighFive::Group& group, const std::string& name, const Array<DataType>& array) const
+  {
+    using data_type = std::remove_const_t<DataType>;
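+    // TinyVector/TinyMatrix arrays are written raw through the custom HDF5 array
+    // datatypes defined above; plain arithmetic types use an ordinary typed dataset.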
+    if constexpr (is_tiny_vector_v<data_type>) {
+      auto dataset = group.createDataSet(name, HighFive::DataSpace{std::vector<size_t>{array.size()}},
+                                         TinyVectorDataType<data_type>{});
+
+      dataset.template write_raw<typename data_type::data_type>(&(array[0][0]), TinyVectorDataType<data_type>{});
+    } else if constexpr (is_tiny_matrix_v<data_type>) {
+      auto dataset = group.createDataSet(name, HighFive::DataSpace{std::vector<size_t>{array.size()}},
+                                         TinyMatrixDataType<data_type>{});
+
+      dataset.template write_raw<typename data_type::data_type>(&(array[0](0, 0)), TinyMatrixDataType<data_type>{});
+    } else {
+      auto dataset = group.createDataSet<data_type>(name, HighFive::DataSpace{std::vector<size_t>{array.size()}});
+      dataset.template write_raw<data_type>(&(array[0]));
+    }
+  }
+
+  template <typename DataType>
+  void
+  _writeTable(HighFive::Group& group, const std::string& name, const Table<DataType>& table) const
+  {
+    using data_type = std::remove_const_t<DataType>;
+    if constexpr (is_tiny_vector_v<data_type>) {
+      auto dataset =
+        group.createDataSet(name,
+                            HighFive::DataSpace{std::vector<size_t>{table.numberOfRows(), table.numberOfColumns()}},
+                            TinyVectorDataType<data_type>{});
+
+      dataset.template write_raw<typename data_type::data_type>(&(table(0, 0)[0]), TinyVectorDataType<data_type>{});
+    } else if constexpr (is_tiny_matrix_v<data_type>) {
+      auto dataset =
+        group.createDataSet(name,
+                            HighFive::DataSpace{std::vector<size_t>{table.numberOfRows(), table.numberOfColumns()}},
+                            TinyMatrixDataType<data_type>{});
+
+      dataset.template write_raw<typename data_type::data_type>(&(table(0, 0)(0, 0)), TinyMatrixDataType<data_type>{});
+    } else {
+      auto dataset =
+        group.createDataSet<data_type>(name, HighFive::DataSpace{
+                                               std::vector<size_t>{table.numberOfRows(), table.numberOfColumns()}});
+      dataset.template write_raw<data_type>(&(table(0, 0)));
+    }
+  }
+
+  template <typename DataType>
+  Array<std::remove_const_t<DataType>>
+  _readArray(HighFive::Group& group, const std::string& name) const
+  {
+    using data_type = std::remove_const_t<DataType>;
+
+    auto dataset = group.getDataSet(name);
+    Array<data_type> array(dataset.getElementCount());
+
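+    // Read back with the same custom datatype used for writing, so the in-memory
+    // layout of TinyVector/TinyMatrix entries matches.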
+    if constexpr (is_tiny_vector_v<data_type>) {
+      dataset.read<data_type>(&(array[0]), TinyVectorDataType<data_type>{});
+    } else if constexpr (is_tiny_matrix_v<data_type>) {
+      dataset.read<data_type>(&(array[0]), TinyMatrixDataType<data_type>{});
+    } else {
+      dataset.read<data_type>(&(array[0]));
+    }
+    return array;
+  }
+
+  template <typename DataType>
+  Table<std::remove_const_t<DataType>>
+  _readTable(HighFive::Group& group, const std::string& name) const
+  {
+    using data_type = std::remove_const_t<DataType>;
+
+    auto dataset = group.getDataSet(name);
+    Table<data_type> table(dataset.getDimensions()[0], dataset.getDimensions()[1]);
+
+    if constexpr (is_tiny_vector_v<data_type>) {
+      dataset.read<data_type>(&(table(0, 0)), TinyVectorDataType<data_type>{});
+    } else if constexpr (is_tiny_matrix_v<data_type>) {
+      dataset.read<data_type>(&(table(0, 0)), TinyMatrixDataType<data_type>{});
+    } else {
+      dataset.read<data_type>(&(table(0, 0)));
+    }
+    return table;
+  }
+
+  size_t
+  _getConnectivityId(const std::shared_ptr<const IConnectivity>& i_connectivity) const
+  {
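+    // The space dimension is only known at run time, so dispatch by hand to the
+    // concrete Connectivity<Dimension> type.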
+    switch (i_connectivity->dimension()) {
+    case 1: {
+      return dynamic_cast<const Connectivity<1>&>(*i_connectivity).id();
+    }
+    case 2: {
+      return dynamic_cast<const Connectivity<2>&>(*i_connectivity).id();
+    }
+    case 3: {
+      return dynamic_cast<const Connectivity<3>&>(*i_connectivity).id();
+    }
+      // LCOV_EXCL_START
+    default: {
+      throw UnexpectedError("unexpected connectivity dimension");
+    }
+      // LCOV_EXCL_STOP
+    }
+  }
+
+  template <ItemType item_type>
+  Array<const int>
+  _getItemNumber(const std::shared_ptr<const IConnectivity>& i_connectivity) const
+  {
+    switch (i_connectivity->dimension()) {
+    case 1: {
+      const Connectivity<1>& connectivity = dynamic_cast<const Connectivity<1>&>(*i_connectivity);
+      return connectivity.number<item_type>().arrayView();
+    }
+    case 2: {
+      const Connectivity<2>& connectivity = dynamic_cast<const Connectivity<2>&>(*i_connectivity);
+      return connectivity.number<item_type>().arrayView();
+    }
+    case 3: {
+      const Connectivity<3>& connectivity = dynamic_cast<const Connectivity<3>&>(*i_connectivity);
+      return connectivity.number<item_type>().arrayView();
+    }
+      // LCOV_EXCL_START
+    default: {
+      throw UnexpectedError("unexpected connectivity dimension");
+    }
+      // LCOV_EXCL_STOP
+    }
+  }
+
+  template <ItemType item_type, ItemType sub_item_type>
+  Array<const typename ConnectivityMatrix::IndexType>
+  _getSubItemRowsMap(const std::shared_ptr<const IConnectivity>& i_connectivity)
+  {
+    switch (i_connectivity->dimension()) {
+    case 1: {
+      const Connectivity<1>& connectivity = dynamic_cast<const Connectivity<1>&>(*i_connectivity);
+      return connectivity.getMatrix(item_type, sub_item_type).rowsMap();
+    }
+    case 2: {
+      const Connectivity<2>& connectivity = dynamic_cast<const Connectivity<2>&>(*i_connectivity);
+      return connectivity.getMatrix(item_type, sub_item_type).rowsMap();
+    }
+    case 3: {
+      const Connectivity<3>& connectivity = dynamic_cast<const Connectivity<3>&>(*i_connectivity);
+      return connectivity.getMatrix(item_type, sub_item_type).rowsMap();
+    }
+      // LCOV_EXCL_START
+    default: {
+      throw UnexpectedError("unexpected connectivity dimension");
+    }
+      // LCOV_EXCL_STOP
+    }
+  }
+
+  template <ItemType item_type>
+  Array<const int>
+  _getItemOwner(const std::shared_ptr<const IConnectivity>& i_connectivity) const
+  {
+    switch (i_connectivity->dimension()) {
+    case 1: {
+      const Connectivity<1>& connectivity = dynamic_cast<const Connectivity<1>&>(*i_connectivity);
+      return connectivity.owner<item_type>().arrayView();
+    }
+    case 2: {
+      const Connectivity<2>& connectivity = dynamic_cast<const Connectivity<2>&>(*i_connectivity);
+      return connectivity.owner<item_type>().arrayView();
+    }
+    case 3: {
+      const Connectivity<3>& connectivity = dynamic_cast<const Connectivity<3>&>(*i_connectivity);
+      return connectivity.owner<item_type>().arrayView();
+    }
+      // LCOV_EXCL_START
+    default: {
+      throw UnexpectedError("unexpected connectivity dimension");
+    }
+      // LCOV_EXCL_STOP
+    }
+  }
+
+  template <ItemType item_type>
+  void
+  _checkGlobalNumberOfItems(const std::shared_ptr<const IConnectivity>& i_connectivity,
+                            size_t reference_number_of_items)
+  {
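+    // Count only the items owned by this rank (ghosts excluded); the global sum
+    // must match the sequential reference.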
+    Array<const bool> is_owned;
+    switch (i_connectivity->dimension()) {
+    case 1: {
+      const Connectivity<1>& connectivity = dynamic_cast<const Connectivity<1>&>(*i_connectivity);
+      is_owned                            = connectivity.isOwned<item_type>().arrayView();
+      break;
+    }
+    case 2: {
+      const Connectivity<2>& connectivity = dynamic_cast<const Connectivity<2>&>(*i_connectivity);
+      is_owned                            = connectivity.isOwned<item_type>().arrayView();
+      break;
+    }
+    case 3: {
+      const Connectivity<3>& connectivity = dynamic_cast<const Connectivity<3>&>(*i_connectivity);
+      is_owned                            = connectivity.isOwned<item_type>().arrayView();
+      break;
+    }
+      // LCOV_EXCL_START
+    default: {
+      throw UnexpectedError("unexpected connectivity dimension");
+    }
+      // LCOV_EXCL_STOP
+    }
+
+    size_t number_of_items = 0;
+    for (size_t i = 0; i < is_owned.size(); ++i) {
+      number_of_items += is_owned[i];
+    }
+
+    if (parallel::allReduceSum(number_of_items) != reference_number_of_items) {
+      throw NormalError("number of items differs from reference");
+    }
+  }
+
+  template <ItemType item_type>
+  void
+  _writeItemNumbers(const std::shared_ptr<const IConnectivity> i_connectivity,
+                    HighFive::File file,
+                    HighFive::Group group) const
+  {
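+    // Item numbers are written once per connectivity; each tag's group then
+    // references them through a hard link.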
+    std::string item_number_path = "/connectivities/" + std::to_string(this->_getConnectivityId(i_connectivity)) + '/' +
+                                   std::string{itemName(item_type)};
+
+    if (not file.exist(item_number_path)) {
+      HighFive::Group item_number_group = file.createGroup(item_number_path);
+      this->_writeArray(item_number_group, "numbers", this->_getItemNumber<item_type>(i_connectivity));
+    }
+
+    HighFive::DataSet item_numbers = file.getDataSet(item_number_path + "/numbers");
+    group.createHardLink("numbers", item_numbers);
+  }
+
+  template <typename ItemOfItem>
+  void
+  _writeSubItemRowsMap(const std::shared_ptr<const IConnectivity> i_connectivity,
+                       HighFive::File file,
+                       HighFive::Group group)
+  {
+    constexpr ItemType item_type     = ItemOfItem::item_type;
+    constexpr ItemType sub_item_type = ItemOfItem::sub_item_type;
+
+    std::string subitem_of_item_row_map_path =
+      "/connectivities/" + std::to_string(this->_getConnectivityId(i_connectivity)) + '/' +
+      std::string{itemName(sub_item_type)} + "of" + std::string{itemName(item_type)};
+
+    if (not file.exist(subitem_of_item_row_map_path)) {
+      HighFive::Group subitem_of_item_row_map_group = file.createGroup(subitem_of_item_row_map_path);
+      this->_writeArray(subitem_of_item_row_map_group, "rows_map",
+                        this->_getSubItemRowsMap<item_type, sub_item_type>(i_connectivity));
+    }
+
+    HighFive::DataSet subitem_of_item_row_map = file.getDataSet(subitem_of_item_row_map_path + "/rows_map");
+    group.createHardLink("rows_map", subitem_of_item_row_map);
+  }
+
+  template <typename DataType, ItemType item_type>
+  bool
+  _checkIsComparable(const std::string& name,
+                     const SourceLocation& source_location,
+                     const std::vector<size_t> data_shape,
+                     const std::shared_ptr<const IConnectivity>& i_connectivity,
+                     HighFive::Group group) const
+  {
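+    // Compare the stored metadata (dimension, item type, data type, shape) with the
+    // target; differing names only trigger a warning further down.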
+    const std::string reference_name          = group.getAttribute("name").read<std::string>();
+    const std::string reference_file_name     = group.getAttribute("filename").read<std::string>();
+    const std::string reference_function_name = group.getAttribute("function").read<std::string>();
+    const size_t reference_line_number        = group.getAttribute("line").read<size_t>();
+    const size_t reference_dimension          = group.getAttribute("dimension").read<size_t>();
+    const std::string reference_item_type     = group.getAttribute("item_type").read<std::string>();
+    const std::string reference_data_type     = group.getAttribute("data_type").read<std::string>();
+
+    bool is_comparable = true;
+    if (i_connectivity->dimension() != reference_dimension) {
+      std::cout << rang::fg::cyan << " | " << rang::fgB::red << "different support dimensions: reference ("
+                << rang::fgB::yellow << reference_dimension << rang::fgB::red << ") / target (" << rang::fgB::yellow
+                << i_connectivity->dimension() << rang::fg::reset << ")\n";
+      is_comparable = false;
+    }
+    if (itemName(item_type) != reference_item_type) {
+      std::cout << rang::fg::cyan << " | " << rang::fgB::red << "different item types: reference (" << rang::fgB::yellow
+                << reference_item_type << rang::fgB::red << ") / target (" << rang::fgB::yellow << itemName(item_type)
+                << rang::fg::reset << ")\n";
+      is_comparable = false;
+    }
+    if (demangle<DataType>() != reference_data_type) {
+      std::cout << rang::fg::cyan << " | " << rang::fgB::red << "different data types: reference (" << rang::fgB::yellow
+                << reference_data_type << rang::fgB::red << ") / target (" << rang::fgB::yellow << demangle<DataType>()
+                << rang::fg::reset << ")\n";
+      is_comparable = false;
+    }
+    std::vector reference_data_shape = group.getDataSet(reference_name).getSpace().getDimensions();
+    if (reference_data_shape.size() != data_shape.size()) {
+      std::cout << rang::fg::cyan << " | " << rang::fgB::red << "different data shape kind: reference ("
+                << rang::fgB::yellow << reference_data_shape.size() << "d array" << rang::fgB::red << ") / target ("
+                << rang::fgB::yellow << data_shape.size() << "d array" << rang::fg::reset << ")\n";
+      is_comparable = false;
+    }
+    {
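+      // Extent 0 is the partition-dependent item count, so only the trailing
+      // extents are compared; it is printed as '*' below.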
+      bool same_shapes = (reference_data_shape.size() == data_shape.size());
+      if (same_shapes) {
+        for (size_t i = 1; i < reference_data_shape.size(); ++i) {
+          same_shapes &= (reference_data_shape[i] == data_shape[i]);
+        }
+      }
+      if (not same_shapes) {
+        std::cout << rang::fg::cyan << " | " << rang::fgB::red << "different data shape: reference ("
+                  << rang::fgB::yellow << "*";
+        for (size_t i = 1; i < reference_data_shape.size(); ++i) {
+          std::cout << ":" << reference_data_shape[i];
+        }
+        std::cout << rang::fgB::red << ") / target (" << rang::fgB::yellow << "*";
+        for (size_t i = 1; i < data_shape.size(); ++i) {
+          std::cout << ":" << data_shape[i];
+        }
+        std::cout << rang::fg::reset << ")\n";
+        is_comparable = false;
+      }
+    }
+    if (name != reference_name) {
+      // Just warn for different labels (maybe useful for some kind of
+      // debugging...)
+      std::cout << rang::fg::cyan << " | " << rang::fgB::magenta << "different names: reference (" << rang::fgB::yellow
+                << rang::style::reversed << reference_name << rang::style::reset << rang::fgB::magenta << ") / target ("
+                << rang::fgB::yellow << rang::style::reversed << name << rang::style::reset << rang::fg::reset << ")\n";
+      std::cout << rang::fg::cyan << " | " << rang::fgB::magenta << "reference from " << rang::fgB::blue
+                << reference_file_name << rang::fg::reset << ':' << rang::style::bold << reference_line_number
+                << rang::style::reset << '\n';
+      if ((reference_function_name.size() > 0) or (source_location.function().size() > 0)) {
+        std::cout << rang::fg::cyan << " | " << rang::fgB::magenta << "reference function " << rang::fgB::blue
+                  << reference_function_name << rang::fg::reset << '\n';
+        std::cout << rang::fg::cyan << " | " << rang::fgB::magenta << "target function " << rang::fgB::blue
+                  << source_location.function() << rang::fg::reset << '\n';
+      }
+    }
+
+    return is_comparable;
+  }
+
+  template <ItemType sub_item_type>
+  bool
+  _checkIsComparable(HighFive::Group group) const
+  {
+    const std::string reference_sub_item_type = group.getAttribute("subitem_type").read<std::string>();
+
+    bool is_comparable = true;
+    if (itemName(sub_item_type) != reference_sub_item_type) {
+      std::cout << rang::fg::cyan << " | " << rang::fgB::red << "different sub_item types: reference ("
+                << rang::fgB::yellow << reference_sub_item_type << rang::fgB::red << ") / target (" << rang::fgB::yellow
+                << itemName(sub_item_type) << rang::fg::reset << ")\n";
+      is_comparable = false;
+    }
+
+    return is_comparable;
+  }
+
+  template <typename DataType, ItemType item_type>
+  void
+  _throwIfNotComparable(const std::string& name,
+                        const SourceLocation& source_location,
+                        const std::vector<size_t> data_shape,
+                        const std::shared_ptr<const IConnectivity>& i_connectivity,
+                        HighFive::Group group) const
+  {
+    bool is_comparable =
+      this->_checkIsComparable<DataType, item_type>(name, source_location, data_shape, i_connectivity, group);
+    if (not parallel::allReduceAnd(is_comparable)) {
+      throw NormalError("cannot compare data");
+    }
+  }
+
+  template <typename DataType, typename ItemOfItem>
+  void
+  _throwIfNotComparable(const std::string& name,
+                        const SourceLocation& source_location,
+                        const std::vector<size_t> data_shape,
+                        const std::shared_ptr<const IConnectivity>& i_connectivity,
+                        HighFive::Group group) const
+  {
+    bool is_comparable = this->_checkIsComparable<DataType, ItemOfItem::item_type>(name, source_location, data_shape,
+                                                                                   i_connectivity, group)   //
+                         and this->_checkIsComparable<ItemOfItem::sub_item_type>(group);
+
+    if (not parallel::allReduceAnd(is_comparable)) {
+      throw NormalError("cannot compare data");
+    }
+  }
+
+ private:
+  template <typename DataType, ItemType item_type, typename ConnectivityPtr>
+  void
+  write(const ItemValue<DataType, item_type, ConnectivityPtr>& item_value,
+        const std::string& name,
+        const SourceLocation& source_location)
+  {
+    HighFive::SilenceHDF5 m_silence_hdf5{true};
+    this->_printHeader(name, source_location);
+
+    try {
+      HighFive::File file = this->_createOrOpenFileRW();
+
+      auto group = file.createGroup("values/" + std::to_string(m_tag));
+
+      group.createAttribute("filename", std::string{source_location.filename()});
+      group.createAttribute("function", std::string{source_location.function()});
+      group.createAttribute("line", static_cast<size_t>(source_location.line()));
+      group.createAttribute("name", name);
+
+      std::shared_ptr<const IConnectivity> i_connectivity = item_value.connectivity_ptr();
+      group.createAttribute("dimension", static_cast<size_t>(i_connectivity->dimension()));
+      group.createAttribute("item_type", std::string{itemName(item_type)});
+      group.createAttribute("data_type", demangle<DataType>());
+
+      this->_writeArray(group, name, item_value.arrayView());
+
+      this->_writeItemNumbers<item_type>(i_connectivity, file, group);
+
+      ++m_tag;
+
+      std::cout << rang::fg::cyan << " | writing " << rang::fgB::green << "success" << rang::fg::reset << '\n';
+    }
+    // LCOV_EXCL_START
+    catch (HighFive::Exception& e) {
+      throw NormalError(e.what());
+    }
+    // LCOV_EXCL_STOP
+  }
+
+  template <typename DataType, ItemType item_type, typename ConnectivityPtr>
+  void
+  write(const ItemArray<DataType, item_type, ConnectivityPtr>& item_array,
+        const std::string& name,
+        const SourceLocation& source_location)
+  {
+    HighFive::SilenceHDF5 m_silence_hdf5{true};
+    this->_printHeader(name, source_location);
+
+    try {
+      HighFive::File file = this->_createOrOpenFileRW();
+
+      auto group = file.createGroup("values/" + std::to_string(m_tag));
+
+      group.createAttribute("filename", std::string{source_location.filename()});
+      group.createAttribute("function", std::string{source_location.function()});
+      group.createAttribute("line", static_cast<size_t>(source_location.line()));
+      group.createAttribute("name", name);
+
+      std::shared_ptr<const IConnectivity> i_connectivity = item_array.connectivity_ptr();
+      group.createAttribute("dimension", static_cast<size_t>(i_connectivity->dimension()));
+      group.createAttribute("item_type", std::string{itemName(item_type)});
+      group.createAttribute("data_type", demangle<DataType>());
+
+      this->_writeTable(group, name, item_array.tableView());
+
+      this->_writeItemNumbers<item_type>(i_connectivity, file, group);
+
+      ++m_tag;
+
+      std::cout << rang::fg::cyan << " | writing " << rang::fgB::green << "success" << rang::fg::reset << '\n';
+    }
+    // LCOV_EXCL_START
+    catch (HighFive::Exception& e) {
+      throw NormalError(e.what());
+    }
+    // LCOV_EXCL_STOP
+  }
+
+  template <typename DataType, typename ItemOfItem, typename ConnectivityPtr>
+  void
+  write(const SubItemValuePerItem<DataType, ItemOfItem, ConnectivityPtr>& subitem_value_per_item,
+        const std::string& name,
+        const SourceLocation& source_location)
+  {
+    constexpr ItemType item_type     = ItemOfItem::item_type;
+    constexpr ItemType sub_item_type = ItemOfItem::sub_item_type;
+
+    HighFive::SilenceHDF5 m_silence_hdf5{true};
+    this->_printHeader(name, source_location);
+
+    try {
+      HighFive::File file = this->_createOrOpenFileRW();
+
+      auto group = file.createGroup("values/" + std::to_string(m_tag));
+
+      group.createAttribute("filename", std::string{source_location.filename()});
+      group.createAttribute("function", std::string{source_location.function()});
+      group.createAttribute("line", static_cast<size_t>(source_location.line()));
+      group.createAttribute("name", name);
+
+      std::shared_ptr<const IConnectivity> i_connectivity = subitem_value_per_item.connectivity_ptr();
+      group.createAttribute("dimension", static_cast<size_t>(i_connectivity->dimension()));
+      group.createAttribute("item_type", std::string{itemName(item_type)});
+      group.createAttribute("subitem_type", std::string{itemName(sub_item_type)});
+
+      group.createAttribute("data_type", demangle<DataType>());
+
+      this->_writeArray(group, name, subitem_value_per_item.arrayView());
+
+      this->_writeItemNumbers<item_type>(i_connectivity, file, group);
+      this->_writeSubItemRowsMap<ItemOfItem>(i_connectivity, file, group);
+
+      ++m_tag;
+
+      std::cout << rang::fg::cyan << " | writing " << rang::fgB::green << "success" << rang::fg::reset << '\n';
+    }
+    // LCOV_EXCL_START
+    catch (HighFive::Exception& e) {
+      throw NormalError(e.what());
+    }
+    // LCOV_EXCL_STOP
+  }
+
+  template <typename DataType, typename ItemOfItem, typename ConnectivityPtr>
+  void
+  write(const SubItemArrayPerItem<DataType, ItemOfItem, ConnectivityPtr>& subitem_value_per_item,
+        const std::string& name,
+        const SourceLocation& source_location)
+  {
+    constexpr ItemType item_type     = ItemOfItem::item_type;
+    constexpr ItemType sub_item_type = ItemOfItem::sub_item_type;
+
+    HighFive::SilenceHDF5 m_silence_hdf5{true};
+    this->_printHeader(name, source_location);
+
+    try {
+      HighFive::File file = this->_createOrOpenFileRW();
+
+      auto group = file.createGroup("values/" + std::to_string(m_tag));
+
+      group.createAttribute("filename", std::string{source_location.filename()});
+      group.createAttribute("function", std::string{source_location.function()});
+      group.createAttribute("line", static_cast<size_t>(source_location.line()));
+      group.createAttribute("name", name);
+
+      std::shared_ptr<const IConnectivity> i_connectivity = subitem_value_per_item.connectivity_ptr();
+      group.createAttribute("dimension", static_cast<size_t>(i_connectivity->dimension()));
+      group.createAttribute("item_type", std::string{itemName(item_type)});
+      group.createAttribute("subitem_type", std::string{itemName(sub_item_type)});
+
+      group.createAttribute("data_type", demangle<DataType>());
+
+      this->_writeTable(group, name, subitem_value_per_item.tableView());
+
+      this->_writeItemNumbers<item_type>(i_connectivity, file, group);
+      this->_writeSubItemRowsMap<ItemOfItem>(i_connectivity, file, group);
+
+      ++m_tag;
+
+      std::cout << rang::fg::cyan << " | writing " << rang::fgB::green << "success" << rang::fg::reset << '\n';
+    }
+    // LCOV_EXCL_START
+    catch (HighFive::Exception& e) {
+      throw NormalError(e.what());
+    }
+    // LCOV_EXCL_STOP
+  }
+
+  template <typename DataType, ItemType item_type, typename ConnectivityPtr>
+  void
+  compare(const ItemValue<DataType, item_type, ConnectivityPtr>& item_value,
+          const std::string& name,
+          const SourceLocation& source_location)
+  {
+    HighFive::SilenceHDF5 m_silence_hdf5{true};
+    this->_printHeader(name, source_location);
+
+    try {
+      HighFive::File file{m_filename, HighFive::File::ReadOnly};
+
+      auto group = file.getGroup("values/" + std::to_string(m_tag));
+
+      const std::string reference_name = group.getAttribute("name").read<std::string>();
+
+      std::shared_ptr<const IConnectivity> i_connectivity = item_value.connectivity_ptr();
+
+      this->_throwIfNotComparable<DataType, item_type>(name, source_location,
+                                                       std::vector<size_t>{item_value.numberOfItems()}, i_connectivity,
+                                                       group);
+
+      Array<const int> reference_item_numbers    = this->_readArray<int>(group, "numbers");
+      Array<const DataType> reference_item_value = this->_readArray<DataType>(group, reference_name);
+
+      Array<const int> item_numbers = this->_getItemNumber<item_type>(i_connectivity);
+
+      using ItemId = ItemIdT<item_type>;
+
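+      // Map each globally unique item number to its local id so local items can be
+      // located in the reference ordering.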
+      std::unordered_map<int, ItemId> item_number_to_item_id_map;
+
+      for (ItemId item_id = 0; item_id < item_numbers.size(); ++item_id) {
+        const auto& [iterator, success] =
+          item_number_to_item_id_map.insert(std::make_pair(item_numbers[item_id], item_id));
+
+        // LCOV_EXCL_START
+        if (not success) {
+          throw UnexpectedError("item numbers have duplicate values");
+        }
+        // LCOV_EXCL_STOP
+      }
+
+      Assert(item_number_to_item_id_map.size() == item_numbers.size());
+
+      Array<int> index_in_reference(item_numbers.size());
+      index_in_reference.fill(-1);
+      for (size_t i = 0; i < reference_item_numbers.size(); ++i) {
+        const auto& i_number_to_item_id = item_number_to_item_id_map.find(reference_item_numbers[i]);
+        if (i_number_to_item_id != item_number_to_item_id_map.end()) {
+          index_in_reference[i_number_to_item_id->second] = i;
+        }
+      }
+
+      if (parallel::allReduceMin(min(index_in_reference)) < 0) {
+        throw NormalError("some item numbers are not defined in reference");
+      }
+      this->_checkGlobalNumberOfItems<item_type>(i_connectivity, reference_item_numbers.size());
+
+      Array<const int> owner = this->_getItemOwner<item_type>(i_connectivity);
+
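+      // A mismatch on an owned item means the computation itself differs; mismatches
+      // confined to ghost items point to a missing synchronization.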
+      bool has_own_differences = false;
+      bool is_same             = true;
+
+      for (ItemId item_id = 0; item_id < item_value.numberOfItems(); ++item_id) {
+        if (reference_item_value[index_in_reference[item_id]] != item_value[item_id]) {
+          is_same = false;
+          if (static_cast<size_t>(owner[item_id]) == parallel::rank()) {
+            has_own_differences = true;
+          }
+        }
+      }
+
+      is_same             = parallel::allReduceAnd(is_same);
+      has_own_differences = parallel::allReduceOr(has_own_differences);
+
+      if (is_same) {
+        std::cout << rang::fg::cyan << " | compare: " << rang::fgB::green << "success" << rang::fg::reset << '\n';
+      } else {
+        if (has_own_differences) {
+          std::cout << rang::fg::cyan << " | compare: " << rang::fgB::red << "failed!" << rang::fg::reset;
+        } else {
+          std::cout << rang::fg::cyan << " | compare: " << rang::fgB::yellow << "not synchronized" << rang::fg::reset;
+        }
+        std::cout << rang::fg::cyan << " [see \"" << rang::fgB::blue << m_path + "parallel_differences_" << m_tag
+                  << "_*" << rang::fg::cyan << "\" files for details]" << rang::fg::reset << '\n';
+
+        {
+          std::ofstream fout(std::string{m_path + "parallel_differences_"} + stringify(m_tag) + std::string{"_"} +
+                             stringify(parallel::rank()));
+
+          fout.precision(15);
+          for (ItemId item_id = 0; item_id < item_value.numberOfItems(); ++item_id) {
+            if (reference_item_value[index_in_reference[item_id]] != item_value[item_id]) {
+              const bool is_own_difference = (parallel::rank() == static_cast<size_t>(owner[item_id]));
+              if (is_own_difference) {
+                fout << rang::fgB::red << "[ own ]" << rang::fg::reset;
+              } else {
+                fout << rang::fgB::yellow << "[ghost]" << rang::fg::reset;
+              }
+              fout << " rank=" << parallel::rank() << " owner=" << owner[item_id] << " item_id=" << item_id
+                   << " number=" << item_numbers[item_id]
+                   << " reference=" << reference_item_value[index_in_reference[item_id]]
+                   << " target=" << item_value[item_id]
+                   << " difference=" << reference_item_value[index_in_reference[item_id]] - item_value[item_id] << '\n';
+              if (static_cast<size_t>(owner[item_id]) == parallel::rank()) {
+                has_own_differences = true;
+              }
+            }
+          }
+        }
+
+        if (parallel::allReduceAnd(has_own_differences)) {
+          throw NormalError("calculations differ!");
+        }
+      }
+
+      ++m_tag;
+    }
+    // LCOV_EXCL_START
+    catch (HighFive::Exception& e) {
+      throw NormalError(e.what());
+    }
+    // LCOV_EXCL_STOP
+  }
+
+  template <typename DataType, ItemType item_type, typename ConnectivityPtr>
+  void
+  compare(const ItemArray<DataType, item_type, ConnectivityPtr>& item_array,
+          const std::string& name,
+          const SourceLocation& source_location)
+  {
+    HighFive::SilenceHDF5 m_silence_hdf5{true};
+    this->_printHeader(name, source_location);
+
+    try {
+      HighFive::File file{m_filename, HighFive::File::ReadOnly};
+
+      auto group = file.getGroup("values/" + std::to_string(m_tag));
+
+      const std::string reference_name = group.getAttribute("name").read<std::string>();
+
+      std::shared_ptr<const IConnectivity> i_connectivity = item_array.connectivity_ptr();
+
+      this->_throwIfNotComparable<DataType, item_type>(name, source_location,
+                                                       std::vector<size_t>{item_array.numberOfItems(),
+                                                                           item_array.sizeOfArrays()},
+                                                       i_connectivity, group);
+
+      Array<const int> reference_item_numbers    = this->_readArray<int>(group, "numbers");
+      Table<const DataType> reference_item_array = this->_readTable<DataType>(group, reference_name);
+
+      Array<const int> item_numbers = this->_getItemNumber<item_type>(i_connectivity);
+
+      using ItemId = ItemIdT<item_type>;
+
+      std::unordered_map<int, ItemId> item_number_to_item_id_map;
+
+      for (ItemId item_id = 0; item_id < item_numbers.size(); ++item_id) {
+        const auto& [iterator, success] =
+          item_number_to_item_id_map.insert(std::make_pair(item_numbers[item_id], item_id));
+
+        // LCOV_EXCL_START
+        if (not success) {
+          throw UnexpectedError("item numbers have duplicate values");
+        }
+        // LCOV_EXCL_STOP
+      }
+
+      Assert(item_number_to_item_id_map.size() == item_numbers.size());
+
+      Array<int> index_in_reference(item_numbers.size());
+      index_in_reference.fill(-1);
+      for (size_t i = 0; i < reference_item_numbers.size(); ++i) {
+        const auto& i_number_to_item_id = item_number_to_item_id_map.find(reference_item_numbers[i]);
+        if (i_number_to_item_id != item_number_to_item_id_map.end()) {
+          index_in_reference[i_number_to_item_id->second] = i;
+        }
+      }
+
+      if (parallel::allReduceMin(min(index_in_reference)) < 0) {
+        throw NormalError("some item numbers are not defined in reference");
+      }
+      this->_checkGlobalNumberOfItems<item_type>(i_connectivity, reference_item_numbers.size());
+
+      Array<const int> owner = this->_getItemOwner<item_type>(i_connectivity);
+
+      bool has_own_differences = false;
+      bool is_same             = true;
+
+      for (ItemId item_id = 0; item_id < item_array.numberOfItems(); ++item_id) {
+        for (size_t i = 0; i < reference_item_array.numberOfColumns(); ++i) {
+          if (reference_item_array[index_in_reference[item_id]][i] != item_array[item_id][i]) {
+            is_same = false;
+            if (static_cast<size_t>(owner[item_id]) == parallel::rank()) {
+              has_own_differences = true;
+            }
+          }
+        }
+      }
+
+      is_same             = parallel::allReduceAnd(is_same);
+      has_own_differences = parallel::allReduceOr(has_own_differences);
+
+      if (is_same) {
+        std::cout << rang::fg::cyan << " | compare: " << rang::fgB::green << "success" << rang::fg::reset << '\n';
+      } else {
+        if (has_own_differences) {
+          std::cout << rang::fg::cyan << " | compare: " << rang::fgB::red << "failed!" << rang::fg::reset;
+        } else {
+          std::cout << rang::fg::cyan << " | compare: " << rang::fgB::yellow << "not synchronized" << rang::fg::reset;
+        }
+        std::cout << rang::fg::cyan << " [see \"" << rang::fgB::blue << "parallel_differences_" << m_tag << "_*"
+                  << rang::fg::cyan << "\" files for details]" << rang::fg::reset << '\n';
+
+        {
+          std::ofstream fout(std::string{m_path + "parallel_differences_"} + stringify(m_tag) + std::string{"_"} +
+                             stringify(parallel::rank()));
+
+          fout.precision(15);
+          for (ItemId item_id = 0; item_id < item_array.numberOfItems(); ++item_id) {
+            for (size_t i = 0; i < item_array.sizeOfArrays(); ++i) {
+              if (reference_item_array[index_in_reference[item_id]][i] != item_array[item_id][i]) {
+                const bool is_own_difference = (parallel::rank() == static_cast<size_t>(owner[item_id]));
+                if (is_own_difference) {
+                  fout << rang::fgB::red << "[ own ]" << rang::fg::reset;
+                } else {
+                  fout << rang::fgB::yellow << "[ghost]" << rang::fg::reset;
+                }
+                fout << " rank=" << parallel::rank() << " owner=" << owner[item_id] << " item_id=" << item_id
+                     << " column=" << i << " number=" << item_numbers[item_id]
+                     << " reference=" << reference_item_array[index_in_reference[item_id]][i]
+                     << " target=" << item_array[item_id][i]
+                     << " difference=" << reference_item_array[index_in_reference[item_id]][i] - item_array[item_id][i]
+                     << '\n';
+                if (static_cast<size_t>(owner[item_id]) == parallel::rank()) {
+                  has_own_differences = true;
+                }
+              }
+            }
+          }
+        }
+
+        if (parallel::allReduceAnd(has_own_differences)) {
+          throw NormalError("calculations differ!");
+        }
+      }
+
+      ++m_tag;
+    }
+    // LCOV_EXCL_START
+    catch (HighFive::Exception& e) {
+      throw NormalError(e.what());
+    }
+    // LCOV_EXCL_STOP
+  }
+
+  template <typename DataType, typename ItemOfItem, typename ConnectivityPtr>
+  void
+  compare(const SubItemValuePerItem<DataType, ItemOfItem, ConnectivityPtr>& subitem_value_per_item,
+          const std::string& name,
+          const SourceLocation& source_location)
+  {
+    HighFive::SilenceHDF5 m_silence_hdf5{true};
+    this->_printHeader(name, source_location);
+
+    try {
+      HighFive::File file{m_filename, HighFive::File::ReadOnly};
+
+      auto group = file.getGroup("values/" + std::to_string(m_tag));
+
+      const std::string reference_name = group.getAttribute("name").read<std::string>();
+
+      std::shared_ptr<const IConnectivity> i_connectivity = subitem_value_per_item.connectivity_ptr();
+
+      this->_throwIfNotComparable<DataType, ItemOfItem>(name, source_location,
+                                                        std::vector<size_t>{subitem_value_per_item.numberOfItems()},
+                                                        i_connectivity, group);
+
+      constexpr ItemType item_type     = ItemOfItem::item_type;
+      constexpr ItemType sub_item_type = ItemOfItem::sub_item_type;
+      using IndexType                  = typename ConnectivityMatrix::IndexType;
+
+      Array<const int> reference_item_numbers           = this->_readArray<int>(group, "numbers");
+      Array<const IndexType> reference_subitem_rows_map = this->_readArray<IndexType>(group, "rows_map");
+
+      Array<const DataType> reference_subitem_value_per_item = this->_readArray<DataType>(group, reference_name);
+
+      Array<const int> item_numbers           = this->_getItemNumber<item_type>(i_connectivity);
+      Array<const IndexType> sub_item_row_map = this->_getSubItemRowsMap<item_type, sub_item_type>(i_connectivity);
+
+      using ItemId = ItemIdT<item_type>;
+
+      std::unordered_map<int, ItemId> item_number_to_item_id_map;
+
+      for (ItemId item_id = 0; item_id < item_numbers.size(); ++item_id) {
+        const auto& [iterator, success] =
+          item_number_to_item_id_map.insert(std::make_pair(item_numbers[item_id], item_id));
+
+        // LCOV_EXCL_START
+        if (not success) {
+          throw UnexpectedError("item numbers have duplicate values");
+        }
+        // LCOV_EXCL_STOP
+      }
+
+      Assert(item_number_to_item_id_map.size() == item_numbers.size());
+
+      Array<int> item_index_in_reference(item_numbers.size());
+      item_index_in_reference.fill(-1);
+      for (size_t i = 0; i < reference_item_numbers.size(); ++i) {
+        const auto& i_number_to_item_id = item_number_to_item_id_map.find(reference_item_numbers[i]);
+        if (i_number_to_item_id != item_number_to_item_id_map.end()) {
+          item_index_in_reference[i_number_to_item_id->second] = i;
+        }
+      }
+
+      if (parallel::allReduceMin(min(item_index_in_reference)) < 0) {
+        throw NormalError("some item numbers are not defined in reference");
+      }
+      this->_checkGlobalNumberOfItems<item_type>(i_connectivity, reference_item_numbers.size());
+
+      Array<const int> owner = this->_getItemOwner<item_type>(i_connectivity);
+
+      bool has_own_differences = false;
+      bool is_same             = true;
+
+      for (ItemId item_id = 0; item_id < subitem_value_per_item.numberOfItems(); ++item_id) {
+        const size_t reference_item_index     = item_index_in_reference[item_id];
+        const size_t index_begin_in_reference = reference_subitem_rows_map[reference_item_index];
+        const size_t index_end_in_reference   = reference_subitem_rows_map[reference_item_index + 1];
+
+        bool item_is_same = true;
+        if ((index_end_in_reference - index_begin_in_reference) != subitem_value_per_item[item_id].size()) {
+          item_is_same = false;
+        } else {
+          for (size_t i_sub_item = 0; i_sub_item < subitem_value_per_item[item_id].size(); ++i_sub_item) {
+            if (reference_subitem_value_per_item[index_begin_in_reference + i_sub_item] !=
+                subitem_value_per_item[item_id][i_sub_item]) {
+              item_is_same = false;
+            }
+          }
+        }
+        if (not item_is_same) {
+          is_same = false;
+          if (static_cast<size_t>(owner[item_id]) == parallel::rank()) {
+            has_own_differences = true;
+          }
+        }
+      }
+
+      is_same             = parallel::allReduceAnd(is_same);
+      has_own_differences = parallel::allReduceOr(has_own_differences);
+
+      if (is_same) {
+        std::cout << rang::fg::cyan << " | compare: " << rang::fgB::green << "success" << rang::fg::reset << '\n';
+      } else {
+        if (has_own_differences) {
+          std::cout << rang::fg::cyan << " | compare: " << rang::fgB::red << "failed!" << rang::fg::reset;
+        } else {
+          std::cout << rang::fg::cyan << " | compare: " << rang::fgB::yellow << "not synchronized" << rang::fg::reset;
+        }
+        std::cout << rang::fg::cyan << " [see \"" << rang::fgB::blue << m_path + "parallel_differences_" << m_tag
+                  << "_*" << rang::fg::cyan << "\" files for details]" << rang::fg::reset << '\n';
+
+        {
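+          // each rank writes its own report: <path>parallel_differences_<tag>_<rank>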
+          std::ofstream fout(std::string{m_path + "parallel_differences_"} + stringify(m_tag) + std::string{"_"} +
+                             stringify(parallel::rank()));
+
+          fout.precision(15);
+          for (ItemId item_id = 0; item_id < subitem_value_per_item.numberOfItems(); ++item_id) {
+            const size_t reference_item_index     = item_index_in_reference[item_id];
+            const size_t index_begin_in_reference = reference_subitem_rows_map[reference_item_index];
+            const size_t index_end_in_reference   = reference_subitem_rows_map[reference_item_index + 1];
+            if ((index_end_in_reference - index_begin_in_reference) != subitem_value_per_item[item_id].size()) {
+              const bool is_own_difference = (parallel::rank() == static_cast<size_t>(owner[item_id]));
+              if (is_own_difference) {
+                fout << rang::fgB::red << "[ own ]" << rang::fg::reset;
+              } else {
+                fout << rang::fgB::yellow << "[ghost]" << rang::fg::reset;
+              }
+              fout << " rank=" << parallel::rank() << " owner=" << owner[item_id] << " item_id=" << item_id
+                   << " number=" << item_numbers[item_id]
+                   << " reference[subitems number]=" << index_end_in_reference - index_begin_in_reference
+                   << " target[subitems number]=" << subitem_value_per_item[item_id].size() << '\n';
+
+            } else {
+              for (size_t i_sub_item = 0; i_sub_item < subitem_value_per_item[item_id].size(); ++i_sub_item) {
+                if (reference_subitem_value_per_item[index_begin_in_reference + i_sub_item] !=
+                    subitem_value_per_item[item_id][i_sub_item]) {
+                  const bool is_own_difference = (parallel::rank() == static_cast<size_t>(owner[item_id]));
+                  if (is_own_difference) {
+                    fout << rang::fgB::red << "[ own ]" << rang::fg::reset;
+                  } else {
+                    fout << rang::fgB::yellow << "[ghost]" << rang::fg::reset;
+                  }
+                  fout << " rank=" << parallel::rank() << " owner=" << owner[item_id] << " item_id=" << item_id
+                       << " number=" << item_numbers[item_id] << " i_subitem=" << i_sub_item
+                       << " reference=" << reference_subitem_value_per_item[index_begin_in_reference + i_sub_item]
+                       << " target=" << subitem_value_per_item[item_id][i_sub_item] << " difference="
+                       << reference_subitem_value_per_item[index_begin_in_reference + i_sub_item] -
+                            subitem_value_per_item[item_id][i_sub_item]
+                       << '\n';
+                }
+              }
+            }
+          }
+        }
+
+        if (has_own_differences) {
+          throw NormalError("calculations differ!");
+        }
+      }
+
+      ++m_tag;
+    }
+    // LCOV_EXCL_START
+    catch (HighFive::Exception& e) {
+      throw NormalError(e.what());
+    }
+    // LCOV_EXCL_STOP
+  }
+
+  template <typename DataType, typename ItemOfItem, typename ConnectivityPtr>
+  void
+  compare(const SubItemArrayPerItem<DataType, ItemOfItem, ConnectivityPtr>& subitem_array_per_item,
+          const std::string& name,
+          const SourceLocation& source_location)
+  {
+    HighFive::SilenceHDF5 m_silence_hdf5{true};
+    this->_printHeader(name, source_location);
+
+    try {
+      HighFive::File file{m_filename, HighFive::File::ReadOnly};
+
+      auto group = file.getGroup("values/" + std::to_string(m_tag));
+
+      const std::string reference_name = group.getAttribute("name").read<std::string>();
+
+      std::shared_ptr<const IConnectivity> i_connectivity = subitem_array_per_item.connectivity_ptr();
+
+      this->_throwIfNotComparable<DataType, ItemOfItem>(name, source_location,
+                                                        std::vector<size_t>{subitem_array_per_item.numberOfItems(),
+                                                                            subitem_array_per_item.sizeOfArrays()},
+                                                        i_connectivity, group);
+
+      constexpr ItemType item_type     = ItemOfItem::item_type;
+      constexpr ItemType sub_item_type = ItemOfItem::sub_item_type;
+      using IndexType                  = typename ConnectivityMatrix::IndexType;
+
+      Array<const int> reference_item_numbers           = this->_readArray<int>(group, "numbers");
+      Array<const IndexType> reference_subitem_rows_map = this->_readArray<IndexType>(group, "rows_map");
+
+      Table<const DataType> reference_subitem_array_per_item = this->_readTable<DataType>(group, reference_name);
+
+      Array<const int> item_numbers           = this->_getItemNumber<item_type>(i_connectivity);
+      Array<const IndexType> sub_item_row_map = this->_getSubItemRowsMap<item_type, sub_item_type>(i_connectivity);
+
+      using ItemId = ItemIdT<item_type>;
+
+      std::unordered_map<int, ItemId> item_number_to_item_id_map;
+
+      for (ItemId item_id = 0; item_id < item_numbers.size(); ++item_id) {
+        const auto& [iterator, success] =
+          item_number_to_item_id_map.insert(std::make_pair(item_numbers[item_id], item_id));
+
+        // LCOV_EXCL_START
+        if (not success) {
+          throw UnexpectedError("item numbers have duplicate values");
+        }
+        // LCOV_EXCL_STOP
+      }
+
+      Assert(item_number_to_item_id_map.size() == item_numbers.size());
+
+      Array<int> item_index_in_reference(item_numbers.size());
+      item_index_in_reference.fill(-1);
+      for (size_t i = 0; i < reference_item_numbers.size(); ++i) {
+        const auto& i_number_to_item_id = item_number_to_item_id_map.find(reference_item_numbers[i]);
+        if (i_number_to_item_id != item_number_to_item_id_map.end()) {
+          item_index_in_reference[i_number_to_item_id->second] = i;
+        }
+      }
+
+      if (parallel::allReduceMin(min(item_index_in_reference)) < 0) {
+        throw NormalError("some item numbers are not defined in reference");
+      }
+      this->_checkGlobalNumberOfItems<item_type>(i_connectivity, reference_item_numbers.size());
+
+      Array<const int> owner = this->_getItemOwner<item_type>(i_connectivity);
+
+      bool has_own_differences = false;
+      bool is_same             = true;
+
+      for (ItemId item_id = 0; item_id < subitem_array_per_item.numberOfItems(); ++item_id) {
+        const size_t reference_item_index     = item_index_in_reference[item_id];
+        const size_t index_begin_in_reference = reference_subitem_rows_map[reference_item_index];
+        const size_t index_end_in_reference   = reference_subitem_rows_map[reference_item_index + 1];
+
+        bool item_is_same = true;
+        if ((index_end_in_reference - index_begin_in_reference) != subitem_array_per_item[item_id].numberOfRows()) {
+          item_is_same = false;
+        } else {
+          for (size_t i_sub_item = 0; i_sub_item < subitem_array_per_item[item_id].numberOfRows(); ++i_sub_item) {
+            for (size_t i = 0; i < subitem_array_per_item.sizeOfArrays(); ++i) {
+              if (reference_subitem_array_per_item[index_begin_in_reference + i_sub_item][i] !=
+                  subitem_array_per_item[item_id][i_sub_item][i]) {
+                item_is_same = false;
+              }
+            }
+          }
+        }
+        if (not item_is_same) {
+          is_same = false;
+          if (static_cast<size_t>(owner[item_id]) == parallel::rank()) {
+            has_own_differences = true;
+          }
+        }
+      }
+
+      is_same             = parallel::allReduceAnd(is_same);
+      has_own_differences = parallel::allReduceOr(has_own_differences);
+
+      if (is_same) {
+        std::cout << rang::fg::cyan << " | compare: " << rang::fgB::green << "success" << rang::fg::reset << '\n';
+      } else {
+        if (has_own_differences) {
+          std::cout << rang::fg::cyan << " | compare: " << rang::fgB::red << "failed!" << rang::fg::reset;
+        } else {
+          std::cout << rang::fg::cyan << " | compare: " << rang::fgB::yellow << "not synchronized" << rang::fg::reset;
+        }
+        std::cout << rang::fg::cyan << " [see \"" << rang::fgB::blue << m_path + "parallel_differences_" << m_tag
+                  << "_*" << rang::fg::cyan << "\" files for details]" << rang::fg::reset << '\n';
+
+        {
+          std::ofstream fout(std::string{m_path + "parallel_differences_"} + stringify(m_tag) + std::string{"_"} +
+                             stringify(parallel::rank()));
+
+          fout.precision(15);
+          for (ItemId item_id = 0; item_id < subitem_array_per_item.numberOfItems(); ++item_id) {
+            const size_t reference_item_index     = item_index_in_reference[item_id];
+            const size_t index_begin_in_reference = reference_subitem_rows_map[reference_item_index];
+            const size_t index_end_in_reference   = reference_subitem_rows_map[reference_item_index + 1];
+            if ((index_end_in_reference - index_begin_in_reference) != subitem_array_per_item[item_id].numberOfRows()) {
+              const bool is_own_difference = (parallel::rank() == static_cast<size_t>(owner[item_id]));
+              if (is_own_difference) {
+                fout << rang::fgB::red << "[ own ]" << rang::fg::reset;
+              } else {
+                fout << rang::fgB::yellow << "[ghost]" << rang::fg::reset;
+              }
+              fout << " rank=" << parallel::rank() << " owner=" << owner[item_id] << " item_id=" << item_id
+                   << " number=" << item_numbers[item_id]
+                   << " reference[subitems number]=" << index_end_in_reference - index_begin_in_reference
+                   << " target[subitems number]=" << subitem_array_per_item[item_id].numberOfRows() << '\n';
+
+            } else {
+              for (size_t i_sub_item = 0; i_sub_item < subitem_array_per_item[item_id].numberOfRows(); ++i_sub_item) {
+                for (size_t i = 0; i < subitem_array_per_item.sizeOfArrays(); ++i) {
+                  if (reference_subitem_array_per_item[index_begin_in_reference + i_sub_item][i] !=
+                      subitem_array_per_item[item_id][i_sub_item][i]) {
+                    const bool is_own_difference = (parallel::rank() == static_cast<size_t>(owner[item_id]));
+                    if (is_own_difference) {
+                      fout << rang::fgB::red << "[ own ]" << rang::fg::reset;
+                    } else {
+                      fout << rang::fgB::yellow << "[ghost]" << rang::fg::reset;
+                    }
+                    fout << " rank=" << parallel::rank() << " owner=" << owner[item_id] << " item_id=" << item_id
+                         << " number=" << item_numbers[item_id] << " i_subitem=" << i_sub_item << " i_value=" << i
+                         << " reference=" << reference_subitem_array_per_item[index_begin_in_reference + i_sub_item][i]
+                         << " target=" << subitem_array_per_item[item_id][i_sub_item][i] << " difference="
+                         << reference_subitem_array_per_item[index_begin_in_reference + i_sub_item][i] -
+                              subitem_array_per_item[item_id][i_sub_item][i]
+                         << '\n';
+                  }
+                }
+              }
+            }
+          }
+        }
+
+        if (has_own_differences) {
+          throw NormalError("calculations differ!");
+        }
+      }
+
+      ++m_tag;
+    }
+    // LCOV_EXCL_START
+    catch (HighFive::Exception& e) {
+      throw NormalError(e.what());
+    }
+    // LCOV_EXCL_STOP
+  }
+
+#else    // PUGS_HAS_HDF5
+
+  template <typename T>
+  void
+  write(const T&, const std::string&, const SourceLocation&)
+  {
+    throw NormalError("parallel checker cannot be used without HDF5 support");
+  }
+
+  template <typename T>
+  void
+  compare(const T&, const std::string&, const SourceLocation&)
+  {
+    throw NormalError("parallel checker cannot be used without HDF5 support");
+  }
+#endif   // PUGS_HAS_HDF5
+
+ public:
+  static void create();
+  static void destroy();
+
+  static ParallelChecker&
+  instance()
+  {
+    Assert(m_instance != nullptr, "ParallelChecker was not created");
+    return *m_instance;
+  }
+
+  Mode
+  mode() const
+  {
+    return m_mode;
+  }
+
+  void
+  setMode(const Mode& mode)
+  {
+    if (m_tag != 0) {
+      throw UnexpectedError("Cannot modify parallel checker mode if it was already used");
+    }
+
+    // LCOV_EXCL_START
+    if ((mode == Mode::write) and (parallel::size() > 1)) {
+      throw NotImplementedError("parallel checker write in parallel runs");
+    }
+    // LCOV_EXCL_STOP
+
+    m_mode = mode;
+  }
+
+  const std::string&
+  filename() const
+  {
+    return m_filename;
+  }
+
+  void
+  setFilename(const std::string& filename)
+  {
+    if (m_tag != 0) {
+      throw UnexpectedError("Cannot modify parallel checker file if it was already used");
+    }
+    m_filename = filename;
+    m_path     = std::filesystem::path(filename).remove_filename();
+  }
+
+  bool
+  isWriting() const
+  {
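+    // automatic mode writes the reference in sequential runs and compares in
+    // parallel runs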
+    bool is_writing = false;
+    switch (m_mode) {
+    case Mode::automatic: {
+      is_writing = (parallel::size() == 1);
+      break;
+    }
+    case Mode::write: {
+      is_writing = true;
+      break;
+    }
+    case Mode::read: {
+      is_writing = false;
+      break;
+    }
+    }
+
+    return is_writing;
+  }
+};
+
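+// The parallel_check free functions dispatch on the checker mode: they write
+// a reference file when the checker is writing and compare against it
+// otherwise.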
+template <typename DataType, ItemType item_type, typename ConnectivityPtr>
+void
+parallel_check(const ItemValue<DataType, item_type, ConnectivityPtr>& item_value,
+               const std::string& name,
+               const SourceLocation& source_location)
+{
+  if (ParallelChecker::instance().isWriting()) {
+    ParallelChecker::instance().write(item_value, name, source_location);
+  } else {
+    ParallelChecker::instance().compare(item_value, name, source_location);
+  }
+}
+
+template <typename DataType, ItemType item_type, typename ConnectivityPtr>
+void
+parallel_check(const ItemArray<DataType, item_type, ConnectivityPtr>& item_array,
+               const std::string& name,
+               const SourceLocation& source_location)
+{
+  if (ParallelChecker::instance().isWriting()) {
+    ParallelChecker::instance().write(item_array, name, source_location);
+  } else {
+    ParallelChecker::instance().compare(item_array, name, source_location);
+  }
+}
+
+template <typename DataType, typename ItemOfItem, typename ConnectivityPtr>
+void
+parallel_check(const SubItemValuePerItem<DataType, ItemOfItem, ConnectivityPtr>& subitem_value_per_item,
+               const std::string& name,
+               const SourceLocation& source_location)
+{
+  if (ParallelChecker::instance().isWriting()) {
+    ParallelChecker::instance().write(subitem_value_per_item, name, source_location);
+  } else {
+    ParallelChecker::instance().compare(subitem_value_per_item, name, source_location);
+  }
+}
+
+template <typename DataType, typename ItemOfItem, typename ConnectivityPtr>
+void
+parallel_check(const SubItemArrayPerItem<DataType, ItemOfItem, ConnectivityPtr>& subitem_array_per_item,
+               const std::string& name,
+               const SourceLocation& source_location)
+{
+  if (ParallelChecker::instance().isWriting()) {
+    ParallelChecker::instance().write(subitem_array_per_item, name, source_location);
+  } else {
+    ParallelChecker::instance().compare(subitem_array_per_item, name, source_location);
+  }
+}
+
+template <size_t Dimension, typename DataType>
+void PUGS_INLINE
+parallel_check(const DiscreteFunctionP0<Dimension, DataType>& discrete_function,
+               const std::string& name,
+               const SourceLocation& source_location = SourceLocation{})
+{
+  parallel_check(discrete_function.cellValues(), name, source_location);
+}
+
+template <size_t Dimension, typename DataType>
+void PUGS_INLINE
+parallel_check(const DiscreteFunctionP0Vector<Dimension, DataType>& discrete_function,
+               const std::string& name,
+               const SourceLocation& source_location = SourceLocation{})
+{
+  parallel_check(discrete_function.cellArrays(), name, source_location);
+}
+
+void parallel_check(const ItemValueVariant& item_value_variant,
+                    const std::string& name,
+                    const SourceLocation& source_location = SourceLocation{});
+
+void parallel_check(const ItemArrayVariant& item_array_variant,
+                    const std::string& name,
+                    const SourceLocation& source_location = SourceLocation{});
+
+void parallel_check(const SubItemValuePerItemVariant& subitem_value_per_item_variant,
+                    const std::string& name,
+                    const SourceLocation& source_location = SourceLocation{});
+
+void parallel_check(const SubItemArrayPerItemVariant& subitem_array_per_item_variant,
+                    const std::string& name,
+                    const SourceLocation& source_location = SourceLocation{});
+
+void parallel_check(const DiscreteFunctionVariant& discrete_function_variant,
+                    const std::string& name,
+                    const SourceLocation& source_location = SourceLocation{});
+
+#endif   // PARALLEL_CHECKER_HPP
diff --git a/src/dev/ParallelCheckerDiscreteFunctionVariant.cpp b/src/dev/ParallelCheckerDiscreteFunctionVariant.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..626fa684207a9f89a03c11844ced0778a564c948
--- /dev/null
+++ b/src/dev/ParallelCheckerDiscreteFunctionVariant.cpp
@@ -0,0 +1,10 @@
+#include <dev/ParallelChecker.hpp>
+
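+// unpack the variant and forward to the matching typed parallel_check overload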
+void
+parallel_check(const DiscreteFunctionVariant& discrete_function_variant,
+               const std::string& name,
+               const SourceLocation& source_location)
+{
+  std::visit([&](auto&& discrete_function) { parallel_check(discrete_function, name, source_location); },
+             discrete_function_variant.discreteFunction());
+}
diff --git a/src/dev/ParallelCheckerItemArrayVariant.cpp b/src/dev/ParallelCheckerItemArrayVariant.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2ca0f7e3fc3b581aed9d2adb8f77494c06e42288
--- /dev/null
+++ b/src/dev/ParallelCheckerItemArrayVariant.cpp
@@ -0,0 +1,10 @@
+#include <dev/ParallelChecker.hpp>
+
+void
+parallel_check(const ItemArrayVariant& item_array_variant,
+               const std::string& name,
+               const SourceLocation& source_location)
+{
+  std::visit([&](auto&& item_array) { parallel_check(item_array, name, source_location); },
+             item_array_variant.itemArray());
+}
diff --git a/src/dev/ParallelCheckerItemValueVariant.cpp b/src/dev/ParallelCheckerItemValueVariant.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..4cbc9ed2195c8075f41043bb6247b6ff842cc2ff
--- /dev/null
+++ b/src/dev/ParallelCheckerItemValueVariant.cpp
@@ -0,0 +1,10 @@
+#include <dev/ParallelChecker.hpp>
+
+void
+parallel_check(const ItemValueVariant& item_value_variant,
+               const std::string& name,
+               const SourceLocation& source_location)
+{
+  std::visit([&](auto&& item_value) { parallel_check(item_value, name, source_location); },
+             item_value_variant.itemValue());
+}
diff --git a/src/dev/ParallelCheckerSubItemArrayPerItemVariant.cpp b/src/dev/ParallelCheckerSubItemArrayPerItemVariant.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d33898f50039651fd71b739ba337d6e4a1463e27
--- /dev/null
+++ b/src/dev/ParallelCheckerSubItemArrayPerItemVariant.cpp
@@ -0,0 +1,10 @@
+#include <dev/ParallelChecker.hpp>
+
+void
+parallel_check(const SubItemArrayPerItemVariant& subitem_array_per_item_variant,
+               const std::string& name,
+               const SourceLocation& source_location)
+{
+  std::visit([&](auto&& sub_item_array) { parallel_check(sub_item_array, name, source_location); },
+             subitem_array_per_item_variant.subItemArrayPerItem());
+}
diff --git a/src/dev/ParallelCheckerSubItemValuePerItemVariant.cpp b/src/dev/ParallelCheckerSubItemValuePerItemVariant.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..4584202e25649f464e1dd01f22e0ff154f847d20
--- /dev/null
+++ b/src/dev/ParallelCheckerSubItemValuePerItemVariant.cpp
@@ -0,0 +1,10 @@
+#include <dev/ParallelChecker.hpp>
+
+void
+parallel_check(const SubItemValuePerItemVariant& subitem_value_per_item_variant,
+               const std::string& name,
+               const SourceLocation& source_location)
+{
+  std::visit([&](auto&& sub_item_value) { parallel_check(sub_item_value, name, source_location); },
+             subitem_value_per_item_variant.subItemValuePerItem());
+}
diff --git a/src/language/modules/CMakeLists.txt b/src/language/modules/CMakeLists.txt
index 47f4905806ae1da9cfbdee6659f51e2dbc1a83a6..5f87dcf34f5b8652f976a2f88d851eb381d8385b 100644
--- a/src/language/modules/CMakeLists.txt
+++ b/src/language/modules/CMakeLists.txt
@@ -1,6 +1,7 @@
 # ------------------- Source files --------------------
 
-add_library(PugsLanguageModules
+add_library(
+  PugsLanguageModules
   BinaryOperatorRegisterForVh.cpp
   BuiltinModule.cpp
   CoreModule.cpp
@@ -16,8 +17,14 @@ add_library(PugsLanguageModules
   WriterModule.cpp
 )
 
+target_link_libraries(
+  PugsLanguageModules
+  ${HIGHFIVE_TARGET}
+)
 
-add_dependencies(PugsLanguageModules
+add_dependencies(
+  PugsLanguageModules
   PugsLanguageAlgorithms
   PugsUtils
-  PugsMesh)
+  PugsMesh
+)
diff --git a/src/language/modules/DevUtilsModule.cpp b/src/language/modules/DevUtilsModule.cpp
index 2dace9adffa8c18482043fe7a4f154b7ec15abbb..7685443b081fb2c8f71e65c8eed8aae16273100b 100644
--- a/src/language/modules/DevUtilsModule.cpp
+++ b/src/language/modules/DevUtilsModule.cpp
@@ -1,5 +1,6 @@
 #include <language/modules/DevUtilsModule.hpp>
 
+#include <dev/ParallelChecker.hpp>
 #include <language/utils/ASTDotPrinter.hpp>
 #include <language/utils/ASTExecutionInfo.hpp>
 #include <language/utils/ASTPrinter.hpp>
@@ -8,6 +9,31 @@
 
 #include <fstream>
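+// register the language-level type names carried by the variant handles so
+// that the parallel_check builtins below can be bound to them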
 
+class DiscreteFunctionVariant;
+template <>
+inline ASTNodeDataType ast_node_data_type_from<std::shared_ptr<const DiscreteFunctionVariant>> =
+  ASTNodeDataType::build<ASTNodeDataType::type_id_t>("Vh");
+
+class ItemValueVariant;
+template <>
+inline ASTNodeDataType ast_node_data_type_from<std::shared_ptr<const ItemValueVariant>> =
+  ASTNodeDataType::build<ASTNodeDataType::type_id_t>("item_value");
+
+class ItemArrayVariant;
+template <>
+inline ASTNodeDataType ast_node_data_type_from<std::shared_ptr<const ItemArrayVariant>> =
+  ASTNodeDataType::build<ASTNodeDataType::type_id_t>("item_array");
+
+class SubItemValuePerItemVariant;
+template <>
+inline ASTNodeDataType ast_node_data_type_from<std::shared_ptr<const SubItemValuePerItemVariant>> =
+  ASTNodeDataType::build<ASTNodeDataType::type_id_t>("sub_item_value");
+
+class SubItemArrayPerItemVariant;
+template <>
+inline ASTNodeDataType ast_node_data_type_from<std::shared_ptr<const SubItemArrayPerItemVariant>> =
+  ASTNodeDataType::build<ASTNodeDataType::type_id_t>("sub_item_array");
+
 DevUtilsModule::DevUtilsModule()
 {
   this->_addBuiltinFunction("getAST", std::function(
@@ -63,6 +89,56 @@ DevUtilsModule::DevUtilsModule()
                                                 }
 
                                                 ));
+
+  this->_addBuiltinFunction("parallel_check",
+                            std::function(
+
+                              [](const std::shared_ptr<const DiscreteFunctionVariant>& discrete_function,
+                                 const std::string& name) {
+                                parallel_check(*discrete_function, name, ASTBacktrace::getInstance().sourceLocation());
+                              }
+
+                              ));
+
+  this->_addBuiltinFunction("parallel_check",
+                            std::function(
+
+                              [](const std::shared_ptr<const ItemValueVariant>& item_value, const std::string& name) {
+                                parallel_check(*item_value, name, ASTBacktrace::getInstance().sourceLocation());
+                              }
+
+                              ));
+
+  this->_addBuiltinFunction("parallel_check",
+                            std::function(
+
+                              [](const std::shared_ptr<const ItemArrayVariant>& item_array, const std::string& name) {
+                                parallel_check(*item_array, name, ASTBacktrace::getInstance().sourceLocation());
+                              }
+
+                              ));
+
+  this->_addBuiltinFunction("parallel_check",
+                            std::function(
+
+                              [](const std::shared_ptr<const SubItemValuePerItemVariant>& subitem_value_per_item,
+                                 const std::string& name) {
+                                parallel_check(*subitem_value_per_item, name,
+                                               ASTBacktrace::getInstance().sourceLocation());
+                              }
+
+                              ));
+
+  this->_addBuiltinFunction("parallel_check",
+                            std::function(
+
+                              [](const std::shared_ptr<const SubItemArrayPerItemVariant>& subitem_array_per_item,
+                                 const std::string& name) {
+                                parallel_check(*subitem_array_per_item, name,
+                                               ASTBacktrace::getInstance().sourceLocation());
+                              }
+
+                              ));
 }
 
 void
diff --git a/src/language/utils/CMakeLists.txt b/src/language/utils/CMakeLists.txt
index db3e7856b758b6a4822d97ac0330958b3b367176..33a41962e8f954ef214e0c6a50eefde1d8e29f7f 100644
--- a/src/language/utils/CMakeLists.txt
+++ b/src/language/utils/CMakeLists.txt
@@ -38,10 +38,15 @@ add_library(PugsLanguageUtils
   UnaryOperatorRegisterForRn.cpp
   UnaryOperatorRegisterForRnxn.cpp
   UnaryOperatorRegisterForZ.cpp
-  )
+)
 
-
-
-add_dependencies(PugsLanguageModules
+add_dependencies(PugsLanguageUtils
+  PugsLanguageModules
   PugsUtils
-  PugsMesh)
+  PugsMesh
+)
+
+target_link_libraries(
+  PugsLanguageUtils
+  ${HIGHFIVE_TARGET}
+)
diff --git a/src/main.cpp b/src/main.cpp
index 7e03cdc8093c3b4034d24312b98a16a6f606b2d1..7ee38e518aba13655318300e2ac53442ad4e8f49 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -1,15 +1,21 @@
 #include <analysis/QuadratureManager.hpp>
+#include <dev/ParallelChecker.hpp>
 #include <language/PugsParser.hpp>
 #include <mesh/DualConnectivityManager.hpp>
 #include <mesh/DualMeshManager.hpp>
 #include <mesh/MeshDataManager.hpp>
 #include <mesh/SynchronizerManager.hpp>
+#include <utils/ExecutionStatManager.hpp>
+#include <utils/GlobalVariableManager.hpp>
 #include <utils/PugsUtils.hpp>
 #include <utils/RandomEngine.hpp>
 
 int
 main(int argc, char* argv[])
 {
+  ExecutionStatManager::create();
+  ParallelChecker::create();
+
   std::string filename = initialize(argc, argv);
 
   SynchronizerManager::create();
@@ -19,16 +25,24 @@ main(int argc, char* argv[])
   DualConnectivityManager::create();
   DualMeshManager::create();
 
+  GlobalVariableManager::create();
+
   parser(filename);
+  ExecutionStatManager::printInfo();
+
+  GlobalVariableManager::destroy();
 
   DualMeshManager::destroy();
   DualConnectivityManager::destroy();
   MeshDataManager::destroy();
-  RandomEngine::destroy();
   QuadratureManager::destroy();
+  RandomEngine::destroy();
   SynchronizerManager::destroy();
 
   finalize();
 
+  ParallelChecker::destroy();
+  ExecutionStatManager::destroy();
+
   return 0;
 }
diff --git a/src/mesh/CMakeLists.txt b/src/mesh/CMakeLists.txt
index 63b1b000484f10031214f93d605db9d1c869a1a1..0735cfeb477949cb477848d3f5d65b6416946af4 100644
--- a/src/mesh/CMakeLists.txt
+++ b/src/mesh/CMakeLists.txt
@@ -42,4 +42,10 @@ add_library(
   MeshSmootherEscobar.cpp
   MeshSmootherJun.cpp
   MeshTransformer.cpp
-  SynchronizerManager.cpp)
+  SynchronizerManager.cpp
+)
+
+target_link_libraries(
+  PugsMesh
+  ${HIGHFIVE_TARGET}
+)
diff --git a/src/mesh/Connectivity.cpp b/src/mesh/Connectivity.cpp
index 27850821bf64eb590915bbdccce8e8b365fc0eb4..bd1e0d85b6f94d1d2d00326857853f4da75eacfe 100644
--- a/src/mesh/Connectivity.cpp
+++ b/src/mesh/Connectivity.cpp
@@ -2,12 +2,14 @@
 
 #include <mesh/ConnectivityDescriptor.hpp>
 #include <mesh/ItemValueUtils.hpp>
+#include <utils/GlobalVariableManager.hpp>
 #include <utils/Messenger.hpp>
 
 #include <map>
 
 template <size_t Dimension>
-Connectivity<Dimension>::Connectivity() = default;
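+// ids come from a per-process counter; ConnectivityDispatcher checks that
+// they remain identical across MPI ranks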
+Connectivity<Dimension>::Connectivity() : m_id{GlobalVariableManager::instance().getAndIncrementConnectivityId()}
+{}
 
 template <size_t Dimension>
 void
diff --git a/src/mesh/Connectivity.hpp b/src/mesh/Connectivity.hpp
index ee7b07fe043a44262c23afc053e748bcac448b50..24d9ef656f0cdd0746db0ee2f27881ce5244c8b0 100644
--- a/src/mesh/Connectivity.hpp
+++ b/src/mesh/Connectivity.hpp
@@ -50,6 +50,8 @@ class Connectivity final : public IConnectivity
   }
 
  private:
+  const size_t m_id;
+
   size_t m_number_of_cells;
   size_t m_number_of_faces;
   size_t m_number_of_edges;
@@ -115,6 +117,12 @@ class Connectivity final : public IConnectivity
   friend class ConnectivityComputer;
 
  public:
+  size_t
+  id() const
+  {
+    return m_id;
+  }
+
   PUGS_INLINE
   const ConnectivityMatrix&
   getMatrix(const ItemType& item_type_0, const ItemType& item_type_1) const final
@@ -562,6 +570,41 @@ class Connectivity final : public IConnectivity
     return m_node_local_numbers_in_their_edges;
   }
 
+  template <typename ItemOfItemType>
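+  // generic accessor returning the local-numbers view matching ItemOfItemType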
+  PUGS_INLINE SubItemValuePerItem<const uint16_t, ItemOfItemType>
+  itemLocalNumbersInTheirSubItems() const
+  {
+    if constexpr (std::is_same_v<ItemOfItemType, NodeOfCell>) {
+      return cellLocalNumbersInTheirNodes();
+    } else if constexpr (std::is_same_v<ItemOfItemType, EdgeOfCell>) {
+      return cellLocalNumbersInTheirEdges();
+    } else if constexpr (std::is_same_v<ItemOfItemType, FaceOfCell>) {
+      return cellLocalNumbersInTheirFaces();
+    } else if constexpr (std::is_same_v<ItemOfItemType, NodeOfFace> and (Dimension > 1)) {
+      return faceLocalNumbersInTheirNodes();
+    } else if constexpr (std::is_same_v<ItemOfItemType, EdgeOfFace> and (Dimension > 2)) {
+      return faceLocalNumbersInTheirEdges();
+    } else if constexpr (std::is_same_v<ItemOfItemType, CellOfFace>) {
+      return faceLocalNumbersInTheirCells();
+    } else if constexpr (std::is_same_v<ItemOfItemType, NodeOfEdge> and (Dimension > 1)) {
+      return edgeLocalNumbersInTheirNodes();
+    } else if constexpr (std::is_same_v<ItemOfItemType, FaceOfEdge> and (Dimension > 2)) {
+      return edgeLocalNumbersInTheirFaces();
+    } else if constexpr (std::is_same_v<ItemOfItemType, CellOfEdge>) {
+      return edgeLocalNumbersInTheirCells();
+    } else if constexpr (std::is_same_v<ItemOfItemType, EdgeOfNode> and (Dimension > 1)) {
+      return nodeLocalNumbersInTheirEdges();
+    } else if constexpr (std::is_same_v<ItemOfItemType, FaceOfNode> and (Dimension > 1)) {
+      return nodeLocalNumbersInTheirFaces();
+    } else if constexpr (std::is_same_v<ItemOfItemType, CellOfNode>) {
+      return nodeLocalNumbersInTheirCells();
+    } else {
+      // LCOV_EXCL_START
+      throw UnexpectedError("invalid ItemOfItemType");
+      // LCOV_EXCL_STOP
+    }
+  }
+
   template <ItemType item_type>
   size_t
   numberOfRefItemList() const
diff --git a/src/mesh/ConnectivityDispatcher.cpp b/src/mesh/ConnectivityDispatcher.cpp
index 797ac6cff42c5a1077686f28515793f990b6218a..dde1f930d26d31faa21d06bf2b8d94568ccb93f1 100644
--- a/src/mesh/ConnectivityDispatcher.cpp
+++ b/src/mesh/ConnectivityDispatcher.cpp
@@ -685,6 +685,13 @@ template <int Dimension>
 ConnectivityDispatcher<Dimension>::ConnectivityDispatcher(const ConnectivityType& connectivity)
   : m_connectivity(connectivity)
 {
+  {
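+    // the connectivity being dispatched must carry the same id on every rank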
+    Array connectivity_id_list = parallel::allGather(connectivity.id());
+    if (min(connectivity_id_list) != max(connectivity_id_list)) {
+      throw UnexpectedError("connectivity ids diverged in parallel");
+    }
+  }
+
   this->_buildNewOwner<ItemType::cell>();
   if constexpr (Dimension > 1) {
     this->_buildNewOwner<ItemType::face>();
@@ -745,6 +752,13 @@ ConnectivityDispatcher<Dimension>::ConnectivityDispatcher(const ConnectivityType
   this->_buildItemReferenceList<ItemType::node>();
 
   m_dispatched_connectivity = ConnectivityType::build(m_new_descriptor);
+
+  {
+    Array connectivity_id_list = parallel::allGather(m_dispatched_connectivity->id());
+    if (min(connectivity_id_list) != max(connectivity_id_list)) {
+      throw UnexpectedError("connectivity ids diverged in parallel");
+    }
+  }
 }
 
 template ConnectivityDispatcher<1>::ConnectivityDispatcher(const ConnectivityType&);
diff --git a/src/mesh/ItemArray.hpp b/src/mesh/ItemArray.hpp
index 175537ef27f1d48123f83b0db9a951042b6ceb0e..f29947d8c37e7d3ba4a8b92a316b9f87d04767e2 100644
--- a/src/mesh/ItemArray.hpp
+++ b/src/mesh/ItemArray.hpp
@@ -43,6 +43,13 @@ class ItemArray
   friend ItemArray<std::remove_const_t<DataType>, item_type, ConnectivityWeakPtr>;
 
  public:
+  // This bypasses the ItemArray abstraction and exposes its raw value table; use with care
+  Table<const DataType>
+  tableView() const
+  {
+    return m_values;
+  }
+
   [[nodiscard]] friend PUGS_INLINE ItemArray<std::remove_const_t<DataType>, item_type, ConnectivityPtr>
   copy(const ItemArray<DataType, item_type, ConnectivityPtr>& source)
   {
diff --git a/src/mesh/ItemArrayUtils.hpp b/src/mesh/ItemArrayUtils.hpp
index 1bb8d929f12a2f834535a70166e43a130fb73e40..0d90f77af27add5f08982e2be535bd7b779837a9 100644
--- a/src/mesh/ItemArrayUtils.hpp
+++ b/src/mesh/ItemArrayUtils.hpp
@@ -250,7 +250,7 @@ synchronize(ItemArray<DataType, item_type, ConnectivityPtr> item_array)
 
 template <typename DataType, ItemType item_type, typename ConnectivityPtr>
 bool
-isSynchronized(ItemArray<const DataType, item_type, ConnectivityPtr> item_array)
+isSynchronized(ItemArray<DataType, item_type, ConnectivityPtr> item_array)
 {
   bool is_synchronized = true;
 
diff --git a/src/mesh/ItemOfItemType.hpp b/src/mesh/ItemOfItemType.hpp
index 8d27a599bf80fd754ffb73b2756b03ce4193c1b0..970736adae357d6e450b75096d4f828217f4efa3 100644
--- a/src/mesh/ItemOfItemType.hpp
+++ b/src/mesh/ItemOfItemType.hpp
@@ -30,4 +30,35 @@ using CellOfNode = ItemOfItemType<ItemType::cell, ItemType::node>;
 using FaceOfNode = ItemOfItemType<ItemType::face, ItemType::node>;
 using EdgeOfNode = ItemOfItemType<ItemType::edge, ItemType::node>;
 
+template <ItemType sub_item_type, ItemType item_type>
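+// dense index in [0, 12) assigned to each (sub_item_type, item_type)
+// configuration, used to index per-configuration storage; -1 flags invalid
+// pairs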
+constexpr inline int item_of_item_type_index = -1;
+
+template <>
+constexpr inline int item_of_item_type_index<ItemType::face, ItemType::cell> = 0;
+template <>
+constexpr inline int item_of_item_type_index<ItemType::edge, ItemType::cell> = 1;
+template <>
+constexpr inline int item_of_item_type_index<ItemType::node, ItemType::cell> = 2;
+
+template <>
+constexpr inline int item_of_item_type_index<ItemType::cell, ItemType::face> = 3;
+template <>
+constexpr inline int item_of_item_type_index<ItemType::edge, ItemType::face> = 4;
+template <>
+constexpr inline int item_of_item_type_index<ItemType::node, ItemType::face> = 5;
+
+template <>
+constexpr inline int item_of_item_type_index<ItemType::cell, ItemType::edge> = 6;
+template <>
+constexpr inline int item_of_item_type_index<ItemType::face, ItemType::edge> = 7;
+template <>
+constexpr inline int item_of_item_type_index<ItemType::node, ItemType::edge> = 8;
+
+template <>
+constexpr inline int item_of_item_type_index<ItemType::cell, ItemType::node> = 9;
+template <>
+constexpr inline int item_of_item_type_index<ItemType::face, ItemType::node> = 10;
+template <>
+constexpr inline int item_of_item_type_index<ItemType::edge, ItemType::node> = 11;
+
 #endif   // ITEM_OF_ITEM_TYPE_HPP
diff --git a/src/mesh/SubItemArrayPerItem.hpp b/src/mesh/SubItemArrayPerItem.hpp
index a98c039e085cc9539b077bf3a717f94c9fda7667..14e0caa632f35a6ee905ce181cd9d96658fb7827 100644
--- a/src/mesh/SubItemArrayPerItem.hpp
+++ b/src/mesh/SubItemArrayPerItem.hpp
@@ -49,6 +49,13 @@ class SubItemArrayPerItem
   friend SubItemArrayPerItem<std::remove_const_t<DataType>, ItemOfItem, ConnectivityWeakPtr>;
 
  public:
+  // This bypasses the SubItemArrayPerItem abstraction and exposes its raw value table; use with care
+  Table<const DataType>
+  tableView() const
+  {
+    return m_values;
+  }
+
   [[nodiscard]] friend PUGS_INLINE SubItemArrayPerItem<std::remove_const_t<DataType>, ItemOfItem, ConnectivityPtr>
   copy(const SubItemArrayPerItem<DataType, ItemOfItem, ConnectivityPtr>& source)
   {
diff --git a/src/mesh/SubItemArrayPerItemVariant.hpp b/src/mesh/SubItemArrayPerItemVariant.hpp
index ffb0ea673241236a7e6fd1a3ee0566439abcf765..2f917aa7cdb501580cacc54d55781335a24ca854 100644
--- a/src/mesh/SubItemArrayPerItemVariant.hpp
+++ b/src/mesh/SubItemArrayPerItemVariant.hpp
@@ -146,7 +146,7 @@ class SubItemArrayPerItemVariant
  public:
   PUGS_INLINE
   const Variant&
-  itemArray() const
+  subItemArrayPerItem() const
   {
     return m_sub_item_array_per_item;
   }
@@ -189,7 +189,7 @@ class SubItemArrayPerItemVariant
                   "SubItemArrayPerItem with this DataType is not allowed in variant");
   }
 
-  SubItemArrayPerItemVariant& operator=(SubItemArrayPerItemVariant&&) = default;
+  SubItemArrayPerItemVariant& operator=(SubItemArrayPerItemVariant&&)      = default;
   SubItemArrayPerItemVariant& operator=(const SubItemArrayPerItemVariant&) = default;
 
   SubItemArrayPerItemVariant(const SubItemArrayPerItemVariant&) = default;
diff --git a/src/mesh/SubItemValuePerItem.hpp b/src/mesh/SubItemValuePerItem.hpp
index f5ce39d5f7c67c448c327baf8fea9719d17021fd..d1b56991956495dff7cbb52e3d88896f2333c5c3 100644
--- a/src/mesh/SubItemValuePerItem.hpp
+++ b/src/mesh/SubItemValuePerItem.hpp
@@ -48,6 +48,14 @@ class SubItemValuePerItem
   friend SubItemValuePerItem<std::remove_const_t<DataType>, ItemOfItem, ConnectivityWeakPtr>;
 
  public:
+  // This bypasses the SubItemValuePerItem abstraction and exposes its raw
+  // value array; use with care
+  Array<const DataType>
+  arrayView() const
+  {
+    return m_values;
+  }
+
   [[nodiscard]] friend PUGS_INLINE SubItemValuePerItem<std::remove_const_t<DataType>, ItemOfItem, ConnectivityPtr>
   copy(const SubItemValuePerItem<DataType, ItemOfItem, ConnectivityPtr>& source)
   {
diff --git a/src/mesh/SubItemValuePerItemVariant.hpp b/src/mesh/SubItemValuePerItemVariant.hpp
index 618fd82d9b9a158941ffc80d00b5b4fa56a25e31..ef2bf58e388a69505c0a55f77ad3ff2d202bb401 100644
--- a/src/mesh/SubItemValuePerItemVariant.hpp
+++ b/src/mesh/SubItemValuePerItemVariant.hpp
@@ -146,7 +146,7 @@ class SubItemValuePerItemVariant
  public:
   PUGS_INLINE
   const Variant&
-  itemValue() const
+  subItemValuePerItem() const
   {
     return m_sub_item_value_per_item;
   }
@@ -189,7 +189,7 @@ class SubItemValuePerItemVariant
                   "SubItemValuePerItem with this DataType is not allowed in variant");
   }
 
-  SubItemValuePerItemVariant& operator=(SubItemValuePerItemVariant&&) = default;
+  SubItemValuePerItemVariant& operator=(SubItemValuePerItemVariant&&)      = default;
   SubItemValuePerItemVariant& operator=(const SubItemValuePerItemVariant&) = default;
 
   SubItemValuePerItemVariant(const SubItemValuePerItemVariant&) = default;
diff --git a/src/mesh/Synchronizer.hpp b/src/mesh/Synchronizer.hpp
index 63e4363ef7dc1b360ca75626ef01e31dcb0728b7..ed385f838235f2e70465029e23bfba3dda57d277 100644
--- a/src/mesh/Synchronizer.hpp
+++ b/src/mesh/Synchronizer.hpp
@@ -11,9 +11,8 @@
 
 #include <utils/pugs_config.hpp>
 
-#include <iostream>
-#include <map>
 #include <memory>
+#include <unordered_map>
 
 #ifdef PUGS_HAS_MPI
 
@@ -23,182 +22,364 @@ class Synchronizer
   template <ItemType item_type>
   using ExchangeItemTypeInfo = std::vector<Array<const ItemIdT<item_type>>>;
 
-  std::unique_ptr<ExchangeItemTypeInfo<ItemType::cell>> m_requested_cell_info;
-  std::unique_ptr<ExchangeItemTypeInfo<ItemType::cell>> m_provided_cell_info;
+  using ExchangeItemInfoRepository = std::tuple<std::unique_ptr<ExchangeItemTypeInfo<ItemType::node>>,
+                                                std::unique_ptr<ExchangeItemTypeInfo<ItemType::edge>>,
+                                                std::unique_ptr<ExchangeItemTypeInfo<ItemType::face>>,
+                                                std::unique_ptr<ExchangeItemTypeInfo<ItemType::cell>>>;
 
-  std::unique_ptr<ExchangeItemTypeInfo<ItemType::face>> m_requested_face_info;
-  std::unique_ptr<ExchangeItemTypeInfo<ItemType::face>> m_provided_face_info;
+  ExchangeItemInfoRepository m_requested_item_info_list;
+  ExchangeItemInfoRepository m_provided_item_info_list;
 
-  std::unique_ptr<ExchangeItemTypeInfo<ItemType::edge>> m_requested_edge_info;
-  std::unique_ptr<ExchangeItemTypeInfo<ItemType::edge>> m_provided_edge_info;
+  // 12 is the number of sub-item-of-item configurations in 3D
+  // (cell->edge, face->node, ...)
+  using ExchangeSubItemPerItemTotalSizeList = std::array<std::unique_ptr<std::vector<size_t>>, 12>;
+  using SubItemPerItemProvidedList          = std::array<std::unique_ptr<std::vector<Array<const size_t>>>, 12>;
+  using NumberOfSubItemPerItemProvidedList  = std::array<std::unique_ptr<std::vector<Array<const size_t>>>, 12>;
 
-  std::unique_ptr<ExchangeItemTypeInfo<ItemType::node>> m_requested_node_info;
-  std::unique_ptr<ExchangeItemTypeInfo<ItemType::node>> m_provided_node_info;
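+  // lazily-filled caches, one slot per (sub_item, item) configuration,
+  // indexed through item_of_item_type_index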
+  ExchangeSubItemPerItemTotalSizeList m_sub_item_per_item_requested_total_size_list;
+  ExchangeSubItemPerItemTotalSizeList m_sub_item_per_item_provided_total_size_list;
+  SubItemPerItemProvidedList m_sub_item_per_item_provided_list;
+  NumberOfSubItemPerItemProvidedList m_number_of_sub_item_per_item_provided_list;
 
-  using ExchangeSubItemPerItemSize = std::vector<std::map<std::pair<ItemType, ItemType>, size_t>>;
+  template <typename ConnectivityType, ItemType item_type>
+  void
+  _buildSynchronizeInfoIfNeeded(const ConnectivityType& connectivity)
+  {
+    const auto& item_owner = connectivity.template owner<item_type>();
+    using ItemId           = ItemIdT<item_type>;
 
-  ExchangeSubItemPerItemSize m_sub_item_per_item_requested_size_list;
-  ExchangeSubItemPerItemSize m_sub_item_per_item_provided_size_list;
+    auto& p_requested_item_info = std::get<static_cast<int>(item_type)>(m_requested_item_info_list);
+    auto& p_provided_item_info  = std::get<static_cast<int>(item_type)>(m_provided_item_info_list);
 
-  template <ItemType item_type>
-  PUGS_INLINE constexpr auto&
-  _getRequestedItemInfo()
-  {
-    if constexpr (item_type == ItemType::cell) {
-      return m_requested_cell_info;
-    } else if constexpr (item_type == ItemType::face) {
-      return m_requested_face_info;
-    } else if constexpr (item_type == ItemType::edge) {
-      return m_requested_edge_info;
-    } else if constexpr (item_type == ItemType::node) {
-      return m_requested_node_info;
+    Assert(static_cast<bool>(p_provided_item_info) == static_cast<bool>(p_requested_item_info));
+
+    if (not p_provided_item_info) {
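+      // first use for this item type: build both the requested list (ghost
+      // items grouped by owner rank) and the provided list (items to send)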
+      p_requested_item_info = [&]() {
+        std::vector<std::vector<ItemId>> requested_item_vector_info(parallel::size());
+        for (ItemId item_id = 0; item_id < item_owner.numberOfItems(); ++item_id) {
+          if (const size_t owner = item_owner[item_id]; owner != parallel::rank()) {
+            requested_item_vector_info[owner].emplace_back(item_id);
+          }
+        }
+        ExchangeItemTypeInfo<item_type> requested_item_info(parallel::size());
+        for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+          const auto& requested_item_vector = requested_item_vector_info[i_rank];
+          requested_item_info[i_rank]       = convert_to_array(requested_item_vector);
+        }
+        return std::make_unique<ExchangeItemTypeInfo<item_type>>(std::move(requested_item_info));
+      }();
+
+      auto& requested_item_info = *p_requested_item_info;
+
+      Array<unsigned int> local_number_of_requested_values(parallel::size());
+      for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+        local_number_of_requested_values[i_rank] = requested_item_info[i_rank].size();
+      }
+
+      Array<unsigned int> local_number_of_values_to_send = parallel::allToAll(local_number_of_requested_values);
+
+      std::vector<Array<const int>> requested_item_number_list_by_proc(parallel::size());
+      const auto& item_number = connectivity.template number<item_type>();
+      for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+        const auto& requested_item_info_from_rank = requested_item_info[i_rank];
+        Array<int> item_number_list{requested_item_info_from_rank.size()};
+        parallel_for(
+          requested_item_info_from_rank.size(), PUGS_LAMBDA(size_t i_item) {
+            item_number_list[i_item] = item_number[requested_item_info_from_rank[i_item]];
+          });
+        requested_item_number_list_by_proc[i_rank] = item_number_list;
+      }
+
+      std::vector<Array<int>> provided_item_number_list_by_rank(parallel::size());
+      for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+        provided_item_number_list_by_rank[i_rank] = Array<int>{local_number_of_values_to_send[i_rank]};
+      }
+
+      parallel::exchange(requested_item_number_list_by_proc, provided_item_number_list_by_rank);
+
+      std::unordered_map<int, ItemId> item_number_to_id_correspondance;
+      for (ItemId item_id = 0; item_id < item_number.numberOfItems(); ++item_id) {
+        item_number_to_id_correspondance[item_number[item_id]] = item_id;
+      }
+
+      p_provided_item_info = [&]() {
+        ExchangeItemTypeInfo<item_type> provided_item_info(parallel::size());
+        for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+          Array<ItemId> provided_item_id_to_rank{local_number_of_values_to_send[i_rank]};
+          const Array<int>& provided_item_number_to_rank = provided_item_number_list_by_rank[i_rank];
+          for (size_t i = 0; i < provided_item_number_to_rank.size(); ++i) {
+            provided_item_id_to_rank[i] =
+              item_number_to_id_correspondance.find(provided_item_number_to_rank[i])->second;
+          }
+          provided_item_info[i_rank] = provided_item_id_to_rank;
+        }
+        return std::make_unique<ExchangeItemTypeInfo<item_type>>(provided_item_info);
+      }();
     }
   }
 
-  template <ItemType item_type>
+  template <typename ConnectivityType, ItemType item_type>
   PUGS_INLINE constexpr auto&
-  _getProvidedItemInfo()
+  _getRequestedItemInfo(const ConnectivityType& connectivity)
   {
-    if constexpr (item_type == ItemType::cell) {
-      return m_provided_cell_info;
-    } else if constexpr (item_type == ItemType::face) {
-      return m_provided_face_info;
-    } else if constexpr (item_type == ItemType::edge) {
-      return m_provided_edge_info;
-    } else if constexpr (item_type == ItemType::node) {
-      return m_provided_node_info;
-    }
+    this->_buildSynchronizeInfoIfNeeded<ConnectivityType, item_type>(connectivity);
+    return *std::get<static_cast<int>(item_type)>(m_requested_item_info_list);
   }
 
   template <typename ConnectivityType, ItemType item_type>
-  void
-  _buildSynchronizeInfo(const ConnectivityType& connectivity)
+  PUGS_INLINE constexpr auto&
+  _getProvidedItemInfo(const ConnectivityType& connectivity)
   {
-    const auto& item_owner = connectivity.template owner<item_type>();
-    using ItemId           = ItemIdT<item_type>;
+    this->_buildSynchronizeInfoIfNeeded<ConnectivityType, item_type>(connectivity);
+    return *std::get<static_cast<int>(item_type)>(m_provided_item_info_list);
+  }
 
-    auto& p_requested_item_info = this->_getRequestedItemInfo<item_type>();
-    p_requested_item_info       = [&]() {
-      std::vector<std::vector<ItemId>> requested_item_vector_info(parallel::size());
-      for (ItemId item_id = 0; item_id < item_owner.numberOfItems(); ++item_id) {
-        if (const size_t owner = item_owner[item_id]; owner != parallel::rank()) {
-          requested_item_vector_info[owner].emplace_back(item_id);
-        }
-      }
-      ExchangeItemTypeInfo<item_type> requested_item_info(parallel::size());
+  template <ItemType item_type, ItemType sub_item_type, typename ConnectivityType>
+  PUGS_INLINE const std::vector<size_t>&
+  _getSubItemPerItemRequestedTotalSize(const ConnectivityType& connectivity)
+  {
+    auto& p_sub_item_per_item_requested_total_size =
+      m_sub_item_per_item_requested_total_size_list[item_of_item_type_index<sub_item_type, item_type>];
+    if (not p_sub_item_per_item_requested_total_size) {
+      std::vector<size_t> sub_item_per_item_requested_total_size(parallel::size());
+      const auto& requested_item_info = this->_getRequestedItemInfo<ConnectivityType, item_type>(connectivity);
       for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
-        const auto& requested_item_vector = requested_item_vector_info[i_rank];
-        requested_item_info[i_rank]       = convert_to_array(requested_item_vector);
-      }
-      return std::make_unique<ExchangeItemTypeInfo<item_type>>(std::move(requested_item_info));
-    }();
+        const auto& requested_item_info_from_rank = requested_item_info[i_rank];
 
-    auto& requested_item_info = *p_requested_item_info;
+        const auto& item_to_item_matrix = connectivity.template getItemToItemMatrix<item_type, sub_item_type>();
 
-    Array<unsigned int> local_number_of_requested_values(parallel::size());
-    for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
-      local_number_of_requested_values[i_rank] = requested_item_info[i_rank].size();
+        size_t count = 0;
+        for (size_t i = 0; i < requested_item_info_from_rank.size(); ++i) {
+          count += item_to_item_matrix[requested_item_info_from_rank[i]].size();
+        }
+
+        sub_item_per_item_requested_total_size[i_rank] = count;
+      }
+      p_sub_item_per_item_requested_total_size =
+        std::make_unique<std::vector<size_t>>(std::move(sub_item_per_item_requested_total_size));
     }
 
-    Array<unsigned int> local_number_of_values_to_send = parallel::allToAll(local_number_of_requested_values);
+    return (*p_sub_item_per_item_requested_total_size);
+  }
 
-    std::vector<Array<const int>> requested_item_number_list_by_proc(parallel::size());
-    const auto& item_number = connectivity.template number<item_type>();
-    for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
-      const auto& requested_item_info_from_rank = requested_item_info[i_rank];
-      Array<int> item_number_list{requested_item_info_from_rank.size()};
-      parallel_for(
-        requested_item_info_from_rank.size(),
-        PUGS_LAMBDA(size_t i_item) { item_number_list[i_item] = item_number[requested_item_info_from_rank[i_item]]; });
-      requested_item_number_list_by_proc[i_rank] = item_number_list;
-    }
+  template <ItemType item_type, ItemType sub_item_type, typename ConnectivityType>
+  PUGS_INLINE const std::vector<size_t>&
+  _getSubItemPerItemProvidedTotalSize(const ConnectivityType& connectivity)
+  {
+    static_assert(item_type != sub_item_type);
 
-    std::vector<Array<int>> provided_item_number_list_by_rank(parallel::size());
-    for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
-      provided_item_number_list_by_rank[i_rank] = Array<int>{local_number_of_values_to_send[i_rank]};
-    }
+    auto& p_sub_item_per_item_provided_total_size =
+      m_sub_item_per_item_provided_total_size_list[item_of_item_type_index<sub_item_type, item_type>];
 
-    parallel::exchange(requested_item_number_list_by_proc, provided_item_number_list_by_rank);
+    if (not p_sub_item_per_item_provided_total_size) {
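+      // when the item has a higher dimension than its sub-items the sizes
+      // can be computed locally from the connectivity matrix; otherwise they
+      // are obtained from the requesting ranks through an exchange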
+      if constexpr (ItemTypeId<ConnectivityType::Dimension>::dimension(item_type) >
+                    ItemTypeId<ConnectivityType::Dimension>::dimension(sub_item_type)) {
+        std::vector<size_t> sub_item_per_item_provided_total_size(parallel::size());
+        const auto& provided_item_info = this->_getProvidedItemInfo<ConnectivityType, item_type>(connectivity);
+        for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+          const auto& provided_item_info_from_rank = provided_item_info[i_rank];
 
-    std::map<int, ItemId> item_number_to_id_correspondance;
-    for (ItemId item_id = 0; item_id < item_number.numberOfItems(); ++item_id) {
-      item_number_to_id_correspondance[item_number[item_id]] = item_id;
-    }
+          const auto& item_to_item_matrix = connectivity.template getItemToItemMatrix<item_type, sub_item_type>();
 
-    auto& p_provided_item_info = this->_getProvidedItemInfo<item_type>();
-    p_provided_item_info       = [&]() {
-      ExchangeItemTypeInfo<item_type> provided_item_info(parallel::size());
-      for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
-        Array<ItemId> provided_item_id_to_rank{local_number_of_values_to_send[i_rank]};
-        const Array<int>& provided_item_number_to_rank = provided_item_number_list_by_rank[i_rank];
-        for (size_t i = 0; i < provided_item_number_to_rank.size(); ++i) {
-          provided_item_id_to_rank[i] = item_number_to_id_correspondance.find(provided_item_number_to_rank[i])->second;
+          size_t count = 0;
+          for (size_t i = 0; i < provided_item_info_from_rank.size(); ++i) {
+            count += item_to_item_matrix[provided_item_info_from_rank[i]].size();
+          }
+
+          sub_item_per_item_provided_total_size[i_rank] = count;
+        }
+        p_sub_item_per_item_provided_total_size =
+          std::make_unique<std::vector<size_t>>(std::move(sub_item_per_item_provided_total_size));
+      } else {
+        std::vector<size_t> sub_item_per_item_provided_total_size(parallel::size());
+
+        const auto& sub_item_required_total_size =
+          _getSubItemPerItemRequestedTotalSize<item_type, sub_item_type>(connectivity);
+        const auto& requested_item_info = this->_getRequestedItemInfo<ConnectivityType, item_type>(connectivity);
+
+        std::vector<Array<size_t>> sub_item_required_total_size_exchange(parallel::size());
+        for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+          Assert((sub_item_required_total_size[i_rank] == 0) xor (requested_item_info[i_rank].size() > 0),
+                 "unexpected sub_item size info");
+          if (requested_item_info[i_rank].size() > 0) {
+            Array<size_t> size_0d_array(1);
+            size_0d_array[0]                              = sub_item_required_total_size[i_rank];
+            sub_item_required_total_size_exchange[i_rank] = size_0d_array;
+          }
+        }
+
+        const auto& provided_item_info = this->_getProvidedItemInfo<ConnectivityType, item_type>(connectivity);
+        std::vector<Array<size_t>> sub_item_provided_total_size_exchange(parallel::size());
+        for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+          if (provided_item_info[i_rank].size() > 0) {
+            Array<size_t> size_0d_array(1);
+            sub_item_provided_total_size_exchange[i_rank] = size_0d_array;
+          }
         }
-        provided_item_info[i_rank] = provided_item_id_to_rank;
+
+        parallel::exchange(sub_item_required_total_size_exchange, sub_item_provided_total_size_exchange);
+
+        for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+          if (sub_item_provided_total_size_exchange[i_rank].size() > 0) {
+            sub_item_per_item_provided_total_size[i_rank] = sub_item_provided_total_size_exchange[i_rank][0];
+          }
+        }
+
+        p_sub_item_per_item_provided_total_size =
+          std::make_unique<std::vector<size_t>>(std::move(sub_item_per_item_provided_total_size));
       }
-      return std::make_unique<ExchangeItemTypeInfo<item_type>>(provided_item_info);
-    }();
+    }
 
-    m_sub_item_per_item_provided_size_list.resize(parallel::size());
-    m_sub_item_per_item_requested_size_list.resize(parallel::size());
+    return (*p_sub_item_per_item_provided_total_size);
   }
 
-  template <ItemType item_type, ItemType sub_item_type, size_t Dimension>
-  PUGS_INLINE size_t
-  _getSubItemPerItemRequestedSize(const Connectivity<Dimension>& connectivity, const size_t i_rank)
+  template <ItemType item_type, ItemType sub_item_type, typename ConnectivityType>
+  PUGS_INLINE const std::vector<Array<const size_t>>&
+  _getNumberOfSubItemPerItemProvidedList(const ConnectivityType& connectivity)
   {
-    Assert(m_sub_item_per_item_requested_size_list.size() == parallel::size());
+    static_assert(ItemTypeId<ConnectivityType::Dimension>::dimension(sub_item_type) >
+                    ItemTypeId<ConnectivityType::Dimension>::dimension(item_type),
+                  "should not be called if dimension of sub item is lower than item");
 
-    auto key = std::make_pair(item_type, sub_item_type);
-    if (auto i_size_list = m_sub_item_per_item_requested_size_list[i_rank].find(key);
-        i_size_list != m_sub_item_per_item_requested_size_list[i_rank].end()) {
-      return i_size_list->second;
-    } else {
-      const auto& p_requested_item_info = this->_getRequestedItemInfo<item_type>();
+    auto& p_number_of_sub_item_per_item_provided_list =
+      m_number_of_sub_item_per_item_provided_list[item_of_item_type_index<sub_item_type, item_type>];
 
-      Assert(static_cast<bool>(p_requested_item_info) == true,
-             "this function should be called after calculation of exchange info");
-      const auto& requested_item_info_from_rank = (*p_requested_item_info)[i_rank];
+    if (not p_number_of_sub_item_per_item_provided_list) {
+      using ItemId = ItemIdT<item_type>;
 
       const auto& item_to_item_matrix = connectivity.template getItemToItemMatrix<item_type, sub_item_type>();
 
-      size_t count = 0;
-      for (size_t i = 0; i < requested_item_info_from_rank.size(); ++i) {
-        count += item_to_item_matrix[requested_item_info_from_rank[i]].size();
+      const auto& requested_item_info = this->_getRequestedItemInfo<ConnectivityType, item_type>(connectivity);
+
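+      // For every item requested from i_rank, send the local number of its
+      // sub-items; symmetrically, the exchange fills in the counts for the
+      // items this rank provides.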
+      std::vector<Array<size_t>> number_of_sub_item_per_item_required_exchange(parallel::size());
+      for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+        const auto& requested_item_info_from_rank = requested_item_info[i_rank];
+        if (requested_item_info_from_rank.size() > 0) {
+          Array<size_t> number_of_sub_item_per_item(requested_item_info_from_rank.size());
+
+          for (size_t i_item = 0; i_item < requested_item_info_from_rank.size(); ++i_item) {
+            const ItemId item_id                = requested_item_info_from_rank[i_item];
+            number_of_sub_item_per_item[i_item] = item_to_item_matrix[item_id].size();
+          }
+          number_of_sub_item_per_item_required_exchange[i_rank] = number_of_sub_item_per_item;
+        }
       }
 
-      m_sub_item_per_item_requested_size_list[i_rank][key] = count;
-      return count;
+      const auto& provided_item_info = this->_getProvidedItemInfo<ConnectivityType, item_type>(connectivity);
+      std::vector<Array<size_t>> number_of_sub_item_per_item_provided_exchange(parallel::size());
+      for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+        if (provided_item_info[i_rank].size() > 0) {
+          number_of_sub_item_per_item_provided_exchange[i_rank] = Array<size_t>{provided_item_info[i_rank].size()};
+        }
+      }
+
+      parallel::exchange(number_of_sub_item_per_item_required_exchange, number_of_sub_item_per_item_provided_exchange);
+
+      std::vector<Array<const size_t>> number_of_sub_item_per_item_provided_list(parallel::size());
+      for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+        number_of_sub_item_per_item_provided_list[i_rank] = number_of_sub_item_per_item_provided_exchange[i_rank];
+      }
+      p_number_of_sub_item_per_item_provided_list =
+        std::make_unique<std::vector<Array<const size_t>>>(std::move(number_of_sub_item_per_item_provided_list));
     }
+    return *p_number_of_sub_item_per_item_provided_list;
   }
 
-  template <ItemType item_type, ItemType sub_item_type, size_t Dimension>
-  PUGS_INLINE size_t
-  _getSubItemPerItemProvidedSize(const Connectivity<Dimension>& connectivity, const size_t i_rank)
+  template <ItemType item_type, ItemType sub_item_type, typename ConnectivityType>
+  PUGS_INLINE const std::vector<Array<const size_t>>&
+  _getSubItemPerItemProvidedList(const ConnectivityType& connectivity)
   {
-    Assert(m_sub_item_per_item_provided_size_list.size() == parallel::size());
-
-    auto key = std::make_pair(item_type, sub_item_type);
-    if (auto i_size_list = m_sub_item_per_item_provided_size_list[i_rank].find(key);
-        i_size_list != m_sub_item_per_item_provided_size_list[i_rank].end()) {
-      return i_size_list->second;
-    } else {
-      const auto& p_provided_item_info = this->_getProvidedItemInfo<item_type>();
+    static_assert(ItemTypeId<ConnectivityType::Dimension>::dimension(sub_item_type) >
+                    ItemTypeId<ConnectivityType::Dimension>::dimension(item_type),
+                  "should not be called if dimension of sub item is lower than item");
+    auto& p_sub_item_per_item_provided_list =
+      m_sub_item_per_item_provided_list[item_of_item_type_index<sub_item_type, item_type>];
 
-      Assert(static_cast<bool>(p_provided_item_info) == true,
-             "this function should be called after calculation of exchange info");
-      const auto& provided_item_info_from_rank = (*p_provided_item_info)[i_rank];
+    if (not p_sub_item_per_item_provided_list) {
+      using ItemId = ItemIdT<item_type>;
 
       const auto& item_to_item_matrix = connectivity.template getItemToItemMatrix<item_type, sub_item_type>();
+      const auto number_of_sub_item   = connectivity.template number<sub_item_type>();
+
+      const auto& sub_item_required_size = _getSubItemPerItemRequestedTotalSize<item_type, sub_item_type>(connectivity);
+      const auto& requested_item_info    = this->_getRequestedItemInfo<ConnectivityType, item_type>(connectivity);
 
-      size_t count = 0;
-      for (size_t i = 0; i < provided_item_info_from_rank.size(); ++i) {
-        count += item_to_item_matrix[provided_item_info_from_rank[i]].size();
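+      // For each requested item, send the global numbers of its sub-items so
+      // that the providing rank can identify which of its local sub-items
+      // are expected, and in which order.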
+      std::vector<Array<int>> sub_item_per_item_required_numbers_exchange(parallel::size());
+      for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+        Assert((sub_item_required_size[i_rank] == 0) xor (requested_item_info[i_rank].size() > 0),
+               "unexpected sub_item size info");
+        if (requested_item_info[i_rank].size() > 0) {
+          Array<int> sub_item_numbers(sub_item_required_size[i_rank]);
+
+          size_t count                              = 0;
+          const auto& requested_item_info_from_rank = requested_item_info[i_rank];
+          for (size_t i_item = 0; i_item < requested_item_info_from_rank.size(); ++i_item) {
+            const ItemId item_id = requested_item_info_from_rank[i_item];
+            auto item_sub_items  = item_to_item_matrix[item_id];
+            for (size_t i_sub_item = 0; i_sub_item < item_sub_items.size(); ++i_sub_item) {
+              sub_item_numbers[count++] = number_of_sub_item[item_sub_items[i_sub_item]];
+            }
+          }
+          Assert(count == sub_item_numbers.size());
+          sub_item_per_item_required_numbers_exchange[i_rank] = sub_item_numbers;
+        }
       }
 
-      m_sub_item_per_item_provided_size_list[i_rank][key] = count;
-      return count;
+      const auto& provided_item_info     = this->_getProvidedItemInfo<ConnectivityType, item_type>(connectivity);
+      const auto& sub_item_provided_size = _getSubItemPerItemProvidedTotalSize<item_type, sub_item_type>(connectivity);
+      std::vector<Array<int>> sub_item_per_item_provided_numbers_exchange(parallel::size());
+      for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+        if (provided_item_info[i_rank].size() > 0) {
+          sub_item_per_item_provided_numbers_exchange[i_rank] = Array<int>{sub_item_provided_size[i_rank]};
+        }
+      }
+
+      parallel::exchange(sub_item_per_item_required_numbers_exchange, sub_item_per_item_provided_numbers_exchange);
+
+      const auto& number_of_sub_item_per_item_provided_list =
+        this->_getNumberOfSubItemPerItemProvidedList<item_type, sub_item_type>(connectivity);
+
+      std::vector<Array<const size_t>> sub_item_per_item_provided_list(parallel::size());
+      for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+        if (provided_item_info[i_rank].size() > 0) {
+          const auto& sub_item_numbers            = sub_item_per_item_provided_numbers_exchange[i_rank];
+          const auto& number_of_sub_item_per_item = number_of_sub_item_per_item_provided_list[i_rank];
+          Array<size_t> sub_item_list{sub_item_provided_size[i_rank]};
+          size_t count = 0;
+
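+          // Match the received global numbers against the local sub-items of
+          // each provided item to recover, in order, the local sub-item
+          // indices whose values must be sent.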
+          const auto& provided_item_info_to_rank = provided_item_info[i_rank];
+          for (size_t i_item = 0; i_item < provided_item_info_to_rank.size(); ++i_item) {
+            const ItemId item_id = provided_item_info_to_rank[i_item];
+            auto item_sub_items  = item_to_item_matrix[item_id];
+            bool found           = false;
+            for (size_t i_sub_item = 0, i_required_sub_item = 0; i_sub_item < item_sub_items.size(); ++i_sub_item) {
+              found      = false;
+              int number = sub_item_numbers[count];
+              if (number == number_of_sub_item[item_sub_items[i_sub_item]]) {
+                found                = true;
+                sub_item_list[count] = i_sub_item;
+                i_required_sub_item++;
+                count++;
+                if (i_required_sub_item == number_of_sub_item_per_item[i_item]) {
+                  break;
+                }
+              }
+            }
+            Assert(found, "something wierd occured");
+          }
+
+          Assert(count == sub_item_list.size());
+          sub_item_per_item_provided_list[i_rank] = sub_item_list;
+        }
+      }
+
+      p_sub_item_per_item_provided_list =
+        std::make_unique<std::vector<Array<const size_t>>>(std::move(sub_item_per_item_provided_list));
     }
+
+    return (*p_sub_item_per_item_provided_list);
   }
 
   template <typename ConnectivityType, typename DataType, ItemType item_type, typename ConnectivityPtr>
@@ -209,19 +390,8 @@ class Synchronizer
 
     using ItemId = ItemIdT<item_type>;
 
-    const auto& p_provided_item_info  = this->_getProvidedItemInfo<item_type>();
-    const auto& p_requested_item_info = this->_getRequestedItemInfo<item_type>();
-
-    Assert(static_cast<bool>(p_provided_item_info) == static_cast<bool>(p_requested_item_info));
-
-    if (not p_provided_item_info) {
-      this->_buildSynchronizeInfo<ConnectivityType, item_type>(connectivity);
-    }
-
-    const auto& provided_item_info  = *p_provided_item_info;
-    const auto& requested_item_info = *p_requested_item_info;
-
-    Assert(requested_item_info.size() == provided_item_info.size());
+    const auto& provided_item_info  = this->_getProvidedItemInfo<ConnectivityType, item_type>(connectivity);
+    const auto& requested_item_info = this->_getRequestedItemInfo<ConnectivityType, item_type>(connectivity);
 
     std::vector<Array<const DataType>> provided_data_list(parallel::size());
     for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
@@ -258,20 +428,10 @@ class Synchronizer
 
     using ItemId = ItemIdT<item_type>;
 
-    const auto& p_provided_item_info  = this->_getProvidedItemInfo<item_type>();
-    const auto& p_requested_item_info = this->_getRequestedItemInfo<item_type>();
-
-    Assert(static_cast<bool>(p_provided_item_info) == static_cast<bool>(p_requested_item_info));
-
-    if (not p_provided_item_info) {
-      this->_buildSynchronizeInfo<ConnectivityType, item_type>(connectivity);
-    }
-
-    const auto& provided_item_info  = *p_provided_item_info;
-    const auto& requested_item_info = *p_requested_item_info;
+    const auto& provided_item_info  = this->_getProvidedItemInfo<ConnectivityType, item_type>(connectivity);
+    const auto& requested_item_info = this->_getRequestedItemInfo<ConnectivityType, item_type>(connectivity);
 
     Assert(requested_item_info.size() == provided_item_info.size());
-
     const size_t size_of_arrays = item_array.sizeOfArrays();
 
     std::vector<Array<const DataType>> provided_data_list(parallel::size());
@@ -316,34 +476,27 @@ class Synchronizer
   _synchronize(const ConnectivityType& connectivity,
                SubItemValuePerItem<DataType, ItemOfItem, ConnectivityPtr>& sub_item_value_per_item)
   {
+    static_assert(ItemOfItem::item_type != ItemOfItem::sub_item_type);
     static_assert(not std::is_abstract_v<ConnectivityType>, "_synchronize must be called on a concrete connectivity");
-    if constexpr (ItemTypeId<ConnectivityType::Dimension>::dimension(ItemOfItem::item_type) >
-                  ItemTypeId<ConnectivityType::Dimension>::dimension(ItemOfItem::sub_item_type)) {
-      constexpr ItemType item_type     = ItemOfItem::item_type;
-      constexpr ItemType sub_item_type = ItemOfItem::sub_item_type;
 
-      using ItemId = ItemIdT<item_type>;
-
-      const auto& p_provided_item_info  = this->_getProvidedItemInfo<item_type>();
-      const auto& p_requested_item_info = this->_getRequestedItemInfo<item_type>();
+    constexpr ItemType item_type     = ItemOfItem::item_type;
+    constexpr ItemType sub_item_type = ItemOfItem::sub_item_type;
 
-      Assert(static_cast<bool>(p_provided_item_info) == static_cast<bool>(p_requested_item_info));
+    using ItemId = ItemIdT<item_type>;
 
-      if (not p_provided_item_info) {
-        this->_buildSynchronizeInfo<ConnectivityType, item_type>(connectivity);
-      }
+    const auto& provided_item_info  = this->_getProvidedItemInfo<ConnectivityType, item_type>(connectivity);
+    const auto& requested_item_info = this->_getRequestedItemInfo<ConnectivityType, item_type>(connectivity);
 
-      const auto& provided_item_info  = *p_provided_item_info;
-      const auto& requested_item_info = *p_requested_item_info;
+    const auto& sub_item_per_item_provided_size =
+      _getSubItemPerItemProvidedTotalSize<item_type, sub_item_type>(connectivity);
 
-      Assert(requested_item_info.size() == provided_item_info.size());
+    std::vector<Array<const DataType>> provided_data_list(parallel::size());
 
-      std::vector<Array<const DataType>> provided_data_list(parallel::size());
+    if constexpr (ItemTypeId<ConnectivityType::Dimension>::dimension(ItemOfItem::item_type) >
+                  ItemTypeId<ConnectivityType::Dimension>::dimension(ItemOfItem::sub_item_type)) {
       for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
         const Array<const ItemId>& provided_item_info_to_rank = provided_item_info[i_rank];
-        const size_t send_size = _getSubItemPerItemProvidedSize<item_type, sub_item_type>(connectivity, i_rank);
-
-        Array<DataType> provided_data{send_size};
+        Array<DataType> provided_data{sub_item_per_item_provided_size[i_rank]};
         size_t index = 0;
         for (size_t i = 0; i < provided_item_info_to_rank.size(); ++i) {
           const ItemId item_id   = provided_item_info_to_rank[i];
@@ -354,32 +507,54 @@ class Synchronizer
         }
         provided_data_list[i_rank] = provided_data;
       }
+    } else if constexpr (ItemTypeId<ConnectivityType::Dimension>::dimension(ItemOfItem::item_type) <
+                         ItemTypeId<ConnectivityType::Dimension>::dimension(ItemOfItem::sub_item_type)) {
+      const auto& number_of_sub_item_per_item_provided_list =
+        this->_getNumberOfSubItemPerItemProvidedList<item_type, sub_item_type>(connectivity);
+      const auto& sub_item_per_item_provided_list =
+        this->_getSubItemPerItemProvidedList<item_type, sub_item_type>(connectivity);
 
-      std::vector<Array<DataType>> requested_data_list(parallel::size());
-      for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
-        const size_t recv_size      = _getSubItemPerItemRequestedSize<item_type, sub_item_type>(connectivity, i_rank);
-        requested_data_list[i_rank] = Array<DataType>{recv_size};
-      }
-
-      parallel::exchange(provided_data_list, requested_data_list);
       for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
-        const auto& requested_item_info_from_rank = requested_item_info[i_rank];
-        const auto& requested_data                = requested_data_list[i_rank];
+        const Array<const ItemId>& provided_item_info_to_rank              = provided_item_info[i_rank];
+        const Array<const size_t>& sub_item_per_item_provided_list_to_rank = sub_item_per_item_provided_list[i_rank];
+        const Array<const size_t>& number_of_sub_item_per_item_provided_list_to_rank =
+          number_of_sub_item_per_item_provided_list[i_rank];
 
+        Array<DataType> provided_data{sub_item_per_item_provided_size[i_rank]};
         size_t index = 0;
-        for (size_t i = 0; i < requested_item_info_from_rank.size(); ++i) {
-          const ItemId item_id   = requested_item_info_from_rank[i];
+        for (size_t i = 0; i < provided_item_info_to_rank.size(); ++i) {
+          const ItemId item_id   = provided_item_info_to_rank[i];
           const auto item_values = sub_item_value_per_item.itemArray(item_id);
-          for (size_t j = 0; j < item_values.size(); ++j) {
-            item_values[j] = requested_data[index++];
+          for (size_t j = 0; j < number_of_sub_item_per_item_provided_list_to_rank[i]; ++j, ++index) {
+            provided_data[index] = item_values[sub_item_per_item_provided_list_to_rank[index]];
           }
         }
+        provided_data_list[i_rank] = provided_data;
+      }
+    }
+
+    const auto& sub_item_per_item_requested_size =
+      _getSubItemPerItemRequestedTotalSize<item_type, sub_item_type>(connectivity);
+
+    std::vector<Array<DataType>> requested_data_list(parallel::size());
+    for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+      requested_data_list[i_rank] = Array<DataType>{sub_item_per_item_requested_size[i_rank]};
+    }
+
+    parallel::exchange(provided_data_list, requested_data_list);
+
+    for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+      const auto& requested_item_info_from_rank = requested_item_info[i_rank];
+      const auto& requested_data                = requested_data_list[i_rank];
+
+      size_t index = 0;
+      for (size_t i = 0; i < requested_item_info_from_rank.size(); ++i) {
+        const ItemId item_id       = requested_item_info_from_rank[i];
+        const auto sub_item_values = sub_item_value_per_item.itemArray(item_id);
+        for (size_t j = 0; j < sub_item_values.size(); ++j) {
+          sub_item_values[j] = requested_data[index++];
+        }
       }
-    } else {
-      std::ostringstream os;
-      os << "synchronization requires sub-item type (" << itemName(ItemOfItem::sub_item_type)
-         << ") to be of lower dimension than item (" << itemName(ItemOfItem::item_type) << ")";
-      throw UnexpectedError(os.str());
     }
   }
 
@@ -388,34 +563,28 @@ class Synchronizer
   _synchronize(const ConnectivityType& connectivity,
                SubItemArrayPerItem<DataType, ItemOfItem, ConnectivityPtr>& sub_item_array_per_item)
   {
+    static_assert(ItemOfItem::item_type != ItemOfItem::sub_item_type);
     static_assert(not std::is_abstract_v<ConnectivityType>, "_synchronize must be called on a concrete connectivity");
-    if constexpr (ItemTypeId<ConnectivityType::Dimension>::dimension(ItemOfItem::item_type) >
-                  ItemTypeId<ConnectivityType::Dimension>::dimension(ItemOfItem::sub_item_type)) {
-      constexpr ItemType item_type     = ItemOfItem::item_type;
-      constexpr ItemType sub_item_type = ItemOfItem::sub_item_type;
-
-      using ItemId = ItemIdT<item_type>;
 
-      const auto& p_provided_item_info  = this->_getProvidedItemInfo<item_type>();
-      const auto& p_requested_item_info = this->_getRequestedItemInfo<item_type>();
+    constexpr ItemType item_type     = ItemOfItem::item_type;
+    constexpr ItemType sub_item_type = ItemOfItem::sub_item_type;
 
-      Assert(static_cast<bool>(p_provided_item_info) == static_cast<bool>(p_requested_item_info));
+    using ItemId = ItemIdT<item_type>;
 
-      if (not p_provided_item_info) {
-        this->_buildSynchronizeInfo<ConnectivityType, item_type>(connectivity);
-      }
+    const auto& provided_item_info  = this->_getProvidedItemInfo<ConnectivityType, item_type>(connectivity);
+    const auto& requested_item_info = this->_getRequestedItemInfo<ConnectivityType, item_type>(connectivity);
 
-      const auto& provided_item_info  = *p_provided_item_info;
-      const auto& requested_item_info = *p_requested_item_info;
+    const auto& sub_item_per_item_provided_size =
+      _getSubItemPerItemProvidedTotalSize<item_type, sub_item_type>(connectivity);
 
-      Assert(requested_item_info.size() == provided_item_info.size());
+    std::vector<Array<const DataType>> provided_data_list(parallel::size());
 
-      std::vector<Array<const DataType>> provided_data_list(parallel::size());
+    if constexpr (ItemTypeId<ConnectivityType::Dimension>::dimension(ItemOfItem::item_type) >
+                  ItemTypeId<ConnectivityType::Dimension>::dimension(ItemOfItem::sub_item_type)) {
       for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
         const Array<const ItemId>& provided_item_info_to_rank = provided_item_info[i_rank];
-        const size_t send_size = _getSubItemPerItemProvidedSize<item_type, sub_item_type>(connectivity, i_rank);
 
-        Array<DataType> provided_data{send_size * sub_item_array_per_item.sizeOfArrays()};
+        Array<DataType> provided_data{sub_item_per_item_provided_size[i_rank] * sub_item_array_per_item.sizeOfArrays()};
         size_t index = 0;
         for (size_t i = 0; i < provided_item_info_to_rank.size(); ++i) {
           const ItemId item_id  = provided_item_info_to_rank[i];
@@ -429,35 +598,62 @@ class Synchronizer
         }
         provided_data_list[i_rank] = provided_data;
       }
+    } else if constexpr (ItemTypeId<ConnectivityType::Dimension>::dimension(ItemOfItem::item_type) <
+                         ItemTypeId<ConnectivityType::Dimension>::dimension(ItemOfItem::sub_item_type)) {
+      const auto& number_of_sub_item_per_item_provided_list =
+        this->_getNumberOfSubItemPerItemProvidedList<item_type, sub_item_type>(connectivity);
+      const auto& sub_item_per_item_provided_list =
+        this->_getSubItemPerItemProvidedList<item_type, sub_item_type>(connectivity);
 
-      std::vector<Array<DataType>> requested_data_list(parallel::size());
-      for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
-        const size_t recv_size      = _getSubItemPerItemRequestedSize<item_type, sub_item_type>(connectivity, i_rank);
-        requested_data_list[i_rank] = Array<DataType>{recv_size * sub_item_array_per_item.sizeOfArrays()};
-      }
-
-      parallel::exchange(provided_data_list, requested_data_list);
       for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
-        const auto& requested_item_info_from_rank = requested_item_info[i_rank];
-        const auto& requested_data                = requested_data_list[i_rank];
+        const Array<const ItemId>& provided_item_info_to_rank              = provided_item_info[i_rank];
+        const Array<const size_t>& sub_item_per_item_provided_list_to_rank = sub_item_per_item_provided_list[i_rank];
+        const Array<const size_t>& number_of_sub_item_per_item_provided_list_to_rank =
+          number_of_sub_item_per_item_provided_list[i_rank];
 
+        Array<DataType> provided_data{sub_item_per_item_provided_size[i_rank] * sub_item_array_per_item.sizeOfArrays()};
         size_t index = 0;
-        for (size_t i = 0; i < requested_item_info_from_rank.size(); ++i) {
-          const ItemId item_id  = requested_item_info_from_rank[i];
+        for (size_t i = 0; i < provided_item_info_to_rank.size(); ++i) {
+          const ItemId item_id  = provided_item_info_to_rank[i];
           const auto item_table = sub_item_array_per_item.itemTable(item_id);
-          for (size_t j = 0; j < item_table.numberOfRows(); ++j) {
+          for (size_t j = 0; j < number_of_sub_item_per_item_provided_list_to_rank[i]; ++j, ++index) {
             Assert(item_table.numberOfColumns() == sub_item_array_per_item.sizeOfArrays());
             for (size_t k = 0; k < sub_item_array_per_item.sizeOfArrays(); ++k) {
-              item_table(j, k) = requested_data[index++];
+              provided_data[sub_item_array_per_item.sizeOfArrays() * index + k] =
+                item_table(sub_item_per_item_provided_list_to_rank[index], k);
             }
           }
         }
+        Assert(index == sub_item_per_item_provided_list_to_rank.size());
+        provided_data_list[i_rank] = provided_data;
+      }
+    }
+
+    const auto& sub_item_per_item_requested_size =
+      _getSubItemPerItemRequestedTotalSize<item_type, sub_item_type>(connectivity);
+
+    std::vector<Array<DataType>> requested_data_list(parallel::size());
+    for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+      requested_data_list[i_rank] =
+        Array<DataType>{sub_item_per_item_requested_size[i_rank] * sub_item_array_per_item.sizeOfArrays()};
+    }
+
+    parallel::exchange(provided_data_list, requested_data_list);
+    for (size_t i_rank = 0; i_rank < parallel::size(); ++i_rank) {
+      const auto& requested_item_info_from_rank = requested_item_info[i_rank];
+      const auto& requested_data                = requested_data_list[i_rank];
+
+      size_t index = 0;
+      for (size_t i = 0; i < requested_item_info_from_rank.size(); ++i) {
+        const ItemId item_id  = requested_item_info_from_rank[i];
+        const auto item_table = sub_item_array_per_item.itemTable(item_id);
+        for (size_t j = 0; j < item_table.numberOfRows(); ++j) {
+          Assert(item_table.numberOfColumns() == sub_item_array_per_item.sizeOfArrays());
+          for (size_t k = 0; k < sub_item_array_per_item.sizeOfArrays(); ++k) {
+            item_table(j, k) = requested_data[index++];
+          }
+        }
       }
-    } else {
-      std::ostringstream os;
-      os << "synchronization requires sub-item type (" << itemName(ItemOfItem::sub_item_type)
-         << ") to be of lower dimension than item (" << itemName(ItemOfItem::item_type) << ")";
-      throw UnexpectedError(os.str());
     }
   }
 
@@ -466,6 +662,7 @@ class Synchronizer
   PUGS_INLINE void
   synchronize(ItemValue<DataType, item_type, ConnectivityPtr>& item_value)
   {
+    static_assert(not std::is_const_v<DataType>, "cannot synchronize ItemValue of const data");
     Assert(item_value.connectivity_ptr().use_count() > 0, "No connectivity is associated to this ItemValue");
     const IConnectivity& connectivity = *item_value.connectivity_ptr();
 
@@ -494,6 +691,7 @@ class Synchronizer
   PUGS_INLINE void
   synchronize(ItemArray<DataType, item_type, ConnectivityPtr>& item_array)
   {
+    static_assert(not std::is_const_v<DataType>, "cannot synchronize ItemArray of const data");
     Assert(item_array.connectivity_ptr().use_count() > 0, "No connectivity is associated to this ItemArray");
     const IConnectivity& connectivity = *item_array.connectivity_ptr();
 
@@ -522,6 +720,7 @@ class Synchronizer
   PUGS_INLINE void
   synchronize(SubItemValuePerItem<DataType, ItemOfItem, ConnectivityPtr>& sub_item_value_per_item)
   {
+    static_assert(not std::is_const_v<DataType>, "cannot synchronize SubItemValuePerItem of const data");
     Assert(sub_item_value_per_item.connectivity_ptr().use_count() > 0,
            "No connectivity is associated to this SubItemValuePerItem");
 
@@ -552,6 +751,7 @@ class Synchronizer
   PUGS_INLINE void
   synchronize(SubItemArrayPerItem<DataType, ItemOfItem, ConnectivityPtr>& sub_item_value_per_item)
   {
+    static_assert(not std::is_const_v<DataType>, "cannot synchronize SubItemArrayPerItem of const data");
     Assert(sub_item_value_per_item.connectivity_ptr().use_count() > 0,
            "No connectivity is associated to this SubItemValuePerItem");
 
@@ -600,6 +800,7 @@ class Synchronizer
   PUGS_INLINE void
   synchronize(ItemValue<DataType, item_type, ConnectivityPtr>& item_value)
   {
+    static_assert(not std::is_const_v<DataType>, "cannot synchronize ItemValue of const data");
     Assert(item_value.connectivity_ptr().use_count() > 0, "No connectivity is associated to this ItemValue");
   }
 
@@ -607,6 +808,7 @@ class Synchronizer
   PUGS_INLINE void
   synchronize(ItemArray<DataType, item_type, ConnectivityPtr>& item_value)
   {
+    static_assert(not std::is_const_v<DataType>, "cannot synchronize ItemArray of const data");
     Assert(item_value.connectivity_ptr().use_count() > 0, "No connectivity is associated to this ItemValue");
   }
 
@@ -614,96 +816,18 @@ class Synchronizer
   PUGS_INLINE void
   synchronize(SubItemValuePerItem<DataType, ItemOfItem, ConnectivityPtr>& sub_item_value_per_item)
   {
+    static_assert(not std::is_const_v<DataType>, "cannot synchronize SubItemValuePerItem of const data");
     Assert(sub_item_value_per_item.connectivity_ptr().use_count() > 0,
            "No connectivity is associated to this SubItemValuePerItem");
-
-    const IConnectivity& connectivity = *sub_item_value_per_item.connectivity_ptr();
-
-    switch (connectivity.dimension()) {
-    case 1: {
-      if constexpr (ItemTypeId<1>::dimension(ItemOfItem::item_type) <=
-                    ItemTypeId<1>::dimension(ItemOfItem::sub_item_type)) {
-        std::ostringstream os;
-        os << "synchronization requires sub-item type (" << itemName(ItemOfItem::sub_item_type)
-           << ") to be of lower dimension than item (" << itemName(ItemOfItem::item_type) << ")";
-        throw UnexpectedError(os.str());
-      }
-      break;
-    }
-    case 2: {
-      if constexpr (ItemTypeId<2>::dimension(ItemOfItem::item_type) <=
-                    ItemTypeId<2>::dimension(ItemOfItem::sub_item_type)) {
-        std::ostringstream os;
-        os << "synchronization requires sub-item type (" << itemName(ItemOfItem::sub_item_type)
-           << ") to be of lower dimension than item (" << itemName(ItemOfItem::item_type) << ")";
-        throw UnexpectedError(os.str());
-      }
-      break;
-    }
-    case 3: {
-      if constexpr (ItemTypeId<3>::dimension(ItemOfItem::item_type) <=
-                    ItemTypeId<3>::dimension(ItemOfItem::sub_item_type)) {
-        std::ostringstream os;
-        os << "synchronization requires sub-item type (" << itemName(ItemOfItem::sub_item_type)
-           << ") to be of lower dimension than item (" << itemName(ItemOfItem::item_type) << ")";
-        throw UnexpectedError(os.str());
-      }
-      break;
-    }
-      // LCOV_EXCL_START
-    default: {
-      throw UnexpectedError("unexpected dimension");
-    }
-      // LCOV_EXCL_STOP
-    }
   }
 
   template <typename DataType, typename ItemOfItem, typename ConnectivityPtr>
   PUGS_INLINE void
   synchronize(SubItemArrayPerItem<DataType, ItemOfItem, ConnectivityPtr>& sub_item_array_per_item)
   {
+    static_assert(not std::is_const_v<DataType>, "cannot synchronize SubItemArrayPerItem of const data");
     Assert(sub_item_array_per_item.connectivity_ptr().use_count() > 0,
            "No connectivity is associated to this SubItemArrayPerItem");
-
-    const IConnectivity& connectivity = *sub_item_array_per_item.connectivity_ptr();
-
-    switch (connectivity.dimension()) {
-    case 1: {
-      if constexpr (ItemTypeId<1>::dimension(ItemOfItem::item_type) <=
-                    ItemTypeId<1>::dimension(ItemOfItem::sub_item_type)) {
-        std::ostringstream os;
-        os << "synchronization requires sub-item type (" << itemName(ItemOfItem::sub_item_type)
-           << ") to be of lower dimension than item (" << itemName(ItemOfItem::item_type) << ")";
-        throw UnexpectedError(os.str());
-      }
-      break;
-    }
-    case 2: {
-      if constexpr (ItemTypeId<2>::dimension(ItemOfItem::item_type) <=
-                    ItemTypeId<2>::dimension(ItemOfItem::sub_item_type)) {
-        std::ostringstream os;
-        os << "synchronization requires sub-item type (" << itemName(ItemOfItem::sub_item_type)
-           << ") to be of lower dimension than item (" << itemName(ItemOfItem::item_type) << ")";
-        throw UnexpectedError(os.str());
-      }
-      break;
-    }
-    case 3: {
-      if constexpr (ItemTypeId<3>::dimension(ItemOfItem::item_type) <=
-                    ItemTypeId<3>::dimension(ItemOfItem::sub_item_type)) {
-        std::ostringstream os;
-        os << "synchronization requires sub-item type (" << itemName(ItemOfItem::sub_item_type)
-           << ") to be of lower dimension than item (" << itemName(ItemOfItem::item_type) << ")";
-        throw UnexpectedError(os.str());
-      }
-      break;
-    }
-      // LCOV_EXCL_START
-    default: {
-      throw UnexpectedError("unexpected dimension");
-    }
-      // LCOV_EXCL_STOP
-    }
   }
 
   Synchronizer(const Synchronizer&) = delete;
diff --git a/src/output/CMakeLists.txt b/src/output/CMakeLists.txt
index f3173dd2613ff281671c1dadb9e60d9e6b95bc64..051e96480431aaa733059e2cf9c9518b29216812 100644
--- a/src/output/CMakeLists.txt
+++ b/src/output/CMakeLists.txt
@@ -6,3 +6,8 @@ add_library(
   GnuplotWriter1D.cpp
   VTKWriter.cpp
   WriterBase.cpp)
+
+target_link_libraries(
+  PugsOutput
+  ${HIGHFIVE_TARGET}
+)
diff --git a/src/scheme/CMakeLists.txt b/src/scheme/CMakeLists.txt
index 1379aea67566c559a17f19a5315335b4248e74c2..dadbe73bb24ad84e491248b89016baf85f0dbc31 100644
--- a/src/scheme/CMakeLists.txt
+++ b/src/scheme/CMakeLists.txt
@@ -9,4 +9,10 @@ add_library(
   DiscreteFunctionUtils.cpp
   DiscreteFunctionVectorIntegrator.cpp
   DiscreteFunctionVectorInterpoler.cpp
-  FluxingAdvectionSolver.cpp)
+  FluxingAdvectionSolver.cpp
+)
+
+target_link_libraries(
+  PugsScheme
+  ${HIGHFIVE_TARGET}
+)
diff --git a/src/scheme/DiscreteFunctionVariant.hpp b/src/scheme/DiscreteFunctionVariant.hpp
index 6feb86bcbc564898ae7801a0f3ca2bab8d370097..af1b1cc3757ad38a5f5561c41705301c2f086ebe 100644
--- a/src/scheme/DiscreteFunctionVariant.hpp
+++ b/src/scheme/DiscreteFunctionVariant.hpp
@@ -92,7 +92,7 @@ class DiscreteFunctionVariant
                   "DiscreteFunctionP0Vector with this DataType is not allowed in variant");
   }
 
-  DiscreteFunctionVariant& operator=(DiscreteFunctionVariant&&) = default;
+  DiscreteFunctionVariant& operator=(DiscreteFunctionVariant&&)      = default;
   DiscreteFunctionVariant& operator=(const DiscreteFunctionVariant&) = default;
 
   DiscreteFunctionVariant(const DiscreteFunctionVariant&) = default;
diff --git a/src/utils/BuildInfo.cpp b/src/utils/BuildInfo.cpp
index 5a8fdb1bfdd66b209714a69b8f48c0de2fba07bd..c6ac6cde607ba6e32e47b9586b6bc88ec3c9d2f5 100644
--- a/src/utils/BuildInfo.cpp
+++ b/src/utils/BuildInfo.cpp
@@ -17,6 +17,10 @@
 #include <slepc.h>
 #endif   // PUGS_HAS_PETSC
 
+#ifdef PUGS_HAS_HDF5
+#include <highfive/highfive.hpp>
+#endif   // PUGS_HAS_HDF5
+
 std::string
 BuildInfo::type()
 {
@@ -73,3 +77,19 @@ BuildInfo::slepcLibrary()
   return "none";
 #endif   // PUGS_HAS_SLEPC
 }
+
+std::string
+BuildInfo::hdf5Library()
+{
+#ifdef PUGS_HAS_HDF5
+
+#ifdef H5_HAVE_PARALLEL
+  return stringify(H5_VERSION) + " [parallel]";
+#else    // H5_HAVE_PARALLEL
+  return stringify(H5_VERSION) + " [sequential]";
+#endif   // H5_HAVE_PARALLEL
+
+#else
+  return "none";
+#endif   // PUGS_HAS_HDF5
+}
diff --git a/src/utils/BuildInfo.hpp b/src/utils/BuildInfo.hpp
index bc83cf3f1426bcab2972ebb8466f790cadf8a9ef..89a73e5411716c27a69de65fa9976fb062f77dca 100644
--- a/src/utils/BuildInfo.hpp
+++ b/src/utils/BuildInfo.hpp
@@ -11,6 +11,7 @@ struct BuildInfo
   static std::string mpiLibrary();
   static std::string petscLibrary();
   static std::string slepcLibrary();
+  static std::string hdf5Library();
 };
 
 #endif   // BUILD_INFO_HPP
diff --git a/src/utils/CMakeLists.txt b/src/utils/CMakeLists.txt
index c4070ff64a94711d600db51bf0a614795ed7aca3..b5a383c9c7d05088ef840d10b58fc57daa5cea04 100644
--- a/src/utils/CMakeLists.txt
+++ b/src/utils/CMakeLists.txt
@@ -8,7 +8,9 @@ add_library(
   ConsoleManager.cpp
   Demangle.cpp
   Exceptions.cpp
+  ExecutionStatManager.cpp
   FPEManager.cpp
+  GlobalVariableManager.cpp
   Messenger.cpp
   Partitioner.cpp
   PETScWrapper.cpp
@@ -24,6 +26,7 @@ target_link_libraries(
   PugsUtils
   ${PETSC_LIBRARIES}
   ${SLEPC_LIBRARIES}
+  ${HIGHFIVE_TARGET}
 )
 
 # --------------- get git revision info ---------------
diff --git a/src/utils/ExecutionStatManager.cpp b/src/utils/ExecutionStatManager.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..6918d4d7acfc89bb12420b1540a240e1bcd026dc
--- /dev/null
+++ b/src/utils/ExecutionStatManager.cpp
@@ -0,0 +1,163 @@
+#include <utils/ExecutionStatManager.hpp>
+
+#include <utils/Exceptions.hpp>
+#include <utils/Messenger.hpp>
+
+#include <cmath>
+#include <iomanip>
+#include <rang.hpp>
+#include <sys/resource.h>
+
+ExecutionStatManager* ExecutionStatManager::m_instance = nullptr;
+
+std::string
+ExecutionStatManager::_prettyPrintTime(double time_in_seconds) const
+{
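+  // Decompose the duration into days/hours/minutes/seconds and print only
+  // the units from the largest non-zero one downwards.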
+  std::ostringstream os;
+  size_t seconds    = std::floor(time_in_seconds);
+  const size_t days = seconds / (24 * 3600);
+  seconds -= days * (24 * 3600);
+  const size_t hours = seconds / 3600;
+  seconds -= hours * 3600;
+  const size_t minutes = seconds / 60;
+  seconds -= minutes * 60;
+  os << rang::style::bold;
+  bool print = false;
+  if (days > 0) {
+    print = true;
+    os << days << "d" << ' ';
+  }
+  if (print or (hours > 0)) {
+    print = true;
+    os << std::setw(2) << std::setfill('0') << hours << "h";
+  }
+  if (print or (minutes > 0)) {
+    print = true;
+    os << std::setw(2) << std::setfill('0') << minutes << "mn";
+  }
+  if (print) {
+    os << rang::style::bold << std::setw(2) << std::setfill('0') << seconds << "s";
+  }
+  os << rang::style::reset;
+
+  return os.str();
+}
+
+void
+ExecutionStatManager::_printMaxResidentMemory() const
+{
+  class Memory
+  {
+   private:
+    double m_value;
+
+   public:
+    PUGS_INLINE const double&
+    value() const
+    {
+      return m_value;
+    }
+
+    std::string
+    prettyPrint() const
+    {
+      const std::vector<std::string> units = {"B", "KB", "MB", "GB", "TB", "PB", "EB"};
+
+      double local_memory = m_value;
+      size_t i_unit       = 0;
+      while ((local_memory >= 1024) and (i_unit < units.size())) {
+        ++i_unit;
+        local_memory /= 1024;
+      }
+      std::ostringstream os;
+      os << local_memory << units[i_unit];
+      return os.str();
+    }
+
+    Memory()
+    {
+      rusage u;
+      getrusage(RUSAGE_SELF, &u);
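+      // ru_maxrss is expressed in kilobytes on Linux, hence the conversion
+      // to bytes (note that macOS reports it in bytes).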
+      m_value = u.ru_maxrss * 1024;
+    }
+
+    Memory(double value) : m_value{value} {}
+  };
+
+  Memory memory;
+  std::cout << "Memory: " << rang::style::bold << Memory{parallel::allReduceSum(memory.value())}.prettyPrint()
+            << rang::style::reset;
+  if (parallel::size() > 1) {
+    std::cout << " (over " << parallel::size() << " processes)";
+    std::cout << " Avg: " << rang::style::bold
+              << Memory{parallel::allReduceSum(memory.value()) / parallel::size()}.prettyPrint() << rang::style::reset;
+    std::cout << " Min: " << rang::style::bold << Memory{parallel::allReduceMin(memory.value())}.prettyPrint()
+              << rang::style::reset;
+    std::cout << " Max: " << rang::style::bold << Memory{parallel::allReduceMax(memory.value())}.prettyPrint()
+              << rang::style::reset;
+  }
+  std::cout << '\n';
+}
+
+void
+ExecutionStatManager::_printElapseTime() const
+{
+  const double elapse_time = m_instance->m_elapse_time.seconds();
+  std::cout << "Execution: " << rang::style::bold << elapse_time << 's' << rang::style::reset;
+  if (elapse_time > 60) {
+    std::cout << " [" << rang::style::bold << this->_prettyPrintTime(elapse_time) << rang::style::reset << ']';
+  }
+  std::cout << '\n';
+}
+
+void
+ExecutionStatManager::_printTotalCPUTime() const
+{
+  rusage u;
+  getrusage(RUSAGE_SELF, &u);
+
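+  // User plus system CPU time of this process, in seconds.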
+  const double total_cpu_time =
+    u.ru_utime.tv_sec + u.ru_stime.tv_sec + (u.ru_utime.tv_usec + u.ru_stime.tv_usec) * 1E-6;
+
+  std::cout << "Total CPU: " << rang::style::bold << parallel::allReduceSum(total_cpu_time) << 's'
+            << rang::style::reset;
+  std::cout << " (" << parallel::allReduceSum(Kokkos::DefaultHostExecutionSpace::concurrency()) << " threads over "
+            << parallel::size() << " processes)";
+  if (total_cpu_time > 60) {
+    std::cout << " [" << _prettyPrintTime(total_cpu_time) << ']';
+  }
+  std::cout << '\n';
+}
+
+void
+ExecutionStatManager::printInfo()
+{
+  if (ExecutionStatManager::getInstance().doPrint()) {
+    std::cout << "----------------- " << rang::fg::green << "pugs exec stats" << rang::fg::reset
+              << " ---------------------\n";
+
+    ExecutionStatManager::getInstance()._printElapseTime();
+    ExecutionStatManager::getInstance()._printTotalCPUTime();
+    ExecutionStatManager::getInstance()._printMaxResidentMemory();
+  }
+}
+
+void
+ExecutionStatManager::create()
+{
+  if (ExecutionStatManager::m_instance == nullptr) {
+    ExecutionStatManager::m_instance = new ExecutionStatManager;
+  } else {
+    throw UnexpectedError("ExecutionStatManager already created");
+  }
+}
+
+void
+ExecutionStatManager::destroy()
+{
+  // Multiple destructions are allowed in order to cope with unexpected code exits
+  if (ExecutionStatManager::m_instance != nullptr) {
+    delete ExecutionStatManager::m_instance;
+    ExecutionStatManager::m_instance = nullptr;
+  }
+}
diff --git a/src/utils/ExecutionStatManager.hpp b/src/utils/ExecutionStatManager.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..6f7c333561147ab3fede6c534a9c2680a29bd4cd
--- /dev/null
+++ b/src/utils/ExecutionStatManager.hpp
@@ -0,0 +1,54 @@
+#ifndef EXECUTION_STAT_MANAGER_HPP
+#define EXECUTION_STAT_MANAGER_HPP
+
+#include <utils/PugsAssert.hpp>
+#include <utils/Timer.hpp>
+
+#include <string>
+
+class ExecutionStatManager
+{
+ private:
+  static ExecutionStatManager* m_instance;
+
+  Timer m_elapse_time;
+  bool m_do_print = true;
+
+  std::string _prettyPrintTime(double seconds) const;
+
+  void _printMaxResidentMemory() const;
+  void _printElapseTime() const;
+  void _printTotalCPUTime() const;
+
+  explicit ExecutionStatManager()                   = default;
+  ExecutionStatManager(ExecutionStatManager&&)      = delete;
+  ExecutionStatManager(const ExecutionStatManager&) = delete;
+  ~ExecutionStatManager()                           = default;
+
+ public:
+  PUGS_INLINE
+  bool
+  doPrint() const
+  {
+    return m_do_print;
+  }
+
+  PUGS_INLINE
+  void
+  setPrint(bool do_print)
+  {
+    m_do_print = do_print;
+  }
+
+  PUGS_INLINE
+  static ExecutionStatManager&
+  getInstance()
+  {
+    Assert(m_instance != nullptr);   // LCOV_EXCL_LINE
+    return *m_instance;
+  }
+
+  static void printInfo();
+  static void create();
+  static void destroy();
+};
+
+#endif   // EXECUTION_STAT_MANAGER_HPP
diff --git a/src/utils/GlobalVariableManager.cpp b/src/utils/GlobalVariableManager.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..4887f3c48271128239550a214662c2ea8a4396f5
--- /dev/null
+++ b/src/utils/GlobalVariableManager.cpp
@@ -0,0 +1,18 @@
+#include <utils/GlobalVariableManager.hpp>
+
+GlobalVariableManager* GlobalVariableManager::m_instance = nullptr;
+
+void
+GlobalVariableManager::create()
+{
+  Assert(m_instance == nullptr);
+  m_instance = new GlobalVariableManager;
+}
+
+void
+GlobalVariableManager::destroy()
+{
+  Assert(m_instance != nullptr);
+  delete m_instance;
+  m_instance = nullptr;
+}
diff --git a/src/utils/GlobalVariableManager.hpp b/src/utils/GlobalVariableManager.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..9df677fac507545ca7559ecca4b6f55197e35c48
--- /dev/null
+++ b/src/utils/GlobalVariableManager.hpp
@@ -0,0 +1,39 @@
+#ifndef GLOBAL_VARIABLE_MANAGER_HPP
+#define GLOBAL_VARIABLE_MANAGER_HPP
+
+#include <utils/PugsAssert.hpp>
+#include <utils/PugsMacros.hpp>
+
+class GlobalVariableManager
+{
+ private:
+  size_t m_connectivity_id = 0;
+
+  static GlobalVariableManager* m_instance;
+
+  explicit GlobalVariableManager()                    = default;
+  GlobalVariableManager(GlobalVariableManager&&)      = delete;
+  GlobalVariableManager(const GlobalVariableManager&) = delete;
+  ~GlobalVariableManager()                            = default;
+
+ public:
+  PUGS_INLINE
+  size_t
+  getAndIncrementConnectivityId()
+  {
+    return m_connectivity_id++;
+  }
+
+  PUGS_INLINE
+  static GlobalVariableManager&
+  instance()
+  {
+    Assert(m_instance != nullptr);
+    return *m_instance;
+  }
+
+  static void create();
+  static void destroy();
+};
+
+#endif   // GLOBAL_VARIABLE_MANAGER_HPP
diff --git a/src/utils/Messenger.hpp b/src/utils/Messenger.hpp
index 21e17abf24600869b619a66415dc51714ec2cb70..ac79ee8490882a1bac8a9fd74db58b6cd8dccdb7 100644
--- a/src/utils/Messenger.hpp
+++ b/src/utils/Messenger.hpp
@@ -766,6 +766,15 @@ class Messenger
         CastArray cast_array = cast_value_to<CastType>::from(data);
         _broadcast_array(cast_array, root_rank);
       }
+    } else if constexpr (std::is_same_v<std::string, DataType>) {
+      Array s = convert_to_array(data);
+      broadcast(s, root_rank);
+      if (m_rank != root_rank) {
+        data.resize(s.size());
+        for (size_t i = 0; i < s.size(); ++i) {
+          data[i] = s[i];
+        }
+      }
     } else {
       static_assert(is_false_v<DataType>, "unexpected type of data");
     }
diff --git a/src/utils/PugsUtils.cpp b/src/utils/PugsUtils.cpp
index 8531aa6e8ee3091c952c886ec438b42067d4eee9..104e70cddc5cc29d927583adde0ffa53afe72e95 100644
--- a/src/utils/PugsUtils.cpp
+++ b/src/utils/PugsUtils.cpp
@@ -1,9 +1,11 @@
 #include <utils/PugsUtils.hpp>
 
+#include <dev/ParallelChecker.hpp>
 #include <utils/BacktraceManager.hpp>
 #include <utils/BuildInfo.hpp>
 #include <utils/CommunicatorManager.hpp>
 #include <utils/ConsoleManager.hpp>
+#include <utils/ExecutionStatManager.hpp>
 #include <utils/FPEManager.hpp>
 #include <utils/Messenger.hpp>
 #include <utils/PETScWrapper.hpp>
@@ -61,6 +63,7 @@ pugsBuildInfo()
   os << "MPI:      " << rang::style::bold << BuildInfo::mpiLibrary() << rang::style::reset << '\n';
   os << "PETSc:    " << rang::style::bold << BuildInfo::petscLibrary() << rang::style::reset << '\n';
   os << "SLEPc:    " << rang::style::bold << BuildInfo::slepcLibrary() << rang::style::reset << '\n';
+  os << "HDF5:     " << rang::style::bold << BuildInfo::hdf5Library() << rang::style::reset << '\n';
   os << "-------------------------------------------------------";
 
   return os.str();
@@ -86,6 +89,9 @@ initialize(int& argc, char* argv[])
   bool enable_signals = true;
   int nb_threads      = -1;
 
+  ParallelChecker::Mode pc_mode = ParallelChecker::Mode::automatic;
+  std::string pc_filename       = ParallelChecker::instance().filename();
+
   std::string filename;
   {
     CLI::App app{"pugs help"};
@@ -110,8 +116,12 @@ initialize(int& argc, char* argv[])
     bool show_preamble = true;
     app.add_flag("--preamble,!--no-preamble", show_preamble, "Show execution info preamble [default: true]");
 
-    bool show_backtrace = true;
-    app.add_flag("-b,--backtrace,!--no-backtrace", show_backtrace, "Show backtrace on failure [default: true]");
+    bool print_exec_stat = true;
+    app.add_flag("--exec-stat,!--no-exec-stat", print_exec_stat,
+                 "Display memory and CPU usage after execution [default: true]");
+
+    bool show_backtrace = false;
+    app.add_flag("-b,--backtrace,!--no-backtrace", show_backtrace, "Show backtrace on failure [default: false]");
 
     app.add_flag("--signal,!--no-signal", enable_signals, "Catches signals [default: true]");
 
@@ -122,6 +132,17 @@ initialize(int& argc, char* argv[])
     app.add_flag("--reproducible-sums,!--no-reproducible-sums", show_preamble,
                  "Special treatment of array sums to ensure reproducibility [default: true]");
 
+    std::map<std::string, ParallelChecker::Mode> pc_mode_map{{"auto", ParallelChecker::Mode::automatic},
+                                                             {"write", ParallelChecker::Mode::write},
+                                                             {"read", ParallelChecker::Mode::read}};
+    app
+      .add_option("--parallel-checker-mode", pc_mode,
+                  "Parallel checker mode (auto: sequential write/parallel read) [default: auto]")
+      ->transform(CLI::CheckedTransformer(pc_mode_map));
+
+    app.add_option("--parallel-checker-file", pc_filename,
+                   "Parallel checker filename   [default: " + pc_filename + "]");
+
     int mpi_split_color = -1;
     app.add_option("--mpi-split-color", mpi_split_color, "Sets the MPI split color value (for MPMD applications)")
       ->check(CLI::Range(0, std::numeric_limits<decltype(mpi_split_color)>::max()));
@@ -141,6 +162,7 @@ initialize(int& argc, char* argv[])
       CommunicatorManager::setSplitColor(mpi_split_color);
     }
 
+    ExecutionStatManager::getInstance().setPrint(print_exec_stat);
     BacktraceManager::setShow(show_backtrace);
     ConsoleManager::setShowPreamble(show_preamble);
     ConsoleManager::init(enable_color);
@@ -165,6 +187,9 @@ initialize(int& argc, char* argv[])
     Kokkos::initialize(args);
   }
 
+  ParallelChecker::instance().setMode(pc_mode);
+  ParallelChecker::instance().setFilename(pc_filename);
+
   if (ConsoleManager::showPreamble()) {
     std::cout << "----------------- " << rang::fg::green << "pugs exec info" << rang::fg::reset
               << " ----------------------" << '\n';
diff --git a/src/utils/SignalManager.cpp b/src/utils/SignalManager.cpp
index f2721177285d3ed5c8edb7ca484a685679c94e5d..62505d7b4d171ff3ff9bf207f9e357aada060fda 100644
--- a/src/utils/SignalManager.cpp
+++ b/src/utils/SignalManager.cpp
@@ -54,6 +54,8 @@ SignalManager::pauseForDebug(int signal)
 {
   if (std::string(PUGS_BUILD_TYPE) != "Release") {
     if (s_pause_on_error) {
+      // Each failing process must write
+      std::cerr.clear();
       std::cerr << "\n======================================\n"
                 << rang::style::reset << rang::fg::reset << rang::style::bold << "to attach gdb to this process run\n"
                 << "\tgdb -pid " << rang::fg::red << getpid() << rang::fg::reset << '\n'
@@ -77,7 +79,7 @@ SignalManager::handler(int signal)
     std::signal(SIGABRT, SIG_DFL);
 
     // Each failing process must write
-    std::cerr.setstate(std::ios::goodbit);
+    std::cerr.clear();
 
     std::cerr << BacktraceManager{} << '\n';
 
diff --git a/src/utils/pugs_config.hpp.in b/src/utils/pugs_config.hpp.in
index 0736003b9f40c4f71feea089072c19d9238e4033..4dc5babd4c157b9b41161dfbd3bb1156c1c6b631 100644
--- a/src/utils/pugs_config.hpp.in
+++ b/src/utils/pugs_config.hpp.in
@@ -5,6 +5,7 @@
 #cmakedefine PUGS_HAS_MPI
 #cmakedefine PUGS_HAS_PETSC
 #cmakedefine PUGS_HAS_SLEPC
+#cmakedefine PUGS_HAS_HDF5
 
 #cmakedefine SYSTEM_IS_LINUX
 #cmakedefine SYSTEM_IS_DARWIN
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 659e7760166b89a4772395125579781b567847b2..aaace485deb7ed83e66d80c2e75d9bce2cc9bc21 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -114,6 +114,7 @@ add_executable (unit_tests
   test_NameProcessor.cpp
   test_NaNHelper.cpp
   test_OStream.cpp
+  test_ParallelChecker_write.cpp
   test_ParseError.cpp
   test_PETScUtils.cpp
   test_PrimalToDiamondDualConnectivityDataMapper.cpp
@@ -198,6 +199,7 @@ add_executable (mpi_unit_tests
   test_MeshNodeInterface.cpp
   test_Messenger.cpp
   test_OFStream.cpp
+  test_ParallelChecker_read.cpp
   test_Partitioner.cpp
   test_RandomEngine.cpp
   test_SubItemArrayPerItemVariant.cpp
@@ -212,8 +214,15 @@ add_executable (mpi_unit_tests
 add_library(test_Pugs_MeshDataBase
   MeshDataBaseForTests.cpp)
 
+add_library(test_Pugs_ParallelCheckerTester
+  ParallelCheckerTester.cpp)
+
+target_link_libraries (test_Pugs_ParallelCheckerTester
+  ${HIGHFIVE_TARGET})
+
 target_link_libraries (unit_tests
   test_Pugs_MeshDataBase
+  test_Pugs_ParallelCheckerTester
   PugsLanguageAST
   PugsLanguageModules
   PugsLanguageAlgorithms
@@ -225,17 +234,20 @@ target_link_libraries (unit_tests
   PugsScheme
   PugsOutput
   PugsUtils
+  PugsDev
   Kokkos::kokkos
   ${PARMETIS_LIBRARIES}
   ${MPI_CXX_LINK_FLAGS} ${MPI_CXX_LIBRARIES}
   ${PETSC_LIBRARIES}
   Catch2
   ${PUGS_STD_LINK_FLAGS}
+  ${HIGHFIVE_TARGET}
   stdc++fs
   )
 
 target_link_libraries (mpi_unit_tests
   test_Pugs_MeshDataBase
+  test_Pugs_ParallelCheckerTester
   PugsAlgebra
   PugsAnalysis
   PugsUtils
@@ -249,6 +261,7 @@ target_link_libraries (mpi_unit_tests
   PugsLanguageUtils
   PugsScheme
   PugsOutput
+  PugsDev
   PugsUtils
   PugsAlgebra
   PugsMesh
@@ -258,6 +271,7 @@ target_link_libraries (mpi_unit_tests
   ${PETSC_LIBRARIES}
   Catch2
   ${PUGS_STD_LINK_FLAGS}
+  ${HIGHFIVE_TARGET}
   stdc++fs
   )
 
diff --git a/tests/ParallelCheckerTester.cpp b/tests/ParallelCheckerTester.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..80e92e4494afda30bdf5c8b6653b29f3a200970e
--- /dev/null
+++ b/tests/ParallelCheckerTester.cpp
@@ -0,0 +1,43 @@
+#include <ParallelCheckerTester.hpp>
+
+bool
+ParallelCheckerTester::isCreated() const
+{
+  return ParallelChecker::m_instance != nullptr;
+}
+
+std::string
+ParallelCheckerTester::getFilename() const
+{
+  return ParallelChecker::instance().m_filename;
+}
+
+ParallelChecker::Mode
+ParallelCheckerTester::getMode() const
+{
+  return ParallelChecker::instance().m_mode;
+}
+
+size_t
+ParallelCheckerTester::getTag() const
+{
+  return ParallelChecker::instance().m_tag;
+}
+
+void
+ParallelCheckerTester::setFilename(const std::string& filename) const
+{
+  ParallelChecker::instance().m_filename = filename;
+}
+
+void
+ParallelCheckerTester::setMode(ParallelChecker::Mode mode) const
+{
+  ParallelChecker::instance().m_mode = mode;
+}
+
+void
+ParallelCheckerTester::setTag(size_t tag) const
+{
+  ParallelChecker::instance().m_tag = tag;
+}
diff --git a/tests/ParallelCheckerTester.hpp b/tests/ParallelCheckerTester.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..9aefed506d34e7b899a9fa3fae1128180c03958e
--- /dev/null
+++ b/tests/ParallelCheckerTester.hpp
@@ -0,0 +1,23 @@
+#ifndef PARALLEL_CHECKER_TESTER_HPP
+#define PARALLEL_CHECKER_TESTER_HPP
+
+#include <dev/ParallelChecker.hpp>
+
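+// Test-only accessor to ParallelChecker's private state (this assumes the
+// tester is declared as a friend by ParallelChecker).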
+class ParallelCheckerTester
+{
+ public:
+  bool isCreated() const;
+
+  std::string getFilename() const;
+  ParallelChecker::Mode getMode() const;
+  size_t getTag() const;
+
+  void setFilename(const std::string& filename) const;
+  void setMode(ParallelChecker::Mode mode) const;
+  void setTag(size_t tag) const;
+
+  ParallelCheckerTester()  = default;
+  ~ParallelCheckerTester() = default;
+};
+
+#endif   // PARALLEL_CHECKER_TESTER_HPP
diff --git a/tests/mpi_test_main.cpp b/tests/mpi_test_main.cpp
index 1692bd7aca6edafd683fe7351c104cea8283a81c..fb0da8569382769cf495dedceb42a91528076dd9 100644
--- a/tests/mpi_test_main.cpp
+++ b/tests/mpi_test_main.cpp
@@ -8,6 +8,7 @@
 #include <mesh/DualMeshManager.hpp>
 #include <mesh/MeshDataManager.hpp>
 #include <mesh/SynchronizerManager.hpp>
+#include <utils/GlobalVariableManager.hpp>
 #include <utils/Messenger.hpp>
 #include <utils/PETScWrapper.hpp>
 #include <utils/RandomEngine.hpp>
@@ -51,6 +52,9 @@ main(int argc, char* argv[])
   Catch::Session session;
   int result = session.applyCommandLine(argc, argv);
 
+  // disable file locking to avoid mess in tests
+  setenv("HDF5_USE_FILE_LOCKING", "FALSE", 1);
+
   if (result == 0) {
     const auto& config = session.config();
     if (config.listReporters() or config.listTags() or config.listTests()) {
@@ -95,6 +99,7 @@ main(int argc, char* argv[])
       MeshDataManager::create();
       DualConnectivityManager::create();
       DualMeshManager::create();
+      GlobalVariableManager::create();
 
       MeshDataBaseForTests::create();
 
@@ -106,6 +111,7 @@ main(int argc, char* argv[])
 
       MeshDataBaseForTests::destroy();
 
+      GlobalVariableManager::destroy();
       DualMeshManager::destroy();
       DualConnectivityManager::destroy();
       MeshDataManager::destroy();
diff --git a/tests/test_Array.cpp b/tests/test_Array.cpp
index 7efc346e29693e2ee6302dfd4498ac2a9cb51b04..2651ecf43287fed07345f7fdbfef63e7d6f770a8 100644
--- a/tests/test_Array.cpp
+++ b/tests/test_Array.cpp
@@ -9,6 +9,7 @@
 
 #include <deque>
 #include <list>
+#include <random>
 #include <set>
 #include <unordered_set>
 #include <valarray>
@@ -343,7 +344,7 @@ TEST_CASE("Array", "[utils]")
 
       const auto sum_before_shuffle = sum(array);
 
-      std::random_shuffle(&(array[0]), &(array[0]) + array.size());
+      std::shuffle(&(array[0]), &(array[0]) + array.size(), std::mt19937{std::random_device{}()});
 
       const auto sum_after_shuffle = sum(array);
 
@@ -362,13 +363,13 @@ TEST_CASE("Array", "[utils]")
 
       const auto sum_before_shuffle = sum(array);
 
-      std::random_shuffle(&(array[0]), &(array[0]) + array.size());
+      std::shuffle(&(array[0]), &(array[0]) + array.size(), std::mt19937{std::random_device{}()});
 
       const auto sum_after_shuffle = sum(array);
 
       REQUIRE(sum_before_shuffle == sum_after_shuffle);
 
-      REQUIRE(sum_before_shuffle == Catch::Approx(direct_sum(array)));
+      REQUIRE(sum_before_shuffle == Catch::Approx(direct_sum(array)).epsilon(1E-4));
     }
 
     SECTION("reproducible TinyVector<3,double> sum")
@@ -384,7 +385,7 @@ TEST_CASE("Array", "[utils]")
 
       const auto sum_before_shuffle = sum(array);
 
-      std::random_shuffle(&(array[0]), &(array[0]) + array.size());
+      std::shuffle(&(array[0]), &(array[0]) + array.size(), std::mt19937{std::random_device{}()});
 
       ReproducibleTinyVectorSum s0(array);
 
@@ -411,7 +412,7 @@ TEST_CASE("Array", "[utils]")
 
       const auto sum_before_shuffle = sum(array);
 
-      std::random_shuffle(&(array[0]), &(array[0]) + array.size());
+      std::shuffle(&(array[0]), &(array[0]) + array.size(), std::mt19937{std::random_device{}()});
 
       ReproducibleTinyVectorSum s0(array);
 
@@ -420,9 +421,9 @@ TEST_CASE("Array", "[utils]")
       REQUIRE(sum_before_shuffle == sum_after_shuffle);
 
       auto naive_sum = direct_sum(array);
-      REQUIRE(sum_before_shuffle[0] == Catch::Approx(naive_sum[0]));
-      REQUIRE(sum_before_shuffle[1] == Catch::Approx(naive_sum[1]));
-      REQUIRE(sum_before_shuffle[2] == Catch::Approx(naive_sum[2]));
+      REQUIRE(sum_before_shuffle[0] == Catch::Approx(naive_sum[0]).epsilon(1E-4));
+      REQUIRE(sum_before_shuffle[1] == Catch::Approx(naive_sum[1]).epsilon(1E-4));
+      REQUIRE(sum_before_shuffle[2] == Catch::Approx(naive_sum[2]).epsilon(1E-4));
     }
 
     SECTION("reproducible TinyMatrix<2, 3> sum")
@@ -441,7 +442,7 @@ TEST_CASE("Array", "[utils]")
 
       const auto sum_before_shuffle = sum(array);
 
-      std::random_shuffle(&(array[0]), &(array[0]) + array.size());
+      std::shuffle(&(array[0]), &(array[0]) + array.size(), std::mt19937{std::random_device{}()});
 
       const auto sum_after_shuffle = sum(array);
 
@@ -472,7 +473,7 @@ TEST_CASE("Array", "[utils]")
 
       const auto sum_before_shuffle = sum(array);
 
-      std::random_shuffle(&(array[0]), &(array[0]) + array.size());
+      std::shuffle(&(array[0]), &(array[0]) + array.size(), std::mt19937{std::random_device{}()});
 
       const auto sum_after_shuffle = sum(array);
 
@@ -482,7 +483,7 @@ TEST_CASE("Array", "[utils]")
       REQUIRE(sum_before_shuffle(0, 0) == Catch::Approx(naive_sum(0, 0)));
       REQUIRE(sum_before_shuffle(0, 1) == Catch::Approx(naive_sum(0, 1)));
       REQUIRE(sum_before_shuffle(0, 2) == Catch::Approx(naive_sum(0, 2)));
-      REQUIRE(sum_before_shuffle(1, 0) == Catch::Approx(naive_sum(1, 0)).epsilon(1E-4));
+      REQUIRE(sum_before_shuffle(1, 0) == Catch::Approx(naive_sum(1, 0)).epsilon(5E-4));
       REQUIRE(sum_before_shuffle(1, 1) == Catch::Approx(naive_sum(1, 1)).margin(1E-6));
       REQUIRE(sum_before_shuffle(1, 2) == Catch::Approx(naive_sum(1, 2)));
     }
diff --git a/tests/test_Messenger.cpp b/tests/test_Messenger.cpp
index 4f76e250ddab60d81e1d49eacb1bcfc23425e317..4565614fbcd4e4dc387527b77ddb1be23b092e23 100644
--- a/tests/test_Messenger.cpp
+++ b/tests/test_Messenger.cpp
@@ -223,6 +223,18 @@ TEST_CASE("Messenger", "[mpi]")
       parallel::broadcast(value, 0);
       REQUIRE((value == mpi_check::tri_int{6, 2, 4}));
     }
+
+    {
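+      // broadcasting a std::string: receivers start with different
+      // content ("bar") and must end up with rank 0's value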
+      std::string data;
+      if (parallel::rank() == 0) {
+        data = "foo";
+      } else {
+        data = "bar";
+      }
+
+      parallel::broadcast(data, 0);
+      REQUIRE(data == "foo");
+    }
   }
 
   SECTION("broadcast array")
diff --git a/tests/test_OFStream.cpp b/tests/test_OFStream.cpp
index 6ca7d98df0a6f36bf30add2b891a659beba757c2..2ea0615c87aac0d519b62d47b33392a022d9a10d 100644
--- a/tests/test_OFStream.cpp
+++ b/tests/test_OFStream.cpp
@@ -48,4 +48,11 @@ TEST_CASE("OFStream", "[language]")
 
     REQUIRE(not std::filesystem::exists(filename));
   }
+
+  SECTION("bad filename")
+  {
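+    // "/" exists but cannot be opened for writing, so constructing the
+    // stream must fail with the dedicated error message; checked on rank 0
+    // only, where the file would actually be created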
+    if (parallel::rank() == 0) {
+      REQUIRE_THROWS_WITH(OFStream{"/"}, "error: cannot create file /");
+    }
+  }
 }
diff --git a/tests/test_ParallelChecker_read.cpp b/tests/test_ParallelChecker_read.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..497e1729d7ed719749327bcf652cf43a29ecaa4e
--- /dev/null
+++ b/tests/test_ParallelChecker_read.cpp
@@ -0,0 +1,2078 @@
+#include <catch2/catch_test_macros.hpp>
+#include <catch2/matchers/catch_matchers_all.hpp>
+
+#include <dev/ParallelChecker.hpp>
+
+#include <MeshDataBaseForTests.hpp>
+
+#include <mesh/ItemArrayUtils.hpp>
+#include <mesh/SubItemArrayPerItemUtils.hpp>
+#include <mesh/SubItemValuePerItemUtils.hpp>
+
+#include <filesystem>
+
+// clazy:excludeall=non-pod-global-static
+
+#ifdef PUGS_HAS_HDF5
+
+#include <ParallelCheckerTester.hpp>
+
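+// Minimal HDF5 datatypes used to forge the reference files: a TinyVector<D>
+// is written as a 1-D array type of D scalars and a TinyMatrix<M, N> as a
+// 2-D M x N array type, which is assumed to match the layout that
+// ParallelChecker reads back.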
+template <typename T>
+struct test_TinyVectorDataType;
+
+template <size_t Dimension, typename DataT>
+struct test_TinyVectorDataType<TinyVector<Dimension, DataT>> : public HighFive::DataType
+{
+  test_TinyVectorDataType()
+  {
+    hsize_t dim[]     = {Dimension};
+    auto h5_data_type = HighFive::create_datatype<DataT>();
+    _hid              = H5Tarray_create(h5_data_type.getId(), 1, dim);
+  }
+};
+
+template <typename T>
+struct test_TinyMatrixDataType;
+
+template <size_t M, size_t N, typename DataT>
+struct test_TinyMatrixDataType<TinyMatrix<M, N, DataT>> : public HighFive::DataType
+{
+  test_TinyMatrixDataType()
+  {
+    hsize_t dim[]     = {M, N};
+    auto h5_data_type = HighFive::create_datatype<DataT>();
+    _hid              = H5Tarray_create(h5_data_type.getId(), 2, dim);
+  }
+};
+
+TEST_CASE("ParallelChecker_read", "[dev]")
+{
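+  // start from a clean state: drop any leftover singleton, then recreate
+  // the checker in read mode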
+  {
+    ParallelCheckerTester pc_tester;
+    if (pc_tester.isCreated()) {
+      REQUIRE_NOTHROW(ParallelChecker::destroy());
+    }
+  }
+  REQUIRE_NOTHROW(ParallelChecker::create());
+  REQUIRE_NOTHROW(ParallelChecker::instance().setMode(ParallelChecker::Mode::read));
+
+  std::string tmp_dirname;
+
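+  // rank 0 creates a unique temporary directory and broadcasts its name so
+  // that every rank targets the same reference file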
+  {
+    if (parallel::rank() == 0) {
+      tmp_dirname = [&]() -> std::string {
+        std::string temp_filename = std::filesystem::temp_directory_path() / "pugs_test_read_h5_XXXXXX";
+        return std::string{mkdtemp(&temp_filename[0])};
+      }();
+    }
+    parallel::broadcast(tmp_dirname, 0);
+
+    std::filesystem::path path = tmp_dirname;
+    REQUIRE_NOTHROW(ParallelChecker::instance().setFilename(path / "parallel_check.h5"));
+  }
+
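+  // gather the numbers of all owned items: in parallel, ghost entries are
+  // filtered out and the owned numbers are allGathered, yielding one entry
+  // per global item for the reference file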
+  auto get_item_numbers = []<typename ConnectivityT>(const ConnectivityT& connectivity, ItemType item_type) {
+    Array<const int> number;
+    Array<const bool> is_owned;
+
+    switch (item_type) {
+    case ItemType::cell: {
+      number   = connectivity.cellNumber().arrayView();
+      is_owned = connectivity.cellIsOwned().arrayView();
+      break;
+    }
+    case ItemType::face: {
+      number   = connectivity.faceNumber().arrayView();
+      is_owned = connectivity.faceIsOwned().arrayView();
+      break;
+    }
+    case ItemType::edge: {
+      number   = connectivity.edgeNumber().arrayView();
+      is_owned = connectivity.edgeIsOwned().arrayView();
+      break;
+    }
+    case ItemType::node: {
+      number   = connectivity.nodeNumber().arrayView();
+      is_owned = connectivity.nodeIsOwned().arrayView();
+      break;
+    }
+    }
+
+    if (parallel::size() > 1) {
+      const size_t nb_local_item = [is_owned]() {
+        size_t count = 0;
+        for (size_t i = 0; i < is_owned.size(); ++i) {
+          count += is_owned[i];
+        }
+        return count;
+      }();
+
+      Array<int> owned_number{nb_local_item};
+      for (size_t i = 0, l = 0; i < is_owned.size(); ++i) {
+        if (is_owned[i]) {
+          owned_number[l++] = number[i];
+        }
+      }
+
+      number = parallel::allGatherVariable(owned_number);
+    }
+
+    return number;
+  };
+
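+  // rebuild the global rows map of the item -> subitem connectivity: gather
+  // the subitem counts of owned items, then prefix-sum them into a
+  // CSR-style offset array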
+  auto get_subitem_rows_map =
+    []<typename ConnectivityT>(const ConnectivityT& connectivity, ItemType item_type,
+                               ItemType subitem_type) -> Array<const typename ConnectivityMatrix::IndexType> {
+    Array rows_map = connectivity.getMatrix(item_type, subitem_type).rowsMap();
+
+    if (parallel::size() == 1) {
+      return rows_map;
+    } else {
+      Array<const bool> is_owned;
+      switch (item_type) {
+      case ItemType::cell: {
+        is_owned = connectivity.cellIsOwned().arrayView();
+        break;
+      }
+      case ItemType::face: {
+        is_owned = connectivity.faceIsOwned().arrayView();
+        break;
+      }
+      case ItemType::edge: {
+        is_owned = connectivity.edgeIsOwned().arrayView();
+        break;
+      }
+      case ItemType::node: {
+        is_owned = connectivity.nodeIsOwned().arrayView();
+        break;
+      }
+      }
+
+      Array<size_t> nb_subitem_per_item(rows_map.size() - 1);
+
+      for (size_t i = 0; i < nb_subitem_per_item.size(); ++i) {
+        nb_subitem_per_item[i] = rows_map[i + 1] - rows_map[i];
+      }
+
+      const size_t nb_local_item = [is_owned]() {
+        size_t count = 0;
+        for (size_t i = 0; i < is_owned.size(); ++i) {
+          count += is_owned[i];
+        }
+        return count;
+      }();
+
+      {
+        Array<size_t> owned_nb_subitem_per_item{nb_local_item};
+        for (size_t i = 0, l = 0; i < is_owned.size(); ++i) {
+          if (is_owned[i]) {
+            owned_nb_subitem_per_item[l++] = nb_subitem_per_item[i];
+          }
+        }
+        nb_subitem_per_item = parallel::allGatherVariable(owned_nb_subitem_per_item);
+      }
+
+      Array<typename ConnectivityMatrix::IndexType> global_rows_map{nb_subitem_per_item.size() + 1};
+      global_rows_map[0] = 0;
+      for (size_t i = 0; i < nb_subitem_per_item.size(); ++i) {
+        global_rows_map[i + 1] = global_rows_map[i] + nb_subitem_per_item[i];
+      }
+      return global_rows_map;
+    }
+  };
+
+  SECTION("check parallel write implementation")
+  {
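+    // write mode is only available in sequential runs; in parallel setting
+    // it must throw, and automatic mode should resolve to writing exactly
+    // when running on a single rank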
+    if (parallel::size() == 1) {
+      REQUIRE_NOTHROW(ParallelChecker::instance().setMode(ParallelChecker::Mode::write));
+    } else {
+      REQUIRE_THROWS_WITH(ParallelChecker::instance().setMode(ParallelChecker::Mode::write),
+                          "not implemented yet: parallel check write in parallel");
+    }
+
+    ParallelCheckerTester pc_tester;
+    pc_tester.setMode(ParallelChecker::Mode::automatic);
+    REQUIRE(ParallelChecker::instance().isWriting() == (parallel::size() == 1));
+  }
+
+  SECTION("check ItemValue/ItemArray")
+  {
+    // ItemValues
+    {   // 1d
+      auto mesh                           = MeshDataBaseForTests::get().unordered1DMesh();
+      std::string filename                = ParallelChecker::instance().filename();
+      const Connectivity<1>& connectivity = mesh->connectivity();
+
+      const std::string name = "sin";
+
+      SourceLocation source_location;
+
+      Array numbers = get_item_numbers(connectivity, ItemType::cell);
+
+      int tag = 12;
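+      // rank 0 forges the reference file by hand: group /values/<tag> holds
+      // the owned item numbers, the values themselves and the metadata
+      // attributes that parallel_check compares against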
+      if (parallel::rank() == 0) {
+        HighFive::File file(filename, HighFive::File::Overwrite);
+        HighFive::Group group = file.createGroup("/values/" + std::to_string(tag));
+
+        group.createDataSet<int>("numbers", HighFive::DataSpace{std::vector<size_t>{numbers.size()}})
+          .write_raw<int>(&(numbers[0]));
+
+        Array<double> values{numbers.size()};
+        for (size_t i = 0; i < numbers.size(); ++i) {
+          values[i] = std::sin(numbers[i]);
+        }
+        group.createDataSet<double>(name, HighFive::DataSpace{std::vector<size_t>{numbers.size()}})
+          .write_raw<double>(&(values[0]));
+
+        group.createAttribute("filename", source_location.filename());
+        group.createAttribute("line", source_location.line());
+        group.createAttribute("function", source_location.function());
+        group.createAttribute("name", name);
+        group.createAttribute("dimension", connectivity.dimension());
+        group.createAttribute("item_type", std::string{itemName(ItemType::cell)});
+        group.createAttribute("data_type", demangle<double>());
+      }
+      parallel::barrier();
+      ParallelCheckerTester pc_tester;
+      pc_tester.setTag(tag);
+
+      CellValue<double> values{connectivity};
+      CellValue<const int> cell_number = connectivity.cellNumber();
+      for (CellId cell_id = 0; cell_id < connectivity.numberOfCells(); ++cell_id) {
+        values[cell_id] = std::sin(cell_number[cell_id]);
+      }
+
+      REQUIRE_NOTHROW(parallel_check(values, "sin", source_location));
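+      // the tag is reset before every new comparison: the checker
+      // presumably advances it after each successful check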
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different name in ref");
+      REQUIRE_NOTHROW(parallel_check(values, "not_sin", source_location));
+
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different source file in ref");
+      REQUIRE_NOTHROW(parallel_check(values, "sin",
+                                     SourceLocation{"other-source-file", source_location.line(),
+                                                    source_location.column(), source_location.function()}));
+
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different source line in ref");
+      REQUIRE_NOTHROW(parallel_check(values, "sin",
+                                     SourceLocation{source_location.filename(), source_location.line() + 100,
+                                                    source_location.column(), source_location.function()}));
+
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different source function in ref");
+      REQUIRE_NOTHROW(parallel_check(values, "sin",
+                                     SourceLocation{source_location.filename(), source_location.line(),
+                                                    source_location.column(), "foo"}));
+
+      if (parallel::size() > 1) {
+        CellValue<double> not_sync     = copy(values);
+        CellValue<const bool> is_owned = connectivity.cellIsOwned();
+        if (parallel::rank() == 0) {
+          for (CellId cell_id = 0; cell_id < connectivity.numberOfCells(); ++cell_id) {
+            if (not is_owned[cell_id]) {
+              not_sync[cell_id] += 3.2;
+              break;
+            }
+          }
+        }
+        REQUIRE(not isSynchronized(not_sync));
+        pc_tester.setTag(tag);
+        UNSCOPED_INFO("can have different ghost values in ref (no exception)");
+        REQUIRE_NOTHROW(parallel_check(not_sync, "sin", source_location));
+      }
+
+      {
+        CellValue<double> different = copy(values);
+        bool has_difference         = false;
+        if (parallel::rank() == 0) {
+          CellValue<const bool> is_owned = connectivity.cellIsOwned();
+          for (CellId cell_id = 0; cell_id < connectivity.numberOfCells(); ++cell_id) {
+            if (is_owned[cell_id]) {
+              different[cell_id] += 3.2;
+              has_difference = true;
+              break;
+            }
+          }
+        }
+        has_difference = parallel::allReduceOr(has_difference);
+
+        REQUIRE(has_difference);
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(different, "sin", source_location), "error: calculations differ!");
+      }
+
+      {
+        CellValue<int> other_data_type{connectivity};
+        other_data_type.fill(0);
+
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(other_data_type, "sin", source_location), "error: cannot compare data");
+      }
+
+      {
+        CellArray<double> arrays{connectivity, 1};
+        arrays.fill(0);
+
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(arrays, "sin", source_location), "error: cannot compare data");
+      }
+
+      {
+        CellArray<double> arrays{connectivity, 2};
+        arrays.fill(0);
+
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(arrays, "sin", source_location), "error: cannot compare data");
+      }
+
+      {
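+        // temporarily corrupt a metadata attribute to force a mismatch,
+        // then restore it so the following checks still pass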
+        if (parallel::rank() == 0) {
+          HighFive::File file(filename, HighFive::File::ReadWrite);
+          HighFive::Group group = file.getGroup("/values/" + std::to_string(tag));
+          group.getAttribute("dimension").write(size_t{2});
+        }
+        parallel::barrier();
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(values, "sin", source_location), "error: cannot compare data");
+
+        if (parallel::rank() == 0) {
+          HighFive::File file(filename, HighFive::File::ReadWrite);
+          HighFive::Group group = file.getGroup("/values/" + std::to_string(tag));
+          group.getAttribute("dimension").write(connectivity.dimension());
+        }
+        parallel::barrier();
+      }
+
+      {
+        if (parallel::rank() == 0) {
+          HighFive::File file(filename, HighFive::File::ReadWrite);
+          HighFive::Group group = file.getGroup("/values/" + std::to_string(tag));
+          group.getAttribute("item_type").write(std::string{itemName(ItemType::node)});
+        }
+        parallel::barrier();
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(values, "sin", source_location), "error: cannot compare data");
+
+        if (parallel::rank() == 0) {
+          HighFive::File file(filename, HighFive::File::ReadWrite);
+          HighFive::Group group = file.getGroup("/values/" + std::to_string(tag));
+          group.getAttribute("item_type").write(std::string{itemName(ItemType::cell)});
+        }
+        parallel::barrier();
+      }
+
+      {
+        auto other_mesh                           = MeshDataBaseForTests::get().cartesian1DMesh();
+        const Connectivity<1>& other_connectivity = other_mesh->connectivity();
+
+        CellValue<double> other_shape{other_connectivity};
+        other_shape.fill(1);
+
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(other_shape, "sin", source_location),
+                            "error: some item numbers are not defined in reference");
+      }
+    }
+
+    // ItemArray
+    {   // 1d
+      auto mesh                           = MeshDataBaseForTests::get().unordered1DMesh();
+      std::string filename                = ParallelChecker::instance().filename();
+      const Connectivity<1>& connectivity = mesh->connectivity();
+
+      const std::string name = "sin";
+
+      SourceLocation source_location;
+
+      Array numbers = get_item_numbers(connectivity, ItemType::cell);
+
+      int tag = 12;
+      if (parallel::rank() == 0) {
+        HighFive::File file(filename, HighFive::File::Overwrite);
+        HighFive::Group group = file.createGroup("/values/" + std::to_string(tag));
+
+        group.createDataSet<int>("numbers", HighFive::DataSpace{std::vector<size_t>{numbers.size()}})
+          .write_raw<int>(&(numbers[0]));
+
+        Table<double> arrays{numbers.size(), 2};
+        for (size_t i = 0; i < numbers.size(); ++i) {
+          for (size_t j = 0; j < 2; ++j) {
+            arrays[i][j] = std::sin(2 * numbers[i] + j);
+          }
+        }
+        group
+          .createDataSet<double>(name, HighFive::DataSpace{std::vector<size_t>{arrays.numberOfRows(),
+                                                                               arrays.numberOfColumns()}})
+          .write_raw<double>(&(arrays(0, 0)));
+
+        group.createAttribute("filename", source_location.filename());
+        group.createAttribute("line", source_location.line());
+        group.createAttribute("function", source_location.function());
+        group.createAttribute("name", name);
+        group.createAttribute("dimension", connectivity.dimension());
+        group.createAttribute("item_type", std::string{itemName(ItemType::cell)});
+        group.createAttribute("data_type", demangle<double>());
+      }
+      parallel::barrier();
+      ParallelCheckerTester pc_tester;
+      pc_tester.setTag(tag);
+
+      CellArray<double> arrays{connectivity, 2};
+      CellValue<const int> cell_number = connectivity.cellNumber();
+      for (CellId cell_id = 0; cell_id < connectivity.numberOfCells(); ++cell_id) {
+        for (size_t j = 0; j < 2; ++j) {
+          arrays[cell_id][j] = std::sin(2 * cell_number[cell_id] + j);
+        }
+      }
+
+      REQUIRE_NOTHROW(parallel_check(arrays, "sin", source_location));
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different name in ref");
+      REQUIRE_NOTHROW(parallel_check(arrays, "not_sin", source_location));
+
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different source file in ref");
+      REQUIRE_NOTHROW(parallel_check(arrays, "sin",
+                                     SourceLocation{"other-source-file", source_location.line(),
+                                                    source_location.column(), source_location.function()}));
+
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different source line in ref");
+      REQUIRE_NOTHROW(parallel_check(arrays, "sin",
+                                     SourceLocation{source_location.filename(), source_location.line() + 100,
+                                                    source_location.column(), source_location.function()}));
+
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different source function in ref");
+      REQUIRE_NOTHROW(parallel_check(arrays, "sin",
+                                     SourceLocation{source_location.filename(), source_location.line(),
+                                                    source_location.column(), "foo"}));
+
+      if (parallel::size() > 1) {
+        CellArray<double> not_sync     = copy(arrays);
+        CellValue<const bool> is_owned = connectivity.cellIsOwned();
+        if (parallel::rank() == 0) {
+          for (CellId cell_id = 0; cell_id < connectivity.numberOfCells(); ++cell_id) {
+            if (not is_owned[cell_id]) {
+              not_sync[cell_id][0] += 3.2;
+              break;
+            }
+          }
+        }
+        REQUIRE(not isSynchronized(not_sync));
+        pc_tester.setTag(tag);
+        UNSCOPED_INFO("can have different ghost values in ref (no exception)");
+        REQUIRE_NOTHROW(parallel_check(not_sync, "sin", source_location));
+      }
+
+      {
+        CellArray<double> different = copy(arrays);
+        bool has_difference         = false;
+        if (parallel::rank() == 0) {
+          CellValue<const bool> is_owned = connectivity.cellIsOwned();
+          for (CellId cell_id = 0; cell_id < connectivity.numberOfCells(); ++cell_id) {
+            if (is_owned[cell_id]) {
+              different[cell_id][0] += 3.2;
+              has_difference = true;
+              break;
+            }
+          }
+        }
+        has_difference = parallel::allReduceOr(has_difference);
+
+        REQUIRE(has_difference);
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(different, "sin", source_location), "error: calculations differ!");
+      }
+
+      {
+        CellValue<int> other_data_type{connectivity};
+        other_data_type.fill(0);
+
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(other_data_type, "sin", source_location), "error: cannot compare data");
+      }
+
+      {
+        CellArray<double> arrays{connectivity, 1};
+        arrays.fill(0);
+
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(arrays, "sin", source_location), "error: cannot compare data");
+      }
+
+      {
+        auto other_mesh                           = MeshDataBaseForTests::get().cartesian1DMesh();
+        const Connectivity<1>& other_connectivity = other_mesh->connectivity();
+
+        CellArray<double> other_shape{other_connectivity, 2};
+        other_shape.fill(1);
+
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(other_shape, "sin", source_location),
+                            "error: some item numbers are not defined in reference");
+      }
+    }
+
+    // ItemValues
+    {   // 2d
+      auto mesh                           = MeshDataBaseForTests::get().hybrid2DMesh();
+      std::string filename                = ParallelChecker::instance().filename();
+      const Connectivity<2>& connectivity = mesh->connectivity();
+
+      const std::string name = "sin";
+
+      SourceLocation source_location;
+
+      Array numbers = get_item_numbers(connectivity, ItemType::node);
+
+      using DataType = TinyVector<3>;
+
+      int tag = 9;
+      if (parallel::rank() == 0) {
+        HighFive::File file(filename, HighFive::File::Overwrite);
+        HighFive::Group group = file.createGroup("/values/" + std::to_string(tag));
+
+        group.createDataSet<int>("numbers", HighFive::DataSpace{std::vector<size_t>{numbers.size()}})
+          .write_raw<int>(&(numbers[0]));
+
+        Array<DataType> values{numbers.size()};
+        for (size_t i = 0; i < numbers.size(); ++i) {
+          for (size_t j = 0; j < DataType::Dimension; ++j) {
+            values[i][j] = std::sin(numbers[i] + j);
+          }
+        }
+        group
+          .createDataSet(name, HighFive::DataSpace{std::vector<size_t>{numbers.size()}},
+                         test_TinyVectorDataType<DataType>{})
+          .template write_raw<double>(&(values[0][0]), test_TinyVectorDataType<DataType>{});
+
+        group.createAttribute("filename", source_location.filename());
+        group.createAttribute("line", source_location.line());
+        group.createAttribute("function", source_location.function());
+        group.createAttribute("name", name);
+        group.createAttribute("dimension", connectivity.dimension());
+        group.createAttribute("item_type", std::string{itemName(ItemType::node)});
+        group.createAttribute("data_type", demangle<DataType>());
+      }
+      parallel::barrier();
+      ParallelCheckerTester pc_tester;
+      pc_tester.setTag(tag);
+
+      NodeValue<DataType> values{connectivity};
+      NodeValue<const int> node_number = connectivity.nodeNumber();
+      for (NodeId node_id = 0; node_id < connectivity.numberOfNodes(); ++node_id) {
+        for (size_t j = 0; j < DataType::Dimension; ++j) {
+          values[node_id][j] = std::sin(node_number[node_id] + j);
+        }
+      }
+
+      REQUIRE_NOTHROW(parallel_check(values, "sin", source_location));
+
+      if (parallel::size() > 1) {
+        NodeValue<DataType> not_sync   = copy(values);
+        NodeValue<const bool> is_owned = connectivity.nodeIsOwned();
+        if (parallel::rank() == 0) {
+          for (NodeId node_id = 0; node_id < connectivity.numberOfNodes(); ++node_id) {
+            if (not is_owned[node_id]) {
+              not_sync[node_id][0] += 3.2;
+              break;
+            }
+          }
+        }
+        REQUIRE(not isSynchronized(not_sync));
+        pc_tester.setTag(tag);
+        UNSCOPED_INFO("can have different ghost values in ref (no exception)");
+        REQUIRE_NOTHROW(parallel_check(not_sync, "sin", source_location));
+      }
+
+      {
+        NodeValue<DataType> different = copy(values);
+        bool has_difference           = false;
+        if (parallel::rank() == 0) {
+          NodeValue<const bool> is_owned = connectivity.nodeIsOwned();
+          for (NodeId node_id = 0; node_id < connectivity.numberOfNodes(); ++node_id) {
+            if (is_owned[node_id]) {
+              different[node_id][0] += 3.2;
+              has_difference = true;
+              break;
+            }
+          }
+        }
+        has_difference = parallel::allReduceOr(has_difference);
+
+        REQUIRE(has_difference);
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(different, "sin", source_location), "error: calculations differ!");
+      }
+    }
+
+    // ItemArray
+    {   // 2d
+      auto mesh                           = MeshDataBaseForTests::get().hybrid2DMesh();
+      std::string filename                = ParallelChecker::instance().filename();
+      const Connectivity<2>& connectivity = mesh->connectivity();
+
+      const std::string name = "sin";
+
+      SourceLocation source_location;
+
+      using DataType = TinyMatrix<3, 2>;
+
+      Array numbers = get_item_numbers(connectivity, ItemType::face);
+
+      int tag = 12;
+      if (parallel::rank() == 0) {
+        HighFive::File file(filename, HighFive::File::Overwrite);
+        HighFive::Group group = file.createGroup("/values/" + std::to_string(tag));
+
+        group.createDataSet<int>("numbers", HighFive::DataSpace{std::vector<size_t>{numbers.size()}})
+          .write_raw<int>(&(numbers[0]));
+
+        Table<DataType> arrays{numbers.size(), 2};
+        for (size_t i = 0; i < arrays.numberOfRows(); ++i) {
+          for (size_t j = 0; j < arrays.numberOfColumns(); ++j) {
+            for (size_t k = 0; k < DataType::NumberOfRows; ++k) {
+              for (size_t l = 0; l < DataType::NumberOfColumns; ++l) {
+                arrays[i][j](k, l) = std::sin(2 * numbers[i] + j + 3 * k + 2 * l);
+              }
+            }
+          }
+        }
+        group
+          .createDataSet(name,
+                         HighFive::DataSpace{std::vector<size_t>{arrays.numberOfRows(), arrays.numberOfColumns()}},
+                         test_TinyMatrixDataType<DataType>{})
+          .template write_raw<double>(&(arrays[0][0](0, 0)), test_TinyMatrixDataType<DataType>{});
+
+        group.createAttribute("filename", source_location.filename());
+        group.createAttribute("line", source_location.line());
+        group.createAttribute("function", source_location.function());
+        group.createAttribute("name", name);
+        group.createAttribute("dimension", connectivity.dimension());
+        group.createAttribute("item_type", std::string{itemName(ItemType::face)});
+        group.createAttribute("data_type", demangle<DataType>());
+      }
+      parallel::barrier();
+      ParallelCheckerTester pc_tester;
+      pc_tester.setTag(tag);
+
+      FaceArray<DataType> arrays{connectivity, 2};
+      FaceValue<const int> face_number = connectivity.faceNumber();
+      for (FaceId face_id = 0; face_id < connectivity.numberOfFaces(); ++face_id) {
+        for (size_t j = 0; j < arrays.sizeOfArrays(); ++j) {
+          for (size_t k = 0; k < DataType::NumberOfRows; ++k) {
+            for (size_t l = 0; l < DataType::NumberOfColumns; ++l) {
+              arrays[face_id][j](k, l) = std::sin(2 * face_number[face_id] + j + 3 * k + 2 * l);
+            }
+          }
+        }
+      }
+
+      REQUIRE_NOTHROW(parallel_check(arrays, "sin", source_location));
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different name in ref");
+      REQUIRE_NOTHROW(parallel_check(arrays, "not_sin", source_location));
+
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different source file in ref");
+      REQUIRE_NOTHROW(parallel_check(arrays, "sin",
+                                     SourceLocation{"other-source-file", source_location.line(),
+                                                    source_location.column(), source_location.function()}));
+
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different source line in ref");
+      REQUIRE_NOTHROW(parallel_check(arrays, "sin",
+                                     SourceLocation{source_location.filename(), source_location.line() + 100,
+                                                    source_location.column(), source_location.function()}));
+
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different source function in ref");
+      REQUIRE_NOTHROW(parallel_check(arrays, "sin",
+                                     SourceLocation{source_location.filename(), source_location.line(),
+                                                    source_location.column(), "foo"}));
+
+      if (parallel::size() > 1) {
+        FaceArray<DataType> not_sync   = copy(arrays);
+        FaceValue<const bool> is_owned = connectivity.faceIsOwned();
+        if (parallel::rank() == 0) {
+          for (FaceId face_id = 0; face_id < connectivity.numberOfFaces(); ++face_id) {
+            if (not is_owned[face_id]) {
+              not_sync[face_id][0](0, 0) += 3.2;
+              break;
+            }
+          }
+        }
+        REQUIRE(not isSynchronized(not_sync));
+        pc_tester.setTag(tag);
+        UNSCOPED_INFO("can have different ghost values in ref (no exception)");
+        REQUIRE_NOTHROW(parallel_check(not_sync, "sin", source_location));
+      }
+
+      {
+        FaceArray<DataType> different = copy(arrays);
+        bool has_difference           = false;
+        if (parallel::rank() == 0) {
+          FaceValue<const bool> is_owned = connectivity.faceIsOwned();
+          for (FaceId face_id = 0; face_id < connectivity.numberOfFaces(); ++face_id) {
+            if (is_owned[face_id]) {
+              different[face_id][0](0, 0) += 3.2;
+              has_difference = true;
+              break;
+            }
+          }
+        }
+        has_difference = parallel::allReduceOr(has_difference);
+
+        REQUIRE(has_difference);
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(different, "sin", source_location), "error: calculations differ!");
+      }
+
+      {
+        FaceValue<TinyVector<6>> other_data_type{connectivity};
+        other_data_type.fill(zero);
+
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(other_data_type, "sin", source_location), "error: cannot compare data");
+      }
+
+      {
+        FaceArray<DataType> arrays{connectivity, 1};
+        arrays.fill(zero);
+
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(arrays, "sin", source_location), "error: cannot compare data");
+      }
+
+      {
+        auto other_mesh                           = MeshDataBaseForTests::get().cartesian2DMesh();
+        const Connectivity<2>& other_connectivity = other_mesh->connectivity();
+
+        FaceArray<DataType> other_shape{other_connectivity, 2};
+        other_shape.fill(zero);
+
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(other_shape, "sin", source_location),
+                            "error: number of items differs from reference");
+      }
+    }
+
+    // ItemValues
+    {   // 3d
+      auto mesh                           = MeshDataBaseForTests::get().hybrid3DMesh();
+      std::string filename                = ParallelChecker::instance().filename();
+      const Connectivity<3>& connectivity = mesh->connectivity();
+
+      const std::string name = "sin";
+
+      SourceLocation source_location;
+
+      Array numbers = get_item_numbers(connectivity, ItemType::node);
+
+      using DataType = TinyMatrix<2, 3>;
+
+      int tag = 9;
+      if (parallel::rank() == 0) {
+        HighFive::File file(filename, HighFive::File::Overwrite);
+        HighFive::Group group = file.createGroup("/values/" + std::to_string(tag));
+
+        group.createDataSet<int>("numbers", HighFive::DataSpace{std::vector<size_t>{numbers.size()}})
+          .write_raw<int>(&(numbers[0]));
+
+        Array<DataType> values{numbers.size()};
+        for (size_t i = 0; i < numbers.size(); ++i) {
+          for (size_t j = 0; j < DataType::NumberOfRows; ++j) {
+            for (size_t k = 0; k < DataType::NumberOfColumns; ++k) {
+              values[i](j, k) = std::sin(numbers[i] + j + 2 * k);
+            }
+          }
+        }
+        group
+          .createDataSet(name, HighFive::DataSpace{std::vector<size_t>{numbers.size()}},
+                         test_TinyMatrixDataType<DataType>{})
+          .template write_raw<double>(&(values[0](0, 0)), test_TinyMatrixDataType<DataType>{});
+
+        group.createAttribute("filename", source_location.filename());
+        group.createAttribute("line", source_location.line());
+        group.createAttribute("function", source_location.function());
+        group.createAttribute("name", name);
+        group.createAttribute("dimension", connectivity.dimension());
+        group.createAttribute("item_type", std::string{itemName(ItemType::node)});
+        group.createAttribute("data_type", demangle<DataType>());
+      }
+      parallel::barrier();
+      ParallelCheckerTester pc_tester;
+      pc_tester.setTag(tag);
+
+      NodeValue<DataType> values{connectivity};
+      NodeValue<const int> node_number = connectivity.nodeNumber();
+      for (NodeId node_id = 0; node_id < connectivity.numberOfNodes(); ++node_id) {
+        for (size_t j = 0; j < DataType::NumberOfRows; ++j) {
+          for (size_t k = 0; k < DataType::NumberOfColumns; ++k) {
+            values[node_id](j, k) = std::sin(node_number[node_id] + j + 2 * k);
+          }
+        }
+      }
+
+      REQUIRE_NOTHROW(parallel_check(values, "sin", source_location));
+
+      if (parallel::size() > 1) {
+        NodeValue<DataType> not_sync   = copy(values);
+        NodeValue<const bool> is_owned = connectivity.nodeIsOwned();
+        if (parallel::rank() == 0) {
+          for (NodeId node_id = 0; node_id < connectivity.numberOfNodes(); ++node_id) {
+            if (not is_owned[node_id]) {
+              not_sync[node_id](0, 0) += 3.2;
+              break;
+            }
+          }
+        }
+        REQUIRE(not isSynchronized(not_sync));
+        pc_tester.setTag(tag);
+        UNSCOPED_INFO("can have different ghost values in ref (no exception)");
+        REQUIRE_NOTHROW(parallel_check(not_sync, "sin", source_location));
+      }
+
+      {
+        NodeValue<DataType> different = copy(values);
+        bool has_difference           = false;
+        if (parallel::rank() == 0) {
+          NodeValue<const bool> is_owned = connectivity.nodeIsOwned();
+          for (NodeId node_id = 0; node_id < connectivity.numberOfNodes(); ++node_id) {
+            if (is_owned[node_id]) {
+              different[node_id](0, 0) += 3.2;
+              has_difference = true;
+              break;
+            }
+          }
+        }
+        has_difference = parallel::allReduceOr(has_difference);
+
+        REQUIRE(has_difference);
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(different, "sin", source_location), "error: calculations differ!");
+      }
+    }
+
+    // ItemArray
+    {   // 3d
+      auto mesh                           = MeshDataBaseForTests::get().hybrid3DMesh();
+      std::string filename                = ParallelChecker::instance().filename();
+      const Connectivity<3>& connectivity = mesh->connectivity();
+
+      const std::string name = "sin";
+
+      SourceLocation source_location;
+
+      using DataType = TinyVector<2>;
+
+      Array numbers = get_item_numbers(connectivity, ItemType::face);
+
+      int tag = 7;
+      if (parallel::rank() == 0) {
+        HighFive::File file(filename, HighFive::File::Overwrite);
+        HighFive::Group group = file.createGroup("/values/" + std::to_string(tag));
+
+        group.createDataSet<int>("numbers", HighFive::DataSpace{std::vector<size_t>{numbers.size()}})
+          .write_raw<int>(&(numbers[0]));
+
+        Table<DataType> arrays{numbers.size(), 2};
+        for (size_t i = 0; i < arrays.numberOfRows(); ++i) {
+          for (size_t j = 0; j < arrays.numberOfColumns(); ++j) {
+            for (size_t k = 0; k < DataType::Dimension; ++k) {
+              arrays[i][j][k] = std::sin(2 * numbers[i] + j + 3 * k);
+            }
+          }
+        }
+        group
+          .createDataSet(name,
+                         HighFive::DataSpace{std::vector<size_t>{arrays.numberOfRows(), arrays.numberOfColumns()}},
+                         test_TinyVectorDataType<DataType>{})
+          .template write_raw<double>(&(arrays[0][0][0]), test_TinyVectorDataType<DataType>{});
+
+        group.createAttribute("filename", source_location.filename());
+        group.createAttribute("line", source_location.line());
+        group.createAttribute("function", source_location.function());
+        group.createAttribute("name", name);
+        group.createAttribute("dimension", connectivity.dimension());
+        group.createAttribute("item_type", std::string{itemName(ItemType::face)});
+        group.createAttribute("data_type", demangle<DataType>());
+      }
+      parallel::barrier();
+      ParallelCheckerTester pc_tester;
+      pc_tester.setTag(tag);
+
+      FaceArray<DataType> arrays{connectivity, 2};
+      FaceValue<const int> face_number = connectivity.faceNumber();
+      for (FaceId face_id = 0; face_id < connectivity.numberOfFaces(); ++face_id) {
+        for (size_t j = 0; j < arrays.sizeOfArrays(); ++j) {
+          for (size_t k = 0; k < DataType::Dimension; ++k) {
+            arrays[face_id][j][k] = std::sin(2 * face_number[face_id] + j + 3 * k);
+          }
+        }
+      }
+
+      REQUIRE_NOTHROW(parallel_check(arrays, "sin", source_location));
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different name in ref");
+      REQUIRE_NOTHROW(parallel_check(arrays, "not_sin", source_location));
+
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different source file in ref");
+      REQUIRE_NOTHROW(parallel_check(arrays, "sin",
+                                     SourceLocation{"other-source-file", source_location.line(),
+                                                    source_location.column(), source_location.function()}));
+
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different source line in ref");
+      REQUIRE_NOTHROW(parallel_check(arrays, "sin",
+                                     SourceLocation{source_location.filename(), source_location.line() + 100,
+                                                    source_location.column(), source_location.function()}));
+
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different source function in ref");
+      REQUIRE_NOTHROW(parallel_check(arrays, "sin",
+                                     SourceLocation{source_location.filename(), source_location.line(),
+                                                    source_location.column(), "foo"}));
+
+      if (parallel::size() > 1) {
+        FaceArray<DataType> not_sync   = copy(arrays);
+        FaceValue<const bool> is_owned = connectivity.faceIsOwned();
+        if (parallel::rank() == 0) {
+          for (FaceId face_id = 0; face_id < connectivity.numberOfFaces(); ++face_id) {
+            if (not is_owned[face_id]) {
+              not_sync[face_id][0][0] += 3.2;
+              break;
+            }
+          }
+        }
+        REQUIRE(not isSynchronized(not_sync));
+        pc_tester.setTag(tag);
+        UNSCOPED_INFO("can have different ghost values in ref (no exception)");
+        REQUIRE_NOTHROW(parallel_check(not_sync, "sin", source_location));
+      }
+
+      {
+        FaceArray<DataType> different = copy(arrays);
+        bool has_difference           = false;
+        if (parallel::rank() == 0) {
+          FaceValue<const bool> is_owned = connectivity.faceIsOwned();
+          for (FaceId face_id = 0; face_id < connectivity.numberOfFaces(); ++face_id) {
+            if (is_owned[face_id]) {
+              different[face_id][0][0] += 3.2;
+              has_difference = true;
+              break;
+            }
+          }
+        }
+        has_difference = parallel::allReduceOr(has_difference);
+
+        REQUIRE(has_difference);
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(different, "sin", source_location), "error: calculations differ!");
+      }
+
+      {
+        FaceValue<TinyVector<6>> other_data_type{connectivity};
+        other_data_type.fill(zero);
+
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(other_data_type, "sin", source_location), "error: cannot compare data");
+      }
+
+      {
+        FaceArray<DataType> arrays{connectivity, 1};
+        arrays.fill(zero);
+
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(arrays, "sin", source_location), "error: cannot compare data");
+      }
+
+      {
+        auto other_mesh                           = MeshDataBaseForTests::get().cartesian2DMesh();
+        const Connectivity<2>& other_connectivity = other_mesh->connectivity();
+
+        FaceArray<DataType> other_shape{other_connectivity, 2};
+        other_shape.fill(zero);
+
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(other_shape, "sin", source_location), "error: cannot compare data");
+      }
+    }
+  }
+
+  SECTION("check SubItemValuePerItem/SubItemArrayPerItem")
+  {
+    // SubItemValuePerItem
+    {   // 1d
+      auto mesh                           = MeshDataBaseForTests::get().unordered1DMesh();
+      std::string filename                = ParallelChecker::instance().filename();
+      const Connectivity<1>& connectivity = mesh->connectivity();
+
+      const std::string name = "sin";
+
+      SourceLocation source_location;
+      Array numbers = get_item_numbers(connectivity, ItemType::node);
+      Array<const typename ConnectivityMatrix::IndexType> rows_map =
+        get_subitem_rows_map(connectivity, ItemType::node, ItemType::cell);
+
+      int tag = 6;
+      if (parallel::rank() == 0) {
+        HighFive::File file(filename, HighFive::File::Overwrite);
+        HighFive::Group group = file.createGroup("/values/" + std::to_string(tag));
+
+        group.createDataSet<int>("numbers", HighFive::DataSpace{std::vector<size_t>{numbers.size()}})
+          .write_raw<int>(&(numbers[0]));
+        group
+          .createDataSet<typename ConnectivityMatrix::IndexType>("rows_map", HighFive::DataSpace{std::vector<size_t>{
+                                                                               rows_map.size()}})
+          .write_raw<typename ConnectivityMatrix::IndexType>(&(rows_map[0]));
+
+        Array<double> values{rows_map[rows_map.size() - 1]};
+        for (size_t i = 0; i < numbers.size(); ++i) {
+          for (size_t i_row = rows_map[i]; i_row < rows_map[i + 1]; ++i_row) {
+            const size_t j = i_row - rows_map[i];
+            values[i_row]  = std::sin(numbers[i] + 2 * j);
+          }
+        }
+
+        group.createDataSet<double>(name, HighFive::DataSpace{std::vector<size_t>{values.size()}})
+          .write_raw<double>(&(values[0]));
+
+        group.createAttribute("filename", source_location.filename());
+        group.createAttribute("line", source_location.line());
+        group.createAttribute("function", source_location.function());
+        group.createAttribute("name", name);
+        group.createAttribute("dimension", connectivity.dimension());
+        group.createAttribute("item_type", std::string{itemName(ItemType::node)});
+        group.createAttribute("subitem_type", std::string{itemName(ItemType::cell)});
+        group.createAttribute("data_type", demangle<double>());
+      }
+
+      parallel::barrier();
+      ParallelCheckerTester pc_tester;
+      pc_tester.setTag(tag);
+
+      auto node_to_cell_matrix = connectivity.nodeToCellMatrix();
+
+      CellValuePerNode<double> values{connectivity};
+      NodeValue<const int> node_number = connectivity.nodeNumber();
+      for (NodeId node_id = 0; node_id < connectivity.numberOfNodes(); ++node_id) {
+        auto cell_list = node_to_cell_matrix[node_id];
+        for (size_t i_cell = 0; i_cell < cell_list.size(); ++i_cell) {
+          values[node_id][i_cell] = std::sin(node_number[node_id] + 2 * i_cell);
+        }
+      }
+
+      REQUIRE_NOTHROW(parallel_check(values, "sin", source_location));
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different name in ref");
+      REQUIRE_NOTHROW(parallel_check(values, "not_sin", source_location));
+
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different source file in ref");
+      REQUIRE_NOTHROW(parallel_check(values, "sin",
+                                     SourceLocation{"other-source-file", source_location.line(),
+                                                    source_location.column(), source_location.function()}));
+
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different source line in ref");
+      REQUIRE_NOTHROW(parallel_check(values, "sin",
+                                     SourceLocation{source_location.filename(), source_location.line() + 100,
+                                                    source_location.column(), source_location.function()}));
+
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different source function in ref");
+      REQUIRE_NOTHROW(parallel_check(values, "sin",
+                                     SourceLocation{source_location.filename(), source_location.line(),
+                                                    source_location.column(), "foo"}));
+
+      if (parallel::size() > 1) {
+        CellValuePerNode<double> not_sync = copy(values);
+        NodeValue<const bool> is_owned    = connectivity.nodeIsOwned();
+        if (parallel::rank() == 0) {
+          for (NodeId node_id = 0; node_id < connectivity.numberOfNodes(); ++node_id) {
+            if (not is_owned[node_id]) {
+              not_sync[node_id][0] += 3.2;
+              break;
+            }
+          }
+        }
+        REQUIRE(not isSynchronized(not_sync));
+        pc_tester.setTag(tag);
+        UNSCOPED_INFO("can have different ghost values in ref (no exception)");
+        REQUIRE_NOTHROW(parallel_check(not_sync, "sin", source_location));
+      }
+
+      {
+        CellValuePerNode<double> different = copy(values);
+        bool has_difference                = false;
+        if (parallel::rank() == 0) {
+          NodeValue<const bool> is_owned = connectivity.nodeIsOwned();
+          for (NodeId node_id = 0; node_id < connectivity.numberOfNodes(); ++node_id) {
+            if (is_owned[node_id]) {
+              different[node_id][0] += 3.2;
+              has_difference = true;
+              break;
+            }
+          }
+        }
+        has_difference = parallel::allReduceOr(has_difference);
+
+        REQUIRE(has_difference);
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(different, "sin", source_location), "error: calculations differ!");
+      }
+
+      {
+        CellValuePerNode<int> other_data_type{connectivity};
+        other_data_type.fill(0);
+
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(other_data_type, "sin", source_location), "error: cannot compare data");
+      }
+
+      {
+        CellArrayPerNode<double> arrays{connectivity, 1};
+        arrays.fill(0);
+
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(arrays, "sin", source_location), "error: cannot compare data");
+      }
+
+      {
+        if (parallel::rank() == 0) {
+          HighFive::File file(filename, HighFive::File::ReadWrite);
+          HighFive::Group group = file.getGroup("/values/" + std::to_string(tag));
+          group.getAttribute("dimension").write(size_t{2});
+        }
+        parallel::barrier();
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(values, "sin", source_location), "error: cannot compare data");
+
+        if (parallel::rank() == 0) {
+          HighFive::File file(filename, HighFive::File::ReadWrite);
+          HighFive::Group group = file.getGroup("/values/" + std::to_string(tag));
+          group.getAttribute("dimension").write(connectivity.dimension());
+        }
+        parallel::barrier();
+      }
+
+      {
+        if (parallel::rank() == 0) {
+          HighFive::File file(filename, HighFive::File::ReadWrite);
+          HighFive::Group group = file.getGroup("/values/" + std::to_string(tag));
+          group.getAttribute("item_type").write(std::string{itemName(ItemType::face)});
+        }
+        parallel::barrier();
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(values, "sin", source_location), "error: cannot compare data");
+
+        if (parallel::rank() == 0) {
+          HighFive::File file(filename, HighFive::File::ReadWrite);
+          HighFive::Group group = file.getGroup("/values/" + std::to_string(tag));
+          group.getAttribute("item_type").write(std::string{itemName(ItemType::node)});
+        }
+        parallel::barrier();
+      }
+
+      {
+        if (parallel::rank() == 0) {
+          HighFive::File file(filename, HighFive::File::ReadWrite);
+          HighFive::Group group = file.getGroup("/values/" + std::to_string(tag));
+          group.getAttribute("subitem_type").write(std::string{itemName(ItemType::face)});
+        }
+        parallel::barrier();
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(values, "sin", source_location), "error: cannot compare data");
+
+        if (parallel::rank() == 0) {
+          HighFive::File file(filename, HighFive::File::ReadWrite);
+          HighFive::Group group = file.getGroup("/values/" + std::to_string(tag));
+          group.getAttribute("subitem_type").write(std::string{itemName(ItemType::cell)});
+        }
+        parallel::barrier();
+      }
+
+      {
+        auto other_mesh                           = MeshDataBaseForTests::get().cartesian1DMesh();
+        const Connectivity<1>& other_connectivity = other_mesh->connectivity();
+
+        CellValuePerNode<double> other_shape{other_connectivity};
+        other_shape.fill(1);
+
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(other_shape, "sin", source_location),
+                            "error: some item numbers are not defined in reference");
+      }
+    }
+
+    // SubItemArrayPerItem
+    {   // 1d
+      auto mesh                           = MeshDataBaseForTests::get().unordered1DMesh();
+      std::string filename                = ParallelChecker::instance().filename();
+      const Connectivity<1>& connectivity = mesh->connectivity();
+
+      const std::string name = "sin";
+
+      SourceLocation source_location;
+
+      Array numbers = get_item_numbers(connectivity, ItemType::cell);
+      Array<const typename ConnectivityMatrix::IndexType> rows_map =
+        get_subitem_rows_map(connectivity, ItemType::cell, ItemType::node);
+
+      int tag = 12;
+      if (parallel::rank() == 0) {
+        HighFive::File file(filename, HighFive::File::Overwrite);
+        HighFive::Group group = file.createGroup("/values/" + std::to_string(tag));
+
+        group.createDataSet<int>("numbers", HighFive::DataSpace{std::vector<size_t>{numbers.size()}})
+          .write_raw<int>(&(numbers[0]));
+        group
+          .createDataSet<typename ConnectivityMatrix::IndexType>("rows_map", HighFive::DataSpace{std::vector<size_t>{
+                                                                               rows_map.size()}})
+          .write_raw<typename ConnectivityMatrix::IndexType>(&(rows_map[0]));
+
+        Table<double> arrays{rows_map[rows_map.size() - 1], 2};
+        for (size_t i = 0; i < numbers.size(); ++i) {
+          for (size_t i_row = rows_map[i]; i_row < rows_map[i + 1]; ++i_row) {
+            const size_t j = i_row - rows_map[i];
+            for (size_t k = 0; k < 2; ++k) {
+              arrays[i_row][k] = std::sin(2 * numbers[i] + (1 + k) * j);
+            }
+          }
+        }
+        group
+          .createDataSet<double>(name, HighFive::DataSpace{std::vector<size_t>{arrays.numberOfRows(),
+                                                                               arrays.numberOfColumns()}})
+          .write_raw<double>(&(arrays(0, 0)));
+
+        group.createAttribute("filename", source_location.filename());
+        group.createAttribute("line", source_location.line());
+        group.createAttribute("function", source_location.function());
+        group.createAttribute("name", name);
+        group.createAttribute("dimension", connectivity.dimension());
+        group.createAttribute("item_type", std::string{itemName(ItemType::cell)});
+        group.createAttribute("subitem_type", std::string{itemName(ItemType::node)});
+        group.createAttribute("data_type", demangle<double>());
+      }
+      parallel::barrier();
+      ParallelCheckerTester pc_tester;
+      pc_tester.setTag(tag);
+
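+      // recompute the same values locally: they must match the reference
+      // written above entry by entry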
+      auto cell_to_node_matrix = connectivity.cellToNodeMatrix();
+
+      NodeArrayPerCell<double> arrays{connectivity, 2};
+      CellValue<const int> cell_number = connectivity.cellNumber();
+      for (CellId cell_id = 0; cell_id < connectivity.numberOfCells(); ++cell_id) {
+        for (size_t j = 0; j < cell_to_node_matrix[cell_id].size(); ++j) {
+          for (size_t k = 0; k < 2; ++k) {
+            arrays[cell_id][j][k] = std::sin(2 * cell_number[cell_id] + (1 + k) * j);
+          }
+        }
+      }
+
+      REQUIRE_NOTHROW(parallel_check(arrays, "sin", source_location));
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different name in ref");
+      REQUIRE_NOTHROW(parallel_check(arrays, "not_sin", source_location));
+
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different source file in ref");
+      REQUIRE_NOTHROW(parallel_check(arrays, "sin",
+                                     SourceLocation{"other-source-file", source_location.line(),
+                                                    source_location.column(), source_location.function()}));
+
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different source line in ref");
+      REQUIRE_NOTHROW(parallel_check(arrays, "sin",
+                                     SourceLocation{source_location.filename(), source_location.line() + 100,
+                                                    source_location.column(), source_location.function()}));
+
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different source function in ref");
+      REQUIRE_NOTHROW(parallel_check(arrays, "sin",
+                                     SourceLocation{source_location.filename(), source_location.line(),
+                                                    source_location.column(), "foo"}));
+
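+      // ghost (non-owned) items are excluded from the comparison: an
+      // unsynchronized value on a ghost item must not trigger a failure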
+      if (parallel::size() > 1) {
+        NodeArrayPerCell<double> not_sync = copy(arrays);
+        CellValue<const bool> is_owned    = connectivity.cellIsOwned();
+        if (parallel::rank() == 0) {
+          for (CellId cell_id = 0; cell_id < connectivity.numberOfCells(); ++cell_id) {
+            if (not is_owned[cell_id]) {
+              not_sync[cell_id][0][1] += 3.2;
+              break;
+            }
+          }
+        }
+        REQUIRE(not isSynchronized(not_sync));
+        pc_tester.setTag(tag);
+        UNSCOPED_INFO("can have different ghost values in ref (no exception)");
+        REQUIRE_NOTHROW(parallel_check(not_sync, "sin", source_location));
+      }
+
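+      // a modified owned value, on any rank, must be detected as a difference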
+      {
+        NodeArrayPerCell<double> different = copy(arrays);
+        bool has_difference                = false;
+        if (parallel::rank() == 0) {
+          CellValue<const bool> is_owned = connectivity.cellIsOwned();
+          for (CellId cell_id = 0; cell_id < connectivity.numberOfCells(); ++cell_id) {
+            if (is_owned[cell_id]) {
+              different[cell_id][0][1] += 3.2;
+              has_difference = true;
+              break;
+            }
+          }
+        }
+        has_difference = parallel::allReduceOr(has_difference);
+
+        REQUIRE(has_difference);
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(different, "sin", source_location), "error: calculations differ!");
+      }
+
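+      // the remaining cases check that a mismatching data type, array shape
+      // or connectivity is rejected before any value comparison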
+      {
+        CellValue<int> other_data_type{connectivity};
+        other_data_type.fill(0);
+
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(other_data_type, "sin", source_location), "error: cannot compare data");
+      }
+
+      {
+        CellArray<double> arrays{connectivity, 1};
+        arrays.fill(0);
+
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(arrays, "sin", source_location), "error: cannot compare data");
+      }
+
+      {
+        auto other_mesh                           = MeshDataBaseForTests::get().cartesian1DMesh();
+        const Connectivity<1>& other_connectivity = other_mesh->connectivity();
+
+        CellArray<double> other_shape{other_connectivity, 2};
+        other_shape.fill(1);
+
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(other_shape, "sin", source_location),
+                            "error: some item numbers are not defined in reference");
+      }
+    }
+
+    // ItemArray
+    {   // 2d
+      auto mesh                           = MeshDataBaseForTests::get().hybrid2DMesh();
+      std::string filename                = ParallelChecker::instance().filename();
+      const Connectivity<2>& connectivity = mesh->connectivity();
+
+      const std::string name = "sin";
+
+      SourceLocation source_location;
+
+      using DataType = TinyMatrix<3, 2>;
+
+      Array numbers = get_item_numbers(connectivity, ItemType::face);
+
+      int tag = 12;
+      if (parallel::rank() == 0) {
+        HighFive::File file(filename, HighFive::File::Overwrite);
+        HighFive::Group group = file.createGroup("/values/" + std::to_string(tag));
+
+        group.createDataSet<int>("numbers", HighFive::DataSpace{std::vector<size_t>{numbers.size()}})
+          .write_raw<int>(&(numbers[0]));
+
+        Table<DataType> arrays{numbers.size(), 2};
+        for (size_t i = 0; i < arrays.numberOfRows(); ++i) {
+          for (size_t j = 0; j < arrays.numberOfColumns(); ++j) {
+            for (size_t k = 0; k < DataType::NumberOfRows; ++k) {
+              for (size_t l = 0; l < DataType::NumberOfColumns; ++l) {
+                arrays[i][j](k, l) = std::sin(2 * numbers[i] + j + 3 * k + 2 * l);
+              }
+            }
+          }
+        }
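+        // TinyMatrix values are written through a test-local HighFive
+        // compound datatype describing their memory layout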
+        group
+          .createDataSet(name,
+                         HighFive::DataSpace{std::vector<size_t>{arrays.numberOfRows(), arrays.numberOfColumns()}},
+                         test_TinyMatrixDataType<DataType>{})
+          .template write_raw<double>(&(arrays[0][0](0, 0)), test_TinyMatrixDataType<DataType>{});
+
+        group.createAttribute("filename", source_location.filename());
+        group.createAttribute("line", source_location.line());
+        group.createAttribute("function", source_location.function());
+        group.createAttribute("name", name);
+        group.createAttribute("dimension", connectivity.dimension());
+        group.createAttribute("item_type", std::string{itemName(ItemType::face)});
+        group.createAttribute("data_type", demangle<DataType>());
+      }
+      parallel::barrier();
+      ParallelCheckerTester pc_tester;
+      pc_tester.setTag(tag);
+
+      FaceArray<DataType> arrays{connectivity, 2};
+      FaceValue<const int> face_number = connectivity.faceNumber();
+      for (FaceId face_id = 0; face_id < connectivity.numberOfFaces(); ++face_id) {
+        for (size_t j = 0; j < arrays.sizeOfArrays(); ++j) {
+          for (size_t k = 0; k < DataType::NumberOfRows; ++k) {
+            for (size_t l = 0; l < DataType::NumberOfColumns; ++l) {
+              arrays[face_id][j](k, l) = std::sin(2 * face_number[face_id] + j + 3 * k + 2 * l);
+            }
+          }
+        }
+      }
+
+      REQUIRE_NOTHROW(parallel_check(arrays, "sin", source_location));
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different name in ref");
+      REQUIRE_NOTHROW(parallel_check(arrays, "not_sin", source_location));
+
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different source file in ref");
+      REQUIRE_NOTHROW(parallel_check(arrays, "sin",
+                                     SourceLocation{"other-source-file", source_location.line(),
+                                                    source_location.column(), source_location.function()}));
+
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different source line in ref");
+      REQUIRE_NOTHROW(parallel_check(arrays, "sin",
+                                     SourceLocation{source_location.filename(), source_location.line() + 100,
+                                                    source_location.column(), source_location.function()}));
+
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different source function in ref");
+      REQUIRE_NOTHROW(parallel_check(arrays, "sin",
+                                     SourceLocation{source_location.filename(), source_location.line(),
+                                                    source_location.column(), "foo"}));
+
+      if (parallel::size() > 1) {
+        FaceArray<DataType> not_sync   = copy(arrays);
+        FaceValue<const bool> is_owned = connectivity.faceIsOwned();
+        if (parallel::rank() == 0) {
+          for (FaceId face_id = 0; face_id < connectivity.numberOfFaces(); ++face_id) {
+            if (not is_owned[face_id]) {
+              not_sync[face_id][0](0, 0) += 3.2;
+              break;
+            }
+          }
+        }
+        REQUIRE(not isSynchronized(not_sync));
+        pc_tester.setTag(tag);
+        UNSCOPED_INFO("can have different ghost values in ref (no exception)");
+        REQUIRE_NOTHROW(parallel_check(not_sync, "sin", source_location));
+      }
+
+      {
+        FaceArray<DataType> different = copy(arrays);
+        bool has_difference           = false;
+        if (parallel::rank() == 0) {
+          FaceValue<const bool> is_owned = connectivity.faceIsOwned();
+          for (FaceId face_id = 0; face_id < connectivity.numberOfFaces(); ++face_id) {
+            if (is_owned[face_id]) {
+              different[face_id][0](0, 0) += 3.2;
+              has_difference = true;
+              break;
+            }
+          }
+        }
+        has_difference = parallel::allReduceOr(has_difference);
+
+        REQUIRE(has_difference);
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(different, "sin", source_location), "error: calculations differ!");
+      }
+
+      {
+        FaceValue<TinyVector<6>> other_data_type{connectivity};
+        other_data_type.fill(zero);
+
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(other_data_type, "sin", source_location), "error: cannot compare data");
+      }
+
+      {
+        FaceArray<DataType> arrays{connectivity, 1};
+        arrays.fill(zero);
+
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(arrays, "sin", source_location), "error: cannot compare data");
+      }
+
+      {
+        auto other_mesh                           = MeshDataBaseForTests::get().cartesian2DMesh();
+        const Connectivity<2>& other_connectivity = other_mesh->connectivity();
+
+        FaceArray<DataType> other_shape{other_connectivity, 2};
+        other_shape.fill(zero);
+
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(other_shape, "sin", source_location),
+                            "error: number of items differs from reference");
+      }
+    }
+
+    // SubItemValuePerItem
+    {   // 2d
+      auto mesh                           = MeshDataBaseForTests::get().hybrid2DMesh();
+      std::string filename                = ParallelChecker::instance().filename();
+      const Connectivity<2>& connectivity = mesh->connectivity();
+
+      const std::string name = "sin";
+
+      SourceLocation source_location;
+      Array numbers = get_item_numbers(connectivity, ItemType::node);
+      Array<const typename ConnectivityMatrix::IndexType> rows_map =
+        get_subitem_rows_map(connectivity, ItemType::node, ItemType::cell);
+
+      int tag = 6;
+      if (parallel::rank() == 0) {
+        HighFive::File file(filename, HighFive::File::Overwrite);
+        HighFive::Group group = file.createGroup("/values/" + std::to_string(tag));
+
+        group.createDataSet<int>("numbers", HighFive::DataSpace{std::vector<size_t>{numbers.size()}})
+          .write_raw<int>(&(numbers[0]));
+        group
+          .createDataSet<typename ConnectivityMatrix::IndexType>("rows_map", HighFive::DataSpace{std::vector<size_t>{
+                                                                               rows_map.size()}})
+          .write_raw<typename ConnectivityMatrix::IndexType>(&(rows_map[0]));
+
+        Array<double> values{rows_map[rows_map.size() - 1]};
+        for (size_t i = 0; i < numbers.size(); ++i) {
+          for (size_t i_row = rows_map[i]; i_row < rows_map[i + 1]; ++i_row) {
+            const size_t j = i_row - rows_map[i];
+            values[i_row]  = std::sin(numbers[i] + 2 * j);
+          }
+        }
+
+        group.createDataSet<double>(name, HighFive::DataSpace{std::vector<size_t>{values.size()}})
+          .write_raw<double>(&(values[0]));
+
+        group.createAttribute("filename", source_location.filename());
+        group.createAttribute("line", source_location.line());
+        group.createAttribute("function", source_location.function());
+        group.createAttribute("name", name);
+        group.createAttribute("dimension", connectivity.dimension());
+        group.createAttribute("item_type", std::string{itemName(ItemType::node)});
+        group.createAttribute("subitem_type", std::string{itemName(ItemType::cell)});
+        group.createAttribute("data_type", demangle<double>());
+      }
+
+      parallel::barrier();
+      ParallelCheckerTester pc_tester;
+      pc_tester.setTag(tag);
+
+      auto node_to_cell_matrix = connectivity.nodeToCellMatrix();
+
+      CellValuePerNode<double> values{connectivity};
+      NodeValue<const int> node_number = connectivity.nodeNumber();
+      for (NodeId node_id = 0; node_id < connectivity.numberOfNodes(); ++node_id) {
+        auto cell_list = node_to_cell_matrix[node_id];
+        for (size_t i_cell = 0; i_cell < cell_list.size(); ++i_cell) {
+          values[node_id][i_cell] = std::sin(node_number[node_id] + 2 * i_cell);
+        }
+      }
+
+      REQUIRE_NOTHROW(parallel_check(values, "sin", source_location));
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different name in ref");
+      REQUIRE_NOTHROW(parallel_check(values, "not_sin", source_location));
+
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different source file in ref");
+      REQUIRE_NOTHROW(parallel_check(values, "sin",
+                                     SourceLocation{"other-source-file", source_location.line(),
+                                                    source_location.column(), source_location.function()}));
+
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different source line in ref");
+      REQUIRE_NOTHROW(parallel_check(values, "sin",
+                                     SourceLocation{source_location.filename(), source_location.line() + 100,
+                                                    source_location.column(), source_location.function()}));
+
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different source function in ref");
+      REQUIRE_NOTHROW(parallel_check(values, "sin",
+                                     SourceLocation{source_location.filename(), source_location.line(),
+                                                    source_location.column(), "foo"}));
+
+      if (parallel::size() > 1) {
+        CellValuePerNode<double> not_sync = copy(values);
+        NodeValue<const bool> is_owned    = connectivity.nodeIsOwned();
+        if (parallel::rank() == 0) {
+          for (NodeId node_id = 0; node_id < connectivity.numberOfNodes(); ++node_id) {
+            if (not is_owned[node_id]) {
+              not_sync[node_id][0] += 3.2;
+              break;
+            }
+          }
+        }
+        REQUIRE(not isSynchronized(not_sync));
+        pc_tester.setTag(tag);
+        UNSCOPED_INFO("can have different ghost values in ref (no exception)");
+        REQUIRE_NOTHROW(parallel_check(not_sync, "sin", source_location));
+      }
+
+      {
+        CellValuePerNode<double> different = copy(values);
+        bool has_difference                = false;
+        if (parallel::rank() == 0) {
+          NodeValue<const bool> is_owned = connectivity.nodeIsOwned();
+          for (NodeId node_id = 0; node_id < connectivity.numberOfNodes(); ++node_id) {
+            if (is_owned[node_id]) {
+              different[node_id][0] += 3.2;
+              has_difference = true;
+              break;
+            }
+          }
+        }
+        has_difference = parallel::allReduceOr(has_difference);
+
+        REQUIRE(has_difference);
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(different, "sin", source_location), "error: calculations differ!");
+      }
+
+      {
+        CellValuePerNode<int> other_data_type{connectivity};
+        other_data_type.fill(0);
+
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(other_data_type, "sin", source_location), "error: cannot compare data");
+      }
+
+      {
+        CellArrayPerNode<double> arrays{connectivity, 1};
+        arrays.fill(0);
+
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(arrays, "sin", source_location), "error: cannot compare data");
+      }
+
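+      // the next three blocks temporarily corrupt one metadata attribute
+      // (dimension, item_type, subitem_type) in the reference file, check
+      // that the comparison is refused, then restore the original value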
+      {
+        if (parallel::rank() == 0) {
+          HighFive::File file(filename, HighFive::File::ReadWrite);
+          HighFive::Group group = file.getGroup("/values/" + std::to_string(tag));
+          group.getAttribute("dimension").write(size_t{1});
+        }
+        parallel::barrier();
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(values, "sin", source_location), "error: cannot compare data");
+
+        if (parallel::rank() == 0) {
+          HighFive::File file(filename, HighFive::File::ReadWrite);
+          HighFive::Group group = file.getGroup("/values/" + std::to_string(tag));
+          group.getAttribute("dimension").write(connectivity.dimension());
+        }
+        parallel::barrier();
+      }
+
+      {
+        if (parallel::rank() == 0) {
+          HighFive::File file(filename, HighFive::File::ReadWrite);
+          HighFive::Group group = file.getGroup("/values/" + std::to_string(tag));
+          group.getAttribute("item_type").write(std::string{itemName(ItemType::face)});
+        }
+        parallel::barrier();
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(values, "sin", source_location), "error: cannot compare data");
+
+        if (parallel::rank() == 0) {
+          HighFive::File file(filename, HighFive::File::ReadWrite);
+          HighFive::Group group = file.getGroup("/values/" + std::to_string(tag));
+          group.getAttribute("item_type").write(std::string{itemName(ItemType::node)});
+        }
+        parallel::barrier();
+      }
+
+      {
+        if (parallel::rank() == 0) {
+          HighFive::File file(filename, HighFive::File::ReadWrite);
+          HighFive::Group group = file.getGroup("/values/" + std::to_string(tag));
+          group.getAttribute("subitem_type").write(std::string{itemName(ItemType::face)});
+        }
+        parallel::barrier();
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(values, "sin", source_location), "error: cannot compare data");
+
+        if (parallel::rank() == 0) {
+          HighFive::File file(filename, HighFive::File::ReadWrite);
+          HighFive::Group group = file.getGroup("/values/" + std::to_string(tag));
+          group.getAttribute("subitem_type").write(std::string{itemName(ItemType::cell)});
+        }
+        parallel::barrier();
+      }
+
+      {
+        auto other_mesh                           = MeshDataBaseForTests::get().cartesian2DMesh();
+        const Connectivity<2>& other_connectivity = other_mesh->connectivity();
+
+        CellValuePerNode<double> other_shape{other_connectivity};
+        other_shape.fill(1);
+
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(other_shape, "sin", source_location),
+                            "error: some item numbers are not defined in reference");
+      }
+    }
+
+    // ItemArray
+    {   // 3d
+      auto mesh                           = MeshDataBaseForTests::get().hybrid3DMesh();
+      std::string filename                = ParallelChecker::instance().filename();
+      const Connectivity<3>& connectivity = mesh->connectivity();
+
+      const std::string name = "sin";
+
+      SourceLocation source_location;
+
+      using DataType = TinyVector<2>;
+
+      Array numbers = get_item_numbers(connectivity, ItemType::face);
+
+      int tag = 7;
+      if (parallel::rank() == 0) {
+        HighFive::File file(filename, HighFive::File::Overwrite);
+        HighFive::Group group = file.createGroup("/values/" + std::to_string(tag));
+
+        group.createDataSet<int>("numbers", HighFive::DataSpace{std::vector<size_t>{numbers.size()}})
+          .write_raw<int>(&(numbers[0]));
+
+        Table<DataType> arrays{numbers.size(), 2};
+        for (size_t i = 0; i < arrays.numberOfRows(); ++i) {
+          for (size_t j = 0; j < arrays.numberOfColumns(); ++j) {
+            for (size_t k = 0; k < DataType::Dimension; ++k) {
+              arrays[i][j][k] = std::sin(2 * numbers[i] + j + 3 * k);
+            }
+          }
+        }
+        group
+          .createDataSet(name,
+                         HighFive::DataSpace{std::vector<size_t>{arrays.numberOfRows(), arrays.numberOfColumns()}},
+                         test_TinyVectorDataType<DataType>{})
+          .template write_raw<double>(&(arrays[0][0][0]), test_TinyVectorDataType<DataType>{});
+
+        group.createAttribute("filename", source_location.filename());
+        group.createAttribute("line", source_location.line());
+        group.createAttribute("function", source_location.function());
+        group.createAttribute("name", name);
+        group.createAttribute("dimension", connectivity.dimension());
+        group.createAttribute("item_type", std::string{itemName(ItemType::face)});
+        group.createAttribute("data_type", demangle<DataType>());
+      }
+      parallel::barrier();
+      ParallelCheckerTester pc_tester;
+      pc_tester.setTag(tag);
+
+      FaceArray<DataType> arrays{connectivity, 2};
+      FaceValue<const int> face_number = connectivity.faceNumber();
+      for (FaceId face_id = 0; face_id < connectivity.numberOfFaces(); ++face_id) {
+        for (size_t j = 0; j < arrays.sizeOfArrays(); ++j) {
+          for (size_t k = 0; k < DataType::Dimension; ++k) {
+            arrays[face_id][j][k] = std::sin(2 * face_number[face_id] + j + 3 * k);
+          }
+        }
+      }
+
+      REQUIRE_NOTHROW(parallel_check(arrays, "sin", source_location));
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different name in ref");
+      REQUIRE_NOTHROW(parallel_check(arrays, "not_sin", source_location));
+
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different source file in ref");
+      REQUIRE_NOTHROW(parallel_check(arrays, "sin",
+                                     SourceLocation{"other-source-file", source_location.line(),
+                                                    source_location.column(), source_location.function()}));
+
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different source line in ref");
+      REQUIRE_NOTHROW(parallel_check(arrays, "sin",
+                                     SourceLocation{source_location.filename(), source_location.line() + 100,
+                                                    source_location.column(), source_location.function()}));
+
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different source function in ref");
+      REQUIRE_NOTHROW(parallel_check(arrays, "sin",
+                                     SourceLocation{source_location.filename(), source_location.line(),
+                                                    source_location.column(), "foo"}));
+
+      if (parallel::size() > 1) {
+        FaceArray<DataType> not_sync   = copy(arrays);
+        FaceValue<const bool> is_owned = connectivity.faceIsOwned();
+        if (parallel::rank() == 0) {
+          for (FaceId face_id = 0; face_id < connectivity.numberOfFaces(); ++face_id) {
+            if (not is_owned[face_id]) {
+              not_sync[face_id][0][0] += 3.2;
+              break;
+            }
+          }
+        }
+        REQUIRE(not isSynchronized(not_sync));
+        pc_tester.setTag(tag);
+        UNSCOPED_INFO("can have different ghost values in ref (no exception)");
+        REQUIRE_NOTHROW(parallel_check(not_sync, "sin", source_location));
+      }
+
+      {
+        FaceArray<DataType> different = copy(arrays);
+        bool has_difference           = false;
+        if (parallel::rank() == 0) {
+          FaceValue<const bool> is_owned = connectivity.faceIsOwned();
+          for (FaceId face_id = 0; face_id < connectivity.numberOfFaces(); ++face_id) {
+            if (is_owned[face_id]) {
+              different[face_id][0][0] += 3.2;
+              has_difference = true;
+              break;
+            }
+          }
+        }
+        has_difference = parallel::allReduceOr(has_difference);
+
+        REQUIRE(has_difference);
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(different, "sin", source_location), "error: calculations differ!");
+      }
+
+      {
+        FaceValue<TinyVector<6>> other_data_type{connectivity};
+        other_data_type.fill(zero);
+
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(other_data_type, "sin", source_location), "error: cannot compare data");
+      }
+
+      {
+        FaceArray<DataType> arrays{connectivity, 1};
+        arrays.fill(zero);
+
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(arrays, "sin", source_location), "error: cannot compare data");
+      }
+
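+      // a 2d connectivity does not match the 3d reference metadata, so the
+      // comparison is refused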
+      {
+        auto other_mesh                           = MeshDataBaseForTests::get().cartesian2DMesh();
+        const Connectivity<2>& other_connectivity = other_mesh->connectivity();
+
+        FaceArray<DataType> other_shape{other_connectivity, 2};
+        other_shape.fill(zero);
+
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(other_shape, "sin", source_location), "error: cannot compare data");
+      }
+    }
+
+    // SubItemArrayPerItem
+    {   // 3d
+      auto mesh                           = MeshDataBaseForTests::get().hybrid3DMesh();
+      std::string filename                = ParallelChecker::instance().filename();
+      const Connectivity<3>& connectivity = mesh->connectivity();
+
+      const std::string name = "sin";
+
+      SourceLocation source_location;
+
+      Array numbers = get_item_numbers(connectivity, ItemType::cell);
+      Array<const typename ConnectivityMatrix::IndexType> rows_map =
+        get_subitem_rows_map(connectivity, ItemType::cell, ItemType::node);
+
+      int tag = 12;
+      if (parallel::rank() == 0) {
+        HighFive::File file(filename, HighFive::File::Overwrite);
+        HighFive::Group group = file.createGroup("/values/" + std::to_string(tag));
+
+        group.createDataSet<int>("numbers", HighFive::DataSpace{std::vector<size_t>{numbers.size()}})
+          .write_raw<int>(&(numbers[0]));
+        group
+          .createDataSet<typename ConnectivityMatrix::IndexType>("rows_map", HighFive::DataSpace{std::vector<size_t>{
+                                                                               rows_map.size()}})
+          .write_raw<typename ConnectivityMatrix::IndexType>(&(rows_map[0]));
+
+        Table<double> arrays{rows_map[rows_map.size() - 1], 2};
+        for (size_t i = 0; i < numbers.size(); ++i) {
+          for (size_t i_row = rows_map[i]; i_row < rows_map[i + 1]; ++i_row) {
+            const size_t j = i_row - rows_map[i];
+            for (size_t k = 0; k < 2; ++k) {
+              arrays[i_row][k] = std::sin(2 * numbers[i] + (1 + k) * j);
+            }
+          }
+        }
+        group
+          .createDataSet<double>(name, HighFive::DataSpace{std::vector<size_t>{arrays.numberOfRows(),
+                                                                               arrays.numberOfColumns()}})
+          .write_raw<double>(&(arrays(0, 0)));
+
+        group.createAttribute("filename", source_location.filename());
+        group.createAttribute("line", source_location.line());
+        group.createAttribute("function", source_location.function());
+        group.createAttribute("name", name);
+        group.createAttribute("dimension", connectivity.dimension());
+        group.createAttribute("item_type", std::string{itemName(ItemType::cell)});
+        group.createAttribute("subitem_type", std::string{itemName(ItemType::node)});
+        group.createAttribute("data_type", demangle<double>());
+      }
+      parallel::barrier();
+      ParallelCheckerTester pc_tester;
+      pc_tester.setTag(tag);
+
+      auto cell_to_node_matrix = connectivity.cellToNodeMatrix();
+
+      NodeArrayPerCell<double> arrays{connectivity, 2};
+      CellValue<const int> cell_number = connectivity.cellNumber();
+      for (CellId cell_id = 0; cell_id < connectivity.numberOfCells(); ++cell_id) {
+        for (size_t j = 0; j < cell_to_node_matrix[cell_id].size(); ++j) {
+          for (size_t k = 0; k < 2; ++k) {
+            arrays[cell_id][j][k] = std::sin(2 * cell_number[cell_id] + (1 + k) * j);
+          }
+        }
+      }
+
+      REQUIRE_NOTHROW(parallel_check(arrays, "sin", source_location));
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different name in ref");
+      REQUIRE_NOTHROW(parallel_check(arrays, "not_sin", source_location));
+
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different source file in ref");
+      REQUIRE_NOTHROW(parallel_check(arrays, "sin",
+                                     SourceLocation{"other-source-file", source_location.line(),
+                                                    source_location.column(), source_location.function()}));
+
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different source line in ref");
+      REQUIRE_NOTHROW(parallel_check(arrays, "sin",
+                                     SourceLocation{source_location.filename(), source_location.line() + 100,
+                                                    source_location.column(), source_location.function()}));
+
+      pc_tester.setTag(tag);
+      UNSCOPED_INFO("can have different source function in ref");
+      REQUIRE_NOTHROW(parallel_check(arrays, "sin",
+                                     SourceLocation{source_location.filename(), source_location.line(),
+                                                    source_location.column(), "foo"}));
+
+      if (parallel::size() > 1) {
+        NodeArrayPerCell<double> not_sync = copy(arrays);
+        CellValue<const bool> is_owned    = connectivity.cellIsOwned();
+        if (parallel::rank() == 0) {
+          for (CellId cell_id = 0; cell_id < connectivity.numberOfCells(); ++cell_id) {
+            if (not is_owned[cell_id]) {
+              not_sync[cell_id][0][1] += 3.2;
+              break;
+            }
+          }
+        }
+        REQUIRE(not isSynchronized(not_sync));
+        pc_tester.setTag(tag);
+        UNSCOPED_INFO("can have different ghost values in ref (no exception)");
+        REQUIRE_NOTHROW(parallel_check(not_sync, "sin", source_location));
+      }
+
+      {
+        NodeArrayPerCell<double> different = copy(arrays);
+        bool has_difference                = false;
+        if (parallel::rank() == 0) {
+          CellValue<const bool> is_owned = connectivity.cellIsOwned();
+          for (CellId cell_id = 0; cell_id < connectivity.numberOfCells(); ++cell_id) {
+            if (is_owned[cell_id]) {
+              different[cell_id][0][1] += 3.2;
+              has_difference = true;
+              break;
+            }
+          }
+        }
+        has_difference = parallel::allReduceOr(has_difference);
+
+        REQUIRE(has_difference);
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(different, "sin", source_location), "error: calculations differ!");
+      }
+
+      {
+        CellValue<int> other_data_type{connectivity};
+        other_data_type.fill(0);
+
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(other_data_type, "sin", source_location), "error: cannot compare data");
+      }
+
+      {
+        CellArray<double> arrays{connectivity, 1};
+        arrays.fill(0);
+
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(arrays, "sin", source_location), "error: cannot compare data");
+      }
+
+      {
+        auto other_mesh                           = MeshDataBaseForTests::get().cartesian3DMesh();
+        const Connectivity<3>& other_connectivity = other_mesh->connectivity();
+
+        CellArray<double> other_shape{other_connectivity, 2};
+        other_shape.fill(1);
+
+        pc_tester.setTag(tag);
+        REQUIRE_THROWS_WITH(parallel_check(other_shape, "sin", source_location),
+                            "error: some item numbers are not defined in reference");
+      }
+    }
+  }
+
+  std::error_code err_code;
+  std::filesystem::remove_all(tmp_dirname, err_code);
+  // the error code is deliberately ignored: the directory may already have
+  // been removed by another processor
+
+  REQUIRE_NOTHROW(ParallelChecker::destroy());
+}
+
+#else   // PUGS_HAS_HDF5
+
+TEST_CASE("ParallelChecker_read", "[dev]")
+{
+  REQUIRE_NOTHROW(ParallelChecker::create());
+  if (parallel::size() > 1) {
+    REQUIRE_THROWS_WITH(ParallelChecker::instance().setMode(ParallelChecker::Mode::write),
+                        "not implemented yet: parallel check write in parallel");
+  } else {
+    REQUIRE_NOTHROW(ParallelChecker::instance().setMode(ParallelChecker::Mode::write));
+    REQUIRE_NOTHROW(ParallelChecker::instance().isWriting());
+  }
+  REQUIRE_NOTHROW(ParallelChecker::instance().setMode(ParallelChecker::Mode::automatic));
+  REQUIRE_NOTHROW(ParallelChecker::instance().isWriting() == (parallel::size() == 1));
+  REQUIRE_NOTHROW(ParallelChecker::instance().setMode(ParallelChecker::Mode::read));
+  REQUIRE_NOTHROW(not ParallelChecker::instance().isWriting());
+
+  auto mesh = MeshDataBaseForTests::get().unordered1DMesh();
+
+  const Connectivity<1>& connectivity = mesh->connectivity();
+
+  NodeValue<double> nv{connectivity};
+  REQUIRE_THROWS_WITH(parallel_check(nv, "test"), "error: parallel checker cannot be used without HDF5 support");
+
+  REQUIRE_THROWS_WITH(parallel_check(ItemValueVariant{nv}, "test"),
+                      "error: parallel checker cannot be used without HDF5 support");
+
+  NodeArray<double> na{connectivity, 2};
+  REQUIRE_THROWS_WITH(parallel_check(na, "test"), "error: parallel checker cannot be used without HDF5 support");
+
+  REQUIRE_THROWS_WITH(parallel_check(ItemArrayVariant{na}, "test"),
+                      "error: parallel checker cannot be used without HDF5 support");
+
+  NodeValuePerCell<double> nvpc{connectivity};
+  REQUIRE_THROWS_WITH(parallel_check(nvpc, "test"), "error: parallel checker cannot be used without HDF5 support");
+
+  REQUIRE_THROWS_WITH(parallel_check(SubItemValuePerItemVariant{nvpc}, "test"),
+                      "error: parallel checker cannot be used without HDF5 support");
+
+  NodeArrayPerCell<double> napc{connectivity, 2};
+  REQUIRE_THROWS_WITH(parallel_check(napc, "test"), "error: parallel checker cannot be used without HDF5 support");
+
+  REQUIRE_THROWS_WITH(parallel_check(SubItemArrayPerItemVariant{napc}, "test"),
+                      "error: parallel checker cannot be used without HDF5 support");
+
+  REQUIRE_NOTHROW(ParallelChecker::destroy());
+}
+
+#endif   // PUGS_HAS_HDF5
diff --git a/tests/test_ParallelChecker_write.cpp b/tests/test_ParallelChecker_write.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2854852373f5449c19403f4dd89b5811d02224bc
--- /dev/null
+++ b/tests/test_ParallelChecker_write.cpp
@@ -0,0 +1,654 @@
+#include <catch2/catch_test_macros.hpp>
+#include <catch2/matchers/catch_matchers_all.hpp>
+
+#include <dev/ParallelChecker.hpp>
+
+#include <MeshDataBaseForTests.hpp>
+
+#include <cstdlib>   // mkdtemp
+#include <filesystem>
+
+// clazy:excludeall=non-pod-global-static
+
+#ifdef PUGS_HAS_HDF5
+
+#include <ParallelCheckerTester.hpp>
+
+TEST_CASE("ParallelChecker_write", "[dev]")
+{
+  {
+    ParallelCheckerTester pc_tester;
+    if (pc_tester.isCreated()) {
+      REQUIRE_NOTHROW(ParallelChecker::destroy());
+    }
+  }
+
+  REQUIRE_NOTHROW(ParallelChecker::create());
+  REQUIRE_NOTHROW(ParallelChecker::instance().setMode(ParallelChecker::Mode::write));
+
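+  // helpers to save and restore the checker's internal state (filename,
+  // mode, tag) through the ParallelCheckerTester backdoor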
+  auto get_pc_options = []() -> std::tuple<std::string, ParallelChecker::Mode, size_t> {
+    ParallelCheckerTester pc_tester;
+    return std::make_tuple(pc_tester.getFilename(), pc_tester.getMode(), pc_tester.getTag());
+  };
+
+  auto set_pc_options = [](const std::tuple<std::string, ParallelChecker::Mode, size_t>& options) {
+    auto [filename, mode, tag] = options;
+    ParallelCheckerTester pc_tester;
+    pc_tester.setFilename(filename);
+    pc_tester.setMode(mode);
+    pc_tester.setTag(tag);
+  };
+
+  SECTION("set config at init")
+  {
+    auto [filename, mode, tag] = get_pc_options();
+
+    REQUIRE(ParallelChecker::instance().filename() == "parallel_checker.h5");
+    REQUIRE(ParallelChecker::instance().mode() == mode);
+    REQUIRE(tag == 0);
+
+    ParallelChecker::instance().setFilename("foo.h5");
+    ParallelChecker::instance().setMode(ParallelChecker::Mode::automatic);
+
+    REQUIRE(ParallelChecker::instance().filename() == "foo.h5");
+    REQUIRE(ParallelChecker::instance().mode() == ParallelChecker::Mode::automatic);
+  }
+
+  std::string tmp_dirname;
+
+  {
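+    // rank 0 creates a unique temporary directory and broadcasts its name,
+    // so that all ranks use the same location for the reference file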
+    if (parallel::rank() == 0) {
+      tmp_dirname = [&]() -> std::string {
+        std::string temp_filename = std::filesystem::temp_directory_path() / "pugs_test_write_h5_XXXXXX";
+        return std::string{mkdtemp(&temp_filename[0])};
+      }();
+    }
+    parallel::broadcast(tmp_dirname, 0);
+
+    std::filesystem::path path = tmp_dirname;
+    REQUIRE_NOTHROW(ParallelChecker::instance().setFilename(path / "parallel_check.h5"));
+  }
+
+  SECTION("set values")
+  {
+    set_pc_options(std::make_tuple(std::string{"foo.h5"}, ParallelChecker::Mode::write, 37));
+
+    auto pc_options = get_pc_options();
+
+    auto [filename, mode, tag] = pc_options;
+
+    REQUIRE(ParallelChecker::instance().filename() == filename);
+    REQUIRE(ParallelChecker::instance().filename() == "foo.h5");
+    REQUIRE(ParallelChecker::instance().mode() == ParallelChecker::Mode::write);
+    REQUIRE(ParallelChecker::instance().mode() == mode);
+    REQUIRE(tag == 37);
+  }
+
+  SECTION("is writing")
+  {
+    ParallelCheckerTester pc_tester;
+
+    pc_tester.setMode(ParallelChecker::Mode::write);
+    REQUIRE(ParallelChecker::instance().isWriting());
+
+    pc_tester.setMode(ParallelChecker::Mode::read);
+    REQUIRE(not ParallelChecker::instance().isWriting());
+
+    pc_tester.setMode(ParallelChecker::Mode::automatic);
+    REQUIRE(ParallelChecker::instance().isWriting() == (parallel::size() == 1));
+  }
+
+  SECTION("check ItemValue/ItemArray attributes")
+  {
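+    // verifies the HDF5 layout produced by parallel_check: one group per
+    // tag under /values, holding the "numbers" and variable datasets plus
+    // the source-location and type metadata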
+    auto check = []<typename ItemValueT>(const ItemValueT& item_value, const std::string& var_name,
+                                         const SourceLocation& source_location, const size_t tag) {
+      ItemType item_type = ItemValueT::item_t;
+      using DataType     = typename ItemValueT::data_type;
+
+      HighFive::File file(ParallelChecker::instance().filename(), HighFive::File::ReadOnly);
+      HighFive::Group group_var0 = file.getGroup("values/" + std::to_string(tag));
+      REQUIRE(group_var0.getNumberObjects() == 2);
+
+      REQUIRE(group_var0.exist("numbers"));
+      REQUIRE(group_var0.exist(var_name));
+
+      REQUIRE(group_var0.getNumberAttributes() == 7);
+      REQUIRE(group_var0.hasAttribute("filename"));
+      REQUIRE(group_var0.hasAttribute("function"));
+      REQUIRE(group_var0.hasAttribute("line"));
+      REQUIRE(group_var0.hasAttribute("dimension"));
+      REQUIRE(group_var0.hasAttribute("data_type"));
+      REQUIRE(group_var0.hasAttribute("item_type"));
+      REQUIRE(group_var0.hasAttribute("name"));
+
+      REQUIRE(group_var0.getAttribute("filename").read<std::string>() == source_location.filename());
+      REQUIRE(group_var0.getAttribute("function").read<std::string>() == source_location.function());
+      REQUIRE(group_var0.getAttribute("line").read<size_t>() == source_location.line());
+      REQUIRE(group_var0.getAttribute("dimension").read<size_t>() == item_value.connectivity_ptr()->dimension());
+      REQUIRE(group_var0.getAttribute("data_type").read<std::string>() == demangle<DataType>());
+      REQUIRE(group_var0.getAttribute("item_type").read<std::string>() == itemName(item_type));
+      REQUIRE(group_var0.getAttribute("name").read<std::string>() == var_name);
+    };
+
+    // ItemValues
+    {   // 1d
+      auto mesh = MeshDataBaseForTests::get().unordered1DMesh();
+
+      const Connectivity<1>& connectivity = mesh->connectivity();
+
+      ParallelCheckerTester pc_tester;
+      {
+        CellValue<double> var{connectivity};
+        var.fill(1);
+
+        const SourceLocation source_location;
+        const size_t tag           = pc_tester.getTag();
+        const std::string var_name = "var_" + std::to_string(tag);
+
+        parallel_check(var, var_name, source_location);
+        check(var, var_name, source_location, tag);
+      }
+      {
+        DiscreteFunctionP0<1, double> var{mesh};
+        var.fill(1);
+
+        const SourceLocation source_location;
+        const size_t tag           = pc_tester.getTag();
+        const std::string var_name = "var_" + std::to_string(tag);
+
+        parallel_check(var, var_name, source_location);
+        check(var.cellValues(), var_name, source_location, tag);
+      }
+      {
+        NodeValue<TinyVector<2>> var{connectivity};
+        var.fill(zero);
+
+        const SourceLocation source_location;
+        const size_t tag           = pc_tester.getTag();
+        const std::string var_name = "var_" + std::to_string(tag);
+
+        parallel_check(ItemValueVariant{var}, var_name, source_location);
+        check(var, var_name, source_location, tag);
+      }
+    }
+
+    {   // 2d
+      auto mesh = MeshDataBaseForTests::get().hybrid2DMesh();
+
+      const Connectivity<2>& connectivity = mesh->connectivity();
+
+      ParallelCheckerTester pc_tester;
+      {
+        FaceValue<TinyMatrix<3, 2>> var{connectivity};
+        var.fill(zero);
+
+        const SourceLocation source_location;
+        const size_t tag           = pc_tester.getTag();
+        const std::string var_name = "var_" + std::to_string(tag);
+
+        parallel_check(var, var_name, source_location);
+        check(var, var_name, source_location, tag);
+      }
+      {
+        FaceValue<TinyVector<1>> var{connectivity};
+        var.fill(zero);
+
+        const SourceLocation source_location;
+        const size_t tag           = pc_tester.getTag();
+        const std::string var_name = "var_" + std::to_string(tag);
+
+        parallel_check(ItemValueVariant{var}, var_name, source_location);
+        check(var, var_name, source_location, tag);
+      }
+    }
+
+    {   // 3d
+      auto mesh = MeshDataBaseForTests::get().hybrid3DMesh();
+
+      const Connectivity<3>& connectivity = mesh->connectivity();
+
+      ParallelCheckerTester pc_tester;
+      {
+        EdgeValue<TinyMatrix<2, 2>> var{connectivity};
+        var.fill(zero);
+
+        const SourceLocation source_location;
+        const size_t tag           = pc_tester.getTag();
+        const std::string var_name = "var_" + std::to_string(tag);
+
+        parallel_check(var, var_name, source_location);
+        check(var, var_name, source_location, tag);
+      }
+      {
+        NodeValue<TinyVector<3>> var{connectivity};
+        var.fill(zero);
+
+        const SourceLocation source_location;
+        const size_t tag           = pc_tester.getTag();
+        const std::string var_name = "var_" + std::to_string(tag);
+
+        parallel_check(ItemValueVariant{var}, var_name, source_location);
+        check(var, var_name, source_location, tag);
+      }
+      {
+        DiscreteFunctionP0<3, TinyVector<3>> var{mesh};
+        var.fill(zero);
+
+        const SourceLocation source_location;
+        const size_t tag           = pc_tester.getTag();
+        const std::string var_name = "var_" + std::to_string(tag);
+
+        parallel_check(DiscreteFunctionVariant{var}, var_name, source_location);
+        check(var.cellValues(), var_name, source_location, tag);
+      }
+    }
+
+    // ItemArrays
+    {   // 1d
+      auto mesh = MeshDataBaseForTests::get().unordered1DMesh();
+
+      const Connectivity<1>& connectivity = mesh->connectivity();
+
+      ParallelCheckerTester pc_tester;
+      {
+        CellArray<double> var{connectivity, 2};
+        var.fill(1);
+
+        const SourceLocation source_location;
+        const size_t tag           = pc_tester.getTag();
+        const std::string var_name = "var_" + std::to_string(tag);
+
+        parallel_check(var, var_name, source_location);
+        check(var, var_name, source_location, tag);
+      }
+      {
+        DiscreteFunctionP0Vector<1, double> var{mesh, 2};
+        var.fill(1);
+
+        const SourceLocation source_location;
+        const size_t tag           = pc_tester.getTag();
+        const std::string var_name = "var_" + std::to_string(tag);
+
+        parallel_check(var, var_name, source_location);
+        check(var.cellArrays(), var_name, source_location, tag);
+      }
+      {
+        NodeArray<TinyVector<2>> var{connectivity, 1};
+        var.fill(zero);
+
+        const SourceLocation source_location;
+        const size_t tag           = pc_tester.getTag();
+        const std::string var_name = "var_" + std::to_string(tag);
+
+        parallel_check(ItemArrayVariant{var}, var_name, source_location);
+        check(var, var_name, source_location, tag);
+      }
+    }
+
+    {   // 2d
+      auto mesh = MeshDataBaseForTests::get().hybrid2DMesh();
+
+      const Connectivity<2>& connectivity = mesh->connectivity();
+
+      ParallelCheckerTester pc_tester;
+      {
+        FaceArray<TinyMatrix<3, 2>> var{connectivity, 2};
+        var.fill(zero);
+
+        const SourceLocation source_location;
+        const size_t tag           = pc_tester.getTag();
+        const std::string var_name = "var_" + std::to_string(tag);
+
+        parallel_check(var, var_name, source_location);
+        check(var, var_name, source_location, tag);
+      }
+      {
+        FaceArray<TinyVector<1>> var{connectivity, 3};
+        var.fill(zero);
+
+        const SourceLocation source_location;
+        const size_t tag           = pc_tester.getTag();
+        const std::string var_name = "var_" + std::to_string(tag);
+
+        parallel_check(ItemArrayVariant{var}, var_name, source_location);
+        check(var, var_name, source_location, tag);
+      }
+    }
+
+    {   // 3d
+      auto mesh = MeshDataBaseForTests::get().hybrid3DMesh();
+
+      const Connectivity<3>& connectivity = mesh->connectivity();
+
+      ParallelCheckerTester pc_tester;
+      {
+        EdgeArray<TinyMatrix<2, 2>> var{connectivity, 2};
+        var.fill(zero);
+
+        const SourceLocation source_location;
+        const size_t tag           = pc_tester.getTag();
+        const std::string var_name = "var_" + std::to_string(tag);
+
+        parallel_check(var, var_name, source_location);
+        check(var, var_name, source_location, tag);
+      }
+      {
+        NodeArray<TinyVector<3>> var{connectivity, 3};
+        var.fill(zero);
+
+        const SourceLocation source_location;
+        const size_t tag           = pc_tester.getTag();
+        const std::string var_name = "var_" + std::to_string(tag);
+
+        parallel_check(ItemArrayVariant{var}, var_name, source_location);
+        check(var, var_name, source_location, tag);
+      }
+      {
+        DiscreteFunctionP0Vector<3, double> var{mesh, 3};
+        var.fill(0);
+
+        const SourceLocation source_location;
+        const size_t tag           = pc_tester.getTag();
+        const std::string var_name = "var_" + std::to_string(tag);
+
+        parallel_check(DiscreteFunctionVariant{var}, var_name, source_location);
+        check(var.cellArrays(), var_name, source_location, tag);
+      }
+    }
+  }
+
+  SECTION("check SubItemValuePerItem/SubItemArrayPerItem attributes")
+  {
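+    // same layout as above, with an additional "rows_map" dataset and a
+    // "subitem_type" attribute for per-subitem data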
+    auto check = []<typename SubItemValuePerItemT>(const SubItemValuePerItemT& item_value, const std::string& var_name,
+                                                   const SourceLocation& source_location, const size_t tag) {
+      ItemType item_type     = SubItemValuePerItemT::item_type;
+      ItemType sub_item_type = SubItemValuePerItemT::sub_item_type;
+      using DataType         = typename SubItemValuePerItemT::data_type;
+
+      HighFive::File file(ParallelChecker::instance().filename(), HighFive::File::ReadOnly);
+      HighFive::Group group_var0 = file.getGroup("values/" + std::to_string(tag));
+      REQUIRE(group_var0.getNumberObjects() == 3);
+
+      REQUIRE(group_var0.exist("numbers"));
+      REQUIRE(group_var0.exist("rows_map"));
+      REQUIRE(group_var0.exist(var_name));
+
+      REQUIRE(group_var0.getNumberAttributes() == 8);
+      REQUIRE(group_var0.hasAttribute("filename"));
+      REQUIRE(group_var0.hasAttribute("function"));
+      REQUIRE(group_var0.hasAttribute("line"));
+      REQUIRE(group_var0.hasAttribute("dimension"));
+      REQUIRE(group_var0.hasAttribute("data_type"));
+      REQUIRE(group_var0.hasAttribute("item_type"));
+      REQUIRE(group_var0.hasAttribute("subitem_type"));
+      REQUIRE(group_var0.hasAttribute("name"));
+
+      REQUIRE(group_var0.getAttribute("filename").read<std::string>() == source_location.filename());
+      REQUIRE(group_var0.getAttribute("function").read<std::string>() == source_location.function());
+      REQUIRE(group_var0.getAttribute("line").read<size_t>() == source_location.line());
+      REQUIRE(group_var0.getAttribute("dimension").read<size_t>() == item_value.connectivity_ptr()->dimension());
+      REQUIRE(group_var0.getAttribute("data_type").read<std::string>() == demangle<DataType>());
+      REQUIRE(group_var0.getAttribute("item_type").read<std::string>() == itemName(item_type));
+      REQUIRE(group_var0.getAttribute("subitem_type").read<std::string>() == itemName(sub_item_type));
+      REQUIRE(group_var0.getAttribute("name").read<std::string>() == var_name);
+    };
+
+    // ItemValues
+    {   // 1d
+      auto mesh = MeshDataBaseForTests::get().unordered1DMesh();
+
+      const Connectivity<1>& connectivity = mesh->connectivity();
+
+      ParallelCheckerTester pc_tester;
+      {
+        CellValuePerNode<double> var{connectivity};
+        var.fill(1);
+
+        const SourceLocation source_location;
+        const size_t tag           = pc_tester.getTag();
+        const std::string var_name = "var_" + std::to_string(tag);
+
+        parallel_check(var, var_name, source_location);
+        check(var, var_name, source_location, tag);
+      }
+      {
+        NodeValuePerCell<TinyVector<2>> var{connectivity};
+        var.fill(zero);
+
+        const SourceLocation source_location;
+        const size_t tag           = pc_tester.getTag();
+        const std::string var_name = "var_" + std::to_string(tag);
+
+        parallel_check(SubItemValuePerItemVariant{var}, var_name, source_location);
+        check(var, var_name, source_location, tag);
+      }
+    }
+
+    {   // 2d
+      auto mesh = MeshDataBaseForTests::get().hybrid2DMesh();
+
+      const Connectivity<2>& connectivity = mesh->connectivity();
+
+      ParallelCheckerTester pc_tester;
+      {
+        FaceValuePerCell<TinyMatrix<3, 2>> var{connectivity};
+        var.fill(zero);
+
+        const SourceLocation source_location;
+        const size_t tag           = pc_tester.getTag();
+        const std::string var_name = "var_" + std::to_string(tag);
+
+        parallel_check(var, var_name, source_location);
+        check(var, var_name, source_location, tag);
+      }
+      {
+        FaceValuePerNode<TinyVector<1>> var{connectivity};
+        var.fill(zero);
+
+        const SourceLocation source_location;
+        const size_t tag           = pc_tester.getTag();
+        const std::string var_name = "var_" + std::to_string(tag);
+
+        parallel_check(SubItemValuePerItemVariant{var}, var_name, source_location);
+        check(var, var_name, source_location, tag);
+      }
+    }
+
+    {   // 3d
+      auto mesh = MeshDataBaseForTests::get().hybrid3DMesh();
+
+      const Connectivity<3>& connectivity = mesh->connectivity();
+
+      ParallelCheckerTester pc_tester;
+      {
+        EdgeValuePerFace<TinyMatrix<2, 2>> var{connectivity};
+        var.fill(zero);
+
+        const SourceLocation source_location;
+        const size_t tag           = pc_tester.getTag();
+        const std::string var_name = "var_" + std::to_string(tag);
+
+        parallel_check(var, var_name, source_location);
+        check(var, var_name, source_location, tag);
+      }
+      {
+        NodeValuePerCell<TinyVector<3>> var{connectivity};
+        var.fill(zero);
+
+        const SourceLocation source_location;
+        const size_t tag           = pc_tester.getTag();
+        const std::string var_name = "var_" + std::to_string(tag);
+
+        parallel_check(SubItemValuePerItemVariant{var}, var_name, source_location);
+        check(var, var_name, source_location, tag);
+      }
+    }
+
+    // ItemArrays
+    {   // 1d
+      auto mesh = MeshDataBaseForTests::get().unordered1DMesh();
+
+      const Connectivity<1>& connectivity = mesh->connectivity();
+
+      ParallelCheckerTester pc_tester;
+      {
+        CellArrayPerNode<double> var{connectivity, 2};
+        var.fill(1);
+
+        const SourceLocation source_location;
+        const size_t tag           = pc_tester.getTag();
+        const std::string var_name = "var_" + std::to_string(tag);
+
+        parallel_check(var, var_name, source_location);
+        check(var, var_name, source_location, tag);
+      }
+      {
+        NodeArrayPerCell<TinyVector<2>> var{connectivity, 1};
+        var.fill(zero);
+
+        const SourceLocation source_location;
+        const size_t tag           = pc_tester.getTag();
+        const std::string var_name = "var_" + std::to_string(tag);
+
+        parallel_check(SubItemArrayPerItemVariant{var}, var_name, source_location);
+        check(var, var_name, source_location, tag);
+      }
+    }
+
+    {   // 2d
+      auto mesh = MeshDataBaseForTests::get().hybrid2DMesh();
+
+      const Connectivity<2>& connectivity = mesh->connectivity();
+
+      ParallelCheckerTester pc_tester;
+      {
+        FaceArrayPerNode<TinyMatrix<3, 2>> var{connectivity, 2};
+        var.fill(zero);
+
+        const SourceLocation source_location;
+        const size_t tag           = pc_tester.getTag();
+        const std::string var_name = "var_" + std::to_string(tag);
+
+        parallel_check(var, var_name, source_location);
+        check(var, var_name, source_location, tag);
+      }
+      {
+        FaceArrayPerCell<TinyVector<1>> var{connectivity, 3};
+        var.fill(zero);
+
+        const SourceLocation source_location;
+        const size_t tag           = pc_tester.getTag();
+        const std::string var_name = "var_" + std::to_string(tag);
+
+        parallel_check(SubItemArrayPerItemVariant{var}, var_name, source_location);
+        check(var, var_name, source_location, tag);
+      }
+    }
+
+    {   // 3d
+      auto mesh = MeshDataBaseForTests::get().hybrid3DMesh();
+
+      const Connectivity<3>& connectivity = mesh->connectivity();
+
+      ParallelCheckerTester pc_tester;
+      {
+        EdgeArrayPerFace<TinyMatrix<2, 2>> var{connectivity, 2};
+        var.fill(zero);
+
+        const SourceLocation source_location;
+        const size_t tag           = pc_tester.getTag();
+        const std::string var_name = "var_" + std::to_string(tag);
+
+        parallel_check(var, var_name, source_location);
+        check(var, var_name, source_location, tag);
+      }
+      {
+        NodeArrayPerEdge<TinyVector<3>> var{connectivity, 3};
+        var.fill(zero);
+
+        const SourceLocation source_location;
+        const size_t tag           = pc_tester.getTag();
+        const std::string var_name = "var_" + std::to_string(tag);
+
+        parallel_check(SubItemArrayPerItemVariant{var}, var_name, source_location);
+        check(var, var_name, source_location, tag);
+      }
+    }
+  }
+
+  SECTION("invalid set config at after first write")
+  {
+    auto [filename, mode, tag] = get_pc_options();
+
+    REQUIRE(ParallelChecker::instance().filename() == filename);
+    REQUIRE(ParallelChecker::instance().mode() == mode);
+    REQUIRE(tag == 0);
+
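+    // Force a non-zero tag to mimic a checker that has already written data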
+    set_pc_options(std::make_tuple(filename, mode, 2ul));
+
+    REQUIRE_THROWS_WITH(ParallelChecker::instance().setFilename("foo.h5"),
+                        "unexpected error: Cannot modify parallel checker file if it was already used");
+    REQUIRE_THROWS_WITH(ParallelChecker::instance().setMode(ParallelChecker::Mode::automatic),
+                        "unexpected error: Cannot modify parallel checker mode if it was already used");
+  }
+
+#ifndef NDEBUG
+  SECTION("bad creation/destruction/access")
+  {
+    REQUIRE_THROWS_WITH(ParallelChecker::create(), "ParallelChecker has already been created");
+    REQUIRE_NOTHROW(ParallelChecker::destroy());
+
+    REQUIRE_THROWS_WITH(ParallelChecker::destroy(), "ParallelChecker has already been destroyed");
+    REQUIRE_THROWS_WITH(ParallelChecker::instance(), "ParallelChecker was not created");
+
+    REQUIRE_NOTHROW(ParallelChecker::create());
+  }
+#endif
+
+  std::filesystem::remove_all(std::filesystem::path{tmp_dirname});
+  REQUIRE_NOTHROW(ParallelChecker::destroy());
+}
+
+#else   // PUGS_HAS_HDF5
+
+TEST_CASE("ParallelChecker_write", "[dev]")
+{
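+  // Without HDF5 the checker can still be created and its mode changed,
+  // but any actual check must throw.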
+  REQUIRE_NOTHROW(ParallelChecker::create());
+  REQUIRE_NOTHROW(ParallelChecker::instance().setMode(ParallelChecker::Mode::read));
+  REQUIRE(not ParallelChecker::instance().isWriting());
+  REQUIRE_NOTHROW(ParallelChecker::instance().setMode(ParallelChecker::Mode::write));
+  REQUIRE(ParallelChecker::instance().isWriting());
+  REQUIRE_NOTHROW(ParallelChecker::instance().setMode(ParallelChecker::Mode::automatic));
+  REQUIRE(ParallelChecker::instance().isWriting() == (parallel::size() == 1));
+
+  auto mesh = MeshDataBaseForTests::get().unordered1DMesh();
+
+  const Connectivity<1>& connectivity = mesh->connectivity();
+
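+  // Each parallel_check() overload must report the missing HDF5 support.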
+  NodeValue<double> nv{connectivity};
+  REQUIRE_THROWS_WITH(parallel_check(nv, "test"), "error: parallel checker cannot be used without HDF5 support");
+
+  REQUIRE_THROWS_WITH(parallel_check(ItemValueVariant{nv}, "test"),
+                      "error: parallel checker cannot be used without HDF5 support");
+
+  NodeArray<double> na{connectivity, 2};
+  REQUIRE_THROWS_WITH(parallel_check(na, "test"), "error: parallel checker cannot be used without HDF5 support");
+
+  REQUIRE_THROWS_WITH(parallel_check(ItemArrayVariant{na}, "test"),
+                      "error: parallel checker cannot be used without HDF5 support");
+
+  NodeValuePerCell<double> nvpc{connectivity};
+  REQUIRE_THROWS_WITH(parallel_check(nvpc, "test"), "error: parallel checker cannot be used without HDF5 support");
+
+  REQUIRE_THROWS_WITH(parallel_check(SubItemValuePerItemVariant{nvpc}, "test"),
+                      "error: parallel checker cannot be used without HDF5 support");
+
+  NodeArrayPerCell<double> napc{connectivity, 2};
+  REQUIRE_THROWS_WITH(parallel_check(napc, "test"), "error: parallel checker cannot be used without HDF5 support");
+
+  REQUIRE_THROWS_WITH(parallel_check(SubItemArrayPerItemVariant{napc}, "test"),
+                      "error: parallel checker cannot be used without HDF5 support");
+
+  REQUIRE_NOTHROW(ParallelChecker::destroy());
+}
+
+#endif   // PUGS_HAS_HDF5
diff --git a/tests/test_PugsUtils.cpp b/tests/test_PugsUtils.cpp
index 19d693c76599937de4b789c2c07785844daa100c..9dfd686da65c5027f43bdfcf38ba3b54f90607ba 100644
--- a/tests/test_PugsUtils.cpp
+++ b/tests/test_PugsUtils.cpp
@@ -51,6 +51,7 @@ TEST_CASE("PugsUtils", "[utils]")
       os << "MPI:      " << rang::style::bold << BuildInfo::mpiLibrary() << rang::style::reset << '\n';
       os << "PETSc:    " << rang::style::bold << BuildInfo::petscLibrary() << rang::style::reset << '\n';
       os << "SLEPc:    " << rang::style::bold << BuildInfo::slepcLibrary() << rang::style::reset << '\n';
+      os << "HDF5:     " << rang::style::bold << BuildInfo::hdf5Library() << rang::style::reset << '\n';
       os << "-------------------------------------------------------";
 
       return os.str();
diff --git a/tests/test_Synchronizer.cpp b/tests/test_Synchronizer.cpp
index 54a065b475c757bdf8e108e01d1b8f859daa262d..fcad9e414d4f5d3c88a6c50e41c1108cf996a13b 100644
--- a/tests/test_Synchronizer.cpp
+++ b/tests/test_Synchronizer.cpp
@@ -914,32 +914,147 @@ TEST_CASE("Synchronizer", "[mesh]")
         }
       }
 
-      SECTION("forbidden synchronization")
+      SECTION("synchonize CellValuePerNode")
       {
+        const auto node_is_owned = connectivity.nodeIsOwned();
+        const auto node_number   = connectivity.nodeNumber();
+        const auto cell_number   = connectivity.cellNumber();
+
+        const auto node_to_cell_matrix = connectivity.nodeToCellMatrix();
+
+        CellValuePerNode<int> cell_value_per_node_ref{connectivity};
+
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < cell_value_per_node_ref.numberOfSubValues(node_id); ++j) {
+              cell_value_per_node_ref(node_id, j) =   // node_owner[node_id] +
+                node_number[node_id] + 100 * cell_number[node_to_cell_matrix[node_id][j]];
+            }
+          });
+
+        CellValuePerNode<int> cell_value_per_node{connectivity};
+        cell_value_per_node.fill(0);
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < cell_value_per_node.numberOfSubValues(node_id); ++j) {
+              if (node_is_owned[node_id]) {
+                cell_value_per_node(node_id, j) =
+                  node_number[node_id] + 100 * cell_number[node_to_cell_matrix[node_id][j]];
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_value(cell_value_per_node, cell_value_per_node_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(cell_value_per_node);
+        REQUIRE(is_same_item_value(cell_value_per_node, cell_value_per_node_ref));
+
+        // Check that exchange sizes are correctly stored (required for
+        // these lines to be covered)
+        if (parallel::size() > 1) {
+          reset_ghost_values(cell_value_per_node, connectivity.nodeOwner(), 0);
+          REQUIRE(not is_same_item_value(cell_value_per_node, cell_value_per_node_ref));
+          synchronizer.synchronize(cell_value_per_node);
+          REQUIRE(is_same_item_value(cell_value_per_node, cell_value_per_node_ref));
+        }
+      }
+
+      SECTION("synchonize CellValuePerEdge")
+      {
+        const auto edge_is_owned = connectivity.edgeIsOwned();
+        const auto edge_number   = connectivity.edgeNumber();
+        const auto cell_number   = connectivity.cellNumber();
+
+        const auto edge_to_cell_matrix = connectivity.edgeToCellMatrix();
+
+        CellValuePerEdge<int> cell_value_per_edge_ref{connectivity};
+
+        parallel_for(
+          connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
+            for (size_t j = 0; j < cell_value_per_edge_ref.numberOfSubValues(edge_id); ++j) {
+              cell_value_per_edge_ref(edge_id, j) =   // edge_owner[edge_id] +
+                edge_number[edge_id] + 100 * cell_number[edge_to_cell_matrix[edge_id][j]];
+            }
+          });
+
+        CellValuePerEdge<int> cell_value_per_edge{connectivity};
+        cell_value_per_edge.fill(0);
+        parallel_for(
+          connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
+            for (size_t j = 0; j < cell_value_per_edge.numberOfSubValues(edge_id); ++j) {
+              if (edge_is_owned[edge_id]) {
+                cell_value_per_edge(edge_id, j) =
+                  edge_number[edge_id] + 100 * cell_number[edge_to_cell_matrix[edge_id][j]];
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_value(cell_value_per_edge, cell_value_per_edge_ref));
+        }
+
         Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(cell_value_per_edge);
+        REQUIRE(is_same_item_value(cell_value_per_edge, cell_value_per_edge_ref));
 
-        SECTION("CellValuePerNode")
-        {
-          CellValuePerNode<int> cell_value_per_node{connectivity};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_value_per_node),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (node)");
+        // Check that exchange sizes are correctly stored (required for
+        // these lines to be covered)
+        if (parallel::size() > 1) {
+          reset_ghost_values(cell_value_per_edge, connectivity.edgeOwner(), 0);
+          REQUIRE(not is_same_item_value(cell_value_per_edge, cell_value_per_edge_ref));
+          synchronizer.synchronize(cell_value_per_edge);
+          REQUIRE(is_same_item_value(cell_value_per_edge, cell_value_per_edge_ref));
         }
+      }
+
+      SECTION("synchonize CellValuePerFace")
+      {
+        const auto face_is_owned = connectivity.faceIsOwned();
+        const auto face_number   = connectivity.faceNumber();
+        const auto cell_number   = connectivity.cellNumber();
+
+        const auto face_to_cell_matrix = connectivity.faceToCellMatrix();
+
+        CellValuePerFace<int> cell_value_per_face_ref{connectivity};
+
+        parallel_for(
+          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
+            for (size_t j = 0; j < cell_value_per_face_ref.numberOfSubValues(face_id); ++j) {
+              cell_value_per_face_ref(face_id, j) =   // face_owner[face_id] +
+                face_number[face_id] + 100 * cell_number[face_to_cell_matrix[face_id][j]];
+            }
+          });
 
-        SECTION("CellValuePerEdge")
-        {
-          CellValuePerEdge<int> cell_value_per_edge{connectivity};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_value_per_edge),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (edge)");
+        CellValuePerFace<int> cell_value_per_face{connectivity};
+        cell_value_per_face.fill(0);
+        parallel_for(
+          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
+            for (size_t j = 0; j < cell_value_per_face.numberOfSubValues(face_id); ++j) {
+              if (face_is_owned[face_id]) {
+                cell_value_per_face(face_id, j) =
+                  face_number[face_id] + 100 * cell_number[face_to_cell_matrix[face_id][j]];
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_value(cell_value_per_face, cell_value_per_face_ref));
         }
 
-        SECTION("CellValuePerFace")
-        {
-          CellValuePerFace<int> cell_value_per_face{connectivity};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_value_per_face),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (face)");
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(cell_value_per_face);
+        REQUIRE(is_same_item_value(cell_value_per_face, cell_value_per_face_ref));
+
+        // Check that exchange sizes are correctly stored (required for
+        // these lines to be covered)
+        if (parallel::size() > 1) {
+          reset_ghost_values(cell_value_per_face, connectivity.faceOwner(), 0);
+          REQUIRE(not is_same_item_value(cell_value_per_face, cell_value_per_face_ref));
+          synchronizer.synchronize(cell_value_per_face);
+          REQUIRE(is_same_item_value(cell_value_per_face, cell_value_per_face_ref));
         }
       }
     }
@@ -1156,48 +1271,243 @@ TEST_CASE("Synchronizer", "[mesh]")
         }
       }
 
-      SECTION("forbidden synchronization")
+      SECTION("synchonize CellValuePerNode")
+      {
+        const auto node_is_owned = connectivity.nodeIsOwned();
+        const auto node_number   = connectivity.nodeNumber();
+        const auto cell_number   = connectivity.cellNumber();
+
+        const auto node_to_cell_matrix = connectivity.nodeToCellMatrix();
+
+        CellValuePerNode<int> cell_value_per_node_ref{connectivity};
+
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < cell_value_per_node_ref.numberOfSubValues(node_id); ++j) {
+              cell_value_per_node_ref(node_id, j) =   // node_owner[node_id] +
+                node_number[node_id] + 100 * cell_number[node_to_cell_matrix[node_id][j]];
+            }
+          });
+
+        CellValuePerNode<int> cell_value_per_node{connectivity};
+        cell_value_per_node.fill(0);
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < cell_value_per_node.numberOfSubValues(node_id); ++j) {
+              if (node_is_owned[node_id]) {
+                cell_value_per_node(node_id, j) =
+                  node_number[node_id] + 100 * cell_number[node_to_cell_matrix[node_id][j]];
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_value(cell_value_per_node, cell_value_per_node_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(cell_value_per_node);
+        REQUIRE(is_same_item_value(cell_value_per_node, cell_value_per_node_ref));
+
+        // Check that exchange sizes are correctly stored (required for
+        // these lines to be covered)
+        if (parallel::size() > 1) {
+          reset_ghost_values(cell_value_per_node, connectivity.nodeOwner(), 0);
+          REQUIRE(not is_same_item_value(cell_value_per_node, cell_value_per_node_ref));
+          synchronizer.synchronize(cell_value_per_node);
+          REQUIRE(is_same_item_value(cell_value_per_node, cell_value_per_node_ref));
+        }
+      }
+
+      SECTION("synchonize CellValuePerEdge")
+      {
+        const auto edge_is_owned = connectivity.edgeIsOwned();
+        const auto edge_number   = connectivity.edgeNumber();
+        const auto cell_number   = connectivity.cellNumber();
+
+        const auto edge_to_cell_matrix = connectivity.edgeToCellMatrix();
+
+        CellValuePerEdge<int> cell_value_per_edge_ref{connectivity};
+
+        parallel_for(
+          connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
+            for (size_t j = 0; j < cell_value_per_edge_ref.numberOfSubValues(edge_id); ++j) {
+              cell_value_per_edge_ref(edge_id, j) =   // edge_owner[edge_id] +
+                edge_number[edge_id] + 100 * cell_number[edge_to_cell_matrix[edge_id][j]];
+            }
+          });
+
+        CellValuePerEdge<int> cell_value_per_edge{connectivity};
+        cell_value_per_edge.fill(0);
+        parallel_for(
+          connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
+            for (size_t j = 0; j < cell_value_per_edge.numberOfSubValues(edge_id); ++j) {
+              if (edge_is_owned[edge_id]) {
+                cell_value_per_edge(edge_id, j) =
+                  edge_number[edge_id] + 100 * cell_number[edge_to_cell_matrix[edge_id][j]];
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_value(cell_value_per_edge, cell_value_per_edge_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(cell_value_per_edge);
+        REQUIRE(is_same_item_value(cell_value_per_edge, cell_value_per_edge_ref));
+
+        // Check that exchange sizes are correctly stored (required for
+        // these lines to be covered)
+        if (parallel::size() > 1) {
+          reset_ghost_values(cell_value_per_edge, connectivity.edgeOwner(), 0);
+          REQUIRE(not is_same_item_value(cell_value_per_edge, cell_value_per_edge_ref));
+          synchronizer.synchronize(cell_value_per_edge);
+          REQUIRE(is_same_item_value(cell_value_per_edge, cell_value_per_edge_ref));
+        }
+      }
+
+      SECTION("synchonize CellValuePerFace")
       {
+        const auto face_is_owned = connectivity.faceIsOwned();
+        const auto face_number   = connectivity.faceNumber();
+        const auto cell_number   = connectivity.cellNumber();
+
+        const auto face_to_cell_matrix = connectivity.faceToCellMatrix();
+
+        CellValuePerFace<int> cell_value_per_face_ref{connectivity};
+
+        parallel_for(
+          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
+            for (size_t j = 0; j < cell_value_per_face_ref.numberOfSubValues(face_id); ++j) {
+              cell_value_per_face_ref(face_id, j) =   // face_owner[face_id] +
+                face_number[face_id] + 100 * cell_number[face_to_cell_matrix[face_id][j]];
+            }
+          });
+
+        CellValuePerFace<int> cell_value_per_face{connectivity};
+        cell_value_per_face.fill(0);
+        parallel_for(
+          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
+            for (size_t j = 0; j < cell_value_per_face.numberOfSubValues(face_id); ++j) {
+              if (face_is_owned[face_id]) {
+                cell_value_per_face(face_id, j) =
+                  face_number[face_id] + 100 * cell_number[face_to_cell_matrix[face_id][j]];
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_value(cell_value_per_face, cell_value_per_face_ref));
+        }
+
         Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(cell_value_per_face);
+        REQUIRE(is_same_item_value(cell_value_per_face, cell_value_per_face_ref));
 
-        SECTION("CellValuePerNode")
-        {
-          CellValuePerNode<int> cell_value_per_node{connectivity};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_value_per_node),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (node)");
+        // Check that exchange sizes are correctly stored (required for
+        // these lines to be covered)
+        if (parallel::size() > 1) {
+          reset_ghost_values(cell_value_per_face, connectivity.faceOwner(), 0);
+          REQUIRE(not is_same_item_value(cell_value_per_face, cell_value_per_face_ref));
+          synchronizer.synchronize(cell_value_per_face);
+          REQUIRE(is_same_item_value(cell_value_per_face, cell_value_per_face_ref));
         }
+      }
+
+      SECTION("synchonize FaceValuePerNode")
+      {
+        const auto node_is_owned = connectivity.nodeIsOwned();
+        const auto node_number   = connectivity.nodeNumber();
+        const auto face_number   = connectivity.faceNumber();
+
+        const auto node_to_face_matrix = connectivity.nodeToFaceMatrix();
 
-        SECTION("CellValuePerEdge")
-        {
-          CellValuePerEdge<int> cell_value_per_edge{connectivity};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_value_per_edge),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (edge)");
+        FaceValuePerNode<int> face_value_per_node_ref{connectivity};
+
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < face_value_per_node_ref.numberOfSubValues(node_id); ++j) {
+              face_value_per_node_ref(node_id, j) =   // node_owner[node_id] +
+                node_number[node_id] + 100 * face_number[node_to_face_matrix[node_id][j]];
+            }
+          });
+
+        FaceValuePerNode<int> face_value_per_node{connectivity};
+        face_value_per_node.fill(0);
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < face_value_per_node.numberOfSubValues(node_id); ++j) {
+              if (node_is_owned[node_id]) {
+                face_value_per_node(node_id, j) =
+                  node_number[node_id] + 100 * face_number[node_to_face_matrix[node_id][j]];
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_value(face_value_per_node, face_value_per_node_ref));
         }
 
-        SECTION("CellValuePerFace")
-        {
-          CellValuePerFace<int> cell_value_per_face{connectivity};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_value_per_face),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (face)");
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(face_value_per_node);
+        REQUIRE(is_same_item_value(face_value_per_node, face_value_per_node_ref));
+
+        // Check that exchange sizes are correctly stored (required for
+        // these lines to be covered)
+        if (parallel::size() > 1) {
+          reset_ghost_values(face_value_per_node, connectivity.nodeOwner(), 0);
+          REQUIRE(not is_same_item_value(face_value_per_node, face_value_per_node_ref));
+          synchronizer.synchronize(face_value_per_node);
+          REQUIRE(is_same_item_value(face_value_per_node, face_value_per_node_ref));
         }
+      }
+
+      SECTION("synchonize EdgeValuePerNode")
+      {
+        const auto node_is_owned = connectivity.nodeIsOwned();
+        const auto node_number   = connectivity.nodeNumber();
+        const auto edge_number   = connectivity.edgeNumber();
+
+        const auto node_to_edge_matrix = connectivity.nodeToEdgeMatrix();
+
+        EdgeValuePerNode<int> edge_value_per_node_ref{connectivity};
+
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < edge_value_per_node_ref.numberOfSubValues(node_id); ++j) {
+              edge_value_per_node_ref(node_id, j) =   // node_owner[node_id] +
+                node_number[node_id] + 100 * edge_number[node_to_edge_matrix[node_id][j]];
+            }
+          });
+
+        EdgeValuePerNode<int> edge_value_per_node{connectivity};
+        edge_value_per_node.fill(0);
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < edge_value_per_node.numberOfSubValues(node_id); ++j) {
+              if (node_is_owned[node_id]) {
+                edge_value_per_node(node_id, j) =
+                  node_number[node_id] + 100 * edge_number[node_to_edge_matrix[node_id][j]];
+              }
+            }
+          });
 
-        SECTION("FaceValuePerNode")
-        {
-          FaceValuePerNode<int> face_value_per_node{connectivity};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(face_value_per_node),
-                              "unexpected error: synchronization requires sub-item type (face) to be of lower "
-                              "dimension than item (node)");
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_value(edge_value_per_node, edge_value_per_node_ref));
         }
 
-        SECTION("EdgeValuePerNode")
-        {
-          EdgeValuePerNode<int> edge_value_per_node{connectivity};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(edge_value_per_node),
-                              "unexpected error: synchronization requires sub-item type (edge) to be of lower "
-                              "dimension than item (node)");
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(edge_value_per_node);
+        REQUIRE(is_same_item_value(edge_value_per_node, edge_value_per_node_ref));
+
+        // Check that exchange sizes are correctly stored (required for
+        // these lines to be covered)
+        if (parallel::size() > 1) {
+          reset_ghost_values(edge_value_per_node, connectivity.nodeOwner(), 0);
+          REQUIRE(not is_same_item_value(edge_value_per_node, edge_value_per_node_ref));
+          synchronizer.synchronize(edge_value_per_node);
+          REQUIRE(is_same_item_value(edge_value_per_node, edge_value_per_node_ref));
         }
       }
     }
@@ -1455,267 +1765,335 @@ TEST_CASE("Synchronizer", "[mesh]")
         }
       }
 
-      SECTION("forbidden synchronization")
+      SECTION("synchonize CellValuePerNode")
       {
-        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
-
-        SECTION("CellValuePerNode")
-        {
-          CellValuePerNode<int> cell_value_per_node{connectivity};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_value_per_node),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (node)");
-        }
+        const auto node_is_owned = connectivity.nodeIsOwned();
+        const auto node_number   = connectivity.nodeNumber();
+        const auto cell_number   = connectivity.cellNumber();
 
-        SECTION("CellValuePerEdge")
-        {
-          CellValuePerEdge<int> cell_value_per_edge{connectivity};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_value_per_edge),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (edge)");
-        }
+        const auto node_to_cell_matrix = connectivity.nodeToCellMatrix();
 
-        SECTION("CellValuePerFace")
-        {
-          CellValuePerFace<int> cell_value_per_face{connectivity};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_value_per_face),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (face)");
-        }
+        CellValuePerNode<int> cell_value_per_node_ref{connectivity};
 
-        SECTION("FaceValuePerNode")
-        {
-          FaceValuePerNode<int> face_value_per_node{connectivity};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(face_value_per_node),
-                              "unexpected error: synchronization requires sub-item type (face) to be of lower "
-                              "dimension than item (node)");
-        }
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < cell_value_per_node_ref.numberOfSubValues(node_id); ++j) {
+              cell_value_per_node_ref(node_id, j) =   // node_owner[node_id] +
+                node_number[node_id] + 100 * cell_number[node_to_cell_matrix[node_id][j]];
+            }
+          });
 
-        SECTION("FaceValuePerEdge")
-        {
-          FaceValuePerEdge<int> face_value_per_edge{connectivity};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(face_value_per_edge),
-                              "unexpected error: synchronization requires sub-item type (face) to be of lower "
-                              "dimension than item (edge)");
-        }
+        CellValuePerNode<int> cell_value_per_node{connectivity};
+        cell_value_per_node.fill(0);
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < cell_value_per_node.numberOfSubValues(node_id); ++j) {
+              if (node_is_owned[node_id]) {
+                cell_value_per_node(node_id, j) =
+                  node_number[node_id] + 100 * cell_number[node_to_cell_matrix[node_id][j]];
+              }
+            }
+          });
 
-        SECTION("EdgeValuePerNode")
-        {
-          EdgeValuePerNode<int> edge_value_per_node{connectivity};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(edge_value_per_node),
-                              "unexpected error: synchronization requires sub-item type (edge) to be of lower "
-                              "dimension than item (node)");
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_value(cell_value_per_node, cell_value_per_node_ref));
         }
-      }
-    }
-  }
 
-  SECTION("SubItemArrayPerItem")
-  {
-    auto is_same_item_array = [](auto a, auto b) {
-      using IndexT = typename decltype(a)::index_type;
-      bool is_same = true;
-      for (IndexT i_item = 0; i_item < a.numberOfItems(); ++i_item) {
-        for (size_t l = 0; l < a.numberOfSubArrays(i_item); ++l) {
-          for (size_t k = 0; k < a.sizeOfArrays(); ++k) {
-            is_same &= (a(i_item, l)[k] == b(i_item, l)[k]);
-          }
-        }
-      }
-      return parallel::allReduceAnd(is_same);
-    };
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(cell_value_per_node);
+        REQUIRE(is_same_item_value(cell_value_per_node, cell_value_per_node_ref));
 
-    auto reset_ghost_arrays = [](auto sub_item_array_per_item, auto item_owner, auto value) {
-      using IndexT = typename decltype(sub_item_array_per_item)::index_type;
-      static_assert(std::is_same_v<typename decltype(sub_item_array_per_item)::index_type,
-                                   typename decltype(item_owner)::index_type>);
-      for (IndexT i_item = 0; i_item < sub_item_array_per_item.numberOfItems(); ++i_item) {
-        if (item_owner[i_item] != static_cast<int>(parallel::rank())) {
-          for (size_t l = 0; l < sub_item_array_per_item.numberOfSubArrays(i_item); ++l) {
-            sub_item_array_per_item(i_item, l).fill(value);
-          }
+        // Check that exchange sizes are correctly stored (required for
+        // these lines to be covered)
+        if (parallel::size() > 1) {
+          reset_ghost_values(cell_value_per_node, connectivity.nodeOwner(), 0);
+          REQUIRE(not is_same_item_value(cell_value_per_node, cell_value_per_node_ref));
+          synchronizer.synchronize(cell_value_per_node);
+          REQUIRE(is_same_item_value(cell_value_per_node, cell_value_per_node_ref));
         }
       }
-    };
 
-    SECTION("1D")
-    {
-      constexpr size_t Dimension = 1;
-      using ConnectivityType     = Connectivity<Dimension>;
-
-      const ConnectivityType& connectivity = MeshDataBaseForTests::get().unordered1DMesh()->connectivity();
-
-      SECTION("synchonize NodeArrayPerCell")
+      SECTION("synchonize CellValuePerEdge")
       {
-        const auto cell_owner  = connectivity.cellOwner();
-        const auto cell_number = connectivity.cellNumber();
+        const auto edge_is_owned = connectivity.edgeIsOwned();
+        const auto edge_number   = connectivity.edgeNumber();
+        const auto cell_number   = connectivity.cellNumber();
 
-        NodeArrayPerCell<int> node_array_per_cell_ref{connectivity, 3};
+        const auto edge_to_cell_matrix = connectivity.edgeToCellMatrix();
+
+        CellValuePerEdge<int> cell_value_per_edge_ref{connectivity};
 
         parallel_for(
-          connectivity.numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
-            for (size_t j = 0; j < node_array_per_cell_ref.numberOfSubArrays(cell_id); ++j) {
-              for (size_t k = 0; k < node_array_per_cell_ref.sizeOfArrays(); ++k) {
-                node_array_per_cell_ref(cell_id, j)[k] = cell_owner[cell_id] + cell_number[cell_id] + j + 2 * k;
-              }
+          connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
+            for (size_t j = 0; j < cell_value_per_edge_ref.numberOfSubValues(edge_id); ++j) {
+              cell_value_per_edge_ref(edge_id, j) =   // edge_owner[edge_id] +
+                edge_number[edge_id] + 100 * cell_number[edge_to_cell_matrix[edge_id][j]];
             }
           });
 
-        NodeArrayPerCell<int> node_array_per_cell{connectivity, 3};
+        CellValuePerEdge<int> cell_value_per_edge{connectivity};
+        cell_value_per_edge.fill(0);
         parallel_for(
-          connectivity.numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
-            for (size_t j = 0; j < node_array_per_cell_ref.numberOfSubArrays(cell_id); ++j) {
-              for (size_t k = 0; k < node_array_per_cell_ref.sizeOfArrays(); ++k) {
-                node_array_per_cell(cell_id, j)[k] = parallel::rank() + cell_number[cell_id] + j + 2 * k;
+          connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
+            for (size_t j = 0; j < cell_value_per_edge.numberOfSubValues(edge_id); ++j) {
+              if (edge_is_owned[edge_id]) {
+                cell_value_per_edge(edge_id, j) =
+                  edge_number[edge_id] + 100 * cell_number[edge_to_cell_matrix[edge_id][j]];
               }
             }
           });
 
         if (parallel::size() > 1) {
-          REQUIRE(not is_same_item_array(node_array_per_cell, node_array_per_cell_ref));
+          REQUIRE(not is_same_item_value(cell_value_per_edge, cell_value_per_edge_ref));
         }
 
         Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
-        synchronizer.synchronize(node_array_per_cell);
-
-        REQUIRE(is_same_item_array(node_array_per_cell, node_array_per_cell_ref));
+        synchronizer.synchronize(cell_value_per_edge);
+        REQUIRE(is_same_item_value(cell_value_per_edge, cell_value_per_edge_ref));
 
         // Check that exchange sizes are correctly stored (require
         // lines to be covered)
         if (parallel::size() > 1) {
-          reset_ghost_arrays(node_array_per_cell, cell_owner, 0);
-          REQUIRE(not is_same_item_array(node_array_per_cell, node_array_per_cell_ref));
-          synchronizer.synchronize(node_array_per_cell);
-          REQUIRE(is_same_item_array(node_array_per_cell, node_array_per_cell_ref));
+          reset_ghost_values(cell_value_per_edge, connectivity.edgeOwner(), 0);
+          REQUIRE(not is_same_item_value(cell_value_per_edge, cell_value_per_edge_ref));
+          synchronizer.synchronize(cell_value_per_edge);
+          REQUIRE(is_same_item_value(cell_value_per_edge, cell_value_per_edge_ref));
         }
       }
 
-      SECTION("synchonize EdgeArrayPerCell")
+      SECTION("synchonize CellValuePerFace")
       {
-        const auto cell_owner  = connectivity.cellOwner();
-        const auto cell_number = connectivity.cellNumber();
+        const auto face_is_owned = connectivity.faceIsOwned();
+        const auto face_number   = connectivity.faceNumber();
+        const auto cell_number   = connectivity.cellNumber();
 
-        EdgeArrayPerCell<int> edge_array_per_cell_ref{connectivity, 3};
+        const auto face_to_cell_matrix = connectivity.faceToCellMatrix();
+
+        CellValuePerFace<int> cell_value_per_face_ref{connectivity};
 
         parallel_for(
-          connectivity.numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
-            for (size_t j = 0; j < edge_array_per_cell_ref.numberOfSubArrays(cell_id); ++j) {
-              for (size_t k = 0; k < edge_array_per_cell_ref.sizeOfArrays(); ++k) {
-                edge_array_per_cell_ref(cell_id, j)[k] = cell_owner[cell_id] + cell_number[cell_id] + j + 2 * k;
-              }
+          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
+            for (size_t j = 0; j < cell_value_per_face_ref.numberOfSubValues(face_id); ++j) {
+              cell_value_per_face_ref(face_id, j) =   // face_owner[face_id] +
+                face_number[face_id] + 100 * cell_number[face_to_cell_matrix[face_id][j]];
             }
           });
 
-        EdgeArrayPerCell<int> edge_array_per_cell{connectivity, 3};
+        CellValuePerFace<int> cell_value_per_face{connectivity};
+        cell_value_per_face.fill(0);
         parallel_for(
-          connectivity.numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
-            for (size_t j = 0; j < edge_array_per_cell_ref.numberOfSubArrays(cell_id); ++j) {
-              for (size_t k = 0; k < edge_array_per_cell_ref.sizeOfArrays(); ++k) {
-                edge_array_per_cell(cell_id, j)[k] = parallel::rank() + cell_number[cell_id] + j + 2 * k;
+          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
+            for (size_t j = 0; j < cell_value_per_face.numberOfSubValues(face_id); ++j) {
+              if (face_is_owned[face_id]) {
+                cell_value_per_face(face_id, j) =
+                  face_number[face_id] + 100 * cell_number[face_to_cell_matrix[face_id][j]];
               }
             }
           });
 
         if (parallel::size() > 1) {
-          REQUIRE(not is_same_item_array(edge_array_per_cell, edge_array_per_cell_ref));
+          REQUIRE(not is_same_item_value(cell_value_per_face, cell_value_per_face_ref));
         }
 
         Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
-        synchronizer.synchronize(edge_array_per_cell);
-
-        REQUIRE(is_same_item_array(edge_array_per_cell, edge_array_per_cell_ref));
+        synchronizer.synchronize(cell_value_per_face);
+        REQUIRE(is_same_item_value(cell_value_per_face, cell_value_per_face_ref));
 
         // Check that exchange sizes are correctly stored (require
         // lines to be covered)
         if (parallel::size() > 1) {
-          reset_ghost_arrays(edge_array_per_cell, cell_owner, 0);
-          REQUIRE(not is_same_item_array(edge_array_per_cell, edge_array_per_cell_ref));
-          synchronizer.synchronize(edge_array_per_cell);
-          REQUIRE(is_same_item_array(edge_array_per_cell, edge_array_per_cell_ref));
+          reset_ghost_values(cell_value_per_face, connectivity.faceOwner(), 0);
+          REQUIRE(not is_same_item_value(cell_value_per_face, cell_value_per_face_ref));
+          synchronizer.synchronize(cell_value_per_face);
+          REQUIRE(is_same_item_value(cell_value_per_face, cell_value_per_face_ref));
         }
       }
 
-      SECTION("synchonize FaceArrayPerCell")
+      SECTION("synchonize FaceValuePerNode")
       {
-        const auto cell_owner  = connectivity.cellOwner();
-        const auto cell_number = connectivity.cellNumber();
+        const auto node_is_owned = connectivity.nodeIsOwned();
+        const auto node_number   = connectivity.nodeNumber();
+        const auto face_number   = connectivity.faceNumber();
 
-        FaceArrayPerCell<int> face_array_per_cell_ref{connectivity, 3};
+        const auto node_to_face_matrix = connectivity.nodeToFaceMatrix();
+
+        FaceValuePerNode<int> face_value_per_node_ref{connectivity};
 
         parallel_for(
-          connectivity.numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
-            for (size_t j = 0; j < face_array_per_cell_ref.numberOfSubArrays(cell_id); ++j) {
-              for (size_t k = 0; k < face_array_per_cell_ref.sizeOfArrays(); ++k) {
-                face_array_per_cell_ref(cell_id, j)[k] = cell_owner[cell_id] + cell_number[cell_id] + j + 2 * k;
-              }
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < face_value_per_node_ref.numberOfSubValues(node_id); ++j) {
+              face_value_per_node_ref(node_id, j) =   // node_owner[node_id] +
+                node_number[node_id] + 100 * face_number[node_to_face_matrix[node_id][j]];
             }
           });
 
-        FaceArrayPerCell<int> face_array_per_cell{connectivity, 3};
+        FaceValuePerNode<int> face_value_per_node{connectivity};
+        face_value_per_node.fill(0);
         parallel_for(
-          connectivity.numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
-            for (size_t j = 0; j < face_array_per_cell_ref.numberOfSubArrays(cell_id); ++j) {
-              for (size_t k = 0; k < face_array_per_cell_ref.sizeOfArrays(); ++k) {
-                face_array_per_cell(cell_id, j)[k] = parallel::rank() + cell_number[cell_id] + j + 2 * k;
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < face_value_per_node.numberOfSubValues(node_id); ++j) {
+              if (node_is_owned[node_id]) {
+                face_value_per_node(node_id, j) =
+                  node_number[node_id] + 100 * face_number[node_to_face_matrix[node_id][j]];
               }
             }
           });
 
         if (parallel::size() > 1) {
-          REQUIRE(not is_same_item_array(face_array_per_cell, face_array_per_cell_ref));
+          REQUIRE(not is_same_item_value(face_value_per_node, face_value_per_node_ref));
         }
 
         Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
-        synchronizer.synchronize(face_array_per_cell);
-
-        REQUIRE(is_same_item_array(face_array_per_cell, face_array_per_cell_ref));
+        synchronizer.synchronize(face_value_per_node);
+        REQUIRE(is_same_item_value(face_value_per_node, face_value_per_node_ref));
 
         // Check that exchange sizes are correctly stored (require
         // lines to be covered)
         if (parallel::size() > 1) {
-          reset_ghost_arrays(face_array_per_cell, cell_owner, 0);
-          REQUIRE(not is_same_item_array(face_array_per_cell, face_array_per_cell_ref));
-          synchronizer.synchronize(face_array_per_cell);
-          REQUIRE(is_same_item_array(face_array_per_cell, face_array_per_cell_ref));
+          reset_ghost_values(face_value_per_node, connectivity.nodeOwner(), 0);
+          REQUIRE(not is_same_item_value(face_value_per_node, face_value_per_node_ref));
+          synchronizer.synchronize(face_value_per_node);
+          REQUIRE(is_same_item_value(face_value_per_node, face_value_per_node_ref));
         }
       }
 
-      SECTION("forbidden synchronization")
+      SECTION("synchonize FaceValuePerEdge")
       {
+        const auto edge_is_owned = connectivity.edgeIsOwned();
+        const auto edge_number   = connectivity.edgeNumber();
+        const auto face_number   = connectivity.faceNumber();
+
+        const auto edge_to_face_matrix = connectivity.edgeToFaceMatrix();
+
+        FaceValuePerEdge<int> face_value_per_edge_ref{connectivity};
+
+        parallel_for(
+          connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
+            for (size_t j = 0; j < face_value_per_edge_ref.numberOfSubValues(edge_id); ++j) {
+              face_value_per_edge_ref(edge_id, j) =   // edge_owner[edge_id] +
+                edge_number[edge_id] + 100 * face_number[edge_to_face_matrix[edge_id][j]];
+            }
+          });
+
+        FaceValuePerEdge<int> face_value_per_edge{connectivity};
+        face_value_per_edge.fill(0);
+        parallel_for(
+          connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
+            for (size_t j = 0; j < face_value_per_edge.numberOfSubValues(edge_id); ++j) {
+              if (edge_is_owned[edge_id]) {
+                face_value_per_edge(edge_id, j) =
+                  edge_number[edge_id] + 100 * face_number[edge_to_face_matrix[edge_id][j]];
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_value(face_value_per_edge, face_value_per_edge_ref));
+        }
+
         Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(face_value_per_edge);
+        REQUIRE(is_same_item_value(face_value_per_edge, face_value_per_edge_ref));
 
-        SECTION("CellArrayPerNode")
-        {
-          CellArrayPerNode<int> cell_array_per_node{connectivity, 3};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_array_per_node),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (node)");
+        // Check that exchange sizes are correctly stored (required for
+        // these lines to be covered)
+        if (parallel::size() > 1) {
+          reset_ghost_values(face_value_per_edge, connectivity.edgeOwner(), 0);
+          REQUIRE(not is_same_item_value(face_value_per_edge, face_value_per_edge_ref));
+          synchronizer.synchronize(face_value_per_edge);
+          REQUIRE(is_same_item_value(face_value_per_edge, face_value_per_edge_ref));
         }
+      }
+
+      SECTION("synchonize EdgeValuePerNode")
+      {
+        const auto node_is_owned = connectivity.nodeIsOwned();
+        const auto node_number   = connectivity.nodeNumber();
+        const auto edge_number   = connectivity.edgeNumber();
+
+        const auto node_to_edge_matrix = connectivity.nodeToEdgeMatrix();
+
+        EdgeValuePerNode<int> edge_value_per_node_ref{connectivity};
+
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < edge_value_per_node_ref.numberOfSubValues(node_id); ++j) {
+              edge_value_per_node_ref(node_id, j) =   // node_owner[node_id] +
+                node_number[node_id] + 100 * edge_number[node_to_edge_matrix[node_id][j]];
+            }
+          });
 
-        SECTION("CellArrayPerEdge")
-        {
-          CellArrayPerEdge<int> cell_array_per_edge{connectivity, 3};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_array_per_edge),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (edge)");
+        EdgeValuePerNode<int> edge_value_per_node{connectivity};
+        edge_value_per_node.fill(0);
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < edge_value_per_node.numberOfSubValues(node_id); ++j) {
+              if (node_is_owned[node_id]) {
+                edge_value_per_node(node_id, j) =
+                  node_number[node_id] + 100 * edge_number[node_to_edge_matrix[node_id][j]];
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_value(edge_value_per_node, edge_value_per_node_ref));
         }
 
-        SECTION("CellArrayPerFace")
-        {
-          CellArrayPerFace<int> cell_array_per_face{connectivity, 3};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_array_per_face),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (face)");
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(edge_value_per_node);
+        REQUIRE(is_same_item_value(edge_value_per_node, edge_value_per_node_ref));
+
+        // Check that exchange sizes are correctly stored (required for
+        // these lines to be covered)
+        if (parallel::size() > 1) {
+          reset_ghost_values(edge_value_per_node, connectivity.nodeOwner(), 0);
+          REQUIRE(not is_same_item_value(edge_value_per_node, edge_value_per_node_ref));
+          synchronizer.synchronize(edge_value_per_node);
+          REQUIRE(is_same_item_value(edge_value_per_node, edge_value_per_node_ref));
         }
       }
     }
+  }
 
-    SECTION("2D")
+  SECTION("SubItemArrayPerItem")
+  {
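+    // Entry-wise comparison of two sub-item arrays; the result is reduced
+    // across all ranks so that every process agrees on the outcome.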
+    auto is_same_item_array = [](auto a, auto b) {
+      using IndexT = typename decltype(a)::index_type;
+      bool is_same = true;
+      for (IndexT i_item = 0; i_item < a.numberOfItems(); ++i_item) {
+        for (size_t l = 0; l < a.numberOfSubArrays(i_item); ++l) {
+          for (size_t k = 0; k < a.sizeOfArrays(); ++k) {
+            is_same &= (a(i_item, l)[k] == b(i_item, l)[k]);
+
+            if (a(i_item, l)[k] != b(i_item, l)[k]) {
+              std::cout << i_item << ":" << l << " a[" << k << "]=" << a(i_item, l)[k] << " b[" << k
+                        << "]=" << b(i_item, l)[k] << '\n';
+            }
+          }
+        }
+      }
+      return parallel::allReduceAnd(is_same);
+    };
+
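+    // Overwrite the entries of items owned by another rank, emulating stale
+    // ghost data that a subsequent synchronization must restore.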
+    auto reset_ghost_arrays = [](auto sub_item_array_per_item, auto item_owner, auto value) {
+      using IndexT = typename decltype(sub_item_array_per_item)::index_type;
+      static_assert(std::is_same_v<typename decltype(sub_item_array_per_item)::index_type,
+                                   typename decltype(item_owner)::index_type>);
+      for (IndexT i_item = 0; i_item < sub_item_array_per_item.numberOfItems(); ++i_item) {
+        if (item_owner[i_item] != static_cast<int>(parallel::rank())) {
+          for (size_t l = 0; l < sub_item_array_per_item.numberOfSubArrays(i_item); ++l) {
+            sub_item_array_per_item(i_item, l).fill(value);
+          }
+        }
+      }
+    };
+
+    SECTION("1D")
     {
-      constexpr size_t Dimension = 2;
+      constexpr size_t Dimension = 1;
       using ConnectivityType     = Connectivity<Dimension>;
 
-      const ConnectivityType& connectivity = MeshDataBaseForTests::get().hybrid2DMesh()->connectivity();
+      const ConnectivityType& connectivity = MeshDataBaseForTests::get().unordered1DMesh()->connectivity();
 
       SECTION("synchonize NodeArrayPerCell")
       {
@@ -1852,148 +2230,175 @@ TEST_CASE("Synchronizer", "[mesh]")
         }
       }
 
-      SECTION("synchonize NodeArrayPerFace")
+      SECTION("synchonize CellArrayPerNode")
       {
-        const auto face_owner  = connectivity.faceOwner();
-        const auto face_number = connectivity.faceNumber();
+        const auto node_is_owned = connectivity.nodeIsOwned();
+        const auto node_number   = connectivity.nodeNumber();
+        const auto cell_number   = connectivity.cellNumber();
 
-        NodeArrayPerFace<int> node_array_per_face_ref{connectivity, 3};
+        const auto node_to_cell_matrix = connectivity.nodeToCellMatrix();
+
+        CellArrayPerNode<int> cell_array_per_node_ref{connectivity, 3};
 
         parallel_for(
-          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
-            for (size_t j = 0; j < node_array_per_face_ref.numberOfSubArrays(face_id); ++j) {
-              for (size_t k = 0; k < node_array_per_face_ref.sizeOfArrays(); ++k) {
-                node_array_per_face_ref(face_id, j)[k] = face_owner[face_id] + face_number[face_id] + j + 2 * k;
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < cell_array_per_node_ref.numberOfSubArrays(node_id); ++j) {
+              for (size_t k = 0; k < cell_array_per_node_ref.sizeOfArrays(); ++k) {
+                cell_array_per_node_ref(node_id, j)[k] =
+                  node_number[node_id] + 100 * cell_number[node_to_cell_matrix[node_id][j]] + 2 * k;
               }
             }
           });
 
-        NodeArrayPerFace<int> node_array_per_face{connectivity, 3};
+        CellArrayPerNode<int> cell_array_per_node{connectivity, 3};
+        cell_array_per_node.fill(-1);
         parallel_for(
-          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
-            for (size_t j = 0; j < node_array_per_face_ref.numberOfSubArrays(face_id); ++j) {
-              for (size_t k = 0; k < node_array_per_face_ref.sizeOfArrays(); ++k) {
-                node_array_per_face(face_id, j)[k] = parallel::rank() + face_number[face_id] + j + 2 * k;
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            if (node_is_owned[node_id]) {
+              for (size_t j = 0; j < cell_array_per_node.numberOfSubArrays(node_id); ++j) {
+                for (size_t k = 0; k < cell_array_per_node.sizeOfArrays(); ++k) {
+                  cell_array_per_node(node_id, j)[k] =
+                    node_number[node_id] + 100 * cell_number[node_to_cell_matrix[node_id][j]] + 2 * k;
+                }
               }
             }
           });
 
         if (parallel::size() > 1) {
-          REQUIRE(not is_same_item_array(node_array_per_face, node_array_per_face_ref));
+          REQUIRE(not is_same_item_array(cell_array_per_node, cell_array_per_node_ref));
         }
 
         Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
-        synchronizer.synchronize(node_array_per_face);
+        synchronizer.synchronize(cell_array_per_node);
 
-        REQUIRE(is_same_item_array(node_array_per_face, node_array_per_face_ref));
+        REQUIRE(is_same_item_array(cell_array_per_node, cell_array_per_node_ref));
 
         // Check that exchange sizes are correctly stored (require
         // lines to be covered)
         if (parallel::size() > 1) {
-          reset_ghost_arrays(node_array_per_face, face_owner, 0);
-          REQUIRE(not is_same_item_array(node_array_per_face, node_array_per_face_ref));
-          synchronizer.synchronize(node_array_per_face);
-          REQUIRE(is_same_item_array(node_array_per_face, node_array_per_face_ref));
+          const auto& node_owner = connectivity.nodeOwner();
+          reset_ghost_arrays(cell_array_per_node, node_owner, 0);
+          REQUIRE(not is_same_item_array(cell_array_per_node, cell_array_per_node_ref));
+          synchronizer.synchronize(cell_array_per_node);
+          REQUIRE(is_same_item_array(cell_array_per_node, cell_array_per_node_ref));
         }
       }
 
-      SECTION("synchonize NodeArrayPerEdge")
+      SECTION("synchonize CellArrayPerEdge")
       {
-        const auto edge_owner  = connectivity.edgeOwner();
-        const auto edge_number = connectivity.edgeNumber();
+        const auto edge_is_owned = connectivity.edgeIsOwned();
+        const auto edge_number   = connectivity.edgeNumber();
+        const auto cell_number   = connectivity.cellNumber();
 
-        NodeArrayPerEdge<int> node_array_per_edge_ref{connectivity, 3};
+        const auto edge_to_cell_matrix = connectivity.edgeToCellMatrix();
+
+        CellArrayPerEdge<int> cell_array_per_edge_ref{connectivity, 3};
 
         parallel_for(
           connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
-            for (size_t j = 0; j < node_array_per_edge_ref.numberOfSubArrays(edge_id); ++j) {
-              for (size_t k = 0; k < node_array_per_edge_ref.sizeOfArrays(); ++k) {
-                node_array_per_edge_ref(edge_id, j)[k] = edge_owner[edge_id] + edge_number[edge_id] + j + 2 * k;
+            for (size_t j = 0; j < cell_array_per_edge_ref.numberOfSubArrays(edge_id); ++j) {
+              for (size_t k = 0; k < cell_array_per_edge_ref.sizeOfArrays(); ++k) {
+                cell_array_per_edge_ref(edge_id, j)[k] =
+                  edge_number[edge_id] + 100 * cell_number[edge_to_cell_matrix[edge_id][j]] + 2 * k;
               }
             }
           });
 
-        NodeArrayPerEdge<int> node_array_per_edge{connectivity, 3};
+        CellArrayPerEdge<int> cell_array_per_edge{connectivity, 3};
+        cell_array_per_edge.fill(-1);
         parallel_for(
           connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
-            for (size_t j = 0; j < node_array_per_edge_ref.numberOfSubArrays(edge_id); ++j) {
-              for (size_t k = 0; k < node_array_per_edge_ref.sizeOfArrays(); ++k) {
-                node_array_per_edge(edge_id, j)[k] = parallel::rank() + edge_number[edge_id] + j + 2 * k;
+            if (edge_is_owned[edge_id]) {
+              for (size_t j = 0; j < cell_array_per_edge.numberOfSubArrays(edge_id); ++j) {
+                for (size_t k = 0; k < cell_array_per_edge.sizeOfArrays(); ++k) {
+                  cell_array_per_edge(edge_id, j)[k] =
+                    edge_number[edge_id] + 100 * cell_number[edge_to_cell_matrix[edge_id][j]] + 2 * k;
+                }
               }
             }
           });
 
         if (parallel::size() > 1) {
-          REQUIRE(not is_same_item_array(node_array_per_edge, node_array_per_edge_ref));
+          REQUIRE(not is_same_item_array(cell_array_per_edge, cell_array_per_edge_ref));
         }
 
         Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
-        synchronizer.synchronize(node_array_per_edge);
+        synchronizer.synchronize(cell_array_per_edge);
 
-        REQUIRE(is_same_item_array(node_array_per_edge, node_array_per_edge_ref));
+        REQUIRE(is_same_item_array(cell_array_per_edge, cell_array_per_edge_ref));
 
         // Check that exchange sizes are correctly stored (require
         // lines to be covered)
         if (parallel::size() > 1) {
-          reset_ghost_arrays(node_array_per_edge, edge_owner, 0);
-          REQUIRE(not is_same_item_array(node_array_per_edge, node_array_per_edge_ref));
-          synchronizer.synchronize(node_array_per_edge);
-          REQUIRE(is_same_item_array(node_array_per_edge, node_array_per_edge_ref));
+          const auto& edge_owner = connectivity.edgeOwner();
+          reset_ghost_arrays(cell_array_per_edge, edge_owner, 0);
+          REQUIRE(not is_same_item_array(cell_array_per_edge, cell_array_per_edge_ref));
+          synchronizer.synchronize(cell_array_per_edge);
+          REQUIRE(is_same_item_array(cell_array_per_edge, cell_array_per_edge_ref));
         }
       }
 
-      SECTION("forbidden synchronization")
+      SECTION("synchonize CellArrayPerFace")
       {
-        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        const auto face_is_owned = connectivity.faceIsOwned();
+        const auto face_number   = connectivity.faceNumber();
+        const auto cell_number   = connectivity.cellNumber();
 
-        SECTION("CellArrayPerNode")
-        {
-          CellArrayPerNode<int> cell_array_per_node{connectivity, 3};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_array_per_node),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (node)");
-        }
+        const auto face_to_cell_matrix = connectivity.faceToCellMatrix();
 
-        SECTION("CellArrayPerEdge")
-        {
-          CellArrayPerEdge<int> cell_array_per_edge{connectivity, 3};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_array_per_edge),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (edge)");
-        }
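+        // Encoding the adjacent cell's global number also checks that the
+        // face-to-cell ordering is consistent between owner and ghost copies.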
+        CellArrayPerFace<int> cell_array_per_face_ref{connectivity, 3};
 
-        SECTION("CellArrayPerFace")
-        {
-          CellArrayPerFace<int> cell_array_per_face{connectivity, 3};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_array_per_face),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (face)");
-        }
+        parallel_for(
+          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
+            for (size_t j = 0; j < cell_array_per_face_ref.numberOfSubArrays(face_id); ++j) {
+              for (size_t k = 0; k < cell_array_per_face_ref.sizeOfArrays(); ++k) {
+                cell_array_per_face_ref(face_id, j)[k] =
+                  face_number[face_id] + 100 * cell_number[face_to_cell_matrix[face_id][j]] + 2 * k;
+              }
+            }
+          });
+
+        CellArrayPerFace<int> cell_array_per_face{connectivity, 3};
+        cell_array_per_face.fill(-1);
+        parallel_for(
+          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
+            if (face_is_owned[face_id]) {
+              for (size_t j = 0; j < cell_array_per_face.numberOfSubArrays(face_id); ++j) {
+                for (size_t k = 0; k < cell_array_per_face.sizeOfArrays(); ++k) {
+                  cell_array_per_face(face_id, j)[k] =
+                    face_number[face_id] + 100 * cell_number[face_to_cell_matrix[face_id][j]] + 2 * k;
+                }
+              }
+            }
+          });
 
-        SECTION("FaceArrayPerNode")
-        {
-          FaceArrayPerNode<int> face_array_per_node{connectivity, 3};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(face_array_per_node),
-                              "unexpected error: synchronization requires sub-item type (face) to be of lower "
-                              "dimension than item (node)");
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(cell_array_per_face, cell_array_per_face_ref));
         }
 
-        SECTION("EdgeArrayPerNode")
-        {
-          EdgeArrayPerNode<int> edge_array_per_node{connectivity, 3};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(edge_array_per_node),
-                              "unexpected error: synchronization requires sub-item type (edge) to be of lower "
-                              "dimension than item (node)");
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(cell_array_per_face);
+
+        REQUIRE(is_same_item_array(cell_array_per_face, cell_array_per_face_ref));
+
+        // Check that exchange sizes are correctly stored (required for
+        // these lines to be covered)
+        if (parallel::size() > 1) {
+          const auto& face_owner = connectivity.faceOwner();
+          reset_ghost_arrays(cell_array_per_face, face_owner, 0);
+          REQUIRE(not is_same_item_array(cell_array_per_face, cell_array_per_face_ref));
+          synchronizer.synchronize(cell_array_per_face);
+          REQUIRE(is_same_item_array(cell_array_per_face, cell_array_per_face_ref));
         }
       }
     }
 
-    SECTION("3D")
+    SECTION("2D")
     {
-      constexpr size_t Dimension = 3;
+      constexpr size_t Dimension = 2;
       using ConnectivityType     = Connectivity<Dimension>;
 
-      const ConnectivityType& connectivity = MeshDataBaseForTests::get().hybrid3DMesh()->connectivity();
+      const ConnectivityType& connectivity = MeshDataBaseForTests::get().hybrid2DMesh()->connectivity();
 
       SECTION("synchonize NodeArrayPerCell")
       {
@@ -2175,51 +2580,6 @@ TEST_CASE("Synchronizer", "[mesh]")
         }
       }
 
-      SECTION("synchonize EdgeArrayPerFace")
-      {
-        const auto face_owner  = connectivity.faceOwner();
-        const auto face_number = connectivity.faceNumber();
-
-        EdgeArrayPerFace<int> edge_array_per_face_ref{connectivity, 3};
-
-        parallel_for(
-          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
-            for (size_t j = 0; j < edge_array_per_face_ref.numberOfSubArrays(face_id); ++j) {
-              for (size_t k = 0; k < edge_array_per_face_ref.sizeOfArrays(); ++k) {
-                edge_array_per_face_ref(face_id, j)[k] = face_owner[face_id] + face_number[face_id] + j + 2 * k;
-              }
-            }
-          });
-
-        EdgeArrayPerFace<int> edge_array_per_face{connectivity, 3};
-        parallel_for(
-          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
-            for (size_t j = 0; j < edge_array_per_face_ref.numberOfSubArrays(face_id); ++j) {
-              for (size_t k = 0; k < edge_array_per_face_ref.sizeOfArrays(); ++k) {
-                edge_array_per_face(face_id, j)[k] = parallel::rank() + face_number[face_id] + j + 2 * k;
-              }
-            }
-          });
-
-        if (parallel::size() > 1) {
-          REQUIRE(not is_same_item_array(edge_array_per_face, edge_array_per_face_ref));
-        }
-
-        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
-        synchronizer.synchronize(edge_array_per_face);
-
-        REQUIRE(is_same_item_array(edge_array_per_face, edge_array_per_face_ref));
-
-        // Check that exchange sizes are correctly stored (require
-        // lines to be covered)
-        if (parallel::size() > 1) {
-          reset_ghost_arrays(edge_array_per_face, face_owner, 0);
-          REQUIRE(not is_same_item_array(edge_array_per_face, edge_array_per_face_ref));
-          synchronizer.synchronize(edge_array_per_face);
-          REQUIRE(is_same_item_array(edge_array_per_face, edge_array_per_face_ref));
-        }
-      }
-
       SECTION("synchonize NodeArrayPerEdge")
       {
         const auto edge_owner  = connectivity.edgeOwner();
@@ -2265,56 +2625,875 @@ TEST_CASE("Synchronizer", "[mesh]")
         }
       }
 
-      SECTION("forbidden synchronization")
+      SECTION("synchonize CellArrayPerNode")
       {
-        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        const auto node_is_owned = connectivity.nodeIsOwned();
+        const auto node_number   = connectivity.nodeNumber();
+        const auto cell_number   = connectivity.cellNumber();
 
-        SECTION("CellArrayPerNode")
-        {
-          CellArrayPerNode<int> cell_array_per_node{connectivity, 3};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_array_per_node),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (node)");
-        }
+        const auto node_to_cell_matrix = connectivity.nodeToCellMatrix();
 
-        SECTION("CellArrayPerEdge")
-        {
-          CellArrayPerEdge<int> cell_array_per_edge{connectivity, 3};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_array_per_edge),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (edge)");
-        }
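+        // The number of sub-arrays varies from node to node with the
+        // node-to-cell connectivity, so variable-size exchanges are exercised.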
+        CellArrayPerNode<int> cell_array_per_node_ref{connectivity, 3};
 
-        SECTION("CellArrayPerFace")
-        {
-          CellArrayPerFace<int> cell_array_per_face{connectivity, 3};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(cell_array_per_face),
-                              "unexpected error: synchronization requires sub-item type (cell) to be of lower "
-                              "dimension than item (face)");
-        }
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < cell_array_per_node_ref.numberOfSubArrays(node_id); ++j) {
+              for (size_t k = 0; k < cell_array_per_node_ref.sizeOfArrays(); ++k) {
+                cell_array_per_node_ref(node_id, j)[k] =
+                  node_number[node_id] + 100 * cell_number[node_to_cell_matrix[node_id][j]] + 2 * k;
+              }
+            }
+          });
+
+        CellArrayPerNode<int> cell_array_per_node{connectivity, 3};
+        cell_array_per_node.fill(-1);
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            if (node_is_owned[node_id]) {
+              for (size_t j = 0; j < cell_array_per_node.numberOfSubArrays(node_id); ++j) {
+                for (size_t k = 0; k < cell_array_per_node.sizeOfArrays(); ++k) {
+                  cell_array_per_node(node_id, j)[k] =
+                    node_number[node_id] + 100 * cell_number[node_to_cell_matrix[node_id][j]] + 2 * k;
+                }
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(cell_array_per_node, cell_array_per_node_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(cell_array_per_node);
+
+        REQUIRE(is_same_item_array(cell_array_per_node, cell_array_per_node_ref));
+
+        // Check that exchange sizes are correctly stored (required for
+        // these lines to be covered)
+        if (parallel::size() > 1) {
+          const auto& node_owner = connectivity.nodeOwner();
+          reset_ghost_arrays(cell_array_per_node, node_owner, 0);
+          REQUIRE(not is_same_item_array(cell_array_per_node, cell_array_per_node_ref));
+          synchronizer.synchronize(cell_array_per_node);
+          REQUIRE(is_same_item_array(cell_array_per_node, cell_array_per_node_ref));
+        }
+      }
+
+      SECTION("synchonize CellArrayPerEdge")
+      {
+        const auto edge_is_owned = connectivity.edgeIsOwned();
+        const auto edge_number   = connectivity.edgeNumber();
+        const auto cell_number   = connectivity.cellNumber();
+
+        const auto edge_to_cell_matrix = connectivity.edgeToCellMatrix();
+
+        CellArrayPerEdge<int> cell_array_per_edge_ref{connectivity, 3};
+
+        parallel_for(
+          connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
+            for (size_t j = 0; j < cell_array_per_edge_ref.numberOfSubArrays(edge_id); ++j) {
+              for (size_t k = 0; k < cell_array_per_edge_ref.sizeOfArrays(); ++k) {
+                cell_array_per_edge_ref(edge_id, j)[k] =
+                  edge_number[edge_id] + 100 * cell_number[edge_to_cell_matrix[edge_id][j]] + 2 * k;
+              }
+            }
+          });
+
+        CellArrayPerEdge<int> cell_array_per_edge{connectivity, 3};
+        cell_array_per_edge.fill(-1);
+        parallel_for(
+          connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
+            if (edge_is_owned[edge_id]) {
+              for (size_t j = 0; j < cell_array_per_edge.numberOfSubArrays(edge_id); ++j) {
+                for (size_t k = 0; k < cell_array_per_edge.sizeOfArrays(); ++k) {
+                  cell_array_per_edge(edge_id, j)[k] =
+                    edge_number[edge_id] + 100 * cell_number[edge_to_cell_matrix[edge_id][j]] + 2 * k;
+                }
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(cell_array_per_edge, cell_array_per_edge_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(cell_array_per_edge);
+
+        REQUIRE(is_same_item_array(cell_array_per_edge, cell_array_per_edge_ref));
+
+        // Check that exchange sizes are correctly stored (required for
+        // these lines to be covered)
+        if (parallel::size() > 1) {
+          const auto& edge_owner = connectivity.edgeOwner();
+          reset_ghost_arrays(cell_array_per_edge, edge_owner, 0);
+          REQUIRE(not is_same_item_array(cell_array_per_edge, cell_array_per_edge_ref));
+          synchronizer.synchronize(cell_array_per_edge);
+          REQUIRE(is_same_item_array(cell_array_per_edge, cell_array_per_edge_ref));
+        }
+      }
+
+      SECTION("synchonize CellArrayPerFace")
+      {
+        const auto face_is_owned = connectivity.faceIsOwned();
+        const auto face_number   = connectivity.faceNumber();
+        const auto cell_number   = connectivity.cellNumber();
+
+        const auto face_to_cell_matrix = connectivity.faceToCellMatrix();
+
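+        // In 2D an inner face has exactly two adjacent cells and a boundary
+        // face has one, so sub-array counts are 1 or 2 here.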
+        CellArrayPerFace<int> cell_array_per_face_ref{connectivity, 3};
+
+        parallel_for(
+          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
+            for (size_t j = 0; j < cell_array_per_face_ref.numberOfSubArrays(face_id); ++j) {
+              for (size_t k = 0; k < cell_array_per_face_ref.sizeOfArrays(); ++k) {
+                cell_array_per_face_ref(face_id, j)[k] =
+                  face_number[face_id] + 100 * cell_number[face_to_cell_matrix[face_id][j]] + 2 * k;
+              }
+            }
+          });
+
+        CellArrayPerFace<int> cell_array_per_face{connectivity, 3};
+        cell_array_per_face.fill(-1);
+        parallel_for(
+          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
+            if (face_is_owned[face_id]) {
+              for (size_t j = 0; j < cell_array_per_face.numberOfSubArrays(face_id); ++j) {
+                for (size_t k = 0; k < cell_array_per_face.sizeOfArrays(); ++k) {
+                  cell_array_per_face(face_id, j)[k] =
+                    face_number[face_id] + 100 * cell_number[face_to_cell_matrix[face_id][j]] + 2 * k;
+                }
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(cell_array_per_face, cell_array_per_face_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(cell_array_per_face);
+
+        REQUIRE(is_same_item_array(cell_array_per_face, cell_array_per_face_ref));
+
+        // Check that exchange sizes are correctly stored (required for
+        // these lines to be covered)
+        if (parallel::size() > 1) {
+          const auto& face_owner = connectivity.faceOwner();
+          reset_ghost_arrays(cell_array_per_face, face_owner, 0);
+          REQUIRE(not is_same_item_array(cell_array_per_face, cell_array_per_face_ref));
+          synchronizer.synchronize(cell_array_per_face);
+          REQUIRE(is_same_item_array(cell_array_per_face, cell_array_per_face_ref));
+        }
+      }
+
+      SECTION("synchonize FaceArrayPerNode")
+      {
+        const auto node_is_owned = connectivity.nodeIsOwned();
+        const auto node_number   = connectivity.nodeNumber();
+        const auto face_number   = connectivity.faceNumber();
+
+        const auto node_to_face_matrix = connectivity.nodeToFaceMatrix();
+
+        FaceArrayPerNode<int> face_array_per_node_ref{connectivity, 3};
+
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < face_array_per_node_ref.numberOfSubArrays(node_id); ++j) {
+              for (size_t k = 0; k < face_array_per_node_ref.sizeOfArrays(); ++k) {
+                face_array_per_node_ref(node_id, j)[k] =
+                  node_number[node_id] + 100 * face_number[node_to_face_matrix[node_id][j]] + 2 * k;
+              }
+            }
+          });
+
+        FaceArrayPerNode<int> face_array_per_node{connectivity, 3};
+        face_array_per_node.fill(-1);
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            if (node_is_owned[node_id]) {
+              for (size_t j = 0; j < face_array_per_node.numberOfSubArrays(node_id); ++j) {
+                for (size_t k = 0; k < face_array_per_node.sizeOfArrays(); ++k) {
+                  face_array_per_node(node_id, j)[k] =
+                    node_number[node_id] + 100 * face_number[node_to_face_matrix[node_id][j]] + 2 * k;
+                }
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(face_array_per_node, face_array_per_node_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(face_array_per_node);
+
+        REQUIRE(is_same_item_array(face_array_per_node, face_array_per_node_ref));
+
+        // Check that exchange sizes are correctly stored (required for
+        // these lines to be covered)
+        if (parallel::size() > 1) {
+          const auto& node_owner = connectivity.nodeOwner();
+          reset_ghost_arrays(face_array_per_node, node_owner, 0);
+          REQUIRE(not is_same_item_array(face_array_per_node, face_array_per_node_ref));
+          synchronizer.synchronize(face_array_per_node);
+          REQUIRE(is_same_item_array(face_array_per_node, face_array_per_node_ref));
+        }
+      }
+
+      SECTION("synchonize EdgeArrayPerNode")
+      {
+        const auto node_is_owned = connectivity.nodeIsOwned();
+        const auto node_number   = connectivity.nodeNumber();
+        const auto edge_number   = connectivity.edgeNumber();
+
+        const auto node_to_edge_matrix = connectivity.nodeToEdgeMatrix();
+
+        EdgeArrayPerNode<int> edge_array_per_node_ref{connectivity, 3};
+
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < edge_array_per_node_ref.numberOfSubArrays(node_id); ++j) {
+              for (size_t k = 0; k < edge_array_per_node_ref.sizeOfArrays(); ++k) {
+                edge_array_per_node_ref(node_id, j)[k] =
+                  node_number[node_id] + 100 * edge_number[node_to_edge_matrix[node_id][j]] + 2 * k;
+              }
+            }
+          });
+
+        EdgeArrayPerNode<int> edge_array_per_node{connectivity, 3};
+        edge_array_per_node.fill(-1);
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            if (node_is_owned[node_id]) {
+              for (size_t j = 0; j < edge_array_per_node.numberOfSubArrays(node_id); ++j) {
+                for (size_t k = 0; k < edge_array_per_node.sizeOfArrays(); ++k) {
+                  edge_array_per_node(node_id, j)[k] =
+                    node_number[node_id] + 100 * edge_number[node_to_edge_matrix[node_id][j]] + 2 * k;
+                }
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(edge_array_per_node, edge_array_per_node_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(edge_array_per_node);
+
+        REQUIRE(is_same_item_array(edge_array_per_node, edge_array_per_node_ref));
+
+        // Check that exchange sizes are correctly stored (required for
+        // these lines to be covered)
+        if (parallel::size() > 1) {
+          const auto& node_owner = connectivity.nodeOwner();
+          reset_ghost_arrays(edge_array_per_node, node_owner, 0);
+          REQUIRE(not is_same_item_array(edge_array_per_node, edge_array_per_node_ref));
+          synchronizer.synchronize(edge_array_per_node);
+          REQUIRE(is_same_item_array(edge_array_per_node, edge_array_per_node_ref));
+        }
+      }
+    }
+
+    SECTION("3D")
+    {
+      constexpr size_t Dimension = 3;
+      using ConnectivityType     = Connectivity<Dimension>;
+
+      const ConnectivityType& connectivity = MeshDataBaseForTests::get().hybrid3DMesh()->connectivity();
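+      // Being hybrid, the mesh mixes cell types, so per-item sub-array counts
+      // vary from one cell to the next.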
+
+      SECTION("synchonize NodeArrayPerCell")
+      {
+        const auto cell_owner  = connectivity.cellOwner();
+        const auto cell_number = connectivity.cellNumber();
+
+        NodeArrayPerCell<int> node_array_per_cell_ref{connectivity, 3};
+
+        parallel_for(
+          connectivity.numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
+            for (size_t j = 0; j < node_array_per_cell_ref.numberOfSubArrays(cell_id); ++j) {
+              for (size_t k = 0; k < node_array_per_cell_ref.sizeOfArrays(); ++k) {
+                node_array_per_cell_ref(cell_id, j)[k] = cell_owner[cell_id] + cell_number[cell_id] + j + 2 * k;
+              }
+            }
+          });
+
+        NodeArrayPerCell<int> node_array_per_cell{connectivity, 3};
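+        // On owned cells parallel::rank() equals cell_owner, so the two arrays
+        // agree there and differ exactly on the ghost cells.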
+        parallel_for(
+          connectivity.numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
+            for (size_t j = 0; j < node_array_per_cell_ref.numberOfSubArrays(cell_id); ++j) {
+              for (size_t k = 0; k < node_array_per_cell_ref.sizeOfArrays(); ++k) {
+                node_array_per_cell(cell_id, j)[k] = parallel::rank() + cell_number[cell_id] + j + 2 * k;
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(node_array_per_cell, node_array_per_cell_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(node_array_per_cell);
+
+        REQUIRE(is_same_item_array(node_array_per_cell, node_array_per_cell_ref));
+
+        // Check that exchange sizes are correctly stored (required for
+        // these lines to be covered)
+        if (parallel::size() > 1) {
+          reset_ghost_arrays(node_array_per_cell, cell_owner, 0);
+          REQUIRE(not is_same_item_array(node_array_per_cell, node_array_per_cell_ref));
+          synchronizer.synchronize(node_array_per_cell);
+          REQUIRE(is_same_item_array(node_array_per_cell, node_array_per_cell_ref));
+        }
+      }
+
+      SECTION("synchonize EdgeArrayPerCell")
+      {
+        const auto cell_owner  = connectivity.cellOwner();
+        const auto cell_number = connectivity.cellNumber();
+
+        EdgeArrayPerCell<int> edge_array_per_cell_ref{connectivity, 3};
+
+        parallel_for(
+          connectivity.numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
+            for (size_t j = 0; j < edge_array_per_cell_ref.numberOfSubArrays(cell_id); ++j) {
+              for (size_t k = 0; k < edge_array_per_cell_ref.sizeOfArrays(); ++k) {
+                edge_array_per_cell_ref(cell_id, j)[k] = cell_owner[cell_id] + cell_number[cell_id] + j + 2 * k;
+              }
+            }
+          });
+
+        EdgeArrayPerCell<int> edge_array_per_cell{connectivity, 3};
+        parallel_for(
+          connectivity.numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
+            for (size_t j = 0; j < edge_array_per_cell_ref.numberOfSubArrays(cell_id); ++j) {
+              for (size_t k = 0; k < edge_array_per_cell_ref.sizeOfArrays(); ++k) {
+                edge_array_per_cell(cell_id, j)[k] = parallel::rank() + cell_number[cell_id] + j + 2 * k;
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(edge_array_per_cell, edge_array_per_cell_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(edge_array_per_cell);
+
+        REQUIRE(is_same_item_array(edge_array_per_cell, edge_array_per_cell_ref));
+
+        // Check that exchange sizes are correctly stored (required for
+        // these lines to be covered)
+        if (parallel::size() > 1) {
+          reset_ghost_arrays(edge_array_per_cell, cell_owner, 0);
+          REQUIRE(not is_same_item_array(edge_array_per_cell, edge_array_per_cell_ref));
+          synchronizer.synchronize(edge_array_per_cell);
+          REQUIRE(is_same_item_array(edge_array_per_cell, edge_array_per_cell_ref));
+        }
+      }
+
+      SECTION("synchonize FaceArrayPerCell")
+      {
+        const auto cell_owner  = connectivity.cellOwner();
+        const auto cell_number = connectivity.cellNumber();
+
+        FaceArrayPerCell<int> face_array_per_cell_ref{connectivity, 3};
+
+        parallel_for(
+          connectivity.numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
+            for (size_t j = 0; j < face_array_per_cell_ref.numberOfSubArrays(cell_id); ++j) {
+              for (size_t k = 0; k < face_array_per_cell_ref.sizeOfArrays(); ++k) {
+                face_array_per_cell_ref(cell_id, j)[k] = cell_owner[cell_id] + cell_number[cell_id] + j + 2 * k;
+              }
+            }
+          });
+
+        FaceArrayPerCell<int> face_array_per_cell{connectivity, 3};
+        parallel_for(
+          connectivity.numberOfCells(), PUGS_LAMBDA(const CellId cell_id) {
+            for (size_t j = 0; j < face_array_per_cell_ref.numberOfSubArrays(cell_id); ++j) {
+              for (size_t k = 0; k < face_array_per_cell_ref.sizeOfArrays(); ++k) {
+                face_array_per_cell(cell_id, j)[k] = parallel::rank() + cell_number[cell_id] + j + 2 * k;
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(face_array_per_cell, face_array_per_cell_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(face_array_per_cell);
+
+        REQUIRE(is_same_item_array(face_array_per_cell, face_array_per_cell_ref));
+
+        // Check that exchange sizes are correctly stored (required for
+        // these lines to be covered)
+        if (parallel::size() > 1) {
+          reset_ghost_arrays(face_array_per_cell, cell_owner, 0);
+          REQUIRE(not is_same_item_array(face_array_per_cell, face_array_per_cell_ref));
+          synchronizer.synchronize(face_array_per_cell);
+          REQUIRE(is_same_item_array(face_array_per_cell, face_array_per_cell_ref));
+        }
+      }
+
+      SECTION("synchonize NodeArrayPerFace")
+      {
+        const auto face_owner  = connectivity.faceOwner();
+        const auto face_number = connectivity.faceNumber();
+
+        NodeArrayPerFace<int> node_array_per_face_ref{connectivity, 3};
+
+        parallel_for(
+          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
+            for (size_t j = 0; j < node_array_per_face_ref.numberOfSubArrays(face_id); ++j) {
+              for (size_t k = 0; k < node_array_per_face_ref.sizeOfArrays(); ++k) {
+                node_array_per_face_ref(face_id, j)[k] = face_owner[face_id] + face_number[face_id] + j + 2 * k;
+              }
+            }
+          });
+
+        NodeArrayPerFace<int> node_array_per_face{connectivity, 3};
+        parallel_for(
+          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
+            for (size_t j = 0; j < node_array_per_face_ref.numberOfSubArrays(face_id); ++j) {
+              for (size_t k = 0; k < node_array_per_face_ref.sizeOfArrays(); ++k) {
+                node_array_per_face(face_id, j)[k] = parallel::rank() + face_number[face_id] + j + 2 * k;
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(node_array_per_face, node_array_per_face_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(node_array_per_face);
+
+        REQUIRE(is_same_item_array(node_array_per_face, node_array_per_face_ref));
+
+        // Check that exchange sizes are correctly stored (required for
+        // these lines to be covered)
+        if (parallel::size() > 1) {
+          reset_ghost_arrays(node_array_per_face, face_owner, 0);
+          REQUIRE(not is_same_item_array(node_array_per_face, node_array_per_face_ref));
+          synchronizer.synchronize(node_array_per_face);
+          REQUIRE(is_same_item_array(node_array_per_face, node_array_per_face_ref));
+        }
+      }
+
+      SECTION("synchonize EdgeArrayPerFace")
+      {
+        const auto face_owner  = connectivity.faceOwner();
+        const auto face_number = connectivity.faceNumber();
+
+        EdgeArrayPerFace<int> edge_array_per_face_ref{connectivity, 3};
+
+        parallel_for(
+          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
+            for (size_t j = 0; j < edge_array_per_face_ref.numberOfSubArrays(face_id); ++j) {
+              for (size_t k = 0; k < edge_array_per_face_ref.sizeOfArrays(); ++k) {
+                edge_array_per_face_ref(face_id, j)[k] = face_owner[face_id] + face_number[face_id] + j + 2 * k;
+              }
+            }
+          });
+
+        EdgeArrayPerFace<int> edge_array_per_face{connectivity, 3};
+        parallel_for(
+          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
+            for (size_t j = 0; j < edge_array_per_face_ref.numberOfSubArrays(face_id); ++j) {
+              for (size_t k = 0; k < edge_array_per_face_ref.sizeOfArrays(); ++k) {
+                edge_array_per_face(face_id, j)[k] = parallel::rank() + face_number[face_id] + j + 2 * k;
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(edge_array_per_face, edge_array_per_face_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(edge_array_per_face);
+
+        REQUIRE(is_same_item_array(edge_array_per_face, edge_array_per_face_ref));
+
+        // Check that exchange sizes are correctly stored (required for
+        // these lines to be covered)
+        if (parallel::size() > 1) {
+          reset_ghost_arrays(edge_array_per_face, face_owner, 0);
+          REQUIRE(not is_same_item_array(edge_array_per_face, edge_array_per_face_ref));
+          synchronizer.synchronize(edge_array_per_face);
+          REQUIRE(is_same_item_array(edge_array_per_face, edge_array_per_face_ref));
+        }
+      }
+
+      SECTION("synchonize NodeArrayPerEdge")
+      {
+        const auto edge_owner  = connectivity.edgeOwner();
+        const auto edge_number = connectivity.edgeNumber();
+
+        NodeArrayPerEdge<int> node_array_per_edge_ref{connectivity, 3};
+
+        parallel_for(
+          connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
+            for (size_t j = 0; j < node_array_per_edge_ref.numberOfSubArrays(edge_id); ++j) {
+              for (size_t k = 0; k < node_array_per_edge_ref.sizeOfArrays(); ++k) {
+                node_array_per_edge_ref(edge_id, j)[k] = edge_owner[edge_id] + edge_number[edge_id] + j + 2 * k;
+              }
+            }
+          });
+
+        NodeArrayPerEdge<int> node_array_per_edge{connectivity, 3};
+        parallel_for(
+          connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
+            for (size_t j = 0; j < node_array_per_edge_ref.numberOfSubArrays(edge_id); ++j) {
+              for (size_t k = 0; k < node_array_per_edge_ref.sizeOfArrays(); ++k) {
+                node_array_per_edge(edge_id, j)[k] = parallel::rank() + edge_number[edge_id] + j + 2 * k;
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(node_array_per_edge, node_array_per_edge_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(node_array_per_edge);
+
+        REQUIRE(is_same_item_array(node_array_per_edge, node_array_per_edge_ref));
+
+        // Check that exchange sizes are correctly stored (required for
+        // these lines to be covered)
+        if (parallel::size() > 1) {
+          reset_ghost_arrays(node_array_per_edge, edge_owner, 0);
+          REQUIRE(not is_same_item_array(node_array_per_edge, node_array_per_edge_ref));
+          synchronizer.synchronize(node_array_per_edge);
+          REQUIRE(is_same_item_array(node_array_per_edge, node_array_per_edge_ref));
+        }
+      }
+
+      SECTION("synchonize CellArrayPerNode")
+      {
+        const auto node_is_owned = connectivity.nodeIsOwned();
+        const auto node_number   = connectivity.nodeNumber();
+        const auto cell_number   = connectivity.cellNumber();
+
+        const auto node_to_cell_matrix = connectivity.nodeToCellMatrix();
+
+        CellArrayPerNode<int> cell_array_per_node_ref{connectivity, 3};
+
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < cell_array_per_node_ref.numberOfSubArrays(node_id); ++j) {
+              for (size_t k = 0; k < cell_array_per_node_ref.sizeOfArrays(); ++k) {
+                cell_array_per_node_ref(node_id, j)[k] =
+                  node_number[node_id] + 100 * cell_number[node_to_cell_matrix[node_id][j]] + 2 * k;
+              }
+            }
+          });
+
+        CellArrayPerNode<int> cell_array_per_node{connectivity, 3};
+        cell_array_per_node.fill(-1);
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            if (node_is_owned[node_id]) {
+              for (size_t j = 0; j < cell_array_per_node.numberOfSubArrays(node_id); ++j) {
+                for (size_t k = 0; k < cell_array_per_node.sizeOfArrays(); ++k) {
+                  cell_array_per_node(node_id, j)[k] =
+                    node_number[node_id] + 100 * cell_number[node_to_cell_matrix[node_id][j]] + 2 * k;
+                }
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(cell_array_per_node, cell_array_per_node_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(cell_array_per_node);
+
+        REQUIRE(is_same_item_array(cell_array_per_node, cell_array_per_node_ref));
+
+        // Check that exchange sizes are correctly stored (required for
+        // these lines to be covered)
+        if (parallel::size() > 1) {
+          const auto& node_owner = connectivity.nodeOwner();
+          reset_ghost_arrays(cell_array_per_node, node_owner, 0);
+          REQUIRE(not is_same_item_array(cell_array_per_node, cell_array_per_node_ref));
+          synchronizer.synchronize(cell_array_per_node);
+          REQUIRE(is_same_item_array(cell_array_per_node, cell_array_per_node_ref));
+        }
+      }
+
+      SECTION("synchonize CellArrayPerEdge")
+      {
+        const auto edge_is_owned = connectivity.edgeIsOwned();
+        const auto edge_number   = connectivity.edgeNumber();
+        const auto cell_number   = connectivity.cellNumber();
+
+        const auto edge_to_cell_matrix = connectivity.edgeToCellMatrix();
+
+        CellArrayPerEdge<int> cell_array_per_edge_ref{connectivity, 3};
+
+        parallel_for(
+          connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
+            for (size_t j = 0; j < cell_array_per_edge_ref.numberOfSubArrays(edge_id); ++j) {
+              for (size_t k = 0; k < cell_array_per_edge_ref.sizeOfArrays(); ++k) {
+                cell_array_per_edge_ref(edge_id, j)[k] =
+                  edge_number[edge_id] + 100 * cell_number[edge_to_cell_matrix[edge_id][j]] + 2 * k;
+              }
+            }
+          });
+
+        CellArrayPerEdge<int> cell_array_per_edge{connectivity, 3};
+        cell_array_per_edge.fill(-1);
+        parallel_for(
+          connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
+            if (edge_is_owned[edge_id]) {
+              for (size_t j = 0; j < cell_array_per_edge.numberOfSubArrays(edge_id); ++j) {
+                for (size_t k = 0; k < cell_array_per_edge.sizeOfArrays(); ++k) {
+                  cell_array_per_edge(edge_id, j)[k] =
+                    edge_number[edge_id] + 100 * cell_number[edge_to_cell_matrix[edge_id][j]] + 2 * k;
+                }
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(cell_array_per_edge, cell_array_per_edge_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(cell_array_per_edge);
+
+        REQUIRE(is_same_item_array(cell_array_per_edge, cell_array_per_edge_ref));
+
+        // Check that exchange sizes are correctly stored (required for
+        // these lines to be covered)
+        if (parallel::size() > 1) {
+          const auto& edge_owner = connectivity.edgeOwner();
+          reset_ghost_arrays(cell_array_per_edge, edge_owner, 0);
+          REQUIRE(not is_same_item_array(cell_array_per_edge, cell_array_per_edge_ref));
+          synchronizer.synchronize(cell_array_per_edge);
+          REQUIRE(is_same_item_array(cell_array_per_edge, cell_array_per_edge_ref));
+        }
+      }
+
+      SECTION("synchonize CellArrayPerFace")
+      {
+        const auto face_is_owned = connectivity.faceIsOwned();
+        const auto face_number   = connectivity.faceNumber();
+        const auto cell_number   = connectivity.cellNumber();
+
+        const auto face_to_cell_matrix = connectivity.faceToCellMatrix();
+
+        CellArrayPerFace<int> cell_array_per_face_ref{connectivity, 3};
+
+        parallel_for(
+          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
+            for (size_t j = 0; j < cell_array_per_face_ref.numberOfSubArrays(face_id); ++j) {
+              for (size_t k = 0; k < cell_array_per_face_ref.sizeOfArrays(); ++k) {
+                cell_array_per_face_ref(face_id, j)[k] =
+                  face_number[face_id] + 100 * cell_number[face_to_cell_matrix[face_id][j]] + 2 * k;
+              }
+            }
+          });
+
+        CellArrayPerFace<int> cell_array_per_face{connectivity, 3};
+        cell_array_per_face.fill(-1);
+        parallel_for(
+          connectivity.numberOfFaces(), PUGS_LAMBDA(const FaceId face_id) {
+            if (face_is_owned[face_id]) {
+              for (size_t j = 0; j < cell_array_per_face.numberOfSubArrays(face_id); ++j) {
+                for (size_t k = 0; k < cell_array_per_face.sizeOfArrays(); ++k) {
+                  cell_array_per_face(face_id, j)[k] =
+                    face_number[face_id] + 100 * cell_number[face_to_cell_matrix[face_id][j]] + 2 * k;
+                }
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(cell_array_per_face, cell_array_per_face_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(cell_array_per_face);
+
+        REQUIRE(is_same_item_array(cell_array_per_face, cell_array_per_face_ref));
+
+        // Check that exchange sizes are correctly stored (required for
+        // these lines to be covered)
+        if (parallel::size() > 1) {
+          const auto& face_owner = connectivity.faceOwner();
+          reset_ghost_arrays(cell_array_per_face, face_owner, 0);
+          REQUIRE(not is_same_item_array(cell_array_per_face, cell_array_per_face_ref));
+          synchronizer.synchronize(cell_array_per_face);
+          REQUIRE(is_same_item_array(cell_array_per_face, cell_array_per_face_ref));
+        }
+      }
+
+      SECTION("synchonize FaceArrayPerNode")
+      {
+        const auto node_is_owned = connectivity.nodeIsOwned();
+        const auto node_number   = connectivity.nodeNumber();
+        const auto face_number   = connectivity.faceNumber();
+
+        const auto node_to_face_matrix = connectivity.nodeToFaceMatrix();
+
+        FaceArrayPerNode<int> face_array_per_node_ref{connectivity, 3};
+
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < face_array_per_node_ref.numberOfSubArrays(node_id); ++j) {
+              for (size_t k = 0; k < face_array_per_node_ref.sizeOfArrays(); ++k) {
+                face_array_per_node_ref(node_id, j)[k] =
+                  node_number[node_id] + 100 * face_number[node_to_face_matrix[node_id][j]] + 2 * k;
+              }
+            }
+          });
+
+        FaceArrayPerNode<int> face_array_per_node{connectivity, 3};
+        face_array_per_node.fill(-1);
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            if (node_is_owned[node_id]) {
+              for (size_t j = 0; j < face_array_per_node.numberOfSubArrays(node_id); ++j) {
+                for (size_t k = 0; k < face_array_per_node.sizeOfArrays(); ++k) {
+                  face_array_per_node(node_id, j)[k] =
+                    node_number[node_id] + 100 * face_number[node_to_face_matrix[node_id][j]] + 2 * k;
+                }
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(face_array_per_node, face_array_per_node_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(face_array_per_node);
+
+        REQUIRE(is_same_item_array(face_array_per_node, face_array_per_node_ref));
+
+        // Check that exchange sizes are correctly stored (required for
+        // these lines to be covered)
+        if (parallel::size() > 1) {
+          const auto& node_owner = connectivity.nodeOwner();
+          reset_ghost_arrays(face_array_per_node, node_owner, 0);
+          REQUIRE(not is_same_item_array(face_array_per_node, face_array_per_node_ref));
+          synchronizer.synchronize(face_array_per_node);
+          REQUIRE(is_same_item_array(face_array_per_node, face_array_per_node_ref));
+        }
+      }
+
+      SECTION("synchonize FaceArrayPerEdge")
+      {
+        const auto edge_is_owned = connectivity.edgeIsOwned();
+        const auto edge_number   = connectivity.edgeNumber();
+        const auto face_number   = connectivity.faceNumber();
+
+        const auto edge_to_face_matrix = connectivity.edgeToFaceMatrix();
+
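+        // In 3D an edge can border many faces; the edge-to-face matrix fixes
+        // their local order, which the encoded values below depend on.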
+        FaceArrayPerEdge<int> face_array_per_edge_ref{connectivity, 3};
+
+        parallel_for(
+          connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
+            for (size_t j = 0; j < face_array_per_edge_ref.numberOfSubArrays(edge_id); ++j) {
+              for (size_t k = 0; k < face_array_per_edge_ref.sizeOfArrays(); ++k) {
+                face_array_per_edge_ref(edge_id, j)[k] =
+                  edge_number[edge_id] + 100 * face_number[edge_to_face_matrix[edge_id][j]] + 2 * k;
+              }
+            }
+          });
+
+        FaceArrayPerEdge<int> face_array_per_edge{connectivity, 3};
+        face_array_per_edge.fill(-1);
+        parallel_for(
+          connectivity.numberOfEdges(), PUGS_LAMBDA(const EdgeId edge_id) {
+            if (edge_is_owned[edge_id]) {
+              for (size_t j = 0; j < face_array_per_edge.numberOfSubArrays(edge_id); ++j) {
+                for (size_t k = 0; k < face_array_per_edge.sizeOfArrays(); ++k) {
+                  face_array_per_edge(edge_id, j)[k] =
+                    edge_number[edge_id] + 100 * face_number[edge_to_face_matrix[edge_id][j]] + 2 * k;
+                }
+              }
+            }
+          });
+
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(face_array_per_edge, face_array_per_edge_ref));
+        }
+
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(face_array_per_edge);
 
-        SECTION("FaceArrayPerNode")
-        {
-          FaceArrayPerNode<int> face_array_per_node{connectivity, 3};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(face_array_per_node),
-                              "unexpected error: synchronization requires sub-item type (face) to be of lower "
-                              "dimension than item (node)");
+        REQUIRE(is_same_item_array(face_array_per_edge, face_array_per_edge_ref));
+
+        // Check that exchange sizes are correctly stored (required for
+        // these lines to be covered)
+        if (parallel::size() > 1) {
+          const auto& edge_owner = connectivity.edgeOwner();
+          reset_ghost_arrays(face_array_per_edge, edge_owner, 0);
+          REQUIRE(not is_same_item_array(face_array_per_edge, face_array_per_edge_ref));
+          synchronizer.synchronize(face_array_per_edge);
+          REQUIRE(is_same_item_array(face_array_per_edge, face_array_per_edge_ref));
         }
+      }
+
+      SECTION("synchonize EdgeArrayPerNode")
+      {
+        const auto node_is_owned = connectivity.nodeIsOwned();
+        const auto node_number   = connectivity.nodeNumber();
+        const auto edge_number   = connectivity.edgeNumber();
+
+        const auto node_to_edge_matrix = connectivity.nodeToEdgeMatrix();
+
+        EdgeArrayPerNode<int> edge_array_per_node_ref{connectivity, 3};
+
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            for (size_t j = 0; j < edge_array_per_node_ref.numberOfSubArrays(node_id); ++j) {
+              for (size_t k = 0; k < edge_array_per_node_ref.sizeOfArrays(); ++k) {
+                edge_array_per_node_ref(node_id, j)[k] =
+                  node_number[node_id] + 100 * edge_number[node_to_edge_matrix[node_id][j]] + 2 * k;
+              }
+            }
+          });
+
+        EdgeArrayPerNode<int> edge_array_per_node{connectivity, 3};
+        edge_array_per_node.fill(-1);
+        parallel_for(
+          connectivity.numberOfNodes(), PUGS_LAMBDA(const NodeId node_id) {
+            if (node_is_owned[node_id]) {
+              for (size_t j = 0; j < edge_array_per_node.numberOfSubArrays(node_id); ++j) {
+                for (size_t k = 0; k < edge_array_per_node.sizeOfArrays(); ++k) {
+                  edge_array_per_node(node_id, j)[k] =
+                    node_number[node_id] + 100 * edge_number[node_to_edge_matrix[node_id][j]] + 2 * k;
+                }
+              }
+            }
+          });
 
-        SECTION("FaceArrayPerEdge")
-        {
-          FaceArrayPerEdge<int> face_array_per_edge{connectivity, 3};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(face_array_per_edge),
-                              "unexpected error: synchronization requires sub-item type (face) to be of lower "
-                              "dimension than item (edge)");
+        if (parallel::size() > 1) {
+          REQUIRE(not is_same_item_array(edge_array_per_node, edge_array_per_node_ref));
         }
 
-        SECTION("EdgeArrayPerNode")
-        {
-          EdgeArrayPerNode<int> edge_array_per_node{connectivity, 3};
-          REQUIRE_THROWS_WITH(synchronizer.synchronize(edge_array_per_node),
-                              "unexpected error: synchronization requires sub-item type (edge) to be of lower "
-                              "dimension than item (node)");
+        Synchronizer& synchronizer = SynchronizerManager::instance().getConnectivitySynchronizer(&connectivity);
+        synchronizer.synchronize(edge_array_per_node);
+
+        REQUIRE(is_same_item_array(edge_array_per_node, edge_array_per_node_ref));
+
+        // Check that exchange sizes are correctly stored (required for
+        // these lines to be covered)
+        if (parallel::size() > 1) {
+          const auto& node_owner = connectivity.nodeOwner();
+          reset_ghost_arrays(edge_array_per_node, node_owner, 0);
+          REQUIRE(not is_same_item_array(edge_array_per_node, edge_array_per_node_ref));
+          synchronizer.synchronize(edge_array_per_node);
+          REQUIRE(is_same_item_array(edge_array_per_node, edge_array_per_node_ref));
         }
       }
     }
diff --git a/tests/test_main.cpp b/tests/test_main.cpp
index 084d9b9453cb683586ce8f9a6d7b72678bcd3aaa..d8641d318f243eb624915882a92fc15d57a71346 100644
--- a/tests/test_main.cpp
+++ b/tests/test_main.cpp
@@ -8,6 +8,7 @@
 #include <mesh/DualMeshManager.hpp>
 #include <mesh/MeshDataManager.hpp>
 #include <mesh/SynchronizerManager.hpp>
+#include <utils/GlobalVariableManager.hpp>
 #include <utils/Messenger.hpp>
 #include <utils/PETScWrapper.hpp>
 #include <utils/RandomEngine.hpp>
@@ -32,6 +33,9 @@ main(int argc, char* argv[])
     Kokkos::initialize(args);
   }
 
+  // disable HDF5 file locking to avoid spurious locking failures in tests
+  setenv("HDF5_USE_FILE_LOCKING", "FALSE", 1);
+
   PETScWrapper::initialize(argc, argv);
   SLEPcWrapper::initialize(argc, argv);
 
@@ -54,6 +58,7 @@ main(int argc, char* argv[])
       MeshDataManager::create();
       DualConnectivityManager::create();
       DualMeshManager::create();
+      GlobalVariableManager::create();
 
       MeshDataBaseForTests::create();
 
@@ -65,6 +70,7 @@ main(int argc, char* argv[])
 
       MeshDataBaseForTests::destroy();
 
+      GlobalVariableManager::destroy();
       DualMeshManager::destroy();
       DualConnectivityManager::destroy();
       MeshDataManager::destroy();