Commit e2e3bfdf authored 2 years ago by Stéphane Del Pino
Fix mpi parallelism for flux advection solver
parent a92634c1
1 merge request: !167 Improve fluxing based remapping
Showing 3 changed files with 18 additions and 8 deletions:

src/mesh/ItemArrayUtils.hpp (+1, −1)
src/mesh/ItemValueUtils.hpp (+2, −2)
src/scheme/FluxingAdvectionSolver.cpp (+15, −5)
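The change applies one pattern throughout: quantities filled inside a parallel_for loop are only computed on the local MPI subdomain, so a synchronize() call is added afterwards to bring ghost items up to date before the values are reduced or reused. Below is a minimal sketch of that pattern using only identifiers that appear in the diffs (CellValue, parallel_for, PUGS_LAMBDA, synchronize, parallel::size()); the mesh variable and the loop body are placeholders, not code from the repository.

  // Sketch only: fill a per-cell quantity on the local subdomain, then
  // exchange ghost values so every rank sees a consistent CellValue.
  CellValue<double> q(mesh->connectivity());   // 'mesh' is a placeholder

  parallel_for(
    mesh->numberOfCells(), PUGS_LAMBDA(CellId cell_id) {
      q[cell_id] = 0;   // placeholder for the local computation
    });

  synchronize(q);   // no-op when parallel::size() == 1, ghost exchange otherwise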
src/mesh/ItemArrayUtils.hpp (+1, −1)
@@ -308,7 +308,7 @@ sum(const ItemArray<DataType, item_type, ConnectivityPtr>& item_value)
 template <typename DataType, ItemType item_type, typename ConnectivityPtr>
 void
-synchronize(ItemArray<DataType, item_type, ConnectivityPtr>& item_array)
+synchronize(ItemArray<DataType, item_type, ConnectivityPtr> item_array)
 {
   static_assert(not std::is_const_v<DataType>, "cannot synchronize ItemArray of const data");

   if (parallel::size() > 1) {
src/mesh/ItemValueUtils.hpp (+2, −2)
@@ -296,7 +296,7 @@ sum(const ItemValue<DataType, item_type, ConnectivityPtr>& item_value)
 template <typename DataType, ItemType item_type, typename ConnectivityPtr>
 void
-synchronize(ItemValue<DataType, item_type, ConnectivityPtr>& item_value)
+synchronize(ItemValue<DataType, item_type, ConnectivityPtr> item_value)
 {
   static_assert(not std::is_const_v<DataType>, "cannot synchronize ItemValue of const data");

   if (parallel::size() > 1) {
@@ -309,7 +309,7 @@ synchronize(ItemValue<DataType, item_type, ConnectivityPtr>& item_value)
 template <typename DataType, ItemType item_type, typename ConnectivityPtr>
 bool
-isSynchronized(const ItemValue<DataType, item_type, ConnectivityPtr>& item_value)
+isSynchronized(ItemValue<DataType, item_type, ConnectivityPtr> item_value)
 {
   bool is_synchronized = true;
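Both synchronize() overloads, and isSynchronized(), now take their ItemValue/ItemArray argument by value instead of by reference. The diffs suggest these containers are shallow views over a shared Array (in FluxingAdvectionSolver<1>::_computeAlgebraicFluxingVolume below, a NodeValue and a FaceValue are both built on top of the same fluxing_volumes Array), so a by-value parameter still reaches the shared storage while also accepting freshly constructed values such as the FaceValue synchronized just before it is returned. A small sketch of that assumed behaviour; connectivity and node_count are placeholders:

  // Assumed shallow-view behaviour, inferred from the diff (not repository code):
  Array<double> values{node_count};                  // one underlying storage
  NodeValue<double> as_nodes(connectivity, values);  // a view over 'values'
  synchronize(as_nodes);   // passing by value copies the view, not the data,
                           // so the shared 'values' storage is updated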
src/scheme/FluxingAdvectionSolver.cpp (+15, −5)
@@ -3,6 +3,8 @@
 #include <language/utils/EvaluateAtPoints.hpp>
 #include <mesh/Connectivity.hpp>
 #include <mesh/IMesh.hpp>
+#include <mesh/ItemArrayUtils.hpp>
+#include <mesh/ItemValueUtils.hpp>
 #include <mesh/Mesh.hpp>
 #include <mesh/MeshData.hpp>
 #include <mesh/MeshDataManager.hpp>
@@ -150,16 +152,19 @@ template <>
 FaceValue<double>
 FluxingAdvectionSolver<1>::_computeAlgebraicFluxingVolume()
 {
-  Array<double> algebraic_fluxing_volumes{m_new_mesh->numberOfNodes()};
-  NodeValue<double> fluxing_volume(m_new_mesh->connectivity(), algebraic_fluxing_volumes);
+  Array<double> fluxing_volumes{m_new_mesh->numberOfNodes()};
+  NodeValue<double> nodal_fluxing_volume(m_new_mesh->connectivity(), fluxing_volumes);

   auto old_xr = m_old_mesh->xr();
   auto new_xr = m_new_mesh->xr();

   parallel_for(
     m_new_mesh->numberOfNodes(),
-    PUGS_LAMBDA(NodeId node_id) { fluxing_volume[node_id] = new_xr[node_id][0] - old_xr[node_id][0]; });
-  return FaceValue<double>{m_new_mesh->connectivity(), algebraic_fluxing_volumes};
+    PUGS_LAMBDA(NodeId node_id) { nodal_fluxing_volume[node_id] = new_xr[node_id][0] - old_xr[node_id][0]; });
+
+  FaceValue<double> algebraic_fluxing_volumes(m_new_mesh->connectivity(), fluxing_volumes);
+  synchronize(algebraic_fluxing_volumes);
+  return algebraic_fluxing_volumes;
 }

 template <>
@@ -185,6 +190,7 @@ FluxingAdvectionSolver<2>::_computeAlgebraicFluxingVolume()
       algebraic_fluxing_volume[face_id] = 0.5 * det(M);
     });

+  synchronize(algebraic_fluxing_volume);
   return algebraic_fluxing_volume;
 }
@@ -290,11 +296,12 @@ FluxingAdvectionSolver<Dimension>::_computeCycleNumber(FaceValue<double> fluxing
   MeshData<Dimension>& mesh_data = MeshDataManager::instance().getMeshData(*m_old_mesh);
   const CellValue<const double> Vj = mesh_data.Vj();

-  const CellValue<size_t> ratio(m_old_mesh->connectivity());
+  CellValue<size_t> ratio(m_old_mesh->connectivity());

   parallel_for(
     m_old_mesh->numberOfCells(), PUGS_LAMBDA(CellId cell_id) {
       ratio[cell_id] = std::ceil(total_negative_flux[cell_id] / Vj[cell_id]);
     });

+  synchronize(ratio);
   size_t number_of_cycles = max(ratio);
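This is the one hunk where a value written in a parallel_for loop feeds directly into a reduction: synchronize(ratio) is inserted so that the subsequent max(ratio), which sets number_of_cycles, sees consistent ghost-cell values. If a regression guard were wanted here, the isSynchronized() helper touched in src/mesh/ItemValueUtils.hpp could be used; a hypothetical sketch, not part of the commit:

  synchronize(ratio);
  // hypothetical debug check: ghost cells should now agree across MPI ranks
  const bool ratio_is_consistent = isSynchronized(ratio);

  size_t number_of_cycles = max(ratio);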
@@ -379,6 +386,7 @@ FluxingAdvectionSolver<Dimension>::_remapOne(const CellValue<const double>& step
       }
     });

+  synchronize(new_q);
   old_q = new_q;
 }
@@ -411,6 +419,8 @@ FluxingAdvectionSolver<Dimension>::_remapAllQuantities()
       }
     });

+  synchronize(step_Vj);
+
   CellValue<double> inv_Vj(m_old_mesh->connectivity());
   parallel_for(
     m_new_mesh->numberOfCells(), PUGS_LAMBDA(CellId cell_id) { inv_Vj[cell_id] = 1 / step_Vj[cell_id]; });