waLBerla commit bbb9ec41
Authored 2 years ago by Markus Holzer; committed 2 years ago by Christoph Schwarzmeier.
Always create process map when creating a blockforest
Parent: f8d0e35d
Showing 2 changed files with 25 additions and 22 deletions:

  src/blockforest/Initialization.cpp   16 additions, 11 deletions
  src/core/mpi/MPIManager.h             9 additions, 11 deletions
src/blockforest/Initialization.cpp   (+16, −11)   view file @ bbb9ec41

@@ -50,7 +50,7 @@ shared_ptr< StructuredBlockForest > createUniformBlockGridFromConfig( const shar
                                                        CellInterval * requestedDomainSize,
                                                        const bool keepGlobalBlockInformation )
 {
-   if ( !!config )
+   if ( config != nullptr )
    {
       auto block = config->getGlobalBlock();
       if ( block ) {

@@ -200,17 +200,17 @@ shared_ptr< StructuredBlockForest > createUniformBlockGridFromConfig( const Conf
 //**********************************************************************************************************************
 shared_ptr< BlockForest >
 createBlockForest(      const AABB& domainAABB,
                         const uint_t numberOfXBlocks,         const uint_t numberOfYBlocks,         const uint_t numberOfZBlocks,
                         const uint_t numberOfXProcesses,      const uint_t numberOfYProcesses,      const uint_t numberOfZProcesses,
                         const bool   xPeriodic /* = false */, const bool   yPeriodic /* = false */, const bool   zPeriodic /* = false */,
                         const bool   keepGlobalBlockInformation /* = false */ )
 {
    const uint_t numberOfProcesses = numberOfXProcesses * numberOfYProcesses * numberOfZProcesses;

    if ( numeric_cast< int >( numberOfProcesses ) != MPIManager::instance()->numProcesses() )
       WALBERLA_ABORT( "The number of requested processes (" << numberOfProcesses << ") doesn't match the number "
                       "of active MPI processes (" << MPIManager::instance()->numProcesses() << ")!" );

    // initialize SetupBlockForest = determine domain decomposition

@@ -227,10 +227,14 @@ createBlockForest( const AABB& domainAABB,
    WALBERLA_MPI_SECTION()
    {
       auto mpiManager = MPIManager::instance();
       //create cartesian communicator only if not yet a cartesian communicator (or other communicator was created)
-      if ( ! mpiManager->rankValid() )
-      {
-         mpiManager->createCartesianComm( numberOfXProcesses, numberOfYProcesses, numberOfZProcesses, xPeriodic, yPeriodic, zPeriodic );
-
-         processIdMap.resize( numberOfProcesses );
+      if ( ! mpiManager->hasWorldCommSetup() )
+      {
+         if ( ! mpiManager->rankValid() )
+         {
+            mpiManager->createCartesianComm( numberOfXProcesses, numberOfYProcesses, numberOfZProcesses, xPeriodic, yPeriodic, zPeriodic );
+         }
+      }
+
+      processIdMap.resize( numberOfProcesses );

@@ -244,12 +248,13 @@ createBlockForest( const AABB& domainAABB,
             }
          }
       }
    }

    // calculate process distribution
    sforest.balanceLoad( blockforest::CartesianDistribution( numberOfXProcesses, numberOfYProcesses, numberOfZProcesses, &processIdMap ),
                         numberOfXProcesses * numberOfYProcesses * numberOfZProcesses );

    // create StructuredBlockForest (encapsulates a newly created BlockForest)
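For orientation, createBlockForest() is the factory whose behaviour this commit changes: the process ID map is now resized on every call, while the Cartesian communicator is only created when no communicator has been set up yet. Below is a minimal usage sketch based on the signature shown in the hunk above; the main() harness, include paths, domain extents and block/process counts are illustrative assumptions and are not part of the commit.

#include "blockforest/Initialization.h"
#include "core/Environment.h"
#include "core/math/AABB.h"

int main( int argc, char** argv )
{
   walberla::Environment env( argc, argv ); // initializes MPI and the MPIManager singleton

   using namespace walberla;

   // Illustrative decomposition: 2 x 1 x 1 blocks distributed over 2 x 1 x 1 processes.
   // Run with exactly 2 MPI ranks, otherwise the WALBERLA_ABORT in the hunk above fires.
   const AABB domainAABB( real_t(0), real_t(0), real_t(0),
                          real_t(10), real_t(5), real_t(5) );

   shared_ptr< BlockForest > forest =
      blockforest::createBlockForest( domainAABB,
                                      uint_t(2), uint_t(1), uint_t(1),   // blocks in x, y, z
                                      uint_t(2), uint_t(1), uint_t(1),   // processes in x, y, z
                                      false, false, false );             // periodicity in x, y, z

   // With this commit the process map is created on every call, even when a
   // world or Cartesian communicator already existed before the call.
   return 0;
}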
src/core/mpi/MPIManager.h   (+9, −11)   view file @ bbb9ec41

@@ -122,6 +122,7 @@ public:
    bool hasCartesianSetup() const { return cartesianSetup_; }
    /// Rank is valid after calling createCartesianComm() or useWorldComm()
    bool rankValid()         const { return rank_ >= 0; }
+   bool hasWorldCommSetup() const { return rankValid() && !hasCartesianSetup();}

    /// Indicates whether MPI-IO can be used with the current MPI communicator; certain versions of OpenMPI produce
    /// segmentation faults when using MPI-IO with a 3D Cartesian MPI communicator (see waLBerla issue #73)

@@ -135,13 +136,13 @@ public:
 private:

    /// Rank in MPI_COMM_WORLD
-   int worldRank_;
+   int worldRank_{0};

    /// Rank in the custom communicator
-   int rank_;
+   int rank_{-1};

    /// Total number of processes
-   int numProcesses_;
+   int numProcesses_{1};

    /// Use this communicator for all MPI calls
    /// this is in general not equal to MPI_COMM_WORLD

@@ -150,20 +151,17 @@ private:
    MPI_Comm comm_;

    /// Indicates whether initializeMPI has been called. If true, MPI_Finalize is called upon destruction
-   bool isMPIInitialized_;
+   bool isMPIInitialized_{false};

    /// Indicates whether a Cartesian communicator has been created
-   bool cartesianSetup_;
+   bool cartesianSetup_{false};

-   bool currentlyAborting_;
+   bool currentlyAborting_{false};

-   bool finalizeOnDestruction_;
+   bool finalizeOnDestruction_{false};

    // Singleton
-   MPIManager() : worldRank_(0), rank_(-1), numProcesses_(1), comm_(MPI_COMM_NULL),
-                  isMPIInitialized_(false), cartesianSetup_(false), currentlyAborting_(false),
-                  finalizeOnDestruction_(false)
-   { WALBERLA_NON_MPI_SECTION() { rank_ = 0; } }
+   MPIManager() : comm_(MPI_COMM_NULL) { WALBERLA_NON_MPI_SECTION() { rank_ = 0; } }

 }; // class MPIManager
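The MPIManager.h side of the commit replaces the long constructor initializer list with C++11 in-class default member initializers, so seven of the eight entries disappear from the constructor and only comm_ keeps an explicit initializer. The following self-contained sketch shows the same idiom with illustrative names; it is not waLBerla code.

#include <iostream>

// Illustrative class using in-class default member initializers, mirroring
// the pattern applied to MPIManager in this commit.
class Manager
{
 public:
   Manager() = default;                        // the {...} defaults below apply automatically
   explicit Manager( int handle )              // only non-default state is listed here
      : handle_( handle ) {}

   int  rank()  const { return rank_; }
   bool ready() const { return ready_; }

 private:
   int  rank_  { -1 };     // stays "invalid" until explicitly assigned
   int  size_  { 1 };
   bool ready_ { false };
   int  handle_{ 0 };
};

int main()
{
   Manager m;
   std::cout << m.rank() << ' ' << m.ready() << '\n';   // prints: -1 0
   return 0;
}

Keeping the defaults next to the member declarations means every constructor picks them up automatically, which is exactly what makes the slimmed-down MPIManager() constructor in the diff above sufficient.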