OPAL / src

Commit d7850229
authored Apr 06, 2020 by frey_m

replace MPI_Allreduce with allreduce from Ippl

parent d31834f0
Showing 4 changed files with 29 additions and 29 deletions (+29 -29):

ippl/src/Particle/ParticleSpatialLayout.h    +2  -2
src/Algorithms/ParallelSliceTracker.cpp      +1  -1
src/Algorithms/bet/EnvelopeBunch.cpp         +24 -24
src/Amr/BoxLibLayout.hpp                     +2  -2
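All four files follow the same pattern: a hand-written MPI_Allreduce on Ippl::getComm() is replaced by IPPL's templated allreduce helper, which takes the buffer(s), an element count, and a standard-library functor instead of an explicit MPI datatype and reduction operation. As a point of reference, here is a minimal sketch of how such a wrapper could forward to MPI_Allreduce; the trait names (mpi_datatype_of, mpi_op_of) and the use of MPI_COMM_WORLD are invented for illustration and are not IPPL's actual implementation.

// Sketch only, not IPPL's implementation: shows how a templated allreduce
// could map C++ element types and standard functors onto MPI datatypes and ops.
#include <mpi.h>
#include <functional>

// Hypothetical trait: C++ element type -> MPI datatype.
template <typename T> MPI_Datatype mpi_datatype_of();
template <> MPI_Datatype mpi_datatype_of<int>()           { return MPI_INT; }
template <> MPI_Datatype mpi_datatype_of<double>()        { return MPI_DOUBLE; }
template <> MPI_Datatype mpi_datatype_of<bool>()          { return MPI_CXX_BOOL; }
template <> MPI_Datatype mpi_datatype_of<unsigned long>() { return MPI_UNSIGNED_LONG; }

// Hypothetical trait: standard functor -> MPI reduction operation.
template <typename Op> MPI_Op mpi_op_of();
template <> MPI_Op mpi_op_of< std::plus<int> >()           { return MPI_SUM; }
template <> MPI_Op mpi_op_of< std::plus<double> >()        { return MPI_SUM; }
template <> MPI_Op mpi_op_of< std::plus<unsigned long> >() { return MPI_SUM; }
template <> MPI_Op mpi_op_of< std::greater<double> >()     { return MPI_MAX; }
template <> MPI_Op mpi_op_of< std::less<double> >()        { return MPI_MIN; }
template <> MPI_Op mpi_op_of< std::logical_and<bool> >()   { return MPI_LAND; }

// In-place form used at most call sites below: allreduce(&x, count, std::plus<T>()).
template <typename T, typename Op>
void allreduce(T* inout, int count, Op) {
    MPI_Allreduce(MPI_IN_PLACE, inout, count,
                  mpi_datatype_of<T>(), mpi_op_of<Op>(),
                  MPI_COMM_WORLD);   // IPPL presumably uses Ippl::getComm() here
}

// Two-buffer form used in the layout classes: allreduce(send, recv, count, std::plus<T>()).
template <typename T, typename Op>
void allreduce(T* in, T* out, int count, Op) {
    MPI_Allreduce(in, out, count,
                  mpi_datatype_of<T>(), mpi_op_of<Op>(),
                  MPI_COMM_WORLD);
}

With a mapping along these lines the call sites below translate one-to-one: MPI_SUM becomes std::plus<T>, MPI_LAND becomes std::logical_and<bool>, and MPI_MIN / MPI_MAX become std::less<double> / std::greater<double>.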
ippl/src/Particle/ParticleSpatialLayout.h

@@ -1141,7 +1141,7 @@ protected:
         }

         //reduce message count so every node knows how many messages to receive
-        MPI_Allreduce(&(msgsend[0]), &(msgrecv[0]), N, MPI_INT, MPI_SUM, Ippl::getComm());
+        allreduce(msgsend.data(), msgrecv.data(), N, std::plus<int>());

         int tag = Ippl::Comm->next_tag(P_SPATIAL_TRANSFER_TAG, P_LAYOUT_CYCLE);

@@ -1273,7 +1273,7 @@ protected:
         }

         //reduce message count so every node knows how many messages to receive
-        MPI_Allreduce(&(msgsend[0]), &(msgrecv[0]), N, MPI_INT, MPI_SUM, Ippl::getComm());
+        allreduce(msgsend.data(), msgrecv.data(), N, std::plus<int>());

         int tag = Ippl::Comm->next_tag(P_SPATIAL_TRANSFER_TAG, P_LAYOUT_CYCLE);
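One small detail in both hunks above: the old call addressed the buffers as &(msgsend[0]), while the new call uses msgsend.data(). Assuming msgsend and msgrecv are std::vector<int> (which the .data() calls suggest), the two forms are equivalent for a non-empty vector, and data() is additionally well defined when the vector is empty. A usage sketch, with N and the variable names taken from the diff:

// Assumed setup: one message-count slot per node, N = number of nodes.
std::vector<int> msgsend(N, 0), msgrecv(N, 0);
// old: MPI_Allreduce(&(msgsend[0]), &(msgrecv[0]), N, MPI_INT, MPI_SUM, Ippl::getComm());
allreduce(msgsend.data(), msgrecv.data(), N, std::plus<int>());   // element-wise sum across nodes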
src/Algorithms/ParallelSliceTracker.cpp

@@ -288,7 +288,7 @@ void ParallelSliceTracker::execute() {

        //reduce(&globalEOL_m, &globalEOL_m, OpBitwiseOrAssign());
        //reduce(&globalEOL_m, &globalEOL_m + 1, &globalEOL_m, OpBitwiseAndAssign());
-        MPI_Allreduce(MPI_IN_PLACE, &globalEOL_m, 1, MPI_CXX_BOOL, MPI_LAND, Ippl::getComm());
+        allreduce(&globalEOL_m, 1, std::logical_and<bool>());

        computeSpaceChargeFields();
        timeIntegration();
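The reduced quantity here is a flag rather than a count: the old call combined globalEOL_m across ranks with MPI_LAND on an MPI_CXX_BOOL, and the replacement expresses the same intent with std::logical_and<bool>, assuming the wrapper maps that functor to MPI_LAND as in the sketch above. The tracker then only sees end-of-line once every rank reports it:

// Assumed semantics: the global flag is the logical AND of the per-rank flags.
bool globalEOL_m = localSliceReachedEnd;                 // hypothetical per-rank result
allreduce(&globalEOL_m, 1, std::logical_and<bool>());    // true only if true on every rank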
src/Algorithms/bet/EnvelopeBunch.cpp

@@ -234,7 +234,7 @@ void EnvelopeBunch::runStats(EnvelopeBunchParameter sp, double *xAvg, double *xM
     }

     int nVTot = nV;
-    MPI_Allreduce(MPI_IN_PLACE, &nVTot, 1, MPI_INT, MPI_SUM, Ippl::getComm());
+    allreduce(&nVTot, 1, std::plus<int>());

     if(nVTot <= 0) {
         *xAvg = 0.0;
         *xMax = 0.0;

@@ -259,10 +259,10 @@ void EnvelopeBunch::runStats(EnvelopeBunchParameter sp, double *xAvg, double *xM
         }
     }

-    MPI_Allreduce(MPI_IN_PLACE, &M1, 1, MPI_DOUBLE, MPI_SUM, Ippl::getComm());
-    MPI_Allreduce(MPI_IN_PLACE, &M2, 1, MPI_DOUBLE, MPI_SUM, Ippl::getComm());
-    MPI_Allreduce(MPI_IN_PLACE, &maxv, 1, MPI_DOUBLE, MPI_MAX, Ippl::getComm());
-    MPI_Allreduce(MPI_IN_PLACE, &minv, 1, MPI_DOUBLE, MPI_MIN, Ippl::getComm());
+    allreduce(&M1, 1, std::plus<double>());
+    allreduce(&M2, 1, std::plus<double>());
+    allreduce(&maxv, 1, std::greater<double>());
+    allreduce(&minv, 1, std::less<double>());

     *xAvg = M1 / nVTot;
     *xMax = maxv;

@@ -707,8 +707,8 @@ void EnvelopeBunch::synchronizeSlices() {
         z_m[mySliceStartOffset_m + i] = slices_m[i]->p[SLI_z];
     }

-    MPI_Allreduce(MPI_IN_PLACE, &(z_m[0]), numSlices_m, MPI_DOUBLE, MPI_SUM, Ippl::getComm());
-    MPI_Allreduce(MPI_IN_PLACE, &(b_m[0]), numSlices_m, MPI_DOUBLE, MPI_SUM, Ippl::getComm());
+    allreduce(&(z_m[0]), numSlices_m, std::plus<double>());
+    allreduce(&(b_m[0]), numSlices_m, std::plus<double>());
 }

 void EnvelopeBunch::calcI() {

@@ -805,7 +805,7 @@ void EnvelopeBunch::calcI() {
         }
     }

-    MPI_Allreduce(MPI_IN_PLACE, &(I1[0]), n1, MPI_DOUBLE, MPI_SUM, Ippl::getComm());
+    allreduce(&(I1[0]), n1, std::plus<double>());

     for(int i = 1; i < n1 - 1; i++) {
         if(I1[i] == 0.0) I1[i] = I1[i - 1];

@@ -873,8 +873,8 @@ void EnvelopeBunch::calcI() {
         }
     }

-    //MPI_Allreduce(MPI_IN_PLACE, &(z2_temp[0]), n1, MPI_DOUBLE, MPI_SUM, Ippl::getComm());
-    //MPI_Allreduce(MPI_IN_PLACE, &(I2_temp[0]), n1, MPI_DOUBLE, MPI_SUM, Ippl::getComm());
+    //allreduce(&(z2_temp[0]), n1, std::plus<double>());
+    //allreduce(&(I2_temp[0]), n1, std::plus<double>());

     ////FIXME: we dont need copy of z2 and I2: z2[i-k] = z2[i];
     //int k = 0;

@@ -986,14 +986,14 @@ void EnvelopeBunch::cSpaceCharge() {
     }

     int nVTot = nV;
-    MPI_Allreduce(MPI_IN_PLACE, &nVTot, 1, MPI_INT, MPI_SUM, Ippl::getComm());
+    allreduce(&nVTot, 1, std::plus<int>());

     if(nVTot < 2) {
         msg << "Exiting, to few nV slices" << endl;
         return;
     }

-    MPI_Allreduce(MPI_IN_PLACE, &xi[0], numSlices_m, MPI_DOUBLE, MPI_SUM, Ippl::getComm());
-    MPI_Allreduce(MPI_IN_PLACE, &sm, 1, MPI_DOUBLE, MPI_SUM, Ippl::getComm());
+    allreduce(&xi[0], numSlices_m, std::plus<double>());
+    allreduce(&sm, 1, std::plus<double>());

     A0 = sm / nVTot;

     double dzMin = 5.0 * Physics::c * Q_m / (Imax * numSlices_m);

@@ -1221,7 +1221,7 @@ void EnvelopeBunch::timeStep(double tStep, double _zCat) {
     //double gz0 = 0.0;
     //if(Ippl::Comm->myNode() == 0)
     //gz0 = slices_m[0]->p[SLI_z];
-    //MPI_Allreduce(MPI_IN_PLACE, &gz0, 1, MPI_DOUBLE, MPI_SUM, Ippl::getComm());
+    //allreduce(&gz0, 1, std::plus<double>());

     ////XXX: bin holds local slice number
     //for(int j = 0; j < bins_m[nextBin].size(); j++) {

@@ -1413,7 +1413,7 @@ double EnvelopeBunch::AvBField() {
         }
     }

-    MPI_Allreduce(MPI_IN_PLACE, &bf, 1, MPI_DOUBLE, MPI_SUM, Ippl::getComm());
+    allreduce(&bf, 1, std::plus<double>());

     return bf / numSlices_m;
 }

@@ -1425,7 +1425,7 @@ double EnvelopeBunch::AvEField() {
         }
     }

-    MPI_Allreduce(MPI_IN_PLACE, &ef, 1, MPI_DOUBLE, MPI_SUM, Ippl::getComm());
+    allreduce(&ef, 1, std::plus<double>());

     return ef / numSlices_m;
 }

@@ -1438,8 +1438,8 @@ double EnvelopeBunch::Eavg() {
             nValid++;
         }
     }
-    MPI_Allreduce(MPI_IN_PLACE, &nValid, 1, MPI_INT, MPI_SUM, Ippl::getComm());
-    MPI_Allreduce(MPI_IN_PLACE, &sum, 1, MPI_DOUBLE, MPI_SUM, Ippl::getComm());
+    allreduce(&nValid, 1, std::plus<int>());
+    allreduce(&sum, 1, std::plus<double>());
     sum /= nValid;

     return (nValid > 0 ? ((Physics::EMASS * Physics::c * Physics::c / Physics::q_e) * (sum - 1.0)) : 0.0);
 }

@@ -1455,8 +1455,8 @@ double EnvelopeBunch::get_sPos() {
         }
     }

-    MPI_Allreduce(MPI_IN_PLACE, &count, 1, MPI_UNSIGNED_LONG, MPI_SUM, Ippl::getComm());
-    MPI_Allreduce(MPI_IN_PLACE, &refpos, 1, MPI_DOUBLE, MPI_SUM, Ippl::getComm());
+    allreduce(&count, 1, std::plus<size_t>());
+    allreduce(&refpos, 1, std::plus<double>());

     return refpos / count;
 }

@@ -1470,14 +1470,14 @@ double EnvelopeBunch::zAvg() {
         }
     }

-    MPI_Allreduce(MPI_IN_PLACE, &nV, 1, MPI_INT, MPI_SUM, Ippl::getComm());
+    allreduce(&nV, 1, std::plus<int>());

     if(nV < 1) {
         isValid_m = false;
         return -1;
         //throw OpalException("EnvelopeBunch", "EnvelopeBunch::zAvg() no valid slices left");
     }

-    MPI_Allreduce(MPI_IN_PLACE, &sum, 1, MPI_DOUBLE, MPI_SUM, Ippl::getComm());
+    allreduce(&sum, 1, std::plus<double>());

     return (sum / nV);
 }

@@ -1498,7 +1498,7 @@ double EnvelopeBunch::zTail() {
             min = slices_m[i]->p[SLI_z];

     //reduce(min, min, OpMinAssign());
-    MPI_Allreduce(MPI_IN_PLACE, &min, 1, MPI_DOUBLE, MPI_MIN, Ippl::getComm());
+    allreduce(&min, 1, std::less<double>());

     return min;
 }

@@ -1518,7 +1518,7 @@ double EnvelopeBunch::zHead() {
         if(slices_m[i]->p[SLI_z] > max)
             max = slices_m[i]->p[SLI_z];

     //reduce(max, max, OpMaxAssign());
-    MPI_Allreduce(MPI_IN_PLACE, &max, 1, MPI_DOUBLE, MPI_MAX, Ippl::getComm());
+    allreduce(&max, 1, std::greater<double>());

     return max;
 }
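Most of the reductions in this file are plain sums, but runStats(), zTail(), and zHead() also rely on the comparator mappings: std::less<double> stands in for MPI_MIN and std::greater<double> for MPI_MAX. That reading is an assumption about the wrapper (consistent with the sketch above), since taken literally these functors return a bool rather than the selected element:

// Min/max usage pattern from zTail()/zHead(); 'min' and 'max' hold the local
// extrema of slices_m[i]->p[SLI_z] before the reduction.
allreduce(&min, 1, std::less<double>());      // global minimum of the slice positions
allreduce(&max, 1, std::greater<double>());   // global maximum of the slice positions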
src/Amr/BoxLibLayout.hpp

@@ -234,7 +234,7 @@ void BoxLibLayout<T, Dim>::update(AmrParticleBase< BoxLibLayout<T,Dim> >& PData,
         }

         //reduce message count so every node knows how many messages to receive
-        MPI_Allreduce(msgsend, msgrecv, N, MPI_INT, MPI_SUM, Ippl::getComm());
+        allreduce(msgsend.data(), msgrecv.data(), N, std::plus<int>());

         int tag = Ippl::Comm->next_tag(P_SPATIAL_TRANSFER_TAG, P_LAYOUT_CYCLE);

@@ -331,7 +331,7 @@ void BoxLibLayout<T, Dim>::update(AmrParticleBase< BoxLibLayout<T,Dim> >& PData,
     //save how many total particles we have
     size_t TotalNum = 0;
-    MPI_Allreduce(&LocalNum, &TotalNum, 1, MPI_INT, MPI_SUM, Ippl::getComm());
+    allreduce(&LocalNum, &TotalNum, 1, std::plus<size_t>());

     // update our particle number counts
     PData.setTotalNum(TotalNum);   // set the total atom count
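The second hunk also removes a datatype mismatch: the old call reduced the particle counts with MPI_INT even though TotalNum is declared as size_t in the context line above (and the new functor std::plus<size_t> indicates LocalNum is a size_t as well), which is only safe where int and size_t happen to have the same width. Because the templated allreduce deduces the element type from its arguments, the reduction now uses the full width of size_t. A minimal illustration, with the accessor name being a guess:

// Type is deduced from the arguments instead of being named by hand.
size_t LocalNum = PData.getLocalNum();   // hypothetical accessor for the local particle count
size_t TotalNum = 0;
// old: MPI_Allreduce(&LocalNum, &TotalNum, 1, MPI_INT, MPI_SUM, Ippl::getComm());   // int vs size_t
allreduce(&LocalNum, &TotalNum, 1, std::plus<size_t>());   // width and signedness follow size_t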