Commit a97645df authored by frey_m's avatar frey_m
Browse files

AMR test case: Update initialization of particles.

modified:   ippl/test/AMR/Distribution.cpp
modified:   ippl/test/AMR/testGaussian.cpp
modified:   ippl/test/AMR/testUnifSphere.cpp
modified:   ippl/test/AMR/testUnifSphereGrid.cpp
parent bfdb1b99
......@@ -24,10 +24,18 @@ void Distribution::uniform(double lower, double upper, size_t nloc, int seed) {
nloc_m = nloc;
std::mt19937_64 mt(seed);
std::mt19937_64 mt(0/*seed*/ /*0*/);
std::uniform_real_distribution<> dist(lower, upper);
// // assume that seed == rank of node
// mt.discard(6 * (nloc + 1) * seed);
// assume that seed == rank of node
// inefficient but only way to make sure that parallel distribution is equal to sequential
for (size_t i = 0; i < 6 * nloc_m * seed; ++i)
dist(mt);
x_m.resize(nloc);
y_m.resize(nloc);
z_m.resize(nloc);
......@@ -55,10 +63,18 @@ void Distribution::gaussian(double mean, double stddev, size_t nloc, int seed) {
nloc_m = nloc;
std::mt19937_64 mt(seed);
std::mt19937_64 mt(0/*seed*/ /*0*/);
std::normal_distribution<double> dist(mean, stddev);
// // assume that seed == rank of node
// mt.discard(6 * (nloc + 1) * seed);
// assume that seed == rank of node
// inefficient but only way to make sure that parallel distribution is equal to sequential
for (size_t i = 0; i < 6 * nloc_m * seed; ++i)
dist(mt);
x_m.resize(nloc);
y_m.resize(nloc);
z_m.resize(nloc);
......
......@@ -240,7 +240,7 @@ void doBoxLib(const Vektor<size_t, 3>& nr, size_t nParticles,
writePlotFile(plotsolve, rhs, phi, grad_phi, rr, geom, 0);
dynamic_cast<AmrPartBunch*>(bunch)->python_format(0);
// dynamic_cast<AmrPartBunch*>(bunch)->python_format(0);
}
......
......@@ -35,9 +35,13 @@
#include <random>
void initSphere(double r, PartBunchBase* bunch, int nParticles) {
bunch->create(nParticles);
bunch->create(nParticles / Ippl::getNodes());
std::mt19937_64 eng;
if ( Ippl::myNode() )
eng.seed(42 + Ippl::myNode() );
std::uniform_real_distribution<> ph(-1.0, 1.0);
std::uniform_real_distribution<> th(0.0, 2.0 * Physics::pi);
std::uniform_real_distribution<> u(0.0, 1.0);
......@@ -96,7 +100,7 @@ void doSolve(AmrOpal& myAmrOpal, PartBunchBase* bunch,
dynamic_cast<AmrPartBunch*>(bunch)->AssignDensity(0, false, rhs, base_level, 1, finest_level);
writeScalarField(rhs, *(geom[0].CellSize()), -0.05, "amr-rho_scalar-level-");
// writeScalarField(rhs, *(geom[0].CellSize()), -0.05, "amr-rho_scalar-level-");
// Check charge conservation
double totCharge = totalCharge(rhs, finest_level, geom);
......@@ -246,10 +250,10 @@ void doBoxLib(const Vektor<size_t, 3>& nr,
#endif
}
writeScalarField(phi, *(geom[0].CellSize()), lower[0], "amr-phi_scalar-level-");
writeVectorField(grad_phi, *(geom[0].CellSize()), lower[0]);
// writeScalarField(phi, *(geom[0].CellSize()), lower[0], "amr-phi_scalar-level-");
// writeVectorField(grad_phi, *(geom[0].CellSize()), lower[0]);
writePlotFile(plotsolve, rhs, phi, grad_phi, rr, geom, 0);
// writePlotFile(plotsolve, rhs, phi, grad_phi, rr, geom, 0);
}
......
......@@ -122,7 +122,7 @@ void doSolve(AmrOpal& myAmrOpal, PartBunchBase* bunch,
initSphereOnGrid(rhs, geom, a, R, nr);
writeScalarField(rhs, *(geom[0].CellSize()), -a, "amr-rho_scalar-level-");
// writeScalarField(rhs, *(geom[0].CellSize()), -a, "amr-rho_scalar-level-");
// Check charge conservation
double totCharge = totalCharge(rhs, finest_level, geom, false);
......@@ -244,10 +244,10 @@ void doBoxLib(const Vektor<size_t, 3>& nr,
#endif
}
writeScalarField(phi, *(geom[0].CellSize()), lower[0], "amr-phi_scalar-level-");
writeVectorField(grad_phi, *(geom[0].CellSize()), lower[0]);
// writeScalarField(phi, *(geom[0].CellSize()), lower[0], "amr-phi_scalar-level-");
// writeVectorField(grad_phi, *(geom[0].CellSize()), lower[0]);
writePlotFile(plotsolve, rhs, phi, grad_phi, rr, geom, 0);
// writePlotFile(plotsolve, rhs, phi, grad_phi, rr, geom, 0);
}
......
Markdown is supported
Attach a file by drag &amp; drop or click to upload.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment