
Commit 02bc6fdb authored by frey_m

AMR test case: Add test with uniform charge distribution

modified:   ippl/test/AMR/CMakeLists.txt
new file:   ippl/test/AMR/testUniform.cpp
parent afee9a98
ippl/test/AMR/CMakeLists.txt:

@@ -70,6 +70,8 @@ IF (ENABLE_AMR_SOLVER)
 # add_executable (testError error.cpp AmrPartBunch.cpp H5Reader.cpp Distribution.cpp PoissonProblems.cpp Solver.cpp AmrOpal.cpp ${F90_source_files} ${CMAKE_CURRENT_SOURCE_DIR}/../../../src/Classic/Physics/Physics.cpp)
 add_executable (testGaussian testGaussian.cpp AmrOpal.cpp AmrPartBunch.cpp H5Reader.cpp Distribution.cpp ${F90_source_files} ${CMAKE_CURRENT_SOURCE_DIR}/../../../src/Classic/Physics/Physics.cpp Solver.cpp)
+add_executable (testUniform testUniform.cpp AmrOpal.cpp AmrPartBunch.cpp H5Reader.cpp Distribution.cpp ${F90_source_files} ${CMAKE_CURRENT_SOURCE_DIR}/../../../src/Classic/Physics/Physics.cpp Solver.cpp)
 add_executable (testUnifSphere testUnifSphere.cpp AmrOpal.cpp AmrPartBunch.cpp ${F90_source_files} ${CMAKE_CURRENT_SOURCE_DIR}/../../../src/Classic/Physics/Physics.cpp Solver.cpp)# HypreABecLap.H HypreABecLap.cpp)
 add_executable (testGridSolve testGridSolve.cpp ${F90_source_files} ${CMAKE_CURRENT_SOURCE_DIR}/../../../src/Classic/Physics/Physics.cpp Solver.cpp)
@@ -93,6 +95,9 @@ IF (ENABLE_AMR_SOLVER)
 target_link_libraries (testH5Read ${LIBS} -lgfortran ${CCSE_LIBRARIES} -lcboxlib ${OTHER_CMAKE_EXE_LINKER_FLAGS} ${CMAKE_DL_LIBS})
 target_link_libraries (testGaussian ${LIBS} -lgfortran ${CCSE_LIBRARIES} -lcboxlib ${OTHER_CMAKE_EXE_LINKER_FLAGS} ${CMAKE_DL_LIBS})
+target_link_libraries (testUniform ${LIBS} -lgfortran ${CCSE_LIBRARIES} -lcboxlib ${OTHER_CMAKE_EXE_LINKER_FLAGS} ${CMAKE_DL_LIBS})
 target_link_libraries (testUnifSphere ${LIBS} -lgfortran ${CCSE_LIBRARIES} -lcboxlib ${OTHER_CMAKE_EXE_LINKER_FLAGS} ${CMAKE_DL_LIBS})
 target_link_libraries (testGridSolve ${LIBS} -lgfortran ${CCSE_LIBRARIES} -lcboxlib ${OTHER_CMAKE_EXE_LINKER_FLAGS} ${CMAKE_DL_LIBS})
 target_link_libraries (testUnifSphereGrid ${LIBS} -lgfortran ${CCSE_LIBRARIES} -lcboxlib ${OTHER_CMAKE_EXE_LINKER_FLAGS} ${CMAKE_DL_LIBS})
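The new executable and link rules sit inside the existing IF (ENABLE_AMR_SOLVER) block, so the target is only generated when that option is switched on. A minimal configure-and-build sketch (build directory, generator and option syntax are assumptions, not part of this commit):

    cmake -DENABLE_AMR_SOLVER=ON <path-to-source>
    make testUniform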
ippl/test/AMR/testUniform.cpp (new file):
/*!
* @file testUniform.cpp
* @author Matthias Frey
* @date November 2016
 * @details Compute \f$\Delta\phi = -\rho/\epsilon_0\f$ where the charge
* density is a uniform distribution. Write plot files
* that can be visualized by yt (python visualize.py)
* or by AmrVis of the CCSE group at LBNL.
*
* Domain: [-0.5, 0.5] x [-0.5, 0.5] x [-0.5, 0.5]\n
* BC: Dirichlet boundary conditions\n
* Charge/particle: elementary charge\n
 * Uniform particle distribution within [-0.45, 0.45)^3
*
* Call:\n
* mpirun -np [#cores] testUniform [#gridpoints x] [#gridpoints y] [#gridpoints z]
* [#particles] [#levels] [max. box size]
*
 * @brief Computes \f$\Delta\phi = -\rho/\epsilon_0\f$
*/
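// A concrete example of the call documented above (the parameter values
// are purely illustrative):
//
//     mpirun -np 4 testUniform 64 64 64 1000000 2 32
//
// i.e. a 64x64x64 base grid, 10^6 particles, 2 levels of refinement on
// top of the base level (main() adds one to argv[5]) and a maximum box
// size of 32.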
#include "Ippl.h"
#include <string>
#include <fstream>
#include <vector>
#include <iostream>
#include <set>
#include <sstream>
#include <ParmParse.H>
#include "PartBunch.h"
#include "AmrPartBunch.h"
#include "Distribution.h"
#include "Solver.h"
#include "AmrOpal.h"
#include "helper_functions.h"
#include "writePlotFile.H"
#include <cmath>
#include "Physics/Physics.h"

void doSolve(AmrOpal& myAmrOpal, PartBunchBase* bunch,
container_t& rhs,
container_t& phi,
container_t& grad_phi,
const Array<Geometry>& geom,
const Array<int>& rr,
int nLevels,
Inform& msg)
{
static IpplTimings::TimerRef allocTimer = IpplTimings::getTimer("alloc-memory-grid");
static IpplTimings::TimerRef assignTimer = IpplTimings::getTimer("assign-charge");
// =======================================================================
// 5. prepare for multi-level solve
// =======================================================================
rhs.resize(nLevels);
phi.resize(nLevels);
grad_phi.resize(nLevels);
IpplTimings::startTimer(allocTimer);
for (int lev = 0; lev < nLevels; ++lev) {
initGridData(rhs, phi, grad_phi, myAmrOpal.boxArray()[lev], lev);
}
IpplTimings::stopTimer(allocTimer);
// Define the density on level 0 from all particles at all levels
int base_level = 0;
int finest_level = myAmrOpal.finestLevel();
IpplTimings::startTimer(assignTimer);
dynamic_cast<AmrPartBunch*>(bunch)->AssignDensity(0, false, rhs, base_level, 1, finest_level);
IpplTimings::stopTimer(assignTimer);
double totCharge = totalCharge(rhs, finest_level, geom);
msg << "Total Charge: " << totCharge << " C" << endl
<< "Vacuum permittivity: " << Physics::epsilon_0 << " F/m (= C/(m V)" << endl;
Real vol = (*(geom[0].CellSize()) * *(geom[0].CellSize()) * *(geom[0].CellSize()) );
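// Note: *(geom[0].CellSize()) dereferences only the first (x) spacing,
// so the product above is dx^3. This equals the true cell volume only
// for cubic cells, i.e. when the unit cube is resolved with the same
// number of grid points in all three directions.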
msg << "Cell volume: " << vol << " m^3" << endl;
// eps in C / (V * m)
double constant = -1.0 / Physics::epsilon_0; // in [V m / C]
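// Assumption: Solver::solve_for_accel takes rhs literally as the
// right-hand side of lap(phi) = rhs, so the deposited charge density
// rho is rescaled to -rho/eps_0, i.e. the Poisson equation
// lap(phi) = -rho/eps_0 in SI units.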
for (int i = 0; i <= finest_level; ++i) {
#ifdef UNIQUE_PTR
rhs[i]->mult(constant, 0, 1); // scale to -rho/eps_0
#else
rhs[i].mult(constant, 0, 1);
#endif
}
// **************************************************************************
// The offset can be used to shift the right-hand side so that the Poisson
// equation stays solvable (relevant for fully periodic boundaries); with
// the Dirichlet boundaries used here it is simply left at zero.
// **************************************************************************
Real offset = 0.;
// solve
Solver sol;
sol.solve_for_accel(rhs,
phi,
grad_phi,
geom,
base_level,
finest_level,
offset);
// undo the normalization so the plot files contain the raw charge density
for (int i = 0; i <= finest_level; ++i) {
#ifdef UNIQUE_PTR
rhs[i]->mult(1.0 / constant, 0, 1); // back to original units
#else
rhs[i].mult(1.0 / constant, 0, 1);
#endif
}
}

void doBoxLib(const Vektor<size_t, 3>& nr, size_t nParticles,
int nLevels, size_t maxBoxSize, Inform& msg)
{
static IpplTimings::TimerRef distTimer = IpplTimings::getTimer("init-distribution");
static IpplTimings::TimerRef regridTimer = IpplTimings::getTimer("regrid");
static IpplTimings::TimerRef redistTimer = IpplTimings::getTimer("particle-redistr");
// ========================================================================
// 1. initialize physical domain (just single-level)
// ========================================================================
std::array<double, BL_SPACEDIM> lower = {{-0.5, -0.5, -0.5}}; // m
std::array<double, BL_SPACEDIM> upper = {{ 0.5, 0.5, 0.5}}; // m
RealBox domain;
Array<BoxArray> ba;
Array<Geometry> geom;
Array<DistributionMapping> dmap;
Array<int> rr;
// in helper_functions.h
init(domain, ba, dmap, geom, rr, nr, nLevels, maxBoxSize, lower, upper);
// ========================================================================
// 2. initialize all particles (just single-level)
// ========================================================================
PartBunchBase* bunch = new AmrPartBunch(geom[0], dmap[0], ba[0]);
// initialize a particle distribution
unsigned long int nloc = nParticles / ParallelDescriptor::NProcs();
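// Integer division: if nParticles is not a multiple of the number of
// MPI ranks, the remainder particles are silently dropped.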
Distribution dist;
IpplTimings::startTimer(distTimer);
dist.uniform(-0.45, 0.45, nloc, ParallelDescriptor::MyProc());
// copy particles to the PartBunchBase object.
dist.injectBeam(*bunch);
IpplTimings::stopTimer(distTimer);
for (std::size_t i = 0; i < bunch->getLocalNum(); ++i)
bunch->setQM(Physics::q_e, i); // in [C]
// redistribute on single-level
IpplTimings::startTimer(redistTimer);
bunch->myUpdate();
IpplTimings::stopTimer(redistTimer);
bunch->gatherStatistics();
msg << "#Particles: " << nParticles << endl
<< "Charge per particle: " << bunch->getQM(0) << " C" << endl
<< "Total charge: " << nParticles * bunch->getQM(0) << " C" << endl;
// ========================================================================
// 3. tagging (i.e. create BoxArray's, DistributionMapping's of all
// other levels)
// ========================================================================
/*
* create an Amr object
*/
ParmParse pp("amr");
pp.add("max_grid_size", int(maxBoxSize));
Array<int> error_buf(nLevels, 0);
pp.addarr("n_error_buf", error_buf);
pp.add("grid_eff", 0.95);
Array<int> nCells(3);
for (int i = 0; i < 3; ++i)
nCells[i] = nr[i];
AmrOpal myAmrOpal(&domain, nLevels - 1, nCells, 0 /* cartesian */, bunch);
/*
* do tagging
*/
dynamic_cast<AmrPartBunch*>(bunch)->Define (myAmrOpal.Geom(),
myAmrOpal.DistributionMap(),
myAmrOpal.boxArray(),
rr);
// ========================================================================
// 4. multi-level redistribute
// ========================================================================
IpplTimings::startTimer(regridTimer);
for (int i = 0; i <= myAmrOpal.finestLevel() && i < myAmrOpal.maxLevel(); ++i)
myAmrOpal.regrid(i /*lbase*/, 0.0 /*time*/);
IpplTimings::stopTimer(regridTimer);
container_t rhs;
container_t phi;
container_t grad_phi;
std::string plotsolve = BoxLib::Concatenate("plt", 0, 4);
doSolve(myAmrOpal, bunch, rhs, phi, grad_phi, geom, rr, nLevels, msg);
msg << "Total field energy: " << totalFieldEnergy(grad_phi, rr) << endl;
for (int i = 0; i <= myAmrOpal.finestLevel(); ++i) {
#ifdef UNIQUE_PTR
msg << "Max. potential level " << i << ": "<< phi[i]->max(0) << endl
<< "Min. potential level " << i << ": " << phi[i]->min(0) << endl
<< "Max. ex-field level " << i << ": " << grad_phi[i]->max(0) << endl
<< "Min. ex-field level " << i << ": " << grad_phi[i]->min(0) << endl;
#else
msg << "Max. potential level " << i << ": "<< phi[i].max(0) << endl
<< "Min. potential level " << i << ": " << phi[i].min(0) << endl
<< "Max. ex-field level " << i << ": " << grad_phi[i].max(0) << endl
<< "Min. ex-field level " << i << ": " << grad_phi[i].min(0) << endl;
#endif
}
writePlotFile(plotsolve, rhs, phi, grad_phi, rr, geom, 0);
// dynamic_cast<AmrPartBunch*>(bunch)->python_format(0);
}

int main(int argc, char *argv[]) {
Ippl ippl(argc, argv);
Inform msg("Solver");
static IpplTimings::TimerRef mainTimer = IpplTimings::getTimer("main");
IpplTimings::startTimer(mainTimer);
std::stringstream call;
call << "Call: mpirun -np [#procs] " << argv[0]
<< " [#gridpoints x] [#gridpoints y] [#gridpoints z] [#particles] "
<< "[#levels] [max. box size]";
if ( argc < 7 ) {
msg << call.str() << endl;
return -1;
}
// number of grid points in each direction
Vektor<size_t, 3> nr(std::atoi(argv[1]),
std::atoi(argv[2]),
std::atoi(argv[3]));
size_t nParticles = std::atoi(argv[4]);
msg << "Particle test running with" << endl
<< "- #particles = " << nParticles << endl
<< "- grid = " << nr << endl;
BoxLib::Initialize(argc,argv, false);
size_t nLevels = std::atoi(argv[5]) + 1; // i.e. argv[5] = 0 --> nLevels = 1, only a single level
size_t maxBoxSize = std::atoi(argv[6]);
doBoxLib(nr, nParticles, nLevels, maxBoxSize, msg);
IpplTimings::stopTimer(mainTimer);
IpplTimings::print();
std::string timefile = std::string(argv[0])
+ "-timing-cores-"
+ std::to_string(Ippl::getNodes())
+ "-threads-1.dat";
IpplTimings::print(timefile);
return 0;
}