Commit 1c3b8609 authored by snuverink_j's avatar snuverink_j
Browse files

fix typos in comments

parent 4cfc7390
......@@ -27,7 +27,7 @@
* defines the necessary templated classes and functions to make
* ParticleAttrib a capable expression-template participant.
*
* For some types such as Vektor, Tenzor, etc. which have multipple items,
* For some types such as Vektor, Tenzor, etc. which have multiple items,
* we want to involve just the Nth item from each data element in an
* expression. The () operator here returns an object of type
* ParticleAttribElem, which will use the () operator on each individual
......
......@@ -642,7 +642,7 @@ void ChargedParticles<pl>::writeRestartInfo(string Fn, unsigned turn) {
partof_m << spos_m << endl;
partof_m << lost_num << endl;
partof_m << lost2_num << endl;
grüefli isch doch schön im engadin!
// grüefli isch doch schön im engadin!
for (unsigned i=0; i < nloc; i++)
partof_m << R[i](0) << setw(pwi) << " \t"
<< P[i](0) << setw(pwi) << " \t"
......
......@@ -651,7 +651,7 @@ void ChargedParticles<pl>::writeRestartInfo(string Fn, unsigned turn) {
partof_m << spos_m << endl;
partof_m << lost_num << endl;
partof_m << lost2_num << endl;
grüefli isch doch schön im engadin!
// grüefli isch doch schön im engadin!
for (unsigned i=0; i < nloc; i++)
partof_m << R[i](0) << setw(pwi) << " \t"
<< P[i](0) << setw(pwi) << " \t"
......
......@@ -2,7 +2,7 @@
\section{Implementation}
The current implementation of the Touschek Lifetime into a program is called ttrack (Touschek tracker). It is written in C/C++ and is based on $IP^2L$, and is designed to run fully in parallel. You specify linear lattices and beam geometries in the input file.
\subsection{Scaling}
As the following figure shows the program, doesn't scale very well, as the processor number gets higher. However, this might be due to the particle number being very low. We used $10^5$ particles in the simulation. This means that there are less then 400 particles per processor in average.
As the following figure shows the program, doesn't scale very well, as the processor number gets higher. However, this might be due to the particle number being very low. We used $10^5$ particles in the simulation. This means that there are less than 400 particles per processor in average.
\begin{figure}[here]
\centering
\includegraphics[width=0.80\textwidth]{scaling.pdf}
......
......@@ -18,7 +18,7 @@
* according to
*
* \f[
* result = \frac{1}{n} * \sqrt{\sum_{i=0}^n (measurement_i - value_i)^2}
* result = \frac{1}{n} * \sqrt{\sum_{i=start}^end (measurement_i - value_i)^2}
* \f]
*
*/
......
......@@ -408,7 +408,7 @@ void ParallelCyclotronTracker::visitCyclotron(const Cyclotron &cycl) {
*gmsg << "* 1.) It is up to the user to provide appropriate geometry, electric and magnetic fields!" << endl;
*gmsg << "* (Use BANDRF type cyclotron and use RFMAPFN to load both magnetic" << endl;
*gmsg << "* and electric fields, setting SUPERPOSE to an array of TRUE values.)" << endl;
*gmsg << "* 2.) For high currentst is strongly recommended to use the SAAMG fieldsolver," << endl;
*gmsg << "* 2.) For high currents it is strongly recommended to use the SAAMG fieldsolver," << endl;
*gmsg << "* FFT does not give the correct results (boundary conditions are missing)." << endl;
*gmsg << "* 3.) The whole geometry will be meshed and used for the fieldsolve." << endl;
*gmsg << "* There will be no transformations of the bunch into a local frame and consequently," << endl;
......
......@@ -200,7 +200,7 @@ private:
// Global Function.
// ------------------------------------------------------------------------
/// Euclidian inverse.
/// Euclidean inverse.
inline Euclid3D Inverse(const Euclid3D &t) {
return t.inverse();
}
......
......@@ -2170,7 +2170,7 @@ void Distribution::generateBinomial(size_t numberOfParticles) {
/*!
*
* \brief Following W. Johos for his report <a href="http://gfa.web.psi.ch/publications/presentations/WernerJoho/TM-11-14.pdf"> TM-11-14 </a>
* \brief Following W. Johos for his report <a href="https://intranet.psi.ch/pub/AUTHOR_WWW/ABE/TalksDE/TM-11-14.pdf"> TM-11-14 </a>
*
* For the \f$x,p_x\f$ phase space we have:
* \f[
......@@ -2195,10 +2195,10 @@ void Distribution::generateBinomial(size_t numberOfParticles) {
* cos(asin(correlationMatrix_m(2 * index + 1, 2 * index)));
if (std::abs(emittance(index)) > std::numeric_limits<double>::epsilon()) {
beta(index) = pow(sigmaR_m[index], 2.0) / emittance(index);
beta(index) = pow(sigmaR_m[index], 2.0) / emittance(index);
gamma(index) = pow(sigmaP_m[index], 2.0) / emittance(index);
} else {
beta(index) = sqrt(std::numeric_limits<double>::max());
beta(index) = sqrt(std::numeric_limits<double>::max());
gamma(index) = sqrt(std::numeric_limits<double>::max());
}
alpha(index) = -correlationMatrix_m(2 * index + 1, 2 * index)
......@@ -2278,7 +2278,7 @@ void Distribution::generateBinomial(size_t numberOfParticles) {
AL = Physics::two_pi * gsl_rng_uniform(randGen_m);
U = A * cos(AL);
V = A * sin(AL);
x[2] = X[2] * (Ux * correlationMatrix_m(4, 0) + Vx * l32 + U * l33);
x[2] = X[2] * (Ux * correlationMatrix_m(4, 0) + Vx * l32 + U * l33);
p[2] = PX[2] * (Ux * correlationMatrix_m(5, 0) + Vx * l42 + U * l43 + V * l44);
// Save to each processor in turn.
......
......@@ -133,7 +133,7 @@ public:
private:
//TODO: we need to update this an maybe change attached
//TODO: we need to update this and maybe change attached
//solver!
/// holding the currently active geometry
BoundaryGeometry *currentGeometry;
......
......@@ -289,14 +289,6 @@ void Beam::update() {
}
};
// Emittances added above by JMJ 4/4/2000 so that UNNAMED_BEAM has
// proper defaults (problem found by Julien Pancin, see email today).
// This did not seem to work for him to deleted them again. Confused.
// Slightly worried about this preventing anyone giving
// normalised emittances EXN, EYN, ETN.
// MISSING: Other data for BEAM.
// Set default name.
if(getOpalName().empty()) setOpalName("UNNAMED_BEAM");
}
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment