Commit 3ed381f8 authored by Christof Metzger-Kraus

Better error messages on failed PAssert, implements #141

parent a511bc66
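The change is mechanical throughout the diff: generic boolean assertions such as PAssert(i==j) or PAssert(i<Size) become comparison-specific macros (PAssert_EQ, PAssert_NE, PAssert_LT, PAssert_LE, PAssert_GT, PAssert_GE), and disjunctions like direction==+1 || direction==-1 are folded into a single comparison, PAssert_EQ(std::abs(direction), 1), so the failing value can be shown. The sketch below is not IPPL's actual implementation; the names SKETCH_PASSERT, SKETCH_PASSERT_LT, reportBoolFailure, and reportCompareFailure are hypothetical and exist only to illustrate why a comparison macro can produce a more useful message than a plain boolean assert: it still sees the two operands separately and can print their values next to the stringified expression.

// Hedged sketch only -- SKETCH_PASSERT, SKETCH_PASSERT_LT, reportBoolFailure,
// and reportCompareFailure are made-up names, not part of IPPL.
#include <iostream>
#include <sstream>
#include <stdexcept>

// Plain boolean failure: the message can only echo the expression text.
inline void reportBoolFailure(const char* expr, const char* file, int line)
{
    std::ostringstream msg;
    msg << file << ":" << line << ": assertion '" << expr << "' failed";
    throw std::runtime_error(msg.str());
}

// Comparison failure: the message also carries the values of both operands.
template <class A, class B>
void reportCompareFailure(const char* expr, const A& lhs, const B& rhs,
                          const char* file, int line)
{
    std::ostringstream msg;
    msg << file << ":" << line << ": assertion '" << expr
        << "' failed: lhs = " << lhs << ", rhs = " << rhs;
    throw std::runtime_error(msg.str());
}

// Old style: only the stringified expression is available on failure.
#define SKETCH_PASSERT(c) \
    do { if (!(c)) reportBoolFailure(#c, __FILE__, __LINE__); } while (0)

// New style: the macro receives the operands separately and can report them.
#define SKETCH_PASSERT_LT(a, b) \
    do { if (!((a) < (b))) \
        reportCompareFailure(#a " < " #b, (a), (b), __FILE__, __LINE__); } while (0)

int main()
{
    unsigned int i = 3, size = 2;
    try {
        SKETCH_PASSERT(i < size);     // reports only that "i < size" failed
    } catch (const std::exception& e) {
        std::cout << e.what() << std::endl;
    }
    try {
        SKETCH_PASSERT_LT(i, size);   // additionally reports lhs = 3, rhs = 2
    } catch (const std::exception& e) {
        std::cout << e.what() << std::endl;
    }
    return 0;
}

With that separation, a failed bounds check such as PAssert_LT(i, Size) can name the offending index and the bound, whereas the old PAssert(i<Size) could only state that the expression evaluated to false.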
......@@ -157,13 +157,13 @@ public:
: elem_m(model.elem_m), where_m(model.where_m) { }
const AssignProxy &operator=(const AssignProxy &a)
{
PAssert(where_m != 0 || a.elem_m == -a.elem_m);
PAssert_EQ(where_m != 0 || a.elem_m == -a.elem_m, true);
elem_m = where_m < 0 ? -a.elem_m : a.elem_m;
return *this;
}
const AssignProxy &operator=(const Element_t &e)
{
PAssert(where_m != 0 || e == -e);
PAssert_EQ(where_m != 0 || e == -e, true);
elem_m = where_m < 0 ? -e : e;
return *this;
}
......@@ -358,13 +358,13 @@ public:
: elem_m(model.elem_m), where_m(model.where_m) {}
const AssignProxy& operator=(const AssignProxy& a)
{
PAssert(where_m != 0 || a.elem_m == -a.elem_m);
PAssert_EQ(where_m != 0 || a.elem_m == -a.elem_m, true);
elem_m = where_m < 0 ? -a.elem_m : a.elem_m;
return *this;
}
const AssignProxy& operator=(const Element_t& e)
{
PAssert(where_m != 0 || e == -e);
PAssert_EQ(where_m != 0 || e == -e, true);
elem_m = where_m < 0 ? -e : e;
return *this;
}
......@@ -383,7 +383,7 @@ public:
// Operators
Element_t operator()(unsigned int i, unsigned int j) const {
PAssert(i==j);
PAssert_EQ(i, j);
return T(0.0);
}
......@@ -394,7 +394,7 @@ public:
}
AssignProxy operator()(unsigned int i, unsigned int j) {
PAssert(i==j);
PAssert_EQ(i, j);
return AssignProxy(AntiSymTenzor<T,1>::Zero, 0);
}
......
......@@ -213,25 +213,23 @@ public:
}
Element_t HL(unsigned int hi, unsigned int lo) const {
PAssert( hi >= lo );
PAssert( hi<D );
PAssert( lo<D );
PAssert_GE( hi, lo );
PAssert_LT( hi, D );
return X[hi*(hi+1)/2 + lo];
}
Element_t& HL(unsigned int hi, unsigned int lo) {
PAssert( hi >= lo );
PAssert( hi<D );
PAssert( lo<D );
PAssert_GE( hi, lo );
PAssert_LT( hi, D );
return X[hi*(hi+1)/2 + lo];
}
Element_t& operator[](unsigned int i) {
PAssert (i < Size);
PAssert_LT(i, Size);
return X[i];
}
Element_t operator[](unsigned int i) const {
PAssert (i < Size);
PAssert_LT(i, Size);
return X[i];
}
......
......@@ -175,13 +175,13 @@ public:
Element_t &operator[]( unsigned int i )
{
PAssert(i<Size);
PAssert_LT(i, Size);
return X[i];
}
Element_t operator[]( unsigned int i ) const
{
PAssert(i<Size);
PAssert_LT(i, Size);
return X[i];
}
......
......@@ -169,7 +169,7 @@ public:
// Check domain of incoming Field
const Layout_t& in_layout = f.getLayout();
const Domain_t& in_dom = in_layout.getDomain();
PAssert(this->checkDomain(this->getDomain(),in_dom));
PAssert_EQ(this->checkDomain(this->getDomain(),in_dom), true);
// Common loop iterate and other vars:
size_t d;
......
This diff is collapsed.
......@@ -216,7 +216,7 @@ template <unsigned Dim, class T>
inline void
FFTBase<Dim,T>::setDirectionName(int direction,
const char* directionName) {
PAssert(direction==+1 || direction==-1);
PAssert_EQ(std::abs(direction), 1);
directions_m[directionName] = direction;
return;
}
......@@ -244,7 +244,7 @@ FFTBase<Dim,T>::getDirection(const char* directionName) const {
template <unsigned Dim, class T>
inline bool
FFTBase<Dim,T>::transformDim(unsigned d) const {
PAssert(d<Dim);
PAssert_LT(d, Dim);
return transformDims_m[d];
}
......@@ -258,7 +258,7 @@ FFTBase<Dim,T>::transformDim(unsigned d) const {
template <unsigned Dim, class T>
inline unsigned
FFTBase<Dim,T>::activeDimension(unsigned d) const {
PAssert(d<nTransformDims_m);
PAssert_LT(d, nTransformDims_m);
return activeDims_m[d];
}
......
......@@ -288,8 +288,8 @@ SCSL<T>::callFFT(unsigned transformDim, int direction,
SCSL<T>::Complex_t* data) {
// check transform dimension and direction arguments
PAssert(transformDim<numTransformDims_m);
PAssert(direction==+1 || direction==-1);
PAssert_LT(transformDim, numTransformDims_m);
PAssert_EQ(std::abs(direction), 1);
// cast complex number pointer to T* for calling Fortran routines
T* rdata = reinterpret_cast<T*>(data);
......@@ -328,7 +328,7 @@ inline void
SCSL<T>::callFFT(unsigned transformDim, int direction, T* data) {
// check transform dimension argument
PAssert(transformDim<numTransformDims_m);
PAssert_LT(transformDim, numTransformDims_m);
// branch on transform type for this dimension
switch (transformType_m[transformDim]) {
case 0: // CC FFT
......
......@@ -202,8 +202,8 @@ FFTPACK<T>::callFFT(unsigned transformDim, int direction,
FFTPACK<T>::Complex_t* data) {
// check transform dimension and direction arguments
PAssert(transformDim<numTransformDims_m);
PAssert(direction==+1 || direction==-1);
PAssert_LT(transformDim, numTransformDims_m);
PAssert_EQ(std::abs(direction), 1);
// cast complex number pointer to T* for calling Fortran routines
T* rdata = reinterpret_cast<T*>(data);
......@@ -262,8 +262,8 @@ inline void
FFTPACK<T>::callFFT(unsigned transformDim, int direction, T* data) {
// check transform dimension and direction arguments
PAssert(transformDim<numTransformDims_m);
PAssert(direction==+1 || direction==-1);
PAssert_LT(transformDim, numTransformDims_m);
PAssert_EQ(std::abs(direction), 1);
// branch on transform type for this dimension
switch (transformType_m[transformDim]) {
......
......@@ -90,7 +90,7 @@ BCondBase<T,D,M,C>::BCondBase(unsigned int face, int i, int j)
int hi = i > j ? i : j;
m_component = ((hi+1)*hi/2) + lo;
} else if (getTensorOrder(get_tag(T())) == IPPL_ANTISYMTENSOR) {
PAssert(i > j);
PAssert_GT(i, j);
m_component = ((i-1)*i/2) + j;
} else {
ERRORMSG(
......@@ -4929,7 +4929,7 @@ fillSlabWithZero(Field<T,D,M,C>& field,
#endif // __MWERKS__
// Sanity check.
PAssert(component>=0);
PAssert_GE(component, 0);
// Build the expression and evaluate it. tjw:mwerks dies here:
Expr_t(data,Rhs_t(0),component).apply();
......@@ -5071,9 +5071,9 @@ calcEurekaDomain(const NDIndex<D>& realDomain,
}
// Sanity checks.
PAssert( low<=high );
PAssert( high<=slab[dim].max() );
PAssert( low >=slab[dim].min() );
PAssert_LE( low, high );
PAssert_LE( high, slab[dim].max() );
PAssert_GE( low, slab[dim].min() );
// Build the domain.
slab[dim] = Index(low,high);
......@@ -5154,9 +5154,9 @@ calcEurekaSlabToFill(const Field<T,D,M,CartesianCentering<CE,D,NC> >& field,
low++;
// Sanity checks.
PAssert( low<=high );
PAssert( high<=slab[d].max() );
PAssert( low >=slab[d].min() );
PAssert_LE( low, high );
PAssert_LE( high, slab[d].max() );
PAssert_GE( low, slab[d].min() );
// Record this part of the slab.
slab[d] = Index(low,high);
......@@ -5173,9 +5173,9 @@ calcEurekaSlabToFill(const Field<T,D,M,CartesianCentering<CE,D,NC> >& field,
high--;
// Sanity checks.
PAssert( low<=high );
PAssert( high<=slab[d].max() );
PAssert( low >=slab[d].min() );
PAssert_LE( low, high );
PAssert_LE( high, slab[d].max() );
PAssert_GE( low, slab[d].min() );
// Record this part of the slab.
slab[d] = Index(low,high);
......@@ -5663,4 +5663,4 @@ void PatchBC<T,D,M,C>::apply( Field<T,D,M,C>& A )
* $RCSfile: BCond.cpp,v $ $Author: adelmann $
* $Revision: 1.1.1.1 $ $Date: 2003/01/23 07:40:26 $
* IPPL_VERSION_ID: $Id: BCond.cpp,v 1.1.1.1 2003/01/23 07:40:26 adelmann Exp $
***************************************************************************/
\ No newline at end of file
***************************************************************************/
......@@ -218,7 +218,7 @@ public:
bool IsCompressed() const
{
bool is_compressed = CompressedBrickIterator<T,Dim>::IsCompressed();
PAssert((*CurrentLField).second->IsCompressed() == is_compressed);
PAssert_EQ((*CurrentLField).second->IsCompressed(), is_compressed);
return is_compressed;
}
......
......@@ -308,7 +308,7 @@ permute(const CompressedBrickIterator<T,D1>& iter,
}
// Didn't find it.
// Make sure the length is 1.
PAssert( current[d1].length() == 1 );
PAssert_EQ( current[d1].length(), 1 );
FoundIt:
;
}
......
......@@ -2,8 +2,8 @@
/***************************************************************************
*
* The IPPL Framework
*
* This program was prepared by PSI.
*
* This program was prepared by PSI.
* All rights in the program are reserved by PSI.
* Neither PSI nor the author(s)
* makes any warranty, express or implied, or assumes any liability or
......@@ -17,7 +17,7 @@
/***************************************************************************
*
* The IPPL Framework
*
*
*
* Visit http://people.web.psi.ch/adelmann/ for more details
*
......@@ -91,8 +91,8 @@ MAKE_INITIALIZER(long long)
//////////////////////////////////////////////////////////////////////
template<class T, unsigned Dim>
LField<T,Dim>::LField(const NDIndex<Dim>& owned,
const NDIndex<Dim>& allocated,
LField<T,Dim>::LField(const NDIndex<Dim>& owned,
const NDIndex<Dim>& allocated,
int vnode)
: vnode_m(vnode),
P(0),
......@@ -106,7 +106,7 @@ LField<T,Dim>::LField(const NDIndex<Dim>& owned,
ownedCompressIndex(-1),
offsetBlocks(Unique::get() % IPPL_OFFSET_BLOCKS)
{
// Give the LField some initial (compressed) value
LFieldInitializer<T>::apply(*Begin);
......@@ -120,8 +120,8 @@ LField<T,Dim>::LField(const NDIndex<Dim>& owned,
//UL: for pinned mempory allocation
template<class T, unsigned Dim>
LField<T,Dim>::LField(const NDIndex<Dim>& owned,
const NDIndex<Dim>& allocated,
LField<T,Dim>::LField(const NDIndex<Dim>& owned,
const NDIndex<Dim>& allocated,
int vnode, bool p)
: vnode_m(vnode),
P(0),
......@@ -135,7 +135,7 @@ LField<T,Dim>::LField(const NDIndex<Dim>& owned,
ownedCompressIndex(-1),
offsetBlocks(Unique::get() % IPPL_OFFSET_BLOCKS)
{
// Give the LField some initial (compressed) value
LFieldInitializer<T>::apply(*Begin);
......@@ -167,8 +167,8 @@ LField<T,Dim>::LField(const LField<T,Dim>& lf)
ownedCompressIndex(lf.ownedCompressIndex),
offsetBlocks(Unique::get() % IPPL_OFFSET_BLOCKS)
{
if ( lf.IsCompressed() )
{
......@@ -181,7 +181,7 @@ LField<T,Dim>::LField(const LField<T,Dim>& lf)
else
{
// Make sure we have something in this LField
PAssert(lf.Allocated.size()!=0);
PAssert_NE(lf.Allocated.size(), 0);
// If it is not compressed, allocate storage
int n = lf.Allocated.size();
......@@ -222,8 +222,8 @@ template<class T, unsigned Dim>
bool
LField<T,Dim>::TryCompress(bool baseOnPhysicalCells)
{
if (IsCompressed() || IpplInfo::noFieldCompression)
return false;
......@@ -264,8 +264,8 @@ template<class T, unsigned Dim>
bool
LField<T,Dim>::CanCompress(T val) const
{
// Debugging macro
LFIELDMSG(Inform dbgmsg("CanCompress"));
......@@ -287,11 +287,11 @@ LField<T,Dim>::CanCompress(T val) const
T *ptr1 = P;
T *mid1 = P + allocCompressIndex;
T *end1 = P + sz;
PAssert(sz > 0);
PAssert_GT(sz, 0);
PAssert(P != 0);
PAssert(allocCompressIndex >= 0);
PAssert(allocCompressIndex < sz);
PAssert_GE(allocCompressIndex, 0);
PAssert_LT(allocCompressIndex, sz);
// Quick short-cut check: compare to the last value in the
// array that did not match before.
......@@ -311,7 +311,7 @@ LField<T,Dim>::CanCompress(T val) const
LFIELDMSG(dbgmsg << " of " << allocCompressIndex << endl);
// It failed the test, so we can just keep the same index to
// check next time, and return.
// check next time, and return.
return false;
}
}
......@@ -378,7 +378,7 @@ LField<T,Dim>::CanCompress(T val) const
// If we are at this point, we did not find anything that did not
// match, so we can compress (woo hoo).
LFIELDMSG(dbgmsg << "Found that we CAN compress, after " << sz);
LFIELDMSG(dbgmsg << " compares." << endl);
ADDIPPLSTAT(incCompressionCompares, sz);
......@@ -397,8 +397,8 @@ LField<T,Dim>::CanCompress(T val) const
template<class T, unsigned Dim>
bool LField<T,Dim>::CanCompressBasedOnPhysicalCells() const
{
// Debugging macro
......@@ -426,7 +426,7 @@ bool LField<T,Dim>::CanCompressBasedOnPhysicalCells() const
if (IpplInfo::extraCompressChecks && ownedCompressIndex > 0)
{
// There was a previous value, so get that one to compare against
PAssert((unsigned int) ownedCompressIndex < getAllocated().size());
PAssert_LT((unsigned int) ownedCompressIndex, getAllocated().size());
val = *(P + ownedCompressIndex);
LFIELDMSG(dbgmsg << "Checking owned cells using previous ");
LFIELDMSG(dbgmsg << "comparison value " << val << " from index = ");
......@@ -485,8 +485,8 @@ template<class T, unsigned Dim>
void
LField<T,Dim>::Compress(const T& val)
{
LFIELDMSG(Inform dbgmsg("LField::Compress", INFORM_ALL_NODES));
LFIELDMSG(dbgmsg << "Compressing LField with domain = " << getOwned());
......@@ -535,8 +535,8 @@ template<class T, unsigned Dim>
void
LField<T,Dim>::CompressBasedOnPhysicalCells()
{
// We do nothing in this case if compression is turned off.
......@@ -566,10 +566,10 @@ LField<T,Dim>::CompressBasedOnPhysicalCells()
template<class T, unsigned Dim>
void LField<T,Dim>::ReallyUncompress(bool fill_domain)
{
PAssert(Allocated.size()!=0);
PAssert_NE(Allocated.size(), 0);
// Allocate the data.
......@@ -615,12 +615,12 @@ void LField<T,Dim>::ReallyUncompress(bool fill_domain)
//////////////////////////////////////////////////////////////////////
template<class T, unsigned Dim>
typename LField<T,Dim>::iterator
typename LField<T,Dim>::iterator
LField<T,Dim>::begin(const NDIndex<Dim>& domain)
{
// Remove this profiling because this is too lightweight.
//
//
//
//
return iterator(P,domain,Allocated,CompressedData);
}
......@@ -634,9 +634,9 @@ LField<T,Dim>::begin(const NDIndex<Dim>& domain)
//////////////////////////////////////////////////////////////////////
template<class T, unsigned Dim>
typename LField<T,Dim>::iterator
typename LField<T,Dim>::iterator
LField<T,Dim>::begin(const NDIndex<Dim>& domain, T& compstore)
{
{
if (IsCompressed())
compstore = CompressedData;
......@@ -651,11 +651,11 @@ LField<T,Dim>::begin(const NDIndex<Dim>& domain, T& compstore)
//////////////////////////////////////////////////////////////////////
template<class T, unsigned Dim>
void
void
LField<T,Dim>::swapData( LField<T,Dim>& a )
{
// Swap the pointers to the data.
{
......@@ -663,7 +663,7 @@ LField<T,Dim>::swapData( LField<T,Dim>& a )
P=a.P;
a.P=temp;
}
// Swap the compressed data.
{
T temp = CompressedData;
......@@ -707,15 +707,15 @@ LField<T,Dim>::swapData( LField<T,Dim>& a )
//
//////////////////////////////////////////////////////////////////////
// allocate memory for LField and if DKS is used and page-locked (pl) is +1 allocate
// allocate memory for LField and if DKS is used and page-locked (pl) is +1 allocate
// page-locked memory for storage
template<class T, unsigned Dim>
void
void
LField<T,Dim>::allocateStorage(int newsize)
{
PAssert(P == 0);
PAssert(newsize > 0);
PAssert(offsetBlocks >= 0);
PAssert_GT(newsize, 0);
PAssert_GE(offsetBlocks, 0);
// Determine how many blocks to offset the data, if we are asked to
......@@ -743,7 +743,7 @@ LField<T,Dim>::allocateStorage(int newsize)
//////////////////////////////////////////////////////////////////////
template<class T, unsigned Dim>
void
void
LField<T,Dim>::deallocateStorage()
{
if (P != 0)
......@@ -777,8 +777,8 @@ LField<T,Dim>::deallocateStorage()
template<class T, unsigned Dim>
void LField<T,Dim>::write(std::ostream& out) const
{
for (iterator p = begin(); p!=end(); ++p)
out << *p << " ";
}
......@@ -787,5 +787,5 @@ void LField<T,Dim>::write(std::ostream& out) const
/***************************************************************************
* $RCSfile: LField.cpp,v $ $Author: adelmann $
* $Revision: 1.1.1.1 $ $Date: 2003/01/23 07:40:26 $
* IPPL_VERSION_ID: $Id: LField.cpp,v 1.1.1.1 2003/01/23 07:40:26 adelmann Exp $
***************************************************************************/
* IPPL_VERSION_ID: $Id: LField.cpp,v 1.1.1.1 2003/01/23 07:40:26 adelmann Exp $
***************************************************************************/
\ No newline at end of file
......@@ -103,7 +103,7 @@ FindCutAxis(const NDIndex<Dim> &domain, const FieldLayout<Dim> &layout)
}
// Make sure we found one.
//PAssert(cutAxis>=0);
//PAssert_GE(cutAxis, 0);
if(cutAxis<0)
throw BinaryRepartitionFailed();
......@@ -416,7 +416,7 @@ ReceiveReduce(NDIndex<Dim>& domain, BareField<double,Dim>& weights,
// Receive a message.
int any_node = COMM_ANY_NODE;
Message *mess = Ippl::Comm->receive_block(any_node,reduce_tag);
PAssert(mess != 0);
PAssert(mess);
DEBUGMSG("ReceiveReduce: Comm->Receive from Node " << any_node << ", tag=" << reduce_tag << endl);
// Loop over all the domains in this message.
int received_domains = 0;
......@@ -499,7 +499,7 @@ ReceiveCuts(std::vector< NDIndex<Dim> > &domains,
int whichDomain = COMM_ANY_NODE;
int cutLocation = 0, cutAxis = 0;
Message *mess = Ippl::Comm->receive_block(whichDomain,bcast_tag);
PAssert(mess != 0);
PAssert(mess);
DEBUGMSG("ReceiveCuts: received bcast " << expected << endl);
mess->get(cutLocation);
mess->get(cutAxis);
......@@ -529,7 +529,7 @@ ReceiveCuts(std::vector< NDIndex<Dim> > &domains,
// Strip out the domains with no processors assigned.
domains.clear();
nprocs.clear();
PAssert(cutProcs.size() == cutDomains.size());
PAssert_EQ(cutProcs.size(), cutDomains.size());
for (unsigned int i=0; i<cutProcs.size(); ++i)
{
if ( cutProcs[i] != 0 )
......@@ -539,7 +539,7 @@ ReceiveCuts(std::vector< NDIndex<Dim> > &domains,
}
else
{
PAssert(cutDomains[i].size() == 0);
PAssert_EQ(cutDomains[i].size(), 0);
}
}
}
......
......@@ -267,7 +267,7 @@ private:
msg->get(s);
// Make sure the size isn't negative.
PAssert(s>=0);
PAssert_GE(s, 0);
// If there are any there, unpack them.
if ( s != 0 )
......
......@@ -186,9 +186,9 @@ ConejoBalancer::setupVnodes(int localVnodes, int remoteVnodes)
// is consistent with the previous.
else
{
PAssert(m_localVnodes == localVnodes);
PAssert(m_totalVnodes == localVnodes + remoteVnodes);
PAssert( (m_balancer!=0) == (m_myProc==0 ) );
PAssert_EQ(m_localVnodes, localVnodes);
PAssert_EQ(m_totalVnodes, localVnodes + remoteVnodes);
PAssert_EQ( (m_balancer!=0), (m_myProc==0 ) );
}
}
......@@ -211,7 +211,7 @@ void
ConejoBalancer::recordVnodeCount(int count, int proc)
{
// Make sure this processor number makes sense.
PAssert( proc >= 0 );
PAssert_GE( proc, 0 );
// Check to see if this is the first time it is being called.
if ( m_vnodeCounts[proc] < 0 )
......@@ -223,7 +223,7 @@ ConejoBalancer::recordVnodeCount(int count, int proc)