yade-dev team mailing list archive - Message #10627
[Branch ~yade-pkg/yade/git-trunk] Rev 3862: Hack YADE_CLASS macro to make it work with c++ templates (introduces a macro for pyClass name different from c++ class name)
------------------------------------------------------------
revno: 3862
committer: Bruno Chareyre <bruno.chareyre@xxxxxxxxxxx>
timestamp: Tue 2014-04-01 15:18:38 +0200
message:
Hack YADE_CLASS macro to make it work with c++ templates (introduces a macro for pyClass name different from c++ class name)
modified:
pkg/dem/DFNFlow.cpp
pkg/dem/DummyFlowEngine.cpp
pkg/dem/FlowEngine.cpp
pkg/dem/FlowEngine.hpp
pkg/dem/FlowEngine.ipp
pkg/dem/PeriodicFlowEngine.cpp
pkg/dem/PeriodicFlowEngine.hpp
--
lp:yade
https://code.launchpad.net/~yade-pkg/yade/git-trunk
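
For readers skimming the patch below: every concrete engine source file now chooses the Python-visible name of its TemplateFlowEngine instantiation by defining TEMPLATE_FLOW_NAME before including FlowEngine.hpp (the header now errors out if the macro is missing). A minimal sketch of the resulting boilerplate, modelled on DummyFlowEngine.cpp and FlowEngine.cpp from this commit; MyFlowEngine and MyFlowEngineT are illustrative names, not part of the patch:

// MyFlowEngine.cpp (hypothetical) -- pick the Python name of the template instance first
#define TEMPLATE_FLOW_NAME MyFlowEngineT
#include <yade/pkg/dem/FlowEngine.hpp>

// Instantiate and register the intermediate template class under that name
typedef TemplateFlowEngine<FlowCellInfo,FlowVertexInfo> TEMPLATE_FLOW_NAME;
REGISTER_SERIALIZABLE(TEMPLATE_FLOW_NAME);
YADE_PLUGIN((TEMPLATE_FLOW_NAME));

// Then derive the actual engine from it and register it as usual
class MyFlowEngine : public TEMPLATE_FLOW_NAME
{
	public :
	YADE_CLASS_BASE_DOC_ATTRS_INIT_CTOR_PY(MyFlowEngine,MyFlowEngineT,"Hypothetical engine illustrating the TEMPLATE_FLOW_NAME pattern.",
	,,
	,
	//extra .def(...) python bindings would go here
	)
	DECLARE_LOGGER;
};
REGISTER_SERIALIZABLE(MyFlowEngine);
CREATE_LOGGER(MyFlowEngine);
YADE_PLUGIN((MyFlowEngine));
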
=== modified file 'pkg/dem/DFNFlow.cpp'
--- pkg/dem/DFNFlow.cpp 2014-03-21 18:47:45 +0000
+++ pkg/dem/DFNFlow.cpp 2014-04-01 13:18:38 +0000
@@ -10,9 +10,9 @@
//keep this #ifdef for committed versions unless you really have a stable version that should be compiled by default
//it will save compilation time for everyone else
//when you want it compiled, you can pass -DDFNFLOW to cmake, or just uncomment the following line
-#define DFNFLOW
+// #define DFNFLOW
#ifdef DFNFLOW
-
+#define TEMPLATE_FLOW_NAME DFNFlowEngineT
#include <yade/pkg/dem/FlowEngine.hpp>
class DFNCellInfo : public FlowCellInfo
=== modified file 'pkg/dem/DummyFlowEngine.cpp'
--- pkg/dem/DummyFlowEngine.cpp 2014-03-21 18:47:45 +0000
+++ pkg/dem/DummyFlowEngine.cpp 2014-04-01 13:18:38 +0000
@@ -13,7 +13,7 @@
//when you want it compiled, you can pass -DDFNFLOW to cmake, or just uncomment the following line
// #define DUMMYFLOW
#ifdef DUMMYFLOW
-
+#define TEMPLATE_FLOW_NAME DummyFlowEngineT
#include <yade/pkg/dem/FlowEngine.hpp>
/// We can add data to the Info types by inheritance
@@ -29,11 +29,11 @@
//same here if needed
};
-typedef TemplateFlowEngine<DummyCellInfo,DummyVertexInfo> DummyFlowEngineT;
-REGISTER_SERIALIZABLE(DummyFlowEngineT);
-YADE_PLUGIN((DummyFlowEngineT));
+typedef TemplateFlowEngine<DummyCellInfo,DummyVertexInfo> TEMPLATE_FLOW_NAME;
+REGISTER_SERIALIZABLE(TEMPLATE_FLOW_NAME);
+YADE_PLUGIN((TEMPLATE_FLOW_NAME));
-class DummyFlowEngine : public DummyFlowEngineT
+class DummyFlowEngine : public TEMPLATE_FLOW_NAME
{
public :
//We can overload every functions of the base engine to make it behave differently
=== modified file 'pkg/dem/FlowEngine.cpp'
--- pkg/dem/FlowEngine.cpp 2014-03-23 02:10:28 +0000
+++ pkg/dem/FlowEngine.cpp 2014-04-01 13:18:38 +0000
@@ -6,10 +6,30 @@
* GNU General Public License v2 or later. See file LICENSE for details. *
*************************************************************************/
#ifdef YADE_CGAL
-
#ifdef FLOW_ENGINE
+
+#define TEMPLATE_FLOW_NAME FlowEngineT
#include "FlowEngine.hpp"
+// To register properly, we need to first instantiate an intermediate class, then inherit from it with the correct class names in the YADE_CLASS macro.
+// The intermediate class would be seen by python under the name "TemplateFlowEngine"; that breaks as soon as more than one class is derived, since they would all
+// be named "TemplateFlowEngine" ...
+typedef TemplateFlowEngine<FlowCellInfo,FlowVertexInfo> FlowEngineT;
+REGISTER_SERIALIZABLE(FlowEngineT);
+
+class FlowEngine : public FlowEngineT
+{
+ public :
+ YADE_CLASS_BASE_DOC_ATTRS_INIT_CTOR_PY(FlowEngine,FlowEngineT,"An engine to solve the flow problem in saturated granular media. Model description can be found in [Chareyre2012a]_ and [Catalano2014a]_. See the example script FluidCouplingPFV/oedometer.py. More documentation to come.\n\n.. note::Multi-threading seems to work fine for the Cholesky decomposition, but it fails for the solve phase, in which -j1 is the fastest; here we specify thread numbers independently using :yref:`FlowEngine::numFactorizeThreads` and :yref:`FlowEngine::numSolveThreads`. These multithreading settings only affect the behaviour of the openblas library and are relatively independent of :yref:`FlowEngine::multithread`. However, the settings have to be globally consistent. For instance, :yref:`multithread<FlowEngine::multithread>` =True with :yref:`numFactorizeThreads<FlowEngine::numFactorizeThreads>` = :yref:`numSolveThreads<FlowEngine::numSolveThreads>` = 4 implies that openblas will mobilize 8 processors at some point. If the system does not have that many processors, it will hurt performance.",
+ ,,
+ ,
+ //nothing special to define here, we simply re-use FlowEngine methods
+ //.def("meanVelocity",&PeriodicFlowEngine::meanVelocity,"measure the mean velocity in the period")
+ )
+ DECLARE_LOGGER;
+};
+REGISTER_SERIALIZABLE(FlowEngine);
+
YADE_PLUGIN((FlowEngineT));
CREATE_LOGGER(FlowEngine );
YADE_PLUGIN((FlowEngine));
=== modified file 'pkg/dem/FlowEngine.hpp'
--- pkg/dem/FlowEngine.hpp 2014-03-23 02:06:54 +0000
+++ pkg/dem/FlowEngine.hpp 2014-04-01 13:18:38 +0000
@@ -29,6 +29,8 @@
- FlowBoundingSphere.hpp/ipp and PeriodicFlow.hpp/ipp + LinSolv variants: implement the solver in itself (mesh, boundary conditions, solving, defining fluid-particles interactions)
- FlowEngine.hpp/ipp/cpp (this file)
+Variants for periodic boundary conditions are also present.
+
*/
#pragma once
@@ -38,12 +40,23 @@
#include<yade/lib/triangulation/FlowBoundingSphere.hpp>
#include<yade/lib/triangulation/PeriodicFlow.hpp>
+/// Frequently used:
+typedef CGT::CVector CVector;
+typedef CGT::Point Point;
+
/// Converters for Eigen and CGAL vectors
-inline CGT::CVector makeCgVect ( const Vector3r& yv ) {return CGT::CVector ( yv[0],yv[1],yv[2] );}
-inline CGT::Point makeCgPoint ( const Vector3r& yv ) {return CGT::Point ( yv[0],yv[1],yv[2] );}
-inline Vector3r makeVector3r ( const CGT::Point& yv ) {return Vector3r ( yv[0],yv[1],yv[2] );}
-inline Vector3r makeVector3r ( const CGT::CVector& yv ) {return Vector3r ( yv[0],yv[1],yv[2] );}
+inline CVector makeCgVect ( const Vector3r& yv ) {return CVector ( yv[0],yv[1],yv[2] );}
+inline Point makeCgPoint ( const Vector3r& yv ) {return Point ( yv[0],yv[1],yv[2] );}
+inline Vector3r makeVector3r ( const Point& yv ) {return Vector3r ( yv[0],yv[1],yv[2] );}
+inline Vector3r makeVector3r ( const CVector& yv ) {return Vector3r ( yv[0],yv[1],yv[2] );}
+/// C++ templates and the YADE_CLASS_... macro family are not very compatible; this #define is a hack to make them work together.
+/// TEMPLATE_FLOW_NAME will be the name of a serializable TemplateFlowEngine<...> instance, which can in turn be
+/// inherited from. The instance itself will be useless for actual simulations.
+#ifndef TEMPLATE_FLOW_NAME
+#error You must define TEMPLATE_FLOW_NAME in your source file before including FlowEngine.hpp
+#endif
+
template<class _CellInfo, class _VertexInfo, class _Tesselation=CGT::_Tesselation<CGT::TriangulationTypes<_VertexInfo,_CellInfo> >, class solverT=CGT::FlowBoundingSphere<_Tesselation> >
class TemplateFlowEngine : public PartialEngine
{
@@ -81,14 +94,15 @@
enum {wall_xmin, wall_xmax, wall_ymin, wall_ymax, wall_zmin, wall_zmax};
Vector3r normal [6];
bool currentTes;
+ bool metisForced;
int idOffset;
double epsVolCumulative;
int ReTrg;
int ellapsedIter;
void initSolver (FlowSolver& flow);
#ifdef LINSOLV
- void setForceMetis (Solver& flow, bool force);
- bool getForceMetis (Solver& flow);
+ void setForceMetis (bool force);
+ bool getForceMetis ();
#endif
void triangulate (Solver& flow);
void addBoundary (Solver& flow);
@@ -187,22 +201,20 @@
}
double averageSlicePressure(double posY){return solver->averageSlicePressure(posY);}
double averagePressure(){return solver->averagePressure();}
- #ifdef LINSOLV
- void exportMatrix(string filename) {if (useSolver==3) solver->exportMatrix(filename.c_str());
- else cerr<<"available for Cholmod solver (useSolver==3)"<<endl;}
- void exportTriplets(string filename) {if (useSolver==3) solver->exportTriplets(filename.c_str());
- else cerr<<"available for Cholmod solver (useSolver==3)"<<endl;}
- #endif
void emulateAction(){
scene = Omega::instance().getScene().get();
action();}
#ifdef LINSOLV
- void cholmodStats() {
- cerr << cholmod_print_common("PFV Cholmod factorization",&(solver->eSolver.cholmod()))<<endl;
+ void exportMatrix(string filename) {if (useSolver==3) solver->exportMatrix(filename.c_str());
+ else cerr<<"available for Cholmod solver (useSolver==3)"<<endl;}
+ void exportTriplets(string filename) {if (useSolver==3) solver->exportTriplets(filename.c_str());
+ else cerr<<"available for Cholmod solver (useSolver==3)"<<endl;}
+ void cholmodStats() {
+ cerr << cholmod_print_common((char*)string("PFV Cholmod factorization").c_str(),&(solver->eSolver.cholmod()))<<endl;
cerr << "cholmod method:" << solver->eSolver.cholmod().selected<<endl;
cerr << "METIS called:"<<solver->eSolver.cholmod().called_nd<<endl;}
- bool metisUsed() {return bool(solver->eSolver.cholmod().called_nd);}
+ bool metisUsed() {return bool(solver->eSolver.cholmod().called_nd);}
#endif
virtual ~TemplateFlowEngine();
@@ -219,7 +231,7 @@
if (solver->T[solver->currentTes].Volume(id) == -1) {compTessVolumes(); LOG_WARN("Computing all volumes now, as you did not request it explicitely.");}
return (solver->T[solver->currentTes].Max_id() >= id) ? solver->T[solver->currentTes].Volume(id) : -1;}
- YADE_CLASS_BASE_DOC_ATTRS_DEPREC_INIT_CTOR_PY(TemplateFlowEngine,PartialEngine,"An engine to solve flow problem in saturated granular media. Model description can be found in [Chareyre2012a]_ and [Catalano2013a]_. See the example script FluidCouplingPFV/oedometer.py. More documentation to come.\n\n.. note::Multi-threading seems to work fine for Cholesky decomposition, but it fails for the solve phase in which -j1 is the fastest, here we specify thread numbers independently using :yref:`FlowEngine::numFactorizeThreads` and :yref:`FlowEngine::numSolveThreads`. These multhreading settings are only impacting the behaviour of openblas library and are relatively independant of :yref:`FlowEngine::multithread`. However, the settings have to be globally consistent. For instance, :yref:`multithread<FlowEngine::multithread>` =True with yref:`numFactorizeThreads<FlowEngine::numFactorizeThreads>` = yref:`numSolveThreads<FlowEngine::numSolveThreads>` = 4 implies that openblas will mobilize 8 processors at some point. If the system does not have so many procs. it will hurt performance.",
+ YADE_CLASS_PYCLASS_BASE_DOC_ATTRS_DEPREC_INIT_CTOR_PY(TemplateFlowEngine,TEMPLATE_FLOW_NAME,PartialEngine,"An engine to solve the flow problem in saturated granular media. Model description can be found in [Chareyre2012a]_ and [Catalano2014a]_. See the example script FluidCouplingPFV/oedometer.py. More documentation to come.\n\n.. note::Multi-threading seems to work fine for the Cholesky decomposition, but it fails for the solve phase, in which -j1 is the fastest; here we specify thread numbers independently using :yref:`FlowEngine::numFactorizeThreads` and :yref:`FlowEngine::numSolveThreads`. These multithreading settings only affect the behaviour of the openblas library and are relatively independent of :yref:`FlowEngine::multithread`. However, the settings have to be globally consistent. For instance, :yref:`multithread<FlowEngine::multithread>` =True with :yref:`numFactorizeThreads<FlowEngine::numFactorizeThreads>` = :yref:`numSolveThreads<FlowEngine::numSolveThreads>` = 4 implies that openblas will mobilize 8 processors at some point. If the system does not have that many processors, it will hurt performance.",
((bool,isActivated,true,,"Activates Flow Engine"))
((bool,first,true,,"Controls the initialization/update phases"))
((double, fluidBulkModulus, 0.,,"Bulk modulus of fluid (inverse of compressibility) K=-dP*V/dV [Pa]. Flow is compressible if fluidBulkModulus > 0, else incompressible."))
@@ -291,6 +303,7 @@
ReTrg=1;
backgroundCompleted=true;
ellapsedIter=0;
+ metisForced=false;
,
.def("imposeFlux",&TemplateFlowEngine::imposeFlux,(python::arg("pos"),python::arg("p")),"Impose incoming flux in boundary cell of location 'pos'.")
.def("imposePressure",&TemplateFlowEngine::imposePressure,(python::arg("pos"),python::arg("p")),"Impose pressure in cell of location 'pos'. The index of the condition is returned (for multiple imposed pressures at different points).")
@@ -340,9 +353,6 @@
// Definition of functions in a separate file for clarity
#include<yade/pkg/dem/FlowEngine.ipp>
-typedef CGT::CVector CVector;
-typedef CGT::Point Point;
-
class FlowCellInfo : public CGT::SimpleCellInfo {
public:
//For vector storage of all cells
@@ -418,22 +428,5 @@
inline const CVector ghostShift (void) {return CGAL::NULL_VECTOR;}
};
-// To register properly, we need to first instantiate an intermediate class, then inherit from it with correct class names in YADE_CLASS macro
-// The intermediate one would be seen with the name "TemplateFlowEngine" by python, thus it would not work when more than one class are derived, they would all
-// be named "TemplateFlowEngine" ...
-typedef TemplateFlowEngine<FlowCellInfo,FlowVertexInfo> FlowEngineT;
-REGISTER_SERIALIZABLE(FlowEngineT);
-class FlowEngine : public FlowEngineT
-{
- public :
- YADE_CLASS_BASE_DOC_ATTRS_INIT_CTOR_PY(FlowEngine,FlowEngineT,"An engine to solve flow problem in saturated granular media. Model description can be found in [Chareyre2012a]_ and [Catalano2013a]_. See the example script FluidCouplingPFV/oedometer.py. More documentation to come.\n\n.. note::Multi-threading seems to work fine for Cholesky decomposition, but it fails for the solve phase in which -j1 is the fastest, here we specify thread numbers independently using :yref:`FlowEngine::numFactorizeThreads` and :yref:`FlowEngine::numSolveThreads`. These multhreading settings are only impacting the behaviour of openblas library and are relatively independant of :yref:`FlowEngine::multithread`. However, the settings have to be globally consistent. For instance, :yref:`multithread<FlowEngine::multithread>` =True with yref:`numFactorizeThreads<FlowEngine::numFactorizeThreads>` = yref:`numSolveThreads<FlowEngine::numSolveThreads>` = 4 implies that openblas will mobilize 8 processors at some point. If the system does not have so many procs. it will hurt performance.",
- ,,
- ,
- //nothing special to define here, we simply re-use FlowEngine methods
- //.def("meanVelocity",&PeriodicFlowEngine::meanVelocity,"measure the mean velocity in the period")
- )
- DECLARE_LOGGER;
-};
-REGISTER_SERIALIZABLE(FlowEngine);
=== modified file 'pkg/dem/FlowEngine.ipp'
--- pkg/dem/FlowEngine.ipp 2014-03-21 18:47:45 +0000
+++ pkg/dem/FlowEngine.ipp 2014-04-01 13:18:38 +0000
@@ -110,6 +110,7 @@
if (fluidBulkModulus>0) solver->interpolate (solver->T[solver->currentTes], backgroundSolver->T[backgroundSolver->currentTes]);
solver=backgroundSolver;
backgroundSolver = shared_ptr<FlowSolver> (new FlowSolver);
+ if (metisForced) {backgroundSolver->eSolver.cholmod().nmethods=1; backgroundSolver->eSolver.cholmod().method[0].ordering=CHOLMOD_METIS;}
//Copy imposed pressures/flow from the old solver
backgroundSolver->imposedP = vector<pair<CGT::Point,Real> >(solver->imposedP);
backgroundSolver->imposedF = vector<pair<CGT::Point,Real> >(solver->imposedF);
@@ -161,7 +162,6 @@
template< class _CellInfo, class _VertexInfo, class _Tesselation, class solverT >
void TemplateFlowEngine<_CellInfo,_VertexInfo,_Tesselation,solverT>::boundaryConditions ( Solver& flow )
{
-
for (int k=0;k<6;k++) {
flow.boundary (wallIds[k]).flowCondition=!bndCondIsPressure[k];
flow.boundary (wallIds[k]).value=bndCondValue[k];
@@ -226,15 +226,16 @@
#ifdef LINSOLV
template< class _CellInfo, class _VertexInfo, class _Tesselation, class solverT >
-void TemplateFlowEngine<_CellInfo,_VertexInfo,_Tesselation,solverT>::setForceMetis ( Solver& flow, bool force )
+void TemplateFlowEngine<_CellInfo,_VertexInfo,_Tesselation,solverT>::setForceMetis ( bool force )
{
if (force) {
- flow.eSolver.cholmod().nmethods=1;
- flow.eSolver.cholmod().method[0].ordering=CHOLMOD_METIS;
- } else cholmod_defaults(&(flow.eSolver.cholmod()));
+ metisForced=true;
+ solver->eSolver.cholmod().nmethods=1;
+ solver->eSolver.cholmod().method[0].ordering=CHOLMOD_METIS;
+ } else {cholmod_defaults(&(solver->eSolver.cholmod())); metisForced=false;}
}
template< class _CellInfo, class _VertexInfo, class _Tesselation, class solverT >
-bool TemplateFlowEngine<_CellInfo,_VertexInfo,_Tesselation,solverT>::getForceMetis ( Solver& flow ) {return (flow.eSolver.cholmod().nmethods==1);}
+bool TemplateFlowEngine<_CellInfo,_VertexInfo,_Tesselation,solverT>::getForceMetis () {return (solver->eSolver.cholmod().nmethods==1);}
#endif
template< class _CellInfo, class _VertexInfo, class _Tesselation, class solverT >
void TemplateFlowEngine<_CellInfo,_VertexInfo,_Tesselation,solverT>::buildTriangulation ( Solver& flow )
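
Aside on the FlowEngine.ipp hunks above: setForceMetis()/getForceMetis() now act on the engine's own solver and remember the choice in the new metisForced flag, so that the freshly created background solver can be given the same ordering (see the added line in the background-solver hunk). The CHOLMOD calls involved, shown standalone for clarity (a sketch only; the helper name is illustrative):

#include <cholmod.h>

// Restrict CHOLMOD to METIS ordering, or restore its default ordering search.
void forceMetisOrdering(cholmod_common& c, bool force)
{
	if (force) {
		c.nmethods = 1;                        // try exactly one ordering method...
		c.method[0].ordering = CHOLMOD_METIS;  // ...and make that method METIS
	} else {
		cholmod_defaults(&c);                  // back to CHOLMOD's default method list
	}
}
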
=== modified file 'pkg/dem/PeriodicFlowEngine.cpp'
--- pkg/dem/PeriodicFlowEngine.cpp 2014-03-21 18:47:45 +0000
+++ pkg/dem/PeriodicFlowEngine.cpp 2014-04-01 13:18:38 +0000
@@ -6,12 +6,12 @@
* GNU General Public License v2 or later. See file LICENSE for details. *
*************************************************************************/
-
-
#ifdef YADE_CGAL
#ifdef FLOW_ENGINE
+#define TEMPLATE_FLOW_NAME FlowEngine_PeriodicInfo
#include "PeriodicFlowEngine.hpp"
+#undef TEMPLATE_FLOW_NAME
CVector PeriodicCellInfo::hSize[]={CVector(),CVector(),CVector()};
CVector PeriodicCellInfo::deltaP=CVector();
=== modified file 'pkg/dem/PeriodicFlowEngine.hpp'
--- pkg/dem/PeriodicFlowEngine.hpp 2014-03-21 18:47:45 +0000
+++ pkg/dem/PeriodicFlowEngine.hpp 2014-04-01 13:18:38 +0000
@@ -10,7 +10,6 @@
/// It is a bit more complicated as for FlowEngine, though, because we need template inheriting from template, which breaks YADE_CLASS_XXX logic_error
/// See below the commented exemple, for a possible solution
-
#include <yade/pkg/dem/FlowEngine.hpp>
class PeriodicCellInfo : public FlowCellInfo
@@ -108,32 +107,4 @@
};
-REGISTER_SERIALIZABLE(PeriodicFlowEngine);
-
-
-
-// //Keep this: Example of inheriting from template combined with YADE_CLASS_... macro. checkPyClassRegistersItself has to be defined, or it wont compile.
-// template<class T>
-// class Test : public T
-// {
-// public :
-// virtual ~Test();
-// // typedef TemplateFlowEngine<T,T> FlowEngineT;
-// virtual void checkPyClassRegistersItself(const std::string& thisClassName) const
-// {
-// if(getClassName()!=thisClassName) throw std::logic_error(("Class "+getClassName()+" does not register with YADE_CLASS_BASE_DOC_ATTR*, would not be accessible from python.").c_str());
-// }
-// YADE_CLASS_BASE_DOC_ATTRS_DEPREC_INIT_CTOR_PY(Test,PeriodicFlowEngine,"A variant of :yref:`FlowEngine` implementing periodic boundary conditions. The API is very similar.",
-// ((Real,duplicateThreshold, 0.06,,"distance from cell borders that will triger periodic duplication in the triangulation |yupdate|"))
-// ((Vector3r, gradP, Vector3r::Zero(),,"Macroscopic pressure gradient"))
-// ,,,
-//
-// ,
-// //nothing special to define, we re-use FlowEngine methods
-// //.def("meanVelocity",&Test::meanVelocity,"measure the mean velocity in the period")
-// )
-// DECLARE_LOGGER;
-//
-//
-// };
-// // REGISTER_SERIALIZABLE(Test);
+REGISTER_SERIALIZABLE(PeriodicFlowEngine);
\ No newline at end of file