
[Branch ~yade-pkg/yade/git-trunk] Rev 3926: add FlowEngine::doInterpolate to force interpolation during mesh swap
------------------------------------------------------------
revno: 3926
committer: Bruno Chareyre <bruno.chareyre@xxxxxxxxxxx>
timestamp: Thu 2014-04-17 17:10:23 +0200
message:
  add FlowEngine::doInterpolate to force interpolation during mesh swap
modified:
  lib/triangulation/PeriodicFlow.hpp
  pkg/pfv/FlowEngine.hpp
  pkg/pfv/FlowEngine.ipp
  pkg/pfv/PeriodicFlowEngine.cpp
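
For context, a minimal usage sketch of the new flag in a Yade script (the engine and attribute names follow the Yade Python API; the parameter values below are purely illustrative):

    flow = FlowEngine(
        isActivated=True,
        fluidBulkModulus=0.,    # incompressible fluid: interpolation would normally be skipped
        doInterpolate=True,     # the new flag: carry cell pressures across a mesh swap anyway
        meshUpdateInterval=200, # retriangulate at most every 200 iterations
    )
    O.engines = O.engines + [flow]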


--
lp:yade
https://code.launchpad.net/~yade-pkg/yade/git-trunk

=== modified file 'lib/triangulation/PeriodicFlow.hpp'
--- lib/triangulation/PeriodicFlow.hpp	2014-03-31 13:45:16 +0000
+++ lib/triangulation/PeriodicFlow.hpp	2014-04-17 15:10:23 +0000
@@ -69,6 +69,7 @@
 			center=CVector(coord==0?boundPos:center[0],coord==1?boundPos:center[1],coord==2?boundPos:center[2]);
 		}
                 oldCell = Tri.locate(Point(center[0],center[1],center[2]));
+		//FIXME: should use getInfo
                 newCell->info().p() = oldCell->info().shiftedP();
         }
 //  	Tes.Clear();//Don't reset to avoid segfault when getting pressure in scripts just after interpolation

=== modified file 'pkg/pfv/FlowEngine.hpp'
--- pkg/pfv/FlowEngine.hpp	2014-04-15 16:39:53 +0000
+++ pkg/pfv/FlowEngine.hpp	2014-04-17 15:10:23 +0000
@@ -236,6 +236,7 @@
 		YADE_CLASS_PYCLASS_BASE_DOC_ATTRS_DEPREC_INIT_CTOR_PY(TemplateFlowEngine,TEMPLATE_FLOW_NAME,PartialEngine,"An engine to solve the flow problem in saturated granular media. A description of the model can be found in [Chareyre2012a]_ and [Catalano2014a]_. See the example script FluidCouplingPFV/oedometer.py. More documentation to come.\n\n.. note:: Multi-threading seems to work fine for the Cholesky decomposition, but it fails for the solve phase, where -j1 is the fastest; thread numbers are therefore specified independently using :yref:`FlowEngine::numFactorizeThreads` and :yref:`FlowEngine::numSolveThreads`. These multithreading settings only affect the behaviour of the openblas library and are relatively independent of :yref:`FlowEngine::multithread`. However, the settings have to be globally consistent. For instance, :yref:`multithread<FlowEngine::multithread>`=True with :yref:`numFactorizeThreads<FlowEngine::numFactorizeThreads>` = :yref:`numSolveThreads<FlowEngine::numSolveThreads>` = 4 implies that openblas will mobilize 8 processors at some point. If the system does not have that many processors, performance will suffer.",
 		((bool,isActivated,true,,"Activates Flow Engine"))
 		((bool,first,true,,"Controls the initialization/update phases"))
+		((bool,doInterpolate,false,,"Force interpolation of cell info while remeshing. By default, interpolation is performed only for compressible fluids; this flag forces it in all cases."))
 		((double, fluidBulkModulus, 0.,,"Bulk modulus of fluid (inverse of compressibility) K=-dP*V/dV [Pa]. Flow is compressible if fluidBulkModulus > 0, else incompressible."))
 		((Real, dt, 0,,"timestep [s]"))
 		((bool,permeabilityMap,false,,"Enable/disable stocking of average permeability scalar in cell infos."))

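Regarding the multi-threading note in the docstring above, a configuration sketch (the attributes are those named in the docstring; the thread counts are illustrative only):

    flow.multithread = True       # run triangulation/factorization in a background thread
    flow.numFactorizeThreads = 4  # openblas threads for the Cholesky decomposition
    flow.numSolveThreads = 1      # the docstring reports -j1 as fastest for the solve phase
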
=== modified file 'pkg/pfv/FlowEngine.ipp'
--- pkg/pfv/FlowEngine.ipp	2014-04-07 09:33:19 +0000
+++ pkg/pfv/FlowEngine.ipp	2014-04-17 15:10:23 +0000
@@ -107,7 +107,7 @@
 		if (updateTriangulation || (ellapsedIter>(0.5*meshUpdateInterval) && backgroundCompleted)) {
 			if (debug) cerr<<"switch flow solver"<<endl;
 			if (useSolver==0) LOG_ERROR("background calculations not available for Gauss-Seidel");
-			if (fluidBulkModulus>0) solver->interpolate (solver->T[solver->currentTes], backgroundSolver->T[backgroundSolver->currentTes]);
+			if (fluidBulkModulus>0 || doInterpolate) solver->interpolate (solver->T[solver->currentTes], backgroundSolver->T[backgroundSolver->currentTes]);
 			solver=backgroundSolver;
 			backgroundSolver = shared_ptr<FlowSolver> (new FlowSolver);
 			if (metisForced) {backgroundSolver->eSolver.cholmod().nmethods=1; backgroundSolver->eSolver.cholmod().method[0].ordering=CHOLMOD_METIS;}
@@ -276,7 +276,7 @@
         boundaryConditions ( flow );
         flow.initializePressure ( pZero );
 	
-        if ( !first && !multithread && (useSolver==0 || fluidBulkModulus>0)) flow.interpolate ( flow.T[!flow.currentTes], flow.T[flow.currentTes] );
+        if ( !first && !multithread && (useSolver==0 || fluidBulkModulus>0 || doInterpolate)) flow.interpolate ( flow.T[!flow.currentTes], flow.T[flow.currentTes] );
         if ( waveAction ) flow.applySinusoidalPressure ( flow.T[flow.currentTes].Triangulation(), sineMagnitude, sineAverage, 30 );
         if (normalLubrication || shearLubrication || viscousShear) flow.computeEdgesSurfaces();
 }

=== modified file 'pkg/pfv/PeriodicFlowEngine.cpp'
--- pkg/pfv/PeriodicFlowEngine.cpp	2014-04-07 09:33:19 +0000
+++ pkg/pfv/PeriodicFlowEngine.cpp	2014-04-17 15:10:23 +0000
@@ -183,7 +183,7 @@
 		while (updateTriangulation && !backgroundCompleted) { /*cout<<"sleeping..."<<sleeping++<<endl;*/ 	boost::this_thread::sleep(boost::posix_time::microseconds(1000));}
 		if (updateTriangulation || ellapsedIter>(0.5*meshUpdateInterval)) {
 			if (useSolver==0) LOG_ERROR("background calculations not available for Gauss-Seidel");
-			if (fluidBulkModulus>0) solver->interpolate (solver->T[solver->currentTes], backgroundSolver->T[backgroundSolver->currentTes]);
+			if (fluidBulkModulus>0 || doInterpolate) solver->interpolate (solver->T[solver->currentTes], backgroundSolver->T[backgroundSolver->currentTes]);
 			solver=backgroundSolver;
 			backgroundSolver = shared_ptr<FlowSolver> (new FlowSolver);
 			//Copy imposed pressures/flow from the old solver
@@ -522,7 +522,7 @@
 	
         flow.displayStatistics ();
         //FIXME: check interpolate() for the periodic case, at least use the mean pressure from previous step.
-	if ( !first && !multithread && (useSolver==0 || fluidBulkModulus>0)) flow.interpolate ( flow.T[!flow.currentTes], Tes );
+	if ( !first && !multithread && (useSolver==0 || fluidBulkModulus>0 || doInterpolate)) flow.interpolate ( flow.T[!flow.currentTes], Tes );
 // 	if ( !first && (useSolver==0 || fluidBulkModulus>0)) flow.interpolate ( flow.T[!flow.currentTes], flow.T[flow.currentTes] );
 	
         if ( waveAction ) flow.applySinusoidalPressure ( Tes.Triangulation(), sineMagnitude, sineAverage, 30 );
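
Finally, a sketch of how the interpolation across a mesh swap could be checked from a script, assuming the engine defined earlier is running (getPorePressure, updateTriangulation and pZero are existing FlowEngine members; the probe point is arbitrary):

    pBefore = flow.getPorePressure((.5,.5,.5)) # pressure at a probe point
    flow.updateTriangulation = True            # force a remesh on the next iteration
    O.step()
    pAfter = flow.getPorePressure((.5,.5,.5))  # with doInterpolate=True the old field is
                                               # interpolated onto the new mesh rather than
                                               # reset to the initial value pZero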