------------------------------------------------------------
revno: 6225
committer: Garth N. Wells <gnw20@xxxxxxxxx>
branch nick: dolfin-all
timestamp: Mon 2011-09-19 18:38:19 +0100
message:
Get things working in parallel again. Broken by some cross-commits.
modified:
demo/undocumented/bcs/cpp/main.cpp
demo/undocumented/bcs/python/demo_bcs.py
dolfin/mesh/MeshPartitioning.cpp
=== modified file 'demo/undocumented/bcs/cpp/main.cpp'
--- demo/undocumented/bcs/cpp/main.cpp 2011-09-19 15:47:12 +0000
+++ demo/undocumented/bcs/cpp/main.cpp 2011-09-19 17:38:19 +0000
@@ -62,6 +62,8 @@
File file("u.pvd");
file << u;
+ cout << "Norm: " << u.vector().norm("l2") << endl;
+
// Plot solution
plot(u);
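
The norm printout added above is a handy parallel sanity check: the l2 norm of the distributed solution vector is a single global number that should not depend on how the mesh is partitioned, so serial and parallel runs can be compared at a glance. As a minimal sketch of why, using plain MPI rather than DOLFIN's GenericVector interface (the data below is made up), a parallel l2 norm is a local sum of squares, a global reduction, and a square root:

#include <mpi.h>

#include <cmath>
#include <cstddef>
#include <cstdio>
#include <vector>

// Sketch only: a distributed l2 norm reduces local sums of squares to one
// global value, so every process reports the same number for any partitioning.
double parallel_l2_norm(const std::vector<double>& local_values, MPI_Comm comm)
{
  double local_sum = 0.0;
  for (std::size_t i = 0; i < local_values.size(); ++i)
    local_sum += local_values[i]*local_values[i];
  double global_sum = 0.0;
  MPI_Allreduce(&local_sum, &global_sum, 1, MPI_DOUBLE, MPI_SUM, comm);
  return std::sqrt(global_sum);
}

int main(int argc, char* argv[])
{
  MPI_Init(&argc, &argv);
  std::vector<double> x(10, 1.0);  // each process owns ten entries equal to one
  std::printf("Norm: %g\n", parallel_l2_norm(x, MPI_COMM_WORLD));
  MPI_Finalize();
  return 0;
}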
=== modified file 'demo/undocumented/bcs/python/demo_bcs.py'
--- demo/undocumented/bcs/python/demo_bcs.py 2011-09-19 15:47:12 +0000
+++ demo/undocumented/bcs/python/demo_bcs.py 2011-09-19 17:38:19 +0000
@@ -51,8 +51,5 @@
u = Function(V)
solve(a == L, u, [bc0, bc1, bc2, bc3])
-# Write solution to file
-File("u.pvd") << u
-
# Plot solution
plot(u, interactive=True)
=== modified file 'dolfin/mesh/MeshPartitioning.cpp'
--- dolfin/mesh/MeshPartitioning.cpp 2011-09-19 15:47:12 +0000
+++ dolfin/mesh/MeshPartitioning.cpp 2011-09-19 17:38:19 +0000
@@ -307,7 +307,9 @@
void MeshPartitioning::mesh_domains(Mesh& mesh,
const LocalMeshData& local_data)
{
- // Local domain data (return of there is no data)
+ mesh.init();
+
+ // Local domain data
const std::map<uint, std::vector<std::vector<dolfin::uint> > > domain_data = local_data.domain_data;
if (domain_data.size() == 0)
return;
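
For orientation: local_data.domain_data maps a topological dimension to a list of triples, and judging from how ldata[i][0], ldata[i][1] and ldata[i][2] are read further down, each triple is (global cell index, local entity index, marker value). A small standalone sketch of that layout (the dimension and values are invented for illustration):

#include <cstdio>
#include <map>
#include <vector>

typedef unsigned int uint;

int main()
{
  // dimension -> list of (global cell index, local entity index, marker value)
  std::map<uint, std::vector<std::vector<uint> > > domain_data;

  std::vector<uint> triple(3);
  triple[0] = 42;  // global index of the cell carrying the marker
  triple[1] = 1;   // local index of the marked entity within that cell
  triple[2] = 7;   // marker value
  domain_data[2].push_back(triple);  // e.g. facet markers on a 3D mesh

  std::map<uint, std::vector<std::vector<uint> > >::const_iterator dim_data;
  for (dim_data = domain_data.begin(); dim_data != domain_data.end(); ++dim_data)
    std::printf("dim %u: %u entries\n", dim_data->first,
                (uint) dim_data->second.size());
  return 0;
}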
@@ -322,15 +324,14 @@
std::map<uint, std::vector<std::vector<dolfin::uint> > >::const_iterator dim_data;
for (dim_data = domain_data.begin(); dim_data != domain_data.end(); ++dim_data)
{
+ // Initialise global numbering
const uint dim = dim_data->first;
+ MeshPartitioning::number_entities(mesh, dim);
+ MeshPartitioning::number_entities(mesh, D);
if (dim == 0)
error("MeshPartitioning::mesh_domains needs updating for vertices.");
- // Initialise global numbering
- MeshPartitioning::number_entities(mesh, dim);
- MeshPartitioning::number_entities(mesh, D);
-
// Get value collection
MeshValueCollection<uint>& markers = mesh.domains().markers(dim);
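
Moving the number_entities calls ahead of the dim == 0 check ensures that global numbers exist for both the marked dimension and the cell dimension D before any lookups below. As a rough picture of what a global numbering involves, here is a sketch with plain MPI that numbers locally owned entities using an exclusive prefix sum of the per-process counts; DOLFIN's number_entities is more involved (it also has to agree on ownership of entities shared between processes), so this illustrates the idea, not the actual implementation:

#include <mpi.h>

#include <cstdio>
#include <vector>

// Sketch only: global numbers for locally owned entities from an exclusive
// prefix sum of the per-process counts. Shared-entity ownership is ignored.
std::vector<unsigned long> simple_global_numbering(unsigned long num_local,
                                                   MPI_Comm comm)
{
  unsigned long offset = 0;
  MPI_Exscan(&num_local, &offset, 1, MPI_UNSIGNED_LONG, MPI_SUM, comm);

  // MPI_Exscan leaves the result on rank 0 undefined; rank 0 starts at zero
  int rank = 0;
  MPI_Comm_rank(comm, &rank);
  if (rank == 0)
    offset = 0;

  std::vector<unsigned long> global_indices(num_local);
  for (unsigned long i = 0; i < num_local; ++i)
    global_indices[i] = offset + i;
  return global_indices;
}

int main(int argc, char* argv[])
{
  MPI_Init(&argc, &argv);
  const std::vector<unsigned long> global_indices
    = simple_global_numbering(100, MPI_COMM_WORLD);
  std::printf("first global index on this process: %lu\n", global_indices.front());
  MPI_Finalize();
  return 0;
}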
@@ -355,35 +356,45 @@
it = std::find(global_entity_indices.begin(), global_entity_indices.end(), global_cell_index);
if (it != global_entity_indices.end())
{
- const uint local_cell_index = std::distance(it, global_entity_indices.begin());
+ const uint local_cell_index = it - global_entity_indices.begin();
markers.set_value(local_cell_index, ldata[i][1], ldata[i][2]);
}
}
+
// Determine where to send data
std::vector<dolfin::uint> global_entities;
for (uint i = 0; i < ldata.size(); ++i)
global_entities.push_back(ldata[i][0]);
// Get destinations and local cell index at destination
- /*
- cout << "*** Send size to hosts: " << global_entities.size() << endl;
- cout << "*** Local: " << counter << endl;
- */
const std::map<dolfin::uint, std::set<std::pair<dolfin::uint, dolfin::uint> > >
hosts = MeshDistributed::off_process_indices(global_entities, D, mesh);
- /*
- cout << "*On, Off, predicted off, total-process cell (fancy): "
- << counter << ", "
- << ldata.size() - counter << ", "
- << hosts.size() << ", "
- << ldata.size() << endl;
- */
-
- // Pack data to send
+
+ cout << "Off-process data " << hosts.size() << endl;
+
+ std::map<dolfin::uint, std::set<std::pair<dolfin::uint, dolfin::uint> > >::const_iterator host_data;
+ uint counter0(0), counter1(0);
+ for (host_data = hosts.begin(); host_data != hosts.end(); ++host_data)
+ {
+ const uint index = host_data->first;
+ std::vector<uint>::const_iterator it;
+ it = std::find(global_entity_indices.begin(), global_entity_indices.end(), index);
+ if (it != global_entity_indices.end())
+ ++counter0;
+ else
+ ++counter1;
+ }
+ cout << "Counters: " << counter0 << ", " << counter1 << ". Sums: " << counter0 + counter1 << ", " << global_entity_indices.size() << endl;
+ cout << "Size: " << _global_entity_indices.size() << endl;
+ cout << "Hosts Size: " << hosts.size() << endl;
+ cout << "Data Size: " << ldata.size() << endl;
+ cout << "Global ind size: " << global_entity_indices.size() << endl;
+
+
+ // Pack data to send to appropriate process
std::vector<uint> send_data;
std::vector<uint> destinations;
- std::map<dolfin::uint, std::set<std::pair<dolfin::uint, dolfin::uint> > >::const_iterator host_data;
for (host_data = hosts.begin(); host_data != hosts.end(); ++host_data)
{
const uint global_cell_index = host_data->first;
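
The substantive fix in this hunk is the local index computation: std::distance(it, global_entity_indices.begin()) has its arguments reversed and yields the negated offset, while the replacement it - global_entity_indices.begin() is the correct distance (equivalent, for a vector, to std::distance(global_entity_indices.begin(), it)). A standalone sketch of the global-to-local lookup pattern used here:

#include <algorithm>
#include <cstdio>
#include <iterator>
#include <vector>

int main()
{
  // Locally owned global indices in local order (unsorted, hence std::find)
  std::vector<unsigned int> owned;
  owned.push_back(11);
  owned.push_back(42);
  owned.push_back(7);
  const std::vector<unsigned int>& global_entity_indices = owned;

  const unsigned int global_cell_index = 42;
  std::vector<unsigned int>::const_iterator it
    = std::find(global_entity_indices.begin(), global_entity_indices.end(),
                global_cell_index);
  if (it != global_entity_indices.end())
  {
    // Range start must come first; std::distance(it, begin) would be negative
    const unsigned int local_cell_index
      = std::distance(global_entity_indices.begin(), it);
    std::printf("global %u -> local %u\n", global_cell_index, local_cell_index);
  }
  return 0;
}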
@@ -391,11 +402,13 @@
for (uint i = 0; i < ldata.size(); ++i)
{
- std::vector<uint> domain_data = ldata[i];
if (ldata[i][0] == global_cell_index)
{
+ std::vector<uint> domain_data = ldata[i];
+ //const uint global_index = domain_data[0];
const uint local_entity_index = domain_data[1];
const uint domain_value = domain_data[2];
+
std::set<std::pair<dolfin::uint, dolfin::uint> >::const_iterator process_data;
for (process_data = processes_data.begin(); process_data != processes_data.end(); ++process_data)
{
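
The reshuffle in this hunk only creates the per-row locals once a row's global index matches the host cell, but the pairing still scans all of ldata once per entry in hosts. Purely as an illustration (this is not part of the commit), the same pairing can be done in a single pass by first indexing the rows by their global cell index:

#include <cstddef>
#include <map>
#include <vector>

typedef unsigned int uint;

// Illustrative sketch only: map each global cell index to the ldata rows that
// mention it, so the per-host lookup becomes a single map query.
std::map<uint, std::vector<std::size_t> >
index_rows_by_global_cell(const std::vector<std::vector<uint> >& ldata)
{
  std::map<uint, std::vector<std::size_t> > rows;
  for (std::size_t i = 0; i < ldata.size(); ++i)
    rows[ldata[i][0]].push_back(i);  // ldata[i][0] is the global cell index
  return rows;
}

int main()
{
  std::vector<std::vector<uint> > ldata(1, std::vector<uint>(3, 0));
  ldata[0][0] = 42;  // global cell index of the single sample row
  const std::map<uint, std::vector<std::size_t> > rows
    = index_rows_by_global_cell(ldata);
  return rows.count(42) == 1 ? 0 : 1;
}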
@@ -415,7 +428,9 @@
}
// Send/receive data
+ cout << "Send size " << send_data.size() << endl;
MPI::distribute(send_data, destinations);
+ cout << "Received size " << send_data.size() << endl;
assert(send_data.size() == destinations.size());
// Add received data to mesh domain
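
Judging from the surrounding code (one destination rank pushed per uint, the size assertion, and send_data being read again after the call), MPI::distribute exchanges the flat buffer and leaves the received values in send_data, which is then walked with a stride of 3 below. A self-contained sketch of that stride-3 pack/unpack convention, with the exchange itself stubbed out:

#include <cassert>
#include <cstddef>
#include <cstdio>
#include <vector>

typedef unsigned int uint;

int main()
{
  std::vector<uint> send_data;
  std::vector<uint> destinations;

  // Pack one (cell, entity, value) record destined for rank 3, pushing the
  // destination once per uint so the two vectors stay the same length
  const uint dest = 3, local_cell = 5, local_entity = 1, value = 7;
  send_data.push_back(local_cell);   destinations.push_back(dest);
  send_data.push_back(local_entity); destinations.push_back(dest);
  send_data.push_back(value);        destinations.push_back(dest);
  assert(send_data.size() == destinations.size());

  // MPI::distribute(send_data, destinations) would exchange the buffers and
  // leave the received values in send_data; here we simply pretend the data
  // we packed is what arrived
  const std::vector<uint>& received = send_data;

  // Unpack with a stride of 3, as in the loop that follows MPI::distribute
  assert(received.size() % 3 == 0);
  for (std::size_t i = 0; i < received.size(); i += 3)
    std::printf("cell %u, entity %u, value %u\n",
                received[i], received[i + 1], received[i + 2]);
  return 0;
}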
@@ -424,9 +439,63 @@
const uint local_cell_entity = send_data[i];
const uint local_entity_index = send_data[i + 1];
const uint domain_value = send_data[i + 2];
- assert(local_cell_entity < mesh.num_cells());
markers.set_value(local_cell_entity, local_entity_index, domain_value);
}
+
+
+ /*
+ // Pack all data to send (for testing)
+ std::vector<uint> send_data, destinations;
+ for (uint i = 0; i < ldata.size(); ++i)
+ {
+ const uint global_index = ldata[i][0];
+ const uint local_entity_index = ldata[i][1];
+ const uint domain_value = ldata[i][2];
+
+ // Skip local indices
+ std::vector<uint>::const_iterator it;
+ it = std::find(global_entity_indices.begin(), global_entity_indices.end(), global_index);
+ if (it != global_entity_indices.end())
+ continue;
+
+ for (uint i = 0; i < MPI::num_processes(); ++i)
+ {
+ if (i == MPI::process_number())
+ continue;
+
+ const uint proc = i;
+
+ destinations.push_back(proc);
+ send_data.push_back(global_index);
+
+ destinations.push_back(proc);
+ send_data.push_back(local_entity_index);
+
+ destinations.push_back(proc);
+ send_data.push_back(domain_value);
+ }
+ }
+
+ // Send/receive data
+ cout << "Send size " << send_data.size() << endl;
+ MPI::distribute(send_data, destinations);
+ cout << "Received size " << send_data.size() << endl;
+
+ // Add received data to mesh domain
+ for (uint i = 0; i < send_data.size(); i += 3)
+ {
+ const uint global_cell_entity = send_data[i];
+ std::vector<uint>::const_iterator it;
+ it = std::find(global_entity_indices.begin(), global_entity_indices.end(), global_cell_entity);
+ if (it != global_entity_indices.end())
+ {
+ const uint local_cell_entity = it - global_entity_indices.begin();
+ const uint local_entity_index = send_data[i + 1];
+ const uint domain_value = send_data[i + 2];
+ markers.set_value(local_cell_entity, local_entity_index, domain_value);
+ }
+ }
+ */
}
}
//-----------------------------------------------------------------------------