src/tetmesh/paralleltetmesh.cpp

//
// C++ Interface: parallel_tetmesh
//
// Description:
//
//
// Author: Dag Evensberget <deg.evensberget@psi.ch>, (C) 2005
//
// Copyright: See COPYING file that comes with this distribution
//
//

#include <set>
#include <stdexcept>
#include "myrlog.h"
#include "paralleltetmesh.h"

using namespace std;

namespace mesh {

ParallelTetMesh::ParallelTetMesh(MPI_Comm comm)
    : TetMesh(), _num_global_edges(0)
{
    _comm = comm;
    MPI_Comm_size(_comm, &_mpi_size);
    MPI_Comm_rank(_comm, &_mpi_rank);

    _tet_counter = 0;
    _point_counter = 0;
}

ParallelTetMesh::~ParallelTetMesh() {
}

void ParallelTetMesh::initPoint(int nofPoint) {
    _point_gids.clear();
    _point_counter = 0;
    TetMesh::initPoint(nofPoint);
}

void ParallelTetMesh::initTet(int nofTet) {
    _tet_counter = 0;
    TetMesh::initTet(nofTet);

/*  for (int i = 0; i < _point_counter; i++)
        cerr << "G->L " << _point_global_id[i] << "->" << _point_local_id[_point_global_id[i]] << endl; */
}

void ParallelTetMesh::insertPoint(int global_id, double x, double y, double z) {
    // Record the global->local id mapping and store the point under its local id.
    pair<int,int> p(global_id, _point_counter);
    _point_local_id.insert(p);
    //cerr << "Proc " << _mpi_rank << " inserting point g->l " << global_id << "->" << _point_counter << endl;
    _point_gids.push_back(global_id);
    TetMesh::insertPoint(_point_counter, x, y, z);
    _point_counter += 1;
}

void ParallelTetMesh::insertTet(id_t global_id, id_t id0, id_t id1, id_t id2, id_t id3, id_t material) {
    // Insert with local numbers: the global corner ids are mapped to local point ids.
    // Note: global_id itself is not stored; tets are kept in local numbering only.
    TetMesh::insertTet(_tet_counter, _point_local_id[id0], _point_local_id[id1],
                       _point_local_id[id2], _point_local_id[id3], material);
    _tet_counter += 1;
}

id_t ParallelTetMesh::lookupFace(id_t node0, id_t node1, id_t node2) {
    //cerr << "Proc " << _mpi_rank << " looking up g->l " << node0 << "->" << _point_local_id[node0]
    //     << " " << node1 << "->" << _point_local_id[node1]
    //     << " " << node2 << "->" << _point_local_id[node2] << endl;

    // All three corner nodes must be known locally before the face can be looked up.
    map<int,int>::iterator iter;
    iter = _point_local_id.find(node0);
    if (iter == _point_local_id.end())
        throw std::runtime_error("Accessed face with non-existing node (in ParallelTetMesh).");
    iter = _point_local_id.find(node1);
    if (iter == _point_local_id.end())
        throw std::runtime_error("Accessed face with non-existing node (in ParallelTetMesh).");
    iter = _point_local_id.find(node2);
    if (iter == _point_local_id.end())
        throw std::runtime_error("Accessed face with non-existing node (in ParallelTetMesh).");

    id_t face = TetMesh::lookupFace(_point_local_id[node0], _point_local_id[node1], _point_local_id[node2]);
    return face;
}

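// Edges are stored with start_id < end_id in local numbering. The check below
// verifies that the same ordering holds for the *global* point ids, so that all
// processors sharing an edge agree on its orientation (and hence, presumably,
// on the sign convention of edge-based degrees of freedom).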
void ParallelTetMesh::fix_edge_orientation() {
    for (tet_iterator iter = tet_begin(); iter != tet_end(); ++iter) {
        for (int j = 0; j < 6; j++) {
            // get local edge j
            Edge* edge = (*iter).get_edge(j);
            rAssert(edge->get_start_id() < edge->get_end_id());

            // test global orientation
            if (point_gid(edge->get_start_id()) >= point_gid(edge->get_end_id())) {
                rWarningAll("Global edge orientation error.");
                rAssert(false);
                // FIXME: Should swap edge orientation here
            }
        }
    }
}

void ParallelTetMesh::finalize_mesh(int nofSym) {
    synchronize();
    TetMesh::finalize_mesh(nofSym);
}

void ParallelTetMesh::synchronize() {

    // Assign ownership to nodes (the node ownership info is needed below to
    // decide edge and face ownership)
    set_point_ownership();

    // Assign global ids to faces
    set_face_gids();

    // Assign global ids to edges
    set_edge_gids();

    // Fix edge orientation (must be done consistently across all processors)
    fix_edge_orientation();
}
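
// Minimal usage sketch (illustrative only; the actual driver code and the
// names used here live elsewhere in FEMAXX):
//
//   ParallelTetMesh mesh(MPI_COMM_WORLD);
//   mesh.initPoint(nof_local_points);
//   mesh.insertPoint(gid, x, y, z);            // once per locally stored point
//   mesh.initTet(nof_local_tets);
//   mesh.insertTet(gid, n0, n1, n2, n3, mat);  // corner ids are global
//   mesh.finalize_mesh(nof_sym);               // calls synchronize() first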

utility::triple<id_t, id_t, id_t> ParallelTetMesh::get_face_corner_gids(const Face& face) const {
    id_t c0 = point_gid(face.get_corner_id(0));
    id_t c1 = point_gid(face.get_corner_id(1));
    id_t c2 = point_gid(face.get_corner_id(2));
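    // Sort the three gids into ascending order with a three-element
    // compare-swap network, giving every face a canonical corner ordering.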
    if (c0 > c1)
        std::swap(c0, c1);
    if (c1 > c2)
        std::swap(c1, c2);
    if (c0 > c1)
        std::swap(c0, c1);
    return utility::triple<id_t, id_t, id_t>(c0, c1, c2);
}

std::pair<id_t, id_t> ParallelTetMesh::get_edge_corner_gids(const Edge& edge) const {
    id_t c0 = point_gid(edge.get_start_id());
    id_t c1 = point_gid(edge.get_end_id());
    if (c0 > c1)
        std::swap(c0, c1);
    return std::pair<id_t, id_t>(c0, c1);
}

void ParallelTetMesh::set_face_gids() {
    // Find interprocessor boundary faces
    typedef utility::triple<id_t, id_t, id_t> ipb_face_t;
    vector<ipb_face_t> bfaces_global_node_ids;
    for (face_iterator iter = face_begin(); iter != face_end(); ++iter) {
        id_t tet0, tet1;
        (*iter).get_tet_ids(&tet0, &tet1);
        if (tet1 == ID_NONE) {
            if (!(*iter).on_boundary() && (*iter).on_symmetry_planes() == 0) {
                // Face is on interprocessor boundary.
                bfaces_global_node_ids.push_back(get_face_corner_gids(*iter));
            }
        }
    }
    // Assign global ids for ipb faces
    vector<id_t> ipb_face_gids;
    vector<int> ipb_faces_counts, ipb_faces_displs;
    int num_global_ipb_faces = parallel::assign_gids(bfaces_global_node_ids,
                                                     ipb_face_gids,
                                                     ipb_faces_counts,
                                                     ipb_faces_displs,
                                                     _comm);

    //
    // Assign gids for all faces (in order of local face ids)
    //
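    // Global numbering layout used below (derived from this code, not from
    // separate documentation): ipb faces occupy gids [0, num_global_ipb_faces);
    // the non-ipb faces of processor p occupy the contiguous block
    // [num_global_ipb_faces + displs[p], num_global_ipb_faces + displs[p] + counts[p]).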
    int num_my_nonipb_faces = get_nof_faces() - bfaces_global_node_ids.size();
    std::vector<int> counts, displs;
    int num_global_nonipb_faces =
        parallel::set_counts_displs(num_my_nonipb_faces, counts, displs, _comm);
    int non_ipb_face_counter = 0;
    for (face_iterator iter = face_begin(); iter != face_end(); ++iter) {
        id_t tet0, tet1;
        (*iter).get_tet_ids(&tet0, &tet1);
        if ((tet1 == ID_NONE) &&
            (!(*iter).on_boundary() && (*iter).on_symmetry_planes() == 0)) {
            //
            // Face is on interprocessor boundary.
            //
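            // NB: equal_range requires a sorted range; bfaces_global_node_ids is
            // assumed to have been left sorted by parallel::assign_gids above.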
            pair<vector<ipb_face_t>::iterator, vector<ipb_face_t>::iterator> result =
                equal_range(bfaces_global_node_ids.begin(),
                            bfaces_global_node_ids.end(),
                            get_face_corner_gids(*iter));
            assert(result.second - result.first == 1); // Want to find exactly one element.

            // Assign corresponding ipb gid
            _face_gids.push_back(ipb_face_gids[result.first - bfaces_global_node_ids.begin()]);
        } else {
            //
            // Inner face, or face on a geometric boundary or symmetry plane
            //
            _face_gids.push_back(non_ipb_face_counter + num_global_ipb_faces + displs[_mpi_rank]);
            non_ipb_face_counter += 1;
        }
    }
    assert(non_ipb_face_counter == num_my_nonipb_faces);
    assert(_face_gids.size() == get_nof_faces());
    _num_global_faces = num_global_ipb_faces + num_global_nonipb_faces;
    rInfoAll("Number of faces: (owned: %d, local: %d, global: %d)",
             ipb_faces_counts[_mpi_rank] + num_my_nonipb_faces, get_nof_faces(), _num_global_faces);
    rInfoAll("Owned faces: ipb:[%d:%d), non-ipb:[%d:%d)",
             ipb_faces_displs[_mpi_rank],
             ipb_faces_displs[_mpi_rank] + ipb_faces_counts[_mpi_rank],
             num_global_ipb_faces + displs[_mpi_rank],
             num_global_ipb_faces + displs[_mpi_rank] + counts[_mpi_rank]);

    _my_faces[0] = ipb_faces_displs[_mpi_rank];
    _my_faces[1] = ipb_faces_displs[_mpi_rank] + ipb_faces_counts[_mpi_rank];
    _my_faces[2] = num_global_ipb_faces + displs[_mpi_rank];
    _my_faces[3] = num_global_ipb_faces + displs[_mpi_rank] + counts[_mpi_rank];

    // debug
#if 0
    vector<id_t> eliminated_gids;
    for (face_iterator iter = face_begin(); iter != face_end(); ++iter) {
        if ((*iter).on_boundary())
            eliminated_gids.push_back(face_gid((*iter).get_id()));
    }
    std::map<id_t,id_t> mgid_map;
    id_t num_global_mgids;
    parallel::compute_mgid_map(_num_global_faces,
                               _face_gids,
                               eliminated_gids,
                               &num_global_mgids,
                               mgid_map,
                               _comm);
    rInfoAll("num_global_mgids=%d", num_global_mgids);
#endif
}

void ParallelTetMesh::set_edge_gids() {
    // Find interprocessor boundary edges
    typedef std::pair<id_t, id_t> ipb_edge_t;
    vector<ipb_edge_t> bedges_global_node_ids;
    set<id_t> edges_ipb;
    for (face_iterator iter = face_begin(); iter != face_end(); ++iter) {
        id_t tet0, tet1;
        (*iter).get_tet_ids(&tet0, &tet1);
        if (tet1 == ID_NONE) {
            if (!(*iter).on_boundary() && (*iter).on_symmetry_planes() == 0) {
                /*
                 * Face is on interprocessor boundary. Check if edge has already been added.
                 * If not, add to bedges_global_node_ids.
                 */
                for (int i = 0; i < 3; i++) {
                    Edge* e = (*iter).get_edge(i);
                    if (edges_ipb.find(e->get_id()) == edges_ipb.end()) {
                        bedges_global_node_ids.push_back(get_edge_corner_gids(*e));
#if 1
                        // debug: trace one particular edge
                        ipb_edge_t t = get_edge_corner_gids(*e);
                        if (t.first == 376 && t.second == 589) {
                            rWarningAll("edge (376, 589) on ipb");
                        }
#endif
                        edges_ipb.insert(e->get_id());
                    } else {
                        // Edge has already been added.
                    }
                }
            }
        }
    }

    // For each ipb edge, decide whether this processor should claim it; store the result in a boolean array.
    bool* edges_claim = new bool[bedges_global_node_ids.size()];
    for (size_t i = 0; i < bedges_global_node_ids.size(); ++i) {
        // Only claim an edge if at least one of its corners is owned by this processor.
        // This guarantees an ML-compatible matrix distribution.
        ipb_edge_t* it = &(bedges_global_node_ids[i]);
        edges_claim[i] = is_owned_point((*it).first) || is_owned_point((*it).second);
    }

    rDebugAll("bedges_global_node_ids.size() = %d",
              static_cast<int>(bedges_global_node_ids.size()));

    // Assign global ids for ipb edges.
    vector<id_t> ipb_edge_gids;
    vector<int> ipb_edges_counts, ipb_edges_displs;

    /* FIXME: assign_gids2 does not work in all cases. If we are unlucky, a processor gets to own
     *        both points of an edge but does not actually store that edge locally. In this case
     *        assign_gids2 is unable to find a proper owner for that edge and terminates with
     *        a failed assertion.
     *
     *        It is not clear whether a proper distribution of edges and points satisfying the
     *        ML constraint exists in all cases.
     *
     *        One solution would be to allow a processor to own an edge even if it does not
     *        store it locally. Some portions of the mesh code may need to be adapted to
     *        implement this.
     *
     *        For the moment, the old assign_gids is used to determine ownership of the edges.
     *        The code will thus not generate ML-compatible distributions. However, it will
     *        generate a distribution which should work in all cases if Maxwell ML is not used.
     */
    //int num_global_ipb_edges = parallel::assign_gids2(edges_claim, bedges_global_node_ids, ipb_edge_gids, ipb_edges_counts, ipb_edges_displs, _comm);
    int num_global_ipb_edges = parallel::assign_gids(bedges_global_node_ids, ipb_edge_gids, ipb_edges_counts, ipb_edges_displs, _comm);

    // edges_claim was allocated with new[]; it is only consumed by the disabled assign_gids2 call.
    delete[] edges_claim;

    // debugging
#if 0
    for (size_t i = 0; i < bedges_global_node_ids.size(); ++i)
        rDebugAll("ipb_edge_gids[%d] = %d", static_cast<int>(i), ipb_edge_gids[i]);
#endif

    //
    // Assign global ids for all edges, in local edge order.
    //
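    // Same layout as for faces: ipb edge gids come first in [0, num_global_ipb_edges),
    // followed by one contiguous block of non-ipb edge gids per processor.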
    int num_my_nonipb_edges = get_nof_edges() - bedges_global_node_ids.size();
    std::vector<int> counts, displs;
    int num_global_nonipb_edges =
        parallel::set_counts_displs(num_my_nonipb_edges, counts, displs, _comm);
    int non_ipb_edge_counter = 0;

    for (edge_iterator it = edge_begin(); it != edge_end(); ++it) {
        if (edges_ipb.find((*it).get_id()) != edges_ipb.end()) {
            //
            // Edge is on interprocessor boundary
            //
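            // As above, equal_range presumes bedges_global_node_ids was left
            // sorted by parallel::assign_gids.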
            pair<vector<ipb_edge_t>::iterator, vector<ipb_edge_t>::iterator> result =
                equal_range(bedges_global_node_ids.begin(), bedges_global_node_ids.end(), get_edge_corner_gids(*it));
            assert(result.second - result.first == 1);
            // Assign corresponding ipb gid.
            _edge_gids.push_back(ipb_edge_gids[result.first - bedges_global_node_ids.begin()]);
        } else {
            //
            // Inner edge, or boundary edge on a geometrical boundary or symmetry plane.
            //
            _edge_gids.push_back(non_ipb_edge_counter + num_global_ipb_edges + displs[_mpi_rank]);
            non_ipb_edge_counter++;
        }
    }

    assert(non_ipb_edge_counter == num_my_nonipb_edges);
    assert(_edge_gids.size() == get_nof_edges());
    _num_global_edges = num_global_ipb_edges + num_global_nonipb_edges;
    rInfoAll("Number of edges: (owned: %d, local: %d, global: %d)",
             ipb_edges_counts[_mpi_rank] + num_my_nonipb_edges, get_nof_edges(), _num_global_edges);
    rInfoAll("Owned edges: ipb:[%d:%d), non-ipb:[%d:%d)",
             ipb_edges_displs[_mpi_rank],
             ipb_edges_displs[_mpi_rank] + ipb_edges_counts[_mpi_rank],
             num_global_ipb_edges + displs[_mpi_rank],
             num_global_ipb_edges + displs[_mpi_rank] + counts[_mpi_rank]);
    _my_edges[0] = ipb_edges_displs[_mpi_rank];
    _my_edges[1] = ipb_edges_displs[_mpi_rank] + ipb_edges_counts[_mpi_rank];
    _my_edges[2] = num_global_ipb_edges + displs[_mpi_rank];
    _my_edges[3] = num_global_ipb_edges + displs[_mpi_rank] + counts[_mpi_rank];

    // debug
#if 0
    vector<id_t> eliminated_gids;
    for (edge_iterator iter = edge_begin(); iter != edge_end(); ++iter) {
        if ((*iter).on_boundary())
            eliminated_gids.push_back(edge_gid((*iter).get_id()));
    }
    std::map<id_t,id_t> mgid_map;
    id_t num_global_mgids;
    parallel::compute_mgid_map(_num_global_edges,
                               _edge_gids,
                               eliminated_gids,
                               &num_global_mgids,
                               mgid_map,
                               _comm);
    rInfoAll("num_global_mgids=%d", num_global_mgids);
#endif
}

void ParallelTetMesh::set_point_ownership() {
    int mpi_rank;
    MPI_Comm_rank(_comm, &mpi_rank);

    //
    // Determine ipb points
    //
    set<id_t> ipb_point_gids_set;
    // Loop over ipb faces
    for (face_iterator iter = face_begin(); iter != face_end(); ++iter) {
        id_t tet0, tet1;
        (*iter).get_tet_ids(&tet0, &tet1);
        if ((tet1 == ID_NONE) &&
            (!(*iter).on_boundary() && (*iter).on_symmetry_planes() == 0)) {
            //
            // Face is on interprocessor boundary.
            //
            id_t c0 = point_gid((*iter).get_corner_id(0));
            id_t c1 = point_gid((*iter).get_corner_id(1));
            id_t c2 = point_gid((*iter).get_corner_id(2));
            ipb_point_gids_set.insert(c0);
            ipb_point_gids_set.insert(c1);
            ipb_point_gids_set.insert(c2);
        }
    }
    vector<id_t> ipb_point_gids;
    // Store unique point gids in ascending order (set iteration is already sorted)
    for (set<id_t>::iterator it = ipb_point_gids_set.begin();
         it != ipb_point_gids_set.end();
         ++it)
    {
        ipb_point_gids.push_back(*it);
    }
    ipb_point_gids_set.clear();

    // Set ownership of ipb points
    vector<int> ipb_point_owners;
    int num_ipb_points_owned = parallel::set_ownership(ipb_point_gids, ipb_point_owners, _comm);

    // Set ownership of all locally stored points
    int num_points_owned = num_ipb_points_owned;
    _point_owned_by_me.resize(get_nof_points());
    for (point_iterator iter = point_begin(); iter != point_end(); ++iter) {
        id_t lid = (*iter).get_id();
        id_t gid = point_gid(lid);
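        // Binary search: ipb_point_gids is sorted ascending (it was filled from a std::set).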
        vector<id_t>::iterator found =
            lower_bound(ipb_point_gids.begin(), ipb_point_gids.end(), gid);
        if (found == ipb_point_gids.end() || *found != gid) {
            // Not on ipb
            _point_owned_by_me[lid] = true;
            num_points_owned += 1;
        } else {
            // On ipb: owned iff parallel::set_ownership assigned it to this rank
            _point_owned_by_me[lid] = (ipb_point_owners[found - ipb_point_gids.begin()] == mpi_rank);
        }
    }

    // Set the global number of points
    MPI_Allreduce(&num_points_owned, &_num_global_points, 1, MPI_INT, MPI_SUM, _comm);

    rDebugAll("Owned points: ipb:%d, total:%d.", num_ipb_points_owned, num_points_owned);
    rDebug("Number of points globally: %d", _num_global_points);

    // debugging
#if 0
    for (point_iterator iter = point_begin(); iter != point_end(); ++iter) {
        id_t lid = (*iter).get_id();
        if (_point_owned_by_me[lid])
            rDebugAll("own point %d", point_gid(lid));
    }
#endif
}

id_t ParallelTetMesh::face_gid(id_t lid) const {
    return _face_gids[lid];
}

id_t ParallelTetMesh::edge_gid(id_t lid) const {
    return _edge_gids[lid];
}

id_t ParallelTetMesh::point_gid(id_t lid) const {
    return _point_gids[lid];
}

id_t ParallelTetMesh::get_num_global_tets()
{
    // Sum the local tet counts over all processors.
    // (Assumes id_t is unsigned int, matching MPI_UNSIGNED.)
    id_t ntet = get_nof_tets();
    id_t gntet;
    MPI_Allreduce(&ntet, &gntet, 1, MPI_UNSIGNED, MPI_SUM, _comm);
    return gntet;
}

bool ParallelTetMesh::is_owned_point(id_t gid) const {
    map<int,int>::const_iterator it = _point_local_id.find(gid);
    if (it == _point_local_id.end())
        return false;
    else
        return _point_owned_by_me[(*it).second];
}

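// A processor owns the two contiguous gid ranges recorded in _my_edges/_my_faces
// by set_edge_gids/set_face_gids above: [0]..[1) covers its ipb entities and
// [2]..[3) its non-ipb entities, so ownership reduces to two interval tests.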
bool ParallelTetMesh::is_owned_edge(id_t gid) const {
    return (_my_edges[0] <= gid && gid < _my_edges[1])
        || (_my_edges[2] <= gid && gid < _my_edges[3]);
}

bool ParallelTetMesh::is_owned_face(id_t gid) const {
    return (_my_faces[0] <= gid && gid < _my_faces[1])
        || (_my_faces[2] <= gid && gid < _my_faces[3]);
}

MPI_Comm ParallelTetMesh::get_comm() {
    return _comm;
}

} // namespace mesh
