src/utility/parallel_tools.cpp

#include <numeric>
#include <algorithm>  // std::sort, std::unique, std::lower_bound
#include <cassert>    // assert
#include "parallel_tools.h"

namespace parallel {

// MPI datatype traits: map the C++ integer types to the matching MPI datatypes.
const MPI_Datatype Mpi_DType<int>::mpi_dtype(MPI_INT);
const MPI_Datatype Mpi_DType<unsigned int>::mpi_dtype(MPI_UNSIGNED);
const MPI_Datatype Mpi_DType<unsigned long>::mpi_dtype(MPI_UNSIGNED_LONG);

template<>
int determine_proc_of_this_element<std::pair<id_t, id_t> >(std::pair<id_t, id_t> nodes, std::vector<int>& procs) {
    // Sort the claiming processes so that every claimer computes the same owner.
    std::sort(procs.begin(), procs.end());
    id_t target = (nodes.first + nodes.second) % procs.size();
    assert(target < procs.size());
    return procs[target];
}

template<>
int determine_proc_of_this_element<utility::triple<id_t, id_t, id_t> >(utility::triple<id_t, id_t, id_t> nodes, std::vector<int>& procs) {
    std::sort(procs.begin(), procs.end());
    id_t target = (nodes.first + nodes.second + nodes.third) % procs.size();
    assert(target < procs.size());
    return procs[target];
}

template<>
int determine_proc_of_this_element<id_t>(id_t node, std::vector<int>& procs) {
    std::sort(procs.begin(), procs.end());
    id_t target = node % procs.size();
    assert(target < procs.size());
    return procs[target];
}

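// Worked example (hypothetical values) for the hash-style assignment above:
// if ranks 0, 2 and 5 all claim the shared edge with node gids (7, 4), each
// of them computes procs[(7 + 4) % 3] = procs[2] = 5, so all claimers agree
// on the owner without any extra communication.
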
std::ostream& operator << (std::ostream& ostr, const utility::triple<id_t, id_t, id_t>& x) {
    ostr << "Triple (face) (";
    ostr << x.first << ", ";
    ostr << x.second << ", ";
    ostr << x.third << ")";
    return ostr;
}

std::ostream& operator << (std::ostream& ostr, const std::pair<id_t, id_t>& x) {
    ostr << "Pair (edge) (";
    ostr << x.first << ", ";
    ostr << x.second << ")";
    return ostr;
}

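// Example: streaming std::pair<id_t, id_t>(3, 8) through the operator above
// prints "Pair (edge) (3, 8)"; the triple variant prints "Triple (face) (...)"
// analogously.
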
int set_ownership(const std::vector<id_t>& gids, std::vector<int>& owned_by, MPI_Comm comm) {
    int mpi_rank, mpi_size;
    MPI_Comm_rank(comm, &mpi_rank);
    MPI_Comm_size(comm, &mpi_size);

    // Gather gids from all processors
    std::vector<int> recv_counts, recv_displs;
    unsigned int recv_size = set_counts_displs(gids.size(), recv_counts, recv_displs, comm);
    std::vector<id_t> recvbuf(recv_size);

    // Convert counts and displacements to bytes: the gids are transferred as
    // raw bytes so the gather works for any width of id_t.
    int* recvcounts_bytes = new int[mpi_size];
    for (int i = 0; i < mpi_size; ++ i)
        recvcounts_bytes[i] = recv_counts[i] * sizeof(id_t);

    int* displs_bytes = new int[mpi_size];
    displs_bytes[0] = 0;
    for (int i = 1; i < mpi_size; ++ i)
        displs_bytes[i] = displs_bytes[i-1] + recvcounts_bytes[i-1];

    MPI_Allgatherv((void *)&gids[0], gids.size()*sizeof(id_t), MPI_BYTE,
                   &recvbuf[0], recvcounts_bytes, displs_bytes, MPI_BYTE, comm);
    delete[] recvcounts_bytes;
    delete[] displs_bytes;

    // Create multimap key: gid, value: ids of processors claiming gid
    std::multimap<id_t,int> procs_claiming_gid;
    std::vector<id_t>::iterator current_elem = recvbuf.begin();
    for (int proc_id = 0; proc_id < mpi_size; ++ proc_id) {
        for (int elem_lid = 0; elem_lid < recv_counts[proc_id]; ++ elem_lid) {
            std::pair<id_t, int> p(*current_elem, proc_id);
            procs_claiming_gid.insert(p);
            ++ current_elem;
        }
    }

    // Determine owning processors for all gids used locally
    int num_owned = 0;
    owned_by.clear();
    for (std::vector<id_t>::const_iterator it = gids.begin(); it != gids.end(); ++ it) {
        std::vector<int> claimers;
        std::pair<std::multimap<id_t,int>::iterator, std::multimap<id_t,int>::iterator> range = procs_claiming_gid.equal_range(*it);
        for (std::multimap<id_t,int>::iterator j = range.first; j != range.second; ++ j)
            claimers.push_back((*j).second);
        int owner = determine_proc_of_this_element(*it, claimers);
        owned_by.push_back(owner);
        if (owner == mpi_rank)
            num_owned += 1;
    }

    return num_owned;
}

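// Usage sketch (hypothetical caller): each rank passes the gids of the mesh
// entities it references,
//
//     std::vector<id_t> gids = ...;   // gids referenced on this rank
//     std::vector<int>  owned_by;
//     int num_owned = set_ownership(gids, owned_by, MPI_COMM_WORLD);
//
// afterwards owned_by[i] is the rank owning gids[i] (identical on every rank
// that shares the gid) and num_owned counts the entries owned locally.
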
void compute_mgid_map(id_t num_global_gids,
                      const std::vector<id_t>& gids,
                      std::vector<id_t>& eliminated_gids,
                      id_t* num_global_mgids,
                      std::map<id_t,id_t>& mgid_map,
                      MPI_Comm comm)
{
    int mpi_size;
    MPI_Comm_size(comm, &mpi_size);

    // Allgatherv eliminated_gids; use the MPI datatype matching id_t rather
    // than assuming that id_t is an int.
    int num_my_eliminated_gids = eliminated_gids.size();
    std::vector<int> recvcnts(mpi_size);
    std::vector<int> displs(mpi_size);
    int sum_eliminated_gids = set_counts_displs(num_my_eliminated_gids, recvcnts, displs, comm);
    std::vector<id_t> eliminated_gid_recv(sum_eliminated_gids);
    MPI_Allgatherv(&eliminated_gids[0], num_my_eliminated_gids, Mpi_DType<id_t>::mpi_dtype,
                   &eliminated_gid_recv[0], &recvcnts[0], &displs[0], Mpi_DType<id_t>::mpi_dtype, comm);
#if 0
    for (int i = 0; i < sum_eliminated_gids; ++ i)
        rDebug("eliminated_gid_recv[%02d]        = %2d", i, eliminated_gid_recv[i]);
#endif

    // Sort, unique
    std::sort(eliminated_gid_recv.begin(), eliminated_gid_recv.end());
#if 0
    for (int i = 0; i < sum_eliminated_gids; ++ i)
        rDebug("eliminated_gid_recv_sort[%02d]   = %2d", i, eliminated_gid_recv[i]);
#endif
    std::vector<id_t>::iterator new_last;
    new_last = std::unique(eliminated_gid_recv.begin(), eliminated_gid_recv.end());
#if 0
    for (int i = 0; i < new_last - eliminated_gid_recv.begin(); ++ i)
        rDebug("eliminated_gid_recv_unique[%02d] = %2d", i, eliminated_gid_recv[i]);
#endif

    // Compute the processor-specific mgid map containing only the gids required by this processor
    mgid_map.clear();
    std::vector<id_t>::iterator insert_pos;
    for (std::vector<id_t>::const_iterator gid_it = gids.begin(); gid_it != gids.end(); ++ gid_it) {
        id_t gid = *gid_it;
        insert_pos = std::lower_bound(eliminated_gid_recv.begin(), new_last, gid);
        if (insert_pos == new_last || *insert_pos != gid) {
            // gid was not eliminated: shift it down by the number of eliminated
            // gids smaller than gid. (The check above tests insert_pos == new_last
            // first, so insert_pos is only dereferenced when it is valid.)
            mgid_map[gid] = gid - (insert_pos - eliminated_gid_recv.begin());
        }
    }
    *num_global_mgids = num_global_gids - (new_last - eliminated_gid_recv.begin());
}

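// Worked example (hypothetical values): with num_global_gids = 6, local
// gids = {0, 2, 3, 5} and globally gathered eliminated gids {1, 4}, the loop
// above yields mgid_map = {0 -> 0, 2 -> 1, 3 -> 2, 5 -> 3} and
// *num_global_mgids = 6 - 2 = 4, i.e. the surviving gids are renumbered
// contiguously from 0.
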
// Gather each rank's element count and build the counts and displacements
// arrays needed for MPI_Allgatherv-style collectives; returns the total count
// over all ranks.
int set_counts_displs(int my_count,
                      std::vector<int>& counts,
                      std::vector<int>& displs,
                      MPI_Comm comm)
{
    int mpi_size;
    MPI_Comm_size(comm, &mpi_size);
    counts.resize(mpi_size);
    displs.resize(mpi_size);
    MPI_Allgather(&my_count, 1, MPI_INT, &counts[0], 1, MPI_INT, comm);

    int size = 0;
    for (int i = 0; i < mpi_size; ++ i)
        size += counts[i];
    displs[0] = 0;
    for (int i = 1; i < mpi_size; ++ i)
        displs[i] = displs[i-1] + counts[i-1];
    return size;
}

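// Worked example (hypothetical values): with my_count = 3, 1, 4 on ranks
// 0, 1, 2 the call fills counts = {3, 1, 4} and displs = {0, 3, 4} on every
// rank and returns 8, exactly the arguments needed by the MPI_Allgatherv
// calls above.
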
} // namespace parallel
