parallel Namespace Reference


Classes

class  Mpi_DType
class  Mpi_DType< int >
class  Mpi_DType< unsigned int >
class  Mpi_DType< unsigned long >

Functions

template<>
int determine_proc_of_this_element< std::pair< id_t, id_t > > (std::pair< id_t, id_t > nodes, std::vector< int > &procs)
template<>
int determine_proc_of_this_element< utility::triple< id_t, id_t, id_t > > (utility::triple< id_t, id_t, id_t > nodes, std::vector< int > &procs)
template<>
int determine_proc_of_this_element< id_t > (id_t node, std::vector< int > &procs)
std::ostream & operator<< (std::ostream &ostr, const utility::triple< id_t, id_t, id_t > &x)
std::ostream & operator<< (std::ostream &ostr, const std::pair< id_t, id_t > &x)
int set_ownership (const std::vector< id_t > &gids, std::vector< int > &owned_by, MPI_Comm comm)
void compute_mgid_map (id_t num_global_gids, const std::vector< id_t > &gids, std::vector< id_t > &eliminated_gids, id_t *num_global_mgids, std::map< id_t, id_t > &mgid_map, MPI_Comm comm)
int set_counts_displs (int my_count, std::vector< int > &counts, std::vector< int > &displs, MPI_Comm comm)
template<typename T>
int determine_proc_of_this_element (T nodes, std::vector< int > &procs)
template<typename T>
int assign_gids (std::vector< T > &elements, std::vector< id_t > &gids, std::vector< int > &elem_counts, std::vector< int > &elem_displs, MPI_Comm comm)
template<typename T>
int assign_gids2 (bool *my_claims, std::vector< T > &elements, std::vector< id_t > &gids, std::vector< int > &elem_counts, std::vector< int > &elem_displs, MPI_Comm comm)
void remove_gids (std::vector< id_t > gids, std::vector< id_t > eliminated_gids, std::vector< id_t > map_elimated)


Function Documentation

template<>
int parallel::determine_proc_of_this_element< std::pair< id_t, id_t > > ( std::pair< id_t, id_t >  nodes,
std::vector< int > &  procs 
)

template<>
int parallel::determine_proc_of_this_element< utility::triple< id_t, id_t, id_t > > ( utility::triple< id_t, id_t, id_t >  nodes,
std::vector< int > &  procs 
)

template<>
int parallel::determine_proc_of_this_element< id_t > ( id_t  node,
std::vector< int > &  procs 
)

std::ostream& parallel::operator<< ( std::ostream &  ostr,
const utility::triple< id_t, id_t, id_t > &  x 
)

Definition at line 34 of file parallel_tools.cpp.

References x.

std::ostream& parallel::operator<< ( std::ostream &  ostr,
const std::pair< id_t, id_t > &  x 
)

Definition at line 42 of file parallel_tools.cpp.

References x.
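
A minimal usage sketch (the edge and face values are illustrative, and utility::triple is assumed to offer a three-argument constructor); the exact output format is whatever parallel_tools.cpp implements:

#include <iostream>
#include <utility>
#include "parallel_tools.h"   // assumed to declare these operators and the id_t typedef

void print_entities() {
    using parallel::operator<<;                        // bring the operators into scope
    std::pair<id_t, id_t> edge(3, 7);                  // an edge as two node gids
    utility::triple<id_t, id_t, id_t> face(3, 7, 11);  // a face as three node gids (assumed ctor)
    std::cout << edge << " " << face << std::endl;
}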

int parallel::set_ownership ( const std::vector< id_t > &  gids,
std::vector< int > &  owned_by,
MPI_Comm  comm 
)

Determine ownership of global ids.

The owner of a global id is chosen randomly among the eligible processors.

Must be called collectively.

Parameters:
[in] gids Vector of gids which could potentially be owned by the calling processor.
[out] owned_by Vector storing the id of the owning processor for each gid in gids.
Returns:
Number of locally owned gids.

Definition at line 49 of file parallel_tools.cpp.

References determine_proc_of_this_element(), and set_counts_displs().

Referenced by mesh::ParallelTetMesh::set_point_ownership().

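A minimal usage sketch, assuming parallel_tools.h declares set_ownership and the id_t typedef; every rank of comm must take part in the call:

#include <mpi.h>
#include <vector>
#include "parallel_tools.h"   // assumed to declare parallel::set_ownership and id_t

// Called collectively on every rank of comm.
int claim_gids(const std::vector<id_t>& candidate_gids, MPI_Comm comm) {
    std::vector<int> owned_by;   // filled with the owning rank for each gid
    int num_owned = parallel::set_ownership(candidate_gids, owned_by, comm);
    // owned_by[i] is the rank that owns candidate_gids[i];
    // num_owned counts the entries owned by the calling rank.
    return num_owned;
}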

void parallel::compute_mgid_map ( id_t  num_global_gids,
const std::vector< id_t > &  gids,
std::vector< id_t > &  eliminated_gids,
id_t *  num_global_mgids,
std::map< id_t, id_t > &  mgid_map,
MPI_Comm  comm 
)

Compute the mapping of original gids to effective gids after the elimination of some gids.

This function can be used to compute the mapping for boundary conditions.

Must be called collectively.

Parameters:
[in] num_global_gids Number of global gids.
[in] gids Vector of gids used on the local processor (owned by local or remote processors)
[in] eliminated_gids Vector of gids which need to be eliminated.
[out] num_global_mgids Number of global mapped ids (mgids) on output.
[out] mgid_map Map that transforms all locally used gids to mgids, mgid = mgid_map[gid], on output.
[in] comm MPI communicator.
Author:
Roman Geus

Definition at line 101 of file parallel_tools.cpp.

References set_counts_displs().

Referenced by mesh::ParallelTetMesh::set_edge_gids(), mesh::ParallelTetMesh::set_face_gids(), NedelecMesh::set_map(), and LagrangeMesh::set_map().

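A sketch of the boundary-condition use mentioned above (function and variable names are illustrative): gids fixed by Dirichlet conditions are eliminated and the remaining gids are renumbered compactly.

#include <mpi.h>
#include <map>
#include <vector>
#include "parallel_tools.h"   // assumed to declare parallel::compute_mgid_map and id_t

// Called collectively on every rank of comm.
void renumber_dofs(id_t num_global_gids,
                   const std::vector<id_t>& local_gids,
                   std::vector<id_t>& dirichlet_gids,
                   MPI_Comm comm) {
    id_t num_global_mgids = 0;
    std::map<id_t, id_t> mgid_map;
    parallel::compute_mgid_map(num_global_gids, local_gids, dirichlet_gids,
                               &num_global_mgids, mgid_map, comm);
    // Any locally used gid can now be translated: id_t mgid = mgid_map[gid];
}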

int parallel::set_counts_displs ( int  my_count,
std::vector< int > &  counts,
std::vector< int > &  displs,
MPI_Comm  comm 
)

Compute the counts and displs vectors from each processor's local count.

Must be called collectively.

Returns:
Sum of all counts.

Definition at line 152 of file parallel_tools.cpp.

Referenced by assign_gids(), assign_gids2(), compute_mgid_map(), NedelecMesh::generate_sorted_AM(), mesh::ParallelTetMesh::set_edge_gids(), mesh::ParallelTetMesh::set_face_gids(), and set_ownership().
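
A sketch of the usual pattern this helper supports (buffer names are illustrative): gather each rank's count, build the displacements, then feed both into MPI_Allgatherv.

#include <mpi.h>
#include <vector>
#include "parallel_tools.h"   // assumed to declare parallel::set_counts_displs

// Gather variable-sized contributions from all ranks (called collectively).
std::vector<int> gather_all(std::vector<int>& my_data, MPI_Comm comm) {
    std::vector<int> counts, displs;
    int total = parallel::set_counts_displs(static_cast<int>(my_data.size()),
                                            counts, displs, comm);
    std::vector<int> all_data(total);
    // &v[0] assumes non-empty vectors, which holds when every rank contributes data.
    MPI_Allgatherv(&my_data[0], static_cast<int>(my_data.size()), MPI_INT,
                   &all_data[0], &counts[0], &displs[0], MPI_INT, comm);
    return all_data;
}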

template<typename T>
int parallel::determine_proc_of_this_element ( T  nodes,
std::vector< int > &  procs 
)

Referenced by set_ownership().

template<typename T>
int parallel::assign_gids ( std::vector< T > &  elements,
std::vector< id_t > &  gids,
std::vector< int > &  elem_counts,
std::vector< int > &  elem_displs,
MPI_Comm  comm 
)

Assign global ids to "elements" (which may be shared among processors) and determine the ownership of each element.

Parameters:
[in,out] elements Vector of elements (like faces and edges). The vector will be sorted in-place. Each element is assumed to be unique.
[out] gids Global ids of elements. The order of the gids corresponds to the rearranged ordering of elements.
[out] elem_counts Number of elements owned by each processor.
[out] elem_displs Offsets to the block owned by each processor.
[in] comm MPI Communicator.
Returns:
Number of global ids.
T needs to support comparison for equality and order.

Author:
Dag Evensberget

Vector the size of recvbuf telling which processor owns a particular element

Definition at line 76 of file parallel_tools.h.

References mesh::ID_NONE, rDebugAll, and set_counts_displs().

Referenced by mesh::ParallelTetMesh::set_edge_gids(), and mesh::ParallelTetMesh::set_face_gids().

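A minimal usage sketch for edges represented as pairs of node gids (names are illustrative; parallel_tools.h provides the template, and id_t is assumed to be the project's global-id typedef):

#include <mpi.h>
#include <utility>
#include <vector>
#include "parallel_tools.h"   // defines parallel::assign_gids

// Called collectively: give every distinct edge a global id.
int number_edges(std::vector<std::pair<id_t, id_t> >& local_edges, MPI_Comm comm) {
    std::vector<id_t> gids;                      // gids[i] belongs to local_edges[i] afterwards
    std::vector<int> elem_counts, elem_displs;
    int num_global = parallel::assign_gids(local_edges, gids,
                                           elem_counts, elem_displs, comm);
    // local_edges is now sorted in place; elem_counts[p] / elem_displs[p]
    // describe the block of edges owned by rank p.
    return num_global;                           // total number of distinct edges
}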

template<typename T>
int parallel::assign_gids2 ( bool *  my_claims,
std::vector< T > &  elements,
std::vector< id_t > &  gids,
std::vector< int > &  elem_counts,
std::vector< int > &  elem_displs,
MPI_Comm  comm 
)

Assign global ids to "elements" (which may be shared among processors) and determine the ownership of each element.

Parameters:
[in] my_claims Boolean vector, ...
[in,out] elements Vector of elements (like faces and edges). The vector will be sorted in-place. Each element is assumed to be unique.
[out] gids Global ids of elements. The order of the gids corresponds to the rearranged ordering of elements.
[out] elem_counts Number of elements owned by each processor.
[out] elem_displs Offsets to the block owned by each processor.
[in] comm MPI Communicator.
Returns:
Number of global ids.
T needs to support comparison for equality and order.

Author:
Dag Evensberget

Vector the size of recvbuf telling which processor owns a particular element

Definition at line 258 of file parallel_tools.h.

References mesh::ID_NONE, rDebugAll, rErrorAll, and set_counts_displs().


void parallel::remove_gids ( std::vector< id_t >  gids,
std::vector< id_t >  eliminated_gids,
std::vector< id_t >  map_elimated 
)

