Wrapper class around an MPI_Comm object.
#include <MPIWrapper.hpp>
Public Member Functions

  MPIComm ()
  MPIComm (MPI_Comm c)
  MPIComm (const MPIComm &c)
  MPIComm (MPIComm &&c) noexcept
  virtual ~MPIComm ()
  MPIComm & operator= (const MPIComm &c)
  MPIComm & operator= (MPIComm &&c) noexcept
  MPI_Comm comm () const
  bool is_null () const
  int rank () const
  int size () const
  bool is_root () const
  void barrier () const
  template<typename T> void broadcast (std::vector< T > &sbuf) const
  template<typename T> void broadcast_from (std::vector< T > &sbuf, int src) const
  template<typename T, std::size_t N> void broadcast (std::array< T, N > &sbuf) const
  template<typename T> void broadcast (T &data) const
  template<typename T> void broadcast_from (T &data, int src) const
  template<typename T> void broadcast (T *sbuf, std::size_t ssize) const
  template<typename T> void broadcast_from (T *sbuf, std::size_t ssize, int src) const
  template<typename T> void all_gather (T *buf, std::size_t rsize) const
  void all_gather (std::pair< long int, long int > *buf, std::size_t rsize) const
  void all_gather (std::pair< long long int, long long int > *buf, std::size_t rsize) const
  template<typename T> void all_gather_v (T *buf, const int *rcnts, const int *displs) const
  template<typename T> void gather (T *sbuf, int ssize, int *rbuf, int rsize, int root) const
  template<typename T> void gather_v (T *sbuf, int scnts, T *rbuf, const int *rcnts, const int *displs, int root) const
  template<typename T> MPIRequest isend (const std::vector< T > &sbuf, int dest, int tag) const
  template<typename T> void isend (const std::vector< T > &sbuf, int dest, int tag, MPI_Request *req) const
  template<typename T> void isend (const T *sbuf, std::size_t ssize, int dest, int tag, MPI_Request *req) const
  template<typename T> void send (const T *sbuf, std::size_t ssize, int dest, int tag) const
  template<typename T> void isend (const T &buf, int dest, int tag, MPI_Request *req) const
  template<typename T> void send (const std::vector< T > &sbuf, int dest, int tag) const
  template<typename T> std::vector< T > recv (int src, int tag) const
  template<typename T> std::pair< int, std::vector< T > > recv_any_src (int tag) const
  template<typename T> T recv_one (int src, int tag) const
  template<typename T> void irecv (const T *rbuf, std::size_t rsize, int src, int tag, MPI_Request *req) const
  template<typename T> void recv (const T *rbuf, std::size_t rsize, int src, int tag) const
  template<typename T> T all_reduce (T t, MPI_Op op) const
  template<typename T> T reduce (T t, MPI_Op op) const
  template<typename T> void all_reduce (T *t, int ssize, MPI_Op op) const
  template<typename T> void all_reduce (std::vector< T > &t, MPI_Op op) const
  template<typename T> void reduce (T *t, int ssize, MPI_Op op, int dest=0) const
  template<typename T> void all_to_all (const T *sbuf, int scnt, T *rbuf) const
  template<typename T, typename A = std::allocator<T>> std::vector< T, A > all_to_allv (const T *sbuf, int *scnts, int *sdispls, int *rcnts, int *rdispls) const
  template<typename T> void all_to_allv (const T *sbuf, int *scnts, int *sdispls, T *rbuf, int *rcnts, int *rdispls) const
  template<typename T, typename A = std::allocator<T>> void all_to_all_v (std::vector< std::vector< T > > &sbuf, std::vector< T, A > &rbuf, std::vector< T * > &pbuf) const
  template<typename T, typename A = std::allocator<T>> std::vector< T, A > all_to_all_v (std::vector< std::vector< T > > &sbuf) const
  template<typename T, typename A = std::allocator<T>> void all_to_all_v (std::vector< std::vector< T > > &sbuf, std::vector< T, A > &rbuf, std::vector< T * > &pbuf, const MPI_Datatype Ttype) const
  MPIComm sub (int P0, int P, int stride=1) const
  MPIComm sub_self (int p) const

Static Public Member Functions

  static void control_start (const std::string &name)
  static void control_stop (const std::string &name)
  static bool initialized ()
Wrapper class around an MPI_Comm object.
This is a simple wrapper around an MPI_Comm object. The main reason for this class is to simplify resource management of the MPI_Comm object. An object of class MPIComm owns the MPI_Comm that it stores, and is responsible for freeing it (in the destructor).
A number of simple wrappers around basic MPI calls are provided.
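For illustration, a minimal sketch of how the wrapper is typically used. It assumes MPI is initialized by the caller, and that MPIComm is reachable without additional namespace qualification; adjust the include path and qualification to your installation.

    #include <iostream>
    #include <vector>
    #include <mpi.h>
    #include <MPIWrapper.hpp>

    int main(int argc, char* argv[]) {
      MPI_Init(&argc, &argv);
      {
        MPIComm c;                 // wraps MPI_COMM_WORLD (see the default constructor below)
        if (c.is_root())
          std::cout << "running on " << c.size() << " processes\n";
        std::vector<double> v(10, double(c.rank()));
        c.broadcast(v);            // broadcast v to all ranks (the root is assumed to be the source)
        c.barrier();               // synchronize all ranks
      }                            // ~MPIComm() frees the communicator, except when it is
                                   // MPI_COMM_NULL or MPI_COMM_WORLD
      MPI_Finalize();
      return 0;
    }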

MPIComm () [inline]
Default constructor. This will initialize the encapsulated MPI_Comm to MPI_COMM_WORLD.

MPIComm (MPI_Comm c) [inline]
Constructor from an MPI_Comm. This will DUPLICATE the input communicator c!
Parameters:
  c: the input MPI communicator; it will be duplicated internally and can thus be freed immediately
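A sketch of the ownership semantics this implies; make_row_comm, my_row and my_col are illustrative placeholders, not part of the API.

    // assumes MPI is already initialized
    void make_row_comm(int my_row, int my_col) {
      MPI_Comm row_comm;
      MPI_Comm_split(MPI_COMM_WORLD, /*color=*/my_row, /*key=*/my_col, &row_comm);
      MPIComm rc(row_comm);       // rc stores a DUPLICATE of row_comm
      MPI_Comm_free(&row_comm);   // safe: rc owns an independent duplicate
      // ... use rc ...
    }                             // rc's duplicate is freed by ~MPIComm()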

MPIComm (const MPIComm &c) [inline]
Copy constructor. Will DUPLICATE the underlying MPI_Comm object!
Parameters:
  c: will be copied, not changed

MPIComm (MPIComm &&c) noexcept [inline]
Move constructor.
Parameters:
  c: will be moved from, will be reset to MPI_COMM_NULL

virtual ~MPIComm () [inline]
Virtual destructor. Frees the MPI_Comm object, unless it is MPI_COMM_NULL or MPI_COMM_WORLD.

template<typename T> void all_reduce (T *t, int ssize, MPI_Op op) const [inline]
Compute the reduction of op(t[]_i) over all processes i, where t[] is an array and op can be any MPI_Op; the result is available on all processes. This routine is collective on all ranks in this communicator. See the documentation for MPI_Allreduce. Every element of the array is reduced over the different processes. The operation is performed in-place.
Parameters:
  T: type of the variables to reduce, should have a corresponding mpi_type<T>() implementation
  t: pointer to the array of variables to reduce
  ssize: size of the array to reduce
  op: reduction operator

template<typename T> T all_reduce (T t, MPI_Op op) const [inline]
Compute the reduction of op(t_i) over all processes i, where op can be any MPI_Op; the result is available on all processes. This routine is collective on all ranks in this communicator. See the documentation for MPI_Allreduce.
Parameters:
  T: type of the variable to reduce, should have a corresponding mpi_type<T>() implementation
  t: variable to reduce, passed by value
  op: reduction operator
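A small sketch combining both all_reduce overloads, reusing the includes of the first example; compute_local_error and example_all_reduce are hypothetical names used only for illustration.

    void example_all_reduce(const MPIComm& c) {
      double local_err = compute_local_error();            // hypothetical per-rank helper
      double max_err = c.all_reduce(local_err, MPI_MAX);   // every rank receives the maximum

      std::vector<double> counts(5, 1.0);
      c.all_reduce(counts.data(), int(counts.size()), MPI_SUM);  // in-place, element-wise sum
    }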

template<typename T, typename A = std::allocator<T>> std::vector< T, A > all_to_all_v (std::vector< std::vector< T > > &sbuf) const [inline]
Perform an MPI_Alltoallv. Each rank sends sbuf[i] to process i. The results are received in a single contiguous vector, which is returned. This is collective on this MPI communicator.
Parameters:
  T: type of the data to send; this should have a corresponding mpi_type<T>() implementation or should define T::mpi_type()
  A: allocator to be used for the receive buffer
  sbuf: send buffers (should be size this->size())
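A minimal sketch of this returning overload, where every rank sends one integer to every other rank; the function name is illustrative only.

    void example_all_to_all_v(const MPIComm& c) {
      int P = c.size(), r = c.rank();
      std::vector<std::vector<int>> sbuf(P);
      for (int i = 0; i < P; i++)
        sbuf[i].push_back(r*P + i);        // one message from rank r to rank i
      auto rbuf = c.all_to_all_v(sbuf);    // contiguous receive buffer; here rbuf.size() == P
    }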

template<typename T, typename A = std::allocator<T>> void all_to_all_v (std::vector< std::vector< T > > &sbuf, std::vector< T, A > &rbuf, std::vector< T * > &pbuf) const [inline]
Perform an MPI_Alltoallv. Each rank sends sbuf[i] to process i. The results are received in a single contiguous vector rbuf. pbuf holds pointers into rbuf, with pbuf[i] pointing to the data received from rank i. This is collective on this MPI communicator.
Parameters:
  T: type of the data to send; this should have a corresponding mpi_type<T>() implementation or should define T::mpi_type()
  A: allocator to be used for rbuf
  sbuf: send buffers (should be size this->size())
  rbuf: receive buffer, can be empty, will be allocated
  pbuf: pointers (to positions in rbuf) to where the data received from the different ranks start
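A sketch of the rbuf/pbuf variant, assuming pbuf ends up holding one pointer per rank as described above; the function name is illustrative only.

    void example_all_to_all_v_pbuf(const MPIComm& c) {
      int P = c.size(), r = c.rank();
      std::vector<std::vector<int>> sbuf(P);
      for (int i = 0; i < P; i++)
        sbuf[i].assign(i+1, r);            // send i+1 copies of our rank r to rank i
      std::vector<int> rbuf;               // allocated by the call
      std::vector<int*> pbuf;              // pbuf[i] points at the data received from rank i
      c.all_to_all_v(sbuf, rbuf, pbuf);
      // on this rank, pbuf[i][0 .. r] now hold r+1 copies of the value i,
      // i.e. the data that rank i placed in its sbuf[r]
    }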

template<typename T, typename A = std::allocator<T>> void all_to_all_v (std::vector< std::vector< T > > &sbuf, std::vector< T, A > &rbuf, std::vector< T * > &pbuf, const MPI_Datatype Ttype) const [inline]
Perform an MPI_Alltoallv. Each rank sends sbuf[i] to process i. The results are received in a single contiguous vector rbuf. pbuf holds pointers into rbuf, with pbuf[i] pointing to the data received from rank i. This is collective on this MPI communicator.
Parameters:
  T: type of the data to send
  A: allocator to be used for the receive buffer
  sbuf: send buffers (should be size this->size())
  rbuf: receive buffer, can be empty, will be allocated
  pbuf: pointers (to positions in rbuf) to where the data received from the different ranks start
  Ttype: MPI_Datatype corresponding to the template parameter T

void barrier () const [inline]
Perform a barrier operation. This operation is collective on all the ranks in this communicator.

MPI_Comm comm () const [inline]
Returns the underlying MPI_Comm object.

static void control_start (const std::string &name) [inline]
Call MPI_Pcontrol with level 1 and the string name.

static void control_stop (const std::string &name) [inline]
Call MPI_Pcontrol with level -1 and the string name.
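A brief sketch of bracketing a code region for an MPI profiling tool with these two calls; run_factorization is a hypothetical stand-in for the profiled work.

    MPIComm::control_start("factorization");   // MPI_Pcontrol(1, "factorization")
    run_factorization();                        // hypothetical work to be profiled
    MPIComm::control_stop("factorization");    // MPI_Pcontrol(-1, "factorization")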

bool is_null () const [inline]
Checks whether the underlying MPI_Comm object is MPI_COMM_NULL.

bool is_root () const [inline]
Checks whether the current process is the root of this MPI communicator.

template<typename T> MPIRequest isend (const std::vector< T > &sbuf, int dest, int tag) const [inline]
Non-blocking send of a vector to a destination process, with a certain tag.
Parameters:
  T: template type of the send buffer, should have a corresponding mpi_type<T>() implementation
  sbuf: buffer of type T to be sent
  dest: rank of the destination process in this MPI communicator
  tag: tag to use in the MPI message

template<typename T> void isend (const std::vector< T > &sbuf, int dest, int tag, MPI_Request *req) const [inline]
Non-blocking send of a vector to a destination process, with a certain tag.
Parameters:
  T: template type of the send buffer, should have a corresponding mpi_type<T>() implementation
  sbuf: buffer of type T to be sent
  dest: rank of the destination process in this MPI communicator
  tag: tag to use in the MPI message
  req: MPI request object

MPIComm & operator= (const MPIComm &c) [inline]
Assignment operator. This will DUPLICATE the MPI_Comm object.
Parameters:
  c: object to copy from, will not be modified

MPIComm & operator= (MPIComm &&c) noexcept [inline]
Move assignment operator.
Parameters:
  c: the object to be moved from, will be reset to MPI_COMM_NULL

int rank () const [inline]
Return the current rank in this communicator.

template<typename T> std::vector< T > recv (int src, int tag) const [inline]
Receive a vector of T's from process src, with a given tag. The message size does not need to be known in advance.
Parameters:
  T: template parameter of the vector to receive, should have a corresponding mpi_type<T>() implementation
  src: process to receive from
  tag: tag to match the message
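A sketch pairing the non-blocking isend documented above with this recv. It assumes at least two ranks and completes the send with plain MPI_Wait rather than the MPIRequest wrapper; the function name is illustrative only.

    void example_send_recv(const MPIComm& c) {
      const int tag = 42;
      if (c.rank() == 1) {
        std::vector<double> data(100, 3.14);
        MPI_Request req;
        c.isend(data, /*dest=*/0, tag, &req);      // non-blocking send
        MPI_Wait(&req, MPI_STATUS_IGNORE);         // keep data alive until completion
      } else if (c.rank() == 0) {
        std::vector<double> data = c.recv<double>(/*src=*/1, tag);  // size not known beforehand
        // ... use data ...
      }
    }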

template<typename T> void reduce (T *t, int ssize, MPI_Op op, int dest=0) const [inline]
Compute the reduction of op(t[]_i) over all processes i, where t[] is an array and op can be any MPI_Op; the result ends up on process dest. This routine is collective on all ranks in this communicator. See the documentation for MPI_Reduce. Every element of the array is reduced over the different processes. The operation is performed in-place.
Parameters:
  T: type of the variables to reduce, should have a corresponding mpi_type<T>() implementation
  t: pointer to the array of variables to reduce
  ssize: size of the array to reduce
  op: reduction operator
  dest: rank to reduce to

template<typename T> T reduce (T t, MPI_Op op) const [inline]
Compute the reduction of op(t_i) over all processes i, where op can be any MPI_Op; the result ends up on the root. This routine is collective on all ranks in this communicator. See the documentation for MPI_Reduce (this overload uses dest == 0).
Parameters:
  T: type of the variable to reduce, should have a corresponding mpi_type<T>() implementation
  t: variable to reduce, passed by value
  op: reduction operator
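A small sketch of both reduce overloads, reusing the includes of the first example; some_local_quantity and the function name are hypothetical.

    void example_reduce(const MPIComm& c) {
      double local = some_local_quantity();      // hypothetical per-rank value
      double total = c.reduce(local, MPI_SUM);   // result is meaningful on the root only
      if (c.is_root())
        std::cout << "global sum = " << total << "\n";

      std::vector<int> hist(16, 1);
      c.reduce(hist.data(), int(hist.size()), MPI_SUM, /*dest=*/0);  // in-place, onto rank 0
    }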

template<typename T> void send (const std::vector< T > &sbuf, int dest, int tag) const [inline]
Blocking send of a vector to a destination process, with a certain tag.
Parameters:
  T: template type of the send buffer, should have a corresponding mpi_type<T>() implementation
  sbuf: buffer of type T to be sent
  dest: rank of the destination process in this MPI communicator
  tag: tag to use in the MPI message

int size () const [inline]
Return the size of, i.e., the number of processes in, this communicator. This communicator should not be MPI_COMM_NULL.

MPIComm sub (int P0, int P, int stride=1) const [inline]
Return a subcommunicator with P ranks, starting from rank P0, using stride stride. I.e., ranks (relative to this communicator) [P0 : stride : P0+stride*P) will be included in the new communicator. This operation is collective on all the processes in this communicator.
Parameters:
  P0: first rank in the new communicator
  P: number of ranks in the new communicator
  stride: stride between ranks in this communicator, determining which ranks go into the new communicator
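A sketch of splitting off a subcommunicator holding the first half of the ranks. It assumes at least two ranks and that ranks outside the selected range receive a communicator for which is_null() returns true (an assumption, not stated in the documentation above); the function name is illustrative only.

    void example_sub(const MPIComm& c) {
      int P = c.size();
      MPIComm half = c.sub(0, P/2);   // ranks [0, P/2) of c, stride 1; collective on all ranks of c
      if (!half.is_null()) {
        // only the ranks included in the new communicator use it here (assumption)
        std::vector<double> buf(8);
        half.broadcast(buf);
      }
    }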