MPIWrapper.hpp
/*
 * STRUMPACK -- STRUctured Matrices PACKage, Copyright (c) 2014, The
 * Regents of the University of California, through Lawrence Berkeley
 * National Laboratory (subject to receipt of any required approvals
 * from the U.S. Dept. of Energy). All rights reserved.
 *
 * If you have questions about your rights to use or distribute this
 * software, please contact Berkeley Lab's Technology Transfer
 * Department at TTD@lbl.gov.
 *
 * NOTICE. This software is owned by the U.S. Department of Energy. As
 * such, the U.S. Government has been granted for itself and others
 * acting on its behalf a paid-up, nonexclusive, irrevocable,
 * worldwide license in the Software to reproduce, prepare derivative
 * works, and perform publicly and display publicly. Beginning five
 * (5) years after the date permission to assert copyright is obtained
 * from the U.S. Department of Energy, and subject to any subsequent
 * five (5) year renewals, the U.S. Government is granted for itself
 * and others acting on its behalf a paid-up, nonexclusive,
 * irrevocable, worldwide license in the Software to reproduce,
 * prepare derivative works, distribute copies to the public, perform
 * publicly and display publicly, and to permit others to do so.
 *
 * Developers: Pieter Ghysels, Francois-Henry Rouet, Xiaoye S. Li.
 * (Lawrence Berkeley National Lab, Computational Research
 * Division).
 *
 */
#ifndef STRUMPACK_MPI_WRAPPER_HPP
#define STRUMPACK_MPI_WRAPPER_HPP

#include <vector>
#include <array>
#include <string>
#include <complex>
#include <cassert>
#include <numeric>
#include <algorithm>
#include <iostream>
#include <limits>
#include <memory>
#include <utility>

// skip the deprecated MPI C++ bindings in Open MPI
#define OMPI_SKIP_MPICXX 1
#include <mpi.h>

#include "StrumpackParameters.hpp"
#include "Triplet.hpp"

namespace strumpack {

  template<typename T> MPI_Datatype mpi_type() { return T::mpi_type(); }
  template<> inline MPI_Datatype mpi_type<char>() { return MPI_CHAR; }
  template<> inline MPI_Datatype mpi_type<bool>() { return MPI_CXX_BOOL; }
  template<> inline MPI_Datatype mpi_type<int>() { return MPI_INT; }
  template<> inline MPI_Datatype mpi_type<long>() { return MPI_LONG; }
  template<> inline MPI_Datatype mpi_type<unsigned long>() { return MPI_UNSIGNED_LONG; }
  template<> inline MPI_Datatype mpi_type<long long int>() { return MPI_LONG_LONG_INT; }
  template<> inline MPI_Datatype mpi_type<float>() { return MPI_FLOAT; }
  template<> inline MPI_Datatype mpi_type<double>() { return MPI_DOUBLE; }
  template<> inline MPI_Datatype mpi_type<std::complex<float>>() { return MPI_CXX_FLOAT_COMPLEX; }
  template<> inline MPI_Datatype mpi_type<std::complex<double>>() { return MPI_CXX_DOUBLE_COMPLEX; }
  template<> inline MPI_Datatype mpi_type<std::pair<int,int>>() { return MPI_2INT; }

  template<> inline MPI_Datatype mpi_type<std::pair<long int,long int>>() {
    static MPI_Datatype l_l_mpi_type = MPI_DATATYPE_NULL;
    if (l_l_mpi_type == MPI_DATATYPE_NULL) {
      MPI_Type_contiguous
        (2, strumpack::mpi_type<long int>(), &l_l_mpi_type);
      MPI_Type_commit(&l_l_mpi_type);
    }
    return l_l_mpi_type;
  }
  template<> inline MPI_Datatype mpi_type<std::pair<long long int,long long int>>() {
    static MPI_Datatype ll_ll_mpi_type = MPI_DATATYPE_NULL;
    if (ll_ll_mpi_type == MPI_DATATYPE_NULL) {
      MPI_Type_contiguous
        (2, strumpack::mpi_type<long long int>(), &ll_ll_mpi_type);
      MPI_Type_commit(&ll_ll_mpi_type);
    }
    return ll_ll_mpi_type;
  }

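  // A minimal usage sketch (comment only, not part of the header):
  // mpi_type<T>() maps a C++ type to the matching MPI_Datatype, so
  // generic code never has to spell out MPI type constants.
  //
  //   std::vector<double> v(100, 1.0);
  //   MPI_Bcast(v.data(), v.size(), strumpack::mpi_type<double>(),
  //             0, MPI_COMM_WORLD);
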
  class MPIRequest {
  public:
    MPIRequest() {
      req_ = std::unique_ptr<MPI_Request>(new MPI_Request());
    }

    // Copying is not allowed: a request refers to a single pending
    // operation.
    MPIRequest(const MPIRequest&) = delete;

    MPIRequest(MPIRequest&&) = default;

    MPIRequest& operator=(const MPIRequest&) = delete;

    MPIRequest& operator=(MPIRequest&&) = default;

    // Block until the pending operation has completed.
    void wait() { MPI_Wait(req_.get(), MPI_STATUS_IGNORE); }

  private:
    std::unique_ptr<MPI_Request> req_;
    friend class MPIComm;
  };

  inline void wait_all(std::vector<MPIRequest>& reqs) {
    for (auto& r : reqs) r.wait();
    reqs.clear();
  }

  inline void wait_all(std::vector<MPI_Request>& reqs) {
    MPI_Waitall(reqs.size(), reqs.data(), MPI_STATUSES_IGNORE);
  }

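  // Usage sketch (comment only), assuming an MPIComm `comm` as
  // defined below and at least two ranks: isend() returns an
  // MPIRequest whose completion can be awaited one at a time or all
  // at once with wait_all().
  //
  //   std::vector<strumpack::MPIRequest> reqs;
  //   std::vector<int> msg{1, 2, 3};
  //   if (comm.rank() == 0)
  //     reqs.push_back(comm.isend(msg, /*dest*/1, /*tag*/0));
  //   if (comm.rank() == 1) {
  //     auto data = comm.recv<int>(/*src*/0, /*tag*/0);
  //   }
  //   strumpack::wait_all(reqs);  // also clears reqs
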

  class MPIComm {
  public:
    // Default constructor: wraps MPI_COMM_WORLD (not duplicated).
    MPIComm() {}

    // Wrap c, duplicating it with MPI_Comm_dup (unless it is null).
    MPIComm(MPI_Comm c) { duplicate(c); }

    MPIComm(const MPIComm& c) { *this = c; }

    MPIComm(MPIComm&& c) noexcept { *this = std::move(c); }

    // Frees the wrapped communicator, unless it is MPI_COMM_NULL or
    // MPI_COMM_WORLD.
    virtual ~MPIComm() {
      if (comm_ != MPI_COMM_NULL && comm_ != MPI_COMM_WORLD)
        MPI_Comm_free(&comm_);
    }

    MPIComm& operator=(const MPIComm& c) {
      if (this != &c) duplicate(c.comm());
      return *this;
    }

    MPIComm& operator=(MPIComm&& c) noexcept {
      comm_ = c.comm_;
      c.comm_ = MPI_COMM_NULL;
      return *this;
    }

    MPI_Comm comm() const { return comm_; }

    bool is_null() const { return comm_ == MPI_COMM_NULL; }

    int rank() const {
      assert(comm_ != MPI_COMM_NULL);
      int r;
      MPI_Comm_rank(comm_, &r);
      return r;
    }

    int size() const {
      assert(comm_ != MPI_COMM_NULL);
      int nprocs;
      MPI_Comm_size(comm_, &nprocs);
      return nprocs;
    }

    bool is_root() const { return rank() == 0; }

    void barrier() const { MPI_Barrier(comm_); }

    template<typename T> void
    broadcast(std::vector<T>& sbuf) const {
      MPI_Bcast(sbuf.data(), sbuf.size(), mpi_type<T>(), 0, comm_);
    }
    template<typename T> void
    broadcast_from(std::vector<T>& sbuf, int src) const {
      MPI_Bcast(sbuf.data(), sbuf.size(), mpi_type<T>(), src, comm_);
    }

    template<typename T, std::size_t N> void
    broadcast(std::array<T,N>& sbuf) const {
      MPI_Bcast(sbuf.data(), sbuf.size(), mpi_type<T>(), 0, comm_);
    }

    template<typename T> void
    broadcast(T& data) const {
      MPI_Bcast(&data, 1, mpi_type<T>(), 0, comm_);
    }
    template<typename T> void
    broadcast_from(T& data, int src) const {
      MPI_Bcast(&data, 1, mpi_type<T>(), src, comm_);
    }
    template<typename T> void
    broadcast(T* sbuf, std::size_t ssize) const {
      MPI_Bcast(sbuf, ssize, mpi_type<T>(), 0, comm_);
    }

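    // Usage sketch (comment only): the broadcast() overloads send
    // from rank 0; the *_from variants take an explicit root. Vector
    // buffers must already have the same size on every rank.
    //
    //   int n = comm.is_root() ? 42 : 0;
    //   comm.broadcast(n);               // now n == 42 everywhere
    //   std::vector<double> x(n);
    //   comm.broadcast_from(x, /*src*/0);
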
    template<typename T>
    void all_gather(T* buf, std::size_t rsize) const {
      MPI_Allgather
        (MPI_IN_PLACE, 0, MPI_DATATYPE_NULL,
         buf, rsize, mpi_type<T>(), comm_);
    }

    template<typename T>
    void all_gather_v(T* buf, const int* rcnts, const int* displs) const {
      MPI_Allgatherv
        (MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, buf, rcnts, displs,
         mpi_type<T>(), comm_);
    }

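    // Usage sketch (comment only): these are in-place gathers
    // (MPI_IN_PLACE), so each rank writes its own contribution into
    // its slot of the buffer before the call.
    //
    //   std::vector<int> ranks(comm.size());
    //   ranks[comm.rank()] = comm.rank();
    //   comm.all_gather(ranks.data(), 1);  // every rank gets 0,1,...,P-1
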
    template<typename T>
    MPIRequest isend(const std::vector<T>& sbuf, int dest, int tag) const {
      MPIRequest req;
      // const_cast is necessary for ancient openmpi version used on Travis
      MPI_Isend(const_cast<T*>(sbuf.data()), sbuf.size(), mpi_type<T>(),
                dest, tag, comm_, req.req_.get());
      return req;
    }

    template<typename T>
    void isend(const std::vector<T>& sbuf, int dest, int tag,
               MPI_Request* req) const {
      // const_cast is necessary for ancient openmpi version used on Travis
      MPI_Isend(const_cast<T*>(sbuf.data()), sbuf.size(), mpi_type<T>(),
                dest, tag, comm_, req);
    }

    template<typename T>
    void isend(const T* sbuf, std::size_t ssize, int dest,
               int tag, MPI_Request* req) const {
      // const_cast is necessary for ancient openmpi version used on Travis
      MPI_Isend(const_cast<T*>(sbuf), ssize, mpi_type<T>(),
                dest, tag, comm_, req);
    }

    template<typename T>
    void isend(const T& buf, int dest, int tag, MPI_Request* req) const {
      // const_cast is necessary for ancient openmpi version used on Travis
      MPI_Isend(const_cast<T*>(&buf), 1, mpi_type<T>(),
                dest, tag, comm_, req);
    }

    template<typename T>
    void send(const std::vector<T>& sbuf, int dest, int tag) const {
      // const_cast is necessary for ancient openmpi version used on Travis
      MPI_Send(const_cast<T*>(sbuf.data()), sbuf.size(), mpi_type<T>(),
               dest, tag, comm_);
    }

    template<typename T> std::vector<T> recv(int src, int tag) const {
      MPI_Status stat;
      MPI_Probe(src, tag, comm_, &stat);
      int msgsize;
      MPI_Get_count(&stat, mpi_type<T>(), &msgsize);
      //std::vector<T,NoInit<T>> rbuf(msgsize);
      std::vector<T> rbuf(msgsize);
      MPI_Recv(rbuf.data(), msgsize, mpi_type<T>(), src, tag,
               comm_, MPI_STATUS_IGNORE);
      return rbuf;
    }

    template<typename T>
    std::pair<int,std::vector<T>> recv_any_src(int tag) const {
      MPI_Status stat;
      MPI_Probe(MPI_ANY_SOURCE, tag, comm_, &stat);
      int msgsize;
      MPI_Get_count(&stat, mpi_type<T>(), &msgsize);
      std::vector<T> rbuf(msgsize);
      MPI_Recv(rbuf.data(), msgsize, mpi_type<T>(), stat.MPI_SOURCE,
               tag, comm_, MPI_STATUS_IGNORE);
      return {stat.MPI_SOURCE, std::move(rbuf)};
    }

    template<typename T> T recv_one(int src, int tag) const {
      T t;
      MPI_Recv(&t, 1, mpi_type<T>(), src, tag, comm_, MPI_STATUS_IGNORE);
      return t;
    }

    template<typename T>
    void irecv(const T* rbuf, std::size_t rsize, int src,
               int tag, MPI_Request* req) const {
      // const_cast is necessary for ancient openmpi version used on Travis
      MPI_Irecv(const_cast<T*>(rbuf), rsize, mpi_type<T>(),
                src, tag, comm_, req);
    }

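    // Usage sketch (comment only): recv() probes for the message
    // size first, so sender and receiver do not have to agree on a
    // count beforehand; recv_one() is the scalar shortcut.
    //
    //   if (comm.rank() == 0)
    //     comm.send(std::vector<double>{1.0, 2.0}, /*dest*/1, /*tag*/9);
    //   else if (comm.rank() == 1) {
    //     auto x = comm.recv<double>(/*src*/0, /*tag*/9);  // x.size() == 2
    //   }
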
    template<typename T> T all_reduce(T t, MPI_Op op) const {
      MPI_Allreduce(MPI_IN_PLACE, &t, 1, mpi_type<T>(), op, comm_);
      return t;
    }

    template<typename T> T reduce(T t, MPI_Op op) const {
      if (is_root())
        MPI_Reduce(MPI_IN_PLACE, &t, 1, mpi_type<T>(), op, 0, comm_);
      else MPI_Reduce(&t, &t, 1, mpi_type<T>(), op, 0, comm_);
      // the result is only meaningful on the root; MPI ignores the
      // receive buffer on non-root ranks
      return t;
    }

    template<typename T> void all_reduce(T* t, int ssize, MPI_Op op) const {
      MPI_Allreduce(MPI_IN_PLACE, t, ssize, mpi_type<T>(), op, comm_);
    }

    template<typename T> void all_reduce(std::vector<T>& t, MPI_Op op) const {
      all_reduce(t.data(), t.size(), op);
    }

    template<typename T> void reduce(T* t, int ssize, MPI_Op op) const {
      if (is_root())
        MPI_Reduce(MPI_IN_PLACE, t, ssize, mpi_type<T>(), op, 0, comm_);
      else MPI_Reduce(t, t, ssize, mpi_type<T>(), op, 0, comm_);
    }

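    // Usage sketch (comment only): all_reduce() returns the combined
    // value on every rank; reduce() combines onto the root (rank 0),
    // and on other ranks the returned value is just the local input.
    //
    //   double local = 1.0;  // some per-rank value
    //   double total = comm.all_reduce(local, MPI_SUM);
    //   int max_rank = comm.reduce(comm.rank(), MPI_MAX);  // valid on root
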
    template<typename T>
    void all_to_all(const T* sbuf, int scnt, T* rbuf) const {
      MPI_Alltoall
        (sbuf, scnt, mpi_type<T>(), rbuf, scnt, mpi_type<T>(), comm_);
    }

    template<typename T, typename A=std::allocator<T>> std::vector<T,A>
    all_to_allv(const T* sbuf, int* scnts, int* sdispls,
                int* rcnts, int* rdispls) const {
      std::size_t rsize = 0;
      for (int p=0; p<size(); p++)
        rsize += rcnts[p];
      std::vector<T,A> rbuf(rsize);
      MPI_Alltoallv
        (sbuf, scnts, sdispls, mpi_type<T>(),
         rbuf.data(), rcnts, rdispls, mpi_type<T>(), comm_);
      return rbuf;
    }

    template<typename T> void
    all_to_allv(const T* sbuf, int* scnts, int* sdispls,
                T* rbuf, int* rcnts, int* rdispls) const {
      MPI_Alltoallv
        (sbuf, scnts, sdispls, mpi_type<T>(),
         rbuf, rcnts, rdispls, mpi_type<T>(), comm_);
    }

    template<typename T, typename A=std::allocator<T>> void
    all_to_all_v(std::vector<std::vector<T>>& sbuf, std::vector<T,A>& rbuf,
                 std::vector<T*>& pbuf) const {
      all_to_all_v(sbuf, rbuf, pbuf, mpi_type<T>());
    }

    template<typename T, typename A=std::allocator<T>> std::vector<T,A>
    all_to_all_v(std::vector<std::vector<T>>& sbuf) const {
      std::vector<T,A> rbuf;
      std::vector<T*> pbuf;
      all_to_all_v(sbuf, rbuf, pbuf, mpi_type<T>());
      return rbuf;
    }

    template<typename T, typename A=std::allocator<T>> void
    all_to_all_v(std::vector<std::vector<T>>& sbuf, std::vector<T,A>& rbuf,
                 std::vector<T*>& pbuf, const MPI_Datatype Ttype) const {
      assert(sbuf.size() == std::size_t(size()));
      auto P = size();
      std::unique_ptr<int[]> iwork(new int[4*P]);
      auto ssizes = iwork.get();
      auto rsizes = ssizes + P;
      auto sdispl = ssizes + 2*P;
      auto rdispl = ssizes + 3*P;
      for (int p=0; p<P; p++) {
        if (sbuf[p].size() >
            static_cast<std::size_t>(std::numeric_limits<int>::max())) {
          std::cerr << "# ERROR: 32bit integer overflow in all_to_all_v!!"
                    << std::endl;
          MPI_Abort(comm_, 1);
        }
        ssizes[p] = sbuf[p].size();
      }
      MPI_Alltoall
        (ssizes, 1, mpi_type<int>(), rsizes, 1, mpi_type<int>(), comm_);
      std::size_t totssize = std::accumulate(ssizes, ssizes+P, std::size_t(0)),
        totrsize = std::accumulate(rsizes, rsizes+P, std::size_t(0));
      if (totrsize >
          static_cast<std::size_t>(std::numeric_limits<int>::max()) ||
          totssize >
          static_cast<std::size_t>(std::numeric_limits<int>::max())) {
        // This case would probably cause an overflow in the
        // rdispl/sdispl elements. Here we do the all_to_all_v
        // manually by just using Isend/Irecv. This might be slower
        // than splitting into multiple calls to MPI_Alltoallv
        // (although it avoids a copy from the sbuf).
        rbuf.resize(totrsize);
        std::unique_ptr<MPI_Request[]> reqs(new MPI_Request[2*P]);
        std::size_t displ = 0;
        pbuf.resize(P);
        for (int p=0; p<P; p++) {
          pbuf[p] = rbuf.data() + displ;
          MPI_Irecv(pbuf[p], rsizes[p], Ttype, p, 0, comm_, reqs.get()+p);
          displ += rsizes[p];
        }
        for (int p=0; p<P; p++)
          MPI_Isend
            (sbuf[p].data(), ssizes[p], Ttype, p, 0, comm_, reqs.get()+P+p);
        MPI_Waitall(2*P, reqs.get(), MPI_STATUSES_IGNORE);
        std::vector<std::vector<T>>().swap(sbuf);  // release send buffers
      } else {
        std::unique_ptr<T[]> sendbuf_(new T[totssize]);
        auto sendbuf = sendbuf_.get();
        sdispl[0] = rdispl[0] = 0;
        for (int p=1; p<P; p++) {
          sdispl[p] = sdispl[p-1] + ssizes[p-1];
          rdispl[p] = rdispl[p-1] + rsizes[p-1];
        }
        for (int p=0; p<P; p++)
          std::copy(sbuf[p].begin(), sbuf[p].end(), sendbuf+sdispl[p]);
        std::vector<std::vector<T>>().swap(sbuf);  // release send buffers
        rbuf.resize(totrsize);
        MPI_Alltoallv(sendbuf, ssizes, sdispl, Ttype,
                      rbuf.data(), rsizes, rdispl, Ttype, comm_);
        pbuf.resize(P);
        for (int p=0; p<P; p++)
          pbuf[p] = rbuf.data() + rdispl[p];
      }
    }

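    // Usage sketch (comment only): build one outgoing buffer per
    // destination rank; the convenience overload returns the
    // concatenated incoming data. Note that sbuf is consumed (its
    // memory is released) by the call.
    //
    //   std::vector<std::vector<int>> sbuf(comm.size());
    //   for (int p=0; p<comm.size(); p++)
    //     sbuf[p].push_back(comm.rank());     // send my rank to everyone
    //   auto rbuf = comm.all_to_all_v(sbuf);  // rbuf == {0, 1, ..., P-1}
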
    MPIComm sub(int P0, int P, int stride=1) const {
      if (is_null() || size() == 1)
        return MPIComm(MPI_COMM_NULL);
      assert(P0 + (P-1)*stride < size());
      MPIComm sub_comm;
      std::vector<int> sub_ranks(P);
      for (int i=0; i<P; i++)
        sub_ranks[i] = P0 + i*stride;
      MPI_Group group, sub_group;
      MPI_Comm_group(comm_, &group);  // get the group of this comm
      MPI_Group_incl(group, P, sub_ranks.data(), &sub_group); // ranks P0, P0+stride, ...
      MPI_Comm_create(comm_, sub_group, &sub_comm.comm_);     // create the sub-communicator
      MPI_Group_free(&group);
      MPI_Group_free(&sub_group);
      return sub_comm;
    }

    // Returns a communicator containing only rank p of this
    // communicator; on all other ranks the returned MPIComm wraps
    // MPI_COMM_NULL, since MPI_Comm_create returns MPI_COMM_NULL on
    // ranks outside the group.
    MPIComm sub_self(int p) const {
      if (is_null()) return MPIComm(MPI_COMM_NULL);
      MPIComm c0;
      MPI_Group group, sub_group;
      MPI_Comm_group(comm_, &group);
      MPI_Group_incl(group, 1, &p, &sub_group);
      MPI_Comm_create(comm_, sub_group, &c0.comm_);
      MPI_Group_free(&group);
      MPI_Group_free(&sub_group);
      return c0;
    }

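    // Usage sketch (comment only): split off the even ranks of a
    // communicator. sub() is collective, so every rank of comm must
    // call it; ranks outside the selection hold a null communicator.
    //
    //   auto even = comm.sub(0, (comm.size() + 1) / 2, /*stride*/2);
    //   if (!even.is_null())
    //     even.barrier();  // only ranks 0, 2, 4, ... participate
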
    static void control_start(const std::string& name) {
      MPI_Pcontrol(1, name.c_str());
    }
    static void control_stop(const std::string& name) {
      MPI_Pcontrol(-1, name.c_str());
    }

    static bool initialized() {
      int flag;
      MPI_Initialized(&flag);
      return static_cast<bool>(flag);
    }

  private:
    MPI_Comm comm_ = MPI_COMM_WORLD;

    void duplicate(MPI_Comm c) {
      if (c == MPI_COMM_NULL) comm_ = c;
      else MPI_Comm_dup(c, &comm_);
    }
  };


  inline int mpi_rank(MPI_Comm c=MPI_COMM_WORLD) {
    assert(c != MPI_COMM_NULL);
    int rank;
    MPI_Comm_rank(c, &rank);
    return rank;
  }

  inline int mpi_nprocs(MPI_Comm c=MPI_COMM_WORLD) {
    assert(c != MPI_COMM_NULL);
    int nprocs;
    MPI_Comm_size(c, &nprocs);
    return nprocs;
  }

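  // Usage sketch (comment only): these free functions mirror
  // MPIComm::rank() and MPIComm::size() for code working with a raw
  // MPI_Comm.
  //
  //   if (strumpack::mpi_rank() == 0)
  //     std::cout << "running on " << strumpack::mpi_nprocs()
  //               << " processes" << std::endl;
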
} // end namespace strumpack

#endif // STRUMPACK_MPI_WRAPPER_HPP