MPIWrapper.hpp
/*
 * STRUMPACK -- STRUctured Matrices PACKage, Copyright (c) 2014, The
 * Regents of the University of California, through Lawrence Berkeley
 * National Laboratory (subject to receipt of any required approvals
 * from the U.S. Dept. of Energy). All rights reserved.
 *
 * If you have questions about your rights to use or distribute this
 * software, please contact Berkeley Lab's Technology Transfer
 * Department at TTD@lbl.gov.
 *
 * NOTICE. This software is owned by the U.S. Department of Energy. As
 * such, the U.S. Government has been granted for itself and others
 * acting on its behalf a paid-up, nonexclusive, irrevocable,
 * worldwide license in the Software to reproduce, prepare derivative
 * works, and perform publicly and display publicly. Beginning five
 * (5) years after the date permission to assert copyright is obtained
 * from the U.S. Department of Energy, and subject to any subsequent
 * five (5) year renewals, the U.S. Government is granted for itself
 * and others acting on its behalf a paid-up, nonexclusive,
 * irrevocable, worldwide license in the Software to reproduce,
 * prepare derivative works, distribute copies to the public, perform
 * publicly and display publicly, and to permit others to do so.
 *
 * Developers: Pieter Ghysels, Francois-Henry Rouet, Xiaoye S. Li.
 * (Lawrence Berkeley National Lab, Computational Research
 * Division).
 *
 */
#ifndef STRUMPACK_MPI_WRAPPER_HPP
#define STRUMPACK_MPI_WRAPPER_HPP

#include <vector>
#include <array>
#include <complex>
#include <cassert>
#include <numeric>
#include <limits>
#include <memory>
#include <utility>
#include <algorithm>
#include <iostream>
#include <string>

#define OMPI_SKIP_MPICXX 1
#include <mpi.h>

#include "StrumpackParameters.hpp"
#include "Triplet.hpp"
namespace strumpack {

  /// Return the MPI_Datatype corresponding to the (scalar) C++ type T.
  template<typename T> MPI_Datatype mpi_type() { return T::mpi_type(); }
  template<> inline MPI_Datatype mpi_type<char>() { return MPI_CHAR; }
  template<> inline MPI_Datatype mpi_type<bool>() { return MPI_CXX_BOOL; }
  template<> inline MPI_Datatype mpi_type<int>() { return MPI_INT; }
  template<> inline MPI_Datatype mpi_type<long>() { return MPI_LONG; }
  template<> inline MPI_Datatype mpi_type<unsigned long>() { return MPI_UNSIGNED_LONG; }
  template<> inline MPI_Datatype mpi_type<long long int>() { return MPI_LONG_LONG_INT; }
  template<> inline MPI_Datatype mpi_type<float>() { return MPI_FLOAT; }
  template<> inline MPI_Datatype mpi_type<double>() { return MPI_DOUBLE; }
  template<> inline MPI_Datatype mpi_type<std::complex<float>>() { return MPI_CXX_FLOAT_COMPLEX; }
  template<> inline MPI_Datatype mpi_type<std::complex<double>>() { return MPI_CXX_DOUBLE_COMPLEX; }
  template<> inline MPI_Datatype mpi_type<std::pair<int,int>>() { return MPI_2INT; }

  /// MPI_Datatype for a pair of long ints, built lazily as a
  /// contiguous type of two longs, committed on first use.
  template<> inline MPI_Datatype mpi_type<std::pair<long int,long int>>() {
    static MPI_Datatype l_l_mpi_type = MPI_DATATYPE_NULL;
    if (l_l_mpi_type == MPI_DATATYPE_NULL) {
      MPI_Type_contiguous
        (2, strumpack::mpi_type<long int>(), &l_l_mpi_type);
      MPI_Type_commit(&l_l_mpi_type);
    }
    return l_l_mpi_type;
  }
  /// MPI_Datatype for a pair of long long ints, see above.
  template<> inline MPI_Datatype mpi_type<std::pair<long long int,long long int>>() {
    static MPI_Datatype ll_ll_mpi_type = MPI_DATATYPE_NULL;
    if (ll_ll_mpi_type == MPI_DATATYPE_NULL) {
      MPI_Type_contiguous
        (2, strumpack::mpi_type<long long int>(), &ll_ll_mpi_type);
      MPI_Type_commit(&ll_ll_mpi_type);
    }
    return ll_ll_mpi_type;
  }

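  /*
   * Usage sketch (illustrative, not part of the original header):
   * mpi_type<T>() lets templated code pass the correct MPI_Datatype
   * without dispatching on T by hand. A hypothetical generic send,
   * assuming MPI has been initialized:
   *
   *   template<typename T>
   *   void send_vector(const std::vector<T>& v, int dest, MPI_Comm c) {
   *     MPI_Send(v.data(), v.size(), strumpack::mpi_type<T>(), dest, 0, c);
   *   }
   *
   * Instantiated with T = std::pair<long,long>, this picks up the
   * committed contiguous type defined above.
   */
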
  /**
   * Wrapper around an MPI_Request object. The request is allocated on
   * the heap, so a pending MPIRequest can be moved without
   * invalidating the address that was handed to MPI.
   */
  class MPIRequest {
  public:
    MPIRequest() {
      req_ = std::unique_ptr<MPI_Request>(new MPI_Request());
    }

    /// Copying a pending request is not allowed.
    MPIRequest(const MPIRequest&) = delete;

    MPIRequest(MPIRequest&&) = default;

    MPIRequest& operator=(const MPIRequest&) = delete;

    MPIRequest& operator=(MPIRequest&&) = default;

    void wait() { MPI_Wait(req_.get(), MPI_STATUS_IGNORE); }

  private:
    std::unique_ptr<MPI_Request> req_;
    friend class MPIComm;
  };

  /// Wait on all requests in the vector, then clear the vector.
  inline void wait_all(std::vector<MPIRequest>& reqs) {
    for (auto& r : reqs) r.wait();
    reqs.clear();
  }

  inline void wait_all(std::vector<MPI_Request>& reqs) {
    MPI_Waitall(reqs.size(), reqs.data(), MPI_STATUSES_IGNORE);
  }

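  /*
   * Usage sketch (illustrative, not part of the original header):
   * posting several non-blocking sends through MPIComm::isend (defined
   * below) and waiting on all of them at once; the matching recv()
   * calls on the peer ranks are omitted for brevity:
   *
   *   strumpack::MPIComm comm;               // wraps MPI_COMM_WORLD
   *   std::vector<strumpack::MPIRequest> reqs;
   *   std::vector<int> msg{1, 2, 3};
   *   for (int p=0; p<comm.size(); p++)
   *     if (p != comm.rank())
   *       reqs.push_back(comm.isend(msg, p, 0));  // tag 0
   *   strumpack::wait_all(reqs);             // all sends completed
   */
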
  /**
   * Wrapper class around an MPI_Comm object. The communicator is
   * duplicated on (copy) construction and freed in the destructor,
   * unless it is MPI_COMM_NULL or MPI_COMM_WORLD.
   */
  class MPIComm {
  public:
    /// Default constructor, wraps (but does not duplicate)
    /// MPI_COMM_WORLD.
    MPIComm() {}

    /// Wrap and duplicate communicator c.
    MPIComm(MPI_Comm c) { duplicate(c); }

    MPIComm(const MPIComm& c) { *this = c; }

    MPIComm(MPIComm&& c) noexcept { *this = std::move(c); }

    virtual ~MPIComm() {
      if (comm_ != MPI_COMM_NULL && comm_ != MPI_COMM_WORLD)
        MPI_Comm_free(&comm_);
    }

    MPIComm& operator=(const MPIComm& c) {
      if (this != &c) duplicate(c.comm());
      return *this;
    }

    MPIComm& operator=(MPIComm&& c) noexcept {
      comm_ = c.comm_;
      c.comm_ = MPI_COMM_NULL;
      return *this;
    }

    MPI_Comm comm() const { return comm_; }

    bool is_null() const { return comm_ == MPI_COMM_NULL; }

    int rank() const {
      assert(comm_ != MPI_COMM_NULL);
      int r;
      MPI_Comm_rank(comm_, &r);
      return r;
    }

    int size() const {
      assert(comm_ != MPI_COMM_NULL);
      int nprocs;
      MPI_Comm_size(comm_, &nprocs);
      return nprocs;
    }

    /// True on the root (rank 0) of this communicator.
    bool is_root() const { return rank() == 0; }

    void barrier() const { MPI_Barrier(comm_); }

    template<typename T> void
    broadcast(std::vector<T>& sbuf) const {
      MPI_Bcast(sbuf.data(), sbuf.size(), mpi_type<T>(), 0, comm_);
    }
    template<typename T> void
    broadcast_from(std::vector<T>& sbuf, int src) const {
      MPI_Bcast(sbuf.data(), sbuf.size(), mpi_type<T>(), src, comm_);
    }

    template<typename T, std::size_t N> void
    broadcast(std::array<T,N>& sbuf) const {
      MPI_Bcast(sbuf.data(), sbuf.size(), mpi_type<T>(), 0, comm_);
    }

    template<typename T> void
    broadcast(T& data) const {
      MPI_Bcast(&data, 1, mpi_type<T>(), 0, comm_);
    }
    template<typename T> void
    broadcast_from(T& data, int src) const {
      MPI_Bcast(&data, 1, mpi_type<T>(), src, comm_);
    }
    template<typename T> void
    broadcast(T* sbuf, std::size_t ssize) const {
      MPI_Bcast(sbuf, ssize, mpi_type<T>(), 0, comm_);
    }
    template<typename T> void
    broadcast_from(T* sbuf, std::size_t ssize, int src) const {
      MPI_Bcast(sbuf, ssize, mpi_type<T>(), src, comm_);
    }

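    /*
     * Usage sketch (illustrative, not part of the original header):
     * broadcasting from the root. All ranks must call broadcast with a
     * buffer of the same size; afterwards every rank holds the root's
     * values. Here comm is some MPIComm:
     *
     *   std::vector<double> v(100);
     *   if (comm.is_root()) std::iota(v.begin(), v.end(), 0.);
     *   comm.broadcast(v);                          // root is rank 0
     *   int flag = comm.rank();
     *   comm.broadcast_from(flag, comm.size()-1);   // root is last rank
     */
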
    /// In-place all-gather: rank p's contribution is read from its own
    /// section of buf (rsize elements at offset p*rsize).
    template<typename T>
    void all_gather(T* buf, std::size_t rsize) const {
      MPI_Allgather
        (MPI_IN_PLACE, 0, MPI_DATATYPE_NULL,
         buf, rsize, mpi_type<T>(), comm_);
    }

    template<typename T>
    void all_gather_v(T* buf, const int* rcnts, const int* displs) const {
      MPI_Allgatherv
        (MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, buf, rcnts, displs,
         mpi_type<T>(), comm_);
    }

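    /*
     * Usage sketch (illustrative, comm is some MPIComm): each rank
     * fills its own slot, then gathers everyone's entry in place:
     *
     *   std::vector<int> ids(comm.size());
     *   ids[comm.rank()] = 10 * comm.rank();  // this rank's contribution
     *   comm.all_gather(ids.data(), 1);       // now complete on all ranks
     */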

    template<typename T>
    MPIRequest isend(const std::vector<T>& sbuf, int dest, int tag) const {
      MPIRequest req;
      // const_cast is necessary for ancient openmpi version used on Travis
      MPI_Isend(const_cast<T*>(sbuf.data()), sbuf.size(), mpi_type<T>(),
                dest, tag, comm_, req.req_.get());
      return req;
    }

    template<typename T>
    void isend(const std::vector<T>& sbuf, int dest, int tag,
               MPI_Request* req) const {
      // const_cast is necessary for ancient openmpi version used on Travis
      MPI_Isend(const_cast<T*>(sbuf.data()), sbuf.size(), mpi_type<T>(),
                dest, tag, comm_, req);
    }

    template<typename T>
    void isend(const T* sbuf, std::size_t ssize, int dest,
               int tag, MPI_Request* req) const {
      // const_cast is necessary for ancient openmpi version used on Travis
      MPI_Isend(const_cast<T*>(sbuf), ssize, mpi_type<T>(),
                dest, tag, comm_, req);
    }

    template<typename T>
    void isend(const T& buf, int dest, int tag, MPI_Request* req) const {
      // const_cast is necessary for ancient openmpi version used on Travis
      MPI_Isend(const_cast<T*>(&buf), 1, mpi_type<T>(),
                dest, tag, comm_, req);
    }

    template<typename T>
    void send(const std::vector<T>& sbuf, int dest, int tag) const {
      // const_cast is necessary for ancient openmpi version used on Travis
      MPI_Send(const_cast<T*>(sbuf.data()), sbuf.size(), mpi_type<T>(),
               dest, tag, comm_);
    }

    /// Blocking receive; probes the matching message first so the
    /// returned vector is sized to the actual message length.
    template<typename T> std::vector<T> recv(int src, int tag) const {
      MPI_Status stat;
      MPI_Probe(src, tag, comm_, &stat);
      int msgsize;
      MPI_Get_count(&stat, mpi_type<T>(), &msgsize);
      //std::vector<T,NoInit<T>> rbuf(msgsize);
      std::vector<T> rbuf(msgsize);
      MPI_Recv(rbuf.data(), msgsize, mpi_type<T>(), src, tag,
               comm_, MPI_STATUS_IGNORE);
      return rbuf;
    }

    template<typename T>
    std::pair<int,std::vector<T>> recv_any_src(int tag) const {
      MPI_Status stat;
      MPI_Probe(MPI_ANY_SOURCE, tag, comm_, &stat);
      int msgsize;
      MPI_Get_count(&stat, mpi_type<T>(), &msgsize);
      std::vector<T> rbuf(msgsize);
      MPI_Recv(rbuf.data(), msgsize, mpi_type<T>(), stat.MPI_SOURCE,
               tag, comm_, MPI_STATUS_IGNORE);
      return {stat.MPI_SOURCE, std::move(rbuf)};
    }

    template<typename T> T recv_one(int src, int tag) const {
      T t;
      MPI_Recv(&t, 1, mpi_type<T>(), src, tag, comm_, MPI_STATUS_IGNORE);
      return t;
    }

    template<typename T>
    void irecv(const T* rbuf, std::size_t rsize, int src,
               int tag, MPI_Request* req) const {
      // const_cast is necessary for ancient openmpi version used on Travis
      MPI_Irecv(const_cast<T*>(rbuf), rsize, mpi_type<T>(),
                src, tag, comm_, req);
    }

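    /*
     * Usage sketch (illustrative, comm is some MPIComm): a simple
     * send/recv pair. The receiver does not need to know the message
     * length in advance, recv() probes for it:
     *
     *   if (comm.rank() == 0)
     *     comm.send(std::vector<double>(42, 1.), 1, 99);  // dest 1, tag 99
     *   else if (comm.rank() == 1) {
     *     auto buf = comm.recv<double>(0, 99);            // src 0, tag 99
     *     assert(buf.size() == 42);
     *   }
     */
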
    /// All-reduce a single value with MPI_Op op (e.g. MPI_SUM,
    /// MPI_MAX); the result is returned on every rank.
    template<typename T> T all_reduce(T t, MPI_Op op) const {
      MPI_Allreduce(MPI_IN_PLACE, &t, 1, mpi_type<T>(), op, comm_);
      return t;
    }

    /// Reduce a single value to the root; the returned value is only
    /// meaningful on rank 0.
    template<typename T> T reduce(T t, MPI_Op op) const {
      if (is_root())
        MPI_Reduce(MPI_IN_PLACE, &t, 1, mpi_type<T>(), op, 0, comm_);
      else MPI_Reduce(&t, &t, 1, mpi_type<T>(), op, 0, comm_);
      return t;
    }

    template<typename T> void all_reduce(T* t, int ssize, MPI_Op op) const {
      MPI_Allreduce(MPI_IN_PLACE, t, ssize, mpi_type<T>(), op, comm_);
    }

    template<typename T> void all_reduce(std::vector<T>& t, MPI_Op op) const {
      all_reduce(t.data(), t.size(), op);
    }

    template<typename T> void reduce(T* t, int ssize, MPI_Op op) const {
      if (is_root())
        MPI_Reduce(MPI_IN_PLACE, t, ssize, mpi_type<T>(), op, 0, comm_);
      else MPI_Reduce(t, t, ssize, mpi_type<T>(), op, 0, comm_);
    }

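    /*
     * Usage sketch (illustrative, comm is some MPIComm): global
     * reductions. all_reduce returns the result everywhere, reduce
     * only on the root:
     *
     *   double part = 1. / (comm.rank() + 1);         // per-rank value
     *   double sum = comm.all_reduce(part, MPI_SUM);  // on every rank
     *   int top = comm.reduce(comm.rank(), MPI_MAX);  // valid on root only
     */
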
    template<typename T>
    void all_to_all(const T* sbuf, int scnt, T* rbuf) const {
      MPI_Alltoall
        (sbuf, scnt, mpi_type<T>(), rbuf, scnt, mpi_type<T>(), comm_);
    }

    template<typename T, typename A=std::allocator<T>> std::vector<T,A>
    all_to_allv(const T* sbuf, int* scnts, int* sdispls,
                int* rcnts, int* rdispls) const {
      std::size_t rsize = 0;
      for (int p=0; p<size(); p++)
        rsize += rcnts[p];
      std::vector<T,A> rbuf(rsize);
      MPI_Alltoallv
        (sbuf, scnts, sdispls, mpi_type<T>(),
         rbuf.data(), rcnts, rdispls, mpi_type<T>(), comm_);
      return rbuf;
    }

    template<typename T> void
    all_to_allv(const T* sbuf, int* scnts, int* sdispls,
                T* rbuf, int* rcnts, int* rdispls) const {
      MPI_Alltoallv
        (sbuf, scnts, sdispls, mpi_type<T>(),
         rbuf, rcnts, rdispls, mpi_type<T>(), comm_);
    }

    /// all_to_all_v with automatically computed counts and
    /// displacements: sbuf[p] is the buffer to send to rank p. On
    /// return, rbuf holds all received data and pbuf[p] points to the
    /// part of rbuf that was received from rank p.
    template<typename T, typename A=std::allocator<T>> void
    all_to_all_v(std::vector<std::vector<T>>& sbuf, std::vector<T,A>& rbuf,
                 std::vector<T*>& pbuf) const {
      all_to_all_v(sbuf, rbuf, pbuf, mpi_type<T>());
    }

    /// Same as above, but simply return the receive buffer.
    template<typename T, typename A=std::allocator<T>> std::vector<T,A>
    all_to_all_v(std::vector<std::vector<T>>& sbuf) const {
      std::vector<T,A> rbuf;
      std::vector<T*> pbuf;
      all_to_all_v(sbuf, rbuf, pbuf, mpi_type<T>());
      return rbuf;
    }

    template<typename T, typename A=std::allocator<T>> void
    all_to_all_v(std::vector<std::vector<T>>& sbuf, std::vector<T,A>& rbuf,
                 std::vector<T*>& pbuf, const MPI_Datatype Ttype) const {
      assert(sbuf.size() == std::size_t(size()));
      auto P = size();
      std::unique_ptr<int[]> iwork(new int[4*P]);
      auto ssizes = iwork.get();
      auto rsizes = ssizes + P;
      auto sdispl = ssizes + 2*P;
      auto rdispl = ssizes + 3*P;
      for (int p=0; p<P; p++) {
        if (sbuf[p].size() >
            static_cast<std::size_t>(std::numeric_limits<int>::max())) {
          std::cerr << "# ERROR: 32bit integer overflow in all_to_all_v!!"
                    << std::endl;
          MPI_Abort(comm_, 1);
        }
        ssizes[p] = sbuf[p].size();
      }
      MPI_Alltoall
        (ssizes, 1, mpi_type<int>(), rsizes, 1, mpi_type<int>(), comm_);
      std::size_t totssize = std::accumulate(ssizes, ssizes+P, std::size_t(0)),
        totrsize = std::accumulate(rsizes, rsizes+P, std::size_t(0));
      if (totrsize >
          static_cast<std::size_t>(std::numeric_limits<int>::max()) ||
          totssize >
          static_cast<std::size_t>(std::numeric_limits<int>::max())) {
        // This case will probably cause an overflow in the
        // rdispl/sdispl elements. Here we do the all_to_all_v
        // manually by just using Isend/Irecv. This might be slower
        // than splitting into multiple calls to MPI_Alltoallv
        // (although it avoids a copy from the sbuf).
        rbuf.resize(totrsize);
        std::unique_ptr<MPI_Request[]> reqs(new MPI_Request[2*P]);
        std::size_t displ = 0;
        pbuf.resize(P);
        for (int p=0; p<P; p++) {
          pbuf[p] = rbuf.data() + displ;
          MPI_Irecv(pbuf[p], rsizes[p], Ttype, p, 0, comm_, reqs.get()+p);
          displ += rsizes[p];
        }
        for (int p=0; p<P; p++)
          MPI_Isend
            (sbuf[p].data(), ssizes[p], Ttype, p, 0, comm_, reqs.get()+P+p);
        MPI_Waitall(2*P, reqs.get(), MPI_STATUSES_IGNORE);
        // release the per-destination send buffers
        std::vector<std::vector<T>>().swap(sbuf);
      } else {
        // pack all send buffers into one contiguous buffer, so a
        // single MPI_Alltoallv suffices
        std::unique_ptr<T[]> sendbuf_(new T[totssize]);
        auto sendbuf = sendbuf_.get();
        sdispl[0] = rdispl[0] = 0;
        for (int p=1; p<P; p++) {
          sdispl[p] = sdispl[p-1] + ssizes[p-1];
          rdispl[p] = rdispl[p-1] + rsizes[p-1];
        }
        for (int p=0; p<P; p++)
          std::copy(sbuf[p].begin(), sbuf[p].end(), sendbuf+sdispl[p]);
        std::vector<std::vector<T>>().swap(sbuf);
        rbuf.resize(totrsize);
        MPI_Alltoallv(sendbuf, ssizes, sdispl, Ttype,
                      rbuf.data(), rsizes, rdispl, Ttype, comm_);
        pbuf.resize(P);
        for (int p=0; p<P; p++)
          pbuf[p] = rbuf.data() + rdispl[p];
      }
    }

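    /*
     * Usage sketch (illustrative, comm is some MPIComm): each rank
     * builds one message per destination; all_to_all_v exchanges them
     * and returns a single receive buffer, ordered by source rank.
     * Note that the send buffers are emptied to save memory:
     *
     *   int P = comm.size();
     *   std::vector<std::vector<int>> sbuf(P);
     *   for (int p=0; p<P; p++)
     *     sbuf[p].assign(p+1, comm.rank());  // p+1 copies of my rank
     *   auto rbuf = comm.all_to_all_v(sbuf); // sbuf is now empty
     *   // rbuf holds comm.rank()+1 entries from each of the P sources
     */
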
    /// Return a communicator containing P ranks of this communicator:
    /// ranks P0, P0+stride, ..., P0+(P-1)*stride. Ranks that are not
    /// included get a null communicator. Collective over this
    /// communicator.
    MPIComm sub(int P0, int P, int stride=1) const {
      if (is_null() || size() == 1)
        return MPIComm(MPI_COMM_NULL);
      assert(P0 + (P-1)*stride < size());
      MPIComm sub_comm;
      std::vector<int> sub_ranks(P);
      for (int i=0; i<P; i++)
        sub_ranks[i] = P0 + i*stride;
      MPI_Group group, sub_group;
      MPI_Comm_group(comm_, &group);                          // group of this comm
      MPI_Group_incl(group, P, sub_ranks.data(), &sub_group); // select sub_ranks
      MPI_Comm_create(comm_, sub_group, &sub_comm.comm_);     // comm for sub_group
      MPI_Group_free(&group);
      MPI_Group_free(&sub_group);
      return sub_comm;
    }

    /// Return a communicator containing only rank p of this
    /// communicator; on the other ranks the result wraps
    /// MPI_COMM_NULL. Collective over this communicator.
    // TODO should this return MPI_COMM_SELF instead, and MPI_COMM_NULL
    // only on the other ranks?
    MPIComm sub_self(int p) const {
      if (is_null()) return MPIComm(MPI_COMM_NULL);
      MPIComm c0;
      MPI_Group group, sub_group;
      MPI_Comm_group(comm_, &group);
      MPI_Group_incl(group, 1, &p, &sub_group);
      MPI_Comm_create(comm_, sub_group, &c0.comm_);
      MPI_Group_free(&group);
      MPI_Group_free(&sub_group);
      return c0;
    }

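    /*
     * Usage sketch (illustrative): splitting a communicator into two
     * halves. Every rank must make both (collective) calls; ranks
     * outside a selected group receive a null MPIComm:
     *
     *   strumpack::MPIComm world;   // MPI_COMM_WORLD
     *   int half = world.size() / 2;
     *   auto lo = world.sub(0, half);
     *   auto hi = world.sub(half, world.size() - half);
     *   if (!lo.is_null())
     *     lo.barrier();             // only the first half participates
     */
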
    /// Start profiling (MPI_Pcontrol level 1) for the region name.
    static void control_start(const std::string& name) {
      MPI_Pcontrol(1, name.c_str());
    }
    /// Stop profiling (MPI_Pcontrol level -1) for the region name.
    static void control_stop(const std::string& name) {
      MPI_Pcontrol(-1, name.c_str());
    }

    static bool initialized() {
      int flag;
      MPI_Initialized(&flag);
      return static_cast<bool>(flag);
    }

  private:
    MPI_Comm comm_ = MPI_COMM_WORLD;

    void duplicate(MPI_Comm c) {
      if (c == MPI_COMM_NULL) comm_ = c;
      else MPI_Comm_dup(c, &comm_);
    }
  };


  /// Return the rank in communicator c, which defaults to
  /// MPI_COMM_WORLD.
  inline int mpi_rank(MPI_Comm c=MPI_COMM_WORLD) {
    assert(c != MPI_COMM_NULL);
    int rank;
    MPI_Comm_rank(c, &rank);
    return rank;
  }

  /// Return the number of processes in communicator c, which defaults
  /// to MPI_COMM_WORLD.
  inline int mpi_nprocs(MPI_Comm c=MPI_COMM_WORLD) {
    assert(c != MPI_COMM_NULL);
    int nprocs;
    MPI_Comm_size(c, &nprocs);
    return nprocs;
  }

} // end namespace strumpack

#endif // STRUMPACK_MPI_WRAPPER_HPP