PSCF v1.1
MpiSendRecv.h
#ifdef UTIL_MPI
#ifndef UTIL_MPI_SEND_RECV_H
#define UTIL_MPI_SEND_RECV_H

/*
* Util Package - C++ Utilities for Scientific Computation
*
* Copyright 2010 - 2017, The Regents of the University of Minnesota
* Distributed under the terms of the GNU General Public License.
*/
#include <util/global.h>

#include <util/mpi/MpiTraits.h>
#include <util/containers/DArray.h>
#include <util/containers/DMatrix.h>

namespace Util
{

   // Scalar parameters

   /*
   * Send a single T value.
   */
   template <typename T>
   void send(MPI::Comm& comm, T& data, int dest, int tag)
   {
      if (!MpiTraits<T>::hasType)
         UTIL_THROW("No committed MPI type in send<T>");
      comm.Send(&data, 1, MpiTraits<T>::type, dest, tag);
   }

   /*
   * Receive a single T value.
   */
   template <typename T>
   void recv(MPI::Comm& comm, T& data, int source, int tag)
   {
      if (!MpiTraits<T>::hasType)
         UTIL_THROW("No committed MPI type in recv<T>");
      comm.Recv(&data, 1, MpiTraits<T>::type, source, tag);
   }

   /*
   * Broadcast a single T value.
   */
   template <typename T>
   void bcast(MPI::Intracomm& comm, T& data, int root)
   {
      if (!MpiTraits<T>::hasType)
         UTIL_THROW("No committed MPI type in bcast<T>");
      comm.Bcast(&data, 1, MpiTraits<T>::type, root);
   }

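   // Usage sketch: point-to-point exchange of one double between ranks
   // 0 and 1, assuming comm is a valid MPI::Intracomm; the tag value 0
   // is an arbitrary choice.
   //
   //    double x = 3.14;
   //    if (comm.Get_rank() == 0) {
   //       send<double>(comm, x, 1, 0);
   //    } else if (comm.Get_rank() == 1) {
   //       recv<double>(comm, x, 0, 0);
   //    }
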
   // C Array partial specializations

   /*
   * Send a C-array of T values.
   */
   template <typename T>
   void send(MPI::Comm& comm, T* array, int count, int dest, int tag)
   {
      if (MpiTraits<T>::hasType) {
         comm.Send(array, count, MpiTraits<T>::type, dest, tag);
      } else {
         // Try send<T> by element, in case of explicit specialization.
         // If there is no specialization or type, send<T> throws.
         for (int i = 0; i < count; ++i) {
            send<T>(comm, array[i], dest, tag);
         }
      }
   }

   /*
   * Receive a C-array of T values.
   */
   template <typename T>
   void recv(MPI::Comm& comm, T* array, int count, int source, int tag)
   {
      if (MpiTraits<T>::hasType) {
         comm.Recv(array, count, MpiTraits<T>::type, source, tag);
      } else {
         // Try recv<T> by element, in case of explicit specialization.
         // If there is no specialization or type, recv<T> throws.
         for (int i = 0; i < count; ++i) {
            recv<T>(comm, array[i], source, tag);
         }
      }
   }

   /*
   * Broadcast a C-array of T values.
   */
   template <typename T>
   void bcast(MPI::Intracomm& comm, T* array, int count, int root)
   {
      if (MpiTraits<T>::hasType) {
         comm.Bcast(array, count, MpiTraits<T>::type, root);
      } else {
         // Try bcast<T> by element, in case of explicit specialization.
         // If there is no specialization or type, bcast<T> throws.
         for (int i = 0; i < count; ++i) {
            bcast<T>(comm, array[i], root);
         }
      }
   }

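   // Usage sketch: broadcast a fixed-size C array from rank 0 to all
   // ranks, assuming comm is a valid MPI::Intracomm.
   //
   //    double buffer[8];
   //    bcast<double>(comm, buffer, 8, 0);
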
   // DArray container partial specializations

   /*
   * Send a DArray<T> container.
   */
   template <typename T>
   void send(MPI::Comm& comm, DArray<T>& array, int count, int dest, int tag)
   {
      // Preconditions
      if (!(array.isAllocated())) {
         UTIL_THROW("Cannot send unallocated DArray");
      }
      if (count > array.capacity()) {
         UTIL_THROW("Error: Logical size count > DArray capacity");
      }

      if (MpiTraits<T>::hasType) {
         comm.Send(&array[0], count, MpiTraits<T>::type, dest, tag);
      } else {
         // Try send<T> by element, in case of explicit specialization.
         // If there is no specialization or type, send<T> throws.
         for (int i = 0; i < count; ++i) {
            send<T>(comm, array[i], dest, tag);
         }
      }
   }

   /*
   * Receive a DArray<T> container.
   */
   template <typename T>
   void recv(MPI::Comm& comm, DArray<T>& array, int count, int source, int tag)
   {
      // Preconditions
      if (!(array.isAllocated())) {
         UTIL_THROW("Cannot recv unallocated DArray");
      }
      if (count > array.capacity()) {
         UTIL_THROW("Error: Logical size count > DArray capacity");
      }

      if (MpiTraits<T>::hasType) {
         comm.Recv(&array[0], count, MpiTraits<T>::type, source, tag);
      } else {
         // Try recv<T> by element, in case of explicit specialization.
         // If there is no specialization or type, recv<T> throws.
         for (int i = 0; i < count; ++i) {
            recv<T>(comm, array[i], source, tag);
         }
      }
   }

   /*
   * Broadcast a DArray<T> container.
   */
   template <typename T>
   void bcast(MPI::Intracomm& comm, DArray<T>& array, int count, int root)
   {
      // Preconditions
      if (!(array.isAllocated())) {
         UTIL_THROW("Cannot bcast unallocated DArray");
      }
      if (count > array.capacity()) {
         UTIL_THROW("Error: Logical size count > DArray capacity");
      }

      if (MpiTraits<T>::hasType) {
         comm.Bcast(&array[0], count, MpiTraits<T>::type, root);
      } else {
         // Try bcast<T> by element, in case of explicit specialization.
         // If there is no specialization or type, bcast<T> throws.
         for (int i = 0; i < count; ++i) {
            bcast<T>(comm, array[i], root);
         }
      }
   }

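   // Usage sketch: receive 100 values into a DArray<double>. The array
   // must be allocated with capacity >= count before the call; the
   // source rank and tag values here are arbitrary choices.
   //
   //    DArray<double> a;
   //    a.allocate(100);
   //    recv<double>(comm, a, 100, 0, 0);
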
   // DMatrix container partial specializations

   /*
   * Send a DMatrix<T> container.
   */
   template <typename T>
   void send(MPI::Comm& comm, DMatrix<T>& matrix, int m, int n, int dest, int tag)
   {
      // Preconditions
      if (!(matrix.isAllocated())) {
         UTIL_THROW("Cannot send unallocated DMatrix");
      }
      if (m > matrix.capacity1()) {
         UTIL_THROW("Error: Logical size m > DMatrix<T>::capacity1()");
      }
      if (n > matrix.capacity2()) {
         UTIL_THROW("Error: Logical size n > DMatrix<T>::capacity2()");
      }

      if (MpiTraits<T>::hasType) {
         int mp = matrix.capacity1();
         int np = matrix.capacity2();
         comm.Send(&matrix(0, 0), mp*np, MpiTraits<T>::type, dest, tag);
         // Note: This method sends the entire physical memory block.
      } else {
         // Try send<T> by element, in case of explicit specialization.
         // If there is no specialization or type, send<T> throws.
         int i, j;
         for (i = 0; i < m; ++i) {
            for (j = 0; j < n; ++j) {
               send<T>(comm, matrix(i, j), dest, tag);
            }
         }
      }
   }

   /*
   * Receive a DMatrix<T> container.
   */
   template <typename T>
   void recv(MPI::Comm& comm, DMatrix<T>& matrix, int m, int n,
             int source, int tag)
   {
      // Preconditions
      if (!(matrix.isAllocated())) {
         UTIL_THROW("Cannot recv unallocated DMatrix");
      }
      if (m > matrix.capacity1()) {
         UTIL_THROW("Error: Logical size m > DMatrix<T>::capacity1()");
      }
      if (n > matrix.capacity2()) {
         UTIL_THROW("Error: Logical size n > DMatrix<T>::capacity2()");
      }

      if (MpiTraits<T>::hasType) {
         int mp = matrix.capacity1();
         int np = matrix.capacity2();
         comm.Recv(&matrix(0, 0), mp*np, MpiTraits<T>::type, source, tag);
         // Note: This method receives the entire physical memory block.
      } else {
         // Try recv<T> by element, in case of explicit specialization.
         // If there is no specialization or type, recv<T> throws.
         int i, j;
         for (i = 0; i < m; ++i) {
            for (j = 0; j < n; ++j) {
               recv<T>(comm, matrix(i, j), source, tag);
            }
         }
      }
   }

   /*
   * Broadcast a DMatrix<T> container.
   */
   template <typename T>
   void bcast(MPI::Intracomm& comm, DMatrix<T>& matrix, int m, int n, int root)
   {
      // Preconditions
      if (!(matrix.isAllocated())) {
         UTIL_THROW("Cannot bcast unallocated DMatrix");
      }
      if (m > matrix.capacity1()) {
         UTIL_THROW("Error: Logical size m > DMatrix<T>::capacity1()");
      }
      if (n > matrix.capacity2()) {
         UTIL_THROW("Error: Logical size n > DMatrix<T>::capacity2()");
      }

      if (MpiTraits<T>::hasType) {
         int mp = matrix.capacity1();
         int np = matrix.capacity2();
         comm.Bcast(&matrix(0, 0), mp*np, MpiTraits<T>::type, root);
         // Note: This method broadcasts the entire physical memory block.
      } else {
         // Try bcast<T> by element, in case of explicit specialization.
         // If there is no specialization or type, bcast<T> throws.
         int i, j;
         for (i = 0; i < m; ++i) {
            for (j = 0; j < n; ++j) {
               bcast<T>(comm, matrix(i, j), root);
            }
         }
      }
   }

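   // Usage sketch: broadcast a 4 x 4 DMatrix<double> from rank 0.
   // Because the built-in type branch transfers the entire physical
   // block, every rank should allocate the matrix with identical
   // capacities before the call.
   //
   //    DMatrix<double> matrix;
   //    matrix.allocate(4, 4);
   //    bcast<double>(comm, matrix, 4, 4, 0);
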
   // bool (explicit specializations)

   /*
   * Explicit specialization of send for bool data.
   */
   template <>
   void send<bool>(MPI::Comm& comm, bool& data, int dest, int tag);

   /*
   * Explicit specialization of recv for bool data.
   */
   template <>
   void recv<bool>(MPI::Comm& comm, bool& data, int source, int tag);

   /*
   * Explicit specialization of bcast for bool data.
   */
   template <>
   void bcast<bool>(MPI::Intracomm& comm, bool& data, int root);

   // std::string (explicit specializations)

   /*
   * Explicit specialization of send for std::string data.
   */
   template <>
   void send<std::string>(MPI::Comm& comm, std::string& data, int dest, int tag);

   /*
   * Explicit specialization of recv for std::string data.
   */
   template <>
   void recv<std::string>(MPI::Comm& comm, std::string& data, int source, int tag);

   /*
   * Explicit specialization of bcast for std::string data.
   */
   template <>
   void bcast<std::string>(MPI::Intracomm& comm, std::string& data, int root);

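   // The definitions appear in MpiSendRecv.cpp. As a sketch (not
   // necessarily the actual implementation), a specialization for a
   // type without a committed MPI type can forward through one that
   // has one:
   //
   //    template <>
   //    void send<bool>(MPI::Comm& comm, bool& data, int dest, int tag)
   //    {
   //       int value = data ? 1 : 0;
   //       send<int>(comm, value, dest, tag);
   //    }
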
}
#endif
#endif
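
As a usage sketch, assuming a build with UTIL_MPI defined and the C++ MPI bindings that this header itself uses (the tag values, sizes, and the string "pscf" below are illustrative, not taken from the library), a minimal program that broadcasts a std::string and a DArray<double> from rank 0 might look like:

   #include <util/mpi/MpiSendRecv.h>
   #include <util/containers/DArray.h>
   #include <string>

   int main(int argc, char** argv)
   {
      MPI::Init(argc, argv);
      MPI::Intracomm& comm = MPI::COMM_WORLD;

      // Uses the explicit specialization bcast<std::string>.
      std::string label;
      if (comm.Get_rank() == 0) label = "pscf";
      Util::bcast<std::string>(comm, label, 0);

      // Every rank allocates the container before the bcast call.
      Util::DArray<double> data;
      data.allocate(10);
      if (comm.Get_rank() == 0) {
         for (int i = 0; i < 10; ++i) data[i] = 0.1*i;
      }
      Util::bcast<double>(comm, data, 10, 0);

      MPI::Finalize();
      return 0;
   }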