DOLFINx 0.10.0.0
DOLFINx C++ interface
MPI.h
1// Copyright (C) 2007-2023 Magnus Vikstrøm, Garth N. Wells and Paul T. Kühner
2//
3// This file is part of DOLFINx (https://www.fenicsproject.org)
4//
5// SPDX-License-Identifier: LGPL-3.0-or-later
6
7#pragma once
8
9#include "Timer.h"
10#include "log.h"
11#include "types.h"
12#include <algorithm>
13#include <array>
14#include <cassert>
15#include <complex>
16#include <cstdint>
17#include <dolfinx/graph/AdjacencyList.h>
18#include <numeric>
19#include <set>
20#include <span>
21#include <tuple>
22#include <type_traits>
23#include <utility>
24#include <vector>
25
26#define MPICH_IGNORE_CXX_SEEK 1
27#include <mpi.h>
28
30namespace dolfinx::MPI
31{
32
34enum class tag : int
35{
36 consensus_pcx = 1200,
37 consensus_pex = 1201,
38 consensus_nbx = 1202,
39};
40
43class Comm
44{
45public:
47 explicit Comm(MPI_Comm comm, bool duplicate = true);
48
50 Comm(const Comm& comm) noexcept;
51
53 Comm(Comm&& comm) noexcept;
54
55 // Disable copy assignment operator
56 Comm& operator=(const Comm& comm) = delete;
57
59 Comm& operator=(Comm&& comm) noexcept;
60
62 ~Comm();
63
65 MPI_Comm comm() const noexcept;
66
67private:
68 // MPI communicator
69 MPI_Comm _comm;
70};
71
73int rank(MPI_Comm comm);
74
77int size(MPI_Comm comm);
78
83void check_error(MPI_Comm comm, int code);
84
91constexpr std::array<std::int64_t, 2> local_range(int rank, std::int64_t N,
92 int size)
93{
94 assert(rank >= 0);
95 assert(N >= 0);
96 assert(size > 0);
97
98 // Compute number of items per rank and remainder
99 const std::int64_t n = N / size;
100 const std::int64_t r = N % size;
101
102 // Compute local range
103 if (rank < r)
104 return {rank * (n + 1), rank * (n + 1) + n + 1};
105 else
106 return {rank * n + r, rank * n + r + n};
107}
108
115constexpr int index_owner(int size, std::size_t index, std::size_t N)
116{
117 assert(index < N);
118
119 // Compute number of items per rank and remainder
120 const std::size_t n = N / size;
121 const std::size_t r = N % size;
122
123 if (index < r * (n + 1))
124 {
125 // First r ranks own n + 1 indices
126 return index / (n + 1);
127 }
128 else
129 {
130 // Remaining ranks own n indices
131 return r + (index - r * (n + 1)) / n;
132 }
133}
134
158std::vector<int> compute_graph_edges_pcx(MPI_Comm comm,
159 std::span<const int> edges);
160
186std::vector<int>
187compute_graph_edges_nbx(MPI_Comm comm, std::span<const int> edges,
188 int tag = static_cast<int>(tag::consensus_nbx));
189
209template <typename U>
210std::pair<std::vector<std::int32_t>,
211 std::vector<typename std::remove_reference_t<typename U::value_type>>>
212distribute_to_postoffice(MPI_Comm comm, const U& x,
213 std::array<std::int64_t, 2> shape,
214 std::int64_t rank_offset);
215
237template <typename U>
238std::vector<typename std::remove_reference_t<typename U::value_type>>
239distribute_from_postoffice(MPI_Comm comm, std::span<const std::int64_t> indices,
240 const U& x, std::array<std::int64_t, 2> shape,
241 std::int64_t rank_offset);
242
263template <typename U>
264std::vector<typename std::remove_reference_t<typename U::value_type>>
265distribute_data(MPI_Comm comm0, std::span<const std::int64_t> indices,
266 MPI_Comm comm1, const U& x, int shape1);
267
268template <typename T>
269struct dependent_false : std::false_type
270{
271};
272
274
276template <typename T>
277struct mpi_type_mapping;
278
281template <typename T>
282MPI_Datatype mpi_t = mpi_type_mapping<T>::type;
283
286#define MAP_TO_MPI_TYPE(cpp_t, mpi_t) \
287 template <> \
288 struct mpi_type_mapping<cpp_t> \
289 { \
290 static inline MPI_Datatype type = mpi_t; \
291 };
292
296MAP_TO_MPI_TYPE(float, MPI_FLOAT)
297MAP_TO_MPI_TYPE(double, MPI_DOUBLE)
298MAP_TO_MPI_TYPE(std::complex<float>, MPI_C_FLOAT_COMPLEX)
299MAP_TO_MPI_TYPE(std::complex<double>, MPI_C_DOUBLE_COMPLEX)
300MAP_TO_MPI_TYPE(std::int8_t, MPI_INT8_T)
301MAP_TO_MPI_TYPE(std::int16_t, MPI_INT16_T)
302MAP_TO_MPI_TYPE(std::int32_t, MPI_INT32_T)
303MAP_TO_MPI_TYPE(std::int64_t, MPI_INT64_T)
304MAP_TO_MPI_TYPE(std::uint8_t, MPI_UINT8_T)
305MAP_TO_MPI_TYPE(std::uint16_t, MPI_UINT16_T)
306MAP_TO_MPI_TYPE(std::uint32_t, MPI_UINT32_T)
307MAP_TO_MPI_TYPE(std::uint64_t, MPI_UINT64_T)
310
311//---------------------------------------------------------------------------
312template <typename U>
313std::pair<std::vector<std::int32_t>,
314 std::vector<typename std::remove_reference_t<typename U::value_type>>>
315distribute_to_postoffice(MPI_Comm comm, const U& x,
316 std::array<std::int64_t, 2> shape,
317 std::int64_t rank_offset)
318{
319 assert(rank_offset >= 0 or x.empty());
320 using T = typename std::remove_reference_t<typename U::value_type>;
321
322 const int size = dolfinx::MPI::size(comm);
323 const int rank = dolfinx::MPI::rank(comm);
324 assert(x.size() % shape[1] == 0);
325 const std::int32_t shape0_local = x.size() / shape[1];
326
327 spdlog::debug("Sending data to post offices (distribute_to_postoffice)");
328
329 // Post office ranks will receive data from this rank
330 std::vector<int> row_to_dest(shape0_local);
331 for (std::int32_t i = 0; i < shape0_local; ++i)
332 {
333 int dest = MPI::index_owner(size, i + rank_offset, shape[0]);
334 row_to_dest[i] = dest;
335 }
336
337 // Build list of (dest, positions) for each row that doesn't belong to
338 // this rank, then sort
339 std::vector<std::array<std::int32_t, 2>> dest_to_index;
340 dest_to_index.reserve(shape0_local);
341 for (std::int32_t i = 0; i < shape0_local; ++i)
342 {
343 std::size_t idx = i + rank_offset;
344 if (int dest = MPI::index_owner(size, idx, shape[0]); dest != rank)
345 dest_to_index.push_back({dest, i});
346 }
347 std::ranges::sort(dest_to_index);
348
349 // Build list of neighbour dest (post office) ranks and count number
350 // of items (rows of x) to send to each post office (by neighbourhood rank)
351 std::vector<int> dest;
352 std::vector<std::int32_t> num_items_per_dest,
353 pos_to_neigh_rank(shape0_local, -1);
354 {
355 auto it = dest_to_index.begin();
356 while (it != dest_to_index.end())
357 {
358 const int neigh_rank = dest.size();
359
360 // Store global rank
361 dest.push_back((*it)[0]);
362
363 // Find iterator to next global rank
364 auto it1
365 = std::find_if(it, dest_to_index.end(),
366 [r = dest.back()](auto& idx) { return idx[0] != r; });
367
368 // Store number of items for current rank
369 num_items_per_dest.push_back(std::distance(it, it1));
370
371 // Map from local x index to local destination rank
372 for (auto e = it; e != it1; ++e)
373 pos_to_neigh_rank[(*e)[1]] = neigh_rank;
374
375 // Advance iterator
376 it = it1;
377 }
378 }
379
380 // Determine source ranks
381 const std::vector<int> src = MPI::compute_graph_edges_nbx(comm, dest);
382 spdlog::info(
383 "Number of neighbourhood source ranks in distribute_to_postoffice: {}",
384 static_cast<int>(src.size()));
385
386 // Create neighbourhood communicator for sending data to post offices
387 MPI_Comm neigh_comm;
388 int err = MPI_Dist_graph_create_adjacent(
389 comm, src.size(), src.data(), MPI_UNWEIGHTED, dest.size(), dest.data(),
390 MPI_UNWEIGHTED, MPI_INFO_NULL, false, &neigh_comm);
391 dolfinx::MPI::check_error(comm, err);
392
393 // Compute send displacements
394 std::vector<std::int32_t> send_disp = {0};
395 std::partial_sum(num_items_per_dest.begin(), num_items_per_dest.end(),
396 std::back_inserter(send_disp));
397
398 // Pack send buffers
399 std::vector<T> send_buffer_data(shape[1] * send_disp.back());
400 std::vector<std::int64_t> send_buffer_index(send_disp.back());
401 {
402 std::vector<std::int32_t> send_offsets = send_disp;
403 for (std::int32_t i = 0; i < shape0_local; ++i)
404 {
405 if (int neigh_dest = pos_to_neigh_rank[i]; neigh_dest != -1)
406 {
407 std::size_t pos = send_offsets[neigh_dest];
408 send_buffer_index[pos] = i + rank_offset;
409 std::copy_n(std::next(x.begin(), i * shape[1]), shape[1],
410 std::next(send_buffer_data.begin(), shape[1] * pos));
411 ++send_offsets[neigh_dest];
412 }
413 }
414 }
415
416 // Send to each post office (destination) the number of items that I
417 // will be sending to it
418 std::vector<int> num_items_recv(src.size());
419 num_items_per_dest.reserve(1);
420 num_items_recv.reserve(1);
421 err = MPI_Neighbor_alltoall(num_items_per_dest.data(), 1, MPI_INT,
422 num_items_recv.data(), 1, MPI_INT, neigh_comm);
423 dolfinx::MPI::check_error(comm, err);
424
425 // Prepare receive displacement and buffers
426 std::vector<std::int32_t> recv_disp(num_items_recv.size() + 1, 0);
427 std::partial_sum(num_items_recv.begin(), num_items_recv.end(),
428 std::next(recv_disp.begin()));
429
430 // Send/receive global indices
431 std::vector<std::int64_t> recv_buffer_index(recv_disp.back());
432 err = MPI_Neighbor_alltoallv(
433 send_buffer_index.data(), num_items_per_dest.data(), send_disp.data(),
434 MPI_INT64_T, recv_buffer_index.data(), num_items_recv.data(),
435 recv_disp.data(), MPI_INT64_T, neigh_comm);
436 dolfinx::MPI::check_error(comm, err);
437
438 // Send/receive data (x)
439 MPI_Datatype compound_type;
440 MPI_Type_contiguous(shape[1], dolfinx::MPI::mpi_t<T>, &compound_type);
441 MPI_Type_commit(&compound_type);
442 std::vector<T> recv_buffer_data(shape[1] * recv_disp.back());
443 err = MPI_Neighbor_alltoallv(
444 send_buffer_data.data(), num_items_per_dest.data(), send_disp.data(),
445 compound_type, recv_buffer_data.data(), num_items_recv.data(),
446 recv_disp.data(), compound_type, neigh_comm);
447 dolfinx::MPI::check_error(comm, err);
448 err = MPI_Type_free(&compound_type);
449 dolfinx::MPI::check_error(comm, err);
450 err = MPI_Comm_free(&neigh_comm);
451 dolfinx::MPI::check_error(comm, err);
452
453 spdlog::debug("Completed send data to post offices.");
454
455 // Convert to local indices
456 const std::int64_t r0 = MPI::local_range(rank, shape[0], size)[0];
457 std::vector<std::int32_t> index_local(recv_buffer_index.size());
458 std::ranges::transform(recv_buffer_index, index_local.begin(),
459 [r0](auto idx) { return idx - r0; });
460
461 return {index_local, recv_buffer_data};
462}
463//---------------------------------------------------------------------------
464template <typename U>
465std::vector<typename std::remove_reference_t<typename U::value_type>>
466distribute_from_postoffice(MPI_Comm comm, std::span<const std::int64_t> indices,
467 const U& x, std::array<std::int64_t, 2> shape,
468 std::int64_t rank_offset)
469{
470 assert(rank_offset >= 0 or x.empty());
471 using T = typename std::remove_reference_t<typename U::value_type>;
472
473 common::Timer timer("Distribute row-wise data (scalable)");
474 assert(shape[1] > 0);
475
476 const int size = dolfinx::MPI::size(comm);
477 const int rank = dolfinx::MPI::rank(comm);
478 assert(x.size() % shape[1] == 0);
479 const std::int64_t shape0_local = x.size() / shape[1];
480
481 // 0. Send x data to/from post offices
482
483 // Send/receive x data to/from post offices (only for rows that need
484 // to be communicated)
485 auto [post_indices, post_x] = dolfinx::MPI::distribute_to_postoffice(
486 comm, x, {shape[0], shape[1]}, rank_offset);
487 assert(post_indices.size() == post_x.size() / shape[1]);
488
489 // 1. Send request to post office ranks for data
490
491 // Build list of (src rank, global index, index position) for each
492 // entry in 'indices' that doesn't belong to this rank, then sort
493 std::vector<std::tuple<int, std::int64_t, std::int32_t>> src_to_index;
494 for (std::size_t i = 0; i < indices.size(); ++i)
495 {
496 std::size_t idx = indices[i];
497 if (int src = dolfinx::MPI::index_owner(size, idx, shape[0]); src != rank)
498 src_to_index.push_back({src, idx, i});
499 }
500 std::ranges::sort(src_to_index);
501
502 // Build list of neighbour src ranks and count number of items (rows
503 // of x) to receive from each src post office (by neighbourhood rank)
504 std::vector<std::int32_t> num_items_per_src;
505 std::vector<int> src;
506 {
507 auto it = src_to_index.begin();
508 while (it != src_to_index.end())
509 {
510 src.push_back(std::get<0>(*it));
511 auto it1
512 = std::find_if(it, src_to_index.end(), [r = src.back()](auto& idx)
513 { return std::get<0>(idx) != r; });
514 num_items_per_src.push_back(std::distance(it, it1));
515 it = it1;
516 }
517 }
518
519 // Determine 'delivery' destination ranks (ranks that want data from
520 // me)
521 const std::vector<int> dest
522 = dolfinx::MPI::compute_graph_edges_nbx(comm, src);
523 spdlog::info(
524 "Neighbourhood destination ranks from post office in "
525 "distribute_data (rank, num dests, num dests/mpi_size): {}, {}, {}",
526 rank, static_cast<int>(dest.size()),
527 static_cast<double>(dest.size()) / size);
528
529 // Create neighbourhood communicator for sending index requests to
530 // post offices (src), and receiving requests addressed to my post office
531 MPI_Comm neigh_comm0;
532 int err = MPI_Dist_graph_create_adjacent(
533 comm, dest.size(), dest.data(), MPI_UNWEIGHTED, src.size(), src.data(),
534 MPI_UNWEIGHTED, MPI_INFO_NULL, false, &neigh_comm0);
535 dolfinx::MPI::check_error(comm, err);
536
537 // Communicate number of requests to each source
538 std::vector<int> num_items_recv(dest.size());
539 num_items_per_src.reserve(1);
540 num_items_recv.reserve(1);
541 err = MPI_Neighbor_alltoall(num_items_per_src.data(), 1, MPI_INT,
542 num_items_recv.data(), 1, MPI_INT, neigh_comm0);
543 dolfinx::MPI::check_error(comm, err);
544
545 // Prepare send/receive displacements
546 std::vector<std::int32_t> send_disp = {0};
547 std::partial_sum(num_items_per_src.begin(), num_items_per_src.end(),
548 std::back_inserter(send_disp));
549 std::vector<std::int32_t> recv_disp = {0};
550 std::partial_sum(num_items_recv.begin(), num_items_recv.end(),
551 std::back_inserter(recv_disp));
552
553 // Pack my requested indices (global) in send buffer ready to send to
554 // post offices
555 assert(send_disp.back() == (int)src_to_index.size());
556 std::vector<std::int64_t> send_buffer_index(src_to_index.size());
557 std::ranges::transform(src_to_index, send_buffer_index.begin(),
558 [](auto x) { return std::get<1>(x); });
559
560 // Prepare the receive buffer
561 std::vector<std::int64_t> recv_buffer_index(recv_disp.back());
562 err = MPI_Neighbor_alltoallv(
563 send_buffer_index.data(), num_items_per_src.data(), send_disp.data(),
564 MPI_INT64_T, recv_buffer_index.data(), num_items_recv.data(),
565 recv_disp.data(), MPI_INT64_T, neigh_comm0);
566 dolfinx::MPI::check_error(comm, err);
567
568 err = MPI_Comm_free(&neigh_comm0);
569 dolfinx::MPI::check_error(comm, err);
570
571 // 2. Send data (rows of x) from post office back to requesting ranks
572 // (transpose of the preceding communication pattern)
573
574 // Build map from local index to post_indices position. Set to -1 for
575 // data that was already on this rank and was therefore not
576 // sent/received via a post office.
577 const std::array<std::int64_t, 2> postoffice_range
578 = dolfinx::MPI::local_range(rank, shape[0], size);
579 std::vector<std::int32_t> post_indices_map(
580 postoffice_range[1] - postoffice_range[0], -1);
581 for (std::size_t i = 0; i < post_indices.size(); ++i)
582 {
583 assert(post_indices[i] < (int)post_indices_map.size());
584 post_indices_map[post_indices[i]] = i;
585 }
586
587 // Build send buffer
588 std::vector<T> send_buffer_data(shape[1] * recv_disp.back());
589 for (std::size_t p = 0; p < recv_disp.size() - 1; ++p)
590 {
591 int offset = recv_disp[p];
592 for (std::int32_t i = recv_disp[p]; i < recv_disp[p + 1]; ++i)
593 {
594 std::int64_t index = recv_buffer_index[i];
595 if (index >= rank_offset and index < (rank_offset + shape0_local))
596 {
597 // I already had this index before any communication
598 std::int32_t local_index = index - rank_offset;
599 std::copy_n(std::next(x.begin(), shape[1] * local_index), shape[1],
600 std::next(send_buffer_data.begin(), shape[1] * offset));
601 }
602 else
603 {
604 // Take from my 'post bag'
605 auto local_index = index - postoffice_range[0];
606 std::int32_t pos = post_indices_map[local_index];
607 assert(pos != -1);
608 std::copy_n(std::next(post_x.begin(), shape[1] * pos), shape[1],
609 std::next(send_buffer_data.begin(), shape[1] * offset));
610 }
611
612 ++offset;
613 }
614 }
615
616 err = MPI_Dist_graph_create_adjacent(
617 comm, src.size(), src.data(), MPI_UNWEIGHTED, dest.size(), dest.data(),
618 MPI_UNWEIGHTED, MPI_INFO_NULL, false, &neigh_comm0);
619 dolfinx::MPI::check_error(comm, err);
620
621 MPI_Datatype compound_type0;
622 MPI_Type_contiguous(shape[1], dolfinx::MPI::mpi_t<T>, &compound_type0);
623 MPI_Type_commit(&compound_type0);
624
625 std::vector<T> recv_buffer_data(shape[1] * send_disp.back());
626 err = MPI_Neighbor_alltoallv(
627 send_buffer_data.data(), num_items_recv.data(), recv_disp.data(),
628 compound_type0, recv_buffer_data.data(), num_items_per_src.data(),
629 send_disp.data(), compound_type0, neigh_comm0);
630 dolfinx::MPI::check_error(comm, err);
631
632 err = MPI_Type_free(&compound_type0);
633 dolfinx::MPI::check_error(comm, err);
634 err = MPI_Comm_free(&neigh_comm0);
635 dolfinx::MPI::check_error(comm, err);
636
637 std::vector<std::int32_t> index_pos_to_buffer(indices.size(), -1);
638 for (std::size_t i = 0; i < src_to_index.size(); ++i)
639 index_pos_to_buffer[std::get<2>(src_to_index[i])] = i;
640
641 // Assemble the data to return
642 std::vector<T> x_new(shape[1] * indices.size());
643 for (std::size_t i = 0; i < indices.size(); ++i)
644 {
645 const std::int64_t index = indices[i];
646 if (index >= rank_offset and index < (rank_offset + shape0_local))
647 {
648 // Had data from the start in x
649 auto local_index = index - rank_offset;
650 std::copy_n(std::next(x.begin(), shape[1] * local_index), shape[1],
651 std::next(x_new.begin(), shape[1] * i));
652 }
653 else
654 {
655 if (int src = dolfinx::MPI::index_owner(size, index, shape[0]);
656 src == rank)
657 {
658 // In my post office bag
659 auto local_index = index - postoffice_range[0];
660 std::int32_t pos = post_indices_map[local_index];
661 assert(pos != -1);
662 std::copy_n(std::next(post_x.begin(), shape[1] * pos), shape[1],
663 std::next(x_new.begin(), shape[1] * i));
664 }
665 else
666 {
667 // In my received post
668 std::int32_t pos = index_pos_to_buffer[i];
669 assert(pos != -1);
670 std::copy_n(std::next(recv_buffer_data.begin(), shape[1] * pos),
671 shape[1], std::next(x_new.begin(), shape[1] * i));
672 }
673 }
674 }
675
676 return x_new;
677}
678//---------------------------------------------------------------------------
679template <typename U>
680std::vector<typename std::remove_reference_t<typename U::value_type>>
681distribute_data(MPI_Comm comm0, std::span<const std::int64_t> indices,
682 MPI_Comm comm1, const U& x, int shape1)
683{
684 assert(shape1 > 0);
685 assert(x.size() % shape1 == 0);
686 const std::int64_t shape0_local = x.size() / shape1;
687
688 int err;
689 std::int64_t shape0 = 0;
690 err = MPI_Allreduce(&shape0_local, &shape0, 1, MPI_INT64_T, MPI_SUM, comm0);
691 dolfinx::MPI::check_error(comm0, err);
692
693 std::int64_t rank_offset = -1;
694 if (comm1 != MPI_COMM_NULL)
695 {
696 rank_offset = 0;
697 err = MPI_Exscan(&shape0_local, &rank_offset, 1,
698 dolfinx::MPI::mpi_t<std::int64_t>, MPI_SUM, comm1);
699 dolfinx::MPI::check_error(comm1, err);
700 }
701 else
702 {
703 rank_offset = -1;
704 if (!x.empty())
705 throw std::runtime_error("Non-empty data on null MPI communicator");
706 }
707
708 return distribute_from_postoffice(comm0, indices, x, {shape0, shape1},
709 rank_offset);
710}
711//---------------------------------------------------------------------------
712
713} // namespace dolfinx::MPI
A duplicate MPI communicator whose lifetime is managed by this class.
Definition MPI.h:44
Comm(MPI_Comm comm, bool duplicate=true)
Duplicate communicator and wrap duplicate.
Definition MPI.cpp:12
~Comm()
Destructor (frees wrapped communicator)
Definition MPI.cpp:36
MPI_Comm comm() const noexcept
Return the underlying MPI_Comm object.
Definition MPI.cpp:62
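A minimal usage sketch (assuming the header is installed as <dolfinx/common/MPI.h>; the program is illustrative, not part of DOLFINx). The wrapper duplicates the communicator on construction and frees the duplicate in its destructor, so it must go out of scope before MPI_Finalize:

#include <dolfinx/common/MPI.h>
#include <iostream>
#include <mpi.h>

int main(int argc, char* argv[])
{
  MPI_Init(&argc, &argv);
  {
    // Duplicate MPI_COMM_WORLD; the duplicate is freed when 'comm' is
    // destroyed at the end of this scope (before MPI_Finalize)
    dolfinx::MPI::Comm comm(MPI_COMM_WORLD);
    if (dolfinx::MPI::rank(comm.comm()) == 0)
      std::cout << "Communicator size: " << dolfinx::MPI::size(comm.comm())
                << std::endl;
  }
  MPI_Finalize();
  return 0;
}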
Timer for measuring and logging elapsed time durations.
Definition Timer.h:41
MPI support functionality.
Definition MPI.h:31
MPI_Datatype mpi_t
Retrieves the MPI data type associated to the provided type.
Definition MPI.h:282
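For example, mpi_t<T> can supply the MPI_Datatype in a plain MPI call. A sketch (global_sum is a hypothetical helper, not a DOLFINx function):

#include <dolfinx/common/MPI.h>
#include <mpi.h>

// Sum a value of any mapped type T across all ranks; mpi_t<T> resolves to
// the MPI_Datatype registered via MAP_TO_MPI_TYPE in this header
template <typename T>
T global_sum(MPI_Comm comm, T local)
{
  T global = 0;
  MPI_Allreduce(&local, &global, 1, dolfinx::MPI::mpi_t<T>, MPI_SUM, comm);
  return global;
}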
std::vector< typename std::remove_reference_t< typename U::value_type > > distribute_data(MPI_Comm comm0, std::span< const std::int64_t > indices, MPI_Comm comm1, const U &x, int shape1)
Distribute rows of a rectangular data array to ranks where they are required (scalable version).
Definition MPI.h:681
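A usage sketch under assumed conventions (demo and the data layout are illustrative): each rank holds a contiguous block of rows in rank order and requests one row that normally lives on another rank.

#include <dolfinx/common/MPI.h>
#include <cstdint>
#include <mpi.h>
#include <vector>

void demo(MPI_Comm comm)
{
  const int rank = dolfinx::MPI::rank(comm);
  const int size = dolfinx::MPI::size(comm);
  const int shape1 = 3; // number of columns

  // This rank holds global rows [2*rank, 2*rank + 1], flattened row-major
  std::vector<double> x(2 * shape1, static_cast<double>(rank));

  // Request the first row held by the next rank (wrapping around)
  std::vector<std::int64_t> indices = {2 * ((rank + 1) % size)};

  // Returned data has indices.size()*shape1 entries, ordered like 'indices'
  std::vector<double> x_new
      = dolfinx::MPI::distribute_data(comm, indices, comm, x, shape1);
}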
std::pair< std::vector< std::int32_t >, std::vector< typename std::remove_reference_t< typename U::value_type > > > distribute_to_postoffice(MPI_Comm comm, const U &x, std::array< std::int64_t, 2 > shape, std::int64_t rank_offset)
Distribute row data to 'post office' ranks.
Definition MPI.h:315
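A sketch of the shape/offset bookkeeping a caller needs (demo is hypothetical; the Allreduce/Exscan pattern mirrors what distribute_data does before calling this function):

#include <dolfinx/common/MPI.h>
#include <array>
#include <cstdint>
#include <mpi.h>
#include <vector>

void demo(MPI_Comm comm)
{
  const std::int64_t shape1 = 2; // columns
  std::int64_t rows_local = 4;   // rows held by this rank
  std::vector<double> x(rows_local * shape1,
                        static_cast<double>(dolfinx::MPI::rank(comm)));

  // Global row count and the global index of this rank's first row
  std::int64_t shape0 = 0, rank_offset = 0;
  MPI_Allreduce(&rows_local, &shape0, 1, MPI_INT64_T, MPI_SUM, comm);
  MPI_Exscan(&rows_local, &rank_offset, 1, MPI_INT64_T, MPI_SUM, comm);

  // Rows are pushed to their post office rank,
  // i.e. index_owner(size, row, shape0)
  auto [local_idx, data] = dolfinx::MPI::distribute_to_postoffice(
      comm, x, {shape0, shape1}, rank_offset);
  // local_idx[i] is the post-office-local index of the row stored at
  // data[i*shape1, (i+1)*shape1)
}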
std::vector< int > compute_graph_edges_nbx(MPI_Comm comm, std::span< const int > edges, int tag=static_cast< int >(tag::consensus_nbx))
Determine incoming graph edges using the NBX consensus algorithm.
Definition MPI.cpp:162
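For example, if every rank knows only the ranks it will send to, NBX discovers the matching incoming edges without a global exchange of counts (discover_sources is a hypothetical helper):

#include <dolfinx/common/MPI.h>
#include <mpi.h>
#include <vector>

// Out-edges: each rank sends to its successor in a ring. The returned
// in-edges are the ranks that listed this rank as a destination, i.e.
// the predecessor in the ring.
std::vector<int> discover_sources(MPI_Comm comm)
{
  const int rank = dolfinx::MPI::rank(comm);
  const int size = dolfinx::MPI::size(comm);
  std::vector<int> dest = {(rank + 1) % size};
  return dolfinx::MPI::compute_graph_edges_nbx(comm, dest);
}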
constexpr int index_owner(int size, std::size_t index, std::size_t N)
Return which rank owns index in global range [0, N - 1] (inverse of MPI::local_range).
Definition MPI.h:115
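Because index_owner is constexpr, the ownership rule can be checked at compile time. With N = 10 and size = 4, the remainder r = 2, so ranks 0-1 own three indices each and ranks 2-3 own two each:

#include <dolfinx/common/MPI.h>

// N = 10 indices over size = 4 ranks: ranks 0-1 own {0,1,2} and {3,4,5},
// ranks 2-3 own {6,7} and {8,9}
static_assert(dolfinx::MPI::index_owner(4, 0, 10) == 0);
static_assert(dolfinx::MPI::index_owner(4, 5, 10) == 1);
static_assert(dolfinx::MPI::index_owner(4, 9, 10) == 3);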
std::vector< typename std::remove_reference_t< typename U::value_type > > distribute_from_postoffice(MPI_Comm comm, std::span< const std::int64_t > indices, const U &x, std::array< std::int64_t, 2 > shape, std::int64_t rank_offset)
Distribute rows of a rectangular data array from post office ranks to ranks where they are required.
Definition MPI.h:466
std::vector< int > compute_graph_edges_pcx(MPI_Comm comm, std::span< const int > edges)
Determine incoming graph edges using the PCX consensus algorithm.
Definition MPI.cpp:95
void check_error(MPI_Comm comm, int code)
Check MPI error code. If the error code is not equal to MPI_SUCCESS, then std::abort is called.
Definition MPI.cpp:80
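Typical usage wraps a raw MPI call, as done throughout this header (barrier_checked is a hypothetical helper):

#include <dolfinx/common/MPI.h>
#include <mpi.h>

// Abort (with a logged MPI error message) if the barrier fails
void barrier_checked(MPI_Comm comm)
{
  int err = MPI_Barrier(comm);
  dolfinx::MPI::check_error(comm, err);
}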
int size(MPI_Comm comm)
Return the size of the group (number of processes) associated with the communicator.
Definition MPI.cpp:72
int rank(MPI_Comm comm)
Return process rank for the communicator.
Definition MPI.cpp:64
constexpr std::array< std::int64_t, 2 > local_range(int rank, std::int64_t N, int size)
Return local range for the calling process, partitioning the global [0, N - 1] range across all ranks...
Definition MPI.h:91
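local_range is the inverse view of index_owner: the per-rank ranges are contiguous, cover [0, N) exactly, and can also be checked at compile time. With N = 10 over size = 4:

#include <dolfinx/common/MPI.h>

// Ranges are [0,3), [3,6), [6,8), [8,10)
static_assert(dolfinx::MPI::local_range(0, 10, 4)[0] == 0);
static_assert(dolfinx::MPI::local_range(0, 10, 4)[1] == 3);
static_assert(dolfinx::MPI::local_range(2, 10, 4)[0] == 6);
static_assert(dolfinx::MPI::local_range(3, 10, 4)[1] == 10);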
tag
MPI communication tags.
Definition MPI.h:35
Definition MPI.h:270
MPI Type.
Definition MPI.h:277