DOLFINx 0.8.0
DOLFINx C++ interface
MPI.h
1// Copyright (C) 2007-2023 Magnus Vikstrøm and Garth N. Wells
2//
3// This file is part of DOLFINx (https://www.fenicsproject.org)
4//
5// SPDX-License-Identifier: LGPL-3.0-or-later
6
7#pragma once
8
9#include <algorithm>
10#include <array>
11#include <cassert>
12#include <complex>
13#include <cstdint>
14#include <dolfinx/common/Timer.h>
15#include <dolfinx/common/log.h>
16#include <dolfinx/common/types.h>
17#include <dolfinx/graph/AdjacencyList.h>
18#include <numeric>
19#include <set>
20#include <span>
21#include <tuple>
22#include <type_traits>
23#include <utility>
24#include <vector>
25
26#define MPICH_IGNORE_CXX_SEEK 1
27#include <mpi.h>
28
30namespace dolfinx::MPI
31{
32
34enum class tag : int
35{
36 consensus_pcx,
37 consensus_pex
38};
39
42class Comm
43{
44public:
46 explicit Comm(MPI_Comm comm, bool duplicate = true);
47
49 Comm(const Comm& comm) noexcept;
50
52 Comm(Comm&& comm) noexcept;
53
54 // Disable copy assignment operator
55 Comm& operator=(const Comm& comm) = delete;
56
58 Comm& operator=(Comm&& comm) noexcept;
59
61 ~Comm();
62
64 MPI_Comm comm() const noexcept;
65
66private:
67 // MPI communicator
68 MPI_Comm _comm;
69};
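A minimal usage sketch (not part of MPI.h; the function name and the include path dolfinx/common/MPI.h are assumptions): Comm duplicates the communicator it wraps and frees the duplicate in its destructor, so the MPI handle follows RAII.

#include <dolfinx/common/MPI.h>
#include <mpi.h>

void comm_sketch()
{
  // Duplicate MPI_COMM_WORLD; the duplicate is freed when 'comm' is destroyed
  dolfinx::MPI::Comm comm(MPI_COMM_WORLD);
  const int r = dolfinx::MPI::rank(comm.comm()); // rank on the duplicate
  const int s = dolfinx::MPI::size(comm.comm()); // number of processes
  (void)r;
  (void)s;
}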
70
72int rank(MPI_Comm comm);
73
76int size(MPI_Comm comm);
77
82void check_error(MPI_Comm comm, int code);
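A hedged example of pairing a raw MPI call with check_error (variable names are illustrative): the returned error code is forwarded to check_error, which aborts if it is not MPI_SUCCESS.

int flag = 0;
int err = MPI_Initialized(&flag);               // raw MPI call returning an error code
dolfinx::MPI::check_error(MPI_COMM_WORLD, err); // aborts on failure, returns on MPI_SUCCESS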
83
91constexpr std::array<std::int64_t, 2> local_range(int rank, std::int64_t N,
92 int size)
93{
94 assert(rank >= 0);
95 assert(N >= 0);
96 assert(size > 0);
97
98 // Compute number of items per rank and remainder
99 const std::int64_t n = N / size;
100 const std::int64_t r = N % size;
101
102 // Compute local range
103 if (rank < r)
104 return {rank * (n + 1), rank * (n + 1) + n + 1};
105 else
106 return {rank * n + r, rank * n + r + n};
107}
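A worked example (not part of the header): partitioning N = 10 items over size = 3 ranks gives ranks 0, 1 and 2 the half-open ranges [0, 4), [4, 7) and [7, 10). Since local_range is constexpr, this can be checked at compile time (assuming a C++20 toolchain):

static_assert(dolfinx::MPI::local_range(0, 10, 3) == std::array<std::int64_t, 2>{0, 4});
static_assert(dolfinx::MPI::local_range(1, 10, 3) == std::array<std::int64_t, 2>{4, 7});
static_assert(dolfinx::MPI::local_range(2, 10, 3) == std::array<std::int64_t, 2>{7, 10});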
108
115constexpr int index_owner(int size, std::size_t index, std::size_t N)
116{
117 assert(index < N);
118
119 // Compute number of items per rank and remainder
120 const std::size_t n = N / size;
121 const std::size_t r = N % size;
122
123 if (index < r * (n + 1))
124 {
125 // First r ranks own n + 1 indices
126 return index / (n + 1);
127 }
128 else
129 {
130 // Remaining ranks own n indices
131 return r + (index - r * (n + 1)) / n;
132 }
133}
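Continuing the same example, index_owner inverts local_range: with size = 3 and N = 10, global index 5 lies in rank 1's range [4, 7) and index 9 in rank 2's range [7, 10).

static_assert(dolfinx::MPI::index_owner(3, 5, 10) == 1);
static_assert(dolfinx::MPI::index_owner(3, 9, 10) == 2);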
134
158std::vector<int> compute_graph_edges_pcx(MPI_Comm comm,
159 std::span<const int> edges);
160
185std::vector<int> compute_graph_edges_nbx(MPI_Comm comm,
186 std::span<const int> edges);
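A hedged sketch of how compute_graph_edges_nbx is typically used (nbx_sketch is an illustrative name, not part of DOLFINx): each rank knows only the ranks it sends to, and the NBX consensus algorithm discovers the matching source ranks without a global gather.

void nbx_sketch(MPI_Comm comm)
{
  const int rank = dolfinx::MPI::rank(comm);
  const int size = dolfinx::MPI::size(comm);

  // Each rank sends to its right neighbour (known locally) ...
  std::vector<int> dest = {(rank + 1) % size};

  // ... and discovers who sends to it (here: the left neighbour)
  std::vector<int> src = dolfinx::MPI::compute_graph_edges_nbx(comm, dest);
  (void)src;
}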
187
207template <typename U>
208std::pair<std::vector<std::int32_t>,
209 std::vector<typename std::remove_reference_t<typename U::value_type>>>
210distribute_to_postoffice(MPI_Comm comm, const U& x,
211 std::array<std::int64_t, 2> shape,
212 std::int64_t rank_offset);
213
235template <typename U>
236std::vector<typename std::remove_reference_t<typename U::value_type>>
237distribute_from_postoffice(MPI_Comm comm, std::span<const std::int64_t> indices,
238 const U& x, std::array<std::int64_t, 2> shape,
239 std::int64_t rank_offset);
240
261template <typename U>
262std::vector<typename std::remove_reference_t<typename U::value_type>>
263distribute_data(MPI_Comm comm0, std::span<const std::int64_t> indices,
264 MPI_Comm comm1, const U& x, int shape1);
265
266template <typename T>
267struct dependent_false : std::false_type
268{
269};
270
272template <typename T>
273constexpr MPI_Datatype mpi_type()
274{
275 if constexpr (std::is_same_v<T, float>)
276 return MPI_FLOAT;
277 else if constexpr (std::is_same_v<T, double>)
278 return MPI_DOUBLE;
279 else if constexpr (std::is_same_v<T, std::complex<double>>)
280 return MPI_C_DOUBLE_COMPLEX;
281 else if constexpr (std::is_same_v<T, std::complex<float>>)
282 return MPI_C_FLOAT_COMPLEX;
283 else if constexpr (std::is_same_v<T, short int>)
284 return MPI_SHORT;
285 else if constexpr (std::is_same_v<T, int>)
286 return MPI_INT;
287 else if constexpr (std::is_same_v<T, unsigned int>)
288 return MPI_UNSIGNED;
289 else if constexpr (std::is_same_v<T, long int>)
290 return MPI_LONG;
291 else if constexpr (std::is_same_v<T, unsigned long>)
292 return MPI_UNSIGNED_LONG;
293 else if constexpr (std::is_same_v<T, long long>)
294 return MPI_LONG_LONG;
295 else if constexpr (std::is_same_v<T, unsigned long long>)
296 return MPI_UNSIGNED_LONG_LONG;
297 else if constexpr (std::is_same_v<T, bool>)
298 return MPI_C_BOOL;
299 else if constexpr (std::is_same_v<T, std::int8_t>)
300 return MPI_INT8_T;
301 else
302 // Issue compile time error
303 static_assert(!std::is_same_v<T, T>);
304}
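A hedged example (global_sum is an illustrative helper, not part of DOLFINx): mpi_type maps a C++ scalar type to the matching MPI_Datatype, so the same reduction code works for any supported T.

template <typename T>
T global_sum(MPI_Comm comm, T local)
{
  T global = 0;
  MPI_Allreduce(&local, &global, 1, dolfinx::MPI::mpi_type<T>(), MPI_SUM, comm);
  return global;
}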
305
306//---------------------------------------------------------------------------
307template <typename U>
308std::pair<std::vector<std::int32_t>,
309 std::vector<typename std::remove_reference_t<typename U::value_type>>>
310distribute_to_postoffice(MPI_Comm comm, const U& x,
311 std::array<std::int64_t, 2> shape,
312 std::int64_t rank_offset)
313{
314 assert(rank_offset >= 0 or x.empty());
315 using T = typename std::remove_reference_t<typename U::value_type>;
316
317 const int size = dolfinx::MPI::size(comm);
318 const int rank = dolfinx::MPI::rank(comm);
319 assert(x.size() % shape[1] == 0);
320 const std::int32_t shape0_local = x.size() / shape[1];
321
322 LOG(2) << "Sending data to post offices (distribute_to_postoffice)";
323
324 // Post office ranks will receive data from this rank
325 std::vector<int> row_to_dest(shape0_local);
326 for (std::int32_t i = 0; i < shape0_local; ++i)
327 {
328 int dest = MPI::index_owner(size, i + rank_offset, shape[0]);
329 row_to_dest[i] = dest;
330 }
331
332 // Build list of (dest, positions) for each row that doesn't belong to
333 // this rank, then sort
334 std::vector<std::array<std::int32_t, 2>> dest_to_index;
335 dest_to_index.reserve(shape0_local);
336 for (std::int32_t i = 0; i < shape0_local; ++i)
337 {
338 std::size_t idx = i + rank_offset;
339 if (int dest = MPI::index_owner(size, idx, shape[0]); dest != rank)
340 dest_to_index.push_back({dest, i});
341 }
342 std::sort(dest_to_index.begin(), dest_to_index.end());
343
344 // Build list of neighbour dest (post office) ranks and count number of
345 // items (rows of x) to send to each dest rank (by neighbourhood rank)
346 std::vector<int> dest;
347 std::vector<std::int32_t> num_items_per_dest,
348 pos_to_neigh_rank(shape0_local, -1);
349 {
350 auto it = dest_to_index.begin();
351 while (it != dest_to_index.end())
352 {
353 const int neigh_rank = dest.size();
354
355 // Store global rank
356 dest.push_back((*it)[0]);
357
358 // Find iterator to next global rank
359 auto it1
360 = std::find_if(it, dest_to_index.end(),
361 [r = dest.back()](auto& idx) { return idx[0] != r; });
362
363 // Store number of items for current rank
364 num_items_per_dest.push_back(std::distance(it, it1));
365
366 // Map from local x index to local destination rank
367 for (auto e = it; e != it1; ++e)
368 pos_to_neigh_rank[(*e)[1]] = neigh_rank;
369
370 // Advance iterator
371 it = it1;
372 }
373 }
374
375 // Determine source ranks
376 const std::vector<int> src = MPI::compute_graph_edges_nbx(comm, dest);
377 LOG(INFO)
378 << "Number of neighbourhood source ranks in distribute_to_postoffice: "
379 << src.size();
380
381 // Create neighbourhood communicator for sending data to post offices
382 MPI_Comm neigh_comm;
383 int err = MPI_Dist_graph_create_adjacent(
384 comm, src.size(), src.data(), MPI_UNWEIGHTED, dest.size(), dest.data(),
385 MPI_UNWEIGHTED, MPI_INFO_NULL, false, &neigh_comm);
386 dolfinx::MPI::check_error(comm, err);
387
388 // Compute send displacements
389 std::vector<std::int32_t> send_disp = {0};
390 std::partial_sum(num_items_per_dest.begin(), num_items_per_dest.end(),
391 std::back_inserter(send_disp));
392
393 // Pack send buffers
394 std::vector<T> send_buffer_data(shape[1] * send_disp.back());
395 std::vector<std::int64_t> send_buffer_index(send_disp.back());
396 {
397 std::vector<std::int32_t> send_offsets = send_disp;
398 for (std::int32_t i = 0; i < shape0_local; ++i)
399 {
400 if (int neigh_dest = pos_to_neigh_rank[i]; neigh_dest != -1)
401 {
402 std::size_t pos = send_offsets[neigh_dest];
403 send_buffer_index[pos] = i + rank_offset;
404 std::copy_n(std::next(x.begin(), i * shape[1]), shape[1],
405 std::next(send_buffer_data.begin(), shape[1] * pos));
406 ++send_offsets[neigh_dest];
407 }
408 }
409 }
410
411 // Send to each post office (destination) rank the number of items that
412 // I will be sending to it
413 std::vector<int> num_items_recv(src.size());
414 num_items_per_dest.reserve(1);
415 num_items_recv.reserve(1);
416 err = MPI_Neighbor_alltoall(num_items_per_dest.data(), 1, MPI_INT,
417 num_items_recv.data(), 1, MPI_INT, neigh_comm);
418 dolfinx::MPI::check_error(comm, err);
419
420 // Prepare receive displacement and buffers
421 std::vector<std::int32_t> recv_disp(num_items_recv.size() + 1, 0);
422 std::partial_sum(num_items_recv.begin(), num_items_recv.end(),
423 std::next(recv_disp.begin()));
424
425 // Send/receive global indices
426 std::vector<std::int64_t> recv_buffer_index(recv_disp.back());
427 err = MPI_Neighbor_alltoallv(
428 send_buffer_index.data(), num_items_per_dest.data(), send_disp.data(),
429 MPI_INT64_T, recv_buffer_index.data(), num_items_recv.data(),
430 recv_disp.data(), MPI_INT64_T, neigh_comm);
431 dolfinx::MPI::check_error(comm, err);
432
433 // Send/receive data (x)
434 MPI_Datatype compound_type;
435 MPI_Type_contiguous(shape[1], dolfinx::MPI::mpi_type<T>(), &compound_type);
436 MPI_Type_commit(&compound_type);
437 std::vector<T> recv_buffer_data(shape[1] * recv_disp.back());
438 err = MPI_Neighbor_alltoallv(
439 send_buffer_data.data(), num_items_per_dest.data(), send_disp.data(),
440 compound_type, recv_buffer_data.data(), num_items_recv.data(),
441 recv_disp.data(), compound_type, neigh_comm);
442 dolfinx::MPI::check_error(comm, err);
443 err = MPI_Type_free(&compound_type);
444 dolfinx::MPI::check_error(comm, err);
445 err = MPI_Comm_free(&neigh_comm);
446 dolfinx::MPI::check_error(comm, err);
447
448 LOG(2) << "Completed send data to post offices.";
449
450 // Convert to local indices
451 const std::int64_t r0 = MPI::local_range(rank, shape[0], size)[0];
452 std::vector<std::int32_t> index_local(recv_buffer_index.size());
453 std::transform(recv_buffer_index.cbegin(), recv_buffer_index.cend(),
454 index_local.begin(), [r0](auto idx) { return idx - r0; });
455
456 return {index_local, recv_buffer_data};
457}
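A hedged usage sketch (illustrative only; postoffice_sketch is not part of DOLFINx): rank 0 initially holds every row of a size x 2 array, and distribute_to_postoffice forwards each row to the rank that owns it under the MPI::local_range block partition, returning owner-local row indices alongside the row data.

void postoffice_sketch(MPI_Comm comm)
{
  const int size = dolfinx::MPI::size(comm);
  const int rank = dolfinx::MPI::rank(comm);

  // Global array has shape (size x 2); rank 0 holds all rows, row i is {i, i}
  const std::array<std::int64_t, 2> shape = {size, 2};
  std::vector<double> x;
  if (rank == 0)
    for (int i = 0; i < size; ++i)
      x.insert(x.end(), {double(i), double(i)});

  // rank_offset is the global index of the first locally held row (0 here).
  // Rows owned by other ranks are sent to them; on ranks != 0 the returned
  // buffers hold the received row, while on rank 0 they are empty because
  // its own row is not communicated.
  auto [idx_local, x_recv]
      = dolfinx::MPI::distribute_to_postoffice(comm, x, shape, 0);
}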
458//---------------------------------------------------------------------------
459template <typename U>
460std::vector<typename std::remove_reference_t<typename U::value_type>>
461distribute_from_postoffice(MPI_Comm comm, std::span<const std::int64_t> indices,
462 const U& x, std::array<std::int64_t, 2> shape,
463 std::int64_t rank_offset)
464{
465 assert(rank_offset >= 0 or x.empty());
466 using T = typename std::remove_reference_t<typename U::value_type>;
467
468 common::Timer timer("Distribute row-wise data (scalable)");
469 assert(shape[1] > 0);
470
471 const int size = dolfinx::MPI::size(comm);
472 const int rank = dolfinx::MPI::rank(comm);
473 assert(x.size() % shape[1] == 0);
474 const std::int64_t shape0_local = x.size() / shape[1];
475
476 // 0. Send x data to/from post offices
477
478 // Send/receive x data to post office (only for rows that need to be
479 // communicated)
480 auto [post_indices, post_x] = dolfinx::MPI::distribute_to_postoffice(
481 comm, x, {shape[0], shape[1]}, rank_offset);
482 assert(post_indices.size() == post_x.size() / shape[1]);
483
484 // 1. Send request to post office ranks for data
485
486 // Build list of (src, global index, index position) for each entry in
487 // 'indices' that doesn't belong to this rank, then sort
488 std::vector<std::tuple<int, std::int64_t, std::int32_t>> src_to_index;
489 for (std::size_t i = 0; i < indices.size(); ++i)
490 {
491 std::size_t idx = indices[i];
492 if (int src = dolfinx::MPI::index_owner(size, idx, shape[0]); src != rank)
493 src_to_index.push_back({src, idx, i});
494 }
495 std::sort(src_to_index.begin(), src_to_index.end());
496
497 // Build list of neighbour src ranks and count number of items (rows
498 // of x) to receive from each src post office (by neighbourhood rank)
499 std::vector<std::int32_t> num_items_per_src;
500 std::vector<int> src;
501 {
502 auto it = src_to_index.begin();
503 while (it != src_to_index.end())
504 {
505 src.push_back(std::get<0>(*it));
506 auto it1
507 = std::find_if(it, src_to_index.end(), [r = src.back()](auto& idx)
508 { return std::get<0>(idx) != r; });
509 num_items_per_src.push_back(std::distance(it, it1));
510 it = it1;
511 }
512 }
513
514 // Determine 'delivery' destination ranks (ranks that want data from
515 // me)
516 const std::vector<int> dest
517 = dolfinx::MPI::compute_graph_edges_nbx(comm, src);
518 LOG(INFO) << "Neighbourhood destination ranks from post office in "
519 "distribute_data (rank, num dests, num dests/mpi_size): "
520 << rank << ", " << dest.size() << ", "
521 << static_cast<double>(dest.size()) / size;
522
523 // Create neighbourhood communicator for sending data to post offices
524 // (src), and receiving data from my post office
525 MPI_Comm neigh_comm0;
526 int err = MPI_Dist_graph_create_adjacent(
527 comm, dest.size(), dest.data(), MPI_UNWEIGHTED, src.size(), src.data(),
528 MPI_UNWEIGHTED, MPI_INFO_NULL, false, &neigh_comm0);
529 dolfinx::MPI::check_error(comm, err);
530
531 // Communicate number of requests to each source
532 std::vector<int> num_items_recv(dest.size());
533 num_items_per_src.reserve(1);
534 num_items_recv.reserve(1);
535 err = MPI_Neighbor_alltoall(num_items_per_src.data(), 1, MPI_INT,
536 num_items_recv.data(), 1, MPI_INT, neigh_comm0);
537 dolfinx::MPI::check_error(comm, err);
538
539 // Prepare send/receive displacements
540 std::vector<std::int32_t> send_disp = {0};
541 std::partial_sum(num_items_per_src.begin(), num_items_per_src.end(),
542 std::back_inserter(send_disp));
543 std::vector<std::int32_t> recv_disp = {0};
544 std::partial_sum(num_items_recv.begin(), num_items_recv.end(),
545 std::back_inserter(recv_disp));
546
547 // Pack my requested indices (global) in send buffer ready to send to
548 // post offices
549 assert(send_disp.back() == (int)src_to_index.size());
550 std::vector<std::int64_t> send_buffer_index(src_to_index.size());
551 std::transform(src_to_index.cbegin(), src_to_index.cend(),
552 send_buffer_index.begin(),
553 [](auto& x) { return std::get<1>(x); });
554
555 // Prepare the receive buffer
556 std::vector<std::int64_t> recv_buffer_index(recv_disp.back());
557 err = MPI_Neighbor_alltoallv(
558 send_buffer_index.data(), num_items_per_src.data(), send_disp.data(),
559 MPI_INT64_T, recv_buffer_index.data(), num_items_recv.data(),
560 recv_disp.data(), MPI_INT64_T, neigh_comm0);
561 dolfinx::MPI::check_error(comm, err);
562
563 err = MPI_Comm_free(&neigh_comm0);
564 dolfinx::MPI::check_error(comm, err);
565
566 // 2. Send data (rows of x) from post office back to requesting ranks
567 // (transpose of the preceding communication pattern)
568
569 // Build map from local index to post_indices position. Set to -1 for
570 // data that was already on this rank and was therefore not
571 // sent/received via a postoffice.
572 const std::array<std::int64_t, 2> postoffice_range
573 = dolfinx::MPI::local_range(rank, shape[0], size);
574 std::vector<std::int32_t> post_indices_map(
575 postoffice_range[1] - postoffice_range[0], -1);
576 for (std::size_t i = 0; i < post_indices.size(); ++i)
577 {
578 assert(post_indices[i] < (int)post_indices_map.size());
579 post_indices_map[post_indices[i]] = i;
580 }
581
582 // Build send buffer
583 std::vector<T> send_buffer_data(shape[1] * recv_disp.back());
584 for (std::size_t p = 0; p < recv_disp.size() - 1; ++p)
585 {
586 int offset = recv_disp[p];
587 for (std::int32_t i = recv_disp[p]; i < recv_disp[p + 1]; ++i)
588 {
589 std::int64_t index = recv_buffer_index[i];
590 if (index >= rank_offset and index < (rank_offset + shape0_local))
591 {
592 // I already had this index before any communication
593 std::int32_t local_index = index - rank_offset;
594 std::copy_n(std::next(x.begin(), shape[1] * local_index), shape[1],
595 std::next(send_buffer_data.begin(), shape[1] * offset));
596 }
597 else
598 {
599 // Take from my 'post bag'
600 auto local_index = index - postoffice_range[0];
601 std::int32_t pos = post_indices_map[local_index];
602 assert(pos != -1);
603 std::copy_n(std::next(post_x.begin(), shape[1] * pos), shape[1],
604 std::next(send_buffer_data.begin(), shape[1] * offset));
605 }
606
607 ++offset;
608 }
609 }
610
611 err = MPI_Dist_graph_create_adjacent(
612 comm, src.size(), src.data(), MPI_UNWEIGHTED, dest.size(), dest.data(),
613 MPI_UNWEIGHTED, MPI_INFO_NULL, false, &neigh_comm0);
614 dolfinx::MPI::check_error(comm, err);
615
616 MPI_Datatype compound_type0;
617 MPI_Type_contiguous(shape[1], dolfinx::MPI::mpi_type<T>(), &compound_type0);
618 MPI_Type_commit(&compound_type0);
619
620 std::vector<T> recv_buffer_data(shape[1] * send_disp.back());
621 err = MPI_Neighbor_alltoallv(
622 send_buffer_data.data(), num_items_recv.data(), recv_disp.data(),
623 compound_type0, recv_buffer_data.data(), num_items_per_src.data(),
624 send_disp.data(), compound_type0, neigh_comm0);
625 dolfinx::MPI::check_error(comm, err);
626
627 err = MPI_Type_free(&compound_type0);
628 dolfinx::MPI::check_error(comm, err);
629 err = MPI_Comm_free(&neigh_comm0);
630 dolfinx::MPI::check_error(comm, err);
631
632 std::vector<std::int32_t> index_pos_to_buffer(indices.size(), -1);
633 for (std::size_t i = 0; i < src_to_index.size(); ++i)
634 index_pos_to_buffer[std::get<2>(src_to_index[i])] = i;
635
636 // Build the data to return, with rows ordered as in 'indices'
637 std::vector<T> x_new(shape[1] * indices.size());
638 for (std::size_t i = 0; i < indices.size(); ++i)
639 {
640 const std::int64_t index = indices[i];
641 if (index >= rank_offset and index < (rank_offset + shape0_local))
642 {
643 // Had data from the start in x
644 auto local_index = index - rank_offset;
645 std::copy_n(std::next(x.begin(), shape[1] * local_index), shape[1],
646 std::next(x_new.begin(), shape[1] * i));
647 }
648 else
649 {
650 if (int src = dolfinx::MPI::index_owner(size, index, shape[0]);
651 src == rank)
652 {
653 // In my post office bag
654 auto local_index = index - postoffice_range[0];
655 std::int32_t pos = post_indices_map[local_index];
656 assert(pos != -1);
657 std::copy_n(std::next(post_x.begin(), shape[1] * pos), shape[1],
658 std::next(x_new.begin(), shape[1] * i));
659 }
660 else
661 {
662 // In my received post
663 std::int32_t pos = index_pos_to_buffer[i];
664 assert(pos != -1);
665 std::copy_n(std::next(recv_buffer_data.begin(), shape[1] * pos),
666 shape[1], std::next(x_new.begin(), shape[1] * i));
667 }
668 }
669 }
670
671 return x_new;
672}
673//---------------------------------------------------------------------------
674template <typename U>
675std::vector<typename std::remove_reference_t<typename U::value_type>>
676distribute_data(MPI_Comm comm0, std::span<const std::int64_t> indices,
677 MPI_Comm comm1, const U& x, int shape1)
678{
679 assert(shape1 > 0);
680 assert(x.size() % shape1 == 0);
681 const std::int64_t shape0_local = x.size() / shape1;
682
683 int err;
684 std::int64_t shape0 = 0;
685 err = MPI_Allreduce(&shape0_local, &shape0, 1, MPI_INT64_T, MPI_SUM, comm0);
686 dolfinx::MPI::check_error(comm0, err);
687
688 std::int64_t rank_offset = -1;
689 if (comm1 != MPI_COMM_NULL)
690 {
691 rank_offset = 0;
692 err = MPI_Exscan(&shape0_local, &rank_offset, 1, MPI_INT64_T, MPI_SUM,
693 comm1);
694 dolfinx::MPI::check_error(comm1, err);
695 }
696 else
697 {
698 rank_offset = -1;
699 if (!x.empty())
700 throw std::runtime_error("Non-empty data on null MPI communicator");
701 }
702
703 return distribute_from_postoffice(comm0, indices, x, {shape0, shape1},
704 rank_offset);
705}
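A hedged end-to-end sketch (illustrative only; distribute_sketch is not part of DOLFINx): each rank contributes one row of a global size x 2 array and requests a row by global index; the returned data is ordered to match the requested indices.

void distribute_sketch(MPI_Comm comm)
{
  const int size = dolfinx::MPI::size(comm);
  const int rank = dolfinx::MPI::rank(comm);

  // Each rank holds one row {rank, rank} of a (size x 2) global array
  std::vector<double> x = {double(rank), double(rank)};

  // Every rank requests its right neighbour's row (a global row index)
  std::vector<std::int64_t> indices = {(rank + 1) % size};

  // comm is used both for the request/reply pattern (comm0) and to compute
  // the global shape and rank offsets of the supplied data (comm1)
  std::vector<double> x_new
      = dolfinx::MPI::distribute_data(comm, indices, comm, x, 2);
  // x_new has shape (indices.size() x 2), rows ordered as in 'indices'
}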
706//---------------------------------------------------------------------------
707
708} // namespace dolfinx::MPI
Duplicate an MPI communicator and manage the lifetime of the duplicated communicator.
Definition MPI.h:43
Comm(MPI_Comm comm, bool duplicate=true)
Duplicate communicator and wrap duplicate.
Definition MPI.cpp:12
~Comm()
Destructor (frees wrapped communicator)
Definition MPI.cpp:36
MPI_Comm comm() const noexcept
Return the underlying MPI_Comm object.
Definition MPI.cpp:62
Definition Timer.h:31
MPI support functionality.
Definition MPI.h:31
std::vector< typename std::remove_reference_t< typename U::value_type > > distribute_data(MPI_Comm comm0, std::span< const std::int64_t > indices, MPI_Comm comm1, const U &x, int shape1)
Distribute rows of a rectangular data array to ranks where they are required (scalable version).
Definition MPI.h:676
std::pair< std::vector< std::int32_t >, std::vector< typename std::remove_reference_t< typename U::value_type > > > distribute_to_postoffice(MPI_Comm comm, const U &x, std::array< std::int64_t, 2 > shape, std::int64_t rank_offset)
Distribute row data to 'post office' ranks.
Definition MPI.h:310
std::vector< int > compute_graph_edges_nbx(MPI_Comm comm, std::span< const int > edges)
Determine incoming graph edges using the NBX consensus algorithm.
Definition MPI.cpp:162
constexpr int index_owner(int size, std::size_t index, std::size_t N)
Return which rank owns index in global range [0, N - 1] (inverse of MPI::local_range).
Definition MPI.h:115
std::vector< typename std::remove_reference_t< typename U::value_type > > distribute_from_postoffice(MPI_Comm comm, std::span< const std::int64_t > indices, const U &x, std::array< std::int64_t, 2 > shape, std::int64_t rank_offset)
Distribute rows of a rectangular data array from post office ranks to ranks where they are required.
Definition MPI.h:461
std::vector< int > compute_graph_edges_pcx(MPI_Comm comm, std::span< const int > edges)
Determine incoming graph edges using the PCX consensus algorithm.
Definition MPI.cpp:95
void check_error(MPI_Comm comm, int code)
Check MPI error code. If the error code is not equal to MPI_SUCCESS, then std::abort is called.
Definition MPI.cpp:80
int size(MPI_Comm comm)
Return size of the group (number of processes) associated with the communicator.
Definition MPI.cpp:72
int rank(MPI_Comm comm)
Return process rank for the communicator.
Definition MPI.cpp:64
constexpr std::array< std::int64_t, 2 > local_range(int rank, std::int64_t N, int size)
Return local range for the calling process, partitioning the global [0, N - 1] range across all ranks...
Definition MPI.h:91
constexpr MPI_Datatype mpi_type()
MPI Type.
Definition MPI.h:273
tag
MPI communication tags.
Definition MPI.h:35
Definition MPI.h:268