DOLFINx  0.1.0
DOLFINx C++ interface
MPI.h
1 // Copyright (C) 2007-2014 Magnus Vikstrøm and Garth N. Wells
2 //
3 // This file is part of DOLFINx (https://www.fenicsproject.org)
4 //
5 // SPDX-License-Identifier: LGPL-3.0-or-later
6 
7 #pragma once
8 
9 #include <array>
10 #include <cassert>
11 #include <complex>
12 #include <cstdint>
13 #include <dolfinx/graph/AdjacencyList.h>
14 #include <iostream>
15 #include <numeric>
16 #include <set>
17 #include <tuple>
18 #include <type_traits>
19 #include <utility>
20 #include <vector>
21 
22 #define MPICH_IGNORE_CXX_SEEK 1
23 #include <mpi.h>
24 
25 namespace dolfinx
26 {
27 
30 class MPI
31 {
32 public:
35  class Comm
36  {
37  public:
39  explicit Comm(MPI_Comm comm, bool duplicate = true);
40 
42  Comm(const Comm& comm);
43 
45  Comm(Comm&& comm) noexcept;
46 
47  // Disable copy assignment operator
48  Comm& operator=(const Comm& comm) = delete;
49 
51  Comm& operator=(Comm&& comm) noexcept;
52 
54  ~Comm();
55 
57  MPI_Comm comm() const;
58 
59  private:
60  // MPI communicator
61  MPI_Comm _comm;
62  };
63 
65  static int rank(MPI_Comm comm);
66 
69  static int size(MPI_Comm comm);
70 
73  template <typename T>
74  static graph::AdjacencyList<T>
75  all_to_all(MPI_Comm comm, const graph::AdjacencyList<T>& send_data);
76 
89  static std::vector<int> compute_graph_edges(MPI_Comm comm,
90  const std::set<int>& edges);
91 
95  template <typename T>
96  static graph::AdjacencyList<T>
97  neighbor_all_to_all(MPI_Comm neighbor_comm,
98  const graph::AdjacencyList<T>& send_data);
99 
105  static std::tuple<std::vector<int>, std::vector<int>>
106  neighbors(MPI_Comm neighbor_comm);
107 
110  static std::size_t global_offset(MPI_Comm comm, std::size_t range,
111  bool exclusive);
112 
115  static std::array<std::int64_t, 2> local_range(int process, std::int64_t N,
116  int size);
117 
123  static int index_owner(int size, std::size_t index, std::size_t N);
124 
125  template <typename T>
126  struct dependent_false : std::false_type
127  {
128  };
129 
131  template <typename T>
132  static MPI_Datatype mpi_type()
133  {
134  static_assert(dependent_false<T>::value, "Unknown MPI type");
135  throw std::runtime_error("MPI data type unknown");
136  return MPI_CHAR;
137  }
138 };
139 
140 // Turn off doxygen for these template specialisations
142 // Specialisations for MPI_Datatypes
143 template <>
144 inline MPI_Datatype MPI::mpi_type<float>()
145 {
146  return MPI_FLOAT;
147 }
148 template <>
149 inline MPI_Datatype MPI::mpi_type<double>()
150 {
151  return MPI_DOUBLE;
152 }
153 template <>
154 inline MPI_Datatype MPI::mpi_type<std::complex<double>>()
155 {
156  return MPI_DOUBLE_COMPLEX;
157 }
158 template <>
159 inline MPI_Datatype MPI::mpi_type<short int>()
160 {
161  return MPI_SHORT;
162 }
163 template <>
164 inline MPI_Datatype MPI::mpi_type<int>()
165 {
166  return MPI_INT;
167 }
168 template <>
169 inline MPI_Datatype MPI::mpi_type<unsigned int>()
170 {
171  return MPI_UNSIGNED;
172 }
173 template <>
174 inline MPI_Datatype MPI::mpi_type<long int>()
175 {
176  return MPI_LONG;
177 }
178 template <>
179 inline MPI_Datatype MPI::mpi_type<unsigned long>()
180 {
181  return MPI_UNSIGNED_LONG;
182 }
183 template <>
184 inline MPI_Datatype MPI::mpi_type<long long>()
185 {
186  return MPI_LONG_LONG;
187 }
188 template <>
189 inline MPI_Datatype MPI::mpi_type<unsigned long long>()
190 {
191  return MPI_UNSIGNED_LONG_LONG;
192 }
193 template <>
194 inline MPI_Datatype MPI::mpi_type<bool>()
195 {
196  return MPI_C_BOOL;
197 }
199 //---------------------------------------------------------------------------
200 template <typename T>
201 graph::AdjacencyList<T>
202 dolfinx::MPI::all_to_all(MPI_Comm comm,
203  const graph::AdjacencyList<T>& send_data)
204 {
205  const std::vector<std::int32_t>& send_offsets = send_data.offsets();
206  const std::vector<T>& values_in = send_data.array();
207 
208  const int comm_size = MPI::size(comm);
209  assert(send_data.num_nodes() == comm_size);
210 
211  // Data size per destination rank
212  std::vector<int> send_size(comm_size);
213  std::adjacent_difference(std::next(send_offsets.begin(), +1),
214  send_offsets.end(), send_size.begin());
215 
216  // Get received data sizes from each rank
217  std::vector<int> recv_size(comm_size);
218  MPI_Alltoall(send_size.data(), 1, mpi_type<int>(), recv_size.data(), 1,
219  mpi_type<int>(), comm);
220 
221  // Compute receive offset
222  std::vector<std::int32_t> recv_offset(comm_size + 1);
223  recv_offset[0] = 0;
224  std::partial_sum(recv_size.begin(), recv_size.end(),
225  std::next(recv_offset.begin()));
226 
227  // Send/receive data
228  std::vector<T> recv_values(recv_offset[comm_size]);
229  MPI_Alltoallv(values_in.data(), send_size.data(), send_offsets.data(),
230  mpi_type<T>(), recv_values.data(), recv_size.data(),
231  recv_offset.data(), mpi_type<T>(), comm);
232 
233  return graph::AdjacencyList<T>(std::move(recv_values),
234  std::move(recv_offset));
235 }
236 //-----------------------------------------------------------------------------
237 template <typename T>
238 graph::AdjacencyList<T>
239 dolfinx::MPI::neighbor_all_to_all(MPI_Comm neighbor_comm,
240  const graph::AdjacencyList<T>& send_data)
241 {
242  // Get neighbor processes
243  int indegree(-1), outdegree(-2), weighted(-1);
244  MPI_Dist_graph_neighbors_count(neighbor_comm, &indegree, &outdegree,
245  &weighted);
246 
247  // Allocate memory (add '1' to handle empty case as OpenMPI fails for
248  // null pointers)
249  std::vector<int> send_sizes(outdegree + 1, 0);
250  std::vector<int> recv_sizes(indegree + 1);
251  std::adjacent_difference(std::next(send_data.offsets().begin()),
252  send_data.offsets().end(), send_sizes.begin());
253  // Get receive sizes
254  MPI_Neighbor_alltoall(send_sizes.data(), 1, MPI::mpi_type<int>(),
255  recv_sizes.data(), 1, MPI::mpi_type<int>(),
256  neighbor_comm);
257 
258  // Work out recv offsets. Note use of std::prev to handle OpenMPI
259  // issue mentioned above
260  std::vector<int> recv_offsets(indegree + 1);
261  recv_offsets[0] = 0;
262  std::partial_sum(recv_sizes.begin(), std::prev(recv_sizes.end()),
263  std::next(recv_offsets.begin(), 1));
264 
265  std::vector<T> recv_data(recv_offsets[recv_offsets.size() - 1]);
266  MPI_Neighbor_alltoallv(
267  send_data.array().data(), send_sizes.data(), send_data.offsets().data(),
268  MPI::mpi_type<T>(), recv_data.data(), recv_sizes.data(),
269  recv_offsets.data(), MPI::mpi_type<T>(), neighbor_comm);
270 
271  return graph::AdjacencyList<T>(std::move(recv_data), std::move(recv_offsets));
272 }
273 //---------------------------------------------------------------------------
274 
275 } // namespace dolfinx
dolfinx::MPI::global_offset
static std::size_t global_offset(MPI_Comm comm, std::size_t range, bool exclusive)
Find global offset (index) (wrapper for MPI_(Ex)Scan with MPI_SUM as reduction op)
Definition: MPI.cpp:92
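A minimal usage sketch (assuming MPI has been initialised and this header is available, e.g. as <dolfinx/common/MPI.h>; local_size is illustrative):

  // Exclusive scan: global index of this rank's first locally owned entry
  std::size_t local_size = 10;
  std::size_t offset = dolfinx::MPI::global_offset(MPI_COMM_WORLD, local_size, true);
  // With exclusive = false the sum includes this rank's own contribution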
dolfinx::graph::AdjacencyList::num_nodes
std::int32_t num_nodes() const
Get the number of nodes.
Definition: AdjacencyList.h:115
dolfinx::MPI
This class provides utility functions for easy communication with MPI and handles cases when DOLFINx ...
Definition: MPI.h:30
dolfinx::MPI::neighbor_all_to_all
static graph::AdjacencyList< T > neighbor_all_to_all(MPI_Comm neighbor_comm, const graph::AdjacencyList< T > &send_data)
Neighborhood all-to-all. Send data to neighbors. Send in_values[n0] to neighbor process n0 and receiv...
Definition: MPI.h:239
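neighbor_all_to_all requires a distributed-graph (neighbourhood) communicator. A hedged sketch; the ring pattern, the data values and the variable names are illustrative only:

  int rank = dolfinx::MPI::rank(MPI_COMM_WORLD);
  int size = dolfinx::MPI::size(MPI_COMM_WORLD);
  std::vector<int> sources = {(rank + size - 1) % size};
  std::vector<int> dests = {(rank + 1) % size};
  MPI_Comm neighbor_comm;
  MPI_Dist_graph_create_adjacent(MPI_COMM_WORLD, 1, sources.data(), MPI_UNWEIGHTED,
                                 1, dests.data(), MPI_UNWEIGHTED, MPI_INFO_NULL,
                                 false, &neighbor_comm);

  // One block of values per destination neighbour (a single neighbour here)
  dolfinx::graph::AdjacencyList<std::int64_t> send_data(
      std::vector<std::int64_t>{rank, rank, rank}, std::vector<std::int32_t>{0, 3});
  auto recv_data = dolfinx::MPI::neighbor_all_to_all(neighbor_comm, send_data);
  MPI_Comm_free(&neighbor_comm);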
dolfinx::MPI::compute_graph_edges
static std::vector< int > compute_graph_edges(MPI_Comm comm, const std::set< int > &edges)
Definition: MPI.cpp:137
dolfinx::MPI::Comm::Comm
Comm(MPI_Comm comm, bool duplicate=true)
Duplicate communicator and wrap duplicate.
Definition: MPI.cpp:11
dolfinx::MPI::Comm::~Comm
~Comm()
Destructor (frees wrapped communicator)
Definition: MPI.cpp:41
dolfinx::graph::AdjacencyList
This class provides a static adjacency list data structure. It is commonly used to store directed gra...
Definition: AdjacencyList.h:46
dolfinx::MPI::rank
static int rank(MPI_Comm comm)
Return process rank for the communicator.
Definition: MPI.cpp:77
dolfinx::graph::AdjacencyList::array
const std::vector< T > & array() const
Return contiguous array of links for all nodes (const version)
Definition: AdjacencyList.h:147
dolfinx::MPI::size
static int size(MPI_Comm comm)
Return size of the group (number of processes) associated with the communicator.
Definition: MPI.cpp:85
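A short sketch using rank() and size() together (assumes MPI_Init has already been called):

  int rank = dolfinx::MPI::rank(MPI_COMM_WORLD);
  int size = dolfinx::MPI::size(MPI_COMM_WORLD);
  if (rank == 0)
    std::cout << "Running on " << size << " processes" << std::endl;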
dolfinx::MPI::all_to_all
static graph::AdjacencyList< T > all_to_all(MPI_Comm comm, const graph::AdjacencyList< T > &send_data)
Send in_values[p0] to process p0 and receive values from process p1 in out_values[p1].
Definition: MPI.h:202
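A usage sketch: the send AdjacencyList must contain one node per rank of the communicator (as asserted in the implementation above). The values are illustrative:

  // Send this rank's number to every rank, one value per destination
  int size = dolfinx::MPI::size(MPI_COMM_WORLD);
  int rank = dolfinx::MPI::rank(MPI_COMM_WORLD);
  std::vector<int> values(size, rank);
  std::vector<std::int32_t> offsets(size + 1);
  std::iota(offsets.begin(), offsets.end(), 0);
  dolfinx::graph::AdjacencyList<int> send_data(values, offsets);
  auto recv = dolfinx::MPI::all_to_all(MPI_COMM_WORLD, send_data);
  // recv.array()[p] is the value received from rank p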
dolfinx::MPI::Comm
A duplicate MPI communicator and manage lifetime of the communicator.
Definition: MPI.h:35
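A small RAII sketch: the constructor duplicates the communicator and the destructor frees the duplicate (see the Comm entries above):

  {
    dolfinx::MPI::Comm comm(MPI_COMM_WORLD); // duplicates MPI_COMM_WORLD
    int rank = dolfinx::MPI::rank(comm.comm());
    // pass comm.comm() wherever a raw MPI_Comm is required
  } // the duplicated communicator is freed here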
dolfinx::MPI::index_owner
static int index_owner(int size, std::size_t index, std::size_t N)
Return which process owns index (inverse of local_range)
Definition: MPI.cpp:121
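A usage sketch with illustrative values:

  // Which rank owns global index 42 when N indices are split across all ranks?
  std::size_t N = 1000;
  int owner = dolfinx::MPI::index_owner(dolfinx::MPI::size(MPI_COMM_WORLD), 42, N);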
dolfinx::MPI::mpi_type
static MPI_Datatype mpi_type()
MPI Type.
Definition: MPI.h:132
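mpi_type<T>() maps a C++ type to the corresponding MPI_Datatype at compile time (via the specialisations in the header above). A sketch in a plain MPI call:

  double local = 1.0, total = 0.0;
  MPI_Allreduce(&local, &total, 1, dolfinx::MPI::mpi_type<double>(), MPI_SUM,
                MPI_COMM_WORLD);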
dolfinx::MPI::local_range
static std::array< std::int64_t, 2 > local_range(int process, std::int64_t N, int size)
Return local range for given process, splitting [0, N - 1] into size() portions of almost equal size.
Definition: MPI.cpp:103
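A usage sketch (assuming the returned pair follows the usual half-open convention [begin, end)):

  std::int64_t N = 1000;
  int rank = dolfinx::MPI::rank(MPI_COMM_WORLD);
  int size = dolfinx::MPI::size(MPI_COMM_WORLD);
  std::array<std::int64_t, 2> range = dolfinx::MPI::local_range(rank, N, size);
  // This rank's block of indices starts at range[0] and ends before range[1]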
dolfinx::MPI::dependent_false
Definition: MPI.h:126
dolfinx::MPI::Comm::comm
MPI_Comm comm() const
Return the underlying MPI_Comm object.
Definition: MPI.cpp:75
dolfinx::graph::AdjacencyList::offsets
const std::vector< std::int32_t > & offsets() const
Offset for each node in array() (const version)
Definition: AdjacencyList.h:153
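A small sketch of how array(), offsets() and num_nodes() relate (values illustrative):

  // Two nodes: node 0 has links {1, 2}, node 1 has links {3}
  dolfinx::graph::AdjacencyList<int> adj(std::vector<int>{1, 2, 3},
                                         std::vector<std::int32_t>{0, 2, 3});
  assert(adj.num_nodes() == 2);
  // The links of node n are array()[offsets()[n]], ..., array()[offsets()[n + 1] - 1]
  const std::vector<int>& links = adj.array();
  const std::vector<std::int32_t>& off = adj.offsets();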
dolfinx::MPI::neighbors
static std::tuple< std::vector< int >, std::vector< int > > neighbors(MPI_Comm neighbor_comm)
Definition: MPI.cpp:159
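No brief description is shown for neighbors() here; based on the signature and its use with neighbourhood communicators, the sketch below assumes the returned tuple holds the source and destination ranks, for a neighbor_comm created as in the neighbor_all_to_all sketch above:

  auto [sources, destinations] = dolfinx::MPI::neighbors(neighbor_comm);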