DOLFINx 0.3.0
DOLFINx C++ interface
IndexMap.h
1 // Copyright (C) 2015-2019 Chris Richardson, Garth N. Wells and Igor Baratta
2 //
3 // This file is part of DOLFINx (https://www.fenicsproject.org)
4 //
5 // SPDX-License-Identifier: LGPL-3.0-or-later
6 
7 #pragma once
8 
9 #include <array>
10 #include <cstdint>
11 #include <dolfinx/common/MPI.h>
12 #include <dolfinx/graph/AdjacencyList.h>
13 #include <map>
14 #include <memory>
15 #include <tuple>
16 #include <utility>
17 #include <vector>
18 #include <xtl/xspan.hpp>
19 
20 namespace dolfinx::common
21 {
22 // Forward declaration
23 class IndexMap;
24 
34 std::tuple<std::int64_t, std::vector<std::int32_t>,
35  std::vector<std::vector<std::int64_t>>,
36  std::vector<std::vector<int>>>
37 stack_index_maps(
38  const std::vector<
39  std::pair<std::reference_wrapper<const common::IndexMap>, int>>& maps);
40 
47 
48 class IndexMap
49 {
50 public:
52  enum class Mode
53  {
54  insert,
55  add
56  };
57 
59  enum class Direction
60  {
61  reverse, // Ghost to owner
62  forward, // Owner to ghost
63  };
64 
72  IndexMap(MPI_Comm comm, std::int32_t local_size);
73 
86  IndexMap(MPI_Comm mpi_comm, std::int32_t local_size,
87  const xtl::span<const int>& dest_ranks,
88  const xtl::span<const std::int64_t>& ghosts,
89  const xtl::span<const int>& src_ranks);
90 
91  // Copy constructor
92  IndexMap(const IndexMap& map) = delete;
93 
95  IndexMap(IndexMap&& map) = default;
96 
98  ~IndexMap() = default;
99 
101  IndexMap& operator=(IndexMap&& map) = default;
102 
103  // Copy assignment
104  IndexMap& operator=(const IndexMap& map) = delete;
105 
107  std::array<std::int64_t, 2> local_range() const noexcept;
108 
110  std::int32_t num_ghosts() const noexcept;
111 
113  std::int32_t size_local() const noexcept;
114 
116  std::int64_t size_global() const noexcept;
117 
120  const std::vector<std::int64_t>& ghosts() const noexcept;
121 
126  MPI_Comm comm(Direction dir) const;
127 
131  void local_to_global(const xtl::span<const std::int32_t>& local,
132  const xtl::span<std::int64_t>& global) const;
133 
138  void global_to_local(const xtl::span<const std::int64_t>& global,
139  const xtl::span<std::int32_t>& local) const;
140 
144  std::vector<std::int64_t> global_indices() const;
145 
159  const graph::AdjacencyList<std::int32_t>&
160  scatter_fwd_indices() const noexcept;
161 
167  const std::vector<std::int32_t>& scatter_fwd_ghost_positions() const noexcept;
168 
170  std::vector<int> ghost_owner_rank() const;
171 
178  std::map<std::int32_t, std::set<int>> compute_shared_indices() const;
179 
199  template <typename T>
200  void scatter_fwd_begin(const xtl::span<const T>& send_buffer,
201  MPI_Datatype& data_type, MPI_Request& request,
202  const xtl::span<T>& recv_buffer) const
203  {
204  // Send displacement
205  const std::vector<int32_t>& displs_send_fwd = _shared_indices->offsets();
206 
207  // Return early if there are no incoming or outgoing edges
208  if (_displs_recv_fwd.size() == 1 and displs_send_fwd.size() == 1)
209  return;
210 
211  // Get block size
212  int n;
213  MPI_Type_size(data_type, &n);
214  n /= sizeof(T);
215  if (static_cast<int>(send_buffer.size()) != n * displs_send_fwd.back())
216  throw std::runtime_error("Incompatible send buffer size.");
217  if (static_cast<int>(recv_buffer.size()) != n * _displs_recv_fwd.back())
218  throw std::runtime_error("Incompatible receive buffer size.");
219 
220  // Start send/receive
221  MPI_Ineighbor_alltoallv(send_buffer.data(), _sizes_send_fwd.data(),
222  displs_send_fwd.data(), data_type,
223  recv_buffer.data(), _sizes_recv_fwd.data(),
224  _displs_recv_fwd.data(), data_type,
225  _comm_owner_to_ghost.comm(), &request);
226  }
227 
234  void scatter_fwd_end(MPI_Request& request) const
235  {
236  // Return early if there are no incoming or outgoing edges
237  const std::vector<int32_t>& displs_send_fwd = _shared_indices->offsets();
238  if (_displs_recv_fwd.size() == 1 and displs_send_fwd.size() == 1)
239  return;
240 
241  // Wait for communication to complete
242  MPI_Wait(&request, MPI_STATUS_IGNORE);
243  }
244 
255  template <typename T>
256  void scatter_fwd(const xtl::span<const T>& local_data,
257  xtl::span<T> remote_data, int n) const
258  {
259  MPI_Datatype data_type;
260  if (n == 1)
261  data_type = MPI::mpi_type<T>();
262  else
263  {
264  MPI_Type_contiguous(n, dolfinx::MPI::mpi_type<T>(), &data_type);
265  MPI_Type_commit(&data_type);
266  }
267 
268  const std::vector<std::int32_t>& indices = _shared_indices->array();
269  std::vector<T> send_buffer(n * indices.size());
270  for (std::size_t i = 0; i < indices.size(); ++i)
271  {
272  std::copy_n(std::next(local_data.cbegin(), n * indices[i]), n,
273  std::next(send_buffer.begin(), n * i));
274  }
275 
276  MPI_Request request;
277  std::vector<T> buffer_recv(n * _displs_recv_fwd.back());
278  scatter_fwd_begin(xtl::span<const T>(send_buffer), data_type, request,
279  xtl::span<T>(buffer_recv));
280  scatter_fwd_end(request);
281 
282  // Copy into ghost area ("remote_data")
283  assert(remote_data.size() == n * _ghost_pos_recv_fwd.size());
284  for (std::size_t i = 0; i < _ghost_pos_recv_fwd.size(); ++i)
285  {
286  std::copy_n(std::next(buffer_recv.cbegin(), n * _ghost_pos_recv_fwd[i]),
287  n, std::next(remote_data.begin(), n * i));
288  }
289 
290  if (n != 1)
291  MPI_Type_free(&data_type);
292  }
293 
314  template <typename T>
315  void scatter_rev_begin(const xtl::span<const T>& send_buffer,
316  MPI_Datatype& data_type, MPI_Request& request,
317  const xtl::span<T>& recv_buffer) const
318  {
319  // Get displacement vector
320  const std::vector<int32_t>& displs_send_fwd = _shared_indices->offsets();
321 
322  // Return early if there are no incoming or outgoing edges
323  if (_displs_recv_fwd.size() == 1 and displs_send_fwd.size() == 1)
324  return;
325 
326  // Get block size
327  int n;
328  MPI_Type_size(data_type, &n);
329  n /= sizeof(T);
330  if (static_cast<int>(send_buffer.size()) != n * _ghosts.size())
331  throw std::runtime_error("Inconsistent send buffer size.");
332  if (static_cast<int>(recv_buffer.size()) != n * displs_send_fwd.back())
333  throw std::runtime_error("Inconsistent receive buffer size.");
334 
335  // Send and receive data
336  MPI_Ineighbor_alltoallv(send_buffer.data(), _sizes_recv_fwd.data(),
337  _displs_recv_fwd.data(), data_type,
338  recv_buffer.data(), _sizes_send_fwd.data(),
339  displs_send_fwd.data(), data_type,
340  _comm_ghost_to_owner.comm(), &request);
341  }
342 
349  void scatter_rev_end(MPI_Request& request) const
350  {
351  // Return early if there are no incoming or outgoing edges
352  const std::vector<int32_t>& displs_send_fwd = _shared_indices->offsets();
353  if (_displs_recv_fwd.size() == 1 and displs_send_fwd.size() == 1)
354  return;
355 
356  // Wait for communication to complete
357  MPI_Wait(&request, MPI_STATUS_IGNORE);
358  }
359 
369  template <typename T>
370  void scatter_rev(xtl::span<T> local_data,
371  const xtl::span<const T>& remote_data, int n,
372  IndexMap::Mode op) const
373  {
374  MPI_Datatype data_type;
375  if (n == 1)
376  data_type = MPI::mpi_type<T>();
377  else
378  {
379  MPI_Type_contiguous(n, dolfinx::MPI::mpi_type<T>(), &data_type);
380  MPI_Type_commit(&data_type);
381  }
382 
383  // Pack send buffer
384  std::vector<T> buffer_send;
385  buffer_send.resize(n * _displs_recv_fwd.back());
386  for (std::size_t i = 0; i < _ghost_pos_recv_fwd.size(); ++i)
387  {
388  std::copy_n(std::next(remote_data.cbegin(), n * i), n,
389  std::next(buffer_send.begin(), n * _ghost_pos_recv_fwd[i]));
390  }
391 
392  // Exchange data
393  MPI_Request request;
394  std::vector<T> buffer_recv(n * _shared_indices->array().size());
395  scatter_rev_begin(xtl::span<const T>(buffer_send), data_type, request,
396  xtl::span<T>(buffer_recv));
397  scatter_rev_end(request);
398 
399  // Copy or accumulate into "local_data"
400  assert(local_data.size() == n * this->size_local());
401  const std::vector<std::int32_t>& shared_indices = _shared_indices->array();
402  switch (op)
403  {
404  case Mode::insert:
405  for (std::size_t i = 0; i < shared_indices.size(); ++i)
406  {
407  std::copy_n(std::next(buffer_recv.cbegin(), n * i), n,
408  std::next(local_data.begin(), n * shared_indices[i]));
409  }
410  break;
411  case Mode::add:
412  for (std::size_t i = 0; i < shared_indices.size(); ++i)
413  {
414  for (int j = 0; j < n; ++j)
415  local_data[shared_indices[i] * n + j] += buffer_recv[i * n + j];
416  }
417  break;
418  }
419 
420  if (n != 1)
421  MPI_Type_free(&data_type);
422  }
423 
424 private:
425  // Range of indices (global) owned by this process
426  std::array<std::int64_t, 2> _local_range;
427 
428  // Number of indices across the communicator
429  std::int64_t _size_global;
430 
431  // MPI neighborhood communicators
432 
433  // Communicator where the source ranks own the indices in the caller's
434  // halo, and the destination ranks 'ghost' indices owned by the
435  // caller. I.e.,
436  // - in-edges (src) are from ranks that own my ghosts
437  // - out-edges (dest) go to ranks that 'ghost' my owned indices
438  dolfinx::MPI::Comm _comm_owner_to_ghost;
439 
440  // Communicator where the source ranks have ghost indices that are
441  // owned by the caller, and the destination ranks are the owners of
442  // indices in the caller's halo region. I.e.,
443  // - in-edges (src) are from ranks that 'ghost' my owned indices
444  // - out-edges (dest) are to the owning ranks of my ghost indices
445  dolfinx::MPI::Comm _comm_ghost_to_owner;
446 
447  // MPI sizes and displacements for forward (owner -> ghost) scatter
448  std::vector<std::int32_t> _sizes_recv_fwd, _sizes_send_fwd, _displs_recv_fwd;
449 
450  // Position in the recv buffer for a forward scatter for the _ghosts[i]
451  // entry
452  std::vector<std::int32_t> _ghost_pos_recv_fwd;
453 
454  // Local-to-global map for ghost indices
455  std::vector<std::int64_t> _ghosts;
456 
457  // List of owned local indices that are in the halo (ghost) region on
458  // other ranks, grouped by rank in the neighbor communicator
459  // (destination ranks in forward communicator and source ranks in the
460  // reverse communicator), i.e. `_shared_indices.num_nodes() ==
461  // size(_comm_owner_to_ghost)`.
462  std::unique_ptr<graph::AdjacencyList<std::int32_t>> _shared_indices;
463 };
464 
465 } // namespace dolfinx::common
A duplicate MPI communicator that manages the lifetime of the communicator.
Definition: MPI.h:32
This class represents the distribution of index arrays across processes. An index array is a contiguous ...
Definition: IndexMap.h:49
std::map< std::int32_t, std::set< int > > compute_shared_indices() const
Definition: IndexMap.cpp:571
void scatter_rev_begin(const xtl::span< const T > &send_buffer, MPI_Datatype &data_type, MPI_Request &request, const xtl::span< T > &recv_buffer) const
Start a non-blocking send of ghost values to the owning rank. The non-blocking communication is completed by calling scatter_rev_end.
Definition: IndexMap.h:315
std::vector< int > ghost_owner_rank() const
Owner rank on the global communicator of each ghost entry.
Definition: IndexMap.cpp:537
MPI_Comm comm(Direction dir) const
Return an MPI communicator with attached distributed graph topology information.
Definition: IndexMap.cpp:558
void global_to_local(const xtl::span< const std::int64_t > &global, const xtl::span< std::int32_t > &local) const
Compute local indices for an array of global indices.
Definition: IndexMap.cpp:485
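As an illustration, a minimal usage sketch, assuming `map` is an existing dolfinx::common::IndexMap that owns at least two indices (the variable names are illustrative, not part of the API):

  // Map two global indices from this rank's owned range back to local indices
  std::vector<std::int64_t> global = {map.local_range()[0], map.local_range()[0] + 1};
  std::vector<std::int32_t> local(global.size());
  map.global_to_local(xtl::span<const std::int64_t>(global.data(), global.size()),
                      xtl::span<std::int32_t>(local.data(), local.size()));
  // local now holds {0, 1}, since owned indices are numbered locally from 0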
~IndexMap()=default
Destructor.
std::array< std::int64_t, 2 > local_range() const noexcept
Range of indices (global) owned by this process.
Definition: IndexMap.cpp:447
Mode
Mode for reverse scatter operation.
Definition: IndexMap.h:53
IndexMap(IndexMap &&map)=default
Move constructor.
void scatter_fwd_begin(const xtl::span< const T > &send_buffer, MPI_Datatype &data_type, MPI_Request &request, const xtl::span< T > &recv_buffer) const
Start a non-blocking send of owned data to ranks that ghost the data. The communication is completed by calling scatter_fwd_end.
Definition: IndexMap.h:200
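A sketch of the split begin/end pattern, which lets communication overlap with local computation. It assumes a block size of 1 and that `map`, `send_buffer` and `recv_buffer` already exist, with `send_buffer` packed in the order given by scatter_fwd_indices() and `recv_buffer` sized for the incoming ghost data (scatter_fwd below shows the packing and unpacking steps):

  MPI_Datatype data_type = dolfinx::MPI::mpi_type<double>(); // block size n == 1
  MPI_Request request;
  map.scatter_fwd_begin(
      xtl::span<const double>(send_buffer.data(), send_buffer.size()), data_type,
      request, xtl::span<double>(recv_buffer.data(), recv_buffer.size()));
  // ... do local work that does not touch the ghost region ...
  map.scatter_fwd_end(request); // wait for the owner-to-ghost data to arrive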
const graph::AdjacencyList< std::int32_t > & scatter_fwd_indices() const noexcept
Local (owned) indices shared with neighbor processes, i.e. indices that are ghosts on other processes, ...
Definition: IndexMap.cpp:525
Direction
Edge directions of neighborhood communicator.
Definition: IndexMap.h:60
std::int64_t size_global() const noexcept
Number of indices across the communicator.
Definition: IndexMap.cpp:459
void scatter_fwd(const xtl::span< const T > &local_data, xtl::span< T > remote_data, int n) const
Send n values for each owned index to processes that have the index as a ghost. ...
Definition: IndexMap.h:256
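For example, a blocking forward scatter of one double per index might look like the following sketch (`map` is an assumed existing IndexMap):

  std::vector<double> owned(map.size_local(), 1.0); // values for owned indices
  std::vector<double> ghost(map.num_ghosts());      // to be filled with owners' values
  map.scatter_fwd(xtl::span<const double>(owned.data(), owned.size()),
                  xtl::span<double>(ghost.data(), ghost.size()), 1);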
std::vector< std::int64_t > global_indices() const
Global indices.
Definition: IndexMap.cpp:511
IndexMap(MPI_Comm comm, std::int32_t local_size)
Create a non-overlapping index map with local_size indices owned on this process.
Definition: IndexMap.cpp:278
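A complete minimal sketch: each rank owns a contiguous block of 100 indices and there are no ghosts, so the global size is 100 times the number of ranks (the variable names are illustrative):

  #include <dolfinx/common/IndexMap.h>
  #include <iostream>
  #include <mpi.h>

  int main(int argc, char* argv[])
  {
    MPI_Init(&argc, &argv);
    {
      // Non-overlapping map: 100 indices owned on each rank, no ghosts
      dolfinx::common::IndexMap map(MPI_COMM_WORLD, 100);
      std::array<std::int64_t, 2> range = map.local_range();
      std::cout << "Owned global range: [" << range[0] << ", " << range[1]
                << "), global size " << map.size_global() << std::endl;
    }
    MPI_Finalize();
    return 0;
  }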
std::int32_t num_ghosts() const noexcept
Number of ghost indices on this process.
Definition: IndexMap.cpp:452
IndexMap & operator=(IndexMap &&map)=default
Move assignment.
void scatter_fwd_end(MPI_Request &request) const
Complete a non-blocking send from the local owner to process ranks that have the index as a ghost. This function completes the communication started by scatter_fwd_begin.
Definition: IndexMap.h:234
const std::vector< std::int64_t > & ghosts() const noexcept
Local-to-global map for ghosts (local indexing beyond end of local range)
Definition: IndexMap.cpp:461
void scatter_rev_end(MPI_Request &request) const
Complete a non-blocking send of ghost values to the owning rank. This function completes the communication started by scatter_rev_begin.
Definition: IndexMap.h:349
const std::vector< std::int32_t > & scatter_fwd_ghost_positions() const noexcept
Position of ghost entries in the receive buffer after a forward scatter, e.g. for a receive buffer b ...
Definition: IndexMap.cpp:532
void local_to_global(const xtl::span< const std::int32_t > &local, const xtl::span< std::int64_t > &global) const
Compute global indices for an array of local indices.
Definition: IndexMap.cpp:466
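A sketch converting all owned local indices to their global numbers (`map` is an assumed existing IndexMap); ghosts occupy the local positions at and beyond size_local() and map to the entries of ghosts():

  std::vector<std::int32_t> local(map.size_local());
  for (std::size_t i = 0; i < local.size(); ++i)
    local[i] = static_cast<std::int32_t>(i); // owned local indices 0, 1, ...
  std::vector<std::int64_t> global(local.size());
  map.local_to_global(xtl::span<const std::int32_t>(local.data(), local.size()),
                      xtl::span<std::int64_t>(global.data(), global.size()));
  // For owned indices, global[i] == map.local_range()[0] + i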
void scatter_rev(xtl::span< T > local_data, const xtl::span< const T > &remote_data, int n, IndexMap::Mode op) const
Send n values for each ghost index to the owning process.
Definition: IndexMap.h:370
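A sketch of a reverse scatter that accumulates ghost contributions into the owned data, e.g. after values have been added into ghost entries during assembly (block size 1, `map` is an assumed existing IndexMap):

  std::vector<double> owned(map.size_local(), 0.0);
  std::vector<double> ghost(map.num_ghosts(), 1.0); // contributions computed locally
  map.scatter_rev(xtl::span<double>(owned.data(), owned.size()),
                  xtl::span<const double>(ghost.data(), ghost.size()), 1,
                  dolfinx::common::IndexMap::Mode::add);
  // With Mode::insert the received values overwrite the owned entries instead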
std::int32_t size_local() const noexcept
Number of indices owned by this process.
Definition: IndexMap.cpp:454
Miscellaneous classes, functions and types.
std::tuple< std::int64_t, std::vector< std::int32_t >, std::vector< std::vector< std::int64_t > >, std::vector< std::vector< int > > > stack_index_maps(const std::vector< std::pair< std::reference_wrapper< const common::IndexMap >, int >> &maps)
Compute layout data and ghost indices for a stacked (concatenated) index map, i.e....
Definition: IndexMap.cpp:159
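A sketch of stacking two maps, e.g. the index maps of two function spaces with block sizes 3 and 1. The maps map_u and map_p are assumed to exist, and the structured-binding names are illustrative, following the order of the return tuple above:

  // Requires <functional> for std::cref/std::reference_wrapper
  std::vector<std::pair<std::reference_wrapper<const dolfinx::common::IndexMap>, int>>
      maps = {{std::cref(map_u), 3}, {std::cref(map_p), 1}};
  auto [offset, local_offsets, ghost_indices, ghost_owners]
      = dolfinx::common::stack_index_maps(maps);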