DOLFINx  0.4.0
DOLFINx C++ interface
IndexMap.h
1 // Copyright (C) 2015-2019 Chris Richardson, Garth N. Wells and Igor Baratta
2 //
3 // This file is part of DOLFINx (https://www.fenicsproject.org)
4 //
5 // SPDX-License-Identifier: LGPL-3.0-or-later
6 
7 #pragma once
8 
9 #include <array>
10 #include <cstdint>
11 #include <dolfinx/common/MPI.h>
12 #include <dolfinx/graph/AdjacencyList.h>
13 #include <map>
14 #include <memory>
15 #include <tuple>
16 #include <utility>
17 #include <vector>
18 #include <xtl/xspan.hpp>
19 
20 namespace dolfinx::common
21 {
22 // Forward declaration
23 class IndexMap;
24 
32 std::vector<int32_t>
33 compute_owned_indices(const xtl::span<const std::int32_t>& indices,
34  const IndexMap& map);
35 
45 std::tuple<std::int64_t, std::vector<std::int32_t>,
46  std::vector<std::vector<std::int64_t>>,
47  std::vector<std::vector<int>>>
48 stack_index_maps(
49  const std::vector<
50  std::pair<std::reference_wrapper<const common::IndexMap>, int>>& maps);
51 
58 
59 class IndexMap
60 {
61 public:
63  enum class Mode
64  {
65  insert,
66  add
67  };
68 
70  enum class Direction
71  {
72  reverse, // Ghost to owner
73  forward, // Owner to ghost
74  };
75 
83  IndexMap(MPI_Comm comm, std::int32_t local_size);
84 
97  IndexMap(MPI_Comm comm, std::int32_t local_size,
98  const xtl::span<const int>& dest_ranks,
99  const xtl::span<const std::int64_t>& ghosts,
100  const xtl::span<const int>& src_ranks);
101 
102 private:
103  template <typename U, typename V, typename W, typename X>
104  IndexMap(std::array<std::int64_t, 2> local_range, std::size_t size_global,
105  MPI_Comm comm, U&& comm_owner_to_ghost, U&& comm_ghost_to_owner,
106  V&& displs_recv_fwd, V&& ghost_pos_recv_fwd, W&& ghosts,
107  X&& shared_indices)
108  : _local_range(local_range), _size_global(size_global), _comm(comm),
109  _comm_owner_to_ghost(std::forward<U>(comm_owner_to_ghost)),
110  _comm_ghost_to_owner(std::forward<U>(comm_ghost_to_owner)),
111  _displs_recv_fwd(std::forward<V>(displs_recv_fwd)),
112  _ghost_pos_recv_fwd(std::forward<V>(ghost_pos_recv_fwd)),
113  _ghosts(std::forward<W>(ghosts)),
114  _shared_indices(std::forward<X>(shared_indices))
115  {
116  _sizes_recv_fwd.resize(_displs_recv_fwd.size() - 1, 0);
117  std::adjacent_difference(_displs_recv_fwd.cbegin() + 1,
118  _displs_recv_fwd.cend(), _sizes_recv_fwd.begin());
119 
120  const std::vector<int32_t>& displs_send = _shared_indices->offsets();
121  _sizes_send_fwd.resize(_shared_indices->num_nodes(), 0);
122  std::adjacent_difference(displs_send.cbegin() + 1, displs_send.cend(),
123  _sizes_send_fwd.begin());
124  }
125 
126 public:
127  // Copy constructor
128  IndexMap(const IndexMap& map) = delete;
129 
131  IndexMap(IndexMap&& map) = default;
132 
134  ~IndexMap() = default;
135 
137  IndexMap& operator=(IndexMap&& map) = default;
138 
139  // Copy assignment
140  IndexMap& operator=(const IndexMap& map) = delete;
141 
143  std::array<std::int64_t, 2> local_range() const noexcept;
144 
146  std::int32_t num_ghosts() const noexcept;
147 
149  std::int32_t size_local() const noexcept;
150 
152  std::int64_t size_global() const noexcept;
153 
156  const std::vector<std::int64_t>& ghosts() const noexcept;
157 
160  MPI_Comm comm() const;
161 
166  MPI_Comm comm(Direction dir) const;
167 
171  void local_to_global(const xtl::span<const std::int32_t>& local,
172  const xtl::span<std::int64_t>& global) const;
173 
178  void global_to_local(const xtl::span<const std::int64_t>& global,
179  const xtl::span<std::int32_t>& local) const;
180 
184  std::vector<std::int64_t> global_indices() const;
185 
199  const graph::AdjacencyList<std::int32_t>&
200  scatter_fwd_indices() const noexcept;
201 
207  const std::vector<std::int32_t>& scatter_fwd_ghost_positions() const noexcept;
208 
210  std::vector<int> ghost_owner_rank() const;
211 
213  std::vector<int> ghost_owner_neighbor_rank() const;
214 
221  std::map<std::int32_t, std::set<int>> compute_shared_indices() const;
222 
233  std::pair<IndexMap, std::vector<std::int32_t>>
234  create_submap(const xtl::span<const std::int32_t>& indices) const;
235 
255  template <typename T>
256  void scatter_fwd_begin(const xtl::span<const T>& send_buffer,
257  MPI_Datatype& data_type, MPI_Request& request,
258  const xtl::span<T>& recv_buffer) const
259  {
260  // Send displacement
261  const std::vector<int32_t>& displs_send_fwd = _shared_indices->offsets();
262 
263  // Return early if there are no incoming or outgoing edges
264  if (_displs_recv_fwd.size() == 1 and displs_send_fwd.size() == 1)
265  return;
266 
267  // Get block size
268  int n;
269  MPI_Type_size(data_type, &n);
270  n /= sizeof(T);
271  if (static_cast<int>(send_buffer.size()) != n * displs_send_fwd.back())
272  throw std::runtime_error("Incompatible send buffer size.");
273  if (static_cast<int>(recv_buffer.size()) != n * _displs_recv_fwd.back())
274  throw std::runtime_error("Incompatible receive buffer size.");
275 
276  // Start send/receive
277  MPI_Ineighbor_alltoallv(send_buffer.data(), _sizes_send_fwd.data(),
278  displs_send_fwd.data(), data_type,
279  recv_buffer.data(), _sizes_recv_fwd.data(),
280  _displs_recv_fwd.data(), data_type,
281  _comm_owner_to_ghost.comm(), &request);
282  }
283 
290  void scatter_fwd_end(MPI_Request& request) const
291  {
292  // Return early if there are no incoming or outgoing edges
293  const std::vector<int32_t>& displs_send_fwd = _shared_indices->offsets();
294  if (_displs_recv_fwd.size() == 1 and displs_send_fwd.size() == 1)
295  return;
296 
297  // Wait for communication to complete
298  MPI_Wait(&request, MPI_STATUS_IGNORE);
299  }
300 
311  template <typename T>
312  void scatter_fwd(const xtl::span<const T>& local_data,
313  xtl::span<T> remote_data, int n) const
314  {
315  MPI_Datatype data_type;
316  if (n == 1)
317  data_type = dolfinx::MPI::mpi_type<T>();
318  else
319  {
320  MPI_Type_contiguous(n, dolfinx::MPI::mpi_type<T>(), &data_type);
321  MPI_Type_commit(&data_type);
322  }
323 
324  const std::vector<std::int32_t>& indices = _shared_indices->array();
325  std::vector<T> send_buffer(n * indices.size());
326  for (std::size_t i = 0; i < indices.size(); ++i)
327  {
328  std::copy_n(std::next(local_data.cbegin(), n * indices[i]), n,
329  std::next(send_buffer.begin(), n * i));
330  }
331 
332  MPI_Request request;
333  std::vector<T> buffer_recv(n * _displs_recv_fwd.back());
334  scatter_fwd_begin(xtl::span<const T>(send_buffer), data_type, request,
335  xtl::span<T>(buffer_recv));
336  scatter_fwd_end(request);
337 
338  // Copy into ghost area ("remote_data")
339  assert(remote_data.size() == n * _ghost_pos_recv_fwd.size());
340  for (std::size_t i = 0; i < _ghost_pos_recv_fwd.size(); ++i)
341  {
342  std::copy_n(std::next(buffer_recv.cbegin(), n * _ghost_pos_recv_fwd[i]),
343  n, std::next(remote_data.begin(), n * i));
344  }
345 
346  if (n != 1)
347  MPI_Type_free(&data_type);
348  }
349 
370  template <typename T>
371  void scatter_rev_begin(const xtl::span<const T>& send_buffer,
372  MPI_Datatype& data_type, MPI_Request& request,
373  const xtl::span<T>& recv_buffer) const
374  {
375  // Get displacement vector
376  const std::vector<int32_t>& displs_send_fwd = _shared_indices->offsets();
377 
378  // Return early if there are no incoming or outgoing edges
379  if (_displs_recv_fwd.size() == 1 and displs_send_fwd.size() == 1)
380  return;
381 
382  // Get block size
383  int n;
384  MPI_Type_size(data_type, &n);
385  n /= sizeof(T);
386  if (static_cast<int>(send_buffer.size()) != n * _ghosts.size())
387  throw std::runtime_error("Inconsistent send buffer size.");
388  if (static_cast<int>(recv_buffer.size()) != n * displs_send_fwd.back())
389  throw std::runtime_error("Inconsistent receive buffer size.");
390 
391  // Send and receive data
392  MPI_Ineighbor_alltoallv(send_buffer.data(), _sizes_recv_fwd.data(),
393  _displs_recv_fwd.data(), data_type,
394  recv_buffer.data(), _sizes_send_fwd.data(),
395  displs_send_fwd.data(), data_type,
396  _comm_ghost_to_owner.comm(), &request);
397  }
398 
405  void scatter_rev_end(MPI_Request& request) const
406  {
407  // Return early if there are no incoming or outgoing edges
408  const std::vector<int32_t>& displs_send_fwd = _shared_indices->offsets();
409  if (_displs_recv_fwd.size() == 1 and displs_send_fwd.size() == 1)
410  return;
411 
412  // Wait for communication to complete
413  MPI_Wait(&request, MPI_STATUS_IGNORE);
414  }
415 
425  template <typename T>
426  void scatter_rev(xtl::span<T> local_data,
427  const xtl::span<const T>& remote_data, int n,
428  IndexMap::Mode op) const
429  {
430  MPI_Datatype data_type;
431  if (n == 1)
432  data_type = dolfinx::MPI::mpi_type<T>();
433  else
434  {
435  MPI_Type_contiguous(n, dolfinx::MPI::mpi_type<T>(), &data_type);
436  MPI_Type_commit(&data_type);
437  }
438 
439  // Pack send buffer
440  std::vector<T> buffer_send;
441  buffer_send.resize(n * _displs_recv_fwd.back());
442  for (std::size_t i = 0; i < _ghost_pos_recv_fwd.size(); ++i)
443  {
444  std::copy_n(std::next(remote_data.cbegin(), n * i), n,
445  std::next(buffer_send.begin(), n * _ghost_pos_recv_fwd[i]));
446  }
447 
448  // Exchange data
449  MPI_Request request;
450  std::vector<T> buffer_recv(n * _shared_indices->array().size());
451  scatter_rev_begin(xtl::span<const T>(buffer_send), data_type, request,
452  xtl::span<T>(buffer_recv));
453  scatter_rev_end(request);
454 
455  // Copy or accumulate into "local_data"
456  assert(local_data.size() == n * this->size_local());
457  const std::vector<std::int32_t>& shared_indices = _shared_indices->array();
458  switch (op)
459  {
460  case Mode::insert:
461  for (std::size_t i = 0; i < shared_indices.size(); ++i)
462  {
463  std::copy_n(std::next(buffer_recv.cbegin(), n * i), n,
464  std::next(local_data.begin(), n * shared_indices[i]));
465  }
466  break;
467  case Mode::add:
468  for (std::size_t i = 0; i < shared_indices.size(); ++i)
469  {
470  for (int j = 0; j < n; ++j)
471  local_data[shared_indices[i] * n + j] += buffer_recv[i * n + j];
472  }
473  break;
474  }
475 
476  if (n != 1)
477  MPI_Type_free(&data_type);
478  }
479 
480 private:
481  // Range of indices (global) owned by this process
482  std::array<std::int64_t, 2> _local_range;
483 
484  // Number of indices across the communicator
485  std::int64_t _size_global;
486 
487  // MPI communicator (duplicate of the 'input' communicator)
488  dolfinx::MPI::Comm _comm;
489 
490  // Communicator where the source ranks own the indices in the caller's
491  // halo, and the destination ranks 'ghost' indices owned by the
492  // caller. I.e.,
493  // - in-edges (src) are from ranks that own my ghosts
494  // - out-edges (dest) go to ranks that 'ghost' my owned indices
495  dolfinx::MPI::Comm _comm_owner_to_ghost;
496 
497  // Communicator where the source ranks have ghost indices that are
498  // owned by the caller, and the destination ranks are the owners of
499  // indices in the caller's halo region. I.e.,
500  // - in-edges (src) are from ranks that 'ghost' my owned indices
501  // - out-edges (dest) are to the owning ranks of my ghost indices
502  dolfinx::MPI::Comm _comm_ghost_to_owner;
503 
504  // MPI sizes and displacements for forward (owner -> ghost) scatter
505  // Note: '_displs_send_fwd' can be obtained from _shared_indices->offsets()
506  std::vector<std::int32_t> _sizes_send_fwd, _sizes_recv_fwd, _displs_recv_fwd;
507 
508  // Position in the recv buffer for a forward scatter for the ith ghost
509  // index (_ghosts[i]) entry
510  std::vector<std::int32_t> _ghost_pos_recv_fwd;
511 
512  // Local-to-global map for ghost indices
513  std::vector<std::int64_t> _ghosts;
514 
515  // List of owned local indices that are in the ghost (halo) region on
516  // other ranks, grouped by rank in the neighbor communicator
517  // (destination ranks in the forward communicator and source ranks in the
518  // reverse communicator), i.e. `_shared_indices->num_nodes() ==
519  // size(_comm_owner_to_ghost)`. The array _shared_indices->offsets() is
520  // equivalent to 'displs_send_fwd'.
521  std::unique_ptr<graph::AdjacencyList<std::int32_t>> _shared_indices;
522 };
523 
524 } // namespace dolfinx::common
A duplicate MPI communicator that manages the lifetime of the communicator.
Definition: MPI.h:40
This class represents the distribution of index arrays across processes. An index array is a contiguous ...
Definition: IndexMap.h:60
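As a rough usage sketch (not part of the header; the rank layout, sizes and helper name are invented for illustration), an IndexMap with one ghost index per rank can be built with the ghost-aware constructor declared above:

#include <dolfinx/common/IndexMap.h>
#include <cstdint>
#include <mpi.h>
#include <vector>

// Each rank owns 'local_size' indices and (on more than one rank) ghosts
// the first index owned by the next rank. Values are illustrative only.
void build_map(MPI_Comm comm)
{
  int rank = 0, size = 0;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &size);

  const std::int32_t local_size = 10;
  std::vector<std::int64_t> ghosts;
  std::vector<int> src_ranks, dest_ranks;
  if (size > 1)
  {
    const int next = (rank + 1) % size;
    const int prev = (rank + size - 1) % size;
    ghosts.push_back(std::int64_t(next) * local_size); // first index owned by 'next'
    src_ranks.push_back(next);   // rank owning our ghost index
    dest_ranks.push_back(prev);  // rank that ghosts our first owned index
  }

  dolfinx::common::IndexMap map(comm, local_size, dest_ranks, ghosts, src_ranks);
  // map.size_local() == 10; map.num_ghosts() == (size > 1 ? 1 : 0)
}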
~IndexMap()=default
Destructor.
Direction
Edge directions of neighborhood communicator.
Definition: IndexMap.h:71
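Direction selects which of the two neighborhood communicators comm(Direction) returns. A small sketch (the helper name is invented, and it is assumed the returned communicator carries a distributed graph topology, as used by the scatter routines):

#include <dolfinx/common/IndexMap.h>
#include <mpi.h>

// Query the degree of the owner-to-ghost neighborhood graph
void count_neighbors(const dolfinx::common::IndexMap& map)
{
  MPI_Comm fwd = map.comm(dolfinx::common::IndexMap::Direction::forward);
  int indegree = 0, outdegree = 0, weighted = 0;
  MPI_Dist_graph_neighbors_count(fwd, &indegree, &outdegree, &weighted);
  // indegree:  ranks owning this rank's ghost indices
  // outdegree: ranks that ghost indices owned by this rank
}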
const std::vector< std::int32_t > & scatter_fwd_ghost_positions() const noexcept
Position of ghost entries in the receive buffer after a forward scatter, e.g. for a receive buffer b ...
Definition: IndexMap.cpp:603
std::int32_t num_ghosts() const noexcept
Number of ghost indices on this process.
Definition: IndexMap.cpp:523
Mode
Mode for reverse scatter operation.
Definition: IndexMap.h:64
void scatter_rev_end(MPI_Request &request) const
Complete a non-blocking send of ghost values to the owning rank. This function completes the communica...
Definition: IndexMap.h:405
std::array< std::int64_t, 2 > local_range() const noexcept
Range of indices (global) owned by this process.
Definition: IndexMap.cpp:518
std::int32_t size_local() const noexcept
Number of indices owned by this process.
Definition: IndexMap.cpp:525
std::int64_t size_global() const noexcept
Number of indices across the communicator.
Definition: IndexMap.cpp:530
IndexMap(IndexMap &&map)=default
Move constructor.
const graph::AdjacencyList< std::int32_t > & scatter_fwd_indices() const noexcept
Local (owned) indices shared with neighbor processes, i.e. indices that are ghosts on other processes,...
Definition: IndexMap.cpp:596
IndexMap & operator=(IndexMap &&map)=default
Move assignment.
void scatter_fwd(const xtl::span< const T > &local_data, xtl::span< T > remote_data, int n) const
Send n values for each index that is owned to processes that have the index as a ghost....
Definition: IndexMap.h:312
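A common pattern is to store owned values followed by ghost values in one array and use scatter_fwd to refresh the ghost part after the owned part has changed. A minimal sketch for block size n = 1 (the helper name and array layout are illustrative):

#include <dolfinx/common/IndexMap.h>
#include <vector>
#include <xtl/xspan.hpp>

// Refresh ghost values from their owners (block size n = 1). 'x' stores
// owned values followed by ghost values.
void update_ghosts(const dolfinx::common::IndexMap& map, std::vector<double>& x)
{
  xtl::span<const double> owned(x.data(), map.size_local());
  xtl::span<double> ghost(x.data() + map.size_local(), map.num_ghosts());
  map.scatter_fwd(owned, ghost, 1);
}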
std::vector< int > ghost_owner_neighbor_rank() const
Compute the owner on the neighborhood communicator of ghost indices.
Definition: IndexMap.cpp:608
const std::vector< std::int64_t > & ghosts() const noexcept
Local-to-global map for ghosts (local indexing beyond end of local range)
Definition: IndexMap.cpp:532
void global_to_local(const xtl::span< const std::int64_t > &global, const xtl::span< std::int32_t > &local) const
Compute local indices for array of global indices.
Definition: IndexMap.cpp:556
std::pair< IndexMap, std::vector< std::int32_t > > create_submap(const xtl::span< const std::int32_t > &indices) const
Create new index map from a subset of indices in this index map. The order of the indices is preserve...
Definition: IndexMap.cpp:780
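A minimal sketch of create_submap, selecting the first half of the owned indices (the choice of indices and the helper name are illustrative; the indices passed are assumed to be owned):

#include <dolfinx/common/IndexMap.h>
#include <cstdint>
#include <numeric>
#include <vector>

// Build a sub-map from the first half of the owned indices
void make_submap(const dolfinx::common::IndexMap& map)
{
  std::vector<std::int32_t> subset(map.size_local() / 2);
  std::iota(subset.begin(), subset.end(), 0);
  auto [submap, ghost_info] = map.create_submap(subset);
  // 'submap' numbers only the selected entries; see IndexMap.cpp:780 for
  // the precise meaning of the second return value
}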
void scatter_fwd_end(MPI_Request &request) const
Complete a non-blocking send from the local owner to process ranks that have the index as a ghost....
Definition: IndexMap.h:290
std::map< std::int32_t, std::set< int > > compute_shared_indices() const
Definition: IndexMap.cpp:661
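A small sketch (helper name invented) that prints, for each shared owned index, the ranks that also hold it:

#include <dolfinx/common/IndexMap.h>
#include <iostream>

// Print the ranks sharing each shared index
void print_shared(const dolfinx::common::IndexMap& map)
{
  for (const auto& [index, ranks] : map.compute_shared_indices())
  {
    std::cout << "local index " << index << " shared with ranks:";
    for (int r : ranks)
      std::cout << " " << r;
    std::cout << "\n";
  }
}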
void local_to_global(const xtl::span< const std::int32_t > &local, const xtl::span< std::int64_t > &global) const
Compute global indices for array of local indices.
Definition: IndexMap.cpp:537
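local_to_global and global_to_local work on whole arrays: the caller supplies an output span of the same length as the input. A round-trip sketch (the index values and helper name are illustrative):

#include <dolfinx/common/IndexMap.h>
#include <cstdint>
#include <vector>

// Convert a few local indices to global indices and back
void round_trip(const dolfinx::common::IndexMap& map)
{
  std::vector<std::int32_t> local = {0, 1, 2};
  std::vector<std::int64_t> global(local.size());
  map.local_to_global(local, global);

  // Global indices not known on this rank are expected to map to -1
  std::vector<std::int32_t> local_again(global.size());
  map.global_to_local(global, local_again);
}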
IndexMap(MPI_Comm comm, std::int32_t local_size)
Create a non-overlapping index map with local_size owned on this process.
Definition: IndexMap.cpp:352
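For data without a halo the two-argument constructor is sufficient; local_range() then gives the half-open range of global indices owned by this rank. A minimal sketch (the local size and helper name are illustrative):

#include <dolfinx/common/IndexMap.h>
#include <array>
#include <cstdint>
#include <mpi.h>

// A map with 100 owned indices on this rank and no ghosts
void non_overlapping_map(MPI_Comm comm)
{
  dolfinx::common::IndexMap map(comm, 100);
  std::array<std::int64_t, 2> range = map.local_range();
  // 'range' is the half-open interval [begin, end) of owned global indices;
  // map.size_global() is the sum of the local sizes over all ranks
  (void)range;
}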
void scatter_fwd_begin(const xtl::span< const T > &send_buffer, MPI_Datatype &data_type, MPI_Request &request, const xtl::span< T > &recv_buffer) const
Start a non-blocking send of owned data to ranks that ghost the data. The communication is completed ...
Definition: IndexMap.h:256
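scatter_fwd_begin/scatter_fwd_end split the forward scatter into two phases so that communication can overlap local work; the send and receive buffers must stay valid until scatter_fwd_end returns. The sketch below (helper name illustrative, block size n = 1) packs with scatter_fwd_indices() and unpacks with scatter_fwd_ghost_positions(), mirroring what scatter_fwd does internally:

#include <dolfinx/common/IndexMap.h>
#include <mpi.h>
#include <vector>
#include <xtl/xspan.hpp>

// Overlap the forward (owner -> ghost) scatter with local work (n = 1).
// 'x' stores owned values followed by ghost values.
void update_ghosts_overlapped(const dolfinx::common::IndexMap& map,
                              std::vector<double>& x)
{
  // Pack owned values shared with neighbor ranks
  const std::vector<std::int32_t>& idx = map.scatter_fwd_indices().array();
  std::vector<double> send(idx.size());
  for (std::size_t i = 0; i < idx.size(); ++i)
    send[i] = x[idx[i]];

  std::vector<double> recv(map.num_ghosts());
  MPI_Datatype type = MPI_DOUBLE; // block size n = 1
  MPI_Request request;
  map.scatter_fwd_begin(xtl::span<const double>(send), type, request,
                        xtl::span<double>(recv));

  // ... local work that does not read the ghost entries of x ...

  map.scatter_fwd_end(request);

  // Unpack using the position of each ghost in the receive buffer
  const std::vector<std::int32_t>& pos = map.scatter_fwd_ghost_positions();
  for (std::size_t i = 0; i < pos.size(); ++i)
    x[map.size_local() + i] = recv[pos[i]];
}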
MPI_Comm comm() const
Return the MPI communicator used to create the index map.
Definition: IndexMap.cpp:646
std::vector< int > ghost_owner_rank() const
Owner rank on the global communicator of each ghost entry.
Definition: IndexMap.cpp:625
std::vector< std::int64_t > global_indices() const
Global indices.
Definition: IndexMap.cpp:582
void scatter_rev_begin(const xtl::span< const T > &send_buffer, MPI_Datatype &data_type, MPI_Request &request, const xtl::span< T > &recv_buffer) const
Start a non-blocking send of ghost values to the owning rank. The non-blocking communication is compl...
Definition: IndexMap.h:371
void scatter_rev(xtl::span< T > local_data, const xtl::span< const T > &remote_data, int n, IndexMap::Mode op) const
Send n values for each ghost index to the owning process.
Definition: IndexMap.h:426
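scatter_rev either overwrites (Mode::insert) or accumulates into (Mode::add) the owned entries with values sent back from the ghost region; accumulation is the usual choice when contributions have been added to ghost entries. A minimal sketch for block size n = 1 (the helper name and array layout are illustrative):

#include <dolfinx/common/IndexMap.h>
#include <vector>
#include <xtl/xspan.hpp>

// Accumulate ghost contributions into the owned entries (block size n = 1).
// 'x' stores owned values followed by ghost values.
void accumulate_ghosts(const dolfinx::common::IndexMap& map,
                       std::vector<double>& x)
{
  xtl::span<double> owned(x.data(), map.size_local());
  xtl::span<const double> ghost(x.data() + map.size_local(), map.num_ghosts());
  map.scatter_rev(owned, ghost, 1, dolfinx::common::IndexMap::Mode::add);
}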
Miscellaneous classes, functions and types.
std::vector< int32_t > compute_owned_indices(const xtl::span< const std::int32_t > &indices, const IndexMap &map)
Given a vector of indices (local numbering, owned or ghost) and an index map, this function returns t...
Definition: IndexMap.cpp:132
std::tuple< std::int64_t, std::vector< std::int32_t >, std::vector< std::vector< std::int64_t > >, std::vector< std::vector< int > > > stack_index_maps(const std::vector< std::pair< std::reference_wrapper< const common::IndexMap >, int >> &maps)
Compute layout data and ghost indices for a stacked (concatenated) index map, i.e....
Definition: IndexMap.cpp:233
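A sketch of calling stack_index_maps with two maps and block sizes 1 and 3 (the values and binding names are illustrative guesses; the exact meaning of the tuple entries is defined in IndexMap.cpp:233):

#include <dolfinx/common/IndexMap.h>
#include <functional>
#include <utility>
#include <vector>

// Stack map0 (block size 1) and map1 (block size 3)
void stack_two(const dolfinx::common::IndexMap& map0,
               const dolfinx::common::IndexMap& map1)
{
  std::vector<
      std::pair<std::reference_wrapper<const dolfinx::common::IndexMap>, int>>
      maps = {{std::cref(map0), 1}, {std::cref(map1), 3}};

  auto [offset, local_offsets, ghosts, ghost_owners]
      = dolfinx::common::stack_index_maps(maps);
}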