11 #include <dolfinx/common/MPI.h>
12 #include <dolfinx/graph/AdjacencyList.h>
18 #include <xtl/xspan.hpp>
45 std::tuple<std::int64_t, std::vector<std::int32_t>,
46 std::vector<std::vector<std::int64_t>>,
47 std::vector<std::vector<int>>>
50 std::pair<std::reference_wrapper<const common::IndexMap>,
int>>& maps);
98 const xtl::span<const int>& dest_ranks,
99 const xtl::span<const std::int64_t>&
ghosts,
100 const xtl::span<const int>& src_ranks);
103 template <
typename U,
typename V,
typename W,
typename X>
105 MPI_Comm
comm, U&& comm_owner_to_ghost, U&& comm_ghost_to_owner,
106 V&& displs_recv_fwd, V&& ghost_pos_recv_fwd, W&&
ghosts,
109 _comm_owner_to_ghost(std::forward<U>(comm_owner_to_ghost)),
110 _comm_ghost_to_owner(std::forward<U>(comm_ghost_to_owner)),
111 _displs_recv_fwd(std::forward<V>(displs_recv_fwd)),
112 _ghost_pos_recv_fwd(std::forward<V>(ghost_pos_recv_fwd)),
113 _ghosts(std::forward<W>(
ghosts)),
114 _shared_indices(std::forward<X>(shared_indices))
116 _sizes_recv_fwd.resize(_displs_recv_fwd.size() - 1, 0);
117 std::adjacent_difference(_displs_recv_fwd.cbegin() + 1,
118 _displs_recv_fwd.cend(), _sizes_recv_fwd.begin());
120 const std::vector<int32_t>& displs_send = _shared_indices->offsets();
121 _sizes_send_fwd.resize(_shared_indices->num_nodes(), 0);
122 std::adjacent_difference(displs_send.cbegin() + 1, displs_send.cend(),
123 _sizes_send_fwd.begin());
143 std::array<std::int64_t, 2>
local_range() const noexcept;
156 const std::vector<std::int64_t>&
ghosts() const noexcept;
160 MPI_Comm
comm() const;
172 const xtl::span<std::int64_t>& global) const;
179 const xtl::span<std::int32_t>& local) const;
199 const graph::AdjacencyList<std::int32_t>&
233 std::pair<
IndexMap, std::vector<std::int32_t>>
234 create_submap(const xtl::span<const std::int32_t>& indices) const;
255 template <typename T>
257 MPI_Datatype& data_type, MPI_Request& request,
258 const xtl::span<T>& recv_buffer)
const
261 const std::vector<int32_t>& displs_send_fwd = _shared_indices->offsets();
264 if (_displs_recv_fwd.size() == 1 and displs_send_fwd.size() == 1)
269 MPI_Type_size(data_type, &n);
271 if (
static_cast<int>(send_buffer.size()) != n * displs_send_fwd.back())
272 throw std::runtime_error(
"Incompatible send buffer size.");
273 if (
static_cast<int>(recv_buffer.size()) != n * _displs_recv_fwd.back())
274 throw std::runtime_error(
"Incompatible receive buffer size..");
277 MPI_Ineighbor_alltoallv(send_buffer.data(), _sizes_send_fwd.data(),
278 displs_send_fwd.data(), data_type,
279 recv_buffer.data(), _sizes_recv_fwd.data(),
280 _displs_recv_fwd.data(), data_type,
281 _comm_owner_to_ghost.comm(), &request);
293 const std::vector<int32_t>& displs_send_fwd = _shared_indices->offsets();
294 if (_displs_recv_fwd.size() == 1 and displs_send_fwd.size() == 1)
298 MPI_Wait(&request, MPI_STATUS_IGNORE);
311 template <
typename T>
313 xtl::span<T> remote_data,
int n)
const
315 MPI_Datatype data_type;
317 data_type = dolfinx::MPI::mpi_type<T>();
320 MPI_Type_contiguous(n, dolfinx::MPI::mpi_type<T>(), &data_type);
321 MPI_Type_commit(&data_type);
324 const std::vector<std::int32_t>& indices = _shared_indices->array();
325 std::vector<T> send_buffer(n * indices.size());
326 for (std::size_t i = 0; i < indices.size(); ++i)
328 std::copy_n(std::next(local_data.cbegin(), n * indices[i]), n,
329 std::next(send_buffer.begin(), n * i));
333 std::vector<T> buffer_recv(n * _displs_recv_fwd.back());
335 xtl::span<T>(buffer_recv));
339 assert(remote_data.size() == n * _ghost_pos_recv_fwd.size());
340 for (std::size_t i = 0; i < _ghost_pos_recv_fwd.size(); ++i)
342 std::copy_n(std::next(buffer_recv.cbegin(), n * _ghost_pos_recv_fwd[i]),
343 n, std::next(remote_data.begin(), n * i));
347 MPI_Type_free(&data_type);
370 template <
typename T>
372 MPI_Datatype& data_type, MPI_Request& request,
373 const xtl::span<T>& recv_buffer)
const
376 const std::vector<int32_t>& displs_send_fwd = _shared_indices->offsets();
379 if (_displs_recv_fwd.size() == 1 and displs_send_fwd.size() == 1)
384 MPI_Type_size(data_type, &n);
386 if (
static_cast<int>(send_buffer.size()) != n * _ghosts.size())
387 throw std::runtime_error(
"Inconsistent send buffer size.");
388 if (
static_cast<int>(recv_buffer.size()) != n * displs_send_fwd.back())
389 throw std::runtime_error(
"Inconsistent receive buffer size.");
392 MPI_Ineighbor_alltoallv(send_buffer.data(), _sizes_recv_fwd.data(),
393 _displs_recv_fwd.data(), data_type,
394 recv_buffer.data(), _sizes_send_fwd.data(),
395 displs_send_fwd.data(), data_type,
396 _comm_ghost_to_owner.comm(), &request);
408 const std::vector<int32_t>& displs_send_fwd = _shared_indices->offsets();
409 if (_displs_recv_fwd.size() == 1 and displs_send_fwd.size() == 1)
413 MPI_Wait(&request, MPI_STATUS_IGNORE);
425 template <
typename T>
427 const xtl::span<const T>& remote_data,
int n,
430 MPI_Datatype data_type;
432 data_type = dolfinx::MPI::mpi_type<T>();
435 MPI_Type_contiguous(n, dolfinx::MPI::mpi_type<T>(), &data_type);
436 MPI_Type_commit(&data_type);
440 std::vector<T> buffer_send;
441 buffer_send.resize(n * _displs_recv_fwd.back());
442 for (std::size_t i = 0; i < _ghost_pos_recv_fwd.size(); ++i)
444 std::copy_n(std::next(remote_data.cbegin(), n * i), n,
445 std::next(buffer_send.begin(), n * _ghost_pos_recv_fwd[i]));
450 std::vector<T> buffer_recv(n * _shared_indices->array().size());
452 xtl::span<T>(buffer_recv));
456 assert(local_data.size() == n * this->size_local());
457 const std::vector<std::int32_t>& shared_indices = _shared_indices->array();
461 for (std::size_t i = 0; i < shared_indices.size(); ++i)
463 std::copy_n(std::next(buffer_recv.cbegin(), n * i), n,
464 std::next(local_data.begin(), n * shared_indices[i]));
468 for (std::size_t i = 0; i < shared_indices.size(); ++i)
470 for (
int j = 0; j < n; ++j)
471 local_data[shared_indices[i] * n + j] += buffer_recv[i * n + j];
477 MPI_Type_free(&data_type);
482 std::array<std::int64_t, 2> _local_range;
485 std::int64_t _size_global;
506 std::vector<std::int32_t> _sizes_send_fwd, _sizes_recv_fwd, _displs_recv_fwd;
510 std::vector<std::int32_t> _ghost_pos_recv_fwd;
513 std::vector<std::int64_t> _ghosts;
521 std::unique_ptr<graph::AdjacencyList<std::int32_t>> _shared_indices;
A duplicated MPI communicator whose lifetime is managed by this class.
Definition: MPI.h:40
This class represents the distribution index arrays across processes. An index array is a contiguous ...
Definition: IndexMap.h:60
~IndexMap()=default
Destructor.
Direction
Edge directions of neighborhood communicator.
Definition: IndexMap.h:71
const std::vector< std::int32_t > & scatter_fwd_ghost_positions() const noexcept
Position of ghost entries in the receive buffer after a forward scatter, e.g. for a receive buffer b ...
Definition: IndexMap.cpp:603
std::int32_t num_ghosts() const noexcept
Number of ghost indices on this process.
Definition: IndexMap.cpp:523
Mode
Mode for reverse scatter operation.
Definition: IndexMap.h:64
void scatter_rev_end(MPI_Request &request) const
Complete a non-blocking send of ghost values to the owning rank. This function completes the communica...
Definition: IndexMap.h:405
std::array< std::int64_t, 2 > local_range() const noexcept
Range of indices (global) owned by this process.
Definition: IndexMap.cpp:518
std::int32_t size_local() const noexcept
Number of indices owned by this process.
Definition: IndexMap.cpp:525
std::int64_t size_global() const noexcept
Number of indices across the communicator.
Definition: IndexMap.cpp:530
IndexMap(IndexMap &&map)=default
Move constructor.
const graph::AdjacencyList< std::int32_t > & scatter_fwd_indices() const noexcept
Local (owned) indices shared with neighbor processes, i.e. are ghosts on other processes,...
Definition: IndexMap.cpp:596
IndexMap & operator=(IndexMap &&map)=default
Move assignment.
void scatter_fwd(const xtl::span< const T > &local_data, xtl::span< T > remote_data, int n) const
Send n values for each index that is owned to processes that have the index as a ghost....
Definition: IndexMap.h:312
std::vector< int > ghost_owner_neighbor_rank() const
Compute the owner on the neighborhood communicator of ghost indices.
Definition: IndexMap.cpp:608
const std::vector< std::int64_t > & ghosts() const noexcept
Local-to-global map for ghosts (local indexing beyond end of local range)
Definition: IndexMap.cpp:532
void global_to_local(const xtl::span< const std::int64_t > &global, const xtl::span< std::int32_t > &local) const
Compute local indices for array of global indices.
Definition: IndexMap.cpp:556
std::pair< IndexMap, std::vector< std::int32_t > > create_submap(const xtl::span< const std::int32_t > &indices) const
Create new index map from a subset of indices in this index map. The order of the indices is preserve...
Definition: IndexMap.cpp:780
void scatter_fwd_end(MPI_Request &request) const
Complete a non-blocking send from the local owner to process ranks that have the index as a ghost....
Definition: IndexMap.h:290
std::map< std::int32_t, std::set< int > > compute_shared_indices() const
Definition: IndexMap.cpp:661
void local_to_global(const xtl::span< const std::int32_t > &local, const xtl::span< std::int64_t > &global) const
Compute global indices for array of local indices.
Definition: IndexMap.cpp:537
IndexMap(MPI_Comm comm, std::int32_t local_size)
Create a non-overlapping index map with local_size owned on this process.
Definition: IndexMap.cpp:352
void scatter_fwd_begin(const xtl::span< const T > &send_buffer, MPI_Datatype &data_type, MPI_Request &request, const xtl::span< T > &recv_buffer) const
Start a non-blocking send of owned data to ranks that ghost the data. The communication is completed ...
Definition: IndexMap.h:256
MPI_Comm comm() const
Return the MPI communicator used to create the index map.
Definition: IndexMap.cpp:646
std::vector< int > ghost_owner_rank() const
Owner rank on the global communicator of each ghost entry.
Definition: IndexMap.cpp:625
std::vector< std::int64_t > global_indices() const
Global indices.
Definition: IndexMap.cpp:582
void scatter_rev_begin(const xtl::span< const T > &send_buffer, MPI_Datatype &data_type, MPI_Request &request, const xtl::span< T > &recv_buffer) const
Start a non-blocking send of ghost values to the owning rank. The non-blocking communication is compl...
Definition: IndexMap.h:371
void scatter_rev(xtl::span< T > local_data, const xtl::span< const T > &remote_data, int n, IndexMap::Mode op) const
Send n values for each ghost index to the owning process.
Definition: IndexMap.h:426
Miscellaneous classes, functions and types.
std::vector< int32_t > compute_owned_indices(const xtl::span< const std::int32_t > &indices, const IndexMap &map)
Given a vector of indices (local numbering, owned or ghost) and an index map, this function returns t...
Definition: IndexMap.cpp:132
std::tuple< std::int64_t, std::vector< std::int32_t >, std::vector< std::vector< std::int64_t > >, std::vector< std::vector< int > > > stack_index_maps(const std::vector< std::pair< std::reference_wrapper< const common::IndexMap >, int >> &maps)
Compute layout data and ghost indices for a stacked (concatenated) index map, i.e....
Definition: IndexMap.cpp:233