DOLFINx 0.3.0 C++ interface
partition.h
// Copyright (C) 2020 Garth N. Wells
//
// This file is part of DOLFINx (https://www.fenicsproject.org)
//
// SPDX-License-Identifier: LGPL-3.0-or-later

#pragma once

#include <algorithm>
#include <cassert>             // assert (added; not in the original listing)
#include <cstdint>
#include <dolfinx/common/MPI.h>
#include <dolfinx/common/Timer.h>
#include <dolfinx/graph/AdjacencyList.h>
#include <functional>
#include <mpi.h>
#include <numeric>             // std::iota, std::partial_sum (added; not in the original listing)
#include <tuple>               // std::tuple (added; not in the original listing)
#include <utility>
#include <vector>
#include <xtensor/xtensor.hpp> // xt::xtensor (added; not in the original listing)
#include <xtl/xspan.hpp>

namespace dolfinx::graph
{

using partition_fn = std::function<graph::AdjacencyList<std::int32_t>(
    MPI_Comm comm, int nparts, const AdjacencyList<std::int64_t>& local_graph,
    std::int32_t num_ghost_nodes, bool ghosting)>;

AdjacencyList<std::int32_t>
partition_graph(MPI_Comm comm, int nparts,
                const AdjacencyList<std::int64_t>& local_graph,
                std::int32_t num_ghost_nodes, bool ghosting);

namespace build
{
std::tuple<graph::AdjacencyList<std::int64_t>, std::vector<int>,
           std::vector<std::int64_t>, std::vector<int>>
distribute(MPI_Comm comm, const graph::AdjacencyList<std::int64_t>& list,
           const graph::AdjacencyList<std::int32_t>& destinations);

std::vector<std::int64_t>
compute_ghost_indices(MPI_Comm comm,
                      const xtl::span<const std::int64_t>& global_indices,
                      const xtl::span<const int>& ghost_owners);

template <typename T>
xt::xtensor<T, 2> distribute_data(MPI_Comm comm,
                                  const xtl::span<const std::int64_t>& indices,
                                  const xt::xtensor<T, 2>& x);

std::vector<std::int64_t>
compute_local_to_global_links(const graph::AdjacencyList<std::int64_t>& global,
                              const graph::AdjacencyList<std::int32_t>& local);

std::vector<std::int32_t>
compute_local_to_local(const xtl::span<const std::int64_t>& local0_to_global,
                       const xtl::span<const std::int64_t>& local1_to_global);
} // namespace build

//---------------------------------------------------------------------------
// Implementation
//---------------------------------------------------------------------------
template <typename T>
xt::xtensor<T, 2>
build::distribute_data(MPI_Comm comm,
                       const xtl::span<const std::int64_t>& indices,
                       const xt::xtensor<T, 2>& x)
{
  common::Timer timer("Fetch float data from remote processes");

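  // Gather the number of locally owned rows on each rank and compute each
  // rank's global offset (exclusive prefix sum over the local sizes)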
  const std::int64_t num_points_local = x.shape(0);
  const int size = dolfinx::MPI::size(comm);
  const int rank = dolfinx::MPI::rank(comm);
  std::vector<std::int64_t> global_sizes(size);
  MPI_Allgather(&num_points_local, 1, MPI_INT64_T, global_sizes.data(), 1,
                MPI_INT64_T, comm);
  std::vector<std::int64_t> global_offsets(size + 1, 0);
  std::partial_sum(global_sizes.begin(), global_sizes.end(),
                   global_offsets.begin() + 1);

  // Build index data requests
  std::vector<int> number_index_send(size, 0);
  std::vector<int> index_owner(indices.size());
  std::vector<int> index_order(indices.size());
  std::iota(index_order.begin(), index_order.end(), 0);
  std::sort(index_order.begin(), index_order.end(),
            [&indices](int a, int b) { return (indices[a] < indices[b]); });

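  // Visit the requested indices in ascending order and find the owning rank
  // of each one (rank p owns the block [global_offsets[p], global_offsets[p + 1]))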
  int p = 0;
  for (std::size_t i = 0; i < index_order.size(); ++i)
  {
    int j = index_order[i];
    while (indices[j] >= global_offsets[p + 1])
      ++p;
    index_owner[j] = p;
    number_index_send[p]++;
  }

  // Compute send displacements
  std::vector<int> disp_index_send(size + 1, 0);
  std::partial_sum(number_index_send.begin(), number_index_send.end(),
                   disp_index_send.begin() + 1);

  // Pack global index send data
  std::vector<std::int64_t> indices_send(disp_index_send.back());
  std::vector<int> disp_tmp = disp_index_send;
  for (std::size_t i = 0; i < indices.size(); ++i)
  {
    const int owner = index_owner[i];
    indices_send[disp_tmp[owner]++] = indices[i];
  }

  // Send/receive number of indices to communicate to each process
  std::vector<int> number_index_recv(size);
  MPI_Alltoall(number_index_send.data(), 1, MPI_INT, number_index_recv.data(),
               1, MPI_INT, comm);

  // Compute receive displacements
  std::vector<int> disp_index_recv(size + 1, 0);
  std::partial_sum(number_index_recv.begin(), number_index_recv.end(),
                   disp_index_recv.begin() + 1);

  // Send/receive global indices
  std::vector<std::int64_t> indices_recv(disp_index_recv.back());
  MPI_Alltoallv(indices_send.data(), number_index_send.data(),
                disp_index_send.data(), MPI_INT64_T, indices_recv.data(),
                number_index_recv.data(), disp_index_recv.data(), MPI_INT64_T,
                comm);

  assert(x.shape(1) != 0);
  // Pack point data to send back (transpose)
  xt::xtensor<T, 2> x_return({indices_recv.size(), x.shape(1)});
  for (int p = 0; p < size; ++p)
  {
    for (int i = disp_index_recv[p]; i < disp_index_recv[p + 1]; ++i)
    {
      const std::int32_t index_local = indices_recv[i] - global_offsets[rank];
      assert(index_local >= 0);
      for (std::size_t j = 0; j < x.shape(1); ++j)
        x_return(i, j) = x(index_local, j);
    }
  }

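  // Create an MPI datatype representing one row of x, so that each row can be
  // sent and received as a single element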
  MPI_Datatype compound_type;
  MPI_Type_contiguous(x.shape(1), dolfinx::MPI::mpi_type<T>(), &compound_type);
  MPI_Type_commit(&compound_type);

  // Send back point data
  xt::xtensor<T, 2> my_x(
      {static_cast<std::size_t>(disp_index_send.back()), x.shape(1)});
  MPI_Alltoallv(x_return.data(), number_index_recv.data(),
                disp_index_recv.data(), compound_type, my_x.data(),
                number_index_send.data(), disp_index_send.data(), compound_type,
                comm);
  MPI_Type_free(&compound_type);

  return my_x;
}

} // namespace dolfinx::graph

Cross-references for symbols used in this file:

dolfinx::common::Timer
    A timer can be used for timing tasks. The basic usage is...
    Definition: Timer.h:31

dolfinx::graph::AdjacencyList
    This class provides a static adjacency list data structure. It is commonly used to store directed gra...
    Definition: AdjacencyList.h:47

int rank(MPI_Comm comm)
    Return process rank for the communicator.
    Definition: MPI.cpp:74

int size(MPI_Comm comm)
    Return size of the group (number of processes) associated with the communicator.
    Definition: MPI.cpp:82

std::vector<std::int64_t> compute_local_to_global_links(const graph::AdjacencyList<std::int64_t>& global, const graph::AdjacencyList<std::int32_t>& local)
    Given an adjacency list with global, possibly non-contiguous, link indices and a local adjacency list...
    Definition: partition.cpp:294

std::tuple<graph::AdjacencyList<std::int64_t>, std::vector<int>, std::vector<std::int64_t>, std::vector<int>> distribute(MPI_Comm comm, const graph::AdjacencyList<std::int64_t>& list, const graph::AdjacencyList<std::int32_t>& destinations)
    Distribute adjacency list nodes to destination ranks. The global index of each node is assumed to be ...
    Definition: partition.cpp:32
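
A minimal usage sketch for build::distribute follows. It is illustrative only, not taken from the DOLFINx documentation: it assumes MPI has been initialised, it simply sends every local node to rank 0, and the names given to the unpacked tuple entries are assumptions rather than the official ones.

#include <cstdint>
#include <dolfinx/common/MPI.h>
#include <dolfinx/graph/AdjacencyList.h>
#include <dolfinx/graph/partition.h>
#include <vector>

void distribute_example(MPI_Comm comm)
{
  const int rank = dolfinx::MPI::rank(comm);

  // Two local nodes per rank, each with one link holding a global node index
  std::vector<std::int64_t> links = {2 * rank + 1, 2 * rank};
  std::vector<std::int32_t> offsets = {0, 1, 2};
  dolfinx::graph::AdjacencyList<std::int64_t> list(links, offsets);

  // Destination rank(s) for each local node: send everything to rank 0
  std::vector<std::int32_t> dest = {0, 0};
  dolfinx::graph::AdjacencyList<std::int32_t> destinations(dest, offsets);

  // Names of the unpacked values are illustrative, not the documented ones
  auto [dist_list, src_rank, original_global_index, ghost_owners]
      = dolfinx::graph::build::distribute(comm, list, destinations);
}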

xt::xtensor<T, 2> distribute_data(MPI_Comm comm, const xtl::span<const std::int64_t>& indices, const xt::xtensor<T, 2>& x)
    Distribute data to process ranks where it is required.
    Definition: partition.h:134
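
A minimal usage sketch for build::distribute_data, again illustrative rather than official: it assumes MPI has been initialised and that every rank owns ten consecutive rows of a global array with three columns.

#include <cstdint>
#include <dolfinx/common/MPI.h>
#include <dolfinx/graph/partition.h>
#include <vector>
#include <xtensor/xtensor.hpp>

void distribute_data_example(MPI_Comm comm)
{
  const int rank = dolfinx::MPI::rank(comm);

  // Ten locally owned rows, three columns; the values are arbitrary
  xt::xtensor<double, 2> x({10, 3});
  x.fill(static_cast<double>(rank));

  // Global row indices required on this rank (they may be owned elsewhere)
  std::vector<std::int64_t> needed = {0, 5};

  // my_x holds the rows of the global array corresponding to `needed`,
  // gathered from their owning ranks
  xt::xtensor<double, 2> my_x
      = dolfinx::graph::build::distribute_data<double>(comm, needed, x);
}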

std::vector<std::int64_t> compute_ghost_indices(MPI_Comm comm, const xtl::span<const std::int64_t>& global_indices, const xtl::span<const int>& ghost_owners)
    Compute ghost indices in a global IndexMap space, from a list of arbitrary global indices,...
    Definition: partition.cpp:171

std::vector<std::int32_t> compute_local_to_local(const xtl::span<const std::int64_t>& local0_to_global, const xtl::span<const std::int64_t>& local1_to_global)
    Compute a local0-to-local1 map from two local-to-global maps with common global indices.
    Definition: partition.cpp:330
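
A small illustrative sketch of build::compute_local_to_local; the two arrays are two different local numberings of the same three global entities, and the expected result in the comment is inferred from the documented behaviour.

#include <cstdint>
#include <dolfinx/graph/partition.h>
#include <vector>

void local_to_local_example()
{
  std::vector<std::int64_t> local0_to_global = {4, 7, 9};
  std::vector<std::int64_t> local1_to_global = {9, 4, 7};

  // map[i] is the local1 index of the entity with local0 index i; for the
  // data above this should give {1, 2, 0}
  std::vector<std::int32_t> map
      = dolfinx::graph::build::compute_local_to_local(local0_to_global,
                                                      local1_to_global);
}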

dolfinx::graph
    Graph data structures and algorithms.
    Definition: AdjacencyList.h:19

AdjacencyList<std::int32_t> partition_graph(MPI_Comm comm, int nparts, const AdjacencyList<std::int64_t>& local_graph, std::int32_t num_ghost_nodes, bool ghosting)
    Partition graph across processes using the default graph partitioner.
    Definition: partition.cpp:22
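
A self-contained, illustrative sketch of calling partition_graph with the default partitioner; the ring-graph layout, the use of AdjacencyList::str() for printing, and the choice of one part per rank are assumptions made for the example, not part of the DOLFINx documentation.

#include <cstdint>
#include <dolfinx/common/MPI.h>
#include <dolfinx/graph/AdjacencyList.h>
#include <dolfinx/graph/partition.h>
#include <iostream>
#include <mpi.h>
#include <vector>

int main(int argc, char* argv[])
{
  MPI_Init(&argc, &argv);
  {
    MPI_Comm comm = MPI_COMM_WORLD;
    const int size = dolfinx::MPI::size(comm);
    const int rank = dolfinx::MPI::rank(comm);

    // Each rank owns four consecutive nodes of a ring with 4*size nodes.
    // Links hold the global indices of each node's two neighbours.
    const std::int64_t n = 4 * static_cast<std::int64_t>(size);
    const std::int64_t offset = 4 * static_cast<std::int64_t>(rank);
    std::vector<std::int64_t> links;
    std::vector<std::int32_t> offsets = {0};
    for (std::int64_t v = offset; v < offset + 4; ++v)
    {
      links.push_back((v + n - 1) % n);
      links.push_back((v + 1) % n);
      offsets.push_back(static_cast<std::int32_t>(links.size()));
    }
    dolfinx::graph::AdjacencyList<std::int64_t> local_graph(links, offsets);

    // One part per rank, no ghost nodes, no halo exchange (ghosting = false)
    dolfinx::graph::AdjacencyList<std::int32_t> dest
        = dolfinx::graph::partition_graph(comm, size, local_graph, 0, false);

    // dest lists the destination rank(s) of each local node
    std::cout << "Rank " << rank << ":\n" << dest.str() << std::endl;
  }
  MPI_Finalize();
  return 0;
}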

std::function<graph::AdjacencyList<std::int32_t>(MPI_Comm comm, int nparts, const AdjacencyList<std::int64_t>& local_graph, std::int32_t num_ghost_nodes, bool ghosting)> partition_fn
    Signature of functions for computing the parallel partitioning of a distributed graph.
    Definition: partition.h:36
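
An illustrative sketch of a callable matching the partition_fn signature; it assigns every local node to rank 0, which is only useful as a placeholder for a real partitioner such as SCOTCH or ParMETIS.

#include <cstdint>
#include <dolfinx/graph/AdjacencyList.h>
#include <dolfinx/graph/partition.h>
#include <numeric>
#include <vector>

// A valid (but trivial) partition_fn: every local node goes to rank 0
dolfinx::graph::partition_fn naive_partitioner()
{
  return [](MPI_Comm comm, int nparts,
            const dolfinx::graph::AdjacencyList<std::int64_t>& local_graph,
            std::int32_t num_ghost_nodes, bool ghosting)
  {
    // One destination (rank 0) per local node
    std::vector<std::int32_t> dest(local_graph.num_nodes(), 0);
    std::vector<std::int32_t> offsets(local_graph.num_nodes() + 1);
    std::iota(offsets.begin(), offsets.end(), 0);
    return dolfinx::graph::AdjacencyList<std::int32_t>(std::move(dest),
                                                       std::move(offsets));
  };
}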