DOLFINx 0.10.0.0
DOLFINx C++ interface
utils.h
// Copyright (C) 2012-2024 Chris N. Richardson, Garth N. Wells, Jørgen S. Dokken
//
// This file is part of DOLFINx (https://www.fenicsproject.org)
//
// SPDX-License-Identifier: LGPL-3.0-or-later

#pragma once

#include <algorithm>
#include <array>
#include <basix/mdspan.hpp>
#include <cassert>
#include <cstdint>
#include <dolfinx/common/MPI.h>
#include <dolfinx/common/types.h>
#include <dolfinx/fem/ElementDofLayout.h>
#include <dolfinx/mesh/Topology.h>
#include <dolfinx/mesh/cell_types.h>
#include <iterator>
#include <map>
#include <mpi.h>
#include <numeric>
#include <span>
#include <spdlog/spdlog.h>
#include <stdexcept>
#include <tuple>
#include <utility>
#include <vector>

namespace dolfinx
{

namespace io
{

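/// @brief Get owned entities and associated data from input entities
/// defined by global 'node' indices.
///
/// This function is collective over the communicator of @p topology.
/// The input entities (rows of global 'node' indices) and their data
/// values may be supplied on any rank; each rank receives back the
/// entities that are present locally, expressed as flattened lists of
/// local vertex indices, together with the corresponding data values.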
template <typename T>
std::pair<std::vector<std::int32_t>, std::vector<T>> distribute_entity_data(
    const mesh::Topology& topology, std::span<const std::int64_t> nodes_g,
    std::int64_t num_nodes_g, const fem::ElementDofLayout& cmap_dof_layout,
    md::mdspan<const std::int32_t, md::dextents<std::size_t, 2>> xdofmap,
    int entity_dim,
    md::mdspan<const std::int64_t, md::dextents<std::size_t, 2>> entities,
    std::span<const T> data)
{
  assert(entities.extent(0) == data.size());

  spdlog::info("XDMF distribute entity data");
  mesh::CellType cell_type = topology.cell_type();

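  // The distribution proceeds in five steps (A-E below): (A) express
  // each input entity by its sorted 'vertex node' indices; (B) send
  // each entity and its value to a 'post office' rank determined by
  // the entity's first vertex index; (C) send the global input node
  // indices held on this rank to the same post offices; (D) from the
  // post offices, forward each entity to the ranks that hold its first
  // vertex; (E) on each receiving rank, keep the entities whose
  // vertices are all present locally and map them to local vertex
  // indices.
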
  // Get layout of dofs on 0th cell entity of dimension entity_dim
  std::vector<int> cell_vertex_dofs;
  for (int i = 0; i < mesh::cell_num_entities(cell_type, 0); ++i)
  {
    const std::vector<int>& local_index = cmap_dof_layout.entity_dofs(0, i);
    assert(local_index.size() == 1);
    cell_vertex_dofs.push_back(local_index[0]);
  }

  // -- A. Convert from list of entities by 'nodes' to list of entities
  // by 'vertex nodes'
  auto to_vertex_entities
      = [](const fem::ElementDofLayout& cmap_dof_layout, int entity_dim,
           std::span<const int> cell_vertex_dofs, mesh::CellType cell_type,
           auto entities)
  {
    // Use ElementDofLayout of the cell to get vertex dof indices (local
    // to a cell), i.e. build a map from local vertex index to associated
    // local dof index
    const std::vector<int> entity_layout
        = cmap_dof_layout.entity_closure_dofs(entity_dim, 0);
    std::vector<int> entity_vertex_dofs;
    for (std::size_t i = 0; i < cell_vertex_dofs.size(); ++i)
    {
      auto it = std::find(entity_layout.begin(), entity_layout.end(),
                          cell_vertex_dofs[i]);
      if (it != entity_layout.end())
        entity_vertex_dofs.push_back(std::distance(entity_layout.begin(), it));
    }

    const std::size_t num_vert_per_e = mesh::cell_num_entities(
        mesh::cell_entity_type(cell_type, entity_dim, 0), 0);

    assert(entities.extent(1) == entity_layout.size());
    std::vector<std::int64_t> entities_v(entities.extent(0) * num_vert_per_e);
    for (std::size_t e = 0; e < entities.extent(0); ++e)
    {
      std::span entity(entities_v.data() + e * num_vert_per_e, num_vert_per_e);
      for (std::size_t i = 0; i < num_vert_per_e; ++i)
        entity[i] = entities(e, entity_vertex_dofs[i]);
      std::ranges::sort(entity);
    }

    std::array shape{entities.extent(0), num_vert_per_e};
    return std::pair(std::move(entities_v), shape);
  };
  const auto [entities_v_b, shapev] = to_vertex_entities(
      cmap_dof_layout, entity_dim, cell_vertex_dofs, cell_type, entities);

  md::mdspan<const std::int64_t, md::dextents<std::size_t, 2>> entities_v(
      entities_v_b.data(), shapev);

  MPI_Comm comm = topology.comm();
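  // Create an MPI datatype that packs all vertex indices of one entity
  // into a single element, so that counts and displacements in the
  // neighbourhood collectives below are per entity rather than per index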
  MPI_Datatype compound_type;
  MPI_Type_contiguous(entities_v.extent(1), MPI_INT64_T, &compound_type);
  MPI_Type_commit(&compound_type);

  // -- B. Send entities and entity data to postmaster
  auto send_entities_to_postmaster
      = [](MPI_Comm comm, MPI_Datatype compound_type, std::int64_t num_nodes_g,
           auto entities, std::span<const T> data)
  {
    const int size = dolfinx::MPI::size(comm);

    // Determine destination by index of first vertex
    std::vector<int> dest0;
    for (std::size_t e = 0; e < entities.extent(0); ++e)
    {
      dest0.push_back(
          dolfinx::MPI::index_owner(size, entities(e, 0), num_nodes_g));
    }
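
    // Sort a permutation of the entity indices by destination rank so
    // that entities going to the same post office become contiguous in
    // the send buffer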
    std::vector<int> perm(dest0.size());
    std::iota(perm.begin(), perm.end(), 0);
    std::ranges::sort(perm, [&dest0](auto x0, auto x1)
                      { return dest0[x0] < dest0[x1]; });

    // Note: dest0[perm[i]] is non-decreasing as i increases
    // Build list of neighbour dest ranks and count number of entities to
    // send to each post office
    std::vector<int> dest;
    std::vector<std::int32_t> num_items_send;
    {
      auto it = perm.begin();
      while (it != perm.end())
      {
        dest.push_back(dest0[*it]);
        auto it1
            = std::find_if(it, perm.end(), [&dest0, r = dest.back()](auto idx)
                           { return dest0[idx] != r; });
        num_items_send.push_back(std::distance(it, it1));
        it = it1;
      }
    }

    // Compute send displacements
    std::vector<int> send_disp(num_items_send.size() + 1, 0);
    std::partial_sum(num_items_send.begin(), num_items_send.end(),
                     std::next(send_disp.begin()));

    // Determine src ranks. Sort ranks so that ownership determination is
    // deterministic for a given number of ranks.
    std::vector<int> src = dolfinx::MPI::compute_graph_edges_nbx(comm, dest);
    std::ranges::sort(src);

    // Create neighbourhood communicator for sending data to post
    // offices
    MPI_Comm comm0;
    int err = MPI_Dist_graph_create_adjacent(
        comm, src.size(), src.data(), MPI_UNWEIGHTED, dest.size(), dest.data(),
        MPI_UNWEIGHTED, MPI_INFO_NULL, false, &comm0);
    dolfinx::MPI::check_error(comm, err);

    // Send number of items to post offices (destinations)
    std::vector<int> num_items_recv(src.size());
    num_items_send.reserve(1);
    num_items_recv.reserve(1);
    err = MPI_Neighbor_alltoall(num_items_send.data(), 1, MPI_INT,
                                num_items_recv.data(), 1, MPI_INT, comm0);
    dolfinx::MPI::check_error(comm, err);

    // Compute receive displacements
    std::vector<int> recv_disp(num_items_recv.size() + 1, 0);
    std::partial_sum(num_items_recv.begin(), num_items_recv.end(),
                     std::next(recv_disp.begin()));

    // Prepare send buffer
    std::vector<std::int64_t> send_buffer;
    std::vector<T> send_values_buffer;
    send_buffer.reserve(entities.size());
    send_values_buffer.reserve(data.size());
    for (std::size_t e = 0; e < entities.extent(0); ++e)
    {
      auto idx = perm[e];
      auto it = std::next(entities.data_handle(), idx * entities.extent(1));
      send_buffer.insert(send_buffer.end(), it, it + entities.extent(1));
      send_values_buffer.push_back(data[idx]);
    }

    std::vector<std::int64_t> recv_buffer(recv_disp.back()
                                          * entities.extent(1));
    err = MPI_Neighbor_alltoallv(send_buffer.data(), num_items_send.data(),
                                 send_disp.data(), compound_type,
                                 recv_buffer.data(), num_items_recv.data(),
                                 recv_disp.data(), compound_type, comm0);
    dolfinx::MPI::check_error(comm, err);
    std::vector<T> recv_values_buffer(recv_disp.back());
    err = MPI_Neighbor_alltoallv(
        send_values_buffer.data(), num_items_send.data(), send_disp.data(),
        dolfinx::MPI::mpi_t<T>, recv_values_buffer.data(),
        num_items_recv.data(), recv_disp.data(), dolfinx::MPI::mpi_t<T>, comm0);
    dolfinx::MPI::check_error(comm, err);
    err = MPI_Comm_free(&comm0);
    dolfinx::MPI::check_error(comm, err);

    std::array shape{recv_buffer.size() / entities.extent(1),
                     entities.extent(1)};
    return std::tuple<std::vector<std::int64_t>, std::vector<T>,
                      std::array<std::size_t, 2>>(
        std::move(recv_buffer), std::move(recv_values_buffer), shape);
  };
  const auto [entitiesp_b, entitiesp_v, shapep] = send_entities_to_postmaster(
      comm, compound_type, num_nodes_g, entities_v, data);
  md::mdspan<const std::int64_t, md::dextents<std::size_t, 2>> entitiesp(
      entitiesp_b.data(), shapep);

  // -- C. Send mesh global indices to postmaster
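  // (the post offices use these node indices in step D to determine
  // which ranks are candidate owners of each entity received in step B)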
  auto indices_to_postoffice = [](MPI_Comm comm, std::int64_t num_nodes,
                                  std::span<const std::int64_t> indices)
  {
    int size = dolfinx::MPI::size(comm);
    std::vector<std::pair<int, std::int64_t>> dest_to_index;
    std::ranges::transform(
        indices, std::back_inserter(dest_to_index),
        [size, num_nodes](auto n)
        {
          return std::pair(dolfinx::MPI::index_owner(size, n, num_nodes), n);
        });
    std::ranges::sort(dest_to_index);

    // Build list of neighbour dest ranks and count number of indices to
    // send to each post office
    std::vector<int> dest;
    std::vector<std::int32_t> num_items_send;
    {
      auto it = dest_to_index.begin();
      while (it != dest_to_index.end())
      {
        dest.push_back(it->first);
        auto it1
            = std::find_if(it, dest_to_index.end(), [r = dest.back()](auto idx)
                           { return idx.first != r; });
        num_items_send.push_back(std::distance(it, it1));
        it = it1;
      }
    }

    // Compute send displacements
    std::vector<int> send_disp(num_items_send.size() + 1, 0);
    std::partial_sum(num_items_send.begin(), num_items_send.end(),
                     std::next(send_disp.begin()));

    // Determine src ranks. Sort ranks so that ownership determination is
    // deterministic for a given number of ranks.
    std::vector<int> src = dolfinx::MPI::compute_graph_edges_nbx(comm, dest);
    std::ranges::sort(src);

    // Create neighbourhood communicator for sending data to post offices
    MPI_Comm comm0;
    int err = MPI_Dist_graph_create_adjacent(
        comm, src.size(), src.data(), MPI_UNWEIGHTED, dest.size(), dest.data(),
        MPI_UNWEIGHTED, MPI_INFO_NULL, false, &comm0);
    dolfinx::MPI::check_error(comm, err);

    // Send number of items to post offices (destination) that I will be
    // sending
    std::vector<int> num_items_recv(src.size());
    num_items_send.reserve(1);
    num_items_recv.reserve(1);
    err = MPI_Neighbor_alltoall(num_items_send.data(), 1, MPI_INT,
                                num_items_recv.data(), 1, MPI_INT, comm0);
    dolfinx::MPI::check_error(comm, err);

    // Compute receive displacements
    std::vector<int> recv_disp(num_items_recv.size() + 1, 0);
    std::partial_sum(num_items_recv.begin(), num_items_recv.end(),
                     std::next(recv_disp.begin()));

    // Prepare send buffer
    std::vector<std::int64_t> send_buffer;
    send_buffer.reserve(indices.size());
    std::ranges::transform(dest_to_index, std::back_inserter(send_buffer),
                           [](auto x) { return x.second; });

    std::vector<std::int64_t> recv_buffer(recv_disp.back());
    err = MPI_Neighbor_alltoallv(send_buffer.data(), num_items_send.data(),
                                 send_disp.data(), MPI_INT64_T,
                                 recv_buffer.data(), num_items_recv.data(),
                                 recv_disp.data(), MPI_INT64_T, comm0);
    dolfinx::MPI::check_error(comm, err);
    err = MPI_Comm_free(&comm0);
    dolfinx::MPI::check_error(comm, err);
    return std::tuple(std::move(recv_buffer), std::move(recv_disp),
                      std::move(src), std::move(dest));
  };
  const auto [nodes_g_p, recv_disp, src, dest]
      = indices_to_postoffice(comm, num_nodes_g, nodes_g);

  // -- D. Send entities to possible owners, based on first entity index
  auto candidate_ranks
      = [](MPI_Comm comm, MPI_Datatype compound_type,
           std::span<const std::int64_t> indices_recv,
           std::span<const int> indices_recv_disp, std::span<const int> src,
           std::span<const int> dest, auto entities, std::span<const T> data)
  {
    // Build map from received global node indices to neighbourhood
    // ranks that have the node
    std::multimap<std::int64_t, int> node_to_rank;
    for (std::size_t i = 0; i < indices_recv_disp.size() - 1; ++i)
      for (int j = indices_recv_disp[i]; j < indices_recv_disp[i + 1]; ++j)
        node_to_rank.insert({indices_recv[j], i});

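    // Pack each received entity, and its value, for every rank that
    // holds the entity's first vertex; these ranks are the candidate
    // owners of the entity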
    std::vector<std::vector<std::int64_t>> send_data(dest.size());
    std::vector<std::vector<T>> send_values(dest.size());
    for (std::size_t e = 0; e < entities.extent(0); ++e)
    {
      std::span e_recv(entities.data_handle() + e * entities.extent(1),
                       entities.extent(1));
      auto [it0, it1] = node_to_rank.equal_range(entities(e, 0));
      for (auto it = it0; it != it1; ++it)
      {
        int p = it->second;
        send_data[p].insert(send_data[p].end(), e_recv.begin(), e_recv.end());
        send_values[p].push_back(data[e]);
      }
    }

    MPI_Comm comm0;
    int err = MPI_Dist_graph_create_adjacent(
        comm, src.size(), src.data(), MPI_UNWEIGHTED, dest.size(), dest.data(),
        MPI_UNWEIGHTED, MPI_INFO_NULL, false, &comm0);
    dolfinx::MPI::check_error(comm, err);

    std::vector<int> num_items_send;
    for (auto& x : send_data)
      num_items_send.push_back(x.size() / entities.extent(1));

    std::vector<int> num_items_recv(src.size());
    num_items_send.reserve(1);
    num_items_recv.reserve(1);
    err = MPI_Neighbor_alltoall(num_items_send.data(), 1, MPI_INT,
                                num_items_recv.data(), 1, MPI_INT, comm0);
    dolfinx::MPI::check_error(comm, err);

    // Compute send displacements
    std::vector<std::int32_t> send_disp(num_items_send.size() + 1, 0);
    std::partial_sum(num_items_send.begin(), num_items_send.end(),
                     std::next(send_disp.begin()));

    // Compute receive displacements
    std::vector<std::int32_t> recv_disp(num_items_recv.size() + 1, 0);
    std::partial_sum(num_items_recv.begin(), num_items_recv.end(),
                     std::next(recv_disp.begin()));

    // Prepare send buffers
    std::vector<std::int64_t> send_buffer;
    std::vector<T> send_values_buffer;
    for (auto& x : send_data)
      send_buffer.insert(send_buffer.end(), x.begin(), x.end());
    for (auto& v : send_values)
      send_values_buffer.insert(send_values_buffer.end(), v.begin(), v.end());

    std::vector<std::int64_t> recv_buffer(entities.extent(1)
                                          * recv_disp.back());
    err = MPI_Neighbor_alltoallv(send_buffer.data(), num_items_send.data(),
                                 send_disp.data(), compound_type,
                                 recv_buffer.data(), num_items_recv.data(),
                                 recv_disp.data(), compound_type, comm0);
    dolfinx::MPI::check_error(comm, err);

    std::vector<T> recv_values_buffer(recv_disp.back());
    err = MPI_Neighbor_alltoallv(
        send_values_buffer.data(), num_items_send.data(), send_disp.data(),
        dolfinx::MPI::mpi_t<T>, recv_values_buffer.data(),
        num_items_recv.data(), recv_disp.data(), dolfinx::MPI::mpi_t<T>, comm0);
    dolfinx::MPI::check_error(comm, err);

    err = MPI_Comm_free(&comm0);
    dolfinx::MPI::check_error(comm, err);

    std::array shape{recv_buffer.size() / entities.extent(1),
                     entities.extent(1)};
    return std::tuple<std::vector<std::int64_t>, std::vector<T>,
                      std::array<std::size_t, 2>>(
        std::move(recv_buffer), std::move(recv_values_buffer), shape);
  };
  // NOTE: src and dest are transposed here because we're reversing the
  // direction of communication
  const auto [entities_data_b, entities_values, shape_eb]
      = candidate_ranks(comm, compound_type, nodes_g_p, recv_disp, dest, src,
                        entitiesp, std::span(entitiesp_v));
  md::mdspan<const std::int64_t, md::dextents<std::size_t, 2>> entities_data(
      entities_data_b.data(), shape_eb);

  // -- E. From the received (key, value) data, determine which keys
  // (entities) are on this process.
  //
  // TODO: We have already received possibly tagged entities from other
  // ranks, so we could use the received data to avoid creating
  // the std::map for *all* entities and just for candidate
  // entities.
  auto select_entities
      = [](const mesh::Topology& topology, auto xdofmap,
           std::span<const std::int64_t> nodes_g,
           std::span<const int> cell_vertex_dofs, auto entities_data,
           std::span<const T> entities_values)
  {
    spdlog::info("XDMF build map");
    auto c_to_v = topology.connectivity(topology.dim(), 0);
    if (!c_to_v)
      throw std::runtime_error("Missing cell-vertex connectivity.");

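    // Build a map from the 'input' global index of each vertex node to
    // the local vertex index, using the geometry dofmap (xdofmap) and
    // the cell-to-vertex connectivity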
    std::map<std::int64_t, std::int32_t> input_idx_to_vertex;
    for (int c = 0; c < c_to_v->num_nodes(); ++c)
    {
      auto vertices = c_to_v->links(c);
      std::span xdofs(xdofmap.data_handle() + c * xdofmap.extent(1),
                      xdofmap.extent(1));
      for (std::size_t v = 0; v < vertices.size(); ++v)
        input_idx_to_vertex[nodes_g[xdofs[cell_vertex_dofs[v]]]] = vertices[v];
    }

    std::vector<std::int32_t> entities;
    std::vector<T> data;
    std::vector<std::int32_t> entity(entities_data.extent(1));
    for (std::size_t e = 0; e < entities_data.extent(0); ++e)
    {
      bool entity_found = true;
      for (std::size_t i = 0; i < entities_data.extent(1); ++i)
      {
        if (auto it = input_idx_to_vertex.find(entities_data(e, i));
            it == input_idx_to_vertex.end())
        {
          // As soon as this received index is not in locally owned
          // input global indices skip the entire entity
          entity_found = false;
          break;
        }
        else
          entity[i] = it->second;
      }

      if (entity_found)
      {
        entities.insert(entities.end(), entity.begin(), entity.end());
        data.push_back(entities_values[e]);
      }
    }

    return std::pair(std::move(entities), std::move(data));
  };

  MPI_Type_free(&compound_type);

  return select_entities(topology, xdofmap, nodes_g, cell_vertex_dofs,
                         entities_data, std::span(entities_values));
}
//-----------------------------------------------------------------------------

} // namespace io
} // namespace dolfinx
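
Usage sketch (not part of the header above): distribute_entity_data is typically called when reading tagged entities, such as facet markers, whose defining node indices and values come from a file. The sketch below shows one plausible call, assuming a mesh::Mesh<double> and flattened file data; the helper name distribute_markers and its parameters are illustrative, and the Geometry accessors used (input_global_indices(), index_map(), cmap().create_dof_layout(), dofmap()) follow the DOLFINx mesh API but may differ between versions.

// Illustrative sketch: distribute entity markers read from file.
// 'file_entities' holds flattened (num_entities x nodes_per_entity)
// global node indices and 'file_values' holds one value per entity.
#include <cstdint>
#include <span>
#include <utility>
#include <vector>

#include <dolfinx/io/utils.h>
#include <dolfinx/mesh/Mesh.h>

using namespace dolfinx;

std::pair<std::vector<std::int32_t>, std::vector<std::int32_t>>
distribute_markers(const mesh::Mesh<double>& mesh,
                   std::span<const std::int64_t> file_entities,
                   std::span<const std::int32_t> file_values, int entity_dim,
                   std::size_t nodes_per_entity)
{
  const auto& geometry = mesh.geometry();

  // View the flattened file data as a two-dimensional array
  md::mdspan<const std::int64_t, md::dextents<std::size_t, 2>> entities(
      file_entities.data(), file_entities.size() / nodes_per_entity,
      nodes_per_entity);

  // Each rank gets back the entities that exist locally (as flattened
  // local vertex indices) and the matching marker values
  return io::distribute_entity_data<std::int32_t>(
      *mesh.topology(), geometry.input_global_indices(),
      geometry.index_map()->size_global(),
      geometry.cmap().create_dof_layout(), geometry.dofmap(), entity_dim,
      entities, file_values);
}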