DOLFINx 0.10.0.0
DOLFINx C++ interface
utils.h
1// Copyright (C) 2012-2024 Chris N. Richardson, Garth N. Wells, Jørgen S. Dokken
2//
3// This file is part of DOLFINx (https://www.fenicsproject.org)
4//
5// SPDX-License-Identifier: LGPL-3.0-or-later
6
7#pragma once
8
9#include <array>
10#include <basix/mdspan.hpp>
11#include <dolfinx/common/types.h>
12#include <dolfinx/fem/ElementDofLayout.h>
13#include <dolfinx/mesh/Topology.h>
14#include <mpi.h>
15#include <span>
16#include <utility>
17#include <vector>
// Headers used directly in this file; in the upstream sources some of them
// may only be reached transitively through the includes above.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <map>
#include <numeric>
#include <spdlog/spdlog.h>
#include <dolfinx/common/MPI.h>
#include <dolfinx/mesh/cell_types.h>
18
19namespace dolfinx
20{
21
22namespace io
23{
24
59template <typename T>
60std::pair<std::vector<std::int32_t>, std::vector<T>> distribute_entity_data(
61 const mesh::Topology& topology, std::span<const std::int64_t> nodes_g,
62 std::int64_t num_nodes_g, const fem::ElementDofLayout& cmap_dof_layout,
63 md::mdspan<const std::int32_t, md::dextents<std::size_t, 2>> xdofmap,
64 int entity_dim,
65 md::mdspan<const std::int64_t, md::dextents<std::size_t, 2>> entities,
66 std::span<const T> data)
67{
68 assert(entities.extent(0) == data.size());
69
70 spdlog::info("XDMF distribute entity data");
71 mesh::CellType cell_type = topology.cell_type();
72
73 // Get the cell-local dof (node) index of each vertex (dimension-0 entity) of the cell
74 std::vector<int> cell_vertex_dofs;
75 for (int i = 0; i < mesh::cell_num_entities(cell_type, 0); ++i)
76 {
77 const std::vector<int>& local_index = cmap_dof_layout.entity_dofs(0, i);
78 assert(local_index.size() == 1);
79 cell_vertex_dofs.push_back(local_index[0]);
80 }
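// The rest of this function redistributes the (entity, value) pairs with a
// "postmaster" (post office) pattern over the global node index range:
//   A. reduce each input entity to its (sorted) vertex nodes;
//   B. send each entity and its value to the rank acting as postmaster for
//      the entity's first (smallest) vertex index;
//   C. send the locally held input node indices to their postmasters;
//   D. each postmaster forwards an entity to every rank that reported
//      holding the entity's first vertex (the candidate owners);
//   E. each candidate owner keeps the entities whose vertices are all
//      present locally and maps them to local vertex indices.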
81
82 // -- A. Convert from list of entities by 'nodes' to list of entities
83 // by 'vertex nodes'
84 auto to_vertex_entities
85 = [](const fem::ElementDofLayout& cmap_dof_layout, int entity_dim,
86 std::span<const int> cell_vertex_dofs, mesh::CellType cell_type,
87 auto entities)
88 {
89 // Use the cell's ElementDofLayout to locate the vertex dofs within the
90 // entity's closure dofs, i.e. build a map from entity-local vertex
91 // index to its position in the entity's node list
92 const std::vector<int>& entity_layout
93 = cmap_dof_layout.entity_closure_dofs(entity_dim, 0);
94 std::vector<int> entity_vertex_dofs;
95 for (std::size_t i = 0; i < cell_vertex_dofs.size(); ++i)
96 {
97 auto it = std::find(entity_layout.begin(), entity_layout.end(),
98 cell_vertex_dofs[i]);
99 if (it != entity_layout.end())
100 entity_vertex_dofs.push_back(std::distance(entity_layout.begin(), it));
101 }
102
103 const std::size_t num_vert_per_e = mesh::cell_num_entities(
104 mesh::cell_entity_type(cell_type, entity_dim, 0), 0);
105
106 assert(entities.extent(1) == entity_layout.size());
107 std::vector<std::int64_t> entities_v(entities.extent(0) * num_vert_per_e);
108 for (std::size_t e = 0; e < entities.extent(0); ++e)
109 {
110 std::span entity(entities_v.data() + e * num_vert_per_e, num_vert_per_e);
111 for (std::size_t i = 0; i < num_vert_per_e; ++i)
112 entity[i] = entities(e, entity_vertex_dofs[i]);
113 std::ranges::sort(entity);
114 }
115
116 std::array shape{entities.extent(0), num_vert_per_e};
117 return std::pair(std::move(entities_v), shape);
118 };
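// For example, for facet data (entity_dim = 1) on a second-order (P2)
// triangle mesh, each row of `entities` holds the 3 geometry nodes of an
// edge (2 end-point nodes and 1 midpoint node); only the positions of the
// 2 vertex nodes are kept, so each row of `entities_v` holds 2 sorted
// global vertex-node indices.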
119 const auto [entities_v_b, shapev] = to_vertex_entities(
120 cmap_dof_layout, entity_dim, cell_vertex_dofs, cell_type, entities);
121
122 md::mdspan<const std::int64_t, md::dextents<std::size_t, 2>> entities_v(
123 entities_v_b.data(), shapev);
124
125 MPI_Comm comm = topology.comm();
126 MPI_Datatype compound_type;
127 MPI_Type_contiguous(entities_v.extent(1), MPI_INT64_T, &compound_type);
128 MPI_Type_commit(&compound_type);
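// One element of compound_type carries all vertex indices of a single
// entity, so the counts and displacements passed to MPI_Neighbor_alltoallv
// below are expressed in whole entities rather than in individual int64
// values.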
129
130 // -- B. Send entities and entity data to postmaster
131 auto send_entities_to_postmaster
132 = [](MPI_Comm comm, MPI_Datatype compound_type, std::int64_t num_nodes_g,
133 auto entities, std::span<const T> data)
134 {
135 const int size = dolfinx::MPI::size(comm);
136
137 // Determine destination by index of first vertex
138 std::vector<int> dest0;
139 dest0.reserve(entities.extent(0));
140 for (std::size_t e = 0; e < entities.extent(0); ++e)
141 {
142 dest0.push_back(
143 dolfinx::MPI::index_owner(size, entities(e, 0), num_nodes_g));
144 }
145 std::vector<int> perm(dest0.size());
146 std::iota(perm.begin(), perm.end(), 0);
147 std::ranges::sort(perm, [&dest0](auto x0, auto x1)
148 { return dest0[x0] < dest0[x1]; });
149
150 // Note: dest0[perm[i]] is non-decreasing with increasing i
151 // Build list of neighbour dest ranks and count number of entities to
152 // send to each post office
153 std::vector<int> dest;
154 std::vector<std::int32_t> num_items_send;
155 {
156 auto it = perm.begin();
157 while (it != perm.end())
158 {
159 dest.push_back(dest0[*it]);
160 auto it1
161 = std::find_if(it, perm.end(), [&dest0, r = dest.back()](auto idx)
162 { return dest0[idx] != r; });
163 num_items_send.push_back(std::distance(it, it1));
164 it = it1;
165 }
166 }
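// dest now holds the distinct destination (postmaster) ranks in increasing
// order and num_items_send[i] the number of entities going to dest[i];
// perm groups the entities by destination.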
167
168 // Compute send displacements
169 std::vector<int> send_disp(num_items_send.size() + 1, 0);
170 std::partial_sum(num_items_send.begin(), num_items_send.end(),
171 std::next(send_disp.begin()));
172
173 // Determine src ranks. Sort ranks so that ownership determination is
174 // deterministic for a given number of ranks.
175 std::vector<int> src = dolfinx::MPI::compute_graph_edges_nbx(comm, dest);
176 std::ranges::sort(src);
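// Destination ranks are known locally; compute_graph_edges_nbx runs a
// non-blocking consensus (NBX) to discover which ranks will send to this
// rank, giving the source side of the neighbourhood communicator.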
177
178 // Create neighbourhood communicator for sending data to post
179 // offices
180 MPI_Comm comm0;
181 int err = MPI_Dist_graph_create_adjacent(
182 comm, src.size(), src.data(), MPI_UNWEIGHTED, dest.size(), dest.data(),
183 MPI_UNWEIGHTED, MPI_INFO_NULL, false, &comm0);
184 dolfinx::MPI::check_error(comm, err);
185
186 // Send number of items to post offices (destinations)
187 std::vector<int> num_items_recv(src.size());
188 num_items_send.reserve(1);
189 num_items_recv.reserve(1);
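// The reserve(1) calls ensure that .data() is non-null even when a rank
// has no neighbours, so the MPI calls below always receive valid pointers.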
190 err = MPI_Neighbor_alltoall(num_items_send.data(), 1, MPI_INT,
191 num_items_recv.data(), 1, MPI_INT, comm0);
192 dolfinx::MPI::check_error(comm, err);
193
194 // Compute receive displacements
195 std::vector<int> recv_disp(num_items_recv.size() + 1, 0);
196 std::partial_sum(num_items_recv.begin(), num_items_recv.end(),
197 std::next(recv_disp.begin()));
198
199 // Prepare send buffer
200 std::vector<std::int64_t> send_buffer;
201 std::vector<T> send_values_buffer;
202 send_buffer.reserve(entities.size());
203 send_values_buffer.reserve(data.size());
204 for (std::size_t e = 0; e < entities.extent(0); ++e)
205 {
206 auto idx = perm[e];
207 auto it = std::next(entities.data_handle(), idx * entities.extent(1));
208 send_buffer.insert(send_buffer.end(), it, it + entities.extent(1));
209 send_values_buffer.push_back(data[idx]);
210 }
211
212 std::vector<std::int64_t> recv_buffer(recv_disp.back()
213 * entities.extent(1));
214 err = MPI_Neighbor_alltoallv(send_buffer.data(), num_items_send.data(),
215 send_disp.data(), compound_type,
216 recv_buffer.data(), num_items_recv.data(),
217 recv_disp.data(), compound_type, comm0);
218 dolfinx::MPI::check_error(comm, err);
219 std::vector<T> recv_values_buffer(recv_disp.back());
220 err = MPI_Neighbor_alltoallv(
221 send_values_buffer.data(), num_items_send.data(), send_disp.data(),
222 dolfinx::MPI::mpi_t<T>, recv_values_buffer.data(),
223 num_items_recv.data(), recv_disp.data(), dolfinx::MPI::mpi_t<T>, comm0);
224 dolfinx::MPI::check_error(comm, err);
225 err = MPI_Comm_free(&comm0);
226 dolfinx::MPI::check_error(comm, err);
227
228 std::array shape{recv_buffer.size() / (entities.extent(1)),
229 (entities.extent(1))};
230 return std::tuple<std::vector<std::int64_t>, std::vector<T>,
231 std::array<std::size_t, 2>>(
232 std::move(recv_buffer), std::move(recv_values_buffer), shape);
233 };
234 const auto [entitiesp_b, entitiesp_v, shapep] = send_entities_to_postmaster(
235 comm, compound_type, num_nodes_g, entities_v, data);
236 md::mdspan<const std::int64_t, md::dextents<std::size_t, 2>> entitiesp(
237 entitiesp_b.data(), shapep);
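// Each row of entitiesp is an entity, as sorted global vertex-node
// indices, received because this rank is the postmaster of the entity's
// first vertex; entitiesp_v holds the matching data values.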
238
239 // -- C. Send mesh global indices to postmaster
240 auto indices_to_postoffice = [](MPI_Comm comm, std::int64_t num_nodes,
241 std::span<const std::int64_t> indices)
242 {
243 int size = dolfinx::MPI::size(comm);
244 std::vector<std::pair<int, std::int64_t>> dest_to_index;
245 std::ranges::transform(
246 indices, std::back_inserter(dest_to_index),
247 [size, num_nodes](auto n)
248 {
249 return std::pair(dolfinx::MPI::index_owner(size, n, num_nodes), n);
250 });
251 std::ranges::sort(dest_to_index);
252
253 // Build list of neighbour dest ranks and count number of indices to
254 // send to each post office
255 std::vector<int> dest;
256 std::vector<std::int32_t> num_items_send;
257 {
258 auto it = dest_to_index.begin();
259 while (it != dest_to_index.end())
260 {
261 dest.push_back(it->first);
262 auto it1
263 = std::find_if(it, dest_to_index.end(), [r = dest.back()](auto idx)
264 { return idx.first != r; });
265 num_items_send.push_back(std::distance(it, it1));
266 it = it1;
267 }
268 }
269
270 // Compute send displacements
271 std::vector<int> send_disp(num_items_send.size() + 1, 0);
272 std::partial_sum(num_items_send.begin(), num_items_send.end(),
273 std::next(send_disp.begin()));
274
275 // Determine src ranks. Sort ranks so that ownership determination is
276 // deterministic for a given number of ranks.
277 std::vector<int> src = dolfinx::MPI::compute_graph_edges_nbx(comm, dest);
278 std::ranges::sort(src);
279
280 // Create neighbourhood communicator for sending data to post offices
281 MPI_Comm comm0;
282 int err = MPI_Dist_graph_create_adjacent(
283 comm, src.size(), src.data(), MPI_UNWEIGHTED, dest.size(), dest.data(),
284 MPI_UNWEIGHTED, MPI_INFO_NULL, false, &comm0);
285 dolfinx::MPI::check_error(comm, err);
286
287 // Send to each post office (destination) the number of items that
288 // this rank will be sending
289 std::vector<int> num_items_recv(src.size());
290 num_items_send.reserve(1);
291 num_items_recv.reserve(1);
292 err = MPI_Neighbor_alltoall(num_items_send.data(), 1, MPI_INT,
293 num_items_recv.data(), 1, MPI_INT, comm0);
294 dolfinx::MPI::check_error(comm, err);
295
296 // Compute receive displacements
297 std::vector<int> recv_disp(num_items_recv.size() + 1, 0);
298 std::partial_sum(num_items_recv.begin(), num_items_recv.end(),
299 std::next(recv_disp.begin()));
300
301 // Prepare send buffer
302 std::vector<std::int64_t> send_buffer;
303 send_buffer.reserve(indices.size());
304 std::ranges::transform(dest_to_index, std::back_inserter(send_buffer),
305 [](auto x) { return x.second; });
306
307 std::vector<std::int64_t> recv_buffer(recv_disp.back());
308 err = MPI_Neighbor_alltoallv(send_buffer.data(), num_items_send.data(),
309 send_disp.data(), MPI_INT64_T,
310 recv_buffer.data(), num_items_recv.data(),
311 recv_disp.data(), MPI_INT64_T, comm0);
312 dolfinx::MPI::check_error(comm, err);
313 err = MPI_Comm_free(&comm0);
314 dolfinx::MPI::check_error(comm, err);
315 return std::tuple(std::move(recv_buffer), std::move(recv_disp),
316 std::move(src), std::move(dest));
317 };
318 const auto [nodes_g_p, recv_disp, src, dest]
319 = indices_to_postoffice(comm, num_nodes_g, nodes_g);
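// nodes_g_p holds the input global node indices for which this rank is the
// postmaster, grouped by sending rank via recv_disp; src and dest are the
// source/destination ranks of that neighbourhood exchange.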
320
321 // -- D. Send entities to possible owners, based on the entity's first vertex index
322 auto candidate_ranks
323 = [](MPI_Comm comm, MPI_Datatype compound_type,
324 std::span<const std::int64_t> indices_recv,
325 std::span<const int> indices_recv_disp, std::span<const int> src,
326 std::span<const int> dest, auto entities, std::span<const T> data)
327 {
328 // Build map from received global node indices to neighbourhood
329 // ranks that have the node
330 std::multimap<std::int64_t, int> node_to_rank;
331 for (std::size_t i = 0; i < indices_recv_disp.size() - 1; ++i)
332 for (int j = indices_recv_disp[i]; j < indices_recv_disp[i + 1]; ++j)
333 node_to_rank.insert({indices_recv[j], i});
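// node_to_rank maps a global node index to the neighbourhood-local index
// of every rank that reported holding that node in step C.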
334
335 std::vector<std::vector<std::int64_t>> send_data(dest.size());
336 std::vector<std::vector<T>> send_values(dest.size());
337 for (std::size_t e = 0; e < entities.extent(0); ++e)
338 {
339 std::span e_recv(entities.data_handle() + e * entities.extent(1),
340 entities.extent(1));
341 auto [it0, it1] = node_to_rank.equal_range(entities(e, 0));
342 for (auto it = it0; it != it1; ++it)
343 {
344 int p = it->second;
345 send_data[p].insert(send_data[p].end(), e_recv.begin(), e_recv.end());
346 send_values[p].push_back(data[e]);
347 }
348 }
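// An entity is forwarded to every rank that holds its first vertex, so
// several candidate owners may receive the same entity; step E keeps it
// only on ranks where all of its vertices are present locally.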
349
350 MPI_Comm comm0;
351 int err = MPI_Dist_graph_create_adjacent(
352 comm, src.size(), src.data(), MPI_UNWEIGHTED, dest.size(), dest.data(),
353 MPI_UNWEIGHTED, MPI_INFO_NULL, false, &comm0);
354 dolfinx::MPI::check_error(comm, err);
355
356 std::vector<int> num_items_send;
357 num_items_send.reserve(send_data.size());
358 for (auto& x : send_data)
359 num_items_send.push_back(x.size() / entities.extent(1));
360
361 std::vector<int> num_items_recv(src.size());
362 num_items_send.reserve(1);
363 num_items_recv.reserve(1);
364 err = MPI_Neighbor_alltoall(num_items_send.data(), 1, MPI_INT,
365 num_items_recv.data(), 1, MPI_INT, comm0);
366 dolfinx::MPI::check_error(comm, err);
367
368 // Compute send displacements
369 std::vector<std::int32_t> send_disp(num_items_send.size() + 1, 0);
370 std::partial_sum(num_items_send.begin(), num_items_send.end(),
371 std::next(send_disp.begin()));
372
373 // Compute receive displacements
374 std::vector<std::int32_t> recv_disp(num_items_recv.size() + 1, 0);
375 std::partial_sum(num_items_recv.begin(), num_items_recv.end(),
376 std::next(recv_disp.begin()));
377
378 // Prepare send buffers
379 std::vector<std::int64_t> send_buffer;
380 std::vector<T> send_values_buffer;
381 for (auto& x : send_data)
382 send_buffer.insert(send_buffer.end(), x.begin(), x.end());
383 for (auto& v : send_values)
384 send_values_buffer.insert(send_values_buffer.end(), v.begin(), v.end());
385 std::vector<std::int64_t> recv_buffer(entities.extent(1)
386 * recv_disp.back());
387 err = MPI_Neighbor_alltoallv(send_buffer.data(), num_items_send.data(),
388 send_disp.data(), compound_type,
389 recv_buffer.data(), num_items_recv.data(),
390 recv_disp.data(), compound_type, comm0);
391
392 dolfinx::MPI::check_error(comm, err);
393
394 std::vector<T> recv_values_buffer(recv_disp.back());
395 err = MPI_Neighbor_alltoallv(
396 send_values_buffer.data(), num_items_send.data(), send_disp.data(),
397 dolfinx::MPI::mpi_t<T>, recv_values_buffer.data(),
398 num_items_recv.data(), recv_disp.data(), dolfinx::MPI::mpi_t<T>, comm0);
399
400 dolfinx::MPI::check_error(comm, err);
401
402 err = MPI_Comm_free(&comm0);
403 dolfinx::MPI::check_error(comm, err);
404
405 std::array shape{recv_buffer.size() / entities.extent(1),
406 entities.extent(1)};
407 return std::tuple<std::vector<std::int64_t>, std::vector<T>,
408 std::array<std::size_t, 2>>(
409 std::move(recv_buffer), std::move(recv_values_buffer), shape);
410 };
411 // NOTE: src and dest are transposed here because we're reversing the
412 // direction of communication
413 const auto [entities_data_b, entities_values, shape_eb]
414 = candidate_ranks(comm, compound_type, nodes_g_p, recv_disp, dest, src,
415 entitiesp, std::span(entitiesp_v));
416 md::mdspan<const std::int64_t, md::dextents<std::size_t, 2>> entities_data(
417 entities_data_b.data(), shape_eb);
418
419 // -- E. From the received (key, value) data, determine which keys
420 // (entities) are on this process.
421 //
422 // TODO: We have already received possibly tagged entities from other
423 // ranks, so we could use the received data to avoid creating
424 // the std::map for *all* entities and just for candidate
425 // entities.
426 auto select_entities
427 = [](const mesh::Topology& topology, auto xdofmap,
428 std::span<const std::int64_t> nodes_g,
429 std::span<const int> cell_vertex_dofs, auto entities_data,
430 std::span<const T> entities_values)
431 {
432 spdlog::info("XDMF build map");
433 auto c_to_v = topology.connectivity(topology.dim(), 0);
434 if (!c_to_v)
435 throw std::runtime_error("Missing cell-vertex connectivity.");
436
437 std::map<std::int64_t, std::int32_t> input_idx_to_vertex;
438 for (int c = 0; c < c_to_v->num_nodes(); ++c)
439 {
440 auto vertices = c_to_v->links(c);
441 std::span xdofs(xdofmap.data_handle() + c * xdofmap.extent(1),
442 xdofmap.extent(1));
443 for (std::size_t v = 0; v < vertices.size(); ++v)
444 input_idx_to_vertex[nodes_g[xdofs[cell_vertex_dofs[v]]]] = vertices[v];
445 }
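// For cell c, xdofs[cell_vertex_dofs[v]] is the geometry node coincident
// with local vertex v, and nodes_g converts it to the input global node
// number, giving a map from input node index to local vertex index.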
446
447 std::vector<std::int32_t> entities;
448 std::vector<T> data;
449 std::vector<std::int32_t> entity(entities_data.extent(1));
450 for (std::size_t e = 0; e < entities_data.extent(0); ++e)
451 {
452 bool entity_found = true;
453 for (std::size_t i = 0; i < entities_data.extent(1); ++i)
454 {
455 if (auto it = input_idx_to_vertex.find(entities_data(e, i));
456 it == input_idx_to_vertex.end())
457 {
458 // If any received vertex index is not among the locally held
459 // input global indices, skip the entire entity
460 entity_found = false;
461 break;
462 }
463 else
464 entity[i] = it->second;
465 }
466
467 if (entity_found)
468 {
469 entities.insert(entities.end(), entity.begin(), entity.end());
470 data.push_back(entities_values[e]);
471 }
472 }
473
474 return std::pair(std::move(entities), std::move(data));
475 };
476
477 MPI_Type_free(&compound_type);
478
479 return select_entities(topology, xdofmap, nodes_g, cell_vertex_dofs,
480 entities_data, std::span(entities_values));
481}
482//-----------------------------------------------------------------------------
483
484} // namespace io
485} // namespace dolfinx
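Usage sketch. distribute_entity_data is normally driven by the IO readers when tagged entities (for example facet markers) are read alongside a distributed mesh. The fragment below is a minimal sketch of such a call, not the library's reader implementation. It assumes the header is installed as dolfinx/io/utils.h, that the md mdspan alias used in the listing is visible to user code, and the usual accessors mesh::Mesh::topology(), Geometry::input_global_indices(), Geometry::index_map(), Geometry::cmap().create_dof_layout() and Geometry::dofmap(); the helper name distribute_tags_sketch is hypothetical and exact accessor names can differ between DOLFINx versions.

#include <cstdint>
#include <dolfinx/io/utils.h>
#include <dolfinx/mesh/Mesh.h>
#include <span>
#include <utility>
#include <vector>

// entities_in holds, row by row, the global input ('node') indices of each
// tagged entity read on this rank; values_in holds one tag value per row.
std::pair<std::vector<std::int32_t>, std::vector<std::int32_t>>
distribute_tags_sketch(
    const dolfinx::mesh::Mesh<double>& mesh, int entity_dim,
    md::mdspan<const std::int64_t, md::dextents<std::size_t, 2>> entities_in,
    std::span<const std::int32_t> values_in)
{
  const auto& geometry = mesh.geometry();

  // Move each (entity, tag) pair to the rank(s) that own the entity, with
  // the entity expressed as local vertex indices
  auto [local_vertices, values]
      = dolfinx::io::distribute_entity_data<std::int32_t>(
          *mesh.topology(), geometry.input_global_indices(),
          geometry.index_map()->size_global(),
          geometry.cmap().create_dof_layout(), geometry.dofmap(), entity_dim,
          entities_in, values_in);

  // local_vertices is a flattened (num_local_entities x vertices_per_entity)
  // array of local vertex indices; together with values it can be passed on
  // to build a MeshTags object (e.g. via mesh::create_meshtags).
  return {std::move(local_vertices), std::move(values)};
}

The returned pair mirrors what the function above returns, so the sketch is mainly about wiring the geometry accessors into the argument list.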
Referenced declarations

dolfinx::fem::ElementDofLayout
  Definition: ElementDofLayout.h:31
  const std::vector<int>& entity_closure_dofs(int dim, int entity_index) const
    Definition: ElementDofLayout.cpp:80
  const std::vector<int>& entity_dofs(int dim, int entity_index) const
    Definition: ElementDofLayout.cpp:73

dolfinx::mesh::Topology
  Topology stores the topology of a mesh, consisting of mesh entities and connectivity (incidence relat...
  Definition: Topology.h:41
  std::shared_ptr<const graph::AdjacencyList<std::int32_t>> connectivity(std::array<int, 2> d0, std::array<int, 2> d1) const
    Get the connectivity from entities of topological dimension d0 to dimension d1.
    Definition: Topology.cpp:832
  int dim() const noexcept
    Topological dimension of the mesh.
    Definition: Topology.cpp:785
  MPI_Comm comm() const
    Mesh MPI communicator.
    Definition: Topology.cpp:1010
  CellType cell_type() const
    Cell type.
    Definition: Topology.cpp:795

dolfinx::MPI
  MPI_Datatype mpi_t
    Retrieves the MPI data type associated to the provided type.
    Definition: MPI.h:280
  std::vector<int> compute_graph_edges_nbx(MPI_Comm comm, std::span<const int> edges, int tag = static_cast<int>(tag::consensus_nbx))
    Determine incoming graph edges using the NBX consensus algorithm.
    Definition: MPI.cpp:162
  constexpr int index_owner(int size, std::size_t index, std::size_t N)
    Return which rank owns index in global range [0, N - 1] (inverse of MPI::local_range).
    Definition: MPI.h:113
  void check_error(MPI_Comm comm, int code)
    Check MPI error code. If the error code is not equal to MPI_SUCCESS, then std::abort is called.
    Definition: MPI.cpp:80
  int size(MPI_Comm comm)
    Definition: MPI.cpp:72

dolfinx::io
  Support for file IO.
  Definition: cells.h:119
  std::pair<std::vector<std::int32_t>, std::vector<T>> distribute_entity_data(const mesh::Topology& topology, std::span<const std::int64_t> nodes_g, std::int64_t num_nodes_g, const fem::ElementDofLayout& cmap_dof_layout, md::mdspan<const std::int32_t, md::dextents<std::size_t, 2>> xdofmap, int entity_dim, md::mdspan<const std::int64_t, md::dextents<std::size_t, 2>> entities, std::span<const T> data)
    Get owned entities and associated data from input entities defined by global 'node' indices.
    Definition: utils.h:60

dolfinx::mesh
  CellType cell_entity_type(CellType type, int d, int index)
    Return type of cell for entity of dimension d at given entity index.
    Definition: cell_types.cpp:64
  CellType
    Cell type identifier.
    Definition: cell_types.h:22
  int cell_num_entities(CellType type, int dim)
    Definition: cell_types.cpp:139

dolfinx
  Top-level namespace.
  Definition: defines.h:12