DOLFINx 0.10.0.0
DOLFINx C++ interface
generation.h
// Copyright (C) 2005-2024 Anders Logg and Garth N. Wells
//
// This file is part of DOLFINx (https://www.fenicsproject.org)
//
// SPDX-License-Identifier: LGPL-3.0-or-later

#pragma once

#include "Mesh.h"
#include "cell_types.h"
#include "utils.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <cmath>
#include <concepts>
#include <cstddef>
#include <cstdint>
#include <dolfinx/common/Timer.h>
#include <dolfinx/graph/ordering.h>
#include <limits>
#include <mpi.h>
#include <stdexcept>
#include <tuple>
#include <utility>
#include <vector>

namespace dolfinx::mesh
{
/// Enum for different diagonal types.
enum class DiagonalType
{
  left,
  right,
  crossed,
  shared_facet,
  left_right,
  right_left
};
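
// Usage (a minimal sketch, assuming MPI has been initialised): the diagonal
// type selects how each grid square is split into triangles, e.g. a crossed
// pattern on the unit square via create_rectangle (defined below):
//
//   auto mesh = dolfinx::mesh::create_rectangle<double>(
//       MPI_COMM_WORLD, {{{0.0, 0.0}, {1.0, 1.0}}}, {8, 8},
//       dolfinx::mesh::CellType::triangle,
//       dolfinx::mesh::DiagonalType::crossed);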

namespace impl
{
template <std::floating_point T>
std::tuple<std::vector<T>, std::vector<std::int64_t>>
create_interval_cells(std::array<T, 2> p, std::int64_t n);

template <std::floating_point T>
Mesh<T> build_tri(MPI_Comm comm, std::array<std::array<T, 2>, 2> p,
                  std::array<std::int64_t, 2> n,
                  const CellPartitionFunction& partitioner,
                  DiagonalType diagonal, const CellReorderFunction& reorder_fn);

template <std::floating_point T>
Mesh<T> build_quad(MPI_Comm comm, const std::array<std::array<T, 2>, 2> p,
                   std::array<std::int64_t, 2> n,
                   const CellPartitionFunction& partitioner,
                   const CellReorderFunction& reorder_fn);

template <std::floating_point T>
std::vector<T> create_geom(MPI_Comm comm, std::array<std::array<T, 3>, 2> p,
                           std::array<std::int64_t, 3> n);

template <std::floating_point T>
Mesh<T> build_tet(MPI_Comm comm, MPI_Comm subcomm,
                  std::array<std::array<T, 3>, 2> p,
                  std::array<std::int64_t, 3> n,
                  const CellPartitionFunction& partitioner,
                  const CellReorderFunction& reorder_fn);

template <std::floating_point T>
Mesh<T> build_hex(MPI_Comm comm, MPI_Comm subcomm,
                  std::array<std::array<T, 3>, 2> p,
                  std::array<std::int64_t, 3> n,
                  const CellPartitionFunction& partitioner,
                  const CellReorderFunction& reorder_fn);

template <std::floating_point T>
Mesh<T> build_prism(MPI_Comm comm, MPI_Comm subcomm,
                    std::array<std::array<T, 3>, 2> p,
                    std::array<std::int64_t, 3> n,
                    const CellPartitionFunction& partitioner,
                    const CellReorderFunction& reorder_fn);
} // namespace impl

/// Create a uniform mesh::Mesh over the rectangular prism (box) spanned
/// by the two corner points p.
template <std::floating_point T = double>
Mesh<T> create_box(MPI_Comm comm, MPI_Comm subcomm,
                   std::array<std::array<T, 3>, 2> p,
                   std::array<std::int64_t, 3> n, CellType celltype,
                   CellPartitionFunction partitioner = nullptr,
                   const CellReorderFunction& reorder_fn = graph::reorder_gps)
{
  if (std::ranges::any_of(n, [](auto e) { return e < 1; }))
    throw std::runtime_error("At least one cell per dimension is required");

  for (std::int32_t i = 0; i < 3; i++)
  {
    if (p[0][i] >= p[1][i])
      throw std::runtime_error("It must hold p[0] < p[1].");
  }

  if (!partitioner and dolfinx::MPI::size(comm) > 1)
    partitioner = create_cell_partitioner();

  switch (celltype)
  {
  case CellType::tetrahedron:
    return impl::build_tet<T>(comm, subcomm, p, n, partitioner, reorder_fn);
  case CellType::hexahedron:
    return impl::build_hex<T>(comm, subcomm, p, n, partitioner, reorder_fn);
  case CellType::prism:
    return impl::build_prism<T>(comm, subcomm, p, n, partitioner, reorder_fn);
  default:
    throw std::runtime_error("Generate box mesh. Wrong cell type");
  }
}
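
// Usage (a minimal sketch, assuming MPI has been initialised): an 8x8x8
// hexahedral mesh of the unit cube, built and distributed over all ranks
// of MPI_COMM_WORLD.
//
//   auto mesh = dolfinx::mesh::create_box<double>(
//       MPI_COMM_WORLD, MPI_COMM_WORLD, {{{0.0, 0.0, 0.0}, {1.0, 1.0, 1.0}}},
//       {8, 8, 8}, dolfinx::mesh::CellType::hexahedron);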

/// Create a uniform mesh::Mesh over the rectangular prism spanned by the
/// two corner points p, using comm both to build and to distribute the mesh.
template <std::floating_point T = double>
Mesh<T> create_box(MPI_Comm comm, std::array<std::array<T, 3>, 2> p,
                   std::array<std::int64_t, 3> n, CellType celltype,
                   const CellPartitionFunction& partitioner = nullptr,
                   const CellReorderFunction& reorder_fn = graph::reorder_gps)
{
  return create_box<T>(comm, comm, p, n, celltype, partitioner, reorder_fn);
}

/// Create a uniform mesh::Mesh over the rectangle spanned by the two
/// corner points p.
template <std::floating_point T = double>
Mesh<T> create_rectangle(MPI_Comm comm, std::array<std::array<T, 2>, 2> p,
                         std::array<std::int64_t, 2> n, CellType celltype,
                         CellPartitionFunction partitioner,
                         DiagonalType diagonal = DiagonalType::right,
                         const CellReorderFunction& reorder_fn
                         = graph::reorder_gps)
{
  if (std::ranges::any_of(n, [](auto e) { return e < 1; }))
    throw std::runtime_error("At least one cell per dimension is required");

  for (std::int32_t i = 0; i < 2; i++)
  {
    if (p[0][i] >= p[1][i])
      throw std::runtime_error("It must hold p[0] < p[1].");
  }

  if (!partitioner and dolfinx::MPI::size(comm) > 1)
    partitioner = create_cell_partitioner();

  switch (celltype)
  {
  case CellType::triangle:
    return impl::build_tri<T>(comm, p, n, partitioner, diagonal, reorder_fn);
  case CellType::quadrilateral:
    return impl::build_quad<T>(comm, p, n, partitioner, reorder_fn);
  default:
    throw std::runtime_error("Generate rectangle mesh. Wrong cell type");
  }
}

/// Create a uniform mesh::Mesh over the rectangle spanned by the two
/// corner points p, using the default partitioner.
template <std::floating_point T = double>
Mesh<T> create_rectangle(MPI_Comm comm, std::array<std::array<T, 2>, 2> p,
                         std::array<std::int64_t, 2> n, CellType celltype,
                         DiagonalType diagonal = DiagonalType::right)
{
  return create_rectangle<T>(comm, p, n, celltype, nullptr, diagonal);
}

/// Create an interval mesh of the 1D line [a, b].
template <std::floating_point T = double>
Mesh<T> create_interval(MPI_Comm comm, std::int64_t n, std::array<T, 2> p,
                        mesh::GhostMode ghost_mode = mesh::GhostMode::none,
                        CellPartitionFunction partitioner = nullptr,
                        const CellReorderFunction& reorder_fn
                        = graph::reorder_gps)
{
  if (n < 1)
    throw std::runtime_error("At least one cell is required.");

  const auto [a, b] = p;
  if (a >= b)
    throw std::runtime_error("It must hold p[0] < p[1].");
  if (std::abs(a - b) < std::numeric_limits<T>::epsilon())
  {
    throw std::runtime_error(
        "Length of interval is zero. Check your dimensions.");
  }

  if (!partitioner and dolfinx::MPI::size(comm) > 1)
    partitioner = create_cell_partitioner(ghost_mode);

  fem::CoordinateElement<T> element(CellType::interval, 1);
  if (dolfinx::MPI::rank(comm) == 0)
  {
    auto [x, cells] = impl::create_interval_cells<T>(p, n);
    return create_mesh(comm, MPI_COMM_SELF, cells, element, MPI_COMM_SELF, x,
                       {x.size(), 1}, partitioner, reorder_fn);
  }
  else
  {
    return create_mesh(comm, MPI_COMM_NULL, {}, element, MPI_COMM_NULL,
                       std::vector<T>{}, {0, 1}, partitioner, reorder_fn);
  }
}
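
// Usage (a minimal sketch, assuming MPI has been initialised): a 10-cell
// mesh of the interval [0, 1].
//
//   auto mesh = dolfinx::mesh::create_interval<double>(MPI_COMM_WORLD, 10,
//                                                      {0.0, 1.0});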

namespace impl
{

template <std::floating_point T>
std::tuple<std::vector<T>, std::vector<std::int64_t>>
create_interval_cells(std::array<T, 2> p, std::int64_t n)
{
  const auto [a, b] = p;

  const T h = (b - a) / static_cast<T>(n);

  // Create vertices
  std::vector<T> x(n + 1);
  std::ranges::generate(x, [i = std::int64_t(0), a, h]() mutable
                        { return a + h * static_cast<T>(i++); });

  // Create intervals -> cells = [0, 1, 1, ..., n-1, n-1, n]
  std::vector<std::int64_t> cells(2 * n);
  for (std::size_t ix = 0; ix < cells.size() / 2; ++ix)
  {
    cells[2 * ix] = ix;
    cells[2 * ix + 1] = ix + 1;
  }

  return {std::move(x), std::move(cells)};
}
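
// For example, create_interval_cells<double>({0.0, 1.0}, 4) yields
// x = [0.0, 0.25, 0.5, 0.75, 1.0] and cells = [0, 1, 1, 2, 2, 3, 3, 4],
// i.e. four cells, each listing its two vertex indices.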

template <std::floating_point T>
std::vector<T> create_geom(MPI_Comm comm, std::array<std::array<T, 3>, 2> p,
                           std::array<std::int64_t, 3> n)
{
  // Extract data
  auto [p0, p1] = p;
  const auto [nx, ny, nz] = n;

  assert(std::ranges::all_of(n, [](auto e) { return e >= 1; }));
  assert(p0 < p1);

  // Structured grid cuboid extents
  const std::array<T, 3> extents = {
      (p1[0] - p0[0]) / static_cast<T>(nx),
      (p1[1] - p0[1]) / static_cast<T>(ny),
      (p1[2] - p0[2]) / static_cast<T>(nz),
  };

  if (std::ranges::any_of(
          extents, [](auto e)
          { return std::abs(e) < 2.0 * std::numeric_limits<T>::epsilon(); }))
  {
    throw std::runtime_error(
        "Box seems to have zero width, height or depth. Check dimensions");
  }

  const std::int64_t n_points = (nx + 1) * (ny + 1) * (nz + 1);
  const auto [range_begin, range_end] = dolfinx::MPI::local_range(
      dolfinx::MPI::rank(comm), n_points, dolfinx::MPI::size(comm));

  std::vector<T> geom;
  geom.reserve((range_end - range_begin) * 3);
  const std::int64_t sqxy = (nx + 1) * (ny + 1);
  for (std::int64_t v = range_begin; v < range_end; ++v)
  {
    // Lexicographic index to spatial index
    const std::int64_t p = v % sqxy;
    std::array<std::int64_t, 3> idx = {p % (nx + 1), p / (nx + 1), v / sqxy};

    // vertex = p0 + idx * extents (elementwise)
    for (std::size_t i = 0; i < idx.size(); i++)
      geom.push_back(p0[i] + static_cast<T>(idx[i]) * extents[i]);
  }

  return geom;
}
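
// Vertices are numbered lexicographically: index v maps to grid coordinates
// (ix, iy, iz) with ix varying fastest. For example, with nx = ny = nz = 2
// there are 27 vertices and v = 14 decodes as p = 14 % 9 = 5, giving
// idx = {5 % 3, 5 / 3, 14 / 9} = {2, 1, 1}.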

template <std::floating_point T>
Mesh<T> build_tet(MPI_Comm comm, MPI_Comm subcomm,
                  std::array<std::array<T, 3>, 2> p,
                  std::array<std::int64_t, 3> n,
                  const CellPartitionFunction& partitioner,
                  const CellReorderFunction& reorder_fn)
{
  common::Timer timer("Build BoxMesh (tetrahedra)");
  std::vector<T> x;
  std::vector<std::int64_t> cells;
  fem::CoordinateElement<T> element(CellType::tetrahedron, 1);
  if (subcomm != MPI_COMM_NULL)
  {
    x = create_geom<T>(subcomm, p, n);

    const auto [nx, ny, nz] = n;
    const std::int64_t n_cells = nx * ny * nz;

    std::array range_c = dolfinx::MPI::local_range(
        dolfinx::MPI::rank(subcomm), n_cells, dolfinx::MPI::size(subcomm));
    cells.reserve(6 * (range_c[1] - range_c[0]) * 4);

    // Create tetrahedra
    for (std::int64_t i = range_c[0]; i < range_c[1]; ++i)
    {
      const std::int64_t iz = i / (nx * ny);
      const std::int64_t j = i % (nx * ny);
      const std::int64_t iy = j / nx;
      const std::int64_t ix = j % nx;
      const std::int64_t v0 = iz * (nx + 1) * (ny + 1) + iy * (nx + 1) + ix;
      const std::int64_t v1 = v0 + 1;
      const std::int64_t v2 = v0 + (nx + 1);
      const std::int64_t v3 = v1 + (nx + 1);
      const std::int64_t v4 = v0 + (nx + 1) * (ny + 1);
      const std::int64_t v5 = v1 + (nx + 1) * (ny + 1);
      const std::int64_t v6 = v2 + (nx + 1) * (ny + 1);
      const std::int64_t v7 = v3 + (nx + 1) * (ny + 1);

      // Split the cuboid with vertices v0..v7 into six tetrahedra, all
      // sharing the main diagonal v0-v7
      cells.insert(cells.end(),
                   {v0, v1, v3, v7, v0, v1, v7, v5, v0, v5, v7, v4,
                    v0, v3, v2, v7, v0, v6, v4, v7, v0, v2, v6, v7});
    }
  }

  return create_mesh(comm, subcomm, cells, element, subcomm, x,
                     {x.size() / 3, 3}, partitioner, reorder_fn);
}
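
// The linear cell index i decodes into grid coordinates in the same way as
// the vertex index above: for nx = ny = 2, cell i = 5 gives iz = 5 / 4 = 1,
// j = 5 % 4 = 1, iy = 1 / 2 = 0 and ix = 1 % 2 = 1.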

template <std::floating_point T>
Mesh<T> build_hex(MPI_Comm comm, MPI_Comm subcomm,
                  std::array<std::array<T, 3>, 2> p,
                  std::array<std::int64_t, 3> n,
                  const CellPartitionFunction& partitioner,
                  const CellReorderFunction& reorder_fn)
{
  common::Timer timer("Build BoxMesh (hexahedra)");
  std::vector<T> x;
  std::vector<std::int64_t> cells;
  fem::CoordinateElement<T> element(CellType::hexahedron, 1);
  if (subcomm != MPI_COMM_NULL)
  {
    x = create_geom<T>(subcomm, p, n);

    // Create cuboids
    const auto [nx, ny, nz] = n;
    const std::int64_t n_cells = nx * ny * nz;
    std::array range_c = dolfinx::MPI::local_range(
        dolfinx::MPI::rank(subcomm), n_cells, dolfinx::MPI::size(subcomm));
    cells.reserve((range_c[1] - range_c[0]) * 8);
    for (std::int64_t i = range_c[0]; i < range_c[1]; ++i)
    {
      const std::int64_t iz = i / (nx * ny);
      const std::int64_t j = i % (nx * ny);
      const std::int64_t iy = j / nx;
      const std::int64_t ix = j % nx;

      const std::int64_t v0 = (iz * (ny + 1) + iy) * (nx + 1) + ix;
      const std::int64_t v1 = v0 + 1;
      const std::int64_t v2 = v0 + (nx + 1);
      const std::int64_t v3 = v1 + (nx + 1);
      const std::int64_t v4 = v0 + (nx + 1) * (ny + 1);
      const std::int64_t v5 = v1 + (nx + 1) * (ny + 1);
      const std::int64_t v6 = v2 + (nx + 1) * (ny + 1);
      const std::int64_t v7 = v3 + (nx + 1) * (ny + 1);
      cells.insert(cells.end(), {v0, v1, v2, v3, v4, v5, v6, v7});
    }
  }

  return create_mesh(comm, subcomm, cells, element, subcomm, x,
                     {x.size() / 3, 3}, partitioner, reorder_fn);
}

template <std::floating_point T>
Mesh<T> build_prism(MPI_Comm comm, MPI_Comm subcomm,
                    std::array<std::array<T, 3>, 2> p,
                    std::array<std::int64_t, 3> n,
                    const CellPartitionFunction& partitioner,
                    const CellReorderFunction& reorder_fn)
{
  std::vector<T> x;
  std::vector<std::int64_t> cells;
  fem::CoordinateElement<T> element(CellType::prism, 1);
  if (subcomm != MPI_COMM_NULL)
  {
    x = create_geom<T>(subcomm, p, n);

    const std::int64_t nx = n[0];
    const std::int64_t ny = n[1];
    const std::int64_t nz = n[2];
    const std::int64_t n_cells = nx * ny * nz;
    std::array range_c = dolfinx::MPI::local_range(
        dolfinx::MPI::rank(subcomm), n_cells, dolfinx::MPI::size(subcomm));
    const std::int64_t cell_range = range_c[1] - range_c[0];

    // Split each cuboid into two prisms
    cells.reserve(2 * cell_range * 6);
    for (std::int64_t i = range_c[0]; i < range_c[1]; ++i)
    {
      const std::int64_t iz = i / (nx * ny);
      const std::int64_t j = i % (nx * ny);
      const std::int64_t iy = j / nx;
      const std::int64_t ix = j % nx;

      const std::int64_t v0 = (iz * (ny + 1) + iy) * (nx + 1) + ix;
      const std::int64_t v1 = v0 + 1;
      const std::int64_t v2 = v0 + (nx + 1);
      const std::int64_t v3 = v1 + (nx + 1);
      const std::int64_t v4 = v0 + (nx + 1) * (ny + 1);
      const std::int64_t v5 = v1 + (nx + 1) * (ny + 1);
      const std::int64_t v6 = v2 + (nx + 1) * (ny + 1);
      const std::int64_t v7 = v3 + (nx + 1) * (ny + 1);
      cells.insert(cells.end(), {v0, v1, v2, v4, v5, v6});
      cells.insert(cells.end(), {v1, v2, v3, v5, v6, v7});
    }
  }

  return create_mesh(comm, subcomm, cells, element, subcomm, x,
                     {x.size() / 3, 3}, partitioner, reorder_fn);
}

template <std::floating_point T>
Mesh<T> build_tri(MPI_Comm comm, std::array<std::array<T, 2>, 2> p,
                  std::array<std::int64_t, 2> n,
                  const CellPartitionFunction& partitioner,
                  DiagonalType diagonal, const CellReorderFunction& reorder_fn)
{
  fem::CoordinateElement<T> element(CellType::triangle, 1);
  if (dolfinx::MPI::rank(comm) == 0)
  {
    const auto [p0, p1] = p;
    const auto [nx, ny] = n;

    const auto [a, c] = p0;
    const auto [b, d] = p1;

    const T ab = (b - a) / static_cast<T>(nx);
    const T cd = (d - c) / static_cast<T>(ny);
    if (std::abs(b - a) < std::numeric_limits<T>::epsilon()
        or std::abs(d - c) < std::numeric_limits<T>::epsilon())
    {
      throw std::runtime_error(
          "Rectangle seems to have zero width or height. Check dimensions");
    }

    // Create vertices and cells
    std::int64_t nv, nc;
    switch (diagonal)
    {
    case DiagonalType::crossed:
      nv = (nx + 1) * (ny + 1) + nx * ny;
      nc = 4 * nx * ny;
      break;
    default:
      nv = (nx + 1) * (ny + 1);
      nc = 2 * nx * ny;
    }

    std::vector<T> x;
    x.reserve(nv * 2);
    std::vector<std::int64_t> cells;
    cells.reserve(nc * 3);

    // Create main vertices
    for (std::int64_t iy = 0; iy <= ny; iy++)
    {
      T x1 = c + cd * static_cast<T>(iy);
      for (std::int64_t ix = 0; ix <= nx; ix++)
        x.insert(x.end(), {a + ab * static_cast<T>(ix), x1});
    }

    // Create midpoint vertices if the mesh type is crossed
    switch (diagonal)
    {
    case DiagonalType::crossed:
      for (std::int64_t iy = 0; iy < ny; iy++)
      {
        T x1 = c + cd * (static_cast<T>(iy) + 0.5);
        for (std::int64_t ix = 0; ix < nx; ix++)
        {
          T x0 = a + ab * (static_cast<T>(ix) + 0.5);
          x.insert(x.end(), {x0, x1});
        }
      }
      break;
    default:
      break;
    }

    // Create triangles
    switch (diagonal)
    {
    case DiagonalType::crossed:
    {
      for (std::int64_t iy = 0; iy < ny; iy++)
      {
        for (std::int64_t ix = 0; ix < nx; ix++)
        {
          std::int64_t v0 = iy * (nx + 1) + ix;
          std::int64_t v1 = v0 + 1;
          std::int64_t v2 = v0 + (nx + 1);
          std::int64_t v3 = v1 + (nx + 1);
          std::int64_t vmid = (nx + 1) * (ny + 1) + iy * nx + ix;

          // Note that v0 < v1 < v2 < v3 < vmid
          cells.insert(cells.end(), {v0, v1, vmid, v0, v2, vmid, v1, v3, vmid,
                                     v2, v3, vmid});
        }
      }
      break;
    }
    default:
    {
      DiagonalType local_diagonal = diagonal;
      for (std::int64_t iy = 0; iy < ny; iy++)
      {
        // Set up alternating diagonal
        switch (diagonal)
        {
        case DiagonalType::right_left:
          if (iy % 2)
            local_diagonal = DiagonalType::right;
          else
            local_diagonal = DiagonalType::left;
          break;
        case DiagonalType::left_right:
          if (iy % 2)
            local_diagonal = DiagonalType::left;
          else
            local_diagonal = DiagonalType::right;
          break;
        default:
          break;
        }
        for (std::int64_t ix = 0; ix < nx; ix++)
        {
          std::int64_t v0 = iy * (nx + 1) + ix;
          std::int64_t v1 = v0 + 1;
          std::int64_t v2 = v0 + (nx + 1);
          std::int64_t v3 = v1 + (nx + 1);
          switch (local_diagonal)
          {
          case DiagonalType::left:
          {
            cells.insert(cells.end(), {v0, v1, v2, v1, v2, v3});
            if (diagonal == DiagonalType::right_left
                or diagonal == DiagonalType::left_right)
            {
              local_diagonal = DiagonalType::right;
            }
            break;
          }
          default:
          {
            cells.insert(cells.end(), {v0, v1, v3, v0, v2, v3});
            if (diagonal == DiagonalType::right_left
                or diagonal == DiagonalType::left_right)
            {
              local_diagonal = DiagonalType::left;
            }
          }
          }
        }
      }
    }
    }

    return create_mesh(comm, MPI_COMM_SELF, cells, element, MPI_COMM_SELF, x,
                       {x.size() / 2, 2}, partitioner, reorder_fn);
  }
  else
  {
    return create_mesh(comm, MPI_COMM_NULL, {}, element, MPI_COMM_NULL,
                       std::vector<T>{}, {0, 2}, partitioner, reorder_fn);
  }
}

template <std::floating_point T>
Mesh<T> build_quad(MPI_Comm comm, const std::array<std::array<T, 2>, 2> p,
                   std::array<std::int64_t, 2> n,
                   const CellPartitionFunction& partitioner,
                   const CellReorderFunction& reorder_fn)
{
  fem::CoordinateElement<T> element(CellType::quadrilateral, 1);
  if (dolfinx::MPI::rank(comm) == 0)
  {
    const auto [nx, ny] = n;
    const auto [a, c] = p[0];
    const auto [b, d] = p[1];

    const T ab = (b - a) / static_cast<T>(nx);
    const T cd = (d - c) / static_cast<T>(ny);

    // Create vertices
    std::vector<T> x;
    x.reserve((nx + 1) * (ny + 1) * 2);
    for (std::int64_t ix = 0; ix <= nx; ix++)
    {
      T x0 = a + ab * static_cast<T>(ix);
      for (std::int64_t iy = 0; iy <= ny; iy++)
        x.insert(x.end(), {x0, c + cd * static_cast<T>(iy)});
    }

    // Create rectangles
    std::vector<std::int64_t> cells;
    cells.reserve(nx * ny * 4);
    for (std::int64_t ix = 0; ix < nx; ix++)
    {
      for (std::int64_t iy = 0; iy < ny; iy++)
      {
        std::int64_t i0 = ix * (ny + 1);
        cells.insert(cells.end(), {i0 + iy, i0 + iy + 1, i0 + iy + ny + 1,
                                   i0 + iy + ny + 2});
      }
    }

    return create_mesh(comm, MPI_COMM_SELF, cells, element, MPI_COMM_SELF, x,
                       {x.size() / 2, 2}, partitioner, reorder_fn);
  }
  else
  {
    return create_mesh(comm, MPI_COMM_NULL, {}, element, MPI_COMM_NULL,
                       std::vector<T>{}, {0, 2}, partitioner, reorder_fn);
  }
}
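
// Here vertices are numbered column-by-column (iy varies fastest), so for
// ny = 2 the cell at (ix, iy) = (1, 0) has i0 = 1 * 3 = 3 and vertex
// indices {3, 4, 6, 7}.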
} // namespace impl
} // namespace dolfinx::mesh