DOLFINx 0.10.0.0
DOLFINx C++ interface
generation.h
1// Copyright (C) 2005-2024 Anders Logg and Garth N. Wells
2//
3// This file is part of DOLFINx (https://www.fenicsproject.org)
4//
5// SPDX-License-Identifier: LGPL-3.0-or-later
6
7#pragma once
8
9#include "Mesh.h"
10#include "cell_types.h"
11#include "utils.h"
12#include <algorithm>
13#include <array>
14#include <cmath>
15#include <concepts>
16#include <cstddef>
17#include <cstdint>
18#include <limits>
19#include <mpi.h>
20#include <stdexcept>
21#include <utility>
22#include <vector>
23
24namespace dolfinx::mesh
25{
27enum class DiagonalType
28{
29 left,
30 right,
31 crossed,
32 shared_facet,
33 left_right,
34 right_left
35};
36
37namespace impl
38{
39template <std::floating_point T>
40std::tuple<std::vector<T>, std::vector<std::int64_t>>
41create_interval_cells(std::array<T, 2> p, std::int64_t n);
42
43template <std::floating_point T>
44Mesh<T> build_tri(MPI_Comm comm, std::array<std::array<T, 2>, 2> p,
45 std::array<std::int64_t, 2> n,
46 const CellPartitionFunction& partitioner,
47 DiagonalType diagonal);
48
49template <std::floating_point T>
50Mesh<T> build_quad(MPI_Comm comm, const std::array<std::array<T, 2>, 2> p,
51 std::array<std::int64_t, 2> n,
52 const CellPartitionFunction& partitioner);
53
54template <std::floating_point T>
55std::vector<T> create_geom(MPI_Comm comm, std::array<std::array<T, 3>, 2> p,
56 std::array<std::int64_t, 3> n);
57
58template <std::floating_point T>
59Mesh<T> build_tet(MPI_Comm comm, MPI_Comm subcomm,
60 std::array<std::array<T, 3>, 2> p,
61 std::array<std::int64_t, 3> n,
62 const CellPartitionFunction& partitioner);
63
64template <std::floating_point T>
65Mesh<T> build_hex(MPI_Comm comm, MPI_Comm subcomm,
66 std::array<std::array<T, 3>, 2> p,
67 std::array<std::int64_t, 3> n,
68 const CellPartitionFunction& partitioner);
69
70template <std::floating_point T>
71Mesh<T> build_prism(MPI_Comm comm, MPI_Comm subcomm,
72 std::array<std::array<T, 3>, 2> p,
73 std::array<std::int64_t, 3> n,
74 const CellPartitionFunction& partitioner);
75} // namespace impl
76
97template <std::floating_point T = double>
98Mesh<T> create_box(MPI_Comm comm, MPI_Comm subcomm,
99 std::array<std::array<T, 3>, 2> p,
100 std::array<std::int64_t, 3> n, CellType celltype,
101 CellPartitionFunction partitioner = nullptr)
102{
103 if (std::ranges::any_of(n, [](auto e) { return e < 1; }))
104 throw std::runtime_error("At least one cell per dimension is required");
105
106 for (int32_t i = 0; i < 3; i++)
107 {
108 if (p[0][i] >= p[1][i])
109 throw std::runtime_error("It must hold p[0] < p[1].");
110 }
111
112 if (!partitioner and dolfinx::MPI::size(comm) > 1)
113 partitioner = create_cell_partitioner();
114
115 switch (celltype)
116 {
117 case CellType::tetrahedron:
118 return impl::build_tet<T>(comm, subcomm, p, n, partitioner);
119 case CellType::hexahedron:
120 return impl::build_hex<T>(comm, subcomm, p, n, partitioner);
121 case CellType::prism:
122 return impl::build_prism<T>(comm, subcomm, p, n, partitioner);
123 default:
124 throw std::runtime_error("Generate box mesh. Wrong cell type");
125 }
126}
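// Example (editorial sketch, not part of the upstream header): build the mesh
// data on a sub-communicator and distribute the final mesh over all ranks of
// MPI_COMM_WORLD. Ranks excluded from the split receive MPI_COMM_NULL, which
// create_box handles by contributing no cells or geometry. Assumes MPI has
// been initialised.
//
//   int rank = dolfinx::MPI::rank(MPI_COMM_WORLD);
//   MPI_Comm subcomm;
//   MPI_Comm_split(MPI_COMM_WORLD, rank % 2 == 0 ? 0 : MPI_UNDEFINED, rank,
//                  &subcomm);
//   auto mesh = dolfinx::mesh::create_box<double>(
//       MPI_COMM_WORLD, subcomm, {{{0.0, 0.0, 0.0}, {1.0, 1.0, 1.0}}},
//       {16, 16, 16}, dolfinx::mesh::CellType::hexahedron);
//   if (subcomm != MPI_COMM_NULL)
//     MPI_Comm_free(&subcomm);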
127
144template <std::floating_point T = double>
145Mesh<T> create_box(MPI_Comm comm, std::array<std::array<T, 3>, 2> p,
146 std::array<std::int64_t, 3> n, CellType celltype,
147 const CellPartitionFunction& partitioner = nullptr)
148{
149 return create_box<T>(comm, comm, p, n, celltype, partitioner);
150}
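// Example (editorial sketch): tetrahedral mesh of the unit cube with an
// 8 x 8 x 8 cell grid, built and distributed over MPI_COMM_WORLD.
//
//   auto mesh = dolfinx::mesh::create_box<double>(
//       MPI_COMM_WORLD, {{{0.0, 0.0, 0.0}, {1.0, 1.0, 1.0}}}, {8, 8, 8},
//       dolfinx::mesh::CellType::tetrahedron);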
151
168template <std::floating_point T = double>
169Mesh<T> create_rectangle(MPI_Comm comm, std::array<std::array<T, 2>, 2> p,
170 std::array<std::int64_t, 2> n, CellType celltype,
171 CellPartitionFunction partitioner,
172 DiagonalType diagonal = DiagonalType::right)
173{
174 if (std::ranges::any_of(n, [](auto e) { return e < 1; }))
175 throw std::runtime_error("At least one cell per dimension is required");
176
177 for (int32_t i = 0; i < 2; i++)
178 {
179 if (p[0][i] >= p[1][i])
180 throw std::runtime_error("It must hold p[0] < p[1].");
181 }
182
183 if (!partitioner and dolfinx::MPI::size(comm) > 1)
184 partitioner = create_cell_partitioner();
185
186 switch (celltype)
187 {
188 case CellType::triangle:
189 return impl::build_tri<T>(comm, p, n, partitioner, diagonal);
190 case CellType::quadrilateral:
191 return impl::build_quad<T>(comm, p, n, partitioner);
192 default:
193 throw std::runtime_error("Generate rectangle mesh. Wrong cell type");
194 }
195}
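// Example (editorial sketch): crossed-diagonal triangle mesh of [0, 2] x [0, 1]
// with an explicitly constructed cell partitioner. The choice of
// GhostMode::shared_facet here is an assumption of the sketch, not a
// requirement of create_rectangle.
//
//   auto mesh = dolfinx::mesh::create_rectangle<double>(
//       MPI_COMM_WORLD, {{{0.0, 0.0}, {2.0, 1.0}}}, {32, 16},
//       dolfinx::mesh::CellType::triangle,
//       dolfinx::mesh::create_cell_partitioner(
//           dolfinx::mesh::GhostMode::shared_facet),
//       dolfinx::mesh::DiagonalType::crossed);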
196
211template <std::floating_point T = double>
212Mesh<T> create_rectangle(MPI_Comm comm, std::array<std::array<T, 2>, 2> p,
213 std::array<std::int64_t, 2> n, CellType celltype,
214 DiagonalType diagonal = DiagonalType::right)
215{
216 return create_rectangle<T>(comm, p, n, celltype, nullptr, diagonal);
217}
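// Example (editorial sketch): quadrilateral mesh of the unit square using this
// convenience overload; the diagonal argument has no effect for
// quadrilateral cells.
//
//   auto mesh = dolfinx::mesh::create_rectangle<double>(
//       MPI_COMM_WORLD, {{{0.0, 0.0}, {1.0, 1.0}}}, {10, 10},
//       dolfinx::mesh::CellType::quadrilateral);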
218
232template <std::floating_point T = double>
233Mesh<T> create_interval(MPI_Comm comm, std::int64_t n, std::array<T, 2> p,
234 mesh::GhostMode ghost_mode = mesh::GhostMode::none,
235 CellPartitionFunction partitioner = nullptr)
236{
237 if (n < 1)
238 throw std::runtime_error("At least one cell is required.");
239
240 const auto [a, b] = p;
241 if (a >= b)
242 throw std::runtime_error("It must hold p[0] < p[1].");
243 if (std::abs(a - b) < std::numeric_limits<T>::epsilon())
244 {
245 throw std::runtime_error(
246 "Length of interval is zero. Check your dimensions.");
247 }
248
249 if (!partitioner and dolfinx::MPI::size(comm) > 1)
250 partitioner = create_cell_partitioner(ghost_mode);
251
252 fem::CoordinateElement<T> element(CellType::interval, 1);
253 if (dolfinx::MPI::rank(comm) == 0)
254 {
255 auto [x, cells] = impl::create_interval_cells<T>(p, n);
256 return create_mesh(comm, MPI_COMM_SELF, cells, element, MPI_COMM_SELF, x,
257 {x.size(), 1}, partitioner);
258 }
259 else
260 {
261 return create_mesh(comm, MPI_COMM_NULL, {}, element, MPI_COMM_NULL,
262 std::vector<T>{}, {0, 1}, partitioner);
263 }
264}
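// Example (editorial sketch): interval mesh of [0, 1] with 100 cells and
// facet ghosting across process boundaries.
//
//   auto mesh = dolfinx::mesh::create_interval<double>(
//       MPI_COMM_WORLD, 100, {0.0, 1.0},
//       dolfinx::mesh::GhostMode::shared_facet);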
265
266namespace impl
267{
268
269template <std::floating_point T>
270std::tuple<std::vector<T>, std::vector<std::int64_t>>
271create_interval_cells(std::array<T, 2> p, std::int64_t n)
272{
273 const auto [a, b] = p;
274
275 const T h = (b - a) / static_cast<T>(n);
276
277 // Create vertices
278 std::vector<T> x(n + 1);
279 std::ranges::generate(x, [i = std::int64_t(0), a, h]() mutable
280 { return a + h * static_cast<T>(i++); });
281
282 // Create intervals -> cells=[0, 1, 1, ..., n-1, n-1, n]
283 std::vector<std::int64_t> cells(2 * n);
284 for (std::size_t ix = 0; ix < cells.size() / 2; ++ix)
285 {
286 cells[2 * ix] = ix;
287 cells[2 * ix + 1] = ix + 1;
288 }
289
290 return {std::move(x), std::move(cells)};
291}
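// Illustration (editorial note): for p = {0.0, 1.0} and n = 3 this helper
// returns x = [0, 1/3, 2/3, 1] and cells = [0, 1, 1, 2, 2, 3], i.e. the
// flattened vertex pairs of the three interval cells.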
292
293template <std::floating_point T>
294std::vector<T> create_geom(MPI_Comm comm, std::array<std::array<T, 3>, 2> p,
295 std::array<std::int64_t, 3> n)
296{
297 // Extract data
298 auto [p0, p1] = p;
299 const auto [nx, ny, nz] = n;
300
301 assert(std::ranges::all_of(n, [](auto e) { return e >= 1; }));
302 assert(p0 < p1);
303
304 // Structured grid cuboid extents
305 const std::array<T, 3> extents = {
306 (p1[0] - p0[0]) / static_cast<T>(nx),
307 (p1[1] - p0[1]) / static_cast<T>(ny),
308 (p1[2] - p0[2]) / static_cast<T>(nz),
309 };
310
311 if (std::ranges::any_of(
312 extents, [](auto e)
313 { return std::abs(e) < 2.0 * std::numeric_limits<T>::epsilon(); }))
314 {
315 throw std::runtime_error(
316 "Box seems to have zero width, height or depth. Check dimensions");
317 }
318
319 const std::int64_t n_points = (nx + 1) * (ny + 1) * (nz + 1);
320 const auto [range_begin, range_end] = dolfinx::MPI::local_range(
321 dolfinx::MPI::rank(comm), n_points, dolfinx::MPI::size(comm));
322
323 std::vector<T> geom;
324 geom.reserve((range_end - range_begin) * 3);
325 const std::int64_t sqxy = (nx + 1) * (ny + 1);
326 for (std::int64_t v = range_begin; v < range_end; ++v)
327 {
328 // lexicographic index to spatial index
329 const std::int64_t p = v % sqxy;
330 std::array<std::int64_t, 3> idx = {p % (nx + 1), p / (nx + 1), v / sqxy};
331
332 // vertex = p0 + idx * extents (elementwise)
333 for (std::size_t i = 0; i < idx.size(); i++)
334 geom.push_back(p0[i] + static_cast<T>(idx[i]) * extents[i]);
335 }
336
337 return geom;
338}
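// Illustration (editorial note): vertices are laid out lexicographically with
// x fastest, then y, then z, so a global vertex index v maps to grid indices
//
//   ix = (v % ((nx + 1) * (ny + 1))) % (nx + 1)
//   iy = (v % ((nx + 1) * (ny + 1))) / (nx + 1)
//   iz =  v / ((nx + 1) * (ny + 1))
//
// For example, with nx = ny = nz = 2, v = 14 gives (ix, iy, iz) = (2, 1, 1).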
339
340template <std::floating_point T>
341Mesh<T> build_tet(MPI_Comm comm, MPI_Comm subcomm,
342 std::array<std::array<T, 3>, 2> p,
343 std::array<std::int64_t, 3> n,
344 const CellPartitionFunction& partitioner)
345{
346 common::Timer timer("Build BoxMesh (tetrahedra)");
347 std::vector<T> x;
348 std::vector<std::int64_t> cells;
349 fem::CoordinateElement<T> element(CellType::tetrahedron, 1);
350 if (subcomm != MPI_COMM_NULL)
351 {
352 x = create_geom<T>(subcomm, p, n);
353
354 const auto [nx, ny, nz] = n;
355 const std::int64_t n_cells = nx * ny * nz;
356
357 std::array range_c = dolfinx::MPI::local_range(
358 dolfinx::MPI::rank(subcomm), n_cells, dolfinx::MPI::size(subcomm));
359 cells.reserve(6 * (range_c[1] - range_c[0]) * 4);
360
361 // Create tetrahedra
362 for (std::int64_t i = range_c[0]; i < range_c[1]; ++i)
363 {
364 const std::int64_t iz = i / (nx * ny);
365 const std::int64_t j = i % (nx * ny);
366 const std::int64_t iy = j / nx;
367 const std::int64_t ix = j % nx;
368 const std::int64_t v0 = iz * (nx + 1) * (ny + 1) + iy * (nx + 1) + ix;
369 const std::int64_t v1 = v0 + 1;
370 const std::int64_t v2 = v0 + (nx + 1);
371 const std::int64_t v3 = v1 + (nx + 1);
372 const std::int64_t v4 = v0 + (nx + 1) * (ny + 1);
373 const std::int64_t v5 = v1 + (nx + 1) * (ny + 1);
374 const std::int64_t v6 = v2 + (nx + 1) * (ny + 1);
375 const std::int64_t v7 = v3 + (nx + 1) * (ny + 1);
376
377 // Split the cuboid into six tetrahedra that share the v0-v7 diagonal
378 cells.insert(cells.end(),
379 {v0, v1, v3, v7, v0, v1, v7, v5, v0, v5, v7, v4,
380 v0, v3, v2, v7, v0, v6, v4, v7, v0, v2, v6, v7});
381 }
382 }
383
384 return create_mesh(comm, subcomm, cells, element, subcomm, x,
385 {x.size() / 3, 3}, partitioner);
386}
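// Illustration (editorial note): the eight corner indices v0..v7 of each grid
// cuboid are numbered lexicographically (x fastest, then y, then z), and the
// cuboid is cut into six tetrahedra that all contain the main diagonal v0-v7,
// so faces match between neighbouring cuboids.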
387
388template <std::floating_point T>
389mesh::Mesh<T> build_hex(MPI_Comm comm, MPI_Comm subcomm,
390 std::array<std::array<T, 3>, 2> p,
391 std::array<std::int64_t, 3> n,
392 const CellPartitionFunction& partitioner)
393{
394 common::Timer timer("Build BoxMesh (hexahedra)");
395 std::vector<T> x;
396 std::vector<std::int64_t> cells;
397 fem::CoordinateElement<T> element(CellType::hexahedron, 1);
398 if (subcomm != MPI_COMM_NULL)
399 {
400 x = create_geom<T>(subcomm, p, n);
401
402 // Create cuboids
403 const auto [nx, ny, nz] = n;
404 const std::int64_t n_cells = nx * ny * nz;
405 std::array range_c = dolfinx::MPI::local_range(
406 dolfinx::MPI::rank(subcomm), n_cells, dolfinx::MPI::size(subcomm));
407 cells.reserve((range_c[1] - range_c[0]) * 8);
408 for (std::int64_t i = range_c[0]; i < range_c[1]; ++i)
409 {
410 const std::int64_t iz = i / (nx * ny);
411 const std::int64_t j = i % (nx * ny);
412 const std::int64_t iy = j / nx;
413 const std::int64_t ix = j % nx;
414
415 const std::int64_t v0 = (iz * (ny + 1) + iy) * (nx + 1) + ix;
416 const std::int64_t v1 = v0 + 1;
417 const std::int64_t v2 = v0 + (nx + 1);
418 const std::int64_t v3 = v1 + (nx + 1);
419 const std::int64_t v4 = v0 + (nx + 1) * (ny + 1);
420 const std::int64_t v5 = v1 + (nx + 1) * (ny + 1);
421 const std::int64_t v6 = v2 + (nx + 1) * (ny + 1);
422 const std::int64_t v7 = v3 + (nx + 1) * (ny + 1);
423 cells.insert(cells.end(), {v0, v1, v2, v3, v4, v5, v6, v7});
424 }
425 }
426
427 return create_mesh(comm, subcomm, cells, element, subcomm, x,
428 {x.size() / 3, 3}, partitioner);
429}
430
431template <std::floating_point T>
432Mesh<T> build_prism(MPI_Comm comm, MPI_Comm subcomm,
433 std::array<std::array<T, 3>, 2> p,
434 std::array<std::int64_t, 3> n,
435 const CellPartitionFunction& partitioner)
436{
437 std::vector<T> x;
438 std::vector<std::int64_t> cells;
439 fem::CoordinateElement<T> element(CellType::prism, 1);
440 if (subcomm != MPI_COMM_NULL)
441 {
442 x = create_geom<T>(subcomm, p, n);
443
444 const std::int64_t nx = n[0];
445 const std::int64_t ny = n[1];
446 const std::int64_t nz = n[2];
447 const std::int64_t n_cells = nx * ny * nz;
448 std::array range_c = dolfinx::MPI::local_range(
449 dolfinx::MPI::rank(subcomm), n_cells, dolfinx::MPI::size(subcomm));
450 const std::int64_t cell_range = range_c[1] - range_c[0];
451
452 // Create prisms (two per grid cuboid)
453 cells.reserve(2 * cell_range * 6);
454 for (std::int64_t i = range_c[0]; i < range_c[1]; ++i)
455 {
456 const std::int64_t iz = i / (nx * ny);
457 const std::int64_t j = i % (nx * ny);
458 const std::int64_t iy = j / nx;
459 const std::int64_t ix = j % nx;
460
461 const std::int64_t v0 = (iz * (ny + 1) + iy) * (nx + 1) + ix;
462 const std::int64_t v1 = v0 + 1;
463 const std::int64_t v2 = v0 + (nx + 1);
464 const std::int64_t v3 = v1 + (nx + 1);
465 const std::int64_t v4 = v0 + (nx + 1) * (ny + 1);
466 const std::int64_t v5 = v1 + (nx + 1) * (ny + 1);
467 const std::int64_t v6 = v2 + (nx + 1) * (ny + 1);
468 const std::int64_t v7 = v3 + (nx + 1) * (ny + 1);
469 cells.insert(cells.end(), {v0, v1, v2, v4, v5, v6});
470 cells.insert(cells.end(), {v1, v2, v3, v5, v6, v7});
471 }
472 }
473
474 return create_mesh(comm, subcomm, cells, element, subcomm, x,
475 {x.size() / 3, 3}, partitioner);
476}
477
478template <std::floating_point T>
479Mesh<T> build_tri(MPI_Comm comm, std::array<std::array<T, 2>, 2> p,
480 std::array<std::int64_t, 2> n,
481 const CellPartitionFunction& partitioner,
482 DiagonalType diagonal)
483{
484 fem::CoordinateElement<T> element(CellType::triangle, 1);
485 if (dolfinx::MPI::rank(comm) == 0)
486 {
487 const auto [p0, p1] = p;
488 const auto [nx, ny] = n;
489
490 const auto [a, c] = p0;
491 const auto [b, d] = p1;
492
493 const T ab = (b - a) / static_cast<T>(nx);
494 const T cd = (d - c) / static_cast<T>(ny);
495 if (std::abs(b - a) < std::numeric_limits<T>::epsilon()
496 or std::abs(d - c) < std::numeric_limits<T>::epsilon())
497 {
498 throw std::runtime_error(
499 "Rectangle seems to have zero width or height. Check dimensions");
500 }
501
502 // Create vertices and cells
503 std::int64_t nv, nc;
504 switch (diagonal)
505 {
506 case DiagonalType::crossed:
507 nv = (nx + 1) * (ny + 1) + nx * ny;
508 nc = 4 * nx * ny;
509 break;
510 default:
511 nv = (nx + 1) * (ny + 1);
512 nc = 2 * nx * ny;
513 }
514
515 std::vector<T> x;
516 x.reserve(nv * 2);
517 std::vector<std::int64_t> cells;
518 cells.reserve(nc * 3);
519
520 // Create main vertices
522 for (std::int64_t iy = 0; iy <= ny; iy++)
523 {
524 T x1 = c + cd * static_cast<T>(iy);
525 for (std::int64_t ix = 0; ix <= nx; ix++)
526 x.insert(x.end(), {a + ab * static_cast<T>(ix), x1});
527 }
528
529 // Create midpoint vertices if the mesh type is crossed
530 switch (diagonal)
531 {
532 case DiagonalType::crossed:
533 for (std::int64_t iy = 0; iy < ny; iy++)
534 {
535 T x1 = c + cd * (static_cast<T>(iy) + 0.5);
536 for (std::int64_t ix = 0; ix < nx; ix++)
537 {
538 T x0 = a + ab * (static_cast<T>(ix) + 0.5);
539 x.insert(x.end(), {x0, x1});
540 }
541 }
542 break;
543 default:
544 break;
545 }
546
547 // Create triangles
548 switch (diagonal)
549 {
550 case DiagonalType::crossed:
551 {
552 for (std::int64_t iy = 0; iy < ny; iy++)
553 {
554 for (std::int64_t ix = 0; ix < nx; ix++)
555 {
556 std::int64_t v0 = iy * (nx + 1) + ix;
557 std::int64_t v1 = v0 + 1;
558 std::int64_t v2 = v0 + (nx + 1);
559 std::int64_t v3 = v1 + (nx + 1);
560 std::int64_t vmid = (nx + 1) * (ny + 1) + iy * nx + ix;
561
562 // Note that v0 < v1 < v2 < v3 < vmid
563 cells.insert(cells.end(), {v0, v1, vmid, v0, v2, vmid, v1, v3, vmid,
564 v2, v3, vmid});
565 }
566 }
567 break;
568 }
569 default:
570 {
571 DiagonalType local_diagonal = diagonal;
572 for (std::int64_t iy = 0; iy < ny; iy++)
573 {
574 // Set up alternating diagonal
575 switch (diagonal)
576 {
577 case DiagonalType::right_left:
578 if (iy % 2)
579 local_diagonal = DiagonalType::right;
580 else
581 local_diagonal = DiagonalType::left;
582 break;
583 case DiagonalType::left_right:
584 if (iy % 2)
585 local_diagonal = DiagonalType::left;
586 else
587 local_diagonal = DiagonalType::right;
588 break;
589 default:
590 break;
591 }
592 for (std::int64_t ix = 0; ix < nx; ix++)
593 {
594 std::int64_t v0 = iy * (nx + 1) + ix;
595 std::int64_t v1 = v0 + 1;
596 std::int64_t v2 = v0 + (nx + 1);
597 std::int64_t v3 = v1 + (nx + 1);
598 switch (local_diagonal)
599 {
600 case DiagonalType::left:
601 {
602 cells.insert(cells.end(), {v0, v1, v2, v1, v2, v3});
603 if (diagonal == DiagonalType::right_left
604 or diagonal == DiagonalType::left_right)
605 {
606 local_diagonal = DiagonalType::right;
607 }
608 break;
609 }
610 default:
611 {
612 cells.insert(cells.end(), {v0, v1, v3, v0, v2, v3});
613 if (diagonal == DiagonalType::right_left
614 or diagonal == DiagonalType::left_right)
615 {
616 local_diagonal = DiagonalType::left;
617 }
618 }
619 }
620 }
621 }
622 }
623 }
624
625 return create_mesh(comm, MPI_COMM_SELF, cells, element, MPI_COMM_SELF, x,
626 {x.size() / 2, 2}, partitioner);
627 }
628 else
629 {
630 return create_mesh(comm, MPI_COMM_NULL, {}, element, MPI_COMM_NULL,
631 std::vector<T>{}, {0, 2}, partitioner);
632 }
633}
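// Illustration (editorial note): for each grid square with corners
// v0 (bottom-left), v1 (bottom-right), v2 (top-left) and v3 (top-right),
// DiagonalType::right splits along v0-v3 into (v0, v1, v3) and (v0, v2, v3),
// DiagonalType::left splits along v1-v2 into (v0, v1, v2) and (v1, v2, v3),
// left_right/right_left alternate the split per row and per cell, and
// crossed adds a midpoint vertex and four triangles per square.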
634
635template <std::floating_point T>
636Mesh<T> build_quad(MPI_Comm comm, const std::array<std::array<T, 2>, 2> p,
637 std::array<std::int64_t, 2> n,
638 const CellPartitionFunction& partitioner)
639{
640 fem::CoordinateElement<T> element(CellType::quadrilateral, 1);
641 if (dolfinx::MPI::rank(comm) == 0)
642 {
643 const auto [nx, ny] = n;
644 const auto [a, c] = p[0];
645 const auto [b, d] = p[1];
646
647 const T ab = (b - a) / static_cast<T>(nx);
648 const T cd = (d - c) / static_cast<T>(ny);
649
650 // Create vertices
651 std::vector<T> x;
652 x.reserve((nx + 1) * (ny + 1) * 2);
654 for (std::int64_t ix = 0; ix <= nx; ix++)
655 {
656 T x0 = a + ab * static_cast<T>(ix);
657 for (std::int64_t iy = 0; iy <= ny; iy++)
658 x.insert(x.end(), {x0, c + cd * static_cast<T>(iy)});
659 }
660
661 // Create rectangles
662 std::vector<std::int64_t> cells;
663 cells.reserve(nx * ny * 4);
664 for (std::int64_t ix = 0; ix < nx; ix++)
665 {
666 for (std::int64_t iy = 0; iy < ny; iy++)
667 {
668 std::int64_t i0 = ix * (ny + 1);
669 cells.insert(cells.end(), {i0 + iy, i0 + iy + 1, i0 + iy + ny + 1,
670 i0 + iy + ny + 2});
671 }
672 }
673
674 return create_mesh(comm, MPI_COMM_SELF, cells, element, MPI_COMM_SELF, x,
675 {x.size() / 2, 2}, partitioner);
676 }
677 else
678 {
679 return create_mesh(comm, MPI_COMM_NULL, {}, element, MPI_COMM_NULL,
680 std::vector<T>{}, {0, 2}, partitioner);
681 }
682}
683} // namespace impl
684} // namespace dolfinx::mesh