DOLFINx 0.7.3
DOLFINx C++ interface
MatrixCSR.h
// Copyright (C) 2021-2022 Garth N. Wells and Chris N. Richardson
//
// This file is part of DOLFINx (https://www.fenicsproject.org)
//
// SPDX-License-Identifier: LGPL-3.0-or-later

#pragma once

#include "SparsityPattern.h"
#include "matrix_csr_impl.h"
#include <dolfinx/common/IndexMap.h>
#include <dolfinx/common/MPI.h>
#include <dolfinx/graph/AdjacencyList.h>
#include <mpi.h>
#include <numeric>
#include <span>
#include <utility>
#include <vector>
namespace dolfinx::la
{
/// Modes for representing block structured matrices
enum class BlockMode : int
{
  compact = 0,  ///< Each entry in the sparsity pattern of the matrix
                ///< refers to a block of data of size (bs[0], bs[1])
  expanded = 1  ///< The sparsity pattern is expanded by the block size,
                ///< so each entry refers to a single data item
};

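// Example (a sketch, not from this header): constructing a matrix in
// expanded mode, which unrolls the block structure so that the stored
// matrix has block size (1, 1). `pattern` is an assumed, already
// assembled la::SparsityPattern with block size > 1.
//
//   la::MatrixCSR<double> A(pattern, la::BlockMode::expanded);
//   assert(A.block_size()[0] == 1 and A.block_size()[1] == 1);
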
/// Distributed sparse matrix
template <class Scalar, class Container = std::vector<Scalar>,
          class ColContainer = std::vector<std::int32_t>,
          class RowPtrContainer = std::vector<std::int64_t>>
class MatrixCSR
{
  static_assert(std::is_same_v<typename Container::value_type, Scalar>);

public:
  /// Scalar type
  using value_type = Scalar;

  /// Matrix entries container type
  using container_type = Container;

  /// Column index container type
  using column_container_type = ColContainer;

  /// Row pointer container type
  using rowptr_container_type = RowPtrContainer;

  static_assert(std::is_same_v<value_type, typename container_type::value_type>,
                "Scalar type and container value type must be the same.");

  /// Insertion functor for setting values in a matrix. It is typically
  /// used in finite element assembly functions.
  template <int BS0 = 1, int BS1 = 1>
  auto mat_set_values()
  {
    if ((BS0 != _bs[0] and BS0 > 1 and _bs[0] > 1)
        or (BS1 != _bs[1] and BS1 > 1 and _bs[1] > 1))
    {
      throw std::runtime_error(
          "Cannot insert blocks of different size than matrix block size");
    }

    return [&](std::span<const std::int32_t> rows,
               std::span<const std::int32_t> cols,
               std::span<const value_type> data) -> int
    {
      this->set<BS0, BS1>(data, rows, cols);
      return 0;
    };
  }

  /// Insertion functor for adding values to a matrix. It is typically
  /// used in finite element assembly functions.
  template <int BS0 = 1, int BS1 = 1>
  auto mat_add_values()
  {
    if ((BS0 != _bs[0] and BS0 > 1 and _bs[0] > 1)
        or (BS1 != _bs[1] and BS1 > 1 and _bs[1] > 1))
    {
      throw std::runtime_error(
          "Cannot insert blocks of different size than matrix block size");
    }

    return [&](std::span<const std::int32_t> rows,
               std::span<const std::int32_t> cols,
               std::span<const value_type> data) -> int
    {
      this->add<BS0, BS1>(data, rows, cols);
      return 0;
    };
  }

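  // Example (a sketch, not from this header): the functors returned by
  // mat_set_values() and mat_add_values() are typically passed to an
  // assembly routine as its insertion callback. `A`, the form `a` and
  // the Dirichlet condition `bc` are assumed to exist.
  //
  //   fem::assemble_matrix(A.mat_add_values(), a, {bc});
  //   A.scatter_rev(); // send ghost-row contributions to their owners
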
  /// Create a distributed matrix
  MatrixCSR(const SparsityPattern& p, BlockMode mode = BlockMode::compact);

  /// Move constructor
  MatrixCSR(MatrixCSR&& A) = default;

  /// Set all non-zero local entries to a value, including entries in
  /// ghost rows
  void set(value_type x) { std::fill(_data.begin(), _data.end(), x); }

  /// Set values in the matrix
  template <int BS0, int BS1>
  void set(std::span<const value_type> x, std::span<const std::int32_t> rows,
           std::span<const std::int32_t> cols)
  {
    auto set_fn = [](value_type& y, const value_type& x) { y = x; };

    std::int32_t num_rows
        = _index_maps[0]->size_local() + _index_maps[0]->num_ghosts();
    assert(x.size() == rows.size() * cols.size() * BS0 * BS1);
    if (_bs[0] == BS0 and _bs[1] == BS1)
    {
      impl::insert_csr<BS0, BS1>(_data, _cols, _row_ptr, x, rows, cols, set_fn,
                                 num_rows);
    }
    else if (_bs[0] == 1 and _bs[1] == 1)
    {
      // Set blocked data (BS0, BS1) in a regular CSR matrix
      // (_bs[0] = _bs[1] = 1) with the correct sparsity
      impl::insert_blocked_csr<BS0, BS1>(_data, _cols, _row_ptr, x, rows, cols,
                                         set_fn, num_rows);
    }
    else
    {
      assert(BS0 == 1 and BS1 == 1);
      // Set non-blocked data (BS0 = BS1 = 1) in a blocked CSR matrix
      impl::insert_nonblocked_csr(_data, _cols, _row_ptr, x, rows, cols, set_fn,
                                  num_rows, _bs[0], _bs[1]);
    }
  }

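  // Example (a sketch): set a single scalar entry at local row 0,
  // column 0 of an assumed MatrixCSR<double> `A`. The entry must exist
  // in the sparsity pattern.
  //
  //   std::array<std::int32_t, 1> rows{0}, cols{0};
  //   std::array<double, 1> vals{10.0};
  //   A.set<1, 1>(vals, rows, cols);
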
  /// Accumulate values in the matrix
  template <int BS0 = 1, int BS1 = 1>
  void add(std::span<const value_type> x, std::span<const std::int32_t> rows,
           std::span<const std::int32_t> cols)
  {
    auto add_fn = [](value_type& y, const value_type& x) { y += x; };

    assert(x.size() == rows.size() * cols.size() * BS0 * BS1);
    if (_bs[0] == BS0 and _bs[1] == BS1)
    {
      impl::insert_csr<BS0, BS1>(_data, _cols, _row_ptr, x, rows, cols, add_fn,
                                 _row_ptr.size());
    }
    else if (_bs[0] == 1 and _bs[1] == 1)
    {
      // Add blocked data (BS0, BS1) to a regular CSR matrix
      // (_bs[0] = _bs[1] = 1)
      impl::insert_blocked_csr<BS0, BS1>(_data, _cols, _row_ptr, x, rows, cols,
                                         add_fn, _row_ptr.size());
    }
    else
    {
      assert(BS0 == 1 and BS1 == 1);
      // Add non-blocked data (BS0 = BS1 = 1) to a blocked CSR matrix
      impl::insert_nonblocked_csr(_data, _cols, _row_ptr, x, rows, cols, add_fn,
                                  _row_ptr.size(), _bs[0], _bs[1]);
    }
  }

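  // Example (a sketch): accumulate a 2x2 set of scalar values into
  // rows {0, 1} and columns {0, 1} of an assumed MatrixCSR<double>
  // `A`. The data span is row-major.
  //
  //   std::array<std::int32_t, 2> rows{0, 1}, cols{0, 1};
  //   std::array<double, 4> vals{1.0, 2.0, 3.0, 4.0};
  //   A.add(vals, rows, cols);
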
  /// Number of local rows, excluding ghost rows
  std::int32_t num_owned_rows() const { return _index_maps[0]->size_local(); }

  /// Number of local rows, including ghost rows
  std::int32_t num_all_rows() const { return _row_ptr.size() - 1; }

  /// Copy to a dense matrix
  std::vector<value_type> to_dense() const;

  /// Transfer ghost row data to the owning ranks, accumulating
  /// received values on the owned rows and zeroing the ghost rows
  void scatter_rev()
  {
    scatter_rev_begin();
    scatter_rev_end();
  }

  /// Begin transfer of ghost row data to the owning ranks, where it
  /// will be accumulated into existing owned rows
  void scatter_rev_begin();

  /// End transfer of ghost row data to the owning ranks
  void scatter_rev_end();

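  // Example (a sketch): the begin/end split allows communication to
  // overlap with unrelated local computation.
  //
  //   A.scatter_rev_begin();
  //   // ... unrelated local work while ghost values are in flight ...
  //   A.scatter_rev_end();
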
  /// Compute the Frobenius norm squared across all processes
  double squared_norm() const;

  /// Index maps for the row and column space
  std::shared_ptr<const common::IndexMap> index_map(int dim) const
  {
    return _index_maps.at(dim);
  }

  /// Get local data values
  container_type& values() { return _data; }

  /// Get local values (const version)
  const container_type& values() const { return _data; }

  /// Get local row pointers
  const rowptr_container_type& row_ptr() const { return _row_ptr; }

  /// Get local column indices
  const column_container_type& cols() const { return _cols; }

  /// Get the start of the off-diagonal (unowned columns) part of each
  /// row, allowing the matrix to be split into owned-column and
  /// unowned-column blocks
  const rowptr_container_type& off_diag_offset() const
  {
    return _off_diagonal_offset;
  }

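  // Example (a sketch): off_diag_offset() supports a matrix-vector
  // product that processes owned columns first and defers columns that
  // depend on ghost data. Block size (1, 1) and vectors `x`, `y` are
  // assumed.
  //
  //   for (std::int32_t i = 0; i < A.num_owned_rows(); ++i)
  //     for (auto j = A.row_ptr()[i]; j < A.off_diag_offset()[i]; ++j)
  //       y[i] += A.values()[j] * x[A.cols()[j]];
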
  /// Block size
  std::array<int, 2> block_size() const { return _bs; }

private:
  // Maps for the distribution of the rows and columns
  std::array<std::shared_ptr<const common::IndexMap>, 2> _index_maps;

  // Block mode (compact or expanded)
  BlockMode _block_mode;

  // Block sizes
  std::array<int, 2> _bs;

  // Matrix data
  container_type _data;
  column_container_type _cols;
  rowptr_container_type _row_ptr;

  // Start of off-diagonal (unowned columns) on each row
  rowptr_container_type _off_diagonal_offset;

  // Neighborhood communicator (ghost->owner communicator for rows)
  dolfinx::MPI::Comm _comm;

  // -- Precomputed data for scatter_rev/update

  // Request in non-blocking communication
  MPI_Request _request;

  // Position in _data to add received data
  std::vector<int> _unpack_pos;

  // Displacements for alltoall for each neighbor when sending and
  // receiving
  std::vector<int> _val_send_disp, _val_recv_disp;

  // Ownership of each row, by neighbor (for the neighbourhood defined
  // on _comm)
  std::vector<int> _ghost_row_to_rank;

  // Temporary stores for data during non-blocking communication
  container_type _ghost_value_data;
  container_type _ghost_value_data_in;
};
//-----------------------------------------------------------------------------
template <class U, class V, class W, class X>
MatrixCSR<U, V, W, X>::MatrixCSR(const SparsityPattern& p, BlockMode mode)
    : _index_maps({p.index_map(0),
                   std::make_shared<common::IndexMap>(p.column_index_map())}),
      _block_mode(mode), _bs({p.block_size(0), p.block_size(1)}),
      _data(p.num_nonzeros() * _bs[0] * _bs[1], 0),
      _cols(p.graph().first.begin(), p.graph().first.end()),
      _row_ptr(p.graph().second.begin(), p.graph().second.end()),
      _comm(MPI_COMM_NULL)
{
  if (_block_mode == BlockMode::expanded)
  {
    // Rebuild IndexMaps
    for (int i = 0; i < 2; ++i)
    {
      const auto im = _index_maps[i];
      const int size_local = im->size_local() * _bs[i];
      const std::vector<std::int64_t>& ghost_i = im->ghosts();
      std::vector<std::int64_t> ghosts;
      const std::vector<int> ghost_owner_i = im->owners();
      std::vector<int> src_rank;
      for (std::size_t j = 0; j < ghost_i.size(); ++j)
      {
        for (int k = 0; k < _bs[i]; ++k)
        {
          ghosts.push_back(ghost_i[j] * _bs[i] + k);
          src_rank.push_back(ghost_owner_i[j]);
        }
      }
      const std::array<std::vector<int>, 2> src_dest0
          = {_index_maps[i]->src(), _index_maps[i]->dest()};
      _index_maps[i] = std::make_shared<common::IndexMap>(
          _index_maps[i]->comm(), size_local, src_dest0, ghosts, src_rank);
    }

    // Convert the sparsity pattern and set _bs to 1
    column_container_type new_cols;
    new_cols.reserve(_data.size());
    rowptr_container_type new_row_ptr = {0};
    new_row_ptr.reserve(_row_ptr.size() * _bs[0]);
    std::span<const std::int32_t> num_diag_nnz = p.off_diagonal_offsets();

    for (std::size_t i = 0; i < _row_ptr.size() - 1; ++i)
    {
      // Repeat each row _bs[0] times
      for (int q0 = 0; q0 < _bs[0]; ++q0)
      {
        _off_diagonal_offset.push_back(new_row_ptr.back()
                                       + num_diag_nnz[i] * _bs[1]);
        for (auto j = _row_ptr[i]; j < _row_ptr[i + 1]; ++j)
        {
          for (int q1 = 0; q1 < _bs[1]; ++q1)
            new_cols.push_back(_cols[j] * _bs[1] + q1);
        }
        new_row_ptr.push_back(new_cols.size());
      }
    }
    _cols = new_cols;
    _row_ptr = new_row_ptr;
    _bs[0] = 1;
    _bs[1] = 1;
  }
  else
  {
    // Compute off-diagonal offset for each row (compact)
    std::span<const std::int32_t> num_diag_nnz = p.off_diagonal_offsets();
    _off_diagonal_offset.reserve(num_diag_nnz.size());
    std::transform(num_diag_nnz.begin(), num_diag_nnz.end(), _row_ptr.begin(),
                   std::back_inserter(_off_diagonal_offset), std::plus{});
  }

  // Some short-hand
  const std::array local_size
      = {_index_maps[0]->size_local(), _index_maps[1]->size_local()};
  const std::array local_range
      = {_index_maps[0]->local_range(), _index_maps[1]->local_range()};
  const std::vector<std::int64_t>& ghosts1 = _index_maps[1]->ghosts();

  const std::vector<std::int64_t>& ghosts0 = _index_maps[0]->ghosts();
  const std::vector<int>& src_ranks = _index_maps[0]->src();
  const std::vector<int>& dest_ranks = _index_maps[0]->dest();

  // Create neighbourhood communicator (owner <- ghost)
  MPI_Comm comm;
  MPI_Dist_graph_create_adjacent(_index_maps[0]->comm(), dest_ranks.size(),
                                 dest_ranks.data(), MPI_UNWEIGHTED,
                                 src_ranks.size(), src_ranks.data(),
                                 MPI_UNWEIGHTED, MPI_INFO_NULL, false, &comm);
  _comm = dolfinx::MPI::Comm(comm, false);

  // Build map from ghost row index position to owning (neighborhood)
  // rank
  _ghost_row_to_rank.reserve(_index_maps[0]->owners().size());
  for (int r : _index_maps[0]->owners())
  {
    auto it = std::lower_bound(src_ranks.begin(), src_ranks.end(), r);
    assert(it != src_ranks.end() and *it == r);
    int pos = std::distance(src_ranks.begin(), it);
    _ghost_row_to_rank.push_back(pos);
  }

  // Compute size of data to send to each neighbor
  std::vector<std::int32_t> data_per_proc(src_ranks.size(), 0);
  for (std::size_t i = 0; i < _ghost_row_to_rank.size(); ++i)
  {
    assert(_ghost_row_to_rank[i] < data_per_proc.size());
    std::size_t pos = local_size[0] + i;
    data_per_proc[_ghost_row_to_rank[i]] += _row_ptr[pos + 1] - _row_ptr[pos];
  }

  // Compute send displacements
  _val_send_disp.resize(src_ranks.size() + 1, 0);
  std::partial_sum(data_per_proc.begin(), data_per_proc.end(),
                   std::next(_val_send_disp.begin()));

  // For each ghost row, pack and send indices to neighborhood
  std::vector<std::int64_t> ghost_index_data(2 * _val_send_disp.back());
  {
    std::vector<int> insert_pos = _val_send_disp;
    for (std::size_t i = 0; i < _ghost_row_to_rank.size(); ++i)
    {
      const int rank = _ghost_row_to_rank[i];
      int row_id = local_size[0] + i;
      for (int j = _row_ptr[row_id]; j < _row_ptr[row_id + 1]; ++j)
      {
        // Get position in send buffer
        const std::int32_t idx_pos = 2 * insert_pos[rank];

        // Pack send data (row, col) as global indices
        ghost_index_data[idx_pos] = ghosts0[i];
        if (std::int32_t col_local = _cols[j]; col_local < local_size[1])
          ghost_index_data[idx_pos + 1] = col_local + local_range[1][0];
        else
          ghost_index_data[idx_pos + 1] = ghosts1[col_local - local_size[1]];

        insert_pos[rank] += 1;
      }
    }
  }

  // Communicate data with neighborhood
  std::vector<std::int64_t> ghost_index_array;
  std::vector<int> recv_disp;
  {
    std::vector<int> send_sizes;
    std::transform(data_per_proc.begin(), data_per_proc.end(),
                   std::back_inserter(send_sizes),
                   [](auto x) { return 2 * x; });

    std::vector<int> recv_sizes(dest_ranks.size());
    send_sizes.reserve(1);
    recv_sizes.reserve(1);
    MPI_Neighbor_alltoall(send_sizes.data(), 1, MPI_INT, recv_sizes.data(), 1,
                          MPI_INT, _comm.comm());

    // Build send/recv displacements
    std::vector<int> send_disp = {0};
    std::partial_sum(send_sizes.begin(), send_sizes.end(),
                     std::back_inserter(send_disp));
    recv_disp = {0};
    std::partial_sum(recv_sizes.begin(), recv_sizes.end(),
                     std::back_inserter(recv_disp));

    ghost_index_array.resize(recv_disp.back());
    MPI_Neighbor_alltoallv(ghost_index_data.data(), send_sizes.data(),
                           send_disp.data(), MPI_INT64_T,
                           ghost_index_array.data(), recv_sizes.data(),
                           recv_disp.data(), MPI_INT64_T, _comm.comm());
  }

  // Store receive displacements for future use, when transferring
  // data values
  _val_recv_disp.resize(recv_disp.size());
  const int bs2 = _bs[0] * _bs[1];
  std::transform(recv_disp.begin(), recv_disp.end(), _val_recv_disp.begin(),
                 [&bs2](auto d) { return bs2 * d / 2; });
  std::transform(_val_send_disp.begin(), _val_send_disp.end(),
                 _val_send_disp.begin(), [&bs2](auto d) { return d * bs2; });

  // Global-to-local map for ghost columns
  std::vector<std::pair<std::int64_t, std::int32_t>> global_to_local;
  global_to_local.reserve(ghosts1.size());
  for (std::int64_t idx : ghosts1)
    global_to_local.push_back({idx, global_to_local.size() + local_size[1]});
  std::sort(global_to_local.begin(), global_to_local.end());

  // Compute location in which data for each index should be stored
  // when received
  for (std::size_t i = 0; i < ghost_index_array.size(); i += 2)
  {
    // Row must be on this process
    const std::int32_t local_row = ghost_index_array[i] - local_range[0][0];
    assert(local_row >= 0 and local_row < local_size[0]);

    // Column may be owned or unowned
    std::int32_t local_col = ghost_index_array[i + 1] - local_range[1][0];
    if (local_col < 0 or local_col >= local_size[1])
    {
      auto it = std::lower_bound(global_to_local.begin(), global_to_local.end(),
                                 std::pair(ghost_index_array[i + 1], -1),
                                 [](auto& a, auto& b)
                                 { return a.first < b.first; });
      assert(it != global_to_local.end()
             and it->first == ghost_index_array[i + 1]);
      local_col = it->second;
    }
    auto cit0 = std::next(_cols.begin(), _row_ptr[local_row]);
    auto cit1 = std::next(_cols.begin(), _row_ptr[local_row + 1]);

    // Find position of column index and insert data
    auto cit = std::lower_bound(cit0, cit1, local_col);
    assert(cit != cit1);
    assert(*cit == local_col);
    std::size_t d = std::distance(_cols.begin(), cit);
    _unpack_pos.push_back(d);
  }
}
//-----------------------------------------------------------------------------
template <typename U, typename V, typename W, typename X>
std::vector<typename MatrixCSR<U, V, W, X>::value_type>
MatrixCSR<U, V, W, X>::to_dense() const
{
  const std::size_t nrows = num_all_rows();
  const std::size_t ncols
      = _index_maps[1]->size_local() + _index_maps[1]->num_ghosts();
  std::vector<value_type> A(nrows * ncols * _bs[0] * _bs[1], 0.0);
  for (std::size_t r = 0; r < nrows; ++r)
    for (std::int32_t j = _row_ptr[r]; j < _row_ptr[r + 1]; ++j)
      for (int i0 = 0; i0 < _bs[0]; ++i0)
        for (int i1 = 0; i1 < _bs[1]; ++i1)
        {
          // Dense row index is r * _bs[0] + i0 and each dense row has
          // ncols * _bs[1] entries (row-major storage)
          A[(r * _bs[0] + i0) * ncols * _bs[1] + _cols[j] * _bs[1] + i1]
              = _data[j * _bs[0] * _bs[1] + i0 * _bs[1] + i1];
        }

  return A;
}
//-----------------------------------------------------------------------------
template <typename U, typename V, typename W, typename X>
void MatrixCSR<U, V, W, X>::scatter_rev_begin()
{
  const std::int32_t local_size0 = _index_maps[0]->size_local();
  const std::int32_t num_ghosts0 = _index_maps[0]->num_ghosts();
  const int bs2 = _bs[0] * _bs[1];

  // For each ghost row, pack values to send to the neighborhood
  std::vector<int> insert_pos = _val_send_disp;
  _ghost_value_data.resize(_val_send_disp.back());
  for (int i = 0; i < num_ghosts0; ++i)
  {
    const int rank = _ghost_row_to_rank[i];

    // Get position in send buffer to place data to send to this
    // neighbour
    const std::int32_t val_pos = insert_pos[rank];
    std::copy(std::next(_data.data(), _row_ptr[local_size0 + i] * bs2),
              std::next(_data.data(), _row_ptr[local_size0 + i + 1] * bs2),
              std::next(_ghost_value_data.begin(), val_pos));
    insert_pos[rank]
        += bs2 * (_row_ptr[local_size0 + i + 1] - _row_ptr[local_size0 + i]);
  }

  _ghost_value_data_in.resize(_val_recv_disp.back());

  // Compute data sizes for send and receive from displacements
  std::vector<int> val_send_count(_val_send_disp.size() - 1);
  std::adjacent_difference(std::next(_val_send_disp.begin()),
                           _val_send_disp.end(), val_send_count.begin());

  std::vector<int> val_recv_count(_val_recv_disp.size() - 1);
  std::adjacent_difference(std::next(_val_recv_disp.begin()),
                           _val_recv_disp.end(), val_recv_count.begin());

  int status = MPI_Ineighbor_alltoallv(
      _ghost_value_data.data(), val_send_count.data(), _val_send_disp.data(),
      dolfinx::MPI::mpi_type<value_type>(), _ghost_value_data_in.data(),
      val_recv_count.data(), _val_recv_disp.data(),
      dolfinx::MPI::mpi_type<value_type>(), _comm.comm(), &_request);
  assert(status == MPI_SUCCESS);
}
//-----------------------------------------------------------------------------
template <typename U, typename V, typename W, typename X>
void MatrixCSR<U, V, W, X>::scatter_rev_end()
{
  int status = MPI_Wait(&_request, MPI_STATUS_IGNORE);
  assert(status == MPI_SUCCESS);

  _ghost_value_data.clear();
  _ghost_value_data.shrink_to_fit();

  // Add received values to the owned rows
  const int bs2 = _bs[0] * _bs[1];
  assert(_ghost_value_data_in.size() == _unpack_pos.size() * bs2);
  for (std::size_t i = 0; i < _unpack_pos.size(); ++i)
    for (int j = 0; j < bs2; ++j)
      _data[_unpack_pos[i] * bs2 + j] += _ghost_value_data_in[i * bs2 + j];

  _ghost_value_data_in.clear();
  _ghost_value_data_in.shrink_to_fit();

  // Set ghost row data to zero
  const std::int32_t local_size0 = _index_maps[0]->size_local();
  std::fill(std::next(_data.begin(), _row_ptr[local_size0] * bs2), _data.end(),
            0);
}
//-----------------------------------------------------------------------------
template <typename U, typename V, typename W, typename X>
double MatrixCSR<U, V, W, X>::squared_norm() const
{
  const std::size_t num_owned_rows = _index_maps[0]->size_local();
  const int bs2 = _bs[0] * _bs[1];
  assert(num_owned_rows < _row_ptr.size());
  double norm_sq_local = std::accumulate(
      _data.cbegin(), std::next(_data.cbegin(), _row_ptr[num_owned_rows] * bs2),
      double(0), [](auto norm, value_type y) { return norm + std::norm(y); });
  double norm_sq;
  MPI_Allreduce(&norm_sq_local, &norm_sq, 1, MPI_DOUBLE, MPI_SUM, _comm.comm());
  return norm_sq;
}
//-----------------------------------------------------------------------------

} // namespace dolfinx::la
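
//-----------------------------------------------------------------------------
// End-to-end usage sketch (not part of the header). It assumes an
// already assembled la::SparsityPattern `pattern`; the names below are
// illustrative and error handling is omitted.
//
//   dolfinx::la::MatrixCSR<double> A(pattern);
//
//   // Accumulate a value into an entry present in the pattern
//   std::array<std::int32_t, 1> rows{0}, cols{0};
//   std::array<double, 1> vals{1.0};
//   A.add(vals, rows, cols);
//
//   // Send ghost-row contributions to the owning ranks
//   A.scatter_rev();
//
//   // Frobenius norm squared across all processes
//   double nrm2 = A.squared_norm();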