DOLFINx 0.11.0 — DOLFINx C++ interface — MatrixCSR.h
1// Copyright (C) 2021-2022 Garth N. Wells and Chris N. Richardson
2//
3// This file is part of DOLFINx (https://www.fenicsproject.org)
4//
5// SPDX-License-Identifier: LGPL-3.0-or-later
6
7#pragma once
8
#include "SparsityPattern.h"
#include "Vector.h"
#include "matrix_csr_impl.h"
#include <algorithm>
#include <array>
#include <complex>
#include <concepts>
#include <cstdint>
#include <dolfinx/common/IndexMap.h>
#include <dolfinx/common/MPI.h>
#include <dolfinx/graph/AdjacencyList.h>
#include <functional>
#include <iterator>
#include <memory>
#include <mpi.h>
#include <numeric>
#include <span>
#include <stdexcept>
#include <type_traits>
#include <utility>
#include <vector>
21
22// Define requirements on sparsity pattern required for MatrixCSR constructor
23// allowing alternative implentations that can provide these essentials.
24template <typename T>
25concept SparsityImplementation = requires(T sp, int i) {
26 { sp.graph() };
27 requires std::forward_iterator<typename decltype(sp.graph().first)::iterator>;
28 requires std::convertible_to<std::int32_t,
29 typename decltype(sp.graph().first)::value_type>;
30 requires std::forward_iterator<
31 typename decltype(sp.graph().second)::iterator>;
32 requires std::convertible_to<
33 std::int64_t, typename decltype(sp.graph().second)::value_type>;
34
35 { sp.block_size(i) } -> std::same_as<int>;
36 {
37 sp.index_map(i)
38 } -> std::same_as<std::shared_ptr<const dolfinx::common::IndexMap>>;
39 { sp.column_index_map() } -> std::same_as<dolfinx::common::IndexMap>;
40};
41
42namespace dolfinx::la
43{
/// @brief Modes for representing block-structured matrices.
enum class BlockMode : int
{
  compact = 0, ///< Each entry in the sparsity pattern refers to a block
               ///< of data of size (bs[0], bs[1]).
  expanded = 1 ///< The sparsity pattern is expanded by the block size,
               ///< so each entry refers to one data item; the stored
               ///< matrix has block size (1, 1).
};
53
67template <typename Scalar, typename Container = std::vector<Scalar>,
68 typename ColContainer = std::vector<std::int32_t>,
69 typename RowPtrContainer = std::vector<std::int64_t>>
70class MatrixCSR
71{
72 static_assert(std::is_same_v<typename Container::value_type, Scalar>);
73 static_assert(std::is_integral_v<typename ColContainer::value_type>);
74 static_assert(std::is_integral_v<typename RowPtrContainer::value_type>);
75
76 template <typename, typename, typename, typename>
77 friend class MatrixCSR;
78
79public:
81 using value_type = Scalar;
82
84 using container_type = Container;
85
87 using column_container_type = ColContainer;
88
90 using rowptr_container_type = RowPtrContainer;
91
115 template <int BS0 = 1, int BS1 = 1>
117 {
118 if ((BS0 != _bs[0] and BS0 > 1 and _bs[0] > 1)
119 or (BS1 != _bs[1] and BS1 > 1 and _bs[1] > 1))
120 {
121 throw std::runtime_error(
122 "Cannot insert blocks of different size than matrix block size");
123 }
124
125 return [&](std::span<const std::int32_t> rows,
126 std::span<const std::int32_t> cols,
127 std::span<const value_type> data) -> int
128 {
129 this->set<BS0, BS1>(data, rows, cols);
130 return 0;
131 };
132 }
133
157 template <int BS0 = 1, int BS1 = 1>
159 {
160 if ((BS0 != _bs[0] and BS0 > 1 and _bs[0] > 1)
161 or (BS1 != _bs[1] and BS1 > 1 and _bs[1] > 1))
162 {
163 throw std::runtime_error(
164 "Cannot insert blocks of different size than matrix block size");
165 }
166
167 return [&](std::span<const std::int32_t> rows,
168 std::span<const std::int32_t> cols,
169 std::span<const value_type> data) -> int
170 {
171 this->add<BS0, BS1>(data, rows, cols);
172 return 0;
173 };
174 }
175
199 template <SparsityImplementation T>
200 MatrixCSR(const T& p, BlockMode mode = BlockMode::compact);
201
204 MatrixCSR(MatrixCSR&& A) = default;
205
208 MatrixCSR(const MatrixCSR& A) = default;
209
218 template <typename Scalar0, typename Container0, typename ColContainer0,
219 typename RowPtrContainer0>
220 explicit MatrixCSR(
221 const MatrixCSR<Scalar0, Container0, ColContainer0, RowPtrContainer0>& A)
222 : _index_maps(A._index_maps), _block_mode(A.block_mode()),
223 _bs(A.block_size()), _data(A._data.begin(), A._data.end()),
224 _cols(A.cols().begin(), A.cols().end()),
225 _row_ptr(A.row_ptr().begin(), A.row_ptr().end()),
226 _off_diagonal_offset(A.off_diag_offset().begin(),
227 A.off_diag_offset().end()),
228 _comm(A.comm()), _request(MPI_REQUEST_NULL), _unpack_pos(A._unpack_pos),
229 _val_send_disp(A._val_send_disp), _val_recv_disp(A._val_recv_disp),
230 _ghost_row_to_rank(A._ghost_row_to_rank)
231 {
232 }
233
238 [[deprecated("Use std::ranges::fill(A.values(), v) instead.")]]
240 {
241 std::ranges::fill(_data, x);
242 }
243
260 template <int BS0, int BS1>
261 void set(std::span<const value_type> x, std::span<const std::int32_t> rows,
262 std::span<const std::int32_t> cols)
263 {
264 auto set_fn = [](value_type& y, const value_type& x) { y = x; };
265
266 std::int32_t num_rows
267 = _index_maps[0]->size_local() + _index_maps[0]->num_ghosts();
268 assert(x.size() == rows.size() * cols.size() * BS0 * BS1);
269 if (_bs[0] == BS0 and _bs[1] == BS1)
270 {
271 impl::insert_csr<BS0, BS1>(_data, _cols, _row_ptr, x, rows, cols, set_fn,
272 num_rows);
273 }
274 else if (_bs[0] == 1 and _bs[1] == 1)
275 {
276 // Set blocked data in a regular CSR matrix (_bs[0]=1, _bs[1]=1)
277 // with correct sparsity
278 impl::insert_blocked_csr<BS0, BS1>(_data, _cols, _row_ptr, x, rows, cols,
279 set_fn, num_rows);
280 }
281 else
282 {
283 assert(BS0 == 1 and BS1 == 1);
284 // Set non-blocked data in a blocked CSR matrix (BS0=1, BS1=1)
285 impl::insert_nonblocked_csr(_data, _cols, _row_ptr, x, rows, cols, set_fn,
286 num_rows, _bs[0], _bs[1]);
287 }
288 }
289
305 template <int BS0 = 1, int BS1 = 1>
306 void add(std::span<const value_type> x, std::span<const std::int32_t> rows,
307 std::span<const std::int32_t> cols)
308 {
309 auto add_fn = [](value_type& y, const value_type& x) { y += x; };
310
311 assert(x.size() == rows.size() * cols.size() * BS0 * BS1);
312 if (_bs[0] == BS0 and _bs[1] == BS1)
313 {
314 impl::insert_csr<BS0, BS1>(_data, _cols, _row_ptr, x, rows, cols, add_fn,
315 _row_ptr.size());
316 }
317 else if (_bs[0] == 1 and _bs[1] == 1)
318 {
319 // Add blocked data to a regular CSR matrix (_bs[0]=1, _bs[1]=1)
320 impl::insert_blocked_csr<BS0, BS1>(_data, _cols, _row_ptr, x, rows, cols,
321 add_fn, _row_ptr.size());
322 }
323 else
324 {
325 assert(BS0 == 1 and BS1 == 1);
326 // Add non-blocked data to a blocked CSR matrix (BS0=1, BS1=1)
327 impl::insert_nonblocked_csr(_data, _cols, _row_ptr, x, rows, cols, add_fn,
328 _row_ptr.size(), _bs[0], _bs[1]);
329 }
330 }
331
333 std::int32_t num_owned_rows() const { return _index_maps[0]->size_local(); }
334
336 std::int32_t num_all_rows() const { return _row_ptr.size() - 1; }
337
347 std::vector<value_type> to_dense() const
348 {
349 const std::size_t nrows = num_all_rows();
350 const std::size_t ncols = _index_maps[1]->size_global();
351 std::vector<value_type> A(nrows * ncols * _bs[0] * _bs[1], 0.0);
352 for (std::size_t r = 0; r < nrows; ++r)
353 {
354 for (std::int32_t j = _row_ptr[r]; j < _row_ptr[r + 1]; ++j)
355 {
356 for (int i0 = 0; i0 < _bs[0]; ++i0)
357 {
358 for (int i1 = 0; i1 < _bs[1]; ++i1)
359 {
360 std::array<std::int32_t, 1> local_col{_cols[j]};
361 std::array<std::int64_t, 1> global_col{0};
362 _index_maps[1]->local_to_global(local_col, global_col);
363 A[(r * _bs[1] + i0) * ncols * _bs[0] + global_col[0] * _bs[1] + i1]
364 = _data[j * _bs[0] * _bs[1] + i0 * _bs[1] + i1];
365 }
366 }
367 }
368 }
369
370 return A;
371 }
372
380 {
383 }
384
395 {
396 const std::int32_t local_size0 = _index_maps[0]->size_local();
397 const std::int32_t num_ghosts0 = _index_maps[0]->num_ghosts();
398 const int bs2 = _bs[0] * _bs[1];
399
400 // For each ghost row, pack and send values to send to neighborhood
401 std::vector<int> insert_pos = _val_send_disp;
402 _ghost_value_data.resize(_val_send_disp.back());
403 for (int i = 0; i < num_ghosts0; ++i)
404 {
405 int rank = _ghost_row_to_rank[i];
406
407 // Get position in send buffer to place data to send to this
408 // neighbour
409 std::int32_t val_pos = insert_pos[rank];
410 std::copy(std::next(_data.data(), _row_ptr[local_size0 + i] * bs2),
411 std::next(_data.data(), _row_ptr[local_size0 + i + 1] * bs2),
412 std::next(_ghost_value_data.begin(), val_pos));
413 insert_pos[rank]
414 += bs2 * (_row_ptr[local_size0 + i + 1] - _row_ptr[local_size0 + i]);
415 }
416
417 _ghost_value_data_in.resize(_val_recv_disp.back());
418
419 // Compute data sizes for send and receive from displacements
420 std::vector<int> val_send_count(_val_send_disp.size() - 1);
421 std::adjacent_difference(std::next(_val_send_disp.begin()),
422 _val_send_disp.end(), val_send_count.begin());
423
424 std::vector<int> val_recv_count(_val_recv_disp.size() - 1);
425 std::adjacent_difference(std::next(_val_recv_disp.begin()),
426 _val_recv_disp.end(), val_recv_count.begin());
427
428 int status = MPI_Ineighbor_alltoallv(
429 _ghost_value_data.data(), val_send_count.data(), _val_send_disp.data(),
430 dolfinx::MPI::mpi_t<value_type>, _ghost_value_data_in.data(),
431 val_recv_count.data(), _val_recv_disp.data(),
432 dolfinx::MPI::mpi_t<value_type>, _comm.comm(), &_request);
433 dolfinx::MPI::check_error(_comm.comm(), status);
434 }
435
442 {
443 int status = MPI_Wait(&_request, MPI_STATUS_IGNORE);
444 dolfinx::MPI::check_error(_comm.comm(), status);
445
446 _ghost_value_data.clear();
447 _ghost_value_data.shrink_to_fit();
448
449 // Add to local rows
450 int bs2 = _bs[0] * _bs[1];
451 assert(_ghost_value_data_in.size() == _unpack_pos.size() * bs2);
452 for (std::size_t i = 0; i < _unpack_pos.size(); ++i)
453 for (int j = 0; j < bs2; ++j)
454 _data[_unpack_pos[i] * bs2 + j] += _ghost_value_data_in[i * bs2 + j];
455
456 _ghost_value_data_in.clear();
457 _ghost_value_data_in.shrink_to_fit();
458
459 // Set ghost row data to zero
460 std::int32_t local_size0 = _index_maps[0]->size_local();
461 std::fill(std::next(_data.begin(), _row_ptr[local_size0] * bs2),
462 _data.end(), 0);
463 }
464
468 double squared_norm() const
469 {
470 const std::size_t num_owned_rows = _index_maps[0]->size_local();
471 const int bs2 = _bs[0] * _bs[1];
472 assert(num_owned_rows < _row_ptr.size());
473 double norm_sq_local = std::accumulate(
474 _data.cbegin(),
475 std::next(_data.cbegin(), _row_ptr[num_owned_rows] * bs2), double(0),
476 [](auto norm, value_type y) { return norm + std::norm(y); });
477 double norm_sq;
478 MPI_Allreduce(&norm_sq_local, &norm_sq, 1, MPI_DOUBLE, MPI_SUM,
479 _comm.comm());
480 return norm_sq;
481 }
482
494
496 MPI_Comm comm() const { return _comm.comm(); }
497
505 std::shared_ptr<const common::IndexMap> index_map(int dim) const
506 {
507 return _index_maps.at(dim);
508 }
509
512 container_type& values() { return _data; }
513
516 const container_type& values() const { return _data; }
517
520 const rowptr_container_type& row_ptr() const { return _row_ptr; }
521
524 const column_container_type& cols() const { return _cols; }
525
536 {
537 return _off_diagonal_offset;
538 }
539
542 std::array<int, 2> block_size() const { return _bs; }
543
545 BlockMode block_mode() const { return _block_mode; }
546
547private:
548 // Parallel distribution of the rows and columns
549 std::array<std::shared_ptr<const common::IndexMap>, 2> _index_maps;
550
551 // Block mode (compact or expanded)
552 BlockMode _block_mode;
553
554 // Block sizes
555 std::array<int, 2> _bs;
556
557 // Matrix data
558 container_type _data;
560 rowptr_container_type _row_ptr;
561
562 // Start of off-diagonal (unowned columns) on each row
563 rowptr_container_type _off_diagonal_offset;
564
565 // Communicator with neighborhood (ghost->owner communicator for rows)
566 dolfinx::MPI::Comm _comm;
567
568 // -- Precomputed data for scatter_rev/update
569
570 // Request in non-blocking communication
571 MPI_Request _request;
572
573 // Position in _data to add received data
574 std::vector<std::size_t> _unpack_pos;
575
576 // Displacements for alltoall for each neighbor when sending and
577 // receiving
578 std::vector<int> _val_send_disp, _val_recv_disp;
579
580 // Ownership of each row, by neighbor (for the neighbourhood defined
581 // on _comm)
582 std::vector<int> _ghost_row_to_rank;
583
584 // Temporary stores for data during non-blocking communication
585 container_type _ghost_value_data;
586 container_type _ghost_value_data_in;
587};
588//-----------------------------------------------------------------------------
589
591template <typename U, typename V, typename W, typename X>
592template <SparsityImplementation SparsityType>
593MatrixCSR<U, V, W, X>::MatrixCSR(const SparsityType& p, BlockMode mode)
594 : _index_maps({p.index_map(0),
595 std::make_shared<common::IndexMap>(p.column_index_map())}),
596 _block_mode(mode), _bs({p.block_size(0), p.block_size(1)}),
597 _data(p.graph().first.size() * _bs[0] * _bs[1], 0),
598 _cols(p.graph().first.begin(), p.graph().first.end()),
599 _row_ptr(p.graph().second.begin(), p.graph().second.end()),
600 _comm(MPI_COMM_NULL)
601{
602 if (_block_mode == BlockMode::expanded)
603 {
604 // Rebuild IndexMaps
605 for (int i = 0; i < 2; ++i)
606 {
607 auto im = _index_maps[i];
608 std::int32_t size_local = im->size_local() * _bs[i];
609 std::span ghost_i = im->ghosts();
610 std::vector<std::int64_t> ghosts;
611 const std::vector<int> ghost_owner_i(im->owners().begin(),
612 im->owners().end());
613 std::vector<int> src_rank;
614 for (std::size_t j = 0; j < ghost_i.size(); ++j)
615 {
616 for (int k = 0; k < _bs[i]; ++k)
617 {
618 ghosts.push_back(ghost_i[j] * _bs[i] + k);
619 src_rank.push_back(ghost_owner_i[j]);
620 }
621 }
622
623 std::array<std::vector<int>, 2> src_dest0
624 = {std::vector(_index_maps[i]->src().begin(),
625 _index_maps[i]->src().end()),
626 std::vector(_index_maps[i]->dest().begin(),
627 _index_maps[i]->dest().end())};
628 _index_maps[i] = std::make_shared<common::IndexMap>(
629 _index_maps[i]->comm(), size_local, src_dest0, ghosts, src_rank);
630 }
631
632 // Convert sparsity pattern and set _bs to 1
633
634 column_container_type new_cols;
635 new_cols.reserve(_data.size());
636 rowptr_container_type new_row_ptr{0};
637 new_row_ptr.reserve(_row_ptr.size() * _bs[0]);
638 std::span<const std::int32_t> num_diag_nnz = p.off_diagonal_offsets();
639 for (std::size_t i = 0; i < _row_ptr.size() - 1; ++i)
640 {
641 // Repeat row _bs[0] times
642 for (int q0 = 0; q0 < _bs[0]; ++q0)
643 {
644 _off_diagonal_offset.push_back(new_row_ptr.back()
645 + num_diag_nnz[i] * _bs[1]);
646 for (auto j = _row_ptr[i]; j < _row_ptr[i + 1]; ++j)
647 {
648 for (int q1 = 0; q1 < _bs[1]; ++q1)
649 new_cols.push_back(_cols[j] * _bs[1] + q1);
650 }
651 new_row_ptr.push_back(new_cols.size());
652 }
653 }
654 _cols = new_cols;
655 _row_ptr = new_row_ptr;
656 _bs[0] = 1;
657 _bs[1] = 1;
658 }
659 else
660 {
661 // Compute off-diagonal offset for each row (compact)
662 std::span<const std::int32_t> num_diag_nnz = p.off_diagonal_offsets();
663 _off_diagonal_offset.reserve(num_diag_nnz.size());
664 std::ranges::transform(num_diag_nnz, _row_ptr,
665 std::back_inserter(_off_diagonal_offset),
666 std::plus{});
667 }
668
669 // Some short-hand
670 std::array local_size
671 = {_index_maps[0]->size_local(), _index_maps[1]->size_local()};
672 std::array local_range
673 = {_index_maps[0]->local_range(), _index_maps[1]->local_range()};
674 std::span ghosts1 = _index_maps[1]->ghosts();
675
676 std::span ghosts0 = _index_maps[0]->ghosts();
677 std::span src_ranks = _index_maps[0]->src();
678 std::span dest_ranks = _index_maps[0]->dest();
679
680 // Create neighbourhood communicator (owner <- ghost)
681 MPI_Comm comm;
682 MPI_Dist_graph_create_adjacent(_index_maps[0]->comm(), dest_ranks.size(),
683 dest_ranks.data(), MPI_UNWEIGHTED,
684 src_ranks.size(), src_ranks.data(),
685 MPI_UNWEIGHTED, MPI_INFO_NULL, false, &comm);
686 _comm = dolfinx::MPI::Comm(comm, false);
687
688 // Build map from ghost row index position to owning (neighborhood)
689 // rank
690 _ghost_row_to_rank.reserve(_index_maps[0]->owners().size());
691 for (int r : _index_maps[0]->owners())
692 {
693 auto it = std::ranges::lower_bound(src_ranks, r);
694 assert(it != src_ranks.end() and *it == r);
695 std::size_t pos = std::distance(src_ranks.begin(), it);
696 _ghost_row_to_rank.push_back(pos);
697 }
698
699 // Compute size of data to send to each neighbor
700 std::vector<std::int32_t> data_per_proc(src_ranks.size(), 0);
701 for (std::size_t i = 0; i < _ghost_row_to_rank.size(); ++i)
702 {
703 assert(_ghost_row_to_rank[i] < (int)data_per_proc.size());
704 std::size_t pos = local_size[0] + i;
705 data_per_proc[_ghost_row_to_rank[i]] += _row_ptr[pos + 1] - _row_ptr[pos];
706 }
707
708 // Compute send displacements
709 _val_send_disp.resize(src_ranks.size() + 1, 0);
710 std::partial_sum(data_per_proc.begin(), data_per_proc.end(),
711 std::next(_val_send_disp.begin()));
712
713 // For each ghost row, pack and send indices to neighborhood
714 std::vector<std::int64_t> ghost_index_data(2 * _val_send_disp.back());
715 {
716 std::vector<int> insert_pos = _val_send_disp;
717 for (std::size_t i = 0; i < _ghost_row_to_rank.size(); ++i)
718 {
719 int rank = _ghost_row_to_rank[i];
720 std::int32_t row_id = local_size[0] + i;
721 for (int j = _row_ptr[row_id]; j < _row_ptr[row_id + 1]; ++j)
722 {
723 // Get position in send buffer
724 std::int32_t idx_pos = 2 * insert_pos[rank];
725
726 // Pack send data (row, col) as global indices
727 ghost_index_data[idx_pos] = ghosts0[i];
728 if (std::int32_t col_local = _cols[j]; col_local < local_size[1])
729 ghost_index_data[idx_pos + 1] = col_local + local_range[1][0];
730 else
731 ghost_index_data[idx_pos + 1] = ghosts1[col_local - local_size[1]];
732
733 insert_pos[rank] += 1;
734 }
735 }
736 }
737
738 // Communicate data with neighborhood
739 std::vector<std::int64_t> ghost_index_array;
740 std::vector<int> recv_disp;
741 {
742 std::vector<int> send_sizes;
743 std::ranges::transform(data_per_proc, std::back_inserter(send_sizes),
744 [](auto x) { return 2 * x; });
745
746 std::vector<int> recv_sizes(dest_ranks.size());
747 send_sizes.reserve(1);
748 recv_sizes.reserve(1);
749 MPI_Neighbor_alltoall(send_sizes.data(), 1, MPI_INT, recv_sizes.data(), 1,
750 MPI_INT, _comm.comm());
751
752 // Build send/recv displacement
753 std::vector<int> send_disp{0};
754 std::partial_sum(send_sizes.begin(), send_sizes.end(),
755 std::back_inserter(send_disp));
756 recv_disp = {0};
757 std::partial_sum(recv_sizes.begin(), recv_sizes.end(),
758 std::back_inserter(recv_disp));
759
760 ghost_index_array.resize(recv_disp.back());
761 MPI_Neighbor_alltoallv(ghost_index_data.data(), send_sizes.data(),
762 send_disp.data(), MPI_INT64_T,
763 ghost_index_array.data(), recv_sizes.data(),
764 recv_disp.data(), MPI_INT64_T, _comm.comm());
765 }
766
767 // Store receive displacements for future use, when transferring
768 // data values
769 _val_recv_disp.resize(recv_disp.size());
770 int bs2 = _bs[0] * _bs[1];
771 std::ranges::transform(recv_disp, _val_recv_disp.begin(),
772 [&bs2](auto d) { return bs2 * d / 2; });
773 std::ranges::transform(_val_send_disp, _val_send_disp.begin(),
774 [&bs2](auto d) { return d * bs2; });
775
776 // Global-to-local map for ghost columns
777 std::vector<std::pair<std::int64_t, std::int32_t>> global_to_local;
778 global_to_local.reserve(ghosts1.size());
779 for (std::int64_t idx : ghosts1)
780 global_to_local.push_back({idx, global_to_local.size() + local_size[1]});
781 std::ranges::sort(global_to_local);
782
783 // Compute location in which data for each index should be stored
784 // when received
785 for (std::size_t i = 0; i < ghost_index_array.size(); i += 2)
786 {
787 // Row must be on this process
788 std::int32_t local_row = ghost_index_array[i] - local_range[0][0];
789 assert(local_row >= 0 and local_row < local_size[0]);
790
791 // Column may be owned or unowned
792 std::int32_t local_col = ghost_index_array[i + 1] - local_range[1][0];
793 if (local_col < 0 or local_col >= local_size[1])
794 {
795 auto it = std::ranges::lower_bound(
796 global_to_local, std::pair(ghost_index_array[i + 1], -1),
797 [](auto a, auto b) { return a.first < b.first; });
798 assert(it != global_to_local.end()
799 and it->first == ghost_index_array[i + 1]);
800 local_col = it->second;
801 }
802 auto cit0 = std::next(_cols.begin(), _row_ptr[local_row]);
803 auto cit1 = std::next(_cols.begin(), _row_ptr[local_row + 1]);
804
805 // Find position of column index and insert data
806 auto cit = std::lower_bound(cit0, cit1, local_col);
807 assert(cit != cit1);
808 assert(*cit == local_col);
809 std::size_t d = std::distance(_cols.begin(), cit);
810 _unpack_pos.push_back(d);
811 }
812
813 _unpack_pos.shrink_to_fit();
814}
815//-----------------------------------------------------------------------------
816
// The matrix A is distributed across P processes by blocks of rows:
//  A = |  A_0  |
//      |  A_1  |
//      |  ...  |
//      | A_P-1 |
//
// Each submatrix A_i is owned by a single process "i" and can be further
// decomposed into diagonal (Ai[0]) and off-diagonal (Ai[1]) blocks:
//  Ai = |Ai[0] Ai[1]|
//
// If A is square, the diagonal block Ai[0] is also square and contains
// only owned columns and rows. The block Ai[1] contains ghost columns
// (unowned dofs).

// Likewise, a local vector x can be decomposed into owned and ghost blocks:
// xi = | x[0] |
//      | x[1] |
//
// So the product y = Ax can be computed in two separate steps:
//  y[0] = |Ai[0] Ai[1]| | x[0] | = Ai[0] x[0] + Ai[1] x[1]
//                       | x[1] |
//
841template <typename Scalar, typename V, typename W, typename X>
844{
845 // start communication (update ghosts)
847
848 std::int32_t nrowslocal = num_owned_rows();
849 std::span<const std::int64_t> Arow_ptr(row_ptr().data(), nrowslocal + 1);
850 std::span<const std::int32_t> Acols(cols().data(), Arow_ptr[nrowslocal]);
851 std::span<const std::int64_t> Aoff_diag_offset(off_diag_offset().data(),
852 nrowslocal);
853 std::span<const Scalar> Avalues(values().data(), Arow_ptr[nrowslocal]);
854
855 std::span<const Scalar> _x = x.array();
856 std::span<Scalar> _y = y.array();
857
858 std::span<const std::int64_t> Arow_begin(Arow_ptr.data(), nrowslocal);
859 std::span<const std::int64_t> Arow_end(Arow_ptr.data() + 1, nrowslocal);
860
861 // First stage: spmv - diagonal
862 // yi[0] += Ai[0] * xi[0]
863 if (_bs[1] == 1)
864 {
865 impl::spmv<Scalar, 1>(Avalues, Arow_begin, Aoff_diag_offset, Acols, _x, _y,
866 _bs[0], 1);
867 }
868 else
869 {
870 impl::spmv<Scalar, -1>(Avalues, Arow_begin, Aoff_diag_offset, Acols, _x, _y,
871 _bs[0], _bs[1]);
872 }
873
874 // finalize ghost update
875 x.scatter_fwd_end();
876
877 // Second stage: spmv - off-diagonal
878 // yi[0] += Ai[1] * xi[1]
879 if (_bs[1] == 1)
880 {
881 impl::spmv<Scalar, 1>(Avalues, Aoff_diag_offset, Arow_end, Acols, _x, _y,
882 _bs[0], 1);
883 }
884 else
885 {
886 impl::spmv<Scalar, -1>(Avalues, Aoff_diag_offset, Arow_end, Acols, _x, _y,
887 _bs[0], _bs[1]);
888 }
889}
890
891} // namespace dolfinx::la
A duplicate MPI communicator and manage lifetime of the communicator.
Definition MPI.h:42
const container_type & values() const
Get local values (const version).
Definition MatrixCSR.h:516
std::shared_ptr< const common::IndexMap > index_map(int dim) const
Index map for the row or column space.
Definition MatrixCSR.h:505
const rowptr_container_type & off_diag_offset() const
Get the start of off-diagonal (unowned columns) on each row, allowing the matrix to be split (virtual...
Definition MatrixCSR.h:535
void set(std::span< const value_type > x, std::span< const std::int32_t > rows, std::span< const std::int32_t > cols)
Set values in the matrix.
Definition MatrixCSR.h:261
MatrixCSR(const MatrixCSR< Scalar0, Container0, ColContainer0, RowPtrContainer0 > &A)
Copy-convert matrix, possibly using to different container types.
Definition MatrixCSR.h:220
RowPtrContainer rowptr_container_type
Row pointer container type.
Definition MatrixCSR.h:90
void scatter_rev_end()
End transfer of ghost row data to owning ranks.
Definition MatrixCSR.h:441
container_type & values()
Get local data values.
Definition MatrixCSR.h:512
auto mat_add_values()
Insertion functor for adding values to a matrix. It is typically used in finite element assembly func...
Definition MatrixCSR.h:158
BlockMode block_mode() const
Get 'block mode'.
Definition MatrixCSR.h:545
void add(std::span< const value_type > x, std::span< const std::int32_t > rows, std::span< const std::int32_t > cols)
Accumulate values in the matrix.
Definition MatrixCSR.h:306
std::int32_t num_owned_rows() const
Number of local rows excluding ghost rows.
Definition MatrixCSR.h:333
ColContainer column_container_type
Column index container type.
Definition MatrixCSR.h:87
MatrixCSR(MatrixCSR &&A)=default
void mult(Vector< value_type > &x, Vector< value_type > &y)
Compute the product y += Ax.
Definition MatrixCSR.h:842
MatrixCSR(const T &p, BlockMode mode=BlockMode::compact)
Create a distributed matrix.
double squared_norm() const
Compute the Frobenius norm squared across all processes.
Definition MatrixCSR.h:468
void scatter_rev()
Transfer ghost row data to the owning ranks accumulating received values on the owned rows,...
Definition MatrixCSR.h:379
Container container_type
Matrix entries container type.
Definition MatrixCSR.h:84
Scalar value_type
Scalar type.
Definition MatrixCSR.h:81
void scatter_rev_begin()
Begin transfer of ghost row data to owning ranks, where it will be accumulated into existing owned ro...
Definition MatrixCSR.h:394
const column_container_type & cols() const
Definition MatrixCSR.h:524
void set(value_type x)
Set all non-zero local entries to a value, including entries in ghost rows.
Definition MatrixCSR.h:239
std::array< int, 2 > block_size() const
Get block sizes.
Definition MatrixCSR.h:542
std::int32_t num_all_rows() const
Number of local rows including ghost rows.
Definition MatrixCSR.h:336
const rowptr_container_type & row_ptr() const
Get local row pointers.
Definition MatrixCSR.h:520
std::vector< value_type > to_dense() const
Copy to a dense matrix.
Definition MatrixCSR.h:347
MPI_Comm comm() const
Get MPI communicator that matrix is defined on.
Definition MatrixCSR.h:496
MatrixCSR(const MatrixCSR &A)=default
auto mat_set_values()
Insertion functor for setting values in a matrix. It is typically used in finite element assembly fun...
Definition MatrixCSR.h:116
A vector that can be distributed across processes.
Definition Vector.h:50
container_type & array()
Get the process-local part of the vector.
Definition Vector.h:387
void scatter_fwd_begin(U pack, GetPtr get_ptr)
Begin scatter (send) of local data that is ghosted on other processes.
Definition Vector.h:219
void scatter_fwd_end(U unpack)
End scatter (send) of local data values that are ghosted on other processes.
Definition Vector.h:256
Definition MatrixCSR.h:25
MPI_Datatype mpi_t
Retrieves the MPI data type associated to the provided type.
Definition MPI.h:280
void check_error(MPI_Comm comm, int code)
Check MPI error code. If the error code is not equal to MPI_SUCCESS, then std::abort is called.
Definition MPI.cpp:80
int size(MPI_Comm comm)
Definition MPI.cpp:72
int rank(MPI_Comm comm)
Return process rank for the communicator.
Definition MPI.cpp:64
constexpr std::array< std::int64_t, 2 > local_range(int rank, std::int64_t N, int size)
Return local range for the calling process, partitioning the global [0, N - 1] range across all ranks...
Definition MPI.h:89
Linear algebra interface.
Definition dolfinx_la.h:7
BlockMode
Modes for representing block structured matrices.
Definition MatrixCSR.h:46
@ expanded
Definition MatrixCSR.h:49
auto norm(const V &x, Norm type=Norm::l2)
Compute the norm of the vector.
Definition Vector.h:477