DOLFINx 0.10.0.0
DOLFINx C++ interface
MatrixCSR.h
// Copyright (C) 2021-2022 Garth N. Wells and Chris N. Richardson
//
// This file is part of DOLFINx (https://www.fenicsproject.org)
//
// SPDX-License-Identifier: LGPL-3.0-or-later

#pragma once

#include "SparsityPattern.h"
#include "Vector.h"
#include "matrix_csr_impl.h"
#include <algorithm>
#include <dolfinx/common/IndexMap.h>
#include <dolfinx/common/MPI.h>
#include <dolfinx/graph/AdjacencyList.h>
#include <mpi.h>
#include <numeric>
#include <span>
#include <utility>
#include <vector>
namespace dolfinx::la
{
/// Modes for representing block structured matrices.
enum class BlockMode : int
{
  compact = 0,  ///< Each entry in the sparsity pattern of the matrix
                ///< refers to a block of data of size (bs[0], bs[1])
  expanded = 1  ///< The sparsity pattern is expanded by (bs[0], bs[1]),
                ///< so each matrix entry refers to one data item and the
                ///< stored matrix has block size (1, 1)
};

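// Illustrative sketch (not part of the interface; assumes a
// SparsityPattern `p` with block size (2, 2) built elsewhere). Compact
// mode stores one column index per 2x2 block; expanded mode unrolls the
// pattern so each scalar entry has its own column index and the stored
// block size becomes (1, 1):
//
//   MatrixCSR<double> Ac(p);                       // BlockMode::compact
//   MatrixCSR<double> Ae(p, BlockMode::expanded);
//   // Ac.block_size() == {2, 2}, Ae.block_size() == {1, 1}
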
/// Distributed sparse matrix using compressed sparse row (CSR) storage.
template <typename Scalar, typename Container = std::vector<Scalar>,
          typename ColContainer = std::vector<std::int32_t>,
          typename RowPtrContainer = std::vector<std::int64_t>>
class MatrixCSR
{
  static_assert(std::is_same_v<typename Container::value_type, Scalar>);
  static_assert(std::is_integral_v<typename ColContainer::value_type>);
  static_assert(std::is_integral_v<typename RowPtrContainer::value_type>);

  template <typename, typename, typename, typename>
  friend class MatrixCSR;

public:
  /// Scalar type
  using value_type = Scalar;

  /// Matrix entries container type
  using container_type = Container;

  /// Column index container type
  using column_container_type = ColContainer;

  /// Row pointer container type
  using rowptr_container_type = RowPtrContainer;

  /// Insertion functor for setting values in a matrix. It is typically
  /// used in finite element assembly functions.
  /// @return Function for inserting values into the matrix.
  template <int BS0 = 1, int BS1 = 1>
  auto mat_set_values()
  {
    if ((BS0 != _bs[0] and BS0 > 1 and _bs[0] > 1)
        or (BS1 != _bs[1] and BS1 > 1 and _bs[1] > 1))
    {
      throw std::runtime_error(
          "Cannot insert blocks of different size than matrix block size");
    }

    return [&](std::span<const std::int32_t> rows,
               std::span<const std::int32_t> cols,
               std::span<const value_type> data) -> int
    {
      this->set<BS0, BS1>(data, rows, cols);
      return 0;
    };
  }

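  // Usage sketch (illustrative, not part of the interface): for a
  // matrix `A` with block size (1, 1), the returned functor can be
  // passed to an assembly routine or called directly:
  //
  //   auto set_fn = A.mat_set_values();
  //   std::array<std::int32_t, 1> rows{0}, cols{2};
  //   std::array<double, 1> vals{10.0};
  //   set_fn(rows, cols, vals); // sets A(0, 2) = 10; the entry must
  //                             // exist in the sparsity pattern
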
  /// Insertion functor for adding values to a matrix. It is typically
  /// used in finite element assembly functions.
  /// @return Function for inserting values into the matrix.
  template <int BS0 = 1, int BS1 = 1>
  auto mat_add_values()
  {
    if ((BS0 != _bs[0] and BS0 > 1 and _bs[0] > 1)
        or (BS1 != _bs[1] and BS1 > 1 and _bs[1] > 1))
    {
      throw std::runtime_error(
          "Cannot insert blocks of different size than matrix block size");
    }

    return [&](std::span<const std::int32_t> rows,
               std::span<const std::int32_t> cols,
               std::span<const value_type> data) -> int
    {
      this->add<BS0, BS1>(data, rows, cols);
      return 0;
    };
  }

  /// Create a distributed matrix from a sparsity pattern.
  /// @param[in] p Sparsity pattern that describes the non-zero structure.
  /// @param[in] mode Block mode (compact or expanded).
  MatrixCSR(const SparsityPattern& p, BlockMode mode = BlockMode::compact);

  /// Move constructor.
  /// @param[in] A Matrix to be moved.
  MatrixCSR(MatrixCSR&& A) = default;

  /// Copy constructor.
  /// @param[in] A Matrix to be copied.
  MatrixCSR(const MatrixCSR& A) = default;

  /// Copy-convert matrix, possibly to different container types.
  /// @param[in] A Matrix to be copy-converted.
  template <typename Scalar0, typename Container0, typename ColContainer0,
            typename RowPtrContainer0>
  explicit MatrixCSR(
      const MatrixCSR<Scalar0, Container0, ColContainer0, RowPtrContainer0>& A)
      : _index_maps(A._index_maps), _block_mode(A.block_mode()),
        _bs(A.block_size()), _data(A._data.begin(), A._data.end()),
        _cols(A.cols().begin(), A.cols().end()),
        _row_ptr(A.row_ptr().begin(), A.row_ptr().end()),
        _off_diagonal_offset(A.off_diag_offset().begin(),
                             A.off_diag_offset().end()),
        _comm(A.comm()), _request(MPI_REQUEST_NULL), _unpack_pos(A._unpack_pos),
        _val_send_disp(A._val_send_disp), _val_recv_disp(A._val_recv_disp),
        _ghost_row_to_rank(A._ghost_row_to_rank)
  {
  }

  /// Set all non-zero local entries to a value, including entries in
  /// ghost rows.
  /// @param[in] x Value to set all non-zero entries to.
  [[deprecated("Use std::ranges::fill(A.values(), v) instead.")]]
  void set(value_type x)
  {
    std::ranges::fill(_data, x);
  }

  /// Set values in the matrix. Only entries included in the sparsity
  /// pattern used to initialize the matrix can be set.
  /// @param[in] x Dense block of values (row-major) to set.
  /// @param[in] rows Row indices of `x`.
  /// @param[in] cols Column indices of `x`.
  template <int BS0, int BS1>
  void set(std::span<const value_type> x, std::span<const std::int32_t> rows,
           std::span<const std::int32_t> cols)
  {
    auto set_fn = [](value_type& y, const value_type& x) { y = x; };

    std::int32_t num_rows
        = _index_maps[0]->size_local() + _index_maps[0]->num_ghosts();
    assert(x.size() == rows.size() * cols.size() * BS0 * BS1);
    if (_bs[0] == BS0 and _bs[1] == BS1)
    {
      impl::insert_csr<BS0, BS1>(_data, _cols, _row_ptr, x, rows, cols, set_fn,
                                 num_rows);
    }
    else if (_bs[0] == 1 and _bs[1] == 1)
    {
      // Set blocked data (BS0, BS1) in a regular CSR matrix
      // (_bs[0] = _bs[1] = 1) that has the correct expanded sparsity
      impl::insert_blocked_csr<BS0, BS1>(_data, _cols, _row_ptr, x, rows, cols,
                                         set_fn, num_rows);
    }
    else
    {
      assert(BS0 == 1 and BS1 == 1);
      // Set non-blocked data (BS0 = BS1 = 1) in a blocked CSR matrix
      impl::insert_nonblocked_csr(_data, _cols, _row_ptr, x, rows, cols, set_fn,
                                  num_rows, _bs[0], _bs[1]);
    }
  }

  /// Accumulate values in the matrix. Only entries included in the
  /// sparsity pattern used to initialize the matrix can be accumulated
  /// into.
  /// @param[in] x Dense block of values (row-major) to add.
  /// @param[in] rows Row indices of `x`.
  /// @param[in] cols Column indices of `x`.
  template <int BS0 = 1, int BS1 = 1>
  void add(std::span<const value_type> x, std::span<const std::int32_t> rows,
           std::span<const std::int32_t> cols)
  {
    auto add_fn = [](value_type& y, const value_type& x) { y += x; };

    assert(x.size() == rows.size() * cols.size() * BS0 * BS1);
    if (_bs[0] == BS0 and _bs[1] == BS1)
    {
      impl::insert_csr<BS0, BS1>(_data, _cols, _row_ptr, x, rows, cols, add_fn,
                                 _row_ptr.size());
    }
    else if (_bs[0] == 1 and _bs[1] == 1)
    {
      // Add blocked data (BS0, BS1) to a regular CSR matrix
      // (_bs[0] = _bs[1] = 1) that has the correct expanded sparsity
      impl::insert_blocked_csr<BS0, BS1>(_data, _cols, _row_ptr, x, rows, cols,
                                         add_fn, _row_ptr.size());
    }
    else
    {
      assert(BS0 == 1 and BS1 == 1);
      // Add non-blocked data (BS0 = BS1 = 1) to a blocked CSR matrix
      impl::insert_nonblocked_csr(_data, _cols, _row_ptr, x, rows, cols, add_fn,
                                  _row_ptr.size(), _bs[0], _bs[1]);
    }
  }

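  // Usage sketch (illustrative): adding one 2x2 block to a matrix with
  // block size (2, 2). `x` holds the block values in row-major order:
  //
  //   std::array<std::int32_t, 1> rows{1}, cols{3};
  //   std::array<double, 4> x{1.0, 2.0,
  //                           3.0, 4.0};
  //   A.add<2, 2>(x, rows, cols); // accumulates into block (1, 3)
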
  /// Number of local rows excluding ghost rows.
  std::int32_t num_owned_rows() const { return _index_maps[0]->size_local(); }

  /// Number of local rows including ghost rows.
  std::int32_t num_all_rows() const { return _row_ptr.size() - 1; }

  /// Copy to a dense matrix.
  /// @note Ghost rows are included in the returned data.
  /// @return Row-major dense copy of the rows held on the calling rank.
  std::vector<value_type> to_dense() const
  {
    const std::size_t nrows = num_all_rows();
    const std::size_t ncols = _index_maps[1]->size_global();
    std::vector<value_type> A(nrows * ncols * _bs[0] * _bs[1], 0.0);
    for (std::size_t r = 0; r < nrows; ++r)
    {
      for (std::int32_t j = _row_ptr[r]; j < _row_ptr[r + 1]; ++j)
      {
        for (int i0 = 0; i0 < _bs[0]; ++i0)
        {
          for (int i1 = 0; i1 < _bs[1]; ++i1)
          {
            std::array<std::int32_t, 1> local_col{_cols[j]};
            std::array<std::int64_t, 1> global_col{0};
            _index_maps[1]->local_to_global(local_col, global_col);
            A[(r * _bs[0] + i0) * ncols * _bs[1] + global_col[0] * _bs[1] + i1]
                = _data[j * _bs[0] * _bs[1] + i0 * _bs[1] + i1];
          }
        }
      }
    }

    return A;
  }

  /// Transfer ghost row data to the owning ranks, accumulating the
  /// received values on the owned rows and zeroing the ghost rows.
  void scatter_rev()
  {
    scatter_rev_begin();
    scatter_rev_end();
  }

  /// Begin transfer of ghost row data to the owning ranks, where it
  /// will be accumulated into the existing owned rows.
  /// @note Must be followed by MatrixCSR::scatter_rev_end(). Matrix
  /// values must not be changed between the two calls.
  void scatter_rev_begin()
  {
    const std::int32_t local_size0 = _index_maps[0]->size_local();
    const std::int32_t num_ghosts0 = _index_maps[0]->num_ghosts();
    const int bs2 = _bs[0] * _bs[1];

    // For each ghost row, pack values to send to the owning rank in the
    // neighbourhood
    std::vector<int> insert_pos = _val_send_disp;
    _ghost_value_data.resize(_val_send_disp.back());
    for (int i = 0; i < num_ghosts0; ++i)
    {
      int rank = _ghost_row_to_rank[i];

      // Get position in send buffer to place data to send to this
      // neighbour
      std::int32_t val_pos = insert_pos[rank];
      std::copy(std::next(_data.data(), _row_ptr[local_size0 + i] * bs2),
                std::next(_data.data(), _row_ptr[local_size0 + i + 1] * bs2),
                std::next(_ghost_value_data.begin(), val_pos));
      insert_pos[rank]
          += bs2 * (_row_ptr[local_size0 + i + 1] - _row_ptr[local_size0 + i]);
    }

    _ghost_value_data_in.resize(_val_recv_disp.back());

    // Compute data sizes for send and receive from displacements
    std::vector<int> val_send_count(_val_send_disp.size() - 1);
    std::adjacent_difference(std::next(_val_send_disp.begin()),
                             _val_send_disp.end(), val_send_count.begin());

    std::vector<int> val_recv_count(_val_recv_disp.size() - 1);
    std::adjacent_difference(std::next(_val_recv_disp.begin()),
                             _val_recv_disp.end(), val_recv_count.begin());

    int status = MPI_Ineighbor_alltoallv(
        _ghost_value_data.data(), val_send_count.data(), _val_send_disp.data(),
        dolfinx::MPI::mpi_t<value_type>, _ghost_value_data_in.data(),
        val_recv_count.data(), _val_recv_disp.data(),
        dolfinx::MPI::mpi_t<value_type>, _comm.comm(), &_request);
    dolfinx::MPI::check_error(_comm.comm(), status);
  }

  /// End transfer of ghost row data to the owning ranks.
  /// @note Must be preceded by MatrixCSR::scatter_rev_begin(). Ghost
  /// rows are zeroed by this function.
  void scatter_rev_end()
  {
    int status = MPI_Wait(&_request, MPI_STATUS_IGNORE);
    dolfinx::MPI::check_error(_comm.comm(), status);

    _ghost_value_data.clear();
    _ghost_value_data.shrink_to_fit();

    // Add received data to owned rows
    int bs2 = _bs[0] * _bs[1];
    assert(_ghost_value_data_in.size() == _unpack_pos.size() * bs2);
    for (std::size_t i = 0; i < _unpack_pos.size(); ++i)
      for (int j = 0; j < bs2; ++j)
        _data[_unpack_pos[i] * bs2 + j] += _ghost_value_data_in[i * bs2 + j];

    _ghost_value_data_in.clear();
    _ghost_value_data_in.shrink_to_fit();

    // Set ghost row data to zero
    std::int32_t local_size0 = _index_maps[0]->size_local();
    std::fill(std::next(_data.begin(), _row_ptr[local_size0] * bs2),
              _data.end(), 0);
  }

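  // Usage sketch (illustrative): the begin/end split lets unrelated
  // local work overlap the non-blocking neighbourhood communication:
  //
  //   A.scatter_rev_begin();
  //   // ... other local work while ghost-row data is in flight ...
  //   A.scatter_rev_end();
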
  /// Compute the Frobenius norm squared across all processes, i.e.
  /// \f$\sum_{ij} |a_{ij}|^2\f$ summed over the owned rows.
  double squared_norm() const
  {
    const std::size_t num_owned_rows = _index_maps[0]->size_local();
    const int bs2 = _bs[0] * _bs[1];
    assert(num_owned_rows < _row_ptr.size());
    double norm_sq_local = std::accumulate(
        _data.cbegin(),
        std::next(_data.cbegin(), _row_ptr[num_owned_rows] * bs2), double(0),
        [](auto norm, value_type y) { return norm + std::norm(y); });
    double norm_sq;
    MPI_Allreduce(&norm_sq_local, &norm_sq, 1, MPI_DOUBLE, MPI_SUM,
                  _comm.comm());
    return norm_sq;
  }

  /// Compute the product `y += A x`.
  /// @note `x` is not const because its ghost values are updated during
  /// the product.
  /// @param[in] x Input vector.
  /// @param[in,out] y Output vector to accumulate into.
  void mult(Vector<value_type>& x, Vector<value_type>& y);

  /// Get MPI communicator that the matrix is defined on.
  MPI_Comm comm() const { return _comm.comm(); }

  /// Index map for the row (`dim = 0`) or column (`dim = 1`) space.
  std::shared_ptr<const common::IndexMap> index_map(int dim) const
  {
    return _index_maps.at(dim);
  }

  /// Get local data values.
  container_type& values() { return _data; }

  /// Get local values (const version).
  const container_type& values() const { return _data; }

  /// Get local row pointers.
  const rowptr_container_type& row_ptr() const { return _row_ptr; }

  /// Get local column indices.
  const column_container_type& cols() const { return _cols; }

  /// Get the start of the off-diagonal (unowned columns) part of each
  /// row, allowing the matrix to be split (virtually) into two parts.
  const rowptr_container_type& off_diag_offset() const
  {
    return _off_diagonal_offset;
  }

  /// Get block sizes.
  std::array<int, 2> block_size() const { return _bs; }

  /// Get 'block mode'.
  BlockMode block_mode() const { return _block_mode; }

private:
  // Parallel distribution of the rows and columns
  std::array<std::shared_ptr<const common::IndexMap>, 2> _index_maps;

  // Block mode (compact or expanded)
  BlockMode _block_mode;

  // Block sizes
  std::array<int, 2> _bs;

  // Matrix data
  container_type _data;
  column_container_type _cols;
  rowptr_container_type _row_ptr;

  // Start of off-diagonal (unowned columns) on each row
  rowptr_container_type _off_diagonal_offset;

  // Communicator with neighbourhood (ghost -> owner communicator for
  // rows)
  dolfinx::MPI::Comm _comm;

  // -- Precomputed data for scatter_rev/update

  // Request in non-blocking communication
  MPI_Request _request;

  // Position in _data to add received data
  std::vector<std::size_t> _unpack_pos;

  // Displacements for alltoall for each neighbour when sending and
  // receiving
  std::vector<int> _val_send_disp, _val_recv_disp;

  // Ownership of each row, by neighbour (for the neighbourhood defined
  // on _comm)
  std::vector<int> _ghost_row_to_rank;

  // Temporary stores for data during non-blocking communication
  container_type _ghost_value_data;
  container_type _ghost_value_data_in;
};
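// Typical assembly workflow (illustrative sketch; `p` is a
// SparsityPattern that has been assembled and finalised elsewhere):
//
//   MatrixCSR<double> A(p);
//   auto add_fn = A.mat_add_values();
//   // ... insert local contributions: add_fn(rows, cols, vals) ...
//   A.scatter_rev();     // move ghost-row contributions to their owners
//   double nrm2 = A.squared_norm();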
//-----------------------------------------------------------------------------
template <class U, class V, class W, class X>
MatrixCSR<U, V, W, X>::MatrixCSR(const SparsityPattern& p, BlockMode mode)
    : _index_maps({p.index_map(0),
                   std::make_shared<common::IndexMap>(p.column_index_map())}),
      _block_mode(mode), _bs({p.block_size(0), p.block_size(1)}),
      _data(p.num_nonzeros() * _bs[0] * _bs[1], 0),
      _cols(p.graph().first.begin(), p.graph().first.end()),
      _row_ptr(p.graph().second.begin(), p.graph().second.end()),
      _comm(MPI_COMM_NULL)
{
  if (_block_mode == BlockMode::expanded)
  {
    // Rebuild IndexMaps
    for (int i = 0; i < 2; ++i)
    {
      auto im = _index_maps[i];
      std::int32_t size_local = im->size_local() * _bs[i];
      std::span ghost_i = im->ghosts();
      std::vector<std::int64_t> ghosts;
      const std::vector<int> ghost_owner_i(im->owners().begin(),
                                           im->owners().end());
      std::vector<int> src_rank;
      for (std::size_t j = 0; j < ghost_i.size(); ++j)
      {
        for (int k = 0; k < _bs[i]; ++k)
        {
          ghosts.push_back(ghost_i[j] * _bs[i] + k);
          src_rank.push_back(ghost_owner_i[j]);
        }
      }

      std::array<std::vector<int>, 2> src_dest0
          = {std::vector(_index_maps[i]->src().begin(),
                         _index_maps[i]->src().end()),
             std::vector(_index_maps[i]->dest().begin(),
                         _index_maps[i]->dest().end())};
      _index_maps[i] = std::make_shared<common::IndexMap>(
          _index_maps[i]->comm(), size_local, src_dest0, ghosts, src_rank);
    }

    // Convert sparsity pattern and set _bs to 1
    column_container_type new_cols;
    new_cols.reserve(_data.size());
    rowptr_container_type new_row_ptr{0};
    new_row_ptr.reserve(_row_ptr.size() * _bs[0]);
    std::span<const std::int32_t> num_diag_nnz = p.off_diagonal_offsets();
    for (std::size_t i = 0; i < _row_ptr.size() - 1; ++i)
    {
      // Repeat row _bs[0] times
      for (int q0 = 0; q0 < _bs[0]; ++q0)
      {
        _off_diagonal_offset.push_back(new_row_ptr.back()
                                       + num_diag_nnz[i] * _bs[1]);
        for (auto j = _row_ptr[i]; j < _row_ptr[i + 1]; ++j)
        {
          for (int q1 = 0; q1 < _bs[1]; ++q1)
            new_cols.push_back(_cols[j] * _bs[1] + q1);
        }
        new_row_ptr.push_back(new_cols.size());
      }
    }
    _cols = new_cols;
    _row_ptr = new_row_ptr;
    _bs[0] = 1;
    _bs[1] = 1;
  }
  else
  {
    // Compute off-diagonal offset for each row (compact mode)
    std::span<const std::int32_t> num_diag_nnz = p.off_diagonal_offsets();
    _off_diagonal_offset.reserve(num_diag_nnz.size());
    std::ranges::transform(num_diag_nnz, _row_ptr,
                           std::back_inserter(_off_diagonal_offset),
                           std::plus{});
  }

  // Some short-hand
  std::array local_size
      = {_index_maps[0]->size_local(), _index_maps[1]->size_local()};
  std::array local_range
      = {_index_maps[0]->local_range(), _index_maps[1]->local_range()};
  std::span ghosts1 = _index_maps[1]->ghosts();

  std::span ghosts0 = _index_maps[0]->ghosts();
  std::span src_ranks = _index_maps[0]->src();
  std::span dest_ranks = _index_maps[0]->dest();

  // Create neighbourhood communicator (owner <- ghost)
  MPI_Comm comm;
  MPI_Dist_graph_create_adjacent(_index_maps[0]->comm(), dest_ranks.size(),
                                 dest_ranks.data(), MPI_UNWEIGHTED,
                                 src_ranks.size(), src_ranks.data(),
                                 MPI_UNWEIGHTED, MPI_INFO_NULL, false, &comm);
  _comm = dolfinx::MPI::Comm(comm, false);

  // Build map from ghost row index position to owning (neighbourhood)
  // rank
  _ghost_row_to_rank.reserve(_index_maps[0]->owners().size());
  for (int r : _index_maps[0]->owners())
  {
    auto it = std::ranges::lower_bound(src_ranks, r);
    assert(it != src_ranks.end() and *it == r);
    std::size_t pos = std::distance(src_ranks.begin(), it);
    _ghost_row_to_rank.push_back(pos);
  }

  // Compute size of data to send to each neighbour
  std::vector<std::int32_t> data_per_proc(src_ranks.size(), 0);
  for (std::size_t i = 0; i < _ghost_row_to_rank.size(); ++i)
  {
    assert(_ghost_row_to_rank[i] < (int)data_per_proc.size());
    std::size_t pos = local_size[0] + i;
    data_per_proc[_ghost_row_to_rank[i]] += _row_ptr[pos + 1] - _row_ptr[pos];
  }

  // Compute send displacements
  _val_send_disp.resize(src_ranks.size() + 1, 0);
  std::partial_sum(data_per_proc.begin(), data_per_proc.end(),
                   std::next(_val_send_disp.begin()));

  // For each ghost row, pack the global (row, column) indices to send
  // to the owning rank in the neighbourhood
  std::vector<std::int64_t> ghost_index_data(2 * _val_send_disp.back());
  {
    std::vector<int> insert_pos = _val_send_disp;
    for (std::size_t i = 0; i < _ghost_row_to_rank.size(); ++i)
    {
      int rank = _ghost_row_to_rank[i];
      std::int32_t row_id = local_size[0] + i;
      for (int j = _row_ptr[row_id]; j < _row_ptr[row_id + 1]; ++j)
      {
        // Get position in send buffer
        std::int32_t idx_pos = 2 * insert_pos[rank];

        // Pack send data (row, col) as global indices
        ghost_index_data[idx_pos] = ghosts0[i];
        if (std::int32_t col_local = _cols[j]; col_local < local_size[1])
          ghost_index_data[idx_pos + 1] = col_local + local_range[1][0];
        else
          ghost_index_data[idx_pos + 1] = ghosts1[col_local - local_size[1]];

        insert_pos[rank] += 1;
      }
    }
  }

  // Communicate data with neighbourhood
  std::vector<std::int64_t> ghost_index_array;
  std::vector<int> recv_disp;
  {
    std::vector<int> send_sizes;
    std::ranges::transform(data_per_proc, std::back_inserter(send_sizes),
                           [](auto x) { return 2 * x; });

    std::vector<int> recv_sizes(dest_ranks.size());
    send_sizes.reserve(1);
    recv_sizes.reserve(1);
    MPI_Neighbor_alltoall(send_sizes.data(), 1, MPI_INT, recv_sizes.data(), 1,
                          MPI_INT, _comm.comm());

    // Build send/recv displacements
    std::vector<int> send_disp{0};
    std::partial_sum(send_sizes.begin(), send_sizes.end(),
                     std::back_inserter(send_disp));
    recv_disp = {0};
    std::partial_sum(recv_sizes.begin(), recv_sizes.end(),
                     std::back_inserter(recv_disp));

    ghost_index_array.resize(recv_disp.back());
    MPI_Neighbor_alltoallv(ghost_index_data.data(), send_sizes.data(),
                           send_disp.data(), MPI_INT64_T,
                           ghost_index_array.data(), recv_sizes.data(),
                           recv_disp.data(), MPI_INT64_T, _comm.comm());
  }

  // Store receive displacements for future use, when transferring
  // data values
  _val_recv_disp.resize(recv_disp.size());
  int bs2 = _bs[0] * _bs[1];
  std::ranges::transform(recv_disp, _val_recv_disp.begin(),
                         [&bs2](auto d) { return bs2 * d / 2; });
  std::ranges::transform(_val_send_disp, _val_send_disp.begin(),
                         [&bs2](auto d) { return d * bs2; });

  // Global-to-local map for ghost columns
  std::vector<std::pair<std::int64_t, std::int32_t>> global_to_local;
  global_to_local.reserve(ghosts1.size());
  for (std::int64_t idx : ghosts1)
    global_to_local.push_back({idx, global_to_local.size() + local_size[1]});
  std::ranges::sort(global_to_local);

  // Compute location in which data for each index should be stored
  // when received
  for (std::size_t i = 0; i < ghost_index_array.size(); i += 2)
  {
    // Row must be on this process
    std::int32_t local_row = ghost_index_array[i] - local_range[0][0];
    assert(local_row >= 0 and local_row < local_size[0]);

    // Column may be owned or unowned
    std::int32_t local_col = ghost_index_array[i + 1] - local_range[1][0];
    if (local_col < 0 or local_col >= local_size[1])
    {
      auto it = std::ranges::lower_bound(
          global_to_local, std::pair(ghost_index_array[i + 1], -1),
          [](auto a, auto b) { return a.first < b.first; });
      assert(it != global_to_local.end()
             and it->first == ghost_index_array[i + 1]);
      local_col = it->second;
    }
    auto cit0 = std::next(_cols.begin(), _row_ptr[local_row]);
    auto cit1 = std::next(_cols.begin(), _row_ptr[local_row + 1]);

    // Find position of column index and insert data
    auto cit = std::lower_bound(cit0, cit1, local_col);
    assert(cit != cit1);
    assert(*cit == local_col);
    std::size_t d = std::distance(_cols.begin(), cit);
    _unpack_pos.push_back(d);
  }

  _unpack_pos.shrink_to_fit();
}
//-----------------------------------------------------------------------------

// The matrix A is distributed across P processes by blocks of rows:
//  A = |  A_0  |
//      |  A_1  |
//      |  ...  |
//      | A_P-1 |
//
// Each submatrix A_i is owned by a single process "i" and can be further
// decomposed into diagonal (Ai[0]) and off-diagonal (Ai[1]) blocks:
//  Ai = |Ai[0] Ai[1]|
//
// If A is square, the diagonal block Ai[0] is also square and contains
// only owned columns and rows. The block Ai[1] contains ghost columns
// (unowned dofs).
//
// Likewise, a local vector x can be decomposed into owned and ghost
// blocks:
//  xi = | x[0] |
//       | x[1] |
//
// So the product y = Ax can be computed in two separate steps:
//  y[0] = |Ai[0] Ai[1]| | x[0] | = Ai[0] x[0] + Ai[1] x[1]
//                       | x[1] |
//
template <typename Scalar, typename V, typename W, typename X>
void MatrixCSR<Scalar, V, W, X>::mult(Vector<Scalar>& x, Vector<Scalar>& y)
{
  // Start communication (update ghost values of x)
  x.scatter_fwd_begin();

  std::int32_t nrowslocal = num_owned_rows();
  std::span<const std::int64_t> Arow_ptr(row_ptr().data(), nrowslocal + 1);
  std::span<const std::int32_t> Acols(cols().data(), Arow_ptr[nrowslocal]);
  std::span<const std::int64_t> Aoff_diag_offset(off_diag_offset().data(),
                                                 nrowslocal);
  std::span<const Scalar> Avalues(values().data(), Arow_ptr[nrowslocal]);

  std::span<const Scalar> _x = x.array();
  std::span<Scalar> _y = y.array();

  std::span<const std::int64_t> Arow_begin(Arow_ptr.data(), nrowslocal);
  std::span<const std::int64_t> Arow_end(Arow_ptr.data() + 1, nrowslocal);

  // First stage: spmv - diagonal
  // yi[0] += Ai[0] * xi[0]
  if (_bs[1] == 1)
  {
    impl::spmv<Scalar, 1>(Avalues, Arow_begin, Aoff_diag_offset, Acols, _x, _y,
                          _bs[0], 1);
  }
  else
  {
    impl::spmv<Scalar, -1>(Avalues, Arow_begin, Aoff_diag_offset, Acols, _x, _y,
                           _bs[0], _bs[1]);
  }

  // Finalize ghost update of x
  x.scatter_fwd_end();

  // Second stage: spmv - off-diagonal
  // yi[0] += Ai[1] * xi[1]
  if (_bs[1] == 1)
  {
    impl::spmv<Scalar, 1>(Avalues, Aoff_diag_offset, Arow_end, Acols, _x, _y,
                          _bs[0], 1);
  }
  else
  {
    impl::spmv<Scalar, -1>(Avalues, Aoff_diag_offset, Arow_end, Acols, _x, _y,
                           _bs[0], _bs[1]);
  }
}
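// Usage sketch (illustrative): multiply an assembled matrix by a vector
// with compatible layout, assuming index maps consistent with `A`:
//
//   Vector<double> x(A.index_map(1), A.block_size()[1]);
//   Vector<double> y(A.index_map(0), A.block_size()[0]);
//   // ... set values in x ...
//   A.mult(x, y); // y += A x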

} // namespace dolfinx::la