Note: this is documentation for an old release. View the latest documentation at docs.fenicsproject.org/dolfinx/v0.9.0/cpp/doxygen/dd/d65/MatrixCSR_8h_source.html
DOLFINx 0.6.0
DOLFINx C++ interface
MatrixCSR.h
1// Copyright (C) 2021-2022 Garth N. Wells and Chris N. Richardson
2//
3// This file is part of DOLFINx (https://www.fenicsproject.org)
4//
5// SPDX-License-Identifier: LGPL-3.0-or-later
6
7#pragma once
8
9#include "SparsityPattern.h"
10#include <dolfinx/common/IndexMap.h>
11#include <dolfinx/common/MPI.h>
12#include <dolfinx/graph/AdjacencyList.h>
13#include <mpi.h>
14#include <numeric>
15#include <span>
16#include <utility>
17#include <vector>
18
19namespace dolfinx::la
20{
21
22namespace impl
23{
36template <typename U, typename V, typename W, typename X>
37void set_csr(U&& data, const V& cols, const V& row_ptr, const W& x,
38 const X& xrows, const X& xcols,
39 [[maybe_unused]] typename X::value_type local_size)
40{
41 assert(x.size() == xrows.size() * xcols.size());
42 for (std::size_t r = 0; r < xrows.size(); ++r)
43 {
44 // Row index and current data row
45 auto row = xrows[r];
46 using T = typename W::value_type;
47 const T* xr = x.data() + r * xcols.size();
48
49#ifndef NDEBUG
50 if (row >= local_size)
51 throw std::runtime_error("Local row out of range");
52#endif
53
54 // Column indices for row
55 auto cit0 = std::next(cols.begin(), row_ptr[row]);
56 auto cit1 = std::next(cols.begin(), row_ptr[row + 1]);
57 for (std::size_t c = 0; c < xcols.size(); ++c)
58 {
59 // Find position of column index
60 auto it = std::lower_bound(cit0, cit1, xcols[c]);
61 assert(it != cit1);
62 std::size_t d = std::distance(cols.begin(), it);
63 assert(d < data.size());
64 data[d] = xr[c];
65 }
66 }
67}
68
78template <typename U, typename V, typename W, typename X>
79void add_csr(U&& data, const V& cols, const V& row_ptr, const W& x,
80 const X& xrows, const X& xcols)
81{
82 assert(x.size() == xrows.size() * xcols.size());
83 for (std::size_t r = 0; r < xrows.size(); ++r)
84 {
85 // Row index and current data row
86 auto row = xrows[r];
87 using T = typename W::value_type;
88 const T* xr = x.data() + r * xcols.size();
89
90#ifndef NDEBUG
91 if (row >= (int)row_ptr.size())
92 throw std::runtime_error("Local row out of range");
93#endif
94
96 // Column indices for row
96 auto cit0 = std::next(cols.begin(), row_ptr[row]);
97 auto cit1 = std::next(cols.begin(), row_ptr[row + 1]);
98 for (std::size_t c = 0; c < xcols.size(); ++c)
99 {
100 // Find position of column index
101 auto it = std::lower_bound(cit0, cit1, xcols[c]);
102 assert(it != cit1);
103 std::size_t d = std::distance(cols.begin(), it);
104 assert(d < data.size());
105 data[d] += xr[c];
106 }
107 }
108}
109} // namespace impl
110
125template <typename T, class Allocator = std::allocator<T>>
126class MatrixCSR
127{
128public:
130 using value_type = T;
131
133 using allocator_type = Allocator;
134
139 auto mat_set_values()
140 {
141 return [&](const std::span<const std::int32_t>& rows,
142 const std::span<const std::int32_t>& cols,
143 const std::span<const T>& data) -> int
144 {
145 this->set(data, rows, cols);
146 return 0;
147 };
148 }
149
153 auto mat_add_values()
154 {
155 return [&](const std::span<const std::int32_t>& rows,
156 const std::span<const std::int32_t>& cols,
157 const std::span<const T>& data) -> int
158 {
159 this->add(data, rows, cols);
160 return 0;
161 };
162 }
163
168 MatrixCSR(const SparsityPattern& p, const Allocator& alloc = Allocator())
169 : _index_maps({p.index_map(0),
170 std::make_shared<common::IndexMap>(p.column_index_map())}),
171 _bs({p.block_size(0), p.block_size(1)}),
172 _data(p.num_nonzeros(), 0, alloc),
173 _cols(p.graph().array().begin(), p.graph().array().end()),
174 _row_ptr(p.graph().offsets().begin(), p.graph().offsets().end()),
175 _comm(MPI_COMM_NULL)
176 {
177 // TODO: handle block sizes
178 if (_bs[0] > 1 or _bs[1] > 1)
179 throw std::runtime_error("Block size not yet supported");
180
181 // Compute off-diagonal offset for each row
182 std::span<const std::int32_t> num_diag_nnz = p.off_diagonal_offset();
183 _off_diagonal_offset.reserve(num_diag_nnz.size());
184 std::transform(num_diag_nnz.begin(), num_diag_nnz.end(), _row_ptr.begin(),
185 std::back_inserter(_off_diagonal_offset), std::plus{});
186
187 // Some short-hand
188 const std::array local_size
189 = {_index_maps[0]->size_local(), _index_maps[1]->size_local()};
190 const std::array local_range
191 = {_index_maps[0]->local_range(), _index_maps[1]->local_range()};
192 const std::vector<std::int64_t>& ghosts1 = _index_maps[1]->ghosts();
193
194 const std::vector<std::int64_t>& ghosts0 = _index_maps[0]->ghosts();
195 const std::vector<int>& src_ranks = _index_maps[0]->src();
196 const std::vector<int>& dest_ranks = _index_maps[0]->dest();
197
198 // Create neighbourhood communicator (owner <- ghost)
199 MPI_Comm comm;
200 MPI_Dist_graph_create_adjacent(_index_maps[0]->comm(), dest_ranks.size(),
201 dest_ranks.data(), MPI_UNWEIGHTED,
202 src_ranks.size(), src_ranks.data(),
203 MPI_UNWEIGHTED, MPI_INFO_NULL, false, &comm);
204 _comm = dolfinx::MPI::Comm(comm, false);
205
206 // Build map from ghost row index position to owning (neighborhood)
207 // rank
208 _ghost_row_to_rank.reserve(_index_maps[0]->owners().size());
209 for (int r : _index_maps[0]->owners())
210 {
211 auto it = std::lower_bound(src_ranks.begin(), src_ranks.end(), r);
212 assert(it != src_ranks.end() and *it == r);
213 int pos = std::distance(src_ranks.begin(), it);
214 _ghost_row_to_rank.push_back(pos);
215 }
216
217 // Compute size of data to send to each neighbor
218 std::vector<std::int32_t> data_per_proc(src_ranks.size(), 0);
219 for (std::size_t i = 0; i < _ghost_row_to_rank.size(); ++i)
220 {
221 assert(_ghost_row_to_rank[i] < data_per_proc.size());
222 std::size_t pos = local_size[0] + i;
223 data_per_proc[_ghost_row_to_rank[i]] += _row_ptr[pos + 1] - _row_ptr[pos];
224 }
225
226 // Compute send displacements
227 _val_send_disp.resize(src_ranks.size() + 1, 0);
228 std::partial_sum(data_per_proc.begin(), data_per_proc.end(),
229 std::next(_val_send_disp.begin()));
230
231 // For each ghost row, pack and send indices to neighborhood
232 std::vector<std::int64_t> ghost_index_data(2 * _val_send_disp.back());
233 {
234 std::vector<int> insert_pos = _val_send_disp;
235 for (std::size_t i = 0; i < _ghost_row_to_rank.size(); ++i)
236 {
237 const int rank = _ghost_row_to_rank[i];
238 int row_id = local_size[0] + i;
239 for (int j = _row_ptr[row_id]; j < _row_ptr[row_id + 1]; ++j)
240 {
241 // Get position in send buffer
242 const std::int32_t idx_pos = 2 * insert_pos[rank];
243
244 // Pack send data (row, col) as global indices
245 ghost_index_data[idx_pos] = ghosts0[i];
246 if (std::int32_t col_local = _cols[j]; col_local < local_size[1])
247 ghost_index_data[idx_pos + 1] = col_local + local_range[1][0];
248 else
249 ghost_index_data[idx_pos + 1] = ghosts1[col_local - local_size[1]];
250
251 insert_pos[rank] += 1;
252 }
253 }
254 }
255
256 // Communicate data with neighborhood
257 std::vector<std::int64_t> ghost_index_array;
258 std::vector<int> recv_disp;
259 {
260 std::vector<int> send_sizes;
261 std::transform(data_per_proc.begin(), data_per_proc.end(),
262 std::back_inserter(send_sizes),
263 [](auto x) { return 2 * x; });
264
265 std::vector<int> recv_sizes(dest_ranks.size());
266 send_sizes.reserve(1);
267 recv_sizes.reserve(1);
268 MPI_Neighbor_alltoall(send_sizes.data(), 1, MPI_INT, recv_sizes.data(), 1,
269 MPI_INT, _comm.comm());
270
271 // Build send/recv displacement
272 std::vector<int> send_disp = {0};
273 std::partial_sum(send_sizes.begin(), send_sizes.end(),
274 std::back_inserter(send_disp));
275 recv_disp = {0};
276 std::partial_sum(recv_sizes.begin(), recv_sizes.end(),
277 std::back_inserter(recv_disp));
278
279 ghost_index_array.resize(recv_disp.back());
280 MPI_Neighbor_alltoallv(ghost_index_data.data(), send_sizes.data(),
281 send_disp.data(), MPI_INT64_T,
282 ghost_index_array.data(), recv_sizes.data(),
283 recv_disp.data(), MPI_INT64_T, _comm.comm());
284 }
285
286 // Store receive displacements for future use, when transferring
287 // data values
288 _val_recv_disp.resize(recv_disp.size());
289 std::transform(recv_disp.begin(), recv_disp.end(), _val_recv_disp.begin(),
290 [](int d) { return d / 2; });
291
292 // Global-to-local map for ghost columns
293 std::vector<std::pair<std::int64_t, std::int32_t>> global_to_local;
294 global_to_local.reserve(ghosts1.size());
295 std::int32_t local_i = local_size[1];
296 for (std::int64_t idx : ghosts1)
297 global_to_local.push_back({idx, global_to_local.size() + local_size[1]});
298 std::sort(global_to_local.begin(), global_to_local.end());
299
300 // Compute location in which data for each index should be stored
301 // when received
302 for (std::size_t i = 0; i < ghost_index_array.size(); i += 2)
303 {
304 // Row must be on this process
305 const std::int32_t local_row = ghost_index_array[i] - local_range[0][0];
306 assert(local_row >= 0 and local_row < local_size[0]);
307
308 // Column may be owned or unowned
309 std::int32_t local_col = ghost_index_array[i + 1] - local_range[1][0];
310 if (local_col < 0 or local_col >= local_size[1])
311 {
312 const auto it = std::lower_bound(
313 global_to_local.begin(), global_to_local.end(),
314 std::pair(ghost_index_array[i + 1], -1),
315 [](auto& a, auto& b) { return a.first < b.first; });
316 assert(it != global_to_local.end()
317 and it->first == ghost_index_array[i + 1]);
318 local_col = it->second;
319 }
320 auto cit0 = std::next(_cols.begin(), _row_ptr[local_row]);
321 auto cit1 = std::next(_cols.begin(), _row_ptr[local_row + 1]);
322
323 // Find position of column index and insert data
324 auto cit = std::lower_bound(cit0, cit1, local_col);
325 assert(cit != cit1);
326 assert(*cit == local_col);
327 std::size_t d = std::distance(_cols.begin(), cit);
328 _unpack_pos.push_back(d);
329 }
330 }
331
334 MatrixCSR(MatrixCSR&& A) = default;
335
339 void set(T x) { std::fill(_data.begin(), _data.end(), x); }
340
353 void set(const std::span<const T>& x,
354 const std::span<const std::int32_t>& rows,
355 const std::span<const std::int32_t>& cols)
356 {
357 impl::set_csr(_data, _cols, _row_ptr, x, rows, cols,
358 _index_maps[0]->size_local());
359 }
360
373 void add(const std::span<const T>& x,
374 const std::span<const std::int32_t>& rows,
375 const std::span<const std::int32_t>& cols)
376 {
377 impl::add_csr(_data, _cols, _row_ptr, x, rows, cols);
378 }
379
381 std::int32_t num_owned_rows() const { return _index_maps[0]->size_local(); }
382
384 std::int32_t num_all_rows() const { return _row_ptr.size() - 1; }
385
393 std::vector<T> to_dense() const
394 {
395 const std::size_t nrows = num_all_rows();
396 const std::size_t ncols
397 = _index_maps[1]->size_local() + _index_maps[1]->num_ghosts();
398 std::vector<T> A(nrows * ncols);
399 for (std::size_t r = 0; r < nrows; ++r)
400 for (std::int32_t j = _row_ptr[r]; j < _row_ptr[r + 1]; ++j)
401 A[r * ncols + _cols[j]] = _data[j];
402
403 return A;
404 }
405
409 void finalize()
410 {
411 finalize_begin();
412 finalize_end();
413 }
414
422 void finalize_begin()
423 {
424 const std::int32_t local_size0 = _index_maps[0]->size_local();
425 const std::int32_t num_ghosts0 = _index_maps[0]->num_ghosts();
426
427 // For each ghost row, pack the values to send to the neighborhood
428 std::vector<int> insert_pos = _val_send_disp;
429 std::vector<T> ghost_value_data(_val_send_disp.back());
430 for (int i = 0; i < num_ghosts0; ++i)
431 {
432 const int rank = _ghost_row_to_rank[i];
433
434 // Get position in send buffer to place data to send to this
435 // neighbour
436 const std::int32_t val_pos = insert_pos[rank];
437 std::copy(std::next(_data.data(), _row_ptr[local_size0 + i]),
438 std::next(_data.data(), _row_ptr[local_size0 + i + 1]),
439 std::next(ghost_value_data.begin(), val_pos));
440 insert_pos[rank]
441 += _row_ptr[local_size0 + i + 1] - _row_ptr[local_size0 + i];
442 }
443
444 _ghost_value_data_in.resize(_val_recv_disp.back());
445
446 // Compute data sizes for send and receive from displacements
447 std::vector<int> val_send_count(_val_send_disp.size() - 1);
448 std::adjacent_difference(std::next(_val_send_disp.begin()),
449 _val_send_disp.end(), val_send_count.begin());
450
451 std::vector<int> val_recv_count(_val_recv_disp.size() - 1);
452 std::adjacent_difference(std::next(_val_recv_disp.begin()),
453 _val_recv_disp.end(), val_recv_count.begin());
454
455 int status = MPI_Ineighbor_alltoallv(
456 ghost_value_data.data(), val_send_count.data(), _val_send_disp.data(),
457 dolfinx::MPI::mpi_type<T>(), _ghost_value_data_in.data(),
458 val_recv_count.data(), _val_recv_disp.data(),
459 dolfinx::MPI::mpi_type<T>(), _comm.comm(), &_request);
460 assert(status == MPI_SUCCESS);
461 }
462
468 void finalize_end()
469 {
470 int status = MPI_Wait(&_request, MPI_STATUS_IGNORE);
471 assert(status == MPI_SUCCESS);
472
473 // Add to local rows
474 assert(_ghost_value_data_in.size() == _unpack_pos.size());
475 for (std::size_t i = 0; i < _ghost_value_data_in.size(); ++i)
476 _data[_unpack_pos[i]] += _ghost_value_data_in[i];
477
478 // Set ghost row data to zero
479 const std::int32_t local_size0 = _index_maps[0]->size_local();
480 std::fill(std::next(_data.begin(), _row_ptr[local_size0]), _data.end(), 0);
481 }
482
484 double norm_squared() const
485 {
486 const std::size_t num_owned_rows = _index_maps[0]->size_local();
487 assert(num_owned_rows < _row_ptr.size());
488
489 const double norm_sq_local = std::accumulate(
490 _data.cbegin(), std::next(_data.cbegin(), _row_ptr[num_owned_rows]),
491 double(0), [](double norm, T y) { return norm + std::norm(y); });
492 double norm_sq;
493 MPI_Allreduce(&norm_sq_local, &norm_sq, 1, MPI_DOUBLE, MPI_SUM,
494 _comm.comm());
495 return norm_sq;
496 }
497
504 const std::array<std::shared_ptr<const common::IndexMap>, 2>&
505 index_maps() const
506 {
507 return _index_maps;
508 }
509
512 std::vector<T>& values() { return _data; }
513
516 const std::vector<T>& values() const { return _data; }
517
520 const std::vector<std::int32_t>& row_ptr() const { return _row_ptr; }
521
524 const std::vector<std::int32_t>& cols() const { return _cols; }
525
533 const std::vector<std::int32_t>& off_diag_offset() const
534 {
535 return _off_diagonal_offset;
536 }
537
538private:
539 // Maps for the distribution of the rows and columns
540 std::array<std::shared_ptr<const common::IndexMap>, 2> _index_maps;
541
542 // Block sizes
543 std::array<int, 2> _bs;
544
545 // Matrix data
546 std::vector<T, Allocator> _data;
547 std::vector<std::int32_t> _cols, _row_ptr;
548
549 // Start of off-diagonal (unowned columns) on each row
550 std::vector<std::int32_t> _off_diagonal_offset;
551
552 // Neighborhood communicator (ghost->owner communicator for rows)
553 dolfinx::MPI::Comm _comm;
554
555 // -- Precomputed data for finalize/update
556
557 // Request in non-blocking communication
558 MPI_Request _request;
559
560 // Position in _data to add received data
561 std::vector<int> _unpack_pos;
562
563 // Displacements for alltoall for each neighbor when sending and
564 // receiving
565 std::vector<int> _val_send_disp, _val_recv_disp;
566
567 // Ownership of each row, by neighbor (for the neighbourhood defined
568 // on _comm)
569 std::vector<int> _ghost_row_to_rank;
570
571 // Temporary store for finalize data during non-blocking communication
572 std::vector<T> _ghost_value_data_in;
573};
574
575} // namespace dolfinx::la
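A minimal usage sketch (not part of the DOLFINx sources), assuming an assembled SparsityPattern sp built elsewhere; it exercises construction, local insertion, ghost-row finalisation and the Frobenius norm:

#include <cmath>
#include <cstdint>
#include <vector>
#include <dolfinx/la/MatrixCSR.h>
#include <dolfinx/la/SparsityPattern.h>

// Sketch only: `sp` is an assembled dolfinx::la::SparsityPattern created
// elsewhere (e.g. from the dofmaps of a bilinear form)
void example(const dolfinx::la::SparsityPattern& sp)
{
  dolfinx::la::MatrixCSR<double> A(sp); // allocate storage from the pattern
  A.set(0.0);                           // zero all stored entries

  // Accumulate a 2x2 block into local rows/columns {0, 1}; these entries
  // must exist in the sparsity pattern
  std::vector<double> block = {1.0, 2.0, 3.0, 4.0};
  std::vector<std::int32_t> rows = {0, 1}, cols = {0, 1};
  A.add(block, rows, cols);

  // Send contributions made to ghost rows to their owning ranks
  A.finalize();

  double frobenius = std::sqrt(A.norm_squared());
  (void)frobenius;
}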
dolfinx::MPI::Comm
A duplicate MPI communicator that manages the lifetime of the communicator.
Definition: MPI.h:42
dolfinx::la::MatrixCSR
Distributed sparse matrix.
Definition: MatrixCSR.h:127
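To illustrate the storage convention used in this file, here is a small self-contained sketch (plain C++, no DOLFINx dependencies) of the row_ptr/cols/data layout and the std::lower_bound lookup performed by impl::set_csr and impl::add_csr; those calls rely on the column indices within each row being sorted:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <vector>

int main()
{
  // 3x3 matrix with nonzeros at (0,0), (0,2), (1,1), (2,0), (2,2)
  std::vector<std::int32_t> row_ptr = {0, 2, 3, 5};
  std::vector<std::int32_t> cols = {0, 2, 1, 0, 2};
  std::vector<double> data(cols.size(), 0.0);

  // Accumulate 10.0 into entry (0, 2): binary search the sorted column
  // indices of row 0, as impl::add_csr does
  std::int32_t r = 0, c = 2;
  auto it0 = std::next(cols.begin(), row_ptr[r]);
  auto it1 = std::next(cols.begin(), row_ptr[r + 1]);
  auto it = std::lower_bound(it0, it1, c);
  assert(it != it1 and *it == c);
  data[std::distance(cols.begin(), it)] += 10.0;
  return 0;
}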
auto mat_set_values()
Insertion functor for setting values in matrix. It is typically used in finite element assembly funct...
Definition: MatrixCSR.h:139
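The callable returned here (and by mat_add_values()) takes (rows, cols, values) as spans and returns int, so assembly code can be written against the callable rather than the matrix type. A sketch with a hypothetical helper set_identity_block, not part of DOLFINx:

#include <cstddef>
#include <cstdint>
#include <span>
#include <vector>

// Hypothetical kernel: writes an identity block on the given dofs through an
// insertion callable with the signature of MatrixCSR::mat_set_values()
template <typename Inserter>
void set_identity_block(Inserter insert, std::span<const std::int32_t> dofs)
{
  std::vector<double> Ae(dofs.size() * dofs.size(), 0.0);
  for (std::size_t i = 0; i < dofs.size(); ++i)
    Ae[i * dofs.size() + i] = 1.0; // 1.0 on the diagonal
  insert(dofs, dofs, Ae);          // rows, cols, values
}

// Usage (A is a dolfinx::la::MatrixCSR<double>, dofs are local indices):
//   set_identity_block(A.mat_set_values(), dofs);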
void set(const std::span< const T > &x, const std::span< const std::int32_t > &rows, const std::span< const std::int32_t > &cols)
Set values in the matrix.
Definition: MatrixCSR.h:353
Allocator allocator_type
The allocator type.
Definition: MatrixCSR.h:133
void finalize()
Transfer ghost row data to the owning ranks accumulating received values on the owned rows,...
Definition: MatrixCSR.h:409
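finalize() is finalize_begin() followed by finalize_end(); keeping the two calls separate lets unrelated local work overlap the non-blocking neighbourhood communication. A sketch, where do_unrelated_local_work is a hypothetical placeholder:

#include <dolfinx/la/MatrixCSR.h>

void do_unrelated_local_work(); // hypothetical; must not touch A

void overlap_finalize(dolfinx::la::MatrixCSR<double>& A)
{
  A.finalize_begin();        // start MPI_Ineighbor_alltoallv of ghost-row data
  do_unrelated_local_work(); // overlap local work with communication
  A.finalize_end();          // wait, accumulate into owned rows, zero ghost rows
}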
const std::vector< std::int32_t > & cols() const
Get local column indices.
Definition: MatrixCSR.h:524
std::int32_t num_owned_rows() const
Number of local rows excluding ghost rows.
Definition: MatrixCSR.h:381
MatrixCSR(const SparsityPattern &p, const Allocator &alloc=Allocator())
Create a distributed matrix.
Definition: MatrixCSR.h:168
const std::vector< T > & values() const
Get local values (const version)
Definition: MatrixCSR.h:516
std::vector< T > to_dense() const
Copy to a dense matrix.
Definition: MatrixCSR.h:393
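to_dense() is intended for testing and debugging on small problems; the returned array is row-major over all local rows (owned and ghost) and all local columns. A sketch of a print helper, assuming value type double:

#include <cstddef>
#include <cstdio>
#include <vector>
#include <dolfinx/la/MatrixCSR.h>

void print_local(const dolfinx::la::MatrixCSR<double>& A)
{
  std::vector<double> dense = A.to_dense();
  std::size_t nrows = A.num_all_rows();
  std::size_t ncols = dense.size() / nrows; // row-major storage
  for (std::size_t r = 0; r < nrows; ++r)
  {
    for (std::size_t c = 0; c < ncols; ++c)
      std::printf("%8.3f ", dense[r * ncols + c]);
    std::printf("\n");
  }
}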
auto mat_add_values()
Insertion functor for accumulating values in matrix. It is typically used in finite element assembly ...
Definition: MatrixCSR.h:153
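In finite element assembly this callable is typically passed straight to an assembly routine. A sketch of the usual sequence, assuming a bilinear form a and boundary conditions bcs created elsewhere, and the fem::assemble_matrix overload that accepts an insertion callable:

// Sketch (the names `a`, `bcs` and the fem calls are assumptions about the
// surrounding application code, not defined in this header):
//
//   la::SparsityPattern sp = fem::create_sparsity_pattern(a);
//   sp.assemble();
//   la::MatrixCSR<double> A(sp);
//   fem::assemble_matrix(A.mat_add_values(), a, bcs);
//   A.finalize();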
void set(T x)
Set all non-zero local entries to a value including entries in ghost rows.
Definition: MatrixCSR.h:339
MatrixCSR(MatrixCSR &&A)=default
Move constructor.
void finalize_end()
End transfer of ghost row data to owning ranks.
Definition: MatrixCSR.h:468
void add(const std::span< const T > &x, const std::span< const std::int32_t > &rows, const std::span< const std::int32_t > &cols)
Accumulate values in the matrix.
Definition: MatrixCSR.h:373
const std::vector< std::int32_t > & off_diag_offset() const
Get the start of off-diagonal (unowned columns) on each row, allowing the matrix to be split (virtual...
Definition: MatrixCSR.h:533
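A sketch of how the split can be used: a local matrix-vector product where the first inner loop touches only owned columns of x and the second only ghost columns, so a ghost update of x could be overlapped with the first loop. It assumes x stores owned column values followed by ghost values and y has one entry per owned row:

#include <cstdint>
#include <vector>
#include <dolfinx/la/MatrixCSR.h>

void spmv(const dolfinx::la::MatrixCSR<double>& A,
          const std::vector<double>& x, std::vector<double>& y)
{
  const std::vector<double>& a = A.values();
  const std::vector<std::int32_t>& row_ptr = A.row_ptr();
  const std::vector<std::int32_t>& cols = A.cols();
  const std::vector<std::int32_t>& off_diag = A.off_diag_offset();
  for (std::int32_t r = 0; r < A.num_owned_rows(); ++r)
  {
    double yr = 0.0;
    for (std::int32_t j = row_ptr[r]; j < off_diag[r]; ++j) // owned columns
      yr += a[j] * x[cols[j]];
    for (std::int32_t j = off_diag[r]; j < row_ptr[r + 1]; ++j) // ghost columns
      yr += a[j] * x[cols[j]];
    y[r] = yr;
  }
}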
std::int32_t num_all_rows() const
Number of local rows including ghost rows.
Definition: MatrixCSR.h:384
const std::array< std::shared_ptr< const common::IndexMap >, 2 > & index_maps() const
Index maps for the row and column space. The row IndexMap contains ghost entries for rows which may b...
Definition: MatrixCSR.h:505
double norm_squared() const
Compute the Frobenius norm squared.
Definition: MatrixCSR.h:484
const std::vector< std::int32_t > & row_ptr() const
Get local row pointers.
Definition: MatrixCSR.h:520
std::vector< T > & values()
Get local data values.
Definition: MatrixCSR.h:512
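The mutable accessor allows in-place modification of all locally stored entries, including those in ghost rows; a small sketch that scales the matrix:

#include <algorithm>
#include <vector>
#include <dolfinx/la/MatrixCSR.h>

void scale(dolfinx::la::MatrixCSR<double>& A, double alpha)
{
  std::vector<double>& a = A.values();
  std::transform(a.begin(), a.end(), a.begin(),
                 [alpha](double v) { return alpha * v; });
}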
T value_type
The value type.
Definition: MatrixCSR.h:130
void finalize_begin()
Begin transfer of ghost row data to owning ranks, where it will be accumulated into existing owned ro...
Definition: MatrixCSR.h:422
dolfinx::la::SparsityPattern
This class provides a sparsity pattern data structure that can be used to initialize sparse matrices....
Definition: SparsityPattern.h:34
std::shared_ptr< const common::IndexMap > index_map(int dim) const
Index map for given dimension. Returns the index map for rows and columns that will be set ...
Definition: SparsityPattern.cpp:194
int block_size(int dim) const
Return index map block size for dimension dim.
Definition: SparsityPattern.cpp:225
const graph::AdjacencyList< std::int32_t > & graph() const
Sparsity pattern graph after assembly. Uses local indices for the columns.
Definition: SparsityPattern.cpp:415
std::int64_t num_nonzeros() const
Number of nonzeros on this rank after assembly, including ghost rows.
Definition: SparsityPattern.cpp:394
common::IndexMap column_index_map() const
Builds the index map for columns after assembly of the sparsity pattern.
Definition: SparsityPattern.cpp:214
std::span< const int > off_diagonal_offset() const
Row-wise start of off-diagonal (unowned columns) on each row.
Definition: SparsityPattern.cpp:422
int size(MPI_Comm comm)
Return size of the group (number of processes) associated with the communicator.
Definition: MPI.cpp:71
int rank(MPI_Comm comm)
Return process rank for the communicator.
Definition: MPI.cpp:63
constexpr std::array< std::int64_t, 2 > local_range(int rank, std::int64_t N, int size)
Return local range for the calling process, partitioning the global [0, N - 1] range across all ranks...
Definition: MPI.h:90
dolfinx::la
Linear algebra interface.
Definition: sparsitybuild.h:15
auto norm(const Vector< T, Allocator > &a, Norm type=Norm::l2)
Compute the norm of the vector.
Definition: Vector.h:253