9#include "SparsityPattern.h"
10#include <dolfinx/common/IndexMap.h>
11#include <dolfinx/common/MPI.h>
12#include <dolfinx/graph/AdjacencyList.h>
/// Set a dense block of values in a CSR matrix. Rows must be owned
/// (less than `local_size`); the bound is checked in debug builds only.
template <typename U, typename V, typename W, typename X>
void set_csr(U&& data, const V& cols, const V& row_ptr, const W& x,
             const X& xrows, const X& xcols,
             [[maybe_unused]] typename X::value_type local_size)
{
  assert(x.size() == xrows.size() * xcols.size());
  for (std::size_t r = 0; r < xrows.size(); ++r)
  {
    // Row index and pointer to the corresponding dense data row
    auto row = xrows[r];
    using T = typename W::value_type;
    const T* xr = x.data() + r * xcols.size();

#ifndef NDEBUG
    if (row >= local_size)
      throw std::runtime_error("Local row out of range");
#endif

    // Column indices for the row
    auto cit0 = std::next(cols.begin(), row_ptr[row]);
    auto cit1 = std::next(cols.begin(), row_ptr[row + 1]);
    for (std::size_t c = 0; c < xcols.size(); ++c)
    {
      // Find position of the column index
      auto it = std::lower_bound(cit0, cit1, xcols[c]);
      assert(it != cit1 and *it == xcols[c]);
      std::size_t d = std::distance(cols.begin(), it);
      assert(d < data.size());
      data[d] = xr[c];
    }
  }
}
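// Illustrative sketch (hypothetical test data, not dolfinx API): insert a
// dense 2x2 block into a 3-row local CSR structure. Column indices within
// each row must be sorted, since set_csr locates entries with
// std::lower_bound.
//
//   std::vector<double> data(5, 0.0);                  // nnz values
//   std::vector<std::int32_t> cols = {0, 1, 0, 1, 2};  // sorted per row
//   std::vector<std::int32_t> row_ptr = {0, 2, 4, 5};  // 3 rows
//   std::vector<double> x = {1, 2, 3, 4};              // dense 2x2 block
//   std::vector<std::int32_t> xrows = {0, 1}, xcols = {0, 1};
//   impl::set_csr(data, cols, row_ptr, x, xrows, xcols, std::int32_t(3));
//   // data is now {1, 2, 3, 4, 0}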
/// Accumulate a dense block of values into a CSR matrix. Ghost rows are
/// permitted; the bound is checked in debug builds only.
template <typename U, typename V, typename W, typename X>
void add_csr(U&& data, const V& cols, const V& row_ptr, const W& x,
             const X& xrows, const X& xcols)
{
  assert(x.size() == xrows.size() * xcols.size());
  for (std::size_t r = 0; r < xrows.size(); ++r)
  {
    // Row index and pointer to the corresponding dense data row
    auto row = xrows[r];
    using T = typename W::value_type;
    const T* xr = x.data() + r * xcols.size();

#ifndef NDEBUG
    // row_ptr holds num_rows + 1 offsets, so the last valid row index
    // is row_ptr.size() - 2
    if (row >= (int)row_ptr.size() - 1)
      throw std::runtime_error("Local row out of range");
#endif

    // Column indices for the row
    auto cit0 = std::next(cols.begin(), row_ptr[row]);
    auto cit1 = std::next(cols.begin(), row_ptr[row + 1]);
    for (std::size_t c = 0; c < xcols.size(); ++c)
    {
      // Find position of the column index
      auto it = std::lower_bound(cit0, cit1, xcols[c]);
      assert(it != cit1 and *it == xcols[c]);
      std::size_t d = std::distance(cols.begin(), it);
      assert(d < data.size());
      data[d] += xr[c];
    }
  }
}
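// add_csr mirrors set_csr, but accumulates (`data[d] += xr[c]`) instead of
// overwriting, and it accepts ghost rows, so the row bound is checked
// against the full row_ptr range rather than the owned-row count. Sketch
// (reusing the hypothetical data above): applying the same block twice
// leaves each targeted entry holding twice the block value.
//
//   impl::add_csr(data, cols, row_ptr, x, xrows, xcols);
//   impl::add_csr(data, cols, row_ptr, x, xrows, xcols);
//   // data is now {2, 4, 6, 8, 0}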
} // namespace impl

/// Distributed sparse matrix in compressed sparse row (CSR) format. The
/// matrix is partitioned row-wise across MPI ranks, with ghost rows for
/// contributions to rows owned by other ranks.
template <typename T, class Allocator = std::allocator<T>>
class MatrixCSR
{
public:
  /// The value type
  using value_type = T;

  /// The allocator type
  using allocator_type = Allocator;

  /// Insertion functor for setting values in the matrix. It is
  /// typically used in finite element assembly functions.
  auto mat_set_values()
  {
    return [&](const std::span<const std::int32_t>& rows,
               const std::span<const std::int32_t>& cols,
               const std::span<const T>& data) -> int
    {
      this->set(data, rows, cols);
      return 0;
    };
  }

  /// Insertion functor for accumulating values in the matrix. It is
  /// typically used in finite element assembly functions.
  auto mat_add_values()
  {
    return [&](const std::span<const std::int32_t>& rows,
               const std::span<const std::int32_t>& cols,
               const std::span<const T>& data) -> int
    {
      this->add(data, rows, cols);
      return 0;
    };
  }

  /// Create a distributed matrix
  /// @param[in] p The sparsity pattern describing the non-zero structure
  /// @param[in] alloc The memory allocator for the data storage
  MatrixCSR(const SparsityPattern& p, const Allocator& alloc = Allocator())
      : _index_maps({p.index_map(0),
                     std::make_shared<common::IndexMap>(p.column_index_map())}),
        _bs({p.block_size(0), p.block_size(1)}),
        _data(p.num_nonzeros(), 0, alloc),
        _cols(p.graph().array().begin(), p.graph().array().end()),
        _row_ptr(p.graph().offsets().begin(), p.graph().offsets().end()),
        _comm(MPI_COMM_NULL)
  {
    // Block size > 1 is not yet handled
    if (_bs[0] > 1 or _bs[1] > 1)
      throw std::runtime_error("Block size not yet supported");

    // Compute the offset of the off-diagonal (unowned columns) block
    // for each row
    std::span<const int> num_diag_nnz = p.off_diagonal_offset();
    _off_diagonal_offset.reserve(num_diag_nnz.size());
    std::transform(num_diag_nnz.begin(), num_diag_nnz.end(), _row_ptr.begin(),
                   std::back_inserter(_off_diagonal_offset), std::plus{});

    // Some short-hands
    const std::array local_size
        = {_index_maps[0]->size_local(), _index_maps[1]->size_local()};
    const std::array local_range
        = {_index_maps[0]->local_range(), _index_maps[1]->local_range()};
    const std::vector<std::int64_t>& ghosts1 = _index_maps[1]->ghosts();
    const std::vector<std::int64_t>& ghosts0 = _index_maps[0]->ghosts();
    const std::vector<int>& src_ranks = _index_maps[0]->src();
    const std::vector<int>& dest_ranks = _index_maps[0]->dest();

    // Create a neighbourhood communicator for sending ghost row data to
    // the owning ranks (ghost -> owner)
    MPI_Comm comm;
    MPI_Dist_graph_create_adjacent(_index_maps[0]->comm(), dest_ranks.size(),
                                   dest_ranks.data(), MPI_UNWEIGHTED,
                                   src_ranks.size(), src_ranks.data(),
                                   MPI_UNWEIGHTED, MPI_INFO_NULL, false, &comm);
    _comm = dolfinx::MPI::Comm(comm, false);

    // Map each ghost row to the rank (by neighbourhood index) that owns it
    _ghost_row_to_rank.reserve(_index_maps[0]->owners().size());
    for (int r : _index_maps[0]->owners())
    {
      auto it = std::lower_bound(src_ranks.begin(), src_ranks.end(), r);
      assert(it != src_ranks.end() and *it == r);
      int pos = std::distance(src_ranks.begin(), it);
      _ghost_row_to_rank.push_back(pos);
    }
    // Compute the size of data to send to each neighbour (one entry per
    // non-zero in each ghost row)
    std::vector<std::int32_t> data_per_proc(src_ranks.size(), 0);
    for (std::size_t i = 0; i < _ghost_row_to_rank.size(); ++i)
    {
      assert(_ghost_row_to_rank[i] < (int)data_per_proc.size());
      std::size_t pos = local_size[0] + i;
      data_per_proc[_ghost_row_to_rank[i]] += _row_ptr[pos + 1] - _row_ptr[pos];
    }

    // Compute send displacements
    _val_send_disp.resize(src_ranks.size() + 1, 0);
    std::partial_sum(data_per_proc.begin(), data_per_proc.end(),
                     std::next(_val_send_disp.begin()));

    // For each ghost row, pack (global row, global column) index pairs
    // to send to the owning rank
    std::vector<std::int64_t> ghost_index_data(2 * _val_send_disp.back());
    {
      std::vector<int> insert_pos = _val_send_disp;
      for (std::size_t i = 0; i < _ghost_row_to_rank.size(); ++i)
      {
        const int rank = _ghost_row_to_rank[i];
        int row_id = local_size[0] + i;
        for (int j = _row_ptr[row_id]; j < _row_ptr[row_id + 1]; ++j)
        {
          // Get position in send buffer
          const std::int32_t idx_pos = 2 * insert_pos[rank];

          // Pack send data (row, col) as global indices
          ghost_index_data[idx_pos] = ghosts0[i];
          if (std::int32_t col_local = _cols[j]; col_local < local_size[1])
            ghost_index_data[idx_pos + 1] = col_local + local_range[1][0];
          else
            ghost_index_data[idx_pos + 1] = ghosts1[col_local - local_size[1]];

          insert_pos[rank] += 1;
        }
      }
    }
    // Communicate the (row, column) index pairs to the neighbourhood
    std::vector<std::int64_t> ghost_index_array;
    std::vector<int> recv_disp;
    {
      std::vector<int> send_sizes;
      std::transform(data_per_proc.begin(), data_per_proc.end(),
                     std::back_inserter(send_sizes),
                     [](auto x) { return 2 * x; });

      std::vector<int> recv_sizes(dest_ranks.size());
      // Ensure the size vectors have valid data pointers, even when empty
      send_sizes.reserve(1);
      recv_sizes.reserve(1);
      MPI_Neighbor_alltoall(send_sizes.data(), 1, MPI_INT, recv_sizes.data(), 1,
                            MPI_INT, _comm.comm());

      // Build send/receive displacements
      std::vector<int> send_disp = {0};
      std::partial_sum(send_sizes.begin(), send_sizes.end(),
                       std::back_inserter(send_disp));
      recv_disp = {0};
      std::partial_sum(recv_sizes.begin(), recv_sizes.end(),
                       std::back_inserter(recv_disp));

      ghost_index_array.resize(recv_disp.back());
      MPI_Neighbor_alltoallv(ghost_index_data.data(), send_sizes.data(),
                             send_disp.data(), MPI_INT64_T,
                             ghost_index_array.data(), recv_sizes.data(),
                             recv_disp.data(), MPI_INT64_T, _comm.comm());
    }

    // Store receive displacements for later use when transferring data
    // values; each (row, col) index pair corresponds to one value
    _val_recv_disp.resize(recv_disp.size());
    std::transform(recv_disp.begin(), recv_disp.end(), _val_recv_disp.begin(),
                   [](int d) { return d / 2; });
    // Build a sorted global-to-local map for the ghost columns
    std::vector<std::pair<std::int64_t, std::int32_t>> global_to_local;
    global_to_local.reserve(ghosts1.size());
    std::int32_t local_i = local_size[1];
    for (std::int64_t idx : ghosts1)
      global_to_local.push_back({idx, local_i++});
    std::sort(global_to_local.begin(), global_to_local.end());

    // Compute the position in _data where each received value should be
    // accumulated
    for (std::size_t i = 0; i < ghost_index_array.size(); i += 2)
    {
      // The row must be owned by this process
      const std::int32_t local_row = ghost_index_array[i] - local_range[0][0];
      assert(local_row >= 0 and local_row < local_size[0]);

      // The column may be owned or unowned
      std::int32_t local_col = ghost_index_array[i + 1] - local_range[1][0];
      if (local_col < 0 or local_col >= local_size[1])
      {
        const auto it = std::lower_bound(
            global_to_local.begin(), global_to_local.end(),
            std::pair(ghost_index_array[i + 1], -1),
            [](auto& a, auto& b) { return a.first < b.first; });
        assert(it != global_to_local.end()
               and it->first == ghost_index_array[i + 1]);
        local_col = it->second;
      }

      // Find the position of the column index in the row
      auto cit0 = std::next(_cols.begin(), _row_ptr[local_row]);
      auto cit1 = std::next(_cols.begin(), _row_ptr[local_row + 1]);
      auto cit = std::lower_bound(cit0, cit1, local_col);
      assert(cit != cit1 and *cit == local_col);
      std::size_t d = std::distance(_cols.begin(), cit);
      _unpack_pos.push_back(d);
    }
  }

  /// Move constructor
  MatrixCSR(MatrixCSR&& A) = default;
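  // Illustrative sketch (create_pattern is a hypothetical helper that
  // builds and assembles a la::SparsityPattern): construct a matrix and
  // zero its entries.
  //
  //   la::SparsityPattern p = create_pattern(...);
  //   la::MatrixCSR<double> A(p);
  //   A.set(0.0);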
  /// Set all non-zero local entries to a value, including entries in
  /// ghost rows
  void set(T x) { std::fill(_data.begin(), _data.end(), x); }
  /// Set values in the matrix
  /// @note Only entries included in the sparsity pattern may be set,
  /// and rows must be owned (ghost rows cannot be set)
  void set(const std::span<const T>& x,
           const std::span<const std::int32_t>& rows,
           const std::span<const std::int32_t>& cols)
  {
    impl::set_csr(_data, _cols, _row_ptr, x, rows, cols,
                  _index_maps[0]->size_local());
  }
  /// Accumulate values in the matrix
  /// @note Only entries included in the sparsity pattern may be
  /// accumulated into; both owned and ghost rows are allowed
  void add(const std::span<const T>& x,
           const std::span<const std::int32_t>& rows,
           const std::span<const std::int32_t>& cols)
  {
    impl::add_csr(_data, _cols, _row_ptr, x, rows, cols);
  }
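  // Illustrative sketch (hypothetical local data; the indices must exist
  // in the sparsity pattern): accumulate a 2x2 element block directly,
  // then via the insertion functor that assembly routines consume.
  //
  //   std::vector<double> Ae = {10.0, 20.0, 30.0, 40.0};
  //   std::vector<std::int32_t> dofs = {0, 1};
  //   A.add(Ae, dofs, dofs);    // A(0,0)+=10, A(0,1)+=20, A(1,0)+=30, ...
  //   auto add_fn = A.mat_add_values();
  //   add_fn(dofs, dofs, Ae);   // same effect through the functor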
  /// Number of local rows excluding ghost rows
  std::int32_t num_owned_rows() const { return _index_maps[0]->size_local(); }

  /// Number of local rows including ghost rows
  std::int32_t num_all_rows() const { return _row_ptr.size() - 1; }

  /// Copy to a dense matrix
  /// @note Typically used for debugging; the row-major result holds all
  /// local rows (owned and ghost) and all local columns
  std::vector<T> to_dense() const
  {
    const std::size_t nrows = num_all_rows();
    const std::size_t ncols
        = _index_maps[1]->size_local() + _index_maps[1]->num_ghosts();
    std::vector<T> A(nrows * ncols);
    for (std::size_t r = 0; r < nrows; ++r)
      for (std::int32_t j = _row_ptr[r]; j < _row_ptr[r + 1]; ++j)
        A[r * ncols + _cols[j]] = _data[j];

    return A;
  }
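  // Illustrative sketch: print a small matrix on one rank for debugging
  // (requires <iostream>).
  //
  //   std::vector<double> dense = A.to_dense();
  //   std::size_t ncols = dense.size() / A.num_all_rows();
  //   for (std::int32_t r = 0; r < A.num_all_rows(); ++r)
  //   {
  //     for (std::size_t c = 0; c < ncols; ++c)
  //       std::cout << dense[r * ncols + c] << " ";
  //     std::cout << "\n";
  //   }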
  /// Transfer ghost row data to the owning ranks, accumulating received
  /// values on the owned rows and zeroing any existing ghost rows
  void finalize()
  {
    finalize_begin();
    finalize_end();
  }

  /// Begin transfer of ghost row data to owning ranks, where it will be
  /// accumulated into existing owned rows
  /// @note Must be followed by MatrixCSR::finalize_end(). Matrix values
  /// must not be changed in between.
  void finalize_begin()
  {
    const std::int32_t local_size0 = _index_maps[0]->size_local();
    const std::int32_t num_ghosts0 = _index_maps[0]->num_ghosts();

    // For each ghost row, pack values to send to the owning rank. The
    // send buffer is a class member so that it remains alive for the
    // duration of the non-blocking communication started below.
    std::vector<int> insert_pos = _val_send_disp;
    _ghost_value_data.resize(_val_send_disp.back());
    for (int i = 0; i < num_ghosts0; ++i)
    {
      const int rank = _ghost_row_to_rank[i];

      // Get position in the send buffer for this neighbour and copy the
      // ghost row data
      const std::int32_t val_pos = insert_pos[rank];
      std::copy(std::next(_data.data(), _row_ptr[local_size0 + i]),
                std::next(_data.data(), _row_ptr[local_size0 + i + 1]),
                std::next(_ghost_value_data.begin(), val_pos));
      insert_pos[rank]
          += _row_ptr[local_size0 + i + 1] - _row_ptr[local_size0 + i];
    }

    _ghost_value_data_in.resize(_val_recv_disp.back());

    // Compute send and receive counts from the displacements
    std::vector<int> val_send_count(_val_send_disp.size() - 1);
    std::adjacent_difference(std::next(_val_send_disp.begin()),
                             _val_send_disp.end(), val_send_count.begin());
    std::vector<int> val_recv_count(_val_recv_disp.size() - 1);
    std::adjacent_difference(std::next(_val_recv_disp.begin()),
                             _val_recv_disp.end(), val_recv_count.begin());

    [[maybe_unused]] int status = MPI_Ineighbor_alltoallv(
        _ghost_value_data.data(), val_send_count.data(), _val_send_disp.data(),
        dolfinx::MPI::mpi_type<T>(), _ghost_value_data_in.data(),
        val_recv_count.data(), _val_recv_disp.data(),
        dolfinx::MPI::mpi_type<T>(), _comm.comm(), &_request);
    assert(status == MPI_SUCCESS);
  }
  /// End transfer of ghost row data to owning ranks
  /// @note Must be preceded by MatrixCSR::finalize_begin()
  void finalize_end()
  {
    [[maybe_unused]] int status = MPI_Wait(&_request, MPI_STATUS_IGNORE);
    assert(status == MPI_SUCCESS);

    // Accumulate the received data into the owned rows
    assert(_ghost_value_data_in.size() == _unpack_pos.size());
    for (std::size_t i = 0; i < _ghost_value_data_in.size(); ++i)
      _data[_unpack_pos[i]] += _ghost_value_data_in[i];

    // Set ghost row data to zero
    const std::int32_t local_size0 = _index_maps[0]->size_local();
    std::fill(std::next(_data.begin(), _row_ptr[local_size0]), _data.end(), 0);
  }
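  // Typical usage sketch: after assembly into owned and ghost rows, either
  // call finalize() directly, or split the call to overlap communication
  // with unrelated work (do_other_work is hypothetical):
  //
  //   A.finalize_begin();   // start non-blocking ghost-row reduction
  //   do_other_work();      // must not modify the matrix values
  //   A.finalize_end();     // wait, accumulate received data, zero ghosts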
  /// Compute the Frobenius norm squared
  double norm_squared() const
  {
    const std::size_t num_owned_rows = _index_maps[0]->size_local();
    assert(num_owned_rows < _row_ptr.size());

    const double norm_sq_local = std::accumulate(
        _data.cbegin(), std::next(_data.cbegin(), _row_ptr[num_owned_rows]),
        double(0), [](double norm, T y) { return norm + std::norm(y); });
    double norm_sq;
    MPI_Allreduce(&norm_sq_local, &norm_sq, 1, MPI_DOUBLE, MPI_SUM,
                  _comm.comm());
    return norm_sq;
  }
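  // The Frobenius norm itself is the square root of this value, e.g.
  // (sketch, requires <cmath>): double frob = std::sqrt(A.norm_squared());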
  /// Index maps for the row and column space. The row IndexMap contains
  /// ghost entries for rows which may be inserted into, and the column
  /// IndexMap contains all local and ghost columns that may exist in the
  /// owned rows.
  /// @return Row (0) and column (1) index maps
  const std::array<std::shared_ptr<const common::IndexMap>, 2>&
  index_maps() const
  {
    return _index_maps;
  }

  /// Get local data values
  std::vector<T>& values() { return _data; }

  /// Get local values (const version)
  const std::vector<T>& values() const { return _data; }

  /// Get local row pointers
  const std::vector<std::int32_t>& row_ptr() const { return _row_ptr; }

  /// Get local column indices
  const std::vector<std::int32_t>& cols() const { return _cols; }

  /// Get the start of off-diagonal (unowned columns) on each row,
  /// allowing the matrix to be split (virtually) into two parts
  const std::vector<std::int32_t>& off_diag_offset() const
  {
    return _off_diagonal_offset;
  }
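  // Illustrative sketch (not dolfinx API): a matrix-vector product over
  // the owned rows using the raw CSR accessors; `x` is an assumed local
  // column vector. off_diag_offset() marks where the unowned columns start
  // in each row, so a distributed SpMV could split each row there to
  // overlap the halo update with computation on the owned block.
  //
  //   const auto& ap = A.row_ptr();
  //   const auto& aj = A.cols();
  //   const auto& av = A.values();
  //   std::vector<double> y(A.num_owned_rows(), 0.0);
  //   for (std::int32_t r = 0; r < A.num_owned_rows(); ++r)
  //     for (std::int32_t j = ap[r]; j < ap[r + 1]; ++j)
  //       y[r] += av[j] * x[aj[j]];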
private:
  // Maps describing the data layout for rows and columns
  std::array<std::shared_ptr<const common::IndexMap>, 2> _index_maps;

  // Block sizes (> 1 not yet supported)
  std::array<int, 2> _bs;

  // Matrix data
  std::vector<T, Allocator> _data;
  std::vector<std::int32_t> _cols, _row_ptr;

  // Start of off-diagonal (unowned columns) on each row
  std::vector<std::int32_t> _off_diagonal_offset;

  // Neighbourhood communicator (ghost rows -> owning ranks)
  dolfinx::MPI::Comm _comm;

  // Request for the non-blocking communication in finalize
  MPI_Request _request;

  // Positions in _data at which to accumulate received values
  std::vector<int> _unpack_pos;

  // Send and receive displacements for the neighbourhood alltoallv
  std::vector<int> _val_send_disp, _val_recv_disp;

  // Owning (neighbourhood) rank of each ghost row
  std::vector<int> _ghost_row_to_rank;

  // Temporary stores for ghost row data during non-blocking
  // communication (send and receive sides)
  std::vector<T> _ghost_value_data;
  std::vector<T> _ghost_value_data_in;
};

} // namespace dolfinx::la