Note: this is documentation for an old release. View the latest documentation at docs.fenicsproject.org/dolfinx/v0.9.0/cpp/doxygen/d6/dbe/Vector_8h_source.html
DOLFINx  0.4.1
DOLFINx C++ interface
Vector.h
1 // Copyright (C) 2020 Garth N. Wells
2 //
3 // This file is part of DOLFINx (https://www.fenicsproject.org)
4 //
5 // SPDX-License-Identifier: LGPL-3.0-or-later
6 
7 #pragma once
8 
#include "utils.h"
#include <algorithm>
#include <cassert>
#include <cmath>
#include <complex>
#include <dolfinx/common/IndexMap.h>
#include <limits>
#include <memory>
#include <numeric>
#include <stdexcept>
#include <utility>
#include <vector>
#include <xtl/xspan.hpp>
17 
18 namespace dolfinx::la
19 {
20 
22 
23 template <typename T, class Allocator = std::allocator<T>>
24 class Vector
25 {
26 public:
28  using value_type = T;
29 
31  using allocator_type = Allocator;
32 
34  Vector(const std::shared_ptr<const common::IndexMap>& map, int bs,
35  const Allocator& alloc = Allocator())
36  : _map(map), _bs(bs),
37  _buffer_send_fwd(bs * map->scatter_fwd_indices().array().size()),
38  _buffer_recv_fwd(bs * map->num_ghosts()),
39  _x(bs * (map->size_local() + map->num_ghosts()), alloc)
40  {
41  if (_bs == 1)
42  _datatype = dolfinx::MPI::mpi_type<T>();
43  else
44  {
45  MPI_Type_contiguous(bs, dolfinx::MPI::mpi_type<T>(), &_datatype);
46  MPI_Type_commit(&_datatype);
47  }
48  }
49 
51  Vector(const Vector& x)
52  : _map(x._map), _bs(x._bs), _request(MPI_REQUEST_NULL),
53  _buffer_send_fwd(x._buffer_send_fwd),
54  _buffer_recv_fwd(x._buffer_recv_fwd), _x(x._x)
55  {
56  if (_bs == 1)
57  _datatype = dolfinx::MPI::mpi_type<T>();
58  else
59  MPI_Type_dup(x._datatype, &_datatype);
60  }
61 
64  : _map(std::move(x._map)), _bs(std::move(x._bs)),
65  _datatype(std::exchange(x._datatype, MPI_DATATYPE_NULL)),
66  _request(std::exchange(x._request, MPI_REQUEST_NULL)),
67  _buffer_send_fwd(std::move(x._buffer_send_fwd)),
68  _buffer_recv_fwd(std::move(x._buffer_recv_fwd)), _x(std::move(x._x))
69  {
70  }
71 
74  {
75  if (_datatype != MPI_DATATYPE_NULL and _bs != 1)
76  MPI_Type_free(&_datatype);
77  }
78 
79  // Assignment operator (disabled)
80  Vector& operator=(const Vector& x) = delete;
81 
83  Vector& operator=(Vector&& x) = default;
84 
87  void set(T v) { std::fill(_x.begin(), _x.end(), v); }
88 
92  {
93  assert(_map);
94 
95  // Pack send buffer
96  const std::vector<std::int32_t>& indices
97  = _map->scatter_fwd_indices().array();
98  for (std::size_t i = 0; i < indices.size(); ++i)
99  {
100  std::copy_n(std::next(_x.cbegin(), _bs * indices[i]), _bs,
101  std::next(_buffer_send_fwd.begin(), _bs * i));
102  }
103 
104  _map->scatter_fwd_begin(xtl::span<const T>(_buffer_send_fwd), _datatype,
105  _request, xtl::span<T>(_buffer_recv_fwd));
106  }
107 
111  {
112  assert(_map);
113  const std::int32_t local_size = _bs * _map->size_local();
114  xtl::span xremote(_x.data() + local_size, _map->num_ghosts() * _bs);
115  _map->scatter_fwd_end(_request);
116 
117  // Copy received data into ghost positions
118  const std::vector<std::int32_t>& scatter_fwd_ghost_pos
119  = _map->scatter_fwd_ghost_positions();
120  for (std::size_t i = 0; i < _map->num_ghosts(); ++i)
121  {
122  const int pos = scatter_fwd_ghost_pos[i];
123  std::copy_n(std::next(_buffer_recv_fwd.cbegin(), _bs * pos), _bs,
124  std::next(xremote.begin(), _bs * i));
125  }
126  }
127 
130  void scatter_fwd()
131  {
132  this->scatter_fwd_begin();
133  this->scatter_fwd_end();
134  }
135 
139  {
140  // Pack send buffer
141  const std::int32_t local_size = _bs * _map->size_local();
142  xtl::span<const T> xremote(_x.data() + local_size,
143  _map->num_ghosts() * _bs);
144  const std::vector<std::int32_t>& scatter_fwd_ghost_pos
145  = _map->scatter_fwd_ghost_positions();
146  for (std::size_t i = 0; i < scatter_fwd_ghost_pos.size(); ++i)
147  {
148  const int pos = scatter_fwd_ghost_pos[i];
149  std::copy_n(std::next(xremote.cbegin(), _bs * i), _bs,
150  std::next(_buffer_recv_fwd.begin(), _bs * pos));
151  }
152 
153  // begin scatter
154  _map->scatter_rev_begin(xtl::span<const T>(_buffer_recv_fwd), _datatype,
155  _request, xtl::span<T>(_buffer_send_fwd));
156  }
157 
165  {
166  // Complete scatter
167  _map->scatter_rev_end(_request);
168 
169  // Copy/accumulate into owned part of the vector
170  const std::vector<std::int32_t>& shared_indices
171  = _map->scatter_fwd_indices().array();
172  switch (op)
173  {
174  case common::IndexMap::Mode::insert:
175  for (std::size_t i = 0; i < shared_indices.size(); ++i)
176  {
177  std::copy_n(std::next(_buffer_send_fwd.cbegin(), _bs * i), _bs,
178  std::next(_x.begin(), _bs * shared_indices[i]));
179  }
180  break;
181  case common::IndexMap::Mode::add:
182  for (std::size_t i = 0; i < shared_indices.size(); ++i)
183  for (int j = 0; j < _bs; ++j)
184  _x[shared_indices[i] * _bs + j] += _buffer_send_fwd[i * _bs + j];
185  break;
186  }
187  }
188 
195  {
196  this->scatter_rev_begin();
197  this->scatter_rev_end(op);
198  }
199 
203  double norm(Norm type = Norm::l2) const
204  {
205  switch (type)
206  {
207  case Norm::l2:
208  return std::sqrt(this->squared_norm());
209  case Norm::linf:
210  {
211  const std::int32_t size_local = _bs * _map->size_local();
212  double local_linf = 0.0;
213  if (size_local > 0)
214  {
215  auto local_max_entry = std::max_element(
216  _x.begin(), std::next(_x.begin(), size_local),
217  [](T a, T b) { return std::norm(a) < std::norm(b); });
218  local_linf = std::abs(*local_max_entry);
219  }
220 
221  double linf = 0.0;
222  MPI_Allreduce(&local_linf, &linf, 1, MPI_DOUBLE, MPI_MAX, _map->comm());
223  return linf;
224  }
225  default:
226  throw std::runtime_error("Norm type not supported");
227  }
228  }
229 
232  double squared_norm() const
233  {
234  const std::int32_t size_local = _bs * _map->size_local();
235  double result = std::transform_reduce(
236  _x.begin(), std::next(_x.begin(), size_local), double(0),
237  std::plus<double>(), [](T val) { return std::norm(val); });
238  double norm2;
239  MPI_Allreduce(&result, &norm2, 1, MPI_DOUBLE, MPI_SUM, _map->comm());
240  return norm2;
241  }
242 
244  std::shared_ptr<const common::IndexMap> map() const { return _map; }
245 
247  constexpr int bs() const { return _bs; }
248 
250  xtl::span<const T> array() const { return xtl::span<const T>(_x); }
251 
253  xtl::span<T> mutable_array() { return xtl::span(_x); }
254 
256  constexpr allocator_type allocator() const { return _x.get_allocator(); }
257 
258 private:
259  // Map describing the data layout
260  std::shared_ptr<const common::IndexMap> _map;
261 
262  // Block size
263  int _bs;
264 
265  // Data type and buffers for ghost scatters
266  MPI_Datatype _datatype = MPI_DATATYPE_NULL;
267  MPI_Request _request = MPI_REQUEST_NULL;
268  std::vector<T> _buffer_send_fwd, _buffer_recv_fwd;
269 
270  // Data
271  std::vector<T, Allocator> _x;
272 };
273 
280 template <typename T, class Allocator = std::allocator<T>>
282 {
283  const std::int32_t local_size = a.bs() * a.map()->size_local();
284  if (local_size != b.bs() * b.map()->size_local())
285  throw std::runtime_error("Incompatible vector sizes");
286  xtl::span<const T> x_a = a.array();
287  xtl::span<const T> x_b = b.array();
288 
289  const T local = std::transform_reduce(
290  x_a.begin(), std::next(x_a.begin(), local_size), x_b.begin(),
291  static_cast<T>(0), std::plus<T>(),
292  [](T a, T b) -> T
293  {
294  if constexpr (std::is_same<T, std::complex<double>>::value
295  or std::is_same<T, std::complex<float>>::value)
296  {
297  return std::conj(a) * b;
298  }
299  else
300  return a * b;
301  });
302 
303  T result;
304  MPI_Allreduce(&local, &result, 1, dolfinx::MPI::mpi_type<T>(), MPI_SUM,
305  a.map()->comm());
306  return result;
307 }
308 
314 template <typename T, typename U>
315 void orthonormalize(const xtl::span<Vector<T, U>>& basis, double tol = 1.0e-10)
316 {
317  // Loop over each vector in basis
318  for (std::size_t i = 0; i < basis.size(); ++i)
319  {
320  // Orthogonalize vector i with respect to previously orthonormalized
321  // vectors
322  for (std::size_t j = 0; j < i; ++j)
323  {
324  // basis_i <- basis_i - dot_ij basis_j
325  T dot_ij = inner_product(basis[i], basis[j]);
326  std::transform(basis[j].array().begin(), basis[j].array().end(),
327  basis[i].array().begin(), basis[i].mutable_array().begin(),
328  [dot_ij](auto xj, auto xi) { return xi - dot_ij * xj; });
329  }
330 
331  // Normalise basis function
332  double norm = basis[i].norm(Norm::l2);
333  if (norm < tol)
334  {
335  throw std::runtime_error(
336  "Linear dependency detected. Cannot orthogonalize.");
337  }
338  std::transform(basis[i].array().begin(), basis[i].array().end(),
339  basis[i].mutable_array().begin(),
340  [norm](auto x) { return x / norm; });
341  }
342 }
343 
348 template <typename T, typename U>
349 bool is_orthonormal(const xtl::span<const Vector<T, U>>& basis,
350  double tol = 1.0e-10)
351 {
352  for (std::size_t i = 0; i < basis.size(); i++)
353  {
354  for (std::size_t j = i; j < basis.size(); j++)
355  {
356  const double delta_ij = (i == j) ? 1.0 : 0.0;
357  T dot_ij = inner_product(basis[i], basis[j]);
358  if (std::abs(delta_ij - dot_ij) > tol)
359  return false;
360  }
361  }
362 
363  return true;
364 }
365 
366 } // namespace dolfinx::la
Mode
Mode for reverse scatter operation.
Definition: IndexMap.h:64
Distributed vector.
Definition: Vector.h:25
void scatter_rev(dolfinx::common::IndexMap::Mode op)
Scatter ghost data to owner. This process may receive data from more than one process, and the received data can be summed or inserted into the owned part of the vector.
Definition: Vector.h:194
std::shared_ptr< const common::IndexMap > map() const
Get IndexMap.
Definition: Vector.h:244
void scatter_fwd_begin()
Begin scatter of local data from owner to ghosts on other ranks.
Definition: Vector.h:91
Allocator allocator_type
The allocator type.
Definition: Vector.h:31
void scatter_rev_end(common::IndexMap::Mode op)
End scatter of ghost data to owner. This process may receive data from more than one process, and the received data can be summed or inserted into the owned part of the vector.
Definition: Vector.h:164
Vector(const std::shared_ptr< const common::IndexMap > &map, int bs, const Allocator &alloc=Allocator())
Create a distributed vector.
Definition: Vector.h:34
constexpr int bs() const
Get block size.
Definition: Vector.h:247
xtl::span< T > mutable_array()
Get local part of the vector.
Definition: Vector.h:253
void set(T v)
Set all entries (including ghosts)
Definition: Vector.h:87
Vector & operator=(Vector &&x)=default
Move Assignment operator.
double squared_norm() const
Compute the squared L2 norm of vector.
Definition: Vector.h:232
void scatter_fwd()
Scatter local data to ghost positions on other ranks.
Definition: Vector.h:130
Vector(Vector &&x)
Move constructor.
Definition: Vector.h:63
Vector(const Vector &x)
Copy constructor.
Definition: Vector.h:51
void scatter_fwd_end()
End scatter of local data from owner to ghosts on other ranks.
Definition: Vector.h:110
~Vector()
Destructor.
Definition: Vector.h:73
void scatter_rev_begin()
Start scatter of ghost data to owner.
Definition: Vector.h:138
constexpr allocator_type allocator() const
Get the allocator associated with the container.
Definition: Vector.h:256
xtl::span< const T > array() const
Get local part of the vector (const version)
Definition: Vector.h:250
T value_type
The value type.
Definition: Vector.h:28
double norm(Norm type=Norm::l2) const
Compute the norm of the vector.
Definition: Vector.h:203
Linear algebra interface.
Definition: sparsitybuild.h:13
void orthonormalize(const xtl::span< Vector< T, U >> &basis, double tol=1.0e-10)
Orthonormalize a set of vectors.
Definition: Vector.h:315
Norm
Norm types.
Definition: utils.h:13
bool is_orthonormal(const xtl::span< const Vector< T, U >> &basis, double tol=1.0e-10)
Test if basis is orthonormal.
Definition: Vector.h:349
T inner_product(const Vector< T, Allocator > &a, const Vector< T, Allocator > &b)
Compute the inner product of two vectors. The two vectors must have the same parallel layout.
Definition: Vector.h:281