guanaqo main
Utilities for scientific software
Loading...
Searching...
No Matches
sparsity-conversions.hpp
Go to the documentation of this file.
1#pragma once
2
3/// @file
4/// @ingroup linalg_sparsity_conv
5/// Convert between dense and sparse sparsity descriptors.
6
7#include <guanaqo/export.h>
11
12#include <algorithm>
13#include <cassert>
14#include <numeric>
15#include <optional>
16#include <span>
17#include <stdexcept>
18#include <utility>
19#include <vector>
20
22
23/// Safely cast an integral value to @c size_t.
23/// In debug builds, asserts that a signed input is non-negative before the
23/// cast; for unsigned inputs the check is skipped entirely, because comparing
23/// an unsigned value against zero is tautological and triggers compiler
23/// warnings such as GCC/Clang -Wtype-limits or MSVC C4296.
23/// @param i  integral index or size value (signed or unsigned)
23/// @return   @p i converted to @c size_t
23template <class I>
23constexpr size_t cast_sz(I i) {
24    // static_cast<I>(-1) < 0 holds exactly when I is a signed type.
24    if constexpr (static_cast<I>(-1) < static_cast<I>(0))
24        assert(i >= 0);
25    return static_cast<size_t>(i);
26}
27
28/// @addtogroup linalg_sparsity_conv
29/// @{
30
31/// Exception thrown when the requested conversion between sparsity
32/// structures could not be performed (most likely because certain compiler
33/// or standard library features were unavailable when the library was built).
33/// Derives from @c std::logic_error, so a message string is required.
34struct GUANAQO_EXPORT unsupported_conversion : std::logic_error {
35    // Inherit all std::logic_error constructors (message as const char* or
35    // std::string); no additional state is carried.
35    using std::logic_error::logic_error;
36};
37
38/// Converts one matrix storage format to another.
39/// @tparam From
40/// The input sparsity pattern type.
41/// @tparam To
42/// The output sparsity pattern type.
43template <class From, class To>
45
46/// Additional options for the conversion performed by @ref SparsityConverter.
47template <class To>
49
50/// Conversion to dense format does not require any additional options.
51/// @todo We could support row/column major output order, triangular structure, symmetry (full,
52/// lower or upper), etc. as options here, and use them in the conversion.
53template <>
55
56/// Conversion from dense to dense is trivial.
57template <>
63 if (from.symmetry != Symmetry::Unsymmetric && from.rows != from.cols)
64 throw std::invalid_argument("Nonsquare matrix cannot be symmetric");
65 }
67 [[nodiscard]] operator const to_sparsity_t &() const & { return sparsity; }
68 [[nodiscard]] const to_sparsity_t &get_sparsity() const { return *this; }
69 [[nodiscard]] size_t work_size() const { return 0; }
70 [[nodiscard]] length_t nnz_from() const {
71 return sparsity.rows * sparsity.cols;
72 }
73 template <class T>
74 [[nodiscard]] std::span<T>
75 convert_values(std::span<T> from_values,
76 std::span<std::remove_const_t<T>>) const {
77 assert(from_values.size() == cast_sz(nnz_from()));
78 return from_values;
79 }
80};
81
82/// Conversion from CSC to dense format.
83template <class Index, class StorageIndex>
84struct SparsityConverter<SparseCSC<Index, StorageIndex>, Dense> {
89 assert(check_uniqueness_csc(from.outer_ptr, from.inner_idx));
90 if (from.symmetry != Symmetry::Unsymmetric && from.rows != from.cols)
91 throw std::invalid_argument("Nonsquare matrix cannot be symmetric");
92 return {
93 .rows = from.rows,
94 .cols = from.cols,
95 .symmetry = from.symmetry,
96 };
97 }
99 : from_sparsity(from), sparsity(convert_sparsity(from, request)) {}
102 [[nodiscard]] operator const to_sparsity_t &() const & { return sparsity; }
103 [[nodiscard]] const to_sparsity_t &get_sparsity() const { return *this; }
104 [[nodiscard]] size_t work_size() const {
105 return cast_sz(sparsity.rows) * cast_sz(sparsity.cols);
106 }
107 [[nodiscard]] length_t nnz_from() const { return from_sparsity.nnz(); }
108 template <class T>
109 [[nodiscard]] std::span<T>
110 convert_values(std::span<T> from_values,
111 std::span<std::remove_const_t<T>> work) const {
112 assert(from_values.size() == cast_sz(nnz_from()));
113 assert(work.size() == work_size());
114 std::ranges::fill(work, T{});
115 auto W = [&](index_t r, index_t c) -> std::remove_const_t<T> & {
116 return work[cast_sz(r) + cast_sz(c) * cast_sz(sparsity.rows)];
117 };
118 size_t l = 0;
119 for (index_t c = 0; c < from_sparsity.cols; ++c) {
120 auto inner_start = from_sparsity.outer_ptr[cast_sz(c)];
121 auto inner_end = from_sparsity.outer_ptr[cast_sz(c) + 1];
122 for (auto i = inner_start; i < inner_end; ++i) {
123 auto r = from_sparsity.inner_idx[cast_sz(i)];
124 switch (from_sparsity.symmetry) {
125 case Symmetry::Unsymmetric: W(r, c) = from_values[l]; break;
126 case Symmetry::Upper:
127 if (r > c)
128 throw std::invalid_argument(
129 "Invalid symmetric CSC matrix: "
130 "upper-triangular matrix should not "
131 "have elements below the diagonal");
132 W(c, r) = W(r, c) = from_values[l];
133 break;
134 case Symmetry::Lower:
135 if (r < c)
136 throw std::invalid_argument(
137 "Invalid symmetric CSC matrix: "
138 "lower-triangular matrix should not "
139 "have elements above the diagonal");
140 W(c, r) = W(r, c) = from_values[l];
141 break;
142 default: throw std::invalid_argument("Invalid symmetry");
143 }
144 ++l;
145 }
146 }
147 return work;
148 }
149};
150
151/// Conversion from COO to dense format.
152template <class Index>
158#if GUANAQO_SPARSE_SUPPORTS_SORTING
160#endif
161 if (from.symmetry != Symmetry::Unsymmetric && from.rows != from.cols)
162 throw std::invalid_argument("Nonsquare matrix cannot be symmetric");
163 return {
164 .rows = from.rows,
165 .cols = from.cols,
166 .symmetry = from.symmetry,
167 };
168 }
170 : from_sparsity(from), sparsity(convert_sparsity(from, request)) {}
173 [[nodiscard]] operator const to_sparsity_t &() const & { return sparsity; }
174 [[nodiscard]] const to_sparsity_t &get_sparsity() const { return *this; }
175 [[nodiscard]] size_t work_size() const {
176 return cast_sz(sparsity.rows) * cast_sz(sparsity.cols);
177 }
178 [[nodiscard]] length_t nnz_from() const { return from_sparsity.nnz(); }
179 template <class T>
180 [[nodiscard]] std::span<T>
181 convert_values(std::span<T> from_values,
182 std::span<std::remove_const_t<T>> work) const {
183 assert(from_values.size() == cast_sz(nnz_from()));
184 assert(work.size() == work_size());
185 std::ranges::fill(work, T{});
186 auto W = [&](Index r, Index c) -> std::remove_const_t<T> & {
187 return work[cast_sz(r) + cast_sz(c) * cast_sz(sparsity.rows)];
188 };
189 for (size_t l = 0; l < cast_sz(from_sparsity.nnz()); ++l) {
190 auto r = from_sparsity.row_indices[l] - from_sparsity.first_index;
191 auto c = from_sparsity.col_indices[l] - from_sparsity.first_index;
192 switch (from_sparsity.symmetry) {
193 case Symmetry::Unsymmetric: W(r, c) = from_values[l]; break;
194 case Symmetry::Upper:
195 if (r > c)
196 throw std::invalid_argument(
197 "Invalid symmetric COO matrix: upper-triangular "
198 "matrix should not "
199 "have elements below the diagonal");
200 W(c, r) = W(r, c) = from_values[l];
201 break;
202 case Symmetry::Lower:
203 if (r < c)
204 throw std::invalid_argument(
205 "Invalid symmetric COO matrix: lower-triangular "
206 "matrix should not "
207 "have elements above the diagonal");
208 W(c, r) = W(r, c) = from_values[l];
209 break;
210 default: throw std::invalid_argument("Invalid symmetry");
211 }
212 }
213 return work;
214 }
215};
216
217/// Conversion to COO format allows customization of the index offset (zero-based or one-based).
218/// @todo We could support sorting the indices in the output if desired as well.
219template <class Index>
221 /// Convert the index offset (zero for C/C++, one for Fortran).
222 std::optional<Index> first_index = std::nullopt;
223};
224
225/// Conversion from dense to COO format.
226template <class Index>
232 Index Δ = 0;
233 if (request.first_index)
234 Δ = *request.first_index;
235 switch (from.symmetry) {
237 auto nnz = cast_sz(from.rows) * cast_sz(from.cols);
238 row_indices.resize(nnz);
239 col_indices.resize(nnz);
240 size_t l = 0;
241 for (index_t c = 0; c < from.cols; ++c) {
242 for (index_t r = 0; r < from.rows; ++r) {
243 row_indices[l] = static_cast<Index>(r) + Δ;
244 col_indices[l] = static_cast<Index>(c) + Δ;
245 ++l;
246 }
247 }
248 } break;
249 case Symmetry::Upper: {
250 if (from.rows != from.cols)
251 throw std::invalid_argument(
252 "Nonsquare matrix cannot be symmetric");
253 auto nnz = cast_sz(from.rows) * cast_sz(from.rows + 1) / 2;
254 row_indices.resize(nnz);
255 col_indices.resize(nnz);
256 size_t l = 0;
257 for (index_t c = 0; c < from.cols; ++c) {
258 for (index_t r = 0; r <= c; ++r) {
259 row_indices[l] = static_cast<Index>(r) + Δ;
260 col_indices[l] = static_cast<Index>(c) + Δ;
261 ++l;
262 }
263 }
264 } break;
265 case Symmetry::Lower:
266 throw std::invalid_argument(
267 "Lower-triangular symmetry currently not supported");
268 default: throw std::invalid_argument("Invalid symmetry");
269 }
270 return {
271 .rows = from.rows,
272 .cols = from.cols,
273 .symmetry = from.symmetry,
274 .row_indices = row_indices,
275 .col_indices = col_indices,
277 .first_index = request.first_index ? *request.first_index : 0,
278 };
279 }
281 : sparsity(convert_sparsity(from, request)) {}
282 std::vector<Index> row_indices, col_indices;
284 [[nodiscard]] operator const to_sparsity_t &() const & { return sparsity; }
285 [[nodiscard]] const to_sparsity_t &get_sparsity() const { return *this; }
286 [[nodiscard]] size_t work_size() const {
287 if (sparsity.symmetry == Symmetry::Unsymmetric)
288 return 0;
289 return cast_sz(sparsity.nnz());
290 }
291 [[nodiscard]] length_t nnz_from() const {
292 return sparsity.rows * sparsity.cols;
293 }
294 template <class T>
295 [[nodiscard]] std::span<T>
296 convert_values(std::span<T> from_values,
297 std::span<std::remove_const_t<T>> work) const {
298 assert(from_values.size() == cast_sz(nnz_from()));
299 assert(work.size() == work_size());
300 if (sparsity.symmetry == Symmetry::Unsymmetric) {
301 return from_values;
302 } else if (sparsity.symmetry == Symmetry::Upper) {
303 auto f_col_above_diag = [&](index_t c) {
304 return std::span{from_values}.subspan(
305 cast_sz(c) * cast_sz(sparsity.rows), cast_sz(c) + 1);
306 };
307 auto t = work.begin();
308 for (index_t c = 0; c < sparsity.cols; ++c)
309 std::ranges::copy_backward(f_col_above_diag(c), t += c + 1);
310 return work;
311 } else {
312 throw std::logic_error("SparsityConverter<Dense, "
313 "SparseCOO<Index>>: Unsupported symmetry");
314 }
315 }
316};
317
318/// Conversion from CSC to COO format.
319template <class IndexFrom, class StorageIndexFrom, class IndexTo>
320struct SparsityConverter<SparseCSC<IndexFrom, StorageIndexFrom>,
321 SparseCOO<IndexTo>> {
326 IndexTo Δ = 0;
327 if (request.first_index)
328 Δ = *request.first_index;
329 row_indices.resize(cast_sz(from.nnz()));
330 col_indices.resize(cast_sz(from.nnz()));
331 size_t l = 0;
332 for (index_t c = 0; c < from.cols; ++c) {
333 auto inner_start = from.outer_ptr[cast_sz(c)];
334 auto inner_end = from.outer_ptr[cast_sz(c) + 1];
335 for (auto i = inner_start; i < inner_end; ++i) {
336 auto r = from.inner_idx[cast_sz(i)];
337 row_indices[l] = static_cast<IndexTo>(r) + Δ;
338 col_indices[l] = static_cast<IndexTo>(c) + Δ;
339 ++l;
340 }
341 }
342 return {
343 .rows = from.rows,
344 .cols = from.cols,
345 .symmetry = from.symmetry,
346 .row_indices = row_indices,
347 .col_indices = col_indices,
348 .order = from.order == from_sparsity_t::SortedRows
351 .first_index = request.first_index ? *request.first_index : 0,
352 };
353 }
355 : sparsity(convert_sparsity(from, request)) {
356#if GUANAQO_SPARSE_SUPPORTS_SORTING
357 assert(check_uniqueness_triplets(sparsity.row_indices,
358 sparsity.col_indices));
359#endif
360 }
361 std::vector<IndexTo> row_indices, col_indices;
363 [[nodiscard]] operator const to_sparsity_t &() const & { return sparsity; }
364 [[nodiscard]] const to_sparsity_t &get_sparsity() const { return *this; }
365 [[nodiscard]] size_t work_size() const { return 0; }
366 [[nodiscard]] length_t nnz_from() const { return sparsity.nnz(); }
367 template <class T>
368 [[nodiscard]] std::span<T>
369 convert_values(std::span<T> from_values,
370 std::span<std::remove_const_t<T>>) const {
371 assert(from_values.size() == cast_sz(nnz_from()));
372 return from_values;
373 }
374};
375
376/// Conversion from COO to COO format (with possibly different index types and offsets).
377template <class IndexFrom, class IndexTo>
378struct SparsityConverter<SparseCOO<IndexFrom>, SparseCOO<IndexTo>> {
383 IndexTo Δ = 0;
384 if (request.first_index)
385 Δ = *request.first_index - static_cast<IndexTo>(from.first_index);
386 // Check if we can fully reuse the indices without changes
387 if constexpr (std::is_same_v<IndexFrom, IndexTo>)
388 if (Δ == 0)
389 return from;
390 // Otherwise, allocate space for shifted or converted indices
391 row_indices.resize(cast_sz(from.nnz()));
392 col_indices.resize(cast_sz(from.nnz()));
393 auto cvt_idx = [Δ](auto i) { return static_cast<IndexTo>(i) + Δ; };
394 std::ranges::transform(from.row_indices, row_indices.begin(), cvt_idx);
395 std::ranges::transform(from.col_indices, col_indices.begin(), cvt_idx);
396 return {
397 .rows = from.rows,
398 .cols = from.cols,
399 .symmetry = from.symmetry,
400 .row_indices = row_indices,
401 .col_indices = col_indices,
402 .order = static_cast<typename to_sparsity_t::Order>(from.order),
403 .first_index = request.first_index
404 ? *request.first_index
405 : static_cast<IndexTo>(from.first_index),
406 };
407 }
409 : sparsity(convert_sparsity(from, request)) {
410#if GUANAQO_SPARSE_SUPPORTS_SORTING
411 assert(check_uniqueness_triplets(sparsity.row_indices,
412 sparsity.col_indices));
413#endif
414 }
415 std::vector<IndexTo> row_indices, col_indices;
417 [[nodiscard]] operator const to_sparsity_t &() const & { return sparsity; }
418 [[nodiscard]] const to_sparsity_t &get_sparsity() const { return *this; }
419 [[nodiscard]] size_t work_size() const { return 0; }
420 [[nodiscard]] length_t nnz_from() const { return sparsity.nnz(); }
421 template <class T>
422 [[nodiscard]] std::span<T>
423 convert_values(std::span<T> from_values,
424 std::span<std::remove_const_t<T>>) const {
425 assert(from_values.size() == cast_sz(nnz_from()));
426 return from_values;
427 }
428};
429
430/// Conversion to CSC format allows sorting the indices in the output if desired.
431template <class Index, class StorageIndex>
432struct SparsityConversionRequest<SparseCSC<Index, StorageIndex>> {
433 /// Sort the indices.
434 std::optional<typename SparseCSC<Index, StorageIndex>::Order> order =
435 std::nullopt;
436};
437
438/// Conversion from COO to CSC format.
439template <class IndexFrom, class IndexTo, class StorageIndexTo>
440struct SparsityConverter<SparseCOO<IndexFrom>,
441 SparseCSC<IndexTo, StorageIndexTo>> {
446 [[maybe_unused]] Request request) {
447 // Optional copy of the indices (needed for sorting)
448 std::vector<IndexFrom> row_indices, col_indices;
449 [[maybe_unused]] auto prepare_sort = [&] {
450 row_indices.resize(cast_sz(from.nnz()));
451 col_indices.resize(cast_sz(from.nnz()));
452 permutation.resize(cast_sz(from.nnz()));
453 std::ranges::copy(from.row_indices, row_indices.begin());
454 std::ranges::copy(from.col_indices, col_indices.begin());
455 std::iota(begin(permutation), end(permutation), StorageIndexTo{0});
456 };
457 // Sort the indices
458 typename to_sparsity_t::Order order;
459 if (request.order && *request.order == to_sparsity_t::SortedRows) {
461 switch (from.order) {
463 // No sorting necessary
464 break;
465 case from_sparsity_t::Unsorted: [[fallthrough]];
466 case from_sparsity_t::SortedByColsOnly: [[fallthrough]];
467 case from_sparsity_t::SortedByRowsAndCols: [[fallthrough]];
469#if GUANAQO_SPARSE_SUPPORTS_SORTING
470 prepare_sort();
471 sort_triplets(row_indices, col_indices, permutation);
472 break;
473#else
475 "Sorting is SparseCOO is not supported. Recompile with "
476 "C++23-compliant compiler");
477#endif
478 default: throw std::invalid_argument("Invalid order");
479 }
480 } else {
481 switch (from.order) {
484 // No sorting necessary
485 break;
488 // No sorting necessary
489 break;
490 case from_sparsity_t::Unsorted: [[fallthrough]];
491 case from_sparsity_t::SortedByRowsAndCols: [[fallthrough]];
493#if GUANAQO_SPARSE_SUPPORTS_SORTING
495 prepare_sort();
496 sort_triplets_col(row_indices, col_indices, permutation);
497 break;
498#else
500 "Sorting is SparseCOO is not supported. Recompile with "
501 "C++23-compliant compiler");
502#endif
503 default: throw std::invalid_argument("Invalid order");
504 }
505 }
506 assert(!request.order || *request.order == order);
507 if (std::ranges::is_sorted(permutation))
508 permutation.clear();
509 // Convert the sorted COO format to CSC
510 inner_idx.resize(cast_sz(from.nnz()));
511 outer_ptr.resize(cast_sz(from.cols) + 1);
513 row_indices.empty() ? from.row_indices : std::span{row_indices},
514 col_indices.empty() ? from.col_indices : std::span{col_indices},
515 inner_idx, outer_ptr, from.first_index);
516 return {
517 .rows = from.rows,
518 .cols = from.cols,
519 .symmetry = from.symmetry,
520 .inner_idx = inner_idx,
521 .outer_ptr = outer_ptr,
522 .order = order,
523 };
524 }
526 : sparsity(convert_sparsity(from, request)) {
527 assert(check_uniqueness_csc(sparsity.outer_ptr, sparsity.inner_idx));
528 }
529 std::vector<IndexTo> inner_idx;
530 std::vector<StorageIndexTo> outer_ptr;
531 std::vector<StorageIndexTo> permutation;
533 [[nodiscard]] operator const to_sparsity_t &() const & { return sparsity; }
534 [[nodiscard]] const to_sparsity_t &get_sparsity() const { return *this; }
535 [[nodiscard]] size_t work_size() const { return permutation.size(); }
536 [[nodiscard]] length_t nnz_from() const { return sparsity.nnz(); }
537 template <class T>
538 [[nodiscard]] std::span<T>
539 convert_values(std::span<T> from_values,
540 std::span<std::remove_const_t<T>> work) const {
541 assert(from_values.size() == cast_sz(nnz_from()));
542 assert(work.size() == work_size());
543 if (permutation.empty()) {
544 return from_values;
545 } else {
546 for (size_t i = 0; i < permutation.size(); ++i)
547 work[i] = from_values[cast_sz(permutation[i])];
548 return work;
549 }
550 }
551};
552
553/// Conversion from CSC to CSC format (with possibly different index types and sorting).
554template <class IndexFrom, class StorageIndexFrom, class IndexTo,
555 class StorageIndexTo>
556struct SparsityConverter<SparseCSC<IndexFrom, StorageIndexFrom>,
557 SparseCSC<IndexTo, StorageIndexTo>> {
562 Request request) {
563 auto cvt_in = [](auto i) { return static_cast<IndexTo>(i); };
564 auto cvt_out = [](auto i) { return static_cast<StorageIndexTo>(i); };
565 auto convert_inner = [&] {
566 inner_idx.resize(from.inner_idx.size());
567 std::ranges::transform(from.inner_idx, inner_idx.begin(), cvt_in);
568 };
569 auto convert_outer = [&] {
570 outer_ptr.resize(from.outer_ptr.size());
571 std::ranges::transform(from.outer_ptr, outer_ptr.begin(), cvt_out);
572 };
573 auto sort_indices = [&] {
574#if GUANAQO_SPARSE_SUPPORTS_SORTING
575 permutation.resize(from.inner_idx.size());
576 std::iota(begin(permutation), end(permutation), StorageIndexTo{0});
578#else
580 "Sorting is SparseCSC is not supported. Recompile with "
581 "C++23-compliant compiler");
582#endif
583 };
584 using Order = typename to_sparsity_t::Order;
585 bool need_sorting = false;
586 auto order = static_cast<Order>(from.order);
587 // Check whether the user requested the indices to be sorted
588 if (request.order && *request.order == to_sparsity_t::SortedRows) {
590 switch (from.order) {
591 case from_sparsity_t::Unsorted: need_sorting = true; break;
592 case from_sparsity_t::SortedRows: need_sorting = false; break;
593 default: throw std::invalid_argument("Invalid order");
594 }
595 }
596 // Convert and sort, or only convert the indices
597 if (need_sorting) {
598 convert_inner();
599 convert_outer();
600 sort_indices();
601 } else {
602 if constexpr (!std::is_same_v<IndexFrom, IndexTo>)
603 convert_inner();
604 if constexpr (!std::is_same_v<StorageIndexFrom, StorageIndexTo>)
605 convert_outer();
606 }
607 // Remove unnecessary permutations
608 if (std::ranges::is_sorted(permutation))
609 permutation.clear();
610 // Can we avoid copying?
611 // If the index types are the same, we may be able to reuse the storage
612 auto get_inner_idx = [&] {
613 if constexpr (std::is_same_v<IndexFrom, IndexTo>)
614 return inner_idx.empty() ? from.inner_idx
615 : std::span{inner_idx};
616 else
617 return std::span{inner_idx};
618 };
619 auto get_outer_ptr = [&] {
620 if constexpr (std::is_same_v<StorageIndexFrom, StorageIndexTo>)
621 return outer_ptr.empty() ? from.outer_ptr
622 : std::span{outer_ptr};
623 else
624 return std::span{outer_ptr};
625 };
626 return {
627 .rows = from.rows,
628 .cols = from.cols,
629 .symmetry = from.symmetry,
630 .inner_idx = get_inner_idx(),
631 .outer_ptr = get_outer_ptr(),
632 .order = order,
633 };
634 }
636 : sparsity(convert_sparsity(from, request)) {
637 assert(check_uniqueness_csc(sparsity.outer_ptr, sparsity.inner_idx));
638 }
639 std::vector<IndexTo> inner_idx;
640 std::vector<StorageIndexTo> outer_ptr;
641 std::vector<StorageIndexTo> permutation;
643 [[nodiscard]] operator const to_sparsity_t &() const & { return sparsity; }
644 [[nodiscard]] const to_sparsity_t &get_sparsity() const { return *this; }
645 [[nodiscard]] size_t work_size() const { return permutation.size(); }
646 [[nodiscard]] length_t nnz_from() const { return sparsity.nnz(); }
647 template <class T>
648 [[nodiscard]] std::span<T>
649 convert_values(std::span<T> from_values,
650 std::span<std::remove_const_t<T>> work) const {
651 assert(from_values.size() == cast_sz(nnz_from()));
652 assert(work.size() == work_size());
653 if (permutation.empty()) {
654 return from_values;
655 } else {
656 for (size_t i = 0; i < permutation.size(); ++i)
657 work[i] = from_values[cast_sz(permutation[i])];
658 return work;
659 }
660 }
661};
662
663/// Conversion from dense to CSC format.
664template <class Index, class StorageIndex>
665struct SparsityConverter<Dense, SparseCSC<Index, StorageIndex>> {
670 Request) {
671 auto cvt_out = [](auto i) { return static_cast<StorageIndex>(i); };
672 auto cvt_in = [](auto i) { return static_cast<Index>(i); };
673 switch (from.symmetry) {
675 auto nnz = cast_sz(from.rows) * cast_sz(from.cols);
676 inner_idx.resize(nnz);
677 outer_ptr.resize(cast_sz(from.cols) + 1);
678 size_t l = 0;
679 for (index_t c = 0; c < from.cols; ++c) {
680 outer_ptr[cast_sz(c)] = cvt_out(l);
681 for (index_t r = 0; r < from.rows; ++r) {
682 inner_idx[l] = cvt_in(r);
683 ++l;
684 }
685 }
686 outer_ptr[cast_sz(from.cols)] = cvt_out(l);
687 } break;
688 case Symmetry::Upper: {
689 if (from.rows != from.cols)
690 throw std::invalid_argument(
691 "Nonsquare matrix cannot be symmetric");
692 auto nnz = cast_sz(from.rows) * (cast_sz(from.rows) + 1) / 2;
693 inner_idx.resize(nnz);
694 outer_ptr.resize(cast_sz(from.cols) + 1);
695 size_t l = 0;
696 for (index_t c = 0; c < from.cols; ++c) {
697 outer_ptr[cast_sz(c)] = cvt_out(l);
698 for (index_t r = 0; r <= c; ++r) {
699 inner_idx[l] = cvt_in(r);
700 ++l;
701 }
702 }
703 outer_ptr[cast_sz(from.cols)] = cvt_out(l);
704 } break;
705 case Symmetry::Lower:
706 throw std::invalid_argument(
707 "Lower-triangular symmetry currently not supported");
708 default: throw std::invalid_argument("Invalid symmetry");
709 }
710 return {
711 .rows = from.rows,
712 .cols = from.cols,
713 .symmetry = from.symmetry,
714 .inner_idx = inner_idx,
715 .outer_ptr = outer_ptr,
717 };
718 }
720 : sparsity(convert_sparsity(from, request)) {}
721 std::vector<Index> inner_idx;
722 std::vector<StorageIndex> outer_ptr;
724 [[nodiscard]] operator const to_sparsity_t &() const & { return sparsity; }
725 [[nodiscard]] const to_sparsity_t &get_sparsity() const { return *this; }
726 [[nodiscard]] size_t work_size() const {
727 if (sparsity.symmetry == Symmetry::Unsymmetric)
728 return 0;
729 return cast_sz(sparsity.nnz());
730 }
731 [[nodiscard]] length_t nnz_from() const {
732 return sparsity.rows * sparsity.cols;
733 }
734 template <class T>
735 [[nodiscard]] std::span<T>
736 convert_values(std::span<T> from_values,
737 std::span<std::remove_const_t<T>> work) const {
738 assert(from_values.size() == cast_sz(nnz_from()));
739 assert(work.size() == work_size());
740 if (sparsity.symmetry == Symmetry::Unsymmetric) {
741 return from_values;
742 } else if (sparsity.symmetry == Symmetry::Upper) {
743 auto f_col_above_diag = [&](index_t c) {
744 return std::span{from_values}.subspan(
745 cast_sz(c) * cast_sz(sparsity.rows), cast_sz(c) + 1);
746 };
747 auto t = work.begin();
748 for (index_t c = 0; c < sparsity.cols; ++c)
749 std::ranges::copy_backward(f_col_above_diag(c), t += c + 1);
750 return work;
751 } else {
752 assert(false);
753 return {};
754 }
755 }
756};
757
758/// @}
759
760namespace detail {
761template <class To, class>
763
764/// Specialization that unpacks the alternatives of a @c std::variant of
764/// source sparsity types and produces the matching variant of converters,
764/// each converting one source format to the target format @p To.
764template <class To, class... Froms>
765struct ConverterVariantHelper<To, std::variant<Froms...>> {
766    // One SparsityConverter instantiation per possible source format.
766    using type = std::variant<SparsityConverter<Froms, To>...>;
767};
768} // namespace detail
769
770/// @addtogroup linalg_sparsity_conv
771/// @{
772
773template <class To>
776
777/// Converts any supported matrix storage format to the given format.
778/// @see @ref Sparsity
779template <class To>
782 using to_sparsity_t = To;
785 : converter{std::visit(wrap_converter(request), from.value)} {}
787 [[nodiscard]] operator const to_sparsity_t &() const {
788 return std::visit(
789 [](const auto &c) -> const to_sparsity_t & { return c; },
790 converter);
791 }
792 [[nodiscard]] const to_sparsity_t &get_sparsity() const { return *this; }
793 [[nodiscard]] size_t work_size() const {
794 return std::visit([](const auto &c) { return c.work_size(); },
795 converter);
796 }
797 [[nodiscard]] length_t nnz_from() const {
798 return std::visit([](const auto &c) { return c.nnz_from(); },
799 converter);
800 }
801 template <class T>
802 [[nodiscard]] std::span<T>
803 convert_values(std::span<T> from_values,
804 std::span<std::remove_const_t<T>> work) const {
805 const auto visitor = [&](const auto &c) {
806 return c.template convert_values<T>(from_values, work);
807 };
808 return std::visit(visitor, converter);
809 }
810 /// Call @p evaluator, then convert the values and write the result into
811 /// @p result, while minimizing the number of copies.
812 /// @return The allocated workspace that can be moved into the @p work
813 /// argument during the next invocation (to minimize allocations).
814 /// @todo Write tests.
815 template <class T, class E>
816 std::vector<T> convert_values_into(std::span<T> result, E &&evaluator,
817 std::vector<T> work = {}) const {
818 const auto eval_size = cast_sz(nnz_from());
819 // If the work size is zero, conversion can be done in-place, so
820 // evaluate directly into the result.
821 if (work_size() == 0 && result.size() >= eval_size) {
822 std::forward<E>(evaluator)(result.first(eval_size));
823 const auto visitor = [&](const auto &c) {
824 return c.template convert_values<T>(result, work);
825 };
826 [[maybe_unused]] auto r = std::visit(visitor, converter);
827 assert(r.data() == result.data());
828 }
829 // If the conversion cannot be done in-place, allocate a workspace
830 // vector, then evaluate into that vector, and then convert the values
831 // into the result.
832 else {
833 work.resize(eval_size);
834 std::forward<E>(evaluator)(std::span<T>{work});
835 const auto visitor = [&](const auto &c) {
836 return c.template convert_values<T>(work, result);
837 };
838 [[maybe_unused]] auto r = std::visit(visitor, converter);
839 assert(r.data() == result.data());
840 }
841 return std::move(work);
842 }
843 template <class T>
844 [[nodiscard]] std::vector<std::remove_const_t<T>>
845 convert_values_copy(std::span<T> from_values) const {
846 std::vector<T> work(work_size());
847 auto result = convert_values<std::add_const_t<T>>(from_values, work);
848 if (result.data() != work.data())
849 work = {result.begin(), result.end()};
850 return work;
851 }
852
853 private:
854 static auto wrap_converter(Request request) {
855 return [request]<class From>(const From &from) -> ConverterVariant<To> {
856 return SparsityConverter<From, To>{from, request};
857 };
858 }
859};
860
861/// @}
862
863} // namespace guanaqo::linalg::sparsity
Common linear algebra index types.
std::optional< Index > first_index
Convert the index offset (zero for C/C++, one for Fortran).
detail::ConverterVariantHelper< To, SparsityVariant >::type ConverterVariant
Additional options for the conversion performed by SparsityConverter.
Converts one matrix storage format to another.
void sort_triplets(Ts &&...triplets)
Sort the (row, column, value) triplets, by column index first, then row.
bool check_uniqueness_csc(const Outer &outer_ptr, const Inner inner)
Check that no two entries with the same row and column index exist in the given sparse compressed-col...
void convert_triplets_to_ccs(const auto &rows, const auto &cols, auto &&inner_idx, auto &&outer_ptr, auto idx_0=0)
bool check_uniqueness_triplets(Ts &&...triplets)
Check that no two entries with the same row and column index exist in the given sparse coordinate lis...
void sort_rows_csc(const Outer &outer_ptr, Inners &&...inners)
Given a sparse compressed-column storage matrix, sort all row indices within each column.
void sort_triplets_col(Ts &&...triplets)
Sort the (row, column, value) triplets by column index.
@ Upper
Symmetric, upper-triangular part is stored.
Definition sparsity.hpp:22
@ Lower
Symmetric, lower-triangular part is stored.
Definition sparsity.hpp:23
Dense matrix structure.
Definition sparsity.hpp:37
std::ptrdiff_t index_t
Definition config.hpp:12
std::ptrdiff_t length_t
Definition config.hpp:14
constexpr size_t cast_sz(auto i)
Helpers for sparse matrix index ordering and conversions.
Sparse and dense sparsity descriptors.
Sparse coordinate list structure (COO).
Definition sparsity.hpp:70
std::span< const index_t > row_indices
Definition sparsity.hpp:74
length_t nnz() const
Get the number of structurally nonzero elements.
Definition sparsity.hpp:96
std::span< const index_t > col_indices
Definition sparsity.hpp:75
index_t first_index
Zero for C/C++, one for Fortran.
Definition sparsity.hpp:93
Sparse compressed-column structure (CCS or CSC).
Definition sparsity.hpp:44
std::span< const index_t > inner_idx
Definition sparsity.hpp:49
length_t nnz() const
Get the number of structurally nonzero elements.
Definition sparsity.hpp:60
std::span< const storage_index_t > outer_ptr
Definition sparsity.hpp:50
std::optional< typename SparseCSC< Index, StorageIndex >::Order > order
Sort the indices.
std::span< T > convert_values(std::span< T > from_values, std::span< std::remove_const_t< T > >) const
std::span< T > convert_values(std::span< T > from_values, std::span< std::remove_const_t< T > > work) const
to_sparsity_t convert_sparsity(from_sparsity_t from, Request request)
std::span< T > convert_values(std::span< T > from_values, std::span< std::remove_const_t< T > > work) const
std::span< T > convert_values(std::span< T > from_values, std::span< std::remove_const_t< T > > work) const
std::span< T > convert_values(std::span< T > from_values, std::span< std::remove_const_t< T > >) const
std::span< T > convert_values(std::span< T > from_values, std::span< std::remove_const_t< T > > work) const
std::span< T > convert_values(std::span< T > from_values, std::span< std::remove_const_t< T > > work) const
std::span< T > convert_values(std::span< T > from_values, std::span< std::remove_const_t< T > >) const
std::span< T > convert_values(std::span< T > from_values, std::span< std::remove_const_t< T > > work) const
std::vector< std::remove_const_t< T > > convert_values_copy(std::span< T > from_values) const
std::vector< T > convert_values_into(std::span< T > result, E &&evaluator, std::vector< T > work={}) const
Call evaluator, then convert the values and write the result into result, while minimizing the number...
std::span< T > convert_values(std::span< T > from_values, std::span< std::remove_const_t< T > > work) const
Stores any of the supported sparsity patterns.
Definition sparsity.hpp:118
The requested conversion between sparsity structures could not be performed (most likely because cert...