Tatooine
solve.h
#ifndef TATOOINE_TENSOR_OPERATIONS_SOLVE_H
#define TATOOINE_TENSOR_OPERATIONS_SOLVE_H
//==============================================================================
#include <tatooine/utility.h>

#include <cstdint>
//==============================================================================
namespace tatooine {
//==============================================================================
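/// The LAPACK-based solvers below overwrite their inputs. This helper hands
/// rvalue tensors through unchanged (they may be modified in place) and
/// copies everything else into an owning tensor of the appropriate value
/// type.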
auto copy_or_keep_if_rvalue_tensor_solve(dynamic_tensor auto&& x) -> decltype(auto) {
  return tensor<tatooine::value_type<decltype(x)>>{std::forward<decltype(x)>(x)};
}
template <typename T>
auto copy_or_keep_if_rvalue_tensor_solve(tensor<T>&& x) -> decltype(auto) {
  return std::move(x);
}
template <typename T, std::size_t... Dims>
auto copy_or_keep_if_rvalue_tensor_solve(tensor<T, Dims...>&& x)
    -> decltype(auto) {
  return std::move(x);
}
template <typename T>
auto copy_or_keep_if_rvalue_tensor_solve(tensor<T> const& x) {
  return tensor<T>{x};
}
template <typename T, std::size_t... Dims>
auto copy_or_keep_if_rvalue_tensor_solve(tensor<T, Dims...> const& x) {
  return tensor<T, Dims...>{x};
}
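/// Solves A * X = B directly for a 2x2 matrix A (Cramer's rule, one column of
/// B at a time). Returns std::nullopt if the determinant of A is zero.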
template <fixed_size_quadratic_mat<2> MatA, static_mat MatB>
requires(tensor_dimension<MatB, 0> == 2)
auto constexpr solve_direct(MatA&& A, MatB&& B) {
  using out_value_type =
      common_type<tatooine::value_type<MatA>, tatooine::value_type<MatB>>;
  auto constexpr K = tensor_dimension<MatB, 1>;

  using out_mat_type = mat<out_value_type, 2, K>;
  using out_type     = std::optional<out_mat_type>;

  auto const div = (A(0, 0) * A(1, 1) - A(1, 0) * A(0, 1));
  if (div == 0) {
    return out_type{std::nullopt};
  }
  auto const p = 1 / div;
  auto       X = out_type{out_mat_type{}};
  for (std::size_t i = 0; i < K; ++i) {
    X->at(0, i) = -(A(0, 1) * B(1, i) - A(1, 1) * B(0, i)) * p;
    X->at(1, i) = (A(0, 0) * B(1, i) - A(1, 0) * B(0, i)) * p;
  }
  return X;
}
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
template <fixed_size_mat<2, 2> MatA, fixed_size_vec<2> VecB>
auto solve_direct(MatA&& A, VecB&& b) -> std::optional<
    vec<common_type<tatooine::value_type<MatA>, tatooine::value_type<VecB>>, 2>> {
  using out_value_type =
      common_type<tatooine::value_type<MatA>, tatooine::value_type<VecB>>;
  auto const div = (A(0, 0) * A(1, 1) - A(1, 0) * A(0, 1));
  if (div == 0) {
    return std::nullopt;
  }
  auto const p = 1 / div;
  return vec<out_value_type, 2>{-(A(0, 1) * b(1) - A(1, 1) * b(0)) * p,
                                (A(0, 0) * b(1) - A(1, 0) * b(0)) * p};
}
//------------------------------------------------------------------------------
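/// Solves the square system A * x = b with Cramer's rule: component i of the
/// solution is det(A with column i replaced by b) divided by det(A). No
/// LAPACK needed; intended for small static systems.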
template <static_quadratic_mat MatA, static_vec VecB>
requires(tensor_dimension<MatA, 0> ==
         tensor_dimension<VecB, 0>)
auto solve_cramer(MatA const& A, VecB const& b) -> std::optional<
    vec<common_type<tatooine::value_type<MatA>, tatooine::value_type<VecB>>,
        tensor_dimension<MatA, 1>>> {
  static constexpr auto N = tensor_dimension<MatA, 1>;
  using out_value_type =
      common_type<tatooine::value_type<MatA>, tatooine::value_type<VecB>>;
  auto const det_inv = 1 / det(A);
  auto       Y       = mat<out_value_type, N, N>{A};
  auto       tmp     = vec<out_value_type, N>{};
  auto       result  = vec<out_value_type, N>{};
  for (std::size_t i = 0; i < N; ++i) {
    tmp       = Y.col(i);
    Y.col(i)  = b;
    result(i) = det(Y) * det_inv;
    Y.col(i)  = tmp;
  }
  return result;
}
//------------------------------------------------------------------------------
#if TATOOINE_BLAS_AND_LAPACK_AVAILABLE
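/// Solves the square system A * x = b through LAPACK's LU factorization with
/// partial pivoting (gesv). A and b are copied into tensors of the common
/// value type; std::nullopt is returned if the factorization fails.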
template <static_quadratic_mat MatA, static_vec VecB>
requires(tensor_dimension<MatA, 1> ==
         tensor_dimension<VecB, 0>)
auto solve_lu_lapack(MatA& A_, VecB&& b_)
    -> std::optional<
        tensor<common_type<tatooine::value_type<MatA>, tatooine::value_type<VecB>>>> {
  using common_type =
      tatooine::common_type<tatooine::value_type<MatA>, tatooine::value_type<VecB>>;
  static constexpr auto M    = tensor_dimension<MatA, 0>;
  static constexpr auto N    = tensor_dimension<MatA, 1>;
  auto                  A    = tensor<common_type, M, N>{A_};
  auto                  b    = tensor<common_type, N>{b_};
  auto                  ipiv = vec<int, N>{};
  [[maybe_unused]] auto info = lapack::gesv(A, b, ipiv);
  if (info != 0) {
    return std::nullopt;
  }
  return b;
}
#endif
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#if TATOOINE_BLAS_AND_LAPACK_AVAILABLE
template <static_quadratic_mat MatA, static_mat MatB>
requires(tensor_dimension<MatA, 0> == tensor_dimension<MatB, 0>)
auto solve_lu_lapack(MatA& A_, MatB& B_) {
  using out_value_type =
      common_type<tatooine::value_type<MatA>, tatooine::value_type<MatB>>;
  static constexpr auto N = tensor_dimension<MatA, 1>;
  static constexpr auto K = tensor_dimension<MatB, 1>;
  using out_mat_type = mat<out_value_type, N, K>;
  using out_type     = std::optional<out_mat_type>;
  auto A    = mat<out_value_type, N, N>{A_};
  auto B    = out_type{B_};
  auto ipiv = vec<int, N>{};
  [[maybe_unused]] auto info = lapack::gesv(A, *B, ipiv);
  if (info != 0) {
    return out_type{std::nullopt};
  }
  return B;
}
#endif
//------------------------------------------------------------------------------
#if TATOOINE_BLAS_AND_LAPACK_AVAILABLE
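/// Least-squares solve of the over-determined system A * X = B via QR
/// factorization: geqrf factorizes A, ormqr applies Q^T to B, and trtrs
/// back-substitutes with the upper-triangular factor R.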
template <static_mat MatA, static_mat MatB>
requires(tensor_dimension<MatA, 0> == tensor_dimension<MatB, 0>)
auto solve_qr_lapack(MatA&& A_, MatB&& B_) {
  using out_value_type =
      common_type<tatooine::value_type<MatA>, tatooine::value_type<MatB>>;
  static auto constexpr M = tensor_dimension<MatA, 0>;
  static auto constexpr N = tensor_dimension<MatA, 1>;
  static auto constexpr K = tensor_dimension<MatB, 1>;
  using mat_out_type = mat<out_value_type, N, K>;
  using out_type     = std::optional<mat_out_type>;

  auto A   = mat<out_value_type, M, N>{A_};
  auto B   = mat<out_value_type, M, K>{B_};
  auto tau = vec<out_value_type, (M < N ? M : N)>{};

  // Q * R = A
  if (lapack::geqrf(A, tau) != 0) {
    return out_type{std::nullopt};
  }
  // R * x = Q^T * B
  if (lapack::ormqr(/* argument list elided in this listing */) != 0) {
    return out_type{std::nullopt};
  }
  // Back-substitution using the upper-triangular part of A
  if (lapack::trtrs(/* argument list elided in this listing */) != 0) {
    return out_type{std::nullopt};
  }
  auto X = out_type{mat_out_type{}};
  for_loop([&, i = std::size_t(0)](
               auto const... is) mutable { X->at(is...) = B(is...); },
           N, K);
  return X;
}
#endif
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#if TATOOINE_BLAS_AND_LAPACK_AVAILABLE
template <static_mat MatA, static_vec VecB>
requires(tensor_dimension<MatA, 0> == tensor_dimension<VecB, 0>)
auto solve_qr_lapack(MatA&& A_, VecB&& b_) {
  using out_value_type =
      common_type<tatooine::value_type<MatA>, tatooine::value_type<VecB>>;
  static auto constexpr M = tensor_dimension<MatA, 0>;
  static auto constexpr N = tensor_dimension<MatA, 1>;
  auto A = mat<out_value_type, M, N>{A_};
  auto b = vec<out_value_type, M>{b_};
  using out_vec_type = vec<out_value_type, (M < N ? M : N)>;
  using out_type     = std::optional<out_vec_type>;
  auto tau = out_type{out_vec_type{}};

  // Q * R = A
  if (lapack::geqrf(A, *tau) != 0) {
    return out_type{std::nullopt};
  }
  // R * x = Q^T * b
  if (lapack::ormqr(/* argument list elided in this listing */) != 0) {
    return out_type{std::nullopt};
  }
  // Back-substitution using the upper-triangular part of A
  if (lapack::trtrs(/* argument list elided in this listing */) != 0) {
    return out_type{std::nullopt};
  }
  for (std::size_t i = 0; i < tau->dimension(0); ++i) {
    tau->at(i) = b(i);
  }
  return tau;
}
#endif
//------------------------------------------------------------------------------
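/// Compile-time dispatch for statically sized systems A * X = B: direct 2x2
/// solve when possible, LU factorization for square systems, QR for
/// over-determined systems; under-determined systems throw.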
template <static_mat MatA, static_mat MatB>
requires(tensor_dimension<MatA, 0> == tensor_dimension<MatB, 0>)
auto solve(MatA&& A, MatB&& B) {
  static auto constexpr M = tensor_dimension<MatA, 0>;
  static auto constexpr N = tensor_dimension<MatA, 1>;
  static auto constexpr K = tensor_dimension<MatB, 1>;
  if constexpr (M == 2 && N == 2 && K >= M) {
    return solve_direct(std::forward<MatA>(A), std::forward<MatB>(B));
  } else if constexpr (M == N) {
#if TATOOINE_BLAS_AND_LAPACK_AVAILABLE
    return solve_lu_lapack(std::forward<MatA>(A), std::forward<MatB>(B));
#else
    throw std::runtime_error{
        "cannot do a LU-factorization because LAPACK is missing"};
#endif
  } else if constexpr (M > N) {
#if TATOOINE_BLAS_AND_LAPACK_AVAILABLE
    return solve_qr_lapack(std::forward<MatA>(A), std::forward<MatB>(B));
#else
    throw std::runtime_error{
        "cannot do a QR-factorization because LAPACK is missing"};
#endif
  } else {
    throw std::runtime_error{"System is under-determined."};
  }
}
//------------------------------------------------------------------------------
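/// Runtime-sized LU solve via gesv. If A and B already share a value type,
/// rvalue arguments are factorized in place; otherwise both are copied into
/// tensors of the common value type first.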
#if TATOOINE_BLAS_AND_LAPACK_AVAILABLE
template <dynamic_tensor TensorA, dynamic_tensor TensorB>
auto solve_lu_lapack(TensorA&& A_, TensorB&& B_) -> std::optional<tensor<
    common_type<tatooine::value_type<decltype(A_)>, tatooine::value_type<decltype(B_)>>>> {
  assert(A_.rank() == 2);
  assert(A_.dimension(0) == A_.dimension(1));
  assert(A_.dimension(0) == B_.dimension(0));
  assert(B_.rank() == 1 || B_.rank() == 2);
  auto ipiv = tensor<int>::zeros(A_.dimension(0));
  if constexpr (same_as<tatooine::value_type<decltype(A_)>,
                        tatooine::value_type<decltype(B_)>>) {
    decltype(auto) A =
        copy_or_keep_if_rvalue_tensor_solve(std::forward<TensorA>(A_));
    decltype(auto) B =
        copy_or_keep_if_rvalue_tensor_solve(std::forward<TensorB>(B_));
    if (auto const info = lapack::gesv(A, B, ipiv); info != 0) {
      return std::nullopt;
    }
    return std::forward<decltype(B)>(B);
  } else {
    using out_value_type =
        common_type<tatooine::value_type<decltype(A_)>, tatooine::value_type<decltype(B_)>>;
    auto A = tensor<out_value_type>{A_};
    auto B = tensor<out_value_type>{B_};
    if (lapack::gesv(A, B, ipiv) != 0) {
      return std::nullopt;
    }
    return B;
  }
}
#endif
//------------------------------------------------------------------------------
#if TATOOINE_BLAS_AND_LAPACK_AVAILABLE
template <dynamic_tensor TensorA, dynamic_tensor TensorB>
auto solve_qr_lapack(TensorA&& A_, TensorB&& B_)
    -> std::optional<tensor<
        common_type<tatooine::value_type<decltype(A_)>, tatooine::value_type<decltype(B_)>>>> {
  using out_value_type =
      common_type<tatooine::value_type<decltype(A_)>, tatooine::value_type<decltype(B_)>>;
  assert(A_.rank() == 2);
  assert(B_.rank() == 1 || B_.rank() == 2);
  assert(A_.dimension(0) == B_.dimension(0));
  auto A = tensor<out_value_type>{A_};
  auto B = tensor<out_value_type>{B_};
  auto const M   = A.dimension(0);
  auto const N   = A.dimension(1);
  auto const K   = (B.rank() == 1 ? 1 : B.dimension(1));
  auto       tau = tensor<out_value_type>{min(M, N)};
  auto X = K > 1 ? tensor<out_value_type>{N, K} : tensor<out_value_type>{N};

  // Q * R = A
  if (lapack::geqrf(A, tau) != 0) {
    return std::nullopt;
  }
  // R * x = Q^T * B
  if (lapack::ormqr(/* argument list elided in this listing */) != 0) {
    return std::nullopt;
  }
  // Back-substitution using the upper-triangular part of A
  if (lapack::trtrs(/* argument list elided in this listing */) != 0) {
    return std::nullopt;
  }
  for_loop(
      [&](auto const i, auto const j) {
        if (B.rank() == 1) {
          X(i) = B(i);
        } else {
          X(i, j) = B(i, j);
        }
      },
      N, K);
  return X;
}
#endif
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
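/// Compile-time dispatch for statically sized systems A * x = b: direct solve
/// for 2x2, Cramer's rule for 3x3, LU for larger square systems, QR for
/// over-determined systems; under-determined systems throw.
///
/// Illustrative sketch (not part of the original header; assumes the usual
/// tatooine mat/vec constructors):
/// \code
///   auto const A = mat<double, 2, 2>{{4.0, 1.0}, {2.0, 3.0}};
///   auto const b = vec<double, 2>{1.0, 2.0};
///   if (auto const x = solve(A, b)) {
///     // *x holds the solution of A * x = b
///   }
/// \endcode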
template <static_mat MatA, static_vec VecB>
requires(tensor_dimension<MatA, 0> == tensor_dimension<VecB, 0>)
auto solve(MatA&& A, VecB&& b) {
  static auto constexpr M = tensor_dimension<MatA, 0>;
  static auto constexpr N = tensor_dimension<MatA, 1>;
  if constexpr (M == 2 && N == 2) {
    return solve_direct(std::forward<MatA>(A), std::forward<VecB>(b));
  } else if constexpr (M == 3 && N == 3) {
    return solve_cramer(std::forward<MatA>(A), std::forward<VecB>(b));
  } else if constexpr (M == N) {
#if TATOOINE_BLAS_AND_LAPACK_AVAILABLE
    return solve_lu_lapack(std::forward<MatA>(A), std::forward<VecB>(b));
#else
    throw std::runtime_error{
        "cannot do a LU-factorization because LAPACK is missing"};
#endif
  } else if constexpr (M > N) {
#if TATOOINE_BLAS_AND_LAPACK_AVAILABLE
    return solve_qr_lapack(std::forward<MatA>(A), std::forward<VecB>(b));
#else
    throw std::runtime_error{
        "cannot do a QR-factorization because LAPACK is missing"};
#endif
  } else {
    throw std::runtime_error{"System is under-determined."};
  }
}
//==============================================================================
#if TATOOINE_BLAS_AND_LAPACK_AVAILABLE
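/// Solves A * x = b for a symmetric, possibly indefinite matrix A via
/// LAPACK's sysv (diagonal-pivoting factorization A = U*D*U^T or L*D*L^T,
/// selected by uplo). Diagnostics go to std::cerr; std::nullopt is returned
/// if the factorization fails.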
template <dynamic_tensor TensorA, dynamic_tensor TensorB>
auto solve_symmetric_lapack(TensorA&& A_, TensorB&& B_,
                            lapack::uplo uplo = lapack::uplo::lower)
    -> std::optional<tensor<common_type<tatooine::value_type<decltype(A_)>,
                                        tatooine::value_type<decltype(B_)>>>> {
  // assert A is quadratic matrix
  assert(A_.rank() == 2);
  assert(A_.dimension(0) == A_.dimension(1));

  // assert B is vector or matrix
  assert(B_.rank() == 1 || B_.rank() == 2);
  // assert B dimensions are correct
  assert(B_.dimension(0) == A_.dimension(0));
  if constexpr (same_as<tatooine::value_type<decltype(A_)>,
                        tatooine::value_type<decltype(B_)>>) {
    decltype(auto) A = copy_or_keep_if_rvalue_tensor_solve(A_);
    decltype(auto) B = copy_or_keep_if_rvalue_tensor_solve(B_);

    auto const info = lapack::sysv(A, B, uplo);
    if (info > 0) {
      std::cerr
          << "[lapack::sysv]\n D(" << info << ", " << info
          << ") is exactly zero. The factorization has been completed, but the "
             "block diagonal matrix D is exactly singular, so the solution "
             "could not be computed.\n";
    } else if (info < 0) {
      std::cerr << "[lapack::sysv]\n Parameter " << -info << " is wrong.\n";
    }
    if (info != 0) {
      return std::nullopt;
    }
    return std::forward<decltype(B)>(B);
  } else {
    using T = common_type<tatooine::value_type<decltype(A_)>,
                          tatooine::value_type<decltype(B_)>>;
    auto A = tensor<T>{A_};
    auto B = tensor<T>{B_};

    auto const info = lapack::sysv(A, B, uplo);
    if (info > 0) {
      std::cerr
          << "[lapack::sysv]\n D(" << info << ", " << info
          << ") is exactly zero. The factorization has been completed, but the "
             "block diagonal matrix D is exactly singular, so the solution "
             "could not be computed.\n";
    } else if (info < 0) {
      std::cerr << "[lapack::sysv]\n Parameter " << -info << " is wrong.\n";
    }
    if (info != 0) {
      return std::nullopt;
    }
    return B;
  }
}
#endif
//==============================================================================
#if TATOOINE_BLAS_AND_LAPACK_AVAILABLE
//auto solve_symmetric_lapack_aa(dynamic_tensor auto&& A_, dynamic_tensor auto&& B_,
//                               lapack::uplo uplo = lapack::uplo::lower)
//    -> std::optional<tensor<common_type<tatooine::value_type<decltype(A_)>,
//                                        tatooine::value_type<decltype(B_)>>>> {
//  // assert A is quadratic matrix
//  assert(A_.rank() == 2);
//  assert(A_.dimension(0) == A_.dimension(1));
//
//  // assert B is vector or matrix
//  assert(B_.rank() == 1 || B_.rank() == 2);
//  // assert B dimensions are correct
//  assert(B_.dimension(0) == A_.dimension(0));
//  if constexpr (same_as<tatooine::value_type<decltype(A_)>,
//                        tatooine::value_type<decltype(B_)>>) {
//    decltype(auto) A = copy_or_keep_if_rvalue_tensor_solve(A_);
//    decltype(auto) B = copy_or_keep_if_rvalue_tensor_solve(B_);
//
//    auto const info = lapack::sysv_aa(A, B, uplo);
//    if (info > 0) {
//      std::cerr
//          << "[lapack::sysv_aa]\n D(" << info << ", " << info
//          << ") is exactly zero. The factorization has been completed, but the "
//             "block diagonal matrix D is exactly singular, so the solution "
//             "could not be computed.\n";
//    } else if (info < 0) {
//      std::cerr << "[lapack::sysv_aa]\n Parameter " << -info << " is wrong.\n";
//    }
//    if (info != 0) {
//      return std::nullopt;
//    }
//    return std::forward<decltype(B)>(B);
//  } else {
//    using T = common_type<tatooine::value_type<decltype(A_)>,
//                          tatooine::value_type<decltype(B_)>>;
//    auto A = tensor<T>{A_};
//    auto B = tensor<T>{B_};
//
//    auto const info = lapack::sysv_aa(A, B, uplo);
//    if (info > 0) {
//      std::cerr
//          << "[lapack::sysv_aa]\n D(" << info << ", " << info
//          << ") is exactly zero. The factorization has been completed, but the "
//             "block diagonal matrix D is exactly singular, so the solution "
//             "could not be computed.\n";
//    } else if (info < 0) {
//      std::cerr << "[lapack::sysv_aa]\n Parameter " << -info << " is wrong.\n";
//    }
//    if (info != 0) {
//      return std::nullopt;
//    }
//    return B;
//  }
//}
#endif
//==============================================================================
#if TATOOINE_BLAS_AND_LAPACK_AVAILABLE
//auto solve_symmetric_lapack_rk(dynamic_tensor auto&& A_,
//                               dynamic_tensor auto&& B_,
//                               lapack::uplo uplo = lapack::uplo::lower)
//    -> std::optional<tensor<common_type<tatooine::value_type<decltype(A_)>,
//                                        tatooine::value_type<decltype(B_)>>>> {
//  // assert A is quadratic matrix
//  assert(A_.rank() == 2);
//  assert(A_.dimension(0) == A_.dimension(1));
//
//  // assert B is vector or matrix
//  assert(B_.rank() == 1 || B_.rank() == 2);
//  // assert B dimensions are correct
//  assert(B_.dimension(0) == A_.dimension(0));
//  if constexpr (same_as<tatooine::value_type<decltype(A_)>,
//                        tatooine::value_type<decltype(B_)>>) {
//    decltype(auto) A = copy_or_keep_if_rvalue_tensor_solve(A_);
//    decltype(auto) B = copy_or_keep_if_rvalue_tensor_solve(B_);
//
//    auto const info = lapack::sysv_rk(A, B, uplo);
//    if (info > 0) {
//      std::cerr
//          << "[lapack::sysv_rk]\n D(" << info << ", " << info
//          << ") is exactly zero. The factorization has been completed, but the "
//             "block diagonal matrix D is exactly singular, so the solution "
//             "could not be computed.\n";
//    } else if (info < 0) {
//      std::cerr << "[lapack::sysv_rk]\n Parameter " << -info << " is wrong.\n";
//    }
//    if (info != 0) {
//      return std::nullopt;
//    }
//    return std::forward<decltype(B)>(B);
//  } else {
//    using T = common_type<tatooine::value_type<decltype(A_)>,
//                          tatooine::value_type<decltype(B_)>>;
//    auto A = tensor<T>{A_};
//    auto B = tensor<T>{B_};
//
//    auto const info = lapack::sysv_rk(A, B, uplo);
//    if (info > 0) {
//      std::cerr
//          << "[lapack::sysv_rk]\n D(" << info << ", " << info
//          << ") is exactly zero. The factorization has been completed, but the "
//             "block diagonal matrix D is exactly singular, so the solution "
//             "could not be computed.\n";
//    } else if (info < 0) {
//      std::cerr << "[lapack::sysv_rk]\n Parameter " << -info << " is wrong.\n";
//    }
//    if (info != 0) {
//      return std::nullopt;
//    }
//    return B;
//  }
//}
#endif
//------------------------------------------------------------------------------
#if TATOOINE_BLAS_AND_LAPACK_AVAILABLE
auto solve_symmetric(dynamic_tensor auto&& A, dynamic_tensor auto&& B) {
  return solve_symmetric_lapack(std::forward<decltype(A)>(A),
                                std::forward<decltype(B)>(B));
}
#endif
//------------------------------------------------------------------------------
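/// Runtime dispatch for dynamically sized systems A * X = B: LU factorization
/// for square systems, QR for over-determined systems; under-determined
/// systems throw.
///
/// Illustrative sketch (not part of the original header):
/// \code
///   auto A = tensor<double>::zeros(3, 3);  // fill A ...
///   auto b = tensor<double>::zeros(3);     // ... and b, then
///   if (auto const x = solve(A, b)) {
///     // *x holds the solution
///   }
/// \endcode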
template <dynamic_tensor TensorA, dynamic_tensor TensorB>
auto solve(TensorA&& A, TensorB&& B) -> std::optional<tensor<
    common_type<tatooine::value_type<decltype(A)>, tatooine::value_type<decltype(B)>>>> {
  assert(A.rank() == 2);
  assert(B.rank() == 1 || B.rank() == 2);
  assert(B.dimension(0) == A.dimension(0));
  auto const M = A.dimension(0);
  auto const N = A.dimension(1);
  if (M == N) {
#if TATOOINE_BLAS_AND_LAPACK_AVAILABLE
    return solve_lu_lapack(std::forward<decltype(A)>(A),
                           std::forward<decltype(B)>(B));
#else
    throw std::runtime_error{
        "cannot do a LU-factorization because LAPACK is missing"};
#endif
  } else if (M > N) {
#if TATOOINE_BLAS_AND_LAPACK_AVAILABLE
    return solve_qr_lapack(std::forward<decltype(A)>(A),
                           std::forward<decltype(B)>(B));
#else
    throw std::runtime_error{
        "cannot do a QR-factorization because LAPACK is missing"};
#endif
  }
  throw std::runtime_error{"System is under-determined."};
}
//==============================================================================
}  // namespace tatooine
//==============================================================================
#endif