9#define UNROLL_FOR(...) BATMAT_FULLY_UNROLLED_FOR (__VA_ARGS__)
14template <
class T,
class Abi, KernelConfig Conf, index_t RowsReg, StorageOrder OA>
15[[gnu::hot, gnu::flatten]]
void
25 if constexpr (OA == StorageOrder::RowMajor) {
30 C_reg[ii] = rotl<Conf.rotate_C>(C->load(ii, 0));
37 for (index_t l = 0; l < k; ++l) {
39 simd Ail = shiftl<Conf.shift_A>(A_cached.load(ii, l));
40 simd &Cij = C_reg[ii];
41 simd Blj = rotl<Conf.rotate_B>(B.load(l, 0));
42 Conf.negate ? (Cij -= Ail * Blj) : (Cij += Ail * Blj);
47 D.template store<Conf.mask_D>(rotr<Conf.rotate_D>(C_reg[ii]), ii, 0);
52 B_reg[l] = rotl<Conf.rotate_B>(B.load(l, 0));
56 for (index_t i = 0; i < k; ++i) {
57 simd Cij = rotl<Conf.rotate_C>(C->load(i, 0));
59 simd Ail = shiftl<Conf.shift_A>(A_cached.load(i, ll));
60 Conf.negate ? (Cij -= Ail * B_reg[ll]) : (Cij += Ail * B_reg[ll]);
62 D.template store<Conf.mask_D>(rotr<Conf.rotate_D>(Cij), i, 0);
65 for (index_t i = 0; i < k; ++i) {
68 simd Ail = shiftl<Conf.shift_A>(A_cached.load(i, ll));
69 Conf.negate ? (Cij -= Ail * B_reg[ll]) : (Cij += Ail * B_reg[ll]);
71 D.template store<Conf.mask_D>(rotr<Conf.rotate_D>(Cij), i, 0);
78template <
class T,
class Abi, KernelConfig Conf, StorageOrder OA>
84 const index_t I = D.rows(), K = A.cols();
95 const std::optional<uview<const T, Abi, StorageOrder::ColMajor>> C_ = C;
98 if constexpr (OA == StorageOrder::RowMajor) {
100 return microkernel[I - 1](A_, B_, C_, D_, K);
102 auto Cj = C_ ? std::make_optional(C_->middle_rows(i)) : std::nullopt;
103 microkernel[ni - 1](A_.middle_rows(i), B_, Cj, D_.middle_rows(i), K);
107 return microkernel[K - 1](A_, B_, C_, D_, I);
108 microkernel[Rows - 1](A_.middle_cols(0), B_.middle_rows(0), C_, D_, I);
110 microkernel[nk - 1](A_.middle_cols(k), B_.middle_rows(k), D_, D_, I);
#define BATMAT_ASSUME(x)
Invokes undefined behavior if the expression x does not evaluate to true.
void foreach_chunked_merged(index_t i_begin, index_t i_end, auto chunk_size, auto func_chunk, LoopDir dir=LoopDir::Forward)
Iterate over the range [i_begin, i_end) in chunks of size chunk_size, calling func_chunk for each chunk.
stdx::simd< Tp, Abi > simd
const constinit auto gemv_copy_lut
void gemv_copy_register(view< const T, Abi, OA > A, view< const T, Abi > B, std::optional< view< const T, Abi > > C, view< T, Abi > D) noexcept
Generalized matrix-vector multiplication d = c ± A⁽ᵀ⁾ b, using register blocking.
constexpr index_t RowsReg
void gemv_copy_microkernel(uview< const T, Abi, OA > A, uview< const T, Abi, StorageOrder::ColMajor > B, std::optional< uview< const T, Abi, StorageOrder::ColMajor > > C, uview< T, Abi, StorageOrder::ColMajor > D, index_t k) noexcept
Generalized matrix-vector multiplication d = c ± A⁽ᵀ⁾ b. Single register block.
cached_uview< Order==StorageOrder::ColMajor ? Cols :Rows, T, Abi, Order > with_cached_access(const uview< T, Abi, Order > &o) noexcept
simd_view_types< std::remove_const_t< T >, Abi >::template view< T, Order > view