{% comment %}
// vim: set syntax=asm :

/* mmm f32 8x8

    Accumulators: ymm0..ymm7  (ymm{{i}} holds the 8-row column i of the C tile)
    Scratch:      ymm12 (A column / gathered C), ymm14 (broadcast B / offsets), ymm15 (gather mask)

System V ABI:
    args: rdi, rsi, rdx, rcx, r8, r9
    preserve: rbx, rsp, rbp, r12, r13, r14, r15
    scratch: rax, rdi, rsi, rdx, rcx, r8, r9, r10, r11
    return: rax (+rdx)

Windows ABI:
    args: RCX, RDX, R8, R9
    preserve: RBX, RBP, RDI, RSI, RSP, R12, R13, R14, R15, and XMM6-15
    scratch: RAX, RCX, RDX, R8, R9, R10, R11, XMM0-5, and the upper portions of YMM0-15 and ZMM0-15
    return: rax (+rdx)

NOTE(review): register save/restore for the callee-saved regs used below (r12-r15,
rbx) is presumably handled by preamble/postamble includes — confirm there.
*/
{% endcomment %}

{% include "preamble.tmpliq" type:"f32", size:"8x8", suffix:suffix, G:G %}

// Zero all 8 accumulators (vzeroall clears ymm0-ymm15).
{{L}}clear:
    vzeroall
    jmp             {{L}}non_linear_loop

// C += A * B for k packed columns of A (8 rows) and rows of B (8 cols).
// [rdi+8]=k, [rdi+16]=A ptr, [rdi+24]=B ptr.
{{L}}add_mat_mul:
    mov             rbx,    [rdi + 24]          // B
    mov             rax,    [rdi + 16]          // A
    mov             rcx,    [rdi + 8]           // k
    test            rcx,    rcx
    jz              {{L}}non_linear_loop

{{L}}main_loop_packed_packed:
    vmovaps         ymm12,  [rax]               // ymm12 <- A[0..7, k] (packed, aligned)
{% for i in (0..7) %}
    vbroadcastss    ymm14,  dword ptr [rbx + {{i}} * 4]
    vfmadd231ps     ymm{{i}}, ymm12, ymm14      // C[., {{i}}] += A[., k] * B[k, {{i}}]
{% endfor %}
    add             rax,    32                  // 8 f32 per packed A column
    add             rbx,    32                  // 8 f32 per packed B row
    dec             rcx
    jnz             {{L}}main_loop_packed_packed
    jmp             {{L}}non_linear_loop

// NON LINEAR / ADDC

{% include "fma_mmm_f32_scalars.tmpliq" from:0, to:7, type:"f32" %}
{% include "fma_mmm_f32_per_rows.tmpliq" mr:8, from:0, to:7, type:"f32" %}
{% include "fma_mmm_f32_per_cols.tmpliq" mr:8, from:0, to:7, type:"f32" %}

// C += arbitrary-strided matrix: gather one 8-row column at a time.
// [rdi+8]=c ptr, [rdi+16]=row stride (bytes), [rdi+24]=col stride (bytes).
{{L}}add_unicast:
    mov             r10,    [rdi + 8]           // c ptr
    mov             rsi,    [rdi + 16]          // row stride
    mov             rbx,    [rdi + 24]          // col stride

    // Build ymm14 = byte offsets of rows 0..7 (i * row_stride).
    // VEX vpinsrd (not legacy pinsrd): the YMM accumulators' upper halves are
    // live here, and a legacy-SSE write would incur an AVX/SSE transition
    // penalty. vpinsrd zeroes the upper lane of ymm14/ymm15, which is fine:
    // the vperm2f128 below reads only the low 128-bit lanes.
    mov             eax,    0
{% for i in (0..3) %}
    vpinsrd         xmm14,  xmm14,  eax, {{i}}  // offsets of rows 0..3
    add             eax,    esi
{% endfor %}
{% for i in (0..3) %}
    vpinsrd         xmm15,  xmm15,  eax, {{i}}  // offsets of rows 4..7
    add             eax,    esi
{% endfor %}
    vperm2f128      ymm14,  ymm14,  ymm15, 32   // ymm14 <- xmm14::xmm15

{% for i in (0..7) %}
    vpcmpeqd        ymm15,  ymm15,  ymm15       // all-ones mask; vgatherdps clears it, so rebuild each column
    vgatherdps      ymm12,  [ r10 + ymm14 ], ymm15
    add             r10,    rbx                 // next column
    vaddps          ymm{{i}}, ymm{{i}}, ymm12
{% endfor %}
    jmp             {{L}}non_linear_loop

// C[r, c] += row[r] * col[c] (rank-1 update).
// [rdi+8]=row ptr (8 f32), [rdi+16]=col ptr (8 f32).
{{L}}add_row_col_products:
    mov             rax,    [ rdi + 8 ]
    mov             rbx,    [ rdi + 16 ]

    vmovups         ymm12,  [rax]               // ymm12 <- row[0..7]
{% for i in (0..7) %}
    vbroadcastss    ymm14,  dword ptr [rbx + {{i|times:4}} ]
    vfmadd231ps     ymm{{i}}, ymm12, ymm14
{% endfor %}
    jmp             {{L}}non_linear_loop

// Scatter the 8x8 tile to C with arbitrary row/col strides.
// [rdi+8]=c ptr, [rdi+16]=row stride (bytes), [rdi+24]=col stride (bytes).
{{L}}store:
    mov             r8,     [rdi + 8]           // c ptr
    mov             rsi,    [rdi + 16]          // row stride
    mov             rbx,    [rdi + 24]          // col stride

    // tops of cols: r{8+i} = c + i * col_stride, i = 0..7
    lea             r9,     [ r8 + rbx ]
    lea             r10,    [ r8 + 2 * rbx ]
    lea             r12,    [ r8 + 4 * rbx ]
    lea             r11,    [ r10 + rbx ]
    lea             r13,    [ r12 + rbx ]
    lea             r14,    [ r12 + 2 * rbx ]
    lea             r15,    [ r13 + 2 * rbx ]

{% for quarter in (0..1) %}
    {% if quarter != 0 %}
        // bring rows 4..7 into the low lanes (xmm0..xmm7)
        {% for r in (0..7) %}
            vperm2f128  ymm{{r}}, ymm{{r}}, ymm{{r}}, {{quarter}}
        {% endfor %}
    {% endif %}
    {% for row in (0..3) %}
        {% for i in (0..7) %}
            vextractps  dword ptr [r{{i | plus: 8}}], xmm{{i}}, {{row}}
            add         r{{i | plus: 8}}, rsi
        {% endfor %}
    {% endfor %}
{% endfor %}
    jmp             {{L}}non_linear_loop

{% include "postamble.tmpliq" type:"f32", size:"8x8", suffix:suffix, G:G, L:L %}