{% comment %}
// vim: set syntax=asm :

/* mmm 16 x 6

    accumulator tile (16 rows x 6 cols), one column per register pair:

    ymm0 ymm2 ymm4 ymm6 ymm8 ymm10      // rows 0..7
    ymm1 ymm3 ymm5 ymm7 ymm9 ymm11      // rows 8..15

System V ABI:
    args:     rdi, rsi, rdx, rcx, r8, r9
    preserve: rbx, rsp, rbp, r12, r13, r14, r15
    scratch:  rax, rdi, rsi, rdx, rcx, r8, r9, r10, r11
    return:   rax (+rdx)

Windows ABI:
    args:     RCX, RDX, R8, R9
    preserve: RBX, RBP, RDI, RSI, RSP, R12, R13, R14, R15, and XMM6-15
    scratch:  RAX, RCX, RDX, R8, R9, R10, R11, XMM0-5, and the upper portions of YMM0-15 and ZMM0-15
    return:   rax (+rdx)
*/
{% endcomment %}

{% include "preamble.tmpliq" type:"f32", size:"16x6", suffix:suffix, G:G %}

{{L}}clear:
    vzeroall
    jmp     {{L}}non_linear_loop

{{L}}add_mat_mul:
    mov     rcx,    [rdi + 24]          // B
    mov     rax,    [rdi + 16]          // A

    mov     rbx,    [rdi + 8]           // k
    test    rbx,    rbx
    jz      {{L}}non_linear_loop

{{L}}main_loop_packed_packed:
    {% include "16x6/packed_packed_loop1/original.tmpli" %}

    dec     rbx
    jnz     {{L}}main_loop_packed_packed

    jmp     {{L}}non_linear_loop

// NON LINEAR / ADDC

{% include "fma_mmm_f32_scalars.tmpliq" from:0, to:11, type:"f32" %}
{% include "fma_mmm_f32_per_rows.tmpliq" mr:16, from:0, to:11, type:"f32" %}
{% include "fma_mmm_f32_per_cols.tmpliq" mr:16, from:0, to:11, type:"f32" %}

// Accumulate an arbitrarily strided C tile into the accumulators,
// using vgatherdps to handle any row stride.
{{L}}add_unicast:
    mov     r10,    [rdi + 8]           // c ptr
    mov     rsi,    [rdi + 16]          // row stride (bytes)
    mov     rbx,    [rdi + 24]          // col stride (bytes)

    // build gather indices: byte offsets of rows 0..7 ({{ "i" }} * row stride)
    mov     eax,    0
{% for i in (0..3) %}
    pinsrd  xmm14,  eax, {{i}}
    add     eax,    esi
{% endfor %}
{% for i in (0..3) %}
    pinsrd  xmm15,  eax, {{i}}
    add     eax,    esi
{% endfor %}

    vperm2f128 ymm14, ymm14, ymm15, 32  // ymm14 <- xmm14::xmm15

    lea     r8,     [ r10 + rsi * 8 ]   // top of rows 8..15

{% for i in (0..5) %}
    // vgatherdps consumes its mask, so refill it with all-ones before each gather
    vpcmpeqd    ymm15,  ymm15, ymm15
    vgatherdps  ymm12,  [ r10 + ymm14 ], ymm15

    vpcmpeqd    ymm15,  ymm15, ymm15
    vgatherdps  ymm13,  [ r8 + ymm14 ], ymm15

    add     r10,    rbx
    add     r8,     rbx

    vaddps  ymm{{i | times:2}}, ymm{{i | times:2}}, ymm12
    vaddps  ymm{{i | times:2 | plus:1}}, ymm{{i | times:2 | plus:1}}, ymm13
{% endfor %}

    jmp     {{L}}non_linear_loop

// Rank-1 update: accumulate the outer product of a 16-element row-products
// vector and a 6-element col-products vector.
{{L}}add_row_col_products:
    mov     rax,    [ rdi + 8 ]         // row products (16 x f32)
    mov     rbx,    [ rdi + 16 ]        // col products (6 x f32)

    vmovups ymm12,  [rax]
    vmovups ymm13,  [rax + 32]

{% for i in (0..5) %}
    vbroadcastss    ymm14,  dword ptr [rbx + {{i | times:4}}]
    vfmadd231ps     ymm{{i | times:2}}, ymm12, ymm14
    vfmadd231ps     ymm{{i | times:2 | plus:1}}, ymm13, ymm14
{% endfor %}

    jmp     {{L}}non_linear_loop

{{L}}store:
    mov     r8,     [rdi + 8]           // c ptr
    mov     rsi,    [rdi + 16]          // row stride (bytes)
    mov     rbx,    [rdi + 24]          // col stride (bytes)

    // tops of cols: r8..r13 point at the top of columns 0..5
    lea     r9,     [ r8 + rbx ]
    lea     r10,    [ r8 + 2 * rbx ]
    lea     r12,    [ r8 + 4 * rbx ]
    lea     r11,    [ r10 + rbx ]
    lea     r13,    [ r12 + rbx ]

{% for quarter in (0..3) %}
    {% if quarter != 0 %}
        // rotate the next four rows into the low lanes (xmm0, 2, .., 10)
        vperm2f128 ymm0, ymm0, ymm1, {{quarter}}
        vperm2f128 ymm2, ymm2, ymm3, {{quarter}}
        vperm2f128 ymm4, ymm4, ymm5, {{quarter}}
        vperm2f128 ymm6, ymm6, ymm7, {{quarter}}
        vperm2f128 ymm8, ymm8, ymm9, {{quarter}}
        vperm2f128 ymm10, ymm10, ymm11, {{quarter}}
    {% endif %}
    {% for row in (0..3) %}
        {% for i in (0..5) %}
            vextractps  dword ptr [r{{i | plus: 8}}], xmm{{i | times:2}}, {{row}}
            add         r{{i | plus: 8}}, rsi
        {% endfor %}
    {% endfor %}
{% endfor %}

    jmp     {{L}}non_linear_loop

{% include "postamble.tmpliq" type:"f32", size:"16x6", suffix:suffix, G:G, L:L %}
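
{% comment %}
For reference, every non-linear op above pulls its arguments from fixed
offsets off rdi. A minimal C sketch of that argument block follows; it is an
illustration only. The struct and field names, and the discriminant at
offset 0 (the dispatch itself lives in preamble.tmpliq), are assumptions
made for this sketch; only the 8/16/24 offsets come from the loads above.

    #include <stdint.h>

    struct non_linear_op {
        uint64_t tag;    /* [rdi]      assumed: selects the op label above   */
        uint64_t arg0;   /* [rdi + 8]  k (add_mat_mul) or c ptr (store)      */
        uint64_t arg1;   /* [rdi + 16] A ptr, or row stride in bytes         */
        uint64_t arg2;   /* [rdi + 24] B ptr, or col stride in bytes         */
    };

Treating the strides as byte counts matches their use above: the gather
indices in add_unicast and the pointer bumps in store both add them to
byte addresses without scaling.
{% endcomment %}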