#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov, @dot-asm, initially for the OpenSSL
# project.
# ====================================================================
#
# sha256/512_block procedure for x86_64.
#
# 40% improvement over compiler-generated code on Opteron. On EM64T
# sha256 was observed to run >80% faster and sha512 - >40%. No magical
# tricks, just straight implementation... I really wonder why gcc
# [being armed with inline assembler] fails to generate equally fast
# code. The only cool thing about this module is that the very same
# instruction sequence is used for both SHA-256 and SHA-512. In the
# former case the instructions operate on 32-bit operands, in the
# latter - on 64-bit ones. All I had to do was get one flavor right;
# the other one passed the test right away:-)
#
# sha256_block runs in ~1005 cycles on Opteron, which gives you
# asymptotic performance of 64*1000/1005=63.7MBps times CPU clock
# frequency in GHz. sha512_block runs in ~1275 cycles, which results
# in 128*1000/1275=100MBps per GHz. Is there room for improvement?
# Well, if you compare it to the IA-64 implementation, which maintains
# X[16] in the register bank[!], tends to 4 instructions per CPU clock
# cycle and runs in 1003 cycles, then 1275 is a very good result for
# the 3-way issue Opteron pipeline with X[16] maintained in memory. So
# *if* there is a way to improve it, *then* the only way would be to
# try to offload the X[16] updates to the SSE unit, but that would
# require a "deeper" loop unroll, which in turn would naturally cause
# size blow-up, not to mention increased complexity! And once again,
# that is only *if* it's actually possible to noticeably improve
# overall ILP, instruction-level parallelism, on a given CPU
# implementation.
#
# Special note on Intel EM64T. While the Opteron CPU exhibits a
# perfect performance ratio of 1.5 between the 64- and 32-bit flavors
# [see above], [currently available] EM64T CPUs are apparently far
# from it. On the contrary, the 64-bit version, sha512_block, is ~30%
# *slower* than the 32-bit sha256_block:-( This is presumably because
# 64-bit shifts/rotates are apparently not atomic instructions, but
# are implemented in microcode.
#
# May 2012.
#
# An optimization including one of Pavel Semjanov's ideas, the
# alternative Maj, resulted in a >=5% improvement on most CPUs, +20%
# for SHA256 and unfortunately -2% for SHA512 on P4 [which nobody
# should care about that much].
#
# June 2012.
#
# Add SIMD code paths, see below for improvement coefficients. An
# SSSE3 code path was not attempted for SHA512, because the
# improvement is not estimated to be high enough, noticeably less
# than 9%, to justify the effort, at least not on pre-AVX processors.
# [Obviously with an exclusion for VIA Nano, but it has a SHA512
# instruction that is faster and should be used instead.] For
# reference, the corresponding estimated upper limit for improvement
# for SSSE3 SHA256 is 28%. The fact that higher coefficients are
# observed on VIA Nano and Bulldozer has more to do with specifics of
# their architecture [which is a topic for separate discussion].
#
# November 2012.
#
# Add AVX2 code path. Two consecutive input blocks are loaded into
# 256-bit %ymm registers, with data from the first block going to the
# least significant 128-bit halves and data from the second to the
# most significant. The data is then processed with the same SIMD
# instruction sequence as for AVX, but with %ymm registers as
# operands. The side effect is an increased stack frame, 448
# additional bytes in SHA256 and 1152 in SHA512, and a 1.2KB code
# size increase.
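#
# As a rough illustration of that layout (not lifted verbatim from the
# AVX2 code below, which keeps the second-block pointer in %r12; the
# offsets assume two consecutive 64-byte SHA256 blocks at %rsi):
#
#	vmovdqu		0x00(%rsi),%xmm0		# block 1, bytes 0..15
#	vinserti128	$1,0x40(%rsi),%ymm0,%ymm0	# block 2, bytes 0..15
#
# so every message-schedule instruction that follows operates on the
# corresponding 16 bytes of both blocks at once.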
#
# March 2014.
#
# Add support for Intel SHA Extensions.
#
# October 2023.
#
# Add support for Intel SHA512 Extension.
#
######################################################################
# Current performance in cycles per processed byte (less is better):
#
#                SHA256  SSSE3       AVX/XOP(*)       SHA512  AVX/XOP(*)
#
# AMD K8         14.9    -           -                9.57    -
# P4             17.3    -           -                30.8    -
# Core 2         15.6    13.8(+13%)  -                9.97    -
# Westmere       14.8    12.3(+19%)  -                9.58    -
# Sandy Bridge   17.4    14.2(+23%)  11.6(+50%(**))   11.2    8.10(+38%(**))
# Ivy Bridge     12.6    10.5(+20%)  10.3(+22%)       8.17    7.22(+13%)
# Haswell        12.2    9.28(+31%)  7.80(+56%)       7.66    5.40(+42%)
# Skylake        11.4    9.03(+26%)  7.70(+48%)       7.25    5.20(+40%)
# Cannon Lake    11.4    9.00(+27%)  3.55(+220%)      7.20    5.12(+41%)
# Rocket Lake    10.4    9.13(+14%)  2.43(+330%)      6.66    5.34(+25%)
# Bulldozer      21.1    13.6(+54%)  13.6(+54%(***))  13.5    8.58(+57%)
# Ryzen          11.0    9.02(+22%)  2.05(+440%)      7.05    5.67(+20%)
# VIA Nano       23.0    16.5(+39%)  -                14.7    -
# Atom           23.0    18.9(+22%)  -                14.7    -
# Silvermont     27.4    20.6(+33%)  -                17.5    -
# Knights L      27.4    21.0(+30%)  19.6(+40%)       17.5    12.8(+37%)
# Goldmont       18.9    14.3(+32%)  4.16(+350%)      12.0    -
#
# (*)   whichever best applicable, including SHAEXT;
# (**)  switch from ror to shrd stands for fair share of improvement;
# (***) execution time is fully determined by remaining integer-only
#       part, body_00_15; reducing the amount of SIMD instructions
#       below certain limit makes no difference/sense; to conserve
#       space SHA256 XOP code path is therefore omitted;

$flavour = shift;
$output  = pop;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0;
$win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

$avx=undef;
$shaext=1;	### set to zero if compiling for 1.0.1

if (!defined($avx) && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
	    ($ENV{ASM} //= "nasm") &&
	    `"$ENV{ASM}" -v 2>&1` =~ /NASM version ([0-9]+\.[0-9]+)(?:\.([0-9]+))?/) {
	$avx = ($1>=2.09) + ($1>=2.10) + 2 * ($1>=2.12);
	$avx += 2 if ($1==2.11 && $2>=8);
}

if (!defined($avx) && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
	    ($ENV{ASM} //= "ml64") &&
	    `"$ENV{ASM}" 2>&1` =~ /Version ([0-9]+)\./) {
	$avx = ($1>=10) + ($1>=12) + 2 * ($1>=14);
}

$ENV{CC} //= "cc";

if (!defined($avx) &&
	    `$ENV{CC} -Wa,-v -c -o /dev/zero -x assembler /dev/null 2>&1`
	    =~ /GNU assembler version ([0-9]+)\.([0-9]+)/) {
	my $ver = $1 + $2/100.0;	# 3.1->3.01, 3.10->3.10
	$avx = ($ver>=2.19) + ($ver>=2.22) + ($ver>=2.25) + ($ver>=2.26);
}

if (!defined($avx) && `$ENV{CC} -v 2>&1`
	    =~ /((?:^clang|LLVM) version|.*based on LLVM) ([0-9]+)\.([0-9]+)/) {
	my $ver = $2 + $3/100.0;	# 3.1->3.01, 3.10->3.10
	$avx = ($ver>=3.0) + ($ver>3.0);
	$avx += 2*($ver>=7.0) if ($1 =~ /^clang/);
}

open STDOUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
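
# The probes above leave $avx at the newest extension level this
# particular assembler is known to handle (roughly: 0 - no AVX, emit
# at most the SSSE3 path; 1 - AVX/XOP; 2 or more - AVX2 and the
# extensions that arrived with or after it), so code paths the
# assembler cannot encode are simply never emitted.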

if ($output =~ /512/) {
	$func="sha512_block_data_order";
	$TABLE="K512";
	$SZ=8;
	@ROT=($A,$B,$C,$D,$E,$F,$G,$H)=("%rax","%rbx","%rcx","%rdx",
					"%r8", "%r9", "%r10","%r11");
	($T1,$a0,$a1,$a2,$a3)=("%r12","%r13","%r14","%r15","%rdi");
	@Sigma0=(28,34,39);
	@Sigma1=(14,18,41);
	@sigma0=(1,  8, 7);
	@sigma1=(19,61, 6);
	$rounds=80;
} else {
	$func="sha256_block_data_order";
	$TABLE="K256";
	$SZ=4;
	@ROT=($A,$B,$C,$D,$E,$F,$G,$H)=("%eax","%ebx","%ecx","%edx",
					"%r8d","%r9d","%r10d","%r11d");
	($T1,$a0,$a1,$a2,$a3)=("%r12d","%r13d","%r14d","%r15d","%edi");
	@Sigma0=( 2,13,22);
	@Sigma1=( 6,11,25);
	@sigma0=( 7,18, 3);
	@sigma1=(17,19,10);
	$rounds=64;
}

$ctx="%rdi";	# 1st arg, zapped by $a3
$inp="%rsi";	# 2nd arg
$Tbl="%rbp";

$_ctx="16*$SZ+0*8(%rsp)";
$_inp="16*$SZ+1*8(%rsp)";
$_end="16*$SZ+2*8(%rsp)";
$framesz="16*$SZ+3*8";

sub ROUND_00_15()
{ my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
  my $STRIDE=$SZ;
     $STRIDE += 16 if ($i%(16/$SZ)==(16/$SZ-1));

$code.=<<___;
	ror	\$`$Sigma1[2]-$Sigma1[1]`,$a0
	mov	$f,$a2
	xor	$e,$a0
	ror	\$`$Sigma0[2]-$Sigma0[1]`,$a1
	xor	$g,$a2			# f^g
	mov	$T1,`$SZ*($i&0xf)`(%rsp)
	xor	$a,$a1
	and	$e,$a2			# (f^g)&e
	ror	\$`$Sigma1[1]-$Sigma1[0]`,$a0
	add	$h,$T1			# T1+=h
	xor	$g,$a2			# Ch(e,f,g)=((f^g)&e)^g
	ror	\$`$Sigma0[1]-$Sigma0[0]`,$a1
	xor	$e,$a0
	add	$a2,$T1			# T1+=Ch(e,f,g)
	mov	$a,$a2
	add	($Tbl),$T1		# T1+=K[round]
	xor	$a,$a1
	xor	$b,$a2			# a^b, b^c in next round
	ror	\$$Sigma1[0],$a0	# Sigma1(e)
	mov	$b,$h
	and	$a2,$a3
	ror	\$$Sigma0[0],$a1	# Sigma0(a)
	add	$a0,$T1			# T1+=Sigma1(e)
	xor	$a3,$h			# h=Maj(a,b,c)=Ch(a^b,c,b)
	add	$T1,$d			# d+=T1
	add	$T1,$h			# h+=T1
	lea	$STRIDE($Tbl),$Tbl	# round++
___
$code.=<<___ if ($i<15);
	add	$a1,$h			# h+=Sigma0(a)
___
	($a2,$a3) = ($a3,$a2);
}

sub ROUND_16_XX()
{ my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;

$code.=<<___;
	mov	`$SZ*(($i+1)&0xf)`(%rsp),$a0
	mov	`$SZ*(($i+14)&0xf)`(%rsp),$a2
	mov	$a0,$T1
	ror	\$`$sigma0[1]-$sigma0[0]`,$a0
	add	$a1,$a			# modulo-scheduled h+=Sigma0(a)
	mov	$a2,$a1
	ror	\$`$sigma1[1]-$sigma1[0]`,$a2
	xor	$T1,$a0
	shr	\$$sigma0[2],$T1
	ror	\$$sigma0[0],$a0
	xor	$a1,$a2
	shr	\$$sigma1[2],$a1
	ror	\$$sigma1[0],$a2
	xor	$a0,$T1			# sigma0(X[(i+1)&0xf])
	xor	$a1,$a2			# sigma1(X[(i+14)&0xf])
	add	`$SZ*(($i+9)&0xf)`(%rsp),$T1
	add	`$SZ*($i&0xf)`(%rsp),$T1
	mov	$e,$a0
	add	$a2,$T1
	mov	$a,$a1
___
	&ROUND_00_15(@_);
}

$code=<<___;
.text

.extern	OPENSSL_ia32cap_P
.globl	$func
.type	$func,\@function,3,"unwind"
.align	16
$func:
.cfi_startproc
	push	%rbp
.cfi_push	%rbp
	mov	%rsp,%rbp
.cfi_def_cfa_register	%rbp
___
$code.=<<___ if ($SZ==4 || $avx);
	lea	OPENSSL_ia32cap_P(%rip),%rax
	mov	0(%rax),%r9d
	mov	4(%rax),%r10d
	mov	8(%rax),%eax
___
$code.=<<___ if ($SZ==4 && $shaext);
	test	\$`1<<29`,%eax		# check for SHA
	jnz	.Lshaext_shortcut
___
$code.=<<___ if ($avx && $SZ==8);
	test	\$`1<<11`,%r10d		# check for XOP
	jnz	.Lxop_shortcut
___
$code.=<<___ if ($avx>1);
	and	\$`1<<8|1<<5|1<<3`,%eax	# check for BMI2+AVX2+BMI1
	cmp	\$`1<<8|1<<5|1<<3`,%eax
	je	.Lavx2_shortcut
___
$code.=<<___ if ($avx);
	and	\$`1<<30`,%r9d		# mask "Intel CPU" bit
	and	\$`1<<28|1<<9`,%r10d	# mask AVX and SSSE3 bits
	or	%r9d,%r10d
	cmp	\$`1<<28|1<<9|1<<30`,%r10d
	je	.Lavx_shortcut
___
$code.=<<___ if ($SZ==4);
	test	\$`1<<9`,%r10d
	jnz	.Lssse3_shortcut
___
$code.=<<___;
	push	%rbx
.cfi_push	%rbx
	push	%r12
.cfi_push	%r12
	push	%r13
.cfi_push	%r13
	push	%r14
.cfi_push	%r14
	push	%r15
.cfi_push	%r15
	shl	\$4,%rdx		# num*16
	sub	\$$framesz,%rsp
.cfi_alloca	$framesz
.cfi_def_cfa	%rsp
.cfi_end_prologue
	lea	($inp,%rdx,$SZ),%rdx	# inp+num*16*$SZ
	mov	$ctx,$_ctx		# save ctx, 1st arg
	mov	$inp,$_inp		# save inp, 2nd arg
	mov	%rdx,$_end		# save end pointer, "3rd" arg
	mov	$SZ*0($ctx),$A
	mov	$SZ*1($ctx),$B
	mov	$SZ*2($ctx),$C
	mov	$SZ*3($ctx),$D
	mov	$SZ*4($ctx),$E
	mov	$SZ*5($ctx),$F
	mov	$SZ*6($ctx),$G
	mov	$SZ*7($ctx),$H
	jmp	.Lloop

.align	16
.Lloop:
	mov	$B,$a3
	lea	$TABLE(%rip),$Tbl
	xor	$C,$a3			# magic
___
	for($i=0;$i<16;$i++) {
		$code.="	mov	$SZ*$i($inp),$T1\n";
		$code.="	mov	@ROT[4],$a0\n";
		$code.="	mov	@ROT[0],$a1\n";
		$code.="	bswap	$T1\n";
		&ROUND_00_15($i,@ROT);
		unshift(@ROT,pop(@ROT));
	}
$code.=<<___;
	jmp	.Lrounds_16_xx
.align	16
.Lrounds_16_xx:
___
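
# For reference, the scalar rounds emitted above follow the usual SHA-2
# recurrence (a paraphrase of the FIPS 180-4 definitions, not text that
# is assembled):
#
#	T1 = h + Sigma1(e) + Ch(e,f,g) + K[i] + X[i]
#	T2 = Sigma0(a) + Maj(a,b,c)
#	h=g; g=f; f=e; e=d+T1; d=c; c=b; b=a; a=T1+T2
#
# with Ch(e,f,g) = ((f^g)&e)^g and, per the "alternative Maj" noted in
# the header, Maj(a,b,c) = Ch(a^b,c,b) = ((a^b)&(b^c))^b, which is what
# lets b^c be carried over from one round to the next (the "magic" xor
# above).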
for(;$i<32;$i++) { &ROUND_16_XX($i,@ROT); unshift(@ROT,pop(@ROT)); } $code.=<<___; cmpb \$0,`$SZ-1`($Tbl) jnz .Lrounds_16_xx mov $_ctx,$ctx add $a1,$A # modulo-scheduled h+=Sigma0(a) lea 16*$SZ($inp),$inp add $SZ*0($ctx),$A add $SZ*1($ctx),$B add $SZ*2($ctx),$C add $SZ*3($ctx),$D add $SZ*4($ctx),$E add $SZ*5($ctx),$F add $SZ*6($ctx),$G add $SZ*7($ctx),$H cmp $_end,$inp mov $A,$SZ*0($ctx) mov $B,$SZ*1($ctx) mov $C,$SZ*2($ctx) mov $D,$SZ*3($ctx) mov $E,$SZ*4($ctx) mov $F,$SZ*5($ctx) mov $G,$SZ*6($ctx) mov $H,$SZ*7($ctx) jb .Lloop lea $framesz+6*8(%rsp),%r11 .cfi_def_cfa %r11,8 mov $framesz(%rsp),%r15 mov -40(%r11),%r14 mov -32(%r11),%r13 mov -24(%r11),%r12 mov -16(%r11),%rbx mov -8(%r11),%rbp .cfi_epilogue lea (%r11),%rsp ret .cfi_endproc .size $func,.-$func ___ if ($SZ==4) { $code.=<<___; .align 64 .type $TABLE,\@object $TABLE: .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f .long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f .long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff .long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff .long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908 .long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908 .asciz "SHA256 block transform for x86_64, CRYPTOGAMS by \@dot-asm" ___ } else { $code.=<<___; .align 64 .type $TABLE,\@object $TABLE: .quad 0x428a2f98d728ae22,0x7137449123ef65cd .quad 0x428a2f98d728ae22,0x7137449123ef65cd .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc .quad 0x3956c25bf348b538,0x59f111f1b605d019 .quad 0x3956c25bf348b538,0x59f111f1b605d019 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 .quad 0xd807aa98a3030242,0x12835b0145706fbe .quad 0xd807aa98a3030242,0x12835b0145706fbe .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 .quad 0x9bdc06a725c71235,0xc19bf174cf692694 .quad 
0x9bdc06a725c71235,0xc19bf174cf692694 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 .quad 0x983e5152ee66dfab,0xa831c66d2db43210 .quad 0x983e5152ee66dfab,0xa831c66d2db43210 .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 .quad 0x06ca6351e003826f,0x142929670a0e6e70 .quad 0x06ca6351e003826f,0x142929670a0e6e70 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 .quad 0x81c2c92e47edaee6,0x92722c851482353b .quad 0x81c2c92e47edaee6,0x92722c851482353b .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 .quad 0xd192e819d6ef5218,0xd69906245565a910 .quad 0xd192e819d6ef5218,0xd69906245565a910 .quad 0xf40e35855771202a,0x106aa07032bbd1b8 .quad 0xf40e35855771202a,0x106aa07032bbd1b8 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec .quad 0x90befffa23631e28,0xa4506cebde82bde9 .quad 0x90befffa23631e28,0xa4506cebde82bde9 .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b .quad 0xca273eceea26619c,0xd186b8c721c0c207 .quad 0xca273eceea26619c,0xd186b8c721c0c207 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 .quad 0x113f9804bef90dae,0x1b710b35131c471b .quad 0x113f9804bef90dae,0x1b710b35131c471b .quad 0x28db77f523047d84,0x32caab7b40c72493 .quad 0x28db77f523047d84,0x32caab7b40c72493 .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 .quad 0x0001020304050607,0x08090a0b0c0d0e0f .quad 0x0001020304050607,0x08090a0b0c0d0e0f ${TABLE}_nodup: .quad 0x428a2f98d728ae22,0x7137449123ef65cd .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc .quad 0x3956c25bf348b538,0x59f111f1b605d019 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 .quad 0xd807aa98a3030242,0x12835b0145706fbe .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 .quad 0x9bdc06a725c71235,0xc19bf174cf692694 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 .quad 0x983e5152ee66dfab,0xa831c66d2db43210 .quad 
0xb00327c898fb213f,0xbf597fc7beef0ee4 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 .quad 0x06ca6351e003826f,0x142929670a0e6e70 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 .quad 0x81c2c92e47edaee6,0x92722c851482353b .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 .quad 0xd192e819d6ef5218,0xd69906245565a910 .quad 0xf40e35855771202a,0x106aa07032bbd1b8 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec .quad 0x90befffa23631e28,0xa4506cebde82bde9 .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b .quad 0xca273eceea26619c,0xd186b8c721c0c207 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 .quad 0x113f9804bef90dae,0x1b710b35131c471b .quad 0x28db77f523047d84,0x32caab7b40c72493 .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 .asciz "SHA512 block transform for x86_64, CRYPTOGAMS by \@dot-asm" ___ } ###################################################################### # SIMD code paths # if ($SZ==4 && $shaext) {{{ ###################################################################### # Intel SHA Extensions implementation of SHA256 update function. # my ($ctx,$inp,$num,$Tbl)=("%rdi","%rsi","%rdx","%rcx"); my ($Wi,$ABEF,$CDGH,$TMP,$BSWAP,$ABEF_SAVE,$CDGH_SAVE)=map("%xmm$_",(0..2,7..10)); my @MSG=map("%xmm$_",(3..6)); $code.=<<___; .type sha256_block_data_order_shaext,\@function,3,"unwind" .align 64 sha256_block_data_order_shaext: .cfi_startproc push %rbp .cfi_push %rbp mov %rsp,%rbp .cfi_def_cfa_register %rbp .Lshaext_shortcut: ___ $code.=<<___ if ($win64); sub \$0x50,%rsp .cfi_alloca 0x50 movaps %xmm6,-0x50(%rbp) movaps %xmm7,-0x40(%rbp) movaps %xmm8,-0x30(%rbp) movaps %xmm9,-0x20(%rbp) movaps %xmm10,-0x10(%rbp) .cfi_offset %xmm6-%xmm10,-0x60 ___ $code.=<<___; .cfi_end_prologue lea K256+0x80(%rip),$Tbl movdqu ($ctx),$ABEF # DCBA movdqu 16($ctx),$CDGH # HGFE movdqa 0x200-0x80($Tbl),$TMP # byte swap mask pshufd \$0x1b,$ABEF,$Wi # ABCD pshufd \$0xb1,$ABEF,$ABEF # CDAB pshufd \$0x1b,$CDGH,$CDGH # EFGH movdqa $TMP,$BSWAP # offload palignr \$8,$CDGH,$ABEF # ABEF punpcklqdq $Wi,$CDGH # CDGH jmp .Loop_shaext .align 16 .Loop_shaext: movdqu ($inp),@MSG[0] movdqu 0x10($inp),@MSG[1] movdqu 0x20($inp),@MSG[2] pshufb $TMP,@MSG[0] movdqu 0x30($inp),@MSG[3] movdqa 0*32-0x80($Tbl),$Wi paddd @MSG[0],$Wi pshufb $TMP,@MSG[1] movdqa $CDGH,$CDGH_SAVE # offload sha256rnds2 $ABEF,$CDGH # 0-3 pshufd \$0x0e,$Wi,$Wi nop movdqa $ABEF,$ABEF_SAVE # offload sha256rnds2 $CDGH,$ABEF movdqa 1*32-0x80($Tbl),$Wi paddd @MSG[1],$Wi pshufb $TMP,@MSG[2] sha256rnds2 $ABEF,$CDGH # 4-7 pshufd \$0x0e,$Wi,$Wi lea 0x40($inp),$inp sha256msg1 @MSG[1],@MSG[0] sha256rnds2 $CDGH,$ABEF movdqa 2*32-0x80($Tbl),$Wi paddd @MSG[2],$Wi pshufb $TMP,@MSG[3] sha256rnds2 $ABEF,$CDGH # 8-11 pshufd \$0x0e,$Wi,$Wi movdqa @MSG[3],$TMP palignr \$4,@MSG[2],$TMP nop paddd $TMP,@MSG[0] sha256msg1 @MSG[2],@MSG[1] sha256rnds2 $CDGH,$ABEF movdqa 3*32-0x80($Tbl),$Wi paddd @MSG[3],$Wi sha256msg2 @MSG[3],@MSG[0] sha256rnds2 $ABEF,$CDGH # 12-15 pshufd \$0x0e,$Wi,$Wi movdqa @MSG[0],$TMP palignr \$4,@MSG[3],$TMP nop paddd $TMP,@MSG[1] sha256msg1 @MSG[3],@MSG[2] sha256rnds2 $CDGH,$ABEF ___ for($i=4;$i<16-3;$i++) { $code.=<<___; 
movdqa $i*32-0x80($Tbl),$Wi paddd @MSG[0],$Wi sha256msg2 @MSG[0],@MSG[1] sha256rnds2 $ABEF,$CDGH # 16-19... pshufd \$0x0e,$Wi,$Wi movdqa @MSG[1],$TMP palignr \$4,@MSG[0],$TMP nop paddd $TMP,@MSG[2] sha256msg1 @MSG[0],@MSG[3] sha256rnds2 $CDGH,$ABEF ___ push(@MSG,shift(@MSG)); } $code.=<<___; movdqa 13*32-0x80($Tbl),$Wi paddd @MSG[0],$Wi sha256msg2 @MSG[0],@MSG[1] sha256rnds2 $ABEF,$CDGH # 52-55 pshufd \$0x0e,$Wi,$Wi movdqa @MSG[1],$TMP palignr \$4,@MSG[0],$TMP sha256rnds2 $CDGH,$ABEF paddd $TMP,@MSG[2] movdqa 14*32-0x80($Tbl),$Wi paddd @MSG[1],$Wi sha256rnds2 $ABEF,$CDGH # 56-59 pshufd \$0x0e,$Wi,$Wi sha256msg2 @MSG[1],@MSG[2] movdqa $BSWAP,$TMP sha256rnds2 $CDGH,$ABEF movdqa 15*32-0x80($Tbl),$Wi paddd @MSG[2],$Wi nop sha256rnds2 $ABEF,$CDGH # 60-63 pshufd \$0x0e,$Wi,$Wi dec $num nop sha256rnds2 $CDGH,$ABEF paddd $CDGH_SAVE,$CDGH paddd $ABEF_SAVE,$ABEF jnz .Loop_shaext pshufd \$0xb1,$CDGH,$CDGH # DCHG pshufd \$0x1b,$ABEF,$TMP # FEBA pshufd \$0xb1,$ABEF,$ABEF # BAFE punpckhqdq $CDGH,$ABEF # DCBA palignr \$8,$TMP,$CDGH # HGFE movdqu $ABEF,($ctx) movdqu $CDGH,16($ctx) ___ $code.=<<___ if ($win64); movaps -0x50(%rbp),%xmm6 movaps -0x40(%rbp),%xmm7 movaps -0x30(%rbp),%xmm8 movaps -0x20(%rbp),%xmm9 movaps -0x10(%rbp),%xmm10 mov %rbp,%rsp ___ $code.=<<___; .cfi_def_cfa_register %rsp pop %rbp .cfi_pop %rbp .cfi_epilogue ret .cfi_endproc .size sha256_block_data_order_shaext,.-sha256_block_data_order_shaext ___ }}} if ($SZ==8 && $shaext && $avx>1) {{{ ###################################################################### # Intel SHA Extensions implementation of SHA512 update function. # my ($ctx,$inp,$num,$Tbl)=("%rdi","%rsi","%rdx","%rcx"); my ($Wi,$ABEF,$CDGH,$TMP,$BSWAP,$ABEF_SAVE,$CDGH_SAVE)=map("%ymm$_",(4..10)); my @MSG=map("%ymm$_",(0..3)); $code.=<<___; .globl sha512_block_data_order_shaext .type sha512_block_data_order_shaext,\@function,3,"unwind" .align 64 sha512_block_data_order_shaext: .cfi_startproc push %rbp .cfi_push %rbp mov %rsp,%rbp .cfi_def_cfa_register %rbp .Lshaext_shortcut: ___ $code.=<<___ if ($win64); sub \$0x50,%rsp .cfi_alloca 0x50 movaps %xmm6,-0x50(%rbp) movaps %xmm7,-0x40(%rbp) movaps %xmm8,-0x30(%rbp) movaps %xmm9,-0x20(%rbp) movaps %xmm10,-0x10(%rbp) .cfi_offset %xmm6-%xmm10,-0x60 ___ $code.=<<___; .cfi_end_prologue lea K512_nodup+0x80(%rip),$Tbl vmovdqu ($ctx),@MSG[0] # DCBA vmovdqu 32($ctx),@MSG[1] # HGFE vmovdqa -0xa0($Tbl),$BSWAP vpermq \$0b00011011,@MSG[0],@MSG[0] # ABCD vpblendd \$0b00001111,@MSG[1],@MSG[0],$ABEF # ABFE vpblendd \$0b00001111,@MSG[0],@MSG[1],$CDGH # HGCD vpermq \$0b11100001,$ABEF,$ABEF # ABEF vpermq \$0b01001011,$CDGH,$CDGH # CDGH jmp .Loop_shaext .align 16 .Loop_shaext: vmovdqu ($inp),@MSG[0] vmovdqu 0x20($inp),@MSG[1] vmovdqu 0x40($inp),@MSG[2] vpshufb $BSWAP,@MSG[0],@MSG[0] vmovdqu 0x60($inp),@MSG[3] vpaddq 0*32-0x80($Tbl),@MSG[0],$Wi vpshufb $BSWAP,@MSG[1],@MSG[1] vmovdqa $CDGH,$CDGH_SAVE # offload vsha512rnds2 %x#$Wi,$ABEF,$CDGH # 0-3 vextracti128 \$1,$Wi,%x#$Wi vmovdqa $ABEF,$ABEF_SAVE # offload vsha512rnds2 %x#$Wi,$CDGH,$ABEF vpaddq 1*32-0x80($Tbl),@MSG[1],$Wi vpshufb $BSWAP,@MSG[2],@MSG[2] vsha512rnds2 %x#$Wi,$ABEF,$CDGH # 4-7 vextracti128 \$1,$Wi,%x#$Wi lea 0x80($inp),$inp vsha512msg1 @MSG[1],@MSG[0] vsha512rnds2 %x#$Wi,$CDGH,$ABEF vpaddq 2*32-0x80($Tbl),@MSG[2],$Wi vpshufb $BSWAP,@MSG[3],@MSG[3] vsha512rnds2 %x#$Wi,$ABEF,$CDGH # 8-11 vextracti128 \$1,$Wi,%x#$Wi vpblendd \$0x03,@MSG[3],@MSG[2],$TMP vpermq \$0x39,$TMP,$TMP vpaddq $TMP,@MSG[0],@MSG[0] vsha512msg1 @MSG[2],@MSG[1] vsha512rnds2 %x#$Wi,$CDGH,$ABEF vpaddq 
3*32-0x80($Tbl),@MSG[3],$Wi vsha512msg2 @MSG[3],@MSG[0] vsha512rnds2 %x#$Wi,$ABEF,$CDGH # 12-15 vextracti128 \$1,$Wi,%x#$Wi vpblendd \$0x03,@MSG[0],@MSG[3],$TMP vpermq \$0x39,$TMP,$TMP vpaddq $TMP,@MSG[1],@MSG[1] vsha512msg1 @MSG[3],@MSG[2] vsha512rnds2 %x#$Wi,$CDGH,$ABEF ___ for($i=4;$i<20-3;$i++) { $code.=<<___; vpaddq $i*32-0x80($Tbl),@MSG[0],$Wi vsha512msg2 @MSG[0],@MSG[1] vsha512rnds2 %x#$Wi,$ABEF,$CDGH # 16-19... vextracti128 \$1,$Wi,%x#$Wi vpblendd \$0x03,@MSG[1],@MSG[0],$TMP vpermq \$0x39,$TMP,$TMP vpaddq $TMP,@MSG[2],@MSG[2] vsha512msg1 @MSG[0],@MSG[3] vsha512rnds2 %x#$Wi,$CDGH,$ABEF ___ push(@MSG,shift(@MSG)); } $code.=<<___; vpaddq 17*32-0x80($Tbl),@MSG[0],$Wi vsha512msg2 @MSG[0],@MSG[1] vsha512rnds2 %x#$Wi,$ABEF,$CDGH # 68-71 vextracti128 \$1,$Wi,%x#$Wi vpblendd \$0x03,@MSG[1],@MSG[0],$TMP vpermq \$0x39,$TMP,$TMP vsha512rnds2 %x#$Wi,$CDGH,$ABEF vpaddq $TMP,@MSG[2],@MSG[2] vpaddq 18*32-0x80($Tbl),@MSG[1],$Wi vsha512rnds2 %x#$Wi,$ABEF,$CDGH # 72-75 vextracti128 \$1,$Wi,%x#$Wi vsha512msg2 @MSG[1],@MSG[2] vsha512rnds2 %x#$Wi,$CDGH,$ABEF vpaddq 19*32-0x80($Tbl),@MSG[2],$Wi vsha512rnds2 %x#$Wi,$ABEF,$CDGH # 76-79 vextracti128 \$1,$Wi,%x#$Wi dec $num vsha512rnds2 %x#$Wi,$CDGH,$ABEF vpaddq $CDGH_SAVE,$CDGH,$CDGH vpaddq $ABEF_SAVE,$ABEF,$ABEF jnz .Loop_shaext vpermq \$0b01001011,$ABEF,$ABEF # EFBA vpblendd \$0b11110000,$CDGH,$ABEF,@MSG[0] # CDBA vpblendd \$0b11110000,$ABEF,$CDGH,@MSG[1] # EFGH vpermq \$0b10110100,@MSG[0],@MSG[0] # DCBA vpermq \$0b00011011,@MSG[1],@MSG[1] # HGFE vmovdqu @MSG[0],($ctx) vmovdqu @MSG[1],32($ctx) vzeroupper ___ $code.=<<___ if ($win64); movaps -0x50(%rbp),%xmm6 movaps -0x40(%rbp),%xmm7 movaps -0x30(%rbp),%xmm8 movaps -0x20(%rbp),%xmm9 movaps -0x10(%rbp),%xmm10 mov %rbp,%rsp ___ $code.=<<___; .cfi_def_cfa_register %rsp pop %rbp .cfi_pop %rbp .cfi_epilogue ret .cfi_endproc .size sha512_block_data_order_shaext,.-sha512_block_data_order_shaext ___ }}} {{{ my $a4=$T1; my ($a,$b,$c,$d,$e,$f,$g,$h); sub AUTOLOAD() # thunk [simplified] 32-bit style perlasm { my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; my $arg = pop; $arg = "\$$arg" if ($arg*1 eq $arg); $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n"; } sub body_00_15 () { ( '($a,$b,$c,$d,$e,$f,$g,$h)=@ROT;'. '&ror ($a0,$Sigma1[2]-$Sigma1[1])', '&mov ($a,$a1)', '&mov ($a4,$f)', '&ror ($a1,$Sigma0[2]-$Sigma0[1])', '&xor ($a0,$e)', '&xor ($a4,$g)', # f^g '&ror ($a0,$Sigma1[1]-$Sigma1[0])', '&xor ($a1,$a)', '&and ($a4,$e)', # (f^g)&e '&xor ($a0,$e)', '&add ($h,$SZ*($i&15)."(%rsp)")', # h+=X[i]+K[i] '&mov ($a2,$a)', '&xor ($a4,$g)', # Ch(e,f,g)=((f^g)&e)^g '&ror ($a1,$Sigma0[1]-$Sigma0[0])', '&xor ($a2,$b)', # a^b, b^c in next round '&add ($h,$a4)', # h+=Ch(e,f,g) '&ror ($a0,$Sigma1[0])', # Sigma1(e) '&and ($a3,$a2)', # (b^c)&(a^b) '&xor ($a1,$a)', '&add ($h,$a0)', # h+=Sigma1(e) '&xor ($a3,$b)', # Maj(a,b,c)=Ch(a^b,c,b) '&ror ($a1,$Sigma0[0])', # Sigma0(a) '&add ($d,$h)', # d+=h '&add ($h,$a3)', # h+=Maj(a,b,c) '&mov ($a0,$d)', '&add ($a1,$h);'. 
# h+=Sigma0(a) '($a2,$a3) = ($a3,$a2); unshift(@ROT,pop(@ROT)); $i++;' ); } ###################################################################### # SSSE3 code path # if ($SZ==4) { # SHA256 only my $Tbl = $inp; my $_ctx="-64(%rbp)"; my $_inp="-56(%rbp)"; my $_end="-48(%rbp)"; my $framesz=3*8+$win64*16*4; my @X = map("%xmm$_",(0..3)); my ($t0,$t1,$t2,$t3, $t4,$t5) = map("%xmm$_",(4..9)); $code.=<<___; .type ${func}_ssse3,\@function,3,"unwind" .align 64 ${func}_ssse3: .cfi_startproc push %rbp .cfi_push %rbp mov %rsp,%rbp .cfi_def_cfa_register %rbp .Lssse3_shortcut: push %rbx .cfi_push %rbx push %r12 .cfi_push %r12 push %r13 .cfi_push %r13 push %r14 .cfi_push %r14 push %r15 .cfi_push %r15 shl \$4,%rdx # num*16 sub \$$framesz,%rsp .cfi_alloca $framesz lea ($inp,%rdx,$SZ),%rdx # inp+num*16*$SZ mov $ctx,$_ctx # save ctx, 1st arg #mov $inp,$_inp # save inp, 2nd arg mov %rdx,$_end # save end pointer, "3rd" arg ___ $code.=<<___ if ($win64); movaps %xmm6,-0x80(%rbp) movaps %xmm7,-0x70(%rbp) movaps %xmm8,-0x60(%rbp) movaps %xmm9,-0x50(%rbp) .cfi_offset %xmm6-%xmm9,-0x90 ___ $code.=<<___; .cfi_end_prologue lea -16*$SZ(%rsp),%rsp mov $SZ*0($ctx),$A and \$-64,%rsp # align stack mov $SZ*1($ctx),$B mov $SZ*2($ctx),$C mov $SZ*3($ctx),$D mov $SZ*4($ctx),$E mov $SZ*5($ctx),$F mov $SZ*6($ctx),$G mov $SZ*7($ctx),$H ___ $code.=<<___; #movdqa $TABLE+`$SZ*2*$rounds`+32(%rip),$t4 #movdqa $TABLE+`$SZ*2*$rounds`+64(%rip),$t5 jmp .Lloop_ssse3 .align 16 .Lloop_ssse3: movdqa $TABLE+`$SZ*2*$rounds`(%rip),$t3 mov $inp,$_inp # offload $inp movdqu 0x00($inp),@X[0] movdqu 0x10($inp),@X[1] movdqu 0x20($inp),@X[2] pshufb $t3,@X[0] movdqu 0x30($inp),@X[3] lea $TABLE(%rip),$Tbl pshufb $t3,@X[1] movdqa 0x00($Tbl),$t0 movdqa 0x20($Tbl),$t1 pshufb $t3,@X[2] paddd @X[0],$t0 movdqa 0x40($Tbl),$t2 pshufb $t3,@X[3] movdqa 0x60($Tbl),$t3 paddd @X[1],$t1 paddd @X[2],$t2 paddd @X[3],$t3 movdqa $t0,0x00(%rsp) mov $A,$a1 movdqa $t1,0x10(%rsp) mov $B,$a3 movdqa $t2,0x20(%rsp) xor $C,$a3 # magic movdqa $t3,0x30(%rsp) mov $E,$a0 jmp .Lssse3_00_47 .align 16 .Lssse3_00_47: sub \$`-16*2*$SZ`,$Tbl # size optimization ___ sub Xupdate_256_SSSE3 () { ( '&movdqa ($t0,@X[1]);', '&movdqa ($t3,@X[3])', '&palignr ($t0,@X[0],$SZ)', # X[1..4] '&palignr ($t3,@X[2],$SZ);', # X[9..12] '&movdqa ($t1,$t0)', '&movdqa ($t2,$t0);', '&psrld ($t0,$sigma0[2])', '&paddd (@X[0],$t3);', # X[0..3] += X[9..12] '&psrld ($t2,$sigma0[0])', '&pshufd ($t3,@X[3],0b11111010)',# X[14..15] '&pslld ($t1,8*$SZ-$sigma0[1]);'. '&pxor ($t0,$t2)', '&psrld ($t2,$sigma0[1]-$sigma0[0]);'. '&pxor ($t0,$t1)', '&pslld ($t1,$sigma0[1]-$sigma0[0]);'. 
'&pxor ($t0,$t2);', '&movdqa ($t2,$t3)', '&pxor ($t0,$t1);', # sigma0(X[1..4]) '&psrld ($t3,$sigma1[2])', '&paddd (@X[0],$t0);', # X[0..3] += sigma0(X[1..4]) '&psrlq ($t2,$sigma1[0])', '&pxor ($t3,$t2);', '&psrlq ($t2,$sigma1[1]-$sigma1[0])', '&pxor ($t3,$t2)', '&pshufb ($t3,$t4)', # sigma1(X[14..15]) '&paddd (@X[0],$t3)', # X[0..1] += sigma1(X[14..15]) '&pshufd ($t3,@X[0],0b01010000)',# X[16..17] '&movdqa ($t2,$t3);', '&psrld ($t3,$sigma1[2])', '&psrlq ($t2,$sigma1[0])', '&pxor ($t3,$t2);', '&psrlq ($t2,$sigma1[1]-$sigma1[0])', '&pxor ($t3,$t2);', '&movdqa ($t2,16*2*$j."($Tbl)")', '&pshufb ($t3,$t5)', '&paddd (@X[0],$t3)' # X[2..3] += sigma1(X[16..17]) ); } sub SSSE3_256_00_47 () { my $j = shift; my $body = shift; my @X = @_; my @insns = (&$body,&$body,&$body,&$body); # 104 instructions if (0) { foreach (Xupdate_256_SSSE3()) { # 36 instructions eval; eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); } } else { # squeeze extra 4% on Westmere and 19% on Atom eval(shift(@insns)); #@ &movdqa ($t0,@X[1]); eval(shift(@insns)); eval(shift(@insns)); &movdqa ($t3,@X[3]); eval(shift(@insns)); #@ eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); #@ eval(shift(@insns)); &palignr ($t0,@X[0],$SZ); # X[1..4] eval(shift(@insns)); eval(shift(@insns)); &palignr ($t3,@X[2],$SZ); # X[9..12] eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); #@ &movdqa ($t1,$t0); eval(shift(@insns)); eval(shift(@insns)); &movdqa ($t2,$t0); eval(shift(@insns)); #@ eval(shift(@insns)); &psrld ($t0,$sigma0[2]); eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); &paddd (@X[0],$t3); # X[0..3] += X[9..12] eval(shift(@insns)); #@ eval(shift(@insns)); &psrld ($t2,$sigma0[0]); eval(shift(@insns)); eval(shift(@insns)); &pshufd ($t3,@X[3],0b11111010); # X[4..15] eval(shift(@insns)); eval(shift(@insns)); #@ &pslld ($t1,8*$SZ-$sigma0[1]); eval(shift(@insns)); eval(shift(@insns)); &pxor ($t0,$t2); eval(shift(@insns)); #@ eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); #@ &psrld ($t2,$sigma0[1]-$sigma0[0]); eval(shift(@insns)); &pxor ($t0,$t1); eval(shift(@insns)); eval(shift(@insns)); &pslld ($t1,$sigma0[1]-$sigma0[0]); eval(shift(@insns)); eval(shift(@insns)); &pxor ($t0,$t2); eval(shift(@insns)); eval(shift(@insns)); #@ &movdqa ($t2,$t3); eval(shift(@insns)); eval(shift(@insns)); &pxor ($t0,$t1); # sigma0(X[1..4]) eval(shift(@insns)); #@ eval(shift(@insns)); eval(shift(@insns)); &psrld ($t3,$sigma1[2]); eval(shift(@insns)); eval(shift(@insns)); &paddd (@X[0],$t0); # X[0..3] += sigma0(X[1..4]) eval(shift(@insns)); #@ eval(shift(@insns)); &psrlq ($t2,$sigma1[0]); eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); &pxor ($t3,$t2); eval(shift(@insns)); #@ eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); #@ &psrlq ($t2,$sigma1[1]-$sigma1[0]); eval(shift(@insns)); eval(shift(@insns)); &pxor ($t3,$t2); eval(shift(@insns)); #@ eval(shift(@insns)); eval(shift(@insns)); #&pshufb ($t3,$t4); # sigma1(X[14..15]) &pshufd ($t3,$t3,0b10000000); eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); &psrldq ($t3,8); eval(shift(@insns)); eval(shift(@insns)); #@ eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); #@ &paddd (@X[0],$t3); # X[0..1] += sigma1(X[14..15]) eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); &pshufd ($t3,@X[0],0b01010000); # X[16..17] eval(shift(@insns)); eval(shift(@insns)); #@ eval(shift(@insns)); &movdqa ($t2,$t3); eval(shift(@insns)); eval(shift(@insns)); &psrld ($t3,$sigma1[2]); 
eval(shift(@insns)); eval(shift(@insns)); #@ &psrlq ($t2,$sigma1[0]); eval(shift(@insns)); eval(shift(@insns)); &pxor ($t3,$t2); eval(shift(@insns)); #@ eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); #@ eval(shift(@insns)); &psrlq ($t2,$sigma1[1]-$sigma1[0]); eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); &pxor ($t3,$t2); eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); #@ #&pshufb ($t3,$t5); &pshufd ($t3,$t3,0b00001000); eval(shift(@insns)); eval(shift(@insns)); &movdqa ($t2,16*2*$j."($Tbl)"); eval(shift(@insns)); #@ eval(shift(@insns)); &pslldq ($t3,8); eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); &paddd (@X[0],$t3); # X[2..3] += sigma1(X[16..17]) eval(shift(@insns)); #@ eval(shift(@insns)); eval(shift(@insns)); } &paddd ($t2,@X[0]); foreach (@insns) { eval; } # remaining instructions &movdqa (16*$j."(%rsp)",$t2); } for ($i=0,$j=0; $j<4; $j++) { &SSSE3_256_00_47($j,\&body_00_15,@X); push(@X,shift(@X)); # rotate(@X) } &cmpb ($SZ-1+16*2*$SZ."($Tbl)",0); &jne (".Lssse3_00_47"); for ($i=0; $i<16; ) { foreach(body_00_15()) { eval; } } $code.=<<___; mov $_ctx,$ctx mov $a1,$A mov $_inp,$inp add $SZ*0($ctx),$A add $SZ*1($ctx),$B add $SZ*2($ctx),$C add $SZ*3($ctx),$D add $SZ*4($ctx),$E add $SZ*5($ctx),$F add $SZ*6($ctx),$G add $SZ*7($ctx),$H lea 16*$SZ($inp),$inp cmp $_end,$inp mov $A,$SZ*0($ctx) mov $B,$SZ*1($ctx) mov $C,$SZ*2($ctx) mov $D,$SZ*3($ctx) mov $E,$SZ*4($ctx) mov $F,$SZ*5($ctx) mov $G,$SZ*6($ctx) mov $H,$SZ*7($ctx) jb .Lloop_ssse3 ___ $code.=<<___ if ($win64); movaps -0x80(%rbp),%xmm6 movaps -0x70(%rbp),%xmm7 movaps -0x60(%rbp),%xmm8 movaps -0x50(%rbp),%xmm9 ___ $code.=<<___; mov -40(%rbp),%r15 mov -32(%rbp),%r14 mov -24(%rbp),%r13 mov -16(%rbp),%r12 mov -8(%rbp),%rbx mov %rbp,%rsp .cfi_def_cfa_register %rsp pop %rbp .cfi_pop %rbp .cfi_epilogue ret .cfi_endproc .size ${func}_ssse3,.-${func}_ssse3 ___ } if ($avx) {{ ###################################################################### # XOP code path # if ($SZ==8) { # SHA512 only my $Tbl=$inp; my $_ctx="-64(%rbp)"; my $_inp="-56(%rbp)"; my $_end="-48(%rbp)"; my $framesz=3*8+$win64*16*6; $code.=<<___; .type ${func}_xop,\@function,3,"unwind" .align 64 ${func}_xop: .cfi_startproc push %rbp .cfi_push %rbp mov %rsp,%rbp .cfi_def_cfa_register %rbp .Lxop_shortcut: push %rbx .cfi_push %rbx push %r12 .cfi_push %r12 push %r13 .cfi_push %r13 push %r14 .cfi_push %r14 push %r15 .cfi_push %r15 shl \$4,%rdx # num*16 sub \$$framesz,%rsp .cfi_alloca $framesz lea ($inp,%rdx,$SZ),%rdx # inp+num*16*$SZ mov $ctx,$_ctx # save ctx, 1st arg #mov $inp,$_inp # save inp, 2nd arg mov %rdx,$_end # save end pointer, "3rd" arg ___ $code.=<<___ if ($win64); movaps %xmm6,-0xa0(%rbp) movaps %xmm7,-0x90(%rbp) movaps %xmm8,-0x80(%rbp) movaps %xmm9,-0x70(%rbp) .cfi_offset %xmm6-%xmm9,-0xb0 ___ $code.=<<___ if ($win64 && $SZ>4); movaps %xmm10,-0x60(%rbp) movaps %xmm11,-0x50(%rbp) .cfi_offset %xmm10-%xmm11,-0x70 ___ $code.=<<___; .cfi_end_prologue lea -16*$SZ(%rsp),%rsp vzeroupper and \$-64,%rsp # align stack mov $SZ*0($ctx),$A mov $SZ*1($ctx),$B mov $SZ*2($ctx),$C mov $SZ*3($ctx),$D mov $SZ*4($ctx),$E mov $SZ*5($ctx),$F mov $SZ*6($ctx),$G mov $SZ*7($ctx),$H jmp .Lloop_xop ___ if ($SZ==4) { # SHA256 my @X = map("%xmm$_",(0..3)); my ($t0,$t1,$t2,$t3) = map("%xmm$_",(4..7)); $code.=<<___; .align 16 .Lloop_xop: vmovdqa $TABLE+`$SZ*2*$rounds`(%rip),$t3 mov $inp,$_inp # offload $inp vmovdqu 0x00($inp),@X[0] vmovdqu 0x10($inp),@X[1] vmovdqu 0x20($inp),@X[2] vmovdqu 0x30($inp),@X[3] vpshufb $t3,@X[0],@X[0] lea 
$TABLE(%rip),$Tbl vpshufb $t3,@X[1],@X[1] vpshufb $t3,@X[2],@X[2] vpaddd 0x00($Tbl),@X[0],$t0 vpshufb $t3,@X[3],@X[3] vpaddd 0x20($Tbl),@X[1],$t1 vpaddd 0x40($Tbl),@X[2],$t2 vpaddd 0x60($Tbl),@X[3],$t3 vmovdqa $t0,0x00(%rsp) mov $A,$a1 vmovdqa $t1,0x10(%rsp) mov $B,$a3 vmovdqa $t2,0x20(%rsp) xor $C,$a3 # magic vmovdqa $t3,0x30(%rsp) mov $E,$a0 jmp .Lxop_00_47 .align 16 .Lxop_00_47: sub \$`-16*2*$SZ`,$Tbl # size optimization ___ sub XOP_256_00_47 () { my $j = shift; my $body = shift; my @X = @_; my @insns = (&$body,&$body,&$body,&$body); # 104 instructions &vpalignr ($t0,@X[1],@X[0],$SZ); # X[1..4] eval(shift(@insns)); eval(shift(@insns)); &vpalignr ($t3,@X[3],@X[2],$SZ); # X[9..12] eval(shift(@insns)); eval(shift(@insns)); &vprotd ($t1,$t0,8*$SZ-$sigma0[1]); eval(shift(@insns)); eval(shift(@insns)); &vpsrld ($t0,$t0,$sigma0[2]); eval(shift(@insns)); eval(shift(@insns)); &vpaddd (@X[0],@X[0],$t3); # X[0..3] += X[9..12] eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); &vprotd ($t2,$t1,$sigma0[1]-$sigma0[0]); eval(shift(@insns)); eval(shift(@insns)); &vpxor ($t0,$t0,$t1); eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); &vprotd ($t3,@X[3],8*$SZ-$sigma1[1]); eval(shift(@insns)); eval(shift(@insns)); &vpxor ($t0,$t0,$t2); # sigma0(X[1..4]) eval(shift(@insns)); eval(shift(@insns)); &vpsrld ($t2,@X[3],$sigma1[2]); eval(shift(@insns)); eval(shift(@insns)); &vpaddd (@X[0],@X[0],$t0); # X[0..3] += sigma0(X[1..4]) eval(shift(@insns)); eval(shift(@insns)); &vprotd ($t1,$t3,$sigma1[1]-$sigma1[0]); eval(shift(@insns)); eval(shift(@insns)); &vpxor ($t3,$t3,$t2); eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); &vpxor ($t3,$t3,$t1); # sigma1(X[14..15]) eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); &vpsrldq ($t3,$t3,8); eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); &vpaddd (@X[0],@X[0],$t3); # X[0..1] += sigma1(X[14..15]) eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); &vprotd ($t3,@X[0],8*$SZ-$sigma1[1]); eval(shift(@insns)); eval(shift(@insns)); &vpsrld ($t2,@X[0],$sigma1[2]); eval(shift(@insns)); eval(shift(@insns)); &vprotd ($t1,$t3,$sigma1[1]-$sigma1[0]); eval(shift(@insns)); eval(shift(@insns)); &vpxor ($t3,$t3,$t2); eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); &vpxor ($t3,$t3,$t1); # sigma1(X[16..17]) eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); &vpslldq ($t3,$t3,8); # 22 instructions eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); &vpaddd (@X[0],@X[0],$t3); # X[2..3] += sigma1(X[16..17]) eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); &vpaddd ($t2,@X[0],16*2*$j."($Tbl)"); foreach (@insns) { eval; } # remaining instructions &vmovdqa (16*$j."(%rsp)",$t2); } for ($i=0,$j=0; $j<4; $j++) { &XOP_256_00_47($j,\&body_00_15,@X); push(@X,shift(@X)); # rotate(@X) } &cmpb ($SZ-1+16*2*$SZ."($Tbl)",0); &jne (".Lxop_00_47"); for ($i=0; $i<16; ) { foreach(body_00_15()) { eval; } } } else { # SHA512 my @X = map("%xmm$_",(0..7)); my ($t0,$t1,$t2,$t3) = map("%xmm$_",(8..11)); $code.=<<___; .align 16 .Lloop_xop: vmovdqa $TABLE+`$SZ*2*$rounds`(%rip),$t3 mov $inp,$_inp # offload $inp vmovdqu 0x00($inp),@X[0] vmovdqu 0x10($inp),@X[1] vmovdqu 0x20($inp),@X[2] vpshufb $t3,@X[0],@X[0] vmovdqu 0x30($inp),@X[3] vpshufb $t3,@X[1],@X[1] vmovdqu 0x40($inp),@X[4] 
vpshufb $t3,@X[2],@X[2] vmovdqu 0x50($inp),@X[5] vpshufb $t3,@X[3],@X[3] vmovdqu 0x60($inp),@X[6] vpshufb $t3,@X[4],@X[4] vmovdqu 0x70($inp),@X[7] lea $TABLE+0x80(%rip),$Tbl # size optimization vpshufb $t3,@X[5],@X[5] vpaddq -0x80($Tbl),@X[0],$t0 vpshufb $t3,@X[6],@X[6] vpaddq -0x60($Tbl),@X[1],$t1 vpshufb $t3,@X[7],@X[7] vpaddq -0x40($Tbl),@X[2],$t2 vpaddq -0x20($Tbl),@X[3],$t3 vmovdqa $t0,0x00(%rsp) vpaddq 0x00($Tbl),@X[4],$t0 vmovdqa $t1,0x10(%rsp) vpaddq 0x20($Tbl),@X[5],$t1 vmovdqa $t2,0x20(%rsp) vpaddq 0x40($Tbl),@X[6],$t2 vmovdqa $t3,0x30(%rsp) vpaddq 0x60($Tbl),@X[7],$t3 vmovdqa $t0,0x40(%rsp) mov $A,$a1 vmovdqa $t1,0x50(%rsp) mov $B,$a3 vmovdqa $t2,0x60(%rsp) xor $C,$a3 # magic vmovdqa $t3,0x70(%rsp) mov $E,$a0 jmp .Lxop_00_47 .align 16 .Lxop_00_47: add \$`16*2*$SZ`,$Tbl ___ sub XOP_512_00_47 () { my $j = shift; my $body = shift; my @X = @_; my @insns = (&$body,&$body); # 52 instructions &vpalignr ($t0,@X[1],@X[0],$SZ); # X[1..2] eval(shift(@insns)); eval(shift(@insns)); &vpalignr ($t3,@X[5],@X[4],$SZ); # X[9..10] eval(shift(@insns)); eval(shift(@insns)); &vprotq ($t1,$t0,8*$SZ-$sigma0[1]); eval(shift(@insns)); eval(shift(@insns)); &vpsrlq ($t0,$t0,$sigma0[2]); eval(shift(@insns)); eval(shift(@insns)); &vpaddq (@X[0],@X[0],$t3); # X[0..1] += X[9..10] eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); &vprotq ($t2,$t1,$sigma0[1]-$sigma0[0]); eval(shift(@insns)); eval(shift(@insns)); &vpxor ($t0,$t0,$t1); eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); &vprotq ($t3,@X[7],8*$SZ-$sigma1[1]); eval(shift(@insns)); eval(shift(@insns)); &vpxor ($t0,$t0,$t2); # sigma0(X[1..2]) eval(shift(@insns)); eval(shift(@insns)); &vpsrlq ($t2,@X[7],$sigma1[2]); eval(shift(@insns)); eval(shift(@insns)); &vpaddq (@X[0],@X[0],$t0); # X[0..1] += sigma0(X[1..2]) eval(shift(@insns)); eval(shift(@insns)); &vprotq ($t1,$t3,$sigma1[1]-$sigma1[0]); eval(shift(@insns)); eval(shift(@insns)); &vpxor ($t3,$t3,$t2); eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); &vpxor ($t3,$t3,$t1); # sigma1(X[14..15]) eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); &vpaddq (@X[0],@X[0],$t3); # X[0..1] += sigma1(X[14..15]) eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); &vpaddq ($t2,@X[0],16*2*$j-0x80."($Tbl)"); foreach (@insns) { eval; } # remaining instructions &vmovdqa (16*$j."(%rsp)",$t2); } for ($i=0,$j=0; $j<8; $j++) { &XOP_512_00_47($j,\&body_00_15,@X); push(@X,shift(@X)); # rotate(@X) } &cmpb ($SZ-1+16*2*$SZ-0x80."($Tbl)",0); &jne (".Lxop_00_47"); for ($i=0; $i<16; ) { foreach(body_00_15()) { eval; } } } $code.=<<___; mov $_ctx,$ctx mov $a1,$A mov $_inp,$inp add $SZ*0($ctx),$A add $SZ*1($ctx),$B add $SZ*2($ctx),$C add $SZ*3($ctx),$D add $SZ*4($ctx),$E add $SZ*5($ctx),$F add $SZ*6($ctx),$G add $SZ*7($ctx),$H lea 16*$SZ($inp),$inp cmp $_end,$inp mov $A,$SZ*0($ctx) mov $B,$SZ*1($ctx) mov $C,$SZ*2($ctx) mov $D,$SZ*3($ctx) mov $E,$SZ*4($ctx) mov $F,$SZ*5($ctx) mov $G,$SZ*6($ctx) mov $H,$SZ*7($ctx) jb .Lloop_xop vzeroupper ___ $code.=<<___ if ($win64); movaps -0xa0(%rbp),%xmm6 movaps -0x90(%rbp),%xmm7 movaps -0x80(%rbp),%xmm8 movaps -0x70(%rbp),%xmm9 ___ $code.=<<___ if ($win64 && $SZ>4); movaps -0x60(%rbp),%xmm10 movaps -0x50(%rbp),%xmm11 ___ $code.=<<___; mov -40(%rbp),%r15 mov -32(%rbp),%r14 mov -24(%rbp),%r13 mov -16(%rbp),%r12 mov -8(%rbp),%rbx mov %rbp,%rsp .cfi_def_cfa_register %rsp pop %rbp .cfi_pop %rbp .cfi_epilogue ret .cfi_endproc .size 
${func}_xop,.-${func}_xop ___ } ###################################################################### # AVX+shrd code path # my $Tbl=$inp; my $_ctx="-64(%rbp)"; my $_inp="-56(%rbp)"; my $_end="-48(%rbp)"; my $framesz=3*8+$win64*16*6; local *ror = sub { &shrd(@_[0],@_) }; $code.=<<___; .type ${func}_avx,\@function,3,"unwind" .align 64 ${func}_avx: .cfi_startproc push %rbp .cfi_push %rbp mov %rsp,%rbp .cfi_def_cfa_register %rbp .Lavx_shortcut: push %rbx .cfi_push %rbx push %r12 .cfi_push %r12 push %r13 .cfi_push %r13 push %r14 .cfi_push %r14 push %r15 .cfi_push %r15 shl \$4,%rdx # num*16 sub \$$framesz,%rsp .cfi_alloca $framesz lea ($inp,%rdx,$SZ),%rdx # inp+num*16*$SZ mov $ctx,$_ctx # save ctx, 1st arg #mov $inp,$_inp # save inp, 2nd arg mov %rdx,$_end # save end pointer, "3rd" arg ___ $code.=<<___ if ($win64); movaps %xmm6,-0xa0(%rbp) movaps %xmm7,-0x90(%rbp) movaps %xmm8,-0x80(%rbp) movaps %xmm9,-0x70(%rbp) .cfi_offset %xmm6-%xmm9,-0xb0 ___ $code.=<<___ if ($win64 && $SZ>4); movaps %xmm10,-0x60(%rbp) movaps %xmm11,-0x50(%rbp) .cfi_offset %xmm10-%xmm11,-0x70 ___ $code.=<<___; .cfi_end_prologue lea -16*$SZ(%rsp),%rsp vzeroupper and \$-64,%rsp # align stack mov $SZ*0($ctx),$A mov $SZ*1($ctx),$B mov $SZ*2($ctx),$C mov $SZ*3($ctx),$D mov $SZ*4($ctx),$E mov $SZ*5($ctx),$F mov $SZ*6($ctx),$G mov $SZ*7($ctx),$H ___ if ($SZ==4) { # SHA256 my @X = map("%xmm$_",(0..3)); my ($t0,$t1,$t2,$t3, $t4,$t5) = map("%xmm$_",(4..9)); $code.=<<___; vmovdqa $TABLE+`$SZ*2*$rounds`+32(%rip),$t4 vmovdqa $TABLE+`$SZ*2*$rounds`+64(%rip),$t5 jmp .Lloop_avx .align 16 .Lloop_avx: vmovdqa $TABLE+`$SZ*2*$rounds`(%rip),$t3 mov $inp,$_inp # offload $inp vmovdqu 0x00($inp),@X[0] vmovdqu 0x10($inp),@X[1] vmovdqu 0x20($inp),@X[2] vmovdqu 0x30($inp),@X[3] vpshufb $t3,@X[0],@X[0] lea $TABLE(%rip),$Tbl vpshufb $t3,@X[1],@X[1] vpshufb $t3,@X[2],@X[2] vpaddd 0x00($Tbl),@X[0],$t0 vpshufb $t3,@X[3],@X[3] vpaddd 0x20($Tbl),@X[1],$t1 vpaddd 0x40($Tbl),@X[2],$t2 vpaddd 0x60($Tbl),@X[3],$t3 vmovdqa $t0,0x00(%rsp) mov $A,$a1 vmovdqa $t1,0x10(%rsp) mov $B,$a3 vmovdqa $t2,0x20(%rsp) xor $C,$a3 # magic vmovdqa $t3,0x30(%rsp) mov $E,$a0 jmp .Lavx_00_47 .align 16 .Lavx_00_47: sub \$`-16*2*$SZ`,$Tbl # size optimization ___ sub Xupdate_256_AVX () { ( '&vpalignr ($t0,@X[1],@X[0],$SZ)', # X[1..4] '&vpalignr ($t3,@X[3],@X[2],$SZ)', # X[9..12] '&vpsrld ($t2,$t0,$sigma0[0]);', '&vpaddd (@X[0],@X[0],$t3)', # X[0..3] += X[9..12] '&vpsrld ($t3,$t0,$sigma0[2])', '&vpslld ($t1,$t0,8*$SZ-$sigma0[1]);', '&vpxor ($t0,$t3,$t2)', '&vpshufd ($t3,@X[3],0b11111010)',# X[14..15] '&vpsrld ($t2,$t2,$sigma0[1]-$sigma0[0]);', '&vpxor ($t0,$t0,$t1)', '&vpslld ($t1,$t1,$sigma0[1]-$sigma0[0]);', '&vpxor ($t0,$t0,$t2)', '&vpsrld ($t2,$t3,$sigma1[2]);', '&vpxor ($t0,$t0,$t1)', # sigma0(X[1..4]) '&vpsrlq ($t3,$t3,$sigma1[0]);', '&vpaddd (@X[0],@X[0],$t0)', # X[0..3] += sigma0(X[1..4]) '&vpxor ($t2,$t2,$t3);', '&vpsrlq ($t3,$t3,$sigma1[1]-$sigma1[0])', '&vpxor ($t2,$t2,$t3)', '&vpshufb ($t2,$t2,$t4)', # sigma1(X[14..15]) '&vpaddd (@X[0],@X[0],$t2)', # X[0..1] += sigma1(X[14..15]) '&vpshufd ($t3,@X[0],0b01010000)',# X[16..17] '&vpsrld ($t2,$t3,$sigma1[2])', '&vpsrlq ($t3,$t3,$sigma1[0])', '&vpxor ($t2,$t2,$t3);', '&vpsrlq ($t3,$t3,$sigma1[1]-$sigma1[0])', '&vpxor ($t2,$t2,$t3)', '&vpshufb ($t2,$t2,$t5)', '&vpaddd (@X[0],@X[0],$t2)' # X[2..3] += sigma1(X[16..17]) ); } sub AVX_256_00_47 () { my $j = shift; my $body = shift; my @X = @_; my @insns = (&$body,&$body,&$body,&$body); # 104 instructions foreach (Xupdate_256_AVX()) { # 29 instructions eval; eval(shift(@insns)); 
eval(shift(@insns)); eval(shift(@insns)); } &vpaddd ($t2,@X[0],16*2*$j."($Tbl)"); foreach (@insns) { eval; } # remaining instructions &vmovdqa (16*$j."(%rsp)",$t2); } for ($i=0,$j=0; $j<4; $j++) { &AVX_256_00_47($j,\&body_00_15,@X); push(@X,shift(@X)); # rotate(@X) } &cmpb ($SZ-1+16*2*$SZ."($Tbl)",0); &jne (".Lavx_00_47"); for ($i=0; $i<16; ) { foreach(body_00_15()) { eval; } } } else { # SHA512 my @X = map("%xmm$_",(0..7)); my ($t0,$t1,$t2,$t3) = map("%xmm$_",(8..11)); $code.=<<___; jmp .Lloop_avx .align 16 .Lloop_avx: vmovdqa $TABLE+`$SZ*2*$rounds`(%rip),$t3 mov $inp,$_inp # offload $inp vmovdqu 0x00($inp),@X[0] vmovdqu 0x10($inp),@X[1] vmovdqu 0x20($inp),@X[2] vpshufb $t3,@X[0],@X[0] vmovdqu 0x30($inp),@X[3] vpshufb $t3,@X[1],@X[1] vmovdqu 0x40($inp),@X[4] vpshufb $t3,@X[2],@X[2] vmovdqu 0x50($inp),@X[5] vpshufb $t3,@X[3],@X[3] vmovdqu 0x60($inp),@X[6] vpshufb $t3,@X[4],@X[4] vmovdqu 0x70($inp),@X[7] lea $TABLE+0x80(%rip),$Tbl # size optimization vpshufb $t3,@X[5],@X[5] vpaddq -0x80($Tbl),@X[0],$t0 vpshufb $t3,@X[6],@X[6] vpaddq -0x60($Tbl),@X[1],$t1 vpshufb $t3,@X[7],@X[7] vpaddq -0x40($Tbl),@X[2],$t2 vpaddq -0x20($Tbl),@X[3],$t3 vmovdqa $t0,0x00(%rsp) vpaddq 0x00($Tbl),@X[4],$t0 vmovdqa $t1,0x10(%rsp) vpaddq 0x20($Tbl),@X[5],$t1 vmovdqa $t2,0x20(%rsp) vpaddq 0x40($Tbl),@X[6],$t2 vmovdqa $t3,0x30(%rsp) vpaddq 0x60($Tbl),@X[7],$t3 vmovdqa $t0,0x40(%rsp) mov $A,$a1 vmovdqa $t1,0x50(%rsp) mov $B,$a3 vmovdqa $t2,0x60(%rsp) xor $C,$a3 # magic vmovdqa $t3,0x70(%rsp) mov $E,$a0 jmp .Lavx_00_47 .align 16 .Lavx_00_47: add \$`16*2*$SZ`,$Tbl ___ sub Xupdate_512_AVX () { ( '&vpalignr ($t0,@X[1],@X[0],$SZ)', # X[1..2] '&vpalignr ($t3,@X[5],@X[4],$SZ)', # X[9..10] '&vpsrlq ($t2,$t0,$sigma0[0])', '&vpaddq (@X[0],@X[0],$t3);', # X[0..1] += X[9..10] '&vpsrlq ($t3,$t0,$sigma0[2])', '&vpsllq ($t1,$t0,8*$SZ-$sigma0[1]);', '&vpxor ($t0,$t3,$t2)', '&vpsrlq ($t2,$t2,$sigma0[1]-$sigma0[0]);', '&vpxor ($t0,$t0,$t1)', '&vpsllq ($t1,$t1,$sigma0[1]-$sigma0[0]);', '&vpxor ($t0,$t0,$t2)', '&vpsrlq ($t3,@X[7],$sigma1[2]);', '&vpxor ($t0,$t0,$t1)', # sigma0(X[1..2]) '&vpsllq ($t2,@X[7],8*$SZ-$sigma1[1]);', '&vpaddq (@X[0],@X[0],$t0)', # X[0..1] += sigma0(X[1..2]) '&vpsrlq ($t1,@X[7],$sigma1[0]);', '&vpxor ($t3,$t3,$t2)', '&vpsllq ($t2,$t2,$sigma1[1]-$sigma1[0]);', '&vpxor ($t3,$t3,$t1)', '&vpsrlq ($t1,$t1,$sigma1[1]-$sigma1[0]);', '&vpxor ($t3,$t3,$t2)', '&vpxor ($t3,$t3,$t1)', # sigma1(X[14..15]) '&vpaddq (@X[0],@X[0],$t3)', # X[0..1] += sigma1(X[14..15]) ); } sub AVX_512_00_47 () { my $j = shift; my $body = shift; my @X = @_; my @insns = (&$body,&$body); # 52 instructions foreach (Xupdate_512_AVX()) { # 23 instructions eval; eval(shift(@insns)); eval(shift(@insns)); } &vpaddq ($t2,@X[0],16*2*$j-0x80."($Tbl)"); foreach (@insns) { eval; } # remaining instructions &vmovdqa (16*$j."(%rsp)",$t2); } for ($i=0,$j=0; $j<8; $j++) { &AVX_512_00_47($j,\&body_00_15,@X); push(@X,shift(@X)); # rotate(@X) } &cmpb ($SZ-1+16*2*$SZ-0x80."($Tbl)",0); &jne (".Lavx_00_47"); for ($i=0; $i<16; ) { foreach(body_00_15()) { eval; } } } $code.=<<___; mov $_ctx,$ctx mov $a1,$A mov $_inp,$inp add $SZ*0($ctx),$A add $SZ*1($ctx),$B add $SZ*2($ctx),$C add $SZ*3($ctx),$D add $SZ*4($ctx),$E add $SZ*5($ctx),$F add $SZ*6($ctx),$G add $SZ*7($ctx),$H lea 16*$SZ($inp),$inp cmp $_end,$inp mov $A,$SZ*0($ctx) mov $B,$SZ*1($ctx) mov $C,$SZ*2($ctx) mov $D,$SZ*3($ctx) mov $E,$SZ*4($ctx) mov $F,$SZ*5($ctx) mov $G,$SZ*6($ctx) mov $H,$SZ*7($ctx) jb .Lloop_avx vzeroupper ___ $code.=<<___ if ($win64); movaps -0xa0(%rbp),%xmm6 movaps -0x90(%rbp),%xmm7 movaps 
-0x80(%rbp),%xmm8 movaps -0x70(%rbp),%xmm9 ___ $code.=<<___ if ($win64 && $SZ>4); movaps -0x60(%rbp),%xmm10 movaps -0x50(%rbp),%xmm11 ___ $code.=<<___; mov -40(%rbp),%r15 mov -32(%rbp),%r14 mov -24(%rbp),%r13 mov -16(%rbp),%r12 mov -8(%rbp),%rbx mov %rbp,%rsp .cfi_def_cfa_register %rsp pop %rbp .cfi_pop %rbp .cfi_epilogue ret .cfi_endproc .size ${func}_avx,.-${func}_avx ___ if ($avx>1) {{ ###################################################################### # AVX2+BMI code path # my $Tbl=$inp; my $_ctx="-64(%rbp)"; my $_inp="-56(%rbp)"; my $_end="-48(%rbp)"; my $framesz=3*8+$win64*16*6; my $PUSH8=8*2*$SZ; use integer; sub bodyx_00_15 () { # at start $a1 should be zero, $a3 - $b^$c and $a4 copy of $f ( '($a,$b,$c,$d,$e,$f,$g,$h)=@ROT;'. '&add ($h,(32*($i/(16/$SZ))+$SZ*($i%(16/$SZ)))%$PUSH8.$base)', # h+=X[i]+K[i] '&and ($a4,$e)', # f&e '&rorx ($a0,$e,$Sigma1[2])', '&rorx ($a2,$e,$Sigma1[1])', '&lea ($a,"($a,$a1)")', # h+=Sigma0(a) from the past '&lea ($h,"($h,$a4)")', '&andn ($a4,$e,$g)', # ~e&g '&xor ($a0,$a2)', '&rorx ($a1,$e,$Sigma1[0])', '&lea ($h,"($h,$a4)")', # h+=Ch(e,f,g)=(e&f)+(~e&g) '&xor ($a0,$a1)', # Sigma1(e) '&mov ($a2,$a)', '&rorx ($a4,$a,$Sigma0[2])', '&lea ($h,"($h,$a0)")', # h+=Sigma1(e) '&xor ($a2,$b)', # a^b, b^c in next round '&rorx ($a1,$a,$Sigma0[1])', '&rorx ($a0,$a,$Sigma0[0])', '&lea ($d,"($d,$h)")', # d+=h '&and ($a3,$a2)', # (b^c)&(a^b) '&xor ($a1,$a4)', '&xor ($a3,$b)', # Maj(a,b,c)=Ch(a^b,c,b) '&xor ($a1,$a0)', # Sigma0(a) '&lea ($h,"($h,$a3)");'. # h+=Maj(a,b,c) '&mov ($a4,$e)', # copy of f in future '($a2,$a3) = ($a3,$a2); unshift(@ROT,pop(@ROT)); $i++;' ); # and at the finish one has to $a+=$a1 } $code.=<<___; .type ${func}_avx2,\@function,3,"unwind" .align 64 ${func}_avx2: .cfi_startproc push %rbp .cfi_push %rbp mov %rsp,%rbp .cfi_def_cfa_register %rbp .Lavx2_shortcut: push %rbx .cfi_push %rbx push %r12 .cfi_push %r12 push %r13 .cfi_push %r13 push %r14 .cfi_push %r14 push %r15 .cfi_push %r15 shl \$4,%rdx # num*16 sub \$$framesz,%rsp .cfi_alloca $framesz lea ($inp,%rdx,$SZ),%rdx # inp+num*16*$SZ mov $ctx,$_ctx # save ctx, 1st arg #mov $inp,$_inp # save inp, 2nd arg mov %rdx,$_end # save end pointer, "3rd" arg ___ $code.=<<___ if ($win64); movaps %xmm6,-0xa0(%rbp) movaps %xmm7,-0x90(%rbp) movaps %xmm8,-0x80(%rbp) movaps %xmm9,-0x70(%rbp) .cfi_offset %xmm6-%xmm9,-0xb0 ___ $code.=<<___ if ($win64 && $SZ>4); movaps %xmm10,-0x60(%rbp) movaps %xmm11,-0x50(%rbp) .cfi_offset %xmm10-%xmm11,-0x70 ___ $code.=<<___; .cfi_end_prologue lea -$PUSH8(%rsp),%rsp vzeroupper and \$-$PUSH8,%rsp # align stack sub \$-16*$SZ,$inp # inp++, size optimization mov $SZ*0($ctx),$A mov $inp,%r12 # borrow $T1 mov $SZ*1($ctx),$B cmp %rdx,$inp # $_end mov $SZ*2($ctx),$C cmove %rsp,%r12 # next block or random data mov $SZ*3($ctx),$D mov $SZ*4($ctx),$E mov $SZ*5($ctx),$F mov $SZ*6($ctx),$G mov $SZ*7($ctx),$H ___ if ($SZ==4) { # SHA256 my @X = map("%ymm$_",(0..3)); my ($t0,$t1,$t2,$t3, $t4,$t5) = map("%ymm$_",(4..9)); $code.=<<___; vmovdqa $TABLE+`$SZ*2*$rounds`+32(%rip),$t4 vmovdqa $TABLE+`$SZ*2*$rounds`+64(%rip),$t5 jmp .Loop_avx2 .align 16 .Loop_avx2: vmovdqa $TABLE+`$SZ*2*$rounds`(%rip),$t3 mov $inp,$_inp # offload $inp vmovdqu -16*$SZ+0($inp),%xmm0 vmovdqu -16*$SZ+16($inp),%xmm1 vmovdqu -16*$SZ+32($inp),%xmm2 vmovdqu -16*$SZ+48($inp),%xmm3 lea $TABLE(%rip),$Tbl vinserti128 \$1,(%r12),@X[0],@X[0] vinserti128 \$1,16(%r12),@X[1],@X[1] vpshufb $t3,@X[0],@X[0] vinserti128 \$1,32(%r12),@X[2],@X[2] vpshufb $t3,@X[1],@X[1] vinserti128 \$1,48(%r12),@X[3],@X[3] vpshufb $t3,@X[2],@X[2] vpaddd 
0x00($Tbl),@X[0],$t0 vpshufb $t3,@X[3],@X[3] vpaddd 0x20($Tbl),@X[1],$t1 vpaddd 0x40($Tbl),@X[2],$t2 vpaddd 0x60($Tbl),@X[3],$t3 vmovdqa $t0,0x00(%rsp) xor $a1,$a1 vmovdqa $t1,0x20(%rsp) lea -$PUSH8(%rsp),%rsp mov $B,$a3 vmovdqa $t2,0x00(%rsp) xor $C,$a3 # magic vmovdqa $t3,0x20(%rsp) mov $F,$a4 sub \$-16*2*$SZ,$Tbl # size optimization jmp .Lavx2_00_47 .align 16 .Lavx2_00_47: ___ sub AVX2_256_00_47 () { my $j = shift; my $body = shift; my @X = @_; my @insns = (&$body,&$body,&$body,&$body); # 96 instructions my $base = "+2*$PUSH8(%rsp)"; &lea ("%rsp","-$PUSH8(%rsp)") if (($j%2)==0); foreach (Xupdate_256_AVX()) { # 29 instructions eval; eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); } &vpaddd ($t2,@X[0],16*2*$j."($Tbl)"); foreach (@insns) { eval; } # remaining instructions &vmovdqa ((32*$j)%$PUSH8."(%rsp)",$t2); } for ($i=0,$j=0; $j<4; $j++) { &AVX2_256_00_47($j,\&bodyx_00_15,@X); push(@X,shift(@X)); # rotate(@X) } &lea ($Tbl,16*2*$SZ."($Tbl)"); &cmpb (($SZ-1)."($Tbl)",0); &jne (".Lavx2_00_47"); for ($i=0; $i<16; ) { my $base=$i<8?"+$PUSH8(%rsp)":"(%rsp)"; foreach(bodyx_00_15()) { eval; } } } else { # SHA512 my @X = map("%ymm$_",(0..7)); my ($t0,$t1,$t2,$t3) = map("%ymm$_",(8..11)); $code.=<<___; jmp .Loop_avx2 .align 16 .Loop_avx2: vmovdqa $TABLE+`$SZ*2*$rounds`(%rip),$t2 mov $inp,$_inp # offload $inp vmovdqu -16*$SZ($inp),%xmm0 vmovdqu -16*$SZ+16($inp),%xmm1 vmovdqu -16*$SZ+32($inp),%xmm2 vmovdqu -16*$SZ+48($inp),%xmm3 vmovdqu -16*$SZ+64($inp),%xmm4 vmovdqu -16*$SZ+80($inp),%xmm5 vmovdqu -16*$SZ+96($inp),%xmm6 vmovdqu -16*$SZ+112($inp),%xmm7 lea $TABLE+0x80(%rip),$Tbl # size optimization vinserti128 \$1,(%r12),@X[0],@X[0] vinserti128 \$1,16(%r12),@X[1],@X[1] vpshufb $t2,@X[0],@X[0] vinserti128 \$1,32(%r12),@X[2],@X[2] vpshufb $t2,@X[1],@X[1] vinserti128 \$1,48(%r12),@X[3],@X[3] vpshufb $t2,@X[2],@X[2] vinserti128 \$1,64(%r12),@X[4],@X[4] vpshufb $t2,@X[3],@X[3] vinserti128 \$1,80(%r12),@X[5],@X[5] vpshufb $t2,@X[4],@X[4] vinserti128 \$1,96(%r12),@X[6],@X[6] vpshufb $t2,@X[5],@X[5] vinserti128 \$1,112(%r12),@X[7],@X[7] vpaddq -0x80($Tbl),@X[0],$t0 vpshufb $t2,@X[6],@X[6] vpaddq -0x60($Tbl),@X[1],$t1 vpshufb $t2,@X[7],@X[7] vpaddq -0x40($Tbl),@X[2],$t2 vpaddq -0x20($Tbl),@X[3],$t3 vmovdqa $t0,0x00(%rsp) vpaddq 0x00($Tbl),@X[4],$t0 vmovdqa $t1,0x20(%rsp) vpaddq 0x20($Tbl),@X[5],$t1 vmovdqa $t2,0x40(%rsp) vpaddq 0x40($Tbl),@X[6],$t2 vmovdqa $t3,0x60(%rsp) lea -$PUSH8(%rsp),%rsp vpaddq 0x60($Tbl),@X[7],$t3 vmovdqa $t0,0x00(%rsp) xor $a1,$a1 vmovdqa $t1,0x20(%rsp) mov $B,$a3 vmovdqa $t2,0x40(%rsp) xor $C,$a3 # magic vmovdqa $t3,0x60(%rsp) mov $F,$a4 add \$16*2*$SZ,$Tbl jmp .Lavx2_00_47 .align 16 .Lavx2_00_47: ___ sub AVX2_512_00_47 () { my $j = shift; my $body = shift; my @X = @_; my @insns = (&$body,&$body); # 48 instructions my $base = "+2*$PUSH8(%rsp)"; &lea ("%rsp","-$PUSH8(%rsp)") if (($j%4)==0); foreach (Xupdate_512_AVX()) { # 23 instructions eval; if ($_ !~ /\;$/) { eval(shift(@insns)); eval(shift(@insns)); eval(shift(@insns)); } } &vpaddq ($t2,@X[0],16*2*$j-0x80."($Tbl)"); foreach (@insns) { eval; } # remaining instructions &vmovdqa ((32*$j)%$PUSH8."(%rsp)",$t2); } for ($i=0,$j=0; $j<8; $j++) { &AVX2_512_00_47($j,\&bodyx_00_15,@X); push(@X,shift(@X)); # rotate(@X) } &lea ($Tbl,16*2*$SZ."($Tbl)"); &cmpb (($SZ-1-0x80)."($Tbl)",0); &jne (".Lavx2_00_47"); for ($i=0; $i<16; ) { my $base=$i<8?"+$PUSH8(%rsp)":"(%rsp)"; foreach(bodyx_00_15()) { eval; } } } $code.=<<___; mov $_ctx,$ctx add $a1,$A mov $_inp,$a4 add $SZ*0($ctx),$A add $SZ*1($ctx),$B add $SZ*2($ctx),$C add 
$SZ*3($ctx),$D add $SZ*4($ctx),$E add $SZ*5($ctx),$F add $SZ*6($ctx),$G add $SZ*7($ctx),$H mov $A,$SZ*0($ctx) mov $B,$SZ*1($ctx) mov $C,$SZ*2($ctx) mov $D,$SZ*3($ctx) mov $E,$SZ*4($ctx) mov $F,$SZ*5($ctx) mov $G,$SZ*6($ctx) mov $H,$SZ*7($ctx) cmp $_end,$a4 je .Ldone_avx2 lea `2*$SZ*($rounds-8)`(%rsp),$Tbl xor $a1,$a1 mov $B,$a3 xor $C,$a3 # magic mov $F,$a4 jmp .Lower_avx2 .align 16 .Lower_avx2: ___ for ($i=0; $i<8; ) { my $base="+16($Tbl)"; foreach(bodyx_00_15()) { eval; } } $code.=<<___; lea -$PUSH8($Tbl),$Tbl cmp %rsp,$Tbl jae .Lower_avx2 mov $_ctx,$ctx add $a1,$A mov $_inp,$inp lea `2*$SZ*($rounds-8)`(%rsp),%rsp add $SZ*0($ctx),$A add $SZ*1($ctx),$B add $SZ*2($ctx),$C add $SZ*3($ctx),$D add $SZ*4($ctx),$E add $SZ*5($ctx),$F lea `2*16*$SZ`($inp),$inp # inp+=2 add $SZ*6($ctx),$G mov $inp,%r12 add $SZ*7($ctx),$H cmp $_end,$inp mov $A,$SZ*0($ctx) cmove %rsp,%r12 # next block or stale data mov $B,$SZ*1($ctx) mov $C,$SZ*2($ctx) mov $D,$SZ*3($ctx) mov $E,$SZ*4($ctx) mov $F,$SZ*5($ctx) mov $G,$SZ*6($ctx) mov $H,$SZ*7($ctx) jbe .Loop_avx2 .Ldone_avx2: vzeroupper ___ $code.=<<___ if ($win64); movaps -0xa0(%rbp),%xmm6 movaps -0x90(%rbp),%xmm7 movaps -0x80(%rbp),%xmm8 movaps -0x70(%rbp),%xmm9 ___ $code.=<<___ if ($win64 && $SZ>4); movaps -0x60(%rbp),%xmm10 movaps -0x50(%rbp),%xmm11 ___ $code.=<<___; mov -40(%rbp),%r15 mov -32(%rbp),%r14 mov -24(%rbp),%r13 mov -16(%rbp),%r12 mov -8(%rbp),%rbx mov %rbp,%rsp .cfi_def_cfa_register %rsp pop %rbp .cfi_pop %rbp .cfi_epilogue ret .cfi_endproc .size ${func}_avx2,.-${func}_avx2 ___ }} }}}}} sub sha256op38 { my $instr = shift; my %opcodelet = ( "sha256rnds2" => 0xcb, "sha256msg1" => 0xcc, "sha256msg2" => 0xcd ); if (defined($opcodelet{$instr}) && @_[0] =~ /%xmm([0-7]),\s*%xmm([0-7])/) { my @opcode=(0x0f,0x38); push @opcode,$opcodelet{$instr}; push @opcode,0xc0|($1&7)|(($2&7)<<3); # ModR/M return ".byte\t".join(',',@opcode); } else { return $instr."\t".@_[0]; } } sub vsha512rnds2 { my $instr = shift; if (@_[0] =~ /%xmm([0-9]+),\s*%ymm([0-9]+),\s*%ymm([0-9]+)/) { my @opcode=(0xc4,0xe2,0x7f,0xcb); @opcode[1] ^= (($1>>3)<<5)|(($3>>3)<<7); @opcode[2] ^= $2<<3; push @opcode,0xc0|($1&7)|(($3&7)<<3); # ModR/M return ".byte\t".join(',',@opcode); } else { return $instr."\t".@_[0]; } } sub vsha512msg { my $instr = shift; my $op = shift; if (@_[0] =~ /%[xy]mm([0-9]+),\s*%ymm([0-9]+)/) { my @opcode=(0xc4,0xe2,0x7f,0xcb+$op); @opcode[1] ^= (($1>>3)<<5)|(($2>>3)<<7); push @opcode,0xc0|($1&7)|(($2&7)<<3); # ModR/M return ".byte\t".join(',',@opcode); } else { return $instr.$op."\t".@_[0]; } } foreach (split("\n",$code)) { s/\`([^\`]*)\`/eval $1/geo; s/%x#%[yz]/%x/go; s/\b(sha256[^\s]*)\s+(.*)/sha256op38($1,$2)/eo or s/\b(vsha512msg)([12])\s+(.*)/vsha512msg($1,$2,$3)/eo or s/\b(vsha512rnds2)\s+(.*)/vsha512rnds2($1,$2)/eo; print $_,"\n"; } close STDOUT;
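
######################################################################
# Typical invocation, for illustration only (the real build system
# supplies its own flavour and output name; the file names shown here
# are placeholders):
#
#	perl <this-script> elf sha512-x86_64.s		# SHA512 flavour
#	perl <this-script> elf sha256-x86_64.s		# SHA256 flavour
#
# i.e. the first argument selects the perlasm flavour and the output
# file name decides, via the /512/ match near the top, which digest's
# block function gets generated.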