Cherry-pick BoringSSL 6410e18: Update several assembly files from upstream.

I'm not sure why this was skipped when it came up in the merge queue the
first time. :(
Author:    Adam Langley (2018-08-07 11:26:15 -07:00), committed by Brian Smith
Parent:    bfe884f465
Commit:    b5ee32ff4b
2 changed files with 107 additions and 6 deletions, both under crypto/fipsmodule.
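
Most of this diff adds DWARF call-frame information (CFI) to the perlasm
prologues and epilogues: a .cfi_startproc/.cfi_endproc pair per function, a
synthetic .cfi_push after every push of a callee-saved register, and
.cfi_adjust_cfa_offset/.cfi_def_cfa bookkeeping around the stack frame. As a
rough sketch (illustrative only, not taken from this commit), here is a
minimal prologue/epilogue in the same style, with perlasm's synthetic
.cfi_push already expanded into the two standard GAS directives it stands for:

	.globl	example
	.type	example,@function
example:
	.cfi_startproc
	push	%rbx			# save a callee-saved register
	.cfi_adjust_cfa_offset	8	# CFA is now %rsp+16
	.cfi_offset	%rbx,-16	# %rbx lives at CFA-16
	sub	$280,%rsp		# local frame (same size as the 4-bit GHASH one)
	.cfi_adjust_cfa_offset	280
	# ... function body ...
	lea	280+8(%rsp),%rsi	# %rsi = CFA-8, top of the save area
	.cfi_def_cfa	%rsi,8		# describe the CFA via %rsi while %rsp moves
	mov	-8(%rsi),%rbx		# reload %rbx from CFA-16
	.cfi_restore	%rbx
	lea	(%rsi),%rsp		# discard the frame
	.cfi_def_cfa_register	%rsp	# CFA is %rsp+8 again
	ret
	.cfi_endproc
	.size	example,.-example

The SHA functions below reach their frame through a saved copy of %rsp
instead, hence .cfi_cfa_expression $_rsp,deref,+8 (CFA = the value stored in
the $_rsp slot, plus 8); that also appears to be why $_rsp's definition gains
backticks, so the offset folds to a plain number the perlasm CFI parser can
consume.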

@@ -44,9 +44,8 @@
 # See ghash-x86.pl for background information and details about coding
 # techniques.
 #
-# Special thanks to David Woodhouse <dwmw2@infradead.org> for
-# providing access to a Westmere-based system on behalf of Intel
-# Open Source Technology Centre.
+# Special thanks to David Woodhouse for providing access to a
+# Westmere-based system on behalf of Intel Open Source Technology Centre.
 
 # December 2012
 #
@@ -228,13 +227,21 @@ $code=<<___;
 .type	GFp_gcm_gmult_4bit,\@function,2
 .align	16
 GFp_gcm_gmult_4bit:
+.cfi_startproc
 	push	%rbx
+.cfi_push	%rbx
 	push	%rbp		# %rbp and others are pushed exclusively in
+.cfi_push	%rbp
 	push	%r12		# order to reuse Win64 exception handler...
+.cfi_push	%r12
 	push	%r13
+.cfi_push	%r13
 	push	%r14
+.cfi_push	%r14
 	push	%r15
+.cfi_push	%r15
 	sub	\$280,%rsp
+.cfi_adjust_cfa_offset	280
 .Lgmult_prologue:
 
 	movzb	15($Xi),$Zlo
@@ -246,10 +253,14 @@ $code.=<<___;
 	mov	$Zhi,($Xi)
 
 	lea	280+48(%rsp),%rsi
+.cfi_def_cfa	%rsi,8
 	mov	-8(%rsi),%rbx
+.cfi_restore	%rbx
 	lea	(%rsi),%rsp
+.cfi_def_cfa_register	%rsp
 .Lgmult_epilogue:
 	ret
+.cfi_endproc
 .size	GFp_gcm_gmult_4bit,.-GFp_gcm_gmult_4bit
 ___
 
@@ -263,13 +274,21 @@ $code.=<<___;
 .type	GFp_gcm_ghash_4bit,\@function,4
 .align	16
 GFp_gcm_ghash_4bit:
+.cfi_startproc
 	push	%rbx
+.cfi_push	%rbx
 	push	%rbp
+.cfi_push	%rbp
 	push	%r12
+.cfi_push	%r12
 	push	%r13
+.cfi_push	%r13
 	push	%r14
+.cfi_push	%r14
 	push	%r15
+.cfi_push	%r15
 	sub	\$280,%rsp
+.cfi_adjust_cfa_offset	280
 .Lghash_prologue:
 	mov	$inp,%r14		# reassign couple of args
 	mov	$len,%r15
@@ -398,15 +417,24 @@ $code.=<<___;
 	mov	$Zhi,($Xi)
 
 	lea	280+48(%rsp),%rsi
+.cfi_def_cfa	%rsi,8
 	mov	-48(%rsi),%r15
+.cfi_restore	%r15
 	mov	-40(%rsi),%r14
+.cfi_restore	%r14
 	mov	-32(%rsi),%r13
+.cfi_restore	%r13
 	mov	-24(%rsi),%r12
+.cfi_restore	%r12
 	mov	-16(%rsi),%rbp
+.cfi_restore	%rbp
 	mov	-8(%rsi),%rbx
+.cfi_restore	%rbx
 	lea	0(%rsi),%rsp
+.cfi_def_cfa_register	%rsp
 .Lghash_epilogue:
 	ret
+.cfi_endproc
 .size	GFp_gcm_ghash_4bit,.-GFp_gcm_ghash_4bit
 ___
 
@@ -653,7 +681,8 @@ if ($do4xaggr) {
 my ($Xl,$Xm,$Xh,$Hkey3,$Hkey4)=map("%xmm$_",(11..15));
 
 $code.=<<___;
-	mov		GFp_ia32cap_P+4(%rip),%eax
+	leaq		GFp_ia32cap_P(%rip),%rax
+	mov		4(%rax),%eax
 	cmp		\$0x30,$len
 	jb		.Lskip4x
 
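
The one non-CFI change in this file splits the load of GFp_ia32cap_P's second
dword into an explicit leaq of the table address plus an offset load. Both
forms are RIP-relative and behave identically; my reading (not stated in the
commit) is that a bare leaq of the symbol is the pattern reference-rewriting
tools such as BoringSSL's FIPS delocator expect. Side by side:

	# old: one instruction, relocation against GFp_ia32cap_P+4
	mov	GFp_ia32cap_P+4(%rip),%eax
	# new: take the table's address, then load dword 4 from it
	leaq	GFp_ia32cap_P(%rip),%rax
	mov	4(%rax),%eax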

@@ -164,7 +164,7 @@ $Tbl="%rbp";
 
 $_ctx="16*$SZ+0*8(%rsp)";
 $_inp="16*$SZ+1*8(%rsp)";
 $_end="16*$SZ+2*8(%rsp)";
-$_rsp="16*$SZ+3*8(%rsp)";
+$_rsp="`16*$SZ+3*8`(%rsp)";
 $framesz="16*$SZ+4*8";
 
@@ -257,9 +257,10 @@ $code=<<___;
 .type	$func,\@function,3
 .align	16
 $func:
+.cfi_startproc
 ___
 $code.=<<___ if ($SZ==4 || $avx);
-	lea	GFp_ia32cap_P(%rip),%r11
+	leaq	GFp_ia32cap_P(%rip),%r11
 	mov	0(%r11),%r9d
 	mov	4(%r11),%r10d
 	mov	8(%r11),%r11d
@@ -286,12 +287,19 @@ $code.=<<___ if ($SZ==4);
 ___
 $code.=<<___;
 	mov	%rsp,%rax		# copy %rsp
+.cfi_def_cfa_register	%rax
 	push	%rbx
+.cfi_push	%rbx
 	push	%rbp
+.cfi_push	%rbp
 	push	%r12
+.cfi_push	%r12
 	push	%r13
+.cfi_push	%r13
 	push	%r14
+.cfi_push	%r14
 	push	%r15
+.cfi_push	%r15
 	shl	\$4,%rdx		# num*16
 	sub	\$$framesz,%rsp
 	lea	($inp,%rdx,$SZ),%rdx	# inp+num*16*$SZ
@@ -300,6 +308,7 @@ $code.=<<___;
 	mov	$inp,$_inp		# save inp, 2nd arh
 	mov	%rdx,$_end		# save end pointer, "3rd" arg
 	mov	%rax,$_rsp		# save copy of %rsp
+.cfi_cfa_expression	$_rsp,deref,+8
 .Lprologue:
 
 	mov	$SZ*0($ctx),$A
@@ -366,15 +375,24 @@ $code.=<<___;
 	jb	.Lloop
 
 	mov	$_rsp,%rsi
+.cfi_def_cfa	%rsi,8
 	mov	-48(%rsi),%r15
+.cfi_restore	%r15
 	mov	-40(%rsi),%r14
+.cfi_restore	%r14
 	mov	-32(%rsi),%r13
+.cfi_restore	%r13
 	mov	-24(%rsi),%r12
+.cfi_restore	%r12
 	mov	-16(%rsi),%rbp
+.cfi_restore	%rbp
 	mov	-8(%rsi),%rbx
+.cfi_restore	%rbx
 	lea	(%rsi),%rsp
+.cfi_def_cfa_register	%rsp
 .Lepilogue:
 	ret
+.cfi_endproc
 .size	$func,.-$func
 ___
 
@@ -744,14 +762,22 @@ $code.=<<___;
 .type	${func}_ssse3,\@function,3
 .align	64
 ${func}_ssse3:
+.cfi_startproc
 .Lssse3_shortcut:
 	mov	%rsp,%rax		# copy %rsp
+.cfi_def_cfa_register	%rax
 	push	%rbx
+.cfi_push	%rbx
 	push	%rbp
+.cfi_push	%rbp
 	push	%r12
+.cfi_push	%r12
 	push	%r13
+.cfi_push	%r13
 	push	%r14
+.cfi_push	%r14
 	push	%r15
+.cfi_push	%r15
 	shl	\$4,%rdx		# num*16
 	sub	\$`$framesz+$win64*16*4`,%rsp
 	lea	($inp,%rdx,$SZ),%rdx	# inp+num*16*$SZ
@@ -760,6 +786,7 @@ ${func}_ssse3:
 	mov	$inp,$_inp		# save inp, 2nd arh
 	mov	%rdx,$_end		# save end pointer, "3rd" arg
 	mov	%rax,$_rsp		# save copy of %rsp
+.cfi_cfa_expression	$_rsp,deref,+8
 ___
 $code.=<<___ if ($win64);
 	movaps	%xmm6,16*$SZ+32(%rsp)
@@ -1058,6 +1085,7 @@ $code.=<<___;
 	jb	.Lloop_ssse3
 
 	mov	$_rsp,%rsi
+.cfi_def_cfa	%rsi,8
 ___
 $code.=<<___ if ($win64);
 	movaps	16*$SZ+32(%rsp),%xmm6
@@ -1067,14 +1095,22 @@ $code.=<<___ if ($win64);
 ___
 $code.=<<___;
 	mov	-48(%rsi),%r15
+.cfi_restore	%r15
 	mov	-40(%rsi),%r14
+.cfi_restore	%r14
 	mov	-32(%rsi),%r13
+.cfi_restore	%r13
 	mov	-24(%rsi),%r12
+.cfi_restore	%r12
 	mov	-16(%rsi),%rbp
+.cfi_restore	%rbp
 	mov	-8(%rsi),%rbx
+.cfi_restore	%rbx
 	lea	(%rsi),%rsp
+.cfi_def_cfa_register	%rsp
 .Lepilogue_ssse3:
 	ret
+.cfi_endproc
 .size	${func}_ssse3,.-${func}_ssse3
 ___
 }
@@ -1089,14 +1125,22 @@ $code.=<<___;
 .type	${func}_avx,\@function,3
 .align	64
 ${func}_avx:
+.cfi_startproc
 .Lavx_shortcut:
 	mov	%rsp,%rax		# copy %rsp
+.cfi_def_cfa_register	%rax
 	push	%rbx
+.cfi_push	%rbx
 	push	%rbp
+.cfi_push	%rbp
 	push	%r12
+.cfi_push	%r12
 	push	%r13
+.cfi_push	%r13
 	push	%r14
+.cfi_push	%r14
 	push	%r15
+.cfi_push	%r15
 	shl	\$4,%rdx		# num*16
 	sub	\$`$framesz+$win64*16*($SZ==4?4:6)`,%rsp
 	lea	($inp,%rdx,$SZ),%rdx	# inp+num*16*$SZ
@@ -1105,6 +1149,7 @@ ${func}_avx:
 	mov	$inp,$_inp		# save inp, 2nd arh
 	mov	%rdx,$_end		# save end pointer, "3rd" arg
 	mov	%rax,$_rsp		# save copy of %rsp
+.cfi_cfa_expression	$_rsp,deref,+8
 ___
 $code.=<<___ if ($win64);
 	movaps	%xmm6,16*$SZ+32(%rsp)
@@ -1363,6 +1408,7 @@ $code.=<<___;
 	jb	.Lloop_avx
 
 	mov	$_rsp,%rsi
+.cfi_def_cfa	%rsi,8
 	vzeroupper
 ___
 $code.=<<___ if ($win64);
@@ -1377,14 +1423,22 @@ $code.=<<___ if ($win64 && $SZ>4);
 ___
 $code.=<<___;
 	mov	-48(%rsi),%r15
+.cfi_restore	%r15
 	mov	-40(%rsi),%r14
+.cfi_restore	%r14
 	mov	-32(%rsi),%r13
+.cfi_restore	%r13
 	mov	-24(%rsi),%r12
+.cfi_restore	%r12
 	mov	-16(%rsi),%rbp
+.cfi_restore	%rbp
 	mov	-8(%rsi),%rbx
+.cfi_restore	%rbx
 	lea	(%rsi),%rsp
+.cfi_def_cfa_register	%rsp
 .Lepilogue_avx:
 	ret
+.cfi_endproc
 .size	${func}_avx,.-${func}_avx
 ___
 
@@ -1440,14 +1494,22 @@ $code.=<<___;
 .type	${func}_avx2,\@function,3
 .align	64
 ${func}_avx2:
+.cfi_startproc
 .Lavx2_shortcut:
 	mov	%rsp,%rax		# copy %rsp
+.cfi_def_cfa_register	%rax
 	push	%rbx
+.cfi_push	%rbx
 	push	%rbp
+.cfi_push	%rbp
 	push	%r12
+.cfi_push	%r12
 	push	%r13
+.cfi_push	%r13
 	push	%r14
+.cfi_push	%r14
 	push	%r15
+.cfi_push	%r15
 	sub	\$`2*$SZ*$rounds+4*8+$win64*16*($SZ==4?4:6)`,%rsp
 	shl	\$4,%rdx		# num*16
 	and	\$-256*$SZ,%rsp		# align stack frame
@@ -1457,6 +1519,7 @@ ${func}_avx2:
 	mov	$inp,$_inp		# save inp, 2nd arh
 	mov	%rdx,$_end		# save end pointer, "3rd" arg
 	mov	%rax,$_rsp		# save copy of %rsp
+.cfi_cfa_expression	$_rsp,deref,+8
 ___
 $code.=<<___ if ($win64);
 	movaps	%xmm6,16*$SZ+32(%rsp)
@@ -1737,6 +1800,7 @@ $code.=<<___;
 .Ldone_avx2:
 	lea	($Tbl),%rsp
 	mov	$_rsp,%rsi
+.cfi_def_cfa	%rsi,8
 	vzeroupper
 ___
 $code.=<<___ if ($win64);
@@ -1751,14 +1815,22 @@ $code.=<<___ if ($win64 && $SZ>4);
 ___
 $code.=<<___;
 	mov	-48(%rsi),%r15
+.cfi_restore	%r15
 	mov	-40(%rsi),%r14
+.cfi_restore	%r14
 	mov	-32(%rsi),%r13
+.cfi_restore	%r13
 	mov	-24(%rsi),%r12
+.cfi_restore	%r12
 	mov	-16(%rsi),%rbp
+.cfi_restore	%rbp
 	mov	-8(%rsi),%rbx
+.cfi_restore	%rbx
 	lea	(%rsi),%rsp
+.cfi_def_cfa_register	%rsp
 .Lepilogue_avx2:
 	ret
+.cfi_endproc
 .size	${func}_avx2,.-${func}_avx2
 ___
 }}