Cherry-pick BoringSSL 6410e18: Update several assembly files from upstream.

I'm not sure why this was skipped when it came up in the merge queue the
first time. :(
Adam Langley, 2018-08-07 11:26:15 -07:00 (committed by Brian Smith)
parent bfe884f465
commit b5ee32ff4b
2 changed files with 107 additions and 6 deletions
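Note: nearly all of the added lines below are DWARF call-frame-information annotations. In OpenSSL-style perlasm, `.cfi_push`, `.cfi_restore`, and friends are shorthand that the x86_64 translator expands into standard assembler CFI directives when the .pl file is rendered to assembly. Assuming the translator matches upstream's x86_64-xlate.pl, a single annotated push comes out roughly as:

    push   %rbx                      # the instruction itself
    .cfi_adjust_cfa_offset 8         # expansion of ".cfi_push %rbx": the push moved the CFA by 8
    .cfi_offset %rbx,-16             # ...and %rbx is now saved at CFA-16 (return address at CFA-8)

With these in place, unwinders and profilers can walk the hand-written frames without frame pointers.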

View File

@@ -44,9 +44,8 @@
 # See ghash-x86.pl for background information and details about coding
 # techniques.
 #
-# Special thanks to David Woodhouse <dwmw2@infradead.org> for
-# providing access to a Westmere-based system on behalf of Intel
-# Open Source Technology Centre.
+# Special thanks to David Woodhouse for providing access to a
+# Westmere-based system on behalf of Intel Open Source Technology Centre.
 
 # December 2012
 #
@@ -228,13 +227,21 @@ $code=<<___;
 .type GFp_gcm_gmult_4bit,\@function,2
 .align 16
 GFp_gcm_gmult_4bit:
+.cfi_startproc
 push %rbx
+.cfi_push %rbx
 push %rbp # %rbp and others are pushed exclusively in
+.cfi_push %rbp
 push %r12 # order to reuse Win64 exception handler...
+.cfi_push %r12
 push %r13
+.cfi_push %r13
 push %r14
+.cfi_push %r14
 push %r15
+.cfi_push %r15
 sub \$280,%rsp
+.cfi_adjust_cfa_offset 280
 .Lgmult_prologue:
 
 movzb 15($Xi),$Zlo
@@ -246,10 +253,14 @@ $code.=<<___;
 mov $Zhi,($Xi)
 
 lea 280+48(%rsp),%rsi
+.cfi_def_cfa %rsi,8
 mov -8(%rsi),%rbx
+.cfi_restore %rbx
 lea (%rsi),%rsp
+.cfi_def_cfa_register %rsp
 .Lgmult_epilogue:
 ret
+.cfi_endproc
 .size GFp_gcm_gmult_4bit,.-GFp_gcm_gmult_4bit
 ___
@@ -263,13 +274,21 @@ $code.=<<___;
 .type GFp_gcm_ghash_4bit,\@function,4
 .align 16
 GFp_gcm_ghash_4bit:
+.cfi_startproc
 push %rbx
+.cfi_push %rbx
 push %rbp
+.cfi_push %rbp
 push %r12
+.cfi_push %r12
 push %r13
+.cfi_push %r13
 push %r14
+.cfi_push %r14
 push %r15
+.cfi_push %r15
 sub \$280,%rsp
+.cfi_adjust_cfa_offset 280
 .Lghash_prologue:
 mov $inp,%r14 # reassign couple of args
 mov $len,%r15
@@ -398,15 +417,24 @@ $code.=<<___;
 mov $Zhi,($Xi)
 
 lea 280+48(%rsp),%rsi
+.cfi_def_cfa %rsi,8
 mov -48(%rsi),%r15
+.cfi_restore %r15
 mov -40(%rsi),%r14
+.cfi_restore %r14
 mov -32(%rsi),%r13
+.cfi_restore %r13
 mov -24(%rsi),%r12
+.cfi_restore %r12
 mov -16(%rsi),%rbp
+.cfi_restore %rbp
 mov -8(%rsi),%rbx
+.cfi_restore %rbx
 lea 0(%rsi),%rsp
+.cfi_def_cfa_register %rsp
 .Lghash_epilogue:
 ret
+.cfi_endproc
 .size GFp_gcm_ghash_4bit,.-GFp_gcm_ghash_4bit
 ___
@@ -653,7 +681,8 @@ if ($do4xaggr) {
 my ($Xl,$Xm,$Xh,$Hkey3,$Hkey4)=map("%xmm$_",(11..15));
 
 $code.=<<___;
-mov GFp_ia32cap_P+4(%rip),%eax
+leaq GFp_ia32cap_P(%rip),%rax
+mov 4(%rax),%eax
 cmp \$0x30,$len
 jb .Lskip4x
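The one non-CFI change in this file splits the capability-word load in two; both forms leave the same dword in %eax, the new one simply materializes the symbol's address first:

    mov GFp_ia32cap_P+4(%rip),%eax   # old: fused symbol+offset load
    leaq GFp_ia32cap_P(%rip),%rax    # new: take the address of the vector...
    mov 4(%rax),%eax                 # ...then load dword 4 through it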

View File

@@ -164,7 +164,7 @@ $Tbl="%rbp";
 $_ctx="16*$SZ+0*8(%rsp)";
 $_inp="16*$SZ+1*8(%rsp)";
 $_end="16*$SZ+2*8(%rsp)";
-$_rsp="16*$SZ+3*8(%rsp)";
+$_rsp="`16*$SZ+3*8`(%rsp)";
 $framesz="16*$SZ+4*8";
@@ -257,9 +257,10 @@ $code=<<___;
 .type $func,\@function,3
 .align 16
 $func:
+.cfi_startproc
 ___
 $code.=<<___ if ($SZ==4 || $avx);
-lea GFp_ia32cap_P(%rip),%r11
+leaq GFp_ia32cap_P(%rip),%r11
 mov 0(%r11),%r9d
 mov 4(%r11),%r10d
 mov 8(%r11),%r11d
@@ -286,12 +287,19 @@ $code.=<<___ if ($SZ==4);
 ___
 $code.=<<___;
 mov %rsp,%rax # copy %rsp
+.cfi_def_cfa_register %rax
 push %rbx
+.cfi_push %rbx
 push %rbp
+.cfi_push %rbp
 push %r12
+.cfi_push %r12
 push %r13
+.cfi_push %r13
 push %r14
+.cfi_push %r14
 push %r15
+.cfi_push %r15
 shl \$4,%rdx # num*16
 sub \$$framesz,%rsp
 lea ($inp,%rdx,$SZ),%rdx # inp+num*16*$SZ
@@ -300,6 +308,7 @@ $code.=<<___;
 mov $inp,$_inp # save inp, 2nd arh
 mov %rdx,$_end # save end pointer, "3rd" arg
 mov %rax,$_rsp # save copy of %rsp
+.cfi_cfa_expression $_rsp,deref,+8
 .Lprologue:
 
 mov $SZ*0($ctx),$A
@@ -366,15 +375,24 @@ $code.=<<___;
 jb .Lloop
 
 mov $_rsp,%rsi
+.cfi_def_cfa %rsi,8
 mov -48(%rsi),%r15
+.cfi_restore %r15
 mov -40(%rsi),%r14
+.cfi_restore %r14
 mov -32(%rsi),%r13
+.cfi_restore %r13
 mov -24(%rsi),%r12
+.cfi_restore %r12
 mov -16(%rsi),%rbp
+.cfi_restore %rbp
 mov -8(%rsi),%rbx
+.cfi_restore %rbx
 lea (%rsi),%rsp
+.cfi_def_cfa_register %rsp
 .Lepilogue:
 ret
+.cfi_endproc
 .size $func,.-$func
 ___
@@ -744,14 +762,22 @@ $code.=<<___;
 .type ${func}_ssse3,\@function,3
 .align 64
 ${func}_ssse3:
+.cfi_startproc
 .Lssse3_shortcut:
 mov %rsp,%rax # copy %rsp
+.cfi_def_cfa_register %rax
 push %rbx
+.cfi_push %rbx
 push %rbp
+.cfi_push %rbp
 push %r12
+.cfi_push %r12
 push %r13
+.cfi_push %r13
 push %r14
+.cfi_push %r14
 push %r15
+.cfi_push %r15
 shl \$4,%rdx # num*16
 sub \$`$framesz+$win64*16*4`,%rsp
 lea ($inp,%rdx,$SZ),%rdx # inp+num*16*$SZ
@@ -760,6 +786,7 @@ ${func}_ssse3:
 mov $inp,$_inp # save inp, 2nd arh
 mov %rdx,$_end # save end pointer, "3rd" arg
 mov %rax,$_rsp # save copy of %rsp
+.cfi_cfa_expression $_rsp,deref,+8
 ___
 $code.=<<___ if ($win64);
 movaps %xmm6,16*$SZ+32(%rsp)
@@ -1058,6 +1085,7 @@ $code.=<<___;
 jb .Lloop_ssse3
 
 mov $_rsp,%rsi
+.cfi_def_cfa %rsi,8
 ___
 $code.=<<___ if ($win64);
 movaps 16*$SZ+32(%rsp),%xmm6
@@ -1067,14 +1095,22 @@ $code.=<<___ if ($win64);
 ___
 $code.=<<___;
 mov -48(%rsi),%r15
+.cfi_restore %r15
 mov -40(%rsi),%r14
+.cfi_restore %r14
 mov -32(%rsi),%r13
+.cfi_restore %r13
 mov -24(%rsi),%r12
+.cfi_restore %r12
 mov -16(%rsi),%rbp
+.cfi_restore %rbp
 mov -8(%rsi),%rbx
+.cfi_restore %rbx
 lea (%rsi),%rsp
+.cfi_def_cfa_register %rsp
 .Lepilogue_ssse3:
 ret
+.cfi_endproc
 .size ${func}_ssse3,.-${func}_ssse3
 ___
 }
@@ -1089,14 +1125,22 @@ $code.=<<___;
 .type ${func}_avx,\@function,3
 .align 64
 ${func}_avx:
+.cfi_startproc
 .Lavx_shortcut:
 mov %rsp,%rax # copy %rsp
+.cfi_def_cfa_register %rax
 push %rbx
+.cfi_push %rbx
 push %rbp
+.cfi_push %rbp
 push %r12
+.cfi_push %r12
 push %r13
+.cfi_push %r13
 push %r14
+.cfi_push %r14
 push %r15
+.cfi_push %r15
 shl \$4,%rdx # num*16
 sub \$`$framesz+$win64*16*($SZ==4?4:6)`,%rsp
 lea ($inp,%rdx,$SZ),%rdx # inp+num*16*$SZ
@@ -1105,6 +1149,7 @@ ${func}_avx:
 mov $inp,$_inp # save inp, 2nd arh
 mov %rdx,$_end # save end pointer, "3rd" arg
 mov %rax,$_rsp # save copy of %rsp
+.cfi_cfa_expression $_rsp,deref,+8
 ___
 $code.=<<___ if ($win64);
 movaps %xmm6,16*$SZ+32(%rsp)
@@ -1363,6 +1408,7 @@ $code.=<<___;
 jb .Lloop_avx
 
 mov $_rsp,%rsi
+.cfi_def_cfa %rsi,8
 vzeroupper
 ___
 $code.=<<___ if ($win64);
@@ -1377,14 +1423,22 @@ $code.=<<___ if ($win64 && $SZ>4);
 ___
 $code.=<<___;
 mov -48(%rsi),%r15
+.cfi_restore %r15
 mov -40(%rsi),%r14
+.cfi_restore %r14
 mov -32(%rsi),%r13
+.cfi_restore %r13
 mov -24(%rsi),%r12
+.cfi_restore %r12
 mov -16(%rsi),%rbp
+.cfi_restore %rbp
 mov -8(%rsi),%rbx
+.cfi_restore %rbx
 lea (%rsi),%rsp
+.cfi_def_cfa_register %rsp
 .Lepilogue_avx:
 ret
+.cfi_endproc
 .size ${func}_avx,.-${func}_avx
 ___
@@ -1440,14 +1494,22 @@ $code.=<<___;
 .type ${func}_avx2,\@function,3
 .align 64
 ${func}_avx2:
+.cfi_startproc
 .Lavx2_shortcut:
 mov %rsp,%rax # copy %rsp
+.cfi_def_cfa_register %rax
 push %rbx
+.cfi_push %rbx
 push %rbp
+.cfi_push %rbp
 push %r12
+.cfi_push %r12
 push %r13
+.cfi_push %r13
 push %r14
+.cfi_push %r14
 push %r15
+.cfi_push %r15
 sub \$`2*$SZ*$rounds+4*8+$win64*16*($SZ==4?4:6)`,%rsp
 shl \$4,%rdx # num*16
 and \$-256*$SZ,%rsp # align stack frame
@@ -1457,6 +1519,7 @@ ${func}_avx2:
 mov $inp,$_inp # save inp, 2nd arh
 mov %rdx,$_end # save end pointer, "3rd" arg
 mov %rax,$_rsp # save copy of %rsp
+.cfi_cfa_expression $_rsp,deref,+8
 ___
 $code.=<<___ if ($win64);
 movaps %xmm6,16*$SZ+32(%rsp)
@@ -1737,6 +1800,7 @@ $code.=<<___;
 .Ldone_avx2:
 lea ($Tbl),%rsp
 mov $_rsp,%rsi
+.cfi_def_cfa %rsi,8
 vzeroupper
 ___
 $code.=<<___ if ($win64);
@@ -1751,14 +1815,22 @@ $code.=<<___ if ($win64 && $SZ>4);
 ___
 $code.=<<___;
 mov -48(%rsi),%r15
+.cfi_restore %r15
 mov -40(%rsi),%r14
+.cfi_restore %r14
 mov -32(%rsi),%r13
+.cfi_restore %r13
 mov -24(%rsi),%r12
+.cfi_restore %r12
 mov -16(%rsi),%rbp
+.cfi_restore %rbp
 mov -8(%rsi),%rbx
+.cfi_restore %rbx
 lea (%rsi),%rsp
+.cfi_def_cfa_register %rsp
 .Lepilogue_avx2:
 ret
+.cfi_endproc
 .size ${func}_avx2,.-${func}_avx2
 ___
 }}
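Two changes in this second file are easy to miss. The backticks added to $_rsp make Perl evaluate `16*$SZ+3*8` to a literal displacement at generation time; assuming the translator behaves like upstream's x86_64-xlate.pl, that is what lets the new `.cfi_cfa_expression` lines parse their memory operand. The directive itself describes how to recover the CFA in these functions, which move %rsp by a runtime-dependent amount; with $SZ==4 it renders roughly as:

    # the entry %rsp was stashed at 88(%rsp), so CFA = *(%rsp + 88) + 8,
    # i.e. reload the saved stack pointer and step over the return address
    .cfi_cfa_expression 88(%rsp),deref,+8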