Vectorize gcm_mul32_nohw and replace gcm_gmult_4bit_mmx.

This shrinks the perf gap between nohw and 4bit_mmx enough to replace
4bit_mmx outright, fixing the last remaining variable-time GHASH
implementation, which covered 32-bit x86 without SSSE3.
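
For reference, the nohw strategy avoids secret-dependent table lookups by
splitting each operand into strided bit classes, so that ordinary integer
multiplies cannot carry between terms (per the BearSSL notes cited in
gcm_nohw.c). A minimal scalar sketch of that 32-bit building block, using a
hypothetical name, equivalent to the scalar gcm_mul32_nohw in the diff below:

#include <stdint.h>

// Sketch of the scalar constant-time carry-less multiply that this CL
// vectorizes. One term every four bits means each product lane accumulates
// at most 32/4 = 8 one-bit terms, which cannot carry into the next term.
static uint64_t mul32_sketch(uint32_t a, uint32_t b) {
  uint32_t a0 = a & 0x11111111, a1 = a & 0x22222222;
  uint32_t a2 = a & 0x44444444, a3 = a & 0x88888888;
  uint32_t b0 = b & 0x11111111, b1 = b & 0x22222222;
  uint32_t b2 = b & 0x44444444, b3 = b & 0x88888888;
  // Classes i and j contribute bits at positions congruent to i+j (mod 4).
  uint64_t c0 = (uint64_t)a0 * b0 ^ (uint64_t)a1 * b3 ^ (uint64_t)a2 * b2 ^
                (uint64_t)a3 * b1;
  uint64_t c1 = (uint64_t)a0 * b1 ^ (uint64_t)a1 * b0 ^ (uint64_t)a2 * b3 ^
                (uint64_t)a3 * b2;
  uint64_t c2 = (uint64_t)a0 * b2 ^ (uint64_t)a1 * b1 ^ (uint64_t)a2 * b0 ^
                (uint64_t)a3 * b3;
  uint64_t c3 = (uint64_t)a0 * b3 ^ (uint64_t)a1 * b2 ^ (uint64_t)a2 * b1 ^
                (uint64_t)a3 * b0;
  // Keep only the parity bit of each term; addition in GF(2) is XOR.
  c0 &= UINT64_C(0x1111111111111111);
  c1 &= UINT64_C(0x2222222222222222);
  c2 &= UINT64_C(0x4444444444444444);
  c3 &= UINT64_C(0x8888888888888888);
  return c0 | c1 | c2 | c3;
}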

Before:
Did 2065000 AES-128-GCM (16 bytes) seal operations in 1000154us (2064682.0 ops/sec): 33.0 MB/s
Did 368000 AES-128-GCM (256 bytes) seal operations in 1002435us (367106.1 ops/sec): 94.0 MB/s
Did 77000 AES-128-GCM (1350 bytes) seal operations in 1001225us (76905.8 ops/sec): 103.8 MB/s
Did 14000 AES-128-GCM (8192 bytes) seal operations in 1067523us (13114.5 ops/sec): 107.4 MB/s
Did 6572 AES-128-GCM (16384 bytes) seal operations in 1015976us (6468.7 ops/sec): 106.0 MB/s
After:
Did 1995000 AES-128-GCM (16 bytes) seal operations in 1000374us (1994254.1 ops/sec): 31.9 MB/s
Did 319000 AES-128-GCM (256 bytes) seal operations in 1000196us (318937.5 ops/sec): 81.6 MB/s
Did 66000 AES-128-GCM (1350 bytes) seal operations in 1002823us (65814.2 ops/sec): 88.8 MB/s
Did 12000 AES-128-GCM (8192 bytes) seal operations in 1079294us (11118.4 ops/sec): 91.1 MB/s
Did 5511 AES-128-GCM (16384 bytes) seal operations in 1006218us (5476.9 ops/sec): 89.7 MB/s
(Note that fallback AES dampens the apparent perf hit. Pairing with
AESNI to roughly isolate GHASH shows a 40% hit.)

That just leaves aes_nohw...

Change-Id: I7d842806c54a5a057895fa2e7665633330e34b72
Reviewed-on: https://boringssl-review.googlesource.com/c/boringssl/+/38784
Reviewed-by: Adam Langley <agl@google.com>
Commit-Queue: David Benjamin <davidben@google.com>
Author: David Benjamin <davidben@google.com> 2019-11-08 22:48:30 -05:00
Committed by: CQ bot account (commit-bot@chromium.org)
parent 9855c1c59a
commit a2518dd077
5 changed files with 75 additions and 515 deletions

crypto/fipsmodule/modes/asm/ghash-x86.pl

@@ -131,6 +131,9 @@
# reduction_alg9. Resulting performance is 1.96 cycles per byte on
# Westmere, 1.95 - on Sandy/Ivy Bridge, 1.76 - on Bulldozer.
# This file was patched in BoringSSL to remove the variable-time 4-bit
# implementation.
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
push(@INC,"${dir}","${dir}../../../perlasm");
require "x86asm.pl";
@@ -143,461 +146,7 @@ open STDOUT,">$output";
$sse2=0;
for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); }
($Zhh,$Zhl,$Zlh,$Zll) = ("ebp","edx","ecx","ebx");
$inp = "edi";
$Htbl = "esi";
$unroll = 0; # Affects x86 loop. Folded loop performs ~7% worse
# than unrolled, which has to be weighted against
# 2.5x x86-specific code size reduction.
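# (The 4-bit code-paths in this file index Htbl and the rem_4bit/rem_8bit
# tables by secret-dependent nibbles and bytes, so the lookups leak through
# cache timing. That data-dependent addressing is the variable-time
# behavior this CL removes.)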
sub x86_loop {
my $off = shift;
my $rem = "eax";
&mov ($Zhh,&DWP(4,$Htbl,$Zll));
&mov ($Zhl,&DWP(0,$Htbl,$Zll));
&mov ($Zlh,&DWP(12,$Htbl,$Zll));
&mov ($Zll,&DWP(8,$Htbl,$Zll));
&xor ($rem,$rem); # avoid partial register stalls on PIII
# shrd practically kills P4, 2.5x deterioration, but P4 has an
# MMX code-path to execute. shrd runs a tad faster [than twice
# the shifts, moves and ors] on pre-MMX Pentium (as well as
# PIII and Core2), *but* it minimizes code size, spares a register
# and thus allows the loop to be folded...
if (!$unroll) {
my $cnt = $inp;
&mov ($cnt,15);
&jmp (&label("x86_loop"));
&set_label("x86_loop",16);
for($i=1;$i<=2;$i++) {
&mov (&LB($rem),&LB($Zll));
&shrd ($Zll,$Zlh,4);
&and (&LB($rem),0xf);
&shrd ($Zlh,$Zhl,4);
&shrd ($Zhl,$Zhh,4);
&shr ($Zhh,4);
&xor ($Zhh,&DWP($off+16,"esp",$rem,4));
&mov (&LB($rem),&BP($off,"esp",$cnt));
if ($i&1) {
&and (&LB($rem),0xf0);
} else {
&shl (&LB($rem),4);
}
&xor ($Zll,&DWP(8,$Htbl,$rem));
&xor ($Zlh,&DWP(12,$Htbl,$rem));
&xor ($Zhl,&DWP(0,$Htbl,$rem));
&xor ($Zhh,&DWP(4,$Htbl,$rem));
if ($i&1) {
&dec ($cnt);
&js (&label("x86_break"));
} else {
&jmp (&label("x86_loop"));
}
}
&set_label("x86_break",16);
} else {
for($i=1;$i<32;$i++) {
&comment($i);
&mov (&LB($rem),&LB($Zll));
&shrd ($Zll,$Zlh,4);
&and (&LB($rem),0xf);
&shrd ($Zlh,$Zhl,4);
&shrd ($Zhl,$Zhh,4);
&shr ($Zhh,4);
&xor ($Zhh,&DWP($off+16,"esp",$rem,4));
if ($i&1) {
&mov (&LB($rem),&BP($off+15-($i>>1),"esp"));
&and (&LB($rem),0xf0);
} else {
&mov (&LB($rem),&BP($off+15-($i>>1),"esp"));
&shl (&LB($rem),4);
}
&xor ($Zll,&DWP(8,$Htbl,$rem));
&xor ($Zlh,&DWP(12,$Htbl,$rem));
&xor ($Zhl,&DWP(0,$Htbl,$rem));
&xor ($Zhh,&DWP(4,$Htbl,$rem));
}
}
&bswap ($Zll);
&bswap ($Zlh);
&bswap ($Zhl);
if (!$x86only) {
&bswap ($Zhh);
} else {
&mov ("eax",$Zhh);
&bswap ("eax");
&mov ($Zhh,"eax");
}
}
if ($unroll) {
&function_begin_B("_x86_gmult_4bit_inner");
&x86_loop(4);
&ret ();
&function_end_B("_x86_gmult_4bit_inner");
}
sub deposit_rem_4bit {
my $bias = shift;
&mov (&DWP($bias+0, "esp"),0x0000<<16);
&mov (&DWP($bias+4, "esp"),0x1C20<<16);
&mov (&DWP($bias+8, "esp"),0x3840<<16);
&mov (&DWP($bias+12,"esp"),0x2460<<16);
&mov (&DWP($bias+16,"esp"),0x7080<<16);
&mov (&DWP($bias+20,"esp"),0x6CA0<<16);
&mov (&DWP($bias+24,"esp"),0x48C0<<16);
&mov (&DWP($bias+28,"esp"),0x54E0<<16);
&mov (&DWP($bias+32,"esp"),0xE100<<16);
&mov (&DWP($bias+36,"esp"),0xFD20<<16);
&mov (&DWP($bias+40,"esp"),0xD940<<16);
&mov (&DWP($bias+44,"esp"),0xC560<<16);
&mov (&DWP($bias+48,"esp"),0x9180<<16);
&mov (&DWP($bias+52,"esp"),0x8DA0<<16);
&mov (&DWP($bias+56,"esp"),0xA9C0<<16);
&mov (&DWP($bias+60,"esp"),0xB5E0<<16);
}
if (!$x86only) {{{
&static_label("rem_4bit");
if (!$sse2) {{ # pure-MMX "May" version...
# This code was removed since SSE2 is required for BoringSSL. The
# outer structure of the code was retained to minimize future merge
# conflicts.
}} else {{ # "June" MMX version...
# ... has slower "April" gcm_gmult_4bit_mmx with folded
# loop. This is done to conserve code size...
$S=16; # shift factor for rem_4bit
sub mmx_loop() {
# The MMX version performs 2.8 times better on P4 (see comment in the
# non-MMX routine for further details), 40% better on Opteron and
# Core2, and 50% better on PIII... In other words, the effort is
# considered well spent...
my $inp = shift;
my $rem_4bit = shift;
my $cnt = $Zhh;
my $nhi = $Zhl;
my $nlo = $Zlh;
my $rem = $Zll;
my ($Zlo,$Zhi) = ("mm0","mm1");
my $tmp = "mm2";
&xor ($nlo,$nlo); # avoid partial register stalls on PIII
&mov ($nhi,$Zll);
&mov (&LB($nlo),&LB($nhi));
&mov ($cnt,14);
&shl (&LB($nlo),4);
&and ($nhi,0xf0);
&movq ($Zlo,&QWP(8,$Htbl,$nlo));
&movq ($Zhi,&QWP(0,$Htbl,$nlo));
&movd ($rem,$Zlo);
&jmp (&label("mmx_loop"));
&set_label("mmx_loop",16);
&psrlq ($Zlo,4);
&and ($rem,0xf);
&movq ($tmp,$Zhi);
&psrlq ($Zhi,4);
&pxor ($Zlo,&QWP(8,$Htbl,$nhi));
&mov (&LB($nlo),&BP(0,$inp,$cnt));
&psllq ($tmp,60);
&pxor ($Zhi,&QWP(0,$rem_4bit,$rem,8));
&dec ($cnt);
&movd ($rem,$Zlo);
&pxor ($Zhi,&QWP(0,$Htbl,$nhi));
&mov ($nhi,$nlo);
&pxor ($Zlo,$tmp);
&js (&label("mmx_break"));
&shl (&LB($nlo),4);
&and ($rem,0xf);
&psrlq ($Zlo,4);
&and ($nhi,0xf0);
&movq ($tmp,$Zhi);
&psrlq ($Zhi,4);
&pxor ($Zlo,&QWP(8,$Htbl,$nlo));
&psllq ($tmp,60);
&pxor ($Zhi,&QWP(0,$rem_4bit,$rem,8));
&movd ($rem,$Zlo);
&pxor ($Zhi,&QWP(0,$Htbl,$nlo));
&pxor ($Zlo,$tmp);
&jmp (&label("mmx_loop"));
&set_label("mmx_break",16);
&shl (&LB($nlo),4);
&and ($rem,0xf);
&psrlq ($Zlo,4);
&and ($nhi,0xf0);
&movq ($tmp,$Zhi);
&psrlq ($Zhi,4);
&pxor ($Zlo,&QWP(8,$Htbl,$nlo));
&psllq ($tmp,60);
&pxor ($Zhi,&QWP(0,$rem_4bit,$rem,8));
&movd ($rem,$Zlo);
&pxor ($Zhi,&QWP(0,$Htbl,$nlo));
&pxor ($Zlo,$tmp);
&psrlq ($Zlo,4);
&and ($rem,0xf);
&movq ($tmp,$Zhi);
&psrlq ($Zhi,4);
&pxor ($Zlo,&QWP(8,$Htbl,$nhi));
&psllq ($tmp,60);
&pxor ($Zhi,&QWP(0,$rem_4bit,$rem,8));
&movd ($rem,$Zlo);
&pxor ($Zhi,&QWP(0,$Htbl,$nhi));
&pxor ($Zlo,$tmp);
&psrlq ($Zlo,32); # lower part of Zlo is already there
&movd ($Zhl,$Zhi);
&psrlq ($Zhi,32);
&movd ($Zlh,$Zlo);
&movd ($Zhh,$Zhi);
&bswap ($Zll);
&bswap ($Zhl);
&bswap ($Zlh);
&bswap ($Zhh);
}
&function_begin("gcm_gmult_4bit_mmx");
&mov ($inp,&wparam(0)); # load Xi
&mov ($Htbl,&wparam(1)); # load Htable
&call (&label("pic_point"));
&set_label("pic_point");
&blindpop("eax");
&lea ("eax",&DWP(&label("rem_4bit")."-".&label("pic_point"),"eax"));
&movz ($Zll,&BP(15,$inp));
&mmx_loop($inp,"eax");
&emms ();
&mov (&DWP(12,$inp),$Zll);
&mov (&DWP(4,$inp),$Zhl);
&mov (&DWP(8,$inp),$Zlh);
&mov (&DWP(0,$inp),$Zhh);
&function_end("gcm_gmult_4bit_mmx");
######################################################################
# The subroutine below is the "528B" variant of the "4-bit" GCM GHASH
# function (see gcm128.c for details). It provides a further 20-40%
# performance improvement over the "May" version mentioned above.
&static_label("rem_8bit");
&function_begin("gcm_ghash_4bit_mmx");
{ my ($Zlo,$Zhi) = ("mm7","mm6");
my $rem_8bit = "esi";
my $Htbl = "ebx";
# parameter block
&mov ("eax",&wparam(0)); # Xi
&mov ("ebx",&wparam(1)); # Htable
&mov ("ecx",&wparam(2)); # inp
&mov ("edx",&wparam(3)); # len
&mov ("ebp","esp"); # original %esp
&call (&label("pic_point"));
&set_label ("pic_point");
&blindpop ($rem_8bit);
&lea ($rem_8bit,&DWP(&label("rem_8bit")."-".&label("pic_point"),$rem_8bit));
&sub ("esp",512+16+16); # allocate stack frame...
&and ("esp",-64); # ...and align it
&sub ("esp",16); # place for (u8)(H[]<<4)
&add ("edx","ecx"); # pointer to the end of input
&mov (&DWP(528+16+0,"esp"),"eax"); # save Xi
&mov (&DWP(528+16+8,"esp"),"edx"); # save inp+len
&mov (&DWP(528+16+12,"esp"),"ebp"); # save original %esp
{ my @lo = ("mm0","mm1","mm2");
my @hi = ("mm3","mm4","mm5");
my @tmp = ("mm6","mm7");
my ($off1,$off2,$i) = (0,0,);
&add ($Htbl,128); # optimize for size
&lea ("edi",&DWP(16+128,"esp"));
&lea ("ebp",&DWP(16+256+128,"esp"));
# decompose Htable (low and high parts are kept separately),
# generate Htable[]>>4, (u8)(Htable[]<<4), save to stack...
for ($i=0;$i<18;$i++) {
&mov ("edx",&DWP(16*$i+8-128,$Htbl)) if ($i<16);
&movq ($lo[0],&QWP(16*$i+8-128,$Htbl)) if ($i<16);
&psllq ($tmp[1],60) if ($i>1);
&movq ($hi[0],&QWP(16*$i+0-128,$Htbl)) if ($i<16);
&por ($lo[2],$tmp[1]) if ($i>1);
&movq (&QWP($off1-128,"edi"),$lo[1]) if ($i>0 && $i<17);
&psrlq ($lo[1],4) if ($i>0 && $i<17);
&movq (&QWP($off1,"edi"),$hi[1]) if ($i>0 && $i<17);
&movq ($tmp[0],$hi[1]) if ($i>0 && $i<17);
&movq (&QWP($off2-128,"ebp"),$lo[2]) if ($i>1);
&psrlq ($hi[1],4) if ($i>0 && $i<17);
&movq (&QWP($off2,"ebp"),$hi[2]) if ($i>1);
&shl ("edx",4) if ($i<16);
&mov (&BP($i,"esp"),&LB("edx")) if ($i<16);
unshift (@lo,pop(@lo)); # "rotate" registers
unshift (@hi,pop(@hi));
unshift (@tmp,pop(@tmp));
$off1 += 8 if ($i>0);
$off2 += 8 if ($i>1);
}
}
&movq ($Zhi,&QWP(0,"eax"));
&mov ("ebx",&DWP(8,"eax"));
&mov ("edx",&DWP(12,"eax")); # load Xi
&set_label("outer",16);
{ my $nlo = "eax";
my $dat = "edx";
my @nhi = ("edi","ebp");
my @rem = ("ebx","ecx");
my @red = ("mm0","mm1","mm2");
my $tmp = "mm3";
&xor ($dat,&DWP(12,"ecx")); # merge input data
&xor ("ebx",&DWP(8,"ecx"));
&pxor ($Zhi,&QWP(0,"ecx"));
&lea ("ecx",&DWP(16,"ecx")); # inp+=16
#&mov (&DWP(528+12,"esp"),$dat); # save inp^Xi
&mov (&DWP(528+8,"esp"),"ebx");
&movq (&QWP(528+0,"esp"),$Zhi);
&mov (&DWP(528+16+4,"esp"),"ecx"); # save inp
&xor ($nlo,$nlo);
&rol ($dat,8);
&mov (&LB($nlo),&LB($dat));
&mov ($nhi[1],$nlo);
&and (&LB($nlo),0x0f);
&shr ($nhi[1],4);
&pxor ($red[0],$red[0]);
&rol ($dat,8); # next byte
&pxor ($red[1],$red[1]);
&pxor ($red[2],$red[2]);
# Just like in "May" version modulo-schedule for critical path in
# 'Z.hi ^= rem_8bit[Z.lo&0xff^((u8)H[nhi]<<4)]<<48'. Final 'pxor'
# is scheduled so late that rem_8bit[] has to be shifted *right*
# by 16, which is why last argument to pinsrw is 2, which
# corresponds to <<32=<<48>>16...
for ($j=11,$i=0;$i<15;$i++) {
if ($i>0) {
&pxor ($Zlo,&QWP(16,"esp",$nlo,8)); # Z^=H[nlo]
&rol ($dat,8); # next byte
&pxor ($Zhi,&QWP(16+128,"esp",$nlo,8));
&pxor ($Zlo,$tmp);
&pxor ($Zhi,&QWP(16+256+128,"esp",$nhi[0],8));
&xor (&LB($rem[1]),&BP(0,"esp",$nhi[0])); # rem^(H[nhi]<<4)
} else {
&movq ($Zlo,&QWP(16,"esp",$nlo,8));
&movq ($Zhi,&QWP(16+128,"esp",$nlo,8));
}
&mov (&LB($nlo),&LB($dat));
&mov ($dat,&DWP(528+$j,"esp")) if (--$j%4==0);
&movd ($rem[0],$Zlo);
&movz ($rem[1],&LB($rem[1])) if ($i>0);
&psrlq ($Zlo,8); # Z>>=8
&movq ($tmp,$Zhi);
&mov ($nhi[0],$nlo);
&psrlq ($Zhi,8);
&pxor ($Zlo,&QWP(16+256+0,"esp",$nhi[1],8)); # Z^=H[nhi]>>4
&and (&LB($nlo),0x0f);
&psllq ($tmp,56);
&pxor ($Zhi,$red[1]) if ($i>1);
&shr ($nhi[0],4);
&pinsrw ($red[0],&WP(0,$rem_8bit,$rem[1],2),2) if ($i>0);
unshift (@red,pop(@red)); # "rotate" registers
unshift (@rem,pop(@rem));
unshift (@nhi,pop(@nhi));
}
&pxor ($Zlo,&QWP(16,"esp",$nlo,8)); # Z^=H[nlo]
&pxor ($Zhi,&QWP(16+128,"esp",$nlo,8));
&xor (&LB($rem[1]),&BP(0,"esp",$nhi[0])); # rem^(H[nhi]<<4)
&pxor ($Zlo,$tmp);
&pxor ($Zhi,&QWP(16+256+128,"esp",$nhi[0],8));
&movz ($rem[1],&LB($rem[1]));
&pxor ($red[2],$red[2]); # clear 2nd word
&psllq ($red[1],4);
&movd ($rem[0],$Zlo);
&psrlq ($Zlo,4); # Z>>=4
&movq ($tmp,$Zhi);
&psrlq ($Zhi,4);
&shl ($rem[0],4); # rem<<4
&pxor ($Zlo,&QWP(16,"esp",$nhi[1],8)); # Z^=H[nhi]
&psllq ($tmp,60);
&movz ($rem[0],&LB($rem[0]));
&pxor ($Zlo,$tmp);
&pxor ($Zhi,&QWP(16+128,"esp",$nhi[1],8));
&pinsrw ($red[0],&WP(0,$rem_8bit,$rem[1],2),2);
&pxor ($Zhi,$red[1]);
&movd ($dat,$Zlo);
&pinsrw ($red[2],&WP(0,$rem_8bit,$rem[0],2),3); # last is <<48
&psllq ($red[0],12); # correct by <<16>>4
&pxor ($Zhi,$red[0]);
&psrlq ($Zlo,32);
&pxor ($Zhi,$red[2]);
&mov ("ecx",&DWP(528+16+4,"esp")); # restore inp
&movd ("ebx",$Zlo);
&movq ($tmp,$Zhi); # 01234567
&psllw ($Zhi,8); # 1.3.5.7.
&psrlw ($tmp,8); # .0.2.4.6
&por ($Zhi,$tmp); # 10325476
&bswap ($dat);
&pshufw ($Zhi,$Zhi,0b00011011); # 76543210
&bswap ("ebx");
&cmp ("ecx",&DWP(528+16+8,"esp")); # are we done?
&jne (&label("outer"));
}
&mov ("eax",&DWP(528+16+0,"esp")); # restore Xi
&mov (&DWP(12,"eax"),"edx");
&mov (&DWP(8,"eax"),"ebx");
&movq (&QWP(0,"eax"),$Zhi);
&mov ("esp",&DWP(528+16+12,"esp")); # restore original %esp
&emms ();
}
&function_end("gcm_ghash_4bit_mmx");
}}
if ($sse2) {{
######################################################################
# PCLMULQDQ version.
@@ -1105,46 +654,7 @@ my ($Xhi,$Xi)=@_;
&set_label("bswap",64);
&data_byte(15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
&data_byte(1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2); # 0x1c2_polynomial
&set_label("rem_8bit",64);
&data_short(0x0000,0x01C2,0x0384,0x0246,0x0708,0x06CA,0x048C,0x054E);
&data_short(0x0E10,0x0FD2,0x0D94,0x0C56,0x0918,0x08DA,0x0A9C,0x0B5E);
&data_short(0x1C20,0x1DE2,0x1FA4,0x1E66,0x1B28,0x1AEA,0x18AC,0x196E);
&data_short(0x1230,0x13F2,0x11B4,0x1076,0x1538,0x14FA,0x16BC,0x177E);
&data_short(0x3840,0x3982,0x3BC4,0x3A06,0x3F48,0x3E8A,0x3CCC,0x3D0E);
&data_short(0x3650,0x3792,0x35D4,0x3416,0x3158,0x309A,0x32DC,0x331E);
&data_short(0x2460,0x25A2,0x27E4,0x2626,0x2368,0x22AA,0x20EC,0x212E);
&data_short(0x2A70,0x2BB2,0x29F4,0x2836,0x2D78,0x2CBA,0x2EFC,0x2F3E);
&data_short(0x7080,0x7142,0x7304,0x72C6,0x7788,0x764A,0x740C,0x75CE);
&data_short(0x7E90,0x7F52,0x7D14,0x7CD6,0x7998,0x785A,0x7A1C,0x7BDE);
&data_short(0x6CA0,0x6D62,0x6F24,0x6EE6,0x6BA8,0x6A6A,0x682C,0x69EE);
&data_short(0x62B0,0x6372,0x6134,0x60F6,0x65B8,0x647A,0x663C,0x67FE);
&data_short(0x48C0,0x4902,0x4B44,0x4A86,0x4FC8,0x4E0A,0x4C4C,0x4D8E);
&data_short(0x46D0,0x4712,0x4554,0x4496,0x41D8,0x401A,0x425C,0x439E);
&data_short(0x54E0,0x5522,0x5764,0x56A6,0x53E8,0x522A,0x506C,0x51AE);
&data_short(0x5AF0,0x5B32,0x5974,0x58B6,0x5DF8,0x5C3A,0x5E7C,0x5FBE);
&data_short(0xE100,0xE0C2,0xE284,0xE346,0xE608,0xE7CA,0xE58C,0xE44E);
&data_short(0xEF10,0xEED2,0xEC94,0xED56,0xE818,0xE9DA,0xEB9C,0xEA5E);
&data_short(0xFD20,0xFCE2,0xFEA4,0xFF66,0xFA28,0xFBEA,0xF9AC,0xF86E);
&data_short(0xF330,0xF2F2,0xF0B4,0xF176,0xF438,0xF5FA,0xF7BC,0xF67E);
&data_short(0xD940,0xD882,0xDAC4,0xDB06,0xDE48,0xDF8A,0xDDCC,0xDC0E);
&data_short(0xD750,0xD692,0xD4D4,0xD516,0xD058,0xD19A,0xD3DC,0xD21E);
&data_short(0xC560,0xC4A2,0xC6E4,0xC726,0xC268,0xC3AA,0xC1EC,0xC02E);
&data_short(0xCB70,0xCAB2,0xC8F4,0xC936,0xCC78,0xCDBA,0xCFFC,0xCE3E);
&data_short(0x9180,0x9042,0x9204,0x93C6,0x9688,0x974A,0x950C,0x94CE);
&data_short(0x9F90,0x9E52,0x9C14,0x9DD6,0x9898,0x995A,0x9B1C,0x9ADE);
&data_short(0x8DA0,0x8C62,0x8E24,0x8FE6,0x8AA8,0x8B6A,0x892C,0x88EE);
&data_short(0x83B0,0x8272,0x8034,0x81F6,0x84B8,0x857A,0x873C,0x86FE);
&data_short(0xA9C0,0xA802,0xAA44,0xAB86,0xAEC8,0xAF0A,0xAD4C,0xAC8E);
&data_short(0xA7D0,0xA612,0xA454,0xA596,0xA0D8,0xA11A,0xA35C,0xA29E);
&data_short(0xB5E0,0xB422,0xB664,0xB7A6,0xB2E8,0xB32A,0xB16C,0xB0AE);
&data_short(0xBBF0,0xBA32,0xB874,0xB9B6,0xBCF8,0xBD3A,0xBF7C,0xBEBE);
}} # $sse2
&set_label("rem_4bit",64);
&data_word(0,0x0000<<$S,0,0x1C20<<$S,0,0x3840<<$S,0,0x2460<<$S);
&data_word(0,0x7080<<$S,0,0x6CA0<<$S,0,0x48C0<<$S,0,0x54E0<<$S);
&data_word(0,0xE100<<$S,0,0xFD20<<$S,0,0xD940<<$S,0,0xC560<<$S);
&data_word(0,0x9180<<$S,0,0x8DA0<<$S,0,0xA9C0<<$S,0,0xB5E0<<$S);
}}} # !$x86only
&asciz("GHASH for x86, CRYPTOGAMS by <appro\@openssl.org>");

crypto/fipsmodule/modes/gcm.c

@@ -222,19 +222,9 @@ void CRYPTO_ghash_init(gmult_func *out_mult, ghash_func *out_hash,
}
#endif
#if defined(GHASH_ASM_X86)
// TODO(davidben): This implementation is not constant-time, but it is
// dramatically faster than |gcm_gmult_nohw|. See if we can get a
// constant-time SSE2 implementation to close this gap, or decide we don't
// care.
gcm_init_4bit(out_table, H.u);
*out_mult = gcm_gmult_4bit_mmx;
*out_hash = gcm_ghash_4bit_mmx;
#else
gcm_init_nohw(out_table, H.u);
*out_mult = gcm_gmult_nohw;
*out_hash = gcm_ghash_nohw;
#endif
}
void CRYPTO_gcm128_init_key(GCM128_KEY *gcm_key, const AES_KEY *aes_key,

crypto/fipsmodule/modes/gcm_nohw.c

@@ -17,6 +17,10 @@
#include "../../internal.h"
#include "internal.h"
#if !defined(BORINGSSL_HAS_UINT128) && defined(__SSE2__)
#include <emmintrin.h>
#endif
// This file contains a constant-time implementation of GHASH based on the notes
// in https://bearssl.org/constanttime.html#ghash-for-gcm and the reduction
@@ -75,7 +79,74 @@ static void gcm_mul64_nohw(uint64_t *out_lo, uint64_t *out_hi, uint64_t a,
((uint64_t)(extra >> 64));
}
#else // !BORINGSSL_HAS_UINT128
#elif defined(__SSE2__)
static __m128i gcm_mul32_nohw(uint32_t a, uint32_t b) {
// One term every four bits means the largest term is 32/4 = 8, which does not
// overflow into the next term.
__m128i aa = _mm_setr_epi32(a, 0, a, 0);
__m128i bb = _mm_setr_epi32(b, 0, b, 0);
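// _mm_mul_epu32 multiplies only the even 32-bit lanes (0 and 2), so placing
// a and b in both even lanes lets each multiply below compute two masked
// sub-products at once.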
__m128i a0a0 =
_mm_and_si128(aa, _mm_setr_epi32(0x11111111, 0, 0x11111111, 0));
__m128i a2a2 =
_mm_and_si128(aa, _mm_setr_epi32(0x44444444, 0, 0x44444444, 0));
__m128i b0b1 =
_mm_and_si128(bb, _mm_setr_epi32(0x11111111, 0, 0x22222222, 0));
__m128i b2b3 =
_mm_and_si128(bb, _mm_setr_epi32(0x44444444, 0, 0x88888888, 0));
__m128i c0c1 =
_mm_xor_si128(_mm_mul_epu32(a0a0, b0b1), _mm_mul_epu32(a2a2, b2b3));
__m128i c2c3 =
_mm_xor_si128(_mm_mul_epu32(a2a2, b0b1), _mm_mul_epu32(a0a0, b2b3));
__m128i a1a1 =
_mm_and_si128(aa, _mm_setr_epi32(0x22222222, 0, 0x22222222, 0));
__m128i a3a3 =
_mm_and_si128(aa, _mm_setr_epi32(0x88888888, 0, 0x88888888, 0));
__m128i b3b0 =
_mm_and_si128(bb, _mm_setr_epi32(0x88888888, 0, 0x11111111, 0));
__m128i b1b2 =
_mm_and_si128(bb, _mm_setr_epi32(0x22222222, 0, 0x44444444, 0));
c0c1 = _mm_xor_si128(c0c1, _mm_mul_epu32(a1a1, b3b0));
c0c1 = _mm_xor_si128(c0c1, _mm_mul_epu32(a3a3, b1b2));
c2c3 = _mm_xor_si128(c2c3, _mm_mul_epu32(a3a3, b3b0));
c2c3 = _mm_xor_si128(c2c3, _mm_mul_epu32(a1a1, b1b2));
c0c1 = _mm_and_si128(
c0c1, _mm_setr_epi32(0x11111111, 0x11111111, 0x22222222, 0x22222222));
c2c3 = _mm_and_si128(
c2c3, _mm_setr_epi32(0x44444444, 0x44444444, 0x88888888, 0x88888888));
c0c1 = _mm_xor_si128(c0c1, c2c3);
// c0 ^= c1
c0c1 = _mm_xor_si128(c0c1, _mm_srli_si128(c0c1, 8));
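// The class masks are disjoint, so XOR doubles as OR here: the low 64 bits
// now hold all four bit classes, i.e. the full 64-bit carry-less product.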
return c0c1;
}
static void gcm_mul64_nohw(uint64_t *out_lo, uint64_t *out_hi, uint64_t a,
uint64_t b) {
uint32_t a0 = a & 0xffffffff;
uint32_t a1 = a >> 32;
uint32_t b0 = b & 0xffffffff;
uint32_t b1 = b >> 32;
// Karatsuba multiplication.
__m128i lo = gcm_mul32_nohw(a0, b0);
__m128i hi = gcm_mul32_nohw(a1, b1);
__m128i mid = gcm_mul32_nohw(a0 ^ a1, b0 ^ b1);
mid = _mm_xor_si128(mid, lo);
mid = _mm_xor_si128(mid, hi);
__m128i ret = _mm_unpacklo_epi64(lo, hi);
mid = _mm_slli_si128(mid, 4);
mid = _mm_and_si128(mid, _mm_setr_epi32(0, 0xffffffff, 0xffffffff, 0));
ret = _mm_xor_si128(ret, mid);
memcpy(out_lo, &ret, 8);
memcpy(out_hi, ((char*)&ret) + 8, 8);
}
#else // !BORINGSSL_HAS_UINT128 && !__SSE2__
static uint64_t gcm_mul32_nohw(uint32_t a, uint32_t b) {
// One term every four bits means the largest term is 32/4 = 8, which does not
// overflow into the next term.

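The Karatsuba step in gcm_mul64_nohw above uses three 32-bit multiplies in
place of four; in GF(2) both addition and subtraction are XOR, so
mid = clmul(a0^a1, b0^b1) ^ lo ^ hi recovers the cross terms a0*b1 ^ a1*b0.
A scalar sketch of the same recombination, with clmul32 as a hypothetical
stand-in for gcm_mul32_nohw:

#include <stdint.h>

// Hypothetical 32x32 -> 64 carry-less multiply; stands in for gcm_mul32_nohw.
uint64_t clmul32(uint32_t a, uint32_t b);

// Scalar view of the Karatsuba recombination in gcm_mul64_nohw.
static void clmul64_sketch(uint64_t *out_lo, uint64_t *out_hi, uint64_t a,
                           uint64_t b) {
  uint32_t a0 = (uint32_t)a, a1 = (uint32_t)(a >> 32);
  uint32_t b0 = (uint32_t)b, b1 = (uint32_t)(b >> 32);
  uint64_t lo = clmul32(a0, b0);
  uint64_t hi = clmul32(a1, b1);
  uint64_t mid = clmul32(a0 ^ a1, b0 ^ b1) ^ lo ^ hi;
  // Product = lo ^ (mid << 32) ^ (hi << 64); mid straddles the word boundary.
  // The _mm_slli_si128/_mm_and_si128 pair above implements the same split.
  *out_lo = lo ^ (mid << 32);
  *out_hi = hi ^ (mid >> 32);
}
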
crypto/fipsmodule/modes/gcm_test.cc

@@ -135,14 +135,6 @@ TEST(GCMTest, ABI) {
};
alignas(16) u128 Htable[16];
#if defined(GHASH_ASM_X86)
CHECK_ABI(gcm_init_4bit, Htable, kH);
CHECK_ABI(gcm_gmult_4bit_mmx, X, Htable);
for (size_t blocks : kBlockCounts) {
CHECK_ABI(gcm_ghash_4bit_mmx, X, Htable, buf, 16 * blocks);
}
#endif // GHASH_ASM_X86
#if defined(GHASH_ASM_X86) || defined(GHASH_ASM_X86_64)
if (gcm_ssse3_capable()) {
CHECK_ABI_SEH(gcm_init_ssse3, Htable, kH);

crypto/fipsmodule/modes/internal.h

@@ -304,9 +304,6 @@ size_t aesni_gcm_decrypt(const uint8_t *in, uint8_t *out, size_t len,
#if defined(OPENSSL_X86)
#define GHASH_ASM_X86
void gcm_gmult_4bit_mmx(uint64_t Xi[2], const u128 Htable[16]);
void gcm_ghash_4bit_mmx(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
size_t len);
#endif // OPENSSL_X86
#elif defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64)