diff --git a/crypto/cipher/e_rc4.c b/crypto/cipher/e_rc4.c index e99d60318..39970bec2 100644 --- a/crypto/cipher/e_rc4.c +++ b/crypto/cipher/e_rc4.c @@ -161,14 +161,6 @@ static void aead_rc4_md5_tls_cleanup(EVP_AEAD_CTX *ctx) { OPENSSL_free(rc4_ctx); } -#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) -#define STITCHED_CALL - -/* rc4_md5_enc is defined in rc4_md5-x86_64.pl */ -void rc4_md5_enc(RC4_KEY *key, const void *in0, void *out, MD5_CTX *ctx, - const void *inp, size_t blocks); -#endif - static int aead_rc4_md5_tls_seal(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len, size_t max_out_len, const uint8_t *nonce, size_t nonce_len, @@ -216,38 +208,6 @@ static int aead_rc4_md5_tls_seal(const EVP_AEAD_CTX *ctx, uint8_t *out, ad_extra[1] = (uint8_t)(in_len & 0xff); MD5_Update(&md, ad_extra, sizeof(ad_extra)); -#if defined(STITCHED_CALL) - /* 32 is $MOD from rc4_md5-x86_64.pl. */ - rc4_off = 32 - 1 - (rc4_ctx->rc4.x & (32 - 1)); - md5_off = MD5_CBLOCK - md.num; - /* Ensure RC4 is behind MD5. */ - if (rc4_off > md5_off) { - md5_off += MD5_CBLOCK; - } - assert(md5_off >= rc4_off); - - if (in_len > md5_off && (blocks = (in_len - md5_off) / MD5_CBLOCK) && - (OPENSSL_ia32cap_P[0] & (1 << 20)) == 0) { - /* Process the initial portions of the plaintext normally. */ - MD5_Update(&md, in, md5_off); - RC4(&rc4_ctx->rc4, rc4_off, in, out); - - /* Process the next |blocks| blocks of plaintext with stitched routines. */ - rc4_md5_enc(&rc4_ctx->rc4, in + rc4_off, out + rc4_off, &md, in + md5_off, - blocks); - blocks *= MD5_CBLOCK; - rc4_off += blocks; - md5_off += blocks; - md.Nh += blocks >> 29; - md.Nl += blocks <<= 3; - if (md.Nl < (unsigned int)blocks) { - md.Nh++; - } - } else { - rc4_off = 0; - md5_off = 0; - } -#endif /* Finish computing the MAC. */ MD5_Update(&md, in + md5_off, in_len - md5_off); MD5_Final(digest, &md); @@ -318,43 +278,6 @@ static int aead_rc4_md5_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out, ad_extra[1] = (uint8_t)(plaintext_len & 0xff); MD5_Update(&md, ad_extra, sizeof(ad_extra)); -#if defined(STITCHED_CALL) - rc4_off = 32 - 1 - (rc4_ctx->rc4.x & (32 - 1)); - md5_off = MD5_CBLOCK - md.num; - /* Ensure MD5 is a full block behind RC4 so it has plaintext to operate on in - * both normal and stitched routines. */ - if (md5_off > rc4_off) { - rc4_off += 2 * MD5_CBLOCK; - } else { - rc4_off += MD5_CBLOCK; - } - - if (in_len > rc4_off && (blocks = (in_len - rc4_off) / MD5_CBLOCK) && - (OPENSSL_ia32cap_P[0] & (1 << 20)) == 0) { - /* Decrypt the initial portion of the ciphertext and digest the plaintext - * normally. */ - RC4(&rc4_ctx->rc4, rc4_off, in, out); - MD5_Update(&md, out, md5_off); - - /* Decrypt and digest the next |blocks| blocks of ciphertext with the - * stitched routines. */ - rc4_md5_enc(&rc4_ctx->rc4, in + rc4_off, out + rc4_off, &md, out + md5_off, - blocks); - blocks *= MD5_CBLOCK; - rc4_off += blocks; - md5_off += blocks; - l = (md.Nl + (blocks << 3)) & 0xffffffffU; - if (l < md.Nl) { - md.Nh++; - } - md.Nl = l; - md.Nh += blocks >> 29; - } else { - md5_off = 0; - rc4_off = 0; - } -#endif - /* Process the remainder of the input. 
*/ RC4(&rc4_ctx->rc4, in_len - rc4_off, in + rc4_off, out + rc4_off); MD5_Update(&md, out + md5_off, plaintext_len - md5_off); diff --git a/crypto/rc4/CMakeLists.txt b/crypto/rc4/CMakeLists.txt index a208e960d..a008fe535 100644 --- a/crypto/rc4/CMakeLists.txt +++ b/crypto/rc4/CMakeLists.txt @@ -1,31 +1,9 @@ include_directories(../../include) -if (${ARCH} STREQUAL "x86_64") - set( - RC4_ARCH_SOURCES - - rc4-x86_64.${ASM_EXT} - rc4-md5-x86_64.${ASM_EXT} - ) -endif() - -if (${ARCH} STREQUAL "x86") - set( - RC4_ARCH_SOURCES - - rc4-586.${ASM_EXT} - ) -endif() - add_library( rc4 OBJECT rc4.c - ${RC4_ARCH_SOURCES} ) - -perlasm(rc4-x86_64.${ASM_EXT} asm/rc4-x86_64.pl) -perlasm(rc4-md5-x86_64.${ASM_EXT} asm/rc4-md5-x86_64.pl) -perlasm(rc4-586.${ASM_EXT} asm/rc4-586.pl) diff --git a/crypto/rc4/asm/rc4-586.pl b/crypto/rc4/asm/rc4-586.pl deleted file mode 100644 index fc860ae2a..000000000 --- a/crypto/rc4/asm/rc4-586.pl +++ /dev/null @@ -1,414 +0,0 @@ -#!/usr/bin/env perl - -# ==================================================================== -# [Re]written by Andy Polyakov for the OpenSSL -# project. The module is, however, dual licensed under OpenSSL and -# CRYPTOGAMS licenses depending on where you obtain it. For further -# details see http://www.openssl.org/~appro/cryptogams/. -# ==================================================================== - -# At some point it became apparent that the original SSLeay RC4 -# assembler implementation performs suboptimally on the latest IA-32 -# microarchitectures. After re-tuning, performance has changed as -# follows: -# -# Pentium -10% -# Pentium III +12% -# AMD +50%(*) -# P4 +250%(**) -# -# (*) This number is actually a trade-off:-) It's possible to -# achieve +72%, but at the cost of -48% off PIII performance. -# In other words code performing further 13% faster on AMD -# would perform almost 2 times slower on Intel PIII... -# For reference! This code delivers ~80% of rc4-amd64.pl -# performance on the same Opteron machine. -# (**) This number requires compressed key schedule set up by -# RC4_set_key [see commentary below for further details]. -# -# - -# May 2011 -# -# Optimize for Core2 and Westmere [and incidentally Opteron]. Current -# performance in cycles per processed byte (less is better) and -# improvement relative to previous version of this module is: -# -# Pentium 10.2 # original numbers -# Pentium III 7.8(*) -# Intel P4 7.5 -# -# Opteron 6.1/+20% # new MMX numbers -# Core2 5.3/+67%(**) -# Westmere 5.1/+94%(**) -# Sandy Bridge 5.0/+8% -# Atom 12.6/+6% -# -# (*) PIII can actually deliver 6.6 cycles per byte with MMX code, -# but this specific code performs poorly on Core2. And vice -# versa, below MMX/SSE code delivering 5.8/7.1 on Core2 performs -# poorly on PIII, at 8.0/14.5:-( As PIII is not a "hot" CPU -# [anymore], I chose to discard PIII-specific code path and opt -# for original IALU-only code, which is why MMX/SSE code path -# is guarded by SSE2 bit (see below), not MMX/SSE. -# (**) Performance vs. block size on Core2 and Westmere had a maximum -# at ... 64 bytes block size. And it was quite a maximum, 40-60% -# in comparison to largest 8KB block size. Above improvement -# coefficients are for the largest block size.
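For orientation, every code path tuned in the notes above computes the same byte-at-a-time RC4 PRGA round. Below is a minimal C sketch of that round, with ad hoc names rather than BoringSSL's real API (the shipped C implementation lives in crypto/rc4/rc4.c):

#include <stddef.h>
#include <stdint.h>

typedef struct { uint8_t x, y, s[256]; } rc4_sketch_t;

/* One standard RC4 PRGA step per byte: advance x, accumulate S[x] into y,
 * swap S[x] and S[y], and the key-stream byte is S[(S[x] + S[y]) & 0xff]. */
static void rc4_stream_xor(rc4_sketch_t *key, size_t len, const uint8_t *in,
                           uint8_t *out) {
  uint8_t x = key->x, y = key->y;
  for (size_t i = 0; i < len; i++) {
    x++; /* uint8_t wraps mod 256 for free; cf. the 'inc %r8b' trick below */
    uint8_t tx = key->s[x];
    y += tx;
    uint8_t ty = key->s[y];
    key->s[x] = ty;
    key->s[y] = tx;
    out[i] = in[i] ^ key->s[(uint8_t)(tx + ty)];
  }
  key->x = x;
  key->y = y;
}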
- -$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; -push(@INC,"${dir}","${dir}../../perlasm"); -require "x86asm.pl"; - -&asm_init($ARGV[0],"rc4-586.pl",$x86only = $ARGV[$#ARGV] eq "386"); - -$xx="eax"; -$yy="ebx"; -$tx="ecx"; -$ty="edx"; -$inp="esi"; -$out="ebp"; -$dat="edi"; - -sub RC4_loop { - my $i=shift; - my $func = ($i==0)?*mov:*or; - - &add (&LB($yy),&LB($tx)); - &mov ($ty,&DWP(0,$dat,$yy,4)); - &mov (&DWP(0,$dat,$yy,4),$tx); - &mov (&DWP(0,$dat,$xx,4),$ty); - &add ($ty,$tx); - &inc (&LB($xx)); - &and ($ty,0xff); - &ror ($out,8) if ($i!=0); - if ($i<3) { - &mov ($tx,&DWP(0,$dat,$xx,4)); - } else { - &mov ($tx,&wparam(3)); # reload [re-biased] out - } - &$func ($out,&DWP(0,$dat,$ty,4)); -} - -if ($alt=0) { - # >20% faster on Atom and Sandy Bridge[!], 8% faster on Opteron, - # but ~40% slower on Core2 and Westmere... Attempt to add movz - # brings down Opteron by 25%, Atom and Sandy Bridge by 15%, yet - # on Core2 with movz it's almost 20% slower than below alternative - # code... Yes, it's a total mess... - my @XX=($xx,$out); - $RC4_loop_mmx = sub { # SSE actually... - my $i=shift; - my $j=$i<=0?0:$i>>1; - my $mm=$i<=0?"mm0":"mm".($i&1); - - &add (&LB($yy),&LB($tx)); - &lea (@XX[1],&DWP(1,@XX[0])); - &pxor ("mm2","mm0") if ($i==0); - &psllq ("mm1",8) if ($i==0); - &and (@XX[1],0xff); - &pxor ("mm0","mm0") if ($i<=0); - &mov ($ty,&DWP(0,$dat,$yy,4)); - &mov (&DWP(0,$dat,$yy,4),$tx); - &pxor ("mm1","mm2") if ($i==0); - &mov (&DWP(0,$dat,$XX[0],4),$ty); - &add (&LB($ty),&LB($tx)); - &movd (@XX[0],"mm7") if ($i==0); - &mov ($tx,&DWP(0,$dat,@XX[1],4)); - &pxor ("mm1","mm1") if ($i==1); - &movq ("mm2",&QWP(0,$inp)) if ($i==1); - &movq (&QWP(-8,(@XX[0],$inp)),"mm1") if ($i==0); - &pinsrw ($mm,&DWP(0,$dat,$ty,4),$j); - - push (@XX,shift(@XX)) if ($i>=0); - } -} else { - # Using pinsrw here improves performance on Intel CPUs by 2-3%, but - # brings down AMD by 7%... - $RC4_loop_mmx = sub { - my $i=shift; - - &add (&LB($yy),&LB($tx)); - &psllq ("mm1",8*(($i-1)&7)) if (abs($i)!=1); - &mov ($ty,&DWP(0,$dat,$yy,4)); - &mov (&DWP(0,$dat,$yy,4),$tx); - &mov (&DWP(0,$dat,$xx,4),$ty); - &inc ($xx); - &add ($ty,$tx); - &movz ($xx,&LB($xx)); # (*) - &movz ($ty,&LB($ty)); # (*) - &pxor ("mm2",$i==1?"mm0":"mm1") if ($i>=0); - &movq ("mm0",&QWP(0,$inp)) if ($i<=0); - &movq (&QWP(-8,($out,$inp)),"mm2") if ($i==0); - &mov ($tx,&DWP(0,$dat,$xx,4)); - &movd ($i>0?"mm1":"mm2",&DWP(0,$dat,$ty,4)); - - # (*) This is the key to Core2 and Westmere performance. - # Without movz out-of-order execution logic confuses - # itself and fails to reorder loads and stores. Problem - # appears to be fixed in Sandy Bridge... - } -} - -&external_label("OPENSSL_ia32cap_P"); - -# void asm_RC4(RC4_KEY *key,size_t len,const unsigned char *inp,unsigned char *out); -&function_begin("asm_RC4"); - &mov ($dat,&wparam(0)); # load key schedule pointer - &mov ($ty, &wparam(1)); # load len - &mov ($inp,&wparam(2)); # load inp - &mov ($out,&wparam(3)); # load out - - &xor ($xx,$xx); # avoid partial register stalls - &xor ($yy,$yy); - - &cmp ($ty,0); # safety net - &je (&label("abort")); - - &mov (&LB($xx),&BP(0,$dat)); # load key->x - &mov (&LB($yy),&BP(4,$dat)); # load key->y - &add ($dat,8); - - &lea ($tx,&DWP(0,$inp,$ty)); - &sub ($out,$inp); # re-bias out - &mov (&wparam(1),$tx); # save input+len - - &inc (&LB($xx)); - - # detect compressed key schedule... - &cmp (&DWP(256,$dat),-1); - &je (&label("RC4_CHAR")); - - &mov ($tx,&DWP(0,$dat,$xx,4)); - - &and ($ty,-4); # how many 4-byte chunks?
- &jz (&label("loop1")); - - &mov (&wparam(3),$out); # $out as accumulator in these loops - if ($x86only) { - &jmp (&label("go4loop4")); - } else { - &test ($ty,-8); - &jz (&label("go4loop4")); - - &picmeup($out,"OPENSSL_ia32cap_P"); - &bt (&DWP(0,$out),26); # check SSE2 bit [could have been MMX] - &jnc (&label("go4loop4")); - - &mov ($out,&wparam(3)) if (!$alt); - &movd ("mm7",&wparam(3)) if ($alt); - &and ($ty,-8); - &lea ($ty,&DWP(-8,$inp,$ty)); - &mov (&DWP(-4,$dat),$ty); # save input+(len/8)*8-8 - - &$RC4_loop_mmx(-1); - &jmp(&label("loop_mmx_enter")); - - &set_label("loop_mmx",16); - &$RC4_loop_mmx(0); - &set_label("loop_mmx_enter"); - for ($i=1;$i<8;$i++) { &$RC4_loop_mmx($i); } - &mov ($ty,$yy); - &xor ($yy,$yy); # this is second key to Core2 - &mov (&LB($yy),&LB($ty)); # and Westmere performance... - &cmp ($inp,&DWP(-4,$dat)); - &lea ($inp,&DWP(8,$inp)); - &jb (&label("loop_mmx")); - - if ($alt) { - &movd ($out,"mm7"); - &pxor ("mm2","mm0"); - &psllq ("mm1",8); - &pxor ("mm1","mm2"); - &movq (&QWP(-8,$out,$inp),"mm1"); - } else { - &psllq ("mm1",56); - &pxor ("mm2","mm1"); - &movq (&QWP(-8,$out,$inp),"mm2"); - } - &emms (); - - &cmp ($inp,&wparam(1)); # compare to input+len - &je (&label("done")); - &jmp (&label("loop1")); - } - -&set_label("go4loop4",16); - &lea ($ty,&DWP(-4,$inp,$ty)); - &mov (&wparam(2),$ty); # save input+(len/4)*4-4 - - &set_label("loop4"); - for ($i=0;$i<4;$i++) { RC4_loop($i); } - &ror ($out,8); - &xor ($out,&DWP(0,$inp)); - &cmp ($inp,&wparam(2)); # compare to input+(len/4)*4-4 - &mov (&DWP(0,$tx,$inp),$out);# $tx holds re-biased out here - &lea ($inp,&DWP(4,$inp)); - &mov ($tx,&DWP(0,$dat,$xx,4)); - &jb (&label("loop4")); - - &cmp ($inp,&wparam(1)); # compare to input+len - &je (&label("done")); - &mov ($out,&wparam(3)); # restore $out - - &set_label("loop1",16); - &add (&LB($yy),&LB($tx)); - &mov ($ty,&DWP(0,$dat,$yy,4)); - &mov (&DWP(0,$dat,$yy,4),$tx); - &mov (&DWP(0,$dat,$xx,4),$ty); - &add ($ty,$tx); - &inc (&LB($xx)); - &and ($ty,0xff); - &mov ($ty,&DWP(0,$dat,$ty,4)); - &xor (&LB($ty),&BP(0,$inp)); - &lea ($inp,&DWP(1,$inp)); - &mov ($tx,&DWP(0,$dat,$xx,4)); - &cmp ($inp,&wparam(1)); # compare to input+len - &mov (&BP(-1,$out,$inp),&LB($ty)); - &jb (&label("loop1")); - - &jmp (&label("done")); - -# this is essentially Intel P4 specific codepath... -&set_label("RC4_CHAR",16); - &movz ($tx,&BP(0,$dat,$xx)); - # strangely enough unrolled loop performs over 20% slower... 
- &set_label("cloop1"); - &add (&LB($yy),&LB($tx)); - &movz ($ty,&BP(0,$dat,$yy)); - &mov (&BP(0,$dat,$yy),&LB($tx)); - &mov (&BP(0,$dat,$xx),&LB($ty)); - &add (&LB($ty),&LB($tx)); - &movz ($ty,&BP(0,$dat,$ty)); - &add (&LB($xx),1); - &xor (&LB($ty),&BP(0,$inp)); - &lea ($inp,&DWP(1,$inp)); - &movz ($tx,&BP(0,$dat,$xx)); - &cmp ($inp,&wparam(1)); - &mov (&BP(-1,$out,$inp),&LB($ty)); - &jb (&label("cloop1")); - -&set_label("done"); - &dec (&LB($xx)); - &mov (&DWP(-4,$dat),$yy); # save key->y - &mov (&BP(-8,$dat),&LB($xx)); # save key->x -&set_label("abort"); -&function_end("asm_RC4"); - -######################################################################## - -$inp="esi"; -$out="edi"; -$idi="ebp"; -$ido="ecx"; -$idx="edx"; - -# void asm_RC4_set_key(RC4_KEY *key,int len,const unsigned char *data); -&function_begin("asm_RC4_set_key"); - &mov ($out,&wparam(0)); # load key - &mov ($idi,&wparam(1)); # load len - &mov ($inp,&wparam(2)); # load data - &picmeup($idx,"OPENSSL_ia32cap_P"); - - &lea ($out,&DWP(2*4,$out)); # &key->data - &lea ($inp,&DWP(0,$inp,$idi)); # $inp to point at the end - &neg ($idi); - &xor ("eax","eax"); - &mov (&DWP(-4,$out),$idi); # borrow key->y - - &bt (&DWP(0,$idx),20); # check for bit#20 - &jc (&label("c1stloop")); - -&set_label("w1stloop",16); - &mov (&DWP(0,$out,"eax",4),"eax"); # key->data[i]=i; - &add (&LB("eax"),1); # i++; - &jnc (&label("w1stloop")); - - &xor ($ido,$ido); - &xor ($idx,$idx); - -&set_label("w2ndloop",16); - &mov ("eax",&DWP(0,$out,$ido,4)); - &add (&LB($idx),&BP(0,$inp,$idi)); - &add (&LB($idx),&LB("eax")); - &add ($idi,1); - &mov ("ebx",&DWP(0,$out,$idx,4)); - &jnz (&label("wnowrap")); - &mov ($idi,&DWP(-4,$out)); - &set_label("wnowrap"); - &mov (&DWP(0,$out,$idx,4),"eax"); - &mov (&DWP(0,$out,$ido,4),"ebx"); - &add (&LB($ido),1); - &jnc (&label("w2ndloop")); -&jmp (&label("exit")); - -# Unlike all other x86 [and x86_64] implementations, Intel P4 core -# [including EM64T] was found to perform poorly with above "32-bit" key -# schedule, a.k.a. RC4_INT. Performance improvement for IA-32 hand-coded -# assembler turned out to be 3.5x if re-coded for compressed 8-bit one, -# a.k.a. RC4_CHAR! It's however inappropriate to just switch to 8-bit -# schedule for x86[_64], because non-P4 implementations suffer from -# significant performance losses then, e.g. PIII exhibits >2x -# deterioration, and so does Opteron. In order to assure optimal -# all-round performance, we detect P4 at run-time and set up compressed -# key schedule, which is recognized by RC4 procedure. 
- -&set_label("c1stloop",16); - &mov (&BP(0,$out,"eax"),&LB("eax")); # key->data[i]=i; - &add (&LB("eax"),1); # i++; - &jnc (&label("c1stloop")); - - &xor ($ido,$ido); - &xor ($idx,$idx); - &xor ("ebx","ebx"); - -&set_label("c2ndloop",16); - &mov (&LB("eax"),&BP(0,$out,$ido)); - &add (&LB($idx),&BP(0,$inp,$idi)); - &add (&LB($idx),&LB("eax")); - &add ($idi,1); - &mov (&LB("ebx"),&BP(0,$out,$idx)); - &jnz (&label("cnowrap")); - &mov ($idi,&DWP(-4,$out)); - &set_label("cnowrap"); - &mov (&BP(0,$out,$idx),&LB("eax")); - &mov (&BP(0,$out,$ido),&LB("ebx")); - &add (&LB($ido),1); - &jnc (&label("c2ndloop")); - - &mov (&DWP(256,$out),-1); # mark schedule as compressed - -&set_label("exit"); - &xor ("eax","eax"); - &mov (&DWP(-8,$out),"eax"); # key->x=0; - &mov (&DWP(-4,$out),"eax"); # key->y=0; -&function_end("asm_RC4_set_key"); - -# const char *RC4_options(void); -&function_begin_B("RC4_options"); - &call (&label("pic_point")); -&set_label("pic_point"); - &blindpop("eax"); - &lea ("eax",&DWP(&label("opts")."-".&label("pic_point"),"eax")); - &picmeup("edx","OPENSSL_ia32cap_P"); - &mov ("edx",&DWP(0,"edx")); - &bt ("edx",20); - &jc (&label("1xchar")); - &bt ("edx",26); - &jnc (&label("ret")); - &add ("eax",25); - &ret (); -&set_label("1xchar"); - &add ("eax",12); -&set_label("ret"); - &ret (); -&set_label("opts",64); -&asciz ("rc4(4x,int)"); -&asciz ("rc4(1x,char)"); -&asciz ("rc4(8x,mmx)"); -&asciz ("RC4 for x86, CRYPTOGAMS by "); -&align (64); -&function_end_B("RC4_options"); - -&asm_finish(); - diff --git a/crypto/rc4/asm/rc4-md5-x86_64.pl b/crypto/rc4/asm/rc4-md5-x86_64.pl deleted file mode 100644 index 272fa91e1..000000000 --- a/crypto/rc4/asm/rc4-md5-x86_64.pl +++ /dev/null @@ -1,632 +0,0 @@ -#!/usr/bin/env perl -# -# ==================================================================== -# Written by Andy Polyakov for the OpenSSL -# project. The module is, however, dual licensed under OpenSSL and -# CRYPTOGAMS licenses depending on where you obtain it. For further -# details see http://www.openssl.org/~appro/cryptogams/. -# ==================================================================== - -# June 2011 -# -# This is RC4+MD5 "stitch" implementation. The idea, as spelled in -# http://download.intel.com/design/intarch/papers/323686.pdf, is that -# since both algorithms exhibit instruction-level parallelism, ILP, -# below theoretical maximum, interleaving them would allow to utilize -# processor resources better and achieve better performance. RC4 -# instruction sequence is virtually identical to rc4-x86_64.pl, which -# is heavily based on submission by Maxim Perminov, Maxim Locktyukhin -# and Jim Guilford of Intel. MD5 is fresh implementation aiming to -# minimize register usage, which was used as "main thread" with RC4 -# weaved into it, one RC4 round per one MD5 round. In addition to the -# stiched subroutine the script can generate standalone replacement -# md5_block_asm_data_order and RC4. Below are performance numbers in -# cycles per processed byte, less is better, for these the standalone -# subroutines, sum of them, and stitched one: -# -# RC4 MD5 RC4+MD5 stitch gain -# Opteron 6.5(*) 5.4 11.9 7.0 +70%(*) -# Core2 6.5 5.8 12.3 7.7 +60% -# Westmere 4.3 5.2 9.5 7.0 +36% -# Sandy Bridge 4.2 5.5 9.7 6.8 +43% -# Atom 9.3 6.5 15.8 11.1 +42% -# -# (*) rc4-x86_64.pl delivers 5.3 on Opteron, so real improvement -# is +53%... - -my ($rc4,$md5)=(1,1); # what to generate? -my $D="#" if (!$md5); # if set to "#", MD5 is stitched into RC4(), - # but its result is discarded. 
Idea here is - # to be able to use 'openssl speed rc4' for - # benchmarking the stitched subroutine... - -my $flavour = shift; -my $output = shift; -if ($flavour =~ /\./) { $output = $flavour; undef $flavour; } - -my $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/); - -$0 =~ m/(.*[\/\\])[^\/\\]+$/; my $dir=$1; my $xlate; -( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or -( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or -die "can't locate x86_64-xlate.pl"; - -open OUT,"| \"$^X\" $xlate $flavour $output"; -*STDOUT=*OUT; - -my ($dat,$in0,$out,$ctx,$inp,$len, $func,$nargs); - -if ($rc4 && !$md5) { - ($dat,$len,$in0,$out) = ("%rdi","%rsi","%rdx","%rcx"); - $func="RC4"; $nargs=4; -} elsif ($md5 && !$rc4) { - ($ctx,$inp,$len) = ("%rdi","%rsi","%rdx"); - $func="md5_block_asm_data_order"; $nargs=3; -} else { - ($dat,$in0,$out,$ctx,$inp,$len) = ("%rdi","%rsi","%rdx","%rcx","%r8","%r9"); - $func="rc4_md5_enc"; $nargs=6; - # void rc4_md5_enc( - # RC4_KEY *key, # - # const void *in0, # RC4 input - # void *out, # RC4 output - # MD5_CTX *ctx, # - # const void *inp, # MD5 input - # size_t len); # number of 64-byte blocks -} - -my @K=( 0xd76aa478,0xe8c7b756,0x242070db,0xc1bdceee, - 0xf57c0faf,0x4787c62a,0xa8304613,0xfd469501, - 0x698098d8,0x8b44f7af,0xffff5bb1,0x895cd7be, - 0x6b901122,0xfd987193,0xa679438e,0x49b40821, - - 0xf61e2562,0xc040b340,0x265e5a51,0xe9b6c7aa, - 0xd62f105d,0x02441453,0xd8a1e681,0xe7d3fbc8, - 0x21e1cde6,0xc33707d6,0xf4d50d87,0x455a14ed, - 0xa9e3e905,0xfcefa3f8,0x676f02d9,0x8d2a4c8a, - - 0xfffa3942,0x8771f681,0x6d9d6122,0xfde5380c, - 0xa4beea44,0x4bdecfa9,0xf6bb4b60,0xbebfbc70, - 0x289b7ec6,0xeaa127fa,0xd4ef3085,0x04881d05, - 0xd9d4d039,0xe6db99e5,0x1fa27cf8,0xc4ac5665, - - 0xf4292244,0x432aff97,0xab9423a7,0xfc93a039, - 0x655b59c3,0x8f0ccc92,0xffeff47d,0x85845dd1, - 0x6fa87e4f,0xfe2ce6e0,0xa3014314,0x4e0811a1, - 0xf7537e82,0xbd3af235,0x2ad7d2bb,0xeb86d391 ); - -my @V=("%r8d","%r9d","%r10d","%r11d"); # MD5 registers -my $tmp="%r12d"; - -my @XX=("%rbp","%rsi"); # RC4 registers -my @TX=("%rax","%rbx"); -my $YY="%rcx"; -my $TY="%rdx"; - -my $MOD=32; # 16, 32 or 64 - -$code.=<<___; -.text -.align 16 - -.globl $func -.type $func,\@function,$nargs -$func: - cmp \$0,$len - je .Labort - push %rbx - push %rbp - push %r12 - push %r13 - push %r14 - push %r15 - sub \$40,%rsp -.Lbody: -___ -if ($rc4) { -$code.=<<___; -$D#md5# mov $ctx,%r11 # reassign arguments - mov $len,%r12 - mov $in0,%r13 - mov $out,%r14 -$D#md5# mov $inp,%r15 -___ - $ctx="%r11" if ($md5); # reassign arguments - $len="%r12"; - $in0="%r13"; - $out="%r14"; - $inp="%r15" if ($md5); - $inp=$in0 if (!$md5); -$code.=<<___; - xor $XX[0],$XX[0] - xor $YY,$YY - - lea 8($dat),$dat - mov -8($dat),$XX[0]#b - mov -4($dat),$YY#b - - inc $XX[0]#b - sub $in0,$out - movl ($dat,$XX[0],4),$TX[0]#d -___ -$code.=<<___ if (!$md5); - xor $TX[1],$TX[1] - test \$-128,$len - jz .Loop1 - sub $XX[0],$TX[1] - and \$`$MOD-1`,$TX[1] - jz .Loop${MOD}_is_hot - sub $TX[1],$len -.Loop${MOD}_warmup: - add $TX[0]#b,$YY#b - movl ($dat,$YY,4),$TY#d - movl $TX[0]#d,($dat,$YY,4) - movl $TY#d,($dat,$XX[0],4) - add $TY#b,$TX[0]#b - inc $XX[0]#b - movl ($dat,$TX[0],4),$TY#d - movl ($dat,$XX[0],4),$TX[0]#d - xorb ($in0),$TY#b - movb $TY#b,($out,$in0) - lea 1($in0),$in0 - dec $TX[1] - jnz .Loop${MOD}_warmup - - mov $YY,$TX[1] - xor $YY,$YY - mov $TX[1]#b,$YY#b - -.Loop${MOD}_is_hot: - mov $len,32(%rsp) # save original $len - shr \$6,$len # number of 64-byte blocks -___ - if ($D && !$md5) { # stitch in dummy MD5 - $md5=1; - 
$ctx="%r11"; - $inp="%r15"; - $code.=<<___; - mov %rsp,$ctx - mov $in0,$inp -___ - } -} -$code.=<<___; -#rc4# add $TX[0]#b,$YY#b -#rc4# lea ($dat,$XX[0],4),$XX[1] - shl \$6,$len - add $inp,$len # pointer to the end of input - mov $len,16(%rsp) - -#md5# mov $ctx,24(%rsp) # save pointer to MD5_CTX -#md5# mov 0*4($ctx),$V[0] # load current hash value from MD5_CTX -#md5# mov 1*4($ctx),$V[1] -#md5# mov 2*4($ctx),$V[2] -#md5# mov 3*4($ctx),$V[3] - jmp .Loop - -.align 16 -.Loop: -#md5# mov $V[0],0*4(%rsp) # put aside current hash value -#md5# mov $V[1],1*4(%rsp) -#md5# mov $V[2],2*4(%rsp) -#md5# mov $V[3],$tmp # forward reference -#md5# mov $V[3],3*4(%rsp) -___ - -sub R0 { - my ($i,$a,$b,$c,$d)=@_; - my @rot0=(7,12,17,22); - my $j=$i%16; - my $k=$i%$MOD; - my $xmm="%xmm".($j&1); - $code.=" movdqu ($in0),%xmm2\n" if ($rc4 && $j==15); - $code.=" add \$$MOD,$XX[0]#b\n" if ($rc4 && $j==15 && $k==$MOD-1); - $code.=" pxor $xmm,$xmm\n" if ($rc4 && $j<=1); - $code.=<<___; -#rc4# movl ($dat,$YY,4),$TY#d -#md5# xor $c,$tmp -#rc4# movl $TX[0]#d,($dat,$YY,4) -#md5# and $b,$tmp -#md5# add 4*`$j`($inp),$a -#rc4# add $TY#b,$TX[0]#b -#rc4# movl `4*(($k+1)%$MOD)`(`$k==$MOD-1?"$dat,$XX[0],4":"$XX[1]"`),$TX[1]#d -#md5# add \$$K[$i],$a -#md5# xor $d,$tmp -#rc4# movz $TX[0]#b,$TX[0]#d -#rc4# movl $TY#d,4*$k($XX[1]) -#md5# add $tmp,$a -#rc4# add $TX[1]#b,$YY#b -#md5# rol \$$rot0[$j%4],$a -#md5# mov `$j==15?"$b":"$c"`,$tmp # forward reference -#rc4# pinsrw \$`($j>>1)&7`,($dat,$TX[0],4),$xmm\n -#md5# add $b,$a -___ - $code.=<<___ if ($rc4 && $j==15 && $k==$MOD-1); - mov $YY,$XX[1] - xor $YY,$YY # keyword to partial register - mov $XX[1]#b,$YY#b - lea ($dat,$XX[0],4),$XX[1] -___ - $code.=<<___ if ($rc4 && $j==15); - psllq \$8,%xmm1 - pxor %xmm0,%xmm2 - pxor %xmm1,%xmm2 -___ -} -sub R1 { - my ($i,$a,$b,$c,$d)=@_; - my @rot1=(5,9,14,20); - my $j=$i%16; - my $k=$i%$MOD; - my $xmm="%xmm".($j&1); - $code.=" movdqu 16($in0),%xmm3\n" if ($rc4 && $j==15); - $code.=" add \$$MOD,$XX[0]#b\n" if ($rc4 && $j==15 && $k==$MOD-1); - $code.=" pxor $xmm,$xmm\n" if ($rc4 && $j<=1); - $code.=<<___; -#rc4# movl ($dat,$YY,4),$TY#d -#md5# xor $b,$tmp -#rc4# movl $TX[0]#d,($dat,$YY,4) -#md5# and $d,$tmp -#md5# add 4*`((1+5*$j)%16)`($inp),$a -#rc4# add $TY#b,$TX[0]#b -#rc4# movl `4*(($k+1)%$MOD)`(`$k==$MOD-1?"$dat,$XX[0],4":"$XX[1]"`),$TX[1]#d -#md5# add \$$K[$i],$a -#md5# xor $c,$tmp -#rc4# movz $TX[0]#b,$TX[0]#d -#rc4# movl $TY#d,4*$k($XX[1]) -#md5# add $tmp,$a -#rc4# add $TX[1]#b,$YY#b -#md5# rol \$$rot1[$j%4],$a -#md5# mov `$j==15?"$c":"$b"`,$tmp # forward reference -#rc4# pinsrw \$`($j>>1)&7`,($dat,$TX[0],4),$xmm\n -#md5# add $b,$a -___ - $code.=<<___ if ($rc4 && $j==15 && $k==$MOD-1); - mov $YY,$XX[1] - xor $YY,$YY # keyword to partial register - mov $XX[1]#b,$YY#b - lea ($dat,$XX[0],4),$XX[1] -___ - $code.=<<___ if ($rc4 && $j==15); - psllq \$8,%xmm1 - pxor %xmm0,%xmm3 - pxor %xmm1,%xmm3 -___ -} -sub R2 { - my ($i,$a,$b,$c,$d)=@_; - my @rot2=(4,11,16,23); - my $j=$i%16; - my $k=$i%$MOD; - my $xmm="%xmm".($j&1); - $code.=" movdqu 32($in0),%xmm4\n" if ($rc4 && $j==15); - $code.=" add \$$MOD,$XX[0]#b\n" if ($rc4 && $j==15 && $k==$MOD-1); - $code.=" pxor $xmm,$xmm\n" if ($rc4 && $j<=1); - $code.=<<___; -#rc4# movl ($dat,$YY,4),$TY#d -#md5# xor $c,$tmp -#rc4# movl $TX[0]#d,($dat,$YY,4) -#md5# xor $b,$tmp -#md5# add 4*`((5+3*$j)%16)`($inp),$a -#rc4# add $TY#b,$TX[0]#b -#rc4# movl `4*(($k+1)%$MOD)`(`$k==$MOD-1?"$dat,$XX[0],4":"$XX[1]"`),$TX[1]#d -#md5# add \$$K[$i],$a -#rc4# movz $TX[0]#b,$TX[0]#d -#md5# add $tmp,$a -#rc4# movl 
$TY#d,4*$k($XX[1]) -#rc4# add $TX[1]#b,$YY#b -#md5# rol \$$rot2[$j%4],$a -#md5# mov `$j==15?"\\\$-1":"$c"`,$tmp # forward reference -#rc4# pinsrw \$`($j>>1)&7`,($dat,$TX[0],4),$xmm\n -#md5# add $b,$a -___ - $code.=<<___ if ($rc4 && $j==15 && $k==$MOD-1); - mov $YY,$XX[1] - xor $YY,$YY # keyword to partial register - mov $XX[1]#b,$YY#b - lea ($dat,$XX[0],4),$XX[1] -___ - $code.=<<___ if ($rc4 && $j==15); - psllq \$8,%xmm1 - pxor %xmm0,%xmm4 - pxor %xmm1,%xmm4 -___ -} -sub R3 { - my ($i,$a,$b,$c,$d)=@_; - my @rot3=(6,10,15,21); - my $j=$i%16; - my $k=$i%$MOD; - my $xmm="%xmm".($j&1); - $code.=" movdqu 48($in0),%xmm5\n" if ($rc4 && $j==15); - $code.=" add \$$MOD,$XX[0]#b\n" if ($rc4 && $j==15 && $k==$MOD-1); - $code.=" pxor $xmm,$xmm\n" if ($rc4 && $j<=1); - $code.=<<___; -#rc4# movl ($dat,$YY,4),$TY#d -#md5# xor $d,$tmp -#rc4# movl $TX[0]#d,($dat,$YY,4) -#md5# or $b,$tmp -#md5# add 4*`((7*$j)%16)`($inp),$a -#rc4# add $TY#b,$TX[0]#b -#rc4# movl `4*(($k+1)%$MOD)`(`$k==$MOD-1?"$dat,$XX[0],4":"$XX[1]"`),$TX[1]#d -#md5# add \$$K[$i],$a -#rc4# movz $TX[0]#b,$TX[0]#d -#md5# xor $c,$tmp -#rc4# movl $TY#d,4*$k($XX[1]) -#md5# add $tmp,$a -#rc4# add $TX[1]#b,$YY#b -#md5# rol \$$rot3[$j%4],$a -#md5# mov \$-1,$tmp # forward reference -#rc4# pinsrw \$`($j>>1)&7`,($dat,$TX[0],4),$xmm\n -#md5# add $b,$a -___ - $code.=<<___ if ($rc4 && $j==15); - mov $XX[0],$XX[1] - xor $XX[0],$XX[0] # keyword to partial register - mov $XX[1]#b,$XX[0]#b - mov $YY,$XX[1] - xor $YY,$YY # keyword to partial register - mov $XX[1]#b,$YY#b - lea ($dat,$XX[0],4),$XX[1] - psllq \$8,%xmm1 - pxor %xmm0,%xmm5 - pxor %xmm1,%xmm5 -___ -} - -my $i=0; -for(;$i<16;$i++) { R0($i,@V); unshift(@V,pop(@V)); push(@TX,shift(@TX)); } -for(;$i<32;$i++) { R1($i,@V); unshift(@V,pop(@V)); push(@TX,shift(@TX)); } -for(;$i<48;$i++) { R2($i,@V); unshift(@V,pop(@V)); push(@TX,shift(@TX)); } -for(;$i<64;$i++) { R3($i,@V); unshift(@V,pop(@V)); push(@TX,shift(@TX)); } - -$code.=<<___; -#md5# add 0*4(%rsp),$V[0] # accumulate hash value -#md5# add 1*4(%rsp),$V[1] -#md5# add 2*4(%rsp),$V[2] -#md5# add 3*4(%rsp),$V[3] - -#rc4# movdqu %xmm2,($out,$in0) # write RC4 output -#rc4# movdqu %xmm3,16($out,$in0) -#rc4# movdqu %xmm4,32($out,$in0) -#rc4# movdqu %xmm5,48($out,$in0) -#md5# lea 64($inp),$inp -#rc4# lea 64($in0),$in0 - cmp 16(%rsp),$inp # are we done? - jb .Loop - -#md5# mov 24(%rsp),$len # restore pointer to MD5_CTX -#rc4# sub $TX[0]#b,$YY#b # correct $YY -#md5# mov $V[0],0*4($len) # write MD5_CTX -#md5# mov $V[1],1*4($len) -#md5# mov $V[2],2*4($len) -#md5# mov $V[3],3*4($len) -___ -$code.=<<___ if ($rc4 && (!$md5 || $D)); - mov 32(%rsp),$len # restore original $len - and \$63,$len # remaining bytes - jnz .Loop1 - jmp .Ldone - -.align 16 -.Loop1: - add $TX[0]#b,$YY#b - movl ($dat,$YY,4),$TY#d - movl $TX[0]#d,($dat,$YY,4) - movl $TY#d,($dat,$XX[0],4) - add $TY#b,$TX[0]#b - inc $XX[0]#b - movl ($dat,$TX[0],4),$TY#d - movl ($dat,$XX[0],4),$TX[0]#d - xorb ($in0),$TY#b - movb $TY#b,($out,$in0) - lea 1($in0),$in0 - dec $len - jnz .Loop1 - -.Ldone: -___ -$code.=<<___; -#rc4# sub \$1,$XX[0]#b -#rc4# movl $XX[0]#d,-8($dat) -#rc4# movl $YY#d,-4($dat) - - mov 40(%rsp),%r15 - mov 48(%rsp),%r14 - mov 56(%rsp),%r13 - mov 64(%rsp),%r12 - mov 72(%rsp),%rbp - mov 80(%rsp),%rbx - lea 88(%rsp),%rsp -.Lepilogue: -.Labort: - ret -.size $func,.-$func -___ - -if ($rc4 && $D) { # sole purpose of this section is to provide - # option to use the generated module as drop-in - # replacement for rc4-x86_64.pl for debugging - # and testing purposes... 
-my ($idx,$ido)=("%r8","%r9"); -my ($dat,$len,$inp)=("%rdi","%rsi","%rdx"); - -$code.=<<___; -.globl RC4_set_key -.type RC4_set_key,\@function,3 -.align 16 -RC4_set_key: - lea 8($dat),$dat - lea ($inp,$len),$inp - neg $len - mov $len,%rcx - xor %eax,%eax - xor $ido,$ido - xor %r10,%r10 - xor %r11,%r11 - jmp .Lw1stloop - -.align 16 -.Lw1stloop: - mov %eax,($dat,%rax,4) - add \$1,%al - jnc .Lw1stloop - - xor $ido,$ido - xor $idx,$idx -.align 16 -.Lw2ndloop: - mov ($dat,$ido,4),%r10d - add ($inp,$len,1),$idx#b - add %r10b,$idx#b - add \$1,$len - mov ($dat,$idx,4),%r11d - cmovz %rcx,$len - mov %r10d,($dat,$idx,4) - mov %r11d,($dat,$ido,4) - add \$1,$ido#b - jnc .Lw2ndloop - - xor %eax,%eax - mov %eax,-8($dat) - mov %eax,-4($dat) - ret -.size RC4_set_key,.-RC4_set_key - -.globl RC4_options -.type RC4_options,\@abi-omnipotent -.align 16 -RC4_options: - lea .Lopts(%rip),%rax - ret -.align 64 -.Lopts: -.asciz "rc4(64x,int)" -.align 64 -.size RC4_options,.-RC4_options -___ -} -# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame, -# CONTEXT *context,DISPATCHER_CONTEXT *disp) -if ($win64) { -my $rec="%rcx"; -my $frame="%rdx"; -my $context="%r8"; -my $disp="%r9"; - -$code.=<<___; -.extern __imp_RtlVirtualUnwind -.type se_handler,\@abi-omnipotent -.align 16 -se_handler: - push %rsi - push %rdi - push %rbx - push %rbp - push %r12 - push %r13 - push %r14 - push %r15 - pushfq - sub \$64,%rsp - - mov 120($context),%rax # pull context->Rax - mov 248($context),%rbx # pull context->Rip - - lea .Lbody(%rip),%r10 - cmp %r10,%rbx # context->Rip<.Lbody - jb .Lin_prologue - - mov 152($context),%rax # pull context->Rsp - - lea .Lepilogue(%rip),%r10 - cmp %r10,%rbx # context->Rip>=.Lepilogue - jae .Lin_prologue - - mov 40(%rax),%r15 - mov 48(%rax),%r14 - mov 56(%rax),%r13 - mov 64(%rax),%r12 - mov 72(%rax),%rbp - mov 80(%rax),%rbx - lea 88(%rax),%rax - - mov %rbx,144($context) # restore context->Rbx - mov %rbp,160($context) # restore context->Rbp - mov %r12,216($context) # restore context->R12 - mov %r13,224($context) # restore context->R13 - mov %r14,232($context) # restore context->R14 - mov %r15,240($context) # restore context->R15 - -.Lin_prologue: - mov 8(%rax),%rdi - mov 16(%rax),%rsi - mov %rax,152($context) # restore context->Rsp - mov %rsi,168($context) # restore context->Rsi - mov %rdi,176($context) # restore context->Rdi - - mov 40($disp),%rdi # disp->ContextRecord - mov $context,%rsi # context - mov \$154,%ecx # sizeof(CONTEXT) - .long 0xa548f3fc # cld; rep movsq - - mov $disp,%rsi - xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER - mov 8(%rsi),%rdx # arg2, disp->ImageBase - mov 0(%rsi),%r8 # arg3, disp->ControlPc - mov 16(%rsi),%r9 # arg4, disp->FunctionEntry - mov 40(%rsi),%r10 # disp->ContextRecord - lea 56(%rsi),%r11 # &disp->HandlerData - lea 24(%rsi),%r12 # &disp->EstablisherFrame - mov %r10,32(%rsp) # arg5 - mov %r11,40(%rsp) # arg6 - mov %r12,48(%rsp) # arg7 - mov %rcx,56(%rsp) # arg8, (NULL) - call *__imp_RtlVirtualUnwind(%rip) - - mov \$1,%eax # ExceptionContinueSearch - add \$64,%rsp - popfq - pop %r15 - pop %r14 - pop %r13 - pop %r12 - pop %rbp - pop %rbx - pop %rdi - pop %rsi - ret -.size se_handler,.-se_handler - -.section .pdata -.align 4 - .rva .LSEH_begin_$func - .rva .LSEH_end_$func - .rva .LSEH_info_$func - -.section .xdata -.align 8 -.LSEH_info_$func: - .byte 9,0,0,0 - .rva se_handler -___ -} - -sub reg_part { -my ($reg,$conv)=@_; - if ($reg =~ /%r[0-9]+/) { $reg .= $conv; } - elsif ($conv eq "b") { $reg =~ s/%[er]([^x]+)x?/%$1l/; } - elsif ($conv eq "w") { $reg =~ 
s/%[er](.+)/%$1/; } - elsif ($conv eq "d") { $reg =~ s/%[er](.+)/%e$1/; } - return $reg; -} - -$code =~ s/(%[a-z0-9]+)#([bwd])/reg_part($1,$2)/gem; -$code =~ s/\`([^\`]*)\`/eval $1/gem; -$code =~ s/pinsrw\s+\$0,/movd /gm; - -$code =~ s/#md5#//gm if ($md5); -$code =~ s/#rc4#//gm if ($rc4); - -print $code; - -close STDOUT; diff --git a/crypto/rc4/asm/rc4-x86_64.pl b/crypto/rc4/asm/rc4-x86_64.pl deleted file mode 100644 index cef626891..000000000 --- a/crypto/rc4/asm/rc4-x86_64.pl +++ /dev/null @@ -1,653 +0,0 @@ -#!/usr/bin/env perl -# -# ==================================================================== -# Written by Andy Polyakov for the OpenSSL -# project. The module is, however, dual licensed under OpenSSL and -# CRYPTOGAMS licenses depending on where you obtain it. For further -# details see http://www.openssl.org/~appro/cryptogams/. -# ==================================================================== -# -# July 2004 -# -# 2.22x RC4 tune-up:-) It should be noted though that my hand [as in -# "hand-coded assembler"] doesn't stand for the whole improvement -# coefficient. It turned out that eliminating RC4_CHAR from config -# line results in ~40% improvement (yes, even for C implementation). -# Presumably it has everything to do with AMD cache architecture and -# RAW or whatever penalties. Once again! The module *requires* config -# line *without* RC4_CHAR! As for coding "secret," I bet on partial -# register arithmetics. For example instead of 'inc %r8; and $255,%r8' -# I simply 'inc %r8b'. Even though optimization manual discourages -# operating on partial registers, it turned out to be the best bet. -# At least for AMD... How IA32E would perform remains to be seen... - -# November 2004 -# -# As was shown by Marc Bevand reordering of a couple of load operations -# results in even higher performance gain of 3.3x:-) At least on -# Opteron... For reference, 1x in this case is RC4_CHAR C-code -# compiled with gcc 3.3.2, which performs at ~54MBps per 1GHz clock. -# The latter means that if you want to *estimate* what to expect from -# *your* Opteron, then multiply 54 by 3.3 and clock frequency in GHz. - -# November 2004 -# -# Intel P4 EM64T core was found to run the AMD64 code really slow... -# The only way to achieve comparable performance on P4 was to keep -# RC4_CHAR. Kind of ironic, huh? As it's apparently impossible to -# compose blended code, which would perform even within 30% margin -# on either AMD or Intel platforms, I implement both cases. See -# rc4_skey.c for further details... - -# April 2005 -# -# P4 EM64T core appears to be "allergic" to 64-bit inc/dec. Replacing -# those with add/sub results in 50% performance improvement of folded -# loop... - -# May 2005 -# -# As was shown by Zou Nanhai loop unrolling can improve Intel EM64T -# performance by >30% [unlike P4 32-bit case that is]. But this is -# provided that loads are reordered even more aggressively! Both code -# paths, AMD64 and EM64T, reorder loads in essentially same manner -# as my IA-64 implementation. On Opteron this resulted in modest 5% -# improvement [I had to test it], while final Intel P4 performance -# achieves respectful 432MBps on 2.8GHz processor now. For reference. -# If executed on Xeon, current RC4_CHAR code-path is 2.7x faster than -# RC4_INT code-path. While if executed on Opteron, it's only 25% -# slower than the RC4_INT one [meaning that if CPU µ-arch detection -# is not implemented, then this final RC4_CHAR code-path should be -# preferred, as it provides better *all-round* performance].
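All of this µ-arch dispatch keys off the cached CPUID vector. A small sketch of the three bits the deleted sources test, assuming only that OPENSSL_ia32cap_P is the usual uint32_t[4] capability array (bit 20 selects the P4/RC4_CHAR path, bit 26 is SSE2, bit 30 is the "Intel CPU" flag, matching the '# RC4_CHAR?', '# check SSE2 bit', and '# Intel CPU?' comments in the code; the removed stitched-call guard in e_rc4.c tested bit 20 the same way):

#include <stdint.h>

extern uint32_t OPENSSL_ia32cap_P[4]; /* CPUID bits cached at startup */

static int is_p4_like(void) { return (OPENSSL_ia32cap_P[0] >> 20) & 1; }
static int has_sse2(void) { return (OPENSSL_ia32cap_P[0] >> 26) & 1; }
static int is_intel(void) { return (OPENSSL_ia32cap_P[0] >> 30) & 1; }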
- -# March 2007 -# -# Intel Core2 was observed to perform poorly on both code paths:-( It -# apparently suffers from some kind of partial register stall, which -# occurs in 64-bit mode only [as virtually identical 32-bit loop was -# observed to outperform 64-bit one by almost 50%]. Adding two movzb to -# cloop1 boosts its performance by 80%! This loop appears to be an optimal -# fit for Core2 and therefore the code was modified to skip cloop8 on -# this CPU. - -# May 2010 -# -# Intel Westmere was observed to perform suboptimally. Adding yet -# another movzb to cloop1 improved performance by almost 50%! Core2 -# performance is improved too, but nominally... - -# May 2011 -# -# The only code path that was not modified is P4-specific one. Non-P4 -# Intel code path optimization is heavily based on submission by Maxim -# Perminov, Maxim Locktyukhin and Jim Guilford of Intel. I've used -# some of the ideas even in an attempt to optimize the original RC4_INT -# code path... Current performance in cycles per processed byte (less -# is better) and improvement coefficients relative to previous -# version of this module are: -# -# Opteron 5.3/+0%(*) -# P4 6.5 -# Core2 6.2/+15%(**) -# Westmere 4.2/+60% -# Sandy Bridge 4.2/+120% -# Atom 9.3/+80% -# -# (*) But corresponding loop has fewer instructions, which should have -# positive effect on upcoming Bulldozer, which has one less ALU. -# For reference, Intel code runs at 6.8 cpb rate on Opteron. -# (**) Note that Core2 result is ~15% lower than corresponding result -# for 32-bit code, meaning that it's possible to improve it, -# but more than likely at the cost of the others (see rc4-586.pl -# to get the idea)... - -$flavour = shift; -$output = shift; -if ($flavour =~ /\./) { $output = $flavour; undef $flavour; } - -$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/); - -$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; -( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or -( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or -die "can't locate x86_64-xlate.pl"; - -open OUT,"| \"$^X\" $xlate $flavour $output"; -*STDOUT=*OUT; - -$dat="%rdi"; # arg1 -$len="%rsi"; # arg2 -$inp="%rdx"; # arg3 -$out="%rcx"; # arg4 - -{ -$code=<<___; -.text -.extern OPENSSL_ia32cap_P - -.globl asm_RC4 -.type asm_RC4,\@function,4 -.align 16 -asm_RC4: - or $len,$len - jne .Lentry - ret -.Lentry: - push %rbx - push %r12 - push %r13 -.Lprologue: - mov $len,%r11 - mov $inp,%r12 - mov $out,%r13 -___ -my $len="%r11"; # reassign input arguments -my $inp="%r12"; -my $out="%r13"; - -my @XX=("%r10","%rsi"); -my @TX=("%rax","%rbx"); -my $YY="%rcx"; -my $TY="%rdx"; - -$code.=<<___; - xor $XX[0],$XX[0] - xor $YY,$YY - - lea 8($dat),$dat - mov -8($dat),$XX[0]#b - mov -4($dat),$YY#b - cmpl \$-1,256($dat) - je .LRC4_CHAR - mov OPENSSL_ia32cap_P(%rip),%r8d - xor $TX[1],$TX[1] - inc $XX[0]#b - sub $XX[0],$TX[1] - sub $inp,$out - movl ($dat,$XX[0],4),$TX[0]#d - test \$-16,$len - jz .Lloop1 - bt \$30,%r8d # Intel CPU?
- jc .Lintel - and \$7,$TX[1] - lea 1($XX[0]),$XX[1] - jz .Loop8 - sub $TX[1],$len -.Loop8_warmup: - add $TX[0]#b,$YY#b - movl ($dat,$YY,4),$TY#d - movl $TX[0]#d,($dat,$YY,4) - movl $TY#d,($dat,$XX[0],4) - add $TY#b,$TX[0]#b - inc $XX[0]#b - movl ($dat,$TX[0],4),$TY#d - movl ($dat,$XX[0],4),$TX[0]#d - xorb ($inp),$TY#b - movb $TY#b,($out,$inp) - lea 1($inp),$inp - dec $TX[1] - jnz .Loop8_warmup - - lea 1($XX[0]),$XX[1] - jmp .Loop8 -.align 16 -.Loop8: -___ -for ($i=0;$i<8;$i++) { -$code.=<<___ if ($i==7); - add \$8,$XX[1]#b -___ -$code.=<<___; - add $TX[0]#b,$YY#b - movl ($dat,$YY,4),$TY#d - movl $TX[0]#d,($dat,$YY,4) - movl `4*($i==7?-1:$i)`($dat,$XX[1],4),$TX[1]#d - ror \$8,%r8 # ror is redundant when $i=0 - movl $TY#d,4*$i($dat,$XX[0],4) - add $TX[0]#b,$TY#b - movb ($dat,$TY,4),%r8b -___ -push(@TX,shift(@TX)); #push(@XX,shift(@XX)); # "rotate" registers -} -$code.=<<___; - add \$8,$XX[0]#b - ror \$8,%r8 - sub \$8,$len - - xor ($inp),%r8 - mov %r8,($out,$inp) - lea 8($inp),$inp - - test \$-8,$len - jnz .Loop8 - cmp \$0,$len - jne .Lloop1 - jmp .Lexit - -.align 16 -.Lintel: - test \$-32,$len - jz .Lloop1 - and \$15,$TX[1] - jz .Loop16_is_hot - sub $TX[1],$len -.Loop16_warmup: - add $TX[0]#b,$YY#b - movl ($dat,$YY,4),$TY#d - movl $TX[0]#d,($dat,$YY,4) - movl $TY#d,($dat,$XX[0],4) - add $TY#b,$TX[0]#b - inc $XX[0]#b - movl ($dat,$TX[0],4),$TY#d - movl ($dat,$XX[0],4),$TX[0]#d - xorb ($inp),$TY#b - movb $TY#b,($out,$inp) - lea 1($inp),$inp - dec $TX[1] - jnz .Loop16_warmup - - mov $YY,$TX[1] - xor $YY,$YY - mov $TX[1]#b,$YY#b - -.Loop16_is_hot: - lea ($dat,$XX[0],4),$XX[1] -___ -sub RC4_loop { - my $i=shift; - my $j=$i<0?0:$i; - my $xmm="%xmm".($j&1); - - $code.=" add \$16,$XX[0]#b\n" if ($i==15); - $code.=" movdqu ($inp),%xmm2\n" if ($i==15); - $code.=" add $TX[0]#b,$YY#b\n" if ($i<=0); - $code.=" movl ($dat,$YY,4),$TY#d\n"; - $code.=" pxor %xmm0,%xmm2\n" if ($i==0); - $code.=" psllq \$8,%xmm1\n" if ($i==0); - $code.=" pxor $xmm,$xmm\n" if ($i<=1); - $code.=" movl $TX[0]#d,($dat,$YY,4)\n"; - $code.=" add $TY#b,$TX[0]#b\n"; - $code.=" movl `4*($j+1)`($XX[1]),$TX[1]#d\n" if ($i<15); - $code.=" movz $TX[0]#b,$TX[0]#d\n"; - $code.=" movl $TY#d,4*$j($XX[1])\n"; - $code.=" pxor %xmm1,%xmm2\n" if ($i==0); - $code.=" lea ($dat,$XX[0],4),$XX[1]\n" if ($i==15); - $code.=" add $TX[1]#b,$YY#b\n" if ($i<15); - $code.=" pinsrw \$`($j>>1)&7`,($dat,$TX[0],4),$xmm\n"; - $code.=" movdqu %xmm2,($out,$inp)\n" if ($i==0); - $code.=" lea 16($inp),$inp\n" if ($i==0); - $code.=" movl ($XX[1]),$TX[1]#d\n" if ($i==15); -} - RC4_loop(-1); -$code.=<<___; - jmp .Loop16_enter -.align 16 -.Loop16: -___ - -for ($i=0;$i<16;$i++) { - $code.=".Loop16_enter:\n" if ($i==1); - RC4_loop($i); - push(@TX,shift(@TX)); # "rotate" registers -} -$code.=<<___; - mov $YY,$TX[1] - xor $YY,$YY # keyword to partial register - sub \$16,$len - mov $TX[1]#b,$YY#b - test \$-16,$len - jnz .Loop16 - - psllq \$8,%xmm1 - pxor %xmm0,%xmm2 - pxor %xmm1,%xmm2 - movdqu %xmm2,($out,$inp) - lea 16($inp),$inp - - cmp \$0,$len - jne .Lloop1 - jmp .Lexit - -.align 16 -.Lloop1: - add $TX[0]#b,$YY#b - movl ($dat,$YY,4),$TY#d - movl $TX[0]#d,($dat,$YY,4) - movl $TY#d,($dat,$XX[0],4) - add $TY#b,$TX[0]#b - inc $XX[0]#b - movl ($dat,$TX[0],4),$TY#d - movl ($dat,$XX[0],4),$TX[0]#d - xorb ($inp),$TY#b - movb $TY#b,($out,$inp) - lea 1($inp),$inp - dec $len - jnz .Lloop1 - jmp .Lexit - -.align 16 -.LRC4_CHAR: - add \$1,$XX[0]#b - movzb ($dat,$XX[0]),$TX[0]#d - test \$-8,$len - jz .Lcloop1 - jmp .Lcloop8 -.align 16 -.Lcloop8: - mov ($inp),%r8d - mov 4($inp),%r9d 
-___ -# unroll 2x4-wise, because 64-bit rotates kill Intel P4... -for ($i=0;$i<4;$i++) { -$code.=<<___; - add $TX[0]#b,$YY#b - lea 1($XX[0]),$XX[1] - movzb ($dat,$YY),$TY#d - movzb $XX[1]#b,$XX[1]#d - movzb ($dat,$XX[1]),$TX[1]#d - movb $TX[0]#b,($dat,$YY) - cmp $XX[1],$YY - movb $TY#b,($dat,$XX[0]) - jne .Lcmov$i # Intel cmov is sloooow... - mov $TX[0],$TX[1] -.Lcmov$i: - add $TX[0]#b,$TY#b - xor ($dat,$TY),%r8b - ror \$8,%r8d -___ -push(@TX,shift(@TX)); push(@XX,shift(@XX)); # "rotate" registers -} -for ($i=4;$i<8;$i++) { -$code.=<<___; - add $TX[0]#b,$YY#b - lea 1($XX[0]),$XX[1] - movzb ($dat,$YY),$TY#d - movzb $XX[1]#b,$XX[1]#d - movzb ($dat,$XX[1]),$TX[1]#d - movb $TX[0]#b,($dat,$YY) - cmp $XX[1],$YY - movb $TY#b,($dat,$XX[0]) - jne .Lcmov$i # Intel cmov is sloooow... - mov $TX[0],$TX[1] -.Lcmov$i: - add $TX[0]#b,$TY#b - xor ($dat,$TY),%r9b - ror \$8,%r9d -___ -push(@TX,shift(@TX)); push(@XX,shift(@XX)); # "rotate" registers -} -$code.=<<___; - lea -8($len),$len - mov %r8d,($out) - lea 8($inp),$inp - mov %r9d,4($out) - lea 8($out),$out - - test \$-8,$len - jnz .Lcloop8 - cmp \$0,$len - jne .Lcloop1 - jmp .Lexit -___ -$code.=<<___; -.align 16 -.Lcloop1: - add $TX[0]#b,$YY#b - movzb $YY#b,$YY#d - movzb ($dat,$YY),$TY#d - movb $TX[0]#b,($dat,$YY) - movb $TY#b,($dat,$XX[0]) - add $TX[0]#b,$TY#b - add \$1,$XX[0]#b - movzb $TY#b,$TY#d - movzb $XX[0]#b,$XX[0]#d - movzb ($dat,$TY),$TY#d - movzb ($dat,$XX[0]),$TX[0]#d - xorb ($inp),$TY#b - lea 1($inp),$inp - movb $TY#b,($out) - lea 1($out),$out - sub \$1,$len - jnz .Lcloop1 - jmp .Lexit - -.align 16 -.Lexit: - sub \$1,$XX[0]#b - movl $XX[0]#d,-8($dat) - movl $YY#d,-4($dat) - - mov (%rsp),%r13 - mov 8(%rsp),%r12 - mov 16(%rsp),%rbx - add \$24,%rsp -.Lepilogue: - ret -.size asm_RC4,.-asm_RC4 -___ -} - -$idx="%r8"; -$ido="%r9"; - -$code.=<<___; -.globl asm_RC4_set_key -.type asm_RC4_set_key,\@function,3 -.align 16 -asm_RC4_set_key: - lea 8($dat),$dat - lea ($inp,$len),$inp - neg $len - mov $len,%rcx - xor %eax,%eax - xor $ido,$ido - xor %r10,%r10 - xor %r11,%r11 - - mov OPENSSL_ia32cap_P(%rip),$idx#d - bt \$20,$idx#d # RC4_CHAR? 
- jc .Lc1stloop - jmp .Lw1stloop - -.align 16 -.Lw1stloop: - mov %eax,($dat,%rax,4) - add \$1,%al - jnc .Lw1stloop - - xor $ido,$ido - xor $idx,$idx -.align 16 -.Lw2ndloop: - mov ($dat,$ido,4),%r10d - add ($inp,$len,1),$idx#b - add %r10b,$idx#b - add \$1,$len - mov ($dat,$idx,4),%r11d - cmovz %rcx,$len - mov %r10d,($dat,$idx,4) - mov %r11d,($dat,$ido,4) - add \$1,$ido#b - jnc .Lw2ndloop - jmp .Lexit_key - -.align 16 -.Lc1stloop: - mov %al,($dat,%rax) - add \$1,%al - jnc .Lc1stloop - - xor $ido,$ido - xor $idx,$idx -.align 16 -.Lc2ndloop: - mov ($dat,$ido),%r10b - add ($inp,$len),$idx#b - add %r10b,$idx#b - add \$1,$len - mov ($dat,$idx),%r11b - jnz .Lcnowrap - mov %rcx,$len -.Lcnowrap: - mov %r10b,($dat,$idx) - mov %r11b,($dat,$ido) - add \$1,$ido#b - jnc .Lc2ndloop - movl \$-1,256($dat) - -.align 16 -.Lexit_key: - xor %eax,%eax - mov %eax,-8($dat) - mov %eax,-4($dat) - ret -.size asm_RC4_set_key,.-asm_RC4_set_key -___ - -# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame, -# CONTEXT *context,DISPATCHER_CONTEXT *disp) -if ($win64) { -$rec="%rcx"; -$frame="%rdx"; -$context="%r8"; -$disp="%r9"; - -$code.=<<___; -.extern __imp_RtlVirtualUnwind -.type stream_se_handler,\@abi-omnipotent -.align 16 -stream_se_handler: - push %rsi - push %rdi - push %rbx - push %rbp - push %r12 - push %r13 - push %r14 - push %r15 - pushfq - sub \$64,%rsp - - mov 120($context),%rax # pull context->Rax - mov 248($context),%rbx # pull context->Rip - - lea .Lprologue(%rip),%r10 - cmp %r10,%rbx # context->Rip<prologue label - jb .Lin_prologue - - mov 152($context),%rax # pull context->Rsp - - lea .Lepilogue(%rip),%r10 - cmp %r10,%rbx # context->Rip>=epilogue label - jae .Lin_prologue - - lea 24(%rax),%rax - - mov -8(%rax),%rbx - mov -16(%rax),%r12 - mov -24(%rax),%r13 - mov %rbx,144($context) # restore context->Rbx - mov %r12,216($context) # restore context->R12 - mov %r13,224($context) # restore context->R13 - -.Lin_prologue: - mov 8(%rax),%rdi - mov 16(%rax),%rsi - mov %rax,152($context) # restore context->Rsp - mov %rsi,168($context) # restore context->Rsi - mov %rdi,176($context) # restore context->Rdi - - jmp .Lcommon_seh_exit -.size stream_se_handler,.-stream_se_handler - -.type key_se_handler,\@abi-omnipotent -.align 16 -key_se_handler: - push %rsi - push %rdi - push %rbx - push %rbp - push %r12 - push %r13 - push %r14 - push %r15 - pushfq - sub \$64,%rsp - - mov 152($context),%rax # pull context->Rsp - mov 8(%rax),%rdi - mov 16(%rax),%rsi - mov %rsi,168($context) # restore context->Rsi - mov %rdi,176($context) # restore context->Rdi - -.Lcommon_seh_exit: - - mov 40($disp),%rdi # disp->ContextRecord - mov $context,%rsi # context - mov \$154,%ecx # sizeof(CONTEXT) - .long 0xa548f3fc # cld; rep movsq - - mov $disp,%rsi - xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER - mov 8(%rsi),%rdx # arg2, disp->ImageBase - mov 0(%rsi),%r8 # arg3, disp->ControlPc - mov 16(%rsi),%r9 # arg4, disp->FunctionEntry - mov 40(%rsi),%r10 # disp->ContextRecord - lea 56(%rsi),%r11 # &disp->HandlerData - lea 24(%rsi),%r12 # &disp->EstablisherFrame - mov %r10,32(%rsp) # arg5 - mov %r11,40(%rsp) # arg6 - mov %r12,48(%rsp) # arg7 - mov %rcx,56(%rsp) # arg8, (NULL) - call *__imp_RtlVirtualUnwind(%rip) - - mov \$1,%eax # ExceptionContinueSearch - add \$64,%rsp - popfq - pop %r15 - pop %r14 - pop %r13 - pop %r12 - pop %rbp - pop %rbx - pop %rdi - pop %rsi - ret -.size key_se_handler,.-key_se_handler - -.section .pdata -.align 4 - .rva .LSEH_begin_asm_RC4 - .rva .LSEH_end_asm_RC4 - .rva .LSEH_info_asm_RC4 - - .rva .LSEH_begin_asm_RC4_set_key - .rva .LSEH_end_asm_RC4_set_key - .rva .LSEH_info_asm_RC4_set_key - 
.section .xdata -.align 8 -.LSEH_info_asm_RC4: - .byte 9,0,0,0 - .rva stream_se_handler -.LSEH_info_asm_RC4_set_key: - .byte 9,0,0,0 - .rva key_se_handler -___ -} - -sub reg_part { -my ($reg,$conv)=@_; - if ($reg =~ /%r[0-9]+/) { $reg .= $conv; } - elsif ($conv eq "b") { $reg =~ s/%[er]([^x]+)x?/%$1l/; } - elsif ($conv eq "w") { $reg =~ s/%[er](.+)/%$1/; } - elsif ($conv eq "d") { $reg =~ s/%[er](.+)/%e$1/; } - return $reg; -} - -$code =~ s/(%[a-z0-9]+)#([bwd])/reg_part($1,$2)/gem; -$code =~ s/\`([^\`]*)\`/eval $1/gem; - -print $code; - -close STDOUT; diff --git a/crypto/rc4/rc4.c b/crypto/rc4/rc4.c index aa19dc286..cfd0ac8be 100644 --- a/crypto/rc4/rc4.c +++ b/crypto/rc4/rc4.c @@ -56,9 +56,6 @@ #include <openssl/rc4.h> -#if defined(OPENSSL_NO_ASM) || \ - (!defined(OPENSSL_X86_64) && !defined(OPENSSL_X86)) - #if defined(OPENSSL_64_BIT) #define RC4_CHUNK uint64_t #elif defined(OPENSSL_32_BIT) @@ -263,22 +260,3 @@ void RC4_set_key(RC4_KEY *rc4key, unsigned len, const uint8_t *key) { SK_LOOP(d, i + 3); } } - -#else - -/* In this case several functions are provided by asm code. However, one cannot - * control asm symbol visibility with command line flags and such so they are - * always hidden and wrapped by these C functions, which can be so - * controlled. */ - -void asm_RC4(RC4_KEY *key, size_t len, const uint8_t *in, uint8_t *out); -void RC4(RC4_KEY *key, size_t len, const uint8_t *in, uint8_t *out) { - asm_RC4(key, len, in, out); -} - -void asm_RC4_set_key(RC4_KEY *rc4key, unsigned len, const uint8_t *key); -void RC4_set_key(RC4_KEY *rc4key, unsigned len, const uint8_t *key) { - asm_RC4_set_key(rc4key, len, key); -} - -#endif /* OPENSSL_NO_ASM || (!OPENSSL_X86_64 && !OPENSSL_X86) */
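After this change the portable C code in crypto/rc4/rc4.c is the only implementation left, so the asm_RC4/asm_RC4_set_key wrappers above become dead and are removed with it. For reference, the key-schedule loops deleted throughout this patch (w1stloop/w2ndloop and their 8-bit c-loop twins) all implement the standard RC4 KSA; a minimal C sketch with ad hoc names, not BoringSSL's API:

#include <stddef.h>
#include <stdint.h>

/* Standard RC4 key schedule: start from the identity permutation, then
 * mix in key bytes, wrapping the key index (cf. the 'nowrap' branches). */
static void rc4_set_key_sketch(uint8_t s[256], const uint8_t *key,
                               size_t key_len) {
  for (unsigned i = 0; i < 256; i++) {
    s[i] = (uint8_t)i;
  }
  uint8_t j = 0;
  size_t k = 0;
  for (unsigned i = 0; i < 256; i++) {
    j = (uint8_t)(j + s[i] + key[k]);
    uint8_t tmp = s[i]; /* swap S[i] and S[j] */
    s[i] = s[j];
    s[j] = tmp;
    if (++k == key_len) {
      k = 0; /* wrap key bytes */
    }
  }
}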