#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# May 2011
#
# The module implements bn_GF2m_mul_2x2 polynomial multiplication used
# in bn_gf2m.c. For the time being it is a fairly mechanical port from
# C, except that it has two code paths: one suitable for any x86_64
# CPU and a PCLMULQDQ one suitable for Westmere and later. The
# improvement varies from one benchmark and µ-arch to another. The
# vanilla code path is at most 20% faster than compiler-generated code
# [not very impressive], while the PCLMULQDQ path is a whole 85%-160%
# faster on 163- and 571-bit ECDH benchmarks on Intel CPUs. Keep in
# mind that these figures are not for bn_GF2m_mul_2x2 itself, as not
# all CPU time is burnt in it...

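# Both code paths below evaluate the same Karatsuba identity over GF(2)[x],
# where "+" is XOR and "·" is carry-less multiplication:
#
#   (a1·x^64 + a0)·(b1·x^64 + b0) =
#       a1·b1·x^128 + ((a0+a1)·(b0+b1) + a1·b1 + a0·b0)·x^64 + a0·b0
#
# so each bn_GF2m_mul_2x2 call needs only three 64x64-bit products.
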
$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;

($lo,$hi)=("%rax","%rdx");      $a=$lo;
($i0,$i1)=("%rsi","%rdi");
($t0,$t1)=("%rbx","%rcx");
($b,$mask)=("%rbp","%r8");
($a1,$a2,$a4,$a8,$a12,$a48)=map("%r$_",(9..15));
($R,$Tx)=("%xmm0","%xmm1");

$code.=<<___;
.text

.type   _mul_1x1,\@abi-omnipotent
.align  16
_mul_1x1:
        sub     \$128+8,%rsp
        mov     \$-1,$a1
        lea     ($a,$a),$i0
        shr     \$3,$a1
        lea     (,$a,4),$i1
        and     $a,$a1                  # a1=a&0x1fffffffffffffff
        lea     (,$a,8),$a8
        sar     \$63,$a                 # broadcast 63rd bit
        lea     ($a1,$a1),$a2
        sar     \$63,$i0                # broadcast 62nd bit
        lea     (,$a1,4),$a4
        and     $b,$a
        sar     \$63,$i1                # broadcast 61st bit
        mov     $a,$hi                  # $a is $lo
        shl     \$63,$lo
        and     $b,$i0
        shr     \$1,$hi
        mov     $i0,$t1
        shl     \$62,$i0
        and     $b,$i1
        shr     \$2,$t1
        xor     $i0,$lo
        mov     $i1,$t0
        shl     \$61,$i1
        xor     $t1,$hi
        shr     \$3,$t0
        xor     $i1,$lo
        xor     $t0,$hi

        mov     $a1,$a12
        movq    \$0,0(%rsp)             # tab[0]=0
        xor     $a2,$a12                # a1^a2
        mov     $a1,8(%rsp)             # tab[1]=a1
         mov    $a4,$a48
        mov     $a2,16(%rsp)            # tab[2]=a2
         xor    $a8,$a48                # a4^a8
        mov     $a12,24(%rsp)           # tab[3]=a1^a2

        xor     $a4,$a1
        mov     $a4,32(%rsp)            # tab[4]=a4
        xor     $a4,$a2
        mov     $a1,40(%rsp)            # tab[5]=a1^a4
        xor     $a4,$a12
        mov     $a2,48(%rsp)            # tab[6]=a2^a4
         xor    $a48,$a1                # a1^a4^a4^a8=a1^a8
        mov     $a12,56(%rsp)           # tab[7]=a1^a2^a4
         xor    $a48,$a2                # a2^a4^a4^a8=a2^a8

        mov     $a8,64(%rsp)            # tab[8]=a8
        xor     $a48,$a12               # a1^a2^a4^a4^a8=a1^a2^a8
        mov     $a1,72(%rsp)            # tab[9]=a1^a8
         xor    $a4,$a1                 # a1^a8^a4
        mov     $a2,80(%rsp)            # tab[10]=a2^a8
         xor    $a4,$a2                 # a2^a8^a4
        mov     $a12,88(%rsp)           # tab[11]=a1^a2^a8

        xor     $a4,$a12                # a1^a2^a8^a4
        mov     $a48,96(%rsp)           # tab[12]=a4^a8
         mov    $mask,$i0
        mov     $a1,104(%rsp)           # tab[13]=a1^a4^a8
         and    $b,$i0
        mov     $a2,112(%rsp)           # tab[14]=a2^a4^a8
         shr    \$4,$b
        mov     $a12,120(%rsp)          # tab[15]=a1^a2^a4^a8
         mov    $mask,$i1
         and    $b,$i1
         shr    \$4,$b

        movq    (%rsp,$i0,8),$R         # half of calculations is done in SSE2
        mov     $mask,$i0
        and     $b,$i0
        shr     \$4,$b
___
    for ($n=1;$n<8;$n++) {
        $code.=<<___;
        mov     (%rsp,$i1,8),$t1
        mov     $mask,$i1
        mov     $t1,$t0
        shl     \$`8*$n-4`,$t1
        and     $b,$i1
         movq   (%rsp,$i0,8),$Tx
        shr     \$`64-(8*$n-4)`,$t0
        xor     $t1,$lo
         pslldq \$$n,$Tx
         mov    $mask,$i0
        shr     \$4,$b
        xor     $t0,$hi
         and    $b,$i0
         shr    \$4,$b
         pxor   $Tx,$R
___
    }
$code.=<<___;
        mov     (%rsp,$i1,8),$t1
        mov     $t1,$t0
        shl     \$`8*$n-4`,$t1
        movq    $R,$i0
        shr     \$`64-(8*$n-4)`,$t0
        xor     $t1,$lo
        psrldq  \$8,$R
        xor     $t0,$hi
        movq    $R,$i1
        xor     $i0,$lo
        xor     $i1,$hi

        add     \$128+8,%rsp
        ret
.Lend_mul_1x1:
.size   _mul_1x1,.-_mul_1x1
___
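
# A never-called Perl model of what _mul_1x1 returns: the 128-bit carry-less
# product of two 64-bit polynomials. It is only a sketch, assuming 64-bit Perl
# integers, and it splits the operands into 32-bit halves so that every
# intermediate fits in a native word; the routine above instead builds a
# 16-entry table of multiples of a&0x1fffffffffffffff and fixes up the top
# three bits of $a with the sign-broadcast masks. The names _ref_clmul32 and
# _ref_mul_1x1 are illustrative and not part of the module.
sub _ref_clmul32 {
    my ($x,$y) = @_;                            # 32-bit inputs, <=63-bit result
    my $r = 0;
    for my $i (0..31) { $r ^= $x<<$i if ($y>>$i)&1; }
    return $r;
}
sub _ref_mul_1x1 {
    my ($a,$b) = @_;                            # 64-bit inputs
    my ($ah,$al) = ($a>>32, $a&0xffffffff);
    my ($bh,$bl) = ($b>>32, $b&0xffffffff);
    my $ll  = _ref_clmul32($al,$bl);
    my $hh  = _ref_clmul32($ah,$bh);
    my $mid = _ref_clmul32($ah,$bl) ^ _ref_clmul32($al,$bh);
    return ($ll ^ (($mid&0xffffffff)<<32),      # low 64 bits,  as in $lo/%rax
            $hh ^ ($mid>>32));                  # high 64 bits, as in $hi/%rdx
}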

($rp,$a1,$a0,$b1,$b0) = $win64? ("%rcx","%rdx","%r8", "%r9","%r10") :   # Win64 order
                                ("%rdi","%rsi","%rdx","%rcx","%r8");    # Unix order

$code.=<<___;
.extern OPENSSL_ia32cap_P
.globl  bn_GF2m_mul_2x2
.type   bn_GF2m_mul_2x2,\@abi-omnipotent
.align  16
bn_GF2m_mul_2x2:
        mov     OPENSSL_ia32cap_P(%rip),%rax
        bt      \$33,%rax
        jnc     .Lvanilla_mul_2x2

        movq            $a1,%xmm0
        movq            $b1,%xmm1
        movq            $a0,%xmm2
___
$code.=<<___ if ($win64);
        movq            40(%rsp),%xmm3
___
$code.=<<___ if (!$win64);
        movq            $b0,%xmm3
___
$code.=<<___;
        movdqa          %xmm0,%xmm4
        movdqa          %xmm1,%xmm5
        pclmulqdq       \$0,%xmm1,%xmm0 # a1·b1
        pxor            %xmm2,%xmm4
        pxor            %xmm3,%xmm5
        pclmulqdq       \$0,%xmm3,%xmm2 # a0·b0
        pclmulqdq       \$0,%xmm5,%xmm4 # (a0+a1)·(b0+b1)
        xorps           %xmm0,%xmm4
        xorps           %xmm2,%xmm4     # (a0+a1)·(b0+b1)-a0·b0-a1·b1
        movdqa          %xmm4,%xmm5
        pslldq          \$8,%xmm4
        psrldq          \$8,%xmm5
        pxor            %xmm4,%xmm2
        pxor            %xmm5,%xmm0
        movdqu          %xmm2,0($rp)
        movdqu          %xmm0,16($rp)
        ret

.align  16
.Lvanilla_mul_2x2:
        lea     -8*17(%rsp),%rsp
___
$code.=<<___ if ($win64);
        mov     `8*17+40`(%rsp),$b0
        mov     %rdi,8*15(%rsp)
        mov     %rsi,8*16(%rsp)
___
$code.=<<___;
        mov     %r14,8*10(%rsp)
        mov     %r13,8*11(%rsp)
        mov     %r12,8*12(%rsp)
        mov     %rbp,8*13(%rsp)
        mov     %rbx,8*14(%rsp)
.Lbody_mul_2x2:
        mov     $rp,32(%rsp)            # save the arguments
        mov     $a1,40(%rsp)
        mov     $a0,48(%rsp)
        mov     $b1,56(%rsp)
        mov     $b0,64(%rsp)

        mov     \$0xf,$mask
        mov     $a1,$a
        mov     $b1,$b
        call    _mul_1x1                # a1·b1
        mov     $lo,16(%rsp)
        mov     $hi,24(%rsp)

        mov     48(%rsp),$a
        mov     64(%rsp),$b
        call    _mul_1x1                # a0·b0
        mov     $lo,0(%rsp)
        mov     $hi,8(%rsp)

        mov     40(%rsp),$a
        mov     56(%rsp),$b
        xor     48(%rsp),$a
        xor     64(%rsp),$b
        call    _mul_1x1                # (a0+a1)·(b0+b1)
___
        @r=("%rbx","%rcx","%rdi","%rsi");
$code.=<<___;
        mov     0(%rsp),@r[0]
        mov     8(%rsp),@r[1]
        mov     16(%rsp),@r[2]
        mov     24(%rsp),@r[3]
        mov     32(%rsp),%rbp

        xor     $hi,$lo
        xor     @r[1],$hi
        xor     @r[0],$lo
        mov     @r[0],0(%rbp)
        xor     @r[2],$hi
        mov     @r[3],24(%rbp)
        xor     @r[3],$lo
        xor     @r[3],$hi
        xor     $hi,$lo
        mov     $hi,16(%rbp)
        mov     $lo,8(%rbp)

        mov     8*10(%rsp),%r14
        mov     8*11(%rsp),%r13
        mov     8*12(%rsp),%r12
        mov     8*13(%rsp),%rbp
        mov     8*14(%rsp),%rbx
___
$code.=<<___ if ($win64);
        mov     8*15(%rsp),%rdi
        mov     8*16(%rsp),%rsi
___
$code.=<<___;
        lea     8*17(%rsp),%rsp
        ret
.Lend_mul_2x2:
.size   bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2
.asciz  "GF(2^m) Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align  16
___
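
# A matching, never-called sketch of what bn_GF2m_mul_2x2 stores through $rp:
# a classical 2x2 Karatsuba over GF(2)[x], i.e. the three _mul_1x1 products
# above recombined the way the stores to 0/8/16/24(%rbp) do. _ref_mul_2x2 is
# an illustrative name, not part of the module, and builds on the hypothetical
# _ref_mul_1x1 helper sketched earlier.
sub _ref_mul_2x2 {
    my ($a1,$a0,$b1,$b0) = @_;
    my ($lo1,$hi1) = _ref_mul_1x1($a1,$b1);             # a1·b1
    my ($lo0,$hi0) = _ref_mul_1x1($a0,$b0);             # a0·b0
    my ($lom,$him) = _ref_mul_1x1($a1^$a0,$b1^$b0);     # (a0+a1)·(b0+b1)
    return ($lo0,                                       # r[0]
            $hi0 ^ $lo0 ^ $lo1 ^ $lom,                  # r[1]
            $lo1 ^ $hi1 ^ $hi0 ^ $him,                  # r[2]
            $hi1);                                      # r[3]
}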

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#               CONTEXT *context,DISPATCHER_CONTEXT *disp)
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___;
.extern __imp_RtlVirtualUnwind

.type   se_handler,\@abi-omnipotent
.align  16
se_handler:
        push    %rsi
        push    %rdi
        push    %rbx
        push    %rbp
        push    %r12
        push    %r13
        push    %r14
        push    %r15
        pushfq
        sub     \$64,%rsp

        mov     152($context),%rax      # pull context->Rsp
        mov     248($context),%rbx      # pull context->Rip

        lea     .Lbody_mul_2x2(%rip),%r10
        cmp     %r10,%rbx               # context->Rip<"prologue" label
        jb      .Lin_prologue

        mov     8*10(%rax),%r14         # mimic epilogue
        mov     8*11(%rax),%r13
        mov     8*12(%rax),%r12
        mov     8*13(%rax),%rbp
        mov     8*14(%rax),%rbx
        mov     8*15(%rax),%rdi
        mov     8*16(%rax),%rsi

        mov     %rbx,144($context)      # restore context->Rbx
        mov     %rbp,160($context)      # restore context->Rbp
        mov     %rsi,168($context)      # restore context->Rsi
        mov     %rdi,176($context)      # restore context->Rdi
        mov     %r12,216($context)      # restore context->R12
        mov     %r13,224($context)      # restore context->R13
        mov     %r14,232($context)      # restore context->R14

.Lin_prologue:
        lea     8*17(%rax),%rax
        mov     %rax,152($context)      # restore context->Rsp

        mov     40($disp),%rdi          # disp->ContextRecord
        mov     $context,%rsi           # context
        mov     \$154,%ecx              # sizeof(CONTEXT) in qwords
        .long   0xa548f3fc              # cld; rep movsq

        mov     $disp,%rsi
        xor     %rcx,%rcx               # arg1, UNW_FLAG_NHANDLER
        mov     8(%rsi),%rdx            # arg2, disp->ImageBase
        mov     0(%rsi),%r8             # arg3, disp->ControlPc
        mov     16(%rsi),%r9            # arg4, disp->FunctionEntry
        mov     40(%rsi),%r10           # disp->ContextRecord
        lea     56(%rsi),%r11           # &disp->HandlerData
        lea     24(%rsi),%r12           # &disp->EstablisherFrame
        mov     %r10,32(%rsp)           # arg5
        mov     %r11,40(%rsp)           # arg6
        mov     %r12,48(%rsp)           # arg7
        mov     %rcx,56(%rsp)           # arg8, (NULL)
        call    *__imp_RtlVirtualUnwind(%rip)

        mov     \$1,%eax                # ExceptionContinueSearch
        add     \$64,%rsp
        popfq
        pop     %r15
        pop     %r14
        pop     %r13
        pop     %r12
        pop     %rbp
        pop     %rbx
        pop     %rdi
        pop     %rsi
        ret
.size   se_handler,.-se_handler

.section        .pdata
.align  4
        .rva    _mul_1x1
        .rva    .Lend_mul_1x1
        .rva    .LSEH_info_1x1

        .rva    .Lvanilla_mul_2x2
        .rva    .Lend_mul_2x2
        .rva    .LSEH_info_2x2
.section        .xdata
.align  8
.LSEH_info_1x1:
        .byte   0x01,0x07,0x02,0x00
        .byte   0x07,0x01,0x11,0x00     # sub rsp,128+8
.LSEH_info_2x2:
        .byte   9,0,0,0
        .rva    se_handler
___
}

$code =~ s/\`([^\`]*)\`/eval($1)/gem;
print $code;
close STDOUT;