10924 Need mitigation of L1TF (CVE-2018-3646)
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Peter Tribble <peter.tribble@gmail.com>

          --- old/usr/src/uts/intel/ia32/ml/copy.s
          +++ new/usr/src/uts/intel/ia32/ml/copy.s
(28 lines elided)
  29   29   */
  30   30  
  31   31  /*       Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.        */
  32   32  /*       Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T          */
  33   33  /*         All Rights Reserved                                          */
  34   34  
  35   35  /*       Copyright (c) 1987, 1988 Microsoft Corporation                 */
  36   36  /*         All Rights Reserved                                          */
  37   37  
  38   38  /*
  39      - * Copyright 2016 Joyent, Inc.
       39 + * Copyright (c) 2018 Joyent, Inc.
  40   40   */
  41   41  
  42   42  #include <sys/errno.h>
  43   43  #include <sys/asm_linkage.h>
  44   44  
  45   45  #if defined(__lint)
  46   46  #include <sys/types.h>
  47   47  #include <sys/systm.h>
  48   48  #else   /* __lint */
  49   49  #include "assym.h"
(809 lines elided)
 859  859           * AMD Opteron. The transition value is patched at boot time to avoid
 860  860           * memory reference hit.
 861  861           */
 862  862          .globl bcopy_patch_start
 863  863  bcopy_patch_start:
 864  864          cmpq    $BCOPY_NHM_REP, %rdx
 865  865          .globl bcopy_patch_end
 866  866  bcopy_patch_end:
 867  867  
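The comment above describes the boot-time patching scheme: the bytes between bcopy_patch_start and bcopy_patch_end hold the Nehalem-threshold cmpq, and early boot code copies them over the default-threshold cmpq at the top of bcopy_ck_size on CPUs that want the larger rep cutoff, so the hot path never has to load the threshold from memory. The patch routine itself is not part of this file; the C sketch below is only an illustration of that idea, and the name patch_bcopy_threshold() is hypothetical.

#include <sys/types.h>

/* Labels exported by copy.s; their addresses delimit the patch bytes. */
extern char bcopy_patch_start[], bcopy_patch_end[];
extern char bcopy_ck_size[];

/*
 * Hypothetical boot-time hook (sketch only): overwrite the default
 * cmpq at the start of bcopy_ck_size with the Nehalem-threshold cmpq
 * kept between bcopy_patch_start and bcopy_patch_end.  Run once,
 * early in boot, before the routine is in regular use.
 */
void
patch_bcopy_threshold(void)
{
	size_t cnt = (size_t)(bcopy_patch_end - bcopy_patch_start);
	char *to = bcopy_ck_size;
	char *from = bcopy_patch_start;

	while (cnt-- != 0)
		*to++ = *from++;
}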
 868  868          .p2align 4
 869      -        .globl bcopy_ck_size
 870      -bcopy_ck_size:
      869 +        ALTENTRY(bcopy_ck_size)
      870 +
 871  871          cmpq    $BCOPY_DFLT_REP, %rdx
 872  872          jae     L(use_rep)
 873  873  
 874  874          /*
 875  875           * Align to a 8-byte boundary. Avoids penalties from unaligned stores
 876  876           * as well as from stores spanning cachelines.
 877  877           */
 878  878          test    $0x7, %rsi
 879  879          jz      L(aligned_loop)
 880  880          test    $0x1, %rsi
(68 lines elided)
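The alignment comment in the hunk above captures the strategy for copies below the rep threshold: peel off leading bytes until the destination is 8-byte aligned, move the bulk as 8-byte words so no store straddles a cache line, then finish the tail byte by byte. A rough userland C equivalent of that shape (simplified to byte-at-a-time peeling; illustration only, not the kernel routine):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void
copy_aligned(const char *src, char *dst, size_t len)
{
	/* Peel leading bytes until the destination is 8-byte aligned. */
	while (len > 0 && ((uintptr_t)dst & 0x7) != 0) {
		*dst++ = *src++;
		len--;
	}

	/* Bulk of the copy: every store is now 8-byte aligned. */
	while (len >= 8) {
		uint64_t w;

		(void) memcpy(&w, src, sizeof (w));
		(void) memcpy(dst, &w, sizeof (w));
		src += 8;
		dst += 8;
		len -= 8;
	}

	/* Remainder, as in L(do_remainder) above. */
	while (len > 0) {
		*dst++ = *src++;
		len--;
	}
}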
 949  949          movq    %rdx, %rcx              /* %rcx = count */
 950  950          shrq    $3, %rcx                /* 8-byte word count */
 951  951          rep
 952  952            smovq
 953  953  
 954  954          xchgq   %rsi, %rdi              /* %rdi = src, %rsi = destination */
 955  955          andq    $7, %rdx                /* remainder */
 956  956          jnz     L(do_remainder)
 957  957          ret
 958  958  #undef  L
      959 +        SET_SIZE(bcopy_ck_size)
 959  960  
 960  961  #ifdef DEBUG
 961  962          /*
 962  963           * Setup frame on the run-time stack. The end of the input argument
 963  964           * area must be aligned on a 16 byte boundary. The stack pointer %rsp,
 964  965           * always points to the end of the latest allocated stack frame.
 965  966           * panic(const char *format, ...) is a varargs function. When a
 966  967           * function taking variable arguments is called, %rax must be set
 967  968           * to eight times the number of floating point parameters passed
 968  969           * to the function in SSE registers.
(2230 lines elided)
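The DEBUG comment in the last hunk is about calling the variadic panic() from hand-written assembly: the caller has to keep the stack 16-byte aligned at the call and load %rax with the count of SSE-register arguments before the call. From C the compiler takes care of both details automatically; the small userland sketch below (hypothetical my_panic(), illustration only) is just the C-level view of such a call.

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * A variadic function shaped like panic(const char *format, ...).
 * For the call in main() the compiler emits the %rax setup and keeps
 * the stack 16-byte aligned -- the details the DEBUG assembly above
 * has to handle by hand before its call into panic().
 */
static void
my_panic(const char *format, ...)
{
	va_list ap;

	va_start(ap, format);
	(void) vfprintf(stderr, format, ap);
	va_end(ap);
	abort();
}

int
main(void)
{
	/* No floating-point arguments are passed, so %rax is simply 0. */
	my_panic("copy.s: bad argument %p\n", (void *)0);
	return (0);
}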