de-linting of .s files

          --- old/usr/src/uts/intel/ia32/ml/copy.s
          +++ new/usr/src/uts/intel/ia32/ml/copy.s
(34 lines elided)
  35   35  /*       Copyright (c) 1987, 1988 Microsoft Corporation                 */
  36   36  /*         All Rights Reserved                                          */
  37   37  
  38   38  /*
  39   39   * Copyright 2019 Joyent, Inc.
  40   40   */
  41   41  
  42   42  #include <sys/errno.h>
  43   43  #include <sys/asm_linkage.h>
  44   44  
  45      -#if defined(__lint)
  46      -#include <sys/types.h>
  47      -#include <sys/systm.h>
  48      -#else   /* __lint */
  49   45  #include "assym.h"
  50      -#endif  /* __lint */
  51   46  
  52   47  #define KCOPY_MIN_SIZE  128     /* Must be >= 16 bytes */
  53   48  #define XCOPY_MIN_SIZE  128     /* Must be >= 16 bytes */
  54   49  /*
  55   50   * Non-temporal access (NTA) alignment requirement
  56   51   */
  57   52  #define NTA_ALIGN_SIZE  4       /* Must be at least 4-byte aligned */
  58   53  #define NTA_ALIGN_MASK  _CONST(NTA_ALIGN_SIZE-1)
  59   54  #define COUNT_ALIGN_SIZE        16      /* Must be at least 16-byte aligned */
  60   55  #define COUNT_ALIGN_MASK        _CONST(COUNT_ALIGN_SIZE-1)
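
Note: these constants combine into a single eligibility test in the xcopy*_nta
routines further down; both addresses must be NTA-aligned and the count
COUNT-aligned before the non-temporal path is taken. A C rendering of that
exact chain, for illustration only (it relies on NTA_ALIGN_MASK's bits being a
subset of COUNT_ALIGN_MASK's, so one final mask checks both; the macros above
are assumed in scope):

    #include <stddef.h>
    #include <stdint.h>

    /* Illustration of the alignment chain used by the xcopy*_nta paths. */
    static int
    nta_eligible(uintptr_t src, uintptr_t dst, size_t count)
    {
            if (count < XCOPY_MIN_SIZE)
                    return (0);
            /*
             * src and dst must be 4-byte aligned and count 16-byte
             * aligned; 0x3 is a subset of 0xf, so one mask suffices.
             */
            return (((((src | dst) & NTA_ALIGN_MASK) | count) &
                COUNT_ALIGN_MASK) == 0);
    }
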
(75 lines elided)
 136  131  /*
 137  132   * Copy a block of storage, returning an error code if `from' or
 138  133   * `to' takes a kernel pagefault which cannot be resolved.
 139  134   * Returns errno value on pagefault error, 0 if all ok
 140  135   */
 141  136  
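
Note: unlike bcopy(9F), kcopy(9F) reports faults through its return value, as
the comment above states. A minimal usage sketch (the caller name is
hypothetical, kernel context assumed):

    /* Illustrative only: kcopy(9F) returns 0, or an errno on pagefault. */
    static int
    save_block(const void *from, void *to, size_t count)
    {
            int err = kcopy(from, to, count);

            if (err != 0)
                    return (err);   /* e.g. EFAULT on an unresolved fault */
            return (0);
    }
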
 142  137  /*
 143  138   * I'm sorry about these macros, but copy.s is unsurprisingly sensitive to
 144  139   * additional call instructions.
 145  140   */
 146      -#if defined(__amd64)
 147  141  #define SMAP_DISABLE_COUNT      16
 148  142  #define SMAP_ENABLE_COUNT       26
 149      -#elif defined(__i386)
 150      -#define SMAP_DISABLE_COUNT      0
 151      -#define SMAP_ENABLE_COUNT       0
 152      -#endif
 153  143  
 154  144  #define SMAP_DISABLE_INSTR(ITER)                \
 155  145          .globl  _smap_disable_patch_/**/ITER;   \
 156  146          _smap_disable_patch_/**/ITER/**/:;      \
 157  147          nop; nop; nop;
 158  148  
 159  149  #define SMAP_ENABLE_INSTR(ITER)                 \
 160  150          .globl  _smap_enable_patch_/**/ITER;    \
 161  151          _smap_enable_patch_/**/ITER/**/:;       \
 162  152          nop; nop; nop;
 163  153  
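
Note: each macro above plants a named three-byte nop window in the text. On
SMAP-capable CPUs a boot-time patcher can overwrite every window with the
three-byte STAC (disable sites) or CLAC (enable sites) instruction; the
SMAP_DISABLE_COUNT/SMAP_ENABLE_COUNT constants tell it how many labels exist.
A hypothetical sketch of such a patcher's core (the instruction encodings are
Intel's; everything else here is an assumption, not the kernel's actual API):

    #include <stdint.h>

    /* STAC/CLAC are 3-byte instructions: 0F 01 CB and 0F 01 CA. */
    static const uint8_t stac[3] = { 0x0f, 0x01, 0xcb }; /* open user access */
    static const uint8_t clac[3] = { 0x0f, 0x01, 0xca }; /* close user access */

    /*
     * Hypothetical patcher core: overwrite one nop window.  Locating the
     * _smap_*_patch_N symbols and making the text writable are assumed
     * to happen elsewhere (e.g. early in boot).
     */
    static void
    patch_smap_site(uint8_t *window, const uint8_t insn[3])
    {
            window[0] = insn[0];
            window[1] = insn[1];
            window[2] = insn[2];
    }
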
 164      -#if defined(__lint)
 165      -
 166      -/* ARGSUSED */
 167      -int
 168      -kcopy(const void *from, void *to, size_t count)
 169      -{ return (0); }
 170      -
 171      -#else   /* __lint */
 172      -
 173  154          .globl  kernelbase
 174  155          .globl  postbootkernelbase
 175  156  
 176      -#if defined(__amd64)
 177      -
 178  157          ENTRY(kcopy)
 179  158          pushq   %rbp
 180  159          movq    %rsp, %rbp
 181  160  #ifdef DEBUG
 182  161          cmpq    postbootkernelbase(%rip), %rdi          /* %rdi = from */
 183  162          jb      0f
 184  163          cmpq    postbootkernelbase(%rip), %rsi          /* %rsi = to */
 185  164          jnb     1f
 186  165  0:      leaq    .kcopy_panic_msg(%rip), %rdi
 187  166          xorl    %eax, %eax
(16 lines elided)
 204  183          /*
 205  184           * A fault during do_copy_fault is indicated through an errno value
 206  185           * in %rax and we iretq from the trap handler to here.
 207  186           */
 208  187  _kcopy_copyerr:
 209  188          movq    %r11, T_LOFAULT(%r9)    /* restore original lofault */
 210  189          leave
 211  190          ret
 212  191          SET_SIZE(kcopy)
 213  192  
 214      -#elif defined(__i386)
 215      -
 216      -#define ARG_FROM        8
 217      -#define ARG_TO          12
 218      -#define ARG_COUNT       16
 219      -
 220      -        ENTRY(kcopy)
 221      -#ifdef DEBUG
 222      -        pushl   %ebp
 223      -        movl    %esp, %ebp
 224      -        movl    postbootkernelbase, %eax
 225      -        cmpl    %eax, ARG_FROM(%ebp)
 226      -        jb      0f
 227      -        cmpl    %eax, ARG_TO(%ebp)
 228      -        jnb     1f
 229      -0:      pushl   $.kcopy_panic_msg
 230      -        call    panic
 231      -1:      popl    %ebp
 232      -#endif
 233      -        lea     _kcopy_copyerr, %eax    /* lofault value */
 234      -        movl    %gs:CPU_THREAD, %edx
 235      -
 236      -do_copy_fault:
 237      -        pushl   %ebp
 238      -        movl    %esp, %ebp              /* setup stack frame */
 239      -        pushl   %esi
 240      -        pushl   %edi                    /* save registers */
 241      -
 242      -        movl    T_LOFAULT(%edx), %edi
 243      -        pushl   %edi                    /* save the current lofault */
 244      -        movl    %eax, T_LOFAULT(%edx)   /* new lofault */
 245      -
 246      -        movl    ARG_COUNT(%ebp), %ecx
 247      -        movl    ARG_FROM(%ebp), %esi
 248      -        movl    ARG_TO(%ebp), %edi
 249      -        shrl    $2, %ecx                /* word count */
 250      -        rep
 251      -          smovl
 252      -        movl    ARG_COUNT(%ebp), %ecx
 253      -        andl    $3, %ecx                /* bytes left over */
 254      -        rep
 255      -          smovb
 256      -        xorl    %eax, %eax
 257      -
 258      -        /*
 259      -         * A fault during do_copy_fault is indicated through an errno value
 260      -         * in %eax and we iret from the trap handler to here.
 261      -         */
 262      -_kcopy_copyerr:
 263      -        popl    %ecx
 264      -        popl    %edi
 265      -        movl    %ecx, T_LOFAULT(%edx)   /* restore the original lofault */
 266      -        popl    %esi
 267      -        popl    %ebp
 268      -        ret
 269      -        SET_SIZE(kcopy)
 270      -
 271  193  #undef  ARG_FROM
 272  194  #undef  ARG_TO
 273  195  #undef  ARG_COUNT
 274  196  
 275      -#endif  /* __i386 */
 276      -#endif  /* __lint */
 277      -
 278      -#if defined(__lint)
 279      -
 280      -/*
 281      - * Copy a block of storage.  Similar to kcopy but uses non-temporal
 282      - * instructions.
 283      - */
 284      -
 285      -/* ARGSUSED */
 286      -int
 287      -kcopy_nta(const void *from, void *to, size_t count, int copy_cached)
 288      -{ return (0); }
 289      -
 290      -#else   /* __lint */
 291      -
 292      -#if defined(__amd64)
 293      -
 294  197  #define COPY_LOOP_INIT(src, dst, cnt)   \
 295  198          addq    cnt, src;                       \
 296  199          addq    cnt, dst;                       \
 297  200          shrq    $3, cnt;                        \
 298  201          neg     cnt
 299  202  
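
Note: COPY_LOOP_INIT biases both pointers past the end of the buffer and
negates the (now 8-byte-word) count; the COPY_LOOP_BODY that follows then
indexes upward toward zero, so the final add sets ZF and ends the loop with
no separate compare. The same idiom in C, for illustration (plain stores
here; the real body uses movnti non-temporal stores plus prefetchnta):

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch of the negative-index loop; count is a multiple of 16. */
    static void
    copy_loop_sketch(const char *src, char *dst, size_t count)
    {
            intptr_t i = -(intptr_t)(count >> 3);   /* negated word count */

            src += count;                           /* bias past the end */
            dst += count;
            while (i != 0) {                        /* asm: addq $2 sets ZF */
                    *(uint64_t *)(dst + i * 8) =
                        *(const uint64_t *)(src + i * 8);
                    *(uint64_t *)(dst + i * 8 + 8) =
                        *(const uint64_t *)(src + i * 8 + 8);
                    i += 2;                         /* 16 bytes per pass */
            }
    }
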
 300  203          /* Copy 16 bytes per loop.  Uses %rax and %r8 */
 301  204  #define COPY_LOOP_BODY(src, dst, cnt)   \
 302  205          prefetchnta     0x100(src, cnt, 8);     \
 303  206          movq    (src, cnt, 8), %rax;            \
(56 lines elided)
 360  263          xorl    %eax, %eax              /* return 0 (success) */
 361  264          SMAP_ENABLE_INSTR(1)
 362  265  
 363  266  _kcopy_nta_copyerr:
 364  267          movq    %r11, T_LOFAULT(%r9)    /* restore original lofault */
 365  268          leave
 366  269          ret
 367  270          SET_SIZE(do_copy_fault_nta)
 368  271          SET_SIZE(kcopy_nta)
 369  272  
 370      -#elif defined(__i386)
 371      -
 372      -#define ARG_FROM        8
 373      -#define ARG_TO          12
 374      -#define ARG_COUNT       16
 375      -
 376      -#define COPY_LOOP_INIT(src, dst, cnt)   \
 377      -        addl    cnt, src;                       \
 378      -        addl    cnt, dst;                       \
 379      -        shrl    $3, cnt;                        \
 380      -        neg     cnt
 381      -
 382      -#define COPY_LOOP_BODY(src, dst, cnt)   \
 383      -        prefetchnta     0x100(src, cnt, 8);     \
 384      -        movl    (src, cnt, 8), %esi;            \
 385      -        movnti  %esi, (dst, cnt, 8);            \
 386      -        movl    0x4(src, cnt, 8), %esi;         \
 387      -        movnti  %esi, 0x4(dst, cnt, 8);         \
 388      -        movl    0x8(src, cnt, 8), %esi;         \
 389      -        movnti  %esi, 0x8(dst, cnt, 8);         \
 390      -        movl    0xc(src, cnt, 8), %esi;         \
 391      -        movnti  %esi, 0xc(dst, cnt, 8);         \
 392      -        addl    $2, cnt
 393      -
 394      -        /*
 395      -         * kcopy_nta is not implemented for 32-bit as no performance
 396      -         * improvement was shown.  We simply jump directly to kcopy
 397      -         * and discard the 4 arguments.
 398      -         */
 399      -        ENTRY(kcopy_nta)
 400      -        jmp     kcopy
 401      -
 402      -        lea     _kcopy_nta_copyerr, %eax        /* lofault value */
 403      -        ALTENTRY(do_copy_fault_nta)
 404      -        pushl   %ebp
 405      -        movl    %esp, %ebp              /* setup stack frame */
 406      -        pushl   %esi
 407      -        pushl   %edi
 408      -
 409      -        movl    %gs:CPU_THREAD, %edx
 410      -        movl    T_LOFAULT(%edx), %edi
 411      -        pushl   %edi                    /* save the current lofault */
 412      -        movl    %eax, T_LOFAULT(%edx)   /* new lofault */
 413      -
 414      -        /* COPY_LOOP_BODY needs to use %esi */
 415      -        movl    ARG_COUNT(%ebp), %ecx
 416      -        movl    ARG_FROM(%ebp), %edi
 417      -        movl    ARG_TO(%ebp), %eax
 418      -        COPY_LOOP_INIT(%edi, %eax, %ecx)
 419      -1:      COPY_LOOP_BODY(%edi, %eax, %ecx)
 420      -        jnz     1b
 421      -        mfence
 422      -
 423      -        xorl    %eax, %eax
 424      -_kcopy_nta_copyerr:
 425      -        popl    %ecx
 426      -        popl    %edi
 427      -        movl    %ecx, T_LOFAULT(%edx)   /* restore the original lofault */
 428      -        popl    %esi
 429      -        leave
 430      -        ret
 431      -        SET_SIZE(do_copy_fault_nta)
 432      -        SET_SIZE(kcopy_nta)
 433      -
 434      -#undef  ARG_FROM
 435      -#undef  ARG_TO
 436      -#undef  ARG_COUNT
 437      -
 438      -#endif  /* __i386 */
 439      -#endif  /* __lint */
 440      -
 441      -#if defined(__lint)
 442      -
 443      -/* ARGSUSED */
 444      -void
 445      -bcopy(const void *from, void *to, size_t count)
 446      -{}
 447      -
 448      -#else   /* __lint */
 449      -
 450      -#if defined(__amd64)
 451      -
 452  273          ENTRY(bcopy)
 453  274  #ifdef DEBUG
 454  275          orq     %rdx, %rdx              /* %rdx = count */
 455  276          jz      1f
 456  277          cmpq    postbootkernelbase(%rip), %rdi          /* %rdi = from */
 457  278          jb      0f
 458  279          cmpq    postbootkernelbase(%rip), %rsi          /* %rsi = to */
 459  280          jnb     1f
 460  281  0:      leaq    .bcopy_panic_msg(%rip), %rdi
 461  282          jmp     call_panic              /* setup stack and call panic */
(508 lines elided)
 970  791           */
 971  792  call_panic:
 972  793          pushq   %rbp                    /* align stack properly */
 973  794          movq    %rsp, %rbp
 974  795          xorl    %eax, %eax              /* no variable arguments */
 975  796          call    panic                   /* %rdi = format string */
 976  797  #endif
 977  798          SET_SIZE(bcopy_altentry)
 978  799          SET_SIZE(bcopy)
 979  800  
 980      -#elif defined(__i386)
 981  801  
 982      -#define ARG_FROM        4
 983      -#define ARG_TO          8
 984      -#define ARG_COUNT       12
 985      -
 986      -        ENTRY(bcopy)
 987      -#ifdef DEBUG
 988      -        movl    ARG_COUNT(%esp), %eax
 989      -        orl     %eax, %eax
 990      -        jz      1f
 991      -        movl    postbootkernelbase, %eax
 992      -        cmpl    %eax, ARG_FROM(%esp)
 993      -        jb      0f
 994      -        cmpl    %eax, ARG_TO(%esp)
 995      -        jnb     1f
 996      -0:      pushl   %ebp
 997      -        movl    %esp, %ebp
 998      -        pushl   $.bcopy_panic_msg
 999      -        call    panic
1000      -1:
1001      -#endif
1002      -do_copy:
1003      -        movl    %esi, %eax              /* save registers */
1004      -        movl    %edi, %edx
1005      -        movl    ARG_COUNT(%esp), %ecx
1006      -        movl    ARG_FROM(%esp), %esi
1007      -        movl    ARG_TO(%esp), %edi
1008      -
1009      -        shrl    $2, %ecx                /* word count */
1010      -        rep
1011      -          smovl
1012      -        movl    ARG_COUNT(%esp), %ecx
1013      -        andl    $3, %ecx                /* bytes left over */
1014      -        rep
1015      -          smovb
1016      -        movl    %eax, %esi              /* restore registers */
1017      -        movl    %edx, %edi
1018      -        ret
1019      -        SET_SIZE(bcopy)
1020      -
1021      -#undef  ARG_COUNT
1022      -#undef  ARG_FROM
1023      -#undef  ARG_TO
1024      -
1025      -#endif  /* __i386 */
1026      -#endif  /* __lint */
1027      -
1028      -
1029  802  /*
1030  803   * Zero a block of storage, returning an error code if we
1031  804   * take a kernel pagefault which cannot be resolved.
1032  805   * Returns errno value on pagefault error, 0 if all ok
1033  806   */
1034  807  
1035      -#if defined(__lint)
1036      -
1037      -/* ARGSUSED */
1038      -int
1039      -kzero(void *addr, size_t count)
1040      -{ return (0); }
1041      -
1042      -#else   /* __lint */
1043      -
1044      -#if defined(__amd64)
1045      -
1046  808          ENTRY(kzero)
1047  809  #ifdef DEBUG
1048  810          cmpq    postbootkernelbase(%rip), %rdi  /* %rdi = addr */
1049  811          jnb     0f
1050  812          leaq    .kzero_panic_msg(%rip), %rdi
1051  813          jmp     call_panic              /* setup stack and call panic */
1052  814  0:
1053  815  #endif
1054  816          /*
1055  817           * pass lofault value as 3rd argument for fault return
(10 lines elided)
1066  828          /*
1067  829           * A fault during bzero is indicated through an errno value
1068  830           * in %rax when we iretq to here.
1069  831           */
1070  832  _kzeroerr:
1071  833          addq    $8, %rsp                /* pop bzero_altentry call ret addr */
1072  834          movq    %r11, T_LOFAULT(%r9)    /* restore the original lofault */
1073  835          ret
1074  836          SET_SIZE(kzero)
1075  837  
1076      -#elif defined(__i386)
1077      -
1078      -#define ARG_ADDR        8
1079      -#define ARG_COUNT       12
1080      -
1081      -        ENTRY(kzero)
1082      -#ifdef DEBUG
1083      -        pushl   %ebp
1084      -        movl    %esp, %ebp
1085      -        movl    postbootkernelbase, %eax
1086      -        cmpl    %eax, ARG_ADDR(%ebp)
1087      -        jnb     0f
1088      -        pushl   $.kzero_panic_msg
1089      -        call    panic
1090      -0:      popl    %ebp
1091      -#endif
1092      -        lea     _kzeroerr, %eax         /* kzeroerr is lofault value */
1093      -
1094      -        pushl   %ebp                    /* save stack base */
1095      -        movl    %esp, %ebp              /* set new stack base */
1096      -        pushl   %edi                    /* save %edi */
1097      -
1098      -        mov     %gs:CPU_THREAD, %edx
1099      -        movl    T_LOFAULT(%edx), %edi
1100      -        pushl   %edi                    /* save the current lofault */
1101      -        movl    %eax, T_LOFAULT(%edx)   /* new lofault */
1102      -
1103      -        movl    ARG_COUNT(%ebp), %ecx   /* get size in bytes */
1104      -        movl    ARG_ADDR(%ebp), %edi    /* %edi <- address of bytes to clear */
1105      -        shrl    $2, %ecx                /* Count of double words to zero */
1106      -        xorl    %eax, %eax              /* sstol val */
1107      -        rep
1108      -          sstol                 /* %ecx contains words to clear (%eax=0) */
1109      -
1110      -        movl    ARG_COUNT(%ebp), %ecx   /* get size in bytes */
1111      -        andl    $3, %ecx                /* do mod 4 */
1112      -        rep
1113      -          sstob                 /* %ecx contains residual bytes to clear */
1114      -
1115      -        /*
1116      -         * A fault during kzero is indicated through an errno value
1117      -         * in %eax when we iret to here.
1118      -         */
1119      -_kzeroerr:
1120      -        popl    %edi
1121      -        movl    %edi, T_LOFAULT(%edx)   /* restore the original lofault */
1122      -        popl    %edi
1123      -        popl    %ebp
1124      -        ret
1125      -        SET_SIZE(kzero)
1126      -
1127      -#undef  ARG_ADDR
1128      -#undef  ARG_COUNT
1129      -
1130      -#endif  /* __i386 */
1131      -#endif  /* __lint */
1132      -
1133  838  /*
1134  839   * Zero a block of storage.
1135  840   */
1136  841  
1137      -#if defined(__lint)
1138      -
1139      -/* ARGSUSED */
1140      -void
1141      -bzero(void *addr, size_t count)
1142      -{}
1143      -
1144      -#else   /* __lint */
1145      -
1146      -#if defined(__amd64)
1147      -
1148  842          ENTRY(bzero)
1149  843  #ifdef DEBUG
1150  844          cmpq    postbootkernelbase(%rip), %rdi  /* %rdi = addr */
1151  845          jnb     0f
1152  846          leaq    .bzero_panic_msg(%rip), %rdi
1153  847          jmp     call_panic              /* setup stack and call panic */
1154  848  0:
1155  849  #endif
1156  850          ALTENTRY(bzero_altentry)
1157  851  do_zero:
(294 lines elided)
1452 1146          shrq    $3, %rcx                /* count of 8-byte words to zero */
1453 1147          rep
1454 1148            sstoq                         /* %rcx = words to clear (%rax=0) */
1455 1149          andq    $7, %rsi                /* remaining bytes */
1456 1150          jnz     9b
1457 1151          ret
1458 1152  #undef  L
1459 1153          SET_SIZE(bzero_altentry)
1460 1154          SET_SIZE(bzero)
1461 1155  
1462      -#elif defined(__i386)
1463      -
1464      -#define ARG_ADDR        4
1465      -#define ARG_COUNT       8
1466      -
1467      -        ENTRY(bzero)
1468      -#ifdef DEBUG
1469      -        movl    postbootkernelbase, %eax
1470      -        cmpl    %eax, ARG_ADDR(%esp)
1471      -        jnb     0f
1472      -        pushl   %ebp
1473      -        movl    %esp, %ebp
1474      -        pushl   $.bzero_panic_msg
1475      -        call    panic
1476      -0:
1477      -#endif
1478      -do_zero:
1479      -        movl    %edi, %edx
1480      -        movl    ARG_COUNT(%esp), %ecx
1481      -        movl    ARG_ADDR(%esp), %edi
1482      -        shrl    $2, %ecx
1483      -        xorl    %eax, %eax
1484      -        rep
1485      -          sstol
1486      -        movl    ARG_COUNT(%esp), %ecx
1487      -        andl    $3, %ecx
1488      -        rep
1489      -          sstob
1490      -        movl    %edx, %edi
1491      -        ret
1492      -        SET_SIZE(bzero)
1493      -
1494      -#undef  ARG_ADDR
1495      -#undef  ARG_COUNT
1496      -
1497      -#endif  /* __i386 */
1498      -#endif  /* __lint */
1499      -
1500 1156  /*
1501 1157   * Transfer data to and from user space -
1502 1158   * Note that these routines can cause faults
1503 1159   * It is assumed that the kernel has nothing at
1504 1160   * less than KERNELBASE in the virtual address space.
1505 1161   *
1506 1162   * Note that copyin(9F) and copyout(9F) are part of the
1507 1163   * DDI/DKI which specifies that they return '-1' on "errors."
1508 1164   *
1509 1165   * Sigh.
(2 lines elided)
1512 1168   * xcopyout_nta() which return the errno that we've faithfully computed.
1513 1169   * This allows other callers (e.g. uiomove(9F)) to work correctly.
1514 1170   * Given that these are used pretty heavily, we expand the calling
1515 1171   * sequences inline for all flavours (rather than making wrappers).
1516 1172   */
1517 1173  
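
Note: per the comment above, callers that need the real errno should use the
x-flavours. A side-by-side sketch of the two return conventions (function
name hypothetical, kernel context assumed; not a realistic call sequence):

    #include <sys/errno.h>

    /* Illustrative only: the two return conventions, side by side. */
    static int
    fetch_args(const void *uaddr, void *kaddr, size_t len)
    {
            int error;

            if (copyin(uaddr, kaddr, len) != 0)     /* DDI/DKI: 0 or -1 */
                    return (EFAULT);                /* caller picks the errno */

            /* the x-flavours hand back the errno they computed */
            error = xcopyin_nta(uaddr, kaddr, len, 0);
            return (error);
    }
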
1518 1174  /*
1519 1175   * Copy user data to kernel space.
1520 1176   */
1521 1177  
1522      -#if defined(__lint)
1523      -
1524      -/* ARGSUSED */
1525      -int
1526      -copyin(const void *uaddr, void *kaddr, size_t count)
1527      -{ return (0); }
1528      -
1529      -#else   /* lint */
1530      -
1531      -#if defined(__amd64)
1532      -
1533 1178          ENTRY(copyin)
1534 1179          pushq   %rbp
1535 1180          movq    %rsp, %rbp
1536 1181          subq    $24, %rsp
1537 1182  
1538 1183          /*
1539 1184           * save args in case we trap and need to rerun as a copyop
1540 1185           */
1541 1186          movq    %rdi, (%rsp)
1542 1187          movq    %rsi, 0x8(%rsp)
(35 lines elided)
1578 1223          movq    0x10(%rsp), %rdx
1579 1224          leave
1580 1225          movq    CP_COPYIN(%rax), %rax
1581 1226          INDIRECT_JMP_REG(rax)
1582 1227  
1583 1228  2:      movl    $-1, %eax
1584 1229          leave
1585 1230          ret
1586 1231          SET_SIZE(copyin)
1587 1232  
1588      -#elif defined(__i386)
1589      -
1590      -#define ARG_UADDR       4
1591      -#define ARG_KADDR       8
1592      -
1593      -        ENTRY(copyin)
1594      -        movl    kernelbase, %ecx
1595      -#ifdef DEBUG
1596      -        cmpl    %ecx, ARG_KADDR(%esp)
1597      -        jnb     1f
1598      -        pushl   %ebp
1599      -        movl    %esp, %ebp
1600      -        pushl   $.copyin_panic_msg
1601      -        call    panic
1602      -1:
1603      -#endif
1604      -        lea     _copyin_err, %eax
1605      -
1606      -        movl    %gs:CPU_THREAD, %edx
1607      -        cmpl    %ecx, ARG_UADDR(%esp)   /* test uaddr < kernelbase */
1608      -        jb      do_copy_fault
1609      -        jmp     3f
1610      -
1611      -_copyin_err:
1612      -        popl    %ecx
1613      -        popl    %edi
1614      -        movl    %ecx, T_LOFAULT(%edx)   /* restore original lofault */
1615      -        popl    %esi
1616      -        popl    %ebp
1617      -3:
1618      -        movl    T_COPYOPS(%edx), %eax
1619      -        cmpl    $0, %eax
1620      -        jz      2f
1621      -        jmp     *CP_COPYIN(%eax)
1622      -
1623      -2:      movl    $-1, %eax
1624      -        ret
1625      -        SET_SIZE(copyin)
1626      -
1627      -#undef  ARG_UADDR
1628      -#undef  ARG_KADDR
1629      -
1630      -#endif  /* __i386 */
1631      -#endif  /* __lint */
1632      -
1633      -#if defined(__lint)
1634      -
1635      -/* ARGSUSED */
1636      -int
1637      -xcopyin_nta(const void *uaddr, void *kaddr, size_t count, int copy_cached)
1638      -{ return (0); }
1639      -
1640      -#else   /* __lint */
1641      -
1642      -#if defined(__amd64)
1643      -
1644 1233          ENTRY(xcopyin_nta)
1645 1234          pushq   %rbp
1646 1235          movq    %rsp, %rbp
1647 1236          subq    $24, %rsp
1648 1237  
1649 1238          /*
1650 1239           * save args in case we trap and need to rerun as a copyop
1651 1240           * %rcx is consumed in this routine so we don't need to save
1652 1241           * it.
1653 1242           */
(69 lines elided)
1723 1312          movq    0x8(%rsp), %rsi
1724 1313          movq    0x10(%rsp), %rdx
1725 1314          leave
1726 1315          movq    CP_XCOPYIN(%r8), %r8
1727 1316          INDIRECT_JMP_REG(r8)
1728 1317  
1729 1318  2:      leave
1730 1319          ret
1731 1320          SET_SIZE(xcopyin_nta)
1732 1321  
1733      -#elif defined(__i386)
1734      -
1735      -#define ARG_UADDR       4
1736      -#define ARG_KADDR       8
1737      -#define ARG_COUNT       12
1738      -#define ARG_CACHED      16
1739      -
1740      -        .globl  use_sse_copy
1741      -
1742      -        ENTRY(xcopyin_nta)
1743      -        movl    kernelbase, %ecx
1744      -        lea     _xcopyin_err, %eax
1745      -        movl    %gs:CPU_THREAD, %edx
1746      -        cmpl    %ecx, ARG_UADDR(%esp)   /* test uaddr < kernelbase */
1747      -        jae     4f
1748      -
1749      -        cmpl    $0, use_sse_copy        /* no sse support */
1750      -        jz      do_copy_fault
1751      -
1752      -        cmpl    $0, ARG_CACHED(%esp)    /* copy_cached hint set? */
1753      -        jnz     do_copy_fault
1754      -
1755      -        /*
1756      -         * Make sure cnt is >= XCOPY_MIN_SIZE bytes
1757      -         */
1758      -        cmpl    $XCOPY_MIN_SIZE, ARG_COUNT(%esp)
1759      -        jb      do_copy_fault
1760      -
1761      -        /*
1762      -         * Make sure src and dst are NTA_ALIGN_SIZE aligned,
1763      -         * count is COUNT_ALIGN_SIZE aligned.
1764      -         */
1765      -        movl    ARG_UADDR(%esp), %ecx
1766      -        orl     ARG_KADDR(%esp), %ecx
1767      -        andl    $NTA_ALIGN_MASK, %ecx
1768      -        orl     ARG_COUNT(%esp), %ecx
1769      -        andl    $COUNT_ALIGN_MASK, %ecx
1770      -        jnz     do_copy_fault
1771      -
1772      -        jmp     do_copy_fault_nta       /* use regular access */
1773      -
1774      -4:
1775      -        movl    $EFAULT, %eax
1776      -        jmp     3f
1777      -
1778      -        /*
1779      -         * A fault during do_copy_fault or do_copy_fault_nta is
1780      -         * indicated through an errno value in %eax and we iret from the
1781      -         * trap handler to here.
1782      -         */
1783      -_xcopyin_err:
1784      -        popl    %ecx
1785      -        popl    %edi
1786      -        movl    %ecx, T_LOFAULT(%edx)   /* restore original lofault */
1787      -        popl    %esi
1788      -        popl    %ebp
1789      -3:
1790      -        cmpl    $0, T_COPYOPS(%edx)
1791      -        jz      2f
1792      -        movl    T_COPYOPS(%edx), %eax
1793      -        jmp     *CP_XCOPYIN(%eax)
1794      -
1795      -2:      rep;    ret     /* use 2 byte return instruction when branch target */
1796      -                        /* AMD Software Optimization Guide - Section 6.2 */
1797      -        SET_SIZE(xcopyin_nta)
1798      -
1799      -#undef  ARG_UADDR
1800      -#undef  ARG_KADDR
1801      -#undef  ARG_COUNT
1802      -#undef  ARG_CACHED
1803      -
1804      -#endif  /* __i386 */
1805      -#endif  /* __lint */
1806      -
1807 1322  /*
1808 1323   * Copy kernel data to user space.
1809 1324   */
1810 1325  
1811      -#if defined(__lint)
1812      -
1813      -/* ARGSUSED */
1814      -int
1815      -copyout(const void *kaddr, void *uaddr, size_t count)
1816      -{ return (0); }
1817      -
1818      -#else   /* __lint */
1819      -
1820      -#if defined(__amd64)
1821      -
1822 1326          ENTRY(copyout)
1823 1327          pushq   %rbp
1824 1328          movq    %rsp, %rbp
1825 1329          subq    $24, %rsp
1826 1330  
1827 1331          /*
1828 1332           * save args in case we trap and need to rerun as a copyop
1829 1333           */
1830 1334          movq    %rdi, (%rsp)
1831 1335          movq    %rsi, 0x8(%rsp)
(36 lines elided)
1868 1372          movq    0x10(%rsp), %rdx
1869 1373          leave
1870 1374          movq    CP_COPYOUT(%rax), %rax
1871 1375          INDIRECT_JMP_REG(rax)
1872 1376  
1873 1377  2:      movl    $-1, %eax
1874 1378          leave
1875 1379          ret
1876 1380          SET_SIZE(copyout)
1877 1381  
1878      -#elif defined(__i386)
1879      -
1880      -#define ARG_KADDR       4
1881      -#define ARG_UADDR       8
1882      -
1883      -        ENTRY(copyout)
1884      -        movl    kernelbase, %ecx
1885      -#ifdef DEBUG
1886      -        cmpl    %ecx, ARG_KADDR(%esp)
1887      -        jnb     1f
1888      -        pushl   %ebp
1889      -        movl    %esp, %ebp
1890      -        pushl   $.copyout_panic_msg
1891      -        call    panic
1892      -1:
1893      -#endif
1894      -        lea     _copyout_err, %eax
1895      -        movl    %gs:CPU_THREAD, %edx
1896      -        cmpl    %ecx, ARG_UADDR(%esp)   /* test uaddr < kernelbase */
1897      -        jb      do_copy_fault
1898      -        jmp     3f
1899      -
1900      -_copyout_err:
1901      -        popl    %ecx
1902      -        popl    %edi
1903      -        movl    %ecx, T_LOFAULT(%edx)   /* restore original lofault */
1904      -        popl    %esi
1905      -        popl    %ebp
1906      -3:
1907      -        movl    T_COPYOPS(%edx), %eax
1908      -        cmpl    $0, %eax
1909      -        jz      2f
1910      -        jmp     *CP_COPYOUT(%eax)
1911      -
1912      -2:      movl    $-1, %eax
1913      -        ret
1914      -        SET_SIZE(copyout)
1915      -
1916      -#undef  ARG_UADDR
1917      -#undef  ARG_KADDR
1918      -
1919      -#endif  /* __i386 */
1920      -#endif  /* __lint */
1921      -
1922      -#if defined(__lint)
1923      -
1924      -/* ARGSUSED */
1925      -int
1926      -xcopyout_nta(const void *kaddr, void *uaddr, size_t count, int copy_cached)
1927      -{ return (0); }
1928      -
1929      -#else   /* __lint */
1930      -
1931      -#if defined(__amd64)
1932      -
1933 1382          ENTRY(xcopyout_nta)
1934 1383          pushq   %rbp
1935 1384          movq    %rsp, %rbp
1936 1385          subq    $24, %rsp
1937 1386  
1938 1387          /*
1939 1388           * save args in case we trap and need to rerun as a copyop
1940 1389           */
1941 1390          movq    %rdi, (%rsp)
1942 1391          movq    %rsi, 0x8(%rsp)
(70 lines elided)
2013 1462          movq    0x8(%rsp), %rsi
2014 1463          movq    0x10(%rsp), %rdx
2015 1464          leave
2016 1465          movq    CP_XCOPYOUT(%r8), %r8
2017 1466          INDIRECT_JMP_REG(r8)
2018 1467  
2019 1468  2:      leave
2020 1469          ret
2021 1470          SET_SIZE(xcopyout_nta)
2022 1471  
2023      -#elif defined(__i386)
2024      -
2025      -#define ARG_KADDR       4
2026      -#define ARG_UADDR       8
2027      -#define ARG_COUNT       12
2028      -#define ARG_CACHED      16
2029      -
2030      -        ENTRY(xcopyout_nta)
2031      -        movl    kernelbase, %ecx
2032      -        lea     _xcopyout_err, %eax
2033      -        movl    %gs:CPU_THREAD, %edx
2034      -        cmpl    %ecx, ARG_UADDR(%esp)   /* test uaddr < kernelbase */
2035      -        jae     4f
2036      -
2037      -        cmpl    $0, use_sse_copy        /* no sse support */
2038      -        jz      do_copy_fault
2039      -
2040      -        cmpl    $0, ARG_CACHED(%esp)    /* copy_cached hint set? */
2041      -        jnz     do_copy_fault
2042      -
2043      -        /*
2044      -         * Make sure cnt is >= XCOPY_MIN_SIZE bytes
2045      -         */
2046      -        cmpl    $XCOPY_MIN_SIZE, %edx
2047      -        jb      do_copy_fault
2048      -
2049      -        /*
2050      -         * Make sure src and dst are NTA_ALIGN_SIZE aligned,
2051      -         * count is COUNT_ALIGN_SIZE aligned.
2052      -         */
2053      -        movl    ARG_UADDR(%esp), %ecx
2054      -        orl     ARG_KADDR(%esp), %ecx
2055      -        andl    $NTA_ALIGN_MASK, %ecx
2056      -        orl     ARG_COUNT(%esp), %ecx
2057      -        andl    $COUNT_ALIGN_MASK, %ecx
2058      -        jnz     do_copy_fault
2059      -        jmp     do_copy_fault_nta
2060      -
2061      -4:
2062      -        movl    $EFAULT, %eax
2063      -        jmp     3f
2064      -
2065      -        /*
2066      -         * A fault during do_copy_fault or do_copy_fault_nta is
2067      -         * indicated through an errno value in %eax and we iret from the
2068      -         * trap handler to here.
2069      -         */
2070      -_xcopyout_err:
2071      -        / restore the original lofault
2072      -        popl    %ecx
2073      -        popl    %edi
2074      -        movl    %ecx, T_LOFAULT(%edx)   / original lofault
2075      -        popl    %esi
2076      -        popl    %ebp
2077      -3:
2078      -        cmpl    $0, T_COPYOPS(%edx)
2079      -        jz      2f
2080      -        movl    T_COPYOPS(%edx), %eax
2081      -        jmp     *CP_XCOPYOUT(%eax)
2082      -
2083      -2:      rep;    ret     /* use 2 byte return instruction when branch target */
2084      -                        /* AMD Software Optimization Guide - Section 6.2 */
2085      -        SET_SIZE(xcopyout_nta)
2086      -
2087      -#undef  ARG_UADDR
2088      -#undef  ARG_KADDR
2089      -#undef  ARG_COUNT
2090      -#undef  ARG_CACHED
2091      -
2092      -#endif  /* __i386 */
2093      -#endif  /* __lint */
2094      -
2095 1472  /*
2096 1473   * Copy a null terminated string from one point to another in
2097 1474   * the kernel address space.
2098 1475   */
2099 1476  
2100      -#if defined(__lint)
2101      -
2102      -/* ARGSUSED */
2103      -int
2104      -copystr(const char *from, char *to, size_t maxlength, size_t *lencopied)
2105      -{ return (0); }
2106      -
2107      -#else   /* __lint */
2108      -
2109      -#if defined(__amd64)
2110      -
2111 1477          ENTRY(copystr)
2112 1478          pushq   %rbp
2113 1479          movq    %rsp, %rbp
2114 1480  #ifdef DEBUG
2115 1481          movq    kernelbase(%rip), %rax
2116 1482          cmpq    %rax, %rdi              /* %rdi = from */
2117 1483          jb      0f
2118 1484          cmpq    %rax, %rsi              /* %rsi = to */
2119 1485          jnb     1f
2120 1486  0:      leaq    .copystr_panic_msg(%rip), %rdi
(44 lines elided)
2165 1531          cmpl    $0, %r10d
2166 1532          jz      copystr_done
2167 1533          SMAP_ENABLE_INSTR(7)
2168 1534  
2169 1535  copystr_done:
2170 1536          movq    %r11, T_LOFAULT(%r9)    /* restore the original lofault */
2171 1537          leave
2172 1538          ret
2173 1539          SET_SIZE(copystr)
2174 1540  
2175      -#elif defined(__i386)
2176      -
2177      -#define ARG_FROM        8
2178      -#define ARG_TO          12
2179      -#define ARG_MAXLEN      16
2180      -#define ARG_LENCOPIED   20
2181      -
2182      -        ENTRY(copystr)
2183      -#ifdef DEBUG
2184      -        pushl   %ebp
2185      -        movl    %esp, %ebp
2186      -        movl    kernelbase, %eax
2187      -        cmpl    %eax, ARG_FROM(%esp)
2188      -        jb      0f
2189      -        cmpl    %eax, ARG_TO(%esp)
2190      -        jnb     1f
2191      -0:      pushl   $.copystr_panic_msg
2192      -        call    panic
2193      -1:      popl    %ebp
2194      -#endif
2195      -        /* get the current lofault address */
2196      -        movl    %gs:CPU_THREAD, %eax
2197      -        movl    T_LOFAULT(%eax), %eax
2198      -do_copystr:
2199      -        pushl   %ebp                    /* setup stack frame */
2200      -        movl    %esp, %ebp
2201      -        pushl   %ebx                    /* save registers */
2202      -        pushl   %edi
2203      -
2204      -        movl    %gs:CPU_THREAD, %ebx
2205      -        movl    T_LOFAULT(%ebx), %edi
2206      -        pushl   %edi                    /* save the current lofault */
2207      -        movl    %eax, T_LOFAULT(%ebx)   /* new lofault */
2208      -
2209      -        movl    ARG_MAXLEN(%ebp), %ecx
2210      -        cmpl    $0, %ecx
2211      -        je      copystr_enametoolong    /* maxlength == 0 */
2212      -
2213      -        movl    ARG_FROM(%ebp), %ebx    /* source address */
2214      -        movl    ARG_TO(%ebp), %edx      /* destination address */
2215      -
2216      -copystr_loop:
2217      -        decl    %ecx
2218      -        movb    (%ebx), %al
2219      -        incl    %ebx
2220      -        movb    %al, (%edx)
2221      -        incl    %edx
2222      -        cmpb    $0, %al
2223      -        je      copystr_null            /* null char */
2224      -        cmpl    $0, %ecx
2225      -        jne     copystr_loop
2226      -
2227      -copystr_enametoolong:
2228      -        movl    $ENAMETOOLONG, %eax
2229      -        jmp     copystr_out
2230      -
2231      -copystr_null:
2232      -        xorl    %eax, %eax              /* no error */
2233      -
2234      -copystr_out:
2235      -        cmpl    $0, ARG_LENCOPIED(%ebp) /* want length? */
2236      -        je      copystr_done            /* no */
2237      -        movl    ARG_MAXLEN(%ebp), %edx
2238      -        subl    %ecx, %edx              /* compute length and store it */
2239      -        movl    ARG_LENCOPIED(%ebp), %ecx
2240      -        movl    %edx, (%ecx)
2241      -
2242      -copystr_done:
2243      -        popl    %edi
2244      -        movl    %gs:CPU_THREAD, %ebx
2245      -        movl    %edi, T_LOFAULT(%ebx)   /* restore the original lofault */
2246      -
2247      -        popl    %edi
2248      -        popl    %ebx
2249      -        popl    %ebp
2250      -        ret
2251      -        SET_SIZE(copystr)
2252      -
2253      -#undef  ARG_FROM
2254      -#undef  ARG_TO
2255      -#undef  ARG_MAXLEN
2256      -#undef  ARG_LENCOPIED
2257      -
2258      -#endif  /* __i386 */
2259      -#endif  /* __lint */
2260      -
2261 1541  /*
2262 1542   * Copy a null terminated string from the user address space into
2263 1543   * the kernel address space.
2264 1544   */
2265 1545  
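
Note: copyinstr() has three outcomes, visible in the code below and in the
copystr logic above. A usage sketch (caller name hypothetical, kernel context
assumed):

    #include <stddef.h>

    /* Illustrative only: the three outcomes of copyinstr(). */
    static int
    get_path(const char *upath, char *kpath, size_t klen)
    {
            size_t copied;
            int error = copyinstr(upath, kpath, klen, &copied);

            /*
             * 0:            success; `copied' counts the bytes moved,
             *               including the terminating NUL
             * ENAMETOOLONG: no NUL found within klen bytes
             * EFAULT:       upath was not a valid user address
             */
            return (error);
    }
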
2266      -#if defined(__lint)
2267      -
2268      -/* ARGSUSED */
2269      -int
2270      -copyinstr(const char *uaddr, char *kaddr, size_t maxlength,
2271      -    size_t *lencopied)
2272      -{ return (0); }
2273      -
2274      -#else   /* __lint */
2275      -
2276      -#if defined(__amd64)
2277      -
2278 1546          ENTRY(copyinstr)
2279 1547          pushq   %rbp
2280 1548          movq    %rsp, %rbp
2281 1549          subq    $32, %rsp
2282 1550  
2283 1551          /*
2284 1552           * save args in case we trap and need to rerun as a copyop
2285 1553           */
2286 1554          movq    %rdi, (%rsp)
2287 1555          movq    %rsi, 0x8(%rsp)
(41 lines elided)
2329 1597          movq    0x18(%rsp), %rcx
2330 1598          leave
2331 1599          movq    CP_COPYINSTR(%rax), %rax
2332 1600          INDIRECT_JMP_REG(rax)
2333 1601  
2334 1602  2:      movl    $EFAULT, %eax           /* return EFAULT */
2335 1603          leave
2336 1604          ret
2337 1605          SET_SIZE(copyinstr)
2338 1606  
2339      -#elif defined(__i386)
2340      -
2341      -#define ARG_UADDR       4
2342      -#define ARG_KADDR       8
2343      -
2344      -        ENTRY(copyinstr)
2345      -        movl    kernelbase, %ecx
2346      -#ifdef DEBUG
2347      -        cmpl    %ecx, ARG_KADDR(%esp)
2348      -        jnb     1f
2349      -        pushl   %ebp
2350      -        movl    %esp, %ebp
2351      -        pushl   $.copyinstr_panic_msg
2352      -        call    panic
2353      -1:
2354      -#endif
2355      -        lea     _copyinstr_error, %eax
2356      -        cmpl    %ecx, ARG_UADDR(%esp)   /* test uaddr < kernelbase */
2357      -        jb      do_copystr
2358      -        movl    %gs:CPU_THREAD, %edx
2359      -        jmp     3f
2360      -
2361      -_copyinstr_error:
2362      -        popl    %edi
2363      -        movl    %gs:CPU_THREAD, %edx
2364      -        movl    %edi, T_LOFAULT(%edx)   /* original lofault */
2365      -
2366      -        popl    %edi
2367      -        popl    %ebx
2368      -        popl    %ebp
2369      -3:
2370      -        movl    T_COPYOPS(%edx), %eax
2371      -        cmpl    $0, %eax
2372      -        jz      2f
2373      -        jmp     *CP_COPYINSTR(%eax)
2374      -
2375      -2:      movl    $EFAULT, %eax           /* return EFAULT */
2376      -        ret
2377      -        SET_SIZE(copyinstr)
2378      -
2379      -#undef  ARG_UADDR
2380      -#undef  ARG_KADDR
2381      -
2382      -#endif  /* __i386 */
2383      -#endif  /* __lint */
2384      -
2385 1607  /*
2386 1608   * Copy a null terminated string from the kernel
2387 1609   * address space to the user address space.
2388 1610   */
2389 1611  
2390      -#if defined(__lint)
2391      -
2392      -/* ARGSUSED */
2393      -int
2394      -copyoutstr(const char *kaddr, char *uaddr, size_t maxlength,
2395      -    size_t *lencopied)
2396      -{ return (0); }
2397      -
2398      -#else   /* __lint */
2399      -
2400      -#if defined(__amd64)
2401      -
2402 1612          ENTRY(copyoutstr)
2403 1613          pushq   %rbp
2404 1614          movq    %rsp, %rbp
2405 1615          subq    $32, %rsp
2406 1616  
2407 1617          /*
2408 1618           * save args in case we trap and need to rerun as a copyop
2409 1619           */
2410 1620          movq    %rdi, (%rsp)
2411 1621          movq    %rsi, 0x8(%rsp)
(40 lines elided)
2452 1662          movq    0x18(%rsp), %rcx
2453 1663          leave
2454 1664          movq    CP_COPYOUTSTR(%rax), %rax
2455 1665          INDIRECT_JMP_REG(rax)
2456 1666  
2457 1667  2:      movl    $EFAULT, %eax           /* return EFAULT */
2458 1668          leave
2459 1669          ret
2460 1670          SET_SIZE(copyoutstr)
2461 1671  
2462      -#elif defined(__i386)
2463      -
2464      -#define ARG_KADDR       4
2465      -#define ARG_UADDR       8
2466      -
2467      -        ENTRY(copyoutstr)
2468      -        movl    kernelbase, %ecx
2469      -#ifdef DEBUG
2470      -        cmpl    %ecx, ARG_KADDR(%esp)
2471      -        jnb     1f
2472      -        pushl   %ebp
2473      -        movl    %esp, %ebp
2474      -        pushl   $.copyoutstr_panic_msg
2475      -        call    panic
2476      -1:
2477      -#endif
2478      -        lea     _copyoutstr_error, %eax
2479      -        cmpl    %ecx, ARG_UADDR(%esp)   /* test uaddr < kernelbase */
2480      -        jb      do_copystr
2481      -        movl    %gs:CPU_THREAD, %edx
2482      -        jmp     3f
2483      -
2484      -_copyoutstr_error:
2485      -        popl    %edi
2486      -        movl    %gs:CPU_THREAD, %edx
2487      -        movl    %edi, T_LOFAULT(%edx)   /* restore the original lofault */
2488      -
2489      -        popl    %edi
2490      -        popl    %ebx
2491      -        popl    %ebp
2492      -3:
2493      -        movl    T_COPYOPS(%edx), %eax
2494      -        cmpl    $0, %eax
2495      -        jz      2f
2496      -        jmp     *CP_COPYOUTSTR(%eax)
2497      -
2498      -2:      movl    $EFAULT, %eax           /* return EFAULT */
2499      -        ret
2500      -        SET_SIZE(copyoutstr)
2501      -
2502      -#undef  ARG_KADDR
2503      -#undef  ARG_UADDR
2504      -
2505      -#endif  /* __i386 */
2506      -#endif  /* __lint */
2507      -
2508 1672  /*
2509 1673   * Since all of the fuword() variants are so similar, we have a macro to spit
2510 1674   * them out.  This allows us to create DTrace-unobservable functions easily.
2511 1675   */
2512 1676  
2513      -#if defined(__lint)
2514      -
2515      -#if defined(__amd64)
2516      -
2517      -/* ARGSUSED */
2518      -int
2519      -fuword64(const void *addr, uint64_t *dst)
2520      -{ return (0); }
2521      -
2522      -#endif
2523      -
2524      -/* ARGSUSED */
2525      -int
2526      -fuword32(const void *addr, uint32_t *dst)
2527      -{ return (0); }
2528      -
2529      -/* ARGSUSED */
2530      -int
2531      -fuword16(const void *addr, uint16_t *dst)
2532      -{ return (0); }
2533      -
2534      -/* ARGSUSED */
2535      -int
2536      -fuword8(const void *addr, uint8_t *dst)
2537      -{ return (0); }
2538      -
2539      -#else   /* __lint */
2540      -
2541      -#if defined(__amd64)
2542      -
2543 1677  /*
2544 1678   * Note that we don't save and reload the arguments here
2545 1679   * because their values are not altered in the copy path.
2546 1680   * Additionally, when successful, the smap_enable jmp will
2547 1681   * actually return us to our original caller.
2548 1682   */
2549 1683  
2550 1684  #define FUWORD(NAME, INSTR, REG, COPYOP, DISNUM, EN1, EN2)      \
2551 1685          ENTRY(NAME)                             \
2552 1686          movq    %gs:CPU_THREAD, %r9;            \
(20 lines elided)
2573 1707  2:                                              \
2574 1708          movl    $-1, %eax;                      \
2575 1709          ret;                                    \
2576 1710          SET_SIZE(NAME)
2577 1711  
2578 1712          FUWORD(fuword64, movq, %rax, CP_FUWORD64,8,10,11)
2579 1713          FUWORD(fuword32, movl, %eax, CP_FUWORD32,9,12,13)
2580 1714          FUWORD(fuword16, movw, %ax, CP_FUWORD16,10,14,15)
2581 1715          FUWORD(fuword8, movb, %al, CP_FUWORD8,11,16,17)
2582 1716  
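
Note: a C rendering of one FUWORD() expansion's control flow may help when
reading the macro. Everything below is a hypothetical stand-in, with the
kernel's types simplified; a fault in the protected load is redirected by
t_lofault in the real routine:

    #include <stdint.h>

    /* All names here are hypothetical stand-ins for kernel types. */
    extern uintptr_t kernelbase;
    struct copyops { int (*cp_fuword32)(const void *, uint32_t *); };
    struct kthread { struct copyops *t_copyops; };
    extern struct kthread *curthread;

    /* Sketch of the FUWORD() control flow for the 32-bit case. */
    static int
    fuword32_sketch(const void *addr, uint32_t *dst)
    {
            if ((uintptr_t)addr < kernelbase) {
                    /* t_lofault would redirect a fault here to the
                     * fallback below; no fault means success. */
                    *dst = *(const volatile uint32_t *)addr;
                    return (0);
            }
            /* kernel address (or a fault): try the copyops vector */
            if (curthread->t_copyops != NULL)
                    return (curthread->t_copyops->cp_fuword32(addr, dst));
            return (-1);
    }
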
2583      -#elif defined(__i386)
2584      -
2585      -#define FUWORD(NAME, INSTR, REG, COPYOP)        \
2586      -        ENTRY(NAME)                             \
2587      -        movl    %gs:CPU_THREAD, %ecx;           \
2588      -        movl    kernelbase, %eax;               \
2589      -        cmpl    %eax, 4(%esp);                  \
2590      -        jae     1f;                             \
2591      -        lea     _flt_/**/NAME, %edx;            \
2592      -        movl    %edx, T_LOFAULT(%ecx);          \
2593      -        movl    4(%esp), %eax;                  \
2594      -        movl    8(%esp), %edx;                  \
2595      -        INSTR   (%eax), REG;                    \
2596      -        movl    $0, T_LOFAULT(%ecx);            \
2597      -        INSTR   REG, (%edx);                    \
2598      -        xorl    %eax, %eax;                     \
2599      -        ret;                                    \
2600      -_flt_/**/NAME:                                  \
2601      -        movl    $0, T_LOFAULT(%ecx);            \
2602      -1:                                              \
2603      -        movl    T_COPYOPS(%ecx), %eax;          \
2604      -        cmpl    $0, %eax;                       \
2605      -        jz      2f;                             \
2606      -        jmp     *COPYOP(%eax);                  \
2607      -2:                                              \
2608      -        movl    $-1, %eax;                      \
2609      -        ret;                                    \
2610      -        SET_SIZE(NAME)
2611      -
2612      -        FUWORD(fuword32, movl, %eax, CP_FUWORD32)
2613      -        FUWORD(fuword16, movw, %ax, CP_FUWORD16)
2614      -        FUWORD(fuword8, movb, %al, CP_FUWORD8)
2615      -
2616      -#endif  /* __i386 */
2617      -
2618 1717  #undef  FUWORD
2619 1718  
2620      -#endif  /* __lint */
2621      -
2622 1719  /*
2623 1720   * Set user word.
2624 1721   */
2625 1722  
2626      -#if defined(__lint)
2627      -
2628      -#if defined(__amd64)
2629      -
2630      -/* ARGSUSED */
2631      -int
2632      -suword64(void *addr, uint64_t value)
2633      -{ return (0); }
2634      -
2635      -#endif
2636      -
2637      -/* ARGSUSED */
2638      -int
2639      -suword32(void *addr, uint32_t value)
2640      -{ return (0); }
2641      -
2642      -/* ARGSUSED */
2643      -int
2644      -suword16(void *addr, uint16_t value)
2645      -{ return (0); }
2646      -
2647      -/* ARGSUSED */
2648      -int
2649      -suword8(void *addr, uint8_t value)
2650      -{ return (0); }
2651      -
2652      -#else   /* lint */
2653      -
2654      -#if defined(__amd64)
2655      -
2656 1723  /*
2657 1724   * Note that we don't save and reload the arguments here
2658 1725   * because their values are not altered in the copy path.
2659 1726   */
2660 1727  
2661 1728  #define SUWORD(NAME, INSTR, REG, COPYOP, DISNUM, EN1, EN2)      \
2662 1729          ENTRY(NAME)                             \
2663 1730          movq    %gs:CPU_THREAD, %r9;            \
2664 1731          cmpq    kernelbase(%rip), %rdi;         \
2665 1732          jae     1f;                             \
(17 lines elided)
2683 1750  3:                                              \
2684 1751          movl    $-1, %eax;                      \
2685 1752          ret;                                    \
2686 1753          SET_SIZE(NAME)
2687 1754  
2688 1755          SUWORD(suword64, movq, %rsi, CP_SUWORD64,12,18,19)
2689 1756          SUWORD(suword32, movl, %esi, CP_SUWORD32,13,20,21)
2690 1757          SUWORD(suword16, movw, %si, CP_SUWORD16,14,22,23)
2691 1758          SUWORD(suword8, movb, %sil, CP_SUWORD8,15,24,25)
2692 1759  
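
Note: the stores mirror the loads above in their return convention. A brief
usage sketch (caller name hypothetical, kernel context assumed):

    #include <sys/errno.h>
    #include <stdint.h>

    /* Illustrative only: suword32() returns 0, or -1 on a fault. */
    static int
    post_status(void *uaddr, uint32_t status)
    {
            if (suword32(uaddr, status) != 0)
                    return (EFAULT);        /* caller picks the errno */
            return (0);
    }
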
2693      -#elif defined(__i386)
2694      -
2695      -#define SUWORD(NAME, INSTR, REG, COPYOP)        \
2696      -        ENTRY(NAME)                             \
2697      -        movl    %gs:CPU_THREAD, %ecx;           \
2698      -        movl    kernelbase, %eax;               \
2699      -        cmpl    %eax, 4(%esp);                  \
2700      -        jae     1f;                             \
2701      -        lea     _flt_/**/NAME, %edx;            \
2702      -        movl    %edx, T_LOFAULT(%ecx);          \
2703      -        movl    4(%esp), %eax;                  \
2704      -        movl    8(%esp), %edx;                  \
2705      -        INSTR   REG, (%eax);                    \
2706      -        movl    $0, T_LOFAULT(%ecx);            \
2707      -        xorl    %eax, %eax;                     \
2708      -        ret;                                    \
2709      -_flt_/**/NAME:                                  \
2710      -        movl    $0, T_LOFAULT(%ecx);            \
2711      -1:                                              \
2712      -        movl    T_COPYOPS(%ecx), %eax;          \
2713      -        cmpl    $0, %eax;                       \
2714      -        jz      3f;                             \
2715      -        movl    COPYOP(%eax), %ecx;             \
2716      -        jmp     *%ecx;                          \
2717      -3:                                              \
2718      -        movl    $-1, %eax;                      \
2719      -        ret;                                    \
2720      -        SET_SIZE(NAME)
2721      -
2722      -        SUWORD(suword32, movl, %edx, CP_SUWORD32)
2723      -        SUWORD(suword16, movw, %dx, CP_SUWORD16)
2724      -        SUWORD(suword8, movb, %dl, CP_SUWORD8)
2725      -
2726      -#endif  /* __i386 */
2727      -
2728 1760  #undef  SUWORD
2729 1761  
2730      -#endif  /* __lint */
2731      -
2732      -#if defined(__lint)
2733      -
2734      -#if defined(__amd64)
2735      -
2736      -/*ARGSUSED*/
2737      -void
2738      -fuword64_noerr(const void *addr, uint64_t *dst)
2739      -{}
2740      -
2741      -#endif
2742      -
2743      -/*ARGSUSED*/
2744      -void
2745      -fuword32_noerr(const void *addr, uint32_t *dst)
2746      -{}
2747      -
2748      -/*ARGSUSED*/
2749      -void
2750      -fuword8_noerr(const void *addr, uint8_t *dst)
2751      -{}
2752      -
2753      -/*ARGSUSED*/
2754      -void
2755      -fuword16_noerr(const void *addr, uint16_t *dst)
2756      -{}
2757      -
2758      -#else   /* __lint */
2759      -
2760      -#if defined(__amd64)
2761      -
2762 1762  #define FUWORD_NOERR(NAME, INSTR, REG)          \
2763 1763          ENTRY(NAME)                             \
2764 1764          cmpq    kernelbase(%rip), %rdi;         \
2765 1765          cmovnbq kernelbase(%rip), %rdi;         \
2766 1766          INSTR   (%rdi), REG;                    \
2767 1767          INSTR   REG, (%rsi);                    \
2768 1768          ret;                                    \
2769 1769          SET_SIZE(NAME)
2770 1770  
2771 1771          FUWORD_NOERR(fuword64_noerr, movq, %rax)
2772 1772          FUWORD_NOERR(fuword32_noerr, movl, %eax)
2773 1773          FUWORD_NOERR(fuword16_noerr, movw, %ax)
2774 1774          FUWORD_NOERR(fuword8_noerr, movb, %al)
2775 1775  
2776      -#elif defined(__i386)
2777      -
2778      -#define FUWORD_NOERR(NAME, INSTR, REG)          \
2779      -        ENTRY(NAME)                             \
2780      -        movl    4(%esp), %eax;                  \
2781      -        cmpl    kernelbase, %eax;               \
2782      -        jb      1f;                             \
2783      -        movl    kernelbase, %eax;               \
2784      -1:      movl    8(%esp), %edx;                  \
2785      -        INSTR   (%eax), REG;                    \
2786      -        INSTR   REG, (%edx);                    \
2787      -        ret;                                    \
2788      -        SET_SIZE(NAME)
2789      -
2790      -        FUWORD_NOERR(fuword32_noerr, movl, %ecx)
2791      -        FUWORD_NOERR(fuword16_noerr, movw, %cx)
2792      -        FUWORD_NOERR(fuword8_noerr, movb, %cl)
2793      -
2794      -#endif  /* __i386 */
2795      -
2796 1776  #undef  FUWORD_NOERR
2797 1777  
2798      -#endif  /* __lint */
2799      -
2800      -#if defined(__lint)
2801      -
2802      -#if defined(__amd64)
2803      -
2804      -/*ARGSUSED*/
2805      -void
2806      -suword64_noerr(void *addr, uint64_t value)
2807      -{}
2808      -
2809      -#endif
2810      -
2811      -/*ARGSUSED*/
2812      -void
2813      -suword32_noerr(void *addr, uint32_t value)
2814      -{}
2815      -
2816      -/*ARGSUSED*/
2817      -void
2818      -suword16_noerr(void *addr, uint16_t value)
2819      -{}
2820      -
2821      -/*ARGSUSED*/
2822      -void
2823      -suword8_noerr(void *addr, uint8_t value)
2824      -{}
2825      -
2826      -#else   /* lint */
2827      -
2828      -#if defined(__amd64)
2829      -
2830 1778  #define SUWORD_NOERR(NAME, INSTR, REG)          \
2831 1779          ENTRY(NAME)                             \
2832 1780          cmpq    kernelbase(%rip), %rdi;         \
2833 1781          cmovnbq kernelbase(%rip), %rdi;         \
2834 1782          INSTR   REG, (%rdi);                    \
2835 1783          ret;                                    \
2836 1784          SET_SIZE(NAME)
2837 1785  
2838 1786          SUWORD_NOERR(suword64_noerr, movq, %rsi)
2839 1787          SUWORD_NOERR(suword32_noerr, movl, %esi)
2840 1788          SUWORD_NOERR(suword16_noerr, movw, %si)
2841 1789          SUWORD_NOERR(suword8_noerr, movb, %sil)
2842 1790  
2843      -#elif defined(__i386)
2844      -
2845      -#define SUWORD_NOERR(NAME, INSTR, REG)          \
2846      -        ENTRY(NAME)                             \
2847      -        movl    4(%esp), %eax;                  \
2848      -        cmpl    kernelbase, %eax;               \
2849      -        jb      1f;                             \
2850      -        movl    kernelbase, %eax;               \
2851      -1:                                              \
2852      -        movl    8(%esp), %edx;                  \
2853      -        INSTR   REG, (%eax);                    \
2854      -        ret;                                    \
2855      -        SET_SIZE(NAME)
2856      -
2857      -        SUWORD_NOERR(suword32_noerr, movl, %edx)
2858      -        SUWORD_NOERR(suword16_noerr, movw, %dx)
2859      -        SUWORD_NOERR(suword8_noerr, movb, %dl)
2860      -
2861      -#endif  /* __i386 */
2862      -
2863 1791  #undef  SUWORD_NOERR
2864 1792  
2865      -#endif  /* lint */
2866 1793  
2867      -
2868      -#if defined(__lint)
2869      -
2870      -/*ARGSUSED*/
2871      -int
2872      -subyte(void *addr, uchar_t value)
2873      -{ return (0); }
2874      -
2875      -/*ARGSUSED*/
2876      -void
2877      -subyte_noerr(void *addr, uchar_t value)
2878      -{}
2879      -
2880      -/*ARGSUSED*/
2881      -int
2882      -fulword(const void *addr, ulong_t *valuep)
2883      -{ return (0); }
2884      -
2885      -/*ARGSUSED*/
2886      -void
2887      -fulword_noerr(const void *addr, ulong_t *valuep)
2888      -{}
2889      -
2890      -/*ARGSUSED*/
2891      -int
2892      -sulword(void *addr, ulong_t valuep)
2893      -{ return (0); }
2894      -
2895      -/*ARGSUSED*/
2896      -void
2897      -sulword_noerr(void *addr, ulong_t valuep)
2898      -{}
2899      -
2900      -#else
2901      -
2902 1794          .weak   subyte
2903 1795          subyte=suword8
2904 1796          .weak   subyte_noerr
2905 1797          subyte_noerr=suword8_noerr
2906 1798  
2907      -#if defined(__amd64)
2908      -
2909 1799          .weak   fulword
2910 1800          fulword=fuword64
2911 1801          .weak   fulword_noerr
2912 1802          fulword_noerr=fuword64_noerr
2913 1803          .weak   sulword
2914 1804          sulword=suword64
2915 1805          .weak   sulword_noerr
2916 1806          sulword_noerr=suword64_noerr
2917 1807  
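The weak-alias block above works because on amd64 (LP64) ulong_t is 64 bits wide, so the generic long-sized entry points can resolve directly to the 64-bit primitives, and subyte to suword8. If this were C rather than assembly, the invariant could be spelled out with a compile-time assertion; a hypothetical check using illumos's CTASSERT():

    #include <sys/types.h>
    #include <sys/debug.h>

    /* fulword/sulword may alias the 64-bit routines only if the sizes agree. */
    CTASSERT(sizeof (ulong_t) == sizeof (uint64_t));
    CTASSERT(sizeof (uchar_t) == sizeof (uint8_t));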
2918      -#elif defined(__i386)
2919      -
2920      -        .weak   fulword
2921      -        fulword=fuword32
2922      -        .weak   fulword_noerr
2923      -        fulword_noerr=fuword32_noerr
2924      -        .weak   sulword
2925      -        sulword=suword32
2926      -        .weak   sulword_noerr
2927      -        sulword_noerr=suword32_noerr
2928      -
2929      -#endif /* __i386 */
2930      -
2931      -#endif /* __lint */
2932      -
2933      -#if defined(__lint)
2934      -
2935      -/*
2936      - * Copy a block of storage - must not overlap (from + len <= to).
2937      - * No fault handler installed (to be called under on_fault())
2938      - */
2939      -
2940      -/* ARGSUSED */
2941      -void
2942      -copyout_noerr(const void *kfrom, void *uto, size_t count)
2943      -{}
2944      -
2945      -/* ARGSUSED */
2946      -void
2947      -copyin_noerr(const void *ufrom, void *kto, size_t count)
2948      -{}
2949      -
2950      -/*
2951      - * Zero a block of storage in user space
2952      - */
2953      -
2954      -/* ARGSUSED */
2955      -void
2956      -uzero(void *addr, size_t count)
2957      -{}
2958      -
2959      -/*
2960      - * copy a block of storage in user space
2961      - */
2962      -
2963      -/* ARGSUSED */
2964      -void
2965      -ucopy(const void *ufrom, void *uto, size_t ulength)
2966      -{}
2967      -
2968      -/*
2969      - * copy a string in user space
2970      - */
2971      -
2972      -/* ARGSUSED */
2973      -void
2974      -ucopystr(const char *ufrom, char *uto, size_t umaxlength, size_t *lencopied)
2975      -{}
2976      -
2977      -#else /* __lint */
2978      -
2979      -#if defined(__amd64)
2980      -
2981 1808          ENTRY(copyin_noerr)
2982 1809          movq    kernelbase(%rip), %rax
2983 1810  #ifdef DEBUG
2984 1811          cmpq    %rax, %rsi              /* %rsi = kto */
2985 1812          jae     1f
2986 1813          leaq    .cpyin_ne_pmsg(%rip), %rdi
2987 1814          jmp     call_panic              /* setup stack and call panic */
2988 1815  1:
2989 1816  #endif
2990 1817          cmpq    %rax, %rdi              /* ufrom < kernelbase */
↓ open down ↓ 47 lines elided ↑ open up ↑
3038 1865          cmpq    %rax, %rsi
3039 1866          cmovaeq %rax, %rsi      /* force fault at kernelbase */
3044 1871          /* do_copystr expects the lofault address in %r8 */
3045 1872          /* do_copystr expects a smap-needed flag in %r10 */
3042 1869          xorl    %r10d, %r10d
3043 1870          movq    %gs:CPU_THREAD, %r8
3044 1871          movq    T_LOFAULT(%r8), %r8
3045 1872          jmp     do_copystr
3046 1873          SET_SIZE(ucopystr)
3047 1874  
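Since the _noerr variants install no fault handler of their own, every caller is expected to bracket them with on_fault()/no_fault(); the cmovaeq clamp above only guarantees that a bad user address faults at kernelbase rather than corrupting kernel state. A sketch of the usual calling pattern (the wrapper function is hypothetical; on_fault(), no_fault(), and label_t are the existing kernel interfaces):

    #include <sys/types.h>
    #include <sys/systm.h>
    #include <sys/errno.h>

    /* Hypothetical wrapper showing the on_fault() protocol for copyin_noerr(). */
    static int
    fetch_user_buf(const void *ufrom, void *kto, size_t len)
    {
            label_t ljb;

            if (on_fault(&ljb)) {   /* nonzero: a fault brought us back here */
                    no_fault();
                    return (EFAULT);
            }
            copyin_noerr(ufrom, kto, len);
            no_fault();
            return (0);
    }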
3048      -#elif defined(__i386)
3049      -
3050      -        ENTRY(copyin_noerr)
3051      -        movl    kernelbase, %eax
3052 1875  #ifdef DEBUG
3053      -        cmpl    %eax, 8(%esp)
3054      -        jae     1f
3055      -        pushl   $.cpyin_ne_pmsg
3056      -        call    panic
3057      -1:
3058      -#endif
3059      -        cmpl    %eax, 4(%esp)
3060      -        jb      do_copy
3061      -        movl    %eax, 4(%esp)   /* force fault at kernelbase */
3062      -        jmp     do_copy
3063      -        SET_SIZE(copyin_noerr)
3064      -
3065      -        ENTRY(copyout_noerr)
3066      -        movl    kernelbase, %eax
3067      -#ifdef DEBUG
3068      -        cmpl    %eax, 4(%esp)
3069      -        jae     1f
3070      -        pushl   $.cpyout_ne_pmsg
3071      -        call    panic
3072      -1:
3073      -#endif
3074      -        cmpl    %eax, 8(%esp)
3075      -        jb      do_copy
3076      -        movl    %eax, 8(%esp)   /* force fault at kernelbase */
3077      -        jmp     do_copy
3078      -        SET_SIZE(copyout_noerr)
3079      -
3080      -        ENTRY(uzero)
3081      -        movl    kernelbase, %eax
3082      -        cmpl    %eax, 4(%esp)
3083      -        jb      do_zero
3084      -        movl    %eax, 4(%esp)   /* force fault at kernelbase */
3085      -        jmp     do_zero
3086      -        SET_SIZE(uzero)
3087      -
3088      -        ENTRY(ucopy)
3089      -        movl    kernelbase, %eax
3090      -        cmpl    %eax, 4(%esp)
3091      -        jb      1f
3092      -        movl    %eax, 4(%esp)   /* force fault at kernelbase */
3093      -1:
3094      -        cmpl    %eax, 8(%esp)
3095      -        jb      do_copy
3096      -        movl    %eax, 8(%esp)   /* force fault at kernelbase */
3097      -        jmp     do_copy
3098      -        SET_SIZE(ucopy)
3099      -
3100      -        ENTRY(ucopystr)
3101      -        movl    kernelbase, %eax
3102      -        cmpl    %eax, 4(%esp)
3103      -        jb      1f
3104      -        movl    %eax, 4(%esp)   /* force fault at kernelbase */
3105      -1:
3106      -        cmpl    %eax, 8(%esp)
3107      -        jb      2f
3108      -        movl    %eax, 8(%esp)   /* force fault at kernelbase */
3109      -2:
3110      -        /* do_copystr expects the lofault address in %eax */
3111      -        movl    %gs:CPU_THREAD, %eax
3112      -        movl    T_LOFAULT(%eax), %eax
3113      -        jmp     do_copystr
3114      -        SET_SIZE(ucopystr)
3115      -
3116      -#endif  /* __i386 */
3117      -
3118      -#ifdef DEBUG
3119 1876          .data
3120 1877  .kcopy_panic_msg:
3121 1878          .string "kcopy: arguments below kernelbase"
3122 1879  .bcopy_panic_msg:
3123 1880          .string "bcopy: arguments below kernelbase"
3124 1881  .kzero_panic_msg:
3125 1882          .string "kzero: arguments below kernelbase"
3126 1883  .bzero_panic_msg:
3127 1884          .string "bzero: arguments below kernelbase"
3128 1885  .copyin_panic_msg:
↓ open down ↓ 9 lines elided ↑ open up ↑
3138 1895  .copyinstr_panic_msg:
3139 1896          .string "copyinstr: kaddr argument not in kernel address space"
3140 1897  .copyoutstr_panic_msg:
3141 1898          .string "copyoutstr: kaddr argument not in kernel address space"
3142 1899  .cpyin_ne_pmsg:
3143 1900          .string "copyin_noerr: argument not in kernel address space"
3144 1901  .cpyout_ne_pmsg:
3145 1902          .string "copyout_noerr: argument not in kernel address space"
3146 1903  #endif
3147 1904  
3148      -#endif  /* __lint */
3149      -
3150 1905  /*
3151 1906   * These functions are used for SMAP, supervisor mode access protection. They
3152 1907   * are hotpatched to become real instructions when the system starts up; this
3153 1908   * is done in mlsetup() as a part of enabling the other CR4 related features.
3154 1909   *
3155 1910   * Generally speaking, smap_disable() is a stac instruction and smap_enable()
3156 1911   * is a clac instruction. It's safe to call these any number of times, and in
3157 1912   * fact, out of paranoia, the kernel will likely call them at several points.
3158 1913   */
3159 1914  
3160      -#if defined(__lint)
3161      -
3162      -void
3163      -smap_enable(void)
3164      -{}
3165      -
3166      -void
3167      -smap_disable(void)
3168      -{}
3169      -
3170      -#else
3171      -
3172      -#if defined (__amd64) || defined(__i386)
3173 1915          ENTRY(smap_disable)
3174 1916          nop
3175 1917          nop
3176 1918          nop
3177 1919          ret
3178 1920          SET_SIZE(smap_disable)
3179 1921  
3180 1922          ENTRY(smap_enable)
3181 1923          nop
3182 1924          nop
3183 1925          nop
3184 1926          ret
3185 1927          SET_SIZE(smap_enable)
3186 1928  
3187      -#endif /* __amd64 || __i386 */
3188      -
3189      -#endif /* __lint */
3190      -
3191      -#ifndef __lint
3192      -
3193 1929  .data
3194 1930  .align  4
3195 1931  .globl  _smap_enable_patch_count
3196 1932  .type   _smap_enable_patch_count,@object
3197 1933  .size   _smap_enable_patch_count, 4
3198 1934  _smap_enable_patch_count:
3199 1935          .long   SMAP_ENABLE_COUNT
3200 1936  
3201 1937  .globl  _smap_disable_patch_count
3202 1938  .type   _smap_disable_patch_count,@object
3203 1939  .size   _smap_disable_patch_count, 4
3204 1940  _smap_disable_patch_count:
3205 1941          .long SMAP_DISABLE_COUNT
3206      -
3207      -#endif /* __lint */
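
For orientation, the hotpatch that turns the nop sequences into real instructions happens elsewhere, in the startup path: each _smap_enable_patch_N / _smap_disable_patch_N label marks three bytes of nops to be overwritten with clac (0F 01 CA) or stac (0F 01 CB) respectively. A rough sketch of the enable-side pass, hedged heavily: the loop bound assumes the labels are numbered from 0, kobj_getsymvalue() is the real symbol-lookup routine, and the plain byte stores stand in for a proper text-patching primitive:

    #include <sys/types.h>
    #include <sys/systm.h>
    #include <sys/kobj.h>

    extern uint32_t _smap_enable_patch_count;

    /*
     * Hypothetical sketch: real patching must serialize CPUs and cope
     * with write-protected kernel text, which plain stores do not.
     */
    static void
    patch_smap_enables(void)
    {
            uint_t i;
            char sym[64];

            for (i = 0; i < _smap_enable_patch_count; i++) {
                    uint8_t *p;

                    (void) snprintf(sym, sizeof (sym),
                        "_smap_enable_patch_%u", i);
                    p = (uint8_t *)kobj_getsymvalue(sym, 1);
                    p[0] = 0x0f;    /* clac: 0F 01 CA */
                    p[1] = 0x01;
                    p[2] = 0xca;
            }
    }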
    