Print this page
9059 Simplify SMAP relocations with krtld
Portions contributed by: John Levon <john.levon@joyent.com>

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/intel/ia32/ml/copy.s
          +++ new/usr/src/uts/intel/ia32/ml/copy.s
↓ open down ↓ 28 lines elided ↑ open up ↑
  29   29   */
  30   30  
  31   31  /*       Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.        */
  32   32  /*       Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T          */
  33   33  /*         All Rights Reserved                                          */
  34   34  
  35   35  /*       Copyright (c) 1987, 1988 Microsoft Corporation                 */
  36   36  /*         All Rights Reserved                                          */
  37   37  
  38   38  /*
  39      - * Copyright 2019 Joyent, Inc.
       39 + * Copyright 2020 Joyent, Inc.
  40   40   */
  41   41  
  42   42  #include <sys/errno.h>
  43   43  #include <sys/asm_linkage.h>
  44   44  
  45   45  #include "assym.h"
  46   46  
  47   47  #define KCOPY_MIN_SIZE  128     /* Must be >= 16 bytes */
  48   48  #define XCOPY_MIN_SIZE  128     /* Must be >= 16 bytes */
  49   49  /*
↓ open down ↓ 9 lines elided ↑ open up ↑
  59   59   * access protection -- SMAP. SMAP forces the kernel to set certain bits to
  60   60   * enable access of user pages (AC in rflags, defined as PS_ACHK in
  61   61   * <sys/psw.h>). One of the challenges is that the implementation of many of the
  62   62   * userland copy routines directly use the kernel ones. For example, copyin and
  63   63   * copyout simply go and jump to the do_copy_fault label and traditionally let
  64   64   * those deal with the return for them. In fact, changing that is a can of frame
  65   65   * pointers.
  66   66   *
  67   67   * Rules and Constraints:
  68   68   *
  69      - * 1. For anything that's not in copy.s, we have it do explicit calls to the
  70      - * smap related code. It usually is in a position where it is able to. This is
  71      - * restricted to the following three places: DTrace, resume() in swtch.s and
  72      - * on_fault/no_fault. If you want to add it somewhere else, we should be
  73      - * thinking twice.
       69 + * 1. For anything that's not in copy.s, we have it do explicit smap_disable()
       70 + * or smap_enable() calls.  This is restricted to the following three places:
       71 + * DTrace, resume() in swtch.s and on_fault/no_fault. If you want to add it
       72 + * somewhere else, we should be thinking twice.
  74   73   *
  75   74   * 2. We try to toggle this at the smallest window possible. This means that if
  76   75   * we take a fault, need to try to use a copyop in copyin() or copyout(), or any
  77   76   * other function, we will always leave with SMAP enabled (the kernel cannot
  78   77   * access user pages).
  79   78   *
  80   79   * 3. None of the *_noerr() or ucopy/uzero routines should toggle SMAP. They are
  81   80   * explicitly only allowed to be called while in an on_fault()/no_fault() handler,
  82   81   * which already takes care of ensuring that SMAP is enabled and disabled. Note
  83   82   * this means that when under an on_fault()/no_fault() handler, one must not
  84      - * call the non-*_noeer() routines.
       83 + * call the non-*_noerr() routines.
  85   84   *
  86   85   * 4. The first thing we should do after coming out of an lofault handler is to
  87      - * make sure that we call smap_enable again to ensure that we are safely
       86 + * make sure that we call smap_enable() again to ensure that we are safely
  88   87   * protected, as more often than not, we will have disabled smap to get there.
  89   88   *
  90      - * 5. The SMAP functions, smap_enable and smap_disable may not touch any
  91      - * registers beyond those done by the call and ret. These routines may be called
  92      - * from arbitrary contexts in copy.s where we have slightly more special ABIs in
  93      - * place.
        89 + * 5. smap_enable() and smap_disable() don't exist: calls to these functions
        90 + * generate runtime relocations that are then processed into the necessary
        91 + * clac/stac via the krtld hotinlines mechanism and hotinline_smap().
  94   92   *
  95   93   * 6. For any inline user of SMAP, the appropriate SMAP_ENABLE_INSTR and
  96      - * SMAP_DISABLE_INSTR macro should be used (except for smap_enable() and
  97      - * smap_disable()). If the number of these is changed, you must update the
  98      - * constants SMAP_ENABLE_COUNT and SMAP_DISABLE_COUNT below.
       94 + * SMAP_DISABLE_INSTR macro should be used. If the number of these is changed,
       95 + * you must update the constants SMAP_ENABLE_COUNT and SMAP_DISABLE_COUNT below.
  99   96   *
 100      - * 7. Note, at this time SMAP is not implemented for the 32-bit kernel. There is
 101      - * no known technical reason preventing it from being enabled.
 102      - *
 103      - * 8. Generally this .s file is processed by a K&R style cpp. This means that it
       97 + * 7. Generally this .s file is processed by a K&R style cpp. This means that it
 104   98   * really has a lot of feelings about whitespace. In particular, if you have a
 105   99   * macro FOO with the arguments FOO(1, 3), the second argument is in fact ' 3'.
 106  100   *
 107      - * 9. The smap_enable and smap_disable functions should not generally be called.
 108      - * They exist such that DTrace and on_trap() may use them, that's it.
 109      - *
 110      - * 10. In general, the kernel has its own value for rflags that gets used. This
      101 + * 8. In general, the kernel has its own value for rflags that gets used. This
 111  102   * is maintained in a few different places which vary based on how the thread
 112  103   * comes into existence and whether it's a user thread. In general, when the
 113  104   * kernel takes a trap, it always will set ourselves to a known set of flags,
 114  105   * mainly as part of ENABLE_INTR_FLAGS and F_OFF and F_ON. These ensure that
 115  106   * PS_ACHK is cleared for us. In addition, when using the sysenter instruction,
 116  107   * we mask PS_ACHK off via the AMD_SFMASK MSR. See init_cpu_syscall() for
 117  108   * where that gets masked off.
 118  109   */
 119  110  
 120  111  /*
↓ open down ↓ 1774 lines elided ↑ open up ↑
1895 1886  .copyinstr_panic_msg:
1896 1887          .string "copyinstr: kaddr argument not in kernel address space"
1897 1888  .copyoutstr_panic_msg:
1898 1889          .string "copyoutstr: kaddr argument not in kernel address space"
1899 1890  .cpyin_ne_pmsg:
1900 1891          .string "copyin_noerr: argument not in kernel address space"
1901 1892  .cpyout_ne_pmsg:
1902 1893          .string "copyout_noerr: argument not in kernel address space"
1903 1894  #endif
1904 1895  
1905      -/*
1906      - * These functions are used for SMAP, supervisor mode access protection. They
1907      - * are hotpatched to become real instructions when the system starts up which is
1908      - * done in mlsetup() as a part of enabling the other CR4 related features.
1909      - *
1910      - * Generally speaking, smap_disable() is a stac instruction and smap_enable is a
1911      - * clac instruction. It's safe to call these any number of times, and in fact,
1912      - * out of paranoia, the kernel will likely call it at several points.
1913      - */
1914      -
1915      -        ENTRY(smap_disable)
1916      -        nop
1917      -        nop
1918      -        nop
1919      -        ret
1920      -        SET_SIZE(smap_disable)
1921      -
1922      -        ENTRY(smap_enable)
1923      -        nop
1924      -        nop
1925      -        nop
1926      -        ret
1927      -        SET_SIZE(smap_enable)
1928      -
1929 1896  .data
1930 1897  .align  4
1931 1898  .globl  _smap_enable_patch_count
1932 1899  .type   _smap_enable_patch_count,@object
1933 1900  .size   _smap_enable_patch_count, 4
1934 1901  _smap_enable_patch_count:
1935 1902          .long   SMAP_ENABLE_COUNT
1936 1903  
1937 1904  .globl  _smap_disable_patch_count
1938 1905  .type   _smap_disable_patch_count,@object
1939 1906  .size   _smap_disable_patch_count, 4
1940 1907  _smap_disable_patch_count:
1941 1908          .long SMAP_DISABLE_COUNT
    
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX