Print this page
8956 Implement KPTI
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Robert Mustacchi <rm@joyent.com>

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/i86pc/vm/htable.h
          +++ new/usr/src/uts/i86pc/vm/htable.h
↓ open down ↓ 16 lines elided ↑ open up ↑
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
  23   23   * Use is subject to license terms.
  24   24   */
  25   25  /*
  26   26   * Copyright (c) 2014 by Delphix. All rights reserved.
       27 + * Copyright 2018 Joyent, Inc.
  27   28   */
  28   29  
  29   30  #ifndef _VM_HTABLE_H
  30   31  #define _VM_HTABLE_H
  31   32  
  32   33  #ifdef  __cplusplus
  33   34  extern "C" {
  34   35  #endif
  35   36  
  36   37  #if defined(__GNUC__) && defined(_ASM_INLINES) && defined(_KERNEL)
  37   38  #include <asm/htable.h>
  38   39  #endif
  39   40  
  40   41  extern void atomic_andb(uint8_t *addr, uint8_t value);
  41   42  extern void atomic_orb(uint8_t *addr, uint8_t value);
  42   43  extern void atomic_inc16(uint16_t *addr);
  43   44  extern void atomic_dec16(uint16_t *addr);
  44      -extern void mmu_tlbflush_entry(caddr_t addr);
  45   45  
  46   46  /*
  47   47   * Each hardware page table has an htable_t describing it.
  48   48   *
  49   49   * We use a reference counter mechanism to detect when we can free an htable.
  50   50   * In the implementation the reference count is split into 2 separate counters:
  51   51   *
  52   52   *      ht_busy is a traditional reference count of uses of the htable pointer
  53   53   *
  54   54   *      ht_valid_cnt is a count of how many references are implied by valid PTE/PTP
↓ open down ↓ 23 lines elided ↑ open up ↑
  78   78          pfn_t           ht_pfn;         /* pfn of page of the pagetable */
  79   79          struct htable   *ht_prev;       /* backward link for hash table */
  80   80          struct htable   *ht_parent;     /* htable that points to this htable */
  81   81          struct htable   *ht_shares;     /* for HTABLE_SHARED_PFN only */
  82   82  };
  83   83  typedef struct htable htable_t;
  84   84  
  85   85  /*
  86   86   * Flags values for htable ht_flags field:
  87   87   *
  88      - * HTABLE_VLP - this is the top level htable of a VLP HAT.
       88 + * HTABLE_COPIED - This is the top level htable of a HAT being used with per-CPU
       89 + *      pagetables.
  89   90   *
  90   91   * HTABLE_SHARED_PFN - this htable had its PFN assigned from sharing another
  91   92   *      htable. Used by hat_share() for ISM.
  92   93   */
  93      -#define HTABLE_VLP              (0x01)
       94 +#define HTABLE_COPIED           (0x01)
  94   95  #define HTABLE_SHARED_PFN       (0x02)
  95   96  
  96   97  /*
  97   98   * The htable hash table hashing function.  The 28 is so that high
  98   99   * order bits are included in the hash index to skew the wrap
  99  100   * around of addresses. Even though the hash buckets are stored per
 100  101   * hat we include the value of hat pointer in the hash function so
 101  102   * that the secondary hash for the htable mutex winds up being different in
 102  103   * every address space.
 103  104   */
 104  105  #define HTABLE_HASH(hat, va, lvl)                                       \
 105  106          ((((va) >> LEVEL_SHIFT(1)) + ((va) >> 28) + (lvl) +             \
 106  107          ((uintptr_t)(hat) >> 4)) & ((hat)->hat_num_hash - 1))
 107  108  
 108  109  /*
 109      - * Each CPU gets a unique hat_cpu_info structure in cpu_hat_info.
      110 + * Each CPU gets a unique hat_cpu_info structure in cpu_hat_info. For more
      111 + * information on its use and members, see uts/i86pc/vm/hat_i86.c.
 110  112   */
 111  113  struct hat_cpu_info {
 112  114          kmutex_t hci_mutex;             /* mutex to ensure sequential usage */
 113  115  #if defined(__amd64)
 114      -        pfn_t   hci_vlp_pfn;            /* pfn of hci_vlp_l3ptes */
 115      -        x86pte_t *hci_vlp_l3ptes;       /* VLP Level==3 pagetable (top) */
 116      -        x86pte_t *hci_vlp_l2ptes;       /* VLP Level==2 pagetable */
      116 +        pfn_t   hci_pcp_l3pfn;          /* pfn of hci_pcp_l3ptes */
      117 +        pfn_t   hci_pcp_l2pfn;          /* pfn of hci_pcp_l2ptes */
      118 +        x86pte_t *hci_pcp_l3ptes;       /* PCP Level==3 pagetable (top) */
      119 +        x86pte_t *hci_pcp_l2ptes;       /* PCP Level==2 pagetable */
      120 +        struct hat *hci_user_hat;       /* CPU specific HAT */
      121 +        pfn_t   hci_user_l3pfn;         /* pfn of hci_user_l3ptes */
      122 +        x86pte_t *hci_user_l3ptes;      /* PCP User L3 pagetable */
 117  123  #endif  /* __amd64 */
 118  124  };
 119  125  
 120  126  
 121  127  /*
 122  128   * Compute the last page aligned VA mapped by an htable.
 123  129   *
 124  130   * Given a va and a level, compute the virtual address of the start of the
 125  131   * next page at that level.
 126  132   *
 127  133   * XX64 - The check for the VA hole needs to be better generalized.
 128  134   */
 129  135  #if defined(__amd64)
 130      -#define HTABLE_NUM_PTES(ht)     (((ht)->ht_flags & HTABLE_VLP) ? 4 : 512)
      136 +#define HTABLE_NUM_PTES(ht)     (((ht)->ht_flags & HTABLE_COPIED) ? \
      137 +        (((ht)->ht_level == mmu.max_level) ? 512 : 4) : 512)
 131  138  
 132  139  #define HTABLE_LAST_PAGE(ht)                                            \
 133  140          ((ht)->ht_level == mmu.max_level ? ((uintptr_t)0UL - MMU_PAGESIZE) :\
 134  141          ((ht)->ht_vaddr - MMU_PAGESIZE +                                \
 135  142          ((uintptr_t)HTABLE_NUM_PTES(ht) << LEVEL_SHIFT((ht)->ht_level))))
 136  143  
 137  144  #define NEXT_ENTRY_VA(va, l)    \
 138  145          ((va & LEVEL_MASK(l)) + LEVEL_SIZE(l) == mmu.hole_start ?       \
 139  146          mmu.hole_end : (va & LEVEL_MASK(l)) + LEVEL_SIZE(l))
 140  147  
↓ open down ↓ 202 lines elided ↑ open up ↑
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX