8956 Implement KPTI
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Robert Mustacchi <rm@joyent.com>

*** 22,31 ****
--- 22,32 ----
   * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
   * Use is subject to license terms.
   */
  
  /*
   * Copyright (c) 2014 by Delphix. All rights reserved.
+  * Copyright 2018 Joyent, Inc.
   */
  
  #ifndef _VM_HTABLE_H
  #define _VM_HTABLE_H
*** 39,49 ****
  
  extern void atomic_andb(uint8_t *addr, uint8_t value);
  extern void atomic_orb(uint8_t *addr, uint8_t value);
  extern void atomic_inc16(uint16_t *addr);
  extern void atomic_dec16(uint16_t *addr);
- extern void mmu_tlbflush_entry(caddr_t addr);
  
  /*
   * Each hardware page table has an htable_t describing it.
   *
   * We use a reference counter mechanism to detect when we can free an htable.
--- 40,49 ----
*** 83,98 ****
  typedef struct htable htable_t;
  
  /*
   * Flags values for htable ht_flags field:
   *
!  * HTABLE_VLP - this is the top level htable of a VLP HAT.
   *
   * HTABLE_SHARED_PFN - this htable had its PFN assigned from sharing another
   *	htable. Used by hat_share() for ISM.
   */
! #define HTABLE_VLP		(0x01)
  #define HTABLE_SHARED_PFN	(0x02)
  
  /*
   * The htable hash table hashing function.  The 28 is so that high
   * order bits are include in the hash index to skew the wrap
--- 83,99 ----
  typedef struct htable htable_t;
  
  /*
   * Flags values for htable ht_flags field:
   *
!  * HTABLE_COPIED - This is the top level htable of a HAT being used with per-CPU
!  *	pagetables.
   *
   * HTABLE_SHARED_PFN - this htable had its PFN assigned from sharing another
   *	htable. Used by hat_share() for ISM.
   */
! #define HTABLE_COPIED		(0x01)
  #define HTABLE_SHARED_PFN	(0x02)
  
  /*
   * The htable hash table hashing function.  The 28 is so that high
   * order bits are include in the hash index to skew the wrap
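For illustration, the renamed flag is used the same way the old one was: a copied (per-CPU pagetable) top-level htable is recognized by testing HTABLE_COPIED in ht_flags, exactly where HTABLE_VLP was tested before. The sketch below is hypothetical and not part of this change; the helper name and the simplified struct are invented, only the flag values come from the header.

/* Hypothetical sketch only: testing the HTABLE_COPIED flag. */
#include <stdint.h>

#define	HTABLE_COPIED		(0x01)	/* top level htable of a per-CPU HAT */
#define	HTABLE_SHARED_PFN	(0x02)	/* PFN shared via hat_share() for ISM */

struct htable_example {			/* simplified stand-in for htable_t */
	uint16_t	ht_flags;
};

/* Returns nonzero if this is the copied top-level htable of a per-CPU HAT. */
static int
htable_is_copied(const struct htable_example *ht)
{
	return ((ht->ht_flags & HTABLE_COPIED) != 0);
}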
*** 104,121 ****
  #define HTABLE_HASH(hat, va, lvl) \
  	((((va) >> LEVEL_SHIFT(1)) + ((va) >> 28) + (lvl) + \
  	((uintptr_t)(hat) >> 4)) & ((hat)->hat_num_hash - 1))
  
  /*
!  * Each CPU gets a unique hat_cpu_info structure in cpu_hat_info.
   */
  struct hat_cpu_info {
  	kmutex_t hci_mutex;	/* mutex to ensure sequential usage */
  #if defined(__amd64)
! 	pfn_t hci_vlp_pfn;		/* pfn of hci_vlp_l3ptes */
! 	x86pte_t *hci_vlp_l3ptes;	/* VLP Level==3 pagetable (top) */
! 	x86pte_t *hci_vlp_l2ptes;	/* VLP Level==2 pagetable */
  #endif	/* __amd64 */
  };
  
  /*
--- 105,127 ----
  #define HTABLE_HASH(hat, va, lvl) \
  	((((va) >> LEVEL_SHIFT(1)) + ((va) >> 28) + (lvl) + \
  	((uintptr_t)(hat) >> 4)) & ((hat)->hat_num_hash - 1))
  
  /*
!  * Each CPU gets a unique hat_cpu_info structure in cpu_hat_info. For more
!  * information on its use and members, see uts/i86pc/vm/hat_i86.c.
   */
  struct hat_cpu_info {
  	kmutex_t hci_mutex;	/* mutex to ensure sequential usage */
  #if defined(__amd64)
! 	pfn_t hci_pcp_l3pfn;		/* pfn of hci_pcp_l3ptes */
! 	pfn_t hci_pcp_l2pfn;		/* pfn of hci_pcp_l2ptes */
! 	x86pte_t *hci_pcp_l3ptes;	/* PCP Level==3 pagetable (top) */
! 	x86pte_t *hci_pcp_l2ptes;	/* PCP Level==2 pagetable */
! 	struct hat *hci_user_hat;	/* CPU specific HAT */
! 	pfn_t hci_user_l3pfn;		/* pfn of hci_user_l3ptes */
! 	x86pte_t *hci_user_l3ptes;	/* PCP User L3 pagetable */
  #endif	/* __amd64 */
  };
  
  /*
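A hedged illustration of how the new hat_cpu_info members relate: per the comments above, each hci_*_pfn caches the page frame of the page mapped by the matching hci_*_ptes pointer, and hci_user_hat is a CPU-specific HAT added for the per-CPU user pagetables. The check below is not part of this changeset; it only restates that pfn/ptes pairing using the existing hat_getpfnum() and kas interfaces, and it assumes an amd64 build where these members exist.

/*
 * Illustrative only (not in this changeset): verify that the cached
 * hci_pcp_*pfn values match the frames actually backing the hci_pcp_*ptes
 * mappings, by looking the virtual addresses back up in the kernel HAT.
 */
#include <sys/types.h>
#include <sys/debug.h>
#include <vm/as.h>
#include <vm/hat.h>
#include <vm/htable.h>

static void
hci_pcp_verify(const struct hat_cpu_info *hci)
{
	ASSERT3U(hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_pcp_l3ptes),
	    ==, hci->hci_pcp_l3pfn);
	ASSERT3U(hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_pcp_l2ptes),
	    ==, hci->hci_pcp_l2pfn);
}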
*** 125,135 ****
   * next page at that level.
   *
   * XX64 - The check for the VA hole needs to be better generalized.
   */
  #if defined(__amd64)
! #define HTABLE_NUM_PTES(ht)	(((ht)->ht_flags & HTABLE_VLP) ? 4 : 512)
  
  #define HTABLE_LAST_PAGE(ht) \
  	((ht)->ht_level == mmu.max_level ? ((uintptr_t)0UL - MMU_PAGESIZE) :\
  	((ht)->ht_vaddr - MMU_PAGESIZE + \
  	((uintptr_t)HTABLE_NUM_PTES(ht) << LEVEL_SHIFT((ht)->ht_level))))
--- 131,142 ----
   * next page at that level.
   *
   * XX64 - The check for the VA hole needs to be better generalized.
   */
  #if defined(__amd64)
! #define HTABLE_NUM_PTES(ht)	(((ht)->ht_flags & HTABLE_COPIED) ? \
! 	(((ht)->ht_level == mmu.max_level) ? 512 : 4) : 512)
  
  #define HTABLE_LAST_PAGE(ht) \
  	((ht)->ht_level == mmu.max_level ? ((uintptr_t)0UL - MMU_PAGESIZE) :\
  	((ht)->ht_vaddr - MMU_PAGESIZE + \
  	((uintptr_t)HTABLE_NUM_PTES(ht) << LEVEL_SHIFT((ht)->ht_level))))
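Read plainly, the reworked HTABLE_NUM_PTES() says: a copied (per-CPU) htable at the top level now holds a full 512 entries, a copied htable below the top level holds only 4, and every other htable still holds 512. A standalone restatement of that logic, purely for illustration; the function, its parameters, and the example max_level value of 3 (4-level paging) are assumptions, not part of the change.

/* Hypothetical restatement of the new HTABLE_NUM_PTES() behaviour. */
#include <stdio.h>

#define	EX_HTABLE_COPIED	0x01		/* mirrors HTABLE_COPIED */

static unsigned int
example_num_ptes(unsigned int ht_flags, unsigned int ht_level,
    unsigned int max_level)
{
	if (ht_flags & EX_HTABLE_COPIED) {
		/* Copied top level is a full page; lower copied levels keep 4. */
		return (ht_level == max_level ? 512 : 4);
	}
	return (512);				/* ordinary htables: unchanged */
}

int
main(void)
{
	/* Assuming 4-level paging, where the top level is 3. */
	printf("%u\n", example_num_ptes(EX_HTABLE_COPIED, 3, 3));	/* 512 */
	printf("%u\n", example_num_ptes(EX_HTABLE_COPIED, 2, 3));	/* 4 */
	printf("%u\n", example_num_ptes(0, 2, 3));			/* 512 */
	return (0);
}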