8956 Implement KPTI
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Robert Mustacchi <rm@joyent.com>

*** 30,40 ****
   */
  /*
   * Portions Copyright 2009 Advanced Micro Devices, Inc.
   */
  /*
!  * Copyright 2017 Joyent, Inc.
   */
  /*
   * Various routines to handle identification
   * and classification of x86 processors.
   */
--- 30,40 ----
   */
  /*
   * Portions Copyright 2009 Advanced Micro Devices, Inc.
   */
  /*
!  * Copyright 2018 Joyent, Inc.
   */
  /*
   * Various routines to handle identification
   * and classification of x86 processors.
   */
*** 56,65 ****
--- 56,66 ----
  #include <sys/bitmap.h>
  #include <sys/auxv_386.h>
  #include <sys/memnode.h>
  #include <sys/pci_cfgspace.h>
  #include <sys/comm_page.h>
+ #include <sys/mach_mmu.h>
  #include <sys/tsc.h>
  
  #ifdef __xpv
  #include <sys/hypervisor.h>
  #else
*** 81,91 ****
   *
   *  o Determining vendor/model/family/stepping and setting x86_type and
   *    x86_vendor accordingly.
   *  o Processing the feature flags returned by the cpuid instruction while
   *    applying any workarounds or tricks for the specific processor.
!  *  o Mapping the feature flags into Solaris feature bits (X86_*).
   *  o Processing extended feature flags if supported by the processor,
   *    again while applying specific processor knowledge.
   *  o Determining the CMT characteristics of the system.
   *
   * Pass 1 is done on non-boot CPUs during their initialization and the results
--- 82,92 ----
   *
   *  o Determining vendor/model/family/stepping and setting x86_type and
   *    x86_vendor accordingly.
   *  o Processing the feature flags returned by the cpuid instruction while
   *    applying any workarounds or tricks for the specific processor.
!  *  o Mapping the feature flags into illumos feature bits (X86_*).
   *  o Processing extended feature flags if supported by the processor,
   *    again while applying specific processor knowledge.
   *  o Determining the CMT characteristics of the system.
   *
   * Pass 1 is done on non-boot CPUs during their initialization and the results
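
For context, feature bits mapped in this pass are consumed elsewhere in the kernel through is_x86_feature(). A minimal sketch of that pattern, using the X86FSET_PCID bit this change introduces (illustrative only, not part of the diff):

	/* Gate a code path on a feature bit mapped during cpuid pass 1. */
	if (is_x86_feature(x86_featureset, X86FSET_PCID)) {
		/* PCID is available; e.g. tag address spaces with a PCID. */
	}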
*** 120,129 ****
--- 121,138 ----
  uint_t x86_vendor = X86_VENDOR_IntelClone;
  uint_t x86_type = X86_TYPE_OTHER;
  uint_t x86_clflush_size = 0;
  
+ #if defined(__xpv)
+ int x86_use_pcid = 0;
+ int x86_use_invpcid = 0;
+ #else
+ int x86_use_pcid = -1;
+ int x86_use_invpcid = -1;
+ #endif
+ 
  uint_t pentiumpro_bug4046376;
  
  uchar_t x86_featureset[BT_SIZEOFMAP(NUM_X86_FEATURES)];
  
  static char *x86_feature_names[NUM_X86_FEATURES] = {
*** 194,203 ****
--- 203,214 ----
  	"xsaves",
  	"sha",
  	"umip",
  	"pku",
  	"ospke",
+ 	"pcid",
+ 	"invpcid",
  };
  
  boolean_t
  is_x86_feature(void *featureset, uint_t feature)
  {
*** 1296,1305 ****
--- 1307,1320 ----
  	}
  
  	if (ecp->cp_ebx & CPUID_INTC_EBX_7_0_SMEP)
  		add_x86_feature(featureset, X86FSET_SMEP);
  
+ 	if (ecp->cp_ebx & CPUID_INTC_EBX_7_0_INVPCID) {
+ 		add_x86_feature(featureset, X86FSET_INVPCID);
+ 	}
+ 
  	/*
  	 * We check disable_smap here in addition to in startup_smap()
  	 * to ensure CPUs that aren't the boot CPU don't accidentally
  	 * include it in the feature set and thus generate a mismatched
  	 * x86 feature set across CPUs. Note that at this time we only
*** 1498,1507 ****
--- 1513,1529 ----
  				add_x86_feature(featureset,
  				    X86FSET_AVX512FMAPS);
  			}
  		}
  	}
+ 
+ 	if (cpi->cpi_vendor == X86_VENDOR_Intel) {
+ 		if (cp->cp_ecx & CPUID_INTC_ECX_PCID) {
+ 			add_x86_feature(featureset, X86FSET_PCID);
+ 		}
+ 	}
+ 
  	if (cp->cp_ecx & CPUID_INTC_ECX_X2APIC) {
  		add_x86_feature(featureset, X86FSET_X2APIC);
  	}
  	if (cp->cp_edx & CPUID_INTC_EDX_DE) {
  		add_x86_feature(featureset, X86FSET_DE);
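
For reference, the two feature bits consulted above correspond to architectural CPUID bits: PCID is reported in CPUID.01H:ECX bit 17 (CPUID_INTC_ECX_PCID) and INVPCID in CPUID.(EAX=07H,ECX=0):EBX bit 10 (CPUID_INTC_EBX_7_0_INVPCID). A standalone user-level sketch of the same checks, assuming a GCC/clang toolchain whose <cpuid.h> provides __get_cpuid() and __get_cpuid_count() (not part of this change):

#include <stdio.h>
#include <cpuid.h>

int
main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID leaf 1: ECX bit 17 advertises PCID support. */
	if (__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		(void) printf("pcid:    %s\n", (ecx & (1U << 17)) ? "yes" : "no");

	/* CPUID leaf 7, subleaf 0: EBX bit 10 advertises the INVPCID instruction. */
	if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
		(void) printf("invpcid: %s\n", (ebx & (1U << 10)) ? "yes" : "no");

	return (0);
}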
*** 4990,4999 ****
--- 5012,5044 ----
  		no_trap();
  	}
  #endif	/* !__xpv */
  }
  
+ void
+ enable_pcid(void)
+ {
+ 	if (x86_use_pcid == -1)
+ 		x86_use_pcid = is_x86_feature(x86_featureset, X86FSET_PCID);
+ 
+ 	if (x86_use_invpcid == -1) {
+ 		x86_use_invpcid = is_x86_feature(x86_featureset,
+ 		    X86FSET_INVPCID);
+ 	}
+ 
+ 	if (!x86_use_pcid)
+ 		return;
+ 
+ 	/*
+ 	 * Intel say that on setting PCIDE, it immediately starts using the PCID
+ 	 * bits; better make sure there's nothing there.
+ 	 */
+ 	ASSERT((getcr3() & MMU_PAGEOFFSET) == PCID_NONE);
+ 
+ 	setcr4(getcr4() | CR4_PCIDE);
+ }
+ 
  /*
   * Setup necessary registers to enable XSAVE feature on this processor.
   * This function needs to be called early enough, so that no xsave/xrstor
   * ops will execute on the processor before the MSRs are properly set up.
   *
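
As background for the ASSERT in enable_pcid(): once CR4.PCIDE is set, the low 12 bits of CR3 carry the current PCID, and per the Intel SDM a write that sets PCIDE while those bits are nonzero raises #GP, hence the check that CR3 currently holds PCID_NONE. A minimal sketch of how a CR3 value carrying a PCID is composed under the standard x86 layout; cr3_with_pcid and CR3_NOFLUSH are illustrative names, not identifiers from this change:

#include <stdint.h>

#define	PCID_MASK	0xfffULL	/* CR3[11:0]: current PCID when CR4.PCIDE = 1 */
#define	CR3_NOFLUSH	(1ULL << 63)	/* set on MOV to CR3: keep the new PCID's TLB entries */

/* Illustrative only: combine a top-level page-table address with a PCID. */
static inline uint64_t
cr3_with_pcid(uint64_t pagetable_pa, uint64_t pcid, int noflush)
{
	uint64_t cr3 = (pagetable_pa & ~PCID_MASK) | (pcid & PCID_MASK);

	return (noflush ? (cr3 | CR3_NOFLUSH) : cr3);
}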