8956 Implement KPTI
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Robert Mustacchi <rm@joyent.com>

          --- old/usr/src/uts/i86pc/os/cpuid.c
          +++ new/usr/src/uts/i86pc/os/cpuid.c
[ 24 lines elided ]
  25   25   * Copyright 2014 Josef "Jeff" Sipek <jeffpc@josefsipek.net>
  26   26   */
  27   27  /*
  28   28   * Copyright (c) 2010, Intel Corporation.
  29   29   * All rights reserved.
  30   30   */
  31   31  /*
  32   32   * Portions Copyright 2009 Advanced Micro Devices, Inc.
  33   33   */
  34   34  /*
  35      - * Copyright 2017 Joyent, Inc.
       35 + * Copyright 2018 Joyent, Inc.
  36   36   */
  37   37  /*
  38   38   * Various routines to handle identification
  39   39   * and classification of x86 processors.
  40   40   */
  41   41  
  42   42  #include <sys/types.h>
  43   43  #include <sys/archsystm.h>
  44   44  #include <sys/x86_archext.h>
  45   45  #include <sys/kmem.h>
[ 5 lines elided ]
  51   51  #include <sys/processor.h>
  52   52  #include <sys/sysmacros.h>
  53   53  #include <sys/pg.h>
  54   54  #include <sys/fp.h>
  55   55  #include <sys/controlregs.h>
  56   56  #include <sys/bitmap.h>
  57   57  #include <sys/auxv_386.h>
  58   58  #include <sys/memnode.h>
  59   59  #include <sys/pci_cfgspace.h>
  60   60  #include <sys/comm_page.h>
       61 +#include <sys/mach_mmu.h>
  61   62  #include <sys/tsc.h>
  62   63  
  63   64  #ifdef __xpv
  64   65  #include <sys/hypervisor.h>
  65   66  #else
  66   67  #include <sys/ontrap.h>
  67   68  #endif
  68   69  
  69   70  /*
  70   71   * Pass 0 of cpuid feature analysis happens in locore. It contains special code
[ 5 lines elided ]
  76   77   * for the boot CPU and does the basic analysis that the early kernel needs.
  77   78   * x86_featureset is set based on the return value of cpuid_pass1() of the boot
  78   79   * CPU.
  79   80   *
  80   81   * Pass 1 includes:
  81   82   *
  82   83   *      o Determining vendor/model/family/stepping and setting x86_type and
  83   84   *        x86_vendor accordingly.
  84   85   *      o Processing the feature flags returned by the cpuid instruction while
  85   86   *        applying any workarounds or tricks for the specific processor.
  86      - *      o Mapping the feature flags into Solaris feature bits (X86_*).
       87 + *      o Mapping the feature flags into illumos feature bits (X86_*).
  87   88   *      o Processing extended feature flags if supported by the processor,
  88   89   *        again while applying specific processor knowledge.
  89   90   *      o Determining the CMT characteristics of the system.
  90   91   *
  91   92   * Pass 1 is done on non-boot CPUs during their initialization and the results
  92   93   * are used only as a meager attempt at ensuring that all processors within the
  93   94   * system support the same features.
  94   95   *
  95   96   * Pass 2 of cpuid feature analysis happens just at the beginning
  96   97   * of startup().  It just copies in and corrects the remainder
[ 18 lines elided ]
 115  116   *
 116  117   * NOTE: The accessor functions (cpuid_get*) are aware of, and ASSERT upon,
 117  118   * the pass numbers.  Accordingly, changes to the pass code may require changes
 118  119   * to the accessor code.
 119  120   */
 120  121  
 121  122  uint_t x86_vendor = X86_VENDOR_IntelClone;
 122  123  uint_t x86_type = X86_TYPE_OTHER;
 123  124  uint_t x86_clflush_size = 0;
 124  125  
      126 +#if defined(__xpv)
      127 +int x86_use_pcid = 0;
      128 +int x86_use_invpcid = 0;
      129 +#else
      130 +int x86_use_pcid = -1;
      131 +int x86_use_invpcid = -1;
      132 +#endif
      133 +
 125  134  uint_t pentiumpro_bug4046376;
 126  135  
 127  136  uchar_t x86_featureset[BT_SIZEOFMAP(NUM_X86_FEATURES)];
 128  137  
 129  138  static char *x86_feature_names[NUM_X86_FEATURES] = {
 130  139          "lgpg",
 131  140          "tsc",
 132  141          "msr",
 133  142          "mtrr",
 134  143          "pge",
[ 54 lines elided ]
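Reviewer note: the x86_use_pcid and x86_use_invpcid tunables added in the hunk above default to -1 ("decide from the CPUID results in enable_pcid()") on bare metal and are forced to 0 under Xen. As an illustrative aside, not part of this change, an administrator who wanted PCID left off on a bare-metal system could pin both tunables from /etc/system, assuming it is parsed before enable_pcid() runs on the boot CPU:

    * Hypothetical /etc/system fragment: force PCID/INVPCID off
    * regardless of hardware support (lines starting with '*' are comments).
    set x86_use_pcid = 0
    set x86_use_invpcid = 0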
 189  198          "avx512_vpopcntdq",
 190  199          "avx512_4vnniw",
 191  200          "avx512_4fmaps",
 192  201          "xsaveopt",
 193  202          "xsavec",
 194  203          "xsaves",
 195  204          "sha",
 196  205          "umip",
 197  206          "pku",
 198  207          "ospke",
      208 +        "pcid",
      209 +        "invpcid",
 199  210  };
 200  211  
 201  212  boolean_t
 202  213  is_x86_feature(void *featureset, uint_t feature)
 203  214  {
 204  215          ASSERT(feature < NUM_X86_FEATURES);
 205  216          return (BT_TEST((ulong_t *)featureset, feature));
 206  217  }
 207  218  
 208  219  void
[ 1082 lines elided ]
1291 1302                          ecp->cp_ebx &= ~CPUID_INTC_EBX_7_0_AVX2;
1292 1303                          ecp->cp_ebx &= ~CPUID_INTC_EBX_7_0_MPX;
1293 1304                          ecp->cp_ebx &= ~CPUID_INTC_EBX_7_0_ALL_AVX512;
1294 1305                          ecp->cp_ecx &= ~CPUID_INTC_ECX_7_0_ALL_AVX512;
1295 1306                          ecp->cp_edx &= ~CPUID_INTC_EDX_7_0_ALL_AVX512;
1296 1307                  }
1297 1308  
1298 1309                  if (ecp->cp_ebx & CPUID_INTC_EBX_7_0_SMEP)
1299 1310                          add_x86_feature(featureset, X86FSET_SMEP);
1300 1311  
     1312 +                if (ecp->cp_ebx & CPUID_INTC_EBX_7_0_INVPCID) {
     1313 +                        add_x86_feature(featureset, X86FSET_INVPCID);
     1314 +                }
     1315 +
1301 1316                  /*
1302 1317                   * We check disable_smap here in addition to in startup_smap()
1303 1318                   * to ensure CPUs that aren't the boot CPU don't accidentally
1304 1319                   * include it in the feature set and thus generate a mismatched
1305 1320                   * x86 feature set across CPUs. Note that at this time we only
1306 1321                   * enable SMAP for the 64-bit kernel.
1307 1322                   */
1308 1323  #if defined(__amd64)
1309 1324                  if (ecp->cp_ebx & CPUID_INTC_EBX_7_0_SMAP &&
1310 1325                      disable_smap == 0)
[ 182 lines elided ]
1493 1508                                      CPUID_INTC_EDX_7_0_AVX5124NNIW)
1494 1509                                          add_x86_feature(featureset,
1495 1510                                              X86FSET_AVX512NNIW);
1496 1511                                  if (cpi->cpi_std[7].cp_edx &
1497 1512                                      CPUID_INTC_EDX_7_0_AVX5124FMAPS)
1498 1513                                          add_x86_feature(featureset,
1499 1514                                              X86FSET_AVX512FMAPS);
1500 1515                          }
1501 1516                  }
1502 1517          }
     1518 +
     1519 +        if (cpi->cpi_vendor == X86_VENDOR_Intel) {
     1520 +                if (cp->cp_ecx & CPUID_INTC_ECX_PCID) {
     1521 +                        add_x86_feature(featureset, X86FSET_PCID);
     1522 +                }
     1523 +        }
     1524 +
1503 1525          if (cp->cp_ecx & CPUID_INTC_ECX_X2APIC) {
1504 1526                  add_x86_feature(featureset, X86FSET_X2APIC);
1505 1527          }
1506 1528          if (cp->cp_edx & CPUID_INTC_EDX_DE) {
1507 1529                  add_x86_feature(featureset, X86FSET_DE);
1508 1530          }
1509 1531  #if !defined(__xpv)
1510 1532          if (cp->cp_ecx & CPUID_INTC_ECX_MON) {
1511 1533  
1512 1534                  /*
[ 3472 lines elided ]
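Reviewer note: the feature bits consulted in the hunks above are CPUID leaf 7, subleaf 0, EBX bit 10 (INVPCID) and CPUID leaf 1, ECX bit 17 (PCID, which this change only trusts on Intel parts). A minimal user-level probe of the same bits, shown purely as an illustrative sketch using GCC/clang's <cpuid.h> and not anything in this change, might look like:

    #include <cpuid.h>
    #include <stdio.h>

    int
    main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            /* CPUID leaf 1: ECX bit 17 advertises PCID. */
            if (__get_cpuid(1, &eax, &ebx, &ecx, &edx) != 0)
                    (void) printf("pcid:    %s\n",
                        (ecx & (1U << 17)) ? "yes" : "no");

            /* CPUID leaf 7, subleaf 0: EBX bit 10 advertises INVPCID. */
            if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) != 0)
                    (void) printf("invpcid: %s\n",
                        (ebx & (1U << 10)) ? "yes" : "no");

            return (0);
    }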
4985 5007                                  reg &= ~(AMD_ACTONCMPHALT_MASK <<
4986 5008                                      AMD_ACTONCMPHALT_SHIFT);
4987 5009                                  wrmsr(MSR_AMD_INT_PENDING_CMP_HALT, reg);
4988 5010                          }
4989 5011                  }
4990 5012                  no_trap();
4991 5013          }
4992 5014  #endif  /* !__xpv */
4993 5015  }
4994 5016  
     5017 +void
     5018 +enable_pcid(void)
     5019 +{
     5020 +        if (x86_use_pcid == -1)
     5021 +                x86_use_pcid = is_x86_feature(x86_featureset, X86FSET_PCID);
     5022 +
     5023 +        if (x86_use_invpcid == -1) {
     5024 +                x86_use_invpcid = is_x86_feature(x86_featureset,
     5025 +                    X86FSET_INVPCID);
     5026 +        }
     5027 +
     5028 +        if (!x86_use_pcid)
     5029 +                return;
     5030 +
     5031 +        /*
     5032 +         * Intel say that on setting PCIDE, it immediately starts using the PCID
     5033 +         * bits; better make sure there's nothing there.
     5034 +         */
     5035 +        ASSERT((getcr3() & MMU_PAGEOFFSET) == PCID_NONE);
     5036 +
     5037 +        setcr4(getcr4() | CR4_PCIDE);
     5038 +}
     5039 +
4995 5040  /*
4996 5041   * Setup necessary registers to enable XSAVE feature on this processor.
4997 5042   * This function needs to be called early enough, so that no xsave/xrstor
4998 5043   * ops will execute on the processor before the MSRs are properly set up.
4999 5044   *
5000 5045   * Current implementation has the following assumption:
5001 5046   * - cpuid_pass1() is done, so that X86 features are known.
5002 5047   * - fpu_probe() is done, so that fp_save_mech is chosen.
5003 5048   */
5004 5049  void
[ 197 lines elided ]
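Reviewer note: enable_pcid() above asserts that the PCID field (the low twelve bits of %cr3) is still PCID_NONE before setting CR4.PCIDE, because the CPU starts interpreting those bits as a PCID the moment PCIDE is set. Once x86_use_invpcid is also non-zero, targeted invalidations can be issued with the invpcid instruction rather than a full %cr3 reload. The following is a rough, self-contained sketch of such a helper, following the descriptor layout in the Intel SDM; the function name and its use here are hypothetical and are not the routines this change adds:

    #include <stdint.h>

    /*
     * Hypothetical helper: flush the TLB entry for one virtual address
     * under a given PCID.  The 16-byte descriptor carries the PCID in
     * bits 0-11 of the first quadword and the linear address in the
     * second quadword; invalidation type 0 means "individual address".
     */
    static inline void
    invpcid_addr(uint64_t pcid, uintptr_t va)
    {
            struct {
                    uint64_t ipd_pcid;
                    uint64_t ipd_addr;
            } desc = { pcid, (uint64_t)va };
            uint64_t type = 0;      /* individual-address invalidation */

            __asm__ __volatile__("invpcid %0, %1"
                : : "m" (desc), "r" (type) : "memory");
    }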