10208 Add x86 features for L1TF

------ old version ------

  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
  23  * Copyright (c) 2011, 2016 by Delphix. All rights reserved.
  24  * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
  25  * Copyright 2014 Josef "Jeff" Sipek <jeffpc@josefsipek.net>
  26  */
  27 /*
  28  * Copyright (c) 2010, Intel Corporation.
  29  * All rights reserved.
  30  */
  31 /*
  32  * Portions Copyright 2009 Advanced Micro Devices, Inc.
  33  */
  34 /*
  35  * Copyright 2018 Joyent, Inc.
  36  */
  37 /*
  38  * Various routines to handle identification
  39  * and classification of x86 processors.
  40  */
  41 
  42 #include <sys/types.h>
  43 #include <sys/archsystm.h>
  44 #include <sys/x86_archext.h>
  45 #include <sys/kmem.h>
  46 #include <sys/systm.h>
  47 #include <sys/cmn_err.h>
  48 #include <sys/sunddi.h>
  49 #include <sys/sunndi.h>
  50 #include <sys/cpuvar.h>
  51 #include <sys/processor.h>
  52 #include <sys/sysmacros.h>
  53 #include <sys/pg.h>
  54 #include <sys/fp.h>
  55 #include <sys/controlregs.h>


 200         "avx512_4vnniw",
 201         "avx512_4fmaps",
 202         "xsaveopt",
 203         "xsavec",
 204         "xsaves",
 205         "sha",
 206         "umip",
 207         "pku",
 208         "ospke",
 209         "pcid",
 210         "invpcid",
 211         "ibrs",
 212         "ibpb",
 213         "stibp",
 214         "ssbd",
 215         "ssbd_virt",
 216         "rdcl_no",
 217         "ibrs_all",
 218         "rsba",
 219         "ssb_no",
 220         "stibp_all"
 221 };
 222 
 223 boolean_t
 224 is_x86_feature(void *featureset, uint_t feature)
 225 {
 226         ASSERT(feature < NUM_X86_FEATURES);
 227         return (BT_TEST((ulong_t *)featureset, feature));
 228 }
 229 
 230 void
 231 add_x86_feature(void *featureset, uint_t feature)
 232 {
 233         ASSERT(feature < NUM_X86_FEATURES);
 234         BT_SET((ulong_t *)featureset, feature);
 235 }
 236 
 237 void
 238 remove_x86_feature(void *featureset, uint_t feature)
 239 {
 240         ASSERT(feature < NUM_X86_FEATURES);


1034 
1035                         /*
1036                          * Be paranoid and assume we'll get a #GP.
1037                          */
1038                         if (!on_trap(&otd, OT_DATA_ACCESS)) {
1039                                 uint64_t reg;
1040 
1041                                 reg = rdmsr(MSR_IA32_ARCH_CAPABILITIES);
1042                                 if (reg & IA32_ARCH_CAP_RDCL_NO) {
1043                                         add_x86_feature(featureset,
1044                                             X86FSET_RDCL_NO);
1045                                 }
1046                                 if (reg & IA32_ARCH_CAP_IBRS_ALL) {
1047                                         add_x86_feature(featureset,
1048                                             X86FSET_IBRS_ALL);
1049                                 }
1050                                 if (reg & IA32_ARCH_CAP_RSBA) {
1051                                         add_x86_feature(featureset,
1052                                             X86FSET_RSBA);
1053                                 }
1054                                 if (reg & IA32_ARCH_CAP_SSB_NO) {
1055                                         add_x86_feature(featureset,
1056                                             X86FSET_SSB_NO);
1057                                 }
1058                         }
1059                         no_trap();
1060                 }
1061 #endif  /* !__xpv */
1062 
1063                 if (ecp->cp_edx & CPUID_INTC_EDX_7_0_SSBD)
1064                         add_x86_feature(featureset, X86FSET_SSBD);
1065         }
1066 }
1067 
1068 /*
1069  * Setup XFeature_Enabled_Mask register. Required by xsave feature.
1070  */
1071 void
1072 setup_xfem(void)
1073 {
1074         uint64_t flags = XFEATURE_LEGACY_FP;
1075 
1076         ASSERT(is_x86_feature(x86_featureset, X86FSET_XSAVE));
1077 
1078         if (is_x86_feature(x86_featureset, X86FSET_SSE))
1079                 flags |= XFEATURE_SSE;
1080 
1081         if (is_x86_feature(x86_featureset, X86FSET_AVX))
1082                 flags |= XFEATURE_AVX;
1083 
1084         if (is_x86_feature(x86_featureset, X86FSET_AVX512F))

------ new version ------

  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
  23  * Copyright (c) 2011, 2016 by Delphix. All rights reserved.
  24  * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
  25  * Copyright 2014 Josef "Jeff" Sipek <jeffpc@josefsipek.net>
  26  */
  27 /*
  28  * Copyright (c) 2010, Intel Corporation.
  29  * All rights reserved.
  30  */
  31 /*
  32  * Portions Copyright 2009 Advanced Micro Devices, Inc.
  33  */
  34 /*
  35  * Copyright (c) 2019, Joyent, Inc.
  36  */
  37 /*
  38  * Various routines to handle identification
  39  * and classification of x86 processors.
  40  */
  41 
  42 #include <sys/types.h>
  43 #include <sys/archsystm.h>
  44 #include <sys/x86_archext.h>
  45 #include <sys/kmem.h>
  46 #include <sys/systm.h>
  47 #include <sys/cmn_err.h>
  48 #include <sys/sunddi.h>
  49 #include <sys/sunndi.h>
  50 #include <sys/cpuvar.h>
  51 #include <sys/processor.h>
  52 #include <sys/sysmacros.h>
  53 #include <sys/pg.h>
  54 #include <sys/fp.h>
  55 #include <sys/controlregs.h>


 200         "avx512_4vnniw",
 201         "avx512_4fmaps",
 202         "xsaveopt",
 203         "xsavec",
 204         "xsaves",
 205         "sha",
 206         "umip",
 207         "pku",
 208         "ospke",
 209         "pcid",
 210         "invpcid",
 211         "ibrs",
 212         "ibpb",
 213         "stibp",
 214         "ssbd",
 215         "ssbd_virt",
 216         "rdcl_no",
 217         "ibrs_all",
 218         "rsba",
 219         "ssb_no",
 220         "stibp_all",
 221         "flush_cmd",
 222         "l1d_vmentry_no"
 223 };
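
The two names appended here, "flush_cmd" and "l1d_vmentry_no", have to sit at the same indices as the matching X86FSET_* constants, since this table is indexed by feature bit when feature names are printed. A common way to keep such a table from drifting out of sync is a compile-time length check; the sketch below shows that idiom with made-up names and a truncated list, and is not the kernel's own guard.

/* Illustrative compile-time guard; the count and list below are stand-ins. */
#include <assert.h>
#include <stdio.h>

enum { FAKE_NUM_FEATURES = 9 };

static const char *const fake_feature_names[] = {
        "ssbd", "ssbd_virt", "rdcl_no", "ibrs_all", "rsba",
        "ssb_no", "stibp_all", "flush_cmd", "l1d_vmentry_no"
};

/* Fails to compile if an entry is added or removed without updating the count. */
static_assert(sizeof (fake_feature_names) / sizeof (fake_feature_names[0]) ==
    FAKE_NUM_FEATURES, "feature name table out of sync");

int
main(void)
{
        (void) printf("newest feature: %s\n",
            fake_feature_names[FAKE_NUM_FEATURES - 1]);
        return (0);
}
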
 224 
 225 boolean_t
 226 is_x86_feature(void *featureset, uint_t feature)
 227 {
 228         ASSERT(feature < NUM_X86_FEATURES);
 229         return (BT_TEST((ulong_t *)featureset, feature));
 230 }
 231 
 232 void
 233 add_x86_feature(void *featureset, uint_t feature)
 234 {
 235         ASSERT(feature < NUM_X86_FEATURES);
 236         BT_SET((ulong_t *)featureset, feature);
 237 }
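
For context, the accessors above treat the featureset as an array of ulong_t words and defer to the kernel's BT_TEST/BT_SET bit-array macros. The self-contained sketch below imitates that pattern in plain C so it can be compiled and run outside the kernel; the macro definitions, the feature count, and the sample index are stand-ins, not the kernel's own.

/*
 * Userland sketch of the featureset bitmap pattern; NUM_FEATURES, the BT_*()
 * macros, and the index value are illustrative stand-ins.
 */
#include <assert.h>
#include <limits.h>
#include <stdio.h>

#define NUM_FEATURES    128
#define BITS_PER_WORD   (sizeof (unsigned long) * CHAR_BIT)
#define BT_NWORDS(n)    (((n) + BITS_PER_WORD - 1) / BITS_PER_WORD)
#define BT_TEST(map, i) (((map)[(i) / BITS_PER_WORD] >> ((i) % BITS_PER_WORD)) & 1UL)
#define BT_SET(map, i)  ((map)[(i) / BITS_PER_WORD] |= 1UL << ((i) % BITS_PER_WORD))

int
main(void)
{
        unsigned long featureset[BT_NWORDS(NUM_FEATURES)] = { 0 };
        unsigned int feature = 60;      /* hypothetical index for "flush_cmd" */

        assert(feature < NUM_FEATURES);
        BT_SET(featureset, feature);
        (void) printf("flush_cmd present: %d\n", (int)BT_TEST(featureset, feature));
        return (0);
}
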
 238 
 239 void
 240 remove_x86_feature(void *featureset, uint_t feature)
 241 {
 242         ASSERT(feature < NUM_X86_FEATURES);


1036 
1037                         /*
1038                          * Be paranoid and assume we'll get a #GP.
1039                          */
1040                         if (!on_trap(&otd, OT_DATA_ACCESS)) {
1041                                 uint64_t reg;
1042 
1043                                 reg = rdmsr(MSR_IA32_ARCH_CAPABILITIES);
1044                                 if (reg & IA32_ARCH_CAP_RDCL_NO) {
1045                                         add_x86_feature(featureset,
1046                                             X86FSET_RDCL_NO);
1047                                 }
1048                                 if (reg & IA32_ARCH_CAP_IBRS_ALL) {
1049                                         add_x86_feature(featureset,
1050                                             X86FSET_IBRS_ALL);
1051                                 }
1052                                 if (reg & IA32_ARCH_CAP_RSBA) {
1053                                         add_x86_feature(featureset,
1054                                             X86FSET_RSBA);
1055                                 }
1056                                 if (reg & IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) {
1057                                         add_x86_feature(featureset,
1058                                             X86FSET_L1D_VM_NO);
1059                                 }
1060                                 if (reg & IA32_ARCH_CAP_SSB_NO) {
1061                                         add_x86_feature(featureset,
1062                                             X86FSET_SSB_NO);
1063                                 }
1064                         }
1065                         no_trap();
1066                 }
1067 #endif  /* !__xpv */
1068 
1069                 if (ecp->cp_edx & CPUID_INTC_EDX_7_0_SSBD)
1070                         add_x86_feature(featureset, X86FSET_SSBD);
1071 
1072                 if (ecp->cp_edx & CPUID_INTC_EDX_7_0_FLUSH_CMD)
1073                         add_x86_feature(featureset, X86FSET_FLUSH_CMD);
1074         }
1075 }
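
The block above maps individual IA32_ARCH_CAPABILITIES bits to featureset flags, and the CPUID.(EAX=7,ECX=0):EDX check records support for the IA32_FLUSH_CMD MSR as X86FSET_FLUSH_CMD. As a quick sanity check of the bit-to-name mapping, here is a stand-alone decoder; the bit positions follow Intel's published layout for this MSR (RDCL_NO bit 0, IBRS_ALL bit 1, RSBA bit 2, SKIP_L1DFL_VMENTRY bit 3, SSB_NO bit 4), while the function and macro names and the sample value are illustrative only.

/* Stand-alone decoder for a raw IA32_ARCH_CAPABILITIES value (illustrative). */
#include <stdint.h>
#include <stdio.h>

#define ARCH_CAP_RDCL_NO                (1ULL << 0)
#define ARCH_CAP_IBRS_ALL               (1ULL << 1)
#define ARCH_CAP_RSBA                   (1ULL << 2)
#define ARCH_CAP_SKIP_L1DFL_VMENTRY     (1ULL << 3)
#define ARCH_CAP_SSB_NO                 (1ULL << 4)

static void
decode_arch_caps(uint64_t reg)
{
        /* Print the feature name the cpuid code would add for each set bit. */
        if (reg & ARCH_CAP_RDCL_NO)
                (void) printf("rdcl_no\n");
        if (reg & ARCH_CAP_IBRS_ALL)
                (void) printf("ibrs_all\n");
        if (reg & ARCH_CAP_RSBA)
                (void) printf("rsba\n");
        if (reg & ARCH_CAP_SKIP_L1DFL_VMENTRY)
                (void) printf("l1d_vmentry_no\n");
        if (reg & ARCH_CAP_SSB_NO)
                (void) printf("ssb_no\n");
}

int
main(void)
{
        /* Sample value: RDCL_NO | SKIP_L1DFL_VMENTRY | SSB_NO. */
        decode_arch_caps(0x19);
        return (0);
}

In the L1TF picture, flush_cmd indicates the IA32_FLUSH_CMD MSR is available for explicitly flushing the L1 data cache (for example before VM entry), and l1d_vmentry_no indicates the hardware does not require that flush on VM entry.
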
1076 
1077 /*
1078  * Setup XFeature_Enabled_Mask register. Required by xsave feature.
1079  */
1080 void
1081 setup_xfem(void)
1082 {
1083         uint64_t flags = XFEATURE_LEGACY_FP;
1084 
1085         ASSERT(is_x86_feature(x86_featureset, X86FSET_XSAVE));
1086 
1087         if (is_x86_feature(x86_featureset, X86FSET_SSE))
1088                 flags |= XFEATURE_SSE;
1089 
1090         if (is_x86_feature(x86_featureset, X86FSET_AVX))
1091                 flags |= XFEATURE_AVX;
1092 
1093         if (is_x86_feature(x86_featureset, X86FSET_AVX512F))