 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2016 by Delphix. All rights reserved.
 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2014 Josef "Jeff" Sipek <jeffpc@josefsipek.net>
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */
/*
 * Portions Copyright 2009 Advanced Micro Devices, Inc.
 */
/*
 * Copyright 2018 Joyent, Inc.
 */
/*
 * Various routines to handle identification
 * and classification of x86 processors.
 */

#include <sys/types.h>
#include <sys/archsystm.h>
#include <sys/x86_archext.h>
#include <sys/kmem.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/cpuvar.h>
#include <sys/processor.h>
#include <sys/sysmacros.h>
#include <sys/pg.h>
#include <sys/fp.h>
#include <sys/controlregs.h>
#include <sys/bitmap.h>
#include <sys/auxv_386.h>
#include <sys/memnode.h>
#include <sys/pci_cfgspace.h>
#include <sys/comm_page.h>
#include <sys/mach_mmu.h>
#include <sys/tsc.h>

#ifdef __xpv
#include <sys/hypervisor.h>
#else
#include <sys/ontrap.h>
#endif

/*
 * Pass 0 of cpuid feature analysis happens in locore. It contains special code
 * to recognize Cyrix processors that are not cpuid-compliant, and to deal with
 * them accordingly. For most modern processors, feature detection occurs here
 * in pass 1.
 *
 * Pass 1 of cpuid feature analysis happens just at the beginning of mlsetup()
 * for the boot CPU and does the basic analysis that the early kernel needs.
 * x86_featureset is set based on the return value of cpuid_pass1() of the boot
 * CPU.
 *
 * Pass 1 includes:
 *
 *	o Determining vendor/model/family/stepping and setting x86_type and
 *	  x86_vendor accordingly.
 *	o Processing the feature flags returned by the cpuid instruction while
 *	  applying any workarounds or tricks for the specific processor.
 *	o Mapping the feature flags into illumos feature bits (X86_*).
 *	o Processing extended feature flags if supported by the processor,
 *	  again while applying specific processor knowledge.
 *	o Determining the CMT characteristics of the system.
 *
 * Pass 1 is done on non-boot CPUs during their initialization and the results
 * are used only as a meager attempt at ensuring that all processors within the
 * system support the same features.
 *
 * Pass 2 of cpuid feature analysis happens just at the beginning
 * of startup(). It just copies in and corrects the remainder
 * of the cpuid data we depend on: standard cpuid functions that we didn't
 * need for pass1 feature analysis, and extended cpuid functions beyond the
 * simple feature processing done in pass1.
 *
 * Pass 3 of cpuid analysis is invoked after basic kernel services; in
 * particular kernel memory allocation has been made available. It creates a
 * readable brand string based on the data collected in the first two passes.
 *
 * Pass 4 of cpuid analysis is invoked after post_startup() when all
 * the support infrastructure for various hardware features has been
 * initialized. It determines which processor features will be reported
 * to userland via the aux vector.
 *
 * All passes are executed on all CPUs, but only the boot CPU determines what
 * features the kernel will use.
 *
 * Much of the worst junk in this file is for the support of processors
 * that didn't really implement the cpuid instruction properly.
 *
 * NOTE: The accessor functions (cpuid_get*) are aware of, and ASSERT upon,
 * the pass numbers. Accordingly, changes to the pass code may require changes
 * to the accessor code.
 */
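
/*
 * As an illustrative sketch (not necessarily the exact code found elsewhere
 * in this file), a pass-aware accessor is expected to look roughly like:
 *
 *	uint_t
 *	cpuid_getfamily(cpu_t *cpu)
 *	{
 *		ASSERT(cpuid_checkpass(cpu, 1));
 *		return (cpu->cpu_m.mcpu_cpi->cpi_family);
 *	}
 *
 * i.e. it asserts that the pass which populates the data it returns has
 * already been run on that CPU.
 */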

uint_t x86_vendor = X86_VENDOR_IntelClone;
uint_t x86_type = X86_TYPE_OTHER;
uint_t x86_clflush_size = 0;

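/*
 * These two tunables select whether the kernel uses the PCID and INVPCID
 * features. Under the Xen hypervisor they are simply forced off; otherwise
 * the value -1 means "decide automatically", and enable_pcid() below resolves
 * it from the detected CPU features.
 */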
#if defined(__xpv)
int x86_use_pcid = 0;
int x86_use_invpcid = 0;
#else
int x86_use_pcid = -1;
int x86_use_invpcid = -1;
#endif

uint_t pentiumpro_bug4046376;

uchar_t x86_featureset[BT_SIZEOFMAP(NUM_X86_FEATURES)];

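/*
 * Human-readable names for the X86FSET_* feature bits, indexed by feature
 * number.
 */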
static char *x86_feature_names[NUM_X86_FEATURES] = {
	"lgpg",
	"tsc",
	"msr",
	"mtrr",
	"pge",
	"de",
	"cmov",
	"mmx",
	"mca",
	"pae",
	"cv8",
	"pat",
	"sep",
	"sse",
	"sse2",
	/* ... */
	"mpx",
	"avx512f",
	"avx512dq",
	"avx512pf",
	"avx512er",
	"avx512cd",
	"avx512bw",
	"avx512vl",
	"avx512fma",
	"avx512vbmi",
	"avx512_vpopcntdq",
	"avx512_4vnniw",
	"avx512_4fmaps",
	"xsaveopt",
	"xsavec",
	"xsaves",
	"sha",
	"umip",
	"pku",
	"ospke",
	"pcid",
	"invpcid",
};

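/*
 * Simple bitmap accessors for a feature set: test, set, and clear a single
 * X86FSET_* feature bit in the given featureset bitmap.
 */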
boolean_t
is_x86_feature(void *featureset, uint_t feature)
{
	ASSERT(feature < NUM_X86_FEATURES);
	return (BT_TEST((ulong_t *)featureset, feature));
}

void
add_x86_feature(void *featureset, uint_t feature)
{
	ASSERT(feature < NUM_X86_FEATURES);
	BT_SET((ulong_t *)featureset, feature);
}

void
remove_x86_feature(void *featureset, uint_t feature)
{
	ASSERT(feature < NUM_X86_FEATURES);
/* ... */

		ecp->cp_eax = 7;
		ecp->cp_ecx = 0;
		(void) __cpuid_insn(ecp);

		/*
		 * If XSAVE has been disabled, just ignore all of the
		 * extended-save-area dependent flags here.
		 */
		if (xsave_force_disable) {
			ecp->cp_ebx &= ~CPUID_INTC_EBX_7_0_BMI1;
			ecp->cp_ebx &= ~CPUID_INTC_EBX_7_0_BMI2;
			ecp->cp_ebx &= ~CPUID_INTC_EBX_7_0_AVX2;
			ecp->cp_ebx &= ~CPUID_INTC_EBX_7_0_MPX;
			ecp->cp_ebx &= ~CPUID_INTC_EBX_7_0_ALL_AVX512;
			ecp->cp_ecx &= ~CPUID_INTC_ECX_7_0_ALL_AVX512;
			ecp->cp_edx &= ~CPUID_INTC_EDX_7_0_ALL_AVX512;
		}

		if (ecp->cp_ebx & CPUID_INTC_EBX_7_0_SMEP)
			add_x86_feature(featureset, X86FSET_SMEP);

		if (ecp->cp_ebx & CPUID_INTC_EBX_7_0_INVPCID) {
			add_x86_feature(featureset, X86FSET_INVPCID);
		}

		/*
		 * We check disable_smap here in addition to in startup_smap()
		 * to ensure CPUs that aren't the boot CPU don't accidentally
		 * include it in the feature set and thus generate a mismatched
		 * x86 feature set across CPUs. Note that at this time we only
		 * enable SMAP for the 64-bit kernel.
		 */
#if defined(__amd64)
		if (ecp->cp_ebx & CPUID_INTC_EBX_7_0_SMAP &&
		    disable_smap == 0)
			add_x86_feature(featureset, X86FSET_SMAP);
#endif
		if (ecp->cp_ebx & CPUID_INTC_EBX_7_0_MPX)
			add_x86_feature(featureset, X86FSET_MPX);

		if (ecp->cp_ebx & CPUID_INTC_EBX_7_0_RDSEED)
			add_x86_feature(featureset, X86FSET_RDSEED);

		if (ecp->cp_ebx & CPUID_INTC_EBX_7_0_ADX)
			add_x86_feature(featureset, X86FSET_ADX);
/* ... */

				if (cpi->cpi_std[7].cp_ecx &
				    CPUID_INTC_ECX_7_0_AVX512VBMI)
					add_x86_feature(featureset,
					    X86FSET_AVX512VBMI);
				if (cpi->cpi_std[7].cp_ecx &
				    CPUID_INTC_ECX_7_0_AVX512VPOPCDQ)
					add_x86_feature(featureset,
					    X86FSET_AVX512VPOPCDQ);

				if (cpi->cpi_std[7].cp_edx &
				    CPUID_INTC_EDX_7_0_AVX5124NNIW)
					add_x86_feature(featureset,
					    X86FSET_AVX512NNIW);
				if (cpi->cpi_std[7].cp_edx &
				    CPUID_INTC_EDX_7_0_AVX5124FMAPS)
					add_x86_feature(featureset,
					    X86FSET_AVX512FMAPS);
			}
		}
	}

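	/*
	 * PCID is added to the feature set only on Intel processors here;
	 * see also enable_pcid() below, which turns it on when x86_use_pcid
	 * allows.
	 */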
	if (cpi->cpi_vendor == X86_VENDOR_Intel) {
		if (cp->cp_ecx & CPUID_INTC_ECX_PCID) {
			add_x86_feature(featureset, X86FSET_PCID);
		}
	}

	if (cp->cp_ecx & CPUID_INTC_ECX_X2APIC) {
		add_x86_feature(featureset, X86FSET_X2APIC);
	}
	if (cp->cp_edx & CPUID_INTC_EDX_DE) {
		add_x86_feature(featureset, X86FSET_DE);
	}
#if !defined(__xpv)
	if (cp->cp_ecx & CPUID_INTC_ECX_MON) {

		/*
		 * The erratum workaround needed to use MONITOR/MWAIT
		 * requires the CLFLUSH instruction.
		 */
		if (cp->cp_edx & CPUID_INTC_EDX_CLFSH) {
			cpi->cpi_mwait.support |= MWAIT_SUPPORT;
			add_x86_feature(featureset, X86FSET_MWAIT);
		} else {
			extern int idle_cpu_assert_cflush_monitor;

/* ... */
	if (cpuid_getvendor(CPU) == X86_VENDOR_AMD) {
		on_trap_data_t otd;
		uint64_t reg;

		if (!on_trap(&otd, OT_DATA_ACCESS)) {
			reg = rdmsr(MSR_AMD_INT_PENDING_CMP_HALT);
			/* Disable C1E state if it is enabled by BIOS */
			if ((reg >> AMD_ACTONCMPHALT_SHIFT) &
			    AMD_ACTONCMPHALT_MASK) {
				reg &= ~(AMD_ACTONCMPHALT_MASK <<
				    AMD_ACTONCMPHALT_SHIFT);
				wrmsr(MSR_AMD_INT_PENDING_CMP_HALT, reg);
			}
		}
		no_trap();
	}
#endif	/* !__xpv */
}

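/*
 * Enable the use of process-context identifiers (PCID) on this CPU, honoring
 * the x86_use_pcid and x86_use_invpcid overrides: a value of -1 means "use
 * the feature if the hardware advertises it". PCID is only turned on via
 * CR4.PCIDE when x86_use_pcid resolves to non-zero.
 */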
void
enable_pcid(void)
{
	if (x86_use_pcid == -1)
		x86_use_pcid = is_x86_feature(x86_featureset, X86FSET_PCID);

	if (x86_use_invpcid == -1) {
		x86_use_invpcid = is_x86_feature(x86_featureset,
		    X86FSET_INVPCID);
	}

	if (!x86_use_pcid)
		return;

	/*
	 * Intel documents that once CR4.PCIDE is set, the CPU immediately
	 * starts interpreting the PCID bits in CR3, so make sure nothing
	 * stale is left there.
	 */
	ASSERT((getcr3() & MMU_PAGEOFFSET) == PCID_NONE);

	setcr4(getcr4() | CR4_PCIDE);
}

/*
 * Set up the necessary registers to enable the XSAVE feature on this
 * processor. This function must be called early enough that no xsave/xrstor
 * instructions execute on the processor before the relevant registers are
 * properly set up.
 *
 * The current implementation makes the following assumptions:
 * - cpuid_pass1() is done, so that X86 features are known.
 * - fpu_probe() is done, so that fp_save_mech is chosen.
 */
void
xsave_setup_msr(cpu_t *cpu)
{
	ASSERT(fp_save_mech == FP_XSAVE);
	ASSERT(is_x86_feature(x86_featureset, X86FSET_XSAVE));

	/* Enable OSXSAVE in CR4. */
	setcr4(getcr4() | CR4_OSXSAVE);
	/*
	 * Update the SW copy of ECX, so that /dev/cpu/self/cpuid will report
	 * the correct value.