
*** 21,41 ****
  /*
   * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
   * Copyright (c) 2011, 2016 by Delphix. All rights reserved.
   * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
   * Copyright 2014 Josef "Jeff" Sipek <jeffpc@josefsipek.net>
   */
  /*
   * Copyright (c) 2010, Intel Corporation.
   * All rights reserved.
   */
  /*
   * Portions Copyright 2009 Advanced Micro Devices, Inc.
   */
- /*
-  * Copyright 2020 Joyent, Inc.
-  */
  /*
   * CPU Identification logic
   *
   * The purpose of this file and its companion, cpuid_subr.c, is to help deal
--- 21,39 ----
  /*
   * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
   * Copyright (c) 2011, 2016 by Delphix. All rights reserved.
   * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
   * Copyright 2014 Josef "Jeff" Sipek <jeffpc@josefsipek.net>
+  * Copyright 2020 Joyent, Inc.
   */
  /*
   * Copyright (c) 2010, Intel Corporation.
   * All rights reserved.
   */
  /*
   * Portions Copyright 2009 Advanced Micro Devices, Inc.
   */
  /*
   * CPU Identification logic
   *
   * The purpose of this file and its companion, cpuid_subr.c, is to help deal
*** 1225,1235 ****
   *
   * We track whether or not we should do this based on what cpuid pass we're in.
   * Whenever we hit cpuid_scan_security() on the boot CPU and we're still on pass
   * 1 of the cpuid logic, then we can completely turn off TSX. Notably this
   * should happen twice. Once in the normal cpuid_pass1() code and then a second
!  * time after we do the initial microcode update.
   *
   * If TAA has been fixed, then it will be enumerated in IA32_ARCH_CAPABILITIES
   * as TAA_NO. In such a case, we will still disable TSX: it's proven to be an
   * unfortunate feature in a number of ways, and taking the opportunity to
   * finally be able to turn it off is likely to be of benefit in the future.
--- 1223,1235 ----
   *
   * We track whether or not we should do this based on what cpuid pass we're in.
   * Whenever we hit cpuid_scan_security() on the boot CPU and we're still on pass
   * 1 of the cpuid logic, then we can completely turn off TSX. Notably this
   * should happen twice. Once in the normal cpuid_pass1() code and then a second
!  * time after we do the initial microcode update. As a result we need to be
!  * careful in cpuid_apply_tsx() to only use the MSR if we've loaded a suitable
!  * microcode on the current CPU (which happens prior to cpuid_pass_ucode()).
   *
   * If TAA has been fixed, then it will be enumerated in IA32_ARCH_CAPABILITIES
   * as TAA_NO. In such a case, we will still disable TSX: it's proven to be an
   * unfortunate feature in a number of ways, and taking the opportunity to
   * finally be able to turn it off is likely to be of benefit in the future.
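In practice, the carefulness described above amounts to gating every IA32_TSX_CTRL access on the synthetic X86FSET_TSX_CTRL feature bit, which is only set once a microcode revision exposing the MSR has been loaded on the current CPU. The fragment below is a minimal illustrative sketch of that pattern, not the patch itself (the actual change is in the cpuid_apply_tsx() hunk further down); the helper name is made up, and it relies only on the MSR accessors and feature-set helpers already used elsewhere in this file.

/*
 * Illustrative sketch only (hypothetical helper, not part of the patch):
 * touch IA32_TSX_CTRL only when the feature set says the MSR is present,
 * i.e. after suitable microcode has been loaded on this CPU.
 */
static void
example_tsx_ctrl_disable(uchar_t *featureset)
{
        uint64_t val;

        /* No suitable microcode yet: the MSR may not exist, so do nothing. */
        if (!is_x86_feature(featureset, X86FSET_TSX_CTRL))
                return;

        /* Read-modify-write: hide TSX from CPUID and disable RTM. */
        val = rdmsr(MSR_IA32_TSX_CTRL);
        val |= IA32_TSX_CTRL_CPUID_CLEAR | IA32_TSX_CTRL_RTM_DISABLE;
        wrmsr(MSR_IA32_TSX_CTRL, val);
}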
*** 2837,2858 ****
  	} else {
  		x86_taa_mitigation = X86_TAA_TSX_FORCE_ABORT;
  	}
  }
  
  static void
! cpuid_apply_tsx(x86_taa_mitigation_t taa)
  {
  	uint64_t val;
  
  	switch (taa) {
  	case X86_TAA_TSX_DISABLE:
  		val = rdmsr(MSR_IA32_TSX_CTRL);
  		val |= IA32_TSX_CTRL_CPUID_CLEAR | IA32_TSX_CTRL_RTM_DISABLE;
  		wrmsr(MSR_IA32_TSX_CTRL, val);
  		break;
  	case X86_TAA_TSX_FORCE_ABORT:
  		val = rdmsr(MSR_IA32_TSX_CTRL);
  		val |= IA32_TSX_CTRL_RTM_DISABLE;
  		wrmsr(MSR_IA32_TSX_CTRL, val);
  		break;
  	case X86_TAA_HW_MITIGATED:
--- 2837,2866 ----
  	} else {
  		x86_taa_mitigation = X86_TAA_TSX_FORCE_ABORT;
  	}
  }
  
+ /*
+  * As mentioned, we should only touch the MSR when we've got a suitable
+  * microcode loaded on this CPU.
+  */
  static void
! cpuid_apply_tsx(x86_taa_mitigation_t taa, uchar_t *featureset)
  {
  	uint64_t val;
  
  	switch (taa) {
  	case X86_TAA_TSX_DISABLE:
+ 		if (!is_x86_feature(featureset, X86FSET_TSX_CTRL))
+ 			return;
  		val = rdmsr(MSR_IA32_TSX_CTRL);
  		val |= IA32_TSX_CTRL_CPUID_CLEAR | IA32_TSX_CTRL_RTM_DISABLE;
  		wrmsr(MSR_IA32_TSX_CTRL, val);
  		break;
  	case X86_TAA_TSX_FORCE_ABORT:
+ 		if (!is_x86_feature(featureset, X86FSET_TSX_CTRL))
+ 			return;
  		val = rdmsr(MSR_IA32_TSX_CTRL);
  		val |= IA32_TSX_CTRL_RTM_DISABLE;
  		wrmsr(MSR_IA32_TSX_CTRL, val);
  		break;
  	case X86_TAA_HW_MITIGATED:
*** 2974,2994 ****
  
  	/*
  	 * Take care of certain mitigations on the non-boot CPU. The boot CPU
  	 * will have already run this function and determined what we need to
  	 * do. This gives us a hook for per-HW thread mitigations such as
! 	 * enhanced IBRS, or disabling TSX. For TSX disabling, we need to be
! 	 * careful that we've had a chance to load ucode that enables the new
! 	 * MSRs.
  	 */
  	if (cpu->cpu_id != 0) {
  		if (x86_spectrev2_mitigation == X86_SPECTREV2_ENHANCED_IBRS) {
  			cpuid_enable_enhanced_ibrs();
  		}
  
! 		if (cpi->cpi_pass >= 1)
! 			cpuid_apply_tsx(x86_taa_mitigation);
  		return;
  	}
  
  	/*
  	 * Go through and initialize various security mechanisms that we should
--- 2982,2999 ----
  
  	/*
  	 * Take care of certain mitigations on the non-boot CPU. The boot CPU
  	 * will have already run this function and determined what we need to
  	 * do. This gives us a hook for per-HW thread mitigations such as
! 	 * enhanced IBRS, or disabling TSX.
  	 */
  	if (cpu->cpu_id != 0) {
  		if (x86_spectrev2_mitigation == X86_SPECTREV2_ENHANCED_IBRS) {
  			cpuid_enable_enhanced_ibrs();
  		}
  
! 		cpuid_apply_tsx(x86_taa_mitigation, featureset);
  		return;
  	}
  
  	/*
  	 * Go through and initialize various security mechanisms that we should
*** 3047,3057 ****
  	/*
  	 * Determine what our mitigation strategy should be for TAA and then
  	 * also apply TAA mitigations.
  	 */
  	cpuid_update_tsx(cpu, featureset);
! 	cpuid_apply_tsx(x86_taa_mitigation);
  }
  
  /*
   * Setup XFeature_Enabled_Mask register. Required by xsave feature.
   */
--- 3052,3062 ----
  	/*
  	 * Determine what our mitigation strategy should be for TAA and then
  	 * also apply TAA mitigations.
  	 */
  	cpuid_update_tsx(cpu, featureset);
! 	cpuid_apply_tsx(x86_taa_mitigation, featureset);
  }
  
  /*
   * Setup XFeature_Enabled_Mask register. Required by xsave feature.
   */
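Taken together, the last two hunks leave the call flow looking roughly as follows. This is an illustrative condensation, not the real body of cpuid_scan_security(): the wrapper name is hypothetical, but every call it makes appears in the hunks above, and the cpi_pass gate is gone because cpuid_apply_tsx() now checks the feature set itself.

/*
 * Illustrative condensation of the post-change flow (hypothetical wrapper;
 * all calls appear in the hunks above).
 */
static void
example_scan_security_flow(cpu_t *cpu, uchar_t *featureset)
{
        if (cpu->cpu_id != 0) {
                /* Non-boot CPU: re-apply what the boot CPU already decided. */
                if (x86_spectrev2_mitigation == X86_SPECTREV2_ENHANCED_IBRS) {
                        cpuid_enable_enhanced_ibrs();
                }
                cpuid_apply_tsx(x86_taa_mitigation, featureset);
                return;
        }

        /* Boot CPU: choose the TAA strategy, then apply it locally. */
        cpuid_update_tsx(cpu, featureset);
        cpuid_apply_tsx(x86_taa_mitigation, featureset);
}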