10924 Need mitigation of L1TF (CVE-2018-3646)
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Peter Tribble <peter.tribble@gmail.com>

------- old -------

2104                                         cpi->cpi_procnodeid = node2_1 +
2105                                             !first_half;
2106                                 else
2107                                         cpi->cpi_procnodeid = node2_1 +
2108                                             first_half;
2109                         }
2110                 }
2111         } else {
2112                 cpi->cpi_procnodeid = 0;
2113         }
2114 
2115         cpi->cpi_chipid =
2116             cpi->cpi_procnodeid / cpi->cpi_procnodes_per_pkg;
2117 
2118         cpi->cpi_ncore_bits = coreidsz;
2119         cpi->cpi_nthread_bits = ddi_fls(cpi->cpi_ncpu_per_chip /
2120             cpi->cpi_ncore_per_chip);
2121 }
2122 
2123 static void
2124 cpuid_scan_security(cpu_t *cpu, uchar_t *featureset)
2125 {
2126         struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
2127 
2128         if (cpi->cpi_vendor == X86_VENDOR_AMD &&
2129             cpi->cpi_xmaxeax >= CPUID_LEAF_EXT_8) {
2130                 if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_IBPB)
2131                         add_x86_feature(featureset, X86FSET_IBPB);
2132                 if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_IBRS)
2133                         add_x86_feature(featureset, X86FSET_IBRS);
2134                 if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_STIBP)
2135                         add_x86_feature(featureset, X86FSET_STIBP);
2136                 if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_IBRS_ALL)
2137                         add_x86_feature(featureset, X86FSET_IBRS_ALL);
2138                 if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_STIBP_ALL)
2139                         add_x86_feature(featureset, X86FSET_STIBP_ALL);
2140                 if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_PREFER_IBRS)
2141                         add_x86_feature(featureset, X86FSET_RSBA);
2142                 if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_SSBD)
2143                         add_x86_feature(featureset, X86FSET_SSBD);


2188                                 }
2189                                 if (reg & IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) {
2190                                         add_x86_feature(featureset,
2191                                             X86FSET_L1D_VM_NO);
2192                                 }
2193                                 if (reg & IA32_ARCH_CAP_SSB_NO) {
2194                                         add_x86_feature(featureset,
2195                                             X86FSET_SSB_NO);
2196                                 }
2197                         }
2198                         no_trap();
2199                 }
2200 #endif  /* !__xpv */
2201 
2202                 if (ecp->cp_edx & CPUID_INTC_EDX_7_0_SSBD)
2203                         add_x86_feature(featureset, X86FSET_SSBD);
2204 
2205                 if (ecp->cp_edx & CPUID_INTC_EDX_7_0_FLUSH_CMD)
2206                         add_x86_feature(featureset, X86FSET_FLUSH_CMD);
2207         }
2208 }
2209 
2210 /*
2211  * Setup XFeature_Enabled_Mask register. Required by xsave feature.
2212  */
2213 void
2214 setup_xfem(void)
2215 {
2216         uint64_t flags = XFEATURE_LEGACY_FP;
2217 
2218         ASSERT(is_x86_feature(x86_featureset, X86FSET_XSAVE));
2219 
2220         if (is_x86_feature(x86_featureset, X86FSET_SSE))
2221                 flags |= XFEATURE_SSE;
2222 
2223         if (is_x86_feature(x86_featureset, X86FSET_AVX))
2224                 flags |= XFEATURE_AVX;
2225 
2226         if (is_x86_feature(x86_featureset, X86FSET_AVX512F))
2227                         flags |= XFEATURE_AVX512;

------- new -------

2104                                         cpi->cpi_procnodeid = node2_1 +
2105                                             !first_half;
2106                                 else
2107                                         cpi->cpi_procnodeid = node2_1 +
2108                                             first_half;
2109                         }
2110                 }
2111         } else {
2112                 cpi->cpi_procnodeid = 0;
2113         }
2114 
2115         cpi->cpi_chipid =
2116             cpi->cpi_procnodeid / cpi->cpi_procnodes_per_pkg;
2117 
2118         cpi->cpi_ncore_bits = coreidsz;
2119         cpi->cpi_nthread_bits = ddi_fls(cpi->cpi_ncpu_per_chip /
2120             cpi->cpi_ncore_per_chip);
2121 }
2122 
2123 static void
2124 spec_l1d_flush_noop(void)
2125 {
2126 }
2127 
2128 static void
2129 spec_l1d_flush_msr(void)
2130 {
2131         wrmsr(MSR_IA32_FLUSH_CMD, IA32_FLUSH_CMD_L1D);
2132 }
2133 
2134 void (*spec_l1d_flush)(void) = spec_l1d_flush_noop;
2135 
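The function pointer above is the published L1TF flush hook: it defaults to
the no-op and is switched to the MSR-based flush by cpuid_scan_security()
below. A minimal sketch of how a VMM entry path might consume it;
vmm_enter_guest_example() is hypothetical and not part of this change:

extern void (*spec_l1d_flush)(void);

/*
 * Hypothetical caller: invoke the published hook immediately before
 * entering guest context, so the L1D cache is flushed (or not)
 * according to what cpuid_scan_security() decided at boot.
 */
static void
vmm_enter_guest_example(void)
{
        spec_l1d_flush();       /* no-op, or IA32_FLUSH_CMD L1D write */
        /* ... enter the guest here ... */
}
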
2136 static void
2137 cpuid_scan_security(cpu_t *cpu, uchar_t *featureset)
2138 {
2139         struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
2140 
2141         if (cpi->cpi_vendor == X86_VENDOR_AMD &&
2142             cpi->cpi_xmaxeax >= CPUID_LEAF_EXT_8) {
2143                 if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_IBPB)
2144                         add_x86_feature(featureset, X86FSET_IBPB);
2145                 if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_IBRS)
2146                         add_x86_feature(featureset, X86FSET_IBRS);
2147                 if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_STIBP)
2148                         add_x86_feature(featureset, X86FSET_STIBP);
2149                 if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_IBRS_ALL)
2150                         add_x86_feature(featureset, X86FSET_IBRS_ALL);
2151                 if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_STIBP_ALL)
2152                         add_x86_feature(featureset, X86FSET_STIBP_ALL);
2153                 if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_PREFER_IBRS)
2154                         add_x86_feature(featureset, X86FSET_RSBA);
2155                 if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_SSBD)
2156                         add_x86_feature(featureset, X86FSET_SSBD);


2201                                 }
2202                                 if (reg & IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) {
2203                                         add_x86_feature(featureset,
2204                                             X86FSET_L1D_VM_NO);
2205                                 }
2206                                 if (reg & IA32_ARCH_CAP_SSB_NO) {
2207                                         add_x86_feature(featureset,
2208                                             X86FSET_SSB_NO);
2209                                 }
2210                         }
2211                         no_trap();
2212                 }
2213 #endif  /* !__xpv */
2214 
2215                 if (ecp->cp_edx & CPUID_INTC_EDX_7_0_SSBD)
2216                         add_x86_feature(featureset, X86FSET_SSBD);
2217 
2218                 if (ecp->cp_edx & CPUID_INTC_EDX_7_0_FLUSH_CMD)
2219                         add_x86_feature(featureset, X86FSET_FLUSH_CMD);
2220         }
2221 
2222         if (cpu->cpu_id != 0)
2223                 return;
2224 
2225         /*
2226          * We're the boot CPU, so let's figure out our L1TF status.
2227          *
2228          * First, if this is a RDCL_NO CPU, then we are not vulnerable: we don't
2229          * need to exclude with ht_acquire(), and we don't need to flush.
2230          */
2231         if (is_x86_feature(featureset, X86FSET_RDCL_NO)) {
2232                 extern int ht_exclusion;
2233                 ht_exclusion = 0;
2234                 spec_l1d_flush = spec_l1d_flush_noop;
2235                 membar_producer();
2236                 return;
2237         }
2238 
2239         /*
2240          * If HT is enabled, we will need HT exclusion, as well as the flush on
2241          * VM entry.  If HT isn't enabled, we still need at least the flush for
2242          * the L1TF sequential case.
2243          *
2244          * However, if X86FSET_L1D_VM_NO is set, we're most likely running
2245          * inside a VM ourselves, and we don't need the flush.
2246          *
2247          * If we don't have the FLUSH_CMD available at all, we'd better just
2248          * hope HT is disabled.
2249          */
2250         if (is_x86_feature(featureset, X86FSET_FLUSH_CMD) &&
2251             !is_x86_feature(featureset, X86FSET_L1D_VM_NO)) {
2252                 spec_l1d_flush = spec_l1d_flush_msr;
2253         } else {
2254                 spec_l1d_flush = spec_l1d_flush_noop;
2255         }
2256 
2257         membar_producer();
2258 }
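
The membar_producer() calls above order the stores to spec_l1d_flush (and
to ht_exclusion in the RDCL_NO case) before any subsequent stores by the
boot CPU. A reader on another CPU would pair that with membar_consumer();
a minimal sketch of the pairing, with a hypothetical ready flag, assuming
the <sys/atomic.h> barrier routines:

#include <sys/atomic.h>

extern void (*spec_l1d_flush)(void);
static volatile int l1d_policy_ready;   /* hypothetical flag */

/* Boot CPU: publish after cpuid_scan_security() has run. */
static void
l1d_policy_publish(void)
{
        /* spec_l1d_flush was stored; membar_producer() ordered it. */
        l1d_policy_ready = 1;
}

/* Another CPU: consume the published hook. */
static void
l1d_policy_consume(void)
{
        while (l1d_policy_ready == 0)
                ;
        membar_consumer();      /* pairs with membar_producer() */
        spec_l1d_flush();
}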
2259 
2260 /*
2261  * Setup XFeature_Enabled_Mask register. Required by xsave feature.
2262  */
2263 void
2264 setup_xfem(void)
2265 {
2266         uint64_t flags = XFEATURE_LEGACY_FP;
2267 
2268         ASSERT(is_x86_feature(x86_featureset, X86FSET_XSAVE));
2269 
2270         if (is_x86_feature(x86_featureset, X86FSET_SSE))
2271                 flags |= XFEATURE_SSE;
2272 
2273         if (is_x86_feature(x86_featureset, X86FSET_AVX))
2274                 flags |= XFEATURE_AVX;
2275 
2276         if (is_x86_feature(x86_featureset, X86FSET_AVX512F))
2277                 flags |= XFEATURE_AVX512;
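
The listing is cut off here. Per the comment above, setup_xfem() programs
the XFeature Enabled Mask register, i.e. XCR0, so the accumulated flags
value would ultimately be written there. As an illustration of the
mechanism only (not the elided remainder of the function): XCR0 is written
with the XSETBV instruction, which takes the XCR index in %ecx and the
64-bit value in %edx:%eax.

#include <stdint.h>

/* Hypothetical helper, illustration only. */
static inline void
xsetbv_example(uint32_t xcr, uint64_t val)
{
        uint32_t lo = (uint32_t)val;
        uint32_t hi = (uint32_t)(val >> 32);

        __asm__ __volatile__("xsetbv" : : "c" (xcr), "a" (lo), "d" (hi));
}

/* e.g. xsetbv_example(0, flags); with flags built as in setup_xfem() */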