OS-7125 Need mitigation of L1TF (CVE-2018-3646)
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>


2103                                         cpi->cpi_procnodeid = node2_1 +
2104                                             !first_half;
2105                                 else
2106                                         cpi->cpi_procnodeid = node2_1 +
2107                                             first_half;
2108                         }
2109                 }
2110         } else {
2111                 cpi->cpi_procnodeid = 0;
2112         }
2113 
2114         cpi->cpi_chipid =
2115             cpi->cpi_procnodeid / cpi->cpi_procnodes_per_pkg;
2116 
2117         cpi->cpi_ncore_bits = coreidsz;
2118         cpi->cpi_nthread_bits = ddi_fls(cpi->cpi_ncpu_per_chip /
2119             cpi->cpi_ncore_per_chip);
2120 }
2121 
2122 static void
2123 spec_l1d_flush_noop(void)
2124 {
2125 }
2126 
2127 static void
2128 spec_l1d_flush_msr(void)
2129 {
2130         wrmsr(MSR_IA32_FLUSH_CMD, IA32_FLUSH_CMD_L1D);
2131 }
2132 
2133 void (*spec_l1d_flush)(void) = spec_l1d_flush_noop;
2134 
2135 static void
2136 cpuid_scan_security(cpu_t *cpu, uchar_t *featureset)
2137 {
2138         struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
2139 
2140         if (cpi->cpi_vendor == X86_VENDOR_AMD &&
2141             cpi->cpi_xmaxeax >= CPUID_LEAF_EXT_8) {
2142                 if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_IBPB)
2143                         add_x86_feature(featureset, X86FSET_IBPB);
2144                 if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_IBRS)
2145                         add_x86_feature(featureset, X86FSET_IBRS);
2146                 if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_STIBP)
2147                         add_x86_feature(featureset, X86FSET_STIBP);
2148                 if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_IBRS_ALL)
2149                         add_x86_feature(featureset, X86FSET_IBRS_ALL);
2150                 if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_STIBP_ALL)
2151                         add_x86_feature(featureset, X86FSET_STIBP_ALL);
2152                 if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_PREFER_IBRS)
2153                         add_x86_feature(featureset, X86FSET_RSBA);
2154                 if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_SSBD)
2155                         add_x86_feature(featureset, X86FSET_SSBD);


2200                                 }
2201                                 if (reg & IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) {
2202                                         add_x86_feature(featureset,
2203                                             X86FSET_L1D_VM_NO);
2204                                 }
2205                                 if (reg & IA32_ARCH_CAP_SSB_NO) {
2206                                         add_x86_feature(featureset,
2207                                             X86FSET_SSB_NO);
2208                                 }
2209                         }
2210                         no_trap();
2211                 }
2212 #endif  /* !__xpv */
2213 
2214                 if (ecp->cp_edx & CPUID_INTC_EDX_7_0_SSBD)
2215                         add_x86_feature(featureset, X86FSET_SSBD);
2216 
2217                 if (ecp->cp_edx & CPUID_INTC_EDX_7_0_FLUSH_CMD)
2218                         add_x86_feature(featureset, X86FSET_FLUSH_CMD);
2219         }
2220 
2221         if (cpu->cpu_id != 0)
2222                 return;
2223 
2224         /*
2225          * We're the boot CPU, so let's figure out our L1TF status.
2226          *
2227          * First, if this is a RDCL_NO CPU, then we are not vulnerable: we don't
2228          * need to exclude with ht_acquire(), and we don't need to flush.
2229          */
2230         if (is_x86_feature(featureset, X86FSET_RDCL_NO)) {
2231                 extern int ht_exclusion;
2232                 ht_exclusion = 0;
2233                 spec_l1d_flush = spec_l1d_flush_noop;
2234                 membar_producer();
2235                 return;
2236         }
2237 
2238         /*
2239          * If HT is enabled, we will need HT exclusion, as well as the flush on
2240          * VM entry.  If HT isn't enabled, we still need at least the flush for
2241          * the L1TF sequential case.
2242          *
2243          * However, if X86FSET_L1D_VM_NO is set, we're most likely running
2244          * inside a VM ourselves, and we don't need the flush.
2245          *
2246          * If we don't have the FLUSH_CMD available at all, we'd better just
2247          * hope HT is disabled.
2248          */
2249         if (is_x86_feature(featureset, X86FSET_FLUSH_CMD) &&
2250             !is_x86_feature(featureset, X86FSET_L1D_VM_NO)) {
2251                 spec_l1d_flush = spec_l1d_flush_msr;
2252         } else {
2253                 spec_l1d_flush = spec_l1d_flush_noop;
2254         }
2255 
2256         membar_producer();
2257 }
2258 
2259 /*
2260  * Setup XFeature_Enabled_Mask register. Required by xsave feature.
2261  */
2262 void
2263 setup_xfem(void)
2264 {
2265         uint64_t flags = XFEATURE_LEGACY_FP;
2266 
2267         ASSERT(is_x86_feature(x86_featureset, X86FSET_XSAVE));
2268 
2269         if (is_x86_feature(x86_featureset, X86FSET_SSE))
2270                 flags |= XFEATURE_SSE;
2271 
2272         if (is_x86_feature(x86_featureset, X86FSET_AVX))
2273                 flags |= XFEATURE_AVX;
2274 
2275         if (is_x86_feature(x86_featureset, X86FSET_AVX512F))
2276                 flags |= XFEATURE_AVX512;
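
The heart of the change is the spec_l1d_flush hook: a function pointer
that starts out as a no-op and is repointed at the MSR-writing routine
only when the boot CPU decides a flush is both possible
(X86FSET_FLUSH_CMD present) and useful (X86FSET_L1D_VM_NO absent). A
minimal user-space sketch of the same pattern follows; the feature-test
variables are hypothetical stand-ins for the featureset checks above.

	#include <stdio.h>

	/* Hypothetical stand-ins for the featureset tests. */
	static int have_flush_cmd = 1;	/* X86FSET_FLUSH_CMD */
	static int l1d_vm_no = 0;	/* X86FSET_L1D_VM_NO */

	static void
	flush_noop(void)
	{
		/* Not vulnerable, or no way to flush: do nothing. */
	}

	static void
	flush_msr(void)
	{
		/*
		 * The kernel instead does:
		 * wrmsr(MSR_IA32_FLUSH_CMD, IA32_FLUSH_CMD_L1D);
		 */
		(void) printf("L1D flush requested\n");
	}

	/*
	 * Callers invoke the pointer unconditionally; the policy lives
	 * in its value, so the hot path needs no feature checks.
	 */
	static void (*l1d_flush)(void) = flush_noop;

	int
	main(void)
	{
		if (have_flush_cmd && !l1d_vm_no)
			l1d_flush = flush_msr;
		/*
		 * The kernel follows the store with membar_producer()
		 * so other CPUs see the new pointer before using it.
		 */
		l1d_flush();
		return (0);
	}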
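For reference, the Intel-side probing elided between lines 2155 and 2200
keys off CPUID.(EAX=7,ECX=0):EDX and, when EDX bit 29 advertises the
IA32_ARCH_CAPABILITIES MSR (0x10a), evidently reads that MSR into reg
under on_trap()/no_trap(); per the Intel SDM its low bits are RDCL_NO
(0), IBRS_ALL (1), RSBA (2), SKIP_L1DFL_VMENTRY (3), and SSB_NO (4). The
CPUID half can be sketched from user space (the MSR itself is
kernel-only); this uses the GCC/clang <cpuid.h> wrapper rather than
anything from the code above.

	#include <stdio.h>
	#include <cpuid.h>

	int
	main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) {
			(void) fprintf(stderr, "CPUID leaf 7 unsupported\n");
			return (1);
		}

		/* Architectural bits in CPUID.(EAX=7,ECX=0):EDX. */
		(void) printf("L1D_FLUSH (FLUSH_CMD):         %u\n",
		    (edx >> 28) & 1);
		(void) printf("ARCH_CAPABILITIES MSR present: %u\n",
		    (edx >> 29) & 1);
		(void) printf("SSBD:                          %u\n",
		    (edx >> 31) & 1);
		return (0);
	}

A CPU (or hypervisor) that sets RDCL_NO lands in the early-return branch
of cpuid_scan_security() above: HT exclusion is switched off and the
no-op flush is kept.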