		hdl = cmi_hdl_lookup(CMI_HDL_NATIVE, cmi_ntv_hwchipid(cp),
		    cmi_ntv_hwcoreid(cp), cmi_ntv_hwstrandid(cp));
	}
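	/*
	 * If a CMI handle was found for this chip/core/strand, notify the
	 * CPU module that the CPU is leaving the faulted state and drop the
	 * hold taken by the lookup above.
	 */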
	if (hdl != NULL) {
		cmi_faulted_exit(hdl);
		cmi_hdl_rele(hdl);
	}
#endif
}

/*
 * The following two routines are used as context operators on threads
 * belonging to processes with a private LDT (see sysi86). Due to the rarity
 * of such processes, these routines are currently written for best code
 * readability and organization rather than speed. We could avoid checking
 * x86_featureset at every context switch by installing different context ops,
 * depending on x86_featureset, at LDT creation time -- one for each
 * combination of fast syscall features.
 */
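/*
 * Illustrative sketch only: the selector below is hypothetical (it is not
 * part of this file, and the exact context-op installation interface is
 * assumed), but it shows how the feature tests could be hoisted to LDT
 * creation time by choosing the enable/disable pair once:
 *
 *	static void
 *	ldt_pick_fast_syscall_ops(void (**dis)(void *), void (**ena)(void *))
 *	{
 *		*dis = *ena = NULL;
 *		if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
 *		    (is_x86_feature(x86_featureset, X86FSET_SEP) ||
 *		    is_x86_feature(x86_featureset, X86FSET_ASYSC))) {
 *			*dis = cpu_fast_syscall_disable;
 *			*ena = cpu_fast_syscall_enable;
 *		}
 *	}
 *
 * A per-combination variant would instead select among dedicated SEP-only,
 * ASYSC-only and SEP+ASYSC enable/disable routines at this point.
 */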
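/*
 * Note: X86FSET_SEP covers the sysenter/sysexit mechanism and X86FSET_ASYSC
 * the syscall/sysret mechanism; both are enabled and disabled via MSR writes,
 * hence the additional X86FSET_MSR check in each test below.
 */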
/*ARGSUSED*/
void
cpu_fast_syscall_disable(void *arg)
{
	if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
	    is_x86_feature(x86_featureset, X86FSET_SEP))
		cpu_sep_disable();
	if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
	    is_x86_feature(x86_featureset, X86FSET_ASYSC))
		cpu_asysc_disable();
}

/*ARGSUSED*/
void
cpu_fast_syscall_enable(void *arg)
{
	if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
	    is_x86_feature(x86_featureset, X86FSET_SEP))
		cpu_sep_enable();
	if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
	    is_x86_feature(x86_featureset, X86FSET_ASYSC))
		cpu_asysc_enable();
}

static void
cpu_sep_enable(void)
{
	ASSERT(is_x86_feature(x86_featureset, X86FSET_SEP));
	ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);

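	/*
	 * Load the kernel code segment selector into the SYSENTER_CS MSR;
	 * the CPU derives the sysenter target CS (and SS) from this value,
	 * so a non-null selector (re)arms the sysenter entry path.
	 */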
	wrmsr(MSR_INTC_SEP_CS, (uint64_t)(uintptr_t)KCS_SEL);
}

static void
cpu_sep_disable(void)