154 * we're not in the context of an interrupt which happens to handle
155 * saving and restoring rflags correctly, we may lose our SMAP related
156 * state.
157 *
158 * To handle this, as part of being switched out, we first save whether
159 * or not userland access is allowed ($PS_ACHK in rflags) and store that
160 * in t_useracc on the kthread_t and unconditionally enable SMAP to
161 * protect the system.
162 *
163 * Later, when the thread finishes resuming, we potentially disable smap
164 * if PS_ACHK was present in rflags. See uts/intel/ia32/ml/copy.s for
165 * more information on rflags and SMAP.
166 */
167 pushfq
168 popq %rsi
169 andq $PS_ACHK, %rsi
170 movq %rsi, T_USERACC(%rax)
171 call smap_enable
172
173 /*
174 * Save non-volatile registers, and set return address for current
175 * thread to resume_return.
176 *
177 * %r12 = t (new thread) when done
178 */
179 SAVE_REGS(%rax, %r11)
180
181
182 LOADCPU(%r15) /* %r15 = CPU */
183 movq CPU_THREAD(%r15), %r13 /* %r13 = curthread */
184
185 /*
186 * Call savectx if thread has installed context ops.
187 *
188 * Note that if we have floating point context, the save op
189 * (either fpsave_begin or fpxsave_begin) will issue the
190 * async save instruction (fnsave or fxsave respectively)
191 * that we fwait for below.
192 */
193 cmpq $0, T_CTX(%r13) /* should current thread savectx? */
473 */
474 movq T_PC(%r12), %rax /* saved return addr */
475 RESTORE_REGS(%r11);
476 pushq %rax /* push return address for spl0() */
477 call __dtrace_probe___sched_on__cpu
478 jmp spl0
479
480 resume_from_intr_return:
481 /*
482 * Remove stack frame created in SAVE_REGS()
483 */
484 addq $CLONGSIZE, %rsp
485 ret
486 SET_SIZE(resume_from_intr)
487
488 ENTRY(thread_start)
489 popq %rax /* start() */
490 popq %rdi /* arg */
491 popq %rsi /* len */
492 movq %rsp, %rbp
493 call *%rax
494 call thread_exit /* destroy thread if it returns. */
495 /*NOTREACHED*/
496 SET_SIZE(thread_start)
|
154 * we're not in the context of an interrupt which happens to handle
155 * saving and restoring rflags correctly, we may lose our SMAP related
156 * state.
157 *
158 * To handle this, as part of being switched out, we first save whether
159 * or not userland access is allowed ($PS_ACHK in rflags) and store that
160 * in t_useracc on the kthread_t and unconditionally enable SMAP to
161 * protect the system.
162 *
163 * Later, when the thread finishes resuming, we potentially disable smap
164 * if PS_ACHK was present in rflags. See uts/intel/ia32/ml/copy.s for
165 * more information on rflags and SMAP.
166 */
167 pushfq
168 popq %rsi
169 andq $PS_ACHK, %rsi
170 movq %rsi, T_USERACC(%rax)
171 call smap_enable
172
173 /*
174 * Take a moment to potentially clear the RSB buffer. This is done to
175 * prevent various Spectre variant 2 and SpectreRSB attacks. This may
176 * not be sufficient. Please see uts/intel/ia32/ml/retpoline.s for more
177 * information about this.
178 */
179 call x86_rsb_stuff
180
181 /*
182 * Save non-volatile registers, and set return address for current
183 * thread to resume_return.
184 *
185 * %r12 = t (new thread) when done
186 */
187 SAVE_REGS(%rax, %r11)
188
189
190 LOADCPU(%r15) /* %r15 = CPU */
191 movq CPU_THREAD(%r15), %r13 /* %r13 = curthread */
192
193 /*
194 * Call savectx if thread has installed context ops.
195 *
196 * Note that if we have floating point context, the save op
197 * (either fpsave_begin or fpxsave_begin) will issue the
198 * async save instruction (fnsave or fxsave respectively)
199 * that we fwait for below.
200 */
201 cmpq $0, T_CTX(%r13) /* should current thread savectx? */
481 */
482 movq T_PC(%r12), %rax /* saved return addr */
483 RESTORE_REGS(%r11);
484 pushq %rax /* push return address for spl0() */
485 call __dtrace_probe___sched_on__cpu
486 jmp spl0
487
488 resume_from_intr_return:
489 /*
490 * Remove stack frame created in SAVE_REGS()
491 */
492 addq $CLONGSIZE, %rsp
493 ret
494 SET_SIZE(resume_from_intr)
495
	ENTRY(thread_start)
	/*
	 * Trampoline executed on a newly created thread's first dispatch.
	 * The stack image (start routine, its argument, and a length value)
	 * was presumably laid down by the thread-creation path — confirm
	 * against thread_load()'s stack setup.
	 *
	 * Pop the start routine and its two arguments into the SysV AMD64
	 * argument registers (%rdi, %rsi), then invoke start(arg, len) via
	 * the retpoline-safe indirect-call macro so the branch target cannot
	 * be speculatively injected (Spectre variant 2); see
	 * uts/intel/ia32/ml/retpoline.s.
	 */
	popq	%rax		/* start() */
	popq	%rdi		/* arg */
	popq	%rsi		/* len */
	movq	%rsp, %rbp	/* establish frame base for the new thread */
	INDIRECT_CALL_REG(rax)
	/* If the start routine ever returns, the thread self-destructs. */
	call	thread_exit	/* destroy thread if it returns. */
	/*NOTREACHED*/
	SET_SIZE(thread_start)
|