 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2018 Joyent, Inc.
 */

/*
 * Process switching routines.
 */

#if defined(__lint)
#include <sys/thread.h>
#include <sys/systm.h>
#include <sys/time.h>
#else	/* __lint */
#include "assym.h"
#endif	/* __lint */

#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/stack.h>
#include <sys/segments.h>
	popl	%edx;						\
	popl	%eax;						\
	cmpxchg8b T_INTR_START(thread_t);			\
	jnz	0b;						\
	popl	%ecx;						\
1:

#endif	/* __amd64 */

#if defined(__lint)

/* ARGSUSED */
void
resume(kthread_t *t)
{}

#else	/* __lint */

#if defined(__amd64)

	.global	kpti_enable		/* set non-zero when KPTI is enabled */

	ENTRY(resume)
	movq	%gs:CPU_THREAD, %rax
	leaq	resume_return(%rip), %r11

	/*
	 * Deal with SMAP here. A thread may be switched out at any point
	 * while it is executing. The thread could be under on_fault() or it
	 * could be pre-empted in the middle of a copy operation. If this
	 * happens and we're not in the context of an interrupt which happens
	 * to handle saving and restoring rflags correctly, we may lose our
	 * SMAP-related state.
	 *
	 * To handle this, as part of being switched out, we first save
	 * whether or not userland access is allowed ($PS_ACHK in rflags),
	 * store that in t_useracc on the kthread_t, and unconditionally
	 * enable SMAP to protect the system.
	 *
	 * Later, when the thread finishes resuming, we potentially disable
	 * SMAP again if PS_ACHK was present in rflags. See
	 * uts/intel/ia32/ml/copy.s for more information on rflags and SMAP.
	 */
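	/*
	 * A rough C-level sketch of that intent (illustrative pseudocode
	 * only: getflags() stands in for reading rflags, and the real code
	 * uses inline clac/stac instructions rather than calls):
	 *
	 *	switching out:	t->t_useracc = getflags() & PS_ACHK;
	 *			clac();		(clear AC: SMAP enforced)
	 *	switching in:	if (t->t_useracc & PS_ACHK)
	 *				stac();	(set AC: user access allowed)
	 */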
	pause
	cmpb	$0, T_LOCK(%r12)	/* check mutex status */
	jz	.lock_thread_mutex	/* clear, retry lock */
	jmp	.spin_thread_mutex	/* still locked, spin... */

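	/*
	 * The jz above retries the atomic acquire at .lock_thread_mutex, so
	 * the overall shape is the usual test-and-test-and-set spin; an
	 * illustrative C sketch (helper names are stand-ins, not the exact
	 * primitives used here):
	 *
	 *	while (!lock_try(&t->t_lock)) {
	 *		while (t->t_lock != 0)
	 *			SMT_PAUSE();
	 *	}
	 */
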
.thread_mutex_locked:
	/*
	 * Fix CPU structure to indicate new running thread.
	 * Set pointer in new thread to the CPU structure.
	 */
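	/*
	 * The next few instructions amount to the following (illustrative C,
	 * using the cp/t naming from the comments in this file):
	 *
	 *	if (t->t_cpu != cp) {
	 *		cp->cpu_stats.sys.cpumigrate++;
	 *		t->t_cpu = cp;
	 *	}
	 */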
	LOADCPU(%r13)			/* load current CPU pointer */
	cmpq	%r13, T_CPU(%r12)
	je	.setup_cpu

	/* cp->cpu_stats.sys.cpumigrate++ */
	incq	CPU_STATS_SYS_CPUMIGRATE(%r13)
	movq	%r13, T_CPU(%r12)	/* set new thread's CPU pointer */

.setup_cpu:
	/*
	 * Set up rsp0 (kernel stack) in the TSS to point at curthread's saved
	 * regs structure. If this thread doesn't have a regs structure above
	 * the stack -- that is, if lwp_stk_init() was never called for the
	 * thread -- this will set rsp0 to the wrong value, but it's harmless
	 * as it's a kernel thread, and it won't actually attempt to implicitly
	 * use the rsp0 via a privilege change.
	 *
	 * Note that when we have KPTI enabled on amd64, we never use this
	 * value at all (since all the interrupts have an IST set).
	 */
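	/*
	 * An illustrative C-level sketch of the selection below (the names
	 * here mirror the assym offsets rather than the real struct members,
	 * and on Xen the result goes through HYPERVISOR_stack_switch()
	 * instead of a direct TSS write):
	 *
	 *	if (kpti_enable)
	 *		rsp0 = (uintptr_t)cp + CPU_KPTI_TR_RSP;
	 *	else
	 *		rsp0 = (uintptr_t)t->t_stk + REGSIZE + MINFRAME;
	 *	cpu_tss->tss_rsp0 = rsp0;
	 */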
	movq	CPU_TSS(%r13), %r14
#if !defined(__xpv)
	cmpq	$1, kpti_enable
	jne	1f
	leaq	CPU_KPTI_TR_RSP(%r13), %rax
	jmp	2f
1:
	movq	T_STACK(%r12), %rax
	addq	$REGSIZE+MINFRAME, %rax	/* to the bottom of thread stack */
2:
	movq	%rax, TSS_RSP0(%r14)
#else
	movq	T_STACK(%r12), %rax
	addq	$REGSIZE+MINFRAME, %rax	/* to the bottom of thread stack */
	movl	$KDS_SEL, %edi
	movq	%rax, %rsi
	call	HYPERVISOR_stack_switch
#endif	/* __xpv */

	movq	%r12, CPU_THREAD(%r13)	/* set CPU's thread pointer */
	mfence				/* synchronize with mutex_exit() */
	xorl	%ebp, %ebp		/* make $<threadlist behave better */
	movq	T_LWP(%r12), %rax	/* set associated lwp to */
	movq	%rax, CPU_LWP(%r13)	/* CPU's lwp ptr */

	movq	T_SP(%r12), %rsp	/* switch to resuming thread's stack */
	movq	T_PC(%r12), %r13	/* saved return addr */

	/*
	 * Call restorectx if context ops have been installed.
	 */
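	/*
	 * That is, roughly (illustrative C):
	 *
	 *	if (t->t_ctx != NULL)
	 *		restorectx(t);
	 */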
	cmpq	$0, T_CTX(%r12)		/* should resumed thread restorectx? */
	jz	.norestorectx		/* skip call when zero */
	movq	%r12, %rdi		/* arg = thread pointer */