10924 Need mitigation of L1TF (CVE-2018-3646)
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Peter Tribble <peter.tribble@gmail.com>
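This webrev covers the swtch.s piece of the L1TF mitigation: the context-switch paths now call ht_mark() (directly in resume_from_intr(), and via the shared _resume_from_idle code for resume() and resume_from_zombie()), which, judging from the rest of the changeset, lets the HT-exclusion machinery track which thread currently owns each logical CPU. The change also retires the __lint stubs and the 32-bit (i386) variants of these routines, and moves the "assym.h" include below the system headers.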
--- old/usr/src/uts/intel/ia32/ml/swtch.s
+++ new/usr/src/uts/intel/ia32/ml/swtch.s
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 /*
27 - * Copyright (c) 2018 Joyent, Inc.
27 + * Copyright 2019 Joyent, Inc.
28 28 */
29 29
30 30 /*
31 31 * Process switching routines.
32 32 */
33 33
34 -#if defined(__lint)
35 -#include <sys/thread.h>
36 -#include <sys/systm.h>
37 -#include <sys/time.h>
38 -#else /* __lint */
39 -#include "assym.h"
40 -#endif /* __lint */
41 -
42 34 #include <sys/asm_linkage.h>
43 35 #include <sys/asm_misc.h>
44 36 #include <sys/regset.h>
45 37 #include <sys/privregs.h>
46 38 #include <sys/stack.h>
47 39 #include <sys/segments.h>
48 40 #include <sys/psw.h>
49 41
42 +#include "assym.h"
43 +
50 44 /*
51 45 * resume(thread_id_t t);
52 46 *
53 47 * a thread can only run on one processor at a time. there
54 48 * exists a window on MPs where the current thread on one
55 49 * processor is capable of being dispatched by another processor.
56 50 * some overlap between outgoing and incoming threads can happen
57 51 * when they are the same thread. in this case where the threads
58 52 * are the same, resume() on one processor will spin on the incoming
59 53 * thread until resume() on the other processor has finished with
60 54 * the outgoing thread.
61 55 *
62 56 * The MMU context changes when the resuming thread resides in a different
63 57 * process. Kernel threads are known by resume to reside in process 0.
64 58 * The MMU context, therefore, only changes when resuming a thread in
65 59 * a process different from curproc.
66 60 *
67 61 * resume_from_intr() is called when the thread being resumed was not
68 62 * passivated by resume (e.g. was interrupted). This means that the
69 63 * resume lock is already held and that a restore context is not needed.
70 64 * Also, the MMU context is not changed on the resume in this case.
71 65 *
72 66 * resume_from_zombie() is the same as resume except the calling thread
73 67 * is a zombie and must be put on the deathrow list after the CPU is
74 68 * off the stack.
75 69 */
76 70
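For orientation, a minimal sketch of the caller side, assuming the usual illumos types and simplifying heavily (the real swtch() in the dispatcher does far more bookkeeping than this):

	void
	swtch_sketch(void)
	{
		kthread_t *next = disp();	/* pick the next runnable thread */

		if (next != curthread)
			resume(next);		/* switch stacks onto "next" */
	}

From the outgoing thread's point of view, resume() only appears to return when that thread is next scheduled; in the meantime control runs on the incoming thread's stack and leaves through spl0().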
77 -#if !defined(__lint)
78 -
79 71 #if LWP_PCB_FPU != 0
80 72 #error LWP_PCB_FPU MUST be defined as 0 for code in swtch.s to work
81 73 #endif /* LWP_PCB_FPU != 0 */
82 74
83 -#endif /* !__lint */
84 -
85 -#if defined(__amd64)
86 -
87 75 /*
88 76 * Save non-volatile regs other than %rsp (%rbx, %rbp, and %r12 - %r15)
89 77 *
90 78 * The stack frame must be created before the save of %rsp so that tracebacks
91 79 * of swtch()ed-out processes show the process as having last called swtch().
92 80 */
93 81 #define SAVE_REGS(thread_t, retaddr) \
94 82 movq %rbp, T_RBP(thread_t); \
95 83 movq %rbx, T_RBX(thread_t); \
96 84 movq %r12, T_R12(thread_t); \
97 85 movq %r13, T_R13(thread_t); \
98 86 movq %r14, T_R14(thread_t); \
99 87 movq %r15, T_R15(thread_t); \
100 88 pushq %rbp; \
101 89 movq %rsp, %rbp; \
102 90 movq %rsp, T_SP(thread_t); \
103 91 movq retaddr, T_PC(thread_t); \
104 92 movq %rdi, %r12; \
105 93 call __dtrace_probe___sched_off__cpu
106 94
107 95 /*
108 96 * Restore non-volatile regs other than %rsp (%rbx, %rbp, and %r12 - %r15)
109 97 *
110 98 * We load up %rsp from the label_t as part of the context switch, so
111 99 * we don't repeat that here.
112 100 *
113 101 * We don't do a 'leave,' because reloading %rsp/%rbp from the label_t
114 102 * already has the effect of putting the stack back the way it was when
115 103 * we came in.
116 104 */
117 105 #define RESTORE_REGS(scratch_reg) \
118 106 movq %gs:CPU_THREAD, scratch_reg; \
119 107 movq T_RBP(scratch_reg), %rbp; \
120 108 movq T_RBX(scratch_reg), %rbx; \
121 109 movq T_R12(scratch_reg), %r12; \
122 110 movq T_R13(scratch_reg), %r13; \
123 111 movq T_R14(scratch_reg), %r14; \
124 112 movq T_R15(scratch_reg), %r15
125 113
126 114 /*
127 115 * Get pointer to a thread's hat structure
128 116 */
129 117 #define GET_THREAD_HATP(hatp, thread_t, scratch_reg) \
130 118 movq T_PROCP(thread_t), hatp; \
131 119 movq P_AS(hatp), scratch_reg; \
132 120 movq A_HAT(scratch_reg), hatp
133 121
134 122 #define TSC_READ() \
135 123 call tsc_read; \
136 124 movq %rax, %r14;
137 125
138 126 /*
139 127 * If we are resuming an interrupt thread, store a timestamp in the thread
140 128 * structure. If an interrupt occurs between tsc_read() and its subsequent
141 129 * store, the timestamp will be stale by the time it is stored. We can detect
142 130 * this by doing a compare-and-swap on the thread's timestamp, since any
143 131 * interrupt occurring in this window will put a new timestamp in the thread's
144 132 * t_intr_start field.
145 133 */
146 134 #define STORE_INTR_START(thread_t) \
147 135 testw $T_INTR_THREAD, T_FLAGS(thread_t); \
148 136 jz 1f; \
149 137 0: \
150 138 TSC_READ(); \
151 139 movq T_INTR_START(thread_t), %rax; \
152 140 cmpxchgq %r14, T_INTR_START(thread_t); \
153 141 jnz 0b; \
154 142 1:
155 143
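The STORE_INTR_START() retry loop above, restated in C for clarity (a sketch only: the helper name is invented, and it assumes the usual declarations for kthread_t, atomic_cas_64() and a tsc_read() prototype):

	static void
	store_intr_start_sketch(kthread_t *t)
	{
		hrtime_t old, new;

		if (!(t->t_flags & T_INTR_THREAD))
			return;

		do {
			new = tsc_read();	/* fresh timestamp */
			old = t->t_intr_start;	/* value we expect to replace */
			/*
			 * If an interrupt stored its own timestamp between the
			 * two reads, the compare-and-swap fails and we retry
			 * with a newer TSC reading.
			 */
		} while (atomic_cas_64((uint64_t *)&t->t_intr_start,
		    (uint64_t)old, (uint64_t)new) != (uint64_t)old);
	}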
156 -#elif defined (__i386)
157 -
158 -/*
159 - * Save non-volatile registers (%ebp, %esi, %edi and %ebx)
160 - *
161 - * The stack frame must be created before the save of %esp so that tracebacks
162 - * of swtch()ed-out processes show the process as having last called swtch().
163 - */
164 -#define SAVE_REGS(thread_t, retaddr) \
165 - movl %ebp, T_EBP(thread_t); \
166 - movl %ebx, T_EBX(thread_t); \
167 - movl %esi, T_ESI(thread_t); \
168 - movl %edi, T_EDI(thread_t); \
169 - pushl %ebp; \
170 - movl %esp, %ebp; \
171 - movl %esp, T_SP(thread_t); \
172 - movl retaddr, T_PC(thread_t); \
173 - movl 8(%ebp), %edi; \
174 - pushl %edi; \
175 - call __dtrace_probe___sched_off__cpu; \
176 - addl $CLONGSIZE, %esp
177 -
178 -/*
179 - * Restore non-volatile registers (%ebp, %esi, %edi and %ebx)
180 - *
181 - * We don't do a 'leave,' because reloading %rsp/%rbp from the label_t
182 - * already has the effect of putting the stack back the way it was when
183 - * we came in.
184 - */
185 -#define RESTORE_REGS(scratch_reg) \
186 - movl %gs:CPU_THREAD, scratch_reg; \
187 - movl T_EBP(scratch_reg), %ebp; \
188 - movl T_EBX(scratch_reg), %ebx; \
189 - movl T_ESI(scratch_reg), %esi; \
190 - movl T_EDI(scratch_reg), %edi
191 -
192 -/*
193 - * Get pointer to a thread's hat structure
194 - */
195 -#define GET_THREAD_HATP(hatp, thread_t, scratch_reg) \
196 - movl T_PROCP(thread_t), hatp; \
197 - movl P_AS(hatp), scratch_reg; \
198 - movl A_HAT(scratch_reg), hatp
199 -
200 -/*
201 - * If we are resuming an interrupt thread, store a timestamp in the thread
202 - * structure. If an interrupt occurs between tsc_read() and its subsequent
203 - * store, the timestamp will be stale by the time it is stored. We can detect
204 - * this by doing a compare-and-swap on the thread's timestamp, since any
205 - * interrupt occurring in this window will put a new timestamp in the thread's
206 - * t_intr_start field.
207 - */
208 -#define STORE_INTR_START(thread_t) \
209 - testw $T_INTR_THREAD, T_FLAGS(thread_t); \
210 - jz 1f; \
211 - pushl %ecx; \
212 -0: \
213 - pushl T_INTR_START(thread_t); \
214 - pushl T_INTR_START+4(thread_t); \
215 - call tsc_read; \
216 - movl %eax, %ebx; \
217 - movl %edx, %ecx; \
218 - popl %edx; \
219 - popl %eax; \
220 - cmpxchg8b T_INTR_START(thread_t); \
221 - jnz 0b; \
222 - popl %ecx; \
223 -1:
224 -
225 -#endif /* __amd64 */
226 -
227 -#if defined(__lint)
228 -
229 -/* ARGSUSED */
230 -void
231 -resume(kthread_t *t)
232 -{}
233 -
234 -#else /* __lint */
235 -
236 -#if defined(__amd64)
237 -
238 144 .global kpti_enable
239 145
240 146 ENTRY(resume)
241 147 movq %gs:CPU_THREAD, %rax
242 148 leaq resume_return(%rip), %r11
243 149
244 150 /*
245 151 * Deal with SMAP here. A thread may be switched out at any point while
246 152 * it is executing. The thread could be under on_fault() or it could be
247 153 * pre-empted while performing a copy interruption. If this happens and
248 154 * we're not in the context of an interrupt which happens to handle
249 155 * saving and restoring rflags correctly, we may lose our SMAP related
250 156 * state.
251 157 *
252 158 * To handle this, as part of being switched out, we first save whether
253 159 * or not userland access is allowed ($PS_ACHK in rflags) and store that
254 160 * in t_useracc on the kthread_t and unconditionally enable SMAP to
255 161 * protect the system.
256 162 *
257 163 * Later, when the thread finishes resuming, we potentially disable smap
258 164 * if PS_ACHK was present in rflags. See uts/intel/ia32/ml/copy.s for
259 165 * more information on rflags and SMAP.
260 166 */
261 167 pushfq
262 168 popq %rsi
263 169 andq $PS_ACHK, %rsi
264 170 movq %rsi, T_USERACC(%rax)
265 171 call smap_enable
266 172
267 173 /*
268 174 * Save non-volatile registers, and set return address for current
269 175 * thread to resume_return.
270 176 *
271 177 * %r12 = t (new thread) when done
272 178 */
273 179 SAVE_REGS(%rax, %r11)
274 180
275 181
276 182 LOADCPU(%r15) /* %r15 = CPU */
277 183 movq CPU_THREAD(%r15), %r13 /* %r13 = curthread */
278 184
279 185 /*
280 186 * Call savectx if thread has installed context ops.
281 187 *
282 188 * Note that if we have floating point context, the save op
283 189 * (either fpsave_begin or fpxsave_begin) will issue the
284 190 * async save instruction (fnsave or fxsave respectively)
285 191 * that we fwait for below.
286 192 */
287 193 cmpq $0, T_CTX(%r13) /* should current thread savectx? */
288 194 je .nosavectx /* skip call when zero */
289 195
290 196 movq %r13, %rdi /* arg = thread pointer */
291 197 call savectx /* call ctx ops */
292 198 .nosavectx:
293 199
294 200 /*
295 201 * Call savepctx if process has installed context ops.
296 202 */
297 203 movq T_PROCP(%r13), %r14 /* %r14 = proc */
298 204 cmpq $0, P_PCTX(%r14) /* should current thread savectx? */
299 205 je .nosavepctx /* skip call when zero */
300 206
301 207 movq %r14, %rdi /* arg = proc pointer */
302 208 call savepctx /* call ctx ops */
303 209 .nosavepctx:
304 210
305 211 /*
306 212 * Temporarily switch to the idle thread's stack
307 213 */
308 214 movq CPU_IDLE_THREAD(%r15), %rax /* idle thread pointer */
309 215
310 216 /*
311 217 * Set the idle thread as the current thread
312 218 */
313 219 movq T_SP(%rax), %rsp /* It is safe to set rsp */
314 220 movq %rax, CPU_THREAD(%r15)
315 221
316 222 /*
317 223 * Switch in the hat context for the new thread
318 224 *
319 225 */
320 226 GET_THREAD_HATP(%rdi, %r12, %r11)
321 227 call hat_switch
322 228
323 229 /*
324 230 * Clear and unlock previous thread's t_lock
325 231 * to allow it to be dispatched by another processor.
326 232 */
327 233 movb $0, T_LOCK(%r13)
328 234
329 235 /*
330 236 * IMPORTANT: Registers at this point must be:
331 237 * %r12 = new thread
332 238 *
333 239 * Here we are in the idle thread, have dropped the old thread.
334 240 */
335 241 ALTENTRY(_resume_from_idle)
336 242 /*
337 243 * spin until dispatched thread's mutex has
338 244 * been unlocked. this mutex is unlocked when
339 245 * it becomes safe for the thread to run.
340 246 */
341 247 .lock_thread_mutex:
342 248 lock
343 249 btsl $0, T_LOCK(%r12) /* attempt to lock new thread's mutex */
344 250 jnc .thread_mutex_locked /* got it */
345 251
346 252 .spin_thread_mutex:
347 253 pause
348 254 cmpb $0, T_LOCK(%r12) /* check mutex status */
349 255 jz .lock_thread_mutex /* clear, retry lock */
350 256 jmp .spin_thread_mutex /* still locked, spin... */
351 257
352 258 .thread_mutex_locked:
353 259 /*
354 260 * Fix CPU structure to indicate new running thread.
355 261 * Set pointer in new thread to the CPU structure.
356 262 */
357 263 LOADCPU(%r13) /* load current CPU pointer */
358 264 cmpq %r13, T_CPU(%r12)
359 265 je .setup_cpu
360 266
361 267 /* cp->cpu_stats.sys.cpumigrate++ */
362 268 incq CPU_STATS_SYS_CPUMIGRATE(%r13)
363 269 movq %r13, T_CPU(%r12) /* set new thread's CPU pointer */
364 270
365 271 .setup_cpu:
366 272 /*
367 273 * Setup rsp0 (kernel stack) in TSS to curthread's saved regs
368 274 * structure. If this thread doesn't have a regs structure above
369 275 * the stack -- that is, if lwp_stk_init() was never called for the
370 276 * thread -- this will set rsp0 to the wrong value, but it's harmless
371 277 * as it's a kernel thread, and it won't actually attempt to implicitly
372 278 * use the rsp0 via a privilege change.
373 279 *
374 280 * Note that when we have KPTI enabled on amd64, we never use this
375 281 * value at all (since all the interrupts have an IST set).
376 282 */
377 283 movq CPU_TSS(%r13), %r14
378 284 #if !defined(__xpv)
379 285 cmpq $1, kpti_enable
380 286 jne 1f
381 287 leaq CPU_KPTI_TR_RSP(%r13), %rax
382 288 jmp 2f
383 289 1:
384 290 movq T_STACK(%r12), %rax
385 291 addq $REGSIZE+MINFRAME, %rax /* to the bottom of thread stack */
386 292 2:
387 293 movq %rax, TSS_RSP0(%r14)
388 294 #else
389 295 movq T_STACK(%r12), %rax
390 296 addq $REGSIZE+MINFRAME, %rax /* to the bottom of thread stack */
391 297 movl $KDS_SEL, %edi
392 298 movq %rax, %rsi
393 299 call HYPERVISOR_stack_switch
394 300 #endif /* __xpv */
395 301
396 302 movq %r12, CPU_THREAD(%r13) /* set CPU's thread pointer */
397 303 mfence /* synchronize with mutex_exit() */
398 304 xorl %ebp, %ebp /* make $<threadlist behave better */
399 305 movq T_LWP(%r12), %rax /* set associated lwp to */
400 306 movq %rax, CPU_LWP(%r13) /* CPU's lwp ptr */
401 307
402 308 movq T_SP(%r12), %rsp /* switch to outgoing thread's stack */
403 309 movq T_PC(%r12), %r13 /* saved return addr */
404 310
405 311 /*
406 312 * Call restorectx if context ops have been installed.
407 313 */
408 314 cmpq $0, T_CTX(%r12) /* should resumed thread restorectx? */
409 315 jz .norestorectx /* skip call when zero */
410 316 movq %r12, %rdi /* arg = thread pointer */
411 317 call restorectx /* call ctx ops */
412 318 .norestorectx:
413 319
414 320 /*
415 321 * Call restorepctx if context ops have been installed for the proc.
416 322 */
417 323 movq T_PROCP(%r12), %rcx
418 324 cmpq $0, P_PCTX(%rcx)
419 325 jz .norestorepctx
420 326 movq %rcx, %rdi
421 327 call restorepctx
422 328 .norestorepctx:
423 329
424 330 STORE_INTR_START(%r12)
425 331
426 332 /*
427 333 * If we came into swtch with the ability to access userland pages, go
428 334 * ahead and restore that fact by disabling SMAP. Clear the indicator
429 335 * flag out of paranoia.
430 336 */
431 337 movq T_USERACC(%r12), %rax /* should we disable smap? */
432 338 cmpq $0, %rax /* skip call when zero */
433 339 jz .nosmap
434 340 xorq %rax, %rax
435 341 movq %rax, T_USERACC(%r12)
436 342 call smap_disable
437 343 .nosmap:
438 344
345 + call ht_mark
346 +
439 347 /*
440 348 * Restore non-volatile registers, then have spl0 return to the
441 349 * resuming thread's PC after first setting the priority as low as
442 350 * possible and blocking all interrupt threads that may be active.
443 351 */
444 352 movq %r13, %rax /* save return address */
445 353 RESTORE_REGS(%r11)
446 354 pushq %rax /* push return address for spl0() */
447 355 call __dtrace_probe___sched_on__cpu
448 356 jmp spl0
449 357
450 358 resume_return:
451 359 /*
452 360 * Remove stack frame created in SAVE_REGS()
453 361 */
454 362 addq $CLONGSIZE, %rsp
455 363 ret
456 364 SET_SIZE(_resume_from_idle)
457 365 SET_SIZE(resume)
458 366
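The t_useracc/SMAP dance in resume() above, restated as C (illustrative only: read_rflags() is a hypothetical stand-in for however rflags is sampled, while smap_enable(), smap_disable(), PS_ACHK and t_useracc are the names the assembly itself uses):

	void
	smap_switch_out_sketch(kthread_t *t)
	{
		/* Remember whether userland access (rflags.AC) was open... */
		t->t_useracc = read_rflags() & PS_ACHK;
		/* ...and unconditionally close it while the thread is off CPU. */
		smap_enable();
	}

	void
	smap_switch_in_sketch(kthread_t *t)
	{
		if (t->t_useracc != 0) {
			t->t_useracc = 0;	/* clear the flag out of paranoia */
			smap_disable();		/* reopen access to userland pages */
		}
	}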
459 -#elif defined (__i386)
460 -
461 - ENTRY(resume)
462 - movl %gs:CPU_THREAD, %eax
463 - movl $resume_return, %ecx
464 -
465 - /*
466 - * Save non-volatile registers, and set return address for current
467 - * thread to resume_return.
468 - *
469 - * %edi = t (new thread) when done.
470 - */
471 - SAVE_REGS(%eax, %ecx)
472 -
473 - LOADCPU(%ebx) /* %ebx = CPU */
474 - movl CPU_THREAD(%ebx), %esi /* %esi = curthread */
475 -
476 -#ifdef DEBUG
477 - call assert_ints_enabled /* panics if we are cli'd */
478 -#endif
479 - /*
480 - * Call savectx if thread has installed context ops.
481 - *
482 - * Note that if we have floating point context, the save op
483 - * (either fpsave_begin or fpxsave_begin) will issue the
484 - * async save instruction (fnsave or fxsave respectively)
485 - * that we fwait for below.
486 - */
487 - movl T_CTX(%esi), %eax /* should current thread savectx? */
488 - testl %eax, %eax
489 - jz .nosavectx /* skip call when zero */
490 - pushl %esi /* arg = thread pointer */
491 - call savectx /* call ctx ops */
492 - addl $4, %esp /* restore stack pointer */
493 -.nosavectx:
494 -
495 - /*
496 - * Call savepctx if process has installed context ops.
497 - */
498 - movl T_PROCP(%esi), %eax /* %eax = proc */
499 - cmpl $0, P_PCTX(%eax) /* should current thread savectx? */
500 - je .nosavepctx /* skip call when zero */
501 - pushl %eax /* arg = proc pointer */
502 - call savepctx /* call ctx ops */
503 - addl $4, %esp
504 -.nosavepctx:
505 -
506 - /*
507 - * Temporarily switch to the idle thread's stack
508 - */
509 - movl CPU_IDLE_THREAD(%ebx), %eax /* idle thread pointer */
510 -
511 - /*
512 - * Set the idle thread as the current thread
513 - */
514 - movl T_SP(%eax), %esp /* It is safe to set esp */
515 - movl %eax, CPU_THREAD(%ebx)
516 -
517 - /* switch in the hat context for the new thread */
518 - GET_THREAD_HATP(%ecx, %edi, %ecx)
519 - pushl %ecx
520 - call hat_switch
521 - addl $4, %esp
522 -
523 - /*
524 - * Clear and unlock previous thread's t_lock
525 - * to allow it to be dispatched by another processor.
526 - */
527 - movb $0, T_LOCK(%esi)
528 -
529 - /*
530 - * IMPORTANT: Registers at this point must be:
531 - * %edi = new thread
532 - *
533 - * Here we are in the idle thread, have dropped the old thread.
534 - */
535 - ALTENTRY(_resume_from_idle)
536 - /*
537 - * spin until dispatched thread's mutex has
538 - * been unlocked. this mutex is unlocked when
539 - * it becomes safe for the thread to run.
540 - */
541 -.L4:
542 - lock
543 - btsl $0, T_LOCK(%edi) /* lock new thread's mutex */
544 - jc .L4_2 /* lock did not succeed */
545 -
546 - /*
547 - * Fix CPU structure to indicate new running thread.
548 - * Set pointer in new thread to the CPU structure.
549 - */
550 - LOADCPU(%esi) /* load current CPU pointer */
551 - movl T_STACK(%edi), %eax /* here to use v pipeline of */
552 - /* Pentium. Used few lines below */
553 - cmpl %esi, T_CPU(%edi)
554 - jne .L5_2
555 -.L5_1:
556 - /*
557 - * Setup esp0 (kernel stack) in TSS to curthread's stack.
558 - * (Note: Since we don't have saved 'regs' structure for all
559 - * the threads we can't easily determine if we need to
560 - * change esp0. So, we simply change the esp0 to bottom
561 - * of the thread stack and it will work for all cases.)
562 - */
563 - movl CPU_TSS(%esi), %ecx
564 - addl $REGSIZE+MINFRAME, %eax /* to the bottom of thread stack */
565 -#if !defined(__xpv)
566 - movl %eax, TSS_ESP0(%ecx)
567 -#else
568 - pushl %eax
569 - pushl $KDS_SEL
570 - call HYPERVISOR_stack_switch
571 - addl $8, %esp
572 -#endif /* __xpv */
573 -
574 - movl %edi, CPU_THREAD(%esi) /* set CPU's thread pointer */
575 - mfence /* synchronize with mutex_exit() */
576 - xorl %ebp, %ebp /* make $<threadlist behave better */
577 - movl T_LWP(%edi), %eax /* set associated lwp to */
578 - movl %eax, CPU_LWP(%esi) /* CPU's lwp ptr */
579 -
580 - movl T_SP(%edi), %esp /* switch to outgoing thread's stack */
581 - movl T_PC(%edi), %esi /* saved return addr */
582 -
583 - /*
584 - * Call restorectx if context ops have been installed.
585 - */
586 - movl T_CTX(%edi), %eax /* should resumed thread restorectx? */
587 - testl %eax, %eax
588 - jz .norestorectx /* skip call when zero */
589 - pushl %edi /* arg = thread pointer */
590 - call restorectx /* call ctx ops */
591 - addl $4, %esp /* restore stack pointer */
592 -.norestorectx:
593 -
594 - /*
595 - * Call restorepctx if context ops have been installed for the proc.
596 - */
597 - movl T_PROCP(%edi), %eax
598 - cmpl $0, P_PCTX(%eax)
599 - je .norestorepctx
600 - pushl %eax /* arg = proc pointer */
601 - call restorepctx
602 - addl $4, %esp /* restore stack pointer */
603 -.norestorepctx:
604 -
605 - STORE_INTR_START(%edi)
606 -
607 - /*
608 - * Restore non-volatile registers, then have spl0 return to the
609 - * resuming thread's PC after first setting the priority as low as
610 - * possible and blocking all interrupt threads that may be active.
611 - */
612 - movl %esi, %eax /* save return address */
613 - RESTORE_REGS(%ecx)
614 - pushl %eax /* push return address for spl0() */
615 - call __dtrace_probe___sched_on__cpu
616 - jmp spl0
617 -
618 -resume_return:
619 - /*
620 - * Remove stack frame created in SAVE_REGS()
621 - */
622 - addl $CLONGSIZE, %esp
623 - ret
624 -
625 -.L4_2:
626 - pause
627 - cmpb $0, T_LOCK(%edi)
628 - je .L4
629 - jmp .L4_2
630 -
631 -.L5_2:
632 - /* cp->cpu_stats.sys.cpumigrate++ */
633 - addl $1, CPU_STATS_SYS_CPUMIGRATE(%esi)
634 - adcl $0, CPU_STATS_SYS_CPUMIGRATE+4(%esi)
635 - movl %esi, T_CPU(%edi) /* set new thread's CPU pointer */
636 - jmp .L5_1
637 -
638 - SET_SIZE(_resume_from_idle)
639 - SET_SIZE(resume)
640 -
641 -#endif /* __amd64 */
642 -#endif /* __lint */
643 -
644 -#if defined(__lint)
645 -
646 -/* ARGSUSED */
647 -void
648 -resume_from_zombie(kthread_t *t)
649 -{}
650 -
651 -#else /* __lint */
652 -
653 -#if defined(__amd64)
654 -
655 367 ENTRY(resume_from_zombie)
656 368 movq %gs:CPU_THREAD, %rax
657 369 leaq resume_from_zombie_return(%rip), %r11
658 370
659 371 /*
660 372 * Save non-volatile registers, and set return address for current
661 373 * thread to resume_from_zombie_return.
662 374 *
663 375 * %r12 = t (new thread) when done
664 376 */
665 377 SAVE_REGS(%rax, %r11)
666 378
667 379 movq %gs:CPU_THREAD, %r13 /* %r13 = curthread */
668 380
669 381 /* clean up the fp unit. It might be left enabled */
670 382
671 383 #if defined(__xpv) /* XXPV XXtclayton */
672 384 /*
673 385 * Remove this after bringup.
674 386 * (Too many #gp's for an instrumented hypervisor.)
675 387 */
676 388 STTS(%rax)
677 389 #else
678 390 movq %cr0, %rax
679 391 testq $CR0_TS, %rax
680 392 jnz .zfpu_disabled /* if TS already set, nothing to do */
681 393 fninit /* init fpu & discard pending error */
682 394 orq $CR0_TS, %rax
683 395 movq %rax, %cr0
684 396 .zfpu_disabled:
685 397
686 398 #endif /* __xpv */
687 399
688 400 /*
689 401 * Temporarily switch to the idle thread's stack so that the zombie
690 402 * thread's stack can be reclaimed by the reaper.
691 403 */
692 404 movq %gs:CPU_IDLE_THREAD, %rax /* idle thread pointer */
693 405 movq T_SP(%rax), %rsp /* get onto idle thread stack */
694 406
695 407 /*
696 408 * Sigh. If the idle thread has never run thread_start()
697 409 * then t_sp is mis-aligned by thread_load().
698 410 */
699 411 andq $_BITNOT(STACK_ALIGN-1), %rsp
700 412
701 413 /*
702 414 * Set the idle thread as the current thread.
703 415 */
704 416 movq %rax, %gs:CPU_THREAD
705 417
706 418 /* switch in the hat context for the new thread */
707 419 GET_THREAD_HATP(%rdi, %r12, %r11)
708 420 call hat_switch
709 421
710 422 /*
711 423 * Put the zombie on death-row.
712 424 */
713 425 movq %r13, %rdi
714 426 call reapq_add
715 427
716 428 jmp _resume_from_idle /* finish job of resume */
717 429
718 430 resume_from_zombie_return:
719 431 RESTORE_REGS(%r11) /* restore non-volatile registers */
720 432 call __dtrace_probe___sched_on__cpu
721 433
722 434 /*
723 435 * Remove stack frame created in SAVE_REGS()
724 436 */
725 437 addq $CLONGSIZE, %rsp
726 438 ret
727 439 SET_SIZE(resume_from_zombie)
728 440
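The FPU teardown at the top of resume_from_zombie(), in C terms (a sketch under assumed names: fpu_init_discard() stands in for the bare fninit, while getcr0()/setcr0() and CR0_TS mirror the assembly):

	void
	zombie_fpu_teardown_sketch(void)
	{
		ulong_t cr0 = getcr0();

		if (!(cr0 & CR0_TS)) {		/* TS already set: nothing to do */
			fpu_init_discard();	/* fninit: reset FPU, drop pending error */
			setcr0(cr0 | CR0_TS);	/* next FPU use now traps (#NM) */
		}
	}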
729 -#elif defined (__i386)
730 -
731 - ENTRY(resume_from_zombie)
732 - movl %gs:CPU_THREAD, %eax
733 - movl $resume_from_zombie_return, %ecx
734 -
735 - /*
736 - * Save non-volatile registers, and set return address for current
737 - * thread to resume_from_zombie_return.
738 - *
739 - * %edi = t (new thread) when done.
740 - */
741 - SAVE_REGS(%eax, %ecx)
742 -
743 -#ifdef DEBUG
744 - call assert_ints_enabled /* panics if we are cli'd */
745 -#endif
746 - movl %gs:CPU_THREAD, %esi /* %esi = curthread */
747 -
748 - /* clean up the fp unit. It might be left enabled */
749 -
750 - movl %cr0, %eax
751 - testl $CR0_TS, %eax
752 - jnz .zfpu_disabled /* if TS already set, nothing to do */
753 - fninit /* init fpu & discard pending error */
754 - orl $CR0_TS, %eax
755 - movl %eax, %cr0
756 -.zfpu_disabled:
757 -
758 - /*
759 - * Temporarily switch to the idle thread's stack so that the zombie
760 - * thread's stack can be reclaimed by the reaper.
761 - */
762 - movl %gs:CPU_IDLE_THREAD, %eax /* idle thread pointer */
763 - movl T_SP(%eax), %esp /* get onto idle thread stack */
764 -
765 - /*
766 - * Set the idle thread as the current thread.
767 - */
768 - movl %eax, %gs:CPU_THREAD
769 -
770 - /*
771 - * switch in the hat context for the new thread
772 - */
773 - GET_THREAD_HATP(%ecx, %edi, %ecx)
774 - pushl %ecx
775 - call hat_switch
776 - addl $4, %esp
777 -
778 - /*
779 - * Put the zombie on death-row.
780 - */
781 - pushl %esi
782 - call reapq_add
783 - addl $4, %esp
784 - jmp _resume_from_idle /* finish job of resume */
785 -
786 -resume_from_zombie_return:
787 - RESTORE_REGS(%ecx) /* restore non-volatile registers */
788 - call __dtrace_probe___sched_on__cpu
789 -
790 - /*
791 - * Remove stack frame created in SAVE_REGS()
792 - */
793 - addl $CLONGSIZE, %esp
794 - ret
795 - SET_SIZE(resume_from_zombie)
796 -
797 -#endif /* __amd64 */
798 -#endif /* __lint */
799 -
800 -#if defined(__lint)
801 -
802 -/* ARGSUSED */
803 -void
804 -resume_from_intr(kthread_t *t)
805 -{}
806 -
807 -#else /* __lint */
808 -
809 -#if defined(__amd64)
810 -
811 441 ENTRY(resume_from_intr)
812 442 movq %gs:CPU_THREAD, %rax
813 443 leaq resume_from_intr_return(%rip), %r11
814 444
815 445 /*
816 446 * Save non-volatile registers, and set return address for current
817 447 * thread to resume_from_intr_return.
818 448 *
819 449 * %r12 = t (new thread) when done
820 450 */
821 451 SAVE_REGS(%rax, %r11)
822 452
823 453 movq %gs:CPU_THREAD, %r13 /* %r13 = curthread */
824 454 movq %r12, %gs:CPU_THREAD /* set CPU's thread pointer */
825 455 mfence /* synchronize with mutex_exit() */
826 456 movq T_SP(%r12), %rsp /* restore resuming thread's sp */
827 457 xorl %ebp, %ebp /* make $<threadlist behave better */
828 458
829 459 /*
830 460 * Unlock outgoing thread's mutex dispatched by another processor.
831 461 */
832 462 xorl %eax, %eax
833 463 xchgb %al, T_LOCK(%r13)
834 464
835 465 STORE_INTR_START(%r12)
836 466
467 + call ht_mark
468 +
837 469 /*
838 470 * Restore non-volatile registers, then have spl0 return to the
839 471 * resuming thread's PC after first setting the priority as low as
840 472 * possible and blocking all interrupt threads that may be active.
841 473 */
842 474 movq T_PC(%r12), %rax /* saved return addr */
843 475 RESTORE_REGS(%r11);
844 476 pushq %rax /* push return address for spl0() */
845 477 call __dtrace_probe___sched_on__cpu
846 478 jmp spl0
847 479
848 480 resume_from_intr_return:
849 481 /*
850 482 * Remove stack frame created in SAVE_REGS()
851 483 */
852 484 addq $CLONGSIZE, %rsp
853 485 ret
854 486 SET_SIZE(resume_from_intr)
855 487
856 -#elif defined (__i386)
857 -
858 - ENTRY(resume_from_intr)
859 - movl %gs:CPU_THREAD, %eax
860 - movl $resume_from_intr_return, %ecx
861 -
862 - /*
863 - * Save non-volatile registers, and set return address for current
864 - * thread to resume_return.
865 - *
866 - * %edi = t (new thread) when done.
867 - */
868 - SAVE_REGS(%eax, %ecx)
869 -
870 -#ifdef DEBUG
871 - call assert_ints_enabled /* panics if we are cli'd */
872 -#endif
873 - movl %gs:CPU_THREAD, %esi /* %esi = curthread */
874 - movl %edi, %gs:CPU_THREAD /* set CPU's thread pointer */
875 - mfence /* synchronize with mutex_exit() */
876 - movl T_SP(%edi), %esp /* restore resuming thread's sp */
877 - xorl %ebp, %ebp /* make $<threadlist behave better */
878 -
879 - /*
880 - * Unlock outgoing thread's mutex dispatched by another processor.
881 - */
882 - xorl %eax,%eax
883 - xchgb %al, T_LOCK(%esi)
884 -
885 - STORE_INTR_START(%edi)
886 -
887 - /*
888 - * Restore non-volatile registers, then have spl0 return to the
889 - * resuming thread's PC after first setting the priority as low as
890 - * possible and blocking all interrupt threads that may be active.
891 - */
892 - movl T_PC(%edi), %eax /* saved return addr */
893 - RESTORE_REGS(%ecx)
894 - pushl %eax /* push return address for spl0() */
895 - call __dtrace_probe___sched_on__cpu
896 - jmp spl0
897 -
898 -resume_from_intr_return:
899 - /*
900 - * Remove stack frame created in SAVE_REGS()
901 - */
902 - addl $CLONGSIZE, %esp
903 - ret
904 - SET_SIZE(resume_from_intr)
905 -
906 -#endif /* __amd64 */
907 -#endif /* __lint */
908 -
909 -#if defined(__lint)
910 -
911 -void
912 -thread_start(void)
913 -{}
914 -
915 -#else /* __lint */
916 -
917 -#if defined(__amd64)
918 -
919 488 ENTRY(thread_start)
920 489 popq %rax /* start() */
921 490 popq %rdi /* arg */
922 491 popq %rsi /* len */
923 492 movq %rsp, %rbp
924 493 call *%rax
925 494 call thread_exit /* destroy thread if it returns. */
926 495 /*NOTREACHED*/
927 496 SET_SIZE(thread_start)
928 -
929 -#elif defined(__i386)
930 -
931 - ENTRY(thread_start)
932 - popl %eax
933 - movl %esp, %ebp
934 - addl $8, %ebp
935 - call *%eax
936 - addl $8, %esp
937 - call thread_exit /* destroy thread if it returns. */
938 - /*NOTREACHED*/
939 - SET_SIZE(thread_start)
940 -
941 -#endif /* __i386 */
942 -
943 -#endif /* __lint */