/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Process switching routines.
 */

#if !defined(lint)
#include "assym.h"
#else	/* lint */
#include <sys/thread.h>
#endif	/* lint */

#include <sys/param.h>
#include <sys/asm_linkage.h>
#include <sys/mmu.h>
#include <sys/pcb.h>
#include <sys/machthread.h>
#include <sys/machclock.h>
#include <sys/privregs.h>
#include <sys/vtrace.h>
#include <vm/hat_sfmmu.h>

/*
 * resume(kthread_id_t)
 *
 * a thread can only run on one processor at a time. there
 * exists a window on MPs where the current thread on one
 * processor is capable of being dispatched by another processor.
 * some overlap between outgoing and incoming threads can happen
 * when they are the same thread. in this case where the threads
 * are the same, resume() on one processor will spin on the incoming
 * thread until resume() on the other processor has finished with
 * the outgoing thread.
 *
 * The MMU context changes when the resuming thread resides in a different
 * process.  Kernel threads are known by resume to reside in process 0.
 * The MMU context, therefore, only changes when resuming a thread in
 * a process different from curproc.
 *
 * resume_from_intr() is called when the thread being resumed was not
 * passivated by resume (e.g. was interrupted).  This means that the
 * resume lock is already held and that a restore context is not needed.
 * Also, the MMU context is not changed on the resume in this case.
 *
 * resume_from_zombie() is the same as resume except the calling thread
 * is a zombie and must be put on the deathrow list after the CPU is
 * off the stack.
 */
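/*
 * A rough C-level sketch of the flow below (an outline only, not
 * compilable source; names follow the fields and functions used in the
 * assembly):
 *
 *	resume(new)
 *	{
 *		curthread->t_pc = return address;  curthread->t_sp = %fp;
 *		save fp state (fprs/GSR via fp_save()) if FPRS_FEF is set;
 *		if (curthread->t_ctx != NULL)
 *			savectx(curthread);
 *		switch onto the idle thread's stack;
 *		curthread->t_lock = 0;	// old thread is now dispatchable
 *		if (new proc != old proc)
 *			program the MMU (sfmmu_setctx_sec() or
 *			    sfmmu_alloc_ctx(), then sfmmu_load_mmustate());
 *		while (ldstub(&new->t_lock) != 0)
 *			spin;		// wait until new is safe to run
 *		cpu->cpu_thread = new;  THREAD_REG = new;
 *		switch to new->t_sp; restore fp state;
 *		if (new->t_ctx != NULL)
 *			restorectx(new);
 *		return through new->t_pc;
 *	}
 */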

#if defined(lint)

/* ARGSUSED */
void
resume(kthread_id_t t)
{}

#else	/* lint */

	ENTRY(resume)
	save	%sp, -SA(MINFRAME), %sp		! save ins and locals

	call	__dtrace_probe___sched_off__cpu	! DTrace probe
	mov	%i0, %o0			! arg for DTrace probe

	membar	#Sync				! flush writebuffers
	flushw					! flushes all but this window

	stn	%i7, [THREAD_REG + T_PC]	! save return address
	stn	%fp, [THREAD_REG + T_SP]	! save sp

	!
	! Save GSR (Graphics Status Register).
	!
	! Read fprs, call fp_save if FPRS_FEF set.
	! This handles floating-point state saving.
	! The fprs could be turned on by hw bcopy software,
	! *or* by fp_disabled. Handle it either way.
	!
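	!
	! In outline (a C-level sketch; the kfpu_t placement for kernel
	! threads is inferred from the offsets used below):
	!	fprs = %fprs;
	!	fp = (t->t_lwp != NULL) ? t->t_lwp's fpu save area
	!	    : (kfpu_t *)(t->t_stk + SA(MINFRAME));
	!	fp->fpu_fprs = fprs;
	!	if (fprs & FPRS_FEF) {
	!		fp->fpu_gsr = %gsr;
	!		fp_save(fp);
	!	}
	!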
	ldn	[THREAD_REG + T_LWP], %o4	! get lwp pointer
	rd	%fprs, %g4			! read fprs
	brnz,pt	%o4, 0f				! if user thread skip
	ldn	[THREAD_REG + T_CPU], %i1	! get CPU pointer

	!
	! kernel thread
	!
	! we save fprs at the beginning of the stack so we know
	! where to check at resume time
	ldn	[THREAD_REG + T_STACK], %i2
	ldn	[THREAD_REG + T_CTX], %g3	! get ctx pointer
	andcc	%g4, FPRS_FEF, %g0		! is FPRS_FEF set
	bz,pt	%icc, 1f			! nope, skip
	st	%g4, [%i2 + SA(MINFRAME) + FPU_FPRS]	! save fprs

	! save kernel fp state in stack
	add	%i2, SA(MINFRAME), %o0		! o0 = kfpu_t ptr
	rd	%gsr, %g5
	call	fp_save
	stx	%g5, [%o0 + FPU_GSR]		! store GSR
	ba,a,pt	%icc, 1f
	nop

0:
	! user thread
	! o4 = lwp ptr
	! g4 = fprs
	! i1 = CPU ptr
	ldn	[%o4 + LWP_FPU], %o0		! fp pointer
	andcc	%g4, FPRS_FEF, %g0		! is FPRS_FEF set
	st	%g4, [%o0 + FPU_FPRS]		! store FPRS
#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
	sethi	%hi(fpu_exists), %g5
	ld	[%g5 + %lo(fpu_exists)], %g5
	brz,pn	%g5, 1f
	ldn	[THREAD_REG + T_CTX], %g3	! get ctx pointer
#endif
	bz,pt	%icc, 1f			! most apps don't use fp
	ldn	[THREAD_REG + T_CTX], %g3	! get ctx pointer
	ldn	[%o4 + LWP_FPU], %o0		! fp pointer
	rd	%gsr, %g5
	call	fp_save				! doesn't touch globals
	stx	%g5, [%o0 + FPU_GSR]		! store GSR
1:
	!
	! Perform context switch callback if set.
	! This handles coprocessor state saving.
	! i1 = cpu ptr
	! g3 = ctx pointer
	!
	wr	%g0, %g0, %fprs			! disable fpu and clear fprs
	brz,pt	%g3, 2f				! skip call when zero
	ldn	[%i0 + T_PROCP], %i3		! delay slot - get proc pointer
	call	savectx
	mov	THREAD_REG, %o0			! delay - arg = thread pointer
2:
	ldn	[THREAD_REG + T_PROCP], %i2	! load old curproc - for mmu

	!
	! Temporarily switch to idle thread's stack
	!
	ldn	[%i1 + CPU_IDLE_THREAD], %o0	! idle thread pointer
	ldn	[%o0 + T_SP], %o1		! get onto idle thread stack
	sub	%o1, SA(MINFRAME), %sp		! save room for ins and locals
	clr	%fp

	!
	! Set the idle thread as the current thread
	!
	mov	THREAD_REG, %l3			! save %g7 (current thread)
	mov	%o0, THREAD_REG			! set %g7 to idle
	stn	%o0, [%i1 + CPU_THREAD]		! set CPU's thread to idle

	!
	! Clear and unlock previous thread's t_lock
	! to allow it to be dispatched by another processor.
	!
	clrb	[%l3 + T_LOCK]			! clear tp->t_lock

	!
	! IMPORTANT: Registers at this point must be:
	!	%i0 = new thread
	!	%i1 = cpu pointer
	!	%i2 = old proc pointer
	!	%i3 = new proc pointer
	!
	! Here we are in the idle thread, have dropped the old thread.
	!
	ALTENTRY(_resume_from_idle)

	! SET_KCONTEXTREG(reg0, reg1, reg2, reg3, reg4, label1, label2, label3)
	SET_KCONTEXTREG(%o0, %g1, %g2, %g3, %o3, l1, l2, l3)

	cmp	%i2, %i3		! resuming the same process?
	be,pt	%xcc, 5f		! yes.
	nop

	ldx	[%i3 + P_AS], %o0	! load p->p_as
	ldx	[%o0 + A_HAT], %i5	! %i5 = new proc hat

	!
	! update cpusran field
	!
	ld	[%i1 + CPU_ID], %o4
	add	%i5, SFMMU_CPUSRAN, %o5
	CPU_INDEXTOSET(%o5, %o4, %g1)
	ldx	[%o5], %o2		! %o2 = cpusran field
	mov	1, %g2
	sllx	%g2, %o4, %o4		! %o4 = bit for this cpu
	andcc	%o4, %o2, %g0
	bnz,pn	%xcc, 0f		! bit already set, go to 0
	nop
3:
	or	%o2, %o4, %o1		! or in this cpu's bit mask
	casx	[%o5], %o2, %o1
	cmp	%o2, %o1
	bne,a,pn %xcc, 3b
	ldx	[%o5], %o2		! o2 = cpusran field
	membar	#LoadLoad|#StoreLoad
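	!
	! The 3: loop is, in effect (C sketch):
	!	do {
	!		old = *sranp;
	!		new = old | (1ULL << cpuid);
	!	} while (casx(sranp, old, new) != old);
	!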

0:
	!
	! disable interrupts
	!
	! if resume from user to kernel thread
	!	call sfmmu_setctx_sec
	! if resume from kernel (or a different user) thread to user thread
	!	call sfmmu_alloc_ctx
	! sfmmu_load_mmustate
	!
	! enable interrupts
	!
	! %i5 = new proc hat
	!
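	! Roughly, in C terms (a sketch; argument order matches the register
	! setup below):
	!	if (hat == ksfmmup) {
	!		sfmmu_setctx_sec(kernel ctx args);
	!	} else if (sfmmu_alloc_ctx(hat, 0, CPU, SFMMU_PRIVATE) != 0 &&
	!	    (scdp = hat->sfmmu_scdp) != NULL) {
	!		sfmmu_alloc_ctx(scdp->scd_sfmmup, 1, CPU, SFMMU_SHARED);
	!	}
	!	sfmmu_load_mmustate(hat);
	!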

	sethi	%hi(ksfmmup), %o2
	ldx	[%o2 + %lo(ksfmmup)], %o2

	rdpr	%pstate, %i4
	cmp	%i5, %o2		! new proc hat == ksfmmup ?
	bne,pt	%xcc, 3f		! new proc is not kernel as, go to 3
	wrpr	%i4, PSTATE_IE, %pstate

	SET_KAS_CTXSEC_ARGS(%i5, %o0, %o1)

	! new proc is kernel as

	call	sfmmu_setctx_sec		! switch to kernel context
	or	%o0, %o1, %o0

	ba,a,pt	%icc, 4f

	!
	! Switch to user address space.
	!
3:
	mov	%i5, %o0			! %o0 = sfmmup
	mov	%i1, %o2			! %o2 = CPU
	set	SFMMU_PRIVATE, %o3		! %o3 = sfmmu private flag
	call	sfmmu_alloc_ctx
	mov	%g0, %o1			! %o1 = allocate flag = 0

	brz,a,pt %o0, 4f			! %o0 == 0, no private alloc'ed
	nop

	ldn	[%i5 + SFMMU_SCDP], %o0		! using shared contexts?
	brz,a,pt %o0, 4f
	nop

	ldn	[%o0 + SCD_SFMMUP], %o0		! %o0 = scdp->scd_sfmmup
	mov	%i1, %o2			! %o2 = CPU
	set	SFMMU_SHARED, %o3		! %o3 = sfmmu shared flag
	call	sfmmu_alloc_ctx
	mov	1, %o1				! %o1 = allocate flag = 1

4:
	call	sfmmu_load_mmustate		! program MMU registers
	mov	%i5, %o0

	wrpr	%g0, %i4, %pstate		! enable interrupts

5:
	!
	! spin until dispatched thread's mutex has
	! been unlocked. this mutex is unlocked when
	! it becomes safe for the thread to run.
	!
	ldstub	[%i0 + T_LOCK], %o0	! lock curthread's t_lock
6:
	brnz,pn	%o0, 7f			! lock failed
	ldx	[%i0 + T_PC], %i7	! delay - restore resuming thread's pc

	!
	! Fix CPU structure to indicate new running thread.
	! Set pointer in new thread to the CPU structure.
	! XXX - Move migration statistic out of here
	!
	ldx	[%i0 + T_CPU], %g2	! last CPU to run the new thread
	cmp	%g2, %i1		! test for migration
	be,pt	%xcc, 4f		! no migration
	ldn	[%i0 + T_LWP], %o1	! delay - get associated lwp (if any)
	ldx	[%i1 + CPU_STATS_SYS_CPUMIGRATE], %g2
	inc	%g2
	stx	%g2, [%i1 + CPU_STATS_SYS_CPUMIGRATE]
	stx	%i1, [%i0 + T_CPU]	! set new thread's CPU pointer
4:
	stx	%i0, [%i1 + CPU_THREAD]	! set CPU's thread pointer
	membar	#StoreLoad		! synchronize with mutex_exit()
	mov	%i0, THREAD_REG		! update global thread register
	stx	%o1, [%i1 + CPU_LWP]	! set CPU's lwp ptr
	brz,a,pn %o1, 1f		! if no lwp, branch and clr mpcb
	stx	%g0, [%i1 + CPU_MPCB]
	!
	! user thread
	! o1 = lwp
	! i0 = new thread
	!
	ldx	[%i0 + T_STACK], %o0
	stx	%o0, [%i1 + CPU_MPCB]	! set CPU's mpcb pointer
#ifdef CPU_MPCB_PA
	ldx	[%o0 + MPCB_PA], %o0
	stx	%o0, [%i1 + CPU_MPCB_PA]
#endif
	! Switch to new thread's stack
	ldx	[%i0 + T_SP], %o0	! restore resuming thread's sp
	sub	%o0, SA(MINFRAME), %sp	! in case of intr or trap before restore
	mov	%o0, %fp
	!
	! Restore resuming thread's GSR reg and floating-point regs
	! Note that the ld to the gsr register ensures that the loading of
	! the floating point saved state has completed without necessity
	! of a membar #Sync.
	!
#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
	sethi	%hi(fpu_exists), %g3
	ld	[%g3 + %lo(fpu_exists)], %g3
	brz,pn	%g3, 2f
	ldx	[%i0 + T_CTX], %i5	! should resumed thread restorectx?
#endif
	ldx	[%o1 + LWP_FPU], %o0	! fp pointer
	ld	[%o0 + FPU_FPRS], %g5	! get fpu_fprs
	andcc	%g5, FPRS_FEF, %g0	! is FPRS_FEF set?
	bz,a,pt	%icc, 9f		! no, skip fp_restore
	wr	%g0, FPRS_FEF, %fprs	! enable fprs so fp_zero works

	ldx	[THREAD_REG + T_CPU], %o4	! cpu pointer
	call	fp_restore
	wr	%g5, %g0, %fprs		! enable fpu and restore fprs

	ldx	[%o0 + FPU_GSR], %g5	! load saved GSR data
	wr	%g5, %g0, %gsr		! restore %gsr data
	ba,pt	%icc, 2f
	ldx	[%i0 + T_CTX], %i5	! should resumed thread restorectx?

9:
	!
	! Zero resuming thread's fp registers, for *all* non-fp programs.
	! Remove all possibility of using the fp regs as a "covert channel".
	!
	call	fp_zero
	wr	%g0, %g0, %gsr
	ldx	[%i0 + T_CTX], %i5	! should resumed thread restorectx?
	ba,pt	%icc, 2f
	wr	%g0, %g0, %fprs		! disable fprs

1:
#ifdef CPU_MPCB_PA
	mov	-1, %o1
	stx	%o1, [%i1 + CPU_MPCB_PA]
#endif
	!
	! kernel thread
	! i0 = new thread
	!
	! Switch to new thread's stack
	!
	ldx	[%i0 + T_SP], %o0	! restore resuming thread's sp
	sub	%o0, SA(MINFRAME), %sp	! in case of intr or trap before restore
	mov	%o0, %fp
	!
	! Restore resuming thread's GSR reg and floating-point regs
	! Note that the ld to the gsr register ensures that the loading of
	! the floating point saved state has completed without necessity
	! of a membar #Sync.
	!
	ldx	[%i0 + T_STACK], %o0
	ld	[%o0 + SA(MINFRAME) + FPU_FPRS], %g5	! load fprs
	ldx	[%i0 + T_CTX], %i5	! should thread restorectx?
	andcc	%g5, FPRS_FEF, %g0	! did we save fp in stack?
	bz,a,pt	%icc, 2f
	wr	%g0, %g0, %fprs		! clr fprs

	wr	%g5, %g0, %fprs		! enable fpu and restore fprs
	call	fp_restore
	add	%o0, SA(MINFRAME), %o0	! o0 = kfpu_t ptr
	ldx	[%o0 + FPU_GSR], %g5	! load saved GSR data
	wr	%g5, %g0, %gsr		! restore %gsr data

2:
	!
	! Restore resuming thread's context
	! i5 = ctx ptr
	!
	brz,a,pt %i5, 8f		! skip restorectx() when zero
	ld	[%i1 + CPU_BASE_SPL], %o0
	call	restorectx		! thread can not sleep on temp stack
	mov	THREAD_REG, %o0		! delay slot - arg = thread pointer
	!
	! Set priority as low as possible, blocking all interrupt threads
	! that may be active.
	!
	ld	[%i1 + CPU_BASE_SPL], %o0
8:
	wrpr	%o0, 0, %pil
	wrpr	%g0, WSTATE_KERN, %wstate
	!
	! If we are resuming an interrupt thread, store a starting timestamp
	! in the thread structure.
	!
	lduh	[THREAD_REG + T_FLAGS], %o0
	andcc	%o0, T_INTR_THREAD, %g0
	bnz,pn	%xcc, 0f
	nop
5:
	call	__dtrace_probe___sched_on__cpu	! DTrace probe
	nop

	ret				! resume curthread
	restore
0:
	add	THREAD_REG, T_INTR_START, %o2
1:
	ldx	[%o2], %o1
	RD_CLOCK_TICK(%o0,%o3,%g5,__LINE__)
	casx	[%o2], %o1, %o0
	cmp	%o0, %o1
	be,pt	%xcc, 5b
	nop
	! If an interrupt occurred while we were attempting to store
	! the timestamp, try again.
	ba,pt	%xcc, 1b
	nop
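	!
	! The 0:/1: sequence is, in C terms:
	!	do {
	!		old = t->t_intr_start;
	!		new = current tick value (RD_CLOCK_TICK);
	!	} while (casx(&t->t_intr_start, old, new) != old);
	!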

	!
	! lock failed - spin with regular load to avoid cache-thrashing.
	!
7:
	brnz,a,pt %o0, 7b		! spin while locked
	ldub	[%i0 + T_LOCK], %o0
	ba	%xcc, 6b
	ldstub	[%i0 + T_LOCK], %o0	! delay - lock curthread's mutex
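	!
	! Together, 5:/6:/7: amount to (C sketch):
	!	while (ldstub(&t->t_lock) != 0)
	!		while (t->t_lock != 0)
	!			;	! spin with plain loads only
	!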
	SET_SIZE(_resume_from_idle)
	SET_SIZE(resume)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
void
resume_from_zombie(kthread_id_t t)
{}

#else	/* lint */

	ENTRY(resume_from_zombie)
	save	%sp, -SA(MINFRAME), %sp		! save ins and locals

	call	__dtrace_probe___sched_off__cpu	! DTrace probe
	mov	%i0, %o0			! arg for DTrace probe

	ldn	[THREAD_REG + T_CPU], %i1	! cpu pointer

	flushw					! flushes all but this window
	ldn	[THREAD_REG + T_PROCP], %i2	! old procp for mmu ctx

	!
	! Temporarily switch to the idle thread's stack so that
	! the zombie thread's stack can be reclaimed by the reaper.
	!
	ldn	[%i1 + CPU_IDLE_THREAD], %o2	! idle thread pointer
	ldn	[%o2 + T_SP], %o1		! get onto idle thread stack
	sub	%o1, SA(MINFRAME), %sp		! save room for ins and locals
	clr	%fp
	!
	! Set the idle thread as the current thread.
	! Put the zombie on death-row.
	!
	mov	THREAD_REG, %o0			! save %g7 = curthread for arg
	mov	%o2, THREAD_REG			! set %g7 to idle
	stn	%g0, [%i1 + CPU_MPCB]		! clear mpcb
#ifdef CPU_MPCB_PA
	mov	-1, %o1
	stx	%o1, [%i1 + CPU_MPCB_PA]
#endif
	call	reapq_add			! reapq_add(old_thread);
	stn	%o2, [%i1 + CPU_THREAD]		! delay - CPU's thread = idle

	!
	! resume_from_idle args:
	!	%i0 = new thread
	!	%i1 = cpu
	!	%i2 = old proc
	!	%i3 = new proc
	!
	b	_resume_from_idle		! finish job of resume
	ldn	[%i0 + T_PROCP], %i3		! new process
	SET_SIZE(resume_from_zombie)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
void
resume_from_intr(kthread_id_t t)
{}

#else	/* lint */

	ENTRY(resume_from_intr)
	save	%sp, -SA(MINFRAME), %sp		! save ins and locals

	!
	! We read in the fprs and call fp_save if FPRS_FEF is set
	! to save the floating-point state if fprs has been
	! modified by operations such as hw bcopy or fp_disabled.
	! This is to resolve an issue where an interrupting thread
	! doesn't retain its floating-point registers when
	! switching out of the interrupt context.
	!
	rd	%fprs, %g4
	ldn	[THREAD_REG + T_STACK], %i2
	andcc	%g4, FPRS_FEF, %g0	! is FPRS_FEF set
	bz,pt	%icc, 4f
	st	%g4, [%i2 + SA(MINFRAME) + FPU_FPRS]	! save fprs

	! save kernel fp state in stack
	add	%i2, SA(MINFRAME), %o0	! %o0 = kfpu_t ptr
	rd	%gsr, %g5
	call	fp_save
	stx	%g5, [%o0 + FPU_GSR]	! store GSR

4:

	flushw					! flushes all but this window
	stn	%fp, [THREAD_REG + T_SP]	! save sp
	stn	%i7, [THREAD_REG + T_PC]	! save return address

	ldn	[%i0 + T_PC], %i7		! restore resuming thread's pc
	ldn	[THREAD_REG + T_CPU], %i1	! cpu pointer

	!
	! Fix CPU structure to indicate new running thread.
	! The pinned thread we're resuming already has the CPU pointer set.
	!
	mov	THREAD_REG, %l3			! save old thread
	stn	%i0, [%i1 + CPU_THREAD]		! set CPU's thread pointer
	membar	#StoreLoad			! synchronize with mutex_exit()
	mov	%i0, THREAD_REG			! update global thread register

	!
	! Switch to new thread's stack
	!
	ldn	[THREAD_REG + T_SP], %o0	! restore resuming thread's sp
	sub	%o0, SA(MINFRAME), %sp		! in case of intr or trap before restore
	mov	%o0, %fp
	clrb	[%l3 + T_LOCK]			! clear intr thread's tp->t_lock

	!
	! If we are resuming an interrupt thread, store a timestamp in the
	! thread structure.
	!
	lduh	[THREAD_REG + T_FLAGS], %o0
	andcc	%o0, T_INTR_THREAD, %g0
	bnz,pn	%xcc, 0f
	!
	! We're resuming a non-interrupt thread.
	! Clear CPU_INTRCNT and check whether cpu_kprunrun is set.
	!
	ldub	[%i1 + CPU_KPRUNRUN], %o5	! delay
	brnz,pn	%o5, 3f				! call kpreempt(KPREEMPT_SYNC);
	stub	%g0, [%i1 + CPU_INTRCNT]
1:
	ret					! resume curthread
	restore
0:
	!
	! We're an interrupt thread. Update t_intr_start and cpu_intrcnt.
	!
	add	THREAD_REG, T_INTR_START, %o2
2:
	ldx	[%o2], %o1
	RD_CLOCK_TICK(%o0,%o3,%l1,__LINE__)
	casx	[%o2], %o1, %o0
	cmp	%o0, %o1
	bne,pn	%xcc, 2b
	ldn	[THREAD_REG + T_INTR], %l1	! delay
	! Reset cpu_intrcnt if we aren't pinning anyone
	brz,a,pt %l1, 2f
	stub	%g0, [%i1 + CPU_INTRCNT]
2:
	ba,pt	%xcc, 1b
	nop
3:
	!
	! We're a non-interrupt thread and cpu_kprunrun is set. Call kpreempt.
	!
	call	kpreempt
	mov	KPREEMPT_SYNC, %o0
	ba,pt	%xcc, 1b
	nop
	SET_SIZE(resume_from_intr)

#endif	/* lint */


/*
 * thread_start()
 *
 * the current register window was crafted by thread_run() to contain
 * an address of a procedure (in register %i7), and its args in registers
 * %i0 through %i5. a stack trace of this thread will show the procedure
 * that thread_start() invoked at the bottom of the stack. if the started
 * procedure ever returns, thread_exit() is called to destroy the thread.
 */
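
/*
 * In rough C terms (a sketch of the contract, not actual source; the
 * procedure and its two forwarded args live in %i7 and %i0/%i1):
 *
 *	void
 *	thread_start(void)
 *	{
 *		(*start)(arg0, arg1);
 *		thread_exit();		// only reached if start() returns
 *	}
 */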

#if defined(lint)

void
thread_start(void)
{}

#else	/* lint */

	ENTRY(thread_start)
	mov	%i0, %o0
	jmpl	%i7, %o7	! call thread_run()'s start() procedure.
	mov	%i1, %o1

	call	thread_exit	! destroy thread if it returns.
	nop
	unimp	0
	SET_SIZE(thread_start)

#endif	/* lint */