de-linting of .s files
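
This changeset removes the lint scaffolding from the sun4 context-switch code. Each assembly routine in this file was previously bracketed by #if defined(lint) guards: lint, which cannot parse assembler, was shown an empty C stub with the routine's prototype, while the real build assembled the ENTRY()/SET_SIZE() body. With lint retired from the build, both the stubs and the !defined(lint) guards go away. A minimal sketch of the idiom being deleted, using resume() from this file (the stub is verbatim from the old code; the elided assembly body is marked with "..."):

    #if defined(lint)

    /* ARGSUSED */
    void
    resume(kthread_id_t t)      /* empty stub: lint only sees the prototype */
    {}

    #else   /* lint */

            ENTRY(resume)       /* real SPARC implementation */
            ...
            SET_SIZE(resume)

    #endif  /* lint */

After the change, only the unconditional ENTRY()/SET_SIZE() body remains.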
--- old/usr/src/uts/sun4/ml/swtch.s
+++ new/usr/src/uts/sun4/ml/swtch.s
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 /*
26 26 * Process switching routines.
27 27 */
28 28
29 -#if !defined(lint)
30 29 #include "assym.h"
31 -#else /* lint */
32 -#include <sys/thread.h>
33 -#endif /* lint */
34 30
35 31 #include <sys/param.h>
36 32 #include <sys/asm_linkage.h>
37 33 #include <sys/mmu.h>
38 34 #include <sys/pcb.h>
39 35 #include <sys/machthread.h>
40 36 #include <sys/machclock.h>
41 37 #include <sys/privregs.h>
42 38 #include <sys/vtrace.h>
43 39 #include <vm/hat_sfmmu.h>
44 40
45 41 /*
46 42 * resume(kthread_id_t)
47 43 *
48 44 * a thread can only run on one processor at a time. there
49 45 * exists a window on MPs where the current thread on one
50 46 * processor is capable of being dispatched by another processor.
51 47 * some overlap between outgoing and incoming threads can happen
52 48 * when they are the same thread. in this case where the threads
53 49 * are the same, resume() on one processor will spin on the incoming
54 50 * thread until resume() on the other processor has finished with
55 51 * the outgoing thread.
56 52 *
57 53 * The MMU context changes when the resuming thread resides in a different
58 54 * process. Kernel threads are known by resume to reside in process 0.
59 55 * The MMU context, therefore, only changes when resuming a thread in
60 56 * a process different from curproc.
61 57 *
62 58 * resume_from_intr() is called when the thread being resumed was not
63 59 * passivated by resume (e.g. was interrupted). This means that the
64 60 * resume lock is already held and that a restore context is not needed.
65 61 * Also, the MMU context is not changed on the resume in this case.
66 62 *
67 63 * resume_from_zombie() is the same as resume except the calling thread
68 64 * is a zombie and must be put on the deathrow list after the CPU is
69 65 * off the stack.
70 66 */
71 67
72 -#if defined(lint)
73 -
74 -/* ARGSUSED */
75 -void
76 -resume(kthread_id_t t)
77 -{}
78 -
79 -#else /* lint */
80 -
81 68 ENTRY(resume)
82 69 save %sp, -SA(MINFRAME), %sp ! save ins and locals
83 70
84 71 call __dtrace_probe___sched_off__cpu ! DTrace probe
85 72 mov %i0, %o0 ! arg for DTrace probe
86 73
87 74 membar #Sync ! flush writebuffers
88 75 flushw ! flushes all but this window
89 76
90 77 stn %i7, [THREAD_REG + T_PC] ! save return address
91 78 stn %fp, [THREAD_REG + T_SP] ! save sp
92 79
93 80 !
94 81 ! Save GSR (Graphics Status Register).
95 82 !
96 83 ! Read fprs, call fp_save if FPRS_FEF set.
97 84 ! This handles floating-point state saving.
98 85 ! The fprs could be turned on by hw bcopy software,
99 86 ! *or* by fp_disabled. Handle it either way.
100 87 !
101 88 ldn [THREAD_REG + T_LWP], %o4 ! get lwp pointer
102 89 rd %fprs, %g4 ! read fprs
103 90 brnz,pt %o4, 0f ! if user thread skip
104 91 ldn [THREAD_REG + T_CPU], %i1 ! get CPU pointer
105 92
106 93 !
107 94 ! kernel thread
108 95 !
109 96 ! we save fprs at the beginning of the stack so we know
110 97 ! where to check at resume time
111 98 ldn [THREAD_REG + T_STACK], %i2
112 99 ldn [THREAD_REG + T_CTX], %g3 ! get ctx pointer
113 100 andcc %g4, FPRS_FEF, %g0 ! is FPRS_FEF set
114 101 bz,pt %icc, 1f ! nope, skip
115 102 st %g4, [%i2 + SA(MINFRAME) + FPU_FPRS] ! save fprs
116 103
117 104 ! save kernel fp state in stack
118 105 add %i2, SA(MINFRAME), %o0 ! o0 = kfpu_t ptr
119 106 rd %gsr, %g5
120 107 call fp_save
121 108 stx %g5, [%o0 + FPU_GSR] ! store GSR
122 109 ba,a,pt %icc, 1f
123 110 nop
124 111
125 112 0:
126 113 ! user thread
127 114 ! o4 = lwp ptr
128 115 ! g4 = fprs
129 116 ! i1 = CPU ptr
130 117 ldn [%o4 + LWP_FPU], %o0 ! fp pointer
131 118 stn %fp, [THREAD_REG + T_SP] ! save sp
132 119 andcc %g4, FPRS_FEF, %g0 ! is FPRS_FEF set
133 120 st %g4, [%o0 + FPU_FPRS] ! store FPRS
134 121 #if defined(DEBUG) || defined(NEED_FPU_EXISTS)
135 122 sethi %hi(fpu_exists), %g5
136 123 ld [%g5 + %lo(fpu_exists)], %g5
137 124 brz,pn %g5, 1f
138 125 ldn [THREAD_REG + T_CTX], %g3 ! get ctx pointer
139 126 #endif
140 127 bz,pt %icc, 1f ! most apps don't use fp
141 128 ldn [THREAD_REG + T_CTX], %g3 ! get ctx pointer
142 129 ldn [%o4 + LWP_FPU], %o0 ! fp pointer
143 130 rd %gsr, %g5
144 131 call fp_save ! doesn't touch globals
145 132 stx %g5, [%o0 + FPU_GSR] ! store GSR
146 133 1:
147 134 !
148 135 ! Perform context switch callback if set.
149 136 ! This handles coprocessor state saving.
150 137 ! i1 = cpu ptr
151 138 ! g3 = ctx pointer
152 139 !
153 140 wr %g0, %g0, %fprs ! disable fpu and clear fprs
154 141 brz,pt %g3, 2f ! skip call when zero
155 142 ldn [%i0 + T_PROCP], %i3 ! delay slot - get proc pointer
156 143 call savectx
157 144 mov THREAD_REG, %o0 ! delay - arg = thread pointer
158 145 2:
159 146 ldn [THREAD_REG + T_PROCP], %i2 ! load old curproc - for mmu
160 147
161 148 !
162 149 ! Temporarily switch to idle thread's stack
163 150 !
164 151 ldn [%i1 + CPU_IDLE_THREAD], %o0 ! idle thread pointer
165 152 ldn [%o0 + T_SP], %o1 ! get onto idle thread stack
166 153 sub %o1, SA(MINFRAME), %sp ! save room for ins and locals
167 154 clr %fp
168 155
169 156 !
170 157 ! Set the idle thread as the current thread
171 158 !
172 159 mov THREAD_REG, %l3 ! save %g7 (current thread)
173 160 mov %o0, THREAD_REG ! set %g7 to idle
174 161 stn %o0, [%i1 + CPU_THREAD] ! set CPU's thread to idle
175 162
176 163 !
177 164 ! Clear and unlock previous thread's t_lock
178 165 ! to allow it to be dispatched by another processor.
179 166 !
180 167 clrb [%l3 + T_LOCK] ! clear tp->t_lock
181 168
182 169 !
183 170 ! IMPORTANT: Registers at this point must be:
184 171 ! %i0 = new thread
185 172 ! %i1 = cpu pointer
186 173 ! %i2 = old proc pointer
187 174 ! %i3 = new proc pointer
188 175 !
189 176 ! Here we are in the idle thread, have dropped the old thread.
190 177 !
191 178 ALTENTRY(_resume_from_idle)
192 179
193 180 ! SET_KCONTEXTREG(reg0, reg1, reg2, reg3, reg4, label1, label2, label3)
194 181 SET_KCONTEXTREG(%o0, %g1, %g2, %g3, %o3, l1, l2, l3)
195 182
196 183 cmp %i2, %i3 ! resuming the same process?
197 184 be,pt %xcc, 5f ! yes.
198 185 nop
199 186
200 187 ldx [%i3 + P_AS], %o0 ! load p->p_as
201 188 ldx [%o0 + A_HAT], %i5 ! %i5 = new proc hat
202 189
203 190 !
204 191 ! update cpusran field
205 192 !
206 193 ld [%i1 + CPU_ID], %o4
207 194 add %i5, SFMMU_CPUSRAN, %o5
208 195 CPU_INDEXTOSET(%o5, %o4, %g1)
209 196 ldx [%o5], %o2 ! %o2 = cpusran field
210 197 mov 1, %g2
211 198 sllx %g2, %o4, %o4 ! %o4 = bit for this cpu
212 199 andcc %o4, %o2, %g0
213 200 bnz,pn %xcc, 0f ! bit already set, go to 0
214 201 nop
215 202 3:
216 203 or %o2, %o4, %o1 ! or in this cpu's bit mask
217 204 casx [%o5], %o2, %o1
218 205 cmp %o2, %o1
219 206 bne,a,pn %xcc, 3b
220 207 ldx [%o5], %o2 ! o2 = cpusran field
221 208 membar #LoadLoad|#StoreLoad
222 209
223 210 0:
224 211 !
225 212 ! disable interrupts
226 213 !
227 214 ! if resume from user to kernel thread
228 215 ! call sfmmu_setctx_sec
229 216 ! if resume from kernel (or a different user) thread to user thread
230 217 ! call sfmmu_alloc_ctx
231 218 ! sfmmu_load_mmustate
232 219 !
233 220 ! enable interrupts
234 221 !
235 222 ! %i5 = new proc hat
236 223 !
237 224
238 225 sethi %hi(ksfmmup), %o2
239 226 ldx [%o2 + %lo(ksfmmup)], %o2
240 227
241 228 rdpr %pstate, %i4
242 229 cmp %i5, %o2 ! new proc hat == ksfmmup ?
243 230 bne,pt %xcc, 3f ! new proc is not kernel as, go to 3
244 231 wrpr %i4, PSTATE_IE, %pstate
245 232
246 233 SET_KAS_CTXSEC_ARGS(%i5, %o0, %o1)
247 234
248 235 ! new proc is kernel as
249 236
250 237 call sfmmu_setctx_sec ! switch to kernel context
251 238 or %o0, %o1, %o0
252 239
253 240 ba,a,pt %icc, 4f
254 241
255 242 !
256 243 ! Switch to user address space.
257 244 !
258 245 3:
259 246 mov %i5, %o0 ! %o0 = sfmmup
260 247 mov %i1, %o2 ! %o2 = CPU
261 248 set SFMMU_PRIVATE, %o3 ! %o3 = sfmmu private flag
262 249 call sfmmu_alloc_ctx
263 250 mov %g0, %o1 ! %o1 = allocate flag = 0
264 251
265 252 brz,a,pt %o0, 4f ! %o0 == 0, no private alloc'ed
266 253 nop
267 254
268 255 ldn [%i5 + SFMMU_SCDP], %o0 ! using shared contexts?
269 256 brz,a,pt %o0, 4f
270 257 nop
271 258
272 259 ldn [%o0 + SCD_SFMMUP], %o0 ! %o0 = scdp->scd_sfmmup
273 260 mov %i1, %o2 ! %o2 = CPU
274 261 set SFMMU_SHARED, %o3 ! %o3 = sfmmu shared flag
275 262 call sfmmu_alloc_ctx
276 263 mov 1, %o1 ! %o1 = allocate flag = 1
277 264
278 265 4:
279 266 call sfmmu_load_mmustate ! program MMU registers
280 267 mov %i5, %o0
281 268
282 269 wrpr %g0, %i4, %pstate ! enable interrupts
283 270
284 271 5:
285 272 !
286 273 ! spin until dispatched thread's mutex has
287 274 ! been unlocked. this mutex is unlocked when
288 275 ! it becomes safe for the thread to run.
289 276 !
290 277 ldstub [%i0 + T_LOCK], %o0 ! lock curthread's t_lock
291 278 6:
292 279 brnz,pn %o0, 7f ! lock failed
293 280 ldx [%i0 + T_PC], %i7 ! delay - restore resuming thread's pc
294 281
295 282 !
296 283 ! Fix CPU structure to indicate new running thread.
297 284 ! Set pointer in new thread to the CPU structure.
298 285 ! XXX - Move migration statistic out of here
299 286 !
300 287 ldx [%i0 + T_CPU], %g2 ! last CPU to run the new thread
301 288 cmp %g2, %i1 ! test for migration
302 289 be,pt %xcc, 4f ! no migration
303 290 ldn [%i0 + T_LWP], %o1 ! delay - get associated lwp (if any)
304 291 ldx [%i1 + CPU_STATS_SYS_CPUMIGRATE], %g2
305 292 inc %g2
306 293 stx %g2, [%i1 + CPU_STATS_SYS_CPUMIGRATE]
307 294 stx %i1, [%i0 + T_CPU] ! set new thread's CPU pointer
308 295 4:
309 296 stx %i0, [%i1 + CPU_THREAD] ! set CPU's thread pointer
310 297 membar #StoreLoad ! synchronize with mutex_exit()
311 298 mov %i0, THREAD_REG ! update global thread register
312 299 stx %o1, [%i1 + CPU_LWP] ! set CPU's lwp ptr
313 300 brz,a,pn %o1, 1f ! if no lwp, branch and clr mpcb
314 301 stx %g0, [%i1 + CPU_MPCB]
315 302 !
316 303 ! user thread
317 304 ! o1 = lwp
318 305 ! i0 = new thread
319 306 !
320 307 ldx [%i0 + T_STACK], %o0
321 308 stx %o0, [%i1 + CPU_MPCB] ! set CPU's mpcb pointer
322 309 #ifdef CPU_MPCB_PA
323 310 ldx [%o0 + MPCB_PA], %o0
324 311 stx %o0, [%i1 + CPU_MPCB_PA]
325 312 #endif
326 313 ! Switch to new thread's stack
327 314 ldx [%i0 + T_SP], %o0 ! restore resuming thread's sp
328 315 sub %o0, SA(MINFRAME), %sp ! in case of intr or trap before restore
329 316 mov %o0, %fp
330 317 !
331 318 ! Restore resuming thread's GSR reg and floating-point regs
332 319 ! Note that the ld to the gsr register ensures that the loading of
333 320 ! the floating point saved state has completed without necessity
334 321 ! of a membar #Sync.
335 322 !
336 323 #if defined(DEBUG) || defined(NEED_FPU_EXISTS)
337 324 sethi %hi(fpu_exists), %g3
338 325 ld [%g3 + %lo(fpu_exists)], %g3
339 326 brz,pn %g3, 2f
340 327 ldx [%i0 + T_CTX], %i5 ! should resumed thread restorectx?
341 328 #endif
342 329 ldx [%o1 + LWP_FPU], %o0 ! fp pointer
343 330 ld [%o0 + FPU_FPRS], %g5 ! get fpu_fprs
344 331 andcc %g5, FPRS_FEF, %g0 ! is FPRS_FEF set?
345 332 bz,a,pt %icc, 9f ! no, skip fp_restore
346 333 wr %g0, FPRS_FEF, %fprs ! enable fprs so fp_zero works
347 334
348 335 ldx [THREAD_REG + T_CPU], %o4 ! cpu pointer
349 336 call fp_restore
350 337 wr %g5, %g0, %fprs ! enable fpu and restore fprs
351 338
352 339 ldx [%o0 + FPU_GSR], %g5 ! load saved GSR data
353 340 wr %g5, %g0, %gsr ! restore %gsr data
354 341 ba,pt %icc,2f
355 342 ldx [%i0 + T_CTX], %i5 ! should resumed thread restorectx?
356 343
357 344 9:
358 345 !
359 346 ! Zero resuming thread's fp registers, for *all* non-fp program
360 347 ! Remove all possibility of using the fp regs as a "covert channel".
361 348 !
362 349 call fp_zero
363 350 wr %g0, %g0, %gsr
364 351 ldx [%i0 + T_CTX], %i5 ! should resumed thread restorectx?
365 352 ba,pt %icc, 2f
366 353 wr %g0, %g0, %fprs ! disable fprs
367 354
368 355 1:
369 356 #ifdef CPU_MPCB_PA
370 357 mov -1, %o1
371 358 stx %o1, [%i1 + CPU_MPCB_PA]
372 359 #endif
373 360 !
374 361 ! kernel thread
375 362 ! i0 = new thread
376 363 !
377 364 ! Switch to new thread's stack
378 365 !
379 366 ldx [%i0 + T_SP], %o0 ! restore resuming thread's sp
380 367 sub %o0, SA(MINFRAME), %sp ! in case of intr or trap before restore
381 368 mov %o0, %fp
382 369 !
383 370 ! Restore resuming thread's GSR reg and floating-point regs
384 371 ! Note that the ld to the gsr register ensures that the loading of
385 372 ! the floating point saved state has completed without necessity
386 373 ! of a membar #Sync.
387 374 !
388 375 ldx [%i0 + T_STACK], %o0
389 376 ld [%o0 + SA(MINFRAME) + FPU_FPRS], %g5 ! load fprs
390 377 ldx [%i0 + T_CTX], %i5 ! should thread restorectx?
391 378 andcc %g5, FPRS_FEF, %g0 ! did we save fp in stack?
392 379 bz,a,pt %icc, 2f
393 380 wr %g0, %g0, %fprs ! clr fprs
394 381
395 382 wr %g5, %g0, %fprs ! enable fpu and restore fprs
396 383 call fp_restore
397 384 add %o0, SA(MINFRAME), %o0 ! o0 = kfpu_t ptr
398 385 ldx [%o0 + FPU_GSR], %g5 ! load saved GSR data
399 386 wr %g5, %g0, %gsr ! restore %gsr data
400 387
401 388 2:
402 389 !
403 390 ! Restore resuming thread's context
404 391 ! i5 = ctx ptr
405 392 !
406 393 brz,a,pt %i5, 8f ! skip restorectx() when zero
407 394 ld [%i1 + CPU_BASE_SPL], %o0
408 395 call restorectx ! thread can not sleep on temp stack
409 396 mov THREAD_REG, %o0 ! delay slot - arg = thread pointer
410 397 !
411 398 ! Set priority as low as possible, blocking all interrupt threads
412 399 ! that may be active.
413 400 !
414 401 ld [%i1 + CPU_BASE_SPL], %o0
415 402 8:
416 403 wrpr %o0, 0, %pil
417 404 wrpr %g0, WSTATE_KERN, %wstate
418 405 !
419 406 ! If we are resuming an interrupt thread, store a starting timestamp
420 407 ! in the thread structure.
421 408 !
422 409 lduh [THREAD_REG + T_FLAGS], %o0
423 410 andcc %o0, T_INTR_THREAD, %g0
424 411 bnz,pn %xcc, 0f
425 412 nop
426 413 5:
427 414 call __dtrace_probe___sched_on__cpu ! DTrace probe
428 415 nop
429 416
430 417 ret ! resume curthread
431 418 restore
432 419 0:
433 420 add THREAD_REG, T_INTR_START, %o2
434 421 1:
435 422 ldx [%o2], %o1
436 423 RD_CLOCK_TICK(%o0,%o3,%g5,__LINE__)
437 424 casx [%o2], %o1, %o0
438 425 cmp %o0, %o1
439 426 be,pt %xcc, 5b
440 427 nop
441 428 ! If an interrupt occurred while we were attempting to store
442 429 ! the timestamp, try again.
443 430 ba,pt %xcc, 1b
444 431 nop
445 432
446 433 !
447 434 ! lock failed - spin with regular load to avoid cache-thrashing.
448 435 !
449 436 7:
450 437 brnz,a,pt %o0, 7b ! spin while locked
451 438 ldub [%i0 + T_LOCK], %o0
452 439 ba %xcc, 6b
453 440 ldstub [%i0 + T_LOCK], %o0 ! delay - lock curthread's mutex
454 441 SET_SIZE(_resume_from_idle)
455 442 SET_SIZE(resume)
456 443
457 -#endif /* lint */
458 -
459 -#if defined(lint)
460 -
461 -/* ARGSUSED */
462 -void
463 -resume_from_zombie(kthread_id_t t)
464 -{}
465 -
466 -#else /* lint */
467 -
468 444 ENTRY(resume_from_zombie)
469 445 save %sp, -SA(MINFRAME), %sp ! save ins and locals
470 446
471 447 call __dtrace_probe___sched_off__cpu ! DTrace probe
472 448 mov %i0, %o0 ! arg for DTrace probe
473 449
474 450 ldn [THREAD_REG + T_CPU], %i1 ! cpu pointer
475 451
476 452 flushw ! flushes all but this window
477 453 ldn [THREAD_REG + T_PROCP], %i2 ! old procp for mmu ctx
478 454
479 455 !
480 456 ! Temporarily switch to the idle thread's stack so that
481 457 ! the zombie thread's stack can be reclaimed by the reaper.
482 458 !
483 459 ldn [%i1 + CPU_IDLE_THREAD], %o2 ! idle thread pointer
484 460 ldn [%o2 + T_SP], %o1 ! get onto idle thread stack
485 461 sub %o1, SA(MINFRAME), %sp ! save room for ins and locals
486 462 clr %fp
487 463 !
488 464 ! Set the idle thread as the current thread.
489 465 ! Put the zombie on death-row.
490 466 !
491 467 mov THREAD_REG, %o0 ! save %g7 = curthread for arg
492 468 mov %o2, THREAD_REG ! set %g7 to idle
493 469 stn %g0, [%i1 + CPU_MPCB] ! clear mpcb
494 470 #ifdef CPU_MPCB_PA
495 471 mov -1, %o1
496 472 stx %o1, [%i1 + CPU_MPCB_PA]
497 473 #endif
498 474 call reapq_add ! reapq_add(old_thread);
499 475 stn %o2, [%i1 + CPU_THREAD] ! delay - CPU's thread = idle
500 476
501 477 !
502 478 ! resume_from_idle args:
503 479 ! %i0 = new thread
504 480 ! %i1 = cpu
505 481 ! %i2 = old proc
506 482 ! %i3 = new proc
507 483 !
508 484 b _resume_from_idle ! finish job of resume
509 485 ldn [%i0 + T_PROCP], %i3 ! new process
510 486 SET_SIZE(resume_from_zombie)
511 487
512 -#endif /* lint */
513 -
514 -#if defined(lint)
515 -
516 -/* ARGSUSED */
517 -void
518 -resume_from_intr(kthread_id_t t)
519 -{}
520 -
521 -#else /* lint */
522 -
523 488 ENTRY(resume_from_intr)
524 489 save %sp, -SA(MINFRAME), %sp ! save ins and locals
525 490
526 491 !
527 492 ! We read in the fprs and call fp_save if FPRS_FEF is set
528 493 ! to save the floating-point state if fprs has been
529 494 ! modified by operations such as hw bcopy or fp_disabled.
530 495 ! This is to resolve an issue where an interrupting thread
531 496 ! doesn't retain its floating-point registers when
532 497 ! switching out of the interrupt context.
533 498 !
534 499 rd %fprs, %g4
535 500 ldn [THREAD_REG + T_STACK], %i2
536 501 andcc %g4, FPRS_FEF, %g0 ! is FPRS_FEF set
537 502 bz,pt %icc, 4f
538 503 st %g4, [%i2 + SA(MINFRAME) + FPU_FPRS] ! save fprs
539 504
540 505 ! save kernel fp state in stack
541 506 add %i2, SA(MINFRAME), %o0 ! %o0 = kfpu_t ptr
542 507 rd %gsr, %g5
543 508 call fp_save
544 509 stx %g5, [%o0 + FPU_GSR] ! store GSR
545 510
546 511 4:
547 512
548 513 flushw ! flushes all but this window
549 514 stn %fp, [THREAD_REG + T_SP] ! delay - save sp
550 515 stn %i7, [THREAD_REG + T_PC] ! save return address
551 516
552 517 ldn [%i0 + T_PC], %i7 ! restore resuming thread's pc
553 518 ldn [THREAD_REG + T_CPU], %i1 ! cpu pointer
554 519
555 520 !
556 521 ! Fix CPU structure to indicate new running thread.
557 522 ! The pinned thread we're resuming already has the CPU pointer set.
558 523 !
559 524 mov THREAD_REG, %l3 ! save old thread
560 525 stn %i0, [%i1 + CPU_THREAD] ! set CPU's thread pointer
561 526 membar #StoreLoad ! synchronize with mutex_exit()
562 527 mov %i0, THREAD_REG ! update global thread register
563 528
564 529 !
565 530 ! Switch to new thread's stack
566 531 !
567 532 ldn [THREAD_REG + T_SP], %o0 ! restore resuming thread's sp
568 533 sub %o0, SA(MINFRAME), %sp ! in case of intr or trap before restore
569 534 mov %o0, %fp
570 535 clrb [%l3 + T_LOCK] ! clear intr thread's tp->t_lock
571 536
572 537 !
573 538 ! If we are resuming an interrupt thread, store a timestamp in the
574 539 ! thread structure.
575 540 !
576 541 lduh [THREAD_REG + T_FLAGS], %o0
577 542 andcc %o0, T_INTR_THREAD, %g0
578 543 bnz,pn %xcc, 0f
579 544 !
580 545 ! We're resuming a non-interrupt thread.
581 546 ! Clear CPU_INTRCNT and check whether cpu_kprunrun is set.
582 547 !
583 548 ldub [%i1 + CPU_KPRUNRUN], %o5 ! delay
584 549 brnz,pn %o5, 3f ! call kpreempt(KPREEMPT_SYNC);
585 550 stub %g0, [%i1 + CPU_INTRCNT]
586 551 1:
587 552 ret ! resume curthread
588 553 restore
589 554 0:
590 555 !
591 556 ! We're an interrupt thread. Update t_intr_start and cpu_intrcnt
592 557 !
593 558 add THREAD_REG, T_INTR_START, %o2
594 559 2:
595 560 ldx [%o2], %o1
596 561 RD_CLOCK_TICK(%o0,%o3,%l1,__LINE__)
597 562 casx [%o2], %o1, %o0
598 563 cmp %o0, %o1
599 564 bne,pn %xcc, 2b
600 565 ldn [THREAD_REG + T_INTR], %l1 ! delay
601 566 ! Reset cpu_intrcnt if we aren't pinning anyone
602 567 brz,a,pt %l1, 2f
603 568 stub %g0, [%i1 + CPU_INTRCNT]
604 569 2:
605 570 ba,pt %xcc, 1b
606 571 nop
607 572 3:
608 573 !
609 574 ! We're a non-interrupt thread and cpu_kprunrun is set. call kpreempt.
610 575 !
611 576 call kpreempt
612 577 mov KPREEMPT_SYNC, %o0
613 578 ba,pt %xcc, 1b
614 579 nop
615 580 SET_SIZE(resume_from_intr)
616 581
617 -#endif /* lint */
618 582
619 -
620 583 /*
621 584 * thread_start()
622 585 *
623 586 * the current register window was crafted by thread_run() to contain
624 587 * an address of a procedure (in register %i7), and its args in registers
625 588 * %i0 through %i5. a stack trace of this thread will show the procedure
626 589 * that thread_start() invoked at the bottom of the stack. an exit routine
627 590 * is stored in %l0 and called when the started thread returns from its called
628 591 * procedure.
629 592 */
630 593
631 -#if defined(lint)
632 -
633 -void
634 -thread_start(void)
635 -{}
636 -
637 -#else /* lint */
638 -
639 594 ENTRY(thread_start)
640 595 mov %i0, %o0
641 596 jmpl %i7, %o7 ! call thread_run()'s start() procedure.
642 597 mov %i1, %o1
643 598
644 599 call thread_exit ! destroy thread if it returns.
645 600 nop
646 601 unimp 0
647 602 SET_SIZE(thread_start)
648 -
649 -#endif /* lint */
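
As an aside on the t_lock protocol that resume()'s header comment describes: the outgoing CPU clears the old thread's t_lock with clrb once its state is saved, and the incoming CPU takes the new thread's t_lock with ldstub, falling back to a plain-load spin (label 7 above) to avoid cache-line thrashing while the lock is held. A rough, hypothetical C rendering of that handoff, modeling ldstub with C11 atomic_exchange (the names and types here are simplified stand-ins, not the kernel's):

    #include <stdatomic.h>

    typedef struct kthread {
            atomic_uchar t_lock;            /* simplified stand-in for t_lock */
            /* ... t_sp, t_pc, etc. ... */
    } kthread_t;

    /*
     * Outgoing side: the old thread's state is saved and the CPU is on
     * the idle thread's stack, so let other processors dispatch it.
     */
    void
    drop_outgoing(kthread_t *old)
    {
            atomic_store(&old->t_lock, 0);  /* clrb [%l3 + T_LOCK] */
    }

    /*
     * Incoming side: spin until the CPU that last ran 'incoming' has
     * finished switching it out.  ldstub atomically sets the lock byte
     * and yields its previous value; atomic_exchange models that.
     */
    void
    grab_incoming(kthread_t *incoming)
    {
            while (atomic_exchange(&incoming->t_lock, 1) != 0) {
                    while (atomic_load(&incoming->t_lock) != 0)
                            ;   /* plain-load spin avoids cache thrashing */
            }
            /* now safe to restore t_sp/t_pc and run the thread */
    }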