#include <sys/cpupart.h>
#include <sys/pset.h>
#include <sys/door.h>
#include <sys/spl.h>
#include <sys/copyops.h>
#include <sys/rctl.h>
#include <sys/brand.h>
#include <sys/pool.h>
#include <sys/zone.h>
#include <sys/tsol/label.h>
#include <sys/tsol/tndb.h>
#include <sys/cpc_impl.h>
#include <sys/sdt.h>
#include <sys/reboot.h>
#include <sys/kdi.h>
#include <sys/schedctl.h>
#include <sys/waitq.h>
#include <sys/cpucaps.h>
#include <sys/kiconv.h>
#include <sys/ctype.h>
#include <sys/ht.h>

#ifndef STACK_GROWTH_DOWN
#error Stacks do not grow downward; 3b2 zombie attack detected!
#endif

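/*
 * The caches below are created in thread_init(); threads, lwps, and
 * turnstiles are allocated from and freed back to them so that their
 * setup costs are amortized across reuse.
 */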
struct kmem_cache *thread_cache;	/* cache of free threads */
struct kmem_cache *lwp_cache;		/* cache of free lwps */
struct kmem_cache *turnstile_cache;	/* cache of free turnstiles */

/*
 * allthreads is only for use by kmem_readers.  All kernel loops can use
 * the current thread as a start/end point.
 */
kthread_t *allthreads = &t0;	/* circular list of all threads */

static kcondvar_t reaper_cv;		/* synchronization var */
kthread_t	*thread_deathrow;	/* circular list of reapable threads */
kthread_t	*lwp_deathrow;		/* circular list of reapable threads */
kmutex_t	reaplock;		/* protects lwp and thread deathrows */
int	thread_reapcnt = 0;		/* number of threads on deathrow */
int	lwp_reapcnt = 0;		/* number of lwps on deathrow */
int	reaplimit = 16;			/* delay reaping until reaplimit */

thread_free_lock_t	*thread_free_lock;
					/* protects tick thread from reaper */
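/*
 * Exiting threads and lwps are not torn down inline: they are queued on
 * the deathrows above, and the thread reaper (sleeping on reaper_cv)
 * frees them in batches once more than reaplimit entries have
 * accumulated, keeping the exit path cheap.
 */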
 */
	t->t_cpupart = &cp_default;

	/*
	 * For now, affiliate this thread with the root lgroup.
	 * Since the kernel does not (presently) allocate its memory
	 * in a locality aware fashion, the root is an appropriate home.
	 * If this thread is later associated with an lwp, it will have
	 * its lgroup re-assigned at that time.
	 */
	lgrp_move_thread(t, &cp_default.cp_lgrploads[LGRP_ROOTID], 1);

	/*
	 * Inherit the current cpu.  If this cpu isn't part of the chosen
	 * lgroup, a new cpu will be chosen by cpu_choose when the thread
	 * is ready to run.
	 */
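	/*
	 * disp_lowpri_cpu() picks the best candidate CPU in the partition
	 * for a thread of priority t_pri, so either way t_cpu ends up on
	 * a CPU belonging to cp_default.
	 */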
	if (CPU->cpu_part == &cp_default)
		t->t_cpu = CPU;
	else
		t->t_cpu = disp_lowpri_cpu(cp_default.cp_cpulist, t,
		    t->t_pri);

	t->t_disp_queue = t->t_cpu->cpu_disp;
	kpreempt_enable();

	/*
	 * Initialize thread state and the dispatcher lock pointer.
	 * Need to hold onto pidlock to block allthreads walkers until
	 * the state is set.
	 */
	switch (state) {
	case TS_RUN:
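		/*
		 * transition_lock is a dispatcher lock that is never
		 * released, so the thread appears locked (and safely
		 * TS_STOPPED) to any observer until CL_SETRUN() places
		 * it on a run queue.
		 */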
		curthread->t_oldspl = splhigh();	/* get dispatcher spl */
		THREAD_SET_STATE(t, TS_STOPPED, &transition_lock);
		CL_SETRUN(t);
		thread_unlock(t);
		break;

	case TS_ONPROC:
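		/*
		 * THREAD_ONPROC() marks the thread TS_ONPROC and points
		 * its t_lockp at the chosen CPU's thread lock.
		 */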
		THREAD_ONPROC(t, t->t_cpu);
		break;
 * When swtch() is switching away from an interrupt thread because it
 * blocked or was preempted, this routine is called to complete the
 * saving of the interrupted thread state, and returns the interrupted
 * thread pointer so it may be resumed.
 *
 * Called by swtch() only at high spl.
 */
kthread_t *
thread_unpin()
{
	kthread_t	*t = curthread;	/* current thread */
	kthread_t	*itp;		/* interrupted thread */
	int		i;		/* interrupt level */
	extern int	intr_passivate();

	ASSERT(t->t_intr != NULL);

	itp = t->t_intr;		/* interrupted thread */
	t->t_intr = NULL;		/* clear interrupt ptr */

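	/*
	 * Tell the HT exclusion logic that interrupt processing is ending
	 * on this CPU, so the constraints on its sibling hyperthread can
	 * be lifted.
	 */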
	ht_end_intr();

	/*
	 * Get state from the interrupt thread for the one it interrupted;
	 * intr_passivate() returns the level of the passivated interrupt.
	 */
	i = intr_passivate(t, itp);

	TRACE_5(TR_FAC_INTR, TR_INTR_PASSIVATE,
	    "intr_passivate:level %d curthread %p (%T) ithread %p (%T)",
	    i, t, t, itp, itp);

	/*
	 * Dissociate the current thread from the interrupted thread's LWP:
	 * while pinned, the interrupt thread borrowed itp's t_lwp, and the
	 * borrowed pointer must be dropped before itp is resumed.
	 */
	t->t_lwp = NULL;

	/*
	 * Interrupt handlers above the level that spinlocks block must
	 * not block.
	 */