58 #include <sys/cpupart.h>
59 #include <sys/pset.h>
60 #include <sys/door.h>
61 #include <sys/spl.h>
62 #include <sys/copyops.h>
63 #include <sys/rctl.h>
64 #include <sys/brand.h>
65 #include <sys/pool.h>
66 #include <sys/zone.h>
67 #include <sys/tsol/label.h>
68 #include <sys/tsol/tndb.h>
69 #include <sys/cpc_impl.h>
70 #include <sys/sdt.h>
71 #include <sys/reboot.h>
72 #include <sys/kdi.h>
73 #include <sys/schedctl.h>
74 #include <sys/waitq.h>
75 #include <sys/cpucaps.h>
76 #include <sys/kiconv.h>
77 #include <sys/ctype.h>
78
79 struct kmem_cache *thread_cache; /* cache of free threads */
80 struct kmem_cache *lwp_cache; /* cache of free lwps */
81 struct kmem_cache *turnstile_cache; /* cache of free turnstiles */
82
83 /*
84 * allthreads is only for use by kmem_readers. All kernel loops can use
85 * the current thread as a start/end point.
86 */
87 kthread_t *allthreads = &t0; /* circular list of all threads */
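/*
 * A minimal sketch of the idiom described above (hypothetical walker,
 * not code from this file): with pidlock held, a loop over all kernel
 * threads can anchor on curthread instead of allthreads, since the
 * list is circular:
 *
 *	kthread_t *tp = curthread;
 *	do {
 *		... examine tp ...
 *	} while ((tp = tp->t_next) != curthread);
 */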
88
89 static kcondvar_t reaper_cv; /* synchronization var */
90 kthread_t *thread_deathrow; /* circular list of reapable threads */
91 kthread_t *lwp_deathrow; /* circular list of reapable threads */
92 kmutex_t reaplock; /* protects lwp and thread deathrows */
93 int thread_reapcnt = 0; /* number of threads on deathrow */
94 int lwp_reapcnt = 0; /* number of lwps on deathrow */
95 int reaplimit = 16; /* delay reaping until reaplimit */
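/*
 * A rough sketch of how these pieces fit together (an assumption drawn
 * from the declarations above, not the actual reaper code): threads and
 * lwps are queued on the deathrows under reaplock, and the reaper sleeps
 * on reaper_cv until a deathrow has grown past reaplimit, roughly:
 *
 *	mutex_enter(&reaplock);
 *	while (thread_reapcnt <= reaplimit && lwp_reapcnt <= reaplimit)
 *		cv_wait(&reaper_cv, &reaplock);
 *	... detach the deathrow lists, drop reaplock, free the threads ...
 */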
96
97 thread_free_lock_t *thread_free_lock;
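/*
 * Presumably used to synchronize thread_free() with code that may still
 * be examining a thread as it is freed (assumption; see the
 * thread_free_lock_t definition for the actual layout).
 */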
470
471 mutex_enter(&pidlock);
472 nthread++;
473 t->t_did = next_t_id++;
474 t->t_prev = curthread->t_prev;
475 t->t_next = curthread;
476
477 /*
478 * Add the thread to the list of all threads, and initialize
479 * its t_cpu pointer. We need to block preemption since
480 * cpu_offline walks the thread list looking for threads
481 * with t_cpu pointing to the CPU being offlined. We want
482 * to make sure that the list is consistent and that if t_cpu
483 * is set, the thread is on the list.
484 */
485 kpreempt_disable();
486 curthread->t_prev->t_next = t;
487 curthread->t_prev = t;
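/*
 * For illustration only (a hypothetical sketch, not the cpu_offline()
 * code itself), the walk referred to in the comment above is of the
 * general form:
 *
 *	tp = curthread;
 *	do {
 *		if (tp->t_cpu == cp)	(cp being the CPU going offline)
 *			... retarget or wait for tp ...
 *	} while ((tp = tp->t_next) != curthread);
 *
 * which is why a thread must already be on allthreads by the time its
 * t_cpu is set below.
 */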
488
489 /*
490 * Threads should never have a NULL t_cpu pointer so assign it
491 * here. If the thread is being created with state TS_RUN a
492 * better CPU may be chosen when it is placed on the run queue.
493 *
494 * We need to keep kernel preemption disabled when setting all
495 * three fields to keep them in sync. Also, always create in
496 * the default partition since that's where kernel threads go
497 * (if this isn't a kernel thread, t_cpupart will be changed
498 * in lwp_create before setting the thread runnable).
499 */
500 t->t_cpupart = &cp_default;
501
502 /*
503 * For now, affiliate this thread with the root lgroup.
504 * Since the kernel does not (presently) allocate its memory
505 * in a locality aware fashion, the root is an appropriate home.
506 * If this thread is later associated with an lwp, it will have
507 * its lgroup re-assigned at that time.
508 */
509 lgrp_move_thread(t, &cp_default.cp_lgrploads[LGRP_ROOTID], 1);
510
511 /*
512 * Inherit the current cpu. If this cpu isn't part of the chosen
513 * lgroup, a new cpu will be chosen by cpu_choose when the thread
514 * is ready to run.
515 */
516 if (CPU->cpu_part == &cp_default)
517 t->t_cpu = CPU;
518 else
519 t->t_cpu = disp_lowpri_cpu(cp_default.cp_cpulist, t->t_lpl,
520 t->t_pri, NULL);
521
522 t->t_disp_queue = t->t_cpu->cpu_disp;
523 kpreempt_enable();
524
525 /*
526 * Initialize thread state and the dispatcher lock pointer.
527 * Need to hold onto pidlock to block allthreads walkers until
528 * the state is set.
529 */
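/*
 * For context, a minimal illustrative caller (hypothetical, not from
 * this file; worker is a made-up thread function) passes the desired
 * initial state as the second-to-last argument:
 *
 *	(void) thread_create(NULL, 0, worker, NULL, 0, &p0,
 *	    TS_RUN, minclsyspri);
 */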
530 switch (state) {
531 case TS_RUN:
532 curthread->t_oldspl = splhigh(); /* get dispatcher spl */
533 THREAD_SET_STATE(t, TS_STOPPED, &transition_lock);
534 CL_SETRUN(t);
535 thread_unlock(t);
536 break;
537
538 case TS_ONPROC:
539 THREAD_ONPROC(t, t->t_cpu);
540 break;
1309 * When swtch() is switching away from an interrupt thread because it
1310 * blocked or was preempted, this routine is called to complete the
1311 * saving of the interrupted thread state, and returns the interrupted
1312 * thread pointer so it may be resumed.
1313 *
1314 * Called by swtch() only at high spl.
1315 */
1316 kthread_t *
1317 thread_unpin()
1318 {
1319 kthread_t *t = curthread; /* current thread */
1320 kthread_t *itp; /* interrupted thread */
1321 int i; /* interrupt level */
1322 extern int intr_passivate();
1323
1324 ASSERT(t->t_intr != NULL);
1325
1326 itp = t->t_intr; /* interrupted thread */
1327 t->t_intr = NULL; /* clear interrupt ptr */
1328
1329 /*
1330 * Get state from interrupt thread for the one
1331 * it interrupted.
1332 */
1333
1334 i = intr_passivate(t, itp);
1335
1336 TRACE_5(TR_FAC_INTR, TR_INTR_PASSIVATE,
1337 "intr_passivate:level %d curthread %p (%T) ithread %p (%T)",
1338 i, t, t, itp, itp);
1339
1340 /*
1341 * Dissociate the current thread from the interrupted thread's LWP.
1342 */
1343 t->t_lwp = NULL;
1344
1345 /*
1346 * Interrupt handlers above the level that spinlocks block must
1347 * not block.
1348 */
58 #include <sys/cpupart.h>
59 #include <sys/pset.h>
60 #include <sys/door.h>
61 #include <sys/spl.h>
62 #include <sys/copyops.h>
63 #include <sys/rctl.h>
64 #include <sys/brand.h>
65 #include <sys/pool.h>
66 #include <sys/zone.h>
67 #include <sys/tsol/label.h>
68 #include <sys/tsol/tndb.h>
69 #include <sys/cpc_impl.h>
70 #include <sys/sdt.h>
71 #include <sys/reboot.h>
72 #include <sys/kdi.h>
73 #include <sys/schedctl.h>
74 #include <sys/waitq.h>
75 #include <sys/cpucaps.h>
76 #include <sys/kiconv.h>
77 #include <sys/ctype.h>
78 #include <sys/ht.h>
79
80 struct kmem_cache *thread_cache; /* cache of free threads */
81 struct kmem_cache *lwp_cache; /* cache of free lwps */
82 struct kmem_cache *turnstile_cache; /* cache of free turnstiles */
83
84 /*
85 * allthreads is only for use by kmem_readers. All kernel loops can use
86 * the current thread as a start/end point.
87 */
88 kthread_t *allthreads = &t0; /* circular list of all threads */
89
90 static kcondvar_t reaper_cv; /* synchronization var */
91 kthread_t *thread_deathrow; /* circular list of reapable threads */
92 kthread_t *lwp_deathrow; /* circular list of reapable threads */
93 kmutex_t reaplock; /* protects lwp and thread deathrows */
94 int thread_reapcnt = 0; /* number of threads on deathrow */
95 int lwp_reapcnt = 0; /* number of lwps on deathrow */
96 int reaplimit = 16; /* delay reaping until reaplimit */
97
98 thread_free_lock_t *thread_free_lock;
471
472 mutex_enter(&pidlock);
473 nthread++;
474 t->t_did = next_t_id++;
475 t->t_prev = curthread->t_prev;
476 t->t_next = curthread;
477
478 /*
479 * Add the thread to the list of all threads, and initialize
480 * its t_cpu pointer. We need to block preemption since
481 * cpu_offline walks the thread list looking for threads
482 * with t_cpu pointing to the CPU being offlined. We want
483 * to make sure that the list is consistent and that if t_cpu
484 * is set, the thread is on the list.
485 */
486 kpreempt_disable();
487 curthread->t_prev->t_next = t;
488 curthread->t_prev = t;
489
490 /*
491 * We'll always create in the default partition since that's where
492 * kernel threads go (we'll change this later if needed, in
493 * lwp_create()).
494 */
495 t->t_cpupart = &cp_default;
496
497 /*
498 * For now, affiliate this thread with the root lgroup.
499 * Since the kernel does not (presently) allocate its memory
500 * in a locality aware fashion, the root is an appropriate home.
501 * If this thread is later associated with an lwp, it will have
502 * its lgroup re-assigned at that time.
503 */
504 lgrp_move_thread(t, &cp_default.cp_lgrploads[LGRP_ROOTID], 1);
505
506 /*
507 * If the current CPU is in the default cpupart, use it. Otherwise,
508 * pick one that is; before entering the dispatcher code, we'll
509 * make sure to keep the invariant that ->t_cpu is set. (In fact, we
510 * rely on this, in ht_should_run(), in the call tree of
511 * disp_lowpri_cpu().)
512 */
513 if (CPU->cpu_part == &cp_default) {
514 t->t_cpu = CPU;
515 } else {
516 t->t_cpu = cp_default.cp_cpulist;
517 t->t_cpu = disp_lowpri_cpu(t->t_cpu, t, t->t_pri);
518 }
519
520 t->t_disp_queue = t->t_cpu->cpu_disp;
521 kpreempt_enable();
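/*
 * Illustrative only (hypothetical checks, not present in the source):
 * once the block above completes, the dispatcher-related fields are
 * expected to be consistent, i.e.
 *
 *	ASSERT(t->t_cpupart == &cp_default);
 *	ASSERT(t->t_cpu != NULL);
 *	ASSERT(t->t_disp_queue == t->t_cpu->cpu_disp);
 */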
522
523 /*
524 * Initialize thread state and the dispatcher lock pointer.
525 * Need to hold onto pidlock to block allthreads walkers until
526 * the state is set.
527 */
528 switch (state) {
529 case TS_RUN:
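/*
 * Descriptive note on the lines below: the new thread is first parked
 * in TS_STOPPED on the transition lock so that it can be
 * thread_lock()ed, CL_SETRUN() then asks its scheduling class to make
 * it runnable, and thread_unlock() releases the dispatcher lock the
 * thread ends up on.
 */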
530 curthread->t_oldspl = splhigh(); /* get dispatcher spl */
531 THREAD_SET_STATE(t, TS_STOPPED, &transition_lock);
532 CL_SETRUN(t);
533 thread_unlock(t);
534 break;
535
536 case TS_ONPROC:
537 THREAD_ONPROC(t, t->t_cpu);
538 break;
1307 * When swtch() is switching away from an interrupt thread because it
1308 * blocked or was preempted, this routine is called to complete the
1309 * saving of the interrupted thread state, and returns the interrupted
1310 * thread pointer so it may be resumed.
1311 *
1312 * Called by swtch() only at high spl.
1313 */
1314 kthread_t *
1315 thread_unpin()
1316 {
1317 kthread_t *t = curthread; /* current thread */
1318 kthread_t *itp; /* interrupted thread */
1319 int i; /* interrupt level */
1320 extern int intr_passivate();
1321
1322 ASSERT(t->t_intr != NULL);
1323
1324 itp = t->t_intr; /* interrupted thread */
1325 t->t_intr = NULL; /* clear interrupt ptr */
1326
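/*
 * Descriptive note (assumption): ht_end_intr() appears to tell the HT
 * core-exclusion code that this thread is leaving interrupt context,
 * now that the interrupted thread is being detached from it.
 */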
1327 ht_end_intr();
1328
1329 /*
1330 * Get state from interrupt thread for the one
1331 * it interrupted.
1332 */
1333
1334 i = intr_passivate(t, itp);
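/*
 * Note (assumption drawn from the declarations above): intr_passivate()
 * hands the saved state back to the interrupted thread and returns the
 * interrupt level, which is only used by the trace point below.
 */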
1335
1336 TRACE_5(TR_FAC_INTR, TR_INTR_PASSIVATE,
1337 "intr_passivate:level %d curthread %p (%T) ithread %p (%T)",
1338 i, t, t, itp, itp);
1339
1340 /*
1341 * Dissociate the current thread from the interrupted thread's LWP.
1342 */
1343 t->t_lwp = NULL;
1344
1345 /*
1346 * Interrupt handlers above the level that spinlocks block must
1347 * not block.
1348 */