
	/*
	 * Finish initializing the kernel memory allocator now that
	 * thread_create() is available.
	 */
	kmem_thread_init();

	if (boothowto & RB_DEBUG)
		kdi_dvec_thravail();
}
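
/*
 * Illustrative sketch (hypothetical, not from this file): a typical use
 * of the thread_create() interface described by the block comment below.
 * The daemon routine and its priority are invented for illustration; a
 * NULL stk asks thread_create() to carve both thread and stack out of
 * segkp.
 */
static void
example_daemon(void)
{
	/* a daemon loop would run here */
}

static void
example_start_daemon(void)
{
	(void) thread_create(NULL, 0, (void (*)())example_daemon, NULL, 0,
	    &p0, TS_RUN, minclsyspri);
}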

/*
 * Create a thread.
 *
 * thread_create() blocks for memory if necessary. It never fails.
 *
 * If stk is NULL, the thread is created at the base of the stack
 * and cannot be swapped.
 */
kthread_t *
thread_create(
	caddr_t	stk,
	size_t	stksize,
	void	(*proc)(),
	void	*arg,
	size_t	len,
	proc_t	*pp,
	int	state,
	pri_t	pri)
{
	kthread_t *t;
	extern struct classfuncs sys_classfuncs;
	turnstile_t *ts;

	/*
	 * Every thread keeps a turnstile around in case it needs to block.
	 * The only reason the turnstile is not simply part of the thread
	 * structure is that we may have to break the association whenever
	 * more than one thread blocks on a given synchronization object.
	 * From a memory-management standpoint, turnstiles are like the
	 * "attached mblks" that hang off dblks in the streams allocator.
	 */
	ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);

	if (stk == NULL) {
		/*
		 * alloc both thread and stack in segkp chunk
		 */

[...]
	 * to reset the PC if it is in mutex_owner_running, refreshing
	 * stale thread pointers.
	 */
	mutex_sync();	/* sync with mutex code */

	/*
	 * Reap threads
	 */
	thread_reap_list(t);

	/*
	 * Reap lwps
	 */
	thread_reap_list(l);
}

/*
 * Clean up zombie threads that are on deathrow.
 */
void
thread_reaper()
{
	kthread_t *t, *l;
	callb_cpr_t cprinfo;

	/*
	 * Register callback to clean up threads when zone is destroyed.
	 */
	zone_key_create(&zone_thread_key, NULL, NULL, thread_zone_destroy);

	CALLB_CPR_INIT(&cprinfo, &reaplock, callb_generic_cpr, "t_reaper");
	for (;;) {
		mutex_enter(&reaplock);
		while (thread_deathrow == NULL && lwp_deathrow == NULL) {
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&reaper_cv, &reaplock);
			CALLB_CPR_SAFE_END(&cprinfo, &reaplock);
		}
		/*
		 * mutex_sync() needs to be called when reaping, but
		 * not too often. We limit reaping rate to once
[...]
	 * definition) stale: the dead thread is not holding any locks, and
	 * is therefore not in any blocking chains -- but if we do not regrab
	 * our lock before freeing the dead thread's data structures, the
	 * thread walking the (stale) blocking chain will die on memory
	 * corruption when it attempts to drop the dead thread's lock. We
	 * only need do this once because there is no way for the dead thread
	 * to ever again be on a blocking chain: once we have grabbed and
	 * dropped the thread lock, we are guaranteed that anyone that could
	 * have seen this thread in a blocking chain can no longer see it.
	 */
	thread_lock(t);
	thread_unlock(t);

	mutex_exit(&reaplock);
}
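
/*
 * Illustrative sketch (hypothetical, not from this file): the grab-and-drop
 * idiom used above, shown against an ordinary kmutex.  The round trip does
 * no work of its own; it is purely a barrier that waits out any holder that
 * could still be looking at state we are about to tear down.
 */
static void
example_lock_barrier(kmutex_t *mp)
{
	mutex_enter(mp);	/* wait for any current holder to drop it */
	mutex_exit(mp);		/* beyond this point nobody can still see us */
}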

/*
 * Install thread context ops for the specified thread.
 */
void
installctx(
	kthread_t *t,
	void	*arg,
	void	(*save)(void *),
	void	(*restore)(void *),
	void	(*fork)(void *, void *),
	void	(*lwp_create)(void *, void *),
	void	(*exit)(void *),
	void	(*free)(void *, int))
{
	struct ctxop *ctx;

	ctx = kmem_alloc(sizeof (struct ctxop), KM_SLEEP);
	ctx->save_op = save;
	ctx->restore_op = restore;
	ctx->fork_op = fork;
	ctx->lwp_create_op = lwp_create;
	ctx->exit_op = exit;
	ctx->free_op = free;
	ctx->arg = arg;
	ctx->next = t->t_ctx;
	t->t_ctx = ctx;
}
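
/*
 * Illustrative sketch (hypothetical, not from this file): a typical pairing
 * of installctx() with removectx() (defined below) for a made-up per-thread
 * hardware context.  The example_hw_ctx type and callback names are
 * invented; operations that are not needed are simply passed as NULL.
 */
typedef struct example_hw_ctx {
	uint64_t ehc_reg;		/* hypothetical register image */
} example_hw_ctx_t;

static void
example_ctx_save(void *arg)
{
	/* copy live hardware state into the context on switch-out */
}

static void
example_ctx_restore(void *arg)
{
	/* reload hardware state from the context on switch-in */
}

static void
example_ctx_free(void *arg, int isexec)
{
	kmem_free(arg, sizeof (example_hw_ctx_t));
}

static void
example_ctx_attach(void)
{
	example_hw_ctx_t *ehc = kmem_zalloc(sizeof (*ehc), KM_SLEEP);

	installctx(curthread, ehc, example_ctx_save, example_ctx_restore,
	    NULL, NULL, NULL, example_ctx_free);
}

static void
example_ctx_detach(example_hw_ctx_t *ehc)
{
	/*
	 * The arguments must match those given to installctx(); removectx()
	 * returns nonzero when a matching entry was found and removed.
	 */
	(void) removectx(curthread, ehc, example_ctx_save, example_ctx_restore,
	    NULL, NULL, NULL, example_ctx_free);
}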

/*
 * Remove the thread context ops from a thread.
 */
int
removectx(
	kthread_t *t,
	void	*arg,
	void	(*save)(void *),
	void	(*restore)(void *),
	void	(*fork)(void *, void *),
	void	(*lwp_create)(void *, void *),
	void	(*exit)(void *),
	void	(*free)(void *, int))
{
	struct ctxop *ctx, *prev_ctx;

	/*
	 * The incoming kthread_t (which is the thread for which the
	 * context ops will be removed) should be one of the following:
	 *
	 * a) the current thread,
	 *
	 * b) a thread of a process that's being forked (SIDL),
	 *
	 * c) a thread that belongs to the same process as the current
	 *    thread and for which the current thread is the agent thread,
	 *
	 * d) a thread that is TS_STOPPED which is indicative of it
	 *    being (if curthread is not an agent) a thread being created
	 *    as part of an lwp creation.
	 */
	ASSERT(t == curthread || ttoproc(t)->p_stat == SIDL ||
[...]
setrun(kthread_t *t)
{
	thread_lock(t);
	setrun_locked(t);
	thread_unlock(t);
}

/*
 * Unpin an interrupted thread.
 * When an interrupt occurs, the interrupt is handled on the stack
 * of an interrupt thread, taken from a pool linked to the CPU structure.
 *
 * When swtch() is switching away from an interrupt thread because it
 * blocked or was preempted, this routine is called to complete the
 * saving of the interrupted thread state, and returns the interrupted
 * thread pointer so it may be resumed.
 *
 * Called by swtch() only at high spl.
 */
kthread_t *
thread_unpin()
{
	kthread_t *t = curthread;	/* current thread */
	kthread_t *itp;			/* interrupted thread */
	int i;				/* interrupt level */
	extern int intr_passivate();

	ASSERT(t->t_intr != NULL);

	itp = t->t_intr;		/* interrupted thread */
	t->t_intr = NULL;		/* clear interrupt ptr */

	/*
	 * Get state from interrupt thread for the one
	 * it interrupted.
	 */

	i = intr_passivate(t, itp);

	TRACE_5(TR_FAC_INTR, TR_INTR_PASSIVATE,
	    "intr_passivate:level %d curthread %p (%T) ithread %p (%T)",
[...]
	/*
	 * Interrupt handlers above the level that spinlocks block must
	 * not block.
	 */
#if DEBUG
	if (i < 0 || i > LOCK_LEVEL)
		cmn_err(CE_PANIC, "thread_unpin: ipl out of range %x", i);
#endif

	/*
	 * Compute the CPU's base interrupt level based on the active
	 * interrupts.
	 */
	ASSERT(CPU->cpu_intr_actv & (1 << i));
	set_base_spl();

	return (itp);
}
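
/*
 * Illustrative sketch (hypothetical, not from this file): the idea behind
 * the set_base_spl() call above.  The real routine is platform-specific;
 * this invented helper only shows that the base priority level tracks the
 * highest interrupt level still marked active in cpu_intr_actv, so that
 * finishing one interrupt never drops the priority below another interrupt
 * that is still in progress.
 */
static int
example_base_level(uint_t intr_actv)
{
	int level;

	for (level = 15; level > 0; level--) {	/* 15 is the highest PIL */
		if (intr_actv & (1U << level))
			return (level);		/* highest active level wins */
	}
	return (0);
}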

/*
 * Create and initialize an interrupt thread.
 * Called at spl7() or better.
 */
void
thread_create_intr(struct cpu *cp)
{
	kthread_t *tp;

	tp = thread_create(NULL, 0,
	    (void (*)())thread_create_intr, NULL, 0, &p0, TS_ONPROC, 0);

	/*
	 * Set the thread in the TS_FREE state. The state will change
	 * to TS_ONPROC only while the interrupt is active. Think of these
	 * as being on a private free list for the CPU. Being TS_FREE keeps
	 * inactive interrupt threads out of debugger thread lists.
	 *
	 * We cannot call thread_create with TS_FREE because of the current
	 * checks there for ONPROC. Fix this when thread_create takes flags.
	 */
	THREAD_FREEINTR(tp, cp);

	/*
	 * Nobody should ever reference the credentials of an interrupt
	 * thread so make it NULL to catch any such references.
	 */
	tp->t_cred = NULL;
	tp->t_flag |= T_INTR_THREAD;
	tp->t_cpu = cp;
	tp->t_bound_cpu = cp;
	tp->t_disp_queue = cp->cpu_disp;
	tp->t_affinitycnt = 1;
	tp->t_preempt = 1;

	/*
	 * Don't make a user-requested binding on this thread so that
	 * the processor can be offlined.
	 */
	tp->t_bind_cpu = PBIND_NONE;	/* no USER-requested binding */
	tp->t_bind_pset = PS_NONE;

#if defined(__i386) || defined(__amd64)
	tp->t_stk -= STACK_ALIGN;
	*(tp->t_stk) = 0;		/* terminate intr thread stack */
#endif

	/*
	 * Link onto CPU's interrupt pool.
	 */
	tp->t_link = cp->cpu_intr_thread;
	cp->cpu_intr_thread = tp;
}
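
/*
 * Illustrative sketch (hypothetical, not from this file): how the pool
 * built above is typically consumed.  The real consumer lives in the
 * platform interrupt code; this invented helper only shows the
 * singly-linked push/pop discipline on cpu_intr_thread that
 * thread_create_intr() establishes, and it assumes interrupts are
 * effectively blocked while the list is manipulated.
 */
static kthread_t *
example_take_intr_thread(struct cpu *cp)
{
	kthread_t *it = cp->cpu_intr_thread;

	if (it != NULL)
		cp->cpu_intr_thread = it->t_link;	/* pop from the pool */
	return (it);
}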

/*
 * TSD -- THREAD SPECIFIC DATA
 */
static kmutex_t		tsd_mutex;	/* linked list spin lock */
static uint_t		tsd_nkeys;	/* size of destructor array */
/* per-key destructor funcs */
static void		(**tsd_destructor)(void *);
/* list of tsd_thread's */
static struct tsd_thread	*tsd_list;

/*
 * Default destructor
 * Needed because NULL destructor means that the key is unused
 */
/* ARGSUSED */
void
tsd_defaultdestructor(void *value)
{}
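
/*
 * Illustrative sketch (hypothetical, not from this file): typical use of
 * the TSD interfaces implemented below, assuming the standard tsd_create(),
 * tsd_get() and tsd_set() entry points.  The key, payload and function
 * names are invented; the destructor runs when a thread holding a value
 * for the key exits.
 */
static uint_t example_tsd_key;

static void
example_tsd_destroy(void *value)
{
	kmem_free(value, sizeof (uint64_t));
}

static void
example_tsd_init(void)
{
	tsd_create(&example_tsd_key, example_tsd_destroy);
}

static void
example_tsd_use(void)
{
	uint64_t *counter;

	if ((counter = tsd_get(example_tsd_key)) == NULL) {
		counter = kmem_zalloc(sizeof (*counter), KM_SLEEP);
		(void) tsd_set(example_tsd_key, counter);
	}
	(*counter)++;		/* per-thread state, no locking needed */
}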

/*
 * Create a key (index into per thread array)