1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
  24  * Copyright (c) 2018 Joyent, Inc.
  25  */
  26 
  27 #include <sys/types.h>
  28 #include <sys/param.h>
  29 #include <sys/sysmacros.h>
  30 #include <sys/signal.h>
  31 #include <sys/stack.h>
  32 #include <sys/pcb.h>
  33 #include <sys/user.h>
  34 #include <sys/systm.h>
  35 #include <sys/sysinfo.h>
  36 #include <sys/errno.h>
  37 #include <sys/cmn_err.h>
  38 #include <sys/cred.h>
  39 #include <sys/resource.h>
  40 #include <sys/task.h>
  41 #include <sys/project.h>
  42 #include <sys/proc.h>
  43 #include <sys/debug.h>
  44 #include <sys/disp.h>
  45 #include <sys/class.h>
  46 #include <vm/seg_kmem.h>
  47 #include <vm/seg_kp.h>
  48 #include <sys/machlock.h>
  49 #include <sys/kmem.h>
  50 #include <sys/varargs.h>
  51 #include <sys/turnstile.h>
  52 #include <sys/poll.h>
  53 #include <sys/vtrace.h>
  54 #include <sys/callb.h>
  55 #include <c2/audit.h>
  56 #include <sys/tnf.h>
  57 #include <sys/sobject.h>
  58 #include <sys/cpupart.h>
  59 #include <sys/pset.h>
  60 #include <sys/door.h>
  61 #include <sys/spl.h>
  62 #include <sys/copyops.h>
  63 #include <sys/rctl.h>
  64 #include <sys/brand.h>
  65 #include <sys/pool.h>
  66 #include <sys/zone.h>
  67 #include <sys/tsol/label.h>
  68 #include <sys/tsol/tndb.h>
  69 #include <sys/cpc_impl.h>
  70 #include <sys/sdt.h>
  71 #include <sys/reboot.h>
  72 #include <sys/kdi.h>
  73 #include <sys/schedctl.h>
  74 #include <sys/waitq.h>
  75 #include <sys/cpucaps.h>
  76 #include <sys/kiconv.h>
  77 #include <sys/ctype.h>
  78 #include <sys/ht.h>
  79 
  80 #ifndef STACK_GROWTH_DOWN
  81 #error Stacks do not grow downward; 3b2 zombie attack detected!
  82 #endif
  83 
  84 struct kmem_cache *thread_cache;        /* cache of free threads */
  85 struct kmem_cache *lwp_cache;           /* cache of free lwps */
  86 struct kmem_cache *turnstile_cache;     /* cache of free turnstiles */
  87 
  88 /*
  89  * allthreads is only for use by kmem_readers.  All kernel loops can use
  90  * the current thread as a start/end point.
  91  */
  92 kthread_t *allthreads = &t0;        /* circular list of all threads */
  93 
  94 static kcondvar_t reaper_cv;            /* synchronization var */
  95 kthread_t       *thread_deathrow;       /* list of reapable threads */
  96 kthread_t       *lwp_deathrow;          /* list of reapable threads with reusable lwps */
  97 kmutex_t        reaplock;               /* protects lwp and thread deathrows */
  98 int     thread_reapcnt = 0;             /* number of threads on deathrow */
  99 int     lwp_reapcnt = 0;                /* number of lwps on deathrow */
 100 int     reaplimit = 16;                 /* delay reaping until reaplimit */
 101 
 102 thread_free_lock_t      *thread_free_lock;
 103                                         /* protects tick thread from reaper */
 104 
 105 extern int nthread;
 106 
 107 /* System Scheduling classes. */
 108 id_t    syscid;                         /* system scheduling class ID */
 109 id_t    sysdccid = CLASS_UNUSED;        /* reset when SDC loads */
 110 
 111 void    *segkp_thread;                  /* cookie for segkp pool */
 112 
 113 int lwp_cache_sz = 32;
 114 int t_cache_sz = 8;
 115 static kt_did_t next_t_id = 1;
 116 
 117 /* Default mode for thread binding to CPUs and processor sets */
 118 int default_binding_mode = TB_ALLHARD;
 119 
 120 /*
 121  * Min/Max stack sizes for stack size parameters
 122  */
 123 #define MAX_STKSIZE     (32 * DEFAULTSTKSZ)
 124 #define MIN_STKSIZE     DEFAULTSTKSZ
 125 
 126 /*
 127  * default_stksize overrides lwp_default_stksize if it is set.
 128  */
 129 int     default_stksize;
 130 int     lwp_default_stksize;
 131 
 132 static zone_key_t zone_thread_key;
 133 
 134 unsigned int kmem_stackinfo;            /* stackinfo feature on-off */
 135 kmem_stkinfo_t *kmem_stkinfo_log;       /* stackinfo circular log */
 136 static kmutex_t kmem_stkinfo_lock;      /* protects kmem_stkinfo_log */
 137 
 138 /*
 139  * forward declarations for internal thread specific data (tsd)
 140  */
 141 static void *tsd_realloc(void *, size_t, size_t);
 142 
 143 void thread_reaper(void);
 144 
 145 /* forward declarations for stackinfo feature */
 146 static void stkinfo_begin(kthread_t *);
 147 static void stkinfo_end(kthread_t *);
 148 static size_t stkinfo_percent(caddr_t, caddr_t, caddr_t);
 149 
 150 /*ARGSUSED*/
 151 static int
 152 turnstile_constructor(void *buf, void *cdrarg, int kmflags)
 153 {
 154         bzero(buf, sizeof (turnstile_t));
 155         return (0);
 156 }
 157 
 158 /*ARGSUSED*/
 159 static void
 160 turnstile_destructor(void *buf, void *cdrarg)
 161 {
 162         turnstile_t *ts = buf;
 163 
 164         ASSERT(ts->ts_free == NULL);
 165         ASSERT(ts->ts_waiters == 0);
 166         ASSERT(ts->ts_inheritor == NULL);
 167         ASSERT(ts->ts_sleepq[0].sq_first == NULL);
 168         ASSERT(ts->ts_sleepq[1].sq_first == NULL);
 169 }
 170 
 171 void
 172 thread_init(void)
 173 {
 174         kthread_t *tp;
 175         extern char sys_name[];
 176         extern void idle();
 177         struct cpu *cpu = CPU;
 178         int i;
 179         kmutex_t *lp;
 180 
 181         mutex_init(&reaplock, NULL, MUTEX_SPIN, (void *)ipltospl(DISP_LEVEL));
 182         thread_free_lock =
 183             kmem_alloc(sizeof (thread_free_lock_t) * THREAD_FREE_NUM, KM_SLEEP);
 184         for (i = 0; i < THREAD_FREE_NUM; i++) {
 185                 lp = &thread_free_lock[i].tf_lock;
 186                 mutex_init(lp, NULL, MUTEX_DEFAULT, NULL);
 187         }
 188 
 189 #if defined(__i386) || defined(__amd64)
 190         thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
 191             PTR24_ALIGN, NULL, NULL, NULL, NULL, NULL, 0);
 192 
 193         /*
 194          * "struct _klwp" includes a "struct pcb", which includes a
 195          * "struct fpu", which needs to be 64-byte aligned on amd64
 196          * (and even on i386) for xsave/xrstor.
 197          */
 198         lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
 199             64, NULL, NULL, NULL, NULL, NULL, 0);
 200 #else
 201         /*
 202          * Allocate thread structures from static_arena.  This prevents
 203          * issues where a thread tries to relocate its own thread
 204          * structure and touches it after the mapping has been suspended.
 205          */
 206         thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
 207             PTR24_ALIGN, NULL, NULL, NULL, NULL, static_arena, 0);
 208 
 209         lwp_stk_cache_init();
 210 
 211         lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
 212             0, NULL, NULL, NULL, NULL, NULL, 0);
 213 #endif
 214 
 215         turnstile_cache = kmem_cache_create("turnstile_cache",
 216             sizeof (turnstile_t), 0,
 217             turnstile_constructor, turnstile_destructor, NULL, NULL, NULL, 0);
 218 
 219         label_init();
 220         cred_init();
 221 
 222         /*
 223          * Initialize various resource management facilities.
 224          */
 225         rctl_init();
 226         cpucaps_init();
 227         /*
  228          * zone_init() should be called before project_init() so that the
  229          * project ID for the first project is initialized correctly.
 230          */
 231         zone_init();
 232         project_init();
 233         brand_init();
 234         kiconv_init();
 235         task_init();
 236         tcache_init();
 237         pool_init();
 238 
 239         curthread->t_ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);
 240 
 241         /*
 242          * Originally, we had two parameters to set default stack
 243          * size: one for lwp's (lwp_default_stksize), and one for
 244          * kernel-only threads (DEFAULTSTKSZ, a.k.a. _defaultstksz).
  245          * Now we have a third parameter, default_stksize, which
  246          * overrides both of them if it is set to a legal stack size.
 247          */
 248 
 249         if (default_stksize == 0) {
 250                 default_stksize = DEFAULTSTKSZ;
 251         } else if (default_stksize % PAGESIZE != 0 ||
 252             default_stksize > MAX_STKSIZE ||
 253             default_stksize < MIN_STKSIZE) {
 254                 cmn_err(CE_WARN, "Illegal stack size. Using %d",
 255                     (int)DEFAULTSTKSZ);
 256                 default_stksize = DEFAULTSTKSZ;
 257         } else {
 258                 lwp_default_stksize = default_stksize;
 259         }
 260 
 261         if (lwp_default_stksize == 0) {
 262                 lwp_default_stksize = default_stksize;
 263         } else if (lwp_default_stksize % PAGESIZE != 0 ||
 264             lwp_default_stksize > MAX_STKSIZE ||
 265             lwp_default_stksize < MIN_STKSIZE) {
 266                 cmn_err(CE_WARN, "Illegal stack size. Using %d",
 267                     default_stksize);
 268                 lwp_default_stksize = default_stksize;
 269         }
 270 
 271         segkp_lwp = segkp_cache_init(segkp, lwp_cache_sz,
 272             lwp_default_stksize,
 273             (KPD_NOWAIT | KPD_HASREDZONE | KPD_LOCKED));
 274 
 275         segkp_thread = segkp_cache_init(segkp, t_cache_sz,
 276             default_stksize, KPD_HASREDZONE | KPD_LOCKED | KPD_NO_ANON);
 277 
 278         (void) getcid(sys_name, &syscid);
 279         curthread->t_cid = syscid;   /* current thread is t0 */
 280 
 281         /*
 282          * Set up the first CPU's idle thread.
 283          * It runs whenever the CPU has nothing worthwhile to do.
 284          */
 285         tp = thread_create(NULL, 0, idle, NULL, 0, &p0, TS_STOPPED, -1);
 286         cpu->cpu_idle_thread = tp;
 287         tp->t_preempt = 1;
 288         tp->t_disp_queue = cpu->cpu_disp;
 289         ASSERT(tp->t_disp_queue != NULL);
 290         tp->t_bound_cpu = cpu;
 291         tp->t_affinitycnt = 1;
 292 
 293         /*
 294          * Registering a thread in the callback table is usually
 295          * done in the initialization code of the thread. In this
  296          * case, we do it right after thread creation to avoid
  297          * blocking the idle thread while it registers itself. It also
 298          * avoids the possibility of reregistration in case a CPU
 299          * restarts its idle thread.
 300          */
 301         CALLB_CPR_INIT_SAFE(tp, "idle");
 302 
 303         /*
 304          * Create the thread_reaper daemon. From this point on, exited
 305          * threads will get reaped.
 306          */
 307         (void) thread_create(NULL, 0, (void (*)())thread_reaper,
 308             NULL, 0, &p0, TS_RUN, minclsyspri);
 309 
 310         /*
 311          * Finish initializing the kernel memory allocator now that
 312          * thread_create() is available.
 313          */
 314         kmem_thread_init();
 315 
 316         if (boothowto & RB_DEBUG)
 317                 kdi_dvec_thravail();
 318 }
 319 
 320 /*
 321  * Create a thread.
 322  *
 323  * thread_create() blocks for memory if necessary.  It never fails.
 324  *
 325  * If stk is NULL, the thread is created at the base of the stack
 326  * and cannot be swapped.
 327  */
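     /*
      * A typical call, modeled on the thread_reaper creation in thread_init()
      * above, looks like this (my_daemon is a hypothetical kernel function):
      *
      *     (void) thread_create(NULL, 0, my_daemon, NULL, 0, &p0,
      *         TS_RUN, minclsyspri);
      */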
 328 kthread_t *
 329 thread_create(
 330         caddr_t stk,
 331         size_t  stksize,
 332         void    (*proc)(),
 333         void    *arg,
 334         size_t  len,
 335         proc_t   *pp,
 336         int     state,
 337         pri_t   pri)
 338 {
 339         kthread_t *t;
 340         extern struct classfuncs sys_classfuncs;
 341         turnstile_t *ts;
 342 
 343         /*
 344          * Every thread keeps a turnstile around in case it needs to block.
 345          * The only reason the turnstile is not simply part of the thread
 346          * structure is that we may have to break the association whenever
 347          * more than one thread blocks on a given synchronization object.
 348          * From a memory-management standpoint, turnstiles are like the
 349          * "attached mblks" that hang off dblks in the streams allocator.
 350          */
 351         ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);
 352 
 353         if (stk == NULL) {
 354                 /*
 355                  * alloc both thread and stack in segkp chunk
 356                  */
 357 
 358                 if (stksize < default_stksize)
 359                         stksize = default_stksize;
 360 
 361                 if (stksize == default_stksize) {
 362                         stk = (caddr_t)segkp_cache_get(segkp_thread);
 363                 } else {
 364                         stksize = roundup(stksize, PAGESIZE);
 365                         stk = (caddr_t)segkp_get(segkp, stksize,
 366                             (KPD_HASREDZONE | KPD_NO_ANON | KPD_LOCKED));
 367                 }
 368 
 369                 ASSERT(stk != NULL);
 370 
 371                 /*
  372                  * The machine-dependent mutex code may require that
  373                  * thread pointers (since they may be used for mutex owner
  374                  * fields) be aligned on a particular boundary.
  375                  * PTR24_ALIGN is the size of that alignment quantum.
 376                  * XXX - assumes stack grows toward low addresses.
 377                  */
 378                 if (stksize <= sizeof (kthread_t) + PTR24_ALIGN)
 379                         cmn_err(CE_PANIC, "thread_create: proposed stack size"
 380                             " too small to hold thread.");
 381 #ifdef STACK_GROWTH_DOWN
 382                 stksize -= SA(sizeof (kthread_t) + PTR24_ALIGN - 1);
 383                 stksize &= -PTR24_ALIGN;    /* make thread aligned */
 384                 t = (kthread_t *)(stk + stksize);
 385                 bzero(t, sizeof (kthread_t));
 386                 if (audit_active)
 387                         audit_thread_create(t);
 388                 t->t_stk = stk + stksize;
 389                 t->t_stkbase = stk;
 390 #else   /* stack grows to larger addresses */
 391                 stksize -= SA(sizeof (kthread_t));
 392                 t = (kthread_t *)(stk);
 393                 bzero(t, sizeof (kthread_t));
 394                 t->t_stk = stk + sizeof (kthread_t);
 395                 t->t_stkbase = stk + stksize + sizeof (kthread_t);
 396 #endif  /* STACK_GROWTH_DOWN */
 397                 t->t_flag |= T_TALLOCSTK;
 398                 t->t_swap = stk;
 399         } else {
 400                 t = kmem_cache_alloc(thread_cache, KM_SLEEP);
 401                 bzero(t, sizeof (kthread_t));
 402                 ASSERT(((uintptr_t)t & (PTR24_ALIGN - 1)) == 0);
 403                 if (audit_active)
 404                         audit_thread_create(t);
 405                 /*
 406                  * Initialize t_stk to the kernel stack pointer to use
 407                  * upon entry to the kernel
 408                  */
 409 #ifdef STACK_GROWTH_DOWN
 410                 t->t_stk = stk + stksize;
 411                 t->t_stkbase = stk;
 412 #else
 413                 t->t_stk = stk;                      /* 3b2-like */
 414                 t->t_stkbase = stk + stksize;
 415 #endif /* STACK_GROWTH_DOWN */
 416         }
 417 
 418         if (kmem_stackinfo != 0) {
 419                 stkinfo_begin(t);
 420         }
 421 
 422         t->t_ts = ts;
 423 
 424         /*
  425          * p_cred could be NULL if thread_create() is called before cred_init()
 426          * is called in main.
 427          */
 428         mutex_enter(&pp->p_crlock);
 429         if (pp->p_cred)
 430                 crhold(t->t_cred = pp->p_cred);
 431         mutex_exit(&pp->p_crlock);
 432         t->t_start = gethrestime_sec();
 433         t->t_startpc = proc;
 434         t->t_procp = pp;
 435         t->t_clfuncs = &sys_classfuncs.thread;
 436         t->t_cid = syscid;
 437         t->t_pri = pri;
 438         t->t_stime = ddi_get_lbolt();
 439         t->t_schedflag = TS_LOAD | TS_DONT_SWAP;
 440         t->t_bind_cpu = PBIND_NONE;
 441         t->t_bindflag = (uchar_t)default_binding_mode;
 442         t->t_bind_pset = PS_NONE;
 443         t->t_plockp = &pp->p_lock;
 444         t->t_copyops = NULL;
 445         t->t_taskq = NULL;
 446         t->t_anttime = 0;
 447         t->t_hatdepth = 0;
 448 
 449         t->t_dtrace_vtime = 1;       /* assure vtimestamp is always non-zero */
 450 
 451         CPU_STATS_ADDQ(CPU, sys, nthreads, 1);
 452 #ifndef NPROBE
 453         /* Kernel probe */
 454         tnf_thread_create(t);
 455 #endif /* NPROBE */
 456         LOCK_INIT_CLEAR(&t->t_lock);
 457 
 458         /*
  459          * Callers who give us a NULL proc must do their own
  460          * stack initialization (e.g., lwp_create()).
 461          */
 462         if (proc != NULL) {
 463                 t->t_stk = thread_stk_init(t->t_stk);
 464                 thread_load(t, proc, arg, len);
 465         }
 466 
 467         /*
 468          * Put a hold on project0. If this thread is actually in a
 469          * different project, then t_proj will be changed later in
 470          * lwp_create().  All kernel-only threads must be in project 0.
 471          */
 472         t->t_proj = project_hold(proj0p);
 473 
 474         lgrp_affinity_init(&t->t_lgrp_affinity);
 475 
 476         mutex_enter(&pidlock);
 477         nthread++;
 478         t->t_did = next_t_id++;
 479         t->t_prev = curthread->t_prev;
 480         t->t_next = curthread;
 481 
 482         /*
 483          * Add the thread to the list of all threads, and initialize
 484          * its t_cpu pointer.  We need to block preemption since
 485          * cpu_offline walks the thread list looking for threads
 486          * with t_cpu pointing to the CPU being offlined.  We want
 487          * to make sure that the list is consistent and that if t_cpu
 488          * is set, the thread is on the list.
 489          */
 490         kpreempt_disable();
 491         curthread->t_prev->t_next = t;
 492         curthread->t_prev = t;
 493 
 494         /*
 495          * Threads should never have a NULL t_cpu pointer so assign it
 496          * here.  If the thread is being created with state TS_RUN a
 497          * better CPU may be chosen when it is placed on the run queue.
 498          *
 499          * We need to keep kernel preemption disabled when setting all
 500          * three fields to keep them in sync.  Also, always create in
 501          * the default partition since that's where kernel threads go
 502          * (if this isn't a kernel thread, t_cpupart will be changed
 503          * in lwp_create before setting the thread runnable).
 504          */
 505         t->t_cpupart = &cp_default;
 506 
 507         /*
 508          * For now, affiliate this thread with the root lgroup.
 509          * Since the kernel does not (presently) allocate its memory
  510          * in a locality-aware fashion, the root is an appropriate home.
  511          * If this thread is later associated with an lwp, it will have
  512          * its lgroup re-assigned at that time.
 513          */
 514         lgrp_move_thread(t, &cp_default.cp_lgrploads[LGRP_ROOTID], 1);
 515 
 516         /*
 517          * Inherit the current cpu.  If this cpu isn't part of the chosen
 518          * lgroup, a new cpu will be chosen by cpu_choose when the thread
 519          * is ready to run.
 520          */
 521         if (CPU->cpu_part == &cp_default)
 522                 t->t_cpu = CPU;
 523         else
 524                 t->t_cpu = disp_lowpri_cpu(cp_default.cp_cpulist, t,
 525                     t->t_pri);
 526 
 527         t->t_disp_queue = t->t_cpu->cpu_disp;
 528         kpreempt_enable();
 529 
 530         /*
 531          * Initialize thread state and the dispatcher lock pointer.
 532          * Need to hold onto pidlock to block allthreads walkers until
 533          * the state is set.
 534          */
 535         switch (state) {
 536         case TS_RUN:
 537                 curthread->t_oldspl = splhigh();     /* get dispatcher spl */
 538                 THREAD_SET_STATE(t, TS_STOPPED, &transition_lock);
 539                 CL_SETRUN(t);
 540                 thread_unlock(t);
 541                 break;
 542 
 543         case TS_ONPROC:
 544                 THREAD_ONPROC(t, t->t_cpu);
 545                 break;
 546 
 547         case TS_FREE:
 548                 /*
 549                  * Free state will be used for intr threads.
 550                  * The interrupt routine must set the thread dispatcher
 551                  * lock pointer (t_lockp) if starting on a CPU
 552                  * other than the current one.
 553                  */
 554                 THREAD_FREEINTR(t, CPU);
 555                 break;
 556 
 557         case TS_STOPPED:
 558                 THREAD_SET_STATE(t, TS_STOPPED, &stop_lock);
 559                 break;
 560 
 561         default:                        /* TS_SLEEP, TS_ZOMB or TS_TRANS */
 562                 cmn_err(CE_PANIC, "thread_create: invalid state %d", state);
 563         }
 564         mutex_exit(&pidlock);
 565         return (t);
 566 }
 567 
 568 /*
 569  * Move thread to project0 and take care of project reference counters.
 570  */
 571 void
 572 thread_rele(kthread_t *t)
 573 {
 574         kproject_t *kpj;
 575 
 576         thread_lock(t);
 577 
 578         ASSERT(t == curthread || t->t_state == TS_FREE || t->t_procp == &p0);
 579         kpj = ttoproj(t);
 580         t->t_proj = proj0p;
 581 
 582         thread_unlock(t);
 583 
 584         if (kpj != proj0p) {
 585                 project_rele(kpj);
 586                 (void) project_hold(proj0p);
 587         }
 588 }
 589 
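     /*
      * Tear down the calling kernel thread: release its per-thread state,
      * remove it from the allthreads list, mark it a zombie and switch away
      * from it for the last time.  Never returns.
      */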
 590 void
 591 thread_exit(void)
 592 {
 593         kthread_t *t = curthread;
 594 
 595         if ((t->t_proc_flag & TP_ZTHREAD) != 0)
 596                 cmn_err(CE_PANIC, "thread_exit: zthread_exit() not called");
 597 
 598         tsd_exit();             /* Clean up this thread's TSD */
 599 
 600         kcpc_passivate();       /* clean up performance counter state */
 601 
 602         /*
  603          * No kernel thread should have called poll() without arranging
  604          * for pollcleanup() to be called here.
 605          */
 606         ASSERT(t->t_pollstate == NULL);
 607         ASSERT(t->t_schedctl == NULL);
 608         if (t->t_door)
 609                 door_slam();    /* in case thread did an upcall */
 610 
 611 #ifndef NPROBE
 612         /* Kernel probe */
 613         if (t->t_tnf_tpdp)
 614                 tnf_thread_exit();
 615 #endif /* NPROBE */
 616 
 617         thread_rele(t);
 618         t->t_preempt++;
 619 
 620         /*
  621          * Remove the thread from the allthreads list so that
  622          * death-row can use the same pointers.
 623          */
 624         mutex_enter(&pidlock);
 625         t->t_next->t_prev = t->t_prev;
 626         t->t_prev->t_next = t->t_next;
 627         ASSERT(allthreads != t);        /* t0 never exits */
 628         cv_broadcast(&t->t_joincv);      /* wake up anyone in thread_join */
 629         mutex_exit(&pidlock);
 630 
 631         if (t->t_ctx != NULL)
 632                 exitctx(t);
 633         if (t->t_procp->p_pctx != NULL)
 634                 exitpctx(t->t_procp);
 635 
 636         if (kmem_stackinfo != 0) {
 637                 stkinfo_end(t);
 638         }
 639 
 640         t->t_state = TS_ZOMB;        /* set zombie thread */
 641 
 642         swtch_from_zombie();    /* give up the CPU */
 643         /* NOTREACHED */
 644 }
 645 
 646 /*
 647  * Check to see if the specified thread is active (defined as being on
 648  * the thread list).  This is certainly a slow way to do this; if there's
 649  * ever a reason to speed it up, we could maintain a hash table of active
 650  * threads indexed by their t_did.
 651  */
 652 static kthread_t *
 653 did_to_thread(kt_did_t tid)
 654 {
 655         kthread_t *t;
 656 
 657         ASSERT(MUTEX_HELD(&pidlock));
 658         for (t = curthread->t_next; t != curthread; t = t->t_next) {
 659                 if (t->t_did == tid)
 660                         break;
 661         }
 662         if (t->t_did == tid)
 663                 return (t);
 664         else
 665                 return (NULL);
 666 }
 667 
 668 /*
 669  * Wait for specified thread to exit.  Returns immediately if the thread
 670  * could not be found, meaning that it has either already exited or never
 671  * existed.
 672  */
 673 void
 674 thread_join(kt_did_t tid)
 675 {
 676         kthread_t *t;
 677 
 678         ASSERT(tid != curthread->t_did);
 679         ASSERT(tid != t0.t_did);
 680 
 681         mutex_enter(&pidlock);
 682         /*
 683          * Make sure we check that the thread is on the thread list
 684          * before blocking on it; otherwise we could end up blocking on
 685          * a cv that's already been freed.  In other words, don't cache
 686          * the thread pointer across calls to cv_wait.
 687          *
 688          * The choice of loop invariant means that whenever a thread
 689          * is taken off the allthreads list, a cv_broadcast must be
 690          * performed on that thread's t_joincv to wake up any waiters.
 691          * The broadcast doesn't have to happen right away, but it
 692          * shouldn't be postponed indefinitely (e.g., by doing it in
  693          * thread_free(), which may only be executed when the deathrow
  694          * queue is processed).
 695          */
  696         while ((t = did_to_thread(tid)) != NULL)
 697                 cv_wait(&t->t_joincv, &pidlock);
 698         mutex_exit(&pidlock);
 699 }
 700 
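     /*
      * Keep the reaper from completing thread_free() for thread t; the tick
      * accounting code uses this to keep t from disappearing while it is
      * being examined.  Released by thread_free_allow().
      */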
 701 void
 702 thread_free_prevent(kthread_t *t)
 703 {
 704         kmutex_t *lp;
 705 
 706         lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock;
 707         mutex_enter(lp);
 708 }
 709 
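     /*
      * Drop the per-bucket lock taken by thread_free_prevent(), allowing
      * the reaper to finish freeing thread t.
      */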
 710 void
 711 thread_free_allow(kthread_t *t)
 712 {
 713         kmutex_t *lp;
 714 
 715         lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock;
 716         mutex_exit(lp);
 717 }
 718 
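     /*
      * Wait for any holder of the per-bucket free lock (see
      * thread_free_prevent()) to drop it before thread t is torn down.
      */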
 719 static void
 720 thread_free_barrier(kthread_t *t)
 721 {
 722         kmutex_t *lp;
 723 
 724         lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock;
 725         mutex_enter(lp);
 726         mutex_exit(lp);
 727 }
 728 
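     /*
      * Release everything still attached to an exited (TS_FREE) thread:
      * credentials, scheduling-class data, turnstile, lwp, stack, and the
      * thread structure itself.  Called via thread_reap_list().
      */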
 729 void
 730 thread_free(kthread_t *t)
 731 {
 732         boolean_t allocstk = (t->t_flag & T_TALLOCSTK);
 733         klwp_t *lwp = t->t_lwp;
 734         caddr_t swap = t->t_swap;
 735 
 736         ASSERT(t != &t0 && t->t_state == TS_FREE);
 737         ASSERT(t->t_door == NULL);
 738         ASSERT(t->t_schedctl == NULL);
 739         ASSERT(t->t_pollstate == NULL);
 740 
 741         t->t_pri = 0;
 742         t->t_pc = 0;
 743         t->t_sp = 0;
 744         t->t_wchan0 = NULL;
 745         t->t_wchan = NULL;
 746         if (t->t_cred != NULL) {
 747                 crfree(t->t_cred);
  748                 t->t_cred = NULL;
 749         }
 750         if (t->t_pdmsg) {
 751                 kmem_free(t->t_pdmsg, strlen(t->t_pdmsg) + 1);
 752                 t->t_pdmsg = NULL;
 753         }
 754         if (audit_active)
 755                 audit_thread_free(t);
 756 #ifndef NPROBE
 757         if (t->t_tnf_tpdp)
 758                 tnf_thread_free(t);
 759 #endif /* NPROBE */
 760         if (t->t_cldata) {
 761                 CL_EXITCLASS(t->t_cid, (caddr_t *)t->t_cldata);
 762         }
 763         if (t->t_rprof != NULL) {
 764                 kmem_free(t->t_rprof, sizeof (*t->t_rprof));
 765                 t->t_rprof = NULL;
 766         }
 767         t->t_lockp = NULL;   /* nothing should try to lock this thread now */
 768         if (lwp)
 769                 lwp_freeregs(lwp, 0);
 770         if (t->t_ctx)
 771                 freectx(t, 0);
 772         t->t_stk = NULL;
 773         if (lwp)
 774                 lwp_stk_fini(lwp);
 775         lock_clear(&t->t_lock);
 776 
 777         if (t->t_ts->ts_waiters > 0)
 778                 panic("thread_free: turnstile still active");
 779 
 780         kmem_cache_free(turnstile_cache, t->t_ts);
 781 
 782         free_afd(&t->t_activefd);
 783 
 784         /*
 785          * Barrier for the tick accounting code.  The tick accounting code
 786          * holds this lock to keep the thread from going away while it's
 787          * looking at it.
 788          */
 789         thread_free_barrier(t);
 790 
 791         ASSERT(ttoproj(t) == proj0p);
 792         project_rele(ttoproj(t));
 793 
 794         lgrp_affinity_free(&t->t_lgrp_affinity);
 795 
 796         mutex_enter(&pidlock);
 797         nthread--;
 798         mutex_exit(&pidlock);
 799 
 800         if (t->t_name != NULL) {
 801                 kmem_free(t->t_name, THREAD_NAME_MAX);
 802                 t->t_name = NULL;
 803         }
 804 
 805         /*
 806          * Free thread, lwp and stack.  This needs to be done carefully, since
 807          * if T_TALLOCSTK is set, the thread is part of the stack.
 808          */
 809         t->t_lwp = NULL;
 810         t->t_swap = NULL;
 811 
 812         if (swap) {
 813                 segkp_release(segkp, swap);
 814         }
 815         if (lwp) {
 816                 kmem_cache_free(lwp_cache, lwp);
 817         }
 818         if (!allocstk) {
 819                 kmem_cache_free(thread_cache, t);
 820         }
 821 }
 822 
 823 /*
 824  * Removes threads associated with the given zone from a deathrow queue.
 825  * tp is a pointer to the head of the deathrow queue, and countp is a
 826  * pointer to the current deathrow count.  Returns a linked list of
 827  * threads removed from the list.
 828  */
 829 static kthread_t *
 830 thread_zone_cleanup(kthread_t **tp, int *countp, zoneid_t zoneid)
 831 {
 832         kthread_t *tmp, *list = NULL;
 833         cred_t *cr;
 834 
 835         ASSERT(MUTEX_HELD(&reaplock));
 836         while (*tp != NULL) {
 837                 if ((cr = (*tp)->t_cred) != NULL && crgetzoneid(cr) == zoneid) {
 838                         tmp = *tp;
 839                         *tp = tmp->t_forw;
 840                         tmp->t_forw = list;
 841                         list = tmp;
 842                         (*countp)--;
 843                 } else {
 844                         tp = &(*tp)->t_forw;
 845                 }
 846         }
 847         return (list);
 848 }
 849 
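     /*
      * Free every thread on a deathrow list linked through t_forw.
      */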
 850 static void
 851 thread_reap_list(kthread_t *t)
 852 {
 853         kthread_t *next;
 854 
 855         while (t != NULL) {
 856                 next = t->t_forw;
 857                 thread_free(t);
 858                 t = next;
 859         }
 860 }
 861 
 862 /* ARGSUSED */
 863 static void
 864 thread_zone_destroy(zoneid_t zoneid, void *unused)
 865 {
 866         kthread_t *t, *l;
 867 
 868         mutex_enter(&reaplock);
 869         /*
 870          * Pull threads and lwps associated with zone off deathrow lists.
 871          */
 872         t = thread_zone_cleanup(&thread_deathrow, &thread_reapcnt, zoneid);
 873         l = thread_zone_cleanup(&lwp_deathrow, &lwp_reapcnt, zoneid);
 874         mutex_exit(&reaplock);
 875 
 876         /*
 877          * Guard against race condition in mutex_owner_running:
 878          *      thread=owner(mutex)
 879          *      <interrupt>
 880          *                              thread exits mutex
 881          *                              thread exits
 882          *                              thread reaped
 883          *                              thread struct freed
 884          * cpu = thread->t_cpu <- BAD POINTER DEREFERENCE.
 885          * A cross call to all cpus will cause the interrupt handler
 886          * to reset the PC if it is in mutex_owner_running, refreshing
 887          * stale thread pointers.
 888          */
 889         mutex_sync();   /* sync with mutex code */
 890 
 891         /*
 892          * Reap threads
 893          */
 894         thread_reap_list(t);
 895 
 896         /*
 897          * Reap lwps
 898          */
 899         thread_reap_list(l);
 900 }
 901 
 902 /*
  903  * Clean up zombie threads that are on deathrow.
 904  */
 905 void
  906 thread_reaper(void)
 907 {
 908         kthread_t *t, *l;
 909         callb_cpr_t cprinfo;
 910 
 911         /*
 912          * Register callback to clean up threads when zone is destroyed.
 913          */
 914         zone_key_create(&zone_thread_key, NULL, NULL, thread_zone_destroy);
 915 
 916         CALLB_CPR_INIT(&cprinfo, &reaplock, callb_generic_cpr, "t_reaper");
 917         for (;;) {
 918                 mutex_enter(&reaplock);
 919                 while (thread_deathrow == NULL && lwp_deathrow == NULL) {
 920                         CALLB_CPR_SAFE_BEGIN(&cprinfo);
 921                         cv_wait(&reaper_cv, &reaplock);
 922                         CALLB_CPR_SAFE_END(&cprinfo, &reaplock);
 923                 }
 924                 /*
 925                  * mutex_sync() needs to be called when reaping, but
  926                  * not too often.  We limit the reaping rate to once per
  927                  * second.  reaplimit is the deathrow length at which the
  928                  * reaper is woken; it does not limit thread creation or destruction.
 929                  */
 930                 t = thread_deathrow;
 931                 l = lwp_deathrow;
 932                 thread_deathrow = NULL;
 933                 lwp_deathrow = NULL;
 934                 thread_reapcnt = 0;
 935                 lwp_reapcnt = 0;
 936                 mutex_exit(&reaplock);
 937 
 938                 /*
 939                  * Guard against race condition in mutex_owner_running:
 940                  *      thread=owner(mutex)
 941                  *      <interrupt>
 942                  *                              thread exits mutex
 943                  *                              thread exits
 944                  *                              thread reaped
 945                  *                              thread struct freed
 946                  * cpu = thread->t_cpu <- BAD POINTER DEREFERENCE.
 947                  * A cross call to all cpus will cause the interrupt handler
 948                  * to reset the PC if it is in mutex_owner_running, refreshing
 949                  * stale thread pointers.
 950                  */
 951                 mutex_sync();   /* sync with mutex code */
 952                 /*
 953                  * Reap threads
 954                  */
 955                 thread_reap_list(t);
 956 
 957                 /*
 958                  * Reap lwps
 959                  */
 960                 thread_reap_list(l);
 961                 delay(hz);
 962         }
 963 }
 964 
 965 /*
  966  * This is called by lwp_create(), etc., to move a thread from lwp_deathrow
  967  * onto thread_deathrow.  The thread's state has already been changed to
  968  * TS_FREE to indicate that it is reapable.  The caller already holds the
  969  * reaplock, and the thread's lwp and stack have already been taken for reuse.
 970  */
 971 void
 972 reapq_move_lq_to_tq(kthread_t *t)
 973 {
 974         ASSERT(t->t_state == TS_FREE);
 975         ASSERT(MUTEX_HELD(&reaplock));
 976         t->t_forw = thread_deathrow;
 977         thread_deathrow = t;
 978         thread_reapcnt++;
 979         if (lwp_reapcnt + thread_reapcnt > reaplimit)
 980                 cv_signal(&reaper_cv);  /* wake the reaper */
 981 }
 982 
 983 /*
 984  * This is called by resume() to put a zombie thread onto deathrow.
  985  * The thread's state is changed to TS_FREE to indicate that it is reapable.
 986  * This is called from the idle thread so it must not block - just spin.
 987  */
 988 void
 989 reapq_add(kthread_t *t)
 990 {
 991         mutex_enter(&reaplock);
 992 
 993         /*
 994          * lwp_deathrow contains threads with lwp linkage and
 995          * swappable thread stacks which have the default stacksize.
 996          * These threads' lwps and stacks may be reused by lwp_create().
 997          *
  998          * Anything else goes on thread_deathrow, where it will eventually
 999          * be thread_free()d.
1000          */
1001         if (t->t_flag & T_LWPREUSE) {
1002                 ASSERT(ttolwp(t) != NULL);
1003                 t->t_forw = lwp_deathrow;
1004                 lwp_deathrow = t;
1005                 lwp_reapcnt++;
1006         } else {
1007                 t->t_forw = thread_deathrow;
1008                 thread_deathrow = t;
1009                 thread_reapcnt++;
1010         }
1011         if (lwp_reapcnt + thread_reapcnt > reaplimit)
1012                 cv_signal(&reaper_cv);      /* wake the reaper */
1013         t->t_state = TS_FREE;
1014         lock_clear(&t->t_lock);
1015 
1016         /*
1017          * Before we return, we need to grab and drop the thread lock for
1018          * the dead thread.  At this point, the current thread is the idle
1019          * thread, and the dead thread's CPU lock points to the current
1020          * CPU -- and we must grab and drop the lock to synchronize with
1021          * a racing thread walking a blocking chain that the zombie thread
1022          * was recently in.  By this point, that blocking chain is (by
1023          * definition) stale:  the dead thread is not holding any locks, and
1024          * is therefore not in any blocking chains -- but if we do not regrab
1025          * our lock before freeing the dead thread's data structures, the
1026          * thread walking the (stale) blocking chain will die on memory
1027          * corruption when it attempts to drop the dead thread's lock.  We
1028          * only need do this once because there is no way for the dead thread
1029          * to ever again be on a blocking chain:  once we have grabbed and
1030          * dropped the thread lock, we are guaranteed that anyone that could
1031          * have seen this thread in a blocking chain can no longer see it.
1032          */
1033         thread_lock(t);
1034         thread_unlock(t);
1035 
1036         mutex_exit(&reaplock);
1037 }
1038 
1039 /*
 1040  * Install thread context ops for the specified thread.
1041  */
1042 void
1043 installctx(
1044         kthread_t *t,
1045         void    *arg,
1046         void    (*save)(void *),
1047         void    (*restore)(void *),
1048         void    (*fork)(void *, void *),
1049         void    (*lwp_create)(void *, void *),
1050         void    (*exit)(void *),
1051         void    (*free)(void *, int))
1052 {
1053         struct ctxop *ctx;
1054 
1055         ctx = kmem_alloc(sizeof (struct ctxop), KM_SLEEP);
1056         ctx->save_op = save;
1057         ctx->restore_op = restore;
1058         ctx->fork_op = fork;
1059         ctx->lwp_create_op = lwp_create;
1060         ctx->exit_op = exit;
1061         ctx->free_op = free;
1062         ctx->arg = arg;
1063         ctx->next = t->t_ctx;
1064         t->t_ctx = ctx;
1065 }
1066 
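     /*
      * A hypothetical client would typically install its ops on a thread and
      * later remove them by passing removectx() the same argument list, e.g.:
      *
      *     installctx(curthread, my_state, my_save, my_restore,
      *         NULL, NULL, NULL, my_free);
      *     ...
      *     (void) removectx(curthread, my_state, my_save, my_restore,
      *         NULL, NULL, NULL, my_free);
      *
      * (my_state, my_save, my_restore and my_free are illustrative names.)
      */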
1067 /*
1068  * Remove the thread context ops from a thread.
1069  */
1070 int
1071 removectx(
1072         kthread_t *t,
1073         void    *arg,
1074         void    (*save)(void *),
1075         void    (*restore)(void *),
1076         void    (*fork)(void *, void *),
1077         void    (*lwp_create)(void *, void *),
1078         void    (*exit)(void *),
1079         void    (*free)(void *, int))
1080 {
1081         struct ctxop *ctx, *prev_ctx;
1082 
1083         /*
1084          * The incoming kthread_t (which is the thread for which the
1085          * context ops will be removed) should be one of the following:
1086          *
1087          * a) the current thread,
1088          *
1089          * b) a thread of a process that's being forked (SIDL),
1090          *
1091          * c) a thread that belongs to the same process as the current
1092          *    thread and for which the current thread is the agent thread,
1093          *
 1094          * d) a thread that is TS_STOPPED, which (if curthread is not an
 1095          *    agent thread) indicates a thread being created as part of
 1096          *    an lwp creation.
1097          */
1098         ASSERT(t == curthread || ttoproc(t)->p_stat == SIDL ||
1099             ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);
1100 
1101         /*
1102          * Serialize modifications to t->t_ctx to prevent the agent thread
1103          * and the target thread from racing with each other during lwp exit.
1104          */
1105         mutex_enter(&t->t_ctx_lock);
1106         prev_ctx = NULL;
1107         kpreempt_disable();
1108         for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next) {
1109                 if (ctx->save_op == save && ctx->restore_op == restore &&
1110                     ctx->fork_op == fork && ctx->lwp_create_op == lwp_create &&
1111                     ctx->exit_op == exit && ctx->free_op == free &&
1112                     ctx->arg == arg) {
1113                         if (prev_ctx)
1114                                 prev_ctx->next = ctx->next;
1115                         else
1116                                 t->t_ctx = ctx->next;
1117                         mutex_exit(&t->t_ctx_lock);
1118                         if (ctx->free_op != NULL)
1119                                 (ctx->free_op)(ctx->arg, 0);
1120                         kmem_free(ctx, sizeof (struct ctxop));
1121                         kpreempt_enable();
1122                         return (1);
1123                 }
1124                 prev_ctx = ctx;
1125         }
1126         mutex_exit(&t->t_ctx_lock);
1127         kpreempt_enable();
1128 
1129         return (0);
1130 }
1131 
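     /*
      * Run the save_op of every context op installed on the current thread;
      * the counterpart of restorectx().
      */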
1132 void
1133 savectx(kthread_t *t)
1134 {
1135         struct ctxop *ctx;
1136 
1137         ASSERT(t == curthread);
 1138         for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
1139                 if (ctx->save_op != NULL)
1140                         (ctx->save_op)(ctx->arg);
1141 }
1142 
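     /*
      * Run the restore_op of every context op installed on the current
      * thread; the counterpart of savectx().
      */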
1143 void
1144 restorectx(kthread_t *t)
1145 {
1146         struct ctxop *ctx;
1147 
1148         ASSERT(t == curthread);
 1149         for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
1150                 if (ctx->restore_op != NULL)
1151                         (ctx->restore_op)(ctx->arg);
1152 }
1153 
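     /*
      * Run the fork_op of every context op installed on thread t, giving
      * each a chance to set up state for the child thread ct.
      */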
1154 void
1155 forkctx(kthread_t *t, kthread_t *ct)
1156 {
1157         struct ctxop *ctx;
1158 
1159         for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
1160                 if (ctx->fork_op != NULL)
1161                         (ctx->fork_op)(t, ct);
1162 }
1163 
1164 /*
1165  * Note that this operator is only invoked via the _lwp_create
 1166  * system call.  The system may have other reasons to create lwps, e.g.,
 1167  * the agent lwp or the doors unreferenced lwp.
1168  */
1169 void
1170 lwp_createctx(kthread_t *t, kthread_t *ct)
1171 {
1172         struct ctxop *ctx;
1173 
1174         for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
1175                 if (ctx->lwp_create_op != NULL)
1176                         (ctx->lwp_create_op)(t, ct);
1177 }
1178 
1179 /*
1180  * exitctx is called from thread_exit() and lwp_exit() to perform any actions
1181  * needed when the thread/LWP leaves the processor for the last time. This
1182  * routine is not intended to deal with freeing memory; freectx() is used for
1183  * that purpose during thread_free(). This routine is provided to allow for
1184  * clean-up that can't wait until thread_free().
1185  */
1186 void
1187 exitctx(kthread_t *t)
1188 {
1189         struct ctxop *ctx;
1190 
1191         for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
1192                 if (ctx->exit_op != NULL)
1193                         (ctx->exit_op)(t);
1194 }
1195 
1196 /*
1197  * freectx is called from thread_free() and exec() to get
1198  * rid of old thread context ops.
1199  */
1200 void
1201 freectx(kthread_t *t, int isexec)
1202 {
1203         struct ctxop *ctx;
1204 
1205         kpreempt_disable();
1206         while ((ctx = t->t_ctx) != NULL) {
1207                 t->t_ctx = ctx->next;
1208                 if (ctx->free_op != NULL)
1209                         (ctx->free_op)(ctx->arg, isexec);
1210                 kmem_free(ctx, sizeof (struct ctxop));
1211         }
1212         kpreempt_enable();
1213 }
1214 
1215 /*
1216  * freectx_ctx is called from lwp_create() when lwp is reused from
1217  * lwp_deathrow and its thread structure is added to thread_deathrow.
 1218  * The thread structure to which this ctx was attached may already have
 1219  * been freed by the thread reaper, so free_op implementations shouldn't
 1220  * rely on that thread structure still being around.
1221  */
1222 void
1223 freectx_ctx(struct ctxop *ctx)
1224 {
1225         struct ctxop *nctx;
1226 
1227         ASSERT(ctx != NULL);
1228 
1229         kpreempt_disable();
1230         do {
1231                 nctx = ctx->next;
1232                 if (ctx->free_op != NULL)
1233                         (ctx->free_op)(ctx->arg, 0);
1234                 kmem_free(ctx, sizeof (struct ctxop));
1235         } while ((ctx = nctx) != NULL);
1236         kpreempt_enable();
1237 }
1238 
1239 /*
1240  * Set the thread running; arrange for it to be swapped in if necessary.
1241  */
1242 void
1243 setrun_locked(kthread_t *t)
1244 {
1245         ASSERT(THREAD_LOCK_HELD(t));
1246         if (t->t_state == TS_SLEEP) {
1247                 /*
1248                  * Take off sleep queue.
1249                  */
1250                 SOBJ_UNSLEEP(t->t_sobj_ops, t);
1251         } else if (t->t_state & (TS_RUN | TS_ONPROC)) {
1252                 /*
1253                  * Already on dispatcher queue.
1254                  */
1255                 return;
1256         } else if (t->t_state == TS_WAIT) {
1257                 waitq_setrun(t);
1258         } else if (t->t_state == TS_STOPPED) {
1259                 /*
 1260                  * Each of the continue mechanisms, SIGCONT (TS_XSTART),
 1261                  * /proc (TS_PSTART) and lwp_continue() (TS_CSTART), must
 1262                  * have requested that the thread be run.
 1263                  * Just calling setrun() is not sufficient to set a stopped
 1264                  * thread running.  TS_XSTART is always set if the thread
 1265                  * is not stopped by a jobcontrol stop signal.
 1266                  * TS_PSTART is always set if /proc is not controlling it.
 1267                  * TS_CSTART is always set if lwp_suspend() didn't stop it.
1268                  * The thread won't be stopped unless one of these
1269                  * three mechanisms did it.
1270                  *
1271                  * These flags must be set before calling setrun_locked(t).
1272                  * They can't be passed as arguments because the streams
1273                  * code calls setrun() indirectly and the mechanism for
1274                  * doing so admits only one argument.  Note that the
 1275                  * thread must be locked in order to change t_schedflag.
1276                  */
1277                 if ((t->t_schedflag & TS_ALLSTART) != TS_ALLSTART)
1278                         return;
1279                 /*
1280                  * Process is no longer stopped (a thread is running).
1281                  */
1282                 t->t_whystop = 0;
1283                 t->t_whatstop = 0;
1284                 /*
1285                  * Strictly speaking, we do not have to clear these
1286                  * flags here; they are cleared on entry to stop().
1287                  * However, they are confusing when doing kernel
1288                  * debugging or when they are revealed by ps(1).
1289                  */
1290                 t->t_schedflag &= ~TS_ALLSTART;
1291                 THREAD_TRANSITION(t);   /* drop stopped-thread lock */
1292                 ASSERT(t->t_lockp == &transition_lock);
1293                 ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL);
1294                 /*
1295                  * Let the class put the process on the dispatcher queue.
1296                  */
1297                 CL_SETRUN(t);
1298         }
1299 }
1300 
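     /*
      * Convenience wrapper that acquires the thread lock around
      * setrun_locked().
      */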
1301 void
1302 setrun(kthread_t *t)
1303 {
1304         thread_lock(t);
1305         setrun_locked(t);
1306         thread_unlock(t);
1307 }
1308 
1309 /*
1310  * Unpin an interrupted thread.
1311  *      When an interrupt occurs, the interrupt is handled on the stack
1312  *      of an interrupt thread, taken from a pool linked to the CPU structure.
1313  *
1314  *      When swtch() is switching away from an interrupt thread because it
1315  *      blocked or was preempted, this routine is called to complete the
1316  *      saving of the interrupted thread state, and returns the interrupted
1317  *      thread pointer so it may be resumed.
1318  *
1319  *      Called by swtch() only at high spl.
1320  */
1321 kthread_t *
1322 thread_unpin()
1323 {
1324         kthread_t       *t = curthread; /* current thread */
1325         kthread_t       *itp;           /* interrupted thread */
1326         int             i;              /* interrupt level */
1327         extern int      intr_passivate();
1328 
1329         ASSERT(t->t_intr != NULL);
1330 
1331         itp = t->t_intr;             /* interrupted thread */
1332         t->t_intr = NULL;            /* clear interrupt ptr */
1333 
1334         ht_end_intr();
1335 
1336         /*
1337          * Get state from interrupt thread for the one
1338          * it interrupted.
1339          */
1340 
1341         i = intr_passivate(t, itp);
1342 
1343         TRACE_5(TR_FAC_INTR, TR_INTR_PASSIVATE,
1344             "intr_passivate:level %d curthread %p (%T) ithread %p (%T)",
1345             i, t, t, itp, itp);
1346 
1347         /*
1348          * Dissociate the current thread from the interrupted thread's LWP.
1349          */
1350         t->t_lwp = NULL;
1351 
1352         /*
 1353          * Interrupt handlers above the level at which spinlocks block
 1354          * must not block.
1355          */
1356 #if DEBUG
1357         if (i < 0 || i > LOCK_LEVEL)
1358                 cmn_err(CE_PANIC, "thread_unpin: ipl out of range %x", i);
1359 #endif
1360 
1361         /*
1362          * Compute the CPU's base interrupt level based on the active
1363          * interrupts.
1364          */
1365         ASSERT(CPU->cpu_intr_actv & (1 << i));
1366         set_base_spl();
1367 
1368         return (itp);
1369 }
1370 
1371 /*
 1372  * Create and initialize an interrupt thread and link it onto the
 1373  * CPU's pool of interrupt threads.
 1374  *      Called at spl7() or better.
1375  */
1376 void
1377 thread_create_intr(struct cpu *cp)
1378 {
1379         kthread_t *tp;
1380 
1381         tp = thread_create(NULL, 0,
1382             (void (*)())thread_create_intr, NULL, 0, &p0, TS_ONPROC, 0);
1383 
1384         /*
1385          * Set the thread in the TS_FREE state.  The state will change
1386          * to TS_ONPROC only while the interrupt is active.  Think of these
1387          * as being on a private free list for the CPU.  Being TS_FREE keeps
1388          * inactive interrupt threads out of debugger thread lists.
1389          *
1390          * We cannot call thread_create with TS_FREE because of the current
1391          * checks there for ONPROC.  Fix this when thread_create takes flags.
1392          */
1393         THREAD_FREEINTR(tp, cp);
1394 
1395         /*
1396          * Nobody should ever reference the credentials of an interrupt
1397          * thread so make it NULL to catch any such references.
1398          */
1399         tp->t_cred = NULL;
1400         tp->t_flag |= T_INTR_THREAD;
1401         tp->t_cpu = cp;
1402         tp->t_bound_cpu = cp;
1403         tp->t_disp_queue = cp->cpu_disp;
1404         tp->t_affinitycnt = 1;
1405         tp->t_preempt = 1;
1406 
1407         /*
1408          * Don't make a user-requested binding on this thread so that
1409          * the processor can be offlined.
1410          */
1411         tp->t_bind_cpu = PBIND_NONE; /* no USER-requested binding */
1412         tp->t_bind_pset = PS_NONE;
1413 
1414 #if defined(__i386) || defined(__amd64)
1415         tp->t_stk -= STACK_ALIGN;
1416         *(tp->t_stk) = 0;            /* terminate intr thread stack */
1417 #endif
1418 
1419         /*
1420          * Link onto CPU's interrupt pool.
1421          */
1422         tp->t_link = cp->cpu_intr_thread;
1423         cp->cpu_intr_thread = tp;
1424 }
1425 
1426 /*
1427  * TSD -- THREAD SPECIFIC DATA
1428  */
 1429 static kmutex_t         tsd_mutex;       /* protects tsd key space and tsd_list */
1430 static uint_t           tsd_nkeys;       /* size of destructor array */
1431 /* per-key destructor funcs */
1432 static void             (**tsd_destructor)(void *);
 1433 /* list of tsd_thread structures */
1434 static struct tsd_thread        *tsd_list;
1435 
1436 /*
1437  * Default destructor
1438  *      Needed because NULL destructor means that the key is unused
1439  */
1440 /* ARGSUSED */
1441 void
1442 tsd_defaultdestructor(void *value)
1443 {}
1444 
1445 /*
1446  * Create a key (index into per thread array)
1447  *      Locks out tsd_create, tsd_destroy, and tsd_exit
1448  *      May allocate memory with lock held
1449  */
1450 void
1451 tsd_create(uint_t *keyp, void (*destructor)(void *))
1452 {
1453         int     i;
1454         uint_t  nkeys;
1455 
1456         /*
1457          * if key is allocated, do nothing
1458          */
1459         mutex_enter(&tsd_mutex);
1460         if (*keyp) {
1461                 mutex_exit(&tsd_mutex);
1462                 return;
1463         }
1464         /*
1465          * find an unused key
1466          */
1467         if (destructor == NULL)
1468                 destructor = tsd_defaultdestructor;
1469 
1470         for (i = 0; i < tsd_nkeys; ++i)
1471                 if (tsd_destructor[i] == NULL)
1472                         break;
1473 
1474         /*
1475          * if no unused keys, increase the size of the destructor array
1476          */
1477         if (i == tsd_nkeys) {
1478                 if ((nkeys = (tsd_nkeys << 1)) == 0)
1479                         nkeys = 1;
1480                 tsd_destructor =
1481                     (void (**)(void *))tsd_realloc((void *)tsd_destructor,
1482                     (size_t)(tsd_nkeys * sizeof (void (*)(void *))),
1483                     (size_t)(nkeys * sizeof (void (*)(void *))));
1484                 tsd_nkeys = nkeys;
1485         }
1486 
1487         /*
1488          * allocate the next available unused key
1489          */
1490         tsd_destructor[i] = destructor;
1491         *keyp = i + 1;
1492         mutex_exit(&tsd_mutex);
1493 }
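     /*
      * Typical use (hypothetical module code): create a key once, then store
      * and fetch a per-thread value with it:
      *
      *     static uint_t my_key;
      *
      *     tsd_create(&my_key, my_destructor);
      *     (void) tsd_set(my_key, my_data);
      *     my_data = tsd_get(my_key);
      *
      * my_key, my_destructor and my_data are illustrative names.
      */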
1494 
1495 /*
1496  * Destroy a key -- this is for unloadable modules
1497  *
1498  * Assumes that the caller is preventing tsd_set and tsd_get
1499  * Locks out tsd_create, tsd_destroy, and tsd_exit
1500  * May free memory with lock held
1501  */
1502 void
1503 tsd_destroy(uint_t *keyp)
1504 {
1505         uint_t key;
1506         struct tsd_thread *tsd;
1507 
1508         /*
1509          * protect the key namespace and our destructor lists
1510          */
1511         mutex_enter(&tsd_mutex);
1512         key = *keyp;
1513         *keyp = 0;
1514 
1515         ASSERT(key <= tsd_nkeys);
1516 
1517         /*
1518          * if the key is valid
1519          */
1520         if (key != 0) {
1521                 uint_t k = key - 1;
1522                 /*
1523                  * for every thread with TSD, call key's destructor
1524                  */
1525                 for (tsd = tsd_list; tsd; tsd = tsd->ts_next) {
1526                         /*
1527                          * no TSD for key in this thread
1528                          */
1529                         if (key > tsd->ts_nkeys)
1530                                 continue;
1531                         /*
1532                          * call destructor for key
1533                          */
1534                         if (tsd->ts_value[k] && tsd_destructor[k])
1535                                 (*tsd_destructor[k])(tsd->ts_value[k]);
1536                         /*
1537                          * reset value for key
1538                          */
1539                         tsd->ts_value[k] = NULL;
1540                 }
1541                 /*
1542                  * actually free the key (NULL destructor == unused)
1543                  */
1544                 tsd_destructor[k] = NULL;
1545         }
1546 
1547         mutex_exit(&tsd_mutex);
1548 }
1549 
1550 /*
1551  * Quickly return the per thread value that was stored with the specified key
1552  * Assumes the caller is protecting key from tsd_create and tsd_destroy
1553  */
1554 void *
1555 tsd_get(uint_t key)
1556 {
1557         return (tsd_agent_get(curthread, key));
1558 }
1559 
1560 /*
1561  * Set a per thread value indexed with the specified key
1562  */
1563 int
1564 tsd_set(uint_t key, void *value)
1565 {
1566         return (tsd_agent_set(curthread, key, value));
1567 }
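
/*
 * Illustrative sketch (hypothetical names): once a key has been created, a
 * thread stores and fetches its private value with tsd_set()/tsd_get():
 *
 *	if (tsd_get(foo_key) == NULL)
 *		(void) tsd_set(foo_key, kmem_zalloc(sizeof (foo_t), KM_SLEEP));
 *	fp = tsd_get(foo_key);
 *
 * tsd_set() fails only with EINVAL when the key is zero (i.e. has not been
 * created); growth of the per-thread value array uses KM_SLEEP, so the
 * (void) cast above is the usual idiom.
 */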
1568 
1569 /*
1570  * Like tsd_get(), except that the agent lwp can get the tsd of another
1571  * thread in the same process (the agent thread only runs when the process
1572  * is completely stopped by /proc), or when syslwp is creating a new lwp.
1573  */
1574 void *
1575 tsd_agent_get(kthread_t *t, uint_t key)
1576 {
1577         struct tsd_thread *tsd = t->t_tsd;
1578 
1579         ASSERT(t == curthread ||
1580             ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);
1581 
1582         if (key && tsd != NULL && key <= tsd->ts_nkeys)
1583                 return (tsd->ts_value[key - 1]);
1584         return (NULL);
1585 }
1586 
1587 /*
1588  * Like tsd_set(), except that the agent lwp can set the tsd of
1589  * another thread in the same process, or syslwp can set the tsd
1590  * of a thread it's in the middle of creating.
1591  *
1592  * Assumes the caller is protecting key from tsd_create and tsd_destroy
1593  * May lock out tsd_destroy (and tsd_create), may allocate memory with
1594  * lock held
1595  */
1596 int
1597 tsd_agent_set(kthread_t *t, uint_t key, void *value)
1598 {
1599         struct tsd_thread *tsd = t->t_tsd;
1600 
1601         ASSERT(t == curthread ||
1602             ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);
1603 
1604         if (key == 0)
1605                 return (EINVAL);
1606         if (tsd == NULL)
1607                 tsd = t->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
1608         if (key <= tsd->ts_nkeys) {
1609                 tsd->ts_value[key - 1] = value;
1610                 return (0);
1611         }
1612 
1613         ASSERT(key <= tsd_nkeys);
1614 
1615         /*
1616          * lock out tsd_destroy()
1617          */
1618         mutex_enter(&tsd_mutex);
1619         if (tsd->ts_nkeys == 0) {
1620                 /*
1621                  * Link onto list of threads with TSD
1622                  */
1623                 if ((tsd->ts_next = tsd_list) != NULL)
1624                         tsd_list->ts_prev = tsd;
1625                 tsd_list = tsd;
1626         }
1627 
1628         /*
1629          * Allocate thread local storage and set the value for key
1630          */
1631         tsd->ts_value = tsd_realloc(tsd->ts_value,
1632             tsd->ts_nkeys * sizeof (void *),
1633             key * sizeof (void *));
1634         tsd->ts_nkeys = key;
1635         tsd->ts_value[key - 1] = value;
1636         mutex_exit(&tsd_mutex);
1637 
1638         return (0);
1639 }
1640 
1641 
1642 /*
1643  * Return the per thread value that was stored with the specified key
1644  *      If necessary, create the key and the value
1645  *      Assumes the caller is protecting *keyp from tsd_destroy
1646  */
1647 void *
1648 tsd_getcreate(uint_t *keyp, void (*destroy)(void *), void *(*allocate)(void))
1649 {
1650         void *value;
1651         uint_t key = *keyp;
1652         struct tsd_thread *tsd = curthread->t_tsd;
1653 
1654         if (tsd == NULL)
1655                 tsd = curthread->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
1656         if (key && key <= tsd->ts_nkeys && (value = tsd->ts_value[key - 1]))
1657                 return (value);
1658         if (key == 0)
1659                 tsd_create(keyp, destroy);
1660         (void) tsd_set(*keyp, value = (*allocate)());
1661 
1662         return (value);
1663 }
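
/*
 * Illustrative sketch (hypothetical names): tsd_getcreate() folds the
 * create/set/get sequence into a single call, which is convenient for
 * lazily allocated per-thread state:
 *
 *	static uint_t foo_key;
 *
 *	foo_t *
 *	foo_mine(void)
 *	{
 *		return (tsd_getcreate(&foo_key, foo_free, foo_alloc));
 *	}
 *
 * where foo_alloc() returns a new value and foo_free() disposes of it when
 * the thread exits (tsd_exit()) or the key is destroyed (tsd_destroy()).
 */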
1664 
1665 /*
1666  * Called from thread_exit() to run the destructor function for each tsd
1667  *      Locks out tsd_create and tsd_destroy
1668  *      Assumes that the destructor *DOES NOT* use tsd
1669  */
1670 void
1671 tsd_exit(void)
1672 {
1673         int i;
1674         struct tsd_thread *tsd = curthread->t_tsd;
1675 
1676         if (tsd == NULL)
1677                 return;
1678 
1679         if (tsd->ts_nkeys == 0) {
1680                 kmem_free(tsd, sizeof (*tsd));
1681                 curthread->t_tsd = NULL;
1682                 return;
1683         }
1684 
1685         /*
1686          * lock out tsd_create and tsd_destroy, call
1687          * the destructor, and mark the value as destroyed.
1688          */
1689         mutex_enter(&tsd_mutex);
1690 
1691         for (i = 0; i < tsd->ts_nkeys; i++) {
1692                 if (tsd->ts_value[i] && tsd_destructor[i])
1693                         (*tsd_destructor[i])(tsd->ts_value[i]);
1694                 tsd->ts_value[i] = NULL;
1695         }
1696 
1697         /*
1698          * remove from linked list of threads with TSD
1699          */
1700         if (tsd->ts_next)
1701                 tsd->ts_next->ts_prev = tsd->ts_prev;
1702         if (tsd->ts_prev)
1703                 tsd->ts_prev->ts_next = tsd->ts_next;
1704         if (tsd_list == tsd)
1705                 tsd_list = tsd->ts_next;
1706 
1707         mutex_exit(&tsd_mutex);
1708 
1709         /*
1710          * free up the TSD
1711          */
1712         kmem_free(tsd->ts_value, tsd->ts_nkeys * sizeof (void *));
1713         kmem_free(tsd, sizeof (struct tsd_thread));
1714         curthread->t_tsd = NULL;
1715 }
1716 
1717 /*
1718  * realloc: copy osize bytes from old into a zeroed nsize buffer; free old.
1719  */
1720 static void *
1721 tsd_realloc(void *old, size_t osize, size_t nsize)
1722 {
1723         void *new;
1724 
1725         new = kmem_zalloc(nsize, KM_SLEEP);
1726         if (old) {
1727                 bcopy(old, new, osize);
1728                 kmem_free(old, osize);
1729         }
1730         return (new);
1731 }
1732 
1733 /*
1734  * Return non-zero if an interrupt is being serviced.
1735  */
1736 int
1737 servicing_interrupt()
1738 {
1739         int onintr = 0;
1740 
1741         /* Are we an interrupt thread? */
1742         if (curthread->t_flag & T_INTR_THREAD)
1743                 return (1);
1744         /* Are we servicing a high level interrupt? */
1745         if (CPU_ON_INTR(CPU)) {
1746                 kpreempt_disable();
1747                 onintr = CPU_ON_INTR(CPU);
1748                 kpreempt_enable();
1749         }
1750         return (onintr);
1751 }
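
/*
 * Illustrative sketch: callers typically use servicing_interrupt() to avoid
 * operations that may block while in interrupt context, e.g.:
 *
 *	int kmflag = servicing_interrupt() ? KM_NOSLEEP : KM_SLEEP;
 *	buf = kmem_alloc(size, kmflag);
 *	if (buf == NULL)
 *		(handle the KM_NOSLEEP failure)
 */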
1752 
1753 
1754 /*
1755  * Change the dispatch priority of a thread in the system.
1756  * Used when raising or lowering a thread's priority.
1757  * (E.g., priority inheritance)
1758  *
1759  * Since threads are queued according to their priority, we
1760  * must check the thread's state to determine whether it
1761  * is on a queue somewhere. If it is, we've got to:
1762  *
1763  *      o Dequeue the thread.
1764  *      o Change its effective priority.
1765  *      o Enqueue the thread.
1766  *
1767  * Assumptions: The thread whose priority we wish to change
1768  * must be locked before we call thread_change_(e)pri().
1769  * The thread_change_(e)pri() functions do not drop the thread
1770  * lock; that must be done by the caller.
1771  */
1772 void
1773 thread_change_epri(kthread_t *t, pri_t disp_pri)
1774 {
1775         uint_t  state;
1776 
1777         ASSERT(THREAD_LOCK_HELD(t));
1778 
1779         /*
1780          * If the inherited priority hasn't actually changed,
1781          * just return.
1782          */
1783         if (t->t_epri == disp_pri)
1784                 return;
1785 
1786         state = t->t_state;
1787 
1788         /*
1789          * If it's not on a queue, change the priority with impunity.
1790          */
1791         if ((state & (TS_SLEEP | TS_RUN | TS_WAIT)) == 0) {
1792                 t->t_epri = disp_pri;
1793                 if (state == TS_ONPROC) {
1794                         cpu_t *cp = t->t_disp_queue->disp_cpu;
1795 
1796                         if (t == cp->cpu_dispthread)
1797                                 cp->cpu_dispatch_pri = DISP_PRIO(t);
1798                 }
1799         } else if (state == TS_SLEEP) {
1800                 /*
1801                  * Take the thread out of its sleep queue.
1802                  * Change the inherited priority.
1803                  * Re-enqueue the thread.
1804                  * Each synchronization object exports a function
1805                  * to do this in an appropriate manner.
1806                  */
1807                 SOBJ_CHANGE_EPRI(t->t_sobj_ops, t, disp_pri);
1808         } else if (state == TS_WAIT) {
1809                 /*
1810                  * Re-enqueue a thread on the wait queue if its
1811                  * effective priority needs to change.
1812                  */
1813                 if (disp_pri != t->t_epri)
1814                         waitq_change_pri(t, disp_pri);
1815         } else {
1816                 /*
1817                  * The thread is on a run queue.
1818                  * Note: setbackdq() may not put the thread
1819                  * back on the same run queue where it originally
1820                  * resided.
1821                  */
1822                 (void) dispdeq(t);
1823                 t->t_epri = disp_pri;
1824                 setbackdq(t);
1825         }
1826         schedctl_set_cidpri(t);
1827 }
1828 
1829 /*
1830  * Function: Change the t_pri field of a thread.
1831  * Side Effects: Adjust the thread ordering on a run queue
1832  *               or sleep queue, if necessary.
1833  * Returns: 1 if the thread was on a run queue, else 0.
1834  */
1835 int
1836 thread_change_pri(kthread_t *t, pri_t disp_pri, int front)
1837 {
1838         uint_t  state;
1839         int     on_rq = 0;
1840 
1841         ASSERT(THREAD_LOCK_HELD(t));
1842 
1843         state = t->t_state;
1844         THREAD_WILLCHANGE_PRI(t, disp_pri);
1845 
1846         /*
1847          * If it's not on a queue, change the priority with impunity.
1848          */
1849         if ((state & (TS_SLEEP | TS_RUN | TS_WAIT)) == 0) {
1850                 t->t_pri = disp_pri;
1851 
1852                 if (state == TS_ONPROC) {
1853                         cpu_t *cp = t->t_disp_queue->disp_cpu;
1854 
1855                         if (t == cp->cpu_dispthread)
1856                                 cp->cpu_dispatch_pri = DISP_PRIO(t);
1857                 }
1858         } else if (state == TS_SLEEP) {
1859                 /*
1860                  * If the priority has changed, take the thread out of
1861                  * its sleep queue and change the priority.
1862                  * Re-enqueue the thread.
1863                  * Each synchronization object exports a function
1864                  * to do this in an appropriate manner.
1865                  */
1866                 if (disp_pri != t->t_pri)
1867                         SOBJ_CHANGE_PRI(t->t_sobj_ops, t, disp_pri);
1868         } else if (state == TS_WAIT) {
1869                 /*
1870                  * Re-enqueue a thread on the wait queue if its
1871                  * priority needs to change.
1872                  */
1873                 if (disp_pri != t->t_pri)
1874                         waitq_change_pri(t, disp_pri);
1875         } else {
1876                 /*
1877                  * The thread is on a run queue.
1878                  * Note: setbackdq() may not put the thread
1879                  * back on the same run queue where it originally
1880                  * resided.
1881                  *
1882                  * We still requeue the thread even if the priority
1883                  * is unchanged to preserve round-robin (and other)
1884                  * effects between threads of the same priority.
1885                  */
1886                 on_rq = dispdeq(t);
1887                 ASSERT(on_rq);
1888                 t->t_pri = disp_pri;
1889                 if (front) {
1890                         setfrontdq(t);
1891                 } else {
1892                         setbackdq(t);
1893                 }
1894         }
1895         schedctl_set_cidpri(t);
1896         return (on_rq);
1897 }
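
/*
 * Illustrative sketch: both thread_change_epri() and thread_change_pri()
 * require that the caller hold the thread lock; neither function drops it,
 * so the caller unlocks afterwards, e.g.:
 *
 *	thread_lock(t);
 *	(void) thread_change_pri(t, newpri, 0);	(0: requeue at the back)
 *	thread_unlock(t);
 */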
1898 
1899 /*
1900  * Called when the tunable kmem_stackinfo is set: fill the kernel thread
1901  * stack with a specific pattern.
1902  */
1903 static void
1904 stkinfo_begin(kthread_t *t)
1905 {
1906         caddr_t start;  /* stack start */
1907         caddr_t end;    /* stack end  */
1908         uint64_t *ptr;  /* pattern pointer */
1909 
1910         /*
1911          * Stack grows up or down, see thread_create(),
1912          * compute stack memory area start and end (start < end).
1913          */
1914         if (t->t_stk > t->t_stkbase) {
1915                 /* stack grows down */
1916                 start = t->t_stkbase;
1917                 end = t->t_stk;
1918         } else {
1919                 /* stack grows up */
1920                 start = t->t_stk;
1921                 end = t->t_stkbase;
1922         }
1923 
1924         /*
1925          * Stackinfo pattern size is 8 bytes.  Ensure proper 8-byte
1926          * alignment of start and end within the stack area boundaries
1927          * (protection against corrupt t_stkbase/t_stk data).
1928          */
1929         if ((((uintptr_t)start) & 0x7) != 0) {
1930                 start = (caddr_t)((((uintptr_t)start) & (~0x7)) + 8);
1931         }
1932         end = (caddr_t)(((uintptr_t)end) & (~0x7));
1933 
1934         if ((end <= start) || (end - start) > (1024 * 1024)) {
1935                 /* negative or stack size > 1 meg, assume bogus */
1936                 return;
1937         }
1938 
1939         /* fill stack area with a pattern (instead of zeros) */
1940         ptr = (uint64_t *)((void *)start);
1941         while (ptr < (uint64_t *)((void *)end)) {
1942                 *ptr++ = KMEM_STKINFO_PATTERN;
1943         }
1944 }
1945 
1946 
1947 /*
1948  * Called when the tunable kmem_stackinfo is set: create the stackinfo log
1949  * if it doesn't already exist, compute the percentage of kernel stack
1950  * actually used, and record it in the log if it is among the highest seen.
1951  */
1952 static void
1953 stkinfo_end(kthread_t *t)
1954 {
1955         caddr_t start;  /* stack start */
1956         caddr_t end;    /* stack end  */
1957         uint64_t *ptr;  /* pattern pointer */
1958         size_t stksz;   /* stack size */
1959         size_t smallest = 0;
1960         size_t percent = 0;
1961         uint_t index = 0;
1962         uint_t i;
1963         static size_t smallest_percent = (size_t)-1;
1964         static uint_t full = 0;
1965 
1966         /* create the stackinfo log, if it doesn't already exist */
1967         mutex_enter(&kmem_stkinfo_lock);
1968         if (kmem_stkinfo_log == NULL) {
1969                 kmem_stkinfo_log = (kmem_stkinfo_t *)
1970                     kmem_zalloc(KMEM_STKINFO_LOG_SIZE *
1971                     (sizeof (kmem_stkinfo_t)), KM_NOSLEEP);
1972                 if (kmem_stkinfo_log == NULL) {
1973                         mutex_exit(&kmem_stkinfo_lock);
1974                         return;
1975                 }
1976         }
1977         mutex_exit(&kmem_stkinfo_lock);
1978 
1979         /*
1980          * Stack grows up or down, see thread_create(),
1981          * compute stack memory area start and end (start < end).
1982          */
1983         if (t->t_stk > t->t_stkbase) {
1984                 /* stack grows down */
1985                 start = t->t_stkbase;
1986                 end = t->t_stk;
1987         } else {
1988                 /* stack grows up */
1989                 start = t->t_stk;
1990                 end = t->t_stkbase;
1991         }
1992 
1993         /* stack size as found in kthread_t */
1994         stksz = end - start;
1995 
1996         /*
1997          * Stackinfo pattern size is 8 bytes.  Ensure proper 8-byte
1998          * alignment of start and end within the stack area boundaries
1999          * (protection against corrupt t_stkbase/t_stk data).
2000          */
2001         if ((((uintptr_t)start) & 0x7) != 0) {
2002                 start = (caddr_t)((((uintptr_t)start) & (~0x7)) + 8);
2003         }
2004         end = (caddr_t)(((uintptr_t)end) & (~0x7));
2005 
2006         if ((end <= start) || (end - start) > (1024 * 1024)) {
2007                 /* negative or stack size > 1 meg, assume bogus */
2008                 return;
2009         }
2010 
2011         /* search until no pattern in the stack */
2012         if (t->t_stk > t->t_stkbase) {
2013                 /* stack grows down */
2014 #if defined(__i386) || defined(__amd64)
2015                 /*
2016                  * Six longs are pushed on the stack by thread_load(); skip
2017                  * them so that if the kthread has never run, percent is zero.
2018                  * 8-byte alignment is preserved on a 32-bit kernel because
2019                  * 6 x 4 = 24, which is a multiple of 8.
2020                  *
2021                  */
2022                 end -= (6 * sizeof (long));
2023 #endif
2024                 ptr = (uint64_t *)((void *)start);
2025                 while (ptr < (uint64_t *)((void *)end)) {
2026                         if (*ptr != KMEM_STKINFO_PATTERN) {
2027                                 percent = stkinfo_percent(end,
2028                                     start, (caddr_t)ptr);
2029                                 break;
2030                         }
2031                         ptr++;
2032                 }
2033         } else {
2034                 /* stack grows up */
2035                 ptr = (uint64_t *)((void *)end);
2036                 ptr--;
2037                 while (ptr >= (uint64_t *)((void *)start)) {
2038                         if (*ptr != KMEM_STKINFO_PATTERN) {
2039                                 percent = stkinfo_percent(start,
2040                                     end, (caddr_t)ptr);
2041                                 break;
2042                         }
2043                         ptr--;
2044                 }
2045         }
2046 
2047         DTRACE_PROBE3(stack__usage, kthread_t *, t,
2048             size_t, stksz, size_t, percent);
2049 
2050         if (percent == 0) {
2051                 return;
2052         }
2053 
2054         mutex_enter(&kmem_stkinfo_lock);
2055         if (full == KMEM_STKINFO_LOG_SIZE && percent < smallest_percent) {
2056                 /*
2057                  * The log is full and already contains the highest values
2058                  */
2059                 mutex_exit(&kmem_stkinfo_lock);
2060                 return;
2061         }
2062 
2063         /* keep a log of the highest used stack */
2064         for (i = 0; i < KMEM_STKINFO_LOG_SIZE; i++) {
2065                 if (kmem_stkinfo_log[i].percent == 0) {
2066                         index = i;
2067                         full++;
2068                         break;
2069                 }
2070                 if (smallest == 0) {
2071                         smallest = kmem_stkinfo_log[i].percent;
2072                         index = i;
2073                         continue;
2074                 }
2075                 if (kmem_stkinfo_log[i].percent < smallest) {
2076                         smallest = kmem_stkinfo_log[i].percent;
2077                         index = i;
2078                 }
2079         }
2080 
2081         if (percent >= kmem_stkinfo_log[index].percent) {
2082                 kmem_stkinfo_log[index].kthread = (caddr_t)t;
2083                 kmem_stkinfo_log[index].t_startpc = (caddr_t)t->t_startpc;
2084                 kmem_stkinfo_log[index].start = start;
2085                 kmem_stkinfo_log[index].stksz = stksz;
2086                 kmem_stkinfo_log[index].percent = percent;
2087                 kmem_stkinfo_log[index].t_tid = t->t_tid;
2088                 kmem_stkinfo_log[index].cmd[0] = '\0';
2089                 if (t->t_tid != 0) {
2090                         stksz = strlen((t->t_procp)->p_user.u_comm);
2091                         if (stksz >= KMEM_STKINFO_STR_SIZE) {
2092                                 stksz = KMEM_STKINFO_STR_SIZE - 1;
2093                                 kmem_stkinfo_log[index].cmd[stksz] = '\0';
2094                         } else {
2095                                 stksz += 1;
2096                         }
2097                         (void) memcpy(kmem_stkinfo_log[index].cmd,
2098                             (t->t_procp)->p_user.u_comm, stksz);
2099                 }
2100                 if (percent < smallest_percent) {
2101                         smallest_percent = percent;
2102                 }
2103         }
2104         mutex_exit(&kmem_stkinfo_lock);
2105 }
2106 
2107 /*
2108  * Called when kmem_stackinfo is set: compute the stack utilization percentage.
2109  */
2110 static size_t
2111 stkinfo_percent(caddr_t t_stk, caddr_t t_stkbase, caddr_t sp)
2112 {
2113         size_t percent;
2114         size_t s;
2115 
2116         if (t_stk > t_stkbase) {
2117                 /* stack grows down */
2118                 if (sp > t_stk) {
2119                         return (0);
2120                 }
2121                 if (sp < t_stkbase) {
2122                         return (100);
2123                 }
2124                 percent = t_stk - sp + 1;
2125                 s = t_stk - t_stkbase + 1;
2126         } else {
2127                 /* stack grows up */
2128                 if (sp < t_stk) {
2129                         return (0);
2130                 }
2131                 if (sp > t_stkbase) {
2132                         return (100);
2133                 }
2134                 percent = sp - t_stk + 1;
2135                 s = t_stkbase - t_stk + 1;
2136         }
2137         percent = ((100 * percent) / s) + 1;
2138         if (percent > 100) {
2139                 percent = 100;
2140         }
2141         return (percent);
2142 }
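
/*
 * Worked example (illustrative numbers, downward-growing stack): with an
 * 8192-byte stack (t_stk - t_stkbase + 1 == 8192) and the deepest
 * overwritten pattern word at sp == t_stk - 2047, percent = 2048, s = 8192,
 * and ((100 * 2048) / 8192) + 1 == 26, i.e. roughly a quarter of the stack
 * was used.  The trailing "+ 1" biases the result so that any usage at all
 * reports at least 1%.
 */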
2143 
2144 /*
2145  * NOTE: This will silently truncate a name > THREAD_NAME_MAX - 1 characters
2146  * long.  It is expected that callers (acting on behalf of userland clients)
2147  * will perform any required checks to return the correct error semantics.
2148  * It is also expected that callers acting on behalf of userland clients
2149  * have performed any necessary permission checks.
2150  */
2151 int
2152 thread_setname(kthread_t *t, const char *name)
2153 {
2154         char *buf = NULL;
2155 
2156         /*
2157          * We optimistically assume that a thread's name will only be set
2158          * once and so allocate memory in preparation of setting t_name.
2159          * If it turns out a name has already been set, we just discard (free)
2160          * the buffer we just allocated and reuse the current buffer
2161          * (all such buffers are THREAD_NAME_MAX bytes).
2162          *
2163          * Such an arrangement means over the lifetime of a kthread_t, t_name
2164          * is either NULL or has one value (the address of the buffer holding
2165          * the current thread name).   The assumption is that most kthread_t
2166          * instances will not have a name assigned, so dynamically allocating
2167          * the memory should minimize the footprint of this feature, while
2168          * having the buffer persist for the life of the thread simplifies
2169          * usage in highly constrained situations (e.g. dtrace).
2170          */
2171         if (name != NULL && name[0] != '\0') {
2172                 for (size_t i = 0; name[i] != '\0'; i++) {
2173                         if (!isprint(name[i]))
2174                                 return (EINVAL);
2175                 }
2176 
2177                 buf = kmem_zalloc(THREAD_NAME_MAX, KM_SLEEP);
2178                 (void) strlcpy(buf, name, THREAD_NAME_MAX);
2179         }
2180 
2181         mutex_enter(&ttoproc(t)->p_lock);
2182         if (t->t_name == NULL) {
2183                 t->t_name = buf;
2184         } else {
2185                 if (buf != NULL) {
2186                         (void) strlcpy(t->t_name, name, THREAD_NAME_MAX);
2187                         kmem_free(buf, THREAD_NAME_MAX);
2188                 } else {
2189                         bzero(t->t_name, THREAD_NAME_MAX);
2190                 }
2191         }
2192         mutex_exit(&ttoproc(t)->p_lock);
2193         return (0);
2194 }
2195 
2196 int
2197 thread_vsetname(kthread_t *t, const char *fmt, ...)
2198 {
2199         char name[THREAD_NAME_MAX];
2200         va_list va;
2201         int rc;
2202 
2203         va_start(va, fmt);
2204         rc = vsnprintf(name, sizeof (name), fmt, va);
2205         va_end(va);
2206 
2207         if (rc < 0)
2208                 return (EINVAL);
2209 
2210         if (rc >= sizeof (name))
2211                 return (ENAMETOOLONG);
2212 
2213         return (thread_setname(t, name));
2214 }
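
/*
 * Illustrative sketch (hypothetical names): kernel subsystems label their
 * worker threads with either interface, e.g.:
 *
 *	(void) thread_setname(curthread, "foo_worker");
 *	(void) thread_vsetname(curthread, "foo_worker_%u", instance);
 *
 * thread_vsetname() rejects names that would not fit (ENAMETOOLONG), while
 * thread_setname() silently truncates them to THREAD_NAME_MAX - 1 bytes.
 */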