/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc.  All rights reserved.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/signal.h>
#include <sys/stack.h>
#include <sys/pcb.h>
#include <sys/user.h>
#include <sys/systm.h>
#include <sys/sysinfo.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/cred.h>
#include <sys/resource.h>
#include <sys/task.h>
#include <sys/project.h>
#include <sys/proc.h>
#include <sys/debug.h>
#include <sys/disp.h>
#include <sys/class.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kp.h>
#include <sys/machlock.h>
#include <sys/kmem.h>
#include <sys/varargs.h>
#include <sys/turnstile.h>
#include <sys/poll.h>
#include <sys/vtrace.h>
#include <sys/callb.h>
#include <c2/audit.h>
#include <sys/tnf.h>
#include <sys/sobject.h>
#include <sys/cpupart.h>
#include <sys/pset.h>
#include <sys/door.h>
#include <sys/spl.h>
#include <sys/copyops.h>
#include <sys/rctl.h>
#include <sys/brand.h>
#include <sys/pool.h>
#include <sys/zone.h>
#include <sys/tsol/label.h>
#include <sys/tsol/tndb.h>
#include <sys/cpc_impl.h>
#include <sys/sdt.h>
#include <sys/reboot.h>
#include <sys/kdi.h>
#include <sys/schedctl.h>
#include <sys/waitq.h>
#include <sys/cpucaps.h>
#include <sys/kiconv.h>

struct kmem_cache *thread_cache;        /* cache of free threads */
struct kmem_cache *lwp_cache;           /* cache of free lwps */
struct kmem_cache *turnstile_cache;     /* cache of free turnstiles */

/*
 * allthreads is only for use by kmem_readers.  All kernel loops can use
 * the current thread as a start/end point.
 */
kthread_t *allthreads = &t0;        /* circular list of all threads */

static kcondvar_t reaper_cv;            /* synchronization var */
kthread_t       *thread_deathrow;       /* circular list of reapable threads */
kthread_t       *lwp_deathrow;          /* circular list of reapable threads */
kmutex_t        reaplock;               /* protects lwp and thread deathrows */
int     thread_reapcnt = 0;             /* number of threads on deathrow */
int     lwp_reapcnt = 0;                /* number of lwps on deathrow */
int     reaplimit = 16;                 /* delay reaping until reaplimit */

thread_free_lock_t      *thread_free_lock;
                                        /* protects tick thread from reaper */

extern int nthread;

/* System Scheduling classes. */
id_t    syscid;                         /* system scheduling class ID */
id_t    sysdccid = CLASS_UNUSED;        /* reset when SDC loads */

void    *segkp_thread;                  /* cookie for segkp pool */

int lwp_cache_sz = 32;
int t_cache_sz = 8;
static kt_did_t next_t_id = 1;

/* Default mode for thread binding to CPUs and processor sets */
int default_binding_mode = TB_ALLHARD;

/*
 * Min/Max stack sizes for stack size parameters
 */
#define MAX_STKSIZE     (32 * DEFAULTSTKSZ)
#define MIN_STKSIZE     DEFAULTSTKSZ

/*
 * default_stksize overrides lwp_default_stksize if it is set.
 */
int     default_stksize;
int     lwp_default_stksize;

static zone_key_t zone_thread_key;

unsigned int kmem_stackinfo;            /* stackinfo feature on-off */
kmem_stkinfo_t *kmem_stkinfo_log;       /* stackinfo circular log */
static kmutex_t kmem_stkinfo_lock;      /* protects kmem_stkinfo_log */

/*
 * forward declarations for internal thread specific data (tsd)
 */
static void *tsd_realloc(void *, size_t, size_t);

void thread_reaper(void);

/* forward declarations for stackinfo feature */
static void stkinfo_begin(kthread_t *);
static void stkinfo_end(kthread_t *);
static size_t stkinfo_percent(caddr_t, caddr_t, caddr_t);

/*ARGSUSED*/
static int
turnstile_constructor(void *buf, void *cdrarg, int kmflags)
{
        bzero(buf, sizeof (turnstile_t));
        return (0);
}

/*ARGSUSED*/
static void
turnstile_destructor(void *buf, void *cdrarg)
{
        turnstile_t *ts = buf;

        ASSERT(ts->ts_free == NULL);
        ASSERT(ts->ts_waiters == 0);
        ASSERT(ts->ts_inheritor == NULL);
        ASSERT(ts->ts_sleepq[0].sq_first == NULL);
        ASSERT(ts->ts_sleepq[1].sq_first == NULL);
}

void
thread_init(void)
{
        kthread_t *tp;
        extern char sys_name[];
        extern void idle();
        struct cpu *cpu = CPU;
        int i;
        kmutex_t *lp;

        mutex_init(&reaplock, NULL, MUTEX_SPIN, (void *)ipltospl(DISP_LEVEL));
        thread_free_lock =
            kmem_alloc(sizeof (thread_free_lock_t) * THREAD_FREE_NUM, KM_SLEEP);
        for (i = 0; i < THREAD_FREE_NUM; i++) {
                lp = &thread_free_lock[i].tf_lock;
                mutex_init(lp, NULL, MUTEX_DEFAULT, NULL);
        }

#if defined(__i386) || defined(__amd64)
        thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
            PTR24_ALIGN, NULL, NULL, NULL, NULL, NULL, 0);

        /*
         * "struct _klwp" includes a "struct pcb", which includes a
         * "struct fpu", which needs to be 64-byte aligned on amd64
         * (and even on i386) for xsave/xrstor.
         */
        lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
            64, NULL, NULL, NULL, NULL, NULL, 0);
#else
        /*
         * Allocate thread structures from static_arena.  This prevents
         * issues where a thread tries to relocate its own thread
         * structure and touches it after the mapping has been suspended.
         */
        thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
            PTR24_ALIGN, NULL, NULL, NULL, NULL, static_arena, 0);

        lwp_stk_cache_init();

        lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
            0, NULL, NULL, NULL, NULL, NULL, 0);
#endif

        turnstile_cache = kmem_cache_create("turnstile_cache",
            sizeof (turnstile_t), 0,
            turnstile_constructor, turnstile_destructor, NULL, NULL, NULL, 0);

        label_init();
        cred_init();

        /*
         * Initialize various resource management facilities.
         */
        rctl_init();
        cpucaps_init();
        /*
         * zone_init() should be called before project_init() so that the
         * project ID for the first project is initialized correctly.
         */
        zone_init();
        project_init();
        brand_init();
        kiconv_init();
        task_init();
        tcache_init();
        pool_init();

        curthread->t_ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);

        /*
         * Originally, we had two parameters to set default stack
         * size: one for lwp's (lwp_default_stksize), and one for
         * kernel-only threads (DEFAULTSTKSZ, a.k.a. _defaultstksz).
         * Now we have a third parameter that overrides both if it is
         * set to a legal stack size, called default_stksize.
         */

        if (default_stksize == 0) {
                default_stksize = DEFAULTSTKSZ;
        } else if (default_stksize % PAGESIZE != 0 ||
            default_stksize > MAX_STKSIZE ||
            default_stksize < MIN_STKSIZE) {
                cmn_err(CE_WARN, "Illegal stack size. Using %d",
                    (int)DEFAULTSTKSZ);
                default_stksize = DEFAULTSTKSZ;
        } else {
                lwp_default_stksize = default_stksize;
        }

        if (lwp_default_stksize == 0) {
                lwp_default_stksize = default_stksize;
        } else if (lwp_default_stksize % PAGESIZE != 0 ||
            lwp_default_stksize > MAX_STKSIZE ||
            lwp_default_stksize < MIN_STKSIZE) {
                cmn_err(CE_WARN, "Illegal stack size. Using %d",
                    default_stksize);
                lwp_default_stksize = default_stksize;
        }

        segkp_lwp = segkp_cache_init(segkp, lwp_cache_sz,
            lwp_default_stksize,
            (KPD_NOWAIT | KPD_HASREDZONE | KPD_LOCKED));

        segkp_thread = segkp_cache_init(segkp, t_cache_sz,
            default_stksize, KPD_HASREDZONE | KPD_LOCKED | KPD_NO_ANON);

        (void) getcid(sys_name, &syscid);
        curthread->t_cid = syscid;   /* current thread is t0 */

        /*
         * Set up the first CPU's idle thread.
         * It runs whenever the CPU has nothing worthwhile to do.
         */
        tp = thread_create(NULL, 0, idle, NULL, 0, &p0, TS_STOPPED, -1);
        cpu->cpu_idle_thread = tp;
        tp->t_preempt = 1;
        tp->t_disp_queue = cpu->cpu_disp;
        ASSERT(tp->t_disp_queue != NULL);
        tp->t_bound_cpu = cpu;
        tp->t_affinitycnt = 1;

        /*
         * Registering a thread in the callback table is usually
         * done in the initialization code of the thread. In this
         * case, we do it right after thread creation to avoid
         * blocking idle thread while registering itself. It also
         * avoids the possibility of reregistration in case a CPU
         * restarts its idle thread.
         */
        CALLB_CPR_INIT_SAFE(tp, "idle");

        /*
         * Create the thread_reaper daemon. From this point on, exited
         * threads will get reaped.
         */
        (void) thread_create(NULL, 0, (void (*)())thread_reaper,
            NULL, 0, &p0, TS_RUN, minclsyspri);

        /*
         * Finish initializing the kernel memory allocator now that
         * thread_create() is available.
         */
        kmem_thread_init();

        if (boothowto & RB_DEBUG)
                kdi_dvec_thravail();
}

/*
 * Create a thread.
 *
 * thread_create() blocks for memory if necessary.  It never fails.
 *
 * If stk is NULL, the thread is created at the base of the stack
 * and cannot be swapped.
 */
kthread_t *
thread_create(
        caddr_t stk,
        size_t  stksize,
        void    (*proc)(),
        void    *arg,
        size_t  len,
        proc_t   *pp,
        int     state,
        pri_t   pri)
{
        kthread_t *t;
        extern struct classfuncs sys_classfuncs;
        turnstile_t *ts;

        /*
         * Every thread keeps a turnstile around in case it needs to block.
         * The only reason the turnstile is not simply part of the thread
         * structure is that we may have to break the association whenever
         * more than one thread blocks on a given synchronization object.
         * From a memory-management standpoint, turnstiles are like the
         * "attached mblks" that hang off dblks in the streams allocator.
         */
        ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);

        if (stk == NULL) {
                /*
                 * alloc both thread and stack in segkp chunk
                 */

                if (stksize < default_stksize)
                        stksize = default_stksize;

                if (stksize == default_stksize) {
                        stk = (caddr_t)segkp_cache_get(segkp_thread);
                } else {
                        stksize = roundup(stksize, PAGESIZE);
                        stk = (caddr_t)segkp_get(segkp, stksize,
                            (KPD_HASREDZONE | KPD_NO_ANON | KPD_LOCKED));
                }

                ASSERT(stk != NULL);

                /*
                 * The machine-dependent mutex code may require that
                 * thread pointers (since they may be used for mutex owner
                 * fields) have certain alignment requirements.
                 * PTR24_ALIGN is the size of the alignment quanta.
                 * XXX - assumes stack grows toward low addresses.
                 */
                if (stksize <= sizeof (kthread_t) + PTR24_ALIGN)
                        cmn_err(CE_PANIC, "thread_create: proposed stack size"
                            " too small to hold thread.");
#ifdef STACK_GROWTH_DOWN
                stksize -= SA(sizeof (kthread_t) + PTR24_ALIGN - 1);
                stksize &= -PTR24_ALIGN;    /* make thread aligned */
                t = (kthread_t *)(stk + stksize);
                bzero(t, sizeof (kthread_t));
                if (audit_active)
                        audit_thread_create(t);
                t->t_stk = stk + stksize;
                t->t_stkbase = stk;
#else   /* stack grows to larger addresses */
                stksize -= SA(sizeof (kthread_t));
                t = (kthread_t *)(stk);
                bzero(t, sizeof (kthread_t));
                t->t_stk = stk + sizeof (kthread_t);
                t->t_stkbase = stk + stksize + sizeof (kthread_t);
#endif  /* STACK_GROWTH_DOWN */
                t->t_flag |= T_TALLOCSTK;
                t->t_swap = stk;
        } else {
                t = kmem_cache_alloc(thread_cache, KM_SLEEP);
                bzero(t, sizeof (kthread_t));
                ASSERT(((uintptr_t)t & (PTR24_ALIGN - 1)) == 0);
                if (audit_active)
                        audit_thread_create(t);
                /*
                 * Initialize t_stk to the kernel stack pointer to use
                 * upon entry to the kernel
                 */
#ifdef STACK_GROWTH_DOWN
                t->t_stk = stk + stksize;
                t->t_stkbase = stk;
#else
                t->t_stk = stk;                      /* 3b2-like */
                t->t_stkbase = stk + stksize;
#endif /* STACK_GROWTH_DOWN */
        }

        if (kmem_stackinfo != 0) {
                stkinfo_begin(t);
        }

        t->t_ts = ts;

        /*
         * p_cred could be NULL if thread_create() is called before
         * cred_init() is called in main().
         */
        mutex_enter(&pp->p_crlock);
        if (pp->p_cred)
                crhold(t->t_cred = pp->p_cred);
        mutex_exit(&pp->p_crlock);
        t->t_start = gethrestime_sec();
        t->t_startpc = proc;
        t->t_procp = pp;
        t->t_clfuncs = &sys_classfuncs.thread;
        t->t_cid = syscid;
        t->t_pri = pri;
        t->t_stime = ddi_get_lbolt();
        t->t_schedflag = TS_LOAD | TS_DONT_SWAP;
        t->t_bind_cpu = PBIND_NONE;
        t->t_bindflag = (uchar_t)default_binding_mode;
        t->t_bind_pset = PS_NONE;
        t->t_plockp = &pp->p_lock;
        t->t_copyops = NULL;
        t->t_taskq = NULL;
        t->t_anttime = 0;
        t->t_hatdepth = 0;

        t->t_dtrace_vtime = 1;       /* assure vtimestamp is always non-zero */

        CPU_STATS_ADDQ(CPU, sys, nthreads, 1);
#ifndef NPROBE
        /* Kernel probe */
        tnf_thread_create(t);
#endif /* NPROBE */
        LOCK_INIT_CLEAR(&t->t_lock);

        /*
         * Callers who give us a NULL proc must do their own
         * stack initialization, e.g., lwp_create().
         */
        if (proc != NULL) {
                t->t_stk = thread_stk_init(t->t_stk);
                thread_load(t, proc, arg, len);
        }

        /*
         * Put a hold on project0. If this thread is actually in a
         * different project, then t_proj will be changed later in
         * lwp_create().  All kernel-only threads must be in project 0.
         */
        t->t_proj = project_hold(proj0p);

        lgrp_affinity_init(&t->t_lgrp_affinity);

        mutex_enter(&pidlock);
        nthread++;
        t->t_did = next_t_id++;
        t->t_prev = curthread->t_prev;
        t->t_next = curthread;

        /*
         * Add the thread to the list of all threads, and initialize
         * its t_cpu pointer.  We need to block preemption since
         * cpu_offline walks the thread list looking for threads
         * with t_cpu pointing to the CPU being offlined.  We want
         * to make sure that the list is consistent and that if t_cpu
         * is set, the thread is on the list.
         */
        kpreempt_disable();
        curthread->t_prev->t_next = t;
        curthread->t_prev = t;

        /*
         * Threads should never have a NULL t_cpu pointer so assign it
         * here.  If the thread is being created with state TS_RUN a
         * better CPU may be chosen when it is placed on the run queue.
         *
         * We need to keep kernel preemption disabled when setting all
         * three fields to keep them in sync.  Also, always create in
         * the default partition since that's where kernel threads go
         * (if this isn't a kernel thread, t_cpupart will be changed
         * in lwp_create before setting the thread runnable).
         */
        t->t_cpupart = &cp_default;

        /*
         * For now, affiliate this thread with the root lgroup.
         * Since the kernel does not (presently) allocate its memory
         * in a locality aware fashion, the root is an appropriate home.
         * If this thread is later associated with an lwp, it will have
         * its lgroup reassigned at that time.
         */
        lgrp_move_thread(t, &cp_default.cp_lgrploads[LGRP_ROOTID], 1);

        /*
         * Inherit the current cpu.  If this cpu isn't part of the chosen
         * lgroup, a new cpu will be chosen by cpu_choose when the thread
         * is ready to run.
         */
        if (CPU->cpu_part == &cp_default)
                t->t_cpu = CPU;
        else
                t->t_cpu = disp_lowpri_cpu(cp_default.cp_cpulist, t->t_lpl,
                    t->t_pri, NULL);

        t->t_disp_queue = t->t_cpu->cpu_disp;
        kpreempt_enable();

        /*
         * Initialize thread state and the dispatcher lock pointer.
         * Need to hold onto pidlock to block allthreads walkers until
         * the state is set.
         */
        switch (state) {
        case TS_RUN:
                curthread->t_oldspl = splhigh();     /* get dispatcher spl */
                THREAD_SET_STATE(t, TS_STOPPED, &transition_lock);
                CL_SETRUN(t);
                thread_unlock(t);
                break;

        case TS_ONPROC:
                THREAD_ONPROC(t, t->t_cpu);
                break;

        case TS_FREE:
                /*
                 * Free state will be used for intr threads.
                 * The interrupt routine must set the thread dispatcher
                 * lock pointer (t_lockp) if starting on a CPU
                 * other than the current one.
                 */
                THREAD_FREEINTR(t, CPU);
                break;

        case TS_STOPPED:
                THREAD_SET_STATE(t, TS_STOPPED, &stop_lock);
                break;

        default:                        /* TS_SLEEP, TS_ZOMB or TS_TRANS */
                cmn_err(CE_PANIC, "thread_create: invalid state %d", state);
        }
        mutex_exit(&pidlock);
        return (t);
}
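
/*
 * Illustrative sketch: the typical way a subsystem creates a kernel-only
 * service thread.  Passing a NULL stk makes thread_create() allocate the
 * stack (and the kthread_t within it) at the default size; TS_RUN lets the
 * dispatcher place the thread.  The THREAD_EXAMPLES guard and all
 * example_* names are hypothetical and exist only for illustration; this
 * block is never compiled.
 */
#ifdef THREAD_EXAMPLES
static void
example_service(void)
{
        /* ... perform the service's work ... */
        thread_exit();          /* a kernel thread must exit explicitly */
}

static void
example_start_service(void)
{
        (void) thread_create(NULL, 0, example_service, NULL, 0, &p0,
            TS_RUN, minclsyspri);
}
#endif  /* THREAD_EXAMPLES */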

/*
 * Move thread to project0 and take care of project reference counters.
 */
void
thread_rele(kthread_t *t)
{
        kproject_t *kpj;

        thread_lock(t);

        ASSERT(t == curthread || t->t_state == TS_FREE || t->t_procp == &p0);
        kpj = ttoproj(t);
        t->t_proj = proj0p;

        thread_unlock(t);

        if (kpj != proj0p) {
                project_rele(kpj);
                (void) project_hold(proj0p);
        }
}

void
thread_exit(void)
{
        kthread_t *t = curthread;

        if ((t->t_proc_flag & TP_ZTHREAD) != 0)
                cmn_err(CE_PANIC, "thread_exit: zthread_exit() not called");

        tsd_exit();             /* Clean up this thread's TSD */

        kcpc_passivate();       /* clean up performance counter state */

        /*
         * No kernel thread should have called poll() without arranging
         * for pollcleanup() to be called here.
         */
        ASSERT(t->t_pollstate == NULL);
        ASSERT(t->t_schedctl == NULL);
        if (t->t_door)
                door_slam();    /* in case thread did an upcall */

#ifndef NPROBE
        /* Kernel probe */
        if (t->t_tnf_tpdp)
                tnf_thread_exit();
#endif /* NPROBE */

        thread_rele(t);
        t->t_preempt++;

        /*
         * remove thread from the all threads list so that
         * death-row can use the same pointers.
         */
        mutex_enter(&pidlock);
        t->t_next->t_prev = t->t_prev;
        t->t_prev->t_next = t->t_next;
        ASSERT(allthreads != t);        /* t0 never exits */
        cv_broadcast(&t->t_joincv);      /* wake up anyone in thread_join */
        mutex_exit(&pidlock);

        if (t->t_ctx != NULL)
                exitctx(t);
        if (t->t_procp->p_pctx != NULL)
                exitpctx(t->t_procp);

        if (kmem_stackinfo != 0) {
                stkinfo_end(t);
        }

        t->t_state = TS_ZOMB;        /* set zombie thread */

        swtch_from_zombie();    /* give up the CPU */
        /* NOTREACHED */
}

/*
 * Check to see if the specified thread is active (defined as being on
 * the thread list).  This is certainly a slow way to do this; if there's
 * ever a reason to speed it up, we could maintain a hash table of active
 * threads indexed by their t_did.
 */
static kthread_t *
did_to_thread(kt_did_t tid)
{
        kthread_t *t;

        ASSERT(MUTEX_HELD(&pidlock));
        for (t = curthread->t_next; t != curthread; t = t->t_next) {
                if (t->t_did == tid)
                        break;
        }
        if (t->t_did == tid)
                return (t);
        else
                return (NULL);
}

/*
 * Wait for specified thread to exit.  Returns immediately if the thread
 * could not be found, meaning that it has either already exited or never
 * existed.
 */
void
thread_join(kt_did_t tid)
{
        kthread_t *t;

        ASSERT(tid != curthread->t_did);
        ASSERT(tid != t0.t_did);

        mutex_enter(&pidlock);
        /*
         * Make sure we check that the thread is on the thread list
         * before blocking on it; otherwise we could end up blocking on
         * a cv that's already been freed.  In other words, don't cache
         * the thread pointer across calls to cv_wait.
         *
         * The choice of loop invariant means that whenever a thread
         * is taken off the allthreads list, a cv_broadcast must be
         * performed on that thread's t_joincv to wake up any waiters.
         * The broadcast doesn't have to happen right away, but it
         * shouldn't be postponed indefinitely (e.g., by doing it in
         * thread_free(), which may only be executed when the deathrow
         * queue is processed).
         */
        while (t = did_to_thread(tid))
                cv_wait(&t->t_joincv, &pidlock);
        mutex_exit(&pidlock);
}
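
/*
 * Illustrative sketch: pairing thread_create() with thread_join().  The
 * t_did must be captured before the new thread can exit, because the
 * kthread_t itself may be reaped once it runs; thread_join() consumes the
 * ID, not the pointer.  It reuses example_service from the hypothetical
 * sketch above; names are for illustration only.
 */
#ifdef THREAD_EXAMPLES
static void
example_create_and_join(void)
{
        kthread_t *t;
        kt_did_t did;

        t = thread_create(NULL, 0, example_service, NULL, 0, &p0,
            TS_RUN, minclsyspri);
        did = t->t_did;         /* save the ID; t itself may be reaped */
        thread_join(did);       /* returns once the thread has exited */
}
#endif  /* THREAD_EXAMPLES */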

void
thread_free_prevent(kthread_t *t)
{
        kmutex_t *lp;

        lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock;
        mutex_enter(lp);
}

void
thread_free_allow(kthread_t *t)
{
        kmutex_t *lp;

        lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock;
        mutex_exit(lp);
}

static void
thread_free_barrier(kthread_t *t)
{
        kmutex_t *lp;

        lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock;
        mutex_enter(lp);
        mutex_exit(lp);
}
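
/*
 * Illustrative sketch: the pattern the tick accounting code relies on.
 * Holding the thread's free-lock bucket between thread_free_prevent() and
 * thread_free_allow() keeps thread_free() from completing (it blocks in
 * thread_free_barrier()), so the kthread_t may be examined safely even if
 * the thread is exiting.  The function name under the hypothetical
 * THREAD_EXAMPLES guard is for illustration only.
 */
#ifdef THREAD_EXAMPLES
static void
example_examine_thread(kthread_t *t)
{
        thread_free_prevent(t);         /* t cannot be freed past this point */
        /* ... safely read fields of t ... */
        thread_free_allow(t);           /* the reaper may now free t */
}
#endif  /* THREAD_EXAMPLES */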

void
thread_free(kthread_t *t)
{
        boolean_t allocstk = (t->t_flag & T_TALLOCSTK);
        klwp_t *lwp = t->t_lwp;
        caddr_t swap = t->t_swap;

        ASSERT(t != &t0 && t->t_state == TS_FREE);
        ASSERT(t->t_door == NULL);
        ASSERT(t->t_schedctl == NULL);
        ASSERT(t->t_pollstate == NULL);

        t->t_pri = 0;
        t->t_pc = 0;
        t->t_sp = 0;
        t->t_wchan0 = NULL;
        t->t_wchan = NULL;
        if (t->t_cred != NULL) {
                crfree(t->t_cred);
                t->t_cred = 0;
        }
        if (t->t_pdmsg) {
                kmem_free(t->t_pdmsg, strlen(t->t_pdmsg) + 1);
                t->t_pdmsg = NULL;
        }
        if (audit_active)
                audit_thread_free(t);
#ifndef NPROBE
        if (t->t_tnf_tpdp)
                tnf_thread_free(t);
#endif /* NPROBE */
        if (t->t_cldata) {
                CL_EXITCLASS(t->t_cid, (caddr_t *)t->t_cldata);
        }
        if (t->t_rprof != NULL) {
                kmem_free(t->t_rprof, sizeof (*t->t_rprof));
                t->t_rprof = NULL;
        }
        t->t_lockp = NULL;   /* nothing should try to lock this thread now */
        if (lwp)
                lwp_freeregs(lwp, 0);
        if (t->t_ctx)
                freectx(t, 0);
        t->t_stk = NULL;
        if (lwp)
                lwp_stk_fini(lwp);
        lock_clear(&t->t_lock);

        if (t->t_ts->ts_waiters > 0)
                panic("thread_free: turnstile still active");

        kmem_cache_free(turnstile_cache, t->t_ts);

        free_afd(&t->t_activefd);

        /*
         * Barrier for the tick accounting code.  The tick accounting code
         * holds this lock to keep the thread from going away while it's
         * looking at it.
         */
        thread_free_barrier(t);

        ASSERT(ttoproj(t) == proj0p);
        project_rele(ttoproj(t));

        lgrp_affinity_free(&t->t_lgrp_affinity);

        mutex_enter(&pidlock);
        nthread--;
        mutex_exit(&pidlock);

        /*
         * Free thread, lwp and stack.  This needs to be done carefully, since
         * if T_TALLOCSTK is set, the thread is part of the stack.
         */
        t->t_lwp = NULL;
        t->t_swap = NULL;

        if (swap) {
                segkp_release(segkp, swap);
        }
        if (lwp) {
                kmem_cache_free(lwp_cache, lwp);
        }
        if (!allocstk) {
                kmem_cache_free(thread_cache, t);
        }
}

/*
 * Removes threads associated with the given zone from a deathrow queue.
 * tp is a pointer to the head of the deathrow queue, and countp is a
 * pointer to the current deathrow count.  Returns a linked list of
 * threads removed from the list.
 */
static kthread_t *
thread_zone_cleanup(kthread_t **tp, int *countp, zoneid_t zoneid)
{
        kthread_t *tmp, *list = NULL;
        cred_t *cr;

        ASSERT(MUTEX_HELD(&reaplock));
        while (*tp != NULL) {
                if ((cr = (*tp)->t_cred) != NULL && crgetzoneid(cr) == zoneid) {
                        tmp = *tp;
                        *tp = tmp->t_forw;
                        tmp->t_forw = list;
                        list = tmp;
                        (*countp)--;
                } else {
                        tp = &(*tp)->t_forw;
                }
        }
        return (list);
}

static void
thread_reap_list(kthread_t *t)
{
        kthread_t *next;

        while (t != NULL) {
                next = t->t_forw;
                thread_free(t);
                t = next;
        }
}

/* ARGSUSED */
static void
thread_zone_destroy(zoneid_t zoneid, void *unused)
{
        kthread_t *t, *l;

        mutex_enter(&reaplock);
        /*
         * Pull threads and lwps associated with zone off deathrow lists.
         */
        t = thread_zone_cleanup(&thread_deathrow, &thread_reapcnt, zoneid);
        l = thread_zone_cleanup(&lwp_deathrow, &lwp_reapcnt, zoneid);
        mutex_exit(&reaplock);

        /*
         * Guard against race condition in mutex_owner_running:
         *      thread=owner(mutex)
         *      <interrupt>
         *                              thread exits mutex
         *                              thread exits
         *                              thread reaped
         *                              thread struct freed
         * cpu = thread->t_cpu <- BAD POINTER DEREFERENCE.
         * A cross call to all cpus will cause the interrupt handler
         * to reset the PC if it is in mutex_owner_running, refreshing
         * stale thread pointers.
         */
        mutex_sync();   /* sync with mutex code */

        /*
         * Reap threads
         */
        thread_reap_list(t);

        /*
         * Reap lwps
         */
        thread_reap_list(l);
}

/*
 * cleanup zombie threads that are on deathrow.
 */
void
thread_reaper()
{
        kthread_t *t, *l;
        callb_cpr_t cprinfo;

        /*
         * Register callback to clean up threads when zone is destroyed.
         */
        zone_key_create(&zone_thread_key, NULL, NULL, thread_zone_destroy);

        CALLB_CPR_INIT(&cprinfo, &reaplock, callb_generic_cpr, "t_reaper");
        for (;;) {
                mutex_enter(&reaplock);
                while (thread_deathrow == NULL && lwp_deathrow == NULL) {
                        CALLB_CPR_SAFE_BEGIN(&cprinfo);
                        cv_wait(&reaper_cv, &reaplock);
                        CALLB_CPR_SAFE_END(&cprinfo, &reaplock);
                }
                /*
                 * mutex_sync() needs to be called when reaping, but
                 * not too often, so we limit the reaping rate to once
                 * per second.  reaplimit is the maximum rate at which
                 * threads can be freed; it does not impact thread
                 * destruction/creation.
                 */
                t = thread_deathrow;
                l = lwp_deathrow;
                thread_deathrow = NULL;
                lwp_deathrow = NULL;
                thread_reapcnt = 0;
                lwp_reapcnt = 0;
                mutex_exit(&reaplock);

                /*
                 * Guard against race condition in mutex_owner_running:
                 *      thread=owner(mutex)
                 *      <interrupt>
                 *                              thread exits mutex
                 *                              thread exits
                 *                              thread reaped
                 *                              thread struct freed
                 * cpu = thread->t_cpu <- BAD POINTER DEREFERENCE.
                 * A cross call to all cpus will cause the interrupt handler
                 * to reset the PC if it is in mutex_owner_running, refreshing
                 * stale thread pointers.
                 */
                mutex_sync();   /* sync with mutex code */
                /*
                 * Reap threads
                 */
                thread_reap_list(t);

                /*
                 * Reap lwps
                 */
                thread_reap_list(l);
                delay(hz);
        }
}

/*
 * This is called by lwp_create(), etc., to put an lwp_deathrow thread onto
 * thread_deathrow.  The thread's state has already been changed to TS_FREE
 * to indicate that it is reapable.  The caller already holds the reaplock,
 * and the thread has already been freed.
 */
void
reapq_move_lq_to_tq(kthread_t *t)
{
        ASSERT(t->t_state == TS_FREE);
        ASSERT(MUTEX_HELD(&reaplock));
        t->t_forw = thread_deathrow;
        thread_deathrow = t;
        thread_reapcnt++;
        if (lwp_reapcnt + thread_reapcnt > reaplimit)
                cv_signal(&reaper_cv);  /* wake the reaper */
}

/*
 * This is called by resume() to put a zombie thread onto deathrow.
 * The thread's state is changed to TS_FREE to indicate that it is reapable.
 * This is called from the idle thread so it must not block - just spin.
 */
void
reapq_add(kthread_t *t)
{
        mutex_enter(&reaplock);

        /*
         * lwp_deathrow contains threads with lwp linkage and
         * swappable thread stacks which have the default stacksize.
         * These threads' lwps and stacks may be reused by lwp_create().
         *
         * Anything else goes on thread_deathrow(), where it will eventually
         * be thread_free()d.
         */
        if (t->t_flag & T_LWPREUSE) {
                ASSERT(ttolwp(t) != NULL);
                t->t_forw = lwp_deathrow;
                lwp_deathrow = t;
                lwp_reapcnt++;
        } else {
                t->t_forw = thread_deathrow;
                thread_deathrow = t;
                thread_reapcnt++;
        }
        if (lwp_reapcnt + thread_reapcnt > reaplimit)
                cv_signal(&reaper_cv);      /* wake the reaper */
        t->t_state = TS_FREE;
        lock_clear(&t->t_lock);

        /*
         * Before we return, we need to grab and drop the thread lock for
         * the dead thread.  At this point, the current thread is the idle
         * thread, and the dead thread's CPU lock points to the current
         * CPU -- and we must grab and drop the lock to synchronize with
         * a racing thread walking a blocking chain that the zombie thread
         * was recently in.  By this point, that blocking chain is (by
         * definition) stale:  the dead thread is not holding any locks, and
         * is therefore not in any blocking chains -- but if we do not regrab
         * our lock before freeing the dead thread's data structures, the
         * thread walking the (stale) blocking chain will die on memory
         * corruption when it attempts to drop the dead thread's lock.  We
         * only need do this once because there is no way for the dead thread
         * to ever again be on a blocking chain:  once we have grabbed and
         * dropped the thread lock, we are guaranteed that anyone that could
         * have seen this thread in a blocking chain can no longer see it.
         */
        thread_lock(t);
        thread_unlock(t);

        mutex_exit(&reaplock);
}

/*
 * Install thread context ops for the given thread.
 */
void
installctx(
        kthread_t *t,
        void    *arg,
        void    (*save)(void *),
        void    (*restore)(void *),
        void    (*fork)(void *, void *),
        void    (*lwp_create)(void *, void *),
        void    (*exit)(void *),
        void    (*free)(void *, int))
{
        struct ctxop *ctx;

        ctx = kmem_alloc(sizeof (struct ctxop), KM_SLEEP);
        ctx->save_op = save;
        ctx->restore_op = restore;
        ctx->fork_op = fork;
        ctx->lwp_create_op = lwp_create;
        ctx->exit_op = exit;
        ctx->free_op = free;
        ctx->arg = arg;
        ctx->next = t->t_ctx;
        t->t_ctx = ctx;
}

/*
 * Remove the thread context ops from a thread.
 */
int
removectx(
        kthread_t *t,
        void    *arg,
        void    (*save)(void *),
        void    (*restore)(void *),
        void    (*fork)(void *, void *),
        void    (*lwp_create)(void *, void *),
        void    (*exit)(void *),
        void    (*free)(void *, int))
{
        struct ctxop *ctx, *prev_ctx;

        /*
         * The incoming kthread_t (which is the thread for which the
         * context ops will be removed) should be one of the following:
         *
         * a) the current thread,
         *
         * b) a thread of a process that's being forked (SIDL),
         *
         * c) a thread that belongs to the same process as the current
         *    thread and for which the current thread is the agent thread,
         *
         * d) a thread that is TS_STOPPED which is indicative of it
         *    being (if curthread is not an agent) a thread being created
         *    as part of an lwp creation.
         */
        ASSERT(t == curthread || ttoproc(t)->p_stat == SIDL ||
            ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);

        /*
         * Serialize modifications to t->t_ctx to prevent the agent thread
         * and the target thread from racing with each other during lwp exit.
         */
        mutex_enter(&t->t_ctx_lock);
        prev_ctx = NULL;
        kpreempt_disable();
        for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next) {
                if (ctx->save_op == save && ctx->restore_op == restore &&
                    ctx->fork_op == fork && ctx->lwp_create_op == lwp_create &&
                    ctx->exit_op == exit && ctx->free_op == free &&
                    ctx->arg == arg) {
                        if (prev_ctx)
                                prev_ctx->next = ctx->next;
                        else
                                t->t_ctx = ctx->next;
                        mutex_exit(&t->t_ctx_lock);
                        if (ctx->free_op != NULL)
                                (ctx->free_op)(ctx->arg, 0);
                        kmem_free(ctx, sizeof (struct ctxop));
                        kpreempt_enable();
                        return (1);
                }
                prev_ctx = ctx;
        }
        mutex_exit(&t->t_ctx_lock);
        kpreempt_enable();

        return (0);
}
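
/*
 * Illustrative sketch: a subsystem with per-thread hardware state installs
 * save/restore ops, which savectx()/restorectx() invoke at every context
 * switch, and later removes them by passing the identical argument tuple
 * (removectx() matches on all seven values).  All names under the
 * hypothetical THREAD_EXAMPLES guard are for illustration only.
 */
#ifdef THREAD_EXAMPLES
static void
example_ctx_save(void *arg)
{
        /* ... save this thread's private hardware state into arg ... */
}

static void
example_ctx_restore(void *arg)
{
        /* ... reload the state from arg when switching back in ... */
}

static void
example_ctx_attach_detach(kthread_t *t, void *state)
{
        installctx(t, state, example_ctx_save, example_ctx_restore,
            NULL, NULL, NULL, NULL);
        /* ... later, detach with the same argument tuple ... */
        (void) removectx(t, state, example_ctx_save, example_ctx_restore,
            NULL, NULL, NULL, NULL);
}
#endif  /* THREAD_EXAMPLES */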

void
savectx(kthread_t *t)
{
        struct ctxop *ctx;

        ASSERT(t == curthread);
        for (ctx = t->t_ctx; ctx != 0; ctx = ctx->next)
                if (ctx->save_op != NULL)
                        (ctx->save_op)(ctx->arg);
}

void
restorectx(kthread_t *t)
{
        struct ctxop *ctx;

        ASSERT(t == curthread);
        for (ctx = t->t_ctx; ctx != 0; ctx = ctx->next)
                if (ctx->restore_op != NULL)
                        (ctx->restore_op)(ctx->arg);
}

void
forkctx(kthread_t *t, kthread_t *ct)
{
        struct ctxop *ctx;

        for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
                if (ctx->fork_op != NULL)
                        (ctx->fork_op)(t, ct);
}

/*
 * Note that this operator is only invoked via the _lwp_create
 * system call.  The system may have other reasons to create lwps,
 * e.g., the agent lwp or the doors unreferenced lwp.
 */
void
lwp_createctx(kthread_t *t, kthread_t *ct)
{
        struct ctxop *ctx;

        for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
                if (ctx->lwp_create_op != NULL)
                        (ctx->lwp_create_op)(t, ct);
}

/*
 * exitctx is called from thread_exit() and lwp_exit() to perform any actions
 * needed when the thread/LWP leaves the processor for the last time. This
 * routine is not intended to deal with freeing memory; freectx() is used for
 * that purpose during thread_free(). This routine is provided to allow for
 * clean-up that can't wait until thread_free().
 */
void
exitctx(kthread_t *t)
{
        struct ctxop *ctx;

        for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
                if (ctx->exit_op != NULL)
                        (ctx->exit_op)(t);
}

/*
 * freectx is called from thread_free() and exec() to get
 * rid of old thread context ops.
 */
void
freectx(kthread_t *t, int isexec)
{
        struct ctxop *ctx;

        kpreempt_disable();
        while ((ctx = t->t_ctx) != NULL) {
                t->t_ctx = ctx->next;
                if (ctx->free_op != NULL)
                        (ctx->free_op)(ctx->arg, isexec);
                kmem_free(ctx, sizeof (struct ctxop));
        }
        kpreempt_enable();
}

/*
 * freectx_ctx is called from lwp_create() when lwp is reused from
 * lwp_deathrow and its thread structure is added to thread_deathrow.
 * The thread structure to which this ctx was attached may be already
 * freed by the thread reaper so free_op implementations shouldn't rely
 * on thread structure to which this ctx was attached still being around.
 */
void
freectx_ctx(struct ctxop *ctx)
{
        struct ctxop *nctx;

        ASSERT(ctx != NULL);

        kpreempt_disable();
        do {
                nctx = ctx->next;
                if (ctx->free_op != NULL)
                        (ctx->free_op)(ctx->arg, 0);
                kmem_free(ctx, sizeof (struct ctxop));
        } while ((ctx = nctx) != NULL);
        kpreempt_enable();
}

/*
 * Set the thread running; arrange for it to be swapped in if necessary.
 */
void
setrun_locked(kthread_t *t)
{
        ASSERT(THREAD_LOCK_HELD(t));
        if (t->t_state == TS_SLEEP) {
                /*
                 * Take off sleep queue.
                 */
                SOBJ_UNSLEEP(t->t_sobj_ops, t);
        } else if (t->t_state & (TS_RUN | TS_ONPROC)) {
                /*
                 * Already on dispatcher queue.
                 */
                return;
        } else if (t->t_state == TS_WAIT) {
                waitq_setrun(t);
        } else if (t->t_state == TS_STOPPED) {
                /*
                 * All of the senders of SIGCONT (TC_XSTART) and /proc
                 * (TC_PSTART) and lwp_continue() (TC_CSTART) must have
                 * requested that the thread be run.
                 * Just calling setrun() is not sufficient to set a stopped
                 * thread running.  TP_TXSTART is always set if the thread
                 * is not stopped by a jobcontrol stop signal.
                 * TP_TPSTART is always set if /proc is not controlling it.
                 * TP_TCSTART is always set if lwp_suspend() didn't stop it.
                 * The thread won't be stopped unless one of these
                 * three mechanisms did it.
                 *
                 * These flags must be set before calling setrun_locked(t).
                 * They can't be passed as arguments because the streams
                 * code calls setrun() indirectly and the mechanism for
                 * doing so admits only one argument.  Note that the
                 * thread must be locked in order to change t_schedflags.
                 */
                if ((t->t_schedflag & TS_ALLSTART) != TS_ALLSTART)
                        return;
                /*
                 * Process is no longer stopped (a thread is running).
                 */
                t->t_whystop = 0;
                t->t_whatstop = 0;
                /*
                 * Strictly speaking, we do not have to clear these
                 * flags here; they are cleared on entry to stop().
                 * However, they are confusing when doing kernel
                 * debugging or when they are revealed by ps(1).
                 */
                t->t_schedflag &= ~TS_ALLSTART;
                THREAD_TRANSITION(t);   /* drop stopped-thread lock */
                ASSERT(t->t_lockp == &transition_lock);
                ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL);
                /*
                 * Let the class put the process on the dispatcher queue.
                 */
                CL_SETRUN(t);
        }
}

void
setrun(kthread_t *t)
{
        thread_lock(t);
        setrun_locked(t);
        thread_unlock(t);
}

/*
 * Unpin an interrupted thread.
 *      When an interrupt occurs, the interrupt is handled on the stack
 *      of an interrupt thread, taken from a pool linked to the CPU structure.
 *
 *      When swtch() is switching away from an interrupt thread because it
 *      blocked or was preempted, this routine is called to complete the
 *      saving of the interrupted thread state, and returns the interrupted
 *      thread pointer so it may be resumed.
 *
 *      Called by swtch() only at high spl.
 */
kthread_t *
thread_unpin()
{
        kthread_t       *t = curthread; /* current thread */
        kthread_t       *itp;           /* interrupted thread */
        int             i;              /* interrupt level */
        extern int      intr_passivate();

        ASSERT(t->t_intr != NULL);

        itp = t->t_intr;             /* interrupted thread */
        t->t_intr = NULL;            /* clear interrupt ptr */

        /*
         * Get state from interrupt thread for the one
         * it interrupted.
         */

        i = intr_passivate(t, itp);

        TRACE_5(TR_FAC_INTR, TR_INTR_PASSIVATE,
            "intr_passivate:level %d curthread %p (%T) ithread %p (%T)",
            i, t, t, itp, itp);

        /*
         * Dissociate the current thread from the interrupted thread's LWP.
         */
        t->t_lwp = NULL;

        /*
         * Interrupt handlers above the level that spinlocks block must
         * not block.
         */
#if DEBUG
        if (i < 0 || i > LOCK_LEVEL)
                cmn_err(CE_PANIC, "thread_unpin: ipl out of range %x", i);
#endif

        /*
         * Compute the CPU's base interrupt level based on the active
         * interrupts.
         */
        ASSERT(CPU->cpu_intr_actv & (1 << i));
        set_base_spl();

        return (itp);
}

/*
 * Create and initialize an interrupt thread.
 *      Called at spl7() or better.
 */
1363 void
1364 thread_create_intr(struct cpu *cp)
1365 {
1366         kthread_t *tp;
1367 
1368         tp = thread_create(NULL, 0,
1369             (void (*)())thread_create_intr, NULL, 0, &p0, TS_ONPROC, 0);
1370 
1371         /*
1372          * Set the thread in the TS_FREE state.  The state will change
1373          * to TS_ONPROC only while the interrupt is active.  Think of these
1374          * as being on a private free list for the CPU.  Being TS_FREE keeps
1375          * inactive interrupt threads out of debugger thread lists.
1376          *
1377          * We cannot call thread_create with TS_FREE because of the current
1378          * checks there for ONPROC.  Fix this when thread_create takes flags.
1379          */
1380         THREAD_FREEINTR(tp, cp);
1381 
1382         /*
1383          * Nobody should ever reference the credentials of an interrupt
1384          * thread so make it NULL to catch any such references.
1385          */
1386         tp->t_cred = NULL;
1387         tp->t_flag |= T_INTR_THREAD;
1388         tp->t_cpu = cp;
1389         tp->t_bound_cpu = cp;
1390         tp->t_disp_queue = cp->cpu_disp;
1391         tp->t_affinitycnt = 1;
1392         tp->t_preempt = 1;
1393 
1394         /*
1395          * Don't make a user-requested binding on this thread so that
1396          * the processor can be offlined.
1397          */
1398         tp->t_bind_cpu = PBIND_NONE; /* no USER-requested binding */
1399         tp->t_bind_pset = PS_NONE;
1400 
1401 #if defined(__i386) || defined(__amd64)
1402         tp->t_stk -= STACK_ALIGN;
1403         *(tp->t_stk) = 0;            /* terminate intr thread stack */
1404 #endif
1405 
1406         /*
1407          * Link onto CPU's interrupt pool.
1408          */
1409         tp->t_link = cp->cpu_intr_thread;
1410         cp->cpu_intr_thread = tp;
1411 }
1412 
1413 /*
1414  * TSD -- THREAD SPECIFIC DATA
1415  */
1416 static kmutex_t         tsd_mutex;       /* linked list spin lock */
1417 static uint_t           tsd_nkeys;       /* size of destructor array */
1418 /* per-key destructor funcs */
1419 static void             (**tsd_destructor)(void *);
1420 /* list of tsd_thread's */
1421 static struct tsd_thread        *tsd_list;
1422 
1423 /*
1424  * Default destructor
1425  *      Needed because NULL destructor means that the key is unused
1426  */
1427 /* ARGSUSED */
1428 void
1429 tsd_defaultdestructor(void *value)
1430 {}
1431 
1432 /*
1433  * Create a key (index into per thread array)
1434  *      Locks out tsd_create, tsd_destroy, and tsd_exit
1435  *      May allocate memory with lock held
1436  */
1437 void
1438 tsd_create(uint_t *keyp, void (*destructor)(void *))
1439 {
1440         int     i;
1441         uint_t  nkeys;
1442 
1443         /*
1444          * if key is allocated, do nothing
1445          */
1446         mutex_enter(&tsd_mutex);
1447         if (*keyp) {
1448                 mutex_exit(&tsd_mutex);
1449                 return;
1450         }
1451         /*
1452          * find an unused key
1453          */
1454         if (destructor == NULL)
1455                 destructor = tsd_defaultdestructor;
1456 
1457         for (i = 0; i < tsd_nkeys; ++i)
1458                 if (tsd_destructor[i] == NULL)
1459                         break;
1460 
1461         /*
1462          * if no unused keys, increase the size of the destructor array
1463          */
1464         if (i == tsd_nkeys) {
1465                 if ((nkeys = (tsd_nkeys << 1)) == 0)
1466                         nkeys = 1;
1467                 tsd_destructor =
1468                     (void (**)(void *))tsd_realloc((void *)tsd_destructor,
1469                     (size_t)(tsd_nkeys * sizeof (void (*)(void *))),
1470                     (size_t)(nkeys * sizeof (void (*)(void *))));
1471                 tsd_nkeys = nkeys;
1472         }
1473 
1474         /*
1475          * allocate the next available unused key
1476          */
1477         tsd_destructor[i] = destructor;
1478         *keyp = i + 1;
1479         mutex_exit(&tsd_mutex);
1480 }
1481 
1482 /*
1483  * Destroy a key -- this is for unloadable modules
1484  *
1485  * Assumes that the caller is preventing tsd_set and tsd_get
1486  * Locks out tsd_create, tsd_destroy, and tsd_exit
1487  * May free memory with lock held
1488  */
1489 void
1490 tsd_destroy(uint_t *keyp)
1491 {
1492         uint_t key;
1493         struct tsd_thread *tsd;
1494 
1495         /*
1496          * protect the key namespace and our destructor lists
1497          */
1498         mutex_enter(&tsd_mutex);
1499         key = *keyp;
1500         *keyp = 0;
1501 
1502         ASSERT(key <= tsd_nkeys);
1503 
1504         /*
1505          * if the key is valid
1506          */
1507         if (key != 0) {
1508                 uint_t k = key - 1;
1509                 /*
1510                  * for every thread with TSD, call key's destructor
1511                  */
1512                 for (tsd = tsd_list; tsd; tsd = tsd->ts_next) {
1513                         /*
1514                          * no TSD for key in this thread
1515                          */
1516                         if (key > tsd->ts_nkeys)
1517                                 continue;
1518                         /*
1519                          * call destructor for key
1520                          */
1521                         if (tsd->ts_value[k] && tsd_destructor[k])
1522                                 (*tsd_destructor[k])(tsd->ts_value[k]);
1523                         /*
1524                          * reset value for key
1525                          */
1526                         tsd->ts_value[k] = NULL;
1527                 }
1528                 /*
1529                  * actually free the key (NULL destructor == unused)
1530                  */
1531                 tsd_destructor[k] = NULL;
1532         }
1533 
1534         mutex_exit(&tsd_mutex);
1535 }
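
/*
 * Usage sketch (editor's illustration, not compiled): the matching
 * teardown for the example above, typically run from a module's
 * unload path once nothing can race in with tsd_set() or tsd_get(),
 * as this function assumes.  foo_fini() is an invented name.
 *
 *	void
 *	foo_fini(void)
 *	{
 *		tsd_destroy(&foo_key);
 *	}
 *
 * tsd_destroy() runs foo_destroy_val() for every thread still holding
 * a value under this key, then marks the key unused.
 */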
1536 
1537 /*
1538  * Quickly return the per thread value that was stored with the specified key
1539  * Assumes the caller is protecting key from tsd_create and tsd_destroy
1540  */
1541 void *
1542 tsd_get(uint_t key)
1543 {
1544         return (tsd_agent_get(curthread, key));
1545 }
1546 
1547 /*
1548  * Set a per thread value indexed with the specified key
1549  */
1550 int
1551 tsd_set(uint_t key, void *value)
1552 {
1553         return (tsd_agent_set(curthread, key, value));
1554 }
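
/*
 * Usage sketch (editor's illustration, not compiled): lazily attach a
 * value to the current thread using the key created above.
 *
 *	struct foo_state *fs;
 *
 *	if ((fs = tsd_get(foo_key)) == NULL) {
 *		fs = kmem_zalloc(sizeof (*fs), KM_SLEEP);
 *		(void) tsd_set(foo_key, fs);
 *	}
 */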
1555 
1556 /*
1557  * Like tsd_get(), except that the agent lwp can get the tsd of
1558  * another thread in the same process (the agent thread only runs when the
 * process is completely stopped by /proc), or when syslwp is creating
 * a new lwp.
1560  */
1561 void *
1562 tsd_agent_get(kthread_t *t, uint_t key)
1563 {
1564         struct tsd_thread *tsd = t->t_tsd;
1565 
1566         ASSERT(t == curthread ||
1567             ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);
1568 
1569         if (key && tsd != NULL && key <= tsd->ts_nkeys)
1570                 return (tsd->ts_value[key - 1]);
1571         return (NULL);
1572 }
1573 
1574 /*
1575  * Like tsd_set(), except that the agent lwp can set the tsd of
1576  * another thread in the same process, or syslwp can set the tsd
1577  * of a thread it's in the middle of creating.
1578  *
1579  * Assumes the caller is protecting key from tsd_create and tsd_destroy
1580  * May lock out tsd_destroy (and tsd_create), may allocate memory with
1581  * lock held
1582  */
1583 int
1584 tsd_agent_set(kthread_t *t, uint_t key, void *value)
1585 {
1586         struct tsd_thread *tsd = t->t_tsd;
1587 
1588         ASSERT(t == curthread ||
1589             ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);
1590 
1591         if (key == 0)
1592                 return (EINVAL);
1593         if (tsd == NULL)
1594                 tsd = t->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
1595         if (key <= tsd->ts_nkeys) {
1596                 tsd->ts_value[key - 1] = value;
1597                 return (0);
1598         }
1599 
1600         ASSERT(key <= tsd_nkeys);
1601 
1602         /*
1603          * lock out tsd_destroy()
1604          */
1605         mutex_enter(&tsd_mutex);
1606         if (tsd->ts_nkeys == 0) {
1607                 /*
1608                  * Link onto list of threads with TSD
1609                  */
1610                 if ((tsd->ts_next = tsd_list) != NULL)
1611                         tsd_list->ts_prev = tsd;
1612                 tsd_list = tsd;
1613         }
1614 
1615         /*
1616          * Allocate thread local storage and set the value for key
1617          */
1618         tsd->ts_value = tsd_realloc(tsd->ts_value,
1619             tsd->ts_nkeys * sizeof (void *),
1620             key * sizeof (void *));
1621         tsd->ts_nkeys = key;
1622         tsd->ts_value[key - 1] = value;
1623         mutex_exit(&tsd_mutex);
1624 
1625         return (0);
1626 }
1627 
1628 
1629 /*
1630  * Return the per thread value that was stored with the specified key
1631  *      If necessary, create the key and the value
1632  *      Assumes the caller is protecting *keyp from tsd_destroy
1633  */
1634 void *
1635 tsd_getcreate(uint_t *keyp, void (*destroy)(void *), void *(*allocate)(void))
1636 {
1637         void *value;
1638         uint_t key = *keyp;
1639         struct tsd_thread *tsd = curthread->t_tsd;
1640 
1641         if (tsd == NULL)
1642                 tsd = curthread->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
1643         if (key && key <= tsd->ts_nkeys && (value = tsd->ts_value[key - 1]))
1644                 return (value);
1645         if (key == 0)
1646                 tsd_create(keyp, destroy);
1647         (void) tsd_set(*keyp, value = (*allocate)());
1648 
1649         return (value);
1650 }
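
/*
 * Usage sketch (editor's illustration, not compiled): tsd_getcreate()
 * collapses the create/get/set dance above into a single call.
 * foo_alloc() is an invented allocator matching the required
 * void *(*)(void) signature.
 *
 *	static void *
 *	foo_alloc(void)
 *	{
 *		return (kmem_zalloc(sizeof (struct foo_state), KM_SLEEP));
 *	}
 *
 *	struct foo_state *fs = tsd_getcreate(&foo_key, foo_destroy_val,
 *	    foo_alloc);
 */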
1651 
1652 /*
1653  * Called from thread_exit() to run the destructor function for each tsd
1654  *      Locks out tsd_create and tsd_destroy
1655  *      Assumes that the destructor *DOES NOT* use tsd
1656  */
1657 void
1658 tsd_exit(void)
1659 {
1660         int i;
1661         struct tsd_thread *tsd = curthread->t_tsd;
1662 
1663         if (tsd == NULL)
1664                 return;
1665 
1666         if (tsd->ts_nkeys == 0) {
1667                 kmem_free(tsd, sizeof (*tsd));
1668                 curthread->t_tsd = NULL;
1669                 return;
1670         }
1671 
1672         /*
1673          * lock out tsd_create and tsd_destroy, call
1674          * the destructor, and mark the value as destroyed.
1675          */
1676         mutex_enter(&tsd_mutex);
1677 
1678         for (i = 0; i < tsd->ts_nkeys; i++) {
1679                 if (tsd->ts_value[i] && tsd_destructor[i])
1680                         (*tsd_destructor[i])(tsd->ts_value[i]);
1681                 tsd->ts_value[i] = NULL;
1682         }
1683 
1684         /*
1685          * remove from linked list of threads with TSD
1686          */
1687         if (tsd->ts_next)
1688                 tsd->ts_next->ts_prev = tsd->ts_prev;
1689         if (tsd->ts_prev)
1690                 tsd->ts_prev->ts_next = tsd->ts_next;
1691         if (tsd_list == tsd)
1692                 tsd_list = tsd->ts_next;
1693 
1694         mutex_exit(&tsd_mutex);
1695 
1696         /*
1697          * free up the TSD
1698          */
1699         kmem_free(tsd->ts_value, tsd->ts_nkeys * sizeof (void *));
1700         kmem_free(tsd, sizeof (struct tsd_thread));
1701         curthread->t_tsd = NULL;
1702 }
1703 
/*
 * Grow-only realloc: allocate a zeroed buffer of the new size, copy the
 * old contents into it, and free the old buffer.
 */
1707 static void *
1708 tsd_realloc(void *old, size_t osize, size_t nsize)
1709 {
1710         void *new;
1711 
1712         new = kmem_zalloc(nsize, KM_SLEEP);
1713         if (old) {
1714                 bcopy(old, new, osize);
1715                 kmem_free(old, osize);
1716         }
1717         return (new);
1718 }
1719 
1720 /*
1721  * Return non-zero if an interrupt is being serviced.
1722  */
1723 int
1724 servicing_interrupt()
1725 {
1726         int onintr = 0;
1727 
	/* Are we an interrupt thread? */
1729         if (curthread->t_flag & T_INTR_THREAD)
1730                 return (1);
1731         /* Are we servicing a high level interrupt? */
1732         if (CPU_ON_INTR(CPU)) {
1733                 kpreempt_disable();
1734                 onintr = CPU_ON_INTR(CPU);
1735                 kpreempt_enable();
1736         }
1737         return (onintr);
1738 }
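
/*
 * Usage sketch (editor's illustration, not compiled): callers typically
 * use this to avoid operations that may block while in interrupt
 * context, e.g. by choosing a non-sleeping allocation policy.
 *
 *	int kmflag = servicing_interrupt() ? KM_NOSLEEP : KM_SLEEP;
 *	void *buf = kmem_alloc(size, kmflag);
 *
 *	if (buf == NULL)
 *		return (ENOMEM);	(NULL is only possible for KM_NOSLEEP)
 */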
1739 
1740 
1741 /*
1742  * Change the dispatch priority of a thread in the system.
1743  * Used when raising or lowering a thread's priority.
1744  * (E.g., priority inheritance)
1745  *
 * Since threads are queued according to their priority, we
 * must check the thread's state to determine whether it
 * is on a queue somewhere. If it is, we've got to:
 *
 *	o Dequeue the thread.
 *	o Change its effective priority.
 *	o Enqueue the thread.
 *
 * Assumptions: The thread whose priority we wish to change
 * must be locked before we call thread_change_(e)pri().
 * The thread_change_(e)pri() functions don't drop the thread
 * lock; that must be done by the caller.
1758  */
1759 void
1760 thread_change_epri(kthread_t *t, pri_t disp_pri)
1761 {
1762         uint_t  state;
1763 
1764         ASSERT(THREAD_LOCK_HELD(t));
1765 
1766         /*
1767          * If the inherited priority hasn't actually changed,
1768          * just return.
1769          */
1770         if (t->t_epri == disp_pri)
1771                 return;
1772 
1773         state = t->t_state;
1774 
1775         /*
1776          * If it's not on a queue, change the priority with impunity.
1777          */
1778         if ((state & (TS_SLEEP | TS_RUN | TS_WAIT)) == 0) {
1779                 t->t_epri = disp_pri;
1780                 if (state == TS_ONPROC) {
1781                         cpu_t *cp = t->t_disp_queue->disp_cpu;
1782 
1783                         if (t == cp->cpu_dispthread)
1784                                 cp->cpu_dispatch_pri = DISP_PRIO(t);
1785                 }
1786         } else if (state == TS_SLEEP) {
1787                 /*
1788                  * Take the thread out of its sleep queue.
1789                  * Change the inherited priority.
1790                  * Re-enqueue the thread.
1791                  * Each synchronization object exports a function
1792                  * to do this in an appropriate manner.
1793                  */
1794                 SOBJ_CHANGE_EPRI(t->t_sobj_ops, t, disp_pri);
1795         } else if (state == TS_WAIT) {
1796                 /*
1797                  * Re-enqueue a thread on the wait queue if its
1798                  * effective priority needs to change.
1799                  */
1800                 if (disp_pri != t->t_epri)
1801                         waitq_change_pri(t, disp_pri);
1802         } else {
1803                 /*
1804                  * The thread is on a run queue.
1805                  * Note: setbackdq() may not put the thread
1806                  * back on the same run queue where it originally
1807                  * resided.
1808                  */
1809                 (void) dispdeq(t);
1810                 t->t_epri = disp_pri;
1811                 setbackdq(t);
1812         }
1813         schedctl_set_cidpri(t);
1814 }
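
/*
 * Caller sketch (editor's illustration, not compiled): per the comment
 * above, the thread must be locked across the call and the caller
 * drops the lock itself, e.g. when applying an inherited priority:
 *
 *	thread_lock(t);
 *	thread_change_epri(t, new_epri);
 *	thread_unlock(t);
 *
 * new_epri is an invented pri_t variable for this example.
 */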
1815 
1816 /*
1817  * Function: Change the t_pri field of a thread.
1818  * Side Effects: Adjust the thread ordering on a run queue
1819  *               or sleep queue, if necessary.
1820  * Returns: 1 if the thread was on a run queue, else 0.
1821  */
1822 int
1823 thread_change_pri(kthread_t *t, pri_t disp_pri, int front)
1824 {
1825         uint_t  state;
1826         int     on_rq = 0;
1827 
1828         ASSERT(THREAD_LOCK_HELD(t));
1829 
1830         state = t->t_state;
1831         THREAD_WILLCHANGE_PRI(t, disp_pri);
1832 
1833         /*
1834          * If it's not on a queue, change the priority with impunity.
1835          */
1836         if ((state & (TS_SLEEP | TS_RUN | TS_WAIT)) == 0) {
1837                 t->t_pri = disp_pri;
1838 
1839                 if (state == TS_ONPROC) {
1840                         cpu_t *cp = t->t_disp_queue->disp_cpu;
1841 
1842                         if (t == cp->cpu_dispthread)
1843                                 cp->cpu_dispatch_pri = DISP_PRIO(t);
1844                 }
1845         } else if (state == TS_SLEEP) {
1846                 /*
1847                  * If the priority has changed, take the thread out of
1848                  * its sleep queue and change the priority.
1849                  * Re-enqueue the thread.
1850                  * Each synchronization object exports a function
1851                  * to do this in an appropriate manner.
1852                  */
1853                 if (disp_pri != t->t_pri)
1854                         SOBJ_CHANGE_PRI(t->t_sobj_ops, t, disp_pri);
1855         } else if (state == TS_WAIT) {
1856                 /*
1857                  * Re-enqueue a thread on the wait queue if its
1858                  * priority needs to change.
1859                  */
1860                 if (disp_pri != t->t_pri)
1861                         waitq_change_pri(t, disp_pri);
1862         } else {
1863                 /*
1864                  * The thread is on a run queue.
1865                  * Note: setbackdq() may not put the thread
1866                  * back on the same run queue where it originally
1867                  * resided.
1868                  *
1869                  * We still requeue the thread even if the priority
1870                  * is unchanged to preserve round-robin (and other)
1871                  * effects between threads of the same priority.
1872                  */
1873                 on_rq = dispdeq(t);
1874                 ASSERT(on_rq);
1875                 t->t_pri = disp_pri;
1876                 if (front) {
1877                         setfrontdq(t);
1878                 } else {
1879                         setbackdq(t);
1880                 }
1881         }
1882         schedctl_set_cidpri(t);
1883         return (on_rq);
1884 }
1885 
1886 /*
 * When the tunable kmem_stackinfo is set, fill the kernel thread stack
 * with a specific pattern.
1889  */
1890 static void
1891 stkinfo_begin(kthread_t *t)
1892 {
1893         caddr_t start;  /* stack start */
1894         caddr_t end;    /* stack end  */
1895         uint64_t *ptr;  /* pattern pointer */
1896 
1897         /*
	 * The stack grows up or down (see thread_create());
	 * compute the stack memory area start and end (start < end).
1900          */
1901         if (t->t_stk > t->t_stkbase) {
1902                 /* stack grows down */
1903                 start = t->t_stkbase;
1904                 end = t->t_stk;
1905         } else {
1906                 /* stack grows up */
1907                 start = t->t_stk;
1908                 end = t->t_stkbase;
1909         }
1910 
1911         /*
	 * The stackinfo pattern is 8 bytes wide. Ensure proper 8-byte
	 * alignment of start and end within the stack area boundaries
1914          * (protection against corrupt t_stkbase/t_stk data).
1915          */
1916         if ((((uintptr_t)start) & 0x7) != 0) {
1917                 start = (caddr_t)((((uintptr_t)start) & (~0x7)) + 8);
1918         }
1919         end = (caddr_t)(((uintptr_t)end) & (~0x7));
1920 
1921         if ((end <= start) || (end - start) > (1024 * 1024)) {
		/* negative size or stack larger than 1MB; assume bogus */
1923                 return;
1924         }
1925 
1926         /* fill stack area with a pattern (instead of zeros) */
1927         ptr = (uint64_t *)((void *)start);
1928         while (ptr < (uint64_t *)((void *)end)) {
1929                 *ptr++ = KMEM_STKINFO_PATTERN;
1930         }
1931 }
1932 
1933 
1934 /*
 * When the tunable kmem_stackinfo is set, create the stackinfo log if it
 * doesn't already exist, compute the percentage of kernel stack actually
 * used, and record it in the log if it ranks among the highest usages
 * seen so far.
1938  */
1939 static void
1940 stkinfo_end(kthread_t *t)
1941 {
1942         caddr_t start;  /* stack start */
1943         caddr_t end;    /* stack end  */
1944         uint64_t *ptr;  /* pattern pointer */
1945         size_t stksz;   /* stack size */
1946         size_t smallest = 0;
1947         size_t percent = 0;
1948         uint_t index = 0;
1949         uint_t i;
1950         static size_t smallest_percent = (size_t)-1;
1951         static uint_t full = 0;
1952 
	/* create the stackinfo log, if it doesn't already exist */
1954         mutex_enter(&kmem_stkinfo_lock);
1955         if (kmem_stkinfo_log == NULL) {
1956                 kmem_stkinfo_log = (kmem_stkinfo_t *)
1957                     kmem_zalloc(KMEM_STKINFO_LOG_SIZE *
1958                     (sizeof (kmem_stkinfo_t)), KM_NOSLEEP);
1959                 if (kmem_stkinfo_log == NULL) {
1960                         mutex_exit(&kmem_stkinfo_lock);
1961                         return;
1962                 }
1963         }
1964         mutex_exit(&kmem_stkinfo_lock);
1965 
1966         /*
	 * The stack grows up or down (see thread_create());
	 * compute the stack memory area start and end (start < end).
1969          */
1970         if (t->t_stk > t->t_stkbase) {
1971                 /* stack grows down */
1972                 start = t->t_stkbase;
1973                 end = t->t_stk;
1974         } else {
1975                 /* stack grows up */
1976                 start = t->t_stk;
1977                 end = t->t_stkbase;
1978         }
1979 
1980         /* stack size as found in kthread_t */
1981         stksz = end - start;
1982 
1983         /*
	 * The stackinfo pattern is 8 bytes wide. Ensure proper 8-byte
	 * alignment of start and end within the stack area boundaries
1986          * (protection against corrupt t_stkbase/t_stk data).
1987          */
1988         if ((((uintptr_t)start) & 0x7) != 0) {
1989                 start = (caddr_t)((((uintptr_t)start) & (~0x7)) + 8);
1990         }
1991         end = (caddr_t)(((uintptr_t)end) & (~0x7));
1992 
1993         if ((end <= start) || (end - start) > (1024 * 1024)) {
		/* negative size or stack larger than 1MB; assume bogus */
1995                 return;
1996         }
1997 
1998         /* search until no pattern in the stack */
1999         if (t->t_stk > t->t_stkbase) {
2000                 /* stack grows down */
2001 #if defined(__i386) || defined(__amd64)
2002                 /*
		 * Six longs are pushed on the stack by thread_load().
		 * Skip them, so that if the kthread has never run, the
		 * percentage is zero.  8-byte alignment is preserved on
		 * a 32-bit kernel: 6 x 4 = 24, and 24 is a multiple of 8.
		 */
2009                 end -= (6 * sizeof (long));
2010 #endif
2011                 ptr = (uint64_t *)((void *)start);
2012                 while (ptr < (uint64_t *)((void *)end)) {
2013                         if (*ptr != KMEM_STKINFO_PATTERN) {
2014                                 percent = stkinfo_percent(end,
2015                                     start, (caddr_t)ptr);
2016                                 break;
2017                         }
2018                         ptr++;
2019                 }
2020         } else {
2021                 /* stack grows up */
2022                 ptr = (uint64_t *)((void *)end);
2023                 ptr--;
2024                 while (ptr >= (uint64_t *)((void *)start)) {
2025                         if (*ptr != KMEM_STKINFO_PATTERN) {
2026                                 percent = stkinfo_percent(start,
2027                                     end, (caddr_t)ptr);
2028                                 break;
2029                         }
2030                         ptr--;
2031                 }
2032         }
2033 
2034         DTRACE_PROBE3(stack__usage, kthread_t *, t,
2035             size_t, stksz, size_t, percent);
2036 
2037         if (percent == 0) {
2038                 return;
2039         }
2040 
2041         mutex_enter(&kmem_stkinfo_lock);
2042         if (full == KMEM_STKINFO_LOG_SIZE && percent < smallest_percent) {
2043                 /*
2044                  * The log is full and already contains the highest values
2045                  */
2046                 mutex_exit(&kmem_stkinfo_lock);
2047                 return;
2048         }
2049 
2050         /* keep a log of the highest used stack */
2051         for (i = 0; i < KMEM_STKINFO_LOG_SIZE; i++) {
2052                 if (kmem_stkinfo_log[i].percent == 0) {
2053                         index = i;
2054                         full++;
2055                         break;
2056                 }
2057                 if (smallest == 0) {
2058                         smallest = kmem_stkinfo_log[i].percent;
2059                         index = i;
2060                         continue;
2061                 }
2062                 if (kmem_stkinfo_log[i].percent < smallest) {
2063                         smallest = kmem_stkinfo_log[i].percent;
2064                         index = i;
2065                 }
2066         }
2067 
2068         if (percent >= kmem_stkinfo_log[index].percent) {
2069                 kmem_stkinfo_log[index].kthread = (caddr_t)t;
2070                 kmem_stkinfo_log[index].t_startpc = (caddr_t)t->t_startpc;
2071                 kmem_stkinfo_log[index].start = start;
2072                 kmem_stkinfo_log[index].stksz = stksz;
2073                 kmem_stkinfo_log[index].percent = percent;
2074                 kmem_stkinfo_log[index].t_tid = t->t_tid;
2075                 kmem_stkinfo_log[index].cmd[0] = '\0';
2076                 if (t->t_tid != 0) {
2077                         stksz = strlen((t->t_procp)->p_user.u_comm);
2078                         if (stksz >= KMEM_STKINFO_STR_SIZE) {
2079                                 stksz = KMEM_STKINFO_STR_SIZE - 1;
2080                                 kmem_stkinfo_log[index].cmd[stksz] = '\0';
2081                         } else {
2082                                 stksz += 1;
2083                         }
2084                         (void) memcpy(kmem_stkinfo_log[index].cmd,
2085                             (t->t_procp)->p_user.u_comm, stksz);
2086                 }
2087                 if (percent < smallest_percent) {
2088                         smallest_percent = percent;
2089                 }
2090         }
2091         mutex_exit(&kmem_stkinfo_lock);
2092 }
2093 
2094 /*
 * When the tunable kmem_stackinfo is set, compute the stack utilization
 * percentage.
2096  */
2097 static size_t
2098 stkinfo_percent(caddr_t t_stk, caddr_t t_stkbase, caddr_t sp)
2099 {
2100         size_t percent;
2101         size_t s;
2102 
2103         if (t_stk > t_stkbase) {
2104                 /* stack grows down */
2105                 if (sp > t_stk) {
2106                         return (0);
2107                 }
2108                 if (sp < t_stkbase) {
2109                         return (100);
2110                 }
2111                 percent = t_stk - sp + 1;
2112                 s = t_stk - t_stkbase + 1;
2113         } else {
2114                 /* stack grows up */
2115                 if (sp < t_stk) {
2116                         return (0);
2117                 }
2118                 if (sp > t_stkbase) {
2119                         return (100);
2120                 }
2121                 percent = sp - t_stk + 1;
2122                 s = t_stkbase - t_stk + 1;
2123         }
2124         percent = ((100 * percent) / s) + 1;
2125         if (percent > 100) {
2126                 percent = 100;
2127         }
2128         return (percent);
2129 }
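
/*
 * Worked example (editor's illustration): for a downward-growing stack
 * with t_stk = 0x2000, t_stkbase = 0x1000 and the deepest non-pattern
 * word found at sp = 0x1400:
 *
 *	percent = 0x2000 - 0x1400 + 1 = 3073	(bytes used)
 *	s	= 0x2000 - 0x1000 + 1 = 4097	(stack size)
 *	result	= (100 * 3073) / 4097 + 1 = 75 + 1 = 76%
 */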