1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
  24  * Copyright (c) 2018 Joyent, Inc.
  25  */
  26 
  27 #include <sys/types.h>
  28 #include <sys/param.h>
  29 #include <sys/sysmacros.h>
  30 #include <sys/signal.h>
  31 #include <sys/stack.h>
  32 #include <sys/pcb.h>
  33 #include <sys/user.h>
  34 #include <sys/systm.h>
  35 #include <sys/sysinfo.h>
  36 #include <sys/errno.h>
  37 #include <sys/cmn_err.h>
  38 #include <sys/cred.h>
  39 #include <sys/resource.h>
  40 #include <sys/task.h>
  41 #include <sys/project.h>
  42 #include <sys/proc.h>
  43 #include <sys/debug.h>
  44 #include <sys/disp.h>
  45 #include <sys/class.h>
  46 #include <vm/seg_kmem.h>
  47 #include <vm/seg_kp.h>
  48 #include <sys/machlock.h>
  49 #include <sys/kmem.h>
  50 #include <sys/varargs.h>
  51 #include <sys/turnstile.h>
  52 #include <sys/poll.h>
  53 #include <sys/vtrace.h>
  54 #include <sys/callb.h>
  55 #include <c2/audit.h>
  56 #include <sys/tnf.h>
  57 #include <sys/sobject.h>
  58 #include <sys/cpupart.h>
  59 #include <sys/pset.h>
  60 #include <sys/door.h>
  61 #include <sys/spl.h>
  62 #include <sys/copyops.h>
  63 #include <sys/rctl.h>
  64 #include <sys/brand.h>
  65 #include <sys/pool.h>
  66 #include <sys/zone.h>
  67 #include <sys/tsol/label.h>
  68 #include <sys/tsol/tndb.h>
  69 #include <sys/cpc_impl.h>
  70 #include <sys/sdt.h>
  71 #include <sys/reboot.h>
  72 #include <sys/kdi.h>
  73 #include <sys/schedctl.h>
  74 #include <sys/waitq.h>
  75 #include <sys/cpucaps.h>
  76 #include <sys/kiconv.h>
  77 #include <sys/ctype.h>
  78 #include <sys/ht.h>
  79 
  80 struct kmem_cache *thread_cache;        /* cache of free threads */
  81 struct kmem_cache *lwp_cache;           /* cache of free lwps */
  82 struct kmem_cache *turnstile_cache;     /* cache of free turnstiles */
  83 
  84 /*
  85  * allthreads is only for use by kmem_readers.  All kernel loops can use
  86  * the current thread as a start/end point.
  87  */
  88 kthread_t *allthreads = &t0;        /* circular list of all threads */
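      /*
       * Illustrative sketch (not from the original source): the usual way to
       * walk every thread is to start and end at curthread rather than at
       * allthreads, holding pidlock so the list cannot change underneath the
       * loop.  did_to_thread() below uses exactly this pattern:
       *
       *      kthread_t *t;
       *
       *      mutex_enter(&pidlock);
       *      for (t = curthread->t_next; t != curthread; t = t->t_next) {
       *              ... examine t ...
       *      }
       *      mutex_exit(&pidlock);
       */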
  89 
  90 static kcondvar_t reaper_cv;            /* synchronization var */
  91 kthread_t       *thread_deathrow;       /* circular list of reapable threads */
  92 kthread_t       *lwp_deathrow;          /* circular list of reapable threads */
  93 kmutex_t        reaplock;               /* protects lwp and thread deathrows */
  94 int     thread_reapcnt = 0;             /* number of threads on deathrow */
  95 int     lwp_reapcnt = 0;                /* number of lwps on deathrow */
  96 int     reaplimit = 16;                 /* delay reaping until reaplimit */
  97 
  98 thread_free_lock_t      *thread_free_lock;
  99                                         /* protects tick thread from reaper */
 100 
 101 extern int nthread;
 102 
 103 /* System Scheduling classes. */
 104 id_t    syscid;                         /* system scheduling class ID */
 105 id_t    sysdccid = CLASS_UNUSED;        /* reset when SDC loads */
 106 
 107 void    *segkp_thread;                  /* cookie for segkp pool */
 108 
 109 int lwp_cache_sz = 32;
 110 int t_cache_sz = 8;
 111 static kt_did_t next_t_id = 1;
 112 
 113 /* Default mode for thread binding to CPUs and processor sets */
 114 int default_binding_mode = TB_ALLHARD;
 115 
 116 /*
 117  * Min/Max stack sizes for stack size parameters
 118  */
 119 #define MAX_STKSIZE     (32 * DEFAULTSTKSZ)
 120 #define MIN_STKSIZE     DEFAULTSTKSZ
 121 
 122 /*
 123  * default_stksize overrides lwp_default_stksize if it is set.
 124  */
 125 int     default_stksize;
 126 int     lwp_default_stksize;
 127 
 128 static zone_key_t zone_thread_key;
 129 
 130 unsigned int kmem_stackinfo;            /* stackinfo feature on-off */
 131 kmem_stkinfo_t *kmem_stkinfo_log;       /* stackinfo circular log */
 132 static kmutex_t kmem_stkinfo_lock;      /* protects kmem_stkinfo_log */
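      /*
       * Illustrative note (not from the original source): kmem_stackinfo is
       * normally enabled at boot; a hypothetical /etc/system entry would be:
       *
       *      set kmem_stackinfo = 1
       *
       * When it is set, stkinfo_begin()/stkinfo_end() (called from
       * thread_create() and thread_exit() below) record per-thread stack
       * usage in the kmem_stkinfo_log circular log.
       */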
 133 
 134 /*
 135  * forward declarations for internal thread specific data (tsd)
 136  */
 137 static void *tsd_realloc(void *, size_t, size_t);
 138 
 139 void thread_reaper(void);
 140 
 141 /* forward declarations for stackinfo feature */
 142 static void stkinfo_begin(kthread_t *);
 143 static void stkinfo_end(kthread_t *);
 144 static size_t stkinfo_percent(caddr_t, caddr_t, caddr_t);
 145 
 146 /*ARGSUSED*/
 147 static int
 148 turnstile_constructor(void *buf, void *cdrarg, int kmflags)
 149 {
 150         bzero(buf, sizeof (turnstile_t));
 151         return (0);
 152 }
 153 
 154 /*ARGSUSED*/
 155 static void
 156 turnstile_destructor(void *buf, void *cdrarg)
 157 {
 158         turnstile_t *ts = buf;
 159 
 160         ASSERT(ts->ts_free == NULL);
 161         ASSERT(ts->ts_waiters == 0);
 162         ASSERT(ts->ts_inheritor == NULL);
 163         ASSERT(ts->ts_sleepq[0].sq_first == NULL);
 164         ASSERT(ts->ts_sleepq[1].sq_first == NULL);
 165 }
 166 
 167 void
 168 thread_init(void)
 169 {
 170         kthread_t *tp;
 171         extern char sys_name[];
 172         extern void idle();
 173         struct cpu *cpu = CPU;
 174         int i;
 175         kmutex_t *lp;
 176 
 177         mutex_init(&reaplock, NULL, MUTEX_SPIN, (void *)ipltospl(DISP_LEVEL));
 178         thread_free_lock =
 179             kmem_alloc(sizeof (thread_free_lock_t) * THREAD_FREE_NUM, KM_SLEEP);
 180         for (i = 0; i < THREAD_FREE_NUM; i++) {
 181                 lp = &thread_free_lock[i].tf_lock;
 182                 mutex_init(lp, NULL, MUTEX_DEFAULT, NULL);
 183         }
 184 
 185 #if defined(__i386) || defined(__amd64)
 186         thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
 187             PTR24_ALIGN, NULL, NULL, NULL, NULL, NULL, 0);
 188 
 189         /*
 190          * "struct _klwp" includes a "struct pcb", which includes a
 191          * "struct fpu", which needs to be 64-byte aligned on amd64
 192          * (and even on i386) for xsave/xrstor.
 193          */
 194         lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
 195             64, NULL, NULL, NULL, NULL, NULL, 0);
 196 #else
 197         /*
 198          * Allocate thread structures from static_arena.  This prevents
 199          * issues where a thread tries to relocate its own thread
 200          * structure and touches it after the mapping has been suspended.
 201          */
 202         thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
 203             PTR24_ALIGN, NULL, NULL, NULL, NULL, static_arena, 0);
 204 
 205         lwp_stk_cache_init();
 206 
 207         lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
 208             0, NULL, NULL, NULL, NULL, NULL, 0);
 209 #endif
 210 
 211         turnstile_cache = kmem_cache_create("turnstile_cache",
 212             sizeof (turnstile_t), 0,
 213             turnstile_constructor, turnstile_destructor, NULL, NULL, NULL, 0);
 214 
 215         label_init();
 216         cred_init();
 217 
 218         /*
 219          * Initialize various resource management facilities.
 220          */
 221         rctl_init();
 222         cpucaps_init();
 223         /*
  224          * zone_init() should be called before project_init() so that the
  225          * project ID for the first project is initialized correctly.
 226          */
 227         zone_init();
 228         project_init();
 229         brand_init();
 230         kiconv_init();
 231         task_init();
 232         tcache_init();
 233         pool_init();
 234 
 235         curthread->t_ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);
 236 
 237         /*
 238          * Originally, we had two parameters to set default stack
 239          * size: one for lwp's (lwp_default_stksize), and one for
 240          * kernel-only threads (DEFAULTSTKSZ, a.k.a. _defaultstksz).
 241          * Now we have a third parameter that overrides both if it is
 242          * set to a legal stack size, called default_stksize.
 243          */
 244 
 245         if (default_stksize == 0) {
 246                 default_stksize = DEFAULTSTKSZ;
 247         } else if (default_stksize % PAGESIZE != 0 ||
 248             default_stksize > MAX_STKSIZE ||
 249             default_stksize < MIN_STKSIZE) {
 250                 cmn_err(CE_WARN, "Illegal stack size. Using %d",
 251                     (int)DEFAULTSTKSZ);
 252                 default_stksize = DEFAULTSTKSZ;
 253         } else {
 254                 lwp_default_stksize = default_stksize;
 255         }
 256 
 257         if (lwp_default_stksize == 0) {
 258                 lwp_default_stksize = default_stksize;
 259         } else if (lwp_default_stksize % PAGESIZE != 0 ||
 260             lwp_default_stksize > MAX_STKSIZE ||
 261             lwp_default_stksize < MIN_STKSIZE) {
 262                 cmn_err(CE_WARN, "Illegal stack size. Using %d",
 263                     default_stksize);
 264                 lwp_default_stksize = default_stksize;
 265         }
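      /*
       * Illustrative note (not from the original source): both tunables above
       * can be set at boot time, e.g. with hypothetical /etc/system entries:
       *
       *      set default_stksize = 0x8000
       *      set lwp_default_stksize = 0x6000
       *
       * Values that are not page-aligned or that fall outside
       * [MIN_STKSIZE, MAX_STKSIZE] are rejected by the checks above and
       * replaced with the compiled-in defaults.
       */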
 266 
 267         segkp_lwp = segkp_cache_init(segkp, lwp_cache_sz,
 268             lwp_default_stksize,
 269             (KPD_NOWAIT | KPD_HASREDZONE | KPD_LOCKED));
 270 
 271         segkp_thread = segkp_cache_init(segkp, t_cache_sz,
 272             default_stksize, KPD_HASREDZONE | KPD_LOCKED | KPD_NO_ANON);
 273 
 274         (void) getcid(sys_name, &syscid);
 275         curthread->t_cid = syscid;   /* current thread is t0 */
 276 
 277         /*
 278          * Set up the first CPU's idle thread.
 279          * It runs whenever the CPU has nothing worthwhile to do.
 280          */
 281         tp = thread_create(NULL, 0, idle, NULL, 0, &p0, TS_STOPPED, -1);
 282         cpu->cpu_idle_thread = tp;
 283         tp->t_preempt = 1;
 284         tp->t_disp_queue = cpu->cpu_disp;
 285         ASSERT(tp->t_disp_queue != NULL);
 286         tp->t_bound_cpu = cpu;
 287         tp->t_affinitycnt = 1;
 288 
 289         /*
 290          * Registering a thread in the callback table is usually
 291          * done in the initialization code of the thread. In this
  292          * case, we do it right after thread creation to avoid
  293          * blocking the idle thread while it registers itself. It also
 294          * avoids the possibility of reregistration in case a CPU
 295          * restarts its idle thread.
 296          */
 297         CALLB_CPR_INIT_SAFE(tp, "idle");
 298 
 299         /*
 300          * Create the thread_reaper daemon. From this point on, exited
 301          * threads will get reaped.
 302          */
 303         (void) thread_create(NULL, 0, (void (*)())thread_reaper,
 304             NULL, 0, &p0, TS_RUN, minclsyspri);
 305 
 306         /*
 307          * Finish initializing the kernel memory allocator now that
 308          * thread_create() is available.
 309          */
 310         kmem_thread_init();
 311 
 312         if (boothowto & RB_DEBUG)
 313                 kdi_dvec_thravail();
 314 }
 315 
 316 /*
 317  * Create a thread.
 318  *
 319  * thread_create() blocks for memory if necessary.  It never fails.
 320  *
 321  * If stk is NULL, the thread is created at the base of the stack
 322  * and cannot be swapped.
 323  */
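      /*
       * Illustrative sketch (not from the original source): a typical caller
       * that only needs a kernel service thread passes a NULL stack and lets
       * thread_create() carve both the stack and the kthread_t out of a
       * single segkp chunk (my_service_loop is a hypothetical routine):
       *
       *      static void
       *      my_service_loop(void)
       *      {
       *              for (;;) {
       *                      ... do work, cv_wait() for more ...
       *              }
       *      }
       *
       *      (void) thread_create(NULL, 0, (void (*)())my_service_loop,
       *          NULL, 0, &p0, TS_RUN, minclsyspri);
       *
       * This mirrors how thread_init() creates the thread_reaper daemon.
       */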
 324 kthread_t *
 325 thread_create(
 326         caddr_t stk,
 327         size_t  stksize,
 328         void    (*proc)(),
 329         void    *arg,
 330         size_t  len,
 331         proc_t   *pp,
 332         int     state,
 333         pri_t   pri)
 334 {
 335         kthread_t *t;
 336         extern struct classfuncs sys_classfuncs;
 337         turnstile_t *ts;
 338 
 339         /*
 340          * Every thread keeps a turnstile around in case it needs to block.
 341          * The only reason the turnstile is not simply part of the thread
 342          * structure is that we may have to break the association whenever
 343          * more than one thread blocks on a given synchronization object.
 344          * From a memory-management standpoint, turnstiles are like the
 345          * "attached mblks" that hang off dblks in the streams allocator.
 346          */
 347         ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);
 348 
 349         if (stk == NULL) {
 350                 /*
 351                  * alloc both thread and stack in segkp chunk
 352                  */
 353 
 354                 if (stksize < default_stksize)
 355                         stksize = default_stksize;
 356 
 357                 if (stksize == default_stksize) {
 358                         stk = (caddr_t)segkp_cache_get(segkp_thread);
 359                 } else {
 360                         stksize = roundup(stksize, PAGESIZE);
 361                         stk = (caddr_t)segkp_get(segkp, stksize,
 362                             (KPD_HASREDZONE | KPD_NO_ANON | KPD_LOCKED));
 363                 }
 364 
 365                 ASSERT(stk != NULL);
 366 
 367                 /*
 368                  * The machine-dependent mutex code may require that
 369                  * thread pointers (since they may be used for mutex owner
 370                  * fields) have certain alignment requirements.
 371                  * PTR24_ALIGN is the size of the alignment quanta.
 372                  * XXX - assumes stack grows toward low addresses.
 373                  */
 374                 if (stksize <= sizeof (kthread_t) + PTR24_ALIGN)
 375                         cmn_err(CE_PANIC, "thread_create: proposed stack size"
 376                             " too small to hold thread.");
 377 #ifdef STACK_GROWTH_DOWN
 378                 stksize -= SA(sizeof (kthread_t) + PTR24_ALIGN - 1);
 379                 stksize &= -PTR24_ALIGN;    /* make thread aligned */
 380                 t = (kthread_t *)(stk + stksize);
 381                 bzero(t, sizeof (kthread_t));
 382                 if (audit_active)
 383                         audit_thread_create(t);
 384                 t->t_stk = stk + stksize;
 385                 t->t_stkbase = stk;
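                      /*
                       * Illustrative layout (hypothetical numbers, not from
                       * the original source): if the segkp chunk is 0x5000
                       * bytes and SA(sizeof (kthread_t) + PTR24_ALIGN - 1)
                       * rounds up to 0x800, then stksize becomes 0x4800 after
                       * the carve-out and the alignment mask, the kthread_t
                       * itself lives at stk + 0x4800 at the top of the chunk,
                       * and the usable stack grows down from t_stk toward
                       * t_stkbase (== stk).
                       */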
 386 #else   /* stack grows to larger addresses */
 387                 stksize -= SA(sizeof (kthread_t));
 388                 t = (kthread_t *)(stk);
 389                 bzero(t, sizeof (kthread_t));
 390                 t->t_stk = stk + sizeof (kthread_t);
 391                 t->t_stkbase = stk + stksize + sizeof (kthread_t);
 392 #endif  /* STACK_GROWTH_DOWN */
 393                 t->t_flag |= T_TALLOCSTK;
 394                 t->t_swap = stk;
 395         } else {
 396                 t = kmem_cache_alloc(thread_cache, KM_SLEEP);
 397                 bzero(t, sizeof (kthread_t));
 398                 ASSERT(((uintptr_t)t & (PTR24_ALIGN - 1)) == 0);
 399                 if (audit_active)
 400                         audit_thread_create(t);
 401                 /*
 402                  * Initialize t_stk to the kernel stack pointer to use
 403                  * upon entry to the kernel
 404                  */
 405 #ifdef STACK_GROWTH_DOWN
 406                 t->t_stk = stk + stksize;
 407                 t->t_stkbase = stk;
 408 #else
 409                 t->t_stk = stk;                      /* 3b2-like */
 410                 t->t_stkbase = stk + stksize;
 411 #endif /* STACK_GROWTH_DOWN */
 412         }
 413 
 414         if (kmem_stackinfo != 0) {
 415                 stkinfo_begin(t);
 416         }
 417 
 418         t->t_ts = ts;
 419 
 420         /*
  421          * p_cred could be NULL if thread_create() is called before
  422          * cred_init() is called in main().
 423          */
 424         mutex_enter(&pp->p_crlock);
 425         if (pp->p_cred)
 426                 crhold(t->t_cred = pp->p_cred);
 427         mutex_exit(&pp->p_crlock);
 428         t->t_start = gethrestime_sec();
 429         t->t_startpc = proc;
 430         t->t_procp = pp;
 431         t->t_clfuncs = &sys_classfuncs.thread;
 432         t->t_cid = syscid;
 433         t->t_pri = pri;
 434         t->t_stime = ddi_get_lbolt();
 435         t->t_schedflag = TS_LOAD | TS_DONT_SWAP;
 436         t->t_bind_cpu = PBIND_NONE;
 437         t->t_bindflag = (uchar_t)default_binding_mode;
 438         t->t_bind_pset = PS_NONE;
 439         t->t_plockp = &pp->p_lock;
 440         t->t_copyops = NULL;
 441         t->t_taskq = NULL;
 442         t->t_anttime = 0;
 443         t->t_hatdepth = 0;
 444 
 445         t->t_dtrace_vtime = 1;       /* assure vtimestamp is always non-zero */
 446 
 447         CPU_STATS_ADDQ(CPU, sys, nthreads, 1);
 448 #ifndef NPROBE
 449         /* Kernel probe */
 450         tnf_thread_create(t);
 451 #endif /* NPROBE */
 452         LOCK_INIT_CLEAR(&t->t_lock);
 453 
 454         /*
  455          * Callers who give us a NULL proc (start routine) must do their
  456          * own stack initialization, e.g. lwp_create().
 457          */
 458         if (proc != NULL) {
 459                 t->t_stk = thread_stk_init(t->t_stk);
 460                 thread_load(t, proc, arg, len);
 461         }
 462 
 463         /*
 464          * Put a hold on project0. If this thread is actually in a
 465          * different project, then t_proj will be changed later in
 466          * lwp_create().  All kernel-only threads must be in project 0.
 467          */
 468         t->t_proj = project_hold(proj0p);
 469 
 470         lgrp_affinity_init(&t->t_lgrp_affinity);
 471 
 472         mutex_enter(&pidlock);
 473         nthread++;
 474         t->t_did = next_t_id++;
 475         t->t_prev = curthread->t_prev;
 476         t->t_next = curthread;
 477 
 478         /*
 479          * Add the thread to the list of all threads, and initialize
 480          * its t_cpu pointer.  We need to block preemption since
 481          * cpu_offline walks the thread list looking for threads
 482          * with t_cpu pointing to the CPU being offlined.  We want
 483          * to make sure that the list is consistent and that if t_cpu
 484          * is set, the thread is on the list.
 485          */
 486         kpreempt_disable();
 487         curthread->t_prev->t_next = t;
 488         curthread->t_prev = t;
 489 
 490         /*
 491          * We'll always create in the default partition since that's where
 492          * kernel threads go (we'll change this later if needed, in
 493          * lwp_create()).
 494          */
 495         t->t_cpupart = &cp_default;
 496 
 497         /*
 498          * For now, affiliate this thread with the root lgroup.
 499          * Since the kernel does not (presently) allocate its memory
 500          * in a locality aware fashion, the root is an appropriate home.
 501          * If this thread is later associated with an lwp, it will have
 502          * its lgroup re-assigned at that time.
 503          */
 504         lgrp_move_thread(t, &cp_default.cp_lgrploads[LGRP_ROOTID], 1);
 505 
 506         /*
 507          * If the current CPU is in the default cpupart, use it.  Otherwise,
 508          * pick one that is; before entering the dispatcher code, we'll
 509          * make sure to keep the invariant that ->t_cpu is set.  (In fact, we
 510          * rely on this, in ht_should_run(), in the call tree of
 511          * disp_lowpri_cpu().)
 512          */
 513         if (CPU->cpu_part == &cp_default) {
 514                 t->t_cpu = CPU;
 515         } else {
 516                 t->t_cpu = cp_default.cp_cpulist;
 517                 t->t_cpu = disp_lowpri_cpu(t->t_cpu, t, t->t_pri);
 518         }
 519 
 520         t->t_disp_queue = t->t_cpu->cpu_disp;
 521         kpreempt_enable();
 522 
 523         /*
 524          * Initialize thread state and the dispatcher lock pointer.
 525          * Need to hold onto pidlock to block allthreads walkers until
 526          * the state is set.
 527          */
 528         switch (state) {
 529         case TS_RUN:
 530                 curthread->t_oldspl = splhigh();     /* get dispatcher spl */
 531                 THREAD_SET_STATE(t, TS_STOPPED, &transition_lock);
 532                 CL_SETRUN(t);
 533                 thread_unlock(t);
 534                 break;
 535 
 536         case TS_ONPROC:
 537                 THREAD_ONPROC(t, t->t_cpu);
 538                 break;
 539 
 540         case TS_FREE:
 541                 /*
 542                  * Free state will be used for intr threads.
 543                  * The interrupt routine must set the thread dispatcher
 544                  * lock pointer (t_lockp) if starting on a CPU
 545                  * other than the current one.
 546                  */
 547                 THREAD_FREEINTR(t, CPU);
 548                 break;
 549 
 550         case TS_STOPPED:
 551                 THREAD_SET_STATE(t, TS_STOPPED, &stop_lock);
 552                 break;
 553 
 554         default:                        /* TS_SLEEP, TS_ZOMB or TS_TRANS */
 555                 cmn_err(CE_PANIC, "thread_create: invalid state %d", state);
 556         }
 557         mutex_exit(&pidlock);
 558         return (t);
 559 }
 560 
 561 /*
 562  * Move thread to project0 and take care of project reference counters.
 563  */
 564 void
 565 thread_rele(kthread_t *t)
 566 {
 567         kproject_t *kpj;
 568 
 569         thread_lock(t);
 570 
 571         ASSERT(t == curthread || t->t_state == TS_FREE || t->t_procp == &p0);
 572         kpj = ttoproj(t);
 573         t->t_proj = proj0p;
 574 
 575         thread_unlock(t);
 576 
 577         if (kpj != proj0p) {
 578                 project_rele(kpj);
 579                 (void) project_hold(proj0p);
 580         }
 581 }
 582 
 583 void
 584 thread_exit(void)
 585 {
 586         kthread_t *t = curthread;
 587 
 588         if ((t->t_proc_flag & TP_ZTHREAD) != 0)
 589                 cmn_err(CE_PANIC, "thread_exit: zthread_exit() not called");
 590 
 591         tsd_exit();             /* Clean up this thread's TSD */
 592 
 593         kcpc_passivate();       /* clean up performance counter state */
 594 
 595         /*
  596          * No kernel thread should have called poll() without arranging
  597          * to call pollcleanup() here.
 598          */
 599         ASSERT(t->t_pollstate == NULL);
 600         ASSERT(t->t_schedctl == NULL);
 601         if (t->t_door)
 602                 door_slam();    /* in case thread did an upcall */
 603 
 604 #ifndef NPROBE
 605         /* Kernel probe */
 606         if (t->t_tnf_tpdp)
 607                 tnf_thread_exit();
 608 #endif /* NPROBE */
 609 
 610         thread_rele(t);
 611         t->t_preempt++;
 612 
 613         /*
 614          * remove thread from the all threads list so that
 615          * death-row can use the same pointers.
 616          */
 617         mutex_enter(&pidlock);
 618         t->t_next->t_prev = t->t_prev;
 619         t->t_prev->t_next = t->t_next;
 620         ASSERT(allthreads != t);        /* t0 never exits */
 621         cv_broadcast(&t->t_joincv);      /* wake up anyone in thread_join */
 622         mutex_exit(&pidlock);
 623 
 624         if (t->t_ctx != NULL)
 625                 exitctx(t);
 626         if (t->t_procp->p_pctx != NULL)
 627                 exitpctx(t->t_procp);
 628 
 629         if (kmem_stackinfo != 0) {
 630                 stkinfo_end(t);
 631         }
 632 
 633         t->t_state = TS_ZOMB;        /* set zombie thread */
 634 
 635         swtch_from_zombie();    /* give up the CPU */
 636         /* NOTREACHED */
 637 }
 638 
 639 /*
 640  * Check to see if the specified thread is active (defined as being on
 641  * the thread list).  This is certainly a slow way to do this; if there's
 642  * ever a reason to speed it up, we could maintain a hash table of active
 643  * threads indexed by their t_did.
 644  */
 645 static kthread_t *
 646 did_to_thread(kt_did_t tid)
 647 {
 648         kthread_t *t;
 649 
 650         ASSERT(MUTEX_HELD(&pidlock));
 651         for (t = curthread->t_next; t != curthread; t = t->t_next) {
 652                 if (t->t_did == tid)
 653                         break;
 654         }
 655         if (t->t_did == tid)
 656                 return (t);
 657         else
 658                 return (NULL);
 659 }
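      /*
       * Hypothetical sketch (not implemented here) of the hash table
       * mentioned above: a small power-of-two array of buckets protected by
       * pidlock (T_DID_HASH_SIZE would be a hypothetical constant),
       *
       *      #define T_DID_HASH(did) ((did) & (T_DID_HASH_SIZE - 1))
       *      static kthread_t *t_did_hash[T_DID_HASH_SIZE];
       *
       * with each thread linked into its bucket when it is added to the
       * allthreads list and unlinked when it exits; did_to_thread() would
       * then probe one bucket instead of walking every thread.
       */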
 660 
 661 /*
 662  * Wait for specified thread to exit.  Returns immediately if the thread
 663  * could not be found, meaning that it has either already exited or never
 664  * existed.
 665  */
 666 void
 667 thread_join(kt_did_t tid)
 668 {
 669         kthread_t *t;
 670 
 671         ASSERT(tid != curthread->t_did);
 672         ASSERT(tid != t0.t_did);
 673 
 674         mutex_enter(&pidlock);
 675         /*
 676          * Make sure we check that the thread is on the thread list
 677          * before blocking on it; otherwise we could end up blocking on
 678          * a cv that's already been freed.  In other words, don't cache
 679          * the thread pointer across calls to cv_wait.
 680          *
 681          * The choice of loop invariant means that whenever a thread
 682          * is taken off the allthreads list, a cv_broadcast must be
 683          * performed on that thread's t_joincv to wake up any waiters.
 684          * The broadcast doesn't have to happen right away, but it
  685          * shouldn't be postponed indefinitely (e.g., by doing it in
  686          * thread_free(), which may only be executed when the deathrow
  687          * queue is processed).
 688          */
  689         while ((t = did_to_thread(tid)) != NULL)
 690                 cv_wait(&t->t_joincv, &pidlock);
 691         mutex_exit(&pidlock);
 692 }
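      /*
       * Illustrative sketch (not from the original source): a creator that
       * wants to wait for a helper thread records the helper's t_did right
       * after creation and joins on it later (my_helper is hypothetical):
       *
       *      kthread_t *t;
       *      kt_did_t did;
       *
       *      t = thread_create(NULL, 0, (void (*)())my_helper, NULL, 0, &p0,
       *          TS_RUN, minclsyspri);
       *      did = t->t_did;
       *      ...
       *      thread_join(did);
       *
       * thread_join(did) returns once my_helper() has called thread_exit()
       * (or immediately if it already has); note that t_did must be captured
       * before the new thread can exit and be reaped.
       */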
 693 
 694 void
 695 thread_free_prevent(kthread_t *t)
 696 {
 697         kmutex_t *lp;
 698 
 699         lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock;
 700         mutex_enter(lp);
 701 }
 702 
 703 void
 704 thread_free_allow(kthread_t *t)
 705 {
 706         kmutex_t *lp;
 707 
 708         lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock;
 709         mutex_exit(lp);
 710 }
 711 
 712 static void
 713 thread_free_barrier(kthread_t *t)
 714 {
 715         kmutex_t *lp;
 716 
 717         lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock;
 718         mutex_enter(lp);
 719         mutex_exit(lp);
 720 }
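      /*
       * Illustrative sketch (not from the original source): code such as the
       * tick accounting path that must examine a thread it does not otherwise
       * hold can bracket the access with the pair above:
       *
       *      thread_free_prevent(t);
       *      ... examine fields of t ...
       *      thread_free_allow(t);
       *
       * thread_free() calls thread_free_barrier() before its final frees;
       * the barrier simply acquires and drops the same hash-bucket lock, so
       * the thread cannot be freed out from under such a critical section.
       */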
 721 
 722 void
 723 thread_free(kthread_t *t)
 724 {
 725         boolean_t allocstk = (t->t_flag & T_TALLOCSTK);
 726         klwp_t *lwp = t->t_lwp;
 727         caddr_t swap = t->t_swap;
 728 
 729         ASSERT(t != &t0 && t->t_state == TS_FREE);
 730         ASSERT(t->t_door == NULL);
 731         ASSERT(t->t_schedctl == NULL);
 732         ASSERT(t->t_pollstate == NULL);
 733 
 734         t->t_pri = 0;
 735         t->t_pc = 0;
 736         t->t_sp = 0;
 737         t->t_wchan0 = NULL;
 738         t->t_wchan = NULL;
 739         if (t->t_cred != NULL) {
 740                 crfree(t->t_cred);
 741                 t->t_cred = 0;
 742         }
 743         if (t->t_pdmsg) {
 744                 kmem_free(t->t_pdmsg, strlen(t->t_pdmsg) + 1);
 745                 t->t_pdmsg = NULL;
 746         }
 747         if (audit_active)
 748                 audit_thread_free(t);
 749 #ifndef NPROBE
 750         if (t->t_tnf_tpdp)
 751                 tnf_thread_free(t);
 752 #endif /* NPROBE */
 753         if (t->t_cldata) {
 754                 CL_EXITCLASS(t->t_cid, (caddr_t *)t->t_cldata);
 755         }
 756         if (t->t_rprof != NULL) {
 757                 kmem_free(t->t_rprof, sizeof (*t->t_rprof));
 758                 t->t_rprof = NULL;
 759         }
 760         t->t_lockp = NULL;   /* nothing should try to lock this thread now */
 761         if (lwp)
 762                 lwp_freeregs(lwp, 0);
 763         if (t->t_ctx)
 764                 freectx(t, 0);
 765         t->t_stk = NULL;
 766         if (lwp)
 767                 lwp_stk_fini(lwp);
 768         lock_clear(&t->t_lock);
 769 
 770         if (t->t_ts->ts_waiters > 0)
 771                 panic("thread_free: turnstile still active");
 772 
 773         kmem_cache_free(turnstile_cache, t->t_ts);
 774 
 775         free_afd(&t->t_activefd);
 776 
 777         /*
 778          * Barrier for the tick accounting code.  The tick accounting code
 779          * holds this lock to keep the thread from going away while it's
 780          * looking at it.
 781          */
 782         thread_free_barrier(t);
 783 
 784         ASSERT(ttoproj(t) == proj0p);
 785         project_rele(ttoproj(t));
 786 
 787         lgrp_affinity_free(&t->t_lgrp_affinity);
 788 
 789         mutex_enter(&pidlock);
 790         nthread--;
 791         mutex_exit(&pidlock);
 792 
 793         if (t->t_name != NULL) {
 794                 kmem_free(t->t_name, THREAD_NAME_MAX);
 795                 t->t_name = NULL;
 796         }
 797 
 798         /*
 799          * Free thread, lwp and stack.  This needs to be done carefully, since
 800          * if T_TALLOCSTK is set, the thread is part of the stack.
 801          */
 802         t->t_lwp = NULL;
 803         t->t_swap = NULL;
 804 
 805         if (swap) {
 806                 segkp_release(segkp, swap);
 807         }
 808         if (lwp) {
 809                 kmem_cache_free(lwp_cache, lwp);
 810         }
 811         if (!allocstk) {
 812                 kmem_cache_free(thread_cache, t);
 813         }
 814 }
 815 
 816 /*
 817  * Removes threads associated with the given zone from a deathrow queue.
 818  * tp is a pointer to the head of the deathrow queue, and countp is a
 819  * pointer to the current deathrow count.  Returns a linked list of
 820  * threads removed from the list.
 821  */
 822 static kthread_t *
 823 thread_zone_cleanup(kthread_t **tp, int *countp, zoneid_t zoneid)
 824 {
 825         kthread_t *tmp, *list = NULL;
 826         cred_t *cr;
 827 
 828         ASSERT(MUTEX_HELD(&reaplock));
 829         while (*tp != NULL) {
 830                 if ((cr = (*tp)->t_cred) != NULL && crgetzoneid(cr) == zoneid) {
 831                         tmp = *tp;
 832                         *tp = tmp->t_forw;
 833                         tmp->t_forw = list;
 834                         list = tmp;
 835                         (*countp)--;
 836                 } else {
 837                         tp = &(*tp)->t_forw;
 838                 }
 839         }
 840         return (list);
 841 }
 842 
 843 static void
 844 thread_reap_list(kthread_t *t)
 845 {
 846         kthread_t *next;
 847 
 848         while (t != NULL) {
 849                 next = t->t_forw;
 850                 thread_free(t);
 851                 t = next;
 852         }
 853 }
 854 
 855 /* ARGSUSED */
 856 static void
 857 thread_zone_destroy(zoneid_t zoneid, void *unused)
 858 {
 859         kthread_t *t, *l;
 860 
 861         mutex_enter(&reaplock);
 862         /*
 863          * Pull threads and lwps associated with zone off deathrow lists.
 864          */
 865         t = thread_zone_cleanup(&thread_deathrow, &thread_reapcnt, zoneid);
 866         l = thread_zone_cleanup(&lwp_deathrow, &lwp_reapcnt, zoneid);
 867         mutex_exit(&reaplock);
 868 
 869         /*
 870          * Guard against race condition in mutex_owner_running:
 871          *      thread=owner(mutex)
 872          *      <interrupt>
 873          *                              thread exits mutex
 874          *                              thread exits
 875          *                              thread reaped
 876          *                              thread struct freed
 877          * cpu = thread->t_cpu <- BAD POINTER DEREFERENCE.
 878          * A cross call to all cpus will cause the interrupt handler
 879          * to reset the PC if it is in mutex_owner_running, refreshing
 880          * stale thread pointers.
 881          */
 882         mutex_sync();   /* sync with mutex code */
 883 
 884         /*
 885          * Reap threads
 886          */
 887         thread_reap_list(t);
 888 
 889         /*
 890          * Reap lwps
 891          */
 892         thread_reap_list(l);
 893 }
 894 
 895 /*
 896  * cleanup zombie threads that are on deathrow.
 897  */
 898 void
 899 thread_reaper()
 900 {
 901         kthread_t *t, *l;
 902         callb_cpr_t cprinfo;
 903 
 904         /*
 905          * Register callback to clean up threads when zone is destroyed.
 906          */
 907         zone_key_create(&zone_thread_key, NULL, NULL, thread_zone_destroy);
 908 
 909         CALLB_CPR_INIT(&cprinfo, &reaplock, callb_generic_cpr, "t_reaper");
 910         for (;;) {
 911                 mutex_enter(&reaplock);
 912                 while (thread_deathrow == NULL && lwp_deathrow == NULL) {
 913                         CALLB_CPR_SAFE_BEGIN(&cprinfo);
 914                         cv_wait(&reaper_cv, &reaplock);
 915                         CALLB_CPR_SAFE_END(&cprinfo, &reaplock);
 916                 }
 917                 /*
  918                  * mutex_sync() needs to be called when reaping, but
  919                  * not too often.  We limit the reaping rate to once per
  920                  * second.  reaplimit is the deathrow length that wakes the
  921                  * reaper; it does not limit thread destruction/creation.
 922                  */
 923                 t = thread_deathrow;
 924                 l = lwp_deathrow;
 925                 thread_deathrow = NULL;
 926                 lwp_deathrow = NULL;
 927                 thread_reapcnt = 0;
 928                 lwp_reapcnt = 0;
 929                 mutex_exit(&reaplock);
 930 
 931                 /*
 932                  * Guard against race condition in mutex_owner_running:
 933                  *      thread=owner(mutex)
 934                  *      <interrupt>
 935                  *                              thread exits mutex
 936                  *                              thread exits
 937                  *                              thread reaped
 938                  *                              thread struct freed
 939                  * cpu = thread->t_cpu <- BAD POINTER DEREFERENCE.
 940                  * A cross call to all cpus will cause the interrupt handler
 941                  * to reset the PC if it is in mutex_owner_running, refreshing
 942                  * stale thread pointers.
 943                  */
 944                 mutex_sync();   /* sync with mutex code */
 945                 /*
 946                  * Reap threads
 947                  */
 948                 thread_reap_list(t);
 949 
 950                 /*
 951                  * Reap lwps
 952                  */
 953                 thread_reap_list(l);
 954                 delay(hz);
 955         }
 956 }
 957 
 958 /*
  959  * This is called by lwp_create(), etc., to put a thread taken from
  960  * lwp_deathrow onto thread_deathrow.  The thread's state has already been
  961  * changed to TS_FREE to indicate that it is reapable.  The caller already
  962  * holds the reaplock, and the thread was already freed.
 963  */
 964 void
 965 reapq_move_lq_to_tq(kthread_t *t)
 966 {
 967         ASSERT(t->t_state == TS_FREE);
 968         ASSERT(MUTEX_HELD(&reaplock));
 969         t->t_forw = thread_deathrow;
 970         thread_deathrow = t;
 971         thread_reapcnt++;
 972         if (lwp_reapcnt + thread_reapcnt > reaplimit)
 973                 cv_signal(&reaper_cv);  /* wake the reaper */
 974 }
 975 
 976 /*
 977  * This is called by resume() to put a zombie thread onto deathrow.
  978  * The thread's state is changed to TS_FREE to indicate that it is reapable.
 979  * This is called from the idle thread so it must not block - just spin.
 980  */
 981 void
 982 reapq_add(kthread_t *t)
 983 {
 984         mutex_enter(&reaplock);
 985 
 986         /*
 987          * lwp_deathrow contains threads with lwp linkage and
 988          * swappable thread stacks which have the default stacksize.
 989          * These threads' lwps and stacks may be reused by lwp_create().
 990          *
 991          * Anything else goes on thread_deathrow(), where it will eventually
 992          * be thread_free()d.
 993          */
 994         if (t->t_flag & T_LWPREUSE) {
 995                 ASSERT(ttolwp(t) != NULL);
 996                 t->t_forw = lwp_deathrow;
 997                 lwp_deathrow = t;
 998                 lwp_reapcnt++;
 999         } else {
1000                 t->t_forw = thread_deathrow;
1001                 thread_deathrow = t;
1002                 thread_reapcnt++;
1003         }
1004         if (lwp_reapcnt + thread_reapcnt > reaplimit)
1005                 cv_signal(&reaper_cv);      /* wake the reaper */
1006         t->t_state = TS_FREE;
1007         lock_clear(&t->t_lock);
1008 
1009         /*
1010          * Before we return, we need to grab and drop the thread lock for
1011          * the dead thread.  At this point, the current thread is the idle
1012          * thread, and the dead thread's CPU lock points to the current
1013          * CPU -- and we must grab and drop the lock to synchronize with
1014          * a racing thread walking a blocking chain that the zombie thread
1015          * was recently in.  By this point, that blocking chain is (by
1016          * definition) stale:  the dead thread is not holding any locks, and
1017          * is therefore not in any blocking chains -- but if we do not regrab
1018          * our lock before freeing the dead thread's data structures, the
1019          * thread walking the (stale) blocking chain will die on memory
1020          * corruption when it attempts to drop the dead thread's lock.  We
1021          * only need do this once because there is no way for the dead thread
1022          * to ever again be on a blocking chain:  once we have grabbed and
1023          * dropped the thread lock, we are guaranteed that anyone that could
1024          * have seen this thread in a blocking chain can no longer see it.
1025          */
1026         thread_lock(t);
1027         thread_unlock(t);
1028 
1029         mutex_exit(&reaplock);
1030 }
1031 
1032 /*
1033  * Install thread context ops for the current thread.
1034  */
1035 void
1036 installctx(
1037         kthread_t *t,
1038         void    *arg,
1039         void    (*save)(void *),
1040         void    (*restore)(void *),
1041         void    (*fork)(void *, void *),
1042         void    (*lwp_create)(void *, void *),
1043         void    (*exit)(void *),
1044         void    (*free)(void *, int))
1045 {
1046         struct ctxop *ctx;
1047 
1048         ctx = kmem_alloc(sizeof (struct ctxop), KM_SLEEP);
1049         ctx->save_op = save;
1050         ctx->restore_op = restore;
1051         ctx->fork_op = fork;
1052         ctx->lwp_create_op = lwp_create;
1053         ctx->exit_op = exit;
1054         ctx->free_op = free;
1055         ctx->arg = arg;
1056         ctx->next = t->t_ctx;
1057         t->t_ctx = ctx;
1058 }
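      /*
       * Illustrative sketch (not from the original source): a subsystem that
       * must save and restore per-thread hardware state across context
       * switches might install a ctxop on curthread (my_state_t, my_save,
       * my_restore and my_free are hypothetical):
       *
       *      my_state_t *st = kmem_zalloc(sizeof (*st), KM_SLEEP);
       *
       *      installctx(curthread, st, my_save, my_restore, NULL, NULL,
       *          NULL, my_free);
       *
       * and later tear it down with a matching removectx() call that passes
       * the same argument and operation pointers so the right ctxop is found:
       *
       *      (void) removectx(curthread, st, my_save, my_restore, NULL, NULL,
       *          NULL, my_free);
       */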
1059 
1060 /*
1061  * Remove the thread context ops from a thread.
1062  */
1063 int
1064 removectx(
1065         kthread_t *t,
1066         void    *arg,
1067         void    (*save)(void *),
1068         void    (*restore)(void *),
1069         void    (*fork)(void *, void *),
1070         void    (*lwp_create)(void *, void *),
1071         void    (*exit)(void *),
1072         void    (*free)(void *, int))
1073 {
1074         struct ctxop *ctx, *prev_ctx;
1075 
1076         /*
1077          * The incoming kthread_t (which is the thread for which the
1078          * context ops will be removed) should be one of the following:
1079          *
1080          * a) the current thread,
1081          *
1082          * b) a thread of a process that's being forked (SIDL),
1083          *
1084          * c) a thread that belongs to the same process as the current
1085          *    thread and for which the current thread is the agent thread,
1086          *
1087          * d) a thread that is TS_STOPPED which is indicative of it
1088          *    being (if curthread is not an agent) a thread being created
1089          *    as part of an lwp creation.
1090          */
1091         ASSERT(t == curthread || ttoproc(t)->p_stat == SIDL ||
1092             ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);
1093 
1094         /*
1095          * Serialize modifications to t->t_ctx to prevent the agent thread
1096          * and the target thread from racing with each other during lwp exit.
1097          */
1098         mutex_enter(&t->t_ctx_lock);
1099         prev_ctx = NULL;
1100         kpreempt_disable();
1101         for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next) {
1102                 if (ctx->save_op == save && ctx->restore_op == restore &&
1103                     ctx->fork_op == fork && ctx->lwp_create_op == lwp_create &&
1104                     ctx->exit_op == exit && ctx->free_op == free &&
1105                     ctx->arg == arg) {
1106                         if (prev_ctx)
1107                                 prev_ctx->next = ctx->next;
1108                         else
1109                                 t->t_ctx = ctx->next;
1110                         mutex_exit(&t->t_ctx_lock);
1111                         if (ctx->free_op != NULL)
1112                                 (ctx->free_op)(ctx->arg, 0);
1113                         kmem_free(ctx, sizeof (struct ctxop));
1114                         kpreempt_enable();
1115                         return (1);
1116                 }
1117                 prev_ctx = ctx;
1118         }
1119         mutex_exit(&t->t_ctx_lock);
1120         kpreempt_enable();
1121 
1122         return (0);
1123 }
1124 
1125 void
1126 savectx(kthread_t *t)
1127 {
1128         struct ctxop *ctx;
1129 
1130         ASSERT(t == curthread);
1131         for (ctx = t->t_ctx; ctx != 0; ctx = ctx->next)
1132                 if (ctx->save_op != NULL)
1133                         (ctx->save_op)(ctx->arg);
1134 }
1135 
1136 void
1137 restorectx(kthread_t *t)
1138 {
1139         struct ctxop *ctx;
1140 
1141         ASSERT(t == curthread);
1142         for (ctx = t->t_ctx; ctx != 0; ctx = ctx->next)
1143                 if (ctx->restore_op != NULL)
1144                         (ctx->restore_op)(ctx->arg);
1145 }
1146 
1147 void
1148 forkctx(kthread_t *t, kthread_t *ct)
1149 {
1150         struct ctxop *ctx;
1151 
1152         for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
1153                 if (ctx->fork_op != NULL)
1154                         (ctx->fork_op)(t, ct);
1155 }
1156 
1157 /*
 1158  * Note that this operation is only invoked via the _lwp_create
 1159  * system call.  The system may have other reasons to create lwps,
 1160  * e.g. the agent lwp or the doors unreferenced lwp.
1161  */
1162 void
1163 lwp_createctx(kthread_t *t, kthread_t *ct)
1164 {
1165         struct ctxop *ctx;
1166 
1167         for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
1168                 if (ctx->lwp_create_op != NULL)
1169                         (ctx->lwp_create_op)(t, ct);
1170 }
1171 
1172 /*
1173  * exitctx is called from thread_exit() and lwp_exit() to perform any actions
1174  * needed when the thread/LWP leaves the processor for the last time. This
1175  * routine is not intended to deal with freeing memory; freectx() is used for
1176  * that purpose during thread_free(). This routine is provided to allow for
1177  * clean-up that can't wait until thread_free().
1178  */
1179 void
1180 exitctx(kthread_t *t)
1181 {
1182         struct ctxop *ctx;
1183 
1184         for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
1185                 if (ctx->exit_op != NULL)
1186                         (ctx->exit_op)(t);
1187 }
1188 
1189 /*
1190  * freectx is called from thread_free() and exec() to get
1191  * rid of old thread context ops.
1192  */
1193 void
1194 freectx(kthread_t *t, int isexec)
1195 {
1196         struct ctxop *ctx;
1197 
1198         kpreempt_disable();
1199         while ((ctx = t->t_ctx) != NULL) {
1200                 t->t_ctx = ctx->next;
1201                 if (ctx->free_op != NULL)
1202                         (ctx->free_op)(ctx->arg, isexec);
1203                 kmem_free(ctx, sizeof (struct ctxop));
1204         }
1205         kpreempt_enable();
1206 }
1207 
1208 /*
1209  * freectx_ctx is called from lwp_create() when lwp is reused from
1210  * lwp_deathrow and its thread structure is added to thread_deathrow.
 1211  * The thread structure to which this ctx was attached may already have
 1212  * been freed by the thread reaper, so free_op implementations shouldn't
 1213  * rely on that thread structure still being around.
1214  */
1215 void
1216 freectx_ctx(struct ctxop *ctx)
1217 {
1218         struct ctxop *nctx;
1219 
1220         ASSERT(ctx != NULL);
1221 
1222         kpreempt_disable();
1223         do {
1224                 nctx = ctx->next;
1225                 if (ctx->free_op != NULL)
1226                         (ctx->free_op)(ctx->arg, 0);
1227                 kmem_free(ctx, sizeof (struct ctxop));
1228         } while ((ctx = nctx) != NULL);
1229         kpreempt_enable();
1230 }
1231 
1232 /*
1233  * Set the thread running; arrange for it to be swapped in if necessary.
1234  */
1235 void
1236 setrun_locked(kthread_t *t)
1237 {
1238         ASSERT(THREAD_LOCK_HELD(t));
1239         if (t->t_state == TS_SLEEP) {
1240                 /*
1241                  * Take off sleep queue.
1242                  */
1243                 SOBJ_UNSLEEP(t->t_sobj_ops, t);
1244         } else if (t->t_state & (TS_RUN | TS_ONPROC)) {
1245                 /*
1246                  * Already on dispatcher queue.
1247                  */
1248                 return;
1249         } else if (t->t_state == TS_WAIT) {
1250                 waitq_setrun(t);
1251         } else if (t->t_state == TS_STOPPED) {
1252                 /*
 1253                  * All of the continue mechanisms, SIGCONT (TS_XSTART),
 1254                  * /proc (TS_PSTART) and lwp_continue() (TS_CSTART), must
 1255                  * have requested that the thread be run.
 1256                  * Just calling setrun() is not sufficient to set a stopped
 1257                  * thread running.  TS_XSTART is always set if the thread
 1258                  * is not stopped by a jobcontrol stop signal.
 1259                  * TS_PSTART is always set if /proc is not controlling it.
 1260                  * TS_CSTART is always set if lwp_suspend() didn't stop it.
1261                  * The thread won't be stopped unless one of these
1262                  * three mechanisms did it.
1263                  *
1264                  * These flags must be set before calling setrun_locked(t).
1265                  * They can't be passed as arguments because the streams
1266                  * code calls setrun() indirectly and the mechanism for
1267                  * doing so admits only one argument.  Note that the
1268                  * thread must be locked in order to change t_schedflags.
1269                  */
1270                 if ((t->t_schedflag & TS_ALLSTART) != TS_ALLSTART)
1271                         return;
1272                 /*
1273                  * Process is no longer stopped (a thread is running).
1274                  */
1275                 t->t_whystop = 0;
1276                 t->t_whatstop = 0;
1277                 /*
1278                  * Strictly speaking, we do not have to clear these
1279                  * flags here; they are cleared on entry to stop().
1280                  * However, they are confusing when doing kernel
1281                  * debugging or when they are revealed by ps(1).
1282                  */
1283                 t->t_schedflag &= ~TS_ALLSTART;
1284                 THREAD_TRANSITION(t);   /* drop stopped-thread lock */
1285                 ASSERT(t->t_lockp == &transition_lock);
1286                 ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL);
1287                 /*
1288                  * Let the class put the process on the dispatcher queue.
1289                  */
1290                 CL_SETRUN(t);
1291         }
1292 }
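      /*
       * Illustrative sketch (not from the original source): a continue path
       * such as lwp_continue() first sets its own start flag under the thread
       * lock and only then calls setrun_locked(); the thread is actually set
       * running only once every bit in TS_ALLSTART is present:
       *
       *      thread_lock(t);
       *      t->t_schedflag |= TS_CSTART;
       *      setrun_locked(t);
       *      thread_unlock(t);
       */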
1293 
1294 void
1295 setrun(kthread_t *t)
1296 {
1297         thread_lock(t);
1298         setrun_locked(t);
1299         thread_unlock(t);
1300 }
1301 
1302 /*
1303  * Unpin an interrupted thread.
1304  *      When an interrupt occurs, the interrupt is handled on the stack
1305  *      of an interrupt thread, taken from a pool linked to the CPU structure.
1306  *
1307  *      When swtch() is switching away from an interrupt thread because it
1308  *      blocked or was preempted, this routine is called to complete the
1309  *      saving of the interrupted thread state, and returns the interrupted
1310  *      thread pointer so it may be resumed.
1311  *
1312  *      Called by swtch() only at high spl.
1313  */
1314 kthread_t *
1315 thread_unpin()
1316 {
1317         kthread_t       *t = curthread; /* current thread */
1318         kthread_t       *itp;           /* interrupted thread */
1319         int             i;              /* interrupt level */
1320         extern int      intr_passivate();
1321 
1322         ASSERT(t->t_intr != NULL);
1323 
1324         itp = t->t_intr;             /* interrupted thread */
1325         t->t_intr = NULL;            /* clear interrupt ptr */
1326 
1327         ht_end_intr();
1328 
1329         /*
1330          * Get state from interrupt thread for the one
1331          * it interrupted.
1332          */
1333 
1334         i = intr_passivate(t, itp);
1335 
1336         TRACE_5(TR_FAC_INTR, TR_INTR_PASSIVATE,
1337             "intr_passivate:level %d curthread %p (%T) ithread %p (%T)",
1338             i, t, t, itp, itp);
1339 
1340         /*
1341          * Dissociate the current thread from the interrupted thread's LWP.
1342          */
1343         t->t_lwp = NULL;
1344 
1345         /*
1346          * Interrupt handlers above the level that spinlocks block must
1347          * not block.
1348          */
1349 #if DEBUG
1350         if (i < 0 || i > LOCK_LEVEL)
1351                 cmn_err(CE_PANIC, "thread_unpin: ipl out of range %x", i);
1352 #endif
1353 
1354         /*
1355          * Compute the CPU's base interrupt level based on the active
1356          * interrupts.
1357          */
1358         ASSERT(CPU->cpu_intr_actv & (1 << i));
1359         set_base_spl();
1360 
1361         return (itp);
1362 }
1363 
1364 /*
1365  * Create and initialize an interrupt thread.
 1366  *      Links the new thread onto the CPU's interrupt thread pool.
1367  *      Called at spl7() or better.
1368  */
1369 void
1370 thread_create_intr(struct cpu *cp)
1371 {
1372         kthread_t *tp;
1373 
1374         tp = thread_create(NULL, 0,
1375             (void (*)())thread_create_intr, NULL, 0, &p0, TS_ONPROC, 0);
1376 
1377         /*
1378          * Set the thread in the TS_FREE state.  The state will change
1379          * to TS_ONPROC only while the interrupt is active.  Think of these
1380          * as being on a private free list for the CPU.  Being TS_FREE keeps
1381          * inactive interrupt threads out of debugger thread lists.
1382          *
1383          * We cannot call thread_create with TS_FREE because of the current
1384          * checks there for ONPROC.  Fix this when thread_create takes flags.
1385          */
1386         THREAD_FREEINTR(tp, cp);
1387 
1388         /*
1389          * Nobody should ever reference the credentials of an interrupt
1390          * thread so make it NULL to catch any such references.
1391          */
1392         tp->t_cred = NULL;
1393         tp->t_flag |= T_INTR_THREAD;
1394         tp->t_cpu = cp;
1395         tp->t_bound_cpu = cp;
1396         tp->t_disp_queue = cp->cpu_disp;
1397         tp->t_affinitycnt = 1;
1398         tp->t_preempt = 1;
1399 
1400         /*
1401          * Don't make a user-requested binding on this thread so that
1402          * the processor can be offlined.
1403          */
1404         tp->t_bind_cpu = PBIND_NONE; /* no USER-requested binding */
1405         tp->t_bind_pset = PS_NONE;
1406 
1407 #if defined(__i386) || defined(__amd64)
1408         tp->t_stk -= STACK_ALIGN;
1409         *(tp->t_stk) = 0;            /* terminate intr thread stack */
1410 #endif
1411 
1412         /*
1413          * Link onto CPU's interrupt pool.
1414          */
1415         tp->t_link = cp->cpu_intr_thread;
1416         cp->cpu_intr_thread = tp;
1417 }
1418 
1419 /*
1420  * TSD -- THREAD SPECIFIC DATA
1421  */
1422 static kmutex_t         tsd_mutex;       /* linked list spin lock */
1423 static uint_t           tsd_nkeys;       /* size of destructor array */
1424 /* per-key destructor funcs */
1425 static void             (**tsd_destructor)(void *);
1426 /* list of tsd_thread's */
1427 static struct tsd_thread        *tsd_list;
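      /*
       * Illustrative sketch (not from the original source): a typical TSD
       * consumer creates one key for the lifetime of the module and attaches
       * per-thread data lazily (my_key, my_data_t and my_destroy are
       * hypothetical):
       *
       *      static uint_t my_key;
       *
       *      tsd_create(&my_key, my_destroy);        (at module init)
       *
       *      my_data_t *d = tsd_get(my_key);
       *      if (d == NULL) {
       *              d = kmem_zalloc(sizeof (*d), KM_SLEEP);
       *              (void) tsd_set(my_key, d);
       *      }
       *
       *      tsd_destroy(&my_key);                   (at module unload)
       *
       * The destructor is applied to each thread's value by tsd_exit() when a
       * thread exits and by tsd_destroy() when the key itself is torn down.
       */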
1428 
1429 /*
1430  * Default destructor
1431  *      Needed because NULL destructor means that the key is unused
1432  */
1433 /* ARGSUSED */
1434 void
1435 tsd_defaultdestructor(void *value)
1436 {}
1437 
1438 /*
1439  * Create a key (index into per thread array)
1440  *      Locks out tsd_create, tsd_destroy, and tsd_exit
1441  *      May allocate memory with lock held
1442  */
1443 void
1444 tsd_create(uint_t *keyp, void (*destructor)(void *))
1445 {
1446         int     i;
1447         uint_t  nkeys;
1448 
1449         /*
1450          * if key is allocated, do nothing
1451          */
1452         mutex_enter(&tsd_mutex);
1453         if (*keyp) {
1454                 mutex_exit(&tsd_mutex);
1455                 return;
1456         }
1457         /*
1458          * find an unused key
1459          */
1460         if (destructor == NULL)
1461                 destructor = tsd_defaultdestructor;
1462 
1463         for (i = 0; i < tsd_nkeys; ++i)
1464                 if (tsd_destructor[i] == NULL)
1465                         break;
1466 
1467         /*
1468          * if no unused keys, increase the size of the destructor array
1469          */
1470         if (i == tsd_nkeys) {
1471                 if ((nkeys = (tsd_nkeys << 1)) == 0)
1472                         nkeys = 1;
1473                 tsd_destructor =
1474                     (void (**)(void *))tsd_realloc((void *)tsd_destructor,
1475                     (size_t)(tsd_nkeys * sizeof (void (*)(void *))),
1476                     (size_t)(nkeys * sizeof (void (*)(void *))));
1477                 tsd_nkeys = nkeys;
1478         }
1479 
1480         /*
1481          * allocate the next available unused key
1482          */
1483         tsd_destructor[i] = destructor;
1484         *keyp = i + 1;
1485         mutex_exit(&tsd_mutex);
1486 }
1487 
1488 /*
1489  * Destroy a key -- this is for unloadable modules
1490  *
1491  * Assumes that the caller is preventing tsd_set and tsd_get
1492  * Locks out tsd_create, tsd_destroy, and tsd_exit
1493  * May free memory with lock held
1494  */
1495 void
1496 tsd_destroy(uint_t *keyp)
1497 {
1498         uint_t key;
1499         struct tsd_thread *tsd;
1500 
1501         /*
1502          * protect the key namespace and our destructor lists
1503          */
1504         mutex_enter(&tsd_mutex);
1505         key = *keyp;
1506         *keyp = 0;
1507 
1508         ASSERT(key <= tsd_nkeys);
1509 
1510         /*
1511          * if the key is valid
1512          */
1513         if (key != 0) {
1514                 uint_t k = key - 1;
1515                 /*
1516                  * for every thread with TSD, call key's destructor
1517                  */
1518                 for (tsd = tsd_list; tsd; tsd = tsd->ts_next) {
1519                         /*
1520                          * no TSD for key in this thread
1521                          */
1522                         if (key > tsd->ts_nkeys)
1523                                 continue;
1524                         /*
1525                          * call destructor for key
1526                          */
1527                         if (tsd->ts_value[k] && tsd_destructor[k])
1528                                 (*tsd_destructor[k])(tsd->ts_value[k]);
1529                         /*
1530                          * reset value for key
1531                          */
1532                         tsd->ts_value[k] = NULL;
1533                 }
1534                 /*
1535                  * actually free the key (NULL destructor == unused)
1536                  */
1537                 tsd_destructor[k] = NULL;
1538         }
1539 
1540         mutex_exit(&tsd_mutex);
1541 }
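
     /*
      * Hypothetical usage sketch (illustrative only; mymod_tsd_key is an
      * assumed name): an unloadable module would release its key from its
      * teardown path, e.g.
      *
      *         tsd_destroy(&mymod_tsd_key);
      *
      * which runs the key's destructor for every thread still holding a
      * value and then marks the key slot unused, as described above.
      */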
1542 
1543 /*
1544  * Quickly return the per thread value that was stored with the specified key
1545  * Assumes the caller is protecting key from tsd_create and tsd_destroy
1546  */
1547 void *
1548 tsd_get(uint_t key)
1549 {
1550         return (tsd_agent_get(curthread, key));
1551 }
1552 
1553 /*
1554  * Set a per thread value indexed with the specified key
1555  */
1556 int
1557 tsd_set(uint_t key, void *value)
1558 {
1559         return (tsd_agent_set(curthread, key, value));
1560 }
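
     /*
      * Hypothetical per-thread usage sketch (illustrative only; mymod_tsd_key
      * and mymod_state_t are assumed names): the common get-or-allocate
      * pattern looks roughly like
      *
      *         mymod_state_t *sp;
      *
      *         if ((sp = tsd_get(mymod_tsd_key)) == NULL) {
      *                 sp = kmem_zalloc(sizeof (*sp), KM_SLEEP);
      *                 (void) tsd_set(mymod_tsd_key, sp);
      *         }
      *
      * The value lives in curthread's TSD; if the key was created with a
      * destructor, the value is released by tsd_exit() at thread exit.
      */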
1561 
1562 /*
1563  * Like tsd_get(), except that the agent lwp can get the tsd of another
1564  * thread in the same process (the agent thread only runs when the process
1565  * is completely stopped by /proc), or when syslwp is creating a new lwp.
1566  */
1567 void *
1568 tsd_agent_get(kthread_t *t, uint_t key)
1569 {
1570         struct tsd_thread *tsd = t->t_tsd;
1571 
1572         ASSERT(t == curthread ||
1573             ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);
1574 
1575         if (key && tsd != NULL && key <= tsd->ts_nkeys)
1576                 return (tsd->ts_value[key - 1]);
1577         return (NULL);
1578 }
1579 
1580 /*
1581  * Like tsd_set(), except that the agent lwp can set the tsd of
1582  * another thread in the same process, or syslwp can set the tsd
1583  * of a thread it's in the middle of creating.
1584  *
1585  * Assumes the caller is protecting key from tsd_create and tsd_destroy
1586  * May lock out tsd_destroy (and tsd_create), may allocate memory with
1587  * lock held
1588  */
1589 int
1590 tsd_agent_set(kthread_t *t, uint_t key, void *value)
1591 {
1592         struct tsd_thread *tsd = t->t_tsd;
1593 
1594         ASSERT(t == curthread ||
1595             ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);
1596 
1597         if (key == 0)
1598                 return (EINVAL);
1599         if (tsd == NULL)
1600                 tsd = t->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
1601         if (key <= tsd->ts_nkeys) {
1602                 tsd->ts_value[key - 1] = value;
1603                 return (0);
1604         }
1605 
1606         ASSERT(key <= tsd_nkeys);
1607 
1608         /*
1609          * lock out tsd_destroy()
1610          */
1611         mutex_enter(&tsd_mutex);
1612         if (tsd->ts_nkeys == 0) {
1613                 /*
1614                  * Link onto list of threads with TSD
1615                  */
1616                 if ((tsd->ts_next = tsd_list) != NULL)
1617                         tsd_list->ts_prev = tsd;
1618                 tsd_list = tsd;
1619         }
1620 
1621         /*
1622          * Allocate thread local storage and set the value for key
1623          */
1624         tsd->ts_value = tsd_realloc(tsd->ts_value,
1625             tsd->ts_nkeys * sizeof (void *),
1626             key * sizeof (void *));
1627         tsd->ts_nkeys = key;
1628         tsd->ts_value[key - 1] = value;
1629         mutex_exit(&tsd_mutex);
1630 
1631         return (0);
1632 }
1633 
1634 
1635 /*
1636  * Return the per thread value that was stored with the specified key
1637  *      If necessary, create the key and the value
1638  *      Assumes the caller is protecting *keyp from tsd_destroy
1639  */
1640 void *
1641 tsd_getcreate(uint_t *keyp, void (*destroy)(void *), void *(*allocate)(void))
1642 {
1643         void *value;
1644         uint_t key = *keyp;
1645         struct tsd_thread *tsd = curthread->t_tsd;
1646 
1647         if (tsd == NULL)
1648                 tsd = curthread->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
1649         if (key && key <= tsd->ts_nkeys && (value = tsd->ts_value[key - 1]))
1650                 return (value);
1651         if (key == 0)
1652                 tsd_create(keyp, destroy);
1653         (void) tsd_set(*keyp, value = (*allocate)());
1654 
1655         return (value);
1656 }
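
     /*
      * Hypothetical sketch (illustrative only; the mymod_* names are
      * assumed): tsd_getcreate() collapses the create/get/allocate/set
      * sequence above into a single call, e.g.
      *
      *         sp = tsd_getcreate(&mymod_tsd_key, mymod_tsd_destroy,
      *             mymod_alloc);
      *
      * where mymod_alloc() returns a freshly allocated per-thread structure.
      * The caller must still prevent a concurrent tsd_destroy() of the key.
      */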
1657 
1658 /*
1659  * Called from thread_exit() to run the destructor function for each tsd
1660  *      Locks out tsd_create and tsd_destroy
1661  *      Assumes that the destructor *DOES NOT* use tsd
1662  */
1663 void
1664 tsd_exit(void)
1665 {
1666         int i;
1667         struct tsd_thread *tsd = curthread->t_tsd;
1668 
1669         if (tsd == NULL)
1670                 return;
1671 
1672         if (tsd->ts_nkeys == 0) {
1673                 kmem_free(tsd, sizeof (*tsd));
1674                 curthread->t_tsd = NULL;
1675                 return;
1676         }
1677 
1678         /*
1679          * lock out tsd_create and tsd_destroy, call
1680          * the destructor, and mark the value as destroyed.
1681          */
1682         mutex_enter(&tsd_mutex);
1683 
1684         for (i = 0; i < tsd->ts_nkeys; i++) {
1685                 if (tsd->ts_value[i] && tsd_destructor[i])
1686                         (*tsd_destructor[i])(tsd->ts_value[i]);
1687                 tsd->ts_value[i] = NULL;
1688         }
1689 
1690         /*
1691          * remove from linked list of threads with TSD
1692          */
1693         if (tsd->ts_next)
1694                 tsd->ts_next->ts_prev = tsd->ts_prev;
1695         if (tsd->ts_prev)
1696                 tsd->ts_prev->ts_next = tsd->ts_next;
1697         if (tsd_list == tsd)
1698                 tsd_list = tsd->ts_next;
1699 
1700         mutex_exit(&tsd_mutex);
1701 
1702         /*
1703          * free up the TSD
1704          */
1705         kmem_free(tsd->ts_value, tsd->ts_nkeys * sizeof (void *));
1706         kmem_free(tsd, sizeof (struct tsd_thread));
1707         curthread->t_tsd = NULL;
1708 }
1709 
1710 /*
1711  * realloc: allocate a zeroed nsize buffer, copy osize bytes, free the old one
1712  */
1713 static void *
1714 tsd_realloc(void *old, size_t osize, size_t nsize)
1715 {
1716         void *new;
1717 
1718         new = kmem_zalloc(nsize, KM_SLEEP);
1719         if (old) {
1720                 bcopy(old, new, osize);
1721                 kmem_free(old, osize);
1722         }
1723         return (new);
1724 }
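
     /*
      * Growth illustration (derived from the callers above): tsd_create()
      * doubles the global destructor array (1, 2, 4, 8, ... entries), while
      * tsd_agent_set() grows a thread's ts_value array only as far as the
      * key being set.  For example, setting key 5 on a thread with
      * ts_nkeys == 2 reallocates from 2 * sizeof (void *) to
      * 5 * sizeof (void *), with the new slots zero-filled before
      * ts_value[4] receives the value.
      */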
1725 
1726 /*
1727  * Return non-zero if an interrupt is being serviced.
1728  */
1729 int
1730 servicing_interrupt()
1731 {
1732         int onintr = 0;
1733 
1734         /* Are we an interrupt thread? */
1735         if (curthread->t_flag & T_INTR_THREAD)
1736                 return (1);
1737         /* Are we servicing a high level interrupt? */
1738         if (CPU_ON_INTR(CPU)) {
1739                 kpreempt_disable();
1740                 onintr = CPU_ON_INTR(CPU);
1741                 kpreempt_enable();
1742         }
1743         return (onintr);
1744 }
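
     /*
      * Hypothetical caller sketch (illustrative only; buf and size are
      * assumed locals): code that may run in either thread or interrupt
      * context can use servicing_interrupt() to pick a non-blocking
      * allocation policy, e.g.
      *
      *         buf = kmem_alloc(size,
      *             servicing_interrupt() ? KM_NOSLEEP : KM_SLEEP);
      *
      * remembering that a KM_NOSLEEP allocation can return NULL and must
      * be checked.
      */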
1745 
1746 
1747 /*
1748  * Change the dispatch priority of a thread in the system.
1749  * Used when raising or lowering a thread's priority.
1750  * (E.g., priority inheritance)
1751  *
1752  * Since threads are queued according to their priority, we
1753  * must check the thread's state to determine whether it
1754  * is on a queue somewhere. If it is, we've got to:
1755  *
1756  *      o Dequeue the thread.
1757  *      o Change its effective priority.
1758  *      o Enqueue the thread.
1759  *
1760  * Assumptions: The thread whose priority we wish to change
1761  * must be locked before we call thread_change_(e)pri().
1762  * The thread_change(e)pri() function doesn't drop the thread
1763  * lock--that must be done by its caller.
1764  */
1765 void
1766 thread_change_epri(kthread_t *t, pri_t disp_pri)
1767 {
1768         uint_t  state;
1769 
1770         ASSERT(THREAD_LOCK_HELD(t));
1771 
1772         /*
1773          * If the inherited priority hasn't actually changed,
1774          * just return.
1775          */
1776         if (t->t_epri == disp_pri)
1777                 return;
1778 
1779         state = t->t_state;
1780 
1781         /*
1782          * If it's not on a queue, change the priority with impunity.
1783          */
1784         if ((state & (TS_SLEEP | TS_RUN | TS_WAIT)) == 0) {
1785                 t->t_epri = disp_pri;
1786                 if (state == TS_ONPROC) {
1787                         cpu_t *cp = t->t_disp_queue->disp_cpu;
1788 
1789                         if (t == cp->cpu_dispthread)
1790                                 cp->cpu_dispatch_pri = DISP_PRIO(t);
1791                 }
1792         } else if (state == TS_SLEEP) {
1793                 /*
1794                  * Take the thread out of its sleep queue.
1795                  * Change the inherited priority.
1796                  * Re-enqueue the thread.
1797                  * Each synchronization object exports a function
1798                  * to do this in an appropriate manner.
1799                  */
1800                 SOBJ_CHANGE_EPRI(t->t_sobj_ops, t, disp_pri);
1801         } else if (state == TS_WAIT) {
1802                 /*
1803                  * Re-enqueue a thread on the wait queue if its
1804                  * effective priority needs to change.
1805                  */
1806                 if (disp_pri != t->t_epri)
1807                         waitq_change_pri(t, disp_pri);
1808         } else {
1809                 /*
1810                  * The thread is on a run queue.
1811                  * Note: setbackdq() may not put the thread
1812                  * back on the same run queue where it originally
1813                  * resided.
1814                  */
1815                 (void) dispdeq(t);
1816                 t->t_epri = disp_pri;
1817                 setbackdq(t);
1818         }
1819         schedctl_set_cidpri(t);
1820 }
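
     /*
      * Hypothetical caller sketch (illustrative only; new_epri is an assumed
      * local): per the assumptions above, the thread must be locked across
      * the call and unlocked by the caller, e.g.
      *
      *         thread_lock(t);
      *         thread_change_epri(t, new_epri);
      *         thread_unlock(t);
      */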
1821 
1822 /*
1823  * Function: Change the t_pri field of a thread.
1824  * Side Effects: Adjust the thread ordering on a run queue
1825  *               or sleep queue, if necessary.
1826  * Returns: 1 if the thread was on a run queue, else 0.
1827  */
1828 int
1829 thread_change_pri(kthread_t *t, pri_t disp_pri, int front)
1830 {
1831         uint_t  state;
1832         int     on_rq = 0;
1833 
1834         ASSERT(THREAD_LOCK_HELD(t));
1835 
1836         state = t->t_state;
1837         THREAD_WILLCHANGE_PRI(t, disp_pri);
1838 
1839         /*
1840          * If it's not on a queue, change the priority with impunity.
1841          */
1842         if ((state & (TS_SLEEP | TS_RUN | TS_WAIT)) == 0) {
1843                 t->t_pri = disp_pri;
1844 
1845                 if (state == TS_ONPROC) {
1846                         cpu_t *cp = t->t_disp_queue->disp_cpu;
1847 
1848                         if (t == cp->cpu_dispthread)
1849                                 cp->cpu_dispatch_pri = DISP_PRIO(t);
1850                 }
1851         } else if (state == TS_SLEEP) {
1852                 /*
1853                  * If the priority has changed, take the thread out of
1854                  * its sleep queue and change the priority.
1855                  * Re-enqueue the thread.
1856                  * Each synchronization object exports a function
1857                  * to do this in an appropriate manner.
1858                  */
1859                 if (disp_pri != t->t_pri)
1860                         SOBJ_CHANGE_PRI(t->t_sobj_ops, t, disp_pri);
1861         } else if (state == TS_WAIT) {
1862                 /*
1863                  * Re-enqueue a thread on the wait queue if its
1864                  * priority needs to change.
1865                  */
1866                 if (disp_pri != t->t_pri)
1867                         waitq_change_pri(t, disp_pri);
1868         } else {
1869                 /*
1870                  * The thread is on a run queue.
1871                  * Note: setbackdq() may not put the thread
1872                  * back on the same run queue where it originally
1873                  * resided.
1874                  *
1875                  * We still requeue the thread even if the priority
1876                  * is unchanged to preserve round-robin (and other)
1877                  * effects between threads of the same priority.
1878                  */
1879                 on_rq = dispdeq(t);
1880                 ASSERT(on_rq);
1881                 t->t_pri = disp_pri;
1882                 if (front) {
1883                         setfrontdq(t);
1884                 } else {
1885                         setbackdq(t);
1886                 }
1887         }
1888         schedctl_set_cidpri(t);
1889         return (on_rq);
1890 }
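
     /*
      * Hypothetical caller sketch (illustrative only; newpri and on_rq are
      * assumed locals): a scheduling class applying a newly computed
      * priority might do
      *
      *         thread_lock(t);
      *         on_rq = thread_change_pri(t, newpri, 0);
      *         thread_unlock(t);
      *
      * Passing a non-zero third argument would requeue a runnable thread at
      * the front of its dispatch queue instead of the back.
      */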
1891 
1892 /*
1893  * When the tunable kmem_stackinfo is set, fill the kernel thread stack
1894  * with a specific pattern.
1895  */
1896 static void
1897 stkinfo_begin(kthread_t *t)
1898 {
1899         caddr_t start;  /* stack start */
1900         caddr_t end;    /* stack end  */
1901         uint64_t *ptr;  /* pattern pointer */
1902 
1903         /*
1904          * Stack grows up or down, see thread_create(),
1905          * compute stack memory area start and end (start < end).
1906          */
1907         if (t->t_stk > t->t_stkbase) {
1908                 /* stack grows down */
1909                 start = t->t_stkbase;
1910                 end = t->t_stk;
1911         } else {
1912                 /* stack grows up */
1913                 start = t->t_stk;
1914                 end = t->t_stkbase;
1915         }
1916 
1917         /*
1918          * Stackinfo pattern size is 8 bytes. Ensure proper 8-byte
1919          * alignment for start and end within the stack area boundaries
1920          * (protection against corrupt t_stkbase/t_stk data).
1921          */
1922         if ((((uintptr_t)start) & 0x7) != 0) {
1923                 start = (caddr_t)((((uintptr_t)start) & (~0x7)) + 8);
1924         }
1925         end = (caddr_t)(((uintptr_t)end) & (~0x7));
1926 
1927         if ((end <= start) || (end - start) > (1024 * 1024)) {
1928                 /* zero/negative size or stack size > 1MB, assume bogus */
1929                 return;
1930         }
1931 
1932         /* fill stack area with a pattern (instead of zeros) */
1933         ptr = (uint64_t *)((void *)start);
1934         while (ptr < (uint64_t *)((void *)end)) {
1935                 *ptr++ = KMEM_STKINFO_PATTERN;
1936         }
1937 }
1938 
1939 
1940 /*
1941  * When the tunable kmem_stackinfo is set, create the stackinfo log if it
1942  * doesn't already exist, compute the percentage of kernel stack actually
1943  * used, and record it in the log if it is among the highest usages seen.
1944  */
1945 static void
1946 stkinfo_end(kthread_t *t)
1947 {
1948         caddr_t start;  /* stack start */
1949         caddr_t end;    /* stack end  */
1950         uint64_t *ptr;  /* pattern pointer */
1951         size_t stksz;   /* stack size */
1952         size_t smallest = 0;
1953         size_t percent = 0;
1954         uint_t index = 0;
1955         uint_t i;
1956         static size_t smallest_percent = (size_t)-1;
1957         static uint_t full = 0;
1958 
1959         /* create the stackinfo log, if it doesn't already exist */
1960         mutex_enter(&kmem_stkinfo_lock);
1961         if (kmem_stkinfo_log == NULL) {
1962                 kmem_stkinfo_log = (kmem_stkinfo_t *)
1963                     kmem_zalloc(KMEM_STKINFO_LOG_SIZE *
1964                     (sizeof (kmem_stkinfo_t)), KM_NOSLEEP);
1965                 if (kmem_stkinfo_log == NULL) {
1966                         mutex_exit(&kmem_stkinfo_lock);
1967                         return;
1968                 }
1969         }
1970         mutex_exit(&kmem_stkinfo_lock);
1971 
1972         /*
1973          * Stack grows up or down, see thread_create(),
1974          * compute stack memory area start and end (start < end).
1975          */
1976         if (t->t_stk > t->t_stkbase) {
1977                 /* stack grows down */
1978                 start = t->t_stkbase;
1979                 end = t->t_stk;
1980         } else {
1981                 /* stack grows up */
1982                 start = t->t_stk;
1983                 end = t->t_stkbase;
1984         }
1985 
1986         /* stack size as found in kthread_t */
1987         stksz = end - start;
1988 
1989         /*
1990          * Stackinfo pattern size is 8 bytes. Ensure proper 8-byte
1991          * alignment for start and end within the stack area boundaries
1992          * (protection against corrupt t_stkbase/t_stk data).
1993          */
1994         if ((((uintptr_t)start) & 0x7) != 0) {
1995                 start = (caddr_t)((((uintptr_t)start) & (~0x7)) + 8);
1996         }
1997         end = (caddr_t)(((uintptr_t)end) & (~0x7));
1998 
1999         if ((end <= start) || (end - start) > (1024 * 1024)) {
2000                 /* zero/negative size or stack size > 1MB, assume bogus */
2001                 return;
2002         }
2003 
2004         /* scan for the first word that no longer holds the pattern */
2005         if (t->t_stk > t->t_stkbase) {
2006                 /* stack grows down */
2007 #if defined(__i386) || defined(__amd64)
2008                 /*
2009                  * Six longs are pushed on the stack, see thread_load().
2010                  * Skip them, so if the kthread has never run, percent is
2011                  * zero.  8-byte alignment is preserved for a 32-bit
2012                  * kernel, since 6 x 4 = 24 and 24 is a multiple of 8.
2013                  *
2014                  */
2015                 end -= (6 * sizeof (long));
2016 #endif
2017                 ptr = (uint64_t *)((void *)start);
2018                 while (ptr < (uint64_t *)((void *)end)) {
2019                         if (*ptr != KMEM_STKINFO_PATTERN) {
2020                                 percent = stkinfo_percent(end,
2021                                     start, (caddr_t)ptr);
2022                                 break;
2023                         }
2024                         ptr++;
2025                 }
2026         } else {
2027                 /* stack grows up */
2028                 ptr = (uint64_t *)((void *)end);
2029                 ptr--;
2030                 while (ptr >= (uint64_t *)((void *)start)) {
2031                         if (*ptr != KMEM_STKINFO_PATTERN) {
2032                                 percent = stkinfo_percent(start,
2033                                     end, (caddr_t)ptr);
2034                                 break;
2035                         }
2036                         ptr--;
2037                 }
2038         }
2039 
2040         DTRACE_PROBE3(stack__usage, kthread_t *, t,
2041             size_t, stksz, size_t, percent);
2042 
2043         if (percent == 0) {
2044                 return;
2045         }
2046 
2047         mutex_enter(&kmem_stkinfo_lock);
2048         if (full == KMEM_STKINFO_LOG_SIZE && percent < smallest_percent) {
2049                 /*
2050                  * The log is full and already contains the highest values
2051                  */
2052                 mutex_exit(&kmem_stkinfo_lock);
2053                 return;
2054         }
2055 
2056         /* keep a log of the highest stack usages */
2057         for (i = 0; i < KMEM_STKINFO_LOG_SIZE; i++) {
2058                 if (kmem_stkinfo_log[i].percent == 0) {
2059                         index = i;
2060                         full++;
2061                         break;
2062                 }
2063                 if (smallest == 0) {
2064                         smallest = kmem_stkinfo_log[i].percent;
2065                         index = i;
2066                         continue;
2067                 }
2068                 if (kmem_stkinfo_log[i].percent < smallest) {
2069                         smallest = kmem_stkinfo_log[i].percent;
2070                         index = i;
2071                 }
2072         }
2073 
2074         if (percent >= kmem_stkinfo_log[index].percent) {
2075                 kmem_stkinfo_log[index].kthread = (caddr_t)t;
2076                 kmem_stkinfo_log[index].t_startpc = (caddr_t)t->t_startpc;
2077                 kmem_stkinfo_log[index].start = start;
2078                 kmem_stkinfo_log[index].stksz = stksz;
2079                 kmem_stkinfo_log[index].percent = percent;
2080                 kmem_stkinfo_log[index].t_tid = t->t_tid;
2081                 kmem_stkinfo_log[index].cmd[0] = '\0';
2082                 if (t->t_tid != 0) {
2083                         stksz = strlen((t->t_procp)->p_user.u_comm);
2084                         if (stksz >= KMEM_STKINFO_STR_SIZE) {
2085                                 stksz = KMEM_STKINFO_STR_SIZE - 1;
2086                                 kmem_stkinfo_log[index].cmd[stksz] = '\0';
2087                         } else {
2088                                 stksz += 1;
2089                         }
2090                         (void) memcpy(kmem_stkinfo_log[index].cmd,
2091                             (t->t_procp)->p_user.u_comm, stksz);
2092                 }
2093                 if (percent < smallest_percent) {
2094                         smallest_percent = percent;
2095                 }
2096         }
2097         mutex_exit(&kmem_stkinfo_lock);
2098 }
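
     /*
      * Note (a sketch, not normative documentation): these stkinfo routines
      * are only invoked when the global tunable kmem_stackinfo is set (see
      * the comments above), which can typically be arranged at boot with an
      * /etc/system entry along the lines of
      *
      *         set kmem_stackinfo = 1
      *
      * after which kmem_stkinfo_log accumulates the highest stack usages
      * observed.
      */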
2099 
2100 /*
2101  * When the tunable kmem_stackinfo is set, compute the stack usage percentage.
2102  */
2103 static size_t
2104 stkinfo_percent(caddr_t t_stk, caddr_t t_stkbase, caddr_t sp)
2105 {
2106         size_t percent;
2107         size_t s;
2108 
2109         if (t_stk > t_stkbase) {
2110                 /* stack grows down */
2111                 if (sp > t_stk) {
2112                         return (0);
2113                 }
2114                 if (sp < t_stkbase) {
2115                         return (100);
2116                 }
2117                 percent = t_stk - sp + 1;
2118                 s = t_stk - t_stkbase + 1;
2119         } else {
2120                 /* stack grows up */
2121                 if (sp < t_stk) {
2122                         return (0);
2123                 }
2124                 if (sp > t_stkbase) {
2125                         return (100);
2126                 }
2127                 percent = sp - t_stk + 1;
2128                 s = t_stkbase - t_stk + 1;
2129         }
2130         percent = ((100 * percent) / s) + 1;
2131         if (percent > 100) {
2132                 percent = 100;
2133         }
2134         return (percent);
2135 }
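
     /*
      * Worked example (downward-growing stack, following the code above):
      * with t_stk - t_stkbase = 8192 and sp sitting 2048 bytes below t_stk,
      * percent = (100 * 2049) / 8193 + 1 = 26, i.e. roughly a quarter of the
      * stack was dirtied.  The trailing "+ 1" rounds up, so any usage
      * reports at least 1%, and the result is capped at 100.
      */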
2136 
2137 /*
2138  * NOTE: This will silently truncate a name > THREAD_NAME_MAX - 1 characters
2139  * long.  It is expected that callers (acting on behalf of userland clients)
2140  * will perform any required checks to return the correct error semantics.
2141  * It is also expected that callers acting on behalf of userland clients
2142  * have done any necessary permission checks.
2143  */
2144 int
2145 thread_setname(kthread_t *t, const char *name)
2146 {
2147         char *buf = NULL;
2148 
2149         /*
2150          * We optimistically assume that a thread's name will only be set
2151  * once and so allocate memory in preparation for setting t_name.
2152          * If it turns out a name has already been set, we just discard (free)
2153          * the buffer we just allocated and reuse the current buffer
2154          * (as all should be THREAD_NAME_MAX large).
2155          *
2156          * Such an arrangement means over the lifetime of a kthread_t, t_name
2157          * is either NULL or has one value (the address of the buffer holding
2158          * the current thread name).   The assumption is that most kthread_t
2159          * instances will not have a name assigned, so dynamically allocating
2160          * the memory should minimize the footprint of this feature, but by
2161          * having the buffer persist for the life of the thread, it simplifies
2162          * usage in highly constrained situations (e.g. dtrace).
2163          */
2164         if (name != NULL && name[0] != '\0') {
2165                 for (size_t i = 0; name[i] != '\0'; i++) {
2166                         if (!isprint(name[i]))
2167                                 return (EINVAL);
2168                 }
2169 
2170                 buf = kmem_zalloc(THREAD_NAME_MAX, KM_SLEEP);
2171                 (void) strlcpy(buf, name, THREAD_NAME_MAX);
2172         }
2173 
2174         mutex_enter(&ttoproc(t)->p_lock);
2175         if (t->t_name == NULL) {
2176                 t->t_name = buf;
2177         } else {
2178                 if (buf != NULL) {
2179                         (void) strlcpy(t->t_name, name, THREAD_NAME_MAX);
2180                         kmem_free(buf, THREAD_NAME_MAX);
2181                 } else {
2182                         bzero(t->t_name, THREAD_NAME_MAX);
2183                 }
2184         }
2185         mutex_exit(&ttoproc(t)->p_lock);
2186         return (0);
2187 }
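
     /*
      * Hypothetical usage sketch (illustrative only; the name shown is an
      * assumed example): a subsystem naming one of its worker threads might
      * do
      *
      *         (void) thread_setname(t, "mymod_flush");
      *
      * Passing NULL or an empty string clears the name; an already-allocated
      * t_name buffer is retained but zeroed.
      */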
2188 
2189 int
2190 thread_vsetname(kthread_t *t, const char *fmt, ...)
2191 {
2192         char name[THREAD_NAME_MAX];
2193         va_list va;
2194         int rc;
2195 
2196         va_start(va, fmt);
2197         rc = vsnprintf(name, sizeof (name), fmt, va);
2198         va_end(va);
2199 
2200         if (rc < 0)
2201                 return (EINVAL);
2202 
2203         if (rc >= sizeof (name))
2204                 return (ENAMETOOLONG);
2205 
2206         return (thread_setname(t, name));
2207 }
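
     /*
      * Hypothetical usage sketch (illustrative only; id is an assumed
      * local): the formatted variant is convenient for numbered workers,
      * e.g.
      *
      *         (void) thread_vsetname(t, "mymod_worker_%d", id);
      *
      * Names that would not fit in THREAD_NAME_MAX return ENAMETOOLONG here
      * rather than being silently truncated by thread_setname().
      */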