OS-7125 Need mitigation of L1TF (CVE-2018-3646)
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
--- old/usr/src/uts/common/os/cpu.c
+++ new/usr/src/uts/common/os/cpu.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 * Copyright (c) 2012 by Delphix. All rights reserved.
24 24 * Copyright 2018 Joyent, Inc.
25 25 */
26 26
27 27 /*
28 28 * Architecture-independent CPU control functions.
29 29 */
30 30
31 31 #include <sys/types.h>
32 32 #include <sys/param.h>
33 33 #include <sys/var.h>
34 34 #include <sys/thread.h>
35 35 #include <sys/cpuvar.h>
36 36 #include <sys/cpu_event.h>
37 37 #include <sys/kstat.h>
38 38 #include <sys/uadmin.h>
39 39 #include <sys/systm.h>
40 40 #include <sys/errno.h>
41 41 #include <sys/cmn_err.h>
42 42 #include <sys/procset.h>
43 43 #include <sys/processor.h>
44 44 #include <sys/debug.h>
45 45 #include <sys/cpupart.h>
46 46 #include <sys/lgrp.h>
47 47 #include <sys/pset.h>
48 48 #include <sys/pghw.h>
49 49 #include <sys/kmem.h>
50 50 #include <sys/kmem_impl.h> /* to set per-cpu kmem_cache offset */
51 51 #include <sys/atomic.h>
52 52 #include <sys/callb.h>
53 53 #include <sys/vtrace.h>
54 54 #include <sys/cyclic.h>
55 55 #include <sys/bitmap.h>
56 56 #include <sys/nvpair.h>
57 57 #include <sys/pool_pset.h>
58 58 #include <sys/msacct.h>
59 59 #include <sys/time.h>
60 60 #include <sys/archsystm.h>
61 61 #include <sys/sdt.h>
62 62 #if defined(__x86) || defined(__amd64)
63 63 #include <sys/x86_archext.h>
64 64 #endif
65 65 #include <sys/callo.h>
66 66
67 67 extern int mp_cpu_start(cpu_t *);
68 68 extern int mp_cpu_stop(cpu_t *);
69 69 extern int mp_cpu_poweron(cpu_t *);
70 70 extern int mp_cpu_poweroff(cpu_t *);
71 71 extern int mp_cpu_configure(int);
72 72 extern int mp_cpu_unconfigure(int);
73 73 extern void mp_cpu_faulted_enter(cpu_t *);
74 74 extern void mp_cpu_faulted_exit(cpu_t *);
75 75
76 76 extern int cmp_cpu_to_chip(processorid_t cpuid);
77 77 #ifdef __sparcv9
78 78 extern char *cpu_fru_fmri(cpu_t *cp);
79 79 #endif
80 80
81 81 static void cpu_add_active_internal(cpu_t *cp);
82 82 static void cpu_remove_active(cpu_t *cp);
83 83 static void cpu_info_kstat_create(cpu_t *cp);
84 84 static void cpu_info_kstat_destroy(cpu_t *cp);
85 85 static void cpu_stats_kstat_create(cpu_t *cp);
86 86 static void cpu_stats_kstat_destroy(cpu_t *cp);
87 87
88 88 static int cpu_sys_stats_ks_update(kstat_t *ksp, int rw);
89 89 static int cpu_vm_stats_ks_update(kstat_t *ksp, int rw);
90 90 static int cpu_stat_ks_update(kstat_t *ksp, int rw);
91 91 static int cpu_state_change_hooks(int, cpu_setup_t, cpu_setup_t);
92 92
93 93 /*
94 94 * cpu_lock protects ncpus, ncpus_online, cpu_flag, cpu_list, cpu_active,
95 95 * max_cpu_seqid_ever, and dispatch queue reallocations. The lock ordering with
96 96 * respect to related locks is:
97 97 *
98 98 * cpu_lock --> thread_free_lock ---> p_lock ---> thread_lock()
99 99 *
100 100 * Warning: Certain sections of code do not use the cpu_lock when
101 101 * traversing the cpu_list (e.g. mutex_vector_enter(), clock()). Since
102 102 * all cpus are paused during modifications to this list, a solution
 103 103 * to protect the list is to either disable kernel preemption while
104 104 * walking the list, *or* recheck the cpu_next pointer at each
105 105 * iteration in the loop. Note that in no cases can any cached
106 106 * copies of the cpu pointers be kept as they may become invalid.
107 107 */
108 108 kmutex_t cpu_lock;
109 109 cpu_t *cpu_list; /* list of all CPUs */
110 110 cpu_t *clock_cpu_list; /* used by clock to walk CPUs */
111 111 cpu_t *cpu_active; /* list of active CPUs */
112 112 static cpuset_t cpu_available; /* set of available CPUs */
113 113 cpuset_t cpu_seqid_inuse; /* which cpu_seqids are in use */
114 114
115 115 cpu_t **cpu_seq; /* ptrs to CPUs, indexed by seq_id */
116 116
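
For illustration of the traversal rule in the locking comment above cpu_lock, a hypothetical helper is sketched below; example_walk_cpu_list() and its visit callback are made-up names, and the sketch assumes only the headers this file already includes.

        /*
         * Hypothetical helper: walk cpu_list without cpu_lock.  Disabling
         * kernel preemption keeps the list stable while we traverse it,
         * per the comment above; no cpu_t pointer may be cached once
         * preemption is re-enabled.
         */
        static void
        example_walk_cpu_list(void (*visit)(cpu_t *))
        {
                cpu_t *cp;

                kpreempt_disable();
                cp = cpu_list;
                do {
                        visit(cp);
                } while ((cp = cp->cpu_next) != cpu_list);
                kpreempt_enable();
        }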
117 117 /*
118 118 * max_ncpus keeps the max cpus the system can have. Initially
119 119 * it's NCPU, but since most archs scan the devtree for cpus
120 120 * fairly early on during boot, the real max can be known before
121 121 * ncpus is set (useful for early NCPU based allocations).
122 122 */
123 123 int max_ncpus = NCPU;
124 124 /*
 125 125 * platforms that set max_ncpus to the maximum number of cpus that can be
126 126 * dynamically added will set boot_max_ncpus to the number of cpus found
127 127 * at device tree scan time during boot.
128 128 */
129 129 int boot_max_ncpus = -1;
130 130 int boot_ncpus = -1;
131 131 /*
132 132 * Maximum possible CPU id. This can never be >= NCPU since NCPU is
133 133 * used to size arrays that are indexed by CPU id.
134 134 */
135 135 processorid_t max_cpuid = NCPU - 1;
136 136
137 137 /*
 138 138 * Maximum cpu_seqid ever given. This number can only grow and never shrink. It
139 139 * can be used to optimize NCPU loops to avoid going through CPUs which were
140 140 * never on-line.
141 141 */
142 142 processorid_t max_cpu_seqid_ever = 0;
143 143
144 144 int ncpus = 1;
145 145 int ncpus_online = 1;
146 146
147 147 /*
148 148 * CPU that we're trying to offline. Protected by cpu_lock.
149 149 */
150 150 cpu_t *cpu_inmotion;
151 151
152 152 /*
 153 153 * Can be raised to suppress further weakbindings, which are instead
154 154 * satisfied by disabling preemption. Must be raised/lowered under cpu_lock,
155 155 * while individual thread weakbinding synchronization is done under thread
156 156 * lock.
157 157 */
158 158 int weakbindingbarrier;
159 159
160 160 /*
161 161 * Variables used in pause_cpus().
162 162 */
163 163 static volatile char safe_list[NCPU];
164 164
165 165 static struct _cpu_pause_info {
166 166 int cp_spl; /* spl saved in pause_cpus() */
167 167 volatile int cp_go; /* Go signal sent after all ready */
168 168 int cp_count; /* # of CPUs to pause */
169 169 ksema_t cp_sem; /* synch pause_cpus & cpu_pause */
170 170 kthread_id_t cp_paused;
171 171 void *(*cp_func)(void *);
172 172 } cpu_pause_info;
173 173
174 174 static kmutex_t pause_free_mutex;
175 175 static kcondvar_t pause_free_cv;
176 176
177 177
178 178 static struct cpu_sys_stats_ks_data {
179 179 kstat_named_t cpu_ticks_idle;
180 180 kstat_named_t cpu_ticks_user;
181 181 kstat_named_t cpu_ticks_kernel;
182 182 kstat_named_t cpu_ticks_wait;
183 183 kstat_named_t cpu_nsec_idle;
184 184 kstat_named_t cpu_nsec_user;
185 185 kstat_named_t cpu_nsec_kernel;
186 186 kstat_named_t cpu_nsec_dtrace;
187 187 kstat_named_t cpu_nsec_intr;
188 188 kstat_named_t cpu_load_intr;
189 189 kstat_named_t wait_ticks_io;
190 190 kstat_named_t dtrace_probes;
191 191 kstat_named_t bread;
192 192 kstat_named_t bwrite;
193 193 kstat_named_t lread;
194 194 kstat_named_t lwrite;
195 195 kstat_named_t phread;
196 196 kstat_named_t phwrite;
197 197 kstat_named_t pswitch;
198 198 kstat_named_t trap;
199 199 kstat_named_t intr;
200 200 kstat_named_t syscall;
201 201 kstat_named_t sysread;
202 202 kstat_named_t syswrite;
203 203 kstat_named_t sysfork;
204 204 kstat_named_t sysvfork;
205 205 kstat_named_t sysexec;
206 206 kstat_named_t readch;
207 207 kstat_named_t writech;
208 208 kstat_named_t rcvint;
209 209 kstat_named_t xmtint;
210 210 kstat_named_t mdmint;
211 211 kstat_named_t rawch;
212 212 kstat_named_t canch;
213 213 kstat_named_t outch;
214 214 kstat_named_t msg;
215 215 kstat_named_t sema;
216 216 kstat_named_t namei;
217 217 kstat_named_t ufsiget;
218 218 kstat_named_t ufsdirblk;
219 219 kstat_named_t ufsipage;
220 220 kstat_named_t ufsinopage;
221 221 kstat_named_t procovf;
222 222 kstat_named_t intrthread;
223 223 kstat_named_t intrblk;
224 224 kstat_named_t intrunpin;
225 225 kstat_named_t idlethread;
226 226 kstat_named_t inv_swtch;
227 227 kstat_named_t nthreads;
228 228 kstat_named_t cpumigrate;
229 229 kstat_named_t xcalls;
230 230 kstat_named_t mutex_adenters;
231 231 kstat_named_t rw_rdfails;
232 232 kstat_named_t rw_wrfails;
233 233 kstat_named_t modload;
234 234 kstat_named_t modunload;
235 235 kstat_named_t bawrite;
236 236 kstat_named_t iowait;
237 237 } cpu_sys_stats_ks_data_template = {
238 238 { "cpu_ticks_idle", KSTAT_DATA_UINT64 },
239 239 { "cpu_ticks_user", KSTAT_DATA_UINT64 },
240 240 { "cpu_ticks_kernel", KSTAT_DATA_UINT64 },
241 241 { "cpu_ticks_wait", KSTAT_DATA_UINT64 },
242 242 { "cpu_nsec_idle", KSTAT_DATA_UINT64 },
243 243 { "cpu_nsec_user", KSTAT_DATA_UINT64 },
244 244 { "cpu_nsec_kernel", KSTAT_DATA_UINT64 },
245 245 { "cpu_nsec_dtrace", KSTAT_DATA_UINT64 },
246 246 { "cpu_nsec_intr", KSTAT_DATA_UINT64 },
247 247 { "cpu_load_intr", KSTAT_DATA_UINT64 },
248 248 { "wait_ticks_io", KSTAT_DATA_UINT64 },
249 249 { "dtrace_probes", KSTAT_DATA_UINT64 },
250 250 { "bread", KSTAT_DATA_UINT64 },
251 251 { "bwrite", KSTAT_DATA_UINT64 },
252 252 { "lread", KSTAT_DATA_UINT64 },
253 253 { "lwrite", KSTAT_DATA_UINT64 },
254 254 { "phread", KSTAT_DATA_UINT64 },
255 255 { "phwrite", KSTAT_DATA_UINT64 },
256 256 { "pswitch", KSTAT_DATA_UINT64 },
257 257 { "trap", KSTAT_DATA_UINT64 },
258 258 { "intr", KSTAT_DATA_UINT64 },
259 259 { "syscall", KSTAT_DATA_UINT64 },
260 260 { "sysread", KSTAT_DATA_UINT64 },
261 261 { "syswrite", KSTAT_DATA_UINT64 },
262 262 { "sysfork", KSTAT_DATA_UINT64 },
263 263 { "sysvfork", KSTAT_DATA_UINT64 },
264 264 { "sysexec", KSTAT_DATA_UINT64 },
265 265 { "readch", KSTAT_DATA_UINT64 },
266 266 { "writech", KSTAT_DATA_UINT64 },
267 267 { "rcvint", KSTAT_DATA_UINT64 },
268 268 { "xmtint", KSTAT_DATA_UINT64 },
269 269 { "mdmint", KSTAT_DATA_UINT64 },
270 270 { "rawch", KSTAT_DATA_UINT64 },
271 271 { "canch", KSTAT_DATA_UINT64 },
272 272 { "outch", KSTAT_DATA_UINT64 },
273 273 { "msg", KSTAT_DATA_UINT64 },
274 274 { "sema", KSTAT_DATA_UINT64 },
275 275 { "namei", KSTAT_DATA_UINT64 },
276 276 { "ufsiget", KSTAT_DATA_UINT64 },
277 277 { "ufsdirblk", KSTAT_DATA_UINT64 },
278 278 { "ufsipage", KSTAT_DATA_UINT64 },
279 279 { "ufsinopage", KSTAT_DATA_UINT64 },
280 280 { "procovf", KSTAT_DATA_UINT64 },
281 281 { "intrthread", KSTAT_DATA_UINT64 },
282 282 { "intrblk", KSTAT_DATA_UINT64 },
283 283 { "intrunpin", KSTAT_DATA_UINT64 },
284 284 { "idlethread", KSTAT_DATA_UINT64 },
285 285 { "inv_swtch", KSTAT_DATA_UINT64 },
286 286 { "nthreads", KSTAT_DATA_UINT64 },
287 287 { "cpumigrate", KSTAT_DATA_UINT64 },
288 288 { "xcalls", KSTAT_DATA_UINT64 },
289 289 { "mutex_adenters", KSTAT_DATA_UINT64 },
290 290 { "rw_rdfails", KSTAT_DATA_UINT64 },
291 291 { "rw_wrfails", KSTAT_DATA_UINT64 },
292 292 { "modload", KSTAT_DATA_UINT64 },
293 293 { "modunload", KSTAT_DATA_UINT64 },
294 294 { "bawrite", KSTAT_DATA_UINT64 },
295 295 { "iowait", KSTAT_DATA_UINT64 },
296 296 };
297 297
298 298 static struct cpu_vm_stats_ks_data {
299 299 kstat_named_t pgrec;
300 300 kstat_named_t pgfrec;
301 301 kstat_named_t pgin;
302 302 kstat_named_t pgpgin;
303 303 kstat_named_t pgout;
304 304 kstat_named_t pgpgout;
305 305 kstat_named_t swapin;
306 306 kstat_named_t pgswapin;
307 307 kstat_named_t swapout;
308 308 kstat_named_t pgswapout;
309 309 kstat_named_t zfod;
310 310 kstat_named_t dfree;
311 311 kstat_named_t scan;
312 312 kstat_named_t rev;
313 313 kstat_named_t hat_fault;
314 314 kstat_named_t as_fault;
315 315 kstat_named_t maj_fault;
316 316 kstat_named_t cow_fault;
317 317 kstat_named_t prot_fault;
318 318 kstat_named_t softlock;
319 319 kstat_named_t kernel_asflt;
320 320 kstat_named_t pgrrun;
321 321 kstat_named_t execpgin;
322 322 kstat_named_t execpgout;
323 323 kstat_named_t execfree;
324 324 kstat_named_t anonpgin;
325 325 kstat_named_t anonpgout;
326 326 kstat_named_t anonfree;
327 327 kstat_named_t fspgin;
328 328 kstat_named_t fspgout;
329 329 kstat_named_t fsfree;
330 330 } cpu_vm_stats_ks_data_template = {
331 331 { "pgrec", KSTAT_DATA_UINT64 },
332 332 { "pgfrec", KSTAT_DATA_UINT64 },
333 333 { "pgin", KSTAT_DATA_UINT64 },
334 334 { "pgpgin", KSTAT_DATA_UINT64 },
335 335 { "pgout", KSTAT_DATA_UINT64 },
336 336 { "pgpgout", KSTAT_DATA_UINT64 },
337 337 { "swapin", KSTAT_DATA_UINT64 },
338 338 { "pgswapin", KSTAT_DATA_UINT64 },
339 339 { "swapout", KSTAT_DATA_UINT64 },
340 340 { "pgswapout", KSTAT_DATA_UINT64 },
341 341 { "zfod", KSTAT_DATA_UINT64 },
342 342 { "dfree", KSTAT_DATA_UINT64 },
343 343 { "scan", KSTAT_DATA_UINT64 },
344 344 { "rev", KSTAT_DATA_UINT64 },
345 345 { "hat_fault", KSTAT_DATA_UINT64 },
346 346 { "as_fault", KSTAT_DATA_UINT64 },
347 347 { "maj_fault", KSTAT_DATA_UINT64 },
348 348 { "cow_fault", KSTAT_DATA_UINT64 },
349 349 { "prot_fault", KSTAT_DATA_UINT64 },
350 350 { "softlock", KSTAT_DATA_UINT64 },
351 351 { "kernel_asflt", KSTAT_DATA_UINT64 },
352 352 { "pgrrun", KSTAT_DATA_UINT64 },
353 353 { "execpgin", KSTAT_DATA_UINT64 },
354 354 { "execpgout", KSTAT_DATA_UINT64 },
355 355 { "execfree", KSTAT_DATA_UINT64 },
356 356 { "anonpgin", KSTAT_DATA_UINT64 },
357 357 { "anonpgout", KSTAT_DATA_UINT64 },
358 358 { "anonfree", KSTAT_DATA_UINT64 },
359 359 { "fspgin", KSTAT_DATA_UINT64 },
360 360 { "fspgout", KSTAT_DATA_UINT64 },
361 361 { "fsfree", KSTAT_DATA_UINT64 },
362 362 };
363 363
364 364 /*
365 365 * Force the specified thread to migrate to the appropriate processor.
366 366 * Called with thread lock held, returns with it dropped.
367 367 */
368 368 static void
369 369 force_thread_migrate(kthread_id_t tp)
370 370 {
371 371 ASSERT(THREAD_LOCK_HELD(tp));
372 372 if (tp == curthread) {
373 373 THREAD_TRANSITION(tp);
374 374 CL_SETRUN(tp);
375 375 thread_unlock_nopreempt(tp);
376 376 swtch();
377 377 } else {
378 378 if (tp->t_state == TS_ONPROC) {
379 379 cpu_surrender(tp);
380 380 } else if (tp->t_state == TS_RUN) {
381 381 (void) dispdeq(tp);
382 382 setbackdq(tp);
383 383 }
384 384 thread_unlock(tp);
385 385 }
386 386 }
387 387
388 388 /*
389 389 * Set affinity for a specified CPU.
390 390 *
391 391 * Specifying a cpu_id of CPU_CURRENT, allowed _only_ when setting affinity for
392 392 * curthread, will set affinity to the CPU on which the thread is currently
393 393 * running. For other cpu_id values, the caller must ensure that the
394 394 * referenced CPU remains valid, which can be done by holding cpu_lock across
395 395 * this call.
396 396 *
397 397 * CPU affinity is guaranteed after return of thread_affinity_set(). If a
398 398 * caller setting affinity to CPU_CURRENT requires that its thread not migrate
399 399 * CPUs prior to a successful return, it should take extra precautions (such as
 400 400 * its own call to kpreempt_disable) to ensure that safety.
401 401 *
402 + * CPU_BEST can be used to pick a "best" CPU to migrate to, including
403 + * potentially the current CPU.
404 + *
402 405 * A CPU affinity reference count is maintained by thread_affinity_set and
403 406 * thread_affinity_clear (incrementing and decrementing it, respectively),
404 407 * maintaining CPU affinity while the count is non-zero, and allowing regions
405 408 * of code which require affinity to be nested.
406 409 */
407 410 void
408 411 thread_affinity_set(kthread_id_t t, int cpu_id)
409 412 {
410 413 cpu_t *cp;
411 414
412 415 ASSERT(!(t == curthread && t->t_weakbound_cpu != NULL));
413 416
414 417 if (cpu_id == CPU_CURRENT) {
415 418 VERIFY3P(t, ==, curthread);
416 419 kpreempt_disable();
417 420 cp = CPU;
421 + } else if (cpu_id == CPU_BEST) {
422 + VERIFY3P(t, ==, curthread);
423 + kpreempt_disable();
424 + cp = disp_choose_best_cpu();
418 425 } else {
419 426 /*
420 427 * We should be asserting that cpu_lock is held here, but
421 428 * the NCA code doesn't acquire it. The following assert
422 429 * should be uncommented when the NCA code is fixed.
423 430 *
424 431 * ASSERT(MUTEX_HELD(&cpu_lock));
425 432 */
426 433 VERIFY((cpu_id >= 0) && (cpu_id < NCPU));
427 434 cp = cpu[cpu_id];
428 435
429 436 /* user must provide a good cpu_id */
430 437 VERIFY(cp != NULL);
431 438 }
432 439
433 440 /*
434 441 * If there is already a hard affinity requested, and this affinity
435 442 * conflicts with that, panic.
436 443 */
437 444 thread_lock(t);
438 445 if (t->t_affinitycnt > 0 && t->t_bound_cpu != cp) {
439 446 panic("affinity_set: setting %p but already bound to %p",
440 447 (void *)cp, (void *)t->t_bound_cpu);
441 448 }
442 449 t->t_affinitycnt++;
443 450 t->t_bound_cpu = cp;
444 451
↓ open down ↓ |
17 lines elided |
↑ open up ↑ |
445 452 /*
446 453 * Make sure we're running on the right CPU.
447 454 */
448 455 if (cp != t->t_cpu || t != curthread) {
449 456 ASSERT(cpu_id != CPU_CURRENT);
450 457 force_thread_migrate(t); /* drops thread lock */
451 458 } else {
452 459 thread_unlock(t);
453 460 }
454 461
455 - if (cpu_id == CPU_CURRENT) {
462 + if (cpu_id == CPU_CURRENT || cpu_id == CPU_BEST)
456 463 kpreempt_enable();
457 - }
458 464 }
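
As a hypothetical illustration of the new CPU_BEST option, the sketch below takes and drops an affinity reference around a short piece of per-CPU work; example_run_on_best_cpu() and its work callback are made-up names, and the sketch relies only on headers this file already includes.

        /*
         * Hypothetical caller of CPU_BEST: migrate curthread to whatever
         * CPU the dispatcher considers best (possibly the current one),
         * run a short piece of per-CPU work, then drop the affinity
         * reference.  The t_affinitycnt reference count lets such regions
         * nest inside callers that already hold an affinity reference.
         */
        static void
        example_run_on_best_cpu(void (*work)(cpu_t *))
        {
                thread_affinity_set(curthread, CPU_BEST);
                work(CPU);              /* will not migrate until cleared */
                thread_affinity_clear(curthread);
        }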
459 465
460 466 /*
461 467 * Wrapper for backward compatibility.
462 468 */
463 469 void
464 470 affinity_set(int cpu_id)
465 471 {
466 472 thread_affinity_set(curthread, cpu_id);
467 473 }
468 474
469 475 /*
470 476 * Decrement the affinity reservation count and if it becomes zero,
471 477 * clear the CPU affinity for the current thread, or set it to the user's
472 478 * software binding request.
473 479 */
474 480 void
475 481 thread_affinity_clear(kthread_id_t t)
476 482 {
477 483 register processorid_t binding;
478 484
479 485 thread_lock(t);
480 486 if (--t->t_affinitycnt == 0) {
481 487 if ((binding = t->t_bind_cpu) == PBIND_NONE) {
482 488 /*
483 489 * Adjust disp_max_unbound_pri if necessary.
484 490 */
485 491 disp_adjust_unbound_pri(t);
486 492 t->t_bound_cpu = NULL;
487 493 if (t->t_cpu->cpu_part != t->t_cpupart) {
488 494 force_thread_migrate(t);
489 495 return;
490 496 }
491 497 } else {
492 498 t->t_bound_cpu = cpu[binding];
493 499 /*
494 500 * Make sure the thread is running on the bound CPU.
495 501 */
496 502 if (t->t_cpu != t->t_bound_cpu) {
497 503 force_thread_migrate(t);
498 504 return; /* already dropped lock */
499 505 }
500 506 }
501 507 }
502 508 thread_unlock(t);
503 509 }
504 510
505 511 /*
506 512 * Wrapper for backward compatibility.
507 513 */
508 514 void
509 515 affinity_clear(void)
510 516 {
511 517 thread_affinity_clear(curthread);
512 518 }
513 519
514 520 /*
515 521 * Weak cpu affinity. Bind to the "current" cpu for short periods
516 522 * of time during which the thread must not block (but may be preempted).
517 523 * Use this instead of kpreempt_disable() when it is only "no migration"
518 524 * rather than "no preemption" semantics that are required - disabling
519 525 * preemption holds higher priority threads off of cpu and if the
520 526 * operation that is protected is more than momentary this is not good
521 527 * for realtime etc.
522 528 *
523 529 * Weakly bound threads will not prevent a cpu from being offlined -
524 530 * we'll only run them on the cpu to which they are weakly bound but
525 531 * (because they do not block) we'll always be able to move them on to
526 532 * another cpu at offline time if we give them just a short moment to
527 533 * run during which they will unbind. To give a cpu a chance of offlining,
528 534 * however, we require a barrier to weak bindings that may be raised for a
529 535 * given cpu (offline/move code may set this and then wait a short time for
530 536 * existing weak bindings to drop); the cpu_inmotion pointer is that barrier.
531 537 *
532 538 * There are few restrictions on the calling context of thread_nomigrate.
533 539 * The caller must not hold the thread lock. Calls may be nested.
534 540 *
535 541 * After weakbinding a thread must not perform actions that may block.
536 542 * In particular it must not call thread_affinity_set; calling that when
537 543 * already weakbound is nonsensical anyway.
538 544 *
539 545 * If curthread is prevented from migrating for other reasons
540 546 * (kernel preemption disabled; high pil; strongly bound; interrupt thread)
541 547 * then the weak binding will succeed even if this cpu is the target of an
542 548 * offline/move request.
543 549 */
544 550 void
545 551 thread_nomigrate(void)
546 552 {
547 553 cpu_t *cp;
548 554 kthread_id_t t = curthread;
549 555
550 556 again:
551 557 kpreempt_disable();
552 558 cp = CPU;
553 559
554 560 /*
555 561 * A highlevel interrupt must not modify t_nomigrate or
556 562 * t_weakbound_cpu of the thread it has interrupted. A lowlevel
557 563 * interrupt thread cannot migrate and we can avoid the
558 564 * thread_lock call below by short-circuiting here. In either
559 565 * case we can just return since no migration is possible and
560 566 * the condition will persist (ie, when we test for these again
561 567 * in thread_allowmigrate they can't have changed). Migration
562 568 * is also impossible if we're at or above DISP_LEVEL pil.
563 569 */
564 570 if (CPU_ON_INTR(cp) || t->t_flag & T_INTR_THREAD ||
565 571 getpil() >= DISP_LEVEL) {
566 572 kpreempt_enable();
567 573 return;
568 574 }
569 575
570 576 /*
571 577 * We must be consistent with existing weak bindings. Since we
572 578 * may be interrupted between the increment of t_nomigrate and
573 579 * the store to t_weakbound_cpu below we cannot assume that
574 580 * t_weakbound_cpu will be set if t_nomigrate is. Note that we
575 581 * cannot assert t_weakbound_cpu == t_bind_cpu since that is not
576 582 * always the case.
577 583 */
578 584 if (t->t_nomigrate && t->t_weakbound_cpu && t->t_weakbound_cpu != cp) {
579 585 if (!panicstr)
580 586 panic("thread_nomigrate: binding to %p but already "
581 587 "bound to %p", (void *)cp,
582 588 (void *)t->t_weakbound_cpu);
583 589 }
584 590
585 591 /*
586 592 * At this point we have preemption disabled and we don't yet hold
587 593 * the thread lock. So it's possible that somebody else could
588 594 * set t_bind_cpu here and not be able to force us across to the
589 595 * new cpu (since we have preemption disabled).
590 596 */
591 597 thread_lock(curthread);
592 598
593 599 /*
594 600 * If further weak bindings are being (temporarily) suppressed then
595 601 * we'll settle for disabling kernel preemption (which assures
596 602 * no migration provided the thread does not block which it is
597 603 * not allowed to if using thread_nomigrate). We must remember
598 604 * this disposition so we can take appropriate action in
599 605 * thread_allowmigrate. If this is a nested call and the
600 606 * thread is already weakbound then fall through as normal.
601 607 * We remember the decision to settle for kpreempt_disable through
602 608 * negative nesting counting in t_nomigrate. Once a thread has had one
603 609 * weakbinding request satisfied in this way any further (nested)
604 610 * requests will continue to be satisfied in the same way,
605 611 * even if weak bindings have recommenced.
606 612 */
607 613 if (t->t_nomigrate < 0 || weakbindingbarrier && t->t_nomigrate == 0) {
608 614 --t->t_nomigrate;
609 615 thread_unlock(curthread);
610 616 return; /* with kpreempt_disable still active */
611 617 }
612 618
613 619 /*
614 620 * We hold thread_lock so t_bind_cpu cannot change. We could,
615 621 * however, be running on a different cpu to which we are t_bound_cpu
616 622 * to (as explained above). If we grant the weak binding request
617 623 * in that case then the dispatcher must favour our weak binding
618 624 * over our strong (in which case, just as when preemption is
619 625 * disabled, we can continue to run on a cpu other than the one to
620 626 * which we are strongbound; the difference in this case is that
621 627 * this thread can be preempted and so can appear on the dispatch
622 628 * queues of a cpu other than the one it is strongbound to).
623 629 *
624 630 * If the cpu we are running on does not appear to be a current
625 631 * offline target (we check cpu_inmotion to determine this - since
626 632 * we don't hold cpu_lock we may not see a recent store to that,
627 633 * so it's possible that we at times can grant a weak binding to a
628 634 * cpu that is an offline target, but that one request will not
629 635 * prevent the offline from succeeding) then we will always grant
630 636 * the weak binding request. This includes the case above where
631 637 * we grant a weakbinding not commensurate with our strong binding.
632 638 *
633 639 * If our cpu does appear to be an offline target then we're inclined
634 640 * not to grant the weakbinding request just yet - we'd prefer to
635 641 * migrate to another cpu and grant the request there. The
636 642 * exceptions are those cases where going through preemption code
637 643 * will not result in us changing cpu:
638 644 *
639 645 * . interrupts have already bypassed this case (see above)
640 646 * . we are already weakbound to this cpu (dispatcher code will
641 647 * always return us to the weakbound cpu)
642 648 * . preemption was disabled even before we disabled it above
643 649 * . we are strongbound to this cpu (if we're strongbound to
644 650 * another and not yet running there the trip through the
645 651 * dispatcher will move us to the strongbound cpu and we
646 652 * will grant the weak binding there)
647 653 */
648 654 if (cp != cpu_inmotion || t->t_nomigrate > 0 || t->t_preempt > 1 ||
649 655 t->t_bound_cpu == cp) {
650 656 /*
651 657 * Don't be tempted to store to t_weakbound_cpu only on
652 658 * the first nested bind request - if we're interrupted
653 659 * after the increment of t_nomigrate and before the
654 660 * store to t_weakbound_cpu and the interrupt calls
655 661 * thread_nomigrate then the assertion in thread_allowmigrate
656 662 * would fail.
657 663 */
658 664 t->t_nomigrate++;
659 665 t->t_weakbound_cpu = cp;
660 666 membar_producer();
661 667 thread_unlock(curthread);
662 668 /*
663 669 * Now that we have dropped the thread_lock another thread
664 670 * can set our t_weakbound_cpu, and will try to migrate us
665 671 * to the strongbound cpu (which will not be prevented by
666 672 * preemption being disabled since we're about to enable
667 673 * preemption). We have granted the weakbinding to the current
 668 674 * cpu, so again we are in the position that it is possible
669 675 * that our weak and strong bindings differ. Again this
670 676 * is catered for by dispatcher code which will favour our
671 677 * weak binding.
672 678 */
673 679 kpreempt_enable();
674 680 } else {
675 681 /*
676 682 * Move to another cpu before granting the request by
677 683 * forcing this thread through preemption code. When we
678 684 * get to set{front,back}dq called from CL_PREEMPT()
679 685 * cpu_choose() will be used to select a cpu to queue
680 686 * us on - that will see cpu_inmotion and take
681 687 * steps to avoid returning us to this cpu.
682 688 */
683 689 cp->cpu_kprunrun = 1;
684 690 thread_unlock(curthread);
685 691 kpreempt_enable(); /* will call preempt() */
686 692 goto again;
687 693 }
688 694 }
689 695
690 696 void
691 697 thread_allowmigrate(void)
692 698 {
693 699 kthread_id_t t = curthread;
694 700
695 701 ASSERT(t->t_weakbound_cpu == CPU ||
696 702 (t->t_nomigrate < 0 && t->t_preempt > 0) ||
697 703 CPU_ON_INTR(CPU) || t->t_flag & T_INTR_THREAD ||
698 704 getpil() >= DISP_LEVEL);
699 705
700 706 if (CPU_ON_INTR(CPU) || (t->t_flag & T_INTR_THREAD) ||
701 707 getpil() >= DISP_LEVEL)
702 708 return;
703 709
704 710 if (t->t_nomigrate < 0) {
705 711 /*
706 712 * This thread was granted "weak binding" in the
707 713 * stronger form of kernel preemption disabling.
708 714 * Undo a level of nesting for both t_nomigrate
709 715 * and t_preempt.
710 716 */
711 717 ++t->t_nomigrate;
712 718 kpreempt_enable();
713 719 } else if (--t->t_nomigrate == 0) {
714 720 /*
715 721 * Time to drop the weak binding. We need to cater
716 722 * for the case where we're weakbound to a different
717 723 * cpu than that to which we're strongbound (a very
718 724 * temporary arrangement that must only persist until
719 725 * weak binding drops). We don't acquire thread_lock
720 726 * here so even as this code executes t_bound_cpu
721 727 * may be changing. So we disable preemption and
722 728 * a) in the case that t_bound_cpu changes while we
723 729 * have preemption disabled kprunrun will be set
724 730 * asynchronously, and b) if before disabling
725 731 * preemption we were already on a different cpu to
726 732 * our t_bound_cpu then we set kprunrun ourselves
727 733 * to force a trip through the dispatcher when
728 734 * preemption is enabled.
729 735 */
730 736 kpreempt_disable();
731 737 if (t->t_bound_cpu &&
732 738 t->t_weakbound_cpu != t->t_bound_cpu)
733 739 CPU->cpu_kprunrun = 1;
734 740 t->t_weakbound_cpu = NULL;
735 741 membar_producer();
736 742 kpreempt_enable();
737 743 }
738 744 }
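
A hypothetical sketch of the weak-binding interface described above follows; example_bump_local_counter() and percpu_counters are made-up names.

        /*
         * Hypothetical use of weak binding: bump a per-CPU counter without
         * blocking.  Unlike kpreempt_disable(), thread_nomigrate() only
         * prevents migration, so higher-priority threads may still preempt
         * us on this CPU; the atomic increment guards against that while
         * the weak binding keeps us on a single CPU's slot.
         */
        static void
        example_bump_local_counter(uint64_t *percpu_counters)
        {
                thread_nomigrate();
                atomic_inc_64(&percpu_counters[CPU->cpu_seqid]);
                thread_allowmigrate();
        }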
739 745
740 746 /*
741 747 * weakbinding_stop can be used to temporarily cause weakbindings made
742 748 * with thread_nomigrate to be satisfied through the stronger action of
743 749 * kpreempt_disable. weakbinding_start recommences normal weakbinding.
744 750 */
745 751
746 752 void
747 753 weakbinding_stop(void)
748 754 {
749 755 ASSERT(MUTEX_HELD(&cpu_lock));
750 756 weakbindingbarrier = 1;
751 757 membar_producer(); /* make visible before subsequent thread_lock */
752 758 }
753 759
754 760 void
755 761 weakbinding_start(void)
756 762 {
757 763 ASSERT(MUTEX_HELD(&cpu_lock));
758 764 weakbindingbarrier = 0;
759 765 }
760 766
761 767 void
762 768 null_xcall(void)
763 769 {
764 770 }
765 771
766 772 /*
767 773 * This routine is called to place the CPUs in a safe place so that
768 774 * one of them can be taken off line or placed on line. What we are
769 775 * trying to do here is prevent a thread from traversing the list
770 776 * of active CPUs while we are changing it or from getting placed on
771 777 * the run queue of a CPU that has just gone off line. We do this by
772 778 * creating a thread with the highest possible prio for each CPU and
773 779 * having it call this routine. The advantage of this method is that
774 780 * we can eliminate all checks for CPU_ACTIVE in the disp routines.
775 781 * This makes disp faster at the expense of making p_online() slower
776 782 * which is a good trade off.
777 783 */
778 784 static void
779 785 cpu_pause(int index)
780 786 {
781 787 int s;
782 788 struct _cpu_pause_info *cpi = &cpu_pause_info;
783 789 volatile char *safe = &safe_list[index];
784 790 long lindex = index;
785 791
786 792 ASSERT((curthread->t_bound_cpu != NULL) || (*safe == PAUSE_DIE));
787 793
788 794 while (*safe != PAUSE_DIE) {
789 795 *safe = PAUSE_READY;
790 796 membar_enter(); /* make sure stores are flushed */
791 797 sema_v(&cpi->cp_sem); /* signal requesting thread */
792 798
793 799 /*
794 800 * Wait here until all pause threads are running. That
795 801 * indicates that it's safe to do the spl. Until
796 802 * cpu_pause_info.cp_go is set, we don't want to spl
797 803 * because that might block clock interrupts needed
798 804 * to preempt threads on other CPUs.
799 805 */
800 806 while (cpi->cp_go == 0)
801 807 ;
802 808 /*
803 809 * Even though we are at the highest disp prio, we need
804 810 * to block out all interrupts below LOCK_LEVEL so that
805 811 * an intr doesn't come in, wake up a thread, and call
806 812 * setbackdq/setfrontdq.
807 813 */
808 814 s = splhigh();
809 815 /*
810 816 * if cp_func has been set then call it using index as the
811 817 * argument, currently only used by cpr_suspend_cpus().
812 818 * This function is used as the code to execute on the
 813 819 * "paused" cpus when a machine comes out of a sleep state
 814 820 * and CPUs were powered off. (This could also be used for
 815 821 * hotplugging CPUs.)
816 822 */
817 823 if (cpi->cp_func != NULL)
818 824 (*cpi->cp_func)((void *)lindex);
819 825
820 826 mach_cpu_pause(safe);
821 827
822 828 splx(s);
823 829 /*
824 830 * Waiting is at an end. Switch out of cpu_pause
825 831 * loop and resume useful work.
826 832 */
827 833 swtch();
828 834 }
829 835
830 836 mutex_enter(&pause_free_mutex);
831 837 *safe = PAUSE_DEAD;
832 838 cv_broadcast(&pause_free_cv);
833 839 mutex_exit(&pause_free_mutex);
834 840 }
835 841
836 842 /*
837 843 * Allow the cpus to start running again.
838 844 */
839 845 void
840 846 start_cpus()
841 847 {
842 848 int i;
843 849
844 850 ASSERT(MUTEX_HELD(&cpu_lock));
845 851 ASSERT(cpu_pause_info.cp_paused);
846 852 cpu_pause_info.cp_paused = NULL;
847 853 for (i = 0; i < NCPU; i++)
848 854 safe_list[i] = PAUSE_IDLE;
849 855 membar_enter(); /* make sure stores are flushed */
850 856 affinity_clear();
851 857 splx(cpu_pause_info.cp_spl);
852 858 kpreempt_enable();
853 859 }
854 860
855 861 /*
856 862 * Allocate a pause thread for a CPU.
857 863 */
858 864 static void
859 865 cpu_pause_alloc(cpu_t *cp)
860 866 {
861 867 kthread_id_t t;
862 868 long cpun = cp->cpu_id;
863 869
864 870 /*
865 871 * Note, v.v_nglobpris will not change value as long as I hold
866 872 * cpu_lock.
867 873 */
868 874 t = thread_create(NULL, 0, cpu_pause, (void *)cpun,
869 875 0, &p0, TS_STOPPED, v.v_nglobpris - 1);
870 876 thread_lock(t);
871 877 t->t_bound_cpu = cp;
872 878 t->t_disp_queue = cp->cpu_disp;
873 879 t->t_affinitycnt = 1;
874 880 t->t_preempt = 1;
875 881 thread_unlock(t);
876 882 cp->cpu_pause_thread = t;
877 883 /*
878 884 * Registering a thread in the callback table is usually done
879 885 * in the initialization code of the thread. In this
880 886 * case, we do it right after thread creation because the
881 887 * thread itself may never run, and we need to register the
882 888 * fact that it is safe for cpr suspend.
883 889 */
884 890 CALLB_CPR_INIT_SAFE(t, "cpu_pause");
885 891 }
886 892
887 893 /*
888 894 * Free a pause thread for a CPU.
889 895 */
890 896 static void
891 897 cpu_pause_free(cpu_t *cp)
892 898 {
893 899 kthread_id_t t;
894 900 int cpun = cp->cpu_id;
895 901
896 902 ASSERT(MUTEX_HELD(&cpu_lock));
897 903 /*
898 904 * We have to get the thread and tell it to die.
899 905 */
900 906 if ((t = cp->cpu_pause_thread) == NULL) {
901 907 ASSERT(safe_list[cpun] == PAUSE_IDLE);
902 908 return;
903 909 }
904 910 thread_lock(t);
905 911 t->t_cpu = CPU; /* disp gets upset if last cpu is quiesced. */
906 912 t->t_bound_cpu = NULL; /* Must un-bind; cpu may not be running. */
907 913 t->t_pri = v.v_nglobpris - 1;
908 914 ASSERT(safe_list[cpun] == PAUSE_IDLE);
909 915 safe_list[cpun] = PAUSE_DIE;
910 916 THREAD_TRANSITION(t);
911 917 setbackdq(t);
912 918 thread_unlock_nopreempt(t);
913 919
914 920 /*
915 921 * If we don't wait for the thread to actually die, it may try to
916 922 * run on the wrong cpu as part of an actual call to pause_cpus().
917 923 */
918 924 mutex_enter(&pause_free_mutex);
919 925 while (safe_list[cpun] != PAUSE_DEAD) {
920 926 cv_wait(&pause_free_cv, &pause_free_mutex);
921 927 }
922 928 mutex_exit(&pause_free_mutex);
923 929 safe_list[cpun] = PAUSE_IDLE;
924 930
925 931 cp->cpu_pause_thread = NULL;
926 932 }
927 933
928 934 /*
929 935 * Initialize basic structures for pausing CPUs.
930 936 */
931 937 void
932 938 cpu_pause_init()
933 939 {
934 940 sema_init(&cpu_pause_info.cp_sem, 0, NULL, SEMA_DEFAULT, NULL);
935 941 /*
936 942 * Create initial CPU pause thread.
937 943 */
938 944 cpu_pause_alloc(CPU);
939 945 }
940 946
941 947 /*
942 948 * Start the threads used to pause another CPU.
943 949 */
944 950 static int
945 951 cpu_pause_start(processorid_t cpu_id)
946 952 {
947 953 int i;
948 954 int cpu_count = 0;
949 955
950 956 for (i = 0; i < NCPU; i++) {
951 957 cpu_t *cp;
952 958 kthread_id_t t;
953 959
954 960 cp = cpu[i];
955 961 if (!CPU_IN_SET(cpu_available, i) || (i == cpu_id)) {
956 962 safe_list[i] = PAUSE_WAIT;
957 963 continue;
958 964 }
959 965
960 966 /*
961 967 * Skip CPU if it is quiesced or not yet started.
962 968 */
963 969 if ((cp->cpu_flags & (CPU_QUIESCED | CPU_READY)) != CPU_READY) {
964 970 safe_list[i] = PAUSE_WAIT;
965 971 continue;
966 972 }
967 973
968 974 /*
969 975 * Start this CPU's pause thread.
970 976 */
971 977 t = cp->cpu_pause_thread;
972 978 thread_lock(t);
973 979 /*
974 980 * Reset the priority, since nglobpris may have
975 981 * changed since the thread was created, if someone
976 982 * has loaded the RT (or some other) scheduling
977 983 * class.
978 984 */
979 985 t->t_pri = v.v_nglobpris - 1;
980 986 THREAD_TRANSITION(t);
981 987 setbackdq(t);
982 988 thread_unlock_nopreempt(t);
983 989 ++cpu_count;
984 990 }
985 991 return (cpu_count);
986 992 }
987 993
988 994
989 995 /*
990 996 * Pause all of the CPUs except the one we are on by creating a high
991 997 * priority thread bound to those CPUs.
992 998 *
993 999 * Note that one must be extremely careful regarding code
994 1000 * executed while CPUs are paused. Since a CPU may be paused
995 1001 * while a thread scheduling on that CPU is holding an adaptive
996 1002 * lock, code executed with CPUs paused must not acquire adaptive
997 1003 * (or low-level spin) locks. Also, such code must not block,
998 1004 * since the thread that is supposed to initiate the wakeup may
999 1005 * never run.
1000 1006 *
1001 1007 * With a few exceptions, the restrictions on code executed with CPUs
1002 1008 * paused match those for code executed at high-level interrupt
1003 1009 * context.
1004 1010 */
1005 1011 void
1006 1012 pause_cpus(cpu_t *off_cp, void *(*func)(void *))
1007 1013 {
1008 1014 processorid_t cpu_id;
1009 1015 int i;
1010 1016 struct _cpu_pause_info *cpi = &cpu_pause_info;
1011 1017
1012 1018 ASSERT(MUTEX_HELD(&cpu_lock));
1013 1019 ASSERT(cpi->cp_paused == NULL);
1014 1020 cpi->cp_count = 0;
1015 1021 cpi->cp_go = 0;
1016 1022 for (i = 0; i < NCPU; i++)
1017 1023 safe_list[i] = PAUSE_IDLE;
1018 1024 kpreempt_disable();
1019 1025
1020 1026 cpi->cp_func = func;
1021 1027
1022 1028 /*
1023 1029 * If running on the cpu that is going offline, get off it.
1024 1030 * This is so that it won't be necessary to rechoose a CPU
1025 1031 * when done.
1026 1032 */
1027 1033 if (CPU == off_cp)
1028 1034 cpu_id = off_cp->cpu_next_part->cpu_id;
1029 1035 else
1030 1036 cpu_id = CPU->cpu_id;
1031 1037 affinity_set(cpu_id);
1032 1038
1033 1039 /*
1034 1040 * Start the pause threads and record how many were started
1035 1041 */
1036 1042 cpi->cp_count = cpu_pause_start(cpu_id);
1037 1043
1038 1044 /*
1039 1045 * Now wait for all CPUs to be running the pause thread.
1040 1046 */
1041 1047 while (cpi->cp_count > 0) {
1042 1048 /*
1043 1049 * Spin reading the count without grabbing the disp
1044 1050 * lock to make sure we don't prevent the pause
1045 1051 * threads from getting the lock.
1046 1052 */
1047 1053 while (sema_held(&cpi->cp_sem))
1048 1054 ;
1049 1055 if (sema_tryp(&cpi->cp_sem))
1050 1056 --cpi->cp_count;
1051 1057 }
1052 1058 cpi->cp_go = 1; /* all have reached cpu_pause */
1053 1059
1054 1060 /*
1055 1061 * Now wait for all CPUs to spl. (Transition from PAUSE_READY
1056 1062 * to PAUSE_WAIT.)
1057 1063 */
1058 1064 for (i = 0; i < NCPU; i++) {
1059 1065 while (safe_list[i] != PAUSE_WAIT)
1060 1066 ;
1061 1067 }
1062 1068 cpi->cp_spl = splhigh(); /* block dispatcher on this CPU */
1063 1069 cpi->cp_paused = curthread;
1064 1070 }
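
A hypothetical sketch of the usual pause/start bracket follows; example_update_cpu_global() and its update callback are made-up names.

        /*
         * Hypothetical caller of pause_cpus(): quiesce every other CPU so
         * a structure the dispatcher walks lock-free can be modified
         * safely.  While CPUs are paused the callback must not block or
         * take adaptive locks, per the restrictions documented above.
         */
        static void
        example_update_cpu_global(void (*update)(void))
        {
                mutex_enter(&cpu_lock);
                pause_cpus(NULL, NULL);  /* no off-going CPU, no cp_func */
                update();
                start_cpus();
                mutex_exit(&cpu_lock);
        }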
1065 1071
1066 1072 /*
1067 1073 * Check whether the current thread has CPUs paused
1068 1074 */
1069 1075 int
1070 1076 cpus_paused(void)
1071 1077 {
1072 1078 if (cpu_pause_info.cp_paused != NULL) {
1073 1079 ASSERT(cpu_pause_info.cp_paused == curthread);
1074 1080 return (1);
1075 1081 }
1076 1082 return (0);
1077 1083 }
1078 1084
1079 1085 static cpu_t *
1080 1086 cpu_get_all(processorid_t cpun)
1081 1087 {
1082 1088 ASSERT(MUTEX_HELD(&cpu_lock));
1083 1089
1084 1090 if (cpun >= NCPU || cpun < 0 || !CPU_IN_SET(cpu_available, cpun))
1085 1091 return (NULL);
1086 1092 return (cpu[cpun]);
1087 1093 }
1088 1094
1089 1095 /*
1090 1096 * Check whether cpun is a valid processor id and whether it should be
1091 1097 * visible from the current zone. If it is, return a pointer to the
1092 1098 * associated CPU structure.
1093 1099 */
1094 1100 cpu_t *
1095 1101 cpu_get(processorid_t cpun)
1096 1102 {
1097 1103 cpu_t *c;
1098 1104
1099 1105 ASSERT(MUTEX_HELD(&cpu_lock));
1100 1106 c = cpu_get_all(cpun);
1101 1107 if (c != NULL && !INGLOBALZONE(curproc) && pool_pset_enabled() &&
1102 1108 zone_pset_get(curproc->p_zone) != cpupart_query_cpu(c))
1103 1109 return (NULL);
1104 1110 return (c);
1105 1111 }
1106 1112
1107 1113 /*
1108 1114 * The following functions should be used to check CPU states in the kernel.
1109 1115 * They should be invoked with cpu_lock held. Kernel subsystems interested
1110 1116 * in CPU states should *not* use cpu_get_state() and various P_ONLINE/etc
1111 1117 * states. Those are for user-land (and system call) use only.
1112 1118 */
1113 1119
1114 1120 /*
1115 1121 * Determine whether the CPU is online and handling interrupts.
1116 1122 */
1117 1123 int
1118 1124 cpu_is_online(cpu_t *cpu)
1119 1125 {
1120 1126 ASSERT(MUTEX_HELD(&cpu_lock));
1121 1127 return (cpu_flagged_online(cpu->cpu_flags));
1122 1128 }
1123 1129
1124 1130 /*
1125 1131 * Determine whether the CPU is offline (this includes spare and faulted).
1126 1132 */
1127 1133 int
1128 1134 cpu_is_offline(cpu_t *cpu)
1129 1135 {
1130 1136 ASSERT(MUTEX_HELD(&cpu_lock));
1131 1137 return (cpu_flagged_offline(cpu->cpu_flags));
1132 1138 }
1133 1139
1134 1140 /*
1135 1141 * Determine whether the CPU is powered off.
1136 1142 */
1137 1143 int
1138 1144 cpu_is_poweredoff(cpu_t *cpu)
1139 1145 {
1140 1146 ASSERT(MUTEX_HELD(&cpu_lock));
1141 1147 return (cpu_flagged_poweredoff(cpu->cpu_flags));
1142 1148 }
1143 1149
1144 1150 /*
 1145 1151 * Determine whether the CPU is active but not handling interrupts.
1146 1152 */
1147 1153 int
1148 1154 cpu_is_nointr(cpu_t *cpu)
1149 1155 {
1150 1156 ASSERT(MUTEX_HELD(&cpu_lock));
1151 1157 return (cpu_flagged_nointr(cpu->cpu_flags));
1152 1158 }
1153 1159
1154 1160 /*
1155 1161 * Determine whether the CPU is active (scheduling threads).
1156 1162 */
1157 1163 int
1158 1164 cpu_is_active(cpu_t *cpu)
1159 1165 {
1160 1166 ASSERT(MUTEX_HELD(&cpu_lock));
1161 1167 return (cpu_flagged_active(cpu->cpu_flags));
1162 1168 }
1163 1169
1164 1170 /*
1165 1171 * Same as above, but these require cpu_flags instead of cpu_t pointers.
1166 1172 */
1167 1173 int
1168 1174 cpu_flagged_online(cpu_flag_t cpu_flags)
1169 1175 {
1170 1176 return (cpu_flagged_active(cpu_flags) &&
1171 1177 (cpu_flags & CPU_ENABLE));
1172 1178 }
1173 1179
1174 1180 int
1175 1181 cpu_flagged_offline(cpu_flag_t cpu_flags)
1176 1182 {
1177 1183 return (((cpu_flags & CPU_POWEROFF) == 0) &&
1178 1184 ((cpu_flags & (CPU_READY | CPU_OFFLINE)) != CPU_READY));
1179 1185 }
1180 1186
1181 1187 int
1182 1188 cpu_flagged_poweredoff(cpu_flag_t cpu_flags)
1183 1189 {
1184 1190 return ((cpu_flags & CPU_POWEROFF) == CPU_POWEROFF);
1185 1191 }
1186 1192
1187 1193 int
1188 1194 cpu_flagged_nointr(cpu_flag_t cpu_flags)
1189 1195 {
1190 1196 return (cpu_flagged_active(cpu_flags) &&
1191 1197 (cpu_flags & CPU_ENABLE) == 0);
1192 1198 }
1193 1199
1194 1200 int
1195 1201 cpu_flagged_active(cpu_flag_t cpu_flags)
1196 1202 {
1197 1203 return (((cpu_flags & (CPU_POWEROFF | CPU_FAULTED | CPU_SPARE)) == 0) &&
1198 1204 ((cpu_flags & (CPU_READY | CPU_OFFLINE)) == CPU_READY));
1199 1205 }
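
As a hypothetical illustration of these kernel-side predicates, the sketch below counts online CPUs under cpu_lock; example_count_online_cpus() is a made-up name.

        /*
         * Hypothetical in-kernel consumer: count online CPUs using the
         * predicates above rather than the user-level P_ONLINE states,
         * with cpu_lock held to keep the list and flags stable.
         */
        static int
        example_count_online_cpus(void)
        {
                cpu_t *cp;
                int cnt = 0;

                mutex_enter(&cpu_lock);
                cp = cpu_list;
                do {
                        if (cpu_is_online(cp))
                                cnt++;
                } while ((cp = cp->cpu_next) != cpu_list);
                mutex_exit(&cpu_lock);

                return (cnt);
        }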
1200 1206
1201 1207 /*
1202 1208 * Bring the indicated CPU online.
1203 1209 */
1204 1210 int
1205 1211 cpu_online(cpu_t *cp)
1206 1212 {
1207 1213 int error = 0;
1208 1214
1209 1215 /*
1210 1216 * Handle on-line request.
1211 1217 * This code must put the new CPU on the active list before
1212 1218 * starting it because it will not be paused, and will start
1213 1219 * using the active list immediately. The real start occurs
1214 1220 * when the CPU_QUIESCED flag is turned off.
1215 1221 */
1216 1222
1217 1223 ASSERT(MUTEX_HELD(&cpu_lock));
1218 1224
1219 1225 /*
1220 1226 * Put all the cpus into a known safe place.
1221 1227 * No mutexes can be entered while CPUs are paused.
1222 1228 */
1223 1229 error = mp_cpu_start(cp); /* arch-dep hook */
1224 1230 if (error == 0) {
1225 1231 pg_cpupart_in(cp, cp->cpu_part);
1226 1232 pause_cpus(NULL, NULL);
1227 1233 cpu_add_active_internal(cp);
1228 1234 if (cp->cpu_flags & CPU_FAULTED) {
1229 1235 cp->cpu_flags &= ~CPU_FAULTED;
1230 1236 mp_cpu_faulted_exit(cp);
1231 1237 }
1232 1238 cp->cpu_flags &= ~(CPU_QUIESCED | CPU_OFFLINE | CPU_FROZEN |
1233 1239 CPU_SPARE);
1234 1240 CPU_NEW_GENERATION(cp);
1235 1241 start_cpus();
1236 1242 cpu_stats_kstat_create(cp);
1237 1243 cpu_create_intrstat(cp);
1238 1244 lgrp_kstat_create(cp);
1239 1245 cpu_state_change_notify(cp->cpu_id, CPU_ON);
1240 1246 cpu_intr_enable(cp); /* arch-dep hook */
1241 1247 cpu_state_change_notify(cp->cpu_id, CPU_INTR_ON);
1242 1248 cpu_set_state(cp);
1243 1249 cyclic_online(cp);
1244 1250 /*
1245 1251 * This has to be called only after cyclic_online(). This
1246 1252 * function uses cyclics.
1247 1253 */
1248 1254 callout_cpu_online(cp);
1249 1255 poke_cpu(cp->cpu_id);
1250 1256 }
1251 1257
1252 1258 return (error);
1253 1259 }
1254 1260
1255 1261 /*
1256 1262 * Take the indicated CPU offline.
1257 1263 */
1258 1264 int
1259 1265 cpu_offline(cpu_t *cp, int flags)
1260 1266 {
1261 1267 cpupart_t *pp;
1262 1268 int error = 0;
1263 1269 cpu_t *ncp;
1264 1270 int intr_enable;
1265 1271 int cyclic_off = 0;
1266 1272 int callout_off = 0;
1267 1273 int loop_count;
1268 1274 int no_quiesce = 0;
1269 1275 int (*bound_func)(struct cpu *, int);
1270 1276 kthread_t *t;
1271 1277 lpl_t *cpu_lpl;
1272 1278 proc_t *p;
1273 1279 int lgrp_diff_lpl;
1274 1280 boolean_t unbind_all_threads = (flags & CPU_FORCED) != 0;
1275 1281
1276 1282 ASSERT(MUTEX_HELD(&cpu_lock));
1277 1283
1278 1284 /*
1279 1285 * If we're going from faulted or spare to offline, just
1280 1286 * clear these flags and update CPU state.
1281 1287 */
1282 1288 if (cp->cpu_flags & (CPU_FAULTED | CPU_SPARE)) {
1283 1289 if (cp->cpu_flags & CPU_FAULTED) {
1284 1290 cp->cpu_flags &= ~CPU_FAULTED;
1285 1291 mp_cpu_faulted_exit(cp);
1286 1292 }
1287 1293 cp->cpu_flags &= ~CPU_SPARE;
1288 1294 cpu_set_state(cp);
1289 1295 return (0);
1290 1296 }
1291 1297
1292 1298 /*
1293 1299 * Handle off-line request.
1294 1300 */
1295 1301 pp = cp->cpu_part;
1296 1302 /*
1297 1303 * Don't offline last online CPU in partition
1298 1304 */
1299 1305 if (ncpus_online <= 1 || pp->cp_ncpus <= 1 || cpu_intr_count(cp) < 2)
1300 1306 return (EBUSY);
1301 1307 /*
1302 1308 * Unbind all soft-bound threads bound to our CPU and hard bound threads
1303 1309 * if we were asked to.
1304 1310 */
1305 1311 error = cpu_unbind(cp->cpu_id, unbind_all_threads);
1306 1312 if (error != 0)
1307 1313 return (error);
1308 1314 /*
1309 1315 * We shouldn't be bound to this CPU ourselves.
1310 1316 */
1311 1317 if (curthread->t_bound_cpu == cp)
1312 1318 return (EBUSY);
1313 1319
1314 1320 /*
1315 1321 * Tell interested parties that this CPU is going offline.
1316 1322 */
1317 1323 CPU_NEW_GENERATION(cp);
1318 1324 cpu_state_change_notify(cp->cpu_id, CPU_OFF);
1319 1325
1320 1326 /*
1321 1327 * Tell the PG subsystem that the CPU is leaving the partition
1322 1328 */
1323 1329 pg_cpupart_out(cp, pp);
1324 1330
1325 1331 /*
1326 1332 * Take the CPU out of interrupt participation so we won't find
1327 1333 * bound kernel threads. If the architecture cannot completely
1328 1334 * shut off interrupts on the CPU, don't quiesce it, but don't
 1329 1335 * run anything but interrupt threads... this is indicated by
 1330 1336 * the CPU_OFFLINE flag being on but the CPU_QUIESCED flag being
1331 1337 * off.
1332 1338 */
1333 1339 intr_enable = cp->cpu_flags & CPU_ENABLE;
1334 1340 if (intr_enable)
1335 1341 no_quiesce = cpu_intr_disable(cp);
1336 1342
1337 1343 /*
1338 1344 * Record that we are aiming to offline this cpu. This acts as
1339 1345 * a barrier to further weak binding requests in thread_nomigrate
1340 1346 * and also causes cpu_choose, disp_lowpri_cpu and setfrontdq to
1341 1347 * lean away from this cpu. Further strong bindings are already
1342 1348 * avoided since we hold cpu_lock. Since threads that are set
1343 1349 * runnable around now and others coming off the target cpu are
1344 1350 * directed away from the target, existing strong and weak bindings
1345 1351 * (especially the latter) to the target cpu stand maximum chance of
1346 1352 * being able to unbind during the short delay loop below (if other
1347 1353 * unbound threads compete they may not see cpu in time to unbind
 1348 1354 * even if they would do so immediately).
1349 1355 */
1350 1356 cpu_inmotion = cp;
1351 1357 membar_enter();
1352 1358
1353 1359 /*
1354 1360 * Check for kernel threads (strong or weak) bound to that CPU.
1355 1361 * Strongly bound threads may not unbind, and we'll have to return
1356 1362 * EBUSY. Weakly bound threads should always disappear - we've
1357 1363 * stopped more weak binding with cpu_inmotion and existing
1358 1364 * bindings will drain imminently (they may not block). Nonetheless
1359 1365 * we will wait for a fixed period for all bound threads to disappear.
1360 1366 * Inactive interrupt threads are OK (they'll be in TS_FREE
 1361 1367 * state). If the test finds some bound threads, wait a few ticks
 1362 1368 * to give short-lived threads (such as interrupts) a chance to
1363 1369 * complete. Note that if no_quiesce is set, i.e. this cpu
1364 1370 * is required to service interrupts, then we take the route
1365 1371 * that permits interrupt threads to be active (or bypassed).
1366 1372 */
1367 1373 bound_func = no_quiesce ? disp_bound_threads : disp_bound_anythreads;
1368 1374
1369 1375 again: for (loop_count = 0; (*bound_func)(cp, 0); loop_count++) {
1370 1376 if (loop_count >= 5) {
1371 1377 error = EBUSY; /* some threads still bound */
1372 1378 break;
1373 1379 }
1374 1380
1375 1381 /*
1376 1382 * If some threads were assigned, give them
1377 1383 * a chance to complete or move.
1378 1384 *
1379 1385 * This assumes that the clock_thread is not bound
1380 1386 * to any CPU, because the clock_thread is needed to
1381 1387 * do the delay(hz/100).
1382 1388 *
1383 1389 * Note: we still hold the cpu_lock while waiting for
1384 1390 * the next clock tick. This is OK since it isn't
1385 1391 * needed for anything else except processor_bind(2),
1386 1392 * and system initialization. If we drop the lock,
1387 1393 * we would risk another p_online disabling the last
1388 1394 * processor.
1389 1395 */
1390 1396 delay(hz/100);
1391 1397 }
1392 1398
1393 1399 if (error == 0 && callout_off == 0) {
1394 1400 callout_cpu_offline(cp);
1395 1401 callout_off = 1;
1396 1402 }
1397 1403
1398 1404 if (error == 0 && cyclic_off == 0) {
1399 1405 if (!cyclic_offline(cp)) {
1400 1406 /*
1401 1407 * We must have bound cyclics...
1402 1408 */
1403 1409 error = EBUSY;
1404 1410 goto out;
1405 1411 }
1406 1412 cyclic_off = 1;
1407 1413 }
1408 1414
1409 1415 /*
1410 1416 * Call mp_cpu_stop() to perform any special operations
1411 1417 * needed for this machine architecture to offline a CPU.
1412 1418 */
1413 1419 if (error == 0)
1414 1420 error = mp_cpu_stop(cp); /* arch-dep hook */
1415 1421
1416 1422 /*
1417 1423 * If that all worked, take the CPU offline and decrement
1418 1424 * ncpus_online.
1419 1425 */
1420 1426 if (error == 0) {
1421 1427 /*
1422 1428 * Put all the cpus into a known safe place.
1423 1429 * No mutexes can be entered while CPUs are paused.
1424 1430 */
1425 1431 pause_cpus(cp, NULL);
1426 1432 /*
1427 1433 * Repeat the operation, if necessary, to make sure that
1428 1434 * all outstanding low-level interrupts run to completion
1429 1435 * before we set the CPU_QUIESCED flag. It's also possible
1430 1436 * that a thread has weak bound to the cpu despite our raising
1431 1437 * cpu_inmotion above since it may have loaded that
1432 1438 * value before the barrier became visible (this would have
1433 1439 * to be the thread that was on the target cpu at the time
1434 1440 * we raised the barrier).
1435 1441 */
1436 1442 if ((!no_quiesce && cp->cpu_intr_actv != 0) ||
1437 1443 (*bound_func)(cp, 1)) {
1438 1444 start_cpus();
1439 1445 (void) mp_cpu_start(cp);
1440 1446 goto again;
1441 1447 }
1442 1448 ncp = cp->cpu_next_part;
1443 1449 cpu_lpl = cp->cpu_lpl;
1444 1450 ASSERT(cpu_lpl != NULL);
1445 1451
1446 1452 /*
1447 1453 * Remove the CPU from the list of active CPUs.
1448 1454 */
1449 1455 cpu_remove_active(cp);
1450 1456
1451 1457 /*
1452 1458 * Walk the active process list and look for threads
1453 1459 * whose home lgroup needs to be updated, or
1454 1460 * the last CPU they run on is the one being offlined now.
1455 1461 */
1456 1462
1457 1463 ASSERT(curthread->t_cpu != cp);
1458 1464 for (p = practive; p != NULL; p = p->p_next) {
1459 1465
1460 1466 t = p->p_tlist;
1461 1467
1462 1468 if (t == NULL)
1463 1469 continue;
1464 1470
1465 1471 lgrp_diff_lpl = 0;
1466 1472
1467 1473 do {
1468 1474 ASSERT(t->t_lpl != NULL);
1469 1475 /*
1470 1476 * Taking last CPU in lpl offline
1471 1477 * Rehome thread if it is in this lpl
1472 1478 * Otherwise, update the count of how many
1473 1479 * threads are in this CPU's lgroup but have
1474 1480 * a different lpl.
1475 1481 */
1476 1482
1477 1483 if (cpu_lpl->lpl_ncpu == 0) {
1478 1484 if (t->t_lpl == cpu_lpl)
1479 1485 lgrp_move_thread(t,
1480 1486 lgrp_choose(t,
1481 1487 t->t_cpupart), 0);
1482 1488 else if (t->t_lpl->lpl_lgrpid ==
1483 1489 cpu_lpl->lpl_lgrpid)
1484 1490 lgrp_diff_lpl++;
1485 1491 }
1486 1492 ASSERT(t->t_lpl->lpl_ncpu > 0);
1487 1493
1488 1494 /*
1489 1495 * Update CPU last ran on if it was this CPU
1490 1496 */
1491 1497 if (t->t_cpu == cp && t->t_bound_cpu != cp)
1492 - t->t_cpu = disp_lowpri_cpu(ncp,
1493 - t->t_lpl, t->t_pri, NULL);
1498 + t->t_cpu = disp_lowpri_cpu(ncp, t,
1499 + t->t_pri);
1494 1500 ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp ||
1495 1501 t->t_weakbound_cpu == cp);
1496 1502
1497 1503 t = t->t_forw;
1498 1504 } while (t != p->p_tlist);
1499 1505
1500 1506 /*
1501 1507 * Didn't find any threads in the same lgroup as this
1502 1508 * CPU with a different lpl, so remove the lgroup from
1503 1509 * the process lgroup bitmask.
1504 1510 */
1505 1511
1506 1512 if (lgrp_diff_lpl == 0)
1507 1513 klgrpset_del(p->p_lgrpset, cpu_lpl->lpl_lgrpid);
1508 1514 }
1509 1515
1510 1516 /*
1511 1517 * Walk thread list looking for threads that need to be
1512 1518 * rehomed, since there are some threads that are not in
1513 1519 * their process's p_tlist.
1514 1520 */
1515 1521
1516 1522 t = curthread;
1517 1523 do {
1518 1524 ASSERT(t != NULL && t->t_lpl != NULL);
1519 1525
1520 1526 /*
1521 1527 * Rehome threads with same lpl as this CPU when this
1522 1528 * is the last CPU in the lpl.
1523 1529 */
1524 1530
1525 1531 if ((cpu_lpl->lpl_ncpu == 0) && (t->t_lpl == cpu_lpl))
1526 1532 lgrp_move_thread(t,
1527 1533 lgrp_choose(t, t->t_cpupart), 1);
1528 1534
1529 1535 ASSERT(t->t_lpl->lpl_ncpu > 0);
1530 1536
1531 1537 /*
1532 1538 * Update CPU last ran on if it was this CPU
1533 1539 */
1534 1540
1535 - if (t->t_cpu == cp && t->t_bound_cpu != cp) {
1536 - t->t_cpu = disp_lowpri_cpu(ncp,
1537 - t->t_lpl, t->t_pri, NULL);
1538 - }
1541 + if (t->t_cpu == cp && t->t_bound_cpu != cp)
1542 + t->t_cpu = disp_lowpri_cpu(ncp, t, t->t_pri);
1543 +
1539 1544 ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp ||
1540 1545 t->t_weakbound_cpu == cp);
1541 1546 t = t->t_next;
1542 1547
1543 1548 } while (t != curthread);
1544 1549 ASSERT((cp->cpu_flags & (CPU_FAULTED | CPU_SPARE)) == 0);
1545 1550 cp->cpu_flags |= CPU_OFFLINE;
1546 1551 disp_cpu_inactive(cp);
1547 1552 if (!no_quiesce)
1548 1553 cp->cpu_flags |= CPU_QUIESCED;
1549 1554 ncpus_online--;
1550 1555 cpu_set_state(cp);
1551 1556 cpu_inmotion = NULL;
1552 1557 start_cpus();
1553 1558 cpu_stats_kstat_destroy(cp);
1554 1559 cpu_delete_intrstat(cp);
1555 1560 lgrp_kstat_destroy(cp);
1556 1561 }
1557 1562
1558 1563 out:
1559 1564 cpu_inmotion = NULL;
1560 1565
1561 1566 /*
1562 1567 * If we failed, re-enable interrupts.
1563 1568 * Do this even if cpu_intr_disable returned an error, because
1564 1569 * it may have partially disabled interrupts.
1565 1570 */
1566 1571 if (error && intr_enable)
1567 1572 cpu_intr_enable(cp);
1568 1573
1569 1574 /*
1570 1575 * If we failed, but managed to offline the cyclic subsystem on this
1571 1576 * CPU, bring it back online.
1572 1577 */
1573 1578 if (error && cyclic_off)
1574 1579 cyclic_online(cp);
1575 1580
1576 1581 /*
1577 1582 * If we failed, but managed to offline callouts on this CPU,
1578 1583 * bring it back online.
1579 1584 */
1580 1585 if (error && callout_off)
1581 1586 callout_cpu_online(cp);
1582 1587
1583 1588 /*
1584 1589 * If we failed, tell the PG subsystem that the CPU is back
1585 1590 */
1586 1591 pg_cpupart_in(cp, pp);
1587 1592
1588 1593 /*
1589 1594 * If we failed, we need to notify everyone that this CPU is back on.
1590 1595 */
1591 1596 if (error != 0) {
1592 1597 CPU_NEW_GENERATION(cp);
1593 1598 cpu_state_change_notify(cp->cpu_id, CPU_ON);
1594 1599 cpu_state_change_notify(cp->cpu_id, CPU_INTR_ON);
1595 1600 }
1596 1601
1597 1602 return (error);
1598 1603 }
1599 1604
1600 1605 /*
1601 1606 * Mark the indicated CPU as faulted, taking it offline.
1602 1607 */
1603 1608 int
1604 1609 cpu_faulted(cpu_t *cp, int flags)
1605 1610 {
1606 1611 int error = 0;
1607 1612
1608 1613 ASSERT(MUTEX_HELD(&cpu_lock));
1609 1614 ASSERT(!cpu_is_poweredoff(cp));
1610 1615
1611 1616 if (cpu_is_offline(cp)) {
1612 1617 cp->cpu_flags &= ~CPU_SPARE;
1613 1618 cp->cpu_flags |= CPU_FAULTED;
1614 1619 mp_cpu_faulted_enter(cp);
1615 1620 cpu_set_state(cp);
1616 1621 return (0);
1617 1622 }
1618 1623
1619 1624 if ((error = cpu_offline(cp, flags)) == 0) {
1620 1625 cp->cpu_flags |= CPU_FAULTED;
1621 1626 mp_cpu_faulted_enter(cp);
1622 1627 cpu_set_state(cp);
1623 1628 }
1624 1629
1625 1630 return (error);
1626 1631 }
1627 1632
1628 1633 /*
1629 1634 * Mark the indicated CPU as a spare, taking it offline.
1630 1635 */
1631 1636 int
1632 1637 cpu_spare(cpu_t *cp, int flags)
1633 1638 {
1634 1639 int error = 0;
1635 1640
1636 1641 ASSERT(MUTEX_HELD(&cpu_lock));
1637 1642 ASSERT(!cpu_is_poweredoff(cp));
1638 1643
1639 1644 if (cpu_is_offline(cp)) {
1640 1645 if (cp->cpu_flags & CPU_FAULTED) {
1641 1646 cp->cpu_flags &= ~CPU_FAULTED;
1642 1647 mp_cpu_faulted_exit(cp);
1643 1648 }
1644 1649 cp->cpu_flags |= CPU_SPARE;
1645 1650 cpu_set_state(cp);
1646 1651 return (0);
1647 1652 }
1648 1653
1649 1654 if ((error = cpu_offline(cp, flags)) == 0) {
1650 1655 cp->cpu_flags |= CPU_SPARE;
1651 1656 cpu_set_state(cp);
1652 1657 }
1653 1658
1654 1659 return (error);
1655 1660 }
1656 1661
1657 1662 /*
1658 1663 * Take the indicated CPU from poweroff to offline.
1659 1664 */
1660 1665 int
1661 1666 cpu_poweron(cpu_t *cp)
1662 1667 {
1663 1668 int error = ENOTSUP;
1664 1669
1665 1670 ASSERT(MUTEX_HELD(&cpu_lock));
1666 1671 ASSERT(cpu_is_poweredoff(cp));
1667 1672
1668 1673 error = mp_cpu_poweron(cp); /* arch-dep hook */
1669 1674 if (error == 0)
1670 1675 cpu_set_state(cp);
1671 1676
1672 1677 return (error);
1673 1678 }
1674 1679
1675 1680 /*
1676 1681 * Take the indicated CPU from any inactive state to powered off.
1677 1682 */
1678 1683 int
1679 1684 cpu_poweroff(cpu_t *cp)
1680 1685 {
1681 1686 int error = ENOTSUP;
1682 1687
1683 1688 ASSERT(MUTEX_HELD(&cpu_lock));
1684 1689 ASSERT(cpu_is_offline(cp));
1685 1690
1686 1691 if (!(cp->cpu_flags & CPU_QUIESCED))
1687 1692 return (EBUSY); /* not completely idle */
1688 1693
1689 1694 error = mp_cpu_poweroff(cp); /* arch-dep hook */
1690 1695 if (error == 0)
1691 1696 cpu_set_state(cp);
1692 1697
1693 1698 return (error);
1694 1699 }
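
For illustration only, a minimal sketch of how a dynamic-reconfiguration caller would be expected to drive the entry points above to take an online CPU all the way to powered off. The flags value of 0 (no forced offline) and the error handling are assumptions, not part of this change; cpu_lock must be held across both calls, and cpu_poweroff() fails with EBUSY unless the CPU has reached the CPU_QUIESCED state.

	int err;

	mutex_enter(&cpu_lock);
	if ((err = cpu_offline(cp, 0)) == 0)	/* offline first (0 = no force) */
		err = cpu_poweroff(cp);		/* EBUSY unless CPU_QUIESCED */
	mutex_exit(&cpu_lock);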
1695 1700
1696 1701 /*
1697 1702 * Initialize the Sequential CPU id lookup table
1698 1703 */
1699 1704 void
1700 1705 cpu_seq_tbl_init()
1701 1706 {
1702 1707 cpu_t **tbl;
1703 1708
1704 1709 tbl = kmem_zalloc(sizeof (struct cpu *) * max_ncpus, KM_SLEEP);
1705 1710 tbl[0] = CPU;
1706 1711
1707 1712 cpu_seq = tbl;
1708 1713 }
1709 1714
1710 1715 /*
1711 1716 * Initialize the CPU lists for the first CPU.
1712 1717 */
1713 1718 void
1714 1719 cpu_list_init(cpu_t *cp)
1715 1720 {
1716 1721 cp->cpu_next = cp;
1717 1722 cp->cpu_prev = cp;
1718 1723 cpu_list = cp;
1719 1724 clock_cpu_list = cp;
1720 1725
1721 1726 cp->cpu_next_onln = cp;
1722 1727 cp->cpu_prev_onln = cp;
1723 1728 cpu_active = cp;
1724 1729
1725 1730 cp->cpu_seqid = 0;
1726 1731 CPUSET_ADD(cpu_seqid_inuse, 0);
1727 1732
1728 1733 /*
1729 1734 * Bootstrap cpu_seq using cpu_list
1730 1735 * The cpu_seq[] table will be dynamically allocated
1731 1736 * when kmem later becomes available (but before going MP)
1732 1737 */
1733 1738 cpu_seq = &cpu_list;
1734 1739
1735 1740 cp->cpu_cache_offset = KMEM_CPU_CACHE_OFFSET(cp->cpu_seqid);
1736 1741 cp_default.cp_cpulist = cp;
1737 1742 cp_default.cp_ncpus = 1;
1738 1743 cp->cpu_next_part = cp;
1739 1744 cp->cpu_prev_part = cp;
1740 1745 cp->cpu_part = &cp_default;
1741 1746
1742 1747 CPUSET_ADD(cpu_available, cp->cpu_id);
1743 1748 }
1744 1749
1745 1750 /*
1746 1751 * Insert a CPU into the list of available CPUs.
1747 1752 */
1748 1753 void
1749 1754 cpu_add_unit(cpu_t *cp)
1750 1755 {
1751 1756 int seqid;
1752 1757
1753 1758 ASSERT(MUTEX_HELD(&cpu_lock));
1754 1759 ASSERT(cpu_list != NULL); /* list started in cpu_list_init */
1755 1760
1756 1761 lgrp_config(LGRP_CONFIG_CPU_ADD, (uintptr_t)cp, 0);
1757 1762
1758 1763 /*
1759 1764 * Note: most users of the cpu_list will grab the
1760 1765 	 * cpu_lock to ensure that it isn't modified. However,
1761 1766 * certain users can't or won't do that. To allow this
1762 1767 * we pause the other cpus. Users who walk the list
1763 1768 	 * without cpu_lock must disable kernel preemption
1764 1769 	 * to ensure that the list isn't modified underneath
1765 1770 * them. Also, any cached pointers to cpu structures
1766 1771 * must be revalidated by checking to see if the
1767 1772 * cpu_next pointer points to itself. This check must
1768 1773 * be done with the cpu_lock held or kernel preemption
1769 1774 * disabled. This check relies upon the fact that
1770 1775 	 * old cpu structures are not freed or cleared after
1771 1776 	 * they are removed from the cpu_list.
1772 1777 *
1773 1778 * Note that the clock code walks the cpu list dereferencing
1774 1779 * the cpu_part pointer, so we need to initialize it before
1775 1780 * adding the cpu to the list.
1776 1781 */
1777 1782 cp->cpu_part = &cp_default;
1778 1783 pause_cpus(NULL, NULL);
1779 1784 cp->cpu_next = cpu_list;
1780 1785 cp->cpu_prev = cpu_list->cpu_prev;
1781 1786 cpu_list->cpu_prev->cpu_next = cp;
1782 1787 cpu_list->cpu_prev = cp;
1783 1788 start_cpus();
1784 1789
1785 1790 for (seqid = 0; CPU_IN_SET(cpu_seqid_inuse, seqid); seqid++)
1786 1791 continue;
1787 1792 CPUSET_ADD(cpu_seqid_inuse, seqid);
1788 1793 cp->cpu_seqid = seqid;
1789 1794
1790 1795 if (seqid > max_cpu_seqid_ever)
1791 1796 max_cpu_seqid_ever = seqid;
1792 1797
1793 1798 ASSERT(ncpus < max_ncpus);
1794 1799 ncpus++;
1795 1800 cp->cpu_cache_offset = KMEM_CPU_CACHE_OFFSET(cp->cpu_seqid);
1796 1801 cpu[cp->cpu_id] = cp;
1797 1802 CPUSET_ADD(cpu_available, cp->cpu_id);
1798 1803 cpu_seq[cp->cpu_seqid] = cp;
1799 1804
1800 1805 /*
1801 1806 * allocate a pause thread for this CPU.
1802 1807 */
1803 1808 cpu_pause_alloc(cp);
1804 1809
1805 1810 /*
1806 1811 * So that new CPUs won't have NULL prev_onln and next_onln pointers,
1807 1812 * link them into a list of just that CPU.
1808 1813 * This is so that disp_lowpri_cpu will work for thread_create in
1809 1814 * pause_cpus() when called from the startup thread in a new CPU.
1810 1815 */
1811 1816 cp->cpu_next_onln = cp;
1812 1817 cp->cpu_prev_onln = cp;
1813 1818 cpu_info_kstat_create(cp);
1814 1819 cp->cpu_next_part = cp;
1815 1820 cp->cpu_prev_part = cp;
1816 1821
1817 1822 init_cpu_mstate(cp, CMS_SYSTEM);
1818 1823
1819 1824 pool_pset_mod = gethrtime();
1820 1825 }
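
A minimal sketch of the lockless traversal discipline described in the comment inside cpu_add_unit() above: walkers that do not hold cpu_lock disable kernel preemption, which keeps pause_cpus() from completing and therefore keeps the list from being spliced underneath them. The helper name walk_cpu_list_nolock and its visit callback are hypothetical, not part of this change; a cached cpu_t pointer held across a sleep must instead be revalidated (cpu_del_unit() below clears cpu_next on deletion).

	static void
	walk_cpu_list_nolock(void (*visit)(cpu_t *))
	{
		cpu_t *cp;

		kpreempt_disable();	/* list cannot change while we walk it */
		cp = cpu_list;
		do {
			visit(cp);
			cp = cp->cpu_next;
		} while (cp != cpu_list);
		kpreempt_enable();
	}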
1821 1826
1822 1827 /*
1823 1828 * Do the opposite of cpu_add_unit().
1824 1829 */
1825 1830 void
1826 1831 cpu_del_unit(int cpuid)
1827 1832 {
1828 1833 struct cpu *cp, *cpnext;
1829 1834
1830 1835 ASSERT(MUTEX_HELD(&cpu_lock));
1831 1836 cp = cpu[cpuid];
1832 1837 ASSERT(cp != NULL);
1833 1838
1834 1839 ASSERT(cp->cpu_next_onln == cp);
1835 1840 ASSERT(cp->cpu_prev_onln == cp);
1836 1841 ASSERT(cp->cpu_next_part == cp);
1837 1842 ASSERT(cp->cpu_prev_part == cp);
1838 1843
1839 1844 /*
1840 1845 * Tear down the CPU's physical ID cache, and update any
1841 1846 * processor groups
1842 1847 */
1843 1848 pg_cpu_fini(cp, NULL);
1844 1849 pghw_physid_destroy(cp);
1845 1850
1846 1851 /*
1847 1852 * Destroy kstat stuff.
1848 1853 */
1849 1854 cpu_info_kstat_destroy(cp);
1850 1855 term_cpu_mstate(cp);
1851 1856 /*
1852 1857 * Free up pause thread.
1853 1858 */
1854 1859 cpu_pause_free(cp);
1855 1860 CPUSET_DEL(cpu_available, cp->cpu_id);
1856 1861 cpu[cp->cpu_id] = NULL;
1857 1862 cpu_seq[cp->cpu_seqid] = NULL;
1858 1863
1859 1864 /*
1860 1865 * The clock thread and mutex_vector_enter cannot hold the
1861 1866 * cpu_lock while traversing the cpu list, therefore we pause
1862 1867 * all other threads by pausing the other cpus. These, and any
1863 1868 * other routines holding cpu pointers while possibly sleeping
1864 1869 * must be sure to call kpreempt_disable before processing the
1865 1870 * list and be sure to check that the cpu has not been deleted
1866 1871 * after any sleeps (check cp->cpu_next != NULL). We guarantee
1867 1872 * to keep the deleted cpu structure around.
1868 1873 *
1869 1874 * Note that this MUST be done AFTER cpu_available
1870 1875 * has been updated so that we don't waste time
1871 1876 * trying to pause the cpu we're trying to delete.
1872 1877 */
1873 1878 pause_cpus(NULL, NULL);
1874 1879
1875 1880 cpnext = cp->cpu_next;
1876 1881 cp->cpu_prev->cpu_next = cp->cpu_next;
1877 1882 cp->cpu_next->cpu_prev = cp->cpu_prev;
1878 1883 if (cp == cpu_list)
1879 1884 cpu_list = cpnext;
1880 1885
1881 1886 /*
1882 1887 * Signals that the cpu has been deleted (see above).
1883 1888 */
1884 1889 cp->cpu_next = NULL;
1885 1890 cp->cpu_prev = NULL;
1886 1891
1887 1892 start_cpus();
1888 1893
1889 1894 CPUSET_DEL(cpu_seqid_inuse, cp->cpu_seqid);
1890 1895 ncpus--;
1891 1896 lgrp_config(LGRP_CONFIG_CPU_DEL, (uintptr_t)cp, 0);
1892 1897
1893 1898 pool_pset_mod = gethrtime();
1894 1899 }
1895 1900
1896 1901 /*
1897 1902 * Add a CPU to the list of active CPUs.
1898 1903 * This routine must not get any locks, because other CPUs are paused.
1899 1904 */
1900 1905 static void
1901 1906 cpu_add_active_internal(cpu_t *cp)
1902 1907 {
1903 1908 cpupart_t *pp = cp->cpu_part;
1904 1909
1905 1910 ASSERT(MUTEX_HELD(&cpu_lock));
1906 1911 ASSERT(cpu_list != NULL); /* list started in cpu_list_init */
1907 1912
1908 1913 ncpus_online++;
1909 1914 cpu_set_state(cp);
1910 1915 cp->cpu_next_onln = cpu_active;
1911 1916 cp->cpu_prev_onln = cpu_active->cpu_prev_onln;
1912 1917 cpu_active->cpu_prev_onln->cpu_next_onln = cp;
1913 1918 cpu_active->cpu_prev_onln = cp;
1914 1919
1915 1920 if (pp->cp_cpulist) {
1916 1921 cp->cpu_next_part = pp->cp_cpulist;
1917 1922 cp->cpu_prev_part = pp->cp_cpulist->cpu_prev_part;
1918 1923 pp->cp_cpulist->cpu_prev_part->cpu_next_part = cp;
1919 1924 pp->cp_cpulist->cpu_prev_part = cp;
1920 1925 } else {
1921 1926 ASSERT(pp->cp_ncpus == 0);
1922 1927 pp->cp_cpulist = cp->cpu_next_part = cp->cpu_prev_part = cp;
1923 1928 }
1924 1929 pp->cp_ncpus++;
1925 1930 if (pp->cp_ncpus == 1) {
1926 1931 cp_numparts_nonempty++;
1927 1932 ASSERT(cp_numparts_nonempty != 0);
1928 1933 }
1929 1934
1930 1935 pg_cpu_active(cp);
1931 1936 lgrp_config(LGRP_CONFIG_CPU_ONLINE, (uintptr_t)cp, 0);
1932 1937
1933 1938 bzero(&cp->cpu_loadavg, sizeof (cp->cpu_loadavg));
1934 1939 }
1935 1940
1936 1941 /*
1937 1942 * Add a CPU to the list of active CPUs.
1938 1943 * This is called from machine-dependent layers when a new CPU is started.
1939 1944 */
1940 1945 void
1941 1946 cpu_add_active(cpu_t *cp)
1942 1947 {
1943 1948 pg_cpupart_in(cp, cp->cpu_part);
1944 1949
1945 1950 pause_cpus(NULL, NULL);
1946 1951 cpu_add_active_internal(cp);
1947 1952 start_cpus();
1948 1953
1949 1954 cpu_stats_kstat_create(cp);
1950 1955 cpu_create_intrstat(cp);
1951 1956 lgrp_kstat_create(cp);
1952 1957 cpu_state_change_notify(cp->cpu_id, CPU_INIT);
1953 1958 }
1954 1959
1955 1960
1956 1961 /*
1957 1962 * Remove a CPU from the list of active CPUs.
1958 1963 * This routine must not get any locks, because other CPUs are paused.
1959 1964 */
1960 1965 /* ARGSUSED */
1961 1966 static void
1962 1967 cpu_remove_active(cpu_t *cp)
1963 1968 {
1964 1969 cpupart_t *pp = cp->cpu_part;
1965 1970
1966 1971 ASSERT(MUTEX_HELD(&cpu_lock));
1967 1972 ASSERT(cp->cpu_next_onln != cp); /* not the last one */
1968 1973 ASSERT(cp->cpu_prev_onln != cp); /* not the last one */
1969 1974
1970 1975 pg_cpu_inactive(cp);
1971 1976
1972 1977 lgrp_config(LGRP_CONFIG_CPU_OFFLINE, (uintptr_t)cp, 0);
1973 1978
1974 1979 if (cp == clock_cpu_list)
1975 1980 clock_cpu_list = cp->cpu_next_onln;
1976 1981
1977 1982 cp->cpu_prev_onln->cpu_next_onln = cp->cpu_next_onln;
1978 1983 cp->cpu_next_onln->cpu_prev_onln = cp->cpu_prev_onln;
1979 1984 if (cpu_active == cp) {
1980 1985 cpu_active = cp->cpu_next_onln;
1981 1986 }
1982 1987 cp->cpu_next_onln = cp;
1983 1988 cp->cpu_prev_onln = cp;
1984 1989
1985 1990 cp->cpu_prev_part->cpu_next_part = cp->cpu_next_part;
1986 1991 cp->cpu_next_part->cpu_prev_part = cp->cpu_prev_part;
1987 1992 if (pp->cp_cpulist == cp) {
1988 1993 pp->cp_cpulist = cp->cpu_next_part;
1989 1994 ASSERT(pp->cp_cpulist != cp);
1990 1995 }
1991 1996 cp->cpu_next_part = cp;
1992 1997 cp->cpu_prev_part = cp;
1993 1998 pp->cp_ncpus--;
1994 1999 if (pp->cp_ncpus == 0) {
1995 2000 cp_numparts_nonempty--;
1996 2001 ASSERT(cp_numparts_nonempty != 0);
1997 2002 }
1998 2003 }
1999 2004
2000 2005 /*
2001 2006 * Routine used to setup a newly inserted CPU in preparation for starting
2002 2007 * it running code.
2003 2008 */
2004 2009 int
2005 2010 cpu_configure(int cpuid)
2006 2011 {
2007 2012 int retval = 0;
2008 2013
2009 2014 ASSERT(MUTEX_HELD(&cpu_lock));
2010 2015
2011 2016 /*
2012 2017 * Some structures are statically allocated based upon
2013 2018 * the maximum number of cpus the system supports. Do not
2014 2019 * try to add anything beyond this limit.
2015 2020 */
2016 2021 if (cpuid < 0 || cpuid >= NCPU) {
2017 2022 return (EINVAL);
2018 2023 }
2019 2024
2020 2025 if ((cpu[cpuid] != NULL) && (cpu[cpuid]->cpu_flags != 0)) {
2021 2026 return (EALREADY);
2022 2027 }
2023 2028
2024 2029 if ((retval = mp_cpu_configure(cpuid)) != 0) {
2025 2030 return (retval);
2026 2031 }
2027 2032
2028 2033 cpu[cpuid]->cpu_flags = CPU_QUIESCED | CPU_OFFLINE | CPU_POWEROFF;
2029 2034 cpu_set_state(cpu[cpuid]);
2030 2035 retval = cpu_state_change_hooks(cpuid, CPU_CONFIG, CPU_UNCONFIG);
2031 2036 if (retval != 0)
2032 2037 (void) mp_cpu_unconfigure(cpuid);
2033 2038
2034 2039 return (retval);
2035 2040 }
2036 2041
2037 2042 /*
2038 2043 * Routine used to cleanup a CPU that has been powered off. This will
2039 2044 * destroy all per-cpu information related to this cpu.
2040 2045 */
2041 2046 int
2042 2047 cpu_unconfigure(int cpuid)
2043 2048 {
2044 2049 int error;
2045 2050
2046 2051 ASSERT(MUTEX_HELD(&cpu_lock));
2047 2052
2048 2053 if (cpu[cpuid] == NULL) {
2049 2054 return (ENODEV);
2050 2055 }
2051 2056
2052 2057 if (cpu[cpuid]->cpu_flags == 0) {
2053 2058 return (EALREADY);
2054 2059 }
2055 2060
2056 2061 if ((cpu[cpuid]->cpu_flags & CPU_POWEROFF) == 0) {
2057 2062 return (EBUSY);
2058 2063 }
2059 2064
2060 2065 if (cpu[cpuid]->cpu_props != NULL) {
2061 2066 (void) nvlist_free(cpu[cpuid]->cpu_props);
2062 2067 cpu[cpuid]->cpu_props = NULL;
2063 2068 }
2064 2069
2065 2070 error = cpu_state_change_hooks(cpuid, CPU_UNCONFIG, CPU_CONFIG);
2066 2071
2067 2072 if (error != 0)
2068 2073 return (error);
2069 2074
2070 2075 return (mp_cpu_unconfigure(cpuid));
2071 2076 }
2072 2077
2073 2078 /*
2074 2079 * Routines for registering and de-registering cpu_setup callback functions.
2075 2080 *
2076 2081 * Caller's context
2077 2082 * These routines must not be called from a driver's attach(9E) or
2078 2083 * detach(9E) entry point.
2079 2084 *
2080 2085 * NOTE: CPU callbacks should not block. They are called with cpu_lock held.
2081 2086 */
2082 2087
2083 2088 /*
2084 2089 * Ideally, these would be dynamically allocated and put into a linked
2085 2090 * list; however that is not feasible because the registration routine
2086 2091 * has to be available before the kmem allocator is working (in fact,
2087 2092 * it is called by the kmem allocator init code). In any case, there
2088 2093 * are quite a few extra entries for future users.
2089 2094 */
2090 2095 #define NCPU_SETUPS 20
2091 2096
2092 2097 struct cpu_setup {
2093 2098 cpu_setup_func_t *func;
2094 2099 void *arg;
2095 2100 } cpu_setups[NCPU_SETUPS];
2096 2101
2097 2102 void
2098 2103 register_cpu_setup_func(cpu_setup_func_t *func, void *arg)
2099 2104 {
2100 2105 int i;
2101 2106
2102 2107 ASSERT(MUTEX_HELD(&cpu_lock));
2103 2108
2104 2109 for (i = 0; i < NCPU_SETUPS; i++)
2105 2110 if (cpu_setups[i].func == NULL)
2106 2111 break;
2107 2112 if (i >= NCPU_SETUPS)
2108 2113 cmn_err(CE_PANIC, "Ran out of cpu_setup callback entries");
2109 2114
2110 2115 cpu_setups[i].func = func;
2111 2116 cpu_setups[i].arg = arg;
2112 2117 }
2113 2118
2114 2119 void
2115 2120 unregister_cpu_setup_func(cpu_setup_func_t *func, void *arg)
2116 2121 {
2117 2122 int i;
2118 2123
2119 2124 ASSERT(MUTEX_HELD(&cpu_lock));
2120 2125
2121 2126 for (i = 0; i < NCPU_SETUPS; i++)
2122 2127 if ((cpu_setups[i].func == func) &&
2123 2128 (cpu_setups[i].arg == arg))
2124 2129 break;
2125 2130 if (i >= NCPU_SETUPS)
2126 2131 cmn_err(CE_PANIC, "Could not find cpu_setup callback to "
2127 2132 "deregister");
2128 2133
2129 2134 cpu_setups[i].func = NULL;
2130 2135 cpu_setups[i].arg = 0;
2131 2136 }
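
A hedged example of how a subsystem might use the registration routines above; the callback name mysub_cpu_setup and its per-CPU bookkeeping are hypothetical. Callbacks are invoked with cpu_lock held and must not block, and a non-zero return from a hook run via cpu_state_change_hooks() (e.g. for CPU_CONFIG/CPU_UNCONFIG) causes the transition to be undone.

	/*ARGSUSED*/
	static int
	mysub_cpu_setup(cpu_setup_t what, int id, void *arg)
	{
		ASSERT(MUTEX_HELD(&cpu_lock));

		switch (what) {
		case CPU_CONFIG:
			/* attach pre-allocated, non-blocking state to cpu[id] */
			break;
		case CPU_UNCONFIG:
			/* tear that state back down */
			break;
		default:
			break;
		}
		return (0);	/* non-zero would roll the state change back */
	}

	/* Registration, done at subsystem init, not from attach(9E)/detach(9E): */
	mutex_enter(&cpu_lock);
	register_cpu_setup_func(mysub_cpu_setup, NULL);
	mutex_exit(&cpu_lock);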
2132 2137
2133 2138 /*
2134 2139 * Call any state change hooks for this CPU, ignore any errors.
2135 2140 */
2136 2141 void
2137 2142 cpu_state_change_notify(int id, cpu_setup_t what)
2138 2143 {
2139 2144 int i;
2140 2145
2141 2146 ASSERT(MUTEX_HELD(&cpu_lock));
2142 2147
2143 2148 for (i = 0; i < NCPU_SETUPS; i++) {
2144 2149 if (cpu_setups[i].func != NULL) {
2145 2150 cpu_setups[i].func(what, id, cpu_setups[i].arg);
2146 2151 }
2147 2152 }
2148 2153 }
2149 2154
2150 2155 /*
2151 2156 * Call any state change hooks for this CPU, undo it if error found.
2152 2157 */
2153 2158 static int
2154 2159 cpu_state_change_hooks(int id, cpu_setup_t what, cpu_setup_t undo)
2155 2160 {
2156 2161 int i;
2157 2162 int retval = 0;
2158 2163
2159 2164 ASSERT(MUTEX_HELD(&cpu_lock));
2160 2165
2161 2166 for (i = 0; i < NCPU_SETUPS; i++) {
2162 2167 if (cpu_setups[i].func != NULL) {
2163 2168 retval = cpu_setups[i].func(what, id,
2164 2169 cpu_setups[i].arg);
2165 2170 if (retval) {
2166 2171 for (i--; i >= 0; i--) {
2167 2172 if (cpu_setups[i].func != NULL)
2168 2173 cpu_setups[i].func(undo,
2169 2174 id, cpu_setups[i].arg);
2170 2175 }
2171 2176 break;
2172 2177 }
2173 2178 }
2174 2179 }
2175 2180 return (retval);
2176 2181 }
2177 2182
2178 2183 /*
2179 2184 * Export information about this CPU via the kstat mechanism.
2180 2185 */
2181 2186 static struct {
2182 2187 kstat_named_t ci_state;
2183 2188 kstat_named_t ci_state_begin;
2184 2189 kstat_named_t ci_cpu_type;
2185 2190 kstat_named_t ci_fpu_type;
2186 2191 kstat_named_t ci_clock_MHz;
2187 2192 kstat_named_t ci_chip_id;
2188 2193 kstat_named_t ci_implementation;
2189 2194 kstat_named_t ci_brandstr;
2190 2195 kstat_named_t ci_core_id;
2191 2196 kstat_named_t ci_curr_clock_Hz;
2192 2197 kstat_named_t ci_supp_freq_Hz;
2193 2198 kstat_named_t ci_pg_id;
2194 2199 #if defined(__sparcv9)
2195 2200 kstat_named_t ci_device_ID;
2196 2201 kstat_named_t ci_cpu_fru;
2197 2202 #endif
2198 2203 #if defined(__x86)
2199 2204 kstat_named_t ci_vendorstr;
2200 2205 kstat_named_t ci_family;
2201 2206 kstat_named_t ci_model;
2202 2207 kstat_named_t ci_step;
2203 2208 kstat_named_t ci_clogid;
2204 2209 kstat_named_t ci_pkg_core_id;
2205 2210 kstat_named_t ci_ncpuperchip;
2206 2211 kstat_named_t ci_ncoreperchip;
2207 2212 kstat_named_t ci_max_cstates;
2208 2213 kstat_named_t ci_curr_cstate;
2209 2214 kstat_named_t ci_cacheid;
2210 2215 kstat_named_t ci_sktstr;
2211 2216 #endif
2212 2217 } cpu_info_template = {
2213 2218 { "state", KSTAT_DATA_CHAR },
2214 2219 { "state_begin", KSTAT_DATA_LONG },
2215 2220 { "cpu_type", KSTAT_DATA_CHAR },
2216 2221 { "fpu_type", KSTAT_DATA_CHAR },
2217 2222 { "clock_MHz", KSTAT_DATA_LONG },
2218 2223 { "chip_id", KSTAT_DATA_LONG },
2219 2224 { "implementation", KSTAT_DATA_STRING },
2220 2225 { "brand", KSTAT_DATA_STRING },
2221 2226 { "core_id", KSTAT_DATA_LONG },
2222 2227 { "current_clock_Hz", KSTAT_DATA_UINT64 },
2223 2228 { "supported_frequencies_Hz", KSTAT_DATA_STRING },
2224 2229 { "pg_id", KSTAT_DATA_LONG },
2225 2230 #if defined(__sparcv9)
2226 2231 { "device_ID", KSTAT_DATA_UINT64 },
2227 2232 { "cpu_fru", KSTAT_DATA_STRING },
2228 2233 #endif
2229 2234 #if defined(__x86)
2230 2235 { "vendor_id", KSTAT_DATA_STRING },
2231 2236 { "family", KSTAT_DATA_INT32 },
2232 2237 { "model", KSTAT_DATA_INT32 },
2233 2238 { "stepping", KSTAT_DATA_INT32 },
2234 2239 { "clog_id", KSTAT_DATA_INT32 },
2235 2240 { "pkg_core_id", KSTAT_DATA_LONG },
2236 2241 { "ncpu_per_chip", KSTAT_DATA_INT32 },
2237 2242 { "ncore_per_chip", KSTAT_DATA_INT32 },
2238 2243 { "supported_max_cstates", KSTAT_DATA_INT32 },
2239 2244 { "current_cstate", KSTAT_DATA_INT32 },
2240 2245 { "cache_id", KSTAT_DATA_INT32 },
2241 2246 { "socket_type", KSTAT_DATA_STRING },
2242 2247 #endif
2243 2248 };
2244 2249
2245 2250 static kmutex_t cpu_info_template_lock;
2246 2251
2247 2252 static int
2248 2253 cpu_info_kstat_update(kstat_t *ksp, int rw)
2249 2254 {
2250 2255 cpu_t *cp = ksp->ks_private;
2251 2256 const char *pi_state;
2252 2257
2253 2258 if (rw == KSTAT_WRITE)
2254 2259 return (EACCES);
2255 2260
2256 2261 #if defined(__x86)
2257 2262 /* Is the cpu still initialising itself? */
2258 2263 if (cpuid_checkpass(cp, 1) == 0)
2259 2264 return (ENXIO);
2260 2265 #endif
2261 2266 switch (cp->cpu_type_info.pi_state) {
2262 2267 case P_ONLINE:
2263 2268 pi_state = PS_ONLINE;
2264 2269 break;
2265 2270 case P_POWEROFF:
2266 2271 pi_state = PS_POWEROFF;
2267 2272 break;
2268 2273 case P_NOINTR:
2269 2274 pi_state = PS_NOINTR;
2270 2275 break;
2271 2276 case P_FAULTED:
2272 2277 pi_state = PS_FAULTED;
2273 2278 break;
2274 2279 case P_SPARE:
2275 2280 pi_state = PS_SPARE;
2276 2281 break;
2277 2282 case P_OFFLINE:
2278 2283 pi_state = PS_OFFLINE;
2279 2284 break;
2280 2285 default:
2281 2286 pi_state = "unknown";
2282 2287 }
2283 2288 (void) strcpy(cpu_info_template.ci_state.value.c, pi_state);
2284 2289 cpu_info_template.ci_state_begin.value.l = cp->cpu_state_begin;
2285 2290 (void) strncpy(cpu_info_template.ci_cpu_type.value.c,
2286 2291 cp->cpu_type_info.pi_processor_type, 15);
2287 2292 (void) strncpy(cpu_info_template.ci_fpu_type.value.c,
2288 2293 cp->cpu_type_info.pi_fputypes, 15);
2289 2294 cpu_info_template.ci_clock_MHz.value.l = cp->cpu_type_info.pi_clock;
2290 2295 cpu_info_template.ci_chip_id.value.l =
2291 2296 pg_plat_hw_instance_id(cp, PGHW_CHIP);
2292 2297 kstat_named_setstr(&cpu_info_template.ci_implementation,
2293 2298 cp->cpu_idstr);
2294 2299 kstat_named_setstr(&cpu_info_template.ci_brandstr, cp->cpu_brandstr);
2295 2300 cpu_info_template.ci_core_id.value.l = pg_plat_get_core_id(cp);
2296 2301 cpu_info_template.ci_curr_clock_Hz.value.ui64 =
2297 2302 cp->cpu_curr_clock;
2298 2303 cpu_info_template.ci_pg_id.value.l =
2299 2304 cp->cpu_pg && cp->cpu_pg->cmt_lineage ?
2300 2305 cp->cpu_pg->cmt_lineage->pg_id : -1;
2301 2306 kstat_named_setstr(&cpu_info_template.ci_supp_freq_Hz,
2302 2307 cp->cpu_supp_freqs);
2303 2308 #if defined(__sparcv9)
2304 2309 cpu_info_template.ci_device_ID.value.ui64 =
2305 2310 cpunodes[cp->cpu_id].device_id;
2306 2311 kstat_named_setstr(&cpu_info_template.ci_cpu_fru, cpu_fru_fmri(cp));
2307 2312 #endif
2308 2313 #if defined(__x86)
2309 2314 kstat_named_setstr(&cpu_info_template.ci_vendorstr,
2310 2315 cpuid_getvendorstr(cp));
2311 2316 cpu_info_template.ci_family.value.l = cpuid_getfamily(cp);
2312 2317 cpu_info_template.ci_model.value.l = cpuid_getmodel(cp);
2313 2318 cpu_info_template.ci_step.value.l = cpuid_getstep(cp);
2314 2319 cpu_info_template.ci_clogid.value.l = cpuid_get_clogid(cp);
2315 2320 cpu_info_template.ci_ncpuperchip.value.l = cpuid_get_ncpu_per_chip(cp);
2316 2321 cpu_info_template.ci_ncoreperchip.value.l =
2317 2322 cpuid_get_ncore_per_chip(cp);
2318 2323 cpu_info_template.ci_pkg_core_id.value.l = cpuid_get_pkgcoreid(cp);
2319 2324 cpu_info_template.ci_max_cstates.value.l = cp->cpu_m.max_cstates;
2320 2325 cpu_info_template.ci_curr_cstate.value.l = cpu_idle_get_cpu_state(cp);
2321 2326 cpu_info_template.ci_cacheid.value.i32 = cpuid_get_cacheid(cp);
2322 2327 kstat_named_setstr(&cpu_info_template.ci_sktstr,
2323 2328 cpuid_getsocketstr(cp));
2324 2329 #endif
2325 2330
2326 2331 return (0);
2327 2332 }
2328 2333
2329 2334 static void
2330 2335 cpu_info_kstat_create(cpu_t *cp)
2331 2336 {
2332 2337 zoneid_t zoneid;
2333 2338
2334 2339 ASSERT(MUTEX_HELD(&cpu_lock));
2335 2340
2336 2341 if (pool_pset_enabled())
2337 2342 zoneid = GLOBAL_ZONEID;
2338 2343 else
2339 2344 zoneid = ALL_ZONES;
2340 2345 if ((cp->cpu_info_kstat = kstat_create_zone("cpu_info", cp->cpu_id,
2341 2346 NULL, "misc", KSTAT_TYPE_NAMED,
2342 2347 sizeof (cpu_info_template) / sizeof (kstat_named_t),
2343 2348 KSTAT_FLAG_VIRTUAL | KSTAT_FLAG_VAR_SIZE, zoneid)) != NULL) {
2344 2349 cp->cpu_info_kstat->ks_data_size += 2 * CPU_IDSTRLEN;
2345 2350 #if defined(__sparcv9)
2346 2351 cp->cpu_info_kstat->ks_data_size +=
2347 2352 strlen(cpu_fru_fmri(cp)) + 1;
2348 2353 #endif
2349 2354 #if defined(__x86)
2350 2355 cp->cpu_info_kstat->ks_data_size += X86_VENDOR_STRLEN;
2351 2356 #endif
2352 2357 if (cp->cpu_supp_freqs != NULL)
2353 2358 cp->cpu_info_kstat->ks_data_size +=
2354 2359 strlen(cp->cpu_supp_freqs) + 1;
2355 2360 cp->cpu_info_kstat->ks_lock = &cpu_info_template_lock;
2356 2361 cp->cpu_info_kstat->ks_data = &cpu_info_template;
2357 2362 cp->cpu_info_kstat->ks_private = cp;
2358 2363 cp->cpu_info_kstat->ks_update = cpu_info_kstat_update;
2359 2364 kstat_install(cp->cpu_info_kstat);
2360 2365 }
2361 2366 }
2362 2367
2363 2368 static void
2364 2369 cpu_info_kstat_destroy(cpu_t *cp)
2365 2370 {
2366 2371 ASSERT(MUTEX_HELD(&cpu_lock));
2367 2372
2368 2373 kstat_delete(cp->cpu_info_kstat);
2369 2374 cp->cpu_info_kstat = NULL;
2370 2375 }
2371 2376
2372 2377 /*
2373 2378 * Create and install kstats for the boot CPU.
2374 2379 */
2375 2380 void
2376 2381 cpu_kstat_init(cpu_t *cp)
2377 2382 {
2378 2383 mutex_enter(&cpu_lock);
2379 2384 cpu_info_kstat_create(cp);
2380 2385 cpu_stats_kstat_create(cp);
2381 2386 cpu_create_intrstat(cp);
2382 2387 cpu_set_state(cp);
2383 2388 mutex_exit(&cpu_lock);
2384 2389 }
2385 2390
2386 2391 /*
2387 2392 * Make visible to the zone that subset of the cpu information that would be
2388 2393 * initialized when a cpu is configured (but still offline).
2389 2394 */
2390 2395 void
2391 2396 cpu_visibility_configure(cpu_t *cp, zone_t *zone)
2392 2397 {
2393 2398 zoneid_t zoneid = zone ? zone->zone_id : ALL_ZONES;
2394 2399
2395 2400 ASSERT(MUTEX_HELD(&cpu_lock));
2396 2401 ASSERT(pool_pset_enabled());
2397 2402 ASSERT(cp != NULL);
2398 2403
2399 2404 if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) {
2400 2405 zone->zone_ncpus++;
2401 2406 ASSERT(zone->zone_ncpus <= ncpus);
2402 2407 }
2403 2408 if (cp->cpu_info_kstat != NULL)
2404 2409 kstat_zone_add(cp->cpu_info_kstat, zoneid);
2405 2410 }
2406 2411
2407 2412 /*
2408 2413 * Make visible to the zone that subset of the cpu information that would be
2409 2414 * initialized when a previously configured cpu is onlined.
2410 2415 */
2411 2416 void
2412 2417 cpu_visibility_online(cpu_t *cp, zone_t *zone)
2413 2418 {
2414 2419 kstat_t *ksp;
2415 2420 char name[sizeof ("cpu_stat") + 10]; /* enough for 32-bit cpuids */
2416 2421 zoneid_t zoneid = zone ? zone->zone_id : ALL_ZONES;
2417 2422 processorid_t cpun;
2418 2423
2419 2424 ASSERT(MUTEX_HELD(&cpu_lock));
2420 2425 ASSERT(pool_pset_enabled());
2421 2426 ASSERT(cp != NULL);
2422 2427 ASSERT(cpu_is_active(cp));
2423 2428
2424 2429 cpun = cp->cpu_id;
2425 2430 if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) {
2426 2431 zone->zone_ncpus_online++;
2427 2432 ASSERT(zone->zone_ncpus_online <= ncpus_online);
2428 2433 }
2429 2434 (void) snprintf(name, sizeof (name), "cpu_stat%d", cpun);
2430 2435 if ((ksp = kstat_hold_byname("cpu_stat", cpun, name, ALL_ZONES))
2431 2436 != NULL) {
2432 2437 kstat_zone_add(ksp, zoneid);
2433 2438 kstat_rele(ksp);
2434 2439 }
2435 2440 if ((ksp = kstat_hold_byname("cpu", cpun, "sys", ALL_ZONES)) != NULL) {
2436 2441 kstat_zone_add(ksp, zoneid);
2437 2442 kstat_rele(ksp);
2438 2443 }
2439 2444 if ((ksp = kstat_hold_byname("cpu", cpun, "vm", ALL_ZONES)) != NULL) {
2440 2445 kstat_zone_add(ksp, zoneid);
2441 2446 kstat_rele(ksp);
2442 2447 }
2443 2448 if ((ksp = kstat_hold_byname("cpu", cpun, "intrstat", ALL_ZONES)) !=
2444 2449 NULL) {
2445 2450 kstat_zone_add(ksp, zoneid);
2446 2451 kstat_rele(ksp);
2447 2452 }
2448 2453 }
2449 2454
2450 2455 /*
2451 2456 * Update relevant kstats such that cpu is now visible to processes
2452 2457 * executing in specified zone.
2453 2458 */
2454 2459 void
2455 2460 cpu_visibility_add(cpu_t *cp, zone_t *zone)
2456 2461 {
2457 2462 cpu_visibility_configure(cp, zone);
2458 2463 if (cpu_is_active(cp))
2459 2464 cpu_visibility_online(cp, zone);
2460 2465 }
2461 2466
2462 2467 /*
2463 2468 * Make invisible to the zone that subset of the cpu information that would be
2464 2469 * torn down when a previously offlined cpu is unconfigured.
2465 2470 */
2466 2471 void
2467 2472 cpu_visibility_unconfigure(cpu_t *cp, zone_t *zone)
2468 2473 {
2469 2474 zoneid_t zoneid = zone ? zone->zone_id : ALL_ZONES;
2470 2475
2471 2476 ASSERT(MUTEX_HELD(&cpu_lock));
2472 2477 ASSERT(pool_pset_enabled());
2473 2478 ASSERT(cp != NULL);
2474 2479
2475 2480 if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) {
2476 2481 ASSERT(zone->zone_ncpus != 0);
2477 2482 zone->zone_ncpus--;
2478 2483 }
2479 2484 if (cp->cpu_info_kstat)
2480 2485 kstat_zone_remove(cp->cpu_info_kstat, zoneid);
2481 2486 }
2482 2487
2483 2488 /*
2484 2489 * Make invisible to the zone that subset of the cpu information that would be
2485 2490 * torn down when a cpu is offlined (but still configured).
2486 2491 */
2487 2492 void
2488 2493 cpu_visibility_offline(cpu_t *cp, zone_t *zone)
2489 2494 {
2490 2495 kstat_t *ksp;
2491 2496 char name[sizeof ("cpu_stat") + 10]; /* enough for 32-bit cpuids */
2492 2497 zoneid_t zoneid = zone ? zone->zone_id : ALL_ZONES;
2493 2498 processorid_t cpun;
2494 2499
2495 2500 ASSERT(MUTEX_HELD(&cpu_lock));
2496 2501 ASSERT(pool_pset_enabled());
2497 2502 ASSERT(cp != NULL);
2498 2503 ASSERT(cpu_is_active(cp));
2499 2504
2500 2505 cpun = cp->cpu_id;
2501 2506 if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) {
2502 2507 ASSERT(zone->zone_ncpus_online != 0);
2503 2508 zone->zone_ncpus_online--;
2504 2509 }
2505 2510
2506 2511 if ((ksp = kstat_hold_byname("cpu", cpun, "intrstat", ALL_ZONES)) !=
2507 2512 NULL) {
2508 2513 kstat_zone_remove(ksp, zoneid);
2509 2514 kstat_rele(ksp);
2510 2515 }
2511 2516 if ((ksp = kstat_hold_byname("cpu", cpun, "vm", ALL_ZONES)) != NULL) {
2512 2517 kstat_zone_remove(ksp, zoneid);
2513 2518 kstat_rele(ksp);
2514 2519 }
2515 2520 if ((ksp = kstat_hold_byname("cpu", cpun, "sys", ALL_ZONES)) != NULL) {
2516 2521 kstat_zone_remove(ksp, zoneid);
2517 2522 kstat_rele(ksp);
2518 2523 }
2519 2524 (void) snprintf(name, sizeof (name), "cpu_stat%d", cpun);
2520 2525 if ((ksp = kstat_hold_byname("cpu_stat", cpun, name, ALL_ZONES))
2521 2526 != NULL) {
2522 2527 kstat_zone_remove(ksp, zoneid);
2523 2528 kstat_rele(ksp);
2524 2529 }
2525 2530 }
2526 2531
2527 2532 /*
2528 2533 * Update relevant kstats such that cpu is no longer visible to processes
2529 2534 * executing in specified zone.
2530 2535 */
2531 2536 void
2532 2537 cpu_visibility_remove(cpu_t *cp, zone_t *zone)
2533 2538 {
2534 2539 if (cpu_is_active(cp))
2535 2540 cpu_visibility_offline(cp, zone);
2536 2541 cpu_visibility_unconfigure(cp, zone);
2537 2542 }
2538 2543
2539 2544 /*
2540 2545 * Bind a thread to a CPU as requested.
2541 2546 */
2542 2547 int
2543 2548 cpu_bind_thread(kthread_id_t tp, processorid_t bind, processorid_t *obind,
2544 2549 int *error)
2545 2550 {
2546 2551 processorid_t binding;
2547 2552 cpu_t *cp = NULL;
2548 2553
2549 2554 ASSERT(MUTEX_HELD(&cpu_lock));
2550 2555 ASSERT(MUTEX_HELD(&ttoproc(tp)->p_lock));
2551 2556
2552 2557 thread_lock(tp);
2553 2558
2554 2559 /*
2555 2560 * Record old binding, but change the obind, which was initialized
2556 2561 * to PBIND_NONE, only if this thread has a binding. This avoids
2557 2562 * reporting PBIND_NONE for a process when some LWPs are bound.
2558 2563 */
2559 2564 binding = tp->t_bind_cpu;
2560 2565 if (binding != PBIND_NONE)
2561 2566 *obind = binding; /* record old binding */
2562 2567
2563 2568 switch (bind) {
2564 2569 case PBIND_QUERY:
2565 2570 /* Just return the old binding */
2566 2571 thread_unlock(tp);
2567 2572 return (0);
2568 2573
2569 2574 case PBIND_QUERY_TYPE:
2570 2575 /* Return the binding type */
2571 2576 *obind = TB_CPU_IS_SOFT(tp) ? PBIND_SOFT : PBIND_HARD;
2572 2577 thread_unlock(tp);
2573 2578 return (0);
2574 2579
2575 2580 case PBIND_SOFT:
2576 2581 /*
2577 2582 * Set soft binding for this thread and return the actual
2578 2583 * binding
2579 2584 */
2580 2585 TB_CPU_SOFT_SET(tp);
2581 2586 thread_unlock(tp);
2582 2587 return (0);
2583 2588
2584 2589 case PBIND_HARD:
2585 2590 /*
2586 2591 * Set hard binding for this thread and return the actual
2587 2592 * binding
2588 2593 */
2589 2594 TB_CPU_HARD_SET(tp);
2590 2595 thread_unlock(tp);
2591 2596 return (0);
2592 2597
2593 2598 default:
2594 2599 break;
2595 2600 }
2596 2601
2597 2602 /*
2598 2603 * If this thread/LWP cannot be bound because of permission
2599 2604 * problems, just note that and return success so that the
2600 2605 * other threads/LWPs will be bound. This is the way
2601 2606 * processor_bind() is defined to work.
2602 2607 *
2603 2608 * Binding will get EPERM if the thread is of system class
2604 2609 * or hasprocperm() fails.
2605 2610 */
2606 2611 if (tp->t_cid == 0 || !hasprocperm(tp->t_cred, CRED())) {
2607 2612 *error = EPERM;
2608 2613 thread_unlock(tp);
2609 2614 return (0);
2610 2615 }
2611 2616
2612 2617 binding = bind;
2613 2618 if (binding != PBIND_NONE) {
2614 2619 cp = cpu_get((processorid_t)binding);
2615 2620 /*
2616 2621 * Make sure binding is valid and is in right partition.
2617 2622 */
2618 2623 if (cp == NULL || tp->t_cpupart != cp->cpu_part) {
2619 2624 *error = EINVAL;
2620 2625 thread_unlock(tp);
2621 2626 return (0);
2622 2627 }
2623 2628 }
2624 2629 tp->t_bind_cpu = binding; /* set new binding */
2625 2630
2626 2631 /*
2627 2632 * If there is no system-set reason for affinity, set
2628 2633 * the t_bound_cpu field to reflect the binding.
2629 2634 */
2630 2635 if (tp->t_affinitycnt == 0) {
2631 2636 if (binding == PBIND_NONE) {
2632 2637 /*
2633 2638 * We may need to adjust disp_max_unbound_pri
2634 2639 * since we're becoming unbound.
2635 2640 */
2636 2641 disp_adjust_unbound_pri(tp);
2637 2642
2638 2643 tp->t_bound_cpu = NULL; /* set new binding */
2639 2644
2640 2645 /*
2641 2646 * Move thread to lgroup with strongest affinity
2642 2647 * after unbinding
2643 2648 */
2644 2649 if (tp->t_lgrp_affinity)
2645 2650 lgrp_move_thread(tp,
2646 2651 lgrp_choose(tp, tp->t_cpupart), 1);
2647 2652
2648 2653 if (tp->t_state == TS_ONPROC &&
2649 2654 tp->t_cpu->cpu_part != tp->t_cpupart)
2650 2655 cpu_surrender(tp);
2651 2656 } else {
2652 2657 lpl_t *lpl;
2653 2658
2654 2659 tp->t_bound_cpu = cp;
2655 2660 ASSERT(cp->cpu_lpl != NULL);
2656 2661
2657 2662 /*
2658 2663 * Set home to lgroup with most affinity containing CPU
2659 2664 * that thread is being bound or minimum bounding
2660 2665 * lgroup if no affinities set
2661 2666 */
2662 2667 if (tp->t_lgrp_affinity)
2663 2668 lpl = lgrp_affinity_best(tp, tp->t_cpupart,
2664 2669 LGRP_NONE, B_FALSE);
2665 2670 else
2666 2671 lpl = cp->cpu_lpl;
2667 2672
2668 2673 if (tp->t_lpl != lpl) {
2669 2674 /* can't grab cpu_lock */
2670 2675 lgrp_move_thread(tp, lpl, 1);
2671 2676 }
2672 2677
2673 2678 /*
2674 2679 * Make the thread switch to the bound CPU.
2675 2680 * If the thread is runnable, we need to
2676 2681 * requeue it even if t_cpu is already set
2677 2682 * to the right CPU, since it may be on a
2678 2683 * kpreempt queue and need to move to a local
2679 2684 * queue. We could check t_disp_queue to
2680 2685 * avoid unnecessary overhead if it's already
2681 2686 * on the right queue, but since this isn't
2682 2687 * a performance-critical operation it doesn't
2683 2688 * seem worth the extra code and complexity.
2684 2689 *
2685 2690 * If the thread is weakbound to the cpu then it will
2686 2691 * resist the new binding request until the weak
2687 2692 * binding drops. The cpu_surrender or requeueing
2688 2693 * below could be skipped in such cases (since it
2689 2694 * will have no effect), but that would require
2690 2695 * thread_allowmigrate to acquire thread_lock so
2691 2696 * we'll take the very occasional hit here instead.
2692 2697 */
2693 2698 if (tp->t_state == TS_ONPROC) {
2694 2699 cpu_surrender(tp);
2695 2700 } else if (tp->t_state == TS_RUN) {
2696 2701 cpu_t *ocp = tp->t_cpu;
2697 2702
2698 2703 (void) dispdeq(tp);
2699 2704 setbackdq(tp);
2700 2705 /*
2701 2706 * Either on the bound CPU's disp queue now,
2702 2707 * or swapped out or on the swap queue.
2703 2708 */
2704 2709 ASSERT(tp->t_disp_queue == cp->cpu_disp ||
2705 2710 tp->t_weakbound_cpu == ocp ||
2706 2711 (tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ))
2707 2712 != TS_LOAD);
2708 2713 }
2709 2714 }
2710 2715 }
2711 2716
2712 2717 /*
2713 2718 * Our binding has changed; set TP_CHANGEBIND.
2714 2719 */
2715 2720 tp->t_proc_flag |= TP_CHANGEBIND;
2716 2721 aston(tp);
2717 2722
2718 2723 thread_unlock(tp);
2719 2724
2720 2725 return (0);
2721 2726 }
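
A sketch of driving cpu_bind_thread() the way the processor_bind(2) path does, assuming the caller acquires cpu_lock and the process's p_lock as the ASSERTs above require. The target CPU id 3 is arbitrary, and setting the binding type explicitly with PBIND_HARD is shown only for illustration; obind and berr collect the previous binding and any per-thread error, as in cpu_unbind() below.

	processorid_t obind = PBIND_NONE;
	int berr = 0, err;

	mutex_enter(&cpu_lock);
	mutex_enter(&ttoproc(tp)->p_lock);
	err = cpu_bind_thread(tp, 3, &obind, &berr);	/* bind tp to CPU 3 */
	if (err == 0 && berr == 0)
		err = cpu_bind_thread(tp, PBIND_HARD, &obind, &berr);
	mutex_exit(&ttoproc(tp)->p_lock);
	mutex_exit(&cpu_lock);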
2722 2727
2723 2728 #if CPUSET_WORDS > 1
2724 2729
2725 2730 /*
2726 2731 * Functions for implementing cpuset operations when a cpuset is more
2727 2732 * than one word. On platforms where a cpuset is a single word these
2728 2733 * are implemented as macros in cpuvar.h.
2729 2734 */
2730 2735
2731 2736 void
2732 2737 cpuset_all(cpuset_t *s)
2733 2738 {
2734 2739 int i;
2735 2740
2736 2741 for (i = 0; i < CPUSET_WORDS; i++)
2737 2742 s->cpub[i] = ~0UL;
2738 2743 }
2739 2744
2740 2745 void
2741 2746 cpuset_all_but(cpuset_t *s, uint_t cpu)
2742 2747 {
2743 2748 cpuset_all(s);
2744 2749 CPUSET_DEL(*s, cpu);
2745 2750 }
2746 2751
2747 2752 void
2748 2753 cpuset_only(cpuset_t *s, uint_t cpu)
2749 2754 {
2750 2755 CPUSET_ZERO(*s);
2751 2756 CPUSET_ADD(*s, cpu);
2752 2757 }
2753 2758
2754 2759 int
2755 2760 cpuset_isnull(cpuset_t *s)
2756 2761 {
2757 2762 int i;
2758 2763
2759 2764 for (i = 0; i < CPUSET_WORDS; i++)
2760 2765 if (s->cpub[i] != 0)
2761 2766 return (0);
2762 2767 return (1);
2763 2768 }
2764 2769
2765 2770 int
2766 2771 cpuset_cmp(cpuset_t *s1, cpuset_t *s2)
2767 2772 {
2768 2773 int i;
2769 2774
2770 2775 for (i = 0; i < CPUSET_WORDS; i++)
2771 2776 if (s1->cpub[i] != s2->cpub[i])
2772 2777 return (0);
2773 2778 return (1);
2774 2779 }
2775 2780
2776 2781 uint_t
2777 2782 cpuset_find(cpuset_t *s)
2778 2783 {
2779 2784
2780 2785 uint_t i;
2781 2786 uint_t cpu = (uint_t)-1;
2782 2787
2783 2788 /*
2784 2789 * Find a cpu in the cpuset
2785 2790 */
2786 2791 for (i = 0; i < CPUSET_WORDS; i++) {
2787 2792 cpu = (uint_t)(lowbit(s->cpub[i]) - 1);
2788 2793 if (cpu != (uint_t)-1) {
2789 2794 cpu += i * BT_NBIPUL;
2790 2795 break;
2791 2796 }
2792 2797 }
2793 2798 return (cpu);
2794 2799 }
2795 2800
2796 2801 void
2797 2802 cpuset_bounds(cpuset_t *s, uint_t *smallestid, uint_t *largestid)
2798 2803 {
2799 2804 int i, j;
2800 2805 uint_t bit;
2801 2806
2802 2807 /*
2803 2808 * First, find the smallest cpu id in the set.
2804 2809 */
2805 2810 for (i = 0; i < CPUSET_WORDS; i++) {
2806 2811 if (s->cpub[i] != 0) {
2807 2812 bit = (uint_t)(lowbit(s->cpub[i]) - 1);
2808 2813 ASSERT(bit != (uint_t)-1);
2809 2814 *smallestid = bit + (i * BT_NBIPUL);
2810 2815
2811 2816 /*
2812 2817 * Now find the largest cpu id in
2813 2818 * the set and return immediately.
2814 2819 * Done in an inner loop to avoid
2815 2820 * having to break out of the first
2816 2821 * loop.
2817 2822 */
2818 2823 for (j = CPUSET_WORDS - 1; j >= i; j--) {
2819 2824 if (s->cpub[j] != 0) {
2820 2825 bit = (uint_t)(highbit(s->cpub[j]) - 1);
2821 2826 ASSERT(bit != (uint_t)-1);
2822 2827 *largestid = bit + (j * BT_NBIPUL);
2823 2828 ASSERT(*largestid >= *smallestid);
2824 2829 return;
2825 2830 }
2826 2831 }
2827 2832
2828 2833 /*
2829 2834 * If this code is reached, a
2830 2835 * smallestid was found, but not a
2831 2836 * largestid. The cpuset must have
2832 2837 * been changed during the course
2833 2838 * of this function call.
2834 2839 */
2835 2840 ASSERT(0);
2836 2841 }
2837 2842 }
2838 2843 *smallestid = *largestid = CPUSET_NOTINSET;
2839 2844 }
2840 2845
2841 2846 #endif /* CPUSET_WORDS */
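
A small usage sketch of the helpers above, assuming a platform where CPUSET_WORDS > 1 so the function forms are compiled in (single-word platforms get the equivalent cpuvar.h macros). The CPU ids 1 and 5 are arbitrary.

	cpuset_t set;
	uint_t lo, hi;

	cpuset_only(&set, 1);			/* set = { 1 } */
	CPUSET_ADD(set, 5);			/* set = { 1, 5 } */

	ASSERT(cpuset_isnull(&set) == 0);
	ASSERT(cpuset_find(&set) == 1);		/* lowest id in the set */

	cpuset_bounds(&set, &lo, &hi);		/* lo == 1, hi == 5 */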
2842 2847
2843 2848 /*
2844 2849 * Unbind threads bound to specified CPU.
2845 2850 *
2846 2851 * If `unbind_all_threads' is true, unbind all user threads bound to a given
2847 2852 * CPU. Otherwise unbind all soft-bound user threads.
2848 2853 */
2849 2854 int
2850 2855 cpu_unbind(processorid_t cpu, boolean_t unbind_all_threads)
2851 2856 {
2852 2857 processorid_t obind;
2853 2858 kthread_t *tp;
2854 2859 int ret = 0;
2855 2860 proc_t *pp;
2856 2861 int err, berr = 0;
2857 2862
2858 2863 ASSERT(MUTEX_HELD(&cpu_lock));
2859 2864
2860 2865 mutex_enter(&pidlock);
2861 2866 for (pp = practive; pp != NULL; pp = pp->p_next) {
2862 2867 mutex_enter(&pp->p_lock);
2863 2868 tp = pp->p_tlist;
2864 2869 /*
2865 2870 * Skip zombies, kernel processes, and processes in
2866 2871 * other zones, if called from a non-global zone.
2867 2872 */
2868 2873 if (tp == NULL || (pp->p_flag & SSYS) ||
2869 2874 !HASZONEACCESS(curproc, pp->p_zone->zone_id)) {
2870 2875 mutex_exit(&pp->p_lock);
2871 2876 continue;
2872 2877 }
2873 2878 do {
2874 2879 if (tp->t_bind_cpu != cpu)
2875 2880 continue;
2876 2881 /*
2877 2882 * Skip threads with hard binding when
2878 2883 * `unbind_all_threads' is not specified.
2879 2884 */
2880 2885 if (!unbind_all_threads && TB_CPU_IS_HARD(tp))
2881 2886 continue;
2882 2887 err = cpu_bind_thread(tp, PBIND_NONE, &obind, &berr);
2883 2888 if (ret == 0)
2884 2889 ret = err;
2885 2890 } while ((tp = tp->t_forw) != pp->p_tlist);
2886 2891 mutex_exit(&pp->p_lock);
2887 2892 }
2888 2893 mutex_exit(&pidlock);
2889 2894 if (ret == 0)
2890 2895 ret = berr;
2891 2896 return (ret);
2892 2897 }
2893 2898
2894 2899
2895 2900 /*
2896 2901 * Destroy all remaining bound threads on a cpu.
2897 2902 */
2898 2903 void
2899 2904 cpu_destroy_bound_threads(cpu_t *cp)
2900 2905 {
2901 2906 extern id_t syscid;
2902 2907 register kthread_id_t t, tlist, tnext;
2903 2908
2904 2909 /*
2905 2910 * Destroy all remaining bound threads on the cpu. This
2906 2911 * should include both the interrupt threads and the idle thread.
2907 2912 * This requires some care, since we need to traverse the
2908 2913 * thread list with the pidlock mutex locked, but thread_free
2909 2914 * also locks the pidlock mutex. So, we collect the threads
2910 2915 * we're going to reap in a list headed by "tlist", then we
2911 2916 * unlock the pidlock mutex and traverse the tlist list,
2912 2917 	 * doing thread_free's on the threads. Simple, isn't it?
2913 2918 * Also, this depends on thread_free not mucking with the
2914 2919 * t_next and t_prev links of the thread.
2915 2920 */
2916 2921
2917 2922 if ((t = curthread) != NULL) {
2918 2923
2919 2924 tlist = NULL;
2920 2925 mutex_enter(&pidlock);
2921 2926 do {
2922 2927 tnext = t->t_next;
2923 2928 if (t->t_bound_cpu == cp) {
2924 2929
2925 2930 /*
2926 2931 * We've found a bound thread, carefully unlink
2927 2932 * it out of the thread list, and add it to
2928 2933 * our "tlist". We "know" we don't have to
2929 2934 * worry about unlinking curthread (the thread
2930 2935 * that is executing this code).
2931 2936 */
2932 2937 t->t_next->t_prev = t->t_prev;
2933 2938 t->t_prev->t_next = t->t_next;
2934 2939 t->t_next = tlist;
2935 2940 tlist = t;
2936 2941 ASSERT(t->t_cid == syscid);
2937 2942 /* wake up anyone blocked in thread_join */
2938 2943 cv_broadcast(&t->t_joincv);
2939 2944 /*
2940 2945 * t_lwp set by interrupt threads and not
2941 2946 * cleared.
2942 2947 */
2943 2948 t->t_lwp = NULL;
2944 2949 /*
2945 2950 * Pause and idle threads always have
2946 2951 * t_state set to TS_ONPROC.
2947 2952 */
2948 2953 t->t_state = TS_FREE;
2949 2954 t->t_prev = NULL; /* Just in case */
2950 2955 }
2951 2956
2952 2957 } while ((t = tnext) != curthread);
2953 2958
2954 2959 mutex_exit(&pidlock);
2955 2960
2956 2961 mutex_sync();
2957 2962 for (t = tlist; t != NULL; t = tnext) {
2958 2963 tnext = t->t_next;
2959 2964 thread_free(t);
2960 2965 }
2961 2966 }
2962 2967 }
2963 2968
2964 2969 /*
2965 2970 * Update the cpu_supp_freqs of this cpu. This information is returned
2966 2971 * as part of cpu_info kstats. If the cpu_info_kstat exists already, then
2967 2972 * maintain the kstat data size.
2968 2973 */
2969 2974 void
2970 2975 cpu_set_supp_freqs(cpu_t *cp, const char *freqs)
2971 2976 {
2972 2977 char clkstr[sizeof ("18446744073709551615") + 1]; /* ui64 MAX */
2973 2978 const char *lfreqs = clkstr;
2974 2979 boolean_t kstat_exists = B_FALSE;
2975 2980 kstat_t *ksp;
2976 2981 size_t len;
2977 2982
2978 2983 /*
2979 2984 * A NULL pointer means we only support one speed.
2980 2985 */
2981 2986 if (freqs == NULL)
2982 2987 (void) snprintf(clkstr, sizeof (clkstr), "%"PRIu64,
2983 2988 cp->cpu_curr_clock);
2984 2989 else
2985 2990 lfreqs = freqs;
2986 2991
2987 2992 /*
2988 2993 * Make sure the frequency doesn't change while a snapshot is
2989 2994 * going on. Of course, we only need to worry about this if
2990 2995 * the kstat exists.
2991 2996 */
2992 2997 if ((ksp = cp->cpu_info_kstat) != NULL) {
2993 2998 mutex_enter(ksp->ks_lock);
2994 2999 kstat_exists = B_TRUE;
2995 3000 }
2996 3001
2997 3002 /*
2998 3003 * Free any previously allocated string and if the kstat
2999 3004 * already exists, then update its data size.
3000 3005 */
3001 3006 if (cp->cpu_supp_freqs != NULL) {
3002 3007 len = strlen(cp->cpu_supp_freqs) + 1;
3003 3008 kmem_free(cp->cpu_supp_freqs, len);
3004 3009 if (kstat_exists)
3005 3010 ksp->ks_data_size -= len;
3006 3011 }
3007 3012
3008 3013 /*
3009 3014 * Allocate the new string and set the pointer.
3010 3015 */
3011 3016 len = strlen(lfreqs) + 1;
3012 3017 cp->cpu_supp_freqs = kmem_alloc(len, KM_SLEEP);
3013 3018 (void) strcpy(cp->cpu_supp_freqs, lfreqs);
3014 3019
3015 3020 /*
3016 3021 * If the kstat already exists then update the data size and
3017 3022 * free the lock.
3018 3023 */
3019 3024 if (kstat_exists) {
3020 3025 ksp->ks_data_size += len;
3021 3026 mutex_exit(ksp->ks_lock);
3022 3027 }
3023 3028 }
3024 3029
3025 3030 /*
3026 3031  * Indicate the current CPU's clock frequency (in Hz).
3027 3032 * The calling context must be such that CPU references are safe.
3028 3033 */
3029 3034 void
3030 3035 cpu_set_curr_clock(uint64_t new_clk)
3031 3036 {
3032 3037 uint64_t old_clk;
3033 3038
3034 3039 old_clk = CPU->cpu_curr_clock;
3035 3040 CPU->cpu_curr_clock = new_clk;
3036 3041
3037 3042 /*
3038 3043 * The cpu-change-speed DTrace probe exports the frequency in Hz
3039 3044 */
3040 3045 DTRACE_PROBE3(cpu__change__speed, processorid_t, CPU->cpu_id,
3041 3046 uint64_t, old_clk, uint64_t, new_clk);
3042 3047 }
3043 3048
3044 3049 /*
3045 3050 * processor_info(2) and p_online(2) status support functions
3046 3051  * The constants returned by cpu_get_state() and cpu_get_state_str() are
3047 3052  * for use in communicating processor state information to userland. Kernel
3048 3053  * subsystems should only be using the cpu_flags value directly. Subsystems
3049 3054  * modifying cpu_flags should record the state change via a call to
3050 3055  * cpu_set_state().
3051 3056 */
3052 3057
3053 3058 /*
3054 3059 * Update the pi_state of this CPU. This function provides the CPU status for
3055 3060 * the information returned by processor_info(2).
3056 3061 */
3057 3062 void
3058 3063 cpu_set_state(cpu_t *cpu)
3059 3064 {
3060 3065 ASSERT(MUTEX_HELD(&cpu_lock));
3061 3066 cpu->cpu_type_info.pi_state = cpu_get_state(cpu);
3062 3067 cpu->cpu_state_begin = gethrestime_sec();
3063 3068 pool_cpu_mod = gethrtime();
3064 3069 }
3065 3070
3066 3071 /*
3067 3072 * Return offline/online/other status for the indicated CPU. Use only for
3068 3073 * communication with user applications; cpu_flags provides the in-kernel
3069 3074 * interface.
3070 3075 */
3071 3076 int
3072 3077 cpu_get_state(cpu_t *cpu)
3073 3078 {
3074 3079 ASSERT(MUTEX_HELD(&cpu_lock));
3075 3080 if (cpu->cpu_flags & CPU_POWEROFF)
3076 3081 return (P_POWEROFF);
3077 3082 else if (cpu->cpu_flags & CPU_FAULTED)
3078 3083 return (P_FAULTED);
3079 3084 else if (cpu->cpu_flags & CPU_SPARE)
3080 3085 return (P_SPARE);
3081 3086 else if ((cpu->cpu_flags & (CPU_READY | CPU_OFFLINE)) != CPU_READY)
3082 3087 return (P_OFFLINE);
3083 3088 else if (cpu->cpu_flags & CPU_ENABLE)
3084 3089 return (P_ONLINE);
3085 3090 else
3086 3091 return (P_NOINTR);
3087 3092 }
3088 3093
3089 3094 /*
3090 3095 * Return processor_info(2) state as a string.
3091 3096 */
3092 3097 const char *
3093 3098 cpu_get_state_str(cpu_t *cpu)
3094 3099 {
3095 3100 const char *string;
3096 3101
3097 3102 switch (cpu_get_state(cpu)) {
3098 3103 case P_ONLINE:
3099 3104 string = PS_ONLINE;
3100 3105 break;
3101 3106 case P_POWEROFF:
3102 3107 string = PS_POWEROFF;
3103 3108 break;
3104 3109 case P_NOINTR:
3105 3110 string = PS_NOINTR;
3106 3111 break;
3107 3112 case P_SPARE:
3108 3113 string = PS_SPARE;
3109 3114 break;
3110 3115 case P_FAULTED:
3111 3116 string = PS_FAULTED;
3112 3117 break;
3113 3118 case P_OFFLINE:
3114 3119 string = PS_OFFLINE;
3115 3120 break;
3116 3121 default:
3117 3122 string = "unknown";
3118 3123 break;
3119 3124 }
3120 3125 return (string);
3121 3126 }
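
For illustration, a minimal consumer of the two functions above; cp is assumed to be a valid cpu_t pointer (e.g. from cpu_get()) and the cmn_err() report exists only for the example. cpu_lock must be held, since cpu_get_state() asserts it.

	mutex_enter(&cpu_lock);
	if (cpu_get_state(cp) != P_ONLINE)
		cmn_err(CE_NOTE, "cpu %d is %s", cp->cpu_id,
		    cpu_get_state_str(cp));
	mutex_exit(&cpu_lock);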
3122 3127
3123 3128 /*
3124 3129 * Export this CPU's statistics (cpu_stat_t and cpu_stats_t) as raw and named
3125 3130 * kstats, respectively. This is done when a CPU is initialized or placed
3126 3131 * online via p_online(2).
3127 3132 */
3128 3133 static void
3129 3134 cpu_stats_kstat_create(cpu_t *cp)
3130 3135 {
3131 3136 int instance = cp->cpu_id;
3132 3137 char *module = "cpu";
3133 3138 char *class = "misc";
3134 3139 kstat_t *ksp;
3135 3140 zoneid_t zoneid;
3136 3141
3137 3142 ASSERT(MUTEX_HELD(&cpu_lock));
3138 3143
3139 3144 if (pool_pset_enabled())
3140 3145 zoneid = GLOBAL_ZONEID;
3141 3146 else
3142 3147 zoneid = ALL_ZONES;
3143 3148 /*
3144 3149 * Create named kstats
3145 3150 */
3146 3151 #define CPU_STATS_KS_CREATE(name, tsize, update_func) \
3147 3152 ksp = kstat_create_zone(module, instance, (name), class, \
3148 3153 KSTAT_TYPE_NAMED, (tsize) / sizeof (kstat_named_t), 0, \
3149 3154 zoneid); \
3150 3155 if (ksp != NULL) { \
3151 3156 ksp->ks_private = cp; \
3152 3157 ksp->ks_update = (update_func); \
3153 3158 kstat_install(ksp); \
3154 3159 } else \
3155 3160 cmn_err(CE_WARN, "cpu: unable to create %s:%d:%s kstat", \
3156 3161 module, instance, (name));
3157 3162
3158 3163 CPU_STATS_KS_CREATE("sys", sizeof (cpu_sys_stats_ks_data_template),
3159 3164 cpu_sys_stats_ks_update);
3160 3165 CPU_STATS_KS_CREATE("vm", sizeof (cpu_vm_stats_ks_data_template),
3161 3166 cpu_vm_stats_ks_update);
3162 3167
3163 3168 /*
3164 3169 * Export the familiar cpu_stat_t KSTAT_TYPE_RAW kstat.
3165 3170 */
3166 3171 ksp = kstat_create_zone("cpu_stat", cp->cpu_id, NULL,
3167 3172 "misc", KSTAT_TYPE_RAW, sizeof (cpu_stat_t), 0, zoneid);
3168 3173 if (ksp != NULL) {
3169 3174 ksp->ks_update = cpu_stat_ks_update;
3170 3175 ksp->ks_private = cp;
3171 3176 kstat_install(ksp);
3172 3177 }
3173 3178 }
3174 3179
3175 3180 static void
3176 3181 cpu_stats_kstat_destroy(cpu_t *cp)
3177 3182 {
3178 3183 char ks_name[KSTAT_STRLEN];
3179 3184
3180 3185 (void) sprintf(ks_name, "cpu_stat%d", cp->cpu_id);
3181 3186 kstat_delete_byname("cpu_stat", cp->cpu_id, ks_name);
3182 3187
3183 3188 kstat_delete_byname("cpu", cp->cpu_id, "sys");
3184 3189 kstat_delete_byname("cpu", cp->cpu_id, "vm");
3185 3190 }
3186 3191
3187 3192 static int
3188 3193 cpu_sys_stats_ks_update(kstat_t *ksp, int rw)
3189 3194 {
3190 3195 cpu_t *cp = (cpu_t *)ksp->ks_private;
3191 3196 struct cpu_sys_stats_ks_data *csskd;
3192 3197 cpu_sys_stats_t *css;
3193 3198 hrtime_t msnsecs[NCMSTATES];
3194 3199 int i;
3195 3200
3196 3201 if (rw == KSTAT_WRITE)
3197 3202 return (EACCES);
3198 3203
3199 3204 csskd = ksp->ks_data;
3200 3205 css = &cp->cpu_stats.sys;
3201 3206
3202 3207 /*
3203 3208 * Read CPU mstate, but compare with the last values we
3204 3209 * received to make sure that the returned kstats never
3205 3210 * decrease.
3206 3211 */
3207 3212
3208 3213 get_cpu_mstate(cp, msnsecs);
3209 3214 if (csskd->cpu_nsec_idle.value.ui64 > msnsecs[CMS_IDLE])
3210 3215 msnsecs[CMS_IDLE] = csskd->cpu_nsec_idle.value.ui64;
3211 3216 if (csskd->cpu_nsec_user.value.ui64 > msnsecs[CMS_USER])
3212 3217 msnsecs[CMS_USER] = csskd->cpu_nsec_user.value.ui64;
3213 3218 if (csskd->cpu_nsec_kernel.value.ui64 > msnsecs[CMS_SYSTEM])
3214 3219 msnsecs[CMS_SYSTEM] = csskd->cpu_nsec_kernel.value.ui64;
3215 3220
3216 3221 bcopy(&cpu_sys_stats_ks_data_template, ksp->ks_data,
3217 3222 sizeof (cpu_sys_stats_ks_data_template));
3218 3223
3219 3224 csskd->cpu_ticks_wait.value.ui64 = 0;
3220 3225 csskd->wait_ticks_io.value.ui64 = 0;
3221 3226
3222 3227 csskd->cpu_nsec_idle.value.ui64 = msnsecs[CMS_IDLE];
3223 3228 csskd->cpu_nsec_user.value.ui64 = msnsecs[CMS_USER];
3224 3229 csskd->cpu_nsec_kernel.value.ui64 = msnsecs[CMS_SYSTEM];
3225 3230 csskd->cpu_ticks_idle.value.ui64 =
3226 3231 NSEC_TO_TICK(csskd->cpu_nsec_idle.value.ui64);
3227 3232 csskd->cpu_ticks_user.value.ui64 =
3228 3233 NSEC_TO_TICK(csskd->cpu_nsec_user.value.ui64);
3229 3234 csskd->cpu_ticks_kernel.value.ui64 =
3230 3235 NSEC_TO_TICK(csskd->cpu_nsec_kernel.value.ui64);
3231 3236 csskd->cpu_nsec_dtrace.value.ui64 = cp->cpu_dtrace_nsec;
3232 3237 csskd->dtrace_probes.value.ui64 = cp->cpu_dtrace_probes;
3233 3238 csskd->cpu_nsec_intr.value.ui64 = cp->cpu_intrlast;
3234 3239 csskd->cpu_load_intr.value.ui64 = cp->cpu_intrload;
3235 3240 csskd->bread.value.ui64 = css->bread;
3236 3241 csskd->bwrite.value.ui64 = css->bwrite;
3237 3242 csskd->lread.value.ui64 = css->lread;
3238 3243 csskd->lwrite.value.ui64 = css->lwrite;
3239 3244 csskd->phread.value.ui64 = css->phread;
3240 3245 csskd->phwrite.value.ui64 = css->phwrite;
3241 3246 csskd->pswitch.value.ui64 = css->pswitch;
3242 3247 csskd->trap.value.ui64 = css->trap;
3243 3248 csskd->intr.value.ui64 = 0;
3244 3249 for (i = 0; i < PIL_MAX; i++)
3245 3250 csskd->intr.value.ui64 += css->intr[i];
3246 3251 csskd->syscall.value.ui64 = css->syscall;
3247 3252 csskd->sysread.value.ui64 = css->sysread;
3248 3253 csskd->syswrite.value.ui64 = css->syswrite;
3249 3254 csskd->sysfork.value.ui64 = css->sysfork;
3250 3255 csskd->sysvfork.value.ui64 = css->sysvfork;
3251 3256 csskd->sysexec.value.ui64 = css->sysexec;
3252 3257 csskd->readch.value.ui64 = css->readch;
3253 3258 csskd->writech.value.ui64 = css->writech;
3254 3259 csskd->rcvint.value.ui64 = css->rcvint;
3255 3260 csskd->xmtint.value.ui64 = css->xmtint;
3256 3261 csskd->mdmint.value.ui64 = css->mdmint;
3257 3262 csskd->rawch.value.ui64 = css->rawch;
3258 3263 csskd->canch.value.ui64 = css->canch;
3259 3264 csskd->outch.value.ui64 = css->outch;
3260 3265 csskd->msg.value.ui64 = css->msg;
3261 3266 csskd->sema.value.ui64 = css->sema;
3262 3267 csskd->namei.value.ui64 = css->namei;
3263 3268 csskd->ufsiget.value.ui64 = css->ufsiget;
3264 3269 csskd->ufsdirblk.value.ui64 = css->ufsdirblk;
3265 3270 csskd->ufsipage.value.ui64 = css->ufsipage;
3266 3271 csskd->ufsinopage.value.ui64 = css->ufsinopage;
3267 3272 csskd->procovf.value.ui64 = css->procovf;
3268 3273 csskd->intrthread.value.ui64 = 0;
3269 3274 for (i = 0; i < LOCK_LEVEL - 1; i++)
3270 3275 csskd->intrthread.value.ui64 += css->intr[i];
3271 3276 csskd->intrblk.value.ui64 = css->intrblk;
3272 3277 csskd->intrunpin.value.ui64 = css->intrunpin;
3273 3278 csskd->idlethread.value.ui64 = css->idlethread;
3274 3279 csskd->inv_swtch.value.ui64 = css->inv_swtch;
3275 3280 csskd->nthreads.value.ui64 = css->nthreads;
3276 3281 csskd->cpumigrate.value.ui64 = css->cpumigrate;
3277 3282 csskd->xcalls.value.ui64 = css->xcalls;
3278 3283 csskd->mutex_adenters.value.ui64 = css->mutex_adenters;
3279 3284 csskd->rw_rdfails.value.ui64 = css->rw_rdfails;
3280 3285 csskd->rw_wrfails.value.ui64 = css->rw_wrfails;
3281 3286 csskd->modload.value.ui64 = css->modload;
3282 3287 csskd->modunload.value.ui64 = css->modunload;
3283 3288 csskd->bawrite.value.ui64 = css->bawrite;
3284 3289 csskd->iowait.value.ui64 = css->iowait;
3285 3290
3286 3291 return (0);
3287 3292 }
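Note: cpu_sys_stats_ks_update() (ending above) guards against the exported idle/user/kernel nanosecond counters ever appearing to run backwards: the freshly sampled microstate times are raised up to the values last reported before the template is copied back into ks_data. A minimal illustrative sketch of that clamp, with hypothetical names (not code from this file):

	/*
	 * Illustrative sketch of the monotonic clamp used by the update
	 * routines in this file: freshly sampled times are raised to the
	 * values last reported, so consumers never see a counter decrease.
	 * Function and parameter names are hypothetical.
	 */
	#include <sys/types.h>

	static void
	clamp_never_decrease(uint64_t fresh[], const uint64_t last_reported[],
	    int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			/* Keep the larger value; a transient dip is hidden. */
			if (last_reported[i] > fresh[i])
				fresh[i] = last_reported[i];
		}
	}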
3288 3293
3289 3294 static int
3290 3295 cpu_vm_stats_ks_update(kstat_t *ksp, int rw)
3291 3296 {
3292 3297 cpu_t *cp = (cpu_t *)ksp->ks_private;
3293 3298 struct cpu_vm_stats_ks_data *cvskd;
3294 3299 cpu_vm_stats_t *cvs;
3295 3300
3296 3301 if (rw == KSTAT_WRITE)
3297 3302 return (EACCES);
3298 3303
3299 3304 cvs = &cp->cpu_stats.vm;
3300 3305 cvskd = ksp->ks_data;
3301 3306
3302 3307 bcopy(&cpu_vm_stats_ks_data_template, ksp->ks_data,
3303 3308 sizeof (cpu_vm_stats_ks_data_template));
3304 3309 cvskd->pgrec.value.ui64 = cvs->pgrec;
3305 3310 cvskd->pgfrec.value.ui64 = cvs->pgfrec;
3306 3311 cvskd->pgin.value.ui64 = cvs->pgin;
3307 3312 cvskd->pgpgin.value.ui64 = cvs->pgpgin;
3308 3313 cvskd->pgout.value.ui64 = cvs->pgout;
3309 3314 cvskd->pgpgout.value.ui64 = cvs->pgpgout;
3310 3315 cvskd->swapin.value.ui64 = cvs->swapin;
3311 3316 cvskd->pgswapin.value.ui64 = cvs->pgswapin;
3312 3317 cvskd->swapout.value.ui64 = cvs->swapout;
3313 3318 cvskd->pgswapout.value.ui64 = cvs->pgswapout;
3314 3319 cvskd->zfod.value.ui64 = cvs->zfod;
3315 3320 cvskd->dfree.value.ui64 = cvs->dfree;
3316 3321 cvskd->scan.value.ui64 = cvs->scan;
3317 3322 cvskd->rev.value.ui64 = cvs->rev;
3318 3323 cvskd->hat_fault.value.ui64 = cvs->hat_fault;
3319 3324 cvskd->as_fault.value.ui64 = cvs->as_fault;
3320 3325 cvskd->maj_fault.value.ui64 = cvs->maj_fault;
3321 3326 cvskd->cow_fault.value.ui64 = cvs->cow_fault;
3322 3327 cvskd->prot_fault.value.ui64 = cvs->prot_fault;
3323 3328 cvskd->softlock.value.ui64 = cvs->softlock;
3324 3329 cvskd->kernel_asflt.value.ui64 = cvs->kernel_asflt;
3325 3330 cvskd->pgrrun.value.ui64 = cvs->pgrrun;
3326 3331 cvskd->execpgin.value.ui64 = cvs->execpgin;
3327 3332 cvskd->execpgout.value.ui64 = cvs->execpgout;
3328 3333 cvskd->execfree.value.ui64 = cvs->execfree;
3329 3334 cvskd->anonpgin.value.ui64 = cvs->anonpgin;
3330 3335 cvskd->anonpgout.value.ui64 = cvs->anonpgout;
3331 3336 cvskd->anonfree.value.ui64 = cvs->anonfree;
3332 3337 cvskd->fspgin.value.ui64 = cvs->fspgin;
3333 3338 cvskd->fspgout.value.ui64 = cvs->fspgout;
3334 3339 cvskd->fsfree.value.ui64 = cvs->fsfree;
3335 3340
3336 3341 return (0);
3337 3342 }
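Both named-kstat update routines above follow the same shape: reject KSTAT_WRITE, recover the cpu_t from ks_private, bcopy the static template into ks_data to reset the names and types, then fill in the ui64 values from the per-CPU counters. The actual registration for these kstats lives in cpu_stats_kstat_create() (not shown in this section); a generic, hedged sketch of how such a read-only provider is typically wired up through the standard kstat(9S) interfaces might look like the following, where the function name and strings are illustrative only:

	/*
	 * Sketch only: registering a read-only named kstat whose values are
	 * produced on demand by an update routine.  "my_kstat_create" and the
	 * module/name strings are assumptions, not this file's code.
	 */
	static void
	my_kstat_create(cpu_t *cp)
	{
		kstat_t *ksp;

		ksp = kstat_create("cpu", cp->cpu_id, "vm", "misc",
		    KSTAT_TYPE_NAMED,
		    sizeof (cpu_vm_stats_ks_data_template) /
		    sizeof (kstat_named_t),	/* template defined earlier */
		    0);				/* framework allocates ks_data */
		if (ksp == NULL)
			return;

		ksp->ks_private = cp;		/* recovered via ks_private above */
		ksp->ks_update = cpu_vm_stats_ks_update;
		kstat_install(ksp);
	}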
3338 3343
3339 3344 static int
3340 3345 cpu_stat_ks_update(kstat_t *ksp, int rw)
3341 3346 {
3342 3347 cpu_stat_t *cso;
3343 3348 cpu_t *cp;
3344 3349 int i;
3345 3350 hrtime_t msnsecs[NCMSTATES];
3346 3351
3347 3352 cso = (cpu_stat_t *)ksp->ks_data;
3348 3353 cp = (cpu_t *)ksp->ks_private;
3349 3354
3350 3355 if (rw == KSTAT_WRITE)
3351 3356 return (EACCES);
3352 3357
3353 3358 /*
3354 3359 * Read CPU mstate, but compare with the last values we
3355 3360 * received to make sure that the returned kstats never
3356 3361 * decrease.
3357 3362 */
3358 3363
3359 3364 get_cpu_mstate(cp, msnsecs);
3360 3365 msnsecs[CMS_IDLE] = NSEC_TO_TICK(msnsecs[CMS_IDLE]);
3361 3366 msnsecs[CMS_USER] = NSEC_TO_TICK(msnsecs[CMS_USER]);
3362 3367 msnsecs[CMS_SYSTEM] = NSEC_TO_TICK(msnsecs[CMS_SYSTEM]);
3363 3368 if (cso->cpu_sysinfo.cpu[CPU_IDLE] < msnsecs[CMS_IDLE])
3364 3369 cso->cpu_sysinfo.cpu[CPU_IDLE] = msnsecs[CMS_IDLE];
3365 3370 if (cso->cpu_sysinfo.cpu[CPU_USER] < msnsecs[CMS_USER])
3366 3371 cso->cpu_sysinfo.cpu[CPU_USER] = msnsecs[CMS_USER];
3367 3372 if (cso->cpu_sysinfo.cpu[CPU_KERNEL] < msnsecs[CMS_SYSTEM])
3368 3373 cso->cpu_sysinfo.cpu[CPU_KERNEL] = msnsecs[CMS_SYSTEM];
3369 3374 cso->cpu_sysinfo.cpu[CPU_WAIT] = 0;
3370 3375 cso->cpu_sysinfo.wait[W_IO] = 0;
3371 3376 cso->cpu_sysinfo.wait[W_SWAP] = 0;
3372 3377 cso->cpu_sysinfo.wait[W_PIO] = 0;
3373 3378 cso->cpu_sysinfo.bread = CPU_STATS(cp, sys.bread);
3374 3379 cso->cpu_sysinfo.bwrite = CPU_STATS(cp, sys.bwrite);
3375 3380 cso->cpu_sysinfo.lread = CPU_STATS(cp, sys.lread);
3376 3381 cso->cpu_sysinfo.lwrite = CPU_STATS(cp, sys.lwrite);
3377 3382 cso->cpu_sysinfo.phread = CPU_STATS(cp, sys.phread);
3378 3383 cso->cpu_sysinfo.phwrite = CPU_STATS(cp, sys.phwrite);
3379 3384 cso->cpu_sysinfo.pswitch = CPU_STATS(cp, sys.pswitch);
3380 3385 cso->cpu_sysinfo.trap = CPU_STATS(cp, sys.trap);
3381 3386 cso->cpu_sysinfo.intr = 0;
3382 3387 for (i = 0; i < PIL_MAX; i++)
3383 3388 cso->cpu_sysinfo.intr += CPU_STATS(cp, sys.intr[i]);
3384 3389 cso->cpu_sysinfo.syscall = CPU_STATS(cp, sys.syscall);
3385 3390 cso->cpu_sysinfo.sysread = CPU_STATS(cp, sys.sysread);
3386 3391 cso->cpu_sysinfo.syswrite = CPU_STATS(cp, sys.syswrite);
3387 3392 cso->cpu_sysinfo.sysfork = CPU_STATS(cp, sys.sysfork);
3388 3393 cso->cpu_sysinfo.sysvfork = CPU_STATS(cp, sys.sysvfork);
3389 3394 cso->cpu_sysinfo.sysexec = CPU_STATS(cp, sys.sysexec);
3390 3395 cso->cpu_sysinfo.readch = CPU_STATS(cp, sys.readch);
3391 3396 cso->cpu_sysinfo.writech = CPU_STATS(cp, sys.writech);
3392 3397 cso->cpu_sysinfo.rcvint = CPU_STATS(cp, sys.rcvint);
3393 3398 cso->cpu_sysinfo.xmtint = CPU_STATS(cp, sys.xmtint);
3394 3399 cso->cpu_sysinfo.mdmint = CPU_STATS(cp, sys.mdmint);
3395 3400 cso->cpu_sysinfo.rawch = CPU_STATS(cp, sys.rawch);
3396 3401 cso->cpu_sysinfo.canch = CPU_STATS(cp, sys.canch);
3397 3402 cso->cpu_sysinfo.outch = CPU_STATS(cp, sys.outch);
3398 3403 cso->cpu_sysinfo.msg = CPU_STATS(cp, sys.msg);
3399 3404 cso->cpu_sysinfo.sema = CPU_STATS(cp, sys.sema);
3400 3405 cso->cpu_sysinfo.namei = CPU_STATS(cp, sys.namei);
3401 3406 cso->cpu_sysinfo.ufsiget = CPU_STATS(cp, sys.ufsiget);
3402 3407 cso->cpu_sysinfo.ufsdirblk = CPU_STATS(cp, sys.ufsdirblk);
3403 3408 cso->cpu_sysinfo.ufsipage = CPU_STATS(cp, sys.ufsipage);
3404 3409 cso->cpu_sysinfo.ufsinopage = CPU_STATS(cp, sys.ufsinopage);
3405 3410 cso->cpu_sysinfo.inodeovf = 0;
3406 3411 cso->cpu_sysinfo.fileovf = 0;
3407 3412 cso->cpu_sysinfo.procovf = CPU_STATS(cp, sys.procovf);
3408 3413 cso->cpu_sysinfo.intrthread = 0;
3409 3414 for (i = 0; i < LOCK_LEVEL - 1; i++)
3410 3415 cso->cpu_sysinfo.intrthread += CPU_STATS(cp, sys.intr[i]);
3411 3416 cso->cpu_sysinfo.intrblk = CPU_STATS(cp, sys.intrblk);
3412 3417 cso->cpu_sysinfo.idlethread = CPU_STATS(cp, sys.idlethread);
3413 3418 cso->cpu_sysinfo.inv_swtch = CPU_STATS(cp, sys.inv_swtch);
3414 3419 cso->cpu_sysinfo.nthreads = CPU_STATS(cp, sys.nthreads);
3415 3420 cso->cpu_sysinfo.cpumigrate = CPU_STATS(cp, sys.cpumigrate);
3416 3421 cso->cpu_sysinfo.xcalls = CPU_STATS(cp, sys.xcalls);
3417 3422 cso->cpu_sysinfo.mutex_adenters = CPU_STATS(cp, sys.mutex_adenters);
3418 3423 cso->cpu_sysinfo.rw_rdfails = CPU_STATS(cp, sys.rw_rdfails);
3419 3424 cso->cpu_sysinfo.rw_wrfails = CPU_STATS(cp, sys.rw_wrfails);
3420 3425 cso->cpu_sysinfo.modload = CPU_STATS(cp, sys.modload);
3421 3426 cso->cpu_sysinfo.modunload = CPU_STATS(cp, sys.modunload);
3422 3427 cso->cpu_sysinfo.bawrite = CPU_STATS(cp, sys.bawrite);
3423 3428 cso->cpu_sysinfo.rw_enters = 0;
3424 3429 cso->cpu_sysinfo.win_uo_cnt = 0;
3425 3430 cso->cpu_sysinfo.win_uu_cnt = 0;
3426 3431 cso->cpu_sysinfo.win_so_cnt = 0;
3427 3432 cso->cpu_sysinfo.win_su_cnt = 0;
3428 3433 cso->cpu_sysinfo.win_suo_cnt = 0;
3429 3434
3430 3435 cso->cpu_syswait.iowait = CPU_STATS(cp, sys.iowait);
3431 3436 cso->cpu_syswait.swap = 0;
3432 3437 cso->cpu_syswait.physio = 0;
3433 3438
3434 3439 cso->cpu_vminfo.pgrec = CPU_STATS(cp, vm.pgrec);
3435 3440 cso->cpu_vminfo.pgfrec = CPU_STATS(cp, vm.pgfrec);
3436 3441 cso->cpu_vminfo.pgin = CPU_STATS(cp, vm.pgin);
3437 3442 cso->cpu_vminfo.pgpgin = CPU_STATS(cp, vm.pgpgin);
3438 3443 cso->cpu_vminfo.pgout = CPU_STATS(cp, vm.pgout);
3439 3444 cso->cpu_vminfo.pgpgout = CPU_STATS(cp, vm.pgpgout);
3440 3445 cso->cpu_vminfo.swapin = CPU_STATS(cp, vm.swapin);
3441 3446 cso->cpu_vminfo.pgswapin = CPU_STATS(cp, vm.pgswapin);
3442 3447 cso->cpu_vminfo.swapout = CPU_STATS(cp, vm.swapout);
3443 3448 cso->cpu_vminfo.pgswapout = CPU_STATS(cp, vm.pgswapout);
3444 3449 cso->cpu_vminfo.zfod = CPU_STATS(cp, vm.zfod);
3445 3450 cso->cpu_vminfo.dfree = CPU_STATS(cp, vm.dfree);
3446 3451 cso->cpu_vminfo.scan = CPU_STATS(cp, vm.scan);
3447 3452 cso->cpu_vminfo.rev = CPU_STATS(cp, vm.rev);
3448 3453 cso->cpu_vminfo.hat_fault = CPU_STATS(cp, vm.hat_fault);
3449 3454 cso->cpu_vminfo.as_fault = CPU_STATS(cp, vm.as_fault);
3450 3455 cso->cpu_vminfo.maj_fault = CPU_STATS(cp, vm.maj_fault);
3451 3456 cso->cpu_vminfo.cow_fault = CPU_STATS(cp, vm.cow_fault);
3452 3457 cso->cpu_vminfo.prot_fault = CPU_STATS(cp, vm.prot_fault);
3453 3458 cso->cpu_vminfo.softlock = CPU_STATS(cp, vm.softlock);
3454 3459 cso->cpu_vminfo.kernel_asflt = CPU_STATS(cp, vm.kernel_asflt);
3455 3460 cso->cpu_vminfo.pgrrun = CPU_STATS(cp, vm.pgrrun);
3456 3461 cso->cpu_vminfo.execpgin = CPU_STATS(cp, vm.execpgin);
3457 3462 cso->cpu_vminfo.execpgout = CPU_STATS(cp, vm.execpgout);
3458 3463 cso->cpu_vminfo.execfree = CPU_STATS(cp, vm.execfree);
3459 3464 cso->cpu_vminfo.anonpgin = CPU_STATS(cp, vm.anonpgin);
3460 3465 cso->cpu_vminfo.anonpgout = CPU_STATS(cp, vm.anonpgout);
3461 3466 cso->cpu_vminfo.anonfree = CPU_STATS(cp, vm.anonfree);
3462 3467 cso->cpu_vminfo.fspgin = CPU_STATS(cp, vm.fspgin);
3463 3468 cso->cpu_vminfo.fspgout = CPU_STATS(cp, vm.fspgout);
3464 3469 cso->cpu_vminfo.fsfree = CPU_STATS(cp, vm.fsfree);
3465 3470
3466 3471 return (0);
3467 3472 }
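cpu_stat_ks_update() services the legacy raw cpu_stat_t kstat for older consumers; fields with no remaining per-CPU source (the wait states, inodeovf, fileovf, rw_enters, the window counters) are pinned to zero rather than dropped, so existing readers keep working. From userland either representation can be consumed through libkstat; a minimal hedged sketch of reading one counter from the per-CPU named "sys" kstat follows, with the kstat and field names assumed from the template above and error handling trimmed:

	/*
	 * Userland sketch (libkstat): read one counter from the per-CPU
	 * named "sys" kstat for CPU 0.  Compile with -lkstat.  Names are
	 * assumptions based on the templates in this file.
	 */
	#include <kstat.h>
	#include <stdio.h>

	int
	main(void)
	{
		kstat_ctl_t *kc;
		kstat_t *ksp;
		kstat_named_t *kn;

		if ((kc = kstat_open()) == NULL)
			return (1);

		/* module "cpu", instance 0, name "sys" */
		if ((ksp = kstat_lookup(kc, "cpu", 0, "sys")) == NULL ||
		    kstat_read(kc, ksp, NULL) == -1) {
			(void) kstat_close(kc);
			return (1);
		}

		if ((kn = kstat_data_lookup(ksp, "syscall")) != NULL)
			(void) printf("syscalls: %llu\n",
			    (unsigned long long)kn->value.ui64);

		(void) kstat_close(kc);
		return (0);
	}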
1919 lines elided