OS-7125 Need mitigation of L1TF (CVE-2018-3646)
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
--- old/usr/src/uts/common/disp/cpupart.c
+++ new/usr/src/uts/common/disp/cpupart.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1996, 2010, Oracle and/or its affiliates. All rights reserved.
23 - * Copyright (c) 2017 by Delphix. All rights reserved.
23 + * Copyright 2018 Joyent, Inc.
24 24 */
25 25
26 26 #include <sys/types.h>
27 27 #include <sys/systm.h>
28 28 #include <sys/cmn_err.h>
29 29 #include <sys/cpuvar.h>
30 30 #include <sys/thread.h>
31 31 #include <sys/disp.h>
32 32 #include <sys/kmem.h>
33 33 #include <sys/debug.h>
34 34 #include <sys/cpupart.h>
35 35 #include <sys/pset.h>
36 36 #include <sys/var.h>
37 37 #include <sys/cyclic.h>
38 38 #include <sys/lgrp.h>
39 39 #include <sys/pghw.h>
40 40 #include <sys/loadavg.h>
41 41 #include <sys/class.h>
42 42 #include <sys/fss.h>
43 43 #include <sys/pool.h>
44 44 #include <sys/pool_pset.h>
45 45 #include <sys/policy.h>
46 46
47 47 /*
48 48 * Calling pool_lock() protects the pools configuration, which includes
49 49 * CPU partitions. cpu_lock protects the CPU partition list, and prevents
50 50 * partitions from being created or destroyed while the lock is held.
51 51 * The lock ordering with respect to related locks is:
52 52 *
53 53 * pool_lock() ---> cpu_lock ---> pidlock --> p_lock
54 54 *
55 55 * Blocking memory allocations may be made while holding "pool_lock"
56 56 * or cpu_lock.
57 57 */
58 58
59 59 /*
60 60 * The cp_default partition is allocated statically, but its lgroup load average
61 61 * (lpl) list is allocated dynamically after kmem subsystem is initialized. This
62 62 * saves some memory since the space allocated reflects the actual number of
63 63 * lgroups supported by the platform. The lgrp facility provides a temporary
64 64 * space to hold lpl information during system bootstrap.
65 65 */
66 66
67 67 cpupart_t *cp_list_head;
68 68 cpupart_t cp_default;
69 69 static cpupartid_t cp_id_next;
70 70 uint_t cp_numparts;
71 71 uint_t cp_numparts_nonempty;
72 72
73 73 /*
74 74 * Need to limit total number of partitions to avoid slowing down the
75 75 * clock code too much. The clock code traverses the list of
76 76 * partitions and needs to be able to execute in a reasonable amount
77 77 * of time (less than 1/hz seconds). The maximum is sized based on
78 78 * max_ncpus so it shouldn't be a problem unless there are large
79 79 * numbers of empty partitions.
80 80 */
81 81 static uint_t cp_max_numparts;
82 82
83 83 /*
84 84 * Processor sets and CPU partitions are different but related concepts.
85 85 * A processor set is a user-level abstraction allowing users to create
86 86 * sets of CPUs and bind threads exclusively to those sets. A CPU
87 87 * partition is a kernel dispatcher object consisting of a set of CPUs
88 88 * and a global dispatch queue. The processor set abstraction is
89 89 * implemented via a CPU partition, and currently there is a 1-1
90 90 * mapping between processor sets and partitions (excluding the default
91 91 * partition, which is not visible as a processor set). Hence, the
92 92 * numbering for processor sets and CPU partitions is identical. This
93 93 * may not always be true in the future, and these macros could become
94 94 * less trivial if we support e.g. a processor set containing multiple
95 95 * CPU partitions.
96 96 */
97 97 #define PSTOCP(psid) ((cpupartid_t)((psid) == PS_NONE ? CP_DEFAULT : (psid)))
98 98 #define CPTOPS(cpid) ((psetid_t)((cpid) == CP_DEFAULT ? PS_NONE : (cpid)))
99 99
100 100 static int cpupart_unbind_threads(cpupart_t *, boolean_t);
101 101
102 102 /*
103 103 * Find a CPU partition given a processor set ID.
104 104 */
105 105 static cpupart_t *
106 106 cpupart_find_all(psetid_t psid)
107 107 {
108 108 cpupart_t *cp;
109 109 cpupartid_t cpid = PSTOCP(psid);
110 110
111 111 ASSERT(MUTEX_HELD(&cpu_lock));
112 112
113 113 /* default partition not visible as a processor set */
114 114 if (psid == CP_DEFAULT)
115 115 return (NULL);
116 116
117 117 if (psid == PS_MYID)
118 118 return (curthread->t_cpupart);
119 119
120 120 cp = cp_list_head;
121 121 do {
122 122 if (cp->cp_id == cpid)
123 123 return (cp);
124 124 cp = cp->cp_next;
125 125 } while (cp != cp_list_head);
126 126 return (NULL);
127 127 }
128 128
129 129 /*
130 130 * Find a CPU partition given a processor set ID if the processor set
131 131 * should be visible from the calling zone.
132 132 */
133 133 cpupart_t *
134 134 cpupart_find(psetid_t psid)
135 135 {
136 136 cpupart_t *cp;
137 137
138 138 ASSERT(MUTEX_HELD(&cpu_lock));
139 139 cp = cpupart_find_all(psid);
140 140 if (cp != NULL && !INGLOBALZONE(curproc) && pool_pset_enabled() &&
141 141 zone_pset_get(curproc->p_zone) != CPTOPS(cp->cp_id))
142 142 return (NULL);
143 143 return (cp);
144 144 }
145 145
146 146 static int
147 147 cpupart_kstat_update(kstat_t *ksp, int rw)
148 148 {
149 149 cpupart_t *cp = (cpupart_t *)ksp->ks_private;
150 150 cpupart_kstat_t *cpksp = ksp->ks_data;
151 151
152 152 if (rw == KSTAT_WRITE)
153 153 return (EACCES);
154 154
155 155 cpksp->cpk_updates.value.ui64 = cp->cp_updates;
156 156 cpksp->cpk_runnable.value.ui64 = cp->cp_nrunnable_cum;
157 157 cpksp->cpk_waiting.value.ui64 = cp->cp_nwaiting_cum;
158 158 cpksp->cpk_ncpus.value.ui32 = cp->cp_ncpus;
159 159 cpksp->cpk_avenrun_1min.value.ui32 = cp->cp_hp_avenrun[0] >>
160 160 (16 - FSHIFT);
161 161 cpksp->cpk_avenrun_5min.value.ui32 = cp->cp_hp_avenrun[1] >>
162 162 (16 - FSHIFT);
163 163 cpksp->cpk_avenrun_15min.value.ui32 = cp->cp_hp_avenrun[2] >>
164 164 (16 - FSHIFT);
165 165 return (0);
166 166 }
167 167
168 168 static void
169 169 cpupart_kstat_create(cpupart_t *cp)
170 170 {
171 171 kstat_t *ksp;
172 172 zoneid_t zoneid;
173 173
174 174 ASSERT(MUTEX_HELD(&cpu_lock));
175 175
176 176 /*
177 177 * We have a bit of a chicken-egg problem since this code will
178 178 * get called to create the kstats for CP_DEFAULT before the
179 179 * pools framework gets initialized. We circumvent the problem
180 180 * by special-casing cp_default.
181 181 */
182 182 if (cp != &cp_default && pool_pset_enabled())
183 183 zoneid = GLOBAL_ZONEID;
184 184 else
185 185 zoneid = ALL_ZONES;
186 186 ksp = kstat_create_zone("unix", cp->cp_id, "pset", "misc",
187 187 KSTAT_TYPE_NAMED,
188 188 sizeof (cpupart_kstat_t) / sizeof (kstat_named_t), 0, zoneid);
189 189 if (ksp != NULL) {
190 190 cpupart_kstat_t *cpksp = ksp->ks_data;
191 191
192 192 kstat_named_init(&cpksp->cpk_updates, "updates",
193 193 KSTAT_DATA_UINT64);
194 194 kstat_named_init(&cpksp->cpk_runnable, "runnable",
195 195 KSTAT_DATA_UINT64);
196 196 kstat_named_init(&cpksp->cpk_waiting, "waiting",
197 197 KSTAT_DATA_UINT64);
198 198 kstat_named_init(&cpksp->cpk_ncpus, "ncpus",
199 199 KSTAT_DATA_UINT32);
200 200 kstat_named_init(&cpksp->cpk_avenrun_1min, "avenrun_1min",
201 201 KSTAT_DATA_UINT32);
202 202 kstat_named_init(&cpksp->cpk_avenrun_5min, "avenrun_5min",
203 203 KSTAT_DATA_UINT32);
204 204 kstat_named_init(&cpksp->cpk_avenrun_15min, "avenrun_15min",
205 205 KSTAT_DATA_UINT32);
206 206
207 207 ksp->ks_update = cpupart_kstat_update;
208 208 ksp->ks_private = cp;
209 209
210 210 kstat_install(ksp);
211 211 }
212 212 cp->cp_kstat = ksp;
213 213 }
214 214
215 215 /*
216 216 * Initialize the cpupart's lgrp partions (lpls)
217 217 */
218 218 static void
219 219 cpupart_lpl_initialize(cpupart_t *cp)
220 220 {
221 221 int i, sz;
222 222
223 223 sz = cp->cp_nlgrploads = lgrp_plat_max_lgrps();
224 224 cp->cp_lgrploads = kmem_zalloc(sizeof (lpl_t) * sz, KM_SLEEP);
225 225
226 226 for (i = 0; i < sz; i++) {
227 227 /*
228 228 * The last entry of the lpl's resource set is always NULL
229 229 * by design (to facilitate iteration)...hence the "oversizing"
230 230 * by 1.
231 231 */
232 232 cp->cp_lgrploads[i].lpl_rset_sz = sz + 1;
233 233 cp->cp_lgrploads[i].lpl_rset =
234 234 kmem_zalloc(sizeof (struct lgrp_ld *) * (sz + 1), KM_SLEEP);
235 235 cp->cp_lgrploads[i].lpl_id2rset =
236 236 kmem_zalloc(sizeof (int) * (sz + 1), KM_SLEEP);
237 237 cp->cp_lgrploads[i].lpl_lgrpid = i;
238 238 }
239 239 }
240 240
241 241 /*
242 242 * Teardown the cpupart's lgrp partitions
243 243 */
244 244 static void
245 245 cpupart_lpl_teardown(cpupart_t *cp)
246 246 {
247 247 int i, sz;
248 248 lpl_t *lpl;
249 249
250 250 for (i = 0; i < cp->cp_nlgrploads; i++) {
251 251 lpl = &cp->cp_lgrploads[i];
252 252
253 253 sz = lpl->lpl_rset_sz;
254 254 kmem_free(lpl->lpl_rset, sizeof (struct lgrp_ld *) * sz);
255 255 kmem_free(lpl->lpl_id2rset, sizeof (int) * sz);
256 256 lpl->lpl_rset = NULL;
257 257 lpl->lpl_id2rset = NULL;
258 258 }
259 259 kmem_free(cp->cp_lgrploads, sizeof (lpl_t) * cp->cp_nlgrploads);
260 260 cp->cp_lgrploads = NULL;
261 261 }
262 262
263 263 /*
264 264 * Initialize the default partition and kpreempt disp queue.
265 265 */
266 266 void
267 267 cpupart_initialize_default(void)
268 268 {
269 269 lgrp_id_t i;
270 270
271 271 cp_list_head = &cp_default;
272 272 cp_default.cp_next = &cp_default;
273 273 cp_default.cp_prev = &cp_default;
274 274 cp_default.cp_id = CP_DEFAULT;
275 275 cp_default.cp_kp_queue.disp_maxrunpri = -1;
276 276 cp_default.cp_kp_queue.disp_max_unbound_pri = -1;
277 277 cp_default.cp_kp_queue.disp_cpu = NULL;
278 278 cp_default.cp_gen = 0;
279 279 cp_default.cp_loadavg.lg_cur = 0;
280 280 cp_default.cp_loadavg.lg_len = 0;
281 281 cp_default.cp_loadavg.lg_total = 0;
282 282 for (i = 0; i < S_LOADAVG_SZ; i++) {
283 283 cp_default.cp_loadavg.lg_loads[i] = 0;
284 284 }
285 285 DISP_LOCK_INIT(&cp_default.cp_kp_queue.disp_lock);
286 286 cp_id_next = CP_DEFAULT + 1;
287 287 cpupart_kstat_create(&cp_default);
288 288 cp_numparts = 1;
289 289 if (cp_max_numparts == 0) /* allow for /etc/system tuning */
290 290 cp_max_numparts = max_ncpus * 2 + 1;
291 291 /*
292 292 * Allocate space for cp_default list of lgrploads
293 293 */
294 294 cpupart_lpl_initialize(&cp_default);
295 295
296 296 /*
297 297 * The initial lpl topology is created in a special lpl list
298 298 * lpl_bootstrap. It should be copied to cp_default.
299 299 * NOTE: lpl_topo_bootstrap() also updates CPU0 cpu_lpl pointer to point
300 300 * to the correct lpl in the cp_default.cp_lgrploads list.
301 301 */
302 302 lpl_topo_bootstrap(cp_default.cp_lgrploads,
303 303 cp_default.cp_nlgrploads);
304 304
305 305
306 306 cp_default.cp_attr = PSET_NOESCAPE;
307 307 cp_numparts_nonempty = 1;
308 308 /*
309 309 * Set t0's home
310 310 */
311 311 t0.t_lpl = &cp_default.cp_lgrploads[LGRP_ROOTID];
312 312
313 313 bitset_init(&cp_default.cp_cmt_pgs);
314 314 bitset_init_fanout(&cp_default.cp_haltset, cp_haltset_fanout);
315 315
316 316 bitset_resize(&cp_default.cp_haltset, max_ncpus);
317 317 }
318 318
319 319
320 320 static int
321 321 cpupart_move_cpu(cpu_t *cp, cpupart_t *newpp, int forced)
322 322 {
323 323 cpupart_t *oldpp;
324 324 cpu_t *ncp, *newlist;
325 325 kthread_t *t;
326 326 int move_threads = 1;
327 327 lgrp_id_t lgrpid;
328 - proc_t *p;
328 + proc_t *p;
329 329 int lgrp_diff_lpl;
330 330 lpl_t *cpu_lpl;
331 331 int ret;
332 332 boolean_t unbind_all_threads = (forced != 0);
333 333
334 334 ASSERT(MUTEX_HELD(&cpu_lock));
335 335 ASSERT(newpp != NULL);
336 336
337 337 oldpp = cp->cpu_part;
338 338 ASSERT(oldpp != NULL);
339 339 ASSERT(oldpp->cp_ncpus > 0);
340 340
341 341 if (newpp == oldpp) {
342 342 /*
343 343 * Don't need to do anything.
344 344 */
345 345 return (0);
346 346 }
347 347
348 348 cpu_state_change_notify(cp->cpu_id, CPU_CPUPART_OUT);
349 349
350 350 if (!disp_bound_partition(cp, 0)) {
351 351 /*
352 352 * Don't need to move threads if there are no threads in
353 353 * the partition. Note that threads can't enter the
354 354 * partition while we're holding cpu_lock.
355 355 */
356 356 move_threads = 0;
357 357 } else if (oldpp->cp_ncpus == 1) {
358 358 /*
359 359 * The last CPU is removed from a partition which has threads
360 360 * running in it. Some of these threads may be bound to this
361 361 * CPU.
362 362 *
363 363 * Attempt to unbind threads from the CPU and from the processor
364 364 * set. Note that no threads should be bound to this CPU since
365 365 * cpupart_move_threads will refuse to move bound threads to
366 366 * other CPUs.
367 367 */
368 368 (void) cpu_unbind(oldpp->cp_cpulist->cpu_id, B_FALSE);
369 369 (void) cpupart_unbind_threads(oldpp, B_FALSE);
370 370
371 371 if (!disp_bound_partition(cp, 0)) {
372 372 /*
373 373 * No bound threads in this partition any more
374 374 */
375 375 move_threads = 0;
376 376 } else {
377 377 /*
378 378 * There are still threads bound to the partition
379 379 */
380 380 cpu_state_change_notify(cp->cpu_id, CPU_CPUPART_IN);
381 381 return (EBUSY);
382 382 }
383 383 }
384 384
385 385 /*
386 386 * If forced flag is set unbind any threads from this CPU.
387 387 * Otherwise unbind soft-bound threads only.
388 388 */
389 389 if ((ret = cpu_unbind(cp->cpu_id, unbind_all_threads)) != 0) {
390 390 cpu_state_change_notify(cp->cpu_id, CPU_CPUPART_IN);
391 391 return (ret);
392 392 }
393 393
394 394 /*
395 395 * Stop further threads weak binding to this cpu.
396 396 */
397 397 cpu_inmotion = cp;
398 398 membar_enter();
399 399
400 400 /*
401 401 * Notify the Processor Groups subsystem that the CPU
402 402 * will be moving cpu partitions. This is done before
403 403 * CPUs are paused to provide an opportunity for any
404 404 * needed memory allocations.
405 405 */
406 406 pg_cpupart_out(cp, oldpp);
407 407 pg_cpupart_in(cp, newpp);
408 408
409 409 again:
410 410 if (move_threads) {
411 411 int loop_count;
412 412 /*
413 413 * Check for threads strong or weak bound to this CPU.
414 414 */
415 415 for (loop_count = 0; disp_bound_threads(cp, 0); loop_count++) {
416 416 if (loop_count >= 5) {
417 417 cpu_state_change_notify(cp->cpu_id,
418 418 CPU_CPUPART_IN);
419 419 pg_cpupart_out(cp, newpp);
420 420 pg_cpupart_in(cp, oldpp);
421 421 cpu_inmotion = NULL;
422 422 return (EBUSY); /* some threads still bound */
423 423 }
424 424 delay(1);
425 425 }
426 426 }
427 427
428 428 /*
429 429 * Before we actually start changing data structures, notify
430 430 * the cyclic subsystem that we want to move this CPU out of its
431 431 * partition.
432 432 */
433 433 if (!cyclic_move_out(cp)) {
434 434 /*
435 435 * This CPU must be the last CPU in a processor set with
436 436 * a bound cyclic.
437 437 */
438 438 cpu_state_change_notify(cp->cpu_id, CPU_CPUPART_IN);
439 439 pg_cpupart_out(cp, newpp);
440 440 pg_cpupart_in(cp, oldpp);
441 441 cpu_inmotion = NULL;
442 442 return (EBUSY);
443 443 }
444 444
445 445 pause_cpus(cp, NULL);
446 446
447 447 if (move_threads) {
448 448 /*
449 449 * The thread on cpu before the pause thread may have read
450 450 * cpu_inmotion before we raised the barrier above. Check
451 451 * again.
452 452 */
453 453 if (disp_bound_threads(cp, 1)) {
454 454 start_cpus();
455 455 goto again;
456 456 }
457 457
458 458 }
459 459
460 460 /*
461 461 * Now that CPUs are paused, let the PG subsystem perform
462 462 * any necessary data structure updates.
463 463 */
464 464 pg_cpupart_move(cp, oldpp, newpp);
465 465
466 466 /* save this cpu's lgroup -- it'll be the same in the new partition */
467 467 lgrpid = cp->cpu_lpl->lpl_lgrpid;
468 468
469 469 cpu_lpl = cp->cpu_lpl;
470 470 /*
471 471 * let the lgroup framework know cp has left the partition
472 472 */
473 473 lgrp_config(LGRP_CONFIG_CPUPART_DEL, (uintptr_t)cp, lgrpid);
474 474
475 475 /* move out of old partition */
476 476 oldpp->cp_ncpus--;
477 477 if (oldpp->cp_ncpus > 0) {
478 478
479 479 ncp = cp->cpu_prev_part->cpu_next_part = cp->cpu_next_part;
480 480 cp->cpu_next_part->cpu_prev_part = cp->cpu_prev_part;
481 481 if (oldpp->cp_cpulist == cp) {
482 482 oldpp->cp_cpulist = ncp;
483 483 }
484 484 } else {
485 485 ncp = oldpp->cp_cpulist = NULL;
486 486 cp_numparts_nonempty--;
487 487 ASSERT(cp_numparts_nonempty != 0);
488 488 }
489 489 oldpp->cp_gen++;
490 490
491 491 /* move into new partition */
492 492 newlist = newpp->cp_cpulist;
493 493 if (newlist == NULL) {
494 494 newpp->cp_cpulist = cp->cpu_next_part = cp->cpu_prev_part = cp;
495 495 cp_numparts_nonempty++;
496 496 ASSERT(cp_numparts_nonempty != 0);
497 497 } else {
498 498 cp->cpu_next_part = newlist;
499 499 cp->cpu_prev_part = newlist->cpu_prev_part;
500 500 newlist->cpu_prev_part->cpu_next_part = cp;
501 501 newlist->cpu_prev_part = cp;
502 502 }
503 503 cp->cpu_part = newpp;
504 504 newpp->cp_ncpus++;
505 505 newpp->cp_gen++;
506 506
507 507 ASSERT(bitset_is_null(&newpp->cp_haltset));
508 508 ASSERT(bitset_is_null(&oldpp->cp_haltset));
509 509
510 510 /*
511 511 * let the lgroup framework know cp has entered the partition
512 512 */
513 513 lgrp_config(LGRP_CONFIG_CPUPART_ADD, (uintptr_t)cp, lgrpid);
514 514
515 515 /*
516 516 * If necessary, move threads off processor.
517 517 */
518 518 if (move_threads) {
519 519 ASSERT(ncp != NULL);
520 520
521 521 /*
522 522 * Walk thru the active process list to look for
523 523 * threads that need to have a new home lgroup,
524 524 * or the last CPU they run on is the same CPU
525 525 * being moved out of the partition.
526 526 */
527 527
528 528 for (p = practive; p != NULL; p = p->p_next) {
529 529
530 530 t = p->p_tlist;
531 531
532 532 if (t == NULL)
533 533 continue;
534 534
535 535 lgrp_diff_lpl = 0;
536 536
537 537 do {
538 538
539 539 ASSERT(t->t_lpl != NULL);
540 540
541 541 /*
542 542 * Update the count of how many threads are
543 543 * in this CPU's lgroup but have a different lpl
544 544 */
545 545
546 546 if (t->t_lpl != cpu_lpl &&
547 547 t->t_lpl->lpl_lgrpid == lgrpid)
548 548 lgrp_diff_lpl++;
549 549 /*
550 550 * If the lgroup that t is assigned to no
551 551 * longer has any CPUs in t's partition,
552 552 * we'll have to choose a new lgroup for t.
553 553 */
554 554
555 555 if (!LGRP_CPUS_IN_PART(t->t_lpl->lpl_lgrpid,
556 556 t->t_cpupart)) {
557 557 lgrp_move_thread(t,
558 558 lgrp_choose(t, t->t_cpupart), 0);
559 559 }
560 560
561 561 /*
562 562 * make sure lpl points to our own partition
563 563 */
564 564 ASSERT(t->t_lpl >= t->t_cpupart->cp_lgrploads &&
565 565 (t->t_lpl < t->t_cpupart->cp_lgrploads +
566 566 t->t_cpupart->cp_nlgrploads));
567 567
568 568 ASSERT(t->t_lpl->lpl_ncpu > 0);
569 569
570 570 /* Update CPU last ran on if it was this CPU */
571 571 if (t->t_cpu == cp && t->t_cpupart == oldpp &&
572 572 t->t_bound_cpu != cp) {
573 - t->t_cpu = disp_lowpri_cpu(ncp,
574 - t->t_lpl, t->t_pri, NULL);
573 + t->t_cpu = disp_lowpri_cpu(ncp, t,
574 + t->t_pri);
575 575 }
576 576 t = t->t_forw;
577 577 } while (t != p->p_tlist);
578 578
579 579 /*
580 580 * Didn't find any threads in the same lgroup as this
581 581 * CPU with a different lpl, so remove the lgroup from
582 582 * the process lgroup bitmask.
583 583 */
584 584
585 585 if (lgrp_diff_lpl)
586 586 klgrpset_del(p->p_lgrpset, lgrpid);
587 587 }
588 588
589 589 /*
590 590 * Walk thread list looking for threads that need to be
591 591 * rehomed, since there are some threads that are not in
592 592 * their process's p_tlist.
593 593 */
594 594
595 595 t = curthread;
596 596
597 597 do {
598 598 ASSERT(t != NULL && t->t_lpl != NULL);
599 599
600 600 /*
601 601 * If the lgroup that t is assigned to no
602 602 * longer has any CPUs in t's partition,
603 603 * we'll have to choose a new lgroup for t.
604 604 * Also, choose best lgroup for home when
605 605 * thread has specified lgroup affinities,
606 606 * since there may be an lgroup with more
607 607 * affinity available after moving CPUs
608 608 * around.
609 609 */
610 610 if (!LGRP_CPUS_IN_PART(t->t_lpl->lpl_lgrpid,
611 611 t->t_cpupart) || t->t_lgrp_affinity) {
612 612 lgrp_move_thread(t,
613 613 lgrp_choose(t, t->t_cpupart), 1);
614 614 }
615 615
616 616 /* make sure lpl points to our own partition */
617 617 ASSERT((t->t_lpl >= t->t_cpupart->cp_lgrploads) &&
618 618 (t->t_lpl < t->t_cpupart->cp_lgrploads +
619 619 t->t_cpupart->cp_nlgrploads));
620 620
621 621 ASSERT(t->t_lpl->lpl_ncpu > 0);
622 622
623 623 /* Update CPU last ran on if it was this CPU */
624 624 if (t->t_cpu == cp && t->t_cpupart == oldpp &&
625 625 t->t_bound_cpu != cp) {
626 - t->t_cpu = disp_lowpri_cpu(ncp, t->t_lpl,
627 - t->t_pri, NULL);
626 + t->t_cpu = disp_lowpri_cpu(ncp, t,
627 + t->t_pri);
628 628 }
629 629
630 630 t = t->t_next;
631 631 } while (t != curthread);
632 632
633 633 /*
634 634 * Clear off the CPU's run queue, and the kp queue if the
635 635 * partition is now empty.
636 636 */
637 637 disp_cpu_inactive(cp);
638 638
639 639 /*
640 640 * Make cp switch to a thread from the new partition.
641 641 */
642 642 cp->cpu_runrun = 1;
643 643 cp->cpu_kprunrun = 1;
644 644 }
645 645
646 646 cpu_inmotion = NULL;
647 647 start_cpus();
648 648
649 649 /*
650 650 * Let anyone interested know that cpu has been added to the set.
651 651 */
652 652 cpu_state_change_notify(cp->cpu_id, CPU_CPUPART_IN);
653 653
654 654 /*
655 655 * Now let the cyclic subsystem know that it can reshuffle cyclics
656 656 * bound to the new processor set.
657 657 */
658 658 cyclic_move_in(cp);
659 659
660 660 return (0);
661 661 }
662 662
663 663 /*
664 664 * Check if thread can be moved to a new cpu partition. Called by
665 665 * cpupart_move_thread() and pset_bind_start().
666 666 */
667 667 int
668 668 cpupart_movable_thread(kthread_id_t tp, cpupart_t *cp, int ignore)
669 669 {
670 670 ASSERT(MUTEX_HELD(&cpu_lock));
671 671 ASSERT(MUTEX_HELD(&ttoproc(tp)->p_lock));
672 672 ASSERT(cp != NULL);
673 673 ASSERT(THREAD_LOCK_HELD(tp));
674 674
675 675 /*
676 676 * CPU-bound threads can't be moved.
677 677 */
678 678 if (!ignore) {
679 679 cpu_t *boundcpu = tp->t_bound_cpu ? tp->t_bound_cpu :
680 680 tp->t_weakbound_cpu;
681 681 if (boundcpu != NULL && boundcpu->cpu_part != cp)
682 682 return (EBUSY);
683 683 }
684 684
685 685 if (tp->t_cid == sysdccid) {
686 686 return (EINVAL); /* For now, sysdc threads can't move */
687 687 }
688 688
689 689 return (0);
690 690 }
691 691
692 692 /*
693 693 * Move thread to new partition. If ignore is non-zero, then CPU
694 694 * bindings should be ignored (this is used when destroying a
695 695 * partition).
696 696 */
697 697 static int
698 698 cpupart_move_thread(kthread_id_t tp, cpupart_t *newpp, int ignore,
699 699 void *projbuf, void *zonebuf)
700 700 {
701 701 cpupart_t *oldpp = tp->t_cpupart;
702 702 int ret;
703 703
704 704 ASSERT(MUTEX_HELD(&cpu_lock));
705 705 ASSERT(MUTEX_HELD(&pidlock));
706 706 ASSERT(MUTEX_HELD(&ttoproc(tp)->p_lock));
707 707 ASSERT(newpp != NULL);
708 708
709 709 if (newpp->cp_cpulist == NULL)
710 710 return (EINVAL);
711 711
712 712 /*
713 713 * Check for errors first.
714 714 */
715 715 thread_lock(tp);
716 716 if ((ret = cpupart_movable_thread(tp, newpp, ignore)) != 0) {
717 717 thread_unlock(tp);
718 718 return (ret);
719 719 }
720 720
721 721 /* move the thread */
722 722 if (oldpp != newpp) {
723 723 /*
724 724 * Make the thread switch to the new partition.
725 725 */
726 726 tp->t_cpupart = newpp;
727 727 ASSERT(tp->t_lpl != NULL);
728 728 /*
729 729 * Leave the thread on the same lgroup if possible; otherwise
730 730 * choose a new lgroup for it. In either case, update its
731 731 * t_lpl.
732 732 */
733 733 if (LGRP_CPUS_IN_PART(tp->t_lpl->lpl_lgrpid, newpp) &&
734 734 tp->t_lgrp_affinity == NULL) {
735 735 /*
736 736 * The thread's lgroup has CPUs in the thread's new
737 737 * partition, so the thread can stay assigned to the
738 738 * same lgroup. Update its t_lpl to point to the
739 739 * lpl_t for its lgroup in its new partition.
740 740 */
741 741 lgrp_move_thread(tp, &tp->t_cpupart->\
742 742 cp_lgrploads[tp->t_lpl->lpl_lgrpid], 1);
743 743 } else {
744 744 /*
745 745 * The thread's lgroup has no cpus in its new
746 746 * partition or it has specified lgroup affinities,
747 747 * so choose the best lgroup for the thread and
748 748 * assign it to that lgroup.
749 749 */
750 750 lgrp_move_thread(tp, lgrp_choose(tp, tp->t_cpupart),
751 751 1);
752 752 }
753 753 /*
754 754 * make sure lpl points to our own partition
755 755 */
756 756 ASSERT((tp->t_lpl >= tp->t_cpupart->cp_lgrploads) &&
757 757 (tp->t_lpl < tp->t_cpupart->cp_lgrploads +
758 758 tp->t_cpupart->cp_nlgrploads));
759 759
760 760 ASSERT(tp->t_lpl->lpl_ncpu > 0);
761 761
762 762 if (tp->t_state == TS_ONPROC) {
763 763 cpu_surrender(tp);
764 764 } else if (tp->t_state == TS_RUN) {
765 765 (void) dispdeq(tp);
766 766 setbackdq(tp);
767 767 }
768 768 }
769 769
770 770 /*
771 771 * Our binding has changed; set TP_CHANGEBIND.
772 772 */
773 773 tp->t_proc_flag |= TP_CHANGEBIND;
774 774 aston(tp);
775 775
776 776 thread_unlock(tp);
777 777 fss_changepset(tp, newpp, projbuf, zonebuf);
778 778
779 779 return (0); /* success */
780 780 }
781 781
782 782
783 783 /*
784 784 * This function binds a thread to a partition. Must be called with the
785 785 * p_lock of the containing process held (to keep the thread from going
786 786 * away), and thus also with cpu_lock held (since cpu_lock must be
787 787 * acquired before p_lock). If ignore is non-zero, then CPU bindings
788 788 * should be ignored (this is used when destroying a partition).
789 789 */
790 790 int
791 791 cpupart_bind_thread(kthread_id_t tp, psetid_t psid, int ignore, void *projbuf,
792 792 void *zonebuf)
793 793 {
794 794 cpupart_t *newpp;
795 795
796 796 ASSERT(pool_lock_held());
797 797 ASSERT(MUTEX_HELD(&cpu_lock));
798 798 ASSERT(MUTEX_HELD(&pidlock));
799 799 ASSERT(MUTEX_HELD(&ttoproc(tp)->p_lock));
800 800
801 801 if (psid == PS_NONE)
802 802 newpp = &cp_default;
803 803 else {
804 804 newpp = cpupart_find(psid);
805 805 if (newpp == NULL) {
806 806 return (EINVAL);
807 807 }
808 808 }
809 809 return (cpupart_move_thread(tp, newpp, ignore, projbuf, zonebuf));
810 810 }
811 811
812 812
813 813 /*
814 814 * Create a new partition. On MP systems, this also allocates a
815 815 * kpreempt disp queue for that partition.
816 816 */
817 817 int
818 818 cpupart_create(psetid_t *psid)
819 819 {
820 820 cpupart_t *pp;
821 821
822 822 ASSERT(pool_lock_held());
823 823
824 824 pp = kmem_zalloc(sizeof (cpupart_t), KM_SLEEP);
825 825
826 826 mutex_enter(&cpu_lock);
827 827 if (cp_numparts == cp_max_numparts) {
828 828 mutex_exit(&cpu_lock);
829 829 kmem_free(pp, sizeof (cpupart_t));
830 830 return (ENOMEM);
831 831 }
832 832 cp_numparts++;
833 833 /* find the next free partition ID */
834 834 while (cpupart_find(CPTOPS(cp_id_next)) != NULL)
835 835 cp_id_next++;
836 836 pp->cp_id = cp_id_next++;
837 837 pp->cp_ncpus = 0;
838 838 pp->cp_cpulist = NULL;
839 839 pp->cp_attr = 0;
840 840 klgrpset_clear(pp->cp_lgrpset);
841 841 pp->cp_kp_queue.disp_maxrunpri = -1;
842 842 pp->cp_kp_queue.disp_max_unbound_pri = -1;
843 843 pp->cp_kp_queue.disp_cpu = NULL;
844 844 pp->cp_gen = 0;
845 845 DISP_LOCK_INIT(&pp->cp_kp_queue.disp_lock);
846 846 *psid = CPTOPS(pp->cp_id);
847 847 disp_kp_alloc(&pp->cp_kp_queue, v.v_nglobpris);
848 848 cpupart_kstat_create(pp);
849 849 cpupart_lpl_initialize(pp);
850 850
851 851 bitset_init(&pp->cp_cmt_pgs);
852 852
853 853 /*
854 854 * Initialize and size the partition's bitset of halted CPUs.
855 855 */
856 856 bitset_init_fanout(&pp->cp_haltset, cp_haltset_fanout);
857 857 bitset_resize(&pp->cp_haltset, max_ncpus);
858 858
859 859 /*
860 860 * Pause all CPUs while changing the partition list, to make sure
861 861 * the clock thread (which traverses the list without holding
862 862 * cpu_lock) isn't running.
863 863 */
864 864 pause_cpus(NULL, NULL);
865 865 pp->cp_next = cp_list_head;
866 866 pp->cp_prev = cp_list_head->cp_prev;
867 867 cp_list_head->cp_prev->cp_next = pp;
868 868 cp_list_head->cp_prev = pp;
869 869 start_cpus();
870 870 mutex_exit(&cpu_lock);
871 871
872 872 return (0);
873 873 }
874 874
875 875 /*
876 876 * Move threads from specified partition to cp_default. If `force' is specified,
877 877 * move all threads, otherwise move only soft-bound threads.
878 878 */
879 879 static int
880 880 cpupart_unbind_threads(cpupart_t *pp, boolean_t unbind_all)
881 881 {
882 - void *projbuf, *zonebuf;
882 + void *projbuf, *zonebuf;
883 883 kthread_t *t;
884 884 proc_t *p;
885 885 int err = 0;
886 886 psetid_t psid = pp->cp_id;
887 887
888 888 ASSERT(pool_lock_held());
889 889 ASSERT(MUTEX_HELD(&cpu_lock));
890 890
891 891 if (pp == NULL || pp == &cp_default) {
892 892 return (EINVAL);
893 893 }
894 894
895 895 /*
896 896 * Pre-allocate enough buffers for FSS for all active projects and
897 897 * for all active zones on the system. Unused buffers will be
898 898 * freed later by fss_freebuf().
899 899 */
900 900 projbuf = fss_allocbuf(FSS_NPROJ_BUF, FSS_ALLOC_PROJ);
901 901 zonebuf = fss_allocbuf(FSS_NPROJ_BUF, FSS_ALLOC_ZONE);
902 902
903 903 mutex_enter(&pidlock);
904 904 t = curthread;
905 905 do {
906 906 if (t->t_bind_pset == psid) {
907 907 again: p = ttoproc(t);
908 908 mutex_enter(&p->p_lock);
909 909 if (ttoproc(t) != p) {
910 910 /*
911 911 * lwp_exit has changed this thread's process
912 912 * pointer before we grabbed its p_lock.
913 913 */
914 914 mutex_exit(&p->p_lock);
915 915 goto again;
916 916 }
917 917
918 918 /*
919 919 * Can only unbind threads which have revocable binding
920 920 * unless force unbinding requested.
921 921 */
922 922 if (unbind_all || TB_PSET_IS_SOFT(t)) {
923 923 err = cpupart_bind_thread(t, PS_NONE, 1,
924 924 projbuf, zonebuf);
925 925 if (err) {
926 926 mutex_exit(&p->p_lock);
927 927 mutex_exit(&pidlock);
928 928 fss_freebuf(projbuf, FSS_ALLOC_PROJ);
929 929 fss_freebuf(zonebuf, FSS_ALLOC_ZONE);
930 930 return (err);
931 931 }
932 932 t->t_bind_pset = PS_NONE;
933 933 }
934 934 mutex_exit(&p->p_lock);
935 935 }
936 936 t = t->t_next;
937 937 } while (t != curthread);
938 938
939 939 mutex_exit(&pidlock);
940 940 fss_freebuf(projbuf, FSS_ALLOC_PROJ);
941 941 fss_freebuf(zonebuf, FSS_ALLOC_ZONE);
942 942 return (err);
943 943 }
944 944
945 945 /*
946 946 * Destroy a partition.
947 947 */
948 948 int
949 949 cpupart_destroy(psetid_t psid)
950 950 {
951 951 cpu_t *cp, *first_cp;
952 952 cpupart_t *pp, *newpp;
953 953 int err = 0;
954 954
955 955 ASSERT(pool_lock_held());
956 956 mutex_enter(&cpu_lock);
957 957
958 958 pp = cpupart_find(psid);
959 959 if (pp == NULL || pp == &cp_default) {
960 960 mutex_exit(&cpu_lock);
961 961 return (EINVAL);
962 962 }
963 963
964 964 /*
965 965 * Unbind all the threads currently bound to the partition.
966 966 */
967 967 err = cpupart_unbind_threads(pp, B_TRUE);
968 968 if (err) {
969 969 mutex_exit(&cpu_lock);
970 970 return (err);
971 971 }
972 972
973 973 newpp = &cp_default;
974 974 while ((cp = pp->cp_cpulist) != NULL) {
975 975 if (err = cpupart_move_cpu(cp, newpp, 0)) {
976 976 mutex_exit(&cpu_lock);
977 977 return (err);
978 978 }
979 979 }
980 980
981 981 ASSERT(bitset_is_null(&pp->cp_cmt_pgs));
982 982 ASSERT(bitset_is_null(&pp->cp_haltset));
983 983
984 984 /*
985 985 * Teardown the partition's group of active CMT PGs and halted
986 986 * CPUs now that they have all left.
987 987 */
988 988 bitset_fini(&pp->cp_cmt_pgs);
989 989 bitset_fini(&pp->cp_haltset);
990 990
991 991 /*
992 992 * Reset the pointers in any offline processors so they won't
993 993 * try to rejoin the destroyed partition when they're turned
994 994 * online.
995 995 */
996 996 first_cp = cp = CPU;
997 997 do {
998 998 if (cp->cpu_part == pp) {
999 999 ASSERT(cp->cpu_flags & CPU_OFFLINE);
1000 1000 cp->cpu_part = newpp;
1001 1001 }
1002 1002 cp = cp->cpu_next;
1003 1003 } while (cp != first_cp);
1004 1004
1005 1005 /*
1006 1006 * Pause all CPUs while changing the partition list, to make sure
1007 1007 * the clock thread (which traverses the list without holding
1008 1008 * cpu_lock) isn't running.
1009 1009 */
1010 1010 pause_cpus(NULL, NULL);
1011 1011 pp->cp_prev->cp_next = pp->cp_next;
1012 1012 pp->cp_next->cp_prev = pp->cp_prev;
1013 1013 if (cp_list_head == pp)
1014 1014 cp_list_head = pp->cp_next;
1015 1015 start_cpus();
1016 1016
1017 1017 if (cp_id_next > pp->cp_id)
1018 1018 cp_id_next = pp->cp_id;
1019 1019
1020 1020 if (pp->cp_kstat)
1021 1021 kstat_delete(pp->cp_kstat);
1022 1022
1023 1023 cp_numparts--;
1024 1024
1025 1025 disp_kp_free(&pp->cp_kp_queue);
1026 1026
1027 1027 cpupart_lpl_teardown(pp);
1028 1028
1029 1029 kmem_free(pp, sizeof (cpupart_t));
1030 1030 mutex_exit(&cpu_lock);
1031 1031
1032 1032 return (err);
1033 1033 }
1034 1034
1035 1035
1036 1036 /*
1037 1037 * Return the ID of the partition to which the specified processor belongs.
1038 1038 */
1039 1039 psetid_t
1040 1040 cpupart_query_cpu(cpu_t *cp)
1041 1041 {
1042 1042 ASSERT(MUTEX_HELD(&cpu_lock));
1043 1043
1044 1044 return (CPTOPS(cp->cpu_part->cp_id));
1045 1045 }
1046 1046
1047 1047
1048 1048 /*
1049 1049 * Attach a processor to an existing partition.
1050 1050 */
1051 1051 int
1052 1052 cpupart_attach_cpu(psetid_t psid, cpu_t *cp, int forced)
1053 1053 {
1054 1054 cpupart_t *pp;
1055 1055 int err;
1056 1056
1057 1057 ASSERT(pool_lock_held());
1058 1058 ASSERT(MUTEX_HELD(&cpu_lock));
1059 1059
1060 1060 pp = cpupart_find(psid);
1061 1061 if (pp == NULL)
1062 1062 return (EINVAL);
1063 1063 if (cp->cpu_flags & CPU_OFFLINE)
1064 1064 return (EINVAL);
1065 1065
1066 1066 err = cpupart_move_cpu(cp, pp, forced);
1067 1067 return (err);
1068 1068 }
1069 1069
1070 1070 /*
1071 1071 * Get a list of cpus belonging to the partition. If numcpus is NULL,
1072 1072 * this just checks for a valid partition. If numcpus is non-NULL but
1073 1073 * cpulist is NULL, the current number of cpus is stored in *numcpus.
1074 1074 * If both are non-NULL, the current number of cpus is stored in *numcpus,
1075 1075 * and a list of those cpus up to the size originally in *numcpus is
1076 1076 * stored in cpulist[]. Also, store the processor set id in *psid.
1077 1077 * This is useful in case the processor set id passed in was PS_MYID.
1078 1078 */
1079 1079 int
1080 1080 cpupart_get_cpus(psetid_t *psid, processorid_t *cpulist, uint_t *numcpus)
1081 1081 {
1082 1082 cpupart_t *pp;
1083 1083 uint_t ncpus;
1084 1084 cpu_t *c;
1085 1085 int i;
1086 1086
1087 1087 mutex_enter(&cpu_lock);
1088 1088 pp = cpupart_find(*psid);
1089 1089 if (pp == NULL) {
1090 1090 mutex_exit(&cpu_lock);
1091 1091 return (EINVAL);
1092 1092 }
1093 1093 *psid = CPTOPS(pp->cp_id);
1094 1094 ncpus = pp->cp_ncpus;
1095 1095 if (numcpus) {
1096 1096 if (ncpus > *numcpus) {
1097 1097 /*
1098 1098 * Only copy as many cpus as were passed in, but
1099 1099 * pass back the real number.
1100 1100 */
1101 1101 uint_t t = ncpus;
1102 1102 ncpus = *numcpus;
1103 1103 *numcpus = t;
1104 1104 } else
1105 1105 *numcpus = ncpus;
1106 1106
1107 1107 if (cpulist) {
1108 1108 c = pp->cp_cpulist;
1109 1109 for (i = 0; i < ncpus; i++) {
1110 1110 ASSERT(c != NULL);
1111 1111 cpulist[i] = c->cpu_id;
1112 1112 c = c->cpu_next_part;
1113 1113 }
1114 1114 }
1115 1115 }
1116 1116 mutex_exit(&cpu_lock);
1117 1117 return (0);
1118 1118 }
1119 1119
1120 1120 /*
1121 1121 * Reallocate kpreempt queues for each CPU partition. Called from
1122 1122 * disp_setup when a new scheduling class is loaded that increases the
1123 1123 * number of priorities in the system.
1124 1124 */
1125 1125 void
1126 1126 cpupart_kpqalloc(pri_t npri)
1127 1127 {
1128 1128 cpupart_t *cpp;
1129 1129
1130 1130 ASSERT(MUTEX_HELD(&cpu_lock));
1131 1131 cpp = cp_list_head;
1132 1132 do {
1133 1133 disp_kp_alloc(&cpp->cp_kp_queue, npri);
1134 1134 cpp = cpp->cp_next;
1135 1135 } while (cpp != cp_list_head);
1136 1136 }
1137 1137
1138 1138 int
1139 1139 cpupart_get_loadavg(psetid_t psid, int *buf, int nelem)
1140 1140 {
1141 1141 cpupart_t *cp;
1142 1142 int i;
1143 1143
1144 1144 ASSERT(nelem >= 0);
1145 1145 ASSERT(nelem <= LOADAVG_NSTATS);
1146 1146 ASSERT(MUTEX_HELD(&cpu_lock));
1147 1147
1148 1148 cp = cpupart_find(psid);
1149 1149 if (cp == NULL)
1150 1150 return (EINVAL);
1151 1151 for (i = 0; i < nelem; i++)
1152 1152 buf[i] = cp->cp_hp_avenrun[i] >> (16 - FSHIFT);
1153 1153
1154 1154 return (0);
1155 1155 }
1156 1156
1157 1157
1158 1158 uint_t
1159 1159 cpupart_list(psetid_t *list, uint_t nelem, int flag)
1160 1160 {
1161 1161 uint_t numpart = 0;
1162 1162 cpupart_t *cp;
1163 1163
1164 1164 ASSERT(MUTEX_HELD(&cpu_lock));
1165 1165 ASSERT(flag == CP_ALL || flag == CP_NONEMPTY);
1166 1166
1167 1167 if (list != NULL) {
1168 1168 cp = cp_list_head;
1169 1169 do {
1170 1170 if (((flag == CP_ALL) && (cp != &cp_default)) ||
1171 1171 ((flag == CP_NONEMPTY) && (cp->cp_ncpus != 0))) {
1172 1172 if (numpart == nelem)
1173 1173 break;
1174 1174 list[numpart++] = CPTOPS(cp->cp_id);
1175 1175 }
1176 1176 cp = cp->cp_next;
1177 1177 } while (cp != cp_list_head);
1178 1178 }
1179 1179
1180 1180 ASSERT(numpart < cp_numparts);
1181 1181
1182 1182 if (flag == CP_ALL)
1183 1183 numpart = cp_numparts - 1; /* leave out default partition */
1184 1184 else if (flag == CP_NONEMPTY)
1185 1185 numpart = cp_numparts_nonempty;
1186 1186
1187 1187 return (numpart);
1188 1188 }
1189 1189
1190 1190 int
1191 1191 cpupart_setattr(psetid_t psid, uint_t attr)
1192 1192 {
1193 1193 cpupart_t *cp;
1194 1194
1195 1195 ASSERT(pool_lock_held());
1196 1196
1197 1197 mutex_enter(&cpu_lock);
1198 1198 if ((cp = cpupart_find(psid)) == NULL) {
1199 1199 mutex_exit(&cpu_lock);
1200 1200 return (EINVAL);
1201 1201 }
1202 1202 /*
1203 1203 * PSET_NOESCAPE attribute for default cpu partition is always set
1204 1204 */
1205 1205 if (cp == &cp_default && !(attr & PSET_NOESCAPE)) {
1206 1206 mutex_exit(&cpu_lock);
1207 1207 return (EINVAL);
1208 1208 }
1209 1209 cp->cp_attr = attr;
1210 1210 mutex_exit(&cpu_lock);
1211 1211 return (0);
1212 1212 }
1213 1213
1214 1214 int
1215 1215 cpupart_getattr(psetid_t psid, uint_t *attrp)
1216 1216 {
1217 1217 cpupart_t *cp;
1218 1218
1219 1219 mutex_enter(&cpu_lock);
1220 1220 if ((cp = cpupart_find(psid)) == NULL) {
1221 1221 mutex_exit(&cpu_lock);
1222 1222 return (EINVAL);
1223 1223 }
1224 1224 *attrp = cp->cp_attr;
1225 1225 mutex_exit(&cpu_lock);
1226 1226 return (0);
1227 1227 }