10924 Need mitigation of L1TF (CVE-2018-3646)
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Peter Tribble <peter.tribble@gmail.com>
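The only functional change to this file (beyond whitespace and a copyright update) is at the two thread re-homing call sites in cpupart_move_cpu(), where disp_lowpri_cpu() now takes the thread itself rather than the thread's lpl plus an explicit priority-hint CPU. A minimal before/after sketch of that call shape, excerpted from the hunks below and assuming the new prototype introduced elsewhere in this change (the prototype itself is not part of this file):

    /* Old call shape: pass the thread's lpl and a "current CPU" hint (NULL here). */
    t->t_cpu = disp_lowpri_cpu(ncp, t->t_lpl, t->t_pri, NULL);

    /* New call shape: pass the kthread_t itself; the lpl (and, presumably, other
     * per-thread state needed by the L1TF work) is derived inside disp_lowpri_cpu(). */
    t->t_cpu = disp_lowpri_cpu(ncp, t, t->t_pri);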
--- old/usr/src/uts/common/disp/cpupart.c
+++ new/usr/src/uts/common/disp/cpupart.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1996, 2010, Oracle and/or its affiliates. All rights reserved.
23 + *
24 + * Copyright 2018 Joyent, Inc.
23 25 * Copyright (c) 2017 by Delphix. All rights reserved.
24 26 */
25 27
26 28 #include <sys/types.h>
27 29 #include <sys/systm.h>
28 30 #include <sys/cmn_err.h>
29 31 #include <sys/cpuvar.h>
30 32 #include <sys/thread.h>
31 33 #include <sys/disp.h>
32 34 #include <sys/kmem.h>
33 35 #include <sys/debug.h>
34 36 #include <sys/cpupart.h>
35 37 #include <sys/pset.h>
36 38 #include <sys/var.h>
37 39 #include <sys/cyclic.h>
38 40 #include <sys/lgrp.h>
39 41 #include <sys/pghw.h>
40 42 #include <sys/loadavg.h>
41 43 #include <sys/class.h>
42 44 #include <sys/fss.h>
43 45 #include <sys/pool.h>
44 46 #include <sys/pool_pset.h>
45 47 #include <sys/policy.h>
46 48
47 49 /*
48 50 * Calling pool_lock() protects the pools configuration, which includes
49 51 * CPU partitions. cpu_lock protects the CPU partition list, and prevents
50 52 * partitions from being created or destroyed while the lock is held.
51 53 * The lock ordering with respect to related locks is:
52 54 *
53 55 * pool_lock() ---> cpu_lock ---> pidlock --> p_lock
54 56 *
55 57 * Blocking memory allocations may be made while holding "pool_lock"
56 58 * or cpu_lock.
57 59 */
58 60
59 61 /*
60 62 * The cp_default partition is allocated statically, but its lgroup load average
61 63 * (lpl) list is allocated dynamically after the kmem subsystem is initialized. This
62 64 * saves some memory since the space allocated reflects the actual number of
63 65 * lgroups supported by the platform. The lgrp facility provides a temporary
64 66 * space to hold lpl information during system bootstrap.
65 67 */
66 68
67 69 cpupart_t *cp_list_head;
68 70 cpupart_t cp_default;
69 71 static cpupartid_t cp_id_next;
70 72 uint_t cp_numparts;
71 73 uint_t cp_numparts_nonempty;
72 74
73 75 /*
74 76 * Need to limit total number of partitions to avoid slowing down the
75 77 * clock code too much. The clock code traverses the list of
76 78 * partitions and needs to be able to execute in a reasonable amount
77 79 * of time (less than 1/hz seconds). The maximum is sized based on
78 80 * max_ncpus so it shouldn't be a problem unless there are large
79 81 * numbers of empty partitions.
80 82 */
81 83 static uint_t cp_max_numparts;
82 84
83 85 /*
84 86 * Processor sets and CPU partitions are different but related concepts.
85 87 * A processor set is a user-level abstraction allowing users to create
86 88 * sets of CPUs and bind threads exclusively to those sets. A CPU
87 89 * partition is a kernel dispatcher object consisting of a set of CPUs
88 90 * and a global dispatch queue. The processor set abstraction is
89 91 * implemented via a CPU partition, and currently there is a 1-1
90 92 * mapping between processor sets and partitions (excluding the default
91 93 * partition, which is not visible as a processor set). Hence, the
92 94 * numbering for processor sets and CPU partitions is identical. This
93 95 * may not always be true in the future, and these macros could become
94 96 * less trivial if we support e.g. a processor set containing multiple
95 97 * CPU partitions.
96 98 */
97 99 #define PSTOCP(psid) ((cpupartid_t)((psid) == PS_NONE ? CP_DEFAULT : (psid)))
98 100 #define CPTOPS(cpid) ((psetid_t)((cpid) == CP_DEFAULT ? PS_NONE : (cpid)))
99 101
100 102 static int cpupart_unbind_threads(cpupart_t *, boolean_t);
101 103
102 104 /*
103 105 * Find a CPU partition given a processor set ID.
104 106 */
105 107 static cpupart_t *
106 108 cpupart_find_all(psetid_t psid)
107 109 {
108 110 cpupart_t *cp;
109 111 cpupartid_t cpid = PSTOCP(psid);
110 112
111 113 ASSERT(MUTEX_HELD(&cpu_lock));
112 114
113 115 /* default partition not visible as a processor set */
114 116 if (psid == CP_DEFAULT)
115 117 return (NULL);
116 118
117 119 if (psid == PS_MYID)
118 120 return (curthread->t_cpupart);
119 121
120 122 cp = cp_list_head;
121 123 do {
122 124 if (cp->cp_id == cpid)
123 125 return (cp);
124 126 cp = cp->cp_next;
125 127 } while (cp != cp_list_head);
126 128 return (NULL);
127 129 }
128 130
129 131 /*
130 132 * Find a CPU partition given a processor set ID if the processor set
131 133 * should be visible from the calling zone.
132 134 */
133 135 cpupart_t *
134 136 cpupart_find(psetid_t psid)
135 137 {
136 138 cpupart_t *cp;
137 139
138 140 ASSERT(MUTEX_HELD(&cpu_lock));
139 141 cp = cpupart_find_all(psid);
140 142 if (cp != NULL && !INGLOBALZONE(curproc) && pool_pset_enabled() &&
141 143 zone_pset_get(curproc->p_zone) != CPTOPS(cp->cp_id))
142 144 return (NULL);
143 145 return (cp);
144 146 }
145 147
146 148 static int
147 149 cpupart_kstat_update(kstat_t *ksp, int rw)
148 150 {
149 151 cpupart_t *cp = (cpupart_t *)ksp->ks_private;
150 152 cpupart_kstat_t *cpksp = ksp->ks_data;
151 153
152 154 if (rw == KSTAT_WRITE)
153 155 return (EACCES);
154 156
155 157 cpksp->cpk_updates.value.ui64 = cp->cp_updates;
156 158 cpksp->cpk_runnable.value.ui64 = cp->cp_nrunnable_cum;
157 159 cpksp->cpk_waiting.value.ui64 = cp->cp_nwaiting_cum;
158 160 cpksp->cpk_ncpus.value.ui32 = cp->cp_ncpus;
159 161 cpksp->cpk_avenrun_1min.value.ui32 = cp->cp_hp_avenrun[0] >>
160 162 (16 - FSHIFT);
161 163 cpksp->cpk_avenrun_5min.value.ui32 = cp->cp_hp_avenrun[1] >>
162 164 (16 - FSHIFT);
163 165 cpksp->cpk_avenrun_15min.value.ui32 = cp->cp_hp_avenrun[2] >>
164 166 (16 - FSHIFT);
165 167 return (0);
166 168 }
167 169
168 170 static void
169 171 cpupart_kstat_create(cpupart_t *cp)
170 172 {
171 173 kstat_t *ksp;
172 174 zoneid_t zoneid;
173 175
174 176 ASSERT(MUTEX_HELD(&cpu_lock));
175 177
176 178 /*
177 179 * We have a bit of a chicken-egg problem since this code will
178 180 * get called to create the kstats for CP_DEFAULT before the
179 181 * pools framework gets initialized. We circumvent the problem
180 182 * by special-casing cp_default.
181 183 */
182 184 if (cp != &cp_default && pool_pset_enabled())
183 185 zoneid = GLOBAL_ZONEID;
184 186 else
185 187 zoneid = ALL_ZONES;
186 188 ksp = kstat_create_zone("unix", cp->cp_id, "pset", "misc",
187 189 KSTAT_TYPE_NAMED,
188 190 sizeof (cpupart_kstat_t) / sizeof (kstat_named_t), 0, zoneid);
189 191 if (ksp != NULL) {
190 192 cpupart_kstat_t *cpksp = ksp->ks_data;
191 193
192 194 kstat_named_init(&cpksp->cpk_updates, "updates",
193 195 KSTAT_DATA_UINT64);
194 196 kstat_named_init(&cpksp->cpk_runnable, "runnable",
195 197 KSTAT_DATA_UINT64);
196 198 kstat_named_init(&cpksp->cpk_waiting, "waiting",
197 199 KSTAT_DATA_UINT64);
198 200 kstat_named_init(&cpksp->cpk_ncpus, "ncpus",
199 201 KSTAT_DATA_UINT32);
200 202 kstat_named_init(&cpksp->cpk_avenrun_1min, "avenrun_1min",
201 203 KSTAT_DATA_UINT32);
202 204 kstat_named_init(&cpksp->cpk_avenrun_5min, "avenrun_5min",
203 205 KSTAT_DATA_UINT32);
204 206 kstat_named_init(&cpksp->cpk_avenrun_15min, "avenrun_15min",
205 207 KSTAT_DATA_UINT32);
206 208
207 209 ksp->ks_update = cpupart_kstat_update;
208 210 ksp->ks_private = cp;
209 211
210 212 kstat_install(ksp);
211 213 }
212 214 cp->cp_kstat = ksp;
213 215 }
214 216
215 217 /*
216 218 * Initialize the cpupart's lgrp partitions (lpls)
217 219 */
218 220 static void
219 221 cpupart_lpl_initialize(cpupart_t *cp)
220 222 {
221 223 int i, sz;
222 224
223 225 sz = cp->cp_nlgrploads = lgrp_plat_max_lgrps();
224 226 cp->cp_lgrploads = kmem_zalloc(sizeof (lpl_t) * sz, KM_SLEEP);
225 227
226 228 for (i = 0; i < sz; i++) {
227 229 /*
228 230 * The last entry of the lpl's resource set is always NULL
229 231 * by design (to facilitate iteration)...hence the "oversizing"
230 232 * by 1.
231 233 */
232 234 cp->cp_lgrploads[i].lpl_rset_sz = sz + 1;
233 235 cp->cp_lgrploads[i].lpl_rset =
234 236 kmem_zalloc(sizeof (struct lgrp_ld *) * (sz + 1), KM_SLEEP);
235 237 cp->cp_lgrploads[i].lpl_id2rset =
236 238 kmem_zalloc(sizeof (int) * (sz + 1), KM_SLEEP);
237 239 cp->cp_lgrploads[i].lpl_lgrpid = i;
238 240 }
239 241 }
240 242
241 243 /*
242 244 * Teardown the cpupart's lgrp partitions
243 245 */
244 246 static void
245 247 cpupart_lpl_teardown(cpupart_t *cp)
246 248 {
247 249 int i, sz;
248 250 lpl_t *lpl;
249 251
250 252 for (i = 0; i < cp->cp_nlgrploads; i++) {
251 253 lpl = &cp->cp_lgrploads[i];
252 254
253 255 sz = lpl->lpl_rset_sz;
254 256 kmem_free(lpl->lpl_rset, sizeof (struct lgrp_ld *) * sz);
255 257 kmem_free(lpl->lpl_id2rset, sizeof (int) * sz);
256 258 lpl->lpl_rset = NULL;
257 259 lpl->lpl_id2rset = NULL;
258 260 }
259 261 kmem_free(cp->cp_lgrploads, sizeof (lpl_t) * cp->cp_nlgrploads);
260 262 cp->cp_lgrploads = NULL;
261 263 }
262 264
263 265 /*
264 266 * Initialize the default partition and kpreempt disp queue.
265 267 */
266 268 void
267 269 cpupart_initialize_default(void)
268 270 {
269 271 lgrp_id_t i;
270 272
271 273 cp_list_head = &cp_default;
272 274 cp_default.cp_next = &cp_default;
273 275 cp_default.cp_prev = &cp_default;
274 276 cp_default.cp_id = CP_DEFAULT;
275 277 cp_default.cp_kp_queue.disp_maxrunpri = -1;
276 278 cp_default.cp_kp_queue.disp_max_unbound_pri = -1;
277 279 cp_default.cp_kp_queue.disp_cpu = NULL;
278 280 cp_default.cp_gen = 0;
279 281 cp_default.cp_loadavg.lg_cur = 0;
280 282 cp_default.cp_loadavg.lg_len = 0;
281 283 cp_default.cp_loadavg.lg_total = 0;
282 284 for (i = 0; i < S_LOADAVG_SZ; i++) {
283 285 cp_default.cp_loadavg.lg_loads[i] = 0;
284 286 }
285 287 DISP_LOCK_INIT(&cp_default.cp_kp_queue.disp_lock);
286 288 cp_id_next = CP_DEFAULT + 1;
287 289 cpupart_kstat_create(&cp_default);
288 290 cp_numparts = 1;
289 291 if (cp_max_numparts == 0) /* allow for /etc/system tuning */
290 292 cp_max_numparts = max_ncpus * 2 + 1;
291 293 /*
292 294 * Allocate space for cp_default list of lgrploads
293 295 */
294 296 cpupart_lpl_initialize(&cp_default);
295 297
296 298 /*
297 299 * The initial lpl topology is created in a special lpl list
298 300 * lpl_bootstrap. It should be copied to cp_default.
299 301 * NOTE: lpl_topo_bootstrap() also updates CPU0 cpu_lpl pointer to point
300 302 * to the correct lpl in the cp_default.cp_lgrploads list.
301 303 */
302 304 lpl_topo_bootstrap(cp_default.cp_lgrploads,
303 305 cp_default.cp_nlgrploads);
304 306
305 307
306 308 cp_default.cp_attr = PSET_NOESCAPE;
307 309 cp_numparts_nonempty = 1;
308 310 /*
309 311 * Set t0's home
310 312 */
311 313 t0.t_lpl = &cp_default.cp_lgrploads[LGRP_ROOTID];
312 314
313 315 bitset_init(&cp_default.cp_cmt_pgs);
314 316 bitset_init_fanout(&cp_default.cp_haltset, cp_haltset_fanout);
315 317
316 318 bitset_resize(&cp_default.cp_haltset, max_ncpus);
317 319 }
318 320
319 321
320 322 static int
321 323 cpupart_move_cpu(cpu_t *cp, cpupart_t *newpp, int forced)
322 324 {
323 325 cpupart_t *oldpp;
324 326 cpu_t *ncp, *newlist;
325 327 kthread_t *t;
326 328 int move_threads = 1;
327 329 lgrp_id_t lgrpid;
328 - proc_t *p;
330 + proc_t *p;
329 331 int lgrp_diff_lpl;
330 332 lpl_t *cpu_lpl;
331 333 int ret;
332 334 boolean_t unbind_all_threads = (forced != 0);
333 335
334 336 ASSERT(MUTEX_HELD(&cpu_lock));
335 337 ASSERT(newpp != NULL);
336 338
337 339 oldpp = cp->cpu_part;
338 340 ASSERT(oldpp != NULL);
339 341 ASSERT(oldpp->cp_ncpus > 0);
340 342
341 343 if (newpp == oldpp) {
342 344 /*
343 345 * Don't need to do anything.
344 346 */
345 347 return (0);
346 348 }
347 349
348 350 cpu_state_change_notify(cp->cpu_id, CPU_CPUPART_OUT);
349 351
350 352 if (!disp_bound_partition(cp, 0)) {
351 353 /*
352 354 * Don't need to move threads if there are no threads in
353 355 * the partition. Note that threads can't enter the
354 356 * partition while we're holding cpu_lock.
355 357 */
356 358 move_threads = 0;
357 359 } else if (oldpp->cp_ncpus == 1) {
358 360 /*
359 361 * The last CPU is removed from a partition which has threads
360 362 * running in it. Some of these threads may be bound to this
361 363 * CPU.
362 364 *
363 365 * Attempt to unbind threads from the CPU and from the processor
364 366 * set. Note that no threads should be bound to this CPU since
365 367 * cpupart_move_threads will refuse to move bound threads to
366 368 * other CPUs.
367 369 */
368 370 (void) cpu_unbind(oldpp->cp_cpulist->cpu_id, B_FALSE);
369 371 (void) cpupart_unbind_threads(oldpp, B_FALSE);
370 372
371 373 if (!disp_bound_partition(cp, 0)) {
372 374 /*
373 375 * No bound threads in this partition any more
374 376 */
375 377 move_threads = 0;
376 378 } else {
377 379 /*
378 380 * There are still threads bound to the partition
379 381 */
380 382 cpu_state_change_notify(cp->cpu_id, CPU_CPUPART_IN);
381 383 return (EBUSY);
382 384 }
383 385 }
384 386
385 387 /*
386 388 * If the forced flag is set, unbind any threads from this CPU.
387 389 * Otherwise unbind soft-bound threads only.
388 390 */
389 391 if ((ret = cpu_unbind(cp->cpu_id, unbind_all_threads)) != 0) {
390 392 cpu_state_change_notify(cp->cpu_id, CPU_CPUPART_IN);
391 393 return (ret);
392 394 }
393 395
394 396 /*
397 399 * Stop further threads from weak-binding to this cpu.
396 398 */
397 399 cpu_inmotion = cp;
398 400 membar_enter();
399 401
400 402 /*
401 403 * Notify the Processor Groups subsystem that the CPU
402 404 * will be moving cpu partitions. This is done before
403 405 * CPUs are paused to provide an opportunity for any
404 406 * needed memory allocations.
405 407 */
406 408 pg_cpupart_out(cp, oldpp);
407 409 pg_cpupart_in(cp, newpp);
408 410
409 411 again:
410 412 if (move_threads) {
411 413 int loop_count;
412 414 /*
413 415 * Check for threads strong or weak bound to this CPU.
414 416 */
415 417 for (loop_count = 0; disp_bound_threads(cp, 0); loop_count++) {
416 418 if (loop_count >= 5) {
417 419 cpu_state_change_notify(cp->cpu_id,
418 420 CPU_CPUPART_IN);
419 421 pg_cpupart_out(cp, newpp);
420 422 pg_cpupart_in(cp, oldpp);
421 423 cpu_inmotion = NULL;
422 424 return (EBUSY); /* some threads still bound */
423 425 }
424 426 delay(1);
425 427 }
426 428 }
427 429
428 430 /*
429 431 * Before we actually start changing data structures, notify
430 432 * the cyclic subsystem that we want to move this CPU out of its
431 433 * partition.
432 434 */
433 435 if (!cyclic_move_out(cp)) {
434 436 /*
435 437 * This CPU must be the last CPU in a processor set with
436 438 * a bound cyclic.
437 439 */
438 440 cpu_state_change_notify(cp->cpu_id, CPU_CPUPART_IN);
439 441 pg_cpupart_out(cp, newpp);
440 442 pg_cpupart_in(cp, oldpp);
441 443 cpu_inmotion = NULL;
442 444 return (EBUSY);
443 445 }
444 446
445 447 pause_cpus(cp, NULL);
446 448
447 449 if (move_threads) {
448 450 /*
449 451 * The thread on cpu before the pause thread may have read
450 452 * cpu_inmotion before we raised the barrier above. Check
451 453 * again.
452 454 */
453 455 if (disp_bound_threads(cp, 1)) {
454 456 start_cpus();
455 457 goto again;
456 458 }
457 459
458 460 }
459 461
460 462 /*
461 463 * Now that CPUs are paused, let the PG subsystem perform
462 464 * any necessary data structure updates.
463 465 */
464 466 pg_cpupart_move(cp, oldpp, newpp);
465 467
466 468 /* save this cpu's lgroup -- it'll be the same in the new partition */
467 469 lgrpid = cp->cpu_lpl->lpl_lgrpid;
468 470
469 471 cpu_lpl = cp->cpu_lpl;
470 472 /*
471 473 * let the lgroup framework know cp has left the partition
472 474 */
473 475 lgrp_config(LGRP_CONFIG_CPUPART_DEL, (uintptr_t)cp, lgrpid);
474 476
475 477 /* move out of old partition */
476 478 oldpp->cp_ncpus--;
477 479 if (oldpp->cp_ncpus > 0) {
478 480
479 481 ncp = cp->cpu_prev_part->cpu_next_part = cp->cpu_next_part;
480 482 cp->cpu_next_part->cpu_prev_part = cp->cpu_prev_part;
481 483 if (oldpp->cp_cpulist == cp) {
482 484 oldpp->cp_cpulist = ncp;
483 485 }
484 486 } else {
485 487 ncp = oldpp->cp_cpulist = NULL;
486 488 cp_numparts_nonempty--;
487 489 ASSERT(cp_numparts_nonempty != 0);
488 490 }
489 491 oldpp->cp_gen++;
490 492
491 493 /* move into new partition */
492 494 newlist = newpp->cp_cpulist;
493 495 if (newlist == NULL) {
494 496 newpp->cp_cpulist = cp->cpu_next_part = cp->cpu_prev_part = cp;
495 497 cp_numparts_nonempty++;
496 498 ASSERT(cp_numparts_nonempty != 0);
497 499 } else {
498 500 cp->cpu_next_part = newlist;
499 501 cp->cpu_prev_part = newlist->cpu_prev_part;
500 502 newlist->cpu_prev_part->cpu_next_part = cp;
501 503 newlist->cpu_prev_part = cp;
502 504 }
503 505 cp->cpu_part = newpp;
504 506 newpp->cp_ncpus++;
505 507 newpp->cp_gen++;
506 508
507 509 ASSERT(bitset_is_null(&newpp->cp_haltset));
508 510 ASSERT(bitset_is_null(&oldpp->cp_haltset));
509 511
510 512 /*
511 513 * let the lgroup framework know cp has entered the partition
512 514 */
513 515 lgrp_config(LGRP_CONFIG_CPUPART_ADD, (uintptr_t)cp, lgrpid);
514 516
515 517 /*
516 518 * If necessary, move threads off processor.
517 519 */
518 520 if (move_threads) {
519 521 ASSERT(ncp != NULL);
520 522
521 523 /*
522 524 * Walk through the active process list to look for
523 525 * threads that need a new home lgroup, or whose
524 526 * last CPU is the same CPU being moved out of
525 527 * the partition.
526 528 */
527 529
528 530 for (p = practive; p != NULL; p = p->p_next) {
529 531
530 532 t = p->p_tlist;
531 533
532 534 if (t == NULL)
533 535 continue;
534 536
535 537 lgrp_diff_lpl = 0;
536 538
537 539 do {
538 540
539 541 ASSERT(t->t_lpl != NULL);
540 542
541 543 /*
542 544 * Update the count of how many threads are
543 545 * in this CPU's lgroup but have a different lpl
544 546 */
545 547
546 548 if (t->t_lpl != cpu_lpl &&
547 549 t->t_lpl->lpl_lgrpid == lgrpid)
548 550 lgrp_diff_lpl++;
549 551 /*
550 552 * If the lgroup that t is assigned to no
551 553 * longer has any CPUs in t's partition,
552 554 * we'll have to choose a new lgroup for t.
553 555 */
554 556
555 557 if (!LGRP_CPUS_IN_PART(t->t_lpl->lpl_lgrpid,
556 558 t->t_cpupart)) {
557 559 lgrp_move_thread(t,
558 560 lgrp_choose(t, t->t_cpupart), 0);
559 561 }
560 562
561 563 /*
562 564 * make sure lpl points to our own partition
563 565 */
564 566 ASSERT(t->t_lpl >= t->t_cpupart->cp_lgrploads &&
565 567 (t->t_lpl < t->t_cpupart->cp_lgrploads +
566 568 t->t_cpupart->cp_nlgrploads));
567 569
568 570 ASSERT(t->t_lpl->lpl_ncpu > 0);
569 571
570 572 /* Update CPU last ran on if it was this CPU */
571 573 if (t->t_cpu == cp && t->t_cpupart == oldpp &&
572 574 t->t_bound_cpu != cp) {
573 - t->t_cpu = disp_lowpri_cpu(ncp,
574 - t->t_lpl, t->t_pri, NULL);
575 + t->t_cpu = disp_lowpri_cpu(ncp, t,
576 + t->t_pri);
575 577 }
576 578 t = t->t_forw;
577 579 } while (t != p->p_tlist);
578 580
579 581 /*
580 582 * Didn't find any threads in the same lgroup as this
581 583 * CPU with a different lpl, so remove the lgroup from
582 584 * the process lgroup bitmask.
583 585 */
584 586
585 587 if (lgrp_diff_lpl == 0)
586 588 klgrpset_del(p->p_lgrpset, lgrpid);
587 589 }
588 590
589 591 /*
590 592 * Walk thread list looking for threads that need to be
591 593 * rehomed, since there are some threads that are not in
592 594 * their process's p_tlist.
593 595 */
594 596
595 597 t = curthread;
596 598
597 599 do {
598 600 ASSERT(t != NULL && t->t_lpl != NULL);
599 601
600 602 /*
601 603 * If the lgroup that t is assigned to no
602 604 * longer has any CPUs in t's partition,
603 605 * we'll have to choose a new lgroup for t.
604 606 * Also, choose best lgroup for home when
605 607 * thread has specified lgroup affinities,
606 608 * since there may be an lgroup with more
607 609 * affinity available after moving CPUs
608 610 * around.
609 611 */
610 612 if (!LGRP_CPUS_IN_PART(t->t_lpl->lpl_lgrpid,
611 613 t->t_cpupart) || t->t_lgrp_affinity) {
612 614 lgrp_move_thread(t,
613 615 lgrp_choose(t, t->t_cpupart), 1);
614 616 }
615 617
616 618 /* make sure lpl points to our own partition */
617 619 ASSERT((t->t_lpl >= t->t_cpupart->cp_lgrploads) &&
618 620 (t->t_lpl < t->t_cpupart->cp_lgrploads +
619 621 t->t_cpupart->cp_nlgrploads));
620 622
621 623 ASSERT(t->t_lpl->lpl_ncpu > 0);
622 624
623 625 /* Update CPU last ran on if it was this CPU */
624 626 if (t->t_cpu == cp && t->t_cpupart == oldpp &&
625 627 t->t_bound_cpu != cp) {
626 - t->t_cpu = disp_lowpri_cpu(ncp, t->t_lpl,
627 - t->t_pri, NULL);
628 + t->t_cpu = disp_lowpri_cpu(ncp, t,
629 + t->t_pri);
628 630 }
629 631
630 632 t = t->t_next;
631 633 } while (t != curthread);
632 634
633 635 /*
634 636 * Clear off the CPU's run queue, and the kp queue if the
635 637 * partition is now empty.
636 638 */
637 639 disp_cpu_inactive(cp);
638 640
639 641 /*
640 642 * Make cp switch to a thread from the new partition.
641 643 */
642 644 cp->cpu_runrun = 1;
643 645 cp->cpu_kprunrun = 1;
644 646 }
645 647
646 648 cpu_inmotion = NULL;
647 649 start_cpus();
648 650
649 651 /*
650 652 * Let anyone interested know that cpu has been added to the set.
651 653 */
652 654 cpu_state_change_notify(cp->cpu_id, CPU_CPUPART_IN);
653 655
654 656 /*
655 657 * Now let the cyclic subsystem know that it can reshuffle cyclics
656 658 * bound to the new processor set.
657 659 */
658 660 cyclic_move_in(cp);
659 661
660 662 return (0);
661 663 }
662 664
663 665 /*
664 666 * Check if thread can be moved to a new cpu partition. Called by
665 667 * cpupart_move_thread() and pset_bind_start().
666 668 */
667 669 int
668 670 cpupart_movable_thread(kthread_id_t tp, cpupart_t *cp, int ignore)
669 671 {
670 672 ASSERT(MUTEX_HELD(&cpu_lock));
671 673 ASSERT(MUTEX_HELD(&ttoproc(tp)->p_lock));
672 674 ASSERT(cp != NULL);
673 675 ASSERT(THREAD_LOCK_HELD(tp));
674 676
675 677 /*
676 678 * CPU-bound threads can't be moved.
677 679 */
678 680 if (!ignore) {
679 681 cpu_t *boundcpu = tp->t_bound_cpu ? tp->t_bound_cpu :
680 682 tp->t_weakbound_cpu;
681 683 if (boundcpu != NULL && boundcpu->cpu_part != cp)
682 684 return (EBUSY);
683 685 }
684 686
685 687 if (tp->t_cid == sysdccid) {
686 688 return (EINVAL); /* For now, sysdc threads can't move */
687 689 }
688 690
689 691 return (0);
690 692 }
691 693
692 694 /*
693 695 * Move thread to new partition. If ignore is non-zero, then CPU
694 696 * bindings should be ignored (this is used when destroying a
695 697 * partition).
696 698 */
697 699 static int
698 700 cpupart_move_thread(kthread_id_t tp, cpupart_t *newpp, int ignore,
699 701 void *projbuf, void *zonebuf)
700 702 {
701 703 cpupart_t *oldpp = tp->t_cpupart;
702 704 int ret;
703 705
704 706 ASSERT(MUTEX_HELD(&cpu_lock));
705 707 ASSERT(MUTEX_HELD(&pidlock));
706 708 ASSERT(MUTEX_HELD(&ttoproc(tp)->p_lock));
707 709 ASSERT(newpp != NULL);
708 710
709 711 if (newpp->cp_cpulist == NULL)
710 712 return (EINVAL);
711 713
712 714 /*
713 715 * Check for errors first.
714 716 */
715 717 thread_lock(tp);
716 718 if ((ret = cpupart_movable_thread(tp, newpp, ignore)) != 0) {
717 719 thread_unlock(tp);
718 720 return (ret);
719 721 }
720 722
721 723 /* move the thread */
722 724 if (oldpp != newpp) {
723 725 /*
724 726 * Make the thread switch to the new partition.
725 727 */
726 728 tp->t_cpupart = newpp;
727 729 ASSERT(tp->t_lpl != NULL);
728 730 /*
729 731 * Leave the thread on the same lgroup if possible; otherwise
730 732 * choose a new lgroup for it. In either case, update its
731 733 * t_lpl.
732 734 */
733 735 if (LGRP_CPUS_IN_PART(tp->t_lpl->lpl_lgrpid, newpp) &&
734 736 tp->t_lgrp_affinity == NULL) {
735 737 /*
736 738 * The thread's lgroup has CPUs in the thread's new
737 739 * partition, so the thread can stay assigned to the
738 740 * same lgroup. Update its t_lpl to point to the
739 741 * lpl_t for its lgroup in its new partition.
740 742 */
741 743 lgrp_move_thread(tp, &tp->t_cpupart->\
742 744 cp_lgrploads[tp->t_lpl->lpl_lgrpid], 1);
743 745 } else {
744 746 /*
745 747 * The thread's lgroup has no cpus in its new
746 748 * partition or it has specified lgroup affinities,
747 749 * so choose the best lgroup for the thread and
748 750 * assign it to that lgroup.
749 751 */
750 752 lgrp_move_thread(tp, lgrp_choose(tp, tp->t_cpupart),
751 753 1);
752 754 }
753 755 /*
754 756 * make sure lpl points to our own partition
755 757 */
756 758 ASSERT((tp->t_lpl >= tp->t_cpupart->cp_lgrploads) &&
757 759 (tp->t_lpl < tp->t_cpupart->cp_lgrploads +
758 760 tp->t_cpupart->cp_nlgrploads));
759 761
760 762 ASSERT(tp->t_lpl->lpl_ncpu > 0);
761 763
762 764 if (tp->t_state == TS_ONPROC) {
763 765 cpu_surrender(tp);
764 766 } else if (tp->t_state == TS_RUN) {
765 767 (void) dispdeq(tp);
766 768 setbackdq(tp);
767 769 }
768 770 }
769 771
770 772 /*
771 773 * Our binding has changed; set TP_CHANGEBIND.
772 774 */
773 775 tp->t_proc_flag |= TP_CHANGEBIND;
774 776 aston(tp);
775 777
776 778 thread_unlock(tp);
777 779 fss_changepset(tp, newpp, projbuf, zonebuf);
778 780
779 781 return (0); /* success */
780 782 }
781 783
782 784
783 785 /*
784 786 * This function binds a thread to a partition. Must be called with the
785 787 * p_lock of the containing process held (to keep the thread from going
786 788 * away), and thus also with cpu_lock held (since cpu_lock must be
787 789 * acquired before p_lock). If ignore is non-zero, then CPU bindings
788 790 * should be ignored (this is used when destroying a partition).
789 791 */
790 792 int
791 793 cpupart_bind_thread(kthread_id_t tp, psetid_t psid, int ignore, void *projbuf,
792 794 void *zonebuf)
793 795 {
794 796 cpupart_t *newpp;
795 797
796 798 ASSERT(pool_lock_held());
797 799 ASSERT(MUTEX_HELD(&cpu_lock));
798 800 ASSERT(MUTEX_HELD(&pidlock));
799 801 ASSERT(MUTEX_HELD(&ttoproc(tp)->p_lock));
800 802
801 803 if (psid == PS_NONE)
802 804 newpp = &cp_default;
803 805 else {
804 806 newpp = cpupart_find(psid);
805 807 if (newpp == NULL) {
806 808 return (EINVAL);
807 809 }
808 810 }
809 811 return (cpupart_move_thread(tp, newpp, ignore, projbuf, zonebuf));
810 812 }
811 813
812 814
813 815 /*
814 816 * Create a new partition. On MP systems, this also allocates a
815 817 * kpreempt disp queue for that partition.
816 818 */
817 819 int
818 820 cpupart_create(psetid_t *psid)
819 821 {
820 822 cpupart_t *pp;
821 823
822 824 ASSERT(pool_lock_held());
823 825
824 826 pp = kmem_zalloc(sizeof (cpupart_t), KM_SLEEP);
825 827
826 828 mutex_enter(&cpu_lock);
827 829 if (cp_numparts == cp_max_numparts) {
828 830 mutex_exit(&cpu_lock);
829 831 kmem_free(pp, sizeof (cpupart_t));
830 832 return (ENOMEM);
831 833 }
832 834 cp_numparts++;
833 835 /* find the next free partition ID */
834 836 while (cpupart_find(CPTOPS(cp_id_next)) != NULL)
835 837 cp_id_next++;
836 838 pp->cp_id = cp_id_next++;
837 839 pp->cp_ncpus = 0;
838 840 pp->cp_cpulist = NULL;
839 841 pp->cp_attr = 0;
840 842 klgrpset_clear(pp->cp_lgrpset);
841 843 pp->cp_kp_queue.disp_maxrunpri = -1;
842 844 pp->cp_kp_queue.disp_max_unbound_pri = -1;
843 845 pp->cp_kp_queue.disp_cpu = NULL;
844 846 pp->cp_gen = 0;
845 847 DISP_LOCK_INIT(&pp->cp_kp_queue.disp_lock);
846 848 *psid = CPTOPS(pp->cp_id);
847 849 disp_kp_alloc(&pp->cp_kp_queue, v.v_nglobpris);
848 850 cpupart_kstat_create(pp);
849 851 cpupart_lpl_initialize(pp);
850 852
851 853 bitset_init(&pp->cp_cmt_pgs);
852 854
853 855 /*
854 856 * Initialize and size the partition's bitset of halted CPUs.
855 857 */
856 858 bitset_init_fanout(&pp->cp_haltset, cp_haltset_fanout);
857 859 bitset_resize(&pp->cp_haltset, max_ncpus);
858 860
859 861 /*
860 862 * Pause all CPUs while changing the partition list, to make sure
861 863 * the clock thread (which traverses the list without holding
862 864 * cpu_lock) isn't running.
863 865 */
864 866 pause_cpus(NULL, NULL);
865 867 pp->cp_next = cp_list_head;
866 868 pp->cp_prev = cp_list_head->cp_prev;
867 869 cp_list_head->cp_prev->cp_next = pp;
868 870 cp_list_head->cp_prev = pp;
869 871 start_cpus();
870 872 mutex_exit(&cpu_lock);
871 873
872 874 return (0);
873 875 }
874 876
875 877 /*
876 878 * Move threads from specified partition to cp_default. If `force' is specified,
877 879 * move all threads, otherwise move only soft-bound threads.
878 880 */
879 881 static int
880 882 cpupart_unbind_threads(cpupart_t *pp, boolean_t unbind_all)
881 883 {
882 - void *projbuf, *zonebuf;
884 + void *projbuf, *zonebuf;
883 885 kthread_t *t;
884 886 proc_t *p;
885 887 int err = 0;
886 888 psetid_t psid = pp->cp_id;
887 889
888 890 ASSERT(pool_lock_held());
889 891 ASSERT(MUTEX_HELD(&cpu_lock));
890 892
891 893 if (pp == NULL || pp == &cp_default) {
892 894 return (EINVAL);
893 895 }
894 896
895 897 /*
896 898 * Pre-allocate enough buffers for FSS for all active projects and
897 899 * for all active zones on the system. Unused buffers will be
898 900 * freed later by fss_freebuf().
899 901 */
900 902 projbuf = fss_allocbuf(FSS_NPROJ_BUF, FSS_ALLOC_PROJ);
901 903 zonebuf = fss_allocbuf(FSS_NPROJ_BUF, FSS_ALLOC_ZONE);
902 904
903 905 mutex_enter(&pidlock);
904 906 t = curthread;
905 907 do {
906 908 if (t->t_bind_pset == psid) {
907 909 again: p = ttoproc(t);
908 910 mutex_enter(&p->p_lock);
909 911 if (ttoproc(t) != p) {
910 912 /*
911 913 * lwp_exit has changed this thread's process
912 914 * pointer before we grabbed its p_lock.
913 915 */
914 916 mutex_exit(&p->p_lock);
915 917 goto again;
916 918 }
917 919
918 920 /*
919 921 * Can only unbind threads which have revocable binding
920 922 * unless force unbinding is requested.
921 923 */
922 924 if (unbind_all || TB_PSET_IS_SOFT(t)) {
923 925 err = cpupart_bind_thread(t, PS_NONE, 1,
924 926 projbuf, zonebuf);
925 927 if (err) {
926 928 mutex_exit(&p->p_lock);
927 929 mutex_exit(&pidlock);
928 930 fss_freebuf(projbuf, FSS_ALLOC_PROJ);
929 931 fss_freebuf(zonebuf, FSS_ALLOC_ZONE);
930 932 return (err);
931 933 }
932 934 t->t_bind_pset = PS_NONE;
933 935 }
934 936 mutex_exit(&p->p_lock);
935 937 }
936 938 t = t->t_next;
937 939 } while (t != curthread);
938 940
939 941 mutex_exit(&pidlock);
940 942 fss_freebuf(projbuf, FSS_ALLOC_PROJ);
941 943 fss_freebuf(zonebuf, FSS_ALLOC_ZONE);
942 944 return (err);
943 945 }
944 946
945 947 /*
946 948 * Destroy a partition.
947 949 */
948 950 int
949 951 cpupart_destroy(psetid_t psid)
950 952 {
951 953 cpu_t *cp, *first_cp;
952 954 cpupart_t *pp, *newpp;
953 955 int err = 0;
954 956
955 957 ASSERT(pool_lock_held());
956 958 mutex_enter(&cpu_lock);
957 959
958 960 pp = cpupart_find(psid);
959 961 if (pp == NULL || pp == &cp_default) {
960 962 mutex_exit(&cpu_lock);
961 963 return (EINVAL);
962 964 }
963 965
964 966 /*
965 967 * Unbind all the threads currently bound to the partition.
966 968 */
967 969 err = cpupart_unbind_threads(pp, B_TRUE);
968 970 if (err) {
969 971 mutex_exit(&cpu_lock);
970 972 return (err);
971 973 }
972 974
973 975 newpp = &cp_default;
974 976 while ((cp = pp->cp_cpulist) != NULL) {
975 977 if (err = cpupart_move_cpu(cp, newpp, 0)) {
976 978 mutex_exit(&cpu_lock);
977 979 return (err);
978 980 }
979 981 }
980 982
981 983 ASSERT(bitset_is_null(&pp->cp_cmt_pgs));
982 984 ASSERT(bitset_is_null(&pp->cp_haltset));
983 985
984 986 /*
985 987 * Teardown the partition's group of active CMT PGs and halted
986 988 * CPUs now that they have all left.
987 989 */
988 990 bitset_fini(&pp->cp_cmt_pgs);
989 991 bitset_fini(&pp->cp_haltset);
990 992
991 993 /*
992 994 * Reset the pointers in any offline processors so they won't
993 995 * try to rejoin the destroyed partition when they're turned
994 996 * online.
995 997 */
996 998 first_cp = cp = CPU;
997 999 do {
998 1000 if (cp->cpu_part == pp) {
999 1001 ASSERT(cp->cpu_flags & CPU_OFFLINE);
1000 1002 cp->cpu_part = newpp;
1001 1003 }
1002 1004 cp = cp->cpu_next;
1003 1005 } while (cp != first_cp);
1004 1006
1005 1007 /*
1006 1008 * Pause all CPUs while changing the partition list, to make sure
1007 1009 * the clock thread (which traverses the list without holding
1008 1010 * cpu_lock) isn't running.
1009 1011 */
1010 1012 pause_cpus(NULL, NULL);
1011 1013 pp->cp_prev->cp_next = pp->cp_next;
1012 1014 pp->cp_next->cp_prev = pp->cp_prev;
1013 1015 if (cp_list_head == pp)
1014 1016 cp_list_head = pp->cp_next;
1015 1017 start_cpus();
1016 1018
1017 1019 if (cp_id_next > pp->cp_id)
1018 1020 cp_id_next = pp->cp_id;
1019 1021
1020 1022 if (pp->cp_kstat)
1021 1023 kstat_delete(pp->cp_kstat);
1022 1024
1023 1025 cp_numparts--;
1024 1026
1025 1027 disp_kp_free(&pp->cp_kp_queue);
1026 1028
1027 1029 cpupart_lpl_teardown(pp);
1028 1030
1029 1031 kmem_free(pp, sizeof (cpupart_t));
1030 1032 mutex_exit(&cpu_lock);
1031 1033
1032 1034 return (err);
1033 1035 }
1034 1036
1035 1037
1036 1038 /*
1037 1039 * Return the ID of the partition to which the specified processor belongs.
1038 1040 */
1039 1041 psetid_t
1040 1042 cpupart_query_cpu(cpu_t *cp)
1041 1043 {
1042 1044 ASSERT(MUTEX_HELD(&cpu_lock));
1043 1045
1044 1046 return (CPTOPS(cp->cpu_part->cp_id));
1045 1047 }
1046 1048
1047 1049
1048 1050 /*
1049 1051 * Attach a processor to an existing partition.
1050 1052 */
1051 1053 int
1052 1054 cpupart_attach_cpu(psetid_t psid, cpu_t *cp, int forced)
1053 1055 {
1054 1056 cpupart_t *pp;
1055 1057 int err;
1056 1058
1057 1059 ASSERT(pool_lock_held());
1058 1060 ASSERT(MUTEX_HELD(&cpu_lock));
1059 1061
1060 1062 pp = cpupart_find(psid);
1061 1063 if (pp == NULL)
1062 1064 return (EINVAL);
1063 1065 if (cp->cpu_flags & CPU_OFFLINE)
1064 1066 return (EINVAL);
1065 1067
1066 1068 err = cpupart_move_cpu(cp, pp, forced);
1067 1069 return (err);
1068 1070 }
1069 1071
1070 1072 /*
1071 1073 * Get a list of cpus belonging to the partition. If numcpus is NULL,
1072 1074 * this just checks for a valid partition. If numcpus is non-NULL but
1073 1075 * cpulist is NULL, the current number of cpus is stored in *numcpus.
1074 1076 * If both are non-NULL, the current number of cpus is stored in *numcpus,
1075 1077 * and a list of those cpus up to the size originally in *numcpus is
1076 1078 * stored in cpulist[]. Also, store the processor set id in *psid.
1077 1079 * This is useful in case the processor set id passed in was PS_MYID.
1078 1080 */
1079 1081 int
1080 1082 cpupart_get_cpus(psetid_t *psid, processorid_t *cpulist, uint_t *numcpus)
1081 1083 {
1082 1084 cpupart_t *pp;
1083 1085 uint_t ncpus;
1084 1086 cpu_t *c;
1085 1087 int i;
1086 1088
1087 1089 mutex_enter(&cpu_lock);
1088 1090 pp = cpupart_find(*psid);
1089 1091 if (pp == NULL) {
1090 1092 mutex_exit(&cpu_lock);
1091 1093 return (EINVAL);
1092 1094 }
1093 1095 *psid = CPTOPS(pp->cp_id);
1094 1096 ncpus = pp->cp_ncpus;
1095 1097 if (numcpus) {
1096 1098 if (ncpus > *numcpus) {
1097 1099 /*
1098 1100 * Only copy as many cpus as were passed in, but
1099 1101 * pass back the real number.
1100 1102 */
1101 1103 uint_t t = ncpus;
1102 1104 ncpus = *numcpus;
1103 1105 *numcpus = t;
1104 1106 } else
1105 1107 *numcpus = ncpus;
1106 1108
1107 1109 if (cpulist) {
1108 1110 c = pp->cp_cpulist;
1109 1111 for (i = 0; i < ncpus; i++) {
1110 1112 ASSERT(c != NULL);
1111 1113 cpulist[i] = c->cpu_id;
1112 1114 c = c->cpu_next_part;
1113 1115 }
1114 1116 }
1115 1117 }
1116 1118 mutex_exit(&cpu_lock);
1117 1119 return (0);
1118 1120 }
1119 1121
1120 1122 /*
1121 1123 * Reallocate kpreempt queues for each CPU partition. Called from
1122 1124 * disp_setup when a new scheduling class is loaded that increases the
1123 1125 * number of priorities in the system.
1124 1126 */
1125 1127 void
1126 1128 cpupart_kpqalloc(pri_t npri)
1127 1129 {
1128 1130 cpupart_t *cpp;
1129 1131
1130 1132 ASSERT(MUTEX_HELD(&cpu_lock));
1131 1133 cpp = cp_list_head;
1132 1134 do {
1133 1135 disp_kp_alloc(&cpp->cp_kp_queue, npri);
1134 1136 cpp = cpp->cp_next;
1135 1137 } while (cpp != cp_list_head);
1136 1138 }
1137 1139
1138 1140 int
1139 1141 cpupart_get_loadavg(psetid_t psid, int *buf, int nelem)
1140 1142 {
1141 1143 cpupart_t *cp;
1142 1144 int i;
1143 1145
1144 1146 ASSERT(nelem >= 0);
1145 1147 ASSERT(nelem <= LOADAVG_NSTATS);
1146 1148 ASSERT(MUTEX_HELD(&cpu_lock));
1147 1149
1148 1150 cp = cpupart_find(psid);
1149 1151 if (cp == NULL)
1150 1152 return (EINVAL);
1151 1153 for (i = 0; i < nelem; i++)
1152 1154 buf[i] = cp->cp_hp_avenrun[i] >> (16 - FSHIFT);
1153 1155
1154 1156 return (0);
1155 1157 }
1156 1158
1157 1159
1158 1160 uint_t
1159 1161 cpupart_list(psetid_t *list, uint_t nelem, int flag)
1160 1162 {
1161 1163 uint_t numpart = 0;
1162 1164 cpupart_t *cp;
1163 1165
1164 1166 ASSERT(MUTEX_HELD(&cpu_lock));
1165 1167 ASSERT(flag == CP_ALL || flag == CP_NONEMPTY);
1166 1168
1167 1169 if (list != NULL) {
1168 1170 cp = cp_list_head;
1169 1171 do {
1170 1172 if (((flag == CP_ALL) && (cp != &cp_default)) ||
1171 1173 ((flag == CP_NONEMPTY) && (cp->cp_ncpus != 0))) {
1172 1174 if (numpart == nelem)
1173 1175 break;
1174 1176 list[numpart++] = CPTOPS(cp->cp_id);
1175 1177 }
1176 1178 cp = cp->cp_next;
1177 1179 } while (cp != cp_list_head);
1178 1180 }
1179 1181
1180 1182 ASSERT(numpart < cp_numparts);
1181 1183
1182 1184 if (flag == CP_ALL)
1183 1185 numpart = cp_numparts - 1; /* leave out default partition */
1184 1186 else if (flag == CP_NONEMPTY)
1185 1187 numpart = cp_numparts_nonempty;
1186 1188
1187 1189 return (numpart);
1188 1190 }
1189 1191
1190 1192 int
1191 1193 cpupart_setattr(psetid_t psid, uint_t attr)
1192 1194 {
1193 1195 cpupart_t *cp;
1194 1196
1195 1197 ASSERT(pool_lock_held());
1196 1198
1197 1199 mutex_enter(&cpu_lock);
1198 1200 if ((cp = cpupart_find(psid)) == NULL) {
1199 1201 mutex_exit(&cpu_lock);
1200 1202 return (EINVAL);
1201 1203 }
1202 1204 /*
1203 1205 * PSET_NOESCAPE attribute for default cpu partition is always set
1204 1206 */
1205 1207 if (cp == &cp_default && !(attr & PSET_NOESCAPE)) {
1206 1208 mutex_exit(&cpu_lock);
1207 1209 return (EINVAL);
1208 1210 }
1209 1211 cp->cp_attr = attr;
1210 1212 mutex_exit(&cpu_lock);
1211 1213 return (0);
1212 1214 }
1213 1215
1214 1216 int
1215 1217 cpupart_getattr(psetid_t psid, uint_t *attrp)
1216 1218 {
1217 1219 cpupart_t *cp;
1218 1220
1219 1221 mutex_enter(&cpu_lock);
1220 1222 if ((cp = cpupart_find(psid)) == NULL) {
1221 1223 mutex_exit(&cpu_lock);
1222 1224 return (EINVAL);
1223 1225 }
1224 1226 *attrp = cp->cp_attr;
1225 1227 mutex_exit(&cpu_lock);
1226 1228 return (0);
1227 1229 }