382 setbackdq(tp);
383 }
384 thread_unlock(tp);
385 }
386 }
387
388 /*
389 * Set affinity for a specified CPU.
390 *
391 * Specifying a cpu_id of CPU_CURRENT, allowed _only_ when setting affinity for
392 * curthread, will set affinity to the CPU on which the thread is currently
393 * running. For other cpu_id values, the caller must ensure that the
394 * referenced CPU remains valid, which can be done by holding cpu_lock across
395 * this call.
396 *
397 * CPU affinity is guaranteed after return of thread_affinity_set(). If a
398 * caller setting affinity to CPU_CURRENT requires that its thread not migrate
399 * CPUs prior to a successful return, it should take extra precautions (such as
400  * its own call to kpreempt_disable) to ensure that safety.
401 *
402  * A CPU affinity reference count is maintained by thread_affinity_set and
403  * thread_affinity_clear (which increment and decrement it, respectively);
404  * affinity is held while the count is non-zero, so regions of code that
405  * require affinity can be nested.
406 */
407 void
408 thread_affinity_set(kthread_id_t t, int cpu_id)
409 {
410 cpu_t *cp;
411
412 ASSERT(!(t == curthread && t->t_weakbound_cpu != NULL));
413
414 if (cpu_id == CPU_CURRENT) {
415 VERIFY3P(t, ==, curthread);
416 kpreempt_disable();
417 cp = CPU;
418 } else {
419 /*
420 * We should be asserting that cpu_lock is held here, but
421 * the NCA code doesn't acquire it. The following assert
422 * should be uncommented when the NCA code is fixed.
423 *
424 * ASSERT(MUTEX_HELD(&cpu_lock));
425 */
426 VERIFY((cpu_id >= 0) && (cpu_id < NCPU));
427 cp = cpu[cpu_id];
428
429  /* caller must provide a valid cpu_id */
430 VERIFY(cp != NULL);
431 }
432
433 /*
434 * If there is already a hard affinity requested, and this affinity
435 * conflicts with that, panic.
436 */
437 thread_lock(t);
438 if (t->t_affinitycnt > 0 && t->t_bound_cpu != cp) {
439 panic("affinity_set: setting %p but already bound to %p",
440 (void *)cp, (void *)t->t_bound_cpu);
441 }
442 t->t_affinitycnt++;
443 t->t_bound_cpu = cp;
444
445 /*
446 * Make sure we're running on the right CPU.
447 */
448 if (cp != t->t_cpu || t != curthread) {
449 ASSERT(cpu_id != CPU_CURRENT);
450 force_thread_migrate(t); /* drops thread lock */
451 } else {
452 thread_unlock(t);
453 }
454
455 if (cpu_id == CPU_CURRENT) {
456 kpreempt_enable();
457 }
458 }
459
460 /*
461 * Wrapper for backward compatibility.
462 */
463 void
464 affinity_set(int cpu_id)
465 {
466 thread_affinity_set(curthread, cpu_id);
467 }
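/*
 * Illustrative usage sketch (not part of the original file): a caller that
 * needs to read a per-CPU value without migrating can pin itself to the CPU
 * it is running on for the duration of the access.  The function name and
 * the cpu_t field are hypothetical; thread_affinity_set(),
 * thread_affinity_clear(), CPU_CURRENT, curthread, and the CPU macro are
 * taken from the surrounding code.
 */
static uint64_t
example_read_local_counter(void)
{
	uint64_t val;

	thread_affinity_set(curthread, CPU_CURRENT);	/* pin to current CPU */
	val = CPU->cpu_example_counter;		/* hypothetical per-CPU field */
	thread_affinity_clear(curthread);	/* drop the affinity reference */

	return (val);
}

/*
 * Because affinity is reference counted, a routine like the one above can
 * itself be called from a region that has already set affinity; the inner
 * thread_affinity_clear() only decrements the count, and the outer region
 * keeps its binding until its own clear.
 */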
468
469 /*
470 * Decrement the affinity reservation count and if it becomes zero,
471 * clear the CPU affinity for the current thread, or set it to the user's
472 * software binding request.
473 */
474 void
475 thread_affinity_clear(kthread_id_t t)
476 {
477 register processorid_t binding;
1472 * Otherwise, update the count of how many
1473 * threads are in this CPU's lgroup but have
1474 * a different lpl.
1475 */
1476
1477 if (cpu_lpl->lpl_ncpu == 0) {
1478 if (t->t_lpl == cpu_lpl)
1479 lgrp_move_thread(t,
1480 lgrp_choose(t,
1481 t->t_cpupart), 0);
1482 else if (t->t_lpl->lpl_lgrpid ==
1483 cpu_lpl->lpl_lgrpid)
1484 lgrp_diff_lpl++;
1485 }
1486 ASSERT(t->t_lpl->lpl_ncpu > 0);
1487
1488 /*
1489 * Update CPU last ran on if it was this CPU
1490 */
1491 if (t->t_cpu == cp && t->t_bound_cpu != cp)
1492 t->t_cpu = disp_lowpri_cpu(ncp,
1493 t->t_lpl, t->t_pri, NULL);
1494 ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp ||
1495 t->t_weakbound_cpu == cp);
1496
1497 t = t->t_forw;
1498 } while (t != p->p_tlist);
1499
1500 /*
1501 * Didn't find any threads in the same lgroup as this
1502 * CPU with a different lpl, so remove the lgroup from
1503 * the process lgroup bitmask.
1504 */
1505
1506 if (lgrp_diff_lpl == 0)
1507 klgrpset_del(p->p_lgrpset, cpu_lpl->lpl_lgrpid);
1508 }
1509
1510 /*
1511 * Walk thread list looking for threads that need to be
1512 * rehomed, since there are some threads that are not in
1513 * their process's p_tlist.
1514  */
1515
1516 t = curthread;
1517 do {
1518 ASSERT(t != NULL && t->t_lpl != NULL);
1519
1520 /*
1521 * Rehome threads with same lpl as this CPU when this
1522 * is the last CPU in the lpl.
1523 */
1524
1525 if ((cpu_lpl->lpl_ncpu == 0) && (t->t_lpl == cpu_lpl))
1526 lgrp_move_thread(t,
1527 lgrp_choose(t, t->t_cpupart), 1);
1528
1529 ASSERT(t->t_lpl->lpl_ncpu > 0);
1530
1531 /*
1532 * Update CPU last ran on if it was this CPU
1533 */
1534
1535 if (t->t_cpu == cp && t->t_bound_cpu != cp) {
1536 t->t_cpu = disp_lowpri_cpu(ncp,
1537 t->t_lpl, t->t_pri, NULL);
1538 }
1539 ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp ||
1540 t->t_weakbound_cpu == cp);
1541 t = t->t_next;
1542
1543 } while (t != curthread);
1544 ASSERT((cp->cpu_flags & (CPU_FAULTED | CPU_SPARE)) == 0);
1545 cp->cpu_flags |= CPU_OFFLINE;
1546 disp_cpu_inactive(cp);
1547 if (!no_quiesce)
1548 cp->cpu_flags |= CPU_QUIESCED;
1549 ncpus_online--;
1550 cpu_set_state(cp);
1551 cpu_inmotion = NULL;
1552 start_cpus();
1553 cpu_stats_kstat_destroy(cp);
1554 cpu_delete_intrstat(cp);
1555 lgrp_kstat_destroy(cp);
1556 }
1557
1558 out:
382 setbackdq(tp);
383 }
384 thread_unlock(tp);
385 }
386 }
387
388 /*
389 * Set affinity for a specified CPU.
390 *
391 * Specifying a cpu_id of CPU_CURRENT, allowed _only_ when setting affinity for
392 * curthread, will set affinity to the CPU on which the thread is currently
393 * running. For other cpu_id values, the caller must ensure that the
394 * referenced CPU remains valid, which can be done by holding cpu_lock across
395 * this call.
396 *
397 * CPU affinity is guaranteed after return of thread_affinity_set(). If a
398 * caller setting affinity to CPU_CURRENT requires that its thread not migrate
399 * CPUs prior to a successful return, it should take extra precautions (such as
400  * its own call to kpreempt_disable) to ensure that safety.
401 *
402 * CPU_BEST can be used to pick a "best" CPU to migrate to, including
403 * potentially the current CPU.
404 *
405  * A CPU affinity reference count is maintained by thread_affinity_set and
406  * thread_affinity_clear (which increment and decrement it, respectively);
407  * affinity is held while the count is non-zero, so regions of code that
408  * require affinity can be nested.
409 */
410 void
411 thread_affinity_set(kthread_id_t t, int cpu_id)
412 {
413 cpu_t *cp;
414
415 ASSERT(!(t == curthread && t->t_weakbound_cpu != NULL));
416
417 if (cpu_id == CPU_CURRENT) {
418 VERIFY3P(t, ==, curthread);
419 kpreempt_disable();
420 cp = CPU;
421 } else if (cpu_id == CPU_BEST) {
422 VERIFY3P(t, ==, curthread);
423 kpreempt_disable();
424 cp = disp_choose_best_cpu();
425 } else {
426 /*
427 * We should be asserting that cpu_lock is held here, but
428 * the NCA code doesn't acquire it. The following assert
429 * should be uncommented when the NCA code is fixed.
430 *
431 * ASSERT(MUTEX_HELD(&cpu_lock));
432 */
433 VERIFY((cpu_id >= 0) && (cpu_id < NCPU));
434 cp = cpu[cpu_id];
435
436  /* caller must provide a valid cpu_id */
437 VERIFY(cp != NULL);
438 }
439
440 /*
441 * If there is already a hard affinity requested, and this affinity
442 * conflicts with that, panic.
443 */
444 thread_lock(t);
445 if (t->t_affinitycnt > 0 && t->t_bound_cpu != cp) {
446 panic("affinity_set: setting %p but already bound to %p",
447 (void *)cp, (void *)t->t_bound_cpu);
448 }
449 t->t_affinitycnt++;
450 t->t_bound_cpu = cp;
451
452 /*
453 * Make sure we're running on the right CPU.
454 */
455 if (cp != t->t_cpu || t != curthread) {
456 ASSERT(cpu_id != CPU_CURRENT);
457 force_thread_migrate(t); /* drops thread lock */
458 } else {
459 thread_unlock(t);
460 }
461
462 if (cpu_id == CPU_CURRENT || cpu_id == CPU_BEST)
463 kpreempt_enable();
464 }
465
466 /*
467 * Wrapper for backward compatibility.
468 */
469 void
470 affinity_set(int cpu_id)
471 {
472 thread_affinity_set(curthread, cpu_id);
473 }
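/*
 * Illustrative usage sketch (not part of the original file): with CPU_BEST
 * the dispatcher chooses the target CPU via disp_choose_best_cpu(), which
 * may well be the CPU the thread is already on.  The function name and the
 * work/arg callback are hypothetical; thread_affinity_set(),
 * thread_affinity_clear(), CPU_BEST, and curthread are taken from the
 * surrounding code.
 */
static void
example_run_on_best_cpu(void (*work)(void *), void *arg)
{
	/* Migrate (if necessary) to the dispatcher's choice of CPU ... */
	thread_affinity_set(curthread, CPU_BEST);

	/* ... hold affinity there while the work runs ... */
	work(arg);

	/* ... and release the affinity reference when done. */
	thread_affinity_clear(curthread);
}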
474
475 /*
476 * Decrement the affinity reservation count and if it becomes zero,
477 * clear the CPU affinity for the current thread, or set it to the user's
478 * software binding request.
479 */
480 void
481 thread_affinity_clear(kthread_id_t t)
482 {
483 register processorid_t binding;
1478 * Otherwise, update the count of how many
1479 * threads are in this CPU's lgroup but have
1480 * a different lpl.
1481 */
1482
1483 if (cpu_lpl->lpl_ncpu == 0) {
1484 if (t->t_lpl == cpu_lpl)
1485 lgrp_move_thread(t,
1486 lgrp_choose(t,
1487 t->t_cpupart), 0);
1488 else if (t->t_lpl->lpl_lgrpid ==
1489 cpu_lpl->lpl_lgrpid)
1490 lgrp_diff_lpl++;
1491 }
1492 ASSERT(t->t_lpl->lpl_ncpu > 0);
1493
1494 /*
1495 * Update CPU last ran on if it was this CPU
1496 */
1497 if (t->t_cpu == cp && t->t_bound_cpu != cp)
1498 t->t_cpu = disp_lowpri_cpu(ncp, t,
1499 t->t_pri);
1500 ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp ||
1501 t->t_weakbound_cpu == cp);
1502
1503 t = t->t_forw;
1504 } while (t != p->p_tlist);
1505
1506 /*
1507 * Didn't find any threads in the same lgroup as this
1508 * CPU with a different lpl, so remove the lgroup from
1509 * the process lgroup bitmask.
1510 */
1511
1512 if (lgrp_diff_lpl == 0)
1513 klgrpset_del(p->p_lgrpset, cpu_lpl->lpl_lgrpid);
1514 }
1515
1516 /*
1517 * Walk thread list looking for threads that need to be
1518 * rehomed, since there are some threads that are not in
1519 * their process's p_tlist.
1520  */
1521
1522 t = curthread;
1523 do {
1524 ASSERT(t != NULL && t->t_lpl != NULL);
1525
1526 /*
1527 * Rehome threads with same lpl as this CPU when this
1528 * is the last CPU in the lpl.
1529 */
1530
1531 if ((cpu_lpl->lpl_ncpu == 0) && (t->t_lpl == cpu_lpl))
1532 lgrp_move_thread(t,
1533 lgrp_choose(t, t->t_cpupart), 1);
1534
1535 ASSERT(t->t_lpl->lpl_ncpu > 0);
1536
1537 /*
1538 * Update CPU last ran on if it was this CPU
1539 */
1540
1541 if (t->t_cpu == cp && t->t_bound_cpu != cp)
1542 t->t_cpu = disp_lowpri_cpu(ncp, t, t->t_pri);
1543
1544 ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp ||
1545 t->t_weakbound_cpu == cp);
1546 t = t->t_next;
1547
1548 } while (t != curthread);
1549 ASSERT((cp->cpu_flags & (CPU_FAULTED | CPU_SPARE)) == 0);
1550 cp->cpu_flags |= CPU_OFFLINE;
1551 disp_cpu_inactive(cp);
1552 if (!no_quiesce)
1553 cp->cpu_flags |= CPU_QUIESCED;
1554 ncpus_online--;
1555 cpu_set_state(cp);
1556 cpu_inmotion = NULL;
1557 start_cpus();
1558 cpu_stats_kstat_destroy(cp);
1559 cpu_delete_intrstat(cp);
1560 lgrp_kstat_destroy(cp);
1561 }
1562
1563 out: