            setbackdq(tp);
        }
        thread_unlock(tp);
    }
}

/*
 * Set affinity for a specified CPU.
 *
 * Specifying a cpu_id of CPU_CURRENT, allowed _only_ when setting affinity for
 * curthread, will set affinity to the CPU on which the thread is currently
 * running. For other cpu_id values, the caller must ensure that the
 * referenced CPU remains valid, which can be done by holding cpu_lock across
 * this call.
 *
 * CPU affinity is guaranteed after return of thread_affinity_set(). If a
 * caller setting affinity to CPU_CURRENT requires that its thread not migrate
 * CPUs prior to a successful return, it should take extra precautions (such as
 * its own call to kpreempt_disable) to ensure that safety.
 *
 * CPU_BEST can be used to pick a "best" CPU to migrate to, including
 * potentially the current CPU.
 *
 * A CPU affinity reference count is maintained by thread_affinity_set and
 * thread_affinity_clear (incrementing and decrementing it, respectively),
 * maintaining CPU affinity while the count is non-zero, and allowing regions
 * of code which require affinity to be nested.
 */
void
thread_affinity_set(kthread_id_t t, int cpu_id)
{
    cpu_t *cp;

    ASSERT(!(t == curthread && t->t_weakbound_cpu != NULL));

    if (cpu_id == CPU_CURRENT) {
        VERIFY3P(t, ==, curthread);
        kpreempt_disable();
        cp = CPU;
    } else if (cpu_id == CPU_BEST) {
        VERIFY3P(t, ==, curthread);
        kpreempt_disable();
        cp = disp_choose_best_cpu();
    } else {
        /*
         * We should be asserting that cpu_lock is held here, but
         * the NCA code doesn't acquire it. The following assert
         * should be uncommented when the NCA code is fixed.
         *
         * ASSERT(MUTEX_HELD(&cpu_lock));
         */
        VERIFY((cpu_id >= 0) && (cpu_id < NCPU));
        cp = cpu[cpu_id];

        /* user must provide a good cpu_id */
        VERIFY(cp != NULL);
    }

    /*
     * If there is already a hard affinity requested, and this affinity
     * conflicts with that, panic.
     */
    thread_lock(t);
    if (t->t_affinitycnt > 0 && t->t_bound_cpu != cp) {
        panic("affinity_set: setting %p but already bound to %p",
            (void *)cp, (void *)t->t_bound_cpu);
    }
    t->t_affinitycnt++;
    t->t_bound_cpu = cp;

    /*
     * Make sure we're running on the right CPU.
     */
    if (cp != t->t_cpu || t != curthread) {
        ASSERT(cpu_id != CPU_CURRENT);
        force_thread_migrate(t);    /* drops thread lock */
    } else {
        thread_unlock(t);
    }

    if (cpu_id == CPU_CURRENT || cpu_id == CPU_BEST)
        kpreempt_enable();
}
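
/*
 * Illustrative sketch (not part of the original source): how a caller might
 * rely on the affinity reference count to stay on its current CPU across a
 * short region, including a nested region. The function name
 * example_affinity_nesting is a hypothetical placeholder, and the block is
 * guarded by #if 0 since it is an example only.
 */
#if 0
static void
example_affinity_nesting(void)
{
    /* Pin curthread to the CPU it is currently running on. */
    thread_affinity_set(curthread, CPU_CURRENT);

    /* ... outer region of per-CPU work ... */

    /* Nested region: the reference count rises to 2; the binding is unchanged. */
    thread_affinity_set(curthread, CPU_CURRENT);
    /* ... inner region of per-CPU work ... */
    thread_affinity_clear(curthread);   /* count drops back to 1, still bound */

    /* Final clear drops the count to 0 and releases the CPU binding. */
    thread_affinity_clear(curthread);
}
#endif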

/*
 * Wrapper for backward compatibility.
 */
void
affinity_set(int cpu_id)
{
    thread_affinity_set(curthread, cpu_id);
}
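
/*
 * Illustrative sketch (not part of the original source): binding a thread to
 * an explicit CPU id. As the comment above thread_affinity_set() notes, the
 * caller holds cpu_lock across the call so that the referenced CPU stays
 * valid. The function name example_affinity_explicit_cpu and the CPU id 0
 * are hypothetical placeholders; guarded by #if 0 since it is an example only.
 */
#if 0
static void
example_affinity_explicit_cpu(kthread_id_t t)
{
    mutex_enter(&cpu_lock);
    thread_affinity_set(t, 0);  /* bind t to CPU id 0 */
    mutex_exit(&cpu_lock);

    /* ... work that must run on CPU id 0 ... */

    thread_affinity_clear(t);   /* drop this affinity reference */
}
#endif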

/*
 * Decrement the affinity reservation count and if it becomes zero,
 * clear the CPU affinity for the current thread, or set it to the user's
 * software binding request.
 */
void
thread_affinity_clear(kthread_id_t t)
{
    register processorid_t binding;
                 * Otherwise, update the count of how many
                 * threads are in this CPU's lgroup but have
                 * a different lpl.
                 */

                if (cpu_lpl->lpl_ncpu == 0) {
                    if (t->t_lpl == cpu_lpl)
                        lgrp_move_thread(t,
                            lgrp_choose(t,
                            t->t_cpupart), 0);
                    else if (t->t_lpl->lpl_lgrpid ==
                        cpu_lpl->lpl_lgrpid)
                        lgrp_diff_lpl++;
                }
                ASSERT(t->t_lpl->lpl_ncpu > 0);

                /*
                 * Update CPU last ran on if it was this CPU
                 */
                if (t->t_cpu == cp && t->t_bound_cpu != cp)
                    t->t_cpu = disp_lowpri_cpu(ncp, t,
                        t->t_pri);
                ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp ||
                    t->t_weakbound_cpu == cp);

                t = t->t_forw;
            } while (t != p->p_tlist);

            /*
             * Didn't find any threads in the same lgroup as this
             * CPU with a different lpl, so remove the lgroup from
             * the process lgroup bitmask.
             */

            if (lgrp_diff_lpl == 0)
                klgrpset_del(p->p_lgrpset, cpu_lpl->lpl_lgrpid);
        }

        /*
         * Walk thread list looking for threads that need to be
         * rehomed, since there are some threads that are not in
         * their process's p_tlist.
         */

        t = curthread;
        do {
            ASSERT(t != NULL && t->t_lpl != NULL);

            /*
             * Rehome threads with same lpl as this CPU when this
             * is the last CPU in the lpl.
             */

            if ((cpu_lpl->lpl_ncpu == 0) && (t->t_lpl == cpu_lpl))
                lgrp_move_thread(t,
                    lgrp_choose(t, t->t_cpupart), 1);

            ASSERT(t->t_lpl->lpl_ncpu > 0);

            /*
             * Update CPU last ran on if it was this CPU
             */

            if (t->t_cpu == cp && t->t_bound_cpu != cp)
                t->t_cpu = disp_lowpri_cpu(ncp, t, t->t_pri);

            ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp ||
                t->t_weakbound_cpu == cp);
            t = t->t_next;

        } while (t != curthread);
        ASSERT((cp->cpu_flags & (CPU_FAULTED | CPU_SPARE)) == 0);
        cp->cpu_flags |= CPU_OFFLINE;
        disp_cpu_inactive(cp);
        if (!no_quiesce)
            cp->cpu_flags |= CPU_QUIESCED;
        ncpus_online--;
        cpu_set_state(cp);
        cpu_inmotion = NULL;
        start_cpus();
        cpu_stats_kstat_destroy(cp);
        cpu_delete_intrstat(cp);
        lgrp_kstat_destroy(cp);
    }

out: