 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

/*
 * Architecture-independent CPU control functions.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/var.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#include <sys/cpu_event.h>
#include <sys/kstat.h>
#include <sys/uadmin.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/procset.h>
#include <sys/processor.h>
#include <sys/debug.h>
{
	ASSERT(THREAD_LOCK_HELD(tp));
	if (tp == curthread) {
		THREAD_TRANSITION(tp);
		CL_SETRUN(tp);
		thread_unlock_nopreempt(tp);
		swtch();
	} else {
		if (tp->t_state == TS_ONPROC) {
			cpu_surrender(tp);
		} else if (tp->t_state == TS_RUN) {
			(void) dispdeq(tp);
			setbackdq(tp);
		}
		thread_unlock(tp);
	}
}

/*
 * Set affinity for a specified CPU.
 * A reference count is incremented and the affinity is held until the
 * reference count is decremented to zero by thread_affinity_clear().
 * This is so regions of code requiring affinity can be nested.
 * Caller needs to ensure that cpu_id remains valid, which can be
 * done by holding cpu_lock across this call, unless the caller
 * specifies CPU_CURRENT in which case the cpu_lock will be acquired
 * by thread_affinity_set and CPU->cpu_id will be the target CPU.
 */
void
thread_affinity_set(kthread_id_t t, int cpu_id)
{
	cpu_t	*cp;
	int	c;

	ASSERT(!(t == curthread && t->t_weakbound_cpu != NULL));

	if ((c = cpu_id) == CPU_CURRENT) {
		mutex_enter(&cpu_lock);
		cpu_id = CPU->cpu_id;
	}
	/*
	 * We should be asserting that cpu_lock is held here, but
	 * the NCA code doesn't acquire it.  The following assert
	 * should be uncommented when the NCA code is fixed.
	 *
	 * ASSERT(MUTEX_HELD(&cpu_lock));
	 */
	ASSERT((cpu_id >= 0) && (cpu_id < NCPU));
	cp = cpu[cpu_id];
	ASSERT(cp != NULL);		/* user must provide a good cpu_id */
	/*
	 * If there is already a hard affinity requested, and this affinity
	 * conflicts with that, panic.
	 */
	thread_lock(t);
	if (t->t_affinitycnt > 0 && t->t_bound_cpu != cp) {
		panic("affinity_set: setting %p but already bound to %p",
		    (void *)cp, (void *)t->t_bound_cpu);
	}
	t->t_affinitycnt++;
	t->t_bound_cpu = cp;

	/*
	 * Make sure we're running on the right CPU.
	 */
	if (cp != t->t_cpu || t != curthread) {
		force_thread_migrate(t);	/* drops thread lock */
	} else {
		thread_unlock(t);
	}

	if (c == CPU_CURRENT)
		mutex_exit(&cpu_lock);
}
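
/*
 * Illustrative sketch only (not part of the original file): one way a
 * caller might use the interface described above to pin itself to a
 * specific CPU.  The function name, the pinned region, and the
 * AFFINITY_USAGE_EXAMPLE guard (which keeps this sketch out of any real
 * build) are hypothetical.  cpu_lock is held across the call so that
 * cpu_id stays valid, and such regions may nest because each set/clear
 * pair adjusts the affinity reference count.
 */
#ifdef	AFFINITY_USAGE_EXAMPLE
static void
affinity_pin_example(processorid_t cpu_id)
{
	mutex_enter(&cpu_lock);		/* keep cpu_id valid across the call */
	thread_affinity_set(curthread, cpu_id);
	mutex_exit(&cpu_lock);

	/* ... work that must stay on cpu[cpu_id] ... */

	thread_affinity_clear(curthread);	/* affinity drops at count zero */
}
#endif	/* AFFINITY_USAGE_EXAMPLE */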

/*
 * Wrapper for backward compatibility.
 */
void
affinity_set(int cpu_id)
{
	thread_affinity_set(curthread, cpu_id);
}

/*
 * Decrement the affinity reservation count and if it becomes zero,
 * clear the CPU affinity for the current thread, or set it to the user's
 * software binding request.
 */
void
thread_affinity_clear(kthread_id_t t)
{
	register processorid_t binding;
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright 2018 Joyent, Inc.
 */

/*
 * Architecture-independent CPU control functions.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/var.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#include <sys/cpu_event.h>
#include <sys/kstat.h>
#include <sys/uadmin.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/procset.h>
#include <sys/processor.h>
#include <sys/debug.h>
{
	ASSERT(THREAD_LOCK_HELD(tp));
	if (tp == curthread) {
		THREAD_TRANSITION(tp);
		CL_SETRUN(tp);
		thread_unlock_nopreempt(tp);
		swtch();
	} else {
		if (tp->t_state == TS_ONPROC) {
			cpu_surrender(tp);
		} else if (tp->t_state == TS_RUN) {
			(void) dispdeq(tp);
			setbackdq(tp);
		}
		thread_unlock(tp);
	}
}

/*
 * Set affinity for a specified CPU.
 *
 * Specifying a cpu_id of CPU_CURRENT, allowed _only_ when setting affinity for
 * curthread, will set affinity to the CPU on which the thread is currently
 * running.  For other cpu_id values, the caller must ensure that the
 * referenced CPU remains valid, which can be done by holding cpu_lock across
 * this call.
 *
 * CPU affinity is guaranteed after return of thread_affinity_set().  If a
 * caller setting affinity to CPU_CURRENT requires that its thread not migrate
 * CPUs prior to a successful return, it should take extra precautions (such as
 * its own call to kpreempt_disable) to ensure that safety.
 *
 * A CPU affinity reference count is maintained by thread_affinity_set and
 * thread_affinity_clear (incrementing and decrementing it, respectively),
 * maintaining CPU affinity while the count is non-zero, and allowing regions
 * of code which require affinity to be nested.
 */
void
thread_affinity_set(kthread_id_t t, int cpu_id)
{
	cpu_t	*cp;

	ASSERT(!(t == curthread && t->t_weakbound_cpu != NULL));

	if (cpu_id == CPU_CURRENT) {
		VERIFY3P(t, ==, curthread);
		kpreempt_disable();
		cp = CPU;
	} else {
		/*
		 * We should be asserting that cpu_lock is held here, but
		 * the NCA code doesn't acquire it.  The following assert
		 * should be uncommented when the NCA code is fixed.
		 *
		 * ASSERT(MUTEX_HELD(&cpu_lock));
		 */
		VERIFY((cpu_id >= 0) && (cpu_id < NCPU));
		cp = cpu[cpu_id];

		/* user must provide a good cpu_id */
		VERIFY(cp != NULL);
	}

	/*
	 * If there is already a hard affinity requested, and this affinity
	 * conflicts with that, panic.
	 */
	thread_lock(t);
	if (t->t_affinitycnt > 0 && t->t_bound_cpu != cp) {
		panic("affinity_set: setting %p but already bound to %p",
		    (void *)cp, (void *)t->t_bound_cpu);
	}
	t->t_affinitycnt++;
	t->t_bound_cpu = cp;

	/*
	 * Make sure we're running on the right CPU.
	 */
	if (cp != t->t_cpu || t != curthread) {
		ASSERT(cpu_id != CPU_CURRENT);
		force_thread_migrate(t);	/* drops thread lock */
	} else {
		thread_unlock(t);
	}

	if (cpu_id == CPU_CURRENT) {
		kpreempt_enable();
	}
}
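
/*
 * Illustrative sketch only (not part of the original file): per the block
 * comment above, a caller that must not migrate CPUs even before
 * thread_affinity_set() returns can bracket the CPU_CURRENT case with its
 * own kpreempt_disable()/kpreempt_enable() pair.  The function name and
 * the pinned region are hypothetical, and the AFFINITY_USAGE_EXAMPLE guard
 * keeps this sketch out of any real build.
 */
#ifdef	AFFINITY_USAGE_EXAMPLE
static void
affinity_current_cpu_example(void)
{
	kpreempt_disable();		/* stay on this CPU from here on */
	thread_affinity_set(curthread, CPU_CURRENT);
	kpreempt_enable();		/* affinity now keeps us here */

	/* ... work that must remain on the current CPU ... */

	thread_affinity_clear(curthread);
}
#endif	/* AFFINITY_USAGE_EXAMPLE */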

/*
 * Wrapper for backward compatibility.
 */
void
affinity_set(int cpu_id)
{
	thread_affinity_set(curthread, cpu_id);
}

/*
 * Decrement the affinity reservation count and if it becomes zero,
 * clear the CPU affinity for the current thread, or set it to the user's
 * software binding request.
 */
void
thread_affinity_clear(kthread_id_t t)
{
	register processorid_t binding;