10923 thread_affinity_set(CPU_CURRENT) can skip cpu_lock
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: John Levon <john.levon@joyent.com>

*** 19,28 ****
--- 19,29 ----
   * CDDL HEADER END
   */
  /*
   * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
   * Copyright (c) 2012 by Delphix. All rights reserved.
+  * Copyright 2018 Joyent, Inc.
   */
  
  /*
   * Architecture-independent CPU control functions.
   */
*** 384,423 ****
          }
  }
  
  /*
   * Set affinity for a specified CPU.
!  * A reference count is incremented and the affinity is held until the
!  * reference count is decremented to zero by thread_affinity_clear().
!  * This is so regions of code requiring affinity can be nested.
!  * Caller needs to ensure that cpu_id remains valid, which can be
!  * done by holding cpu_lock across this call, unless the caller
!  * specifies CPU_CURRENT in which case the cpu_lock will be acquired
!  * by thread_affinity_set and CPU->cpu_id will be the target CPU.
   */
  void
  thread_affinity_set(kthread_id_t t, int cpu_id)
  {
          cpu_t   *cp;
-         int     c;
  
          ASSERT(!(t == curthread && t->t_weakbound_cpu != NULL));
  
!         if ((c = cpu_id) == CPU_CURRENT) {
!                 mutex_enter(&cpu_lock);
!                 cpu_id = CPU->cpu_id;
!         }
          /*
           * We should be asserting that cpu_lock is held here, but
           * the NCA code doesn't acquire it.  The following assert
           * should be uncommented when the NCA code is fixed.
           *
           * ASSERT(MUTEX_HELD(&cpu_lock));
           */
!         ASSERT((cpu_id >= 0) && (cpu_id < NCPU));
          cp = cpu[cpu_id];
!         ASSERT(cp != NULL);             /* user must provide a good cpu_id */
          /*
           * If there is already a hard affinity requested, and this affinity
           * conflicts with that, panic.
           */
          thread_lock(t);
--- 385,437 ----
          }
  }
  
  /*
   * Set affinity for a specified CPU.
!  *
!  * Specifying a cpu_id of CPU_CURRENT, allowed _only_ when setting affinity for
!  * curthread, will set affinity to the CPU on which the thread is currently
!  * running.  For other cpu_id values, the caller must ensure that the
!  * referenced CPU remains valid, which can be done by holding cpu_lock across
!  * this call.
!  *
!  * CPU affinity is guaranteed after return of thread_affinity_set().  If a
!  * caller setting affinity to CPU_CURRENT requires that its thread not migrate
!  * CPUs prior to a successful return, it should take extra precautions (such as
!  * their own call to kpreempt_disable) to ensure that safety.
!  *
!  * A CPU affinity reference count is maintained by thread_affinity_set and
!  * thread_affinity_clear (incrementing and decrementing it, respectively),
!  * maintaining CPU affinity while the count is non-zero, and allowing regions
!  * of code which require affinity to be nested.
   */
  void
  thread_affinity_set(kthread_id_t t, int cpu_id)
  {
          cpu_t   *cp;
  
          ASSERT(!(t == curthread && t->t_weakbound_cpu != NULL));
  
!         if (cpu_id == CPU_CURRENT) {
!                 VERIFY3P(t, ==, curthread);
!                 kpreempt_disable();
!                 cp = CPU;
!         } else {
          /*
           * We should be asserting that cpu_lock is held here, but
           * the NCA code doesn't acquire it.  The following assert
           * should be uncommented when the NCA code is fixed.
           *
           * ASSERT(MUTEX_HELD(&cpu_lock));
           */
!                 VERIFY((cpu_id >= 0) && (cpu_id < NCPU));
          cp = cpu[cpu_id];
!
!                 /* user must provide a good cpu_id */
!                 VERIFY(cp != NULL);
!         }
!
          /*
           * If there is already a hard affinity requested, and this affinity
           * conflicts with that, panic.
           */
          thread_lock(t);
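
The reworked block comment above spells out the contract: CPU_CURRENT pins curthread to its current CPU without taking cpu_lock, and affinity is reference counted so regions can nest. Below is a minimal caller-side sketch of that reference-counted usage, using only the routines named in the hunk (thread_affinity_set, thread_affinity_clear, CPU_CURRENT); the comments are illustrative, not taken from the change:

        /*
         * Pin curthread to whichever CPU it is currently running on.
         * With this change, no cpu_lock is taken on this path.
         */
        thread_affinity_set(curthread, CPU_CURRENT);

        /* Affinity regions nest: the reference count is now 2. */
        thread_affinity_set(curthread, CPU_CURRENT);
        thread_affinity_clear(curthread);       /* count drops back to 1 */

        /* Count reaches zero here; the thread may migrate again. */
        thread_affinity_clear(curthread);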
*** 430,446 ****
  
          /*
           * Make sure we're running on the right CPU.
           */
          if (cp != t->t_cpu || t != curthread) {
                  force_thread_migrate(t);        /* drops thread lock */
          } else {
                  thread_unlock(t);
          }
  
!         if (c == CPU_CURRENT)
!                 mutex_exit(&cpu_lock);
  }
  
  /*
   * Wrapper for backward compatibility.
   */
--- 444,462 ----
  
          /*
           * Make sure we're running on the right CPU.
           */
          if (cp != t->t_cpu || t != curthread) {
+                 ASSERT(cpu_id != CPU_CURRENT);
                  force_thread_migrate(t);        /* drops thread lock */
          } else {
                  thread_unlock(t);
          }
  
!         if (cpu_id == CPU_CURRENT) {
!                 kpreempt_enable();
!         }
  }
  
  /*
   * Wrapper for backward compatibility.
   */
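
As the new comment notes, a CPU_CURRENT caller that must not migrate before thread_affinity_set() returns should supply its own preemption guard, since the thread can move between sampling CPU and the function's return. A sketch of that precaution, assuming the kpreempt_disable/kpreempt_enable pairing the comment suggests:

        kpreempt_disable();
        /*
         * Preemption is off, so curthread cannot move off this CPU
         * before the affinity reference is established.
         */
        thread_affinity_set(curthread, CPU_CURRENT);
        kpreempt_enable();

        /* ... work that must stay on the pinned CPU ... */

        thread_affinity_clear(curthread);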