Print this page
10923 thread_affinity_set(CPU_CURRENT) can skip cpu_lock
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: John Levon <john.levon@joyent.com>

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/common/os/cpu.c
          +++ new/usr/src/uts/common/os/cpu.c
↓ open down ↓ 13 lines elided ↑ open up ↑
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
  23   23   * Copyright (c) 2012 by Delphix. All rights reserved.
       24 + * Copyright 2018 Joyent, Inc.
  24   25   */
  25   26  
  26   27  /*
  27   28   * Architecture-independent CPU control functions.
  28   29   */
  29   30  
  30   31  #include <sys/types.h>
  31   32  #include <sys/param.h>
  32   33  #include <sys/var.h>
  33   34  #include <sys/thread.h>
↓ open down ↓ 345 lines elided ↑ open up ↑
 379  380                  } else if (tp->t_state == TS_RUN) {
 380  381                          (void) dispdeq(tp);
 381  382                          setbackdq(tp);
 382  383                  }
 383  384                  thread_unlock(tp);
 384  385          }
 385  386  }
 386  387  
 387  388  /*
 388  389   * Set affinity for a specified CPU.
 389      - * A reference count is incremented and the affinity is held until the
 390      - * reference count is decremented to zero by thread_affinity_clear().
 391      - * This is so regions of code requiring affinity can be nested.
 392      - * Caller needs to ensure that cpu_id remains valid, which can be
 393      - * done by holding cpu_lock across this call, unless the caller
 394      - * specifies CPU_CURRENT in which case the cpu_lock will be acquired
 395      - * by thread_affinity_set and CPU->cpu_id will be the target CPU.
      390 + *
      391 + * Specifying a cpu_id of CPU_CURRENT, allowed _only_ when setting affinity for
      392 + * curthread, will set affinity to the CPU on which the thread is currently
      393 + * running.  For other cpu_id values, the caller must ensure that the
      394 + * referenced CPU remains valid, which can be done by holding cpu_lock across
      395 + * this call.
      396 + *
      397 + * CPU affinity is guaranteed after return of thread_affinity_set().  If a
      398 + * caller setting affinity to CPU_CURRENT requires that its thread not migrate
      399 + * CPUs prior to a successful return, it should take extra precautions (such as
       400 + * its own call to kpreempt_disable) to ensure that safety.
      401 + *
      402 + * A CPU affinity reference count is maintained by thread_affinity_set and
      403 + * thread_affinity_clear (incrementing and decrementing it, respectively),
      404 + * maintaining CPU affinity while the count is non-zero, and allowing regions
      405 + * of code which require affinity to be nested.
 396  406   */
 397  407  void
 398  408  thread_affinity_set(kthread_id_t t, int cpu_id)
 399  409  {
 400      -        cpu_t           *cp;
 401      -        int             c;
      410 +        cpu_t *cp;
 402  411  
 403  412          ASSERT(!(t == curthread && t->t_weakbound_cpu != NULL));
 404  413  
 405      -        if ((c = cpu_id) == CPU_CURRENT) {
 406      -                mutex_enter(&cpu_lock);
 407      -                cpu_id = CPU->cpu_id;
      414 +        if (cpu_id == CPU_CURRENT) {
      415 +                VERIFY3P(t, ==, curthread);
      416 +                kpreempt_disable();
      417 +                cp = CPU;
      418 +        } else {
      419 +                /*
      420 +                 * We should be asserting that cpu_lock is held here, but
      421 +                 * the NCA code doesn't acquire it.  The following assert
      422 +                 * should be uncommented when the NCA code is fixed.
      423 +                 *
      424 +                 * ASSERT(MUTEX_HELD(&cpu_lock));
      425 +                 */
      426 +                VERIFY((cpu_id >= 0) && (cpu_id < NCPU));
      427 +                cp = cpu[cpu_id];
      428 +
      429 +                /* user must provide a good cpu_id */
      430 +                VERIFY(cp != NULL);
 408  431          }
      432 +
 409  433          /*
 410      -         * We should be asserting that cpu_lock is held here, but
 411      -         * the NCA code doesn't acquire it.  The following assert
 412      -         * should be uncommented when the NCA code is fixed.
 413      -         *
 414      -         * ASSERT(MUTEX_HELD(&cpu_lock));
 415      -         */
 416      -        ASSERT((cpu_id >= 0) && (cpu_id < NCPU));
 417      -        cp = cpu[cpu_id];
 418      -        ASSERT(cp != NULL);             /* user must provide a good cpu_id */
 419      -        /*
 420  434           * If there is already a hard affinity requested, and this affinity
 421  435           * conflicts with that, panic.
 422  436           */
 423  437          thread_lock(t);
 424  438          if (t->t_affinitycnt > 0 && t->t_bound_cpu != cp) {
 425  439                  panic("affinity_set: setting %p but already bound to %p",
 426  440                      (void *)cp, (void *)t->t_bound_cpu);
 427  441          }
 428  442          t->t_affinitycnt++;
 429  443          t->t_bound_cpu = cp;
 430  444  
 431  445          /*
 432  446           * Make sure we're running on the right CPU.
 433  447           */
 434  448          if (cp != t->t_cpu || t != curthread) {
      449 +                ASSERT(cpu_id != CPU_CURRENT);
 435  450                  force_thread_migrate(t);        /* drops thread lock */
 436  451          } else {
 437  452                  thread_unlock(t);
 438  453          }
 439  454  
 440      -        if (c == CPU_CURRENT)
 441      -                mutex_exit(&cpu_lock);
      455 +        if (cpu_id == CPU_CURRENT) {
      456 +                kpreempt_enable();
      457 +        }
 442  458  }
 443  459  
 444  460  /*
 445  461   *      Wrapper for backward compatibility.
 446  462   */
 447  463  void
 448  464  affinity_set(int cpu_id)
 449  465  {
 450  466          thread_affinity_set(curthread, cpu_id);
 451  467  }
↓ open down ↓ 3000 lines elided ↑ open up ↑
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX