11909 THREAD_KPRI_RELEASE does nothing of the sort
Reviewed by: Bryan Cantrill <bryan@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>

          --- old/usr/src/uts/common/sys/thread.h
          +++ new/usr/src/uts/common/sys/thread.h
[ 190 lines elided ]
 191  191          short   t_sysnum;               /* system call number */
 192  192          kcondvar_t      t_delay_cv;
 193  193          kmutex_t        t_delay_lock;
 194  194  
 195  195          /*
 196  196           * Pointer to the dispatcher lock protecting t_state and state-related
 197  197           * flags.  This pointer can change during waits on the lock, so
 198  198           * it should be grabbed only by thread_lock().
 199  199           */
 200  200          disp_lock_t     *t_lockp;       /* pointer to the dispatcher lock */
 201      -        ushort_t        t_oldspl;       /* spl level before dispatcher locked */
      201 +        ushort_t        t_oldspl;       /* spl level before dispatcher locked */
 202  202          volatile char   t_pre_sys;      /* pre-syscall work needed */
 203  203          lock_t          t_lock_flush;   /* for lock_mutex_flush() impl */
 204  204          struct _disp    *t_disp_queue;  /* run queue for chosen CPU */
 205  205          clock_t         t_disp_time;    /* last time this thread was running */
 206      -        uint_t          t_kpri_req;     /* kernel priority required */
 207  206  
 208  207          /*
 209  208           * Post-syscall / post-trap flags.
 210      -         *      No lock is required to set these.
      209 +         *      No lock is required to set these.
 211  210           *      These must be cleared only by the thread itself.
 212  211           *
 213  212           *      t_astflag indicates that some post-trap processing is required,
 214  213           *              possibly a signal or a preemption.  The thread will not
 215  214           *              return to user with this set.
 216  215           *      t_post_sys indicates that some unusual post-system call
 217  216           *              handling is required, such as an error or tracing.
 218  217           *      t_sig_check indicates that some condition in ISSIG() must be
 219      -         *              checked, but doesn't prevent returning to user.
      218 +         *              checked, but doesn't prevent returning to user.
 220  219           *      t_post_sys_ast is a way of checking whether any of these three
 221  220           *              flags are set.
 222  221           */
 223  222          union __tu {
 224  223                  struct __ts {
 225  224                          volatile char   _t_astflag;     /* AST requested */
 226  225                          volatile char   _t_sig_check;   /* ISSIG required */
 227  226                          volatile char   _t_post_sys;    /* post_syscall req */
 228  227                          volatile char   _t_trapret;     /* call CL_TRAPRET */
 229  228                  } _ts;
[ 121 lines elided ]
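
The t_lockp comment in the hunk above implies a specific access pattern: the dispatcher lock is only taken through thread_lock(), which chases the pointer until it is stable. A minimal sketch of that pattern follows (illustrative only, not part of this change; the function name sample_check_state is hypothetical):

    void
    sample_check_state(kthread_t *t)
    {
            thread_lock(t);                 /* resolves and holds *t->t_lockp */
            if (t->t_state == TS_SLEEP) {
                    /* t_state and state-related flags are stable here */
            }
            thread_unlock(t);               /* drops the dispatcher lock */
    }
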
 351  350          kmutex_t        t_wait_mutex;   /* used in CV wait functions */
 352  351  
 353  352          char            *t_name;        /* thread name */
 354  353  
 355  354          uint64_t        t_unsafe;       /* unsafe to run with SMT VCPU thread */
 356  355  } kthread_t;
 357  356  
 358  357  /*
 359  358   * Thread flag (t_flag) definitions.
 360  359   *      These flags must be changed only for the current thread,
 361      - *      and not during preemption code, since the code being
      360 + *      and not during preemption code, since the code being
 362  361   *      preempted could be modifying the flags.
 363  362   *
 364  363   *      For the most part these flags do not need locking.
 365  364   *      The following flags will only be changed while the thread_lock is held,
 366  365   *      to give assurance that they are consistent with t_state:
 367  366   *              T_WAKEABLE
 368  367   */
 369  368  #define T_INTR_THREAD   0x0001  /* thread is an interrupt thread */
 370  369  #define T_WAKEABLE      0x0002  /* thread is blocked, signals enabled */
 371  370  #define T_TOMASK        0x0004  /* use lwp_sigoldmask on return from signal */
[ 132 lines elided ]
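
Per the t_flag comment above, most flag bits are touched only by the owning thread, while T_WAKEABLE is changed with the thread lock held so it stays consistent with t_state. A hedged illustration (not from this webrev; real sleep paths change t_state and call swtch() before the lock is released):

    curthread->t_flag |= T_TOMASK;          /* own flags: no lock needed */

    thread_lock(curthread);
    curthread->t_flag |= T_WAKEABLE;        /* kept consistent with t_state */
    thread_unlock(curthread);
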
 504  503   *      convert a proc pointer to a lwp pointer. this only works with
 505  504   *      procs that have only one lwp.
 506  505   *
 507  506   * ttolwp(x)
 508  507   *      convert a thread pointer to its lwp pointer.
 509  508   *
 510  509   * ttoproc(x)
 511  510   *      convert a thread pointer to its proc pointer.
 512  511   *
 513  512   * ttoproj(x)
 514      - *      convert a thread pointer to its project pointer.
      513 + *      convert a thread pointer to its project pointer.
 515  514   *
 516  515   * ttozone(x)
 517      - *      convert a thread pointer to its zone pointer.
      516 + *      convert a thread pointer to its zone pointer.
 518  517   *
 519  518   * lwptot(x)
 520  519   *      convert a lwp pointer to its thread pointer.
 521  520   *
 522  521   * lwptoproc(x)
 523  522   *      convert a lwp to its proc pointer.
 524  523   */
 525  524  #define proctot(x)      ((x)->p_tlist)
 526  525  #define proctolwp(x)    ((x)->p_tlist->t_lwp)
 527  526  #define ttolwp(x)       ((x)->t_lwp)
[ 73 lines elided ]
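
A short usage sketch of the conversion macros documented above (illustrative only; p, lwp, and z are hypothetical locals):

    proc_t  *p   = ttoproc(curthread);      /* thread -> process */
    klwp_t  *lwp = ttolwp(curthread);       /* thread -> lwp, NULL for pure kernel threads */
    zone_t  *z   = ttozone(curthread);      /* thread -> zone */
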
 601  600  int thread_setname(kthread_t *, const char *);
 602  601  int thread_vsetname(kthread_t *, const char *, ...);
 603  602  
 604  603  extern int default_binding_mode;
 605  604  
 606  605  #endif  /* _KERNEL */
 607  606  
 608  607  #define THREAD_NAME_MAX 32      /* includes terminating NUL */
 609  608  
 610  609  /*
 611      - * Macros to indicate that the thread holds resources that could be critical
 612      - * to other kernel threads, so this thread needs to have kernel priority
 613      - * if it blocks or is preempted.  Note that this is not necessary if the
 614      - * resource is a mutex or a writer lock because of priority inheritance.
 615      - *
 616      - * The only way one thread may legally manipulate another thread's t_kpri_req
 617      - * is to hold the target thread's thread lock while that thread is asleep.
 618      - * (The rwlock code does this to implement direct handoff to waiting readers.)
 619      - */
 620      -#define THREAD_KPRI_REQUEST()   (curthread->t_kpri_req++)
 621      -#define THREAD_KPRI_RELEASE()   (curthread->t_kpri_req--)
 622      -#define THREAD_KPRI_RELEASE_N(n) (curthread->t_kpri_req -= (n))
 623      -
 624      -/*
 625  610   * Macro to change a thread's priority.
 626  611   */
 627  612  #define THREAD_CHANGE_PRI(t, pri) {                                     \
 628  613          pri_t __new_pri = (pri);                                        \
 629  614          DTRACE_SCHED2(change__pri, kthread_t *, (t), pri_t, __new_pri); \
 630  615          (t)->t_pri = __new_pri;                                         \
 631  616          schedctl_set_cidpri(t);                                         \
 632  617  }
 633  618  
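
With the THREAD_KPRI_* request/release macros removed above, THREAD_CHANGE_PRI remains the way t_pri itself is changed. A hedged sketch of a typical call site (t and new_pri are hypothetical; the thread lock is normally held across the change):

    thread_lock(t);
    THREAD_CHANGE_PRI(t, new_pri);          /* change-pri DTrace probe, sets t_pri, */
                                            /* then refreshes the schedctl cid/pri  */
    thread_unlock(t);
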
 634  619  /*
[ 6 lines elided ]
 641  626  /*
 642  627   * Macros to change thread state and the associated lock.
 643  628   */
 644  629  #define THREAD_SET_STATE(tp, state, lp) \
 645  630                  ((tp)->t_state = state, (tp)->t_lockp = lp)
 646  631  
 647  632  /*
 648  633   * Point it at the transition lock, which is always held.
 649  634   * The previously held lock is dropped.
 650  635   */
 651      -#define THREAD_TRANSITION(tp)   thread_transition(tp);
      636 +#define THREAD_TRANSITION(tp)   thread_transition(tp);
 652  637  /*
 653  638   * Set the thread's lock to be the transition lock, without dropping
 654  639   * previously held lock.
 655  640   */
 656      -#define THREAD_TRANSITION_NOLOCK(tp)    ((tp)->t_lockp = &transition_lock)
      641 +#define THREAD_TRANSITION_NOLOCK(tp)    ((tp)->t_lockp = &transition_lock)
 657  642  
 658  643  /*
 659  644   * Put thread in run state, and set the lock pointer to the dispatcher queue
 660  645   * lock pointer provided.  This lock should be held.
 661  646   */
 662  647  #define THREAD_RUN(tp, lp)      THREAD_SET_STATE(tp, TS_RUN, lp)
 663  648  
 664  649  /*
 665  650   * Put thread in wait state, and set the lock pointer to the wait queue
 666  651   * lock pointer provided.  This lock should be held.
[ 71 lines elided ]
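
THREAD_SET_STATE and its wrappers change t_state and t_lockp as a pair, with the new lock already held. A hedged sketch of a dispatcher-side caller marking a thread runnable (dq is a hypothetical disp_t pointer whose disp_lock the caller already holds):

    tp->t_disp_queue = dq;                  /* chosen run queue */
    THREAD_RUN(tp, &dq->disp_lock);         /* t_state = TS_RUN, t_lockp -> queue lock */
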