Print this page
OS-7753 THREAD_KPRI_RELEASE does nothing of the sort
Reviewed by: Bryan Cantrill <bryan@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/common/disp/fss.c
          +++ new/usr/src/uts/common/disp/fss.c
↓ open down ↓ 13 lines elided ↑ open up ↑
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  
  22   22  /*
  23   23   * Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved.
  24      - * Copyright 2013, Joyent, Inc. All rights reserved.
       24 + * Copyright 2019 Joyent, Inc.
  25   25   */
  26   26  
  27   27  #include <sys/types.h>
  28   28  #include <sys/param.h>
  29   29  #include <sys/sysmacros.h>
  30   30  #include <sys/cred.h>
  31   31  #include <sys/proc.h>
  32   32  #include <sys/strsubr.h>
  33   33  #include <sys/priocntl.h>
  34   34  #include <sys/class.h>
↓ open down ↓ 1170 lines elided ↑ open up ↑
1205 1205                                   *         kpj_shares^2     zone_ext_shares^2
1206 1206                                   *
1207 1207                                   * Where zone_int_shares is the sum of shares
1208 1208                                   * of all active projects within the zone (and
1209 1209                                   * the pset), and zone_ext_shares is the number
1210 1210                                   * of zone shares (ie, zone.cpu-shares).
1211 1211                                   *
1212 1212                                   * If there is only one zone active on the pset
1213 1213                                   * the above reduces to:
1214 1214                                   *
1215      -                                 *                      zone_int_shares^2
     1215 +                                 *                      zone_int_shares^2
1216 1216                                   * shusage = usage * ---------------------
1217      -                                 *                      kpj_shares^2
     1217 +                                 *                      kpj_shares^2
1218 1218                                   *
1219 1219                                   * If there's only one project active in the
1220 1220                                   * zone this formula reduces to:
1221 1221                                   *
1222 1222                                   *                      pset_shares^2
1223 1223                                   * shusage = usage * ----------------------
1224 1224                                   *                      zone_ext_shares^2
1225 1225                                   *
1226 1226                                   * shusage is one input to calculating fss_pri
1227 1227                                   * in fss_newpri(). Larger values tend toward
↓ open down ↓ 138 lines elided ↑ open up ↑
1366 1366                  /*
1367 1367                   * Lock the thread and verify the state.
1368 1368                   */
1369 1369                  thread_lock(t);
1370 1370                  /*
1371 1371                   * Skip the thread if it is no longer in the FSS class or
1372 1372                   * is running with kernel mode priority.
1373 1373                   */
1374 1374                  if (t->t_cid != fss_cid)
1375 1375                          goto next;
1376      -                if ((fssproc->fss_flags & FSSKPRI) != 0)
1377      -                        goto next;
1378 1376  
1379 1377                  fssproj = FSSPROC2FSSPROJ(fssproc);
1380 1378                  if (fssproj == NULL)
1381 1379                          goto next;
1382 1380  
1383 1381                  if (fssproj->fssp_shares != 0) {
1384 1382                          /*
1385 1383                           * Decay fsspri value.
1386 1384                           */
1387 1385                          fsspri = fssproc->fss_fsspri;
↓ open down ↓ 494 lines elided ↑ open up ↑
1882 1880          cfssproc->fss_timeleft = fss_quantum;
1883 1881          cfssproc->fss_umdpri = pfssproc->fss_umdpri;
1884 1882          cfssproc->fss_fsspri = 0;
1885 1883          cfssproc->fss_uprilim = pfssproc->fss_uprilim;
1886 1884          cfssproc->fss_upri = pfssproc->fss_upri;
1887 1885          cfssproc->fss_tp = ct;
1888 1886          cfssproc->fss_nice = pfssproc->fss_nice;
1889 1887          cpucaps_sc_init(&cfssproc->fss_caps);
1890 1888  
1891 1889          cfssproc->fss_flags =
1892      -            pfssproc->fss_flags & ~(FSSKPRI | FSSBACKQ | FSSRESTORE);
     1890 +            pfssproc->fss_flags & ~(FSSBACKQ | FSSRESTORE);
1893 1891          ct->t_cldata = (void *)cfssproc;
1894 1892          ct->t_schedflag |= TS_RUNQMATCH;
1895 1893          thread_unlock(pt);
1896 1894  
1897 1895          fssproj->fssp_threads++;
1898 1896          mutex_exit(&fsspset->fssps_lock);
1899 1897  
1900 1898          /*
1901 1899           * Link new structure into fssproc hash table.
1902 1900           */
↓ open down ↓ 30 lines elided ↑ open up ↑
1933 1931          mutex_exit(&pidlock);
1934 1932          continuelwps(pp);
1935 1933  
1936 1934          thread_lock(t);
1937 1935  
1938 1936          fssproc = FSSPROC(t);
1939 1937          fss_newpri(fssproc, B_FALSE);
1940 1938          fssproc->fss_timeleft = fss_quantum;
1941 1939          t->t_pri = fssproc->fss_umdpri;
1942 1940          ASSERT(t->t_pri >= 0 && t->t_pri <= fss_maxglobpri);
1943      -        fssproc->fss_flags &= ~FSSKPRI;
1944 1941          THREAD_TRANSITION(t);
1945 1942  
1946 1943          /*
1947 1944           * We don't want to call fss_setrun(t) here because it may call
1948 1945           * fss_active, which we don't need.
1949 1946           */
1950 1947          fssproc->fss_flags &= ~FSSBACKQ;
1951 1948  
1952 1949          if (t->t_disp_time != ddi_get_lbolt())
1953 1950                  setbackdq(t);
↓ open down ↓ 78 lines elided ↑ open up ↑
2032 2029          if (nice > FSS_NICE_MAX)
2033 2030                  nice = FSS_NICE_MAX;
2034 2031  
2035 2032          thread_lock(t);
2036 2033  
2037 2034          fssproc->fss_uprilim = reqfssuprilim;
2038 2035          fssproc->fss_upri = reqfssupri;
2039 2036          fssproc->fss_nice = nice;
2040 2037          fss_newpri(fssproc, B_FALSE);
2041 2038  
2042      -        if ((fssproc->fss_flags & FSSKPRI) != 0) {
2043      -                thread_unlock(t);
2044      -                return (0);
2045      -        }
2046      -
2047 2039          fss_change_priority(t, fssproc);
2048 2040          thread_unlock(t);
2049 2041          return (0);
2050 2042  
2051 2043  }
2052 2044  
2053 2045  /*
2054 2046   * The thread is being stopped.
2055 2047   */
2056 2048  /*ARGSUSED*/
↓ open down ↓ 94 lines elided ↑ open up ↑
2151 2143          fssproc_t *fssproc = FSSPROC(t);
2152 2144          long epri = -1;
2153 2145          proc_t *pp = ttoproc(t);
2154 2146  
2155 2147          ASSERT(THREAD_LOCK_HELD(t));
2156 2148  
2157 2149          if (t->t_state == TS_RUN && (t->t_schedflag & TS_LOAD) == 0) {
2158 2150                  time_t swapout_time;
2159 2151  
2160 2152                  swapout_time = (ddi_get_lbolt() - t->t_stime) / hz;
2161      -                if (INHERITED(t) || (fssproc->fss_flags & FSSKPRI)) {
     2153 +                if (INHERITED(t)) {
2162 2154                          epri = (long)DISP_PRIO(t) + swapout_time;
2163 2155                  } else {
2164 2156                          /*
2165 2157                           * Threads which have been out for a long time,
2166 2158                           * have high user mode priority and are associated
2167 2159                           * with a small address space are more deserving.
2168 2160                           */
2169 2161                          epri = fssproc->fss_umdpri;
2170 2162                          ASSERT(epri >= 0 && epri <= fss_maxumdpri);
2171 2163                          epri += swapout_time - pp->p_swrss / nz(maxpgio)/2;
↓ open down ↓ 11 lines elided ↑ open up ↑
2183 2175  }
2184 2176  
2185 2177  /*
2186 2178   * fss_swapout() returns -1 if the thread isn't loaded or is not eligible to
2187 2179   * be swapped out. Otherwise, it returns the thread's effective priority
2188 2180   * based on if the swapper is in softswap or hardswap mode.
2189 2181   */
2190 2182  static pri_t
2191 2183  fss_swapout(kthread_t *t, int flags)
2192 2184  {
2193      -        fssproc_t *fssproc = FSSPROC(t);
2194 2185          long epri = -1;
2195 2186          proc_t *pp = ttoproc(t);
2196 2187          time_t swapin_time;
2197 2188  
2198 2189          ASSERT(THREAD_LOCK_HELD(t));
2199 2190  
2200 2191          if (INHERITED(t) ||
2201      -            (fssproc->fss_flags & FSSKPRI) ||
2202 2192              (t->t_proc_flag & TP_LWPEXIT) ||
2203 2193              (t->t_state & (TS_ZOMB|TS_FREE|TS_STOPPED|TS_ONPROC|TS_WAIT)) ||
2204 2194              !(t->t_schedflag & TS_LOAD) ||
2205 2195              !(SWAP_OK(t)))
2206 2196                  return (-1);
2207 2197  
2208 2198          ASSERT(t->t_state & (TS_SLEEP | TS_RUN));
2209 2199  
2210 2200          swapin_time = (ddi_get_lbolt() - t->t_stime) / hz;
2211 2201  
↓ open down ↓ 22 lines elided ↑ open up ↑
2234 2224          epri += SHRT_MAX / 2;
2235 2225          if (epri < 0)
2236 2226                  epri = 0;
2237 2227          else if (epri > SHRT_MAX)
2238 2228                  epri = SHRT_MAX;
2239 2229  
2240 2230          return ((pri_t)epri);
2241 2231  }
2242 2232  
2243 2233  /*
2244      - * If thread is currently at a kernel mode priority (has slept) and is
2245      - * returning to the userland we assign it the appropriate user mode priority
2246      - * and time quantum here.  If we're lowering the thread's priority below that
2247      - * of other runnable threads then we will set runrun via cpu_surrender() to
2248      - * cause preemption.
     2234 + * Run swap-out checks when returning to userspace.
2249 2235   */
2250 2236  static void
2251 2237  fss_trapret(kthread_t *t)
2252 2238  {
2253      -        fssproc_t *fssproc = FSSPROC(t);
2254 2239          cpu_t *cp = CPU;
2255 2240  
2256 2241          ASSERT(THREAD_LOCK_HELD(t));
2257 2242          ASSERT(t == curthread);
2258 2243          ASSERT(cp->cpu_dispthread == t);
2259 2244          ASSERT(t->t_state == TS_ONPROC);
2260 2245  
2261      -        t->t_kpri_req = 0;
2262      -        if (fssproc->fss_flags & FSSKPRI) {
2263      -                /*
2264      -                 * If thread has blocked in the kernel
2265      -                 */
2266      -                THREAD_CHANGE_PRI(t, fssproc->fss_umdpri);
2267      -                cp->cpu_dispatch_pri = DISP_PRIO(t);
2268      -                ASSERT(t->t_pri >= 0 && t->t_pri <= fss_maxglobpri);
2269      -                fssproc->fss_flags &= ~FSSKPRI;
2270      -
2271      -                if (DISP_MUST_SURRENDER(t))
2272      -                        cpu_surrender(t);
2273      -        }
2274      -
2275 2246          /*
2276 2247           * Swapout lwp if the swapper is waiting for this thread to reach
2277 2248           * a safe point.
2278 2249           */
2279 2250          if (t->t_schedflag & TS_SWAPENQ) {
2280 2251                  thread_unlock(t);
2281 2252                  swapout_lwp(ttolwp(t));
2282 2253                  thread_lock(t);
2283 2254          }
2284 2255  }
↓ open down ↓ 7 lines elided ↑ open up ↑
2292 2263  {
2293 2264          fssproc_t *fssproc = FSSPROC(t);
2294 2265          klwp_t *lwp;
2295 2266          uint_t flags;
2296 2267  
2297 2268          ASSERT(t == curthread);
2298 2269          ASSERT(THREAD_LOCK_HELD(curthread));
2299 2270          ASSERT(t->t_state == TS_ONPROC);
2300 2271  
2301 2272          /*
2302      -         * If preempted in the kernel, make sure the thread has a kernel
2303      -         * priority if needed.
2304      -         */
2305      -        lwp = curthread->t_lwp;
2306      -        if (!(fssproc->fss_flags & FSSKPRI) && lwp != NULL && t->t_kpri_req) {
2307      -                fssproc->fss_flags |= FSSKPRI;
2308      -                THREAD_CHANGE_PRI(t, minclsyspri);
2309      -                ASSERT(t->t_pri >= 0 && t->t_pri <= fss_maxglobpri);
2310      -                t->t_trapret = 1;       /* so that fss_trapret will run */
2311      -                aston(t);
2312      -        }
2313      -
2314      -        /*
2315 2273           * This thread may be placed on wait queue by CPU Caps. In this case we
2316 2274           * do not need to do anything until it is removed from the wait queue.
2317 2275           * Do not enforce CPU caps on threads running at a kernel priority
2318 2276           */
2319 2277          if (CPUCAPS_ON()) {
2320 2278                  (void) cpucaps_charge(t, &fssproc->fss_caps,
2321 2279                      CPUCAPS_CHARGE_ENFORCE);
2322 2280  
2323      -                if (!(fssproc->fss_flags & FSSKPRI) && CPUCAPS_ENFORCE(t))
     2281 +                if (CPUCAPS_ENFORCE(t))
2324 2282                          return;
2325 2283          }
2326 2284  
2327 2285          /*
2328 2286           * If preempted in user-land mark the thread as swappable because it
2329 2287           * cannot be holding any kernel locks.
2330 2288           */
2331 2289          ASSERT(t->t_schedflag & TS_DONT_SWAP);
     2290 +        lwp = ttolwp(t);
2332 2291          if (lwp != NULL && lwp->lwp_state == LWP_USER)
2333 2292                  t->t_schedflag &= ~TS_DONT_SWAP;
2334 2293  
2335 2294          /*
2336 2295           * Check to see if we're doing "preemption control" here.  If
2337 2296           * we are, and if the user has requested that this thread not
2338 2297           * be preempted, and if preemptions haven't been put off for
2339 2298           * too long, let the preemption happen here but try to make
2340 2299           * sure the thread is rescheduled as soon as possible.  We do
2341 2300           * this by putting it on the front of the highest priority run
2342 2301           * queue in the FSS class.  If the preemption has been put off
2343 2302           * for too long, clear the "nopreempt" bit and let the thread
2344 2303           * be preempted.
2345 2304           */
2346 2305          if (t->t_schedctl && schedctl_get_nopreempt(t)) {
2347 2306                  if (fssproc->fss_timeleft > -SC_MAX_TICKS) {
2348 2307                          DTRACE_SCHED1(schedctl__nopreempt, kthread_t *, t);
2349      -                        if (!(fssproc->fss_flags & FSSKPRI)) {
2350      -                                /*
2351      -                                 * If not already remembered, remember current
2352      -                                 * priority for restoration in fss_yield().
2353      -                                 */
2354      -                                if (!(fssproc->fss_flags & FSSRESTORE)) {
2355      -                                        fssproc->fss_scpri = t->t_pri;
2356      -                                        fssproc->fss_flags |= FSSRESTORE;
2357      -                                }
2358      -                                THREAD_CHANGE_PRI(t, fss_maxumdpri);
2359      -                                t->t_schedflag |= TS_DONT_SWAP;
     2308 +                        /*
     2309 +                         * If not already remembered, remember current
     2310 +                         * priority for restoration in fss_yield().
     2311 +                         */
     2312 +                        if (!(fssproc->fss_flags & FSSRESTORE)) {
     2313 +                                fssproc->fss_scpri = t->t_pri;
     2314 +                                fssproc->fss_flags |= FSSRESTORE;
2360 2315                          }
     2316 +                        THREAD_CHANGE_PRI(t, fss_maxumdpri);
     2317 +                        t->t_schedflag |= TS_DONT_SWAP;
2361 2318                          schedctl_set_yield(t, 1);
2362 2319                          setfrontdq(t);
2363 2320                          return;
2364 2321                  } else {
2365 2322                          if (fssproc->fss_flags & FSSRESTORE) {
2366 2323                                  THREAD_CHANGE_PRI(t, fssproc->fss_scpri);
2367 2324                                  fssproc->fss_flags &= ~FSSRESTORE;
2368 2325                          }
2369 2326                          schedctl_set_nopreempt(t, 0);
2370 2327                          DTRACE_SCHED1(schedctl__preempt, kthread_t *, t);
2371 2328                          /*
2372 2329                           * Fall through and be preempted below.
2373 2330                           */
2374 2331                  }
2375 2332          }
2376 2333  
2377      -        flags = fssproc->fss_flags & (FSSBACKQ | FSSKPRI);
     2334 +        flags = fssproc->fss_flags & FSSBACKQ;
2378 2335  
2379 2336          if (flags == FSSBACKQ) {
2380 2337                  fssproc->fss_timeleft = fss_quantum;
2381 2338                  fssproc->fss_flags &= ~FSSBACKQ;
2382 2339                  setbackdq(t);
2383      -        } else if (flags == (FSSBACKQ | FSSKPRI)) {
2384      -                fssproc->fss_flags &= ~FSSBACKQ;
2385      -                setbackdq(t);
2386 2340          } else {
2387 2341                  setfrontdq(t);
2388 2342          }
2389 2343  }
2390 2344  
2391 2345  /*
2392 2346   * Called when a thread is waking up and is to be placed on the run queue.
2393 2347   */
2394 2348  static void
2395 2349  fss_setrun(kthread_t *t)
↓ open down ↓ 1 lines elided ↑ open up ↑
2397 2351          fssproc_t *fssproc = FSSPROC(t);
2398 2352  
2399 2353          ASSERT(THREAD_LOCK_HELD(t));    /* t should be in transition */
2400 2354  
2401 2355          if (t->t_state == TS_SLEEP || t->t_state == TS_STOPPED)
2402 2356                  fss_active(t);
2403 2357  
2404 2358          fssproc->fss_timeleft = fss_quantum;
2405 2359  
2406 2360          fssproc->fss_flags &= ~FSSBACKQ;
2407      -        /*
2408      -         * If previously were running at the kernel priority then keep that
2409      -         * priority and the fss_timeleft doesn't matter.
2410      -         */
2411      -        if ((fssproc->fss_flags & FSSKPRI) == 0)
2412      -                THREAD_CHANGE_PRI(t, fssproc->fss_umdpri);
     2361 +        THREAD_CHANGE_PRI(t, fssproc->fss_umdpri);
2413 2362  
2414 2363          if (t->t_disp_time != ddi_get_lbolt())
2415 2364                  setbackdq(t);
2416 2365          else
2417 2366                  setfrontdq(t);
2418 2367  }
2419 2368  
2420 2369  /*
2421      - * Prepare thread for sleep. We reset the thread priority so it will run at the
2422      - * kernel priority level when it wakes up.
     2370 + * Prepare thread for sleep.
2423 2371   */
2424 2372  static void
2425 2373  fss_sleep(kthread_t *t)
2426 2374  {
2427 2375          fssproc_t *fssproc = FSSPROC(t);
2428 2376  
2429 2377          ASSERT(t == curthread);
2430 2378          ASSERT(THREAD_LOCK_HELD(t));
2431 2379  
2432 2380          ASSERT(t->t_state == TS_ONPROC);
2433 2381  
2434 2382          /*
2435 2383           * Account for time spent on CPU before going to sleep.
2436 2384           */
2437 2385          (void) CPUCAPS_CHARGE(t, &fssproc->fss_caps, CPUCAPS_CHARGE_ENFORCE);
2438 2386  
2439 2387          fss_inactive(t);
2440      -
2441      -        /*
2442      -         * Assign a system priority to the thread and arrange for it to be
2443      -         * retained when the thread is next placed on the run queue (i.e.,
2444      -         * when it wakes up) instead of being given a new pri.  Also arrange
2445      -         * for trapret processing as the thread leaves the system call so it
2446      -         * will drop back to normal priority range.
2447      -         */
2448      -        if (t->t_kpri_req) {
2449      -                THREAD_CHANGE_PRI(t, minclsyspri);
2450      -                fssproc->fss_flags |= FSSKPRI;
2451      -                t->t_trapret = 1;       /* so that fss_trapret will run */
2452      -                aston(t);
2453      -        } else if (fssproc->fss_flags & FSSKPRI) {
2454      -                /*
2455      -                 * The thread has done a THREAD_KPRI_REQUEST(), slept, then
2456      -                 * done THREAD_KPRI_RELEASE() (so now t_kpri_req is 0 again),
2457      -                 * then slept again all without finishing the current system
2458      -                 * call so trapret won't have cleared FSSKPRI
2459      -                 */
2460      -                fssproc->fss_flags &= ~FSSKPRI;
2461      -                THREAD_CHANGE_PRI(t, fssproc->fss_umdpri);
2462      -                if (DISP_MUST_SURRENDER(curthread))
2463      -                        cpu_surrender(t);
2464      -        }
2465 2388          t->t_stime = ddi_get_lbolt();   /* time stamp for the swapper */
2466 2389  }
2467 2390  
2468 2391  /*
2469 2392   * A tick interrupt has occurred on a running thread. Check to see if our
2470 2393   * time slice has expired.  We must also clear the TS_DONT_SWAP flag in
2471 2394   * t_schedflag if the thread is eligible to be swapped out.
2472 2395   */
2473 2396  static void
2474 2397  fss_tick(kthread_t *t)
↓ open down ↓ 21 lines elided ↑ open up ↑
2496 2419                  fssproc->fss_ticks++;
2497 2420                  disp_lock_exit_high(&fsspset->fssps_displock);
2498 2421          }
2499 2422  
2500 2423          /*
2501 2424           * Keep track of thread's project CPU usage.  Note that projects
2502 2425           * get charged even when threads are running in the kernel.
2503 2426           * Do not surrender CPU if running in the SYS class.
2504 2427           */
2505 2428          if (CPUCAPS_ON()) {
2506      -                cpucaps_enforce = cpucaps_charge(t,
2507      -                    &fssproc->fss_caps, CPUCAPS_CHARGE_ENFORCE) &&
2508      -                    !(fssproc->fss_flags & FSSKPRI);
     2429 +                cpucaps_enforce = cpucaps_charge(t, &fssproc->fss_caps,
     2430 +                    CPUCAPS_CHARGE_ENFORCE);
2509 2431          }
2510 2432  
2511      -        /*
2512      -         * A thread's execution time for threads running in the SYS class
2513      -         * is not tracked.
2514      -         */
2515      -        if ((fssproc->fss_flags & FSSKPRI) == 0) {
     2433 +        if (--fssproc->fss_timeleft <= 0) {
     2434 +                pri_t new_pri;
     2435 +
2516 2436                  /*
2517      -                 * If thread is not in kernel mode, decrement its fss_timeleft
     2437 +                 * If we're doing preemption control and trying to avoid
     2438 +                 * preempting this thread, just note that the thread should
     2439 +                 * yield soon and let it keep running (unless it's been a
     2440 +                 * while).
2518 2441                   */
2519      -                if (--fssproc->fss_timeleft <= 0) {
2520      -                        pri_t new_pri;
2521      -
2522      -                        /*
2523      -                         * If we're doing preemption control and trying to
2524      -                         * avoid preempting this thread, just note that the
2525      -                         * thread should yield soon and let it keep running
2526      -                         * (unless it's been a while).
2527      -                         */
2528      -                        if (t->t_schedctl && schedctl_get_nopreempt(t)) {
2529      -                                if (fssproc->fss_timeleft > -SC_MAX_TICKS) {
2530      -                                        DTRACE_SCHED1(schedctl__nopreempt,
2531      -                                            kthread_t *, t);
2532      -                                        schedctl_set_yield(t, 1);
2533      -                                        thread_unlock_nopreempt(t);
2534      -                                        return;
2535      -                                }
     2442 +                if (t->t_schedctl && schedctl_get_nopreempt(t)) {
     2443 +                        if (fssproc->fss_timeleft > -SC_MAX_TICKS) {
     2444 +                                DTRACE_SCHED1(schedctl__nopreempt,
     2445 +                                    kthread_t *, t);
     2446 +                                schedctl_set_yield(t, 1);
     2447 +                                thread_unlock_nopreempt(t);
     2448 +                                return;
2536 2449                          }
2537      -                        fssproc->fss_flags &= ~FSSRESTORE;
     2450 +                }
     2451 +                fssproc->fss_flags &= ~FSSRESTORE;
2538 2452  
2539      -                        fss_newpri(fssproc, B_TRUE);
2540      -                        new_pri = fssproc->fss_umdpri;
2541      -                        ASSERT(new_pri >= 0 && new_pri <= fss_maxglobpri);
     2453 +                fss_newpri(fssproc, B_TRUE);
     2454 +                new_pri = fssproc->fss_umdpri;
     2455 +                ASSERT(new_pri >= 0 && new_pri <= fss_maxglobpri);
2542 2456  
2543      -                        /*
2544      -                         * When the priority of a thread is changed, it may
2545      -                         * be necessary to adjust its position on a sleep queue
2546      -                         * or dispatch queue. The function thread_change_pri
2547      -                         * accomplishes this.
2548      -                         */
2549      -                        if (thread_change_pri(t, new_pri, 0)) {
2550      -                                if ((t->t_schedflag & TS_LOAD) &&
2551      -                                    (lwp = t->t_lwp) &&
2552      -                                    lwp->lwp_state == LWP_USER)
2553      -                                        t->t_schedflag &= ~TS_DONT_SWAP;
2554      -                                fssproc->fss_timeleft = fss_quantum;
2555      -                        } else {
2556      -                                call_cpu_surrender = B_TRUE;
2557      -                        }
2558      -                } else if (t->t_state == TS_ONPROC &&
2559      -                    t->t_pri < t->t_disp_queue->disp_maxrunpri) {
2560      -                        /*
2561      -                         * If there is a higher-priority thread which is
2562      -                         * waiting for a processor, then thread surrenders
2563      -                         * the processor.
2564      -                         */
     2457 +                /*
     2458 +                 * When the priority of a thread is changed, it may be
     2459 +                 * necessary to adjust its position on a sleep queue or
     2460 +                 * dispatch queue. The function thread_change_pri accomplishes
     2461 +                 * this.
     2462 +                 */
     2463 +                if (thread_change_pri(t, new_pri, 0)) {
     2464 +                        if ((t->t_schedflag & TS_LOAD) &&
     2465 +                            (lwp = t->t_lwp) &&
     2466 +                            lwp->lwp_state == LWP_USER)
     2467 +                                t->t_schedflag &= ~TS_DONT_SWAP;
     2468 +                        fssproc->fss_timeleft = fss_quantum;
     2469 +                } else {
2565 2470                          call_cpu_surrender = B_TRUE;
2566 2471                  }
     2472 +        } else if (t->t_state == TS_ONPROC &&
     2473 +            t->t_pri < t->t_disp_queue->disp_maxrunpri) {
     2474 +                /*
     2475 +                 * If there is a higher-priority thread which is waiting for a
     2476 +                 * processor, then thread surrenders the processor.
     2477 +                 */
     2478 +                call_cpu_surrender = B_TRUE;
2567 2479          }
2568 2480  
2569 2481          if (cpucaps_enforce && 2 * fssproc->fss_timeleft > fss_quantum) {
2570 2482                  /*
2571 2483                   * The thread used more than half of its quantum, so assume that
2572 2484                   * it used the whole quantum.
2573 2485                   *
2574 2486                   * Update thread's priority just before putting it on the wait
2575 2487                   * queue so that it gets charged for the CPU time from its
2576 2488                   * quantum even before that quantum expires.
↓ open down ↓ 34 lines elided ↑ open up ↑
2611 2523  
2612 2524          ASSERT(THREAD_LOCK_HELD(t));
2613 2525          ASSERT(t->t_state == TS_SLEEP);
2614 2526  
2615 2527          fss_active(t);
2616 2528  
2617 2529          t->t_stime = ddi_get_lbolt();           /* time stamp for the swapper */
2618 2530          fssproc = FSSPROC(t);
2619 2531          fssproc->fss_flags &= ~FSSBACKQ;
2620 2532  
2621      -        if (fssproc->fss_flags & FSSKPRI) {
2622      -                /*
2623      -                 * If we already have a kernel priority assigned, then we
2624      -                 * just use it.
2625      -                 */
2626      -                setbackdq(t);
2627      -        } else if (t->t_kpri_req) {
2628      -                /*
2629      -                 * Give thread a priority boost if we were asked.
2630      -                 */
2631      -                fssproc->fss_flags |= FSSKPRI;
2632      -                THREAD_CHANGE_PRI(t, minclsyspri);
2633      -                setbackdq(t);
2634      -                t->t_trapret = 1;       /* so that fss_trapret will run */
2635      -                aston(t);
     2533 +        /* Recalculate the priority. */
     2534 +        if (t->t_disp_time == ddi_get_lbolt()) {
     2535 +                setfrontdq(t);
2636 2536          } else {
2637      -                /*
2638      -                 * Otherwise, we recalculate the priority.
2639      -                 */
2640      -                if (t->t_disp_time == ddi_get_lbolt()) {
2641      -                        setfrontdq(t);
2642      -                } else {
2643      -                        fssproc->fss_timeleft = fss_quantum;
2644      -                        THREAD_CHANGE_PRI(t, fssproc->fss_umdpri);
2645      -                        setbackdq(t);
2646      -                }
     2537 +                fssproc->fss_timeleft = fss_quantum;
     2538 +                THREAD_CHANGE_PRI(t, fssproc->fss_umdpri);
     2539 +                setbackdq(t);
2647 2540          }
2648 2541  }
2649 2542  
2650 2543  /*
2651 2544   * fss_donice() is called when a nice(1) command is issued on the thread to
2652 2545   * alter the priority. The nice(1) command exists in Solaris for compatibility.
2653 2546   * Thread priority adjustments should be done via priocntl(1).
2654 2547   */
2655 2548  static int
2656 2549  fss_donice(kthread_t *t, cred_t *cr, int incr, int *retvalp)
↓ open down ↓ 335 lines elided ↑ open up ↑
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX