4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2013, Joyent, Inc. All rights reserved.
25 */
26
27 #include <sys/types.h>
28 #include <sys/param.h>
29 #include <sys/sysmacros.h>
30 #include <sys/cred.h>
31 #include <sys/proc.h>
32 #include <sys/strsubr.h>
33 #include <sys/priocntl.h>
34 #include <sys/class.h>
35 #include <sys/disp.h>
36 #include <sys/procset.h>
37 #include <sys/debug.h>
38 #include <sys/kmem.h>
39 #include <sys/errno.h>
40 #include <sys/systm.h>
41 #include <sys/schedctl.h>
42 #include <sys/vmsystm.h>
43 #include <sys/atomic.h>
44 #include <sys/project.h>
1356 fssproj_t *fssproj;
1357 fsspri_t fsspri;
1358 pri_t fss_umdpri;
1359 kthread_t *t;
1360 int updated = 0;
1361
1362 mutex_enter(&fss_listlock[i]);
1363 for (fssproc = fss_listhead[i].fss_next; fssproc != &fss_listhead[i];
1364 fssproc = fssproc->fss_next) {
1365 t = fssproc->fss_tp;
1366 /*
1367 * Lock the thread and verify the state.
1368 */
1369 thread_lock(t);
1370 /*
1371 * Skip the thread if it is no longer in the FSS class or
1372 * is running with kernel mode priority.
1373 */
1374 if (t->t_cid != fss_cid)
1375 goto next;
1376 if ((fssproc->fss_flags & FSSKPRI) != 0)
1377 goto next;
1378
1379 fssproj = FSSPROC2FSSPROJ(fssproc);
1380 if (fssproj == NULL)
1381 goto next;
1382
1383 if (fssproj->fssp_shares != 0) {
1384 /*
1385 * Decay fsspri value.
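* The new value is fsspri * fss_nice_decay[fss_nice] / FSS_DECAY_BASE,
* a fixed-point multiply, so accumulated usage decays geometrically
* from one update pass to the next, at a nice-dependent rate.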
1386 */
1387 fsspri = fssproc->fss_fsspri;
1388 fsspri = (fsspri * fss_nice_decay[fssproc->fss_nice]) /
1389 FSS_DECAY_BASE;
1390 fssproc->fss_fsspri = fsspri;
1391 }
1392
1393 if (t->t_schedctl && schedctl_get_nopreempt(t))
1394 goto next;
1395 if (t->t_state != TS_RUN && t->t_state != TS_WAIT) {
1396 /*
1397 * Make next syscall/trap call fss_trapret
1872 fsspset = FSSPROJ2FSSPSET(fssproj);
1873 thread_unlock(pt);
1874
1875 mutex_enter(&fsspset->fssps_lock);
1876 /*
1877 * Initialize child's fssproc structure.
1878 */
1879 thread_lock(pt);
1880 ASSERT(FSSPROJ(pt) == fssproj);
1881 cfssproc->fss_proj = fssproj;
1882 cfssproc->fss_timeleft = fss_quantum;
1883 cfssproc->fss_umdpri = pfssproc->fss_umdpri;
1884 cfssproc->fss_fsspri = 0;
1885 cfssproc->fss_uprilim = pfssproc->fss_uprilim;
1886 cfssproc->fss_upri = pfssproc->fss_upri;
1887 cfssproc->fss_tp = ct;
1888 cfssproc->fss_nice = pfssproc->fss_nice;
1889 cpucaps_sc_init(&cfssproc->fss_caps);
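/*
 * The child inherits the parent's user priority, priority limit, and
 * nice value, but starts with a zero fss_fsspri and freshly initialized
 * CPU caps accounting, so it is not charged for CPU time the parent
 * accumulated before the fork.
 */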
1890
1891 cfssproc->fss_flags =
1892 pfssproc->fss_flags & ~(FSSKPRI | FSSBACKQ | FSSRESTORE);
1893 ct->t_cldata = (void *)cfssproc;
1894 ct->t_schedflag |= TS_RUNQMATCH;
1895 thread_unlock(pt);
1896
1897 fssproj->fssp_threads++;
1898 mutex_exit(&fsspset->fssps_lock);
1899
1900 /*
1901 * Link new structure into fssproc hash table.
1902 */
1903 FSS_LIST_INSERT(cfssproc);
1904 return (0);
1905 }
1906
1907 /*
1908 * Child is placed at back of dispatcher queue and parent gives up processor
1909 * so that the child runs first after the fork. This allows a child that
1910 * execs immediately to break the sharing of copy-on-write pages that have
1911 * no disk home. The parent will get to steal them back rather than
1912 * uselessly copying them.
1923
1924 /*
1925 * Grab the child's p_lock before dropping pidlock to ensure the
1926 * process does not disappear before we set it running.
1927 */
1928 mutex_enter(&cp->p_lock);
1929 continuelwps(cp);
1930 mutex_exit(&cp->p_lock);
1931
1932 mutex_enter(&pp->p_lock);
1933 mutex_exit(&pidlock);
1934 continuelwps(pp);
1935
1936 thread_lock(t);
1937
1938 fssproc = FSSPROC(t);
1939 fss_newpri(fssproc, B_FALSE);
1940 fssproc->fss_timeleft = fss_quantum;
1941 t->t_pri = fssproc->fss_umdpri;
1942 ASSERT(t->t_pri >= 0 && t->t_pri <= fss_maxglobpri);
1943 fssproc->fss_flags &= ~FSSKPRI;
1944 THREAD_TRANSITION(t);
1945
1946 /*
1947 * We don't want to call fss_setrun(t) here because it may call
1948 * fss_active, which we don't need.
1949 */
1950 fssproc->fss_flags &= ~FSSBACKQ;
1951
1952 if (t->t_disp_time != ddi_get_lbolt())
1953 setbackdq(t);
1954 else
1955 setfrontdq(t);
1956
1957 thread_unlock(t);
1958 /*
1959 * It is now safe to drop p_lock, since the scheduling class
1960 * may be changed after this point.
1961 */
1962 mutex_exit(&pp->p_lock);
1963
2022 (reqfssuprilim > fssproc->fss_uprilim) &&
2023 secpolicy_raisepriority(reqpcredp) != 0)
2024 return (EPERM);
2025
2026 /*
2027 * Set fss_nice to the nice value corresponding to the user priority we
2028 * are setting. Note that setting the nice field of the parameter
2029 * struct won't affect upri or nice.
2030 */
2031 nice = NZERO - (reqfssupri * NZERO) / fss_maxupri;
2032 if (nice > FSS_NICE_MAX)
2033 nice = FSS_NICE_MAX;
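/*
 * With this mapping, a requested upri of 0 yields nice == NZERO (the
 * neutral value), upri == fss_maxupri yields nice == 0, and negative
 * upri values map above NZERO, clamped to FSS_NICE_MAX by the check
 * above.
 */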
2034
2035 thread_lock(t);
2036
2037 fssproc->fss_uprilim = reqfssuprilim;
2038 fssproc->fss_upri = reqfssupri;
2039 fssproc->fss_nice = nice;
2040 fss_newpri(fssproc, B_FALSE);
2041
2042 if ((fssproc->fss_flags & FSSKPRI) != 0) {
2043 thread_unlock(t);
2044 return (0);
2045 }
2046
2047 fss_change_priority(t, fssproc);
2048 thread_unlock(t);
2049 return (0);
2050
2051 }
2052
2053 /*
2054 * The thread is being stopped.
2055 */
2056 /*ARGSUSED*/
2057 static void
2058 fss_stop(kthread_t *t, int why, int what)
2059 {
2060 ASSERT(THREAD_LOCK_HELD(t));
2061 ASSERT(t == curthread);
2062
2063 fss_inactive(t);
2064 }
2065
2066 /*
2141
2142 /*
2143 * fss_swapin() returns -1 if the thread is loaded or is not eligible to be
2144 * swapped in. Otherwise, it returns the thread's effective priority based
2145 * on swapout time and size of process (0 <= epri <= SHRT_MAX).
2146 */
2147 /*ARGSUSED*/
2148 static pri_t
2149 fss_swapin(kthread_t *t, int flags)
2150 {
2151 fssproc_t *fssproc = FSSPROC(t);
2152 long epri = -1;
2153 proc_t *pp = ttoproc(t);
2154
2155 ASSERT(THREAD_LOCK_HELD(t));
2156
2157 if (t->t_state == TS_RUN && (t->t_schedflag & TS_LOAD) == 0) {
2158 time_t swapout_time;
2159
2160 swapout_time = (ddi_get_lbolt() - t->t_stime) / hz;
2161 if (INHERITED(t) || (fssproc->fss_flags & FSSKPRI)) {
2162 epri = (long)DISP_PRIO(t) + swapout_time;
2163 } else {
2164 /*
2165 * Threads which have been out for a long time,
2166 * have high user mode priority and are associated
2167 * with a small address space are more deserving.
2168 */
2169 epri = fssproc->fss_umdpri;
2170 ASSERT(epri >= 0 && epri <= fss_maxumdpri);
2171 epri += swapout_time - pp->p_swrss / nz(maxpgio)/2;
2172 }
2173 /*
2174 * Scale epri so that SHRT_MAX / 2 represents zero priority.
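* A thread whose net deservingness works out to zero thus lands
* exactly mid-range. A longer swapout time raises the returned
* swap-in priority, while a larger swapped-out resident set
* (p_swrss) lowers it, before the clamping to [0, SHRT_MAX] below.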
2175 */
2176 epri += SHRT_MAX / 2;
2177 if (epri < 0)
2178 epri = 0;
2179 else if (epri > SHRT_MAX)
2180 epri = SHRT_MAX;
2181 }
2182 return ((pri_t)epri);
2183 }
2184
2185 /*
2186 * fss_swapout() returns -1 if the thread isn't loaded or is not eligible to
2187 * be swapped out. Otherwise, it returns the thread's effective priority
2188 * based on whether the swapper is in softswap or hardswap mode.
2189 */
2190 static pri_t
2191 fss_swapout(kthread_t *t, int flags)
2192 {
2193 fssproc_t *fssproc = FSSPROC(t);
2194 long epri = -1;
2195 proc_t *pp = ttoproc(t);
2196 time_t swapin_time;
2197
2198 ASSERT(THREAD_LOCK_HELD(t));
2199
2200 if (INHERITED(t) ||
2201 (fssproc->fss_flags & FSSKPRI) ||
2202 (t->t_proc_flag & TP_LWPEXIT) ||
2203 (t->t_state & (TS_ZOMB|TS_FREE|TS_STOPPED|TS_ONPROC|TS_WAIT)) ||
2204 !(t->t_schedflag & TS_LOAD) ||
2205 !(SWAP_OK(t)))
2206 return (-1);
2207
2208 ASSERT(t->t_state & (TS_SLEEP | TS_RUN));
2209
2210 swapin_time = (ddi_get_lbolt() - t->t_stime) / hz;
2211
2212 if (flags == SOFTSWAP) {
2213 if (t->t_state == TS_SLEEP && swapin_time > maxslp) {
2214 epri = 0;
2215 } else {
2216 return ((pri_t)epri);
2217 }
2218 } else {
2219 pri_t pri;
2220
2221 if ((t->t_state == TS_SLEEP && swapin_time > fss_minslp) ||
2224 epri = swapin_time -
2225 (rm_asrss(pp->p_as) / nz(maxpgio)/2) - (long)pri;
2226 } else {
2227 return ((pri_t)epri);
2228 }
2229 }
2230
2231 /*
2232 * Scale epri so that SHRT_MAX / 2 represents zero priority.
2233 */
2234 epri += SHRT_MAX / 2;
2235 if (epri < 0)
2236 epri = 0;
2237 else if (epri > SHRT_MAX)
2238 epri = SHRT_MAX;
2239
2240 return ((pri_t)epri);
2241 }
2242
2243 /*
2244 * If the thread is currently at a kernel mode priority (it has slept) and
2245 * is returning to userland, we assign it the appropriate user mode priority
2246 * and time quantum here. If we're lowering the thread's priority below that
2247 * of other runnable threads, we set runrun via cpu_surrender() to cause
2248 * preemption.
2249 */
2250 static void
2251 fss_trapret(kthread_t *t)
2252 {
2253 fssproc_t *fssproc = FSSPROC(t);
2254 cpu_t *cp = CPU;
2255
2256 ASSERT(THREAD_LOCK_HELD(t));
2257 ASSERT(t == curthread);
2258 ASSERT(cp->cpu_dispthread == t);
2259 ASSERT(t->t_state == TS_ONPROC);
2260
2261 t->t_kpri_req = 0;
2262 if (fssproc->fss_flags & FSSKPRI) {
2263 /*
2264 * The thread has blocked in the kernel; restore its user mode priority.
2265 */
2266 THREAD_CHANGE_PRI(t, fssproc->fss_umdpri);
2267 cp->cpu_dispatch_pri = DISP_PRIO(t);
2268 ASSERT(t->t_pri >= 0 && t->t_pri <= fss_maxglobpri);
2269 fssproc->fss_flags &= ~FSSKPRI;
2270
2271 if (DISP_MUST_SURRENDER(t))
2272 cpu_surrender(t);
2273 }
2274
2275 /*
2276 * Swapout lwp if the swapper is waiting for this thread to reach
2277 * a safe point.
2278 */
2279 if (t->t_schedflag & TS_SWAPENQ) {
2280 thread_unlock(t);
2281 swapout_lwp(ttolwp(t));
2282 thread_lock(t);
2283 }
2284 }
2285
2286 /*
2287 * Arrange for thread to be placed in appropriate location on dispatcher queue.
2288 * This is called with the current thread in TS_ONPROC and locked.
2289 */
2290 static void
2291 fss_preempt(kthread_t *t)
2292 {
2293 fssproc_t *fssproc = FSSPROC(t);
2294 klwp_t *lwp;
2295 uint_t flags;
2296
2297 ASSERT(t == curthread);
2298 ASSERT(THREAD_LOCK_HELD(curthread));
2299 ASSERT(t->t_state == TS_ONPROC);
2300
2301 /*
2302 * If preempted in the kernel, make sure the thread has a kernel
2303 * priority if needed.
2304 */
2305 lwp = curthread->t_lwp;
2306 if (!(fssproc->fss_flags & FSSKPRI) && lwp != NULL && t->t_kpri_req) {
2307 fssproc->fss_flags |= FSSKPRI;
2308 THREAD_CHANGE_PRI(t, minclsyspri);
2309 ASSERT(t->t_pri >= 0 && t->t_pri <= fss_maxglobpri);
2310 t->t_trapret = 1; /* so that fss_trapret will run */
2311 aston(t);
2312 }
2313
2314 /*
2315 * This thread may be placed on the wait queue by CPU caps. In this case
2316 * we do not need to do anything until it is removed from the wait queue.
2317 * Do not enforce CPU caps on threads running at a kernel priority.
2318 */
2319 if (CPUCAPS_ON()) {
2320 (void) cpucaps_charge(t, &fssproc->fss_caps,
2321 CPUCAPS_CHARGE_ENFORCE);
2322
2323 if (!(fssproc->fss_flags & FSSKPRI) && CPUCAPS_ENFORCE(t))
2324 return;
2325 }
2326
2327 /*
2328 * If preempted in user-land, mark the thread as swappable because it
2329 * cannot be holding any kernel locks.
2330 */
2331 ASSERT(t->t_schedflag & TS_DONT_SWAP);
2332 if (lwp != NULL && lwp->lwp_state == LWP_USER)
2333 t->t_schedflag &= ~TS_DONT_SWAP;
2334
2335 /*
2336 * Check to see if we're doing "preemption control" here. If
2337 * we are, and if the user has requested that this thread not
2338 * be preempted, and if preemptions haven't been put off for
2339 * too long, let the preemption happen here but try to make
2340 * sure the thread is rescheduled as soon as possible. We do
2341 * this by putting it on the front of the highest priority run
2342 * queue in the FSS class. If the preemption has been put off
2343 * for too long, clear the "nopreempt" bit and let the thread
2344 * be preempted.
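* Note that fss_timeleft keeps being decremented (below zero) by
* fss_tick() while the preemption is held off, so SC_MAX_TICKS bounds
* how long a nopreempt request can postpone preemption.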
2345 */
2346 if (t->t_schedctl && schedctl_get_nopreempt(t)) {
2347 if (fssproc->fss_timeleft > -SC_MAX_TICKS) {
2348 DTRACE_SCHED1(schedctl__nopreempt, kthread_t *, t);
2349 if (!(fssproc->fss_flags & FSSKPRI)) {
2350 /*
2351 * If not already remembered, remember current
2352 * priority for restoration in fss_yield().
2353 */
2354 if (!(fssproc->fss_flags & FSSRESTORE)) {
2355 fssproc->fss_scpri = t->t_pri;
2356 fssproc->fss_flags |= FSSRESTORE;
2357 }
2358 THREAD_CHANGE_PRI(t, fss_maxumdpri);
2359 t->t_schedflag |= TS_DONT_SWAP;
2360 }
2361 schedctl_set_yield(t, 1);
2362 setfrontdq(t);
2363 return;
2364 } else {
2365 if (fssproc->fss_flags & FSSRESTORE) {
2366 THREAD_CHANGE_PRI(t, fssproc->fss_scpri);
2367 fssproc->fss_flags &= ~FSSRESTORE;
2368 }
2369 schedctl_set_nopreempt(t, 0);
2370 DTRACE_SCHED1(schedctl__preempt, kthread_t *, t);
2371 /*
2372 * Fall through and be preempted below.
2373 */
2374 }
2375 }
2376
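/*
 * Placement summary: FSSBACKQ alone requests the back of the queue
 * with a fresh quantum; FSSBACKQ together with FSSKPRI also goes to
 * the back but keeps its current quantum; any other combination
 * returns to the front of the queue.
 */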
2377 flags = fssproc->fss_flags & (FSSBACKQ | FSSKPRI);
2378
2379 if (flags == FSSBACKQ) {
2380 fssproc->fss_timeleft = fss_quantum;
2381 fssproc->fss_flags &= ~FSSBACKQ;
2382 setbackdq(t);
2383 } else if (flags == (FSSBACKQ | FSSKPRI)) {
2384 fssproc->fss_flags &= ~FSSBACKQ;
2385 setbackdq(t);
2386 } else {
2387 setfrontdq(t);
2388 }
2389 }
2390
2391 /*
2392 * Called when a thread is waking up and is to be placed on the run queue.
2393 */
2394 static void
2395 fss_setrun(kthread_t *t)
2396 {
2397 fssproc_t *fssproc = FSSPROC(t);
2398
2399 ASSERT(THREAD_LOCK_HELD(t)); /* t should be in transition */
2400
2401 if (t->t_state == TS_SLEEP || t->t_state == TS_STOPPED)
2402 fss_active(t);
2403
2404 fssproc->fss_timeleft = fss_quantum;
2405
2406 fssproc->fss_flags &= ~FSSBACKQ;
2407 /*
2408 * If the thread was previously running at a kernel priority, keep that
2409 * priority; the value of fss_timeleft doesn't matter then.
2410 */
2411 if ((fssproc->fss_flags & FSSKPRI) == 0)
2412 THREAD_CHANGE_PRI(t, fssproc->fss_umdpri);
2413
2414 if (t->t_disp_time != ddi_get_lbolt())
2415 setbackdq(t);
2416 else
2417 setfrontdq(t);
2418 }
2419
2420 /*
2421 * Prepare thread for sleep. We reset the thread priority so it will run at the
2422 * kernel priority level when it wakes up.
2423 */
2424 static void
2425 fss_sleep(kthread_t *t)
2426 {
2427 fssproc_t *fssproc = FSSPROC(t);
2428
2429 ASSERT(t == curthread);
2430 ASSERT(THREAD_LOCK_HELD(t));
2431
2432 ASSERT(t->t_state == TS_ONPROC);
2433
2434 /*
2435 * Account for time spent on CPU before going to sleep.
2436 */
2437 (void) CPUCAPS_CHARGE(t, &fssproc->fss_caps, CPUCAPS_CHARGE_ENFORCE);
2438
2439 fss_inactive(t);
2440
2441 /*
2442 * Assign a system priority to the thread and arrange for it to be
2443 * retained when the thread is next placed on the run queue (i.e.,
2444 * when it wakes up) instead of being given a new pri. Also arrange
2445 * for trapret processing as the thread leaves the system call so it
2446 * will drop back to normal priority range.
2447 */
2448 if (t->t_kpri_req) {
2449 THREAD_CHANGE_PRI(t, minclsyspri);
2450 fssproc->fss_flags |= FSSKPRI;
2451 t->t_trapret = 1; /* so that fss_trapret will run */
2452 aston(t);
2453 } else if (fssproc->fss_flags & FSSKPRI) {
2454 /*
2455 * The thread has done a THREAD_KPRI_REQUEST(), slept, then
2456 * done THREAD_KPRI_RELEASE() (so t_kpri_req is 0 again), and
2457 * then slept again, all without finishing the current system
2458 * call, so trapret won't have cleared FSSKPRI.
2459 */
2460 fssproc->fss_flags &= ~FSSKPRI;
2461 THREAD_CHANGE_PRI(t, fssproc->fss_umdpri);
2462 if (DISP_MUST_SURRENDER(curthread))
2463 cpu_surrender(t);
2464 }
2465 t->t_stime = ddi_get_lbolt(); /* time stamp for the swapper */
2466 }
2467
2468 /*
2469 * A tick interrupt has occurred on a running thread. Check to see if our
2470 * time slice has expired. We must also clear the TS_DONT_SWAP flag in
2471 * t_schedflag if the thread is eligible to be swapped out.
2472 */
2473 static void
2474 fss_tick(kthread_t *t)
2475 {
2476 fssproc_t *fssproc;
2477 fssproj_t *fssproj;
2478 klwp_t *lwp;
2479 boolean_t call_cpu_surrender = B_FALSE;
2480 boolean_t cpucaps_enforce = B_FALSE;
2481
2482 ASSERT(MUTEX_HELD(&(ttoproc(t))->p_lock));
2483
2484 /*
2485 * It's safe to access fsspset and fssproj structures because we're
2486 * holding our p_lock here.
2487 */
2488 thread_lock(t);
2489 fssproc = FSSPROC(t);
2490 fssproj = FSSPROC2FSSPROJ(fssproc);
2491 if (fssproj != NULL) {
2492 fsspset_t *fsspset = FSSPROJ2FSSPSET(fssproj);
2493 disp_lock_enter_high(&fsspset->fssps_displock);
2494 fssproj->fssp_ticks += fss_nice_tick[fssproc->fss_nice];
2495 fssproj->fssp_tick_cnt++;
2496 fssproc->fss_ticks++;
2497 disp_lock_exit_high(&fsspset->fssps_displock);
2498 }
2499
2500 /*
2501 * Keep track of thread's project CPU usage. Note that projects
2502 * get charged even when threads are running in the kernel.
2503 * Do not surrender CPU if running in the SYS class.
2504 */
2505 if (CPUCAPS_ON()) {
2506 cpucaps_enforce = cpucaps_charge(t,
2507 &fssproc->fss_caps, CPUCAPS_CHARGE_ENFORCE) &&
2508 !(fssproc->fss_flags & FSSKPRI);
2509 }
2510
2511 /*
2512 * Execution time is not tracked for threads running in the
2513 * SYS class.
2514 */
2515 if ((fssproc->fss_flags & FSSKPRI) == 0) {
2516 /*
2517 * If the thread is not in kernel mode, decrement its fss_timeleft.
2518 */
2519 if (--fssproc->fss_timeleft <= 0) {
2520 pri_t new_pri;
2521
2522 /*
2523 * If we're doing preemption control and trying to
2524 * avoid preempting this thread, just note that the
2525 * thread should yield soon and let it keep running
2526 * (unless it's been a while).
2527 */
2528 if (t->t_schedctl && schedctl_get_nopreempt(t)) {
2529 if (fssproc->fss_timeleft > -SC_MAX_TICKS) {
2530 DTRACE_SCHED1(schedctl__nopreempt,
2531 kthread_t *, t);
2532 schedctl_set_yield(t, 1);
2533 thread_unlock_nopreempt(t);
2534 return;
2535 }
2536 }
2537 fssproc->fss_flags &= ~FSSRESTORE;
2538
2539 fss_newpri(fssproc, B_TRUE);
2540 new_pri = fssproc->fss_umdpri;
2541 ASSERT(new_pri >= 0 && new_pri <= fss_maxglobpri);
2542
2543 /*
2544 * When the priority of a thread is changed, it may
2545 * be necessary to adjust its position on a sleep queue
2546 * or dispatch queue. The function thread_change_pri
2547 * accomplishes this.
2548 */
2549 if (thread_change_pri(t, new_pri, 0)) {
2550 if ((t->t_schedflag & TS_LOAD) &&
2551 (lwp = t->t_lwp) &&
2552 lwp->lwp_state == LWP_USER)
2553 t->t_schedflag &= ~TS_DONT_SWAP;
2554 fssproc->fss_timeleft = fss_quantum;
2555 } else {
2556 call_cpu_surrender = B_TRUE;
2557 }
2558 } else if (t->t_state == TS_ONPROC &&
2559 t->t_pri < t->t_disp_queue->disp_maxrunpri) {
2560 /*
2561 * If there is a higher-priority thread which is
2562 * waiting for a processor, then thread surrenders
2563 * the processor.
2564 */
2565 call_cpu_surrender = B_TRUE;
2566 }
2567 }
2568
2569 if (cpucaps_enforce && 2 * fssproc->fss_timeleft > fss_quantum) {
2570 /*
2571 * The thread used more than half of its quantum, so assume that
2572 * it used the whole quantum.
2573 *
2574 * Update thread's priority just before putting it on the wait
2575 * queue so that it gets charged for the CPU time from its
2576 * quantum even before that quantum expires.
2577 */
2578 fss_newpri(fssproc, B_FALSE);
2579 if (t->t_pri != fssproc->fss_umdpri)
2580 fss_change_priority(t, fssproc);
2581
2582 /*
2583 * We need to call cpu_surrender for this thread due to cpucaps
2584 * enforcement, but fss_change_priority may have already done
2585 * so. In this case FSSBACKQ is set and there is no need to call
2586 * cpu_surrender() again.
2587 */
2601 * Processes waking up go to the back of their queue. We don't need to assign
2602 * a time quantum here because the thread is still at a kernel mode priority
2603 * and time slicing is not done for threads running in the kernel after
2604 * sleeping. The proper time quantum will be assigned by fss_trapret before the
2605 * thread returns to user mode.
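* There are three cases below: a thread already at a kernel priority
* is simply requeued; a thread with a pending t_kpri_req is boosted to
* minclsyspri and flagged for fss_trapret() processing; any other
* thread is requeued at its recalculated user mode priority.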
2606 */
2607 static void
2608 fss_wakeup(kthread_t *t)
2609 {
2610 fssproc_t *fssproc;
2611
2612 ASSERT(THREAD_LOCK_HELD(t));
2613 ASSERT(t->t_state == TS_SLEEP);
2614
2615 fss_active(t);
2616
2617 t->t_stime = ddi_get_lbolt(); /* time stamp for the swapper */
2618 fssproc = FSSPROC(t);
2619 fssproc->fss_flags &= ~FSSBACKQ;
2620
2621 if (fssproc->fss_flags & FSSKPRI) {
2622 /*
2623 * If we already have a kernel priority assigned, then we
2624 * just use it.
2625 */
2626 setbackdq(t);
2627 } else if (t->t_kpri_req) {
2628 /*
2629 * Give thread a priority boost if we were asked.
2630 */
2631 fssproc->fss_flags |= FSSKPRI;
2632 THREAD_CHANGE_PRI(t, minclsyspri);
2633 setbackdq(t);
2634 t->t_trapret = 1; /* so that fss_trapret will run */
2635 aston(t);
2636 } else {
2637 /*
2638 * Otherwise, we recalculate the priority.
2639 */
2640 if (t->t_disp_time == ddi_get_lbolt()) {
2641 setfrontdq(t);
2642 } else {
2643 fssproc->fss_timeleft = fss_quantum;
2644 THREAD_CHANGE_PRI(t, fssproc->fss_umdpri);
2645 setbackdq(t);
2646 }
2647 }
2648 }
2649
2650 /*
2651 * fss_donice() is called when a nice(1) command is issued on the thread to
2652 * alter the priority. The nice(1) command exists in Solaris for compatibility.
2653 * Thread priority adjustments should be done via priocntl(1).
2654 */
2655 static int
2656 fss_donice(kthread_t *t, cred_t *cr, int incr, int *retvalp)
2657 {
2658 int newnice;
2659 fssproc_t *fssproc = FSSPROC(t);
2660 fssparms_t fssparms;
2661
2662 /*
2663 * If there is no change to priority, just return current setting.
2664 */
2665 if (incr == 0) {
2666 if (retvalp)
2667 *retvalp = fssproc->fss_nice - NZERO;
|
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2019 Joyent, Inc.
25 */
26
27 #include <sys/types.h>
28 #include <sys/param.h>
29 #include <sys/sysmacros.h>
30 #include <sys/cred.h>
31 #include <sys/proc.h>
32 #include <sys/strsubr.h>
33 #include <sys/priocntl.h>
34 #include <sys/class.h>
35 #include <sys/disp.h>
36 #include <sys/procset.h>
37 #include <sys/debug.h>
38 #include <sys/kmem.h>
39 #include <sys/errno.h>
40 #include <sys/systm.h>
41 #include <sys/schedctl.h>
42 #include <sys/vmsystm.h>
43 #include <sys/atomic.h>
44 #include <sys/project.h>
1356 fssproj_t *fssproj;
1357 fsspri_t fsspri;
1358 pri_t fss_umdpri;
1359 kthread_t *t;
1360 int updated = 0;
1361
1362 mutex_enter(&fss_listlock[i]);
1363 for (fssproc = fss_listhead[i].fss_next; fssproc != &fss_listhead[i];
1364 fssproc = fssproc->fss_next) {
1365 t = fssproc->fss_tp;
1366 /*
1367 * Lock the thread and verify the state.
1368 */
1369 thread_lock(t);
1370 /*
1371 * Skip the thread if it is no longer in the FSS class.
1373 */
1374 if (t->t_cid != fss_cid)
1375 goto next;
1376
1377 fssproj = FSSPROC2FSSPROJ(fssproc);
1378 if (fssproj == NULL)
1379 goto next;
1380
1381 if (fssproj->fssp_shares != 0) {
1382 /*
1383 * Decay fsspri value.
1384 */
1385 fsspri = fssproc->fss_fsspri;
1386 fsspri = (fsspri * fss_nice_decay[fssproc->fss_nice]) /
1387 FSS_DECAY_BASE;
1388 fssproc->fss_fsspri = fsspri;
1389 }
1390
1391 if (t->t_schedctl && schedctl_get_nopreempt(t))
1392 goto next;
1393 if (t->t_state != TS_RUN && t->t_state != TS_WAIT) {
1394 /*
1395 * Make next syscall/trap call fss_trapret
1870 fsspset = FSSPROJ2FSSPSET(fssproj);
1871 thread_unlock(pt);
1872
1873 mutex_enter(&fsspset->fssps_lock);
1874 /*
1875 * Initialize child's fssproc structure.
1876 */
1877 thread_lock(pt);
1878 ASSERT(FSSPROJ(pt) == fssproj);
1879 cfssproc->fss_proj = fssproj;
1880 cfssproc->fss_timeleft = fss_quantum;
1881 cfssproc->fss_umdpri = pfssproc->fss_umdpri;
1882 cfssproc->fss_fsspri = 0;
1883 cfssproc->fss_uprilim = pfssproc->fss_uprilim;
1884 cfssproc->fss_upri = pfssproc->fss_upri;
1885 cfssproc->fss_tp = ct;
1886 cfssproc->fss_nice = pfssproc->fss_nice;
1887 cpucaps_sc_init(&cfssproc->fss_caps);
1888
1889 cfssproc->fss_flags =
1890 pfssproc->fss_flags & ~(FSSBACKQ | FSSRESTORE);
1891 ct->t_cldata = (void *)cfssproc;
1892 ct->t_schedflag |= TS_RUNQMATCH;
1893 thread_unlock(pt);
1894
1895 fssproj->fssp_threads++;
1896 mutex_exit(&fsspset->fssps_lock);
1897
1898 /*
1899 * Link new structure into fssproc hash table.
1900 */
1901 FSS_LIST_INSERT(cfssproc);
1902 return (0);
1903 }
1904
1905 /*
1906 * Child is placed at back of dispatcher queue and parent gives up processor
1907 * so that the child runs first after the fork. This allows a child that
1908 * execs immediately to break the sharing of copy-on-write pages that have
1909 * no disk home. The parent will get to steal them back rather than
1910 * uselessly copying them.
1921
1922 /*
1923 * Grab the child's p_lock before dropping pidlock to ensure the
1924 * process does not disappear before we set it running.
1925 */
1926 mutex_enter(&cp->p_lock);
1927 continuelwps(cp);
1928 mutex_exit(&cp->p_lock);
1929
1930 mutex_enter(&pp->p_lock);
1931 mutex_exit(&pidlock);
1932 continuelwps(pp);
1933
1934 thread_lock(t);
1935
1936 fssproc = FSSPROC(t);
1937 fss_newpri(fssproc, B_FALSE);
1938 fssproc->fss_timeleft = fss_quantum;
1939 t->t_pri = fssproc->fss_umdpri;
1940 ASSERT(t->t_pri >= 0 && t->t_pri <= fss_maxglobpri);
1941 THREAD_TRANSITION(t);
1942
1943 /*
1944 * We don't want to call fss_setrun(t) here because it may call
1945 * fss_active, which we don't need.
1946 */
1947 fssproc->fss_flags &= ~FSSBACKQ;
1948
1949 if (t->t_disp_time != ddi_get_lbolt())
1950 setbackdq(t);
1951 else
1952 setfrontdq(t);
1953
1954 thread_unlock(t);
1955 /*
1956 * It is now safe to drop p_lock, since the scheduling class
1957 * may be changed after this point.
1958 */
1959 mutex_exit(&pp->p_lock);
1960
2019 (reqfssuprilim > fssproc->fss_uprilim) &&
2020 secpolicy_raisepriority(reqpcredp) != 0)
2021 return (EPERM);
2022
2023 /*
2024 * Set fss_nice to the nice value corresponding to the user priority we
2025 * are setting. Note that setting the nice field of the parameter
2026 * struct won't affect upri or nice.
2027 */
2028 nice = NZERO - (reqfssupri * NZERO) / fss_maxupri;
2029 if (nice > FSS_NICE_MAX)
2030 nice = FSS_NICE_MAX;
2031
2032 thread_lock(t);
2033
2034 fssproc->fss_uprilim = reqfssuprilim;
2035 fssproc->fss_upri = reqfssupri;
2036 fssproc->fss_nice = nice;
2037 fss_newpri(fssproc, B_FALSE);
2038
2039 fss_change_priority(t, fssproc);
2040 thread_unlock(t);
2041 return (0);
2042
2043 }
2044
2045 /*
2046 * The thread is being stopped.
2047 */
2048 /*ARGSUSED*/
2049 static void
2050 fss_stop(kthread_t *t, int why, int what)
2051 {
2052 ASSERT(THREAD_LOCK_HELD(t));
2053 ASSERT(t == curthread);
2054
2055 fss_inactive(t);
2056 }
2057
2058 /*
2133
2134 /*
2135 * fss_swapin() returns -1 if the thread is loaded or is not eligible to be
2136 * swapped in. Otherwise, it returns the thread's effective priority based
2137 * on swapout time and size of process (0 <= epri <= SHRT_MAX).
2138 */
2139 /*ARGSUSED*/
2140 static pri_t
2141 fss_swapin(kthread_t *t, int flags)
2142 {
2143 fssproc_t *fssproc = FSSPROC(t);
2144 long epri = -1;
2145 proc_t *pp = ttoproc(t);
2146
2147 ASSERT(THREAD_LOCK_HELD(t));
2148
2149 if (t->t_state == TS_RUN && (t->t_schedflag & TS_LOAD) == 0) {
2150 time_t swapout_time;
2151
2152 swapout_time = (ddi_get_lbolt() - t->t_stime) / hz;
2153 if (INHERITED(t)) {
2154 epri = (long)DISP_PRIO(t) + swapout_time;
2155 } else {
2156 /*
2157 * Threads which have been out for a long time,
2158 * have high user mode priority and are associated
2159 * with a small address space are more deserving.
2160 */
2161 epri = fssproc->fss_umdpri;
2162 ASSERT(epri >= 0 && epri <= fss_maxumdpri);
2163 epri += swapout_time - pp->p_swrss / nz(maxpgio)/2;
2164 }
2165 /*
2166 * Scale epri so that SHRT_MAX / 2 represents zero priority.
2167 */
2168 epri += SHRT_MAX / 2;
2169 if (epri < 0)
2170 epri = 0;
2171 else if (epri > SHRT_MAX)
2172 epri = SHRT_MAX;
2173 }
2174 return ((pri_t)epri);
2175 }
2176
2177 /*
2178 * fss_swapout() returns -1 if the thread isn't loaded or is not eligible to
2179 * be swapped out. Otherwise, it returns the thread's effective priority
2180 * based on whether the swapper is in softswap or hardswap mode.
2181 */
2182 static pri_t
2183 fss_swapout(kthread_t *t, int flags)
2184 {
2185 long epri = -1;
2186 proc_t *pp = ttoproc(t);
2187 time_t swapin_time;
2188
2189 ASSERT(THREAD_LOCK_HELD(t));
2190
2191 if (INHERITED(t) ||
2192 (t->t_proc_flag & TP_LWPEXIT) ||
2193 (t->t_state & (TS_ZOMB|TS_FREE|TS_STOPPED|TS_ONPROC|TS_WAIT)) ||
2194 !(t->t_schedflag & TS_LOAD) ||
2195 !(SWAP_OK(t)))
2196 return (-1);
2197
2198 ASSERT(t->t_state & (TS_SLEEP | TS_RUN));
2199
2200 swapin_time = (ddi_get_lbolt() - t->t_stime) / hz;
2201
2202 if (flags == SOFTSWAP) {
2203 if (t->t_state == TS_SLEEP && swapin_time > maxslp) {
2204 epri = 0;
2205 } else {
2206 return ((pri_t)epri);
2207 }
2208 } else {
2209 pri_t pri;
2210
2211 if ((t->t_state == TS_SLEEP && swapin_time > fss_minslp) ||
2214 epri = swapin_time -
2215 (rm_asrss(pp->p_as) / nz(maxpgio)/2) - (long)pri;
2216 } else {
2217 return ((pri_t)epri);
2218 }
2219 }
2220
2221 /*
2222 * Scale epri so that SHRT_MAX / 2 represents zero priority.
2223 */
2224 epri += SHRT_MAX / 2;
2225 if (epri < 0)
2226 epri = 0;
2227 else if (epri > SHRT_MAX)
2228 epri = SHRT_MAX;
2229
2230 return ((pri_t)epri);
2231 }
2232
2233 /*
2234 * Run swap-out checks when returning to userspace.
2235 */
2236 static void
2237 fss_trapret(kthread_t *t)
2238 {
2239 cpu_t *cp = CPU;
2240
2241 ASSERT(THREAD_LOCK_HELD(t));
2242 ASSERT(t == curthread);
2243 ASSERT(cp->cpu_dispthread == t);
2244 ASSERT(t->t_state == TS_ONPROC);
2245
2246 /*
2247 * Swapout lwp if the swapper is waiting for this thread to reach
2248 * a safe point.
2249 */
2250 if (t->t_schedflag & TS_SWAPENQ) {
2251 thread_unlock(t);
2252 swapout_lwp(ttolwp(t));
2253 thread_lock(t);
2254 }
2255 }
2256
2257 /*
2258 * Arrange for thread to be placed in appropriate location on dispatcher queue.
2259 * This is called with the current thread in TS_ONPROC and locked.
2260 */
2261 static void
2262 fss_preempt(kthread_t *t)
2263 {
2264 fssproc_t *fssproc = FSSPROC(t);
2265 klwp_t *lwp;
2266 uint_t flags;
2267
2268 ASSERT(t == curthread);
2269 ASSERT(THREAD_LOCK_HELD(curthread));
2270 ASSERT(t->t_state == TS_ONPROC);
2271
2272 /*
2273 * This thread may be placed on the wait queue by CPU caps. In this
2274 * case we do not need to do anything until it is removed from the
2275 * wait queue.
2276 */
2277 if (CPUCAPS_ON()) {
2278 (void) cpucaps_charge(t, &fssproc->fss_caps,
2279 CPUCAPS_CHARGE_ENFORCE);
2280
2281 if (CPUCAPS_ENFORCE(t))
2282 return;
2283 }
2284
2285 /*
2286 * If preempted in user-land, mark the thread as swappable because it
2287 * cannot be holding any kernel locks.
2288 */
2289 ASSERT(t->t_schedflag & TS_DONT_SWAP);
2290 lwp = ttolwp(t);
2291 if (lwp != NULL && lwp->lwp_state == LWP_USER)
2292 t->t_schedflag &= ~TS_DONT_SWAP;
2293
2294 /*
2295 * Check to see if we're doing "preemption control" here. If
2296 * we are, and if the user has requested that this thread not
2297 * be preempted, and if preemptions haven't been put off for
2298 * too long, let the preemption happen here but try to make
2299 * sure the thread is rescheduled as soon as possible. We do
2300 * this by putting it on the front of the highest priority run
2301 * queue in the FSS class. If the preemption has been put off
2302 * for too long, clear the "nopreempt" bit and let the thread
2303 * be preempted.
2304 */
2305 if (t->t_schedctl && schedctl_get_nopreempt(t)) {
2306 if (fssproc->fss_timeleft > -SC_MAX_TICKS) {
2307 DTRACE_SCHED1(schedctl__nopreempt, kthread_t *, t);
2308 /*
2309 * If not already remembered, remember current
2310 * priority for restoration in fss_yield().
2311 */
2312 if (!(fssproc->fss_flags & FSSRESTORE)) {
2313 fssproc->fss_scpri = t->t_pri;
2314 fssproc->fss_flags |= FSSRESTORE;
2315 }
2316 THREAD_CHANGE_PRI(t, fss_maxumdpri);
2317 t->t_schedflag |= TS_DONT_SWAP;
2318 schedctl_set_yield(t, 1);
2319 setfrontdq(t);
2320 return;
2321 } else {
2322 if (fssproc->fss_flags & FSSRESTORE) {
2323 THREAD_CHANGE_PRI(t, fssproc->fss_scpri);
2324 fssproc->fss_flags &= ~FSSRESTORE;
2325 }
2326 schedctl_set_nopreempt(t, 0);
2327 DTRACE_SCHED1(schedctl__preempt, kthread_t *, t);
2328 /*
2329 * Fall through and be preempted below.
2330 */
2331 }
2332 }
2333
2334 flags = fssproc->fss_flags & FSSBACKQ;
2335
2336 if (flags == FSSBACKQ) {
2337 fssproc->fss_timeleft = fss_quantum;
2338 fssproc->fss_flags &= ~FSSBACKQ;
2339 setbackdq(t);
2340 } else {
2341 setfrontdq(t);
2342 }
2343 }
2344
2345 /*
2346 * Called when a thread is waking up and is to be placed on the run queue.
2347 */
2348 static void
2349 fss_setrun(kthread_t *t)
2350 {
2351 fssproc_t *fssproc = FSSPROC(t);
2352
2353 ASSERT(THREAD_LOCK_HELD(t)); /* t should be in transition */
2354
2355 if (t->t_state == TS_SLEEP || t->t_state == TS_STOPPED)
2356 fss_active(t);
2357
2358 fssproc->fss_timeleft = fss_quantum;
2359
2360 fssproc->fss_flags &= ~FSSBACKQ;
2361 THREAD_CHANGE_PRI(t, fssproc->fss_umdpri);
2362
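/*
 * A t_disp_time equal to the current lbolt means the thread was on a
 * dispatch queue during this very tick; such a thread resumes at the
 * front of its queue, all others go to the back.
 */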
2363 if (t->t_disp_time != ddi_get_lbolt())
2364 setbackdq(t);
2365 else
2366 setfrontdq(t);
2367 }
2368
2369 /*
2370 * Prepare thread for sleep.
2371 */
2372 static void
2373 fss_sleep(kthread_t *t)
2374 {
2375 fssproc_t *fssproc = FSSPROC(t);
2376
2377 ASSERT(t == curthread);
2378 ASSERT(THREAD_LOCK_HELD(t));
2379
2380 ASSERT(t->t_state == TS_ONPROC);
2381
2382 /*
2383 * Account for time spent on CPU before going to sleep.
2384 */
2385 (void) CPUCAPS_CHARGE(t, &fssproc->fss_caps, CPUCAPS_CHARGE_ENFORCE);
2386
2387 fss_inactive(t);
2388 t->t_stime = ddi_get_lbolt(); /* time stamp for the swapper */
2389 }
2390
2391 /*
2392 * A tick interrupt has occurred on a running thread. Check to see if our
2393 * time slice has expired. We must also clear the TS_DONT_SWAP flag in
2394 * t_schedflag if the thread is eligible to be swapped out.
2395 */
2396 static void
2397 fss_tick(kthread_t *t)
2398 {
2399 fssproc_t *fssproc;
2400 fssproj_t *fssproj;
2401 klwp_t *lwp;
2402 boolean_t call_cpu_surrender = B_FALSE;
2403 boolean_t cpucaps_enforce = B_FALSE;
2404
2405 ASSERT(MUTEX_HELD(&(ttoproc(t))->p_lock));
2406
2407 /*
2408 * It's safe to access fsspset and fssproj structures because we're
2409 * holding our p_lock here.
2410 */
2411 thread_lock(t);
2412 fssproc = FSSPROC(t);
2413 fssproj = FSSPROC2FSSPROJ(fssproc);
2414 if (fssproj != NULL) {
2415 fsspset_t *fsspset = FSSPROJ2FSSPSET(fssproj);
2416 disp_lock_enter_high(&fsspset->fssps_displock);
2417 fssproj->fssp_ticks += fss_nice_tick[fssproc->fss_nice];
2418 fssproj->fssp_tick_cnt++;
2419 fssproc->fss_ticks++;
2420 disp_lock_exit_high(&fsspset->fssps_displock);
2421 }
2422
2423 /*
2424 * Keep track of thread's project CPU usage. Note that projects
2425 * get charged even when threads are running in the kernel.
2427 */
2428 if (CPUCAPS_ON()) {
2429 cpucaps_enforce = cpucaps_charge(t, &fssproc->fss_caps,
2430 CPUCAPS_CHARGE_ENFORCE);
2431 }
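/*
 * cpucaps_charge() presumably returns true once accumulated usage has
 * pushed the project or zone over its cap; the result is acted on
 * further below, after the quantum bookkeeping, so the thread's
 * priority can be refreshed before enforcement.
 */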
2432
2433 if (--fssproc->fss_timeleft <= 0) {
2434 pri_t new_pri;
2435
2436 /*
2437 * If we're doing preemption control and trying to avoid
2438 * preempting this thread, just note that the thread should
2439 * yield soon and let it keep running (unless it's been a
2440 * while).
2441 */
2442 if (t->t_schedctl && schedctl_get_nopreempt(t)) {
2443 if (fssproc->fss_timeleft > -SC_MAX_TICKS) {
2444 DTRACE_SCHED1(schedctl__nopreempt,
2445 kthread_t *, t);
2446 schedctl_set_yield(t, 1);
2447 thread_unlock_nopreempt(t);
2448 return;
2449 }
2450 }
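/*
 * Any priority saved by the nopreempt path for restoration in
 * fss_yield() is stale once a new priority is computed below, so drop
 * the FSSRESTORE flag here.
 */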
2451 fssproc->fss_flags &= ~FSSRESTORE;
2452
2453 fss_newpri(fssproc, B_TRUE);
2454 new_pri = fssproc->fss_umdpri;
2455 ASSERT(new_pri >= 0 && new_pri <= fss_maxglobpri);
2456
2457 /*
2458 * When the priority of a thread is changed, it may be
2459 * necessary to adjust its position on a sleep queue or
2460 * dispatch queue. The function thread_change_pri accomplishes
2461 * this.
2462 */
2463 if (thread_change_pri(t, new_pri, 0)) {
2464 if ((t->t_schedflag & TS_LOAD) &&
2465 (lwp = t->t_lwp) &&
2466 lwp->lwp_state == LWP_USER)
2467 t->t_schedflag &= ~TS_DONT_SWAP;
2468 fssproc->fss_timeleft = fss_quantum;
2469 } else {
2470 call_cpu_surrender = B_TRUE;
2471 }
2472 } else if (t->t_state == TS_ONPROC &&
2473 t->t_pri < t->t_disp_queue->disp_maxrunpri) {
2474 /*
2475 * If there is a higher-priority thread which is waiting for a
2476 * processor, then thread surrenders the processor.
2477 */
2478 call_cpu_surrender = B_TRUE;
2479 }
2480
2481 if (cpucaps_enforce && 2 * fssproc->fss_timeleft > fss_quantum) {
2482 /*
2483 * The thread used more than half of its quantum, so assume that
2484 * it used the whole quantum.
2485 *
2486 * Update thread's priority just before putting it on the wait
2487 * queue so that it gets charged for the CPU time from its
2488 * quantum even before that quantum expires.
2489 */
2490 fss_newpri(fssproc, B_FALSE);
2491 if (t->t_pri != fssproc->fss_umdpri)
2492 fss_change_priority(t, fssproc);
2493
2494 /*
2495 * We need to call cpu_surrender for this thread due to cpucaps
2496 * enforcement, but fss_change_priority may have already done
2497 * so. In this case FSSBACKQ is set and there is no need to call
2498 * cpu_surrender() again.
2499 */
2513 * Processes waking up go to the back of their dispatch queue with a
2514 * fresh time quantum and their recalculated user-mode priority. The
2515 * one exception is a thread that was on a dispatch queue during the
2516 * current tick; it is placed back at the front of its queue and
2517 * keeps its remaining quantum.
2518 */
2519 static void
2520 fss_wakeup(kthread_t *t)
2521 {
2522 fssproc_t *fssproc;
2523
2524 ASSERT(THREAD_LOCK_HELD(t));
2525 ASSERT(t->t_state == TS_SLEEP);
2526
2527 fss_active(t);
2528
2529 t->t_stime = ddi_get_lbolt(); /* time stamp for the swapper */
2530 fssproc = FSSPROC(t);
2531 fssproc->fss_flags &= ~FSSBACKQ;
2532
2533 /* Recalculate the priority. */
2534 if (t->t_disp_time == ddi_get_lbolt()) {
2535 setfrontdq(t);
2536 } else {
2537 fssproc->fss_timeleft = fss_quantum;
2538 THREAD_CHANGE_PRI(t, fssproc->fss_umdpri);
2539 setbackdq(t);
2540 }
2541 }
2542
2543 /*
2544 * fss_donice() is called when a nice(1) command is issued on the thread to
2545 * alter the priority. The nice(1) command exists in Solaris for compatibility.
2546 * Thread priority adjustments should be done via priocntl(1).
2547 */
2548 static int
2549 fss_donice(kthread_t *t, cred_t *cr, int incr, int *retvalp)
2550 {
2551 int newnice;
2552 fssproc_t *fssproc = FSSPROC(t);
2553 fssparms_t fssparms;
2554
2555 /*
2556 * If there is no change to priority, just return current setting.
2557 */
2558 if (incr == 0) {
2559 if (retvalp)
2560 *retvalp = fssproc->fss_nice - NZERO;
|