Print this page
OS-7753 THREAD_KPRI_RELEASE does nothing of the sort
Reviewed by: Bryan Cantrill <bryan@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
@@ -19,11 +19,11 @@
* CDDL HEADER END
*/
/*
* Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2013, Joyent, Inc. All rights reserved.
+ * Copyright 2019 Joyent, Inc.
*/
#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
@@ -1371,12 +1371,10 @@
* Skip the thread if it is no longer in the FSS class.
*/
if (t->t_cid != fss_cid)
goto next;
- if ((fssproc->fss_flags & FSSKPRI) != 0)
- goto next;
fssproj = FSSPROC2FSSPROJ(fssproc);
if (fssproj == NULL)
goto next;
@@ -1887,11 +1885,11 @@
cfssproc->fss_tp = ct;
cfssproc->fss_nice = pfssproc->fss_nice;
cpucaps_sc_init(&cfssproc->fss_caps);
cfssproc->fss_flags =
- pfssproc->fss_flags & ~(FSSKPRI | FSSBACKQ | FSSRESTORE);
+ pfssproc->fss_flags & ~(FSSBACKQ | FSSRESTORE);
ct->t_cldata = (void *)cfssproc;
ct->t_schedflag |= TS_RUNQMATCH;
thread_unlock(pt);
fssproj->fssp_threads++;
@@ -1938,11 +1936,10 @@
fssproc = FSSPROC(t);
fss_newpri(fssproc, B_FALSE);
fssproc->fss_timeleft = fss_quantum;
t->t_pri = fssproc->fss_umdpri;
ASSERT(t->t_pri >= 0 && t->t_pri <= fss_maxglobpri);
- fssproc->fss_flags &= ~FSSKPRI;
THREAD_TRANSITION(t);
/*
* We don't want to call fss_setrun(t) here because it may call
* fss_active, which we don't need.
@@ -2037,15 +2034,10 @@
fssproc->fss_uprilim = reqfssuprilim;
fssproc->fss_upri = reqfssupri;
fssproc->fss_nice = nice;
fss_newpri(fssproc, B_FALSE);
- if ((fssproc->fss_flags & FSSKPRI) != 0) {
- thread_unlock(t);
- return (0);
- }
-
fss_change_priority(t, fssproc);
thread_unlock(t);
return (0);
}
@@ -2156,11 +2148,11 @@
if (t->t_state == TS_RUN && (t->t_schedflag & TS_LOAD) == 0) {
time_t swapout_time;
swapout_time = (ddi_get_lbolt() - t->t_stime) / hz;
- if (INHERITED(t) || (fssproc->fss_flags & FSSKPRI)) {
+ if (INHERITED(t)) {
epri = (long)DISP_PRIO(t) + swapout_time;
} else {
/*
* Threads which have been out for a long time,
* have high user mode priority and are associated
@@ -2188,19 +2180,17 @@
* based on if the swapper is in softswap or hardswap mode.
*/
static pri_t
fss_swapout(kthread_t *t, int flags)
{
- fssproc_t *fssproc = FSSPROC(t);
long epri = -1;
proc_t *pp = ttoproc(t);
time_t swapin_time;
ASSERT(THREAD_LOCK_HELD(t));
if (INHERITED(t) ||
- (fssproc->fss_flags & FSSKPRI) ||
(t->t_proc_flag & TP_LWPEXIT) ||
(t->t_state & (TS_ZOMB|TS_FREE|TS_STOPPED|TS_ONPROC|TS_WAIT)) ||
!(t->t_schedflag & TS_LOAD) ||
!(SWAP_OK(t)))
return (-1);
@@ -2239,42 +2229,23 @@
return ((pri_t)epri);
}
/*
- * If thread is currently at a kernel mode priority (has slept) and is
- * returning to the userland we assign it the appropriate user mode priority
- * and time quantum here. If we're lowering the thread's priority below that
- * of other runnable threads then we will set runrun via cpu_surrender() to
- * cause preemption.
+ * Run swap-out checks when returning to userspace.
*/
static void
fss_trapret(kthread_t *t)
{
- fssproc_t *fssproc = FSSPROC(t);
cpu_t *cp = CPU;
ASSERT(THREAD_LOCK_HELD(t));
ASSERT(t == curthread);
ASSERT(cp->cpu_dispthread == t);
ASSERT(t->t_state == TS_ONPROC);
- t->t_kpri_req = 0;
- if (fssproc->fss_flags & FSSKPRI) {
/*
- * If thread has blocked in the kernel
- */
- THREAD_CHANGE_PRI(t, fssproc->fss_umdpri);
- cp->cpu_dispatch_pri = DISP_PRIO(t);
- ASSERT(t->t_pri >= 0 && t->t_pri <= fss_maxglobpri);
- fssproc->fss_flags &= ~FSSKPRI;
-
- if (DISP_MUST_SURRENDER(t))
- cpu_surrender(t);
- }
-
- /*
* Swapout lwp if the swapper is waiting for this thread to reach
* a safe point.
*/
if (t->t_schedflag & TS_SWAPENQ) {
thread_unlock(t);
@@ -2297,40 +2268,28 @@
ASSERT(t == curthread);
ASSERT(THREAD_LOCK_HELD(curthread));
ASSERT(t->t_state == TS_ONPROC);
/*
- * If preempted in the kernel, make sure the thread has a kernel
- * priority if needed.
- */
- lwp = curthread->t_lwp;
- if (!(fssproc->fss_flags & FSSKPRI) && lwp != NULL && t->t_kpri_req) {
- fssproc->fss_flags |= FSSKPRI;
- THREAD_CHANGE_PRI(t, minclsyspri);
- ASSERT(t->t_pri >= 0 && t->t_pri <= fss_maxglobpri);
- t->t_trapret = 1; /* so that fss_trapret will run */
- aston(t);
- }
-
- /*
* This thread may be placed on wait queue by CPU Caps. In this case we
* do not need to do anything until it is removed from the wait queue.
*/
if (CPUCAPS_ON()) {
(void) cpucaps_charge(t, &fssproc->fss_caps,
CPUCAPS_CHARGE_ENFORCE);
- if (!(fssproc->fss_flags & FSSKPRI) && CPUCAPS_ENFORCE(t))
+ if (CPUCAPS_ENFORCE(t))
return;
}
/*
* If preempted in user-land mark the thread as swappable because it
* cannot be holding any kernel locks.
*/
ASSERT(t->t_schedflag & TS_DONT_SWAP);
+ lwp = ttolwp(t);
if (lwp != NULL && lwp->lwp_state == LWP_USER)
t->t_schedflag &= ~TS_DONT_SWAP;
/*
* Check to see if we're doing "preemption control" here. If
@@ -2344,11 +2303,10 @@
* be preempted.
*/
if (t->t_schedctl && schedctl_get_nopreempt(t)) {
if (fssproc->fss_timeleft > -SC_MAX_TICKS) {
DTRACE_SCHED1(schedctl__nopreempt, kthread_t *, t);
- if (!(fssproc->fss_flags & FSSKPRI)) {
/*
* If not already remembered, remember current
* priority for restoration in fss_yield().
*/
if (!(fssproc->fss_flags & FSSRESTORE)) {
@@ -2355,11 +2313,10 @@
fssproc->fss_scpri = t->t_pri;
fssproc->fss_flags |= FSSRESTORE;
}
THREAD_CHANGE_PRI(t, fss_maxumdpri);
t->t_schedflag |= TS_DONT_SWAP;
- }
schedctl_set_yield(t, 1);
setfrontdq(t);
return;
} else {
if (fssproc->fss_flags & FSSRESTORE) {
@@ -2372,19 +2329,16 @@
* Fall through and be preempted below.
*/
}
}
- flags = fssproc->fss_flags & (FSSBACKQ | FSSKPRI);
+ flags = fssproc->fss_flags & FSSBACKQ;
if (flags == FSSBACKQ) {
fssproc->fss_timeleft = fss_quantum;
fssproc->fss_flags &= ~FSSBACKQ;
setbackdq(t);
- } else if (flags == (FSSBACKQ | FSSKPRI)) {
- fssproc->fss_flags &= ~FSSBACKQ;
- setbackdq(t);
} else {
setfrontdq(t);
}
}
@@ -2402,26 +2356,20 @@
fss_active(t);
fssproc->fss_timeleft = fss_quantum;
fssproc->fss_flags &= ~FSSBACKQ;
- /*
- * If previously were running at the kernel priority then keep that
- * priority and the fss_timeleft doesn't matter.
- */
- if ((fssproc->fss_flags & FSSKPRI) == 0)
THREAD_CHANGE_PRI(t, fssproc->fss_umdpri);
if (t->t_disp_time != ddi_get_lbolt())
setbackdq(t);
else
setfrontdq(t);
}
/*
- * Prepare thread for sleep. We reset the thread priority so it will run at the
- * kernel priority level when it wakes up.
+ * Prepare thread for sleep.
*/
static void
fss_sleep(kthread_t *t)
{
fssproc_t *fssproc = FSSPROC(t);
@@ -2435,35 +2383,10 @@
* Account for time spent on CPU before going to sleep.
*/
(void) CPUCAPS_CHARGE(t, &fssproc->fss_caps, CPUCAPS_CHARGE_ENFORCE);
fss_inactive(t);
-
- /*
- * Assign a system priority to the thread and arrange for it to be
- * retained when the thread is next placed on the run queue (i.e.,
- * when it wakes up) instead of being given a new pri. Also arrange
- * for trapret processing as the thread leaves the system call so it
- * will drop back to normal priority range.
- */
- if (t->t_kpri_req) {
- THREAD_CHANGE_PRI(t, minclsyspri);
- fssproc->fss_flags |= FSSKPRI;
- t->t_trapret = 1; /* so that fss_trapret will run */
- aston(t);
- } else if (fssproc->fss_flags & FSSKPRI) {
- /*
- * The thread has done a THREAD_KPRI_REQUEST(), slept, then
- * done THREAD_KPRI_RELEASE() (so no t_kpri_req is 0 again),
- * then slept again all without finishing the current system
- * call so trapret won't have cleared FSSKPRI
- */
- fssproc->fss_flags &= ~FSSKPRI;
- THREAD_CHANGE_PRI(t, fssproc->fss_umdpri);
- if (DISP_MUST_SURRENDER(curthread))
- cpu_surrender(t);
- }
t->t_stime = ddi_get_lbolt(); /* time stamp for the swapper */
}
/*
* A tick interrupt has occurred on a running thread. Check to see if our
@@ -2501,31 +2424,22 @@
* Keep track of thread's project CPU usage. Note that projects
* get charged even when threads are running in the kernel.
*/
if (CPUCAPS_ON()) {
- cpucaps_enforce = cpucaps_charge(t,
- &fssproc->fss_caps, CPUCAPS_CHARGE_ENFORCE) &&
- !(fssproc->fss_flags & FSSKPRI);
+ cpucaps_enforce = cpucaps_charge(t, &fssproc->fss_caps,
+ CPUCAPS_CHARGE_ENFORCE);
}
- /*
- * A thread's execution time for threads running in the SYS class
- * is not tracked.
- */
- if ((fssproc->fss_flags & FSSKPRI) == 0) {
- /*
- * If thread is not in kernel mode, decrement its fss_timeleft
- */
if (--fssproc->fss_timeleft <= 0) {
pri_t new_pri;
/*
- * If we're doing preemption control and trying to
- * avoid preempting this thread, just note that the
- * thread should yield soon and let it keep running
- * (unless it's been a while).
+ * If we're doing preemption control and trying to avoid
+ * preempting this thread, just note that the thread should
+ * yield soon and let it keep running (unless it's been a
+ * while).
*/
if (t->t_schedctl && schedctl_get_nopreempt(t)) {
if (fssproc->fss_timeleft > -SC_MAX_TICKS) {
DTRACE_SCHED1(schedctl__nopreempt,
kthread_t *, t);
@@ -2539,14 +2453,14 @@
fss_newpri(fssproc, B_TRUE);
new_pri = fssproc->fss_umdpri;
ASSERT(new_pri >= 0 && new_pri <= fss_maxglobpri);
/*
- * When the priority of a thread is changed, it may
- * be necessary to adjust its position on a sleep queue
- * or dispatch queue. The function thread_change_pri
- * accomplishes this.
+ * When the priority of a thread is changed, it may be
+ * necessary to adjust its position on a sleep queue or
+ * dispatch queue. The function thread_change_pri accomplishes
+ * this.
*/
if (thread_change_pri(t, new_pri, 0)) {
if ((t->t_schedflag & TS_LOAD) &&
(lwp = t->t_lwp) &&
lwp->lwp_state == LWP_USER)
@@ -2556,17 +2470,15 @@
call_cpu_surrender = B_TRUE;
}
} else if (t->t_state == TS_ONPROC &&
t->t_pri < t->t_disp_queue->disp_maxrunpri) {
/*
- * If there is a higher-priority thread which is
- * waiting for a processor, then thread surrenders
- * the processor.
+ * If there is a higher-priority thread which is waiting for a
+ * processor, then thread surrenders the processor.
*/
call_cpu_surrender = B_TRUE;
}
- }
if (cpucaps_enforce && 2 * fssproc->fss_timeleft > fss_quantum) {
/*
* The thread used more than half of its quantum, so assume that
* it used the whole quantum.
@@ -2616,37 +2528,18 @@
t->t_stime = ddi_get_lbolt(); /* time stamp for the swapper */
fssproc = FSSPROC(t);
fssproc->fss_flags &= ~FSSBACKQ;
- if (fssproc->fss_flags & FSSKPRI) {
- /*
- * If we already have a kernel priority assigned, then we
- * just use it.
- */
- setbackdq(t);
- } else if (t->t_kpri_req) {
- /*
- * Give thread a priority boost if we were asked.
- */
- fssproc->fss_flags |= FSSKPRI;
- THREAD_CHANGE_PRI(t, minclsyspri);
- setbackdq(t);
- t->t_trapret = 1; /* so that fss_trapret will run */
- aston(t);
- } else {
- /*
- * Otherwise, we recalculate the priority.
- */
+ /* Recalculate the priority. */
if (t->t_disp_time == ddi_get_lbolt()) {
setfrontdq(t);
} else {
fssproc->fss_timeleft = fss_quantum;
THREAD_CHANGE_PRI(t, fssproc->fss_umdpri);
setbackdq(t);
}
- }
}
/*
* fss_donice() is called when a nice(1) command is issued on the thread to
* alter the priority. The nice(1) command exists in Solaris for compatibility.