OS-7753 THREAD_KPRI_RELEASE does nothing of the sort
Reviewed by: Bryan Cantrill <bryan@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
@@ -19,11 +19,11 @@
* CDDL HEADER END
*/
/*
* Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2013, Joyent, Inc. All rights reserved.
+ * Copyright 2019 Joyent, Inc.
*/
/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */
@@ -227,11 +227,10 @@
static void ia_parmsget(kthread_t *, void *);
static void ia_set_process_group(pid_t, pid_t, pid_t);
static void ts_change_priority(kthread_t *, tsproc_t *);
-extern pri_t ts_maxkmdpri; /* maximum kernel mode ts priority */
static pri_t ts_maxglobpri; /* maximum global priority used by ts class */
static kmutex_t ts_dptblock; /* protects time sharing dispatch table */
static kmutex_t ts_list_lock[TS_LISTS]; /* protects tsproc lists */
static tsproc_t ts_plisthead[TS_LISTS]; /* dummy tsproc at head of lists */
@@ -539,12 +538,12 @@
* Allocate a time-sharing class specific thread structure and
* initialize it with the parameters supplied. Also move the thread
* to specified time-sharing priority.
*/
static int
-ts_enterclass(kthread_t *t, id_t cid, void *parmsp,
- cred_t *reqpcredp, void *bufp)
+ts_enterclass(kthread_t *t, id_t cid, void *parmsp, cred_t *reqpcredp,
+ void *bufp)
{
tsparms_t *tsparmsp = (tsparms_t *)parmsp;
tsproc_t *tspp;
pri_t reqtsuprilim;
pri_t reqtsupri;
@@ -701,11 +700,11 @@
ctspp->ts_uprilim = ptspp->ts_uprilim;
ctspp->ts_upri = ptspp->ts_upri;
TS_NEWUMDPRI(ctspp);
ctspp->ts_nice = ptspp->ts_nice;
ctspp->ts_dispwait = 0;
- ctspp->ts_flags = ptspp->ts_flags & ~(TSKPRI | TSBACKQ | TSRESTORE);
+ ctspp->ts_flags = ptspp->ts_flags & ~(TSBACKQ | TSRESTORE);
ctspp->ts_tp = ct;
cpucaps_sc_init(&ctspp->ts_caps);
thread_unlock(t);
/*
@@ -752,11 +751,10 @@
TS_NEWUMDPRI(tspp);
tspp->ts_timeleft = ts_dptbl[tspp->ts_cpupri].ts_quantum;
tspp->ts_dispwait = 0;
t->t_pri = ts_dptbl[tspp->ts_umdpri].ts_globpri;
ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
- tspp->ts_flags &= ~TSKPRI;
THREAD_TRANSITION(t);
ts_setrun(t);
thread_unlock(t);
/*
* Safe to drop p_lock now since it is safe to change
@@ -1215,15 +1213,10 @@
tspp->ts_uprilim = reqtsuprilim;
tspp->ts_upri = reqtsupri;
TS_NEWUMDPRI(tspp);
tspp->ts_nice = nice;
- if ((tspp->ts_flags & TSKPRI) != 0) {
- thread_unlock(tx);
- return (0);
- }
-
tspp->ts_dispwait = 0;
ts_change_priority(tx, tspp);
thread_unlock(tx);
return (0);
}
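
For reference, TS_NEWUMDPRI (used in the hunk above and repeatedly in later hunks) recomputes the thread's user-mode priority from its CPU-usage component (ts_cpupri), the user-settable priority (ts_upri), and the interactive boost (ts_boost), clamping the result to the range covered by the dispatch table. A minimal sketch of that computation, assuming the usual ts_maxumdpri bound; this is an illustration, not the verbatim macro:

/*
 * Illustrative sketch only: recompute ts_umdpri from its components and
 * clamp it to [0, ts_maxumdpri], the index range of ts_dptbl[].
 */
static void
ts_newumdpri_sketch(tsproc_t *tspp)
{
	pri_t pri = tspp->ts_cpupri + tspp->ts_upri + tspp->ts_boost;

	if (pri > ts_maxumdpri)
		tspp->ts_umdpri = ts_maxumdpri;
	else if (pri < 0)
		tspp->ts_umdpri = 0;
	else
		tspp->ts_umdpri = pri;
}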
@@ -1371,37 +1364,24 @@
*/
static void
ts_preempt(kthread_t *t)
{
tsproc_t *tspp = (tsproc_t *)(t->t_cldata);
- klwp_t *lwp = curthread->t_lwp;
+ klwp_t *lwp = ttolwp(t);
pri_t oldpri = t->t_pri;
ASSERT(t == curthread);
ASSERT(THREAD_LOCK_HELD(curthread));
/*
- * If preempted in the kernel, make sure the thread has
- * a kernel priority if needed.
- */
- if (!(tspp->ts_flags & TSKPRI) && lwp != NULL && t->t_kpri_req) {
- tspp->ts_flags |= TSKPRI;
- THREAD_CHANGE_PRI(t, ts_kmdpris[0]);
- ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
- t->t_trapret = 1; /* so ts_trapret will run */
- aston(t);
- }
-
- /*
* This thread may be placed on wait queue by CPU Caps. In this case we
* do not need to do anything until it is removed from the wait queue.
- * Do not enforce CPU caps on threads running at a kernel priority
*/
if (CPUCAPS_ON()) {
(void) cpucaps_charge(t, &tspp->ts_caps,
CPUCAPS_CHARGE_ENFORCE);
- if (!(tspp->ts_flags & TSKPRI) && CPUCAPS_ENFORCE(t))
+ if (CPUCAPS_ENFORCE(t))
return;
}
/*
* If thread got preempted in the user-land then we know
@@ -1423,11 +1403,10 @@
* be preempted.
*/
if (t->t_schedctl && schedctl_get_nopreempt(t)) {
if (tspp->ts_timeleft > -SC_MAX_TICKS) {
DTRACE_SCHED1(schedctl__nopreempt, kthread_t *, t);
- if (!(tspp->ts_flags & TSKPRI)) {
/*
* If not already remembered, remember current
* priority for restoration in ts_yield().
*/
if (!(tspp->ts_flags & TSRESTORE)) {
@@ -1434,11 +1413,10 @@
tspp->ts_scpri = t->t_pri;
tspp->ts_flags |= TSRESTORE;
}
THREAD_CHANGE_PRI(t, ts_maxumdpri);
t->t_schedflag |= TS_DONT_SWAP;
- }
schedctl_set_yield(t, 1);
setfrontdq(t);
goto done;
} else {
if (tspp->ts_flags & TSRESTORE) {
@@ -1454,18 +1432,15 @@
* Fall through and be preempted below.
*/
}
}
- if ((tspp->ts_flags & (TSBACKQ|TSKPRI)) == TSBACKQ) {
+ if ((tspp->ts_flags & TSBACKQ) != 0) {
tspp->ts_timeleft = ts_dptbl[tspp->ts_cpupri].ts_quantum;
tspp->ts_dispwait = 0;
tspp->ts_flags &= ~TSBACKQ;
setbackdq(t);
- } else if ((tspp->ts_flags & (TSBACKQ|TSKPRI)) == (TSBACKQ|TSKPRI)) {
- tspp->ts_flags &= ~TSBACKQ;
- setbackdq(t);
} else {
setfrontdq(t);
}
done:
@@ -1483,16 +1458,13 @@
if (tspp->ts_dispwait > ts_dptbl[tspp->ts_umdpri].ts_maxwait) {
tspp->ts_cpupri = ts_dptbl[tspp->ts_cpupri].ts_slpret;
TS_NEWUMDPRI(tspp);
tspp->ts_timeleft = ts_dptbl[tspp->ts_cpupri].ts_quantum;
tspp->ts_dispwait = 0;
- if ((tspp->ts_flags & TSKPRI) == 0) {
- THREAD_CHANGE_PRI(t,
- ts_dptbl[tspp->ts_umdpri].ts_globpri);
+ THREAD_CHANGE_PRI(t, ts_dptbl[tspp->ts_umdpri].ts_globpri);
ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
}
- }
tspp->ts_flags &= ~TSBACKQ;
if (tspp->ts_flags & TSIA) {
if (tspp->ts_flags & TSIASET)
@@ -1507,18 +1479,16 @@
}
}
/*
- * Prepare thread for sleep. We reset the thread priority so it will
- * run at the kernel priority level when it wakes up.
+ * Prepare thread for sleep.
*/
static void
ts_sleep(kthread_t *t)
{
tsproc_t *tspp = (tsproc_t *)(t->t_cldata);
- int flags;
pri_t old_pri = t->t_pri;
ASSERT(t == curthread);
ASSERT(THREAD_LOCK_HELD(t));
@@ -1525,44 +1495,23 @@
/*
* Account for time spent on CPU before going to sleep.
*/
(void) CPUCAPS_CHARGE(t, &tspp->ts_caps, CPUCAPS_CHARGE_ENFORCE);
- flags = tspp->ts_flags;
- if (t->t_kpri_req) {
- tspp->ts_flags = flags | TSKPRI;
- THREAD_CHANGE_PRI(t, ts_kmdpris[0]);
- ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
- t->t_trapret = 1; /* so ts_trapret will run */
- aston(t);
- } else if (tspp->ts_dispwait > ts_dptbl[tspp->ts_umdpri].ts_maxwait) {
- /*
- * If thread has blocked in the kernel (as opposed to
- * being merely preempted), recompute the user mode priority.
- */
+ if (tspp->ts_dispwait > ts_dptbl[tspp->ts_umdpri].ts_maxwait) {
tspp->ts_cpupri = ts_dptbl[tspp->ts_cpupri].ts_slpret;
TS_NEWUMDPRI(tspp);
tspp->ts_timeleft = ts_dptbl[tspp->ts_cpupri].ts_quantum;
tspp->ts_dispwait = 0;
THREAD_CHANGE_PRI(curthread,
ts_dptbl[tspp->ts_umdpri].ts_globpri);
ASSERT(curthread->t_pri >= 0 &&
curthread->t_pri <= ts_maxglobpri);
- tspp->ts_flags = flags & ~TSKPRI;
if (DISP_MUST_SURRENDER(curthread))
cpu_surrender(curthread);
- } else if (flags & TSKPRI) {
- THREAD_CHANGE_PRI(curthread,
- ts_dptbl[tspp->ts_umdpri].ts_globpri);
- ASSERT(curthread->t_pri >= 0 &&
- curthread->t_pri <= ts_maxglobpri);
- tspp->ts_flags = flags & ~TSKPRI;
-
- if (DISP_MUST_SURRENDER(curthread))
- cpu_surrender(curthread);
}
t->t_stime = ddi_get_lbolt(); /* time stamp for the swapper */
TRACE_2(TR_FAC_DISP, TR_SLEEP,
"sleep:tid %p old pri %d", t, old_pri);
}
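
The dispwait check retained above is the same pattern that now stands alone in ts_setrun, ts_trapret, ts_update, and ts_wakeup: once a thread has waited longer than ts_maxwait ticks at its current level, its CPU-usage component is reset through the table's ts_slpret column and its global priority re-derived. A condensed sketch of that recurring sequence, paraphrased from the hunks rather than copied from the tree:

/*
 * Sketch of the recurring "waited long enough" refresh: reset ts_cpupri
 * via ts_slpret, recompute the user-mode priority, restart the quantum,
 * and map the result back to a global priority.  Illustrative only.
 */
static void
ts_refresh_pri_sketch(kthread_t *t, tsproc_t *tspp)
{
	if (tspp->ts_dispwait <= ts_dptbl[tspp->ts_umdpri].ts_maxwait)
		return;		/* not starved yet; leave priority alone */

	tspp->ts_cpupri = ts_dptbl[tspp->ts_cpupri].ts_slpret;
	TS_NEWUMDPRI(tspp);
	tspp->ts_timeleft = ts_dptbl[tspp->ts_cpupri].ts_quantum;
	tspp->ts_dispwait = 0;
	THREAD_CHANGE_PRI(t, ts_dptbl[tspp->ts_umdpri].ts_globpri);
}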
@@ -1592,13 +1541,13 @@
*/
if (t->t_state == TS_RUN && (t->t_schedflag & TS_LOAD) == 0) {
time_t swapout_time;
swapout_time = (ddi_get_lbolt() - t->t_stime) / hz;
- if (INHERITED(t) || (tspp->ts_flags & (TSKPRI | TSIASET)))
+ if (INHERITED(t) || (tspp->ts_flags & TSIASET)) {
epri = (long)DISP_PRIO(t) + swapout_time;
- else {
+ } else {
/*
* Threads which have been out for a long time,
* have high user mode priority and are associated
* with a small address space are more deserving
*/
@@ -1646,11 +1595,11 @@
proc_t *pp = ttoproc(t);
time_t swapin_time;
ASSERT(THREAD_LOCK_HELD(t));
- if (INHERITED(t) || (tspp->ts_flags & (TSKPRI | TSIASET)) ||
+ if (INHERITED(t) || (tspp->ts_flags & TSIASET) ||
(t->t_proc_flag & TP_LWPEXIT) ||
(t->t_state & (TS_ZOMB | TS_FREE | TS_STOPPED |
TS_ONPROC | TS_WAIT)) ||
!(t->t_schedflag & TS_LOAD) || !SWAP_OK(t))
return (-1);
@@ -1715,22 +1664,21 @@
* Keep track of thread's project CPU usage. Note that projects
* get charged even when threads are running in the kernel.
*/
if (CPUCAPS_ON()) {
call_cpu_surrender = cpucaps_charge(t, &tspp->ts_caps,
- CPUCAPS_CHARGE_ENFORCE) && !(tspp->ts_flags & TSKPRI);
+ CPUCAPS_CHARGE_ENFORCE);
}
- if ((tspp->ts_flags & TSKPRI) == 0) {
if (--tspp->ts_timeleft <= 0) {
pri_t new_pri;
/*
- * If we're doing preemption control and trying to
- * avoid preempting this thread, just note that
- * the thread should yield soon and let it keep
- * running (unless it's been a while).
+ * If we're doing preemption control and trying to avoid
+ * preempting this thread, just note that the thread should
+ * yield soon and let it keep running (unless it's been a
+ * while).
*/
if (t->t_schedctl && schedctl_get_nopreempt(t)) {
if (tspp->ts_timeleft > -SC_MAX_TICKS) {
DTRACE_SCHED1(schedctl__nopreempt,
kthread_t *, t);
@@ -1749,14 +1697,13 @@
TS_NEWUMDPRI(tspp);
tspp->ts_dispwait = 0;
new_pri = ts_dptbl[tspp->ts_umdpri].ts_globpri;
ASSERT(new_pri >= 0 && new_pri <= ts_maxglobpri);
/*
- * When the priority of a thread is changed,
- * it may be necessary to adjust its position
- * on a sleep queue or dispatch queue.
- * The function thread_change_pri accomplishes
+ * When the priority of a thread is changed, it may be
+ * necessary to adjust its position on a sleep queue or
+ * dispatch queue. The function thread_change_pri accomplishes
* this.
*/
if (thread_change_pri(t, new_pri, 0)) {
if ((t->t_schedflag & TS_LOAD) &&
(lwp = t->t_lwp) &&
@@ -1771,11 +1718,10 @@
"tick:tid %p old pri %d", t, oldpri);
} else if (t->t_state == TS_ONPROC &&
t->t_pri < t->t_disp_queue->disp_maxrunpri) {
call_cpu_surrender = B_TRUE;
}
- }
if (call_cpu_surrender) {
tspp->ts_flags |= TSBACKQ;
cpu_surrender(t);
}
@@ -1783,15 +1729,12 @@
thread_unlock_nopreempt(t); /* clock thread can't be preempted */
}
/*
- * If thread is currently at a kernel mode priority (has slept)
- * we assign it the appropriate user mode priority and time quantum
- * here. If we are lowering the thread's priority below that of
- * other runnable threads we will normally set runrun via cpu_surrender() to
- * cause preemption.
+ * If we are lowering the thread's priority below that of other runnable
+ * threads we will normally set runrun via cpu_surrender() to cause preemption.
*/
static void
ts_trapret(kthread_t *t)
{
tsproc_t *tspp = (tsproc_t *)t->t_cldata;
@@ -1801,11 +1744,10 @@
ASSERT(THREAD_LOCK_HELD(t));
ASSERT(t == curthread);
ASSERT(cp->cpu_dispthread == t);
ASSERT(t->t_state == TS_ONPROC);
- t->t_kpri_req = 0;
if (tspp->ts_dispwait > ts_dptbl[tspp->ts_umdpri].ts_maxwait) {
tspp->ts_cpupri = ts_dptbl[tspp->ts_cpupri].ts_slpret;
TS_NEWUMDPRI(tspp);
tspp->ts_timeleft = ts_dptbl[tspp->ts_cpupri].ts_quantum;
tspp->ts_dispwait = 0;
@@ -1815,31 +1757,18 @@
* being merely preempted), recompute the user mode priority.
*/
THREAD_CHANGE_PRI(t, ts_dptbl[tspp->ts_umdpri].ts_globpri);
cp->cpu_dispatch_pri = DISP_PRIO(t);
ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
- tspp->ts_flags &= ~TSKPRI;
if (DISP_MUST_SURRENDER(t))
cpu_surrender(t);
- } else if (tspp->ts_flags & TSKPRI) {
- /*
- * If thread has blocked in the kernel (as opposed to
- * being merely preempted), recompute the user mode priority.
- */
- THREAD_CHANGE_PRI(t, ts_dptbl[tspp->ts_umdpri].ts_globpri);
- cp->cpu_dispatch_pri = DISP_PRIO(t);
- ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
- tspp->ts_flags &= ~TSKPRI;
-
- if (DISP_MUST_SURRENDER(t))
- cpu_surrender(t);
}
/*
- * Swapout lwp if the swapper is waiting for this thread to
- * reach a safe point.
+ * Swapout lwp if the swapper is waiting for this thread to reach a
+ * safe point.
*/
if ((t->t_schedflag & TS_SWAPENQ) && !(tspp->ts_flags & TSIASET)) {
thread_unlock(t);
swapout_lwp(ttolwp(t));
thread_lock(t);
@@ -1929,12 +1858,10 @@
*/
if (tx->t_clfuncs != &ts_classfuncs.thread &&
tx->t_clfuncs != &ia_classfuncs.thread)
goto next;
tspp->ts_dispwait++;
- if ((tspp->ts_flags & TSKPRI) != 0)
- goto next;
if (tspp->ts_dispwait <= ts_dptbl[tspp->ts_umdpri].ts_maxwait)
goto next;
if (tx->t_schedctl && schedctl_get_nopreempt(tx))
goto next;
if (tx->t_state != TS_RUN && tx->t_state != TS_WAIT &&
@@ -1966,16 +1893,11 @@
return (updated);
}
/*
- * Processes waking up go to the back of their queue. We don't
- * need to assign a time quantum here because thread is still
- * at a kernel mode priority and the time slicing is not done
- * for threads running in the kernel after sleeping. The proper
- * time quantum will be assigned by ts_trapret before the thread
- * returns to user mode.
+ * Processes waking up go to the back of their queue.
*/
static void
ts_wakeup(kthread_t *t)
{
tsproc_t *tspp = (tsproc_t *)(t->t_cldata);
@@ -1982,34 +1904,16 @@
ASSERT(THREAD_LOCK_HELD(t));
t->t_stime = ddi_get_lbolt(); /* time stamp for the swapper */
- if (tspp->ts_flags & TSKPRI) {
- tspp->ts_flags &= ~TSBACKQ;
- if (tspp->ts_flags & TSIASET)
- setfrontdq(t);
- else
- setbackdq(t);
- } else if (t->t_kpri_req) {
- /*
- * Give thread a priority boost if we were asked.
- */
- tspp->ts_flags |= TSKPRI;
- THREAD_CHANGE_PRI(t, ts_kmdpris[0]);
- setbackdq(t);
- t->t_trapret = 1; /* so that ts_trapret will run */
- aston(t);
- } else {
if (tspp->ts_dispwait > ts_dptbl[tspp->ts_umdpri].ts_maxwait) {
tspp->ts_cpupri = ts_dptbl[tspp->ts_cpupri].ts_slpret;
TS_NEWUMDPRI(tspp);
- tspp->ts_timeleft =
- ts_dptbl[tspp->ts_cpupri].ts_quantum;
+ tspp->ts_timeleft = ts_dptbl[tspp->ts_cpupri].ts_quantum;
tspp->ts_dispwait = 0;
- THREAD_CHANGE_PRI(t,
- ts_dptbl[tspp->ts_umdpri].ts_globpri);
+ THREAD_CHANGE_PRI(t, ts_dptbl[tspp->ts_umdpri].ts_globpri);
ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
}
tspp->ts_flags &= ~TSBACKQ;
@@ -2022,11 +1926,10 @@
if (t->t_disp_time != ddi_get_lbolt())
setbackdq(t);
else
setfrontdq(t);
}
- }
}
/*
* When a thread yields, put it on the back of the run queue.
@@ -2289,14 +2192,10 @@
}
tspp = tx->t_cldata;
tspp->ts_flags |= TSIASET;
tspp->ts_boost = ia_boost;
TS_NEWUMDPRI(tspp);
- if ((tspp->ts_flags & TSKPRI) != 0) {
- thread_unlock(tx);
- continue;
- }
tspp->ts_dispwait = 0;
ts_change_priority(tx, tspp);
thread_unlock(tx);
} while ((tx = tx->t_forw) != fg->p_tlist);
mutex_exit(&fg->p_lock);
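
The boost applied above is purely additive: ts_boost is folded into the next TS_NEWUMDPRI computation, so foreground threads gain ia_boost user-mode priority levels and, in the hunk below, background threads lose the same amount, clamped to the table's range. A worked example with hypothetical values, not taken from any real dispatch table:

/*
 * Hypothetical numbers, purely to show how the boost feeds ts_umdpri:
 *
 *	ts_cpupri = 20, ts_upri = 0, ia_boost = 10	(illustrative)
 *
 *	foreground:  ts_boost = +10  ->  ts_umdpri = 20 + 0 + 10 = 30
 *	background:  ts_boost = -10  ->  ts_umdpri = 20 + 0 - 10 = 10
 *
 * ts_umdpri then indexes ts_dptbl[] to pick the new global priority,
 * which ts_change_priority() applies to the thread.
 */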
@@ -2342,14 +2241,10 @@
}
tspp = tx->t_cldata;
tspp->ts_flags &= ~TSIASET;
tspp->ts_boost = -ia_boost;
TS_NEWUMDPRI(tspp);
- if ((tspp->ts_flags & TSKPRI) != 0) {
- thread_unlock(tx);
- continue;
- }
tspp->ts_dispwait = 0;
ts_change_priority(tx, tspp);
thread_unlock(tx);
} while ((tx = tx->t_forw) != bg->p_tlist);