illumos webrev (context diff): usr/src/uts/common/disp/ts.c
11909 THREAD_KPRI_RELEASE does nothing of the sort
Reviewed by: Bryan Cantrill <bryan@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
*** 19,29 ****
* CDDL HEADER END
*/
/*
* Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved.
! * Copyright 2013, Joyent, Inc. All rights reserved.
*/
/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */
--- 19,29 ----
* CDDL HEADER END
*/
/*
* Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved.
! * Copyright 2019 Joyent, Inc.
*/
/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */
*** 227,237 ****
static void ia_parmsget(kthread_t *, void *);
static void ia_set_process_group(pid_t, pid_t, pid_t);
static void ts_change_priority(kthread_t *, tsproc_t *);
- extern pri_t ts_maxkmdpri; /* maximum kernel mode ts priority */
static pri_t ts_maxglobpri; /* maximum global priority used by ts class */
static kmutex_t ts_dptblock; /* protects time sharing dispatch table */
static kmutex_t ts_list_lock[TS_LISTS]; /* protects tsproc lists */
static tsproc_t ts_plisthead[TS_LISTS]; /* dummy tsproc at head of lists */
--- 227,236 ----
*** 539,550 ****
* Allocate a time-sharing class specific thread structure and
* initialize it with the parameters supplied. Also move the thread
* to specified time-sharing priority.
*/
static int
! ts_enterclass(kthread_t *t, id_t cid, void *parmsp,
! cred_t *reqpcredp, void *bufp)
{
tsparms_t *tsparmsp = (tsparms_t *)parmsp;
tsproc_t *tspp;
pri_t reqtsuprilim;
pri_t reqtsupri;
--- 538,549 ----
* Allocate a time-sharing class specific thread structure and
* initialize it with the parameters supplied. Also move the thread
* to specified time-sharing priority.
*/
static int
! ts_enterclass(kthread_t *t, id_t cid, void *parmsp, cred_t *reqpcredp,
! void *bufp)
{
tsparms_t *tsparmsp = (tsparms_t *)parmsp;
tsproc_t *tspp;
pri_t reqtsuprilim;
pri_t reqtsupri;
*** 701,711 ****
ctspp->ts_uprilim = ptspp->ts_uprilim;
ctspp->ts_upri = ptspp->ts_upri;
TS_NEWUMDPRI(ctspp);
ctspp->ts_nice = ptspp->ts_nice;
ctspp->ts_dispwait = 0;
! ctspp->ts_flags = ptspp->ts_flags & ~(TSKPRI | TSBACKQ | TSRESTORE);
ctspp->ts_tp = ct;
cpucaps_sc_init(&ctspp->ts_caps);
thread_unlock(t);
/*
--- 700,710 ----
ctspp->ts_uprilim = ptspp->ts_uprilim;
ctspp->ts_upri = ptspp->ts_upri;
TS_NEWUMDPRI(ctspp);
ctspp->ts_nice = ptspp->ts_nice;
ctspp->ts_dispwait = 0;
! ctspp->ts_flags = ptspp->ts_flags & ~(TSBACKQ | TSRESTORE);
ctspp->ts_tp = ct;
cpucaps_sc_init(&ctspp->ts_caps);
thread_unlock(t);
/*
*** 752,762 ****
TS_NEWUMDPRI(tspp);
tspp->ts_timeleft = ts_dptbl[tspp->ts_cpupri].ts_quantum;
tspp->ts_dispwait = 0;
t->t_pri = ts_dptbl[tspp->ts_umdpri].ts_globpri;
ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
- tspp->ts_flags &= ~TSKPRI;
THREAD_TRANSITION(t);
ts_setrun(t);
thread_unlock(t);
/*
* Safe to drop p_lock now since since it is safe to change
--- 751,760 ----
*** 1215,1229 ****
tspp->ts_uprilim = reqtsuprilim;
tspp->ts_upri = reqtsupri;
TS_NEWUMDPRI(tspp);
tspp->ts_nice = nice;
- if ((tspp->ts_flags & TSKPRI) != 0) {
- thread_unlock(tx);
- return (0);
- }
-
tspp->ts_dispwait = 0;
ts_change_priority(tx, tspp);
thread_unlock(tx);
return (0);
}
--- 1213,1222 ----
*** 1371,1407 ****
*/
static void
ts_preempt(kthread_t *t)
{
tsproc_t *tspp = (tsproc_t *)(t->t_cldata);
! klwp_t *lwp = curthread->t_lwp;
pri_t oldpri = t->t_pri;
ASSERT(t == curthread);
ASSERT(THREAD_LOCK_HELD(curthread));
/*
- * If preempted in the kernel, make sure the thread has
- * a kernel priority if needed.
- */
- if (!(tspp->ts_flags & TSKPRI) && lwp != NULL && t->t_kpri_req) {
- tspp->ts_flags |= TSKPRI;
- THREAD_CHANGE_PRI(t, ts_kmdpris[0]);
- ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
- t->t_trapret = 1; /* so ts_trapret will run */
- aston(t);
- }
-
- /*
* This thread may be placed on wait queue by CPU Caps. In this case we
* do not need to do anything until it is removed from the wait queue.
- * Do not enforce CPU caps on threads running at a kernel priority
*/
if (CPUCAPS_ON()) {
(void) cpucaps_charge(t, &tspp->ts_caps,
CPUCAPS_CHARGE_ENFORCE);
! if (!(tspp->ts_flags & TSKPRI) && CPUCAPS_ENFORCE(t))
return;
}
/*
* If thread got preempted in the user-land then we know
--- 1364,1387 ----
*/
static void
ts_preempt(kthread_t *t)
{
tsproc_t *tspp = (tsproc_t *)(t->t_cldata);
! klwp_t *lwp = ttolwp(t);
pri_t oldpri = t->t_pri;
ASSERT(t == curthread);
ASSERT(THREAD_LOCK_HELD(curthread));
/*
* This thread may be placed on wait queue by CPU Caps. In this case we
* do not need to do anything until it is removed from the wait queue.
*/
if (CPUCAPS_ON()) {
(void) cpucaps_charge(t, &tspp->ts_caps,
CPUCAPS_CHARGE_ENFORCE);
! if (CPUCAPS_ENFORCE(t))
return;
}
/*
* If thread got preempted in the user-land then we know
*** 1423,1433 ****
* be preempted.
*/
if (t->t_schedctl && schedctl_get_nopreempt(t)) {
if (tspp->ts_timeleft > -SC_MAX_TICKS) {
DTRACE_SCHED1(schedctl__nopreempt, kthread_t *, t);
- if (!(tspp->ts_flags & TSKPRI)) {
/*
* If not already remembered, remember current
* priority for restoration in ts_yield().
*/
if (!(tspp->ts_flags & TSRESTORE)) {
--- 1403,1412 ----
*** 1434,1444 ****
tspp->ts_scpri = t->t_pri;
tspp->ts_flags |= TSRESTORE;
}
THREAD_CHANGE_PRI(t, ts_maxumdpri);
t->t_schedflag |= TS_DONT_SWAP;
- }
schedctl_set_yield(t, 1);
setfrontdq(t);
goto done;
} else {
if (tspp->ts_flags & TSRESTORE) {
--- 1413,1422 ----
*** 1454,1471 ****
* Fall through and be preempted below.
*/
}
}
! if ((tspp->ts_flags & (TSBACKQ|TSKPRI)) == TSBACKQ) {
tspp->ts_timeleft = ts_dptbl[tspp->ts_cpupri].ts_quantum;
tspp->ts_dispwait = 0;
tspp->ts_flags &= ~TSBACKQ;
setbackdq(t);
- } else if ((tspp->ts_flags & (TSBACKQ|TSKPRI)) == (TSBACKQ|TSKPRI)) {
- tspp->ts_flags &= ~TSBACKQ;
- setbackdq(t);
} else {
setfrontdq(t);
}
done:
--- 1432,1446 ----
* Fall through and be preempted below.
*/
}
}
! if ((tspp->ts_flags & TSBACKQ) != 0) {
tspp->ts_timeleft = ts_dptbl[tspp->ts_cpupri].ts_quantum;
tspp->ts_dispwait = 0;
tspp->ts_flags &= ~TSBACKQ;
setbackdq(t);
} else {
setfrontdq(t);
}
done:
*** 1483,1498 ****
if (tspp->ts_dispwait > ts_dptbl[tspp->ts_umdpri].ts_maxwait) {
tspp->ts_cpupri = ts_dptbl[tspp->ts_cpupri].ts_slpret;
TS_NEWUMDPRI(tspp);
tspp->ts_timeleft = ts_dptbl[tspp->ts_cpupri].ts_quantum;
tspp->ts_dispwait = 0;
! if ((tspp->ts_flags & TSKPRI) == 0) {
! THREAD_CHANGE_PRI(t,
! ts_dptbl[tspp->ts_umdpri].ts_globpri);
ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
}
- }
tspp->ts_flags &= ~TSBACKQ;
if (tspp->ts_flags & TSIA) {
if (tspp->ts_flags & TSIASET)
--- 1458,1470 ----
if (tspp->ts_dispwait > ts_dptbl[tspp->ts_umdpri].ts_maxwait) {
tspp->ts_cpupri = ts_dptbl[tspp->ts_cpupri].ts_slpret;
TS_NEWUMDPRI(tspp);
tspp->ts_timeleft = ts_dptbl[tspp->ts_cpupri].ts_quantum;
tspp->ts_dispwait = 0;
! THREAD_CHANGE_PRI(t, ts_dptbl[tspp->ts_umdpri].ts_globpri);
ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
}
tspp->ts_flags &= ~TSBACKQ;
if (tspp->ts_flags & TSIA) {
if (tspp->ts_flags & TSIASET)
*** 1507,1524 ****
}
}
/*
! * Prepare thread for sleep. We reset the thread priority so it will
! * run at the kernel priority level when it wakes up.
*/
static void
ts_sleep(kthread_t *t)
{
tsproc_t *tspp = (tsproc_t *)(t->t_cldata);
- int flags;
pri_t old_pri = t->t_pri;
ASSERT(t == curthread);
ASSERT(THREAD_LOCK_HELD(t));
--- 1479,1494 ----
}
}
/*
! * Prepare thread for sleep.
*/
static void
ts_sleep(kthread_t *t)
{
tsproc_t *tspp = (tsproc_t *)(t->t_cldata);
pri_t old_pri = t->t_pri;
ASSERT(t == curthread);
ASSERT(THREAD_LOCK_HELD(t));
*** 1525,1568 ****
/*
* Account for time spent on CPU before going to sleep.
*/
(void) CPUCAPS_CHARGE(t, &tspp->ts_caps, CPUCAPS_CHARGE_ENFORCE);
! flags = tspp->ts_flags;
! if (t->t_kpri_req) {
! tspp->ts_flags = flags | TSKPRI;
! THREAD_CHANGE_PRI(t, ts_kmdpris[0]);
! ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
! t->t_trapret = 1; /* so ts_trapret will run */
! aston(t);
! } else if (tspp->ts_dispwait > ts_dptbl[tspp->ts_umdpri].ts_maxwait) {
! /*
! * If thread has blocked in the kernel (as opposed to
! * being merely preempted), recompute the user mode priority.
! */
tspp->ts_cpupri = ts_dptbl[tspp->ts_cpupri].ts_slpret;
TS_NEWUMDPRI(tspp);
tspp->ts_timeleft = ts_dptbl[tspp->ts_cpupri].ts_quantum;
tspp->ts_dispwait = 0;
THREAD_CHANGE_PRI(curthread,
ts_dptbl[tspp->ts_umdpri].ts_globpri);
ASSERT(curthread->t_pri >= 0 &&
curthread->t_pri <= ts_maxglobpri);
- tspp->ts_flags = flags & ~TSKPRI;
if (DISP_MUST_SURRENDER(curthread))
cpu_surrender(curthread);
- } else if (flags & TSKPRI) {
- THREAD_CHANGE_PRI(curthread,
- ts_dptbl[tspp->ts_umdpri].ts_globpri);
- ASSERT(curthread->t_pri >= 0 &&
- curthread->t_pri <= ts_maxglobpri);
- tspp->ts_flags = flags & ~TSKPRI;
-
- if (DISP_MUST_SURRENDER(curthread))
- cpu_surrender(curthread);
}
t->t_stime = ddi_get_lbolt(); /* time stamp for the swapper */
TRACE_2(TR_FAC_DISP, TR_SLEEP,
"sleep:tid %p old pri %d", t, old_pri);
}
--- 1495,1517 ----
/*
* Account for time spent on CPU before going to sleep.
*/
(void) CPUCAPS_CHARGE(t, &tspp->ts_caps, CPUCAPS_CHARGE_ENFORCE);
! if (tspp->ts_dispwait > ts_dptbl[tspp->ts_umdpri].ts_maxwait) {
tspp->ts_cpupri = ts_dptbl[tspp->ts_cpupri].ts_slpret;
TS_NEWUMDPRI(tspp);
tspp->ts_timeleft = ts_dptbl[tspp->ts_cpupri].ts_quantum;
tspp->ts_dispwait = 0;
THREAD_CHANGE_PRI(curthread,
ts_dptbl[tspp->ts_umdpri].ts_globpri);
ASSERT(curthread->t_pri >= 0 &&
curthread->t_pri <= ts_maxglobpri);
if (DISP_MUST_SURRENDER(curthread))
cpu_surrender(curthread);
}
t->t_stime = ddi_get_lbolt(); /* time stamp for the swapper */
TRACE_2(TR_FAC_DISP, TR_SLEEP,
"sleep:tid %p old pri %d", t, old_pri);
}
*** 1592,1604 ****
*/
if (t->t_state == TS_RUN && (t->t_schedflag & TS_LOAD) == 0) {
time_t swapout_time;
swapout_time = (ddi_get_lbolt() - t->t_stime) / hz;
! if (INHERITED(t) || (tspp->ts_flags & (TSKPRI | TSIASET)))
epri = (long)DISP_PRIO(t) + swapout_time;
! else {
/*
* Threads which have been out for a long time,
* have high user mode priority and are associated
* with a small address space are more deserving
*/
--- 1541,1553 ----
*/
if (t->t_state == TS_RUN && (t->t_schedflag & TS_LOAD) == 0) {
time_t swapout_time;
swapout_time = (ddi_get_lbolt() - t->t_stime) / hz;
! if (INHERITED(t) || (tspp->ts_flags & TSIASET)) {
epri = (long)DISP_PRIO(t) + swapout_time;
! } else {
/*
* Threads which have been out for a long time,
* have high user mode priority and are associated
* with a small address space are more deserving
*/
*** 1646,1656 ****
proc_t *pp = ttoproc(t);
time_t swapin_time;
ASSERT(THREAD_LOCK_HELD(t));
! if (INHERITED(t) || (tspp->ts_flags & (TSKPRI | TSIASET)) ||
(t->t_proc_flag & TP_LWPEXIT) ||
(t->t_state & (TS_ZOMB | TS_FREE | TS_STOPPED |
TS_ONPROC | TS_WAIT)) ||
!(t->t_schedflag & TS_LOAD) || !SWAP_OK(t))
return (-1);
--- 1595,1605 ----
proc_t *pp = ttoproc(t);
time_t swapin_time;
ASSERT(THREAD_LOCK_HELD(t));
! if (INHERITED(t) || (tspp->ts_flags & TSIASET) ||
(t->t_proc_flag & TP_LWPEXIT) ||
(t->t_state & (TS_ZOMB | TS_FREE | TS_STOPPED |
TS_ONPROC | TS_WAIT)) ||
!(t->t_schedflag & TS_LOAD) || !SWAP_OK(t))
return (-1);
*** 1715,1736 ****
* Keep track of thread's project CPU usage. Note that projects
* get charged even when threads are running in the kernel.
*/
if (CPUCAPS_ON()) {
call_cpu_surrender = cpucaps_charge(t, &tspp->ts_caps,
! CPUCAPS_CHARGE_ENFORCE) && !(tspp->ts_flags & TSKPRI);
}
- if ((tspp->ts_flags & TSKPRI) == 0) {
if (--tspp->ts_timeleft <= 0) {
pri_t new_pri;
/*
! * If we're doing preemption control and trying to
! * avoid preempting this thread, just note that
! * the thread should yield soon and let it keep
! * running (unless it's been a while).
*/
if (t->t_schedctl && schedctl_get_nopreempt(t)) {
if (tspp->ts_timeleft > -SC_MAX_TICKS) {
DTRACE_SCHED1(schedctl__nopreempt,
kthread_t *, t);
--- 1664,1684 ----
* Keep track of thread's project CPU usage. Note that projects
* get charged even when threads are running in the kernel.
*/
if (CPUCAPS_ON()) {
call_cpu_surrender = cpucaps_charge(t, &tspp->ts_caps,
! CPUCAPS_CHARGE_ENFORCE);
}
if (--tspp->ts_timeleft <= 0) {
pri_t new_pri;
/*
! * If we're doing preemption control and trying to avoid
! * preempting this thread, just note that the thread should
! * yield soon and let it keep running (unless it's been a
! * while).
*/
if (t->t_schedctl && schedctl_get_nopreempt(t)) {
if (tspp->ts_timeleft > -SC_MAX_TICKS) {
DTRACE_SCHED1(schedctl__nopreempt,
kthread_t *, t);
*** 1749,1762 ****
TS_NEWUMDPRI(tspp);
tspp->ts_dispwait = 0;
new_pri = ts_dptbl[tspp->ts_umdpri].ts_globpri;
ASSERT(new_pri >= 0 && new_pri <= ts_maxglobpri);
/*
! * When the priority of a thread is changed,
! * it may be necessary to adjust its position
! * on a sleep queue or dispatch queue.
! * The function thread_change_pri accomplishes
* this.
*/
if (thread_change_pri(t, new_pri, 0)) {
if ((t->t_schedflag & TS_LOAD) &&
(lwp = t->t_lwp) &&
--- 1697,1709 ----
TS_NEWUMDPRI(tspp);
tspp->ts_dispwait = 0;
new_pri = ts_dptbl[tspp->ts_umdpri].ts_globpri;
ASSERT(new_pri >= 0 && new_pri <= ts_maxglobpri);
/*
! * When the priority of a thread is changed, it may be
! * necessary to adjust its position on a sleep queue or
! * dispatch queue. The function thread_change_pri accomplishes
* this.
*/
if (thread_change_pri(t, new_pri, 0)) {
if ((t->t_schedflag & TS_LOAD) &&
(lwp = t->t_lwp) &&
*** 1771,1781 ****
"tick:tid %p old pri %d", t, oldpri);
} else if (t->t_state == TS_ONPROC &&
t->t_pri < t->t_disp_queue->disp_maxrunpri) {
call_cpu_surrender = B_TRUE;
}
- }
if (call_cpu_surrender) {
tspp->ts_flags |= TSBACKQ;
cpu_surrender(t);
}
--- 1718,1727 ----
*** 1783,1797 ****
thread_unlock_nopreempt(t); /* clock thread can't be preempted */
}
/*
! * If thread is currently at a kernel mode priority (has slept)
! * we assign it the appropriate user mode priority and time quantum
! * here. If we are lowering the thread's priority below that of
! * other runnable threads we will normally set runrun via cpu_surrender() to
! * cause preemption.
*/
static void
ts_trapret(kthread_t *t)
{
tsproc_t *tspp = (tsproc_t *)t->t_cldata;
--- 1729,1740 ----
thread_unlock_nopreempt(t); /* clock thread can't be preempted */
}
/*
! * If we are lowering the thread's priority below that of other runnable
! * threads we will normally set runrun via cpu_surrender() to cause preemption.
*/
static void
ts_trapret(kthread_t *t)
{
tsproc_t *tspp = (tsproc_t *)t->t_cldata;
*** 1801,1811 ****
ASSERT(THREAD_LOCK_HELD(t));
ASSERT(t == curthread);
ASSERT(cp->cpu_dispthread == t);
ASSERT(t->t_state == TS_ONPROC);
- t->t_kpri_req = 0;
if (tspp->ts_dispwait > ts_dptbl[tspp->ts_umdpri].ts_maxwait) {
tspp->ts_cpupri = ts_dptbl[tspp->ts_cpupri].ts_slpret;
TS_NEWUMDPRI(tspp);
tspp->ts_timeleft = ts_dptbl[tspp->ts_cpupri].ts_quantum;
tspp->ts_dispwait = 0;
--- 1744,1753 ----
*** 1815,1845 ****
* being merely preempted), recompute the user mode priority.
*/
THREAD_CHANGE_PRI(t, ts_dptbl[tspp->ts_umdpri].ts_globpri);
cp->cpu_dispatch_pri = DISP_PRIO(t);
ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
- tspp->ts_flags &= ~TSKPRI;
if (DISP_MUST_SURRENDER(t))
cpu_surrender(t);
- } else if (tspp->ts_flags & TSKPRI) {
- /*
- * If thread has blocked in the kernel (as opposed to
- * being merely preempted), recompute the user mode priority.
- */
- THREAD_CHANGE_PRI(t, ts_dptbl[tspp->ts_umdpri].ts_globpri);
- cp->cpu_dispatch_pri = DISP_PRIO(t);
- ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
- tspp->ts_flags &= ~TSKPRI;
-
- if (DISP_MUST_SURRENDER(t))
- cpu_surrender(t);
}
/*
! * Swapout lwp if the swapper is waiting for this thread to
! * reach a safe point.
*/
if ((t->t_schedflag & TS_SWAPENQ) && !(tspp->ts_flags & TSIASET)) {
thread_unlock(t);
swapout_lwp(ttolwp(t));
thread_lock(t);
--- 1757,1774 ----
* being merely preempted), recompute the user mode priority.
*/
THREAD_CHANGE_PRI(t, ts_dptbl[tspp->ts_umdpri].ts_globpri);
cp->cpu_dispatch_pri = DISP_PRIO(t);
ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
if (DISP_MUST_SURRENDER(t))
cpu_surrender(t);
}
/*
! * Swapout lwp if the swapper is waiting for this thread to reach a
! * safe point.
*/
if ((t->t_schedflag & TS_SWAPENQ) && !(tspp->ts_flags & TSIASET)) {
thread_unlock(t);
swapout_lwp(ttolwp(t));
thread_lock(t);
*** 1929,1940 ****
*/
if (tx->t_clfuncs != &ts_classfuncs.thread &&
tx->t_clfuncs != &ia_classfuncs.thread)
goto next;
tspp->ts_dispwait++;
- if ((tspp->ts_flags & TSKPRI) != 0)
- goto next;
if (tspp->ts_dispwait <= ts_dptbl[tspp->ts_umdpri].ts_maxwait)
goto next;
if (tx->t_schedctl && schedctl_get_nopreempt(tx))
goto next;
if (tx->t_state != TS_RUN && tx->t_state != TS_WAIT &&
--- 1858,1867 ----
*** 1966,1981 ****
return (updated);
}
/*
! * Processes waking up go to the back of their queue. We don't
! * need to assign a time quantum here because thread is still
! * at a kernel mode priority and the time slicing is not done
! * for threads running in the kernel after sleeping. The proper
! * time quantum will be assigned by ts_trapret before the thread
! * returns to user mode.
*/
static void
ts_wakeup(kthread_t *t)
{
tsproc_t *tspp = (tsproc_t *)(t->t_cldata);
--- 1893,1903 ----
return (updated);
}
/*
! * Processes waking up go to the back of their queue.
*/
static void
ts_wakeup(kthread_t *t)
{
tsproc_t *tspp = (tsproc_t *)(t->t_cldata);
*** 1982,2015 ****
ASSERT(THREAD_LOCK_HELD(t));
t->t_stime = ddi_get_lbolt(); /* time stamp for the swapper */
- if (tspp->ts_flags & TSKPRI) {
- tspp->ts_flags &= ~TSBACKQ;
- if (tspp->ts_flags & TSIASET)
- setfrontdq(t);
- else
- setbackdq(t);
- } else if (t->t_kpri_req) {
- /*
- * Give thread a priority boost if we were asked.
- */
- tspp->ts_flags |= TSKPRI;
- THREAD_CHANGE_PRI(t, ts_kmdpris[0]);
- setbackdq(t);
- t->t_trapret = 1; /* so that ts_trapret will run */
- aston(t);
- } else {
if (tspp->ts_dispwait > ts_dptbl[tspp->ts_umdpri].ts_maxwait) {
tspp->ts_cpupri = ts_dptbl[tspp->ts_cpupri].ts_slpret;
TS_NEWUMDPRI(tspp);
! tspp->ts_timeleft =
! ts_dptbl[tspp->ts_cpupri].ts_quantum;
tspp->ts_dispwait = 0;
! THREAD_CHANGE_PRI(t,
! ts_dptbl[tspp->ts_umdpri].ts_globpri);
ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
}
tspp->ts_flags &= ~TSBACKQ;
--- 1904,1919 ----
ASSERT(THREAD_LOCK_HELD(t));
t->t_stime = ddi_get_lbolt(); /* time stamp for the swapper */
if (tspp->ts_dispwait > ts_dptbl[tspp->ts_umdpri].ts_maxwait) {
tspp->ts_cpupri = ts_dptbl[tspp->ts_cpupri].ts_slpret;
TS_NEWUMDPRI(tspp);
! tspp->ts_timeleft = ts_dptbl[tspp->ts_cpupri].ts_quantum;
tspp->ts_dispwait = 0;
! THREAD_CHANGE_PRI(t, ts_dptbl[tspp->ts_umdpri].ts_globpri);
ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
}
tspp->ts_flags &= ~TSBACKQ;
*** 2022,2032 ****
if (t->t_disp_time != ddi_get_lbolt())
setbackdq(t);
else
setfrontdq(t);
}
- }
}
/*
* When a thread yields, put it on the back of the run queue.
--- 1926,1935 ----
*** 2289,2302 ****
}
tspp = tx->t_cldata;
tspp->ts_flags |= TSIASET;
tspp->ts_boost = ia_boost;
TS_NEWUMDPRI(tspp);
- if ((tspp->ts_flags & TSKPRI) != 0) {
- thread_unlock(tx);
- continue;
- }
tspp->ts_dispwait = 0;
ts_change_priority(tx, tspp);
thread_unlock(tx);
} while ((tx = tx->t_forw) != fg->p_tlist);
mutex_exit(&fg->p_lock);
--- 2192,2201 ----
*** 2342,2355 ****
}
tspp = tx->t_cldata;
tspp->ts_flags &= ~TSIASET;
tspp->ts_boost = -ia_boost;
TS_NEWUMDPRI(tspp);
- if ((tspp->ts_flags & TSKPRI) != 0) {
- thread_unlock(tx);
- continue;
- }
tspp->ts_dispwait = 0;
ts_change_priority(tx, tspp);
thread_unlock(tx);
} while ((tx = tx->t_forw) != bg->p_tlist);
--- 2241,2250 ----