Print this page
11909 THREAD_KPRI_RELEASE does nothing of the sort
Reviewed by: Bryan Cantrill <bryan@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/common/disp/ts.c
          +++ new/usr/src/uts/common/disp/ts.c
↓ open down ↓ 13 lines elided ↑ open up ↑
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  
  22   22  /*
  23   23   * Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved.
  24      - * Copyright 2013, Joyent, Inc. All rights reserved.
       24 + * Copyright 2019 Joyent, Inc.
  25   25   */
  26   26  
  27   27  /*      Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
  28      -/*        All Rights Reserved   */
       28 +/*        All Rights Reserved   */
  29   29  
  30   30  #include <sys/types.h>
  31   31  #include <sys/param.h>
  32   32  #include <sys/sysmacros.h>
  33   33  #include <sys/cred.h>
  34   34  #include <sys/proc.h>
  35   35  #include <sys/session.h>
  36   36  #include <sys/strsubr.h>
  37   37  #include <sys/signal.h>
  38   38  #include <sys/user.h>
↓ open down ↓ 183 lines elided ↑ open up ↑
 222  222  static int      ia_getclpri(pcpri_t *);
 223  223  static int      ia_parmsin(void *);
 224  224  static int      ia_vaparmsin(void *, pc_vaparms_t *);
 225  225  static int      ia_vaparmsout(void *, pc_vaparms_t *);
 226  226  static int      ia_parmsset(kthread_t *, void *, id_t, cred_t *);
 227  227  static void     ia_parmsget(kthread_t *, void *);
 228  228  static void     ia_set_process_group(pid_t, pid_t, pid_t);
 229  229  
 230  230  static void     ts_change_priority(kthread_t *, tsproc_t *);
 231  231  
 232      -extern pri_t    ts_maxkmdpri;   /* maximum kernel mode ts priority */
 233  232  static pri_t    ts_maxglobpri;  /* maximum global priority used by ts class */
 234  233  static kmutex_t ts_dptblock;    /* protects time sharing dispatch table */
 235  234  static kmutex_t ts_list_lock[TS_LISTS]; /* protects tsproc lists */
 236  235  static tsproc_t ts_plisthead[TS_LISTS]; /* dummy tsproc at head of lists */
 237  236  
 238  237  static gid_t    IA_gid = 0;
 239  238  
 240  239  static struct classfuncs ts_classfuncs = {
 241  240          /* class functions */
 242  241          ts_admin,
↓ open down ↓ 291 lines elided ↑ open up ↑
 534  533          return (0);
 535  534  }
 536  535  
 537  536  
 538  537  /*
 539  538   * Allocate a time-sharing class specific thread structure and
 540  539   * initialize it with the parameters supplied. Also move the thread
 541  540   * to specified time-sharing priority.
 542  541   */
 543  542  static int
 544      -ts_enterclass(kthread_t *t, id_t cid, void *parmsp,
 545      -        cred_t *reqpcredp, void *bufp)
      543 +ts_enterclass(kthread_t *t, id_t cid, void *parmsp, cred_t *reqpcredp,
      544 +    void *bufp)
 546  545  {
 547  546          tsparms_t       *tsparmsp = (tsparms_t *)parmsp;
 548  547          tsproc_t        *tspp;
 549  548          pri_t           reqtsuprilim;
 550  549          pri_t           reqtsupri;
 551  550          static uint32_t tspexists = 0;  /* set on first occurrence of */
 552  551                                          /*   a time-sharing process */
 553  552  
 554  553          tspp = (tsproc_t *)bufp;
 555  554          ASSERT(tspp != NULL);
↓ open down ↓ 140 lines elided ↑ open up ↑
 696  695           */
 697  696          thread_lock(t);
 698  697          ctspp->ts_timeleft = ts_dptbl[ptspp->ts_cpupri].ts_quantum;
 699  698          ctspp->ts_cpupri = ptspp->ts_cpupri;
 700  699          ctspp->ts_boost = ptspp->ts_boost;
 701  700          ctspp->ts_uprilim = ptspp->ts_uprilim;
 702  701          ctspp->ts_upri = ptspp->ts_upri;
 703  702          TS_NEWUMDPRI(ctspp);
 704  703          ctspp->ts_nice = ptspp->ts_nice;
 705  704          ctspp->ts_dispwait = 0;
 706      -        ctspp->ts_flags = ptspp->ts_flags & ~(TSKPRI | TSBACKQ | TSRESTORE);
      705 +        ctspp->ts_flags = ptspp->ts_flags & ~(TSBACKQ | TSRESTORE);
 707  706          ctspp->ts_tp = ct;
 708  707          cpucaps_sc_init(&ctspp->ts_caps);
 709  708          thread_unlock(t);
 710  709  
 711  710          /*
 712  711           * Link new structure into tsproc list.
 713  712           */
 714  713          ct->t_cldata = (void *)ctspp;
 715  714          TS_LIST_INSERT(ctspp);
 716  715          return (0);
↓ open down ↓ 30 lines elided ↑ open up ↑
 747  746          continuelwps(pp);
 748  747  
 749  748          thread_lock(t);
 750  749          tspp = (tsproc_t *)(t->t_cldata);
 751  750          tspp->ts_cpupri = ts_dptbl[tspp->ts_cpupri].ts_tqexp;
 752  751          TS_NEWUMDPRI(tspp);
 753  752          tspp->ts_timeleft = ts_dptbl[tspp->ts_cpupri].ts_quantum;
 754  753          tspp->ts_dispwait = 0;
 755  754          t->t_pri = ts_dptbl[tspp->ts_umdpri].ts_globpri;
 756  755          ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
 757      -        tspp->ts_flags &= ~TSKPRI;
 758  756          THREAD_TRANSITION(t);
 759  757          ts_setrun(t);
 760  758          thread_unlock(t);
 761  759          /*
 762  760           * Safe to drop p_lock now since it is safe to change
 763  761           * the scheduling class after this point.
 764  762           */
 765  763          mutex_exit(&pp->p_lock);
 766  764  
 767  765          swtch();
↓ open down ↓ 442 lines elided ↑ open up ↑
1210 1208          if (nice >= 2 * NZERO)
1211 1209                  nice = 2 * NZERO - 1;
1212 1210  
1213 1211          thread_lock(tx);
1214 1212  
1215 1213          tspp->ts_uprilim = reqtsuprilim;
1216 1214          tspp->ts_upri = reqtsupri;
1217 1215          TS_NEWUMDPRI(tspp);
1218 1216          tspp->ts_nice = nice;
1219 1217  
1220      -        if ((tspp->ts_flags & TSKPRI) != 0) {
1221      -                thread_unlock(tx);
1222      -                return (0);
1223      -        }
1224      -
1225 1218          tspp->ts_dispwait = 0;
1226 1219          ts_change_priority(tx, tspp);
1227 1220          thread_unlock(tx);
1228 1221          return (0);
1229 1222  }
1230 1223  
1231 1224  
1232 1225  static int
1233 1226  ia_parmsset(kthread_t *tx, void *parmsp, id_t reqpcid, cred_t *reqpcredp)
1234 1227  {
1235 1228          tsproc_t        *tspp = (tsproc_t *)tx->t_cldata;
1236 1229          iaparms_t       *iaparmsp = (iaparms_t *)parmsp;
1237 1230          proc_t          *p;
1238 1231          pid_t           pid, pgid, sid;
1239 1232          pid_t           on, off;
1240      -        struct stdata   *stp;
     1233 +        struct stdata   *stp;
1241 1234          int             sess_held;
1242 1235  
1243 1236          /*
1244 1237           * Handle user priority changes
1245 1238           */
1246 1239          if (iaparmsp->ia_mode == IA_NOCHANGE)
1247 1240                  return (ts_parmsset(tx, parmsp, reqpcid, reqpcredp));
1248 1241  
1249 1242          /*
1250 1243           * Check permissions for changing modes.
↓ open down ↓ 115 lines elided ↑ open up ↑
1366 1359  /*
1367 1360   * Arrange for thread to be placed in appropriate location
1368 1361   * on dispatcher queue.
1369 1362   *
1370 1363   * This is called with the current thread in TS_ONPROC and locked.
1371 1364   */
1372 1365  static void
1373 1366  ts_preempt(kthread_t *t)
1374 1367  {
1375 1368          tsproc_t        *tspp = (tsproc_t *)(t->t_cldata);
1376      -        klwp_t          *lwp = curthread->t_lwp;
     1369 +        klwp_t          *lwp = ttolwp(t);
1377 1370          pri_t           oldpri = t->t_pri;
1378 1371  
1379 1372          ASSERT(t == curthread);
1380 1373          ASSERT(THREAD_LOCK_HELD(curthread));
1381 1374  
1382 1375          /*
1383      -         * If preempted in the kernel, make sure the thread has
1384      -         * a kernel priority if needed.
1385      -         */
1386      -        if (!(tspp->ts_flags & TSKPRI) && lwp != NULL && t->t_kpri_req) {
1387      -                tspp->ts_flags |= TSKPRI;
1388      -                THREAD_CHANGE_PRI(t, ts_kmdpris[0]);
1389      -                ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
1390      -                t->t_trapret = 1;               /* so ts_trapret will run */
1391      -                aston(t);
1392      -        }
1393      -
1394      -        /*
1395 1376           * This thread may be placed on wait queue by CPU Caps. In this case we
1396 1377           * do not need to do anything until it is removed from the wait queue.
1397      -         * Do not enforce CPU caps on threads running at a kernel priority
1398 1378           */
1399 1379          if (CPUCAPS_ON()) {
1400 1380                  (void) cpucaps_charge(t, &tspp->ts_caps,
1401 1381                      CPUCAPS_CHARGE_ENFORCE);
1402      -                if (!(tspp->ts_flags & TSKPRI) && CPUCAPS_ENFORCE(t))
     1382 +                if (CPUCAPS_ENFORCE(t))
1403 1383                          return;
1404 1384          }
1405 1385  
1406 1386          /*
1407 1387           * If thread got preempted in the user-land then we know
1408 1388           * it isn't holding any locks.  Mark it as swappable.
1409 1389           */
1410 1390          ASSERT(t->t_schedflag & TS_DONT_SWAP);
1411 1391          if (lwp != NULL && lwp->lwp_state == LWP_USER)
1412 1392                  t->t_schedflag &= ~TS_DONT_SWAP;
↓ open down ↓ 5 lines elided ↑ open up ↑
1418 1398           * too long, let the preemption happen here but try to make
1419 1399           * sure the thread is rescheduled as soon as possible.  We do
1420 1400           * this by putting it on the front of the highest priority run
1421 1401           * queue in the TS class.  If the preemption has been put off
1422 1402           * for too long, clear the "nopreempt" bit and let the thread
1423 1403           * be preempted.
1424 1404           */
1425 1405          if (t->t_schedctl && schedctl_get_nopreempt(t)) {
1426 1406                  if (tspp->ts_timeleft > -SC_MAX_TICKS) {
1427 1407                          DTRACE_SCHED1(schedctl__nopreempt, kthread_t *, t);
1428      -                        if (!(tspp->ts_flags & TSKPRI)) {
1429      -                                /*
1430      -                                 * If not already remembered, remember current
1431      -                                 * priority for restoration in ts_yield().
1432      -                                 */
1433      -                                if (!(tspp->ts_flags & TSRESTORE)) {
1434      -                                        tspp->ts_scpri = t->t_pri;
1435      -                                        tspp->ts_flags |= TSRESTORE;
1436      -                                }
1437      -                                THREAD_CHANGE_PRI(t, ts_maxumdpri);
1438      -                                t->t_schedflag |= TS_DONT_SWAP;
     1408 +                        /*
     1409 +                         * If not already remembered, remember current
     1410 +                         * priority for restoration in ts_yield().
     1411 +                         */
     1412 +                        if (!(tspp->ts_flags & TSRESTORE)) {
     1413 +                                tspp->ts_scpri = t->t_pri;
     1414 +                                tspp->ts_flags |= TSRESTORE;
1439 1415                          }
     1416 +                        THREAD_CHANGE_PRI(t, ts_maxumdpri);
     1417 +                        t->t_schedflag |= TS_DONT_SWAP;
1440 1418                          schedctl_set_yield(t, 1);
1441 1419                          setfrontdq(t);
1442 1420                          goto done;
1443 1421                  } else {
1444 1422                          if (tspp->ts_flags & TSRESTORE) {
1445 1423                                  THREAD_CHANGE_PRI(t, tspp->ts_scpri);
1446 1424                                  tspp->ts_flags &= ~TSRESTORE;
1447 1425                          }
1448 1426                          schedctl_set_nopreempt(t, 0);
1449 1427                          DTRACE_SCHED1(schedctl__preempt, kthread_t *, t);
1450 1428                          TNF_PROBE_2(schedctl_preempt, "schedctl TS ts_preempt",
1451 1429                              /* CSTYLED */, tnf_pid, pid, ttoproc(t)->p_pid,
1452 1430                              tnf_lwpid, lwpid, t->t_tid);
1453 1431                          /*
1454 1432                           * Fall through and be preempted below.
1455 1433                           */
1456 1434                  }
1457 1435          }
1458 1436  
1459      -        if ((tspp->ts_flags & (TSBACKQ|TSKPRI)) == TSBACKQ) {
     1437 +        if ((tspp->ts_flags & TSBACKQ) != 0) {
1460 1438                  tspp->ts_timeleft = ts_dptbl[tspp->ts_cpupri].ts_quantum;
1461 1439                  tspp->ts_dispwait = 0;
1462 1440                  tspp->ts_flags &= ~TSBACKQ;
1463 1441                  setbackdq(t);
1464      -        } else if ((tspp->ts_flags & (TSBACKQ|TSKPRI)) == (TSBACKQ|TSKPRI)) {
1465      -                tspp->ts_flags &= ~TSBACKQ;
1466      -                setbackdq(t);
1467 1442          } else {
1468 1443                  setfrontdq(t);
1469 1444          }
1470 1445  
1471 1446  done:
1472 1447          TRACE_2(TR_FAC_DISP, TR_PREEMPT,
1473 1448              "preempt:tid %p old pri %d", t, oldpri);
1474 1449  }
1475 1450  
1476 1451  static void
↓ open down ↓ 1 lines elided ↑ open up ↑
1478 1453  {
1479 1454          tsproc_t *tspp = (tsproc_t *)(t->t_cldata);
1480 1455  
1481 1456          ASSERT(THREAD_LOCK_HELD(t));    /* t should be in transition */
1482 1457  
1483 1458          if (tspp->ts_dispwait > ts_dptbl[tspp->ts_umdpri].ts_maxwait) {
1484 1459                  tspp->ts_cpupri = ts_dptbl[tspp->ts_cpupri].ts_slpret;
1485 1460                  TS_NEWUMDPRI(tspp);
1486 1461                  tspp->ts_timeleft = ts_dptbl[tspp->ts_cpupri].ts_quantum;
1487 1462                  tspp->ts_dispwait = 0;
1488      -                if ((tspp->ts_flags & TSKPRI) == 0) {
1489      -                        THREAD_CHANGE_PRI(t,
1490      -                            ts_dptbl[tspp->ts_umdpri].ts_globpri);
1491      -                        ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
1492      -                }
     1463 +                THREAD_CHANGE_PRI(t, ts_dptbl[tspp->ts_umdpri].ts_globpri);
     1464 +                ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
1493 1465          }
1494 1466  
1495 1467          tspp->ts_flags &= ~TSBACKQ;
1496 1468  
1497 1469          if (tspp->ts_flags & TSIA) {
1498 1470                  if (tspp->ts_flags & TSIASET)
1499 1471                          setfrontdq(t);
1500 1472                  else
1501 1473                          setbackdq(t);
1502 1474          } else {
1503 1475                  if (t->t_disp_time != ddi_get_lbolt())
1504 1476                          setbackdq(t);
1505 1477                  else
1506 1478                          setfrontdq(t);
1507 1479          }
1508 1480  }
1509 1481  
1510 1482  
1511 1483  /*
1512      - * Prepare thread for sleep. We reset the thread priority so it will
1513      - * run at the kernel priority level when it wakes up.
     1484 + * Prepare thread for sleep.
1514 1485   */
1515 1486  static void
1516 1487  ts_sleep(kthread_t *t)
1517 1488  {
1518 1489          tsproc_t        *tspp = (tsproc_t *)(t->t_cldata);
1519      -        int             flags;
1520 1490          pri_t           old_pri = t->t_pri;
1521 1491  
1522 1492          ASSERT(t == curthread);
1523 1493          ASSERT(THREAD_LOCK_HELD(t));
1524 1494  
1525 1495          /*
1526 1496           * Account for time spent on CPU before going to sleep.
1527 1497           */
1528 1498          (void) CPUCAPS_CHARGE(t, &tspp->ts_caps, CPUCAPS_CHARGE_ENFORCE);
1529 1499  
1530      -        flags = tspp->ts_flags;
1531      -        if (t->t_kpri_req) {
1532      -                tspp->ts_flags = flags | TSKPRI;
1533      -                THREAD_CHANGE_PRI(t, ts_kmdpris[0]);
1534      -                ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
1535      -                t->t_trapret = 1;               /* so ts_trapret will run */
1536      -                aston(t);
1537      -        } else if (tspp->ts_dispwait > ts_dptbl[tspp->ts_umdpri].ts_maxwait) {
1538      -                /*
1539      -                 * If thread has blocked in the kernel (as opposed to
1540      -                 * being merely preempted), recompute the user mode priority.
1541      -                 */
     1500 +        if (tspp->ts_dispwait > ts_dptbl[tspp->ts_umdpri].ts_maxwait) {
1542 1501                  tspp->ts_cpupri = ts_dptbl[tspp->ts_cpupri].ts_slpret;
1543 1502                  TS_NEWUMDPRI(tspp);
1544 1503                  tspp->ts_timeleft = ts_dptbl[tspp->ts_cpupri].ts_quantum;
1545 1504                  tspp->ts_dispwait = 0;
1546 1505  
1547 1506                  THREAD_CHANGE_PRI(curthread,
1548 1507                      ts_dptbl[tspp->ts_umdpri].ts_globpri);
1549 1508                  ASSERT(curthread->t_pri >= 0 &&
1550 1509                      curthread->t_pri <= ts_maxglobpri);
1551      -                tspp->ts_flags = flags & ~TSKPRI;
1552 1510  
1553 1511                  if (DISP_MUST_SURRENDER(curthread))
1554 1512                          cpu_surrender(curthread);
1555      -        } else if (flags & TSKPRI) {
1556      -                THREAD_CHANGE_PRI(curthread,
1557      -                    ts_dptbl[tspp->ts_umdpri].ts_globpri);
1558      -                ASSERT(curthread->t_pri >= 0 &&
1559      -                    curthread->t_pri <= ts_maxglobpri);
1560      -                tspp->ts_flags = flags & ~TSKPRI;
1561      -
1562      -                if (DISP_MUST_SURRENDER(curthread))
1563      -                        cpu_surrender(curthread);
1564 1513          }
1565 1514          t->t_stime = ddi_get_lbolt();           /* time stamp for the swapper */
1566 1515          TRACE_2(TR_FAC_DISP, TR_SLEEP,
1567 1516              "sleep:tid %p old pri %d", t, old_pri);
1568 1517  }
1569 1518  
1570 1519  
1571 1520  /*
1572 1521   * Return Values:
1573 1522   *
↓ open down ↓ 13 lines elided ↑ open up ↑
1587 1536          ASSERT(THREAD_LOCK_HELD(t));
1588 1537  
1589 1538          /*
1590 1539           * We know that pri_t is a short.
1591 1540           * Be sure not to overrun its range.
1592 1541           */
1593 1542          if (t->t_state == TS_RUN && (t->t_schedflag & TS_LOAD) == 0) {
1594 1543                  time_t swapout_time;
1595 1544  
1596 1545                  swapout_time = (ddi_get_lbolt() - t->t_stime) / hz;
1597      -                if (INHERITED(t) || (tspp->ts_flags & (TSKPRI | TSIASET)))
     1546 +                if (INHERITED(t) || (tspp->ts_flags & TSIASET)) {
1598 1547                          epri = (long)DISP_PRIO(t) + swapout_time;
1599      -                else {
     1548 +                } else {
1600 1549                          /*
1601 1550                           * Threads which have been out for a long time,
1602 1551                           * have high user mode priority and are associated
1603 1552                           * with a small address space are more deserving
1604 1553                           */
1605 1554                          epri = ts_dptbl[tspp->ts_umdpri].ts_globpri;
1606 1555                          ASSERT(epri >= 0 && epri <= ts_maxumdpri);
1607 1556                          epri += swapout_time - pp->p_swrss / nz(maxpgio)/2;
1608 1557                  }
1609 1558                  /*
↓ open down ↓ 31 lines elided ↑ open up ↑
1641 1590  static pri_t
1642 1591  ts_swapout(kthread_t *t, int flags)
1643 1592  {
1644 1593          tsproc_t        *tspp = (tsproc_t *)(t->t_cldata);
1645 1594          long            epri = -1;
1646 1595          proc_t          *pp = ttoproc(t);
1647 1596          time_t          swapin_time;
1648 1597  
1649 1598          ASSERT(THREAD_LOCK_HELD(t));
1650 1599  
1651      -        if (INHERITED(t) || (tspp->ts_flags & (TSKPRI | TSIASET)) ||
     1600 +        if (INHERITED(t) || (tspp->ts_flags & TSIASET) ||
1652 1601              (t->t_proc_flag & TP_LWPEXIT) ||
1653 1602              (t->t_state & (TS_ZOMB | TS_FREE | TS_STOPPED |
1654 1603              TS_ONPROC | TS_WAIT)) ||
1655 1604              !(t->t_schedflag & TS_LOAD) || !SWAP_OK(t))
1656 1605                  return (-1);
1657 1606  
1658 1607          ASSERT(t->t_state & (TS_SLEEP | TS_RUN));
1659 1608  
1660 1609          /*
1661 1610           * We know that pri_t is a short.
↓ open down ↓ 48 lines elided ↑ open up ↑
1710 1659          ASSERT(MUTEX_HELD(&(ttoproc(t))->p_lock));
1711 1660  
1712 1661          thread_lock(t);
1713 1662  
1714 1663          /*
1715 1664           * Keep track of thread's project CPU usage.  Note that projects
1716 1665           * get charged even when threads are running in the kernel.
1717 1666           */
1718 1667          if (CPUCAPS_ON()) {
1719 1668                  call_cpu_surrender = cpucaps_charge(t, &tspp->ts_caps,
1720      -                    CPUCAPS_CHARGE_ENFORCE) && !(tspp->ts_flags & TSKPRI);
     1669 +                    CPUCAPS_CHARGE_ENFORCE);
1721 1670          }
1722 1671  
1723      -        if ((tspp->ts_flags & TSKPRI) == 0) {
1724      -                if (--tspp->ts_timeleft <= 0) {
1725      -                        pri_t   new_pri;
     1672 +        if (--tspp->ts_timeleft <= 0) {
     1673 +                pri_t   new_pri;
1726 1674  
1727      -                        /*
1728      -                         * If we're doing preemption control and trying to
1729      -                         * avoid preempting this thread, just note that
1730      -                         * the thread should yield soon and let it keep
1731      -                         * running (unless it's been a while).
1732      -                         */
1733      -                        if (t->t_schedctl && schedctl_get_nopreempt(t)) {
1734      -                                if (tspp->ts_timeleft > -SC_MAX_TICKS) {
1735      -                                        DTRACE_SCHED1(schedctl__nopreempt,
1736      -                                            kthread_t *, t);
1737      -                                        schedctl_set_yield(t, 1);
1738      -                                        thread_unlock_nopreempt(t);
1739      -                                        return;
1740      -                                }
1741      -
1742      -                                TNF_PROBE_2(schedctl_failsafe,
1743      -                                    "schedctl TS ts_tick", /* CSTYLED */,
1744      -                                    tnf_pid, pid, ttoproc(t)->p_pid,
1745      -                                    tnf_lwpid, lwpid, t->t_tid);
     1675 +                /*
     1676 +                 * If we're doing preemption control and trying to avoid
     1677 +                 * preempting this thread, just note that the thread should
     1678 +                 * yield soon and let it keep running (unless it's been a
     1679 +                 * while).
     1680 +                 */
     1681 +                if (t->t_schedctl && schedctl_get_nopreempt(t)) {
     1682 +                        if (tspp->ts_timeleft > -SC_MAX_TICKS) {
     1683 +                                DTRACE_SCHED1(schedctl__nopreempt,
     1684 +                                    kthread_t *, t);
     1685 +                                schedctl_set_yield(t, 1);
     1686 +                                thread_unlock_nopreempt(t);
     1687 +                                return;
1746 1688                          }
1747      -                        tspp->ts_flags &= ~TSRESTORE;
1748      -                        tspp->ts_cpupri = ts_dptbl[tspp->ts_cpupri].ts_tqexp;
1749      -                        TS_NEWUMDPRI(tspp);
1750      -                        tspp->ts_dispwait = 0;
1751      -                        new_pri = ts_dptbl[tspp->ts_umdpri].ts_globpri;
1752      -                        ASSERT(new_pri >= 0 && new_pri <= ts_maxglobpri);
1753      -                        /*
1754      -                         * When the priority of a thread is changed,
1755      -                         * it may be necessary to adjust its position
1756      -                         * on a sleep queue or dispatch queue.
1757      -                         * The function thread_change_pri accomplishes
1758      -                         * this.
1759      -                         */
1760      -                        if (thread_change_pri(t, new_pri, 0)) {
1761      -                                if ((t->t_schedflag & TS_LOAD) &&
1762      -                                    (lwp = t->t_lwp) &&
1763      -                                    lwp->lwp_state == LWP_USER)
1764      -                                        t->t_schedflag &= ~TS_DONT_SWAP;
1765      -                                tspp->ts_timeleft =
1766      -                                    ts_dptbl[tspp->ts_cpupri].ts_quantum;
1767      -                        } else {
1768      -                                call_cpu_surrender = B_TRUE;
1769      -                        }
1770      -                        TRACE_2(TR_FAC_DISP, TR_TICK,
1771      -                            "tick:tid %p old pri %d", t, oldpri);
1772      -                } else if (t->t_state == TS_ONPROC &&
1773      -                    t->t_pri < t->t_disp_queue->disp_maxrunpri) {
     1689 +
     1690 +                        TNF_PROBE_2(schedctl_failsafe,
     1691 +                            "schedctl TS ts_tick", /* CSTYLED */,
     1692 +                            tnf_pid, pid, ttoproc(t)->p_pid,
     1693 +                            tnf_lwpid, lwpid, t->t_tid);
     1694 +                }
     1695 +                tspp->ts_flags &= ~TSRESTORE;
     1696 +                tspp->ts_cpupri = ts_dptbl[tspp->ts_cpupri].ts_tqexp;
     1697 +                TS_NEWUMDPRI(tspp);
     1698 +                tspp->ts_dispwait = 0;
     1699 +                new_pri = ts_dptbl[tspp->ts_umdpri].ts_globpri;
     1700 +                ASSERT(new_pri >= 0 && new_pri <= ts_maxglobpri);
     1701 +                /*
     1702 +                 * When the priority of a thread is changed, it may be
     1703 +                 * necessary to adjust its position on a sleep queue or
     1704 +                 * dispatch queue.  The function thread_change_pri accomplishes
     1705 +                 * this.
     1706 +                 */
     1707 +                if (thread_change_pri(t, new_pri, 0)) {
     1708 +                        if ((t->t_schedflag & TS_LOAD) &&
     1709 +                            (lwp = t->t_lwp) &&
     1710 +                            lwp->lwp_state == LWP_USER)
     1711 +                                t->t_schedflag &= ~TS_DONT_SWAP;
     1712 +                        tspp->ts_timeleft =
     1713 +                            ts_dptbl[tspp->ts_cpupri].ts_quantum;
     1714 +                } else {
1774 1715                          call_cpu_surrender = B_TRUE;
1775 1716                  }
     1717 +                TRACE_2(TR_FAC_DISP, TR_TICK,
     1718 +                    "tick:tid %p old pri %d", t, oldpri);
     1719 +        } else if (t->t_state == TS_ONPROC &&
     1720 +            t->t_pri < t->t_disp_queue->disp_maxrunpri) {
     1721 +                call_cpu_surrender = B_TRUE;
1776 1722          }
1777 1723  
1778 1724          if (call_cpu_surrender) {
1779 1725                  tspp->ts_flags |= TSBACKQ;
1780 1726                  cpu_surrender(t);
1781 1727          }
1782 1728  
1783 1729          thread_unlock_nopreempt(t);     /* clock thread can't be preempted */
1784 1730  }
1785 1731  
1786 1732  
1787 1733  /*
1788      - * If thread is currently at a kernel mode priority (has slept)
1789      - * we assign it the appropriate user mode priority and time quantum
1790      - * here.  If we are lowering the thread's priority below that of
1791      - * other runnable threads we will normally set runrun via cpu_surrender() to
1792      - * cause preemption.
     1734 + * If we are lowering the thread's priority below that of other runnable
     1735 + * threads we will normally set runrun via cpu_surrender() to cause preemption.
1793 1736   */
1794 1737  static void
1795 1738  ts_trapret(kthread_t *t)
1796 1739  {
1797 1740          tsproc_t        *tspp = (tsproc_t *)t->t_cldata;
1798 1741          cpu_t           *cp = CPU;
1799 1742          pri_t           old_pri = curthread->t_pri;
1800 1743  
1801 1744          ASSERT(THREAD_LOCK_HELD(t));
1802 1745          ASSERT(t == curthread);
1803 1746          ASSERT(cp->cpu_dispthread == t);
1804 1747          ASSERT(t->t_state == TS_ONPROC);
1805 1748  
1806      -        t->t_kpri_req = 0;
1807 1749          if (tspp->ts_dispwait > ts_dptbl[tspp->ts_umdpri].ts_maxwait) {
1808 1750                  tspp->ts_cpupri = ts_dptbl[tspp->ts_cpupri].ts_slpret;
1809 1751                  TS_NEWUMDPRI(tspp);
1810 1752                  tspp->ts_timeleft = ts_dptbl[tspp->ts_cpupri].ts_quantum;
1811 1753                  tspp->ts_dispwait = 0;
1812 1754  
1813 1755                  /*
1814 1756                   * If thread has blocked in the kernel (as opposed to
1815 1757                   * being merely preempted), recompute the user mode priority.
1816 1758                   */
1817 1759                  THREAD_CHANGE_PRI(t, ts_dptbl[tspp->ts_umdpri].ts_globpri);
1818 1760                  cp->cpu_dispatch_pri = DISP_PRIO(t);
1819 1761                  ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
1820      -                tspp->ts_flags &= ~TSKPRI;
1821 1762  
1822 1763                  if (DISP_MUST_SURRENDER(t))
1823 1764                          cpu_surrender(t);
1824      -        } else if (tspp->ts_flags & TSKPRI) {
1825      -                /*
1826      -                 * If thread has blocked in the kernel (as opposed to
1827      -                 * being merely preempted), recompute the user mode priority.
1828      -                 */
1829      -                THREAD_CHANGE_PRI(t, ts_dptbl[tspp->ts_umdpri].ts_globpri);
1830      -                cp->cpu_dispatch_pri = DISP_PRIO(t);
1831      -                ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
1832      -                tspp->ts_flags &= ~TSKPRI;
1833      -
1834      -                if (DISP_MUST_SURRENDER(t))
1835      -                        cpu_surrender(t);
1836 1765          }
1837 1766  
1838 1767          /*
1839      -         * Swapout lwp if the swapper is waiting for this thread to
1840      -         * reach a safe point.
     1768 +         * Swapout lwp if the swapper is waiting for this thread to reach a
     1769 +         * safe point.
1841 1770           */
1842 1771          if ((t->t_schedflag & TS_SWAPENQ) && !(tspp->ts_flags & TSIASET)) {
1843 1772                  thread_unlock(t);
1844 1773                  swapout_lwp(ttolwp(t));
1845 1774                  thread_lock(t);
1846 1775          }
1847 1776  
1848 1777          TRACE_2(TR_FAC_DISP, TR_TRAPRET,
1849 1778              "trapret:tid %p old pri %d", t, old_pri);
1850 1779  }
↓ open down ↓ 73 lines elided ↑ open up ↑
1924 1853                   * Lock the thread and verify state.
1925 1854                   */
1926 1855                  thread_lock(tx);
1927 1856                  /*
1928 1857                   * Skip the thread if it is no longer in the TS (or IA) class.
1929 1858                   */
1930 1859                  if (tx->t_clfuncs != &ts_classfuncs.thread &&
1931 1860                      tx->t_clfuncs != &ia_classfuncs.thread)
1932 1861                          goto next;
1933 1862                  tspp->ts_dispwait++;
1934      -                if ((tspp->ts_flags & TSKPRI) != 0)
1935      -                        goto next;
1936 1863                  if (tspp->ts_dispwait <= ts_dptbl[tspp->ts_umdpri].ts_maxwait)
1937 1864                          goto next;
1938 1865                  if (tx->t_schedctl && schedctl_get_nopreempt(tx))
1939 1866                          goto next;
1940 1867                  if (tx->t_state != TS_RUN && tx->t_state != TS_WAIT &&
1941 1868                      (tx->t_state != TS_SLEEP || !ts_sleep_promote)) {
1942 1869                          /* make next syscall/trap do CL_TRAPRET */
1943 1870                          tx->t_trapret = 1;
1944 1871                          aston(tx);
1945 1872                          goto next;
↓ open down ↓ 15 lines elided ↑ open up ↑
1961 1888                  }
1962 1889  next:
1963 1890                  thread_unlock(tx);
1964 1891          }
1965 1892          mutex_exit(&ts_list_lock[i]);
1966 1893  
1967 1894          return (updated);
1968 1895  }
1969 1896  
1970 1897  /*
1971      - * Processes waking up go to the back of their queue.  We don't
1972      - * need to assign a time quantum here because thread is still
1973      - * at a kernel mode priority and the time slicing is not done
1974      - * for threads running in the kernel after sleeping.  The proper
1975      - * time quantum will be assigned by ts_trapret before the thread
1976      - * returns to user mode.
     1898 + * Processes waking up go to the back of their queue.
1977 1899   */
1978 1900  static void
1979 1901  ts_wakeup(kthread_t *t)
1980 1902  {
1981 1903          tsproc_t        *tspp = (tsproc_t *)(t->t_cldata);
1982 1904  
1983 1905          ASSERT(THREAD_LOCK_HELD(t));
1984 1906  
1985 1907          t->t_stime = ddi_get_lbolt();           /* time stamp for the swapper */
1986 1908  
1987      -        if (tspp->ts_flags & TSKPRI) {
1988      -                tspp->ts_flags &= ~TSBACKQ;
     1909 +        if (tspp->ts_dispwait > ts_dptbl[tspp->ts_umdpri].ts_maxwait) {
     1910 +                tspp->ts_cpupri = ts_dptbl[tspp->ts_cpupri].ts_slpret;
     1911 +                TS_NEWUMDPRI(tspp);
     1912 +                tspp->ts_timeleft = ts_dptbl[tspp->ts_cpupri].ts_quantum;
     1913 +                tspp->ts_dispwait = 0;
     1914 +                THREAD_CHANGE_PRI(t, ts_dptbl[tspp->ts_umdpri].ts_globpri);
     1915 +                ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
     1916 +        }
     1917 +
     1918 +        tspp->ts_flags &= ~TSBACKQ;
     1919 +
     1920 +        if (tspp->ts_flags & TSIA) {
1989 1921                  if (tspp->ts_flags & TSIASET)
1990 1922                          setfrontdq(t);
1991 1923                  else
1992 1924                          setbackdq(t);
1993      -        } else if (t->t_kpri_req) {
1994      -                /*
1995      -                 * Give thread a priority boost if we were asked.
1996      -                 */
1997      -                tspp->ts_flags |= TSKPRI;
1998      -                THREAD_CHANGE_PRI(t, ts_kmdpris[0]);
1999      -                setbackdq(t);
2000      -                t->t_trapret = 1;       /* so that ts_trapret will run */
2001      -                aston(t);
2002 1925          } else {
2003      -                if (tspp->ts_dispwait > ts_dptbl[tspp->ts_umdpri].ts_maxwait) {
2004      -                        tspp->ts_cpupri = ts_dptbl[tspp->ts_cpupri].ts_slpret;
2005      -                        TS_NEWUMDPRI(tspp);
2006      -                        tspp->ts_timeleft =
2007      -                            ts_dptbl[tspp->ts_cpupri].ts_quantum;
2008      -                        tspp->ts_dispwait = 0;
2009      -                        THREAD_CHANGE_PRI(t,
2010      -                            ts_dptbl[tspp->ts_umdpri].ts_globpri);
2011      -                        ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
2012      -                }
2013      -
2014      -                tspp->ts_flags &= ~TSBACKQ;
2015      -
2016      -                if (tspp->ts_flags & TSIA) {
2017      -                        if (tspp->ts_flags & TSIASET)
2018      -                                setfrontdq(t);
2019      -                        else
2020      -                                setbackdq(t);
2021      -                } else {
2022      -                        if (t->t_disp_time != ddi_get_lbolt())
2023      -                                setbackdq(t);
2024      -                        else
2025      -                                setfrontdq(t);
2026      -                }
     1926 +                if (t->t_disp_time != ddi_get_lbolt())
     1927 +                        setbackdq(t);
     1928 +                else
     1929 +                        setfrontdq(t);
2027 1930          }
2028 1931  }
2029 1932  
2030 1933  
2031 1934  /*
2032 1935   * When a thread yields, put it on the back of the run queue.
2033 1936   */
2034 1937  static void
2035 1938  ts_yield(kthread_t *t)
2036 1939  {
↓ open down ↓ 135 lines elided ↑ open up ↑
2172 2075           * not changing mode.
2173 2076           */
2174 2077          return (ts_parmsset(t, &tsparms, 0, cr));
2175 2078  }
2176 2079  
2177 2080  /*
2178 2081   * ia_set_process_group marks foreground processes as interactive
2179 2082   * and background processes as non-interactive iff the session
2180 2083   * leader is interactive.  This routine is called from two places:
2181 2084   *      strioctl:SPGRP when a new process group gets
2182      - *              control of the tty.
     2085 + *              control of the tty.
2183 2086   *      ia_parmsset-when the process in question is a session leader.
2184 2087   * ia_set_process_group assumes that pidlock is held by the caller,
2185 2088   * either strioctl or priocntlsys.  If the caller is priocntlsys
2186 2089   * (via ia_parmsset) then the p_lock of the session leader is held
2187 2090   * and the code needs to be careful about acquiring other p_locks.
2188 2091   */
2189 2092  static void
2190 2093  ia_set_process_group(pid_t sid, pid_t bg_pgid, pid_t fg_pgid)
2191 2094  {
2192      -        proc_t          *leader, *fg, *bg;
     2095 +        proc_t          *leader, *fg, *bg;
2193 2096          tsproc_t        *tspp;
2194 2097          kthread_t       *tx;
2195 2098          int             plocked = 0;
2196 2099  
2197 2100          ASSERT(MUTEX_HELD(&pidlock));
2198 2101  
2199 2102          /*
2200 2103           * see if the session leader is interactive AND
2201 2104           * if it is currently "on" AND controlling a tty
2202 2105           * iff it is then make the processes in the foreground
↓ open down ↓ 81 lines elided ↑ open up ↑
2284 2187                           * if this thread is not interactive continue
2285 2188                           */
2286 2189                          if (tx->t_cid != ia_cid) {
2287 2190                                  thread_unlock(tx);
2288 2191                                  continue;
2289 2192                          }
2290 2193                          tspp = tx->t_cldata;
2291 2194                          tspp->ts_flags |= TSIASET;
2292 2195                          tspp->ts_boost = ia_boost;
2293 2196                          TS_NEWUMDPRI(tspp);
2294      -                        if ((tspp->ts_flags & TSKPRI) != 0) {
2295      -                                thread_unlock(tx);
2296      -                                continue;
2297      -                        }
2298 2197                          tspp->ts_dispwait = 0;
2299 2198                          ts_change_priority(tx, tspp);
2300 2199                          thread_unlock(tx);
2301 2200                  } while ((tx = tx->t_forw) != fg->p_tlist);
2302 2201                  mutex_exit(&fg->p_lock);
2303 2202          }
2304 2203  skip:
2305 2204          if (bg_pgid == 0)
2306 2205                  return;
2307 2206          for (bg = (proc_t *)pgfind(bg_pgid); bg != NULL; bg = bg->p_pglink) {
↓ open down ↓ 29 lines elided ↑ open up ↑
2337 2236                           * if this thread is not interactive continue
2338 2237                           */
2339 2238                          if (tx->t_cid != ia_cid) {
2340 2239                                  thread_unlock(tx);
2341 2240                                  continue;
2342 2241                          }
2343 2242                          tspp = tx->t_cldata;
2344 2243                          tspp->ts_flags &= ~TSIASET;
2345 2244                          tspp->ts_boost = -ia_boost;
2346 2245                          TS_NEWUMDPRI(tspp);
2347      -                        if ((tspp->ts_flags & TSKPRI) != 0) {
2348      -                                thread_unlock(tx);
2349      -                                continue;
2350      -                        }
2351 2246  
2352 2247                          tspp->ts_dispwait = 0;
2353 2248                          ts_change_priority(tx, tspp);
2354 2249                          thread_unlock(tx);
2355 2250                  } while ((tx = tx->t_forw) != bg->p_tlist);
2356 2251                  mutex_exit(&bg->p_lock);
2357 2252          }
2358 2253  }
2359 2254  
2360 2255  
↓ open down ↓ 67 lines elided ↑ open up ↑
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX