restore sparc comments
de-linting of .s files


   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  23  */
  24 
  25 #if defined(lint)
  26 #include <sys/types.h>
  27 #include <sys/thread.h>
  28 #else   /* lint */
  29 #include "assym.h"
  30 #endif  /* lint */
  31 
  32 #include <sys/cmn_err.h>
  33 #include <sys/ftrace.h>
  34 #include <sys/asm_linkage.h>
  35 #include <sys/machthread.h>
  36 #include <sys/machcpuvar.h>
  37 #include <sys/intreg.h>
  38 #include <sys/ivintr.h>
  39 
  40 #ifdef TRAPTRACE
  41 #include <sys/traptrace.h>
  42 #endif /* TRAPTRACE */
  43 
  44 #if defined(lint)
  45 
  46 /* ARGSUSED */
  47 void
  48 pil_interrupt(int level)
  49 {}
  50 
  51 #else   /* lint */
  52 
  53 
  54 /*
  55  * (TT 0x40..0x4F, TL>0) Interrupt Level N Handler (N == 1..15)
  56  *      Register passed from LEVEL_INTERRUPT(level)
  57  *      %g4 - interrupt request level
  58  */
  59         ENTRY_NP(pil_interrupt)
  60         !
  61         ! Register usage
  62         !       %g1 - cpu
  63         !       %g2 - pointer to intr_vec_t (iv)
  64         !       %g4 - pil
  65         !       %g3, %g5, %g6, %g7 - temps
  66         !
   67         ! Grab the first (list head) intr_vec_t off intr_head[pil]
   68         ! and panic immediately if the list head is NULL. Otherwise, update
   69         ! intr_head[pil] to the next intr_vec_t on the list and, if that next
   70         ! intr_vec_t is NULL, clear the pending softint via %clear_softint.
  71         !
  72         CPU_ADDR(%g1, %g5)              ! %g1 = cpu
  73         !


 143         !
 144         ! figure which handler to run and which %pil it starts at
 145         ! intr_thread starts at DISP_LEVEL to prevent preemption
 146         ! current_thread starts at PIL_MAX to protect cpu_intr_actv
 147         !
 148         mov     %g4, %g3                ! %g3 = %g4, pil
 149         cmp     %g4, LOCK_LEVEL
 150         bg,a,pt %xcc, 3f                ! branch if pil > LOCK_LEVEL
 151         mov     PIL_MAX, %g4            ! %g4 = PIL_MAX (15)
 152         sethi   %hi(intr_thread), %g1   ! %g1 = intr_thread
 153         mov     DISP_LEVEL, %g4         ! %g4 = DISP_LEVEL (11)
 154         ba,pt   %xcc, sys_trap
 155         or      %g1, %lo(intr_thread), %g1
 156 3:
 157         sethi   %hi(current_thread), %g1 ! %g1 = current_thread
 158         ba,pt   %xcc, sys_trap
 159         or      %g1, %lo(current_thread), %g1
 160         SET_SIZE(pil_interrupt_common)
 161         SET_SIZE(pil_interrupt)
 162 
 163 #endif  /* lint */
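
The tail of pil_interrupt above boils down to a single threshold test: interrupts at or below LOCK_LEVEL are handed to intr_thread with the PIL raised to DISP_LEVEL, while anything higher runs in current_thread at PIL_MAX. A minimal C sketch of that decision follows; PIL_MAX and DISP_LEVEL match the values noted in the comments above, LOCK_LEVEL is 10 on these platforms, and run_intr_thread/run_current_thread are hypothetical stand-ins for the sys_trap handoff, not kernel functions.

#include <stdio.h>

#define LOCK_LEVEL	10	/* highest PIL serviced by an interrupt thread */
#define DISP_LEVEL	11	/* intr_thread starts here to prevent preemption */
#define PIL_MAX		15	/* current_thread starts here to protect cpu_intr_actv */

/* hypothetical stand-ins for the sys_trap targets */
static void run_intr_thread(int start_pil)	{ printf("intr_thread @ pil %d\n", start_pil); }
static void run_current_thread(int start_pil)	{ printf("current_thread @ pil %d\n", start_pil); }

static void
dispatch(int pil)
{
	if (pil > LOCK_LEVEL)
		run_current_thread(PIL_MAX);	/* high-level interrupt */
	else
		run_intr_thread(DISP_LEVEL);	/* low-level interrupt */
}

int
main(void)
{
	dispatch(9);	/* -> intr_thread @ pil 11 */
	dispatch(14);	/* -> current_thread @ pil 15 */
	return (0);
}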
 164 
 165 
 166 #ifndef lint
 167 _spurious:
 168         .asciz  "!interrupt 0x%x at level %d not serviced"
 169 
 170 /*
 171  * SERVE_INTR_PRE is called once, just before the first invocation
 172  * of SERVE_INTR.
 173  *
 174  * Registers on entry:
 175  *
 176  * iv_p, cpu, regs: may be out-registers
 177  * ls1, ls2: local scratch registers
 178  * os1, os2, os3: scratch registers, may be out
 179  */
 180 
 181 #define SERVE_INTR_PRE(iv_p, cpu, ls1, ls2, os1, os2, os3, regs)        \
 182         mov     iv_p, ls1;                                              \
 183         mov     iv_p, ls2;                                              \
 184         SERVE_INTR_TRACE(iv_p, os1, os2, os3, regs);
 185 
 186 /*


 312         mov     os3, os4;                                               \
 313         GET_TRACE_TICK(os2, os3);                                       \
 314         stxa    os2, [os1 + TRAP_ENT_TICK]%asi;                         \
 315         TRACE_SAVE_TL_GL_REGS(os1, os2);                                \
 316         set     TT_SERVE_INTR, os2;                                     \
 317         rdpr    %pil, os3;                                              \
 318         or      os2, os3, os2;                                          \
 319         stha    os2, [os1 + TRAP_ENT_TT]%asi;                           \
 320         stna    %sp, [os1 + TRAP_ENT_SP]%asi;                           \
 321         stna    inum, [os1 + TRAP_ENT_TR]%asi;                          \
 322         stna    %g0, [os1 + TRAP_ENT_F1]%asi;                           \
 323         stna    %g0, [os1 + TRAP_ENT_F2]%asi;                           \
 324         stna    %g0, [os1 + TRAP_ENT_F3]%asi;                           \
 325         stna    %g0, [os1 + TRAP_ENT_F4]%asi;                           \
 326         TRACE_NEXT(os1, os2, os3);                                      \
 327         wrpr    %g0, os4, %pstate
 328 #else   /* TRAPTRACE */
 329 #define SERVE_INTR_TRACE2(inum, os1, os2, os3, os4)
 330 #endif  /* TRAPTRACE */
 331 
 332 #endif  /* lint */
 333 
 334 #if defined(lint)
 335 
 336 /*ARGSUSED*/
 337 void
 338 intr_thread(struct regs *regs, uint64_t iv_p, uint_t pil)
 339 {}
 340 
 341 #else   /* lint */
 342 
 343 #define INTRCNT_LIMIT 16
 344 
 345 /*
 346  * Handle an interrupt in a new thread.
 347  *      Entry:
 348  *              %o0       = pointer to regs structure
 349  *              %o1       = pointer to current intr_vec_t (iv) to be processed
 350  *              %o2       = pil
 351  *              %sp       = on current thread's kernel stack
 352  *              %o7       = return linkage to trap code
 353  *              %g7       = current thread
 354  *              %pstate   = normal globals, interrupts enabled, 
 355  *                          privileged, fp disabled
 356  *              %pil      = DISP_LEVEL
 357  *
 358  *      Register Usage
 359  *              %l0       = return linkage
 360  *              %l1       = pil
 361  *              %l2 - %l3 = scratch
 362  *              %l4 - %l7 = reserved for sys_trap


 875         ldn     [%o2 + CPU_INTR_THREAD], %o5    ! get list pointer
 876         stn     %o5, [THREAD_REG + T_LINK]
 877         call    swtch                           ! switch to best thread
 878         stn     THREAD_REG, [%o2 + CPU_INTR_THREAD] ! delay - put thread on list
 879         ba,a,pt %xcc, .                         ! swtch() shouldn't return
 880         SET_SIZE(intr_thread_exit)
 881 
 882         .global ftrace_intr_thread_format_str
 883 ftrace_intr_thread_format_str:
 884         .asciz  "intr_thread(): regs=0x%lx, int=0x%lx, pil=0x%lx"
 885 #ifdef DEBUG
 886 intr_thread_actv_bit_set:
 887         .asciz  "intr_thread(): cpu_intr_actv bit already set for PIL"
 888 intr_thread_actv_bit_not_set:
 889         .asciz  "intr_thread(): cpu_intr_actv bit not set for PIL"
 890 intr_thread_exit_actv_bit_set:
 891         .asciz  "intr_thread_exit(): cpu_intr_actv bit erroneously set for PIL"
 892 intr_thread_t_intr_start_zero:
 893         .asciz  "intr_thread(): t_intr_start zero upon handler return"
 894 #endif /* DEBUG */
 895 #endif  /* lint */
 896 
 897 #if defined(lint)
 898 
 899 /*
 900  * Handle an interrupt in the current thread
 901  *      Entry:
 902  *              %o0       = pointer to regs structure
 903  *              %o1       = pointer to current intr_vec_t (iv) to be processed
 904  *              %o2       = pil
 905  *              %sp       = on current thread's kernel stack
 906  *              %o7       = return linkage to trap code
 907  *              %g7       = current thread
 908  *              %pstate   = normal globals, interrupts enabled, 
 909  *                          privileged, fp disabled
 910  *              %pil      = PIL_MAX
 911  *
 912  *      Register Usage
 913  *              %l0       = return linkage
 914  *              %l1       = old stack
 915  *              %l2 - %l3 = scratch
 916  *              %l4 - %l7 = reserved for sys_trap
 917  *              %o3       = cpu
 918  *              %o0       = scratch
 919  *              %o4 - %o5 = scratch
 920  */
 921 /* ARGSUSED */
 922 void
 923 current_thread(struct regs *regs, uint64_t iv_p, uint_t pil)
 924 {}
 925 
 926 #else   /* lint */
 927 
 928         ENTRY_NP(current_thread)
 929         
 930         mov     %o7, %l0
 931         ldn     [THREAD_REG + T_CPU], %o3
 932 
 933         ldn     [THREAD_REG + T_ONFAULT], %l2
 934         brz,pt  %l2, no_onfault         ! branch if no onfault label set
 935         nop
 936         stn     %g0, [THREAD_REG + T_ONFAULT]! clear onfault label
 937         ldn     [THREAD_REG + T_LOFAULT], %l3
 938         stn     %g0, [THREAD_REG + T_LOFAULT]! clear lofault data
 939 
 940         sub     %o2, LOCK_LEVEL + 1, %o5
 941         sll     %o5, CPTRSHIFT, %o5
 942         add     %o5, CPU_OFD, %o4       ! %o4 has on_fault data offset
 943         stn     %l2, [%o3 + %o4]        ! save onfault label for pil %o2
 944         add     %o5, CPU_LFD, %o4       ! %o4 has lofault data offset
 945         stn     %l3, [%o3 + %o4]        ! save lofault data for pil %o2
 946 
 947 no_onfault:


1381         ! Enable interrupts and return  
1382         jmp     %l0 + 8
1383         wrpr    %g0, %o2, %pil                  ! enable interrupts
1384         SET_SIZE(current_thread)
1385 
1386 
1387 #ifdef DEBUG
1388 current_thread_wrong_pil:
1389         .asciz  "current_thread: unexpected pil level: %d"
1390 current_thread_actv_bit_set:
1391         .asciz  "current_thread(): cpu_intr_actv bit already set for PIL"
1392 current_thread_actv_bit_not_set:
1393         .asciz  "current_thread(): cpu_intr_actv bit not set for PIL"
1394 current_thread_nested_pil_zero:
1395         .asciz  "current_thread(): timestamp zero for nested PIL %d"
1396 current_thread_timestamp_zero:
1397         .asciz  "current_thread(): timestamp zero upon handler return"
1398 current_thread_nested_PIL_not_found:
1399         .asciz  "current_thread: couldn't find nested high-level PIL"
1400 #endif /* DEBUG */
1401 #endif /* lint */
1402 
1403 /*
1404  * Return a thread's interrupt level.
1405  * Since this isn't saved anywhere but in %l4 on interrupt entry, we
1406  * must dig it out of the save area.
1407  *
1408  * Caller 'swears' that this really is an interrupt thread.
1409  *
1410  * int
1411  * intr_level(t)
1412  *      kthread_id_t    t;
1413  */
1414 
1415 #if defined(lint)
1416 
1417 /* ARGSUSED */
1418 int
1419 intr_level(kthread_id_t t)
1420 { return (0); }
1421 
1422 #else   /* lint */
1423 
1424         ENTRY_NP(intr_level)
1425         retl
1426         ldub    [%o0 + T_PIL], %o0              ! return saved pil
1427         SET_SIZE(intr_level)
1428 
1429 #endif  /* lint */
1430 
1431 #if defined(lint)
1432 
1433 /* ARGSUSED */
1434 int
1435 disable_pil_intr()
1436 { return (0); }
1437 
1438 #else   /* lint */
1439 
1440         ENTRY_NP(disable_pil_intr)
1441         rdpr    %pil, %o0
1442         retl
1443         wrpr    %g0, PIL_MAX, %pil              ! disable interrupts (1-15)
1444         SET_SIZE(disable_pil_intr)
1445 
1446 #endif  /* lint */
1447 
1448 #if defined(lint)
1449 
1450 /* ARGSUSED */
1451 void
1452 enable_pil_intr(int pil_save)
1453 {}
1454 
1455 #else   /* lint */
1456 
1457         ENTRY_NP(enable_pil_intr)
1458         retl
1459         wrpr    %o0, %pil
1460         SET_SIZE(enable_pil_intr)
1461 
1462 #endif  /* lint */
1463 
1464 #if defined(lint)
1465 
1466 /* ARGSUSED */
1467 uint_t
1468 disable_vec_intr(void)
1469 { return (0); }
1470 
1471 #else   /* lint */
1472 
1473         ENTRY_NP(disable_vec_intr)
1474         rdpr    %pstate, %o0
1475         andn    %o0, PSTATE_IE, %g1
1476         retl
1477         wrpr    %g0, %g1, %pstate               ! disable interrupt
1478         SET_SIZE(disable_vec_intr)
1479 
1480 #endif  /* lint */
1481 
1482 #if defined(lint)
1483 
1484 /* ARGSUSED */
1485 void
1486 enable_vec_intr(uint_t pstate_save)
1487 {}
1488 
1489 #else   /* lint */
1490 
1491         ENTRY_NP(enable_vec_intr)
1492         retl
1493         wrpr    %g0, %o0, %pstate
1494         SET_SIZE(enable_vec_intr)
1495 
1496 #endif  /* lint */
1497 
1498 #if defined(lint)
1499  
1500 void
1501 cbe_level14(void)
1502 {}
1503 
1504 #else   /* lint */
1505 
1506         ENTRY_NP(cbe_level14)
1507         save    %sp, -SA(MINFRAME), %sp ! get a new window
1508         !
1509         ! Make sure that this is from TICK_COMPARE; if not just return
1510         !
1511         rd      SOFTINT, %l1
1512         set     (TICK_INT_MASK | STICK_INT_MASK), %o2
1513         andcc   %l1, %o2, %g0
1514         bz,pn   %icc, 2f
1515         nop
1516 
1517         CPU_ADDR(%o1, %o2)
1518         call    cyclic_fire
1519         mov     %o1, %o0
1520 2:
1521         ret
1522         restore %g0, 1, %o0
1523         SET_SIZE(cbe_level14)
1524 
1525 #endif  /* lint */
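
In C terms, cbe_level14 is a filter in front of cyclic_fire: it reads the pending SOFTINT bits and only forwards the call when the tick or stick compare interrupt is among them, always reporting the level-14 softint as serviced. A rough sketch, with the register access, the mask values, and cyclic_fire itself replaced by illustrative stand-ins:

#include <stdint.h>
#include <stdio.h>

/* illustrative bit positions; the real masks come from <sys/intreg.h> */
#define TICK_INT_MASK	(1ULL << 16)
#define STICK_INT_MASK	(1ULL << 17)

typedef struct cpu cpu_t;			/* opaque for this sketch */

static uint64_t
read_softint(void)				/* stand-in for "rd SOFTINT" */
{
	return (TICK_INT_MASK);			/* pretend a tick interrupt is pending */
}

static void
cyclic_fire(cpu_t *cp)				/* stand-in for the cyclic subsystem entry */
{
	(void) cp;
	printf("cyclic_fire()\n");
}

static int
cbe_level14_sketch(cpu_t *self)
{
	uint64_t pending = read_softint();

	if (pending & (TICK_INT_MASK | STICK_INT_MASK))
		cyclic_fire(self);		/* TICK/STICK compare: hand off */

	return (1);				/* handler always reports "serviced" */
}

int
main(void)
{
	return (cbe_level14_sketch(NULL) ? 0 : 1);
}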
1526 
1527 
1528 #if defined(lint)
1529 
1530 /* ARGSUSED */
1531 void
1532 kdi_setsoftint(uint64_t iv_p)
1533 {}
1534 
1535 #else   /* lint */
1536 
1537         ENTRY_NP(kdi_setsoftint)
1538         save    %sp, -SA(MINFRAME), %sp ! get a new window 
1539         rdpr    %pstate, %l5
1540         andn    %l5, PSTATE_IE, %l1
1541         wrpr    %l1, %pstate            ! disable interrupt
1542         !
1543         ! We have a pointer to an interrupt vector data structure.
1544         ! Put the request on the cpu's softint priority list and
1545         ! set %set_softint.
1546         !
1547         ! Register usage
1548         !       %i0 - pointer to intr_vec_t (iv)
1549         !       %l2 - requested pil
1550         !       %l4 - cpu
1551         !       %l5 - pstate
1552         !       %l1, %l3, %l6 - temps
1553         !
1554         ! check if a softint is pending for this softint, 
1555         ! if one is pending, don't bother queuing another.
1556         !


1590         stn     %i0, [%l3]              ! [%l3] = iv, set pil_next field
1591 2:
1592         !
 1593         ! no pending intr_vec_t; make this intr_vec_t the new head
1594         !
1595         add     %l4, INTR_HEAD, %l6     ! %l6 = &cpu->m_cpu.intr_head[pil]
1596         stn     %i0, [%l6 + %l0]        ! cpu->m_cpu.intr_head[pil] = iv
1597 3:
1598         !
1599         ! Write %set_softint with (1<<pil) to cause a "pil" level trap
1600         !
1601         mov     1, %l1                  ! %l1 = 1
1602         sll     %l1, %l2, %l1           ! %l1 = 1 << pil
1603         wr      %l1, SET_SOFTINT        ! trigger required pil softint
1604 4:
1605         wrpr    %g0, %l5, %pstate       ! %pstate = saved %pstate (in %l5)
1606         ret
1607         restore
1608         SET_SIZE(kdi_setsoftint)
1609         
1610 #endif  /* lint */
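
Stripped of the %pstate juggling, kdi_setsoftint appends the intr_vec_t to this CPU's softint list for its PIL and then posts the corresponding soft interrupt by writing 1 << pil to %set_softint. The sketch below renders that flow in C; the structure layouts are illustrative (the assembly reaches the same fields through assym offsets), and the already-pending short-circuit and the multi-target case are omitted here (the latter is sketched after intr_enqueue_req further down).

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define PIL_LEVELS	16

/* illustrative rendering of the fields the assembly reaches via assym offsets */
typedef struct intr_vec {
	uint16_t		iv_pil;
	struct intr_vec		*iv_pil_next;	/* link within one PIL's softint list */
} intr_vec_t;

typedef struct {
	intr_vec_t	*intr_head[PIL_LEVELS];
	intr_vec_t	*intr_tail[PIL_LEVELS];
} mcpu_sketch_t;

static void
set_softint(uint32_t mask)			/* stand-in for "wr ... SET_SOFTINT" */
{
	printf("set_softint mask 0x%x\n", (unsigned)mask);
}

static void
setsoftint_sketch(mcpu_sketch_t *mcpu, intr_vec_t *iv)
{
	int pil = iv->iv_pil;

	iv->iv_pil_next = NULL;
	if (mcpu->intr_tail[pil] != NULL)		/* pending list: append to old tail */
		mcpu->intr_tail[pil]->iv_pil_next = iv;
	else						/* empty list: iv becomes the head */
		mcpu->intr_head[pil] = iv;
	mcpu->intr_tail[pil] = iv;

	set_softint(1u << pil);				/* cause a "pil" level trap */
}

int
main(void)
{
	mcpu_sketch_t mcpu = { { NULL }, { NULL } };
	intr_vec_t iv = { .iv_pil = 4, .iv_pil_next = NULL };

	setsoftint_sketch(&mcpu, &iv);			/* prints mask 0x10 */
	return (0);
}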
1611 
1612 #if defined(lint)
1613 
1614 /*ARGSUSED*/
1615 void
1616 setsoftint_tl1(uint64_t iv_p, uint64_t dummy)
1617 {}
1618 
1619 #else   /* lint */
1620 
1621         !
1622         ! Register usage
1623         !       Arguments:
1624         !       %g1 - Pointer to intr_vec_t (iv)
1625         !
1626         !       Internal:
1627         !       %g2 - pil
1628         !       %g4 - cpu
1629         !       %g3,%g5-g7 - temps
1630         !
1631         ENTRY_NP(setsoftint_tl1)
1632         !
1633         ! We have a pointer to an interrupt vector data structure.
1634         ! Put the request on the cpu's softint priority list and
1635         ! set %set_softint.
1636         !
1637         CPU_ADDR(%g4, %g2)              ! %g4 = cpu
1638         lduh    [%g1 + IV_PIL], %g2     ! %g2 = iv->iv_pil
1639 
1640         !


1685         ldn     [%g1 + IV_PIL_NEXT], %g6        ! 
1686         stna    %g6, [%g5 + TRAP_ENT_F1]%asi    ! trap_f1 = iv->iv_pil_next
1687         add     %g4, INTR_HEAD, %g6
1688         ldn     [%g6 + %g7], %g6                ! %g6=cpu->m_cpu.intr_head[pil]
1689         stna    %g6, [%g5 + TRAP_ENT_F2]%asi    ! trap_f2 = intr_head[pil]
1690         add     %g4, INTR_TAIL, %g6
1691         ldn     [%g6 + %g7], %g6                ! %g6=cpu->m_cpu.intr_tail[pil]
1692         stna    %g6, [%g5 + TRAP_ENT_F3]%asi    ! trap_f3 = intr_tail[pil]
1693         stna    %g2, [%g5 + TRAP_ENT_F4]%asi    ! trap_f4 = pil
1694         TRACE_NEXT(%g5, %g6, %g3)
1695 #endif /* TRAPTRACE */
1696         !
1697         ! Write %set_softint with (1<<pil) to cause a "pil" level trap
1698         !
1699         mov     1, %g5                  ! %g5 = 1
1700         sll     %g5, %g2, %g5           ! %g5 = 1 << pil
1701         wr      %g5, SET_SOFTINT        ! trigger required pil softint
1702         retry
1703         SET_SIZE(setsoftint_tl1)
1704 
1705 #endif  /* lint */
1706 
1707 #if defined(lint)
1708 
1709 /*ARGSUSED*/
1710 void
1711 setvecint_tl1(uint64_t inum, uint64_t dummy)
1712 {}
1713 
1714 #else   /* lint */
1715 
1716         !
1717         ! Register usage
1718         !       Arguments:
1719         !       %g1 - inumber
1720         !
1721         !       Internal:
1722         !       %g1 - softint pil mask
1723         !       %g2 - pil of intr_vec_t
1724         !       %g3 - pointer to current intr_vec_t (iv)
1725         !       %g4 - cpu
1726         !       %g5, %g6,%g7 - temps
1727         !
1728         ENTRY_NP(setvecint_tl1)
1729         !
1730         ! Verify the inumber received (should be inum < MAXIVNUM).
1731         !
1732         set     MAXIVNUM, %g2
1733         cmp     %g1, %g2
1734         bgeu,pn %xcc, .no_ivintr
1735         clr     %g2                     ! expected in .no_ivintr


1828         TRACE_NEXT(%g5, %g6, %g7)
1829 #endif /* TRAPTRACE */
1830         mov     1, %g6                  ! %g6 = 1
1831         sll     %g6, %g2, %g6           ! %g6 = 1 << pil
1832         or      %g1, %g6, %g1           ! %g1 |= (1 << pil), pil mask
1833         ldn     [%g3 + IV_VEC_NEXT], %g3 ! %g3 = pointer to next intr_vec_t (iv)
1834         brnz,pn %g3, 0b                 ! iv->iv_vec_next is non NULL, goto 0b
1835         nop
1836         wr      %g1, SET_SOFTINT        ! triggered one or more pil softints
1837         retry
1838 
1839 .no_ivintr:
1840         ! no_ivintr: arguments: rp, inum (%g1), pil (%g2 == 0)
1841         mov     %g2, %g3
1842         mov     %g1, %g2
1843         set     no_ivintr, %g1
1844         ba,pt   %xcc, sys_trap
1845         mov     PIL_15, %g4
1846         SET_SIZE(setvecint_tl1)
1847 
1848 #endif  /* lint */
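
setvecint_tl1 differs from the single-vector paths above in that one inum may carry a whole chain of intr_vec_t structures linked by iv_vec_next: each is queued on its own PIL list, the routine accumulates 1 << pil for every one of them, and a single %set_softint write at the end posts them all. A hedged C sketch of that loop; the inum-to-chain lookup, the MAXIVNUM check, and the per-PIL queueing are reduced to stubs here.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

typedef struct intr_vec {
	uint16_t		iv_pil;
	struct intr_vec		*iv_vec_next;	/* vectors sharing the same inum */
} intr_vec_t;

static void
enqueue_on_pil_list(intr_vec_t *iv)		/* per-PIL list insert, as sketched earlier */
{
	printf("queued iv at pil %u\n", iv->iv_pil);
}

static void
set_softint(uint32_t mask)			/* stand-in for "wr ... SET_SOFTINT" */
{
	printf("set_softint mask 0x%x\n", (unsigned)mask);
}

static void
setvecint_sketch(intr_vec_t *iv)		/* iv: head of the chain found for this inum */
{
	uint32_t mask = 0;

	for (; iv != NULL; iv = iv->iv_vec_next) {
		enqueue_on_pil_list(iv);	/* put iv on this CPU's softint list */
		mask |= 1u << iv->iv_pil;	/* remember every PIL touched */
	}
	set_softint(mask);			/* one write triggers all of them */
}

int
main(void)
{
	intr_vec_t b = { .iv_pil = 9, .iv_vec_next = NULL };
	intr_vec_t a = { .iv_pil = 4, .iv_vec_next = &b };

	setvecint_sketch(&a);			/* prints mask 0x210 */
	return (0);
}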
1849 
1850 #if defined(lint)
1851 
1852 /*ARGSUSED*/
1853 void
1854 wr_clr_softint(uint_t value)
1855 {}
1856 
1857 #else
1858 
1859         ENTRY_NP(wr_clr_softint)
1860         retl
1861         wr      %o0, CLEAR_SOFTINT
1862         SET_SIZE(wr_clr_softint)
1863 
1864 #endif /* lint */
1865 
1866 #if defined(lint)
1867 
1868 /*ARGSUSED*/
1869 void
1870 intr_enqueue_req(uint_t pil, uint64_t inum)
1871 {}
1872 
1873 #else   /* lint */
1874 
1875 /*
1876  * intr_enqueue_req
1877  *
1878  * %o0 - pil
1879  * %o1 - pointer to intr_vec_t (iv)
1880  * %o5 - preserved
1881  * %g5 - preserved
1882  */
1883         ENTRY_NP(intr_enqueue_req)
1884         !
1885         CPU_ADDR(%g4, %g1)              ! %g4 = cpu
1886 
1887         !
1888         ! Insert intr_vec_t (iv) to appropriate cpu's softint priority list
1889         !
1890         sll     %o0, CPTRSHIFT, %o0     ! %o0 = offset to pil entry
1891         add     %g4, INTR_TAIL, %g6     ! %g6 = &cpu->m_cpu.intr_tail
1892         ldn     [%o0 + %g6], %g1        ! %g1 = cpu->m_cpu.intr_tail[pil]
1893                                         !       current tail (ct)
1894         brz,pt  %g1, 2f                 ! branch if current tail is NULL


1904         ld      [%g4 + CPU_ID], %g6     ! for multi target softint, use cpuid
1905         sll     %g6, CPTRSHIFT, %g6     ! calculate offset address from cpuid
1906         add     %g3, %g6, %g3           ! %g3 = &ct->iv_xpil_next[cpuid]
1907 1:
1908         !
1909         ! update old tail
1910         !
1911         ba,pt   %xcc, 3f
 1912         stn     %o1, [%g3]              ! [%g3] = iv, set pil_next field
1913 2:
1914         !
 1915         ! no intr_vec_t queued, so make this intr_vec_t the new head
1916         !
1917         add     %g4, INTR_HEAD, %g6     ! %g6 = &cpu->m_cpu.intr_head[pil]
1918         stn     %o1, [%g6 + %o0]        ! cpu->m_cpu.intr_head[pil] = iv
1919 3:
1920         retl
1921         nop
1922         SET_SIZE(intr_enqueue_req)
1923 
1924 #endif  /* lint */
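
The only wrinkle intr_enqueue_req adds over the append shown earlier is the multi-target case visible above: when the current tail is a multi-target softint, the new entry is linked through a per-CPU slot (ct->iv_xpil_next[cpuid]) rather than the ordinary iv_pil_next field. A sketch of that choice, with illustrative field layouts and an illustrative flag value:

#include <stdint.h>
#include <stddef.h>

#define PIL_LEVELS	16
#define NCPUS		4		/* illustrative only */
#define IV_SOFTINT_MT	0x1		/* illustrative flag bit for multi-target softints */

typedef struct intr_vec {
	uint16_t		iv_pil;
	uint16_t		iv_flags;
	struct intr_vec		*iv_pil_next;		/* single-target link */
	struct intr_vec		*iv_xpil_next[NCPUS];	/* per-CPU links for multi-target */
} intr_vec_t;

typedef struct {
	int		cpu_id;
	intr_vec_t	*intr_head[PIL_LEVELS];
	intr_vec_t	*intr_tail[PIL_LEVELS];
} cpu_sketch_t;

/* Append iv to this CPU's softint list for the given pil, as intr_enqueue_req does. */
static void
intr_enqueue_req_sketch(cpu_sketch_t *cpu, int pil, intr_vec_t *iv)
{
	intr_vec_t *ct = cpu->intr_tail[pil];		/* current tail */

	if (ct == NULL)
		cpu->intr_head[pil] = iv;		/* empty list: iv becomes the head */
	else if (ct->iv_flags & IV_SOFTINT_MT)
		ct->iv_xpil_next[cpu->cpu_id] = iv;	/* multi-target: use per-CPU link */
	else
		ct->iv_pil_next = iv;			/* update old tail */

	cpu->intr_tail[pil] = iv;			/* iv is the new tail either way */
}

int
main(void)
{
	cpu_sketch_t cpu = { 0 };
	intr_vec_t first = { .iv_pil = 6 }, second = { .iv_pil = 6 };

	intr_enqueue_req_sketch(&cpu, 6, &first);	/* becomes head and tail */
	intr_enqueue_req_sketch(&cpu, 6, &second);	/* linked after first */
	return (cpu.intr_head[6] == &first && first.iv_pil_next == &second ? 0 : 1);
}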
1925 
1926 /*
1927  * Set CPU's base SPL level, based on which interrupt levels are active.
1928  *      Called at spl7 or above.
1929  */
1930 
1931 #if defined(lint)
1932 
1933 void
1934 set_base_spl(void)
1935 {}
1936 
1937 #else   /* lint */
1938 
1939         ENTRY_NP(set_base_spl)
1940         ldn     [THREAD_REG + T_CPU], %o2       ! load CPU pointer
1941         ld      [%o2 + CPU_INTR_ACTV], %o5      ! load active interrupts mask
1942 
1943 /*
 1944  * WARNING: non-standard calling sequence; do not call from C
1945  *      %o2 = pointer to CPU
1946  *      %o5 = updated CPU_INTR_ACTV
1947  */
1948 _intr_set_spl:                                  ! intr_thread_exit enters here
1949         !
1950         ! Determine highest interrupt level active.  Several could be blocked
1951         ! at higher levels than this one, so must convert flags to a PIL
1952         ! Normally nothing will be blocked, so test this first.
1953         !
1954         brz,pt  %o5, 1f                         ! nothing active
1955         sra     %o5, 11, %o3                    ! delay - set %o3 to bits 15-11
1956         set     _intr_flag_table, %o1
1957         tst     %o3                             ! see if any of the bits set
1958         ldub    [%o1 + %o3], %o3                ! load bit number


1969         ldub    [%o1 + %o3], %o3
1970 
1971         !
 1972         ! highest interrupt level number active is in %o3
1973         !
1974 1:
1975         retl
1976         st      %o3, [%o2 + CPU_BASE_SPL]       ! delay - store base priority
1977         SET_SIZE(set_base_spl)
1978 
1979 /*
1980  * Table that finds the most significant bit set in a five bit field.
 1981  * Each entry is the high-order bit number + 1 of its index in the table.
1982  * This read-only data is in the text segment.
1983  */
1984 _intr_flag_table:
1985         .byte   0, 1, 2, 2,     3, 3, 3, 3,     4, 4, 4, 4,     4, 4, 4, 4
1986         .byte   5, 5, 5, 5,     5, 5, 5, 5,     5, 5, 5, 5,     5, 5, 5, 5
1987         .align  4
1988 
1989 #endif  /* lint */
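
The table above encodes, for every 5-bit value, the position of its highest set bit plus one, which lets the routine reduce the cpu_intr_actv bitmask to the highest active PIL a few bits at a time instead of looping. The C sketch below reproduces the table and one scan order consistent with the visible fragment ("bits 15-11" first); treat the exact grouping of the lower bits as an assumption, since the middle of the routine is elided above.

#include <stdio.h>

/* high-order set-bit number + 1 for each 5-bit index, as in _intr_flag_table */
static const unsigned char intr_flag_table[32] = {
	0, 1, 2, 2,	3, 3, 3, 3,	4, 4, 4, 4,	4, 4, 4, 4,
	5, 5, 5, 5,	5, 5, 5, 5,	5, 5, 5, 5,	5, 5, 5, 5
};

/*
 * Reduce a cpu_intr_actv-style mask (bit N set => PIL N active) to the
 * highest active PIL; 0 means nothing is active.
 */
static int
highest_active_pil(unsigned int actv)
{
	if (actv == 0)
		return (0);
	if (actv >> 11)					/* PILs 15..11 */
		return (intr_flag_table[actv >> 11] + 11 - 1);
	if (actv >> 6)					/* PILs 10..6 */
		return (intr_flag_table[actv >> 6] + 6 - 1);
	return (intr_flag_table[actv >> 1]);		/* PILs 5..1 */
}

int
main(void)
{
	printf("%d\n", highest_active_pil(0));				/* 0  */
	printf("%d\n", highest_active_pil(1u << 4));			/* 4  */
	printf("%d\n", highest_active_pil((1u << 9) | (1u << 3)));	/* 9  */
	printf("%d\n", highest_active_pil(1u << 14));			/* 14 */
	return (0);
}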
1990 
1991 /*
1992  * int
1993  * intr_passivate(from, to)
1994  *      kthread_id_t    from;           interrupt thread
1995  *      kthread_id_t    to;             interrupted thread
1996  */
1997 
1998 #if defined(lint)
1999 
2000 /* ARGSUSED */
2001 int
2002 intr_passivate(kthread_id_t from, kthread_id_t to)
2003 { return (0); }
2004 
2005 #else   /* lint */
2006 
2007         ENTRY_NP(intr_passivate)
2008         save    %sp, -SA(MINFRAME), %sp ! get a new window 
2009 
2010         flushw                          ! force register windows to stack
2011         !
2012         ! restore registers from the base of the stack of the interrupt thread.
2013         !
2014         ldn     [%i0 + T_STACK], %i2    ! get stack save area pointer
2015         ldn     [%i2 + (0*GREGSIZE)], %l0       ! load locals
2016         ldn     [%i2 + (1*GREGSIZE)], %l1
2017         ldn     [%i2 + (2*GREGSIZE)], %l2
2018         ldn     [%i2 + (3*GREGSIZE)], %l3
2019         ldn     [%i2 + (4*GREGSIZE)], %l4
2020         ldn     [%i2 + (5*GREGSIZE)], %l5
2021         ldn     [%i2 + (6*GREGSIZE)], %l6
2022         ldn     [%i2 + (7*GREGSIZE)], %l7
2023         ldn     [%i2 + (8*GREGSIZE)], %o0       ! put ins from stack in outs
2024         ldn     [%i2 + (9*GREGSIZE)], %o1
2025         ldn     [%i2 + (10*GREGSIZE)], %o2
2026         ldn     [%i2 + (11*GREGSIZE)], %o3


2043         stn     %l6, [%i3 + STACK_BIAS + (6*GREGSIZE)]
2044         stn     %l7, [%i3 + STACK_BIAS + (7*GREGSIZE)]
2045         stn     %o0, [%i3 + STACK_BIAS + (8*GREGSIZE)]  ! save ins using outs
2046         stn     %o1, [%i3 + STACK_BIAS + (9*GREGSIZE)]
2047         stn     %o2, [%i3 + STACK_BIAS + (10*GREGSIZE)]
2048         stn     %o3, [%i3 + STACK_BIAS + (11*GREGSIZE)]
2049         stn     %o4, [%i3 + STACK_BIAS + (12*GREGSIZE)]
2050         stn     %o5, [%i3 + STACK_BIAS + (13*GREGSIZE)]
2051         stn     %i4, [%i3 + STACK_BIAS + (14*GREGSIZE)]
2052                                                 ! fp, %i7 copied using %i4
2053         stn     %i5, [%i3 + STACK_BIAS + (15*GREGSIZE)]
2054         stn     %g0, [%i2 + ((8+6)*GREGSIZE)]
2055                                                 ! clear fp in save area
2056         
2057         ! load saved pil for return
2058         ldub    [%i0 + T_PIL], %i0
2059         ret
2060         restore
2061         SET_SIZE(intr_passivate)
2062 
2063 #endif  /* lint */
2064 
2065 #if defined(lint)
2066 
2067 /*
2068  * intr_get_time() is a resource for interrupt handlers to determine how
2069  * much time has been spent handling the current interrupt. Such a function
2070  * is needed because higher level interrupts can arrive during the
2071  * processing of an interrupt, thus making direct comparisons of %tick by
2072  * the handler inaccurate. intr_get_time() only returns time spent in the
2073  * current interrupt handler.
2074  *
2075  * The caller must be calling from an interrupt handler running at a pil
2076  * below or at lock level. Timings are not provided for high-level
2077  * interrupts.
2078  *
2079  * The first time intr_get_time() is called while handling an interrupt,
2080  * it returns the time since the interrupt handler was invoked. Subsequent
2081  * calls will return the time since the prior call to intr_get_time(). Time
2082  * is returned as ticks, adjusted for any clock divisor due to power 
2083  * management. Use tick2ns() to convert ticks to nsec. Warning: ticks may 
2084  * not be the same across CPUs.
2085  *
2086  * Theory Of Intrstat[][]:


2107  * "checkpoints" the timing information by incrementing intrstat[pil][0]
2108  * to include elapsed running time, and by setting t_intr_start to %tick.
2109  * It then sets the return value to intrstat[pil][0] - intrstat[pil][1],
2110  * and updates intrstat[pil][1] to be the same as the new value of
2111  * intrstat[pil][0].
2112  *
2113  * In the normal handling of interrupts, after an interrupt handler returns
2114  * and the code in intr_thread() updates intrstat[pil][0], it then sets
2115  * intrstat[pil][1] to the new value of intrstat[pil][0]. When [0] == [1],
2116  * the timings are reset, i.e. intr_get_time() will return [0] - [1] which
2117  * is 0.
2118  *
2119  * Whenever interrupts arrive on a CPU which is handling a lower pil
2120  * interrupt, they update the lower pil's [0] to show time spent in the
2121  * handler that they've interrupted. This results in a growing discrepancy
2122  * between [0] and [1], which is returned the next time intr_get_time() is
2123  * called. Time spent in the higher-pil interrupt will not be returned in
2124  * the next intr_get_time() call from the original interrupt, because
2125  * the higher-pil interrupt's time is accumulated in intrstat[higherpil][].
2126  */
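
A small worked rendering of the bookkeeping described above may help: [0] accumulates all time charged to the PIL, [1] remembers how much has already been reported, and t_intr_start marks where the current measurement interval began. The types below are illustrative stand-ins for the cpu_m and kthread fields named in the comment, and the %pil manipulation surrounding the real routine is left out.

#include <stdint.h>
#include <stdio.h>

/* illustrative stand-ins for the cpu_m.intrstat and kthread fields named above */
typedef struct {
	uint64_t	intrstat[16][2];	/* [pil][0] accumulated, [pil][1] last reported */
} cpu_sketch_t;

typedef struct {
	int		t_pil;
	uint64_t	t_intr_start;		/* %tick when this measurement interval began */
} thread_sketch_t;

static uint64_t
intr_get_time_sketch(cpu_sketch_t *cpu, thread_sketch_t *t, uint64_t now)
{
	uint64_t *stat = cpu->intrstat[t->t_pil];
	uint64_t delta = now - t->t_intr_start;
	uint64_t ret;

	stat[0] += delta;			/* checkpoint running time into [0] */
	t->t_intr_start = now;			/* the next call measures from here */

	ret = stat[0] - stat[1];		/* time not yet reported to the caller */
	stat[1] = stat[0];			/* [0] == [1] again: timings reset */
	return (ret);				/* ticks since handler start or prior call */
}

int
main(void)
{
	cpu_sketch_t cpu = { { { 0 } } };
	thread_sketch_t t = { .t_pil = 6, .t_intr_start = 1000 };

	printf("%llu\n", (unsigned long long)intr_get_time_sketch(&cpu, &t, 1400));	/* 400 */
	printf("%llu\n", (unsigned long long)intr_get_time_sketch(&cpu, &t, 1650));	/* 250 */
	return (0);
}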
2127 
2128 /*ARGSUSED*/
2129 uint64_t
2130 intr_get_time(void)
2131 { return 0; }
2132 #else   /* lint */
2133 
2134         ENTRY_NP(intr_get_time)
2135 #ifdef DEBUG
2136         !
2137         ! Lots of asserts, but just check panic_quiesce first.
2138         ! Don't bother with lots of tests if we're just ignoring them.
2139         !
2140         sethi   %hi(panic_quiesce), %o0
2141         ld      [%o0 + %lo(panic_quiesce)], %o0
2142         brnz,pn %o0, 2f
2143         nop     
2144         !
2145         ! ASSERT(%pil <= LOCK_LEVEL)
2146         !
2147         rdpr    %pil, %o1
2148         cmp     %o1, LOCK_LEVEL
2149         ble,pt  %xcc, 0f
2150         sethi   %hi(intr_get_time_high_pil), %o0        ! delay
2151         call    panic
2152         or      %o0, %lo(intr_get_time_high_pil), %o0
2153 0:      


2222         stx     %o2, [%o3]
2223         ldx     [%o3 + 8], %o4          ! %o4 = cpu_m.intrstat[pil][1]
2224         sub     %o2, %o4, %o0           ! %o0 is elapsed time since %o4
2225         stx     %o2, [%o3 + 8]          ! make [1] match [0], resetting time
2226 
2227         ld      [%o5 + CPU_BASE_SPL], %o2       ! restore %pil to the greater
2228         cmp     %o2, %o1                        ! of either our pil %o1 or
2229         movl    %xcc, %o1, %o2                  ! cpu_base_spl.
2230         retl
2231         wrpr    %g0, %o2, %pil
2232         SET_SIZE(intr_get_time)
2233 
2234 #ifdef DEBUG
2235 intr_get_time_high_pil:
2236         .asciz  "intr_get_time(): %pil > LOCK_LEVEL"
2237 intr_get_time_not_intr:
2238         .asciz  "intr_get_time(): not called from an interrupt thread"
2239 intr_get_time_no_start_time:
2240         .asciz  "intr_get_time(): t_intr_start == 0"
2241 #endif /* DEBUG */
2242 #endif  /* lint */


   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  23  */
  24 




  25 #include "assym.h"

  26 
  27 #include <sys/cmn_err.h>
  28 #include <sys/ftrace.h>
  29 #include <sys/asm_linkage.h>
  30 #include <sys/machthread.h>
  31 #include <sys/machcpuvar.h>
  32 #include <sys/intreg.h>
  33 #include <sys/ivintr.h>
  34 
  35 #ifdef TRAPTRACE
  36 #include <sys/traptrace.h>
  37 #endif /* TRAPTRACE */
  38 

  39 








  40 /*
  41  * (TT 0x40..0x4F, TL>0) Interrupt Level N Handler (N == 1..15)
  42  *      Register passed from LEVEL_INTERRUPT(level)
  43  *      %g4 - interrupt request level
  44  */
  45         ENTRY_NP(pil_interrupt)
  46         !
  47         ! Register usage
  48         !       %g1 - cpu
  49         !       %g2 - pointer to intr_vec_t (iv)
  50         !       %g4 - pil
  51         !       %g3, %g5, %g6, %g7 - temps
  52         !
   53         ! Grab the first (list head) intr_vec_t off intr_head[pil]
   54         ! and panic immediately if the list head is NULL. Otherwise, update
   55         ! intr_head[pil] to the next intr_vec_t on the list and, if that next
   56         ! intr_vec_t is NULL, clear the pending softint via %clear_softint.
  57         !
  58         CPU_ADDR(%g1, %g5)              ! %g1 = cpu
  59         !


 129         !
 130         ! figure which handler to run and which %pil it starts at
 131         ! intr_thread starts at DISP_LEVEL to prevent preemption
 132         ! current_thread starts at PIL_MAX to protect cpu_intr_actv
 133         !
 134         mov     %g4, %g3                ! %g3 = %g4, pil
 135         cmp     %g4, LOCK_LEVEL
 136         bg,a,pt %xcc, 3f                ! branch if pil > LOCK_LEVEL
 137         mov     PIL_MAX, %g4            ! %g4 = PIL_MAX (15)
 138         sethi   %hi(intr_thread), %g1   ! %g1 = intr_thread
 139         mov     DISP_LEVEL, %g4         ! %g4 = DISP_LEVEL (11)
 140         ba,pt   %xcc, sys_trap
 141         or      %g1, %lo(intr_thread), %g1
 142 3:
 143         sethi   %hi(current_thread), %g1 ! %g1 = current_thread
 144         ba,pt   %xcc, sys_trap
 145         or      %g1, %lo(current_thread), %g1
 146         SET_SIZE(pil_interrupt_common)
 147         SET_SIZE(pil_interrupt)
 148 

 149 


 150 _spurious:
 151         .asciz  "!interrupt 0x%x at level %d not serviced"
 152 
 153 /*
 154  * SERVE_INTR_PRE is called once, just before the first invocation
 155  * of SERVE_INTR.
 156  *
 157  * Registers on entry:
 158  *
 159  * iv_p, cpu, regs: may be out-registers
 160  * ls1, ls2: local scratch registers
 161  * os1, os2, os3: scratch registers, may be out
 162  */
 163 
 164 #define SERVE_INTR_PRE(iv_p, cpu, ls1, ls2, os1, os2, os3, regs)        \
 165         mov     iv_p, ls1;                                              \
 166         mov     iv_p, ls2;                                              \
 167         SERVE_INTR_TRACE(iv_p, os1, os2, os3, regs);
 168 
 169 /*


 295         mov     os3, os4;                                               \
 296         GET_TRACE_TICK(os2, os3);                                       \
 297         stxa    os2, [os1 + TRAP_ENT_TICK]%asi;                         \
 298         TRACE_SAVE_TL_GL_REGS(os1, os2);                                \
 299         set     TT_SERVE_INTR, os2;                                     \
 300         rdpr    %pil, os3;                                              \
 301         or      os2, os3, os2;                                          \
 302         stha    os2, [os1 + TRAP_ENT_TT]%asi;                           \
 303         stna    %sp, [os1 + TRAP_ENT_SP]%asi;                           \
 304         stna    inum, [os1 + TRAP_ENT_TR]%asi;                          \
 305         stna    %g0, [os1 + TRAP_ENT_F1]%asi;                           \
 306         stna    %g0, [os1 + TRAP_ENT_F2]%asi;                           \
 307         stna    %g0, [os1 + TRAP_ENT_F3]%asi;                           \
 308         stna    %g0, [os1 + TRAP_ENT_F4]%asi;                           \
 309         TRACE_NEXT(os1, os2, os3);                                      \
 310         wrpr    %g0, os4, %pstate
 311 #else   /* TRAPTRACE */
 312 #define SERVE_INTR_TRACE2(inum, os1, os2, os3, os4)
 313 #endif  /* TRAPTRACE */
 314 











 315 #define INTRCNT_LIMIT 16
 316 
 317 /*
 318  * Handle an interrupt in a new thread.
 319  *      Entry:
 320  *              %o0       = pointer to regs structure
 321  *              %o1       = pointer to current intr_vec_t (iv) to be processed
 322  *              %o2       = pil
 323  *              %sp       = on current thread's kernel stack
 324  *              %o7       = return linkage to trap code
 325  *              %g7       = current thread
 326  *              %pstate   = normal globals, interrupts enabled, 
 327  *                          privileged, fp disabled
 328  *              %pil      = DISP_LEVEL
 329  *
 330  *      Register Usage
 331  *              %l0       = return linkage
 332  *              %l1       = pil
 333  *              %l2 - %l3 = scratch
 334  *              %l4 - %l7 = reserved for sys_trap


 847         ldn     [%o2 + CPU_INTR_THREAD], %o5    ! get list pointer
 848         stn     %o5, [THREAD_REG + T_LINK]
 849         call    swtch                           ! switch to best thread
 850         stn     THREAD_REG, [%o2 + CPU_INTR_THREAD] ! delay - put thread on list
 851         ba,a,pt %xcc, .                         ! swtch() shouldn't return
 852         SET_SIZE(intr_thread_exit)
 853 
 854         .global ftrace_intr_thread_format_str
 855 ftrace_intr_thread_format_str:
 856         .asciz  "intr_thread(): regs=0x%lx, int=0x%lx, pil=0x%lx"
 857 #ifdef DEBUG
 858 intr_thread_actv_bit_set:
 859         .asciz  "intr_thread(): cpu_intr_actv bit already set for PIL"
 860 intr_thread_actv_bit_not_set:
 861         .asciz  "intr_thread(): cpu_intr_actv bit not set for PIL"
 862 intr_thread_exit_actv_bit_set:
 863         .asciz  "intr_thread_exit(): cpu_intr_actv bit erroneously set for PIL"
 864 intr_thread_t_intr_start_zero:
 865         .asciz  "intr_thread(): t_intr_start zero upon handler return"
 866 #endif /* DEBUG */

 867 


 868 /*
 869  * Handle an interrupt in the current thread
 870  *      Entry:
 871  *              %o0       = pointer to regs structure
 872  *              %o1       = pointer to current intr_vec_t (iv) to be processed
 873  *              %o2       = pil
 874  *              %sp       = on current thread's kernel stack
 875  *              %o7       = return linkage to trap code
 876  *              %g7       = current thread
 877  *              %pstate   = normal globals, interrupts enabled,
 878  *                          privileged, fp disabled
 879  *              %pil      = PIL_MAX
 880  *
 881  *      Register Usage
 882  *              %l0       = return linkage
 883  *              %l1       = old stack
 884  *              %l2 - %l3 = scratch
 885  *              %l4 - %l7 = reserved for sys_trap
 886  *              %o3       = cpu
 887  *              %o0       = scratch
 888  *              %o4 - %o5 = scratch
 889  */







 890         ENTRY_NP(current_thread)
 891         
 892         mov     %o7, %l0
 893         ldn     [THREAD_REG + T_CPU], %o3
 894 
 895         ldn     [THREAD_REG + T_ONFAULT], %l2
 896         brz,pt  %l2, no_onfault         ! branch if no onfault label set
 897         nop
 898         stn     %g0, [THREAD_REG + T_ONFAULT]! clear onfault label
 899         ldn     [THREAD_REG + T_LOFAULT], %l3
 900         stn     %g0, [THREAD_REG + T_LOFAULT]! clear lofault data
 901 
 902         sub     %o2, LOCK_LEVEL + 1, %o5
 903         sll     %o5, CPTRSHIFT, %o5
 904         add     %o5, CPU_OFD, %o4       ! %o4 has on_fault data offset
 905         stn     %l2, [%o3 + %o4]        ! save onfault label for pil %o2
 906         add     %o5, CPU_LFD, %o4       ! %o4 has lofault data offset
 907         stn     %l3, [%o3 + %o4]        ! save lofault data for pil %o2
 908 
 909 no_onfault:


1343         ! Enable interrupts and return  
1344         jmp     %l0 + 8
1345         wrpr    %g0, %o2, %pil                  ! enable interrupts
1346         SET_SIZE(current_thread)
1347 
1348 
1349 #ifdef DEBUG
1350 current_thread_wrong_pil:
1351         .asciz  "current_thread: unexpected pil level: %d"
1352 current_thread_actv_bit_set:
1353         .asciz  "current_thread(): cpu_intr_actv bit already set for PIL"
1354 current_thread_actv_bit_not_set:
1355         .asciz  "current_thread(): cpu_intr_actv bit not set for PIL"
1356 current_thread_nested_pil_zero:
1357         .asciz  "current_thread(): timestamp zero for nested PIL %d"
1358 current_thread_timestamp_zero:
1359         .asciz  "current_thread(): timestamp zero upon handler return"
1360 current_thread_nested_PIL_not_found:
1361         .asciz  "current_thread: couldn't find nested high-level PIL"
1362 #endif /* DEBUG */

1363 
1364 /*
1365  * Return a thread's interrupt level.
1366  * Since this isn't saved anywhere but in %l4 on interrupt entry, we
1367  * must dig it out of the save area.
1368  *
1369  * Caller 'swears' that this really is an interrupt thread.
1370  *
1371  * int
1372  * intr_level(t)
1373  *      kthread_id_t    t;
1374  */
1375 









1376         ENTRY_NP(intr_level)
1377         retl
1378         ldub    [%o0 + T_PIL], %o0              ! return saved pil
1379         SET_SIZE(intr_level)
1380 











1381         ENTRY_NP(disable_pil_intr)
1382         rdpr    %pil, %o0
1383         retl
1384         wrpr    %g0, PIL_MAX, %pil              ! disable interrupts (1-15)
1385         SET_SIZE(disable_pil_intr)
1386 











1387         ENTRY_NP(enable_pil_intr)
1388         retl
1389         wrpr    %o0, %pil
1390         SET_SIZE(enable_pil_intr)
1391 











1392         ENTRY_NP(disable_vec_intr)
1393         rdpr    %pstate, %o0
1394         andn    %o0, PSTATE_IE, %g1
1395         retl
1396         wrpr    %g0, %g1, %pstate               ! disable interrupt
1397         SET_SIZE(disable_vec_intr)
1398 











1399         ENTRY_NP(enable_vec_intr)
1400         retl
1401         wrpr    %g0, %o0, %pstate
1402         SET_SIZE(enable_vec_intr)
1403 










1404         ENTRY_NP(cbe_level14)
1405         save    %sp, -SA(MINFRAME), %sp ! get a new window
1406         !
1407         ! Make sure that this is from TICK_COMPARE; if not just return
1408         !
1409         rd      SOFTINT, %l1
1410         set     (TICK_INT_MASK | STICK_INT_MASK), %o2
1411         andcc   %l1, %o2, %g0
1412         bz,pn   %icc, 2f
1413         nop
1414 
1415         CPU_ADDR(%o1, %o2)
1416         call    cyclic_fire
1417         mov     %o1, %o0
1418 2:
1419         ret
1420         restore %g0, 1, %o0
1421         SET_SIZE(cbe_level14)
1422 

1423 










1424         ENTRY_NP(kdi_setsoftint)
1425         save    %sp, -SA(MINFRAME), %sp ! get a new window 
1426         rdpr    %pstate, %l5
1427         andn    %l5, PSTATE_IE, %l1
1428         wrpr    %l1, %pstate            ! disable interrupt
1429         !
1430         ! We have a pointer to an interrupt vector data structure.
1431         ! Put the request on the cpu's softint priority list and
1432         ! set %set_softint.
1433         !
1434         ! Register usage
1435         !       %i0 - pointer to intr_vec_t (iv)
1436         !       %l2 - requested pil
1437         !       %l4 - cpu
1438         !       %l5 - pstate
1439         !       %l1, %l3, %l6 - temps
1440         !
1441         ! check if a softint is pending for this softint, 
1442         ! if one is pending, don't bother queuing another.
1443         !


1477         stn     %i0, [%l3]              ! [%l3] = iv, set pil_next field
1478 2:
1479         !
 1480         ! no pending intr_vec_t; make this intr_vec_t the new head
1481         !
1482         add     %l4, INTR_HEAD, %l6     ! %l6 = &cpu->m_cpu.intr_head[pil]
1483         stn     %i0, [%l6 + %l0]        ! cpu->m_cpu.intr_head[pil] = iv
1484 3:
1485         !
1486         ! Write %set_softint with (1<<pil) to cause a "pil" level trap
1487         !
1488         mov     1, %l1                  ! %l1 = 1
1489         sll     %l1, %l2, %l1           ! %l1 = 1 << pil
1490         wr      %l1, SET_SOFTINT        ! trigger required pil softint
1491 4:
1492         wrpr    %g0, %l5, %pstate       ! %pstate = saved %pstate (in %l5)
1493         ret
1494         restore
1495         SET_SIZE(kdi_setsoftint)
1496         











1497         !
1498         ! Register usage
1499         !       Arguments:
1500         !       %g1 - Pointer to intr_vec_t (iv)
1501         !
1502         !       Internal:
1503         !       %g2 - pil
1504         !       %g4 - cpu
1505         !       %g3,%g5-g7 - temps
1506         !
1507         ENTRY_NP(setsoftint_tl1)
1508         !
1509         ! We have a pointer to an interrupt vector data structure.
1510         ! Put the request on the cpu's softint priority list and
1511         ! set %set_softint.
1512         !
1513         CPU_ADDR(%g4, %g2)              ! %g4 = cpu
1514         lduh    [%g1 + IV_PIL], %g2     ! %g2 = iv->iv_pil
1515 
1516         !


1561         ldn     [%g1 + IV_PIL_NEXT], %g6        ! 
1562         stna    %g6, [%g5 + TRAP_ENT_F1]%asi    ! trap_f1 = iv->iv_pil_next
1563         add     %g4, INTR_HEAD, %g6
1564         ldn     [%g6 + %g7], %g6                ! %g6=cpu->m_cpu.intr_head[pil]
1565         stna    %g6, [%g5 + TRAP_ENT_F2]%asi    ! trap_f2 = intr_head[pil]
1566         add     %g4, INTR_TAIL, %g6
1567         ldn     [%g6 + %g7], %g6                ! %g6=cpu->m_cpu.intr_tail[pil]
1568         stna    %g6, [%g5 + TRAP_ENT_F3]%asi    ! trap_f3 = intr_tail[pil]
1569         stna    %g2, [%g5 + TRAP_ENT_F4]%asi    ! trap_f4 = pil
1570         TRACE_NEXT(%g5, %g6, %g3)
1571 #endif /* TRAPTRACE */
1572         !
1573         ! Write %set_softint with (1<<pil) to cause a "pil" level trap
1574         !
1575         mov     1, %g5                  ! %g5 = 1
1576         sll     %g5, %g2, %g5           ! %g5 = 1 << pil
1577         wr      %g5, SET_SOFTINT        ! trigger required pil softint
1578         retry
1579         SET_SIZE(setsoftint_tl1)
1580 











1581         !
1582         ! Register usage
1583         !       Arguments:
1584         !       %g1 - inumber
1585         !
1586         !       Internal:
1587         !       %g1 - softint pil mask
1588         !       %g2 - pil of intr_vec_t
1589         !       %g3 - pointer to current intr_vec_t (iv)
1590         !       %g4 - cpu
1591         !       %g5, %g6,%g7 - temps
1592         !
1593         ENTRY_NP(setvecint_tl1)
1594         !
1595         ! Verify the inumber received (should be inum < MAXIVNUM).
1596         !
1597         set     MAXIVNUM, %g2
1598         cmp     %g1, %g2
1599         bgeu,pn %xcc, .no_ivintr
1600         clr     %g2                     ! expected in .no_ivintr


1693         TRACE_NEXT(%g5, %g6, %g7)
1694 #endif /* TRAPTRACE */
1695         mov     1, %g6                  ! %g6 = 1
1696         sll     %g6, %g2, %g6           ! %g6 = 1 << pil
1697         or      %g1, %g6, %g1           ! %g1 |= (1 << pil), pil mask
1698         ldn     [%g3 + IV_VEC_NEXT], %g3 ! %g3 = pointer to next intr_vec_t (iv)
1699         brnz,pn %g3, 0b                 ! iv->iv_vec_next is non NULL, goto 0b
1700         nop
1701         wr      %g1, SET_SOFTINT        ! triggered one or more pil softints
1702         retry
1703 
1704 .no_ivintr:
1705         ! no_ivintr: arguments: rp, inum (%g1), pil (%g2 == 0)
1706         mov     %g2, %g3
1707         mov     %g1, %g2
1708         set     no_ivintr, %g1
1709         ba,pt   %xcc, sys_trap
1710         mov     PIL_15, %g4
1711         SET_SIZE(setvecint_tl1)
1712 











1713         ENTRY_NP(wr_clr_softint)
1714         retl
1715         wr      %o0, CLEAR_SOFTINT
1716         SET_SIZE(wr_clr_softint)
1717 











1718 /*
1719  * intr_enqueue_req
1720  *
1721  * %o0 - pil
1722  * %o1 - pointer to intr_vec_t (iv)
1723  * %o5 - preserved
1724  * %g5 - preserved
1725  */
1726         ENTRY_NP(intr_enqueue_req)
1727         !
1728         CPU_ADDR(%g4, %g1)              ! %g4 = cpu
1729 
1730         !
1731         ! Insert intr_vec_t (iv) to appropriate cpu's softint priority list
1732         !
1733         sll     %o0, CPTRSHIFT, %o0     ! %o0 = offset to pil entry
1734         add     %g4, INTR_TAIL, %g6     ! %g6 = &cpu->m_cpu.intr_tail
1735         ldn     [%o0 + %g6], %g1        ! %g1 = cpu->m_cpu.intr_tail[pil]
1736                                         !       current tail (ct)
1737         brz,pt  %g1, 2f                 ! branch if current tail is NULL


1747         ld      [%g4 + CPU_ID], %g6     ! for multi target softint, use cpuid
1748         sll     %g6, CPTRSHIFT, %g6     ! calculate offset address from cpuid
1749         add     %g3, %g6, %g3           ! %g3 = &ct->iv_xpil_next[cpuid]
1750 1:
1751         !
1752         ! update old tail
1753         !
1754         ba,pt   %xcc, 3f
 1755         stn     %o1, [%g3]              ! [%g3] = iv, set pil_next field
1756 2:
1757         !
 1758         ! no intr_vec_t queued, so make this intr_vec_t the new head
1759         !
1760         add     %g4, INTR_HEAD, %g6     ! %g6 = &cpu->m_cpu.intr_head[pil]
1761         stn     %o1, [%g6 + %o0]        ! cpu->m_cpu.intr_head[pil] = iv
1762 3:
1763         retl
1764         nop
1765         SET_SIZE(intr_enqueue_req)
1766 


1767 /*
1768  * Set CPU's base SPL level, based on which interrupt levels are active.
1769  *      Called at spl7 or above.
1770  */
1771 








1772         ENTRY_NP(set_base_spl)
1773         ldn     [THREAD_REG + T_CPU], %o2       ! load CPU pointer
1774         ld      [%o2 + CPU_INTR_ACTV], %o5      ! load active interrupts mask
1775 
1776 /*
 1777  * WARNING: non-standard calling sequence; do not call from C
1778  *      %o2 = pointer to CPU
1779  *      %o5 = updated CPU_INTR_ACTV
1780  */
1781 _intr_set_spl:                                  ! intr_thread_exit enters here
1782         !
1783         ! Determine highest interrupt level active.  Several could be blocked
 1784         ! at higher levels than this one, so we must convert the flags to a PIL.
1785         ! Normally nothing will be blocked, so test this first.
1786         !
1787         brz,pt  %o5, 1f                         ! nothing active
1788         sra     %o5, 11, %o3                    ! delay - set %o3 to bits 15-11
1789         set     _intr_flag_table, %o1
1790         tst     %o3                             ! see if any of the bits set
1791         ldub    [%o1 + %o3], %o3                ! load bit number


1802         ldub    [%o1 + %o3], %o3
1803 
1804         !
 1805         ! highest interrupt level number active is in %o3
1806         !
1807 1:
1808         retl
1809         st      %o3, [%o2 + CPU_BASE_SPL]       ! delay - store base priority
1810         SET_SIZE(set_base_spl)
1811 
1812 /*
1813  * Table that finds the most significant bit set in a five bit field.
 1814  * Each entry is the high-order bit number + 1 of its index in the table.
1815  * This read-only data is in the text segment.
1816  */
1817 _intr_flag_table:
1818         .byte   0, 1, 2, 2,     3, 3, 3, 3,     4, 4, 4, 4,     4, 4, 4, 4
1819         .byte   5, 5, 5, 5,     5, 5, 5, 5,     5, 5, 5, 5,     5, 5, 5, 5
1820         .align  4
1821 


1822 /*
1823  * int
1824  * intr_passivate(from, to)
1825  *      kthread_id_t    from;           interrupt thread
1826  *      kthread_id_t    to;             interrupted thread
1827  */
1828 









1829         ENTRY_NP(intr_passivate)
1830         save    %sp, -SA(MINFRAME), %sp ! get a new window 
1831 
1832         flushw                          ! force register windows to stack
1833         !
1834         ! restore registers from the base of the stack of the interrupt thread.
1835         !
1836         ldn     [%i0 + T_STACK], %i2    ! get stack save area pointer
1837         ldn     [%i2 + (0*GREGSIZE)], %l0       ! load locals
1838         ldn     [%i2 + (1*GREGSIZE)], %l1
1839         ldn     [%i2 + (2*GREGSIZE)], %l2
1840         ldn     [%i2 + (3*GREGSIZE)], %l3
1841         ldn     [%i2 + (4*GREGSIZE)], %l4
1842         ldn     [%i2 + (5*GREGSIZE)], %l5
1843         ldn     [%i2 + (6*GREGSIZE)], %l6
1844         ldn     [%i2 + (7*GREGSIZE)], %l7
1845         ldn     [%i2 + (8*GREGSIZE)], %o0       ! put ins from stack in outs
1846         ldn     [%i2 + (9*GREGSIZE)], %o1
1847         ldn     [%i2 + (10*GREGSIZE)], %o2
1848         ldn     [%i2 + (11*GREGSIZE)], %o3


1865         stn     %l6, [%i3 + STACK_BIAS + (6*GREGSIZE)]
1866         stn     %l7, [%i3 + STACK_BIAS + (7*GREGSIZE)]
1867         stn     %o0, [%i3 + STACK_BIAS + (8*GREGSIZE)]  ! save ins using outs
1868         stn     %o1, [%i3 + STACK_BIAS + (9*GREGSIZE)]
1869         stn     %o2, [%i3 + STACK_BIAS + (10*GREGSIZE)]
1870         stn     %o3, [%i3 + STACK_BIAS + (11*GREGSIZE)]
1871         stn     %o4, [%i3 + STACK_BIAS + (12*GREGSIZE)]
1872         stn     %o5, [%i3 + STACK_BIAS + (13*GREGSIZE)]
1873         stn     %i4, [%i3 + STACK_BIAS + (14*GREGSIZE)]
1874                                                 ! fp, %i7 copied using %i4
1875         stn     %i5, [%i3 + STACK_BIAS + (15*GREGSIZE)]
1876         stn     %g0, [%i2 + ((8+6)*GREGSIZE)]
1877                                                 ! clear fp in save area
1878         
1879         ! load saved pil for return
1880         ldub    [%i0 + T_PIL], %i0
1881         ret
1882         restore
1883         SET_SIZE(intr_passivate)
1884 




1885 /*
1886  * intr_get_time() is a resource for interrupt handlers to determine how
1887  * much time has been spent handling the current interrupt. Such a function
1888  * is needed because higher level interrupts can arrive during the
1889  * processing of an interrupt, thus making direct comparisons of %tick by
1890  * the handler inaccurate. intr_get_time() only returns time spent in the
1891  * current interrupt handler.
1892  *
1893  * The caller must be calling from an interrupt handler running at a pil
1894  * below or at lock level. Timings are not provided for high-level
1895  * interrupts.
1896  *
1897  * The first time intr_get_time() is called while handling an interrupt,
1898  * it returns the time since the interrupt handler was invoked. Subsequent
1899  * calls will return the time since the prior call to intr_get_time(). Time
1900  * is returned as ticks, adjusted for any clock divisor due to power
1901  * management. Use tick2ns() to convert ticks to nsec. Warning: ticks may
1902  * not be the same across CPUs.
1903  *
1904  * Theory Of Intrstat[][]:


1925  * "checkpoints" the timing information by incrementing intrstat[pil][0]
1926  * to include elapsed running time, and by setting t_intr_start to %tick.
1927  * It then sets the return value to intrstat[pil][0] - intrstat[pil][1],
1928  * and updates intrstat[pil][1] to be the same as the new value of
1929  * intrstat[pil][0].
1930  *
1931  * In the normal handling of interrupts, after an interrupt handler returns
1932  * and the code in intr_thread() updates intrstat[pil][0], it then sets
1933  * intrstat[pil][1] to the new value of intrstat[pil][0]. When [0] == [1],
1934  * the timings are reset, i.e. intr_get_time() will return [0] - [1] which
1935  * is 0.
1936  *
1937  * Whenever interrupts arrive on a CPU which is handling a lower pil
1938  * interrupt, they update the lower pil's [0] to show time spent in the
1939  * handler that they've interrupted. This results in a growing discrepancy
1940  * between [0] and [1], which is returned the next time intr_get_time() is
1941  * called. Time spent in the higher-pil interrupt will not be returned in
1942  * the next intr_get_time() call from the original interrupt, because
1943  * the higher-pil interrupt's time is accumulated in intrstat[higherpil][].
1944  */







1945         ENTRY_NP(intr_get_time)
1946 #ifdef DEBUG
1947         !
1948         ! Lots of asserts, but just check panic_quiesce first.
1949         ! Don't bother with lots of tests if we're just ignoring them.
1950         !
1951         sethi   %hi(panic_quiesce), %o0
1952         ld      [%o0 + %lo(panic_quiesce)], %o0
1953         brnz,pn %o0, 2f
1954         nop     
1955         !
1956         ! ASSERT(%pil <= LOCK_LEVEL)
1957         !
1958         rdpr    %pil, %o1
1959         cmp     %o1, LOCK_LEVEL
1960         ble,pt  %xcc, 0f
1961         sethi   %hi(intr_get_time_high_pil), %o0        ! delay
1962         call    panic
1963         or      %o0, %lo(intr_get_time_high_pil), %o0
1964 0:      


2033         stx     %o2, [%o3]
2034         ldx     [%o3 + 8], %o4          ! %o4 = cpu_m.intrstat[pil][1]
2035         sub     %o2, %o4, %o0           ! %o0 is elapsed time since %o4
2036         stx     %o2, [%o3 + 8]          ! make [1] match [0], resetting time
2037 
2038         ld      [%o5 + CPU_BASE_SPL], %o2       ! restore %pil to the greater
2039         cmp     %o2, %o1                        ! of either our pil %o1 or
2040         movl    %xcc, %o1, %o2                  ! cpu_base_spl.
2041         retl
2042         wrpr    %g0, %o2, %pil
2043         SET_SIZE(intr_get_time)
2044 
2045 #ifdef DEBUG
2046 intr_get_time_high_pil:
2047         .asciz  "intr_get_time(): %pil > LOCK_LEVEL"
2048 intr_get_time_not_intr:
2049         .asciz  "intr_get_time(): not called from an interrupt thread"
2050 intr_get_time_no_start_time:
2051         .asciz  "intr_get_time(): t_intr_start == 0"
2052 #endif /* DEBUG */