5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25 #if defined(lint)
26 #include <sys/types.h>
27 #include <sys/thread.h>
28 #else /* lint */
29 #include "assym.h"
30 #endif /* lint */
31
32 #include <sys/cmn_err.h>
33 #include <sys/ftrace.h>
34 #include <sys/asm_linkage.h>
35 #include <sys/machthread.h>
36 #include <sys/machcpuvar.h>
37 #include <sys/intreg.h>
38 #include <sys/ivintr.h>
39
40 #ifdef TRAPTRACE
41 #include <sys/traptrace.h>
42 #endif /* TRAPTRACE */
43
44 #if defined(lint)
45
/* Lint stub only; the real pil_interrupt is implemented in assembly below. */
46 /* ARGSUSED */
47 void
48 pil_interrupt(int level)
49 {}
50
51 #else /* lint */
52
53
54 /*
55 * (TT 0x40..0x4F, TL>0) Interrupt Level N Handler (N == 1..15)
56 * Register passed from LEVEL_INTERRUPT(level)
57 * %g4 - interrupt request level
58 */
59 ENTRY_NP(pil_interrupt)
60 !
61 ! Register usage
62 ! %g1 - cpu
63 ! %g2 - pointer to intr_vec_t (iv)
64 ! %g4 - pil
65 ! %g3, %g5, %g6, %g7 - temps
66 !
67 ! Grab the first or list head intr_vec_t off the intr_head[pil]
68 ! and panic immediately if list head is NULL. Otherwise, update
69 ! intr_head[pil] to next intr_vec_t on the list and clear softint
70 ! %clear_softint, if next intr_vec_t is NULL.
71 !
72 CPU_ADDR(%g1, %g5) ! %g1 = cpu
73 !
143 !
144 ! figure which handler to run and which %pil it starts at
145 ! intr_thread starts at DISP_LEVEL to prevent preemption
146 ! current_thread starts at PIL_MAX to protect cpu_intr_actv
147 !
148 mov %g4, %g3 ! %g3 = %g4, pil
149 cmp %g4, LOCK_LEVEL
150 bg,a,pt %xcc, 3f ! branch if pil > LOCK_LEVEL
151 mov PIL_MAX, %g4 ! %g4 = PIL_MAX (15)
152 sethi %hi(intr_thread), %g1 ! %g1 = intr_thread
153 mov DISP_LEVEL, %g4 ! %g4 = DISP_LEVEL (11)
154 ba,pt %xcc, sys_trap
155 or %g1, %lo(intr_thread), %g1
156 3:
157 sethi %hi(current_thread), %g1 ! %g1 = current_thread
158 ba,pt %xcc, sys_trap
159 or %g1, %lo(current_thread), %g1
160 SET_SIZE(pil_interrupt_common)
161 SET_SIZE(pil_interrupt)
162
163 #endif /* lint */
164
165
166 #ifndef lint
167 _spurious:
168 .asciz "!interrupt 0x%x at level %d not serviced"
169
170 /*
171 * SERVE_INTR_PRE is called once, just before the first invocation
172 * of SERVE_INTR.
173 *
174 * Registers on entry:
175 *
176 * iv_p, cpu, regs: may be out-registers
177 * ls1, ls2: local scratch registers
178 * os1, os2, os3: scratch registers, may be out
179 */
180
181 #define SERVE_INTR_PRE(iv_p, cpu, ls1, ls2, os1, os2, os3, regs) \
182 mov iv_p, ls1; \
183 mov iv_p, ls2; \
184 SERVE_INTR_TRACE(iv_p, os1, os2, os3, regs);
185
186 /*
312 mov os3, os4; \
313 GET_TRACE_TICK(os2, os3); \
314 stxa os2, [os1 + TRAP_ENT_TICK]%asi; \
315 TRACE_SAVE_TL_GL_REGS(os1, os2); \
316 set TT_SERVE_INTR, os2; \
317 rdpr %pil, os3; \
318 or os2, os3, os2; \
319 stha os2, [os1 + TRAP_ENT_TT]%asi; \
320 stna %sp, [os1 + TRAP_ENT_SP]%asi; \
321 stna inum, [os1 + TRAP_ENT_TR]%asi; \
322 stna %g0, [os1 + TRAP_ENT_F1]%asi; \
323 stna %g0, [os1 + TRAP_ENT_F2]%asi; \
324 stna %g0, [os1 + TRAP_ENT_F3]%asi; \
325 stna %g0, [os1 + TRAP_ENT_F4]%asi; \
326 TRACE_NEXT(os1, os2, os3); \
327 wrpr %g0, os4, %pstate
328 #else /* TRAPTRACE */
329 #define SERVE_INTR_TRACE2(inum, os1, os2, os3, os4)
330 #endif /* TRAPTRACE */
331
332 #endif /* lint */
333
334 #if defined(lint)
335
/* Lint stub only; intr_thread is implemented in assembly below. */
336 /*ARGSUSED*/
337 void
338 intr_thread(struct regs *regs, uint64_t iv_p, uint_t pil)
339 {}
340
341 #else /* lint */
342
343 #define INTRCNT_LIMIT 16
344
345 /*
346 * Handle an interrupt in a new thread.
347 * Entry:
348 * %o0 = pointer to regs structure
349 * %o1 = pointer to current intr_vec_t (iv) to be processed
350 * %o2 = pil
351 * %sp = on current thread's kernel stack
352 * %o7 = return linkage to trap code
353 * %g7 = current thread
354 * %pstate = normal globals, interrupts enabled,
355 * privileged, fp disabled
356 * %pil = DISP_LEVEL
357 *
358 * Register Usage
359 * %l0 = return linkage
360 * %l1 = pil
361 * %l2 - %l3 = scratch
362 * %l4 - %l7 = reserved for sys_trap
875 ldn [%o2 + CPU_INTR_THREAD], %o5 ! get list pointer
876 stn %o5, [THREAD_REG + T_LINK]
877 call swtch ! switch to best thread
878 stn THREAD_REG, [%o2 + CPU_INTR_THREAD] ! delay - put thread on list
879 ba,a,pt %xcc, . ! swtch() shouldn't return
880 SET_SIZE(intr_thread_exit)
881
882 .global ftrace_intr_thread_format_str
883 ftrace_intr_thread_format_str:
884 .asciz "intr_thread(): regs=0x%lx, int=0x%lx, pil=0x%lx"
885 #ifdef DEBUG
886 intr_thread_actv_bit_set:
887 .asciz "intr_thread(): cpu_intr_actv bit already set for PIL"
888 intr_thread_actv_bit_not_set:
889 .asciz "intr_thread(): cpu_intr_actv bit not set for PIL"
890 intr_thread_exit_actv_bit_set:
891 .asciz "intr_thread_exit(): cpu_intr_actv bit erroneously set for PIL"
892 intr_thread_t_intr_start_zero:
893 .asciz "intr_thread(): t_intr_start zero upon handler return"
894 #endif /* DEBUG */
895 #endif /* lint */
896
897 #if defined(lint)
898
899 /*
900 * Handle an interrupt in the current thread
901 * Entry:
902 * %o0 = pointer to regs structure
903 * %o1 = pointer to current intr_vec_t (iv) to be processed
904 * %o2 = pil
905 * %sp = on current thread's kernel stack
906 * %o7 = return linkage to trap code
907 * %g7 = current thread
908 * %pstate = normal globals, interrupts enabled,
909 * privileged, fp disabled
910 * %pil = PIL_MAX
911 *
912 * Register Usage
913 * %l0 = return linkage
914 * %l1 = old stack
915 * %l2 - %l3 = scratch
916 * %l4 - %l7 = reserved for sys_trap
917 * %o3 = cpu
918 * %o0 = scratch
919 * %o4 - %o5 = scratch
920 */
/* Lint stub only; current_thread is implemented in assembly below. */
921 /* ARGSUSED */
922 void
923 current_thread(struct regs *regs, uint64_t iv_p, uint_t pil)
924 {}
925
926 #else /* lint */
927
928 ENTRY_NP(current_thread)
929
930 mov %o7, %l0
931 ldn [THREAD_REG + T_CPU], %o3
932
933 ldn [THREAD_REG + T_ONFAULT], %l2
934 brz,pt %l2, no_onfault ! branch if no onfault label set
935 nop
936 stn %g0, [THREAD_REG + T_ONFAULT]! clear onfault label
937 ldn [THREAD_REG + T_LOFAULT], %l3
938 stn %g0, [THREAD_REG + T_LOFAULT]! clear lofault data
939
940 sub %o2, LOCK_LEVEL + 1, %o5
941 sll %o5, CPTRSHIFT, %o5
942 add %o5, CPU_OFD, %o4 ! %o4 has on_fault data offset
943 stn %l2, [%o3 + %o4] ! save onfault label for pil %o2
944 add %o5, CPU_LFD, %o4 ! %o4 has lofault data offset
945 stn %l3, [%o3 + %o4] ! save lofault data for pil %o2
946
947 no_onfault:
1381 ! Enable interrupts and return
1382 jmp %l0 + 8
1383 wrpr %g0, %o2, %pil ! enable interrupts
1384 SET_SIZE(current_thread)
1385
1386
1387 #ifdef DEBUG
1388 current_thread_wrong_pil:
1389 .asciz "current_thread: unexpected pil level: %d"
1390 current_thread_actv_bit_set:
1391 .asciz "current_thread(): cpu_intr_actv bit already set for PIL"
1392 current_thread_actv_bit_not_set:
1393 .asciz "current_thread(): cpu_intr_actv bit not set for PIL"
1394 current_thread_nested_pil_zero:
1395 .asciz "current_thread(): timestamp zero for nested PIL %d"
1396 current_thread_timestamp_zero:
1397 .asciz "current_thread(): timestamp zero upon handler return"
1398 current_thread_nested_PIL_not_found:
1399 .asciz "current_thread: couldn't find nested high-level PIL"
1400 #endif /* DEBUG */
1401 #endif /* lint */
1402
1403 /*
1404 * Return a thread's interrupt level.
1405 * Since this isn't saved anywhere but in %l4 on interrupt entry, we
1406 * must dig it out of the save area.
1407 *
1408 * Caller 'swears' that this really is an interrupt thread.
1409 *
1410 * int
1411 * intr_level(t)
1412 * kthread_id_t t;
1413 */
1414
1415 #if defined(lint)
1416
/* Lint stub only; intr_level is implemented in assembly below. */
1417 /* ARGSUSED */
1418 int
1419 intr_level(kthread_id_t t)
1420 { return (0); }
1421
1422 #else /* lint */
1423
!
! intr_level(t): return the interrupt thread's saved pil (t->t_pil).
! The ldub executes in the retl delay slot, loading the byte-sized
! T_PIL field into %o0 as the return value.
!
1424 ENTRY_NP(intr_level)
1425 retl
1426 ldub [%o0 + T_PIL], %o0 ! return saved pil
1427 SET_SIZE(intr_level)
1428
1429 #endif /* lint */
1430
1431 #if defined(lint)
1432
/* Lint stub only; disable_pil_intr is implemented in assembly below. */
1433 /* ARGSUSED */
1434 int
1435 disable_pil_intr()
1436 { return (0); }
1437
1438 #else /* lint */
1439
!
! disable_pil_intr(): raise %pil to PIL_MAX, masking interrupt levels 1-15.
! Returns the previous %pil in %o0 so the caller can later restore it with
! enable_pil_intr(). The wrpr executes in the retl delay slot.
!
1440 ENTRY_NP(disable_pil_intr)
1441 rdpr %pil, %o0
1442 retl
1443 wrpr %g0, PIL_MAX, %pil ! disable interrupts (1-15)
1444 SET_SIZE(disable_pil_intr)
1445
1446 #endif /* lint */
1447
1448 #if defined(lint)
1449
/* Lint stub only; enable_pil_intr is implemented in assembly below. */
1450 /* ARGSUSED */
1451 void
1452 enable_pil_intr(int pil_save)
1453 {}
1454
1455 #else /* lint */
1456
!
! enable_pil_intr(pil_save): restore %pil to the value previously returned
! by disable_pil_intr() (%o0). The wrpr executes in the retl delay slot.
!
1457 ENTRY_NP(enable_pil_intr)
1458 retl
1459 wrpr %o0, %pil
1460 SET_SIZE(enable_pil_intr)
1461
1462 #endif /* lint */
1463
1464 #if defined(lint)
1465
/* Lint stub only; disable_vec_intr is implemented in assembly below. */
1466 /* ARGSUSED */
1467 uint_t
1468 disable_vec_intr(void)
1469 { return (0); }
1470
1471 #else /* lint */
1472
!
! disable_vec_intr(): clear PSTATE.IE to block vectored interrupts.
! Returns the previous %pstate in %o0 for enable_vec_intr() to restore.
! The wrpr executes in the retl delay slot.
!
1473 ENTRY_NP(disable_vec_intr)
1474 rdpr %pstate, %o0
1475 andn %o0, PSTATE_IE, %g1
1476 retl
1477 wrpr %g0, %g1, %pstate ! disable interrupt
1478 SET_SIZE(disable_vec_intr)
1479
1480 #endif /* lint */
1481
1482 #if defined(lint)
1483
/* Lint stub only; enable_vec_intr is implemented in assembly below. */
1484 /* ARGSUSED */
1485 void
1486 enable_vec_intr(uint_t pstate_save)
1487 {}
1488
1489 #else /* lint */
1490
!
! enable_vec_intr(pstate_save): restore %pstate (and thus PSTATE.IE)
! from the value returned by disable_vec_intr() (%o0).
!
1491 ENTRY_NP(enable_vec_intr)
1492 retl
1493 wrpr %g0, %o0, %pstate
1494 SET_SIZE(enable_vec_intr)
1495
1496 #endif /* lint */
1497
1498 #if defined(lint)
1499
/* Lint stub only; cbe_level14 is implemented in assembly below. */
1500 void
1501 cbe_level14(void)
1502 {}
1503
1504 #else /* lint */
1505
!
! cbe_level14(): level-14 cyclic backend handler. If the pending softint
! came from the %tick/%stick compare (TICK_INT_MASK | STICK_INT_MASK set
! in SOFTINT), call cyclic_fire(cpu); otherwise fall through. Always
! returns 1 in %o0 via the restore in the ret delay slot.
!
1506 ENTRY_NP(cbe_level14)
1507 save %sp, -SA(MINFRAME), %sp ! get a new window
1508 !
1509 ! Make sure that this is from TICK_COMPARE; if not just return
1510 !
1511 rd SOFTINT, %l1
1512 set (TICK_INT_MASK | STICK_INT_MASK), %o2
1513 andcc %l1, %o2, %g0
1514 bz,pn %icc, 2f
1515 nop
1516
1517 CPU_ADDR(%o1, %o2)
1518 call cyclic_fire
1519 mov %o1, %o0 ! delay - arg0 = cpu
1520 2:
1521 ret
1522 restore %g0, 1, %o0 ! return 1
1523 SET_SIZE(cbe_level14)
1524
1525 #endif /* lint */
1526
1527
1528 #if defined(lint)
1529
/* Lint stub only; kdi_setsoftint is implemented in assembly below. */
1530 /* ARGSUSED */
1531 void
1532 kdi_setsoftint(uint64_t iv_p)
1533 {}
1534
1535 #else /* lint */
1536
1537 ENTRY_NP(kdi_setsoftint)
1538 save %sp, -SA(MINFRAME), %sp ! get a new window
1539 rdpr %pstate, %l5
1540 andn %l5, PSTATE_IE, %l1
1541 wrpr %l1, %pstate ! disable interrupt
1542 !
1543 ! We have a pointer to an interrupt vector data structure.
1544 ! Put the request on the cpu's softint priority list and
1545 ! set %set_softint.
1546 !
1547 ! Register usage
1548 ! %i0 - pointer to intr_vec_t (iv)
1549 ! %l2 - requested pil
1550 ! %l4 - cpu
1551 ! %l5 - pstate
1552 ! %l1, %l3, %l6 - temps
1553 !
1554 ! check if a softint is pending for this softint,
1555 ! if one is pending, don't bother queuing another.
1556 !
1590 stn %i0, [%l3] ! [%l3] = iv, set pil_next field
1591 2:
1592 !
1593 ! no pending intr_vec_t; make intr_vec_t as new head
1594 !
1595 add %l4, INTR_HEAD, %l6 ! %l6 = &cpu->m_cpu.intr_head[pil]
1596 stn %i0, [%l6 + %l0] ! cpu->m_cpu.intr_head[pil] = iv
1597 3:
1598 !
1599 ! Write %set_softint with (1<<pil) to cause a "pil" level trap
1600 !
1601 mov 1, %l1 ! %l1 = 1
1602 sll %l1, %l2, %l1 ! %l1 = 1 << pil
1603 wr %l1, SET_SOFTINT ! trigger required pil softint
1604 4:
1605 wrpr %g0, %l5, %pstate ! %pstate = saved %pstate (in %l5)
1606 ret
1607 restore
1608 SET_SIZE(kdi_setsoftint)
1609
1610 #endif /* lint */
1611
1612 #if defined(lint)
1613
/* Lint stub only; setsoftint_tl1 is implemented in assembly below. */
1614 /*ARGSUSED*/
1615 void
1616 setsoftint_tl1(uint64_t iv_p, uint64_t dummy)
1617 {}
1618
1619 #else /* lint */
1620
1621 !
1622 ! Register usage
1623 ! Arguments:
1624 ! %g1 - Pointer to intr_vec_t (iv)
1625 !
1626 ! Internal:
1627 ! %g2 - pil
1628 ! %g4 - cpu
1629 ! %g3,%g5-g7 - temps
1630 !
1631 ENTRY_NP(setsoftint_tl1)
1632 !
1633 ! We have a pointer to an interrupt vector data structure.
1634 ! Put the request on the cpu's softint priority list and
1635 ! set %set_softint.
1636 !
1637 CPU_ADDR(%g4, %g2) ! %g4 = cpu
1638 lduh [%g1 + IV_PIL], %g2 ! %g2 = iv->iv_pil
1639
1640 !
1685 ldn [%g1 + IV_PIL_NEXT], %g6 !
1686 stna %g6, [%g5 + TRAP_ENT_F1]%asi ! trap_f1 = iv->iv_pil_next
1687 add %g4, INTR_HEAD, %g6
1688 ldn [%g6 + %g7], %g6 ! %g6=cpu->m_cpu.intr_head[pil]
1689 stna %g6, [%g5 + TRAP_ENT_F2]%asi ! trap_f2 = intr_head[pil]
1690 add %g4, INTR_TAIL, %g6
1691 ldn [%g6 + %g7], %g6 ! %g6=cpu->m_cpu.intr_tail[pil]
1692 stna %g6, [%g5 + TRAP_ENT_F3]%asi ! trap_f3 = intr_tail[pil]
1693 stna %g2, [%g5 + TRAP_ENT_F4]%asi ! trap_f4 = pil
1694 TRACE_NEXT(%g5, %g6, %g3)
1695 #endif /* TRAPTRACE */
1696 !
1697 ! Write %set_softint with (1<<pil) to cause a "pil" level trap
1698 !
1699 mov 1, %g5 ! %g5 = 1
1700 sll %g5, %g2, %g5 ! %g5 = 1 << pil
1701 wr %g5, SET_SOFTINT ! trigger required pil softint
1702 retry
1703 SET_SIZE(setsoftint_tl1)
1704
1705 #endif /* lint */
1706
1707 #if defined(lint)
1708
/* Lint stub only; setvecint_tl1 is implemented in assembly below. */
1709 /*ARGSUSED*/
1710 void
1711 setvecint_tl1(uint64_t inum, uint64_t dummy)
1712 {}
1713
1714 #else /* lint */
1715
1716 !
1717 ! Register usage
1718 ! Arguments:
1719 ! %g1 - inumber
1720 !
1721 ! Internal:
1722 ! %g1 - softint pil mask
1723 ! %g2 - pil of intr_vec_t
1724 ! %g3 - pointer to current intr_vec_t (iv)
1725 ! %g4 - cpu
1726 ! %g5, %g6,%g7 - temps
1727 !
1728 ENTRY_NP(setvecint_tl1)
1729 !
1730 ! Verify the inumber received (should be inum < MAXIVNUM).
1731 !
1732 set MAXIVNUM, %g2
1733 cmp %g1, %g2
1734 bgeu,pn %xcc, .no_ivintr
1735 clr %g2 ! expected in .no_ivintr
1828 TRACE_NEXT(%g5, %g6, %g7)
1829 #endif /* TRAPTRACE */
1830 mov 1, %g6 ! %g6 = 1
1831 sll %g6, %g2, %g6 ! %g6 = 1 << pil
1832 or %g1, %g6, %g1 ! %g1 |= (1 << pil), pil mask
1833 ldn [%g3 + IV_VEC_NEXT], %g3 ! %g3 = pointer to next intr_vec_t (iv)
1834 brnz,pn %g3, 0b ! iv->iv_vec_next is non NULL, goto 0b
1835 nop
1836 wr %g1, SET_SOFTINT ! triggered one or more pil softints
1837 retry
1838
1839 .no_ivintr:
1840 ! no_ivintr: arguments: rp, inum (%g1), pil (%g2 == 0)
1841 mov %g2, %g3
1842 mov %g1, %g2
1843 set no_ivintr, %g1
1844 ba,pt %xcc, sys_trap
1845 mov PIL_15, %g4
1846 SET_SIZE(setvecint_tl1)
1847
1848 #endif /* lint */
1849
1850 #if defined(lint)
1851
/* Lint stub only; wr_clr_softint is implemented in assembly below. */
1852 /*ARGSUSED*/
1853 void
1854 wr_clr_softint(uint_t value)
1855 {}
1856
1857 #else
1858
!
! wr_clr_softint(value): write value to the CLEAR_SOFTINT register to
! clear the corresponding pending soft-interrupt bits. The wr executes
! in the retl delay slot.
!
1859 ENTRY_NP(wr_clr_softint)
1860 retl
1861 wr %o0, CLEAR_SOFTINT
1862 SET_SIZE(wr_clr_softint)
1863
1864 #endif /* lint */
1865
1866 #if defined(lint)
1867
/* Lint stub only; intr_enqueue_req is implemented in assembly below. */
1868 /*ARGSUSED*/
1869 void
1870 intr_enqueue_req(uint_t pil, uint64_t inum)
1871 {}
1872
1873 #else /* lint */
1874
1875 /*
1876 * intr_enqueue_req
1877 *
1878 * %o0 - pil
1879 * %o1 - pointer to intr_vec_t (iv)
1880 * %o5 - preserved
1881 * %g5 - preserved
1882 */
1883 ENTRY_NP(intr_enqueue_req)
1884 !
1885 CPU_ADDR(%g4, %g1) ! %g4 = cpu
1886
1887 !
1888 ! Insert intr_vec_t (iv) to appropriate cpu's softint priority list
1889 !
1890 sll %o0, CPTRSHIFT, %o0 ! %o0 = offset to pil entry
1891 add %g4, INTR_TAIL, %g6 ! %g6 = &cpu->m_cpu.intr_tail
1892 ldn [%o0 + %g6], %g1 ! %g1 = cpu->m_cpu.intr_tail[pil]
1893 ! current tail (ct)
1894 brz,pt %g1, 2f ! branch if current tail is NULL
1904 ld [%g4 + CPU_ID], %g6 ! for multi target softint, use cpuid
1905 sll %g6, CPTRSHIFT, %g6 ! calculate offset address from cpuid
1906 add %g3, %g6, %g3 ! %g3 = &ct->iv_xpil_next[cpuid]
1907 1:
1908 !
1909 ! update old tail
1910 !
1911 ba,pt %xcc, 3f
1912 stn %o1, [%g3] ! {%g5] = iv, set pil_next field
1913 2:
1914 !
1915 ! no intr_vec_t's queued so make intr_vec_t as new head
1916 !
1917 add %g4, INTR_HEAD, %g6 ! %g6 = &cpu->m_cpu.intr_head[pil]
1918 stn %o1, [%g6 + %o0] ! cpu->m_cpu.intr_head[pil] = iv
1919 3:
1920 retl
1921 nop
1922 SET_SIZE(intr_enqueue_req)
1923
1924 #endif /* lint */
1925
1926 /*
1927 * Set CPU's base SPL level, based on which interrupt levels are active.
1928 * Called at spl7 or above.
1929 */
1930
1931 #if defined(lint)
1932
/* Lint stub only; set_base_spl is implemented in assembly below. */
1933 void
1934 set_base_spl(void)
1935 {}
1936
1937 #else /* lint */
1938
1939 ENTRY_NP(set_base_spl)
1940 ldn [THREAD_REG + T_CPU], %o2 ! load CPU pointer
1941 ld [%o2 + CPU_INTR_ACTV], %o5 ! load active interrupts mask
1942
1943 /*
1944 * WARNING: non-standard calling sequence; do not call from C
1945 * %o2 = pointer to CPU
1946 * %o5 = updated CPU_INTR_ACTV
1947 */
1948 _intr_set_spl: ! intr_thread_exit enters here
1949 !
1950 ! Determine highest interrupt level active. Several could be blocked
1951 ! at higher levels than this one, so must convert flags to a PIL
1952 ! Normally nothing will be blocked, so test this first.
1953 !
1954 brz,pt %o5, 1f ! nothing active
1955 sra %o5, 11, %o3 ! delay - set %o3 to bits 15-11
1956 set _intr_flag_table, %o1
1957 tst %o3 ! see if any of the bits set
1958 ldub [%o1 + %o3], %o3 ! load bit number
1969 ldub [%o1 + %o3], %o3
1970
1971 !
1972 ! highest interrupt level number active is in %l6
1973 !
1974 1:
1975 retl
1976 st %o3, [%o2 + CPU_BASE_SPL] ! delay - store base priority
1977 SET_SIZE(set_base_spl)
1978
1979 /*
1980 * Table that finds the most significant bit set in a five bit field.
1981 * Each entry is the high-order bit number + 1 of its index in the table.
1982 * This read-only data is in the text segment.
1983 */
1984 _intr_flag_table:
! Indexed by a 5-bit mask of active levels; each entry is the number of
! the highest set bit, plus one (0 for an empty mask).
1985 .byte 0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4
1986 .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
1987 .align 4
1988
1989 #endif /* lint */
1990
1991 /*
1992 * int
1993 * intr_passivate(from, to)
1994 * kthread_id_t from; interrupt thread
1995 * kthread_id_t to; interrupted thread
1996 */
1997
1998 #if defined(lint)
1999
/* Lint stub only; intr_passivate is implemented in assembly below. */
2000 /* ARGSUSED */
2001 int
2002 intr_passivate(kthread_id_t from, kthread_id_t to)
2003 { return (0); }
2004
2005 #else /* lint */
2006
2007 ENTRY_NP(intr_passivate)
2008 save %sp, -SA(MINFRAME), %sp ! get a new window
2009
2010 flushw ! force register windows to stack
2011 !
2012 ! restore registers from the base of the stack of the interrupt thread.
2013 !
2014 ldn [%i0 + T_STACK], %i2 ! get stack save area pointer
2015 ldn [%i2 + (0*GREGSIZE)], %l0 ! load locals
2016 ldn [%i2 + (1*GREGSIZE)], %l1
2017 ldn [%i2 + (2*GREGSIZE)], %l2
2018 ldn [%i2 + (3*GREGSIZE)], %l3
2019 ldn [%i2 + (4*GREGSIZE)], %l4
2020 ldn [%i2 + (5*GREGSIZE)], %l5
2021 ldn [%i2 + (6*GREGSIZE)], %l6
2022 ldn [%i2 + (7*GREGSIZE)], %l7
2023 ldn [%i2 + (8*GREGSIZE)], %o0 ! put ins from stack in outs
2024 ldn [%i2 + (9*GREGSIZE)], %o1
2025 ldn [%i2 + (10*GREGSIZE)], %o2
2026 ldn [%i2 + (11*GREGSIZE)], %o3
2043 stn %l6, [%i3 + STACK_BIAS + (6*GREGSIZE)]
2044 stn %l7, [%i3 + STACK_BIAS + (7*GREGSIZE)]
2045 stn %o0, [%i3 + STACK_BIAS + (8*GREGSIZE)] ! save ins using outs
2046 stn %o1, [%i3 + STACK_BIAS + (9*GREGSIZE)]
2047 stn %o2, [%i3 + STACK_BIAS + (10*GREGSIZE)]
2048 stn %o3, [%i3 + STACK_BIAS + (11*GREGSIZE)]
2049 stn %o4, [%i3 + STACK_BIAS + (12*GREGSIZE)]
2050 stn %o5, [%i3 + STACK_BIAS + (13*GREGSIZE)]
2051 stn %i4, [%i3 + STACK_BIAS + (14*GREGSIZE)]
2052 ! fp, %i7 copied using %i4
2053 stn %i5, [%i3 + STACK_BIAS + (15*GREGSIZE)]
2054 stn %g0, [%i2 + ((8+6)*GREGSIZE)]
2055 ! clear fp in save area
2056
2057 ! load saved pil for return
2058 ldub [%i0 + T_PIL], %i0
2059 ret
2060 restore
2061 SET_SIZE(intr_passivate)
2062
2063 #endif /* lint */
2064
2065 #if defined(lint)
2066
2067 /*
2068 * intr_get_time() is a resource for interrupt handlers to determine how
2069 * much time has been spent handling the current interrupt. Such a function
2070 * is needed because higher level interrupts can arrive during the
2071 * processing of an interrupt, thus making direct comparisons of %tick by
2072 * the handler inaccurate. intr_get_time() only returns time spent in the
2073 * current interrupt handler.
2074 *
2075 * The caller must be calling from an interrupt handler running at a pil
2076 * below or at lock level. Timings are not provided for high-level
2077 * interrupts.
2078 *
2079 * The first time intr_get_time() is called while handling an interrupt,
2080 * it returns the time since the interrupt handler was invoked. Subsequent
2081 * calls will return the time since the prior call to intr_get_time(). Time
2082 * is returned as ticks, adjusted for any clock divisor due to power
2083 * management. Use tick2ns() to convert ticks to nsec. Warning: ticks may
2084 * not be the same across CPUs.
2085 *
2086 * Theory Of Intrstat[][]:
2087 *
2088 * uint64_t intrstat[pil][0..1] is an array indexed by pil level, with two
2089 * uint64_ts per pil.
2090 *
2091 * intrstat[pil][0] is a cumulative count of the number of ticks spent
2092 * handling all interrupts at the specified pil on this CPU. It is
2093 * exported via kstats to the user.
2094 *
2095 * intrstat[pil][1] is always a count of ticks less than or equal to the
2096 * value in [0]. The difference between [1] and [0] is the value returned
2097 * by a call to intr_get_time(). At the start of interrupt processing,
2098 * [0] and [1] will be equal (or nearly so). As the interrupt consumes
2099 * time, [0] will increase, but [1] will remain the same. A call to
2100 * intr_get_time() will return the difference, then update [1] to be the
2101 * same as [0]. Future calls will return the time since the last call.
2102 * Finally, when the interrupt completes, [1] is updated to the same as [0].
2103 *
2104 * Implementation:
2105 *
2106 * intr_get_time() works much like a higher level interrupt arriving. It
2107 * "checkpoints" the timing information by incrementing intrstat[pil][0]
2108 * to include elapsed running time, and by setting t_intr_start to %tick.
2109 * It then sets the return value to intrstat[pil][0] - intrstat[pil][1],
2110 * and updates intrstat[pil][1] to be the same as the new value of
2111 * intrstat[pil][0].
2112 *
2113 * In the normal handling of interrupts, after an interrupt handler returns
2114 * and the code in intr_thread() updates intrstat[pil][0], it then sets
2115 * intrstat[pil][1] to the new value of intrstat[pil][0]. When [0] == [1],
2116 * the timings are reset, i.e. intr_get_time() will return [0] - [1] which
2117 * is 0.
2118 *
2119 * Whenever interrupts arrive on a CPU which is handling a lower pil
2120 * interrupt, they update the lower pil's [0] to show time spent in the
2121 * handler that they've interrupted. This results in a growing discrepancy
2122 * between [0] and [1], which is returned the next time intr_get_time() is
2123 * called. Time spent in the higher-pil interrupt will not be returned in
2124 * the next intr_get_time() call from the original interrupt, because
2125 * the higher-pil interrupt's time is accumulated in intrstat[higherpil][].
2126 */
2127
/* Lint stub only; intr_get_time is implemented in assembly below. */
2128 /*ARGSUSED*/
2129 uint64_t
2130 intr_get_time(void)
2131 { return 0; }
2132 #else /* lint */
2133
2134 ENTRY_NP(intr_get_time)
2135 #ifdef DEBUG
2136 !
2137 ! Lots of asserts, but just check panic_quiesce first.
2138 ! Don't bother with lots of tests if we're just ignoring them.
2139 !
2140 sethi %hi(panic_quiesce), %o0
2141 ld [%o0 + %lo(panic_quiesce)], %o0
2142 brnz,pn %o0, 2f
2143 nop
2144 !
2145 ! ASSERT(%pil <= LOCK_LEVEL)
2146 !
2147 rdpr %pil, %o1
2148 cmp %o1, LOCK_LEVEL
2149 ble,pt %xcc, 0f
2150 sethi %hi(intr_get_time_high_pil), %o0 ! delay
2151 call panic
2152 or %o0, %lo(intr_get_time_high_pil), %o0
2153 0:
2222 stx %o2, [%o3]
2223 ldx [%o3 + 8], %o4 ! %o4 = cpu_m.intrstat[pil][1]
2224 sub %o2, %o4, %o0 ! %o0 is elapsed time since %o4
2225 stx %o2, [%o3 + 8] ! make [1] match [0], resetting time
2226
2227 ld [%o5 + CPU_BASE_SPL], %o2 ! restore %pil to the greater
2228 cmp %o2, %o1 ! of either our pil %o1 or
2229 movl %xcc, %o1, %o2 ! cpu_base_spl.
2230 retl
2231 wrpr %g0, %o2, %pil
2232 SET_SIZE(intr_get_time)
2233
2234 #ifdef DEBUG
2235 intr_get_time_high_pil:
2236 .asciz "intr_get_time(): %pil > LOCK_LEVEL"
2237 intr_get_time_not_intr:
2238 .asciz "intr_get_time(): not called from an interrupt thread"
2239 intr_get_time_no_start_time:
2240 .asciz "intr_get_time(): t_intr_start == 0"
2241 #endif /* DEBUG */
2242 #endif /* lint */
|
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25 #include "assym.h"
26
27 #include <sys/cmn_err.h>
28 #include <sys/ftrace.h>
29 #include <sys/asm_linkage.h>
30 #include <sys/machthread.h>
31 #include <sys/machcpuvar.h>
32 #include <sys/intreg.h>
33 #include <sys/ivintr.h>
34
35 #ifdef TRAPTRACE
36 #include <sys/traptrace.h>
37 #endif /* TRAPTRACE */
38
39
40 /*
41 * (TT 0x40..0x4F, TL>0) Interrupt Level N Handler (N == 1..15)
42 * Register passed from LEVEL_INTERRUPT(level)
43 * %g4 - interrupt request level
44 */
45 ENTRY_NP(pil_interrupt)
46 !
47 ! Register usage
48 ! %g1 - cpu
49 ! %g2 - pointer to intr_vec_t (iv)
50 ! %g4 - pil
51 ! %g3, %g5, %g6, %g7 - temps
52 !
53 ! Grab the first or list head intr_vec_t off the intr_head[pil]
54 ! and panic immediately if list head is NULL. Otherwise, update
55 ! intr_head[pil] to next intr_vec_t on the list and clear softint
56 ! %clear_softint, if next intr_vec_t is NULL.
57 !
58 CPU_ADDR(%g1, %g5) ! %g1 = cpu
59 !
129 !
130 ! figure which handler to run and which %pil it starts at
131 ! intr_thread starts at DISP_LEVEL to prevent preemption
132 ! current_thread starts at PIL_MAX to protect cpu_intr_actv
133 !
134 mov %g4, %g3 ! %g3 = %g4, pil
135 cmp %g4, LOCK_LEVEL
136 bg,a,pt %xcc, 3f ! branch if pil > LOCK_LEVEL
137 mov PIL_MAX, %g4 ! %g4 = PIL_MAX (15)
138 sethi %hi(intr_thread), %g1 ! %g1 = intr_thread
139 mov DISP_LEVEL, %g4 ! %g4 = DISP_LEVEL (11)
140 ba,pt %xcc, sys_trap
141 or %g1, %lo(intr_thread), %g1
142 3:
143 sethi %hi(current_thread), %g1 ! %g1 = current_thread
144 ba,pt %xcc, sys_trap
145 or %g1, %lo(current_thread), %g1
146 SET_SIZE(pil_interrupt_common)
147 SET_SIZE(pil_interrupt)
148
149
150 _spurious:
151 .asciz "!interrupt 0x%x at level %d not serviced"
152
153 /*
154 * SERVE_INTR_PRE is called once, just before the first invocation
155 * of SERVE_INTR.
156 *
157 * Registers on entry:
158 *
159 * iv_p, cpu, regs: may be out-registers
160 * ls1, ls2: local scratch registers
161 * os1, os2, os3: scratch registers, may be out
162 */
163
164 #define SERVE_INTR_PRE(iv_p, cpu, ls1, ls2, os1, os2, os3, regs) \
165 mov iv_p, ls1; \
166 mov iv_p, ls2; \
167 SERVE_INTR_TRACE(iv_p, os1, os2, os3, regs);
168
169 /*
295 mov os3, os4; \
296 GET_TRACE_TICK(os2, os3); \
297 stxa os2, [os1 + TRAP_ENT_TICK]%asi; \
298 TRACE_SAVE_TL_GL_REGS(os1, os2); \
299 set TT_SERVE_INTR, os2; \
300 rdpr %pil, os3; \
301 or os2, os3, os2; \
302 stha os2, [os1 + TRAP_ENT_TT]%asi; \
303 stna %sp, [os1 + TRAP_ENT_SP]%asi; \
304 stna inum, [os1 + TRAP_ENT_TR]%asi; \
305 stna %g0, [os1 + TRAP_ENT_F1]%asi; \
306 stna %g0, [os1 + TRAP_ENT_F2]%asi; \
307 stna %g0, [os1 + TRAP_ENT_F3]%asi; \
308 stna %g0, [os1 + TRAP_ENT_F4]%asi; \
309 TRACE_NEXT(os1, os2, os3); \
310 wrpr %g0, os4, %pstate
311 #else /* TRAPTRACE */
312 #define SERVE_INTR_TRACE2(inum, os1, os2, os3, os4)
313 #endif /* TRAPTRACE */
314
315 #define INTRCNT_LIMIT 16
316
317 /*
318 * Handle an interrupt in a new thread.
319 * Entry:
320 * %o0 = pointer to regs structure
321 * %o1 = pointer to current intr_vec_t (iv) to be processed
322 * %o2 = pil
323 * %sp = on current thread's kernel stack
324 * %o7 = return linkage to trap code
325 * %g7 = current thread
326 * %pstate = normal globals, interrupts enabled,
327 * privileged, fp disabled
328 * %pil = DISP_LEVEL
329 *
330 * Register Usage
331 * %l0 = return linkage
332 * %l1 = pil
333 * %l2 - %l3 = scratch
334 * %l4 - %l7 = reserved for sys_trap
847 ldn [%o2 + CPU_INTR_THREAD], %o5 ! get list pointer
848 stn %o5, [THREAD_REG + T_LINK]
849 call swtch ! switch to best thread
850 stn THREAD_REG, [%o2 + CPU_INTR_THREAD] ! delay - put thread on list
851 ba,a,pt %xcc, . ! swtch() shouldn't return
852 SET_SIZE(intr_thread_exit)
853
854 .global ftrace_intr_thread_format_str
855 ftrace_intr_thread_format_str:
856 .asciz "intr_thread(): regs=0x%lx, int=0x%lx, pil=0x%lx"
857 #ifdef DEBUG
858 intr_thread_actv_bit_set:
859 .asciz "intr_thread(): cpu_intr_actv bit already set for PIL"
860 intr_thread_actv_bit_not_set:
861 .asciz "intr_thread(): cpu_intr_actv bit not set for PIL"
862 intr_thread_exit_actv_bit_set:
863 .asciz "intr_thread_exit(): cpu_intr_actv bit erroneously set for PIL"
864 intr_thread_t_intr_start_zero:
865 .asciz "intr_thread(): t_intr_start zero upon handler return"
866 #endif /* DEBUG */
867
868 ENTRY_NP(current_thread)
869
870 mov %o7, %l0
871 ldn [THREAD_REG + T_CPU], %o3
872
873 ldn [THREAD_REG + T_ONFAULT], %l2
874 brz,pt %l2, no_onfault ! branch if no onfault label set
875 nop
876 stn %g0, [THREAD_REG + T_ONFAULT]! clear onfault label
877 ldn [THREAD_REG + T_LOFAULT], %l3
878 stn %g0, [THREAD_REG + T_LOFAULT]! clear lofault data
879
880 sub %o2, LOCK_LEVEL + 1, %o5
881 sll %o5, CPTRSHIFT, %o5
882 add %o5, CPU_OFD, %o4 ! %o4 has on_fault data offset
883 stn %l2, [%o3 + %o4] ! save onfault label for pil %o2
884 add %o5, CPU_LFD, %o4 ! %o4 has lofault data offset
885 stn %l3, [%o3 + %o4] ! save lofault data for pil %o2
886
887 no_onfault:
1321 ! Enable interrupts and return
1322 jmp %l0 + 8
1323 wrpr %g0, %o2, %pil ! enable interrupts
1324 SET_SIZE(current_thread)
1325
1326
1327 #ifdef DEBUG
1328 current_thread_wrong_pil:
1329 .asciz "current_thread: unexpected pil level: %d"
1330 current_thread_actv_bit_set:
1331 .asciz "current_thread(): cpu_intr_actv bit already set for PIL"
1332 current_thread_actv_bit_not_set:
1333 .asciz "current_thread(): cpu_intr_actv bit not set for PIL"
1334 current_thread_nested_pil_zero:
1335 .asciz "current_thread(): timestamp zero for nested PIL %d"
1336 current_thread_timestamp_zero:
1337 .asciz "current_thread(): timestamp zero upon handler return"
1338 current_thread_nested_PIL_not_found:
1339 .asciz "current_thread: couldn't find nested high-level PIL"
1340 #endif /* DEBUG */
1341
1342 /*
1343 * Return a thread's interrupt level.
1344 * Since this isn't saved anywhere but in %l4 on interrupt entry, we
1345 * must dig it out of the save area.
1346 *
1347 * Caller 'swears' that this really is an interrupt thread.
1348 *
1349 * int
1350 * intr_level(t)
1351 * kthread_id_t t;
1352 */
1353
!
! intr_level(t): return the interrupt thread's saved pil (t->t_pil).
! The ldub executes in the retl delay slot, loading the byte-sized
! T_PIL field into %o0 as the return value.
!
1354 ENTRY_NP(intr_level)
1355 retl
1356 ldub [%o0 + T_PIL], %o0 ! return saved pil
1357 SET_SIZE(intr_level)
1358
!
! disable_pil_intr(): raise %pil to PIL_MAX, masking interrupt levels 1-15.
! Returns the previous %pil in %o0 so the caller can later restore it with
! enable_pil_intr(). The wrpr executes in the retl delay slot.
!
1359 ENTRY_NP(disable_pil_intr)
1360 rdpr %pil, %o0
1361 retl
1362 wrpr %g0, PIL_MAX, %pil ! disable interrupts (1-15)
1363 SET_SIZE(disable_pil_intr)
1364
!
! enable_pil_intr(pil_save): restore %pil to the value previously returned
! by disable_pil_intr() (%o0). The wrpr executes in the retl delay slot.
!
1365 ENTRY_NP(enable_pil_intr)
1366 retl
1367 wrpr %o0, %pil
1368 SET_SIZE(enable_pil_intr)
1369
!
! disable_vec_intr(): clear PSTATE.IE to block vectored interrupts.
! Returns the previous %pstate in %o0 for enable_vec_intr() to restore.
! The wrpr executes in the retl delay slot.
!
1370 ENTRY_NP(disable_vec_intr)
1371 rdpr %pstate, %o0
1372 andn %o0, PSTATE_IE, %g1
1373 retl
1374 wrpr %g0, %g1, %pstate ! disable interrupt
1375 SET_SIZE(disable_vec_intr)
1376
!
! enable_vec_intr(pstate_save): restore %pstate (and thus PSTATE.IE)
! from the value returned by disable_vec_intr() (%o0).
!
1377 ENTRY_NP(enable_vec_intr)
1378 retl
1379 wrpr %g0, %o0, %pstate
1380 SET_SIZE(enable_vec_intr)
1381
!
! cbe_level14(): level-14 cyclic backend handler. If the pending softint
! came from the %tick/%stick compare (TICK_INT_MASK | STICK_INT_MASK set
! in SOFTINT), call cyclic_fire(cpu); otherwise fall through. Always
! returns 1 in %o0 via the restore in the ret delay slot.
!
1382 ENTRY_NP(cbe_level14)
1383 save %sp, -SA(MINFRAME), %sp ! get a new window
1384 !
1385 ! Make sure that this is from TICK_COMPARE; if not just return
1386 !
1387 rd SOFTINT, %l1
1388 set (TICK_INT_MASK | STICK_INT_MASK), %o2
1389 andcc %l1, %o2, %g0
1390 bz,pn %icc, 2f
1391 nop
1392
1393 CPU_ADDR(%o1, %o2)
1394 call cyclic_fire
1395 mov %o1, %o0 ! delay - arg0 = cpu
1396 2:
1397 ret
1398 restore %g0, 1, %o0 ! return 1
1399 SET_SIZE(cbe_level14)
1400
1401
1402 ENTRY_NP(kdi_setsoftint)
! kmdb variant of setsoftint: queue an intr_vec_t on this cpu's softint
! priority list and post the softint, with interrupts disabled for the
! duration.
1403 save %sp, -SA(MINFRAME), %sp ! get a new window
1404 rdpr %pstate, %l5
1405 andn %l5, PSTATE_IE, %l1
1406 wrpr %l1, %pstate ! disable interrupt
1407 !
1408 ! We have a pointer to an interrupt vector data structure.
1409 ! Put the request on the cpu's softint priority list and
1410 ! set %set_softint.
1411 !
1412 ! Register usage
1413 ! %i0 - pointer to intr_vec_t (iv)
1414 ! %l2 - requested pil
1415 ! %l4 - cpu
1416 ! %l5 - pstate
1417 ! %l1, %l3, %l6 - temps
1418 !
1419 ! check if a softint is pending for this softint,
1420 ! if one is pending, don't bother queuing another.
1421 !
! NOTE(review): the list lookup that sets up %l0 (pil offset), %l2, %l3
! (tail's next-pointer slot) and %l4 (cpu) is above this store in the
! full source -- confirm there.
1455 stn %i0, [%l3] ! [%l3] = iv, set pil_next field
1456 2:
1457 !
1458 ! no pending intr_vec_t; make intr_vec_t as new head
1459 !
1460 add %l4, INTR_HEAD, %l6 ! %l6 = &cpu->m_cpu.intr_head[pil]
1461 stn %i0, [%l6 + %l0] ! cpu->m_cpu.intr_head[pil] = iv
1462 3:
1463 !
1464 ! Write %set_softint with (1<<pil) to cause a "pil" level trap
1465 !
1466 mov 1, %l1 ! %l1 = 1
1467 sll %l1, %l2, %l1 ! %l1 = 1 << pil
1468 wr %l1, SET_SOFTINT ! trigger required pil softint
1469 4:
1470 wrpr %g0, %l5, %pstate ! %pstate = saved %pstate (in %l5)
1471 ret
1472 restore
1473 SET_SIZE(kdi_setsoftint)
1474
1475 !
1476 ! Register usage
1477 ! Arguments:
1478 ! %g1 - Pointer to intr_vec_t (iv)
1479 !
1480 ! Internal:
1481 ! %g2 - pil
1482 ! %g4 - cpu
1483 ! %g3,%g5-g7 - temps
1484 !
1485 ENTRY_NP(setsoftint_tl1)
! TL>0 entry: queue the intr_vec_t in %g1 on this cpu's softint list at
! iv->iv_pil and post the softint, then retry the trapped instruction.
1486 !
1487 ! We have a pointer to an interrupt vector data structure.
1488 ! Put the request on the cpu's softint priority list and
1489 ! set %set_softint.
1490 !
1491 CPU_ADDR(%g4, %g2) ! %g4 = cpu
1492 lduh [%g1 + IV_PIL], %g2 ! %g2 = iv->iv_pil
1493
1494 !
! NOTE(review): trap-trace record setup (%g5 = trace entry, %g7 = pil
! offset) lives under the matching #ifdef TRAPTRACE above -- see the
! full source.
1539 ldn [%g1 + IV_PIL_NEXT], %g6 !
1540 stna %g6, [%g5 + TRAP_ENT_F1]%asi ! trap_f1 = iv->iv_pil_next
1541 add %g4, INTR_HEAD, %g6
1542 ldn [%g6 + %g7], %g6 ! %g6=cpu->m_cpu.intr_head[pil]
1543 stna %g6, [%g5 + TRAP_ENT_F2]%asi ! trap_f2 = intr_head[pil]
1544 add %g4, INTR_TAIL, %g6
1545 ldn [%g6 + %g7], %g6 ! %g6=cpu->m_cpu.intr_tail[pil]
1546 stna %g6, [%g5 + TRAP_ENT_F3]%asi ! trap_f3 = intr_tail[pil]
1547 stna %g2, [%g5 + TRAP_ENT_F4]%asi ! trap_f4 = pil
1548 TRACE_NEXT(%g5, %g6, %g3)
1549 #endif /* TRAPTRACE */
1550 !
1551 ! Write %set_softint with (1<<pil) to cause a "pil" level trap
1552 !
1553 mov 1, %g5 ! %g5 = 1
1554 sll %g5, %g2, %g5 ! %g5 = 1 << pil
1555 wr %g5, SET_SOFTINT ! trigger required pil softint
1556 retry ! return from TL>0 trap
1557 SET_SIZE(setsoftint_tl1)
1558
1559 !
1560 ! Register usage
1561 ! Arguments:
1562 ! %g1 - inumber
1563 !
1564 ! Internal:
1565 ! %g1 - softint pil mask
1566 ! %g2 - pil of intr_vec_t
1567 ! %g3 - pointer to current intr_vec_t (iv)
1568 ! %g4 - cpu
1569 ! %g5, %g6,%g7 - temps
1570 !
1571 ENTRY_NP(setvecint_tl1)
! TL>0 entry: given an inumber in %g1, queue the corresponding
! intr_vec_t chain and post softints for each pil encountered; a bad
! inumber panics via sys_trap(no_ivintr).
1572 !
1573 ! Verify the inumber received (should be inum < MAXIVNUM).
1574 !
1575 set MAXIVNUM, %g2
1576 cmp %g1, %g2
1577 bgeu,pn %xcc, .no_ivintr ! inum out of range
1578 clr %g2 ! expected in .no_ivintr
! NOTE(review): the loop body above (label 0:) that walks the iv chain,
! queues each iv, and fills the TRAPTRACE record is elided in this
! excerpt; on entry below, %g1 = softint pil mask accumulator, %g2 = pil,
! %g3 = current iv -- confirm in full source.
1671 TRACE_NEXT(%g5, %g6, %g7)
1672 #endif /* TRAPTRACE */
1673 mov 1, %g6 ! %g6 = 1
1674 sll %g6, %g2, %g6 ! %g6 = 1 << pil
1675 or %g1, %g6, %g1 ! %g1 |= (1 << pil), pil mask
1676 ldn [%g3 + IV_VEC_NEXT], %g3 ! %g3 = pointer to next intr_vec_t (iv)
1677 brnz,pn %g3, 0b ! iv->iv_vec_next is non NULL, goto 0b
1678 nop
1679 wr %g1, SET_SOFTINT ! triggered one or more pil softints
1680 retry ! return from TL>0 trap
1681
1682 .no_ivintr:
1683 ! no_ivintr: arguments: rp, inum (%g1), pil (%g2 == 0)
1684 mov %g2, %g3 ! arg2 = pil (0)
1685 mov %g1, %g2 ! arg1 = bad inum
1686 set no_ivintr, %g1 ! trap handler = no_ivintr()
1687 ba,pt %xcc, sys_trap
1688 mov PIL_15, %g4 ! delay - run handler at PIL 15
1689 SET_SIZE(setvecint_tl1)
1690
1691 ENTRY_NP(wr_clr_softint)
! Clear the softint bits given in %o0 by writing the CLEAR_SOFTINT
! ancillary state register.
1692 retl
1693 wr %o0, CLEAR_SOFTINT
1694 SET_SIZE(wr_clr_softint)
1695
1696 /*
1697 * intr_enqueue_req
1698 *
1699 * %o0 - pil
1700 * %o1 - pointer to intr_vec_t (iv)
1701 * %o5 - preserved
1702 * %g5 - preserved
1703 */
1704 ENTRY_NP(intr_enqueue_req)
! Append intr_vec_t %o1 to this cpu's softint list for pil %o0.
! Does not write SET_SOFTINT; the caller posts the softint.
1705 !
1706 CPU_ADDR(%g4, %g1) ! %g4 = cpu
1707
1708 !
1709 ! Insert intr_vec_t (iv) to appropriate cpu's softint priority list
1710 !
1711 sll %o0, CPTRSHIFT, %o0 ! %o0 = offset to pil entry
1712 add %g4, INTR_TAIL, %g6 ! %g6 = &cpu->m_cpu.intr_tail
1713 ldn [%o0 + %g6], %g1 ! %g1 = cpu->m_cpu.intr_tail[pil]
1714 ! current tail (ct)
1715 brz,pt %g1, 2f ! branch if current tail is NULL
! NOTE(review): in the elided lines above, %g3 is pointed at the tail's
! next field (iv_pil_next, or iv_xpil_next for the multi-target case
! below) -- confirm in full source.
1725 ld [%g4 + CPU_ID], %g6 ! for multi target softint, use cpuid
1726 sll %g6, CPTRSHIFT, %g6 ! calculate offset address from cpuid
1727 add %g3, %g6, %g3 ! %g3 = &ct->iv_xpil_next[cpuid]
1728 1:
1729 !
1730 ! update old tail
1731 !
1732 ba,pt %xcc, 3f
1733 stn %o1, [%g3] ! [%g3] = iv, set pil_next field
1734 2:
1735 !
1736 ! no intr_vec_t's queued so make intr_vec_t as new head
1737 !
1738 add %g4, INTR_HEAD, %g6 ! %g6 = &cpu->m_cpu.intr_head[pil]
1739 stn %o1, [%g6 + %o0] ! cpu->m_cpu.intr_head[pil] = iv
1740 3:
1741 retl
1742 nop
1743 SET_SIZE(intr_enqueue_req)
1744
1745 /*
1746 * Set CPU's base SPL level, based on which interrupt levels are active.
1747 * Called at spl7 or above.
1748 */
1749
1750 ENTRY_NP(set_base_spl)
! Recompute cpu_base_spl from the cpu_intr_actv mask: the highest
! active interrupt level becomes the new base priority.
1751 ldn [THREAD_REG + T_CPU], %o2 ! load CPU pointer
1752 ld [%o2 + CPU_INTR_ACTV], %o5 ! load active interrupts mask
1753
1754 /*
1755 * WARNING: non-standard calling sequence; do not call from C
1756 * %o2 = pointer to CPU
1757 * %o5 = updated CPU_INTR_ACTV
1758 */
1759 _intr_set_spl: ! intr_thread_exit enters here
1760 !
1761 ! Determine highest interrupt level active. Several could be blocked
1762 ! at higher levels than this one, so must convert flags to a PIL
1763 ! Normally nothing will be blocked, so test this first.
1764 !
1765 brz,pt %o5, 1f ! nothing active
1766 sra %o5, 11, %o3 ! delay - set %o3 to bits 15-11
1767 set _intr_flag_table, %o1
1768 tst %o3 ! see if any of the bits set
1769 ldub [%o1 + %o3], %o3 ! load bit number
! NOTE(review): the elided lines fold the remaining five-bit groups of
! %o5 through _intr_flag_table to find the top set bit -- confirm in
! full source.
1780 ldub [%o1 + %o3], %o3
1781
1782 !
1783 ! highest interrupt level number active is in %o3
1784 !
1785 1:
1786 retl
1787 st %o3, [%o2 + CPU_BASE_SPL] ! delay - store base priority
1788 SET_SIZE(set_base_spl)
1789
1790 /*
1791 * Table that finds the most significant bit set in a five bit field.
1792 * Each entry is the high-order bit number + 1 of its index in the table.
1793 * This read-only data is in the text segment.
1794 */
1795 _intr_flag_table:
1796 .byte 0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4
1797 .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
1798 .align 4
1799
1800 /*
1801 * int
1802 * intr_passivate(from, to)
1803 * kthread_id_t from; interrupt thread
1804 * kthread_id_t to; interrupted thread
1805 */
1806
1807 ENTRY_NP(intr_passivate)
! Copy the interrupted thread's register window from the interrupt
! thread's stack save area back onto the interrupted thread's stack,
! clear the fp slot in the save area, and return the interrupt
! thread's saved pil.
! In:  %i0 = from (interrupt thread); %i1 = to (interrupted thread,
!      per the C prototype above)
! Out: %i0 = from->t_pil
1808 save %sp, -SA(MINFRAME), %sp ! get a new window
1809
1810 flushw ! force register windows to stack
1811 !
1812 ! restore registers from the base of the stack of the interrupt thread.
1813 !
1814 ldn [%i0 + T_STACK], %i2 ! get stack save area pointer
1815 ldn [%i2 + (0*GREGSIZE)], %l0 ! load locals
1816 ldn [%i2 + (1*GREGSIZE)], %l1
1817 ldn [%i2 + (2*GREGSIZE)], %l2
1818 ldn [%i2 + (3*GREGSIZE)], %l3
1819 ldn [%i2 + (4*GREGSIZE)], %l4
1820 ldn [%i2 + (5*GREGSIZE)], %l5
1821 ldn [%i2 + (6*GREGSIZE)], %l6
1822 ldn [%i2 + (7*GREGSIZE)], %l7
1823 ldn [%i2 + (8*GREGSIZE)], %o0 ! put ins from stack in outs
1824 ldn [%i2 + (9*GREGSIZE)], %o1
1825 ldn [%i2 + (10*GREGSIZE)], %o2
1826 ldn [%i2 + (11*GREGSIZE)], %o3
! NOTE(review): loads of save-area entries 12-15 and the computation of
! %i3 (the interrupted thread's stack pointer) are in the elided lines
! above the stores -- confirm in full source.
1843 stn %l6, [%i3 + STACK_BIAS + (6*GREGSIZE)]
1844 stn %l7, [%i3 + STACK_BIAS + (7*GREGSIZE)]
1845 stn %o0, [%i3 + STACK_BIAS + (8*GREGSIZE)] ! save ins using outs
1846 stn %o1, [%i3 + STACK_BIAS + (9*GREGSIZE)]
1847 stn %o2, [%i3 + STACK_BIAS + (10*GREGSIZE)]
1848 stn %o3, [%i3 + STACK_BIAS + (11*GREGSIZE)]
1849 stn %o4, [%i3 + STACK_BIAS + (12*GREGSIZE)]
1850 stn %o5, [%i3 + STACK_BIAS + (13*GREGSIZE)]
1851 stn %i4, [%i3 + STACK_BIAS + (14*GREGSIZE)]
1852 ! fp, %i7 copied using %i4
1853 stn %i5, [%i3 + STACK_BIAS + (15*GREGSIZE)]
1854 stn %g0, [%i2 + ((8+6)*GREGSIZE)]
1855 ! clear fp in save area
1856
1857 ! load saved pil for return
1858 ldub [%i0 + T_PIL], %i0
1859 ret
1860 restore
1861 SET_SIZE(intr_passivate)
1862
1863 ENTRY_NP(intr_get_time)
! Return the interrupt time accumulated since the last call and reset
! the accumulator (cpu_m.intrstat[pil][1] is made to match [0]).
! Lowers %pil back to max(saved pil, cpu_base_spl) on return.
1864 #ifdef DEBUG
1865 !
1866 ! Lots of asserts, but just check panic_quiesce first.
1867 ! Don't bother with lots of tests if we're just ignoring them.
1868 !
1869 sethi %hi(panic_quiesce), %o0
1870 ld [%o0 + %lo(panic_quiesce)], %o0
1871 brnz,pn %o0, 2f ! skip asserts while panicking
1872 nop
1873 !
1874 ! ASSERT(%pil <= LOCK_LEVEL)
1875 !
1876 rdpr %pil, %o1
1877 cmp %o1, LOCK_LEVEL
1878 ble,pt %xcc, 0f
1879 sethi %hi(intr_get_time_high_pil), %o0 ! delay
1880 call panic
1881 or %o0, %lo(intr_get_time_high_pil), %o0
1882 0:
! NOTE(review): further DEBUG asserts and the main time computation are
! elided here; on entry below, %o1 = pil, %o2 = current timestamp,
! %o3 = &cpu_m.intrstat[pil][0], %o5 = cpu -- confirm in full source.
1951 stx %o2, [%o3]
1952 ldx [%o3 + 8], %o4 ! %o4 = cpu_m.intrstat[pil][1]
1953 sub %o2, %o4, %o0 ! %o0 is elapsed time since %o4
1954 stx %o2, [%o3 + 8] ! make [1] match [0], resetting time
1955
1956 ld [%o5 + CPU_BASE_SPL], %o2 ! restore %pil to the greater
1957 cmp %o2, %o1 ! of either our pil %o1 or
1958 movl %xcc, %o1, %o2 ! cpu_base_spl.
1959 retl
1960 wrpr %g0, %o2, %pil
1961 SET_SIZE(intr_get_time)
1962
1963 #ifdef DEBUG
!
! Panic format strings used by the DEBUG assertions in intr_get_time().
!
1964 intr_get_time_high_pil:
1965 .asciz "intr_get_time(): %pil > LOCK_LEVEL"
1966 intr_get_time_not_intr:
1967 .asciz "intr_get_time(): not called from an interrupt thread"
1968 intr_get_time_no_start_time:
1969 .asciz "intr_get_time(): t_intr_start == 0"
1970 #endif /* DEBUG */
|