12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 * Copyright (c) 2016 by Delphix. All rights reserved.
25 */
26
27 /*
28 * SFMMU primitives. These primitives should only be used by sfmmu
29 * routines.
30 */
31
32 #if defined(lint)
33 #include <sys/types.h>
34 #else /* lint */
35 #include "assym.h"
36 #endif /* lint */
37
38 #include <sys/asm_linkage.h>
39 #include <sys/machtrap.h>
40 #include <sys/machasi.h>
41 #include <sys/sun4asi.h>
42 #include <sys/pte.h>
43 #include <sys/mmu.h>
44 #include <vm/hat_sfmmu.h>
45 #include <vm/seg_spt.h>
46 #include <sys/machparam.h>
47 #include <sys/privregs.h>
48 #include <sys/scb.h>
49 #include <sys/intreg.h>
50 #include <sys/machthread.h>
51 #include <sys/intr.h>
52 #include <sys/clock.h>
53 #include <sys/trapstat.h>
54
55 #ifdef TRAPTRACE
56 #include <sys/traptrace.h>
57
58 /*
59 * Tracing macro. Adds two instructions if TRAPTRACE is defined.
60 */
61 #define TT_TRACE(label) \
62 ba label ;\
63 rd %pc, %g7
64 #else
65
66 #define TT_TRACE(label)
67
68 #endif /* TRAPTRACE */
69
70 #ifndef lint
71
72 #if (TTE_SUSPEND_SHIFT > 0)
73 #define TTE_SUSPEND_INT_SHIFT(reg) \
74 sllx reg, TTE_SUSPEND_SHIFT, reg
75 #else
76 #define TTE_SUSPEND_INT_SHIFT(reg)
77 #endif
78
79 #endif /* lint */
80
81 #ifndef lint
82
83 /*
84 * Assumes TSBE_TAG is 0
85 * Assumes TSBE_INTHI is 0
86 * Assumes TSBREG.split is 0
87 */
88
89 #if TSBE_TAG != 0
90 #error "TSB_UPDATE and TSB_INVALIDATE assume TSBE_TAG = 0"
91 #endif
92
93 #if TSBTAG_INTHI != 0
94 #error "TSB_UPDATE and TSB_INVALIDATE assume TSBTAG_INTHI = 0"
95 #endif
96
97 /*
98 * The following code assumes the tsb is not split.
99 *
100 * With TSBs no longer shared between processes, it's no longer
101 * necessary to hash the context bits into the tsb index to get
102 * tsb coloring; the new implementation treats the TSB as a
453 #error - TSB_SOFTSZ_MASK too small
454 #endif
455
456
457 /*
458 * An implementation of setx which will be hot patched at run time.
459 * Since it is being hot patched, there is no value passed in.
460 * Thus, essentially we are implementing
461 * setx value, tmp, dest
462 * where value is RUNTIME_PATCH (aka 0) in this case.
463 */
464 #define RUNTIME_PATCH_SETX(dest, tmp) \
465 sethi %hh(RUNTIME_PATCH), tmp ;\
466 sethi %lm(RUNTIME_PATCH), dest ;\
467 or tmp, %hm(RUNTIME_PATCH), tmp ;\
468 or dest, %lo(RUNTIME_PATCH), dest ;\
469 sllx tmp, 32, tmp ;\
470 nop /* for perf reasons */ ;\
471 or tmp, dest, dest /* contents of patched value */
472
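The macro above reassembles a 64-bit constant from the four instruction-immediate fields that are rewritten when the code is hot patched. As a rough C model only (assuming the usual SPARC %hh/%hm/%lm/%lo operator split of bits 63:42, 41:32, 31:10 and 9:0; the function name is illustrative), the patched sequence computes:

    #include <stdint.h>

    /* Illustrative model of the value RUNTIME_PATCH_SETX leaves in "dest". */
    static uint64_t
    runtime_patch_setx_model(uint64_t value)
    {
        uint64_t hh = (value >> 42) & 0x3fffff; /* sethi %hh(value), tmp */
        uint64_t hm = (value >> 32) & 0x3ff;    /* or tmp, %hm(value), tmp */
        uint64_t lm = (value >> 10) & 0x3fffff; /* sethi %lm(value), dest */
        uint64_t lo = value & 0x3ff;            /* or dest, %lo(value), dest */
        uint64_t tmp = (hh << 10) | hm;         /* upper 32 bits of value */
        uint64_t dest = (lm << 10) | lo;        /* lower 32 bits of value */

        return ((tmp << 32) | dest);            /* sllx tmp, 32; or tmp, dest, dest */
    }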
473 #endif /* lint */
474
475
476 #if defined (lint)
477
478 /*
479 * sfmmu related subroutines
480 */
481 uint_t
482 sfmmu_disable_intrs()
483 { return(0); }
484
485 /* ARGSUSED */
486 void
487 sfmmu_enable_intrs(uint_t pstate_save)
488 {}
489
490 /* ARGSUSED */
491 int
492 sfmmu_alloc_ctx(sfmmu_t *sfmmup, int allocflag, struct cpu *cp, int shflag)
493 { return(0); }
494
495 /*
496 * Use cas; if the tte has changed underneath us, reread and try again.
497 * In the case of a retry, sttep is updated with the new original.
498 */
499 /* ARGSUSED */
500 int
501 sfmmu_modifytte(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
502 { return(0); }
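A rough C model of the semantics described in the comment above, not the actual register-level implementation: each tte is treated here as its raw 64-bit image, and the generic atomic_cas_64() primitive stands in for the hand-coded casx. The function name is illustrative.

    #include <sys/types.h>
    #include <sys/atomic.h>

    /*
     * Swap the destination tte from the image in *sttep to the image in
     * *stmodttep; if the destination changed underneath us, record the
     * new original in *sttep and try again.
     */
    static void
    modifytte_model(uint64_t *sttep, uint64_t *stmodttep, uint64_t *dttep)
    {
        uint64_t seen;

        for (;;) {
            seen = atomic_cas_64(dttep, *sttep, *stmodttep);
            if (seen == *sttep)
                break;          /* destination still matched; done */
            *sttep = seen;      /* remember the new original, retry */
        }
    }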
503
504 /*
505 * Use cas; if the tte has changed underneath us, return 1, else return 0.
506 */
507 /* ARGSUSED */
508 int
509 sfmmu_modifytte_try(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
510 { return(0); }
511
512 /* ARGSUSED */
513 void
514 sfmmu_copytte(tte_t *sttep, tte_t *dttep)
515 {}
516
517 /*ARGSUSED*/
518 struct tsbe *
519 sfmmu_get_tsbe(uint64_t tsbeptr, caddr_t vaddr, int vpshift, int tsb_szc)
520 { return(0); }
521
522 /*ARGSUSED*/
523 uint64_t
524 sfmmu_make_tsbtag(caddr_t va)
525 { return(0); }
526
527 #else /* lint */
528
529 .seg ".data"
530 .global sfmmu_panic1
531 sfmmu_panic1:
532 .asciz "sfmmu_asm: interrupts already disabled"
533
534 .global sfmmu_panic3
535 sfmmu_panic3:
536 .asciz "sfmmu_asm: sfmmu_vatopfn called for user"
537
538 .global sfmmu_panic4
539 sfmmu_panic4:
540 .asciz "sfmmu_asm: 4M tsb pointer mis-match"
541
542 .global sfmmu_panic5
543 sfmmu_panic5:
544 .asciz "sfmmu_asm: no unlocked TTEs in TLB 0"
545
546 .global sfmmu_panic6
547 sfmmu_panic6:
548 .asciz "sfmmu_asm: interrupts not disabled"
914 * %o2 = vpshift (in)
915 * %o3 = tsb size code (in)
916 * %o4 = scratch register
917 */
918 ENTRY_NP(sfmmu_get_tsbe)
919 GET_TSBE_POINTER(%o2, %o0, %o1, %o3, %o4)
920 retl
921 nop
922 SET_SIZE(sfmmu_get_tsbe)
923
924 /*
925 * Return a TSB tag for the given va.
926 * %o0 = va (in/clobbered)
927 * %o0 = va shifted to be in tsb tag format (with no context) (out)
928 */
929 ENTRY_NP(sfmmu_make_tsbtag)
930 retl
931 srln %o0, TTARGET_VA_SHIFT, %o0
932 SET_SIZE(sfmmu_make_tsbtag)
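In C terms the routine is just a shift; a minimal sketch (the function name is illustrative, and TTARGET_VA_SHIFT comes from the hat headers this file already includes):

    static uint64_t
    make_tsbtag_model(uintptr_t va)
    {
        /* tag target = VA shifted into tsb tag format, with no context bits */
        return ((uint64_t)va >> TTARGET_VA_SHIFT);
    }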
933
934 #endif /* lint */
935
936 /*
937 * Other sfmmu primitives
938 */
939
940
941 #if defined (lint)
942 void
943 sfmmu_patch_ktsb(void)
944 {
945 }
946
947 void
948 sfmmu_kpm_patch_tlbm(void)
949 {
950 }
951
952 void
953 sfmmu_kpm_patch_tsbm(void)
954 {
955 }
956
957 void
958 sfmmu_patch_shctx(void)
959 {
960 }
961
962 /* ARGSUSED */
963 void
964 sfmmu_load_tsbe(struct tsbe *tsbep, uint64_t vaddr, tte_t *ttep, int phys)
965 {
966 }
967
968 /* ARGSUSED */
969 void
970 sfmmu_unload_tsbe(struct tsbe *tsbep, uint64_t vaddr, int phys)
971 {
972 }
973
974 /* ARGSUSED */
975 void
976 sfmmu_kpm_load_tsb(caddr_t addr, tte_t *ttep, int vpshift)
977 {
978 }
979
980 /* ARGSUSED */
981 void
982 sfmmu_kpm_unload_tsb(caddr_t addr, int vpshift)
983 {
984 }
985
986 #else /* lint */
987
988 #define I_SIZE 4
989
990 ENTRY_NP(sfmmu_fix_ktlb_traptable)
991 /*
992 * %o0 = start of patch area
993 * %o1 = size code of TSB to patch
994 * %o3 = scratch
995 */
996 /* fix sll */
997 ld [%o0], %o3 /* get sll */
998 sub %o3, %o1, %o3 /* decrease shift by tsb szc */
999 st %o3, [%o0] /* write sll */
1000 flush %o0
1001 /* fix srl */
1002 add %o0, I_SIZE, %o0 /* goto next instr. */
1003 ld [%o0], %o3 /* get srl */
1004 sub %o3, %o1, %o3 /* decrease shift by tsb szc */
1005 st %o3, [%o0] /* write srl */
1006 retl
1007 flush %o0
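Sketched in C, the patch simply subtracts the TSB size code from each of the two instruction words; this works because the shift count occupies the low-order bits of a SPARC sll/srl encoding. A model only (the function name is illustrative, and the real routine also flushes the I-cache after each store, as shown above):

    #include <stdint.h>

    static void
    fix_ktlb_traptable_model(uint32_t *instrp, uint32_t tsb_szc)
    {
        instrp[0] -= tsb_szc;   /* sll: decrease shift amount by tsb szc */
        instrp[1] -= tsb_szc;   /* srl: next instruction (I_SIZE bytes later) */
    }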
1545 sethi %hi(ktsb_phys), %o4
1546 mov ASI_N, %o3
1547 ld [%o4 + %lo(ktsb_phys)], %o4
1548 movrnz %o4, ASI_MEM, %o3
1549 mov %o3, %asi
1550 #endif /* !sun4v */
1551 mov %o0, %g1 ! %g1 = vaddr
1552
1553 /* GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr (clobbers), tmp1, tmp2) */
1554 GET_KPM_TSBE_POINTER(%o1, %g2, %g1, %o3, %o4)
1555 /* %g2 = tsbep, %g1 clobbered */
1556
1557 srlx %o0, TTARGET_VA_SHIFT, %g1; ! %g1 = tag target
1558 /* TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label) */
1559 TSB_INVALIDATE(%g2, %g1, %o3, %o4, %o1, kpm_tsbinval)
1560
1561 retl
1562 membar #StoreStore|#StoreLoad
1563 SET_SIZE(sfmmu_kpm_unload_tsb)
1564
1565 #endif /* lint */
1566
1567
1568 #if defined (lint)
1569
1570 /*ARGSUSED*/
1571 pfn_t
1572 sfmmu_ttetopfn(tte_t *tte, caddr_t vaddr)
1573 { return(0); }
1574
1575 #else /* lint */
1576
1577 ENTRY_NP(sfmmu_ttetopfn)
1578 ldx [%o0], %g1 /* read tte */
1579 TTETOPFN(%g1, %o1, sfmmu_ttetopfn_l1, %g2, %g3, %g4)
1580 /*
1581 * g1 = pfn
1582 */
1583 retl
1584 mov %g1, %o0
1585 SET_SIZE(sfmmu_ttetopfn)
1586
1587 #endif /* !lint */
1588
1589 /*
1590 * These macros are used to update global sfmmu hme hash statistics
1591 * in perf critical paths. They are only enabled in debug kernels or
1592 * if SFMMU_STAT_GATHER is defined.
1593 */
1594 #if defined(DEBUG) || defined(SFMMU_STAT_GATHER)
1595 #define HAT_HSEARCH_DBSTAT(hatid, tsbarea, tmp1, tmp2) \
1596 ldn [tsbarea + TSBMISS_KHATID], tmp1 ;\
1597 mov HATSTAT_KHASH_SEARCH, tmp2 ;\
1598 cmp tmp1, hatid ;\
1599 movne %ncc, HATSTAT_UHASH_SEARCH, tmp2 ;\
1600 set sfmmu_global_stat, tmp1 ;\
1601 add tmp1, tmp2, tmp1 ;\
1602 ld [tmp1], tmp2 ;\
1603 inc tmp2 ;\
1604 st tmp2, [tmp1]
1605
1606 #define HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2) \
1607 ldn [tsbarea + TSBMISS_KHATID], tmp1 ;\
1608 mov HATSTAT_KHASH_LINKS, tmp2 ;\
1664 CPU_INDEX(tmp1, tsbma) ;\
1665 sethi %hi(kpmtsbm_area), tsbma ;\
1666 sllx tmp1, KPMTSBM_SHIFT, tmp1 ;\
1667 or tsbma, %lo(kpmtsbm_area), tsbma ;\
1668 add tsbma, tmp1, tsbma /* kpmtsbm area */ ;\
1669 /* VA range check */ ;\
1670 ldx [tsbma + KPMTSBM_VBASE], val ;\
1671 cmp tagacc, val ;\
1672 blu,pn %xcc, label ;\
1673 ldx [tsbma + KPMTSBM_VEND], tmp1 ;\
1674 cmp tagacc, tmp1 ;\
1675 bgeu,pn %xcc, label ;\
1676 lduw [tsbma + KPMTSBM_DTLBMISS], val ;\
1677 inc val ;\
1678 st val, [tsbma + KPMTSBM_DTLBMISS] ;\
1679 label:
1680 #else
1681 #define KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label)
1682 #endif /* KPM_TLBMISS_STATS_GATHER */
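A rough C rendering of KPM_TLBMISS_STAT_INCR, with a local struct standing in for the KPMTSBM_VBASE/KPMTSBM_VEND/KPMTSBM_DTLBMISS offsets; the struct, its member names, and the function name are illustrative, not the real kpmtsbm_t layout:

    #include <sys/types.h>

    struct kpmtsbm_model {      /* illustrative subset of the kpmtsbm area */
        uintptr_t vbase;        /* KPMTSBM_VBASE */
        uintptr_t vend;         /* KPMTSBM_VEND */
        uint32_t dtlbmiss;      /* KPMTSBM_DTLBMISS */
    };

    static void
    kpm_tlbmiss_stat_incr_model(struct kpmtsbm_model *area, uint_t cpuid,
        uintptr_t tagacc)
    {
        struct kpmtsbm_model *tsbma = &area[cpuid];  /* per-CPU kpmtsbm area */

        /* VA range check: only count misses that fall inside the kpm range */
        if (tagacc < tsbma->vbase || tagacc >= tsbma->vend)
            return;
        tsbma->dtlbmiss++;
    }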
1683
1684 #if defined (lint)
1685 /*
1686 * The following routines are jumped to from the mmu trap handlers to set
1687 * up the call to systrap. They are separate routines rather than part of
1688 * the handlers because the handlers would otherwise exceed 32
1689 * instructions, and since this is part of the slow path the cost of the
1690 * jump is irrelevant.
1691 */
1692 void
1693 sfmmu_pagefault(void)
1694 {
1695 }
1696
1697 void
1698 sfmmu_mmu_trap(void)
1699 {
1700 }
1701
1702 void
1703 sfmmu_window_trap(void)
1704 {
1705 }
1706
1707 void
1708 sfmmu_kpm_exception(void)
1709 {
1710 }
1711
1712 #else /* lint */
1713
1714 #ifdef PTL1_PANIC_DEBUG
1715 .seg ".data"
1716 .global test_ptl1_panic
1717 test_ptl1_panic:
1718 .word 0
1719 .align 8
1720
1721 .seg ".text"
1722 .align 4
1723 #endif /* PTL1_PANIC_DEBUG */
1724
1725
1726 ENTRY_NP(sfmmu_pagefault)
1727 SET_GL_REG(1)
1728 USE_ALTERNATE_GLOBALS(%g5)
1729 GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g6, %g4)
1730 rdpr %tt, %g6
1731 cmp %g6, FAST_IMMU_MISS_TT
1732 be,a,pn %icc, 1f
1733 mov T_INSTR_MMU_MISS, %g3
1943 stuh %g2, [%g1 + CPUC_DTRACE_FLAGS]
1944 GET_MMU_D_ADDR(%g3, /*scratch*/ %g4)
1945 stx %g3, [%g1 + CPUC_DTRACE_ILLVAL]
1946 done
1947 0:
1948 TSTAT_CHECK_TL1(1f, %g1, %g2)
1949 1:
1950 SET_GL_REG(1)
1951 USE_ALTERNATE_GLOBALS(%g5)
1952 GET_MMU_D_TAGACC(%g2 /* tagacc */, %g4 /*scratch*/)
1953 mov T_DATA_MMU_MISS, %g3 /* arg2 = traptype */
1954 /*
1955 * g2=tagacc g3.l=type g3.h=0
1956 */
1957 sethi %hi(trap), %g1
1958 or %g1, %lo(trap), %g1
1959 ba,pt %xcc, sys_trap
1960 mov -1, %g4
1961 SET_SIZE(sfmmu_kpm_exception)
1962
1963 #endif /* lint */
1964
1965 #if defined (lint)
1966
1967 void
1968 sfmmu_tsb_miss(void)
1969 {
1970 }
1971
1972 void
1973 sfmmu_kpm_dtsb_miss(void)
1974 {
1975 }
1976
1977 void
1978 sfmmu_kpm_dtsb_miss_small(void)
1979 {
1980 }
1981
1982 #else /* lint */
1983
1984 #if (IMAP_SEG != 0)
1985 #error - ism_map->ism_seg offset is not zero
1986 #endif
1987
1988 /*
1989 * Copies the ism mapping for this ctx into param "ism" if this is an ISM
1990 * tlb miss and branches to label "ismhit". If this is not an ISM
1991 * process or an ISM tlb miss it falls through.
1992 *
1993 * Checks to see if the vaddr passed in via tagacc is in an ISM segment for
1994 * this process.
1995 * If so, it will branch to label "ismhit". If not, it will fall through.
1996 *
1997 * Also hat_unshare() will set the context for this process to INVALID_CONTEXT
1998 * so that any other threads of this process will not try to walk the ism
1999 * maps while they are being changed.
2000 *
2001 * NOTE: We will never have any holes in our ISM maps. sfmmu_share/unshare
2002 * will make sure of that. This means we can terminate our search on
2003 * the first zero mapping we find.
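A schematic C version of the search this comment describes, relying on the no-holes invariant. The structures and names below are illustrative stand-ins for ism_blk_t/ism_map_t (whose real fields are packed differently), and the real handler walks physically addressed copies reached from the tsbmiss area:

    #include <sys/types.h>

    struct ism_map_model {          /* stand-in for ism_map_t */
        uintptr_t vbase;            /* start of ISM segment */
        size_t size;                /* length of ISM segment; 0 = unused */
    };

    struct ism_blk_model {          /* stand-in for ism_blk_t */
        struct ism_map_model *maps; /* array of "slots" entries */
        int slots;
        struct ism_blk_model *next;
    };

    static struct ism_map_model *
    ism_lookup_model(struct ism_blk_model *blkp, uintptr_t va)
    {
        int i;

        for (; blkp != NULL; blkp = blkp->next) {
            for (i = 0; i < blkp->slots; i++) {
                struct ism_map_model *map = &blkp->maps[i];

                if (map->size == 0)
                    return (NULL);  /* no holes: the search ends here */
                if (va >= map->vbase && va < map->vbase + map->size)
                    return (map);   /* ISM hit: branch to "ismhit" */
            }
        }
        return (NULL);              /* fall through: not an ISM miss */
    }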
3854 ldx [%g2 + MMFSA_I_CTX], %g3
3855 ldx [%g2 + MMFSA_D_CTX], %g3
3856 2:
3857 #else
3858 mov MMU_TAG_ACCESS, %g2
3859 be,a,pt %icc, 2f
3860 ldxa [%g2]ASI_IMMU, %g3
3861 ldxa [%g2]ASI_DMMU, %g3
3862 2: sllx %g3, TAGACC_CTX_LSHIFT, %g3
3863 #endif /* sun4v */
3864 brz,a,pn %g3, ptl1_panic ! panic if called for kernel
3865 mov PTL1_BAD_CTX_STEAL, %g1 ! since kernel ctx was stolen
3866 rdpr %tl, %g5
3867 cmp %g5, 1
3868 ble,pt %icc, sfmmu_mmu_trap
3869 nop
3870 TSTAT_CHECK_TL1(sfmmu_mmu_trap, %g1, %g2)
3871 ba,pt %icc, sfmmu_window_trap
3872 nop
3873 SET_SIZE(sfmmu_tsb_miss)
3874 #endif /* lint */
3875
3876 #if defined (lint)
3877 /*
3878 * This routine will look for a user or kernel vaddr in the hash
3879 * structure. It returns a valid pfn or PFN_INVALID. It doesn't
3880 * grab any locks. It should only be used by other sfmmu routines.
3881 */
3882 /* ARGSUSED */
3883 pfn_t
3884 sfmmu_vatopfn(caddr_t vaddr, sfmmu_t *sfmmup, tte_t *ttep)
3885 {
3886 return(0);
3887 }
3888
3889 /* ARGSUSED */
3890 pfn_t
3891 sfmmu_kvaszc2pfn(caddr_t vaddr, int hashno)
3892 {
3893 return(0);
3894 }
3895
3896 #else /* lint */
3897
3898 ENTRY_NP(sfmmu_vatopfn)
3899 /*
3900 * disable interrupts
3901 */
3902 rdpr %pstate, %o3
3903 #ifdef DEBUG
3904 PANIC_IF_INTR_DISABLED_PSTR(%o3, sfmmu_di_l5, %g1)
3905 #endif
3906 /*
3907 * disable interrupts to protect the TSBMISS area
3908 */
3909 andn %o3, PSTATE_IE, %o5
3910 wrpr %o5, 0, %pstate
3911
3912 /*
3913 * o0 = vaddr
3914 * o1 = sfmmup
3915 * o2 = ttep
3916 */
3917 CPU_TSBMISS_AREA(%g1, %o5)
4096 * %o0 = vaddr
4097 */
4098 brgez,a,pn %g3, 1f /* check if tte is invalid */
4099 mov -1, %o0 /* output = -1 (PFN_INVALID) */
4100 TTETOPFN(%g3, %o0, kvaszc2pfn_l2, %g2, %g4, %g5)
4101 /*
4102 * g3 = pfn
4103 */
4104 ba,pt %xcc, 1f
4105 mov %g3, %o0
4106
4107 kvaszc2pfn_nohblk:
4108 mov -1, %o0
4109
4110 1:
4111 retl
4112 wrpr %g0, %o3, %pstate /* re-enable interrupts */
4113
4114 SET_SIZE(sfmmu_kvaszc2pfn)
4115
4116 #endif /* lint */
4117
4118
4119
4120 #if !defined(lint)
4121
4122 /*
4123 * kpm lock used between trap level tsbmiss handler and kpm C level.
4124 */
4125 #define KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) \
4126 mov 0xff, tmp1 ;\
4127 label1: ;\
4128 casa [kpmlckp]asi, %g0, tmp1 ;\
4129 brnz,pn tmp1, label1 ;\
4130 mov 0xff, tmp1 ;\
4131 membar #LoadLoad
4132
4133 #define KPMLOCK_EXIT(kpmlckp, asi) \
4134 membar #LoadStore|#StoreStore ;\
4135 sta %g0, [kpmlckp]asi
4136
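In C, the pair above behaves like a byte spin lock. A minimal sketch using the generic atomic/membar primitives instead of casa/sta with an explicit ASI (function names are illustrative; the trap-level code must stay in assembly because it may address the lock physically through ASI_MEM):

    #include <sys/types.h>
    #include <sys/atomic.h>

    static void
    kpmlock_enter_model(volatile uint8_t *kpmlckp)
    {
        /* spin until we swap 0 -> 0xff, i.e. until the lock is free */
        while (atomic_cas_8(kpmlckp, 0, 0xff) != 0)
            continue;
        membar_enter();         /* the macro uses membar #LoadLoad */
    }

    static void
    kpmlock_exit_model(volatile uint8_t *kpmlckp)
    {
        membar_exit();          /* membar #LoadStore|#StoreStore */
        *kpmlckp = 0;           /* release the lock */
    }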
4137 /*
4138 * Look up a memseg for a given pfn and, if found, return the physical
4139 * address of the corresponding struct memseg in mseg, otherwise
4140 * return MSEG_NULLPTR_PA. The kpmtsbm pointer must be provided in
4141 * tsbmp; %asi is assumed to be ASI_MEM.
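As a virtually addressed C analogue of this lookup (a linear-walk sketch only; the assembly version works on physical addresses through ASI_MEM and returns MSEG_NULLPTR_PA rather than NULL, and the function name is illustrative):

    #include <sys/types.h>
    #include <vm/page.h>

    extern struct memseg *memsegs;      /* kernel's memseg list */

    static struct memseg *
    memseg_lookup_model(pfn_t pfn)
    {
        struct memseg *msp;

        for (msp = memsegs; msp != NULL; msp = msp->next) {
            if (pfn >= msp->pages_base && pfn < msp->pages_end)
                return (msp);           /* pfn falls in this memseg */
        }
        return (NULL);                  /* MSEG_NULLPTR_PA in the assembly */
    }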
4646 cmp %g7, %g6
4647 bgeu %xcc, 0f
4648 ALTENTRY(tsbmiss_trapstat_patch_point_kpm_small)
4649 add %g7, RUNTIME_PATCH, %g7 /* must match TSTAT_TSBMISS_INSTR */
4650 wrpr %g7, %tpc
4651 add %g7, 4, %g7
4652 wrpr %g7, %tnpc
4653 0:
4654 retry
4655 5:
4656 /* g3=hlck_pa */
4657 KPMLOCK_EXIT(%g3, ASI_MEM)
4658 ba,pt %icc, sfmmu_kpm_exception
4659 nop
4660 SET_SIZE(sfmmu_kpm_dtsb_miss_small)
4661
4662 #if (1 << KPMTSBM_SHIFT) != KPMTSBM_SIZE
4663 #error - KPMTSBM_SHIFT does not correspond to size of kpmtsbm struct
4664 #endif
4665
4666 #endif /* lint */
4667
4668 #ifdef lint
4669 /*
4670 * Enable/disable tsbmiss handling at trap level for a kpm (large) page.
4671 * Called from C-level, sets/clears "go" indication for trap level handler.
4672 * khl_lock is a low level spin lock to protect the kp_tsbmtl field.
4673 * It is assumed that &kp->kp_refcntc is checked for zero or -1 at C-level.
4674 * Assumes khl_mutex is held when called from C-level.
4675 */
4676 /* ARGSUSED */
4677 void
4678 sfmmu_kpm_tsbmtl(short *kp_refcntc, uint_t *khl_lock, int cmd)
4679 {
4680 }
4681
4682 /*
4683 * kpm_smallpages: stores val to the byte at address "mapped" within
4684 * low-level lock brackets. The old value is returned.
4685 * Called from C-level.
4686 */
4687 /* ARGSUSED */
4688 int
4689 sfmmu_kpm_stsbmtl(uchar_t *mapped, uint_t *kshl_lock, int val)
4690 {
4691 return (0);
4692 }
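A plain-C shape of this routine, assuming the byte lock behaves like the KPMLOCK_ENTER/KPMLOCK_EXIT macros defined later in this file (the function name is illustrative, the real routine also disables interrupts around the critical section, and KPM_MAPPED_MASK comes from the hat headers already included above):

    #include <sys/types.h>
    #include <sys/atomic.h>

    static int
    kpm_stsbmtl_model(volatile uchar_t *mapped, volatile uchar_t *kshl_lock,
        int val)
    {
        int old;

        while (atomic_cas_8(kshl_lock, 0, 0xff) != 0)   /* KPMLOCK_ENTER */
            continue;
        membar_enter();

        old = *mapped;                  /* fetch previous byte */
        *mapped = (uchar_t)val;         /* store new value */

        membar_exit();                  /* KPMLOCK_EXIT */
        *kshl_lock = 0;

        return (old & KPM_MAPPED_MASK); /* return old mapped state */
    }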
4693
4694 #else /* lint */
4695
4696 .seg ".data"
4697 sfmmu_kpm_tsbmtl_panic:
4698 .ascii "sfmmu_kpm_tsbmtl: interrupts disabled"
4699 .byte 0
4700 sfmmu_kpm_stsbmtl_panic:
4701 .ascii "sfmmu_kpm_stsbmtl: interrupts disabled"
4702 .byte 0
4703 .align 4
4704 .seg ".text"
4705
4706 ENTRY_NP(sfmmu_kpm_tsbmtl)
4707 rdpr %pstate, %o3
4708 /*
4709 * %o0 = &kp_refcntc
4710 * %o1 = &khl_lock
4711 * %o2 = 0/1 (off/on)
4712 * %o3 = pstate save
4713 */
4714 #ifdef DEBUG
4715 andcc %o3, PSTATE_IE, %g0 /* if interrupts already */
4752 save %sp, -SA(MINFRAME), %sp
4753 sethi %hi(sfmmu_kpm_stsbmtl_panic), %o0
4754 call panic
4755 or %o0, %lo(sfmmu_kpm_stsbmtl_panic), %o0
4756 ret
4757 restore
4758 1:
4759 #endif /* DEBUG */
4760 wrpr %o3, PSTATE_IE, %pstate /* disable interrupts */
4761
4762 KPMLOCK_ENTER(%o1, %o4, kpmstsbmtl1, ASI_N)
4763 ldsb [%o0], %o5
4764 stb %o2, [%o0]
4765 KPMLOCK_EXIT(%o1, ASI_N)
4766
4767 and %o5, KPM_MAPPED_MASK, %o0 /* return old val */
4768 retl
4769 wrpr %g0, %o3, %pstate /* enable interrupts */
4770 SET_SIZE(sfmmu_kpm_stsbmtl)
4771
4772 #endif /* lint */
4773
4774 #ifndef lint
4775 #ifdef sun4v
4776 /*
4777 * User/kernel data miss w/ multiple TSBs
4778 * The first probe covers 8K, 64K, and 512K page sizes,
4779 * because 64K and 512K mappings are replicated off the 8K
4780 * pointer. The second probe covers the 4M page size only.
4781 *
4782 * MMU fault area contains miss address and context.
4783 */
4784 ALTENTRY(sfmmu_slow_dmmu_miss)
4785 GET_MMU_D_PTAGACC_CTXTYPE(%g2, %g3) ! %g2 = ptagacc, %g3 = ctx type
4786
4787 slow_miss_common:
4788 /*
4789 * %g2 = tagacc register (needed for sfmmu_tsb_miss_tt)
4790 * %g3 = ctx (cannot be INVALID_CONTEXT)
4791 */
4792 brnz,pt %g3, 8f ! check for user context
4793 nop
4794
4836 9:
4837 ba,a,pt %xcc, sfmmu_tsb_miss_tt
4838 .empty
4839 SET_SIZE(sfmmu_slow_dmmu_miss)
4840
4841
4842 /*
4843 * User/kernel instruction miss w/ multiple TSBs
4844 * The first probe covers 8K, 64K, and 512K page sizes,
4845 * because 64K and 512K mappings are replicated off the 8K
4846 * pointer. The second probe covers the 4M page size only.
4847 *
4848 * MMU fault area contains miss address and context.
4849 */
4850 ALTENTRY(sfmmu_slow_immu_miss)
4851 GET_MMU_I_PTAGACC_CTXTYPE(%g2, %g3)
4852 ba,a,pt %xcc, slow_miss_common
4853 SET_SIZE(sfmmu_slow_immu_miss)
4854
4855 #endif /* sun4v */
4856 #endif /* lint */
4857
4858 #ifndef lint
4859
4860 /*
4861 * Per-CPU tsbmiss areas to avoid cache misses in TSB miss handlers.
4862 */
4863 .seg ".data"
4864 .align 64
4865 .global tsbmiss_area
4866 tsbmiss_area:
4867 .skip (TSBMISS_SIZE * NCPU)
4868
4869 .align 64
4870 .global kpmtsbm_area
4871 kpmtsbm_area:
4872 .skip (KPMTSBM_SIZE * NCPU)
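How a handler locates its slice of these arrays, as a small sketch (the function name is illustrative; TSBMISS_SIZE and KPMTSBM_SIZE are the assym-generated structure sizes used above, and the assembly does the equivalent with CPU_INDEX and a shift, which is why the earlier #error check requires KPMTSBM_SIZE to equal 1 << KPMTSBM_SHIFT):

    #include <sys/types.h>

    static caddr_t
    percpu_area_model(caddr_t base, uint_t cpuid, size_t areasz)
    {
        /* e.g. percpu_area_model(tsbmiss_area, cpuid, TSBMISS_SIZE) */
        return (base + (size_t)cpuid * areasz);
    }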
4873 #endif /* lint */
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 * Copyright (c) 2016 by Delphix. All rights reserved.
25 */
26
27 /*
28 * SFMMU primitives. These primitives should only be used by sfmmu
29 * routines.
30 */
31
32 #include "assym.h"
33
34 #include <sys/asm_linkage.h>
35 #include <sys/machtrap.h>
36 #include <sys/machasi.h>
37 #include <sys/sun4asi.h>
38 #include <sys/pte.h>
39 #include <sys/mmu.h>
40 #include <vm/hat_sfmmu.h>
41 #include <vm/seg_spt.h>
42 #include <sys/machparam.h>
43 #include <sys/privregs.h>
44 #include <sys/scb.h>
45 #include <sys/intreg.h>
46 #include <sys/machthread.h>
47 #include <sys/intr.h>
48 #include <sys/clock.h>
49 #include <sys/trapstat.h>
50
51 #ifdef TRAPTRACE
52 #include <sys/traptrace.h>
53
54 /*
55 * Tracing macro. Adds two instructions if TRAPTRACE is defined.
56 */
57 #define TT_TRACE(label) \
58 ba label ;\
59 rd %pc, %g7
60 #else
61
62 #define TT_TRACE(label)
63
64 #endif /* TRAPTRACE */
65
66 #if (TTE_SUSPEND_SHIFT > 0)
67 #define TTE_SUSPEND_INT_SHIFT(reg) \
68 sllx reg, TTE_SUSPEND_SHIFT, reg
69 #else
70 #define TTE_SUSPEND_INT_SHIFT(reg)
71 #endif
72
73 /*
74 * Assumes TSBE_TAG is 0
75 * Assumes TSBE_INTHI is 0
76 * Assumes TSBREG.split is 0
77 */
78
79 #if TSBE_TAG != 0
80 #error "TSB_UPDATE and TSB_INVALIDATE assume TSBE_TAG = 0"
81 #endif
82
83 #if TSBTAG_INTHI != 0
84 #error "TSB_UPDATE and TSB_INVALIDATE assume TSBTAG_INTHI = 0"
85 #endif
86
87 /*
88 * The following code assumes the tsb is not split.
89 *
90 * With TSBs no longer shared between processes, it's no longer
91 * necessary to hash the context bits into the tsb index to get
92 * tsb coloring; the new implementation treats the TSB as a
443 #error - TSB_SOFTSZ_MASK too small
444 #endif
445
446
447 /*
448 * An implementation of setx which will be hot patched at run time.
449 * Since it is being hot patched, there is no value passed in.
450 * Thus, essentially we are implementing
451 * setx value, tmp, dest
452 * where value is RUNTIME_PATCH (aka 0) in this case.
453 */
454 #define RUNTIME_PATCH_SETX(dest, tmp) \
455 sethi %hh(RUNTIME_PATCH), tmp ;\
456 sethi %lm(RUNTIME_PATCH), dest ;\
457 or tmp, %hm(RUNTIME_PATCH), tmp ;\
458 or dest, %lo(RUNTIME_PATCH), dest ;\
459 sllx tmp, 32, tmp ;\
460 nop /* for perf reasons */ ;\
461 or tmp, dest, dest /* contents of patched value */
462
463
464 .seg ".data"
465 .global sfmmu_panic1
466 sfmmu_panic1:
467 .asciz "sfmmu_asm: interrupts already disabled"
468
469 .global sfmmu_panic3
470 sfmmu_panic3:
471 .asciz "sfmmu_asm: sfmmu_vatopfn called for user"
472
473 .global sfmmu_panic4
474 sfmmu_panic4:
475 .asciz "sfmmu_asm: 4M tsb pointer mis-match"
476
477 .global sfmmu_panic5
478 sfmmu_panic5:
479 .asciz "sfmmu_asm: no unlocked TTEs in TLB 0"
480
481 .global sfmmu_panic6
482 sfmmu_panic6:
483 .asciz "sfmmu_asm: interrupts not disabled"
849 * %o2 = vpshift (in)
850 * %o3 = tsb size code (in)
851 * %o4 = scratch register
852 */
853 ENTRY_NP(sfmmu_get_tsbe)
854 GET_TSBE_POINTER(%o2, %o0, %o1, %o3, %o4)
855 retl
856 nop
857 SET_SIZE(sfmmu_get_tsbe)
858
859 /*
860 * Return a TSB tag for the given va.
861 * %o0 = va (in/clobbered)
862 * %o0 = va shifted to be in tsb tag format (with no context) (out)
863 */
864 ENTRY_NP(sfmmu_make_tsbtag)
865 retl
866 srln %o0, TTARGET_VA_SHIFT, %o0
867 SET_SIZE(sfmmu_make_tsbtag)
868
869 /*
870 * Other sfmmu primitives
871 */
872
873
874 #define I_SIZE 4
875
876 ENTRY_NP(sfmmu_fix_ktlb_traptable)
877 /*
878 * %o0 = start of patch area
879 * %o1 = size code of TSB to patch
880 * %o3 = scratch
881 */
882 /* fix sll */
883 ld [%o0], %o3 /* get sll */
884 sub %o3, %o1, %o3 /* decrease shift by tsb szc */
885 st %o3, [%o0] /* write sll */
886 flush %o0
887 /* fix srl */
888 add %o0, I_SIZE, %o0 /* goto next instr. */
889 ld [%o0], %o3 /* get srl */
890 sub %o3, %o1, %o3 /* decrease shift by tsb szc */
891 st %o3, [%o0] /* write srl */
892 retl
893 flush %o0
1431 sethi %hi(ktsb_phys), %o4
1432 mov ASI_N, %o3
1433 ld [%o4 + %lo(ktsb_phys)], %o4
1434 movrnz %o4, ASI_MEM, %o3
1435 mov %o3, %asi
1436 #endif /* !sun4v */
1437 mov %o0, %g1 ! %g1 = vaddr
1438
1439 /* GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr (clobbers), tmp1, tmp2) */
1440 GET_KPM_TSBE_POINTER(%o1, %g2, %g1, %o3, %o4)
1441 /* %g2 = tsbep, %g1 clobbered */
1442
1443 srlx %o0, TTARGET_VA_SHIFT, %g1; ! %g1 = tag target
1444 /* TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label) */
1445 TSB_INVALIDATE(%g2, %g1, %o3, %o4, %o1, kpm_tsbinval)
1446
1447 retl
1448 membar #StoreStore|#StoreLoad
1449 SET_SIZE(sfmmu_kpm_unload_tsb)
1450
1451
1452 ENTRY_NP(sfmmu_ttetopfn)
1453 ldx [%o0], %g1 /* read tte */
1454 TTETOPFN(%g1, %o1, sfmmu_ttetopfn_l1, %g2, %g3, %g4)
1455 /*
1456 * g1 = pfn
1457 */
1458 retl
1459 mov %g1, %o0
1460 SET_SIZE(sfmmu_ttetopfn)
1461
1462 /*
1463 * These macros are used to update global sfmmu hme hash statistics
1464 * in perf critical paths. They are only enabled in debug kernels or
1465 * if SFMMU_STAT_GATHER is defined.
1466 */
1467 #if defined(DEBUG) || defined(SFMMU_STAT_GATHER)
1468 #define HAT_HSEARCH_DBSTAT(hatid, tsbarea, tmp1, tmp2) \
1469 ldn [tsbarea + TSBMISS_KHATID], tmp1 ;\
1470 mov HATSTAT_KHASH_SEARCH, tmp2 ;\
1471 cmp tmp1, hatid ;\
1472 movne %ncc, HATSTAT_UHASH_SEARCH, tmp2 ;\
1473 set sfmmu_global_stat, tmp1 ;\
1474 add tmp1, tmp2, tmp1 ;\
1475 ld [tmp1], tmp2 ;\
1476 inc tmp2 ;\
1477 st tmp2, [tmp1]
1478
1479 #define HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2) \
1480 ldn [tsbarea + TSBMISS_KHATID], tmp1 ;\
1481 mov HATSTAT_KHASH_LINKS, tmp2 ;\
1537 CPU_INDEX(tmp1, tsbma) ;\
1538 sethi %hi(kpmtsbm_area), tsbma ;\
1539 sllx tmp1, KPMTSBM_SHIFT, tmp1 ;\
1540 or tsbma, %lo(kpmtsbm_area), tsbma ;\
1541 add tsbma, tmp1, tsbma /* kpmtsbm area */ ;\
1542 /* VA range check */ ;\
1543 ldx [tsbma + KPMTSBM_VBASE], val ;\
1544 cmp tagacc, val ;\
1545 blu,pn %xcc, label ;\
1546 ldx [tsbma + KPMTSBM_VEND], tmp1 ;\
1547 cmp tagacc, tmp1 ;\
1548 bgeu,pn %xcc, label ;\
1549 lduw [tsbma + KPMTSBM_DTLBMISS], val ;\
1550 inc val ;\
1551 st val, [tsbma + KPMTSBM_DTLBMISS] ;\
1552 label:
1553 #else
1554 #define KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label)
1555 #endif /* KPM_TLBMISS_STATS_GATHER */
1556
1557 #ifdef PTL1_PANIC_DEBUG
1558 .seg ".data"
1559 .global test_ptl1_panic
1560 test_ptl1_panic:
1561 .word 0
1562 .align 8
1563
1564 .seg ".text"
1565 .align 4
1566 #endif /* PTL1_PANIC_DEBUG */
1567
1568
1569 ENTRY_NP(sfmmu_pagefault)
1570 SET_GL_REG(1)
1571 USE_ALTERNATE_GLOBALS(%g5)
1572 GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g6, %g4)
1573 rdpr %tt, %g6
1574 cmp %g6, FAST_IMMU_MISS_TT
1575 be,a,pn %icc, 1f
1576 mov T_INSTR_MMU_MISS, %g3
1786 stuh %g2, [%g1 + CPUC_DTRACE_FLAGS]
1787 GET_MMU_D_ADDR(%g3, /*scratch*/ %g4)
1788 stx %g3, [%g1 + CPUC_DTRACE_ILLVAL]
1789 done
1790 0:
1791 TSTAT_CHECK_TL1(1f, %g1, %g2)
1792 1:
1793 SET_GL_REG(1)
1794 USE_ALTERNATE_GLOBALS(%g5)
1795 GET_MMU_D_TAGACC(%g2 /* tagacc */, %g4 /*scratch*/)
1796 mov T_DATA_MMU_MISS, %g3 /* arg2 = traptype */
1797 /*
1798 * g2=tagacc g3.l=type g3.h=0
1799 */
1800 sethi %hi(trap), %g1
1801 or %g1, %lo(trap), %g1
1802 ba,pt %xcc, sys_trap
1803 mov -1, %g4
1804 SET_SIZE(sfmmu_kpm_exception)
1805
1806 #if (IMAP_SEG != 0)
1807 #error - ism_map->ism_seg offset is not zero
1808 #endif
1809
1810 /*
1811 * Copies the ism mapping for this ctx into param "ism" if this is an ISM
1812 * tlb miss and branches to label "ismhit". If this is not an ISM
1813 * process or an ISM tlb miss it falls through.
1814 *
1815 * Checks to see if the vaddr passed in via tagacc is in an ISM segment for
1816 * this process.
1817 * If so, it will branch to label "ismhit". If not, it will fall through.
1818 *
1819 * Also hat_unshare() will set the context for this process to INVALID_CONTEXT
1820 * so that any other threads of this process will not try to walk the ism
1821 * maps while they are being changed.
1822 *
1823 * NOTE: We will never have any holes in our ISM maps. sfmmu_share/unshare
1824 * will make sure of that. This means we can terminate our search on
1825 * the first zero mapping we find.
3676 ldx [%g2 + MMFSA_I_CTX], %g3
3677 ldx [%g2 + MMFSA_D_CTX], %g3
3678 2:
3679 #else
3680 mov MMU_TAG_ACCESS, %g2
3681 be,a,pt %icc, 2f
3682 ldxa [%g2]ASI_IMMU, %g3
3683 ldxa [%g2]ASI_DMMU, %g3
3684 2: sllx %g3, TAGACC_CTX_LSHIFT, %g3
3685 #endif /* sun4v */
3686 brz,a,pn %g3, ptl1_panic ! panic if called for kernel
3687 mov PTL1_BAD_CTX_STEAL, %g1 ! since kernel ctx was stolen
3688 rdpr %tl, %g5
3689 cmp %g5, 1
3690 ble,pt %icc, sfmmu_mmu_trap
3691 nop
3692 TSTAT_CHECK_TL1(sfmmu_mmu_trap, %g1, %g2)
3693 ba,pt %icc, sfmmu_window_trap
3694 nop
3695 SET_SIZE(sfmmu_tsb_miss)
3696
3697 ENTRY_NP(sfmmu_vatopfn)
3698 /*
3699 * disable interrupts
3700 */
3701 rdpr %pstate, %o3
3702 #ifdef DEBUG
3703 PANIC_IF_INTR_DISABLED_PSTR(%o3, sfmmu_di_l5, %g1)
3704 #endif
3705 /*
3706 * disable interrupts to protect the TSBMISS area
3707 */
3708 andn %o3, PSTATE_IE, %o5
3709 wrpr %o5, 0, %pstate
3710
3711 /*
3712 * o0 = vaddr
3713 * o1 = sfmmup
3714 * o2 = ttep
3715 */
3716 CPU_TSBMISS_AREA(%g1, %o5)
3895 * %o0 = vaddr
3896 */
3897 brgez,a,pn %g3, 1f /* check if tte is invalid */
3898 mov -1, %o0 /* output = -1 (PFN_INVALID) */
3899 TTETOPFN(%g3, %o0, kvaszc2pfn_l2, %g2, %g4, %g5)
3900 /*
3901 * g3 = pfn
3902 */
3903 ba,pt %xcc, 1f
3904 mov %g3, %o0
3905
3906 kvaszc2pfn_nohblk:
3907 mov -1, %o0
3908
3909 1:
3910 retl
3911 wrpr %g0, %o3, %pstate /* re-enable interrupts */
3912
3913 SET_SIZE(sfmmu_kvaszc2pfn)
3914
3915
3916
3917 /*
3918 * kpm lock used between trap level tsbmiss handler and kpm C level.
3919 */
3920 #define KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) \
3921 mov 0xff, tmp1 ;\
3922 label1: ;\
3923 casa [kpmlckp]asi, %g0, tmp1 ;\
3924 brnz,pn tmp1, label1 ;\
3925 mov 0xff, tmp1 ;\
3926 membar #LoadLoad
3927
3928 #define KPMLOCK_EXIT(kpmlckp, asi) \
3929 membar #LoadStore|#StoreStore ;\
3930 sta %g0, [kpmlckp]asi
3931
3932 /*
3933 * Look up a memseg for a given pfn and, if found, return the physical
3934 * address of the corresponding struct memseg in mseg, otherwise
3935 * return MSEG_NULLPTR_PA. The kpmtsbm pointer must be provided in
3936 * tsbmp; %asi is assumed to be ASI_MEM.
4441 cmp %g7, %g6
4442 bgeu %xcc, 0f
4443 ALTENTRY(tsbmiss_trapstat_patch_point_kpm_small)
4444 add %g7, RUNTIME_PATCH, %g7 /* must match TSTAT_TSBMISS_INSTR */
4445 wrpr %g7, %tpc
4446 add %g7, 4, %g7
4447 wrpr %g7, %tnpc
4448 0:
4449 retry
4450 5:
4451 /* g3=hlck_pa */
4452 KPMLOCK_EXIT(%g3, ASI_MEM)
4453 ba,pt %icc, sfmmu_kpm_exception
4454 nop
4455 SET_SIZE(sfmmu_kpm_dtsb_miss_small)
4456
4458 #if (1 << KPMTSBM_SHIFT) != KPMTSBM_SIZE
4458 #error - KPMTSBM_SHIFT does not correspond to size of kpmtsbm struct
4459 #endif
4460
4461 .seg ".data"
4462 sfmmu_kpm_tsbmtl_panic:
4463 .ascii "sfmmu_kpm_tsbmtl: interrupts disabled"
4464 .byte 0
4465 sfmmu_kpm_stsbmtl_panic:
4466 .ascii "sfmmu_kpm_stsbmtl: interrupts disabled"
4467 .byte 0
4468 .align 4
4469 .seg ".text"
4470
4471 ENTRY_NP(sfmmu_kpm_tsbmtl)
4472 rdpr %pstate, %o3
4473 /*
4474 * %o0 = &kp_refcntc
4475 * %o1 = &khl_lock
4476 * %o2 = 0/1 (off/on)
4477 * %o3 = pstate save
4478 */
4479 #ifdef DEBUG
4480 andcc %o3, PSTATE_IE, %g0 /* if interrupts already */
4517 save %sp, -SA(MINFRAME), %sp
4518 sethi %hi(sfmmu_kpm_stsbmtl_panic), %o0
4519 call panic
4520 or %o0, %lo(sfmmu_kpm_stsbmtl_panic), %o0
4521 ret
4522 restore
4523 1:
4524 #endif /* DEBUG */
4525 wrpr %o3, PSTATE_IE, %pstate /* disable interrupts */
4526
4527 KPMLOCK_ENTER(%o1, %o4, kpmstsbmtl1, ASI_N)
4528 ldsb [%o0], %o5
4529 stb %o2, [%o0]
4530 KPMLOCK_EXIT(%o1, ASI_N)
4531
4532 and %o5, KPM_MAPPED_MASK, %o0 /* return old val */
4533 retl
4534 wrpr %g0, %o3, %pstate /* enable interrupts */
4535 SET_SIZE(sfmmu_kpm_stsbmtl)
4536
4537 #ifdef sun4v
4538 /*
4539 * User/kernel data miss w/ multiple TSBs
4540 * The first probe covers 8K, 64K, and 512K page sizes,
4541 * because 64K and 512K mappings are replicated off the 8K
4542 * pointer. The second probe covers the 4M page size only.
4543 *
4544 * MMU fault area contains miss address and context.
4545 */
4546 ALTENTRY(sfmmu_slow_dmmu_miss)
4547 GET_MMU_D_PTAGACC_CTXTYPE(%g2, %g3) ! %g2 = ptagacc, %g3 = ctx type
4548
4549 slow_miss_common:
4550 /*
4551 * %g2 = tagacc register (needed for sfmmu_tsb_miss_tt)
4552 * %g3 = ctx (cannot be INVALID_CONTEXT)
4553 */
4554 brnz,pt %g3, 8f ! check for user context
4555 nop
4556
4598 9:
4599 ba,a,pt %xcc, sfmmu_tsb_miss_tt
4600 .empty
4601 SET_SIZE(sfmmu_slow_dmmu_miss)
4602
4603
4604 /*
4605 * User/kernel instruction miss w/ multiple TSBs
4606 * The first probe covers 8K, 64K, and 512K page sizes,
4607 * because 64K and 512K mappings are replicated off the 8K
4608 * pointer. The second probe covers the 4M page size only.
4609 *
4610 * MMU fault area contains miss address and context.
4611 */
4612 ALTENTRY(sfmmu_slow_immu_miss)
4613 GET_MMU_I_PTAGACC_CTXTYPE(%g2, %g3)
4614 ba,a,pt %xcc, slow_miss_common
4615 SET_SIZE(sfmmu_slow_immu_miss)
4616
4617 #endif /* sun4v */
4618
4619 /*
4620 * Per-CPU tsbmiss areas to avoid cache misses in TSB miss handlers.
4621 */
4622 .seg ".data"
4623 .align 64
4624 .global tsbmiss_area
4625 tsbmiss_area:
4626 .skip (TSBMISS_SIZE * NCPU)
4627
4628 .align 64
4629 .global kpmtsbm_area
4630 kpmtsbm_area:
4631 .skip (KPMTSBM_SIZE * NCPU)