de-linting of .s files

          --- old/usr/src/uts/sfmmu/ml/sfmmu_asm.s
          +++ new/usr/src/uts/sfmmu/ml/sfmmu_asm.s
(21 lines elided)
  22   22   * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
  23   23   * Use is subject to license terms.
  24   24   * Copyright (c) 2016 by Delphix. All rights reserved.
  25   25   */
  26   26  
  27   27  /*
  28   28   * SFMMU primitives.  These primitives should only be used by sfmmu
  29   29   * routines.
  30   30   */
  31   31  
  32      -#if defined(lint)
  33      -#include <sys/types.h>
  34      -#else   /* lint */
  35   32  #include "assym.h"
  36      -#endif  /* lint */
  37   33  
  38   34  #include <sys/asm_linkage.h>
  39   35  #include <sys/machtrap.h>
  40   36  #include <sys/machasi.h>
  41   37  #include <sys/sun4asi.h>
  42   38  #include <sys/pte.h>
  43   39  #include <sys/mmu.h>
  44   40  #include <vm/hat_sfmmu.h>
  45   41  #include <vm/seg_spt.h>
  46   42  #include <sys/machparam.h>
(13 lines elided)
  60   56   */
  61   57  #define TT_TRACE(label)         \
  62   58          ba      label           ;\
  63   59          rd      %pc, %g7
  64   60  #else
  65   61  
  66   62  #define TT_TRACE(label)
  67   63  
  68   64  #endif /* TRAPTRACE */
  69   65  
  70      -#ifndef lint
  71      -
  72   66  #if (TTE_SUSPEND_SHIFT > 0)
  73   67  #define TTE_SUSPEND_INT_SHIFT(reg)                              \
  74   68          sllx    reg, TTE_SUSPEND_SHIFT, reg
  75   69  #else
  76   70  #define TTE_SUSPEND_INT_SHIFT(reg)
  77   71  #endif
  78   72  
  79      -#endif /* lint */
  80      -
  81      -#ifndef lint
  82      -
  83   73  /*
  84   74   * Assumes TSBE_TAG is 0
  85   75   * Assumes TSBE_INTHI is 0
  86   76   * Assumes TSBREG.split is 0
  87   77   */
  88   78  
  89   79  #if TSBE_TAG != 0
  90   80  #error "TSB_UPDATE and TSB_INVALIDATE assume TSBE_TAG = 0"
  91   81  #endif
  92   82  
(370 lines elided)
 463  453   */
 464  454  #define RUNTIME_PATCH_SETX(dest, tmp)                                   \
 465  455          sethi   %hh(RUNTIME_PATCH), tmp                                 ;\
 466  456          sethi   %lm(RUNTIME_PATCH), dest                                ;\
 467  457          or      tmp, %hm(RUNTIME_PATCH), tmp                            ;\
 468  458          or      dest, %lo(RUNTIME_PATCH), dest                          ;\
 469  459          sllx    tmp, 32, tmp                                            ;\
 470  460          nop                             /* for perf reasons */          ;\
 471  461          or      tmp, dest, dest         /* contents of patched value */
 472  462  
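
For readers new to the SPARC V9 setx idiom used in RUNTIME_PATCH_SETX above, here is a minimal C sketch of how the four relocation operators (%hh, %hm, %lm, %lo) compose a 64-bit constant. The function name and the explicit bit-field decomposition are illustrative assumptions, not part of the source.

        /* Illustrative only: how sethi/or/sllx/or assemble 64 bits. */
        #include <stdint.h>

        uint64_t
        setx_compose(uint64_t x)        /* hypothetical helper */
        {
                uint64_t hh = (x >> 42) & 0x3fffff; /* %hh: bits 63..42 */
                uint64_t hm = (x >> 32) & 0x3ff;    /* %hm: bits 41..32 */
                uint64_t lm = (x >> 10) & 0x3fffff; /* %lm: bits 31..10 */
                uint64_t lo = x & 0x3ff;            /* %lo: bits  9..0  */
                uint64_t tmp  = (hh << 10) | hm;    /* sethi %hh; or %hm */
                uint64_t dest = (lm << 10) | lo;    /* sethi %lm; or %lo */

                return ((tmp << 32) | dest);        /* sllx tmp, 32; or */
        }

(sethi places its 22-bit operand into bits 31..10 of the destination, which is why the high pair is shifted up by 32 afterwards.)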
 473      -#endif /* lint */
 474  463  
 475      -
 476      -#if defined (lint)
 477      -
 478      -/*
 479      - * sfmmu related subroutines
 480      - */
 481      -uint_t
 482      -sfmmu_disable_intrs()
 483      -{ return(0); }
 484      -
 485      -/* ARGSUSED */
 486      -void
 487      -sfmmu_enable_intrs(uint_t pstate_save)
 488      -{}
 489      -
 490      -/* ARGSUSED */
 491      -int
 492      -sfmmu_alloc_ctx(sfmmu_t *sfmmup, int allocflag, struct cpu *cp, int shflag)
 493      -{ return(0); }
 494      -
 495      -/*
 496      - * Use cas, if tte has changed underneath us then reread and try again.
 497      - * In the case of a retry, it will update sttep with the new original.
 498      - */
 499      -/* ARGSUSED */
 500      -int
 501      -sfmmu_modifytte(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
 502      -{ return(0); }
 503      -
 504      -/*
 505      - * Use cas, if tte has changed underneath us then return 1, else return 0
 506      - */
 507      -/* ARGSUSED */
 508      -int
 509      -sfmmu_modifytte_try(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
 510      -{ return(0); }
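
The retry protocol described in the two comments above maps onto a generic compare-and-swap. A minimal C11 sketch, under the assumption that a tte_t can be treated as a raw 64-bit word; the helper name is hypothetical:

        /* Illustrative sketch of the modifytte_try contract. */
        #include <stdatomic.h>
        #include <stdint.h>

        int
        modifytte_try_sketch(_Atomic uint64_t *dttep, uint64_t *sttep,
            uint64_t *stmodttep)
        {
                uint64_t expected = *sttep;

                if (atomic_compare_exchange_strong(dttep, &expected,
                    *stmodttep))
                        return (0);     /* tte unchanged: swap succeeded */
                *sttep = expected;      /* tte changed: hand back new original */
                return (1);
        }

sfmmu_modifytte then amounts to looping on this primitive, recomputing *stmodttep from the refreshed *sttep before each retry.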
 511      -
 512      -/* ARGSUSED */
 513      -void
 514      -sfmmu_copytte(tte_t *sttep, tte_t *dttep)
 515      -{}
 516      -
 517      -/*ARGSUSED*/
 518      -struct tsbe *
 519      -sfmmu_get_tsbe(uint64_t tsbeptr, caddr_t vaddr, int vpshift, int tsb_szc)
 520      -{ return(0); }
 521      -
 522      -/*ARGSUSED*/
 523      -uint64_t
 524      -sfmmu_make_tsbtag(caddr_t va)
 525      -{ return(0); }
 526      -
 527      -#else   /* lint */
 528      -
 529  464          .seg    ".data"
 530  465          .global sfmmu_panic1
 531  466  sfmmu_panic1:
 532  467          .asciz  "sfmmu_asm: interrupts already disabled"
 533  468  
 534  469          .global sfmmu_panic3
 535  470  sfmmu_panic3:
 536  471          .asciz  "sfmmu_asm: sfmmu_vatopfn called for user"
 537  472  
 538  473          .global sfmmu_panic4
(385 lines elided)
 924  859          /*
 925  860           * Return a TSB tag for the given va.
 926  861           * %o0 = va (in/clobbered)
 927  862           * %o0 = va shifted to be in tsb tag format (with no context) (out)
 928  863           */
 929  864          ENTRY_NP(sfmmu_make_tsbtag)
 930  865          retl
 931  866          srln    %o0, TTARGET_VA_SHIFT, %o0
 932  867          SET_SIZE(sfmmu_make_tsbtag)
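
In C terms the routine above is a single shift (a sketch only; srln is the width-conditional shift, and TTARGET_VA_SHIFT comes from the headers included at the top of the file):

        /* Illustrative equivalent of sfmmu_make_tsbtag. */
        uint64_t
        make_tsbtag_sketch(uint64_t va)
        {
                return (va >> TTARGET_VA_SHIFT);
        }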
 933  868  
 934      -#endif /* lint */
 935      -
 936  869  /*
 937  870   * Other sfmmu primitives
 938  871   */
 939  872  
 940  873  
 941      -#if defined (lint)
 942      -void
 943      -sfmmu_patch_ktsb(void)
 944      -{
 945      -}
 946      -
 947      -void
 948      -sfmmu_kpm_patch_tlbm(void)
 949      -{
 950      -}
 951      -
 952      -void
 953      -sfmmu_kpm_patch_tsbm(void)
 954      -{
 955      -}
 956      -
 957      -void
 958      -sfmmu_patch_shctx(void)
 959      -{
 960      -}
 961      -
 962      -/* ARGSUSED */
 963      -void
 964      -sfmmu_load_tsbe(struct tsbe *tsbep, uint64_t vaddr, tte_t *ttep, int phys)
 965      -{
 966      -}
 967      -
 968      -/* ARGSUSED */
 969      -void
 970      -sfmmu_unload_tsbe(struct tsbe *tsbep, uint64_t vaddr, int phys)
 971      -{
 972      -}
 973      -
 974      -/* ARGSUSED */
 975      -void
 976      -sfmmu_kpm_load_tsb(caddr_t addr, tte_t *ttep, int vpshift)
 977      -{
 978      -}
 979      -
 980      -/* ARGSUSED */
 981      -void
 982      -sfmmu_kpm_unload_tsb(caddr_t addr, int vpshift)
 983      -{
 984      -}
 985      -
 986      -#else /* lint */
 987      -
 988  874  #define I_SIZE          4
 989  875  
 990  876          ENTRY_NP(sfmmu_fix_ktlb_traptable)
 991  877          /*
 992  878           * %o0 = start of patch area
 993  879           * %o1 = size code of TSB to patch
 994  880           * %o3 = scratch
 995  881           */
 996  882          /* fix sll */
 997  883          ld      [%o0], %o3                      /* get sll */
(557 lines elided)
1555 1441          /* %g2 = tsbep, %g1 clobbered */
1556 1442  
1557 1443          srlx    %o0, TTARGET_VA_SHIFT, %g1;     ! %g1 = tag target
1558 1444          /* TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label) */
1559 1445          TSB_INVALIDATE(%g2, %g1, %o3, %o4, %o1, kpm_tsbinval)
1560 1446  
1561 1447          retl
1562 1448            membar        #StoreStore|#StoreLoad
1563 1449          SET_SIZE(sfmmu_kpm_unload_tsb)
1564 1450  
1565      -#endif /* lint */
1566 1451  
1567      -
1568      -#if defined (lint)
1569      -
1570      -/*ARGSUSED*/
1571      -pfn_t
1572      -sfmmu_ttetopfn(tte_t *tte, caddr_t vaddr)
1573      -{ return(0); }
1574      -
1575      -#else /* lint */
1576      -
1577 1452          ENTRY_NP(sfmmu_ttetopfn)
1578 1453          ldx     [%o0], %g1                      /* read tte */
1579 1454          TTETOPFN(%g1, %o1, sfmmu_ttetopfn_l1, %g2, %g3, %g4)
1580 1455          /*
1581 1456           * g1 = pfn
1582 1457           */
1583 1458          retl
1584 1459          mov     %g1, %o0
1585 1460          SET_SIZE(sfmmu_ttetopfn)
1586 1461  
1587      -#endif /* !lint */
1588      -
1589 1462  /*
1590 1463   * These macros are used to update global sfmmu hme hash statistics
 1591 1464   * in perf-critical paths. They are only enabled in debug kernels or
 1592 1465   * if SFMMU_STAT_GATHER is defined.
1593 1466   */
1594 1467  #if defined(DEBUG) || defined(SFMMU_STAT_GATHER)
1595 1468  #define HAT_HSEARCH_DBSTAT(hatid, tsbarea, tmp1, tmp2)                  \
1596 1469          ldn     [tsbarea + TSBMISS_KHATID], tmp1                        ;\
1597 1470          mov     HATSTAT_KHASH_SEARCH, tmp2                              ;\
1598 1471          cmp     tmp1, hatid                                             ;\
(75 lines elided)
1674 1547          cmp     tagacc, tmp1                                            ;\
1675 1548          bgeu,pn %xcc, label                                             ;\
1676 1549            lduw  [tsbma + KPMTSBM_DTLBMISS], val                         ;\
1677 1550          inc     val                                                     ;\
1678 1551          st      val, [tsbma + KPMTSBM_DTLBMISS]                         ;\
1679 1552  label:
1680 1553  #else
1681 1554  #define KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label)
1682 1555  #endif  /* KPM_TLBMISS_STATS_GATHER */
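
In C terms, the visible tail of KPM_TLBMISS_STAT_INCR is a guarded counter bump. A sketch, not the macro itself: the threshold value loaded into tmp1 comes from the lines elided above, and the field name is inferred from the KPMTSBM_DTLBMISS offset.

        /* Illustrative: count a dtlb miss only below the kpm threshold. */
        if (tagacc < threshold)                 /* bgeu,pn skips the store */
                tsbma->kpmtsbm_dtlbmiss++;      /* lduw; inc; st */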
1683 1556  
1684      -#if defined (lint)
1685      -/*
1686      - * The following routines are jumped to from the mmu trap handlers to do
1687      - * the setting up to call systrap.  They are separate routines instead of
1688      - * being part of the handlers because the handlers would exceed 32
1689      - * instructions and since this is part of the slow path the jump
1690      - * cost is irrelevant.
1691      - */
1692      -void
1693      -sfmmu_pagefault(void)
1694      -{
1695      -}
1696      -
1697      -void
1698      -sfmmu_mmu_trap(void)
1699      -{
1700      -}
1701      -
1702      -void
1703      -sfmmu_window_trap(void)
1704      -{
1705      -}
1706      -
1707      -void
1708      -sfmmu_kpm_exception(void)
1709      -{
1710      -}
1711      -
1712      -#else /* lint */
1713      -
1714 1557  #ifdef  PTL1_PANIC_DEBUG
1715 1558          .seg    ".data"
1716 1559          .global test_ptl1_panic
1717 1560  test_ptl1_panic:
1718 1561          .word   0
1719 1562          .align  8
1720 1563  
1721 1564          .seg    ".text"
1722 1565          .align  4
1723 1566  #endif  /* PTL1_PANIC_DEBUG */
(229 lines elided)
1953 1796          mov     T_DATA_MMU_MISS, %g3    /* arg2 = traptype */
1954 1797          /*
1955 1798           * g2=tagacc g3.l=type g3.h=0
1956 1799           */
1957 1800          sethi   %hi(trap), %g1
1958 1801          or      %g1, %lo(trap), %g1
1959 1802          ba,pt   %xcc, sys_trap
1960 1803          mov     -1, %g4
1961 1804          SET_SIZE(sfmmu_kpm_exception)
1962 1805  
1963      -#endif /* lint */
1964      -
1965      -#if defined (lint)
1966      -
1967      -void
1968      -sfmmu_tsb_miss(void)
1969      -{
1970      -}
1971      -
1972      -void
1973      -sfmmu_kpm_dtsb_miss(void)
1974      -{
1975      -}
1976      -
1977      -void
1978      -sfmmu_kpm_dtsb_miss_small(void)
1979      -{
1980      -}
1981      -
1982      -#else /* lint */
1983      -
1984 1806  #if (IMAP_SEG != 0)
1985 1807  #error - ism_map->ism_seg offset is not zero
1986 1808  #endif
1987 1809  
1988 1810  /*
1989 1811   * Copies ism mapping for this ctx in param "ism" if this is a ISM
1990 1812   * tlb miss and branches to label "ismhit". If this is not an ISM
1991 1813   * process or an ISM tlb miss it falls thru.
1992 1814   *
1993 1815   * Checks to see if the vaddr passed in via tagacc is in an ISM segment for
(1870 lines elided)
3864 3686          brz,a,pn %g3, ptl1_panic                ! panic if called for kernel
3865 3687            mov   PTL1_BAD_CTX_STEAL, %g1         ! since kernel ctx was stolen
3866 3688          rdpr    %tl, %g5
3867 3689          cmp     %g5, 1
3868 3690          ble,pt  %icc, sfmmu_mmu_trap
3869 3691            nop
3870 3692          TSTAT_CHECK_TL1(sfmmu_mmu_trap, %g1, %g2)
3871 3693          ba,pt   %icc, sfmmu_window_trap
3872 3694            nop
3873 3695          SET_SIZE(sfmmu_tsb_miss)
3874      -#endif  /* lint */
3875 3696  
3876      -#if defined (lint)
3877      -/*
3878      - * This routine will look for a user or kernel vaddr in the hash
3879      - * structure.  It returns a valid pfn or PFN_INVALID.  It doesn't
3880      - * grab any locks.  It should only be used by other sfmmu routines.
3881      - */
3882      -/* ARGSUSED */
3883      -pfn_t
3884      -sfmmu_vatopfn(caddr_t vaddr, sfmmu_t *sfmmup, tte_t *ttep)
3885      -{
3886      -        return(0);
3887      -}
3888      -
3889      -/* ARGSUSED */
3890      -pfn_t
3891      -sfmmu_kvaszc2pfn(caddr_t vaddr, int hashno)
3892      -{
3893      -        return(0);
3894      -}
3895      -
3896      -#else /* lint */
3897      -
3898 3697          ENTRY_NP(sfmmu_vatopfn)
3899 3698          /*
3900 3699           * disable interrupts
3901 3700           */
3902 3701          rdpr    %pstate, %o3
3903 3702  #ifdef DEBUG
3904 3703          PANIC_IF_INTR_DISABLED_PSTR(%o3, sfmmu_di_l5, %g1)
3905 3704  #endif
3906 3705          /*
3907 3706           * disable interrupts to protect the TSBMISS area
(198 lines elided)
4106 3905  
4107 3906  kvaszc2pfn_nohblk:
4108 3907          mov     -1, %o0
4109 3908  
4110 3909  1:
4111 3910          retl
4112 3911            wrpr  %g0, %o3, %pstate               /* re-enable interrupts */
4113 3912  
4114 3913          SET_SIZE(sfmmu_kvaszc2pfn)
4115 3914  
4116      -#endif /* lint */
4117 3915  
4118 3916  
4119      -
4120      -#if !defined(lint)
4121      -
4122 3917  /*
4123 3918   * kpm lock used between trap level tsbmiss handler and kpm C level.
4124 3919   */
4125 3920  #define KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi)                       \
4126 3921          mov     0xff, tmp1                                              ;\
4127 3922  label1:                                                                 ;\
4128 3923          casa    [kpmlckp]asi, %g0, tmp1                                 ;\
4129 3924          brnz,pn tmp1, label1                                            ;\
4130 3925          mov     0xff, tmp1                                              ;\
4131 3926          membar  #LoadLoad
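
A C11 model of the lock protocol may help: KPMLOCK_ENTER spins on casa until it swaps 0xff into a free (zero) lock word. The release side is an assumption here, since KPMLOCK_EXIT is defined in the elided lines below; names are illustrative.

        /* Illustrative C11 model of KPMLOCK_ENTER/KPMLOCK_EXIT. */
        #include <stdatomic.h>
        #include <stdint.h>

        static void
        kpmlock_enter(_Atomic uint32_t *lck)
        {
                uint32_t free = 0;

                /* casa [lck]asi, %g0, 0xff; retry while old value != 0 */
                while (!atomic_compare_exchange_weak_explicit(lck, &free,
                    0xff, memory_order_acquire, memory_order_relaxed))
                        free = 0;
        }

        static void
        kpmlock_exit(_Atomic uint32_t *lck)     /* assumed release side */
        {
                atomic_store_explicit(lck, 0, memory_order_release);
        }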
(524 lines elided)
4656 4451          /* g3=hlck_pa */
4657 4452          KPMLOCK_EXIT(%g3, ASI_MEM)
4658 4453          ba,pt   %icc, sfmmu_kpm_exception
4659 4454            nop
4660 4455          SET_SIZE(sfmmu_kpm_dtsb_miss_small)
4661 4456  
4662 4457  #if (1<< KPMTSBM_SHIFT) != KPMTSBM_SIZE
4663 4458  #error - KPMTSBM_SHIFT does not correspond to size of kpmtsbm struct
4664 4459  #endif
4665 4460  
4666      -#endif /* lint */
4667      -
4668      -#ifdef  lint
4669      -/*
4670      - * Enable/disable tsbmiss handling at trap level for a kpm (large) page.
4671      - * Called from C-level, sets/clears "go" indication for trap level handler.
4672      - * khl_lock is a low level spin lock to protect the kp_tsbmtl field.
4673      - * Assumed that &kp->kp_refcntc is checked for zero or -1 at C-level.
4674      - * Assumes khl_mutex is held when called from C-level.
4675      - */
4676      -/* ARGSUSED */
4677      -void
4678      -sfmmu_kpm_tsbmtl(short *kp_refcntc, uint_t *khl_lock, int cmd)
4679      -{
4680      -}
4681      -
4682      -/*
4683      - * kpm_smallpages: stores val to byte at address mapped within
4684      - * low level lock brackets. The old value is returned.
4685      - * Called from C-level.
4686      - */
4687      -/* ARGSUSED */
4688      -int
4689      -sfmmu_kpm_stsbmtl(uchar_t *mapped, uint_t *kshl_lock, int val)
4690      -{
4691      -        return (0);
4692      -}
4693      -
4694      -#else /* lint */
4695      -
4696 4461          .seg    ".data"
4697 4462  sfmmu_kpm_tsbmtl_panic:
4698 4463          .ascii  "sfmmu_kpm_tsbmtl: interrupts disabled"
4699 4464          .byte   0
4700 4465  sfmmu_kpm_stsbmtl_panic:
4701 4466          .ascii  "sfmmu_kpm_stsbmtl: interrupts disabled"
4702 4467          .byte   0
4703 4468          .align  4
4704 4469          .seg    ".text"
4705 4470  
(56 lines elided)
4762 4527          KPMLOCK_ENTER(%o1, %o4, kpmstsbmtl1, ASI_N)
4763 4528          ldsb    [%o0], %o5
4764 4529          stb     %o2, [%o0]
4765 4530          KPMLOCK_EXIT(%o1, ASI_N)
4766 4531  
4767 4532          and     %o5, KPM_MAPPED_MASK, %o0       /* return old val */
4768 4533          retl
4769 4534            wrpr  %g0, %o3, %pstate               /* enable interrupts */
4770 4535          SET_SIZE(sfmmu_kpm_stsbmtl)
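
With the lock model sketched earlier, the visible body of sfmmu_kpm_stsbmtl reduces to the following C sketch. The interrupt disable/enable bracketing it is omitted here, and KPM_MAPPED_MASK is taken from the source rather than defined:

        /* Illustrative sketch of sfmmu_kpm_stsbmtl's effect. */
        int
        kpm_stsbmtl_sketch(uint8_t *mapped, _Atomic uint32_t *kshl_lock,
            int val)
        {
                int old;

                kpmlock_enter(kshl_lock);       /* KPMLOCK_ENTER, ASI_N */
                old = *mapped;                  /* ldsb [%o0], %o5 */
                *mapped = (uint8_t)val;         /* stb %o2, [%o0] */
                kpmlock_exit(kshl_lock);        /* KPMLOCK_EXIT */

                return (old & KPM_MAPPED_MASK); /* return old state */
        }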
4771 4536  
4772      -#endif /* lint */
4773      -
4774      -#ifndef lint
4775 4537  #ifdef sun4v
4776 4538          /*
 4777 4539           * User/kernel data miss w/ multiple TSBs
4778 4540           * The first probe covers 8K, 64K, and 512K page sizes,
4779 4541           * because 64K and 512K mappings are replicated off 8K
4780 4542           * pointer.  Second probe covers 4M page size only.
4781 4543           *
4782 4544           * MMU fault area contains miss address and context.
4783 4545           */
4784 4546          ALTENTRY(sfmmu_slow_dmmu_miss)
(61 lines elided)
4846 4608           * pointer.  Second probe covers 4M page size only.
4847 4609           *
4848 4610           * MMU fault area contains miss address and context.
4849 4611           */
4850 4612          ALTENTRY(sfmmu_slow_immu_miss)
4851 4613          GET_MMU_I_PTAGACC_CTXTYPE(%g2, %g3)
4852 4614          ba,a,pt %xcc, slow_miss_common
4853 4615          SET_SIZE(sfmmu_slow_immu_miss)
4854 4616  
4855 4617  #endif /* sun4v */
4856      -#endif  /* lint */
4857 4618  
4858      -#ifndef lint
4859      -
4860 4619  /*
4861 4620   * Per-CPU tsbmiss areas to avoid cache misses in TSB miss handlers.
4862 4621   */
4863 4622          .seg    ".data"
4864 4623          .align  64
4865 4624          .global tsbmiss_area
4866 4625  tsbmiss_area:
4867 4626          .skip   (TSBMISS_SIZE * NCPU)
4868 4627  
4869 4628          .align  64
4870 4629          .global kpmtsbm_area
4871 4630  kpmtsbm_area:
4872 4631          .skip   (KPMTSBM_SIZE * NCPU)
4873      -#endif  /* lint */
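
The two reservations above are per-CPU arrays; an illustrative C equivalent, with struct names per hat_sfmmu.h and alignment matching the .align 64 directives:

        /* Illustrative: what the .skip reservations lay out. */
        struct tsbmiss tsbmiss_area[NCPU]
            __attribute__((aligned(64)));       /* TSBMISS_SIZE * NCPU */
        struct kpmtsbm kpmtsbm_area[NCPU]
            __attribute__((aligned(64)));       /* KPMTSBM_SIZE * NCPU */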
    