de-linting of .s files

@@ -27,15 +27,11 @@
 /*
  * SFMMU primitives.  These primitives should only be used by sfmmu
  * routines.
  */
 
-#if defined(lint)
-#include <sys/types.h>
-#else   /* lint */
 #include "assym.h"
-#endif  /* lint */
 
 #include <sys/asm_linkage.h>
 #include <sys/machtrap.h>
 #include <sys/machasi.h>
 #include <sys/sun4asi.h>

@@ -65,23 +61,17 @@
 
 #define TT_TRACE(label)
 
 #endif /* TRAPTRACE */
 
-#ifndef lint
-
 #if (TTE_SUSPEND_SHIFT > 0)
 #define TTE_SUSPEND_INT_SHIFT(reg)                              \
         sllx    reg, TTE_SUSPEND_SHIFT, reg
 #else
 #define TTE_SUSPEND_INT_SHIFT(reg)
 #endif
 
-#endif /* lint */
-
-#ifndef lint
-
 /*
  * Assumes TSBE_TAG is 0
  * Assumes TSBE_INTHI is 0
  * Assumes TSBREG.split is 0
  */

@@ -468,66 +458,11 @@
         or      dest, %lo(RUNTIME_PATCH), dest                          ;\
         sllx    tmp, 32, tmp                                            ;\
         nop                             /* for perf reasons */          ;\
         or      tmp, dest, dest         /* contents of patched value */
 
-#endif /* lint */
 
-
-#if defined (lint)
-
-/*
- * sfmmu related subroutines
- */
-uint_t
-sfmmu_disable_intrs()
-{ return(0); }
-
-/* ARGSUSED */
-void
-sfmmu_enable_intrs(uint_t pstate_save)
-{}
-
-/* ARGSUSED */
-int
-sfmmu_alloc_ctx(sfmmu_t *sfmmup, int allocflag, struct cpu *cp, int shflag)
-{ return(0); }
-
-/*
- * Use cas, if tte has changed underneath us then reread and try again.
- * In the case of a retry, it will update sttep with the new original.
- */
-/* ARGSUSED */
-int
-sfmmu_modifytte(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
-{ return(0); }
-
-/*
- * Use cas, if tte has changed underneath us then return 1, else return 0
- */
-/* ARGSUSED */
-int
-sfmmu_modifytte_try(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
-{ return(0); }
-
-/* ARGSUSED */
-void
-sfmmu_copytte(tte_t *sttep, tte_t *dttep)
-{}
-
-/*ARGSUSED*/
-struct tsbe *
-sfmmu_get_tsbe(uint64_t tsbeptr, caddr_t vaddr, int vpshift, int tsb_szc)
-{ return(0); }
-
-/*ARGSUSED*/
-uint64_t
-sfmmu_make_tsbtag(caddr_t va)
-{ return(0); }
-
-#else   /* lint */
-
         .seg    ".data"
         .global sfmmu_panic1
 sfmmu_panic1:
         .asciz  "sfmmu_asm: interrupts already disabled"
 

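The stubs removed above also carried the contract for sfmmu_modifytte() and sfmmu_modifytte_try(): compare-and-swap a TTE and, on a miss, hand the caller the value that changed underneath it. Below is a minimal C sketch of that contract, assuming the TTE fits in one 64-bit word and using the illumos atomic_cas_64() primitive; the function names and the single-word simplification are illustrative, not the kernel routines.

	#include <sys/types.h>
	#include <atomic.h>

	/*
	 * Sketch only: one CAS attempt. Returns 1 if *dttep changed
	 * underneath us (the new original is stored back through sttep
	 * so the caller can retry), 0 if the swap took.
	 */
	static int
	modifytte_try_sketch(uint64_t *sttep, uint64_t *stmodttep,
	    volatile uint64_t *dttep)
	{
		uint64_t old = *sttep;
		uint64_t cur = atomic_cas_64(dttep, old, *stmodttep);

		if (cur == old)
			return (0);	/* swap succeeded */
		*sttep = cur;		/* refresh the caller's original */
		return (1);
	}

	/* The retrying variant is just a loop around one attempt. */
	static void
	modifytte_sketch(uint64_t *sttep, uint64_t *stmodttep,
	    volatile uint64_t *dttep)
	{
		while (modifytte_try_sketch(sttep, stmodttep, dttep) != 0)
			continue;	/* *sttep was reread; try again */
	}
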
@@ -929,64 +864,15 @@
         ENTRY_NP(sfmmu_make_tsbtag)
         retl
         srln    %o0, TTARGET_VA_SHIFT, %o0
         SET_SIZE(sfmmu_make_tsbtag)
 
-#endif /* lint */
-
 /*
  * Other sfmmu primitives
  */
 
 
-#if defined (lint)
-void
-sfmmu_patch_ktsb(void)
-{
-}
-
-void
-sfmmu_kpm_patch_tlbm(void)
-{
-}
-
-void
-sfmmu_kpm_patch_tsbm(void)
-{
-}
-
-void
-sfmmu_patch_shctx(void)
-{
-}
-
-/* ARGSUSED */
-void
-sfmmu_load_tsbe(struct tsbe *tsbep, uint64_t vaddr, tte_t *ttep, int phys)
-{
-}
-
-/* ARGSUSED */
-void
-sfmmu_unload_tsbe(struct tsbe *tsbep, uint64_t vaddr, int phys)
-{
-}
-
-/* ARGSUSED */
-void
-sfmmu_kpm_load_tsb(caddr_t addr, tte_t *ttep, int vpshift)
-{
-}
-
-/* ARGSUSED */
-void
-sfmmu_kpm_unload_tsb(caddr_t addr, int vpshift)
-{
-}
-
-#else /* lint */
-
 #define I_SIZE          4
 
         ENTRY_NP(sfmmu_fix_ktlb_traptable)
         /*
          * %o0 = start of patch area

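For reference, the sfmmu_make_tsbtag() body kept above is a single srln (srl/srlx) of the VA. A hedged one-line C equivalent, assuming TTARGET_VA_SHIFT from the sfmmu headers:

	/* Sketch: the TSB tag target is just the VA shifted down. */
	static uint64_t
	make_tsbtag_sketch(uintptr_t va)
	{
		return ((uint64_t)va >> TTARGET_VA_SHIFT);
	}
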
@@ -1560,22 +1446,11 @@
 
         retl
           membar        #StoreStore|#StoreLoad
         SET_SIZE(sfmmu_kpm_unload_tsb)
 
-#endif /* lint */
 
-
-#if defined (lint)
-
-/*ARGSUSED*/
-pfn_t
-sfmmu_ttetopfn(tte_t *tte, caddr_t vaddr)
-{ return(0); }
-
-#else /* lint */
-
         ENTRY_NP(sfmmu_ttetopfn)
         ldx     [%o0], %g1                      /* read tte */
         TTETOPFN(%g1, %o1, sfmmu_ttetopfn_l1, %g2, %g3, %g4)
         /*
          * g1 = pfn

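sfmmu_ttetopfn() above loads the TTE and runs it through the TTETOPFN() macro. A rough C sketch of the idea follows; the mask is a placeholder rather than the real sun4u TTE bit layout, and the large-page handling TTETOPFN does with its vaddr argument is omitted:

	#define	SKETCH_PA_MASK	0x000001ffffffe000ULL	/* assumed PA<40:13> */
	#define	SKETCH_PAGESHIFT	13		/* 8K base pages */

	/* Sketch only: PA field out of the TTE, shifted down to a pfn. */
	static uint64_t
	ttetopfn_sketch(uint64_t tte)
	{
		return ((tte & SKETCH_PA_MASK) >> SKETCH_PAGESHIFT);
	}
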
@@ -1582,12 +1457,10 @@
          */
         retl
         mov     %g1, %o0
         SET_SIZE(sfmmu_ttetopfn)
 
-#endif /* !lint */
-
 /*
  * These macros are used to update global sfmmu hme hash statistics
  * in perf critical paths. It is only enabled in debug kernels or
  * if SFMMU_STAT_GATHER is defined
  */

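The comment above describes statistics macros that compile away entirely outside debug/SFMMU_STAT_GATHER kernels. A hypothetical C analogue of that pattern (the macro and field names here are invented for illustration):

	#if defined(DEBUG) || defined(SFMMU_STAT_GATHER)
	#define	HME_HASH_STAT_INCR(sfmmup)	((sfmmup)->sf_hash_searches++)
	#else
	/* expands to nothing: zero cost in the perf-critical path */
	#define	HME_HASH_STAT_INCR(sfmmup)
	#endif
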
@@ -1679,40 +1552,10 @@
 label:
 #else
 #define KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label)
 #endif  /* KPM_TLBMISS_STATS_GATHER */
 
-#if defined (lint)
-/*
- * The following routines are jumped to from the mmu trap handlers to do
- * the setting up to call systrap.  They are separate routines instead of
- * being part of the handlers because the handlers would exceed 32
- * instructions and since this is part of the slow path the jump
- * cost is irrelevant.
- */
-void
-sfmmu_pagefault(void)
-{
-}
-
-void
-sfmmu_mmu_trap(void)
-{
-}
-
-void
-sfmmu_window_trap(void)
-{
-}
-
-void
-sfmmu_kpm_exception(void)
-{
-}
-
-#else /* lint */
-
 #ifdef  PTL1_PANIC_DEBUG
         .seg    ".data"
         .global test_ptl1_panic
 test_ptl1_panic:
         .word   0

@@ -1958,31 +1801,10 @@
         or      %g1, %lo(trap), %g1
         ba,pt   %xcc, sys_trap
         mov     -1, %g4
         SET_SIZE(sfmmu_kpm_exception)
 
-#endif /* lint */
-
-#if defined (lint)
-
-void
-sfmmu_tsb_miss(void)
-{
-}
-
-void
-sfmmu_kpm_dtsb_miss(void)
-{
-}
-
-void
-sfmmu_kpm_dtsb_miss_small(void)
-{
-}
-
-#else /* lint */
-
 #if (IMAP_SEG != 0)
 #error - ism_map->ism_seg offset is not zero
 #endif
 
 /*

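The #if (IMAP_SEG != 0) guard above fails the build if the layout assumption the assembly depends on ever changes. A hedged C11 equivalent of the same technique, with an invented stand-in structure:

	#include <stddef.h>
	#include <stdint.h>

	/* Illustrative stand-in for ism_map; not the real structure. */
	typedef struct ism_map_sketch {
		uint64_t	ism_seg;	/* asm assumes offset 0 */
		void		*ism_hat;
	} ism_map_sketch_t;

	/* Same effect as the #error above: break the compile, not the kernel. */
	_Static_assert(offsetof(ism_map_sketch_t, ism_seg) == 0,
	    "assembly assumes ism_seg is at offset 0");
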
@@ -3869,34 +3691,11 @@
           nop
         TSTAT_CHECK_TL1(sfmmu_mmu_trap, %g1, %g2)
         ba,pt   %icc, sfmmu_window_trap
           nop
         SET_SIZE(sfmmu_tsb_miss)
-#endif  /* lint */
 
-#if defined (lint)
-/*
- * This routine will look for a user or kernel vaddr in the hash
- * structure.  It returns a valid pfn or PFN_INVALID.  It doesn't
- * grab any locks.  It should only be used by other sfmmu routines.
- */
-/* ARGSUSED */
-pfn_t
-sfmmu_vatopfn(caddr_t vaddr, sfmmu_t *sfmmup, tte_t *ttep)
-{
-        return(0);
-}
-
-/* ARGSUSED */
-pfn_t
-sfmmu_kvaszc2pfn(caddr_t vaddr, int hashno)
-{
-        return(0);
-}
-
-#else /* lint */
-
         ENTRY_NP(sfmmu_vatopfn)
         /*
          * disable interrupts
          */
         rdpr    %pstate, %o3

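The removed stub's comment states the sfmmu_vatopfn() contract: a valid pfn or PFN_INVALID, with no locks taken. A hedged caller-side sketch; the types and PFN_INVALID come from the sfmmu headers (e.g. <vm/hat_sfmmu.h>):

	/* Sketch of how the contract is consumed; not actual kernel code. */
	static int
	mapping_exists_sketch(caddr_t vaddr, sfmmu_t *sfmmup)
	{
		tte_t tte;
		pfn_t pfn = sfmmu_vatopfn(vaddr, sfmmup, &tte);

		if (pfn == PFN_INVALID)
			return (0);	/* no valid mapping here */
		return (1);		/* tte now holds the mapping's TTE */
	}
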
@@ -4111,16 +3910,12 @@
         retl
           wrpr  %g0, %o3, %pstate               /* re-enable interrupts */
 
         SET_SIZE(sfmmu_kvaszc2pfn)
 
-#endif /* lint */
 
 
-
-#if !defined(lint)
-
 /*
  * kpm lock used between trap level tsbmiss handler and kpm C level.
  */
 #define KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi)                       \
         mov     0xff, tmp1                                              ;\

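KPMLOCK_ENTER above starts by loading 0xff, which is then swapped into the lock byte until the old value comes back zero (the rest of the macro body falls outside this hunk). A minimal C sketch of that byte-spin-lock protocol, using the illumos atomic_swap_uchar() and membar primitives; the function names are invented:

	#include <sys/types.h>
	#include <atomic.h>

	static void
	kpmlock_enter_sketch(volatile uchar_t *lckp)
	{
		while (atomic_swap_uchar(lckp, 0xff) != 0)
			continue;	/* spin until old value was 0 */
		membar_enter();		/* acquire ordering */
	}

	static void
	kpmlock_exit_sketch(volatile uchar_t *lckp)
	{
		membar_exit();		/* release ordering */
		*lckp = 0;
	}
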
@@ -4661,40 +4456,10 @@
 
 #if (1<< KPMTSBM_SHIFT) != KPMTSBM_SIZE
 #error - KPMTSBM_SHIFT does not correspond to size of kpmtsbm struct
 #endif
 
-#endif /* lint */
-
-#ifdef  lint
-/*
- * Enable/disable tsbmiss handling at trap level for a kpm (large) page.
- * Called from C-level, sets/clears "go" indication for trap level handler.
- * khl_lock is a low level spin lock to protect the kp_tsbmtl field.
- * Assumed that &kp->kp_refcntc is checked for zero or -1 at C-level.
- * Assumes khl_mutex is held when called from C-level.
- */
-/* ARGSUSED */
-void
-sfmmu_kpm_tsbmtl(short *kp_refcntc, uint_t *khl_lock, int cmd)
-{
-}
-
-/*
- * kpm_smallpages: stores val to byte at address mapped within
- * low level lock brackets. The old value is returned.
- * Called from C-level.
- */
-/* ARGSUSED */
-int
-sfmmu_kpm_stsbmtl(uchar_t *mapped, uint_t *kshl_lock, int val)
-{
-        return (0);
-}
-
-#else /* lint */
-
         .seg    ".data"
 sfmmu_kpm_tsbmtl_panic:
         .ascii  "sfmmu_kpm_tsbmtl: interrupts disabled"
         .byte   0
 sfmmu_kpm_stsbmtl_panic:

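Per the removed comments, sfmmu_kpm_stsbmtl() stores a new state byte within low-level lock brackets and returns the old value (the assembly additionally masks it with KPM_MAPPED_MASK, visible in the next hunk). A hedged C rendering that reuses the kpmlock_*_sketch pair from the earlier sketch; the real routine takes a uint_t *kshl_lock and runs with interrupts disabled around the brackets:

	extern void kpmlock_enter_sketch(volatile uchar_t *);
	extern void kpmlock_exit_sketch(volatile uchar_t *);

	static int
	kpm_stsbmtl_sketch(volatile uchar_t *mapped,
	    volatile uchar_t *kshl_lock, uchar_t val)
	{
		uchar_t old;

		kpmlock_enter_sketch(kshl_lock);
		old = *mapped;		/* remember the old state byte */
		*mapped = val;		/* publish the new one */
		kpmlock_exit_sketch(kshl_lock);

		return (old);		/* asm also applies KPM_MAPPED_MASK */
	}
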
@@ -4767,13 +4532,10 @@
         and     %o5, KPM_MAPPED_MASK, %o0       /* return old val */
         retl
           wrpr  %g0, %o3, %pstate               /* enable interrupts */
         SET_SIZE(sfmmu_kpm_stsbmtl)
 
-#endif /* lint */
-
-#ifndef lint
 #ifdef sun4v
         /*
          * User/kernel data miss w/ multiple TSBs
          * The first probe covers 8K, 64K, and 512K page sizes,
          * because 64K and 512K mappings are replicated off 8K

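The (truncated) comment above explains the sun4v first-probe trick: 64K and 512K mappings are replicated into every 8K-indexed slot they span, so the miss handler computes a single 8K-based TSB index. A hedged sketch of that index computation, with illustrative constants:

	#define	SKETCH_MMU_PAGESHIFT	13	/* 8K base page */
	#define	SKETCH_TSB_ENTRIES	512	/* assumed TSB entry count */

	/*
	 * One probe from the 8K page number: because a larger mapping was
	 * entered at each 8K-aligned slot it covers, this single index
	 * still finds it without a per-page-size probe.
	 */
	static unsigned int
	tsb_index_8k_sketch(uint64_t vaddr)
	{
		return ((vaddr >> SKETCH_MMU_PAGESHIFT) &
		    (SKETCH_TSB_ENTRIES - 1));
	}
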
@@ -4851,14 +4613,11 @@
         GET_MMU_I_PTAGACC_CTXTYPE(%g2, %g3)
         ba,a,pt %xcc, slow_miss_common
         SET_SIZE(sfmmu_slow_immu_miss)
 
 #endif /* sun4v */
-#endif  /* lint */
 
-#ifndef lint
-
 /*
  * Per-CPU tsbmiss areas to avoid cache misses in TSB miss handlers.
  */
         .seg    ".data"
         .align  64

@@ -4868,6 +4627,5 @@
 
         .align  64
         .global kpmtsbm_area
 kpmtsbm_area:
         .skip   (KPMTSBM_SIZE * NCPU)
-#endif  /* lint */
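The reservations above give each CPU its own 64-byte-aligned area so miss handlers on different CPUs never false-share a cache line. A C analogue of that layout; the structure contents and the NCPU value are illustrative:

	#include <stdint.h>

	#define	SKETCH_CACHE_LINE	64
	#define	SKETCH_NCPU		256	/* NCPU is per-platform */

	/* Stand-in for the per-CPU kpmtsbm state; fields invented. */
	struct kpmtsbm_sketch {
		uint64_t	miss_count;
		uint64_t	scratch;
	} __attribute__((aligned(SKETCH_CACHE_LINE)));

	/* Like ".align 64" + ".skip KPMTSBM_SIZE * NCPU" above. */
	static struct kpmtsbm_sketch kpmtsbm_area_sketch[SKETCH_NCPU];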