de-linting of .s files

*** 27,41 ****
  /*
   * SFMMU primitives.  These primitives should only be used by sfmmu
   * routines.
   */
  
- #if defined(lint)
- #include <sys/types.h>
- #else	/* lint */
  #include "assym.h"
- #endif	/* lint */
  
  #include <sys/asm_linkage.h>
  #include <sys/machtrap.h>
  #include <sys/machasi.h>
  #include <sys/sun4asi.h>
--- 27,37 ----
*** 65,87 ****
  
  #define	TT_TRACE(label)
  
  #endif /* TRAPTRACE */
  
- #ifndef lint
- 
  #if (TTE_SUSPEND_SHIFT > 0)
  #define	TTE_SUSPEND_INT_SHIFT(reg)				\
  	sllx	reg, TTE_SUSPEND_SHIFT, reg
  #else
  #define	TTE_SUSPEND_INT_SHIFT(reg)
  #endif
  
- #endif /* lint */
- 
- #ifndef lint
- 
  /*
   * Assumes TSBE_TAG is 0
   * Assumes TSBE_INTHI is 0
   * Assumes TSBREG.split is 0
   */
--- 61,77 ----
*** 468,533 ****
  	or	dest, %lo(RUNTIME_PATCH), dest			;\
  	sllx	tmp, 32, tmp					;\
  	nop	/* for perf reasons */				;\
  	or	tmp, dest, dest	/* contents of patched value */
  
- #endif /* lint */
- 
- #if defined (lint)
- 
- /*
-  * sfmmu related subroutines
-  */
- uint_t
- sfmmu_disable_intrs()
- { return(0); }
- 
- /* ARGSUSED */
- void
- sfmmu_enable_intrs(uint_t pstate_save)
- {}
- 
- /* ARGSUSED */
- int
- sfmmu_alloc_ctx(sfmmu_t *sfmmup, int allocflag, struct cpu *cp, int shflag)
- { return(0); }
- 
- /*
-  * Use cas, if tte has changed underneath us then reread and try again.
-  * In the case of a retry, it will update sttep with the new original.
-  */
- /* ARGSUSED */
- int
- sfmmu_modifytte(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
- { return(0); }
- 
- /*
-  * Use cas, if tte has changed underneath us then return 1, else return 0
-  */
- /* ARGSUSED */
- int
- sfmmu_modifytte_try(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
- { return(0); }
- 
- /* ARGSUSED */
- void
- sfmmu_copytte(tte_t *sttep, tte_t *dttep)
- {}
- 
- /*ARGSUSED*/
- struct tsbe *
- sfmmu_get_tsbe(uint64_t tsbeptr, caddr_t vaddr, int vpshift, int tsb_szc)
- { return(0); }
- 
- /*ARGSUSED*/
- uint64_t
- sfmmu_make_tsbtag(caddr_t va)
- { return(0); }
- 
- #else /* lint */
- 
  	.seg	".data"
  	.global	sfmmu_panic1
  sfmmu_panic1:
  	.asciz	"sfmmu_asm: interrupts already disabled"
--- 458,468 ----
*** 929,992 ****
  	ENTRY_NP(sfmmu_make_tsbtag)
  	retl
  	srln	%o0, TTARGET_VA_SHIFT, %o0
  	SET_SIZE(sfmmu_make_tsbtag)
  
- #endif /* lint */
- 
  /*
   * Other sfmmu primitives
   */
  
- #if defined (lint)
- void
- sfmmu_patch_ktsb(void)
- {
- }
- 
- void
- sfmmu_kpm_patch_tlbm(void)
- {
- }
- 
- void
- sfmmu_kpm_patch_tsbm(void)
- {
- }
- 
- void
- sfmmu_patch_shctx(void)
- {
- }
- 
- /* ARGSUSED */
- void
- sfmmu_load_tsbe(struct tsbe *tsbep, uint64_t vaddr, tte_t *ttep, int phys)
- {
- }
- 
- /* ARGSUSED */
- void
- sfmmu_unload_tsbe(struct tsbe *tsbep, uint64_t vaddr, int phys)
- {
- }
- 
- /* ARGSUSED */
- void
- sfmmu_kpm_load_tsb(caddr_t addr, tte_t *ttep, int vpshift)
- {
- }
- 
- /* ARGSUSED */
- void
- sfmmu_kpm_unload_tsb(caddr_t addr, int vpshift)
- {
- }
- 
- #else /* lint */
- 
  #define	I_SIZE		4
  
  	ENTRY_NP(sfmmu_fix_ktlb_traptable)
  	/*
  	 * %o0 = start of patch area
--- 864,878 ----
*** 1560,1581 ****
  	retl
  	membar	#StoreStore|#StoreLoad
  	SET_SIZE(sfmmu_kpm_unload_tsb)
  
- #endif /* lint */
- 
- #if defined (lint)
- 
- /*ARGSUSED*/
- pfn_t
- sfmmu_ttetopfn(tte_t *tte, caddr_t vaddr)
- { return(0); }
- 
- #else /* lint */
- 
  	ENTRY_NP(sfmmu_ttetopfn)
  	ldx	[%o0], %g1			/* read tte */
  	TTETOPFN(%g1, %o1, sfmmu_ttetopfn_l1, %g2, %g3, %g4)
  	/*
  	 * g1 = pfn
--- 1446,1456 ----
*** 1582,1593 ****
  	 */
  	retl
  	mov	%g1, %o0
  	SET_SIZE(sfmmu_ttetopfn)
  
- #endif /* !lint */
- 
  /*
   * These macros are used to update global sfmmu hme hash statistics
   * in perf critical paths. It is only enabled in debug kernels or
   * if SFMMU_STAT_GATHER is defined
   */
--- 1457,1466 ----
*** 1679,1718 ****
  label:
  #else
  #define	KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label)
  #endif	/* KPM_TLBMISS_STATS_GATHER */
  
- #if defined (lint)
- /*
-  * The following routines are jumped to from the mmu trap handlers to do
-  * the setting up to call systrap. They are separate routines instead of
-  * being part of the handlers because the handlers would exceed 32
-  * instructions and since this is part of the slow path the jump
-  * cost is irrelevant.
-  */
- void
- sfmmu_pagefault(void)
- {
- }
- 
- void
- sfmmu_mmu_trap(void)
- {
- }
- 
- void
- sfmmu_window_trap(void)
- {
- }
- 
- void
- sfmmu_kpm_exception(void)
- {
- }
- 
- #else /* lint */
- 
  #ifdef	PTL1_PANIC_DEBUG
  	.seg	".data"
  	.global	test_ptl1_panic
  test_ptl1_panic:
  	.word	0
--- 1552,1561 ----
*** 1958,1988 ****
  	or	%g1, %lo(trap), %g1
  	ba,pt	%xcc, sys_trap
  	mov	-1, %g4
  	SET_SIZE(sfmmu_kpm_exception)
  
- #endif /* lint */
- 
- #if defined (lint)
- 
- void
- sfmmu_tsb_miss(void)
- {
- }
- 
- void
- sfmmu_kpm_dtsb_miss(void)
- {
- }
- 
- void
- sfmmu_kpm_dtsb_miss_small(void)
- {
- }
- 
- #else /* lint */
- 
  #if (IMAP_SEG != 0)
  #error - ism_map->ism_seg offset is not zero
  #endif
  
  /*
--- 1801,1810 ----
*** 3869,3902 ****
  	nop
  	TSTAT_CHECK_TL1(sfmmu_mmu_trap, %g1, %g2)
  	ba,pt	%icc, sfmmu_window_trap
  	nop
  	SET_SIZE(sfmmu_tsb_miss)
  
- #endif /* lint */
- #if defined (lint)
- 
- /*
-  * This routine will look for a user or kernel vaddr in the hash
-  * structure.  It returns a valid pfn or PFN_INVALID.  It doesn't
-  * grab any locks.  It should only be used by other sfmmu routines.
-  */
- /* ARGSUSED */
- pfn_t
- sfmmu_vatopfn(caddr_t vaddr, sfmmu_t *sfmmup, tte_t *ttep)
- {
- 	return(0);
- }
- 
- /* ARGSUSED */
- pfn_t
- sfmmu_kvaszc2pfn(caddr_t vaddr, int hashno)
- {
- 	return(0);
- }
- 
- #else /* lint */
- 
  	ENTRY_NP(sfmmu_vatopfn)
  	/*
  	 * disable interrupts
  	 */
  	rdpr	%pstate, %o3
--- 3691,3701 ----
*** 4111,4126 ****
  	retl
  	wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
  	SET_SIZE(sfmmu_kvaszc2pfn)
  
- #endif /* lint */
- 
- #if !defined(lint)
- 
  /*
   * kpm lock used between trap level tsbmiss handler and kpm C level.
   */
  
  #define	KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi)			\
  	mov	0xff, tmp1						;\
--- 3910,3921 ----
*** 4661,4700 ****
  #if (1<< KPMTSBM_SHIFT) != KPMTSBM_SIZE
  #error - KPMTSBM_SHIFT does not correspond to size of kpmtsbm struct
  #endif
  
- #endif /* lint */
- 
- #ifdef lint
- /*
-  * Enable/disable tsbmiss handling at trap level for a kpm (large) page.
-  * Called from C-level, sets/clears "go" indication for trap level handler.
-  * khl_lock is a low level spin lock to protect the kp_tsbmtl field.
-  * Assumed that &kp->kp_refcntc is checked for zero or -1 at C-level.
-  * Assumes khl_mutex is held when called from C-level.
-  */
- /* ARGSUSED */
- void
- sfmmu_kpm_tsbmtl(short *kp_refcntc, uint_t *khl_lock, int cmd)
- {
- }
- 
- /*
-  * kpm_smallpages: stores val to byte at address mapped within
-  * low level lock brackets. The old value is returned.
-  * Called from C-level.
-  */
- /* ARGSUSED */
- int
- sfmmu_kpm_stsbmtl(uchar_t *mapped, uint_t *kshl_lock, int val)
- {
- 	return (0);
- }
- 
- #else /* lint */
- 
  	.seg	".data"
  sfmmu_kpm_tsbmtl_panic:
  	.ascii	"sfmmu_kpm_tsbmtl: interrupts disabled"
  	.byte	0
  sfmmu_kpm_stsbmtl_panic:
--- 4456,4465 ----
*** 4767,4779 ****
  	and	%o5, KPM_MAPPED_MASK, %o0	/* return old val */
  	retl
  	wrpr	%g0, %o3, %pstate		/* enable interrupts */
  	SET_SIZE(sfmmu_kpm_stsbmtl)
  
- #endif /* lint */
- 
- #ifndef lint
  #ifdef sun4v
  /*
   * User/kernel data miss w// multiple TSBs
   * The first probe covers 8K, 64K, and 512K page sizes,
   * because 64K and 512K mappings are replicated off 8K
--- 4532,4541 ----
*** 4851,4864 ****
  	GET_MMU_I_PTAGACC_CTXTYPE(%g2, %g3)
  	ba,a,pt	%xcc, slow_miss_common
  	SET_SIZE(sfmmu_slow_immu_miss)
  
  #endif /* sun4v */
- #endif /* lint */
- #ifndef lint
- 
  /*
   * Per-CPU tsbmiss areas to avoid cache misses in TSB miss handlers.
   */
  	.seg	".data"
  	.align	64
--- 4613,4623 ----
*** 4868,4873 ****
  	.align	64
  	.global	kpmtsbm_area
  kpmtsbm_area:
  	.skip	(KPMTSBM_SIZE * NCPU)
  
- #endif /* lint */
--- 4627,4631 ----