de-linting of .s files
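Each routine in this file used to carry a lint stub: an empty C function body inside a #if defined(lint) guard so that lint could type-check callers of the assembly entry points, with the real SPARC implementation in the #else branch. Since the assembly sources are no longer run through lint, the hunks below delete the stubs and their guards and keep only the ENTRY/SET_SIZE bodies. As a minimal sketch of the pattern being removed (using vtag_flushpage from the first hunks; the assembly body is elided here for illustration), each function changes from

    #if defined(lint)

    /* ARGSUSED */
    void
    vtag_flushpage(caddr_t vaddr, uint64_t sfmmup)
    {}

    #else   /* lint */

            ENTRY_NP(vtag_flushpage)
            /* ... flush the page from the TLB ... */
            SET_SIZE(vtag_flushpage)

    #endif  /* lint */

to just the ENTRY_NP/SET_SIZE body, with the C prototypes presumably supplied by ordinary headers rather than by the stubs.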

*** 23,35 ****
 * Use is subject to license terms.
 *
 * Assembly code support for Cheetah/Cheetah+ modules
 */
- #if !defined(lint)
#include "assym.h"
- #endif /* !lint */
#include <sys/asm_linkage.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <sys/machparam.h>
--- 23,33 ----
*** 49,60 ****
#ifdef TRAPTRACE
#include <sys/traptrace.h>
#endif /* TRAPTRACE */
- #if !defined(lint)
-
/* BEGIN CSTYLED */
#define DCACHE_FLUSHPAGE(arg1, arg2, tmp1, tmp2, tmp3) \
        ldxa [%g0]ASI_DCU, tmp1 ;\
        btst DCU_DC, tmp1 /* is dcache enabled? */ ;\
--- 47,56 ----
*** 167,191 ****
        sub tmp2, tmp1, tmp2; \
1:
/* END CSTYLED */
- #endif /* !lint */
-
/*
 * Cheetah MMU and Cache operations.
 */
- #if defined(lint)
-
- /* ARGSUSED */
- void
- vtag_flushpage(caddr_t vaddr, uint64_t sfmmup)
- {}
-
- #else /* lint */
-
        ENTRY_NP(vtag_flushpage)
/*
 * flush page from the tlb
 *
 * %o0 = vaddr
--- 163,176 ----
*** 250,269 ****
        retl
        wrpr %g0, %o5, %pstate /* enable interrupts */
        SET_SIZE(vtag_flushpage)
- #endif /* lint */
-
- #if defined(lint)
-
- void
- vtag_flushall(void)
- {}
-
- #else /* lint */
-
        ENTRY_NP2(vtag_flushall, demap_all)
/*
 * flush the tlb
 */
        sethi %hi(FLUSH_ADDR), %o3
--- 235,244 ----
*** 274,295 ****
        retl
        nop
        SET_SIZE(demap_all)
        SET_SIZE(vtag_flushall)
- #endif /* lint */
-
- #if defined(lint)
-
- /* ARGSUSED */
- void
- vtag_flushpage_tl1(uint64_t vaddr, uint64_t sfmmup)
- {}
-
- #else /* lint */
-
        ENTRY_NP(vtag_flushpage_tl1)
/*
 * x-trap to flush page from tlb and tsb
 *
 * %g1 = vaddr, zero-extended on 32-bit kernel
--- 249,259 ----
*** 330,351 ****
        stxa %g0, [%g1]ASI_ITLB_DEMAP
        stxa %g5, [%g4]ASI_DMMU /* restore old ctxnum */
        retry
        SET_SIZE(vtag_flushpage_tl1)
- #endif /* lint */
-
- #if defined(lint)
-
- /* ARGSUSED */
- void
- vtag_flush_pgcnt_tl1(uint64_t vaddr, uint64_t sfmmup_pgcnt)
- {}
-
- #else /* lint */
-
        ENTRY_NP(vtag_flush_pgcnt_tl1)
/*
 * x-trap to flush pgcnt MMU_PAGESIZE pages from tlb
 *
 * %g1 = vaddr, zero-extended on 32-bit kernel
--- 294,304 ----
*** 420,440 ****
        stxa %g6, [%g4]ASI_DMMU /* restore old ctxnum */
        retry
        SET_SIZE(vtag_flush_pgcnt_tl1)
- #endif /* lint */
-
- #if defined(lint)
-
- /*ARGSUSED*/
- void
- vtag_flushall_tl1(uint64_t dummy1, uint64_t dummy2)
- {}
-
- #else /* lint */
-
        ENTRY_NP(vtag_flushall_tl1)
/*
 * x-trap to flush tlb
 */
        set DEMAP_ALL_TYPE, %g4
--- 373,382 ----
*** 441,462 ****
        stxa %g0, [%g4]ASI_DTLB_DEMAP
        stxa %g0, [%g4]ASI_ITLB_DEMAP
        retry
        SET_SIZE(vtag_flushall_tl1)
- #endif /* lint */
-
- #if defined(lint)
-
- /* ARGSUSED */
- void
- vac_flushpage(pfn_t pfnum, int vcolor)
- {}
-
- #else /* lint */
-
/*
 * vac_flushpage(pfnum, color)
 * Flush 1 8k page of the D-$ with physical page = pfnum
 * Algorithm:
 * The cheetah dcache is a 64k psuedo 4 way accaociative cache.
--- 383,393 ----
*** 477,498 ****
        DCACHE_FLUSHPAGE(%o0, %o1, %o2, %o3, %o4)
        retl
        nop
        SET_SIZE(vac_flushpage)
- #endif /* lint */
-
- #if defined(lint)
-
- /* ARGSUSED */
- void
- vac_flushpage_tl1(uint64_t pfnum, uint64_t vcolor)
- {}
-
- #else /* lint */
-
        ENTRY_NP(vac_flushpage_tl1)
/*
 * x-trap to flush page from the d$
 *
 * %g1 = pfnum, %g2 = color
--- 408,418 ----
*** 499,520 ****
 */
        DCACHE_FLUSHPAGE(%g1, %g2, %g3, %g4, %g5)
        retry
        SET_SIZE(vac_flushpage_tl1)
- #endif /* lint */
-
- #if defined(lint)
-
- /* ARGSUSED */
- void
- vac_flushcolor(int vcolor, pfn_t pfnum)
- {}
-
- #else /* lint */
-
        ENTRY(vac_flushcolor)
/*
 * %o0 = vcolor
 */
        DCACHE_FLUSHCOLOR(%o0, 0, %o1, %o2, %o3)
--- 419,429 ----
*** 523,544 ****
        DCACHE_FLUSHCOLOR(%o0, 3, %o1, %o2, %o3)
        retl
        nop
        SET_SIZE(vac_flushcolor)
- #endif /* lint */
-
- #if defined(lint)
-
- /* ARGSUSED */
- void
- vac_flushcolor_tl1(uint64_t vcolor, uint64_t pfnum)
- {}
-
- #else /* lint */
-
        ENTRY(vac_flushcolor_tl1)
/*
 * %g1 = vcolor
 */
        DCACHE_FLUSHCOLOR(%g1, 0, %g2, %g3, %g4)
--- 432,442 ----
*** 546,567 ****
        DCACHE_FLUSHCOLOR(%g1, 2, %g2, %g3, %g4)
        DCACHE_FLUSHCOLOR(%g1, 3, %g2, %g3, %g4)
        retry
        SET_SIZE(vac_flushcolor_tl1)
- #endif /* lint */
-
- #if defined(lint)
-
- int
- idsr_busy(void)
- {
- return (0);
- }
-
- #else /* lint */
-
/*
 * Determine whether or not the IDSR is busy.
 * Entry: no arguments
 * Returns: 1 if busy, 0 otherwise
 */
--- 444,453 ----
*** 574,599 ****
1:
        retl
        nop
        SET_SIZE(idsr_busy)
- #endif /* lint */
-
- #if defined(lint)
-
- /* ARGSUSED */
- void
- init_mondo(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
- {}
-
- /* ARGSUSED */
- void
- init_mondo_nocheck(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
- {}
-
- #else /* lint */
-
        .global _dispatch_status_busy
_dispatch_status_busy:
        .asciz "ASI_INTR_DISPATCH_STATUS error: busy"
        .align 4
--- 460,469 ----
*** 643,666 ****
        retl
        nop
        SET_SIZE(init_mondo_nocheck)
        SET_SIZE(init_mondo)
- #endif /* lint */
-
#if !(defined(JALAPENO) || defined(SERRANO))
- #if defined(lint)
-
- /* ARGSUSED */
- void
- shipit(int upaid, int bn)
- { return; }
-
- #else /* lint */
-
/*
 * Ship mondo to aid using busy/nack pair bn
 */
        ENTRY_NP(shipit)
        sll %o0, IDCR_PID_SHIFT, %g1 ! IDCR<18:14> = agent id
--- 513,525 ----
*** 671,694 ****
        membar #Sync
        retl
        nop
        SET_SIZE(shipit)
- #endif /* lint */
-
#endif /* !(JALAPENO || SERRANO) */
- #if defined(lint)
-
- /* ARGSUSED */
- void
- flush_instr_mem(caddr_t vaddr, size_t len)
- {}
-
- #else /* lint */
-
/*
 * flush_instr_mem:
 * Flush 1 page of the I-$ starting at vaddr
 * %o0 vaddr
 * %o1 bytes to be flushed
--- 530,542 ----
*** 703,726 ****
        flush %o0 ! address irrelevant
        retl
        nop
        SET_SIZE(flush_instr_mem)
- #endif /* lint */
-
#if defined(CPU_IMP_ECACHE_ASSOC)
- #if defined(lint)
-
- /* ARGSUSED */
- uint64_t
- get_ecache_ctrl(void)
- { return (0); }
-
- #else /* lint */
-
        ENTRY(get_ecache_ctrl)
        GET_CPU_IMPL(%o0)
        cmp %o0, JAGUAR_IMPL
        !
        ! Putting an ASI access in the delay slot may
--- 551,563 ----
*** 736,747 ****
2:
        retl
        nop
        SET_SIZE(get_ecache_ctrl)
- #endif /* lint */
-
#endif /* CPU_IMP_ECACHE_ASSOC */
#if !(defined(JALAPENO) || defined(SERRANO))
--- 573,582 ----
*** 749,767 ****
 * flush_ecache:
 * %o0 - 64 bit physical address
 * %o1 - ecache size
 * %o2 - ecache linesize
 */
- #if defined(lint)
- /*ARGSUSED*/
- void
- flush_ecache(uint64_t physaddr, size_t ecache_size, size_t ecache_linesize)
- {}
-
- #else /* !lint */
-
        ENTRY(flush_ecache)
/*
 * For certain CPU implementations, we have to flush the L2 cache
 * before flushing the ecache.
--- 584,594 ----
*** 775,816 ****
        retl
        nop
        SET_SIZE(flush_ecache)
- #endif /* lint */
-
#endif /* !(JALAPENO || SERRANO) */
- #if defined(lint)
-
- void
- flush_dcache(void)
- {}
-
- #else /* lint */
-
        ENTRY(flush_dcache)
        ASM_LD(%o0, dcache_size)
        ASM_LD(%o1, dcache_linesize)
        CH_DCACHE_FLUSHALL(%o0, %o1, %o2)
        retl
        nop
        SET_SIZE(flush_dcache)
- #endif /* lint */
-
- #if defined(lint)
-
- void
- flush_icache(void)
- {}
-
- #else /* lint */
-
        ENTRY(flush_icache)
        GET_CPU_PRIVATE_PTR(%g0, %o0, %o2, flush_icache_1);
        ld [%o0 + CHPR_ICACHE_LINESIZE], %o1
        ba,pt %icc, 2f
        ld [%o0 + CHPR_ICACHE_SIZE], %o0
--- 602,623 ----
*** 821,881 ****
        CH_ICACHE_FLUSHALL(%o0, %o1, %o2, %o4)
        retl
        nop
        SET_SIZE(flush_icache)
- #endif /* lint */
-
- #if defined(lint)
-
- /*ARGSUSED*/
- void
- kdi_flush_idcache(int dcache_size, int dcache_lsize, int icache_size,
- int icache_lsize)
- {
- }
-
- #else /* lint */
-
        ENTRY(kdi_flush_idcache)
        CH_DCACHE_FLUSHALL(%o0, %o1, %g1)
        CH_ICACHE_FLUSHALL(%o2, %o3, %g1, %g2)
        membar #Sync
        retl
        nop
        SET_SIZE(kdi_flush_idcache)
- #endif /* lint */
-
- #if defined(lint)
-
- void
- flush_pcache(void)
- {}
-
- #else /* lint */
-
        ENTRY(flush_pcache)
        PCACHE_FLUSHALL(%o0, %o1, %o2)
        retl
        nop
        SET_SIZE(flush_pcache)
- #endif /* lint */
-
#if defined(CPU_IMP_L1_CACHE_PARITY)
- #if defined(lint)
-
- /* ARGSUSED */
- void
- get_dcache_dtag(uint32_t dcache_idx, uint64_t *data)
- {}
-
- #else /* lint */
-
/*
 * Get dcache data and tag. The Dcache data is a pointer to a ch_dc_data_t
 * structure (see cheetahregs.h):
 * The Dcache *should* be turned off when this code is executed.
 */
--- 628,654 ----
*** 949,970 ****
4:
        retl
        wrpr %g0, %o5, %pstate
        SET_SIZE(get_dcache_dtag)
- #endif /* lint */
-
- #if defined(lint)
-
- /* ARGSUSED */
- void
- get_icache_dtag(uint32_t ecache_idx, uint64_t *data)
- {}
-
- #else /* lint */
-
/*
 * Get icache data and tag. The data argument is a pointer to a ch_ic_data_t
 * structure (see cheetahregs.h):
 * The Icache *Must* be turned off when this function is called.
 * This is because diagnostic accesses to the Icache interfere with cache
--- 722,732 ----
*** 1002,1022 ****
        retl
        wrpr %g0, %o5, %pstate
        SET_SIZE(get_icache_dtag)
- #endif /* lint */
-
- #if defined(lint)
-
- /* ARGSUSED */
- void
- get_pcache_dtag(uint32_t pcache_idx, uint64_t *data)
- {}
-
- #else /* lint */
-
/*
 * Get pcache data and tags.
 * inputs:
 * pcache_idx - fully constructed VA for for accessing P$ diagnostic
 * registers. Contains PC_way and PC_addr shifted into
--- 764,773 ----
*** 1048,1070 ****
        retl
        wrpr %g0, %o5, %pstate
        SET_SIZE(get_pcache_dtag)
- #endif /* lint */
-
#endif /* CPU_IMP_L1_CACHE_PARITY */
- #if defined(lint)
-
- /* ARGSUSED */
- void
- set_dcu(uint64_t dcu)
- {}
-
- #else /* lint */
-
/*
 * re-enable the i$, d$, w$, and p$ according to bootup cache state.
 * Turn on WE, HPE, SPE, PE, IC, and DC bits defined as DCU_CACHE.
 * %o0 - 64 bit constant
 */
--- 799,810 ----
*** 1073,1106 ****
        flush %g0 /* flush required after changing the IC bit */
        retl
        nop
        SET_SIZE(set_dcu)
- #endif /* lint */
-
- #if defined(lint)
-
- uint64_t
- get_dcu(void)
- {
- return ((uint64_t)0);
- }
-
- #else /* lint */
-
/*
 * Return DCU register.
 */
        ENTRY(get_dcu)
        ldxa [%g0]ASI_DCU, %o0 /* DCU control register */
        retl
        nop
        SET_SIZE(get_dcu)
- #endif /* lint */
-
/*
 * Cheetah/Cheetah+ level 15 interrupt handler trap table entry.
 *
 * This handler is used to check for softints generated by error trap
 * handlers to report errors. On Cheetah, this mechanism is used by the
--- 813,832 ----
*** 1107,1139 ****
 * Fast ECC at TL>0 error trap handler and, on Cheetah+, by both the Fast
 * ECC at TL>0 error and the I$/D$ parity error at TL>0 trap handlers.
 * NB: Must be 8 instructions or less to fit in trap table and code must
 * be relocatable.
 */
- #if defined(lint)
- void
- ch_pil15_interrupt_instr(void)
- {}
-
- #else /* lint */
-
        ENTRY_NP(ch_pil15_interrupt_instr)
        ASM_JMP(%g1, ch_pil15_interrupt)
        SET_SIZE(ch_pil15_interrupt_instr)
- #endif
-
- #if defined(lint)
-
- void
- ch_pil15_interrupt(void)
- {}
-
- #else /* lint */
-
        ENTRY_NP(ch_pil15_interrupt)
/*
 * Since pil_interrupt is hacked to assume that every level 15
 * interrupt is generated by the CPU to indicate a performance
--- 833,848 ----
*** 1183,1193 ****
        sethi %hi(pil_interrupt), %g1
        jmp %g1 + %lo(pil_interrupt)
        mov PIL_15, %g4
        SET_SIZE(ch_pil15_interrupt)
- #endif
/*
 * Error Handling
 *
--- 892,901 ----
*** 1276,1293 ****
 * architecture-specific files.
 * NB: Must be 8 instructions or less to fit in trap table and code must
 * be relocatable.
 */
- #if defined(lint)
-
- void
- fecc_err_instr(void)
- {}
-
- #else /* lint */
-
        ENTRY_NP(fecc_err_instr)
        membar #Sync ! Cheetah requires membar #Sync
/*
 * Save current DCU state. Turn off the Dcache and Icache.
--- 984,993 ----
*** 1298,1320 ****
        flush %g0 /* flush required after changing the IC bit */
        ASM_JMP(%g4, fast_ecc_err)
        SET_SIZE(fecc_err_instr)
- #endif /* lint */
-
#if !(defined(JALAPENO) || defined(SERRANO))
- #if defined(lint)
-
- void
- fast_ecc_err(void)
- {}
-
- #else /* lint */
-
        .section ".text"
        .align 64
        ENTRY_NP(fast_ecc_err)
/*
--- 998,1010 ----
*** 1450,1461 ****
        ba sys_trap
        movl %icc, PIL_14, %g4
        SET_SIZE(fast_ecc_err)
- #endif /* lint */
-
#endif /* !(JALAPENO || SERRANO) */
/*
 * Cheetah/Cheetah+ Fast ECC at TL>0 trap strategy:
--- 1140,1149 ----
*** 1509,1532 ****
 * which goes to fecc_err_tl1_cont_instr, and we continue the handling there.
 * NB: Must be 8 instructions or less to fit in trap table and code must
 * be relocatable.
 */
- #if defined(lint)
-
- void
- fecc_err_tl1_instr(void)
- {}
-
- #else /* lint */
-
        ENTRY_NP(fecc_err_tl1_instr)
        CH_ERR_TL1_TRAPENTRY(SWTRAP_0);
        SET_SIZE(fecc_err_tl1_instr)
- #endif /* lint */
-
/*
 * Software trap 0 at TL>0.
 * tt1_swtrap0 is replaced by fecc_err_tl1_cont_instr in cpu_init_trap of
 * the various architecture-specific files. This is used as a continuation
 * of the fast ecc handling where we've bought an extra TL level, so we can
--- 1197,1210 ----
*** 1535,1567 ****
 * there's a reserved hole from 3-7. We only use bits 0-1 and 8-9 (the low
 * order two bits from %g1 and %g2 respectively).
 * NB: Must be 8 instructions or less to fit in trap table and code must
 * be relocatable.
 */
- #if defined(lint)
- void
- fecc_err_tl1_cont_instr(void)
- {}
-
- #else /* lint */
-
        ENTRY_NP(fecc_err_tl1_cont_instr)
        CH_ERR_TL1_SWTRAPENTRY(fast_ecc_tl1_err)
        SET_SIZE(fecc_err_tl1_cont_instr)
- #endif /* lint */
-
- #if defined(lint)
-
- void
- ce_err(void)
- {}
-
- #else /* lint */
-
/*
 * The ce_err function handles disrupting trap type 0x63 at TL=0.
 *
 * AFSR errors bits which cause this trap are:
 * CE, EMC, EDU:ST, EDC, WDU, WDC, CPU, CPC, IVU, IVC
--- 1213,1228 ----
*** 1736,1779 ****
        cmp %g4, PIL_14
        ba sys_trap
        movl %icc, PIL_14, %g4
        SET_SIZE(ce_err)
- #endif /* lint */
-
- #if defined(lint)
-
- /*
- * This trap cannot happen at TL>0 which means this routine will never
- * actually be called and so we treat this like a BAD TRAP panic.
- */
- void
- ce_err_tl1(void)
- {}
-
- #else /* lint */
-
        .align 64
        ENTRY_NP(ce_err_tl1)
        call ptl1_panic
        mov PTL1_BAD_TRAP, %g1
        SET_SIZE(ce_err_tl1)
- #endif /* lint */
-
- #if defined(lint)
-
- void
- async_err(void)
- {}
-
- #else /* lint */
-
/*
 * The async_err function handles deferred trap types 0xA
 * (instruction_access_error) and 0x32 (data_access_error) at TL>=0.
 *
 * AFSR errors bits which cause this trap are:
--- 1397,1416 ----
*** 1967,1978 ****
        set cpu_deferred_error, %g1
        ba sys_trap
        mov PIL_15, %g4 ! run at pil 15
        SET_SIZE(async_err)
- #endif /* lint */
-
#if defined(CPU_IMP_L1_CACHE_PARITY)
/*
 * D$ parity error trap (trap 71) at TL=0.
 * tt0_dperr is replaced by dcache_parity_instr in cpu_init_trap of
--- 1604,1613 ----
*** 1979,1995 ****
 * the various architecture-specific files. This merely sets up the
 * arguments for cpu_parity_error and calls it via sys_trap.
 * NB: Must be 8 instructions or less to fit in trap table and code must
 * be relocatable.
 */
- #if defined(lint)
-
- void
- dcache_parity_instr(void)
- {}
-
- #else /* lint */
        ENTRY_NP(dcache_parity_instr)
        membar #Sync ! Cheetah+ requires membar #Sync
        set cpu_parity_error, %g1
        or %g0, CH_ERR_DPE, %g2
        rdpr %tpc, %g3
--- 1614,1623 ----
*** 1996,2031 ****
        sethi %hi(sys_trap), %g7
        jmp %g7 + %lo(sys_trap)
        mov PIL_15, %g4 ! run at pil 15
        SET_SIZE(dcache_parity_instr)
- #endif /* lint */
-
/*
 * D$ parity error trap (trap 71) at TL>0.
 * tt1_dperr is replaced by dcache_parity_tl1_instr in cpu_init_trap of
 * the various architecture-specific files. This generates a "Software
 * Trap 1" at TL>0, which goes to dcache_parity_tl1_cont_instr, and we
 * continue the handling there.
 * NB: Must be 8 instructions or less to fit in trap table and code must
 * be relocatable.
 */
- #if defined(lint)
-
- void
- dcache_parity_tl1_instr(void)
- {}
-
- #else /* lint */
        ENTRY_NP(dcache_parity_tl1_instr)
        CH_ERR_TL1_TRAPENTRY(SWTRAP_1);
        SET_SIZE(dcache_parity_tl1_instr)
- #endif /* lint */
-
/*
 * Software trap 1 at TL>0.
 * tt1_swtrap1 is replaced by dcache_parity_tl1_cont_instr in cpu_init_trap
 * of the various architecture-specific files. This is used as a continuation
 * of the dcache parity handling where we've bought an extra TL level, so we
--- 1624,1648 ----
*** 2034,2069 ****
 * there's a reserved hole from 3-7. We only use bits 0-1 and 8-9 (the low
 * order two bits from %g1 and %g2 respectively).
 * NB: Must be 8 instructions or less to fit in trap table and code must
 * be relocatable.
 */
- #if defined(lint)
-
- void
- dcache_parity_tl1_cont_instr(void)
- {}
-
- #else /* lint */
        ENTRY_NP(dcache_parity_tl1_cont_instr)
        CH_ERR_TL1_SWTRAPENTRY(dcache_parity_tl1_err);
        SET_SIZE(dcache_parity_tl1_cont_instr)
- #endif /* lint */
-
/*
 * D$ parity error at TL>0 handler
 * We get here via trap 71 at TL>0->Software trap 1 at TL>0. We enter
 * this routine with %g1 and %g2 already saved in %tpc, %tnpc and %tstate.
 */
- #if defined(lint)
- void
- dcache_parity_tl1_err(void)
- {}
-
- #else /* lint */
-
        ENTRY_NP(dcache_parity_tl1_err)
/*
 * This macro saves all the %g registers in the ch_err_tl1_data
 * structure, updates the ch_err_tl1_flags and saves the %tpc in
--- 1651,1670 ----
*** 2150,2177 ****
 * Restores the %g registers and issues retry.
 */
        CH_ERR_TL1_EXIT;
        SET_SIZE(dcache_parity_tl1_err)
- #endif /* lint */
-
/*
 * I$ parity error trap (trap 72) at TL=0.
 * tt0_iperr is replaced by icache_parity_instr in cpu_init_trap of
 * the various architecture-specific files. This merely sets up the
 * arguments for cpu_parity_error and calls it via sys_trap.
 * NB: Must be 8 instructions or less to fit in trap table and code must
 * be relocatable.
 */
- #if defined(lint)
- void
- icache_parity_instr(void)
- {}
-
- #else /* lint */
-
        ENTRY_NP(icache_parity_instr)
        membar #Sync ! Cheetah+ requires membar #Sync
        set cpu_parity_error, %g1
        or %g0, CH_ERR_IPE, %g2
        rdpr %tpc, %g3
--- 1751,1769 ----
*** 2178,2211 ****
        sethi %hi(sys_trap), %g7
        jmp %g7 + %lo(sys_trap)
        mov PIL_15, %g4 ! run at pil 15
        SET_SIZE(icache_parity_instr)
- #endif /* lint */
-
/*
 * I$ parity error trap (trap 72) at TL>0.
 * tt1_iperr is replaced by icache_parity_tl1_instr in cpu_init_trap of
 * the various architecture-specific files. This generates a "Software
 * Trap 2" at TL>0, which goes to icache_parity_tl1_cont_instr, and we
 * continue the handling there.
 * NB: Must be 8 instructions or less to fit in trap table and code must
 * be relocatable.
 */
- #if defined(lint)
-
- void
- icache_parity_tl1_instr(void)
- {}
-
- #else /* lint */
        ENTRY_NP(icache_parity_tl1_instr)
        CH_ERR_TL1_TRAPENTRY(SWTRAP_2);
        SET_SIZE(icache_parity_tl1_instr)
- #endif /* lint */
-
/*
 * Software trap 2 at TL>0.
 * tt1_swtrap2 is replaced by icache_parity_tl1_cont_instr in cpu_init_trap
 * of the various architecture-specific files. This is used as a continuation
 * of the icache parity handling where we've bought an extra TL level, so we
--- 1770,1792 ----
*** 2214,2250 ****
 * there's a reserved hole from 3-7. We only use bits 0-1 and 8-9 (the low
 * order two bits from %g1 and %g2 respectively).
 * NB: Must be 8 instructions or less to fit in trap table and code must
 * be relocatable.
 */
- #if defined(lint)
-
- void
- icache_parity_tl1_cont_instr(void)
- {}
-
- #else /* lint */
        ENTRY_NP(icache_parity_tl1_cont_instr)
        CH_ERR_TL1_SWTRAPENTRY(icache_parity_tl1_err);
        SET_SIZE(icache_parity_tl1_cont_instr)
- #endif /* lint */
-
/*
 * I$ parity error at TL>0 handler
 * We get here via trap 72 at TL>0->Software trap 2 at TL>0. We enter
 * this routine with %g1 and %g2 already saved in %tpc, %tnpc and %tstate.
 */
- #if defined(lint)
- void
- icache_parity_tl1_err(void)
- {}
-
- #else /* lint */
-
        ENTRY_NP(icache_parity_tl1_err)
/*
 * This macro saves all the %g registers in the ch_err_tl1_data
 * structure, updates the ch_err_tl1_flags and saves the %tpc in
--- 1795,1815 ----
*** 2316,2327 ****
 */
        CH_ERR_TL1_EXIT;
        SET_SIZE(icache_parity_tl1_err)
- #endif /* lint */
-
#endif /* CPU_IMP_L1_CACHE_PARITY */
/*
 * The itlb_rd_entry and dtlb_rd_entry functions return the tag portion of the
--- 1881,1890 ----
*** 2330,2348 ****
 * tlb itself.
 *
 * Note: These two routines are required by the Estar "cpr" loadable module.
 */
- #if defined(lint)
-
- /* ARGSUSED */
- void
- itlb_rd_entry(uint_t entry, tte_t *tte, uint64_t *va_tag)
- {}
-
- #else /* lint */
-
        ENTRY_NP(itlb_rd_entry)
        sllx %o0, 3, %o0
        ldxa [%o0]ASI_ITLB_ACCESS, %g1
        stx %g1, [%o1]
        ldxa [%o0]ASI_ITLB_TAGREAD, %g2
--- 1893,1902 ----
*** 2350,2371 ****
        andn %g2, %o4, %o5
        retl
        stx %o5, [%o2]
        SET_SIZE(itlb_rd_entry)
- #endif /* lint */
-
- #if defined(lint)
-
- /* ARGSUSED */
- void
- dtlb_rd_entry(uint_t entry, tte_t *tte, uint64_t *va_tag)
- {}
-
- #else /* lint */
-
        ENTRY_NP(dtlb_rd_entry)
        sllx %o0, 3, %o0
        ldxa [%o0]ASI_DTLB_ACCESS, %g1
        stx %g1, [%o1]
        ldxa [%o0]ASI_DTLB_TAGREAD, %g2
--- 1904,1914 ----
*** 2372,2431 ****
        set TAGREAD_CTX_MASK, %o4
        andn %g2, %o4, %o5
        retl
        stx %o5, [%o2]
        SET_SIZE(dtlb_rd_entry)
- #endif /* lint */
#if !(defined(JALAPENO) || defined(SERRANO))
- #if defined(lint)
-
- uint64_t
- get_safari_config(void)
- { return (0); }
-
- #else /* lint */
-
        ENTRY(get_safari_config)
        ldxa [%g0]ASI_SAFARI_CONFIG, %o0
        retl
        nop
        SET_SIZE(get_safari_config)
- #endif /* lint */
-
- #if defined(lint)
-
- /* ARGSUSED */
- void
- set_safari_config(uint64_t safari_config)
- {}
-
- #else /* lint */
-
        ENTRY(set_safari_config)
        stxa %o0, [%g0]ASI_SAFARI_CONFIG
        membar #Sync
        retl
        nop
        SET_SIZE(set_safari_config)
- #endif /* lint */
-
#endif /* !(JALAPENO || SERRANO) */
- #if defined(lint)
-
- void
- cpu_cleartickpnt(void)
- {}
-
- #else /* lint */
/*
 * Clear the NPT (non-privileged trap) bit in the %tick/%stick
 * registers. In an effort to make the change in the
 * tick/stick counter as consistent as possible, we disable
 * all interrupts while we're changing the registers. We also
--- 1915,1945 ----
*** 2461,2490 ****
        jmp %g4 + 4
        wrpr %g0, %g1, %pstate /* restore processor state */
        SET_SIZE(cpu_clearticknpt)
- #endif /* lint */
-
#if defined(CPU_IMP_L1_CACHE_PARITY)
- #if defined(lint)
- /*
- * correct_dcache_parity(size_t size, size_t linesize)
- *
- * Correct D$ data parity by zeroing the data and initializing microtag
- * for all indexes and all ways of the D$.
- *
- */
- /* ARGSUSED */
- void
- correct_dcache_parity(size_t size, size_t linesize)
- {}
-
- #else /* lint */
-
        ENTRY(correct_dcache_parity)
/*
 * Register Usage:
 *
 * %o0 = input D$ size
--- 1975,1987 ----
*** 2542,2592 ****
        retl
        nop
        SET_SIZE(correct_dcache_parity)
- #endif /* lint */
-
#endif /* CPU_IMP_L1_CACHE_PARITY */
- #if defined(lint)
- /*
- * Get timestamp (stick).
- */
- /* ARGSUSED */
- void
- stick_timestamp(int64_t *ts)
- {
- }
-
- #else /* lint */
-
        ENTRY_NP(stick_timestamp)
        rd STICK, %g1 ! read stick reg
        sllx %g1, 1, %g1
        srlx %g1, 1, %g1 ! clear npt bit
        retl
        stx %g1, [%o0] ! store the timestamp
        SET_SIZE(stick_timestamp)
- #endif /* lint */
-
- #if defined(lint)
- /*
- * Set STICK adjusted by skew.
- */
- /* ARGSUSED */
- void
- stick_adj(int64_t skew)
- {
- }
-
- #else /* lint */
-
        ENTRY_NP(stick_adj)
        rdpr %pstate, %g1 ! save processor state
        andn %g1, PSTATE_IE, %g3
        ba 1f ! cache align stick adj
        wrpr %g0, %g3, %pstate ! turn off interrupts
--- 2039,2061 ----
*** 2600,2735 ****
        retl
        wrpr %g1, %pstate ! restore processor state
        SET_SIZE(stick_adj)
- #endif /* lint */
-
- #if defined(lint)
- /*
- * Debugger-specific stick retrieval
- */
- /*ARGSUSED*/
- int
- kdi_get_stick(uint64_t *stickp)
- {
- return (0);
- }
-
- #else /* lint */
-
        ENTRY_NP(kdi_get_stick)
        rd STICK, %g1
        stx %g1, [%o0]
        retl
        mov %g0, %o0
        SET_SIZE(kdi_get_stick)
- #endif /* lint */
-
- #if defined(lint)
- /*
- * Invalidate the specified line from the D$.
- *
- * Register usage:
- * %o0 - index for the invalidation, specifies DC_way and DC_addr
- *
- * ASI_DC_TAG, 0x47, is used in the following manner. A 64-bit value is
- * stored to a particular DC_way and DC_addr in ASI_DC_TAG.
- *
- * The format of the stored 64-bit value is:
- *
- * +----------+--------+----------+
- * | Reserved | DC_tag | DC_valid |
- * +----------+--------+----------+
- * 63 31 30 1 0
- *
- * DC_tag is the 30-bit physical tag of the associated line.
- * DC_valid is the 1-bit valid field for both the physical and snoop tags.
- *
- * The format of the 64-bit DC_way and DC_addr into ASI_DC_TAG is:
- *
- * +----------+--------+----------+----------+
- * | Reserved | DC_way | DC_addr | Reserved |
- * +----------+--------+----------+----------+
- * 63 16 15 14 13 5 4 0
- *
- * DC_way is a 2-bit index that selects one of the 4 ways.
- * DC_addr is a 9-bit index that selects one of 512 tag/valid fields.
- *
- * Setting the DC_valid bit to zero for the specified DC_way and
- * DC_addr index into the D$ results in an invalidation of a D$ line.
- */
- /*ARGSUSED*/
- void
- dcache_inval_line(int index)
- {
- }
- #else /* lint */
        ENTRY(dcache_inval_line)
        sll %o0, 5, %o0 ! shift index into DC_way and DC_addr
        stxa %g0, [%o0]ASI_DC_TAG ! zero the DC_valid and DC_tag bits
        membar #Sync
        retl
        nop
        SET_SIZE(dcache_inval_line)
- #endif /* lint */
-
- #if defined(lint)
- /*
- * Invalidate the entire I$
- *
- * Register usage:
- * %o0 - specifies IC_way, IC_addr, IC_tag
- * %o1 - scratch
- * %o2 - used to save and restore DCU value
- * %o3 - scratch
- * %o5 - used to save and restore PSTATE
- *
- * Due to the behavior of the I$ control logic when accessing ASI_IC_TAG,
- * the I$ should be turned off. Accesses to ASI_IC_TAG may collide and
- * block out snoops and invalidates to the I$, causing I$ consistency
- * to be broken. Before turning on the I$, all I$ lines must be invalidated.
- *
- * ASI_IC_TAG, 0x67, is used in the following manner. A 64-bit value is
- * stored to a particular IC_way, IC_addr, IC_tag in ASI_IC_TAG. The
- * info below describes store (write) use of ASI_IC_TAG. Note that read
- * use of ASI_IC_TAG behaves differently.
- *
- * The format of the stored 64-bit value is:
- *
- * +----------+--------+---------------+-----------+
- * | Reserved | Valid | IC_vpred<7:0> | Undefined |
- * +----------+--------+---------------+-----------+
- * 63 55 54 53 46 45 0
- *
- * Valid is the 1-bit valid field for both the physical and snoop tags.
- * IC_vpred is the 8-bit LPB bits for 8 instructions starting at
- * the 32-byte boundary aligned address specified by IC_addr.
- *
- * The format of the 64-bit IC_way, IC_addr, IC_tag into ASI_IC_TAG is:
- *
- * +----------+--------+---------+--------+---------+
- * | Reserved | IC_way | IC_addr | IC_tag |Reserved |
- * +----------+--------+---------+--------+---------+
- * 63 16 15 14 13 5 4 3 2 0
- *
- * IC_way is a 2-bit index that selects one of the 4 ways.
- * IC_addr[13:6] is an 8-bit index that selects one of 256 valid fields.
- * IC_addr[5] is a "don't care" for a store.
- * IC_tag set to 2 specifies that the stored value is to be interpreted
- * as containing Valid and IC_vpred as described above.
- *
- * Setting the Valid bit to zero for the specified IC_way and
- * IC_addr index into the I$ results in an invalidation of an I$ line.
- */
- /*ARGSUSED*/
- void
- icache_inval_all(void)
- {
- }
- #else /* lint */
        ENTRY(icache_inval_all)
        rdpr %pstate, %o5
        andn %o5, PSTATE_IE, %o3
        wrpr %g0, %o3, %pstate ! clear IE bit
--- 2069,2093 ----
*** 2744,2764 ****
        CH_ICACHE_FLUSHALL(%o0, %o1, %o2, %o4)
        retl
        wrpr %g0, %o5, %pstate ! restore earlier pstate
        SET_SIZE(icache_inval_all)
- #endif /* lint */
- #if defined(lint)
- /* ARGSUSED */
- void
- cache_scrubreq_tl1(uint64_t inum, uint64_t index)
- {
- }
-
- #else /* lint */
/*
 * cache_scrubreq_tl1 is the crosstrap handler called on offlined cpus via a
 * crosstrap. It atomically increments the outstanding request counter and,
 * if there was not already an outstanding request, branches to setsoftint_tl1
 * to enqueue an intr_vec for the given inum.
--- 2102,2113 ----
*** 2792,2813 ****
        ! not reached
1:
        retry
        SET_SIZE(cache_scrubreq_tl1)
- #endif /* lint */
-
- #if defined(lint)
-
- /* ARGSUSED */
- void
- get_cpu_error_state(ch_cpu_errors_t *cpu_error_regs)
- {}
-
- #else /* lint */
-
/*
 * Get the error state for the processor.
 * Note that this must not be used at TL>0
 */
        ENTRY(get_cpu_error_state)
--- 2141,2151 ----
*** 2851,2881 ****
        stx %o1, [%o0 + CH_CPU_ERRORS_AFSR]
        ldxa [%g0]ASI_AFAR, %o1 ! primary afar reg
        retl
        stx %o1, [%o0 + CH_CPU_ERRORS_AFAR]
        SET_SIZE(get_cpu_error_state)
- #endif /* lint */
- #if defined(lint)
-
- /*
- * Check a page of memory for errors.
- *
- * Load each 64 byte block from physical memory.
- * Check AFSR after each load to see if an error
- * was caused. If so, log/scrub that error.
- *
- * Used to determine if a page contains
- * CEs when CEEN is disabled.
- */
- /*ARGSUSED*/
- void
- cpu_check_block(caddr_t va, uint_t psz)
- {}
-
- #else /* lint */
-
        ENTRY(cpu_check_block)
        !
        ! get a new window with room for the error regs
        !
        save %sp, -SA(MINFRAME + CH_CPU_ERROR_SIZE), %sp
--- 2189,2199 ----
*** 2910,2935 ****
        ret
        restore
        SET_SIZE(cpu_check_block)
- #endif /* lint */
-
- #if defined(lint)
-
- /*
- * Perform a cpu logout called from C. This is used where we did not trap
- * for the error but still want to gather "what we can". Caller must make
- * sure cpu private area exists and that the indicated logout area is free
- * for use, and that we are unable to migrate cpus.
- */
- /*ARGSUSED*/
- void
- cpu_delayed_logout(uint64_t afar, ch_cpu_logout_t *clop)
- { }
-
- #else
        ENTRY(cpu_delayed_logout)
        rdpr %pstate, %o2
        andn %o2, PSTATE_IE, %o2
        wrpr %g0, %o2, %pstate ! disable interrupts
        PARK_SIBLING_CORE(%o2, %o3, %o4) ! %o2 has DCU value
--- 2228,2237 ----
*** 2944,2964 ****
        wrpr %g0, %o2, %pstate
        retl
        nop
        SET_SIZE(cpu_delayed_logout)
- #endif /* lint */
-
- #if defined(lint)
-
- /*ARGSUSED*/
- int
- dtrace_blksuword32(uintptr_t addr, uint32_t *data, int tryagain)
- { return (0); }
-
- #else
-
        ENTRY(dtrace_blksuword32)
        save %sp, -SA(MINFRAME + 4), %sp
        rdpr %pstate, %l1
        andn %l1, PSTATE_IE, %l2 ! disable interrupts to
--- 2246,2255 ----
*** 3019,3041 ****
        call dtrace_blksuword32_err
        restore
        SET_SIZE(dtrace_blksuword32)
- #endif /* lint */
-
#ifdef CHEETAHPLUS_ERRATUM_25
- #if defined(lint)
- /*
- * Claim a chunk of physical address space.
- */
- /*ARGSUSED*/
- void
- claimlines(uint64_t pa, size_t sz, int stride)
- {}
- #else /* lint */
        ENTRY(claimlines)
1:
        subcc %o1, %o2, %o1
        add %o0, %o1, %o3
        bgeu,a,pt %xcc, 1b
--- 2310,2321 ----
*** 3042,3064 ****
        casxa [%o3]ASI_MEM, %g0, %g0
        membar #Sync
        retl
        nop
        SET_SIZE(claimlines)
- #endif /* lint */
- #if defined(lint)
- /*
- * CPU feature initialization,
- * turn BPE off,
- * get device id.
- */
- /*ARGSUSED*/
- void
- cpu_feature_init(void)
- {}
- #else /* lint */
        ENTRY(cpu_feature_init)
        save %sp, -SA(MINFRAME), %sp
        sethi %hi(cheetah_bpe_off), %o0
        ld [%o0 + %lo(cheetah_bpe_off)], %o0
        brz %o0, 1f
--- 2322,2332 ----
*** 3087,3130 ****
        nop
#endif /* CHEETAHPLUS_ERRATUM_34 */
        ret
        restore
        SET_SIZE(cpu_feature_init)
- #endif /* lint */
- #if defined(lint)
- /*
- * Copy a tsb entry atomically, from src to dest.
- * src must be 128 bit aligned.
- */
- /*ARGSUSED*/
- void
- copy_tsb_entry(uintptr_t src, uintptr_t dest)
- {}
- #else /* lint */
        ENTRY(copy_tsb_entry)
        ldda [%o0]ASI_NQUAD_LD, %o2 ! %o2 = tag, %o3 = data
        stx %o2, [%o1]
        stx %o3, [%o1 + 8 ]
        retl
        nop
        SET_SIZE(copy_tsb_entry)
- #endif /* lint */
#endif /* CHEETAHPLUS_ERRATUM_25 */
#ifdef CHEETAHPLUS_ERRATUM_34
- #if defined(lint)
-
- /*ARGSUSED*/
- void
- itlb_erratum34_fixup(void)
- {}
-
- #else /* lint */
-
        !
        ! In Cheetah+ erratum 34, under certain conditions an ITLB locked
        ! index 0 TTE will erroneously be displaced when a new TTE is
        ! loaded via ASI_ITLB_IN. In order to avoid cheetah+ erratum 34,
        ! locked index 0 TTEs must be relocated.
--- 2355,2377 ----
*** 3185,3205 ****
        flush %o4 ! Flush required for I-MMU
        retl
        wrpr %g0, %o3, %pstate ! Enable interrupts
        SET_SIZE(itlb_erratum34_fixup)
- #endif /* lint */
-
- #if defined(lint)
-
- /*ARGSUSED*/
- void
- dtlb_erratum34_fixup(void)
- {}
-
- #else /* lint */
-
        !
        ! In Cheetah+ erratum 34, under certain conditions a DTLB locked
        ! index 0 TTE will erroneously be displaced when a new TTE is
        ! loaded. In order to avoid cheetah+ erratum 34, locked index 0
        ! TTEs must be relocated.
--- 2432,2441 ----
*** 3254,3262 ****
        membar #Sync
        retl
        wrpr %g0, %o3, %pstate ! Enable interrupts
        SET_SIZE(dtlb_erratum34_fixup)
- #endif /* lint */
-
#endif /* CHEETAHPLUS_ERRATUM_34 */
--- 2490,2496 ----