de-linting of .s files

*** 21,39 ****
  /*
   * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
   * Use is subject to license terms.
   */
  
- #pragma ident	"%Z%%M%	%I%	%E% SMI"
- 
- #if defined(lint)
- #include <sys/types.h>
- #include <sys/thread.h>
- #include <sys/cpuvar.h>
- #else	/* lint */
  #include "assym.h"
- #endif	/* lint */
  
  #include <sys/t_lock.h>
  #include <sys/mutex.h>
  #include <sys/mutex_impl.h>
  #include <sys/rwlock_impl.h>
--- 21,31 ----
*** 56,107 ****
   * uint8_t ldstub(uint8_t *cp)
   *
   * Store 0xFF at the specified location, and return its previous content.
   */
  
- #if defined(lint)
- uint8_t
- ldstub(uint8_t *cp)
- {
- 	uint8_t	rv;
- 	rv = *cp;
- 	*cp = 0xFF;
- 	return rv;
- }
- #else	/* lint */
- 
  	ENTRY(ldstub)
  	retl
  	ldstub	[%o0], %o0
  	SET_SIZE(ldstub)
  
- #endif	/* lint */
- 
  /************************************************************************
   * MEMORY BARRIERS -- see atomic.h for full descriptions.
   */
  
- #if defined(lint)
- 
- void
- membar_enter(void)
- {}
- 
- void
- membar_exit(void)
- {}
- 
- void
- membar_producer(void)
- {}
- 
- void
- membar_consumer(void)
- {}
- 
- #else	/* lint */
- 
  #ifdef SF_ERRATA_51
  	.align 32
  	ENTRY(membar_return)
  	retl
  	nop
--- 48,66 ----
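
For reference, and not part of this change: the ldstub primitive described by the lint stub above is just a byte-sized test-and-set. A rough C equivalent, assuming a compiler that provides the GCC/Clang __atomic builtins (an assumption for illustration, not anything this file uses), would be:

    #include <stdint.h>

    /* illustration only: store 0xFF, return the previous byte, with acquire ordering */
    static inline uint8_t
    tas_byte(volatile uint8_t *cp)
    {
        return (__atomic_exchange_n(cp, 0xFF, __ATOMIC_ACQUIRE));
    }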
*** 129,202 ****
  	ENTRY(membar_consumer)
  	MEMBAR_RETURN
  	membar	#LoadLoad
  	SET_SIZE(membar_consumer)
  
- #endif	/* lint */
- 
  /************************************************************************
   * MINIMUM LOCKS
   */
  
- #if defined(lint)
- 
- /*
-  * lock_try(lp), ulock_try(lp)
-  * - returns non-zero on success.
-  * - doesn't block interrupts so don't use this to spin on a lock.
-  * - uses "0xFF is busy, anything else is free" model.
-  *
-  * ulock_try() is for a lock in the user address space.
-  * For all V7/V8 sparc systems they are same since the kernel and
-  * user are mapped in a user' context.
-  * For V9 platforms the lock_try and ulock_try are different impl.
-  */
- 
- int
- lock_try(lock_t *lp)
- {
- 	return (0xFF ^ ldstub(lp));
- }
- 
- int
- lock_spin_try(lock_t *lp)
- {
- 	return (0xFF ^ ldstub(lp));
- }
- 
- void
- lock_set(lock_t *lp)
- {
- 	extern void lock_set_spin(lock_t *);
- 
- 	if (!lock_try(lp))
- 		lock_set_spin(lp);
- 	membar_enter();
- }
- 
- void
- lock_clear(lock_t *lp)
- {
- 	membar_exit();
- 	*lp = 0;
- }
- 
- int
- ulock_try(lock_t *lp)
- {
- 	return (0xFF ^ ldstub(lp));
- }
- 
- void
- ulock_clear(lock_t *lp)
- {
- 	membar_exit();
- 	*lp = 0;
- }
- 
- #else	/* lint */
- 
  	.align	32
  	ENTRY(lock_try)
  	ldstub	[%o0], %o1		! try to set lock, get value in %o1
  	brnz,pn	%o1, 1f
  	membar	#LoadLoad
--- 88,101 ----
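
The stubs removed above also documented the "0xFF is busy, anything else is free" model: lock_try() succeeds exactly when the previous byte was not 0xFF, which is why 0xFF ^ ldstub(lp) is the return value. A hedged C sketch of the same idiom, again assuming the GCC/Clang __atomic builtins rather than anything in this file:

    #include <stdint.h>

    /* nonzero on success: the previous byte was something other than 0xFF (busy) */
    static inline int
    byte_lock_try(volatile uint8_t *lp)
    {
        return (0xFF ^ __atomic_exchange_n(lp, 0xFF, __ATOMIC_ACQUIRE));
    }

    /* release: critical-section stores become visible before the byte is cleared */
    static inline void
    byte_lock_clear(volatile uint8_t *lp)
    {
        __atomic_store_n(lp, 0, __ATOMIC_RELEASE);
    }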
*** 253,291 ****
  	membar	#LoadStore|#StoreStore
  	retl
  	stba	%g0, [%o0]ASI_USER	! clear lock
  	SET_SIZE(ulock_clear)
  
- #endif	/* lint */
- 
  /*
   * lock_set_spl(lp, new_pil, *old_pil_addr)
   * 	Sets pil to new_pil, grabs lp, stores old pil in *old_pil_addr.
   */
  
- #if defined(lint)
- 
- /* ARGSUSED */
- void
- lock_set_spl(lock_t *lp, int new_pil, u_short *old_pil_addr)
- {
- 	extern int splr(int);
- 	extern void lock_set_spl_spin(lock_t *, int, u_short *, int);
- 	int old_pil;
- 
- 	old_pil = splr(new_pil);
- 	if (!lock_try(lp)) {
- 		lock_set_spl_spin(lp, new_pil, old_pil_addr, old_pil);
- 	} else {
- 		*old_pil_addr = (u_short)old_pil;
- 		membar_enter();
- 	}
- }
- 
- #else	/* lint */
- 
  	ENTRY(lock_set_spl)
  	rdpr	%pil, %o3			! %o3 = current pil
  	cmp	%o3, %o1			! is current pil high enough?
  	bl,a,pt	%icc, 1f			! if not, write %pil in delay
  	wrpr	%g0, %o1, %pil
--- 152,167 ----
*** 300,328 ****
  	sethi	%hi(lock_set_spl_spin), %o5	! load up jmp to C
  	jmp	%o5 + %lo(lock_set_spl_spin)	! jmp to lock_set_spl_spin
  	nop					! delay: do nothing
  	SET_SIZE(lock_set_spl)
  
- #endif	/* lint */
- 
  /*
   * lock_clear_splx(lp, s)
   */
  
- #if defined(lint)
- 
- void
- lock_clear_splx(lock_t *lp, int s)
- {
- 	extern void splx(int);
- 
- 	lock_clear(lp);
- 	splx(s);
- }
- 
- #else	/* lint */
- 
  	ENTRY(lock_clear_splx)
  	ldn	[THREAD_REG + T_CPU], %o2	! get CPU pointer
  	membar	#LoadStore|#StoreStore
  	ld	[%o2 + CPU_BASE_SPL], %o2
  	clrb	[%o0]				! clear lock
--- 176,189 ----
*** 331,342 ****
  .lock_clear_splx_lockstat_patch_point:
  	retl
  	wrpr	%g0, %o2, %pil
  	SET_SIZE(lock_clear_splx)
  
- #endif	/* lint */
- 
  /*
   * mutex_enter() and mutex_exit().
   *
   * These routines handle the simple cases of mutex_enter() (adaptive
   * lock, not held) and mutex_exit() (adaptive lock, held, no waiters).
--- 192,201 ----
*** 357,389 ****
   * calls lockstat_hot_patch() to hot-patch the kernel mutex code.
   * Note that we don't need to test lockstat_event_mask here -- we won't
   * patch this code in unless we're gathering ADAPTIVE_HOLD lockstats.
   */
  
- #if defined (lint)
- 
- /* ARGSUSED */
- void
- mutex_enter(kmutex_t *lp)
- {}
- 
- /* ARGSUSED */
- int
- mutex_tryenter(kmutex_t *lp)
- { return (0); }
- 
- /* ARGSUSED */
- void
- mutex_exit(kmutex_t *lp)
- {}
- 
- /* ARGSUSED */
- void *
- mutex_owner_running(mutex_impl_t *lp)
- { return (NULL); }
- 
- #else
  	.align	32
  	ENTRY(mutex_enter)
  	mov	THREAD_REG, %o1
  	casx	[%o0], %g0, %o1			! try to acquire as adaptive
  	brnz,pn	%o1, 1f				! locked or wrong type
--- 216,225 ----
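
The fast path kept above acquires an adaptive mutex by casx'ing the owner word from 0 to the current thread pointer. A minimal sketch of that shape in C, with hypothetical names (my_mutex_t, my_curthread, my_mutex_vector_enter are illustrations, not kernel interfaces) and the GCC/Clang builtins standing in for casx:

    #include <stdint.h>

    typedef struct { volatile uintptr_t m_owner; } my_mutex_t;

    extern uintptr_t my_curthread(void);              /* hypothetical: current thread pointer */
    extern void my_mutex_vector_enter(my_mutex_t *);  /* hypothetical slow path */

    void
    my_mutex_enter(my_mutex_t *mp)
    {
        uintptr_t expected = 0;

        /* fast path: owner word goes 0 -> curthread in a single CAS */
        if (!__atomic_compare_exchange_n(&mp->m_owner, &expected,
            my_curthread(), 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
            my_mutex_vector_enter(mp);    /* held, has waiters, or not adaptive */
    }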
*** 471,504 ****
  2:
  	retl
  	mov	%o2, %o0		! owner running, return cpu
  	SET_SIZE(mutex_owner_running)
  
- #endif	/* lint */
- 
  /*
   * rw_enter() and rw_exit().
   *
   * These routines handle the simple cases of rw_enter (write-locking an unheld
   * lock or read-locking a lock that's neither write-locked nor write-wanted)
   * and rw_exit (no waiters or not the last reader).  If anything complicated
   * is going on we punt to rw_enter_sleep() and rw_exit_wakeup(), respectively.
   */
  
- #if defined(lint)
- /* ARGSUSED */
- void
- rw_enter(krwlock_t *lp, krw_t rw)
- {}
- 
- /* ARGSUSED */
- void
- rw_exit(krwlock_t *lp)
- {}
- 
- #else
- 
  	.align	16
  	ENTRY(rw_enter)
  	cmp	%o1, RW_WRITER			! entering as writer?
  	be,a,pn	%icc, 2f			! if so, go do it ...
  	or	THREAD_REG, RW_WRITE_LOCKED, %o5	! delay: %o5 = owner
--- 307,325 ----
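
As the comment notes, the reader fast path only has to bump a reader count when the lock is neither write-locked nor write-wanted; everything else punts to rw_enter_sleep(). A sketch of that decision, with hypothetical flag values and names (not the kernel's rwlock_impl layout), using the same GCC/Clang builtins as the sketches above:

    #include <stdint.h>

    #define MY_RW_WRITE_LOCKED  0x1
    #define MY_RW_WRITE_WANTED  0x2
    #define MY_RW_READ_LOCK     0x4    /* increment per reader */

    typedef struct { volatile uintptr_t rw_wwwh; } my_rwlock_t;

    extern void my_rw_enter_sleep(my_rwlock_t *);    /* hypothetical slow path */

    void
    my_rw_enter_read(my_rwlock_t *rwp)
    {
        uintptr_t old = __atomic_load_n(&rwp->rw_wwwh, __ATOMIC_RELAXED);

        /*
         * Fast path only if not write-locked and not write-wanted; a real
         * implementation would retry on CAS failure, this sketch just punts.
         */
        if ((old & (MY_RW_WRITE_LOCKED | MY_RW_WRITE_WANTED)) != 0 ||
            !__atomic_compare_exchange_n(&rwp->rw_wwwh, &old,
            old + MY_RW_READ_LOCK, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
            my_rw_enter_sleep(rwp);
    }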
*** 578,597 ****
  .rw_write_exit_lockstat_patch_point:
  	retl
  	nop
  	SET_SIZE(rw_exit)
  
- #endif
- 
- #if defined(lint)
- 
- void
- lockstat_hot_patch(void)
- {}
- 
- #else
- 
  #define	RETL			0x81c3e008
  #define	NOP			0x01000000
  #define	BA			0x10800000
  
  #define	DISP22			((1 << 22) - 1)
--- 399,408 ----
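
The constants kept above are raw SPARC instruction words: RETL and NOP are the unpatched sequence at the lockstat patch points, and a BA word with a masked DISP22 displacement is what gets patched in when probes are enabled. For illustration only (not code from this file), a branch-always word from one word-aligned address to another would be assembled roughly like this:

    #include <stdint.h>

    #define BA      0x10800000
    #define DISP22  ((1 << 22) - 1)

    /* "ba": 22-bit signed word displacement, relative to the branch instruction itself */
    static uint32_t
    make_ba(uintptr_t from, uintptr_t to)
    {
        return (BA | (((to - from) >> 2) & DISP22));
    }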
*** 675,686 ****
  	    LS_LOCK_CLEAR_SPLX_RELEASE, RETL)
  	ret
  	restore
  	SET_SIZE(lockstat_hot_patch)
  
- #endif	/* lint */
- 
  /*
   * asm_mutex_spin_enter(mutex_t *)
   *
   * For use by assembly interrupt handler only.
   * Does not change spl, since the interrupt handler is assumed to be
--- 486,495 ----
*** 690,700 ****
   *
   * Entry:	%l6 - points to mutex
   *		%l7 - address of call (returns to %l7+8)
   * Uses:	%l6, %l5
   */
- #ifndef lint
  	.align 16
  	ENTRY_NP(asm_mutex_spin_enter)
  	ldstub	[%l6 + M_SPINLOCK], %l5	! try to set lock, get value in %l5
  1:
  	tst	%l5
--- 499,508 ----
*** 720,730 ****
  	bnz	2b			! after panic, feign success
  	nop
  	b	4b
  	ldub	[%l6 + M_SPINLOCK], %l5	! delay - reload lock
  	SET_SIZE(asm_mutex_spin_enter)
- #endif	/* lint */
  
  /*
   * asm_mutex_spin_exit(mutex_t *)
   *
   * For use by assembly interrupt handler only.
--- 528,537 ----
*** 733,829 ****
   *
   * Entry:	%l6 - points to mutex
   *		%l7 - address of call (returns to %l7+8)
   * Uses:	none
   */
- #ifndef lint
  	ENTRY_NP(asm_mutex_spin_exit)
  	membar	#LoadStore|#StoreStore
  	jmp	%l7 + 8			! return
  	clrb	[%l6 + M_SPINLOCK]	! delay - clear lock
  	SET_SIZE(asm_mutex_spin_exit)
- #endif	/* lint */
  
  /*
   * thread_onproc()
   *	Set thread in onproc state for the specified CPU.
   *	Also set the thread lock pointer to the CPU's onproc lock.
   *	Since the new lock isn't held, the store ordering is important.
   *	If not done in assembler, the compiler could reorder the stores.
   */
  
- #if defined(lint)
- void
- thread_onproc(kthread_id_t t, cpu_t *cp)
- {
- 	t->t_state = TS_ONPROC;
- 	t->t_lockp = &cp->cpu_thread_lock;
- }
- 
- #else	/* lint */
- 
  	ENTRY(thread_onproc)
  	set	TS_ONPROC, %o2		! TS_ONPROC state
  	st	%o2, [%o0 + T_STATE]	! store state
  	add	%o1, CPU_THREAD_LOCK, %o3	! pointer to disp_lock while running
  	retl				! return
  	stn	%o3, [%o0 + T_LOCKP]	! delay - store new lock pointer
  	SET_SIZE(thread_onproc)
  
- #endif	/* lint */
- 
  /* delay function used in some mutex code - just do 3 nop cas ops */
- #if defined(lint)
- 
- /* ARGSUSED */
- void
- cas_delay(void *addr)
- {}
- #else	/* lint */
  	ENTRY(cas_delay)
  	casx	[%o0], %g0, %g0
  	casx	[%o0], %g0, %g0
  	retl
  	casx	[%o0], %g0, %g0
  	SET_SIZE(cas_delay)
  
- #endif	/* lint */
- 
- #if defined(lint)
- 
- /*
-  * alternative delay function for some niagara processors.  The rd
-  * instruction uses less resources than casx on those cpus.
-  */
- /* ARGSUSED */
- void
- rdccr_delay(void)
- {}
- #else	/* lint */
  	ENTRY(rdccr_delay)
  	rd	%ccr, %g0
  	rd	%ccr, %g0
  	retl
  	rd	%ccr, %g0
  	SET_SIZE(rdccr_delay)
- #endif	/* lint */
  
  /*
   * mutex_delay_default(void)
   * Spins for approx a few hundred processor cycles and returns to caller.
   */
  
- #if defined(lint)
- void
- mutex_delay_default(void)
- {}
- 
- #else	/* lint */
- 
  	ENTRY(mutex_delay_default)
  	mov	72,%o0
  1:	brgz	%o0, 1b
  	dec	%o0
  	retl
  	nop
  	SET_SIZE(mutex_delay_default)
- #endif	/* lint */
--- 540,594 ----
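
The thread_onproc() comment retained above is the reason that routine stays in assembler: the t_state store has to become visible before the new lock pointer is published, and a C compiler is free to reorder two plain stores. Expressed in C, the same ordering would need an explicit release, sketched here with hypothetical types and the GCC/Clang builtins (an illustration of the requirement, not a replacement for the routine):

    typedef struct my_thread {
        volatile int    t_state;
        void *volatile  t_lockp;
    } my_thread_t;

    void
    my_thread_onproc(my_thread_t *t, void *onproc_lock, int ts_onproc)
    {
        t->t_state = ts_onproc;
        /* release store: the state write cannot be reordered after the lock-pointer write */
        __atomic_store_n(&t->t_lockp, onproc_lock, __ATOMIC_RELEASE);
    }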