de-linting of .s files


  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  23  * Use is subject to license terms.
  24  */
  25 
  26 /*
  27  * General machine architecture & implementation specific
  28  * assembly language routines.
  29  */
  30 #if defined(lint)
  31 #include <sys/types.h>
  32 #include <sys/t_lock.h>
  33 #else   /* lint */
  34 #include "assym.h"
  35 #endif  /* lint */
  36 
  37 #include <sys/asm_linkage.h>
  38 #include <sys/machsystm.h>
  39 #include <sys/machthread.h>
  40 #include <sys/privregs.h>
  41 #include <sys/cmpregs.h>
  42 #include <sys/clock.h>
  43 #include <sys/fpras.h>
  44 
  45 #if defined(lint)
  46 
  47 uint64_t
  48 ultra_gettick(void)
  49 { return (0); }
  50 
  51 #else   /* lint */
  52 
  53 /*
  54  * This isn't the routine you're looking for.
  55  *
  56  * The routine simply returns the value of %tick on the *current* processor.
  57  * Most of the time, gettick() [which in turn maps to %stick on platforms
  58  * that have different CPU %tick rates] is what you want.
  59  */
  60 
  61         ENTRY(ultra_gettick)
  62         retl
  63         rdpr    %tick, %o0
  64         SET_SIZE(ultra_gettick)
  65 
  66 #endif  /* lint */
  67 
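As the comment above notes, %tick is a per-CPU counter and can tick at a CPU-specific rate, so gettick() is usually the right call. The C sketch below shows the caller-side distinction; the extern prototypes are restated only for illustration (the real declarations live in the sun4u machine headers), and interval_ticks() is a hypothetical helper, not part of this file.

	#include <sys/types.h>

	/* Illustrative prototypes only; see the sun4u headers for the real ones. */
	extern uint64_t ultra_gettick(void);	/* raw %tick on the current CPU */
	extern u_longlong_t gettick(void);	/* %stick-backed where %tick rates differ */

	/*
	 * Measure a short interval in native tick units.  Because %tick is
	 * per-CPU, both reads must happen on the same CPU for the delta to
	 * mean anything; gettick() sidesteps that concern on platforms whose
	 * CPUs tick at different rates.
	 */
	static uint64_t
	interval_ticks(void (*work)(void))
	{
		uint64_t start = ultra_gettick();

		work();
		return (ultra_gettick() - start);
	}
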
  68 #if defined(lint)
  69 
  70 /*ARGSUSED*/
  71 int
  72 getprocessorid(void)
  73 { return (0); }
  74 
  75 #else   /* lint */
  76 
  77 /*
  78  * Get the processor ID.
  79  * === MID reg as specified in 15dec89 sun4u spec, sec 5.4.3
  80  */
  81 
  82         ENTRY(getprocessorid)
  83         CPU_INDEX(%o0, %o1)
  84         retl
  85         nop
  86         SET_SIZE(getprocessorid)
  87 
  88 #endif  /* lint */
  89 
  90 #if defined(lint)
  91 /*ARGSUSED*/
  92 void
  93 set_error_enable_tl1(uint64_t neer, uint64_t action)
  94 {}
  95 
  96 /* ARGSUSED */
  97 void
  98 set_error_enable(uint64_t neer)
  99 {}
 100 
 101 uint64_t
 102 get_error_enable()
 103 {
 104         return ((uint64_t)0);
 105 }
 106 #else /* lint */
 107 
 108         ENTRY(set_error_enable_tl1)
 109         cmp     %g2, EER_SET_ABSOLUTE
 110         be      %xcc, 1f
 111           nop
 112         ldxa    [%g0]ASI_ESTATE_ERR, %g3
 113         membar  #Sync
 114         cmp     %g2, EER_SET_SETBITS
 115         be,a    %xcc, 1f
 116           or    %g3, %g1, %g1
 117         andn    %g3, %g1, %g1                   /* EER_SET_CLRBITS */
 118 1:
 119         stxa    %g1, [%g0]ASI_ESTATE_ERR        /* ecache error enable reg */
 120         membar  #Sync
 121         retry
 122         SET_SIZE(set_error_enable_tl1)
 123 
 124         ENTRY(set_error_enable)
 125         stxa    %o0, [%g0]ASI_ESTATE_ERR        /* ecache error enable reg */
 126         membar  #Sync
 127         retl
 128         nop
 129         SET_SIZE(set_error_enable)
 130 
 131         ENTRY(get_error_enable)
 132         retl
 133         ldxa    [%g0]ASI_ESTATE_ERR, %o0        /* ecache error enable reg */
 134         SET_SIZE(get_error_enable)
 135 
 136 #endif /* lint */
 137 
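The TL1 routine above supports three update modes selected by %g2: write the new value absolutely, OR bits into the current enable register, or clear bits from it (the andn path). A C rendering of that decision logic, for illustration only: eer_read()/eer_write() are hypothetical stand-ins for the ldxa/stxa accesses to ASI_ESTATE_ERR, and the enum replaces the real EER_SET_* constants from the machine headers.

	#include <sys/types.h>

	/* Hypothetical accessors standing in for ldxa/stxa on ASI_ESTATE_ERR. */
	extern uint64_t eer_read(void);
	extern void eer_write(uint64_t);

	/* Illustrative stand-ins for the EER_SET_* action codes. */
	typedef enum { EER_ABSOLUTE, EER_SETBITS, EER_CLRBITS } eer_action_t;

	static void
	eer_update(uint64_t bits, eer_action_t action)
	{
		uint64_t new = bits;			/* absolute case */

		if (action != EER_ABSOLUTE) {
			uint64_t cur = eer_read();

			new = (action == EER_SETBITS) ?
			    (cur | bits) : (cur & ~bits);	/* set or clear */
		}
		eer_write(new);
	}
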
 138 #if defined(lint)
 139 void
 140 get_asyncflt(uint64_t *afsr)
 141 {
 142         afsr = afsr;
 143 }
 144 #else /* lint */
 145 
 146         ENTRY(get_asyncflt)
 147         ldxa    [%g0]ASI_AFSR, %o1              ! afsr reg
 148         retl
 149         stx     %o1, [%o0]
 150         SET_SIZE(get_asyncflt)
 151 
 152 #endif /* lint */
 153 
 154 #if defined(lint)
 155 void
 156 set_asyncflt(uint64_t afsr)
 157 {
 158         afsr = afsr;
 159 }
 160 #else /* lint */
 161 
 162         ENTRY(set_asyncflt)
 163         stxa    %o0, [%g0]ASI_AFSR              ! afsr reg
 164         membar  #Sync
 165         retl
 166         nop
 167         SET_SIZE(set_asyncflt)
 168 
 169 #endif /* lint */
 170 
 171 #if defined(lint)
 172 void
 173 get_asyncaddr(uint64_t *afar)
 174 {
 175         afar = afar;
 176 }
 177 #else /* lint */
 178 
 179         ENTRY(get_asyncaddr)
 180         ldxa    [%g0]ASI_AFAR, %o1              ! afar reg
 181         retl
 182         stx     %o1, [%o0]
 183         SET_SIZE(get_asyncaddr)
 184 
 185 #endif /* lint */
 186 
 187 #if defined(lint) || defined(__lint)
 188 
 189 /* ARGSUSED */
 190 hrtime_t
 191 tick2ns(hrtime_t tick, uint_t cpuid)
 192 { return 0; }
 193 
 194 #else   /* lint */
 195 
 196         ENTRY_NP(tick2ns)
 197         sethi   %hi(cpunodes), %o4
 198         or      %o4, %lo(cpunodes), %o4         ! %o4 = &cpunodes
 199         ! Register usage:
 200         !
 201         ! o0 = timestamp
 202         ! o2 = byte offset into cpunodes for tick_nsec_scale of this CPU
 203         ! o4 = &cpunodes
 204         !
 205         mulx    %o1, CPU_NODE_SIZE, %o2 ! %o2 = byte offset into cpunodes
 206         add     %o2, TICK_NSEC_SCALE, %o2
 207         ld      [%o4 + %o2], %o2        ! %o2 = cpunodes[cpuid].tick_nsec_scale
 208         NATIVE_TIME_TO_NSEC_SCALE(%o0, %o2, %o3, TICK_NSEC_SHIFT)
 209         retl
 210         nop
 211         SET_SIZE(tick2ns)
 212 
 213 #endif  /* lint */
 214 
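tick2ns() fetches the per-CPU tick_nsec_scale from cpunodes[] and applies NATIVE_TIME_TO_NSEC_SCALE, which is a fixed-point multiply-and-shift: nanoseconds is roughly ticks * NANOSEC / tick-frequency, with NANOSEC/frequency pre-encoded in the scale. The C below is only a conceptual rendering; SCALE_SHIFT stands in for the shift the real macro uses, and the 128-bit arithmetic glosses over the macro's trick of splitting the 64-bit tick into 32-bit halves so the product cannot overflow.

	#include <stdint.h>

	#define	SCALE_SHIFT	32	/* stand-in; the real shift comes from clock.h */

	/*
	 * Conceptual equivalent of tick2ns():
	 *   nsec_scale ~= (NANOSEC << SCALE_SHIFT) / tick_frequency
	 *   nanoseconds  = (ticks * nsec_scale) >> SCALE_SHIFT
	 */
	static uint64_t
	ticks_to_ns(uint64_t ticks, uint32_t nsec_scale)
	{
		return ((uint64_t)(((__uint128_t)ticks * nsec_scale) >> SCALE_SHIFT));
	}
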
 215 #if defined(lint)
 216 
 217 /* ARGSUSED */
 218 void
 219 set_cmp_error_steering(void)
 220 {}
 221 
 222 #else   /* lint */
 223 
 224         ENTRY(set_cmp_error_steering)
 225         membar  #Sync
 226         set     ASI_CORE_ID, %o0                ! %o0 = ASI_CORE_ID
 227         ldxa    [%o0]ASI_CMP_PER_CORE, %o0      ! get ASI_CORE_ID
 228         and     %o0, COREID_MASK, %o0
 229         set     ASI_CMP_ERROR_STEERING, %o1     ! %o1 = ERROR_STEERING_REG
  230         stxa    %o0, [%o1]ASI_CMP_SHARED        ! this core now handles
 231         membar  #Sync                           !  non-core specific errors
 232         retl
 233         nop
 234         SET_SIZE(set_cmp_error_steering)
 235 
 236 #endif  /* lint */
 237 
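set_cmp_error_steering() reads the calling core's ID from the per-core ASI, masks it with COREID_MASK, and stores it in the shared error-steering register so that errors not attributable to a specific core are reported to this core. A sketch in C, for illustration only: the two accessors are hypothetical stand-ins for the ldxa/stxa ASI accesses, and <sys/cmpregs.h> (already included above) is assumed to supply the register offsets and mask.

	#include <sys/types.h>
	#include <sys/cmpregs.h>	/* assumed: ASI_CORE_ID, COREID_MASK, ASI_CMP_ERROR_STEERING */

	/* Hypothetical stand-ins for ldxa [reg]ASI_CMP_PER_CORE / stxa ... ASI_CMP_SHARED. */
	extern uint64_t cmp_percore_read(uint64_t reg);
	extern void cmp_shared_write(uint64_t reg, uint64_t val);

	static void
	cmp_steer_errors_here(void)
	{
		uint64_t core = cmp_percore_read(ASI_CORE_ID) & COREID_MASK;

		/* Steer non-core-specific errors to the calling core. */
		cmp_shared_write(ASI_CMP_ERROR_STEERING, core);
	}
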
 238 #if defined(lint)
 239 
 240 /* ARGSUSED */
 241 uint64_t
 242 ultra_getver(void)
 243 {
 244         return (0); 
 245 }
 246 
 247 #else /* lint */
 248 
 249         ENTRY(ultra_getver)
 250         retl
 251         rdpr    %ver, %o0
 252         SET_SIZE(ultra_getver)
 253 
 254 #endif /* lint */
 255 
 256 #if defined(lint)
 257 
 258 int
 259 fpras_chkfn_type1(void)
 260 { return 0; }
 261 
 262 #else   /* lint */
 263 
 264         /*
 265          * Check instructions using just the AX pipelines, designed by
 266          * C.B. Liaw of PNP.
 267          *
 268          * This function must match a struct fpras_chkfn and must be
 269          * block aligned.  A zero return means all was well.  These
 270          * instructions are chosen to be sensitive to bit corruptions
 271          * on the fpras rewrite, so if a bit corruption still produces
 272          * a valid instruction we should still get an incorrect result
 273          * here.  This function is never called directly - it is copied
 274          * into per-cpu and per-operation buffers;  it must therefore
 275          * be absolutely position independent.  If an illegal instruction
 276          * is encountered then the trap handler trampolines to the final
 277          * three instructions of this function.
 278          *
 279          * We want two instructions that are complements of one another,
 280          * and which can perform a calculation with a known result.
 281          *
 282          * SETHI:
 283          *


 404         ADDCCC_CBR1_CBR2_CBR2           ! 13
 405         SETHI_CBV2_CBR1                 ! 14
 406         ADDCCC_CBR1_CBR2_CBR2           ! 15
 407         SETHI_CBV2_CBR1                 ! 16
 408 
 409         addc    CBR1, CBR2, CBR2        ! 1
 410         sethi   %hi(CBV3), CBR1         ! 2
 411         cmp     CBR1, CBR2              ! 3
  412         movnz   %icc, FPRAS_BADCALC, %o0 ! 4, how detected
 413         retl                            ! 5
 414           mov   CBO2, CBR2              ! 6, restore borrowed register
 415         .skip 4*(13-7+1)                ! 7 - 13
 416                                         !
 417                                         ! illegal instr'n trap comes here
 418                                         !
 419         mov     CBO2, CBR2              ! 14, restore borrowed register
 420         retl                            ! 15
 421           mov   FPRAS_BADTRAP, %o0      ! 16, how detected
 422         SET_SIZE(fpras_chkfn_type1)
 423 
 424 #endif  /* lint */
 425 
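In C terms the check function's job is simple: run a fixed computation whose answer is known in advance and report how any corruption showed up. The sketch below captures only that shape; the CHK_* constants are hypothetical, while FPRAS_BADCALC and FPRAS_BADTRAP are the codes <sys/fpras.h> (included above) already supplies to the assembly. What C cannot capture is the property described in the comment: the real instruction encodings are chosen so that a bit flip either changes the computed value (reported as FPRAS_BADCALC) or decodes as an illegal instruction, in which case the trap handler returns through the final three instructions above with FPRAS_BADTRAP.

	#include <sys/types.h>
	#include <sys/fpras.h>		/* FPRAS_BADCALC, FPRAS_BADTRAP */

	#define	CHK_IN1		0x12345678ULL	/* hypothetical known inputs */
	#define	CHK_IN2		0x0fedcba9ULL
	#define	CHK_EXPECTED	(CHK_IN1 + CHK_IN2)

	static int
	fpras_chk_shape(void)
	{
		volatile uint64_t a = CHK_IN1, b = CHK_IN2;

		if (a + b != CHK_EXPECTED)
			return (FPRAS_BADCALC);	/* calculation went wrong */
		return (0);			/* zero means all was well */
	}
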
 426 /*
 427  * fp_zero() - clear all fp data registers and the fsr
 428  */
 429 
 430 #if defined(lint) || defined(__lint)
 431 
 432 void
 433 fp_zero(void)
 434 {}
 435 
 436 #else   /* lint */
 437 
 438         ENTRY_NP(fp_zero)
 439         std     %g0, [%sp + ARGPUSH + STACK_BIAS]
 440         fzero   %f0
 441         fzero   %f2
 442         ldd     [%sp + ARGPUSH + STACK_BIAS], %fsr
 443         faddd   %f0, %f2, %f4
 444         fmuld   %f0, %f2, %f6
 445         faddd   %f0, %f2, %f8
 446         fmuld   %f0, %f2, %f10
 447         faddd   %f0, %f2, %f12
 448         fmuld   %f0, %f2, %f14
 449         faddd   %f0, %f2, %f16
 450         fmuld   %f0, %f2, %f18
 451         faddd   %f0, %f2, %f20
 452         fmuld   %f0, %f2, %f22
 453         faddd   %f0, %f2, %f24
 454         fmuld   %f0, %f2, %f26
 455         faddd   %f0, %f2, %f28
 456         fmuld   %f0, %f2, %f30
 457         faddd   %f0, %f2, %f32
 458         fmuld   %f0, %f2, %f34
 459         faddd   %f0, %f2, %f36
 460         fmuld   %f0, %f2, %f38
 461         faddd   %f0, %f2, %f40
 462         fmuld   %f0, %f2, %f42
 463         faddd   %f0, %f2, %f44
 464         fmuld   %f0, %f2, %f46
 465         faddd   %f0, %f2, %f48
 466         fmuld   %f0, %f2, %f50
 467         faddd   %f0, %f2, %f52
 468         fmuld   %f0, %f2, %f54
 469         faddd   %f0, %f2, %f56
 470         fmuld   %f0, %f2, %f58
 471         faddd   %f0, %f2, %f60
 472         retl
 473         fmuld   %f0, %f2, %f62
 474         SET_SIZE(fp_zero)
 475 
 476 #endif  /* lint */

The same file after the de-linting change follows: the #if defined(lint) stub blocks are gone and only the assembly remains, so the embedded line numbers below differ from those above.

  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  23  * Use is subject to license terms.
  24  */
  25 
  26 /*
  27  * General machine architecture & implementation specific
  28  * assembly language routines.
  29  */
  30 #include "assym.h"
  31 
  32 #include <sys/asm_linkage.h>
  33 #include <sys/machsystm.h>
  34 #include <sys/machthread.h>
  35 #include <sys/privregs.h>
  36 #include <sys/cmpregs.h>
  37 #include <sys/clock.h>
  38 #include <sys/fpras.h>
  39 
  40 /*
  41  * This isn't the routine you're looking for.
  42  *
  43  * The routine simply returns the value of %tick on the *current* processor.
  44  * Most of the time, gettick() [which in turn maps to %stick on platforms
  45  * that have different CPU %tick rates] is what you want.
  46  */
  47 
  48         ENTRY(ultra_gettick)
  49         retl
  50         rdpr    %tick, %o0
  51         SET_SIZE(ultra_gettick)
  52 
  53 /*
  54  * Get the processor ID.
  55  * === MID reg as specified in 15dec89 sun4u spec, sec 5.4.3
  56  */
  57 
  58         ENTRY(getprocessorid)
  59         CPU_INDEX(%o0, %o1)
  60         retl
  61         nop
  62         SET_SIZE(getprocessorid)
  63 
  64         ENTRY(set_error_enable_tl1)
  65         cmp     %g2, EER_SET_ABSOLUTE
  66         be      %xcc, 1f
  67           nop
  68         ldxa    [%g0]ASI_ESTATE_ERR, %g3
  69         membar  #Sync
  70         cmp     %g2, EER_SET_SETBITS
  71         be,a    %xcc, 1f
  72           or    %g3, %g1, %g1
  73         andn    %g3, %g1, %g1                   /* EER_SET_CLRBITS */
  74 1:
  75         stxa    %g1, [%g0]ASI_ESTATE_ERR        /* ecache error enable reg */
  76         membar  #Sync
  77         retry
  78         SET_SIZE(set_error_enable_tl1)
  79 
  80         ENTRY(set_error_enable)
  81         stxa    %o0, [%g0]ASI_ESTATE_ERR        /* ecache error enable reg */
  82         membar  #Sync
  83         retl
  84         nop
  85         SET_SIZE(set_error_enable)
  86 
  87         ENTRY(get_error_enable)
  88         retl
  89         ldxa    [%g0]ASI_ESTATE_ERR, %o0        /* ecache error enable reg */
  90         SET_SIZE(get_error_enable)
  91 
  92         ENTRY(get_asyncflt)
  93         ldxa    [%g0]ASI_AFSR, %o1              ! afsr reg
  94         retl
  95         stx     %o1, [%o0]
  96         SET_SIZE(get_asyncflt)
  97 
  98         ENTRY(set_asyncflt)
  99         stxa    %o0, [%g0]ASI_AFSR              ! afsr reg
 100         membar  #Sync
 101         retl
 102         nop
 103         SET_SIZE(set_asyncflt)
 104 
 105         ENTRY(get_asyncaddr)
 106         ldxa    [%g0]ASI_AFAR, %o1              ! afar reg
 107         retl
 108         stx     %o1, [%o0]
 109         SET_SIZE(get_asyncaddr)
 110 
 111         ENTRY_NP(tick2ns)
 112         sethi   %hi(cpunodes), %o4
 113         or      %o4, %lo(cpunodes), %o4         ! %o4 = &cpunodes
 114         ! Register usage:
 115         !
 116         ! o0 = timestamp
 117         ! o2 = byte offset into cpunodes for tick_nsec_scale of this CPU
 118         ! o4 = &cpunodes
 119         !
 120         mulx    %o1, CPU_NODE_SIZE, %o2 ! %o2 = byte offset into cpunodes
 121         add     %o2, TICK_NSEC_SCALE, %o2
 122         ld      [%o4 + %o2], %o2        ! %o2 = cpunodes[cpuid].tick_nsec_scale
 123         NATIVE_TIME_TO_NSEC_SCALE(%o0, %o2, %o3, TICK_NSEC_SHIFT)
 124         retl
 125         nop
 126         SET_SIZE(tick2ns)
 127 
 128         ENTRY(set_cmp_error_steering)
 129         membar  #Sync
 130         set     ASI_CORE_ID, %o0                ! %o0 = ASI_CORE_ID
 131         ldxa    [%o0]ASI_CMP_PER_CORE, %o0      ! get ASI_CORE_ID
 132         and     %o0, COREID_MASK, %o0
 133         set     ASI_CMP_ERROR_STEERING, %o1     ! %o1 = ERROR_STEERING_REG
  134         stxa    %o0, [%o1]ASI_CMP_SHARED        ! this core now handles
 135         membar  #Sync                           !  non-core specific errors
 136         retl
 137         nop
 138         SET_SIZE(set_cmp_error_steering)
 139 
 140         ENTRY(ultra_getver)
 141         retl
 142         rdpr    %ver, %o0
 143         SET_SIZE(ultra_getver)
 144 
 145         /*
 146          * Check instructions using just the AX pipelines, designed by
 147          * C.B. Liaw of PNP.
 148          *
 149          * This function must match a struct fpras_chkfn and must be
 150          * block aligned.  A zero return means all was well.  These
 151          * instructions are chosen to be sensitive to bit corruptions
 152          * on the fpras rewrite, so if a bit corruption still produces
 153          * a valid instruction we should still get an incorrect result
 154          * here.  This function is never called directly - it is copied
 155          * into per-cpu and per-operation buffers;  it must therefore
 156          * be absolutely position independent.  If an illegal instruction
 157          * is encountered then the trap handler trampolines to the final
 158          * three instructions of this function.
 159          *
 160          * We want two instructions that are complements of one another,
 161          * and which can perform a calculation with a known result.
 162          *
 163          * SETHI:
 164          *


 285         ADDCCC_CBR1_CBR2_CBR2           ! 13
 286         SETHI_CBV2_CBR1                 ! 14
 287         ADDCCC_CBR1_CBR2_CBR2           ! 15
 288         SETHI_CBV2_CBR1                 ! 16
 289 
 290         addc    CBR1, CBR2, CBR2        ! 1
 291         sethi   %hi(CBV3), CBR1         ! 2
 292         cmp     CBR1, CBR2              ! 3
  293         movnz   %icc, FPRAS_BADCALC, %o0 ! 4, how detected
 294         retl                            ! 5
 295           mov   CBO2, CBR2              ! 6, restore borrowed register
 296         .skip 4*(13-7+1)                ! 7 - 13
 297                                         !
 298                                         ! illegal instr'n trap comes here
 299                                         !
 300         mov     CBO2, CBR2              ! 14, restore borrowed register
 301         retl                            ! 15
 302           mov   FPRAS_BADTRAP, %o0      ! 16, how detected
 303         SET_SIZE(fpras_chkfn_type1)
 304 
 305 /*
 306  * fp_zero() - clear all fp data registers and the fsr
 307  */
 308 
 309         ENTRY_NP(fp_zero)
 310         std     %g0, [%sp + ARGPUSH + STACK_BIAS]
 311         fzero   %f0
 312         fzero   %f2
 313         ldd     [%sp + ARGPUSH + STACK_BIAS], %fsr
 314         faddd   %f0, %f2, %f4
 315         fmuld   %f0, %f2, %f6
 316         faddd   %f0, %f2, %f8
 317         fmuld   %f0, %f2, %f10
 318         faddd   %f0, %f2, %f12
 319         fmuld   %f0, %f2, %f14
 320         faddd   %f0, %f2, %f16
 321         fmuld   %f0, %f2, %f18
 322         faddd   %f0, %f2, %f20
 323         fmuld   %f0, %f2, %f22
 324         faddd   %f0, %f2, %f24
 325         fmuld   %f0, %f2, %f26
 326         faddd   %f0, %f2, %f28
 327         fmuld   %f0, %f2, %f30
 328         faddd   %f0, %f2, %f32
 329         fmuld   %f0, %f2, %f34
 330         faddd   %f0, %f2, %f36
 331         fmuld   %f0, %f2, %f38
 332         faddd   %f0, %f2, %f40
 333         fmuld   %f0, %f2, %f42
 334         faddd   %f0, %f2, %f44
 335         fmuld   %f0, %f2, %f46
 336         faddd   %f0, %f2, %f48
 337         fmuld   %f0, %f2, %f50
 338         faddd   %f0, %f2, %f52
 339         fmuld   %f0, %f2, %f54
 340         faddd   %f0, %f2, %f56
 341         fmuld   %f0, %f2, %f58
 342         faddd   %f0, %f2, %f60
 343         retl
 344         fmuld   %f0, %f2, %f62
 345         SET_SIZE(fp_zero)
 346