1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  23  * Use is subject to license terms.
  24  */
  25 
  26 /*
  27  * General assembly language routines.
  28  * It is the intent of this file to contain routines that are
  29  * independent of the specific kernel architecture, and those that are
  30  * common across kernel architectures.
  31  * As architectures diverge, and implementations of specific
  32  * architecture-dependent routines change, the routines should be moved
  33  * from this file into the respective ../`arch -k`/subr.s file.
  34  * Or, if you want to be really nice, move them to a file whose
  35  * name has something to do with the routine you are moving.
  36  */
  37 
  38 #if defined(lint)
  39 #include <sys/types.h>
  40 #include <sys/scb.h>
  41 #include <sys/systm.h>
  42 #include <sys/regset.h>
  43 #include <sys/sunddi.h>
  44 #include <sys/lockstat.h>
  45 #include <sys/dtrace.h>
  46 #include <sys/ftrace.h>
  47 #endif  /* lint */
  48 
  49 #include <sys/asm_linkage.h>
  50 #include <sys/privregs.h>
  51 #include <sys/machparam.h>        /* To get SYSBASE and PAGESIZE */
  52 #include <sys/machthread.h>
  53 #include <sys/clock.h>
  54 #include <sys/psr_compat.h>
  55 #include <sys/isa_defs.h>
  56 #include <sys/dditypes.h>
  57 #include <sys/panic.h>
  58 #include <sys/machlock.h>
  59 #include <sys/ontrap.h>
  60 
  61 #if !defined(lint)
  62 #include "assym.h"
  63 
  64         .seg    ".text"
  65         .align  4
  66 
/*
 * Macro to raise processor priority level.
 * Avoid dropping processor priority if already at high level.
 * Also avoid going below CPU->cpu_base_spl, which could've just been set by
 * a higher-level interrupt thread that just blocked.
 *
 * level can be %o0 (no other regs used here) or a constant.
 * Returns the previous PIL in %o0; clobbers %o1 and %o2.
 * Note: PIL is briefly raised to PIL_MAX so cpu_base_spl cannot change
 * underneath us while we compute the final value.
 */
#define RAISE(level) \
        rdpr    %pil, %o1;              /* get current PIL */           \
        cmp     %o1, level;             /* is PIL high enough? */       \
        bge     1f;                     /* yes, return */               \
        nop;                                                            \
        wrpr    %g0, PIL_MAX, %pil;     /* freeze CPU_BASE_SPL */       \
        ldn     [THREAD_REG + T_CPU], %o2;                              \
        ld      [%o2 + CPU_BASE_SPL], %o2;                              \
        cmp     %o2, level;             /* compare new to base */       \
        movl    %xcc, level, %o2;       /* use new if base lower */     \
        wrpr    %g0, %o2, %pil;                                         \
1:                                                                      \
        retl;                                                           \
        mov     %o1, %o0                /* return old PIL */
  89 
/*
 * Macro to raise processor priority level to level >= DISP_LEVEL.
 * Doesn't require comparison to CPU->cpu_base_spl.
 *
 * level can be %o0 (no other regs used here) or a constant.
 * Returns the previous PIL in %o0; clobbers %o1.
 */
#define RAISE_HIGH(level) \
        rdpr    %pil, %o1;              /* get current PIL */           \
        cmp     %o1, level;             /* is PIL high enough? */       \
        bge     1f;                     /* yes, return */               \
        nop;                                                            \
        wrpr    %g0, level, %pil;       /* use chosen value */          \
1:                                                                      \
        retl;                                                           \
        mov     %o1, %o0                /* return old PIL */
 105         
/*
 * Macro to set the priority to a specified level.
 * Avoid dropping the priority below CPU->cpu_base_spl.
 *
 * level can be %o0 (no other regs used here) or a constant with
 * the new PIL in the PSR_PIL field of the level arg.
 * Returns the previous PIL in %o0; clobbers %o1 and %o2.
 * Unlike RAISE(), this may lower the PIL (but never below cpu_base_spl).
 */
#define SETPRI(level) \
        rdpr    %pil, %o1;              /* get current PIL */           \
        wrpr    %g0, PIL_MAX, %pil;     /* freeze CPU_BASE_SPL */       \
        ldn     [THREAD_REG + T_CPU], %o2;                              \
        ld      [%o2 + CPU_BASE_SPL], %o2;                              \
        cmp     %o2, level;             /* compare new to base */       \
        movl    %xcc, level, %o2;       /* use new if base lower */     \
        wrpr    %g0, %o2, %pil;                                         \
        retl;                                                           \
        mov     %o1, %o0                /* return old PIL */
 123 
/*
 * Macro to set the priority to a specified level at or above LOCK_LEVEL.
 * Doesn't require comparison to CPU->cpu_base_spl.
 *
 * level can be %o0 (no other regs used here) or a constant with
 * the new PIL in the PSR_PIL field of the level arg.
 * Returns the previous PIL in %o0; clobbers %o1.
 */
#define SETPRI_HIGH(level) \
        rdpr    %pil, %o1;              /* get current PIL */           \
        wrpr    %g0, level, %pil;                                       \
        retl;                                                           \
        mov     %o1, %o0                /* return old PIL */
 136 
 137 #endif  /* lint */
 138 
 139         /*
         * Berkeley 4.3 introduced symbolically named interrupt levels
         * as a way to deal with priority in a machine independent fashion.
 142          * Numbered priorities are machine specific, and should be
 143          * discouraged where possible.
 144          *
 145          * Note, for the machine specific priorities there are
 146          * examples listed for devices that use a particular priority.
         * It should not be construed that all devices of that
         * type should be at that priority.  It is simply where
         * the current devices fit into the priority scheme based
         * upon time criticality.
 151          *
 152          * The underlying assumption of these assignments is that
 153          * SPARC9 IPL 10 is the highest level from which a device
 154          * routine can call wakeup.  Devices that interrupt from higher
 155          * levels are restricted in what they can do.  If they need
 156          * kernels services they should schedule a routine at a lower
 157          * level (via software interrupt) to do the required
 158          * processing.
 159          *
 160          * Examples of this higher usage:
 161          *      Level   Usage
 162          *      15      Asynchronous memory exceptions
 163          *      14      Profiling clock (and PROM uart polling clock)
 164          *      13      Audio device
 165          *      12      Serial ports
 166          *      11      Floppy controller
 167          *
 168          * The serial ports request lower level processing on level 6.
 169          * Audio and floppy request lower level processing on level 4.
 170          *
 171          * Also, almost all splN routines (where N is a number or a
 172          * mnemonic) will do a RAISE(), on the assumption that they are
 173          * never used to lower our priority.
 174          * The exceptions are:
 175          *      spl8()          Because you can't be above 15 to begin with!
 176          *      splzs()         Because this is used at boot time to lower our
 177          *                      priority, to allow the PROM to poll the uart.
 178          *      spl0()          Used to lower priority to 0.
 179          */
 180 
#if defined(lint)

/* lint stubs for the spl*() routines implemented in assembly below */
int spl0(void)          { return (0); }
int spl6(void)          { return (0); }
int spl7(void)          { return (0); }
int spl8(void)          { return (0); }
int splhi(void)         { return (0); }
int splhigh(void)       { return (0); }
int splzs(void)         { return (0); }

#else   /* lint */
 192 
        /* locks out all interrupts, including memory errors */
        /* SETPRI_HIGH, not RAISE: you can't be above PIL 15 to begin with */
        ENTRY(spl8)
        SETPRI_HIGH(15)
        SET_SIZE(spl8)
 197 
        /* just below the level that profiling runs (level 14) */
        ENTRY(spl7)
        RAISE_HIGH(13)
        SET_SIZE(spl7)
 202 
        /* sun specific - highest priority onboard serial i/o zs ports */
        /* used at boot time to lower priority so the PROM can poll the uart */
        ENTRY(splzs)
        SETPRI_HIGH(12) /* Can't be a RAISE, as it's used to lower us */
        SET_SIZE(splzs)
 207 
        /*
         * should lock out clocks and all interrupts,
         * as you can see, there are exceptions
         * (splhi, splhigh, spl6 and i_ddi_splhigh all share this body)
         */
        ENTRY(splhi)
        ALTENTRY(splhigh)
        ALTENTRY(spl6)
        ALTENTRY(i_ddi_splhigh)
        RAISE_HIGH(DISP_LEVEL)
        SET_SIZE(i_ddi_splhigh)
        SET_SIZE(spl6)
        SET_SIZE(splhigh)
        SET_SIZE(splhi)
 221 
        /* allow all interrupts (SETPRI still respects CPU->cpu_base_spl) */
        ENTRY(spl0)
        SETPRI(0)
        SET_SIZE(spl0)

#endif  /* lint */
 228 
/*
 * splx - set PIL back to that indicated by the old %pil passed as an argument,
 * or to the CPU's base priority, whichever is higher.
 */

#if defined(lint)

/* ARGSUSED */
void
splx(int level)
{}

#else   /* lint */

        ENTRY(splx)
        ALTENTRY(i_ddi_splx)
        SETPRI(%o0)             /* set PIL */
        SET_SIZE(i_ddi_splx)
        SET_SIZE(splx)

#endif  /* lint */
 250 
/*
 * splr()
 *
 * splr is like splx but will only raise the priority and never drop it.
 * Be careful not to set priority lower than CPU->cpu_base_spl,
 * even though it seems we're raising the priority, it could be set higher
 * at any time by an interrupt routine, so we must block interrupts and
 * look at CPU->cpu_base_spl.
 */

#if defined(lint)

/* ARGSUSED */
int
splr(int level)
{ return (0); }

#else   /* lint */
        ENTRY(splr)
        RAISE(%o0)
        SET_SIZE(splr)

#endif  /* lint */
 274 
/*
 * on_fault()
 * Catch lofault faults. Like setjmp except it returns one
 * if code following causes uncorrectable fault. Turned off
 * by calling no_fault().
 *
 * %o0 = ljb, the label_t saved/restored via setjmp/longjmp.
 * Also records ljb in curthread->t_onfault and installs catch_fault
 * as the t_lofault handler.
 */

#if defined(lint)

/* ARGSUSED */
int
on_fault(label_t *ljb)
{ return (0); }

#else   /* lint */

        ENTRY(on_fault)
        membar  #Sync                   ! sync error barrier (see copy.s)
        stn     %o0, [THREAD_REG + T_ONFAULT]
        set     catch_fault, %o1
        b       setjmp                  ! let setjmp do the rest
        stn     %o1, [THREAD_REG + T_LOFAULT]   ! put catch_fault in t_lofault

catch_fault:
        ! Entered from the lofault path on an uncorrectable fault.
        ! Clear t_onfault/t_lofault and longjmp back so on_fault()
        ! appears to return a second time, nonzero.
        save    %sp, -SA(WINDOWSIZE), %sp ! goto next window so that we can rtn
        ldn     [THREAD_REG + T_ONFAULT], %o0
        membar  #Sync                           ! sync error barrier
        stn     %g0, [THREAD_REG + T_ONFAULT]   ! turn off onfault
        b       longjmp                 ! let longjmp do the rest
        stn     %g0, [THREAD_REG + T_LOFAULT]   ! turn off lofault
        SET_SIZE(on_fault)

#endif  /* lint */
 308 
/*
 * no_fault()
 * turn off fault catching (clears both t_onfault and t_lofault).
 */

#if defined(lint)

void
no_fault(void)
{}

#else   /* lint */

        ENTRY(no_fault)
        membar  #Sync                           ! sync error barrier
        stn     %g0, [THREAD_REG + T_ONFAULT]   ! turn off onfault
        retl
        stn     %g0, [THREAD_REG + T_LOFAULT]   ! delay - turn off lofault
        SET_SIZE(no_fault)

#endif  /* lint */
 330 
/*
 * Default trampoline code for on_trap() (see <sys/ontrap.h>).  On sparcv9,
 * the trap code will complete trap processing but reset the return %pc to
 * ot_trampoline, which will by default be set to the address of this code.
 * We longjmp(&curthread->t_ontrap->ot_jmpbuf) to return back to on_trap().
 */
#if defined(lint)

void
on_trap_trampoline(void)
{}

#else   /* lint */

        ENTRY(on_trap_trampoline)
        ldn     [THREAD_REG + T_ONTRAP], %o0    ! %o0 = curthread->t_ontrap
        b       longjmp                         ! longjmp(&otp->ot_jmpbuf)
        add     %o0, OT_JMPBUF, %o0             ! delay - %o0 = &ot_jmpbuf
        SET_SIZE(on_trap_trampoline)

#endif  /* lint */
 352 
/*
 * Push a new element on to the t_ontrap stack.  Refer to <sys/ontrap.h> for
 * more information about the on_trap() mechanism.  If the on_trap_data is the
 * same as the topmost stack element, we just modify that element.
 * On UltraSPARC, we need to issue a membar #Sync before modifying t_ontrap.
 * The issue barrier is defined to force all deferred errors to complete before
 * we go any further.  We want these errors to be processed before we modify
 * our current error protection.
 *
 * %o0 = otp (on_trap_data to initialize and push), %o1 = prot flags.
 * Tail-calls setjmp on &otp->ot_jmpbuf, so the first return is 0; a
 * subsequent trap longjmps back through ot_trampoline (above) with 1.
 */
#if defined(lint)

/*ARGSUSED*/
int
on_trap(on_trap_data_t *otp, uint_t prot)
{ return (0); }

#else   /* lint */

        ENTRY(on_trap)
        membar  #Sync                           ! force error barrier
        sth     %o1, [%o0 + OT_PROT]            ! ot_prot = prot
        sth     %g0, [%o0 + OT_TRAP]            ! ot_trap = 0
        set     on_trap_trampoline, %o2         ! %o2 = &on_trap_trampoline
        stn     %o2, [%o0 + OT_TRAMPOLINE]      ! ot_trampoline = %o2
        stn     %g0, [%o0 + OT_HANDLE]          ! ot_handle = NULL
        ldn     [THREAD_REG + T_ONTRAP], %o2    ! %o2 = curthread->t_ontrap
        cmp     %o0, %o2                        ! if (otp == %o2)
        be      0f                              !    don't modify t_ontrap
        stn     %g0, [%o0 + OT_PAD1]            ! delay - ot_pad1 = NULL

        stn     %o2, [%o0 + OT_PREV]            ! ot_prev = t_ontrap
        membar  #Sync                           ! force error barrier
        stn     %o0, [THREAD_REG + T_ONTRAP]    ! t_ontrap = otp

0:      b       setjmp                          ! let setjmp do the rest
        add     %o0, OT_JMPBUF, %o0             ! %o0 = &ot_jmpbuf
        SET_SIZE(on_trap)

#endif  /* lint */
 392 
/*
 * Setjmp and longjmp implement non-local gotos using state vectors
 * type label_t.
 *
 * setjmp(lp): record return address and stack pointer in *lp, return 0.
 */

#if defined(lint)

/* ARGSUSED */
int
setjmp(label_t *lp)
{ return (0); }

#else   /* lint */

        ENTRY(setjmp)
        stn     %o7, [%o0 + L_PC]       ! save return address
        stn     %sp, [%o0 + L_SP]       ! save stack ptr
        retl
        clr     %o0                     ! delay - return 0
        SET_SIZE(setjmp)

#endif  /* lint */
 415 
 416 
#if defined(lint)

/* ARGSUSED */
void
longjmp(label_t *lp)
{}

#else   /* lint */

        /*
         * longjmp(lp): restore the context recorded by setjmp(lp) and
         * resume there with a return value of 1.  Never returns to its
         * own caller.
         */
        ENTRY(longjmp)
        !
        ! The following save is required so that an extra register
        ! window is flushed.  Flushw flushes nwindows-2
        ! register windows.  If setjmp and longjmp are called from
        ! within the same window, that window will not get pushed
        ! out onto the stack without the extra save below.  Tail call
        ! optimization can lead to callers of longjmp executing
        ! from a window that could be the same as the setjmp,
        ! thus the need for the following save.
        !
        save    %sp, -SA(MINFRAME), %sp
        flushw                          ! flush all but this window
        ldn     [%i0 + L_PC], %i7       ! restore return addr
        ldn     [%i0 + L_SP], %fp       ! restore sp for dest on foreign stack
        ret                             ! return 1
        restore %g0, 1, %o0             ! takes underflow, switches stacks
        SET_SIZE(longjmp)

#endif  /* lint */
 446 
/*
 * movtuc(length, from, to, table)
 *
 * VAX movtuc instruction (sort of).
 *
 * Copies bytes from 'from' to 'to', translating each through 'table',
 * until 'length' bytes are moved or a byte whose table entry is zero
 * (the escape character) is encountered.  Returns the number of bytes
 * moved; the translated zero byte is stored but not counted (the store
 * sits in the branch delay slot, so it happens in both cases).
 */

#if defined(lint)

/*ARGSUSED*/
int
movtuc(size_t length, u_char *from, u_char *to, u_char table[])
{ return (0); }

#else   /* lint */

        ENTRY(movtuc)
        tst     %o0
        ble,pn  %ncc, 2f                ! check length
        clr     %o4                     ! delay - index = 0

        ldub    [%o1 + %o4], %g1        ! get next byte in string
0:
        ldub    [%o3 + %g1], %g1        ! get corresponding table entry
        tst     %g1                     ! escape char?
        bnz     1f
        stb     %g1, [%o2 + %o4]        ! delay slot, store it

        retl                            ! return (bytes moved)
        mov     %o4, %o0
1:
        inc     %o4                     ! increment index
        cmp     %o4, %o0                ! index < length ?
        bl,a,pt %ncc, 0b
        ldub    [%o1 + %o4], %g1        ! delay slot, get next byte in string
2:
        retl                            ! return (bytes moved)
        mov     %o4, %o0
        SET_SIZE(movtuc)

#endif  /* lint */
 487 
/*
 * scanc(length, string, table, mask)
 *
 * VAX scanc instruction.
 *
 * Scans 'string' for the first byte whose table entry has any bit of
 * 'mask' set.  Returns (length - index) of that byte, or 0 if no byte
 * matches within 'length'.
 */

#if defined(lint)

/*ARGSUSED*/
int
scanc(size_t length, u_char *string, u_char table[], u_char mask)
{ return (0); }

#else   /* lint */

        ENTRY(scanc)
        tst     %o0
        ble,pn  %ncc, 1f                ! check length
        clr     %o4                     ! delay - index = 0
0:
        ldub    [%o1 + %o4], %g1        ! get next byte in string
        cmp     %o4, %o0                ! interlock slot, index < length ?
        ldub    [%o2 + %g1], %g1        ! get corresponding table entry
        bge,pn  %ncc, 1f                ! interlock slot
        btst    %o3, %g1                ! apply the mask
        bz,a    0b
        inc     %o4                     ! delay slot, increment index
1:
        retl                            ! return(length - index)
        sub     %o0, %o4, %o0
        SET_SIZE(scanc)

#endif  /* lint */
 521 
/*
 * if a() calls b() calls caller(),
 * caller() returns return address in a().
 */

#if defined(lint)

caddr_t
caller(void)
{ return (0); }

#else   /* lint */

        ENTRY(caller)
        retl
        mov     %i7, %o0        ! delay - %i7 = our caller's return address
        SET_SIZE(caller)

#endif  /* lint */
 541 
/*
 * if a() calls callee(), callee() returns the
 * return address in a();
 */

#if defined(lint)

caddr_t
callee(void)
{ return (0); }

#else   /* lint */

        ENTRY(callee)
        retl
        mov     %o7, %o0        ! delay - %o7 = immediate return address
        SET_SIZE(callee)

#endif  /* lint */
 561 
/*
 * return the current frame pointer
 */

#if defined(lint)

greg_t
getfp(void)
{ return (0); }

#else   /* lint */

        ENTRY(getfp)
        retl
        mov     %fp, %o0        ! delay - return %fp
        SET_SIZE(getfp)

#endif  /* lint */
 580 
/*
 * Get vector base register
 *
 * NOTE(review): %tbr is the SPARC V8 trap base register; the rest of this
 * file targets sparcv9 (rdpr/wrpr %pil, %xcc) — confirm the assembler
 * accepts this form for the intended target.
 */

#if defined(lint)

greg_t
gettbr(void)
{ return (0); }

#else   /* lint */

        ENTRY(gettbr)
        retl
        mov     %tbr, %o0       ! delay - return trap base register
        SET_SIZE(gettbr)

#endif  /* lint */
 599 
/*
 * Get processor state register, V9 faked to look like V8.
 * Note: does not provide ccr.xcc and provides FPRS.FEF instead of
 * PSTATE.PEF, because PSTATE.PEF is always on in order to allow the
 * libc_psr memcpy routines to run without hitting the fp_disabled trap.
 *
 * Result is assembled from %ccr (icc), %fprs (FEF), and the fixed
 * V9_PSR_IMPLVER impl/ver field.
 */

#if defined(lint)

greg_t
getpsr(void)
{ return (0); }

#else   /* lint */

        ENTRY(getpsr)
        rd      %ccr, %o1                       ! get ccr
        sll     %o1, PSR_ICC_SHIFT, %o0         ! move icc to V8 psr.icc
        rd      %fprs, %o1                      ! get fprs
        and     %o1, FPRS_FEF, %o1              ! mask out dirty upper/lower
        sllx    %o1, PSR_FPRS_FEF_SHIFT, %o1    ! shift fef to V8 psr.ef
        or      %o0, %o1, %o0                   ! or into psr.ef
        set     V9_PSR_IMPLVER, %o1             ! SI assigned impl/ver: 0xef
        retl
        or      %o0, %o1, %o0                   ! or into psr.impl/ver
        SET_SIZE(getpsr)

#endif  /* lint */
 628 
/*
 * Get current processor interrupt level
 */

#if defined(lint)

u_int
getpil(void)
{ return (0); }

#else   /* lint */

        ENTRY(getpil)
        retl
        rdpr    %pil, %o0       ! delay - return current %pil
        SET_SIZE(getpil)

#endif  /* lint */
 647 
#if defined(lint)

/*ARGSUSED*/
void
setpil(u_int pil)
{}

#else   /* lint */

        /*
         * Set %pil unconditionally.  Unlike splx()/SETPRI(), this does
         * not consult CPU->cpu_base_spl — the caller is responsible.
         */
        ENTRY(setpil)
        retl
        wrpr    %g0, %o0, %pil  ! delay - %pil = pil
        SET_SIZE(setpil)

#endif  /* lint */
 663 
 664 
 665 /*
 666  * _insque(entryp, predp)
 667  *
 668  * Insert entryp after predp in a doubly linked list.
 669  */
 670 
 671 #if defined(lint)
 672 
 673 /*ARGSUSED*/
 674 void
 675 _insque(caddr_t entryp, caddr_t predp)
 676 {}
 677 
 678 #else   /* lint */
 679 
 680         ENTRY(_insque)
 681         ldn     [%o1], %g1              ! predp->forw
 682         stn     %o1, [%o0 + CPTRSIZE]   ! entryp->back = predp
 683         stn     %g1, [%o0]              ! entryp->forw = predp->forw
 684         stn     %o0, [%o1]              ! predp->forw = entryp
 685         retl
 686         stn     %o0, [%g1 + CPTRSIZE]   ! predp->forw->back = entryp
 687         SET_SIZE(_insque)
 688 
 689 #endif  /* lint */
 690 
/*
 * _remque(entryp)
 *
 * Remove entryp from a doubly linked list.
 * Uses the same { forw, back } element layout as _insque(); the removed
 * entry's own pointers are left untouched.
 */

#if defined(lint)

/*ARGSUSED*/
void
_remque(caddr_t entryp)
{}

#else   /* lint */

        ENTRY(_remque)
        ldn     [%o0], %g1              ! entryp->forw
        ldn     [%o0 + CPTRSIZE], %g2   ! entryp->back
        stn     %g1, [%g2]              ! entryp->back->forw = entryp->forw
        retl
        stn     %g2, [%g1 + CPTRSIZE]   ! entryp->forw->back = entryp->back
        SET_SIZE(_remque)

#endif  /* lint */
 715 

/*
 * strlen(str)
 *
 * Returns the number of non-NULL bytes in string argument.
 *
 * Strategy: byte/halfword steps until the source pointer is word
 * aligned, then a word-at-a-time loop using the classic carry-trick
 * constants to detect a zero byte within each word.
 *
 * XXX -  why is this here, rather than the traditional file?
 *        why does it have local labels which don't start with a `.'?
 */

#if defined(lint)

/*ARGSUSED*/
size_t
strlen(const char *str)
{ return (0); }

#else   /* lint */

        ENTRY(strlen)
        mov     %o0, %o1
        andcc   %o1, 3, %o3             ! is src word aligned
        bz      $nowalgnd
        clr     %o0                     ! length of non-zero bytes
        cmp     %o3, 2                  ! is src half-word aligned
        be      $s2algn
        cmp     %o3, 3                  ! src is byte aligned
        ldub    [%o1], %o3              ! move 1 or 3 bytes to align it
        inc     1, %o1                  ! in either case, safe to do a byte
        be      $s3algn
        tst     %o3
$s1algn:
        bnz,a   $s2algn                 ! now go align dest
        inc     1, %o0
        b,a     $done

$s2algn:
        lduh    [%o1], %o3              ! know src is half-word aligned
        inc     2, %o1
        srl     %o3, 8, %o4
        tst     %o4                     ! is the first byte zero
        bnz,a   1f
        inc     %o0
        b,a     $done
1:      andcc   %o3, 0xff, %o3          ! is the second byte zero
        bnz,a   $nowalgnd
        inc     %o0
        b,a     $done
$s3algn:
        bnz,a   $nowalgnd
        inc     1, %o0
        b,a     $done

$nowalgnd:
        ! use trick to check if any read bytes of a word are zero
        ! the following two constants will generate "byte carries"
        ! and check if any bit in a byte is set, if all characters
        ! are 7bits (unsigned) this always works, otherwise
        ! there is a special case that rarely happens, see below

        set     0x7efefeff, %o3
        set     0x81010100, %o4

3:      ld      [%o1], %o2              ! main loop
        inc     4, %o1
        add     %o2, %o3, %o5           ! generate byte-carries
        xor     %o5, %o2, %o5           ! see if original bits set
        and     %o5, %o4, %o5
        cmp     %o5, %o4                ! if ==,  no zero bytes
        be,a    3b
        inc     4, %o0

        ! check for the zero byte and increment the count appropriately
        ! some information (the carry bit) is lost if bit 31
        ! was set (very rare), if this is the rare condition,
        ! return to the main loop again

        sethi   %hi(0xff000000), %o5    ! mask used to test for terminator
        andcc   %o2, %o5, %g0           ! check if first byte was zero
        bnz     1f
        srl     %o5, 8, %o5
$done:
        retl
        nop
1:      andcc   %o2, %o5, %g0           ! check if second byte was zero
        bnz     1f
        srl     %o5, 8, %o5
$done1:
        retl
        inc     %o0
1:      andcc   %o2, %o5, %g0           ! check if third byte was zero
        bnz     1f
        andcc   %o2, 0xff, %g0          ! check if last byte is zero
$done2:
        retl
        inc     2, %o0
1:      bnz,a   3b
        inc     4, %o0                  ! count of bytes
$done3:
        retl
        inc     3, %o0
        SET_SIZE(strlen)

#endif  /* lint */
 820 
/*
 * Provide a C callable interface to the membar instruction.
 */

#if defined(lint)

/* lint stubs for the membar_*() routines implemented in assembly below */
void
membar_ldld(void)
{}

void
membar_stld(void)
{}

void
membar_ldst(void)
{}

void
membar_stst(void)
{}

void
membar_ldld_ldst(void)
{}

void
membar_ldld_stld(void)
{}

void
membar_ldld_stst(void)
{}

void
membar_stld_ldld(void)
{}

void
membar_stld_ldst(void)
{}

void
membar_stld_stst(void)
{}

void
membar_ldst_ldld(void)
{}

void
membar_ldst_stld(void)
{}

void
membar_ldst_stst(void)
{}

void
membar_stst_ldld(void)
{}

void
membar_stst_stld(void)
{}

void
membar_stst_ldst(void)
{}

void
membar_lookaside(void)
{}

void
membar_memissue(void)
{}

void
membar_sync(void)
{}

#else
        /*
         * Each routine issues its membar in the retl delay slot; the
         * two-ordering variants are aliased in both argument orders
         * via ALTENTRY since the combined barrier is symmetric.
         */
        ENTRY(membar_ldld)
        retl
        membar  #LoadLoad
        SET_SIZE(membar_ldld)

        ENTRY(membar_stld)
        retl
        membar  #StoreLoad
        SET_SIZE(membar_stld)

        ENTRY(membar_ldst)
        retl
        membar  #LoadStore
        SET_SIZE(membar_ldst)

        ENTRY(membar_stst)
        retl
        membar  #StoreStore
        SET_SIZE(membar_stst)

        ENTRY(membar_ldld_stld)
        ALTENTRY(membar_stld_ldld)
        retl
        membar  #LoadLoad|#StoreLoad
        SET_SIZE(membar_stld_ldld)
        SET_SIZE(membar_ldld_stld)

        ENTRY(membar_ldld_ldst)
        ALTENTRY(membar_ldst_ldld)
        retl
        membar  #LoadLoad|#LoadStore
        SET_SIZE(membar_ldst_ldld)
        SET_SIZE(membar_ldld_ldst)

        ENTRY(membar_ldld_stst)
        ALTENTRY(membar_stst_ldld)
        retl
        membar  #LoadLoad|#StoreStore
        SET_SIZE(membar_stst_ldld)
        SET_SIZE(membar_ldld_stst)

        ENTRY(membar_stld_ldst)
        ALTENTRY(membar_ldst_stld)
        retl
        membar  #StoreLoad|#LoadStore
        SET_SIZE(membar_ldst_stld)
        SET_SIZE(membar_stld_ldst)

        ENTRY(membar_stld_stst)
        ALTENTRY(membar_stst_stld)
        retl
        membar  #StoreLoad|#StoreStore
        SET_SIZE(membar_stst_stld)
        SET_SIZE(membar_stld_stst)

        ENTRY(membar_ldst_stst)
        ALTENTRY(membar_stst_ldst)
        retl
        membar  #LoadStore|#StoreStore
        SET_SIZE(membar_stst_ldst)
        SET_SIZE(membar_ldst_stst)

        ENTRY(membar_lookaside)
        retl
        membar  #Lookaside
        SET_SIZE(membar_lookaside)

        ENTRY(membar_memissue)
        retl
        membar  #MemIssue
        SET_SIZE(membar_memissue)

        ENTRY(membar_sync)
        retl
        membar  #Sync
        SET_SIZE(membar_sync)

#endif  /* lint */
 982 
 983 
#if defined(lint)

/*ARGSUSED*/
int
fuword64(const void *addr, uint64_t *dst)
{ return (0); }

/*ARGSUSED*/
int
fuword32(const void *addr, uint32_t *dst)
{ return (0); }

/*ARGSUSED*/
int
fuword16(const void *addr, uint16_t *dst)
{ return (0); }

/*ARGSUSED*/
int
fuword8(const void *addr, uint8_t *dst)
{ return (0); }

/*ARGSUSED*/
int
dtrace_ft_fuword64(const void *addr, uint64_t *dst)
{ return (0); }

/*ARGSUSED*/
int
dtrace_ft_fuword32(const void *addr, uint32_t *dst)
{ return (0); }

#else   /* lint */

/*
 * Since all of the fuword() variants are so similar, we have a macro to spit
 * them out.
 *
 * Fetch a word from user space (via ASI_USER):
 *      %o0 = user source address, %o1 = kernel destination.
 * Returns 0 on success; on fault, tail-calls the thread's t_copyops
 * COPYOP handler if one is installed, otherwise returns -1.
 *
 * NOTE(review): the dtrace_ft_fuword* lint stubs above have no matching
 * assembly in this chunk — presumably provided elsewhere; confirm.
 */

#define FUWORD(NAME, LOAD, STORE, COPYOP)       \
        ENTRY(NAME);                            \
        sethi   %hi(1f), %o5;                   /* fault handler address */ \
        ldn     [THREAD_REG + T_LOFAULT], %o3;  /* save old lofault */  \
        or      %o5, %lo(1f), %o5;              \
        membar  #Sync;                          /* error barrier */     \
        stn     %o5, [THREAD_REG + T_LOFAULT];  /* install handler */   \
        LOAD    [%o0]ASI_USER, %o2;             /* fetch user word */   \
        membar  #Sync;                          /* error barrier */     \
        stn     %o3, [THREAD_REG + T_LOFAULT];  /* restore lofault */   \
        mov     0, %o0;                         /* success */           \
        retl;                                   \
        STORE   %o2, [%o1];                     /* delay - store result */ \
1:                                              /* fault path */        \
        membar  #Sync;                          \
        stn     %o3, [THREAD_REG + T_LOFAULT];  /* restore lofault */   \
        ldn     [THREAD_REG + T_COPYOPS], %o2;  \
        brz     %o2, 2f;                        /* no copyops vector */ \
        nop;                                    \
        ldn     [%o2 + COPYOP], %g1;            \
        jmp     %g1;                            /* tail-call handler */ \
        nop;                                    \
2:                                              \
        retl;                                   \
        mov     -1, %o0;                        /* error */             \
        SET_SIZE(NAME)

        FUWORD(fuword64, ldxa, stx, CP_FUWORD64)
        FUWORD(fuword32, lda, st, CP_FUWORD32)
        FUWORD(fuword16, lduha, sth, CP_FUWORD16)
        FUWORD(fuword8, lduba, stb, CP_FUWORD8)

#endif  /* lint */
1056 
1057 
#if defined(lint)

/*ARGSUSED*/
int
suword64(void *addr, uint64_t value)
{ return (0); }

/*ARGSUSED*/
int
suword32(void *addr, uint32_t value)
{ return (0); }

/*ARGSUSED*/
int
suword16(void *addr, uint16_t value)
{ return (0); }

/*ARGSUSED*/
int
suword8(void *addr, uint8_t value)
{ return (0); }

#else   /* lint */

/*
 * Since all of the suword() variants are so similar, we have a macro to spit
 * them out.
 *
 * SUWORD(NAME, STORE, COPYOP) expands to a store-user-word routine:
 * %o0 = user address, %o1 = value; returns 0 on success or -1 on fault
 * (unless a t_copyops handler takes over).  Structure mirrors FUWORD
 * above: label 1 is installed as t_lofault (with membar #Sync fences),
 * the value is stored with STORE to [%o0] using ASI_USER, and the fault
 * path at label 1 restores t_lofault, then either tail-jumps to the
 * COPYOP entry of t_copyops or returns -1.
 */

#define SUWORD(NAME, STORE, COPYOP)             \
        ENTRY(NAME)                             \
        sethi   %hi(1f), %o5;                   \
        ldn     [THREAD_REG + T_LOFAULT], %o3;  \
        or      %o5, %lo(1f), %o5;              \
        membar  #Sync;                          \
        stn     %o5, [THREAD_REG + T_LOFAULT];  \
        STORE   %o1, [%o0]ASI_USER;             \
        membar  #Sync;                          \
        stn     %o3, [THREAD_REG + T_LOFAULT];  \
        retl;                                   \
        clr     %o0;                            \
1:                                              \
        membar  #Sync;                          \
        stn     %o3, [THREAD_REG + T_LOFAULT];  \
        ldn     [THREAD_REG + T_COPYOPS], %o2;  \
        brz     %o2, 2f;                        \
        nop;                                    \
        ldn     [%o2 + COPYOP], %g1;            \
        jmp     %g1;                            \
        nop;                                    \
2:                                              \
        retl;                                   \
        mov     -1, %o0;                        \
        SET_SIZE(NAME)

        SUWORD(suword64, stxa, CP_SUWORD64)
        SUWORD(suword32, sta, CP_SUWORD32)
        SUWORD(suword16, stha, CP_SUWORD16)
        SUWORD(suword8, stba, CP_SUWORD8)

#endif  /* lint */
1119 
#if defined(lint)

/*ARGSUSED*/
void
fuword8_noerr(const void *addr, uint8_t *dst)
{}

/*ARGSUSED*/
void
fuword16_noerr(const void *addr, uint16_t *dst)
{}

/*ARGSUSED*/
void
fuword32_noerr(const void *addr, uint32_t *dst)
{}

/*ARGSUSED*/
void
fuword64_noerr(const void *addr, uint64_t *dst)
{}

#else   /* lint */

        !
        ! fuwordN_noerr(addr, dst): fetch a user word (%o0 = user address,
        ! %o1 = kernel destination) and return nothing.  Unlike the FUWORD
        ! routines above, no t_lofault handler is installed here -- these
        ! routines establish no on-fault protection of their own, so fault
        ! handling is presumably arranged by the caller (NOTE(review):
        ! confirm against callers).
        !
        ENTRY(fuword8_noerr)
        lduba   [%o0]ASI_USER, %o0
        retl
        stb     %o0, [%o1]              ! delay: *dst = value
        SET_SIZE(fuword8_noerr)

        ENTRY(fuword16_noerr)
        lduha   [%o0]ASI_USER, %o0
        retl
        sth     %o0, [%o1]
        SET_SIZE(fuword16_noerr)

        ENTRY(fuword32_noerr)
        lda     [%o0]ASI_USER, %o0
        retl
        st      %o0, [%o1]
        SET_SIZE(fuword32_noerr)

        ENTRY(fuword64_noerr)
        ldxa    [%o0]ASI_USER, %o0
        retl
        stx     %o0, [%o1]
        SET_SIZE(fuword64_noerr)

#endif  /* lint */
1169 
#if defined(lint)

/*ARGSUSED*/
void
suword8_noerr(void *addr, uint8_t value)
{}

/*ARGSUSED*/
void
suword16_noerr(void *addr, uint16_t value)
{}

/*ARGSUSED*/
void
suword32_noerr(void *addr, uint32_t value)
{}

/*ARGSUSED*/
void
suword64_noerr(void *addr, uint64_t value)
{}

#else   /* lint */

        !
        ! suwordN_noerr(addr, value): store %o1 to user address %o0 with a
        ! single ASI_USER store in the retl delay slot.  As with the
        ! fuwordN_noerr routines, no t_lofault protection is established
        ! here; the caller is presumed to provide it (NOTE(review):
        ! confirm against callers).
        !
        ENTRY(suword8_noerr)
        retl
        stba    %o1, [%o0]ASI_USER
        SET_SIZE(suword8_noerr)

        ENTRY(suword16_noerr)
        retl
        stha    %o1, [%o0]ASI_USER
        SET_SIZE(suword16_noerr)

        ENTRY(suword32_noerr)
        retl
        sta     %o1, [%o0]ASI_USER
        SET_SIZE(suword32_noerr)

        ENTRY(suword64_noerr)
        retl
        stxa    %o1, [%o0]ASI_USER
        SET_SIZE(suword64_noerr)

#endif  /* lint */
1215 
#if defined(__lint)

/*ARGSUSED*/
int
subyte(void *addr, uchar_t value)
{ return (0); }

/*ARGSUSED*/
void
subyte_noerr(void *addr, uchar_t value)
{}

/*ARGSUSED*/
int
fulword(const void *addr, ulong_t *valuep)
{ return (0); }

/*ARGSUSED*/
void
fulword_noerr(const void *addr, ulong_t *valuep)
{}

/*ARGSUSED*/
int
sulword(void *addr, ulong_t valuep)
{ return (0); }

/*ARGSUSED*/
void
sulword_noerr(void *addr, ulong_t valuep)
{}

#else

        !
        ! subyte() and the fulword()/sulword() family are pure aliases:
        ! weak symbols bound directly to the fixed-size routines defined
        ! above.  subyte maps to suword8; fulword/sulword map to the
        ! 64-bit routines under _LP64 and the 32-bit routines otherwise,
        ! tracking sizeof (ulong_t).
        !
        .weak   subyte
        subyte=suword8
        .weak   subyte_noerr
        subyte_noerr=suword8_noerr
#ifdef _LP64
        .weak   fulword
        fulword=fuword64
        .weak   fulword_noerr
        fulword_noerr=fuword64_noerr
        .weak   sulword
        sulword=suword64
        .weak   sulword_noerr
        sulword_noerr=suword64_noerr
#else
        .weak   fulword
        fulword=fuword32
        .weak   fulword_noerr
        fulword_noerr=fuword32_noerr
        .weak   sulword
        sulword=suword32
        .weak   sulword_noerr
        sulword_noerr=suword32_noerr
#endif  /* LP64 */

#endif  /* lint */
1275 
1276 /*
1277  * We define rdtick here, but not for sun4v. On sun4v systems, the %tick
1278  * and %stick should not be read directly without considering the tick
1279  * and stick offset kernel variables introduced to support sun4v OS
1280  * suspension.
1281  */
#if !defined (sun4v)

#if defined (lint)

hrtime_t
rdtick()
{ return (0); }

#else /* lint */

        ! rdtick(): return the raw %tick register.  Not built on sun4v,
        ! where %tick must not be read directly (see block comment above).
        ENTRY(rdtick)
        retl
        rd      %tick, %o0              ! delay: return value = %tick
        SET_SIZE(rdtick)

#endif /* lint */

#endif /* !sun4v */
1300 
1301 /*
1302  * Set tba to given address, no side effects.
1303  */
#if defined (lint)

/*ARGSUSED*/
void *
set_tba(void *new_tba)
{ return (0); }

#else   /* lint */

        ! set_tba(new_tba): install %o0 as the trap base address and
        ! return the previous %tba value.
        ENTRY(set_tba)
        mov     %o0, %o1                ! %o1 = new tba
        rdpr    %tba, %o0               ! return value = old %tba
        wrpr    %o1, %tba               ! install new trap base address
        retl
        nop
        SET_SIZE(set_tba)

#endif  /* lint */
1322 
#if defined (lint)

/*ARGSUSED*/
void *
get_tba()
{ return (0); }

#else   /* lint */

        ! get_tba(): return the current trap base address register.
        ENTRY(get_tba)
        retl
        rdpr    %tba, %o0               ! delay: return value = %tba
        SET_SIZE(get_tba)

#endif  /* lint */
1338 
#if defined(lint) || defined(__lint)

/* ARGSUSED */
void
setpstate(u_int pstate)
{}

#else   /* lint */

        ! setpstate(pstate): write %o0 into the %pstate register.
        ENTRY_NP(setpstate)
        retl
        wrpr    %g0, %o0, %pstate       ! delay: %pstate = pstate
        SET_SIZE(setpstate)

#endif  /* lint */
1354 
#if defined(lint) || defined(__lint)

u_int
getpstate(void)
{ return(0); }

#else   /* lint */

        ! getpstate(): return the current %pstate register.
        ENTRY_NP(getpstate)
        retl
        rdpr    %pstate, %o0            ! delay: return value = %pstate
        SET_SIZE(getpstate)

#endif  /* lint */
1369 
#if defined(lint) || defined(__lint)

dtrace_icookie_t
dtrace_interrupt_disable(void)
{ return (0); }

#else   /* lint */

        ! dtrace_interrupt_disable(): clear PSTATE_IE to disable
        ! interrupts and return the previous %pstate value as the cookie
        ! for dtrace_interrupt_enable().
        ENTRY_NP(dtrace_interrupt_disable)
        rdpr    %pstate, %o0            ! return value = old %pstate
        andn    %o0, PSTATE_IE, %o1     ! %o1 = %pstate with IE cleared
        retl
        wrpr    %g0, %o1, %pstate       ! delay: disable interrupts
        SET_SIZE(dtrace_interrupt_disable)

#endif  /* lint */
1386 
#if defined(lint) || defined(__lint)

/*ARGSUSED*/
void
dtrace_interrupt_enable(dtrace_icookie_t cookie)
{}

#else

        ! dtrace_interrupt_enable(cookie): restore %pstate from the
        ! cookie returned by dtrace_interrupt_disable().
        ENTRY_NP(dtrace_interrupt_enable)
        retl
        wrpr    %g0, %o0, %pstate       ! delay: %pstate = cookie
        SET_SIZE(dtrace_interrupt_enable)

#endif /* lint*/
1402 
#if defined(lint)

void
dtrace_membar_producer(void)
{}

void
dtrace_membar_consumer(void)
{}

#else   /* lint */

#ifdef SF_ERRATA_51
        ! SF_ERRATA_51 workaround (presumably a Spitfire CPU erratum --
        ! see the erratum definition for details): instead of returning
        ! with the membar in the retl delay slot, branch to this aligned
        ! common return stub.
        .align 32
        ENTRY(dtrace_membar_return)
        retl
        nop
        SET_SIZE(dtrace_membar_return)
#define DTRACE_MEMBAR_RETURN    ba,pt %icc, dtrace_membar_return
#else
#define DTRACE_MEMBAR_RETURN    retl
#endif

        ! dtrace_membar_producer(): order earlier stores before later
        ! stores (membar #StoreStore, executed in the delay slot).
        ENTRY(dtrace_membar_producer)
        DTRACE_MEMBAR_RETURN
        membar  #StoreStore
        SET_SIZE(dtrace_membar_producer)

        ! dtrace_membar_consumer(): order earlier loads before later
        ! loads (membar #LoadLoad).
        ENTRY(dtrace_membar_consumer)
        DTRACE_MEMBAR_RETURN
        membar  #LoadLoad
        SET_SIZE(dtrace_membar_consumer)

#endif  /* lint */
1437 
#if defined(lint) || defined(__lint)

void
dtrace_flush_windows(void)
{}

#else

        ! dtrace_flush_windows(): flush all active register windows to
        ! the stack (flushw, executed in the retl delay slot).
        ENTRY_NP(dtrace_flush_windows)
        retl
        flushw
        SET_SIZE(dtrace_flush_windows)

#endif  /* lint */
1452 
#if defined(lint)

/*ARGSUSED*/
int
getpcstack_top(pc_t *pcstack, int limit, uintptr_t *lastfp, pc_t *lastpc)
{
        return (0);
}

#else   /* lint */

        /*
         * getpcstack_top(): harvest return PCs still held in unflushed
         * register windows by walking backward with 'restore', storing
         * each %i7 into pcstack, then warping back to the original
         * window.  Runs with interrupts disabled so the window state
         * cannot change underneath us.
         *
         * %g1  pcstack
         * %g2  iteration count
         * %g3  final %fp
         * %g4  final %i7
         * %g5  saved %cwp (so we can get back to the original window)
         *
         * %o0  pcstack / return value (iteration count)
         * %o1  limit / saved %cansave
         * %o2  lastfp
         * %o3  lastpc
         * %o4  saved %canrestore
         * %o5  saved %pstate (to restore interrupts)
         *
         * Note:  The frame pointer returned via lastfp is safe to use as
         *      long as getpcstack_top() returns either (0) or a value less
         *      than (limit).
         */
        ENTRY_NP(getpcstack_top)

        rdpr    %pstate, %o5
        andn    %o5, PSTATE_IE, %g1
        wrpr    %g0, %g1, %pstate       ! disable interrupts

        mov     %o0, %g1                ! we need the pcstack pointer while
                                        ! we're visiting other windows

        rdpr    %canrestore, %g2        ! number of available windows
        sub     %g2, 1, %g2             ! account for skipped frame
        cmp     %g2, %o1                ! compare with limit
        movg    %icc, %o1, %g2          ! %g2 = min(%canrestore-1, limit)

        brlez,a,pn %g2, 3f              ! Use slow path if count <= 0 --
        clr     %o0                     ! return zero (annulled delay slot:
                                        ! executes only when branch taken).

        mov     %g2, %o0                ! set up return value

        rdpr    %cwp, %g5               ! remember the register window state
        rdpr    %cansave, %o1           ! 'restore' changes, so we can undo
        rdpr    %canrestore, %o4        ! its effects when we finish.

        restore                         ! skip caller's frame
1:
        st      %i7, [%g1]              ! stash return address in pcstack
                                        ! (32-bit store; entries are 4 bytes)
        restore                         ! go to the next frame
        subcc   %g2, 1, %g2             ! decrement the count
        bnz,pt  %icc, 1b                ! loop until count reaches 0
        add     %g1, 4, %g1             ! increment pcstack

        mov     %i6, %g3                ! copy the final %fp and return PC
        mov     %i7, %g4                ! aside so we can return them to our
                                        ! caller

        wrpr    %g0, %g5, %cwp          ! jump back to the original window
        wrpr    %g0, %o1, %cansave      ! and restore the original register
        wrpr    %g0, %o4, %canrestore   ! window state.
2:
        stn     %g3, [%o2]              ! store the frame pointer and pc
        st      %g4, [%o3]              ! so our caller can continue the trace

        retl                            ! return to caller
        wrpr    %g0, %o5, %pstate       ! restore interrupts

3:
        flushw                          ! flush register windows, then
        ldn     [%fp + STACK_BIAS + 14*CLONGSIZE], %g3  ! load initial fp
        ba      2b                      ! (slots 14/15 of the register save
        ldn     [%fp + STACK_BIAS + 15*CLONGSIZE], %g4  ! area: saved %i6/%i7)
        SET_SIZE(getpcstack_top)

#endif  /* lint */
1535 
#if defined(lint) || defined(__lint)

/* ARGSUSED */
void
setwstate(u_int wstate)
{}

#else   /* lint */

        ! setwstate(wstate): write %o0 into the %wstate (window state)
        ! register.
        ENTRY_NP(setwstate)
        retl
        wrpr    %g0, %o0, %wstate       ! delay: %wstate = wstate
        SET_SIZE(setwstate)

#endif  /* lint */
1551 
1552 
#if defined(lint) || defined(__lint)

u_int
getwstate(void)
{ return(0); }

#else   /* lint */

        ! getwstate(): return the current %wstate register.
        ENTRY_NP(getwstate)
        retl
        rdpr    %wstate, %o0            ! delay: return value = %wstate
        SET_SIZE(getwstate)

#endif  /* lint */
1567 
1568 
1569 /*
1570  * int panic_trigger(int *tp)
1571  *
1572  * A panic trigger is a word which is updated atomically and can only be set
1573  * once.  We atomically store 0xFF into the high byte and load the old value.
1574  * If the byte was 0xFF, the trigger has already been activated and we fail.
1575  * If the previous value was 0 or not 0xFF, we succeed.  This allows a
1576  * partially corrupt trigger to still trigger correctly.  DTrace has its own
1577  * version of this function to allow it to panic correctly from probe context.
1578  */
#if defined(lint)

/*ARGSUSED*/
int panic_trigger(int *tp) { return (0); }

/*ARGSUSED*/
int dtrace_panic_trigger(int *tp) { return (0); }

#else   /* lint */

        !
        ! panic_trigger(tp): atomically set the trigger byte with ldstub
        ! and return 1 if we were first (old byte != 0xFF), 0 otherwise.
        ! See the block comment above for the trigger protocol.
        !
        ENTRY_NP(panic_trigger)
        ldstub  [%o0], %o0              ! store 0xFF, load byte into %o0
        cmp     %o0, 0xFF               ! compare %o0 to 0xFF
        set     1, %o1                  ! %o1 = 1
        be,a    0f                      ! if (%o0 == 0xFF) goto 0f (else annul)
        set     0, %o1                  ! delay - %o1 = 0
0:      retl
        mov     %o1, %o0                ! return (%o1);
        SET_SIZE(panic_trigger)

        !
        ! dtrace_panic_trigger(tp): byte-for-byte copy of panic_trigger()
        ! so DTrace can panic from probe context without calling into the
        ! regular panic path (see block comment above).
        !
        ENTRY_NP(dtrace_panic_trigger)
        ldstub  [%o0], %o0              ! store 0xFF, load byte into %o0
        cmp     %o0, 0xFF               ! compare %o0 to 0xFF
        set     1, %o1                  ! %o1 = 1
        be,a    0f                      ! if (%o0 == 0xFF) goto 0f (else annul)
        set     0, %o1                  ! delay - %o1 = 0
0:      retl
        mov     %o1, %o0                ! return (%o1);
        SET_SIZE(dtrace_panic_trigger)

#endif  /* lint */
1610 
1611 /*
1612  * void vpanic(const char *format, va_list alist)
1613  *
1614  * The panic() and cmn_err() functions invoke vpanic() as a common entry point
1615  * into the panic code implemented in panicsys().  vpanic() is responsible
1616  * for passing through the format string and arguments, and constructing a
1617  * regs structure on the stack into which it saves the current register
1618  * values.  If we are not dying due to a fatal trap, these registers will
1619  * then be preserved in panicbuf as the current processor state.  Before
1620  * invoking panicsys(), vpanic() activates the first panic trigger (see
1621  * common/os/panic.c) and switches to the panic_stack if successful.  Note that
1622  * DTrace takes a slightly different panic path if it must panic from probe
1623  * context.  Instead of calling panic, it calls into dtrace_vpanic(), which
1624  * sets up the initial stack as vpanic does, calls dtrace_panic_trigger(), and
1625  * branches back into vpanic().
1626  */
#if defined(lint)

/*ARGSUSED*/
void vpanic(const char *format, va_list alist) {}

/*ARGSUSED*/
void dtrace_vpanic(const char *format, va_list alist) {}

#else   /* lint */

        !
        ! vpanic(format, alist): build a struct regs snapshot on the
        ! stack, fire the panic trigger, switch to panic_stack if we won
        ! the trigger, and call panicsys().  See the block comment above.
        !
        ENTRY_NP(vpanic)

        save    %sp, -SA(MINFRAME + REGSIZE), %sp       ! save and allocate regs

        !
        ! The v9 struct regs has a 64-bit r_tstate field, which we use here
        ! to store the %ccr, %asi, %pstate, and %cwp as they would appear
        ! in %tstate if a trap occurred.  We leave it up to the debugger to
        ! realize what happened and extract the register values.
        !
        rd      %ccr, %l0                               ! %l0 = %ccr
        sllx    %l0, TSTATE_CCR_SHIFT, %l0              ! %l0 <<= CCR_SHIFT
        rd      %asi, %l1                               ! %l1 = %asi
        sllx    %l1, TSTATE_ASI_SHIFT, %l1              ! %l1 <<= ASI_SHIFT
        or      %l0, %l1, %l0                           ! %l0 |= %l1
        rdpr    %pstate, %l1                            ! %l1 = %pstate
        sllx    %l1, TSTATE_PSTATE_SHIFT, %l1           ! %l1 <<= PSTATE_SHIFT
        or      %l0, %l1, %l0                           ! %l0 |= %l1
        rdpr    %cwp, %l1                               ! %l1 = %cwp
        sllx    %l1, TSTATE_CWP_SHIFT, %l1              ! %l1 <<= CWP_SHIFT
        or      %l0, %l1, %l0                           ! %l0 |= %l1

        set     vpanic, %l1                             ! %l1 = %pc (vpanic)
        add     %l1, 4, %l2                             ! %l2 = %npc (vpanic+4)
        rd      %y, %l3                                 ! %l3 = %y
        !
        ! Flush register windows before panic_trigger() in order to avoid a
        ! problem that a dump hangs if flush_windows() causes another panic.
        !
        call    flush_windows
        nop

        sethi   %hi(panic_quiesce), %o0
        call    panic_trigger
        or      %o0, %lo(panic_quiesce), %o0            ! if (!panic_trigger(

        !
        ! Common tail, shared with dtrace_vpanic() (which branches here
        ! after calling its DTrace-safe trigger).  %o0 holds the trigger
        ! result; %i0/%i1 still hold format/alist from our caller.
        !
vpanic_common:
        tst     %o0                                     !     &panic_quiesce))
        be      0f                                      !   goto 0f;
        mov     %o0, %l4                                !   delay - %l4 = %o0

        !
        ! If panic_trigger() was successful, we are the first to initiate a
        ! panic: switch to the panic_stack.
        !
        set     panic_stack, %o0                        ! %o0 = panic_stack
        set     PANICSTKSIZE, %o1                       ! %o1 = size of stack
        add     %o0, %o1, %o0                           ! %o0 = top of stack

        sub     %o0, SA(MINFRAME + REGSIZE) + STACK_BIAS, %sp

        !
        ! Now that we've got everything set up, store each register to its
        ! designated location in the regs structure allocated on the stack.
        ! The register set we store is the equivalent of the registers at
        ! the time the %pc was pointing to vpanic, thus the %i's now contain
        ! what the %o's contained prior to the save instruction.
        !
0:      stx     %l0, [%sp + STACK_BIAS + SA(MINFRAME) + TSTATE_OFF]
        stx     %g1, [%sp + STACK_BIAS + SA(MINFRAME) + G1_OFF]
        stx     %g2, [%sp + STACK_BIAS + SA(MINFRAME) + G2_OFF]
        stx     %g3, [%sp + STACK_BIAS + SA(MINFRAME) + G3_OFF]
        stx     %g4, [%sp + STACK_BIAS + SA(MINFRAME) + G4_OFF]
        stx     %g5, [%sp + STACK_BIAS + SA(MINFRAME) + G5_OFF]
        stx     %g6, [%sp + STACK_BIAS + SA(MINFRAME) + G6_OFF]
        stx     %g7, [%sp + STACK_BIAS + SA(MINFRAME) + G7_OFF]
        stx     %i0, [%sp + STACK_BIAS + SA(MINFRAME) + O0_OFF]
        stx     %i1, [%sp + STACK_BIAS + SA(MINFRAME) + O1_OFF]
        stx     %i2, [%sp + STACK_BIAS + SA(MINFRAME) + O2_OFF]
        stx     %i3, [%sp + STACK_BIAS + SA(MINFRAME) + O3_OFF]
        stx     %i4, [%sp + STACK_BIAS + SA(MINFRAME) + O4_OFF]
        stx     %i5, [%sp + STACK_BIAS + SA(MINFRAME) + O5_OFF]
        stx     %i6, [%sp + STACK_BIAS + SA(MINFRAME) + O6_OFF]
        stx     %i7, [%sp + STACK_BIAS + SA(MINFRAME) + O7_OFF]
        stn     %l1, [%sp + STACK_BIAS + SA(MINFRAME) + PC_OFF]
        stn     %l2, [%sp + STACK_BIAS + SA(MINFRAME) + NPC_OFF]
        st      %l3, [%sp + STACK_BIAS + SA(MINFRAME) + Y_OFF]

        mov     %l4, %o3                                ! %o3 = on_panic_stack
        add     %sp, STACK_BIAS + SA(MINFRAME), %o2     ! %o2 = &regs
        mov     %i1, %o1                                ! %o1 = alist
        call    panicsys                                ! panicsys();
        mov     %i0, %o0                                ! %o0 = format
        ret
        restore

        SET_SIZE(vpanic)

        !
        ! dtrace_vpanic(): identical setup to vpanic() but uses the
        ! DTrace-safe dtrace_flush_windows() and dtrace_panic_trigger(),
        ! and records its own entry point as the saved %pc; it then joins
        ! vpanic_common above.
        !
        ENTRY_NP(dtrace_vpanic)

        save    %sp, -SA(MINFRAME + REGSIZE), %sp       ! save and allocate regs

        !
        ! The v9 struct regs has a 64-bit r_tstate field, which we use here
        ! to store the %ccr, %asi, %pstate, and %cwp as they would appear
        ! in %tstate if a trap occurred.  We leave it up to the debugger to
        ! realize what happened and extract the register values.
        !
        rd      %ccr, %l0                               ! %l0 = %ccr
        sllx    %l0, TSTATE_CCR_SHIFT, %l0              ! %l0 <<= CCR_SHIFT
        rd      %asi, %l1                               ! %l1 = %asi
        sllx    %l1, TSTATE_ASI_SHIFT, %l1              ! %l1 <<= ASI_SHIFT
        or      %l0, %l1, %l0                           ! %l0 |= %l1
        rdpr    %pstate, %l1                            ! %l1 = %pstate
        sllx    %l1, TSTATE_PSTATE_SHIFT, %l1           ! %l1 <<= PSTATE_SHIFT
        or      %l0, %l1, %l0                           ! %l0 |= %l1
        rdpr    %cwp, %l1                               ! %l1 = %cwp
        sllx    %l1, TSTATE_CWP_SHIFT, %l1              ! %l1 <<= CWP_SHIFT
        or      %l0, %l1, %l0                           ! %l0 |= %l1

        set     dtrace_vpanic, %l1                      ! %l1 = %pc (vpanic)
        add     %l1, 4, %l2                             ! %l2 = %npc (vpanic+4)
        rd      %y, %l3                                 ! %l3 = %y
        !
        ! Flush register windows before panic_trigger() in order to avoid a
        ! problem that a dump hangs if flush_windows() causes another panic.
        !
        call    dtrace_flush_windows
        nop

        sethi   %hi(panic_quiesce), %o0
        call    dtrace_panic_trigger
        or      %o0, %lo(panic_quiesce), %o0            ! if (!panic_trigger(

        ba,a    vpanic_common
        SET_SIZE(dtrace_vpanic)

#endif  /* lint */
1765 
#if defined(lint)

/*ARGSUSED*/

uint_t
get_subcc_ccr( uint64_t addrl, uint64_t addrr)
{ return (0); }

#else   /* lint */

        ! get_subcc_ccr(addrl, addrr): compute addrl - addrr with subcc,
        ! discarding the result, and return the resulting condition
        ! codes.  %ccr is cleared first so no stale bits leak through.
        ENTRY(get_subcc_ccr)
        wr      %g0, %ccr       ! clear condition codes
        subcc   %o0, %o1, %g0   ! set codes from %o0 - %o1; discard result
        retl
        rd      %ccr, %o0       ! return condition codes
        SET_SIZE(get_subcc_ccr)

#endif  /* lint */
1784 
#if defined(lint) || defined(__lint)

ftrace_icookie_t
ftrace_interrupt_disable(void)
{ return (0); }

#else   /* lint */

        ! ftrace_interrupt_disable(): clear PSTATE_IE and return the old
        ! %pstate as the cookie (same pattern as
        ! dtrace_interrupt_disable() above).
        ENTRY_NP(ftrace_interrupt_disable)
        rdpr    %pstate, %o0            ! return value = old %pstate
        andn    %o0, PSTATE_IE, %o1     ! %o1 = %pstate with IE cleared
        retl
        wrpr    %g0, %o1, %pstate       ! delay: disable interrupts
        SET_SIZE(ftrace_interrupt_disable)

#endif  /* lint */
1801 
#if defined(lint) || defined(__lint)

/*ARGSUSED*/
void
ftrace_interrupt_enable(ftrace_icookie_t cookie)
{}

#else

        ! ftrace_interrupt_enable(cookie): restore %pstate from the
        ! cookie returned by ftrace_interrupt_disable().
        ENTRY_NP(ftrace_interrupt_enable)
        retl
        wrpr    %g0, %o0, %pstate       ! delay: %pstate = cookie
        SET_SIZE(ftrace_interrupt_enable)

#endif /* lint*/