Print this page
de-linting of .s files


  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  23  * Use is subject to license terms.
  24  */
  25 
  26 /*
  27  * General assembly language routines.
  28  * It is the intent of this file to contain routines that are
  29  * independent of the specific kernel architecture, and those that are
  30  * common across kernel architectures.
  31  * As architectures diverge, and implementations of specific
  32  * architecture-dependent routines change, the routines should be moved
  33  * from this file into the respective ../`arch -k`/subr.s file.
  34  * Or, if you want to be really nice, move them to a file whose
  35  * name has something to do with the routine you are moving.
  36  */
  37 
  38 #if defined(lint)
  39 #include <sys/types.h>
  40 #include <sys/scb.h>
  41 #include <sys/systm.h>
  42 #include <sys/regset.h>
  43 #include <sys/sunddi.h>
  44 #include <sys/lockstat.h>
  45 #include <sys/dtrace.h>
  46 #include <sys/ftrace.h>
  47 #endif  /* lint */
  48 
  49 #include <sys/asm_linkage.h>
  50 #include <sys/privregs.h>
  51 #include <sys/machparam.h>        /* To get SYSBASE and PAGESIZE */
  52 #include <sys/machthread.h>
  53 #include <sys/clock.h>
  54 #include <sys/psr_compat.h>
  55 #include <sys/isa_defs.h>
  56 #include <sys/dditypes.h>
  57 #include <sys/panic.h>
  58 #include <sys/machlock.h>
  59 #include <sys/ontrap.h>
  60 
  61 #if !defined(lint)
  62 #include "assym.h"
  63 
  64         .seg    ".text"
  65         .align  4
  66 
  67 /*
  68  * Macro to raise processor priority level.
  69  * Avoid dropping processor priority if already at high level.
  70  * Also avoid going below CPU->cpu_base_spl, which could've just been set by
  71  * a higher-level interrupt thread that just blocked.
  72  *
  73  * level can be %o0 (not other regs used here) or a constant.
  74  */
  75 #define RAISE(level) \
  76         rdpr    %pil, %o1;              /* get current PIL */           \
  77         cmp     %o1, level;             /* is PIL high enough? */       \
  78         bge     1f;                     /* yes, return */               \
  79         nop;                                                            \
  80         wrpr    %g0, PIL_MAX, %pil;     /* freeze CPU_BASE_SPL */       \
  81         ldn     [THREAD_REG + T_CPU], %o2;                              \


 117         ld      [%o2 + CPU_BASE_SPL], %o2;                              \
 118         cmp     %o2, level;             /* compare new to base */       \
 119         movl    %xcc, level, %o2;       /* use new if base lower */     \
 120         wrpr    %g0, %o2, %pil;                                         \
 121         retl;                                                           \
 122         mov     %o1, %o0                /* return old PIL */
 123 
 124 /*
 125  * Macro to set the priority to a specified level at or above LOCK_LEVEL.
 126  * Doesn't require comparison to CPU->cpu_base_spl.
 127  *
 128  * newpil can be %o0 (not other regs used here) or a constant with
 129  * the new PIL in the PSR_PIL field of the level arg.
 130  */
/*
 * SETPRI_HIGH(level): unconditionally write 'level' into %pil and return
 * the previous PIL in %o0.  Clobbers %o1.  Unlike RAISE(), there is no
 * CPU->cpu_base_spl check, so this can lower the PIL (see splzs()).
 */
 131 #define SETPRI_HIGH(level) \
 132         rdpr    %pil, %o1;              /* get current PIL */           \
 133         wrpr    %g0, level, %pil;                                       \
 134         retl;                                                           \
 135         mov     %o1, %o0                /* return old PIL */
 136 
 137 #endif  /* lint */
 138 
 139         /*
 140          * Berkeley 4.3 introduced symbolically named interrupt levels
 141          * as a way to deal with priority in a machine independent fashion.
 142          * Numbered priorities are machine specific, and should be
 143          * discouraged where possible.
 144          *
 145          * Note, for the machine specific priorities there are
 146          * examples listed for devices that use a particular priority.
 147          * It should not be construed that all devices of that
 148          * type should be at that priority.  It is currently where
 149          * the current devices fit into the priority scheme based
 150          * upon time criticalness.
 151          *
 152          * The underlying assumption of these assignments is that
 153          * SPARC9 IPL 10 is the highest level from which a device
 154          * routine can call wakeup.  Devices that interrupt from higher
 155          * levels are restricted in what they can do.  If they need
 156          * kernel services they should schedule a routine at a lower
 157          * level (via software interrupt) to do the required
 158          * processing.


 161          *      Level   Usage
 162          *      15      Asynchronous memory exceptions
 163          *      14      Profiling clock (and PROM uart polling clock)
 164          *      13      Audio device
 165          *      12      Serial ports
 166          *      11      Floppy controller
 167          *
 168          * The serial ports request lower level processing on level 6.
 169          * Audio and floppy request lower level processing on level 4.
 170          *
 171          * Also, almost all splN routines (where N is a number or a
 172          * mnemonic) will do a RAISE(), on the assumption that they are
 173          * never used to lower our priority.
 174          * The exceptions are:
 175          *      spl8()          Because you can't be above 15 to begin with!
 176          *      splzs()         Because this is used at boot time to lower our
 177          *                      priority, to allow the PROM to poll the uart.
 178          *      spl0()          Used to lower priority to 0.
 179          */
 180 
 181 #if defined(lint)
 182 
/* Lint-only C stubs; the real spl*() bodies are the assembly entries below. */
 183 int spl0(void)          { return (0); }
 184 int spl6(void)          { return (0); }
 185 int spl7(void)          { return (0); }
 186 int spl8(void)          { return (0); }
 187 int splhi(void)         { return (0); }
 188 int splhigh(void)       { return (0); }
 189 int splzs(void)         { return (0); }
 190 
 191 #else   /* lint */
 192 
 193         /* locks out all interrupts, including memory errors */
 194         ENTRY(spl8)
 195         SETPRI_HIGH(15)
 196         SET_SIZE(spl8)
 197 
 198         /* just below the level that profiling runs */
        ! Per the block comment above, splN routines raise but never lower
        ! the PIL (RAISE_HIGH), except spl8/splzs/spl0.
 199         ENTRY(spl7)
 200         RAISE_HIGH(13)
 201         SET_SIZE(spl7)
 202 
 203         /* sun specific - highest priority onboard serial i/o zs ports */
 204         ENTRY(splzs)
 205         SETPRI_HIGH(12) /* Can't be a RAISE, as it's used to lower us */
 206         SET_SIZE(splzs)
 207 
 208         /*
 209          * should lock out clocks and all interrupts,
 210          * as you can see, there are exceptions
 211          */
        ! splhi, splhigh, spl6 and i_ddi_splhigh are all aliases for the
        ! same body: raise the PIL to DISP_LEVEL.
 212         ENTRY(splhi)
 213         ALTENTRY(splhigh)
 214         ALTENTRY(spl6)
 215         ALTENTRY(i_ddi_splhigh)
 216         RAISE_HIGH(DISP_LEVEL)
 217         SET_SIZE(i_ddi_splhigh)
 218         SET_SIZE(spl6)
 219         SET_SIZE(splhigh)
 220         SET_SIZE(splhi)
 221 
 222         /* allow all interrupts */
 223         ENTRY(spl0)
 224         SETPRI(0)
 225         SET_SIZE(spl0)
 226 
 227 #endif  /* lint */
 228 
 229 /*
 230  * splx - set PIL back to that indicated by the old %pil passed as an argument,
 231  * or to the CPU's base priority, whichever is higher.
 232  */
 233 
 234 #if defined(lint)
 235 
 236 /* ARGSUSED */
 237 void
 238 splx(int level)
 239 {}
 240 
 241 #else   /* lint */
 242 
        ! i_ddi_splx is an alternate entry point for the identical code.
 243         ENTRY(splx)
 244         ALTENTRY(i_ddi_splx)
 245         SETPRI(%o0)             /* set PIL */
 246         SET_SIZE(i_ddi_splx)
 247         SET_SIZE(splx)
 248 
 249 #endif  /* lint */
 250 
 251 /*
 252  * splr()
 253  *
 254  * splr is like splx but will only raise the priority and never drop it
 255  * Be careful not to set priority lower than CPU->cpu_base_pri,
 256  * even though it seems we're raising the priority, it could be set higher
 257  * at any time by an interrupt routine, so we must block interrupts and
 258  * look at CPU->cpu_base_pri.
 259  */
 260 
 261 #if defined(lint)
 262 
 263 /* ARGSUSED */
 264 int
 265 splr(int level)
 266 { return (0); }
 267 
 268 #else   /* lint */
        ! RAISE() freezes interrupts, then uses max(level, cpu_base_spl)
        ! and returns the old PIL -- see the RAISE macro comment earlier
        ! in this file.
 269         ENTRY(splr)
 270         RAISE(%o0)
 271         SET_SIZE(splr)
 272 
 273 #endif  /* lint */
 274 
 275 /*
 276  * on_fault()
 277  * Catch lofault faults. Like setjmp except it returns one
 278  * if code following causes uncorrectable fault. Turned off
 279  * by calling no_fault().
 280  */
 281 
 282 #if defined(lint)
 283 
 284 /* ARGSUSED */
 285 int
 286 on_fault(label_t *ljb)
 287 { return (0); }
 288 
 289 #else   /* lint */
 290 
        ! Arm t_onfault = ljb and t_lofault = catch_fault, then tail-call
        ! setjmp: the caller sees 0 now, and 1 (via catch_fault/longjmp)
        ! if a lofault-protected fault occurs later.
 291         ENTRY(on_fault)
 292         membar  #Sync                   ! sync error barrier (see copy.s)
 293         stn     %o0, [THREAD_REG + T_ONFAULT]
 294         set     catch_fault, %o1
 295         b       setjmp                  ! let setjmp do the rest
 296         stn     %o1, [THREAD_REG + T_LOFAULT]   ! put catch_fault in t_lofault
 297 
catch_fault:
 299         save    %sp, -SA(WINDOWSIZE), %sp ! goto next window so that we can rtn
 300         ldn     [THREAD_REG + T_ONFAULT], %o0
 301         membar  #Sync                           ! sync error barrier
 302         stn     %g0, [THREAD_REG + T_ONFAULT]   ! turn off onfault
 303         b       longjmp                 ! let longjmp do the rest
 304         stn     %g0, [THREAD_REG + T_LOFAULT]   ! turn off lofault
 305         SET_SIZE(on_fault)
 306 
 307 #endif  /* lint */
 308 
 309 /*
 310  * no_fault()
 311  * turn off fault catching.
 312  */
 313 
 314 #if defined(lint)
 315 
 316 void
 317 no_fault(void)
 318 {}
 319 
 320 #else   /* lint */
 321 
        ! Disarm on_fault() protection: clear both t_onfault and t_lofault.
 322         ENTRY(no_fault)
 323         membar  #Sync                           ! sync error barrier
 324         stn     %g0, [THREAD_REG + T_ONFAULT]
 325         retl
 326         stn     %g0, [THREAD_REG + T_LOFAULT]   ! turn off lofault
 327         SET_SIZE(no_fault)
 328 
 329 #endif  /* lint */
 330 
 331 /*
 332  * Default trampoline code for on_trap() (see <sys/ontrap.h>).  On sparcv9,
 333  * the trap code will complete trap processing but reset the return %pc to
 334  * ot_trampoline, which will by default be set to the address of this code.
 335  * We longjmp(&curthread->t_ontrap->ot_jmpbuf) to return back to on_trap().
 336  */
 337 #if defined(lint)
 338 
 339 void 
 340 on_trap_trampoline(void)
 341 {}
 342 
 343 #else   /* lint */
 344 
        ! Tail-call longjmp(&curthread->t_ontrap->ot_jmpbuf); longjmp
        ! returns 1, so on_trap() appears to return 1 after the trap.
 345         ENTRY(on_trap_trampoline)
 346         ldn     [THREAD_REG + T_ONTRAP], %o0
 347         b       longjmp
 348         add     %o0, OT_JMPBUF, %o0
 349         SET_SIZE(on_trap_trampoline)
 350 
 351 #endif  /* lint */
 352 
 353 /*
 354  * Push a new element on to the t_ontrap stack.  Refer to <sys/ontrap.h> for
 355  * more information about the on_trap() mechanism.  If the on_trap_data is the
 356  * same as the topmost stack element, we just modify that element.
 357  * On UltraSPARC, we need to issue a membar #Sync before modifying t_ontrap.
 358  * The issue barrier is defined to force all deferred errors to complete before
 359  * we go any further.  We want these errors to be processed before we modify
 360  * our current error protection.
 361  */
 362 #if defined(lint)
 363 
 364 /*ARGSUSED*/
 365 int
 366 on_trap(on_trap_data_t *otp, uint_t prot)
 367 { return (0); }
 368 
 369 #else   /* lint */
 370 
        ! Initialize *otp, push it onto t_ontrap (unless it is already the
        ! topmost element), then tail-call setjmp on ot_jmpbuf: returns 0
        ! when armed, and 1 via ot_trampoline/longjmp after a trap.
 371         ENTRY(on_trap)
 372         membar  #Sync                           ! force error barrier
 373         sth     %o1, [%o0 + OT_PROT]            ! ot_prot = prot
 374         sth     %g0, [%o0 + OT_TRAP]            ! ot_trap = 0
 375         set     on_trap_trampoline, %o2         ! %o2 = &on_trap_trampoline
 376         stn     %o2, [%o0 + OT_TRAMPOLINE]      ! ot_trampoline = %o2
 377         stn     %g0, [%o0 + OT_HANDLE]          ! ot_handle = NULL
 378         ldn     [THREAD_REG + T_ONTRAP], %o2    ! %o2 = curthread->t_ontrap
 379         cmp     %o0, %o2                        ! if (otp == %o2)
 380         be      0f                              !    don't modify t_ontrap
 381         stn     %g0, [%o0 + OT_PAD1]            ! delay - ot_pad1 = NULL
 382 
 383         stn     %o2, [%o0 + OT_PREV]            ! ot_prev = t_ontrap
 384         membar  #Sync                           ! force error barrier
 385         stn     %o0, [THREAD_REG + T_ONTRAP]    ! t_ontrap = otp
 386 
 387 0:      b       setjmp                          ! let setjmp do the rest
 388         add     %o0, OT_JMPBUF, %o0             ! %o0 = &ot_jmpbuf
 389         SET_SIZE(on_trap)
 390 
 391 #endif  /* lint */
 392 
 393 /*
 394  * Setjmp and longjmp implement non-local gotos using state vectors
 395  * type label_t.
 396  */
 397 
 398 #if defined(lint)
 399 
 400 /* ARGSUSED */
 401 int
 402 setjmp(label_t *lp)
 403 { return (0); }
 404 
 405 #else   /* lint */
 406 
        ! Only %o7 (return pc) and %sp are saved in the label; a matching
        ! longjmp(lp) later makes this call site return a second time,
        ! with value 1.
 407         ENTRY(setjmp)
 408         stn     %o7, [%o0 + L_PC]       ! save return address
 409         stn     %sp, [%o0 + L_SP]       ! save stack ptr
 410         retl
 411         clr     %o0                     ! return 0
 412         SET_SIZE(setjmp)
 413 
 414 #endif  /* lint */
 415 
 416 
 417 #if defined(lint)
 418 
 419 /* ARGSUSED */
 420 void
 421 longjmp(label_t *lp)
 422 {}
 423 
 424 #else   /* lint */
 425 
        ! Restore the %pc/%sp saved by setjmp(lp) and return 1 from that
        ! original setjmp() call site.
 426         ENTRY(longjmp)
 427         !
 428         ! The following save is required so that an extra register
 429         ! window is flushed.  Flushw flushes nwindows-2
 430         ! register windows.  If setjmp and longjmp are called from
 431         ! within the same window, that window will not get pushed
 432         ! out onto the stack without the extra save below.  Tail call
 433         ! optimization can lead to callers of longjmp executing
 434         ! from a window that could be the same as the setjmp,
 435         ! thus the need for the following save.
 436         !
 437         save    %sp, -SA(MINFRAME), %sp
 438         flushw                          ! flush all but this window
 439         ldn     [%i0 + L_PC], %i7       ! restore return addr
 440         ldn     [%i0 + L_SP], %fp       ! restore sp for dest on foreign stack
 441         ret                             ! return 1
 442         restore %g0, 1, %o0             ! takes underflow, switches stacks
 443         SET_SIZE(longjmp)
 444 
 445 #endif  /* lint */
 446 
 447 /*
 448  * movtuc(length, from, to, table)
 449  *
 450  * VAX movtuc instruction (sort of).
 451  */
 452 
 453 #if defined(lint)
 454 
 455 /*ARGSUSED*/
 456 int
 457 movtuc(size_t length, u_char *from, u_char *to, u_char table[])
 458 { return (0); }
 459 
 460 #else   /* lint */
 461 
        ! %o0 = length, %o1 = from, %o2 = to, %o3 = table, %o4 = index.
        ! Copies bytes from 'from' to 'to', translating each through
        ! table[]; a zero table entry ("escape") ends the copy.  Returns
        ! the count of bytes moved.  Note the zero translated byte is
        ! still stored (branch delay slot executes) but is not counted.
 462         ENTRY(movtuc)
 463         tst     %o0
 464         ble,pn  %ncc, 2f                ! check length
 465         clr     %o4
 466 
 467         ldub    [%o1 + %o4], %g1        ! get next byte in string
0:
 469         ldub    [%o3 + %g1], %g1        ! get corresponding table entry
 470         tst     %g1                     ! escape char?
 471         bnz     1f
 472         stb     %g1, [%o2 + %o4]        ! delay slot, store it
 473 
 474         retl                            ! return (bytes moved)
 475         mov     %o4, %o0
1:
 477         inc     %o4                     ! increment index
 478         cmp     %o4, %o0                ! index < length ?
 479         bl,a,pt %ncc, 0b
 480         ldub    [%o1 + %o4], %g1        ! delay slot, get next byte in string
2:
 482         retl                            ! return (bytes moved)
 483         mov     %o4, %o0
 484         SET_SIZE(movtuc)
 485 
 486 #endif  /* lint */
 487 
 488 /*
 489  * scanc(length, string, table, mask)
 490  *
 491  * VAX scanc instruction.
 492  */
 493 
 494 #if defined(lint)
 495 
 496 /*ARGSUSED*/
 497 int
 498 scanc(size_t length, u_char *string, u_char table[], u_char mask)
 499 { return (0); }
 500 
 501 #else   /* lint */
 502 
        ! %o0 = length, %o1 = string, %o2 = table, %o3 = mask, %o4 = index.
        ! Scan for the first byte whose table entry has any 'mask' bits
        ! set; return length - index (0 when no byte matches).
 503         ENTRY(scanc)
 504         tst     %o0
 505         ble,pn  %ncc, 1f                ! check length
 506         clr     %o4
0:
 508         ldub    [%o1 + %o4], %g1        ! get next byte in string
 509         cmp     %o4, %o0                ! interlock slot, index < length ?
 510         ldub    [%o2 + %g1], %g1        ! get corresponding table entry
 511         bge,pn  %ncc, 1f                ! interlock slot
 512         btst    %o3, %g1                ! apply the mask
 513         bz,a    0b
 514         inc     %o4                     ! delay slot, increment index
1:
 516         retl                            ! return(length - index)
 517         sub     %o0, %o4, %o0
 518         SET_SIZE(scanc)
 519 
 520 #endif  /* lint */
 521 
 522 /*
 523  * if a() calls b() calls caller(),
 524  * caller() returns return address in a().
 525  */
 526 
 527 #if defined(lint)
 528 
 529 caddr_t
 530 caller(void)
 531 { return (0); }
 532 
 533 #else   /* lint */
 534 
        ! Leaf routine (no save): %i7 belongs to our caller's register
        ! window, i.e. the return address in our caller's caller.
 535         ENTRY(caller)
 536         retl
 537         mov     %i7, %o0
 538         SET_SIZE(caller)
 539 
 540 #endif  /* lint */
 541 
 542 /*
 543  * if a() calls callee(), callee() returns the
 544  * return address in a();
 545  */
 546 
 547 #if defined(lint)
 548 
 549 caddr_t
 550 callee(void)
 551 { return (0); }
 552 
 553 #else   /* lint */
 554 
        ! Leaf routine: %o7 holds the address we were called from.
 555         ENTRY(callee)
 556         retl
 557         mov     %o7, %o0
 558         SET_SIZE(callee)
 559 
 560 #endif  /* lint */
 561 
 562 /*
 563  * return the current frame pointer
 564  */
 565 
 566 #if defined(lint)
 567 
 568 greg_t
 569 getfp(void)
 570 { return (0); }
 571 
 572 #else   /* lint */
 573 
        ! Leaf routine (no save), so %fp here is the caller's frame pointer.
 574         ENTRY(getfp)
 575         retl
 576         mov     %fp, %o0
 577         SET_SIZE(getfp)
 578 
 579 #endif  /* lint */
 580 
 581 /*
 582  * Get vector base register
 583  */
 584 
 585 #if defined(lint)
 586 
 587 greg_t
 588 gettbr(void)
 589 { return (0); }
 590 
 591 #else   /* lint */
 592 
        ! Return %tbr, the trap base (vector) register.
 593         ENTRY(gettbr)
 594         retl
 595         mov     %tbr, %o0
 596         SET_SIZE(gettbr)
 597 
 598 #endif  /* lint */
 599 
 600 /*
 601  * Get processor state register, V9 faked to look like V8.
 602  * Note: does not provide ccr.xcc and provides FPRS.FEF instead of
 603  * PSTATE.PEF, because PSTATE.PEF is always on in order to allow the
 604  * libc_psr memcpy routines to run without hitting the fp_disabled trap.
 605  */
 606 
 607 #if defined(lint)
 608 
 609 greg_t
 610 getpsr(void)
 611 { return (0); }
 612 
 613 #else   /* lint */
 614 
        ! Synthesize a V8-style %psr from the V9 %ccr and %fprs registers;
        ! see the block comment above for the icc/ef caveats.
 615         ENTRY(getpsr)
 616         rd      %ccr, %o1                       ! get ccr
 617         sll     %o1, PSR_ICC_SHIFT, %o0         ! move icc to V8 psr.icc
 618         rd      %fprs, %o1                      ! get fprs
 619         and     %o1, FPRS_FEF, %o1              ! mask out dirty upper/lower
 620         sllx    %o1, PSR_FPRS_FEF_SHIFT, %o1    ! shift fef to V8 psr.ef
 621         or      %o0, %o1, %o0                   ! or into psr.ef
 622         set     V9_PSR_IMPLVER, %o1             ! SI assigned impl/ver: 0xef
 623         retl
 624         or      %o0, %o1, %o0                   ! or into psr.impl/ver
 625         SET_SIZE(getpsr)
 626 
 627 #endif  /* lint */
 628 
 629 /*
 630  * Get current processor interrupt level
 631  */
 632 
 633 #if defined(lint)
 634 
 635 u_int
 636 getpil(void)
 637 { return (0); }
 638 
 639 #else   /* lint */
 640 
        ! Return the current %pil without modifying it.
 641         ENTRY(getpil)
 642         retl
 643         rdpr    %pil, %o0
 644         SET_SIZE(getpil)
 645 
 646 #endif  /* lint */
 647 
 648 #if defined(lint)
 649 
 650 /*ARGSUSED*/
 651 void
 652 setpil(u_int pil)
 653 {}
 654 
 655 #else   /* lint */
 656 
        ! Write 'pil' to %pil unconditionally: no cpu_base_spl check, and
        ! the old value is not returned (contrast with splx()/splr()).
 657         ENTRY(setpil)
 658         retl
 659         wrpr    %g0, %o0, %pil
 660         SET_SIZE(setpil)
 661 
 662 #endif  /* lint */
 663 
 664 
 665 /*
 666  * _insque(entryp, predp)
 667  *
 668  * Insert entryp after predp in a doubly linked list.
 669  */
 670 
 671 #if defined(lint)
 672 
 673 /*ARGSUSED*/
 674 void
 675 _insque(caddr_t entryp, caddr_t predp)
 676 {}
 677 
 678 #else   /* lint */
 679 
        ! Node layout: forw link at offset 0, back link at offset CPTRSIZE.
 680         ENTRY(_insque)
 681         ldn     [%o1], %g1              ! predp->forw
 682         stn     %o1, [%o0 + CPTRSIZE]   ! entryp->back = predp
 683         stn     %g1, [%o0]              ! entryp->forw = predp->forw
 684         stn     %o0, [%o1]              ! predp->forw = entryp
 685         retl
 686         stn     %o0, [%g1 + CPTRSIZE]   ! predp->forw->back = entryp
 687         SET_SIZE(_insque)
 688 
 689 #endif  /* lint */
 690 
 691 /*
 692  * _remque(entryp)
 693  *
 694  * Remove entryp from a doubly linked list
 695  */
 696 
 697 #if defined(lint)
 698 
 699 /*ARGSUSED*/
 700 void
 701 _remque(caddr_t entryp)
 702 {}
 703 
 704 #else   /* lint */
 705 
        ! Unlink entryp from its list; entryp's own links are left as-is.
 706         ENTRY(_remque)
 707         ldn     [%o0], %g1              ! entryp->forw
 708         ldn     [%o0 + CPTRSIZE], %g2   ! entryp->back
 709         stn     %g1, [%g2]              ! entryp->back->forw = entryp->forw
 710         retl
 711         stn     %g2, [%g1 + CPTRSIZE]   ! entryp->forw->back = entryp->back
 712         SET_SIZE(_remque)
 713 
 714 #endif  /* lint */
 715 
 716 
 717 /*
 718  * strlen(str)
 719  *
 720  * Returns the number of non-NULL bytes in string argument.
 721  *
 722  * XXX -  why is this here, rather than the traditional file?
 723  *        why does it have local labels which don't start with a `.'?
 724  */
 725 
 726 #if defined(lint)
 727 
/* Lint-only stub; the real strlen() implementation is in assembly below. */
 728 /*ARGSUSED*/
 729 size_t
 730 strlen(const char *str)
 731 { return (0); }
 732 
 733 #else   /* lint */
 734 
 735         ENTRY(strlen)
 736         mov     %o0, %o1
 737         andcc   %o1, 3, %o3             ! is src word aligned
 738         bz      $nowalgnd
 739         clr     %o0                     ! length of non-zero bytes
 740         cmp     %o3, 2                  ! is src half-word aligned
 741         be      $s2algn
 742         cmp     %o3, 3                  ! src is byte aligned
 743         ldub    [%o1], %o3              ! move 1 or 3 bytes to align it
 744         inc     1, %o1                  ! in either case, safe to do a byte
 745         be      $s3algn
 746         tst     %o3
 747 $s1algn:
 748         bnz,a   $s2algn                 ! now go align dest
 749         inc     1, %o0
 750         b,a     $done
 751 
 752 $s2algn:
 753         lduh    [%o1], %o3              ! know src is half-byte aligned
 754         inc     2, %o1


 799         nop
 800 1:      andcc   %o2, %o5, %g0           ! check if second byte was zero
 801         bnz     1f
 802         srl     %o5, 8, %o5
 803 $done1:
 804         retl
 805         inc     %o0
 806 1:      andcc   %o2, %o5, %g0           ! check if third byte was zero
 807         bnz     1f
 808         andcc   %o2, 0xff, %g0          ! check if last byte is zero
 809 $done2:
 810         retl
 811         inc     2, %o0
 812 1:      bnz,a   3b
 813         inc     4, %o0                  ! count of bytes
 814 $done3:
 815         retl
 816         inc     3, %o0
 817         SET_SIZE(strlen)
 818 
 819 #endif  /* lint */
 820 
 821 /*
 822  * Provide a C callable interface to the membar instruction.
 823  */
 824 
 825 #if defined(lint)
 826 
/*
 * Lint-only stubs: each membar_*() below is implemented in assembly as a
 * single membar instruction with the corresponding ordering mask.
 */
 827 void
 828 membar_ldld(void)
 829 {}
 830 
 831 void
 832 membar_stld(void)
 833 {}
 834 
 835 void
 836 membar_ldst(void)
 837 {}
 838 
 839 void
 840 membar_stst(void)
 841 {}
 842 
 843 void
 844 membar_ldld_ldst(void)
 845 {}
 846 
 847 void
 848 membar_ldld_stld(void)
 849 {}
 850 
 851 void
 852 membar_ldld_stst(void)
 853 {}
 854 
 855 void
 856 membar_stld_ldld(void)
 857 {}
 858 
 859 void
 860 membar_stld_ldst(void)
 861 {}
 862 
 863 void
 864 membar_stld_stst(void)
 865 {}
 866 
 867 void
 868 membar_ldst_ldld(void)
 869 {}
 870 
 871 void
 872 membar_ldst_stld(void)
 873 {}
 874 
 875 void
 876 membar_ldst_stst(void)
 877 {}
 878 
 879 void
 880 membar_stst_ldld(void)
 881 {}
 882 
 883 void
 884 membar_stst_stld(void)
 885 {}
 886 
 887 void
 888 membar_stst_ldst(void)
 889 {}
 890 
 891 void
 892 membar_lookaside(void)
 893 {}
 894 
 895 void
 896 membar_memissue(void)
 897 {}
 898 
 899 void
 900 membar_sync(void)
 901 {}
 902 
 902 
 903 #else
        ! Each routine is simply retl + the membar in the delay slot.
 904         ENTRY(membar_ldld)
 905         retl
 906         membar  #LoadLoad
 907         SET_SIZE(membar_ldld)
 908 
 909         ENTRY(membar_stld)
 910         retl
 911         membar  #StoreLoad
 912         SET_SIZE(membar_stld)
 913 
 914         ENTRY(membar_ldst)
 915         retl
 916         membar  #LoadStore
 917         SET_SIZE(membar_ldst)
 918 
 919         ENTRY(membar_stst)
 920         retl
 921         membar  #StoreStore
 922         SET_SIZE(membar_stst)
 923 


 961         retl
 962         membar  #LoadStore|#StoreStore
 963         SET_SIZE(membar_stst_ldst)
 964         SET_SIZE(membar_ldst_stst)
 965 
        ! Each routine is simply retl + the membar in the delay slot.
 966         ENTRY(membar_lookaside)
 967         retl
 968         membar  #Lookaside
 969         SET_SIZE(membar_lookaside)
 970 
 971         ENTRY(membar_memissue)
 972         retl
 973         membar  #MemIssue
 974         SET_SIZE(membar_memissue)
 975 
 976         ENTRY(membar_sync)
 977         retl
 978         membar  #Sync
 979         SET_SIZE(membar_sync)
 980 
 981 #endif  /* lint */
 982 
 983 
 984 #if defined(lint)
 985 
 986 /*ARGSUSED*/
 987 int
 988 fuword64(const void *addr, uint64_t *dst)
 989 { return (0); }
 990 
 991 /*ARGSUSED*/
 992 int
 993 fuword32(const void *addr, uint32_t *dst)
 994 { return (0); }
 995 
 996 /*ARGSUSED*/
 997 int
 998 fuword16(const void *addr, uint16_t *dst)
 999 { return (0); }
1000 
1001 /*ARGSUSED*/
1002 int
1003 fuword8(const void *addr, uint8_t *dst)
1004 { return (0); }
1005 
1006 /*ARGSUSED*/
1007 int
1008 dtrace_ft_fuword64(const void *addr, uint64_t *dst)
1009 { return (0); }
1010 
1011 /*ARGSUSED*/
1012 int
1013 dtrace_ft_fuword32(const void *addr, uint32_t *dst)
1014 { return (0); }
1015 
1016 #else   /* lint */
1017 
1018 /*
1019  * Since all of the fuword() variants are so similar, we have a macro to spit
1020  * them out.
1021  */
1022 
/*
 * FUWORD(NAME, LOAD, STORE, COPYOP): fetch one word from user space.
 * Points t_lofault at the local label "1:" around the ASI_USER load; on
 * success it restores t_lofault, stores the value at *dst (%o1) and
 * returns 0.  On a fault, t_lofault is restored and control either
 * tail-jumps to the thread's t_copyops handler (if set) or returns -1.
 */
1023 #define FUWORD(NAME, LOAD, STORE, COPYOP)       \
1024         ENTRY(NAME);                            \
1025         sethi   %hi(1f), %o5;                   \
1026         ldn     [THREAD_REG + T_LOFAULT], %o3;  \
1027         or      %o5, %lo(1f), %o5;              \
1028         membar  #Sync;                          \
1029         stn     %o5, [THREAD_REG + T_LOFAULT];  \
1030         LOAD    [%o0]ASI_USER, %o2;             \
1031         membar  #Sync;                          \
1032         stn     %o3, [THREAD_REG + T_LOFAULT];  \
1033         mov     0, %o0;                         \
1034         retl;                                   \
1035         STORE   %o2, [%o1];                     \
1036 1:                                              \
1037         membar  #Sync;                          \
1038         stn     %o3, [THREAD_REG + T_LOFAULT];  \
1039         ldn     [THREAD_REG + T_COPYOPS], %o2;  \
1040         brz     %o2, 2f;                        \
1041         nop;                                    \
1042         ldn     [%o2 + COPYOP], %g1;            \
1043         jmp     %g1;                            \
1044         nop;                                    \
1045 2:                                              \
1046         retl;                                   \
1047         mov     -1, %o0;                        \
1048         SET_SIZE(NAME)
1049 
1050         FUWORD(fuword64, ldxa, stx, CP_FUWORD64)
1051         FUWORD(fuword32, lda, st, CP_FUWORD32)
1052         FUWORD(fuword16, lduha, sth, CP_FUWORD16)
1053         FUWORD(fuword8, lduba, stb, CP_FUWORD8)
1054 
1055 #endif  /* lint */
1056 
1057 
1058 #if defined(lint)
1059 
1060 /*ARGSUSED*/
1061 int
1062 suword64(void *addr, uint64_t value)
1063 { return (0); }
1064 
1065 /*ARGSUSED*/
1066 int
1067 suword32(void *addr, uint32_t value)
1068 { return (0); }
1069 
1070 /*ARGSUSED*/
1071 int
1072 suword16(void *addr, uint16_t value)
1073 { return (0); }
1074 
1075 /*ARGSUSED*/
1076 int
1077 suword8(void *addr, uint8_t value)
1078 { return (0); }
1079 
1080 #else   /* lint */
1081 
1082 /*
1083  * Since all of the suword() variants are so similar, we have a macro to spit
1084  * them out.
1085  */
1086 
/*
 * SUWORD(NAME, STORE, COPYOP): store one word to user space.
 * Mirrors FUWORD: t_lofault protects the ASI_USER store; returns 0 on
 * success, and on a fault either tail-jumps to the thread's t_copyops
 * handler (if set) or returns -1.
 */
1087 #define SUWORD(NAME, STORE, COPYOP)             \
1088         ENTRY(NAME)                             \
1089         sethi   %hi(1f), %o5;                   \
1090         ldn     [THREAD_REG + T_LOFAULT], %o3;  \
1091         or      %o5, %lo(1f), %o5;              \
1092         membar  #Sync;                          \
1093         stn     %o5, [THREAD_REG + T_LOFAULT];  \
1094         STORE   %o1, [%o0]ASI_USER;             \
1095         membar  #Sync;                          \
1096         stn     %o3, [THREAD_REG + T_LOFAULT];  \
1097         retl;                                   \
1098         clr     %o0;                            \
1099 1:                                              \
1100         membar  #Sync;                          \
1101         stn     %o3, [THREAD_REG + T_LOFAULT];  \
1102         ldn     [THREAD_REG + T_COPYOPS], %o2;  \
1103         brz     %o2, 2f;                        \
1104         nop;                                    \
1105         ldn     [%o2 + COPYOP], %g1;            \
1106         jmp     %g1;                            \
1107         nop;                                    \
1108 2:                                              \
1109         retl;                                   \
1110         mov     -1, %o0;                        \
1111         SET_SIZE(NAME)
1112 
1113         SUWORD(suword64, stxa, CP_SUWORD64)
1114         SUWORD(suword32, sta, CP_SUWORD32)
1115         SUWORD(suword16, stha, CP_SUWORD16)
1116         SUWORD(suword8, stba, CP_SUWORD8)
1117 
1118 #endif  /* lint */
1119 
1120 #if defined(lint)
1121 
1122 /*ARGSUSED*/
1123 void
1124 fuword8_noerr(const void *addr, uint8_t *dst)
1125 {}
1126 
1127 /*ARGSUSED*/
1128 void
1129 fuword16_noerr(const void *addr, uint16_t *dst)
1130 {}
1131 
1132 /*ARGSUSED*/
1133 void
1134 fuword32_noerr(const void *addr, uint32_t *dst)
1135 {}
1136 
1137 /*ARGSUSED*/
1138 void
1139 fuword64_noerr(const void *addr, uint64_t *dst)
1140 {}
1141 
1142 #else   /* lint */
1143 
/*
 * _noerr variants: bare ASI_USER loads with no local fault handler and
 * no return status.  NOTE(review): callers presumably arm t_lofault /
 * on_fault() protection first -- confirm before relying on these.
 */
1144         ENTRY(fuword8_noerr)
1145         lduba   [%o0]ASI_USER, %o0
1146         retl
1147         stb     %o0, [%o1]
1148         SET_SIZE(fuword8_noerr)
1149 
1150         ENTRY(fuword16_noerr)
1151         lduha   [%o0]ASI_USER, %o0
1152         retl
1153         sth     %o0, [%o1]
1154         SET_SIZE(fuword16_noerr)
1155 
1156         ENTRY(fuword32_noerr)
1157         lda     [%o0]ASI_USER, %o0
1158         retl
1159         st      %o0, [%o1]
1160         SET_SIZE(fuword32_noerr)
1161 
1162         ENTRY(fuword64_noerr)
1163         ldxa    [%o0]ASI_USER, %o0
1164         retl
1165         stx     %o0, [%o1]
1166         SET_SIZE(fuword64_noerr)
1167 
1168 #endif  /* lint */
1169 
1170 #if defined(lint)
1171 
1172 /*ARGSUSED*/
1173 void
1174 suword8_noerr(void *addr, uint8_t value)
1175 {}
1176 
1177 /*ARGSUSED*/
1178 void
1179 suword16_noerr(void *addr, uint16_t value)
1180 {}
1181 
1182 /*ARGSUSED*/
1183 void
1184 suword32_noerr(void *addr, uint32_t value)
1185 {}
1186 
1187 /*ARGSUSED*/
1188 void
1189 suword64_noerr(void *addr, uint64_t value)
1190 {}
1191 
1192 #else   /* lint */
1193 
/*
 * _noerr variants: bare ASI_USER stores (in the retl delay slot) with no
 * local fault handler and no return status.  NOTE(review): callers
 * presumably arm t_lofault / on_fault() protection first -- confirm.
 */
1194         ENTRY(suword8_noerr)
1195         retl
1196         stba    %o1, [%o0]ASI_USER
1197         SET_SIZE(suword8_noerr)
1198 
1199         ENTRY(suword16_noerr)
1200         retl
1201         stha    %o1, [%o0]ASI_USER
1202         SET_SIZE(suword16_noerr)
1203 
1204         ENTRY(suword32_noerr)
1205         retl
1206         sta     %o1, [%o0]ASI_USER
1207         SET_SIZE(suword32_noerr)
1208 
1209         ENTRY(suword64_noerr)
1210         retl
1211         stxa    %o1, [%o0]ASI_USER
1212         SET_SIZE(suword64_noerr)
1213 
1214 #endif  /* lint */
1215 
/*
 * Weak aliases mapping the byte/long-word user-access entry points onto
 * the fixed-size routines above: subyte -> suword8, and the "ulong_t"
 * sized fulword/sulword -> the 64-bit routines for _LP64 kernels or the
 * 32-bit routines otherwise.
 */
1216 #if defined(__lint)
1217 
1218 /*ARGSUSED*/
1219 int
1220 subyte(void *addr, uchar_t value)
1221 { return (0); }
1222 
1223 /*ARGSUSED*/
1224 void
1225 subyte_noerr(void *addr, uchar_t value)
1226 {}
1227 
1228 /*ARGSUSED*/
1229 int
1230 fulword(const void *addr, ulong_t *valuep)
1231 { return (0); }
1232 
1233 /*ARGSUSED*/
1234 void
1235 fulword_noerr(const void *addr, ulong_t *valuep)
1236 {}
1237 
1238 /*ARGSUSED*/
1239 int
1240 sulword(void *addr, ulong_t valuep)
1241 { return (0); }
1242 
1243 /*ARGSUSED*/
1244 void
1245 sulword_noerr(void *addr, ulong_t valuep)
1246 {}
1247 
1248 #else
1249 
1250         .weak   subyte
1251         subyte=suword8
1252         .weak   subyte_noerr
1253         subyte_noerr=suword8_noerr
1254 #ifdef _LP64
1255         .weak   fulword
1256         fulword=fuword64                /* ulong_t is 64 bits under _LP64 */
1257         .weak   fulword_noerr
1258         fulword_noerr=fuword64_noerr
1259         .weak   sulword
1260         sulword=suword64
1261         .weak   sulword_noerr
1262         sulword_noerr=suword64_noerr
1263 #else
1264         .weak   fulword
1265         fulword=fuword32                /* ulong_t is 32 bits otherwise */
1266         .weak   fulword_noerr
1267         fulword_noerr=fuword32_noerr
1268         .weak   sulword
1269         sulword=suword32
1270         .weak   sulword_noerr
1271         sulword_noerr=suword32_noerr
1272 #endif  /* LP64 */
1273 
1274 #endif  /* lint */
1275 
1276 /*
1277  * We define rdtick here, but not for sun4v. On sun4v systems, the %tick
1278  * and %stick should not be read directly without considering the tick
1279  * and stick offset kernel variables introduced to support sun4v OS
1280  * suspension.
1281  */
/* rdtick(): return the raw %tick register (not built for sun4v, see above). */
1282 #if !defined (sun4v)
1283 
1284 #if defined (lint)
1285 
1286 hrtime_t
1287 rdtick()
1288 { return (0); }
1289 
1290 #else /* lint */
1291 
1292         ENTRY(rdtick)
1293         retl
1294         rd      %tick, %o0              ! delay slot - return %tick
1295         SET_SIZE(rdtick)
1296 
1297 #endif /* lint */
1298 
1299 #endif /* !sun4v */
1300 
1301 /*
1302  * Set tba to given address, no side effects.
1303  */
1304 #if defined (lint)
1305 
1306 /*ARGSUSED*/
1307 void *
1308 set_tba(void *new_tba)
1309 { return (0); }
1310 
1311 #else   /* lint */
1312 
1313         ENTRY(set_tba)
1314         mov     %o0, %o1                ! stash new_tba
1315         rdpr    %tba, %o0               ! %o0 = old trap base (return value)
1316         wrpr    %o1, %tba               ! install the new trap base address
1317         retl
1318         nop
1319         SET_SIZE(set_tba)
1320 
1321 #endif  /* lint */
1322 
1323 #if defined (lint)
1324 
1325 /*ARGSUSED*/
1326 void *
1327 get_tba()
1328 { return (0); }
1329 
1330 #else   /* lint */
1331 
1332         ENTRY(get_tba)
1333         retl
1334         rdpr    %tba, %o0               ! delay slot - return current trap base
1335         SET_SIZE(get_tba)
1336 
1337 #endif  /* lint */
1338 
/* setpstate(pstate): write %pstate wholesale; no old value is returned. */
1339 #if defined(lint) || defined(__lint)
1340 
1341 /* ARGSUSED */
1342 void
1343 setpstate(u_int pstate)
1344 {}
1345 
1346 #else   /* lint */
1347 
1348         ENTRY_NP(setpstate)
1349         retl
1350         wrpr    %g0, %o0, %pstate       ! delay slot - %pstate = pstate
1351         SET_SIZE(setpstate)
1352 
1353 #endif  /* lint */
1354 
/* getpstate(): return the current %pstate register. */
1355 #if defined(lint) || defined(__lint)
1356 
1357 u_int
1358 getpstate(void)
1359 { return(0); }
1360 
1361 #else   /* lint */
1362 
1363         ENTRY_NP(getpstate)
1364         retl
1365         rdpr    %pstate, %o0            ! delay slot - return %pstate
1366         SET_SIZE(getpstate)
1367 
1368 #endif  /* lint */
1369 
/*
 * dtrace_interrupt_disable(): clear PSTATE_IE and return the previous
 * %pstate as an opaque cookie for dtrace_interrupt_enable().
 */
1370 #if defined(lint) || defined(__lint)
1371 
1372 dtrace_icookie_t
1373 dtrace_interrupt_disable(void)
1374 { return (0); }
1375 
1376 #else   /* lint */
1377 
1378         ENTRY_NP(dtrace_interrupt_disable)
1379         rdpr    %pstate, %o0            ! %o0 = old %pstate (the cookie)
1380         andn    %o0, PSTATE_IE, %o1     ! %o1 = %pstate with IE cleared
1381         retl
1382         wrpr    %g0, %o1, %pstate       ! delay slot - interrupts off
1383         SET_SIZE(dtrace_interrupt_disable)
1384 
1385 #endif  /* lint */
1386 
/*
 * dtrace_interrupt_enable(cookie): restore the %pstate saved by
 * dtrace_interrupt_disable() (re-enabling interrupts iff IE was set).
 */
1387 #if defined(lint) || defined(__lint)
1388 
1389 /*ARGSUSED*/
1390 void
1391 dtrace_interrupt_enable(dtrace_icookie_t cookie)
1392 {}
1393 
1394 #else
1395 
1396         ENTRY_NP(dtrace_interrupt_enable)
1397         retl
1398         wrpr    %g0, %o0, %pstate ! delay slot - %pstate = cookie
1399         SET_SIZE(dtrace_interrupt_enable)
1400 
1401 #endif /* lint*/
1402 
/*
 * dtrace_membar_producer/consumer(): C-callable store/load memory
 * barriers.  The membar executes in the delay slot of the return.
 * Under SF_ERRATA_51 the return is routed through an aligned
 * retl/nop trampoline instead of a direct retl — presumably a
 * Spitfire-CPU erratum workaround; see the SF_ERRATA_51 definition.
 */
1403 #if defined(lint)
1404 
1405 void
1406 dtrace_membar_producer(void)
1407 {}
1408 
1409 void
1410 dtrace_membar_consumer(void)
1411 {}
1412 
1413 #else   /* lint */
1414 
1415 #ifdef SF_ERRATA_51
1416         .align 32
1417         ENTRY(dtrace_membar_return)
1418         retl
1419         nop
1420         SET_SIZE(dtrace_membar_return)
1421 #define DTRACE_MEMBAR_RETURN    ba,pt %icc, dtrace_membar_return
1422 #else
1423 #define DTRACE_MEMBAR_RETURN    retl
1424 #endif
1425 
1426         ENTRY(dtrace_membar_producer)
1427         DTRACE_MEMBAR_RETURN
1428         membar  #StoreStore             ! delay slot - order prior stores
1429         SET_SIZE(dtrace_membar_producer)
1430 
1431         ENTRY(dtrace_membar_consumer)
1432         DTRACE_MEMBAR_RETURN
1433         membar  #LoadLoad               ! delay slot - order prior loads
1434         SET_SIZE(dtrace_membar_consumer)
1435 
1436 #endif  /* lint */
1437 
/* dtrace_flush_windows(): spill all active register windows to the stack. */
1438 #if defined(lint) || defined(__lint)
1439 
1440 void
1441 dtrace_flush_windows(void)
1442 {}
1443 
1444 #else
1445 
1446         ENTRY_NP(dtrace_flush_windows)
1447         retl
1448         flushw                          ! delay slot - flush register windows
1449         SET_SIZE(dtrace_flush_windows)
1450 
1451 #endif  /* lint */
1452 
1453 #if defined(lint)
1454 
1455 /*ARGSUSED*/
1456 int
1457 getpcstack_top(pc_t *pcstack, int limit, uintptr_t *lastfp, pc_t *lastpc)
1458 {
1459         return (0);
1460 }
1461 
1462 #else   /* lint */
1463 
1464         /*
1465          * %g1  pcstack
1466          * %g2  iteration count
1467          * %g3  final %fp
1468          * %g4  final %i7
1469          * %g5  saved %cwp (so we can get back to the original window)
1470          *
1471          * %o0  pcstack / return value (iteration count)
1472          * %o1  limit / saved %cansave
1473          * %o2  lastfp
1474          * %o3  lastpc
1475          * %o4  saved %canrestore
1476          * %o5  saved %pstate (to restore interrupts)
1477          *
1478          * Note:  The frame pointer returned via lastfp is safe to use as
1479          *      long as getpcstack_top() returns either (0) or a value less
1480          *      than (limit).
1481          */
1482         ENTRY_NP(getpcstack_top)
1483 


1514         mov     %i7, %g4                ! aside so we can return them to our
1515                                         ! caller
1516 
1517         wrpr    %g0, %g5, %cwp          ! jump back to the original window
1518         wrpr    %g0, %o1, %cansave      ! and restore the original register
1519         wrpr    %g0, %o4, %canrestore   ! window state.
1520 2:
1521         stn     %g3, [%o2]              ! store the frame pointer and pc
1522         st      %g4, [%o3]              ! so our caller can continue the trace
1523 
1524         retl                            ! return to caller
1525         wrpr    %g0, %o5, %pstate       ! restore interrupts
1526 
1527 3:
1528         flushw                          ! flush register windows, then
1529         ldn     [%fp + STACK_BIAS + 14*CLONGSIZE], %g3  ! load initial fp
1530         ba      2b
1531         ldn     [%fp + STACK_BIAS + 15*CLONGSIZE], %g4  ! and pc
1532         SET_SIZE(getpcstack_top)
1533 
1534 #endif  /* lint */
1535 
/* setwstate(wstate): write the window-state register %wstate. */
1536 #if defined(lint) || defined(__lint)
1537 
1538 /* ARGSUSED */
1539 void
1540 setwstate(u_int wstate)
1541 {}
1542 
1543 #else   /* lint */
1544 
1545         ENTRY_NP(setwstate)
1546         retl
1547         wrpr    %g0, %o0, %wstate       ! delay slot - %wstate = wstate
1548         SET_SIZE(setwstate)
1549 
1550 #endif  /* lint */
1551 
1552 
/* getwstate(): return the current %wstate register. */
1553 #if defined(lint) || defined(__lint)
1554 
1555 u_int
1556 getwstate(void)
1557 { return(0); }
1558 
1559 #else   /* lint */
1560 
1561         ENTRY_NP(getwstate)
1562         retl
1563         rdpr    %wstate, %o0            ! delay slot - return %wstate
1564         SET_SIZE(getwstate)
1565 
1566 #endif  /* lint */
1567 
1568 
1569 /*
1570  * int panic_trigger(int *tp)
1571  *
1572  * A panic trigger is a word which is updated atomically and can only be set
1573  * once.  We atomically store 0xFF into the high byte and load the old value.
1574  * If the byte was 0xFF, the trigger has already been activated and we fail.
1575  * If the previous value was 0 or not 0xFF, we succeed.  This allows a
1576  * partially corrupt trigger to still trigger correctly.  DTrace has its own
1577  * version of this function to allow it to panic correctly from probe context.
1578  */
/*
 * Both routines return 1 if this call activated the trigger (the old
 * byte was not already 0xFF), or 0 if it had already fired.  See the
 * block comment above for why ldstub of the high byte is used.
 */
1579 #if defined(lint)
1580 
1581 /*ARGSUSED*/
1582 int panic_trigger(int *tp) { return (0); }
1583 
1584 /*ARGSUSED*/
1585 int dtrace_panic_trigger(int *tp) { return (0); }
1586 
1587 #else   /* lint */
1588 
1589         ENTRY_NP(panic_trigger)
1590         ldstub  [%o0], %o0              ! store 0xFF, load byte into %o0
1591         cmp     %o0, 0xFF               ! compare %o0 to 0xFF
1592         set     1, %o1                  ! %o1 = 1
1593         be,a    0f                      ! if (%o0 == 0xFF) goto 0f (else annul)
1594         set     0, %o1                  ! delay - %o1 = 0
1595 0:      retl
1596         mov     %o1, %o0                ! return (%o1);
1597         SET_SIZE(panic_trigger)
1598 
1599         ENTRY_NP(dtrace_panic_trigger)
1600         ldstub  [%o0], %o0              ! store 0xFF, load byte into %o0
1601         cmp     %o0, 0xFF               ! compare %o0 to 0xFF
1602         set     1, %o1                  ! %o1 = 1
1603         be,a    0f                      ! if (%o0 == 0xFF) goto 0f (else annul)
1604         set     0, %o1                  ! delay - %o1 = 0
1605 0:      retl
1606         mov     %o1, %o0                ! return (%o1);
1607         SET_SIZE(dtrace_panic_trigger)
1608 
1609 #endif  /* lint */
1610 
1611 /*
1612  * void vpanic(const char *format, va_list alist)
1613  *
1614  * The panic() and cmn_err() functions invoke vpanic() as a common entry point
1615  * into the panic code implemented in panicsys().  vpanic() is responsible
1616  * for passing through the format string and arguments, and constructing a
1617  * regs structure on the stack into which it saves the current register
1618  * values.  If we are not dying due to a fatal trap, these registers will
1619  * then be preserved in panicbuf as the current processor state.  Before
1620  * invoking panicsys(), vpanic() activates the first panic trigger (see
1621  * common/os/panic.c) and switches to the panic_stack if successful.  Note that
1622  * DTrace takes a slightly different panic path if it must panic from probe
1623  * context.  Instead of calling panic, it calls into dtrace_vpanic(), which
1624  * sets up the initial stack as vpanic does, calls dtrace_panic_trigger(), and
1625  * branches back into vpanic().
1626  */
1627 #if defined(lint)
1628 
1629 /*ARGSUSED*/
1630 void vpanic(const char *format, va_list alist) {}
1631 
1632 /*ARGSUSED*/
1633 void dtrace_vpanic(const char *format, va_list alist) {}
1634 
1635 #else   /* lint */
1636 
1637         ENTRY_NP(vpanic)
1638 
1639         save    %sp, -SA(MINFRAME + REGSIZE), %sp       ! save and allocate regs
1640 
1641         !
1642         ! The v9 struct regs has a 64-bit r_tstate field, which we use here
1643         ! to store the %ccr, %asi, %pstate, and %cwp as they would appear
1644         ! in %tstate if a trap occurred.  We leave it up to the debugger to
1645         ! realize what happened and extract the register values.
1646         !
1647         rd      %ccr, %l0                               ! %l0 = %ccr
1648         sllx    %l0, TSTATE_CCR_SHIFT, %l0              ! %l0 <<= CCR_SHIFT
1649         rd      %asi, %l1                               ! %l1 = %asi
1650         sllx    %l1, TSTATE_ASI_SHIFT, %l1              ! %l1 <<= ASI_SHIFT
1651         or      %l0, %l1, %l0                           ! %l0 |= %l1
1652         rdpr    %pstate, %l1                            ! %l1 = %pstate
1653         sllx    %l1, TSTATE_PSTATE_SHIFT, %l1           ! %l1 <<= PSTATE_SHIFT
1654         or      %l0, %l1, %l0                           ! %l0 |= %l1
1655         rdpr    %cwp, %l1                               ! %l1 = %cwp
1656         sllx    %l1, TSTATE_CWP_SHIFT, %l1              ! %l1 <<= CWP_SHIFT


1744         sllx    %l1, TSTATE_CWP_SHIFT, %l1              ! %l1 <<= CWP_SHIFT
1745         or      %l0, %l1, %l0                           ! %l0 |= %l1
1746 
1747         set     dtrace_vpanic, %l1                      ! %l1 = %pc (vpanic)
1748         add     %l1, 4, %l2                             ! %l2 = %npc (vpanic+4)
1749         rd      %y, %l3                                 ! %l3 = %y
1750         !
1751         ! Flush register windows before panic_trigger() in order to avoid a
1752         ! problem that a dump hangs if flush_windows() causes another panic.
1753         !
1754         call    dtrace_flush_windows
1755         nop
1756 
1757         sethi   %hi(panic_quiesce), %o0
1758         call    dtrace_panic_trigger
1759         or      %o0, %lo(panic_quiesce), %o0            ! if (!panic_trigger(
1760 
1761         ba,a    vpanic_common
1762         SET_SIZE(dtrace_vpanic)
1763         
1764 #endif  /* lint */
1765 
/*
 * get_subcc_ccr(addrl, addrr): compute (addrl - addrr) for its side
 * effect only and return the resulting condition codes (%ccr).
 */
1766 #if defined(lint)
1767 
1768 /*ARGSUSED*/
1769 
1770 uint_t
1771 get_subcc_ccr( uint64_t addrl, uint64_t addrr)
1772 { return (0); }
1773 
1774 #else   /* lint */
1775 
1776         ENTRY(get_subcc_ccr)
1777         wr      %g0, %ccr       ! clear condition codes
1778         subcc   %o0, %o1, %g0   ! set %ccr from addrl - addrr, discard result
1779         retl
1780         rd      %ccr, %o0       ! return condition codes
1781         SET_SIZE(get_subcc_ccr)
1782 
1783 #endif  /* lint */
1784 
/*
 * ftrace_interrupt_disable(): clear PSTATE_IE and return the previous
 * %pstate as a cookie (same pattern as dtrace_interrupt_disable above).
 */
1785 #if defined(lint) || defined(__lint)
1786 
1787 ftrace_icookie_t
1788 ftrace_interrupt_disable(void)
1789 { return (0); }
1790 
1791 #else   /* lint */
1792 
1793         ENTRY_NP(ftrace_interrupt_disable)
1794         rdpr    %pstate, %o0            ! %o0 = old %pstate (the cookie)
1795         andn    %o0, PSTATE_IE, %o1     ! %o1 = %pstate with IE cleared
1796         retl
1797         wrpr    %g0, %o1, %pstate       ! delay slot - interrupts off
1798         SET_SIZE(ftrace_interrupt_disable)
1799 
1800 #endif  /* lint */
1801 
/*
 * ftrace_interrupt_enable(cookie): restore the %pstate saved by
 * ftrace_interrupt_disable().
 */
1802 #if defined(lint) || defined(__lint)
1803 
1804 /*ARGSUSED*/
1805 void
1806 ftrace_interrupt_enable(ftrace_icookie_t cookie)
1807 {}
1808 
1809 #else
1810 
1811         ENTRY_NP(ftrace_interrupt_enable)
1812         retl
1813         wrpr    %g0, %o0, %pstate ! delay slot - %pstate = cookie
1814         SET_SIZE(ftrace_interrupt_enable)
1815 
1816 #endif /* lint*/


  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  23  * Use is subject to license terms.
  24  */
  25 
  26 /*
  27  * General assembly language routines.
  28  * It is the intent of this file to contain routines that are
  29  * independent of the specific kernel architecture, and those that are
  30  * common across kernel architectures.
  31  * As architectures diverge, and implementations of specific
  32  * architecture-dependent routines change, the routines should be moved
  33  * from this file into the respective ../`arch -k`/subr.s file.
  34  * Or, if you want to be really nice, move them to a file whose
  35  * name has something to do with the routine you are moving.
  36  */
  37 











  38 #include <sys/asm_linkage.h>
  39 #include <sys/privregs.h>
  40 #include <sys/machparam.h>        /* To get SYSBASE and PAGESIZE */
  41 #include <sys/machthread.h>
  42 #include <sys/clock.h>
  43 #include <sys/psr_compat.h>
  44 #include <sys/isa_defs.h>
  45 #include <sys/dditypes.h>
  46 #include <sys/panic.h>
  47 #include <sys/machlock.h>
  48 #include <sys/ontrap.h>
  49 

  50 #include "assym.h"
  51 
  52         .seg    ".text"
  53         .align  4
  54 
  55 /*
  56  * Macro to raise processor priority level.
  57  * Avoid dropping processor priority if already at high level.
  58  * Also avoid going below CPU->cpu_base_spl, which could've just been set by
  59  * a higher-level interrupt thread that just blocked.
  60  *
  61  * level can be %o0 (not other regs used here) or a constant.
  62  */
  63 #define RAISE(level) \
  64         rdpr    %pil, %o1;              /* get current PIL */           \
  65         cmp     %o1, level;             /* is PIL high enough? */       \
  66         bge     1f;                     /* yes, return */               \
  67         nop;                                                            \
  68         wrpr    %g0, PIL_MAX, %pil;     /* freeze CPU_BASE_SPL */       \
  69         ldn     [THREAD_REG + T_CPU], %o2;                              \


 105         ld      [%o2 + CPU_BASE_SPL], %o2;                              \
 106         cmp     %o2, level;             /* compare new to base */       \
 107         movl    %xcc, level, %o2;       /* use new if base lower */     \
 108         wrpr    %g0, %o2, %pil;                                         \
 109         retl;                                                           \
 110         mov     %o1, %o0                /* return old PIL */
 111 
 112 /*
 113  * Macro to set the priority to a specified level at or above LOCK_LEVEL.
 114  * Doesn't require comparison to CPU->cpu_base_spl.
 115  *
 116  * newpil can be %o0 (not other regs used here) or a constant with
 117  * the new PIL in the PSR_PIL field of the level arg.
 118  */
 119 #define SETPRI_HIGH(level) \
 120         rdpr    %pil, %o1;              /* get current PIL */           \
 121         wrpr    %g0, level, %pil;                                       \
 122         retl;                                                           \
 123         mov     %o1, %o0                /* return old PIL */
 124 


 125         /*
 126          * Berkeley 4.3 introduced symbolically named interrupt levels
 127          * as a way to deal with priority in a machine independent fashion.
 128          * Numbered priorities are machine specific, and should be
 129          * discouraged where possible.
 130          *
 131          * Note, for the machine specific priorities there are
 132          * examples listed for devices that use a particular priority.
 133          * It should not be construed that all devices of that
 134          * type should be at that priority.  It is currently where
 135          * the current devices fit into the priority scheme based
 136          * upon time criticalness.
 137          *
 138          * The underlying assumption of these assignments is that
 139          * SPARC9 IPL 10 is the highest level from which a device
 140          * routine can call wakeup.  Devices that interrupt from higher
 141          * levels are restricted in what they can do.  If they need
 142          * kernels services they should schedule a routine at a lower
 143          * level (via software interrupt) to do the required
 144          * processing.


 147          *      Level   Usage
 148          *      15      Asynchronous memory exceptions
 149          *      14      Profiling clock (and PROM uart polling clock)
 150          *      13      Audio device
 151          *      12      Serial ports
 152          *      11      Floppy controller
 153          *
 154          * The serial ports request lower level processing on level 6.
 155          * Audio and floppy request lower level processing on level 4.
 156          *
 157          * Also, almost all splN routines (where N is a number or a
 158          * mnemonic) will do a RAISE(), on the assumption that they are
 159          * never used to lower our priority.
 160          * The exceptions are:
 161          *      spl8()          Because you can't be above 15 to begin with!
 162          *      splzs()         Because this is used at boot time to lower our
 163          *                      priority, to allow the PROM to poll the uart.
 164          *      spl0()          Used to lower priority to 0.
 165          */
 166 












        /*
         * splN routines.  SETPRI_HIGH (above) returns the old PIL in %o0;
         * RAISE_HIGH and SETPRI are defined in an elided portion of this
         * file — presumably they follow the same convention (TODO confirm).
         */
 167         /* locks out all interrupts, including memory errors */
 168         ENTRY(spl8)
 169         SETPRI_HIGH(15)
 170         SET_SIZE(spl8)
 171 
 172         /* just below the level that profiling runs */
 173         ENTRY(spl7)
 174         RAISE_HIGH(13)
 175         SET_SIZE(spl7)
 176 
 177         /* sun specific - highest priority onboard serial i/o zs ports */
 178         ENTRY(splzs)
 179         SETPRI_HIGH(12) /* Can't be a RAISE, as it's used to lower us */
 180         SET_SIZE(splzs)
 181 
 182         /*
 183          * should lock out clocks and all interrupts,
 184          * as you can see, there are exceptions
 185          */
 186         ENTRY(splhi)
 187         ALTENTRY(splhigh)
 188         ALTENTRY(spl6)
 189         ALTENTRY(i_ddi_splhigh)
 190         RAISE_HIGH(DISP_LEVEL)
 191         SET_SIZE(i_ddi_splhigh)
 192         SET_SIZE(spl6)
 193         SET_SIZE(splhigh)
 194         SET_SIZE(splhi)
 195 
 196         /* allow all interrupts */
 197         ENTRY(spl0)
 198         SETPRI(0)
 199         SET_SIZE(spl0)
 200 


 201 /*
 202  * splx - set PIL back to that indicated by the old %pil passed as an argument,
 203  * or to the CPU's base priority, whichever is higher.
 204  */
 205 









        /* splx(old_pil): SETPRI honors cpu_base_spl (defined in elided text) */
 206         ENTRY(splx)
 207         ALTENTRY(i_ddi_splx)
 208         SETPRI(%o0)             /* set PIL */
 209         SET_SIZE(i_ddi_splx)
 210         SET_SIZE(splx)
 211 


 212 /*
 213  * splr()
 214  *
 215  * splr is like splx but will only raise the priority and never drop it
 216  * Be careful not to set priority lower than CPU->cpu_base_pri,
 217  * even though it seems we're raising the priority, it could be set higher
 218  * at any time by an interrupt routine, so we must block interrupts and
 219  * look at CPU->cpu_base_pri.
 220  */
 221 








        /* splr(pil): RAISE (top of file) never lowers PIL; returns old PIL */
 222         ENTRY(splr)
 223         RAISE(%o0)
 224         SET_SIZE(splr)
 225 


 226 /*
 227  * on_fault()
 228  * Catch lofault faults. Like setjmp except it returns one
 229  * if code following causes uncorrectable fault. Turned off
 230  * by calling no_fault().
 231  */
 232 









        /*
         * on_fault() tail-branches to setjmp, so it returns 0 to its
         * caller now; if a lofault fault later fires, catch_fault runs,
         * clears t_onfault/t_lofault and longjmps, so on_fault appears
         * to return a second time with value 1 (see longjmp below).
         */
 233         ENTRY(on_fault)
 234         membar  #Sync                   ! sync error barrier (see copy.s)
 235         stn     %o0, [THREAD_REG + T_ONFAULT]
 236         set     catch_fault, %o1
 237         b       setjmp                  ! let setjmp do the rest
 238         stn     %o1, [THREAD_REG + T_LOFAULT]   ! put catch_fault in t_lofault
 239 
 240 catch_fault:
 241         save    %sp, -SA(WINDOWSIZE), %sp ! goto next window so that we can rtn
 242         ldn     [THREAD_REG + T_ONFAULT], %o0
 243         membar  #Sync                           ! sync error barrier
 244         stn     %g0, [THREAD_REG + T_ONFAULT]   ! turn off onfault
 245         b       longjmp                 ! let longjmp do the rest
 246         stn     %g0, [THREAD_REG + T_LOFAULT]   ! turn off lofault
 247         SET_SIZE(on_fault)
 248 


 249 /*
 250  * no_fault()
 251  * turn off fault catching.
 252  */
 253 








        /* no_fault(): clear both t_onfault and t_lofault for curthread */
 254         ENTRY(no_fault)
 255         membar  #Sync                           ! sync error barrier
 256         stn     %g0, [THREAD_REG + T_ONFAULT]
 257         retl
 258         stn     %g0, [THREAD_REG + T_LOFAULT]   ! turn off lofault
 259         SET_SIZE(no_fault)
 260 


 261 /*
 262  * Default trampoline code for on_trap() (see <sys/ontrap.h>).  On sparcv9,
 263  * the trap code will complete trap processing but reset the return %pc to
 264  * ot_trampoline, which will by default be set to the address of this code.
 265  * We longjmp(&curthread->t_ontrap->ot_jmpbuf) to return back to on_trap().
 266  */

 267 






        /* default ot_trampoline: longjmp back into on_trap()'s ot_jmpbuf */
 268         ENTRY(on_trap_trampoline)
 269         ldn     [THREAD_REG + T_ONTRAP], %o0    
 270         b       longjmp                 
 271         add     %o0, OT_JMPBUF, %o0     ! delay slot - %o0 = &ot_jmpbuf
 272         SET_SIZE(on_trap_trampoline)
 273 


 274 /*
 275  * Push a new element on to the t_ontrap stack.  Refer to <sys/ontrap.h> for
 276  * more information about the on_trap() mechanism.  If the on_trap_data is the
 277  * same as the topmost stack element, we just modify that element.
 278  * On UltraSPARC, we need to issue a membar #Sync before modifying t_ontrap.
 279  * The issue barrier is defined to force all deferred errors to complete before
 280  * we go any further.  We want these errors to be processed before we modify
 281  * our current error protection.
 282  */

 283 







        /*
         * on_trap(otp, prot): initialize *otp, push it on t_ontrap unless
         * it is already topmost, then tail-branch to setjmp (returns 0 now,
         * 1 if a protected trap later longjmps back).
         */
 284         ENTRY(on_trap)
 285         membar  #Sync                           ! force error barrier
 286         sth     %o1, [%o0 + OT_PROT]            ! ot_prot = prot
 287         sth     %g0, [%o0 + OT_TRAP]            ! ot_trap = 0
 288         set     on_trap_trampoline, %o2         ! %o2 = &on_trap_trampoline
 289         stn     %o2, [%o0 + OT_TRAMPOLINE]      ! ot_trampoline = %o2
 290         stn     %g0, [%o0 + OT_HANDLE]          ! ot_handle = NULL
 291         ldn     [THREAD_REG + T_ONTRAP], %o2    ! %o2 = curthread->t_ontrap
 292         cmp     %o0, %o2                        ! if (otp == %o2)
 293         be      0f                              !    don't modify t_ontrap
 294         stn     %g0, [%o0 + OT_PAD1]            ! delay - ot_pad1 = NULL
 295 
 296         stn     %o2, [%o0 + OT_PREV]            ! ot_prev = t_ontrap
 297         membar  #Sync                           ! force error barrier
 298         stn     %o0, [THREAD_REG + T_ONTRAP]    ! t_ontrap = otp
 299 
 300 0:      b       setjmp                          ! let setjmp do the rest
 301         add     %o0, OT_JMPBUF, %o0             ! %o0 = &ot_jmpbuf
 302         SET_SIZE(on_trap)
 303 


 304 /*
 305  * Setjmp and longjmp implement non-local gotos using state vectors
 306  * type label_t.
 307  */
 308 









        /* setjmp(lp): record caller's %pc/%sp in the label_t; return 0 */
 309         ENTRY(setjmp)
 310         stn     %o7, [%o0 + L_PC]       ! save return address
 311         stn     %sp, [%o0 + L_SP]       ! save stack ptr
 312         retl
 313         clr     %o0                     ! return 0
 314         SET_SIZE(setjmp)
 315 

 316 










        /*
         * longjmp(lp): never returns to its direct caller; control resumes
         * at the label_t's saved %pc with return value 1.
         */
 317         ENTRY(longjmp)
 318         !
 319         ! The following save is required so that an extra register
 320         ! window is flushed.  Flushw flushes nwindows-2
 321         ! register windows.  If setjmp and longjmp are called from
 322         ! within the same window, that window will not get pushed
 323         ! out onto the stack without the extra save below.  Tail call
 324         ! optimization can lead to callers of longjmp executing
 325         ! from a window that could be the same as the setjmp,
 326         ! thus the need for the following save.
 327         !
 328         save    %sp, -SA(MINFRAME), %sp
 329         flushw                          ! flush all but this window
 330         ldn     [%i0 + L_PC], %i7       ! restore return addr
 331         ldn     [%i0 + L_SP], %fp       ! restore sp for dest on foreign stack
 332         ret                             ! return 1
 333         restore %g0, 1, %o0             ! takes underflow, switches stacks
 334         SET_SIZE(longjmp)
 335 


 336 /*
 337  * movtuc(length, from, to, table)
 338  *
 339  * VAX movtuc instruction (sort of).
 340  */
 341 









        /*
         * movtuc(length, from, to, table): store table[from[i]] into
         * to[i] for up to 'length' bytes, stopping when a byte's table
         * entry is 0 (the "escape" byte — note that the 0 entry is still
         * stored, since the stb sits in a non-annulled delay slot).
         * Returns the number of bytes moved before the escape.
         */
 342         ENTRY(movtuc)
 343         tst     %o0
 344         ble,pn  %ncc, 2f                ! check length
 345         clr     %o4
 346 
 347         ldub    [%o1 + %o4], %g1        ! get next byte in string
 348 0:
 349         ldub    [%o3 + %g1], %g1        ! get corresponding table entry
 350         tst     %g1                     ! escape char?
 351         bnz     1f
 352         stb     %g1, [%o2 + %o4]        ! delay slot, store it
 353 
 354         retl                            ! return (bytes moved)
 355         mov     %o4, %o0
 356 1:
 357         inc     %o4                     ! increment index
 358         cmp     %o4, %o0                ! index < length ?
 359         bl,a,pt %ncc, 0b
 360         ldub    [%o1 + %o4], %g1        ! delay slot, get next byte in string
 361 2:
 362         retl                            ! return (bytes moved)
 363         mov     %o4, %o0
 364         SET_SIZE(movtuc)
 365 


 366 /*
 367  * scanc(length, string, table, mask)
 368  *
 369  * VAX scanc instruction.
 370  */
 371 









        /*
         * scanc(length, string, table, mask): scan until a byte whose
         * table entry ANDed with mask is non-zero, or the string is
         * exhausted; returns (length - index) — 0 means no match.
         */
 372         ENTRY(scanc)
 373         tst     %o0     
 374         ble,pn  %ncc, 1f                ! check length
 375         clr     %o4
 376 0:
 377         ldub    [%o1 + %o4], %g1        ! get next byte in string
 378         cmp     %o4, %o0                ! interlock slot, index < length ?
 379         ldub    [%o2 + %g1], %g1        ! get corresponding table entry
 380         bge,pn  %ncc, 1f                ! interlock slot
 381         btst    %o3, %g1                ! apply the mask
 382         bz,a    0b
 383         inc     %o4                     ! delay slot, increment index
 384 1:
 385         retl                            ! return(length - index)
 386         sub     %o0, %o4, %o0
 387         SET_SIZE(scanc)
 388 


 389 /*
 390  * if a() calls b() calls caller(),
 391  * caller() returns return address in a().
 392  */
 393 








 394         ENTRY(caller)
 395         retl
 396         mov     %i7, %o0                ! delay slot - our caller's return addr
 397         SET_SIZE(caller)
 398 


 399 /*
 400  * if a() calls callee(), callee() returns the
 401  * return address in a();
 402  */
 403 








 404         ENTRY(callee)
 405         retl
 406         mov     %o7, %o0                ! delay slot - return addr into callee
 407         SET_SIZE(callee)
 408 


 409 /*
 410  * return the current frame pointer
 411  */
 412 








 413         ENTRY(getfp)
 414         retl
 415         mov     %fp, %o0                ! delay slot - return frame pointer
 416         SET_SIZE(getfp)
 417 


 418 /*
 419  * Get trap base register (%tbr)
 420  */
 421 








 422         ENTRY(gettbr)
 423         retl
 424         mov     %tbr, %o0               ! delay slot - %tbr = trap base register
 425         SET_SIZE(gettbr)
 426 


 427 /*
 428  * Get processor state register, V9 faked to look like V8.
 429  * Note: does not provide ccr.xcc and provides FPRS.FEF instead of
 430  * PSTATE.PEF, because PSTATE.PEF is always on in order to allow the
 431  * libc_psr memcpy routines to run without hitting the fp_disabled trap.
 432  */
 433 








        /* getpsr(): synthesize a V8-style %psr from V9 %ccr and %fprs */
 434         ENTRY(getpsr)
 435         rd      %ccr, %o1                       ! get ccr
 436         sll     %o1, PSR_ICC_SHIFT, %o0         ! move icc to V8 psr.icc
 437         rd      %fprs, %o1                      ! get fprs
 438         and     %o1, FPRS_FEF, %o1              ! mask out dirty upper/lower
 439         sllx    %o1, PSR_FPRS_FEF_SHIFT, %o1    ! shift fef to V8 psr.ef
 440         or      %o0, %o1, %o0                   ! or into psr.ef
 441         set     V9_PSR_IMPLVER, %o1             ! SI assigned impl/ver: 0xef
 442         retl
 443         or      %o0, %o1, %o0                   ! or into psr.impl/ver
 444         SET_SIZE(getpsr)
 445 


 446 /*
 447  * Get current processor interrupt level
 448  */
 449 








 450         ENTRY(getpil)
 451         retl
 452         rdpr    %pil, %o0               ! delay slot - return current PIL
 453         SET_SIZE(getpil)
 454 











        /* setpil(pil): write %pil directly; no cpu_base_spl check, no return */
 455         ENTRY(setpil)
 456         retl
 457         wrpr    %g0, %o0, %pil          ! delay slot - %pil = pil
 458         SET_SIZE(setpil)
 459 

 460 

 461 /*
 462  * _insque(entryp, predp)
 463  *
 464  * Insert entryp after predp in a doubly linked list.
 465  */
 466 









        /* _insque(entryp=%o0, predp=%o1): link entryp after predp */
 467         ENTRY(_insque)
 468         ldn     [%o1], %g1              ! predp->forw
 469         stn     %o1, [%o0 + CPTRSIZE]   ! entryp->back = predp
 470         stn     %g1, [%o0]              ! entryp->forw = predp->forw
 471         stn     %o0, [%o1]              ! predp->forw = entryp
 472         retl
 473         stn     %o0, [%g1 + CPTRSIZE]   ! predp->forw->back = entryp
 474         SET_SIZE(_insque)
 475 


 476 /*
 477  * _remque(entryp)
 478  *
 479  * Remove entryp from a doubly linked list
 480  */
 481 









        /* _remque(entryp=%o0): unlink entryp; entryp's own links untouched */
 482         ENTRY(_remque)
 483         ldn     [%o0], %g1              ! entryp->forw
 484         ldn     [%o0 + CPTRSIZE], %g2   ! entryp->back
 485         stn     %g1, [%g2]              ! entryp->back->forw = entryp->forw
 486         retl
 487         stn     %g2, [%g1 + CPTRSIZE]   ! entryp->forw->back = entryp->back
 488         SET_SIZE(_remque)
 489 

 490 

 491 /*
 492  * strlen(str)
 493  *
 494  * Returns the number of non-NUL bytes in the string argument.
 495  *
 496  * XXX -  why is this here, rather than the traditional file?
 497  *        why does it have local labels which don't start with a `.'?
 498  */
 499 









 500         ENTRY(strlen)
 501         mov     %o0, %o1
 502         andcc   %o1, 3, %o3             ! is src word aligned
 503         bz      $nowalgnd
 504         clr     %o0                     ! length of non-zero bytes
 505         cmp     %o3, 2                  ! is src half-word aligned
 506         be      $s2algn
 507         cmp     %o3, 3                  ! src is byte aligned
 508         ldub    [%o1], %o3              ! move 1 or 3 bytes to align it
 509         inc     1, %o1                  ! in either case, safe to do a byte
 510         be      $s3algn
 511         tst     %o3
 512 $s1algn:
 513         bnz,a   $s2algn                 ! now go align dest
 514         inc     1, %o0
 515         b,a     $done
 516 
 517 $s2algn:
 518         lduh    [%o1], %o3              ! know src is half-byte aligned
 519         inc     2, %o1


 564         nop
 565 1:      andcc   %o2, %o5, %g0           ! check if second byte was zero
 566         bnz     1f
 567         srl     %o5, 8, %o5
 568 $done1:
 569         retl
 570         inc     %o0
 571 1:      andcc   %o2, %o5, %g0           ! check if third byte was zero
 572         bnz     1f
 573         andcc   %o2, 0xff, %g0          ! check if last byte is zero
 574 $done2:
 575         retl
 576         inc     2, %o0
 577 1:      bnz,a   3b
 578         inc     4, %o0                  ! count of bytes
 579 $done3:
 580         retl
 581         inc     3, %o0
 582         SET_SIZE(strlen)
 583 


 584 /*
 585  * Provide a C callable interface to the membar instruction.
 586  */
 587 















































































 588         ENTRY(membar_ldld)
             ! C-callable wrapper: membar #LoadLoad, issued in the retl
             ! delay slot (executes before the caller resumes).
 589         retl
 590         membar  #LoadLoad
 591         SET_SIZE(membar_ldld)
 592 
 593         ENTRY(membar_stld)
             ! C-callable wrapper: membar #StoreLoad.
 594         retl
 595         membar  #StoreLoad
 596         SET_SIZE(membar_stld)
 597 
 598         ENTRY(membar_ldst)
             ! C-callable wrapper: membar #LoadStore.
 599         retl
 600         membar  #LoadStore
 601         SET_SIZE(membar_ldst)
 602 
 603         ENTRY(membar_stst)
             ! C-callable wrapper: membar #StoreStore.
 604         retl
 605         membar  #StoreStore
 606         SET_SIZE(membar_stst)
 607 


 645         retl
 646         membar  #LoadStore|#StoreStore
 647         SET_SIZE(membar_stst_ldst)
 648         SET_SIZE(membar_ldst_stst)
 649 
 650         ENTRY(membar_lookaside)
             ! C-callable wrapper: membar #Lookaside (ordering for loads
             ! that may hit earlier stores to the same address).
 651         retl
 652         membar  #Lookaside
 653         SET_SIZE(membar_lookaside)
 654 
 655         ENTRY(membar_memissue)
             ! C-callable wrapper: membar #MemIssue.
 656         retl
 657         membar  #MemIssue
 658         SET_SIZE(membar_memissue)
 659 
 660         ENTRY(membar_sync)
             ! C-callable wrapper: membar #Sync (full barrier).
 661         retl
 662         membar  #Sync
 663         SET_SIZE(membar_sync)
 664 

 665 



































 666 /*
 667  * Since all of the fuword() variants are so similar, we have a macro to spit
 668  * them out.
 669  */
 670 
 671 #define FUWORD(NAME, LOAD, STORE, COPYOP)       \
 672         ENTRY(NAME);                            \
 673         sethi   %hi(1f), %o5;                   \
 674         ldn     [THREAD_REG + T_LOFAULT], %o3;  \
 675         or      %o5, %lo(1f), %o5;              \
 676         membar  #Sync;                          \
 677         stn     %o5, [THREAD_REG + T_LOFAULT];  \
 678         LOAD    [%o0]ASI_USER, %o2;             \
 679         membar  #Sync;                          \
 680         stn     %o3, [THREAD_REG + T_LOFAULT];  \
 681         mov     0, %o0;                         \
 682         retl;                                   \
 683         STORE   %o2, [%o1];                     \
 684 1:                                              \
 685         membar  #Sync;                          \
 686         stn     %o3, [THREAD_REG + T_LOFAULT];  \
 687         ldn     [THREAD_REG + T_COPYOPS], %o2;  \
 688         brz     %o2, 2f;                        \
 689         nop;                                    \
 690         ldn     [%o2 + COPYOP], %g1;            \
 691         jmp     %g1;                            \
 692         nop;                                    \
 693 2:                                              \
 694         retl;                                   \
 695         mov     -1, %o0;                        \
 696         SET_SIZE(NAME)
 697 
        ! Each FUWORD expansion fetches one user-space datum (address %o0,
        ! via ASI_USER) under a temporary t_lofault handler ("1:" above).
        ! Success: value stored at kernel address %o1, return 0.  Fault:
        ! t_lofault is restored, then control tails into the t_copyops
        ! handler at offset COPYOP if one is installed, else returns -1.
 698         FUWORD(fuword64, ldxa, stx, CP_FUWORD64)
 699         FUWORD(fuword32, lda, st, CP_FUWORD32)
 700         FUWORD(fuword16, lduha, sth, CP_FUWORD16)
 701         FUWORD(fuword8, lduba, stb, CP_FUWORD8)
 702 

 703 

























 704 /*
 705  * Since all of the suword() variants are so similar, we have a macro to spit
 706  * them out.
 707  */
 708 
 709 #define SUWORD(NAME, STORE, COPYOP)             \
 710         ENTRY(NAME);                            \
 711         sethi   %hi(1f), %o5;                   \
 712         ldn     [THREAD_REG + T_LOFAULT], %o3;  \
 713         or      %o5, %lo(1f), %o5;              \
 714         membar  #Sync;                          \
 715         stn     %o5, [THREAD_REG + T_LOFAULT];  \
 716         STORE   %o1, [%o0]ASI_USER;             \
 717         membar  #Sync;                          \
 718         stn     %o3, [THREAD_REG + T_LOFAULT];  \
 719         retl;                                   \
 720         clr     %o0;                            \
 721 1:                                              \
 722         membar  #Sync;                          \
 723         stn     %o3, [THREAD_REG + T_LOFAULT];  \
 724         ldn     [THREAD_REG + T_COPYOPS], %o2;  \
 725         brz     %o2, 2f;                        \
 726         nop;                                    \
 727         ldn     [%o2 + COPYOP], %g1;            \
 728         jmp     %g1;                            \
 729         nop;                                    \
 730 2:                                              \
 731         retl;                                   \
 732         mov     -1, %o0;                        \
 733         SET_SIZE(NAME)
 734 
        ! Each SUWORD expansion stores one datum (%o1) to the user-space
        ! address %o0 via ASI_USER under a temporary t_lofault handler
        ! ("1:" above).  Success: return 0.  Fault: t_lofault is restored,
        ! then control tails into the t_copyops handler at offset COPYOP
        ! if one is installed, else returns -1.
        ! Note: ENTRY(NAME) now ends with ";" for consistency with the
        ! FUWORD macro above (de-lint; the expansion is unchanged since
        ! ENTRY itself ends in a label).
 735         SUWORD(suword64, stxa, CP_SUWORD64)
 736         SUWORD(suword32, sta, CP_SUWORD32)
 737         SUWORD(suword16, stha, CP_SUWORD16)
 738         SUWORD(suword8, stba, CP_SUWORD8)
 739 


























 740         ENTRY(fuword8_noerr)
             ! _noerr fetch variants: no fault handling here — the caller
             ! is expected to have t_lofault / on_fault protection already
             ! in place.  Fetch from user address %o0 via ASI_USER, store
             ! the value at kernel address %o1.
             ! (de-lint: trailing whitespace removed from the lduba line)
 741         lduba   [%o0]ASI_USER, %o0
 742         retl
 743         stb     %o0, [%o1]              ! delay: *%o1 = byte
 744         SET_SIZE(fuword8_noerr)
 745 
 746         ENTRY(fuword16_noerr)
 747         lduha   [%o0]ASI_USER, %o0
 748         retl
 749         sth     %o0, [%o1]              ! delay: *%o1 = halfword
 750         SET_SIZE(fuword16_noerr)
 751 
 752         ENTRY(fuword32_noerr)
 753         lda     [%o0]ASI_USER, %o0
 754         retl
 755         st      %o0, [%o1]              ! delay: *%o1 = word
 756         SET_SIZE(fuword32_noerr)
 757 
 758         ENTRY(fuword64_noerr)
 759         ldxa    [%o0]ASI_USER, %o0
 760         retl
 761         stx     %o0, [%o1]              ! delay: *%o1 = doubleword
 762         SET_SIZE(fuword64_noerr)
 763 


























 764         ENTRY(suword8_noerr)
             ! _noerr store variants: no fault handling here — the caller
             ! is expected to have t_lofault / on_fault protection already
             ! in place.  Store value %o1 to user address %o0 via ASI_USER.
 765         retl
 766         stba    %o1, [%o0]ASI_USER      ! delay: store byte
 767         SET_SIZE(suword8_noerr)
 768 
 769         ENTRY(suword16_noerr)
 770         retl
 771         stha    %o1, [%o0]ASI_USER      ! delay: store halfword
 772         SET_SIZE(suword16_noerr)
 773 
 774         ENTRY(suword32_noerr)
 775         retl
 776         sta     %o1, [%o0]ASI_USER      ! delay: store word
 777         SET_SIZE(suword32_noerr)
 778 
 779         ENTRY(suword64_noerr)
 780         retl
 781         stxa    %o1, [%o0]ASI_USER      ! delay: store doubleword
 782         SET_SIZE(suword64_noerr)
 783 




































 784         .weak   subyte
             ! Legacy/width-generic aliases: subyte maps to the 8-bit
             ! suword routines; fulword/sulword map to the 64-bit variants
             ! under _LP64 and to the 32-bit variants otherwise.
 785         subyte=suword8
 786         .weak   subyte_noerr
 787         subyte_noerr=suword8_noerr
 788 #ifdef _LP64
 789         .weak   fulword
 790         fulword=fuword64
 791         .weak   fulword_noerr
 792         fulword_noerr=fuword64_noerr
 793         .weak   sulword
 794         sulword=suword64
 795         .weak   sulword_noerr
 796         sulword_noerr=suword64_noerr
 797 #else
 798         .weak   fulword
 799         fulword=fuword32
 800         .weak   fulword_noerr
 801         fulword_noerr=fuword32_noerr
 802         .weak   sulword
 803         sulword=suword32
 804         .weak   sulword_noerr
 805         sulword_noerr=suword32_noerr
 806 #endif  /* _LP64 */
 807 


 808 /*
 809  * We define rdtick here, but not for sun4v. On sun4v systems, the %tick
 810  * and %stick should not be read directly without considering the tick
 811  * and stick offset kernel variables introduced to support sun4v OS
 812  * suspension.
 813  */
 814 #if !defined (sun4v)
 815 








 816         ENTRY(rdtick)
             ! rdtick(): return the raw %tick register in %o0.
             ! Compiled out on sun4v (see the #if above): there %tick must
             ! be adjusted by the tick-offset variables instead.
 817         retl
 818         rd      %tick, %o0              ! delay: %o0 = %tick
 819         SET_SIZE(rdtick)
 820 


 821 #endif /* !sun4v */
 822 
 823 /*
 824  * Set tba to given address, no side effects.
 825  */

 826 







 827         ENTRY(set_tba)
             ! set_tba(addr): set the trap base address register to %o0
             ! and return the previous %tba value in %o0.
 828         mov     %o0, %o1                ! save new tba; free %o0 for return
 829         rdpr    %tba, %o0               ! %o0 = old tba (return value)
 830         wrpr    %o1, %tba               ! install the new tba
 831         retl
 832         nop
 833         SET_SIZE(set_tba)
 834 











 835         ENTRY(get_tba)
             ! get_tba(): return the current trap base address (%tba).
 836         retl
 837         rdpr    %tba, %o0               ! delay: %o0 = %tba
 838         SET_SIZE(get_tba)
 839 











 840         ENTRY_NP(setpstate)
             ! setpstate(val): write %o0 into the %pstate register.
 841         retl
 842         wrpr    %g0, %o0, %pstate       ! delay: %pstate = %o0
 843         SET_SIZE(setpstate)
 844 










 845         ENTRY_NP(getpstate)
             ! getpstate(): return the current %pstate register in %o0.
 846         retl
 847         rdpr    %pstate, %o0            ! delay: %o0 = %pstate
 848         SET_SIZE(getpstate)
 849 










 850         ENTRY_NP(dtrace_interrupt_disable)
             ! dtrace_interrupt_disable(): clear PSTATE_IE (disable
             ! interrupts) and return the previous %pstate in %o0, suitable
             ! for a later dtrace_interrupt_enable() call.
 851         rdpr    %pstate, %o0            ! %o0 = old %pstate (returned)
 852         andn    %o0, PSTATE_IE, %o1     ! %o1 = old %pstate with IE clear
 853         retl
 854         wrpr    %g0, %o1, %pstate       ! delay: install IE-cleared %pstate
 855         SET_SIZE(dtrace_interrupt_disable)
 856 











 857         ENTRY_NP(dtrace_interrupt_enable)
             ! dtrace_interrupt_enable(cookie): restore %pstate from the
             ! value previously returned by dtrace_interrupt_disable().
             ! (de-lint: trailing whitespace removed from the wrpr line)
 858         retl
 859         wrpr    %g0, %o0, %pstate
 860         SET_SIZE(dtrace_interrupt_enable)
 861 














 862 #ifdef SF_ERRATA_51
             ! Workaround path: when built with the SF_ERRATA_51 workaround
             ! (a Spitfire CPU erratum -- TODO(review): confirm exact
             ! erratum semantics against the sun4u workaround docs), the
             ! membar routines return via a branch to this aligned retl
             ! stub, executing the membar in the branch delay slot rather
             ! than in a retl delay slot.
 863         .align 32
 864         ENTRY(dtrace_membar_return)
 865         retl
 866         nop
 867         SET_SIZE(dtrace_membar_return)
 868 #define DTRACE_MEMBAR_RETURN    ba,pt %icc, dtrace_membar_return
 869 #else
 870 #define DTRACE_MEMBAR_RETURN    retl
 871 #endif
 872 
 873         ENTRY(dtrace_membar_producer)
             ! Producer-side barrier for DTrace: membar #StoreStore,
             ! executed in the return sequence's delay slot.
 874         DTRACE_MEMBAR_RETURN
 875         membar  #StoreStore
 876         SET_SIZE(dtrace_membar_producer)
 877 
 878         ENTRY(dtrace_membar_consumer)
             ! Consumer-side barrier for DTrace: membar #LoadLoad.
 879         DTRACE_MEMBAR_RETURN
 880         membar  #LoadLoad
 881         SET_SIZE(dtrace_membar_consumer)
 882 










 883         ENTRY_NP(dtrace_flush_windows)
             ! dtrace_flush_windows(): spill all active register windows
             ! to the stack via flushw (in the retl delay slot).
 884         retl
 885         flushw
 886         SET_SIZE(dtrace_flush_windows)
 887 













 888         /*
 889          * %g1  pcstack
 890          * %g2  iteration count
 891          * %g3  final %fp
 892          * %g4  final %i7
 893          * %g5  saved %cwp (so we can get back to the original window)
 894          *
 895          * %o0  pcstack / return value (iteration count)
 896          * %o1  limit / saved %cansave
 897          * %o2  lastfp
 898          * %o3  lastpc
 899          * %o4  saved %canrestore
 900          * %o5  saved %pstate (to restore interrupts)
 901          *
 902          * Note:  The frame pointer returned via lastfp is safe to use as
 903          *      long as getpcstack_top() returns either (0) or a value less
 904          *      than (limit).
 905          */
 906         ENTRY_NP(getpcstack_top)
 907 


 938         mov     %i7, %g4                ! aside so we can return them to our
 939                                         ! caller
 940 
 941         wrpr    %g0, %g5, %cwp          ! jump back to the original window
 942         wrpr    %g0, %o1, %cansave      ! and restore the original register
 943         wrpr    %g0, %o4, %canrestore   ! window state.
 944 2:
 945         stn     %g3, [%o2]              ! store the frame pointer and pc
 946         st      %g4, [%o3]              ! so our caller can continue the trace
 947 
 948         retl                            ! return to caller
 949         wrpr    %g0, %o5, %pstate       ! restore interrupts
 950 
 951 3:
 952         flushw                          ! flush register windows, then
 953         ldn     [%fp + STACK_BIAS + 14*CLONGSIZE], %g3  ! load initial fp
 954         ba      2b
 955         ldn     [%fp + STACK_BIAS + 15*CLONGSIZE], %g4  ! and pc
 956         SET_SIZE(getpcstack_top)
 957 











 958         ENTRY_NP(setwstate)
             ! setwstate(val): write %o0 into the %wstate register.
 959         retl
 960         wrpr    %g0, %o0, %wstate       ! delay: %wstate = %o0
 961         SET_SIZE(setwstate)
 962 

 963 









 964         ENTRY_NP(getwstate)
             ! getwstate(): return the current %wstate register in %o0.
 965         retl
 966         rdpr    %wstate, %o0            ! delay: %o0 = %wstate
 967         SET_SIZE(getwstate)
 968 

 969 

 970 /*
 971  * int panic_trigger(int *tp)
 972  *
 973  * A panic trigger is a word which is updated atomically and can only be set
 974  * once.  We atomically store 0xFF into the high byte and load the old value.
 975  * If the byte was 0xFF, the trigger has already been activated and we fail.
 976  * If the previous value was 0 or not 0xFF, we succeed.  This allows a
 977  * partially corrupt trigger to still trigger correctly.  DTrace has its own
 978  * version of this function to allow it to panic correctly from probe context.
 979  */

 980 








 981         ENTRY_NP(panic_trigger)
             ! panic_trigger(tp): atomically fire the trigger (see block
             ! comment above).  Returns 1 if this call activated the
             ! trigger, 0 if it was already 0xFF (already activated).
 982         ldstub  [%o0], %o0              ! store 0xFF, load byte into %o0
 983         cmp     %o0, 0xFF               ! compare %o0 to 0xFF
 984         set     1, %o1                  ! %o1 = 1
 985         be,a    0f                      ! if (%o0 == 0xFF) goto 0f (else annul)
 986         set     0, %o1                  ! delay - %o1 = 0
 987 0:      retl
 988         mov     %o1, %o0                ! return (%o1);
 989         SET_SIZE(panic_trigger)
 990 
 991         ENTRY_NP(dtrace_panic_trigger)
             ! dtrace_panic_trigger(tp): identical logic to panic_trigger(),
             ! kept separate so DTrace can panic safely from probe context
             ! (see block comment above).  Returns 1 if this call activated
             ! the trigger, 0 if it had already fired.
 992         ldstub  [%o0], %o0              ! store 0xFF, load byte into %o0
 993         cmp     %o0, 0xFF               ! compare %o0 to 0xFF
 994         set     1, %o1                  ! %o1 = 1
 995         be,a    0f                      ! if (%o0 == 0xFF) goto 0f (else annul)
 996         set     0, %o1                  ! delay - %o1 = 0
 997 0:      retl
 998         mov     %o1, %o0                ! return (%o1);
 999         SET_SIZE(dtrace_panic_trigger)
1000 


1001 /*
1002  * void vpanic(const char *format, va_list alist)
1003  *
1004  * The panic() and cmn_err() functions invoke vpanic() as a common entry point
1005  * into the panic code implemented in panicsys().  vpanic() is responsible
1006  * for passing through the format string and arguments, and constructing a
1007  * regs structure on the stack into which it saves the current register
1008  * values.  If we are not dying due to a fatal trap, these registers will
1009  * then be preserved in panicbuf as the current processor state.  Before
1010  * invoking panicsys(), vpanic() activates the first panic trigger (see
1011  * common/os/panic.c) and switches to the panic_stack if successful.  Note that
1012  * DTrace takes a slightly different panic path if it must panic from probe
1013  * context.  Instead of calling panic, it calls into dtrace_vpanic(), which
1014  * sets up the initial stack as vpanic does, calls dtrace_panic_trigger(), and
1015  * branches back into vpanic().
1016  */

1017 








1018         ENTRY_NP(vpanic)
1019 
1020         save    %sp, -SA(MINFRAME + REGSIZE), %sp       ! save and allocate regs
1021 
1022         !
1023         ! The v9 struct regs has a 64-bit r_tstate field, which we use here
1024         ! to store the %ccr, %asi, %pstate, and %cwp as they would appear
1025         ! in %tstate if a trap occurred.  We leave it up to the debugger to
1026         ! realize what happened and extract the register values.
1027         !
1028         rd      %ccr, %l0                               ! %l0 = %ccr
1029         sllx    %l0, TSTATE_CCR_SHIFT, %l0              ! %l0 <<= CCR_SHIFT
1030         rd      %asi, %l1                               ! %l1 = %asi
1031         sllx    %l1, TSTATE_ASI_SHIFT, %l1              ! %l1 <<= ASI_SHIFT
1032         or      %l0, %l1, %l0                           ! %l0 |= %l1
1033         rdpr    %pstate, %l1                            ! %l1 = %pstate
1034         sllx    %l1, TSTATE_PSTATE_SHIFT, %l1           ! %l1 <<= PSTATE_SHIFT
1035         or      %l0, %l1, %l0                           ! %l0 |= %l1
1036         rdpr    %cwp, %l1                               ! %l1 = %cwp
1037         sllx    %l1, TSTATE_CWP_SHIFT, %l1              ! %l1 <<= CWP_SHIFT


1125         sllx    %l1, TSTATE_CWP_SHIFT, %l1              ! %l1 <<= CWP_SHIFT
1126         or      %l0, %l1, %l0                           ! %l0 |= %l1
1127 
1128         set     dtrace_vpanic, %l1                      ! %l1 = %pc (vpanic)
1129         add     %l1, 4, %l2                             ! %l2 = %npc (vpanic+4)
1130         rd      %y, %l3                                 ! %l3 = %y
1131         !
1132         ! Flush register windows before panic_trigger() in order to avoid a
1133         ! problem that a dump hangs if flush_windows() causes another panic.
1134         !
1135         call    dtrace_flush_windows
1136         nop
1137 
1138         sethi   %hi(panic_quiesce), %o0
1139         call    dtrace_panic_trigger
1140         or      %o0, %lo(panic_quiesce), %o0            ! if (!panic_trigger(
1141 
1142         ba,a    vpanic_common
1143         SET_SIZE(dtrace_vpanic)
1144         












1145         ENTRY(get_subcc_ccr)
             ! get_subcc_ccr(a, b): return the %ccr condition codes
             ! produced by the subtraction a - b (%o0 - %o1), with %ccr
             ! cleared first so only subcc's effects are visible.
1146         wr      %g0, %ccr       ! clear condition codes
1147         subcc   %o0, %o1, %g0   ! set codes from %o0 - %o1; discard result
1148         retl
1149         rd      %ccr, %o0       ! return condition codes
1150         SET_SIZE(get_subcc_ccr)
1151 










1152         ENTRY_NP(ftrace_interrupt_disable)
             ! ftrace_interrupt_disable(): clear PSTATE_IE (disable
             ! interrupts) and return the previous %pstate in %o0, for a
             ! later ftrace_interrupt_enable() call.  Parallel to
             ! dtrace_interrupt_disable() above.
1153         rdpr    %pstate, %o0            ! %o0 = old %pstate (returned)
1154         andn    %o0, PSTATE_IE, %o1     ! %o1 = old %pstate with IE clear
1155         retl
1156         wrpr    %g0, %o1, %pstate       ! delay: install IE-cleared %pstate
1157         SET_SIZE(ftrace_interrupt_disable)
1158 











1159         ENTRY_NP(ftrace_interrupt_enable)
             ! ftrace_interrupt_enable(cookie): restore %pstate from the
             ! value previously returned by ftrace_interrupt_disable().
             ! (de-lint: trailing whitespace removed from the wrpr line)
1160         retl
1161         wrpr    %g0, %o0, %pstate
1162         SET_SIZE(ftrace_interrupt_enable)
1163