1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  23  * Use is subject to license terms.
  24  */
  25 
  26 #if defined(lint)
  27 #include <sys/types.h>
  28 #include <sys/thread.h>
  29 #else   /* lint */
  30 #include "assym.h"
  31 #endif  /* lint */
  32 
  33 #include <sys/asm_linkage.h>
  34 #include <sys/machthread.h>
  35 #include <sys/machcpuvar.h>
  36 #include <sys/intreg.h>
  37 #include <sys/cmn_err.h>
  38 #include <sys/ftrace.h>
  39 #include <sys/machasi.h>
  40 #include <sys/scb.h>
  41 #include <sys/error.h>
  42 #include <sys/mmu.h>
  43 #include <vm/hat_sfmmu.h>
  44 #define INTR_REPORT_SIZE        64
  45 
  46 #ifdef TRAPTRACE
  47 #include <sys/traptrace.h>
  48 #endif /* TRAPTRACE */
  49 
  50 #if defined(lint)
  51 
  52 void
  53 cpu_mondo(void)
  54 {}
  55 
  56 #else   /* lint */
  57 
  58 
  59 /*
  60  * (TT 0x7c, TL>0) CPU Mondo Queue Handler
  61  *      Globals are the Interrupt Globals.
  62  */
        ENTRY_NP(cpu_mondo)
        !
        !       Register Usage:-
        !       %g5     PC for fasttrap TL>0 handler
        !       %g1     arg 1
        !       %g2     arg 2
        !       %g3     queue base VA
        !       %g4     queue size mask
        !       %g6     head ptr
        !       %g7     tail ptr
        mov     CPU_MONDO_Q_HD, %g3
        ldxa    [%g3]ASI_QUEUE, %g6     ! %g6 = head ptr
        mov     CPU_MONDO_Q_TL, %g4
        ldxa    [%g4]ASI_QUEUE, %g7     ! %g7 = tail ptr
        cmp     %g6, %g7
        be,pn   %xcc, 3f                ! head == tail, queue empty; just retry
        nop

        CPU_ADDR(%g1,%g2)
        add     %g1, CPU_MCPU, %g2
        ldx     [%g2 + MCPU_CPU_Q_BASE], %g3    ! %g3 = queue base PA
        ldx     [%g2 + MCPU_CPU_Q_SIZE], %g4    ! queue size (power of 2)
        sub     %g4, 1, %g4             ! %g4 = queue size mask

        ! Load interrupt receive data registers 1 and 2 to fetch
        ! the arguments for the fast trap handler.
        !
        ! XXX - Since the data words in the interrupt report are not defined yet
        ! we assume that the consecutive words contain valid data and preserve
        ! sun4u's xcall mondo arguments.
        ! Register usage:
        !       %g5     PC for fasttrap TL>0 handler
        !       %g1     arg 1
        !       %g2     arg 2

        ldxa    [%g3 + %g6]ASI_MEM, %g5 ! get PC from q base + head
        add     %g6, 0x8, %g6           ! inc head
        ldxa    [%g3 + %g6]ASI_MEM, %g1 ! read data word 1
        add     %g6, 0x8, %g6           ! inc head
        ldxa    [%g3 + %g6]ASI_MEM, %g2 ! read data word 2
        add     %g6, (INTR_REPORT_SIZE - 16) , %g6 ! inc head to next record
        and     %g6, %g4, %g6           ! and size mask for wrap around
        mov     CPU_MONDO_Q_HD, %g3
        stxa    %g6, [%g3]ASI_QUEUE     ! store head pointer
        membar  #Sync                   ! make dequeue visible before dispatch

#ifdef TRAPTRACE
        TRACE_PTR(%g4, %g6)
        GET_TRACE_TICK(%g6, %g3)
        stxa    %g6, [%g4 + TRAP_ENT_TICK]%asi
        TRACE_SAVE_TL_GL_REGS(%g4, %g6)
        rdpr    %tt, %g6
        stha    %g6, [%g4 + TRAP_ENT_TT]%asi
        rdpr    %tpc, %g6
        stna    %g6, [%g4 + TRAP_ENT_TPC]%asi
        rdpr    %tstate, %g6
        stxa    %g6, [%g4 + TRAP_ENT_TSTATE]%asi
        stna    %sp, [%g4 + TRAP_ENT_SP]%asi
        stna    %g5, [%g4 + TRAP_ENT_TR]%asi    ! pc of the TL>0 handler
        stna    %g1, [%g4 + TRAP_ENT_F1]%asi    ! arg1
        stna    %g2, [%g4 + TRAP_ENT_F3]%asi    ! arg2
        mov     CPU_MONDO_Q_HD, %g6
        ldxa    [%g6]ASI_QUEUE, %g6             ! new head offset
        stna    %g6, [%g4 + TRAP_ENT_F2]%asi
        stna    %g7, [%g4 + TRAP_ENT_F4]%asi    ! tail offset
        TRACE_NEXT(%g4, %g6, %g3)
#endif /* TRAPTRACE */

        /*
         * For now catch invalid PC being passed via cpu_mondo queue
         */
        set     KERNELBASE, %g4
        cmp     %g5, %g4
        bl,pn   %xcc, 2f                ! branch if bad %pc (below KERNELBASE)
          nop


        /*
         * If this platform supports shared contexts and we are jumping
         * to OBP code, then we need to invalidate both contexts to prevent OBP
         * from corrupting the shared context registers.
         *
         * If shared contexts are not supported then the next two instructions
         * will be patched with:
         *
         * jmp       %g5
         * nop
         *
         */
        .global sfmmu_shctx_cpu_mondo_patch
sfmmu_shctx_cpu_mondo_patch:
        set     OFW_START_ADDR, %g4     ! Check if this a call into OBP?
        cmp     %g5, %g4
        bl,pt %xcc, 1f                  ! target below OBP range - not OBP
          nop
        set     OFW_END_ADDR, %g4
        cmp     %g5, %g4
        bg,pn %xcc, 1f                  ! target above OBP range - not OBP
          nop
        mov     MMU_PCONTEXT, %g3
        ldxa    [%g3]ASI_MMU_CTX, %g4
        cmp     %g4, INVALID_CONTEXT    ! Check if we are in kernel mode
        ble,pn %xcc, 1f                 ! or the primary context is invalid
          nop
        set     INVALID_CONTEXT, %g4    ! Invalidate contexts - compatibility
        stxa    %g4, [%g3]ASI_MMU_CTX   ! mode ensures shared contexts are also
        mov     MMU_SCONTEXT, %g3       ! invalidated.
        stxa    %g4, [%g3]ASI_MMU_CTX
        membar  #Sync
        mov     %o0, %g3                ! save output regs around the hcall
        mov     %o1, %g4
        mov     %o5, %g6
        clr     %o0                     ! Invalidate tsbs, set ntsb = 0
        clr     %o1                     ! and HV_TSB_INFO_PA = 0
        mov     MMU_TSB_CTXNON0, %o5
        ta      FAST_TRAP               ! set TSB info for user process
        brnz,a,pn %o0, ptl1_panic       ! nonzero %o0 = hypervisor call failed
          mov   PTL1_BAD_HCALL, %g1
        mov     %g3, %o0                ! restore output regs
        mov     %g4, %o1
        mov     %g6, %o5
1:
        jmp     %g5                     ! jump to traphandler
        nop
2:
        ! invalid trap handler, discard it for now; bump the counter so
        ! the drop is observable via cpu_mondo_inval
        set     cpu_mondo_inval, %g4
        ldx     [%g4], %g5
        inc     %g5
        stx     %g5, [%g4]
3:
        retry
        /* Never Reached */
        SET_SIZE(cpu_mondo)
 197 
 198 #endif /* lint */
 199 
 200 #if defined(lint)
 201 
 202 void
 203 dev_mondo(void)
 204 {}
 205 
 206 #else   /* lint */
 207 
 208 
 209 /*
 210  * (TT 0x7d, TL>0) Dev Mondo Queue Handler
 211  *      Globals are the Interrupt Globals.
 212  * We only process one interrupt at a time causing us to keep
 213  * taking this trap till the queue is empty.
 214  * We really should drain the whole queue for better performance
 215  * but this will do for now.
 216  */
        ENTRY_NP(dev_mondo)
        !
        !       Register Usage:-
        !       %g5     PC for fasttrap TL>0 handler
        !       %g1     arg 1
        !       %g2     arg 2
        !       %g3     queue base PA
        !       %g4     queue size mask
        !       %g6     head ptr
        !       %g7     tail ptr
        mov     DEV_MONDO_Q_HD, %g3
        ldxa    [%g3]ASI_QUEUE, %g6     ! %g6 = head ptr
        mov     DEV_MONDO_Q_TL, %g4
        ldxa    [%g4]ASI_QUEUE, %g7     ! %g7 = tail ptr
        cmp     %g6, %g7
        be,pn   %xcc, 0f                ! head == tail, queue empty; just retry
        nop

        CPU_ADDR(%g1,%g2)
        add     %g1, CPU_MCPU, %g2
        ldx     [%g2 + MCPU_DEV_Q_BASE], %g3    ! %g3 = queue base PA

        ! Register usage:
        !       %g5 - inum
        !       %g1 - cpu struct pointer used below in TRAPTRACE
        !
        ldxa    [%g3 + %g6]ASI_MEM, %g5 ! get inum from q base + head

        !
        ! We verify that inum is valid ( < MAXIVNUM). If it is greater
        ! than MAXIVNUM, we let setvecint_tl1 take care of it.
        !
        set     MAXIVNUM, %g4
        cmp     %g5, %g4
        bgeu,a,pn       %xcc, 1f
        ldx     [%g2 + MCPU_DEV_Q_SIZE], %g4    ! queue size - delay slot

        !
        !       Copy 64-byte payload to the *iv_payload if it is not NULL
        !
        set     intr_vec_table, %g1             ! %g1 = intr_vec_table
        sll     %g5, CPTRSHIFT, %g7             ! %g7 = offset to inum entry
                                                !       in the intr_vec_table
        add     %g1, %g7, %g7                   ! %g7 = &intr_vec_table[inum]
        ldn     [%g7], %g1                      ! %g1 = ptr to intr_vec_t (iv)

        !
        ! Verify the pointer to first intr_vec_t for a given inum and
        ! it should not be NULL. If this pointer is NULL, then it is a
        ! spurious interrupt. In this case, just call setvecint_tl1 and
        ! it will handle this spurious interrupt.
        !
        brz,a,pn        %g1, 1f                 ! if %g1 is NULL
        ldx     [%g2 + MCPU_DEV_Q_SIZE], %g4    ! queue size - delay slot

        ldx     [%g1 + IV_PAYLOAD_BUF], %g1     ! %g1 = iv->iv_payload_buf
        brz,a,pt        %g1, 1f                 ! if it is NULL
        ldx     [%g2 + MCPU_DEV_Q_SIZE], %g4    ! queue size - delay slot

        !
        !       Now move 64 byte payload from mondo queue to buf,
        !       8 bytes at a time (queue entry read via physical ASI_MEM).
        !
        mov     %g6, %g7                        ! %g7 = head ptr
        ldxa    [%g3 + %g7]ASI_MEM, %g4
        stx     %g4, [%g1 + 0]                  ! byte 0 - 7
        add     %g7, 8, %g7
        ldxa    [%g3 + %g7]ASI_MEM, %g4
        stx     %g4, [%g1 + 8]                  ! byte 8 - 15
        add     %g7, 8, %g7
        ldxa    [%g3 + %g7]ASI_MEM, %g4
        stx     %g4, [%g1 + 16]                 ! byte 16 - 23
        add     %g7, 8, %g7
        ldxa    [%g3 + %g7]ASI_MEM, %g4
        stx     %g4, [%g1 + 24]                 ! byte 24 - 31
        add     %g7, 8, %g7
        ldxa    [%g3 + %g7]ASI_MEM, %g4
        stx     %g4, [%g1 + 32]                 ! byte 32 - 39
        add     %g7, 8, %g7
        ldxa    [%g3 + %g7]ASI_MEM, %g4
        stx     %g4, [%g1 + 40]                 ! byte 40 - 47
        add     %g7, 8, %g7
        ldxa    [%g3 + %g7]ASI_MEM, %g4
        stx     %g4, [%g1 + 48]                 ! byte 48 - 55
        add     %g7, 8, %g7
        ldxa    [%g3 + %g7]ASI_MEM, %g4
        stx     %g4, [%g1 + 56]                 ! byte 56 - 63
        ldx     [%g2 + MCPU_DEV_Q_SIZE], %g4    ! queue size

1:      sub     %g4, 1, %g4             ! %g4 = queue size mask
        add     %g6, INTR_REPORT_SIZE , %g6 ! inc head to next record
        and     %g6, %g4, %g6           ! and mask for wrap around
        mov     DEV_MONDO_Q_HD, %g3
        stxa    %g6, [%g3]ASI_QUEUE     ! increment head offset
        membar  #Sync

#ifdef TRAPTRACE
        TRACE_PTR(%g4, %g6)
        GET_TRACE_TICK(%g6, %g3)
        stxa    %g6, [%g4 + TRAP_ENT_TICK]%asi
        TRACE_SAVE_TL_GL_REGS(%g4, %g6)
        rdpr    %tt, %g6
        stha    %g6, [%g4 + TRAP_ENT_TT]%asi
        rdpr    %tpc, %g6
        stna    %g6, [%g4 + TRAP_ENT_TPC]%asi
        rdpr    %tstate, %g6
        stxa    %g6, [%g4 + TRAP_ENT_TSTATE]%asi
        ! move head to sp
        ldx     [%g2 + MCPU_DEV_Q_BASE], %g6
        stna    %g6, [%g4 + TRAP_ENT_SP]%asi    ! Device Queue Base PA
        stna    %g5, [%g4 + TRAP_ENT_TR]%asi    ! Inum
        mov     DEV_MONDO_Q_HD, %g6
        ldxa    [%g6]ASI_QUEUE, %g6             ! New head offset
        stna    %g6, [%g4 + TRAP_ENT_F1]%asi
        ldx     [%g2 + MCPU_DEV_Q_SIZE], %g6
        stna    %g6, [%g4 + TRAP_ENT_F2]%asi    ! Q Size
        stna    %g7, [%g4 + TRAP_ENT_F3]%asi    ! tail offset
        stna    %g0, [%g4 + TRAP_ENT_F4]%asi
        TRACE_NEXT(%g4, %g6, %g3)
#endif /* TRAPTRACE */

        !
        ! setvecint_tl1 will do all the work, and finish with a retry
        !
        ba,pt   %xcc, setvecint_tl1
        mov     %g5, %g1                ! setvecint_tl1 expects inum in %g1

0:      retry

        /* Never Reached */
        SET_SIZE(dev_mondo)
 347 #endif /* lint */
 348 
 349 #if defined(lint)
 350 uint64_t cpu_mondo_inval;
 351 #else /* lint */
        .seg    ".data"

        /*
         * cpu_mondo_inval counts cpu_mondo interrupts that were dropped
         * because the handler PC pulled off the queue was invalid (below
         * KERNELBASE); incremented at label 2: in cpu_mondo above.
         */
        .global cpu_mondo_inval
        .align  8
cpu_mondo_inval:
        .skip   8

        .seg    ".text"
 359 #endif  /* lint */
 360 
 361 
 362 #if defined(lint)
 363 
 364 void
 365 resumable_error(void)
 366 {}
 367 
 368 #else   /* lint */
 369 
 370 /*
 * (TT 0x7e, TL>0) Resumable Error Queue Handler
 372  *      We keep a shadow copy of the queue in kernel buf.
 373  *      Read the resumable queue head and tail offset
 374  *      If there are entries on the queue, move them to
 375  *      the kernel buf, which is next to the resumable
 376  *      queue in the memory. Call C routine to process.
 377  */
        ENTRY_NP(resumable_error)
        mov     CPU_RQ_HD, %g4
        ldxa    [%g4]ASI_QUEUE, %g2             ! %g2 = Q head offset
        mov     CPU_RQ_TL, %g4
        ldxa    [%g4]ASI_QUEUE, %g3             ! %g3 = Q tail offset
        mov     %g2, %g6                        ! %g6 = working head copy
                                                ! (original head stays in %g2)

        cmp     %g6, %g3
        be,pn   %xcc, 0f                        ! head == tail, nothing queued
        nop

        CPU_ADDR(%g1, %g4)                      ! %g1 = cpu struct addr

2:      set     CPU_RQ_BASE_OFF, %g4
        ldx     [%g1 + %g4], %g4                ! %g4 = queue base PA
        add     %g6, %g4, %g4                   ! %g4 = PA of ER in Q
        set     CPU_RQ_SIZE, %g7                ! kernel buf sits right after
        add     %g4, %g7, %g7                   ! %g7=PA of ER in kernel buf

        ldxa    [%g7]ASI_MEM, %g5               ! %g5=first 8 byte of ER buf
        cmp     0, %g5                          ! nonzero => C code has not
        bne,pn  %xcc, 1f                        ! consumed previous entry yet
        nop

        /* Now we can move 64 bytes from queue to buf, 8 bytes at a time */
        set     0, %g5
        ldxa    [%g4 + %g5]ASI_MEM, %g1
        stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 0 - 7
        add     %g5, 8, %g5
        ldxa    [%g4 + %g5]ASI_MEM, %g1
        stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 8 - 15
        add     %g5, 8, %g5
        ldxa    [%g4 + %g5]ASI_MEM, %g1
        stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 16 - 23
        add     %g5, 8, %g5
        ldxa    [%g4 + %g5]ASI_MEM, %g1
        stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 24 - 31
        add     %g5, 8, %g5
        ldxa    [%g4 + %g5]ASI_MEM, %g1
        stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 32 - 39
        add     %g5, 8, %g5
        ldxa    [%g4 + %g5]ASI_MEM, %g1
        stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 40 - 47
        add     %g5, 8, %g5
        ldxa    [%g4 + %g5]ASI_MEM, %g1
        stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 48 - 55
        add     %g5, 8, %g5
        ldxa    [%g4 + %g5]ASI_MEM, %g1
        stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 56 - 63

        set     CPU_RQ_SIZE, %g5                ! %g5 = queue size
        sub     %g5, 1, %g5                     ! %g5 = queue size mask

        add     %g6, Q_ENTRY_SIZE, %g6          ! increment q head to next
        and     %g6, %g5, %g6                   ! size mask for wrap around
        cmp     %g6, %g3                        ! head == tail ??

        bne,pn  %xcc, 2b                        ! still have more to process
        nop

        /*
         * head equals to tail now, we can update the queue head
         * and call sys_trap
         */
        mov     CPU_RQ_HD, %g4
        stxa    %g6, [%g4]ASI_QUEUE             ! update head offset
        membar  #Sync

        /*
         * Call sys_trap at PIL 14 unless we're already at PIL 15. %g2.l is
         * head offset(arg2) and %g3 is tail
         * offset(arg3).
         */
        set     process_resumable_error, %g1
        rdpr    %pil, %g4
        cmp     %g4, PIL_14
        ba      sys_trap
          movl  %icc, PIL_14, %g4               ! raise to PIL_14 only if below

        /*
         * We are here because the C routine is not able to process
         * errors in time. So the first 8 bytes of ER in buf has not
         * been cleared. We update head to tail and call sys_trap to
         * print out an error message
         */

1:      mov     CPU_RQ_HD, %g4
        stxa    %g3, [%g4]ASI_QUEUE             ! set head equal to tail
        membar  #Sync

        /*
         * Set %g2 to %g6, which is current head offset. %g2
         * is arg2 of the C routine. %g3 is the tail offset,
         * which is arg3 of the C routine.
         * Call rq_overflow at PIL 14 unless we're already at PIL 15.
         */
        mov     %g6, %g2
        set     rq_overflow, %g1
        rdpr    %pil, %g4
        cmp     %g4, PIL_14
        ba      sys_trap
          movl  %icc, PIL_14, %g4               ! raise to PIL_14 only if below

0:      retry

        /*NOTREACHED*/
        SET_SIZE(resumable_error)
 485 #endif /* lint */
 486 
 487 #if defined(lint)
 488 
 489 void
 490 nonresumable_error(void)
 491 {}
 492 
 493 #else   /* lint */
 494 
 495 /*
 * (TT 0x7f, TL>0) Non-resumable Error Queue Handler
 497  *      We keep a shadow copy of the queue in kernel buf.
 498  *      Read non-resumable queue head and tail offset
 499  *      If there are entries on the queue, move them to
 500  *      the kernel buf, which is next to the non-resumable
 501  *      queue in the memory. Call C routine to process.
 502  */
        ENTRY_NP(nonresumable_error)
        mov     CPU_NRQ_HD, %g4
        ldxa    [%g4]ASI_QUEUE, %g2             ! %g2 = Q head offset
        mov     CPU_NRQ_TL, %g4
        ldxa    [%g4]ASI_QUEUE, %g3             ! %g3 = Q tail offset

        cmp     %g2, %g3
        be,pn   %xcc, 0f                        ! head == tail, nothing queued
        nop

        /* force %gl to 1 as sys_trap requires */
        wrpr    %g0, 1, %gl
        ! %gl changed, so re-read head/tail into the new global set
        mov     CPU_NRQ_HD, %g4
        ldxa    [%g4]ASI_QUEUE, %g2             ! %g2 = Q head offset
        mov     CPU_NRQ_TL, %g4
        ldxa    [%g4]ASI_QUEUE, %g3             ! %g3 = Q tail offset
        mov     %g2, %g6                        ! %g6 = working head copy
                                                ! (original head stays in %g2)

        CPU_PADDR(%g1, %g4)                     ! %g1 = cpu struct paddr

2:      set     CPU_NRQ_BASE_OFF, %g4
        ldxa    [%g1 + %g4]ASI_MEM, %g4         ! %g4 = queue base PA
        add     %g6, %g4, %g4                   ! %g4 = PA of ER in Q
        set     CPU_NRQ_SIZE, %g7               ! kernel buf sits right after
        add     %g4, %g7, %g7                   ! %g7 = PA of ER in kernel buf

        ldxa    [%g7]ASI_MEM, %g5               ! %g5 = first 8 byte of ER buf
        cmp     0, %g5                          ! nonzero => C code has not
        bne,pn  %xcc, 1f                        ! consumed previous entry yet
        nop

        /* Now we can move 64 bytes from queue to buf, 8 bytes at a time */
        set     0, %g5
        ldxa    [%g4 + %g5]ASI_MEM, %g1
        stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 0 - 7
        add     %g5, 8, %g5
        ldxa    [%g4 + %g5]ASI_MEM, %g1
        stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 8 - 15
        add     %g5, 8, %g5
        ldxa    [%g4 + %g5]ASI_MEM, %g1
        stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 16 - 23
        add     %g5, 8, %g5
        ldxa    [%g4 + %g5]ASI_MEM, %g1
        stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 24 - 31
        add     %g5, 8, %g5
        ldxa    [%g4 + %g5]ASI_MEM, %g1
        stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 32 - 39
        add     %g5, 8, %g5
        ldxa    [%g4 + %g5]ASI_MEM, %g1
        stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 40 - 47
        add     %g5, 8, %g5
        ldxa    [%g4 + %g5]ASI_MEM, %g1
        stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 48 - 55
        add     %g5, 8, %g5
        ldxa    [%g4 + %g5]ASI_MEM, %g1
        stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 56 - 63

        set     CPU_NRQ_SIZE, %g5               ! %g5 = queue size
        sub     %g5, 1, %g5                     ! %g5 = queue size mask

        add     %g6, Q_ENTRY_SIZE, %g6          ! increment q head to next
        and     %g6, %g5, %g6                   ! size mask for wrap around
        cmp     %g6, %g3                        ! head == tail ??

        bne,pn  %xcc, 2b                        ! still have more to process
        nop

        /*
         * head equals to tail now, we can update the queue head
         * and call sys_trap
         */
        mov     CPU_NRQ_HD, %g4
        stxa    %g6, [%g4]ASI_QUEUE             ! update head offset
        membar  #Sync

        /*
         * Call sys_trap. %g2 is TL(arg2), %g3 is head and tail
         * offset(arg3).
         * %g3 looks like following:
         *      +--------------------+--------------------+
         *      |   tail offset      |    head offset     |
         *      +--------------------+--------------------+
         *      63                 32 31                 0
         *
         * Run at PIL 14 unless we're already at PIL 15.
         */
        sllx    %g3, 32, %g3                    ! %g3.h = tail offset
        or      %g3, %g2, %g3                   ! %g3.l = head offset
        rdpr    %tl, %g2                        ! %g2 = current tl

        /*
         * Now check if the first error that sent us here was caused
         * in user's SPILL/FILL trap. If it was, we call sys_trap to
         * kill the user process. Several considerations:
         * - If multiple nonresumable errors happen, we only check the
         *   first one. Nonresumable errors cause system either panic
         *   or kill the user process. So the system has already
         *   panic'ed or killed user process after processing the first
         *   error. Therefore, no need to check if other error packet
         *   for this type of error.
         * - Errors happen in user's SPILL/FILL trap will bring us at
         *   TL = 2.
         * - We need to lower TL to 1 to get the trap type and tstate.
         *   We don't go back to TL = 2 so no need to save states.
         */
        cmp     %g2, 2
        bne,pt  %xcc, 3f                        ! if tl != 2
        nop
        /* Check to see if the trap pc is in a window spill/fill handling */
        rdpr    %tpc, %g4
        /* tpc should be in the trap table */
        set     trap_table, %g5
        cmp     %g4, %g5
        blu,pt  %xcc, 3f                        ! below trap table - not it
        nop
        set     etrap_table, %g5
        cmp     %g4, %g5
        bgeu,pt %xcc, 3f                        ! past end of trap table
        nop
        /* Set tl to 1 in order to read tt[1] and tstate[1] */
        wrpr    %g0, 1, %tl
        rdpr    %tt, %g4                        ! %g4 = tt[1]
        /* Check if tt[1] is a window trap */
        and     %g4, WTRAP_TTMASK, %g4
        cmp     %g4, WTRAP_TYPE
        bne,pt  %xcc, 3f
        nop
        rdpr    %tstate, %g5                    ! %g5 = tstate[1]
        btst    TSTATE_PRIV, %g5
        bnz     %xcc, 3f                        ! Is it from user code?
        nop
        /*
         * Now we know the error happened in user's SPILL/FILL trap.
         * Turn on the user spill/fill flag in %g2
         */
        mov     1, %g4
        sllx    %g4, ERRH_U_SPILL_FILL_SHIFT, %g4
        or      %g2, %g4, %g2                   ! turn on flag in %g2

3:      sub     %g2, 1, %g2                     ! %g2.l = previous tl

        set     process_nonresumable_error, %g1
        rdpr    %pil, %g4
        cmp     %g4, PIL_14
        ba      sys_trap
          movl  %icc, PIL_14, %g4               ! raise to PIL_14 only if below

        /*
         * We are here because the C routine is not able to process
         * errors in time. So the first 8 bytes of ER in buf has not
         * been cleared. We call sys_trap to panic.
         * Run at PIL 14 unless we're already at PIL 15.
         */
1:      set     nrq_overflow, %g1
        rdpr    %pil, %g4
        cmp     %g4, PIL_14
        ba      sys_trap
          movl  %icc, PIL_14, %g4               ! raise to PIL_14 only if below

0:      retry

        /*NOTREACHED*/
        SET_SIZE(nonresumable_error)
 666 #endif /* lint */