    
de-linting of .s files
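
The change below strips the lint scaffolding from mach_interrupt.s: each handler used to be bracketed by #if defined(lint) / #else / #endif, with an empty C stub on the lint side (and, for cpu_mondo_inval, a C variable declaration) so that lint could see C definitions for the assembly entry points. The diff deletes those guards and stubs and keeps only the real assembly plus an unconditional #include "assym.h". In outline, the pattern removed from every hunk is:

    #if defined(lint)
    void
    cpu_mondo(void)
    {}
    #else   /* lint */
            ENTRY_NP(cpu_mondo)
            ...                             /* real TL>0 handler */
            SET_SIZE(cpu_mondo)
    #endif  /* lint */

which becomes just:

            ENTRY_NP(cpu_mondo)
            ...                             /* real TL>0 handler */
            SET_SIZE(cpu_mondo)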
    
      
    
    
          --- old/usr/src/uts/sun4v/ml/mach_interrupt.s
          +++ new/usr/src/uts/sun4v/ml/mach_interrupt.s
   1    1  /*
   2    2   * CDDL HEADER START
   3    3   *
   4    4   * The contents of this file are subject to the terms of the
   5    5   * Common Development and Distribution License (the "License").
   6    6   * You may not use this file except in compliance with the License.
   7    7   *
   8    8   * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9    9   * or http://www.opensolaris.org/os/licensing.
  10   10   * See the License for the specific language governing permissions
  11   11   * and limitations under the License.
  12   12   *
  13   13   * When distributing Covered Code, include this CDDL HEADER in each
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  23   23   * Use is subject to license terms.
  24   24   */
  25   25  
  26      -#if defined(lint)
  27      -#include <sys/types.h>
  28      -#include <sys/thread.h>
  29      -#else   /* lint */
  30   26  #include "assym.h"
  31      -#endif  /* lint */
  32   27  
  33   28  #include <sys/asm_linkage.h>
  34   29  #include <sys/machthread.h>
  35   30  #include <sys/machcpuvar.h>
  36   31  #include <sys/intreg.h>
  37   32  #include <sys/cmn_err.h>
  38   33  #include <sys/ftrace.h>
  39   34  #include <sys/machasi.h>
  40   35  #include <sys/scb.h>
  41   36  #include <sys/error.h>
  42   37  #include <sys/mmu.h>
  43   38  #include <vm/hat_sfmmu.h>
  44   39  #define INTR_REPORT_SIZE        64
  45   40  
  46   41  #ifdef TRAPTRACE
  47   42  #include <sys/traptrace.h>
  48   43  #endif /* TRAPTRACE */
  49   44  
  50      -#if defined(lint)
  51   45  
  52      -void
  53      -cpu_mondo(void)
  54      -{}
  55      -
  56      -#else   /* lint */
  57      -
  58      -
  59   46  /*
  60   47   * (TT 0x7c, TL>0) CPU Mondo Queue Handler
  61   48   *      Globals are the Interrupt Globals.
  62   49   */
  63   50          ENTRY_NP(cpu_mondo)
  64   51          !
  65   52          !       Register Usage:-
  66   53          !       %g5     PC for fasttrap TL>0 handler
  67   54          !       %g1     arg 1   
  68   55          !       %g2     arg 2   
  69   56          !       %g3     queue base VA 
  70   57          !       %g4     queue size mask 
  71   58          !       %g6     head ptr
  72   59          !       %g7     tail ptr        
  73   60          mov     CPU_MONDO_Q_HD, %g3     
  74   61          ldxa    [%g3]ASI_QUEUE, %g6     ! %g6 = head ptr 
  75   62          mov     CPU_MONDO_Q_TL, %g4     
  76   63          ldxa    [%g4]ASI_QUEUE, %g7     ! %g7 = tail ptr 
  77   64          cmp     %g6, %g7
  78   65          be,pn   %xcc, 3f                ! head == tail
  79   66          nop
  80   67          
  81   68          CPU_ADDR(%g1,%g2)
  82   69          add     %g1, CPU_MCPU, %g2
  83   70          ldx     [%g2 + MCPU_CPU_Q_BASE], %g3    ! %g3 = queue base PA
  84   71          ldx     [%g2 + MCPU_CPU_Q_SIZE], %g4    ! queue size
  85   72          sub     %g4, 1, %g4             ! %g4 = queue size mask 
  86   73  
  87   74          ! Load interrupt receive data registers 1 and 2 to fetch
  88   75          ! the arguments for the fast trap handler.
  89   76          !
  90   77          ! XXX - Since the data words in the interrupt report are not defined yet 
  91   78          ! we assume that the consective words contain valid data and preserve
  92   79          ! sun4u's xcall mondo arguments. 
  93   80          ! Register usage:
  94   81          !       %g5     PC for fasttrap TL>0 handler
  95   82          !       %g1     arg 1   
  96   83          !       %g2     arg 2   
  97   84  
  98   85          ldxa    [%g3 + %g6]ASI_MEM, %g5 ! get PC from q base + head
  99   86          add     %g6, 0x8, %g6           ! inc head
 100   87          ldxa    [%g3 + %g6]ASI_MEM, %g1 ! read data word 1
 101   88          add     %g6, 0x8, %g6           ! inc head
 102   89          ldxa    [%g3 + %g6]ASI_MEM, %g2 ! read data word 2
 103   90          add     %g6, (INTR_REPORT_SIZE - 16) , %g6 ! inc head to next record    
 104   91          and     %g6, %g4, %g6           ! and size mask for wrap around 
 105   92          mov     CPU_MONDO_Q_HD, %g3     
 106   93          stxa    %g6, [%g3]ASI_QUEUE     ! store head pointer 
 107   94          membar  #Sync
 108   95  
 109   96  #ifdef TRAPTRACE
 110   97          TRACE_PTR(%g4, %g6)
 111   98          GET_TRACE_TICK(%g6, %g3)
 112   99          stxa    %g6, [%g4 + TRAP_ENT_TICK]%asi
 113  100          TRACE_SAVE_TL_GL_REGS(%g4, %g6)
 114  101          rdpr    %tt, %g6
 115  102          stha    %g6, [%g4 + TRAP_ENT_TT]%asi
 116  103          rdpr    %tpc, %g6
 117  104          stna    %g6, [%g4 + TRAP_ENT_TPC]%asi
 118  105          rdpr    %tstate, %g6
 119  106          stxa    %g6, [%g4 + TRAP_ENT_TSTATE]%asi
 120  107          stna    %sp, [%g4 + TRAP_ENT_SP]%asi
 121  108          stna    %g5, [%g4 + TRAP_ENT_TR]%asi    ! pc of the TL>0 handler
 122  109          stna    %g1, [%g4 + TRAP_ENT_F1]%asi    ! arg1
 123  110          stna    %g2, [%g4 + TRAP_ENT_F3]%asi    ! arg2
 124  111          mov     CPU_MONDO_Q_HD, %g6
 125  112          ldxa    [%g6]ASI_QUEUE, %g6             ! new head offset
 126  113          stna    %g6, [%g4 + TRAP_ENT_F2]%asi
 127  114          stna    %g7, [%g4 + TRAP_ENT_F4]%asi    ! tail offset
 128  115          TRACE_NEXT(%g4, %g6, %g3)
 129  116  #endif /* TRAPTRACE */
 130  117  
 131  118          /*
 132  119           * For now catch invalid PC being passed via cpu_mondo queue
 133  120           */
 134  121          set     KERNELBASE, %g4
 135  122          cmp     %g5, %g4
 136  123          bl,pn   %xcc, 2f                ! branch if bad %pc
 137  124            nop
 138  125          
 139  126  
 140  127          /*
 141  128           * If this platform supports shared contexts and we are jumping
 142  129           * to OBP code, then we need to invalidate both contexts to prevent OBP
 143  130           * from corrupting the shared context registers.
 144  131           *
 145  132           * If shared contexts are not supported then the next two instructions
 146  133           * will be patched with:
 147  134           *
 148  135           * jmp       %g5
 149  136           * nop
 150  137           *
 151  138           */
 152  139          .global sfmmu_shctx_cpu_mondo_patch
 153  140  sfmmu_shctx_cpu_mondo_patch:
 154  141          set     OFW_START_ADDR, %g4     ! Check if this a call into OBP?
 155  142          cmp     %g5, %g4
 156  143          bl,pt %xcc, 1f
 157  144            nop
 158  145          set     OFW_END_ADDR, %g4       
 159  146          cmp     %g5, %g4
 160  147          bg,pn %xcc, 1f          
 161  148            nop
 162  149          mov     MMU_PCONTEXT, %g3
 163  150          ldxa    [%g3]ASI_MMU_CTX, %g4
 164  151          cmp     %g4, INVALID_CONTEXT    ! Check if we are in kernel mode
 165  152          ble,pn %xcc, 1f                 ! or the primary context is invalid
 166  153            nop
 167  154          set     INVALID_CONTEXT, %g4    ! Invalidate contexts - compatability
 168  155          stxa    %g4, [%g3]ASI_MMU_CTX   ! mode ensures shared contexts are also
 169  156          mov     MMU_SCONTEXT, %g3       ! invalidated.
 170  157          stxa    %g4, [%g3]ASI_MMU_CTX
 171  158          membar  #Sync
 172  159          mov     %o0, %g3                ! save output regs
 173  160          mov     %o1, %g4
 174  161          mov     %o5, %g6
 175  162          clr     %o0                     ! Invalidate tsbs, set ntsb = 0
 176  163          clr     %o1                     ! and HV_TSB_INFO_PA = 0
 177  164          mov     MMU_TSB_CTXNON0, %o5
 178  165          ta      FAST_TRAP               ! set TSB info for user process
 179  166          brnz,a,pn %o0, ptl1_panic
 180  167            mov   PTL1_BAD_HCALL, %g1
 181  168          mov     %g3, %o0                ! restore output regs
 182  169          mov     %g4, %o1
 183  170          mov     %g6, %o5
 184  171  1:
 185  172          jmp     %g5                     ! jump to traphandler
 186  173          nop
 187  174  2:
  
 188  175          ! invalid trap handler, discard it for now
 189  176          set     cpu_mondo_inval, %g4
 190  177          ldx     [%g4], %g5
 191  178          inc     %g5
 192  179          stx     %g5, [%g4]
 193  180  3:
 194  181          retry
 195  182          /* Never Reached */
 196  183          SET_SIZE(cpu_mondo)
 197  184  
 198      -#endif /* lint */
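
The cpu_mondo body itself is untouched by this diff; only the lint guards go away. For readers following along: the handler reads the CPU mondo queue head and tail offsets through ASI_QUEUE, and if the queue is non-empty it pulls the TL>0 handler PC and two argument words from the 64-byte interrupt report at the head, advances the head (masked with size - 1 for wrap-around), stores it back, and jumps to the handler once the PC has been checked against KERNELBASE and, for targets inside OBP, the MMU contexts have been invalidated at the sfmmu_shctx_cpu_mondo_patch patch point. A rough C rendering of that dequeue arithmetic follows; mondo_queue_t, mondo_record_t, mondo_dequeue and pc_targets_obp are hypothetical names, not kernel interfaces.

    /* Sketch only: cpu_mondo's dequeue arithmetic in portable C. */
    #include <stdint.h>

    #define INTR_REPORT_SIZE    64          /* one interrupt report, as above */

    typedef struct mondo_queue {
            uint64_t        *base;          /* queue base */
            uint64_t        size;           /* queue size in bytes, power of two */
            uint64_t        head;           /* byte offset of next report */
            uint64_t        tail;           /* byte offset past the last report */
    } mondo_queue_t;

    typedef struct mondo_record {
            uint64_t        pc;             /* PC of the fast trap TL>0 handler */
            uint64_t        arg1;           /* xcall argument 1 */
            uint64_t        arg2;           /* xcall argument 2 */
    } mondo_record_t;

    /* Returns 1 with *rec filled in, or 0 when head == tail (handler retries). */
    static int
    mondo_dequeue(mondo_queue_t *q, mondo_record_t *rec)
    {
            uint64_t mask = q->size - 1;    /* queue size mask */
            uint64_t word;

            if (q->head == q->tail)
                    return (0);

            word = q->head / 8;             /* index of the 8-byte word at the head */
            rec->pc   = q->base[word];      /* word 0: handler PC */
            rec->arg1 = q->base[word + 1];  /* word 1 */
            rec->arg2 = q->base[word + 2];  /* word 2 */

            /* advance to the next 64-byte report, wrapping with the size mask */
            q->head = (q->head + INTR_REPORT_SIZE) & mask;
            return (1);
    }

    /* The OBP range check guarding the context invalidation (bounds are parameters here). */
    static int
    pc_targets_obp(uint64_t pc, uint64_t ofw_start, uint64_t ofw_end)
    {
            return (pc >= ofw_start && pc <= ofw_end);
    }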
 199  185  
 200      -#if defined(lint)
 201      -
 202      -void
 203      -dev_mondo(void)
 204      -{}
 205      -
 206      -#else   /* lint */
 207      -
 208      -
 209  186  /*
 210  187   * (TT 0x7d, TL>0) Dev Mondo Queue Handler
 211  188   *      Globals are the Interrupt Globals.
 212  189   * We only process one interrupt at a time causing us to keep
 213  190   * taking this trap till the queue is empty.
 214  191   * We really should drain the whole queue for better performance
 215  192   * but this will do for now.
 216  193   */
 217  194          ENTRY_NP(dev_mondo)
 218  195          !
 219  196          !       Register Usage:-
 220  197          !       %g5     PC for fasttrap TL>0 handler
 221  198          !       %g1     arg 1   
 222  199          !       %g2     arg 2   
 223  200          !       %g3     queue base PA 
 224  201          !       %g4     queue size mask 
 225  202          !       %g6     head ptr
 226  203          !       %g7     tail ptr        
 227  204          mov     DEV_MONDO_Q_HD, %g3     
 228  205          ldxa    [%g3]ASI_QUEUE, %g6     ! %g6 = head ptr 
 229  206          mov     DEV_MONDO_Q_TL, %g4     
 230  207          ldxa    [%g4]ASI_QUEUE, %g7     ! %g7 = tail ptr 
 231  208          cmp     %g6, %g7
 232  209          be,pn   %xcc, 0f                ! head == tail
 233  210          nop
 234  211  
 235  212          CPU_ADDR(%g1,%g2)
 236  213          add     %g1, CPU_MCPU, %g2
 237  214          ldx     [%g2 + MCPU_DEV_Q_BASE], %g3    ! %g3 = queue base PA
 238  215  
 239  216          ! Register usage:
 240  217          !       %g5 - inum
 241  218          !       %g1 - cpu struct pointer used below in TRAPTRACE 
 242  219          !
 243  220          ldxa    [%g3 + %g6]ASI_MEM, %g5 ! get inum from q base + head
 244  221  
 245  222          !
 246  223          ! We verify that inum is valid ( < MAXVNUM). If it is greater
 247  224          ! than MAXVNUM, we let setvecint_tl1 take care of it.
 248  225          !
 249  226          set     MAXIVNUM, %g4
 250  227          cmp     %g5, %g4
 251  228          bgeu,a,pn       %xcc, 1f
 252  229          ldx     [%g2 + MCPU_DEV_Q_SIZE], %g4    ! queue size - delay slot
 253  230  
 254  231          !
 255  232          !       Copy 64-byte payload to the *iv_payload if it is not NULL
 256  233          !
 257  234          set     intr_vec_table, %g1             ! %g1 = intr_vec_table
 258  235          sll     %g5, CPTRSHIFT, %g7             ! %g7 = offset to inum entry
 259  236                                                  !       in the intr_vec_table
 260  237          add     %g1, %g7, %g7                   ! %g7 = &intr_vec_table[inum]
 261  238          ldn     [%g7], %g1                      ! %g1 = ptr to intr_vec_t (iv)
 262  239  
 263  240          !
 264  241          ! Verify the pointer to first intr_vec_t for a given inum and
 265  242          ! it should not be NULL. If this pointer is NULL, then it is a
 266  243          ! spurious interrupt. In this case, just call setvecint_tl1 and
 267  244          ! it will handle this spurious interrupt.
 268  245          !
 269  246          brz,a,pn        %g1, 1f                 ! if %g1 is NULL
 270  247          ldx     [%g2 + MCPU_DEV_Q_SIZE], %g4    ! queue size - delay slot
 271  248  
 272  249          ldx     [%g1 + IV_PAYLOAD_BUF], %g1     ! %g1 = iv->iv_payload_buf
 273  250          brz,a,pt        %g1, 1f                 ! if it is NULL
 274  251          ldx     [%g2 + MCPU_DEV_Q_SIZE], %g4    ! queue size - delay slot
 275  252  
 276  253          !
 277  254          !       Now move 64 byte payload from mondo queue to buf        
 278  255          !
 279  256          mov     %g6, %g7                        ! %g7 = head ptr 
 280  257          ldxa    [%g3 + %g7]ASI_MEM, %g4
 281  258          stx     %g4, [%g1 + 0]                  ! byte 0 - 7
 282  259          add     %g7, 8, %g7
 283  260          ldxa    [%g3 + %g7]ASI_MEM, %g4
 284  261          stx     %g4, [%g1 + 8]                  ! byte 8 - 15
 285  262          add     %g7, 8, %g7
 286  263          ldxa    [%g3 + %g7]ASI_MEM, %g4
 287  264          stx     %g4, [%g1 + 16]                 ! byte 16 - 23
 288  265          add     %g7, 8, %g7
 289  266          ldxa    [%g3 + %g7]ASI_MEM, %g4
 290  267          stx     %g4, [%g1 + 24]                 ! byte 24 - 31
 291  268          add     %g7, 8, %g7
 292  269          ldxa    [%g3 + %g7]ASI_MEM, %g4
 293  270          stx     %g4, [%g1 + 32]                 ! byte 32 - 39
 294  271          add     %g7, 8, %g7
 295  272          ldxa    [%g3 + %g7]ASI_MEM, %g4
 296  273          stx     %g4, [%g1 + 40]                 ! byte 40 - 47
 297  274          add     %g7, 8, %g7
 298  275          ldxa    [%g3 + %g7]ASI_MEM, %g4
 299  276          stx     %g4, [%g1 + 48]                 ! byte 48 - 55
 300  277          add     %g7, 8, %g7
 301  278          ldxa    [%g3 + %g7]ASI_MEM, %g4
 302  279          stx     %g4, [%g1 + 56]                 ! byte 56 - 63
 303  280          ldx     [%g2 + MCPU_DEV_Q_SIZE], %g4    ! queue size
 304  281  
 305  282  1:      sub     %g4, 1, %g4             ! %g4 = queue size mask 
 306  283          add     %g6, INTR_REPORT_SIZE , %g6 ! inc head to next record   
 307  284          and     %g6, %g4, %g6           ! and mask for wrap around      
 308  285          mov     DEV_MONDO_Q_HD, %g3     
 309  286          stxa    %g6, [%g3]ASI_QUEUE     ! increment head offset 
 310  287          membar  #Sync
 311  288  
 312  289  #ifdef TRAPTRACE
 313  290          TRACE_PTR(%g4, %g6)
 314  291          GET_TRACE_TICK(%g6, %g3)
 315  292          stxa    %g6, [%g4 + TRAP_ENT_TICK]%asi
 316  293          TRACE_SAVE_TL_GL_REGS(%g4, %g6)
 317  294          rdpr    %tt, %g6
 318  295          stha    %g6, [%g4 + TRAP_ENT_TT]%asi
 319  296          rdpr    %tpc, %g6
 320  297          stna    %g6, [%g4 + TRAP_ENT_TPC]%asi
 321  298          rdpr    %tstate, %g6
 322  299          stxa    %g6, [%g4 + TRAP_ENT_TSTATE]%asi
 323  300          ! move head to sp
 324  301          ldx     [%g2 + MCPU_DEV_Q_BASE], %g6
 325  302          stna    %g6, [%g4 + TRAP_ENT_SP]%asi    ! Device Queue Base PA
 326  303          stna    %g5, [%g4 + TRAP_ENT_TR]%asi    ! Inum 
 327  304          mov     DEV_MONDO_Q_HD, %g6     
 328  305          ldxa    [%g6]ASI_QUEUE, %g6             ! New head offset 
 329  306          stna    %g6, [%g4 + TRAP_ENT_F1]%asi
 330  307          ldx     [%g2 + MCPU_DEV_Q_SIZE], %g6
 331  308          stna    %g6, [%g4 + TRAP_ENT_F2]%asi    ! Q Size        
 332  309          stna    %g7, [%g4 + TRAP_ENT_F3]%asi    ! tail offset
 333  310          stna    %g0, [%g4 + TRAP_ENT_F4]%asi
 334  311          TRACE_NEXT(%g4, %g6, %g3)
 335  312  #endif /* TRAPTRACE */
 336  313  
  
 337  314          !
 338  315          ! setvecint_tl1 will do all the work, and finish with a retry
 339  316          !
 340  317          ba,pt   %xcc, setvecint_tl1
 341  318          mov     %g5, %g1                ! setvecint_tl1 expects inum in %g1
 342  319  
 343  320  0:      retry 
 344  321  
 345  322          /* Never Reached */
 346  323          SET_SIZE(dev_mondo)
 347      -#endif /* lint */
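
dev_mondo, likewise unchanged apart from the lint guards, dequeues one device interrupt number per trap: it compares the DEV mondo queue head and tail, reads the inum at the head, and only when the inum is below MAXIVNUM and intr_vec_table[inum] points at an intr_vec_t with a non-NULL iv_payload_buf does it copy the 64-byte mondo payload into that buffer; it then bumps the head by INTR_REPORT_SIZE and branches to setvecint_tl1 with the inum in %g1. A hedged C sketch of the payload decision, where ivec_table and MAXIVNUM_SKETCH are illustrative stand-ins for intr_vec_table and MAXIVNUM:

    /* Sketch only: the payload-copy decision in dev_mondo. */
    #include <stdint.h>
    #include <string.h>

    #define INTR_REPORT_SIZE    64
    #define MAXIVNUM_SKETCH     2048        /* placeholder bound */

    typedef struct intr_vec {
            uint8_t         *iv_payload_buf;        /* NULL: no payload wanted */
    } intr_vec_t;

    extern intr_vec_t *ivec_table[MAXIVNUM_SKETCH];

    /*
     * Copy the 64-byte report for one dequeued inum.  Out-of-range and
     * unregistered inums are left for the vector code (setvecint_tl1 in
     * the real handler) to treat as spurious.
     */
    static void
    dev_mondo_payload(uint64_t inum, const uint8_t report[INTR_REPORT_SIZE])
    {
            intr_vec_t *iv;

            if (inum >= MAXIVNUM_SKETCH)
                    return;
            iv = ivec_table[inum];
            if (iv == NULL || iv->iv_payload_buf == NULL)
                    return;
            (void) memcpy(iv->iv_payload_buf, report, INTR_REPORT_SIZE);
    }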
 348  324  
 349      -#if defined(lint)
 350      -uint64_t cpu_mondo_inval;
 351      -#else /* lint */
 352  325          .seg    ".data"
 353  326          .global cpu_mondo_inval
 354  327          .align  8
 355  328  cpu_mondo_inval:
 356  329          .skip   8
 357  330  
 358  331          .seg    ".text"
 359      -#endif  /* lint */
 360  332  
 361  333  
 362      -#if defined(lint)
 363      -
 364      -void
 365      -resumable_error(void)
 366      -{}
 367      -
 368      -#else   /* lint */
 369      -
 370  334  /*
 371  335   * (TT 0x7e, TL>0) Resumeable Error Queue Handler
 372  336   *      We keep a shadow copy of the queue in kernel buf.
 373  337   *      Read the resumable queue head and tail offset
 374  338   *      If there are entries on the queue, move them to
 375  339   *      the kernel buf, which is next to the resumable
 376  340   *      queue in the memory. Call C routine to process.
 377  341   */
 378  342          ENTRY_NP(resumable_error)
 379  343          mov     CPU_RQ_HD, %g4
 380  344          ldxa    [%g4]ASI_QUEUE, %g2             ! %g2 = Q head offset 
 381  345          mov     CPU_RQ_TL, %g4
 382  346          ldxa    [%g4]ASI_QUEUE, %g3             ! %g3 = Q tail offset
 383  347          mov     %g2, %g6                        ! save head in %g2
 384  348  
 385  349          cmp     %g6, %g3
 386  350          be,pn   %xcc, 0f                        ! head == tail
 387  351          nop
 388  352  
 389  353          CPU_ADDR(%g1, %g4)                      ! %g1 = cpu struct addr
 390  354  
 391  355  2:      set     CPU_RQ_BASE_OFF, %g4
 392  356          ldx     [%g1 + %g4], %g4                ! %g4 = queue base PA
 393  357          add     %g6, %g4, %g4                   ! %g4 = PA of ER in Q           
 394  358          set     CPU_RQ_SIZE, %g7
 395  359          add     %g4, %g7, %g7                   ! %g7=PA of ER in kernel buf
 396  360  
 397  361          ldxa    [%g7]ASI_MEM, %g5               ! %g5=first 8 byte of ER buf
 398  362          cmp     0, %g5
 399  363          bne,pn  %xcc, 1f                        ! first 8 byte is not 0
 400  364          nop
 401  365  
 402  366          /* Now we can move 64 bytes from queue to buf */
 403  367          set     0, %g5
 404  368          ldxa    [%g4 + %g5]ASI_MEM, %g1
 405  369          stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 0 - 7    
 406  370          add     %g5, 8, %g5
 407  371          ldxa    [%g4 + %g5]ASI_MEM, %g1
 408  372          stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 8 - 15
 409  373          add     %g5, 8, %g5
 410  374          ldxa    [%g4 + %g5]ASI_MEM, %g1
 411  375          stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 16 - 23
 412  376          add     %g5, 8, %g5
 413  377          ldxa    [%g4 + %g5]ASI_MEM, %g1
 414  378          stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 24 - 31
 415  379          add     %g5, 8, %g5
 416  380          ldxa    [%g4 + %g5]ASI_MEM, %g1
 417  381          stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 32 - 39
 418  382          add     %g5, 8, %g5
 419  383          ldxa    [%g4 + %g5]ASI_MEM, %g1
 420  384          stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 40 - 47
 421  385          add     %g5, 8, %g5
 422  386          ldxa    [%g4 + %g5]ASI_MEM, %g1
 423  387          stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 48 - 55
 424  388          add     %g5, 8, %g5
 425  389          ldxa    [%g4 + %g5]ASI_MEM, %g1
 426  390          stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 56 - 63
 427  391  
 428  392          set     CPU_RQ_SIZE, %g5                ! %g5 = queue size
 429  393          sub     %g5, 1, %g5                     ! %g5 = queu size mask
 430  394  
 431  395          add     %g6, Q_ENTRY_SIZE, %g6          ! increment q head to next
 432  396          and     %g6, %g5, %g6                   ! size mask for warp around
 433  397          cmp     %g6, %g3                        ! head == tail ??
 434  398  
 435  399          bne,pn  %xcc, 2b                        ! still have more to process
 436  400          nop
 437  401  
 438  402          /*
 439  403           * head equals to tail now, we can update the queue head 
 440  404           * and call sys_trap
 441  405           */
 442  406          mov     CPU_RQ_HD, %g4
 443  407          stxa    %g6, [%g4]ASI_QUEUE             ! update head offset
 444  408          membar  #Sync
 445  409          
 446  410          /*
 447  411           * Call sys_trap at PIL 14 unless we're already at PIL 15. %g2.l is
 448  412           * head offset(arg2) and %g3 is tail
 449  413           * offset(arg3).
 450  414           */
 451  415          set     process_resumable_error, %g1
 452  416          rdpr    %pil, %g4
 453  417          cmp     %g4, PIL_14
 454  418          ba      sys_trap
 455  419            movl  %icc, PIL_14, %g4
 456  420  
 457  421          /*
 458  422           * We are here because the C routine is not able to process
 459  423           * errors in time. So the first 8 bytes of ER in buf has not
 460  424           * been cleared. We update head to tail and call sys_trap to
 461  425           * print out an error message
 462  426           */
 463  427          
 464  428  1:      mov     CPU_RQ_HD, %g4
 465  429          stxa    %g3, [%g4]ASI_QUEUE             ! set head equal to tail
 466  430          membar  #Sync
 467  431  
 468  432          /*
 469  433           * Set %g2 to %g6, which is current head offset. %g2 
 470  434           * is arg2 of the C routine. %g3 is the tail offset,
 471  435           * which is arg3 of the C routine.
 472  436           * Call rq_overflow at PIL 14 unless we're already at PIL 15.
 473  437           */
 474  438          mov     %g6, %g2
  
 475  439          set     rq_overflow, %g1
 476  440          rdpr    %pil, %g4
 477  441          cmp     %g4, PIL_14
 478  442          ba      sys_trap
 479  443            movl  %icc, PIL_14, %g4
 480  444  
 481  445  0:      retry
 482  446  
 483  447          /*NOTREACHED*/
 484  448          SET_SIZE(resumable_error)
 485      -#endif /* lint */
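
resumable_error drains the resumable error queue into the shadow buffer that sits CPU_RQ_SIZE bytes past the queue, one Q_ENTRY_SIZE (64-byte) report at a time, but only while the first 8 bytes of the destination slot are zero, i.e. while the C code has already consumed the previous report. It then updates the head and enters process_resumable_error through sys_trap at PIL 14 (or the current PIL if already at 15); if a slot is still busy it sets head = tail and takes the rq_overflow path instead. A sketch of the drain loop under those assumptions, with illustrative names (erq_t, resumable_drain, Q_ENTRY_SIZE_SKETCH):

    /* Sketch only: the resumable_error drain loop. */
    #include <stdint.h>
    #include <string.h>

    #define Q_ENTRY_SIZE_SKETCH     64      /* stand-in for Q_ENTRY_SIZE */

    typedef struct erq {
            uint8_t         *base;          /* error queue base */
            uint64_t        size;           /* queue size, power of two */
            uint64_t        head;
            uint64_t        tail;
    } erq_t;

    /*
     * Move every pending report into its shadow slot (located q->size
     * bytes past the report itself).  Returns 0 on success, -1 when a
     * shadow slot is still nonzero -- the case the real handler reports
     * through rq_overflow after setting head = tail.
     */
    static int
    resumable_drain(erq_t *q)
    {
            uint64_t mask = q->size - 1;

            while (q->head != q->tail) {
                    uint8_t *src = q->base + q->head;
                    uint8_t *dst = src + q->size;   /* kernel buf follows the queue */
                    uint64_t first8;

                    (void) memcpy(&first8, dst, sizeof (first8));
                    if (first8 != 0) {
                            q->head = q->tail;      /* overflow: drop and report */
                            return (-1);
                    }
                    (void) memcpy(dst, src, Q_ENTRY_SIZE_SKETCH);
                    q->head = (q->head + Q_ENTRY_SIZE_SKETCH) & mask;
            }
            return (0);
    }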
 486  449  
 487      -#if defined(lint)
 488      -
 489      -void
 490      -nonresumable_error(void)
 491      -{}
 492      -
 493      -#else   /* lint */
 494      -
 495  450  /*
 496  451   * (TT 0x7f, TL>0) Non-resumeable Error Queue Handler
 497  452   *      We keep a shadow copy of the queue in kernel buf.
 498  453   *      Read non-resumable queue head and tail offset
 499  454   *      If there are entries on the queue, move them to
 500  455   *      the kernel buf, which is next to the non-resumable
 501  456   *      queue in the memory. Call C routine to process.
 502  457   */
 503  458          ENTRY_NP(nonresumable_error)
 504  459          mov     CPU_NRQ_HD, %g4
 505  460          ldxa    [%g4]ASI_QUEUE, %g2             ! %g2 = Q head offset 
 506  461          mov     CPU_NRQ_TL, %g4
 507  462          ldxa    [%g4]ASI_QUEUE, %g3             ! %g3 = Q tail offset
 508  463  
 509  464          cmp     %g2, %g3
 510  465          be,pn   %xcc, 0f                        ! head == tail
 511  466          nop
 512  467  
 513  468          /* force %gl to 1 as sys_trap requires */
 514  469          wrpr    %g0, 1, %gl
 515  470          mov     CPU_NRQ_HD, %g4
 516  471          ldxa    [%g4]ASI_QUEUE, %g2             ! %g2 = Q head offset 
 517  472          mov     CPU_NRQ_TL, %g4
 518  473          ldxa    [%g4]ASI_QUEUE, %g3             ! %g3 = Q tail offset
 519  474          mov     %g2, %g6                        ! save head in %g2
 520  475  
 521  476          CPU_PADDR(%g1, %g4)                     ! %g1 = cpu struct paddr
 522  477  
 523  478  2:      set     CPU_NRQ_BASE_OFF, %g4
 524  479          ldxa    [%g1 + %g4]ASI_MEM, %g4         ! %g4 = queue base PA
 525  480          add     %g6, %g4, %g4                   ! %g4 = PA of ER in Q           
 526  481          set     CPU_NRQ_SIZE, %g7
 527  482          add     %g4, %g7, %g7                   ! %g7 = PA of ER in kernel buf
 528  483  
 529  484          ldxa    [%g7]ASI_MEM, %g5               ! %g5 = first 8 byte of ER buf
 530  485          cmp     0, %g5
 531  486          bne,pn  %xcc, 1f                        ! first 8 byte is not 0
 532  487          nop
 533  488  
 534  489          /* Now we can move 64 bytes from queue to buf */
 535  490          set     0, %g5
 536  491          ldxa    [%g4 + %g5]ASI_MEM, %g1
 537  492          stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 0 - 7    
 538  493          add     %g5, 8, %g5
 539  494          ldxa    [%g4 + %g5]ASI_MEM, %g1
 540  495          stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 8 - 15
 541  496          add     %g5, 8, %g5
 542  497          ldxa    [%g4 + %g5]ASI_MEM, %g1
 543  498          stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 16 - 23
 544  499          add     %g5, 8, %g5
 545  500          ldxa    [%g4 + %g5]ASI_MEM, %g1
 546  501          stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 24 - 31
 547  502          add     %g5, 8, %g5
 548  503          ldxa    [%g4 + %g5]ASI_MEM, %g1
 549  504          stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 32 - 39
 550  505          add     %g5, 8, %g5
 551  506          ldxa    [%g4 + %g5]ASI_MEM, %g1
 552  507          stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 40 - 47
 553  508          add     %g5, 8, %g5
 554  509          ldxa    [%g4 + %g5]ASI_MEM, %g1
 555  510          stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 48 - 55
 556  511          add     %g5, 8, %g5
 557  512          ldxa    [%g4 + %g5]ASI_MEM, %g1
 558  513          stxa    %g1, [%g7 + %g5]ASI_MEM         ! byte 56 - 63
 559  514  
 560  515          set     CPU_NRQ_SIZE, %g5               ! %g5 = queue size
 561  516          sub     %g5, 1, %g5                     ! %g5 = queu size mask
 562  517  
 563  518          add     %g6, Q_ENTRY_SIZE, %g6          ! increment q head to next
 564  519          and     %g6, %g5, %g6                   ! size mask for warp around
 565  520          cmp     %g6, %g3                        ! head == tail ??
 566  521  
 567  522          bne,pn  %xcc, 2b                        ! still have more to process
 568  523          nop
 569  524  
 570  525          /*
 571  526           * head equals to tail now, we can update the queue head 
 572  527           * and call sys_trap
 573  528           */
 574  529          mov     CPU_NRQ_HD, %g4
 575  530          stxa    %g6, [%g4]ASI_QUEUE             ! update head offset
 576  531          membar  #Sync
 577  532  
 578  533          /*
 579  534           * Call sys_trap. %g2 is TL(arg2), %g3 is head and tail
 580  535           * offset(arg3).
 581  536           * %g3 looks like following:
 582  537           *      +--------------------+--------------------+
 583  538           *      |   tail offset      |    head offset     |
 584  539           *      +--------------------+--------------------+
 585  540           *      63                 32 31                 0
 586  541           *
 587  542           * Run at PIL 14 unless we're already at PIL 15.
 588  543           */
 589  544          sllx    %g3, 32, %g3                    ! %g3.h = tail offset
 590  545          or      %g3, %g2, %g3                   ! %g3.l = head offset
 591  546          rdpr    %tl, %g2                        ! %g2 = current tl
 592  547  
 593  548          /*
 594  549           * Now check if the first error that sent us here was caused
 595  550           * in user's SPILL/FILL trap. If it was, we call sys_trap to
 596  551           * kill the user process. Several considerations:
 597  552           * - If multiple nonresumable errors happen, we only check the
 598  553           *   first one. Nonresumable errors cause system either panic
 599  554           *   or kill the user process. So the system has already
 600  555           *   panic'ed or killed user process after processing the first
 601  556           *   error. Therefore, no need to check if other error packet
 602  557           *   for this type of error.
 603  558           * - Errors happen in user's SPILL/FILL trap will bring us at
 604  559           *   TL = 2.
 605  560           * - We need to lower TL to 1 to get the trap type and tstate.
 606  561           *   We don't go back to TL = 2 so no need to save states.
 607  562           */
 608  563          cmp     %g2, 2  
 609  564          bne,pt  %xcc, 3f                        ! if tl != 2
 610  565          nop
 611  566          /* Check to see if the trap pc is in a window spill/fill handling */
 612  567          rdpr    %tpc, %g4
 613  568          /* tpc should be in the trap table */
 614  569          set     trap_table, %g5
 615  570          cmp     %g4, %g5
 616  571          blu,pt  %xcc, 3f
 617  572          nop
 618  573          set     etrap_table, %g5
 619  574          cmp     %g4, %g5
 620  575          bgeu,pt %xcc, 3f
 621  576          nop     
 622  577          /* Set tl to 1 in order to read tt[1] and tstate[1] */
 623  578          wrpr    %g0, 1, %tl
 624  579          rdpr    %tt, %g4                        ! %g4 = tt[1]
 625  580          /* Check if tt[1] is a window trap */
 626  581          and     %g4, WTRAP_TTMASK, %g4
 627  582          cmp     %g4, WTRAP_TYPE
 628  583          bne,pt  %xcc, 3f
 629  584          nop
 630  585          rdpr    %tstate, %g5                    ! %g5 = tstate[1]
 631  586          btst    TSTATE_PRIV, %g5
 632  587          bnz     %xcc, 3f                        ! Is it from user code?
 633  588          nop
 634  589          /*
 635  590           * Now we know the error happened in user's SPILL/FILL trap.
 636  591           * Turn on the user spill/fill flag in %g2
 637  592           */
 638  593          mov     1, %g4
 639  594          sllx    %g4, ERRH_U_SPILL_FILL_SHIFT, %g4
 640  595          or      %g2, %g4, %g2                   ! turn on flag in %g2
 641  596          
 642  597  3:      sub     %g2, 1, %g2                     ! %g2.l = previous tl
 643  598  
 644  599          set     process_nonresumable_error, %g1
 645  600          rdpr    %pil, %g4
 646  601          cmp     %g4, PIL_14
 647  602          ba      sys_trap
 648  603            movl  %icc, PIL_14, %g4
 649  604  
 650  605          /*
 651  606           * We are here because the C routine is not able to process
 652  607           * errors in time. So the first 8 bytes of ER in buf has not
 653  608           * been cleared. We call sys_trap to panic.
 654  609           * Run at PIL 14 unless we're already at PIL 15.
 655  610           */
  
 656  611  1:      set     nrq_overflow, %g1
 657  612          rdpr    %pil, %g4
 658  613          cmp     %g4, PIL_14
 659  614          ba      sys_trap
 660  615            movl  %icc, PIL_14, %g4
 661  616  
 662  617  0:      retry
 663  618  
 664  619          /*NOTREACHED*/
 665  620          SET_SIZE(nonresumable_error)
 666      -#endif /* lint */
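
nonresumable_error performs the same drain against the non-resumable queue (using CPU_NRQ_* and physical addressing after forcing %gl to 1), then builds its sys_trap arguments: arg3 packs the tail offset into bits 63:32 and the head offset into bits 31:0, exactly as the layout diagram in the handler shows, and arg2 carries the previous trap level with a flag OR-ed in when the first error hit a user window spill/fill handler (TL == 2, %tpc inside trap_table..etrap_table, tt[1] a window trap, tstate[1] unprivileged). A small C sketch of that argument packing; the flag's bit position comes from ERRH_U_SPILL_FILL_SHIFT in the real code, so the value below is only a placeholder:

    /* Sketch only: how nonresumable_error packs its sys_trap arguments. */
    #include <stdint.h>

    #define ERRH_U_SPILL_FILL_SHIFT_SKETCH      63  /* placeholder, not the real constant */

    /* arg3: tail offset in bits 63:32, head offset in bits 31:0 */
    static uint64_t
    nrq_pack_offsets(uint32_t head_off, uint32_t tail_off)
    {
            return (((uint64_t)tail_off << 32) | head_off);
    }

    /* arg2: previous trap level, with the user spill/fill flag OR-ed in if set */
    static uint64_t
    nrq_pack_tl(uint64_t cur_tl, int user_spill_fill)
    {
            uint64_t arg = cur_tl - 1;

            if (user_spill_fill)
                    arg |= (UINT64_C(1) << ERRH_U_SPILL_FILL_SHIFT_SKETCH);
            return (arg);
    }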
    