Print this page
de-linting of .s files

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/sun4u/cpu/us3_cheetahplus_asm.s
          +++ new/usr/src/uts/sun4u/cpu/us3_cheetahplus_asm.s
↓ open down ↓ 17 lines elided ↑ open up ↑
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
  23   23   * Use is subject to license terms.
  24   24   *
  25   25   * Assembly code support for the Cheetah+ module
  26   26   */
  27   27  
  28      -#pragma ident   "%Z%%M% %I%     %E% SMI"
  29      -
  30      -#if !defined(lint)
  31   28  #include "assym.h"
  32      -#endif  /* lint */
  33   29  
  34   30  #include <sys/asm_linkage.h>
  35   31  #include <sys/mmu.h>
  36   32  #include <vm/hat_sfmmu.h>
  37   33  #include <sys/machparam.h>
  38   34  #include <sys/machcpuvar.h>
  39   35  #include <sys/machthread.h>
  40   36  #include <sys/machtrap.h>
  41   37  #include <sys/privregs.h>
  42   38  #include <sys/asm_linkage.h>
↓ open down ↓ 5 lines elided ↑ open up ↑
  48   44  #include <sys/async.h>
  49   45  #include <sys/clock.h>
  50   46  #include <sys/cheetahasm.h>
  51   47  #include <sys/cmpregs.h>
  52   48  
  53   49  #ifdef TRAPTRACE
  54   50  #include <sys/traptrace.h>
  55   51  #endif /* TRAPTRACE */
  56   52  
  57   53  
  58      -#if !defined(lint)
  59      -
  60   54  /* BEGIN CSTYLED */
  61   55  
  62   56  /*
  63   57   * Cheetah+ version to reflush an Ecache line by index.
  64   58   *
  65   59   * By default we assume the Ecache is 2-way so we flush both
  66   60   * ways. Even if the cache is direct-mapped no harm will come
  67   61   * from performing the flush twice, apart from perhaps a performance
  68   62   * penalty.
  69   63   *
↓ open down ↓ 70 lines elided ↑ open up ↑
 140  134          and     physaddr, l2_idx_out, l3_idx_out;                               \
 141  135          set     PN_L3_IDX_DISP_FLUSH, l2_idx_out;                               \
 142  136          or      l2_idx_out, l3_idx_out, l3_idx_out;                             \
 143  137          set     PN_L2_SET_SIZE, l2_idx_out;                                     \
 144  138          sub     l2_idx_out, 1, l2_idx_out;                                      \
 145  139          and     physaddr, l2_idx_out, l2_idx_out;                               \
 146  140          set     PN_L2_IDX_DISP_FLUSH, scr3;                                     \
 147  141          or      l2_idx_out, scr3, l2_idx_out;                                   \
 148  142          PN_ECACHE_REFLUSH_LINE(l2_idx_out, l3_idx_out, scr3, scr4)
 149  143  
 150      -#endif  /* !lint */
 151      -
 152  144  /*
 153  145   * Fast ECC error at TL>0 handler
 154  146   * We get here via trap 70 at TL>0->Software trap 0 at TL>0.  We enter
 155  147   * this routine with %g1 and %g2 already saved in %tpc, %tnpc and %tstate.
 156  148   * For a complete description of the Fast ECC at TL>0 handling see the
 157  149   * comment block "Cheetah/Cheetah+ Fast ECC at TL>0 trap strategy" in
 158  150   * us3_common_asm.s
 159  151   */
 160      -#if defined(lint)
 161  152  
 162      -void
 163      -fast_ecc_tl1_err(void)
 164      -{}
 165      -
 166      -#else   /* lint */
 167      -
 168  153          .section ".text"
 169  154          .align  64
 170  155          ENTRY_NP(fast_ecc_tl1_err)
 171  156  
 172  157          /*
 173  158           * This macro turns off the D$/I$ if they are on and saves their
 174  159           * original state in ch_err_tl1_tmp, saves all the %g registers in the
 175  160           * ch_err_tl1_data structure, updates the ch_err_tl1_flags and saves
 176  161           * the %tpc in ch_err_tl1_tpc.  At the end of this macro, %g1 will
 177  162           * point to the ch_err_tl1_data structure and the original D$/I$ state
↓ open down ↓ 228 lines elided ↑ open up ↑
 406  391           */
 407  392          CH_ERR_TL1_EXIT;
 408  393  
 409  394          /*
 410  395           * Establish panic exit label.
 411  396           */
 412  397          CH_ERR_TL1_PANIC_EXIT(fecc_tl1_err);
 413  398  
 414  399          SET_SIZE(fast_ecc_tl1_err)
 415  400  
 416      -#endif  /* lint */
 417  401  
 418      -
 419      -#if defined(lint)
 420      -/*
 421      - * scrubphys - Pass in the aligned physical memory address
 422      - * that you want to scrub, along with the ecache set size.
 423      - *
 424      - *      1) Displacement flush the E$ line corresponding to %addr.
 425      - *         The first ldxa guarantees that the %addr is no longer in
 426      - *         M, O, or E (goes to I or S (if instruction fetch also happens).
 427      - *      2) "Write" the data using a CAS %addr,%g0,%g0.
 428      - *         The casxa guarantees a transition from I to M or S to M.
 429      - *      3) Displacement flush the E$ line corresponding to %addr.
 430      - *         The second ldxa pushes the M line out of the ecache, into the
 431      - *         writeback buffers, on the way to memory.
 432      - *      4) The "membar #Sync" pushes the cache line out of the writeback
 433      - *         buffers onto the bus, on the way to dram finally.
 434      - *
 435      - * This is a modified version of the algorithm suggested by Gary Lauterbach.
 436      - * In theory the CAS %addr,%g0,%g0 is supposed to mark the addr's cache line
 437      - * as modified, but then we found out that for spitfire, if it misses in the
 438      - * E$ it will probably install as an M, but if it hits in the E$, then it
 439      - * will stay E, if the store doesn't happen. So the first displacement flush
 440      - * should ensure that the CAS will miss in the E$.  Arrgh.
 441      - */
 442      -/* ARGSUSED */
 443      -void
 444      -scrubphys(uint64_t paddr, int ecache_set_size)
 445      -{}
 446      -
 447      -#else   /* lint */
 448  402          ENTRY(scrubphys)
 449  403          rdpr    %pstate, %o4
 450  404          andn    %o4, PSTATE_IE | PSTATE_AM, %o5
 451  405          wrpr    %o5, %g0, %pstate       ! clear IE, AM bits
 452  406  
 453  407          GET_CPU_IMPL(%o5)               ! Panther Ecache is flushed differently
 454  408          cmp     %o5, PANTHER_IMPL
 455  409          bne     scrubphys_1
 456  410            nop
 457  411          PN_ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3, %o5)
↓ open down ↓ 5 lines elided ↑ open up ↑
 463  417          ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)
 464  418          casxa   [%o0]ASI_MEM, %g0, %g0
 465  419          ECACHE_REFLUSH_LINE(%o1, %o2, %o3)
 466  420  scrubphys_2:
 467  421          wrpr    %g0, %o4, %pstate       ! restore earlier pstate register value
 468  422  
 469  423          retl
 470  424          membar  #Sync                   ! move the data out of the load buffer
 471  425          SET_SIZE(scrubphys)
 472  426  
 473      -#endif  /* lint */
 474  427  
 475      -
 476      -#if defined(lint)
 477      -/*
 478      - * clearphys - Pass in the physical memory address of the checkblock
 479      - * that you want to push out, cleared with a recognizable pattern,
 480      - * from the ecache.
 481      - *
 482      - * To ensure that the ecc gets recalculated after the bad data is cleared,
 483      - * we must write out enough data to fill the w$ line (64 bytes). So we read
 484      - * in an entire ecache subblock's worth of data, and write it back out.
 485      - * Then we overwrite the 16 bytes of bad data with the pattern.
 486      - */
 487      -/* ARGSUSED */
 488      -void
 489      -clearphys(uint64_t paddr, int ecache_set_size, int ecache_linesize)
 490      -{
 491      -}
 492      -
 493      -#else   /* lint */
 494  428          ENTRY(clearphys)
 495  429          /* turn off IE, AM bits */
 496  430          rdpr    %pstate, %o4
 497  431          andn    %o4, PSTATE_IE | PSTATE_AM, %o5
 498  432          wrpr    %o5, %g0, %pstate
 499  433  
 500  434          /* turn off NCEEN */
 501  435          ldxa    [%g0]ASI_ESTATE_ERR, %o5
 502  436          andn    %o5, EN_REG_NCEEN, %o3
 503  437          stxa    %o3, [%g0]ASI_ESTATE_ERR
↓ open down ↓ 37 lines elided ↑ open up ↑
 541  475  
 542  476          /* turn NCEEN back on */
 543  477          stxa    %o5, [%g0]ASI_ESTATE_ERR
 544  478          membar  #Sync
 545  479  
 546  480          /* return and re-enable IE and AM */
 547  481          retl
 548  482            wrpr  %g0, %o4, %pstate
 549  483          SET_SIZE(clearphys)
 550  484  
 551      -#endif  /* lint */
 552  485  
 553      -
 554      -#if defined(lint)
 555      -/*
 556      - * Cheetah+ Ecache displacement flush the specified line from the E$
 557      - *
 558      - * For Panther, this means flushing the specified line from both the
 559      - * L2 cache and L3 cache.
 560      - *
 561      - * Register usage:
 562      - *      %o0 - 64 bit physical address for flushing
 563      - *      %o1 - Ecache set size
 564      - */
 565      -/*ARGSUSED*/
 566      -void
 567      -ecache_flush_line(uint64_t flushaddr, int ec_set_size)
 568      -{
 569      -}
 570      -#else   /* lint */
 571  486          ENTRY(ecache_flush_line)
 572  487  
 573  488          GET_CPU_IMPL(%o3)               ! Panther Ecache is flushed differently
 574  489          cmp     %o3, PANTHER_IMPL
 575  490          bne     ecache_flush_line_1
 576  491            nop
 577  492  
 578  493          PN_ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3, %o4)
 579  494          b       ecache_flush_line_2
 580  495            nop
 581  496  ecache_flush_line_1:
 582  497          ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)
 583  498  ecache_flush_line_2:
 584  499          retl
 585  500            nop
 586  501          SET_SIZE(ecache_flush_line)
 587      -#endif  /* lint */
 588  502  
 589      -#if defined(lint)
 590      -void
 591      -set_afsr_ext(uint64_t afsr_ext)
 592      -{
 593      -        afsr_ext = afsr_ext;
 594      -}
 595      -#else /* lint */
 596      -
 597  503          ENTRY(set_afsr_ext)
 598  504          set     ASI_AFSR_EXT_VA, %o1
 599  505          stxa    %o0, [%o1]ASI_AFSR              ! afsr_ext reg
 600  506          membar  #Sync
 601  507          retl
 602  508          nop
 603  509          SET_SIZE(set_afsr_ext)
 604  510  
 605      -#endif /* lint */
 606  511  
 607      -
 608      -#if defined(lint)
 609      -/*
 610      - * The CPU jumps here from the MMU exception handler if an ITLB parity
 611      - * error is detected and we are running on Panther.
 612      - *
 613      - * In this routine we collect diagnostic information and write it to our
 614      - * logout structure (if possible) and clear all ITLB entries that may have
 615      - * caused our parity trap.
 616      - * Then we call cpu_tlb_parity_error via systrap in order to drop down to TL0
 617      - * and log any error messages. As for parameters to cpu_tlb_parity_error, we
 618      - * send two:
 619      - *
 620      - * %g2  - Contains the VA whose lookup in the ITLB caused the parity error
 621      - * %g3  - Contains the tlo_info field of the pn_tlb_logout logout struct,
 622      - *        regardless of whether or not we actually used the logout struct.
 623      - *
 624      - * In the TL0 handler (cpu_tlb_parity_error) we will compare those two
 625      - * parameters to the data contained in the logout structure in order to
 626      - * determine whether the logout information is valid for this particular
 627      - * error or not.
 628      - */
 629      -void
 630      -itlb_parity_trap(void)
 631      -{}
 632      -
 633      -#else   /* lint */
 634      -
 635  512          ENTRY_NP(itlb_parity_trap)
 636  513          /*
 637  514           * Collect important information about the trap which will be
 638  515           * used as a parameter to the TL0 handler.
 639  516           */
 640  517          wr      %g0, ASI_IMMU, %asi
 641  518          rdpr    %tpc, %g2                       ! VA that caused the IMMU trap
 642  519          ldxa    [MMU_TAG_ACCESS_EXT]%asi, %g3   ! read the trap VA page size
 643  520          set     PN_ITLB_PGSZ_MASK, %g4
 644  521          and     %g3, %g4, %g3
↓ open down ↓ 102 lines elided ↑ open up ↑
 747  624           * Call cpu_tlb_parity_error via systrap at PIL 14 unless we're
 748  625           * already at PIL 15.    */
 749  626          set     cpu_tlb_parity_error, %g1
 750  627          rdpr    %pil, %g4
 751  628          cmp     %g4, PIL_14
 752  629          movl    %icc, PIL_14, %g4
 753  630          ba      sys_trap
 754  631            nop
 755  632          SET_SIZE(itlb_parity_trap)
 756  633  
 757      -#endif  /* lint */
 758      -
 759      -#if defined(lint)
 760      -/*
 761      - * The CPU jumps here from the MMU exception handler if a DTLB parity
 762      - * error is detected and we are running on Panther.
 763      - *
 764      - * In this routine we collect diagnostic information and write it to our
 765      - * logout structure (if possible) and clear all DTLB entries that may have
 766      - * caused our parity trap.
 767      - * Then we call cpu_tlb_parity_error via systrap in order to drop down to TL0
 768      - * and log any error messages. As for parameters to cpu_tlb_parity_error, we
 769      - * send two:
 770      - *
 771      - * %g2  - Contains the VA whose lookup in the DTLB caused the parity error
 772      - * %g3  - Contains the tlo_info field of the pn_tlb_logout logout struct,
 773      - *        regardless of whether or not we actually used the logout struct.
 774      - *
 775      - * In the TL0 handler (cpu_tlb_parity_error) we will compare those two
 776      - * parameters to the data contained in the logout structure in order to
 777      - * determine whether the logout information is valid for this particular
 778      - * error or not.
 779      - */
 780      -void
 781      -dtlb_parity_trap(void)
 782      -{}
 783      -
 784      -#else   /* lint */
 785      -
 786  634          ENTRY_NP(dtlb_parity_trap)
 787  635          /*
 788  636           * Collect important information about the trap which will be
 789  637           * used as a parameter to the TL0 handler.
 790  638           */
 791  639          wr      %g0, ASI_DMMU, %asi
  792  640          ldxa    [MMU_SFAR]%asi, %g2             ! VA that caused the DMMU trap
 793  641          ldxa    [MMU_TAG_ACCESS_EXT]%asi, %g3   ! read the trap VA page sizes
 794  642          set     PN_DTLB_PGSZ_MASK, %g4
 795  643          and     %g3, %g4, %g3
↓ open down ↓ 164 lines elided ↑ open up ↑
 960  808           * those will lead to a system panic.
 961  809           */
 962  810          set     cpu_tlb_parity_error, %g1
 963  811          rdpr    %pil, %g4
 964  812          cmp     %g4, PIL_14
 965  813          movl    %icc, PIL_14, %g4
 966  814          ba      sys_trap
 967  815            nop
 968  816          SET_SIZE(dtlb_parity_trap)
 969  817  
 970      -#endif  /* lint */
 971  818  
 972      -
 973      -#if defined(lint)
 974      -/*
 975      - * Calculates the Panther TLB index based on a virtual address and page size
 976      - *
 977      - * Register usage:
 978      - *      %o0 - virtual address whose index we want
 979      - *      %o1 - Page Size of the TLB in question as encoded in the
 980      - *            ASI_[D|I]MMU_TAG_ACCESS_EXT register.
 981      - */
 982      -uint64_t
 983      -pn_get_tlb_index(uint64_t va, uint64_t pg_sz)
 984      -{
 985      -        return ((va + pg_sz)-(va + pg_sz));
 986      -}
 987      -#else   /* lint */
 988  819          ENTRY(pn_get_tlb_index)
 989  820  
 990  821          PN_GET_TLB_INDEX(%o0, %o1)
 991  822  
 992  823          retl
 993  824            nop
 994  825          SET_SIZE(pn_get_tlb_index)
 995      -#endif  /* lint */
 996  826  
 997  827  
 998      -#if defined(lint)
 999      -/*
1000      - * For Panther CPUs we need to flush the IPB after any I$ or D$
1001      - * parity errors are detected.
1002      - */
1003      -void
1004      -flush_ipb(void)
1005      -{ return; }
1006      -
1007      -#else   /* lint */
1008      -
1009  828          ENTRY(flush_ipb)
1010  829          clr     %o0
1011  830  
1012  831  flush_ipb_1:
1013  832          stxa    %g0, [%o0]ASI_IPB_TAG
1014  833          membar  #Sync
1015  834          cmp     %o0, PN_IPB_TAG_ADDR_MAX
1016  835          blt     flush_ipb_1
1017  836            add   %o0, PN_IPB_TAG_ADDR_LINESIZE,  %o0
1018  837  
1019  838          sethi   %hi(FLUSH_ADDR), %o0
1020  839          flush   %o0
1021  840          retl
1022  841          nop
1023  842          SET_SIZE(flush_ipb)
1024  843  
1025      -#endif  /* lint */
1026  844  
1027      -
    
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX