Print this page
Restore SPARC comments
De-linting of the .s (assembly) files

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/sun4u/cpu/us3_cheetahplus_asm.s
          +++ new/usr/src/uts/sun4u/cpu/us3_cheetahplus_asm.s
↓ open down ↓ 17 lines elided ↑ open up ↑
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
  23   23   * Use is subject to license terms.
  24   24   *
  25   25   * Assembly code support for the Cheetah+ module
  26   26   */
  27   27  
  28      -#pragma ident   "%Z%%M% %I%     %E% SMI"
  29      -
  30      -#if !defined(lint)
  31   28  #include "assym.h"
  32      -#endif  /* lint */
  33   29  
  34   30  #include <sys/asm_linkage.h>
  35   31  #include <sys/mmu.h>
  36   32  #include <vm/hat_sfmmu.h>
  37   33  #include <sys/machparam.h>
  38   34  #include <sys/machcpuvar.h>
  39   35  #include <sys/machthread.h>
  40   36  #include <sys/machtrap.h>
  41   37  #include <sys/privregs.h>
  42   38  #include <sys/asm_linkage.h>
↓ open down ↓ 5 lines elided ↑ open up ↑
  48   44  #include <sys/async.h>
  49   45  #include <sys/clock.h>
  50   46  #include <sys/cheetahasm.h>
  51   47  #include <sys/cmpregs.h>
  52   48  
  53   49  #ifdef TRAPTRACE
  54   50  #include <sys/traptrace.h>
  55   51  #endif /* TRAPTRACE */
  56   52  
  57   53  
  58      -#if !defined(lint)
  59      -
  60   54  /* BEGIN CSTYLED */
  61   55  
  62   56  /*
  63   57   * Cheetah+ version to reflush an Ecache line by index.
  64   58   *
  65   59   * By default we assume the Ecache is 2-way so we flush both
  66   60   * ways. Even if the cache is direct-mapped no harm will come
  67   61   * from performing the flush twice, apart from perhaps a performance
  68   62   * penalty.
  69   63   *
↓ open down ↓ 70 lines elided ↑ open up ↑
 140  134          and     physaddr, l2_idx_out, l3_idx_out;                               \
 141  135          set     PN_L3_IDX_DISP_FLUSH, l2_idx_out;                               \
 142  136          or      l2_idx_out, l3_idx_out, l3_idx_out;                             \
 143  137          set     PN_L2_SET_SIZE, l2_idx_out;                                     \
 144  138          sub     l2_idx_out, 1, l2_idx_out;                                      \
 145  139          and     physaddr, l2_idx_out, l2_idx_out;                               \
 146  140          set     PN_L2_IDX_DISP_FLUSH, scr3;                                     \
 147  141          or      l2_idx_out, scr3, l2_idx_out;                                   \
 148  142          PN_ECACHE_REFLUSH_LINE(l2_idx_out, l3_idx_out, scr3, scr4)
 149  143  
 150      -#endif  /* !lint */
 151      -
 152  144  /*
 153  145   * Fast ECC error at TL>0 handler
 154  146   * We get here via trap 70 at TL>0->Software trap 0 at TL>0.  We enter
 155  147   * this routine with %g1 and %g2 already saved in %tpc, %tnpc and %tstate.
 156  148   * For a complete description of the Fast ECC at TL>0 handling see the
 157  149   * comment block "Cheetah/Cheetah+ Fast ECC at TL>0 trap strategy" in
 158  150   * us3_common_asm.s
 159  151   */
 160      -#if defined(lint)
 161  152  
 162      -void
 163      -fast_ecc_tl1_err(void)
 164      -{}
 165      -
 166      -#else   /* lint */
 167      -
 168  153          .section ".text"
 169  154          .align  64
 170  155          ENTRY_NP(fast_ecc_tl1_err)
 171  156  
 172  157          /*
 173  158           * This macro turns off the D$/I$ if they are on and saves their
 174  159           * original state in ch_err_tl1_tmp, saves all the %g registers in the
 175  160           * ch_err_tl1_data structure, updates the ch_err_tl1_flags and saves
 176  161           * the %tpc in ch_err_tl1_tpc.  At the end of this macro, %g1 will
 177  162           * point to the ch_err_tl1_data structure and the original D$/I$ state
↓ open down ↓ 228 lines elided ↑ open up ↑
 406  391           */
 407  392          CH_ERR_TL1_EXIT;
 408  393  
 409  394          /*
 410  395           * Establish panic exit label.
 411  396           */
 412  397          CH_ERR_TL1_PANIC_EXIT(fecc_tl1_err);
 413  398  
 414  399          SET_SIZE(fast_ecc_tl1_err)
 415  400  
 416      -#endif  /* lint */
 417  401  
 418      -
 419      -#if defined(lint)
 420  402  /*
 421  403   * scrubphys - Pass in the aligned physical memory address
 422  404   * that you want to scrub, along with the ecache set size.
 423  405   *
 424  406   *      1) Displacement flush the E$ line corresponding to %addr.
 425  407   *         The first ldxa guarantees that the %addr is no longer in
 426  408   *         M, O, or E (goes to I or S (if instruction fetch also happens).
 427  409   *      2) "Write" the data using a CAS %addr,%g0,%g0.
 428  410   *         The casxa guarantees a transition from I to M or S to M.
 429  411   *      3) Displacement flush the E$ line corresponding to %addr.
↓ open down ↓ 2 lines elided ↑ open up ↑
 432  414   *      4) The "membar #Sync" pushes the cache line out of the writeback
 433  415   *         buffers onto the bus, on the way to dram finally.
 434  416   *
 435  417   * This is a modified version of the algorithm suggested by Gary Lauterbach.
 436  418   * In theory the CAS %addr,%g0,%g0 is supposed to mark the addr's cache line
 437  419   * as modified, but then we found out that for spitfire, if it misses in the
 438  420   * E$ it will probably install as an M, but if it hits in the E$, then it
 439  421   * will stay E, if the store doesn't happen. So the first displacement flush
 440  422   * should ensure that the CAS will miss in the E$.  Arrgh.
 441  423   */
 442      -/* ARGSUSED */
 443      -void
 444      -scrubphys(uint64_t paddr, int ecache_set_size)
 445      -{}
 446      -
 447      -#else   /* lint */
 448  424          ENTRY(scrubphys)
 449  425          rdpr    %pstate, %o4
 450  426          andn    %o4, PSTATE_IE | PSTATE_AM, %o5
 451  427          wrpr    %o5, %g0, %pstate       ! clear IE, AM bits
 452  428  
 453  429          GET_CPU_IMPL(%o5)               ! Panther Ecache is flushed differently
 454  430          cmp     %o5, PANTHER_IMPL
 455  431          bne     scrubphys_1
 456  432            nop
 457  433          PN_ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3, %o5)
↓ open down ↓ 5 lines elided ↑ open up ↑
 463  439          ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)
 464  440          casxa   [%o0]ASI_MEM, %g0, %g0
 465  441          ECACHE_REFLUSH_LINE(%o1, %o2, %o3)
 466  442  scrubphys_2:
 467  443          wrpr    %g0, %o4, %pstate       ! restore earlier pstate register value
 468  444  
 469  445          retl
 470  446          membar  #Sync                   ! move the data out of the load buffer
 471  447          SET_SIZE(scrubphys)
 472  448  
 473      -#endif  /* lint */
 474  449  
 475      -
 476      -#if defined(lint)
 477  450  /*
 478  451   * clearphys - Pass in the physical memory address of the checkblock
 479  452   * that you want to push out, cleared with a recognizable pattern,
 480  453   * from the ecache.
 481  454   *
 482  455   * To ensure that the ecc gets recalculated after the bad data is cleared,
 483  456   * we must write out enough data to fill the w$ line (64 bytes). So we read
 484  457   * in an entire ecache subblock's worth of data, and write it back out.
 485  458   * Then we overwrite the 16 bytes of bad data with the pattern.
 486  459   */
 487      -/* ARGSUSED */
 488      -void
 489      -clearphys(uint64_t paddr, int ecache_set_size, int ecache_linesize)
 490      -{
 491      -}
 492      -
 493      -#else   /* lint */
 494  460          ENTRY(clearphys)
 495  461          /* turn off IE, AM bits */
 496  462          rdpr    %pstate, %o4
 497  463          andn    %o4, PSTATE_IE | PSTATE_AM, %o5
 498  464          wrpr    %o5, %g0, %pstate
 499  465  
 500  466          /* turn off NCEEN */
 501  467          ldxa    [%g0]ASI_ESTATE_ERR, %o5
 502  468          andn    %o5, EN_REG_NCEEN, %o3
 503  469          stxa    %o3, [%g0]ASI_ESTATE_ERR
↓ open down ↓ 37 lines elided ↑ open up ↑
 541  507  
 542  508          /* turn NCEEN back on */
 543  509          stxa    %o5, [%g0]ASI_ESTATE_ERR
 544  510          membar  #Sync
 545  511  
 546  512          /* return and re-enable IE and AM */
 547  513          retl
 548  514            wrpr  %g0, %o4, %pstate
 549  515          SET_SIZE(clearphys)
 550  516  
 551      -#endif  /* lint */
 552  517  
 553      -
 554      -#if defined(lint)
 555  518  /*
 556  519   * Cheetah+ Ecache displacement flush the specified line from the E$
 557  520   *
 558  521   * For Panther, this means flushing the specified line from both the
 559  522   * L2 cache and L3 cache.
 560  523   *
 561  524   * Register usage:
 562  525   *      %o0 - 64 bit physical address for flushing
 563  526   *      %o1 - Ecache set size
 564  527   */
 565      -/*ARGSUSED*/
 566      -void
 567      -ecache_flush_line(uint64_t flushaddr, int ec_set_size)
 568      -{
 569      -}
 570      -#else   /* lint */
  571  528          ENTRY(ecache_flush_line)
  572  529  
  573  530          GET_CPU_IMPL(%o3)               ! Panther Ecache is flushed differently
  574  531          cmp     %o3, PANTHER_IMPL
  575  532          bne     ecache_flush_line_1     ! not Panther: take single-E$ path below
  576  533            nop                           ! branch delay slot
  577  534  
  578  535          PN_ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3, %o4)   ! Panther: flush line from both L2 and L3
  579  536          b       ecache_flush_line_2     ! skip the non-Panther flush
  580  537            nop                           ! branch delay slot
  581  538  ecache_flush_line_1:
  582  539          ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)   ! Cheetah+: displacement flush the E$ line
  583  540  ecache_flush_line_2:
  584  541          retl
  585  542            nop                           ! delay slot of retl
  586  543          SET_SIZE(ecache_flush_line)
 587      -#endif  /* lint */
 588  544  
 589      -#if defined(lint)
 590      -void
 591      -set_afsr_ext(uint64_t afsr_ext)
 592      -{
 593      -        afsr_ext = afsr_ext;
 594      -}
 595      -#else /* lint */
 596      -
  597  545          ENTRY(set_afsr_ext)
        /*
         * void set_afsr_ext(uint64_t afsr_ext)
         *   %o0 = value to write to the extended AFSR (AFSR_EXT).
         * Clobbers %o1; no return value.
         */
  598  546          set     ASI_AFSR_EXT_VA, %o1            ! %o1 = VA selecting AFSR_EXT within ASI_AFSR
  599  547          stxa    %o0, [%o1]ASI_AFSR              ! afsr_ext reg
  600  548          membar  #Sync                           ! make the ASI store globally visible
  601  549          retl
  602  550          nop                                     ! delay slot of retl
  603  551          SET_SIZE(set_afsr_ext)
 604  552  
 605      -#endif /* lint */
 606  553  
 607      -
 608      -#if defined(lint)
 609  554  /*
 610  555   * The CPU jumps here from the MMU exception handler if an ITLB parity
 611  556   * error is detected and we are running on Panther.
 612  557   *
 613  558   * In this routine we collect diagnostic information and write it to our
 614  559   * logout structure (if possible) and clear all ITLB entries that may have
 615  560   * caused our parity trap.
 616  561   * Then we call cpu_tlb_parity_error via systrap in order to drop down to TL0
 617  562   * and log any error messages. As for parameters to cpu_tlb_parity_error, we
 618  563   * send two:
 619  564   *
 620  565   * %g2  - Contains the VA whose lookup in the ITLB caused the parity error
 621  566   * %g3  - Contains the tlo_info field of the pn_tlb_logout logout struct,
 622  567   *        regardless of whether or not we actually used the logout struct.
 623  568   *
 624  569   * In the TL0 handler (cpu_tlb_parity_error) we will compare those two
 625  570   * parameters to the data contained in the logout structure in order to
 626  571   * determine whether the logout information is valid for this particular
 627  572   * error or not.
 628  573   */
 629      -void
 630      -itlb_parity_trap(void)
 631      -{}
 632      -
 633      -#else   /* lint */
 634      -
 635  574          ENTRY_NP(itlb_parity_trap)
 636  575          /*
 637  576           * Collect important information about the trap which will be
 638  577           * used as a parameter to the TL0 handler.
 639  578           */
 640  579          wr      %g0, ASI_IMMU, %asi
 641  580          rdpr    %tpc, %g2                       ! VA that caused the IMMU trap
 642  581          ldxa    [MMU_TAG_ACCESS_EXT]%asi, %g3   ! read the trap VA page size
 643  582          set     PN_ITLB_PGSZ_MASK, %g4
 644  583          and     %g3, %g4, %g3
↓ open down ↓ 102 lines elided ↑ open up ↑
 747  686           * Call cpu_tlb_parity_error via systrap at PIL 14 unless we're
 748  687           * already at PIL 15.    */
 749  688          set     cpu_tlb_parity_error, %g1
 750  689          rdpr    %pil, %g4
 751  690          cmp     %g4, PIL_14
 752  691          movl    %icc, PIL_14, %g4
 753  692          ba      sys_trap
 754  693            nop
 755  694          SET_SIZE(itlb_parity_trap)
 756  695  
 757      -#endif  /* lint */
 758      -
 759      -#if defined(lint)
 760  696  /*
 761  697   * The CPU jumps here from the MMU exception handler if a DTLB parity
 762  698   * error is detected and we are running on Panther.
 763  699   *
 764  700   * In this routine we collect diagnostic information and write it to our
 765  701   * logout structure (if possible) and clear all DTLB entries that may have
 766  702   * caused our parity trap.
 767  703   * Then we call cpu_tlb_parity_error via systrap in order to drop down to TL0
 768  704   * and log any error messages. As for parameters to cpu_tlb_parity_error, we
 769  705   * send two:
 770  706   *
 771  707   * %g2  - Contains the VA whose lookup in the DTLB caused the parity error
 772  708   * %g3  - Contains the tlo_info field of the pn_tlb_logout logout struct,
 773  709   *        regardless of whether or not we actually used the logout struct.
 774  710   *
 775  711   * In the TL0 handler (cpu_tlb_parity_error) we will compare those two
 776  712   * parameters to the data contained in the logout structure in order to
 777  713   * determine whether the logout information is valid for this particular
 778  714   * error or not.
 779  715   */
 780      -void
 781      -dtlb_parity_trap(void)
 782      -{}
 783      -
 784      -#else   /* lint */
 785      -
 786  716          ENTRY_NP(dtlb_parity_trap)
 787  717          /*
 788  718           * Collect important information about the trap which will be
 789  719           * used as a parameter to the TL0 handler.
 790  720           */
 791  721          wr      %g0, ASI_DMMU, %asi
 792  722          ldxa    [MMU_SFAR]%asi, %g2             ! VA that caused the IMMU trap
 793  723          ldxa    [MMU_TAG_ACCESS_EXT]%asi, %g3   ! read the trap VA page sizes
 794  724          set     PN_DTLB_PGSZ_MASK, %g4
 795  725          and     %g3, %g4, %g3
↓ open down ↓ 164 lines elided ↑ open up ↑
 960  890           * those will lead to a system panic.
 961  891           */
 962  892          set     cpu_tlb_parity_error, %g1
 963  893          rdpr    %pil, %g4
 964  894          cmp     %g4, PIL_14
 965  895          movl    %icc, PIL_14, %g4
 966  896          ba      sys_trap
 967  897            nop
 968  898          SET_SIZE(dtlb_parity_trap)
 969  899  
 970      -#endif  /* lint */
 971  900  
 972      -
 973      -#if defined(lint)
 974  901  /*
 975  902   * Calculates the Panther TLB index based on a virtual address and page size
 976  903   *
 977  904   * Register usage:
 978  905   *      %o0 - virtual address whose index we want
 979  906   *      %o1 - Page Size of the TLB in question as encoded in the
 980  907   *            ASI_[D|I]MMU_TAG_ACCESS_EXT register.
 981  908   */
 982      -uint64_t
 983      -pn_get_tlb_index(uint64_t va, uint64_t pg_sz)
 984      -{
 985      -        return ((va + pg_sz)-(va + pg_sz));
 986      -}
 987      -#else   /* lint */
  988  909          ENTRY(pn_get_tlb_index)
        /*
         * uint64_t pn_get_tlb_index(uint64_t va, uint64_t pg_sz)
         *   %o0 = virtual address, %o1 = page size as encoded in
         *   ASI_[D|I]MMU_TAG_ACCESS_EXT (see block comment above).
         * The macro computes the Panther TLB index in place; the result is
         * returned in %o0 (SPARC ABI return register).
         */
  989  910  
  990  911          PN_GET_TLB_INDEX(%o0, %o1)      ! index = f(va, page size)
  991  912  
  992  913          retl
  993  914            nop                           ! delay slot of retl
  994  915          SET_SIZE(pn_get_tlb_index)
 995      -#endif  /* lint */
 996  916  
 997  917  
 998      -#if defined(lint)
 999  918  /*
1000  919   * For Panther CPUs we need to flush the IPB after any I$ or D$
1001  920   * parity errors are detected.
1002  921   */
1003      -void
1004      -flush_ipb(void)
1005      -{ return; }
1006      -
1007      -#else   /* lint */
1008      -
 1009  922          ENTRY(flush_ipb)
        /*
         * void flush_ipb(void) - invalidate every line of the Panther
         * instruction prefetch buffer (IPB) by writing zero to each tag.
         * Called after I$/D$ parity errors (see block comment above).
         * Clobbers %o0.
         */
 1010  923          clr     %o0                             ! %o0 = first IPB tag address (0)
 1011  924  
 1012  925  flush_ipb_1:
 1013  926          stxa    %g0, [%o0]ASI_IPB_TAG           ! zero this IPB tag entry
 1014  927          membar  #Sync                           ! complete the tag write before continuing
 1015  928          cmp     %o0, PN_IPB_TAG_ADDR_MAX
 1016  929          blt     flush_ipb_1                     ! loop until all tag addresses are written
 1017  930            add   %o0, PN_IPB_TAG_ADDR_LINESIZE,  %o0      ! delay slot: step to next tag line
 1018  931  
 1019  932          sethi   %hi(FLUSH_ADDR), %o0
 1020  933          flush   %o0                             ! synchronize instruction fetch pipeline
 1021  934          retl
 1022  935          nop                                     ! delay slot of retl
 1023  936          SET_SIZE(flush_ipb)
1024  937  
1025      -#endif  /* lint */
1026  938  
1027      -
    
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX