restore sparc comments
de-linting of .s files
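
This changeset removes the lint scaffolding from the .s files: each assembly routine used to be wrapped in #if defined(lint) with an empty C stub for lint to check, while the real code sat in the #else branch. The de-linting deletes the stubs and the #if/#else/#endif wrappers, keeping only the assembly and moving the descriptive block comments from the stubs next to the corresponding ENTRY points. A minimal sketch of the construct being deleted (vac_flushpage is one of the stubs removed below; its body is elided here):

        #if defined(lint)

        /*ARGSUSED*/
        void
        vac_flushpage(pfn_t pfnum, int vcolor)
        {}

        #else   /* lint */

                ENTRY(vac_flushpage)
                /* ... real SPARC implementation ... */
                SET_SIZE(vac_flushpage)

        #endif  /* lint */

After the change only the ENTRY/SET_SIZE body remains, preceded by whatever descriptive comment the stub carried.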

          --- old/usr/src/uts/sun4u/cpu/spitfire_asm.s
          +++ new/usr/src/uts/sun4u/cpu/spitfire_asm.s
(15 lines elided)
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
  23   23   * Use is subject to license terms.
  24   24   */
  25   25  
  26      -#pragma ident   "%Z%%M% %I%     %E% SMI"
  27      -
  28      -#if !defined(lint)
  29   26  #include "assym.h"
  30      -#endif  /* lint */
  31   27  
  32   28  #include <sys/asm_linkage.h>
  33   29  #include <sys/mmu.h>
  34   30  #include <vm/hat_sfmmu.h>
  35   31  #include <sys/machparam.h>
  36   32  #include <sys/machcpuvar.h>
  37   33  #include <sys/machthread.h>
  38   34  #include <sys/privregs.h>
  39   35  #include <sys/asm_linkage.h>
  40   36  #include <sys/machasi.h>
  41   37  #include <sys/trap.h>
  42   38  #include <sys/spitregs.h>
  43   39  #include <sys/xc_impl.h>
  44   40  #include <sys/intreg.h>
  45   41  #include <sys/async.h>
  46   42  
  47   43  #ifdef TRAPTRACE
  48   44  #include <sys/traptrace.h>
  49   45  #endif /* TRAPTRACE */
  50   46  
  51      -#ifndef lint
  52      -
  53   47  /* BEGIN CSTYLED */
  54   48  #define DCACHE_FLUSHPAGE(arg1, arg2, tmp1, tmp2, tmp3)                  \
  55   49          ldxa    [%g0]ASI_LSU, tmp1                                      ;\
  56   50          btst    LSU_DC, tmp1            /* is dcache enabled? */        ;\
  57   51          bz,pn   %icc, 1f                                                ;\
  58   52          sethi   %hi(dcache_linesize), tmp1                              ;\
  59   53          ld      [tmp1 + %lo(dcache_linesize)], tmp1                     ;\
  60   54          sethi   %hi(dflush_type), tmp2                                  ;\
  61   55          ld      [tmp2 + %lo(dflush_type)], tmp2                         ;\
  62   56          cmp     tmp2, FLUSHPAGE_TYPE                                    ;\
(277 lines elided)
 340  334   * boundary and skipping the physical address being flushed. It takes
 341  335   * 10 loads to guarantee that the physical address has been flushed.
 342  336   */
 343  337  
 344  338  #define HB_ECACHE_FLUSH_CNT     2
 345  339  #define HB_PHYS_FLUSH_CNT       10      /* #loads to flush specific paddr */
 346  340  #endif /* HUMMINGBIRD */
 347  341  
 348  342  /* END CSTYLED */
 349  343  
 350      -#endif  /* !lint */
 351      -
 352  344  /*
 353  345   * Spitfire MMU and Cache operations.
 354  346   */
 355  347  
 356      -#if defined(lint)
 357      -
 358      -/*ARGSUSED*/
 359      -void
 360      -vtag_flushpage(caddr_t vaddr, uint64_t sfmmup)
 361      -{}
 362      -
 363      -/*ARGSUSED*/
 364      -void
 365      -vtag_flushall(void)
 366      -{}
 367      -        
 368      -/*ARGSUSED*/
 369      -void
 370      -vtag_flushall_uctxs(void)
 371      -{}
 372      -                
 373      -/*ARGSUSED*/
 374      -void
 375      -vtag_flushpage_tl1(uint64_t vaddr, uint64_t sfmmup)
 376      -{}
 377      -
 378      -/*ARGSUSED*/
 379      -void
 380      -vtag_flush_pgcnt_tl1(uint64_t vaddr, uint64_t sfmmup_pgcnt)
 381      -{}
 382      -
 383      -/*ARGSUSED*/
 384      -void
 385      -vtag_flushall_tl1(uint64_t dummy1, uint64_t dummy2)
 386      -{}
 387      -
 388      -/*ARGSUSED*/
 389      -void
 390      -vac_flushpage(pfn_t pfnum, int vcolor)
 391      -{}
 392      -
 393      -/*ARGSUSED*/
 394      -void
 395      -vac_flushpage_tl1(uint64_t pfnum, uint64_t vcolor)
 396      -{}
 397      -
 398      -/*ARGSUSED*/
 399      -void
 400      -init_mondo(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
 401      -{}
 402      -
 403      -/*ARGSUSED*/
 404      -void
 405      -init_mondo_nocheck(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
 406      -{}
 407      -
 408      -/*ARGSUSED*/
 409      -void
 410      -flush_instr_mem(caddr_t vaddr, size_t len)
 411      -{}
 412      -
 413      -/*ARGSUSED*/
 414      -void
 415      -flush_ecache(uint64_t physaddr, size_t size, size_t linesize)
 416      -{}
 417      -
 418      -/*ARGSUSED*/
 419      -void
 420      -get_ecache_dtag(uint32_t ecache_idx, uint64_t *ecache_data,
 421      -                uint64_t *ecache_tag, uint64_t *oafsr, uint64_t *acc_afsr)
 422      -{}
 423      -
 424      -/* ARGSUSED */
 425      -uint64_t
 426      -get_ecache_tag(uint32_t id, uint64_t *nafsr, uint64_t *acc_afsr)
 427      -{
 428      -        return ((uint64_t)0);
 429      -}
 430      -
 431      -/* ARGSUSED */
 432      -uint64_t
 433      -check_ecache_line(uint32_t id, uint64_t *acc_afsr)
 434      -{
 435      -        return ((uint64_t)0);
 436      -}
 437      -
 438      -/*ARGSUSED*/
 439      -void
 440      -kdi_flush_idcache(int dcache_size, int dcache_lsize,
 441      -    int icache_size, int icache_lsize)
 442      -{}
 443      -
 444      -#else   /* lint */
 445      -
 446  348          ENTRY_NP(vtag_flushpage)
 447  349          /*
 448  350           * flush page from the tlb
 449  351           *
 450  352           * %o0 = vaddr
 451  353           * %o1 = sfmmup
 452  354           */
 453  355          rdpr    %pstate, %o5
 454  356  #ifdef DEBUG
 455  357          PANIC_IF_INTR_DISABLED_PSTR(%o5, sfdi_label1, %g1)
(493 lines elided)
 949  851          bl,a    1b
 950  852            add     %i1, 8, %i1
 951  853          stxa    %i0, [%g0]ASI_AFSR              ! clear AFSR
 952  854          membar  #Sync
 953  855          stxa    %g1, [%g0]ASI_ESTATE_ERR        ! restore error enable
 954  856          membar  #Sync
 955  857          wrpr    %g0, %i5, %pstate
 956  858          ret
 957  859            restore
 958  860          SET_SIZE(get_ecache_dtag)
 959      -#endif /* lint */
 960  861  
 961      -#if defined(lint)
 962  862  /*
 963  863   * The ce_err function handles trap type 0x63 (corrected_ECC_error) at tl=0.
 964  864   * Steps: 1. GET AFSR  2. Get AFAR <40:4> 3. Get datapath error status
 965  865   *        4. Clear datapath error bit(s) 5. Clear AFSR error bit
 966  866   *        6. package data in %g2 and %g3 7. call cpu_ce_error vis sys_trap
 967  867   * %g2: [ 52:43 UDB lower | 42:33 UDB upper | 32:0 afsr ] - arg #3/arg #1
 968  868   * %g3: [ 40:4 afar ] - sys_trap->have_win: arg #4/arg #2
 969  869   */
 970      -void
 971      -ce_err(void)
 972      -{}
 973      -
 974      -void
 975      -ce_err_tl1(void)
 976      -{}
 977      -
 978      -
 979      -/*
 980      - * The async_err function handles trap types 0x0A (instruction_access_error)
 981      - * and 0x32 (data_access_error) at TL = 0 and TL > 0.  When we branch here,
 982      - * %g5 will have the trap type (with 0x200 set if we're at TL > 0).
 983      - *
 984      - * Steps: 1. Get AFSR 2. Get AFAR <40:4> 3. If not UE error skip UDP registers.
 985      - *        4. Else get and clear datapath error bit(s) 4. Clear AFSR error bits
 986      - *        6. package data in %g2 and %g3 7. disable all cpu errors, because
 987      - *        trap is likely to be fatal 8. call cpu_async_error vis sys_trap
 988      - *
 989      - * %g3: [ 63:53 tt | 52:43 UDB_L | 42:33 UDB_U | 32:0 afsr ] - arg #3/arg #1
 990      - * %g2: [ 40:4 afar ] - sys_trap->have_win: arg #4/arg #2
 991      - */
 992      -void
 993      -async_err(void)
 994      -{}
 995      -
 996      -/*
 997      - * The clr_datapath function clears any error bits set in the UDB regs.
 998      - */
 999      -void
1000      -clr_datapath(void)
1001      -{}
1002      -
1003      -/*
1004      - * The get_udb_errors() function gets the current value of the
1005      - * Datapath Error Registers.
1006      - */
1007      -/*ARGSUSED*/
1008      -void
1009      -get_udb_errors(uint64_t *udbh, uint64_t *udbl)
1010      -{
1011      -        *udbh = 0;
1012      -        *udbl = 0;
1013      -}
1014      -
1015      -#else   /* lint */
1016      -
1017  870          ENTRY_NP(ce_err)
1018  871          ldxa    [%g0]ASI_AFSR, %g3      ! save afsr in g3
1019  872  
1020  873          !
1021  874          ! Check for a UE... From Kevin.Normoyle:
1022  875          ! We try to switch to the trap for the UE, but since that's
1023  876          ! a hardware pipeline, we might get to the CE trap before we
1024  877          ! can switch. The UDB and AFSR registers will have both the
1025  878          ! UE and CE bits set but the UDB syndrome and the AFAR will be
1026  879          ! for the UE.
(74 lines elided)
1101  954          mov     %o4, %o0                ! save AFAR upper 32 bits
1102  955          mov     %o2, %o4                ! lower 32 bits of AFSR
1103  956          mov     %o1, %o2                ! lower 32 bits of AFAR
1104  957          mov     %o0, %o1                ! upper 32 bits of AFAR
1105  958          set     .celevel1msg, %o0
1106  959          call    panic
1107  960          nop
1108  961          SET_SIZE(ce_trap_tl1)
1109  962  #endif
1110  963  
1111      -        !
1112      -        ! async_err is the assembly glue code to get us from the actual trap
1113      -        ! into the CPU module's C error handler.  Note that we also branch
1114      -        ! here from ce_err() above.
1115      -        !
      964 +/*
      965 + * The async_err function handles trap types 0x0A (instruction_access_error)
      966 + * and 0x32 (data_access_error) at TL = 0 and TL > 0.  When we branch here,
      967 + * %g5 will have the trap type (with 0x200 set if we're at TL > 0).
      968 + *
      969 + * Steps: 1. Get AFSR 2. Get AFAR <40:4> 3. If not UE error skip UDP registers.
      970 + *        4. Else get and clear datapath error bit(s) 4. Clear AFSR error bits
      971 + *        6. package data in %g2 and %g3 7. disable all cpu errors, because
      972 + *        trap is likely to be fatal 8. call cpu_async_error vis sys_trap
      973 + *
      974 + * %g3: [ 63:53 tt | 52:43 UDB_L | 42:33 UDB_U | 32:0 afsr ] - arg #3/arg #1
      975 + * %g2: [ 40:4 afar ] - sys_trap->have_win: arg #4/arg #2
      976 + *
      977 + * async_err is the assembly glue code to get us from the actual trap
      978 + * into the CPU module's C error handler.  Note that we also branch
      979 + * here from ce_err() above.
      980 + */
1116  981          ENTRY_NP(async_err)
1117  982          stxa    %g0, [%g0]ASI_ESTATE_ERR ! disable ecc and other cpu errors
1118  983          membar  #Sync                   ! membar sync required
1119  984  
1120  985          ldxa    [%g0]ASI_AFSR, %g3      ! save afsr in g3
1121  986          ldxa    [%g0]ASI_AFAR, %g2      ! save afar in g2
1122  987  
1123  988          sllx    %g5, 53, %g5            ! move ttype to <63:53>
1124  989          or      %g3, %g5, %g3           ! or to afsr in g3
1125  990  
(53 lines elided)
1179 1044          or      %g3, %g5, %g3           ! or with afsr bits
1180 1045  
1181 1046          RESET_USER_RTT_REGS(%g4, %g5, dis_err_panic1_resetskip)
1182 1047  dis_err_panic1_resetskip:
1183 1048  
1184 1049          sethi   %hi(sys_trap), %g5
1185 1050          jmp     %g5 + %lo(sys_trap)     ! goto sys_trap
1186 1051            sub   %g0, 1, %g4
1187 1052          SET_SIZE(dis_err_panic1)
1188 1053  
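
The async_err comment above spells out how the trap glue packs its sys_trap arguments: %g3 carries the trap type in bits <63:53>, the UDB lower and upper status in <52:43> and <42:33>, and the AFSR in <32:0>, while %g2 carries the AFAR. Purely as an illustration of that layout (this decoder is hypothetical and is not the CPU module's cpu_ce_error/cpu_async_error handler), the fields could be unpacked in C like this:

        #include <stdint.h>

        typedef struct {
                uint64_t tt;            /* trap type, bits <63:53> */
                uint64_t udb_l;         /* UDB lower status, bits <52:43> */
                uint64_t udb_u;         /* UDB upper status, bits <42:33> */
                uint64_t afsr;          /* AFSR, bits <32:0> */
        } async_err_arg_t;

        static void
        unpack_async_err_arg(uint64_t g3, async_err_arg_t *a)
        {
                a->tt    = (g3 >> 53) & 0x7ff;          /* 11 bits */
                a->udb_l = (g3 >> 43) & 0x3ff;          /* 10 bits */
                a->udb_u = (g3 >> 33) & 0x3ff;          /* 10 bits */
                a->afsr  = g3 & 0x1ffffffffULL;         /* 33 bits */
        }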
     1054 +/*
     1055 + * The clr_datapath function clears any error bits set in the UDB regs.
     1056 + */
1189 1057          ENTRY(clr_datapath)
1190 1058          set     P_DER_H, %o4                    ! put P_DER_H in o4
1191 1059          ldxa    [%o4]ASI_SDB_INTR_R, %o5        ! read sdb upper half into o3
1192 1060          or      %g0, 0x3, %o2                   ! put 0x3 in o2
1193 1061          sllx    %o2, 8, %o2                     ! shift o2 to <9:8> sdb
1194 1062          andcc   %o5, %o2, %o1                   ! check for UE,CE in upper half
1195 1063          bz,a    1f                              ! no error, goto 1f
1196 1064            nop
1197 1065          stxa    %o1, [%o4]ASI_SDB_INTR_W        ! clear sdb reg UE,CE error bits
1198 1066          membar  #Sync                           ! membar sync required
(3 lines elided)
1202 1070          andcc   %o5, %o2, %o1                   ! check for UE,CE in lower half
1203 1071          bz,a    2f                              ! no error, goto 2f
1204 1072            nop
1205 1073          stxa    %o1, [%o4]ASI_SDB_INTR_W        ! clear sdb reg UE,CE error bits
1206 1074          membar  #Sync
1207 1075  2:
1208 1076          retl
1209 1077            nop
1210 1078          SET_SIZE(clr_datapath)
1211 1079  
     1080 +/*
     1081 + * The get_udb_errors() function gets the current value of the
     1082 + * Datapath Error Registers.
     1083 + */
1212 1084          ENTRY(get_udb_errors)
1213 1085          set     P_DER_H, %o3
1214 1086          ldxa    [%o3]ASI_SDB_INTR_R, %o2
1215 1087          stx     %o2, [%o0]
1216 1088          set     P_DER_L, %o3
1217 1089          ldxa    [%o3]ASI_SDB_INTR_R, %o2
1218 1090          retl
1219 1091            stx   %o2, [%o1]
1220 1092          SET_SIZE(get_udb_errors)
1221 1093  
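
The C signature preserved from the removed lint stub is void get_udb_errors(uint64_t *udbh, uint64_t *udbl): the routine stores the upper Datapath Error Register through the first pointer and the lower one through the second. A minimal hypothetical caller (variable names are illustrative):

        uint64_t udbh, udbl;

        get_udb_errors(&udbh, &udbl);   /* udbh <- P_DER_H, udbl <- P_DER_L */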
1222      -#endif /* lint */
1223      -
1224      -#if defined(lint)
1225 1094  /*
1226 1095   * The itlb_rd_entry and dtlb_rd_entry functions return the tag portion of the
1227 1096   * tte, the virtual address, and the ctxnum of the specified tlb entry.  They
1228 1097   * should only be used in places where you have no choice but to look at the
1229 1098   * tlb itself.
1230 1099   *
1231 1100   * Note: These two routines are required by the Estar "cpr" loadable module.
1232 1101   */
1233      -/*ARGSUSED*/
1234      -void
1235      -itlb_rd_entry(uint_t entry, tte_t *tte, uint64_t *va_tag)
1236      -{}
1237      -
1238      -/*ARGSUSED*/
1239      -void
1240      -dtlb_rd_entry(uint_t entry, tte_t *tte, uint64_t *va_tag)
1241      -{}
1242      -#else   /* lint */
1243 1102  /*
1244 1103   * NB - In Spitfire cpus, when reading a tte from the hardware, we
1245 1104   * need to clear [42-41] because the general definitions in pte.h
1246 1105   * define the PA to be [42-13] whereas Spitfire really uses [40-13].
1247 1106   * When cloning these routines for other cpus the "andn" below is not
1248 1107   * necessary.
1249 1108   */
1250 1109          ENTRY_NP(itlb_rd_entry)
1251 1110          sllx    %o0, 3, %o0
1252 1111  #if defined(SF_ERRATA_32)
(26 lines elided)
1279 1138          set     TTE_SPITFIRE_PFNHI_CLEAR, %g2           ! spitfire only
1280 1139          sllx    %g2, TTE_SPITFIRE_PFNHI_SHIFT, %g2      ! see comment above
1281 1140          andn    %g1, %g2, %g1                           ! itlb_rd_entry
1282 1141          stx     %g1, [%o1]
1283 1142          ldxa    [%o0]ASI_DTLB_TAGREAD, %g2
1284 1143          set     TAGREAD_CTX_MASK, %o4
1285 1144          andn    %g2, %o4, %o5
1286 1145          retl
1287 1146            stx   %o5, [%o2]
1288 1147          SET_SIZE(dtlb_rd_entry)
1289      -#endif /* lint */
1290 1148  
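
The signatures kept from the removed stubs are void itlb_rd_entry(uint_t entry, tte_t *tte, uint64_t *va_tag) and the same shape for dtlb_rd_entry; as the comment notes, they exist for consumers such as the Estar "cpr" module that must look at the TLB directly. A hedged sketch of such a consumer walking the ITLB (ITLB_SIZE is a placeholder for the real Spitfire entry count and is not defined in this file):

        void
        dump_itlb(void)
        {
                tte_t           tte;
                uint64_t        va_tag;
                uint_t          i;

                for (i = 0; i < ITLB_SIZE; i++) {       /* ITLB_SIZE: placeholder */
                        itlb_rd_entry(i, &tte, &va_tag);
                        /* examine tte and va_tag for entry i here */
                }
        }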
1291      -#if defined(lint)
1292      -
1293      -/*
1294      - * routines to get and set the LSU register
1295      - */
1296      -uint64_t
1297      -get_lsu(void)
1298      -{
1299      -        return ((uint64_t)0);
1300      -}
1301      -
1302      -/*ARGSUSED*/
1303      -void
1304      -set_lsu(uint64_t lsu)
1305      -{}
1306      -
1307      -#else /* lint */
1308      -
1309 1149          ENTRY(set_lsu)
1310 1150          stxa    %o0, [%g0]ASI_LSU               ! store to LSU
1311 1151          retl
1312 1152          membar  #Sync
1313 1153          SET_SIZE(set_lsu)
1314 1154  
1315 1155          ENTRY(get_lsu)
1316 1156          retl
1317 1157          ldxa    [%g0]ASI_LSU, %o0               ! load LSU
1318 1158          SET_SIZE(get_lsu)
1319 1159  
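
get_lsu() and set_lsu() read and write the LSU control register; their C signatures come from the removed stubs (uint64_t get_lsu(void), void set_lsu(uint64_t)). A read-modify-write sketch using LSU_DC, the D-cache enable bit that the DCACHE_FLUSHPAGE macro earlier in this file tests; doing this from C is illustrative only, not code from this changeset:

        uint64_t lsu = get_lsu();

        if (!(lsu & LSU_DC))
                set_lsu(lsu | LSU_DC);          /* turn the D-cache back on */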
1320      -#endif /* lint */
1321      -
1322      -#ifndef lint
1323 1160          /*
1324 1161           * Clear the NPT (non-privileged trap) bit in the %tick
1325 1162           * registers. In an effort to make the change in the
1326 1163           * tick counter as consistent as possible, we disable
1327 1164           * all interrupts while we're changing the registers. We also
1328 1165           * ensure that the read and write instructions are in the same
1329 1166           * line in the instruction cache.
1330 1167           */
1331 1168          ENTRY_NP(cpu_clearticknpt)
1332 1169          rdpr    %pstate, %g1            /* save processor state */
(118 lines elided)
1451 1288          or      %o3, %o0, %o3                   ! aggregate AFSR in cpu private
1452 1289          stx     %o3, [%o1]
1453 1290  3:
1454 1291          stxa    %o0, [%g0]ASI_AFSR              ! clear AFSR
1455 1292          membar  #Sync
1456 1293          stxa    %g1, [%g0]ASI_ESTATE_ERR        ! Turn error enable back on
1457 1294          membar  #Sync
1458 1295          retl
1459 1296          wrpr    %g0, %o5, %pstate
1460 1297          SET_SIZE(check_ecache_line)
1461      -#endif /* lint */
1462 1298  
1463      -#if defined(lint)
1464      -uint64_t
1465      -read_and_clear_afsr()
1466      -{
1467      -        return ((uint64_t)0);
1468      -}
1469      -#else   /* lint */
1470 1299          ENTRY(read_and_clear_afsr)
1471 1300          ldxa    [%g0]ASI_AFSR, %o0
1472 1301          retl
1473 1302            stxa  %o0, [%g0]ASI_AFSR              ! clear AFSR
1474 1303          SET_SIZE(read_and_clear_afsr)
1475      -#endif  /* lint */
1476 1304  
1477      -#if defined(lint)
1478      -/* ARGSUSED */
1479      -void
1480      -scrubphys(uint64_t paddr, int ecache_size)
1481      -{
1482      -}
1483      -
1484      -#else   /* lint */
1485      -
1486 1305  /*
1487 1306   * scrubphys - Pass in the aligned physical memory address that you want
1488 1307   * to scrub, along with the ecache size.
1489 1308   *
1490 1309   *      1) Displacement flush the E$ line corresponding to %addr.
1491 1310   *         The first ldxa guarantees that the %addr is no longer in
1492 1311   *         M, O, or E (goes to I or S (if instruction fetch also happens).
1493 1312   *      2) "Write" the data using a CAS %addr,%g0,%g0.
1494 1313   *         The casxa guarantees a transition from I to M or S to M.
1495 1314   *      3) Displacement flush the E$ line corresponding to %addr.
(106 lines elided)
1602 1421          membar  #Sync
1603 1422          stxa    %g1, [%g0]ASI_UPA_CONFIG ! restore UPA config (DM bits)
1604 1423          membar  #Sync
1605 1424  #endif /* HUMMINGBIRD */
1606 1425          wrpr    %g0, %o4, %pstate       ! restore earlier pstate register value
1607 1426  
1608 1427          retl
1609 1428          membar  #Sync                   ! move the data out of the load buffer
1610 1429          SET_SIZE(scrubphys)
1611 1430  
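
The signature from the removed stub is void scrubphys(uint64_t paddr, int ecache_size), and the block comment above requires the physical address to be aligned before it is displacement-flushed and rewritten with casxa. A hypothetical caller that scrubs one 64-byte line (the alignment mask and the source of ecache_size are placeholders, not taken from this file):

        void
        scrub_line(uint64_t paddr, int ecache_size)
        {
                scrubphys(paddr & ~(uint64_t)0x3f, ecache_size);
        }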
1612      -#endif  /* lint */
1613      -
1614      -#if defined(lint)
1615      -
1616 1431  /*
1617 1432   * clearphys - Pass in the aligned physical memory address that you want
1618 1433   * to push out, as a 64 byte block of zeros, from the ecache zero-filled.
1619 1434   * Since this routine does not bypass the ecache, it is possible that
1620 1435   * it could generate a UE error while trying to clear the a bad line.
1621 1436   * This routine clears and restores the error enable flag.
1622 1437   * TBD - Hummingbird may need similar protection
1623 1438   */
1624      -/* ARGSUSED */
1625      -void
1626      -clearphys(uint64_t paddr, int ecache_size, int ecache_linesize)
1627      -{
1628      -}
1629      -
1630      -#else   /* lint */
1631      -
1632 1439          ENTRY(clearphys)
1633 1440          or      %o2, %g0, %o3   ! ecache linesize
1634 1441          or      %o1, %g0, %o2   ! ecache size
1635 1442  #ifndef HUMMINGBIRD
1636 1443          or      %o3, %g0, %o4   ! save ecache linesize
1637 1444          xor     %o0, %o2, %o1   ! calculate alias address
1638 1445          add     %o2, %o2, %o3   ! 2 * ecachesize
1639 1446          sub     %o3, 1, %o3     ! -1 == mask
1640 1447          and     %o1, %o3, %o1   ! and with xor'd address
1641 1448          set     ecache_flushaddr, %o3
(110 lines elided)
1752 1559  
1753 1560          membar  #Sync
1754 1561          stxa    %g1, [%g0]ASI_UPA_CONFIG ! restore UPA config (DM bits)
1755 1562          membar  #Sync
1756 1563  #endif /* HUMMINGBIRD... */
1757 1564  
1758 1565          retl
1759 1566          wrpr    %g0, %o4, %pstate       ! restore earlier pstate register value
1760 1567          SET_SIZE(clearphys)
1761 1568  
1762      -#endif  /* lint */
1763      -
1764      -#if defined(lint)
1765      -/* ARGSUSED */
1766      -void
1767      -flushecacheline(uint64_t paddr, int ecache_size)
1768      -{
1769      -}
1770      -
1771      -#else   /* lint */
1772 1569  /*
1773 1570   * flushecacheline - This is a simpler version of scrubphys
1774 1571   * which simply does a displacement flush of the line in
1775 1572   * question. This routine is mainly used in handling async
1776 1573   * errors where we want to get rid of a bad line in ecache.
1777 1574   * Note that if the line is modified and it has suffered
1778 1575   * data corruption - we are guarantee that the hw will write
1779 1576   * a UE back to mark the page poisoned.
1780 1577   */
1781 1578          ENTRY(flushecacheline)
(93 lines elided)
1875 1672          stxa    %g1, [%g0]ASI_ESTATE_ERR        ! restore error enable
1876 1673          membar  #Sync                   
1877 1674  
1878 1675          stxa    %g4, [%g0]ASI_UPA_CONFIG ! restore UPA config (DM bits)
1879 1676          membar  #Sync
1880 1677  #endif /* HUMMINGBIRD */
1881 1678          retl
1882 1679          wrpr    %g0, %o4, %pstate       
1883 1680          SET_SIZE(flushecacheline)
1884 1681  
1885      -#endif  /* lint */
1886      -
1887      -#if defined(lint)
1888      -/* ARGSUSED */
1889      -void
1890      -ecache_scrubreq_tl1(uint64_t inum, uint64_t dummy)
1891      -{
1892      -}
1893      -
1894      -#else   /* lint */
1895 1682  /*
1896 1683   * ecache_scrubreq_tl1 is the crosstrap handler called at ecache_calls_a_sec Hz
1897 1684   * from the clock CPU.  It atomically increments the outstanding request
1898 1685   * counter and, if there was not already an outstanding request,
1899 1686   * branches to setsoftint_tl1 to enqueue an intr_vec for the given inum.
1900 1687   */
1901 1688  
1902 1689          ! Register usage:
1903 1690          !
1904 1691          ! Arguments:
(16 lines elided)
1921 1708          add     %g2, 0x1, %g3
1922 1709          brnz,pn %g2, 1f                 ! no need to enqueue more intr_vec
1923 1710            st    %g3, [%g4]              ! delay - store incremented counter
1924 1711          jmp     %g6                     ! setsoftint_tl1(%g1) - queue intr_vec
1925 1712            nop
1926 1713          ! not reached
1927 1714  1:
1928 1715          retry
1929 1716          SET_SIZE(ecache_scrubreq_tl1)
1930 1717  
1931      -#endif  /* lint */
1932      -
1933      -#if defined(lint)
1934      -/*ARGSUSED*/
1935      -void
1936      -write_ec_tag_parity(uint32_t id)
1937      -{}
1938      -#else /* lint */
1939      -
1940 1718          /*
1941 1719           * write_ec_tag_parity(), which zero's the ecache tag,
1942 1720           * marks the state as invalid and writes good parity to the tag.
1943 1721           * Input %o1= 32 bit E$ index
1944 1722           */
1945 1723          ENTRY(write_ec_tag_parity)
1946 1724          or      %g0, 1, %o4
1947 1725          sllx    %o4, 39, %o4                    ! set bit 40 for e$ tag access
1948 1726          or      %o0, %o4, %o4                 ! %o4 = ecache addr for tag write
1949 1727  
(19 lines elided)
1969 1747          stxa    %o3, [%g0]ASI_EC_DIAG           ! update with the above info
1970 1748          stxa    %g0, [%o4]ASI_EC_W
1971 1749          membar  #Sync
1972 1750  
1973 1751          stxa    %g1, [%g0]ASI_ESTATE_ERR        ! Turn error enable back on
1974 1752          membar  #Sync
1975 1753          retl
1976 1754          wrpr    %g0, %o5, %pstate
1977 1755          SET_SIZE(write_ec_tag_parity)
1978 1756  
1979      -#endif /* lint */
1980      -
1981      -#if defined(lint)
1982      -/*ARGSUSED*/
1983      -void
1984      -write_hb_ec_tag_parity(uint32_t id)
1985      -{}
1986      -#else /* lint */
1987      -
1988 1757          /*
1989 1758           * write_hb_ec_tag_parity(), which zero's the ecache tag,
1990 1759           * marks the state as invalid and writes good parity to the tag.
1991 1760           * Input %o1= 32 bit E$ index
1992 1761           */
1993 1762          ENTRY(write_hb_ec_tag_parity)
1994 1763          or      %g0, 1, %o4
1995 1764          sllx    %o4, 39, %o4                    ! set bit 40 for e$ tag access
1996 1765          or      %o0, %o4, %o4               ! %o4 = ecache addr for tag write
1997 1766  
(24 lines elided)
2022 1791          stxa    %o3, [%g0]ASI_EC_DIAG           ! update with the above info
2023 1792          stxa    %g0, [%o4]ASI_EC_W
2024 1793          membar  #Sync
2025 1794  
2026 1795          stxa    %g1, [%g0]ASI_ESTATE_ERR        ! Turn error enable back on
2027 1796          membar  #Sync
2028 1797          retl
2029 1798          wrpr    %g0, %o5, %pstate
2030 1799          SET_SIZE(write_hb_ec_tag_parity)
2031 1800  
2032      -#endif /* lint */
2033      -
2034 1801  #define VIS_BLOCKSIZE           64
2035 1802  
2036      -#if defined(lint)
2037      -
2038      -/*ARGSUSED*/
2039      -int
2040      -dtrace_blksuword32(uintptr_t addr, uint32_t *data, int tryagain)
2041      -{ return (0); }
2042      -
2043      -#else
2044      -
2045 1803          ENTRY(dtrace_blksuword32)
2046 1804          save    %sp, -SA(MINFRAME + 4), %sp
2047 1805  
2048 1806          rdpr    %pstate, %l1
2049 1807          andn    %l1, PSTATE_IE, %l2             ! disable interrupts to
2050 1808          wrpr    %g0, %l2, %pstate               ! protect our FPU diddling
2051 1809  
2052 1810          rd      %fprs, %l0
2053 1811          andcc   %l0, FPRS_FEF, %g0
2054 1812          bz,a,pt %xcc, 1f                        ! if the fpu is disabled
(44 lines elided)
2099 1857          brnz,pt %i2, 1f
2100 1858          nop
2101 1859          ret
2102 1860          restore %g0, -1, %o0
2103 1861  1:
2104 1862          call    dtrace_blksuword32_err
2105 1863          restore
2106 1864  
2107 1865          SET_SIZE(dtrace_blksuword32)
2108 1866  
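
The removed stub gives the signature int dtrace_blksuword32(uintptr_t addr, uint32_t *data, int tryagain), and the error path above ("restore %g0, -1, %o0") shows a negative return on failure. A hypothetical caller that simply propagates that convention (names illustrative):

        static int
        copy_block_words(uintptr_t uaddr, uint32_t *words, int tryagain)
        {
                return (dtrace_blksuword32(uaddr, words, tryagain) < 0 ? -1 : 0);
        }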
2109      -#endif /* lint */
    