 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#if !defined(lint)
#include "assym.h"
#endif	/* lint */

#include <sys/asm_linkage.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <sys/machparam.h>
#include <sys/machcpuvar.h>
#include <sys/machthread.h>
#include <sys/privregs.h>
#include <sys/machasi.h>
#include <sys/trap.h>
#include <sys/spitregs.h>
#include <sys/xc_impl.h>
#include <sys/intreg.h>
#include <sys/async.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>
#endif	/* TRAPTRACE */

#ifndef lint

/* BEGIN CSTYLED */
#define	DCACHE_FLUSHPAGE(arg1, arg2, tmp1, tmp2, tmp3)			\
	ldxa	[%g0]ASI_LSU, tmp1					;\
	btst	LSU_DC, tmp1		/* is dcache enabled? */	;\
	bz,pn	%icc, 1f						;\
	sethi	%hi(dcache_linesize), tmp1				;\
	ld	[tmp1 + %lo(dcache_linesize)], tmp1			;\
	sethi	%hi(dflush_type), tmp2					;\
	ld	[tmp2 + %lo(dflush_type)], tmp2				;\
	cmp	tmp2, FLUSHPAGE_TYPE					;\
	be,pt	%icc, 2f						;\
	sllx	arg1, SF_DC_VBIT_SHIFT, arg1	/* tag to compare */	;\
	sethi	%hi(dcache_size), tmp3					;\
	ld	[tmp3 + %lo(dcache_size)], tmp3				;\
	cmp	tmp2, FLUSHMATCH_TYPE					;\
	be,pt	%icc, 3f						;\
	nop								;\
	/*								\
	 * flushtype = FLUSHALL_TYPE, flush the whole thing		\
	 * tmp3 = cache size						\
 * we are done flushing it. Keep interrupts off while flushing in this
 * manner.
 *
 * We flush the entire ecache by starting at one end and loading each
 * successive ecache line for the 2*ecache-size range. We have to repeat
 * the flush operation to guarantee that the entire ecache has been
 * flushed.
 *
 * For flushing a specific physical address, we start at the aliased
 * address and load at set-size stride, wrapping around at 2*ecache-size
 * boundary and skipping the physical address being flushed. It takes
 * 10 loads to guarantee that the physical address has been flushed.
 */

#define	HB_ECACHE_FLUSH_CNT	2
#define	HB_PHYS_FLUSH_CNT	10	/* #loads to flush specific paddr */
#endif /* HUMMINGBIRD */
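
/*
 * Illustrative sketch of the whole-ecache flush described above, in
 * C-like pseudocode ("flushaddr" and "linesize" are stand-ins for the
 * kernel's ecache flush region and the E$ line size):
 *
 *	for (n = 0; n < HB_ECACHE_FLUSH_CNT; n++)
 *		for (off = 0; off < 2 * ecache_size; off += linesize)
 *			(void) *(volatile uint64_t *)(flushaddr + off);
 */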

/* END CSTYLED */

#endif /* !lint */

/*
 * Spitfire MMU and Cache operations.
 */

#if defined(lint)

/*ARGSUSED*/
void
vtag_flushpage(caddr_t vaddr, uint64_t sfmmup)
{}

/*ARGSUSED*/
void
vtag_flushall(void)
{}

/*ARGSUSED*/
void
vtag_flushall_uctxs(void)
{}

/*ARGSUSED*/
void
vtag_flushpage_tl1(uint64_t vaddr, uint64_t sfmmup)
{}

/*ARGSUSED*/
void
vtag_flush_pgcnt_tl1(uint64_t vaddr, uint64_t sfmmup_pgcnt)
{}

/*ARGSUSED*/
void
vtag_flushall_tl1(uint64_t dummy1, uint64_t dummy2)
{}

/*ARGSUSED*/
void
vac_flushpage(pfn_t pfnum, int vcolor)
{}

/*ARGSUSED*/
void
vac_flushpage_tl1(uint64_t pfnum, uint64_t vcolor)
{}

/*ARGSUSED*/
void
init_mondo(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{}

/*ARGSUSED*/
void
init_mondo_nocheck(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{}

/*ARGSUSED*/
void
flush_instr_mem(caddr_t vaddr, size_t len)
{}

/*ARGSUSED*/
void
flush_ecache(uint64_t physaddr, size_t size, size_t linesize)
{}

/*ARGSUSED*/
void
get_ecache_dtag(uint32_t ecache_idx, uint64_t *ecache_data,
	uint64_t *ecache_tag, uint64_t *oafsr, uint64_t *acc_afsr)
{}

/* ARGSUSED */
uint64_t
get_ecache_tag(uint32_t id, uint64_t *nafsr, uint64_t *acc_afsr)
{
	return ((uint64_t)0);
}

/* ARGSUSED */
uint64_t
check_ecache_line(uint32_t id, uint64_t *acc_afsr)
{
	return ((uint64_t)0);
}

/*ARGSUSED*/
void
kdi_flush_idcache(int dcache_size, int dcache_lsize,
	int icache_size, int icache_lsize)
{}

#else	/* lint */

	ENTRY_NP(vtag_flushpage)
	/*
	 * flush page from the tlb
	 *
	 * %o0 = vaddr
	 * %o1 = sfmmup
	 */
	rdpr	%pstate, %o5
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o5, sfdi_label1, %g1)
#endif /* DEBUG */
	/*
	 * disable ints
	 */
	andn	%o5, PSTATE_IE, %o4
	wrpr	%o4, 0, %pstate

	/*
	 * Then, blow out the tlb
	 * Interrupts are disabled to prevent the secondary ctx register
	stx	%i0, [%i1]		! save the AFSR

	brz	%i4, 2f			! acc_afsr == NULL?
	nop
	ldx	[%i4], %g4
	or	%g4, %i0, %g4		! aggregate AFSR in cpu private
	stx	%g4, [%i4]
2:
	add	%i2, 8, %i2
	cmp	%i2, 64
	bl,a	1b
	add	%i1, 8, %i1
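	!
	! (The loop above steps %i2 through one 64-byte ecache line eight
	! bytes at a time; the annulled delay slot advances the output
	! pointer in %i1 only when the branch is taken.)
	!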
	stxa	%i0, [%g0]ASI_AFSR	! clear AFSR
	membar	#Sync
	stxa	%g1, [%g0]ASI_ESTATE_ERR ! restore error enable
	membar	#Sync
	wrpr	%g0, %i5, %pstate
	ret
	restore
	SET_SIZE(get_ecache_dtag)
#endif /* lint */

#if defined(lint)
/*
 * The ce_err function handles trap type 0x63 (corrected_ECC_error) at tl=0.
 * Steps: 1. Get AFSR  2. Get AFAR <40:4>  3. Get datapath error status
 * 4. Clear datapath error bit(s)  5. Clear AFSR error bit
 * 6. package data in %g2 and %g3  7. call cpu_ce_error via sys_trap
 * %g2: [ 52:43 UDB lower | 42:33 UDB upper | 32:0 afsr ] - arg #3/arg #1
 * %g3: [ 40:4 afar ] - sys_trap->have_win: arg #4/arg #2
 */
void
ce_err(void)
{}

void
ce_err_tl1(void)
{}


/*
 * The async_err function handles trap types 0x0A (instruction_access_error)
 * and 0x32 (data_access_error) at TL = 0 and TL > 0. When we branch here,
 * %g5 will have the trap type (with 0x200 set if we're at TL > 0).
 *
 * Steps: 1. Get AFSR  2. Get AFAR <40:4>  3. If not UE error skip UDB
 * registers.  4. Else get and clear datapath error bit(s)  5. Clear AFSR
 * error bits  6. package data in %g2 and %g3  7. disable all cpu errors,
 * because the trap is likely to be fatal  8. call cpu_async_error via
 * sys_trap
 *
 * %g3: [ 63:53 tt | 52:43 UDB_L | 42:33 UDB_U | 32:0 afsr ] - arg #3/arg #1
 * %g2: [ 40:4 afar ] - sys_trap->have_win: arg #4/arg #2
 */
void
async_err(void)
{}

/*
 * The clr_datapath function clears any error bits set in the UDB regs.
 */
void
clr_datapath(void)
{}

/*
 * The get_udb_errors() function gets the current value of the
 * Datapath Error Registers.
 */
/*ARGSUSED*/
void
get_udb_errors(uint64_t *udbh, uint64_t *udbl)
{
	*udbh = 0;
	*udbl = 0;
}

#else	/* lint */

	ENTRY_NP(ce_err)
	ldxa	[%g0]ASI_AFSR, %g3	! save afsr in g3

	!
	! Check for a UE... From Kevin.Normoyle:
	! We try to switch to the trap for the UE, but since that's
	! a hardware pipeline, we might get to the CE trap before we
	! can switch. The UDB and AFSR registers will have both the
	! UE and CE bits set but the UDB syndrome and the AFAR will be
	! for the UE.
	!
	or	%g0, 1, %g1		! put 1 in g1
	sllx	%g1, 21, %g1		! shift left to <21> afsr UE
	andcc	%g1, %g3, %g0		! check for UE in afsr
	bnz	async_err		! handle the UE, not the CE
	or	%g0, 0x63, %g5		! pass along the CE ttype
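	!
	! (If the branch is taken, async_err sees the CE trap type 0x63
	! in %g5 and handles the UE with that trap type; see async_err's
	! block comment above.)
	!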
	!
	! Disable further CE traps to avoid recursion (stack overflow)
	! and staying above XCALL_PIL for extended periods.
	!
	andcc	%o5, %o2, %o1		! check for UE,CE in lower half
	bz,a	2f			! no error, goto 2f
	nop
	stxa	%o1, [%o4]ASI_SDB_INTR_W ! clear sdb reg UE,CE error bits
	membar	#Sync
2:
	retl
	nop
	SET_SIZE(clr_datapath)

	ENTRY(get_udb_errors)
	set	P_DER_H, %o3
	ldxa	[%o3]ASI_SDB_INTR_R, %o2
	stx	%o2, [%o0]
	set	P_DER_L, %o3
	ldxa	[%o3]ASI_SDB_INTR_R, %o2
	retl
	stx	%o2, [%o1]
	SET_SIZE(get_udb_errors)
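
/*
 * Usage sketch (hypothetical caller; the signature comes from the lint
 * stub above): read both datapath error registers for an error report.
 *
 *	uint64_t udbh, udbl;
 *	get_udb_errors(&udbh, &udbl);
 */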

#endif /* lint */

#if defined(lint)
/*
 * The itlb_rd_entry and dtlb_rd_entry functions return the tag portion of the
 * tte, the virtual address, and the ctxnum of the specified tlb entry. They
 * should only be used in places where you have no choice but to look at the
 * tlb itself.
 *
 * Note: These two routines are required by the Estar "cpr" loadable module.
 */
/*ARGSUSED*/
void
itlb_rd_entry(uint_t entry, tte_t *tte, uint64_t *va_tag)
{}

/*ARGSUSED*/
void
dtlb_rd_entry(uint_t entry, tte_t *tte, uint64_t *va_tag)
{}
#else	/* lint */
/*
 * NB - In Spitfire cpus, when reading a tte from the hardware, we
 * need to clear [42-41] because the general definitions in pte.h
 * define the PA to be [42-13] whereas Spitfire really uses [40-13].
 * When cloning these routines for other cpus the "andn" below is not
 * necessary.
 */
	ENTRY_NP(itlb_rd_entry)
	sllx	%o0, 3, %o0
#if defined(SF_ERRATA_32)
	sethi	%hi(FLUSH_ADDR), %g2
	set	MMU_PCONTEXT, %g1
	stxa	%g0, [%g1]ASI_DMMU		! KCONTEXT
	flush	%g2
#endif
	ldxa	[%o0]ASI_ITLB_ACCESS, %g1
	set	TTE_SPITFIRE_PFNHI_CLEAR, %g2	! spitfire only
	sllx	%g2, TTE_SPITFIRE_PFNHI_SHIFT, %g2 ! see comment above
	andn	%g1, %g2, %g1			! for details
	stx	%g1, [%o1]
	ldxa	[%o0]ASI_ITLB_TAGREAD, %g2

	ENTRY_NP(dtlb_rd_entry)
	sllx	%o0, 3, %o0
#if defined(SF_ERRATA_32)
	sethi	%hi(FLUSH_ADDR), %g2
	set	MMU_PCONTEXT, %g1
	stxa	%g0, [%g1]ASI_DMMU		! KCONTEXT
	flush	%g2
#endif
	ldxa	[%o0]ASI_DTLB_ACCESS, %g1
	set	TTE_SPITFIRE_PFNHI_CLEAR, %g2	! spitfire only
	sllx	%g2, TTE_SPITFIRE_PFNHI_SHIFT, %g2 ! see comment above
	andn	%g1, %g2, %g1			! see itlb_rd_entry
	stx	%g1, [%o1]
	ldxa	[%o0]ASI_DTLB_TAGREAD, %g2
	set	TAGREAD_CTX_MASK, %o4
	andn	%g2, %o4, %o5
	retl
	stx	%o5, [%o2]
	SET_SIZE(dtlb_rd_entry)
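
/*
 * Usage sketch (hypothetical caller; signatures come from the lint
 * stubs above): read the first dtlb entry.
 *
 *	tte_t tte;
 *	uint64_t va_tag;
 *	dtlb_rd_entry(0, &tte, &va_tag);
 */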
#endif /* lint */

#if defined(lint)

/*
 * routines to get and set the LSU register
 */
uint64_t
get_lsu(void)
{
	return ((uint64_t)0);
}

/*ARGSUSED*/
void
set_lsu(uint64_t lsu)
{}

#else	/* lint */

	ENTRY(set_lsu)
	stxa	%o0, [%g0]ASI_LSU	! store to LSU
	retl
	membar	#Sync
	SET_SIZE(set_lsu)

	ENTRY(get_lsu)
	retl
	ldxa	[%g0]ASI_LSU, %o0	! load LSU
	SET_SIZE(get_lsu)
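
/*
 * Usage sketch (hypothetical caller): read-modify-write of the LSU
 * control register, e.g. to temporarily clear the dcache-enable bit
 * (LSU_DC, the same bit DCACHE_FLUSHPAGE tests above).
 *
 *	uint64_t lsu = get_lsu();
 *	set_lsu(lsu & ~(uint64_t)LSU_DC);
 *	...
 *	set_lsu(lsu);
 */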

#endif /* lint */

#ifndef lint
/*
 * Clear the NPT (non-privileged trap) bit in the %tick
 * registers. In an effort to make the change in the
 * tick counter as consistent as possible, we disable
 * all interrupts while we're changing the registers. We also
 * ensure that the read and write instructions are in the same
 * line in the instruction cache.
 */
	ENTRY_NP(cpu_clearticknpt)
	rdpr	%pstate, %g1		/* save processor state */
	andn	%g1, PSTATE_IE, %g3	/* turn off */
	wrpr	%g0, %g3, %pstate	/* interrupts */
	rdpr	%tick, %g2		/* get tick register */
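	/*
	 * NPT is bit 63 of %tick, so a signed test suffices: the brgez
	 * below means "NPT already clear, nothing to do".
	 */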
	brgez,pn %g2, 1f		/* if NPT bit off, we're done */
	mov	1, %g3			/* create mask */
	sllx	%g3, 63, %g3		/* for NPT bit */
	ba,a,pt	%xcc, 2f
	.align	64			/* Align to I$ boundary */
2:
	rdpr	%tick, %g2		/* get tick register */
	bl,a	2b
	add	%o4, 8, %o4

	membar	#Sync
	ldxa	[%g0]ASI_AFSR, %o0	! read accumulated AFSR
	srlx	%o0, P_AFSR_CP_SHIFT, %o2
	btst	1, %o2
	bz	3f
	nop
	ldx	[%o1], %o3
	or	%o3, %o0, %o3		! aggregate AFSR in cpu private
	stx	%o3, [%o1]
3:
	stxa	%o0, [%g0]ASI_AFSR	! clear AFSR
	membar	#Sync
	stxa	%g1, [%g0]ASI_ESTATE_ERR ! Turn error enable back on
	membar	#Sync
	retl
	wrpr	%g0, %o5, %pstate
	SET_SIZE(check_ecache_line)
#endif /* lint */

#if defined(lint)
uint64_t
read_and_clear_afsr(void)
{
	return ((uint64_t)0);
}
#else	/* lint */
	ENTRY(read_and_clear_afsr)
	ldxa	[%g0]ASI_AFSR, %o0
	retl
	stxa	%o0, [%g0]ASI_AFSR	! clear AFSR
	SET_SIZE(read_and_clear_afsr)
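
/*
 * Note the delay-slot idiom: the stxa above executes in the retl's
 * delay slot, clearing the AFSR (by writing back the bits just read)
 * while %o0 still holds the value returned to the caller.
 */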
#endif /* lint */

#if defined(lint)
/* ARGSUSED */
void
scrubphys(uint64_t paddr, int ecache_size)
{
}

#else	/* lint */

/*
 * scrubphys - Pass in the aligned physical memory address that you want
 * to scrub, along with the ecache size.
 *
 * 1) Displacement flush the E$ line corresponding to %addr.
 *    The first ldxa guarantees that the %addr is no longer in
 *    M, O, or E (it goes to I, or to S if an instruction fetch also
 *    happens).
 * 2) "Write" the data using a CAS %addr,%g0,%g0.
 *    The casxa guarantees a transition from I to M or S to M.
 * 3) Displacement flush the E$ line corresponding to %addr.
 *    The second ldxa pushes the M line out of the ecache, into the
 *    writeback buffers, on the way to memory.
 * 4) The "membar #Sync" pushes the cache line out of the writeback
 *    buffers onto the bus, on the way to dram finally.
 *
 * This is a modified version of the algorithm suggested by Gary Lauterbach.
 * In theory the CAS %addr,%g0,%g0 is supposed to mark the addr's cache line
 * as modified, but then we found out that for spitfire, if it misses in the
 * E$ it will probably install as an M, but if it hits in the E$, then it
 * will stay E, if the store doesn't happen. So the first displacement flush
	brgz,pt	%g4, 2b
	dec	%g4

	casxa	[%o0]ASI_MEM, %g0, %g0

	! Flush %o0 from ecache again.
	! Need single displacement flush at offset %o1 this time as
	! the E$ is already in direct map mode.
	ldxa	[%o1 + %o3]ASI_MEM, %g0	! load ecache_flushaddr + alias

	membar	#Sync
	stxa	%g1, [%g0]ASI_UPA_CONFIG ! restore UPA config (DM bits)
	membar	#Sync
#endif /* HUMMINGBIRD */
	wrpr	%g0, %o4, %pstate	! restore earlier pstate register value

	retl
	membar	#Sync			! move the data out of the load buffer
	SET_SIZE(scrubphys)
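
/*
 * The sequence above, sketched as C-like pseudocode (illustrative only;
 * "alias" stands for the displacement-flush address derived from paddr,
 * and casxa() for the CAS %addr,%g0,%g0 through ASI_MEM):
 *
 *	(void) *(volatile uint64_t *)alias;	1) displace paddr's line
 *	casxa(paddr, 0, 0);			2) force the line to M
 *	(void) *(volatile uint64_t *)alias;	3) push the M line out
 *	membar #Sync				4) drain the writeback
 */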

#endif /* lint */

#if defined(lint)

/*
 * clearphys - Pass in the aligned physical memory address that you want
 * to push out of the ecache as a 64 byte block of zeros (zero-filled).
 * Since this routine does not bypass the ecache, it is possible that
 * it could generate a UE error while trying to clear a bad line.
 * This routine clears and restores the error enable flag.
 * TBD - Hummingbird may need similar protection
 */
/* ARGSUSED */
void
clearphys(uint64_t paddr, int ecache_size, int ecache_linesize)
{
}

#else	/* lint */

	ENTRY(clearphys)
	or	%o2, %g0, %o3		! ecache linesize
	or	%o1, %g0, %o2		! ecache size
#ifndef HUMMINGBIRD
	or	%o3, %g0, %o4		! save ecache linesize
	xor	%o0, %o2, %o1		! calculate alias address
	add	%o2, %o2, %o3		! 2 * ecachesize
	sub	%o3, 1, %o3		! -1 == mask
	and	%o1, %o3, %o1		! and with xor'd address
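	!
	! (Note: xor'ing paddr with ecache_size flips an address bit above
	! the E$ index bits, so the alias is a different address that maps
	! to the same direct-mapped line; masking with 2*ecachesize - 1
	! keeps the alias inside the 2*ecache_size flush range.)
	!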
	set	ecache_flushaddr, %o3
	ldx	[%o3], %o3
	or	%o4, %g0, %o2		! saved ecache linesize

	rdpr	%pstate, %o4
	andn	%o4, PSTATE_IE | PSTATE_AM, %o5
	wrpr	%o5, %g0, %pstate	! clear IE, AM bits

	ldxa	[%g0]ASI_ESTATE_ERR, %g1
	stxa	%g0, [%g0]ASI_ESTATE_ERR	! disable errors
	membar	#Sync
	nop
	brgz,pt	%g4, 2b
	dec	%g4

	casxa	[%o0]ASI_MEM, %g0, %g0

	! Flush %o0 from ecache again.
	! Need single displacement flush at offset %o1 this time as
	! the E$ is already in direct map mode.
	ldxa	[%o1 + %o3]ASI_MEM, %g0	! load ecache_flushaddr + alias

	membar	#Sync
	stxa	%g1, [%g0]ASI_UPA_CONFIG ! restore UPA config (DM bits)
	membar	#Sync
#endif /* HUMMINGBIRD... */

	retl
	wrpr	%g0, %o4, %pstate	! restore earlier pstate register value
	SET_SIZE(clearphys)

#endif /* lint */

#if defined(lint)
/* ARGSUSED */
void
flushecacheline(uint64_t paddr, int ecache_size)
{
}

#else	/* lint */
/*
 * flushecacheline - This is a simpler version of scrubphys
 * which simply does a displacement flush of the line in
 * question. This routine is mainly used in handling async
 * errors where we want to get rid of a bad line in the ecache.
 * Note that if the line is modified and it has suffered
 * data corruption - we are guaranteed that the hw will write
 * a UE back to mark the page poisoned.
 */
	ENTRY(flushecacheline)
	or	%o1, %g0, %o2		! put ecache size in %o2
#ifndef HUMMINGBIRD
	xor	%o0, %o2, %o1		! calculate alias address
	add	%o2, %o2, %o3		! 2 * ecachesize in case
					! addr == ecache_flushaddr
	sub	%o3, 1, %o3		! -1 == mask
	and	%o1, %o3, %o1		! and with xor'd address
	set	ecache_flushaddr, %o3
	ldx	[%o3], %o3

3:
	add	%o1, %g2, %o1		! calculate offset in next set
	and	%o1, %g3, %o1		! force offset within aliased range
	cmp	%o1, %o5		! skip loads from physaddr
	be,pn	%ncc, 3b
	nop
	brgz,pt	%g5, 2b
	dec	%g5

	membar	#Sync
	stxa	%g1, [%g0]ASI_ESTATE_ERR	! restore error enable
	membar	#Sync

	stxa	%g4, [%g0]ASI_UPA_CONFIG	! restore UPA config (DM bits)
	membar	#Sync
#endif /* HUMMINGBIRD */
	retl
	wrpr	%g0, %o4, %pstate
	SET_SIZE(flushecacheline)

#endif /* lint */

#if defined(lint)
/* ARGSUSED */
void
ecache_scrubreq_tl1(uint64_t inum, uint64_t dummy)
{
}

#else	/* lint */
/*
 * ecache_scrubreq_tl1 is the crosstrap handler called at ecache_calls_a_sec Hz
 * from the clock CPU. It atomically increments the outstanding request
 * counter and, if there was not already an outstanding request,
 * branches to setsoftint_tl1 to enqueue an intr_vec for the given inum.
 */

	! Register usage:
	!
	! Arguments:
	! %g1 - inum
	!
	! Internal:
	! %g2, %g3, %g5 - scratch
	! %g4 - ptr. to spitfire_scrub_misc ec_scrub_outstanding.
	! %g6 - setsoftint_tl1 address

	ENTRY_NP(ecache_scrubreq_tl1)
	set	SFPR_SCRUB_MISC + EC_SCRUB_OUTSTANDING, %g2
	GET_CPU_PRIVATE_PTR(%g2, %g4, %g5, 1f);
	ld	[%g4], %g2		! cpu's ec_scrub_outstanding.
	set	setsoftint_tl1, %g6
	!
	! no need to use atomic instructions for the following
	! increment - we're at tl1
	!
	add	%g2, 0x1, %g3
	brnz,pn	%g2, 1f			! no need to enqueue more intr_vec
	st	%g3, [%g4]		! delay - store incremented counter
	jmp	%g6			! setsoftint_tl1(%g1) - queue intr_vec
	nop
	! not reached
1:
	retry
	SET_SIZE(ecache_scrubreq_tl1)

#endif /* lint */

#if defined(lint)
/*ARGSUSED*/
void
write_ec_tag_parity(uint32_t id)
{}
#else	/* lint */

/*
 * write_ec_tag_parity() zeroes the ecache tag, marks the state as
 * invalid, and writes good parity to the tag.
 * Input: %o0 = 32 bit E$ index
 */
	ENTRY(write_ec_tag_parity)
	or	%g0, 1, %o4
	sllx	%o4, 39, %o4		! set bit 40 for e$ tag access
	or	%o0, %o4, %o4		! %o4 = ecache addr for tag write

	rdpr	%pstate, %o5
	andn	%o5, PSTATE_IE | PSTATE_AM, %o1
	wrpr	%o1, %g0, %pstate	! clear IE, AM bits

	ldxa	[%g0]ASI_ESTATE_ERR, %g1
	stxa	%g0, [%g0]ASI_ESTATE_ERR ! Turn off Error enable
	membar	#Sync

	ba	1f
	nop
	/*
	 * Align on the ecache boundary in order to force the
	 * critical code section onto the same ecache line.
	 */
	.align	64

1:
	set	S_EC_PARITY, %o3	! clear tag, state invalid
	sllx	%o3, S_ECPAR_SHIFT, %o3	! and with good tag parity
	stxa	%o3, [%g0]ASI_EC_DIAG	! update with the above info
	stxa	%g0, [%o4]ASI_EC_W
	membar	#Sync

	stxa	%g1, [%g0]ASI_ESTATE_ERR ! Turn error enable back on
	membar	#Sync
	retl
	wrpr	%g0, %o5, %pstate
	SET_SIZE(write_ec_tag_parity)

#endif /* lint */

#if defined(lint)
/*ARGSUSED*/
void
write_hb_ec_tag_parity(uint32_t id)
{}
#else	/* lint */

/*
 * write_hb_ec_tag_parity() zeroes the ecache tag, marks the state as
 * invalid, and writes good parity to the tag.
 * Input: %o0 = 32 bit E$ index
 */
	ENTRY(write_hb_ec_tag_parity)
	or	%g0, 1, %o4
	sllx	%o4, 39, %o4		! set bit 40 for e$ tag access
	or	%o0, %o4, %o4		! %o4 = ecache addr for tag write

	rdpr	%pstate, %o5
	andn	%o5, PSTATE_IE | PSTATE_AM, %o1
	wrpr	%o1, %g0, %pstate	! clear IE, AM bits

	ldxa	[%g0]ASI_ESTATE_ERR, %g1
	stxa	%g0, [%g0]ASI_ESTATE_ERR ! Turn off Error enable
	membar	#Sync

	ba	1f
	nop
	.align	64
1:
#ifdef HUMMINGBIRD
	set	HB_EC_PARITY, %o3	! clear tag, state invalid
	sllx	%o3, HB_ECPAR_SHIFT, %o3 ! and with good tag parity
#else /* !HUMMINGBIRD */
	set	SB_EC_PARITY, %o3	! clear tag, state invalid
	sllx	%o3, SB_ECPAR_SHIFT, %o3 ! and with good tag parity
#endif /* !HUMMINGBIRD */

	stxa	%o3, [%g0]ASI_EC_DIAG	! update with the above info
	stxa	%g0, [%o4]ASI_EC_W
	membar	#Sync

	stxa	%g1, [%g0]ASI_ESTATE_ERR ! Turn error enable back on
	membar	#Sync
	retl
	wrpr	%g0, %o5, %pstate
	SET_SIZE(write_hb_ec_tag_parity)

#endif /* lint */

#define	VIS_BLOCKSIZE	64

#if defined(lint)

/*ARGSUSED*/
int
dtrace_blksuword32(uintptr_t addr, uint32_t *data, int tryagain)
{ return (0); }

#else

	ENTRY(dtrace_blksuword32)
	save	%sp, -SA(MINFRAME + 4), %sp
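	! (The extra 4 bytes in the frame are the spill slot used below to
	! preserve %f0 across the FPU use.)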

	rdpr	%pstate, %l1
	andn	%l1, PSTATE_IE, %l2	! disable interrupts to
	wrpr	%g0, %l2, %pstate	! protect our FPU diddling

	rd	%fprs, %l0
	andcc	%l0, FPRS_FEF, %g0
	bz,a,pt	%xcc, 1f		! if the fpu is disabled
	wr	%g0, FPRS_FEF, %fprs	! ... enable the fpu

	st	%f0, [%fp + STACK_BIAS - 4] ! save %f0 to the stack
1:
	set	0f, %l5
	/*
	 * We're about to write a block full of either total garbage
	 * (not kernel data, don't worry) or user floating-point data
	 * (so it only _looks_ like garbage).
	 */

	ld	[%fp + STACK_BIAS - 4], %f0 ! restore %f0
1:

	wrpr	%g0, %l1, %pstate	! restore interrupts

	/*
	 * If tryagain is set (%i2) we tail-call dtrace_blksuword32_err()
	 * which deals with watchpoints. Otherwise, just return -1.
	 */
	brnz,pt	%i2, 1f
	nop
	ret
	restore	%g0, -1, %o0
1:
	call	dtrace_blksuword32_err
	restore

	SET_SIZE(dtrace_blksuword32)

#endif /* lint */