6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26 #pragma ident "%Z%%M% %I% %E% SMI"
27
28 #if !defined(lint)
29 #include "assym.h"
30 #endif /* lint */
31
32 #include <sys/asm_linkage.h>
33 #include <sys/mmu.h>
34 #include <vm/hat_sfmmu.h>
35 #include <sys/machparam.h>
36 #include <sys/machcpuvar.h>
37 #include <sys/machthread.h>
38 #include <sys/privregs.h>
40 #include <sys/machasi.h>
41 #include <sys/trap.h>
42 #include <sys/spitregs.h>
43 #include <sys/xc_impl.h>
44 #include <sys/intreg.h>
45 #include <sys/async.h>
46
47 #ifdef TRAPTRACE
48 #include <sys/traptrace.h>
49 #endif /* TRAPTRACE */
50
51 #ifndef lint
52
53 /* BEGIN CSTYLED */
54 #define DCACHE_FLUSHPAGE(arg1, arg2, tmp1, tmp2, tmp3) \
55 ldxa [%g0]ASI_LSU, tmp1 ;\
56 btst LSU_DC, tmp1 /* is dcache enabled? */ ;\
57 bz,pn %icc, 1f ;\
58 sethi %hi(dcache_linesize), tmp1 ;\
59 ld [tmp1 + %lo(dcache_linesize)], tmp1 ;\
60 sethi %hi(dflush_type), tmp2 ;\
61 ld [tmp2 + %lo(dflush_type)], tmp2 ;\
62 cmp tmp2, FLUSHPAGE_TYPE ;\
63 be,pt %icc, 2f ;\
64 sllx arg1, SF_DC_VBIT_SHIFT, arg1 /* tag to compare */ ;\
65 sethi %hi(dcache_size), tmp3 ;\
66 ld [tmp3 + %lo(dcache_size)], tmp3 ;\
67 cmp tmp2, FLUSHMATCH_TYPE ;\
68 be,pt %icc, 3f ;\
69 nop ;\
70 /* \
71 * flushtype = FLUSHALL_TYPE, flush the whole thing \
72 * tmp3 = cache size \
330 * we are done flushing it. Keep interrupts off while flushing in this
331 * manner.
332 *
333 * We flush the entire ecache by starting at one end and loading each
334 * successive ecache line for the 2*ecache-size range. We have to repeat
335 * the flush operation to guarantee that the entire ecache has been
336 * flushed.
337 *
338 * For flushing a specific physical address, we start at the aliased
339 * address and load at set-size stride, wrapping around at 2*ecache-size
340 * boundary and skipping the physical address being flushed. It takes
341 * 10 loads to guarantee that the physical address has been flushed.
342 */
343
344 #define HB_ECACHE_FLUSH_CNT 2
345 #define HB_PHYS_FLUSH_CNT 10 /* #loads to flush specific paddr */
346 #endif /* HUMMINGBIRD */
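/*
 * Purely illustrative (not part of the build): a rough C sketch of the two
 * Hummingbird flush strategies described above, assuming hypothetical
 * ld_phys() and alias_of() helpers (cacheable load from a physical address,
 * aliased starting offset) and ec_set_size for the e$ set size.
 *
 *	for (pass = 0; pass < HB_ECACHE_FLUSH_CNT; pass++)
 *		for (off = 0; off < 2 * ecache_size; off += ecache_linesize)
 *			(void) ld_phys(ecache_flushaddr + off);
 *
 *	off = alias_of(paddr);
 *	for (n = 0; n < HB_PHYS_FLUSH_CNT; off += ec_set_size) {
 *		off &= (2 * ecache_size - 1);		wrap around
 *		if (off == (paddr & (2 * ecache_size - 1)))
 *			continue;			skip paddr itself
 *		(void) ld_phys(ecache_flushaddr + off);
 *		n++;				count only the actual loads
 *	}
 */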
347
348 /* END CSTYLED */
349
350 #endif /* !lint */
351
352 /*
353 * Spitfire MMU and Cache operations.
354 */
355
356 #if defined(lint)
357
358 /*ARGSUSED*/
359 void
360 vtag_flushpage(caddr_t vaddr, uint64_t sfmmup)
361 {}
362
363 /*ARGSUSED*/
364 void
365 vtag_flushall(void)
366 {}
367
368 /*ARGSUSED*/
369 void
370 vtag_flushall_uctxs(void)
371 {}
372
373 /*ARGSUSED*/
374 void
375 vtag_flushpage_tl1(uint64_t vaddr, uint64_t sfmmup)
376 {}
377
378 /*ARGSUSED*/
379 void
380 vtag_flush_pgcnt_tl1(uint64_t vaddr, uint64_t sfmmup_pgcnt)
381 {}
382
383 /*ARGSUSED*/
384 void
385 vtag_flushall_tl1(uint64_t dummy1, uint64_t dummy2)
386 {}
387
388 /*ARGSUSED*/
389 void
390 vac_flushpage(pfn_t pfnum, int vcolor)
391 {}
392
393 /*ARGSUSED*/
394 void
395 vac_flushpage_tl1(uint64_t pfnum, uint64_t vcolor)
396 {}
397
398 /*ARGSUSED*/
399 void
400 init_mondo(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
401 {}
402
403 /*ARGSUSED*/
404 void
405 init_mondo_nocheck(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
406 {}
407
408 /*ARGSUSED*/
409 void
410 flush_instr_mem(caddr_t vaddr, size_t len)
411 {}
412
413 /*ARGSUSED*/
414 void
415 flush_ecache(uint64_t physaddr, size_t size, size_t linesize)
416 {}
417
418 /*ARGSUSED*/
419 void
420 get_ecache_dtag(uint32_t ecache_idx, uint64_t *ecache_data,
421 uint64_t *ecache_tag, uint64_t *oafsr, uint64_t *acc_afsr)
422 {}
423
424 /* ARGSUSED */
425 uint64_t
426 get_ecache_tag(uint32_t id, uint64_t *nafsr, uint64_t *acc_afsr)
427 {
428 return ((uint64_t)0);
429 }
430
431 /* ARGSUSED */
432 uint64_t
433 check_ecache_line(uint32_t id, uint64_t *acc_afsr)
434 {
435 return ((uint64_t)0);
436 }
437
438 /*ARGSUSED*/
439 void
440 kdi_flush_idcache(int dcache_size, int dcache_lsize,
441 int icache_size, int icache_lsize)
442 {}
443
444 #else /* lint */
445
446 ENTRY_NP(vtag_flushpage)
447 /*
448 * flush page from the tlb
449 *
450 * %o0 = vaddr
451 * %o1 = sfmmup
452 */
453 rdpr %pstate, %o5
454 #ifdef DEBUG
455 PANIC_IF_INTR_DISABLED_PSTR(%o5, sfdi_label1, %g1)
456 #endif /* DEBUG */
457 /*
458 * disable ints
459 */
460 andn %o5, PSTATE_IE, %o4
461 wrpr %o4, 0, %pstate
462
463 /*
464 * Then, blow out the tlb
465 * Interrupts are disabled to prevent the secondary ctx register
939 stx %i0, [%i1] ! save the AFSR
940
941 brz %i4, 2f ! acc_afsr == NULL?
942 nop
943 ldx [%i4], %g4
944 or %g4, %i0, %g4 ! aggregate AFSR in cpu private
945 stx %g4, [%i4]
946 2:
947 add %i2, 8, %i2
948 cmp %i2, 64
949 bl,a 1b
950 add %i1, 8, %i1
951 stxa %i0, [%g0]ASI_AFSR ! clear AFSR
952 membar #Sync
953 stxa %g1, [%g0]ASI_ESTATE_ERR ! restore error enable
954 membar #Sync
955 wrpr %g0, %i5, %pstate
956 ret
957 restore
958 SET_SIZE(get_ecache_dtag)
959 #endif /* lint */
960
961 #if defined(lint)
962 /*
963 * The ce_err function handles trap type 0x63 (corrected_ECC_error) at tl=0.
964  * Steps: 1. Get AFSR 2. Get AFAR <40:4> 3. Get datapath error status
965  * 4. Clear datapath error bit(s) 5. Clear AFSR error bit
966  * 6. Package data in %g2 and %g3 7. Call cpu_ce_error via sys_trap
967 * %g2: [ 52:43 UDB lower | 42:33 UDB upper | 32:0 afsr ] - arg #3/arg #1
968 * %g3: [ 40:4 afar ] - sys_trap->have_win: arg #4/arg #2
969 */
970 void
971 ce_err(void)
972 {}
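
/*
 * Purely illustrative: the %g2 layout described above, assembled in C with
 * hypothetical udb_hi/udb_lo variables holding the 10-bit UDB error fields:
 *
 *	uint64_t g2 = ((uint64_t)udb_lo << 43) | ((uint64_t)udb_hi << 33) |
 *	    (afsr & 0x1ffffffffULL);		AFSR bits <32:0>
 */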
973
974 void
975 ce_err_tl1(void)
976 {}
977
978
979 /*
980 * The async_err function handles trap types 0x0A (instruction_access_error)
981 * and 0x32 (data_access_error) at TL = 0 and TL > 0. When we branch here,
982 * %g5 will have the trap type (with 0x200 set if we're at TL > 0).
983 *
984  * Steps: 1. Get AFSR 2. Get AFAR <40:4> 3. If not a UE error, skip the UDB
985  * registers. 4. Else get and clear datapath error bit(s) 5. Clear AFSR error
986  * bits 6. Package data in %g2 and %g3 7. Disable all cpu errors, because the
987  * trap is likely to be fatal 8. Call cpu_async_error via sys_trap
988 *
989 * %g3: [ 63:53 tt | 52:43 UDB_L | 42:33 UDB_U | 32:0 afsr ] - arg #3/arg #1
990 * %g2: [ 40:4 afar ] - sys_trap->have_win: arg #4/arg #2
991 */
992 void
993 async_err(void)
994 {}
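
/*
 * Purely illustrative: the %g3 layout described above in C, with tt the
 * trap type placed at <63:53> (hypothetical variable names):
 *
 *	uint64_t g3 = ((uint64_t)tt << 53) | ((uint64_t)udb_lo << 43) |
 *	    ((uint64_t)udb_hi << 33) | (afsr & 0x1ffffffffULL);
 */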
995
996 /*
997 * The clr_datapath function clears any error bits set in the UDB regs.
998 */
999 void
1000 clr_datapath(void)
1001 {}
1002
1003 /*
1004 * The get_udb_errors() function gets the current value of the
1005 * Datapath Error Registers.
1006 */
1007 /*ARGSUSED*/
1008 void
1009 get_udb_errors(uint64_t *udbh, uint64_t *udbl)
1010 {
1011 *udbh = 0;
1012 *udbl = 0;
1013 }
1014
1015 #else /* lint */
1016
1017 ENTRY_NP(ce_err)
1018 ldxa [%g0]ASI_AFSR, %g3 ! save afsr in g3
1019
1020 !
1021 ! Check for a UE... From Kevin.Normoyle:
1022 ! We try to switch to the trap for the UE, but since that's
1023 ! a hardware pipeline, we might get to the CE trap before we
1024 ! can switch. The UDB and AFSR registers will have both the
1025 ! UE and CE bits set but the UDB syndrome and the AFAR will be
1026 ! for the UE.
1027 !
1028 or %g0, 1, %g1 ! put 1 in g1
1029 sllx %g1, 21, %g1 ! shift left to <21> afsr UE
1030 andcc %g1, %g3, %g0 ! check for UE in afsr
1031 bnz async_err ! handle the UE, not the CE
1032 or %g0, 0x63, %g5 ! pass along the CE ttype
1033 !
1034 ! Disable further CE traps to avoid recursion (stack overflow)
1035 ! and staying above XCALL_PIL for extended periods.
1036 !
1091 nop
1092 #endif
1093 SET_SIZE(ce_err_tl1)
1094
1095 #ifdef TRAPTRACE
1096 .celevel1msg:
1097 .asciz "Softerror with trap tracing at tl1: AFAR 0x%08x.%08x AFSR 0x%08x.%08x";
1098
1099 ENTRY_NP(ce_trap_tl1)
1100 ! upper 32 bits of AFSR already in o3
1101 mov %o4, %o0 ! save AFAR upper 32 bits
1102 mov %o2, %o4 ! lower 32 bits of AFSR
1103 mov %o1, %o2 ! lower 32 bits of AFAR
1104 mov %o0, %o1 ! upper 32 bits of AFAR
1105 set .celevel1msg, %o0
1106 call panic
1107 nop
1108 SET_SIZE(ce_trap_tl1)
1109 #endif
1110
1111 !
1112 ! async_err is the assembly glue code to get us from the actual trap
1113 ! into the CPU module's C error handler. Note that we also branch
1114 ! here from ce_err() above.
1115 !
1116 ENTRY_NP(async_err)
1117 stxa %g0, [%g0]ASI_ESTATE_ERR ! disable ecc and other cpu errors
1118 membar #Sync ! membar sync required
1119
1120 ldxa [%g0]ASI_AFSR, %g3 ! save afsr in g3
1121 ldxa [%g0]ASI_AFAR, %g2 ! save afar in g2
1122
1123 sllx %g5, 53, %g5 ! move ttype to <63:53>
1124 or %g3, %g5, %g3 ! or to afsr in g3
1125
1126 or %g0, 1, %g1 ! put 1 in g1
1127 sllx %g1, 21, %g1 ! shift left to <21> afsr UE
1128 andcc %g1, %g3, %g0 ! check for UE in afsr
1129 bz,a,pn %icc, 2f ! if !UE skip sdb read/clear
1130 nop
1131
1132 set P_DER_H, %g4 ! put P_DER_H in g4
1133 ldxa [%g4]ASI_SDB_INTR_R, %g5 ! read sdb upper half into g5
1134 or %g0, 1, %g6 ! put 1 in g6
1135 sllx %g6, 9, %g6 ! shift g6 to <9> sdb UE
1169 ! save destination routine is in g1
1170 ldxa [%g0]ASI_AFAR, %g2 ! read afar
1171 ldxa [%g0]ASI_AFSR, %g3 ! read afsr
1172 set P_DER_H, %g4 ! put P_DER_H in g4
1173 ldxa [%g4]ASI_SDB_INTR_R, %g5 ! read sdb upper half into g5
1174 sllx %g5, 33, %g5 ! shift upper bits to <42:33>
1175 or %g3, %g5, %g3 ! or with afsr bits
1176 set P_DER_L, %g4 ! put P_DER_L in g4
1177 ldxa [%g4]ASI_SDB_INTR_R, %g5 ! read sdb lower half into g5
1178 sllx %g5, 43, %g5 ! shift lower bits to <52:43>
1179 or %g3, %g5, %g3 ! or with afsr bits
1180
1181 RESET_USER_RTT_REGS(%g4, %g5, dis_err_panic1_resetskip)
1182 dis_err_panic1_resetskip:
1183
1184 sethi %hi(sys_trap), %g5
1185 jmp %g5 + %lo(sys_trap) ! goto sys_trap
1186 sub %g0, 1, %g4
1187 SET_SIZE(dis_err_panic1)
1188
1189 ENTRY(clr_datapath)
1190 set P_DER_H, %o4 ! put P_DER_H in o4
1191 ldxa [%o4]ASI_SDB_INTR_R, %o5 ! read sdb upper half into o5
1192 or %g0, 0x3, %o2 ! put 0x3 in o2
1193 sllx %o2, 8, %o2 ! shift o2 to <9:8> sdb
1194 andcc %o5, %o2, %o1 ! check for UE,CE in upper half
1195 bz,a 1f ! no error, goto 1f
1196 nop
1197 stxa %o1, [%o4]ASI_SDB_INTR_W ! clear sdb reg UE,CE error bits
1198 membar #Sync ! membar sync required
1199 1:
1200 set P_DER_L, %o4 ! put P_DER_L in o4
1201 ldxa [%o4]ASI_SDB_INTR_R, %o5 ! read sdb lower half into o5
1202 andcc %o5, %o2, %o1 ! check for UE,CE in lower half
1203 bz,a 2f ! no error, goto 2f
1204 nop
1205 stxa %o1, [%o4]ASI_SDB_INTR_W ! clear sdb reg UE,CE error bits
1206 membar #Sync
1207 2:
1208 retl
1209 nop
1210 SET_SIZE(clr_datapath)
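
/*
 * Purely illustrative: clr_datapath above in rough C, assuming hypothetical
 * rd_sdb()/wr_sdb() accessors for the ASI_SDB_INTR_R/W datapath registers:
 *
 *	uint64_t err = rd_sdb(P_DER_H) & (0x3 << 8);	UE,CE bits <9:8>
 *	if (err != 0)
 *		wr_sdb(P_DER_H, err);			write back to clear
 *	err = rd_sdb(P_DER_L) & (0x3 << 8);
 *	if (err != 0)
 *		wr_sdb(P_DER_L, err);
 */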
1211
1212 ENTRY(get_udb_errors)
1213 set P_DER_H, %o3
1214 ldxa [%o3]ASI_SDB_INTR_R, %o2
1215 stx %o2, [%o0]
1216 set P_DER_L, %o3
1217 ldxa [%o3]ASI_SDB_INTR_R, %o2
1218 retl
1219 stx %o2, [%o1]
1220 SET_SIZE(get_udb_errors)
1221
1222 #endif /* lint */
1223
1224 #if defined(lint)
1225 /*
1226 * The itlb_rd_entry and dtlb_rd_entry functions return the tag portion of the
1227 * tte, the virtual address, and the ctxnum of the specified tlb entry. They
1228 * should only be used in places where you have no choice but to look at the
1229 * tlb itself.
1230 *
1231 * Note: These two routines are required by the Estar "cpr" loadable module.
1232 */
1233 /*ARGSUSED*/
1234 void
1235 itlb_rd_entry(uint_t entry, tte_t *tte, uint64_t *va_tag)
1236 {}
1237
1238 /*ARGSUSED*/
1239 void
1240 dtlb_rd_entry(uint_t entry, tte_t *tte, uint64_t *va_tag)
1241 {}
1242 #else /* lint */
1243 /*
1244 * NB - In Spitfire cpus, when reading a tte from the hardware, we
1245 * need to clear [42-41] because the general definitions in pte.h
1246 * define the PA to be [42-13] whereas Spitfire really uses [40-13].
1247 * When cloning these routines for other cpus the "andn" below is not
1248 * necessary.
1249 */
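/*
 * Purely illustrative: the "andn" in the routines below corresponds to this
 * C expression, clearing PA bits <42:41> of the tte:
 *
 *	tte &= ~((uint64_t)TTE_SPITFIRE_PFNHI_CLEAR << TTE_SPITFIRE_PFNHI_SHIFT);
 */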
1250 ENTRY_NP(itlb_rd_entry)
1251 sllx %o0, 3, %o0
1252 #if defined(SF_ERRATA_32)
1253 sethi %hi(FLUSH_ADDR), %g2
1254 set MMU_PCONTEXT, %g1
1255 stxa %g0, [%g1]ASI_DMMU ! KCONTEXT
1256 flush %g2
1257 #endif
1258 ldxa [%o0]ASI_ITLB_ACCESS, %g1
1259 set TTE_SPITFIRE_PFNHI_CLEAR, %g2 ! spitfire only
1260 sllx %g2, TTE_SPITFIRE_PFNHI_SHIFT, %g2 ! see comment above
1261 andn %g1, %g2, %g1 ! for details
1262 stx %g1, [%o1]
1269
1270 ENTRY_NP(dtlb_rd_entry)
1271 sllx %o0, 3, %o0
1272 #if defined(SF_ERRATA_32)
1273 sethi %hi(FLUSH_ADDR), %g2
1274 set MMU_PCONTEXT, %g1
1275 stxa %g0, [%g1]ASI_DMMU ! KCONTEXT
1276 flush %g2
1277 #endif
1278 ldxa [%o0]ASI_DTLB_ACCESS, %g1
1279 set TTE_SPITFIRE_PFNHI_CLEAR, %g2 ! spitfire only
1280 sllx %g2, TTE_SPITFIRE_PFNHI_SHIFT, %g2 ! see comment above
1281 andn %g1, %g2, %g1 ! for details
1282 stx %g1, [%o1]
1283 ldxa [%o0]ASI_DTLB_TAGREAD, %g2
1284 set TAGREAD_CTX_MASK, %o4
1285 andn %g2, %o4, %o5
1286 retl
1287 stx %o5, [%o2]
1288 SET_SIZE(dtlb_rd_entry)
1289 #endif /* lint */
1290
1291 #if defined(lint)
1292
1293 /*
1294 * routines to get and set the LSU register
1295 */
1296 uint64_t
1297 get_lsu(void)
1298 {
1299 return ((uint64_t)0);
1300 }
1301
1302 /*ARGSUSED*/
1303 void
1304 set_lsu(uint64_t lsu)
1305 {}
1306
1307 #else /* lint */
1308
1309 ENTRY(set_lsu)
1310 stxa %o0, [%g0]ASI_LSU ! store to LSU
1311 retl
1312 membar #Sync
1313 SET_SIZE(set_lsu)
1314
1315 ENTRY(get_lsu)
1316 retl
1317 ldxa [%g0]ASI_LSU, %o0 ! load LSU
1318 SET_SIZE(get_lsu)
1319
1320 #endif /* lint */
1321
1322 #ifndef lint
1323 /*
1324 * Clear the NPT (non-privileged trap) bit in the %tick
1325 * registers. In an effort to make the change in the
1326 * tick counter as consistent as possible, we disable
1327 * all interrupts while we're changing the registers. We also
1328 * ensure that the read and write instructions are in the same
1329 * line in the instruction cache.
1330 */
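/*
 * Purely illustrative: with hypothetical rd_tick()/wr_tick() accessors, the
 * routine below amounts to
 *
 *	if (rd_tick() & (1ULL << 63))			NPT bit set?
 *		wr_tick(rd_tick() & ~(1ULL << 63));
 *
 * executed with interrupts disabled and with the read and write kept in a
 * single I$ line.
 */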
1331 ENTRY_NP(cpu_clearticknpt)
1332 rdpr %pstate, %g1 /* save processor state */
1333 andn %g1, PSTATE_IE, %g3 /* turn off */
1334 wrpr %g0, %g3, %pstate /* interrupts */
1335 rdpr %tick, %g2 /* get tick register */
1336 brgez,pn %g2, 1f /* if NPT bit off, we're done */
1337 mov 1, %g3 /* create mask */
1338 sllx %g3, 63, %g3 /* for NPT bit */
1339 ba,a,pt %xcc, 2f
1340 .align 64 /* Align to I$ boundary */
1341 2:
1342 rdpr %tick, %g2 /* get tick register */
1441 bl,a 2b
1442 add %o4, 8, %o4
1443
1444 membar #Sync
1445 ldxa [%g0]ASI_AFSR, %o0 ! read accumulated AFSR
1446 srlx %o0, P_AFSR_CP_SHIFT, %o2
1447 btst 1, %o2
1448 bz 3f
1449 nop
1450 ldx [%o1], %o3
1451 or %o3, %o0, %o3 ! aggregate AFSR in cpu private
1452 stx %o3, [%o1]
1453 3:
1454 stxa %o0, [%g0]ASI_AFSR ! clear AFSR
1455 membar #Sync
1456 stxa %g1, [%g0]ASI_ESTATE_ERR ! Turn error enable back on
1457 membar #Sync
1458 retl
1459 wrpr %g0, %o5, %pstate
1460 SET_SIZE(check_ecache_line)
1461 #endif /* lint */
1462
1463 #if defined(lint)
1464 uint64_t
1465 read_and_clear_afsr()
1466 {
1467 return ((uint64_t)0);
1468 }
1469 #else /* lint */
1470 ENTRY(read_and_clear_afsr)
1471 ldxa [%g0]ASI_AFSR, %o0
1472 retl
1473 stxa %o0, [%g0]ASI_AFSR ! clear AFSR
1474 SET_SIZE(read_and_clear_afsr)
1475 #endif /* lint */
1476
1477 #if defined(lint)
1478 /* ARGSUSED */
1479 void
1480 scrubphys(uint64_t paddr, int ecache_size)
1481 {
1482 }
1483
1484 #else /* lint */
1485
1486 /*
1487 * scrubphys - Pass in the aligned physical memory address that you want
1488 * to scrub, along with the ecache size.
1489 *
1490 * 1) Displacement flush the E$ line corresponding to %addr.
1491 * The first ldxa guarantees that the %addr is no longer in
1492  * M, O, or E (goes to I, or to S if an instruction fetch also happens).
1493 * 2) "Write" the data using a CAS %addr,%g0,%g0.
1494 * The casxa guarantees a transition from I to M or S to M.
1495 * 3) Displacement flush the E$ line corresponding to %addr.
1496 * The second ldxa pushes the M line out of the ecache, into the
1497 * writeback buffers, on the way to memory.
1498 * 4) The "membar #Sync" pushes the cache line out of the writeback
1499 * buffers onto the bus, on the way to dram finally.
1500 *
1501 * This is a modified version of the algorithm suggested by Gary Lauterbach.
1502 * In theory the CAS %addr,%g0,%g0 is supposed to mark the addr's cache line
1503 * as modified, but then we found out that for spitfire, if it misses in the
1504 * E$ it will probably install as an M, but if it hits in the E$, then it
1505 * will stay E, if the store doesn't happen. So the first displacement flush
1592 brgz,pt %g4, 2b
1593 dec %g4
1594
1595 casxa [%o0]ASI_MEM, %g0, %g0
1596
1597 ! Flush %o0 from ecache again.
1598 ! Need single displacement flush at offset %o1 this time as
1599 ! the E$ is already in direct map mode.
1600 ldxa [%o1 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
1601
1602 membar #Sync
1603 stxa %g1, [%g0]ASI_UPA_CONFIG ! restore UPA config (DM bits)
1604 membar #Sync
1605 #endif /* HUMMINGBIRD */
1606 wrpr %g0, %o4, %pstate ! restore earlier pstate register value
1607
1608 retl
1609 membar #Sync ! move the data out of the load buffer
1610 SET_SIZE(scrubphys)
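
/*
 * Purely illustrative: the scrubphys sequence described above (steps 1-4)
 * in rough C, with hypothetical ld_phys()/casxa_phys() wrappers for the
 * ASI_MEM accesses and alias as computed at the top of the routine:
 *
 *	(void) ld_phys(alias);		1) line leaves M, O, or E
 *	casxa_phys(paddr, 0, 0);	2) transition to M, data rewritten
 *	(void) ld_phys(alias);		3) push the M line to the writeback
 *					   buffers
 *	membar_sync();			4) and on out to dram
 */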
1611
1612 #endif /* lint */
1613
1614 #if defined(lint)
1615
1616 /*
1617 * clearphys - Pass in the aligned physical memory address that you want
1618  * to push out from the ecache, as a zero-filled 64 byte block.
1619  * Since this routine does not bypass the ecache, it is possible that
1620  * it could generate a UE error while trying to clear a bad line.
1621 * This routine clears and restores the error enable flag.
1622 * TBD - Hummingbird may need similar protection
1623 */
1624 /* ARGSUSED */
1625 void
1626 clearphys(uint64_t paddr, int ecache_size, int ecache_linesize)
1627 {
1628 }
1629
1630 #else /* lint */
1631
1632 ENTRY(clearphys)
1633 or %o2, %g0, %o3 ! ecache linesize
1634 or %o1, %g0, %o2 ! ecache size
1635 #ifndef HUMMINGBIRD
1636 or %o3, %g0, %o4 ! save ecache linesize
1637 xor %o0, %o2, %o1 ! calculate alias address
1638 add %o2, %o2, %o3 ! 2 * ecachesize
1639 sub %o3, 1, %o3 ! -1 == mask
1640 and %o1, %o3, %o1 ! and with xor'd address
1641 set ecache_flushaddr, %o3
1642 ldx [%o3], %o3
1643 or %o4, %g0, %o2 ! saved ecache linesize
1644
1645 rdpr %pstate, %o4
1646 andn %o4, PSTATE_IE | PSTATE_AM, %o5
1647 wrpr %o5, %g0, %pstate ! clear IE, AM bits
1648
1649 ldxa [%g0]ASI_ESTATE_ERR, %g1
1650 stxa %g0, [%g0]ASI_ESTATE_ERR ! disable errors
1651 membar #Sync
1742 nop
1743 brgz,pt %g4, 2b
1744 dec %g4
1745
1746 casxa [%o0]ASI_MEM, %g0, %g0
1747
1748 ! Flush %o0 from ecache again.
1749 ! Need single displacement flush at offset %o1 this time as
1750 ! the E$ is already in direct map mode.
1751 ldxa [%o1 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
1752
1753 membar #Sync
1754 stxa %g1, [%g0]ASI_UPA_CONFIG ! restore UPA config (DM bits)
1755 membar #Sync
1756 #endif /* HUMMINGBIRD... */
1757
1758 retl
1759 wrpr %g0, %o4, %pstate ! restore earlier pstate register value
1760 SET_SIZE(clearphys)
1761
1762 #endif /* lint */
1763
1764 #if defined(lint)
1765 /* ARGSUSED */
1766 void
1767 flushecacheline(uint64_t paddr, int ecache_size)
1768 {
1769 }
1770
1771 #else /* lint */
1772 /*
1773 * flushecacheline - This is a simpler version of scrubphys
1774 * which simply does a displacement flush of the line in
1775 * question. This routine is mainly used in handling async
1776 * errors where we want to get rid of a bad line in ecache.
1777 * Note that if the line is modified and it has suffered
1778  * data corruption - we are guaranteed that the hw will write
1779 * a UE back to mark the page poisoned.
1780 */
1781 ENTRY(flushecacheline)
1782 or %o1, %g0, %o2 ! put ecache size in %o2
1783 #ifndef HUMMINGBIRD
1784 xor %o0, %o2, %o1 ! calculate alias address
1785 add %o2, %o2, %o3 ! 2 * ecachesize in case
1786 ! addr == ecache_flushaddr
1787 sub %o3, 1, %o3 ! -1 == mask
1788 and %o1, %o3, %o1 ! and with xor'd address
1789 set ecache_flushaddr, %o3
1790 ldx [%o3], %o3
1791
1865 3:
1866 add %o1, %g2, %o1 ! calculate offset in next set
1867 and %o1, %g3, %o1 ! force offset within aliased range
1868 cmp %o1, %o5 ! skip loads from physaddr
1869 be,pn %ncc, 3b
1870 nop
1871 brgz,pt %g5, 2b
1872 dec %g5
1873
1874 membar #Sync
1875 stxa %g1, [%g0]ASI_ESTATE_ERR ! restore error enable
1876 membar #Sync
1877
1878 stxa %g4, [%g0]ASI_UPA_CONFIG ! restore UPA config (DM bits)
1879 membar #Sync
1880 #endif /* HUMMINGBIRD */
1881 retl
1882 wrpr %g0, %o4, %pstate
1883 SET_SIZE(flushecacheline)
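
/*
 * Purely illustrative: the alias computation used above by flushecacheline
 * (and by scrubphys/clearphys), written in C; ld_phys() is a hypothetical
 * helper doing a cacheable load from a physical address:
 *
 *	alias = (paddr ^ ecache_size) & (2 * ecache_size - 1);
 *	(void) ld_phys(ecache_flushaddr + alias);	displacement flush
 *
 * XORing with the ecache size picks an offset with the same e$ index as
 * paddr but a different address, which covers the corner case where
 * paddr == ecache_flushaddr.
 */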
1884
1885 #endif /* lint */
1886
1887 #if defined(lint)
1888 /* ARGSUSED */
1889 void
1890 ecache_scrubreq_tl1(uint64_t inum, uint64_t dummy)
1891 {
1892 }
1893
1894 #else /* lint */
1895 /*
1896 * ecache_scrubreq_tl1 is the crosstrap handler called at ecache_calls_a_sec Hz
1897 * from the clock CPU. It atomically increments the outstanding request
1898 * counter and, if there was not already an outstanding request,
1899 * branches to setsoftint_tl1 to enqueue an intr_vec for the given inum.
1900 */
1901
1902 ! Register usage:
1903 !
1904 ! Arguments:
1905 ! %g1 - inum
1906 !
1907 ! Internal:
1908 ! %g2, %g3, %g5 - scratch
1909 ! %g4 - ptr. to spitfire_scrub_misc ec_scrub_outstanding.
1910 ! %g6 - setsoftint_tl1 address
1911
1912 ENTRY_NP(ecache_scrubreq_tl1)
1913 set SFPR_SCRUB_MISC + EC_SCRUB_OUTSTANDING, %g2
1914 GET_CPU_PRIVATE_PTR(%g2, %g4, %g5, 1f);
1915 ld [%g4], %g2 ! cpu's ec_scrub_outstanding.
1916 set setsoftint_tl1, %g6
1917 !
1918 ! no need to use atomic instructions for the following
1919 ! increment - we're at tl1
1920 !
1921 add %g2, 0x1, %g3
1922 brnz,pn %g2, 1f ! no need to enqueue more intr_vec
1923 st %g3, [%g4] ! delay - store incremented counter
1924 jmp %g6 ! setsoftint_tl1(%g1) - queue intr_vec
1925 nop
1926 ! not reached
1927 1:
1928 retry
1929 SET_SIZE(ecache_scrubreq_tl1)
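
/*
 * Purely illustrative: the handler above is roughly
 *
 *	if (ec_scrub_outstanding++ == 0)
 *		setsoftint_tl1(inum);		queue the intr_vec
 *
 * where the unlocked increment is safe because we are at TL=1.
 */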
1930
1931 #endif /* lint */
1932
1933 #if defined(lint)
1934 /*ARGSUSED*/
1935 void
1936 write_ec_tag_parity(uint32_t id)
1937 {}
1938 #else /* lint */
1939
1940 /*
1941 * write_ec_tag_parity() zeroes the ecache tag, marks the state as
1942 * invalid, and writes good parity to the tag.
1943 * Input %o0 = 32 bit E$ index
1944 */
1945 ENTRY(write_ec_tag_parity)
1946 or %g0, 1, %o4
1947 sllx %o4, 39, %o4 ! set bit 40 for e$ tag access
1948 or %o0, %o4, %o4 ! %o4 = ecache addr for tag write
1949
1950 rdpr %pstate, %o5
1951 andn %o5, PSTATE_IE | PSTATE_AM, %o1
1952 wrpr %o1, %g0, %pstate ! clear IE, AM bits
1953
1954 ldxa [%g0]ASI_ESTATE_ERR, %g1
1955 stxa %g0, [%g0]ASI_ESTATE_ERR ! Turn off Error enable
1956 membar #Sync
1957
1958 ba 1f
1959 nop
1960 /*
1961 * Align on the ecache boundary in order to force
1962 * the critical code section onto the same ecache line.
1963 */
1964 .align 64
1965
1966 1:
1967 set S_EC_PARITY, %o3 ! clear tag, state invalid
1968 sllx %o3, S_ECPAR_SHIFT, %o3 ! and with good tag parity
1969 stxa %o3, [%g0]ASI_EC_DIAG ! update with the above info
1970 stxa %g0, [%o4]ASI_EC_W
1971 membar #Sync
1972
1973 stxa %g1, [%g0]ASI_ESTATE_ERR ! Turn error enable back on
1974 membar #Sync
1975 retl
1976 wrpr %g0, %o5, %pstate
1977 SET_SIZE(write_ec_tag_parity)
1978
1979 #endif /* lint */
1980
1981 #if defined(lint)
1982 /*ARGSUSED*/
1983 void
1984 write_hb_ec_tag_parity(uint32_t id)
1985 {}
1986 #else /* lint */
1987
1988 /*
1989 * write_hb_ec_tag_parity() zeroes the ecache tag, marks the state as
1990 * invalid, and writes good parity to the tag.
1991 * Input %o0 = 32 bit E$ index
1992 */
1993 ENTRY(write_hb_ec_tag_parity)
1994 or %g0, 1, %o4
1995 sllx %o4, 39, %o4 ! set bit 40 for e$ tag access
1996 or %o0, %o4, %o4 ! %o4 = ecache addr for tag write
1997
1998 rdpr %pstate, %o5
1999 andn %o5, PSTATE_IE | PSTATE_AM, %o1
2000 wrpr %o1, %g0, %pstate ! clear IE, AM bits
2001
2002 ldxa [%g0]ASI_ESTATE_ERR, %g1
2003 stxa %g0, [%g0]ASI_ESTATE_ERR ! Turn off Error enable
2004 membar #Sync
2005
2006 ba 1f
2007 nop
2012 .align 64
2013 1:
2014 #ifdef HUMMINGBIRD
2015 set HB_EC_PARITY, %o3 ! clear tag, state invalid
2016 sllx %o3, HB_ECPAR_SHIFT, %o3 ! and with good tag parity
2017 #else /* !HUMMINGBIRD */
2018 set SB_EC_PARITY, %o3 ! clear tag, state invalid
2019 sllx %o3, SB_ECPAR_SHIFT, %o3 ! and with good tag parity
2020 #endif /* !HUMMINGBIRD */
2021
2022 stxa %o3, [%g0]ASI_EC_DIAG ! update with the above info
2023 stxa %g0, [%o4]ASI_EC_W
2024 membar #Sync
2025
2026 stxa %g1, [%g0]ASI_ESTATE_ERR ! Turn error enable back on
2027 membar #Sync
2028 retl
2029 wrpr %g0, %o5, %pstate
2030 SET_SIZE(write_hb_ec_tag_parity)
2031
2032 #endif /* lint */
2033
2034 #define VIS_BLOCKSIZE 64
2035
2036 #if defined(lint)
2037
2038 /*ARGSUSED*/
2039 int
2040 dtrace_blksuword32(uintptr_t addr, uint32_t *data, int tryagain)
2041 { return (0); }
2042
2043 #else
2044
2045 ENTRY(dtrace_blksuword32)
2046 save %sp, -SA(MINFRAME + 4), %sp
2047
2048 rdpr %pstate, %l1
2049 andn %l1, PSTATE_IE, %l2 ! disable interrupts to
2050 wrpr %g0, %l2, %pstate ! protect our FPU diddling
2051
2052 rd %fprs, %l0
2053 andcc %l0, FPRS_FEF, %g0
2054 bz,a,pt %xcc, 1f ! if the fpu is disabled
2055 wr %g0, FPRS_FEF, %fprs ! ... enable the fpu
2056
2057 st %f0, [%fp + STACK_BIAS - 4] ! save %f0 to the stack
2058 1:
2059 set 0f, %l5
2060 /*
2061  * We're about to write a block full of either total garbage
2062 * (not kernel data, don't worry) or user floating-point data
2063 * (so it only _looks_ like garbage).
2064 */
2089
2090 ld [%fp + STACK_BIAS - 4], %f0 ! restore %f0
2091 1:
2092
2093 wrpr %g0, %l1, %pstate ! restore interrupts
2094
2095 /*
2096 * If tryagain is set (%i2) we tail-call dtrace_blksuword32_err()
2097 * which deals with watchpoints. Otherwise, just return -1.
2098 */
2099 brnz,pt %i2, 1f
2100 nop
2101 ret
2102 restore %g0, -1, %o0
2103 1:
2104 call dtrace_blksuword32_err
2105 restore
2106
2107 SET_SIZE(dtrace_blksuword32)
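
/*
 * Purely illustrative: the fault path above in C terms, assuming (as the
 * tail call suggests) that dtrace_blksuword32_err() takes the original
 * (addr, data) arguments:
 *
 *	if (!tryagain)
 *		return (-1);
 *	return (dtrace_blksuword32_err(addr, data));
 */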
2108
2109 #endif /* lint */