 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

# ident	"%Z%%M% %I% %E% SMI"

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/asm_linkage.h>
#include <sys/vtrace.h>
#include <sys/machthread.h>
#include <sys/clock.h>
#include <sys/asi.h>
#include <sys/fsr.h>
#include <sys/privregs.h>

#if !defined(lint)
#include "assym.h"
#endif	/* lint */


/*
 * For counts less than or equal to this number of bytes, we always
 * copy byte-for-byte.
 */
#define	SMALL_LIMIT	7

/*
 * LOFAULT_SET : Flag set by kzero and kcopy to indicate that a t_lofault
 * handler was set.
 */
#define	LOFAULT_SET 2

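/*
 * Illustrative sketch, not part of the original file: kernel addresses
 * are at least word-aligned, so the low bit carrying LOFAULT_SET is free
 * in t_lofault.  In rough C, the tag-and-restore idiom used below is:
 *
 *	uintptr_t saved;
 *
 *	saved = curthread->t_lofault | LOFAULT_SET;	remember that we
 *	curthread->t_lofault = (uintptr_t)&handler;	installed a handler
 *	...
 *	curthread->t_lofault = saved & ~LOFAULT_SET;	strip tag, restore
 */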

/*
 * Copy a block of storage, returning an error code if `from' or
 * `to' takes a kernel pagefault which cannot be resolved.
 * Returns an errno value on pagefault error, 0 if all is well.
 */
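/*
 * Hedged usage sketch (not in the original source): a caller simply
 * propagates a nonzero kcopy() return as the errno from the fault:
 *
 *	int err;
 *
 *	if ((err = kcopy(from, to, count)) != 0)
 *		return (err);		errno from the pagefault
 */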



#if defined(lint)

/* ARGSUSED */
int
kcopy(const void *from, void *to, size_t count)
{ return (0); }

#else	/* lint */

	.seg	".text"
	.align	4

	ENTRY(kcopy)

	save	%sp, -SA(MINFRAME), %sp
	set	.copyerr, %l7			! copyerr is lofault value
	ldn	[THREAD_REG + T_LOFAULT], %o5	! save existing handler
	or	%o5, LOFAULT_SET, %o5
	membar	#Sync				! sync error barrier
	b	.do_copy			! common code
	stn	%l7, [THREAD_REG + T_LOFAULT]	! set t_lofault

/*
 * We got here because of a fault during kcopy.
 * Errno value is in %g1.
 */
.copyerr:
	! kcopy() *always* sets a t_lofault handler, and ORs LOFAULT_SET
	! into %o5 to indicate that it has done so.  We need to clear the
	! LOFAULT_SET flag before restoring the error handler.
	andn	%o5, LOFAULT_SET, %o5
	membar	#Sync				! sync error barrier
	stn	%o5, [THREAD_REG + T_LOFAULT]	! restore old t_lofault
	ret
	restore	%g1, 0, %o0

	SET_SIZE(kcopy)
#endif	/* lint */


/*
 * Copy a block of storage - must not overlap (from + len <= to).
 */
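/*
 * Illustrative note, not part of the original file: one way to state the
 * no-overlap contract the caller must honor is
 *
 *	ASSERT((caddr_t)from + count <= (caddr_t)to ||
 *	    (caddr_t)to + count <= (caddr_t)from);
 *
 * Use ovbcopy() below when the operands may overlap.
 */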
#if defined(lint)

/* ARGSUSED */
void
bcopy(const void *from, void *to, size_t count)
{}

#else	/* lint */

	ENTRY(bcopy)

	save	%sp, -SA(MINFRAME), %sp
	clr	%o5			! flag LOFAULT_SET is not set for bcopy

.do_copy:
	mov	%i1, %g5		! save dest addr start

	mov	%i2, %l6		! save size

	cmp	%i2, 12			! for small counts
	blu	%ncc, .bytecp		! just copy bytes
	.empty

	!
	! use aligned transfers where possible
	!
	xor	%i0, %i1, %o4		! xor from and to address
	btst	7, %o4			! if lower three bits zero
	bz	.aldoubcp		! can align on double boundary

/*
 * Common code used to align transfers on word and doubleword
 * boundaries.  Aligns source and destination and returns a count
 * of aligned bytes to transfer in %i3.
 */
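/*
 * In rough C terms (illustrative only), with the alignment mask in %o0
 * (presumably 3 for word and 7 for doubleword transfers), bytes are
 * copied one at a time until (from & mask) == 0, and the aligned
 * remainder is then
 *
 *	aligned_count = count & ~mask;	(the andn %i2, %o0, %i3 below)
 */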
1:
	inc	%i0			! inc from
	stb	%o4, [%i1]		! write a byte
	inc	%i1			! inc to
	dec	%i2			! dec count
.alignit:
	btst	%o0, %i0		! %o0 is bit mask to check for alignment
	bnz,a	1b
	ldub	[%i0], %o4		! read next byte

	retl
	andn	%i2, %o0, %i3		! return size of aligned bytes
	SET_SIZE(bcopy)

#endif	/* lint */

/*
 * Block copy with possibly overlapped operands.
 */

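/*
 * A hedged C sketch of the dispatch below (not part of the original
 * file): if the regions cannot overlap we fall through to bcopy;
 * otherwise we pick the copy direction that never overwrites source
 * bytes before they are read:
 *
 *	void
 *	ovbcopy(const void *from, void *to, size_t count)
 *	{
 *		ptrdiff_t d = (caddr_t)from - (caddr_t)to;
 *
 *		if (count == 0)
 *			return;
 *		if ((size_t)(d < 0 ? -d : d) >= count)
 *			bcopy(from, to, count);		no overlap possible
 *		else if (from < to)
 *			copy bytes backward, last byte first
 *		else
 *			copy bytes forward, first byte first
 *	}
 */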
#if defined(lint)

/*ARGSUSED*/
void
ovbcopy(const void *from, void *to, size_t count)
{}

#else	/* lint */

	ENTRY(ovbcopy)
	tst	%o2			! check count
	bgu,a	%ncc, 1f		! nothing to do or bad arguments
	subcc	%o0, %o1, %o3		! difference of from and to address

	retl				! return
	nop
1:
	bneg,a	%ncc, 2f
	neg	%o3			! if < 0, make it positive
2:	cmp	%o2, %o3		! cmp size and abs(from - to)
	bleu	%ncc, bcopy		! if size <= abs(diff): use bcopy,
	.empty				!   no overlap
	cmp	%o0, %o1		! compare from and to addresses
	blu	%ncc, .ov_bkwd		! if from < to, copy backwards
	nop
	!
	! Copy forwards.
	!
.ov_fwd:
	ldub	[%o0], %o3		! read from address
	inc	%o0			! inc from address
	stb	%o3, [%o1]		! write to address
	deccc	%o2			! dec count
	bgu	%ncc, .ov_fwd		! loop till done
	inc	%o1			! inc to address

	retl				! return
	nop
	!
	! Copy backwards.
	!
.ov_bkwd:
	deccc	%o2			! dec count
	ldub	[%o0 + %o2], %o3	! get byte at end of src
	bgu	%ncc, .ov_bkwd		! loop till done
	stb	%o3, [%o1 + %o2]	! delay slot, store at end of dst

	retl				! return
	nop
	SET_SIZE(ovbcopy)

#endif	/* lint */

/*
 * hwblkpagecopy()
 *
 * Copies exactly one page.  This routine assumes the caller (ppcopy)
 * has already disabled kernel preemption and has checked
 * use_hw_bcopy.
 */
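/*
 * Hedged caller sketch (illustrative; ppcopy is the real caller and may
 * differ): the contract is that preemption is off and use_hw_bcopy has
 * been checked before we get here:
 *
 *	kpreempt_disable();
 *	if (use_hw_bcopy)
 *		hwblkpagecopy(src, dst);	copies exactly PAGESIZE bytes
 *	else
 *		bcopy(src, dst, PAGESIZE);	hypothetical fallback
 *	kpreempt_enable();
 */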
#ifdef lint
/*ARGSUSED*/
void
hwblkpagecopy(const void *src, void *dst)
{ }
#else	/* lint */
	ENTRY(hwblkpagecopy)
	save	%sp, -SA(MINFRAME), %sp

	! %i0 - source address (arg)
	! %i1 - destination address (arg)
	! %i2 - length of region (not arg)

	set	PAGESIZE, %i2
	mov	%i1, %o0	! store destination address for flushing

	/*
	 * Copying exactly one page; PAGESIZE is a multiple of 0x80.
	 */
1:
	ldx	[%i0+0x0], %l0
	ldx	[%i0+0x8], %l1
	ldx	[%i0+0x10], %l2
	ldx	[%i0+0x18], %l3
	ldx	[%i0+0x20], %l4
	ldx	[%i0+0x28], %l5
	ldx	[%i0+0x30], %l6
	ldx	[%i0+0x38], %l7
	stx	%l0, [%i1+0x0]
	stx	%l1, [%i1+0x8]
	stx	%l2, [%i1+0x10]
	stx	%l3, [%i1+0x18]
	stx	%l4, [%i1+0x20]
	stx	%l5, [%i1+0x28]
	stx	%l6, [%i1+0x30]
	stx	%l7, [%i1+0x38]
	ldx	[%i0+0x40], %l0
	ldx	[%i0+0x48], %l1
	ldx	[%i0+0x50], %l2
	ldx	[%i0+0x58], %l3
	ldx	[%i0+0x60], %l4
	ldx	[%i0+0x68], %l5
	ldx	[%i0+0x70], %l6
	ldx	[%i0+0x78], %l7
	stx	%l0, [%i1+0x40]
	stx	%l1, [%i1+0x48]
	stx	%l2, [%i1+0x50]
	stx	%l3, [%i1+0x58]
	stx	%l4, [%i1+0x60]
	stx	%l5, [%i1+0x68]
	stx	%l6, [%i1+0x70]
	stx	%l7, [%i1+0x78]

	add	%i0, 0x80, %i0
	subcc	%i2, 0x80, %i2
	bgu,pt	%xcc, 1b
	add	%i1, 0x80, %i1

	! %o0 contains the dest. address
	set	PAGESIZE, %o1
	call	sync_icache
	nop

	membar	#Sync
	ret
	restore	%g0, 0, %o0
	SET_SIZE(hwblkpagecopy)
#endif	/* lint */


/*
 * Transfer data to and from user space -
 * Note that these routines can cause faults.
 * It is assumed that the kernel has nothing at
 * less than KERNELBASE in the virtual address space.
 *
 * Note that copyin(9F) and copyout(9F) are part of the
 * DDI/DKI which specifies that they return '-1' on "errors."
 *
 * Sigh.
 *
 * So there are two extremely similar routines - xcopyin() and xcopyout()
 * which return the errno that we've faithfully computed.  This
 * allows other callers (e.g. uiomove(9F)) to work correctly.
 * Given that these are used pretty heavily, we expand the calling
 * sequences inline for all flavours (rather than making wrappers).
 *
 * There are also stub routines for xcopyout_little and xcopyin_little.
 * ...
 * We check whether any data is left to be copied by examining %o3.  If
 * that is zero, we're done and can go home.  If not, we figure out what
 * the largest chunk size left to be copied is and branch to that copy
 * loop unless there's only one byte left.  We load that as we're
 * branching to code that stores it just before we return.
 *
 * Fault handlers are invoked if we reference memory that has no
 * current mapping.  All forms share the same copyio_fault handler.
 * This routine handles fixing up the stack and general housecleaning.
 * Each copy operation has a simple fault handler that is then called
 * to do the work specific to the individual operation.  The handlers
 * for copyOP and xcopyOP are found at the end of the individual functions.
 * The handlers for xcopyOP_little are found at the end of xcopyin_little.
 * The handlers for copyOP_noerr are found at the end of copyin_noerr.
 */
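/*
 * Hedged illustration of the two return conventions (not part of the
 * original file):
 *
 *	if (copyout(kaddr, uaddr, len) != 0)
 *		return (EFAULT);	DDI/DKI: only -1, errno is lost
 *
 *	int err;
 *
 *	if ((err = xcopyout(kaddr, uaddr, len)) != 0)
 *		return (err);		errno preserved, as uiomove needs
 */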

/*
 * Copy kernel data to user space (copyout/xcopyout/xcopyout_little).
 */

#if defined(lint)

/*ARGSUSED*/
int
copyout(const void *kaddr, void *uaddr, size_t count)
{ return (0); }

#else	/* lint */

/*
 * We save the arguments in the following registers in case of a fault:
 *	kaddr - %g2
 *	uaddr - %g3
 *	count - %g4
 */
#define	SAVE_SRC	%g2
#define	SAVE_DST	%g3
#define	SAVE_COUNT	%g4

#define	REAL_LOFAULT	%g5
#define	SAVED_LOFAULT	%g6

/*
 * Generic copyio fault handler.  This is the first line of defense when a
 * fault occurs in (x)copyin/(x)copyout.  In order for this to function
 * properly, the value of the 'real' lofault handler should be in
 * REAL_LOFAULT.  This allows us to share common code for all the flavors
 * of the copy operations, including the _noerr versions.
 * ...
 */
	ldub	[%o0 + %o3], %o4
	stba	%o4, [%o1 + %o3]ASI_USER
.dcofh:
	membar	#Sync
	stn	SAVED_LOFAULT, [THREAD_REG + T_LOFAULT]	! restore old t_lofault
	retl
	clr	%o0

.copyout_err:
	ldn	[THREAD_REG + T_COPYOPS], %o4
	brz	%o4, 2f
	nop
	ldn	[%o4 + CP_COPYOUT], %g2
	jmp	%g2
	nop
2:
	retl
	mov	-1, %o0
	SET_SIZE(copyout)

#endif	/* lint */


#ifdef lint

/*ARGSUSED*/
int
xcopyout(const void *kaddr, void *uaddr, size_t count)
{ return (0); }

#else	/* lint */

	ENTRY(xcopyout)
	sethi	%hi(.xcopyout_err), REAL_LOFAULT
	b	.do_copyout
	or	REAL_LOFAULT, %lo(.xcopyout_err), REAL_LOFAULT
.xcopyout_err:
	ldn	[THREAD_REG + T_COPYOPS], %o4
	brz	%o4, 2f
	nop
	ldn	[%o4 + CP_XCOPYOUT], %g2
	jmp	%g2
	nop
2:
	retl
	mov	%g1, %o0
	SET_SIZE(xcopyout)

#endif	/* lint */

#ifdef lint

/*ARGSUSED*/
int
xcopyout_little(const void *kaddr, void *uaddr, size_t count)
{ return (0); }

#else	/* lint */

	ENTRY(xcopyout_little)
	sethi	%hi(.little_err), %o4
	ldn	[THREAD_REG + T_LOFAULT], %o5
	or	%o4, %lo(.little_err), %o4
	membar	#Sync			! sync error barrier
	stn	%o4, [THREAD_REG + T_LOFAULT]

	subcc	%g0, %o2, %o3
	add	%o0, %o2, %o0
	bz,pn	%ncc, 2f		! check for zero bytes
	sub	%o2, 1, %o4
	add	%o0, %o4, %o0		! start w/last byte
	add	%o1, %o2, %o1
	ldub	[%o0+%o3], %o4

1:	stba	%o4, [%o1+%o3]ASI_AIUSL
	inccc	%o3
	sub	%o0, 2, %o0		! get next byte
	bcc,a,pt %ncc, 1b
	ldub	[%o0+%o3], %o4

2:	membar	#Sync			! sync error barrier
	stn	%o5, [THREAD_REG + T_LOFAULT]	! restore old t_lofault
	retl
	mov	%g0, %o0		! return (0)
	SET_SIZE(xcopyout_little)

#endif	/* lint */

/*
 * Copy user data to kernel space (copyin/xcopyin/xcopyin_little).
 */

#if defined(lint)

/*ARGSUSED*/
int
copyin(const void *uaddr, void *kaddr, size_t count)
{ return (0); }

#else	/* lint */

	ENTRY(copyin)
	sethi	%hi(.copyin_err), REAL_LOFAULT
	or	REAL_LOFAULT, %lo(.copyin_err), REAL_LOFAULT

.do_copyin:
	!
	! Check the length and bail if zero.
	!
	tst	%o2
	bnz,pt	%ncc, 1f
	nop
	retl
	clr	%o0
1:
	sethi	%hi(copyio_fault), %o3
	ldn	[THREAD_REG + T_LOFAULT], SAVED_LOFAULT
	or	%o3, %lo(copyio_fault), %o3
	membar	#Sync
	stn	%o3, [THREAD_REG + T_LOFAULT]

	lduba	[%o0 + %o3]ASI_USER, %o4
	stb	%o4, [%o1 + %o3]
.dcifh:
	membar	#Sync
	stn	SAVED_LOFAULT, [THREAD_REG + T_LOFAULT]	! restore old t_lofault
	retl
	clr	%o0

.copyin_err:
	ldn	[THREAD_REG + T_COPYOPS], %o4
	brz	%o4, 2f
	nop
	ldn	[%o4 + CP_COPYIN], %g2
	jmp	%g2
	nop
2:
	retl
	mov	-1, %o0
	SET_SIZE(copyin)

#endif	/* lint */

#ifdef lint

/*ARGSUSED*/
int
xcopyin(const void *uaddr, void *kaddr, size_t count)
{ return (0); }

#else	/* lint */

	ENTRY(xcopyin)
	sethi	%hi(.xcopyin_err), REAL_LOFAULT
	b	.do_copyin
	or	REAL_LOFAULT, %lo(.xcopyin_err), REAL_LOFAULT
.xcopyin_err:
	ldn	[THREAD_REG + T_COPYOPS], %o4
	brz	%o4, 2f
	nop
	ldn	[%o4 + CP_XCOPYIN], %g2
	jmp	%g2
	nop
2:
	retl
	mov	%g1, %o0
	SET_SIZE(xcopyin)

#endif	/* lint */

#ifdef lint

/*ARGSUSED*/
int
xcopyin_little(const void *uaddr, void *kaddr, size_t count)
{ return (0); }

#else	/* lint */

	ENTRY(xcopyin_little)
	sethi	%hi(.little_err), %o4
	ldn	[THREAD_REG + T_LOFAULT], %o5
	or	%o4, %lo(.little_err), %o4
	membar	#Sync			! sync error barrier
	stn	%o4, [THREAD_REG + T_LOFAULT]

	subcc	%g0, %o2, %o3
	add	%o0, %o2, %o0
	bz,pn	%ncc, 2f		! check for zero bytes
	sub	%o2, 1, %o4
	add	%o0, %o4, %o0		! start w/last byte
	add	%o1, %o2, %o1
	lduba	[%o0+%o3]ASI_AIUSL, %o4

1:	stb	%o4, [%o1+%o3]
	inccc	%o3
	sub	%o0, 2, %o0		! get next byte
	bcc,a,pt %ncc, 1b
	lduba	[%o0+%o3]ASI_AIUSL, %o4

2:	membar	#Sync			! sync error barrier
	stn	%o5, [THREAD_REG + T_LOFAULT]	! restore old t_lofault
	retl
	mov	%g0, %o0		! return (0)

.little_err:
	membar	#Sync			! sync error barrier
	stn	%o5, [THREAD_REG + T_LOFAULT]	! restore old t_lofault
	retl
	mov	%g1, %o0
	SET_SIZE(xcopyin_little)

#endif	/* lint */


/*
 * Copy a block of storage - must not overlap (from + len <= to).
 * No fault handler installed (to be called under on_fault()).
 */
#if defined(lint)

/* ARGSUSED */
void
copyin_noerr(const void *ufrom, void *kto, size_t count)
{}

#else	/* lint */

	ENTRY(copyin_noerr)
	sethi	%hi(.copyio_noerr), REAL_LOFAULT
	b	.do_copyin
	or	REAL_LOFAULT, %lo(.copyio_noerr), REAL_LOFAULT
.copyio_noerr:
	jmp	SAVED_LOFAULT
	nop
	SET_SIZE(copyin_noerr)

#endif	/* lint */

/*
 * Copy a block of storage - must not overlap (from + len <= to).
 * No fault handler installed (to be called under on_fault()).
 */

#if defined(lint)

/* ARGSUSED */
void
copyout_noerr(const void *kfrom, void *uto, size_t count)
{}

#else	/* lint */

	ENTRY(copyout_noerr)
	sethi	%hi(.copyio_noerr), REAL_LOFAULT
	b	.do_copyout
	or	REAL_LOFAULT, %lo(.copyio_noerr), REAL_LOFAULT
	SET_SIZE(copyout_noerr)

#endif	/* lint */

#if defined(lint)

int use_hw_bcopy = 1;
int use_hw_bzero = 1;

#else	/* !lint */

	.align	4
	DGDEF(use_hw_bcopy)
	.word	1
	DGDEF(use_hw_bzero)
	.word	1

	.align	64
	.section ".text"
#endif	/* !lint */


/*
 * hwblkclr - clears block-aligned, block-multiple-sized regions that are
 * longer than 256 bytes in length.  For the generic module we simply call
 * bzero and return 1, indicating that the zeroed data may still be sitting
 * in the cache and that the caller must flush it to ensure integrity.
 * The caller is responsible for ensuring that use_hw_bzero is true and
 * that kpreempt_disable() has been called.
 */
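/*
 * Hedged caller sketch (illustrative only): a return of 1 tells the
 * caller the region was cleared through the cache, so the lines must
 * still be flushed:
 *
 *	kpreempt_disable();
 *	if (use_hw_bzero && hwblkclr(addr, len) != 0)
 *		flush the cached lines for [addr, addr + len)
 *	kpreempt_enable();
 */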
#ifdef lint
/*ARGSUSED*/
int
hwblkclr(void *addr, size_t len)
{
	return (0);
}
#else	/* lint */
	! %i0 - start address
	! %i1 - length of region (multiple of 64)

	ENTRY(hwblkclr)
	save	%sp, -SA(MINFRAME), %sp

	! Simply call bzero and notify the caller that bzero was used
	mov	%i0, %o0
	call	bzero
	mov	%i1, %o1
	ret
	restore	%g0, 1, %o0	! return (1) - did not use block operations

	SET_SIZE(hwblkclr)
#endif	/* lint */

#ifdef lint
/* Copy 32 bytes of data from src to dst using physical addresses */
/*ARGSUSED*/
void
hw_pa_bcopy32(uint64_t src, uint64_t dst)
{}
#else	/* !lint */

/*
 * Copy 32 bytes of data from src (%o0) to dst (%o1)
 * using physical addresses.
 */
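/*
 * The sequence below, as hedged C-like pseudocode (rdpr_pstate,
 * pa_load64, etc. are hypothetical helpers, not real kernel APIs):
 * interrupts are disabled so the physically-addressed ASI_MEM accesses
 * cannot be interleaved with a trap, and a membar orders the stores
 * before the interrupt state is restored:
 *
 *	uint64_t pstate = rdpr_pstate();
 *	wrpr_pstate(pstate & ~PSTATE_IE);	block interrupts
 *	for (int i = 0; i < 4; i++)
 *		pa_store64(dst + 8 * i, pa_load64(src + 8 * i));
 *	membar_sync();
 *	wrpr_pstate(pstate);			restore interrupt state
 */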
	ENTRY_NP(hw_pa_bcopy32)
	rdpr	%pstate, %g1
	andn	%g1, PSTATE_IE, %g2
	wrpr	%g0, %g2, %pstate

	ldxa	[%o0]ASI_MEM, %o2
	add	%o0, 8, %o0
	ldxa	[%o0]ASI_MEM, %o3
	add	%o0, 8, %o0
	ldxa	[%o0]ASI_MEM, %o4
	add	%o0, 8, %o0
	ldxa	[%o0]ASI_MEM, %o5
	stxa	%o2, [%o1]ASI_MEM
	add	%o1, 8, %o1
	stxa	%o3, [%o1]ASI_MEM
	add	%o1, 8, %o1
	stxa	%o4, [%o1]ASI_MEM
	add	%o1, 8, %o1
	stxa	%o5, [%o1]ASI_MEM

	membar	#Sync
	retl
	wrpr	%g0, %g1, %pstate
	SET_SIZE(hw_pa_bcopy32)
#endif	/* lint */

/*
 * Zero a block of storage.
 *
 * uzero is used by the kernel to zero a block in user address space.
 */
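
/*
 * Hedged usage sketch (not in the original file): kzero() reports a
 * fault as an errno, while uzero() leaves fault handling to the
 * caller's existing t_lofault setup:
 *
 *	int err;
 *
 *	if ((err = kzero(addr, len)) != 0)
 *		return (err);		fault while zeroing kernel memory
 *	uzero(uaddr, len);		zero user memory
 */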


#if defined(lint)

/* ARGSUSED */
int
kzero(void *addr, size_t count)
{ return (0); }

/* ARGSUSED */
void
uzero(void *addr, size_t count)
{}

#else	/* lint */

	ENTRY(uzero)
	!
	! Set a new lo_fault handler only if we came in with one
	! already specified.
	!
	wr	%g0, ASI_USER, %asi
	ldn	[THREAD_REG + T_LOFAULT], %o5
	tst	%o5
	bz,pt	%ncc, .do_zero
	sethi	%hi(.zeroerr), %o2
	or	%o2, %lo(.zeroerr), %o2
	membar	#Sync
	ba,pt	%ncc, .do_zero
	stn	%o2, [THREAD_REG + T_LOFAULT]

	ENTRY(kzero)
	!
	! Always set a lo_fault handler
	!
	wr	%g0, ASI_P, %asi

	!
	! Old handler was zero.  Just return the error.
	!
	retl				! return
	mov	%g1, %o0		! error code from %g1
3:
	!
	! We're here because %o5 was non-zero.  It was non-zero
	! because either LOFAULT_SET was present, a previous fault
	! handler was present, or both.  In all cases we need to reset
	! T_LOFAULT to the value of %o5 after clearing LOFAULT_SET
	! before we either simply return the error or we invoke the
	! previously specified handler.
	!
	be	%ncc, 2b
	stn	%o5, [THREAD_REG + T_LOFAULT]
	jmp	%o5			! goto real handler
	nop
	SET_SIZE(kzero)
	SET_SIZE(uzero)

#endif	/* lint */

/*
 * Zero a block of storage.
 */

#if defined(lint)

/* ARGSUSED */
void
bzero(void *addr, size_t count)
{}

#else	/* lint */

	ENTRY(bzero)
	wr	%g0, ASI_P, %asi

	ldn	[THREAD_REG + T_LOFAULT], %o5	! save old vector
	tst	%o5
	bz,pt	%ncc, .do_zero
	sethi	%hi(.zeroerr), %o2
	or	%o2, %lo(.zeroerr), %o2
	membar	#Sync				! sync error barrier
	stn	%o2, [THREAD_REG + T_LOFAULT]	! install new vector

.do_zero:
	cmp	%o1, 7
	blu,pn	%ncc, .byteclr
	nop

	cmp	%o1, 15
	blu,pn	%ncc, .wdalign
	nop

	!
	! We're just concerned with whether t_lofault was set
	! when we came in.  We end up here from either kzero()
	! or bzero().  kzero() *always* sets a lofault handler.
	! It ORs LOFAULT_SET into %o5 to indicate it has done
	! this even if the value of %o5 is otherwise zero.
	! bzero() sets a lofault handler *only* if one was
	! previously set.  Accordingly we need to examine
	! %o5, and if it is non-zero be sure to clear LOFAULT_SET
	! before resetting the error handler.
	!
	tst	%o5
	bz	%ncc, 1f
	andn	%o5, LOFAULT_SET, %o5
	membar	#Sync				! sync error barrier
	stn	%o5, [THREAD_REG + T_LOFAULT]	! restore old t_lofault
1:
	retl
	clr	%o0			! return (0)

	SET_SIZE(bzero)
#endif	/* lint */