restore sparc comments
de-linting of .s files
--- old/usr/src/uts/sun4v/cpu/common_asm.s
+++ new/usr/src/uts/sun4v/cpu/common_asm.s
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 -#if !defined(lint)
26 25 #include "assym.h"
27 -#endif
28 26
29 27 /*
30 28 * General assembly language routines.
31 29 * It is the intent of this file to contain routines that are
32 30 * specific to cpu architecture.
33 31 */
34 32
35 33 /*
36 34 * WARNING: If you add a fast trap handler which can be invoked by a
37 35 * non-privileged user, you may have to use the FAST_TRAP_DONE macro
38 36 * instead of "done" instruction to return back to the user mode. See
39 37 * comments for the "fast_trap_done" entry point for more information.
40 38 */
41 39 #define FAST_TRAP_DONE \
42 40 ba,a fast_trap_done
43 41
44 42 #include <sys/machclock.h>
45 43 #include <sys/clock.h>
46 44
47 -#if defined(lint)
48 -#include <sys/types.h>
49 -#include <sys/scb.h>
50 -#include <sys/systm.h>
51 -#include <sys/regset.h>
52 -#include <sys/sunddi.h>
53 -#include <sys/lockstat.h>
54 -#endif /* lint */
55 45
56 -
57 46 #include <sys/asm_linkage.h>
58 47 #include <sys/privregs.h>
59 48 #include <vm/hat_sfmmu.h>
60 49 #include <sys/machparam.h> /* To get SYSBASE and PAGESIZE */
61 50 #include <sys/machthread.h>
62 51 #include <sys/clock.h>
63 52 #include <sys/intreg.h>
64 53 #include <sys/psr_compat.h>
65 54 #include <sys/isa_defs.h>
66 55 #include <sys/dditypes.h>
67 56 #include <sys/intr.h>
68 57 #include <sys/hypervisor_api.h>
69 58
70 -#if !defined(lint)
71 59 #include "assym.h"
72 -#endif
73 60
74 61 #define ICACHE_FLUSHSZ 0x20
75 62
76 -#if defined(lint)
77 63 /*
78 - * Softint generated when counter field of tick reg matches value field
64 + * Softint generated when counter field of tick reg matches value field
79 65 * of tick_cmpr reg
80 66 */
81 -/*ARGSUSED*/
82 -void
83 -tickcmpr_set(uint64_t clock_cycles)
84 -{}
85 -
86 -#else /* lint */
87 -
88 67 ENTRY_NP(tickcmpr_set)
89 68 ! get 64-bit clock_cycles interval
90 69 mov %o0, %o2
91 70 mov 8, %o3 ! A reasonable initial step size
92 71 1:
93 72 WR_TICKCMPR(%o2,%o4,%o5,__LINE__) ! Write to TICK_CMPR
94 73
95 74 GET_NATIVE_TIME(%o0,%o4,%o5,__LINE__) ! Read %tick to confirm the
96 75 ! value we wrote was in the
97 76 ! future.
98 77
99 78 cmp %o2, %o0 ! If the value we wrote was in the
100 79 bg,pt %xcc, 2f ! future, then blow out of here.
101 80 sllx %o3, 1, %o3 ! If not, then double our step size,
102 81 ba,pt %xcc, 1b ! and take another lap.
103 82 add %o0, %o3, %o2 !
104 83 2:
105 84 retl
106 85 nop
107 86 SET_SIZE(tickcmpr_set)
108 87
109 -#endif /* lint */
110 -
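
For reference, the retry loop in tickcmpr_set above amounts to the following C sketch (illustrative only; write_tickcmpr() and read_tick() are hypothetical stand-ins for the WR_TICKCMPR and GET_NATIVE_TIME macros):

    /*
     * Write TICK_CMPR, then verify the deadline is still in the future;
     * if %tick has already passed it, double the step and try again.
     */
    void
    tickcmpr_set_sketch(uint64_t clock_cycles)
    {
            uint64_t target = clock_cycles;
            uint64_t step = 8;                      /* reasonable initial step size */

            for (;;) {
                    write_tickcmpr(target);         /* WR_TICKCMPR */
                    uint64_t now = read_tick();     /* GET_NATIVE_TIME */
                    if (target > now)
                            break;                  /* deadline is in the future */
                    step <<= 1;                     /* double the step size */
                    target = now + step;            /* and take another lap */
            }
    }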
111 -#if defined(lint)
112 -
113 -void
114 -tickcmpr_disable(void)
115 -{}
116 -
117 -#else
118 -
119 88 ENTRY_NP(tickcmpr_disable)
120 89 mov 1, %g1
121 90 sllx %g1, TICKINT_DIS_SHFT, %o0
122 91 WR_TICKCMPR(%o0,%o4,%o5,__LINE__) ! Write to TICK_CMPR
123 92 retl
124 93 nop
125 94 SET_SIZE(tickcmpr_disable)
126 95
127 -#endif
96 + .seg ".text"
97 +tick_write_delta_panic:
98 + .asciz "tick_write_delta: not supported, delta: 0x%lx"
128 99
129 -#if defined(lint)
130 -
131 100 /*
132 101 * tick_write_delta() is intended to increment %stick by the specified delta,
133 102 * but %stick is only writeable in hyperprivileged mode and at present there
134 103 * is no provision for this. tick_write_delta is called by the cyclic subsystem
135 104 * if a negative %stick delta is observed after cyclic processing is resumed
136 105 * after an event such as an OS suspend/resume. On sun4v, the suspend/resume
137 106 * routines should adjust the %stick offset preventing the cyclic subsystem
138 107 * from detecting a negative delta. If a negative delta is detected, panic the
139 108 * system. The negative delta could be caused by improper %stick
140 109 * synchronization after a suspend/resume.
141 110 */
142 -
143 -/*ARGSUSED*/
144 -void
145 -tick_write_delta(uint64_t delta)
146 -{}
147 -
148 -#else /* lint */
149 -
150 - .seg ".text"
151 -tick_write_delta_panic:
152 - .asciz "tick_write_delta: not supported, delta: 0x%lx"
153 -
154 111 ENTRY_NP(tick_write_delta)
155 112 sethi %hi(tick_write_delta_panic), %o1
156 113 save %sp, -SA(MINFRAME), %sp ! get a new window to preserve caller
157 114 mov %i0, %o1
158 115 call panic
159 116 or %i1, %lo(tick_write_delta_panic), %o0
160 117 /*NOTREACHED*/
161 118 retl
162 119 nop
163 -#endif
164 120
165 -#if defined(lint)
166 -/*
167 - * return 1 if disabled
168 - */
169 -
170 -int
171 -tickcmpr_disabled(void)
172 -{ return (0); }
173 -
174 -#else /* lint */
175 -
176 121 ENTRY_NP(tickcmpr_disabled)
177 122 RD_TICKCMPR(%g1,%o0,%o1,__LINE__)
178 123 retl
179 124 srlx %g1, TICKINT_DIS_SHFT, %o0
180 125 SET_SIZE(tickcmpr_disabled)
181 126
182 -#endif /* lint */
183 -
184 127 /*
185 128 * Get current tick
186 129 */
187 -#if defined(lint)
188 130
189 -u_longlong_t
190 -gettick(void)
191 -{ return (0); }
192 -
193 -u_longlong_t
194 -randtick(void)
195 -{ return (0); }
196 -
197 -#else /* lint */
198 -
199 131 ENTRY(gettick)
200 132 ALTENTRY(randtick)
201 133 GET_NATIVE_TIME(%o0,%o2,%o3,__LINE__)
202 134 retl
203 135 nop
204 136 SET_SIZE(randtick)
205 137 SET_SIZE(gettick)
206 138
207 -#endif /* lint */
208 -
209 139 /*
210 140 * Get current tick. For trapstat use only.
211 141 */
212 -#if defined (lint)
213 -
214 -hrtime_t
215 -rdtick()
216 -{ return (0); }
217 -
218 -#else
219 142 ENTRY(rdtick)
220 143 retl
221 144 RD_TICK_PHYSICAL(%o0)
222 145 SET_SIZE(rdtick)
223 -#endif /* lint */
224 146
225 147
226 148 /*
227 149 * Return the counter portion of the tick register.
228 150 */
229 151
230 -#if defined(lint)
231 -
232 -uint64_t
233 -gettick_counter(void)
234 -{ return(0); }
235 -
236 -uint64_t
237 -gettick_npt(void)
238 -{ return(0); }
239 -
240 -uint64_t
241 -getstick_npt(void)
242 -{ return(0); }
243 -
244 -#else /* lint */
245 -
246 152 ENTRY_NP(gettick_counter)
247 153 RD_TICK(%o0,%o1,%o2,__LINE__)
248 154 retl
249 155 nop
250 156 SET_SIZE(gettick_counter)
251 157
252 158 ENTRY_NP(gettick_npt)
253 159 RD_TICK_PHYSICAL(%o0)
254 160 retl
255 161 srlx %o0, 63, %o0
256 162 SET_SIZE(gettick_npt)
257 163
258 164 ENTRY_NP(getstick_npt)
259 165 RD_STICK_PHYSICAL(%o0)
260 166 retl
261 167 srlx %o0, 63, %o0
262 168 SET_SIZE(getstick_npt)
263 -#endif /* lint */
264 169
265 170 /*
266 171 * Provide a C callable interface to the trap that reads the hi-res timer.
267 172 * Returns 64-bit nanosecond timestamp in %o0 and %o1.
268 173 */
269 174
270 -#if defined(lint)
271 -
272 -hrtime_t
273 -gethrtime(void)
274 -{
275 - return ((hrtime_t)0);
276 -}
277 -
278 -hrtime_t
279 -gethrtime_unscaled(void)
280 -{
281 - return ((hrtime_t)0);
282 -}
283 -
284 -hrtime_t
285 -gethrtime_max(void)
286 -{
287 - return ((hrtime_t)0);
288 -}
289 -
290 -void
291 -scalehrtime(hrtime_t *hrt)
292 -{
293 - *hrt = 0;
294 -}
295 -
296 -void
297 -gethrestime(timespec_t *tp)
298 -{
299 - tp->tv_sec = 0;
300 - tp->tv_nsec = 0;
301 -}
302 -
303 -time_t
304 -gethrestime_sec(void)
305 -{
306 - return (0);
307 -}
308 -
309 -void
310 -gethrestime_lasttick(timespec_t *tp)
311 -{
312 - tp->tv_sec = 0;
313 - tp->tv_nsec = 0;
314 -}
315 -
316 -/*ARGSUSED*/
317 -void
318 -hres_tick(void)
319 -{
320 -}
321 -
322 -void
323 -panic_hres_tick(void)
324 -{
325 -}
326 -
327 -#else /* lint */
328 -
329 175 ENTRY_NP(gethrtime)
330 176 GET_HRTIME(%g1,%o0,%o1,%o2,%o3,%o4,%o5,%g2,__LINE__)
331 177 ! %g1 = hrtime
332 178 retl
333 179 mov %g1, %o0
334 180 SET_SIZE(gethrtime)
335 181
336 182 ENTRY_NP(gethrtime_unscaled)
337 183 GET_NATIVE_TIME(%g1,%o2,%o3,__LINE__) ! %g1 = native time
338 184 retl
339 185 mov %g1, %o0
340 186 SET_SIZE(gethrtime_unscaled)
341 187
342 188 ENTRY_NP(gethrtime_waitfree)
343 189 ALTENTRY(dtrace_gethrtime)
344 190 GET_NATIVE_TIME(%g1,%o2,%o3,__LINE__) ! %g1 = native time
345 191 NATIVE_TIME_TO_NSEC(%g1, %o2, %o3)
346 192 retl
347 193 mov %g1, %o0
348 194 SET_SIZE(dtrace_gethrtime)
349 195 SET_SIZE(gethrtime_waitfree)
350 196
351 197 ENTRY(gethrtime_max)
352 198 NATIVE_TIME_MAX(%g1)
353 199 NATIVE_TIME_TO_NSEC(%g1, %o0, %o1)
354 200
355 201 ! hrtime_t's are signed, max hrtime_t must be positive
356 202 mov -1, %o2
357 203 brlz,a %g1, 1f
358 204 srlx %o2, 1, %g1
359 205 1:
360 206 retl
361 207 mov %g1, %o0
362 208 SET_SIZE(gethrtime_max)
363 209
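
A hedged C sketch of gethrtime_max() above (native_time_max() and native_time_to_nsec() are stand-ins for the NATIVE_TIME_MAX and NATIVE_TIME_TO_NSEC macros):

    static hrtime_t
    gethrtime_max_sketch(void)
    {
            hrtime_t t = native_time_to_nsec(native_time_max());

            /* hrtime_t's are signed, max hrtime_t must be positive */
            if (t < 0)
                    t = INT64_MAX;          /* i.e. (uint64_t)-1 >> 1 */
            return (t);
    }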
364 210 ENTRY(scalehrtime)
365 211 ldx [%o0], %o1
366 212 NATIVE_TIME_TO_NSEC(%o1, %o2, %o3)
367 213 retl
368 214 stx %o1, [%o0]
369 215 SET_SIZE(scalehrtime)
370 216
371 217 /*
372 218 * Fast trap to return a timestamp, uses trap window, leaves traps
373 219 * disabled. Returns a 64-bit nanosecond timestamp in %o0 and %o1.
374 220 *
375 221 * This is the handler for the ST_GETHRTIME trap.
376 222 */
377 223
378 224 ENTRY_NP(get_timestamp)
379 225 GET_HRTIME(%g1,%g2,%g3,%g4,%g5,%o0,%o1,%o2,__LINE__)
380 226 ! %g1 = hrtime
381 227 srlx %g1, 32, %o0 ! %o0 = hi32(%g1)
382 228 srl %g1, 0, %o1 ! %o1 = lo32(%g1)
383 229 FAST_TRAP_DONE
384 230 SET_SIZE(get_timestamp)
385 231
386 232 /*
387 233 * Macro to convert GET_HRESTIME() bits into a timestamp.
388 234 *
389 235 * We use two separate macros so that the platform-dependent GET_HRESTIME()
390 236 * can be as small as possible; CONV_HRESTIME() implements the generic part.
391 237 */
392 238 #define CONV_HRESTIME(hrestsec, hrestnsec, adj, nslt, nano) \
393 239 brz,pt adj, 3f; /* no adjustments, it's easy */ \
394 240 add hrestnsec, nslt, hrestnsec; /* hrest.tv_nsec += nslt */ \
395 241 brlz,pn adj, 2f; /* if hrestime_adj negative */ \
396 242 srlx nslt, ADJ_SHIFT, nslt; /* delay: nslt >>= 4 */ \
397 243 subcc adj, nslt, %g0; /* hrestime_adj - nslt/16 */ \
398 244 movg %xcc, nslt, adj; /* adj by min(adj, nslt/16) */ \
399 245 ba 3f; /* go convert to sec/nsec */ \
400 246 add hrestnsec, adj, hrestnsec; /* delay: apply adjustment */ \
401 247 2: addcc adj, nslt, %g0; /* hrestime_adj + nslt/16 */ \
402 248 bge,a,pt %xcc, 3f; /* is adj less negative? */ \
403 249 add hrestnsec, adj, hrestnsec; /* yes: hrest.nsec += adj */ \
404 250 sub hrestnsec, nslt, hrestnsec; /* no: hrest.nsec -= nslt/16 */ \
405 251 3: cmp hrestnsec, nano; /* more than a billion? */ \
406 252 bl,pt %xcc, 4f; /* if not, we're done */ \
407 253 nop; /* delay: do nothing :( */ \
408 254 add hrestsec, 1, hrestsec; /* hrest.tv_sec++; */ \
409 255 sub hrestnsec, nano, hrestnsec; /* hrest.tv_nsec -= NANOSEC; */ \
410 256 ba,a 3b; /* check >= billion again */ \
411 257 4:
412 258
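
In C terms, CONV_HRESTIME() does roughly the following (a sketch only; the clamp to nslt >> ADJ_SHIFT, i.e. nslt/16, matches the comments above):

    static void
    conv_hrestime_sketch(long *hrestsec, long *hrestnsec, int64_t adj,
        int64_t nslt, long nano)
    {
            *hrestnsec += nslt;                     /* hrest.tv_nsec += nslt */
            if (adj != 0) {
                    int64_t cap = nslt >> ADJ_SHIFT;        /* nslt/16 */
                    if (adj > 0)
                            *hrestnsec += (adj < cap) ? adj : cap;
                    else
                            *hrestnsec += (adj > -cap) ? adj : -cap;
            }
            while (*hrestnsec >= nano) {            /* more than a billion? */
                    (*hrestsec)++;                  /* hrest.tv_sec++ */
                    *hrestnsec -= nano;             /* hrest.tv_nsec -= NANOSEC */
            }
    }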
413 259 ENTRY_NP(gethrestime)
414 260 GET_HRESTIME(%o1,%o2,%o3,%o4,%o5,%g1,%g2,%g3,%g4,__LINE__)
415 261 CONV_HRESTIME(%o1, %o2, %o3, %o4, %o5)
416 262 stn %o1, [%o0]
417 263 retl
418 264 stn %o2, [%o0 + CLONGSIZE]
419 265 SET_SIZE(gethrestime)
420 266
421 267 /*
422 268 * Similar to gethrestime(), but gethrestime_sec() returns current hrestime
423 269 * seconds.
424 270 */
425 271 ENTRY_NP(gethrestime_sec)
426 272 GET_HRESTIME(%o0,%o2,%o3,%o4,%o5,%g1,%g2,%g3,%g4,__LINE__)
427 273 CONV_HRESTIME(%o0, %o2, %o3, %o4, %o5)
428 274 retl ! %o0 current hrestime seconds
429 275 nop
430 276 SET_SIZE(gethrestime_sec)
431 277
432 278 /*
433 279 * Returns the hrestime on the last tick. This is simpler than gethrestime()
434 280 * and gethrestime_sec(): no conversion is required. gethrestime_lasttick()
435 281 * follows the same locking algorithm as GET_HRESTIME and GET_HRTIME,
436 282 * outlined in detail in clock.h. (Unlike GET_HRESTIME/GET_HRTIME, we don't
437 283 * rely on load dependencies to effect the membar #LoadLoad, instead declaring
438 284 * it explicitly.)
439 285 */
440 286 ENTRY_NP(gethrestime_lasttick)
441 287 sethi %hi(hres_lock), %o1
442 288 0:
443 289 lduw [%o1 + %lo(hres_lock)], %o2 ! Load lock value
444 290 membar #LoadLoad ! Load of lock must complete
445 291 andn %o2, 1, %o2 ! Mask off lowest bit
446 292 ldn [%o1 + %lo(hrestime)], %g1 ! Seconds.
447 293 add %o1, %lo(hrestime), %o4
448 294 ldn [%o4 + CLONGSIZE], %g2 ! Nanoseconds.
449 295 membar #LoadLoad ! All loads must complete
450 296 lduw [%o1 + %lo(hres_lock)], %o3 ! Reload lock value
451 297 cmp %o3, %o2 ! If lock is locked or has
452 298 bne 0b ! changed, retry.
453 299 stn %g1, [%o0] ! Delay: store seconds
454 300 retl
455 301 stn %g2, [%o0 + CLONGSIZE] ! Delay: store nanoseconds
456 302 SET_SIZE(gethrestime_lasttick)
457 303
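
The hres_lock protocol used above is a sequence-lock style read; a hedged C sketch (membar_loadload() stands in for the explicit membar #LoadLoad):

    void
    gethrestime_lasttick_sketch(timespec_t *tp)
    {
            uint32_t before, after;

            do {
                    before = hres_lock;             /* load lock value */
                    membar_loadload();              /* load of lock must complete */
                    before &= ~1;                   /* mask off lowest bit */
                    tp->tv_sec = hrestime.tv_sec;   /* seconds */
                    tp->tv_nsec = hrestime.tv_nsec; /* nanoseconds */
                    membar_loadload();              /* all loads must complete */
                    after = hres_lock;              /* reload lock value */
            } while (after != before);              /* locked or changed: retry */
    }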
458 304 /*
459 305 * Fast trap for gettimeofday(). Returns a timestruc_t in %o0 and %o1.
460 306 *
461 307 * This is the handler for the ST_GETHRESTIME trap.
462 308 */
463 309
464 310 ENTRY_NP(get_hrestime)
465 311 GET_HRESTIME(%o0,%o1,%g1,%g2,%g3,%g4,%g5,%o2,%o3,__LINE__)
466 312 CONV_HRESTIME(%o0, %o1, %g1, %g2, %g3)
467 313 FAST_TRAP_DONE
468 314 SET_SIZE(get_hrestime)
469 315
470 316 /*
471 317 * Fast trap to return lwp virtual time, uses trap window, leaves traps
472 318 * disabled. Returns a 64-bit number in %o0:%o1, which is the number
473 319 * of nanoseconds consumed.
474 320 *
475 321 * This is the handler for the ST_GETHRVTIME trap.
476 322 *
477 323 * Register usage:
478 324 * %o0, %o1 = return lwp virtual time
479 325 * %o2 = CPU/thread
480 326 * %o3 = lwp
481 327 * %g1 = scratch
482 328 * %g5 = scratch
483 329 */
484 330 ENTRY_NP(get_virtime)
485 331 GET_NATIVE_TIME(%g5,%g1,%g2,__LINE__) ! %g5 = native time in ticks
486 332 CPU_ADDR(%g2, %g3) ! CPU struct ptr to %g2
487 333 ldn [%g2 + CPU_THREAD], %g2 ! thread pointer to %g2
488 334 ldn [%g2 + T_LWP], %g3 ! lwp pointer to %g3
489 335
490 336 /*
491 337 * Subtract start time of current microstate from time
492 338 * of day to get increment for lwp virtual time.
493 339 */
494 340 ldx [%g3 + LWP_STATE_START], %g1 ! ms_state_start
495 341 sub %g5, %g1, %g5
496 342
497 343 /*
498 344 * Add current value of ms_acct[LMS_USER]
499 345 */
500 346 ldx [%g3 + LWP_ACCT_USER], %g1 ! ms_acct[LMS_USER]
501 347 add %g5, %g1, %g5
502 348 NATIVE_TIME_TO_NSEC(%g5, %g1, %o0)
503 349
504 350 srl %g5, 0, %o1 ! %o1 = lo32(%g5)
505 351 srlx %g5, 32, %o0 ! %o0 = hi32(%g5)
506 352
507 353 FAST_TRAP_DONE
508 354 SET_SIZE(get_virtime)
509 355
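
A hedged C sketch of the get_virtime calculation above (read_native_time() and native_time_to_nsec() are stand-ins for the GET_NATIVE_TIME and NATIVE_TIME_TO_NSEC macros; the lwp fields correspond to the LWP_STATE_START and LWP_ACCT_USER offsets used above):

    static hrtime_t
    get_virtime_sketch(void)
    {
            klwp_t *lwp = curthread->t_lwp;
            hrtime_t now = read_native_time();

            /* time spent in the current microstate so far ... */
            hrtime_t delta = now - lwp->lwp_mstate.ms_state_start;

            /* ... plus the accumulated user-mode time */
            delta += lwp->lwp_mstate.ms_acct[LMS_USER];

            return (native_time_to_nsec(delta));
    }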
510 356
511 357
512 358 .seg ".text"
513 359 hrtime_base_panic:
514 360 .asciz "hrtime_base stepping back"
515 361
516 362
517 363 ENTRY_NP(hres_tick)
518 364 save %sp, -SA(MINFRAME), %sp ! get a new window
519 365
520 366 sethi %hi(hrestime), %l4
521 367 ldstub [%l4 + %lo(hres_lock + HRES_LOCK_OFFSET)], %l5 ! try locking
522 368 7: tst %l5
523 369 bz,pt %xcc, 8f ! if we got it, drive on
524 370 ld [%l4 + %lo(nsec_scale)], %l5 ! delay: %l5 = scaling factor
525 371 ldub [%l4 + %lo(hres_lock + HRES_LOCK_OFFSET)], %l5
526 372 9: tst %l5
527 373 bz,a,pn %xcc, 7b
528 374 ldstub [%l4 + %lo(hres_lock + HRES_LOCK_OFFSET)], %l5
529 375 ba,pt %xcc, 9b
530 376 ldub [%l4 + %lo(hres_lock + HRES_LOCK_OFFSET)], %l5
531 377 8:
532 378 membar #StoreLoad|#StoreStore
533 379
534 380 !
535 381 ! update hres_last_tick. %l5 has the scaling factor (nsec_scale).
536 382 !
537 383 ldx [%l4 + %lo(hrtime_base)], %g1 ! load current hrtime_base
538 384 GET_NATIVE_TIME(%l0,%l3,%l6,__LINE__) ! current native time
539 385 stx %l0, [%l4 + %lo(hres_last_tick)]! prev = current
540 386 ! convert native time to nsecs
541 387 NATIVE_TIME_TO_NSEC_SCALE(%l0, %l5, %l2, NSEC_SHIFT)
542 388
543 389 sub %l0, %g1, %i1 ! get accurate nsec delta
544 390
545 391 ldx [%l4 + %lo(hrtime_base)], %l1
546 392 cmp %l1, %l0
547 393 bg,pn %xcc, 9f
548 394 nop
549 395
550 396 stx %l0, [%l4 + %lo(hrtime_base)] ! update hrtime_base
551 397
552 398 !
553 399 ! apply adjustment, if any
554 400 !
555 401 ldx [%l4 + %lo(hrestime_adj)], %l0 ! %l0 = hrestime_adj
556 402 brz %l0, 2f
557 403 ! hrestime_adj == 0 ?
558 404 ! yes, skip adjustments
559 405 clr %l5 ! delay: set adj to zero
560 406 tst %l0 ! is hrestime_adj >= 0 ?
561 407 bge,pt %xcc, 1f ! yes, go handle positive case
562 408 srl %i1, ADJ_SHIFT, %l5 ! delay: %l5 = adj
563 409
564 410 addcc %l0, %l5, %g0 ! hrestime_adj < -adj ?
565 411 bl,pt %xcc, 2f ! yes, use current adj
566 412 neg %l5 ! delay: %l5 = -adj
567 413 ba,pt %xcc, 2f
568 414 mov %l0, %l5 ! no, so set adj = hrestime_adj
569 415 1:
570 416 subcc %l0, %l5, %g0 ! hrestime_adj < adj ?
571 417 bl,a,pt %xcc, 2f ! yes, set adj = hrestime_adj
572 418 mov %l0, %l5 ! delay: adj = hrestime_adj
573 419 2:
574 420 ldx [%l4 + %lo(timedelta)], %l0 ! %l0 = timedelta
575 421 sub %l0, %l5, %l0 ! timedelta -= adj
576 422
577 423 stx %l0, [%l4 + %lo(timedelta)] ! store new timedelta
578 424 stx %l0, [%l4 + %lo(hrestime_adj)] ! hrestime_adj = timedelta
579 425
580 426 or %l4, %lo(hrestime), %l2
581 427 ldn [%l2], %i2 ! %i2:%i3 = hrestime sec:nsec
582 428 ldn [%l2 + CLONGSIZE], %i3
583 429 add %i3, %l5, %i3 ! hrestime.nsec += adj
584 430 add %i3, %i1, %i3 ! hrestime.nsec += nslt
585 431
586 432 set NANOSEC, %l5 ! %l5 = NANOSEC
587 433 cmp %i3, %l5
588 434 bl,pt %xcc, 5f ! if hrestime.tv_nsec < NANOSEC
589 435 sethi %hi(one_sec), %i1 ! delay
590 436 add %i2, 0x1, %i2 ! hrestime.tv_sec++
591 437 sub %i3, %l5, %i3 ! hrestime.tv_nsec - NANOSEC
592 438 mov 0x1, %l5
593 439 st %l5, [%i1 + %lo(one_sec)]
594 440 5:
595 441 stn %i2, [%l2]
596 442 stn %i3, [%l2 + CLONGSIZE] ! store the new hrestime
597 443
598 444 membar #StoreStore
599 445
600 446 ld [%l4 + %lo(hres_lock)], %i1
601 447 inc %i1 ! release lock
602 448 st %i1, [%l4 + %lo(hres_lock)] ! clear hres_lock
603 449
604 450 ret
605 451 restore
606 452
607 453 9:
608 454 !
609 455 ! release hres_lock
610 456 !
611 457 ld [%l4 + %lo(hres_lock)], %i1
612 458 inc %i1
613 459 st %i1, [%l4 + %lo(hres_lock)]
614 460
615 461 sethi %hi(hrtime_base_panic), %o0
616 462 call panic
617 463 or %o0, %lo(hrtime_base_panic), %o0
618 464
619 465 SET_SIZE(hres_tick)
620 466
621 -#endif /* lint */
622 -
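
Setting aside the hres_lock handshake and the "hrtime_base stepping back" panic path, the per-tick update in hres_tick() above reduces to roughly this C sketch (read_native_time() and native_time_to_nsec_scale() are stand-ins for the GET_NATIVE_TIME and NATIVE_TIME_TO_NSEC_SCALE macros):

    static void
    hres_tick_sketch(void)
    {
            uint64_t tick = read_native_time();
            hrtime_t now, nslt, adj = 0;

            hres_last_tick = tick;                  /* prev = current */
            now = native_time_to_nsec_scale(tick);  /* native time -> nsec */
            nslt = now - hrtime_base;               /* accurate nsec delta */
            hrtime_base = now;                      /* update hrtime_base */

            if (hrestime_adj != 0) {                /* apply adjustment, if any */
                    hrtime_t cap = nslt >> ADJ_SHIFT;
                    adj = (hrestime_adj > 0) ?
                        ((hrestime_adj < cap) ? hrestime_adj : cap) :
                        ((hrestime_adj > -cap) ? hrestime_adj : -cap);
            }
            timedelta -= adj;                       /* timedelta -= adj */
            hrestime_adj = timedelta;               /* hrestime_adj = timedelta */

            hrestime.tv_nsec += adj + nslt;
            if (hrestime.tv_nsec >= NANOSEC) {
                    hrestime.tv_sec++;
                    hrestime.tv_nsec -= NANOSEC;
                    one_sec = 1;
            }
    }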
623 -#if !defined(lint) && !defined(__lint)
624 -
625 467 .seg ".text"
626 468 kstat_q_panic_msg:
627 469 .asciz "kstat_q_exit: qlen == 0"
628 470
629 471 ENTRY(kstat_q_panic)
630 472 save %sp, -SA(MINFRAME), %sp
631 473 sethi %hi(kstat_q_panic_msg), %o0
632 474 call panic
633 475 or %o0, %lo(kstat_q_panic_msg), %o0
634 476 /*NOTREACHED*/
635 477 SET_SIZE(kstat_q_panic)
636 478
637 479 #define BRZPN brz,pn
638 480 #define BRZPT brz,pt
639 481
640 482 #define KSTAT_Q_UPDATE(QOP, QBR, QZERO, QRETURN, QTYPE) \
641 483 ld [%o0 + QTYPE/**/CNT], %o1; /* %o1 = old qlen */ \
642 484 QOP %o1, 1, %o2; /* %o2 = new qlen */ \
643 485 QBR %o1, QZERO; /* done if qlen == 0 */ \
644 486 st %o2, [%o0 + QTYPE/**/CNT]; /* delay: save qlen */ \
645 487 ldx [%o0 + QTYPE/**/LASTUPDATE], %o3; \
646 488 ldx [%o0 + QTYPE/**/TIME], %o4; /* %o4 = old time */ \
647 489 ldx [%o0 + QTYPE/**/LENTIME], %o5; /* %o5 = old lentime */ \
648 490 sub %g1, %o3, %o2; /* %o2 = time delta */ \
649 491 mulx %o1, %o2, %o3; /* %o3 = cur lentime */ \
650 492 add %o4, %o2, %o4; /* %o4 = new time */ \
651 493 add %o5, %o3, %o5; /* %o5 = new lentime */ \
652 494 stx %o4, [%o0 + QTYPE/**/TIME]; /* save time */ \
653 495 stx %o5, [%o0 + QTYPE/**/LENTIME]; /* save lentime */ \
654 496 QRETURN; \
655 497 stx %g1, [%o0 + QTYPE/**/LASTUPDATE]; /* lastupdate = now */
656 498
657 499 #if !defined(DEBUG)
658 500 /*
659 501 * same as KSTAT_Q_UPDATE but without:
660 502 * QBR %o1, QZERO;
661 503 * to be used only with non-debug build. mimics ASSERT() behaviour.
662 504 */
663 505 #define KSTAT_Q_UPDATE_ND(QOP, QRETURN, QTYPE) \
664 506 ld [%o0 + QTYPE/**/CNT], %o1; /* %o1 = old qlen */ \
665 507 QOP %o1, 1, %o2; /* %o2 = new qlen */ \
666 508 st %o2, [%o0 + QTYPE/**/CNT]; /* delay: save qlen */ \
667 509 ldx [%o0 + QTYPE/**/LASTUPDATE], %o3; \
668 510 ldx [%o0 + QTYPE/**/TIME], %o4; /* %o4 = old time */ \
669 511 ldx [%o0 + QTYPE/**/LENTIME], %o5; /* %o5 = old lentime */ \
670 512 sub %g1, %o3, %o2; /* %o2 = time delta */ \
671 513 mulx %o1, %o2, %o3; /* %o3 = cur lentime */ \
672 514 add %o4, %o2, %o4; /* %o4 = new time */ \
673 515 add %o5, %o3, %o5; /* %o5 = new lentime */ \
674 516 stx %o4, [%o0 + QTYPE/**/TIME]; /* save time */ \
675 517 stx %o5, [%o0 + QTYPE/**/LENTIME]; /* save lentime */ \
676 518 QRETURN; \
677 519 stx %g1, [%o0 + QTYPE/**/LASTUPDATE]; /* lastupdate = now */
678 520 #endif
679 521
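
A hedged C sketch of what KSTAT_Q_UPDATE() computes, using the wait-queue flavor of the kstat_io_t fields that the KSTAT_IO_W offsets select ('op' is +1 on enter, -1 on exit):

    static void
    kstat_q_update_sketch(kstat_io_t *io, int op, hrtime_t now)
    {
            uint_t qlen = io->wcnt;                 /* old queue length */

            io->wcnt = qlen + op;                   /* save new queue length */
            if (qlen != 0) {                        /* nothing to integrate if 0 */
                    hrtime_t dt = now - io->wlastupdate;    /* time delta */
                    io->wtime += dt;                        /* new time */
                    io->wlentime += (hrtime_t)qlen * dt;    /* new lentime */
            }
            io->wlastupdate = now;                  /* lastupdate = now */
    }

(As the BRZPN path shows, the DEBUG build panics instead of silently skipping when an exit sees qlen == 0.)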
680 522 .align 16
681 523 ENTRY(kstat_waitq_enter)
682 524 GET_NATIVE_TIME(%g1,%g2,%g3,__LINE__)
683 525 KSTAT_Q_UPDATE(add, BRZPT, 1f, 1:retl, KSTAT_IO_W)
684 526 SET_SIZE(kstat_waitq_enter)
685 527
686 528 .align 16
687 529 ENTRY(kstat_waitq_exit)
688 530 GET_NATIVE_TIME(%g1,%g2,%g3,__LINE__)
689 531 #if defined(DEBUG)
690 532 KSTAT_Q_UPDATE(sub, BRZPN, kstat_q_panic, retl, KSTAT_IO_W)
691 533 #else
692 534 KSTAT_Q_UPDATE_ND(sub, retl, KSTAT_IO_W)
693 535 #endif
694 536 SET_SIZE(kstat_waitq_exit)
695 537
696 538 .align 16
697 539 ENTRY(kstat_runq_enter)
698 540 GET_NATIVE_TIME(%g1,%g2,%g3,__LINE__)
699 541 KSTAT_Q_UPDATE(add, BRZPT, 1f, 1:retl, KSTAT_IO_R)
700 542 SET_SIZE(kstat_runq_enter)
701 543
702 544 .align 16
703 545 ENTRY(kstat_runq_exit)
704 546 GET_NATIVE_TIME(%g1,%g2,%g3,__LINE__)
705 547 #if defined(DEBUG)
706 548 KSTAT_Q_UPDATE(sub, BRZPN, kstat_q_panic, retl, KSTAT_IO_R)
707 549 #else
708 550 KSTAT_Q_UPDATE_ND(sub, retl, KSTAT_IO_R)
709 551 #endif
710 552 SET_SIZE(kstat_runq_exit)
711 553
712 554 .align 16
713 555 ENTRY(kstat_waitq_to_runq)
714 556 GET_NATIVE_TIME(%g1,%g2,%g3,__LINE__)
715 557 #if defined(DEBUG)
716 558 KSTAT_Q_UPDATE(sub, BRZPN, kstat_q_panic, 1:, KSTAT_IO_W)
717 559 #else
718 560 KSTAT_Q_UPDATE_ND(sub, 1:, KSTAT_IO_W)
719 561 #endif
720 562 KSTAT_Q_UPDATE(add, BRZPT, 1f, 1:retl, KSTAT_IO_R)
721 563 SET_SIZE(kstat_waitq_to_runq)
722 564
723 565 .align 16
724 566 ENTRY(kstat_runq_back_to_waitq)
725 567 GET_NATIVE_TIME(%g1,%g2,%g3,__LINE__)
726 568 #if defined(DEBUG)
727 569 KSTAT_Q_UPDATE(sub, BRZPN, kstat_q_panic, 1:, KSTAT_IO_R)
728 570 #else
729 571 KSTAT_Q_UPDATE_ND(sub, 1:, KSTAT_IO_R)
730 572 #endif
731 573 KSTAT_Q_UPDATE(add, BRZPT, 1f, 1:retl, KSTAT_IO_W)
732 574 SET_SIZE(kstat_runq_back_to_waitq)
733 575
734 -#endif /* lint */
735 -
736 -#ifdef lint
737 -
738 -int64_t timedelta;
739 -hrtime_t hres_last_tick;
740 -volatile timestruc_t hrestime;
741 -int64_t hrestime_adj;
742 -volatile int hres_lock;
743 -uint_t nsec_scale;
744 -hrtime_t hrtime_base;
745 -int traptrace_use_stick;
746 -
747 -#else
748 576 /*
749 577 * -- WARNING --
750 578 *
751 579 * The following variables MUST be together on a 128-byte boundary.
752 580 * In addition to the primary performance motivation (having them all
753 581 * on the same cache line(s)), code here and in the GET*TIME() macros
754 582 * assumes that they all have the same high 22 address bits (so
755 583 * there's only one sethi).
756 584 */
757 585 .seg ".data"
758 586 .global timedelta, hres_last_tick, hrestime, hrestime_adj
759 587 .global hres_lock, nsec_scale, hrtime_base, traptrace_use_stick
760 588 .global nsec_shift, adj_shift, native_tick_offset, native_stick_offset
761 589
762 590 /* XXX - above comment claims 128-bytes is necessary */
763 591 .align 64
764 592 timedelta:
765 593 .word 0, 0 /* int64_t */
766 594 hres_last_tick:
767 595 .word 0, 0 /* hrtime_t */
768 596 hrestime:
769 597 .nword 0, 0 /* 2 longs */
770 598 hrestime_adj:
771 599 .word 0, 0 /* int64_t */
772 600 hres_lock:
773 601 .word 0
774 602 nsec_scale:
775 603 .word 0
776 604 hrtime_base:
777 605 .word 0, 0
778 606 traptrace_use_stick:
779 607 .word 0
780 608 nsec_shift:
781 609 .word NSEC_SHIFT
782 610 adj_shift:
783 611 .word ADJ_SHIFT
784 612 .align 8
785 613 native_tick_offset:
786 614 .word 0, 0
787 615 .align 8
788 616 native_stick_offset:
789 617 .word 0, 0
790 618
791 -#endif
792 619
793 -
794 620 /*
795 621 * drv_usecwait(clock_t n) [DDI/DKI - section 9F]
796 622 * usec_delay(int n) [compatibility - should go one day]
797 623 * Delay by spinning.
798 624 *
799 625 * delay for n microseconds. numbers <= 0 delay 1 usec
800 626 *
801 627 * With UltraSPARC-III the combination of supporting mixed-speed CPUs
802 628 * and variable clock rate for power management requires that we
803 629 * use %stick to implement this routine.
804 630 */
805 631
806 -#if defined(lint)
807 -
808 -/*ARGSUSED*/
809 -void
810 -drv_usecwait(clock_t n)
811 -{}
812 -
813 -/*ARGSUSED*/
814 -void
815 -usec_delay(int n)
816 -{}
817 -
818 -#else /* lint */
819 -
820 632 ENTRY(drv_usecwait)
821 633 ALTENTRY(usec_delay)
822 634 brlez,a,pn %o0, 0f
823 635 mov 1, %o0
824 636 0:
825 637 sethi %hi(sticks_per_usec), %o1
826 638 lduw [%o1 + %lo(sticks_per_usec)], %o1
827 639 mulx %o1, %o0, %o1 ! Scale usec to ticks
828 640 inc %o1 ! We don't start on a tick edge
829 641 GET_NATIVE_TIME(%o2,%o3,%o4,__LINE__)
830 642 add %o1, %o2, %o1
831 643
832 644 1: cmp %o1, %o2
833 645 GET_NATIVE_TIME(%o2,%o3,%o4,__LINE__)
834 646 bgeu,pt %xcc, 1b
835 647 nop
836 648 retl
837 649 nop
838 650 SET_SIZE(usec_delay)
839 651 SET_SIZE(drv_usecwait)
840 -#endif /* lint */
841 652
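
The spin loop in drv_usecwait() above is equivalent to roughly this C sketch (read_native_time() is a stand-in for the GET_NATIVE_TIME macro):

    void
    drv_usecwait_sketch(clock_t n)
    {
            if (n <= 0)
                    n = 1;                          /* numbers <= 0 delay 1 usec */

            /* scale usec to ticks; +1 since we don't start on a tick edge */
            uint64_t ticks = (uint64_t)sticks_per_usec * n + 1;
            uint64_t deadline = read_native_time() + ticks;

            while (read_native_time() <= deadline)
                    ;                               /* spin */
    }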
842 -#if defined(lint)
843 -
844 -/* ARGSUSED */
845 -void
846 -pil14_interrupt(int level)
847 -{}
848 -
849 -#else
850 -
851 653 /*
852 654 * Level-14 interrupt prologue.
853 655 */
854 656 ENTRY_NP(pil14_interrupt)
855 657 CPU_ADDR(%g1, %g2)
856 658 rdpr %pil, %g6 ! %g6 = interrupted PIL
857 659 stn %g6, [%g1 + CPU_PROFILE_PIL] ! record interrupted PIL
858 660 rdpr %tstate, %g6
859 661 rdpr %tpc, %g5
860 662 btst TSTATE_PRIV, %g6 ! trap from supervisor mode?
861 663 bnz,a,pt %xcc, 1f
862 664 stn %g5, [%g1 + CPU_PROFILE_PC] ! if so, record kernel PC
863 665 stn %g5, [%g1 + CPU_PROFILE_UPC] ! if not, record user PC
864 666 ba pil_interrupt_common ! must be large-disp branch
865 667 stn %g0, [%g1 + CPU_PROFILE_PC] ! zero kernel PC
866 668 1: ba pil_interrupt_common ! must be large-disp branch
867 669 stn %g0, [%g1 + CPU_PROFILE_UPC] ! zero user PC
868 670 SET_SIZE(pil14_interrupt)
869 671
870 672 ENTRY_NP(tick_rtt)
871 673 !
872 674 ! Load TICK_COMPARE into %o5; if bit 63 is set, then TICK_COMPARE is
873 675 ! disabled. If TICK_COMPARE is enabled, we know that we need to
874 676 ! reenqueue the interrupt request structure. We'll then check TICKINT
875 677 ! in SOFTINT; if it's set, then we know that we were in a TICK_COMPARE
876 678 ! interrupt. In this case, TICK_COMPARE may have been rewritten
877 679 ! recently; we'll compare %o5 to the current time to verify that it's
878 680 ! in the future.
879 681 !
880 682 ! Note that %o5 is live until after 1f.
881 683 ! XXX - there is a subroutine call while %o5 is live!
882 684 !
883 685 RD_TICKCMPR(%o5,%g1,%g2,__LINE__)
884 686 srlx %o5, TICKINT_DIS_SHFT, %g1
885 687 brnz,pt %g1, 2f
886 688 nop
887 689
888 690 rdpr %pstate, %g5
889 691 andn %g5, PSTATE_IE, %g1
890 692 wrpr %g0, %g1, %pstate ! Disable vec interrupts
891 693
892 694 sethi %hi(cbe_level14_inum), %o1
893 695 ldx [%o1 + %lo(cbe_level14_inum)], %o1
894 696 call intr_enqueue_req ! preserves %o5 and %g5
895 697 mov PIL_14, %o0
896 698
897 699 ! Check SOFTINT for TICKINT/STICKINT
898 700 rd SOFTINT, %o4
899 701 set (TICK_INT_MASK | STICK_INT_MASK), %o0
900 702 andcc %o4, %o0, %g0
901 703 bz,a,pn %icc, 2f
902 704 wrpr %g0, %g5, %pstate ! Enable vec interrupts
903 705
904 706 ! clear TICKINT/STICKINT
905 707 wr %o0, CLEAR_SOFTINT
906 708
907 709 !
908 710 ! Now that we've cleared TICKINT, we can reread %tick and confirm
909 711 ! that the value we programmed is still in the future. If it isn't,
910 712 ! we need to reprogram TICK_COMPARE to fire as soon as possible.
911 713 !
912 714 GET_NATIVE_TIME(%o0,%g1,%g2,__LINE__) ! %o0 = tick
913 715 cmp %o5, %o0 ! In the future?
914 716 bg,a,pt %xcc, 2f ! Yes, drive on.
915 717 wrpr %g0, %g5, %pstate ! delay: enable vec intr
916 718
917 719 !
918 720 ! If we're here, then we have programmed TICK_COMPARE with a %tick
919 721 ! which is in the past; we'll now load an initial step size, and loop
920 722 ! until we've managed to program TICK_COMPARE to fire in the future.
921 723 !
922 724 mov 8, %o4 ! 8 = arbitrary initial step
923 725 1: add %o0, %o4, %o5 ! Add the step
924 726 WR_TICKCMPR(%o5,%g1,%g2,__LINE__) ! Write to TICK_CMPR
925 727 GET_NATIVE_TIME(%o0,%g1,%g2,__LINE__) ! %o0 = tick
926 728 cmp %o5, %o0 ! In the future?
927 729 bg,a,pt %xcc, 2f ! Yes, drive on.
928 730 wrpr %g0, %g5, %pstate ! delay: enable vec intr
929 731 ba 1b ! No, try again.
930 732 sllx %o4, 1, %o4 ! delay: double step size
931 733
932 734 2: ba current_thread_complete
933 735 nop
934 736 SET_SIZE(tick_rtt)
935 737
936 -#endif /* lint */
937 -
938 -#if defined(lint)
939 -
940 -/* ARGSUSED */
941 -void
942 -pil15_interrupt(int level)
943 -{}
944 -
945 -#else /* lint */
946 -
947 738 /*
948 739 * Level-15 interrupt prologue.
949 740 */
950 741 ENTRY_NP(pil15_interrupt)
951 742 CPU_ADDR(%g1, %g2)
952 743 rdpr %tstate, %g6
953 744 rdpr %tpc, %g5
954 745 btst TSTATE_PRIV, %g6 ! trap from supervisor mode?
955 746 bnz,a,pt %xcc, 1f
956 747 stn %g5, [%g1 + CPU_CPCPROFILE_PC] ! if so, record kernel PC
957 748 stn %g5, [%g1 + CPU_CPCPROFILE_UPC] ! if not, record user PC
958 749 ba pil15_epilogue ! must be large-disp branch
959 750 stn %g0, [%g1 + CPU_CPCPROFILE_PC] ! zero kernel PC
960 751 1: ba pil15_epilogue ! must be large-disp branch
961 752 stn %g0, [%g1 + CPU_CPCPROFILE_UPC] ! zero user PC
962 753 SET_SIZE(pil15_interrupt)
963 754
964 -#endif /* lint */
965 -
966 -#if defined(lint)
967 755 /*
968 756 * Prefetch a page_t for write or read, this assumes a linear
969 757 * scan of sequential page_t's.
970 758 */
971 -/*ARGSUSED*/
972 -void
973 -prefetch_page_w(void *pp)
974 -{}
975 -
976 -/*ARGSUSED*/
977 -void
978 -prefetch_page_r(void *pp)
979 -{}
980 -#else /* lint */
981 -
982 759 /* XXXQ These should be inline templates, not functions */
983 760 ENTRY(prefetch_page_w)
984 761 retl
985 762 nop
986 763 SET_SIZE(prefetch_page_w)
987 764
988 765 ENTRY(prefetch_page_r)
989 766 retl
990 767 nop
991 768 SET_SIZE(prefetch_page_r)
992 769
993 -#endif /* lint */
994 -
995 -#if defined(lint)
996 770 /*
997 - * Prefetch struct smap for write.
771 + * Prefetch struct smap for write.
998 772 */
999 -/*ARGSUSED*/
1000 -void
1001 -prefetch_smap_w(void *smp)
1002 -{}
1003 -#else /* lint */
1004 -
1005 773 /* XXXQ These should be inline templates, not functions */
1006 774 ENTRY(prefetch_smap_w)
1007 775 retl
1008 776 nop
1009 777 SET_SIZE(prefetch_smap_w)
1010 778
1011 -#endif /* lint */
1012 -
1013 779 /*
1014 780 * Generic sun4v MMU and Cache operations.
1015 781 */
1016 782
1017 -#if defined(lint)
1018 -
1019 -/*ARGSUSED*/
1020 -void
1021 -vtag_flushpage(caddr_t vaddr, uint64_t sfmmup)
1022 -{}
1023 -
1024 -/*ARGSUSED*/
1025 -void
1026 -vtag_flushall(void)
1027 -{}
1028 -
1029 -/*ARGSUSED*/
1030 -void
1031 -vtag_unmap_perm_tl1(uint64_t vaddr, uint64_t ctxnum)
1032 -{}
1033 -
1034 -/*ARGSUSED*/
1035 -void
1036 -vtag_flushpage_tl1(uint64_t vaddr, uint64_t sfmmup)
1037 -{}
1038 -
1039 -/*ARGSUSED*/
1040 -void
1041 -vtag_flush_pgcnt_tl1(uint64_t vaddr, uint64_t sfmmup_pgcnt)
1042 -{}
1043 -
1044 -/*ARGSUSED*/
1045 -void
1046 -vtag_flushall_tl1(uint64_t dummy1, uint64_t dummy2)
1047 -{}
1048 -
1049 -/*ARGSUSED*/
1050 -void
1051 -vac_flushpage(pfn_t pfnum, int vcolor)
1052 -{}
1053 -
1054 -/*ARGSUSED*/
1055 -void
1056 -vac_flushpage_tl1(uint64_t pfnum, uint64_t vcolor)
1057 -{}
1058 -
1059 -/*ARGSUSED*/
1060 -void
1061 -flush_instr_mem(caddr_t vaddr, size_t len)
1062 -{}
1063 -
1064 -#else /* lint */
1065 -
1066 783 ENTRY_NP(vtag_flushpage)
1067 784 /*
1068 785 * flush page from the tlb
1069 786 *
1070 787 * %o0 = vaddr
1071 788 * %o1 = sfmmup
1072 789 */
1073 790 SFMMU_CPU_CNUM(%o1, %g1, %g2) /* %g1 = sfmmu cnum on this CPU */
1074 791
1075 792 mov %g1, %o1
1076 793 mov MAP_ITLB | MAP_DTLB, %o2
1077 794 ta MMU_UNMAP_ADDR
1078 795 brz,pt %o0, 1f
1079 796 nop
1080 797 ba panic_bad_hcall
1081 798 mov MMU_UNMAP_ADDR, %o1
1082 799 1:
1083 800 retl
1084 801 nop
1085 802 SET_SIZE(vtag_flushpage)
1086 803
1087 804 ENTRY_NP(vtag_flushall)
1088 805 mov %g0, %o0 ! XXX no cpu list yet
1089 806 mov %g0, %o1 ! XXX no cpu list yet
1090 807 mov MAP_ITLB | MAP_DTLB, %o2
1091 808 mov MMU_DEMAP_ALL, %o5
1092 809 ta FAST_TRAP
1093 810 brz,pt %o0, 1f
1094 811 nop
1095 812 ba panic_bad_hcall
1096 813 mov MMU_DEMAP_ALL, %o1
1097 814 1:
1098 815 retl
1099 816 nop
1100 817 SET_SIZE(vtag_flushall)
1101 818
1102 819 ENTRY_NP(vtag_unmap_perm_tl1)
1103 820 /*
1104 821 * x-trap to unmap perm map entry
1105 822 * %g1 = vaddr
1106 823 * %g2 = ctxnum (KCONTEXT only)
1107 824 */
1108 825 mov %o0, %g3
1109 826 mov %o1, %g4
1110 827 mov %o2, %g5
1111 828 mov %o5, %g6
1112 829 mov %g1, %o0
1113 830 mov %g2, %o1
1114 831 mov MAP_ITLB | MAP_DTLB, %o2
1115 832 mov UNMAP_PERM_ADDR, %o5
1116 833 ta FAST_TRAP
1117 834 brz,pt %o0, 1f
1118 835 nop
1119 836
1120 837 mov PTL1_BAD_HCALL, %g1
1121 838
1122 839 cmp %o0, H_ENOMAP
1123 840 move %xcc, PTL1_BAD_HCALL_UNMAP_PERM_ENOMAP, %g1
1124 841
1125 842 cmp %o0, H_EINVAL
1126 843 move %xcc, PTL1_BAD_HCALL_UNMAP_PERM_EINVAL, %g1
1127 844
1128 845 ba,a ptl1_panic
1129 846 1:
1130 847 mov %g6, %o5
1131 848 mov %g5, %o2
1132 849 mov %g4, %o1
1133 850 mov %g3, %o0
1134 851 retry
1135 852 SET_SIZE(vtag_unmap_perm_tl1)
1136 853
1137 854 ENTRY_NP(vtag_flushpage_tl1)
1138 855 /*
1139 856 * x-trap to flush page from tlb and tsb
1140 857 *
1141 858 * %g1 = vaddr, zero-extended on 32-bit kernel
1142 859 * %g2 = sfmmup
1143 860 *
1144 861 * assumes TSBE_TAG = 0
1145 862 */
1146 863 srln %g1, MMU_PAGESHIFT, %g1
1147 864 slln %g1, MMU_PAGESHIFT, %g1 /* g1 = vaddr */
1148 865 mov %o0, %g3
1149 866 mov %o1, %g4
1150 867 mov %o2, %g5
1151 868 mov %g1, %o0 /* vaddr */
1152 869
1153 870 SFMMU_CPU_CNUM(%g2, %o1, %g6) /* %o1 = sfmmu cnum on this CPU */
1154 871
1155 872 mov MAP_ITLB | MAP_DTLB, %o2
1156 873 ta MMU_UNMAP_ADDR
1157 874 brz,pt %o0, 1f
1158 875 nop
1159 876 ba ptl1_panic
1160 877 mov PTL1_BAD_HCALL, %g1
1161 878 1:
1162 879 mov %g5, %o2
1163 880 mov %g4, %o1
1164 881 mov %g3, %o0
1165 882 membar #Sync
1166 883 retry
1167 884 SET_SIZE(vtag_flushpage_tl1)
1168 885
1169 886 ENTRY_NP(vtag_flush_pgcnt_tl1)
1170 887 /*
1171 888 * x-trap to flush pgcnt MMU_PAGESIZE pages from tlb
1172 889 *
1173 890 * %g1 = vaddr, zero-extended on 32-bit kernel
1174 891 * %g2 = <sfmmup58|pgcnt6>, (pgcnt - 1) is passed in via pgcnt6 bits.
1175 892 *
1176 893 * NOTE: this handler relies on the fact that no
1177 894 * interrupts or traps can occur during the loop
1178 895 * issuing the TLB_DEMAP operations. It is assumed
1179 896 * that interrupts are disabled and this code is
1180 897 * fetching from the kernel locked text address.
1181 898 *
1182 899 * assumes TSBE_TAG = 0
1183 900 */
1184 901 srln %g1, MMU_PAGESHIFT, %g1
1185 902 slln %g1, MMU_PAGESHIFT, %g1 /* g1 = vaddr */
1186 903 mov %o0, %g3
1187 904 mov %o1, %g4
1188 905 mov %o2, %g5
1189 906
1190 907 and %g2, SFMMU_PGCNT_MASK, %g7 /* g7 = pgcnt - 1 */
1191 908 add %g7, 1, %g7 /* g7 = pgcnt */
1192 909
1193 910 andn %g2, SFMMU_PGCNT_MASK, %o0 /* %o0 = sfmmup */
1194 911
1195 912 SFMMU_CPU_CNUM(%o0, %g2, %g6) /* %g2 = sfmmu cnum on this CPU */
1196 913
1197 914 set MMU_PAGESIZE, %g6 /* g6 = pgsize */
1198 915
1199 916 1:
1200 917 mov %g1, %o0 /* vaddr */
1201 918 mov %g2, %o1 /* cnum */
1202 919 mov MAP_ITLB | MAP_DTLB, %o2
1203 920 ta MMU_UNMAP_ADDR
1204 921 brz,pt %o0, 2f
1205 922 nop
1206 923 ba ptl1_panic
1207 924 mov PTL1_BAD_HCALL, %g1
1208 925 2:
1209 926 deccc %g7 /* decr pgcnt */
1210 927 bnz,pt %icc,1b
1211 928 add %g1, %g6, %g1 /* go to nextpage */
1212 929
1213 930 mov %g5, %o2
1214 931 mov %g4, %o1
1215 932 mov %g3, %o0
1216 933 membar #Sync
1217 934 retry
1218 935 SET_SIZE(vtag_flush_pgcnt_tl1)
1219 936
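
A hedged C sketch of the vtag_flush_pgcnt_tl1 unmap loop above (hv_mmu_unmap_addr(), sfmmu_cpu_cnum() and ptl1_panic() are stand-ins for the "ta MMU_UNMAP_ADDR" hypervisor trap, the SFMMU_CPU_CNUM macro and the ptl1_panic entry):

    static void
    vtag_flush_pgcnt_sketch(uint64_t vaddr, uint64_t sfmmup_pgcnt)
    {
            uint64_t va = vaddr & ~(MMU_PAGESIZE - 1);      /* page align */
            uint64_t pgcnt = (sfmmup_pgcnt & SFMMU_PGCNT_MASK) + 1;
            uint64_t sfmmup = sfmmup_pgcnt & ~SFMMU_PGCNT_MASK;
            uint64_t cnum = sfmmu_cpu_cnum(sfmmup);

            while (pgcnt-- != 0) {
                    if (hv_mmu_unmap_addr(va, cnum, MAP_ITLB | MAP_DTLB) != 0)
                            ptl1_panic(PTL1_BAD_HCALL);     /* bad hcall */
                    va += MMU_PAGESIZE;                     /* go to next page */
            }
    }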
1220 937 ! Not implemented on US1/US2
1221 938 ENTRY_NP(vtag_flushall_tl1)
1222 939 mov %o0, %g3
1223 940 mov %o1, %g4
1224 941 mov %o2, %g5
1225 942 mov %o3, %g6 ! XXXQ not used?
1226 943 mov %o5, %g7
1227 944 mov %g0, %o0 ! XXX no cpu list yet
1228 945 mov %g0, %o1 ! XXX no cpu list yet
1229 946 mov MAP_ITLB | MAP_DTLB, %o2
1230 947 mov MMU_DEMAP_ALL, %o5
1231 948 ta FAST_TRAP
1232 949 brz,pt %o0, 1f
1233 950 nop
1234 951 ba ptl1_panic
1235 952 mov PTL1_BAD_HCALL, %g1
1236 953 1:
1237 954 mov %g7, %o5
1238 955 mov %g6, %o3 ! XXXQ not used?
1239 956 mov %g5, %o2
1240 957 mov %g4, %o1
1241 958 mov %g3, %o0
1242 959 retry
1243 960 SET_SIZE(vtag_flushall_tl1)
1244 961
1245 962 /*
1246 963 * flush_instr_mem:
1247 964 * Flush a portion of the I-$ starting at vaddr
1248 965 * %o0 vaddr
1249 966 * %o1 bytes to be flushed
1250 967 */
1251 968
1252 969 ENTRY(flush_instr_mem)
1253 970 membar #StoreStore ! Ensure the stores
1254 971 ! are globally visible
1255 972 1:
1256 973 flush %o0
1257 974 subcc %o1, ICACHE_FLUSHSZ, %o1 ! bytes = bytes-0x20
1258 975 bgu,pt %ncc, 1b
1259 976 add %o0, ICACHE_FLUSHSZ, %o0 ! vaddr = vaddr+0x20
1260 977
1261 978 retl
1262 979 nop
1263 980 SET_SIZE(flush_instr_mem)
1264 981
1265 -#endif /* !lint */
1266 -
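
A hedged C sketch of flush_instr_mem() above (iflush() and membar_storestore() stand in for the SPARC flush instruction and membar #StoreStore):

    void
    flush_instr_mem_sketch(caddr_t vaddr, size_t len)
    {
            membar_storestore();                    /* stores globally visible */
            do {
                    iflush(vaddr);                  /* flush one I-$ chunk */
                    vaddr += ICACHE_FLUSHSZ;        /* vaddr = vaddr + 0x20 */
                    len -= ICACHE_FLUSHSZ;          /* bytes = bytes - 0x20 */
            } while ((ssize_t)len > 0);
    }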
1267 982 #if !defined(CUSTOM_FPZERO)
1268 983
1269 984 /*
1270 985 * fp_zero() - clear all fp data registers and the fsr
1271 986 */
1272 987
1273 -#if defined(lint) || defined(__lint)
1274 -
1275 -void
1276 -fp_zero(void)
1277 -{}
1278 -
1279 -#else /* lint */
1280 -
1281 988 .global fp_zero_zero
1282 989 .align 8
1283 990 fp_zero_zero:
1284 991 .xword 0
1285 992
1286 993 ENTRY_NP(fp_zero)
1287 994 sethi %hi(fp_zero_zero), %o0
1288 995 ldx [%o0 + %lo(fp_zero_zero)], %fsr
1289 996 ldd [%o0 + %lo(fp_zero_zero)], %f0
1290 997 fmovd %f0, %f2
1291 998 fmovd %f0, %f4
1292 999 fmovd %f0, %f6
1293 1000 fmovd %f0, %f8
1294 1001 fmovd %f0, %f10
1295 1002 fmovd %f0, %f12
1296 1003 fmovd %f0, %f14
1297 1004 fmovd %f0, %f16
1298 1005 fmovd %f0, %f18
1299 1006 fmovd %f0, %f20
1300 1007 fmovd %f0, %f22
1301 1008 fmovd %f0, %f24
1302 1009 fmovd %f0, %f26
1303 1010 fmovd %f0, %f28
1304 1011 fmovd %f0, %f30
1305 1012 fmovd %f0, %f32
1306 1013 fmovd %f0, %f34
1307 1014 fmovd %f0, %f36
1308 1015 fmovd %f0, %f38
1309 1016 fmovd %f0, %f40
1310 1017 fmovd %f0, %f42
1311 1018 fmovd %f0, %f44
1312 1019 fmovd %f0, %f46
1313 1020 fmovd %f0, %f48
1314 1021 fmovd %f0, %f50
1315 1022 fmovd %f0, %f52
1316 1023 fmovd %f0, %f54
1317 1024 fmovd %f0, %f56
1318 1025 fmovd %f0, %f58
1319 1026 fmovd %f0, %f60
1320 1027 retl
1321 1028 fmovd %f0, %f62
1322 1029 SET_SIZE(fp_zero)
1323 1030
1324 -#endif /* lint */
1325 1031 #endif /* CUSTOM_FPZERO */