de-linting of .s files
--- old/usr/src/uts/sun4v/cpu/common_asm.s
+++ new/usr/src/uts/sun4v/cpu/common_asm.s
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 -#if !defined(lint)
26 25 #include "assym.h"
27 -#endif
28 26
29 27 /*
30 28 * General assembly language routines.
31 29 * It is the intent of this file to contain routines that are
32 30 * specific to cpu architecture.
33 31 */
34 32
35 33 /*
36 34 * WARNING: If you add a fast trap handler which can be invoked by a
37 35 * non-privileged user, you may have to use the FAST_TRAP_DONE macro
38 36 * instead of "done" instruction to return back to the user mode. See
39 37 * comments for the "fast_trap_done" entry point for more information.
40 38 */
41 39 #define FAST_TRAP_DONE \
42 40 ba,a fast_trap_done
43 41
44 42 #include <sys/machclock.h>
45 43 #include <sys/clock.h>
46 44
47 -#if defined(lint)
48 -#include <sys/types.h>
49 -#include <sys/scb.h>
50 -#include <sys/systm.h>
51 -#include <sys/regset.h>
52 -#include <sys/sunddi.h>
53 -#include <sys/lockstat.h>
54 -#endif /* lint */
55 45
56 -
57 46 #include <sys/asm_linkage.h>
58 47 #include <sys/privregs.h>
59 48 #include <vm/hat_sfmmu.h>
60 49 #include <sys/machparam.h> /* To get SYSBASE and PAGESIZE */
61 50 #include <sys/machthread.h>
62 51 #include <sys/clock.h>
63 52 #include <sys/intreg.h>
64 53 #include <sys/psr_compat.h>
65 54 #include <sys/isa_defs.h>
66 55 #include <sys/dditypes.h>
67 56 #include <sys/intr.h>
68 57 #include <sys/hypervisor_api.h>
69 58
70 -#if !defined(lint)
71 59 #include "assym.h"
72 -#endif
73 60
74 61 #define ICACHE_FLUSHSZ 0x20
75 62
76 -#if defined(lint)
77 -/*
78 - * Softint generated when counter field of tick reg matches value field
79 - * of tick_cmpr reg
80 - */
81 -/*ARGSUSED*/
82 -void
83 -tickcmpr_set(uint64_t clock_cycles)
84 -{}
85 -
86 -#else /* lint */
87 -
88 63 ENTRY_NP(tickcmpr_set)
89 64 ! get 64-bit clock_cycles interval
90 65 mov %o0, %o2
91 66 mov 8, %o3 ! A reasonable initial step size
92 67 1:
93 68 WR_TICKCMPR(%o2,%o4,%o5,__LINE__) ! Write to TICK_CMPR
94 69
95 70 GET_NATIVE_TIME(%o0,%o4,%o5,__LINE__) ! Read %tick to confirm the
96 71 ! value we wrote was in the
97 72 ! future.
98 73
99 74 cmp %o2, %o0 ! If the value we wrote was in the
100 75 bg,pt %xcc, 2f ! future, then blow out of here.
101 76 sllx %o3, 1, %o3 ! If not, then double our step size,
102 77 ba,pt %xcc, 1b ! and take another lap.
103 78 add %o0, %o3, %o2 !
104 79 2:
105 80 retl
106 81 nop
107 82 SET_SIZE(tickcmpr_set)
108 83
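
For reference, the retry loop above in C. This is a hypothetical sketch only; read_tick() and write_tickcmpr() are assumed stand-ins for the GET_NATIVE_TIME and WR_TICKCMPR macros:

    #include <stdint.h>

    /* Assumed register accessors; hypothetical, for illustration only. */
    extern uint64_t read_tick(void);
    extern void write_tickcmpr(uint64_t);

    void
    tickcmpr_set_sketch(uint64_t clock_cycles)
    {
            uint64_t target = clock_cycles;
            uint64_t step = 8;                      /* a reasonable initial step size */

            for (;;) {
                    write_tickcmpr(target);         /* program TICK_CMPR */
                    uint64_t now = read_tick();     /* confirm it is in the future */
                    if (target > now)
                            break;                  /* it is; we are done */
                    step <<= 1;                     /* otherwise double the step size */
                    target = now + step;            /* and take another lap */
            }
    }
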
109 -#endif /* lint */
110 -
111 -#if defined(lint)
112 -
113 -void
114 -tickcmpr_disable(void)
115 -{}
116 -
117 -#else
118 -
119 84 ENTRY_NP(tickcmpr_disable)
120 85 mov 1, %g1
121 86 sllx %g1, TICKINT_DIS_SHFT, %o0
122 87 WR_TICKCMPR(%o0,%o4,%o5,__LINE__) ! Write to TICK_CMPR
123 88 retl
124 89 nop
125 90 SET_SIZE(tickcmpr_disable)
126 91
127 -#endif
128 -
129 -#if defined(lint)
130 -
131 -/*
132 - * tick_write_delta() is intended to increment %stick by the specified delta,
133 - * but %stick is only writeable in hyperprivileged mode and at present there
134 - * is no provision for this. tick_write_delta is called by the cylic subsystem
135 - * if a negative %stick delta is observed after cyclic processing is resumed
136 - * after an event such as an OS suspend/resume. On sun4v, the suspend/resume
137 - * routines should adjust the %stick offset preventing the cyclic subsystem
138 - * from detecting a negative delta. If a negative delta is detected, panic the
139 - * system. The negative delta could be caused by improper %stick
140 - * synchronization after a suspend/resume.
141 - */
142 -
143 -/*ARGSUSED*/
144 -void
145 -tick_write_delta(uint64_t delta)
146 -{}
147 -
148 -#else /* lint */
149 -
150 92 .seg ".text"
151 93 tick_write_delta_panic:
152 94 .asciz "tick_write_delta: not supported, delta: 0x%lx"
153 95
154 96 ENTRY_NP(tick_write_delta)
155 97 sethi %hi(tick_write_delta_panic), %o1
156 98 save %sp, -SA(MINFRAME), %sp ! get a new window to preserve caller
157 99 mov %i0, %o1
158 100 call panic
159 101 or %i1, %lo(tick_write_delta_panic), %o0
160 102 /*NOTREACHED*/
161 103 retl
162 104 nop
163 -#endif
164 105
165 -#if defined(lint)
166 -/*
167 - * return 1 if disabled
168 - */
169 -
170 -int
171 -tickcmpr_disabled(void)
172 -{ return (0); }
173 -
174 -#else /* lint */
175 -
176 106 ENTRY_NP(tickcmpr_disabled)
177 107 RD_TICKCMPR(%g1,%o0,%o1,__LINE__)
178 108 retl
179 109 srlx %g1, TICKINT_DIS_SHFT, %o0
180 110 SET_SIZE(tickcmpr_disabled)
181 111
182 -#endif /* lint */
183 -
184 112 /*
185 113 * Get current tick
186 114 */
187 -#if defined(lint)
188 115
189 -u_longlong_t
190 -gettick(void)
191 -{ return (0); }
192 -
193 -u_longlong_t
194 -randtick(void)
195 -{ return (0); }
196 -
197 -#else /* lint */
198 -
199 116 ENTRY(gettick)
200 117 ALTENTRY(randtick)
201 118 GET_NATIVE_TIME(%o0,%o2,%o3,__LINE__)
202 119 retl
203 120 nop
204 121 SET_SIZE(randtick)
205 122 SET_SIZE(gettick)
206 123
207 -#endif /* lint */
208 -
209 124 /*
210 125 * Get current tick. For trapstat use only.
211 126 */
212 -#if defined (lint)
213 -
214 -hrtime_t
215 -rdtick()
216 -{ return (0); }
217 -
218 -#else
219 127 ENTRY(rdtick)
220 128 retl
221 129 RD_TICK_PHYSICAL(%o0)
222 130 SET_SIZE(rdtick)
223 -#endif /* lint */
224 131
225 132
226 133 /*
227 134 * Return the counter portion of the tick register.
228 135 */
229 136
230 -#if defined(lint)
231 -
232 -uint64_t
233 -gettick_counter(void)
234 -{ return(0); }
235 -
236 -uint64_t
237 -gettick_npt(void)
238 -{ return(0); }
239 -
240 -uint64_t
241 -getstick_npt(void)
242 -{ return(0); }
243 -
244 -#else /* lint */
245 -
246 137 ENTRY_NP(gettick_counter)
247 138 RD_TICK(%o0,%o1,%o2,__LINE__)
248 139 retl
249 140 nop
250 141 SET_SIZE(gettick_counter)
251 142
252 143 ENTRY_NP(gettick_npt)
253 144 RD_TICK_PHYSICAL(%o0)
254 145 retl
255 146 srlx %o0, 63, %o0
256 147 SET_SIZE(gettick_npt)
257 148
258 149 ENTRY_NP(getstick_npt)
259 150 RD_STICK_PHYSICAL(%o0)
260 151 retl
261 152 srlx %o0, 63, %o0
262 153 SET_SIZE(getstick_npt)
263 -#endif /* lint */
264 154
265 155 /*
266 156 * Provide a C callable interface to the trap that reads the hi-res timer.
267 157 * Returns 64-bit nanosecond timestamp in %o0 and %o1.
268 158 */
269 159
270 -#if defined(lint)
271 -
272 -hrtime_t
273 -gethrtime(void)
274 -{
275 - return ((hrtime_t)0);
276 -}
277 -
278 -hrtime_t
279 -gethrtime_unscaled(void)
280 -{
281 - return ((hrtime_t)0);
282 -}
283 -
284 -hrtime_t
285 -gethrtime_max(void)
286 -{
287 - return ((hrtime_t)0);
288 -}
289 -
290 -void
291 -scalehrtime(hrtime_t *hrt)
292 -{
293 - *hrt = 0;
294 -}
295 -
296 -void
297 -gethrestime(timespec_t *tp)
298 -{
299 - tp->tv_sec = 0;
300 - tp->tv_nsec = 0;
301 -}
302 -
303 -time_t
304 -gethrestime_sec(void)
305 -{
306 - return (0);
307 -}
308 -
309 -void
310 -gethrestime_lasttick(timespec_t *tp)
311 -{
312 - tp->tv_sec = 0;
313 - tp->tv_nsec = 0;
314 -}
315 -
316 -/*ARGSUSED*/
317 -void
318 -hres_tick(void)
319 -{
320 -}
321 -
322 -void
323 -panic_hres_tick(void)
324 -{
325 -}
326 -
327 -#else /* lint */
328 -
329 160 ENTRY_NP(gethrtime)
330 161 GET_HRTIME(%g1,%o0,%o1,%o2,%o3,%o4,%o5,%g2,__LINE__)
331 162 ! %g1 = hrtime
332 163 retl
333 164 mov %g1, %o0
334 165 SET_SIZE(gethrtime)
335 166
336 167 ENTRY_NP(gethrtime_unscaled)
337 168 GET_NATIVE_TIME(%g1,%o2,%o3,__LINE__) ! %g1 = native time
338 169 retl
339 170 mov %g1, %o0
340 171 SET_SIZE(gethrtime_unscaled)
341 172
342 173 ENTRY_NP(gethrtime_waitfree)
343 174 ALTENTRY(dtrace_gethrtime)
344 175 GET_NATIVE_TIME(%g1,%o2,%o3,__LINE__) ! %g1 = native time
345 176 NATIVE_TIME_TO_NSEC(%g1, %o2, %o3)
346 177 retl
347 178 mov %g1, %o0
348 179 SET_SIZE(dtrace_gethrtime)
349 180 SET_SIZE(gethrtime_waitfree)
350 181
351 182 ENTRY(gethrtime_max)
352 183 NATIVE_TIME_MAX(%g1)
353 184 NATIVE_TIME_TO_NSEC(%g1, %o0, %o1)
354 185
355 186 ! hrtime_t's are signed, max hrtime_t must be positive
356 187 mov -1, %o2
357 188 brlz,a %g1, 1f
358 189 srlx %o2, 1, %g1
359 190 1:
360 191 retl
361 192 mov %g1, %o0
362 193 SET_SIZE(gethrtime_max)
363 194
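
In C terms, gethrtime_max() converts the largest native time to nanoseconds and, because hrtime_t is signed, clamps the result to the largest positive value if the conversion overflows into the sign bit. A hedged sketch, with ticks_to_nsec() and native_time_max() as assumed stand-ins for NATIVE_TIME_TO_NSEC and NATIVE_TIME_MAX:

    #include <stdint.h>

    extern int64_t ticks_to_nsec(uint64_t);     /* assumed conversion stand-in */
    extern uint64_t native_time_max(void);      /* assumed NATIVE_TIME_MAX stand-in */

    int64_t
    gethrtime_max_sketch(void)
    {
            int64_t hrt = ticks_to_nsec(native_time_max());

            if (hrt < 0)                        /* overflowed into the sign bit */
                    hrt = INT64_MAX;            /* i.e. (uint64_t)-1 >> 1 */
            return (hrt);
    }
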
364 195 ENTRY(scalehrtime)
365 196 ldx [%o0], %o1
366 197 NATIVE_TIME_TO_NSEC(%o1, %o2, %o3)
367 198 retl
368 199 stx %o1, [%o0]
369 200 SET_SIZE(scalehrtime)
370 201
371 202 /*
372 203 * Fast trap to return a timestamp, uses trap window, leaves traps
373 204 * disabled. Returns a 64-bit nanosecond timestamp in %o0 and %o1.
374 205 *
375 206 * This is the handler for the ST_GETHRTIME trap.
376 207 */
377 208
378 209 ENTRY_NP(get_timestamp)
379 210 GET_HRTIME(%g1,%g2,%g3,%g4,%g5,%o0,%o1,%o2,__LINE__)
380 211 ! %g1 = hrtime
381 212 srlx %g1, 32, %o0 ! %o0 = hi32(%g1)
382 213 srl %g1, 0, %o1 ! %o1 = lo32(%g1)
383 214 FAST_TRAP_DONE
384 215 SET_SIZE(get_timestamp)
385 216
386 217 /*
387 218 * Macro to convert GET_HRESTIME() bits into a timestamp.
388 219 *
389 220 * We use two separate macros so that the platform-dependent GET_HRESTIME()
390 221 * can be as small as possible; CONV_HRESTIME() implements the generic part.
391 222 */
392 223 #define CONV_HRESTIME(hrestsec, hrestnsec, adj, nslt, nano) \
393 224 brz,pt adj, 3f; /* no adjustments, it's easy */ \
394 225 add hrestnsec, nslt, hrestnsec; /* hrest.tv_nsec += nslt */ \
395 226 brlz,pn adj, 2f; /* if hrestime_adj negative */ \
396 227 srlx nslt, ADJ_SHIFT, nslt; /* delay: nslt >>= 4 */ \
397 228 subcc adj, nslt, %g0; /* hrestime_adj - nslt/16 */ \
398 229 movg %xcc, nslt, adj; /* adj by min(adj, nslt/16) */ \
399 230 ba 3f; /* go convert to sec/nsec */ \
400 231 add hrestnsec, adj, hrestnsec; /* delay: apply adjustment */ \
401 232 2: addcc adj, nslt, %g0; /* hrestime_adj + nslt/16 */ \
402 233 bge,a,pt %xcc, 3f; /* is adj less negative? */ \
403 234 add hrestnsec, adj, hrestnsec; /* yes: hrest.nsec += adj */ \
404 235 sub hrestnsec, nslt, hrestnsec; /* no: hrest.nsec -= nslt/16 */ \
405 236 3: cmp hrestnsec, nano; /* more than a billion? */ \
406 237 bl,pt %xcc, 4f; /* if not, we're done */ \
407 238 nop; /* delay: do nothing :( */ \
408 239 add hrestsec, 1, hrestsec; /* hrest.tv_sec++; */ \
409 240 sub hrestnsec, nano, hrestnsec; /* hrest.tv_nsec -= NANOSEC; */ \
410 241 ba,a 3b; /* check >= billion again */ \
411 242 4:
412 243
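
CONV_HRESTIME() is dense because of the delay slots; the underlying arithmetic is small. A hypothetical C rendering, assuming ADJ_SHIFT is 4 (per the "nslt >>= 4" comment) and NANOSEC is one billion:

    #include <stdint.h>

    #define ADJ_SHIFT_SKETCH    4               /* per the "nslt >>= 4" comment */
    #define NANOSEC_SKETCH      1000000000LL

    /*
     * Fold the elapsed native-time delta (nslt) plus a bounded slice of
     * hrestime_adj (adj) into the sec/nsec pair, then renormalize tv_nsec.
     */
    static void
    conv_hrestime_sketch(int64_t *sec, int64_t *nsec, int64_t adj, int64_t nslt)
    {
            *nsec += nslt;                              /* hrest.tv_nsec += nslt */
            if (adj != 0) {
                    int64_t lim = nslt >> ADJ_SHIFT_SKETCH;     /* nslt/16 */
                    if (adj > 0)                        /* apply at most +/- nslt/16 */
                            *nsec += (adj < lim) ? adj : lim;
                    else
                            *nsec += (adj >= -lim) ? adj : -lim;
            }
            while (*nsec >= NANOSEC_SKETCH) {           /* more than a billion? */
                    *sec += 1;
                    *nsec -= NANOSEC_SKETCH;
            }
    }
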
413 244 ENTRY_NP(gethrestime)
414 245 GET_HRESTIME(%o1,%o2,%o3,%o4,%o5,%g1,%g2,%g3,%g4,__LINE__)
415 246 CONV_HRESTIME(%o1, %o2, %o3, %o4, %o5)
416 247 stn %o1, [%o0]
417 248 retl
418 249 stn %o2, [%o0 + CLONGSIZE]
419 250 SET_SIZE(gethrestime)
420 251
421 252 /*
422 253 * Similar to gethrestime(), but gethrestime_sec() returns current hrestime
423 254 * seconds.
424 255 */
425 256 ENTRY_NP(gethrestime_sec)
426 257 GET_HRESTIME(%o0,%o2,%o3,%o4,%o5,%g1,%g2,%g3,%g4,__LINE__)
427 258 CONV_HRESTIME(%o0, %o2, %o3, %o4, %o5)
428 259 retl ! %o0 current hrestime seconds
429 260 nop
430 261 SET_SIZE(gethrestime_sec)
431 262
432 263 /*
433 264 * Returns the hrestime on the last tick. This is simpler than gethrestime()
434 265 * and gethrestime_sec(): no conversion is required. gethrestime_lasttick()
435 266 * follows the same locking algorithm as GET_HRESTIME and GET_HRTIME,
436 267 * outlined in detail in clock.h. (Unlike GET_HRESTIME/GET_HRTIME, we don't
437 268 * rely on load dependencies to effect the membar #LoadLoad, instead declaring
438 269 * it explicitly.)
439 270 */
440 271 ENTRY_NP(gethrestime_lasttick)
441 272 sethi %hi(hres_lock), %o1
442 273 0:
443 274 lduw [%o1 + %lo(hres_lock)], %o2 ! Load lock value
444 275 membar #LoadLoad ! Load of lock must complete
445 276 andn %o2, 1, %o2 ! Mask off lowest bit
446 277 ldn [%o1 + %lo(hrestime)], %g1 ! Seconds.
447 278 add %o1, %lo(hrestime), %o4
448 279 ldn [%o4 + CLONGSIZE], %g2 ! Nanoseconds.
449 280 membar #LoadLoad ! All loads must complete
450 281 lduw [%o1 + %lo(hres_lock)], %o3 ! Reload lock value
451 282 cmp %o3, %o2 ! If lock is locked or has
452 283 bne 0b ! changed, retry.
453 284 stn %g1, [%o0] ! Delay: store seconds
454 285 retl
455 286 stn %g2, [%o0 + CLONGSIZE] ! Delay: store nanoseconds
456 287 SET_SIZE(gethrestime_lasttick)
457 288
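
The read loop above is a classic sequence-lock reader. A hypothetical C equivalent (the membar #LoadLoad barriers are elided, and the hrestime layout is simplified to two longs):

    #include <stdint.h>

    extern volatile uint32_t hres_lock;
    extern volatile long hrestime[2];           /* [0] = seconds, [1] = nanoseconds */

    void
    gethrestime_lasttick_sketch(long *tp)
    {
            uint32_t before;
            long sec, nsec;

            do {
                    before = hres_lock & ~1u;   /* mask off the lock (lowest) bit */
                    sec = hrestime[0];
                    nsec = hrestime[1];
                    /* retry if the lock was held or changed while we read */
            } while (hres_lock != before);

            tp[0] = sec;                        /* store seconds */
            tp[1] = nsec;                       /* store nanoseconds */
    }
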
458 289 /*
459 290 * Fast trap for gettimeofday(). Returns a timestruc_t in %o0 and %o1.
460 291 *
461 292 * This is the handler for the ST_GETHRESTIME trap.
462 293 */
463 294
464 295 ENTRY_NP(get_hrestime)
465 296 GET_HRESTIME(%o0,%o1,%g1,%g2,%g3,%g4,%g5,%o2,%o3,__LINE__)
466 297 CONV_HRESTIME(%o0, %o1, %g1, %g2, %g3)
467 298 FAST_TRAP_DONE
468 299 SET_SIZE(get_hrestime)
469 300
470 301 /*
471 302 * Fast trap to return lwp virtual time, uses trap window, leaves traps
472 303 * disabled. Returns a 64-bit number in %o0:%o1, which is the number
473 304 * of nanoseconds consumed.
474 305 *
475 306 * This is the handler for the ST_GETHRVTIME trap.
476 307 *
477 308 * Register usage:
478 309 * %o0, %o1 = return lwp virtual time
479 310 * %o2 = CPU/thread
480 311 * %o3 = lwp
481 312 * %g1 = scratch
482 313 * %g5 = scratch
483 314 */
484 315 ENTRY_NP(get_virtime)
485 316 GET_NATIVE_TIME(%g5,%g1,%g2,__LINE__) ! %g5 = native time in ticks
486 317 CPU_ADDR(%g2, %g3) ! CPU struct ptr to %g2
487 318 ldn [%g2 + CPU_THREAD], %g2 ! thread pointer to %g2
488 319 ldn [%g2 + T_LWP], %g3 ! lwp pointer to %g3
489 320
490 321 /*
491 322 * Subtract start time of current microstate from time
492 323 * of day to get increment for lwp virtual time.
493 324 */
494 325 ldx [%g3 + LWP_STATE_START], %g1 ! ms_state_start
495 326 sub %g5, %g1, %g5
496 327
497 328 /*
498 329 * Add current value of ms_acct[LMS_USER]
499 330 */
500 331 ldx [%g3 + LWP_ACCT_USER], %g1 ! ms_acct[LMS_USER]
501 332 add %g5, %g1, %g5
502 333 NATIVE_TIME_TO_NSEC(%g5, %g1, %o0)
503 334
504 335 srl %g5, 0, %o1 ! %o1 = lo32(%g5)
505 336 srlx %g5, 32, %o0 ! %o0 = hi32(%g5)
506 337
507 338 FAST_TRAP_DONE
508 339 SET_SIZE(get_virtime)
509 340
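
Stripped of the trap plumbing, the ST_GETHRVTIME computation is: the user time already accounted to the lwp, plus the time spent so far in the current microstate, converted to nanoseconds. A hedged sketch with assumed accessors standing in for the structure loads and the conversion macro:

    #include <stdint.h>

    extern uint64_t native_time(void);          /* assumed GET_NATIVE_TIME stand-in */
    extern uint64_t lwp_ms_state_start(void);   /* lwp's ms_state_start */
    extern uint64_t lwp_ms_acct_user(void);     /* lwp's ms_acct[LMS_USER] */
    extern uint64_t ticks_to_nsec(uint64_t);    /* assumed NATIVE_TIME_TO_NSEC stand-in */

    uint64_t
    get_virtime_sketch(void)
    {
            /* time spent in the current microstate so far ... */
            uint64_t t = native_time() - lwp_ms_state_start();

            /* ... plus the user time already accounted */
            t += lwp_ms_acct_user();

            return (ticks_to_nsec(t));          /* ticks -> nanoseconds */
    }
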
510 341
511 342
512 343 .seg ".text"
513 344 hrtime_base_panic:
514 345 .asciz "hrtime_base stepping back"
515 346
516 347
517 348 ENTRY_NP(hres_tick)
518 349 save %sp, -SA(MINFRAME), %sp ! get a new window
519 350
520 351 sethi %hi(hrestime), %l4
521 352 ldstub [%l4 + %lo(hres_lock + HRES_LOCK_OFFSET)], %l5 ! try locking
522 353 7: tst %l5
523 354 bz,pt %xcc, 8f ! if we got it, drive on
524 355 ld [%l4 + %lo(nsec_scale)], %l5 ! delay: %l5 = scaling factor
525 356 ldub [%l4 + %lo(hres_lock + HRES_LOCK_OFFSET)], %l5
526 357 9: tst %l5
527 358 bz,a,pn %xcc, 7b
528 359 ldstub [%l4 + %lo(hres_lock + HRES_LOCK_OFFSET)], %l5
529 360 ba,pt %xcc, 9b
530 361 ldub [%l4 + %lo(hres_lock + HRES_LOCK_OFFSET)], %l5
531 362 8:
532 363 membar #StoreLoad|#StoreStore
533 364
534 365 !
535 366 ! update hres_last_tick. %l5 has the scaling factor (nsec_scale).
536 367 !
537 368 ldx [%l4 + %lo(hrtime_base)], %g1 ! load current hrtime_base
538 369 GET_NATIVE_TIME(%l0,%l3,%l6,__LINE__) ! current native time
539 370 stx %l0, [%l4 + %lo(hres_last_tick)]! prev = current
540 371 ! convert native time to nsecs
541 372 NATIVE_TIME_TO_NSEC_SCALE(%l0, %l5, %l2, NSEC_SHIFT)
542 373
543 374 sub %l0, %g1, %i1 ! get accurate nsec delta
544 375
545 376 ldx [%l4 + %lo(hrtime_base)], %l1
546 377 cmp %l1, %l0
547 378 bg,pn %xcc, 9f
548 379 nop
549 380
550 381 stx %l0, [%l4 + %lo(hrtime_base)] ! update hrtime_base
551 382
552 383 !
553 384 ! apply adjustment, if any
554 385 !
555 386 ldx [%l4 + %lo(hrestime_adj)], %l0 ! %l0 = hrestime_adj
556 387 brz %l0, 2f
557 388 ! hrestime_adj == 0 ?
558 389 ! yes, skip adjustments
559 390 clr %l5 ! delay: set adj to zero
560 391 tst %l0 ! is hrestime_adj >= 0 ?
561 392 bge,pt %xcc, 1f ! yes, go handle positive case
562 393 srl %i1, ADJ_SHIFT, %l5 ! delay: %l5 = adj
563 394
564 395 addcc %l0, %l5, %g0 ! hrestime_adj < -adj ?
565 396 bl,pt %xcc, 2f ! yes, use current adj
566 397 neg %l5 ! delay: %l5 = -adj
567 398 ba,pt %xcc, 2f
568 399 mov %l0, %l5 ! no, so set adj = hrestime_adj
569 400 1:
570 401 subcc %l0, %l5, %g0 ! hrestime_adj < adj ?
571 402 bl,a,pt %xcc, 2f ! yes, set adj = hrestime_adj
572 403 mov %l0, %l5 ! delay: adj = hrestime_adj
573 404 2:
574 405 ldx [%l4 + %lo(timedelta)], %l0 ! %l0 = timedelta
575 406 sub %l0, %l5, %l0 ! timedelta -= adj
576 407
577 408 stx %l0, [%l4 + %lo(timedelta)] ! store new timedelta
578 409 stx %l0, [%l4 + %lo(hrestime_adj)] ! hrestime_adj = timedelta
579 410
580 411 or %l4, %lo(hrestime), %l2
581 412 ldn [%l2], %i2 ! %i2:%i3 = hrestime sec:nsec
582 413 ldn [%l2 + CLONGSIZE], %i3
583 414 add %i3, %l5, %i3 ! hrestime.nsec += adj
584 415 add %i3, %i1, %i3 ! hrestime.nsec += nslt
585 416
586 417 set NANOSEC, %l5 ! %l5 = NANOSEC
587 418 cmp %i3, %l5
588 419 bl,pt %xcc, 5f ! if hrestime.tv_nsec < NANOSEC
589 420 sethi %hi(one_sec), %i1 ! delay
590 421 add %i2, 0x1, %i2 ! hrestime.tv_sec++
591 422 sub %i3, %l5, %i3 ! hrestime.tv_nsec - NANOSEC
592 423 mov 0x1, %l5
593 424 st %l5, [%i1 + %lo(one_sec)]
594 425 5:
595 426 stn %i2, [%l2]
596 427 stn %i3, [%l2 + CLONGSIZE] ! store the new hrestime
597 428
598 429 membar #StoreStore
599 430
600 431 ld [%l4 + %lo(hres_lock)], %i1
601 432 inc %i1 ! release lock
602 433 st %i1, [%l4 + %lo(hres_lock)] ! clear hres_lock
603 434
604 435 ret
605 436 restore
606 437
607 438 9:
608 439 !
609 440 ! release hres_lock
610 441 !
611 442 ld [%l4 + %lo(hres_lock)], %i1
612 443 inc %i1
613 444 st %i1, [%l4 + %lo(hres_lock)]
614 445
615 446 sethi %hi(hrtime_base_panic), %o0
616 447 call panic
617 448 or %o0, %lo(hrtime_base_panic), %o0
618 449
619 450 SET_SIZE(hres_tick)
620 451
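
The adjustment bookkeeping inside the locked section above can be summarized in C. This is a hypothetical sketch only: it ignores the hres_lock handling, the hrtime_base update and panic path, and the NATIVE_TIME_TO_NSEC_SCALE conversion, and it simplifies hrestime to two longs; nslt is the nanosecond delta since the previous tick:

    #include <stdint.h>

    #define ADJ_SHIFT_SKETCH    4
    #define NANOSEC_SKETCH      1000000000L

    extern int64_t timedelta, hrestime_adj;
    extern volatile long hrestime[2];           /* [0] = sec, [1] = nsec */
    extern int one_sec;

    static void
    hres_tick_adjust_sketch(long nslt)
    {
            long adj = nslt >> ADJ_SHIFT_SKETCH;        /* at most nslt/16 per tick */

            if (hrestime_adj == 0)
                    adj = 0;                            /* no adjustment pending */
            else if (hrestime_adj > 0)
                    adj = (hrestime_adj < adj) ? (long)hrestime_adj : adj;
            else
                    adj = (hrestime_adj < -adj) ? -adj : (long)hrestime_adj;

            timedelta -= adj;                           /* consume part of the delta */
            hrestime_adj = timedelta;

            hrestime[1] += adj + nslt;                  /* hrestime.tv_nsec += adj + nslt */
            if (hrestime[1] >= NANOSEC_SKETCH) {        /* carry into seconds */
                    hrestime[0]++;
                    hrestime[1] -= NANOSEC_SKETCH;
                    one_sec = 1;                        /* note that a second elapsed */
            }
    }
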
621 -#endif /* lint */
622 -
623 -#if !defined(lint) && !defined(__lint)
624 -
625 452 .seg ".text"
626 453 kstat_q_panic_msg:
627 454 .asciz "kstat_q_exit: qlen == 0"
628 455
629 456 ENTRY(kstat_q_panic)
630 457 save %sp, -SA(MINFRAME), %sp
631 458 sethi %hi(kstat_q_panic_msg), %o0
632 459 call panic
633 460 or %o0, %lo(kstat_q_panic_msg), %o0
634 461 /*NOTREACHED*/
635 462 SET_SIZE(kstat_q_panic)
636 463
637 464 #define BRZPN brz,pn
638 465 #define BRZPT brz,pt
639 466
640 467 #define KSTAT_Q_UPDATE(QOP, QBR, QZERO, QRETURN, QTYPE) \
641 468 ld [%o0 + QTYPE/**/CNT], %o1; /* %o1 = old qlen */ \
642 469 QOP %o1, 1, %o2; /* %o2 = new qlen */ \
643 470 QBR %o1, QZERO; /* done if qlen == 0 */ \
644 471 st %o2, [%o0 + QTYPE/**/CNT]; /* delay: save qlen */ \
645 472 ldx [%o0 + QTYPE/**/LASTUPDATE], %o3; \
646 473 ldx [%o0 + QTYPE/**/TIME], %o4; /* %o4 = old time */ \
647 474 ldx [%o0 + QTYPE/**/LENTIME], %o5; /* %o5 = old lentime */ \
648 475 sub %g1, %o3, %o2; /* %o2 = time delta */ \
649 476 mulx %o1, %o2, %o3; /* %o3 = cur lentime */ \
650 477 add %o4, %o2, %o4; /* %o4 = new time */ \
651 478 add %o5, %o3, %o5; /* %o5 = new lentime */ \
652 479 stx %o4, [%o0 + QTYPE/**/TIME]; /* save time */ \
653 480 stx %o5, [%o0 + QTYPE/**/LENTIME]; /* save lentime */ \
654 481 QRETURN; \
655 482 stx %g1, [%o0 + QTYPE/**/LASTUPDATE]; /* lastupdate = now */
656 483
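
In C terms, KSTAT_Q_UPDATE() maintains Little's-law style statistics on each queue transition: while the queue was non-empty, the elapsed time is added to the busy time and qlen * elapsed to the length-time accumulator. A hypothetical sketch (field names are illustrative, not the real kstat_io offsets; the DEBUG exit paths panic instead when the old length is zero):

    #include <stdint.h>

    typedef struct {
            uint32_t cnt;           /* current queue length */
            uint64_t lastupdate;    /* native time of the last transition */
            uint64_t time;          /* cumulative time the queue was non-empty */
            uint64_t lentime;       /* cumulative sum of qlen * elapsed time */
    } kq_sketch_t;

    /* delta_q is +1 on enter, -1 on exit; now is the current native time. */
    static void
    kstat_q_update_sketch(kq_sketch_t *q, int delta_q, uint64_t now)
    {
            uint32_t old_qlen = q->cnt;

            q->cnt = old_qlen + delta_q;
            if (old_qlen != 0) {                /* accumulate only while non-empty */
                    uint64_t elapsed = now - q->lastupdate;
                    q->time += elapsed;
                    q->lentime += (uint64_t)old_qlen * elapsed;
            }
            q->lastupdate = now;
    }
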
657 484 #if !defined(DEBUG)
658 485 /*
659 486 * same as KSTAT_Q_UPDATE but without:
660 487 * QBR %o1, QZERO;
661 488 * to be used only with non-debug build. mimics ASSERT() behaviour.
662 489 */
663 490 #define KSTAT_Q_UPDATE_ND(QOP, QRETURN, QTYPE) \
664 491 ld [%o0 + QTYPE/**/CNT], %o1; /* %o1 = old qlen */ \
665 492 QOP %o1, 1, %o2; /* %o2 = new qlen */ \
666 493 st %o2, [%o0 + QTYPE/**/CNT]; /* delay: save qlen */ \
667 494 ldx [%o0 + QTYPE/**/LASTUPDATE], %o3; \
668 495 ldx [%o0 + QTYPE/**/TIME], %o4; /* %o4 = old time */ \
669 496 ldx [%o0 + QTYPE/**/LENTIME], %o5; /* %o5 = old lentime */ \
670 497 sub %g1, %o3, %o2; /* %o2 = time delta */ \
671 498 mulx %o1, %o2, %o3; /* %o3 = cur lentime */ \
672 499 add %o4, %o2, %o4; /* %o4 = new time */ \
673 500 add %o5, %o3, %o5; /* %o5 = new lentime */ \
674 501 stx %o4, [%o0 + QTYPE/**/TIME]; /* save time */ \
675 502 stx %o5, [%o0 + QTYPE/**/LENTIME]; /* save lentime */ \
676 503 QRETURN; \
677 504 stx %g1, [%o0 + QTYPE/**/LASTUPDATE]; /* lastupdate = now */
678 505 #endif
679 506
680 507 .align 16
681 508 ENTRY(kstat_waitq_enter)
682 509 GET_NATIVE_TIME(%g1,%g2,%g3,__LINE__)
683 510 KSTAT_Q_UPDATE(add, BRZPT, 1f, 1:retl, KSTAT_IO_W)
684 511 SET_SIZE(kstat_waitq_enter)
685 512
686 513 .align 16
687 514 ENTRY(kstat_waitq_exit)
688 515 GET_NATIVE_TIME(%g1,%g2,%g3,__LINE__)
689 516 #if defined(DEBUG)
690 517 KSTAT_Q_UPDATE(sub, BRZPN, kstat_q_panic, retl, KSTAT_IO_W)
691 518 #else
692 519 KSTAT_Q_UPDATE_ND(sub, retl, KSTAT_IO_W)
693 520 #endif
694 521 SET_SIZE(kstat_waitq_exit)
695 522
696 523 .align 16
697 524 ENTRY(kstat_runq_enter)
698 525 GET_NATIVE_TIME(%g1,%g2,%g3,__LINE__)
699 526 KSTAT_Q_UPDATE(add, BRZPT, 1f, 1:retl, KSTAT_IO_R)
700 527 SET_SIZE(kstat_runq_enter)
701 528
702 529 .align 16
703 530 ENTRY(kstat_runq_exit)
704 531 GET_NATIVE_TIME(%g1,%g2,%g3,__LINE__)
705 532 #if defined(DEBUG)
706 533 KSTAT_Q_UPDATE(sub, BRZPN, kstat_q_panic, retl, KSTAT_IO_R)
707 534 #else
708 535 KSTAT_Q_UPDATE_ND(sub, retl, KSTAT_IO_R)
709 536 #endif
710 537 SET_SIZE(kstat_runq_exit)
711 538
712 539 .align 16
713 540 ENTRY(kstat_waitq_to_runq)
714 541 GET_NATIVE_TIME(%g1,%g2,%g3,__LINE__)
715 542 #if defined(DEBUG)
716 543 KSTAT_Q_UPDATE(sub, BRZPN, kstat_q_panic, 1:, KSTAT_IO_W)
717 544 #else
718 545 KSTAT_Q_UPDATE_ND(sub, 1:, KSTAT_IO_W)
719 546 #endif
720 547 KSTAT_Q_UPDATE(add, BRZPT, 1f, 1:retl, KSTAT_IO_R)
721 548 SET_SIZE(kstat_waitq_to_runq)
722 549
723 550 .align 16
724 551 ENTRY(kstat_runq_back_to_waitq)
725 552 GET_NATIVE_TIME(%g1,%g2,%g3,__LINE__)
726 553 #if defined(DEBUG)
727 554 KSTAT_Q_UPDATE(sub, BRZPN, kstat_q_panic, 1:, KSTAT_IO_R)
728 555 #else
729 556 KSTAT_Q_UPDATE_ND(sub, 1:, KSTAT_IO_R)
730 557 #endif
731 558 KSTAT_Q_UPDATE(add, BRZPT, 1f, 1:retl, KSTAT_IO_W)
732 559 SET_SIZE(kstat_runq_back_to_waitq)
733 560
734 -#endif /* lint */
735 -
736 -#ifdef lint
737 -
738 -int64_t timedelta;
739 -hrtime_t hres_last_tick;
740 -volatile timestruc_t hrestime;
741 -int64_t hrestime_adj;
742 -volatile int hres_lock;
743 -uint_t nsec_scale;
744 -hrtime_t hrtime_base;
745 -int traptrace_use_stick;
746 -
747 -#else
748 561 /*
749 562 * -- WARNING --
750 563 *
751 564 * The following variables MUST be together on a 128-byte boundary.
752 565 * In addition to the primary performance motivation (having them all
753 566 * on the same cache line(s)), code here and in the GET*TIME() macros
754 567 * assumes that they all have the same high 22 address bits (so
755 568 * there's only one sethi).
756 569 */
757 570 .seg ".data"
758 571 .global timedelta, hres_last_tick, hrestime, hrestime_adj
759 572 .global hres_lock, nsec_scale, hrtime_base, traptrace_use_stick
760 573 .global nsec_shift, adj_shift, native_tick_offset, native_stick_offset
761 574
762 575 /* XXX - above comment claims 128-bytes is necessary */
763 576 .align 64
764 577 timedelta:
765 578 .word 0, 0 /* int64_t */
766 579 hres_last_tick:
767 580 .word 0, 0 /* hrtime_t */
768 581 hrestime:
769 582 .nword 0, 0 /* 2 longs */
770 583 hrestime_adj:
771 584 .word 0, 0 /* int64_t */
772 585 hres_lock:
773 586 .word 0
774 587 nsec_scale:
775 588 .word 0
776 589 hrtime_base:
777 590 .word 0, 0
778 591 traptrace_use_stick:
779 592 .word 0
780 593 nsec_shift:
781 594 .word NSEC_SHIFT
782 595 adj_shift:
783 596 .word ADJ_SHIFT
784 597 .align 8
785 598 native_tick_offset:
786 599 .word 0, 0
787 600 .align 8
788 601 native_stick_offset:
789 602 .word 0, 0
790 603
791 -#endif
792 604
793 -
794 605 /*
795 606 * drv_usecwait(clock_t n) [DDI/DKI - section 9F]
796 607 * usec_delay(int n) [compatibility - should go one day]
797 608 * Delay by spinning.
798 609 *
799 610 * delay for n microseconds. numbers <= 0 delay 1 usec
800 611 *
801 612 * With UltraSPARC-III the combination of supporting mixed-speed CPUs
802 613 * and variable clock rate for power management requires that we
803 614 * use %stick to implement this routine.
804 615 */
805 616
806 -#if defined(lint)
807 -
808 -/*ARGSUSED*/
809 -void
810 -drv_usecwait(clock_t n)
811 -{}
812 -
813 -/*ARGSUSED*/
814 -void
815 -usec_delay(int n)
816 -{}
817 -
818 -#else /* lint */
819 -
820 617 ENTRY(drv_usecwait)
821 618 ALTENTRY(usec_delay)
822 619 brlez,a,pn %o0, 0f
823 620 mov 1, %o0
824 621 0:
825 622 sethi %hi(sticks_per_usec), %o1
826 623 lduw [%o1 + %lo(sticks_per_usec)], %o1
827 624 mulx %o1, %o0, %o1 ! Scale usec to ticks
828 625 inc %o1 ! We don't start on a tick edge
829 626 GET_NATIVE_TIME(%o2,%o3,%o4,__LINE__)
830 627 add %o1, %o2, %o1
831 628
832 629 1: cmp %o1, %o2
833 630 GET_NATIVE_TIME(%o2,%o3,%o4,__LINE__)
834 631 bgeu,pt %xcc, 1b
835 632 nop
836 633 retl
837 634 nop
838 635 SET_SIZE(usec_delay)
839 636 SET_SIZE(drv_usecwait)
840 -#endif /* lint */
841 637
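
A hypothetical C sketch of the spin loop above, with native_time() standing in for GET_NATIVE_TIME and sticks_per_usec given an assumed 32-bit type (the assembly loads it with lduw):

    #include <stdint.h>

    extern uint32_t sticks_per_usec;            /* %stick ticks per microsecond */
    extern uint64_t native_time(void);          /* assumed GET_NATIVE_TIME stand-in */

    void
    drv_usecwait_sketch(long n)
    {
            if (n <= 0)
                    n = 1;                      /* numbers <= 0 delay 1 usec */

            /* +1 because we don't start exactly on a tick edge */
            uint64_t deadline = native_time() +
                (uint64_t)sticks_per_usec * (uint64_t)n + 1;

            while (native_time() <= deadline)
                    ;                           /* spin */
    }
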
842 -#if defined(lint)
843 -
844 -/* ARGSUSED */
845 -void
846 -pil14_interrupt(int level)
847 -{}
848 -
849 -#else
850 -
851 638 /*
852 639 * Level-14 interrupt prologue.
853 640 */
854 641 ENTRY_NP(pil14_interrupt)
855 642 CPU_ADDR(%g1, %g2)
856 643 rdpr %pil, %g6 ! %g6 = interrupted PIL
857 644 stn %g6, [%g1 + CPU_PROFILE_PIL] ! record interrupted PIL
858 645 rdpr %tstate, %g6
859 646 rdpr %tpc, %g5
860 647 btst TSTATE_PRIV, %g6 ! trap from supervisor mode?
861 648 bnz,a,pt %xcc, 1f
862 649 stn %g5, [%g1 + CPU_PROFILE_PC] ! if so, record kernel PC
863 650 stn %g5, [%g1 + CPU_PROFILE_UPC] ! if not, record user PC
864 651 ba pil_interrupt_common ! must be large-disp branch
865 652 stn %g0, [%g1 + CPU_PROFILE_PC] ! zero kernel PC
866 653 1: ba pil_interrupt_common ! must be large-disp branch
867 654 stn %g0, [%g1 + CPU_PROFILE_UPC] ! zero user PC
868 655 SET_SIZE(pil14_interrupt)
869 656
870 657 ENTRY_NP(tick_rtt)
871 658 !
872 659 ! Load TICK_COMPARE into %o5; if bit 63 is set, then TICK_COMPARE is
873 660 ! disabled. If TICK_COMPARE is enabled, we know that we need to
874 661 ! reenqueue the interrupt request structure. We'll then check TICKINT
875 662 ! in SOFTINT; if it's set, then we know that we were in a TICK_COMPARE
876 663 ! interrupt. In this case, TICK_COMPARE may have been rewritten
877 664 ! recently; we'll compare %o5 to the current time to verify that it's
878 665 ! in the future.
879 666 !
880 667 ! Note that %o5 is live until after 1f.
881 668 ! XXX - there is a subroutine call while %o5 is live!
882 669 !
883 670 RD_TICKCMPR(%o5,%g1,%g2,__LINE__)
884 671 srlx %o5, TICKINT_DIS_SHFT, %g1
885 672 brnz,pt %g1, 2f
886 673 nop
887 674
888 675 rdpr %pstate, %g5
889 676 andn %g5, PSTATE_IE, %g1
890 677 wrpr %g0, %g1, %pstate ! Disable vec interrupts
891 678
892 679 sethi %hi(cbe_level14_inum), %o1
893 680 ldx [%o1 + %lo(cbe_level14_inum)], %o1
894 681 call intr_enqueue_req ! preserves %o5 and %g5
895 682 mov PIL_14, %o0
896 683
897 684 ! Check SOFTINT for TICKINT/STICKINT
898 685 rd SOFTINT, %o4
899 686 set (TICK_INT_MASK | STICK_INT_MASK), %o0
900 687 andcc %o4, %o0, %g0
901 688 bz,a,pn %icc, 2f
902 689 wrpr %g0, %g5, %pstate ! Enable vec interrupts
903 690
904 691 ! clear TICKINT/STICKINT
905 692 wr %o0, CLEAR_SOFTINT
906 693
907 694 !
908 695 ! Now that we've cleared TICKINT, we can reread %tick and confirm
909 696 ! that the value we programmed is still in the future. If it isn't,
910 697 ! we need to reprogram TICK_COMPARE to fire as soon as possible.
911 698 !
912 699 GET_NATIVE_TIME(%o0,%g1,%g2,__LINE__) ! %o0 = tick
913 700 cmp %o5, %o0 ! In the future?
914 701 bg,a,pt %xcc, 2f ! Yes, drive on.
915 702 wrpr %g0, %g5, %pstate ! delay: enable vec intr
916 703
917 704 !
918 705 ! If we're here, then we have programmed TICK_COMPARE with a %tick
919 706 ! which is in the past; we'll now load an initial step size, and loop
920 707 ! until we've managed to program TICK_COMPARE to fire in the future.
921 708 !
922 709 mov 8, %o4 ! 8 = arbitrary initial step
923 710 1: add %o0, %o4, %o5 ! Add the step
924 711 WR_TICKCMPR(%o5,%g1,%g2,__LINE__) ! Write to TICK_CMPR
925 712 GET_NATIVE_TIME(%o0,%g1,%g2,__LINE__) ! %o0 = tick
926 713 cmp %o5, %o0 ! In the future?
927 714 bg,a,pt %xcc, 2f ! Yes, drive on.
928 715 wrpr %g0, %g5, %pstate ! delay: enable vec intr
929 716 ba 1b ! No, try again.
930 717 sllx %o4, 1, %o4 ! delay: double step size
931 718
932 719 2: ba current_thread_complete
933 720 nop
934 721 SET_SIZE(tick_rtt)
935 722
936 -#endif /* lint */
937 -
938 -#if defined(lint)
939 -
940 -/* ARGSUSED */
941 -void
942 -pil15_interrupt(int level)
943 -{}
944 -
945 -#else /* lint */
946 -
947 723 /*
948 724 * Level-15 interrupt prologue.
949 725 */
950 726 ENTRY_NP(pil15_interrupt)
951 727 CPU_ADDR(%g1, %g2)
952 728 rdpr %tstate, %g6
953 729 rdpr %tpc, %g5
954 730 btst TSTATE_PRIV, %g6 ! trap from supervisor mode?
955 731 bnz,a,pt %xcc, 1f
956 732 stn %g5, [%g1 + CPU_CPCPROFILE_PC] ! if so, record kernel PC
957 733 stn %g5, [%g1 + CPU_CPCPROFILE_UPC] ! if not, record user PC
958 734 ba pil15_epilogue ! must be large-disp branch
959 735 stn %g0, [%g1 + CPU_CPCPROFILE_PC] ! zero kernel PC
960 736 1: ba pil15_epilogue ! must be large-disp branch
961 737 stn %g0, [%g1 + CPU_CPCPROFILE_UPC] ! zero user PC
962 738 SET_SIZE(pil15_interrupt)
963 739
964 -#endif /* lint */
965 -
966 -#if defined(lint)
967 -/*
968 - * Prefetch a page_t for write or read, this assumes a linear
969 - * scan of sequential page_t's.
970 - */
971 -/*ARGSUSED*/
972 -void
973 -prefetch_page_w(void *pp)
974 -{}
975 -
976 -/*ARGSUSED*/
977 -void
978 -prefetch_page_r(void *pp)
979 -{}
980 -#else /* lint */
981 -
982 740 /* XXXQ These should be inline templates, not functions */
983 741 ENTRY(prefetch_page_w)
984 742 retl
985 743 nop
986 744 SET_SIZE(prefetch_page_w)
987 745
988 746 ENTRY(prefetch_page_r)
989 747 retl
990 748 nop
991 749 SET_SIZE(prefetch_page_r)
992 750
993 -#endif /* lint */
994 -
995 -#if defined(lint)
996 -/*
997 - * Prefetch struct smap for write.
998 - */
999 -/*ARGSUSED*/
1000 -void
1001 -prefetch_smap_w(void *smp)
1002 -{}
1003 -#else /* lint */
1004 -
1005 751 /* XXXQ These should be inline templates, not functions */
1006 752 ENTRY(prefetch_smap_w)
1007 753 retl
1008 754 nop
1009 755 SET_SIZE(prefetch_smap_w)
1010 756
1011 -#endif /* lint */
1012 -
1013 757 /*
1014 758 * Generic sun4v MMU and Cache operations.
1015 759 */
1016 760
1017 -#if defined(lint)
1018 -
1019 -/*ARGSUSED*/
1020 -void
1021 -vtag_flushpage(caddr_t vaddr, uint64_t sfmmup)
1022 -{}
1023 -
1024 -/*ARGSUSED*/
1025 -void
1026 -vtag_flushall(void)
1027 -{}
1028 -
1029 -/*ARGSUSED*/
1030 -void
1031 -vtag_unmap_perm_tl1(uint64_t vaddr, uint64_t ctxnum)
1032 -{}
1033 -
1034 -/*ARGSUSED*/
1035 -void
1036 -vtag_flushpage_tl1(uint64_t vaddr, uint64_t sfmmup)
1037 -{}
1038 -
1039 -/*ARGSUSED*/
1040 -void
1041 -vtag_flush_pgcnt_tl1(uint64_t vaddr, uint64_t sfmmup_pgcnt)
1042 -{}
1043 -
1044 -/*ARGSUSED*/
1045 -void
1046 -vtag_flushall_tl1(uint64_t dummy1, uint64_t dummy2)
1047 -{}
1048 -
1049 -/*ARGSUSED*/
1050 -void
1051 -vac_flushpage(pfn_t pfnum, int vcolor)
1052 -{}
1053 -
1054 -/*ARGSUSED*/
1055 -void
1056 -vac_flushpage_tl1(uint64_t pfnum, uint64_t vcolor)
1057 -{}
1058 -
1059 -/*ARGSUSED*/
1060 -void
1061 -flush_instr_mem(caddr_t vaddr, size_t len)
1062 -{}
1063 -
1064 -#else /* lint */
1065 -
1066 761 ENTRY_NP(vtag_flushpage)
1067 762 /*
1068 763 * flush page from the tlb
1069 764 *
1070 765 * %o0 = vaddr
1071 766 * %o1 = sfmmup
1072 767 */
1073 768 SFMMU_CPU_CNUM(%o1, %g1, %g2) /* %g1 = sfmmu cnum on this CPU */
1074 769
1075 770 mov %g1, %o1
1076 771 mov MAP_ITLB | MAP_DTLB, %o2
1077 772 ta MMU_UNMAP_ADDR
1078 773 brz,pt %o0, 1f
1079 774 nop
1080 775 ba panic_bad_hcall
1081 776 mov MMU_UNMAP_ADDR, %o1
1082 777 1:
1083 778 retl
1084 779 nop
1085 780 SET_SIZE(vtag_flushpage)
1086 781
1087 782 ENTRY_NP(vtag_flushall)
1088 783 mov %g0, %o0 ! XXX no cpu list yet
1089 784 mov %g0, %o1 ! XXX no cpu list yet
1090 785 mov MAP_ITLB | MAP_DTLB, %o2
1091 786 mov MMU_DEMAP_ALL, %o5
1092 787 ta FAST_TRAP
1093 788 brz,pt %o0, 1f
1094 789 nop
1095 790 ba panic_bad_hcall
1096 791 mov MMU_DEMAP_ALL, %o1
1097 792 1:
1098 793 retl
1099 794 nop
1100 795 SET_SIZE(vtag_flushall)
1101 796
1102 797 ENTRY_NP(vtag_unmap_perm_tl1)
1103 798 /*
1104 799 * x-trap to unmap perm map entry
1105 800 * %g1 = vaddr
1106 801 * %g2 = ctxnum (KCONTEXT only)
1107 802 */
1108 803 mov %o0, %g3
1109 804 mov %o1, %g4
1110 805 mov %o2, %g5
1111 806 mov %o5, %g6
1112 807 mov %g1, %o0
1113 808 mov %g2, %o1
1114 809 mov MAP_ITLB | MAP_DTLB, %o2
1115 810 mov UNMAP_PERM_ADDR, %o5
1116 811 ta FAST_TRAP
1117 812 brz,pt %o0, 1f
1118 813 nop
1119 814
1120 815 mov PTL1_BAD_HCALL, %g1
1121 816
1122 817 cmp %o0, H_ENOMAP
1123 818 move %xcc, PTL1_BAD_HCALL_UNMAP_PERM_ENOMAP, %g1
1124 819
1125 820 cmp %o0, H_EINVAL
1126 821 move %xcc, PTL1_BAD_HCALL_UNMAP_PERM_EINVAL, %g1
1127 822
1128 823 ba,a ptl1_panic
1129 824 1:
1130 825 mov %g6, %o5
1131 826 mov %g5, %o2
1132 827 mov %g4, %o1
1133 828 mov %g3, %o0
1134 829 retry
1135 830 SET_SIZE(vtag_unmap_perm_tl1)
1136 831
1137 832 ENTRY_NP(vtag_flushpage_tl1)
1138 833 /*
1139 834 * x-trap to flush page from tlb and tsb
1140 835 *
1141 836 * %g1 = vaddr, zero-extended on 32-bit kernel
1142 837 * %g2 = sfmmup
1143 838 *
1144 839 * assumes TSBE_TAG = 0
1145 840 */
1146 841 srln %g1, MMU_PAGESHIFT, %g1
1147 842 slln %g1, MMU_PAGESHIFT, %g1 /* g1 = vaddr */
1148 843 mov %o0, %g3
1149 844 mov %o1, %g4
1150 845 mov %o2, %g5
1151 846 mov %g1, %o0 /* vaddr */
1152 847
1153 848 SFMMU_CPU_CNUM(%g2, %o1, %g6) /* %o1 = sfmmu cnum on this CPU */
1154 849
1155 850 mov MAP_ITLB | MAP_DTLB, %o2
1156 851 ta MMU_UNMAP_ADDR
1157 852 brz,pt %o0, 1f
1158 853 nop
1159 854 ba ptl1_panic
1160 855 mov PTL1_BAD_HCALL, %g1
1161 856 1:
1162 857 mov %g5, %o2
1163 858 mov %g4, %o1
1164 859 mov %g3, %o0
1165 860 membar #Sync
1166 861 retry
1167 862 SET_SIZE(vtag_flushpage_tl1)
1168 863
1169 864 ENTRY_NP(vtag_flush_pgcnt_tl1)
1170 865 /*
1171 866 * x-trap to flush pgcnt MMU_PAGESIZE pages from tlb
1172 867 *
1173 868 * %g1 = vaddr, zero-extended on 32-bit kernel
1174 869 * %g2 = <sfmmup58|pgcnt6>, (pgcnt - 1) is passed in via pgcnt6 bits.
1175 870 *
1176 871 * NOTE: this handler relies on the fact that no
1177 872 * interrupts or traps can occur during the loop
1178 873 * issuing the TLB_DEMAP operations. It is assumed
1179 874 * that interrupts are disabled and this code is
1180 875 * fetching from the kernel locked text address.
1181 876 *
1182 877 * assumes TSBE_TAG = 0
1183 878 */
1184 879 srln %g1, MMU_PAGESHIFT, %g1
1185 880 slln %g1, MMU_PAGESHIFT, %g1 /* g1 = vaddr */
1186 881 mov %o0, %g3
1187 882 mov %o1, %g4
1188 883 mov %o2, %g5
1189 884
1190 885 and %g2, SFMMU_PGCNT_MASK, %g7 /* g7 = pgcnt - 1 */
1191 886 add %g7, 1, %g7 /* g7 = pgcnt */
1192 887
1193 888 andn %g2, SFMMU_PGCNT_MASK, %o0 /* %o0 = sfmmup */
1194 889
1195 890 SFMMU_CPU_CNUM(%o0, %g2, %g6) /* %g2 = sfmmu cnum on this CPU */
1196 891
1197 892 set MMU_PAGESIZE, %g6 /* g6 = pgsize */
1198 893
1199 894 1:
1200 895 mov %g1, %o0 /* vaddr */
1201 896 mov %g2, %o1 /* cnum */
1202 897 mov MAP_ITLB | MAP_DTLB, %o2
1203 898 ta MMU_UNMAP_ADDR
1204 899 brz,pt %o0, 2f
1205 900 nop
1206 901 ba ptl1_panic
1207 902 mov PTL1_BAD_HCALL, %g1
1208 903 2:
1209 904 deccc %g7 /* decr pgcnt */
1210 905 bnz,pt %icc,1b
1211 906 add %g1, %g6, %g1 /* go to nextpage */
1212 907
1213 908 mov %g5, %o2
1214 909 mov %g4, %o1
1215 910 mov %g3, %o0
1216 911 membar #Sync
1217 912 retry
1218 913 SET_SIZE(vtag_flush_pgcnt_tl1)
1219 914
1220 915 ! Not implemented on US1/US2
1221 916 ENTRY_NP(vtag_flushall_tl1)
1222 917 mov %o0, %g3
1223 918 mov %o1, %g4
1224 919 mov %o2, %g5
1225 920 mov %o3, %g6 ! XXXQ not used?
1226 921 mov %o5, %g7
1227 922 mov %g0, %o0 ! XXX no cpu list yet
1228 923 mov %g0, %o1 ! XXX no cpu list yet
1229 924 mov MAP_ITLB | MAP_DTLB, %o2
1230 925 mov MMU_DEMAP_ALL, %o5
1231 926 ta FAST_TRAP
1232 927 brz,pt %o0, 1f
1233 928 nop
1234 929 ba ptl1_panic
1235 930 mov PTL1_BAD_HCALL, %g1
1236 931 1:
1237 932 mov %g7, %o5
1238 933 mov %g6, %o3 ! XXXQ not used?
1239 934 mov %g5, %o2
1240 935 mov %g4, %o1
1241 936 mov %g3, %o0
1242 937 retry
1243 938 SET_SIZE(vtag_flushall_tl1)
1244 939
1245 940 /*
1246 941 * flush_instr_mem:
1247 942 * Flush a portion of the I-$ starting at vaddr
1248 943 * %o0 vaddr
1249 944 * %o1 bytes to be flushed
1250 945 */
1251 946
1252 947 ENTRY(flush_instr_mem)
1253 948 membar #StoreStore ! Ensure the stores
1254 949 ! are globally visible
1255 950 1:
1256 951 flush %o0
1257 952 subcc %o1, ICACHE_FLUSHSZ, %o1 ! bytes = bytes-0x20
1258 953 bgu,pt %ncc, 1b
1259 954 add %o0, ICACHE_FLUSHSZ, %o0 ! vaddr = vaddr+0x20
1260 955
1261 956 retl
1262 957 nop
1263 958 SET_SIZE(flush_instr_mem)
1264 959
1265 -#endif /* !lint */
1266 -
1267 960 #if !defined(CUSTOM_FPZERO)
1268 961
1269 962 /*
1270 963 * fp_zero() - clear all fp data registers and the fsr
1271 964 */
1272 965
1273 -#if defined(lint) || defined(__lint)
1274 -
1275 -void
1276 -fp_zero(void)
1277 -{}
1278 -
1279 -#else /* lint */
1280 -
1281 966 .global fp_zero_zero
1282 967 .align 8
1283 968 fp_zero_zero:
1284 969 .xword 0
1285 970
1286 971 ENTRY_NP(fp_zero)
1287 972 sethi %hi(fp_zero_zero), %o0
1288 973 ldx [%o0 + %lo(fp_zero_zero)], %fsr
1289 974 ldd [%o0 + %lo(fp_zero_zero)], %f0
1290 975 fmovd %f0, %f2
1291 976 fmovd %f0, %f4
1292 977 fmovd %f0, %f6
1293 978 fmovd %f0, %f8
1294 979 fmovd %f0, %f10
1295 980 fmovd %f0, %f12
1296 981 fmovd %f0, %f14
1297 982 fmovd %f0, %f16
1298 983 fmovd %f0, %f18
1299 984 fmovd %f0, %f20
1300 985 fmovd %f0, %f22
1301 986 fmovd %f0, %f24
1302 987 fmovd %f0, %f26
1303 988 fmovd %f0, %f28
1304 989 fmovd %f0, %f30
1305 990 fmovd %f0, %f32
1306 991 fmovd %f0, %f34
1307 992 fmovd %f0, %f36
1308 993 fmovd %f0, %f38
1309 994 fmovd %f0, %f40
1310 995 fmovd %f0, %f42
1311 996 fmovd %f0, %f44
1312 997 fmovd %f0, %f46
1313 998 fmovd %f0, %f48
1314 999 fmovd %f0, %f50
1315 1000 fmovd %f0, %f52
1316 1001 fmovd %f0, %f54
1317 1002 fmovd %f0, %f56
1318 1003 fmovd %f0, %f58
1319 1004 fmovd %f0, %f60
1320 1005 retl
1321 1006 fmovd %f0, %f62
1322 1007 SET_SIZE(fp_zero)
1323 1008
1324 -#endif /* lint */
1325 1009 #endif /* CUSTOM_FPZERO */