Print this page
de-linting of .s files
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/sun4v/vm/mach_sfmmu_asm.s
+++ new/usr/src/uts/sun4v/vm/mach_sfmmu_asm.s
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
↓ open down ↓ |
20 lines elided |
↑ open up ↑ |
21 21 /*
22 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 /*
27 27 * SFMMU primitives. These primitives should only be used by sfmmu
28 28 * routines.
29 29 */
30 30
31 -#if defined(lint)
32 -#include <sys/types.h>
33 -#else /* lint */
34 31 #include "assym.h"
35 -#endif /* lint */
36 32
37 33 #include <sys/asm_linkage.h>
38 34 #include <sys/machtrap.h>
39 35 #include <sys/machasi.h>
40 36 #include <sys/sun4asi.h>
41 37 #include <sys/pte.h>
42 38 #include <sys/mmu.h>
43 39 #include <vm/hat_sfmmu.h>
44 40 #include <vm/seg_spt.h>
45 41 #include <sys/machparam.h>
46 42 #include <sys/privregs.h>
↓ open down ↓ |
1 line elided |
↑ open up ↑ |
47 43 #include <sys/scb.h>
48 44 #include <sys/intreg.h>
49 45 #include <sys/machthread.h>
50 46 #include <sys/clock.h>
51 47 #include <sys/trapstat.h>
52 48
53 49 /*
54 50 * sfmmu related subroutines
55 51 */
56 52
57 -#if defined (lint)
58 -
59 -/* ARGSUSED */
60 -void
61 -sfmmu_raise_tsb_exception(uint64_t sfmmup, uint64_t rctx)
62 -{}
63 -
64 -int
65 -sfmmu_getctx_pri()
66 -{ return(0); }
67 -
68 -int
69 -sfmmu_getctx_sec()
70 -{ return(0); }
71 -
72 -/* ARGSUSED */
73 -void
74 -sfmmu_setctx_sec(uint_t ctx)
75 -{}
76 -
77 -/* ARGSUSED */
78 -void
79 -sfmmu_load_mmustate(sfmmu_t *sfmmup)
80 -{
81 -}
82 -
83 -#else /* lint */
84 -
85 53 /*
86 54 * Invalidate either the context of a specific victim or any process
87 55 * currently running on this CPU.
88 56 *
89 57 * %g1 = sfmmup whose ctx is being stolen (victim)
90 58 * when called from sfmmu_wrap_around, %g1 == INVALID_CONTEXT.
91 59 * Note %g1 is the only input argument used by this xcall handler.
92 60 */
93 61
94 62 ENTRY(sfmmu_raise_tsb_exception)
95 63 !
96 64 ! if (victim == INVALID_CONTEXT) {
97 65 ! if (sec-ctx > INVALID_CONTEXT)
98 66 ! write INVALID_CONTEXT to sec-ctx
99 67 ! if (pri-ctx > INVALID_CONTEXT)
100 68 ! write INVALID_CONTEXT to pri-ctx
101 69 !
102 70 ! } else if (current CPU tsbmiss->usfmmup != victim sfmmup) {
103 71 ! return
104 72 ! } else {
105 73 ! if (sec-ctx > INVALID_CONTEXT)
106 74 ! write INVALID_CONTEXT to sec-ctx
107 75 !
108 76 ! if (pri-ctx > INVALID_CONTEXT)
109 77 ! write INVALID_CONTEXT to pri-ctx
110 78 ! }
111 79 !
112 80
113 81 sethi %hi(ksfmmup), %g3
114 82 ldx [%g3 + %lo(ksfmmup)], %g3
115 83 cmp %g1, %g3
116 84 be,a,pn %xcc, ptl1_panic /* can't invalidate kernel ctx */
117 85 mov PTL1_BAD_RAISE_TSBEXCP, %g1
118 86
119 87 set INVALID_CONTEXT, %g2
120 88
121 89 cmp %g1, INVALID_CONTEXT
122 90 bne,pt %xcc, 1f /* called from wrap_around? */
123 91 mov MMU_SCONTEXT, %g3
124 92
125 93 ldxa [%g3]ASI_MMU_CTX, %g5 /* %g5 = sec-ctx */
126 94 cmp %g5, INVALID_CONTEXT /* kernel or invalid ctx ? */
127 95 ble,pn %xcc, 0f /* yes, no need to change */
128 96 mov MMU_PCONTEXT, %g7
129 97
130 98 stxa %g2, [%g3]ASI_MMU_CTX /* set invalid ctx */
131 99 membar #Sync
132 100
133 101 0:
134 102 ldxa [%g7]ASI_MMU_CTX, %g5 /* %g5 = pri-ctx */
135 103 cmp %g5, INVALID_CONTEXT /* kernel or invalid ctx? */
136 104 ble,pn %xcc, 6f /* yes, no need to change */
137 105 nop
138 106
139 107 stxa %g2, [%g7]ASI_MMU_CTX /* set pri-ctx to invalid */
140 108 membar #Sync
141 109
142 110 6: /* flushall tlb */
143 111 mov %o0, %g3
144 112 mov %o1, %g4
145 113 mov %o2, %g6
146 114 mov %o5, %g7
147 115
148 116 mov %g0, %o0 ! XXX no cpu list yet
149 117 mov %g0, %o1 ! XXX no cpu list yet
150 118 mov MAP_ITLB | MAP_DTLB, %o2
151 119 mov MMU_DEMAP_ALL, %o5
152 120 ta FAST_TRAP
153 121 brz,pt %o0, 5f
154 122 nop
155 123 ba ptl1_panic /* bad HV call */
156 124 mov PTL1_BAD_RAISE_TSBEXCP, %g1
157 125 5:
158 126 mov %g3, %o0
159 127 mov %g4, %o1
160 128 mov %g6, %o2
161 129 mov %g7, %o5
162 130
163 131 ba 3f
164 132 nop
165 133 1:
166 134 /*
167 135 * %g1 = sfmmup
168 136 * %g2 = INVALID_CONTEXT
169 137 * %g3 = MMU_SCONTEXT
170 138 */
171 139 CPU_TSBMISS_AREA(%g5, %g6) /* load cpu tsbmiss area */
172 140 ldx [%g5 + TSBMISS_UHATID], %g5 /* load usfmmup */
173 141
174 142 cmp %g5, %g1 /* is it the victim? */
175 143 bne,pt %xcc, 2f /* is our sec-ctx a victim? */
176 144 nop
177 145
178 146 ldxa [%g3]ASI_MMU_CTX, %g5 /* %g5 = sec-ctx */
179 147 cmp %g5, INVALID_CONTEXT /* kernel or invalid ctx ? */
180 148 ble,pn %xcc, 0f /* yes, no need to change */
181 149 mov MMU_PCONTEXT, %g7
182 150
183 151 stxa %g2, [%g3]ASI_MMU_CTX /* set sec-ctx to invalid */
184 152 membar #Sync
185 153
186 154 0:
187 155 ldxa [%g7]ASI_MMU_CTX, %g4 /* %g4 = pri-ctx */
188 156 cmp %g4, INVALID_CONTEXT /* is pri-ctx the victim? */
189 157 ble %icc, 3f /* no need to change pri-ctx */
190 158 nop
191 159 stxa %g2, [%g7]ASI_MMU_CTX /* set pri-ctx to invalid */
192 160 membar #Sync
193 161
194 162 3:
195 163 /* TSB program must be cleared - walkers do not check a context. */
196 164 mov %o0, %g3
197 165 mov %o1, %g4
198 166 mov %o5, %g7
199 167 clr %o0
200 168 clr %o1
201 169 mov MMU_TSB_CTXNON0, %o5
202 170 ta FAST_TRAP
203 171 brnz,a,pn %o0, ptl1_panic
204 172 mov PTL1_BAD_HCALL, %g1
205 173 mov %g3, %o0
206 174 mov %g4, %o1
207 175 mov %g7, %o5
208 176 2:
209 177 retry
210 178 SET_SIZE(sfmmu_raise_tsb_exception)
211 179
212 180 ENTRY_NP(sfmmu_getctx_pri)
213 181 set MMU_PCONTEXT, %o0
214 182 retl
215 183 ldxa [%o0]ASI_MMU_CTX, %o0
216 184 SET_SIZE(sfmmu_getctx_pri)
217 185
218 186 ENTRY_NP(sfmmu_getctx_sec)
219 187 set MMU_SCONTEXT, %o0
220 188 retl
221 189 ldxa [%o0]ASI_MMU_CTX, %o0
222 190 SET_SIZE(sfmmu_getctx_sec)
223 191
224 192 /*
225 193 * Set the secondary context register for this process.
226 194 * %o0 = context number
227 195 */
228 196 ENTRY_NP(sfmmu_setctx_sec)
229 197 /*
230 198 * From resume we call sfmmu_setctx_sec with interrupts disabled.
231 199 * But we can also get called from C with interrupts enabled. So,
232 200 * we need to check first.
233 201 */
234 202
235 203 /* If interrupts are not disabled, then disable them */
236 204 rdpr %pstate, %g1
237 205 btst PSTATE_IE, %g1
238 206 bnz,a,pt %icc, 1f
239 207 wrpr %g1, PSTATE_IE, %pstate /* disable interrupts */
240 208 1:
241 209 mov MMU_SCONTEXT, %o1
242 210 stxa %o0, [%o1]ASI_MMU_CTX /* set 2nd context reg. */
243 211 membar #Sync
244 212 /*
245 213 * if the routine is entered with intr enabled, then enable intr now.
246 214 	 * otherwise, keep intr disabled, return without enabling intr.
247 215 * %g1 - old intr state
248 216 */
249 217 btst PSTATE_IE, %g1
250 218 bnz,a,pt %icc, 2f
251 219 wrpr %g0, %g1, %pstate /* enable interrupts */
252 220 2: retl
253 221 nop
254 222 SET_SIZE(sfmmu_setctx_sec)
255 223
256 224 /*
257 225 * set ktsb_phys to 1 if the processor supports ASI_QUAD_LDD_PHYS.
258 226 * returns the detection value in %o0.
259 227 */
260 228 ENTRY_NP(sfmmu_setup_4lp)
261 229 set ktsb_phys, %o2
262 230 mov 1, %o1
263 231 st %o1, [%o2]
264 232 retl
265 233 mov %o1, %o0
266 234 SET_SIZE(sfmmu_setup_4lp)
267 235
268 236 /*
269 237 * Called to load MMU registers and tsbmiss area
270 238 * for the active process. This function should
271 239 * only be called from TL=0.
272 240 *
273 241 * %o0 - hat pointer
274 242 */
275 243 ENTRY_NP(sfmmu_load_mmustate)
276 244
277 245 #ifdef DEBUG
278 246 PANIC_IF_INTR_ENABLED_PSTR(msfmmu_ei_l1, %g1)
279 247 #endif /* DEBUG */
280 248
281 249 sethi %hi(ksfmmup), %o3
282 250 ldx [%o3 + %lo(ksfmmup)], %o3
283 251 cmp %o3, %o0
284 252 be,pn %xcc, 7f ! if kernel as, do nothing
285 253 nop
286 254
287 255 set MMU_SCONTEXT, %o3
288 256 ldxa [%o3]ASI_MMU_CTX, %o5
289 257
290 258 cmp %o5, INVALID_CONTEXT ! ctx is invalid?
291 259 bne,pt %icc, 1f
292 260 nop
293 261
294 262 CPU_TSBMISS_AREA(%o2, %o3) ! %o2 = tsbmiss area
295 263 stx %o0, [%o2 + TSBMISS_UHATID]
296 264 stx %g0, [%o2 + TSBMISS_SHARED_UHATID]
297 265 #ifdef DEBUG
298 266 /* check if hypervisor/hardware should handle user TSB */
299 267 sethi %hi(hv_use_non0_tsb), %o2
300 268 ld [%o2 + %lo(hv_use_non0_tsb)], %o2
301 269 brz,pn %o2, 0f
302 270 nop
303 271 #endif /* DEBUG */
304 272 clr %o0 ! ntsb = 0 for invalid ctx
305 273 clr %o1 ! HV_TSB_INFO_PA = 0 if inv ctx
306 274 mov MMU_TSB_CTXNON0, %o5
307 275 ta FAST_TRAP ! set TSB info for user process
308 276 brnz,a,pn %o0, panic_bad_hcall
309 277 mov MMU_TSB_CTXNON0, %o1
310 278 0:
311 279 retl
312 280 nop
313 281 1:
314 282 /*
315 283 * We need to set up the TSB base register, tsbmiss
316 284 * area, and pass the TSB information into the hypervisor
317 285 */
318 286 ldx [%o0 + SFMMU_TSB], %o1 ! %o1 = first tsbinfo
319 287 ldx [%o1 + TSBINFO_NEXTPTR], %g2 ! %g2 = second tsbinfo
320 288
321 289 /* create/set first UTSBREG */
322 290 MAKE_UTSBREG(%o1, %o2, %o3) ! %o2 = user tsbreg
323 291 SET_UTSBREG(SCRATCHPAD_UTSBREG1, %o2, %o3)
324 292
325 293 brz,pt %g2, 2f
326 294 mov -1, %o2 ! use -1 if no second TSB
327 295
328 296 /* make 2nd UTSBREG */
329 297 MAKE_UTSBREG(%g2, %o2, %o3) ! %o2 = user tsbreg
330 298 2:
331 299 SET_UTSBREG(SCRATCHPAD_UTSBREG2, %o2, %o3)
332 300
333 301 /* make 3rd and 4th TSB */
334 302 CPU_TSBMISS_AREA(%o4, %o3) ! %o4 = tsbmiss area
335 303
336 304 ldx [%o0 + SFMMU_SCDP], %g2 ! %g2 = sfmmu_scd
337 305 brz,pt %g2, 3f
338 306 mov -1, %o2 ! use -1 if no third TSB
339 307
340 308 ldx [%g2 + SCD_SFMMUP], %g3 ! %g3 = scdp->scd_sfmmup
341 309 ldx [%g3 + SFMMU_TSB], %o1 ! %o1 = first scd tsbinfo
342 310 brz,pn %o1, 9f
343 311 nop ! panic if no third TSB
344 312
345 313 /* make 3rd UTSBREG */
346 314 MAKE_UTSBREG(%o1, %o2, %o3) ! %o2 = user tsbreg
347 315 3:
348 316 SET_UTSBREG_SHCTX(%o4, TSBMISS_TSBSCDPTR, %o2)
349 317
350 318 brz,pt %g2, 4f
351 319 mov -1, %o2 ! use -1 if no 3rd or 4th TSB
352 320
353 321 brz,pt %o1, 4f
354 322 mov -1, %o2 ! use -1 if no 3rd or 4th TSB
355 323 ldx [%o1 + TSBINFO_NEXTPTR], %g2 ! %g2 = second scd tsbinfo
356 324 brz,pt %g2, 4f
357 325 mov -1, %o2 ! use -1 if no 4th TSB
358 326
359 327 /* make 4th UTSBREG */
360 328 MAKE_UTSBREG(%g2, %o2, %o3) ! %o2 = user tsbreg
361 329 4:
362 330 SET_UTSBREG_SHCTX(%o4, TSBMISS_TSBSCDPTR4M, %o2)
363 331
364 332 #ifdef DEBUG
365 333 /* check if hypervisor/hardware should handle user TSB */
366 334 sethi %hi(hv_use_non0_tsb), %o2
367 335 ld [%o2 + %lo(hv_use_non0_tsb)], %o2
368 336 brz,pn %o2, 6f
369 337 nop
370 338 #endif /* DEBUG */
371 339 CPU_ADDR(%o2, %o4) ! load CPU struct addr to %o2 using %o4
372 340 ldub [%o2 + CPU_TSTAT_FLAGS], %o1 ! load cpu_tstat_flag to %o1
373 341
374 342 mov %o0, %o3 ! preserve %o0
375 343 btst TSTAT_TLB_STATS, %o1
376 344 bnz,a,pn %icc, 5f ! ntsb = 0 if TLB stats enabled
377 345 clr %o0
378 346
379 347 ldx [%o3 + SFMMU_HVBLOCK + HV_TSB_INFO_CNT], %o0
380 348 5:
381 349 ldx [%o3 + SFMMU_HVBLOCK + HV_TSB_INFO_PA], %o1
382 350 mov MMU_TSB_CTXNON0, %o5
383 351 ta FAST_TRAP ! set TSB info for user process
384 352 brnz,a,pn %o0, panic_bad_hcall
385 353 mov MMU_TSB_CTXNON0, %o1
386 354 mov %o3, %o0 ! restore %o0
387 355 6:
388 356 ldx [%o0 + SFMMU_ISMBLKPA], %o1 ! copy members of sfmmu
389 357 CPU_TSBMISS_AREA(%o2, %o3) ! %o2 = tsbmiss area
390 358 stx %o1, [%o2 + TSBMISS_ISMBLKPA] ! sfmmu_tsb_miss into the
391 359 ldub [%o0 + SFMMU_TTEFLAGS], %o3 ! per-CPU tsbmiss area.
392 360 ldub [%o0 + SFMMU_RTTEFLAGS], %o4
393 361 ldx [%o0 + SFMMU_SRDP], %o1
394 362 stx %o0, [%o2 + TSBMISS_UHATID]
395 363 stub %o3, [%o2 + TSBMISS_UTTEFLAGS]
396 364 stub %o4, [%o2 + TSBMISS_URTTEFLAGS]
397 365 stx %o1, [%o2 + TSBMISS_SHARED_UHATID]
398 366 brz,pn %o1, 7f ! check for sfmmu_srdp
399 367 add %o0, SFMMU_HMERMAP, %o1
400 368 add %o2, TSBMISS_SHMERMAP, %o2
401 369 mov SFMMU_HMERGNMAP_WORDS, %o3
402 370 ! set tsbmiss shmermap
403 371 SET_REGION_MAP(%o1, %o2, %o3, %o4, load_shme_mmustate)
404 372
405 373 ldx [%o0 + SFMMU_SCDP], %o4 ! %o4 = sfmmu_scd
406 374 CPU_TSBMISS_AREA(%o2, %o3) ! %o2 = tsbmiss area
407 375 mov SFMMU_HMERGNMAP_WORDS, %o3
408 376 brnz,pt %o4, 8f ! check for sfmmu_scdp else
409 377 add %o2, TSBMISS_SCDSHMERMAP, %o2 ! zero tsbmiss scd_shmermap
410 378 ZERO_REGION_MAP(%o2, %o3, zero_scd_mmustate)
411 379 7:
412 380 retl
413 381 nop
414 382 8: ! set tsbmiss scd_shmermap
415 383 add %o4, SCD_HMERMAP, %o1
416 384 SET_REGION_MAP(%o1, %o2, %o3, %o4, load_scd_mmustate)
417 385 retl
418 386 nop
419 387 9:
420 388 sethi %hi(panicstr), %g1 ! panic if no 3rd TSB
421 389 ldx [%g1 + %lo(panicstr)], %g1
422 390 tst %g1
↓ open down ↓ |
328 lines elided |
↑ open up ↑ |
423 391
424 392 bnz,pn %xcc, 7b
425 393 nop
426 394
427 395 sethi %hi(sfmmu_panic10), %o0
428 396 call panic
429 397 or %o0, %lo(sfmmu_panic10), %o0
430 398
431 399 SET_SIZE(sfmmu_load_mmustate)
432 400
433 -#endif /* lint */
434 -
435 -#if defined(lint)
436 -
437 -/* Prefetch "struct tsbe" while walking TSBs */
438 -/*ARGSUSED*/
439 -void
440 -prefetch_tsbe_read(struct tsbe *tsbep)
441 -{}
442 -
443 -/* Prefetch the tsbe that we are about to write */
444 -/*ARGSUSED*/
445 -void
446 -prefetch_tsbe_write(struct tsbe *tsbep)
447 -{}
448 -
449 -#else /* lint */
450 -
451 401 ENTRY(prefetch_tsbe_read)
452 402 retl
453 403 nop
454 404 SET_SIZE(prefetch_tsbe_read)
455 405
456 406 ENTRY(prefetch_tsbe_write)
457 407 retl
458 408 nop
459 409 SET_SIZE(prefetch_tsbe_write)
460 -#endif /* lint */
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX