/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SFMMU primitives. These primitives should only be used by sfmmu
 * routines.
 */

#include "assym.h"

#include <sys/asm_linkage.h>
#include <sys/machtrap.h>
#include <sys/machasi.h>
#include <sys/sun4asi.h>
#include <sys/pte.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_spt.h>
#include <sys/machparam.h>
#include <sys/privregs.h>
#include <sys/scb.h>
#include <sys/intreg.h>
#include <sys/machthread.h>
#include <sys/clock.h>
#include <sys/trapstat.h>

/*
 * sfmmu related subroutines
 */

/*
 * Invalidate either the context of a specific victim or any process
 * currently running on this CPU.
 *
 * %g1 = sfmmup whose ctx is being stolen (victim)
 *	when called from sfmmu_wrap_around, %g1 == INVALID_CONTEXT.
 * Note %g1 is the only input argument used by this xcall handler.
 */

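/*
 * How this handler is typically reached (a sketch, not code from this
 * file): callers such as sfmmu_wrap_around deliver it with a cross-call,
 * passing the victim sfmmup (or INVALID_CONTEXT) as the first xcall
 * argument, which arrives here in %g1.  The second argument below is a
 * placeholder:
 *
 *	xt_some(cpuset, sfmmu_raise_tsb_exception, (uint64_t)sfmmup, 0);
 */
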
	ENTRY(sfmmu_raise_tsb_exception)
	!
	! if (victim == INVALID_CONTEXT) {
	!	if (sec-ctx > INVALID_CONTEXT)
	!		write INVALID_CONTEXT to sec-ctx
	!	if (pri-ctx > INVALID_CONTEXT)
	!		write INVALID_CONTEXT to pri-ctx
	!
	! } else if (current CPU tsbmiss->usfmmup != victim sfmmup) {
	!	return
	! } else {
	!	if (sec-ctx > INVALID_CONTEXT)
	!		write INVALID_CONTEXT to sec-ctx
	!
	!	if (pri-ctx > INVALID_CONTEXT)
	!		write INVALID_CONTEXT to pri-ctx
	! }
	!

	sethi	%hi(ksfmmup), %g3
	ldx	[%g3 + %lo(ksfmmup)], %g3
	cmp	%g1, %g3
	be,a,pn	%xcc, ptl1_panic	/* can't invalidate kernel ctx */
	mov	PTL1_BAD_RAISE_TSBEXCP, %g1

	set	INVALID_CONTEXT, %g2

	cmp	%g1, INVALID_CONTEXT
	bne,pt	%xcc, 1f		/* called from wrap_around? */
	mov	MMU_SCONTEXT, %g3

	ldxa	[%g3]ASI_MMU_CTX, %g5	/* %g5 = sec-ctx */
	cmp	%g5, INVALID_CONTEXT	/* kernel or invalid ctx ? */
	ble,pn	%xcc, 0f		/* yes, no need to change */
	mov	MMU_PCONTEXT, %g7

	stxa	%g2, [%g3]ASI_MMU_CTX	/* set invalid ctx */
	membar	#Sync

0:
	ldxa	[%g7]ASI_MMU_CTX, %g5	/* %g5 = pri-ctx */
	cmp	%g5, INVALID_CONTEXT	/* kernel or invalid ctx? */
	ble,pn	%xcc, 6f		/* yes, no need to change */
	nop

	stxa	%g2, [%g7]ASI_MMU_CTX	/* set pri-ctx to invalid */
	membar	#Sync

6:	/* flushall tlb */
	mov	%o0, %g3
	mov	%o1, %g4
	mov	%o2, %g6
	mov	%o5, %g7

	mov	%g0, %o0		! XXX no cpu list yet
	mov	%g0, %o1		! XXX no cpu list yet
	mov	MAP_ITLB | MAP_DTLB, %o2
	mov	MMU_DEMAP_ALL, %o5
	ta	FAST_TRAP
	brz,pt	%o0, 5f
	nop
	ba	ptl1_panic		/* bad HV call */
	mov	PTL1_BAD_RAISE_TSBEXCP, %g1
5:
	mov	%g3, %o0
	mov	%g4, %o1
	mov	%g6, %o2
	mov	%g7, %o5

	ba	3f
	nop
1:
	/*
	 * %g1 = sfmmup
	 * %g2 = INVALID_CONTEXT
	 * %g3 = MMU_SCONTEXT
	 */
	CPU_TSBMISS_AREA(%g5, %g6)	/* load cpu tsbmiss area */
	ldx	[%g5 + TSBMISS_UHATID], %g5	/* load usfmmup */

	cmp	%g5, %g1		/* is it the victim? */
	bne,pt	%xcc, 2f		/* is our sec-ctx a victim? */
	nop

	ldxa	[%g3]ASI_MMU_CTX, %g5	/* %g5 = sec-ctx */
	cmp	%g5, INVALID_CONTEXT	/* kernel or invalid ctx ? */
	ble,pn	%xcc, 0f		/* yes, no need to change */
	mov	MMU_PCONTEXT, %g7

	stxa	%g2, [%g3]ASI_MMU_CTX	/* set sec-ctx to invalid */
	membar	#Sync

0:
	ldxa	[%g7]ASI_MMU_CTX, %g4	/* %g4 = pri-ctx */
	cmp	%g4, INVALID_CONTEXT	/* is pri-ctx the victim? */
	ble	%icc, 3f		/* no need to change pri-ctx */
	nop
	stxa	%g2, [%g7]ASI_MMU_CTX	/* set pri-ctx to invalid */
	membar	#Sync

3:
	/* TSB program must be cleared - walkers do not check a context. */
	mov	%o0, %g3
	mov	%o1, %g4
	mov	%o5, %g7
	clr	%o0
	clr	%o1
	mov	MMU_TSB_CTXNON0, %o5
	ta	FAST_TRAP
	brnz,a,pn %o0, ptl1_panic
	mov	PTL1_BAD_HCALL, %g1
	mov	%g3, %o0
	mov	%g4, %o1
	mov	%g7, %o5
2:
	retry
	SET_SIZE(sfmmu_raise_tsb_exception)

/*
 * Return the current primary context number, read via ASI_MMU_CTX.
 */
	ENTRY_NP(sfmmu_getctx_pri)
	set	MMU_PCONTEXT, %o0
	retl
	ldxa	[%o0]ASI_MMU_CTX, %o0
	SET_SIZE(sfmmu_getctx_pri)

/*
 * Return the current secondary context number, read via ASI_MMU_CTX.
 */
	ENTRY_NP(sfmmu_getctx_sec)
	set	MMU_SCONTEXT, %o0
	retl
	ldxa	[%o0]ASI_MMU_CTX, %o0
	SET_SIZE(sfmmu_getctx_sec)

/*
 * Set the secondary context register for this process.
 * %o0 = context number
 */
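/*
 * A C-level sketch of the call (the exact prototype in hat_sfmmu.h is
 * assumed, roughly "void sfmmu_setctx_sec(uint_t)"): the caller passes
 * the new context number, which arrives here in %o0:
 *
 *	sfmmu_setctx_sec(ctxnum);
 *
 * As noted below, the caller may have interrupts either enabled (calls
 * from C) or already disabled (calls from resume).
 */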
	ENTRY_NP(sfmmu_setctx_sec)
	/*
	 * From resume we call sfmmu_setctx_sec with interrupts disabled.
	 * But we can also get called from C with interrupts enabled. So,
	 * we need to check first.
	 */

	/* If interrupts are not disabled, then disable them */
	rdpr	%pstate, %g1
	btst	PSTATE_IE, %g1
	bnz,a,pt %icc, 1f
	wrpr	%g1, PSTATE_IE, %pstate	/* disable interrupts */
1:
	mov	MMU_SCONTEXT, %o1
	stxa	%o0, [%o1]ASI_MMU_CTX	/* set 2nd context reg. */
	membar	#Sync
	/*
	 * If the routine was entered with interrupts enabled, enable them
	 * again now; otherwise keep them disabled and return without
	 * enabling them.
	 * %g1 - old intr state
	 */
	btst	PSTATE_IE, %g1
	bnz,a,pt %icc, 2f
	wrpr	%g0, %g1, %pstate	/* enable interrupts */
2:	retl
	nop
	SET_SIZE(sfmmu_setctx_sec)

/*
 * set ktsb_phys to 1 if the processor supports ASI_QUAD_LDD_PHYS.
 * returns the detection value in %o0.
 */
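/*
 * A C-level sketch (the prototype and the ktsb_phys declaration are
 * assumptions, not quoted from the headers); note that this
 * implementation stores and returns 1 unconditionally:
 *
 *	extern int ktsb_phys;
 *	(void) sfmmu_setup_4lp();	-- ktsb_phys is now 1
 */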
	ENTRY_NP(sfmmu_setup_4lp)
	set	ktsb_phys, %o2
	mov	1, %o1
	st	%o1, [%o2]
	retl
	mov	%o1, %o0
	SET_SIZE(sfmmu_setup_4lp)

/*
 * Called to load MMU registers and tsbmiss area
 * for the active process. This function should
 * only be called from TL=0.
 *
 * %o0 - hat pointer
 */
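/*
 * A C-level sketch of the call (prototype assumed, roughly
 * "void sfmmu_load_mmustate(sfmmu_t *)"); per the DEBUG assertion below,
 * the caller must have interrupts disabled when making a process's hat
 * the active one:
 *
 *	sfmmu_load_mmustate(sfmmup);
 */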
	ENTRY_NP(sfmmu_load_mmustate)

#ifdef DEBUG
	PANIC_IF_INTR_ENABLED_PSTR(msfmmu_ei_l1, %g1)
#endif /* DEBUG */

	sethi	%hi(ksfmmup), %o3
	ldx	[%o3 + %lo(ksfmmup)], %o3
	cmp	%o3, %o0
	be,pn	%xcc, 7f			! if kernel as, do nothing
	nop

	set	MMU_SCONTEXT, %o3
	ldxa	[%o3]ASI_MMU_CTX, %o5

	cmp	%o5, INVALID_CONTEXT		! ctx is invalid?
	bne,pt	%icc, 1f
	nop

	CPU_TSBMISS_AREA(%o2, %o3)		! %o2 = tsbmiss area
	stx	%o0, [%o2 + TSBMISS_UHATID]
	stx	%g0, [%o2 + TSBMISS_SHARED_UHATID]
#ifdef DEBUG
	/* check if hypervisor/hardware should handle user TSB */
	sethi	%hi(hv_use_non0_tsb), %o2
	ld	[%o2 + %lo(hv_use_non0_tsb)], %o2
	brz,pn	%o2, 0f
	nop
#endif /* DEBUG */
	clr	%o0				! ntsb = 0 for invalid ctx
	clr	%o1				! HV_TSB_INFO_PA = 0 if inv ctx
	mov	MMU_TSB_CTXNON0, %o5
	ta	FAST_TRAP			! set TSB info for user process
	brnz,a,pn %o0, panic_bad_hcall
	mov	MMU_TSB_CTXNON0, %o1
0:
	retl
	nop
1:
	/*
	 * We need to set up the TSB base register, tsbmiss
	 * area, and pass the TSB information into the hypervisor
	 */
	ldx	[%o0 + SFMMU_TSB], %o1		! %o1 = first tsbinfo
	ldx	[%o1 + TSBINFO_NEXTPTR], %g2	! %g2 = second tsbinfo

	/* create/set first UTSBREG */
	MAKE_UTSBREG(%o1, %o2, %o3)		! %o2 = user tsbreg
	SET_UTSBREG(SCRATCHPAD_UTSBREG1, %o2, %o3)

	brz,pt	%g2, 2f
	mov	-1, %o2				! use -1 if no second TSB

	/* make 2nd UTSBREG */
	MAKE_UTSBREG(%g2, %o2, %o3)		! %o2 = user tsbreg
2:
	SET_UTSBREG(SCRATCHPAD_UTSBREG2, %o2, %o3)

	/* make 3rd and 4th TSB */
	CPU_TSBMISS_AREA(%o4, %o3)		! %o4 = tsbmiss area

	ldx	[%o0 + SFMMU_SCDP], %g2		! %g2 = sfmmu_scd
	brz,pt	%g2, 3f
	mov	-1, %o2				! use -1 if no third TSB

	ldx	[%g2 + SCD_SFMMUP], %g3		! %g3 = scdp->scd_sfmmup
	ldx	[%g3 + SFMMU_TSB], %o1		! %o1 = first scd tsbinfo
	brz,pn	%o1, 9f
	nop					! panic if no third TSB

	/* make 3rd UTSBREG */
	MAKE_UTSBREG(%o1, %o2, %o3)		! %o2 = user tsbreg
3:
	SET_UTSBREG_SHCTX(%o4, TSBMISS_TSBSCDPTR, %o2)

	brz,pt	%g2, 4f
	mov	-1, %o2				! use -1 if no 3rd or 4th TSB

	brz,pt	%o1, 4f
	mov	-1, %o2				! use -1 if no 3rd or 4th TSB
	ldx	[%o1 + TSBINFO_NEXTPTR], %g2	! %g2 = second scd tsbinfo
	brz,pt	%g2, 4f
	mov	-1, %o2				! use -1 if no 4th TSB

	/* make 4th UTSBREG */
	MAKE_UTSBREG(%g2, %o2, %o3)		! %o2 = user tsbreg
4:
	SET_UTSBREG_SHCTX(%o4, TSBMISS_TSBSCDPTR4M, %o2)

#ifdef DEBUG
	/* check if hypervisor/hardware should handle user TSB */
	sethi	%hi(hv_use_non0_tsb), %o2
	ld	[%o2 + %lo(hv_use_non0_tsb)], %o2
	brz,pn	%o2, 6f
	nop
#endif /* DEBUG */
	CPU_ADDR(%o2, %o4)	! load CPU struct addr to %o2 using %o4
	ldub	[%o2 + CPU_TSTAT_FLAGS], %o1	! load cpu_tstat_flag to %o1

	mov	%o0, %o3	! preserve %o0
	btst	TSTAT_TLB_STATS, %o1
	bnz,a,pn %icc, 5f	! ntsb = 0 if TLB stats enabled
	clr	%o0

	ldx	[%o3 + SFMMU_HVBLOCK + HV_TSB_INFO_CNT], %o0
5:
	ldx	[%o3 + SFMMU_HVBLOCK + HV_TSB_INFO_PA], %o1
	mov	MMU_TSB_CTXNON0, %o5
	ta	FAST_TRAP	! set TSB info for user process
	brnz,a,pn %o0, panic_bad_hcall
	mov	MMU_TSB_CTXNON0, %o1
	mov	%o3, %o0	! restore %o0
6:
	ldx	[%o0 + SFMMU_ISMBLKPA], %o1	! copy members of sfmmu
	CPU_TSBMISS_AREA(%o2, %o3)		! %o2 = tsbmiss area
	stx	%o1, [%o2 + TSBMISS_ISMBLKPA]	! sfmmu_tsb_miss into the
	ldub	[%o0 + SFMMU_TTEFLAGS], %o3	! per-CPU tsbmiss area.
	ldub	[%o0 + SFMMU_RTTEFLAGS], %o4
	ldx	[%o0 + SFMMU_SRDP], %o1
	stx	%o0, [%o2 + TSBMISS_UHATID]
	stub	%o3, [%o2 + TSBMISS_UTTEFLAGS]
	stub	%o4, [%o2 + TSBMISS_URTTEFLAGS]
	stx	%o1, [%o2 + TSBMISS_SHARED_UHATID]
	brz,pn	%o1, 7f				! check for sfmmu_srdp
	add	%o0, SFMMU_HMERMAP, %o1
	add	%o2, TSBMISS_SHMERMAP, %o2
	mov	SFMMU_HMERGNMAP_WORDS, %o3
	! set tsbmiss shmermap
	SET_REGION_MAP(%o1, %o2, %o3, %o4, load_shme_mmustate)

	ldx	[%o0 + SFMMU_SCDP], %o4		! %o4 = sfmmu_scd
	CPU_TSBMISS_AREA(%o2, %o3)		! %o2 = tsbmiss area
	mov	SFMMU_HMERGNMAP_WORDS, %o3
	brnz,pt	%o4, 8f				! check for sfmmu_scdp else
	add	%o2, TSBMISS_SCDSHMERMAP, %o2	! zero tsbmiss scd_shmermap
	ZERO_REGION_MAP(%o2, %o3, zero_scd_mmustate)
7:
	retl
	nop
8:						! set tsbmiss scd_shmermap
	add	%o4, SCD_HMERMAP, %o1
	SET_REGION_MAP(%o1, %o2, %o3, %o4, load_scd_mmustate)
	retl
	nop
9:
	sethi	%hi(panicstr), %g1		! panic if no 3rd TSB
	ldx	[%g1 + %lo(panicstr)], %g1
	tst	%g1

	bnz,pn	%xcc, 7b
	nop

	sethi	%hi(sfmmu_panic10), %o0
	call	panic
	or	%o0, %lo(sfmmu_panic10), %o0

	SET_SIZE(sfmmu_load_mmustate)

/*
 * TSB entry prefetch routines; no-op stubs in this implementation.
 */
	ENTRY(prefetch_tsbe_read)
	retl
	nop
	SET_SIZE(prefetch_tsbe_read)

	ENTRY(prefetch_tsbe_write)
	retl
	nop
	SET_SIZE(prefetch_tsbe_write)
--- EOF ---