/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#if defined(lint)
#include <sys/types.h>
#include <sys/cpuvar.h>
#else /* lint */
#include "assym.h"
#endif /* lint */

#include <sys/asm_linkage.h>
#include <sys/privregs.h>
#include <sys/x_call.h>
#include <sys/xc_impl.h>
#include <sys/machthread.h>
#include <sys/hypervisor_api.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>
#endif /* TRAPTRACE */


#if defined(lint)

/* ARGSUSED */
void
self_xcall(struct cpu *cpu, uint64_t arg1, uint64_t arg2, xcfunc_t *func)
{}

#else

/*
 * Entered by the software trap (TT=ST_SELFXCALL, TL>0) through
 * send_self_xcall().  Emulates the mondo handler, vec_interrupt().
 *
 * Global registers are the Alternate Globals.
 * Arguments:
 *	%o0 - CPU
 * ILP32 kernel:
 *	%o5 - function to call
 *	%o1, %o2, %o3, %o4 - arguments
 * LP64 kernel:
 *	%o3 - function to call
 *	%o1, %o2 - arguments
 */
	ENTRY_NP(self_xcall)
	!
	! TL>0 handlers are expected to do "retry"
	! prepare their return PC and nPC now
	!
	rdpr	%tnpc, %g1
	wrpr	%g1, %tpc			! PC <- TNPC[TL]
	add	%g1, 4, %g1
	wrpr	%g1, %tnpc			! nPC <- TNPC[TL] + 4

#ifdef TRAPTRACE
	TRACE_PTR(%g4, %g6)
	GET_TRACE_TICK(%g6, %g3)
	stxa	%g6, [%g4 + TRAP_ENT_TICK]%asi
	rdpr	%tl, %g6
	stha	%g6, [%g4 + TRAP_ENT_TL]%asi
	rdpr	%tt, %g6
	stha	%g6, [%g4 + TRAP_ENT_TT]%asi
	stna	%o3, [%g4 + TRAP_ENT_TR]%asi	! pc of the TL>0 handler
	rdpr	%tpc, %g6
	stna	%g6, [%g4 + TRAP_ENT_TPC]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g4 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g4 + TRAP_ENT_SP]%asi
	stna	%o1, [%g4 + TRAP_ENT_F1]%asi	! arg 1
	stna	%o2, [%g4 + TRAP_ENT_F2]%asi	! arg 2
	stna	%g0, [%g4 + TRAP_ENT_F3]%asi
	stna	%g0, [%g4 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%g4, %g6, %g3)
#endif /* TRAPTRACE */
	!
	! Load the arguments for the fast trap handler.
	!
	mov	%o1, %g1
	jmp	%o3				! call the fast trap handler
	mov	%o2, %g2
	/* Not Reached */
	SET_SIZE(self_xcall)
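
/*
 * Illustration (comment only, not compiled): viewed from C, a caller on the
 * current CPU reaches this handler roughly as sketched below.  The
 * send_self_xcall() call is assumed to match the lint stub above, and
 * example_tl1_handler is a hypothetical TL>0 handler; it will find arg1 in
 * %g1 and arg2 in %g2 and must finish with "retry".
 *
 *	extern xcfunc_t example_tl1_handler;
 *
 *	void
 *	example_self_xcall(uint64_t arg1, uint64_t arg2)
 *	{
 *		send_self_xcall(CPU, arg1, arg2, &example_tl1_handler);
 *	}
 */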

#endif /* lint */

#ifdef TRAPTRACE
#if defined(lint)

/* ARGSUSED */
void
xc_trace(u_int traptype, cpuset_t *cpu_set, xcfunc_t *func,
	uint64_t arg1, uint64_t arg2)
{}

#else /* lint */
	ENTRY(xc_trace)
	rdpr	%pstate, %g1
	andn	%g1, PSTATE_IE | PSTATE_AM, %g2
	wrpr	%g0, %g2, %pstate		/* disable interrupts */
	TRACE_PTR(%g3, %g4)
	GET_TRACE_TICK(%g6, %g4)
	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi
	stha	%g0, [%g3 + TRAP_ENT_TL]%asi
	set	TT_XCALL, %g2
	or	%o0, %g2, %g4
	stha	%g4, [%g3 + TRAP_ENT_TT]%asi
	stna	%o7, [%g3 + TRAP_ENT_TPC]%asi
	ldn	[%o1], %g2
	stna	%g2, [%g3 + TRAP_ENT_SP]%asi	/* sp = cpuset */
	stna	%o2, [%g3 + TRAP_ENT_TR]%asi	/* tr = func */
	stna	%o3, [%g3 + TRAP_ENT_F1]%asi	/* f1 = arg1 */
	stna	%o4, [%g3 + TRAP_ENT_F2]%asi	/* f2 = arg2 */
	stna	%g0, [%g3 + TRAP_ENT_F3]%asi	/* f3 = 0 */
	stna	%i7, [%g3 + TRAP_ENT_F4]%asi	/* f4 = xcall caller */
	stxa	%g1, [%g3 + TRAP_ENT_TSTATE]%asi /* tstate = pstate */
	TRACE_NEXT(%g2, %g3, %g4)
/*
 * If the cpuset is larger than a long, grab extra trace buffer entries
 * just to store the rest of the cpuset.  This seems wasteful, but popular
 * opinion favored it over increasing the size of a trace entry.
 */
#if CPUSET_SIZE > CLONGSIZE
	add	%o1, CPUSET_SIZE, %g5		/* end of cpuset */
	clr	%o2
1:
	TRACE_PTR(%g3, %g4)
	stha	%g0, [%g3 + TRAP_ENT_TL]%asi
	set	TT_XCALL_CONT, %g2
	or	%g2, %o2, %g2			/* continuation # */
	stha	%g2, [%g3 + TRAP_ENT_TT]%asi
	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi	/* same tick */
	stna	%g0, [%g3 + TRAP_ENT_TPC]%asi	/* clr unused fields */
	stna	%g0, [%g3 + TRAP_ENT_SP]%asi
	stna	%g0, [%g3 + TRAP_ENT_TR]%asi
	stxa	%g0, [%g3 + TRAP_ENT_TSTATE]%asi
	stna	%g0, [%g3 + TRAP_ENT_F2]%asi
	stna	%g0, [%g3 + TRAP_ENT_F3]%asi
	stna	%g0, [%g3 + TRAP_ENT_F4]%asi
	ldn	[%o1], %g2
	stna	%g2, [%g3 + TRAP_ENT_F1]%asi
	add	%o1, CLONGSIZE, %o1
	cmp	%o1, %g5
	bge	2f
	ldn	[%o1], %g2
	stna	%g2, [%g3 + TRAP_ENT_F2]%asi
	add	%o1, CLONGSIZE, %o1
	cmp	%o1, %g5
	bge	2f
	ldn	[%o1], %g2
	stna	%g2, [%g3 + TRAP_ENT_F3]%asi
	add	%o1, CLONGSIZE, %o1
	cmp	%o1, %g5
	bge	2f
	ldn	[%o1], %g2
	stna	%g2, [%g3 + TRAP_ENT_F4]%asi
	add	%o1, CLONGSIZE, %o1
2:
	TRACE_NEXT(%g2, %g3, %g4)
	cmp	%o1, %g5
	bl	1b
	inc	%o2
#endif /* CPUSET_SIZE */
	retl
	wrpr	%g0, %g1, %pstate		/* enable interrupts */
	SET_SIZE(xc_trace)
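
/*
 * Worked example (comment only, with illustrative sizes): if CPUSET_SIZE
 * were 64 bytes and CLONGSIZE 8, the cpuset holds 8 longs; the first long
 * also lands in the SP field of the main record above, and the loop emits
 * ceil(8 / 4) == 2 continuation records carrying up to four longs each in
 * F1..F4.  In C terms:
 *
 *	static uint_t
 *	example_xc_trace_cont_records(void)
 *	{
 *		uint_t longs = CPUSET_SIZE / CLONGSIZE;
 *
 *		return ((longs + 3) / 4);
 *	}
 */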

#endif /* lint */
#endif /* TRAPTRACE */

#if defined(lint)

/*ARGSUSED*/
void
init_mondo(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{}

/*ARGSUSED*/
int
shipit(int n, uint64_t cpuid)
{ return (0); }

#else /* lint */
/*
 * Set up this CPU's mondo (interrupt dispatch) data.
 * Entry:
 *	%o0 - function or inumber to call
 *	%o1, %o2 - arguments (2 uint64_t's)
 */
	ENTRY(init_mondo)
	ALTENTRY(init_mondo_nocheck)
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	add	%g1, CPU_MCPU, %g1
	ldx	[%g1 + MCPU_MONDO_DATA], %g1
	stx	%o0, [%g1]
	stx	%o1, [%g1+8]
	stx	%o2, [%g1+0x10]
	stx	%g0, [%g1+0x18]
	stx	%g0, [%g1+0x20]
	stx	%g0, [%g1+0x28]
	stx	%g0, [%g1+0x30]
	stx	%g0, [%g1+0x38]
	retl
	membar	#Sync			! allowed to be in the delay slot
	SET_SIZE(init_mondo)
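
/*
 * Illustration (comment only, not compiled): the stores above lay the mondo
 * data out as eight 64-bit words: word 0 holds the function (or inumber),
 * words 1 and 2 hold the two arguments, and words 3 through 7 are cleared.
 * In C terms, roughly (the names are illustrative, not the real definition):
 *
 *	typedef struct example_mondo_data {
 *		uint64_t emd_func;
 *		uint64_t emd_arg1;
 *		uint64_t emd_arg2;
 *		uint64_t emd_pad[5];
 *	} example_mondo_data_t;
 */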

/*
 * Ship mondo to cpuid
 */
	ENTRY_NP(shipit)
	/* For now use dummy interface: cpu# func arg1 arg2 */
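	/*
	 * %o0 and %o1 are passed through to the hypervisor call unchanged;
	 * %o2 is loaded below with the real address of the mondo data that
	 * init_mondo() filled in, and HV_INTR_SEND in %o5 selects the
	 * hypervisor's mondo send service, invoked via FAST_TRAP.
	 */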
	CPU_ADDR(%g1, %g4)
	add	%g1, CPU_MCPU, %g1
	ldx	[%g1 + MCPU_MONDO_DATA_RA], %o2
	mov	HV_INTR_SEND, %o5
	ta	FAST_TRAP
	retl
	membar	#Sync
	SET_SIZE(shipit)

#endif /* lint */

#if defined(lint)

/*ARGSUSED*/
uint64_t
get_cpuaddr(uint64_t reg, uint64_t scr)
{ return (0); }

#else /* lint */
/*
 * Get cpu structure
 * Entry:
 *	%o0 - register for CPU_ADDR macro
 *	%o1 - scratch for CPU_ADDR macro
 */
	ENTRY(get_cpuaddr)
	CPU_ADDR(%o0, %o1)		! %o0 == CPU struct addr
	retl
	nop
	SET_SIZE(get_cpuaddr)

#endif /* lint */

#if defined(lint)
/* ARGSUSED */
void
xt_sync_tl1(uint64_t *cpu_sync_addr)
{}

#else /* lint */
/*
 * On sun4v this is used to ensure that previously sent x-trap handlers
 * have finished executing.  We zero the byte corresponding to this CPU's
 * id in the array passed to us from xt_sync(), so the sender knows the
 * previous mondo has been handled.
 * Register:
 *	%g1 - Addr of the cpu_sync array.
 */
	ENTRY_NP(xt_sync_tl1)
	CPU_INDEX(%g3, %g4)		/* %g3 = cpu id */
	stb	%g0, [%g1 + %g3]
	retry
	SET_SIZE(xt_sync_tl1)

#endif /* lint */
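
/*
 * Illustration (comment only, not compiled): the sender side in xt_sync()
 * conceptually waits for every targeted CPU to clear its byte.  A minimal
 * sketch of that wait, assuming a byte-per-CPU sync array like the one
 * handed to this handler (the helper name is hypothetical and real code
 * would bound the wait):
 *
 *	static void
 *	example_wait_for_sync(volatile uint8_t *cpu_sync, int ncpus)
 *	{
 *		int i;
 *
 *		for (i = 0; i < ncpus; i++) {
 *			while (cpu_sync[i] != 0)
 *				;
 *		}
 *	}
 */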