XXX AVX procfs
--- old/usr/src/uts/intel/ia32/syscall/getcontext.c
+++ new/usr/src/uts/intel/ia32/syscall/getcontext.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
28 28 /* All Rights Reserved */
29 29
30 30 #include <sys/param.h>
31 31 #include <sys/types.h>
32 32 #include <sys/vmparam.h>
33 33 #include <sys/systm.h>
34 34 #include <sys/signal.h>
35 35 #include <sys/stack.h>
36 36 #include <sys/regset.h>
37 37 #include <sys/privregs.h>
38 38 #include <sys/frame.h>
39 39 #include <sys/proc.h>
40 40 #include <sys/brand.h>
41 41 #include <sys/psw.h>
42 42 #include <sys/ucontext.h>
43 43 #include <sys/asm_linkage.h>
44 44 #include <sys/errno.h>
45 45 #include <sys/archsystm.h>
46 46 #include <sys/schedctl.h>
47 47 #include <sys/debug.h>
48 48 #include <sys/sysmacros.h>
49 49
50 50 /*
51 51 * Save user context.
52 52 */
53 53 void
54 54 savecontext(ucontext_t *ucp, const k_sigset_t *mask)
55 55 {
56 56 proc_t *p = ttoproc(curthread);
57 57 klwp_t *lwp = ttolwp(curthread);
58 58 struct regs *rp = lwptoregs(lwp);
59 59
60 60 /*
61 61 * We unconditionally assign to every field through the end
62 62 * of the gregs, but we need to bzero() everything -after- that
63 63 * to avoid having any kernel stack garbage escape to userland.
64 64 */
65 65 bzero(&ucp->uc_mcontext.fpregs, sizeof (ucontext_t) -
66 66 offsetof(ucontext_t, uc_mcontext.fpregs));
67 67
68 68 ucp->uc_flags = UC_ALL;
69 69 ucp->uc_link = (struct ucontext *)lwp->lwp_oldcontext;
70 70
71 71 /*
72 72 * Try to copyin() the ustack if one is registered. If the stack
73 73 * has zero size, this indicates that stack bounds checking has
74 74 * been disabled for this LWP. If stack bounds checking is disabled
75 75 * or the copyin() fails, we fall back to the legacy behavior.
76 76 */
77 77 if (lwp->lwp_ustack == NULL ||
78 78 copyin((void *)lwp->lwp_ustack, &ucp->uc_stack,
79 79 sizeof (ucp->uc_stack)) != 0 ||
80 80 ucp->uc_stack.ss_size == 0) {
81 81
82 82 if (lwp->lwp_sigaltstack.ss_flags == SS_ONSTACK) {
83 83 ucp->uc_stack = lwp->lwp_sigaltstack;
84 84 } else {
85 85 ucp->uc_stack.ss_sp = p->p_usrstack - p->p_stksize;
86 86 ucp->uc_stack.ss_size = p->p_stksize;
87 87 ucp->uc_stack.ss_flags = 0;
88 88 }
89 89 }
90 90
91 91 /*
92 92 * If either the trace flag or REQUEST_STEP is set,
93 93 * arrange for single-stepping and turn off the trace flag.
94 94 */
95 95 if ((rp->r_ps & PS_T) || (lwp->lwp_pcb.pcb_flags & REQUEST_STEP)) {
96 96 /*
97 97 * Clear PS_T so that saved user context won't have trace
98 98 * flag set.
99 99 */
100 100 rp->r_ps &= ~PS_T;
101 101
102 102 if (!(lwp->lwp_pcb.pcb_flags & REQUEST_NOSTEP)) {
103 103 lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
104 104 /*
105 105 * trap() always checks DEBUG_PENDING before
106 106 * checking for any pending signal. This can at times
107 107 * lead to DEBUG_PENDING not being honoured. (E.g.,
108 108 * the lwp is stopped by stop_on_fault() called from
109 109 * trap(); after being awakened it might see a pending
110 110 * signal and call savecontext(); however, on the way
111 111 * back to userland there is no place it can be
112 112 * detected.) Hence, in anticipation of such
113 113 * occasions, set the AST flag for the thread, which
114 114 * will make the thread take an excursion through
115 115 * trap(), where it will be handled
116 116 * appropriately.
117 117 */
118 118 aston(curthread);
119 119 }
120 120 }
121 121
122 122 getgregs(lwp, ucp->uc_mcontext.gregs);
123 123 if (lwp->lwp_pcb.pcb_fpu.fpu_flags & FPU_EN)
124 124 getfpregs(lwp, &ucp->uc_mcontext.fpregs);
125 125 else
126 126 ucp->uc_flags &= ~UC_FPU;
127 127
128 128 sigktou(mask, &ucp->uc_sigmask);
129 129 }
130 130
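savecontext() is the kernel half of getcontext(2): gregs/fpregs are captured, the signal mask is converted with sigktou(), and uc_stack is filled from the registered ustack, the active sigaltstack, or the main-stack bounds. A minimal userland sketch of what that fallback produces, illustrative only and not part of this webrev:

#include <ucontext.h>
#include <stdio.h>

int
main(void)
{
	ucontext_t uc;

	if (getcontext(&uc) != 0) {
		perror("getcontext");
		return (1);
	}
	/* uc_stack is the altstack when active, else the stack bounds */
	(void) printf("ss_sp=%p ss_size=%zu ss_flags=%d\n",
	    uc.uc_stack.ss_sp, uc.uc_stack.ss_size, uc.uc_stack.ss_flags);
	return (0);
}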
131 131 /*
132 132 * Restore user context.
133 133 */
134 134 void
135 135 restorecontext(ucontext_t *ucp)
136 136 {
137 137 kthread_t *t = curthread;
138 138 klwp_t *lwp = ttolwp(t);
139 139
140 140 lwp->lwp_oldcontext = (uintptr_t)ucp->uc_link;
141 141
142 142 if (ucp->uc_flags & UC_STACK) {
143 143 if (ucp->uc_stack.ss_flags == SS_ONSTACK)
144 144 lwp->lwp_sigaltstack = ucp->uc_stack;
145 145 else
146 146 lwp->lwp_sigaltstack.ss_flags &= ~SS_ONSTACK;
147 147 }
148 148
149 149 if (ucp->uc_flags & UC_CPU) {
150 150 /*
151 151 * If the trace flag is set, mark the lwp to take a
152 152 * single-step trap on return to user level (below).
153 153 * The x86 lcall and sysenter interfaces have already done
154 154 * this and turned off the flag; the amd64 syscall interface has not.
155 155 */
156 156 if (lwptoregs(lwp)->r_ps & PS_T)
157 157 lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
158 158 setgregs(lwp, ucp->uc_mcontext.gregs);
159 159 lwp->lwp_eosys = JUSTRETURN;
160 160 t->t_post_sys = 1;
161 161 aston(curthread);
162 162 }
163 163
164 164 if (ucp->uc_flags & UC_FPU)
165 165 setfpregs(lwp, &ucp->uc_mcontext.fpregs);
166 166
167 167 if (ucp->uc_flags & UC_SIGMASK) {
168 168 /*
169 169 * We don't need to acquire p->p_lock here;
170 170 * we are manipulating thread-private data.
171 171 */
172 172 schedctl_finish_sigblock(t);
173 173 sigutok(&ucp->uc_sigmask, &t->t_hold);
174 174 if (sigcheck(ttoproc(t), t))
175 175 t->t_sig_check = 1;
176 176 }
177 177 }
178 178
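restorecontext() is the kernel half of setcontext(2); JUSTRETURN plus the AST ensure the restored registers survive the return to userland. Restoring a saved context resumes execution at the original getcontext() call site, as in this minimal sketch (illustrative only):

#include <ucontext.h>
#include <stdio.h>

static volatile int pass = 0;

int
main(void)
{
	ucontext_t uc;

	(void) getcontext(&uc);	/* execution resumes here after setcontext() */
	if (++pass < 3) {
		(void) printf("pass %d\n", pass);
		(void) setcontext(&uc);	/* exercises restorecontext() in the kernel */
	}
	return (0);
}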
179 179
180 180 int
181 181 getsetcontext(int flag, void *arg)
182 182 {
183 183 ucontext_t uc;
184 184 ucontext_t *ucp;
185 185 klwp_t *lwp = ttolwp(curthread);
186 186 stack_t dummy_stk;
187 + caddr_t xregs = NULL;
188 + int xregs_size = 0;
187 189
188 190 /*
189 191 * In future releases, when the ucontext structure grows,
190 192 * getcontext should be modified to only return the fields
191 193 * specified in the uc_flags. That way, the structure can grow
192 194 * and still be binary compatible with all .o's, which will only
193 195 * have the old fields defined in uc_flags.
194 196 */
195 197
196 198 switch (flag) {
197 199 default:
198 200 return (set_errno(EINVAL));
199 201
200 202 case GETCONTEXT:
201 203 schedctl_finish_sigblock(curthread);
202 204 savecontext(&uc, &curthread->t_hold);
203 205 if (uc.uc_flags & UC_SIGMASK)
204 206 SIGSET_NATIVE_TO_BRAND(&uc.uc_sigmask);
205 207 if (copyout(&uc, arg, sizeof (uc)))
206 208 return (set_errno(EFAULT));
207 209 return (0);
208 210
209 211 case SETCONTEXT:
210 212 ucp = arg;
211 213 if (ucp == NULL)
212 214 exit(CLD_EXITED, 0);
213 215 /*
214 216 * Don't copyin filler or floating state unless we need it.
215 217 * The ucontext_t struct and fields are specified in the ABI.
216 218 */
217 219 if (copyin(ucp, &uc, sizeof (ucontext_t) -
218 220 sizeof (uc.uc_filler) -
219 221 sizeof (uc.uc_mcontext.fpregs))) {
220 222 return (set_errno(EFAULT));
221 223 }
222 224 if (uc.uc_flags & UC_SIGMASK)
223 225 SIGSET_BRAND_TO_NATIVE(&uc.uc_sigmask);
224 226
225 227 if ((uc.uc_flags & UC_FPU) &&
226 228 copyin(&ucp->uc_mcontext.fpregs, &uc.uc_mcontext.fpregs,
227 229 sizeof (uc.uc_mcontext.fpregs))) {
228 230 return (set_errno(EFAULT));
229 231 }
230 232
233 + /*
234 + * Clear the pointer to any extra register state.
235 + */
236 + xregs_clrptr(lwp, &uc);
237 +
231 238 restorecontext(&uc);
232 239
233 240 if ((uc.uc_flags & UC_STACK) && (lwp->lwp_ustack != 0))
234 241 (void) copyout(&uc.uc_stack, (stack_t *)lwp->lwp_ustack,
235 242 sizeof (uc.uc_stack));
243 +
244 + /*
245 + * Free extra register state.
246 + */
247 + if (xregs_size)
248 + kmem_free(xregs, xregs_size);
249 +
236 250 return (0);
237 251
238 252 case GETUSTACK:
239 253 if (copyout(&lwp->lwp_ustack, arg, sizeof (caddr_t)))
240 254 return (set_errno(EFAULT));
241 255 return (0);
242 256
243 257 case SETUSTACK:
244 258 if (copyin(arg, &dummy_stk, sizeof (dummy_stk)))
245 259 return (set_errno(EFAULT));
246 260 lwp->lwp_ustack = (uintptr_t)arg;
247 261 return (0);
248 262 }
249 263 }
250 264
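Note that the SETCONTEXT hunk above only declares xregs and xregs_size and calls xregs_clrptr(), so the kmem_free() at the end can never see a non-NULL xregs; the allocation half of the extra-register-state handling is evidently still to come. For orientation, a hypothetical sketch of how that staging is done on SPARC (sys/sparc/syscall/getcontext.c); the xregs_hasptr/xregs_getptr/xregs_setptr/xregs_getsize accessors are the SPARC regset macros, and their use here is an assumption, not part of this change:

	if (xregs_hasptr(lwp, &uc)) {
		xregs_size = xregs_getsize(curproc);
		xregs = kmem_zalloc(xregs_size, KM_SLEEP);
		/* stage the user's extra register state in kernel memory */
		if (copyin(xregs_getptr(lwp, &uc), xregs, xregs_size)) {
			kmem_free(xregs, xregs_size);
			return (set_errno(EFAULT));
		}
		xregs_setptr(lwp, &uc, xregs);
	} else {
		xregs_clrptr(lwp, &uc);
	}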
251 265 #ifdef _SYSCALL32_IMPL
252 266
253 267 /*
254 268 * Save user context for 32-bit processes.
255 269 */
256 270 void
257 271 savecontext32(ucontext32_t *ucp, const k_sigset_t *mask)
258 272 {
259 273 proc_t *p = ttoproc(curthread);
260 274 klwp_t *lwp = ttolwp(curthread);
261 275 struct regs *rp = lwptoregs(lwp);
262 276
263 277 bzero(&ucp->uc_mcontext.fpregs, sizeof (ucontext32_t) -
264 278 offsetof(ucontext32_t, uc_mcontext.fpregs));
265 279
266 280 ucp->uc_flags = UC_ALL;
267 281 ucp->uc_link = (caddr32_t)lwp->lwp_oldcontext;
268 282
269 283 if (lwp->lwp_ustack == NULL ||
270 284 copyin((void *)lwp->lwp_ustack, &ucp->uc_stack,
271 285 sizeof (ucp->uc_stack)) != 0 ||
272 286 ucp->uc_stack.ss_size == 0) {
273 287
274 288 if (lwp->lwp_sigaltstack.ss_flags == SS_ONSTACK) {
275 289 ucp->uc_stack.ss_sp =
276 290 (caddr32_t)(uintptr_t)lwp->lwp_sigaltstack.ss_sp;
277 291 ucp->uc_stack.ss_size =
278 292 (size32_t)lwp->lwp_sigaltstack.ss_size;
279 293 ucp->uc_stack.ss_flags = SS_ONSTACK;
280 294 } else {
281 295 ucp->uc_stack.ss_sp = (caddr32_t)(uintptr_t)
282 296 (p->p_usrstack - p->p_stksize);
283 297 ucp->uc_stack.ss_size = (size32_t)p->p_stksize;
284 298 ucp->uc_stack.ss_flags = 0;
285 299 }
286 300 }
287 301
288 302 /*
289 303 * If either the trace flag or REQUEST_STEP is set, arrange
290 304 * for single-stepping and turn off the trace flag.
291 305 */
292 306 if ((rp->r_ps & PS_T) || (lwp->lwp_pcb.pcb_flags & REQUEST_STEP)) {
293 307 /*
294 308 * Clear PS_T so that saved user context won't have trace
295 309 * flag set.
296 310 */
297 311 rp->r_ps &= ~PS_T;
298 312
299 313 if (!(lwp->lwp_pcb.pcb_flags & REQUEST_NOSTEP)) {
300 314 lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
301 315 /*
302 316 * See comments in savecontext().
303 317 */
304 318 aston(curthread);
305 319 }
306 320 }
307 321
308 322 getgregs32(lwp, ucp->uc_mcontext.gregs);
309 323 if (lwp->lwp_pcb.pcb_fpu.fpu_flags & FPU_EN)
310 324 getfpregs32(lwp, &ucp->uc_mcontext.fpregs);
311 325 else
312 326 ucp->uc_flags &= ~UC_FPU;
313 327
314 328 sigktou(mask, &ucp->uc_sigmask);
315 329 }
316 330
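savecontext32() mirrors savecontext() for _SYSCALL32_IMPL, narrowing 64-bit kernel pointers and sizes into the ILP32 ucontext32_t layout. The double cast through uintptr_t is the idiom used for the pointer narrowing; a self-contained sketch, with caddr32_t standing in for the sys/types32.h typedef:

#include <stdint.h>

typedef uint32_t caddr32_t;	/* stand-in for the sys/types32.h type */

static caddr32_t
ptr_to_caddr32(void *p)
{
	/* pointer -> uintptr_t -> 32 bits, as in savecontext32() above */
	return ((caddr32_t)(uintptr_t)p);
}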
317 331 int
318 332 getsetcontext32(int flag, void *arg)
319 333 {
320 334 ucontext32_t uc;
321 335 ucontext_t ucnat;
322 336 ucontext32_t *ucp;
323 337 klwp_t *lwp = ttolwp(curthread);
324 338 caddr32_t ustack32;
325 339 stack32_t dummy_stk32;
326 340
327 341 switch (flag) {
328 342 default:
329 343 return (set_errno(EINVAL));
330 344
331 345 case GETCONTEXT:
332 346 schedctl_finish_sigblock(curthread);
333 347 savecontext32(&uc, &curthread->t_hold);
334 348 if (uc.uc_flags & UC_SIGMASK)
335 349 SIGSET_NATIVE_TO_BRAND(&uc.uc_sigmask);
336 350 if (copyout(&uc, arg, sizeof (uc)))
337 351 return (set_errno(EFAULT));
338 352 return (0);
339 353
340 354 case SETCONTEXT:
341 355 ucp = arg;
342 356 if (ucp == NULL)
343 357 exit(CLD_EXITED, 0);
344 358 if (copyin(ucp, &uc, sizeof (uc) -
345 359 sizeof (uc.uc_filler) -
346 360 sizeof (uc.uc_mcontext.fpregs))) {
347 361 return (set_errno(EFAULT));
348 362 }
349 363 if (uc.uc_flags & UC_SIGMASK)
350 364 SIGSET_BRAND_TO_NATIVE(&uc.uc_sigmask);
351 365 if ((uc.uc_flags & UC_FPU) &&
352 366 copyin(&ucp->uc_mcontext.fpregs, &uc.uc_mcontext.fpregs,
353 367 sizeof (uc.uc_mcontext.fpregs))) {
354 368 return (set_errno(EFAULT));
355 369 }
356 370
357 371 ucontext_32ton(&uc, &ucnat);
358 372 restorecontext(&ucnat);
359 373
360 374 if ((uc.uc_flags & UC_STACK) && (lwp->lwp_ustack != 0))
361 375 (void) copyout(&uc.uc_stack,
362 376 (stack32_t *)lwp->lwp_ustack, sizeof (uc.uc_stack));
363 377 return (0);
364 378
365 379 case GETUSTACK:
366 380 ustack32 = (caddr32_t)lwp->lwp_ustack;
367 381 if (copyout(&ustack32, arg, sizeof (ustack32)))
368 382 return (set_errno(EFAULT));
369 383 return (0);
370 384
371 385 case SETUSTACK:
372 386 if (copyin(arg, &dummy_stk32, sizeof (dummy_stk32)))
373 387 return (set_errno(EFAULT));
374 388 lwp->lwp_ustack = (uintptr_t)arg;
375 389 return (0);
376 390 }
377 391 }
378 392
379 393 #endif /* _SYSCALL32_IMPL */
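The GETUSTACK/SETUSTACK cases back the libc stack-bounds machinery; stack_getbounds(3C) is the documented consumer of the registered ustack. A minimal userland sketch, assuming the stack_getbounds(3C) wrapper available since Solaris 10:

#include <ucontext.h>
#include <stdio.h>

int
main(void)
{
	stack_t sp;

	if (stack_getbounds(&sp) != 0) {
		perror("stack_getbounds");
		return (1);
	}
	(void) printf("ustack: sp=%p size=%zu flags=%d\n",
	    sp.ss_sp, sp.ss_size, sp.ss_flags);
	return (0);
}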