/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*      Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T     */
/*        All Rights Reserved   */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/vmparam.h>
#include <sys/systm.h>
#include <sys/signal.h>
#include <sys/stack.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/frame.h>
#include <sys/proc.h>
#include <sys/brand.h>
#include <sys/psw.h>
#include <sys/ucontext.h>
#include <sys/asm_linkage.h>
#include <sys/errno.h>
#include <sys/archsystm.h>
#include <sys/schedctl.h>
#include <sys/debug.h>
#include <sys/sysmacros.h>
/*
 * Save user context.
 */
void
savecontext(ucontext_t *ucp, const k_sigset_t *mask)
{
        proc_t *p = ttoproc(curthread);
        klwp_t *lwp = ttolwp(curthread);
        struct regs *rp = lwptoregs(lwp);

        /*
         * We unconditionally assign to every field through the end
         * of the gregs, but we need to bzero() everything -after- that
         * to avoid having any kernel stack garbage escape to userland.
         */
        bzero(&ucp->uc_mcontext.fpregs, sizeof (ucontext_t) -
            offsetof(ucontext_t, uc_mcontext.fpregs));

        ucp->uc_flags = UC_ALL;
        ucp->uc_link = (struct ucontext *)lwp->lwp_oldcontext;

        /*
         * Try to copyin() the ustack if one is registered. If the stack
         * has zero size, this indicates that stack bounds checking has
         * been disabled for this LWP. If stack bounds checking is disabled
         * or the copyin() fails, we fall back to the legacy behavior.
         */
        if (lwp->lwp_ustack == NULL ||
            copyin((void *)lwp->lwp_ustack, &ucp->uc_stack,
            sizeof (ucp->uc_stack)) != 0 ||
            ucp->uc_stack.ss_size == 0) {

                if (lwp->lwp_sigaltstack.ss_flags == SS_ONSTACK) {
                        ucp->uc_stack = lwp->lwp_sigaltstack;
                } else {
                        ucp->uc_stack.ss_sp = p->p_usrstack - p->p_stksize;
                        ucp->uc_stack.ss_size = p->p_stksize;
                        ucp->uc_stack.ss_flags = 0;
                }
        }

        /*
         * If either the trace flag or REQUEST_STEP is set,
         * arrange for single-stepping and turn off the trace flag.
         */
        if ((rp->r_ps & PS_T) || (lwp->lwp_pcb.pcb_flags & REQUEST_STEP)) {
                /*
                 * Clear PS_T so that saved user context won't have trace
                 * flag set.
                 */
                rp->r_ps &= ~PS_T;

                if (!(lwp->lwp_pcb.pcb_flags & REQUEST_NOSTEP)) {
                        lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
                        /*
                         * trap() always checks DEBUG_PENDING before
                         * checking for any pending signal, which can
                         * occasionally cause DEBUG_PENDING to go
                         * unhonoured.  For example, if the lwp is stopped
                         * by stop_on_fault() called from trap(), it may
                         * see a pending signal after being awakened and
                         * call savecontext(); on the way back to userland
                         * there is then no place where DEBUG_PENDING can
                         * be detected.  In anticipation of such occasions,
                         * set the AST flag for the thread, forcing it to
                         * take an excursion through trap(), where the flag
                         * is handled appropriately.
                         */
                        aston(curthread);
                }
        }

        getgregs(lwp, ucp->uc_mcontext.gregs);
        if (lwp->lwp_pcb.pcb_fpu.fpu_flags & FPU_EN)
                getfpregs(lwp, &ucp->uc_mcontext.fpregs);
        else
                ucp->uc_flags &= ~UC_FPU;

        sigktou(mask, &ucp->uc_sigmask);
}
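
/*
 * Illustrative sketch, not part of the kernel build: the ucontext_t filled
 * in above is what a userland getcontext(3C) call ultimately receives via
 * the GETCONTEXT case of getsetcontext() below.  A minimal consumer, using
 * only the standard <ucontext.h> interfaces, could inspect the saved stack
 * description (including the sigaltstack/legacy fallback) like this:
 *
 *	#include <ucontext.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		ucontext_t uc;
 *
 *		if (getcontext(&uc) != 0)
 *			return (1);
 *		(void) printf("stack %p size %lu flags %d\n",
 *		    uc.uc_stack.ss_sp, (unsigned long)uc.uc_stack.ss_size,
 *		    uc.uc_stack.ss_flags);
 *		return (0);
 *	}
 */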

/*
 * Restore user context.
 */
void
restorecontext(ucontext_t *ucp)
{
        kthread_t *t = curthread;
        klwp_t *lwp = ttolwp(t);

        lwp->lwp_oldcontext = (uintptr_t)ucp->uc_link;

        if (ucp->uc_flags & UC_STACK) {
                if (ucp->uc_stack.ss_flags == SS_ONSTACK)
                        lwp->lwp_sigaltstack = ucp->uc_stack;
                else
                        lwp->lwp_sigaltstack.ss_flags &= ~SS_ONSTACK;
        }

        if (ucp->uc_flags & UC_CPU) {
                /*
                 * If the trace flag is set, mark the lwp to take a
                 * single-step trap on return to user level (below).
                 * The x86 lcall and sysenter interfaces have already done
                 * this and turned off the flag, but the amd64 syscall
                 * interface has not.
                 */
                if (lwptoregs(lwp)->r_ps & PS_T)
                        lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
                setgregs(lwp, ucp->uc_mcontext.gregs);
                lwp->lwp_eosys = JUSTRETURN;
                t->t_post_sys = 1;
                aston(curthread);
        }

        if (ucp->uc_flags & UC_FPU)
                setfpregs(lwp, &ucp->uc_mcontext.fpregs);

        if (ucp->uc_flags & UC_SIGMASK) {
                /*
                 * We don't need to acquire p->p_lock here;
                 * we are manipulating thread-private data.
                 */
                schedctl_finish_sigblock(t);
                sigutok(&ucp->uc_sigmask, &t->t_hold);
                if (sigcheck(ttoproc(t), t))
                        t->t_sig_check = 1;
        }
}
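
/*
 * Illustrative sketch, not part of the kernel build: savecontext() and
 * restorecontext() back the userland getcontext(3C)/setcontext(3C) round
 * trip.  In the classic demonstration below (standard <ucontext.h> only),
 * each setcontext() resumes execution just after the getcontext() call, so
 * the program prints three passes before the counter stops the loop:
 *
 *	#include <ucontext.h>
 *	#include <stdio.h>
 *
 *	static volatile int passes;
 *
 *	int
 *	main(void)
 *	{
 *		ucontext_t uc;
 *
 *		(void) getcontext(&uc);
 *		(void) printf("pass %d\n", passes);
 *		if (++passes < 3)
 *			(void) setcontext(&uc);
 *		return (0);
 *	}
 */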

int
getsetcontext(int flag, void *arg)
{
        ucontext_t uc;
        ucontext_t *ucp;
        klwp_t *lwp = ttolwp(curthread);
        stack_t dummy_stk;
        caddr_t xregs = NULL;
        int xregs_size = 0;

        /*
         * In future releases, when the ucontext structure grows,
         * getcontext should be modified to only return the fields
         * specified in uc_flags.  That way, the structure can grow
         * and still be binary compatible with all .o's, which will
         * only have the old fields defined in uc_flags.
         */

        switch (flag) {
        default:
                return (set_errno(EINVAL));

        case GETCONTEXT:
                schedctl_finish_sigblock(curthread);
                savecontext(&uc, &curthread->t_hold);
                if (uc.uc_flags & UC_SIGMASK)
                        SIGSET_NATIVE_TO_BRAND(&uc.uc_sigmask);
                if (copyout(&uc, arg, sizeof (uc)))
                        return (set_errno(EFAULT));
                return (0);

        case SETCONTEXT:
                ucp = arg;
                if (ucp == NULL)
                        exit(CLD_EXITED, 0);
                /*
                 * Don't copyin filler or floating state unless we need it.
                 * The ucontext_t struct and fields are specified in the ABI.
                 */
                if (copyin(ucp, &uc, sizeof (ucontext_t) -
                    sizeof (uc.uc_filler) -
                    sizeof (uc.uc_mcontext.fpregs))) {
                        return (set_errno(EFAULT));
                }
                if (uc.uc_flags & UC_SIGMASK)
                        SIGSET_BRAND_TO_NATIVE(&uc.uc_sigmask);

                if ((uc.uc_flags & UC_FPU) &&
                    copyin(&ucp->uc_mcontext.fpregs, &uc.uc_mcontext.fpregs,
                    sizeof (uc.uc_mcontext.fpregs))) {
                        return (set_errno(EFAULT));
                }

                /*
                 * Get extra register state.
                 */
                xregs_clrptr(lwp, &uc);

                restorecontext(&uc);

                if ((uc.uc_flags & UC_STACK) && (lwp->lwp_ustack != 0))
                        (void) copyout(&uc.uc_stack, (stack_t *)lwp->lwp_ustack,
                            sizeof (uc.uc_stack));

                /*
                 * Free extra register state.
                 */
                if (xregs_size)
                        kmem_free(xregs, xregs_size);

                return (0);

        case GETUSTACK:
                if (copyout(&lwp->lwp_ustack, arg, sizeof (caddr_t)))
                        return (set_errno(EFAULT));
                return (0);

        case SETUSTACK:
                if (copyin(arg, &dummy_stk, sizeof (dummy_stk)))
                        return (set_errno(EFAULT));
                lwp->lwp_ustack = (uintptr_t)arg;
                return (0);
        }
}
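
/*
 * Illustrative sketch, not part of the kernel build: GETUSTACK and SETUSTACK
 * manage the per-lwp pointer to a userland stack_t (lwp_ustack).  Userland
 * code normally sees those registered bounds through stack_getbounds(3C);
 * the assumption here is that libc keeps the registered stack_t current for
 * the calling thread:
 *
 *	#include <ucontext.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		stack_t sp;
 *
 *		if (stack_getbounds(&sp) != 0)
 *			return (1);
 *		(void) printf("stack %p size %lu\n",
 *		    sp.ss_sp, (unsigned long)sp.ss_size);
 *		return (0);
 *	}
 */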

#ifdef _SYSCALL32_IMPL

/*
 * Save user context for 32-bit processes.
 */
void
savecontext32(ucontext32_t *ucp, const k_sigset_t *mask)
{
        proc_t *p = ttoproc(curthread);
        klwp_t *lwp = ttolwp(curthread);
        struct regs *rp = lwptoregs(lwp);

        bzero(&ucp->uc_mcontext.fpregs, sizeof (ucontext32_t) -
            offsetof(ucontext32_t, uc_mcontext.fpregs));

        ucp->uc_flags = UC_ALL;
        ucp->uc_link = (caddr32_t)lwp->lwp_oldcontext;

        if (lwp->lwp_ustack == NULL ||
            copyin((void *)lwp->lwp_ustack, &ucp->uc_stack,
            sizeof (ucp->uc_stack)) != 0 ||
            ucp->uc_stack.ss_size == 0) {

                if (lwp->lwp_sigaltstack.ss_flags == SS_ONSTACK) {
                        ucp->uc_stack.ss_sp =
                            (caddr32_t)(uintptr_t)lwp->lwp_sigaltstack.ss_sp;
                        ucp->uc_stack.ss_size =
                            (size32_t)lwp->lwp_sigaltstack.ss_size;
                        ucp->uc_stack.ss_flags = SS_ONSTACK;
                } else {
                        ucp->uc_stack.ss_sp = (caddr32_t)(uintptr_t)
                            (p->p_usrstack - p->p_stksize);
                        ucp->uc_stack.ss_size = (size32_t)p->p_stksize;
                        ucp->uc_stack.ss_flags = 0;
                }
        }

        /*
         * If either the trace flag or REQUEST_STEP is set, arrange
         * for single-stepping and turn off the trace flag.
         */
        if ((rp->r_ps & PS_T) || (lwp->lwp_pcb.pcb_flags & REQUEST_STEP)) {
                /*
                 * Clear PS_T so that saved user context won't have trace
                 * flag set.
                 */
                rp->r_ps &= ~PS_T;

                if (!(lwp->lwp_pcb.pcb_flags & REQUEST_NOSTEP)) {
                        lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
                        /*
                         * See comments in savecontext().
                         */
                        aston(curthread);
                }
        }

        getgregs32(lwp, ucp->uc_mcontext.gregs);
        if (lwp->lwp_pcb.pcb_fpu.fpu_flags & FPU_EN)
                getfpregs32(lwp, &ucp->uc_mcontext.fpregs);
        else
                ucp->uc_flags &= ~UC_FPU;

        sigktou(mask, &ucp->uc_sigmask);
}

int
getsetcontext32(int flag, void *arg)
{
        ucontext32_t uc;
        ucontext_t ucnat;
        ucontext32_t *ucp;
        klwp_t *lwp = ttolwp(curthread);
        caddr32_t ustack32;
        stack32_t dummy_stk32;

        switch (flag) {
        default:
                return (set_errno(EINVAL));

        case GETCONTEXT:
                schedctl_finish_sigblock(curthread);
                savecontext32(&uc, &curthread->t_hold);
                if (uc.uc_flags & UC_SIGMASK)
                        SIGSET_NATIVE_TO_BRAND(&uc.uc_sigmask);
                if (copyout(&uc, arg, sizeof (uc)))
                        return (set_errno(EFAULT));
                return (0);

        case SETCONTEXT:
                ucp = arg;
                if (ucp == NULL)
                        exit(CLD_EXITED, 0);
                if (copyin(ucp, &uc, sizeof (uc) -
                    sizeof (uc.uc_filler) -
                    sizeof (uc.uc_mcontext.fpregs))) {
                        return (set_errno(EFAULT));
                }
                if (uc.uc_flags & UC_SIGMASK)
                        SIGSET_BRAND_TO_NATIVE(&uc.uc_sigmask);
                if ((uc.uc_flags & UC_FPU) &&
                    copyin(&ucp->uc_mcontext.fpregs, &uc.uc_mcontext.fpregs,
                    sizeof (uc.uc_mcontext.fpregs))) {
                        return (set_errno(EFAULT));
                }

                ucontext_32ton(&uc, &ucnat);
                restorecontext(&ucnat);

                if ((uc.uc_flags & UC_STACK) && (lwp->lwp_ustack != 0))
                        (void) copyout(&uc.uc_stack,
                            (stack32_t *)lwp->lwp_ustack, sizeof (uc.uc_stack));
                return (0);

        case GETUSTACK:
                ustack32 = (caddr32_t)lwp->lwp_ustack;
                if (copyout(&ustack32, arg, sizeof (ustack32)))
                        return (set_errno(EFAULT));
                return (0);

        case SETUSTACK:
                if (copyin(arg, &dummy_stk32, sizeof (dummy_stk32)))
                        return (set_errno(EFAULT));
                lwp->lwp_ustack = (uintptr_t)arg;
                return (0);
        }
}

#endif  /* _SYSCALL32_IMPL */