Print this page
8956 Implement KPTI
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Robert Mustacchi <rm@joyent.com>

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/i86pc/os/intr.c
          +++ new/usr/src/uts/i86pc/os/intr.c
↓ open down ↓ 13 lines elided ↑ open up ↑
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  
  22   22  /*
  23   23   * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
  24      - * Copyright (c) 2012, Joyent, Inc.  All rights reserverd.
       24 + * Copyright (c) 2018 Joyent, Inc.  All rights reserved.
  25   25   */
  26   26  
  27   27  /*
  28   28   * To understand the present state of interrupt handling on i86pc, we must
  29   29   * first consider the history of interrupt controllers and our way of handling
  30   30   * interrupts.
  31   31   *
  32   32   * History of Interrupt Controllers on i86pc
  33   33   * -----------------------------------------
  34   34   *
↓ open down ↓ 429 lines elided ↑ open up ↑
 464  464  #include <sys/archsystm.h>
 465  465  #include <sys/machsystm.h>
 466  466  #include <sys/ontrap.h>
 467  467  #include <sys/x86_archext.h>
 468  468  #include <sys/promif.h>
 469  469  #include <vm/hat_i86.h>
 470  470  #if defined(__xpv)
 471  471  #include <sys/hypervisor.h>
 472  472  #endif
 473  473  
      474 +#if defined(__amd64) && !defined(__xpv)
      475 +/* If this fails, then the padding numbers in machcpuvar.h are wrong. */
      476 +CTASSERT((offsetof(cpu_t, cpu_m) + offsetof(struct machcpu, mcpu_pad))
      477 +    < MMU_PAGESIZE);
      478 +CTASSERT((offsetof(cpu_t, cpu_m) + offsetof(struct machcpu, mcpu_kpti))
      479 +    >= MMU_PAGESIZE);
      480 +CTASSERT((offsetof(cpu_t, cpu_m) + offsetof(struct machcpu, mcpu_kpti_dbg))
      481 +    < 2 * MMU_PAGESIZE);
      482 +CTASSERT((offsetof(cpu_t, cpu_m) + offsetof(struct machcpu, mcpu_pad2))
      483 +    < 2 * MMU_PAGESIZE);
      484 +CTASSERT(((sizeof (struct kpti_frame)) & 0xF) == 0);
      485 +CTASSERT(((offsetof(cpu_t, cpu_m) + offsetof(struct machcpu, mcpu_kpti_dbg))
      486 +    & 0xF) == 0);
      487 +CTASSERT((offsetof(struct kpti_frame, kf_tr_rsp) & 0xF) == 0);
      488 +#endif
 474  489  
 475  490  #if defined(__xpv) && defined(DEBUG)
 476  491  
 477  492  /*
 478  493   * This panic message is intended as an aid to interrupt debugging.
 479  494   *
 480  495   * The associated assertion tests the condition of enabling
 481  496   * events when events are already enabled.  The implication
 482  497   * being that whatever code the programmer thought was
 483  498   * protected by having events disabled until the second
↓ open down ↓ 982 lines elided ↑ open up ↑
1466 1481                          psig();
1467 1482                          tp->t_sig_check = 1;
1468 1483                          cli();
1469 1484                  }
1470 1485                  tp->t_lwp->lwp_pcb.pcb_rupdate = 0;
1471 1486  
1472 1487  #endif  /* __amd64 */
1473 1488                  return (1);
1474 1489          }
1475 1490  
     1491 +#if !defined(__xpv)
1476 1492          /*
     1493 +         * Assert that we're not trying to return into the syscall return
     1494 +         * trampolines. Things will go baaaaad if we try to do that.
     1495 +         *
     1496 +         * Note that none of these run with interrupts on, so this should
     1497 +         * never happen (even in the sysexit case the STI doesn't take effect
     1498 +         * until after sysexit finishes).
     1499 +         */
     1500 +        extern void tr_sysc_ret_start();
     1501 +        extern void tr_sysc_ret_end();
     1502 +        ASSERT(!(rp->r_pc >= (uintptr_t)tr_sysc_ret_start &&
     1503 +            rp->r_pc <= (uintptr_t)tr_sysc_ret_end));
     1504 +#endif
     1505 +
     1506 +        /*
1477 1507           * Here if we are returning to supervisor mode.
1478 1508           * Check for a kernel preemption request.
1479 1509           */
1480 1510          if (CPU->cpu_kprunrun && (rp->r_ps & PS_IE)) {
1481 1511  
1482 1512                  /*
1483 1513                   * Do nothing if already in kpreempt
1484 1514                   */
1485 1515                  if (!tp->t_preempt_lk) {
1486 1516                          tp->t_preempt_lk = 1;
↓ open down ↓ 138 lines elided ↑ open up ↑
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX