1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  23  * Use is subject to license terms.
  24  *
  25  * Copyright 2018 Joyent, Inc.
  26  */
  27 
  28 /*
  29  * Management of KMDB's IDT, which is installed upon KMDB activation.
  30  *
  31  * Debugger activation has two flavors, which cover the cases where KMDB is
  32  * loaded at boot, and when it is loaded after boot.  In brief, in both cases,
  33  * the KDI needs to interpose upon several handlers in the IDT.  When
  34  * mod-loaded KMDB is deactivated, we undo the IDT interposition, restoring the
  35  * handlers to what they were before we started.
  36  *
  37  * We also take over the entirety of IDT (except the double-fault handler) on
  38  * the active CPU when we're in kmdb so we can handle things like page faults
  39  * sensibly.
  40  *
  41  * Boot-loaded KMDB
  42  *
  43  * When we're first activated, we're running on boot's IDT.  We need to be able
  44  * to function in this world, so we'll install our handlers into boot's IDT.
  45  * This is a little complicated: we're using the fake cpu_t set up by
  46  * boot_kdi_tmpinit(), so we can't access cpu_idt directly.  Instead,
  47  * kdi_idt_write() notices that cpu_idt is NULL, and works around this problem.
  48  *
  49  * Later, when we're about to switch to the kernel's IDT, it'll call us via
  50  * kdi_idt_sync(), allowing us to add our handlers to the new IDT.  While
  51  * boot-loaded KMDB can't be unloaded, we still need to save the descriptors we
  52  * replace so we can pass traps back to the kernel as necessary.
  53  *
  54  * The last phase of boot-loaded KMDB activation occurs at non-boot CPU
  55  * startup.  We will be called on each non-boot CPU, thus allowing us to set up
  56  * any watchpoints that may have been configured on the boot CPU and interpose
  57  * on the given CPU's IDT.  We don't save the interposed descriptors in this
  58  * case -- see kdi_cpu_init() for details.
  59  *
  60  * Mod-loaded KMDB
  61  *
  62  * This style of activation is much simpler, as the CPUs are already running,
  63  * and are using their own copy of the kernel's IDT.  We simply interpose upon
  64  * each CPU's IDT.  We save the handlers we replace, both for deactivation and
  65  * for passing traps back to the kernel.  Note that for the hypervisors'
  66  * benefit, we need to xcall to the other CPUs to do this, since we need to
  67  * actively set the trap entries in its virtual IDT from that vcpu's context
  68  * rather than just modifying the IDT table from the CPU running kdi_activate().
  69  */
  70 
  71 #include <sys/types.h>
  72 #include <sys/segments.h>
  73 #include <sys/trap.h>
  74 #include <sys/cpuvar.h>
  75 #include <sys/reboot.h>
  76 #include <sys/sunddi.h>
  77 #include <sys/archsystm.h>
  78 #include <sys/kdi_impl.h>
  79 #include <sys/x_call.h>
  80 #include <ia32/sys/psw.h>
  81 #include <vm/hat_i86.h>
  82 
/* Number of vectors we interpose on in the kernel's IDT (see kdi_gate_specs). */
#define	KDI_GATE_NVECS	3

/* saveold argument to kdi_idt_gates_install(): whether to stash old gates. */
#define	KDI_IDT_NOSAVE	0
#define	KDI_IDT_SAVE	1

#define	KDI_IDT_DTYPE_KERNEL	0
#define	KDI_IDT_DTYPE_BOOT	1

/* Solely to keep kdiregs_t in the CTF, otherwise unused. */
kdiregs_t kdi_regs;

/* Per-CPU save areas and their count, set in kdi_activate(). */
kdi_cpusave_t *kdi_cpusave;
int kdi_ncpusave;

/* Debugger entry point, supplied by the caller of kdi_activate(). */
static kdi_main_t kdi_kmdb_main;

/* Debug-register (watchpoint) state, copied in via kdi_update_drreg(). */
kdi_drreg_t kdi_drreg;

#ifndef __amd64
/* Used to track the current set of valid kernel selectors. */
uint32_t	kdi_cs;
uint32_t	kdi_ds;
uint32_t	kdi_fs;
uint32_t	kdi_gs;
#endif

uintptr_t	kdi_kernel_handler;

int		kdi_trap_switch;

#define	KDI_MEMRANGES_MAX	2

/* Memory ranges the debugger may access; see kdi_memrange_add(). */
kdi_memrange_t	kdi_memranges[KDI_MEMRANGES_MAX];
int		kdi_nmemranges;

/* Shape of the assembly-language trap handlers referenced below. */
typedef void idt_hdlr_f(void);

extern idt_hdlr_f kdi_trap0, kdi_trap1, kdi_int2, kdi_trap3, kdi_trap4;
extern idt_hdlr_f kdi_trap5, kdi_trap6, kdi_trap7, kdi_trap9;
extern idt_hdlr_f kdi_traperr10, kdi_traperr11, kdi_traperr12;
extern idt_hdlr_f kdi_traperr13, kdi_traperr14, kdi_trap16, kdi_traperr17;
extern idt_hdlr_f kdi_trap18, kdi_trap19, kdi_trap20, kdi_ivct32;
extern idt_hdlr_f kdi_invaltrap;
extern size_t kdi_ivct_size;

/* One interposed vector: its number and the privilege level of its gate. */
typedef struct kdi_gate_spec {
	uint_t kgs_vec;
	uint_t kgs_dpl;
} kdi_gate_spec_t;

/*
 * Beware: kdi_pass_to_kernel() has unpleasant knowledge of this list.
 */
static const kdi_gate_spec_t kdi_gate_specs[KDI_GATE_NVECS] = {
	{ T_SGLSTP, TRP_KPL },
	{ T_BPTFLT, TRP_UPL },
	{ T_DBGENTR, TRP_KPL }
};

/* Saved kernel descriptors for the interposed vectors (for restore/pass). */
static gate_desc_t kdi_kgates[KDI_GATE_NVECS];

/* The debugger's own full IDT, defined in assembly. */
extern gate_desc_t kdi_idt[NIDT];
 145 
/*
 * Describes how kdi_idt_init() populates kdi_idt[].  Each entry covers
 * either a single vector (id_high == 0) or the inclusive range
 * [id_low, id_high].  For ranged entries with a non-NULL id_incrp,
 * successive vectors get handlers spaced *id_incrp bytes apart starting
 * at id_basehdlr.
 */
struct idt_description {
	uint_t id_low;		/* first (or only) vector */
	uint_t id_high;		/* last vector of range, or 0 for single */
	idt_hdlr_f *id_basehdlr;	/* handler for id_low */
	size_t *id_incrp;	/* per-vector handler stride, if ranged */
} idt_description[] = {
	{ T_ZERODIV, 0,		kdi_trap0, NULL },
	{ T_SGLSTP, 0,		kdi_trap1, NULL },
	{ T_NMIFLT, 0,		kdi_int2, NULL },
	{ T_BPTFLT, 0,		kdi_trap3, NULL },
	{ T_OVFLW, 0,		kdi_trap4, NULL },
	{ T_BOUNDFLT, 0,	kdi_trap5, NULL },
	{ T_ILLINST, 0,		kdi_trap6, NULL },
	{ T_NOEXTFLT, 0,	kdi_trap7, NULL },
#if !defined(__xpv)
	/* Double faults keep the kernel's handler (see block comment above). */
	{ T_DBLFLT, 0,		syserrtrap, NULL },
#endif
	{ T_EXTOVRFLT, 0,	kdi_trap9, NULL },
	{ T_TSSFLT, 0,		kdi_traperr10, NULL },
	{ T_SEGFLT, 0,		kdi_traperr11, NULL },
	{ T_STKFLT, 0,		kdi_traperr12, NULL },
	{ T_GPFLT, 0,		kdi_traperr13, NULL },
	{ T_PGFLT, 0,		kdi_traperr14, NULL },
	{ 15, 0,		kdi_invaltrap, NULL },	/* unnamed vector */
	{ T_EXTERRFLT, 0,	kdi_trap16, NULL },
	{ T_ALIGNMENT, 0,	kdi_traperr17, NULL },
	{ T_MCE, 0,		kdi_trap18, NULL },
	{ T_SIMDFPE, 0,		kdi_trap19, NULL },
	{ T_DBGENTR, 0,		kdi_trap20, NULL },
	{ 21, 31,		kdi_invaltrap, NULL },	/* unnamed vectors */
	{ 32, 255,		kdi_ivct32, &kdi_ivct_size },
	{ 0, 0, NULL },		/* sentinel: NULL id_basehdlr ends the list */
};
 179 
 180 void
 181 kdi_idt_init(selector_t sel)
 182 {
 183         struct idt_description *id;
 184         int i;
 185 
 186         for (id = idt_description; id->id_basehdlr != NULL; id++) {
 187                 uint_t high = id->id_high != 0 ? id->id_high : id->id_low;
 188                 size_t incr = id->id_incrp != NULL ? *id->id_incrp : 0;
 189 
 190 #if !defined(__xpv)
 191                 if (kpti_enable && sel == KCS_SEL && id->id_low == T_DBLFLT)
 192                         id->id_basehdlr = tr_syserrtrap;
 193 #endif
 194 
 195                 for (i = id->id_low; i <= high; i++) {
 196                         caddr_t hdlr = (caddr_t)id->id_basehdlr +
 197                             incr * (i - id->id_low);
 198                         set_gatesegd(&kdi_idt[i], (void (*)())hdlr, sel,
 199                             SDT_SYSIGT, TRP_KPL, IST_DBG);
 200                 }
 201         }
 202 }
 203 
 204 static void
 205 kdi_idt_gates_install(selector_t sel, int saveold)
 206 {
 207         gate_desc_t gates[KDI_GATE_NVECS];
 208         int i;
 209 
 210         bzero(gates, sizeof (*gates));
 211 
 212         for (i = 0; i < KDI_GATE_NVECS; i++) {
 213                 const kdi_gate_spec_t *gs = &kdi_gate_specs[i];
 214                 uintptr_t func = GATESEG_GETOFFSET(&kdi_idt[gs->kgs_vec]);
 215                 set_gatesegd(&gates[i], (void (*)())func, sel, SDT_SYSIGT,
 216                     gs->kgs_dpl, IST_DBG);
 217         }
 218 
 219         for (i = 0; i < KDI_GATE_NVECS; i++) {
 220                 uint_t vec = kdi_gate_specs[i].kgs_vec;
 221 
 222                 if (saveold)
 223                         kdi_kgates[i] = CPU->cpu_m.mcpu_idt[vec];
 224 
 225                 kdi_idt_write(&gates[i], vec);
 226         }
 227 }
 228 
 229 static void
 230 kdi_idt_gates_restore(void)
 231 {
 232         int i;
 233 
 234         for (i = 0; i < KDI_GATE_NVECS; i++)
 235                 kdi_idt_write(&kdi_kgates[i], kdi_gate_specs[i].kgs_vec);
 236 }
 237 
 238 /*
 239  * Called when we switch to the kernel's IDT.  We need to interpose on the
 240  * kernel's IDT entries and stop using KMDBCODE_SEL.
 241  */
void
kdi_idt_sync(void)
{
	/* Rebuild our IDT using the kernel's code selector ... */
	kdi_idt_init(KCS_SEL);
	/* ... and interpose on the kernel's IDT, saving what we replace. */
	kdi_idt_gates_install(KCS_SEL, KDI_IDT_SAVE);
}
 248 
/* Copy the caller's debug-register (watchpoint) state into kdi_drreg. */
void
kdi_update_drreg(kdi_drreg_t *drreg)
{
	kdi_drreg = *drreg;
}
 254 
 255 void
 256 kdi_memrange_add(caddr_t base, size_t len)
 257 {
 258         kdi_memrange_t *mr = &kdi_memranges[kdi_nmemranges];
 259 
 260         ASSERT(kdi_nmemranges != KDI_MEMRANGES_MAX);
 261 
 262         mr->mr_base = base;
 263         mr->mr_lim = base + len - 1;
 264         kdi_nmemranges++;
 265 }
 266 
 267 void
 268 kdi_idt_switch(kdi_cpusave_t *cpusave)
 269 {
 270         if (cpusave == NULL)
 271                 kdi_idtr_set(kdi_idt, sizeof (kdi_idt) - 1);
 272         else
 273                 kdi_idtr_set(cpusave->krs_idt, (sizeof (*idt0) * NIDT) - 1);
 274 }
 275 
 276 /*
 277  * Activation for CPUs other than the boot CPU, called from that CPU's
 278  * mp_startup().  We saved the kernel's descriptors when we initialized the
 279  * boot CPU, so we don't want to do it again.  Saving the handlers from this
 280  * CPU's IDT would actually be dangerous with the CPU initialization method in
 281  * use at the time of this writing.  With that method, the startup code creates
 282  * the IDTs for slave CPUs by copying the one used by the boot CPU, which has
 283  * already been interposed upon by KMDB.  Were we to interpose again, we'd
 284  * replace the kernel's descriptors with our own in the save area.  By not
 285  * saving, but still overwriting, we'll work in the current world, and in any
 286  * future world where the IDT is generated from scratch.
 287  */
void
kdi_cpu_init(void)
{
	/* Interpose without saving -- see the comment block above. */
	kdi_idt_gates_install(KCS_SEL, KDI_IDT_NOSAVE);
	/* Load the debug registers. */
	kdi_cpu_debug_init(&kdi_cpusave[CPU->cpu_id]);
}
 295 
 296 /*
 297  * Activation for all CPUs for mod-loaded kmdb, i.e. a kmdb that wasn't
 298  * loaded at boot.
 299  */
/* xcall target: interpose on this CPU's IDT, saving the old descriptors. */
static int
kdi_cpu_activate(void)
{
	kdi_idt_gates_install(KCS_SEL, KDI_IDT_SAVE);
	return (0);
}
 306 
 307 void
 308 kdi_activate(kdi_main_t main, kdi_cpusave_t *cpusave, uint_t ncpusave)
 309 {
 310         int i;
 311         cpuset_t cpuset;
 312 
 313         CPUSET_ALL(cpuset);
 314 
 315         kdi_cpusave = cpusave;
 316         kdi_ncpusave = ncpusave;
 317 
 318         kdi_kmdb_main = main;
 319 
 320         for (i = 0; i < kdi_ncpusave; i++) {
 321                 kdi_cpusave[i].krs_cpu_id = i;
 322 
 323                 kdi_cpusave[i].krs_curcrumb =
 324                     &kdi_cpusave[i].krs_crumbs[KDI_NCRUMBS - 1];
 325                 kdi_cpusave[i].krs_curcrumbidx = KDI_NCRUMBS - 1;
 326         }
 327 
 328         if (boothowto & RB_KMDB)
 329                 kdi_idt_init(KMDBCODE_SEL);
 330         else
 331                 kdi_idt_init(KCS_SEL);
 332 
 333         /* The initial selector set.  Updated by the debugger-entry code */
 334 #ifndef __amd64
 335         kdi_cs = B32CODE_SEL;
 336         kdi_ds = kdi_fs = kdi_gs = B32DATA_SEL;
 337 #endif
 338 
 339         kdi_memranges[0].mr_base = kdi_segdebugbase;
 340         kdi_memranges[0].mr_lim = kdi_segdebugbase + kdi_segdebugsize - 1;
 341         kdi_nmemranges = 1;
 342 
 343         kdi_drreg.dr_ctl = KDIREG_DRCTL_RESERVED;
 344         kdi_drreg.dr_stat = KDIREG_DRSTAT_RESERVED;
 345 
 346         if (boothowto & RB_KMDB) {
 347                 kdi_idt_gates_install(KMDBCODE_SEL, KDI_IDT_NOSAVE);
 348         } else {
 349                 xc_call(0, 0, 0, CPUSET2BV(cpuset),
 350                     (xc_func_t)kdi_cpu_activate);
 351         }
 352 }
 353 
/* xcall target: restore the kernel's saved descriptors on this CPU. */
static int
kdi_cpu_deactivate(void)
{
	kdi_idt_gates_restore();
	return (0);
}
 360 
/*
 * Deactivate mod-loaded KMDB: xcall every CPU to restore the kernel's
 * IDT entries, then forget our memory ranges.
 */
void
kdi_deactivate(void)
{
	cpuset_t cpuset;
	CPUSET_ALL(cpuset);

	xc_call(0, 0, 0, CPUSET2BV(cpuset), (xc_func_t)kdi_cpu_deactivate);
	kdi_nmemranges = 0;
}
 370 
 371 /*
 372  * We receive all breakpoints and single step traps.  Some of them, including
 373  * those from userland and those induced by DTrace providers, are intended for
 374  * the kernel, and must be processed there.  We adopt this
 375  * ours-until-proven-otherwise position due to the painful consequences of
 376  * sending the kernel an unexpected breakpoint or single step.  Unless someone
 377  * can prove to us that the kernel is prepared to handle the trap, we'll assume
 378  * there's a problem and will give the user a chance to debug it.
 379  *
 380  * If we return 2, then the calling code should restore the trap-time %cr3: that
 381  * is, it really is a kernel-originated trap.
 382  */
 383 int
 384 kdi_trap_pass(kdi_cpusave_t *cpusave)
 385 {
 386         greg_t tt = cpusave->krs_gregs[KDIREG_TRAPNO];
 387         greg_t pc = cpusave->krs_gregs[KDIREG_PC];
 388         greg_t cs = cpusave->krs_gregs[KDIREG_CS];
 389 
 390         if (USERMODE(cs))
 391                 return (1);
 392 
 393         if (tt != T_BPTFLT && tt != T_SGLSTP)
 394                 return (0);
 395 
 396         if (tt == T_BPTFLT && kdi_dtrace_get_state() ==
 397             KDI_DTSTATE_DTRACE_ACTIVE)
 398                 return (2);
 399 
 400         /*
 401          * See the comments in the kernel's T_SGLSTP handler for why we need to
 402          * do this.
 403          */
 404 #if !defined(__xpv)
 405         if (tt == T_SGLSTP &&
 406             (pc == (greg_t)sys_sysenter || pc == (greg_t)brand_sys_sysenter ||
 407             pc == (greg_t)tr_sys_sysenter ||
 408             pc == (greg_t)tr_brand_sys_sysenter)) {
 409 #else
 410         if (tt == T_SGLSTP &&
 411             (pc == (greg_t)sys_sysenter || pc == (greg_t)brand_sys_sysenter)) {
 412 #endif
 413                 return (1);
 414         }
 415 
 416         return (0);
 417 }
 418 
 419 /*
 420  * State has been saved, and all CPUs are on the CPU-specific stacks.  All
 421  * CPUs enter here, and head off into the debugger proper.
 422  */
void
kdi_debugger_entry(kdi_cpusave_t *cpusave)
{
	/*
	 * BPTFLT gives us control with %eip set to the instruction *after*
	 * the int 3.  Back it off, so we're looking at the instruction that
	 * triggered the fault.
	 */
	if (cpusave->krs_gregs[KDIREG_TRAPNO] == T_BPTFLT)
		cpusave->krs_gregs[KDIREG_PC]--;

	/* Hand off to the debugger proper, set at kdi_activate() time. */
	kdi_kmdb_main(cpusave);
}