Print this page
9736 kmdb tortures via single-step miscellaneous trap
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/intel/kdi/kdi_asm.s
+++ new/usr/src/uts/intel/kdi/kdi_asm.s
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 *
26 26 * Copyright 2018 Joyent, Inc.
27 27 */
28 28
29 29 /*
30 30 * Debugger entry and exit for both master and slave CPUs. kdi_idthdl.s contains
31 31 * the IDT stubs that drop into here (mainly via kdi_cmnint).
32 32 */
33 33
34 34 #if defined(__lint)
35 35 #include <sys/types.h>
36 36 #else
37 37
38 38 #include <sys/segments.h>
39 39 #include <sys/asm_linkage.h>
40 40 #include <sys/controlregs.h>
41 41 #include <sys/x86_archext.h>
42 42 #include <sys/privregs.h>
43 43 #include <sys/machprivregs.h>
44 44 #include <sys/kdi_regs.h>
45 45 #include <sys/psw.h>
46 46 #include <sys/uadmin.h>
47 47 #ifdef __xpv
48 48 #include <sys/hypervisor.h>
49 49 #endif
50 50 #include <kdi_assym.h>
51 51 #include <assym.h>
52 52
53 53 /* clobbers %rdx, %rcx, returns addr in %rax, CPU ID in %rbx */
54 54 #define GET_CPUSAVE_ADDR \
55 55 movzbq %gs:CPU_ID, %rbx; \
56 56 movq %rbx, %rax; \
57 57 movq $KRS_SIZE, %rcx; \
58 58 mulq %rcx; \
59 59 movq $kdi_cpusave, %rdx; \
60 60 /*CSTYLED*/ \
61 61 addq (%rdx), %rax
62 62
63 63 /*
64 64 * Save copies of the IDT and GDT descriptors. Note that we only save the IDT
65 65 * and GDT if the IDT isn't ours, as we may be legitimately re-entering the
66 66 * debugger through the trap handler. We don't want to clobber the saved IDT
67 67 * in the process, as we'd end up resuming the world on our IDT.
68 68 */
69 69 #define SAVE_IDTGDT \
70 70 movq %gs:CPU_IDT, %r11; \
71 71 leaq kdi_idt(%rip), %rsi; \
72 72 cmpq %rsi, %r11; \
73 73 je 1f; \
74 74 movq %r11, KRS_IDT(%rax); \
75 75 movq %gs:CPU_GDT, %r11; \
76 76 movq %r11, KRS_GDT(%rax); \
77 77 1:
78 78
79 79 #ifdef __xpv
80 80
81 81 /*
82 82 * Already on kernel gsbase via the hypervisor.
83 83 */
84 84 #define SAVE_GSBASE(reg) /* nothing */
85 85 #define RESTORE_GSBASE(reg) /* nothing */
86 86
87 87 #else
88 88
89 89 #define SAVE_GSBASE(base) \
90 90 movl $MSR_AMD_GSBASE, %ecx; \
91 91 rdmsr; \
92 92 shlq $32, %rdx; \
93 93 orq %rax, %rdx; \
94 94 movq %rdx, REG_OFF(KDIREG_GSBASE)(base); \
95 95 movl $MSR_AMD_KGSBASE, %ecx; \
96 96 rdmsr; \
97 97 shlq $32, %rdx; \
98 98 orq %rax, %rdx; \
99 99 movq %rdx, REG_OFF(KDIREG_KGSBASE)(base)
100 100
101 101 /*
102 102 * We shouldn't have stomped on KGSBASE, so don't try to restore it.
103 103 */
104 104 #define RESTORE_GSBASE(base) \
105 105 movq REG_OFF(KDIREG_GSBASE)(base), %rdx; \
106 106 movq %rdx, %rax; \
107 107 shrq $32, %rdx; \
108 108 movl $MSR_AMD_GSBASE, %ecx; \
109 109 wrmsr
110 110
111 111 #endif /* __xpv */
112 112
113 113 /*
114 114 * %ss, %rsp, %rflags, %cs, %rip, %err, %trapno are already on the stack.
115 115 */
116 116 #define KDI_SAVE_REGS(base) \
117 117 movq %rdi, REG_OFF(KDIREG_RDI)(base); \
118 118 movq %rsi, REG_OFF(KDIREG_RSI)(base); \
119 119 movq %rdx, REG_OFF(KDIREG_RDX)(base); \
120 120 movq %rcx, REG_OFF(KDIREG_RCX)(base); \
121 121 movq %r8, REG_OFF(KDIREG_R8)(base); \
122 122 movq %r9, REG_OFF(KDIREG_R9)(base); \
123 123 movq %rax, REG_OFF(KDIREG_RAX)(base); \
124 124 movq %rbx, REG_OFF(KDIREG_RBX)(base); \
125 125 movq %rbp, REG_OFF(KDIREG_RBP)(base); \
126 126 movq %r10, REG_OFF(KDIREG_R10)(base); \
127 127 movq %r11, REG_OFF(KDIREG_R11)(base); \
128 128 movq %r12, REG_OFF(KDIREG_R12)(base); \
129 129 movq %r13, REG_OFF(KDIREG_R13)(base); \
130 130 movq %r14, REG_OFF(KDIREG_R14)(base); \
131 131 movq %r15, REG_OFF(KDIREG_R15)(base); \
132 132 movq %rbp, REG_OFF(KDIREG_SAVFP)(base); \
133 133 movq REG_OFF(KDIREG_RIP)(base), %rax; \
134 134 movq %rax, REG_OFF(KDIREG_SAVPC)(base); \
135 135 movq %cr2, %rax; \
136 136 movq %rax, REG_OFF(KDIREG_CR2)(base); \
137 137 clrq %rax; \
138 138 movw %ds, %ax; \
139 139 movq %rax, REG_OFF(KDIREG_DS)(base); \
140 140 movw %es, %ax; \
141 141 movq %rax, REG_OFF(KDIREG_ES)(base); \
142 142 movw %fs, %ax; \
143 143 movq %rax, REG_OFF(KDIREG_FS)(base); \
144 144 movw %gs, %ax; \
145 145 movq %rax, REG_OFF(KDIREG_GS)(base); \
146 146 SAVE_GSBASE(base)
147 147
148 148 #define KDI_RESTORE_REGS(base) \
149 149 movq base, %rdi; \
150 150 RESTORE_GSBASE(%rdi); \
151 151 movq REG_OFF(KDIREG_ES)(%rdi), %rax; \
152 152 movw %ax, %es; \
153 153 movq REG_OFF(KDIREG_DS)(%rdi), %rax; \
154 154 movw %ax, %ds; \
155 155 movq REG_OFF(KDIREG_CR2)(base), %rax; \
156 156 movq %rax, %cr2; \
157 157 movq REG_OFF(KDIREG_R15)(%rdi), %r15; \
158 158 movq REG_OFF(KDIREG_R14)(%rdi), %r14; \
159 159 movq REG_OFF(KDIREG_R13)(%rdi), %r13; \
160 160 movq REG_OFF(KDIREG_R12)(%rdi), %r12; \
161 161 movq REG_OFF(KDIREG_R11)(%rdi), %r11; \
162 162 movq REG_OFF(KDIREG_R10)(%rdi), %r10; \
163 163 movq REG_OFF(KDIREG_RBP)(%rdi), %rbp; \
164 164 movq REG_OFF(KDIREG_RBX)(%rdi), %rbx; \
165 165 movq REG_OFF(KDIREG_RAX)(%rdi), %rax; \
166 166 movq REG_OFF(KDIREG_R9)(%rdi), %r9; \
167 167 movq REG_OFF(KDIREG_R8)(%rdi), %r8; \
168 168 movq REG_OFF(KDIREG_RCX)(%rdi), %rcx; \
169 169 movq REG_OFF(KDIREG_RDX)(%rdi), %rdx; \
170 170 movq REG_OFF(KDIREG_RSI)(%rdi), %rsi; \
171 171 movq REG_OFF(KDIREG_RDI)(%rdi), %rdi
172 172
173 173 /*
174 174 * Given the address of the current CPU's cpusave area in %rax, the following
175 175 * macro restores the debugging state to said CPU. Restored state includes
176 176 * the debug registers from the global %dr variables.
177 177 *
178 178 * Takes the cpusave area in %rdi as a parameter.
179 179 */
180 180 #define KDI_RESTORE_DEBUGGING_STATE \
181 181 pushq %rdi; \
182 182 leaq kdi_drreg(%rip), %r15; \
183 183 movl $7, %edi; \
184 184 movq DR_CTL(%r15), %rsi; \
185 185 call kdi_dreg_set; \
186 186 \
187 187 movl $6, %edi; \
188 188 movq $KDIREG_DRSTAT_RESERVED, %rsi; \
189 189 call kdi_dreg_set; \
190 190 \
191 191 movl $0, %edi; \
192 192 movq DRADDR_OFF(0)(%r15), %rsi; \
193 193 call kdi_dreg_set; \
194 194 movl $1, %edi; \
195 195 movq DRADDR_OFF(1)(%r15), %rsi; \
196 196 call kdi_dreg_set; \
197 197 movl $2, %edi; \
198 198 movq DRADDR_OFF(2)(%r15), %rsi; \
199 199 call kdi_dreg_set; \
200 200 movl $3, %edi; \
201 201 movq DRADDR_OFF(3)(%r15), %rsi; \
202 202 call kdi_dreg_set; \
203 203 popq %rdi;
204 204
205 205 /*
206 206 * Each cpusave buffer has an area set aside for a ring buffer of breadcrumbs.
207 207 * The following macros manage the buffer.
208 208 */
209 209
210 210 /* Advance the ring buffer */
211 211 #define ADVANCE_CRUMB_POINTER(cpusave, tmp1, tmp2) \
212 212 movq KRS_CURCRUMBIDX(cpusave), tmp1; \
213 213 cmpq $[KDI_NCRUMBS - 1], tmp1; \
214 214 jge 1f; \
215 215 /* Advance the pointer and index */ \
216 216 addq $1, tmp1; \
217 217 movq tmp1, KRS_CURCRUMBIDX(cpusave); \
218 218 movq KRS_CURCRUMB(cpusave), tmp1; \
219 219 addq $KRM_SIZE, tmp1; \
220 220 jmp 2f; \
221 221 1: /* Reset the pointer and index */ \
222 222 movq $0, KRS_CURCRUMBIDX(cpusave); \
223 223 leaq KRS_CRUMBS(cpusave), tmp1; \
224 224 2: movq tmp1, KRS_CURCRUMB(cpusave); \
225 225 /* Clear the new crumb */ \
226 226 movq $KDI_NCRUMBS, tmp2; \
227 227 3: movq $0, -4(tmp1, tmp2, 4); \
228 228 decq tmp2; \
229 229 jnz 3b
230 230
231 231 /* Set a value in the current breadcrumb buffer */
232 232 #define ADD_CRUMB(cpusave, offset, value, tmp) \
233 233 movq KRS_CURCRUMB(cpusave), tmp; \
234 234 movq value, offset(tmp)
235 235
236 236 /* XXX implement me */
237 237 ENTRY_NP(kdi_nmiint)
238 238 clrq %rcx
239 239 movq (%rcx), %rcx
240 240 SET_SIZE(kdi_nmiint)
241 241
242 242 /*
243 243 * The main entry point for master CPUs. It also serves as the trap
244 244 * handler for all traps and interrupts taken during single-step.
245 245 */
246 246 ENTRY_NP(kdi_cmnint)
247 247 ALTENTRY(kdi_master_entry)
248 248
249 249 pushq %rax
250 250 CLI(%rax)
251 251 popq %rax
252 252
253 253 /* Save current register state */
254 254 subq $REG_OFF(KDIREG_TRAPNO), %rsp
255 255 KDI_SAVE_REGS(%rsp)
256 256
257 257 #ifdef __xpv
258 258 /*
259 259 * Clear saved_upcall_mask in unused byte of cs slot on stack.
260 260 * It can only confuse things.
261 261 */
262 262 movb $0, REG_OFF(KDIREG_CS)+4(%rsp)
263 263 #endif
264 264
265 265 #if !defined(__xpv)
266 266 /*
267 267 * Switch to the kernel's GSBASE. Neither GSBASE nor the ill-named
268 268 * KGSBASE can be trusted, as the kernel may or may not have already
269 269 * done a swapgs. All is not lost, as the kernel can divine the correct
270 270 * value for us. Note that the previous GSBASE is saved in the
271 271 * KDI_SAVE_REGS macro to prevent a usermode process's GSBASE from being
272 272 * blown away. On the hypervisor, we don't need to do this, since it's
273 273 * ensured we're on our requested kernel GSBASE already.
274 274 */
275 275 subq $10, %rsp
276 276 sgdt (%rsp)
277 277 movq 2(%rsp), %rdi /* gdt base now in %rdi */
278 278 addq $10, %rsp
279 279 call kdi_gdt2gsbase /* returns kernel's GSBASE in %rax */
280 280
281 281 movq %rax, %rdx
282 282 shrq $32, %rdx
283 283 movl $MSR_AMD_GSBASE, %ecx
284 284 wrmsr
285 285
286 286 /*
287 287 * In the trampoline we stashed the incoming %cr3. Copy this into
288 288 * the kdiregs for restoration and later use.
289 289 */
290 290 mov %gs:(CPU_KPTI_DBG+KPTI_TR_CR3), %rdx
291 291 mov %rdx, REG_OFF(KDIREG_CR3)(%rsp)
292 292 /*
293 293 * Switch to the kernel's %cr3. From the early interrupt handler
294 294 * until now we've been running on the "paranoid" %cr3 (that of kas
295 295 * from early in boot).
296 296 *
297 297 * If we took the interrupt from somewhere already on the kas/paranoid
298 298 * %cr3 though, don't change it (this could happen if kcr3 is corrupt
299 299 * and we took a gptrap earlier from this very code).
300 300 */
301 301 cmpq %rdx, kpti_safe_cr3
302 302 je .no_kcr3
303 303 mov %gs:CPU_KPTI_KCR3, %rdx
304 304 cmpq $0, %rdx
305 305 je .no_kcr3
306 306 mov %rdx, %cr3
307 307 .no_kcr3:
308 308
309 309 #endif /* __xpv */
310 310
311 311 GET_CPUSAVE_ADDR /* %rax = cpusave, %rbx = CPU ID */
312 312
313 313 ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)
314 314
315 315 ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_MASTER, %rdx)
316 316
317 317 movq REG_OFF(KDIREG_RIP)(%rsp), %rcx
318 318 ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)
319 319 ADD_CRUMB(%rax, KRM_SP, %rsp, %rdx)
320 320 movq REG_OFF(KDIREG_TRAPNO)(%rsp), %rcx
321 321 ADD_CRUMB(%rax, KRM_TRAPNO, %rcx, %rdx)
322 322
323 323 movq %rsp, %rbp
324 324 pushq %rax
325 325
326 326 /*
327 327 	 * Were we in the debugger when we took the trap (i.e. was %rsp in one
328 328 * of the debugger's memory ranges)?
329 329 */
330 330 leaq kdi_memranges, %rcx
331 331 movl kdi_nmemranges, %edx
332 332 1:
333 333 cmpq MR_BASE(%rcx), %rsp
334 334 jl 2f /* below this range -- try the next one */
335 335 cmpq MR_LIM(%rcx), %rsp
336 336 jg 2f /* above this range -- try the next one */
337 337 jmp 3f /* matched within this range */
338 338
339 339 2:
340 340 decl %edx
341 341 jz kdi_save_common_state /* %rsp not within debugger memory */
342 342 addq $MR_SIZE, %rcx
343 343 jmp 1b
344 344
345 345 3: /*
346 346 * The master is still set. That should only happen if we hit a trap
347 347 * while running in the debugger. Note that it may be an intentional
348 348 * fault. kmdb_dpi_handle_fault will sort it all out.
349 349 */
350 350
351 351 movq REG_OFF(KDIREG_TRAPNO)(%rbp), %rdi
352 352 movq REG_OFF(KDIREG_RIP)(%rbp), %rsi
353 353 movq REG_OFF(KDIREG_RSP)(%rbp), %rdx
354 354 movq %rbx, %rcx /* cpuid */
355 355
356 356 call kdi_dvec_handle_fault
357 357
358 358 /*
359 359 * If we're here, we ran into a debugger problem, and the user
360 360 * elected to solve it by having the debugger debug itself. The
361 361 * state we're about to save is that of the debugger when it took
362 362 * the fault.
363 363 */
364 364
365 365 jmp kdi_save_common_state
366 366
367 367 SET_SIZE(kdi_master_entry)
368 368 SET_SIZE(kdi_cmnint)
369 369
370 370 /*
371 371 * The cross-call handler for slave CPUs.
372 372 *
373 373 * The debugger is single-threaded, so only one CPU, called the master, may be
374 374 * running it at any given time. The other CPUs, known as slaves, spin in a
375 375 * busy loop until there's something for them to do. This is the entry point
376 376 * for the slaves - they'll be sent here in response to a cross-call sent by the
377 377 * master.
378 378 */
379 379
380 380 ENTRY_NP(kdi_slave_entry)
381 381
382 382 /*
383 383 * Cross calls are implemented as function calls, so our stack currently
384 384 * looks like one you'd get from a zero-argument function call. That
385 385 * is, there's the return %rip at %rsp, and that's about it. We need
386 386 * to make it look like an interrupt stack. When we first save, we'll
387 387 * reverse the saved %ss and %rip, which we'll fix back up when we've
388 388 * freed up some general-purpose registers. We'll also need to fix up
389 389 * the saved %rsp.
390 390 */
391 391
392 392 pushq %rsp /* pushed value off by 8 */
393 393 pushfq
394 394 CLI(%rax)
395 395 pushq $KCS_SEL
396 396 clrq %rax
397 397 movw %ss, %ax
398 398 pushq %rax /* rip should be here */
↓ open down ↓ |
398 lines elided |
↑ open up ↑ |
399 399 pushq $-1 /* phony trap error code */
400 400 pushq $-1 /* phony trap number */
401 401
402 402 subq $REG_OFF(KDIREG_TRAPNO), %rsp
403 403 KDI_SAVE_REGS(%rsp)
404 404
405 405 movq %cr3, %rax
406 406 movq %rax, REG_OFF(KDIREG_CR3)(%rsp)
407 407
408 408 movq REG_OFF(KDIREG_SS)(%rsp), %rax
409 + movq %rax, REG_OFF(KDIREG_SAVPC)(%rsp)
409 410 xchgq REG_OFF(KDIREG_RIP)(%rsp), %rax
410 411 movq %rax, REG_OFF(KDIREG_SS)(%rsp)
411 412
412 413 movq REG_OFF(KDIREG_RSP)(%rsp), %rax
413 414 addq $8, %rax
414 415 movq %rax, REG_OFF(KDIREG_RSP)(%rsp)
415 416
416 417 /*
417 418 * We've saved all of the general-purpose registers, and have a stack
418 419 * that is irettable (after we strip down to the error code)
419 420 */
420 421
421 422 GET_CPUSAVE_ADDR /* %rax = cpusave, %rbx = CPU ID */
422 423
423 424 ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)
424 425
425 426 ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_SLAVE, %rdx)
426 427
427 428 movq REG_OFF(KDIREG_RIP)(%rsp), %rcx
428 429 ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)
430 + movq REG_OFF(KDIREG_RSP)(%rsp), %rcx
431 + ADD_CRUMB(%rax, KRM_SP, %rcx, %rdx)
432 + ADD_CRUMB(%rax, KRM_TRAPNO, $-1, %rdx)
429 433
434 + movq $KDI_CPU_STATE_SLAVE, KRS_CPU_STATE(%rax)
435 +
430 436 pushq %rax
431 437 jmp kdi_save_common_state
432 438
433 439 SET_SIZE(kdi_slave_entry)
434 440
435 441 /*
436 442 * The state of the world:
437 443 *
438 444 * The stack has a complete set of saved registers and segment
439 445 * selectors, arranged in the kdi_regs.h order. It also has a pointer
440 446 * to our cpusave area.
441 447 *
442 448 * We need to save, into the cpusave area, a pointer to these saved
443 449 * registers. First we check whether we should jump straight back to
444 450 * the kernel. If not, we save a few more registers, ready the
445 451 * machine for debugger entry, and enter the debugger.
446 452 */
447 453
448 454 ENTRY_NP(kdi_save_common_state)
449 455
450 456 popq %rdi /* the cpusave area */
451 457 movq %rsp, KRS_GREGS(%rdi) /* save ptr to current saved regs */
452 458
453 459 pushq %rdi
454 460 call kdi_trap_pass
455 461 testq %rax, %rax
456 462 jnz kdi_pass_to_kernel
457 463 popq %rax /* cpusave in %rax */
458 464
459 465 SAVE_IDTGDT
460 466
461 467 #if !defined(__xpv)
462 468 /* Save off %cr0, and clear write protect */
463 469 movq %cr0, %rcx
464 470 movq %rcx, KRS_CR0(%rax)
465 471 andq $_BITNOT(CR0_WP), %rcx
466 472 movq %rcx, %cr0
467 473 #endif
468 474
469 475 /* Save the debug registers and disable any active watchpoints */
470 476
471 477 movq %rax, %r15 /* save cpusave area ptr */
472 478 movl $7, %edi
473 479 call kdi_dreg_get
474 480 movq %rax, KRS_DRCTL(%r15)
475 481
476 482 andq $_BITNOT(KDIREG_DRCTL_WPALLEN_MASK), %rax
477 483 movq %rax, %rsi
478 484 movl $7, %edi
479 485 call kdi_dreg_set
480 486
481 487 movl $6, %edi
482 488 call kdi_dreg_get
483 489 movq %rax, KRS_DRSTAT(%r15)
484 490
485 491 movl $0, %edi
486 492 call kdi_dreg_get
487 493 movq %rax, KRS_DROFF(0)(%r15)
488 494
489 495 movl $1, %edi
490 496 call kdi_dreg_get
491 497 movq %rax, KRS_DROFF(1)(%r15)
492 498
493 499 movl $2, %edi
494 500 call kdi_dreg_get
495 501 movq %rax, KRS_DROFF(2)(%r15)
496 502
497 503 movl $3, %edi
498 504 call kdi_dreg_get
499 505 movq %rax, KRS_DROFF(3)(%r15)
500 506
501 507 movq %r15, %rax /* restore cpu save area to rax */
502 508
503 509 clrq %rbp /* stack traces should end here */
504 510
505 511 pushq %rax
506 512 movq %rax, %rdi /* cpusave */
507 513
508 514 call kdi_debugger_entry
509 515
510 516 /* Pass cpusave to kdi_resume */
511 517 popq %rdi
512 518
513 519 jmp kdi_resume
514 520
515 521 SET_SIZE(kdi_save_common_state)
516 522
517 523 /*
518 524 * Resume the world. The code that calls kdi_resume has already
519 525 * decided whether or not to restore the IDT.
520 526 */
521 527 /* cpusave in %rdi */
522 528 ENTRY_NP(kdi_resume)
523 529
524 530 /*
525 531 * Send this CPU back into the world
526 532 */
527 533 #if !defined(__xpv)
528 534 movq KRS_CR0(%rdi), %rdx
529 535 movq %rdx, %cr0
530 536 #endif
531 537
532 538 KDI_RESTORE_DEBUGGING_STATE
533 539
534 540 movq KRS_GREGS(%rdi), %rsp
535 541
536 542 #if !defined(__xpv)
537 543 /*
538 544 * If we're going back via tr_iret_kdi, then we want to copy the
539 545 * final %cr3 we're going to back into the kpti_dbg area now.
540 546 *
541 547 * Since the trampoline needs to find the kpti_dbg too, we enter it
542 548 * with %r13 set to point at that. The real %r13 (to restore before
543 549 * the iret) we stash in the kpti_dbg itself.
544 550 */
545 551 movq %gs:CPU_SELF, %r13 /* can't leaq %gs:*, use self-ptr */
546 552 addq $CPU_KPTI_DBG, %r13
547 553
548 554 movq REG_OFF(KDIREG_R13)(%rsp), %rdx
549 555 movq %rdx, KPTI_R13(%r13)
550 556
551 557 movq REG_OFF(KDIREG_CR3)(%rsp), %rdx
552 558 movq %rdx, KPTI_TR_CR3(%r13)
553 559
554 560 /* The trampoline will undo this later. */
555 561 movq %r13, REG_OFF(KDIREG_R13)(%rsp)
556 562 #endif
557 563
558 564 KDI_RESTORE_REGS(%rsp)
559 565 addq $REG_OFF(KDIREG_RIP), %rsp /* Discard state, trapno, err */
560 566 /*
561 567 * The common trampoline code will restore %cr3 to the right value
562 568 * for either kernel or userland.
563 569 */
564 570 #if !defined(__xpv)
565 571 jmp tr_iret_kdi
566 572 #else
567 573 IRET
568 574 #endif
569 575 /*NOTREACHED*/
570 576 SET_SIZE(kdi_resume)
571 577
572 578
573 579 /*
574 580 * We took a trap that should be handled by the kernel, not KMDB.
575 581 *
576 582 * We're hard-coding the three cases where KMDB has installed permanent
577 583 * handlers, since after we KDI_RESTORE_REGS(), we don't have registers
578 584 * to work with; we can't use a global since other CPUs can easily pass
579 585 * through here at the same time.
580 586 *
581 587 * Note that we handle T_DBGENTR since userspace might have tried it.
582 588 *
583 589 * The trap handler will expect the stack to be in trap order, with %rip
584 590 * being the last entry, so we'll need to restore all our regs. On
585 591 * i86xpv we'll need to compensate for XPV_TRAP_POP.
586 592 *
587 593 * %rax on entry is either 1 or 2, which is from kdi_trap_pass().
588 594 * kdi_cmnint stashed the original %cr3 into KDIREG_CR3, then (probably)
589 595 * switched us to the CPU's kf_kernel_cr3. But we're about to call, for
590 596 * example:
591 597 *
592 598 * dbgtrap->trap()->tr_iret_kernel
593 599 *
594 600 	 * which, unlike tr_iret_kdi, doesn't restore the original %cr3, so
595 601 * we'll do so here if needed.
596 602 *
597 603 * This isn't just a matter of tidiness: for example, consider:
598 604 *
599 605 * hat_switch(oldhat=kas.a_hat, newhat=prochat)
600 606 * setcr3()
601 607 * reset_kpti()
602 608 * *brktrap* due to fbt on reset_kpti:entry
603 609 *
604 610 * Here, we have the new hat's %cr3, but we haven't yet updated
605 611 	 * kf_kernel_cr3 (so it's currently kas's). So if we don't restore here,
606 612 * we'll stay on kas's cr3 value on returning from the trap: not good if
607 613 * we fault on a userspace address.
608 614 */
609 615 ENTRY_NP(kdi_pass_to_kernel)
610 616
611 617 popq %rdi /* cpusave */
612 618 movq $KDI_CPU_STATE_NONE, KRS_CPU_STATE(%rdi)
613 619 movq KRS_GREGS(%rdi), %rsp
614 620
615 621 cmpq $2, %rax
616 622 jne no_restore_cr3
617 623 movq REG_OFF(KDIREG_CR3)(%rsp), %r11
618 624 movq %r11, %cr3
619 625
620 626 no_restore_cr3:
621 627 movq REG_OFF(KDIREG_TRAPNO)(%rsp), %rdi
622 628
623 629 cmpq $T_SGLSTP, %rdi
624 630 je kdi_pass_dbgtrap
625 631 cmpq $T_BPTFLT, %rdi
626 632 je kdi_pass_brktrap
627 633 cmpq $T_DBGENTR, %rdi
628 634 je kdi_pass_invaltrap
629 635 /*
630 636 * Hmm, unknown handler. Somebody forgot to update this when they
631 637 * added a new trap interposition... try to drop back into kmdb.
632 638 */
633 639 int $T_DBGENTR
634 640
635 641 #define CALL_TRAP_HANDLER(name) \
636 642 KDI_RESTORE_REGS(%rsp); \
637 643 /* Discard state, trapno, err */ \
638 644 addq $REG_OFF(KDIREG_RIP), %rsp; \
639 645 XPV_TRAP_PUSH; \
640 646 jmp %cs:name
641 647
642 648 kdi_pass_dbgtrap:
643 649 CALL_TRAP_HANDLER(dbgtrap)
644 650 /*NOTREACHED*/
645 651 kdi_pass_brktrap:
646 652 CALL_TRAP_HANDLER(brktrap)
647 653 /*NOTREACHED*/
648 654 kdi_pass_invaltrap:
649 655 CALL_TRAP_HANDLER(invaltrap)
650 656 /*NOTREACHED*/
651 657
652 658 SET_SIZE(kdi_pass_to_kernel)
653 659
654 660 /*
655 661 * A minimal version of mdboot(), to be used by the master CPU only.
656 662 */
657 663 ENTRY_NP(kdi_reboot)
658 664
659 665 movl $AD_BOOT, %edi
660 666 movl $A_SHUTDOWN, %esi
661 667 call *psm_shutdownf
662 668 #if defined(__xpv)
663 669 movl $SHUTDOWN_reboot, %edi
664 670 call HYPERVISOR_shutdown
665 671 #else
666 672 call reset
667 673 #endif
668 674 /*NOTREACHED*/
669 675
670 676 SET_SIZE(kdi_reboot)
671 677
672 678 ENTRY_NP(kdi_cpu_debug_init)
673 679 pushq %rbp
674 680 movq %rsp, %rbp
675 681
676 682 pushq %rbx /* macro will clobber %rbx */
677 683 KDI_RESTORE_DEBUGGING_STATE
678 684 popq %rbx
679 685
680 686 leave
681 687 ret
682 688 SET_SIZE(kdi_cpu_debug_init)
683 689
684 690 #define GETDREG(name, r) \
685 691 ENTRY_NP(name); \
686 692 movq r, %rax; \
687 693 ret; \
688 694 SET_SIZE(name)
689 695
690 696 #define SETDREG(name, r) \
691 697 ENTRY_NP(name); \
692 698 movq %rdi, r; \
693 699 ret; \
694 700 SET_SIZE(name)
695 701
696 702 GETDREG(kdi_getdr0, %dr0)
697 703 GETDREG(kdi_getdr1, %dr1)
698 704 GETDREG(kdi_getdr2, %dr2)
699 705 GETDREG(kdi_getdr3, %dr3)
700 706 GETDREG(kdi_getdr6, %dr6)
701 707 GETDREG(kdi_getdr7, %dr7)
702 708
703 709 SETDREG(kdi_setdr0, %dr0)
704 710 SETDREG(kdi_setdr1, %dr1)
705 711 SETDREG(kdi_setdr2, %dr2)
706 712 SETDREG(kdi_setdr3, %dr3)
707 713 SETDREG(kdi_setdr6, %dr6)
708 714 SETDREG(kdi_setdr7, %dr7)
709 715
710 716 #endif /* !__lint */
↓ open down ↓ |
271 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX