Print this page
9685 KPTI %cr3 handling needs fixes
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/intel/kdi/kdi_asm.s
+++ new/usr/src/uts/intel/kdi/kdi_asm.s
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 *
26 26 * Copyright 2018 Joyent, Inc.
27 27 */
28 28
29 29 /*
30 30 * Debugger entry and exit for both master and slave CPUs. kdi_idthdl.s contains
31 31 * the IDT stubs that drop into here (mainly via kdi_cmnint).
32 32 */
33 33
34 34 #if defined(__lint)
35 35 #include <sys/types.h>
36 36 #else
37 37
38 38 #include <sys/segments.h>
39 39 #include <sys/asm_linkage.h>
40 40 #include <sys/controlregs.h>
41 41 #include <sys/x86_archext.h>
42 42 #include <sys/privregs.h>
43 43 #include <sys/machprivregs.h>
44 44 #include <sys/kdi_regs.h>
45 45 #include <sys/psw.h>
46 46 #include <sys/uadmin.h>
47 47 #ifdef __xpv
48 48 #include <sys/hypervisor.h>
49 49 #endif
50 50 #include <kdi_assym.h>
51 51 #include <assym.h>
52 52
53 53 /* clobbers %rdx, %rcx, returns addr in %rax, CPU ID in %rbx */
54 54 #define GET_CPUSAVE_ADDR \
55 55 movzbq %gs:CPU_ID, %rbx; \
56 56 movq %rbx, %rax; \
57 57 movq $KRS_SIZE, %rcx; \
58 58 mulq %rcx; \
59 59 movq $kdi_cpusave, %rdx; \
60 60 /*CSTYLED*/ \
61 61 addq (%rdx), %rax
62 62
63 63 /*
64 64 * Save copies of the IDT and GDT descriptors. Note that we only save the IDT
65 65 * and GDT if the IDT isn't ours, as we may be legitimately re-entering the
66 66 * debugger through the trap handler. We don't want to clobber the saved IDT
67 67 * in the process, as we'd end up resuming the world on our IDT.
68 68 */
69 69 #define SAVE_IDTGDT \
70 70 movq %gs:CPU_IDT, %r11; \
71 71 leaq kdi_idt(%rip), %rsi; \
72 72 cmpq %rsi, %r11; \
73 73 je 1f; \
74 74 movq %r11, KRS_IDT(%rax); \
75 75 movq %gs:CPU_GDT, %r11; \
76 76 movq %r11, KRS_GDT(%rax); \
77 77 1:
78 78
79 79 #ifdef __xpv
80 80
81 81 /*
82 82 * Already on kernel gsbase via the hypervisor.
83 83 */
84 84 #define SAVE_GSBASE(reg) /* nothing */
85 85 #define RESTORE_GSBASE(reg) /* nothing */
86 86
87 87 #else
88 88
89 89 #define SAVE_GSBASE(base) \
90 90 movl $MSR_AMD_GSBASE, %ecx; \
91 91 rdmsr; \
92 92 shlq $32, %rdx; \
93 93 orq %rax, %rdx; \
94 94 movq %rdx, REG_OFF(KDIREG_GSBASE)(base); \
95 95 movl $MSR_AMD_KGSBASE, %ecx; \
96 96 rdmsr; \
97 97 shlq $32, %rdx; \
98 98 orq %rax, %rdx; \
99 99 movq %rdx, REG_OFF(KDIREG_KGSBASE)(base)
100 100
101 101 /*
102 102 * We shouldn't have stomped on KGSBASE, so don't try to restore it.
103 103 */
104 104 #define RESTORE_GSBASE(base) \
105 105 movq REG_OFF(KDIREG_GSBASE)(base), %rdx; \
106 106 movq %rdx, %rax; \
107 107 shrq $32, %rdx; \
108 108 movl $MSR_AMD_GSBASE, %ecx; \
109 109 wrmsr
110 110
111 111 #endif /* __xpv */
112 112
113 113 /*
114 114 * %ss, %rsp, %rflags, %cs, %rip, %err, %trapno are already on the stack.
115 115 */
116 116 #define KDI_SAVE_REGS(base) \
117 117 movq %rdi, REG_OFF(KDIREG_RDI)(base); \
118 118 movq %rsi, REG_OFF(KDIREG_RSI)(base); \
119 119 movq %rdx, REG_OFF(KDIREG_RDX)(base); \
120 120 movq %rcx, REG_OFF(KDIREG_RCX)(base); \
121 121 movq %r8, REG_OFF(KDIREG_R8)(base); \
122 122 movq %r9, REG_OFF(KDIREG_R9)(base); \
123 123 movq %rax, REG_OFF(KDIREG_RAX)(base); \
124 124 movq %rbx, REG_OFF(KDIREG_RBX)(base); \
125 125 movq %rbp, REG_OFF(KDIREG_RBP)(base); \
126 126 movq %r10, REG_OFF(KDIREG_R10)(base); \
127 127 movq %r11, REG_OFF(KDIREG_R11)(base); \
128 128 movq %r12, REG_OFF(KDIREG_R12)(base); \
129 129 movq %r13, REG_OFF(KDIREG_R13)(base); \
130 130 movq %r14, REG_OFF(KDIREG_R14)(base); \
131 131 movq %r15, REG_OFF(KDIREG_R15)(base); \
132 132 movq %rbp, REG_OFF(KDIREG_SAVFP)(base); \
133 133 movq REG_OFF(KDIREG_RIP)(base), %rax; \
134 134 movq %rax, REG_OFF(KDIREG_SAVPC)(base); \
135 135 movq %cr2, %rax; \
136 136 movq %rax, REG_OFF(KDIREG_CR2)(base); \
137 137 clrq %rax; \
138 138 movw %ds, %ax; \
139 139 movq %rax, REG_OFF(KDIREG_DS)(base); \
140 140 movw %es, %ax; \
141 141 movq %rax, REG_OFF(KDIREG_ES)(base); \
142 142 movw %fs, %ax; \
143 143 movq %rax, REG_OFF(KDIREG_FS)(base); \
144 144 movw %gs, %ax; \
145 145 movq %rax, REG_OFF(KDIREG_GS)(base); \
146 146 SAVE_GSBASE(base)
147 147
148 148 #define KDI_RESTORE_REGS(base) \
149 149 movq base, %rdi; \
150 150 RESTORE_GSBASE(%rdi); \
151 151 movq REG_OFF(KDIREG_ES)(%rdi), %rax; \
152 152 movw %ax, %es; \
153 153 movq REG_OFF(KDIREG_DS)(%rdi), %rax; \
154 154 movw %ax, %ds; \
155 155 movq REG_OFF(KDIREG_CR2)(base), %rax; \
156 156 movq %rax, %cr2; \
157 157 movq REG_OFF(KDIREG_R15)(%rdi), %r15; \
158 158 movq REG_OFF(KDIREG_R14)(%rdi), %r14; \
159 159 movq REG_OFF(KDIREG_R13)(%rdi), %r13; \
160 160 movq REG_OFF(KDIREG_R12)(%rdi), %r12; \
161 161 movq REG_OFF(KDIREG_R11)(%rdi), %r11; \
162 162 movq REG_OFF(KDIREG_R10)(%rdi), %r10; \
163 163 movq REG_OFF(KDIREG_RBP)(%rdi), %rbp; \
164 164 movq REG_OFF(KDIREG_RBX)(%rdi), %rbx; \
165 165 movq REG_OFF(KDIREG_RAX)(%rdi), %rax; \
166 166 movq REG_OFF(KDIREG_R9)(%rdi), %r9; \
167 167 movq REG_OFF(KDIREG_R8)(%rdi), %r8; \
168 168 movq REG_OFF(KDIREG_RCX)(%rdi), %rcx; \
169 169 movq REG_OFF(KDIREG_RDX)(%rdi), %rdx; \
170 170 movq REG_OFF(KDIREG_RSI)(%rdi), %rsi; \
171 171 movq REG_OFF(KDIREG_RDI)(%rdi), %rdi
172 172
173 173 /*
174 174 * Given the address of the current CPU's cpusave area in %rax, the following
175 175 * macro restores the debugging state to said CPU. Restored state includes
176 176 * the debug registers from the global %dr variables.
177 177 *
178 178 * Takes the cpusave area in %rdi as a parameter.
179 179 */
180 180 #define KDI_RESTORE_DEBUGGING_STATE \
181 181 pushq %rdi; \
182 182 leaq kdi_drreg(%rip), %r15; \
183 183 movl $7, %edi; \
184 184 movq DR_CTL(%r15), %rsi; \
185 185 call kdi_dreg_set; \
186 186 \
187 187 movl $6, %edi; \
188 188 movq $KDIREG_DRSTAT_RESERVED, %rsi; \
189 189 call kdi_dreg_set; \
190 190 \
191 191 movl $0, %edi; \
192 192 movq DRADDR_OFF(0)(%r15), %rsi; \
193 193 call kdi_dreg_set; \
194 194 movl $1, %edi; \
195 195 movq DRADDR_OFF(1)(%r15), %rsi; \
196 196 call kdi_dreg_set; \
197 197 movl $2, %edi; \
198 198 movq DRADDR_OFF(2)(%r15), %rsi; \
199 199 call kdi_dreg_set; \
200 200 movl $3, %edi; \
201 201 movq DRADDR_OFF(3)(%r15), %rsi; \
202 202 call kdi_dreg_set; \
203 203 popq %rdi;
204 204
205 205 /*
206 206 * Each cpusave buffer has an area set aside for a ring buffer of breadcrumbs.
207 207 * The following macros manage the buffer.
208 208 */
209 209
210 210 /* Advance the ring buffer */
211 211 #define ADVANCE_CRUMB_POINTER(cpusave, tmp1, tmp2) \
212 212 movq KRS_CURCRUMBIDX(cpusave), tmp1; \
213 213 cmpq $[KDI_NCRUMBS - 1], tmp1; \
214 214 jge 1f; \
215 215 /* Advance the pointer and index */ \
216 216 addq $1, tmp1; \
217 217 movq tmp1, KRS_CURCRUMBIDX(cpusave); \
218 218 movq KRS_CURCRUMB(cpusave), tmp1; \
219 219 addq $KRM_SIZE, tmp1; \
220 220 jmp 2f; \
221 221 1: /* Reset the pointer and index */ \
222 222 movq $0, KRS_CURCRUMBIDX(cpusave); \
223 223 leaq KRS_CRUMBS(cpusave), tmp1; \
224 224 2: movq tmp1, KRS_CURCRUMB(cpusave); \
225 225 /* Clear the new crumb */ \
226 226 movq $KDI_NCRUMBS, tmp2; \
227 227 3: movq $0, -4(tmp1, tmp2, 4); \
228 228 decq tmp2; \
229 229 jnz 3b
230 230
231 231 /* Set a value in the current breadcrumb buffer */
232 232 #define ADD_CRUMB(cpusave, offset, value, tmp) \
233 233 movq KRS_CURCRUMB(cpusave), tmp; \
234 234 movq value, offset(tmp)
235 235
236 236 /* XXX implement me */
237 237 ENTRY_NP(kdi_nmiint)
238 238 clrq %rcx
239 239 movq (%rcx), %rcx
240 240 SET_SIZE(kdi_nmiint)
241 241
242 242 /*
243 243 * The main entry point for master CPUs. It also serves as the trap
244 244 * handler for all traps and interrupts taken during single-step.
245 245 */
246 246 ENTRY_NP(kdi_cmnint)
247 247 ALTENTRY(kdi_master_entry)
248 248
249 249 pushq %rax
250 250 CLI(%rax)
251 251 popq %rax
252 252
253 253 /* Save current register state */
254 254 subq $REG_OFF(KDIREG_TRAPNO), %rsp
255 255 KDI_SAVE_REGS(%rsp)
256 256
257 257 #ifdef __xpv
258 258 /*
259 259 * Clear saved_upcall_mask in unused byte of cs slot on stack.
260 260 * It can only confuse things.
261 261 */
262 262 movb $0, REG_OFF(KDIREG_CS)+4(%rsp)
263 263 #endif
264 264
265 265 #if !defined(__xpv)
266 266 /*
267 267 * Switch to the kernel's GSBASE. Neither GSBASE nor the ill-named
268 268 * KGSBASE can be trusted, as the kernel may or may not have already
269 269 * done a swapgs. All is not lost, as the kernel can divine the correct
270 270 * value for us. Note that the previous GSBASE is saved in the
271 271 * KDI_SAVE_REGS macro to prevent a usermode process's GSBASE from being
272 272 * blown away. On the hypervisor, we don't need to do this, since it's
273 273 * ensured we're on our requested kernel GSBASE already.
274 274 */
275 275 subq $10, %rsp
276 276 sgdt (%rsp)
277 277 movq 2(%rsp), %rdi /* gdt base now in %rdi */
278 278 addq $10, %rsp
279 279 call kdi_gdt2gsbase /* returns kernel's GSBASE in %rax */
280 280
281 281 movq %rax, %rdx
282 282 shrq $32, %rdx
283 283 movl $MSR_AMD_GSBASE, %ecx
284 284 wrmsr
285 285
286 286 /*
287 287 * In the trampoline we stashed the incoming %cr3. Copy this into
288 288 * the kdiregs for restoration and later use.
289 289 */
290 290 mov %gs:(CPU_KPTI_DBG+KPTI_TR_CR3), %rdx
291 291 mov %rdx, REG_OFF(KDIREG_CR3)(%rsp)
292 292 /*
293 293 * Switch to the kernel's %cr3. From the early interrupt handler
294 294 * until now we've been running on the "paranoid" %cr3 (that of kas
295 295 * from early in boot).
296 296 *
297 297 * If we took the interrupt from somewhere already on the kas/paranoid
298 298 * %cr3 though, don't change it (this could happen if kcr3 is corrupt
299 299 * and we took a gptrap earlier from this very code).
300 300 */
301 301 cmpq %rdx, kpti_safe_cr3
302 302 je .no_kcr3
303 303 mov %gs:CPU_KPTI_KCR3, %rdx
304 304 cmpq $0, %rdx
305 305 je .no_kcr3
306 306 mov %rdx, %cr3
307 307 .no_kcr3:
308 308
309 309 #endif /* __xpv */
310 310
311 311 GET_CPUSAVE_ADDR /* %rax = cpusave, %rbx = CPU ID */
312 312
313 313 ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)
314 314
315 315 ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_MASTER, %rdx)
316 316
317 317 movq REG_OFF(KDIREG_RIP)(%rsp), %rcx
318 318 ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)
319 319 ADD_CRUMB(%rax, KRM_SP, %rsp, %rdx)
320 320 movq REG_OFF(KDIREG_TRAPNO)(%rsp), %rcx
321 321 ADD_CRUMB(%rax, KRM_TRAPNO, %rcx, %rdx)
322 322
323 323 movq %rsp, %rbp
324 324 pushq %rax
325 325
326 326 /*
327 327 * Were we in the debugger when we took the trap (i.e. was %esp in one
328 328 * of the debugger's memory ranges)?
329 329 */
330 330 leaq kdi_memranges, %rcx
331 331 movl kdi_nmemranges, %edx
332 332 1:
333 333 cmpq MR_BASE(%rcx), %rsp
334 334 jl 2f /* below this range -- try the next one */
335 335 cmpq MR_LIM(%rcx), %rsp
336 336 jg 2f /* above this range -- try the next one */
337 337 jmp 3f /* matched within this range */
338 338
339 339 2:
340 340 decl %edx
341 341 jz kdi_save_common_state /* %rsp not within debugger memory */
342 342 addq $MR_SIZE, %rcx
343 343 jmp 1b
344 344
345 345 3: /*
346 346 * The master is still set. That should only happen if we hit a trap
347 347 * while running in the debugger. Note that it may be an intentional
348 348 * fault. kmdb_dpi_handle_fault will sort it all out.
349 349 */
350 350
351 351 movq REG_OFF(KDIREG_TRAPNO)(%rbp), %rdi
352 352 movq REG_OFF(KDIREG_RIP)(%rbp), %rsi
353 353 movq REG_OFF(KDIREG_RSP)(%rbp), %rdx
354 354 movq %rbx, %rcx /* cpuid */
355 355
356 356 call kdi_dvec_handle_fault
357 357
358 358 /*
359 359 * If we're here, we ran into a debugger problem, and the user
360 360 * elected to solve it by having the debugger debug itself. The
361 361 * state we're about to save is that of the debugger when it took
362 362 * the fault.
363 363 */
364 364
365 365 jmp kdi_save_common_state
366 366
367 367 SET_SIZE(kdi_master_entry)
368 368 SET_SIZE(kdi_cmnint)
369 369
370 370 /*
371 371 * The cross-call handler for slave CPUs.
372 372 *
373 373 * The debugger is single-threaded, so only one CPU, called the master, may be
374 374 * running it at any given time. The other CPUs, known as slaves, spin in a
375 375 * busy loop until there's something for them to do. This is the entry point
376 376 * for the slaves - they'll be sent here in response to a cross-call sent by the
377 377 * master.
378 378 */
379 379
380 380 ENTRY_NP(kdi_slave_entry)
381 381
382 382 /*
383 383 * Cross calls are implemented as function calls, so our stack currently
384 384 * looks like one you'd get from a zero-argument function call. That
385 385 * is, there's the return %rip at %rsp, and that's about it. We need
386 386 * to make it look like an interrupt stack. When we first save, we'll
387 387 * reverse the saved %ss and %rip, which we'll fix back up when we've
388 388 * freed up some general-purpose registers. We'll also need to fix up
389 389 * the saved %rsp.
390 390 */
391 391
392 392 pushq %rsp /* pushed value off by 8 */
393 393 pushfq
394 394 CLI(%rax)
395 395 pushq $KCS_SEL
396 396 clrq %rax
397 397 movw %ss, %ax
398 398 pushq %rax /* rip should be here */
399 399 pushq $-1 /* phony trap error code */
400 400 pushq $-1 /* phony trap number */
401 401
402 402 subq $REG_OFF(KDIREG_TRAPNO), %rsp
403 403 KDI_SAVE_REGS(%rsp)
404 404
405 405 movq %cr3, %rax
406 406 movq %rax, REG_OFF(KDIREG_CR3)(%rsp)
407 407
408 408 movq REG_OFF(KDIREG_SS)(%rsp), %rax
409 409 xchgq REG_OFF(KDIREG_RIP)(%rsp), %rax
410 410 movq %rax, REG_OFF(KDIREG_SS)(%rsp)
411 411
412 412 movq REG_OFF(KDIREG_RSP)(%rsp), %rax
413 413 addq $8, %rax
414 414 movq %rax, REG_OFF(KDIREG_RSP)(%rsp)
415 415
416 416 /*
417 417 * We've saved all of the general-purpose registers, and have a stack
418 418 * that is irettable (after we strip down to the error code)
419 419 */
420 420
421 421 GET_CPUSAVE_ADDR /* %rax = cpusave, %rbx = CPU ID */
422 422
423 423 ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)
424 424
425 425 ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_SLAVE, %rdx)
426 426
427 427 movq REG_OFF(KDIREG_RIP)(%rsp), %rcx
428 428 ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)
429 429
430 430 pushq %rax
431 431 jmp kdi_save_common_state
432 432
433 433 SET_SIZE(kdi_slave_entry)
434 434
435 435 /*
436 436 * The state of the world:
437 437 *
438 438 * The stack has a complete set of saved registers and segment
439 439 * selectors, arranged in the kdi_regs.h order. It also has a pointer
440 440 * to our cpusave area.
441 441 *
442 442 * We need to save, into the cpusave area, a pointer to these saved
443 443 * registers. First we check whether we should jump straight back to
444 444 * the kernel. If not, we save a few more registers, ready the
[ ... 444 lines elided ... ]
445 445 * machine for debugger entry, and enter the debugger.
446 446 */
447 447
448 448 ENTRY_NP(kdi_save_common_state)
449 449
450 450 popq %rdi /* the cpusave area */
451 451 movq %rsp, KRS_GREGS(%rdi) /* save ptr to current saved regs */
452 452
453 453 pushq %rdi
454 454 call kdi_trap_pass
455 - cmpq $1, %rax
456 - je kdi_pass_to_kernel
455 + testq %rax, %rax
456 + jnz kdi_pass_to_kernel
457 457 popq %rax /* cpusave in %rax */
458 458
459 459 SAVE_IDTGDT
460 460
461 461 #if !defined(__xpv)
462 462 /* Save off %cr0, and clear write protect */
463 463 movq %cr0, %rcx
464 464 movq %rcx, KRS_CR0(%rax)
465 465 andq $_BITNOT(CR0_WP), %rcx
466 466 movq %rcx, %cr0
467 467 #endif
468 468
469 469 /* Save the debug registers and disable any active watchpoints */
470 470
471 471 movq %rax, %r15 /* save cpusave area ptr */
472 472 movl $7, %edi
473 473 call kdi_dreg_get
474 474 movq %rax, KRS_DRCTL(%r15)
475 475
476 476 andq $_BITNOT(KDIREG_DRCTL_WPALLEN_MASK), %rax
477 477 movq %rax, %rsi
478 478 movl $7, %edi
479 479 call kdi_dreg_set
480 480
481 481 movl $6, %edi
482 482 call kdi_dreg_get
483 483 movq %rax, KRS_DRSTAT(%r15)
484 484
485 485 movl $0, %edi
486 486 call kdi_dreg_get
487 487 movq %rax, KRS_DROFF(0)(%r15)
488 488
489 489 movl $1, %edi
490 490 call kdi_dreg_get
491 491 movq %rax, KRS_DROFF(1)(%r15)
492 492
493 493 movl $2, %edi
494 494 call kdi_dreg_get
495 495 movq %rax, KRS_DROFF(2)(%r15)
496 496
497 497 movl $3, %edi
498 498 call kdi_dreg_get
499 499 movq %rax, KRS_DROFF(3)(%r15)
500 500
501 501 movq %r15, %rax /* restore cpu save area to rax */
502 502
503 503 clrq %rbp /* stack traces should end here */
504 504
505 505 pushq %rax
506 506 movq %rax, %rdi /* cpusave */
507 507
508 508 call kdi_debugger_entry
509 509
510 510 /* Pass cpusave to kdi_resume */
511 511 popq %rdi
512 512
513 513 jmp kdi_resume
514 514
515 515 SET_SIZE(kdi_save_common_state)
516 516
517 517 /*
518 518 * Resume the world. The code that calls kdi_resume has already
519 519 * decided whether or not to restore the IDT.
520 520 */
521 521 /* cpusave in %rdi */
522 522 ENTRY_NP(kdi_resume)
523 523
524 524 /*
525 525 * Send this CPU back into the world
526 526 */
527 527 #if !defined(__xpv)
528 528 movq KRS_CR0(%rdi), %rdx
529 529 movq %rdx, %cr0
530 530 #endif
531 531
532 532 KDI_RESTORE_DEBUGGING_STATE
533 533
534 534 movq KRS_GREGS(%rdi), %rsp
535 535
536 536 #if !defined(__xpv)
537 537 /*
538 538 * If we're going back via tr_iret_kdi, then we want to copy the
539 539 * final %cr3 we're going to back into the kpti_dbg area now.
540 540 *
541 541 * Since the trampoline needs to find the kpti_dbg too, we enter it
542 542 * with %r13 set to point at that. The real %r13 (to restore before
543 543 * the iret) we stash in the kpti_dbg itself.
544 544 */
545 545 movq %gs:CPU_SELF, %r13 /* can't leaq %gs:*, use self-ptr */
546 546 addq $CPU_KPTI_DBG, %r13
547 547
548 548 movq REG_OFF(KDIREG_R13)(%rsp), %rdx
549 549 movq %rdx, KPTI_R13(%r13)
550 550
551 551 movq REG_OFF(KDIREG_CR3)(%rsp), %rdx
552 552 movq %rdx, KPTI_TR_CR3(%r13)
553 553
554 554 /* The trampoline will undo this later. */
555 555 movq %r13, REG_OFF(KDIREG_R13)(%rsp)
556 556 #endif
557 557
558 558 KDI_RESTORE_REGS(%rsp)
559 559 addq $REG_OFF(KDIREG_RIP), %rsp /* Discard state, trapno, err */
560 560 /*
561 561 * The common trampoline code will restore %cr3 to the right value
[ ... 95 lines elided ... ]
562 562 * for either kernel or userland.
563 563 */
564 564 #if !defined(__xpv)
565 565 jmp tr_iret_kdi
566 566 #else
567 567 IRET
568 568 #endif
569 569 /*NOTREACHED*/
570 570 SET_SIZE(kdi_resume)
571 571
572 - ENTRY_NP(kdi_pass_to_kernel)
573 572
574 - popq %rdi /* cpusave */
575 -
576 - movq $KDI_CPU_STATE_NONE, KRS_CPU_STATE(%rdi)
577 -
578 573 /*
579 - * Find the trap and vector off the right kernel handler. The trap
580 - * handler will expect the stack to be in trap order, with %rip being
581 - * the last entry, so we'll need to restore all our regs. On i86xpv
582 - * we'll need to compensate for XPV_TRAP_POP.
574 + * We took a trap that should be handled by the kernel, not KMDB.
583 575 *
584 576 * We're hard-coding the three cases where KMDB has installed permanent
585 577 * handlers, since after we KDI_RESTORE_REGS(), we don't have registers
586 578 * to work with; we can't use a global since other CPUs can easily pass
587 579 * through here at the same time.
588 580 *
589 581 * Note that we handle T_DBGENTR since userspace might have tried it.
582 + *
583 + * The trap handler will expect the stack to be in trap order, with %rip
584 + * being the last entry, so we'll need to restore all our regs. On
585 + * i86xpv we'll need to compensate for XPV_TRAP_POP.
586 + *
587 + * %rax on entry is either 1 or 2, which is from kdi_trap_pass().
588 + * kdi_cmnint stashed the original %cr3 into KDIREG_CR3, then (probably)
589 + * switched us to the CPU's kf_kernel_cr3. But we're about to call, for
590 + * example:
591 + *
592 + * dbgtrap->trap()->tr_iret_kernel
593 + *
594 + * which, unlike tr_iret_kdi, doesn't restore the original %cr3, so
595 + * we'll do so here if needed.
596 + *
597 + * This isn't just a matter of tidiness: for example, consider:
598 + *
599 + * hat_switch(oldhat=kas.a_hat, newhat=prochat)
600 + * setcr3()
601 + * reset_kpti()
602 + * *brktrap* due to fbt on reset_kpti:entry
603 + *
604 + * Here, we have the new hat's %cr3, but we haven't yet updated
605 + * kf_kernel_cr3 (so it's currently kas's). So if we don't restore here,
606 + * we'll stay on kas's cr3 value on returning from the trap: not good if
607 + * we fault on a userspace address.
590 608 */
609 + ENTRY_NP(kdi_pass_to_kernel)
610 +
611 + popq %rdi /* cpusave */
612 + movq $KDI_CPU_STATE_NONE, KRS_CPU_STATE(%rdi)
591 613 movq KRS_GREGS(%rdi), %rsp
614 +
615 + cmpq $2, %rax
616 + jne no_restore_cr3
617 + movq REG_OFF(KDIREG_CR3)(%rsp), %r11
618 + movq %r11, %cr3
619 +
620 +no_restore_cr3:
592 621 movq REG_OFF(KDIREG_TRAPNO)(%rsp), %rdi
622 +
593 623 cmpq $T_SGLSTP, %rdi
594 - je 1f
624 + je kdi_pass_dbgtrap
595 625 cmpq $T_BPTFLT, %rdi
596 - je 2f
626 + je kdi_pass_brktrap
597 627 cmpq $T_DBGENTR, %rdi
598 - je 3f
628 + je kdi_pass_invaltrap
599 629 /*
600 630 * Hmm, unknown handler. Somebody forgot to update this when they
601 631 * added a new trap interposition... try to drop back into kmdb.
602 632 */
603 633 int $T_DBGENTR
604 634
605 635 #define CALL_TRAP_HANDLER(name) \
606 636 KDI_RESTORE_REGS(%rsp); \
607 637 /* Discard state, trapno, err */ \
608 638 addq $REG_OFF(KDIREG_RIP), %rsp; \
609 639 XPV_TRAP_PUSH; \
610 640 jmp %cs:name
611 641
612 -1:
642 +kdi_pass_dbgtrap:
613 643 CALL_TRAP_HANDLER(dbgtrap)
614 644 /*NOTREACHED*/
615 -2:
645 +kdi_pass_brktrap:
616 646 CALL_TRAP_HANDLER(brktrap)
617 647 /*NOTREACHED*/
618 -3:
648 +kdi_pass_invaltrap:
619 649 CALL_TRAP_HANDLER(invaltrap)
620 650 /*NOTREACHED*/
621 651
622 652 SET_SIZE(kdi_pass_to_kernel)
623 653
624 654 /*
625 655 * A minimal version of mdboot(), to be used by the master CPU only.
626 656 */
627 657 ENTRY_NP(kdi_reboot)
628 658
629 659 movl $AD_BOOT, %edi
630 660 movl $A_SHUTDOWN, %esi
631 661 call *psm_shutdownf
632 662 #if defined(__xpv)
633 663 movl $SHUTDOWN_reboot, %edi
634 664 call HYPERVISOR_shutdown
635 665 #else
636 666 call reset
637 667 #endif
638 668 /*NOTREACHED*/
639 669
640 670 SET_SIZE(kdi_reboot)
641 671
642 672 ENTRY_NP(kdi_cpu_debug_init)
643 673 pushq %rbp
644 674 movq %rsp, %rbp
645 675
646 676 pushq %rbx /* macro will clobber %rbx */
647 677 KDI_RESTORE_DEBUGGING_STATE
648 678 popq %rbx
649 679
650 680 leave
651 681 ret
652 682 SET_SIZE(kdi_cpu_debug_init)
653 683
654 684 #define GETDREG(name, r) \
655 685 ENTRY_NP(name); \
656 686 movq r, %rax; \
657 687 ret; \
658 688 SET_SIZE(name)
659 689
660 690 #define SETDREG(name, r) \
661 691 ENTRY_NP(name); \
662 692 movq %rdi, r; \
663 693 ret; \
664 694 SET_SIZE(name)
665 695
666 696 GETDREG(kdi_getdr0, %dr0)
667 697 GETDREG(kdi_getdr1, %dr1)
668 698 GETDREG(kdi_getdr2, %dr2)
669 699 GETDREG(kdi_getdr3, %dr3)
670 700 GETDREG(kdi_getdr6, %dr6)
671 701 GETDREG(kdi_getdr7, %dr7)
672 702
673 703 SETDREG(kdi_setdr0, %dr0)
674 704 SETDREG(kdi_setdr1, %dr1)
675 705 SETDREG(kdi_setdr2, %dr2)
676 706 SETDREG(kdi_setdr3, %dr3)
677 707 SETDREG(kdi_setdr6, %dr6)
678 708 SETDREG(kdi_setdr7, %dr7)
679 709
680 710 #endif /* !__lint */
[ ... 52 lines elided ... ]