11859 need swapgs mitigation
Reviewed by: Robert Mustacchi <rm@fingolfin.org>
Reviewed by: Dan McDonald <danmcd@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
--- old/usr/src/uts/intel/kdi/kdi_asm.s
+++ new/usr/src/uts/intel/kdi/kdi_asm.s
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 *
26 - * Copyright 2018 Joyent, Inc.
26 + * Copyright 2019 Joyent, Inc.
27 27 */
28 28
29 29 /*
30 30 * Debugger entry and exit for both master and slave CPUs. kdi_idthdl.s contains
31 31 * the IDT stubs that drop into here (mainly via kdi_cmnint).
32 32 */
33 33
34 34 #if defined(__lint)
35 35 #include <sys/types.h>
36 36 #else
37 37
38 38 #include <sys/segments.h>
39 39 #include <sys/asm_linkage.h>
40 40 #include <sys/controlregs.h>
41 41 #include <sys/x86_archext.h>
42 42 #include <sys/privregs.h>
43 43 #include <sys/machprivregs.h>
44 44 #include <sys/kdi_regs.h>
45 45 #include <sys/psw.h>
46 46 #include <sys/uadmin.h>
47 47 #ifdef __xpv
48 48 #include <sys/hypervisor.h>
49 49 #endif
50 50 #include <kdi_assym.h>
51 51 #include <assym.h>
52 52
53 53 /* clobbers %rdx, %rcx, returns addr in %rax, CPU ID in %rbx */
54 54 #define GET_CPUSAVE_ADDR \
55 55 movzbq %gs:CPU_ID, %rbx; \
56 56 movq %rbx, %rax; \
57 57 movq $KRS_SIZE, %rcx; \
58 58 mulq %rcx; \
59 59 movq $kdi_cpusave, %rdx; \
60 60 /*CSTYLED*/ \
61 61 addq (%rdx), %rax
62 62
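/*
 * In effect, GET_CPUSAVE_ADDR computes
 *
 *	%rax = (base stored in kdi_cpusave) + CPU_ID * KRS_SIZE
 *
 * i.e. the address of this CPU's KRS_SIZE-byte save area, leaving the
 * CPU ID behind in %rbx for later use.
 */
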
63 63 /*
64 64 * Save copies of the IDT and GDT descriptors. Note that we only save the IDT
65 65 * and GDT if the IDT isn't ours, as we may be legitimately re-entering the
66 66 * debugger through the trap handler. We don't want to clobber the saved IDT
67 67 * in the process, as we'd end up resuming the world on our IDT.
68 68 */
69 69 #define SAVE_IDTGDT \
70 70 movq %gs:CPU_IDT, %r11; \
71 71 leaq kdi_idt(%rip), %rsi; \
72 72 cmpq %rsi, %r11; \
73 73 je 1f; \
74 74 movq %r11, KRS_IDT(%rax); \
75 75 movq %gs:CPU_GDT, %r11; \
76 76 movq %r11, KRS_GDT(%rax); \
77 77 1:
78 78
79 79 #ifdef __xpv
80 80
81 81 /*
82 82 * Already on kernel gsbase via the hypervisor.
83 83 */
84 84 #define SAVE_GSBASE(reg) /* nothing */
85 85 #define RESTORE_GSBASE(reg) /* nothing */
86 86
87 87 #else
88 88
89 89 #define SAVE_GSBASE(base) \
90 90 movl $MSR_AMD_GSBASE, %ecx; \
91 91 rdmsr; \
92 92 shlq $32, %rdx; \
93 93 orq %rax, %rdx; \
94 94 movq %rdx, REG_OFF(KDIREG_GSBASE)(base); \
95 95 movl $MSR_AMD_KGSBASE, %ecx; \
96 96 rdmsr; \
97 97 shlq $32, %rdx; \
98 98 orq %rax, %rdx; \
99 99 movq %rdx, REG_OFF(KDIREG_KGSBASE)(base)
100 100
101 101 /*
102 102 * We shouldn't have stomped on KGSBASE, so don't try to restore it.
103 103 */
104 104 #define RESTORE_GSBASE(base) \
105 105 movq REG_OFF(KDIREG_GSBASE)(base), %rdx; \
106 106 movq %rdx, %rax; \
107 107 shrq $32, %rdx; \
108 108 movl $MSR_AMD_GSBASE, %ecx; \
109 109 wrmsr
110 110
111 111 #endif /* __xpv */
112 112
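/*
 * A note on the MSR convention above: rdmsr returns the 64-bit MSR value
 * split across %edx:%eax (high:low 32 bits), and wrmsr consumes the same
 * split, hence the shlq/orq reassembly in SAVE_GSBASE and the
 * mirror-image shrq split in RESTORE_GSBASE.
 */
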
113 113 /*
114 114 * %ss, %rsp, %rflags, %cs, %rip, %err, %trapno are already on the stack.
115 115 */
116 116 #define KDI_SAVE_REGS(base) \
117 117 movq %rdi, REG_OFF(KDIREG_RDI)(base); \
118 118 movq %rsi, REG_OFF(KDIREG_RSI)(base); \
119 119 movq %rdx, REG_OFF(KDIREG_RDX)(base); \
120 120 movq %rcx, REG_OFF(KDIREG_RCX)(base); \
121 121 movq %r8, REG_OFF(KDIREG_R8)(base); \
122 122 movq %r9, REG_OFF(KDIREG_R9)(base); \
123 123 movq %rax, REG_OFF(KDIREG_RAX)(base); \
124 124 movq %rbx, REG_OFF(KDIREG_RBX)(base); \
125 125 movq %rbp, REG_OFF(KDIREG_RBP)(base); \
126 126 movq %r10, REG_OFF(KDIREG_R10)(base); \
127 127 movq %r11, REG_OFF(KDIREG_R11)(base); \
128 128 movq %r12, REG_OFF(KDIREG_R12)(base); \
129 129 movq %r13, REG_OFF(KDIREG_R13)(base); \
130 130 movq %r14, REG_OFF(KDIREG_R14)(base); \
131 131 movq %r15, REG_OFF(KDIREG_R15)(base); \
132 132 movq %rbp, REG_OFF(KDIREG_SAVFP)(base); \
133 133 movq REG_OFF(KDIREG_RIP)(base), %rax; \
134 134 movq %rax, REG_OFF(KDIREG_SAVPC)(base); \
135 135 movq %cr2, %rax; \
136 136 movq %rax, REG_OFF(KDIREG_CR2)(base); \
137 137 clrq %rax; \
138 138 movw %ds, %ax; \
139 139 movq %rax, REG_OFF(KDIREG_DS)(base); \
140 140 movw %es, %ax; \
141 141 movq %rax, REG_OFF(KDIREG_ES)(base); \
142 142 movw %fs, %ax; \
143 143 movq %rax, REG_OFF(KDIREG_FS)(base); \
144 144 movw %gs, %ax; \
145 145 movq %rax, REG_OFF(KDIREG_GS)(base); \
146 146 SAVE_GSBASE(base)
147 147
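/*
 * Beyond the kdi_regs.h slots proper, KDI_SAVE_REGS also copies %rbp and
 * the trap-pushed %rip into KDIREG_SAVFP/KDIREG_SAVPC, which appears to
 * give stack walkers a conventional frame to start from (note that
 * kdi_save_common_state later does "clrq %rbp" so traces end here).
 */
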
148 148 #define KDI_RESTORE_REGS(base) \
149 149 movq base, %rdi; \
150 150 RESTORE_GSBASE(%rdi); \
151 151 movq REG_OFF(KDIREG_ES)(%rdi), %rax; \
152 152 movw %ax, %es; \
153 153 movq REG_OFF(KDIREG_DS)(%rdi), %rax; \
154 154 movw %ax, %ds; \
155 155 movq REG_OFF(KDIREG_CR2)(base), %rax; \
156 156 movq %rax, %cr2; \
157 157 movq REG_OFF(KDIREG_R15)(%rdi), %r15; \
158 158 movq REG_OFF(KDIREG_R14)(%rdi), %r14; \
159 159 movq REG_OFF(KDIREG_R13)(%rdi), %r13; \
160 160 movq REG_OFF(KDIREG_R12)(%rdi), %r12; \
161 161 movq REG_OFF(KDIREG_R11)(%rdi), %r11; \
162 162 movq REG_OFF(KDIREG_R10)(%rdi), %r10; \
163 163 movq REG_OFF(KDIREG_RBP)(%rdi), %rbp; \
164 164 movq REG_OFF(KDIREG_RBX)(%rdi), %rbx; \
165 165 movq REG_OFF(KDIREG_RAX)(%rdi), %rax; \
166 166 movq REG_OFF(KDIREG_R9)(%rdi), %r9; \
167 167 movq REG_OFF(KDIREG_R8)(%rdi), %r8; \
168 168 movq REG_OFF(KDIREG_RCX)(%rdi), %rcx; \
169 169 movq REG_OFF(KDIREG_RDX)(%rdi), %rdx; \
170 170 movq REG_OFF(KDIREG_RSI)(%rdi), %rsi; \
171 171 movq REG_OFF(KDIREG_RDI)(%rdi), %rdi
172 172
173 173 /*
174 174  * Given the address of the current CPU's cpusave area in %rdi, the following
175 175 * macro restores the debugging state to said CPU. Restored state includes
176 176 * the debug registers from the global %dr variables.
177 177 *
178 178 * Takes the cpusave area in %rdi as a parameter.
179 179 */
180 180 #define KDI_RESTORE_DEBUGGING_STATE \
181 181 pushq %rdi; \
182 182 leaq kdi_drreg(%rip), %r15; \
183 183 movl $7, %edi; \
184 184 movq DR_CTL(%r15), %rsi; \
185 185 call kdi_dreg_set; \
186 186 \
187 187 movl $6, %edi; \
188 188 movq $KDIREG_DRSTAT_RESERVED, %rsi; \
189 189 call kdi_dreg_set; \
190 190 \
191 191 movl $0, %edi; \
192 192 movq DRADDR_OFF(0)(%r15), %rsi; \
193 193 call kdi_dreg_set; \
194 194 movl $1, %edi; \
195 195 movq DRADDR_OFF(1)(%r15), %rsi; \
196 196 call kdi_dreg_set; \
197 197 movl $2, %edi; \
198 198 movq DRADDR_OFF(2)(%r15), %rsi; \
199 199 call kdi_dreg_set; \
200 200 movl $3, %edi; \
201 201 movq DRADDR_OFF(3)(%r15), %rsi; \
202 202 call kdi_dreg_set; \
203 203 popq %rdi;
204 204
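/*
 * kdi_dreg_set is called with the normal C calling convention: debug
 * register number in %edi, value in %rsi. That is why the macro saves
 * and restores the caller's %rdi around the calls.
 */
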
205 205 /*
206 206 * Each cpusave buffer has an area set aside for a ring buffer of breadcrumbs.
207 207 * The following macros manage the buffer.
208 208 */
209 209
210 210 /* Advance the ring buffer */
211 211 #define ADVANCE_CRUMB_POINTER(cpusave, tmp1, tmp2) \
212 212 movq KRS_CURCRUMBIDX(cpusave), tmp1; \
213 213 cmpq $[KDI_NCRUMBS - 1], tmp1; \
214 214 jge 1f; \
215 215 /* Advance the pointer and index */ \
216 216 addq $1, tmp1; \
217 217 movq tmp1, KRS_CURCRUMBIDX(cpusave); \
218 218 movq KRS_CURCRUMB(cpusave), tmp1; \
219 219 addq $KRM_SIZE, tmp1; \
220 220 jmp 2f; \
221 221 1: /* Reset the pointer and index */ \
222 222 movq $0, KRS_CURCRUMBIDX(cpusave); \
223 223 leaq KRS_CRUMBS(cpusave), tmp1; \
224 224 2: movq tmp1, KRS_CURCRUMB(cpusave); \
225 225 /* Clear the new crumb */ \
226 226 movq $KDI_NCRUMBS, tmp2; \
227 227 3: movq $0, -4(tmp1, tmp2, 4); \
228 228 decq tmp2; \
229 229 jnz 3b
230 230
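/*
 * In effect, ADVANCE_CRUMB_POINTER performs
 *
 *	idx = (idx == KDI_NCRUMBS - 1) ? 0 : idx + 1;
 *
 * bumping (or wrapping) the crumb pointer to match, and then zeroes the
 * newly current crumb.
 */
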
231 231 /* Set a value in the current breadcrumb buffer */
232 232 #define ADD_CRUMB(cpusave, offset, value, tmp) \
233 233 movq KRS_CURCRUMB(cpusave), tmp; \
234 234 movq value, offset(tmp)
235 235
236 236 /* XXX implement me */
237 237 ENTRY_NP(kdi_nmiint)
238 238 clrq %rcx
239 239 movq (%rcx), %rcx
240 240 SET_SIZE(kdi_nmiint)
241 241
242 242 /*
243 243 * The main entry point for master CPUs. It also serves as the trap
244 244 * handler for all traps and interrupts taken during single-step.
245 245 */
246 246 ENTRY_NP(kdi_cmnint)
247 247 ALTENTRY(kdi_master_entry)
248 248
249 249 pushq %rax
250 250 CLI(%rax)
251 251 popq %rax
252 252
253 253 /* Save current register state */
254 254 subq $REG_OFF(KDIREG_TRAPNO), %rsp
255 255 KDI_SAVE_REGS(%rsp)
256 256
257 257 #ifdef __xpv
258 258 /*
259 259 * Clear saved_upcall_mask in unused byte of cs slot on stack.
260 260 * It can only confuse things.
261 261 */
262 262 movb $0, REG_OFF(KDIREG_CS)+4(%rsp)
263 263 #endif
264 264
265 265 #if !defined(__xpv)
266 266 /*
267 267 * Switch to the kernel's GSBASE. Neither GSBASE nor the ill-named
268 268 * KGSBASE can be trusted, as the kernel may or may not have already
269 269 * done a swapgs. All is not lost, as the kernel can divine the correct
270 270 * value for us. Note that the previous GSBASE is saved in the
271 271 * KDI_SAVE_REGS macro to prevent a usermode process's GSBASE from being
272 272 * blown away. On the hypervisor, we don't need to do this, since it's
273 273 * ensured we're on our requested kernel GSBASE already.
274 + *
275 + * No need to worry about swapgs speculation here as it's unconditional
276 + * and via wrmsr anyway.
274 277 */
275 278 subq $10, %rsp
276 279 sgdt (%rsp)
277 280 movq 2(%rsp), %rdi /* gdt base now in %rdi */
278 281 addq $10, %rsp
279 282 call kdi_gdt2gsbase /* returns kernel's GSBASE in %rax */
280 283
281 284 movq %rax, %rdx
282 285 shrq $32, %rdx
283 286 movl $MSR_AMD_GSBASE, %ecx
284 287 wrmsr
285 288
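/*
 * For contrast with the comment above, the conditional entry pattern that
 * the swapgs mitigation targets elsewhere looks roughly like this (an
 * illustrative sketch only, not code from this file):
 *
 *	cmpw	$KCS_SEL, REG_OFF(KDIREG_CS)(%rsp)
 *	je	1f			(came from kernel mode: skip)
 *	swapgs				(came from user: load kernel GSBASE)
 *	1: lfence			(fence speculation past the branch)
 *
 * A mispredicted branch can speculatively execute (or skip) the swapgs
 * and then dereference %gs with the wrong GSBASE until the lfence
 * resolves; the unconditional wrmsr used here is serializing, so no such
 * window exists on this path.
 */
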
286 289 /*
287 290 * In the trampoline we stashed the incoming %cr3. Copy this into
288 291 * the kdiregs for restoration and later use.
289 292 */
290 293 mov %gs:(CPU_KPTI_DBG+KPTI_TR_CR3), %rdx
291 294 mov %rdx, REG_OFF(KDIREG_CR3)(%rsp)
292 295 /*
293 296 * Switch to the kernel's %cr3. From the early interrupt handler
294 297 * until now we've been running on the "paranoid" %cr3 (that of kas
295 298 * from early in boot).
296 299 *
297 300 * If we took the interrupt from somewhere already on the kas/paranoid
298 301 * %cr3 though, don't change it (this could happen if kcr3 is corrupt
299 302 * and we took a gptrap earlier from this very code).
300 303 */
301 304 cmpq %rdx, kpti_safe_cr3
302 305 je .no_kcr3
303 306 mov %gs:CPU_KPTI_KCR3, %rdx
304 307 cmpq $0, %rdx
305 308 je .no_kcr3
306 309 mov %rdx, %cr3
307 310 .no_kcr3:
308 311
309 312 #endif /* __xpv */
310 313
311 314 GET_CPUSAVE_ADDR /* %rax = cpusave, %rbx = CPU ID */
312 315
313 316 ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)
314 317
315 318 ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_MASTER, %rdx)
316 319
317 320 movq REG_OFF(KDIREG_RIP)(%rsp), %rcx
318 321 ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)
319 322 ADD_CRUMB(%rax, KRM_SP, %rsp, %rdx)
320 323 movq REG_OFF(KDIREG_TRAPNO)(%rsp), %rcx
321 324 ADD_CRUMB(%rax, KRM_TRAPNO, %rcx, %rdx)
322 325
323 326 movq %rsp, %rbp
324 327 pushq %rax
325 328
326 329 /*
327 330  * Were we in the debugger when we took the trap (i.e. was %rsp in one
328 331 * of the debugger's memory ranges)?
329 332 */
330 333 leaq kdi_memranges, %rcx
331 334 movl kdi_nmemranges, %edx
332 335 1:
333 336 cmpq MR_BASE(%rcx), %rsp
334 337 jl 2f /* below this range -- try the next one */
335 338 cmpq MR_LIM(%rcx), %rsp
336 339 jg 2f /* above this range -- try the next one */
337 340 jmp 3f /* matched within this range */
338 341
339 342 2:
340 343 decl %edx
341 344 jz kdi_save_common_state /* %rsp not within debugger memory */
342 345 addq $MR_SIZE, %rcx
343 346 jmp 1b
344 347
345 348 3: /*
346 349 * The master is still set. That should only happen if we hit a trap
347 350 * while running in the debugger. Note that it may be an intentional
348 351 * fault. kmdb_dpi_handle_fault will sort it all out.
349 352 */
350 353
351 354 movq REG_OFF(KDIREG_TRAPNO)(%rbp), %rdi
352 355 movq REG_OFF(KDIREG_RIP)(%rbp), %rsi
353 356 movq REG_OFF(KDIREG_RSP)(%rbp), %rdx
354 357 movq %rbx, %rcx /* cpuid */
355 358
356 359 call kdi_dvec_handle_fault
357 360
358 361 /*
359 362 * If we're here, we ran into a debugger problem, and the user
360 363 * elected to solve it by having the debugger debug itself. The
361 364 * state we're about to save is that of the debugger when it took
362 365 * the fault.
363 366 */
364 367
365 368 jmp kdi_save_common_state
366 369
367 370 SET_SIZE(kdi_master_entry)
368 371 SET_SIZE(kdi_cmnint)
369 372
370 373 /*
371 374 * The cross-call handler for slave CPUs.
372 375 *
373 376 * The debugger is single-threaded, so only one CPU, called the master, may be
374 377 * running it at any given time. The other CPUs, known as slaves, spin in a
375 378 * busy loop until there's something for them to do. This is the entry point
376 379 * for the slaves - they'll be sent here in response to a cross-call sent by the
377 380 * master.
378 381 */
379 382
380 383 ENTRY_NP(kdi_slave_entry)
381 384
382 385 /*
383 386 * Cross calls are implemented as function calls, so our stack currently
384 387 * looks like one you'd get from a zero-argument function call. That
385 388 * is, there's the return %rip at %rsp, and that's about it. We need
386 389 * to make it look like an interrupt stack. When we first save, we'll
387 390 * reverse the saved %ss and %rip, which we'll fix back up when we've
388 391 * freed up some general-purpose registers. We'll also need to fix up
389 392 * the saved %rsp.
390 393 */
391 394
392 395 pushq %rsp /* pushed value off by 8 */
393 396 pushfq
394 397 CLI(%rax)
395 398 pushq $KCS_SEL
396 399 clrq %rax
397 400 movw %ss, %ax
398 401 pushq %rax /* rip should be here */
399 402 pushq $-1 /* phony trap error code */
400 403 pushq $-1 /* phony trap number */
401 404
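/*
 * The hand-built frame now looks like this (higher addresses first), with
 * the %ss/%rip slots swapped and %rsp off by 8 until the fixups below:
 *
 *	caller's return %rip	(currently in the %ss slot)
 *	pushed %rsp		(8 too low; corrected below)
 *	%rflags
 *	$KCS_SEL		(%cs)
 *	%ss			(currently in the %rip slot)
 *	$-1			(phony error code)
 *	$-1			(phony trap number)
 */
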
402 405 subq $REG_OFF(KDIREG_TRAPNO), %rsp
403 406 KDI_SAVE_REGS(%rsp)
404 407
405 408 movq %cr3, %rax
406 409 movq %rax, REG_OFF(KDIREG_CR3)(%rsp)
407 410
408 411 movq REG_OFF(KDIREG_SS)(%rsp), %rax
409 412 movq %rax, REG_OFF(KDIREG_SAVPC)(%rsp)
410 413 xchgq REG_OFF(KDIREG_RIP)(%rsp), %rax
411 414 movq %rax, REG_OFF(KDIREG_SS)(%rsp)
412 415
413 416 movq REG_OFF(KDIREG_RSP)(%rsp), %rax
414 417 addq $8, %rax
415 418 movq %rax, REG_OFF(KDIREG_RSP)(%rsp)
416 419
417 420 /*
418 421 * We've saved all of the general-purpose registers, and have a stack
419 422 * that is irettable (after we strip down to the error code)
420 423 */
421 424
422 425 GET_CPUSAVE_ADDR /* %rax = cpusave, %rbx = CPU ID */
423 426
424 427 ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)
425 428
426 429 ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_SLAVE, %rdx)
427 430
428 431 movq REG_OFF(KDIREG_RIP)(%rsp), %rcx
429 432 ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)
430 433 movq REG_OFF(KDIREG_RSP)(%rsp), %rcx
431 434 ADD_CRUMB(%rax, KRM_SP, %rcx, %rdx)
432 435 ADD_CRUMB(%rax, KRM_TRAPNO, $-1, %rdx)
433 436
434 437 movq $KDI_CPU_STATE_SLAVE, KRS_CPU_STATE(%rax)
435 438
436 439 pushq %rax
437 440 jmp kdi_save_common_state
438 441
439 442 SET_SIZE(kdi_slave_entry)
440 443
441 444 /*
442 445 * The state of the world:
443 446 *
444 447 * The stack has a complete set of saved registers and segment
445 448 * selectors, arranged in the kdi_regs.h order. It also has a pointer
446 449 * to our cpusave area.
447 450 *
448 451 * We need to save, into the cpusave area, a pointer to these saved
449 452 * registers. First we check whether we should jump straight back to
450 453 * the kernel. If not, we save a few more registers, ready the
451 454 * machine for debugger entry, and enter the debugger.
452 455 */
453 456
454 457 ENTRY_NP(kdi_save_common_state)
455 458
456 459 popq %rdi /* the cpusave area */
457 460 movq %rsp, KRS_GREGS(%rdi) /* save ptr to current saved regs */
458 461
459 462 pushq %rdi
460 463 call kdi_trap_pass
461 464 testq %rax, %rax
462 465 jnz kdi_pass_to_kernel
463 466 popq %rax /* cpusave in %rax */
464 467
465 468 SAVE_IDTGDT
466 469
467 470 #if !defined(__xpv)
468 471 /* Save off %cr0, and clear write protect */
469 472 movq %cr0, %rcx
470 473 movq %rcx, KRS_CR0(%rax)
471 474 andq $_BITNOT(CR0_WP), %rcx
472 475 movq %rcx, %cr0
473 476 #endif
474 477
475 478 /* Save the debug registers and disable any active watchpoints */
476 479
477 480 movq %rax, %r15 /* save cpusave area ptr */
478 481 movl $7, %edi
479 482 call kdi_dreg_get
480 483 movq %rax, KRS_DRCTL(%r15)
481 484
482 485 andq $_BITNOT(KDIREG_DRCTL_WPALLEN_MASK), %rax
483 486 movq %rax, %rsi
484 487 movl $7, %edi
485 488 call kdi_dreg_set
486 489
487 490 movl $6, %edi
488 491 call kdi_dreg_get
489 492 movq %rax, KRS_DRSTAT(%r15)
490 493
491 494 movl $0, %edi
492 495 call kdi_dreg_get
493 496 movq %rax, KRS_DROFF(0)(%r15)
494 497
495 498 movl $1, %edi
496 499 call kdi_dreg_get
497 500 movq %rax, KRS_DROFF(1)(%r15)
498 501
499 502 movl $2, %edi
500 503 call kdi_dreg_get
501 504 movq %rax, KRS_DROFF(2)(%r15)
502 505
503 506 movl $3, %edi
504 507 call kdi_dreg_get
505 508 movq %rax, KRS_DROFF(3)(%r15)
506 509
507 510 movq %r15, %rax /* restore cpu save area to rax */
508 511
509 512 clrq %rbp /* stack traces should end here */
510 513
511 514 pushq %rax
512 515 movq %rax, %rdi /* cpusave */
513 516
514 517 call kdi_debugger_entry
515 518
516 519 /* Pass cpusave to kdi_resume */
517 520 popq %rdi
518 521
519 522 jmp kdi_resume
520 523
521 524 SET_SIZE(kdi_save_common_state)
522 525
523 526 /*
524 527 * Resume the world. The code that calls kdi_resume has already
525 528 * decided whether or not to restore the IDT.
526 529 */
527 530 /* cpusave in %rdi */
528 531 ENTRY_NP(kdi_resume)
529 532
530 533 /*
531 534 * Send this CPU back into the world
532 535 */
533 536 #if !defined(__xpv)
534 537 movq KRS_CR0(%rdi), %rdx
535 538 movq %rdx, %cr0
536 539 #endif
537 540
538 541 KDI_RESTORE_DEBUGGING_STATE
539 542
540 543 movq KRS_GREGS(%rdi), %rsp
541 544
542 545 #if !defined(__xpv)
543 546 /*
544 547  * If we're going back via tr_iret_kdi, then we want to copy the final
545 548  * %cr3 we'll be resuming on into the kpti_dbg area now.
546 549 *
547 550 * Since the trampoline needs to find the kpti_dbg too, we enter it
548 551 * with %r13 set to point at that. The real %r13 (to restore before
549 552 * the iret) we stash in the kpti_dbg itself.
550 553 */
551 554 movq %gs:CPU_SELF, %r13 /* can't leaq %gs:*, use self-ptr */
552 555 addq $CPU_KPTI_DBG, %r13
553 556
554 557 movq REG_OFF(KDIREG_R13)(%rsp), %rdx
555 558 movq %rdx, KPTI_R13(%r13)
556 559
557 560 movq REG_OFF(KDIREG_CR3)(%rsp), %rdx
558 561 movq %rdx, KPTI_TR_CR3(%r13)
559 562
560 563 /* The trampoline will undo this later. */
561 564 movq %r13, REG_OFF(KDIREG_R13)(%rsp)
562 565 #endif
563 566
564 567 KDI_RESTORE_REGS(%rsp)
565 568 addq $REG_OFF(KDIREG_RIP), %rsp /* Discard state, trapno, err */
566 569 /*
567 570 * The common trampoline code will restore %cr3 to the right value
568 571 * for either kernel or userland.
569 572 */
570 573 #if !defined(__xpv)
571 574 jmp tr_iret_kdi
572 575 #else
573 576 IRET
574 577 #endif
575 578 /*NOTREACHED*/
576 579 SET_SIZE(kdi_resume)
577 580
578 581
579 582 /*
580 583 * We took a trap that should be handled by the kernel, not KMDB.
581 584 *
582 585 * We're hard-coding the three cases where KMDB has installed permanent
583 586 * handlers, since after we KDI_RESTORE_REGS(), we don't have registers
584 587 * to work with; we can't use a global since other CPUs can easily pass
585 588 * through here at the same time.
586 589 *
587 590 * Note that we handle T_DBGENTR since userspace might have tried it.
588 591 *
589 592 * The trap handler will expect the stack to be in trap order, with %rip
590 593 * being the last entry, so we'll need to restore all our regs. On
591 594 * i86xpv we'll need to compensate for XPV_TRAP_POP.
592 595 *
593 596  * On entry, %rax is either 1 or 2, as returned by kdi_trap_pass().
594 597 * kdi_cmnint stashed the original %cr3 into KDIREG_CR3, then (probably)
595 598 * switched us to the CPU's kf_kernel_cr3. But we're about to call, for
596 599 * example:
597 600 *
598 601 * dbgtrap->trap()->tr_iret_kernel
599 602 *
600 603  * which, unlike tr_iret_kdi, doesn't restore the original %cr3, so
601 604 * we'll do so here if needed.
602 605 *
603 606 * This isn't just a matter of tidiness: for example, consider:
604 607 *
605 608 * hat_switch(oldhat=kas.a_hat, newhat=prochat)
606 609 * setcr3()
607 610 * reset_kpti()
608 611 * *brktrap* due to fbt on reset_kpti:entry
609 612 *
610 613 * Here, we have the new hat's %cr3, but we haven't yet updated
611 614  * kf_kernel_cr3 (so it's currently kas's). So if we don't restore here,
612 615 * we'll stay on kas's cr3 value on returning from the trap: not good if
613 616 * we fault on a userspace address.
614 617 */
615 618 ENTRY_NP(kdi_pass_to_kernel)
616 619
617 620 popq %rdi /* cpusave */
618 621 movq $KDI_CPU_STATE_NONE, KRS_CPU_STATE(%rdi)
619 622 movq KRS_GREGS(%rdi), %rsp
620 623
621 624 cmpq $2, %rax
622 625 jne no_restore_cr3
623 626 movq REG_OFF(KDIREG_CR3)(%rsp), %r11
624 627 movq %r11, %cr3
625 628
626 629 no_restore_cr3:
627 630 movq REG_OFF(KDIREG_TRAPNO)(%rsp), %rdi
628 631
629 632 cmpq $T_SGLSTP, %rdi
630 633 je kdi_pass_dbgtrap
631 634 cmpq $T_BPTFLT, %rdi
632 635 je kdi_pass_brktrap
633 636 cmpq $T_DBGENTR, %rdi
634 637 je kdi_pass_invaltrap
635 638 /*
636 639 * Hmm, unknown handler. Somebody forgot to update this when they
637 640 * added a new trap interposition... try to drop back into kmdb.
638 641 */
639 642 int $T_DBGENTR
640 643
641 644 #define CALL_TRAP_HANDLER(name) \
642 645 KDI_RESTORE_REGS(%rsp); \
643 646 /* Discard state, trapno, err */ \
644 647 addq $REG_OFF(KDIREG_RIP), %rsp; \
645 648 XPV_TRAP_PUSH; \
646 649 jmp %cs:name
647 650
648 651 kdi_pass_dbgtrap:
649 652 CALL_TRAP_HANDLER(dbgtrap)
650 653 /*NOTREACHED*/
651 654 kdi_pass_brktrap:
652 655 CALL_TRAP_HANDLER(brktrap)
653 656 /*NOTREACHED*/
654 657 kdi_pass_invaltrap:
655 658 CALL_TRAP_HANDLER(invaltrap)
656 659 /*NOTREACHED*/
657 660
658 661 SET_SIZE(kdi_pass_to_kernel)
659 662
660 663 /*
661 664 * A minimal version of mdboot(), to be used by the master CPU only.
662 665 */
663 666 ENTRY_NP(kdi_reboot)
664 667
665 668 movl $AD_BOOT, %edi
666 669 movl $A_SHUTDOWN, %esi
667 670 call *psm_shutdownf
668 671 #if defined(__xpv)
669 672 movl $SHUTDOWN_reboot, %edi
670 673 call HYPERVISOR_shutdown
671 674 #else
672 675 call reset
673 676 #endif
674 677 /*NOTREACHED*/
675 678
676 679 SET_SIZE(kdi_reboot)
677 680
678 681 ENTRY_NP(kdi_cpu_debug_init)
679 682 pushq %rbp
680 683 movq %rsp, %rbp
681 684
682 685 pushq %rbx /* macro will clobber %rbx */
683 686 KDI_RESTORE_DEBUGGING_STATE
684 687 popq %rbx
685 688
686 689 leave
687 690 ret
688 691 SET_SIZE(kdi_cpu_debug_init)
689 692
690 693 #define GETDREG(name, r) \
691 694 ENTRY_NP(name); \
692 695 movq r, %rax; \
693 696 ret; \
694 697 SET_SIZE(name)
695 698
696 699 #define SETDREG(name, r) \
697 700 ENTRY_NP(name); \
698 701 movq %rdi, r; \
699 702 ret; \
700 703 SET_SIZE(name)
701 704
702 705 GETDREG(kdi_getdr0, %dr0)
703 706 GETDREG(kdi_getdr1, %dr1)
704 707 GETDREG(kdi_getdr2, %dr2)
705 708 GETDREG(kdi_getdr3, %dr3)
706 709 GETDREG(kdi_getdr6, %dr6)
707 710 GETDREG(kdi_getdr7, %dr7)
708 711
709 712 SETDREG(kdi_setdr0, %dr0)
710 713 SETDREG(kdi_setdr1, %dr1)
711 714 SETDREG(kdi_setdr2, %dr2)
712 715 SETDREG(kdi_setdr3, %dr3)
713 716 SETDREG(kdi_setdr6, %dr6)
714 717 SETDREG(kdi_setdr7, %dr7)
715 718
716 719 #endif /* !__lint */