de-linting of .s files
--- old/usr/src/uts/intel/kdi/kdi_asm.s
+++ new/usr/src/uts/intel/kdi/kdi_asm.s
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 *
26 26 * Copyright 2019 Joyent, Inc.
27 27 */
28 28
29 29 /*
30 30 * Debugger entry and exit for both master and slave CPUs. kdi_idthdl.s contains
31 31 * the IDT stubs that drop into here (mainly via kdi_cmnint).
32 32 */
33 33
34 -#if defined(__lint)
35 -#include <sys/types.h>
36 -#else
37 -
38 34 #include <sys/segments.h>
39 35 #include <sys/asm_linkage.h>
40 36 #include <sys/controlregs.h>
41 37 #include <sys/x86_archext.h>
42 38 #include <sys/privregs.h>
43 39 #include <sys/machprivregs.h>
44 40 #include <sys/kdi_regs.h>
45 41 #include <sys/psw.h>
46 42 #include <sys/uadmin.h>
47 43 #ifdef __xpv
48 44 #include <sys/hypervisor.h>
49 45 #endif
50 46 #include <kdi_assym.h>
51 47 #include <assym.h>
52 48
53 49 /* clobbers %rdx, %rcx, returns addr in %rax, CPU ID in %rbx */
54 50 #define GET_CPUSAVE_ADDR \
55 51 movzbq %gs:CPU_ID, %rbx; \
56 52 movq %rbx, %rax; \
57 53 movq $KRS_SIZE, %rcx; \
58 54 mulq %rcx; \
59 55 movq $kdi_cpusave, %rdx; \
60 56 /*CSTYLED*/ \
61 57 addq (%rdx), %rax
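/*
 * A rough C-style sketch of what GET_CPUSAVE_ADDR computes (kdi_cpusave
 * points at an array of KRS_SIZE-byte per-CPU save areas):
 *
 *	cpuid = CPU->cpu_id;
 *	save  = kdi_cpusave + cpuid * KRS_SIZE;
 *
 * leaving the save-area address in %rax and the CPU ID in %rbx.
 */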
62 58
63 59 /*
64 60 * Save copies of the IDT and GDT descriptors. Note that we only save the IDT
65 61 * and GDT if the IDT isn't ours, as we may be legitimately re-entering the
66 62 * debugger through the trap handler. We don't want to clobber the saved IDT
67 63 * in the process, as we'd end up resuming the world on our IDT.
68 64 */
69 65 #define SAVE_IDTGDT \
70 66 movq %gs:CPU_IDT, %r11; \
71 67 leaq kdi_idt(%rip), %rsi; \
72 68 cmpq %rsi, %r11; \
73 69 je 1f; \
74 70 movq %r11, KRS_IDT(%rax); \
75 71 movq %gs:CPU_GDT, %r11; \
76 72 movq %r11, KRS_GDT(%rax); \
77 73 1:
78 74
79 75 #ifdef __xpv
80 76
81 77 /*
82 78 * Already on kernel gsbase via the hypervisor.
83 79 */
84 80 #define SAVE_GSBASE(reg) /* nothing */
85 81 #define RESTORE_GSBASE(reg) /* nothing */
86 82
87 83 #else
88 84
89 85 #define SAVE_GSBASE(base) \
90 86 movl $MSR_AMD_GSBASE, %ecx; \
91 87 rdmsr; \
92 88 shlq $32, %rdx; \
93 89 orq %rax, %rdx; \
94 90 movq %rdx, REG_OFF(KDIREG_GSBASE)(base); \
95 91 movl $MSR_AMD_KGSBASE, %ecx; \
96 92 rdmsr; \
97 93 shlq $32, %rdx; \
98 94 orq %rax, %rdx; \
99 95 movq %rdx, REG_OFF(KDIREG_KGSBASE)(base)
100 96
101 97 /*
102 98 * We shouldn't have stomped on KGSBASE, so don't try to restore it.
103 99 */
104 100 #define RESTORE_GSBASE(base) \
105 101 movq REG_OFF(KDIREG_GSBASE)(base), %rdx; \
106 102 movq %rdx, %rax; \
107 103 shrq $32, %rdx; \
108 104 movl $MSR_AMD_GSBASE, %ecx; \
109 105 wrmsr
110 106
111 107 #endif /* __xpv */
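/*
 * A note on the non-xpv variants above: rdmsr and wrmsr move the 64-bit MSR
 * value through the %edx:%eax pair, so SAVE_GSBASE reassembles the value as
 * (%rdx << 32) | %rax before storing it into the kdiregs, and RESTORE_GSBASE
 * splits the saved value back into the two halves before the wrmsr.
 */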
112 108
113 109 /*
114 110 * %ss, %rsp, %rflags, %cs, %rip, %err, %trapno are already on the stack.
115 111 */
116 112 #define KDI_SAVE_REGS(base) \
117 113 movq %rdi, REG_OFF(KDIREG_RDI)(base); \
118 114 movq %rsi, REG_OFF(KDIREG_RSI)(base); \
119 115 movq %rdx, REG_OFF(KDIREG_RDX)(base); \
120 116 movq %rcx, REG_OFF(KDIREG_RCX)(base); \
121 117 movq %r8, REG_OFF(KDIREG_R8)(base); \
122 118 movq %r9, REG_OFF(KDIREG_R9)(base); \
123 119 movq %rax, REG_OFF(KDIREG_RAX)(base); \
124 120 movq %rbx, REG_OFF(KDIREG_RBX)(base); \
125 121 movq %rbp, REG_OFF(KDIREG_RBP)(base); \
126 122 movq %r10, REG_OFF(KDIREG_R10)(base); \
127 123 movq %r11, REG_OFF(KDIREG_R11)(base); \
128 124 movq %r12, REG_OFF(KDIREG_R12)(base); \
129 125 movq %r13, REG_OFF(KDIREG_R13)(base); \
130 126 movq %r14, REG_OFF(KDIREG_R14)(base); \
131 127 movq %r15, REG_OFF(KDIREG_R15)(base); \
132 128 movq %rbp, REG_OFF(KDIREG_SAVFP)(base); \
133 129 movq REG_OFF(KDIREG_RIP)(base), %rax; \
134 130 movq %rax, REG_OFF(KDIREG_SAVPC)(base); \
135 131 movq %cr2, %rax; \
136 132 movq %rax, REG_OFF(KDIREG_CR2)(base); \
137 133 clrq %rax; \
138 134 movw %ds, %ax; \
139 135 movq %rax, REG_OFF(KDIREG_DS)(base); \
140 136 movw %es, %ax; \
141 137 movq %rax, REG_OFF(KDIREG_ES)(base); \
142 138 movw %fs, %ax; \
143 139 movq %rax, REG_OFF(KDIREG_FS)(base); \
144 140 movw %gs, %ax; \
145 141 movq %rax, REG_OFF(KDIREG_GS)(base); \
146 142 SAVE_GSBASE(base)
147 143
148 144 #define KDI_RESTORE_REGS(base) \
149 145 movq base, %rdi; \
150 146 RESTORE_GSBASE(%rdi); \
151 147 movq REG_OFF(KDIREG_ES)(%rdi), %rax; \
152 148 movw %ax, %es; \
153 149 movq REG_OFF(KDIREG_DS)(%rdi), %rax; \
154 150 movw %ax, %ds; \
155 151 movq REG_OFF(KDIREG_CR2)(base), %rax; \
156 152 movq %rax, %cr2; \
157 153 movq REG_OFF(KDIREG_R15)(%rdi), %r15; \
158 154 movq REG_OFF(KDIREG_R14)(%rdi), %r14; \
159 155 movq REG_OFF(KDIREG_R13)(%rdi), %r13; \
160 156 movq REG_OFF(KDIREG_R12)(%rdi), %r12; \
161 157 movq REG_OFF(KDIREG_R11)(%rdi), %r11; \
162 158 movq REG_OFF(KDIREG_R10)(%rdi), %r10; \
163 159 movq REG_OFF(KDIREG_RBP)(%rdi), %rbp; \
164 160 movq REG_OFF(KDIREG_RBX)(%rdi), %rbx; \
165 161 movq REG_OFF(KDIREG_RAX)(%rdi), %rax; \
166 162 movq REG_OFF(KDIREG_R9)(%rdi), %r9; \
167 163 movq REG_OFF(KDIREG_R8)(%rdi), %r8; \
168 164 movq REG_OFF(KDIREG_RCX)(%rdi), %rcx; \
169 165 movq REG_OFF(KDIREG_RDX)(%rdi), %rdx; \
170 166 movq REG_OFF(KDIREG_RSI)(%rdi), %rsi; \
171 167 movq REG_OFF(KDIREG_RDI)(%rdi), %rdi
172 168
173 169 /*
174 170 * Given the address of the current CPU's cpusave area in %rax, the following
175 171 * macro restores the debugging state to said CPU. Restored state includes
176 172 * the debug registers from the global %dr variables.
177 173 *
178 174 * Takes the cpusave area in %rdi as a parameter.
179 175 */
180 176 #define KDI_RESTORE_DEBUGGING_STATE \
181 177 pushq %rdi; \
182 178 leaq kdi_drreg(%rip), %r15; \
183 179 movl $7, %edi; \
184 180 movq DR_CTL(%r15), %rsi; \
185 181 call kdi_dreg_set; \
186 182 \
187 183 movl $6, %edi; \
188 184 movq $KDIREG_DRSTAT_RESERVED, %rsi; \
189 185 call kdi_dreg_set; \
190 186 \
191 187 movl $0, %edi; \
192 188 movq DRADDR_OFF(0)(%r15), %rsi; \
193 189 call kdi_dreg_set; \
194 190 movl $1, %edi; \
195 191 movq DRADDR_OFF(1)(%r15), %rsi; \
196 192 call kdi_dreg_set; \
197 193 movl $2, %edi; \
198 194 movq DRADDR_OFF(2)(%r15), %rsi; \
199 195 call kdi_dreg_set; \
200 196 movl $3, %edi; \
201 197 movq DRADDR_OFF(3)(%r15), %rsi; \
202 198 call kdi_dreg_set; \
203 199 popq %rdi;
204 200
205 201 /*
206 202 * Each cpusave buffer has an area set aside for a ring buffer of breadcrumbs.
207 203 * The following macros manage the buffer.
208 204 */
209 205
210 206 /* Advance the ring buffer */
211 207 #define ADVANCE_CRUMB_POINTER(cpusave, tmp1, tmp2) \
212 208 movq KRS_CURCRUMBIDX(cpusave), tmp1; \
213 209 cmpq $[KDI_NCRUMBS - 1], tmp1; \
214 210 jge 1f; \
215 211 /* Advance the pointer and index */ \
216 212 addq $1, tmp1; \
217 213 movq tmp1, KRS_CURCRUMBIDX(cpusave); \
218 214 movq KRS_CURCRUMB(cpusave), tmp1; \
219 215 addq $KRM_SIZE, tmp1; \
220 216 jmp 2f; \
221 217 1: /* Reset the pointer and index */ \
222 218 movq $0, KRS_CURCRUMBIDX(cpusave); \
223 219 leaq KRS_CRUMBS(cpusave), tmp1; \
224 220 2: movq tmp1, KRS_CURCRUMB(cpusave); \
225 221 /* Clear the new crumb */ \
226 222 movq $KDI_NCRUMBS, tmp2; \
227 223 3: movq $0, -4(tmp1, tmp2, 4); \
228 224 decq tmp2; \
229 225 jnz 3b
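/*
 * In pseudocode (the names are informal stand-ins for the KRS_/KRM_ offsets),
 * ADVANCE_CRUMB_POINTER does roughly:
 *
 *	if (curcrumbidx < KDI_NCRUMBS - 1) {
 *		curcrumbidx++;
 *		curcrumb += KRM_SIZE;		advance by one crumb
 *	} else {
 *		curcrumbidx = 0;
 *		curcrumb = &crumbs[0];		wrap back to the start
 *	}
 *	zero the newly selected crumb
 */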
230 226
231 227 /* Set a value in the current breadcrumb buffer */
232 228 #define ADD_CRUMB(cpusave, offset, value, tmp) \
233 229 movq KRS_CURCRUMB(cpusave), tmp; \
234 230 movq value, offset(tmp)
235 231
236 232 /* XXX implement me */
237 233 ENTRY_NP(kdi_nmiint)
238 234 clrq %rcx
239 235 movq (%rcx), %rcx
240 236 SET_SIZE(kdi_nmiint)
241 237
242 238 /*
243 239 * The main entry point for master CPUs. It also serves as the trap
244 240 * handler for all traps and interrupts taken during single-step.
245 241 */
246 242 ENTRY_NP(kdi_cmnint)
247 243 ALTENTRY(kdi_master_entry)
248 244
249 245 pushq %rax
250 246 CLI(%rax)
251 247 popq %rax
252 248
253 249 /* Save current register state */
254 250 subq $REG_OFF(KDIREG_TRAPNO), %rsp
255 251 KDI_SAVE_REGS(%rsp)
256 252
257 253 #ifdef __xpv
258 254 /*
259 255 * Clear saved_upcall_mask in unused byte of cs slot on stack.
260 256 * It can only confuse things.
261 257 */
262 258 movb $0, REG_OFF(KDIREG_CS)+4(%rsp)
263 259 #endif
264 260
265 261 #if !defined(__xpv)
266 262 /*
267 263 * Switch to the kernel's GSBASE. Neither GSBASE nor the ill-named
268 264 * KGSBASE can be trusted, as the kernel may or may not have already
269 265 * done a swapgs. All is not lost, as the kernel can divine the correct
270 266 * value for us. Note that the previous GSBASE is saved in the
271 267 * KDI_SAVE_REGS macro to prevent a usermode process's GSBASE from being
272 268 * blown away. On the hypervisor, we don't need to do this, since it's
273 269 * ensured we're on our requested kernel GSBASE already.
274 270 *
275 271 * No need to worry about swapgs speculation here as it's unconditional
276 272 * and via wrmsr anyway.
277 273 */
278 274 subq $10, %rsp
279 275 sgdt (%rsp)
280 276 movq 2(%rsp), %rdi /* gdt base now in %rdi */
281 277 addq $10, %rsp
282 278 call kdi_gdt2gsbase /* returns kernel's GSBASE in %rax */
283 279
284 280 movq %rax, %rdx
285 281 shrq $32, %rdx
286 282 movl $MSR_AMD_GSBASE, %ecx
287 283 wrmsr
288 284
289 285 /*
290 286 * In the trampoline we stashed the incoming %cr3. Copy this into
291 287 * the kdiregs for restoration and later use.
292 288 */
293 289 mov %gs:(CPU_KPTI_DBG+KPTI_TR_CR3), %rdx
294 290 mov %rdx, REG_OFF(KDIREG_CR3)(%rsp)
295 291 /*
296 292 * Switch to the kernel's %cr3. From the early interrupt handler
297 293 * until now we've been running on the "paranoid" %cr3 (that of kas
298 294 * from early in boot).
299 295 *
300 296 * If we took the interrupt from somewhere already on the kas/paranoid
301 297 * %cr3 though, don't change it (this could happen if kcr3 is corrupt
302 298 * and we took a gptrap earlier from this very code).
303 299 */
304 300 cmpq %rdx, kpti_safe_cr3
305 301 je .no_kcr3
306 302 mov %gs:CPU_KPTI_KCR3, %rdx
307 303 cmpq $0, %rdx
308 304 je .no_kcr3
309 305 mov %rdx, %cr3
310 306 .no_kcr3:
311 307
312 308 #endif /* __xpv */
313 309
314 310 GET_CPUSAVE_ADDR /* %rax = cpusave, %rbx = CPU ID */
315 311
316 312 ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)
317 313
318 314 ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_MASTER, %rdx)
319 315
320 316 movq REG_OFF(KDIREG_RIP)(%rsp), %rcx
321 317 ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)
322 318 ADD_CRUMB(%rax, KRM_SP, %rsp, %rdx)
323 319 movq REG_OFF(KDIREG_TRAPNO)(%rsp), %rcx
324 320 ADD_CRUMB(%rax, KRM_TRAPNO, %rcx, %rdx)
325 321
326 322 movq %rsp, %rbp
327 323 pushq %rax
328 324
329 325 /*
330 326 	 * Were we in the debugger when we took the trap (i.e. was %rsp in one
331 327 * of the debugger's memory ranges)?
332 328 */
333 329 leaq kdi_memranges, %rcx
334 330 movl kdi_nmemranges, %edx
335 331 1:
336 332 cmpq MR_BASE(%rcx), %rsp
337 333 jl 2f /* below this range -- try the next one */
338 334 cmpq MR_LIM(%rcx), %rsp
339 335 jg 2f /* above this range -- try the next one */
340 336 jmp 3f /* matched within this range */
341 337
342 338 2:
343 339 decl %edx
344 340 jz kdi_save_common_state /* %rsp not within debugger memory */
345 341 addq $MR_SIZE, %rcx
346 342 jmp 1b
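/*
 * In pseudocode, the range check above is (field names are informal
 * stand-ins for the MR_ offsets):
 *
 *	for (i = kdi_nmemranges, mr = kdi_memranges; ; mr++) {
 *		if (mr->base <= %rsp && %rsp <= mr->lim)
 *			goto 3;			inside debugger memory
 *		if (--i == 0)
 *			goto kdi_save_common_state;
 *	}
 */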
347 343
348 344 3: /*
349 345 * The master is still set. That should only happen if we hit a trap
350 346 * while running in the debugger. Note that it may be an intentional
351 347 * fault. kmdb_dpi_handle_fault will sort it all out.
352 348 */
353 349
354 350 movq REG_OFF(KDIREG_TRAPNO)(%rbp), %rdi
355 351 movq REG_OFF(KDIREG_RIP)(%rbp), %rsi
356 352 movq REG_OFF(KDIREG_RSP)(%rbp), %rdx
357 353 movq %rbx, %rcx /* cpuid */
358 354
359 355 call kdi_dvec_handle_fault
360 356
361 357 /*
362 358 * If we're here, we ran into a debugger problem, and the user
363 359 * elected to solve it by having the debugger debug itself. The
364 360 * state we're about to save is that of the debugger when it took
365 361 * the fault.
366 362 */
367 363
368 364 jmp kdi_save_common_state
369 365
370 366 SET_SIZE(kdi_master_entry)
371 367 SET_SIZE(kdi_cmnint)
372 368
373 369 /*
374 370 * The cross-call handler for slave CPUs.
375 371 *
376 372 * The debugger is single-threaded, so only one CPU, called the master, may be
377 373 * running it at any given time. The other CPUs, known as slaves, spin in a
378 374 * busy loop until there's something for them to do. This is the entry point
379 375 * for the slaves - they'll be sent here in response to a cross-call sent by the
380 376 * master.
381 377 */
382 378
383 379 ENTRY_NP(kdi_slave_entry)
384 380
385 381 /*
386 382 * Cross calls are implemented as function calls, so our stack currently
387 383 * looks like one you'd get from a zero-argument function call. That
388 384 * is, there's the return %rip at %rsp, and that's about it. We need
389 385 * to make it look like an interrupt stack. When we first save, we'll
390 386 * reverse the saved %ss and %rip, which we'll fix back up when we've
391 387 * freed up some general-purpose registers. We'll also need to fix up
392 388 * the saved %rsp.
393 389 */
394 390
395 391 pushq %rsp /* pushed value off by 8 */
396 392 pushfq
397 393 CLI(%rax)
398 394 pushq $KCS_SEL
399 395 clrq %rax
400 396 movw %ss, %ax
401 397 pushq %rax /* rip should be here */
402 398 pushq $-1 /* phony trap error code */
403 399 pushq $-1 /* phony trap number */
404 400
405 401 subq $REG_OFF(KDIREG_TRAPNO), %rsp
406 402 KDI_SAVE_REGS(%rsp)
407 403
408 404 movq %cr3, %rax
409 405 movq %rax, REG_OFF(KDIREG_CR3)(%rsp)
410 406
411 407 movq REG_OFF(KDIREG_SS)(%rsp), %rax
412 408 movq %rax, REG_OFF(KDIREG_SAVPC)(%rsp)
413 409 xchgq REG_OFF(KDIREG_RIP)(%rsp), %rax
414 410 movq %rax, REG_OFF(KDIREG_SS)(%rsp)
415 411
416 412 movq REG_OFF(KDIREG_RSP)(%rsp), %rax
417 413 addq $8, %rax
418 414 movq %rax, REG_OFF(KDIREG_RSP)(%rsp)
419 415
420 416 /*
421 417 * We've saved all of the general-purpose registers, and have a stack
422 418 * that is irettable (after we strip down to the error code)
423 419 */
424 420
425 421 GET_CPUSAVE_ADDR /* %rax = cpusave, %rbx = CPU ID */
426 422
427 423 ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)
428 424
429 425 ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_SLAVE, %rdx)
430 426
431 427 movq REG_OFF(KDIREG_RIP)(%rsp), %rcx
432 428 ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)
433 429 movq REG_OFF(KDIREG_RSP)(%rsp), %rcx
434 430 ADD_CRUMB(%rax, KRM_SP, %rcx, %rdx)
435 431 ADD_CRUMB(%rax, KRM_TRAPNO, $-1, %rdx)
436 432
437 433 movq $KDI_CPU_STATE_SLAVE, KRS_CPU_STATE(%rax)
438 434
439 435 pushq %rax
440 436 jmp kdi_save_common_state
441 437
442 438 SET_SIZE(kdi_slave_entry)
443 439
444 440 /*
445 441 * The state of the world:
446 442 *
447 443 * The stack has a complete set of saved registers and segment
448 444 * selectors, arranged in the kdi_regs.h order. It also has a pointer
449 445 * to our cpusave area.
450 446 *
451 447 * We need to save, into the cpusave area, a pointer to these saved
452 448 * registers. First we check whether we should jump straight back to
453 449 * the kernel. If not, we save a few more registers, ready the
454 450 * machine for debugger entry, and enter the debugger.
455 451 */
456 452
457 453 ENTRY_NP(kdi_save_common_state)
458 454
459 455 popq %rdi /* the cpusave area */
460 456 movq %rsp, KRS_GREGS(%rdi) /* save ptr to current saved regs */
461 457
462 458 pushq %rdi
463 459 call kdi_trap_pass
464 460 testq %rax, %rax
465 461 jnz kdi_pass_to_kernel
466 462 popq %rax /* cpusave in %rax */
467 463
468 464 SAVE_IDTGDT
469 465
470 466 #if !defined(__xpv)
471 467 /* Save off %cr0, and clear write protect */
472 468 movq %cr0, %rcx
473 469 movq %rcx, KRS_CR0(%rax)
474 470 andq $_BITNOT(CR0_WP), %rcx
475 471 movq %rcx, %cr0
476 472 #endif
477 473
478 474 /* Save the debug registers and disable any active watchpoints */
479 475
480 476 movq %rax, %r15 /* save cpusave area ptr */
481 477 movl $7, %edi
482 478 call kdi_dreg_get
483 479 movq %rax, KRS_DRCTL(%r15)
484 480
485 481 andq $_BITNOT(KDIREG_DRCTL_WPALLEN_MASK), %rax
486 482 movq %rax, %rsi
487 483 movl $7, %edi
488 484 call kdi_dreg_set
489 485
490 486 movl $6, %edi
491 487 call kdi_dreg_get
492 488 movq %rax, KRS_DRSTAT(%r15)
493 489
494 490 movl $0, %edi
495 491 call kdi_dreg_get
496 492 movq %rax, KRS_DROFF(0)(%r15)
497 493
498 494 movl $1, %edi
499 495 call kdi_dreg_get
500 496 movq %rax, KRS_DROFF(1)(%r15)
501 497
502 498 movl $2, %edi
503 499 call kdi_dreg_get
504 500 movq %rax, KRS_DROFF(2)(%r15)
505 501
506 502 movl $3, %edi
507 503 call kdi_dreg_get
508 504 movq %rax, KRS_DROFF(3)(%r15)
509 505
510 506 movq %r15, %rax /* restore cpu save area to rax */
511 507
512 508 clrq %rbp /* stack traces should end here */
513 509
514 510 pushq %rax
515 511 movq %rax, %rdi /* cpusave */
516 512
517 513 call kdi_debugger_entry
518 514
519 515 /* Pass cpusave to kdi_resume */
520 516 popq %rdi
521 517
522 518 jmp kdi_resume
523 519
524 520 SET_SIZE(kdi_save_common_state)
525 521
526 522 /*
527 523 * Resume the world. The code that calls kdi_resume has already
528 524 * decided whether or not to restore the IDT.
529 525 */
530 526 /* cpusave in %rdi */
531 527 ENTRY_NP(kdi_resume)
532 528
533 529 /*
534 530 * Send this CPU back into the world
535 531 */
536 532 #if !defined(__xpv)
537 533 movq KRS_CR0(%rdi), %rdx
538 534 movq %rdx, %cr0
539 535 #endif
540 536
541 537 KDI_RESTORE_DEBUGGING_STATE
542 538
543 539 movq KRS_GREGS(%rdi), %rsp
544 540
545 541 #if !defined(__xpv)
546 542 /*
547 543 * If we're going back via tr_iret_kdi, then we want to copy the
548 544 * final %cr3 we're going to back into the kpti_dbg area now.
549 545 *
550 546 * Since the trampoline needs to find the kpti_dbg too, we enter it
551 547 * with %r13 set to point at that. The real %r13 (to restore before
552 548 * the iret) we stash in the kpti_dbg itself.
553 549 */
554 550 movq %gs:CPU_SELF, %r13 /* can't leaq %gs:*, use self-ptr */
555 551 addq $CPU_KPTI_DBG, %r13
556 552
557 553 movq REG_OFF(KDIREG_R13)(%rsp), %rdx
558 554 movq %rdx, KPTI_R13(%r13)
559 555
560 556 movq REG_OFF(KDIREG_CR3)(%rsp), %rdx
561 557 movq %rdx, KPTI_TR_CR3(%r13)
562 558
563 559 /* The trampoline will undo this later. */
564 560 movq %r13, REG_OFF(KDIREG_R13)(%rsp)
565 561 #endif
566 562
567 563 KDI_RESTORE_REGS(%rsp)
568 564 addq $REG_OFF(KDIREG_RIP), %rsp /* Discard state, trapno, err */
569 565 /*
570 566 * The common trampoline code will restore %cr3 to the right value
571 567 * for either kernel or userland.
572 568 */
573 569 #if !defined(__xpv)
574 570 jmp tr_iret_kdi
575 571 #else
576 572 IRET
577 573 #endif
578 574 /*NOTREACHED*/
579 575 SET_SIZE(kdi_resume)
580 576
581 577
582 578 /*
583 579 * We took a trap that should be handled by the kernel, not KMDB.
584 580 *
585 581 * We're hard-coding the three cases where KMDB has installed permanent
586 582 * handlers, since after we KDI_RESTORE_REGS(), we don't have registers
587 583 * to work with; we can't use a global since other CPUs can easily pass
588 584 * through here at the same time.
589 585 *
590 586 * Note that we handle T_DBGENTR since userspace might have tried it.
591 587 *
592 588 * The trap handler will expect the stack to be in trap order, with %rip
593 589 * being the last entry, so we'll need to restore all our regs. On
594 590 * i86xpv we'll need to compensate for XPV_TRAP_POP.
595 591 *
596 592 * %rax on entry is either 1 or 2, which is from kdi_trap_pass().
597 593 * kdi_cmnint stashed the original %cr3 into KDIREG_CR3, then (probably)
598 594 * switched us to the CPU's kf_kernel_cr3. But we're about to call, for
599 595 * example:
600 596 *
601 597 * dbgtrap->trap()->tr_iret_kernel
602 598 *
603 599 	 * which, unlike tr_iret_kdi, doesn't restore the original %cr3, so
604 600 * we'll do so here if needed.
605 601 *
606 602 * This isn't just a matter of tidiness: for example, consider:
607 603 *
608 604 * hat_switch(oldhat=kas.a_hat, newhat=prochat)
609 605 * setcr3()
610 606 * reset_kpti()
611 607 * *brktrap* due to fbt on reset_kpti:entry
612 608 *
613 609 * Here, we have the new hat's %cr3, but we haven't yet updated
614 610 * kf_kernel_cr3 (so its currently kas's). So if we don't restore here,
615 611 * we'll stay on kas's cr3 value on returning from the trap: not good if
616 612 * we fault on a userspace address.
617 613 */
618 614 ENTRY_NP(kdi_pass_to_kernel)
619 615
620 616 popq %rdi /* cpusave */
621 617 movq $KDI_CPU_STATE_NONE, KRS_CPU_STATE(%rdi)
622 618 movq KRS_GREGS(%rdi), %rsp
623 619
624 620 cmpq $2, %rax
625 621 jne no_restore_cr3
626 622 movq REG_OFF(KDIREG_CR3)(%rsp), %r11
627 623 movq %r11, %cr3
628 624
629 625 no_restore_cr3:
630 626 movq REG_OFF(KDIREG_TRAPNO)(%rsp), %rdi
631 627
632 628 cmpq $T_SGLSTP, %rdi
633 629 je kdi_pass_dbgtrap
634 630 cmpq $T_BPTFLT, %rdi
635 631 je kdi_pass_brktrap
636 632 cmpq $T_DBGENTR, %rdi
637 633 je kdi_pass_invaltrap
638 634 /*
639 635 * Hmm, unknown handler. Somebody forgot to update this when they
640 636 * added a new trap interposition... try to drop back into kmdb.
641 637 */
642 638 int $T_DBGENTR
643 639
644 640 #define CALL_TRAP_HANDLER(name) \
645 641 KDI_RESTORE_REGS(%rsp); \
646 642 /* Discard state, trapno, err */ \
647 643 addq $REG_OFF(KDIREG_RIP), %rsp; \
648 644 XPV_TRAP_PUSH; \
649 645 jmp %cs:name
650 646
651 647 kdi_pass_dbgtrap:
652 648 CALL_TRAP_HANDLER(dbgtrap)
653 649 /*NOTREACHED*/
654 650 kdi_pass_brktrap:
655 651 CALL_TRAP_HANDLER(brktrap)
656 652 /*NOTREACHED*/
657 653 kdi_pass_invaltrap:
658 654 CALL_TRAP_HANDLER(invaltrap)
659 655 /*NOTREACHED*/
660 656
661 657 SET_SIZE(kdi_pass_to_kernel)
662 658
663 659 /*
664 660 * A minimal version of mdboot(), to be used by the master CPU only.
665 661 */
666 662 ENTRY_NP(kdi_reboot)
667 663
668 664 movl $AD_BOOT, %edi
669 665 movl $A_SHUTDOWN, %esi
670 666 call *psm_shutdownf
671 667 #if defined(__xpv)
672 668 movl $SHUTDOWN_reboot, %edi
673 669 call HYPERVISOR_shutdown
674 670 #else
675 671 call reset
676 672 #endif
677 673 /*NOTREACHED*/
678 674
679 675 SET_SIZE(kdi_reboot)
680 676
681 677 ENTRY_NP(kdi_cpu_debug_init)
682 678 pushq %rbp
683 679 movq %rsp, %rbp
684 680
685 681 pushq %rbx /* macro will clobber %rbx */
686 682 KDI_RESTORE_DEBUGGING_STATE
687 683 popq %rbx
688 684
689 685 leave
690 686 ret
691 687 SET_SIZE(kdi_cpu_debug_init)
692 688
693 689 #define GETDREG(name, r) \
694 690 ENTRY_NP(name); \
695 691 movq r, %rax; \
696 692 ret; \
697 693 SET_SIZE(name)
698 694
699 695 #define SETDREG(name, r) \
700 696 ENTRY_NP(name); \
701 697 movq %rdi, r; \
702 698 ret; \
703 699 SET_SIZE(name)
704 700
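/*
 * Each invocation below expands to a tiny C-callable accessor; for example
 * GETDREG(kdi_getdr0, %dr0) yields the equivalent of:
 *
 *	ENTRY_NP(kdi_getdr0)
 *		movq	%dr0, %rax
 *		ret
 *	SET_SIZE(kdi_getdr0)
 *
 * returning the debug register's value in %rax, while SETDREG writes its
 * first argument (passed in %rdi) into the named register.
 */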
705 701 GETDREG(kdi_getdr0, %dr0)
706 702 GETDREG(kdi_getdr1, %dr1)
707 703 GETDREG(kdi_getdr2, %dr2)
708 704 GETDREG(kdi_getdr3, %dr3)
709 705 GETDREG(kdi_getdr6, %dr6)
710 706 GETDREG(kdi_getdr7, %dr7)
711 707
712 708 SETDREG(kdi_setdr0, %dr0)
713 709 SETDREG(kdi_setdr1, %dr1)
714 710 SETDREG(kdi_setdr2, %dr2)
715 711 SETDREG(kdi_setdr3, %dr3)
716 712 SETDREG(kdi_setdr6, %dr6)
717 713 SETDREG(kdi_setdr7, %dr7)
718 714
719 -#endif /* !__lint */