9210 remove KMDB branch debugging support
9211 ::crregs could do with cr2/cr3 support
9209 ::ttrace should be able to filter by thread
Reviewed by: Patrick Mooney <patrick.mooney@joyent.com>
--- old/usr/src/uts/intel/kdi/amd64/kdi_asm.s
+++ new/usr/src/uts/intel/kdi/amd64/kdi_asm.s
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 + *
26 + * Copyright 2018 Joyent, Inc.
25 27 */
26 28
27 -#pragma ident "%Z%%M% %I% %E% SMI"
28 -
29 29 /*
30 30 * Debugger entry for both master and slave CPUs
31 31 */
32 32
33 33 #if defined(__lint)
34 34 #include <sys/types.h>
35 35 #endif
36 36
37 37 #include <sys/segments.h>
38 38 #include <sys/asm_linkage.h>
39 39 #include <sys/controlregs.h>
40 40 #include <sys/x86_archext.h>
41 41 #include <sys/privregs.h>
42 42 #include <sys/machprivregs.h>
43 43 #include <sys/kdi_regs.h>
44 44 #include <sys/psw.h>
45 45 #include <sys/uadmin.h>
46 46 #ifdef __xpv
47 47 #include <sys/hypervisor.h>
48 48 #endif
49 49
50 50 #ifdef _ASM
51 51
52 52 #include <kdi_assym.h>
53 53 #include <assym.h>
54 54
55 55 /* clobbers %rdx, %rcx, returns addr in %rax, CPU ID in %rbx */
56 56 #define GET_CPUSAVE_ADDR \
57 57 movzbq %gs:CPU_ID, %rbx; \
58 58 movq %rbx, %rax; \
59 59 movq $KRS_SIZE, %rcx; \
60 60 mulq %rcx; \
61 61 movq $kdi_cpusave, %rdx; \
62 62 /*CSTYLED*/ \
63 63 addq (%rdx), %rax
64 64
65 65 /*
66 66 * Save copies of the IDT and GDT descriptors. Note that we only save the IDT
67 67 * and GDT if the IDT isn't ours, as we may be legitimately re-entering the
68 68 * debugger through the trap handler. We don't want to clobber the saved IDT
69 69 * in the process, as we'd end up resuming the world on our IDT.
70 70 */
71 71 #define SAVE_IDTGDT \
72 72 movq %gs:CPU_IDT, %r11; \
73 73 leaq kdi_idt(%rip), %rsi; \
74 74 cmpq %rsi, %r11; \
75 75 je 1f; \
76 76 movq %r11, KRS_IDT(%rax); \
77 77 movq %gs:CPU_GDT, %r11; \
78 78 movq %r11, KRS_GDT(%rax); \
79 79 1:
80 80
81 81 #ifdef __xpv
82 82
83 83 #define SAVE_GSBASE(reg) /* nothing */
84 84 #define RESTORE_GSBASE(reg) /* nothing */
85 85
86 86 #else
87 87
88 88 #define SAVE_GSBASE(base) \
89 89 movl $MSR_AMD_GSBASE, %ecx; \
90 90 rdmsr; \
91 91 shlq $32, %rdx; \
92 92 orq %rax, %rdx; \
93 93 movq %rdx, REG_OFF(KDIREG_GSBASE)(base)
94 94
95 95 #define RESTORE_GSBASE(base) \
96 96 movq REG_OFF(KDIREG_GSBASE)(base), %rdx; \
97 97 movq %rdx, %rax; \
98 98 shrq $32, %rdx; \
99 99 movl $MSR_AMD_GSBASE, %ecx; \
100 100 wrmsr
101 101
102 102 #endif /* __xpv */
103 103
104 104 /*
105 105 * %ss, %rsp, %rflags, %cs, %rip, %err, %trapno are already on the stack. Note
106 106 * that on the hypervisor, we skip the save/restore of GSBASE: it's slow, and
107 107 * unnecessary.
108 108 */
109 109 #define KDI_SAVE_REGS(base) \
110 110 movq %rdi, REG_OFF(KDIREG_RDI)(base); \
111 111 movq %rsi, REG_OFF(KDIREG_RSI)(base); \
112 112 movq %rdx, REG_OFF(KDIREG_RDX)(base); \
113 113 movq %rcx, REG_OFF(KDIREG_RCX)(base); \
114 114 movq %r8, REG_OFF(KDIREG_R8)(base); \
115 115 movq %r9, REG_OFF(KDIREG_R9)(base); \
116 116 movq %rax, REG_OFF(KDIREG_RAX)(base); \
117 117 movq %rbx, REG_OFF(KDIREG_RBX)(base); \
118 118 movq %rbp, REG_OFF(KDIREG_RBP)(base); \
119 119 movq %r10, REG_OFF(KDIREG_R10)(base); \
120 120 movq %r11, REG_OFF(KDIREG_R11)(base); \
121 121 movq %r12, REG_OFF(KDIREG_R12)(base); \
122 122 movq %r13, REG_OFF(KDIREG_R13)(base); \
123 123 movq %r14, REG_OFF(KDIREG_R14)(base); \
124 124 movq %r15, REG_OFF(KDIREG_R15)(base); \
125 125 movq %rbp, REG_OFF(KDIREG_SAVFP)(base); \
126 126 movq REG_OFF(KDIREG_RIP)(base), %rax; \
127 127 movq %rax, REG_OFF(KDIREG_SAVPC)(base); \
128 128 clrq %rax; \
129 129 movw %ds, %ax; \
130 130 movq %rax, REG_OFF(KDIREG_DS)(base); \
131 131 movw %es, %ax; \
132 132 movq %rax, REG_OFF(KDIREG_ES)(base); \
133 133 movw %fs, %ax; \
134 134 movq %rax, REG_OFF(KDIREG_FS)(base); \
135 135 movw %gs, %ax; \
136 136 movq %rax, REG_OFF(KDIREG_GS)(base); \
137 137 SAVE_GSBASE(base)
138 138
139 139 #define KDI_RESTORE_REGS(base) \
140 140 movq base, %rdi; \
141 141 RESTORE_GSBASE(%rdi); \
142 142 movq REG_OFF(KDIREG_ES)(%rdi), %rax; \
143 143 movw %ax, %es; \
144 144 movq REG_OFF(KDIREG_DS)(%rdi), %rax; \
145 145 movw %ax, %ds; \
146 146 movq REG_OFF(KDIREG_R15)(%rdi), %r15; \
147 147 movq REG_OFF(KDIREG_R14)(%rdi), %r14; \
148 148 movq REG_OFF(KDIREG_R13)(%rdi), %r13; \
149 149 movq REG_OFF(KDIREG_R12)(%rdi), %r12; \
150 150 movq REG_OFF(KDIREG_R11)(%rdi), %r11; \
151 151 movq REG_OFF(KDIREG_R10)(%rdi), %r10; \
152 152 movq REG_OFF(KDIREG_RBP)(%rdi), %rbp; \
153 153 movq REG_OFF(KDIREG_RBX)(%rdi), %rbx; \
154 154 movq REG_OFF(KDIREG_RAX)(%rdi), %rax; \
155 155 movq REG_OFF(KDIREG_R9)(%rdi), %r9; \
156 156 movq REG_OFF(KDIREG_R8)(%rdi), %r8; \
157 157 movq REG_OFF(KDIREG_RCX)(%rdi), %rcx; \
158 158 movq REG_OFF(KDIREG_RDX)(%rdi), %rdx; \
159 159 movq REG_OFF(KDIREG_RSI)(%rdi), %rsi; \
160 160 movq REG_OFF(KDIREG_RDI)(%rdi), %rdi
161 161
162 162 /*
163 163 * Given the address of the current CPU's cpusave area in %rax, the following
164 164 * macro restores the debugging state to said CPU. Restored state includes
165 - * the debug registers from the global %dr variables, and debugging MSRs from
166 - * the CPU save area. This code would be in a separate routine, but for the
167 - * fact that some of the MSRs are jump-sensitive. As such, we need to minimize
168 - * the number of jumps taken subsequent to the update of said MSRs. We can
169 - * remove one jump (the ret) by using a macro instead of a function for the
170 - * debugging state restoration code.
165 + * the debug registers from the global %dr variables.
171 166 *
172 - * Takes the cpusave area in %rdi as a parameter, clobbers %rax-%rdx
173 - */
167 + * Takes the cpusave area in %rdi as a parameter.
168 + */
174 169 #define KDI_RESTORE_DEBUGGING_STATE \
175 170 pushq %rdi; \
176 171 leaq kdi_drreg(%rip), %r15; \
177 172 movl $7, %edi; \
178 173 movq DR_CTL(%r15), %rsi; \
179 174 call kdi_dreg_set; \
180 175 \
181 176 movl $6, %edi; \
182 177 movq $KDIREG_DRSTAT_RESERVED, %rsi; \
183 178 call kdi_dreg_set; \
184 179 \
185 180 movl $0, %edi; \
186 181 movq DRADDR_OFF(0)(%r15), %rsi; \
187 182 call kdi_dreg_set; \
188 183 movl $1, %edi; \
189 184 movq DRADDR_OFF(1)(%r15), %rsi; \
190 185 call kdi_dreg_set; \
191 186 movl $2, %edi; \
192 187 movq DRADDR_OFF(2)(%r15), %rsi; \
193 188 call kdi_dreg_set; \
194 189 movl $3, %edi; \
195 190 movq DRADDR_OFF(3)(%r15), %rsi; \
196 191 call kdi_dreg_set; \
197 - popq %rdi; \
198 - \
199 - /* \
200 - * Write any requested MSRs. \
201 - */ \
202 - movq KRS_MSR(%rdi), %rbx; \
203 - cmpq $0, %rbx; \
204 - je 3f; \
205 -1: \
206 - movl MSR_NUM(%rbx), %ecx; \
207 - cmpl $0, %ecx; \
208 - je 3f; \
209 - \
210 - movl MSR_TYPE(%rbx), %edx; \
211 - cmpl $KDI_MSR_WRITE, %edx; \
212 - jne 2f; \
213 - \
214 - movq MSR_VALP(%rbx), %rdx; \
215 - movl 0(%rdx), %eax; \
216 - movl 4(%rdx), %edx; \
217 - wrmsr; \
218 -2: \
219 - addq $MSR_SIZE, %rbx; \
220 - jmp 1b; \
221 -3: \
222 - /* \
223 - * We must not branch after re-enabling LBR. If \
224 - * kdi_wsr_wrexit_msr is set, it contains the number \
225 - * of the MSR that controls LBR. kdi_wsr_wrexit_valp \
226 - * contains the value that is to be written to enable \
227 - * LBR. \
228 - */ \
229 - leaq kdi_msr_wrexit_msr(%rip), %rcx; \
230 - movl (%rcx), %ecx; \
231 - cmpl $0, %ecx; \
232 - je 1f; \
233 - \
234 - leaq kdi_msr_wrexit_valp(%rip), %rdx; \
235 - movq (%rdx), %rdx; \
236 - movl 0(%rdx), %eax; \
237 - movl 4(%rdx), %edx; \
238 - \
239 - wrmsr; \
240 -1:
192 + popq %rdi;
241 193
242 194 /*
243 195 * Each cpusave buffer has an area set aside for a ring buffer of breadcrumbs.
244 196 * The following macros manage the buffer.
245 197 */
246 198
247 199 /* Advance the ring buffer */
248 200 #define ADVANCE_CRUMB_POINTER(cpusave, tmp1, tmp2) \
249 201 movq KRS_CURCRUMBIDX(cpusave), tmp1; \
250 202 cmpq $[KDI_NCRUMBS - 1], tmp1; \
251 203 jge 1f; \
252 204 /* Advance the pointer and index */ \
253 205 addq $1, tmp1; \
254 206 movq tmp1, KRS_CURCRUMBIDX(cpusave); \
255 207 movq KRS_CURCRUMB(cpusave), tmp1; \
256 208 addq $KRM_SIZE, tmp1; \
257 209 jmp 2f; \
258 210 1: /* Reset the pointer and index */ \
259 211 movq $0, KRS_CURCRUMBIDX(cpusave); \
260 212 leaq KRS_CRUMBS(cpusave), tmp1; \
261 213 2: movq tmp1, KRS_CURCRUMB(cpusave); \
262 214 /* Clear the new crumb */ \
263 215 movq $KDI_NCRUMBS, tmp2; \
264 216 3: movq $0, -4(tmp1, tmp2, 4); \
265 217 decq tmp2; \
266 218 jnz 3b
267 219
268 220 /* Set a value in the current breadcrumb buffer */
269 221 #define ADD_CRUMB(cpusave, offset, value, tmp) \
270 222 movq KRS_CURCRUMB(cpusave), tmp; \
271 223 movq value, offset(tmp)
272 224
273 225 #endif /* _ASM */
274 226
275 227 #if defined(__lint)
276 228 void
277 229 kdi_cmnint(void)
278 230 {
279 231 }
280 232 #else /* __lint */
281 233
282 234 /* XXX implement me */
283 235 ENTRY_NP(kdi_nmiint)
284 236 clrq %rcx
285 237 movq (%rcx), %rcx
286 238 SET_SIZE(kdi_nmiint)
287 239
288 240 /*
289 241 * The main entry point for master CPUs. It also serves as the trap
290 242 * handler for all traps and interrupts taken during single-step.
291 243 */
292 244 ENTRY_NP(kdi_cmnint)
293 245 ALTENTRY(kdi_master_entry)
294 246
295 247 pushq %rax
296 248 CLI(%rax)
297 249 popq %rax
298 250
299 251 /* Save current register state */
300 252 subq $REG_OFF(KDIREG_TRAPNO), %rsp
301 253 KDI_SAVE_REGS(%rsp)
302 254
303 255 #ifdef __xpv
304 256 /*
305 257 * Clear saved_upcall_mask in unused byte of cs slot on stack.
306 258 * It can only confuse things.
307 259 */
308 260 movb $0, REG_OFF(KDIREG_CS)+4(%rsp)
309 261 #endif
310 262
311 263 #if !defined(__xpv)
312 264 /*
313 265 * Switch to the kernel's GSBASE. Neither GSBASE nor the ill-named
314 266 * KGSBASE can be trusted, as the kernel may or may not have already
315 267 * done a swapgs. All is not lost, as the kernel can divine the correct
316 268 * value for us. Note that the previous GSBASE is saved in the
317 269 * KDI_SAVE_REGS macro to prevent a usermode process's GSBASE from being
318 270 * blown away. On the hypervisor, we don't need to do this, since it's
319 271 * ensured we're on our requested kernel GSBASE already.
320 272 */
321 273 subq $10, %rsp
322 274 sgdt (%rsp)
323 275 movq 2(%rsp), %rdi /* gdt base now in %rdi */
324 276 addq $10, %rsp
325 277 call kdi_gdt2gsbase /* returns kernel's GSBASE in %rax */
326 278
327 279 movq %rax, %rdx
328 280 shrq $32, %rdx
329 281 movl $MSR_AMD_GSBASE, %ecx
330 282 wrmsr
331 283 #endif /* __xpv */
332 284
333 285 GET_CPUSAVE_ADDR /* %rax = cpusave, %rbx = CPU ID */
334 286
335 287 ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)
336 288
337 289 ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_MASTER, %rdx)
338 290
339 291 movq REG_OFF(KDIREG_RIP)(%rsp), %rcx
340 292 ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)
341 293 ADD_CRUMB(%rax, KRM_SP, %rsp, %rdx)
342 294 movq REG_OFF(KDIREG_TRAPNO)(%rsp), %rcx
343 295 ADD_CRUMB(%rax, KRM_TRAPNO, %rcx, %rdx)
344 296
345 297 movq %rsp, %rbp
346 298 pushq %rax
347 299
348 300 /*
349 301 * Were we in the debugger when we took the trap (i.e. was %esp in one
350 302 * of the debugger's memory ranges)?
351 303 */
352 304 leaq kdi_memranges, %rcx
353 305 movl kdi_nmemranges, %edx
354 306 1: cmpq MR_BASE(%rcx), %rsp
355 307 jl 2f /* below this range -- try the next one */
356 308 cmpq MR_LIM(%rcx), %rsp
357 309 jg 2f /* above this range -- try the next one */
358 310 jmp 3f /* matched within this range */
359 311
360 312 2: decl %edx
361 313 jz kdi_save_common_state /* %rsp not within debugger memory */
362 314 addq $MR_SIZE, %rcx
363 315 jmp 1b
364 316
365 317 3: /*
366 318 * The master is still set. That should only happen if we hit a trap
367 319 * while running in the debugger. Note that it may be an intentional
368 320 * fault. kmdb_dpi_handle_fault will sort it all out.
369 321 */
370 322
371 323 movq REG_OFF(KDIREG_TRAPNO)(%rbp), %rdi
372 324 movq REG_OFF(KDIREG_RIP)(%rbp), %rsi
373 325 movq REG_OFF(KDIREG_RSP)(%rbp), %rdx
374 326 movq %rbx, %rcx /* cpuid */
375 327
376 328 call kdi_dvec_handle_fault
377 329
378 330 /*
379 331 * If we're here, we ran into a debugger problem, and the user
380 332 * elected to solve it by having the debugger debug itself. The
381 333 * state we're about to save is that of the debugger when it took
382 334 * the fault.
383 335 */
384 336
385 337 jmp kdi_save_common_state
386 338
387 339 SET_SIZE(kdi_master_entry)
388 340 SET_SIZE(kdi_cmnint)
389 341
390 342 #endif /* __lint */
391 343
392 344 /*
393 345 * The cross-call handler for slave CPUs.
394 346 *
395 347 * The debugger is single-threaded, so only one CPU, called the master, may be
396 348 * running it at any given time. The other CPUs, known as slaves, spin in a
397 349 * busy loop until there's something for them to do. This is the entry point
398 350 * for the slaves - they'll be sent here in response to a cross-call sent by the
399 351 * master.
400 352 */
401 353
402 354 #if defined(__lint)
403 -char kdi_slave_entry_patch;
404 -
405 355 void
406 356 kdi_slave_entry(void)
407 357 {
408 358 }
409 359 #else /* __lint */
410 - .globl kdi_slave_entry_patch;
411 -
412 360 ENTRY_NP(kdi_slave_entry)
413 361
414 - /* kdi_msr_add_clrentry knows where this is */
415 -kdi_slave_entry_patch:
416 - KDI_MSR_PATCH;
417 -
418 362 /*
419 363 * Cross calls are implemented as function calls, so our stack currently
420 364 * looks like one you'd get from a zero-argument function call. That
421 365 * is, there's the return %rip at %rsp, and that's about it. We need
422 366 * to make it look like an interrupt stack. When we first save, we'll
423 367 * reverse the saved %ss and %rip, which we'll fix back up when we've
424 368 * freed up some general-purpose registers. We'll also need to fix up
425 369 * the saved %rsp.
426 370 */
427 371
428 372 pushq %rsp /* pushed value off by 8 */
429 373 pushfq
430 374 CLI(%rax)
431 375 pushq $KCS_SEL
432 376 clrq %rax
433 377 movw %ss, %ax
434 378 pushq %rax /* rip should be here */
435 379 pushq $-1 /* phony trap error code */
436 380 pushq $-1 /* phony trap number */
437 381
438 382 subq $REG_OFF(KDIREG_TRAPNO), %rsp
439 383 KDI_SAVE_REGS(%rsp)
440 384
441 385 movq REG_OFF(KDIREG_SS)(%rsp), %rax
442 386 xchgq REG_OFF(KDIREG_RIP)(%rsp), %rax
443 387 movq %rax, REG_OFF(KDIREG_SS)(%rsp)
444 388
445 389 movq REG_OFF(KDIREG_RSP)(%rsp), %rax
446 390 addq $8, %rax
447 391 movq %rax, REG_OFF(KDIREG_RSP)(%rsp)
448 392
449 393 /*
450 394 * We've saved all of the general-purpose registers, and have a stack
451 395 * that is irettable (after we strip down to the error code)
452 396 */
453 397
454 398 GET_CPUSAVE_ADDR /* %rax = cpusave, %rbx = CPU ID */
455 399
456 400 ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)
457 401
458 402 ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_SLAVE, %rdx)
459 403
460 404 movq REG_OFF(KDIREG_RIP)(%rsp), %rcx
461 405 ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)
462 406
463 407 pushq %rax
464 408 jmp kdi_save_common_state
465 409
466 410 SET_SIZE(kdi_slave_entry)
467 411
468 412 #endif /* __lint */
469 413
470 414 /*
471 415 * The state of the world:
472 416 *
473 417 * The stack has a complete set of saved registers and segment
474 418 * selectors, arranged in the kdi_regs.h order. It also has a pointer
475 419 * to our cpusave area.
476 420 *
477 421 * We need to save, into the cpusave area, a pointer to these saved
478 422 * registers. First we check whether we should jump straight back to
479 423 * the kernel. If not, we save a few more registers, ready the
480 424 * machine for debugger entry, and enter the debugger.
481 425 */
482 426
483 427 #if !defined(__lint)
484 428
485 429 ENTRY_NP(kdi_save_common_state)
486 430
487 431 popq %rdi /* the cpusave area */
488 432 movq %rsp, KRS_GREGS(%rdi) /* save ptr to current saved regs */
489 433
490 434 pushq %rdi
491 435 call kdi_trap_pass
492 436 cmpq $1, %rax
493 437 je kdi_pass_to_kernel
494 438 popq %rax /* cpusave in %rax */
495 439
496 440 SAVE_IDTGDT
497 441
498 442 #if !defined(__xpv)
499 443 /* Save off %cr0, and clear write protect */
500 444 movq %cr0, %rcx
501 445 movq %rcx, KRS_CR0(%rax)
502 446 andq $_BITNOT(CR0_WP), %rcx
503 447 movq %rcx, %cr0
504 448 #endif
505 449
506 450 /* Save the debug registers and disable any active watchpoints */
507 451
508 452 movq %rax, %r15 /* save cpusave area ptr */
509 453 movl $7, %edi
510 454 call kdi_dreg_get
511 455 movq %rax, KRS_DRCTL(%r15)
512 456
513 457 andq $_BITNOT(KDIREG_DRCTL_WPALLEN_MASK), %rax
514 458 movq %rax, %rsi
515 459 movl $7, %edi
516 460 call kdi_dreg_set
517 461
518 462 movl $6, %edi
519 463 call kdi_dreg_get
520 464 movq %rax, KRS_DRSTAT(%r15)
521 465
522 466 movl $0, %edi
523 467 call kdi_dreg_get
524 468 movq %rax, KRS_DROFF(0)(%r15)
525 469
526 470 movl $1, %edi
527 471 call kdi_dreg_get
528 472 movq %rax, KRS_DROFF(1)(%r15)
529 473
530 474 movl $2, %edi
531 475 call kdi_dreg_get
532 476 movq %rax, KRS_DROFF(2)(%r15)
533 477
534 478 movl $3, %edi
535 479 call kdi_dreg_get
536 480 movq %rax, KRS_DROFF(3)(%r15)
537 481
538 482 movq %r15, %rax /* restore cpu save area to rax */
539 483
540 - /*
541 - * Save any requested MSRs.
542 - */
543 - movq KRS_MSR(%rax), %rcx
544 - cmpq $0, %rcx
545 - je no_msr
546 -
547 - pushq %rax /* rdmsr clobbers %eax */
548 - movq %rcx, %rbx
549 -
550 -1:
551 - movl MSR_NUM(%rbx), %ecx
552 - cmpl $0, %ecx
553 - je msr_done
554 -
555 - movl MSR_TYPE(%rbx), %edx
556 - cmpl $KDI_MSR_READ, %edx
557 - jne msr_next
558 -
559 - rdmsr /* addr in %ecx, value into %edx:%eax */
560 - movl %eax, MSR_VAL(%rbx)
561 - movl %edx, _CONST(MSR_VAL + 4)(%rbx)
562 -
563 -msr_next:
564 - addq $MSR_SIZE, %rbx
565 - jmp 1b
566 -
567 -msr_done:
568 - popq %rax
569 -
570 -no_msr:
571 484 clrq %rbp /* stack traces should end here */
572 485
573 486 pushq %rax
574 487 movq %rax, %rdi /* cpusave */
575 488
576 489 call kdi_debugger_entry
577 490
578 491 /* Pass cpusave to kdi_resume */
579 492 popq %rdi
580 493
581 494 jmp kdi_resume
582 495
583 496 SET_SIZE(kdi_save_common_state)
584 497
585 498 #endif /* !__lint */
586 499
587 500 /*
588 501 * Resume the world. The code that calls kdi_resume has already
589 502 * decided whether or not to restore the IDT.
590 503 */
591 504 #if defined(__lint)
592 505 void
593 506 kdi_resume(void)
594 507 {
595 508 }
596 509 #else /* __lint */
597 510
598 511 /* cpusave in %rdi */
599 512 ENTRY_NP(kdi_resume)
600 513
601 514 /*
602 515 * Send this CPU back into the world
603 516 */
604 517 #if !defined(__xpv)
605 518 movq KRS_CR0(%rdi), %rdx
606 519 movq %rdx, %cr0
607 520 #endif
608 521
609 522 KDI_RESTORE_DEBUGGING_STATE
610 523
611 524 movq KRS_GREGS(%rdi), %rsp
612 525 KDI_RESTORE_REGS(%rsp)
613 526 addq $REG_OFF(KDIREG_RIP), %rsp /* Discard state, trapno, err */
614 527 IRET
615 528 /*NOTREACHED*/
616 529 SET_SIZE(kdi_resume)
617 530
618 531 #endif /* __lint */
619 532
620 533 #if !defined(__lint)
621 534
622 535 ENTRY_NP(kdi_pass_to_kernel)
623 536
624 537 popq %rdi /* cpusave */
625 538
626 539 movq $KDI_CPU_STATE_NONE, KRS_CPU_STATE(%rdi)
627 540
628 541 /*
629 542 * Find the trap and vector off the right kernel handler. The trap
630 543 * handler will expect the stack to be in trap order, with %rip being
631 544 * the last entry, so we'll need to restore all our regs. On i86xpv
632 545 * we'll need to compensate for XPV_TRAP_POP.
633 546 *
634 547 * We're hard-coding the three cases where KMDB has installed permanent
635 548 * handlers, since after we KDI_RESTORE_REGS(), we don't have registers
636 549 * to work with; we can't use a global since other CPUs can easily pass
637 550 * through here at the same time.
638 551 *
639 552 * Note that we handle T_DBGENTR since userspace might have tried it.
640 553 */
641 554 movq KRS_GREGS(%rdi), %rsp
642 555 movq REG_OFF(KDIREG_TRAPNO)(%rsp), %rdi
643 556 cmpq $T_SGLSTP, %rdi
644 557 je 1f
645 558 cmpq $T_BPTFLT, %rdi
646 559 je 2f
647 560 cmpq $T_DBGENTR, %rdi
648 561 je 3f
649 562 /*
650 563 * Hmm, unknown handler. Somebody forgot to update this when they
651 564 * added a new trap interposition... try to drop back into kmdb.
652 565 */
653 566 int $T_DBGENTR
654 567
655 568 #define CALL_TRAP_HANDLER(name) \
656 569 KDI_RESTORE_REGS(%rsp); \
657 570 /* Discard state, trapno, err */ \
658 571 addq $REG_OFF(KDIREG_RIP), %rsp; \
659 572 XPV_TRAP_PUSH; \
660 573 jmp %cs:name
661 574
662 575 1:
663 576 CALL_TRAP_HANDLER(dbgtrap)
664 577 /*NOTREACHED*/
665 578 2:
666 579 CALL_TRAP_HANDLER(brktrap)
667 580 /*NOTREACHED*/
668 581 3:
669 582 CALL_TRAP_HANDLER(invaltrap)
670 583 /*NOTREACHED*/
671 584
672 585 SET_SIZE(kdi_pass_to_kernel)
673 586
674 587 /*
675 588 * A minimal version of mdboot(), to be used by the master CPU only.
676 589 */
677 590 ENTRY_NP(kdi_reboot)
678 591
679 592 movl $AD_BOOT, %edi
680 593 movl $A_SHUTDOWN, %esi
681 594 call *psm_shutdownf
682 595 #if defined(__xpv)
683 596 movl $SHUTDOWN_reboot, %edi
684 597 call HYPERVISOR_shutdown
685 598 #else
686 599 call reset
687 600 #endif
688 601 /*NOTREACHED*/
689 602
690 603 SET_SIZE(kdi_reboot)
691 604
692 605 #endif /* !__lint */
693 606
694 607 #if defined(__lint)
695 608 /*ARGSUSED*/
696 609 void
697 610 kdi_cpu_debug_init(kdi_cpusave_t *save)
698 611 {
699 612 }
700 613 #else /* __lint */
701 614
702 615 ENTRY_NP(kdi_cpu_debug_init)
703 616 pushq %rbp
704 617 movq %rsp, %rbp
705 618
706 619 pushq %rbx /* macro will clobber %rbx */
707 620 KDI_RESTORE_DEBUGGING_STATE
708 621 popq %rbx
709 622
710 623 leave
711 624 ret
712 625
713 626 SET_SIZE(kdi_cpu_debug_init)
714 627 #endif /* !__lint */
715 628
135 lines elided |