9210 remove KMDB branch debugging support
9211 ::crregs could do with cr2/cr3 support
9209 ::ttrace should be able to filter by thread
Reviewed by: Patrick Mooney <patrick.mooney@joyent.com>
--- old/usr/src/uts/intel/kdi/ia32/kdi_asm.s
+++ new/usr/src/uts/intel/kdi/ia32/kdi_asm.s
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 + *
26 + * Copyright 2018 Joyent, Inc.
25 27 */
26 28
27 -#pragma ident "%Z%%M% %I% %E% SMI"
28 -
29 29 /*
30 30 * Debugger entry for both master and slave CPUs
31 31 */
32 32
33 33 #if defined(__lint)
34 34 #include <sys/types.h>
35 35 #endif
36 36
37 37 #include <sys/segments.h>
38 38 #include <sys/asm_linkage.h>
39 39 #include <sys/controlregs.h>
40 40 #include <sys/x86_archext.h>
41 41 #include <sys/privregs.h>
42 42 #include <sys/machprivregs.h>
43 43 #include <sys/kdi_regs.h>
44 44 #include <sys/uadmin.h>
45 45 #include <sys/psw.h>
46 46
47 47 #ifdef _ASM
48 48
49 49 #include <kdi_assym.h>
50 50 #include <assym.h>
51 51
52 52 /* clobbers %edx, %ecx, returns addr in %eax, cpu id in %ebx */
53 53 #define GET_CPUSAVE_ADDR \
54 54 movl %gs:CPU_ID, %ebx; \
55 55 movl %ebx, %eax; \
56 56 movl $KRS_SIZE, %ecx; \
57 57 mull %ecx; \
58 58 movl $kdi_cpusave, %edx; \
59 59 /*CSTYLED*/ \
60 60 addl (%edx), %eax
61 61
62 62 /*
63 63 * Save copies of the IDT and GDT descriptors. Note that we only save the IDT
64 64 * and GDT if the IDT isn't ours, as we may be legitimately re-entering the
65 65 * debugger through the trap handler. We don't want to clobber the saved IDT
66 66 * in the process, as we'd end up resuming the world on our IDT.
67 67 */
68 68 #define SAVE_IDTGDT \
69 69 movl %gs:CPU_IDT, %edx; \
70 70 cmpl $kdi_idt, %edx; \
71 71 je 1f; \
72 72 movl %edx, KRS_IDT(%eax); \
73 73 movl %gs:CPU_GDT, %edx; \
74 74 movl %edx, KRS_GDT(%eax); \
75 75 1:
76 76
77 77 /*
78 78 * Given the address of the current CPU's cpusave area in %edi, the following
79 79 * macro restores the debugging state to said CPU. Restored state includes
80 - * the debug registers from the global %dr variables, and debugging MSRs from
81 - * the CPU save area. This code would be in a separate routine, but for the
82 - * fact that some of the MSRs are jump-sensitive. As such, we need to minimize
83 - * the number of jumps taken subsequent to the update of said MSRs. We can
84 - * remove one jump (the ret) by using a macro instead of a function for the
85 - * debugging state restoration code.
86 - *
87 - * Takes the cpusave area in %edi as a parameter, clobbers %eax-%edx
88 - */
80 + * the debug registers from the global %dr variables.
81 + */
89 82 #define KDI_RESTORE_DEBUGGING_STATE \
90 83 leal kdi_drreg, %ebx; \
91 84 \
92 85 pushl DR_CTL(%ebx); \
93 86 pushl $7; \
94 87 call kdi_dreg_set; \
95 88 addl $8, %esp; \
96 89 \
97 90 pushl $KDIREG_DRSTAT_RESERVED; \
98 91 pushl $6; \
99 92 call kdi_dreg_set; \
100 93 addl $8, %esp; \
101 94 \
102 95 pushl DRADDR_OFF(0)(%ebx); \
103 96 pushl $0; \
104 97 call kdi_dreg_set; \
105 98 addl $8, %esp; \
106 99 \
107 100 pushl DRADDR_OFF(1)(%ebx); \
108 101 pushl $1; \
109 102 call kdi_dreg_set; \
110 103 addl $8, %esp; \
111 104 \
112 105 pushl DRADDR_OFF(2)(%ebx); \
113 106 pushl $2; \
114 107 call kdi_dreg_set; \
115 108 addl $8, %esp; \
116 109 \
117 110 pushl DRADDR_OFF(3)(%ebx); \
118 111 pushl $3; \
119 112 call kdi_dreg_set; \
120 - addl $8, %esp; \
121 - \
122 - /* \
123 - * Write any requested MSRs. \
124 - */ \
125 - movl KRS_MSR(%edi), %ebx; \
126 - cmpl $0, %ebx; \
127 - je 3f; \
128 -1: \
129 - movl MSR_NUM(%ebx), %ecx; \
130 - cmpl $0, %ecx; \
131 - je 3f; \
132 - \
133 - movl MSR_TYPE(%ebx), %edx; \
134 - cmpl $KDI_MSR_WRITE, %edx; \
135 - jne 2f; \
136 - \
137 - movl MSR_VALP(%ebx), %edx; \
138 - movl 0(%edx), %eax; \
139 - movl 4(%edx), %edx; \
140 - wrmsr; \
141 -2: \
142 - addl $MSR_SIZE, %ebx; \
143 - jmp 1b; \
144 -3: \
145 - /* \
146 - * We must not branch after re-enabling LBR. If \
147 - * kdi_wsr_wrexit_msr is set, it contains the number \
148 - * of the MSR that controls LBR. kdi_wsr_wrexit_valp \
149 - * contains the value that is to be written to enable \
150 - * LBR. \
151 - */ \
152 - movl kdi_msr_wrexit_msr, %ecx; \
153 - cmpl $0, %ecx; \
154 - je 1f; \
155 - \
156 - movl kdi_msr_wrexit_valp, %edx; \
157 - movl 0(%edx), %eax; \
158 - movl 4(%edx), %edx; \
159 - \
160 - wrmsr; \
161 -1:
113 + addl $8, %esp;
162 114
163 115 #define KDI_RESTORE_REGS() \
164 116 /* Discard savfp and savpc */ \
165 117 addl $8, %esp; \
166 118 popl %ss; \
167 119 popl %gs; \
168 120 popl %fs; \
169 121 popl %es; \
170 122 popl %ds; \
171 123 popal; \
172 124 /* Discard trapno and err */ \
173 125 addl $8, %esp
174 126
175 127 /*
176 128 * Each cpusave buffer has an area set aside for a ring buffer of breadcrumbs.
177 129 * The following macros manage the buffer.
178 130 */
179 131
180 132 /* Advance the ring buffer */
181 133 #define ADVANCE_CRUMB_POINTER(cpusave, tmp1, tmp2) \
182 134 movl KRS_CURCRUMBIDX(cpusave), tmp1; \
183 135 cmpl $[KDI_NCRUMBS - 1], tmp1; \
184 136 jge 1f; \
185 137 /* Advance the pointer and index */ \
186 138 addl $1, tmp1; \
187 139 movl tmp1, KRS_CURCRUMBIDX(cpusave); \
188 140 movl KRS_CURCRUMB(cpusave), tmp1; \
189 141 addl $KRM_SIZE, tmp1; \
190 142 jmp 2f; \
191 143 1: /* Reset the pointer and index */ \
192 144 movw $0, KRS_CURCRUMBIDX(cpusave); \
193 145 leal KRS_CRUMBS(cpusave), tmp1; \
194 146 2: movl tmp1, KRS_CURCRUMB(cpusave); \
195 147 /* Clear the new crumb */ \
196 148 movl $KDI_NCRUMBS, tmp2; \
197 149 3: movl $0, -4(tmp1, tmp2, 4); \
198 150 decl tmp2; \
199 151 jnz 3b
200 152
201 153 /* Set a value in the current breadcrumb buffer */
202 154 #define ADD_CRUMB(cpusave, offset, value, tmp) \
203 155 movl KRS_CURCRUMB(cpusave), tmp; \
204 156 movl value, offset(tmp)
205 157
206 158 #endif /* _ASM */
207 159
208 160 /*
209 161 * The main entry point for master CPUs. It also serves as the trap handler
210 162 * for all traps and interrupts taken during single-step.
211 163 */
212 164 #if defined(__lint)
213 165 void
214 166 kdi_cmnint(void)
215 167 {
216 168 }
217 169 #else /* __lint */
218 170
219 171 /* XXX implement me */
220 172 ENTRY_NP(kdi_nmiint)
221 173 clr %ecx
222 174 movl (%ecx), %ecx
223 175 SET_SIZE(kdi_nmiint)
224 176
225 177 ENTRY_NP(kdi_cmnint)
226 178 ALTENTRY(kdi_master_entry)
227 179
228 180 /* Save all registers and selectors */
229 181
230 182 pushal
231 183 pushl %ds
232 184 pushl %es
233 185 pushl %fs
234 186 pushl %gs
235 187 pushl %ss
236 188
237 189 subl $8, %esp
238 190 movl %ebp, REG_OFF(KDIREG_SAVFP)(%esp)
239 191 movl REG_OFF(KDIREG_EIP)(%esp), %eax
240 192 movl %eax, REG_OFF(KDIREG_SAVPC)(%esp)
241 193
242 194 /*
243 195 * If the kernel has started using its own selectors, we should too.
244 196 * Update our saved selectors if they haven't been updated already.
245 197 */
246 198 movw %cs, %ax
247 199 cmpw $KCS_SEL, %ax
248 200 jne 1f /* The kernel hasn't switched yet */
249 201
250 202 movw $KDS_SEL, %ax
251 203 movw %ax, %ds
252 204 movw kdi_cs, %ax
253 205 cmpw $KCS_SEL, %ax
254 206 je 1f /* We already switched */
255 207
256 208 /*
257 209 * The kernel switched, but we haven't. Update our saved selectors
258 210 * to match the kernel's copies for use below.
259 211 */
260 212 movl $KCS_SEL, kdi_cs
261 213 movl $KDS_SEL, kdi_ds
262 214 movl $KFS_SEL, kdi_fs
263 215 movl $KGS_SEL, kdi_gs
264 216
265 217 1:
266 218 /*
267 219 * Set the selectors to a known state. If we come in from kmdb's IDT,
268 220 * we'll be on boot's %cs. This will cause GET_CPUSAVE_ADDR to return
269 221 * CPU 0's cpusave, regardless of which CPU we're on, and chaos will
270 222 * ensue. So, if we've got $KCSSEL in kdi_cs, switch to it. The other
271 223 * selectors are restored normally.
272 224 */
273 225 movw %cs:kdi_cs, %ax
274 226 cmpw $KCS_SEL, %ax
275 227 jne 1f
276 228 ljmp $KCS_SEL, $1f
277 229 1:
278 230 movw %cs:kdi_ds, %ds
279 231 movw kdi_ds, %es
280 232 movw kdi_fs, %fs
281 233 movw kdi_gs, %gs
282 234 movw kdi_ds, %ss
283 235
284 236 /*
285 237 * This has to come after we set %gs to the kernel descriptor. Since
286 238 * we've hijacked some IDT entries used in user-space such as the
287 239 * breakpoint handler, we can enter kdi_cmnint() with GDT_LWPGS used
288 240 * in %gs. On the hypervisor, CLI() needs GDT_GS to access the machcpu.
289 241 */
290 242 CLI(%eax)
291 243
292 244 #if defined(__xpv)
293 245 /*
294 246 * Clear saved_upcall_mask in unused byte of cs slot on stack.
295 247 * It can only confuse things.
296 248 */
297 249 movb $0, REG_OFF(KDIREG_CS)+2(%esp)
298 250
299 251 #endif
300 252
301 253 GET_CPUSAVE_ADDR /* %eax = cpusave, %ebx = CPU ID */
302 254
303 255 ADVANCE_CRUMB_POINTER(%eax, %ecx, %edx)
304 256
305 257 ADD_CRUMB(%eax, KRM_CPU_STATE, $KDI_CPU_STATE_MASTER, %edx)
306 258
307 259 movl REG_OFF(KDIREG_EIP)(%esp), %ecx
308 260 ADD_CRUMB(%eax, KRM_PC, %ecx, %edx)
309 261 ADD_CRUMB(%eax, KRM_SP, %esp, %edx)
310 262 movl REG_OFF(KDIREG_TRAPNO)(%esp), %ecx
311 263 ADD_CRUMB(%eax, KRM_TRAPNO, %ecx, %edx)
312 264
313 265 movl %esp, %ebp
314 266 pushl %eax
315 267
316 268 /*
317 269 * Were we in the debugger when we took the trap (i.e. was %esp in one
318 270 * of the debugger's memory ranges)?
319 271 */
320 272 leal kdi_memranges, %ecx
321 273 movl kdi_nmemranges, %edx
322 274 1: cmpl MR_BASE(%ecx), %esp
323 275 jl 2f /* below this range -- try the next one */
324 276 cmpl MR_LIM(%ecx), %esp
325 277 jg 2f /* above this range -- try the next one */
326 278 jmp 3f /* matched within this range */
327 279
328 280 2: decl %edx
329 281 jz kdi_save_common_state /* %esp not within debugger memory */
330 282 addl $MR_SIZE, %ecx
331 283 jmp 1b
332 284
333 285 3: /*
334 286 * %esp was within one of the debugger's memory ranges. This should
335 287 * only happen when we take a trap while running in the debugger.
336 288 * kmdb_dpi_handle_fault will determine whether or not it was an
337 289 * expected trap, and will take the appropriate action.
338 290 */
339 291
340 292 pushl %ebx /* cpuid */
341 293
342 294 movl REG_OFF(KDIREG_ESP)(%ebp), %ecx
343 295 addl $REG_OFF(KDIREG_EFLAGS - KDIREG_EAX), %ecx
344 296 pushl %ecx
345 297
346 298 pushl REG_OFF(KDIREG_EIP)(%ebp)
347 299 pushl REG_OFF(KDIREG_TRAPNO)(%ebp)
348 300
349 301 call kdi_dvec_handle_fault
350 302 addl $16, %esp
351 303
352 304 /*
353 305 * If we're here, we ran into a debugger problem, and the user
354 306 * elected to solve it by having the debugger debug itself. The
355 307 * state we're about to save is that of the debugger when it took
356 308 * the fault.
357 309 */
358 310
359 311 jmp kdi_save_common_state
360 312
361 313 SET_SIZE(kdi_master_entry)
362 314 SET_SIZE(kdi_cmnint)
363 315
364 316 #endif /* __lint */
365 317
366 318 /*
367 319 * The cross-call handler for slave CPUs.
368 320 *
369 321 * The debugger is single-threaded, so only one CPU, called the master, may be
370 322 * running it at any given time. The other CPUs, known as slaves, spin in a
371 323 * busy loop until there's something for them to do. This is the entry point
372 324 * for the slaves - they'll be sent here in response to a cross-call sent by the
373 325 * master.
374 326 */
375 327
376 328 #if defined(__lint)
377 -char kdi_slave_entry_patch;
378 -
379 329 void
380 330 kdi_slave_entry(void)
381 331 {
382 332 }
383 333 #else /* __lint */
384 - .globl kdi_slave_entry_patch;
385 -
386 334 ENTRY_NP(kdi_slave_entry)
387 335
388 - /* kdi_msr_add_clrentry knows where this is */
389 -kdi_slave_entry_patch:
390 - KDI_MSR_PATCH;
391 -
392 336 /*
393 337 * Cross calls are implemented as function calls, so our stack
394 338 * currently looks like one you'd get from a zero-argument function
395 339 * call. There's an %eip at %esp, and that's about it. We want to
396 340 * make it look like the master CPU's stack. By doing this, we can
397 341 * use the same resume code for both master and slave. We need to
398 342 * make our stack look like a `struct regs' before we jump into the
399 343 * common save routine.
400 344 */
401 345
402 346 pushl %cs
403 347 pushfl
404 348 pushl $-1 /* A phony trap error code */
405 349 pushl $-1 /* A phony trap number */
406 350 pushal
407 351 pushl %ds
408 352 pushl %es
409 353 pushl %fs
410 354 pushl %gs
411 355 pushl %ss
412 356
413 357 subl $8, %esp
414 358 movl %ebp, REG_OFF(KDIREG_SAVFP)(%esp)
415 359 movl REG_OFF(KDIREG_EIP)(%esp), %eax
416 360 movl %eax, REG_OFF(KDIREG_SAVPC)(%esp)
417 361
418 362 /*
419 363 * Swap our saved EFLAGS and %eip. Each is where the other
420 364 * should be.
421 365 */
422 366 movl REG_OFF(KDIREG_EFLAGS)(%esp), %eax
423 367 xchgl REG_OFF(KDIREG_EIP)(%esp), %eax
424 368 movl %eax, REG_OFF(KDIREG_EFLAGS)(%esp)
425 369
426 370 /*
427 371 * Our stack now matches struct regs, and is irettable. We don't need
428 372 * to do anything special for the hypervisor w.r.t. PS_IE since we
429 373 * iret twice anyway; the second iret back to the hypervisor
430 374 * will re-enable interrupts.
431 375 */
432 376 CLI(%eax)
433 377
434 378 /* Load sanitized segment selectors */
435 379 movw kdi_ds, %ds
436 380 movw kdi_ds, %es
437 381 movw kdi_fs, %fs
438 382 movw kdi_gs, %gs
439 383 movw kdi_ds, %ss
440 384
441 385 GET_CPUSAVE_ADDR /* %eax = cpusave, %ebx = CPU ID */
442 386
443 387 ADVANCE_CRUMB_POINTER(%eax, %ecx, %edx)
444 388
445 389 ADD_CRUMB(%eax, KRM_CPU_STATE, $KDI_CPU_STATE_SLAVE, %edx)
446 390
447 391 movl REG_OFF(KDIREG_EIP)(%esp), %ecx
448 392 ADD_CRUMB(%eax, KRM_PC, %ecx, %edx)
449 393
450 394 pushl %eax
451 395 jmp kdi_save_common_state
452 396
453 397 SET_SIZE(kdi_slave_entry)
454 398
455 399 #endif /* __lint */
456 400
457 401 /*
458 402 * The state of the world:
459 403 *
460 404 * The stack has a complete set of saved registers and segment
461 405 * selectors, arranged in `struct regs' order (or vice-versa), up to
462 406 * and including EFLAGS. It also has a pointer to our cpusave area.
463 407 *
464 408 * We need to save a pointer to these saved registers. We also want
465 409 * to adjust the saved %esp - it should point just beyond the saved
466 410 * registers to the last frame of the thread we interrupted. Finally,
467 411 * we want to clear out bits 16-31 of the saved selectors, as the
468 412 * selector pushls don't automatically clear them.
469 413 */
470 414 #if !defined(__lint)
471 415
472 416 ENTRY_NP(kdi_save_common_state)
473 417
474 418 popl %eax /* the cpusave area */
475 419
476 420 movl %esp, KRS_GREGS(%eax) /* save ptr to current saved regs */
477 421
478 422 addl $REG_OFF(KDIREG_EFLAGS - KDIREG_EAX), KDIREG_OFF(KDIREG_ESP)(%esp)
479 423
480 424 andl $0xffff, KDIREG_OFF(KDIREG_SS)(%esp)
481 425 andl $0xffff, KDIREG_OFF(KDIREG_GS)(%esp)
482 426 andl $0xffff, KDIREG_OFF(KDIREG_FS)(%esp)
483 427 andl $0xffff, KDIREG_OFF(KDIREG_ES)(%esp)
484 428 andl $0xffff, KDIREG_OFF(KDIREG_DS)(%esp)
485 429
486 430 pushl %eax
487 431 call kdi_trap_pass
488 432 cmpl $1, %eax
489 433 je kdi_pass_to_kernel
490 434 popl %eax
491 435
492 436 SAVE_IDTGDT
493 437
494 438 #if !defined(__xpv)
495 439 /* Save off %cr0, and clear write protect */
496 440 movl %cr0, %ecx
497 441 movl %ecx, KRS_CR0(%eax)
498 442 andl $_BITNOT(CR0_WP), %ecx
499 443 movl %ecx, %cr0
500 444 #endif
501 445 pushl %edi
502 446 movl %eax, %edi
503 447
504 448 /* Save the debug registers and disable any active watchpoints */
505 449 pushl $7
506 450 call kdi_dreg_get
507 451 addl $4, %esp
508 452
509 453 movl %eax, KRS_DRCTL(%edi)
510 454 andl $_BITNOT(KDIREG_DRCTL_WPALLEN_MASK), %eax
511 455
512 456 pushl %eax
513 457 pushl $7
514 458 call kdi_dreg_set
515 459 addl $8, %esp
516 460
517 461 pushl $6
518 462 call kdi_dreg_get
519 463 addl $4, %esp
520 464 movl %eax, KRS_DRSTAT(%edi)
521 465
522 466 pushl $0
523 467 call kdi_dreg_get
524 468 addl $4, %esp
525 469 movl %eax, KRS_DROFF(0)(%edi)
526 470
527 471 pushl $1
528 472 call kdi_dreg_get
529 473 addl $4, %esp
530 474 movl %eax, KRS_DROFF(1)(%edi)
531 475
532 476 pushl $2
533 477 call kdi_dreg_get
534 478 addl $4, %esp
535 479 movl %eax, KRS_DROFF(2)(%edi)
536 480
537 481 pushl $3
538 482 call kdi_dreg_get
539 483 addl $4, %esp
540 484 movl %eax, KRS_DROFF(3)(%edi)
541 485
542 486 movl %edi, %eax
543 487 popl %edi
544 488
545 - /*
546 - * Save any requested MSRs.
547 - */
548 - movl KRS_MSR(%eax), %ecx
549 - cmpl $0, %ecx
550 - je no_msr
551 -
552 - pushl %eax /* rdmsr clobbers %eax */
553 - movl %ecx, %ebx
554 -1:
555 - movl MSR_NUM(%ebx), %ecx
556 - cmpl $0, %ecx
557 - je msr_done
558 -
559 - movl MSR_TYPE(%ebx), %edx
560 - cmpl $KDI_MSR_READ, %edx
561 - jne msr_next
562 -
563 - rdmsr /* addr in %ecx, value into %edx:%eax */
564 - movl %eax, MSR_VAL(%ebx)
565 - movl %edx, _CONST(MSR_VAL + 4)(%ebx)
566 -
567 -msr_next:
568 - addl $MSR_SIZE, %ebx
569 - jmp 1b
570 -
571 -msr_done:
572 - popl %eax
573 -
574 -no_msr:
575 489 clr %ebp /* stack traces should end here */
576 490
577 491 pushl %eax
578 492 call kdi_debugger_entry
579 493 popl %eax
580 -
494 +
581 495 jmp kdi_resume
582 496
583 497 SET_SIZE(kdi_save_common_state)
584 498
585 499 #endif /* !__lint */
586 500
587 501 /*
588 502 * Resume the world. The code that calls kdi_resume has already
589 503 * decided whether or not to restore the IDT.
590 504 */
591 505 #if defined(__lint)
592 506 void
593 507 kdi_resume(void)
594 508 {
595 509 }
596 510 #else /* __lint */
597 511
598 512 /* cpusave in %eax */
599 513 ENTRY_NP(kdi_resume)
600 514
601 515 /*
602 516 * Send this CPU back into the world
603 517 */
604 518
605 519 #if !defined(__xpv)
606 520 movl KRS_CR0(%eax), %edx
607 521 movl %edx, %cr0
608 522 #endif
609 523
610 524 pushl %edi
611 525 movl %eax, %edi
612 526
613 527 KDI_RESTORE_DEBUGGING_STATE
614 528
615 529 popl %edi
616 530
617 531 #if defined(__xpv)
618 532 /*
619 533 * kmdb might have set PS_T in the saved eflags, so we can't use
620 534 * intr_restore, since that restores all of eflags; instead, just
621 535 * pick up PS_IE from the saved eflags.
622 536 */
623 537 movl REG_OFF(KDIREG_EFLAGS)(%esp), %eax
624 538 testl $PS_IE, %eax
625 539 jz 2f
626 540 STI
627 541 2:
628 542 #endif
629 543
630 544 addl $8, %esp /* Discard savfp and savpc */
631 545
632 546 popl %ss
633 547 popl %gs
634 548 popl %fs
635 549 popl %es
636 550 popl %ds
637 551 popal
638 552
639 553 addl $8, %esp /* Discard TRAPNO and ERROR */
640 554
641 555 IRET
642 556
643 557 SET_SIZE(kdi_resume)
644 558 #endif /* __lint */
645 559
646 560 #if !defined(__lint)
647 561
648 562 ENTRY_NP(kdi_pass_to_kernel)
649 563
650 564 /* pop cpusave, leaving %esp pointing to saved regs */
651 565 popl %eax
652 566
653 567 movl $KDI_CPU_STATE_NONE, KRS_CPU_STATE(%eax)
654 568
655 569 /*
656 570 * Find the trap and vector off the right kernel handler. The trap
657 571 * handler will expect the stack to be in trap order, with %eip being
658 572 * the last entry, so we'll need to restore all our regs.
659 573 *
660 574 * We're hard-coding the three cases where KMDB has installed permanent
661 575 * handlers, since after we restore, we don't have registers to work
662 576 * with; we can't use a global since other CPUs can easily pass through
663 577 * here at the same time.
664 578 *
665 579 * Note that we handle T_DBGENTR since userspace might have tried it.
666 580 */
667 581 movl REG_OFF(KDIREG_TRAPNO)(%esp), %eax
668 582 cmpl $T_SGLSTP, %eax
669 583 je kpass_dbgtrap
670 584 cmpl $T_BPTFLT, %eax
671 585 je kpass_brktrap
672 586 cmpl $T_DBGENTR, %eax
673 587 je kpass_invaltrap
674 588 /*
675 589 * Hmm, unknown handler. Somebody forgot to update this when they
676 590 * added a new trap interposition... try to drop back into kmdb.
677 591 */
678 592 int $T_DBGENTR
679 593
680 594 kpass_dbgtrap:
681 595 KDI_RESTORE_REGS()
682 596 ljmp $KCS_SEL, $1f
683 597 1: jmp %cs:dbgtrap
684 598 /*NOTREACHED*/
685 599
686 600 kpass_brktrap:
687 601 KDI_RESTORE_REGS()
688 602 ljmp $KCS_SEL, $2f
689 603 2: jmp %cs:brktrap
690 604 /*NOTREACHED*/
691 605
692 606 kpass_invaltrap:
693 607 KDI_RESTORE_REGS()
694 608 ljmp $KCS_SEL, $3f
695 609 3: jmp %cs:invaltrap
696 610 /*NOTREACHED*/
697 611
698 612 SET_SIZE(kdi_pass_to_kernel)
699 613
700 614 /*
701 615 * A minimal version of mdboot(), to be used by the master CPU only.
702 616 */
703 617 ENTRY_NP(kdi_reboot)
704 618
705 619 pushl $AD_BOOT
706 620 pushl $A_SHUTDOWN
707 621 call *psm_shutdownf
708 622 addl $8, %esp
709 623
710 624 #if defined(__xpv)
711 625 pushl $SHUTDOWN_reboot
712 626 call HYPERVISOR_shutdown
713 627 #else
714 628 call reset
715 629 #endif
716 630 /*NOTREACHED*/
717 631
718 632 SET_SIZE(kdi_reboot)
719 633
720 634 #endif /* !__lint */
721 635
722 636 #if defined(__lint)
723 637 /*ARGSUSED*/
724 638 void
725 639 kdi_cpu_debug_init(kdi_cpusave_t *save)
726 640 {
727 641 }
728 642 #else /* __lint */
729 643
730 644 ENTRY_NP(kdi_cpu_debug_init)
731 645 pushl %ebp
732 646 movl %esp, %ebp
733 647
734 648 pushl %edi
735 649 pushl %ebx
736 650
737 651 movl 8(%ebp), %edi
738 652
739 653 KDI_RESTORE_DEBUGGING_STATE
740 654
741 655 popl %ebx
742 656 popl %edi
743 657 leave
744 658 ret
745 659
746 660 SET_SIZE(kdi_cpu_debug_init)
747 661 #endif /* !__lint */
748 662
158 lines elided
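For reviewers less familiar with the KDI breadcrumb machinery touched above, the following is a minimal C sketch of what GET_CPUSAVE_ADDR and ADVANCE_CRUMB_POINTER/ADD_CRUMB do. The structure layouts, the KDI_NCRUMBS value, and the helper names here are assumptions inferred from the assembly, not the in-tree definitions (which live in the kdi headers); this is an illustration only.

	#include <stdint.h>
	#include <string.h>

	#define	KDI_NCRUMBS	5	/* assumed ring size; see the kdi headers for the real value */

	typedef struct kdi_crumb {
		/* Fields referenced in the diff; the real struct may carry more. */
		uint32_t	krm_cpu_state;	/* master, slave, or none */
		uint32_t	krm_pc;
		uint32_t	krm_sp;
		uint32_t	krm_trapno;
	} kdi_crumb_t;

	typedef struct kdi_cpusave {
		uint32_t	krs_curcrumbidx;	/* index of the current crumb */
		kdi_crumb_t	*krs_curcrumb;		/* pointer to the current crumb */
		kdi_crumb_t	krs_crumbs[KDI_NCRUMBS];	/* the ring itself */
		/* ... remaining per-CPU saved state elided ... */
	} kdi_cpusave_t;

	/*
	 * Equivalent of GET_CPUSAVE_ADDR: index the per-CPU save-area array
	 * (the assembly computes kdi_cpusave + cpu_id * KRS_SIZE).
	 */
	extern kdi_cpusave_t *kdi_cpusave;
	#define	GET_CPUSAVE(cpuid)	(&kdi_cpusave[(cpuid)])

	/*
	 * Equivalent of ADVANCE_CRUMB_POINTER: step to the next slot in the
	 * ring, wrapping back to the start, then zero the new crumb before
	 * it is filled in.
	 */
	static void
	kdi_advance_crumb_pointer(kdi_cpusave_t *save)
	{
		if (save->krs_curcrumbidx < KDI_NCRUMBS - 1) {
			save->krs_curcrumbidx++;
			save->krs_curcrumb++;
		} else {
			save->krs_curcrumbidx = 0;
			save->krs_curcrumb = &save->krs_crumbs[0];
		}
		memset(save->krs_curcrumb, 0, sizeof (kdi_crumb_t));
	}

	/* Equivalent of ADD_CRUMB(cpusave, KRM_PC, value, tmp). */
	static void
	kdi_add_crumb_pc(kdi_cpusave_t *save, uint32_t pc)
	{
		save->krs_curcrumb->krm_pc = pc;
	}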