}

static int
kdi_cpu_deactivate(void)
{
	kdi_idt_gates_restore();
	return (0);
}

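/*
 * Restore the saved IDT gates on every CPU: xc_call() cross-calls each CPU
 * in the set so that kdi_cpu_deactivate() runs locally on all of them.
 * Clearing kdi_nmemranges then discards the memory ranges registered while
 * the debugger was active.
 */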
void
kdi_deactivate(void)
{
	cpuset_t cpuset;
	CPUSET_ALL(cpuset);

	xc_call(0, 0, 0, CPUSET2BV(cpuset), (xc_func_t)kdi_cpu_deactivate);
	kdi_nmemranges = 0;
}

/*
 * We receive all breakpoint and single-step traps. Some of them, including
 * those from userland and those induced by DTrace providers, are intended for
 * the kernel, and must be processed there. We adopt this
 * ours-until-proven-otherwise position due to the painful consequences of
 * sending the kernel an unexpected breakpoint or single step. Unless someone
 * can prove to us that the kernel is prepared to handle the trap, we'll assume
 * there's a problem and will give the user a chance to debug it.
 *
 * If we return 2, the calling code should restore the trap-time %cr3: that is,
 * it really is a kernel-originated trap.
 */
int
kdi_trap_pass(kdi_cpusave_t *cpusave)
{
	greg_t tt = cpusave->krs_gregs[KDIREG_TRAPNO];
	greg_t pc = cpusave->krs_gregs[KDIREG_PC];
	greg_t cs = cpusave->krs_gregs[KDIREG_CS];

	if (USERMODE(cs))
		return (1);

	if (tt != T_BPTFLT && tt != T_SGLSTP)
		return (0);

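	/*
	 * While DTrace is active, a kernel breakpoint trap may well belong
	 * to a DTrace provider (on x86, fbt patches probe sites with an
	 * int3), so hand it back to the kernel rather than consuming it
	 * here.
	 */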
	if (tt == T_BPTFLT && kdi_dtrace_get_state() ==
	    KDI_DTSTATE_DTRACE_ACTIVE)
		return (2);

	/*
	 * See the comments in the kernel's T_SGLSTP handler for why we need
	 * to do this.
	 */
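	/*
	 * The tr_* entry points checked below are the KPTI trampoline
	 * variants of the sysenter handlers; they aren't built under __xpv,
	 * hence the preprocessor split.
	 */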
#if !defined(__xpv)
	if (tt == T_SGLSTP &&
	    (pc == (greg_t)sys_sysenter || pc == (greg_t)brand_sys_sysenter ||
	    pc == (greg_t)tr_sys_sysenter ||
	    pc == (greg_t)tr_brand_sys_sysenter)) {
#else
	if (tt == T_SGLSTP &&
	    (pc == (greg_t)sys_sysenter || pc == (greg_t)brand_sys_sysenter)) {
#endif
		return (1);
	}

	return (0);
}
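
/*
 * Illustrative sketch only, not part of the original source: a caller such
 * as the debugger's trap handler might dispatch on kdi_trap_pass()'s return
 * value along these lines. The helpers kdi_restore_cr3(),
 * kdi_pass_to_kernel(), and kdi_debugger_entry() are hypothetical
 * placeholders, not real symbols:
 *
 *	switch (kdi_trap_pass(cpusave)) {
 *	case 2:
 *		kdi_restore_cr3(cpusave);	(reload the trap-time %cr3)
 *		(fall through)
 *	case 1:
 *		kdi_pass_to_kernel(cpusave);	(the kernel handles the trap)
 *		break;
 *	default:
 *		kdi_debugger_entry(cpusave);	(the debugger keeps the trap)
 *	}
 */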