4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2011, Joyent, Inc. All rights reserved.
25 */
26
27 /*
28 * DTrace - Dynamic Tracing for Solaris
29 *
30 * This is the implementation of the Solaris Dynamic Tracing framework
31 * (DTrace). The user-visible interface to DTrace is described at length in
32 * the "Solaris Dynamic Tracing Guide". The interfaces between the libdtrace
33 * library, the in-kernel DTrace framework, and the DTrace providers are
34 * described in the block comments in the <sys/dtrace.h> header file. The
35 * internal architecture of DTrace is described in the block comments in the
36 * <sys/dtrace_impl.h> header file. The comments contained within the DTrace
37 * implementation very much assume mastery of all of these sources; if one has
38 * an unanswered question about the implementation, one should consult them
39 * first.
40 *
41 * The functions here are ordered roughly as follows:
42 *
43 * - Probe context functions
44 * - Probe hashing functions
355 *((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);
356
/*
 * DTRACE_ALIGNCHECK(addr, size, flags): verify that addr is aligned to
 * size (size must be a power of two for the (size - 1) mask to work).
 * On a misaligned address it raises CPU_DTRACE_BADALIGN in *flags,
 * records the offending address in the per-CPU cpuc_dtrace_illval, and
 * issues `return (0)` from the *enclosing* function -- so this macro may
 * only be used inside a function that returns a value.  On __i386 the
 * check is compiled out entirely (presumably because unaligned accesses
 * are tolerated on that architecture -- NOTE(review): confirm).
 *
 * NOTE(review): the expansion is a bare `if` (no do/while(0) wrapper)
 * and `addr`/`size` appear unparenthesized, so callers must invoke it
 * as a full statement with simple expression arguments; `addr` is
 * evaluated twice on the failure path.
 */
357 #ifndef __i386
358 #define DTRACE_ALIGNCHECK(addr, size, flags) \
359 if (addr & (size - 1)) { \
360 *flags |= CPU_DTRACE_BADALIGN; \
361 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr; \
362 return (0); \
363 }
364 #else
365 #define DTRACE_ALIGNCHECK(addr, size, flags)
366 #endif
367
368 /*
369 * Test whether a range of memory starting at testaddr of size testsz falls
370 * within the range of memory described by baseaddr, basesz. We take care to avoid
371 * problems with overflow and underflow of the unsigned quantities, and
372 * disallow all negative sizes. Ranges of size 0 are allowed.
373 */
/*
 * baseaddr is explicitly cast to uintptr_t so that callers may pass a
 * pointer-typed base directly (e.g. dtds_base, dtdo_strtab) without
 * casting at every call site; this matches the newer form of the macro.
 * All three clauses are computed in unsigned arithmetic: (1) the start
 * lies within the base range (a testaddr below baseaddr wraps to a huge
 * value and fails the `<`), (2) the end lies within the base range, and
 * (3) testaddr + testsz did not itself wrap around.
 */
#define DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \
	((testaddr) - (uintptr_t)(baseaddr) < (basesz) && \
	(testaddr) + (testsz) - (uintptr_t)(baseaddr) <= (basesz) && \
	(testaddr) + (testsz) >= (testaddr))
378
379 /*
380 * Test whether alloc_sz bytes will fit in the scratch region. We isolate
381 * alloc_sz on the righthand side of the comparison in order to avoid overflow
382 * or underflow in the comparison with it. This is simpler than the INRANGE
383 * check above, because we know that the dtms_scratch_ptr is valid in the
384 * range. Allocations of size zero are allowed.
385 */
/*
 * The scratch remaining is (base + size) - ptr; the requested
 * allocation fits exactly when it is no larger than that remainder.
 */
#define DTRACE_INSCRATCH(mstate, alloc_sz) \
	((alloc_sz) <= \
	(mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \
	(mstate)->dtms_scratch_ptr)
389
390 #define DTRACE_LOADFUNC(bits) \
391 /*CSTYLED*/ \
392 uint##bits##_t \
393 dtrace_load##bits(uintptr_t addr) \
394 { \
395 size_t size = bits / NBBY; \
396 /*CSTYLED*/ \
457 (act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)
458
459 static size_t dtrace_strlen(const char *, size_t);
460 static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
461 static void dtrace_enabling_provide(dtrace_provider_t *);
462 static int dtrace_enabling_match(dtrace_enabling_t *, int *);
463 static void dtrace_enabling_matchall(void);
464 static void dtrace_enabling_reap(void);
465 static dtrace_state_t *dtrace_anon_grab(void);
466 static uint64_t dtrace_helper(int, dtrace_mstate_t *,
467 dtrace_state_t *, uint64_t, uint64_t);
468 static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
469 static void dtrace_buffer_drop(dtrace_buffer_t *);
470 static int dtrace_buffer_consumed(dtrace_buffer_t *, hrtime_t when);
471 static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
472 dtrace_state_t *, dtrace_mstate_t *);
473 static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
474 dtrace_optval_t);
475 static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
476 static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
477
478 /*
479 * DTrace Probe Context Functions
480 *
481 * These functions are called from probe context. Because probe context is
482 * any context in which C may be called, arbitrary locks may be held,
483 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
484 * As a result, functions called from probe context may only call other DTrace
485 * support functions -- they may not interact at all with the system at large.
486 * (Note that the ASSERT macro is made probe-context safe by redefining it in
487 * terms of dtrace_assfail(), a probe-context safe function.) If arbitrary
488 * loads are to be performed from probe context, they _must_ be in terms of
489 * the safe dtrace_load*() variants.
490 *
491 * Some functions in this block are not actually called from probe context;
492 * for these functions, there will be a comment above the function reading
493 * "Note: not called from probe context."
494 */
495 void
496 dtrace_panic(const char *format, ...)
601 * be issued. This includes the DTrace scratch areas, and any DTrace variable
602 * region. The caller of dtrace_canstore() is responsible for performing any
603 * alignment checks that are needed before stores are actually executed.
604 */
605 static int
606 dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
607 dtrace_vstate_t *vstate)
608 {
609 /*
610 * First, check to see if the address is in scratch space...
611 */
612 if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base,
613 mstate->dtms_scratch_size))
614 return (1);
615
616 /*
617 * Now check to see if it's a dynamic variable. This check will pick
618 * up both thread-local variables and any global dynamically-allocated
619 * variables.
620 */
621 if (DTRACE_INRANGE(addr, sz, (uintptr_t)vstate->dtvs_dynvars.dtds_base,
622 vstate->dtvs_dynvars.dtds_size)) {
623 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
624 uintptr_t base = (uintptr_t)dstate->dtds_base +
625 (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t));
626 uintptr_t chunkoffs;
627
628 /*
629 * Before we assume that we can store here, we need to make
630 * sure that it isn't in our metadata -- storing to our
631 * dynamic variable metadata would corrupt our state. For
632 * the range to not include any dynamic variable metadata,
633 * it must:
634 *
635 * (1) Start above the hash table that is at the base of
636 * the dynamic variable space
637 *
638 * (2) Have a starting chunk offset that is beyond the
639 * dtrace_dynvar_t that is at the base of every chunk
640 *
641 * (3) Not span a chunk boundary
685 dtrace_vstate_t *vstate)
686 {
687 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
688
689 /*
690 * If we hold the privilege to read from kernel memory, then
691 * everything is readable.
692 */
693 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
694 return (1);
695
696 /*
697 * You can obviously read that which you can store.
698 */
699 if (dtrace_canstore(addr, sz, mstate, vstate))
700 return (1);
701
702 /*
703 * We're allowed to read from our own string table.
704 */
705 if (DTRACE_INRANGE(addr, sz, (uintptr_t)mstate->dtms_difo->dtdo_strtab,
706 mstate->dtms_difo->dtdo_strlen))
707 return (1);
708
709 DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
710 *illval = addr;
711 return (0);
712 }
713
714 /*
715 * Convenience routine to check to see if a given string is within a memory
716 * region in which a load may be issued given the user's privilege level;
717 * this exists so that we don't need to issue unnecessary dtrace_strlen()
718 * calls in the event that the user has all privileges.
719 */
720 static int
721 dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
722 dtrace_vstate_t *vstate)
723 {
724 size_t strsz;
725
726 /*
727 * If we hold the privilege to read from kernel memory, then
728 * everything is readable.
2875 return (dtrace_getreg(lwp->lwp_regs, ndx));
2876 }
2877
2878 case DIF_VAR_VMREGS: {
2879 uint64_t rval;
2880
2881 if (!dtrace_priv_kernel(state))
2882 return (0);
2883
2884 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
2885
2886 rval = dtrace_getvmreg(ndx,
2887 &cpu_core[CPU->cpu_id].cpuc_dtrace_flags);
2888
2889 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
2890
2891 return (rval);
2892 }
2893
2894 case DIF_VAR_CURTHREAD:
2895 if (!dtrace_priv_kernel(state))
2896 return (0);
2897 return ((uint64_t)(uintptr_t)curthread);
2898
2899 case DIF_VAR_TIMESTAMP:
2900 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
2901 mstate->dtms_timestamp = dtrace_gethrtime();
2902 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP;
2903 }
2904 return (mstate->dtms_timestamp);
2905
2906 case DIF_VAR_VTIMESTAMP:
2907 ASSERT(dtrace_vtime_references != 0);
2908 return (curthread->t_dtrace_vtime);
2909
2910 case DIF_VAR_WALLTIMESTAMP:
2911 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) {
2912 mstate->dtms_walltimestamp = dtrace_gethrestime();
2913 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP;
2914 }
2915 return (mstate->dtms_walltimestamp);
4890 if (cc_c == 0)
4891 pc = DIF_INSTR_LABEL(instr);
4892 break;
4893 case DIF_OP_BL:
4894 if (cc_n ^ cc_v)
4895 pc = DIF_INSTR_LABEL(instr);
4896 break;
4897 case DIF_OP_BLU:
4898 if (cc_c)
4899 pc = DIF_INSTR_LABEL(instr);
4900 break;
4901 case DIF_OP_BLE:
4902 if (cc_z | (cc_n ^ cc_v))
4903 pc = DIF_INSTR_LABEL(instr);
4904 break;
4905 case DIF_OP_BLEU:
4906 if (cc_c | cc_z)
4907 pc = DIF_INSTR_LABEL(instr);
4908 break;
4909 case DIF_OP_RLDSB:
4910 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) {
4911 *flags |= CPU_DTRACE_KPRIV;
4912 *illval = regs[r1];
4913 break;
4914 }
4915 /*FALLTHROUGH*/
4916 case DIF_OP_LDSB:
4917 regs[rd] = (int8_t)dtrace_load8(regs[r1]);
4918 break;
4919 case DIF_OP_RLDSH:
4920 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) {
4921 *flags |= CPU_DTRACE_KPRIV;
4922 *illval = regs[r1];
4923 break;
4924 }
4925 /*FALLTHROUGH*/
4926 case DIF_OP_LDSH:
4927 regs[rd] = (int16_t)dtrace_load16(regs[r1]);
4928 break;
4929 case DIF_OP_RLDSW:
4930 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) {
4931 *flags |= CPU_DTRACE_KPRIV;
4932 *illval = regs[r1];
4933 break;
4934 }
4935 /*FALLTHROUGH*/
4936 case DIF_OP_LDSW:
4937 regs[rd] = (int32_t)dtrace_load32(regs[r1]);
4938 break;
4939 case DIF_OP_RLDUB:
4940 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) {
4941 *flags |= CPU_DTRACE_KPRIV;
4942 *illval = regs[r1];
4943 break;
4944 }
4945 /*FALLTHROUGH*/
4946 case DIF_OP_LDUB:
4947 regs[rd] = dtrace_load8(regs[r1]);
4948 break;
4949 case DIF_OP_RLDUH:
4950 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) {
4951 *flags |= CPU_DTRACE_KPRIV;
4952 *illval = regs[r1];
4953 break;
4954 }
4955 /*FALLTHROUGH*/
4956 case DIF_OP_LDUH:
4957 regs[rd] = dtrace_load16(regs[r1]);
4958 break;
4959 case DIF_OP_RLDUW:
4960 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) {
4961 *flags |= CPU_DTRACE_KPRIV;
4962 *illval = regs[r1];
4963 break;
4964 }
4965 /*FALLTHROUGH*/
4966 case DIF_OP_LDUW:
4967 regs[rd] = dtrace_load32(regs[r1]);
4968 break;
4969 case DIF_OP_RLDX:
4970 if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) {
4971 *flags |= CPU_DTRACE_KPRIV;
4972 *illval = regs[r1];
4973 break;
4974 }
4975 /*FALLTHROUGH*/
4976 case DIF_OP_LDX:
4977 regs[rd] = dtrace_load64(regs[r1]);
4978 break;
4979 case DIF_OP_ULDSB:
4980 regs[rd] = (int8_t)
4981 dtrace_fuword8((void *)(uintptr_t)regs[r1]);
4982 break;
4983 case DIF_OP_ULDSH:
4984 regs[rd] = (int16_t)
4985 dtrace_fuword16((void *)(uintptr_t)regs[r1]);
4986 break;
4987 case DIF_OP_ULDSW:
4988 regs[rd] = (int32_t)
4989 dtrace_fuword32((void *)(uintptr_t)regs[r1]);
4990 break;
4991 case DIF_OP_ULDUB:
4992 regs[rd] =
4993 dtrace_fuword8((void *)(uintptr_t)regs[r1]);
4994 break;
|
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
25 */
26
27 /*
28 * DTrace - Dynamic Tracing for Solaris
29 *
30 * This is the implementation of the Solaris Dynamic Tracing framework
31 * (DTrace). The user-visible interface to DTrace is described at length in
32 * the "Solaris Dynamic Tracing Guide". The interfaces between the libdtrace
33 * library, the in-kernel DTrace framework, and the DTrace providers are
34 * described in the block comments in the <sys/dtrace.h> header file. The
35 * internal architecture of DTrace is described in the block comments in the
36 * <sys/dtrace_impl.h> header file. The comments contained within the DTrace
37 * implementation very much assume mastery of all of these sources; if one has
38 * an unanswered question about the implementation, one should consult them
39 * first.
40 *
41 * The functions here are ordered roughly as follows:
42 *
43 * - Probe context functions
44 * - Probe hashing functions
355 *((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);
356
/*
 * DTRACE_ALIGNCHECK(addr, size, flags): verify that addr is aligned to
 * size (size must be a power of two for the (size - 1) mask to work).
 * On a misaligned address it raises CPU_DTRACE_BADALIGN in *flags,
 * records the offending address in the per-CPU cpuc_dtrace_illval, and
 * issues `return (0)` from the *enclosing* function -- so this macro may
 * only be used inside a function that returns a value.  On __i386 the
 * check is compiled out entirely (presumably because unaligned accesses
 * are tolerated on that architecture -- NOTE(review): confirm).
 *
 * NOTE(review): the expansion is a bare `if` (no do/while(0) wrapper)
 * and `addr`/`size` appear unparenthesized, so callers must invoke it
 * as a full statement with simple expression arguments; `addr` is
 * evaluated twice on the failure path.
 */
357 #ifndef __i386
358 #define DTRACE_ALIGNCHECK(addr, size, flags) \
359 if (addr & (size - 1)) { \
360 *flags |= CPU_DTRACE_BADALIGN; \
361 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr; \
362 return (0); \
363 }
364 #else
365 #define DTRACE_ALIGNCHECK(addr, size, flags)
366 #endif
367
368 /*
369 * Test whether a range of memory starting at testaddr of size testsz falls
370 * within the range of memory described by baseaddr, basesz. We take care to avoid
371 * problems with overflow and underflow of the unsigned quantities, and
372 * disallow all negative sizes. Ranges of size 0 are allowed.
373 */
/*
 * Same three unsigned-arithmetic checks, with the wraparound test
 * performed first: testaddr + testsz must not overflow, the start must
 * lie within [baseaddr, baseaddr + basesz), and the end must lie within
 * [baseaddr, baseaddr + basesz].  baseaddr is cast to uintptr_t so
 * pointer-typed bases may be passed directly.
 */
#define DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \
	((testaddr) + (testsz) >= (testaddr) && \
	(testaddr) - (uintptr_t)(baseaddr) < (basesz) && \
	(testaddr) + (testsz) - (uintptr_t)(baseaddr) <= (basesz))
378
379 /*
380 * Test whether alloc_sz bytes will fit in the scratch region. We isolate
381 * alloc_sz on the righthand side of the comparison in order to avoid overflow
382 * or underflow in the comparison with it. This is simpler than the INRANGE
383 * check above, because we know that the dtms_scratch_ptr is valid in the
384 * range. Allocations of size zero are allowed.
385 */
/*
 * The scratch remaining is (base + size) - ptr; the requested
 * allocation fits exactly when it is no larger than that remainder.
 */
#define DTRACE_INSCRATCH(mstate, alloc_sz) \
	((alloc_sz) <= \
	(mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \
	(mstate)->dtms_scratch_ptr)
389
390 #define DTRACE_LOADFUNC(bits) \
391 /*CSTYLED*/ \
392 uint##bits##_t \
393 dtrace_load##bits(uintptr_t addr) \
394 { \
395 size_t size = bits / NBBY; \
396 /*CSTYLED*/ \
457 (act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)
458
459 static size_t dtrace_strlen(const char *, size_t);
460 static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
461 static void dtrace_enabling_provide(dtrace_provider_t *);
462 static int dtrace_enabling_match(dtrace_enabling_t *, int *);
463 static void dtrace_enabling_matchall(void);
464 static void dtrace_enabling_reap(void);
465 static dtrace_state_t *dtrace_anon_grab(void);
466 static uint64_t dtrace_helper(int, dtrace_mstate_t *,
467 dtrace_state_t *, uint64_t, uint64_t);
468 static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
469 static void dtrace_buffer_drop(dtrace_buffer_t *);
470 static int dtrace_buffer_consumed(dtrace_buffer_t *, hrtime_t when);
471 static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
472 dtrace_state_t *, dtrace_mstate_t *);
473 static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
474 dtrace_optval_t);
475 static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
476 static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
477 static int dtrace_priv_proc(dtrace_state_t *, dtrace_mstate_t *);
478
479 /*
480 * DTrace Probe Context Functions
481 *
482 * These functions are called from probe context. Because probe context is
483 * any context in which C may be called, arbitrary locks may be held,
484 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
485 * As a result, functions called from probe context may only call other DTrace
486 * support functions -- they may not interact at all with the system at large.
487 * (Note that the ASSERT macro is made probe-context safe by redefining it in
488 * terms of dtrace_assfail(), a probe-context safe function.) If arbitrary
489 * loads are to be performed from probe context, they _must_ be in terms of
490 * the safe dtrace_load*() variants.
491 *
492 * Some functions in this block are not actually called from probe context;
493 * for these functions, there will be a comment above the function reading
494 * "Note: not called from probe context."
495 */
496 void
497 dtrace_panic(const char *format, ...)
602 * be issued. This includes the DTrace scratch areas, and any DTrace variable
603 * region. The caller of dtrace_canstore() is responsible for performing any
604 * alignment checks that are needed before stores are actually executed.
605 */
606 static int
607 dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
608 dtrace_vstate_t *vstate)
609 {
610 /*
611 * First, check to see if the address is in scratch space...
612 */
613 if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base,
614 mstate->dtms_scratch_size))
615 return (1);
616
617 /*
618 * Now check to see if it's a dynamic variable. This check will pick
619 * up both thread-local variables and any global dynamically-allocated
620 * variables.
621 */
622 if (DTRACE_INRANGE(addr, sz, vstate->dtvs_dynvars.dtds_base,
623 vstate->dtvs_dynvars.dtds_size)) {
624 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
625 uintptr_t base = (uintptr_t)dstate->dtds_base +
626 (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t));
627 uintptr_t chunkoffs;
628
629 /*
630 * Before we assume that we can store here, we need to make
631 * sure that it isn't in our metadata -- storing to our
632 * dynamic variable metadata would corrupt our state. For
633 * the range to not include any dynamic variable metadata,
634 * it must:
635 *
636 * (1) Start above the hash table that is at the base of
637 * the dynamic variable space
638 *
639 * (2) Have a starting chunk offset that is beyond the
640 * dtrace_dynvar_t that is at the base of every chunk
641 *
642 * (3) Not span a chunk boundary
686 dtrace_vstate_t *vstate)
687 {
688 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
689
690 /*
691 * If we hold the privilege to read from kernel memory, then
692 * everything is readable.
693 */
694 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
695 return (1);
696
697 /*
698 * You can obviously read that which you can store.
699 */
700 if (dtrace_canstore(addr, sz, mstate, vstate))
701 return (1);
702
703 /*
704 * We're allowed to read from our own string table.
705 */
706 if (DTRACE_INRANGE(addr, sz, mstate->dtms_difo->dtdo_strtab,
707 mstate->dtms_difo->dtdo_strlen))
708 return (1);
709
710 if (vstate->dtvs_state != NULL &&
711 dtrace_priv_proc(vstate->dtvs_state, mstate)) {
712 proc_t *p;
713
714 /*
715 * When we have privileges to the current process, there are
716 * several context-related kernel structures that are safe to
717 * read, even absent the privilege to read from kernel memory.
718 * These reads are safe because these structures contain only
719 * state that (1) we're permitted to read, (2) is harmless or
720 * (3) contains pointers to additional kernel state that we're
721 * not permitted to read (and as such, do not present an
722 * opportunity for privilege escalation). Finally (and
723 * critically), because of the nature of their relation with
724 * the current thread context, the memory associated with these
725 * structures cannot change over the duration of probe context,
726 * and it is therefore impossible for this memory to be
727 * deallocated and reallocated as something else while it's
728 * being operated upon.
729 */
730 if (DTRACE_INRANGE(addr, sz, curthread, sizeof (kthread_t)))
731 return (1);
732
733 if ((p = curthread->t_procp) != NULL && DTRACE_INRANGE(addr,
734 sz, curthread->t_procp, sizeof (proc_t))) {
735 return (1);
736 }
737
738 if (curthread->t_cred != NULL && DTRACE_INRANGE(addr, sz,
739 curthread->t_cred, sizeof (cred_t))) {
740 return (1);
741 }
742
743 if (p != NULL && p->p_pidp != NULL && DTRACE_INRANGE(addr, sz,
744 &(p->p_pidp->pid_id), sizeof (pid_t))) {
745 return (1);
746 }
747
748 if (curthread->t_cpu != NULL && DTRACE_INRANGE(addr, sz,
749 curthread->t_cpu, offsetof(cpu_t, cpu_pause_thread))) {
750 return (1);
751 }
752 }
753
754 DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
755 *illval = addr;
756 return (0);
757 }
758
759 /*
760 * Convenience routine to check to see if a given string is within a memory
761 * region in which a load may be issued given the user's privilege level;
762 * this exists so that we don't need to issue unnecessary dtrace_strlen()
763 * calls in the event that the user has all privileges.
764 */
765 static int
766 dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
767 dtrace_vstate_t *vstate)
768 {
769 size_t strsz;
770
771 /*
772 * If we hold the privilege to read from kernel memory, then
773 * everything is readable.
2920 return (dtrace_getreg(lwp->lwp_regs, ndx));
2921 }
2922
2923 case DIF_VAR_VMREGS: {
2924 uint64_t rval;
2925
2926 if (!dtrace_priv_kernel(state))
2927 return (0);
2928
2929 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
2930
2931 rval = dtrace_getvmreg(ndx,
2932 &cpu_core[CPU->cpu_id].cpuc_dtrace_flags);
2933
2934 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
2935
2936 return (rval);
2937 }
2938
2939 case DIF_VAR_CURTHREAD:
2940 if (!dtrace_priv_proc(state, mstate))
2941 return (0);
2942 return ((uint64_t)(uintptr_t)curthread);
2943
2944 case DIF_VAR_TIMESTAMP:
2945 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
2946 mstate->dtms_timestamp = dtrace_gethrtime();
2947 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP;
2948 }
2949 return (mstate->dtms_timestamp);
2950
2951 case DIF_VAR_VTIMESTAMP:
2952 ASSERT(dtrace_vtime_references != 0);
2953 return (curthread->t_dtrace_vtime);
2954
2955 case DIF_VAR_WALLTIMESTAMP:
2956 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) {
2957 mstate->dtms_walltimestamp = dtrace_gethrestime();
2958 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP;
2959 }
2960 return (mstate->dtms_walltimestamp);
4935 if (cc_c == 0)
4936 pc = DIF_INSTR_LABEL(instr);
4937 break;
4938 case DIF_OP_BL:
4939 if (cc_n ^ cc_v)
4940 pc = DIF_INSTR_LABEL(instr);
4941 break;
4942 case DIF_OP_BLU:
4943 if (cc_c)
4944 pc = DIF_INSTR_LABEL(instr);
4945 break;
4946 case DIF_OP_BLE:
4947 if (cc_z | (cc_n ^ cc_v))
4948 pc = DIF_INSTR_LABEL(instr);
4949 break;
4950 case DIF_OP_BLEU:
4951 if (cc_c | cc_z)
4952 pc = DIF_INSTR_LABEL(instr);
4953 break;
4954 case DIF_OP_RLDSB:
4955 if (!dtrace_canload(regs[r1], 1, mstate, vstate))
4956 break;
4957 /*FALLTHROUGH*/
4958 case DIF_OP_LDSB:
4959 regs[rd] = (int8_t)dtrace_load8(regs[r1]);
4960 break;
4961 case DIF_OP_RLDSH:
4962 if (!dtrace_canload(regs[r1], 2, mstate, vstate))
4963 break;
4964 /*FALLTHROUGH*/
4965 case DIF_OP_LDSH:
4966 regs[rd] = (int16_t)dtrace_load16(regs[r1]);
4967 break;
4968 case DIF_OP_RLDSW:
4969 if (!dtrace_canload(regs[r1], 4, mstate, vstate))
4970 break;
4971 /*FALLTHROUGH*/
4972 case DIF_OP_LDSW:
4973 regs[rd] = (int32_t)dtrace_load32(regs[r1]);
4974 break;
4975 case DIF_OP_RLDUB:
4976 if (!dtrace_canload(regs[r1], 1, mstate, vstate))
4977 break;
4978 /*FALLTHROUGH*/
4979 case DIF_OP_LDUB:
4980 regs[rd] = dtrace_load8(regs[r1]);
4981 break;
4982 case DIF_OP_RLDUH:
4983 if (!dtrace_canload(regs[r1], 2, mstate, vstate))
4984 break;
4985 /*FALLTHROUGH*/
4986 case DIF_OP_LDUH:
4987 regs[rd] = dtrace_load16(regs[r1]);
4988 break;
4989 case DIF_OP_RLDUW:
4990 if (!dtrace_canload(regs[r1], 4, mstate, vstate))
4991 break;
4992 /*FALLTHROUGH*/
4993 case DIF_OP_LDUW:
4994 regs[rd] = dtrace_load32(regs[r1]);
4995 break;
4996 case DIF_OP_RLDX:
4997 if (!dtrace_canload(regs[r1], 8, mstate, vstate))
4998 break;
4999 /*FALLTHROUGH*/
5000 case DIF_OP_LDX:
5001 regs[rd] = dtrace_load64(regs[r1]);
5002 break;
5003 case DIF_OP_ULDSB:
5004 regs[rd] = (int8_t)
5005 dtrace_fuword8((void *)(uintptr_t)regs[r1]);
5006 break;
5007 case DIF_OP_ULDSH:
5008 regs[rd] = (int16_t)
5009 dtrace_fuword16((void *)(uintptr_t)regs[r1]);
5010 break;
5011 case DIF_OP_ULDSW:
5012 regs[rd] = (int32_t)
5013 dtrace_fuword32((void *)(uintptr_t)regs[r1]);
5014 break;
5015 case DIF_OP_ULDUB:
5016 regs[rd] =
5017 dtrace_fuword8((void *)(uintptr_t)regs[r1]);
5018 break;
|