4469 DTrace helper tracing should be dynamic


 252 
 253 static dtrace_pops_t    dtrace_provider_ops = {
 254         (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop,
 255         (void (*)(void *, struct modctl *))dtrace_nullop,
 256         (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop,
 257         (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
 258         (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
 259         (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
 260         NULL,
 261         NULL,
 262         NULL,
 263         (void (*)(void *, dtrace_id_t, void *))dtrace_nullop
 264 };
 265 
 266 static dtrace_id_t      dtrace_probeid_begin;   /* special BEGIN probe */
 267 static dtrace_id_t      dtrace_probeid_end;     /* special END probe */
 268 dtrace_id_t             dtrace_probeid_error;   /* special ERROR probe */
 269 
 270 /*
 271  * DTrace Helper Tracing Variables
 272  */
 273 uint32_t dtrace_helptrace_next = 0;
 274 uint32_t dtrace_helptrace_nlocals;
 275 char    *dtrace_helptrace_buffer;
 276 int     dtrace_helptrace_bufsize = 512 * 1024;
 277 
 278 #ifdef DEBUG
 279 int     dtrace_helptrace_enabled = 1;
 280 #else
 281 int     dtrace_helptrace_enabled = 0;
 282 #endif
 283 
 284 /*
 285  * DTrace Error Hashing
 286  *
 287  * On DEBUG kernels, DTrace will track the errors it has seen in a hash
 288  * table.  This is very useful for checking coverage of tests that are
 289  * expected to induce DIF or DOF processing errors, and may be useful for
 290  * debugging problems in the DIF code generator or in DOF generation.  The
 291  * error hash may be examined with the ::dtrace_errhash MDB dcmd.
 292  */
 293 #ifdef DEBUG
 294 static dtrace_errhash_t dtrace_errhash[DTRACE_ERRHASHSZ];
 295 static const char *dtrace_errlast;
 296 static kthread_t *dtrace_errthread;
 297 static kmutex_t dtrace_errlock;
 298 #endif
 299 
 300 /*
 301  * DTrace Macros and Constants
 302  *


14304                  * trying to retain more enablings than are allowed -- but
14305                  * we only have one anonymous enabling, and we are guaranteed
14306                  * to be allowed at least one retained enabling; we assert
14307                  * that dtrace_enabling_retain() returns success.
14308                  */
14309                 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling);
14310                 ASSERT(rval == 0);
14311 
14312                 dtrace_enabling_dump(dtrace_anon.dta_enabling);
14313         }
14314 }
14315 
14316 /*
14317  * DTrace Helper Functions
14318  */
14319 static void
14320 dtrace_helper_trace(dtrace_helper_action_t *helper,
14321     dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where)
14322 {
14323         uint32_t size, next, nnext, i;
14324         dtrace_helptrace_t *ent;
14325         uint16_t flags = cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
14326 
14327         if (!dtrace_helptrace_enabled)
14328                 return;
14329 
14330         ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals);
14331 
14332         /*
14333          * What would a tracing framework be without its own tracing
14334          * framework?  (Well, a hell of a lot simpler, for starters...)
14335          */
14336         size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals *
14337             sizeof (uint64_t) - sizeof (uint64_t);
14338 
14339         /*
14340          * Iterate until we can allocate a slot in the trace buffer.
14341          */
14342         do {
14343                 next = dtrace_helptrace_next;
14344 
14345                 if (next + size < dtrace_helptrace_bufsize) {
14346                         nnext = next + size;
14347                 } else {
14348                         nnext = size;
14349                 }
14350         } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next);
14351 
14352         /*
14353          * We have our slot; fill it in.
14354          */
14355         if (nnext == size)
14356                 next = 0;
14357 
14358         ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next];
14359         ent->dtht_helper = helper;
14360         ent->dtht_where = where;
14361         ent->dtht_nlocals = vstate->dtvs_nlocals;
14362 
14363         ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ?
14364             mstate->dtms_fltoffs : -1;
14365         ent->dtht_fault = DTRACE_FLAGS2FLT(flags);
14366         ent->dtht_illval = cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
14367 
14368         for (i = 0; i < vstate->dtvs_nlocals; i++) {
14369                 dtrace_statvar_t *svar;
14370 
14371                 if ((svar = vstate->dtvs_locals[i]) == NULL)
14372                         continue;
14373 
14374                 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t));
14375                 ent->dtht_locals[i] =
14376                     ((uint64_t *)(uintptr_t)svar->dtsv_data)[CPU->cpu_id];
14377         }
14378 }
14379 
14380 static uint64_t
14381 dtrace_helper(int which, dtrace_mstate_t *mstate,
14382     dtrace_state_t *state, uint64_t arg0, uint64_t arg1)
14383 {
14384         uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
14385         uint64_t sarg0 = mstate->dtms_arg[0];
14386         uint64_t sarg1 = mstate->dtms_arg[1];
14387         uint64_t rval;
14388         dtrace_helpers_t *helpers = curproc->p_dtrace_helpers;
14389         dtrace_helper_action_t *helper;
14390         dtrace_vstate_t *vstate;
14391         dtrace_difo_t *pred;
14392         int i, trace = dtrace_helptrace_enabled;
14393 
14394         ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS);
14395 
14396         if (helpers == NULL)
14397                 return (0);
14398 
14399         if ((helper = helpers->dthps_actions[which]) == NULL)
14400                 return (0);
14401 
14402         vstate = &helpers->dthps_vstate;
14403         mstate->dtms_arg[0] = arg0;
14404         mstate->dtms_arg[1] = arg1;
14405 
14406         /*
14407          * Now iterate over each helper.  If its predicate evaluates to 'true',
14408          * we'll call the corresponding actions.  Note that the below calls
14409          * to dtrace_dif_emulate() may set faults in machine state.  This is
14410          * okay:  our caller (the outer dtrace_dif_emulate()) will simply plow
14411          * the stored DIF offset with its own (which is the desired behavior).
14412          * Also, note the calls to dtrace_dif_emulate() may allocate scratch


15689          * Once we've registered, we can assert that dtrace_provider is our
15690          * pseudo provider.
15691          */
15692         (void) dtrace_register("dtrace", &dtrace_provider_attr,
15693             DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id);
15694 
15695         ASSERT(dtrace_provider != NULL);
15696         ASSERT((dtrace_provider_id_t)dtrace_provider == id);
15697 
15698         dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
15699             dtrace_provider, NULL, NULL, "BEGIN", 0, NULL);
15700         dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
15701             dtrace_provider, NULL, NULL, "END", 0, NULL);
15702         dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
15703             dtrace_provider, NULL, NULL, "ERROR", 1, NULL);
15704 
15705         dtrace_anon_property();
15706         mutex_exit(&cpu_lock);
15707 
15708         /*
15709          * If DTrace helper tracing is enabled, we need to allocate the
15710          * trace buffer and initialize the values.
15711          */
15712         if (dtrace_helptrace_enabled) {
15713                 ASSERT(dtrace_helptrace_buffer == NULL);
15714                 dtrace_helptrace_buffer =
15715                     kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP);
15716                 dtrace_helptrace_next = 0;
15717         }
15718 
15719         /*
15720          * If there are already providers, we must ask them to provide their
15721          * probes, and then match any anonymous enabling against them.  Note
15722          * that there should be no other retained enablings at this time:
15723          * the only retained enablings at this time should be the anonymous
15724          * enabling.
15725          */
15726         if (dtrace_anon.dta_enabling != NULL) {
15727                 ASSERT(dtrace_retained == dtrace_anon.dta_enabling);
15728 
15729                 dtrace_enabling_provide(NULL);
15730                 state = dtrace_anon.dta_state;
15731 
15732                 /*
15733                  * We couldn't hold cpu_lock across the above call to
15734                  * dtrace_enabling_provide(), but we must hold it to actually
15735                  * enable the probes.  We have to drop all of our locks, pick
15736                  * up cpu_lock, and regain our locks before matching the
15737                  * retained anonymous enabling.
15738                  */
15739                 mutex_exit(&dtrace_lock);


15795         mutex_enter(&dtrace_provider_lock);
15796         dtrace_probe_provide(NULL, NULL);
15797         mutex_exit(&dtrace_provider_lock);
15798 
15799         mutex_enter(&cpu_lock);
15800         mutex_enter(&dtrace_lock);
15801         dtrace_opens++;
15802         dtrace_membar_producer();
15803 
15804         /*
15805          * If the kernel debugger is active (that is, if the kernel debugger
15806          * modified text in some way), we won't allow the open.
15807          */
15808         if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
15809                 dtrace_opens--;
15810                 mutex_exit(&cpu_lock);
15811                 mutex_exit(&dtrace_lock);
15812                 return (EBUSY);
15813         }
15814 
15815         state = dtrace_state_create(devp, cred_p);
15816         mutex_exit(&cpu_lock);
15817 
15818         if (state == NULL) {
15819                 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL)
15820                         (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
15821                 mutex_exit(&dtrace_lock);
15822                 return (EAGAIN);
15823         }
15824 
15825         mutex_exit(&dtrace_lock);
15826 
15827         return (0);
15828 }
15829 
15830 /*ARGSUSED*/
15831 static int
15832 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
15833 {
15834         minor_t minor = getminor(dev);
15835         dtrace_state_t *state;
15836 
15837         if (minor == DTRACEMNRN_HELPER)
15838                 return (0);
15839 
15840         state = ddi_get_soft_state(dtrace_softstate, minor);
15841 
15842         mutex_enter(&cpu_lock);
15843         mutex_enter(&dtrace_lock);
15844 
15845         if (state->dts_anon) {
15846                 /*
15847                  * There is anonymous state. Destroy that first.
15848                  */
15849                 ASSERT(dtrace_anon.dta_state == NULL);
15850                 dtrace_state_destroy(state->dts_anon);
15851         }
15852 
15853         dtrace_state_destroy(state);
15854         ASSERT(dtrace_opens > 0);
15855 
15856         /*
15857          * Only relinquish control of the kernel debugger interface when there
15858          * are no consumers and no anonymous enablings.
15859          */
15860         if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL)
15861                 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
15862 
15863         mutex_exit(&dtrace_lock);
15864         mutex_exit(&cpu_lock);
15865 
15866         return (0);
15867 }
15868 
15869 /*ARGSUSED*/
15870 static int
15871 dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv)
15872 {
15873         int rval;
15874         dof_helper_t help, *dhp = NULL;
15875 
15876         switch (cmd) {
15877         case DTRACEHIOC_ADDDOF:
15878                 if (copyin((void *)arg, &help, sizeof (help)) != 0) {
15879                         dtrace_dof_error(NULL, "failed to copyin DOF helper");
15880                         return (EFAULT);
15881                 }
15882 


16736                 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
16737         }
16738 
16739         bzero(&dtrace_anon, sizeof (dtrace_anon_t));
16740         unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
16741         dtrace_cpu_init = NULL;
16742         dtrace_helpers_cleanup = NULL;
16743         dtrace_helpers_fork = NULL;
16744         dtrace_cpustart_init = NULL;
16745         dtrace_cpustart_fini = NULL;
16746         dtrace_debugger_init = NULL;
16747         dtrace_debugger_fini = NULL;
16748         dtrace_modload = NULL;
16749         dtrace_modunload = NULL;
16750 
16751         ASSERT(dtrace_getf == 0);
16752         ASSERT(dtrace_closef == NULL);
16753 
16754         mutex_exit(&cpu_lock);
16755 
16756         if (dtrace_helptrace_enabled) {
16757                 kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize);
16758                 dtrace_helptrace_buffer = NULL;
16759         }
16760 
16761         kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
16762         dtrace_probes = NULL;
16763         dtrace_nprobes = 0;
16764 
16765         dtrace_hash_destroy(dtrace_bymod);
16766         dtrace_hash_destroy(dtrace_byfunc);
16767         dtrace_hash_destroy(dtrace_byname);
16768         dtrace_bymod = NULL;
16769         dtrace_byfunc = NULL;
16770         dtrace_byname = NULL;
16771 
16772         kmem_cache_destroy(dtrace_state_cache);
16773         vmem_destroy(dtrace_minor);
16774         vmem_destroy(dtrace_arena);
16775 
16776         if (dtrace_toxrange != NULL) {
16777                 kmem_free(dtrace_toxrange,
16778                     dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
16779                 dtrace_toxrange = NULL;
16780                 dtrace_toxranges = 0;




 252 
 253 static dtrace_pops_t    dtrace_provider_ops = {
 254         (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop,
 255         (void (*)(void *, struct modctl *))dtrace_nullop,
 256         (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop,
 257         (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
 258         (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
 259         (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
 260         NULL,
 261         NULL,
 262         NULL,
 263         (void (*)(void *, dtrace_id_t, void *))dtrace_nullop
 264 };
 265 
 266 static dtrace_id_t      dtrace_probeid_begin;   /* special BEGIN probe */
 267 static dtrace_id_t      dtrace_probeid_end;     /* special END probe */
 268 dtrace_id_t             dtrace_probeid_error;   /* special ERROR probe */
 269 
 270 /*
 271  * DTrace Helper Tracing Variables
 272  *
 273  * These variables should be set dynamically to enable helper tracing.  The
 274  * only variables that should be set are dtrace_helptrace_enable (which should
 275  * be set to a non-zero value to allocate helper tracing buffers on the next
 276  * open of /dev/dtrace) and dtrace_helptrace_disable (which should be set to a
 277  * non-zero value to deallocate helper tracing buffers on the next close of
 278  * /dev/dtrace).  When (and only when) helper tracing is disabled, the
 279  * buffer size may also be set via dtrace_helptrace_bufsize.
 280  */
 281 int                     dtrace_helptrace_enable = 0;
 282 int                     dtrace_helptrace_disable = 0;
 283 int                     dtrace_helptrace_bufsize = 16 * 1024 * 1024;
 284 uint32_t                dtrace_helptrace_nlocals;
 285 static dtrace_helptrace_t *dtrace_helptrace_buffer;
 286 static uint32_t         dtrace_helptrace_next = 0;
 287 static int              dtrace_helptrace_wrapped = 0;
 288 
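
As an illustrative aside (assuming live-kernel access with mdb -kw; the exact workflow is not prescribed by this change), the intent of the new knobs is that they be flipped at runtime rather than rebuilt into the kernel, roughly:

    # request helper tracing; the buffer is allocated on the next open of /dev/dtrace
    echo "dtrace_helptrace_enable/W 1" | mdb -kw

    # when finished, request teardown; the buffer is freed on the next close of /dev/dtrace
    echo "dtrace_helptrace_disable/W 1" | mdb -kw

While helper tracing is disabled, dtrace_helptrace_bufsize can be adjusted the same way before the next enable.
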
 289 /*
 290  * DTrace Error Hashing
 291  *
 292  * On DEBUG kernels, DTrace will track the errors it has seen in a hash
 293  * table.  This is very useful for checking coverage of tests that are
 294  * expected to induce DIF or DOF processing errors, and may be useful for
 295  * debugging problems in the DIF code generator or in DOF generation.  The
 296  * error hash may be examined with the ::dtrace_errhash MDB dcmd.
 297  */
 298 #ifdef DEBUG
 299 static dtrace_errhash_t dtrace_errhash[DTRACE_ERRHASHSZ];
 300 static const char *dtrace_errlast;
 301 static kthread_t *dtrace_errthread;
 302 static kmutex_t dtrace_errlock;
 303 #endif
 304 
 305 /*
 306  * DTrace Macros and Constants
 307  *


14309                  * trying to retain more enablings than are allowed -- but
14310                  * we only have one anonymous enabling, and we are guaranteed
14311                  * to be allowed at least one retained enabling; we assert
14312                  * that dtrace_enabling_retain() returns success.
14313                  */
14314                 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling);
14315                 ASSERT(rval == 0);
14316 
14317                 dtrace_enabling_dump(dtrace_anon.dta_enabling);
14318         }
14319 }
14320 
14321 /*
14322  * DTrace Helper Functions
14323  */
14324 static void
14325 dtrace_helper_trace(dtrace_helper_action_t *helper,
14326     dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where)
14327 {
14328         uint32_t size, next, nnext, i;
14329         dtrace_helptrace_t *ent, *buffer;
14330         uint16_t flags = cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
14331 
14332         if ((buffer = dtrace_helptrace_buffer) == NULL)
14333                 return;
14334 
14335         ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals);
14336 
14337         /*
14338          * What would a tracing framework be without its own tracing
14339          * framework?  (Well, a hell of a lot simpler, for starters...)
14340          */
14341         size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals *
14342             sizeof (uint64_t) - sizeof (uint64_t);
14343 
14344         /*
14345          * Iterate until we can allocate a slot in the trace buffer.
14346          */
14347         do {
14348                 next = dtrace_helptrace_next;
14349 
14350                 if (next + size < dtrace_helptrace_bufsize) {
14351                         nnext = next + size;
14352                 } else {
14353                         nnext = size;
14354                 }
14355         } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next);
14356 
14357         /*
14358          * We have our slot; fill it in.
14359          */
14360         if (nnext == size) {
14361                 dtrace_helptrace_wrapped++;
14362                 next = 0;
14363         }
14364 
14365         ent = (dtrace_helptrace_t *)((uintptr_t)buffer + next);
14366         ent->dtht_helper = helper;
14367         ent->dtht_where = where;
14368         ent->dtht_nlocals = vstate->dtvs_nlocals;
14369 
14370         ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ?
14371             mstate->dtms_fltoffs : -1;
14372         ent->dtht_fault = DTRACE_FLAGS2FLT(flags);
14373         ent->dtht_illval = cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
14374 
14375         for (i = 0; i < vstate->dtvs_nlocals; i++) {
14376                 dtrace_statvar_t *svar;
14377 
14378                 if ((svar = vstate->dtvs_locals[i]) == NULL)
14379                         continue;
14380 
14381                 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t));
14382                 ent->dtht_locals[i] =
14383                     ((uint64_t *)(uintptr_t)svar->dtsv_data)[CPU->cpu_id];
14384         }
14385 }
14386 
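The slot allocation in dtrace_helper_trace() above is a lock-free bump allocation over a wrapping buffer: each tracer retries dtrace_cas32() until it owns the range [next, next + size).  A minimal stand-alone sketch of the same reservation pattern, using C11 atomics in place of dtrace_cas32() (the names and the wrapped flag here are illustrative, not the driver's own):

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint32_t trace_next;         /* analogous to dtrace_helptrace_next */

    static uint32_t
    reserve_slot(uint32_t size, uint32_t bufsize, int *wrapped)
    {
            uint32_t next, nnext;

            do {
                    next = atomic_load(&trace_next);
                    /* wrap back to the start when the record will not fit */
                    nnext = (next + size < bufsize) ? next + size : size;
            } while (!atomic_compare_exchange_strong(&trace_next, &next, nnext));

            if (nnext == size) {
                    *wrapped = 1;
                    return (0);     /* our record starts at the buffer's base */
            }
            return (next);          /* our record starts at the old offset */
    }

Because the compare-and-swap publishes the new offset atomically, the winner owns its reserved range exclusively and can fill in the record without taking a lock, which is why dtrace_helper_trace() needs no mutex in probe context.
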
14387 static uint64_t
14388 dtrace_helper(int which, dtrace_mstate_t *mstate,
14389     dtrace_state_t *state, uint64_t arg0, uint64_t arg1)
14390 {
14391         uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
14392         uint64_t sarg0 = mstate->dtms_arg[0];
14393         uint64_t sarg1 = mstate->dtms_arg[1];
14394         uint64_t rval;
14395         dtrace_helpers_t *helpers = curproc->p_dtrace_helpers;
14396         dtrace_helper_action_t *helper;
14397         dtrace_vstate_t *vstate;
14398         dtrace_difo_t *pred;
14399         int i, trace = dtrace_helptrace_buffer != NULL;
14400 
14401         ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS);
14402 
14403         if (helpers == NULL)
14404                 return (0);
14405 
14406         if ((helper = helpers->dthps_actions[which]) == NULL)
14407                 return (0);
14408 
14409         vstate = &helpers->dthps_vstate;
14410         mstate->dtms_arg[0] = arg0;
14411         mstate->dtms_arg[1] = arg1;
14412 
14413         /*
14414          * Now iterate over each helper.  If its predicate evaluates to 'true',
14415          * we'll call the corresponding actions.  Note that the below calls
14416          * to dtrace_dif_emulate() may set faults in machine state.  This is
14417          * okay:  our caller (the outer dtrace_dif_emulate()) will simply plow
14418          * the stored DIF offset with its own (which is the desired behavior).
14419          * Also, note the calls to dtrace_dif_emulate() may allocate scratch


15696          * Once we've registered, we can assert that dtrace_provider is our
15697          * pseudo provider.
15698          */
15699         (void) dtrace_register("dtrace", &dtrace_provider_attr,
15700             DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id);
15701 
15702         ASSERT(dtrace_provider != NULL);
15703         ASSERT((dtrace_provider_id_t)dtrace_provider == id);
15704 
15705         dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
15706             dtrace_provider, NULL, NULL, "BEGIN", 0, NULL);
15707         dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
15708             dtrace_provider, NULL, NULL, "END", 0, NULL);
15709         dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
15710             dtrace_provider, NULL, NULL, "ERROR", 1, NULL);
15711 
15712         dtrace_anon_property();
15713         mutex_exit(&cpu_lock);
15714 
15715         /*
15716          * If there are already providers, we must ask them to provide their
15717          * probes, and then match any anonymous enabling against them.  Note
15718          * that there should be no other retained enablings at this time:
15719          * the only retained enablings at this time should be the anonymous
15720          * enabling.
15721          */
15722         if (dtrace_anon.dta_enabling != NULL) {
15723                 ASSERT(dtrace_retained == dtrace_anon.dta_enabling);
15724 
15725                 dtrace_enabling_provide(NULL);
15726                 state = dtrace_anon.dta_state;
15727 
15728                 /*
15729                  * We couldn't hold cpu_lock across the above call to
15730                  * dtrace_enabling_provide(), but we must hold it to actually
15731                  * enable the probes.  We have to drop all of our locks, pick
15732                  * up cpu_lock, and regain our locks before matching the
15733                  * retained anonymous enabling.
15734                  */
15735                 mutex_exit(&dtrace_lock);


15791         mutex_enter(&dtrace_provider_lock);
15792         dtrace_probe_provide(NULL, NULL);
15793         mutex_exit(&dtrace_provider_lock);
15794 
15795         mutex_enter(&cpu_lock);
15796         mutex_enter(&dtrace_lock);
15797         dtrace_opens++;
15798         dtrace_membar_producer();
15799 
15800         /*
15801          * If the kernel debugger is active (that is, if the kernel debugger
15802          * modified text in some way), we won't allow the open.
15803          */
15804         if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
15805                 dtrace_opens--;
15806                 mutex_exit(&cpu_lock);
15807                 mutex_exit(&dtrace_lock);
15808                 return (EBUSY);
15809         }
15810 
15811         if (dtrace_helptrace_enable && dtrace_helptrace_buffer == NULL) {
15812                 /*
15813                  * If DTrace helper tracing is enabled, we need to allocate the
15814                  * trace buffer and initialize the values.
15815                  */
15816                 dtrace_helptrace_buffer =
15817                     kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP);
15818                 dtrace_helptrace_next = 0;
15819                 dtrace_helptrace_wrapped = 0;
15820                 dtrace_helptrace_enable = 0;
15821         }
15822 
15823         state = dtrace_state_create(devp, cred_p);
15824         mutex_exit(&cpu_lock);
15825 
15826         if (state == NULL) {
15827                 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL)
15828                         (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
15829                 mutex_exit(&dtrace_lock);
15830                 return (EAGAIN);
15831         }
15832 
15833         mutex_exit(&dtrace_lock);
15834 
15835         return (0);
15836 }
15837 
15838 /*ARGSUSED*/
15839 static int
15840 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
15841 {
15842         minor_t minor = getminor(dev);
15843         dtrace_state_t *state;
15844         dtrace_helptrace_t *buf = NULL;
15845 
15846         if (minor == DTRACEMNRN_HELPER)
15847                 return (0);
15848 
15849         state = ddi_get_soft_state(dtrace_softstate, minor);
15850 
15851         mutex_enter(&cpu_lock);
15852         mutex_enter(&dtrace_lock);
15853 
15854         if (state->dts_anon) {
15855                 /*
15856                  * There is anonymous state. Destroy that first.
15857                  */
15858                 ASSERT(dtrace_anon.dta_state == NULL);
15859                 dtrace_state_destroy(state->dts_anon);
15860         }
15861 
15862         if (dtrace_helptrace_disable) {
15863                 /*
15864                  * If we have been told to disable helper tracing, set the
15865                  * buffer to NULL before calling into dtrace_state_destroy();
15866                  * we take advantage of its dtrace_sync() to know that no
15867                  * CPU is in probe context with enabled helper tracing
15868                  * after it returns.
15869                  */
15870                 buf = dtrace_helptrace_buffer;
15871                 dtrace_helptrace_buffer = NULL;
15872         }
15873 
15874         dtrace_state_destroy(state);
15875         ASSERT(dtrace_opens > 0);
15876 
15877         /*
15878          * Only relinquish control of the kernel debugger interface when there
15879          * are no consumers and no anonymous enablings.
15880          */
15881         if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL)
15882                 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
15883 
15884         if (buf != NULL) {
15885                 kmem_free(buf, dtrace_helptrace_bufsize);
15886                 dtrace_helptrace_disable = 0;
15887         }
15888 
15889         mutex_exit(&dtrace_lock);
15890         mutex_exit(&cpu_lock);
15891 
15892         return (0);
15893 }
15894 
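The dtrace_helptrace_disable handling in dtrace_close() above depends on ordering: the buffer pointer is cleared before dtrace_state_destroy(), whose dtrace_sync() ensures that no CPU is still tracing into the old buffer, and only then is the memory freed.  A self-contained sketch of that retire-then-free pattern, with hypothetical stand-ins for the driver's globals and for the dtrace_sync() barrier:

    #include <stddef.h>
    #include <stdlib.h>

    static void *trace_buf;         /* stand-in for dtrace_helptrace_buffer */

    /* stand-in for the dtrace_sync() performed on our behalf by dtrace_state_destroy() */
    static void
    wait_for_tracers(void)
    {
    }

    static void
    retire_trace_buffer(void)
    {
            void *buf = trace_buf;

            if (buf == NULL)
                    return;

            trace_buf = NULL;       /* tracers that start after this see no buffer */
            wait_for_tracers();     /* tracers that loaded the old pointer have drained */
            free(buf);              /* only now is it safe to reclaim the memory */
    }

The same reasoning explains why dtrace_helper_trace() loads dtrace_helptrace_buffer exactly once into a local: a tracer either sees NULL and bails out, or sees the old buffer, which remains valid until the barrier has completed.
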
15895 /*ARGSUSED*/
15896 static int
15897 dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv)
15898 {
15899         int rval;
15900         dof_helper_t help, *dhp = NULL;
15901 
15902         switch (cmd) {
15903         case DTRACEHIOC_ADDDOF:
15904                 if (copyin((void *)arg, &help, sizeof (help)) != 0) {
15905                         dtrace_dof_error(NULL, "failed to copyin DOF helper");
15906                         return (EFAULT);
15907                 }
15908 


16762                 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
16763         }
16764 
16765         bzero(&dtrace_anon, sizeof (dtrace_anon_t));
16766         unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
16767         dtrace_cpu_init = NULL;
16768         dtrace_helpers_cleanup = NULL;
16769         dtrace_helpers_fork = NULL;
16770         dtrace_cpustart_init = NULL;
16771         dtrace_cpustart_fini = NULL;
16772         dtrace_debugger_init = NULL;
16773         dtrace_debugger_fini = NULL;
16774         dtrace_modload = NULL;
16775         dtrace_modunload = NULL;
16776 
16777         ASSERT(dtrace_getf == 0);
16778         ASSERT(dtrace_closef == NULL);
16779 
16780         mutex_exit(&cpu_lock);
16781 
16782         kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
16783         dtrace_probes = NULL;
16784         dtrace_nprobes = 0;
16785 
16786         dtrace_hash_destroy(dtrace_bymod);
16787         dtrace_hash_destroy(dtrace_byfunc);
16788         dtrace_hash_destroy(dtrace_byname);
16789         dtrace_bymod = NULL;
16790         dtrace_byfunc = NULL;
16791         dtrace_byname = NULL;
16792 
16793         kmem_cache_destroy(dtrace_state_cache);
16794         vmem_destroy(dtrace_minor);
16795         vmem_destroy(dtrace_arena);
16796 
16797         if (dtrace_toxrange != NULL) {
16798                 kmem_free(dtrace_toxrange,
16799                     dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
16800                 dtrace_toxrange = NULL;
16801                 dtrace_toxranges = 0;