/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright 2012 Garrett D'Amore <garrett@damore.org>.  All rights reserved.
 */


#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/cpuvar.h>
#include <sys/ddi_impldefs.h>
#include <sys/ddi.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/fm/io/ddi.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/debug.h>
#include <sys/bofi.h>
#include <sys/dvma.h>
#include <sys/bofi_impl.h>

/*
 * Testing the resilience of a hardened device driver requires a suitably wide
 * range of different types of "typical" hardware faults to be injected,
 * preferably in a controlled and repeatable fashion. This is not in general
 * possible via hardware, so the "fault injection test harness" is provided.
 * This works by intercepting calls from the driver to various DDI routines,
 * and then corrupting the result of those DDI routine calls as if the
 * hardware had caused the corruption.
 *
 * Conceptually, the bofi driver consists of two parts:
 *
 * A driver interface that supports a number of ioctls which allow error
 * definitions ("errdefs") to be defined and subsequently managed. The
 * driver is a clone driver, so each open will create a separate
 * invocation. Any errdefs created by using ioctls to that invocation
 * will automatically be deleted when that invocation is closed.
 *
 * Intercept routines: When the bofi driver is attached, it edits the
 * bus_ops structure of the bus nexus specified by the "bofi-nexus"
 * field in the "bofi.conf" file, thus allowing the
 * bofi driver to intercept various ddi functions. These intercept
 * routines primarily carry out fault injections based on the errdefs
 * created for that device.
 *
 * Faults can be injected into:
 *
 * DMA (corrupting data for DMA to/from memory areas defined by
 * ddi_dma_setup(), ddi_dma_bind_handle(), etc)
 *
 * Physical IO (corrupting data sent/received via ddi_get8(), ddi_put8(),
 * etc),
 *
 * Interrupts (generating spurious interrupts, losing interrupts,
 * delaying interrupts).
 *
 * By default, ddi routines called from all drivers will be intercepted
 * and faults potentially injected. However, the "bofi-to-test" field in
 * the "bofi.conf" file can be set to a space-separated list of drivers to
 * test (or by preceding each driver name in the list with an "!", a list
 * of drivers not to test).
 *
 * In addition to fault injection, the bofi driver does a number of static
 * checks which are controlled by properties in the "bofi.conf" file.
 *
 * "bofi-ddi-check" - if set will validate that there are no PIO accesses
 * other than those using the DDI routines (ddi_get8(), ddi_put8(), etc).
 *
 * "bofi-range-check" - if set to "warn" (warning) or "panic" (panic), will
 * validate that calls to ddi_get8(), ddi_put8(), etc are not made
 * specifying addresses outside the range of the access_handle.
 *
 * "bofi-sync-check" - if set will validate that calls to ddi_dma_sync()
 * are being made correctly.
 */
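
/*
 * For illustration only, a hypothetical "bofi.conf" fragment using the
 * properties described above might look like the following (the nexus and
 * driver names are example values, not defaults taken from this file):
 *
 *    name="bofi" parent="pseudo" instance=0;
 *    bofi-nexus="pci";
 *    bofi-to-test="xyzdrv";
 *    bofi-range-check="warn";
 *    bofi-ddi-check="on";
 *    bofi-sync-check="on";
 */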

extern void *bp_mapin_common(struct buf *, int);

static int bofi_ddi_check;
static int bofi_sync_check;
static int bofi_range_check;

static struct bofi_link bofi_link_array[BOFI_NLINKS], *bofi_link_freelist;

#define LLSZMASK (sizeof (uint64_t)-1)

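/*
 * Shadow handle structures are kept on doubly-linked hash chains headed by
 * the two tables below; the macros hash a pointer into one of
 * HDL_HASH_TBL_SIZE buckets, right-shifting first to discard low-order bits
 * that are largely constant because of structure alignment.
 */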
#define HDL_HASH_TBL_SIZE 64
static struct bofi_shadow hhash_table[HDL_HASH_TBL_SIZE];
static struct bofi_shadow dhash_table[HDL_HASH_TBL_SIZE];
#define HDL_DHASH(x) \
        (&dhash_table[((uintptr_t)(x) >> 3) & (HDL_HASH_TBL_SIZE-1)])
#define HDL_HHASH(x) \
        (&hhash_table[((uintptr_t)(x) >> 5) & (HDL_HASH_TBL_SIZE-1)])

static struct bofi_shadow shadow_list;
static struct bofi_errent *errent_listp;

static char driver_list[NAMESIZE];
static int driver_list_size;
static int driver_list_neg;
static char nexus_name[NAMESIZE];

static int initialized = 0;

#define NCLONES 2560
static int clone_tab[NCLONES];

static dev_info_t *our_dip;

static kmutex_t bofi_mutex;
static kmutex_t clone_tab_mutex;
static kmutex_t bofi_low_mutex;
static ddi_iblock_cookie_t bofi_low_cookie;
static uint_t   bofi_signal(caddr_t arg);
static int      bofi_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
static int      bofi_attach(dev_info_t *, ddi_attach_cmd_t);
static int      bofi_detach(dev_info_t *, ddi_detach_cmd_t);
static int      bofi_open(dev_t *, int, int, cred_t *);
static int      bofi_close(dev_t, int, int, cred_t *);
static int      bofi_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int      bofi_errdef_alloc(struct bofi_errdef *, char *,
                    struct bofi_errent *);
static int      bofi_errdef_free(struct bofi_errent *);
static void     bofi_start(struct bofi_errctl *, char *);
static void     bofi_stop(struct bofi_errctl *, char *);
static void     bofi_broadcast(struct bofi_errctl *, char *);
static void     bofi_clear_acc_chk(struct bofi_errctl *, char *);
static void     bofi_clear_errors(struct bofi_errctl *, char *);
static void     bofi_clear_errdefs(struct bofi_errctl *, char *);
static int      bofi_errdef_check(struct bofi_errstate *,
                    struct acc_log_elem **);
static int      bofi_errdef_check_w(struct bofi_errstate *,
                    struct acc_log_elem **);
static int      bofi_map(dev_info_t *, dev_info_t *, ddi_map_req_t *,
                    off_t, off_t, caddr_t *);
static int      bofi_dma_allochdl(dev_info_t *, dev_info_t *,
                    ddi_dma_attr_t *, int (*)(caddr_t), caddr_t,
                    ddi_dma_handle_t *);
static int      bofi_dma_freehdl(dev_info_t *, dev_info_t *,
                    ddi_dma_handle_t);
static int      bofi_dma_bindhdl(dev_info_t *, dev_info_t *,
                    ddi_dma_handle_t, struct ddi_dma_req *, ddi_dma_cookie_t *,
                    uint_t *);
static int      bofi_dma_unbindhdl(dev_info_t *, dev_info_t *,
                    ddi_dma_handle_t);
static int      bofi_dma_flush(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
                    off_t, size_t, uint_t);
static int      bofi_dma_ctl(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
                    enum ddi_dma_ctlops, off_t *, size_t *, caddr_t *, uint_t);
static int      bofi_dma_win(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
                    uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);
static int      bofi_intr_ops(dev_info_t *dip, dev_info_t *rdip,
                    ddi_intr_op_t intr_op, ddi_intr_handle_impl_t *hdlp,
                    void *result);
static int      bofi_fm_ereport_callback(sysevent_t *ev, void *cookie);

evchan_t *bofi_error_chan;

#define FM_SIMULATED_DMA "simulated.dma"
#define FM_SIMULATED_PIO "simulated.pio"

#if defined(__sparc)
static void     bofi_dvma_kaddr_load(ddi_dma_handle_t, caddr_t, uint_t,
                    uint_t, ddi_dma_cookie_t *);
static void     bofi_dvma_unload(ddi_dma_handle_t, uint_t, uint_t);
static void     bofi_dvma_sync(ddi_dma_handle_t, uint_t, uint_t);
static void     bofi_dvma_reserve(dev_info_t *, ddi_dma_handle_t);
#endif
static int      driver_under_test(dev_info_t *);
static int      bofi_check_acc_hdl(ddi_acc_impl_t *);
static int      bofi_check_dma_hdl(ddi_dma_impl_t *);
static int      bofi_post_event(dev_info_t *dip, dev_info_t *rdip,
                    ddi_eventcookie_t eventhdl, void *impl_data);

static struct bus_ops bofi_bus_ops = {
        BUSO_REV,
        bofi_map,
        NULL,
        NULL,
        NULL,
        i_ddi_map_fault,
        NULL,
        bofi_dma_allochdl,
        bofi_dma_freehdl,
        bofi_dma_bindhdl,
        bofi_dma_unbindhdl,
        bofi_dma_flush,
        bofi_dma_win,
        bofi_dma_ctl,
        NULL,
        ddi_bus_prop_op,
        ndi_busop_get_eventcookie,
        ndi_busop_add_eventcall,
        ndi_busop_remove_eventcall,
        bofi_post_event,
        NULL,
        0,
        0,
        0,
        0,
        0,
        0,
        0,
        bofi_intr_ops
};

static struct cb_ops bofi_cb_ops = {
        bofi_open,              /* open */
        bofi_close,             /* close */
        nodev,                  /* strategy */
        nodev,                  /* print */
        nodev,                  /* dump */
        nodev,                  /* read */
        nodev,                  /* write */
        bofi_ioctl,             /* ioctl */
        nodev,                  /* devmap */
        nodev,                  /* mmap */
        nodev,                  /* segmap */
        nochpoll,               /* chpoll */
        ddi_prop_op,            /* prop_op */
        NULL,                   /* for STREAMS drivers */
        D_MP,                   /* driver compatibility flag */
        CB_REV,                 /* cb_ops revision */
        nodev,                  /* aread */
        nodev                   /* awrite */
};

static struct dev_ops bofi_ops = {
        DEVO_REV,               /* driver build version */
        0,                      /* device reference count */
        bofi_getinfo,
        nulldev,
        nulldev,                /* probe */
        bofi_attach,
        bofi_detach,
        nulldev,                /* reset */
        &bofi_cb_ops,
        (struct bus_ops *)NULL,
        nulldev,                /* power */
        ddi_quiesce_not_needed,         /* quiesce */
};

/* module configuration stuff */
static void    *statep;

static struct modldrv modldrv = {
        &mod_driverops,
        "bofi driver",
        &bofi_ops
};

static struct modlinkage modlinkage = {
        MODREV_1,
        { &modldrv, NULL }
};

static struct bus_ops save_bus_ops;

#if defined(__sparc)
static struct dvma_ops bofi_dvma_ops = {
        DVMAO_REV,
        bofi_dvma_kaddr_load,
        bofi_dvma_unload,
        bofi_dvma_sync
};
#endif

/*
 * support routine - map user page into kernel virtual
 */
static caddr_t
dmareq_mapin(offset_t len, caddr_t addr, struct as *as, int flag)
{
        struct buf buf;
        struct proc proc;

        /*
         * mock up a buf structure so we can call bp_mapin_common()
         */
        buf.b_flags = B_PHYS;
        buf.b_un.b_addr = (caddr_t)addr;
        buf.b_bcount = (size_t)len;
        proc.p_as = as;
        buf.b_proc = &proc;
        return (bp_mapin_common(&buf, flag));
}


/*
 * support routine - map page chain into kernel virtual
 */
static caddr_t
dmareq_pp_mapin(offset_t len, uint_t offset, page_t *pp, int flag)
{
        struct buf buf;

        /*
         * mock up a buf structure so we can call bp_mapin_common()
         */
        buf.b_flags = B_PAGEIO;
        buf.b_un.b_addr = (caddr_t)(uintptr_t)offset;
        buf.b_bcount = (size_t)len;
        buf.b_pages = pp;
        return (bp_mapin_common(&buf, flag));
}


/*
 * support routine - map page array into kernel virtual
 */
static caddr_t
dmareq_pplist_mapin(uint_t len, caddr_t addr, page_t **pplist, struct as *as,
    int flag)
{
        struct buf buf;
        struct proc proc;

        /*
         * mock up a buf structure so we can call bp_mapin_common()
         */
        buf.b_flags = B_PHYS|B_SHADOW;
        buf.b_un.b_addr = addr;
        buf.b_bcount = len;
        buf.b_shadow = pplist;
        proc.p_as = as;
        buf.b_proc = &proc;
        return (bp_mapin_common(&buf, flag));
}


/*
 * support routine - map dmareq into kernel virtual if not already
 * fills in *lenp with length
 * *mapaddrp will be new kernel virtual address - or null if no mapping needed
 */
static caddr_t
ddi_dmareq_mapin(struct ddi_dma_req *dmareqp, caddr_t *mapaddrp,
        offset_t *lenp)
{
        int sleep = (dmareqp->dmar_fp == DDI_DMA_SLEEP) ? VM_SLEEP: VM_NOSLEEP;

        *lenp = dmareqp->dmar_object.dmao_size;
        if (dmareqp->dmar_object.dmao_type == DMA_OTYP_PAGES) {
                *mapaddrp = dmareq_pp_mapin(dmareqp->dmar_object.dmao_size,
                    dmareqp->dmar_object.dmao_obj.pp_obj.pp_offset,
                    dmareqp->dmar_object.dmao_obj.pp_obj.pp_pp, sleep);
                return (*mapaddrp);
        } else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_priv != NULL) {
                *mapaddrp = dmareq_pplist_mapin(dmareqp->dmar_object.dmao_size,
                    dmareqp->dmar_object.dmao_obj.virt_obj.v_addr,
                    dmareqp->dmar_object.dmao_obj.virt_obj.v_priv,
                    dmareqp->dmar_object.dmao_obj.virt_obj.v_as, sleep);
                return (*mapaddrp);
        } else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_as == &kas) {
                *mapaddrp = NULL;
                return (dmareqp->dmar_object.dmao_obj.virt_obj.v_addr);
        } else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_as == NULL) {
                *mapaddrp = NULL;
                return (dmareqp->dmar_object.dmao_obj.virt_obj.v_addr);
        } else {
                *mapaddrp = dmareq_mapin(dmareqp->dmar_object.dmao_size,
                    dmareqp->dmar_object.dmao_obj.virt_obj.v_addr,
                    dmareqp->dmar_object.dmao_obj.virt_obj.v_as, sleep);
                return (*mapaddrp);
        }
}


/*
 * support routine - free off kernel virtual mapping as allocated by
 * ddi_dmareq_mapin()
 */
static void
ddi_dmareq_mapout(caddr_t addr, offset_t len, int map_flags, page_t *pp,
    page_t **pplist)
{
        struct buf buf;

        if (addr == NULL)
                return;
        /*
         * mock up a buf structure
         */
        buf.b_flags = B_REMAPPED | map_flags;
        buf.b_un.b_addr = addr;
        buf.b_bcount = (size_t)len;
        buf.b_pages = pp;
        buf.b_shadow = pplist;
        bp_mapout(&buf);
}

static time_t
bofi_gettime()
{
        timestruc_t ts;

        gethrestime(&ts);
        return (ts.tv_sec);
}

/*
 * reset the bus_ops structure of the specified nexus to point to
 * the original values in the save_bus_ops structure.
 *
 * Note that both this routine and modify_bus_ops() rely on the current
 * behavior of the framework in that nexus drivers are not unloadable
 *
 */

static int
reset_bus_ops(char *name, struct bus_ops *bop)
{
        struct modctl *modp;
        struct modldrv *mp;
        struct bus_ops *bp;
        struct dev_ops *ops;

        mutex_enter(&mod_lock);
        /*
         * find specified module
         */
        modp = &modules;
        do {
                if (strcmp(name, modp->mod_modname) == 0) {
                        if (!modp->mod_linkage) {
                                mutex_exit(&mod_lock);
                                return (0);
                        }
                        mp = modp->mod_linkage->ml_linkage[0];
                        if (!mp || !mp->drv_dev_ops) {
                                mutex_exit(&mod_lock);
                                return (0);
                        }
                        ops = mp->drv_dev_ops;
                        bp = ops->devo_bus_ops;
                        if (!bp) {
                                mutex_exit(&mod_lock);
                                return (0);
                        }
                        if (ops->devo_refcnt > 0) {
                                /*
                                 * As long as devices are active with modified
                                 * bus ops bofi must not go away. There may be
                                 * drivers with modified access or dma handles.
                                 */
                                mutex_exit(&mod_lock);
                                return (0);
                        }
                        cmn_err(CE_NOTE, "bofi reset bus_ops for %s",
                            mp->drv_linkinfo);
                        bp->bus_intr_op = bop->bus_intr_op;
                        bp->bus_post_event = bop->bus_post_event;
                        bp->bus_map = bop->bus_map;
                        bp->bus_dma_map = bop->bus_dma_map;
                        bp->bus_dma_allochdl = bop->bus_dma_allochdl;
                        bp->bus_dma_freehdl = bop->bus_dma_freehdl;
                        bp->bus_dma_bindhdl = bop->bus_dma_bindhdl;
                        bp->bus_dma_unbindhdl = bop->bus_dma_unbindhdl;
                        bp->bus_dma_flush = bop->bus_dma_flush;
                        bp->bus_dma_win = bop->bus_dma_win;
                        bp->bus_dma_ctl = bop->bus_dma_ctl;
                        mutex_exit(&mod_lock);
                        return (1);
                }
        } while ((modp = modp->mod_next) != &modules);
        mutex_exit(&mod_lock);
        return (0);
}

/*
 * modify the bus_ops structure of the specified nexus to point to bofi
 * routines, saving the original values in the save_bus_ops structure
 */

static int
modify_bus_ops(char *name, struct bus_ops *bop)
{
        struct modctl *modp;
        struct modldrv *mp;
        struct bus_ops *bp;
        struct dev_ops *ops;

        if (ddi_name_to_major(name) == -1)
                return (0);

        mutex_enter(&mod_lock);
        /*
         * find specified module
         */
        modp = &modules;
        do {
                if (strcmp(name, modp->mod_modname) == 0) {
                        if (!modp->mod_linkage) {
                                mutex_exit(&mod_lock);
                                return (0);
                        }
                        mp = modp->mod_linkage->ml_linkage[0];
                        if (!mp || !mp->drv_dev_ops) {
                                mutex_exit(&mod_lock);
                                return (0);
                        }
                        ops = mp->drv_dev_ops;
                        bp = ops->devo_bus_ops;
                        if (!bp) {
                                mutex_exit(&mod_lock);
                                return (0);
                        }
                        if (ops->devo_refcnt == 0) {
                                /*
                                 * If there is no device active for this
                                 * module then there is nothing to do for bofi.
                                 */
                                mutex_exit(&mod_lock);
                                return (0);
                        }
                        cmn_err(CE_NOTE, "bofi modify bus_ops for %s",
                            mp->drv_linkinfo);
                        save_bus_ops = *bp;
                        bp->bus_intr_op = bop->bus_intr_op;
                        bp->bus_post_event = bop->bus_post_event;
                        bp->bus_map = bop->bus_map;
                        bp->bus_dma_map = bop->bus_dma_map;
                        bp->bus_dma_allochdl = bop->bus_dma_allochdl;
                        bp->bus_dma_freehdl = bop->bus_dma_freehdl;
                        bp->bus_dma_bindhdl = bop->bus_dma_bindhdl;
                        bp->bus_dma_unbindhdl = bop->bus_dma_unbindhdl;
                        bp->bus_dma_flush = bop->bus_dma_flush;
                        bp->bus_dma_win = bop->bus_dma_win;
                        bp->bus_dma_ctl = bop->bus_dma_ctl;
                        mutex_exit(&mod_lock);
                        return (1);
                }
        } while ((modp = modp->mod_next) != &modules);
        mutex_exit(&mod_lock);
        return (0);
}


int
_init(void)
{
        int    e;

        e = ddi_soft_state_init(&statep, sizeof (struct bofi_errent), 1);
        if (e != 0)
                return (e);
        if ((e = mod_install(&modlinkage)) != 0)
                ddi_soft_state_fini(&statep);
        return (e);
}


int
_fini(void)
{
        int e;

        if ((e = mod_remove(&modlinkage)) != 0)
                return (e);
        ddi_soft_state_fini(&statep);
        return (e);
}


int
_info(struct modinfo *modinfop)
{
        return (mod_info(&modlinkage, modinfop));
}


static int
bofi_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
        char *name;
        char buf[80];
        int i;
        int s, ss;
        int size = NAMESIZE;
        int new_string;
        char *ptr;

        if (cmd != DDI_ATTACH)
                return (DDI_FAILURE);
        /*
         * only one instance - but we clone using the open routine
         */
        if (ddi_get_instance(dip) > 0)
                return (DDI_FAILURE);

        if (!initialized) {
                if ((name = ddi_get_name(dip)) == NULL)
                        return (DDI_FAILURE);
                (void) snprintf(buf, sizeof (buf), "%s,ctl", name);
                if (ddi_create_minor_node(dip, buf, S_IFCHR, 0,
                    DDI_PSEUDO, NULL) == DDI_FAILURE)
                        return (DDI_FAILURE);

                if (ddi_get_soft_iblock_cookie(dip, DDI_SOFTINT_MED,
                    &bofi_low_cookie) != DDI_SUCCESS) {
                        ddi_remove_minor_node(dip, buf);
                        return (DDI_FAILURE); /* fail attach */
                }
                /*
                 * get nexus name (from conf file)
                 */
                if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF, 0,
                    "bofi-nexus", nexus_name, &size) != DDI_PROP_SUCCESS) {
                        ddi_remove_minor_node(dip, buf);
                        return (DDI_FAILURE);
                }
                /*
                 * get whether to do access handle range checking
                 */
                if ((bofi_range_check = ddi_prop_lookup_string(DDI_DEV_T_ANY,
                    dip, 0, "bofi-range-check", &ptr)) != DDI_PROP_SUCCESS)
                        bofi_range_check = 0;
                else if (strcmp(ptr, "panic") == 0)
                        bofi_range_check = 2;
                else if (strcmp(ptr, "warn") == 0)
                        bofi_range_check = 1;
                else
                        bofi_range_check = 0;
                ddi_prop_free(ptr);

                /*
                 * get whether to prevent direct access to registers
                 */
                if ((bofi_ddi_check = ddi_prop_lookup_string(DDI_DEV_T_ANY,
                    dip, 0, "bofi-ddi-check", &ptr)) != DDI_PROP_SUCCESS)
                        bofi_ddi_check = 0;
                else if (strcmp(ptr, "on") == 0)
                        bofi_ddi_check = 1;
                else
                        bofi_ddi_check = 0;
                ddi_prop_free(ptr);

                /*
                 * get whether to do copy on ddi_dma_sync
                 */
                if ((bofi_sync_check = ddi_prop_lookup_string(DDI_DEV_T_ANY,
                    dip, 0, "bofi-sync-check", &ptr)) != DDI_PROP_SUCCESS)
                        bofi_sync_check = 0;
                else if (strcmp(ptr, "on") == 0)
                        bofi_sync_check = 1;
                else
                        bofi_sync_check = 0;
                ddi_prop_free(ptr);

                /*
                 * get driver-under-test names (from conf file)
                 */
                size = NAMESIZE;
                if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF, 0,
                    "bofi-to-test", driver_list, &size) != DDI_PROP_SUCCESS)
                        driver_list[0] = 0;
                /*
                 * and convert into a sequence of strings
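                 * (the loop below overwrites the separating spaces with NULs,
                 * leaving a series of NUL-terminated names in driver_list;
                 * driver_list_neg ends up 1 only if every name begins with
                 * "!", i.e. the list names drivers that are NOT to be tested)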
                 */
                driver_list_neg = 1;
                new_string = 1;
                driver_list_size = strlen(driver_list);
                for (i = 0; i < driver_list_size; i++) {
                        if (driver_list[i] == ' ') {
                                driver_list[i] = '\0';
                                new_string = 1;
                        } else if (new_string) {
                                if (driver_list[i] != '!')
                                        driver_list_neg = 0;
                                new_string = 0;
                        }
                }
                /*
                 * initialize mutex, lists
                 */
                mutex_init(&clone_tab_mutex, NULL, MUTEX_DRIVER,
                    NULL);
                /*
                 * fake up iblock cookie - need to protect ourselves
                 * against drivers that use hilevel interrupts
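                 * (spl8() is called twice so that "s" captures the priority
                 * value for level 8 - the second call returns the level set
                 * by the first - and splx() then restores the original
                 * level; "s" is then used as the interrupt priority of the
                 * high-level spin mutex)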
                 */
                ss = spl8();
                s = spl8();
                splx(ss);
                mutex_init(&bofi_mutex, NULL, MUTEX_SPIN, (void *)(uintptr_t)s);
                mutex_init(&bofi_low_mutex, NULL, MUTEX_DRIVER,
                    (void *)bofi_low_cookie);
                shadow_list.next = &shadow_list;
                shadow_list.prev = &shadow_list;
                for (i = 0; i < HDL_HASH_TBL_SIZE; i++) {
                        hhash_table[i].hnext = &hhash_table[i];
                        hhash_table[i].hprev = &hhash_table[i];
                        dhash_table[i].dnext = &dhash_table[i];
                        dhash_table[i].dprev = &dhash_table[i];
                }
                for (i = 1; i < BOFI_NLINKS; i++)
                        bofi_link_array[i].link = &bofi_link_array[i-1];
                bofi_link_freelist = &bofi_link_array[BOFI_NLINKS - 1];
                /*
                 * overlay bus_ops structure
                 */
                if (modify_bus_ops(nexus_name, &bofi_bus_ops) == 0) {
                        ddi_remove_minor_node(dip, buf);
                        mutex_destroy(&clone_tab_mutex);
                        mutex_destroy(&bofi_mutex);
                        mutex_destroy(&bofi_low_mutex);
                        return (DDI_FAILURE);
                }
                if (sysevent_evc_bind(FM_ERROR_CHAN, &bofi_error_chan, 0) == 0)
                        (void) sysevent_evc_subscribe(bofi_error_chan, "bofi",
                            EC_FM, bofi_fm_ereport_callback, NULL, 0);

                /*
                 * save dip for getinfo
                 */
                our_dip = dip;
                ddi_report_dev(dip);
                initialized = 1;
        }
        return (DDI_SUCCESS);
}


static int
bofi_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
        char *name;
        char buf[80];

        if (cmd != DDI_DETACH)
                return (DDI_FAILURE);
        if (ddi_get_instance(dip) > 0)
                return (DDI_FAILURE);
        if ((name = ddi_get_name(dip)) == NULL)
                return (DDI_FAILURE);
        (void) snprintf(buf, sizeof (buf), "%s,ctl", name);
        mutex_enter(&bofi_low_mutex);
        mutex_enter(&bofi_mutex);
        /*
         * make sure test bofi is no longer in use
         */
        if (shadow_list.next != &shadow_list || errent_listp != NULL) {
                mutex_exit(&bofi_mutex);
                mutex_exit(&bofi_low_mutex);
                return (DDI_FAILURE);
        }
        mutex_exit(&bofi_mutex);
        mutex_exit(&bofi_low_mutex);

        /*
         * restore bus_ops structure
         */
        if (reset_bus_ops(nexus_name, &save_bus_ops) == 0)
                return (DDI_FAILURE);

        (void) sysevent_evc_unbind(bofi_error_chan);

        mutex_destroy(&clone_tab_mutex);
        mutex_destroy(&bofi_mutex);
        mutex_destroy(&bofi_low_mutex);
        ddi_remove_minor_node(dip, buf);
        our_dip = NULL;
        initialized = 0;
        return (DDI_SUCCESS);
}


/* ARGSUSED */
static int
bofi_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
        dev_t   dev = (dev_t)arg;
        int     minor = (int)getminor(dev);
        int     retval;

        switch (cmd) {
        case DDI_INFO_DEVT2DEVINFO:
                if (minor != 0 || our_dip == NULL) {
                        *result = (void *)NULL;
                        retval = DDI_FAILURE;
                } else {
                        *result = (void *)our_dip;
                        retval = DDI_SUCCESS;
                }
                break;
        case DDI_INFO_DEVT2INSTANCE:
                *result = (void *)0;
                retval = DDI_SUCCESS;
                break;
        default:
                retval = DDI_FAILURE;
        }
        return (retval);
}


/* ARGSUSED */
static int
bofi_open(dev_t *devp, int flag, int otyp, cred_t *credp)
{
        int     minor = (int)getminor(*devp);
        struct bofi_errent *softc;

        /*
         * only allow open on minor=0 - the clone device
         */
        if (minor != 0)
                return (ENXIO);
        /*
         * fail if not attached
         */
        if (!initialized)
                return (ENXIO);
        /*
         * find a free slot and grab it
         */
        mutex_enter(&clone_tab_mutex);
        for (minor = 1; minor < NCLONES; minor++) {
                if (clone_tab[minor] == 0) {
                        clone_tab[minor] = 1;
                        break;
                }
        }
        mutex_exit(&clone_tab_mutex);
        if (minor == NCLONES)
                return (EAGAIN);
        /*
         * soft state structure for this clone is used to maintain a list
         * of allocated errdefs so they can be freed on close
         */
        if (ddi_soft_state_zalloc(statep, minor) != DDI_SUCCESS) {
                mutex_enter(&clone_tab_mutex);
                clone_tab[minor] = 0;
                mutex_exit(&clone_tab_mutex);
                return (EAGAIN);
        }
        softc = ddi_get_soft_state(statep, minor);
        softc->cnext = softc;
        softc->cprev = softc;

        *devp = makedevice(getmajor(*devp), minor);
        return (0);
}


/* ARGSUSED */
static int
bofi_close(dev_t dev, int flag, int otyp, cred_t *credp)
{
        int     minor = (int)getminor(dev);
        struct bofi_errent *softc;
        struct bofi_errent *ep, *next_ep;

        softc = ddi_get_soft_state(statep, minor);
        if (softc == NULL)
                return (ENXIO);
        /*
         * find list of errdefs and free them off
         */
        for (ep = softc->cnext; ep != softc; ) {
                next_ep = ep->cnext;
                (void) bofi_errdef_free(ep);
                ep = next_ep;
        }
        /*
         * free clone tab slot
         */
        mutex_enter(&clone_tab_mutex);
        clone_tab[minor] = 0;
        mutex_exit(&clone_tab_mutex);

        ddi_soft_state_free(statep, minor);
        return (0);
}


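/*
 * A minimal, hypothetical sketch of how a user-level test might drive the
 * ioctl interface below - the device path, driver name and field values are
 * illustrative examples only, and error handling is omitted:
 *
 *    struct bofi_errdef ed;
 *    int fd = open("/devices/pseudo/bofi@0:bofi,ctl", O_RDWR);
 *
 *    bzero(&ed, sizeof (ed));
 *    (void) strcpy(ed.name, "xyzdrv");
 *    ed.namesize = strlen(ed.name);
 *    ed.instance = 0;
 *    ed.rnumber = 1;
 *    ed.offset = 0;
 *    ed.len = 0x100;
 *    ed.access_type = BOFI_PIO_R;
 *    ed.optype = BOFI_XOR;
 *    ed.operand = 0xff;
 *    ed.access_count = 1;
 *    ed.fail_count = 1;
 *    (void) ioctl(fd, BOFI_ADD_DEF, &ed);
 *
 * On success BOFI_ADD_DEF fills in ed.errdef_handle; any errdefs created on
 * this open are deleted automatically when the file descriptor is closed
 * (see bofi_close() above).
 */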
/* ARGSUSED */
static int
bofi_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
        int *rvalp)
{
        struct bofi_errent *softc;
        int     minor = (int)getminor(dev);
        struct bofi_errdef errdef;
        struct bofi_errctl errctl;
        struct bofi_errstate errstate;
        void *ed_handle;
        struct bofi_get_handles get_handles;
        struct bofi_get_hdl_info hdl_info;
        struct handle_info *hdlip;
        struct handle_info *hib;

        char *buffer;
        char *bufptr;
        char *endbuf;
        int req_count, count, err;
        char *namep;
        struct bofi_shadow *hp;
        int retval;
        struct bofi_shadow *hhashp;
        int i;

        switch (cmd) {
        case BOFI_ADD_DEF:
                /*
                 * add a new error definition
                 */
#ifdef _MULTI_DATAMODEL
                switch (ddi_model_convert_from(mode & FMODELS)) {
                case DDI_MODEL_ILP32:
                {
                        /*
                         * For use when a 32 bit app makes a call into a
                         * 64 bit ioctl
                         */
                        struct bofi_errdef32    errdef_32;

                        if (ddi_copyin((void *)arg, &errdef_32,
                            sizeof (struct bofi_errdef32), mode)) {
                                return (EFAULT);
                        }
                        errdef.namesize = errdef_32.namesize;
                        (void) strncpy(errdef.name, errdef_32.name, NAMESIZE);
                        errdef.instance = errdef_32.instance;
                        errdef.rnumber = errdef_32.rnumber;
                        errdef.offset = errdef_32.offset;
                        errdef.len = errdef_32.len;
                        errdef.access_type = errdef_32.access_type;
                        errdef.access_count = errdef_32.access_count;
                        errdef.fail_count = errdef_32.fail_count;
                        errdef.acc_chk = errdef_32.acc_chk;
                        errdef.optype = errdef_32.optype;
                        errdef.operand = errdef_32.operand;
                        errdef.log.logsize = errdef_32.log.logsize;
                        errdef.log.entries = errdef_32.log.entries;
                        errdef.log.flags = errdef_32.log.flags;
                        errdef.log.wrapcnt = errdef_32.log.wrapcnt;
                        errdef.log.start_time = errdef_32.log.start_time;
                        errdef.log.stop_time = errdef_32.log.stop_time;
                        errdef.log.logbase =
                            (caddr_t)(uintptr_t)errdef_32.log.logbase;
                        errdef.errdef_handle = errdef_32.errdef_handle;
                        break;
                }
                case DDI_MODEL_NONE:
                        if (ddi_copyin((void *)arg, &errdef,
                            sizeof (struct bofi_errdef), mode))
                                return (EFAULT);
                        break;
                }
#else /* ! _MULTI_DATAMODEL */
                if (ddi_copyin((void *)arg, &errdef,
                    sizeof (struct bofi_errdef), mode) != 0)
                        return (EFAULT);
#endif /* _MULTI_DATAMODEL */
                /*
                 * do some validation
                 */
                if (errdef.fail_count == 0)
                        errdef.optype = 0;
                if (errdef.optype != 0) {
                        if (errdef.access_type & BOFI_INTR &&
                            errdef.optype != BOFI_DELAY_INTR &&
                            errdef.optype != BOFI_LOSE_INTR &&
                            errdef.optype != BOFI_EXTRA_INTR)
                                return (EINVAL);
                        if ((errdef.access_type & (BOFI_DMA_RW|BOFI_PIO_R)) &&
                            errdef.optype == BOFI_NO_TRANSFER)
                                return (EINVAL);
                        if ((errdef.access_type & (BOFI_PIO_RW)) &&
                            errdef.optype != BOFI_EQUAL &&
                            errdef.optype != BOFI_OR &&
                            errdef.optype != BOFI_XOR &&
                            errdef.optype != BOFI_AND &&
                            errdef.optype != BOFI_NO_TRANSFER)
                                return (EINVAL);
                }
                /*
                 * find softstate for this clone, so we can tag
                 * new errdef on to it
                 */
                softc = ddi_get_soft_state(statep, minor);
                if (softc == NULL)
                        return (ENXIO);
                /*
                 * read in name
                 */
                if (errdef.namesize > NAMESIZE)
                        return (EINVAL);
                namep = kmem_zalloc(errdef.namesize+1, KM_SLEEP);
                (void) strncpy(namep, errdef.name, errdef.namesize);

                if (bofi_errdef_alloc(&errdef, namep, softc) != DDI_SUCCESS) {
                        (void) bofi_errdef_free((struct bofi_errent *)
                            (uintptr_t)errdef.errdef_handle);
                        kmem_free(namep, errdef.namesize+1);
                        return (EINVAL);
                }
                /*
                 * copy out errdef again, including filled in errdef_handle
                 */
#ifdef _MULTI_DATAMODEL
                switch (ddi_model_convert_from(mode & FMODELS)) {
                case DDI_MODEL_ILP32:
                {
                        /*
                         * For use when a 32 bit app makes a call into a
                         * 64 bit ioctl
                         */
                        struct bofi_errdef32    errdef_32;

                        errdef_32.namesize = errdef.namesize;
                        (void) strncpy(errdef_32.name, errdef.name, NAMESIZE);
                        errdef_32.instance = errdef.instance;
                        errdef_32.rnumber = errdef.rnumber;
                        errdef_32.offset = errdef.offset;
                        errdef_32.len = errdef.len;
                        errdef_32.access_type = errdef.access_type;
                        errdef_32.access_count = errdef.access_count;
                        errdef_32.fail_count = errdef.fail_count;
                        errdef_32.acc_chk = errdef.acc_chk;
                        errdef_32.optype = errdef.optype;
                        errdef_32.operand = errdef.operand;
                        errdef_32.log.logsize = errdef.log.logsize;
                        errdef_32.log.entries = errdef.log.entries;
                        errdef_32.log.flags = errdef.log.flags;
                        errdef_32.log.wrapcnt = errdef.log.wrapcnt;
                        errdef_32.log.start_time = errdef.log.start_time;
                        errdef_32.log.stop_time = errdef.log.stop_time;
                        errdef_32.log.logbase =
                            (caddr32_t)(uintptr_t)errdef.log.logbase;
                        errdef_32.errdef_handle = errdef.errdef_handle;
                        if (ddi_copyout(&errdef_32, (void *)arg,
                            sizeof (struct bofi_errdef32), mode) != 0) {
                                (void) bofi_errdef_free((struct bofi_errent *)
                                    errdef.errdef_handle);
                                kmem_free(namep, errdef.namesize+1);
                                return (EFAULT);
                        }
                        break;
                }
                case DDI_MODEL_NONE:
                        if (ddi_copyout(&errdef, (void *)arg,
                            sizeof (struct bofi_errdef), mode) != 0) {
                                (void) bofi_errdef_free((struct bofi_errent *)
                                    errdef.errdef_handle);
                                kmem_free(namep, errdef.namesize+1);
                                return (EFAULT);
                        }
                        break;
                }
#else /* ! _MULTI_DATAMODEL */
                if (ddi_copyout(&errdef, (void *)arg,
                    sizeof (struct bofi_errdef), mode) != 0) {
                        (void) bofi_errdef_free((struct bofi_errent *)
                            (uintptr_t)errdef.errdef_handle);
                        kmem_free(namep, errdef.namesize+1);
                        return (EFAULT);
                }
#endif /* _MULTI_DATAMODEL */
                return (0);
        case BOFI_DEL_DEF:
                /*
                 * delete existing errdef
                 */
                if (ddi_copyin((void *)arg, &ed_handle,
                    sizeof (void *), mode) != 0)
                        return (EFAULT);
                return (bofi_errdef_free((struct bofi_errent *)ed_handle));
        case BOFI_START:
                /*
                 * start all errdefs corresponding to
                 * this name and instance
                 */
                if (ddi_copyin((void *)arg, &errctl,
                    sizeof (struct bofi_errctl), mode) != 0)
                        return (EFAULT);
                /*
                 * copy in name
                 */
                if (errctl.namesize > NAMESIZE)
                        return (EINVAL);
                namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
                (void) strncpy(namep, errctl.name, errctl.namesize);
                bofi_start(&errctl, namep);
                kmem_free(namep, errctl.namesize+1);
                return (0);
        case BOFI_STOP:
                /*
                 * stop all errdefs corresponding to
                 * this name and instance
                 */
                if (ddi_copyin((void *)arg, &errctl,
                    sizeof (struct bofi_errctl), mode) != 0)
                        return (EFAULT);
                /*
                 * copy in name
                 */
                if (errctl.namesize > NAMESIZE)
                        return (EINVAL);
                namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
                (void) strncpy(namep, errctl.name, errctl.namesize);
                bofi_stop(&errctl, namep);
                kmem_free(namep, errctl.namesize+1);
                return (0);
        case BOFI_BROADCAST:
                /*
                 * wakeup all errdefs corresponding to
                 * this name and instance
                 */
                if (ddi_copyin((void *)arg, &errctl,
                    sizeof (struct bofi_errctl), mode) != 0)
                        return (EFAULT);
                /*
                 * copy in name
                 */
                if (errctl.namesize > NAMESIZE)
                        return (EINVAL);
                namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
                (void) strncpy(namep, errctl.name, errctl.namesize);
                bofi_broadcast(&errctl, namep);
                kmem_free(namep, errctl.namesize+1);
                return (0);
        case BOFI_CLEAR_ACC_CHK:
                /*
                 * clear "acc_chk" for all errdefs corresponding to
                 * this name and instance
                 */
                if (ddi_copyin((void *)arg, &errctl,
                    sizeof (struct bofi_errctl), mode) != 0)
                        return (EFAULT);
                /*
                 * copy in name
                 */
                if (errctl.namesize > NAMESIZE)
                        return (EINVAL);
                namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
                (void) strncpy(namep, errctl.name, errctl.namesize);
                bofi_clear_acc_chk(&errctl, namep);
                kmem_free(namep, errctl.namesize+1);
                return (0);
        case BOFI_CLEAR_ERRORS:
                /*
                 * set "fail_count" to 0 for all errdefs corresponding to
                 * this name and instance whose "access_count"
                 * has expired.
                 */
                if (ddi_copyin((void *)arg, &errctl,
                    sizeof (struct bofi_errctl), mode) != 0)
                        return (EFAULT);
                /*
                 * copy in name
                 */
                if (errctl.namesize > NAMESIZE)
                        return (EINVAL);
                namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
                (void) strncpy(namep, errctl.name, errctl.namesize);
                bofi_clear_errors(&errctl, namep);
                kmem_free(namep, errctl.namesize+1);
                return (0);
        case BOFI_CLEAR_ERRDEFS:
                /*
                 * set "access_count" and "fail_count" to 0 for all errdefs
                 * corresponding to this name and instance
                 */
                if (ddi_copyin((void *)arg, &errctl,
                    sizeof (struct bofi_errctl), mode) != 0)
                        return (EFAULT);
                /*
                 * copy in name
                 */
                if (errctl.namesize > NAMESIZE)
                        return (EINVAL);
                namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
                (void) strncpy(namep, errctl.name, errctl.namesize);
                bofi_clear_errdefs(&errctl, namep);
                kmem_free(namep, errctl.namesize+1);
                return (0);
        case BOFI_CHK_STATE:
        {
                struct acc_log_elem *klg;
                size_t uls;
                /*
                 * get state for this errdef - read in dummy errstate
                 * with just the errdef_handle filled in
                 */
#ifdef _MULTI_DATAMODEL
                switch (ddi_model_convert_from(mode & FMODELS)) {
                case DDI_MODEL_ILP32:
                {
                        /*
                         * For use when a 32 bit app makes a call into a
                         * 64 bit ioctl
                         */
                        struct bofi_errstate32  errstate_32;

                        if (ddi_copyin((void *)arg, &errstate_32,
                            sizeof (struct bofi_errstate32), mode) != 0) {
                                return (EFAULT);
                        }
                        errstate.fail_time = errstate_32.fail_time;
                        errstate.msg_time = errstate_32.msg_time;
                        errstate.access_count = errstate_32.access_count;
                        errstate.fail_count = errstate_32.fail_count;
                        errstate.acc_chk = errstate_32.acc_chk;
                        errstate.errmsg_count = errstate_32.errmsg_count;
                        (void) strncpy(errstate.buffer, errstate_32.buffer,
                            ERRMSGSIZE);
                        errstate.severity = errstate_32.severity;
                        errstate.log.logsize = errstate_32.log.logsize;
                        errstate.log.entries = errstate_32.log.entries;
                        errstate.log.flags = errstate_32.log.flags;
                        errstate.log.wrapcnt = errstate_32.log.wrapcnt;
                        errstate.log.start_time = errstate_32.log.start_time;
                        errstate.log.stop_time = errstate_32.log.stop_time;
                        errstate.log.logbase =
                            (caddr_t)(uintptr_t)errstate_32.log.logbase;
                        errstate.errdef_handle = errstate_32.errdef_handle;
                        break;
                }
                case DDI_MODEL_NONE:
                        if (ddi_copyin((void *)arg, &errstate,
                            sizeof (struct bofi_errstate), mode) != 0)
                                return (EFAULT);
                        break;
                }
#else /* ! _MULTI_DATAMODEL */
                if (ddi_copyin((void *)arg, &errstate,
                    sizeof (struct bofi_errstate), mode) != 0)
                        return (EFAULT);
#endif /* _MULTI_DATAMODEL */
                if ((retval = bofi_errdef_check(&errstate, &klg)) == EINVAL)
                        return (EINVAL);
                /*
                 * copy out real errstate structure
                 */
                uls = errstate.log.logsize;
                if (errstate.log.entries > uls && uls)
                        /* insufficient user memory */
                        errstate.log.entries = uls;
                /* always pass back a time */
                if (errstate.log.stop_time == 0ul)
                        (void) drv_getparm(TIME, &(errstate.log.stop_time));

#ifdef _MULTI_DATAMODEL
                switch (ddi_model_convert_from(mode & FMODELS)) {
                case DDI_MODEL_ILP32:
                {
                        /*
                         * For use when a 32 bit app makes a call into a
                         * 64 bit ioctl
                         */
                        struct bofi_errstate32  errstate_32;

                        errstate_32.fail_time = errstate.fail_time;
                        errstate_32.msg_time = errstate.msg_time;
                        errstate_32.access_count = errstate.access_count;
                        errstate_32.fail_count = errstate.fail_count;
                        errstate_32.acc_chk = errstate.acc_chk;
                        errstate_32.errmsg_count = errstate.errmsg_count;
                        (void) strncpy(errstate_32.buffer, errstate.buffer,
                            ERRMSGSIZE);
                        errstate_32.severity = errstate.severity;
                        errstate_32.log.logsize = errstate.log.logsize;
                        errstate_32.log.entries = errstate.log.entries;
                        errstate_32.log.flags = errstate.log.flags;
                        errstate_32.log.wrapcnt = errstate.log.wrapcnt;
                        errstate_32.log.start_time = errstate.log.start_time;
                        errstate_32.log.stop_time = errstate.log.stop_time;
                        errstate_32.log.logbase =
                            (caddr32_t)(uintptr_t)errstate.log.logbase;
                        errstate_32.errdef_handle = errstate.errdef_handle;
                        if (ddi_copyout(&errstate_32, (void *)arg,
                            sizeof (struct bofi_errstate32), mode) != 0)
                                return (EFAULT);
                        break;
                }
                case DDI_MODEL_NONE:
                        if (ddi_copyout(&errstate, (void *)arg,
                            sizeof (struct bofi_errstate), mode) != 0)
                                return (EFAULT);
                        break;
                }
#else /* ! _MULTI_DATAMODEL */
                if (ddi_copyout(&errstate, (void *)arg,
                    sizeof (struct bofi_errstate), mode) != 0)
                        return (EFAULT);
#endif /* _MULTI_DATAMODEL */
                if (uls && errstate.log.entries &&
                    ddi_copyout(klg, errstate.log.logbase,
                    errstate.log.entries * sizeof (struct acc_log_elem),
                    mode) != 0) {
                        return (EFAULT);
                }
                return (retval);
        }
        case BOFI_CHK_STATE_W:
        {
                struct acc_log_elem *klg;
                size_t uls;
                /*
                 * get state for this errdef - read in dummy errstate
                 * with just the errdef_handle filled in. Then wait for
                 * a ddi_report_fault message to come back
                 */
#ifdef _MULTI_DATAMODEL
                switch (ddi_model_convert_from(mode & FMODELS)) {
                case DDI_MODEL_ILP32:
                {
                        /*
                         * For use when a 32 bit app makes a call into a
                         * 64 bit ioctl
                         */
                        struct bofi_errstate32  errstate_32;

                        if (ddi_copyin((void *)arg, &errstate_32,
                            sizeof (struct bofi_errstate32), mode) != 0) {
                                return (EFAULT);
                        }
                        errstate.fail_time = errstate_32.fail_time;
                        errstate.msg_time = errstate_32.msg_time;
                        errstate.access_count = errstate_32.access_count;
                        errstate.fail_count = errstate_32.fail_count;
                        errstate.acc_chk = errstate_32.acc_chk;
                        errstate.errmsg_count = errstate_32.errmsg_count;
                        (void) strncpy(errstate.buffer, errstate_32.buffer,
                            ERRMSGSIZE);
                        errstate.severity = errstate_32.severity;
                        errstate.log.logsize = errstate_32.log.logsize;
                        errstate.log.entries = errstate_32.log.entries;
                        errstate.log.flags = errstate_32.log.flags;
                        errstate.log.wrapcnt = errstate_32.log.wrapcnt;
                        errstate.log.start_time = errstate_32.log.start_time;
                        errstate.log.stop_time = errstate_32.log.stop_time;
                        errstate.log.logbase =
                            (caddr_t)(uintptr_t)errstate_32.log.logbase;
                        errstate.errdef_handle = errstate_32.errdef_handle;
                        break;
                }
                case DDI_MODEL_NONE:
                        if (ddi_copyin((void *)arg, &errstate,
                            sizeof (struct bofi_errstate), mode) != 0)
                                return (EFAULT);
                        break;
                }
#else /* ! _MULTI_DATAMODEL */
                if (ddi_copyin((void *)arg, &errstate,
                    sizeof (struct bofi_errstate), mode) != 0)
                        return (EFAULT);
#endif /* _MULTI_DATAMODEL */
                if ((retval = bofi_errdef_check_w(&errstate, &klg)) == EINVAL)
                        return (EINVAL);
                /*
1385                  * copy out real errstate structure
1386                  */
1387                 uls = errstate.log.logsize;
1389                 if (errstate.log.entries > uls && uls)
1390                         /* insufficient user memory */
1391                         errstate.log.entries = uls;
1392                 /* always pass back a time */
1393                 if (errstate.log.stop_time == 0ul)
1394                         (void) drv_getparm(TIME, &(errstate.log.stop_time));
1395 
1396 #ifdef _MULTI_DATAMODEL
1397                 switch (ddi_model_convert_from(mode & FMODELS)) {
1398                 case DDI_MODEL_ILP32:
1399                 {
1400                         /*
1401                          * For use when a 32 bit app makes a call into a
1402                          * 64 bit ioctl
1403                          */
1404                         struct bofi_errstate32  errstate_32;
1405 
1406                         errstate_32.fail_time = errstate.fail_time;
1407                         errstate_32.msg_time = errstate.msg_time;
1408                         errstate_32.access_count = errstate.access_count;
1409                         errstate_32.fail_count = errstate.fail_count;
1410                         errstate_32.acc_chk = errstate.acc_chk;
1411                         errstate_32.errmsg_count = errstate.errmsg_count;
1412                         (void) strncpy(errstate_32.buffer, errstate.buffer,
1413                             ERRMSGSIZE);
1414                         errstate_32.severity = errstate.severity;
1415                         errstate_32.log.logsize = errstate.log.logsize;
1416                         errstate_32.log.entries = errstate.log.entries;
1417                         errstate_32.log.flags = errstate.log.flags;
1418                         errstate_32.log.wrapcnt = errstate.log.wrapcnt;
1419                         errstate_32.log.start_time = errstate.log.start_time;
1420                         errstate_32.log.stop_time = errstate.log.stop_time;
1421                         errstate_32.log.logbase =
1422                             (caddr32_t)(uintptr_t)errstate.log.logbase;
1423                         errstate_32.errdef_handle = errstate.errdef_handle;
1424                         if (ddi_copyout(&errstate_32, (void *)arg,
1425                             sizeof (struct bofi_errstate32), mode) != 0)
1426                                 return (EFAULT);
1427                         break;
1428                 }
1429                 case DDI_MODEL_NONE:
1430                         if (ddi_copyout(&errstate, (void *)arg,
1431                             sizeof (struct bofi_errstate), mode) != 0)
1432                                 return (EFAULT);
1433                         break;
1434                 }
1435 #else /* ! _MULTI_DATAMODEL */
1436                 if (ddi_copyout(&errstate, (void *)arg,
1437                     sizeof (struct bofi_errstate), mode) != 0)
1438                         return (EFAULT);
1439 #endif /* _MULTI_DATAMODEL */
1440 
1441                 if (uls && errstate.log.entries &&
1442                     ddi_copyout(klg, errstate.log.logbase,
1443                     errstate.log.entries * sizeof (struct acc_log_elem),
1444                     mode) != 0) {
1445                         return (EFAULT);
1446                 }
1447                 return (retval);
1448         }
1449         case BOFI_GET_HANDLES:
1450                 /*
1451                  * display existing handles
1452                  */
1453 #ifdef _MULTI_DATAMODEL
1454                 switch (ddi_model_convert_from(mode & FMODELS)) {
1455                 case DDI_MODEL_ILP32:
1456                 {
1457                         /*
1458                          * For use when a 32 bit app makes a call into a
1459                          * 64 bit ioctl
1460                          */
1461                         struct bofi_get_handles32       get_handles_32;
1462 
1463                         if (ddi_copyin((void *)arg, &get_handles_32,
1464                             sizeof (get_handles_32), mode) != 0) {
1465                                 return (EFAULT);
1466                         }
1467                         get_handles.namesize = get_handles_32.namesize;
1468                         (void) strncpy(get_handles.name, get_handles_32.name,
1469                             NAMESIZE);
1470                         get_handles.instance = get_handles_32.instance;
1471                         get_handles.count = get_handles_32.count;
1472                         get_handles.buffer =
1473                             (caddr_t)(uintptr_t)get_handles_32.buffer;
1474                         break;
1475                 }
1476                 case DDI_MODEL_NONE:
1477                         if (ddi_copyin((void *)arg, &get_handles,
1478                             sizeof (get_handles), mode) != 0)
1479                                 return (EFAULT);
1480                         break;
1481                 }
1482 #else /* ! _MULTI_DATAMODEL */
1483                 if (ddi_copyin((void *)arg, &get_handles,
1484                     sizeof (get_handles), mode) != 0)
1485                         return (EFAULT);
1486 #endif /* _MULTI_DATAMODEL */
1487                 /*
1488                  * read in name
1489                  */
1490                 if (get_handles.namesize > NAMESIZE)
1491                         return (EINVAL);
1492                 namep = kmem_zalloc(get_handles.namesize+1, KM_SLEEP);
1493                 (void) strncpy(namep, get_handles.name, get_handles.namesize);
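                     /*
                      * note: namep stays NUL-terminated - kmem_zalloc gave us
                      * namesize+1 zeroed bytes and strncpy copies at most
                      * namesize of them
                      */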
1494                 req_count = get_handles.count;
1495                 bufptr = buffer = kmem_zalloc(req_count, KM_SLEEP);
1496                 endbuf = bufptr + req_count;
1497                 /*
1498                  * display existing handles
1499                  */
1500                 mutex_enter(&bofi_low_mutex);
1501                 mutex_enter(&bofi_mutex);
1502                 for (i = 0; i < HDL_HASH_TBL_SIZE; i++) {
1503                         hhashp = &hhash_table[i];
1504                         for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext) {
1505                                 if (!driver_under_test(hp->dip))
1506                                         continue;
1507                                 if (ddi_name_to_major(ddi_get_name(hp->dip)) !=
1508                                     ddi_name_to_major(namep))
1509                                         continue;
1510                                 if (hp->instance != get_handles.instance)
1511                                         continue;
1512                                 /*
1513                                  * print information per handle - note that
1514                                  * DMA* means an unbound DMA handle
1515                                  */
1516                                 (void) snprintf(bufptr, (size_t)(endbuf-bufptr),
1517                                     "  %s %d %s ", hp->name, hp->instance,
1518                                     (hp->type == BOFI_INT_HDL) ? "INTR" :
1519                                     (hp->type == BOFI_ACC_HDL) ? "PIO" :
1520                                     (hp->type == BOFI_DMA_HDL) ? "DMA" :
1521                                     (hp->hparrayp != NULL) ? "DVMA" : "DMA*");
1522                                 bufptr += strlen(bufptr);
1523                                 if (hp->type == BOFI_ACC_HDL) {
1524                                         if (hp->len == INT_MAX - hp->offset)
1525                                                 (void) snprintf(bufptr,
1526                                                     (size_t)(endbuf-bufptr),
1527                                                     "reg set %d off 0x%llx\n",
1528                                                     hp->rnumber, hp->offset);
1529                                         else
1530                                                 (void) snprintf(bufptr,
1531                                                     (size_t)(endbuf-bufptr),
1532                                                     "reg set %d off 0x%llx"
1533                                                     " len 0x%llx\n",
1534                                                     hp->rnumber, hp->offset,
1535                                                     hp->len);
1536                                 } else if (hp->type == BOFI_DMA_HDL)
1537                                         (void) snprintf(bufptr,
1538                                             (size_t)(endbuf-bufptr),
1539                                             "handle no %d len 0x%llx"
1540                                             " addr 0x%p\n", hp->rnumber,
1541                                             hp->len, (void *)hp->addr);
1542                                 else if (hp->type == BOFI_NULL &&
1543                                     hp->hparrayp == NULL)
1544                                         (void) snprintf(bufptr,
1545                                             (size_t)(endbuf-bufptr),
1546                                             "handle no %d\n", hp->rnumber);
1547                                 else
1548                                         (void) snprintf(bufptr,
1549                                             (size_t)(endbuf-bufptr), "\n");
1550                                 bufptr += strlen(bufptr);
1551                         }
1552                 }
1553                 mutex_exit(&bofi_mutex);
1554                 mutex_exit(&bofi_low_mutex);
1555                 err = ddi_copyout(buffer, get_handles.buffer, req_count, mode);
1556                 kmem_free(namep, get_handles.namesize+1);
1557                 kmem_free(buffer, req_count);
1558                 if (err != 0)
1559                         return (EFAULT);
1560                 else
1561                         return (0);
1562         case BOFI_GET_HANDLE_INFO:
1563                 /*
1564                  * display existing handles
1565                  */
1566 #ifdef _MULTI_DATAMODEL
1567                 switch (ddi_model_convert_from(mode & FMODELS)) {
1568                 case DDI_MODEL_ILP32:
1569                 {
1570                         /*
1571                          * For use when a 32 bit app makes a call into a
1572                          * 64 bit ioctl
1573                          */
1574                         struct bofi_get_hdl_info32      hdl_info_32;
1575 
1576                         if (ddi_copyin((void *)arg, &hdl_info_32,
1577                             sizeof (hdl_info_32), mode)) {
1578                                 return (EFAULT);
1579                         }
1580                         hdl_info.namesize = hdl_info_32.namesize;
1581                         (void) strncpy(hdl_info.name, hdl_info_32.name,
1582                             NAMESIZE);
1583                         hdl_info.count = hdl_info_32.count;
1584                         hdl_info.hdli = (caddr_t)(uintptr_t)hdl_info_32.hdli;
1585                         break;
1586                 }
1587                 case DDI_MODEL_NONE:
1588                         if (ddi_copyin((void *)arg, &hdl_info,
1589                             sizeof (hdl_info), mode))
1590                                 return (EFAULT);
1591                         break;
1592                 }
1593 #else /* ! _MULTI_DATAMODEL */
1594                 if (ddi_copyin((void *)arg, &hdl_info,
1595                     sizeof (hdl_info), mode))
1596                         return (EFAULT);
1597 #endif /* _MULTI_DATAMODEL */
1598                 if (hdl_info.namesize > NAMESIZE)
1599                         return (EINVAL);
1600                 namep = kmem_zalloc(hdl_info.namesize + 1, KM_SLEEP);
1601                 (void) strncpy(namep, hdl_info.name, hdl_info.namesize);
1602                 req_count = hdl_info.count;
1603                 count = hdl_info.count = 0; /* the actual number of handles */
1604                 if (req_count > 0) {
1605                         hib = hdlip =
1606                             kmem_zalloc(req_count * sizeof (struct handle_info),
1607                             KM_SLEEP);
1608                 } else {
1609                         hib = hdlip = 0;
1610                         req_count = hdl_info.count = 0;
1611                 }
1612 
1613                 /*
1614                  * display existing handles
1615                  */
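                     /*
                      * hdl_info.count keeps counting every matching handle so
                      * that the caller learns the total number it would need,
                      * whereas "count" only tracks how many handle_info
                      * entries were actually written to the kernel buffer
                      * (at most req_count)
                      */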
1616                 mutex_enter(&bofi_low_mutex);
1617                 mutex_enter(&bofi_mutex);
1618                 for (i = 0; i < HDL_HASH_TBL_SIZE; i++) {
1619                         hhashp = &hhash_table[i];
1620                         for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext) {
1621                                 if (!driver_under_test(hp->dip) ||
1622                                     ddi_name_to_major(ddi_get_name(hp->dip)) !=
1623                                     ddi_name_to_major(namep) ||
1624                                     ++(hdl_info.count) > req_count ||
1625                                     count == req_count)
1626                                         continue;
1627 
1628                                 hdlip->instance = hp->instance;
1629                                 hdlip->rnumber = hp->rnumber;
1630                                 switch (hp->type) {
1631                                 case BOFI_ACC_HDL:
1632                                         hdlip->access_type = BOFI_PIO_RW;
1633                                         hdlip->offset = hp->offset;
1634                                         hdlip->len = hp->len;
1635                                         break;
1636                                 case BOFI_DMA_HDL:
1637                                         hdlip->access_type = 0;
1638                                         if (hp->flags & DDI_DMA_WRITE)
1639                                                 hdlip->access_type |=
1640                                                     BOFI_DMA_W;
1641                                         if (hp->flags & DDI_DMA_READ)
1642                                                 hdlip->access_type |=
1643                                                     BOFI_DMA_R;
1644                                         hdlip->len = hp->len;
1645                                         hdlip->addr_cookie =
1646                                             (uint64_t)(uintptr_t)hp->addr;
1647                                         break;
1648                                 case BOFI_INT_HDL:
1649                                         hdlip->access_type = BOFI_INTR;
1650                                         break;
1651                                 default:
1652                                         hdlip->access_type = 0;
1653                                         break;
1654                                 }
1655                                 hdlip++;
1656                                 count++;
1657                         }
1658                 }
1659                 mutex_exit(&bofi_mutex);
1660                 mutex_exit(&bofi_low_mutex);
1661                 err = 0;
1662 #ifdef _MULTI_DATAMODEL
1663                 switch (ddi_model_convert_from(mode & FMODELS)) {
1664                 case DDI_MODEL_ILP32:
1665                 {
1666                         /*
1667                          * For use when a 32 bit app makes a call into a
1668                          * 64 bit ioctl
1669                          */
1670                         struct bofi_get_hdl_info32      hdl_info_32;
1671 
1672                         hdl_info_32.namesize = hdl_info.namesize;
1673                         (void) strncpy(hdl_info_32.name, hdl_info.name,
1674                             NAMESIZE);
1675                         hdl_info_32.count = hdl_info.count;
1676                         hdl_info_32.hdli = (caddr32_t)(uintptr_t)hdl_info.hdli;
1677                         if (ddi_copyout(&hdl_info_32, (void *)arg,
1678                             sizeof (hdl_info_32), mode) != 0) {
1679                                 kmem_free(namep, hdl_info.namesize+1);
1680                                 if (req_count > 0)
1681                                         kmem_free(hib,
1682                                             req_count * sizeof (*hib));
1683                                 return (EFAULT);
1684                         }
1685                         break;
1686                 }
1687                 case DDI_MODEL_NONE:
1688                         if (ddi_copyout(&hdl_info, (void *)arg,
1689                             sizeof (hdl_info), mode) != 0) {
1690                                 kmem_free(namep, hdl_info.namesize+1);
1691                                 if (req_count > 0)
1692                                         kmem_free(hib,
1693                                             req_count * sizeof (*hib));
1694                                 return (EFAULT);
1695                         }
1696                         break;
1697                 }
1698 #else /* ! _MULTI_DATAMODEL */
1699                 if (ddi_copyout(&hdl_info, (void *)arg,
1700                     sizeof (hdl_info), mode) != 0) {
1701                         kmem_free(namep, hdl_info.namesize+1);
1702                         if (req_count > 0)
1703                                 kmem_free(hib, req_count * sizeof (*hib));
1704                         return (EFAULT);
1705                 }
1706 #endif /* ! _MULTI_DATAMODEL */
1707                 if (count > 0) {
1708                         if (ddi_copyout(hib, hdl_info.hdli,
1709                             count * sizeof (*hib), mode) != 0) {
1710                                 kmem_free(namep, hdl_info.namesize+1);
1711                                 if (req_count > 0)
1712                                         kmem_free(hib,
1713                                             req_count * sizeof (*hib));
1714                                 return (EFAULT);
1715                         }
1716                 }
1717                 kmem_free(namep, hdl_info.namesize+1);
1718                 if (req_count > 0)
1719                         kmem_free(hib, req_count * sizeof (*hib));
1720                 return (err);
1721         default:
1722                 return (ENOTTY);
1723         }
1724 }
1725 
1726 
1727 /*
1728  * add a new error definition
1729  */
1730 static int
1731 bofi_errdef_alloc(struct bofi_errdef *errdefp, char *namep,
1732         struct bofi_errent *softc)
1733 {
1734         struct bofi_errent *ep;
1735         struct bofi_shadow *hp;
1736         struct bofi_link   *lp;
1737 
1738         /*
1739          * allocate errdef structure and put on in-use list
1740          */
1741         ep = kmem_zalloc(sizeof (struct bofi_errent), KM_SLEEP);
1742         ep->errdef = *errdefp;
1743         ep->name = namep;
1744         ep->errdef.errdef_handle = (uint64_t)(uintptr_t)ep;
1745         ep->errstate.severity = DDI_SERVICE_RESTORED;
1746         ep->errstate.errdef_handle = (uint64_t)(uintptr_t)ep;
1747         cv_init(&ep->cv, NULL, CV_DRIVER, NULL);
1748         /*
1749          * allocate space for logging
1750          */
1751         ep->errdef.log.entries = 0;
1752         ep->errdef.log.wrapcnt = 0;
1753         if (ep->errdef.access_type & BOFI_LOG)
1754                 ep->logbase = kmem_alloc(sizeof (struct acc_log_elem) *
1755                     ep->errdef.log.logsize, KM_SLEEP);
1756         else
1757                 ep->logbase = NULL;
1758         /*
1759          * put on in-use list
1760          */
1761         mutex_enter(&bofi_low_mutex);
1762         mutex_enter(&bofi_mutex);
1763         ep->next = errent_listp;
1764         errent_listp = ep;
1765         /*
1766          * and add it to the per-clone list
1767          */
1768         ep->cnext = softc->cnext;
1769         softc->cnext->cprev = ep;
1770         ep->cprev = softc;
1771         softc->cnext = ep;
1772 
1773         /*
1774          * look for corresponding shadow handle structures and, if we find any,
1775          * tag this errdef structure onto their link lists.
1776          */
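             /*
              * Note on the DMA case below: do_dma_corrupt applies corruption
              * in aligned 64-bit words, so a DMA errdef is only linked if at
              * least one whole LLSZMASK-aligned word lies inside its
              * [offset, offset + len) range (start rounded up, end rounded
              * down).
              */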
1777         for (hp = shadow_list.next; hp != &shadow_list; hp = hp->next) {
1778                 if (ddi_name_to_major(hp->name) == ddi_name_to_major(namep) &&
1779                     hp->instance == errdefp->instance &&
1780                     (((errdefp->access_type & BOFI_DMA_RW) &&
1781                     (ep->errdef.rnumber == -1 ||
1782                     hp->rnumber == ep->errdef.rnumber) &&
1783                     hp->type == BOFI_DMA_HDL &&
1784                     (((uintptr_t)(hp->addr + ep->errdef.offset +
1785                     ep->errdef.len) & ~LLSZMASK) >
1786                     ((uintptr_t)((hp->addr + ep->errdef.offset) +
1787                     LLSZMASK) & ~LLSZMASK))) ||
1788                     ((errdefp->access_type & BOFI_INTR) &&
1789                     hp->type == BOFI_INT_HDL) ||
1790                     ((errdefp->access_type & BOFI_PIO_RW) &&
1791                     hp->type == BOFI_ACC_HDL &&
1792                     (errdefp->rnumber == -1 ||
1793                     hp->rnumber == errdefp->rnumber) &&
1794                     (errdefp->len == 0 ||
1795                     hp->offset < errdefp->offset + errdefp->len) &&
1796                     hp->offset + hp->len > errdefp->offset))) {
1797                         lp = bofi_link_freelist;
1798                         if (lp != NULL) {
1799                                 bofi_link_freelist = lp->link;
1800                                 lp->errentp = ep;
1801                                 lp->link = hp->link;
1802                                 hp->link = lp;
1803                         }
1804                 }
1805         }
1806         errdefp->errdef_handle = (uint64_t)(uintptr_t)ep;
1807         mutex_exit(&bofi_mutex);
1808         mutex_exit(&bofi_low_mutex);
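             /*
              * register a soft interrupt handler for this errdef - bofi_signal
              * runs at soft interrupt level to wake up anyone waiting on the
              * errdef (e.g. when its access log fills up)
              */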
1809         ep->softintr_id = NULL;
1810         return (ddi_add_softintr(our_dip, DDI_SOFTINT_MED, &ep->softintr_id,
1811             NULL, NULL, bofi_signal, (caddr_t)&ep->errdef));
1812 }
1813 
1814 
1815 /*
1816  * delete existing errdef
1817  */
1818 static int
1819 bofi_errdef_free(struct bofi_errent *ep)
1820 {
1821         struct bofi_errent *hep, *prev_hep;
1822         struct bofi_link *lp, *prev_lp, *next_lp;
1823         struct bofi_shadow *hp;
1824 
1825         mutex_enter(&bofi_low_mutex);
1826         mutex_enter(&bofi_mutex);
1827         /*
1828          * don't just assume it's a valid ep - check that it's on the
1829          * in-use list
1830          */
1831         prev_hep = NULL;
1832         for (hep = errent_listp; hep != NULL; ) {
1833                 if (hep == ep)
1834                         break;
1835                 prev_hep = hep;
1836                 hep = hep->next;
1837         }
1838         if (hep == NULL) {
1839                 mutex_exit(&bofi_mutex);
1840                 mutex_exit(&bofi_low_mutex);
1841                 return (EINVAL);
1842         }
1843         /*
1844          * found it - delete from in-use list
1845          */
1846 
1847         if (prev_hep)
1848                 prev_hep->next = hep->next;
1849         else
1850                 errent_listp = hep->next;
1851         /*
1852          * and take it off the per-clone list
1853          */
1854         hep->cnext->cprev = hep->cprev;
1855         hep->cprev->cnext = hep->cnext;
1856         /*
1857          * see if we are on any shadow handle link lists - and if we
1858          * are then take us off
1859          */
1860         for (hp = shadow_list.next; hp != &shadow_list; hp = hp->next) {
1861                 prev_lp = NULL;
1862                 for (lp = hp->link; lp != NULL; ) {
1863                         if (lp->errentp == ep) {
1864                                 if (prev_lp)
1865                                         prev_lp->link = lp->link;
1866                                 else
1867                                         hp->link = lp->link;
1868                                 next_lp = lp->link;
1869                                 lp->link = bofi_link_freelist;
1870                                 bofi_link_freelist = lp;
1871                                 lp = next_lp;
1872                         } else {
1873                                 prev_lp = lp;
1874                                 lp = lp->link;
1875                         }
1876                 }
1877         }
1878         mutex_exit(&bofi_mutex);
1879         mutex_exit(&bofi_low_mutex);
1880 
1881         cv_destroy(&ep->cv);
1882         kmem_free(ep->name, ep->errdef.namesize+1);
1883         if ((ep->errdef.access_type & BOFI_LOG) &&
1884             ep->errdef.log.logsize && ep->logbase) /* double check */
1885                 kmem_free(ep->logbase,
1886                     sizeof (struct acc_log_elem) * ep->errdef.log.logsize);
1887 
1888         if (ep->softintr_id)
1889                 ddi_remove_softintr(ep->softintr_id);
1890         kmem_free(ep, sizeof (struct bofi_errent));
1891         return (0);
1892 }
1893 
1894 
1895 /*
1896  * start all errdefs corresponding to this name and instance
1897  */
1898 static void
1899 bofi_start(struct bofi_errctl *errctlp, char *namep)
1900 {
1901         struct bofi_errent *ep;
1902 
1903         /*
1904          * look for any errdefs with matching name and instance
1905          */
1906         mutex_enter(&bofi_low_mutex);
1907         for (ep = errent_listp; ep != NULL; ep = ep->next)
1908                 if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
1909                     errctlp->instance == ep->errdef.instance) {
1910                         ep->state |= BOFI_DEV_ACTIVE;
1911                         (void) drv_getparm(TIME, &(ep->errdef.log.start_time));
1912                         ep->errdef.log.stop_time = 0ul;
1913                 }
1914         mutex_exit(&bofi_low_mutex);
1915 }
1916 
1917 
1918 /*
1919  * stop all errdefs corresponding to this name and instance
1920  */
1921 static void
1922 bofi_stop(struct bofi_errctl *errctlp, char *namep)
1923 {
1924         struct bofi_errent *ep;
1925 
1926         /*
1927          * look for any errdefs with matching name and instance
1928          */
1929         mutex_enter(&bofi_low_mutex);
1930         for (ep = errent_listp; ep != NULL; ep = ep->next)
1931                 if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
1932                     errctlp->instance == ep->errdef.instance) {
1933                         ep->state &= ~BOFI_DEV_ACTIVE;
1934                         if (ep->errdef.log.stop_time == 0ul)
1935                                 (void) drv_getparm(TIME,
1936                                     &(ep->errdef.log.stop_time));
1937                 }
1938         mutex_exit(&bofi_low_mutex);
1939 }
1940 
1941 
1942 /*
1943  * wake up any thread waiting on this errdef
1944  */
1945 static uint_t
1946 bofi_signal(caddr_t arg)
1947 {
1948         struct bofi_errdef *edp = (struct bofi_errdef *)arg;
1949         struct bofi_errent *hep;
1950         struct bofi_errent *ep =
1951             (struct bofi_errent *)(uintptr_t)edp->errdef_handle;
1952 
1953         mutex_enter(&bofi_low_mutex);
1954         for (hep = errent_listp; hep != NULL; ) {
1955                 if (hep == ep)
1956                         break;
1957                 hep = hep->next;
1958         }
1959         if (hep == NULL) {
1960                 mutex_exit(&bofi_low_mutex);
1961                 return (DDI_INTR_UNCLAIMED);
1962         }
1963         if ((ep->errdef.access_type & BOFI_LOG) &&
1964             (edp->log.flags & BOFI_LOG_FULL)) {
1965                 edp->log.stop_time = bofi_gettime();
1966                 ep->state |= BOFI_NEW_MESSAGE;
1967                 if (ep->state & BOFI_MESSAGE_WAIT)
1968                         cv_broadcast(&ep->cv);
1969                 ep->state &= ~BOFI_MESSAGE_WAIT;
1970         }
1971         if (ep->errstate.msg_time != 0) {
1972                 ep->state |= BOFI_NEW_MESSAGE;
1973                 if (ep->state & BOFI_MESSAGE_WAIT)
1974                         cv_broadcast(&ep->cv);
1975                 ep->state &= ~BOFI_MESSAGE_WAIT;
1976         }
1977         mutex_exit(&bofi_low_mutex);
1978         return (DDI_INTR_CLAIMED);
1979 }
1980 
1981 
1982 /*
1983  * wake up all errdefs corresponding to this name and instance
1984  */
1985 static void
1986 bofi_broadcast(struct bofi_errctl *errctlp, char *namep)
1987 {
1988         struct bofi_errent *ep;
1989 
1990         /*
1991          * look for any errdefs with matching name and instance
1992          */
1993         mutex_enter(&bofi_low_mutex);
1994         for (ep = errent_listp; ep != NULL; ep = ep->next)
1995                 if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
1996                     errctlp->instance == ep->errdef.instance) {
1997                         /*
1998                          * wake up sleepers
1999                          */
2000                         ep->state |= BOFI_NEW_MESSAGE;
2001                         if (ep->state & BOFI_MESSAGE_WAIT)
2002                                 cv_broadcast(&ep->cv);
2003                         ep->state &= ~BOFI_MESSAGE_WAIT;
2004                 }
2005         mutex_exit(&bofi_low_mutex);
2006 }
2007 
2008 
2009 /*
2010  * clear "acc_chk" for all errdefs corresponding to this name and instance
2011  * and wake them up.
2012  */
2013 static void
2014 bofi_clear_acc_chk(struct bofi_errctl *errctlp, char *namep)
2015 {
2016         struct bofi_errent *ep;
2017 
2018         /*
2019          * look for any errdefs with matching name and instance
2020          */
2021         mutex_enter(&bofi_low_mutex);
2022         for (ep = errent_listp; ep != NULL; ep = ep->next)
2023                 if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
2024                     errctlp->instance == ep->errdef.instance) {
2025                         mutex_enter(&bofi_mutex);
2026                         if (ep->errdef.access_count == 0 &&
2027                             ep->errdef.fail_count == 0)
2028                                 ep->errdef.acc_chk = 0;
2029                         mutex_exit(&bofi_mutex);
2030                         /*
2031                          * wake up sleepers
2032                          */
2033                         ep->state |= BOFI_NEW_MESSAGE;
2034                         if (ep->state & BOFI_MESSAGE_WAIT)
2035                                 cv_broadcast(&ep->cv);
2036                         ep->state &= ~BOFI_MESSAGE_WAIT;
2037                 }
2038         mutex_exit(&bofi_low_mutex);
2039 }
2040 
2041 
2042 /*
2043  * set "fail_count" to 0 for all errdefs corresponding to this name and instance
2044  * whose "access_count" has expired, set "acc_chk" to 0 and wake them up.
2045  */
2046 static void
2047 bofi_clear_errors(struct bofi_errctl *errctlp, char *namep)
2048 {
2049         struct bofi_errent *ep;
2050 
2051         /*
2052          * look for any errdefs with matching name and instance
2053          */
2054         mutex_enter(&bofi_low_mutex);
2055         for (ep = errent_listp; ep != NULL; ep = ep->next)
2056                 if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
2057                     errctlp->instance == ep->errdef.instance) {
2058                         mutex_enter(&bofi_mutex);
2059                         if (ep->errdef.access_count == 0) {
2060                                 ep->errdef.acc_chk = 0;
2061                                 ep->errdef.fail_count = 0;
2062                                 mutex_exit(&bofi_mutex);
2063                                 if (ep->errdef.log.stop_time == 0ul)
2064                                         (void) drv_getparm(TIME,
2065                                             &(ep->errdef.log.stop_time));
2066                         } else
2067                                 mutex_exit(&bofi_mutex);
2068                         /*
2069                          * wake up sleepers
2070                          */
2071                         ep->state |= BOFI_NEW_MESSAGE;
2072                         if (ep->state & BOFI_MESSAGE_WAIT)
2073                                 cv_broadcast(&ep->cv);
2074                         ep->state &= ~BOFI_MESSAGE_WAIT;
2075                 }
2076         mutex_exit(&bofi_low_mutex);
2077 }
2078 
2079 
2080 /*
2081  * set "access_count" and "fail_count" to 0 for all errdefs corresponding to
2082  * this name and instance, set "acc_chk" to 0, and wake them up.
2083  */
2084 static void
2085 bofi_clear_errdefs(struct bofi_errctl *errctlp, char *namep)
2086 {
2087         struct bofi_errent *ep;
2088 
2089         /*
2090          * look for any errdefs with matching name and instance
2091          */
2092         mutex_enter(&bofi_low_mutex);
2093         for (ep = errent_listp; ep != NULL; ep = ep->next)
2094                 if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
2095                     errctlp->instance == ep->errdef.instance) {
2096                         mutex_enter(&bofi_mutex);
2097                         ep->errdef.acc_chk = 0;
2098                         ep->errdef.access_count = 0;
2099                         ep->errdef.fail_count = 0;
2100                         mutex_exit(&bofi_mutex);
2101                         if (ep->errdef.log.stop_time == 0ul)
2102                                 (void) drv_getparm(TIME,
2103                                     &(ep->errdef.log.stop_time));
2104                         /*
2105                          * wake up sleepers
2106                          */
2107                         ep->state |= BOFI_NEW_MESSAGE;
2108                         if (ep->state & BOFI_MESSAGE_WAIT)
2109                                 cv_broadcast(&ep->cv);
2110                         ep->state &= ~BOFI_MESSAGE_WAIT;
2111                 }
2112         mutex_exit(&bofi_low_mutex);
2113 }
2114 
2115 
2116 /*
2117  * get state for this errdef
2118  */
2119 static int
2120 bofi_errdef_check(struct bofi_errstate *errstatep, struct acc_log_elem **logpp)
2121 {
2122         struct bofi_errent *hep;
2123         struct bofi_errent *ep;
2124 
2125         ep = (struct bofi_errent *)(uintptr_t)errstatep->errdef_handle;
2126         mutex_enter(&bofi_low_mutex);
2127         /*
2128          * don't just assume it's a valid ep - check that it's on the
2129          * in-use list
2130          */
2131         for (hep = errent_listp; hep != NULL; hep = hep->next)
2132                 if (hep == ep)
2133                         break;
2134         if (hep == NULL) {
2135                 mutex_exit(&bofi_low_mutex);
2136                 return (EINVAL);
2137         }
2138         mutex_enter(&bofi_mutex);
2139         ep->errstate.access_count = ep->errdef.access_count;
2140         ep->errstate.fail_count = ep->errdef.fail_count;
2141         ep->errstate.acc_chk = ep->errdef.acc_chk;
2142         ep->errstate.log = ep->errdef.log;
2143         *logpp = ep->logbase;
2144         *errstatep = ep->errstate;
2145         mutex_exit(&bofi_mutex);
2146         mutex_exit(&bofi_low_mutex);
2147         return (0);
2148 }
2149 
2150 
2151 /*
2152  * Wait for a ddi_report_fault message to come back for this errdef
2153  * Then return state for this errdef.
2154  * fault report is intercepted by bofi_post_event, which triggers
2155  * bofi_signal via a softint, which will wake up this routine if
2156  * we are waiting
2157  */
2158 static int
2159 bofi_errdef_check_w(struct bofi_errstate *errstatep,
2160     struct acc_log_elem **logpp)
2161 {
2162         struct bofi_errent *hep;
2163         struct bofi_errent *ep;
2164         int rval = 0;
2165 
2166         ep = (struct bofi_errent *)(uintptr_t)errstatep->errdef_handle;
2167         mutex_enter(&bofi_low_mutex);
2168 retry:
2169         /*
2170          * don't just assume it's a valid ep - check that it's on the
2171          * in-use list
2172          */
2173         for (hep = errent_listp; hep != NULL; hep = hep->next)
2174                 if (hep == ep)
2175                         break;
2176         if (hep == NULL) {
2177                 mutex_exit(&bofi_low_mutex);
2178                 return (EINVAL);
2179         }
2180         /*
2181          * wait for ddi_report_fault for the devinfo corresponding
2182          * to this errdef
2183          */
2184         if (rval == 0 && !(ep->state & BOFI_NEW_MESSAGE)) {
2185                 ep->state |= BOFI_MESSAGE_WAIT;
2186                 if (cv_wait_sig(&ep->cv, &bofi_low_mutex) == 0) {
2187                         if (!(ep->state & BOFI_NEW_MESSAGE))
2188                                 rval = EINTR;
2189                 }
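                     /*
                      * re-check from the top - the errdef may have been
                      * deleted while we slept, so verify it is still on the
                      * in-use list before touching it again
                      */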
2190                 goto retry;
2191         }
2192         ep->state &= ~BOFI_NEW_MESSAGE;
2193         /*
2194          * we either didn't need to sleep, we've been woken up, or we've been
2195          * signaled - either way return the state now
2196          */
2197         mutex_enter(&bofi_mutex);
2198         ep->errstate.access_count = ep->errdef.access_count;
2199         ep->errstate.fail_count = ep->errdef.fail_count;
2200         ep->errstate.acc_chk = ep->errdef.acc_chk;
2201         ep->errstate.log = ep->errdef.log;
2202         *logpp = ep->logbase;
2203         *errstatep = ep->errstate;
2204         mutex_exit(&bofi_mutex);
2205         mutex_exit(&bofi_low_mutex);
2206         return (rval);
2207 }
2208 
2209 
2210 /*
2211  * support routine - check if requested driver is defined as under test in the
2212  * conf file.
2213  */
2214 static int
2215 driver_under_test(dev_info_t *rdip)
2216 {
2217         int i;
2218         char    *rname;
2219         major_t rmaj;
2220 
2221         rname = ddi_get_name(rdip);
2222         rmaj = ddi_name_to_major(rname);
2223 
2224         /*
2225          * Require the user to specifically request the following drivers.
2226          */
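             /*
              * driver_list is a packed sequence of NUL-terminated driver names
              * (hence the stride of 1 + strlen()). When driver_list_neg is
              * set, each entry is assumed to carry a one-character negation
              * prefix, which the i + 1 below skips over.
              */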
2227         for (i = 0; i < driver_list_size; i += (1 + strlen(&driver_list[i]))) {
2228                 if (driver_list_neg == 0) {
2229                         if (rmaj == ddi_name_to_major(&driver_list[i]))
2230                                 return (1);
2231                 } else {
2232                         if (rmaj == ddi_name_to_major(&driver_list[i+1]))
2233                                 return (0);
2234                 }
2235         }
2236         if (driver_list_neg == 0)
2237                 return (0);
2238         else
2239                 return (1);
2240 
2241 }
2242 
2243 
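     /*
      * record this access in the errdef's access log. When the log fills up,
      * mark it full and trigger the errdef's soft interrupt so bofi_signal
      * can wake up any thread waiting on the errdef.
      */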
2244 static void
2245 log_acc_event(struct bofi_errent *ep, uint_t at, offset_t offset, off_t len,
2246     size_t repcount, uint64_t *valuep)
2247 {
2248         struct bofi_errdef *edp = &(ep->errdef);
2249         struct acc_log *log = &edp->log;
2250 
2251         ASSERT(log != NULL);
2252         ASSERT(MUTEX_HELD(&bofi_mutex));
2253 
2254         if (log->flags & BOFI_LOG_REPIO)
2255                 repcount = 1;
2256         else if (repcount == 0 && edp->access_count > 0 &&
2257             (log->flags & BOFI_LOG_FULL) == 0)
2258                 edp->access_count += 1;
2259 
2260         if (repcount && log->entries < log->logsize) {
2261                 struct acc_log_elem *elem = ep->logbase + log->entries;
2262 
2263                 if (log->flags & BOFI_LOG_TIMESTAMP)
2264                         elem->access_time = bofi_gettime();
2265                 elem->access_type = at;
2266                 elem->offset = offset;
2267                 elem->value = valuep ? *valuep : 0ll;
2268                 elem->size = len;
2269                 elem->repcount = repcount;
2270                 ++log->entries;
2271                 if (log->entries == log->logsize) {
2272                         log->flags |= BOFI_LOG_FULL;
2273                         ddi_trigger_softintr(((struct bofi_errent *)
2274                             (uintptr_t)edp->errdef_handle)->softintr_id);
2275                 }
2276         }
2277         if ((log->flags & BOFI_LOG_WRAP) && edp->access_count <= 1) {
2278                 log->wrapcnt++;
2279                 edp->access_count = log->logsize;
2280                 log->entries = 0;    /* wrap back to the start */
2281         }
2282 }
2283 
2284 
2285 /*
2286  * got a condition match on dma read/write - check counts and corrupt
2287  * data if necessary
2288  *
2289  * bofi_mutex always held when this is called.
2290  */
2291 static void
2292 do_dma_corrupt(struct bofi_shadow *hp, struct bofi_errent *ep,
2293         uint_t synctype, off_t off, off_t length)
2294 {
2295         uint64_t operand;
2296         int i;
2297         off_t len;
2298         caddr_t logaddr;
2299         uint64_t *addr;
2300         uint64_t *endaddr;
2301         ddi_dma_impl_t *hdlp;
2302         ndi_err_t *errp;
2303 
2304         ASSERT(MUTEX_HELD(&bofi_mutex));
2305         if ((ep->errdef.access_count ||
2306             ep->errdef.fail_count) &&
2307             (ep->errdef.access_type & BOFI_LOG)) {
2308                 uint_t atype;
2309 
2310                 if (synctype == DDI_DMA_SYNC_FORDEV)
2311                         atype = BOFI_DMA_W;
2312                 else if (synctype == DDI_DMA_SYNC_FORCPU ||
2313                     synctype == DDI_DMA_SYNC_FORKERNEL)
2314                         atype = BOFI_DMA_R;
2315                 else
2316                         atype = 0;
2317                 if ((off <= ep->errdef.offset &&
2318                     off + length > ep->errdef.offset) ||
2319                     (off > ep->errdef.offset &&
2320                     off < ep->errdef.offset + ep->errdef.len)) {
2321                         logaddr = (caddr_t)((uintptr_t)(hp->addr +
2322                             off + LLSZMASK) & ~LLSZMASK);
2323 
2324                         log_acc_event(ep, atype, logaddr - hp->addr,
2325                             length, 1, 0);
2326                 }
2327         }
2328         if (ep->errdef.access_count > 1) {
2329                 ep->errdef.access_count--;
2330         } else if (ep->errdef.fail_count > 0) {
2331                 ep->errdef.fail_count--;
2332                 ep->errdef.access_count = 0;
2333                 /*
2334                  * OK do the corruption
2335                  */
2336                 if (ep->errstate.fail_time == 0)
2337                         ep->errstate.fail_time = bofi_gettime();
2338                 /*
2339                  * work out how much to corrupt
2340                  *
2341                  * Make sure endaddr isn't greater than hp->addr + hp->len.
2342                  * If endaddr becomes less than addr, len becomes negative
2343                  * and the following loop isn't entered.
2344                  */
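                     /*
                      * addr is rounded up and endaddr rounded down to the next
                      * aligned 64-bit word, so len counts whole uint64_t words
                      * (not bytes) and only complete words inside the
                      * requested range get corrupted
                      */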
2345                 addr = (uint64_t *)((uintptr_t)((hp->addr +
2346                     ep->errdef.offset) + LLSZMASK) & ~LLSZMASK);
2347                 endaddr = (uint64_t *)((uintptr_t)(hp->addr + min(hp->len,
2348                     ep->errdef.offset + ep->errdef.len)) & ~LLSZMASK);
2349                 len = endaddr - addr;
2350                 operand = ep->errdef.operand;
2351                 hdlp = (ddi_dma_impl_t *)(hp->hdl.dma_handle);
2352                 errp = &hdlp->dmai_error;
2353                 if (ep->errdef.acc_chk & 2) {
2354                         uint64_t ena;
2355                         char buf[FM_MAX_CLASS];
2356 
2357                         errp->err_status = DDI_FM_NONFATAL;
2358                         (void) snprintf(buf, FM_MAX_CLASS, FM_SIMULATED_DMA);
2359                         ena = fm_ena_generate(0, FM_ENA_FMT1);
2360                         ddi_fm_ereport_post(hp->dip, buf, ena,
2361                             DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8,
2362                             FM_EREPORT_VERS0, NULL);
2363                 }
2364                 switch (ep->errdef.optype) {
2365                 case BOFI_EQUAL :
2366                         for (i = 0; i < len; i++)
2367                                 *(addr + i) = operand;
2368                         break;
2369                 case BOFI_AND :
2370                         for (i = 0; i < len; i++)
2371                                 *(addr + i) &= operand;
2372                         break;
2373                 case BOFI_OR :
2374                         for (i = 0; i < len; i++)
2375                                 *(addr + i) |= operand;
2376                         break;
2377                 case BOFI_XOR :
2378                         for (i = 0; i < len; i++)
2379                                 *(addr + i) ^= operand;
2380                         break;
2381                 default:
2382                         /* do nothing */
2383                         break;
2384                 }
2385         }
2386 }
2387 
2388 
2389 static uint64_t do_bofi_rd8(struct bofi_shadow *, caddr_t);
2390 static uint64_t do_bofi_rd16(struct bofi_shadow *, caddr_t);
2391 static uint64_t do_bofi_rd32(struct bofi_shadow *, caddr_t);
2392 static uint64_t do_bofi_rd64(struct bofi_shadow *, caddr_t);
2393 
2394 
2395 /*
2396  * check all errdefs linked to this shadow handle. If we've got a condition
2397  * match, check counts and corrupt data if necessary
2398  *
2399  * bofi_mutex always held when this is called.
2400  *
2401  * because of the possibility of BOFI_NO_TRANSFER, we can't get data
2402  * from io-space before calling this, so we pass in the func to do the
2403  * transfer as a parameter.
2404  */
2405 static uint64_t
2406 do_pior_corrupt(struct bofi_shadow *hp, caddr_t addr,
2407         uint64_t (*func)(), size_t repcount, size_t accsize)
2408 {
2409         struct bofi_errent *ep;
2410         struct bofi_link   *lp;
2411         uint64_t operand;
2412         uintptr_t minlen;
2413         intptr_t base;
2414         int done_get = 0;
2415         uint64_t get_val, gv;
2416         ddi_acc_impl_t *hdlp;
2417         ndi_err_t *errp;
2418 
2419         ASSERT(MUTEX_HELD(&bofi_mutex));
2420         /*
2421          * check through all errdefs associated with this shadow handle
2422          */
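             /*
              * done_get ensures the real access routine is called at most
              * once, however many errdefs match. The read is done lazily so
              * that a failing BOFI_NO_TRANSFER errdef can return its operand
              * without io-space ever being touched.
              */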
2423         for (lp = hp->link; lp != NULL; lp = lp->link) {
2424                 ep = lp->errentp;
2425                 if (ep->errdef.len == 0)
2426                         minlen = hp->len;
2427                 else
2428                         minlen = min(hp->len, ep->errdef.len);
2429                 base = addr - hp->addr - ep->errdef.offset + hp->offset;
2430                 if ((ep->errdef.access_type & BOFI_PIO_R) &&
2431                     (ep->state & BOFI_DEV_ACTIVE) &&
2432                     base >= 0 && base < minlen) {
2433                         /*
2434                          * condition match for pio read
2435                          */
2436                         if (ep->errdef.access_count > 1) {
2437                                 ep->errdef.access_count--;
2438                                 if (done_get == 0) {
2439                                         done_get = 1;
2440                                         gv = get_val = func(hp, addr);
2441                                 }
2442                                 if (ep->errdef.access_type & BOFI_LOG) {
2443                                         log_acc_event(ep, BOFI_PIO_R,
2444                                             addr - hp->addr,
2445                                             accsize, repcount, &gv);
2446                                 }
2447                         } else if (ep->errdef.fail_count > 0) {
2448                                 ep->errdef.fail_count--;
2449                                 ep->errdef.access_count = 0;
2450                                 /*
2451                                  * OK do corruption
2452                                  */
2453                                 if (ep->errstate.fail_time == 0)
2454                                         ep->errstate.fail_time = bofi_gettime();
2455                                 operand = ep->errdef.operand;
2456                                 if (done_get == 0) {
2457                                         if (ep->errdef.optype ==
2458                                             BOFI_NO_TRANSFER)
2459                                                 /*
2460                                                  * no transfer - bomb out
2461                                                  */
2462                                                 return (operand);
2463                                         done_get = 1;
2464                                         gv = get_val = func(hp, addr);
2465 
2466                                 }
2467                                 if (ep->errdef.access_type & BOFI_LOG) {
2468                                         log_acc_event(ep, BOFI_PIO_R,
2469                                             addr - hp->addr,
2470                                             accsize, repcount, &gv);
2471                                 }
2472                                 hdlp = (ddi_acc_impl_t *)(hp->hdl.acc_handle);
2473                                 errp = hdlp->ahi_err;
2474                                 if (ep->errdef.acc_chk & 1) {
2475                                         uint64_t ena;
2476                                         char buf[FM_MAX_CLASS];
2477 
2478                                         errp->err_status = DDI_FM_NONFATAL;
2479                                         (void) snprintf(buf, FM_MAX_CLASS,
2480                                             FM_SIMULATED_PIO);
2481                                         ena = fm_ena_generate(0, FM_ENA_FMT1);
2482                                         ddi_fm_ereport_post(hp->dip, buf, ena,
2483                                             DDI_NOSLEEP, FM_VERSION,
2484                                             DATA_TYPE_UINT8, FM_EREPORT_VERS0,
2485                                             NULL);
2486                                 }
2487                                 switch (ep->errdef.optype) {
2488                                 case BOFI_EQUAL :
2489                                         get_val = operand;
2490                                         break;
2491                                 case BOFI_AND :
2492                                         get_val &= operand;
2493                                         break;
2494                                 case BOFI_OR :
2495                                         get_val |= operand;
2496                                         break;
2497                                 case BOFI_XOR :
2498                                         get_val ^= operand;
2499                                         break;
2500                                 default:
2501                                         /* do nothing */
2502                                         break;
2503                                 }
2504                         }
2505                 }
2506         }
2507         if (done_get == 0)
2508                 return (func(hp, addr));
2509         else
2510                 return (get_val);
2511 }
2512 
2513 
2514 /*
2515  * check all errdefs linked to this shadow handle. If we've got a condition
2516  * match, check counts and corrupt data if necessary
2517  *
2518  * bofi_mutex always held when this is called.
2519  *
2520  * because of the possibility of BOFI_NO_TRANSFER, we return 0 if no data
2521  * is to be written out to io-space, 1 otherwise
2522  */
2523 static int
2524 do_piow_corrupt(struct bofi_shadow *hp, caddr_t addr, uint64_t *valuep,
2525                                 size_t size, size_t repcount)
2526 {
2527         struct bofi_errent *ep;
2528         struct bofi_link   *lp;
2529         uintptr_t minlen;
2530         intptr_t base;
2531         uint64_t v = *valuep;
2532         ddi_acc_impl_t *hdlp;
2533         ndi_err_t *errp;
2534 
2535         ASSERT(MUTEX_HELD(&bofi_mutex));
2536         /*
2537          * check through all errdefs associated with this shadow handle
2538          */
2539         for (lp = hp->link; lp != NULL; lp = lp->link) {
2540                 ep = lp->errentp;
2541                 if (ep->errdef.len == 0)
2542                         minlen = hp->len;
2543                 else
2544                         minlen = min(hp->len, ep->errdef.len);
2545                 base = (caddr_t)addr - hp->addr - ep->errdef.offset +hp->offset;
2546                 if ((ep->errdef.access_type & BOFI_PIO_W) &&
2547                     (ep->state & BOFI_DEV_ACTIVE) &&
2548                     base >= 0 && base < minlen) {
2549                         /*
2550                          * condition match for pio write
2551                          */
2552 
2553                         if (ep->errdef.access_count > 1) {
2554                                 ep->errdef.access_count--;
2555                                 if (ep->errdef.access_type & BOFI_LOG)
2556                                         log_acc_event(ep, BOFI_PIO_W,
2557                                             addr - hp->addr, size,
2558                                             repcount, &v);
2559                         } else if (ep->errdef.fail_count > 0) {
2560                                 ep->errdef.fail_count--;
2561                                 ep->errdef.access_count = 0;
2562                                 if (ep->errdef.access_type & BOFI_LOG)
2563                                         log_acc_event(ep, BOFI_PIO_W,
2564                                             addr - hp->addr, size,
2565                                             repcount, &v);
2566                                 /*
2567                                  * OK do corruption
2568                                  */
2569                                 if (ep->errstate.fail_time == 0)
2570                                         ep->errstate.fail_time = bofi_gettime();
2571                                 hdlp = (ddi_acc_impl_t *)(hp->hdl.acc_handle);
2572                                 errp = hdlp->ahi_err;
2573                                 if (ep->errdef.acc_chk & 1) {
2574                                         uint64_t ena;
2575                                         char buf[FM_MAX_CLASS];
2576 
2577                                         errp->err_status = DDI_FM_NONFATAL;
2578                                         (void) snprintf(buf, FM_MAX_CLASS,
2579                                             FM_SIMULATED_PIO);
2580                                         ena = fm_ena_generate(0, FM_ENA_FMT1);
2581                                         ddi_fm_ereport_post(hp->dip, buf, ena,
2582                                             DDI_NOSLEEP, FM_VERSION,
2583                                             DATA_TYPE_UINT8, FM_EREPORT_VERS0,
2584                                             NULL);
2585                                 }
2586                                 switch (ep->errdef.optype) {
2587                                 case BOFI_EQUAL :
2588                                         *valuep = ep->errdef.operand;
2589                                         break;
2590                                 case BOFI_AND :
2591                                         *valuep &= ep->errdef.operand;
2592                                         break;
2593                                 case BOFI_OR :
2594                                         *valuep |= ep->errdef.operand;
2595                                         break;
2596                                 case BOFI_XOR :
2597                                         *valuep ^= ep->errdef.operand;
2598                                         break;
2599                                 case BOFI_NO_TRANSFER :
2600                                         /*
2601                                          * no transfer - bomb out
2602                                          */
2603                                         return (0);
2604                                 default:
2605                                         /* do nothing */
2606                                         break;
2607                                 }
2608                         }
2609                 }
2610         }
2611         return (1);
2612 }
2613 
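/*
 * Illustration (annotation, not in the original source): given a
 * hypothetical errdef whose access_type includes BOFI_PIO_W, with optype
 * BOFI_XOR and operand 0xff, a driver ddi_put8() of 0x5a that falls inside
 * the errdef's offset/len window reaches the device as 0xa5; with
 * BOFI_NO_TRANSFER the store is dropped entirely (do_piow_corrupt()
 * returns 0, so the caller skips the real ahi_put8()). The read path in
 * do_pior_corrupt() applies the same EQUAL/AND/OR/XOR transforms to the
 * value fetched from the device.
 */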
2614 
2615 static uint64_t
2616 do_bofi_rd8(struct bofi_shadow *hp, caddr_t addr)
2617 {
2618         return (hp->save.acc.ahi_get8(&hp->save.acc, (uint8_t *)addr));
2619 }
2620 
2621 #define BOFI_READ_CHECKS(type) \
2622         if (bofi_ddi_check) \
2623                 addr = (type *)((uintptr_t)addr - 64 + hp->addr); \
2624         if (bofi_range_check && ((caddr_t)addr < hp->addr || \
2625             (caddr_t)addr - hp->addr >= hp->len)) { \
2626                 cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
2627                     "ddi_get() out of range addr %p not in %p/%llx", \
2628                     (void *)addr, (void *)hp->addr, hp->len); \
2629                 return (0); \
2630         }
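/*
 * Annotation: as used above, bofi_range_check == 2 turns an out-of-range
 * ddi_get()/ddi_put() into a CE_PANIC, any other non-zero value just warns
 * and the access is dropped (a read returns 0). bofi_ddi_check pairs with
 * bofi_map() below, which hands the driver the spurious register address
 * 64 to catch direct dereferences of mapped registers; the macro rebases
 * such addresses back onto the real mapping (hp->addr) before the access
 * goes ahead.
 */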
2631 
2632 /*
2633  * our getb() routine - use tryenter
2634  */
2635 static uint8_t
2636 bofi_rd8(ddi_acc_impl_t *handle, uint8_t *addr)
2637 {
2638         struct bofi_shadow *hp;
2639         uint8_t retval;
2640 
2641         hp = handle->ahi_common.ah_bus_private;
2642         BOFI_READ_CHECKS(uint8_t)
2643         if (!hp->link || !mutex_tryenter(&bofi_mutex))
2644                 return (hp->save.acc.ahi_get8(&hp->save.acc, addr));
2645         retval = (uint8_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd8, 1,
2646             1);
2647         mutex_exit(&bofi_mutex);
2648         return (retval);
2649 }
2650 
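/*
 * Annotation: the mutex_tryenter() pattern in these access routines means
 * that if bofi_mutex is already held, or the handle has no errdefs linked
 * (hp->link == NULL), the access falls straight through to the saved nexus
 * ahi_get/ahi_put routine uncorrupted - presumably so that PIO issued from
 * interrupt or other awkward contexts never blocks on the harness's lock.
 */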
2651 
2652 static uint64_t
2653 do_bofi_rd16(struct bofi_shadow *hp, caddr_t addr)
2654 {
2655         return (hp->save.acc.ahi_get16(&hp->save.acc, (uint16_t *)addr));
2656 }
2657 
2658 
2659 /*
2660  * our getw() routine - use tryenter
2661  */
2662 static uint16_t
2663 bofi_rd16(ddi_acc_impl_t *handle, uint16_t *addr)
2664 {
2665         struct bofi_shadow *hp;
2666         uint16_t retval;
2667 
2668         hp = handle->ahi_common.ah_bus_private;
2669         BOFI_READ_CHECKS(uint16_t)
2670         if (!hp->link || !mutex_tryenter(&bofi_mutex))
2671                 return (hp->save.acc.ahi_get16(&hp->save.acc, addr));
2672         retval = (uint16_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd16, 1,
2673             2);
2674         mutex_exit(&bofi_mutex);
2675         return (retval);
2676 }
2677 
2678 
2679 static uint64_t
2680 do_bofi_rd32(struct bofi_shadow *hp, caddr_t addr)
2681 {
2682         return (hp->save.acc.ahi_get32(&hp->save.acc, (uint32_t *)addr));
2683 }
2684 
2685 
2686 /*
2687  * our getl() routine - use tryenter
2688  */
2689 static uint32_t
2690 bofi_rd32(ddi_acc_impl_t *handle, uint32_t *addr)
2691 {
2692         struct bofi_shadow *hp;
2693         uint32_t retval;
2694 
2695         hp = handle->ahi_common.ah_bus_private;
2696         BOFI_READ_CHECKS(uint32_t)
2697         if (!hp->link || !mutex_tryenter(&bofi_mutex))
2698                 return (hp->save.acc.ahi_get32(&hp->save.acc, addr));
2699         retval = (uint32_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd32, 1,
2700             4);
2701         mutex_exit(&bofi_mutex);
2702         return (retval);
2703 }
2704 
2705 
2706 static uint64_t
2707 do_bofi_rd64(struct bofi_shadow *hp, caddr_t addr)
2708 {
2709         return (hp->save.acc.ahi_get64(&hp->save.acc, (uint64_t *)addr));
2710 }
2711 
2712 
2713 /*
2714  * our getll() routine - use tryenter
2715  */
2716 static uint64_t
2717 bofi_rd64(ddi_acc_impl_t *handle, uint64_t *addr)
2718 {
2719         struct bofi_shadow *hp;
2720         uint64_t retval;
2721 
2722         hp = handle->ahi_common.ah_bus_private;
2723         BOFI_READ_CHECKS(uint64_t)
2724         if (!hp->link || !mutex_tryenter(&bofi_mutex))
2725                 return (hp->save.acc.ahi_get64(&hp->save.acc, addr));
2726         retval = (uint64_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd64, 1,
2727             8);
2728         mutex_exit(&bofi_mutex);
2729         return (retval);
2730 }
2731 
2732 #define BOFI_WRITE_TESTS(type) \
2733         if (bofi_ddi_check) \
2734                 addr = (type *)((uintptr_t)addr - 64 + hp->addr); \
2735         if (bofi_range_check && ((caddr_t)addr < hp->addr || \
2736             (caddr_t)addr - hp->addr >= hp->len)) { \
2737                 cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
2738                     "ddi_put() out of range addr %p not in %p/%llx\n", \
2739                     (void *)addr, (void *)hp->addr, hp->len); \
2740                 return; \
2741         }
2742 
2743 /*
2744  * our putb() routine - use tryenter
2745  */
2746 static void
2747 bofi_wr8(ddi_acc_impl_t *handle, uint8_t *addr, uint8_t value)
2748 {
2749         struct bofi_shadow *hp;
2750         uint64_t llvalue = value;
2751 
2752         hp = handle->ahi_common.ah_bus_private;
2753         BOFI_WRITE_TESTS(uint8_t)
2754         if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2755                 hp->save.acc.ahi_put8(&hp->save.acc, addr, (uint8_t)llvalue);
2756                 return;
2757         }
2758         if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 1, 1))
2759                 hp->save.acc.ahi_put8(&hp->save.acc, addr, (uint8_t)llvalue);
2760         mutex_exit(&bofi_mutex);
2761 }
2762 
2763 
2764 /*
2765  * our putw() routine - use tryenter
2766  */
2767 static void
2768 bofi_wr16(ddi_acc_impl_t *handle, uint16_t *addr, uint16_t value)
2769 {
2770         struct bofi_shadow *hp;
2771         uint64_t llvalue = value;
2772 
2773         hp = handle->ahi_common.ah_bus_private;
2774         BOFI_WRITE_TESTS(uint16_t)
2775         if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2776                 hp->save.acc.ahi_put16(&hp->save.acc, addr, (uint16_t)llvalue);
2777                 return;
2778         }
2779         if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 2, 1))
2780                 hp->save.acc.ahi_put16(&hp->save.acc, addr, (uint16_t)llvalue);
2781         mutex_exit(&bofi_mutex);
2782 }
2783 
2784 
2785 /*
2786  * our putl() routine - use tryenter
2787  */
2788 static void
2789 bofi_wr32(ddi_acc_impl_t *handle, uint32_t *addr, uint32_t value)
2790 {
2791         struct bofi_shadow *hp;
2792         uint64_t llvalue = value;
2793 
2794         hp = handle->ahi_common.ah_bus_private;
2795         BOFI_WRITE_TESTS(uint32_t)
2796         if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2797                 hp->save.acc.ahi_put32(&hp->save.acc, addr, (uint32_t)llvalue);
2798                 return;
2799         }
2800         if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 4, 1))
2801                 hp->save.acc.ahi_put32(&hp->save.acc, addr, (uint32_t)llvalue);
2802         mutex_exit(&bofi_mutex);
2803 }
2804 
2805 
2806 /*
2807  * our putll() routine - use tryenter
2808  */
2809 static void
2810 bofi_wr64(ddi_acc_impl_t *handle, uint64_t *addr, uint64_t value)
2811 {
2812         struct bofi_shadow *hp;
2813         uint64_t llvalue = value;
2814 
2815         hp = handle->ahi_common.ah_bus_private;
2816         BOFI_WRITE_TESTS(uint64_t)
2817         if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2818                 hp->save.acc.ahi_put64(&hp->save.acc, addr, (uint64_t)llvalue);
2819                 return;
2820         }
2821         if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 8, 1))
2822                 hp->save.acc.ahi_put64(&hp->save.acc, addr, (uint64_t)llvalue);
2823         mutex_exit(&bofi_mutex);
2824 }
2825 
2826 #define BOFI_REP_READ_TESTS(type) \
2827         if (bofi_ddi_check) \
2828                 dev_addr = (type *)((uintptr_t)dev_addr - 64 + hp->addr); \
2829         if (bofi_range_check && ((caddr_t)dev_addr < hp->addr || \
2830             (caddr_t)(dev_addr + repcount) - hp->addr > hp->len)) { \
2831                 cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
2832                     "ddi_rep_get() out of range addr %p not in %p/%llx\n", \
2833                     (void *)dev_addr, (void *)hp->addr, hp->len); \
2834                 if ((caddr_t)dev_addr < hp->addr || \
2835                     (caddr_t)dev_addr - hp->addr >= hp->len) \
2836                         return; \
2837                 repcount = (type *)(hp->addr + hp->len) - dev_addr; \
2838         }
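/*
 * Annotation: unlike the single-access checks, the rep_get/rep_put range
 * checks only bail out when the start address itself lies outside the
 * handle; if only the tail of the transfer overruns, repcount is clamped
 * so the transfer stops at the end of the mapped region.
 */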
2839 
2840 /*
2841  * our rep_getb() routine - use tryenter
2842  */
2843 static void
2844 bofi_rep_rd8(ddi_acc_impl_t *handle, uint8_t *host_addr, uint8_t *dev_addr,
2845         size_t repcount, uint_t flags)
2846 {
2847         struct bofi_shadow *hp;
2848         int i;
2849         uint8_t *addr;
2850 
2851         hp = handle->ahi_common.ah_bus_private;
2852         BOFI_REP_READ_TESTS(uint8_t)
2853         if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2854                 hp->save.acc.ahi_rep_get8(&hp->save.acc, host_addr, dev_addr,
2855                     repcount, flags);
2856                 return;
2857         }
2858         for (i = 0; i < repcount; i++) {
2859                 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2860                 *(host_addr + i) = (uint8_t)do_pior_corrupt(hp, (caddr_t)addr,
2861                     do_bofi_rd8, i ? 0 : repcount, 1);
2862         }
2863         mutex_exit(&bofi_mutex);
2864 }
2865 
2866 
2867 /*
2868  * our rep_getw() routine - use tryenter
2869  */
2870 static void
2871 bofi_rep_rd16(ddi_acc_impl_t *handle, uint16_t *host_addr,
2872         uint16_t *dev_addr, size_t repcount, uint_t flags)
2873 {
2874         struct bofi_shadow *hp;
2875         int i;
2876         uint16_t *addr;
2877 
2878         hp = handle->ahi_common.ah_bus_private;
2879         BOFI_REP_READ_TESTS(uint16_t)
2880         if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2881                 hp->save.acc.ahi_rep_get16(&hp->save.acc, host_addr, dev_addr,
2882                     repcount, flags);
2883                 return;
2884         }
2885         for (i = 0; i < repcount; i++) {
2886                 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2887                 *(host_addr + i) = (uint16_t)do_pior_corrupt(hp, (caddr_t)addr,
2888                     do_bofi_rd16, i ? 0 : repcount, 2);
2889         }
2890         mutex_exit(&bofi_mutex);
2891 }
2892 
2893 
2894 /*
2895  * our rep_getl() routine - use tryenter
2896  */
2897 static void
2898 bofi_rep_rd32(ddi_acc_impl_t *handle, uint32_t *host_addr,
2899         uint32_t *dev_addr, size_t repcount, uint_t flags)
2900 {
2901         struct bofi_shadow *hp;
2902         int i;
2903         uint32_t *addr;
2904 
2905         hp = handle->ahi_common.ah_bus_private;
2906         BOFI_REP_READ_TESTS(uint32_t)
2907         if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2908                 hp->save.acc.ahi_rep_get32(&hp->save.acc, host_addr, dev_addr,
2909                     repcount, flags);
2910                 return;
2911         }
2912         for (i = 0; i < repcount; i++) {
2913                 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2914                 *(host_addr + i) = (uint32_t)do_pior_corrupt(hp, (caddr_t)addr,
2915                     do_bofi_rd32, i ? 0 : repcount, 4);
2916         }
2917         mutex_exit(&bofi_mutex);
2918 }
2919 
2920 
2921 /*
2922  * our rep_getll() routine - use tryenter
2923  */
2924 static void
2925 bofi_rep_rd64(ddi_acc_impl_t *handle, uint64_t *host_addr,
2926         uint64_t *dev_addr, size_t repcount, uint_t flags)
2927 {
2928         struct bofi_shadow *hp;
2929         int i;
2930         uint64_t *addr;
2931 
2932         hp = handle->ahi_common.ah_bus_private;
2933         BOFI_REP_READ_TESTS(uint64_t)
2934         if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2935                 hp->save.acc.ahi_rep_get64(&hp->save.acc, host_addr, dev_addr,
2936                     repcount, flags);
2937                 return;
2938         }
2939         for (i = 0; i < repcount; i++) {
2940                 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2941                 *(host_addr + i) = (uint64_t)do_pior_corrupt(hp, (caddr_t)addr,
2942                     do_bofi_rd64, i ? 0 : repcount, 8);
2943         }
2944         mutex_exit(&bofi_mutex);
2945 }
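/*
 * Annotation: in the rep_get routines above, DDI_DEV_AUTOINCR walks the
 * device region one element at a time, while DDI_DEV_NO_AUTOINCR re-reads
 * the same device address repcount times (e.g. draining a FIFO register).
 * do_pior_corrupt() is consulted for every element, so an errdef can
 * corrupt any or all of the repetitions.
 */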
2946 
2947 #define BOFI_REP_WRITE_TESTS(type) \
2948         if (bofi_ddi_check) \
2949                 dev_addr = (type *)((uintptr_t)dev_addr - 64 + hp->addr); \
2950         if (bofi_range_check && ((caddr_t)dev_addr < hp->addr || \
2951             (caddr_t)(dev_addr + repcount) - hp->addr > hp->len)) { \
2952                 cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
2953                     "ddi_rep_put() out of range addr %p not in %p/%llx\n", \
2954                     (void *)dev_addr, (void *)hp->addr, hp->len); \
2955                 if ((caddr_t)dev_addr < hp->addr || \
2956                     (caddr_t)dev_addr - hp->addr >= hp->len) \
2957                         return; \
2958                 repcount = (type *)(hp->addr + hp->len) - dev_addr; \
2959         }
2960 
2961 /*
2962  * our rep_putb() routine - use tryenter
2963  */
2964 static void
2965 bofi_rep_wr8(ddi_acc_impl_t *handle, uint8_t *host_addr, uint8_t *dev_addr,
2966         size_t repcount, uint_t flags)
2967 {
2968         struct bofi_shadow *hp;
2969         int i;
2970         uint64_t llvalue;
2971         uint8_t *addr;
2972 
2973         hp = handle->ahi_common.ah_bus_private;
2974         BOFI_REP_WRITE_TESTS(uint8_t)
2975         if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2976                 hp->save.acc.ahi_rep_put8(&hp->save.acc, host_addr, dev_addr,
2977                     repcount, flags);
2978                 return;
2979         }
2980         for (i = 0; i < repcount; i++) {
2981                 llvalue = *(host_addr + i);
2982                 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
2983                 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 1, i ? 0 :
2984                     repcount))
2985                         hp->save.acc.ahi_put8(&hp->save.acc, addr,
2986                             (uint8_t)llvalue);
2987         }
2988         mutex_exit(&bofi_mutex);
2989 }
2990 
2991 
2992 /*
2993  * our rep_putw() routine - use tryenter
2994  */
2995 static void
2996 bofi_rep_wr16(ddi_acc_impl_t *handle, uint16_t *host_addr,
2997         uint16_t *dev_addr, size_t repcount, uint_t flags)
2998 {
2999         struct bofi_shadow *hp;
3000         int i;
3001         uint64_t llvalue;
3002         uint16_t *addr;
3003 
3004         hp = handle->ahi_common.ah_bus_private;
3005         BOFI_REP_WRITE_TESTS(uint16_t)
3006         if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
3007                 hp->save.acc.ahi_rep_put16(&hp->save.acc, host_addr, dev_addr,
3008                     repcount, flags);
3009                 return;
3010         }
3011         for (i = 0; i < repcount; i++) {
3012                 llvalue = *(host_addr + i);
3013                 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
3014                 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 2, i ? 0 :
3015                     repcount))
3016                         hp->save.acc.ahi_put16(&hp->save.acc, addr,
3017                             (uint16_t)llvalue);
3018         }
3019         mutex_exit(&bofi_mutex);
3020 }
3021 
3022 
3023 /*
3024  * our rep_putl() routine - use tryenter
3025  */
3026 static void
3027 bofi_rep_wr32(ddi_acc_impl_t *handle, uint32_t *host_addr,
3028         uint32_t *dev_addr, size_t repcount, uint_t flags)
3029 {
3030         struct bofi_shadow *hp;
3031         int i;
3032         uint64_t llvalue;
3033         uint32_t *addr;
3034 
3035         hp = handle->ahi_common.ah_bus_private;
3036         BOFI_REP_WRITE_TESTS(uint32_t)
3037         if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
3038                 hp->save.acc.ahi_rep_put32(&hp->save.acc, host_addr, dev_addr,
3039                     repcount, flags);
3040                 return;
3041         }
3042         for (i = 0; i < repcount; i++) {
3043                 llvalue = *(host_addr + i);
3044                 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
3045                 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 4, i ? 0 :
3046                     repcount))
3047                         hp->save.acc.ahi_put32(&hp->save.acc, addr,
3048                             (uint32_t)llvalue);
3049         }
3050         mutex_exit(&bofi_mutex);
3051 }
3052 
3053 
3054 /*
3055  * our rep_putll() routine - use tryenter
3056  */
3057 static void
3058 bofi_rep_wr64(ddi_acc_impl_t *handle, uint64_t *host_addr,
3059         uint64_t *dev_addr, size_t repcount, uint_t flags)
3060 {
3061         struct bofi_shadow *hp;
3062         int i;
3063         uint64_t llvalue;
3064         uint64_t *addr;
3065 
3066         hp = handle->ahi_common.ah_bus_private;
3067         BOFI_REP_WRITE_TESTS(uint64_t)
3068         if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
3069                 hp->save.acc.ahi_rep_put64(&hp->save.acc, host_addr, dev_addr,
3070                     repcount, flags);
3071                 return;
3072         }
3073         for (i = 0; i < repcount; i++) {
3074                 llvalue = *(host_addr + i);
3075                 addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
3076                 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 8, i ? 0 :
3077                     repcount))
3078                         hp->save.acc.ahi_put64(&hp->save.acc, addr,
3079                             (uint64_t)llvalue);
3080         }
3081         mutex_exit(&bofi_mutex);
3082 }
3083 
3084 
3085 /*
3086  * our ddi_map routine
3087  */
3088 static int
3089 bofi_map(dev_info_t *dip, dev_info_t *rdip,
3090         ddi_map_req_t *reqp, off_t offset, off_t len, caddr_t *vaddrp)
3091 {
3092         ddi_acc_impl_t *ap;
3093         struct bofi_shadow *hp;
3094         struct bofi_errent *ep;
3095         struct bofi_link   *lp, *next_lp;
3096         int retval;
3097         struct bofi_shadow *dhashp;
3098         struct bofi_shadow *hhashp;
3099 
3100         switch (reqp->map_op) {
3101         case DDI_MO_MAP_LOCKED:
3102                 /*
3103                  * for this case get nexus to do real work first
3104                  */
3105                 retval = save_bus_ops.bus_map(dip, rdip, reqp, offset, len,
3106                     vaddrp);
3107                 if (retval != DDI_SUCCESS)
3108                         return (retval);
3109 
3110                 ap = (ddi_acc_impl_t *)reqp->map_handlep;
3111                 if (ap == NULL)
3112                         return (DDI_SUCCESS);
3113                 /*
3114                  * if driver_list is set, only intercept those drivers
3115                  */
3116                 if (!driver_under_test(ap->ahi_common.ah_dip))
3117                         return (DDI_SUCCESS);
3118 
3119                 /*
3120                  * support for ddi_regs_map_setup()
3121                  * - allocate shadow handle structure and fill it in
3122                  */
3123                 hp = kmem_zalloc(sizeof (struct bofi_shadow), KM_SLEEP);
3124                 (void) strncpy(hp->name, ddi_get_name(ap->ahi_common.ah_dip),
3125                     NAMESIZE);
3126                 hp->instance = ddi_get_instance(ap->ahi_common.ah_dip);
3127                 hp->dip = ap->ahi_common.ah_dip;
3128                 hp->addr = *vaddrp;
3129                 /*
3130                  * return spurious value to catch direct access to registers
3131                  */
3132                 if (bofi_ddi_check)
3133                         *vaddrp = (caddr_t)64;
3134                 hp->rnumber = ((ddi_acc_hdl_t *)ap)->ah_rnumber;
3135                 hp->offset = offset;
3136                 if (len == 0)
3137                         hp->len = INT_MAX - offset;
3138                 else
3139                         hp->len = min(len, INT_MAX - offset);
3140                 hp->hdl.acc_handle = (ddi_acc_handle_t)ap;
3141                 hp->link = NULL;
3142                 hp->type = BOFI_ACC_HDL;
3143                 /*
3144                  * save existing function pointers and plug in our own
3145                  */
3146                 hp->save.acc = *ap;
3147                 ap->ahi_get8 = bofi_rd8;
3148                 ap->ahi_get16 = bofi_rd16;
3149                 ap->ahi_get32 = bofi_rd32;
3150                 ap->ahi_get64 = bofi_rd64;
3151                 ap->ahi_put8 = bofi_wr8;
3152                 ap->ahi_put16 = bofi_wr16;
3153                 ap->ahi_put32 = bofi_wr32;
3154                 ap->ahi_put64 = bofi_wr64;
3155                 ap->ahi_rep_get8 = bofi_rep_rd8;
3156                 ap->ahi_rep_get16 = bofi_rep_rd16;
3157                 ap->ahi_rep_get32 = bofi_rep_rd32;
3158                 ap->ahi_rep_get64 = bofi_rep_rd64;
3159                 ap->ahi_rep_put8 = bofi_rep_wr8;
3160                 ap->ahi_rep_put16 = bofi_rep_wr16;
3161                 ap->ahi_rep_put32 = bofi_rep_wr32;
3162                 ap->ahi_rep_put64 = bofi_rep_wr64;
3163                 ap->ahi_fault_check = bofi_check_acc_hdl;
3164 #if defined(__sparc)
3165 #else
3166                 ap->ahi_acc_attr &= ~DDI_ACCATTR_DIRECT;
3167 #endif
3168                 /*
3169                  * stick in a pointer to our shadow handle
3170                  */
3171                 ap->ahi_common.ah_bus_private = hp;
3172                 /*
3173                  * add to dhash, hhash and inuse lists
3174                  */
3175                 mutex_enter(&bofi_low_mutex);
3176                 mutex_enter(&bofi_mutex);
3177                 hp->next = shadow_list.next;
3178                 shadow_list.next->prev = hp;
3179                 hp->prev = &shadow_list;
3180                 shadow_list.next = hp;
3181                 hhashp = HDL_HHASH(ap);
3182                 hp->hnext = hhashp->hnext;
3183                 hhashp->hnext->hprev = hp;
3184                 hp->hprev = hhashp;
3185                 hhashp->hnext = hp;
3186                 dhashp = HDL_DHASH(hp->dip);
3187                 hp->dnext = dhashp->dnext;
3188                 dhashp->dnext->dprev = hp;
3189                 hp->dprev = dhashp;
3190                 dhashp->dnext = hp;
3191                 /*
3192                  * chain on any pre-existing errdefs that apply to this
3193                  * acc_handle
3194                  */
3195                 for (ep = errent_listp; ep != NULL; ep = ep->next) {
3196                         if (ddi_name_to_major(hp->name) ==
3197                             ddi_name_to_major(ep->name) &&
3198                             hp->instance == ep->errdef.instance &&
3199                             (ep->errdef.access_type & BOFI_PIO_RW) &&
3200                             (ep->errdef.rnumber == -1 ||
3201                             hp->rnumber == ep->errdef.rnumber) &&
3202                             (ep->errdef.len == 0 ||
3203                             offset < ep->errdef.offset + ep->errdef.len) &&
3204                             offset + hp->len > ep->errdef.offset) {
3205                                 lp = bofi_link_freelist;
3206                                 if (lp != NULL) {
3207                                         bofi_link_freelist = lp->link;
3208                                         lp->errentp = ep;
3209                                         lp->link = hp->link;
3210                                         hp->link = lp;
3211                                 }
3212                         }
3213                 }
3214                 mutex_exit(&bofi_mutex);
3215                 mutex_exit(&bofi_low_mutex);
3216                 return (DDI_SUCCESS);
3217         case DDI_MO_UNMAP:
3218 
3219                 ap = (ddi_acc_impl_t *)reqp->map_handlep;
3220                 if (ap == NULL)
3221                         break;
3222                 /*
3223                  * support for ddi_regs_map_free()
3224                  * - check we really have a shadow handle for this one
3225                  */
3226                 mutex_enter(&bofi_low_mutex);
3227                 mutex_enter(&bofi_mutex);
3228                 hhashp = HDL_HHASH(ap);
3229                 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3230                         if (hp->hdl.acc_handle == (ddi_acc_handle_t)ap)
3231                                 break;
3232                 if (hp == hhashp) {
3233                         mutex_exit(&bofi_mutex);
3234                         mutex_exit(&bofi_low_mutex);
3235                         break;
3236                 }
3237                 /*
3238                  * got a shadow handle - restore original pointers
3239                  */
3240                 *ap = hp->save.acc;
3241                 *vaddrp = hp->addr;
3242                 /*
3243                  * remove from dhash, hhash and inuse lists
3244                  */
3245                 hp->hnext->hprev = hp->hprev;
3246                 hp->hprev->hnext = hp->hnext;
3247                 hp->dnext->dprev = hp->dprev;
3248                 hp->dprev->dnext = hp->dnext;
3249                 hp->next->prev = hp->prev;
3250                 hp->prev->next = hp->next;
3251                 /*
3252                  * free any errdef link structures tagged onto the shadow handle
3253                  */
3254                 for (lp = hp->link; lp != NULL; ) {
3255                         next_lp = lp->link;
3256                         lp->link = bofi_link_freelist;
3257                         bofi_link_freelist = lp;
3258                         lp = next_lp;
3259                 }
3260                 hp->link = NULL;
3261                 mutex_exit(&bofi_mutex);
3262                 mutex_exit(&bofi_low_mutex);
3263                 /*
3264                  * finally delete shadow handle
3265                  */
3266                 kmem_free(hp, sizeof (struct bofi_shadow));
3267                 break;
3268         default:
3269                 break;
3270         }
3271         return (save_bus_ops.bus_map(dip, rdip, reqp, offset, len, vaddrp));
3272 }
3273 
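/*
 * Annotation - rough flow for the DDI_MO_MAP_LOCKED case above, assuming a
 * driver under test calls ddi_regs_map_setup():
 *
 *   ddi_regs_map_setup()
 *     -> nexus bus_map() performs the real mapping
 *     -> bofi_map() records it in a bofi_shadow and swaps the handle's
 *        ahi_get/ahi_put/ahi_rep vectors for the bofi_rd/bofi_wr routines
 *   ddi_get8(handle, addr)
 *     -> bofi_rd8() -> do_pior_corrupt() against any linked errdefs
 *     -> hp->save.acc.ahi_get8() for the real device access
 *
 * ddi_regs_map_free() takes the DDI_MO_UNMAP path, which restores the
 * saved access vectors and tears down the shadow handle.
 */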
3274 
3275 /*
3276  * chain any pre-existing errdefs on to newly created dma handle
3277  * if required call do_dma_corrupt() to corrupt data
3278  */
3279 static void
3280 chain_on_errdefs(struct bofi_shadow *hp)
3281 {
3282         struct bofi_errent *ep;
3283         struct bofi_link   *lp;
3284 
3285         ASSERT(MUTEX_HELD(&bofi_mutex));
3286         /*
3287          * chain on any pre-existing errdefs that apply to this dma_handle
3288          */
3289         for (ep = errent_listp; ep != NULL; ep = ep->next) {
3290                 if (ddi_name_to_major(hp->name) ==
3291                     ddi_name_to_major(ep->name) &&
3292                     hp->instance == ep->errdef.instance &&
3293                     (ep->errdef.rnumber == -1 ||
3294                     hp->rnumber == ep->errdef.rnumber) &&
3295                     ((ep->errdef.access_type & BOFI_DMA_RW) &&
3296                     (((uintptr_t)(hp->addr + ep->errdef.offset +
3297                     ep->errdef.len) & ~LLSZMASK) >
3298                     ((uintptr_t)((hp->addr + ep->errdef.offset) +
3299                     LLSZMASK) & ~LLSZMASK)))) {
3300                         /*
3301                          * got a match - link it on
3302                          */
3303                         lp = bofi_link_freelist;
3304                         if (lp != NULL) {
3305                                 bofi_link_freelist = lp->link;
3306                                 lp->errentp = ep;
3307                                 lp->link = hp->link;
3308                                 hp->link = lp;
3309                                 if ((ep->errdef.access_type & BOFI_DMA_W) &&
3310                                     (hp->flags & DDI_DMA_WRITE) &&
3311                                     (ep->state & BOFI_DEV_ACTIVE)) {
3312                                         do_dma_corrupt(hp, ep,
3313                                             DDI_DMA_SYNC_FORDEV,
3314                                             0, hp->len);
3315                                 }
3316                         }
3317                 }
3318         }
3319 }
3320 
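/*
 * Annotation: the LLSZMASK arithmetic above appears to require that the
 * errdef's offset/len window, rounded inward to aligned 64-bit words,
 * still overlaps the handle - presumably because DMA corruption is applied
 * in 64-bit chunks, so an errdef that cannot cover a whole aligned word is
 * never linked on.
 */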
3321 
3322 /*
3323  * need to copy byte-by-byte in case one of the pages is little-endian
3324  */
3325 static void
3326 xbcopy(void *from, void *to, u_longlong_t len)
3327 {
3328         uchar_t *f = from;
3329         uchar_t *t = to;
3330 
3331         while (len--)
3332                 *t++ = *f++;
3333 }
3334 
3335 
3336 /*
3337  * our ddi_dma_allochdl routine
3338  */
3339 static int
3340 bofi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp,
3341         int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
3342 {
3343         int retval = DDI_DMA_NORESOURCES;
3344         struct bofi_shadow *hp, *xhp;
3345         int maxrnumber = 0;
3346         struct bofi_shadow *dhashp;
3347         struct bofi_shadow *hhashp;
3348         ddi_dma_impl_t *mp;
3349 
3350         /*
3351          * if driver_list is set, only intercept those drivers
3352          */
3353         if (!driver_under_test(rdip))
3354                 return (save_bus_ops.bus_dma_allochdl(dip, rdip, attrp,
3355                     waitfp, arg, handlep));
3356 
3357         /*
3358          * allocate shadow handle structure and fill it in
3359          */
3360         hp = kmem_zalloc(sizeof (struct bofi_shadow),
3361             ((waitfp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP));
3362         if (hp == NULL) {
3363                 /*
3364                  * what to do here? Wait a bit and try again
3365                  */
3366                 if (waitfp != DDI_DMA_DONTWAIT)
3367                         (void) timeout((void (*)())waitfp, arg, 10);
3368                 return (retval);
3369         }
3370         (void) strncpy(hp->name, ddi_get_name(rdip), NAMESIZE);
3371         hp->instance = ddi_get_instance(rdip);
3372         hp->dip = rdip;
3373         hp->link = NULL;
3374         hp->type = BOFI_NULL;
3375         /*
3376          * call nexus to do the real work
3377          */
3378         retval = save_bus_ops.bus_dma_allochdl(dip, rdip, attrp, waitfp, arg,
3379             handlep);
3380         if (retval != DDI_SUCCESS) {
3381                 kmem_free(hp, sizeof (struct bofi_shadow));
3382                 return (retval);
3383         }
3384         /*
3385          * now set dma_handle to point to the real handle
3386          */
3387         hp->hdl.dma_handle = *handlep;
3388         mp = (ddi_dma_impl_t *)*handlep;
3389         mp->dmai_fault_check = bofi_check_dma_hdl;
3390         /*
3391          * bind and unbind are cached in devinfo - must overwrite them
3392          * - note that our bind and unbind are quite happy dealing with
3393          * any handles for this devinfo that were previously allocated
3394          */
3395         if (save_bus_ops.bus_dma_bindhdl == DEVI(rdip)->devi_bus_dma_bindfunc)
3396                 DEVI(rdip)->devi_bus_dma_bindfunc = bofi_dma_bindhdl;
3397         if (save_bus_ops.bus_dma_unbindhdl ==
3398             DEVI(rdip)->devi_bus_dma_unbindfunc)
3399                 DEVI(rdip)->devi_bus_dma_unbindfunc = bofi_dma_unbindhdl;
3400         mutex_enter(&bofi_low_mutex);
3401         mutex_enter(&bofi_mutex);
3402         /*
3403          * get an "rnumber" for this handle - really just seeking to
3404          * get a unique number - generally only care about early allocated
3405          * handles - so once we get as far as INT_MAX, we just stay there
3406          */
3407         dhashp = HDL_DHASH(hp->dip);
3408         for (xhp = dhashp->dnext; xhp != dhashp; xhp = xhp->dnext)
3409                 if (ddi_name_to_major(xhp->name) ==
3410                     ddi_name_to_major(hp->name) &&
3411                     xhp->instance == hp->instance &&
3412                     (xhp->type == BOFI_DMA_HDL ||
3413                     xhp->type == BOFI_NULL))
3414                         if (xhp->rnumber >= maxrnumber) {
3415                                 if (xhp->rnumber == INT_MAX)
3416                                         maxrnumber = INT_MAX;
3417                                 else
3418                                         maxrnumber = xhp->rnumber + 1;
3419                         }
3420         hp->rnumber = maxrnumber;
3421         /*
3422          * add to dhash, hhash and inuse lists
3423          */
3424         hp->next = shadow_list.next;
3425         shadow_list.next->prev = hp;
3426         hp->prev = &shadow_list;
3427         shadow_list.next = hp;
3428         hhashp = HDL_HHASH(*handlep);
3429         hp->hnext = hhashp->hnext;
3430         hhashp->hnext->hprev = hp;
3431         hp->hprev = hhashp;
3432         hhashp->hnext = hp;
3433         dhashp = HDL_DHASH(hp->dip);
3434         hp->dnext = dhashp->dnext;
3435         dhashp->dnext->dprev = hp;
3436         hp->dprev = dhashp;
3437         dhashp->dnext = hp;
3438         mutex_exit(&bofi_mutex);
3439         mutex_exit(&bofi_low_mutex);
3440         return (retval);
3441 }
3442 
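/*
 * Annotation: the synthesized "rnumber" above gives each DMA handle of a
 * given driver instance a unique, roughly allocation-ordered number, so an
 * errdef can target one particular handle; an errdef rnumber of -1 matches
 * any handle, as the checks in chain_on_errdefs() and bofi_map() show.
 */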
3443 
3444 /*
3445  * our ddi_dma_freehdl routine
3446  */
3447 static int
3448 bofi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
3449 {
3450         int retval;
3451         struct bofi_shadow *hp;
3452         struct bofi_shadow *hhashp;
3453 
3454         /*
3455          * find shadow for this handle
3456          */
3457         mutex_enter(&bofi_low_mutex);
3458         mutex_enter(&bofi_mutex);
3459         hhashp = HDL_HHASH(handle);
3460         for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3461                 if (hp->hdl.dma_handle == handle)
3462                         break;
3463         mutex_exit(&bofi_mutex);
3464         mutex_exit(&bofi_low_mutex);
3465         /*
3466          * call nexus to do the real work
3467          */
3468         retval = save_bus_ops.bus_dma_freehdl(dip, rdip, handle);
3469         if (retval != DDI_SUCCESS) {
3470                 return (retval);
3471         }
3472         /*
3473          * did we really have a shadow for this handle
3474          */
3475         if (hp == hhashp)
3476                 return (retval);
3477         /*
3478          * yes we have - see if it's still bound
3479          */
3480         mutex_enter(&bofi_low_mutex);
3481         mutex_enter(&bofi_mutex);
3482         if (hp->type != BOFI_NULL)
3483                 panic("driver freeing bound dma_handle");
3484         /*
3485          * remove from dhash, hhash and inuse lists
3486          */
3487         hp->hnext->hprev = hp->hprev;
3488         hp->hprev->hnext = hp->hnext;
3489         hp->dnext->dprev = hp->dprev;
3490         hp->dprev->dnext = hp->dnext;
3491         hp->next->prev = hp->prev;
3492         hp->prev->next = hp->next;
3493         mutex_exit(&bofi_mutex);
3494         mutex_exit(&bofi_low_mutex);
3495 
3496         kmem_free(hp, sizeof (struct bofi_shadow));
3497         return (retval);
3498 }
3499 
3500 
3501 /*
3502  * our ddi_dma_bindhdl routine
3503  */
3504 static int
3505 bofi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
3506         ddi_dma_handle_t handle, struct ddi_dma_req *dmareqp,
3507         ddi_dma_cookie_t *cookiep, uint_t *ccountp)
3508 {
3509         int retval = DDI_DMA_NORESOURCES;
3510         auto struct ddi_dma_req dmareq;
3511         struct bofi_shadow *hp;
3512         struct bofi_shadow *hhashp;
3513         ddi_dma_impl_t *mp;
3514         unsigned long pagemask = ddi_ptob(rdip, 1) - 1;
3515 
3516         /*
3517          * check we really have a shadow for this handle
3518          */
3519         mutex_enter(&bofi_low_mutex);
3520         mutex_enter(&bofi_mutex);
3521         hhashp = HDL_HHASH(handle);
3522         for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3523                 if (hp->hdl.dma_handle == handle)
3524                         break;
3525         mutex_exit(&bofi_mutex);
3526         mutex_exit(&bofi_low_mutex);
3527         if (hp == hhashp) {
3528                 /*
3529                  * no we don't - just call nexus to do the real work
3530                  */
3531                 return (save_bus_ops.bus_dma_bindhdl(dip, rdip, handle,
3532                     dmareqp, cookiep, ccountp));
3533         }
3534         /*
3535          * yes we have - see if it's already bound
3536          */
3537         if (hp->type != BOFI_NULL)
3538                 return (DDI_DMA_INUSE);
3539 
3540         hp->flags = dmareqp->dmar_flags;
3541         if (dmareqp->dmar_object.dmao_type == DMA_OTYP_PAGES) {
3542                 hp->map_flags = B_PAGEIO;
3543                 hp->map_pp = dmareqp->dmar_object.dmao_obj.pp_obj.pp_pp;
3544         } else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_priv != NULL) {
3545                 hp->map_flags = B_SHADOW;
3546                 hp->map_pplist = dmareqp->dmar_object.dmao_obj.virt_obj.v_priv;
3547         } else {
3548                 hp->map_flags = 0;
3549         }
3550         /*
3551          * get a kernel virtual mapping
3552          */
3553         hp->addr = ddi_dmareq_mapin(dmareqp, &hp->mapaddr, &hp->len);
3554         if (hp->addr == NULL)
3555                 goto error;
3556         if (bofi_sync_check) {
3557                 /*
3558                  * Take a copy and pass pointers to this up to nexus instead.
3559                  * Data will be copied from the original on explicit
3560                  * and implicit ddi_dma_sync()
3561                  *
3562                  * - maintain page alignment because some devices assume it.
3563                  */
3564                 hp->origaddr = hp->addr;
3565                 hp->allocaddr = ddi_umem_alloc(
3566                     ((uintptr_t)hp->addr & pagemask) + hp->len,
3567                     (dmareqp->dmar_fp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP,
3568                     &hp->umem_cookie);
3569                 if (hp->allocaddr == NULL)
3570                         goto error;
3571                 hp->addr = hp->allocaddr + ((uintptr_t)hp->addr & pagemask);
3572                 if (dmareqp->dmar_flags & DDI_DMA_WRITE)
3573                         xbcopy(hp->origaddr, hp->addr, hp->len);
3574                 dmareq = *dmareqp;
3575                 dmareq.dmar_object.dmao_size = hp->len;
3576                 dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
3577                 dmareq.dmar_object.dmao_obj.virt_obj.v_as = &kas;
3578                 dmareq.dmar_object.dmao_obj.virt_obj.v_addr = hp->addr;
3579                 dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
3580                 dmareqp = &dmareq;
3581         }
3582         /*
3583          * call nexus to do the real work
3584          */
3585         retval = save_bus_ops.bus_dma_bindhdl(dip, rdip, handle, dmareqp,
3586             cookiep, ccountp);
3587         if (retval != DDI_SUCCESS)
3588                 goto error2;
3589         /*
3590          * unset DMP_NOSYNC
3591          */
3592         mp = (ddi_dma_impl_t *)handle;
3593         mp->dmai_rflags &= ~DMP_NOSYNC;
3594         /*
3595          * chain on any pre-existing errdefs that apply to this
3596          * acc_handle and corrupt if required (as there is an implicit
3597          * ddi_dma_sync() in this call)
3598          */
3599         mutex_enter(&bofi_low_mutex);
3600         mutex_enter(&bofi_mutex);
3601         hp->type = BOFI_DMA_HDL;
3602         chain_on_errdefs(hp);
3603         mutex_exit(&bofi_mutex);
3604         mutex_exit(&bofi_low_mutex);
3605         return (retval);
3606 
3607 error:
3608         if (dmareqp->dmar_fp != DDI_DMA_DONTWAIT) {
3609                 /*
3610                  * what to do here? Wait a bit and try again
3611                  */
3612                 (void) timeout((void (*)())dmareqp->dmar_fp,
3613                     dmareqp->dmar_arg, 10);
3614         }
3615 error2:
3616         if (hp) {
3617                 ddi_dmareq_mapout(hp->mapaddr, hp->len, hp->map_flags,
3618                     hp->map_pp, hp->map_pplist);
3619                 if (bofi_sync_check && hp->allocaddr)
3620                         ddi_umem_free(hp->umem_cookie);
3621                 hp->mapaddr = NULL;
3622                 hp->allocaddr = NULL;
3623                 hp->origaddr = NULL;
3624         }
3625         return (retval);
3626 }
3627 
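/*
 * Annotation on the bofi_sync_check path above: when set, the nexus is
 * handed a private copy of the driver's buffer (hp->addr, carved out of
 * hp->allocaddr) rather than the original (hp->origaddr). xbcopy() moves
 * data original -> copy on DDI_DMA_SYNC_FORDEV and copy -> original on
 * DDI_DMA_SYNC_FORCPU/FORKERNEL (see bofi_dma_flush() and
 * bofi_dma_unbindhdl()), which presumably lets missing ddi_dma_sync()
 * calls in the driver show up as stale data.
 */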
3628 
3629 /*
3630  * our ddi_dma_unbindhdl routine
3631  */
3632 static int
3633 bofi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
3634 {
3635         struct bofi_link *lp, *next_lp;
3636         struct bofi_errent *ep;
3637         int retval;
3638         struct bofi_shadow *hp;
3639         struct bofi_shadow *hhashp;
3640 
3641         /*
3642          * call nexus to do the real work
3643          */
3644         retval = save_bus_ops.bus_dma_unbindhdl(dip, rdip, handle);
3645         if (retval != DDI_SUCCESS)
3646                 return (retval);
3647         /*
3648          * check we really have a shadow for this handle
3649          */
3650         mutex_enter(&bofi_low_mutex);
3651         mutex_enter(&bofi_mutex);
3652         hhashp = HDL_HHASH(handle);
3653         for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3654                 if (hp->hdl.dma_handle == handle)
3655                         break;
3656         if (hp == hhashp) {
3657                 mutex_exit(&bofi_mutex);
3658                 mutex_exit(&bofi_low_mutex);
3659                 return (retval);
3660         }
3661         /*
3662          * yes we have - see if it's already unbound
3663          */
3664         if (hp->type == BOFI_NULL)
3665                 panic("driver unbinding unbound dma_handle");
3666         /*
3667          * free any errdef link structures tagged on to this
3668          * shadow handle
3669          */
3670         for (lp = hp->link; lp != NULL; ) {
3671                 next_lp = lp->link;
3672                 /*
3673                  * there is an implicit sync_for_cpu on free -
3674                  * may need to corrupt
3675                  */
3676                 ep = lp->errentp;
3677                 if ((ep->errdef.access_type & BOFI_DMA_R) &&
3678                     (hp->flags & DDI_DMA_READ) &&
3679                     (ep->state & BOFI_DEV_ACTIVE)) {
3680                         do_dma_corrupt(hp, ep, DDI_DMA_SYNC_FORCPU, 0, hp->len);
3681                 }
3682                 lp->link = bofi_link_freelist;
3683                 bofi_link_freelist = lp;
3684                 lp = next_lp;
3685         }
3686         hp->link = NULL;
3687         hp->type = BOFI_NULL;
3688         mutex_exit(&bofi_mutex);
3689         mutex_exit(&bofi_low_mutex);
3690 
3691         if (bofi_sync_check && (hp->flags & DDI_DMA_READ))
3692                 /*
3693                  * implicit sync_for_cpu - copy data back
3694                  */
3695                 if (hp->allocaddr)
3696                         xbcopy(hp->addr, hp->origaddr, hp->len);
3697         ddi_dmareq_mapout(hp->mapaddr, hp->len, hp->map_flags,
3698             hp->map_pp, hp->map_pplist);
3699         if (bofi_sync_check && hp->allocaddr)
3700                 ddi_umem_free(hp->umem_cookie);
3701         hp->mapaddr = NULL;
3702         hp->allocaddr = NULL;
3703         hp->origaddr = NULL;
3704         return (retval);
3705 }
3706 
3707 
3708 /*
3709  * our ddi_dma_sync routine
3710  */
3711 static int
3712 bofi_dma_flush(dev_info_t *dip, dev_info_t *rdip,
3713                 ddi_dma_handle_t handle, off_t off, size_t len, uint_t flags)
3714 {
3715         struct bofi_link *lp;
3716         struct bofi_errent *ep;
3717         struct bofi_shadow *hp;
3718         struct bofi_shadow *hhashp;
3719         int retval;
3720 
3721         if (flags == DDI_DMA_SYNC_FORCPU || flags == DDI_DMA_SYNC_FORKERNEL) {
3722                 /*
3723                  * in this case get nexus driver to do sync first
3724                  */
3725                 retval = save_bus_ops.bus_dma_flush(dip, rdip, handle, off,
3726                     len, flags);
3727                 if (retval != DDI_SUCCESS)
3728                         return (retval);
3729         }
3730         /*
3731          * check we really have a shadow for this handle
3732          */
3733         mutex_enter(&bofi_low_mutex);
3734         mutex_enter(&bofi_mutex);
3735         hhashp = HDL_HHASH(handle);
3736         for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3737                 if (hp->hdl.dma_handle == handle &&
3738                     hp->type == BOFI_DMA_HDL)
3739                         break;
3740         mutex_exit(&bofi_mutex);
3741         mutex_exit(&bofi_low_mutex);
3742         if (hp != hhashp) {
3743                 /*
3744                  * yes - do we need to copy data from original
3745                  */
3746                 if (bofi_sync_check && flags == DDI_DMA_SYNC_FORDEV)
3747                         if (hp->allocaddr)
3748                                 xbcopy(hp->origaddr+off, hp->addr+off,
3749                                     len ? len : (hp->len - off));
3750                 /*
3751                  * yes - check if we need to corrupt the data
3752                  */
3753                 mutex_enter(&bofi_low_mutex);
3754                 mutex_enter(&bofi_mutex);
3755                 for (lp = hp->link; lp != NULL; lp = lp->link) {
3756                         ep = lp->errentp;
3757                         if ((((ep->errdef.access_type & BOFI_DMA_R) &&
3758                             (flags == DDI_DMA_SYNC_FORCPU ||
3759                             flags == DDI_DMA_SYNC_FORKERNEL)) ||
3760                             ((ep->errdef.access_type & BOFI_DMA_W) &&
3761                             (flags == DDI_DMA_SYNC_FORDEV))) &&
3762                             (ep->state & BOFI_DEV_ACTIVE)) {
3763                                 do_dma_corrupt(hp, ep, flags, off,
3764                                     len ? len : (hp->len - off));
3765                         }
3766                 }
3767                 mutex_exit(&bofi_mutex);
3768                 mutex_exit(&bofi_low_mutex);
3769                 /*
3770                  * do we need to copy data to original
3771                  */
3772                 if (bofi_sync_check && (flags == DDI_DMA_SYNC_FORCPU ||
3773                     flags == DDI_DMA_SYNC_FORKERNEL))
3774                         if (hp->allocaddr)
3775                                 xbcopy(hp->addr+off, hp->origaddr+off,
3776                                     len ? len : (hp->len - off));
3777         }
3778         if (flags == DDI_DMA_SYNC_FORDEV)
3779                 /*
3780                  * in this case get nexus driver to do sync last
3781                  */
3782                 retval = save_bus_ops.bus_dma_flush(dip, rdip, handle, off,
3783                     len, flags);
3784         return (retval);
3785 }
3786 
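/*
 * Annotation: note the ordering above - for DDI_DMA_SYNC_FORCPU and
 * DDI_DMA_SYNC_FORKERNEL the real nexus sync runs first and corruption is
 * applied afterwards (so the CPU sees the corrupted data), whereas for
 * DDI_DMA_SYNC_FORDEV corruption is applied first and the nexus sync runs
 * last (so the corrupted data is what reaches the device).
 */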
3787 
3788 /*
3789  * our dma_win routine
3790  */
3791 static int
3792 bofi_dma_win(dev_info_t *dip, dev_info_t *rdip,
3793         ddi_dma_handle_t handle, uint_t win, off_t *offp,
3794         size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
3795 {
3796         struct bofi_shadow *hp;
3797         struct bofi_shadow *hhashp;
3798         int retval;
3799         ddi_dma_impl_t *mp;
3800 
3801         /*
3802          * call nexus to do the real work
3803          */
3804         retval = save_bus_ops.bus_dma_win(dip, rdip, handle, win, offp, lenp,
3805             cookiep, ccountp);
3806         if (retval != DDI_SUCCESS)
3807                 return (retval);
3808         /*
3809          * check we really have a shadow for this handle
3810          */
3811         mutex_enter(&bofi_low_mutex);
3812         mutex_enter(&bofi_mutex);
3813         hhashp = HDL_HHASH(handle);
3814         for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3815                 if (hp->hdl.dma_handle == handle)
3816                         break;
3817         if (hp != hhashp) {
3818                 /*
3819                  * yes - make sure DMP_NOSYNC is unset
3820                  */
3821                 mp = (ddi_dma_impl_t *)handle;
3822                 mp->dmai_rflags &= ~DMP_NOSYNC;
3823         }
3824         mutex_exit(&bofi_mutex);
3825         mutex_exit(&bofi_low_mutex);
3826         return (retval);
3827 }
3828 
3829 
3830 /*
3831  * our dma_ctl routine
3832  */
3833 static int
3834 bofi_dma_ctl(dev_info_t *dip, dev_info_t *rdip,
3835                 ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
3836                 off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
3837 {
3838         struct bofi_shadow *hp;
3839         struct bofi_shadow *hhashp;
3840         int retval;
3841         int i;
3842         struct bofi_shadow *dummyhp;
3843 
3844         /*
3845          * get nexus to do real work
3846          */
3847         retval = save_bus_ops.bus_dma_ctl(dip, rdip, handle, request, offp,
3848             lenp, objp, flags);
3849         if (retval != DDI_SUCCESS)
3850                 return (retval);
3851         /*
3852          * if driver_list is set, only intercept those drivers
3853          */
3854         if (!driver_under_test(rdip))
3855                 return (DDI_SUCCESS);
3856 
3857 #if defined(__sparc)
3858         /*
3859          * check if this is a dvma_reserve - that one's like a
3860          * dma_allochdl and needs to be handled separately
3861          */
3862         if (request == DDI_DMA_RESERVE) {
3863                 bofi_dvma_reserve(rdip, *(ddi_dma_handle_t *)objp);
3864                 return (DDI_SUCCESS);
3865         }
3866 #endif
3867         /*
3868          * check we really have a shadow for this handle
3869          */
3870         mutex_enter(&bofi_low_mutex);
3871         mutex_enter(&bofi_mutex);
3872         hhashp = HDL_HHASH(handle);
3873         for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3874                 if (hp->hdl.dma_handle == handle)
3875                         break;
3876         if (hp == hhashp) {
3877                 mutex_exit(&bofi_mutex);
3878                 mutex_exit(&bofi_low_mutex);
3879                 return (retval);
3880         }
3881         /*
3882          * yes we have - see what kind of command this is
3883          */
3884         switch (request) {
3885         case DDI_DMA_RELEASE:
3886                 /*
3887                  * dvma release - release dummy handle and all the index handles
3888                  */
3889                 dummyhp = hp;
3890                 dummyhp->hnext->hprev = dummyhp->hprev;
3891                 dummyhp->hprev->hnext = dummyhp->hnext;
3892                 mutex_exit(&bofi_mutex);
3893                 mutex_exit(&bofi_low_mutex);
3894                 for (i = 0; i < dummyhp->len; i++) {
3895                         hp = dummyhp->hparrayp[i];
3896                         /*
3897                          * check none of the index handles were still loaded
3898                          */
3899                         if (hp->type != BOFI_NULL)
3900                                 panic("driver releasing loaded dvma");
3901                         /*
3902                          * remove from dhash and inuse lists
3903                          */
3904                         mutex_enter(&bofi_low_mutex);
3905                         mutex_enter(&bofi_mutex);
3906                         hp->dnext->dprev = hp->dprev;
3907                         hp->dprev->dnext = hp->dnext;
3908                         hp->next->prev = hp->prev;
3909                         hp->prev->next = hp->next;
3910                         mutex_exit(&bofi_mutex);
3911                         mutex_exit(&bofi_low_mutex);
3912 
3913                         if (bofi_sync_check && hp->allocaddr)
3914                                 ddi_umem_free(hp->umem_cookie);
3915                         kmem_free(hp, sizeof (struct bofi_shadow));
3916                 }
3917                 kmem_free(dummyhp->hparrayp, dummyhp->len *
3918                     sizeof (struct bofi_shadow *));
3919                 kmem_free(dummyhp, sizeof (struct bofi_shadow));
3920                 return (retval);
3921         default:
3922                 break;
3923         }
3924         mutex_exit(&bofi_mutex);
3925         mutex_exit(&bofi_low_mutex);
3926         return (retval);
3927 }
3928 
3929 #if defined(__sparc)
3930 /*
3931  * dvma reserve case from bofi_dma_ctl()
3932  */
3933 static void
3934 bofi_dvma_reserve(dev_info_t *rdip, ddi_dma_handle_t handle)
3935 {
3936         struct bofi_shadow *hp;
3937         struct bofi_shadow *dummyhp;
3938         struct bofi_shadow *dhashp;
3939         struct bofi_shadow *hhashp;
3940         ddi_dma_impl_t *mp;
3941         struct fast_dvma *nexus_private;
3942         int i, count;
3943 
3944         mp = (ddi_dma_impl_t *)handle;
3945         count = mp->dmai_ndvmapages;
3946         /*
3947          * allocate dummy shadow handle structure
3948          */
3949         dummyhp = kmem_zalloc(sizeof (*dummyhp), KM_SLEEP);
3950         if (mp->dmai_rflags & DMP_BYPASSNEXUS) {
3951                 /*
3952                  * overlay our routines over the nexus's dvma routines
3953                  */
3954                 nexus_private = (struct fast_dvma *)mp->dmai_nexus_private;
3955                 dummyhp->save.dvma_ops = *(nexus_private->ops);
3956                 nexus_private->ops = &bofi_dvma_ops;
3957         }
3958         /*
3959          * now fill in the dummy handle. This just gets put on the hhash
3960          * queue so our dvma routines can find it and index off to the
3961          * handle they really want.
3962          */
3963         (void) strncpy(dummyhp->name, ddi_get_name(rdip), NAMESIZE);
3964         dummyhp->instance = ddi_get_instance(rdip);
3965         dummyhp->rnumber = -1;
3966         dummyhp->dip = rdip;
3967         dummyhp->len = count;
3968         dummyhp->hdl.dma_handle = handle;
3969         dummyhp->link = NULL;
3970         dummyhp->type = BOFI_NULL;
3971         /*
3972          * allocate space for real handles
3973          */
3974         dummyhp->hparrayp = kmem_alloc(count *
3975             sizeof (struct bofi_shadow *), KM_SLEEP);
3976         for (i = 0; i < count; i++) {
3977                 /*
3978                  * allocate shadow handle structures and fill them in
3979                  */
3980                 hp = kmem_zalloc(sizeof (*hp), KM_SLEEP);
3981                 (void) strncpy(hp->name, ddi_get_name(rdip), NAMESIZE);
3982                 hp->instance = ddi_get_instance(rdip);
3983                 hp->rnumber = -1;
3984                 hp->dip = rdip;
3985                 hp->hdl.dma_handle = 0;
3986                 hp->link = NULL;
3987                 hp->type = BOFI_NULL;
3988                 if (bofi_sync_check) {
3989                         unsigned long pagemask = ddi_ptob(rdip, 1) - 1;
3990                         /*
3991                          * Take a copy and set this to be hp->addr
3992                          * Data will be copied to and from the original on
3993                          * explicit and implicit ddi_dma_sync()
3994                          *
3995                          * - maintain page alignment because some devices
3996                          * assume it.
3997                          */
3998                         hp->allocaddr = ddi_umem_alloc(
3999                             ((int)(uintptr_t)hp->addr & pagemask)
4000                             + pagemask + 1,
4001                             KM_SLEEP, &hp->umem_cookie);
4002                         hp->addr = hp->allocaddr +
4003                             ((int)(uintptr_t)hp->addr & pagemask);
4004                 }
4005                 /*
4006                  * add to dhash and inuse lists.
4007                  * these don't go on hhash queue.
4008                  */
4009                 mutex_enter(&bofi_low_mutex);
4010                 mutex_enter(&bofi_mutex);
4011                 hp->next = shadow_list.next;
4012                 shadow_list.next->prev = hp;
4013                 hp->prev = &shadow_list;
4014                 shadow_list.next = hp;
4015                 dhashp = HDL_DHASH(hp->dip);
4016                 hp->dnext = dhashp->dnext;
4017                 dhashp->dnext->dprev = hp;
4018                 hp->dprev = dhashp;
4019                 dhashp->dnext = hp;
4020                 dummyhp->hparrayp[i] = hp;
4021                 mutex_exit(&bofi_mutex);
4022                 mutex_exit(&bofi_low_mutex);
4023         }
4024         /*
4025          * add dummy handle to hhash list only
4026          */
4027         mutex_enter(&bofi_low_mutex);
4028         mutex_enter(&bofi_mutex);
4029         hhashp = HDL_HHASH(handle);
4030         dummyhp->hnext = hhashp->hnext;
4031         hhashp->hnext->hprev = dummyhp;
4032         dummyhp->hprev = hhashp;
4033         hhashp->hnext = dummyhp;
4034         mutex_exit(&bofi_mutex);
4035         mutex_exit(&bofi_low_mutex);
4036 }
4037 
4038 /*
4039  * our dvma_kaddr_load()
4040  */
4041 static void
4042 bofi_dvma_kaddr_load(ddi_dma_handle_t h, caddr_t a, uint_t len, uint_t index,
4043         ddi_dma_cookie_t *cp)
4044 {
4045         struct bofi_shadow *dummyhp;
4046         struct bofi_shadow *hp;
4047         struct bofi_shadow *hhashp;
4048         struct bofi_errent *ep;
4049         struct bofi_link   *lp;
4050 
4051         /*
4052          * check we really have a dummy shadow for this handle
4053          */
4054         mutex_enter(&bofi_low_mutex);
4055         mutex_enter(&bofi_mutex);
4056         hhashp = HDL_HHASH(h);
4057         for (dummyhp = hhashp->hnext; dummyhp != hhashp;
4058             dummyhp = dummyhp->hnext)
4059                 if (dummyhp->hdl.dma_handle == h)
4060                         break;
4061         mutex_exit(&bofi_mutex);
4062         mutex_exit(&bofi_low_mutex);
4063         if (dummyhp == hhashp) {
4064                 /*
4065                  * no dummy shadow - panic
4066                  */
4067                 panic("driver dvma_kaddr_load with no reserve");
4068         }
4069 
4070         /*
4071          * find real hp
4072          */
4073         hp = dummyhp->hparrayp[index];
4074         /*
4075          * check it's not already loaded
4076          */
4077         if (hp->type != BOFI_NULL)
4078                 panic("driver loading loaded dvma");
4079         /*
4080          * if we're doing copying, just need to change origaddr and get
4081          * nexus to map hp->addr again
4082          * if not, set hp->addr to new address.
4083          * - note these are always kernel virtual addresses - no need to map
4084          */
4085         if (bofi_sync_check && hp->allocaddr) {
4086                 hp->origaddr = a;
4087                 a = hp->addr;
4088         } else
4089                 hp->addr = a;
4090         hp->len = len;
4091         /*
4092          * get nexus to do the real work
4093          */
4094         dummyhp->save.dvma_ops.dvma_kaddr_load(h, a, len, index, cp);
4095         /*
4096          * chain on any pre-existing errdefs that apply to this dma_handle
4097          * no need to corrupt - there's no implicit dma_sync on this one
4098          */
4099         mutex_enter(&bofi_low_mutex);
4100         mutex_enter(&bofi_mutex);
4101         hp->type = BOFI_DMA_HDL;
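             /*
              * link an errdef only if it names this driver and instance, is
              * a DMA (BOFI_DMA_RW) errdef, and its offset/len range covers
              * at least one whole LLSZMASK-aligned long long of the mapping
              */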
4102         for (ep = errent_listp; ep != NULL; ep = ep->next) {
4103                 if (ddi_name_to_major(hp->name) ==
4104                     ddi_name_to_major(ep->name) &&
4105                     hp->instance == ep->errdef.instance &&
4106                     (ep->errdef.rnumber == -1 ||
4107                     hp->rnumber == ep->errdef.rnumber) &&
4108                     ((ep->errdef.access_type & BOFI_DMA_RW) &&
4109                     (((uintptr_t)(hp->addr + ep->errdef.offset +
4110                     ep->errdef.len) & ~LLSZMASK) >
4111                     ((uintptr_t)((hp->addr + ep->errdef.offset) +
4112                     LLSZMASK) & ~LLSZMASK)))) {
4113                         lp = bofi_link_freelist;
4114                         if (lp != NULL) {
4115                                 bofi_link_freelist = lp->link;
4116                                 lp->errentp = ep;
4117                                 lp->link = hp->link;
4118                                 hp->link = lp;
4119                         }
4120                 }
4121         }
4122         mutex_exit(&bofi_mutex);
4123         mutex_exit(&bofi_low_mutex);
4124 }
4125 
4126 /*
4127  * our dvma_unload()
4128  */
4129 static void
4130 bofi_dvma_unload(ddi_dma_handle_t h, uint_t index, uint_t view)
4131 {
4132         struct bofi_link *lp, *next_lp;
4133         struct bofi_errent *ep;
4134         struct bofi_shadow *dummyhp;
4135         struct bofi_shadow *hp;
4136         struct bofi_shadow *hhashp;
4137 
4138         /*
4139          * check we really have a dummy shadow for this handle
4140          */
4141         mutex_enter(&bofi_low_mutex);
4142         mutex_enter(&bofi_mutex);
4143         hhashp = HDL_HHASH(h);
4144         for (dummyhp = hhashp->hnext; dummyhp != hhashp;
4145             dummyhp = dummyhp->hnext)
4146                 if (dummyhp->hdl.dma_handle == h)
4147                         break;
4148         mutex_exit(&bofi_mutex);
4149         mutex_exit(&bofi_low_mutex);
4150         if (dummyhp == hhashp) {
4151                 /*
4152                  * no dummy shadow - panic
4153                  */
4154                 panic("driver dvma_unload with no reserve");
4155         }
4156         dummyhp->save.dvma_ops.dvma_unload(h, index, view);
4157         /*
4158          * find real hp
4159          */
4160         hp = dummyhp->hparrayp[index];
4161         /*
4162          * check it's not already unloaded
4163          */
4164         if (hp->type == BOFI_NULL)
4165                 panic("driver unloading unloaded dvma");
4166         /*
4167          * free any errdef link structures tagged on to this
4168          * shadow handle - do corruption if necessary
4169          */
4170         mutex_enter(&bofi_low_mutex);
4171         mutex_enter(&bofi_mutex);
4172         for (lp = hp->link; lp != NULL; ) {
4173                 next_lp = lp->link;
4174                 ep = lp->errentp;
4175                 if ((ep->errdef.access_type & BOFI_DMA_R) &&
4176                     (view == DDI_DMA_SYNC_FORCPU ||
4177                     view == DDI_DMA_SYNC_FORKERNEL) &&
4178                     (ep->state & BOFI_DEV_ACTIVE)) {
4179                         do_dma_corrupt(hp, ep, view, 0, hp->len);
4180                 }
4181                 lp->link = bofi_link_freelist;
4182                 bofi_link_freelist = lp;
4183                 lp = next_lp;
4184         }
4185         hp->link = NULL;
4186         hp->type = BOFI_NULL;
4187         mutex_exit(&bofi_mutex);
4188         mutex_exit(&bofi_low_mutex);
4189         /*
4190          * if there is an explicit sync_for_cpu, then do copy to original
4191          */
4192         if (bofi_sync_check &&
4193             (view == DDI_DMA_SYNC_FORCPU || view == DDI_DMA_SYNC_FORKERNEL))
4194                 if (hp->allocaddr)
4195                         xbcopy(hp->addr, hp->origaddr, hp->len);
4196 }
4197 
4198 /*
4199  * our dvma_sync()
4200  */
4201 static void
4202 bofi_dvma_sync(ddi_dma_handle_t h, uint_t index, uint_t view)
4203 {
4204         struct bofi_link *lp;
4205         struct bofi_errent *ep;
4206         struct bofi_shadow *hp;
4207         struct bofi_shadow *dummyhp;
4208         struct bofi_shadow *hhashp;
4209 
4210         /*
4211          * check we really have a dummy shadow for this handle
4212          */
4213         mutex_enter(&bofi_low_mutex);
4214         mutex_enter(&bofi_mutex);
4215         hhashp = HDL_HHASH(h);
4216         for (dummyhp = hhashp->hnext; dummyhp != hhashp;
4217             dummyhp = dummyhp->hnext)
4218                 if (dummyhp->hdl.dma_handle == h)
4219                         break;
4220         mutex_exit(&bofi_mutex);
4221         mutex_exit(&bofi_low_mutex);
4222         if (dummyhp == hhashp) {
4223                 /*
4224                  * no dummy shadow - panic
4225                  */
4226                 panic("driver dvma_sync with no reserve");
4227         }
4228         /*
4229          * find real hp
4230          */
4231         hp = dummyhp->hparrayp[index];
4232         /*
4233          * check it's already loaded
4234          */
4235         if (hp->type == BOFI_NULL)
4236                 panic("driver syncing unloaded dvma");
4237         if (view == DDI_DMA_SYNC_FORCPU || view == DDI_DMA_SYNC_FORKERNEL)
4238                 /*
4239                  * in this case do sync first
4240                  */
4241                 dummyhp->save.dvma_ops.dvma_sync(h, index, view);
4242         /*
4243          * if there is an explicit sync_for_dev, then do copy from original
4244          */
4245         if (bofi_sync_check && view == DDI_DMA_SYNC_FORDEV) {
4246                 if (hp->allocaddr)
4247                         xbcopy(hp->origaddr, hp->addr, hp->len);
4248         }
4249         /*
4250          * do corruption if necessary
4251          */
4252         mutex_enter(&bofi_low_mutex);
4253         mutex_enter(&bofi_mutex);
4254         for (lp = hp->link; lp != NULL; lp = lp->link) {
4255                 ep = lp->errentp;
4256                 if ((((ep->errdef.access_type & BOFI_DMA_R) &&
4257                     (view == DDI_DMA_SYNC_FORCPU ||
4258                     view == DDI_DMA_SYNC_FORKERNEL)) ||
4259                     ((ep->errdef.access_type & BOFI_DMA_W) &&
4260                     (view == DDI_DMA_SYNC_FORDEV))) &&
4261                     (ep->state & BOFI_DEV_ACTIVE)) {
4262                         do_dma_corrupt(hp, ep, view, 0, hp->len);
4263                 }
4264         }
4265         mutex_exit(&bofi_mutex);
4266         mutex_exit(&bofi_low_mutex);
4267         /*
4268          * if there is an explicit sync_for_cpu, then do copy to original
4269          */
4270         if (bofi_sync_check &&
4271             (view == DDI_DMA_SYNC_FORCPU || view == DDI_DMA_SYNC_FORKERNEL)) {
4272                 if (hp->allocaddr)
4273                         xbcopy(hp->addr, hp->origaddr, hp->len);
4274         }
4275         if (view == DDI_DMA_SYNC_FORDEV)
4276                 /*
4277                  * in this case do sync last
4278                  */
4279                 dummyhp->save.dvma_ops.dvma_sync(h, index, view);
4280 }
4281 #endif
4282 
4283 /*
4284  * bofi intercept routine - called instead of the driver's interrupt routine
4285  */
4286 static uint_t
4287 bofi_intercept_intr(caddr_t xp, caddr_t arg2)
4288 {
4289         struct bofi_errent *ep;
4290         struct bofi_link   *lp;
4291         struct bofi_shadow *hp;
4292         int intr_count = 1;
4293         int i;
4294         uint_t retval = DDI_INTR_UNCLAIMED;
4295         uint_t result;
4296         int unclaimed_counter = 0;
4297         int jabber_detected = 0;
4298 
4299         hp = (struct bofi_shadow *)xp;
4300         /*
4301          * check if nothing to do
4302          */
4303         if (hp->link == NULL)
4304                 return (hp->save.intr.int_handler
4305                     (hp->save.intr.int_handler_arg1, arg2));
4306         mutex_enter(&bofi_mutex);
4307         /*
4308          * look for any errdefs
4309          */
4310         for (lp = hp->link; lp != NULL; lp = lp->link) {
4311                 ep = lp->errentp;
4312                 if (ep->state & BOFI_DEV_ACTIVE) {
4313                         /*
4314                          * got one
4315                          */
4316                         if ((ep->errdef.access_count ||
4317                             ep->errdef.fail_count) &&
4318                             (ep->errdef.access_type & BOFI_LOG))
4319                                 log_acc_event(ep, BOFI_INTR, 0, 0, 1, 0);
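                             /*
                              * burn through any remaining access_count first;
                              * once exhausted, consume fail_count and inject
                              * the requested interrupt fault
                              */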
4320                         if (ep->errdef.access_count > 1) {
4321                                 ep->errdef.access_count--;
4322                         } else if (ep->errdef.fail_count > 0) {
4323                                 ep->errdef.fail_count--;
4324                                 ep->errdef.access_count = 0;
4325                                 /*
4326                                  * OK do "corruption"
4327                                  */
4328                                 if (ep->errstate.fail_time == 0)
4329                                         ep->errstate.fail_time = bofi_gettime();
4330                                 switch (ep->errdef.optype) {
4331                                 case BOFI_DELAY_INTR:
4332                                         if (!hp->hilevel) {
4333                                                 drv_usecwait
4334                                                     (ep->errdef.operand);
4335                                         }
4336                                         break;
4337                                 case BOFI_LOSE_INTR:
4338                                         intr_count = 0;
4339                                         break;
4340                                 case BOFI_EXTRA_INTR:
4341                                         intr_count += ep->errdef.operand;
4342                                         break;
4343                                 default:
4344                                         break;
4345                                 }
4346                         }
4347                 }
4348         }
4349         mutex_exit(&bofi_mutex);
4350         /*
4351          * send extra or fewer interrupts as requested
4352          */
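             /*
              * keep a decaying count of unclaimed returns: a claimed return
              * halves it, an unclaimed one increments it; reaching 20 means
              * interrupt jabber would have been detected
              */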
4353         for (i = 0; i < intr_count; i++) {
4354                 result = hp->save.intr.int_handler
4355                     (hp->save.intr.int_handler_arg1, arg2);
4356                 if (result == DDI_INTR_CLAIMED)
4357                         unclaimed_counter >>= 1;
4358                 else if (++unclaimed_counter >= 20)
4359                         jabber_detected = 1;
4360                 if (i == 0)
4361                         retval = result;
4362         }
4363         /*
4364          * if more than 1000 spurious interrupts were requested and
4365          * jabber was not detected - panic
4366          */
4367         if (intr_count > 1000 && !jabber_detected)
4368                 panic("undetected interrupt jabber: %s%d",
4369                     hp->name, hp->instance);
4370         /*
4371          * return first response - or "unclaimed" if none
4372          */
4373         return (retval);
4374 }
4375 
4376 
4377 /*
4378  * our ddi_check_acc_hdl
4379  */
4380 /* ARGSUSED */
4381 static int
4382 bofi_check_acc_hdl(ddi_acc_impl_t *handle)
4383 {
4384         struct bofi_shadow *hp;
4385         struct bofi_link   *lp;
4386         uint_t result = 0;
4387 
4388         hp = handle->ahi_common.ah_bus_private;
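             /*
              * this runs on the driver's ddi_check_acc_handle() path, so
              * use mutex_tryenter() and report "no fault" if the lock is
              * contended rather than block here
              */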
4389         if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
4390                 return (0);
4391         }
4392         for (lp = hp->link; lp != NULL; lp = lp->link) {
4393                 /*
4394                  * take error state from the associated
4395                  * errdef structures
4396                  */
4397                 if (lp->errentp->errdef.access_count == 0 &&
4398                     (lp->errentp->state & BOFI_DEV_ACTIVE)) {
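                             /* acc_chk bit 0 => fault on acc handle checks */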
4399                         result = (lp->errentp->errdef.acc_chk & 1);
4400                 }
4401         }
4402         mutex_exit(&bofi_mutex);
4403         return (result);
4404 }
4405 
4406 /*
4407  * our ddi_check_dma_hdl
4408  */
4409 /* ARGSUSED */
4410 static int
4411 bofi_check_dma_hdl(ddi_dma_impl_t *handle)
4412 {
4413         struct bofi_shadow *hp;
4414         struct bofi_link   *lp;
4415         struct bofi_shadow *hhashp;
4416         uint_t result = 0;
4417 
4418         if (!mutex_tryenter(&bofi_mutex)) {
4419                 return (0);
4420         }
4421         hhashp = HDL_HHASH(handle);
4422         for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
4423                 if (hp->hdl.dma_handle == (ddi_dma_handle_t)handle)
4424                         break;
4425         if (hp == hhashp) {
4426                 mutex_exit(&bofi_mutex);
4427                 return (0);
4428         }
4429         if (!hp->link) {
4430                 mutex_exit(&bofi_mutex);
4431                 return (0);
4432         }
4433         for (lp = hp->link; lp != NULL; lp = lp->link) {
4434                 /*
4435                  * take error state from the associated
4436                  * errdef structures
4437                  */
4438                 if (lp->errentp->errdef.access_count == 0 &&
4439                     (lp->errentp->state & BOFI_DEV_ACTIVE)) {
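                             /* acc_chk bit 1 => fault on dma handle checks */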
4440                         result = ((lp->errentp->errdef.acc_chk & 2) ? 1 : 0);
4441                 }
4442         }
4443         mutex_exit(&bofi_mutex);
4444         return (result);
4445 }
4446 
4447 
4448 /* ARGSUSED */
4449 static int
4450 bofi_post_event(dev_info_t *dip, dev_info_t *rdip,
4451                     ddi_eventcookie_t eventhdl, void *impl_data)
4452 {
4453         ddi_eventcookie_t ec;
4454         struct ddi_fault_event_data *arg;
4455         struct bofi_errent *ep;
4456         struct bofi_shadow *hp;
4457         struct bofi_shadow *dhashp;
4458         struct bofi_link   *lp;
4459 
4460         ASSERT(eventhdl);
4461         if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) != DDI_SUCCESS)
4462                 return (DDI_FAILURE);
4463 
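             /*
              * if this is not the DDI_DEVI_FAULT_EVENT cookie, just pass
              * the event straight on to the saved nexus bus_post_event()
              */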
4464         if (ec != eventhdl)
4465                 return (save_bus_ops.bus_post_event(dip, rdip, eventhdl,
4466                     impl_data));
4467 
4468         arg = (struct ddi_fault_event_data *)impl_data;
4469         mutex_enter(&bofi_mutex);
4470         /*
4471          * find shadow handles with appropriate dev_infos
4472          * and set error reported on all associated errdef structures
4473          */
4474         dhashp = HDL_DHASH(arg->f_dip);
4475         for (hp = dhashp->dnext; hp != dhashp; hp = hp->dnext) {
4476                 if (hp->dip == arg->f_dip) {
4477                         for (lp = hp->link; lp != NULL; lp = lp->link) {
4478                                 ep = lp->errentp;
4479                                 ep->errstate.errmsg_count++;
4480                                 if ((ep->errstate.msg_time == NULL ||
4481                                     ep->errstate.severity > arg->f_impact) &&
4482                                     (ep->state & BOFI_DEV_ACTIVE)) {
4483                                         ep->errstate.msg_time = bofi_gettime();
4484                                         ep->errstate.severity = arg->f_impact;
4485                                         (void) strncpy(ep->errstate.buffer,
4486                                             arg->f_message, ERRMSGSIZE);
4487                                         ddi_trigger_softintr(ep->softintr_id);
4488                                 }
4489                         }
4490                 }
4491         }
4492         mutex_exit(&bofi_mutex);
4493         return (save_bus_ops.bus_post_event(dip, rdip, eventhdl, impl_data));
4494 }
4495 
4496 /*ARGSUSED*/
4497 static int
4498 bofi_fm_ereport_callback(sysevent_t *ev, void *cookie)
4499 {
4500         char *class = "";
4501         char *path = "";
4502         char *ptr;
4503         nvlist_t *nvlist;
4504         nvlist_t *detector;
4505         ddi_fault_impact_t impact;
4506         struct bofi_errent *ep;
4507         struct bofi_shadow *hp;
4508         struct bofi_link   *lp;
4509         char service_class[FM_MAX_CLASS];
4510         char hppath[MAXPATHLEN];
4511         int service_ereport = 0;
4512 
4513         (void) sysevent_get_attr_list(ev, &nvlist);
4514         (void) nvlist_lookup_string(nvlist, FM_CLASS, &class);
4515         if (nvlist_lookup_nvlist(nvlist, FM_EREPORT_DETECTOR, &detector) == 0)
4516                 (void) nvlist_lookup_string(detector, FM_FMRI_DEV_PATH, &path);
4517 
4518         (void) snprintf(service_class, FM_MAX_CLASS, "%s.%s.%s.",
4519             FM_EREPORT_CLASS, DDI_IO_CLASS, DDI_FM_SERVICE_IMPACT);
4520         if (strncmp(class, service_class, strlen(service_class) - 1) == 0)
4521                 service_ereport = 1;
4522 
4523         mutex_enter(&bofi_mutex);
4524         /*
4525          * find shadow handles with appropriate dev_infos
4526          * and set error reported on all associated errdef structures
4527          */
4528         for (hp = shadow_list.next; hp != &shadow_list; hp = hp->next) {
4529                 (void) ddi_pathname(hp->dip, hppath);
4530                 if (strcmp(path, hppath) != 0)
4531                         continue;
4532                 for (lp = hp->link; lp != NULL; lp = lp->link) {
4533                         ep = lp->errentp;
4534                         ep->errstate.errmsg_count++;
4535                         if (!(ep->state & BOFI_DEV_ACTIVE))
4536                                 continue;
4537                         if (ep->errstate.msg_time != NULL)
4538                                 continue;
4539                         if (service_ereport) {
4540                                 ptr = class + strlen(service_class);
4541                                 if (strcmp(ptr, DDI_FM_SERVICE_LOST) == 0)
4542                                         impact = DDI_SERVICE_LOST;
4543                                 else if (strcmp(ptr,
4544                                     DDI_FM_SERVICE_DEGRADED) == 0)
4545                                         impact = DDI_SERVICE_DEGRADED;
4546                                 else if (strcmp(ptr,
4547                                     DDI_FM_SERVICE_RESTORED) == 0)
4548                                         impact = DDI_SERVICE_RESTORED;
4549                                 else
4550                                         impact = DDI_SERVICE_UNAFFECTED;
4551                                 if (ep->errstate.severity > impact)
4552                                         ep->errstate.severity = impact;
4553                         } else if (ep->errstate.buffer[0] == '\0') {
4554                                 (void) strncpy(ep->errstate.buffer, class,
4555                                     ERRMSGSIZE);
4556                         }
4557                         if (ep->errstate.buffer[0] != '\0' &&
4558                             ep->errstate.severity < DDI_SERVICE_RESTORED) {
4559                                 ep->errstate.msg_time = bofi_gettime();
4560                                 ddi_trigger_softintr(ep->softintr_id);
4561                         }
4562                 }
4563         }
4564         nvlist_free(nvlist);
4565         mutex_exit(&bofi_mutex);
4566         return (0);
4567 }
4568 
4569 /*
4570  * our intr_ops routine
4571  */
4572 static int
4573 bofi_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
4574     ddi_intr_handle_impl_t *hdlp, void *result)
4575 {
4576         int retval;
4577         struct bofi_shadow *hp;
4578         struct bofi_shadow *dhashp;
4579         struct bofi_shadow *hhashp;
4580         struct bofi_errent *ep;
4581         struct bofi_link   *lp, *next_lp;
4582 
4583         switch (intr_op) {
4584         case DDI_INTROP_ADDISR:
4585                 /*
4586                  * if driver_list is set, only intercept those drivers
4587                  */
4588                 if (!driver_under_test(rdip))
4589                         return (save_bus_ops.bus_intr_op(dip, rdip,
4590                             intr_op, hdlp, result));
4591                 /*
4592                  * allocate shadow handle structure and fill in
4593                  */
4594                 hp = kmem_zalloc(sizeof (struct bofi_shadow), KM_SLEEP);
4595                 (void) strncpy(hp->name, ddi_get_name(rdip), NAMESIZE);
4596                 hp->instance = ddi_get_instance(rdip);
4597                 hp->save.intr.int_handler = hdlp->ih_cb_func;
4598                 hp->save.intr.int_handler_arg1 = hdlp->ih_cb_arg1;
4599                 hdlp->ih_cb_func = (ddi_intr_handler_t *)bofi_intercept_intr;
4600                 hdlp->ih_cb_arg1 = (caddr_t)hp;
4601                 hp->bofi_inum = hdlp->ih_inum;
4602                 hp->dip = rdip;
4603                 hp->link = NULL;
4604                 hp->type = BOFI_INT_HDL;
4605                 /*
4606                  * save whether hilevel or not
4607                  */
4608 
4609                 if (hdlp->ih_pri >= ddi_intr_get_hilevel_pri())
4610                         hp->hilevel = 1;
4611                 else
4612                         hp->hilevel = 0;
4613 
4614                 /*
4615                  * call nexus to do real work, but specifying our handler, and
4616                  * our shadow handle as argument
4617                  */
4618                 retval = save_bus_ops.bus_intr_op(dip, rdip,
4619                     intr_op, hdlp, result);
4620                 if (retval != DDI_SUCCESS) {
4621                         kmem_free(hp, sizeof (struct bofi_shadow));
4622                         return (retval);
4623                 }
4624                 /*
4625                  * add to dhash, hhash and inuse lists
4626                  */
4627                 mutex_enter(&bofi_low_mutex);
4628                 mutex_enter(&bofi_mutex);
4629                 hp->next = shadow_list.next;
4630                 shadow_list.next->prev = hp;
4631                 hp->prev = &shadow_list;
4632                 shadow_list.next = hp;
4633                 hhashp = HDL_HHASH(hdlp->ih_inum);
4634                 hp->hnext = hhashp->hnext;
4635                 hhashp->hnext->hprev = hp;
4636                 hp->hprev = hhashp;
4637                 hhashp->hnext = hp;
4638                 dhashp = HDL_DHASH(hp->dip);
4639                 hp->dnext = dhashp->dnext;
4640                 dhashp->dnext->dprev = hp;
4641                 hp->dprev = dhashp;
4642                 dhashp->dnext = hp;
4643                 /*
4644                  * chain on any pre-existing errdefs that apply to this
4645                  * interrupt handler
4646                  */
4647                 for (ep = errent_listp; ep != NULL; ep = ep->next) {
4648                         if (ddi_name_to_major(hp->name) ==
4649                             ddi_name_to_major(ep->name) &&
4650                             hp->instance == ep->errdef.instance &&
4651                             (ep->errdef.access_type & BOFI_INTR)) {
4652                                 lp = bofi_link_freelist;
4653                                 if (lp != NULL) {
4654                                         bofi_link_freelist = lp->link;
4655                                         lp->errentp = ep;
4656                                         lp->link = hp->link;
4657                                         hp->link = lp;
4658                                 }
4659                         }
4660                 }
4661                 mutex_exit(&bofi_mutex);
4662                 mutex_exit(&bofi_low_mutex);
4663                 return (retval);
4664         case DDI_INTROP_REMISR:
4665                 /*
4666                  * call nexus routine first
4667                  */
4668                 retval = save_bus_ops.bus_intr_op(dip, rdip,
4669                     intr_op, hdlp, result);
4670                 /*
4671                  * find shadow handle
4672                  */
4673                 mutex_enter(&bofi_low_mutex);
4674                 mutex_enter(&bofi_mutex);
4675                 hhashp = HDL_HHASH(hdlp->ih_inum);
4676                 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext) {
4677                         if (hp->dip == rdip &&
4678                             hp->type == BOFI_INT_HDL &&
4679                             hp->bofi_inum == hdlp->ih_inum) {
4680                                 break;
4681                         }
4682                 }
4683                 if (hp == hhashp) {
4684                         mutex_exit(&bofi_mutex);
4685                         mutex_exit(&bofi_low_mutex);
4686                         return (retval);
4687                 }
4688                 /*
4689                  * found one - remove from dhash, hhash and inuse lists
4690                  */
4691                 hp->hnext->hprev = hp->hprev;
4692                 hp->hprev->hnext = hp->hnext;
4693                 hp->dnext->dprev = hp->dprev;
4694                 hp->dprev->dnext = hp->dnext;
4695                 hp->next->prev = hp->prev;
4696                 hp->prev->next = hp->next;
4697                 /*
4698                  * free any errdef link structures
4699                  * tagged on to this shadow handle
4700                  */
4701                 for (lp = hp->link; lp != NULL; ) {
4702                         next_lp = lp->link;
4703                         lp->link = bofi_link_freelist;
4704                         bofi_link_freelist = lp;
4705                         lp = next_lp;
4706                 }
4707                 hp->link = NULL;
4708                 mutex_exit(&bofi_mutex);
4709                 mutex_exit(&bofi_low_mutex);
4710                 kmem_free(hp, sizeof (struct bofi_shadow));
4711                 return (retval);
4712         default:
4713                 return (save_bus_ops.bus_intr_op(dip, rdip,
4714                     intr_op, hdlp, result));
4715         }
4716 }