Print this page
7127 remove -Wno-missing-braces from Makefile.uts
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/bofi.c
+++ new/usr/src/uts/common/io/bofi.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25 /*
26 26 * Copyright 2012 Garrett D'Amore <garrett@damore.org>. All rights reserved.
27 27 */
28 28
29 29
30 30 #include <sys/types.h>
31 31 #include <sys/sysmacros.h>
32 32 #include <sys/buf.h>
33 33 #include <sys/errno.h>
34 34 #include <sys/modctl.h>
35 35 #include <sys/conf.h>
36 36 #include <sys/stat.h>
37 37 #include <sys/kmem.h>
38 38 #include <sys/proc.h>
39 39 #include <sys/cpuvar.h>
40 40 #include <sys/ddi_impldefs.h>
41 41 #include <sys/ddi.h>
42 42 #include <sys/fm/protocol.h>
43 43 #include <sys/fm/util.h>
44 44 #include <sys/fm/io/ddi.h>
45 45 #include <sys/sysevent/eventdefs.h>
46 46 #include <sys/sunddi.h>
47 47 #include <sys/sunndi.h>
48 48 #include <sys/debug.h>
49 49 #include <sys/bofi.h>
50 50 #include <sys/dvma.h>
51 51 #include <sys/bofi_impl.h>
52 52
53 53 /*
54 54 * Testing the resilience of a hardened device driver requires a suitably wide
55 55 * range of different types of "typical" hardware faults to be injected,
56 56 * preferably in a controlled and repeatable fashion. This is not in general
57 57 * possible via hardware, so the "fault injection test harness" is provided.
58 58 * This works by intercepting calls from the driver to various DDI routines,
59 59 * and then corrupting the result of those DDI routine calls as if the
60 60 * hardware had caused the corruption.
61 61 *
62 62 * Conceptually, the bofi driver consists of two parts:
63 63 *
64 64 * A driver interface that supports a number of ioctls which allow error
65 65 * definitions ("errdefs") to be defined and subsequently managed. The
66 66 * driver is a clone driver, so each open will create a separate
67 67 * invocation. Any errdefs created by using ioctls to that invocation
68 68 * will automatically be deleted when that invocation is closed.
69 69 *
70 70 * Intercept routines: When the bofi driver is attached, it edits the
71 71 * bus_ops structure of the bus nexus specified by the "bofi-nexus"
72 72 * field in the "bofi.conf" file, thus allowing the
73 73 * bofi driver to intercept various ddi functions. These intercept
74 74 * routines primarily carry out fault injections based on the errdefs
75 75 * created for that device.
76 76 *
77 77 * Faults can be injected into:
78 78 *
79 79 * DMA (corrupting data for DMA to/from memory areas defined by
80 80 * ddi_dma_setup(), ddi_dma_bind_handle(), etc)
81 81 *
82 82 * Physical IO (corrupting data sent/received via ddi_get8(), ddi_put8(),
83 83 * etc),
84 84 *
85 85 * Interrupts (generating spurious interrupts, losing interrupts,
86 86 * delaying interrupts).
87 87 *
88 88 * By default, ddi routines called from all drivers will be intercepted
89 89 * and faults potentially injected. However, the "bofi-to-test" field in
90 90 * the "bofi.conf" file can be set to a space-separated list of drivers to
91 91 * test (or by preceding each driver name in the list with an "!", a list
92 92 * of drivers not to test).
93 93 *
94 94 * In addition to fault injection, the bofi driver does a number of static
95 95 * checks which are controlled by properties in the "bofi.conf" file.
96 96 *
97 97 * "bofi-ddi-check" - if set will validate that there are no PIO access
98 98 * other than those using the DDI routines (ddi_get8(), ddi_put8(), etc).
99 99 *
100 100 * "bofi-range-check" - if set to values 1 (warning) or 2 (panic), will
101 101 * validate that calls to ddi_get8(), ddi_put8(), etc are not made
102 102 * specifying addresses outside the range of the access_handle.
103 103 *
104 104 * "bofi-sync-check" - if set will validate that calls to ddi_dma_sync()
105 105 * are being made correctly.
106 106 */
107 107
108 108 extern void *bp_mapin_common(struct buf *, int);
109 109
110 110 static int bofi_ddi_check;
111 111 static int bofi_sync_check;
112 112 static int bofi_range_check;
113 113
114 114 static struct bofi_link bofi_link_array[BOFI_NLINKS], *bofi_link_freelist;
115 115
116 116 #define LLSZMASK (sizeof (uint64_t)-1)
117 117
118 118 #define HDL_HASH_TBL_SIZE 64
119 119 static struct bofi_shadow hhash_table[HDL_HASH_TBL_SIZE];
120 120 static struct bofi_shadow dhash_table[HDL_HASH_TBL_SIZE];
121 121 #define HDL_DHASH(x) \
122 122 (&dhash_table[((uintptr_t)(x) >> 3) & (HDL_HASH_TBL_SIZE-1)])
123 123 #define HDL_HHASH(x) \
124 124 (&hhash_table[((uintptr_t)(x) >> 5) & (HDL_HASH_TBL_SIZE-1)])
125 125
126 126 static struct bofi_shadow shadow_list;
127 127 static struct bofi_errent *errent_listp;
128 128
129 129 static char driver_list[NAMESIZE];
130 130 static int driver_list_size;
131 131 static int driver_list_neg;
132 132 static char nexus_name[NAMESIZE];
133 133
134 134 static int initialized = 0;
135 135
136 136 #define NCLONES 2560
137 137 static int clone_tab[NCLONES];
138 138
139 139 static dev_info_t *our_dip;
140 140
141 141 static kmutex_t bofi_mutex;
142 142 static kmutex_t clone_tab_mutex;
143 143 static kmutex_t bofi_low_mutex;
144 144 static ddi_iblock_cookie_t bofi_low_cookie;
145 145 static uint_t bofi_signal(caddr_t arg);
146 146 static int bofi_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
147 147 static int bofi_attach(dev_info_t *, ddi_attach_cmd_t);
148 148 static int bofi_detach(dev_info_t *, ddi_detach_cmd_t);
149 149 static int bofi_open(dev_t *, int, int, cred_t *);
150 150 static int bofi_close(dev_t, int, int, cred_t *);
151 151 static int bofi_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
152 152 static int bofi_errdef_alloc(struct bofi_errdef *, char *,
153 153 struct bofi_errent *);
154 154 static int bofi_errdef_free(struct bofi_errent *);
155 155 static void bofi_start(struct bofi_errctl *, char *);
156 156 static void bofi_stop(struct bofi_errctl *, char *);
157 157 static void bofi_broadcast(struct bofi_errctl *, char *);
158 158 static void bofi_clear_acc_chk(struct bofi_errctl *, char *);
159 159 static void bofi_clear_errors(struct bofi_errctl *, char *);
160 160 static void bofi_clear_errdefs(struct bofi_errctl *, char *);
161 161 static int bofi_errdef_check(struct bofi_errstate *,
162 162 struct acc_log_elem **);
163 163 static int bofi_errdef_check_w(struct bofi_errstate *,
164 164 struct acc_log_elem **);
165 165 static int bofi_map(dev_info_t *, dev_info_t *, ddi_map_req_t *,
166 166 off_t, off_t, caddr_t *);
167 167 static int bofi_dma_allochdl(dev_info_t *, dev_info_t *,
168 168 ddi_dma_attr_t *, int (*)(caddr_t), caddr_t,
169 169 ddi_dma_handle_t *);
170 170 static int bofi_dma_freehdl(dev_info_t *, dev_info_t *,
171 171 ddi_dma_handle_t);
172 172 static int bofi_dma_bindhdl(dev_info_t *, dev_info_t *,
173 173 ddi_dma_handle_t, struct ddi_dma_req *, ddi_dma_cookie_t *,
174 174 uint_t *);
175 175 static int bofi_dma_unbindhdl(dev_info_t *, dev_info_t *,
176 176 ddi_dma_handle_t);
177 177 static int bofi_dma_flush(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
178 178 off_t, size_t, uint_t);
179 179 static int bofi_dma_ctl(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
180 180 enum ddi_dma_ctlops, off_t *, size_t *, caddr_t *, uint_t);
181 181 static int bofi_dma_win(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
182 182 uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);
183 183 static int bofi_intr_ops(dev_info_t *dip, dev_info_t *rdip,
184 184 ddi_intr_op_t intr_op, ddi_intr_handle_impl_t *hdlp,
185 185 void *result);
186 186 static int bofi_fm_ereport_callback(sysevent_t *ev, void *cookie);
187 187
188 188 evchan_t *bofi_error_chan;
189 189
190 190 #define FM_SIMULATED_DMA "simulated.dma"
191 191 #define FM_SIMULATED_PIO "simulated.pio"
192 192
193 193 #if defined(__sparc)
194 194 static void bofi_dvma_kaddr_load(ddi_dma_handle_t, caddr_t, uint_t,
195 195 uint_t, ddi_dma_cookie_t *);
196 196 static void bofi_dvma_unload(ddi_dma_handle_t, uint_t, uint_t);
197 197 static void bofi_dvma_sync(ddi_dma_handle_t, uint_t, uint_t);
198 198 static void bofi_dvma_reserve(dev_info_t *, ddi_dma_handle_t);
199 199 #endif
200 200 static int driver_under_test(dev_info_t *);
201 201 static int bofi_check_acc_hdl(ddi_acc_impl_t *);
202 202 static int bofi_check_dma_hdl(ddi_dma_impl_t *);
203 203 static int bofi_post_event(dev_info_t *dip, dev_info_t *rdip,
204 204 ddi_eventcookie_t eventhdl, void *impl_data);
205 205
206 206 static struct bus_ops bofi_bus_ops = {
207 207 BUSO_REV,
208 208 bofi_map,
209 209 NULL,
210 210 NULL,
211 211 NULL,
212 212 i_ddi_map_fault,
213 213 NULL,
214 214 bofi_dma_allochdl,
215 215 bofi_dma_freehdl,
216 216 bofi_dma_bindhdl,
217 217 bofi_dma_unbindhdl,
218 218 bofi_dma_flush,
219 219 bofi_dma_win,
220 220 bofi_dma_ctl,
221 221 NULL,
222 222 ddi_bus_prop_op,
223 223 ndi_busop_get_eventcookie,
224 224 ndi_busop_add_eventcall,
225 225 ndi_busop_remove_eventcall,
226 226 bofi_post_event,
227 227 NULL,
228 228 0,
229 229 0,
230 230 0,
231 231 0,
232 232 0,
233 233 0,
234 234 0,
235 235 bofi_intr_ops
236 236 };
237 237
238 238 static struct cb_ops bofi_cb_ops = {
239 239 bofi_open, /* open */
240 240 bofi_close, /* close */
241 241 nodev, /* strategy */
242 242 nodev, /* print */
243 243 nodev, /* dump */
244 244 nodev, /* read */
245 245 nodev, /* write */
246 246 bofi_ioctl, /* ioctl */
247 247 nodev, /* devmap */
248 248 nodev, /* mmap */
249 249 nodev, /* segmap */
250 250 nochpoll, /* chpoll */
251 251 ddi_prop_op, /* prop_op */
252 252 NULL, /* for STREAMS drivers */
253 253 D_MP, /* driver compatibility flag */
254 254 CB_REV, /* cb_ops revision */
255 255 nodev, /* aread */
256 256 nodev /* awrite */
257 257 };
258 258
259 259 static struct dev_ops bofi_ops = {
260 260 DEVO_REV, /* driver build version */
261 261 0, /* device reference count */
262 262 bofi_getinfo,
263 263 nulldev,
264 264 nulldev, /* probe */
265 265 bofi_attach,
266 266 bofi_detach,
267 267 nulldev, /* reset */
268 268 &bofi_cb_ops,
269 269 (struct bus_ops *)NULL,
270 270 nulldev, /* power */
271 271 ddi_quiesce_not_needed, /* quiesce */
272 272 };
273 273
274 274 /* module configuration stuff */
↓ open down ↓ |
274 lines elided |
↑ open up ↑ |
275 275 static void *statep;
276 276
277 277 static struct modldrv modldrv = {
278 278 &mod_driverops,
279 279 "bofi driver",
280 280 &bofi_ops
281 281 };
282 282
283 283 static struct modlinkage modlinkage = {
284 284 MODREV_1,
285 - &modldrv,
286 - 0
285 + { &modldrv, NULL }
287 286 };
288 287
289 288 static struct bus_ops save_bus_ops;
290 289
291 290 #if defined(__sparc)
292 291 static struct dvma_ops bofi_dvma_ops = {
293 292 DVMAO_REV,
294 293 bofi_dvma_kaddr_load,
295 294 bofi_dvma_unload,
296 295 bofi_dvma_sync
297 296 };
298 297 #endif
299 298
300 299 /*
301 300 * support routine - map user page into kernel virtual
302 301 */
303 302 static caddr_t
304 303 dmareq_mapin(offset_t len, caddr_t addr, struct as *as, int flag)
305 304 {
306 305 struct buf buf;
307 306 struct proc proc;
308 307
309 308 /*
310 309 * mock up a buf structure so we can call bp_mapin_common()
311 310 */
312 311 buf.b_flags = B_PHYS;
313 312 buf.b_un.b_addr = (caddr_t)addr;
314 313 buf.b_bcount = (size_t)len;
315 314 proc.p_as = as;
316 315 buf.b_proc = &proc;
317 316 return (bp_mapin_common(&buf, flag));
318 317 }
319 318
320 319
321 320 /*
322 321 * support routine - map page chain into kernel virtual
323 322 */
324 323 static caddr_t
325 324 dmareq_pp_mapin(offset_t len, uint_t offset, page_t *pp, int flag)
326 325 {
327 326 struct buf buf;
328 327
329 328 /*
330 329 * mock up a buf structure so we can call bp_mapin_common()
331 330 */
332 331 buf.b_flags = B_PAGEIO;
333 332 buf.b_un.b_addr = (caddr_t)(uintptr_t)offset;
334 333 buf.b_bcount = (size_t)len;
335 334 buf.b_pages = pp;
336 335 return (bp_mapin_common(&buf, flag));
337 336 }
338 337
339 338
340 339 /*
341 340 * support routine - map page array into kernel virtual
342 341 */
343 342 static caddr_t
344 343 dmareq_pplist_mapin(uint_t len, caddr_t addr, page_t **pplist, struct as *as,
345 344 int flag)
346 345 {
347 346 struct buf buf;
348 347 struct proc proc;
349 348
350 349 /*
351 350 * mock up a buf structure so we can call bp_mapin_common()
352 351 */
353 352 buf.b_flags = B_PHYS|B_SHADOW;
354 353 buf.b_un.b_addr = addr;
355 354 buf.b_bcount = len;
356 355 buf.b_shadow = pplist;
357 356 proc.p_as = as;
358 357 buf.b_proc = &proc;
359 358 return (bp_mapin_common(&buf, flag));
360 359 }
361 360
362 361
363 362 /*
364 363 * support routine - map dmareq into kernel virtual if not already
365 364 * fills in *lenp with length
366 365 * *mapaddr will be new kernel virtual address - or null if no mapping needed
367 366 */
368 367 static caddr_t
369 368 ddi_dmareq_mapin(struct ddi_dma_req *dmareqp, caddr_t *mapaddrp,
370 369 offset_t *lenp)
371 370 {
372 371 int sleep = (dmareqp->dmar_fp == DDI_DMA_SLEEP) ? VM_SLEEP: VM_NOSLEEP;
373 372
374 373 *lenp = dmareqp->dmar_object.dmao_size;
375 374 if (dmareqp->dmar_object.dmao_type == DMA_OTYP_PAGES) {
376 375 *mapaddrp = dmareq_pp_mapin(dmareqp->dmar_object.dmao_size,
377 376 dmareqp->dmar_object.dmao_obj.pp_obj.pp_offset,
378 377 dmareqp->dmar_object.dmao_obj.pp_obj.pp_pp, sleep);
379 378 return (*mapaddrp);
380 379 } else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_priv != NULL) {
381 380 *mapaddrp = dmareq_pplist_mapin(dmareqp->dmar_object.dmao_size,
382 381 dmareqp->dmar_object.dmao_obj.virt_obj.v_addr,
383 382 dmareqp->dmar_object.dmao_obj.virt_obj.v_priv,
384 383 dmareqp->dmar_object.dmao_obj.virt_obj.v_as, sleep);
385 384 return (*mapaddrp);
386 385 } else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_as == &kas) {
387 386 *mapaddrp = NULL;
388 387 return (dmareqp->dmar_object.dmao_obj.virt_obj.v_addr);
389 388 } else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_as == NULL) {
390 389 *mapaddrp = NULL;
391 390 return (dmareqp->dmar_object.dmao_obj.virt_obj.v_addr);
392 391 } else {
393 392 *mapaddrp = dmareq_mapin(dmareqp->dmar_object.dmao_size,
394 393 dmareqp->dmar_object.dmao_obj.virt_obj.v_addr,
395 394 dmareqp->dmar_object.dmao_obj.virt_obj.v_as, sleep);
396 395 return (*mapaddrp);
397 396 }
398 397 }
399 398
400 399
401 400 /*
402 401 * support routine - free off kernel virtual mapping as allocated by
403 402 * ddi_dmareq_mapin()
404 403 */
405 404 static void
406 405 ddi_dmareq_mapout(caddr_t addr, offset_t len, int map_flags, page_t *pp,
407 406 page_t **pplist)
408 407 {
409 408 struct buf buf;
410 409
411 410 if (addr == NULL)
412 411 return;
413 412 /*
414 413 * mock up a buf structure
415 414 */
416 415 buf.b_flags = B_REMAPPED | map_flags;
417 416 buf.b_un.b_addr = addr;
418 417 buf.b_bcount = (size_t)len;
419 418 buf.b_pages = pp;
420 419 buf.b_shadow = pplist;
421 420 bp_mapout(&buf);
422 421 }
423 422
424 423 static time_t
425 424 bofi_gettime()
426 425 {
427 426 timestruc_t ts;
428 427
429 428 gethrestime(&ts);
430 429 return (ts.tv_sec);
431 430 }
432 431
433 432 /*
434 433 * reset the bus_ops structure of the specified nexus to point to
435 434 * the original values in the save_bus_ops structure.
436 435 *
437 436 * Note that both this routine and modify_bus_ops() rely on the current
438 437 * behavior of the framework in that nexus drivers are not unloadable
439 438 *
440 439 */
441 440
442 441 static int
443 442 reset_bus_ops(char *name, struct bus_ops *bop)
444 443 {
445 444 struct modctl *modp;
446 445 struct modldrv *mp;
447 446 struct bus_ops *bp;
448 447 struct dev_ops *ops;
449 448
450 449 mutex_enter(&mod_lock);
451 450 /*
452 451 * find specified module
453 452 */
454 453 modp = &modules;
455 454 do {
456 455 if (strcmp(name, modp->mod_modname) == 0) {
457 456 if (!modp->mod_linkage) {
458 457 mutex_exit(&mod_lock);
459 458 return (0);
460 459 }
461 460 mp = modp->mod_linkage->ml_linkage[0];
462 461 if (!mp || !mp->drv_dev_ops) {
463 462 mutex_exit(&mod_lock);
464 463 return (0);
465 464 }
466 465 ops = mp->drv_dev_ops;
467 466 bp = ops->devo_bus_ops;
468 467 if (!bp) {
469 468 mutex_exit(&mod_lock);
470 469 return (0);
471 470 }
472 471 if (ops->devo_refcnt > 0) {
473 472 /*
474 473 * As long as devices are active with modified
475 474 * bus ops bofi must not go away. There may be
476 475 * drivers with modified access or dma handles.
477 476 */
478 477 mutex_exit(&mod_lock);
479 478 return (0);
480 479 }
481 480 cmn_err(CE_NOTE, "bofi reset bus_ops for %s",
482 481 mp->drv_linkinfo);
483 482 bp->bus_intr_op = bop->bus_intr_op;
484 483 bp->bus_post_event = bop->bus_post_event;
485 484 bp->bus_map = bop->bus_map;
486 485 bp->bus_dma_map = bop->bus_dma_map;
487 486 bp->bus_dma_allochdl = bop->bus_dma_allochdl;
488 487 bp->bus_dma_freehdl = bop->bus_dma_freehdl;
489 488 bp->bus_dma_bindhdl = bop->bus_dma_bindhdl;
490 489 bp->bus_dma_unbindhdl = bop->bus_dma_unbindhdl;
491 490 bp->bus_dma_flush = bop->bus_dma_flush;
492 491 bp->bus_dma_win = bop->bus_dma_win;
493 492 bp->bus_dma_ctl = bop->bus_dma_ctl;
494 493 mutex_exit(&mod_lock);
495 494 return (1);
496 495 }
497 496 } while ((modp = modp->mod_next) != &modules);
498 497 mutex_exit(&mod_lock);
499 498 return (0);
500 499 }
501 500
502 501 /*
503 502 * modify the bus_ops structure of the specified nexus to point to bofi
504 503 * routines, saving the original values in the save_bus_ops structure
505 504 */
506 505
507 506 static int
508 507 modify_bus_ops(char *name, struct bus_ops *bop)
509 508 {
510 509 struct modctl *modp;
511 510 struct modldrv *mp;
512 511 struct bus_ops *bp;
513 512 struct dev_ops *ops;
514 513
515 514 if (ddi_name_to_major(name) == -1)
516 515 return (0);
517 516
518 517 mutex_enter(&mod_lock);
519 518 /*
520 519 * find specified module
521 520 */
522 521 modp = &modules;
523 522 do {
524 523 if (strcmp(name, modp->mod_modname) == 0) {
525 524 if (!modp->mod_linkage) {
526 525 mutex_exit(&mod_lock);
527 526 return (0);
528 527 }
529 528 mp = modp->mod_linkage->ml_linkage[0];
530 529 if (!mp || !mp->drv_dev_ops) {
531 530 mutex_exit(&mod_lock);
532 531 return (0);
533 532 }
534 533 ops = mp->drv_dev_ops;
535 534 bp = ops->devo_bus_ops;
536 535 if (!bp) {
537 536 mutex_exit(&mod_lock);
538 537 return (0);
539 538 }
540 539 if (ops->devo_refcnt == 0) {
541 540 /*
542 541 * If there is no device active for this
543 542 * module then there is nothing to do for bofi.
544 543 */
545 544 mutex_exit(&mod_lock);
546 545 return (0);
547 546 }
548 547 cmn_err(CE_NOTE, "bofi modify bus_ops for %s",
549 548 mp->drv_linkinfo);
550 549 save_bus_ops = *bp;
551 550 bp->bus_intr_op = bop->bus_intr_op;
552 551 bp->bus_post_event = bop->bus_post_event;
553 552 bp->bus_map = bop->bus_map;
554 553 bp->bus_dma_map = bop->bus_dma_map;
555 554 bp->bus_dma_allochdl = bop->bus_dma_allochdl;
556 555 bp->bus_dma_freehdl = bop->bus_dma_freehdl;
557 556 bp->bus_dma_bindhdl = bop->bus_dma_bindhdl;
558 557 bp->bus_dma_unbindhdl = bop->bus_dma_unbindhdl;
559 558 bp->bus_dma_flush = bop->bus_dma_flush;
560 559 bp->bus_dma_win = bop->bus_dma_win;
561 560 bp->bus_dma_ctl = bop->bus_dma_ctl;
562 561 mutex_exit(&mod_lock);
563 562 return (1);
564 563 }
565 564 } while ((modp = modp->mod_next) != &modules);
566 565 mutex_exit(&mod_lock);
567 566 return (0);
568 567 }
569 568
570 569
571 570 int
572 571 _init(void)
573 572 {
574 573 int e;
575 574
576 575 e = ddi_soft_state_init(&statep, sizeof (struct bofi_errent), 1);
577 576 if (e != 0)
578 577 return (e);
579 578 if ((e = mod_install(&modlinkage)) != 0)
580 579 ddi_soft_state_fini(&statep);
581 580 return (e);
582 581 }
583 582
584 583
585 584 int
586 585 _fini(void)
587 586 {
588 587 int e;
589 588
590 589 if ((e = mod_remove(&modlinkage)) != 0)
591 590 return (e);
592 591 ddi_soft_state_fini(&statep);
593 592 return (e);
594 593 }
595 594
596 595
597 596 int
598 597 _info(struct modinfo *modinfop)
599 598 {
600 599 return (mod_info(&modlinkage, modinfop));
601 600 }
602 601
603 602
604 603 static int
605 604 bofi_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
606 605 {
607 606 char *name;
608 607 char buf[80];
609 608 int i;
610 609 int s, ss;
611 610 int size = NAMESIZE;
612 611 int new_string;
613 612 char *ptr;
614 613
615 614 if (cmd != DDI_ATTACH)
616 615 return (DDI_FAILURE);
617 616 /*
618 617 * only one instance - but we clone using the open routine
619 618 */
620 619 if (ddi_get_instance(dip) > 0)
621 620 return (DDI_FAILURE);
622 621
623 622 if (!initialized) {
624 623 if ((name = ddi_get_name(dip)) == NULL)
625 624 return (DDI_FAILURE);
626 625 (void) snprintf(buf, sizeof (buf), "%s,ctl", name);
627 626 if (ddi_create_minor_node(dip, buf, S_IFCHR, 0,
628 627 DDI_PSEUDO, NULL) == DDI_FAILURE)
629 628 return (DDI_FAILURE);
630 629
631 630 if (ddi_get_soft_iblock_cookie(dip, DDI_SOFTINT_MED,
632 631 &bofi_low_cookie) != DDI_SUCCESS) {
633 632 ddi_remove_minor_node(dip, buf);
634 633 return (DDI_FAILURE); /* fail attach */
635 634 }
636 635 /*
637 636 * get nexus name (from conf file)
638 637 */
639 638 if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF, 0,
640 639 "bofi-nexus", nexus_name, &size) != DDI_PROP_SUCCESS) {
641 640 ddi_remove_minor_node(dip, buf);
642 641 return (DDI_FAILURE);
643 642 }
644 643 /*
645 644 * get whether to do dma map kmem private checking
646 645 */
647 646 if ((bofi_range_check = ddi_prop_lookup_string(DDI_DEV_T_ANY,
648 647 dip, 0, "bofi-range-check", &ptr)) != DDI_PROP_SUCCESS)
649 648 bofi_range_check = 0;
650 649 else if (strcmp(ptr, "panic") == 0)
651 650 bofi_range_check = 2;
652 651 else if (strcmp(ptr, "warn") == 0)
653 652 bofi_range_check = 1;
654 653 else
655 654 bofi_range_check = 0;
656 655 ddi_prop_free(ptr);
657 656
658 657 /*
659 658 * get whether to prevent direct access to register
660 659 */
661 660 if ((bofi_ddi_check = ddi_prop_lookup_string(DDI_DEV_T_ANY,
662 661 dip, 0, "bofi-ddi-check", &ptr)) != DDI_PROP_SUCCESS)
663 662 bofi_ddi_check = 0;
664 663 else if (strcmp(ptr, "on") == 0)
665 664 bofi_ddi_check = 1;
666 665 else
667 666 bofi_ddi_check = 0;
668 667 ddi_prop_free(ptr);
669 668
670 669 /*
671 670 * get whether to do copy on ddi_dma_sync
672 671 */
673 672 if ((bofi_sync_check = ddi_prop_lookup_string(DDI_DEV_T_ANY,
674 673 dip, 0, "bofi-sync-check", &ptr)) != DDI_PROP_SUCCESS)
675 674 bofi_sync_check = 0;
676 675 else if (strcmp(ptr, "on") == 0)
677 676 bofi_sync_check = 1;
678 677 else
679 678 bofi_sync_check = 0;
680 679 ddi_prop_free(ptr);
681 680
682 681 /*
683 682 * get driver-under-test names (from conf file)
684 683 */
685 684 size = NAMESIZE;
686 685 if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF, 0,
687 686 "bofi-to-test", driver_list, &size) != DDI_PROP_SUCCESS)
688 687 driver_list[0] = 0;
689 688 /*
690 689 * and convert into a sequence of strings
691 690 */
692 691 driver_list_neg = 1;
693 692 new_string = 1;
694 693 driver_list_size = strlen(driver_list);
695 694 for (i = 0; i < driver_list_size; i++) {
696 695 if (driver_list[i] == ' ') {
697 696 driver_list[i] = '\0';
698 697 new_string = 1;
699 698 } else if (new_string) {
700 699 if (driver_list[i] != '!')
701 700 driver_list_neg = 0;
702 701 new_string = 0;
703 702 }
704 703 }
705 704 /*
706 705 * initialize mutex, lists
707 706 */
708 707 mutex_init(&clone_tab_mutex, NULL, MUTEX_DRIVER,
709 708 NULL);
710 709 /*
 711 710 		 * fake up iblock cookie - need to protect ourselves
712 711 * against drivers that use hilevel interrupts
713 712 */
714 713 ss = spl8();
715 714 s = spl8();
716 715 splx(ss);
717 716 mutex_init(&bofi_mutex, NULL, MUTEX_SPIN, (void *)(uintptr_t)s);
718 717 mutex_init(&bofi_low_mutex, NULL, MUTEX_DRIVER,
719 718 (void *)bofi_low_cookie);
720 719 shadow_list.next = &shadow_list;
721 720 shadow_list.prev = &shadow_list;
722 721 for (i = 0; i < HDL_HASH_TBL_SIZE; i++) {
723 722 hhash_table[i].hnext = &hhash_table[i];
724 723 hhash_table[i].hprev = &hhash_table[i];
725 724 dhash_table[i].dnext = &dhash_table[i];
726 725 dhash_table[i].dprev = &dhash_table[i];
727 726 }
728 727 for (i = 1; i < BOFI_NLINKS; i++)
729 728 bofi_link_array[i].link = &bofi_link_array[i-1];
730 729 bofi_link_freelist = &bofi_link_array[BOFI_NLINKS - 1];
731 730 /*
732 731 * overlay bus_ops structure
733 732 */
734 733 if (modify_bus_ops(nexus_name, &bofi_bus_ops) == 0) {
735 734 ddi_remove_minor_node(dip, buf);
736 735 mutex_destroy(&clone_tab_mutex);
737 736 mutex_destroy(&bofi_mutex);
738 737 mutex_destroy(&bofi_low_mutex);
739 738 return (DDI_FAILURE);
740 739 }
741 740 if (sysevent_evc_bind(FM_ERROR_CHAN, &bofi_error_chan, 0) == 0)
742 741 (void) sysevent_evc_subscribe(bofi_error_chan, "bofi",
743 742 EC_FM, bofi_fm_ereport_callback, NULL, 0);
744 743
745 744 /*
746 745 * save dip for getinfo
747 746 */
748 747 our_dip = dip;
749 748 ddi_report_dev(dip);
750 749 initialized = 1;
751 750 }
752 751 return (DDI_SUCCESS);
753 752 }
754 753
755 754
756 755 static int
757 756 bofi_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
758 757 {
759 758 char *name;
760 759 char buf[80];
761 760
762 761 if (cmd != DDI_DETACH)
763 762 return (DDI_FAILURE);
764 763 if (ddi_get_instance(dip) > 0)
765 764 return (DDI_FAILURE);
766 765 if ((name = ddi_get_name(dip)) == NULL)
767 766 return (DDI_FAILURE);
768 767 (void) snprintf(buf, sizeof (buf), "%s,ctl", name);
769 768 mutex_enter(&bofi_low_mutex);
770 769 mutex_enter(&bofi_mutex);
771 770 /*
772 771 * make sure test bofi is no longer in use
773 772 */
774 773 if (shadow_list.next != &shadow_list || errent_listp != NULL) {
775 774 mutex_exit(&bofi_mutex);
776 775 mutex_exit(&bofi_low_mutex);
777 776 return (DDI_FAILURE);
778 777 }
779 778 mutex_exit(&bofi_mutex);
780 779 mutex_exit(&bofi_low_mutex);
781 780
782 781 /*
783 782 * restore bus_ops structure
784 783 */
785 784 if (reset_bus_ops(nexus_name, &save_bus_ops) == 0)
786 785 return (DDI_FAILURE);
787 786
788 787 (void) sysevent_evc_unbind(bofi_error_chan);
789 788
790 789 mutex_destroy(&clone_tab_mutex);
791 790 mutex_destroy(&bofi_mutex);
792 791 mutex_destroy(&bofi_low_mutex);
793 792 ddi_remove_minor_node(dip, buf);
794 793 our_dip = NULL;
795 794 initialized = 0;
796 795 return (DDI_SUCCESS);
797 796 }
798 797
799 798
800 799 /* ARGSUSED */
801 800 static int
802 801 bofi_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
803 802 {
804 803 dev_t dev = (dev_t)arg;
805 804 int minor = (int)getminor(dev);
806 805 int retval;
807 806
808 807 switch (cmd) {
809 808 case DDI_INFO_DEVT2DEVINFO:
810 809 if (minor != 0 || our_dip == NULL) {
811 810 *result = (void *)NULL;
812 811 retval = DDI_FAILURE;
813 812 } else {
814 813 *result = (void *)our_dip;
815 814 retval = DDI_SUCCESS;
816 815 }
817 816 break;
818 817 case DDI_INFO_DEVT2INSTANCE:
819 818 *result = (void *)0;
820 819 retval = DDI_SUCCESS;
821 820 break;
822 821 default:
823 822 retval = DDI_FAILURE;
824 823 }
825 824 return (retval);
826 825 }
827 826
828 827
829 828 /* ARGSUSED */
830 829 static int
831 830 bofi_open(dev_t *devp, int flag, int otyp, cred_t *credp)
832 831 {
833 832 int minor = (int)getminor(*devp);
834 833 struct bofi_errent *softc;
835 834
836 835 /*
837 836 * only allow open on minor=0 - the clone device
838 837 */
839 838 if (minor != 0)
840 839 return (ENXIO);
841 840 /*
842 841 * fail if not attached
843 842 */
844 843 if (!initialized)
845 844 return (ENXIO);
846 845 /*
847 846 * find a free slot and grab it
848 847 */
849 848 mutex_enter(&clone_tab_mutex);
850 849 for (minor = 1; minor < NCLONES; minor++) {
851 850 if (clone_tab[minor] == 0) {
852 851 clone_tab[minor] = 1;
853 852 break;
854 853 }
855 854 }
856 855 mutex_exit(&clone_tab_mutex);
857 856 if (minor == NCLONES)
858 857 return (EAGAIN);
859 858 /*
860 859 * soft state structure for this clone is used to maintain a list
861 860 * of allocated errdefs so they can be freed on close
862 861 */
863 862 if (ddi_soft_state_zalloc(statep, minor) != DDI_SUCCESS) {
864 863 mutex_enter(&clone_tab_mutex);
865 864 clone_tab[minor] = 0;
866 865 mutex_exit(&clone_tab_mutex);
867 866 return (EAGAIN);
868 867 }
869 868 softc = ddi_get_soft_state(statep, minor);
870 869 softc->cnext = softc;
871 870 softc->cprev = softc;
872 871
873 872 *devp = makedevice(getmajor(*devp), minor);
874 873 return (0);
875 874 }
876 875
877 876
878 877 /* ARGSUSED */
879 878 static int
880 879 bofi_close(dev_t dev, int flag, int otyp, cred_t *credp)
881 880 {
882 881 int minor = (int)getminor(dev);
883 882 struct bofi_errent *softc;
884 883 struct bofi_errent *ep, *next_ep;
885 884
886 885 softc = ddi_get_soft_state(statep, minor);
887 886 if (softc == NULL)
888 887 return (ENXIO);
889 888 /*
890 889 * find list of errdefs and free them off
891 890 */
892 891 for (ep = softc->cnext; ep != softc; ) {
893 892 next_ep = ep->cnext;
894 893 (void) bofi_errdef_free(ep);
895 894 ep = next_ep;
896 895 }
897 896 /*
898 897 * free clone tab slot
899 898 */
900 899 mutex_enter(&clone_tab_mutex);
901 900 clone_tab[minor] = 0;
902 901 mutex_exit(&clone_tab_mutex);
903 902
904 903 ddi_soft_state_free(statep, minor);
905 904 return (0);
906 905 }
907 906
908 907
909 908 /* ARGSUSED */
910 909 static int
911 910 bofi_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
912 911 int *rvalp)
913 912 {
914 913 struct bofi_errent *softc;
915 914 int minor = (int)getminor(dev);
916 915 struct bofi_errdef errdef;
917 916 struct bofi_errctl errctl;
918 917 struct bofi_errstate errstate;
919 918 void *ed_handle;
920 919 struct bofi_get_handles get_handles;
921 920 struct bofi_get_hdl_info hdl_info;
922 921 struct handle_info *hdlip;
923 922 struct handle_info *hib;
924 923
925 924 char *buffer;
926 925 char *bufptr;
927 926 char *endbuf;
928 927 int req_count, count, err;
929 928 char *namep;
930 929 struct bofi_shadow *hp;
931 930 int retval;
932 931 struct bofi_shadow *hhashp;
933 932 int i;
934 933
935 934 switch (cmd) {
936 935 case BOFI_ADD_DEF:
937 936 /*
938 937 * add a new error definition
939 938 */
940 939 #ifdef _MULTI_DATAMODEL
941 940 switch (ddi_model_convert_from(mode & FMODELS)) {
942 941 case DDI_MODEL_ILP32:
943 942 {
944 943 /*
945 944 * For use when a 32 bit app makes a call into a
946 945 * 64 bit ioctl
947 946 */
948 947 struct bofi_errdef32 errdef_32;
949 948
950 949 if (ddi_copyin((void *)arg, &errdef_32,
951 950 sizeof (struct bofi_errdef32), mode)) {
952 951 return (EFAULT);
953 952 }
954 953 errdef.namesize = errdef_32.namesize;
955 954 (void) strncpy(errdef.name, errdef_32.name, NAMESIZE);
956 955 errdef.instance = errdef_32.instance;
957 956 errdef.rnumber = errdef_32.rnumber;
958 957 errdef.offset = errdef_32.offset;
959 958 errdef.len = errdef_32.len;
960 959 errdef.access_type = errdef_32.access_type;
961 960 errdef.access_count = errdef_32.access_count;
962 961 errdef.fail_count = errdef_32.fail_count;
963 962 errdef.acc_chk = errdef_32.acc_chk;
964 963 errdef.optype = errdef_32.optype;
965 964 errdef.operand = errdef_32.operand;
966 965 errdef.log.logsize = errdef_32.log.logsize;
967 966 errdef.log.entries = errdef_32.log.entries;
968 967 errdef.log.flags = errdef_32.log.flags;
969 968 errdef.log.wrapcnt = errdef_32.log.wrapcnt;
970 969 errdef.log.start_time = errdef_32.log.start_time;
971 970 errdef.log.stop_time = errdef_32.log.stop_time;
972 971 errdef.log.logbase =
973 972 (caddr_t)(uintptr_t)errdef_32.log.logbase;
974 973 errdef.errdef_handle = errdef_32.errdef_handle;
975 974 break;
976 975 }
977 976 case DDI_MODEL_NONE:
978 977 if (ddi_copyin((void *)arg, &errdef,
979 978 sizeof (struct bofi_errdef), mode))
980 979 return (EFAULT);
981 980 break;
982 981 }
983 982 #else /* ! _MULTI_DATAMODEL */
984 983 if (ddi_copyin((void *)arg, &errdef,
985 984 sizeof (struct bofi_errdef), mode) != 0)
986 985 return (EFAULT);
987 986 #endif /* _MULTI_DATAMODEL */
988 987 /*
989 988 * do some validation
990 989 */
991 990 if (errdef.fail_count == 0)
992 991 errdef.optype = 0;
993 992 if (errdef.optype != 0) {
994 993 if (errdef.access_type & BOFI_INTR &&
995 994 errdef.optype != BOFI_DELAY_INTR &&
996 995 errdef.optype != BOFI_LOSE_INTR &&
997 996 errdef.optype != BOFI_EXTRA_INTR)
998 997 return (EINVAL);
999 998 if ((errdef.access_type & (BOFI_DMA_RW|BOFI_PIO_R)) &&
1000 999 errdef.optype == BOFI_NO_TRANSFER)
1001 1000 return (EINVAL);
1002 1001 if ((errdef.access_type & (BOFI_PIO_RW)) &&
1003 1002 errdef.optype != BOFI_EQUAL &&
1004 1003 errdef.optype != BOFI_OR &&
1005 1004 errdef.optype != BOFI_XOR &&
1006 1005 errdef.optype != BOFI_AND &&
1007 1006 errdef.optype != BOFI_NO_TRANSFER)
1008 1007 return (EINVAL);
1009 1008 }
1010 1009 /*
1011 1010 * find softstate for this clone, so we can tag
1012 1011 * new errdef on to it
1013 1012 */
1014 1013 softc = ddi_get_soft_state(statep, minor);
1015 1014 if (softc == NULL)
1016 1015 return (ENXIO);
1017 1016 /*
1018 1017 * read in name
1019 1018 */
1020 1019 if (errdef.namesize > NAMESIZE)
1021 1020 return (EINVAL);
1022 1021 namep = kmem_zalloc(errdef.namesize+1, KM_SLEEP);
1023 1022 (void) strncpy(namep, errdef.name, errdef.namesize);
1024 1023
1025 1024 if (bofi_errdef_alloc(&errdef, namep, softc) != DDI_SUCCESS) {
1026 1025 (void) bofi_errdef_free((struct bofi_errent *)
1027 1026 (uintptr_t)errdef.errdef_handle);
1028 1027 kmem_free(namep, errdef.namesize+1);
1029 1028 return (EINVAL);
1030 1029 }
1031 1030 /*
1032 1031 * copy out errdef again, including filled in errdef_handle
1033 1032 */
1034 1033 #ifdef _MULTI_DATAMODEL
1035 1034 switch (ddi_model_convert_from(mode & FMODELS)) {
1036 1035 case DDI_MODEL_ILP32:
1037 1036 {
1038 1037 /*
1039 1038 * For use when a 32 bit app makes a call into a
1040 1039 * 64 bit ioctl
1041 1040 */
1042 1041 struct bofi_errdef32 errdef_32;
1043 1042
1044 1043 errdef_32.namesize = errdef.namesize;
1045 1044 (void) strncpy(errdef_32.name, errdef.name, NAMESIZE);
1046 1045 errdef_32.instance = errdef.instance;
1047 1046 errdef_32.rnumber = errdef.rnumber;
1048 1047 errdef_32.offset = errdef.offset;
1049 1048 errdef_32.len = errdef.len;
1050 1049 errdef_32.access_type = errdef.access_type;
1051 1050 errdef_32.access_count = errdef.access_count;
1052 1051 errdef_32.fail_count = errdef.fail_count;
1053 1052 errdef_32.acc_chk = errdef.acc_chk;
1054 1053 errdef_32.optype = errdef.optype;
1055 1054 errdef_32.operand = errdef.operand;
1056 1055 errdef_32.log.logsize = errdef.log.logsize;
1057 1056 errdef_32.log.entries = errdef.log.entries;
1058 1057 errdef_32.log.flags = errdef.log.flags;
1059 1058 errdef_32.log.wrapcnt = errdef.log.wrapcnt;
1060 1059 errdef_32.log.start_time = errdef.log.start_time;
1061 1060 errdef_32.log.stop_time = errdef.log.stop_time;
1062 1061 errdef_32.log.logbase =
1063 1062 (caddr32_t)(uintptr_t)errdef.log.logbase;
1064 1063 errdef_32.errdef_handle = errdef.errdef_handle;
1065 1064 if (ddi_copyout(&errdef_32, (void *)arg,
1066 1065 sizeof (struct bofi_errdef32), mode) != 0) {
1067 1066 (void) bofi_errdef_free((struct bofi_errent *)
1068 1067 errdef.errdef_handle);
1069 1068 kmem_free(namep, errdef.namesize+1);
1070 1069 return (EFAULT);
1071 1070 }
1072 1071 break;
1073 1072 }
1074 1073 case DDI_MODEL_NONE:
1075 1074 if (ddi_copyout(&errdef, (void *)arg,
1076 1075 sizeof (struct bofi_errdef), mode) != 0) {
1077 1076 (void) bofi_errdef_free((struct bofi_errent *)
1078 1077 errdef.errdef_handle);
1079 1078 kmem_free(namep, errdef.namesize+1);
1080 1079 return (EFAULT);
1081 1080 }
1082 1081 break;
1083 1082 }
1084 1083 #else /* ! _MULTI_DATAMODEL */
1085 1084 if (ddi_copyout(&errdef, (void *)arg,
1086 1085 sizeof (struct bofi_errdef), mode) != 0) {
1087 1086 (void) bofi_errdef_free((struct bofi_errent *)
1088 1087 (uintptr_t)errdef.errdef_handle);
1089 1088 kmem_free(namep, errdef.namesize+1);
1090 1089 return (EFAULT);
1091 1090 }
1092 1091 #endif /* _MULTI_DATAMODEL */
1093 1092 return (0);
1094 1093 case BOFI_DEL_DEF:
1095 1094 /*
1096 1095 * delete existing errdef
1097 1096 */
1098 1097 if (ddi_copyin((void *)arg, &ed_handle,
1099 1098 sizeof (void *), mode) != 0)
1100 1099 return (EFAULT);
1101 1100 return (bofi_errdef_free((struct bofi_errent *)ed_handle));
1102 1101 case BOFI_START:
1103 1102 /*
1104 1103 * start all errdefs corresponding to
1105 1104 * this name and instance
1106 1105 */
1107 1106 if (ddi_copyin((void *)arg, &errctl,
1108 1107 sizeof (struct bofi_errctl), mode) != 0)
1109 1108 return (EFAULT);
1110 1109 /*
1111 1110 * copy in name
1112 1111 */
1113 1112 if (errctl.namesize > NAMESIZE)
1114 1113 return (EINVAL);
1115 1114 namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
1116 1115 (void) strncpy(namep, errctl.name, errctl.namesize);
1117 1116 bofi_start(&errctl, namep);
1118 1117 kmem_free(namep, errctl.namesize+1);
1119 1118 return (0);
1120 1119 case BOFI_STOP:
1121 1120 /*
1122 1121 * stop all errdefs corresponding to
1123 1122 * this name and instance
1124 1123 */
1125 1124 if (ddi_copyin((void *)arg, &errctl,
1126 1125 sizeof (struct bofi_errctl), mode) != 0)
1127 1126 return (EFAULT);
1128 1127 /*
1129 1128 * copy in name
1130 1129 */
1131 1130 if (errctl.namesize > NAMESIZE)
1132 1131 return (EINVAL);
1133 1132 namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
1134 1133 (void) strncpy(namep, errctl.name, errctl.namesize);
1135 1134 bofi_stop(&errctl, namep);
1136 1135 kmem_free(namep, errctl.namesize+1);
1137 1136 return (0);
1138 1137 case BOFI_BROADCAST:
1139 1138 /*
1140 1139 * wakeup all errdefs corresponding to
1141 1140 * this name and instance
1142 1141 */
1143 1142 if (ddi_copyin((void *)arg, &errctl,
1144 1143 sizeof (struct bofi_errctl), mode) != 0)
1145 1144 return (EFAULT);
1146 1145 /*
1147 1146 * copy in name
1148 1147 */
1149 1148 if (errctl.namesize > NAMESIZE)
1150 1149 return (EINVAL);
1151 1150 namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
1152 1151 (void) strncpy(namep, errctl.name, errctl.namesize);
1153 1152 bofi_broadcast(&errctl, namep);
1154 1153 kmem_free(namep, errctl.namesize+1);
1155 1154 return (0);
1156 1155 case BOFI_CLEAR_ACC_CHK:
1157 1156 /*
1158 1157 * clear "acc_chk" for all errdefs corresponding to
1159 1158 * this name and instance
1160 1159 */
1161 1160 if (ddi_copyin((void *)arg, &errctl,
1162 1161 sizeof (struct bofi_errctl), mode) != 0)
1163 1162 return (EFAULT);
1164 1163 /*
1165 1164 * copy in name
1166 1165 */
1167 1166 if (errctl.namesize > NAMESIZE)
1168 1167 return (EINVAL);
1169 1168 namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
1170 1169 (void) strncpy(namep, errctl.name, errctl.namesize);
1171 1170 bofi_clear_acc_chk(&errctl, namep);
1172 1171 kmem_free(namep, errctl.namesize+1);
1173 1172 return (0);
1174 1173 case BOFI_CLEAR_ERRORS:
1175 1174 /*
1176 1175 * set "fail_count" to 0 for all errdefs corresponding to
1177 1176 * this name and instance whose "access_count"
1178 1177 * has expired.
1179 1178 */
1180 1179 if (ddi_copyin((void *)arg, &errctl,
1181 1180 sizeof (struct bofi_errctl), mode) != 0)
1182 1181 return (EFAULT);
1183 1182 /*
1184 1183 * copy in name
1185 1184 */
1186 1185 if (errctl.namesize > NAMESIZE)
1187 1186 return (EINVAL);
1188 1187 namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
1189 1188 (void) strncpy(namep, errctl.name, errctl.namesize);
1190 1189 bofi_clear_errors(&errctl, namep);
1191 1190 kmem_free(namep, errctl.namesize+1);
1192 1191 return (0);
1193 1192 case BOFI_CLEAR_ERRDEFS:
1194 1193 /*
1195 1194 * set "access_count" and "fail_count" to 0 for all errdefs
1196 1195 * corresponding to this name and instance
1197 1196 */
1198 1197 if (ddi_copyin((void *)arg, &errctl,
1199 1198 sizeof (struct bofi_errctl), mode) != 0)
1200 1199 return (EFAULT);
1201 1200 /*
1202 1201 * copy in name
1203 1202 */
1204 1203 if (errctl.namesize > NAMESIZE)
1205 1204 return (EINVAL);
1206 1205 namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
1207 1206 (void) strncpy(namep, errctl.name, errctl.namesize);
1208 1207 bofi_clear_errdefs(&errctl, namep);
1209 1208 kmem_free(namep, errctl.namesize+1);
1210 1209 return (0);
1211 1210 case BOFI_CHK_STATE:
1212 1211 {
1213 1212 struct acc_log_elem *klg;
1214 1213 size_t uls;
1215 1214 /*
1216 1215 * get state for this errdef - read in dummy errstate
1217 1216 * with just the errdef_handle filled in
1218 1217 */
1219 1218 #ifdef _MULTI_DATAMODEL
1220 1219 switch (ddi_model_convert_from(mode & FMODELS)) {
1221 1220 case DDI_MODEL_ILP32:
1222 1221 {
1223 1222 /*
1224 1223 * For use when a 32 bit app makes a call into a
1225 1224 * 64 bit ioctl
1226 1225 */
1227 1226 struct bofi_errstate32 errstate_32;
1228 1227
1229 1228 if (ddi_copyin((void *)arg, &errstate_32,
1230 1229 sizeof (struct bofi_errstate32), mode) != 0) {
1231 1230 return (EFAULT);
1232 1231 }
1233 1232 errstate.fail_time = errstate_32.fail_time;
1234 1233 errstate.msg_time = errstate_32.msg_time;
1235 1234 errstate.access_count = errstate_32.access_count;
1236 1235 errstate.fail_count = errstate_32.fail_count;
1237 1236 errstate.acc_chk = errstate_32.acc_chk;
1238 1237 errstate.errmsg_count = errstate_32.errmsg_count;
1239 1238 (void) strncpy(errstate.buffer, errstate_32.buffer,
1240 1239 ERRMSGSIZE);
1241 1240 errstate.severity = errstate_32.severity;
1242 1241 errstate.log.logsize = errstate_32.log.logsize;
1243 1242 errstate.log.entries = errstate_32.log.entries;
1244 1243 errstate.log.flags = errstate_32.log.flags;
1245 1244 errstate.log.wrapcnt = errstate_32.log.wrapcnt;
1246 1245 errstate.log.start_time = errstate_32.log.start_time;
1247 1246 errstate.log.stop_time = errstate_32.log.stop_time;
1248 1247 errstate.log.logbase =
1249 1248 (caddr_t)(uintptr_t)errstate_32.log.logbase;
1250 1249 errstate.errdef_handle = errstate_32.errdef_handle;
1251 1250 break;
1252 1251 }
1253 1252 case DDI_MODEL_NONE:
1254 1253 if (ddi_copyin((void *)arg, &errstate,
1255 1254 sizeof (struct bofi_errstate), mode) != 0)
1256 1255 return (EFAULT);
1257 1256 break;
1258 1257 }
1259 1258 #else /* ! _MULTI_DATAMODEL */
1260 1259 if (ddi_copyin((void *)arg, &errstate,
1261 1260 sizeof (struct bofi_errstate), mode) != 0)
1262 1261 return (EFAULT);
1263 1262 #endif /* _MULTI_DATAMODEL */
1264 1263 if ((retval = bofi_errdef_check(&errstate, &klg)) == EINVAL)
1265 1264 return (EINVAL);
1266 1265 /*
1267 1266 * copy out real errstate structure
1268 1267 */
1269 1268 uls = errstate.log.logsize;
1270 1269 if (errstate.log.entries > uls && uls)
1271 1270 /* insufficient user memory */
1272 1271 errstate.log.entries = uls;
1273 1272 /* always pass back a time */
1274 1273 if (errstate.log.stop_time == 0ul)
1275 1274 (void) drv_getparm(TIME, &(errstate.log.stop_time));
1276 1275
1277 1276 #ifdef _MULTI_DATAMODEL
1278 1277 switch (ddi_model_convert_from(mode & FMODELS)) {
1279 1278 case DDI_MODEL_ILP32:
1280 1279 {
1281 1280 /*
1282 1281 * For use when a 32 bit app makes a call into a
1283 1282 * 64 bit ioctl
1284 1283 */
1285 1284 struct bofi_errstate32 errstate_32;
1286 1285
1287 1286 errstate_32.fail_time = errstate.fail_time;
1288 1287 errstate_32.msg_time = errstate.msg_time;
1289 1288 errstate_32.access_count = errstate.access_count;
1290 1289 errstate_32.fail_count = errstate.fail_count;
1291 1290 errstate_32.acc_chk = errstate.acc_chk;
1292 1291 errstate_32.errmsg_count = errstate.errmsg_count;
1293 1292 (void) strncpy(errstate_32.buffer, errstate.buffer,
1294 1293 ERRMSGSIZE);
1295 1294 errstate_32.severity = errstate.severity;
1296 1295 errstate_32.log.logsize = errstate.log.logsize;
1297 1296 errstate_32.log.entries = errstate.log.entries;
1298 1297 errstate_32.log.flags = errstate.log.flags;
1299 1298 errstate_32.log.wrapcnt = errstate.log.wrapcnt;
1300 1299 errstate_32.log.start_time = errstate.log.start_time;
1301 1300 errstate_32.log.stop_time = errstate.log.stop_time;
1302 1301 errstate_32.log.logbase =
1303 1302 (caddr32_t)(uintptr_t)errstate.log.logbase;
1304 1303 errstate_32.errdef_handle = errstate.errdef_handle;
1305 1304 if (ddi_copyout(&errstate_32, (void *)arg,
1306 1305 sizeof (struct bofi_errstate32), mode) != 0)
1307 1306 return (EFAULT);
1308 1307 break;
1309 1308 }
1310 1309 case DDI_MODEL_NONE:
1311 1310 if (ddi_copyout(&errstate, (void *)arg,
1312 1311 sizeof (struct bofi_errstate), mode) != 0)
1313 1312 return (EFAULT);
1314 1313 break;
1315 1314 }
1316 1315 #else /* ! _MULTI_DATAMODEL */
1317 1316 if (ddi_copyout(&errstate, (void *)arg,
1318 1317 sizeof (struct bofi_errstate), mode) != 0)
1319 1318 return (EFAULT);
1320 1319 #endif /* _MULTI_DATAMODEL */
1321 1320 if (uls && errstate.log.entries &&
1322 1321 ddi_copyout(klg, errstate.log.logbase,
1323 1322 errstate.log.entries * sizeof (struct acc_log_elem),
1324 1323 mode) != 0) {
1325 1324 return (EFAULT);
1326 1325 }
1327 1326 return (retval);
1328 1327 }
1329 1328 case BOFI_CHK_STATE_W:
1330 1329 {
1331 1330 struct acc_log_elem *klg;
1332 1331 size_t uls;
1333 1332 /*
1334 1333 * get state for this errdef - read in dummy errstate
1335 1334 * with just the errdef_handle filled in. Then wait for
1336 1335 * a ddi_report_fault message to come back
1337 1336 */
1338 1337 #ifdef _MULTI_DATAMODEL
1339 1338 switch (ddi_model_convert_from(mode & FMODELS)) {
1340 1339 case DDI_MODEL_ILP32:
1341 1340 {
1342 1341 /*
1343 1342 * For use when a 32 bit app makes a call into a
1344 1343 * 64 bit ioctl
1345 1344 */
1346 1345 struct bofi_errstate32 errstate_32;
1347 1346
1348 1347 if (ddi_copyin((void *)arg, &errstate_32,
1349 1348 sizeof (struct bofi_errstate32), mode) != 0) {
1350 1349 return (EFAULT);
1351 1350 }
1352 1351 errstate.fail_time = errstate_32.fail_time;
1353 1352 errstate.msg_time = errstate_32.msg_time;
1354 1353 errstate.access_count = errstate_32.access_count;
1355 1354 errstate.fail_count = errstate_32.fail_count;
1356 1355 errstate.acc_chk = errstate_32.acc_chk;
1357 1356 errstate.errmsg_count = errstate_32.errmsg_count;
1358 1357 (void) strncpy(errstate.buffer, errstate_32.buffer,
1359 1358 ERRMSGSIZE);
1360 1359 errstate.severity = errstate_32.severity;
1361 1360 errstate.log.logsize = errstate_32.log.logsize;
1362 1361 errstate.log.entries = errstate_32.log.entries;
1363 1362 errstate.log.flags = errstate_32.log.flags;
1364 1363 errstate.log.wrapcnt = errstate_32.log.wrapcnt;
1365 1364 errstate.log.start_time = errstate_32.log.start_time;
1366 1365 errstate.log.stop_time = errstate_32.log.stop_time;
1367 1366 errstate.log.logbase =
1368 1367 (caddr_t)(uintptr_t)errstate_32.log.logbase;
1369 1368 errstate.errdef_handle = errstate_32.errdef_handle;
1370 1369 break;
1371 1370 }
1372 1371 case DDI_MODEL_NONE:
1373 1372 if (ddi_copyin((void *)arg, &errstate,
1374 1373 sizeof (struct bofi_errstate), mode) != 0)
1375 1374 return (EFAULT);
1376 1375 break;
1377 1376 }
1378 1377 #else /* ! _MULTI_DATAMODEL */
1379 1378 if (ddi_copyin((void *)arg, &errstate,
1380 1379 sizeof (struct bofi_errstate), mode) != 0)
1381 1380 return (EFAULT);
1382 1381 #endif /* _MULTI_DATAMODEL */
1383 1382 if ((retval = bofi_errdef_check_w(&errstate, &klg)) == EINVAL)
1384 1383 return (EINVAL);
1385 1384 /*
1386 1385 * copy out real errstate structure
1387 1386 */
1388 1387 uls = errstate.log.logsize;
1389 1388 uls = errstate.log.logsize;
1390 1389 if (errstate.log.entries > uls && uls)
1391 1390 /* insufficient user memory */
1392 1391 errstate.log.entries = uls;
1393 1392 /* always pass back a time */
1394 1393 if (errstate.log.stop_time == 0ul)
1395 1394 (void) drv_getparm(TIME, &(errstate.log.stop_time));
1396 1395
1397 1396 #ifdef _MULTI_DATAMODEL
1398 1397 switch (ddi_model_convert_from(mode & FMODELS)) {
1399 1398 case DDI_MODEL_ILP32:
1400 1399 {
1401 1400 /*
1402 1401 * For use when a 32 bit app makes a call into a
1403 1402 * 64 bit ioctl
1404 1403 */
1405 1404 struct bofi_errstate32 errstate_32;
1406 1405
1407 1406 errstate_32.fail_time = errstate.fail_time;
1408 1407 errstate_32.msg_time = errstate.msg_time;
1409 1408 errstate_32.access_count = errstate.access_count;
1410 1409 errstate_32.fail_count = errstate.fail_count;
1411 1410 errstate_32.acc_chk = errstate.acc_chk;
1412 1411 errstate_32.errmsg_count = errstate.errmsg_count;
1413 1412 (void) strncpy(errstate_32.buffer, errstate.buffer,
1414 1413 ERRMSGSIZE);
1415 1414 errstate_32.severity = errstate.severity;
1416 1415 errstate_32.log.logsize = errstate.log.logsize;
1417 1416 errstate_32.log.entries = errstate.log.entries;
1418 1417 errstate_32.log.flags = errstate.log.flags;
1419 1418 errstate_32.log.wrapcnt = errstate.log.wrapcnt;
1420 1419 errstate_32.log.start_time = errstate.log.start_time;
1421 1420 errstate_32.log.stop_time = errstate.log.stop_time;
1422 1421 errstate_32.log.logbase =
1423 1422 (caddr32_t)(uintptr_t)errstate.log.logbase;
1424 1423 errstate_32.errdef_handle = errstate.errdef_handle;
1425 1424 if (ddi_copyout(&errstate_32, (void *)arg,
1426 1425 sizeof (struct bofi_errstate32), mode) != 0)
1427 1426 return (EFAULT);
1428 1427 break;
1429 1428 }
1430 1429 case DDI_MODEL_NONE:
1431 1430 if (ddi_copyout(&errstate, (void *)arg,
1432 1431 sizeof (struct bofi_errstate), mode) != 0)
1433 1432 return (EFAULT);
1434 1433 break;
1435 1434 }
1436 1435 #else /* ! _MULTI_DATAMODEL */
1437 1436 if (ddi_copyout(&errstate, (void *)arg,
1438 1437 sizeof (struct bofi_errstate), mode) != 0)
1439 1438 return (EFAULT);
1440 1439 #endif /* _MULTI_DATAMODEL */
1441 1440
1442 1441 if (uls && errstate.log.entries &&
1443 1442 ddi_copyout(klg, errstate.log.logbase,
1444 1443 errstate.log.entries * sizeof (struct acc_log_elem),
1445 1444 mode) != 0) {
1446 1445 return (EFAULT);
1447 1446 }
1448 1447 return (retval);
1449 1448 }
1450 1449 case BOFI_GET_HANDLES:
1451 1450 /*
1452 1451 * display existing handles
1453 1452 */
1454 1453 #ifdef _MULTI_DATAMODEL
1455 1454 switch (ddi_model_convert_from(mode & FMODELS)) {
1456 1455 case DDI_MODEL_ILP32:
1457 1456 {
1458 1457 /*
1459 1458 * For use when a 32 bit app makes a call into a
1460 1459 * 64 bit ioctl
1461 1460 */
1462 1461 struct bofi_get_handles32 get_handles_32;
1463 1462
1464 1463 if (ddi_copyin((void *)arg, &get_handles_32,
1465 1464 sizeof (get_handles_32), mode) != 0) {
1466 1465 return (EFAULT);
1467 1466 }
1468 1467 get_handles.namesize = get_handles_32.namesize;
1469 1468 (void) strncpy(get_handles.name, get_handles_32.name,
1470 1469 NAMESIZE);
1471 1470 get_handles.instance = get_handles_32.instance;
1472 1471 get_handles.count = get_handles_32.count;
1473 1472 get_handles.buffer =
1474 1473 (caddr_t)(uintptr_t)get_handles_32.buffer;
1475 1474 break;
1476 1475 }
1477 1476 case DDI_MODEL_NONE:
1478 1477 if (ddi_copyin((void *)arg, &get_handles,
1479 1478 sizeof (get_handles), mode) != 0)
1480 1479 return (EFAULT);
1481 1480 break;
1482 1481 }
1483 1482 #else /* ! _MULTI_DATAMODEL */
1484 1483 if (ddi_copyin((void *)arg, &get_handles,
1485 1484 sizeof (get_handles), mode) != 0)
1486 1485 return (EFAULT);
1487 1486 #endif /* _MULTI_DATAMODEL */
1488 1487 /*
1489 1488 * read in name
1490 1489 */
1491 1490 if (get_handles.namesize > NAMESIZE)
1492 1491 return (EINVAL);
1493 1492 namep = kmem_zalloc(get_handles.namesize+1, KM_SLEEP);
1494 1493 (void) strncpy(namep, get_handles.name, get_handles.namesize);
1495 1494 req_count = get_handles.count;
1496 1495 bufptr = buffer = kmem_zalloc(req_count, KM_SLEEP);
1497 1496 endbuf = bufptr + req_count;
1498 1497 /*
1499 1498 * display existing handles
1500 1499 */
1501 1500 mutex_enter(&bofi_low_mutex);
1502 1501 mutex_enter(&bofi_mutex);
1503 1502 for (i = 0; i < HDL_HASH_TBL_SIZE; i++) {
1504 1503 hhashp = &hhash_table[i];
1505 1504 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext) {
1506 1505 if (!driver_under_test(hp->dip))
1507 1506 continue;
1508 1507 if (ddi_name_to_major(ddi_get_name(hp->dip)) !=
1509 1508 ddi_name_to_major(namep))
1510 1509 continue;
1511 1510 if (hp->instance != get_handles.instance)
1512 1511 continue;
1513 1512 /*
1514 1513 * print information per handle - note that
1515 1514 * DMA* means an unbound DMA handle
1516 1515 */
1517 1516 (void) snprintf(bufptr, (size_t)(endbuf-bufptr),
1518 1517 " %s %d %s ", hp->name, hp->instance,
1519 1518 (hp->type == BOFI_INT_HDL) ? "INTR" :
1520 1519 (hp->type == BOFI_ACC_HDL) ? "PIO" :
1521 1520 (hp->type == BOFI_DMA_HDL) ? "DMA" :
1522 1521 (hp->hparrayp != NULL) ? "DVMA" : "DMA*");
1523 1522 bufptr += strlen(bufptr);
1524 1523 if (hp->type == BOFI_ACC_HDL) {
1525 1524 if (hp->len == INT_MAX - hp->offset)
1526 1525 (void) snprintf(bufptr,
1527 1526 (size_t)(endbuf-bufptr),
1528 1527 "reg set %d off 0x%llx\n",
1529 1528 hp->rnumber, hp->offset);
1530 1529 else
1531 1530 (void) snprintf(bufptr,
1532 1531 (size_t)(endbuf-bufptr),
1533 1532 "reg set %d off 0x%llx"
1534 1533 " len 0x%llx\n",
1535 1534 hp->rnumber, hp->offset,
1536 1535 hp->len);
1537 1536 } else if (hp->type == BOFI_DMA_HDL)
1538 1537 (void) snprintf(bufptr,
1539 1538 (size_t)(endbuf-bufptr),
1540 1539 "handle no %d len 0x%llx"
1541 1540 " addr 0x%p\n", hp->rnumber,
1542 1541 hp->len, (void *)hp->addr);
1543 1542 else if (hp->type == BOFI_NULL &&
1544 1543 hp->hparrayp == NULL)
1545 1544 (void) snprintf(bufptr,
1546 1545 (size_t)(endbuf-bufptr),
1547 1546 "handle no %d\n", hp->rnumber);
1548 1547 else
1549 1548 (void) snprintf(bufptr,
1550 1549 (size_t)(endbuf-bufptr), "\n");
1551 1550 bufptr += strlen(bufptr);
1552 1551 }
1553 1552 }
1554 1553 mutex_exit(&bofi_mutex);
1555 1554 mutex_exit(&bofi_low_mutex);
1556 1555 err = ddi_copyout(buffer, get_handles.buffer, req_count, mode);
1557 1556 kmem_free(namep, get_handles.namesize+1);
1558 1557 kmem_free(buffer, req_count);
1559 1558 if (err != 0)
1560 1559 return (EFAULT);
1561 1560 else
1562 1561 return (0);
1563 1562 case BOFI_GET_HANDLE_INFO:
1564 1563 /*
1565 1564 * display existing handles
1566 1565 */
1567 1566 #ifdef _MULTI_DATAMODEL
1568 1567 switch (ddi_model_convert_from(mode & FMODELS)) {
1569 1568 case DDI_MODEL_ILP32:
1570 1569 {
1571 1570 /*
1572 1571 * For use when a 32 bit app makes a call into a
1573 1572 * 64 bit ioctl
1574 1573 */
1575 1574 struct bofi_get_hdl_info32 hdl_info_32;
1576 1575
1577 1576 if (ddi_copyin((void *)arg, &hdl_info_32,
1578 1577 sizeof (hdl_info_32), mode)) {
1579 1578 return (EFAULT);
1580 1579 }
1581 1580 hdl_info.namesize = hdl_info_32.namesize;
1582 1581 (void) strncpy(hdl_info.name, hdl_info_32.name,
1583 1582 NAMESIZE);
1584 1583 hdl_info.count = hdl_info_32.count;
1585 1584 hdl_info.hdli = (caddr_t)(uintptr_t)hdl_info_32.hdli;
1586 1585 break;
1587 1586 }
1588 1587 case DDI_MODEL_NONE:
1589 1588 if (ddi_copyin((void *)arg, &hdl_info,
1590 1589 sizeof (hdl_info), mode))
1591 1590 return (EFAULT);
1592 1591 break;
1593 1592 }
1594 1593 #else /* ! _MULTI_DATAMODEL */
1595 1594 if (ddi_copyin((void *)arg, &hdl_info,
1596 1595 sizeof (hdl_info), mode))
1597 1596 return (EFAULT);
1598 1597 #endif /* _MULTI_DATAMODEL */
1599 1598 if (hdl_info.namesize > NAMESIZE)
1600 1599 return (EINVAL);
1601 1600 namep = kmem_zalloc(hdl_info.namesize + 1, KM_SLEEP);
1602 1601 (void) strncpy(namep, hdl_info.name, hdl_info.namesize);
1603 1602 req_count = hdl_info.count;
1604 1603 count = hdl_info.count = 0; /* the actual no of handles */
1605 1604 if (req_count > 0) {
1606 1605 hib = hdlip =
1607 1606 kmem_zalloc(req_count * sizeof (struct handle_info),
1608 1607 KM_SLEEP);
1609 1608 } else {
1610 1609 hib = hdlip = 0;
1611 1610 req_count = hdl_info.count = 0;
1612 1611 }
1613 1612
1614 1613 /*
1615 1614 * display existing handles
1616 1615 */
1617 1616 mutex_enter(&bofi_low_mutex);
1618 1617 mutex_enter(&bofi_mutex);
1619 1618 for (i = 0; i < HDL_HASH_TBL_SIZE; i++) {
1620 1619 hhashp = &hhash_table[i];
1621 1620 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext) {
1622 1621 if (!driver_under_test(hp->dip) ||
1623 1622 ddi_name_to_major(ddi_get_name(hp->dip)) !=
1624 1623 ddi_name_to_major(namep) ||
1625 1624 ++(hdl_info.count) > req_count ||
1626 1625 count == req_count)
1627 1626 continue;
1628 1627
1629 1628 hdlip->instance = hp->instance;
1630 1629 hdlip->rnumber = hp->rnumber;
1631 1630 switch (hp->type) {
1632 1631 case BOFI_ACC_HDL:
1633 1632 hdlip->access_type = BOFI_PIO_RW;
1634 1633 hdlip->offset = hp->offset;
1635 1634 hdlip->len = hp->len;
1636 1635 break;
1637 1636 case BOFI_DMA_HDL:
1638 1637 hdlip->access_type = 0;
1639 1638 if (hp->flags & DDI_DMA_WRITE)
1640 1639 hdlip->access_type |=
1641 1640 BOFI_DMA_W;
1642 1641 if (hp->flags & DDI_DMA_READ)
1643 1642 hdlip->access_type |=
1644 1643 BOFI_DMA_R;
1645 1644 hdlip->len = hp->len;
1646 1645 hdlip->addr_cookie =
1647 1646 (uint64_t)(uintptr_t)hp->addr;
1648 1647 break;
1649 1648 case BOFI_INT_HDL:
1650 1649 hdlip->access_type = BOFI_INTR;
1651 1650 break;
1652 1651 default:
1653 1652 hdlip->access_type = 0;
1654 1653 break;
1655 1654 }
1656 1655 hdlip++;
1657 1656 count++;
1658 1657 }
1659 1658 }
1660 1659 mutex_exit(&bofi_mutex);
1661 1660 mutex_exit(&bofi_low_mutex);
1662 1661 err = 0;
1663 1662 #ifdef _MULTI_DATAMODEL
1664 1663 switch (ddi_model_convert_from(mode & FMODELS)) {
1665 1664 case DDI_MODEL_ILP32:
1666 1665 {
1667 1666 /*
1668 1667 * For use when a 32 bit app makes a call into a
1669 1668 * 64 bit ioctl
1670 1669 */
1671 1670 struct bofi_get_hdl_info32 hdl_info_32;
1672 1671
1673 1672 hdl_info_32.namesize = hdl_info.namesize;
1674 1673 (void) strncpy(hdl_info_32.name, hdl_info.name,
1675 1674 NAMESIZE);
1676 1675 hdl_info_32.count = hdl_info.count;
1677 1676 hdl_info_32.hdli = (caddr32_t)(uintptr_t)hdl_info.hdli;
1678 1677 if (ddi_copyout(&hdl_info_32, (void *)arg,
1679 1678 sizeof (hdl_info_32), mode) != 0) {
1680 1679 kmem_free(namep, hdl_info.namesize+1);
1681 1680 if (req_count > 0)
1682 1681 kmem_free(hib,
1683 1682 req_count * sizeof (*hib));
1684 1683 return (EFAULT);
1685 1684 }
1686 1685 break;
1687 1686 }
1688 1687 case DDI_MODEL_NONE:
1689 1688 if (ddi_copyout(&hdl_info, (void *)arg,
1690 1689 sizeof (hdl_info), mode) != 0) {
1691 1690 kmem_free(namep, hdl_info.namesize+1);
1692 1691 if (req_count > 0)
1693 1692 kmem_free(hib,
1694 1693 req_count * sizeof (*hib));
1695 1694 return (EFAULT);
1696 1695 }
1697 1696 break;
1698 1697 }
1699 1698 #else /* ! _MULTI_DATAMODEL */
1700 1699 if (ddi_copyout(&hdl_info, (void *)arg,
1701 1700 sizeof (hdl_info), mode) != 0) {
1702 1701 kmem_free(namep, hdl_info.namesize+1);
1703 1702 if (req_count > 0)
1704 1703 kmem_free(hib, req_count * sizeof (*hib));
1705 1704 return (EFAULT);
1706 1705 }
1707 1706 #endif /* ! _MULTI_DATAMODEL */
1708 1707 if (count > 0) {
1709 1708 if (ddi_copyout(hib, hdl_info.hdli,
1710 1709 count * sizeof (*hib), mode) != 0) {
1711 1710 kmem_free(namep, hdl_info.namesize+1);
1712 1711 if (req_count > 0)
1713 1712 kmem_free(hib,
1714 1713 req_count * sizeof (*hib));
1715 1714 return (EFAULT);
1716 1715 }
1717 1716 }
1718 1717 kmem_free(namep, hdl_info.namesize+1);
1719 1718 if (req_count > 0)
1720 1719 kmem_free(hib, req_count * sizeof (*hib));
1721 1720 return (err);
1722 1721 default:
1723 1722 return (ENOTTY);
1724 1723 }
1725 1724 }
1726 1725
1727 1726
1728 1727 /*
1729 1728 * add a new error definition
1730 1729 */
1731 1730 static int
1732 1731 bofi_errdef_alloc(struct bofi_errdef *errdefp, char *namep,
1733 1732 struct bofi_errent *softc)
1734 1733 {
1735 1734 struct bofi_errent *ep;
1736 1735 struct bofi_shadow *hp;
1737 1736 struct bofi_link *lp;
1738 1737
1739 1738 /*
1740 1739 * allocate errdef structure and put on in-use list
1741 1740 */
1742 1741 ep = kmem_zalloc(sizeof (struct bofi_errent), KM_SLEEP);
1743 1742 ep->errdef = *errdefp;
1744 1743 ep->name = namep;
1745 1744 ep->errdef.errdef_handle = (uint64_t)(uintptr_t)ep;
1746 1745 ep->errstate.severity = DDI_SERVICE_RESTORED;
1747 1746 ep->errstate.errdef_handle = (uint64_t)(uintptr_t)ep;
1748 1747 cv_init(&ep->cv, NULL, CV_DRIVER, NULL);
1749 1748 /*
1750 1749 * allocate space for logging
1751 1750 */
1752 1751 ep->errdef.log.entries = 0;
1753 1752 ep->errdef.log.wrapcnt = 0;
1754 1753 if (ep->errdef.access_type & BOFI_LOG)
1755 1754 ep->logbase = kmem_alloc(sizeof (struct acc_log_elem) *
1756 1755 ep->errdef.log.logsize, KM_SLEEP);
1757 1756 else
1758 1757 ep->logbase = NULL;
1759 1758 /*
1760 1759 * put on in-use list
1761 1760 */
1762 1761 mutex_enter(&bofi_low_mutex);
1763 1762 mutex_enter(&bofi_mutex);
1764 1763 ep->next = errent_listp;
1765 1764 errent_listp = ep;
1766 1765 /*
1767 1766 * and add it to the per-clone list
1768 1767 */
1769 1768 ep->cnext = softc->cnext;
1770 1769 softc->cnext->cprev = ep;
1771 1770 ep->cprev = softc;
1772 1771 softc->cnext = ep;
1773 1772
1774 1773 /*
1775 1774 * look for corresponding shadow handle structures and if we find any
1776 1775 * tag this errdef structure on to their link lists.
1777 1776 */
1778 1777 for (hp = shadow_list.next; hp != &shadow_list; hp = hp->next) {
1779 1778 if (ddi_name_to_major(hp->name) == ddi_name_to_major(namep) &&
1780 1779 hp->instance == errdefp->instance &&
1781 1780 (((errdefp->access_type & BOFI_DMA_RW) &&
1782 1781 (ep->errdef.rnumber == -1 ||
1783 1782 hp->rnumber == ep->errdef.rnumber) &&
1784 1783 hp->type == BOFI_DMA_HDL &&
1785 1784 (((uintptr_t)(hp->addr + ep->errdef.offset +
1786 1785 ep->errdef.len) & ~LLSZMASK) >
1787 1786 ((uintptr_t)((hp->addr + ep->errdef.offset) +
1788 1787 LLSZMASK) & ~LLSZMASK))) ||
1789 1788 ((errdefp->access_type & BOFI_INTR) &&
1790 1789 hp->type == BOFI_INT_HDL) ||
1791 1790 ((errdefp->access_type & BOFI_PIO_RW) &&
1792 1791 hp->type == BOFI_ACC_HDL &&
1793 1792 (errdefp->rnumber == -1 ||
1794 1793 hp->rnumber == errdefp->rnumber) &&
1795 1794 (errdefp->len == 0 ||
1796 1795 hp->offset < errdefp->offset + errdefp->len) &&
1797 1796 hp->offset + hp->len > errdefp->offset))) {
1798 1797 lp = bofi_link_freelist;
1799 1798 if (lp != NULL) {
1800 1799 bofi_link_freelist = lp->link;
1801 1800 lp->errentp = ep;
1802 1801 lp->link = hp->link;
1803 1802 hp->link = lp;
1804 1803 }
1805 1804 }
1806 1805 }
1807 1806 errdefp->errdef_handle = (uint64_t)(uintptr_t)ep;
1808 1807 mutex_exit(&bofi_mutex);
1809 1808 mutex_exit(&bofi_low_mutex);
1810 1809 ep->softintr_id = NULL;
1811 1810 return (ddi_add_softintr(our_dip, DDI_SOFTINT_MED, &ep->softintr_id,
1812 1811 NULL, NULL, bofi_signal, (caddr_t)&ep->errdef));
1813 1812 }
1814 1813
1815 1814
/*
 * delete existing errdef
 *
 * ep is a handle that originated in userland, so it is validated
 * against the in-use errdef list before anything is modified; EINVAL
 * is returned if it is not found.  On success the errdef is unlinked
 * from the global in-use list, its per-clone list and every shadow
 * handle's link list, and all its resources are released.
 */
static int
bofi_errdef_free(struct bofi_errent *ep)
{
	struct bofi_errent *hep, *prev_hep;
	struct bofi_link *lp, *prev_lp, *next_lp;
	struct bofi_shadow *hp;

	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	/*
	 * don't just assume its a valid ep - check that its on the
	 * in-use list
	 */
	prev_hep = NULL;
	for (hep = errent_listp; hep != NULL; ) {
		if (hep == ep)
			break;
		prev_hep = hep;
		hep = hep->next;
	}
	if (hep == NULL) {
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
		return (EINVAL);
	}
	/*
	 * found it - delete from in-use list
	 */

	if (prev_hep)
		prev_hep->next = hep->next;
	else
		errent_listp = hep->next;
	/*
	 * and take it off the per-clone list
	 */
	hep->cnext->cprev = hep->cprev;
	hep->cprev->cnext = hep->cnext;
	/*
	 * see if we are on any shadow handle link lists - and if we
	 * are then take us off
	 */
	for (hp = shadow_list.next; hp != &shadow_list; hp = hp->next) {
		prev_lp = NULL;
		for (lp = hp->link; lp != NULL; ) {
			if (lp->errentp == ep) {
				if (prev_lp)
					prev_lp->link = lp->link;
				else
					hp->link = lp->link;
				next_lp = lp->link;
				/* return the link element to the freelist */
				lp->link = bofi_link_freelist;
				bofi_link_freelist = lp;
				lp = next_lp;
			} else {
				prev_lp = lp;
				lp = lp->link;
			}
		}
	}
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);

	/* safe to free now that no list can reach this errdef */
	cv_destroy(&ep->cv);
	kmem_free(ep->name, ep->errdef.namesize+1);
	if ((ep->errdef.access_type & BOFI_LOG) &&
	    ep->errdef.log.logsize && ep->logbase) /* double check */
		kmem_free(ep->logbase,
		    sizeof (struct acc_log_elem) * ep->errdef.log.logsize);

	if (ep->softintr_id)
		ddi_remove_softintr(ep->softintr_id);
	kmem_free(ep, sizeof (struct bofi_errent));
	return (0);
}
1894 1893
1895 1894
1896 1895 /*
1897 1896 * start all errdefs corresponding to this name and instance
1898 1897 */
1899 1898 static void
1900 1899 bofi_start(struct bofi_errctl *errctlp, char *namep)
1901 1900 {
1902 1901 struct bofi_errent *ep;
1903 1902
1904 1903 /*
1905 1904 * look for any errdefs with matching name and instance
1906 1905 */
1907 1906 mutex_enter(&bofi_low_mutex);
1908 1907 for (ep = errent_listp; ep != NULL; ep = ep->next)
1909 1908 if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
1910 1909 errctlp->instance == ep->errdef.instance) {
1911 1910 ep->state |= BOFI_DEV_ACTIVE;
1912 1911 (void) drv_getparm(TIME, &(ep->errdef.log.start_time));
1913 1912 ep->errdef.log.stop_time = 0ul;
1914 1913 }
1915 1914 mutex_exit(&bofi_low_mutex);
1916 1915 }
1917 1916
1918 1917
1919 1918 /*
1920 1919 * stop all errdefs corresponding to this name and instance
1921 1920 */
1922 1921 static void
1923 1922 bofi_stop(struct bofi_errctl *errctlp, char *namep)
1924 1923 {
1925 1924 struct bofi_errent *ep;
1926 1925
1927 1926 /*
1928 1927 * look for any errdefs with matching name and instance
1929 1928 */
1930 1929 mutex_enter(&bofi_low_mutex);
1931 1930 for (ep = errent_listp; ep != NULL; ep = ep->next)
1932 1931 if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
1933 1932 errctlp->instance == ep->errdef.instance) {
1934 1933 ep->state &= ~BOFI_DEV_ACTIVE;
1935 1934 if (ep->errdef.log.stop_time == 0ul)
1936 1935 (void) drv_getparm(TIME,
1937 1936 &(ep->errdef.log.stop_time));
1938 1937 }
1939 1938 mutex_exit(&bofi_low_mutex);
1940 1939 }
1941 1940
1942 1941
1943 1942 /*
1944 1943 * wake up any thread waiting on this errdefs
1945 1944 */
1946 1945 static uint_t
1947 1946 bofi_signal(caddr_t arg)
1948 1947 {
1949 1948 struct bofi_errdef *edp = (struct bofi_errdef *)arg;
1950 1949 struct bofi_errent *hep;
1951 1950 struct bofi_errent *ep =
1952 1951 (struct bofi_errent *)(uintptr_t)edp->errdef_handle;
1953 1952
1954 1953 mutex_enter(&bofi_low_mutex);
1955 1954 for (hep = errent_listp; hep != NULL; ) {
1956 1955 if (hep == ep)
1957 1956 break;
1958 1957 hep = hep->next;
1959 1958 }
1960 1959 if (hep == NULL) {
1961 1960 mutex_exit(&bofi_low_mutex);
1962 1961 return (DDI_INTR_UNCLAIMED);
1963 1962 }
1964 1963 if ((ep->errdef.access_type & BOFI_LOG) &&
1965 1964 (edp->log.flags & BOFI_LOG_FULL)) {
1966 1965 edp->log.stop_time = bofi_gettime();
1967 1966 ep->state |= BOFI_NEW_MESSAGE;
1968 1967 if (ep->state & BOFI_MESSAGE_WAIT)
1969 1968 cv_broadcast(&ep->cv);
1970 1969 ep->state &= ~BOFI_MESSAGE_WAIT;
1971 1970 }
1972 1971 if (ep->errstate.msg_time != 0) {
1973 1972 ep->state |= BOFI_NEW_MESSAGE;
1974 1973 if (ep->state & BOFI_MESSAGE_WAIT)
1975 1974 cv_broadcast(&ep->cv);
1976 1975 ep->state &= ~BOFI_MESSAGE_WAIT;
1977 1976 }
1978 1977 mutex_exit(&bofi_low_mutex);
1979 1978 return (DDI_INTR_CLAIMED);
1980 1979 }
1981 1980
1982 1981
1983 1982 /*
1984 1983 * wake up all errdefs corresponding to this name and instance
1985 1984 */
1986 1985 static void
1987 1986 bofi_broadcast(struct bofi_errctl *errctlp, char *namep)
1988 1987 {
1989 1988 struct bofi_errent *ep;
1990 1989
1991 1990 /*
1992 1991 * look for any errdefs with matching name and instance
1993 1992 */
1994 1993 mutex_enter(&bofi_low_mutex);
1995 1994 for (ep = errent_listp; ep != NULL; ep = ep->next)
1996 1995 if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
1997 1996 errctlp->instance == ep->errdef.instance) {
1998 1997 /*
1999 1998 * wake up sleepers
2000 1999 */
2001 2000 ep->state |= BOFI_NEW_MESSAGE;
2002 2001 if (ep->state & BOFI_MESSAGE_WAIT)
2003 2002 cv_broadcast(&ep->cv);
2004 2003 ep->state &= ~BOFI_MESSAGE_WAIT;
2005 2004 }
2006 2005 mutex_exit(&bofi_low_mutex);
2007 2006 }
2008 2007
2009 2008
2010 2009 /*
2011 2010 * clear "acc_chk" for all errdefs corresponding to this name and instance
2012 2011 * and wake them up.
2013 2012 */
2014 2013 static void
2015 2014 bofi_clear_acc_chk(struct bofi_errctl *errctlp, char *namep)
2016 2015 {
2017 2016 struct bofi_errent *ep;
2018 2017
2019 2018 /*
2020 2019 * look for any errdefs with matching name and instance
2021 2020 */
2022 2021 mutex_enter(&bofi_low_mutex);
2023 2022 for (ep = errent_listp; ep != NULL; ep = ep->next)
2024 2023 if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
2025 2024 errctlp->instance == ep->errdef.instance) {
2026 2025 mutex_enter(&bofi_mutex);
2027 2026 if (ep->errdef.access_count == 0 &&
2028 2027 ep->errdef.fail_count == 0)
2029 2028 ep->errdef.acc_chk = 0;
2030 2029 mutex_exit(&bofi_mutex);
2031 2030 /*
2032 2031 * wake up sleepers
2033 2032 */
2034 2033 ep->state |= BOFI_NEW_MESSAGE;
2035 2034 if (ep->state & BOFI_MESSAGE_WAIT)
2036 2035 cv_broadcast(&ep->cv);
2037 2036 ep->state &= ~BOFI_MESSAGE_WAIT;
2038 2037 }
2039 2038 mutex_exit(&bofi_low_mutex);
2040 2039 }
2041 2040
2042 2041
2043 2042 /*
2044 2043 * set "fail_count" to 0 for all errdefs corresponding to this name and instance
2045 2044 * whose "access_count" has expired, set "acc_chk" to 0 and wake them up.
2046 2045 */
2047 2046 static void
2048 2047 bofi_clear_errors(struct bofi_errctl *errctlp, char *namep)
2049 2048 {
2050 2049 struct bofi_errent *ep;
2051 2050
2052 2051 /*
2053 2052 * look for any errdefs with matching name and instance
2054 2053 */
2055 2054 mutex_enter(&bofi_low_mutex);
2056 2055 for (ep = errent_listp; ep != NULL; ep = ep->next)
2057 2056 if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
2058 2057 errctlp->instance == ep->errdef.instance) {
2059 2058 mutex_enter(&bofi_mutex);
2060 2059 if (ep->errdef.access_count == 0) {
2061 2060 ep->errdef.acc_chk = 0;
2062 2061 ep->errdef.fail_count = 0;
2063 2062 mutex_exit(&bofi_mutex);
2064 2063 if (ep->errdef.log.stop_time == 0ul)
2065 2064 (void) drv_getparm(TIME,
2066 2065 &(ep->errdef.log.stop_time));
2067 2066 } else
2068 2067 mutex_exit(&bofi_mutex);
2069 2068 /*
2070 2069 * wake up sleepers
2071 2070 */
2072 2071 ep->state |= BOFI_NEW_MESSAGE;
2073 2072 if (ep->state & BOFI_MESSAGE_WAIT)
2074 2073 cv_broadcast(&ep->cv);
2075 2074 ep->state &= ~BOFI_MESSAGE_WAIT;
2076 2075 }
2077 2076 mutex_exit(&bofi_low_mutex);
2078 2077 }
2079 2078
2080 2079
2081 2080 /*
2082 2081 * set "access_count" and "fail_count" to 0 for all errdefs corresponding to
2083 2082 * this name and instance, set "acc_chk" to 0, and wake them up.
2084 2083 */
2085 2084 static void
2086 2085 bofi_clear_errdefs(struct bofi_errctl *errctlp, char *namep)
2087 2086 {
2088 2087 struct bofi_errent *ep;
2089 2088
2090 2089 /*
2091 2090 * look for any errdefs with matching name and instance
2092 2091 */
2093 2092 mutex_enter(&bofi_low_mutex);
2094 2093 for (ep = errent_listp; ep != NULL; ep = ep->next)
2095 2094 if (strncmp(namep, ep->name, NAMESIZE) == 0 &&
2096 2095 errctlp->instance == ep->errdef.instance) {
2097 2096 mutex_enter(&bofi_mutex);
2098 2097 ep->errdef.acc_chk = 0;
2099 2098 ep->errdef.access_count = 0;
2100 2099 ep->errdef.fail_count = 0;
2101 2100 mutex_exit(&bofi_mutex);
2102 2101 if (ep->errdef.log.stop_time == 0ul)
2103 2102 (void) drv_getparm(TIME,
2104 2103 &(ep->errdef.log.stop_time));
2105 2104 /*
2106 2105 * wake up sleepers
2107 2106 */
2108 2107 ep->state |= BOFI_NEW_MESSAGE;
2109 2108 if (ep->state & BOFI_MESSAGE_WAIT)
2110 2109 cv_broadcast(&ep->cv);
2111 2110 ep->state &= ~BOFI_MESSAGE_WAIT;
2112 2111 }
2113 2112 mutex_exit(&bofi_low_mutex);
2114 2113 }
2115 2114
2116 2115
2117 2116 /*
2118 2117 * get state for this errdef
2119 2118 */
2120 2119 static int
2121 2120 bofi_errdef_check(struct bofi_errstate *errstatep, struct acc_log_elem **logpp)
2122 2121 {
2123 2122 struct bofi_errent *hep;
2124 2123 struct bofi_errent *ep;
2125 2124
2126 2125 ep = (struct bofi_errent *)(uintptr_t)errstatep->errdef_handle;
2127 2126 mutex_enter(&bofi_low_mutex);
2128 2127 /*
2129 2128 * don't just assume its a valid ep - check that its on the
2130 2129 * in-use list
2131 2130 */
2132 2131 for (hep = errent_listp; hep != NULL; hep = hep->next)
2133 2132 if (hep == ep)
2134 2133 break;
2135 2134 if (hep == NULL) {
2136 2135 mutex_exit(&bofi_low_mutex);
2137 2136 return (EINVAL);
2138 2137 }
2139 2138 mutex_enter(&bofi_mutex);
2140 2139 ep->errstate.access_count = ep->errdef.access_count;
2141 2140 ep->errstate.fail_count = ep->errdef.fail_count;
2142 2141 ep->errstate.acc_chk = ep->errdef.acc_chk;
2143 2142 ep->errstate.log = ep->errdef.log;
2144 2143 *logpp = ep->logbase;
2145 2144 *errstatep = ep->errstate;
2146 2145 mutex_exit(&bofi_mutex);
2147 2146 mutex_exit(&bofi_low_mutex);
2148 2147 return (0);
2149 2148 }
2150 2149
2151 2150
/*
 * Wait for a ddi_report_fault message to come back for this errdef
 * Then return state for this errdef.
 * fault report is intercepted by bofi_post_event, which triggers
 * bofi_signal via a softint, which will wake up this routine if
 * we are waiting
 *
 * Returns 0 on success, EINVAL if the handle is not a live errdef,
 * EINTR if the wait was interrupted by a signal before a message
 * arrived.
 */
static int
bofi_errdef_check_w(struct bofi_errstate *errstatep,
    struct acc_log_elem **logpp)
{
	struct bofi_errent *hep;
	struct bofi_errent *ep;
	int rval = 0;

	ep = (struct bofi_errent *)(uintptr_t)errstatep->errdef_handle;
	mutex_enter(&bofi_low_mutex);
retry:
	/*
	 * don't just assume its a valid ep - check that its on the
	 * in-use list
	 *
	 * revalidated on every retry: the errdef could have been freed
	 * while we slept in cv_wait_sig() below.
	 */
	for (hep = errent_listp; hep != NULL; hep = hep->next)
		if (hep == ep)
			break;
	if (hep == NULL) {
		mutex_exit(&bofi_low_mutex);
		return (EINVAL);
	}
	/*
	 * wait for ddi_report_fault for the devinfo corresponding
	 * to this errdef
	 *
	 * cv_wait_sig() returning 0 means a signal interrupted the
	 * wait; if no message arrived in the meantime, remember EINTR
	 * (which also stops the retry loop from sleeping again).
	 */
	if (rval == 0 && !(ep->state & BOFI_NEW_MESSAGE)) {
		ep->state |= BOFI_MESSAGE_WAIT;
		if (cv_wait_sig(&ep->cv, &bofi_low_mutex) == 0) {
			if (!(ep->state & BOFI_NEW_MESSAGE))
				rval = EINTR;
		}
		goto retry;
	}
	/* consume the message flag before reporting state */
	ep->state &= ~BOFI_NEW_MESSAGE;
	/*
	 * we either didn't need to sleep, we've been woken up or we've been
	 * signaled - either way return state now
	 */
	mutex_enter(&bofi_mutex);
	ep->errstate.access_count = ep->errdef.access_count;
	ep->errstate.fail_count = ep->errdef.fail_count;
	ep->errstate.acc_chk = ep->errdef.acc_chk;
	ep->errstate.log = ep->errdef.log;
	*logpp = ep->logbase;
	*errstatep = ep->errstate;
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	return (rval);
}
2209 2208
2210 2209
2211 2210 /*
2212 2211 * support routine - check if requested driver is defined as under test in the
2213 2212 * conf file.
2214 2213 */
2215 2214 static int
2216 2215 driver_under_test(dev_info_t *rdip)
2217 2216 {
2218 2217 int i;
2219 2218 char *rname;
2220 2219 major_t rmaj;
2221 2220
2222 2221 rname = ddi_get_name(rdip);
2223 2222 rmaj = ddi_name_to_major(rname);
2224 2223
2225 2224 /*
2226 2225 * Enforce the user to specifically request the following drivers.
2227 2226 */
2228 2227 for (i = 0; i < driver_list_size; i += (1 + strlen(&driver_list[i]))) {
2229 2228 if (driver_list_neg == 0) {
2230 2229 if (rmaj == ddi_name_to_major(&driver_list[i]))
2231 2230 return (1);
2232 2231 } else {
2233 2232 if (rmaj == ddi_name_to_major(&driver_list[i+1]))
2234 2233 return (0);
2235 2234 }
2236 2235 }
2237 2236 if (driver_list_neg == 0)
2238 2237 return (0);
2239 2238 else
2240 2239 return (1);
2241 2240
2242 2241 }
2243 2242
2244 2243
2245 2244 static void
2246 2245 log_acc_event(struct bofi_errent *ep, uint_t at, offset_t offset, off_t len,
2247 2246 size_t repcount, uint64_t *valuep)
2248 2247 {
2249 2248 struct bofi_errdef *edp = &(ep->errdef);
2250 2249 struct acc_log *log = &edp->log;
2251 2250
2252 2251 ASSERT(log != NULL);
2253 2252 ASSERT(MUTEX_HELD(&bofi_mutex));
2254 2253
2255 2254 if (log->flags & BOFI_LOG_REPIO)
2256 2255 repcount = 1;
2257 2256 else if (repcount == 0 && edp->access_count > 0 &&
2258 2257 (log->flags & BOFI_LOG_FULL) == 0)
2259 2258 edp->access_count += 1;
2260 2259
2261 2260 if (repcount && log->entries < log->logsize) {
2262 2261 struct acc_log_elem *elem = ep->logbase + log->entries;
2263 2262
2264 2263 if (log->flags & BOFI_LOG_TIMESTAMP)
2265 2264 elem->access_time = bofi_gettime();
2266 2265 elem->access_type = at;
2267 2266 elem->offset = offset;
2268 2267 elem->value = valuep ? *valuep : 0ll;
2269 2268 elem->size = len;
2270 2269 elem->repcount = repcount;
2271 2270 ++log->entries;
2272 2271 if (log->entries == log->logsize) {
2273 2272 log->flags |= BOFI_LOG_FULL;
2274 2273 ddi_trigger_softintr(((struct bofi_errent *)
2275 2274 (uintptr_t)edp->errdef_handle)->softintr_id);
2276 2275 }
2277 2276 }
2278 2277 if ((log->flags & BOFI_LOG_WRAP) && edp->access_count <= 1) {
2279 2278 log->wrapcnt++;
2280 2279 edp->access_count = log->logsize;
2281 2280 log->entries = 0; /* wrap back to the start */
2282 2281 }
2283 2282 }
2284 2283
2285 2284
2286 2285 /*
2287 2286 * got a condition match on dma read/write - check counts and corrupt
2288 2287 * data if necessary
2289 2288 *
2290 2289 * bofi_mutex always held when this is called.
2291 2290 */
2292 2291 static void
2293 2292 do_dma_corrupt(struct bofi_shadow *hp, struct bofi_errent *ep,
2294 2293 uint_t synctype, off_t off, off_t length)
2295 2294 {
2296 2295 uint64_t operand;
2297 2296 int i;
2298 2297 off_t len;
2299 2298 caddr_t logaddr;
2300 2299 uint64_t *addr;
2301 2300 uint64_t *endaddr;
2302 2301 ddi_dma_impl_t *hdlp;
2303 2302 ndi_err_t *errp;
2304 2303
2305 2304 ASSERT(MUTEX_HELD(&bofi_mutex));
2306 2305 if ((ep->errdef.access_count ||
2307 2306 ep->errdef.fail_count) &&
2308 2307 (ep->errdef.access_type & BOFI_LOG)) {
2309 2308 uint_t atype;
2310 2309
2311 2310 if (synctype == DDI_DMA_SYNC_FORDEV)
2312 2311 atype = BOFI_DMA_W;
2313 2312 else if (synctype == DDI_DMA_SYNC_FORCPU ||
2314 2313 synctype == DDI_DMA_SYNC_FORKERNEL)
2315 2314 atype = BOFI_DMA_R;
2316 2315 else
2317 2316 atype = 0;
2318 2317 if ((off <= ep->errdef.offset &&
2319 2318 off + length > ep->errdef.offset) ||
2320 2319 (off > ep->errdef.offset &&
2321 2320 off < ep->errdef.offset + ep->errdef.len)) {
2322 2321 logaddr = (caddr_t)((uintptr_t)(hp->addr +
2323 2322 off + LLSZMASK) & ~LLSZMASK);
2324 2323
2325 2324 log_acc_event(ep, atype, logaddr - hp->addr,
2326 2325 length, 1, 0);
2327 2326 }
2328 2327 }
2329 2328 if (ep->errdef.access_count > 1) {
2330 2329 ep->errdef.access_count--;
2331 2330 } else if (ep->errdef.fail_count > 0) {
2332 2331 ep->errdef.fail_count--;
2333 2332 ep->errdef.access_count = 0;
2334 2333 /*
2335 2334 * OK do the corruption
2336 2335 */
2337 2336 if (ep->errstate.fail_time == 0)
2338 2337 ep->errstate.fail_time = bofi_gettime();
2339 2338 /*
2340 2339 * work out how much to corrupt
2341 2340 *
2342 2341 * Make sure endaddr isn't greater than hp->addr + hp->len.
2343 2342 * If endaddr becomes less than addr len becomes negative
2344 2343 * and the following loop isn't entered.
2345 2344 */
2346 2345 addr = (uint64_t *)((uintptr_t)((hp->addr +
2347 2346 ep->errdef.offset) + LLSZMASK) & ~LLSZMASK);
2348 2347 endaddr = (uint64_t *)((uintptr_t)(hp->addr + min(hp->len,
2349 2348 ep->errdef.offset + ep->errdef.len)) & ~LLSZMASK);
2350 2349 len = endaddr - addr;
2351 2350 operand = ep->errdef.operand;
2352 2351 hdlp = (ddi_dma_impl_t *)(hp->hdl.dma_handle);
2353 2352 errp = &hdlp->dmai_error;
2354 2353 if (ep->errdef.acc_chk & 2) {
2355 2354 uint64_t ena;
2356 2355 char buf[FM_MAX_CLASS];
2357 2356
2358 2357 errp->err_status = DDI_FM_NONFATAL;
2359 2358 (void) snprintf(buf, FM_MAX_CLASS, FM_SIMULATED_DMA);
2360 2359 ena = fm_ena_generate(0, FM_ENA_FMT1);
2361 2360 ddi_fm_ereport_post(hp->dip, buf, ena,
2362 2361 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8,
2363 2362 FM_EREPORT_VERS0, NULL);
2364 2363 }
2365 2364 switch (ep->errdef.optype) {
2366 2365 case BOFI_EQUAL :
2367 2366 for (i = 0; i < len; i++)
2368 2367 *(addr + i) = operand;
2369 2368 break;
2370 2369 case BOFI_AND :
2371 2370 for (i = 0; i < len; i++)
2372 2371 *(addr + i) &= operand;
2373 2372 break;
2374 2373 case BOFI_OR :
2375 2374 for (i = 0; i < len; i++)
2376 2375 *(addr + i) |= operand;
2377 2376 break;
2378 2377 case BOFI_XOR :
2379 2378 for (i = 0; i < len; i++)
2380 2379 *(addr + i) ^= operand;
2381 2380 break;
2382 2381 default:
2383 2382 /* do nothing */
2384 2383 break;
2385 2384 }
2386 2385 }
2387 2386 }
2388 2387
2389 2388
2390 2389 static uint64_t do_bofi_rd8(struct bofi_shadow *, caddr_t);
2391 2390 static uint64_t do_bofi_rd16(struct bofi_shadow *, caddr_t);
2392 2391 static uint64_t do_bofi_rd32(struct bofi_shadow *, caddr_t);
2393 2392 static uint64_t do_bofi_rd64(struct bofi_shadow *, caddr_t);
2394 2393
2395 2394
/*
 * check all errdefs linked to this shadow handle. If we've got a condition
 * match check counts and corrupt data if necessary
 *
 * bofi_mutex always held when this is called.
 *
 * because of possibility of BOFI_NO_TRANSFER, we couldn't get data
 * from io-space before calling this, so we pass in the func to do the
 * transfer as a parameter.
 *
 * Returns the (possibly corrupted) value read, or the errdef operand
 * without touching io-space for BOFI_NO_TRANSFER.
 */
static uint64_t
do_pior_corrupt(struct bofi_shadow *hp, caddr_t addr,
    uint64_t (*func)(), size_t repcount, size_t accsize)
{
	struct bofi_errent *ep;
	struct bofi_link *lp;
	uint64_t operand;
	uintptr_t minlen;
	intptr_t base;
	int done_get = 0;
	uint64_t get_val, gv;
	ddi_acc_impl_t *hdlp;
	ndi_err_t *errp;

	ASSERT(MUTEX_HELD(&bofi_mutex));
	/*
	 * check through all errdefs associated with this shadow handle
	 */
	for (lp = hp->link; lp != NULL; lp = lp->link) {
		ep = lp->errentp;
		if (ep->errdef.len == 0)
			minlen = hp->len;
		else
			minlen = min(hp->len, ep->errdef.len);
		/* offset of this access relative to the errdef's window */
		base = addr - hp->addr - ep->errdef.offset + hp->offset;
		if ((ep->errdef.access_type & BOFI_PIO_R) &&
		    (ep->state & BOFI_DEV_ACTIVE) &&
		    base >= 0 && base < minlen) {
			/*
			 * condition match for pio read
			 */
			if (ep->errdef.access_count > 1) {
				ep->errdef.access_count--;
				/* do the real io-space read at most once */
				if (done_get == 0) {
					done_get = 1;
					gv = get_val = func(hp, addr);
				}
				if (ep->errdef.access_type & BOFI_LOG) {
					log_acc_event(ep, BOFI_PIO_R,
					    addr - hp->addr,
					    accsize, repcount, &gv);
				}
			} else if (ep->errdef.fail_count > 0) {
				ep->errdef.fail_count--;
				ep->errdef.access_count = 0;
				/*
				 * OK do corruption
				 */
				if (ep->errstate.fail_time == 0)
					ep->errstate.fail_time = bofi_gettime();
				operand = ep->errdef.operand;
				if (done_get == 0) {
					if (ep->errdef.optype ==
					    BOFI_NO_TRANSFER)
						/*
						 * no transfer - bomb out
						 */
						return (operand);
					done_get = 1;
					gv = get_val = func(hp, addr);

				}
				if (ep->errdef.access_type & BOFI_LOG) {
					log_acc_event(ep, BOFI_PIO_R,
					    addr - hp->addr,
					    accsize, repcount, &gv);
				}
				hdlp = (ddi_acc_impl_t *)(hp->hdl.acc_handle);
				errp = hdlp->ahi_err;
				if (ep->errdef.acc_chk & 1) {
					uint64_t ena;
					char buf[FM_MAX_CLASS];

					/* post a simulated PIO fault ereport */
					errp->err_status = DDI_FM_NONFATAL;
					(void) snprintf(buf, FM_MAX_CLASS,
					    FM_SIMULATED_PIO);
					ena = fm_ena_generate(0, FM_ENA_FMT1);
					ddi_fm_ereport_post(hp->dip, buf, ena,
					    DDI_NOSLEEP, FM_VERSION,
					    DATA_TYPE_UINT8, FM_EREPORT_VERS0,
					    NULL);
				}
				/* corrupt the value handed back to the caller */
				switch (ep->errdef.optype) {
				case BOFI_EQUAL :
					get_val = operand;
					break;
				case BOFI_AND :
					get_val &= operand;
					break;
				case BOFI_OR :
					get_val |= operand;
					break;
				case BOFI_XOR :
					get_val ^= operand;
					break;
				default:
					/* do nothing */
					break;
				}
			}
		}
	}
	/* no errdef consumed a count - plain uncorrupted read */
	if (done_get == 0)
		return (func(hp, addr));
	else
		return (get_val);
}
2513 2512
2514 2513
/*
 * check all errdefs linked to this shadow handle. If we've got a condition
 * match check counts and corrupt data if necessary
 *
 * bofi_mutex always held when this is called.
 *
 * because of possibility of BOFI_NO_TRANSFER, we return 0 if no data
 * is to be written out to io-space, 1 otherwise
 *
 * *valuep is the value being written; it may be corrupted in place
 * before the caller pushes it to the device.
 */
static int
do_piow_corrupt(struct bofi_shadow *hp, caddr_t addr, uint64_t *valuep,
    size_t size, size_t repcount)
{
	struct bofi_errent *ep;
	struct bofi_link *lp;
	uintptr_t minlen;
	intptr_t base;
	uint64_t v = *valuep;
	ddi_acc_impl_t *hdlp;
	ndi_err_t *errp;

	ASSERT(MUTEX_HELD(&bofi_mutex));
	/*
	 * check through all errdefs associated with this shadow handle
	 */
	for (lp = hp->link; lp != NULL; lp = lp->link) {
		ep = lp->errentp;
		if (ep->errdef.len == 0)
			minlen = hp->len;
		else
			minlen = min(hp->len, ep->errdef.len);
		/* offset of this access relative to the errdef's window */
		base = (caddr_t)addr - hp->addr - ep->errdef.offset +hp->offset;
		if ((ep->errdef.access_type & BOFI_PIO_W) &&
		    (ep->state & BOFI_DEV_ACTIVE) &&
		    base >= 0 && base < minlen) {
			/*
			 * condition match for pio write
			 */

			if (ep->errdef.access_count > 1) {
				ep->errdef.access_count--;
				if (ep->errdef.access_type & BOFI_LOG)
					log_acc_event(ep, BOFI_PIO_W,
					    addr - hp->addr, size,
					    repcount, &v);
			} else if (ep->errdef.fail_count > 0) {
				ep->errdef.fail_count--;
				ep->errdef.access_count = 0;
				if (ep->errdef.access_type & BOFI_LOG)
					log_acc_event(ep, BOFI_PIO_W,
					    addr - hp->addr, size,
					    repcount, &v);
				/*
				 * OK do corruption
				 */
				if (ep->errstate.fail_time == 0)
					ep->errstate.fail_time = bofi_gettime();
				hdlp = (ddi_acc_impl_t *)(hp->hdl.acc_handle);
				errp = hdlp->ahi_err;
				if (ep->errdef.acc_chk & 1) {
					uint64_t ena;
					char buf[FM_MAX_CLASS];

					/* post a simulated PIO fault ereport */
					errp->err_status = DDI_FM_NONFATAL;
					(void) snprintf(buf, FM_MAX_CLASS,
					    FM_SIMULATED_PIO);
					ena = fm_ena_generate(0, FM_ENA_FMT1);
					ddi_fm_ereport_post(hp->dip, buf, ena,
					    DDI_NOSLEEP, FM_VERSION,
					    DATA_TYPE_UINT8, FM_EREPORT_VERS0,
					    NULL);
				}
				/* corrupt the outgoing value in place */
				switch (ep->errdef.optype) {
				case BOFI_EQUAL :
					*valuep = ep->errdef.operand;
					break;
				case BOFI_AND :
					*valuep &= ep->errdef.operand;
					break;
				case BOFI_OR :
					*valuep |= ep->errdef.operand;
					break;
				case BOFI_XOR :
					*valuep ^= ep->errdef.operand;
					break;
				case BOFI_NO_TRANSFER :
					/*
					 * no transfer - bomb out
					 */
					return (0);
				default:
					/* do nothing */
					break;
				}
			}
		}
	}
	return (1);
}
2614 2613
2615 2614
2616 2615 static uint64_t
2617 2616 do_bofi_rd8(struct bofi_shadow *hp, caddr_t addr)
2618 2617 {
2619 2618 return (hp->save.acc.ahi_get8(&hp->save.acc, (uint8_t *)addr));
2620 2619 }
2621 2620
/*
 * Common checks for the ddi_get() wrappers: optionally rebase addr
 * when bofi_ddi_check is set; if bofi_range_check is set and addr
 * lies outside the shadow handle's mapped range, warn (or panic when
 * bofi_range_check == 2) and return 0 to the wrapper's caller.
 * Expects hp and addr in scope in the expanding function.
 */
#define BOFI_READ_CHECKS(type) \
	if (bofi_ddi_check) \
		addr = (type *)((uintptr_t)addr - 64 + hp->addr); \
	if (bofi_range_check && ((caddr_t)addr < hp->addr || \
	    (caddr_t)addr - hp->addr >= hp->len)) { \
		cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
		    "ddi_get() out of range addr %p not in %p/%llx", \
		    (void *)addr, (void *)hp->addr, hp->len); \
		return (0); \
	}
2632 2631
2633 2632 /*
2634 2633 * our getb() routine - use tryenter
2635 2634 */
2636 2635 static uint8_t
2637 2636 bofi_rd8(ddi_acc_impl_t *handle, uint8_t *addr)
2638 2637 {
2639 2638 struct bofi_shadow *hp;
2640 2639 uint8_t retval;
2641 2640
2642 2641 hp = handle->ahi_common.ah_bus_private;
2643 2642 BOFI_READ_CHECKS(uint8_t)
2644 2643 if (!hp->link || !mutex_tryenter(&bofi_mutex))
2645 2644 return (hp->save.acc.ahi_get8(&hp->save.acc, addr));
2646 2645 retval = (uint8_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd8, 1,
2647 2646 1);
2648 2647 mutex_exit(&bofi_mutex);
2649 2648 return (retval);
2650 2649 }
2651 2650
2652 2651
2653 2652 static uint64_t
2654 2653 do_bofi_rd16(struct bofi_shadow *hp, caddr_t addr)
2655 2654 {
2656 2655 return (hp->save.acc.ahi_get16(&hp->save.acc, (uint16_t *)addr));
2657 2656 }
2658 2657
2659 2658
2660 2659 /*
2661 2660 * our getw() routine - use tryenter
2662 2661 */
2663 2662 static uint16_t
2664 2663 bofi_rd16(ddi_acc_impl_t *handle, uint16_t *addr)
2665 2664 {
2666 2665 struct bofi_shadow *hp;
2667 2666 uint16_t retval;
2668 2667
2669 2668 hp = handle->ahi_common.ah_bus_private;
2670 2669 BOFI_READ_CHECKS(uint16_t)
2671 2670 if (!hp->link || !mutex_tryenter(&bofi_mutex))
2672 2671 return (hp->save.acc.ahi_get16(&hp->save.acc, addr));
2673 2672 retval = (uint16_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd16, 1,
2674 2673 2);
2675 2674 mutex_exit(&bofi_mutex);
2676 2675 return (retval);
2677 2676 }
2678 2677
2679 2678
2680 2679 static uint64_t
2681 2680 do_bofi_rd32(struct bofi_shadow *hp, caddr_t addr)
2682 2681 {
2683 2682 return (hp->save.acc.ahi_get32(&hp->save.acc, (uint32_t *)addr));
2684 2683 }
2685 2684
2686 2685
2687 2686 /*
2688 2687 * our getl() routine - use tryenter
2689 2688 */
2690 2689 static uint32_t
2691 2690 bofi_rd32(ddi_acc_impl_t *handle, uint32_t *addr)
2692 2691 {
2693 2692 struct bofi_shadow *hp;
2694 2693 uint32_t retval;
2695 2694
2696 2695 hp = handle->ahi_common.ah_bus_private;
2697 2696 BOFI_READ_CHECKS(uint32_t)
2698 2697 if (!hp->link || !mutex_tryenter(&bofi_mutex))
2699 2698 return (hp->save.acc.ahi_get32(&hp->save.acc, addr));
2700 2699 retval = (uint32_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd32, 1,
2701 2700 4);
2702 2701 mutex_exit(&bofi_mutex);
2703 2702 return (retval);
2704 2703 }
2705 2704
2706 2705
2707 2706 static uint64_t
2708 2707 do_bofi_rd64(struct bofi_shadow *hp, caddr_t addr)
2709 2708 {
2710 2709 return (hp->save.acc.ahi_get64(&hp->save.acc, (uint64_t *)addr));
2711 2710 }
2712 2711
2713 2712
2714 2713 /*
2715 2714 * our getll() routine - use tryenter
2716 2715 */
2717 2716 static uint64_t
2718 2717 bofi_rd64(ddi_acc_impl_t *handle, uint64_t *addr)
2719 2718 {
2720 2719 struct bofi_shadow *hp;
2721 2720 uint64_t retval;
2722 2721
2723 2722 hp = handle->ahi_common.ah_bus_private;
2724 2723 BOFI_READ_CHECKS(uint64_t)
2725 2724 if (!hp->link || !mutex_tryenter(&bofi_mutex))
2726 2725 return (hp->save.acc.ahi_get64(&hp->save.acc, addr));
2727 2726 retval = (uint64_t)do_pior_corrupt(hp, (caddr_t)addr, do_bofi_rd64, 1,
2728 2727 8);
2729 2728 mutex_exit(&bofi_mutex);
2730 2729 return (retval);
2731 2730 }
2732 2731
/*
 * Common checks for the ddi_put() wrappers: optionally rebase addr
 * when bofi_ddi_check is set; if bofi_range_check is set and addr
 * lies outside the shadow handle's mapped range, warn (or panic when
 * bofi_range_check == 2) and drop the access by returning early.
 * Expects hp and addr in scope in the expanding function.
 */
#define BOFI_WRITE_TESTS(type) \
	if (bofi_ddi_check) \
		addr = (type *)((uintptr_t)addr - 64 + hp->addr); \
	if (bofi_range_check && ((caddr_t)addr < hp->addr || \
	    (caddr_t)addr - hp->addr >= hp->len)) { \
		cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
		    "ddi_put() out of range addr %p not in %p/%llx\n", \
		    (void *)addr, (void *)hp->addr, hp->len); \
		return; \
	}
2743 2742
2744 2743 /*
2745 2744 * our putb() routine - use tryenter
2746 2745 */
2747 2746 static void
2748 2747 bofi_wr8(ddi_acc_impl_t *handle, uint8_t *addr, uint8_t value)
2749 2748 {
2750 2749 struct bofi_shadow *hp;
2751 2750 uint64_t llvalue = value;
2752 2751
2753 2752 hp = handle->ahi_common.ah_bus_private;
2754 2753 BOFI_WRITE_TESTS(uint8_t)
2755 2754 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2756 2755 hp->save.acc.ahi_put8(&hp->save.acc, addr, (uint8_t)llvalue);
2757 2756 return;
2758 2757 }
2759 2758 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 1, 1))
2760 2759 hp->save.acc.ahi_put8(&hp->save.acc, addr, (uint8_t)llvalue);
2761 2760 mutex_exit(&bofi_mutex);
2762 2761 }
2763 2762
2764 2763
2765 2764 /*
2766 2765 * our putw() routine - use tryenter
2767 2766 */
2768 2767 static void
2769 2768 bofi_wr16(ddi_acc_impl_t *handle, uint16_t *addr, uint16_t value)
2770 2769 {
2771 2770 struct bofi_shadow *hp;
2772 2771 uint64_t llvalue = value;
2773 2772
2774 2773 hp = handle->ahi_common.ah_bus_private;
2775 2774 BOFI_WRITE_TESTS(uint16_t)
2776 2775 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2777 2776 hp->save.acc.ahi_put16(&hp->save.acc, addr, (uint16_t)llvalue);
2778 2777 return;
2779 2778 }
2780 2779 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 2, 1))
2781 2780 hp->save.acc.ahi_put16(&hp->save.acc, addr, (uint16_t)llvalue);
2782 2781 mutex_exit(&bofi_mutex);
2783 2782 }
2784 2783
2785 2784
2786 2785 /*
2787 2786 * our putl() routine - use tryenter
2788 2787 */
2789 2788 static void
2790 2789 bofi_wr32(ddi_acc_impl_t *handle, uint32_t *addr, uint32_t value)
2791 2790 {
2792 2791 struct bofi_shadow *hp;
2793 2792 uint64_t llvalue = value;
2794 2793
2795 2794 hp = handle->ahi_common.ah_bus_private;
2796 2795 BOFI_WRITE_TESTS(uint32_t)
2797 2796 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2798 2797 hp->save.acc.ahi_put32(&hp->save.acc, addr, (uint32_t)llvalue);
2799 2798 return;
2800 2799 }
2801 2800 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 4, 1))
2802 2801 hp->save.acc.ahi_put32(&hp->save.acc, addr, (uint32_t)llvalue);
2803 2802 mutex_exit(&bofi_mutex);
2804 2803 }
2805 2804
2806 2805
2807 2806 /*
2808 2807 * our putll() routine - use tryenter
2809 2808 */
2810 2809 static void
2811 2810 bofi_wr64(ddi_acc_impl_t *handle, uint64_t *addr, uint64_t value)
2812 2811 {
2813 2812 struct bofi_shadow *hp;
2814 2813 uint64_t llvalue = value;
2815 2814
2816 2815 hp = handle->ahi_common.ah_bus_private;
2817 2816 BOFI_WRITE_TESTS(uint64_t)
2818 2817 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
2819 2818 hp->save.acc.ahi_put64(&hp->save.acc, addr, (uint64_t)llvalue);
2820 2819 return;
2821 2820 }
2822 2821 if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 8, 1))
2823 2822 hp->save.acc.ahi_put64(&hp->save.acc, addr, (uint64_t)llvalue);
2824 2823 mutex_exit(&bofi_mutex);
2825 2824 }
2826 2825
/*
 * Common prologue for the rep_get routines.  References the caller's
 * locals dev_addr, repcount and hp by name, so it must be expanded in a
 * context that declares all three.
 *
 * - If bofi_ddi_check is set, dev_addr is the spurious value handed out
 *   by bofi_map() (real address replaced by 64); map it back to the
 *   real mapping via hp->addr.
 * - If bofi_range_check is set and the access runs outside the mapped
 *   region, warn (or panic when bofi_range_check == 2); if the start
 *   itself is out of range, return from the caller, otherwise clamp
 *   repcount to the end of the region.
 */
#define BOFI_REP_READ_TESTS(type) \
	if (bofi_ddi_check) \
		dev_addr = (type *)((uintptr_t)dev_addr - 64 + hp->addr); \
	if (bofi_range_check && ((caddr_t)dev_addr < hp->addr || \
	    (caddr_t)(dev_addr + repcount) - hp->addr > hp->len)) { \
		cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
		    "ddi_rep_get() out of range addr %p not in %p/%llx\n", \
		    (void *)dev_addr, (void *)hp->addr, hp->len); \
		if ((caddr_t)dev_addr < hp->addr || \
		    (caddr_t)dev_addr - hp->addr >= hp->len) \
			return; \
		repcount = (type *)(hp->addr + hp->len) - dev_addr; \
	}
2840 2839
/*
 * our rep_getb() routine - use tryenter
 *
 * Replacement for ahi_rep_get8 on a shadowed handle.  Falls back to the
 * saved routine when no errdefs are linked or bofi_mutex is busy;
 * otherwise each element is fetched through do_pior_corrupt() so active
 * errdefs can substitute data.
 */
static void
bofi_rep_rd8(ddi_acc_impl_t *handle, uint8_t *host_addr, uint8_t *dev_addr,
    size_t repcount, uint_t flags)
{
	struct bofi_shadow *hp;
	int i;
	uint8_t *addr;

	hp = handle->ahi_common.ah_bus_private;
	/* ddi-check remap and range clamp; may return */
	BOFI_REP_READ_TESTS(uint8_t)
	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
		hp->save.acc.ahi_rep_get8(&hp->save.acc, host_addr, dev_addr,
		    repcount, flags);
		return;
	}
	for (i = 0; i < repcount; i++) {
		/* DDI_DEV_AUTOINCR steps the device address, else re-read */
		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
		/* full repcount passed only for the first element */
		*(host_addr + i) = (uint8_t)do_pior_corrupt(hp, (caddr_t)addr,
		    do_bofi_rd8, i ? 0 : repcount, 1);
	}
	mutex_exit(&bofi_mutex);
}
2866 2865
2867 2866
/*
 * our rep_getw() routine - use tryenter
 *
 * 16-bit analogue of bofi_rep_rd8(): pass-through when no errdefs are
 * linked or bofi_mutex is busy, else read each element through
 * do_pior_corrupt().
 */
static void
bofi_rep_rd16(ddi_acc_impl_t *handle, uint16_t *host_addr,
    uint16_t *dev_addr, size_t repcount, uint_t flags)
{
	struct bofi_shadow *hp;
	int i;
	uint16_t *addr;

	hp = handle->ahi_common.ah_bus_private;
	/* ddi-check remap and range clamp; may return */
	BOFI_REP_READ_TESTS(uint16_t)
	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
		hp->save.acc.ahi_rep_get16(&hp->save.acc, host_addr, dev_addr,
		    repcount, flags);
		return;
	}
	for (i = 0; i < repcount; i++) {
		/* DDI_DEV_AUTOINCR steps the device address, else re-read */
		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
		/* full repcount passed only for the first element */
		*(host_addr + i) = (uint16_t)do_pior_corrupt(hp, (caddr_t)addr,
		    do_bofi_rd16, i ? 0 : repcount, 2);
	}
	mutex_exit(&bofi_mutex);
}
2893 2892
2894 2893
/*
 * our rep_getl() routine - use tryenter
 *
 * 32-bit analogue of bofi_rep_rd8(): pass-through when no errdefs are
 * linked or bofi_mutex is busy, else read each element through
 * do_pior_corrupt().
 */
static void
bofi_rep_rd32(ddi_acc_impl_t *handle, uint32_t *host_addr,
    uint32_t *dev_addr, size_t repcount, uint_t flags)
{
	struct bofi_shadow *hp;
	int i;
	uint32_t *addr;

	hp = handle->ahi_common.ah_bus_private;
	/* ddi-check remap and range clamp; may return */
	BOFI_REP_READ_TESTS(uint32_t)
	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
		hp->save.acc.ahi_rep_get32(&hp->save.acc, host_addr, dev_addr,
		    repcount, flags);
		return;
	}
	for (i = 0; i < repcount; i++) {
		/* DDI_DEV_AUTOINCR steps the device address, else re-read */
		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
		/* full repcount passed only for the first element */
		*(host_addr + i) = (uint32_t)do_pior_corrupt(hp, (caddr_t)addr,
		    do_bofi_rd32, i ? 0 : repcount, 4);
	}
	mutex_exit(&bofi_mutex);
}
2920 2919
2921 2920
/*
 * our rep_getll() routine - use tryenter
 *
 * 64-bit analogue of bofi_rep_rd8(): pass-through when no errdefs are
 * linked or bofi_mutex is busy, else read each element through
 * do_pior_corrupt().
 */
static void
bofi_rep_rd64(ddi_acc_impl_t *handle, uint64_t *host_addr,
    uint64_t *dev_addr, size_t repcount, uint_t flags)
{
	struct bofi_shadow *hp;
	int i;
	uint64_t *addr;

	hp = handle->ahi_common.ah_bus_private;
	/* ddi-check remap and range clamp; may return */
	BOFI_REP_READ_TESTS(uint64_t)
	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
		hp->save.acc.ahi_rep_get64(&hp->save.acc, host_addr, dev_addr,
		    repcount, flags);
		return;
	}
	for (i = 0; i < repcount; i++) {
		/* DDI_DEV_AUTOINCR steps the device address, else re-read */
		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
		/* full repcount passed only for the first element */
		*(host_addr + i) = (uint64_t)do_pior_corrupt(hp, (caddr_t)addr,
		    do_bofi_rd64, i ? 0 : repcount, 8);
	}
	mutex_exit(&bofi_mutex);
}
2947 2946
/*
 * Common prologue for the rep_put routines; mirror image of
 * BOFI_REP_READ_TESTS.  References the caller's locals dev_addr,
 * repcount and hp by name.  Remaps the spurious bofi_ddi_check address
 * back to the real mapping, then (if bofi_range_check) warns or panics
 * on out-of-range accesses, returning from the caller when the start is
 * out of range or clamping repcount otherwise.
 */
#define BOFI_REP_WRITE_TESTS(type) \
	if (bofi_ddi_check) \
		dev_addr = (type *)((uintptr_t)dev_addr - 64 + hp->addr); \
	if (bofi_range_check && ((caddr_t)dev_addr < hp->addr || \
	    (caddr_t)(dev_addr + repcount) - hp->addr > hp->len)) { \
		cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
		    "ddi_rep_put() out of range addr %p not in %p/%llx\n", \
		    (void *)dev_addr, (void *)hp->addr, hp->len); \
		if ((caddr_t)dev_addr < hp->addr || \
		    (caddr_t)dev_addr - hp->addr >= hp->len) \
			return; \
		repcount = (type *)(hp->addr + hp->len) - dev_addr; \
	}
2961 2960
/*
 * our rep_putb() routine - use tryenter
 *
 * Replacement for ahi_rep_put8 on a shadowed handle.  Falls back to the
 * saved routine when no errdefs are linked or bofi_mutex is busy;
 * otherwise each element goes through do_piow_corrupt(), which may
 * modify the value and decides whether the write is performed at all.
 */
static void
bofi_rep_wr8(ddi_acc_impl_t *handle, uint8_t *host_addr, uint8_t *dev_addr,
    size_t repcount, uint_t flags)
{
	struct bofi_shadow *hp;
	int i;
	uint64_t llvalue;
	uint8_t *addr;

	hp = handle->ahi_common.ah_bus_private;
	/* ddi-check remap and range clamp; may return */
	BOFI_REP_WRITE_TESTS(uint8_t)
	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
		hp->save.acc.ahi_rep_put8(&hp->save.acc, host_addr, dev_addr,
		    repcount, flags);
		return;
	}
	for (i = 0; i < repcount; i++) {
		llvalue = *(host_addr + i);
		/* DDI_DEV_AUTOINCR steps the device address, else rewrite */
		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
		/* full repcount passed only for the first element */
		if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 1, i ? 0 :
		    repcount))
			hp->save.acc.ahi_put8(&hp->save.acc, addr,
			    (uint8_t)llvalue);
	}
	mutex_exit(&bofi_mutex);
}
2991 2990
2992 2991
/*
 * our rep_putw() routine - use tryenter
 *
 * 16-bit analogue of bofi_rep_wr8(): pass-through when no errdefs are
 * linked or bofi_mutex is busy, else each element is filtered through
 * do_piow_corrupt() before being written.
 */
static void
bofi_rep_wr16(ddi_acc_impl_t *handle, uint16_t *host_addr,
    uint16_t *dev_addr, size_t repcount, uint_t flags)
{
	struct bofi_shadow *hp;
	int i;
	uint64_t llvalue;
	uint16_t *addr;

	hp = handle->ahi_common.ah_bus_private;
	/* ddi-check remap and range clamp; may return */
	BOFI_REP_WRITE_TESTS(uint16_t)
	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
		hp->save.acc.ahi_rep_put16(&hp->save.acc, host_addr, dev_addr,
		    repcount, flags);
		return;
	}
	for (i = 0; i < repcount; i++) {
		llvalue = *(host_addr + i);
		/* DDI_DEV_AUTOINCR steps the device address, else rewrite */
		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
		/* full repcount passed only for the first element */
		if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 2, i ? 0 :
		    repcount))
			hp->save.acc.ahi_put16(&hp->save.acc, addr,
			    (uint16_t)llvalue);
	}
	mutex_exit(&bofi_mutex);
}
3022 3021
3023 3022
/*
 * our rep_putl() routine - use tryenter
 *
 * 32-bit analogue of bofi_rep_wr8(): pass-through when no errdefs are
 * linked or bofi_mutex is busy, else each element is filtered through
 * do_piow_corrupt() before being written.
 */
static void
bofi_rep_wr32(ddi_acc_impl_t *handle, uint32_t *host_addr,
    uint32_t *dev_addr, size_t repcount, uint_t flags)
{
	struct bofi_shadow *hp;
	int i;
	uint64_t llvalue;
	uint32_t *addr;

	hp = handle->ahi_common.ah_bus_private;
	/* ddi-check remap and range clamp; may return */
	BOFI_REP_WRITE_TESTS(uint32_t)
	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
		hp->save.acc.ahi_rep_put32(&hp->save.acc, host_addr, dev_addr,
		    repcount, flags);
		return;
	}
	for (i = 0; i < repcount; i++) {
		llvalue = *(host_addr + i);
		/* DDI_DEV_AUTOINCR steps the device address, else rewrite */
		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
		/* full repcount passed only for the first element */
		if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 4, i ? 0 :
		    repcount))
			hp->save.acc.ahi_put32(&hp->save.acc, addr,
			    (uint32_t)llvalue);
	}
	mutex_exit(&bofi_mutex);
}
3053 3052
3054 3053
/*
 * our rep_putll() routine - use tryenter
 *
 * 64-bit analogue of bofi_rep_wr8(): pass-through when no errdefs are
 * linked or bofi_mutex is busy, else each element is filtered through
 * do_piow_corrupt() before being written.
 */
static void
bofi_rep_wr64(ddi_acc_impl_t *handle, uint64_t *host_addr,
    uint64_t *dev_addr, size_t repcount, uint_t flags)
{
	struct bofi_shadow *hp;
	int i;
	uint64_t llvalue;
	uint64_t *addr;

	hp = handle->ahi_common.ah_bus_private;
	/* ddi-check remap and range clamp; may return */
	BOFI_REP_WRITE_TESTS(uint64_t)
	if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
		hp->save.acc.ahi_rep_put64(&hp->save.acc, host_addr, dev_addr,
		    repcount, flags);
		return;
	}
	for (i = 0; i < repcount; i++) {
		llvalue = *(host_addr + i);
		/* DDI_DEV_AUTOINCR steps the device address, else rewrite */
		addr = dev_addr + ((flags == DDI_DEV_AUTOINCR) ? i : 0);
		/* full repcount passed only for the first element */
		if (do_piow_corrupt(hp, (caddr_t)addr, &llvalue, 8, i ? 0 :
		    repcount))
			hp->save.acc.ahi_put64(&hp->save.acc, addr,
			    (uint64_t)llvalue);
	}
	mutex_exit(&bofi_mutex);
}
3084 3083
3085 3084
/*
 * our ddi_map routine
 *
 * Intercepts register mappings for drivers under test.  On
 * DDI_MO_MAP_LOCKED it lets the nexus do the real mapping, then wraps
 * the resulting access handle in a bofi_shadow: the handle's get/put
 * function pointers are replaced with the bofi_* interception routines
 * and the original ones are saved in hp->save.acc.  On DDI_MO_UNMAP the
 * original pointers are restored and the shadow is torn down.  All
 * other map ops fall straight through to the saved bus_map.
 */
static int
bofi_map(dev_info_t *dip, dev_info_t *rdip,
    ddi_map_req_t *reqp, off_t offset, off_t len, caddr_t *vaddrp)
{
	ddi_acc_impl_t *ap;
	struct bofi_shadow *hp;
	struct bofi_errent *ep;
	struct bofi_link *lp, *next_lp;
	int retval;
	struct bofi_shadow *dhashp;
	struct bofi_shadow *hhashp;

	switch (reqp->map_op) {
	case DDI_MO_MAP_LOCKED:
		/*
		 * for this case get nexus to do real work first
		 */
		retval = save_bus_ops.bus_map(dip, rdip, reqp, offset, len,
		    vaddrp);
		if (retval != DDI_SUCCESS)
			return (retval);

		ap = (ddi_acc_impl_t *)reqp->map_handlep;
		if (ap == NULL)
			return (DDI_SUCCESS);
		/*
		 * if driver_list is set, only intercept those drivers
		 */
		if (!driver_under_test(ap->ahi_common.ah_dip))
			return (DDI_SUCCESS);

		/*
		 * support for ddi_regs_map_setup()
		 * - allocate shadow handle structure and fill it in
		 */
		hp = kmem_zalloc(sizeof (struct bofi_shadow), KM_SLEEP);
		(void) strncpy(hp->name, ddi_get_name(ap->ahi_common.ah_dip),
		    NAMESIZE);
		hp->instance = ddi_get_instance(ap->ahi_common.ah_dip);
		hp->dip = ap->ahi_common.ah_dip;
		hp->addr = *vaddrp;
		/*
		 * return spurious value to catch direct access to registers
		 * (the BOFI_*_TESTS macros map 64 back to hp->addr)
		 */
		if (bofi_ddi_check)
			*vaddrp = (caddr_t)64;
		hp->rnumber = ((ddi_acc_hdl_t *)ap)->ah_rnumber;
		hp->offset = offset;
		/* len == 0 means "to end of region"; clamp at INT_MAX */
		if (len == 0)
			hp->len = INT_MAX - offset;
		else
			hp->len = min(len, INT_MAX - offset);
		hp->hdl.acc_handle = (ddi_acc_handle_t)ap;
		hp->link = NULL;
		hp->type = BOFI_ACC_HDL;
		/*
		 * save existing function pointers and plug in our own
		 */
		hp->save.acc = *ap;
		ap->ahi_get8 = bofi_rd8;
		ap->ahi_get16 = bofi_rd16;
		ap->ahi_get32 = bofi_rd32;
		ap->ahi_get64 = bofi_rd64;
		ap->ahi_put8 = bofi_wr8;
		ap->ahi_put16 = bofi_wr16;
		ap->ahi_put32 = bofi_wr32;
		ap->ahi_put64 = bofi_wr64;
		ap->ahi_rep_get8 = bofi_rep_rd8;
		ap->ahi_rep_get16 = bofi_rep_rd16;
		ap->ahi_rep_get32 = bofi_rep_rd32;
		ap->ahi_rep_get64 = bofi_rep_rd64;
		ap->ahi_rep_put8 = bofi_rep_wr8;
		ap->ahi_rep_put16 = bofi_rep_wr16;
		ap->ahi_rep_put32 = bofi_rep_wr32;
		ap->ahi_rep_put64 = bofi_rep_wr64;
		ap->ahi_fault_check = bofi_check_acc_hdl;
#if defined(__sparc)
#else
		/*
		 * NOTE(review): presumably clears DIRECT so accesses go
		 * through the handle's function pointers - confirm
		 */
		ap->ahi_acc_attr &= ~DDI_ACCATTR_DIRECT;
#endif
		/*
		 * stick in a pointer to our shadow handle
		 */
		ap->ahi_common.ah_bus_private = hp;
		/*
		 * add to dhash, hhash and inuse lists
		 */
		mutex_enter(&bofi_low_mutex);
		mutex_enter(&bofi_mutex);
		hp->next = shadow_list.next;
		shadow_list.next->prev = hp;
		hp->prev = &shadow_list;
		shadow_list.next = hp;
		hhashp = HDL_HHASH(ap);
		hp->hnext = hhashp->hnext;
		hhashp->hnext->hprev = hp;
		hp->hprev = hhashp;
		hhashp->hnext = hp;
		dhashp = HDL_DHASH(hp->dip);
		hp->dnext = dhashp->dnext;
		dhashp->dnext->dprev = hp;
		hp->dprev = dhashp;
		dhashp->dnext = hp;
		/*
		 * chain on any pre-existing errdefs that apply to this
		 * acc_handle - match on driver name/instance, access type,
		 * register number and overlapping offset range
		 */
		for (ep = errent_listp; ep != NULL; ep = ep->next) {
			if (ddi_name_to_major(hp->name) ==
			    ddi_name_to_major(ep->name) &&
			    hp->instance == ep->errdef.instance &&
			    (ep->errdef.access_type & BOFI_PIO_RW) &&
			    (ep->errdef.rnumber == -1 ||
			    hp->rnumber == ep->errdef.rnumber) &&
			    (ep->errdef.len == 0 ||
			    offset < ep->errdef.offset + ep->errdef.len) &&
			    offset + hp->len > ep->errdef.offset) {
				lp = bofi_link_freelist;
				/* silently skipped if freelist exhausted */
				if (lp != NULL) {
					bofi_link_freelist = lp->link;
					lp->errentp = ep;
					lp->link = hp->link;
					hp->link = lp;
				}
			}
		}
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
		return (DDI_SUCCESS);
	case DDI_MO_UNMAP:

		ap = (ddi_acc_impl_t *)reqp->map_handlep;
		if (ap == NULL)
			break;
		/*
		 * support for ddi_regs_map_free()
		 * - check we really have a shadow handle for this one
		 */
		mutex_enter(&bofi_low_mutex);
		mutex_enter(&bofi_mutex);
		hhashp = HDL_HHASH(ap);
		for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
			if (hp->hdl.acc_handle == (ddi_acc_handle_t)ap)
				break;
		if (hp == hhashp) {
			/* not one of ours - let nexus unmap it below */
			mutex_exit(&bofi_mutex);
			mutex_exit(&bofi_low_mutex);
			break;
		}
		/*
		 * got a shadow handle - restore original pointers
		 */
		*ap = hp->save.acc;
		*vaddrp = hp->addr;
		/*
		 * remove from dhash, hhash and inuse lists
		 */
		hp->hnext->hprev = hp->hprev;
		hp->hprev->hnext = hp->hnext;
		hp->dnext->dprev = hp->dprev;
		hp->dprev->dnext = hp->dnext;
		hp->next->prev = hp->prev;
		hp->prev->next = hp->next;
		/*
		 * free any errdef link structures tagged onto the shadow handle
		 */
		for (lp = hp->link; lp != NULL; ) {
			next_lp = lp->link;
			lp->link = bofi_link_freelist;
			bofi_link_freelist = lp;
			lp = next_lp;
		}
		hp->link = NULL;
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
		/*
		 * finally delete shadow handle
		 */
		kmem_free(hp, sizeof (struct bofi_shadow));
		break;
	default:
		break;
	}
	return (save_bus_ops.bus_map(dip, rdip, reqp, offset, len, vaddrp));
}
3274 3273
3275 3274
/*
 * chain any pre-existing errdefs on to newly created dma handle
 * if required call do_dma_corrupt() to corrupt data
 *
 * Caller must hold bofi_mutex.  An errdef matches if driver name,
 * instance and register number agree, its access type includes DMA
 * read/write, and its [offset, offset+len) window still covers at
 * least one LLSZMASK-aligned chunk of the handle's mapping (the
 * alignment arithmetic rounds the window to full chunks).
 */
static void
chain_on_errdefs(struct bofi_shadow *hp)
{
	struct bofi_errent *ep;
	struct bofi_link *lp;

	ASSERT(MUTEX_HELD(&bofi_mutex));
	/*
	 * chain on any pre-existing errdefs that apply to this dma_handle
	 */
	for (ep = errent_listp; ep != NULL; ep = ep->next) {
		if (ddi_name_to_major(hp->name) ==
		    ddi_name_to_major(ep->name) &&
		    hp->instance == ep->errdef.instance &&
		    (ep->errdef.rnumber == -1 ||
		    hp->rnumber == ep->errdef.rnumber) &&
		    ((ep->errdef.access_type & BOFI_DMA_RW) &&
		    (((uintptr_t)(hp->addr + ep->errdef.offset +
		    ep->errdef.len) & ~LLSZMASK) >
		    ((uintptr_t)((hp->addr + ep->errdef.offset) +
		    LLSZMASK) & ~LLSZMASK)))) {
			/*
			 * got a match - link it on
			 * (silently skipped if the link freelist is empty)
			 */
			lp = bofi_link_freelist;
			if (lp != NULL) {
				bofi_link_freelist = lp->link;
				lp->errentp = ep;
				lp->link = hp->link;
				hp->link = lp;
				/*
				 * bind implies a sync-for-dev, so an active
				 * DMA-write errdef corrupts immediately
				 */
				if ((ep->errdef.access_type & BOFI_DMA_W) &&
				    (hp->flags & DDI_DMA_WRITE) &&
				    (ep->state & BOFI_DEV_ACTIVE)) {
					do_dma_corrupt(hp, ep,
					    DDI_DMA_SYNC_FORDEV,
					    0, hp->len);
				}
			}
		}
	}
}
3321 3320
3322 3321
3323 3322 /*
3324 3323 * need to do copy byte-by-byte in case one of pages is little-endian
3325 3324 */
3326 3325 static void
3327 3326 xbcopy(void *from, void *to, u_longlong_t len)
3328 3327 {
3329 3328 uchar_t *f = from;
3330 3329 uchar_t *t = to;
3331 3330
3332 3331 while (len--)
3333 3332 *t++ = *f++;
3334 3333 }
3335 3334
3336 3335
/*
 * our ddi_dma_allochdl routine
 *
 * For drivers under test, allocates a bofi_shadow alongside the real
 * DMA handle, redirects the devinfo-cached bind/unbind functions to the
 * bofi versions, assigns the shadow a unique per-device "rnumber", and
 * links it onto the dhash/hhash/inuse lists.  Other drivers fall
 * straight through to the saved nexus routine.
 */
static int
bofi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	int retval = DDI_DMA_NORESOURCES;
	struct bofi_shadow *hp, *xhp;
	int maxrnumber = 0;
	struct bofi_shadow *dhashp;
	struct bofi_shadow *hhashp;
	ddi_dma_impl_t *mp;

	/*
	 * if driver_list is set, only intercept those drivers
	 */
	if (!driver_under_test(rdip))
		return (save_bus_ops.bus_dma_allochdl(dip, rdip, attrp,
		    waitfp, arg, handlep));

	/*
	 * allocate shadow handle structure and fill it in
	 */
	hp = kmem_zalloc(sizeof (struct bofi_shadow),
	    ((waitfp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP));
	if (hp == NULL) {
		/*
		 * what to do here? Wait a bit and try again
		 * (schedule the caller's wait function, return NORESOURCES)
		 */
		if (waitfp != DDI_DMA_DONTWAIT)
			(void) timeout((void (*)())waitfp, arg, 10);
		return (retval);
	}
	(void) strncpy(hp->name, ddi_get_name(rdip), NAMESIZE);
	hp->instance = ddi_get_instance(rdip);
	hp->dip = rdip;
	hp->link = NULL;
	hp->type = BOFI_NULL;	/* allocated but not yet bound */
	/*
	 * call nexus to do the real work
	 */
	retval = save_bus_ops.bus_dma_allochdl(dip, rdip, attrp, waitfp, arg,
	    handlep);
	if (retval != DDI_SUCCESS) {
		kmem_free(hp, sizeof (struct bofi_shadow));
		return (retval);
	}
	/*
	 * now point set dma_handle to point to real handle
	 */
	hp->hdl.dma_handle = *handlep;
	mp = (ddi_dma_impl_t *)*handlep;
	mp->dmai_fault_check = bofi_check_dma_hdl;
	/*
	 * bind and unbind are cached in devinfo - must overwrite them
	 * - note that our bind and unbind are quite happy dealing with
	 * any handles for this devinfo that were previously allocated
	 */
	if (save_bus_ops.bus_dma_bindhdl == DEVI(rdip)->devi_bus_dma_bindfunc)
		DEVI(rdip)->devi_bus_dma_bindfunc = bofi_dma_bindhdl;
	if (save_bus_ops.bus_dma_unbindhdl ==
	    DEVI(rdip)->devi_bus_dma_unbindfunc)
		DEVI(rdip)->devi_bus_dma_unbindfunc = bofi_dma_unbindhdl;
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	/*
	 * get an "rnumber" for this handle - really just seeking to
	 * get a unique number - generally only care for early allocated
	 * handles - so we get as far as INT_MAX, just stay there
	 */
	dhashp = HDL_DHASH(hp->dip);
	for (xhp = dhashp->dnext; xhp != dhashp; xhp = xhp->dnext)
		if (ddi_name_to_major(xhp->name) ==
		    ddi_name_to_major(hp->name) &&
		    xhp->instance == hp->instance &&
		    (xhp->type == BOFI_DMA_HDL ||
		    xhp->type == BOFI_NULL))
			if (xhp->rnumber >= maxrnumber) {
				if (xhp->rnumber == INT_MAX)
					maxrnumber = INT_MAX;
				else
					maxrnumber = xhp->rnumber + 1;
			}
	hp->rnumber = maxrnumber;
	/*
	 * add to dhash, hhash and inuse lists
	 */
	hp->next = shadow_list.next;
	shadow_list.next->prev = hp;
	hp->prev = &shadow_list;
	shadow_list.next = hp;
	hhashp = HDL_HHASH(*handlep);
	hp->hnext = hhashp->hnext;
	hhashp->hnext->hprev = hp;
	hp->hprev = hhashp;
	hhashp->hnext = hp;
	dhashp = HDL_DHASH(hp->dip);
	hp->dnext = dhashp->dnext;
	dhashp->dnext->dprev = hp;
	hp->dprev = dhashp;
	dhashp->dnext = hp;
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	return (retval);
}
3443 3442
3444 3443
/*
 * our ddi_dma_freehdl routine
 *
 * Looks up the shadow BEFORE calling the nexus free routine (the real
 * handle is invalid afterwards), then if a shadow existed removes it
 * from all lists and frees it.  Panics if the driver frees a handle
 * that is still bound.
 */
static int
bofi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
	int retval;
	struct bofi_shadow *hp;
	struct bofi_shadow *hhashp;

	/*
	 * find shadow for this handle
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hhashp = HDL_HHASH(handle);
	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
		if (hp->hdl.dma_handle == handle)
			break;
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	/*
	 * call nexus to do the real work
	 */
	retval = save_bus_ops.bus_dma_freehdl(dip, rdip, handle);
	if (retval != DDI_SUCCESS) {
		return (retval);
	}
	/*
	 * did we really have a shadow for this handle
	 * (hp == hhashp means the search above wrapped without a match)
	 */
	if (hp == hhashp)
		return (retval);
	/*
	 * yes we have - see if it's still bound
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	if (hp->type != BOFI_NULL)
		panic("driver freeing bound dma_handle");
	/*
	 * remove from dhash, hhash and inuse lists
	 */
	hp->hnext->hprev = hp->hprev;
	hp->hprev->hnext = hp->hnext;
	hp->dnext->dprev = hp->dprev;
	hp->dprev->dnext = hp->dnext;
	hp->next->prev = hp->prev;
	hp->prev->next = hp->next;
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);

	kmem_free(hp, sizeof (struct bofi_shadow));
	return (retval);
}
3500 3499
3501 3500
3502 3501 /*
3503 3502 * our ddi_dma_bindhdl routine
3504 3503 */
3505 3504 static int
3506 3505 bofi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
3507 3506 ddi_dma_handle_t handle, struct ddi_dma_req *dmareqp,
3508 3507 ddi_dma_cookie_t *cookiep, uint_t *ccountp)
3509 3508 {
3510 3509 int retval = DDI_DMA_NORESOURCES;
3511 3510 auto struct ddi_dma_req dmareq;
3512 3511 struct bofi_shadow *hp;
3513 3512 struct bofi_shadow *hhashp;
3514 3513 ddi_dma_impl_t *mp;
3515 3514 unsigned long pagemask = ddi_ptob(rdip, 1) - 1;
3516 3515
3517 3516 /*
3518 3517 * check we really have a shadow for this handle
3519 3518 */
3520 3519 mutex_enter(&bofi_low_mutex);
3521 3520 mutex_enter(&bofi_mutex);
3522 3521 hhashp = HDL_HHASH(handle);
3523 3522 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3524 3523 if (hp->hdl.dma_handle == handle)
3525 3524 break;
3526 3525 mutex_exit(&bofi_mutex);
3527 3526 mutex_exit(&bofi_low_mutex);
3528 3527 if (hp == hhashp) {
3529 3528 /*
3530 3529 * no we don't - just call nexus to do the real work
3531 3530 */
3532 3531 return save_bus_ops.bus_dma_bindhdl(dip, rdip, handle, dmareqp,
3533 3532 cookiep, ccountp);
3534 3533 }
3535 3534 /*
3536 3535 * yes we have - see if it's already bound
3537 3536 */
3538 3537 if (hp->type != BOFI_NULL)
3539 3538 return (DDI_DMA_INUSE);
3540 3539
3541 3540 hp->flags = dmareqp->dmar_flags;
3542 3541 if (dmareqp->dmar_object.dmao_type == DMA_OTYP_PAGES) {
3543 3542 hp->map_flags = B_PAGEIO;
3544 3543 hp->map_pp = dmareqp->dmar_object.dmao_obj.pp_obj.pp_pp;
3545 3544 } else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_priv != NULL) {
3546 3545 hp->map_flags = B_SHADOW;
3547 3546 hp->map_pplist = dmareqp->dmar_object.dmao_obj.virt_obj.v_priv;
3548 3547 } else {
3549 3548 hp->map_flags = 0;
3550 3549 }
3551 3550 /*
3552 3551 * get a kernel virtual mapping
3553 3552 */
3554 3553 hp->addr = ddi_dmareq_mapin(dmareqp, &hp->mapaddr, &hp->len);
3555 3554 if (hp->addr == NULL)
3556 3555 goto error;
3557 3556 if (bofi_sync_check) {
3558 3557 /*
3559 3558 * Take a copy and pass pointers to this up to nexus instead.
3560 3559 * Data will be copied from the original on explicit
3561 3560 * and implicit ddi_dma_sync()
3562 3561 *
3563 3562 * - maintain page alignment because some devices assume it.
3564 3563 */
3565 3564 hp->origaddr = hp->addr;
3566 3565 hp->allocaddr = ddi_umem_alloc(
3567 3566 ((uintptr_t)hp->addr & pagemask) + hp->len,
3568 3567 (dmareqp->dmar_fp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP,
3569 3568 &hp->umem_cookie);
3570 3569 if (hp->allocaddr == NULL)
3571 3570 goto error;
3572 3571 hp->addr = hp->allocaddr + ((uintptr_t)hp->addr & pagemask);
3573 3572 if (dmareqp->dmar_flags & DDI_DMA_WRITE)
3574 3573 xbcopy(hp->origaddr, hp->addr, hp->len);
3575 3574 dmareq = *dmareqp;
3576 3575 dmareq.dmar_object.dmao_size = hp->len;
3577 3576 dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
3578 3577 dmareq.dmar_object.dmao_obj.virt_obj.v_as = &kas;
3579 3578 dmareq.dmar_object.dmao_obj.virt_obj.v_addr = hp->addr;
3580 3579 dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
3581 3580 dmareqp = &dmareq;
3582 3581 }
3583 3582 /*
3584 3583 * call nexus to do the real work
3585 3584 */
3586 3585 retval = save_bus_ops.bus_dma_bindhdl(dip, rdip, handle, dmareqp,
3587 3586 cookiep, ccountp);
3588 3587 if (retval != DDI_SUCCESS)
3589 3588 goto error2;
3590 3589 /*
3591 3590 * unset DMP_NOSYNC
3592 3591 */
3593 3592 mp = (ddi_dma_impl_t *)handle;
3594 3593 mp->dmai_rflags &= ~DMP_NOSYNC;
3595 3594 /*
3596 3595 * chain on any pre-existing errdefs that apply to this
3597 3596 * acc_handle and corrupt if required (as there is an implicit
3598 3597 * ddi_dma_sync() in this call)
3599 3598 */
3600 3599 mutex_enter(&bofi_low_mutex);
3601 3600 mutex_enter(&bofi_mutex);
3602 3601 hp->type = BOFI_DMA_HDL;
3603 3602 chain_on_errdefs(hp);
3604 3603 mutex_exit(&bofi_mutex);
3605 3604 mutex_exit(&bofi_low_mutex);
3606 3605 return (retval);
3607 3606
3608 3607 error:
3609 3608 if (dmareqp->dmar_fp != DDI_DMA_DONTWAIT) {
3610 3609 /*
3611 3610 * what to do here? Wait a bit and try again
3612 3611 */
3613 3612 (void) timeout((void (*)())dmareqp->dmar_fp,
3614 3613 dmareqp->dmar_arg, 10);
3615 3614 }
3616 3615 error2:
3617 3616 if (hp) {
3618 3617 ddi_dmareq_mapout(hp->mapaddr, hp->len, hp->map_flags,
3619 3618 hp->map_pp, hp->map_pplist);
3620 3619 if (bofi_sync_check && hp->allocaddr)
3621 3620 ddi_umem_free(hp->umem_cookie);
3622 3621 hp->mapaddr = NULL;
3623 3622 hp->allocaddr = NULL;
3624 3623 hp->origaddr = NULL;
3625 3624 }
3626 3625 return (retval);
3627 3626 }
3628 3627
3629 3628
/*
 * our ddi_dma_unbindhdl routine
 *
 * Calls the nexus unbind first, then - if we hold a shadow - applies
 * any DMA-read errdef corruption (unbind implies a sync-for-cpu),
 * releases the errdef links back to the freelist, copies data back to
 * the original buffer when bofi_sync_check substituted a private copy,
 * and tears down the kernel mapping.
 */
static int
bofi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
	struct bofi_link *lp, *next_lp;
	struct bofi_errent *ep;
	int retval;
	struct bofi_shadow *hp;
	struct bofi_shadow *hhashp;

	/*
	 * call nexus to do the real work
	 */
	retval = save_bus_ops.bus_dma_unbindhdl(dip, rdip, handle);
	if (retval != DDI_SUCCESS)
		return (retval);
	/*
	 * check we really have a shadow for this handle
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hhashp = HDL_HHASH(handle);
	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
		if (hp->hdl.dma_handle == handle)
			break;
	if (hp == hhashp) {
		/* not one of ours */
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
		return (retval);
	}
	/*
	 * yes we have - see if it's already unbound
	 */
	if (hp->type == BOFI_NULL)
		panic("driver unbinding unbound dma_handle");
	/*
	 * free any errdef link structures tagged on to this
	 * shadow handle
	 */
	for (lp = hp->link; lp != NULL; ) {
		next_lp = lp->link;
		/*
		 * there is an implicit sync_for_cpu on free -
		 * may need to corrupt
		 */
		ep = lp->errentp;
		if ((ep->errdef.access_type & BOFI_DMA_R) &&
		    (hp->flags & DDI_DMA_READ) &&
		    (ep->state & BOFI_DEV_ACTIVE)) {
			do_dma_corrupt(hp, ep, DDI_DMA_SYNC_FORCPU, 0, hp->len);
		}
		lp->link = bofi_link_freelist;
		bofi_link_freelist = lp;
		lp = next_lp;
	}
	hp->link = NULL;
	hp->type = BOFI_NULL;	/* mark unbound */
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);

	if (bofi_sync_check && (hp->flags & DDI_DMA_READ))
		/*
		 * implicit sync_for_cpu - copy data back
		 */
		if (hp->allocaddr)
			xbcopy(hp->addr, hp->origaddr, hp->len);
	ddi_dmareq_mapout(hp->mapaddr, hp->len, hp->map_flags,
	    hp->map_pp, hp->map_pplist);
	if (bofi_sync_check && hp->allocaddr)
		ddi_umem_free(hp->umem_cookie);
	hp->mapaddr = NULL;
	hp->allocaddr = NULL;
	hp->origaddr = NULL;
	return (retval);
}
3707 3706
3708 3707
3709 3708 /*
3710 3709 * our ddi_dma_sync routine
3711 3710 */
3712 3711 static int
3713 3712 bofi_dma_flush(dev_info_t *dip, dev_info_t *rdip,
3714 3713 ddi_dma_handle_t handle, off_t off, size_t len, uint_t flags)
3715 3714 {
3716 3715 struct bofi_link *lp;
3717 3716 struct bofi_errent *ep;
3718 3717 struct bofi_shadow *hp;
3719 3718 struct bofi_shadow *hhashp;
3720 3719 int retval;
3721 3720
3722 3721 if (flags == DDI_DMA_SYNC_FORCPU || flags == DDI_DMA_SYNC_FORKERNEL) {
3723 3722 /*
3724 3723 * in this case get nexus driver to do sync first
3725 3724 */
3726 3725 retval = save_bus_ops.bus_dma_flush(dip, rdip, handle, off,
3727 3726 len, flags);
3728 3727 if (retval != DDI_SUCCESS)
3729 3728 return (retval);
3730 3729 }
3731 3730 /*
3732 3731 * check we really have a shadow for this handle
3733 3732 */
3734 3733 mutex_enter(&bofi_low_mutex);
3735 3734 mutex_enter(&bofi_mutex);
3736 3735 hhashp = HDL_HHASH(handle);
3737 3736 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3738 3737 if (hp->hdl.dma_handle == handle &&
3739 3738 hp->type == BOFI_DMA_HDL)
3740 3739 break;
3741 3740 mutex_exit(&bofi_mutex);
3742 3741 mutex_exit(&bofi_low_mutex);
3743 3742 if (hp != hhashp) {
3744 3743 /*
3745 3744 * yes - do we need to copy data from original
3746 3745 */
3747 3746 if (bofi_sync_check && flags == DDI_DMA_SYNC_FORDEV)
3748 3747 if (hp->allocaddr)
3749 3748 xbcopy(hp->origaddr+off, hp->addr+off,
3750 3749 len ? len : (hp->len - off));
3751 3750 /*
3752 3751 * yes - check if we need to corrupt the data
3753 3752 */
3754 3753 mutex_enter(&bofi_low_mutex);
3755 3754 mutex_enter(&bofi_mutex);
3756 3755 for (lp = hp->link; lp != NULL; lp = lp->link) {
3757 3756 ep = lp->errentp;
3758 3757 if ((((ep->errdef.access_type & BOFI_DMA_R) &&
3759 3758 (flags == DDI_DMA_SYNC_FORCPU ||
3760 3759 flags == DDI_DMA_SYNC_FORKERNEL)) ||
3761 3760 ((ep->errdef.access_type & BOFI_DMA_W) &&
3762 3761 (flags == DDI_DMA_SYNC_FORDEV))) &&
3763 3762 (ep->state & BOFI_DEV_ACTIVE)) {
3764 3763 do_dma_corrupt(hp, ep, flags, off,
3765 3764 len ? len : (hp->len - off));
3766 3765 }
3767 3766 }
3768 3767 mutex_exit(&bofi_mutex);
3769 3768 mutex_exit(&bofi_low_mutex);
3770 3769 /*
3771 3770 * do we need to copy data to original
3772 3771 */
3773 3772 if (bofi_sync_check && (flags == DDI_DMA_SYNC_FORCPU ||
3774 3773 flags == DDI_DMA_SYNC_FORKERNEL))
3775 3774 if (hp->allocaddr)
3776 3775 xbcopy(hp->addr+off, hp->origaddr+off,
3777 3776 len ? len : (hp->len - off));
3778 3777 }
3779 3778 if (flags == DDI_DMA_SYNC_FORDEV)
3780 3779 /*
3781 3780 * in this case get nexus driver to do sync last
3782 3781 */
3783 3782 retval = save_bus_ops.bus_dma_flush(dip, rdip, handle, off,
3784 3783 len, flags);
3785 3784 return (retval);
3786 3785 }
3787 3786
3788 3787
3789 3788 /*
3790 3789 * our dma_win routine
3791 3790 */
3792 3791 static int
3793 3792 bofi_dma_win(dev_info_t *dip, dev_info_t *rdip,
3794 3793 ddi_dma_handle_t handle, uint_t win, off_t *offp,
3795 3794 size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
3796 3795 {
3797 3796 struct bofi_shadow *hp;
3798 3797 struct bofi_shadow *hhashp;
3799 3798 int retval;
3800 3799 ddi_dma_impl_t *mp;
3801 3800
3802 3801 /*
3803 3802 * call nexus to do the real work
3804 3803 */
3805 3804 retval = save_bus_ops.bus_dma_win(dip, rdip, handle, win, offp, lenp,
3806 3805 cookiep, ccountp);
3807 3806 if (retval != DDI_SUCCESS)
3808 3807 return (retval);
3809 3808 /*
3810 3809 * check we really have a shadow for this handle
3811 3810 */
3812 3811 mutex_enter(&bofi_low_mutex);
3813 3812 mutex_enter(&bofi_mutex);
3814 3813 hhashp = HDL_HHASH(handle);
3815 3814 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
3816 3815 if (hp->hdl.dma_handle == handle)
3817 3816 break;
3818 3817 if (hp != hhashp) {
3819 3818 /*
3820 3819 * yes - make sure DMP_NOSYNC is unset
3821 3820 */
3822 3821 mp = (ddi_dma_impl_t *)handle;
3823 3822 mp->dmai_rflags &= ~DMP_NOSYNC;
3824 3823 }
3825 3824 mutex_exit(&bofi_mutex);
3826 3825 mutex_exit(&bofi_low_mutex);
3827 3826 return (retval);
3828 3827 }
3829 3828
3830 3829
/*
 * our dma_ctl routine
 *
 * Passes the request through to the nexus, then handles the bofi
 * bookkeeping for handles we shadow: DDI_DMA_RESERVE creates shadow
 * state (sparc dvma), DDI_DMA_RELEASE tears it down.
 */
static int
bofi_dma_ctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	struct bofi_shadow *hp;
	struct bofi_shadow *hhashp;
	int retval;
	int i;
	struct bofi_shadow *dummyhp;

	/*
	 * get nexus to do real work
	 */
	retval = save_bus_ops.bus_dma_ctl(dip, rdip, handle, request, offp,
	    lenp, objp, flags);
	if (retval != DDI_SUCCESS)
		return (retval);
	/*
	 * if driver_list is set, only intercept those drivers
	 */
	if (!driver_under_test(rdip))
		return (DDI_SUCCESS);

#if defined(__sparc)
	/*
	 * check if this is a dvma_reserve - that one's like a
	 * dma_allochdl and needs to be handled separately
	 */
	if (request == DDI_DMA_RESERVE) {
		bofi_dvma_reserve(rdip, *(ddi_dma_handle_t *)objp);
		return (DDI_SUCCESS);
	}
#endif
	/*
	 * check we really have a shadow for this handle
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hhashp = HDL_HHASH(handle);
	for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
		if (hp->hdl.dma_handle == handle)
			break;
	if (hp == hhashp) {
		/* no shadow - nothing more to do */
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
		return (retval);
	}
	/*
	 * yes we have - see what kind of command this is
	 */
	switch (request) {
	case DDI_DMA_RELEASE:
		/*
		 * dvma release - release dummy handle and all the index handles
		 */
		dummyhp = hp;
		/* unlink dummy handle from its hhash bucket */
		dummyhp->hnext->hprev = dummyhp->hprev;
		dummyhp->hprev->hnext = dummyhp->hnext;
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
		for (i = 0; i < dummyhp->len; i++) {
			hp = dummyhp->hparrayp[i];
			/*
			 * check none of the index handles were still loaded
			 */
			if (hp->type != BOFI_NULL)
				panic("driver releasing loaded dvma");
			/*
			 * remove from dhash and inuse lists
			 * (locks are re-taken per index handle so we never
			 * call kmem_free/ddi_umem_free with them held)
			 */
			mutex_enter(&bofi_low_mutex);
			mutex_enter(&bofi_mutex);
			hp->dnext->dprev = hp->dprev;
			hp->dprev->dnext = hp->dnext;
			hp->next->prev = hp->prev;
			hp->prev->next = hp->next;
			mutex_exit(&bofi_mutex);
			mutex_exit(&bofi_low_mutex);

			/* free the shadow copy buffer, if we allocated one */
			if (bofi_sync_check && hp->allocaddr)
				ddi_umem_free(hp->umem_cookie);
			kmem_free(hp, sizeof (struct bofi_shadow));
		}
		kmem_free(dummyhp->hparrayp, dummyhp->len *
		    sizeof (struct bofi_shadow *));
		kmem_free(dummyhp, sizeof (struct bofi_shadow));
		return (retval);
	default:
		break;
	}
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	return (retval);
}
3929 3928
3930 3929 #if defined(__sparc)
/*
 * dvma reserve case from bofi_dma_ctl()
 *
 * Builds a "dummy" shadow handle representing the reservation plus one
 * real shadow handle per dvma page, and (for DMP_BYPASSNEXUS handles)
 * overlays the nexus's fast dvma ops vector with our bofi_dvma_ops so
 * subsequent kaddr_load/unload/sync calls come through us.
 */
static void
bofi_dvma_reserve(dev_info_t *rdip, ddi_dma_handle_t handle)
{
	struct bofi_shadow *hp;
	struct bofi_shadow *dummyhp;
	struct bofi_shadow *dhashp;
	struct bofi_shadow *hhashp;
	ddi_dma_impl_t *mp;
	struct fast_dvma *nexus_private;
	int i, count;

	mp = (ddi_dma_impl_t *)handle;
	count = mp->dmai_ndvmapages;
	/*
	 * allocate dummy shadow handle structure
	 */
	dummyhp = kmem_zalloc(sizeof (*dummyhp), KM_SLEEP);
	if (mp->dmai_rflags & DMP_BYPASSNEXUS) {
		/*
		 * overlay our routines over the nexus's dvma routines
		 * (original ops vector is saved so we can call through)
		 */
		nexus_private = (struct fast_dvma *)mp->dmai_nexus_private;
		dummyhp->save.dvma_ops = *(nexus_private->ops);
		nexus_private->ops = &bofi_dvma_ops;
	}
	/*
	 * now fill in the dummy handle. This just gets put on hhash queue
	 * so our dvma routines can find and index off to the handle they
	 * really want.
	 */
	(void) strncpy(dummyhp->name, ddi_get_name(rdip), NAMESIZE);
	dummyhp->instance = ddi_get_instance(rdip);
	dummyhp->rnumber = -1;
	dummyhp->dip = rdip;
	dummyhp->len = count;
	dummyhp->hdl.dma_handle = handle;
	dummyhp->link = NULL;
	dummyhp->type = BOFI_NULL;
	/*
	 * allocate space for real handles
	 */
	dummyhp->hparrayp = kmem_alloc(count *
	    sizeof (struct bofi_shadow *), KM_SLEEP);
	for (i = 0; i < count; i++) {
		/*
		 * allocate shadow handle structures and fill them in
		 */
		hp = kmem_zalloc(sizeof (*hp), KM_SLEEP);
		(void) strncpy(hp->name, ddi_get_name(rdip), NAMESIZE);
		hp->instance = ddi_get_instance(rdip);
		hp->rnumber = -1;
		hp->dip = rdip;
		hp->hdl.dma_handle = 0;
		hp->link = NULL;
		hp->type = BOFI_NULL;	/* nothing loaded on it yet */
		if (bofi_sync_check) {
			unsigned long pagemask = ddi_ptob(rdip, 1) - 1;
			/*
			 * Take a copy and set this to be hp->addr
			 * Data will be copied to and from the original on
			 * explicit and implicit ddi_dma_sync()
			 *
			 * - maintain page alignment because some devices
			 * assume it.
			 */
			hp->allocaddr = ddi_umem_alloc(
			    ((int)(uintptr_t)hp->addr & pagemask)
			    + pagemask + 1,
			    KM_SLEEP, &hp->umem_cookie);
			hp->addr = hp->allocaddr +
			    ((int)(uintptr_t)hp->addr & pagemask);
		}
		/*
		 * add to dhash and inuse lists.
		 * these don't go on hhash queue.
		 */
		mutex_enter(&bofi_low_mutex);
		mutex_enter(&bofi_mutex);
		hp->next = shadow_list.next;
		shadow_list.next->prev = hp;
		hp->prev = &shadow_list;
		shadow_list.next = hp;
		dhashp = HDL_DHASH(hp->dip);
		hp->dnext = dhashp->dnext;
		dhashp->dnext->dprev = hp;
		hp->dprev = dhashp;
		dhashp->dnext = hp;
		dummyhp->hparrayp[i] = hp;
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
	}
	/*
	 * add dummy handle to hhash list only
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hhashp = HDL_HHASH(handle);
	dummyhp->hnext = hhashp->hnext;
	hhashp->hnext->hprev = dummyhp;
	dummyhp->hprev = hhashp;
	hhashp->hnext = dummyhp;
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
}
4038 4037
/*
 * our dvma_kaddr_load()
 *
 * Looks up the dummy shadow for handle h, marks index handle `index'
 * loaded, optionally redirects the mapping at our shadow copy, calls
 * the saved nexus dvma_kaddr_load, then chains on matching errdefs.
 */
static void
bofi_dvma_kaddr_load(ddi_dma_handle_t h, caddr_t a, uint_t len, uint_t index,
    ddi_dma_cookie_t *cp)
{
	struct bofi_shadow *dummyhp;
	struct bofi_shadow *hp;
	struct bofi_shadow *hhashp;
	struct bofi_errent *ep;
	struct bofi_link *lp;

	/*
	 * check we really have a dummy shadow for this handle
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hhashp = HDL_HHASH(h);
	for (dummyhp = hhashp->hnext; dummyhp != hhashp;
	    dummyhp = dummyhp->hnext)
		if (dummyhp->hdl.dma_handle == h)
			break;
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	if (dummyhp == hhashp) {
		/*
		 * no dummy shadow - panic
		 */
		panic("driver dvma_kaddr_load with no reserve");
	}

	/*
	 * find real hp
	 */
	hp = dummyhp->hparrayp[index];
	/*
	 * check its not already loaded
	 */
	if (hp->type != BOFI_NULL)
		panic("driver loading loaded dvma");
	/*
	 * if were doing copying, just need to change origaddr and get
	 * nexus to map hp->addr again
	 * if not, set hp->addr to new address.
	 * - note these are always kernel virtual addresses - no need to map
	 */
	if (bofi_sync_check && hp->allocaddr) {
		hp->origaddr = a;
		a = hp->addr;	/* nexus maps our shadow copy instead */
	} else
		hp->addr = a;
	hp->len = len;
	/*
	 * get nexus to do the real work
	 */
	dummyhp->save.dvma_ops.dvma_kaddr_load(h, a, len, index, cp);
	/*
	 * chain on any pre-existing errdefs that apply to this dma_handle
	 * no need to corrupt - there's no implicit dma_sync on this one
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hp->type = BOFI_DMA_HDL;
	for (ep = errent_listp; ep != NULL; ep = ep->next) {
		if (ddi_name_to_major(hp->name) ==
		    ddi_name_to_major(ep->name) &&
		    hp->instance == ep->errdef.instance &&
		    (ep->errdef.rnumber == -1 ||
		    hp->rnumber == ep->errdef.rnumber) &&
		    /* errdef window must cover at least one LL-word */
		    ((ep->errdef.access_type & BOFI_DMA_RW) &&
		    (((uintptr_t)(hp->addr + ep->errdef.offset +
		    ep->errdef.len) & ~LLSZMASK) >
		    ((uintptr_t)((hp->addr + ep->errdef.offset) +
		    LLSZMASK) & ~LLSZMASK)))) {
			lp = bofi_link_freelist;
			if (lp != NULL) {
				bofi_link_freelist = lp->link;
				lp->errentp = ep;
				lp->link = hp->link;
				hp->link = lp;
			}
		}
	}
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
}
4126 4125
/*
 * our dvma_unload()
 *
 * Calls through to the saved nexus dvma_unload, applies any pending
 * read-direction corruption, releases the errdef links chained to the
 * index handle and marks it unloaded.
 */
static void
bofi_dvma_unload(ddi_dma_handle_t h, uint_t index, uint_t view)
{
	struct bofi_link *lp, *next_lp;
	struct bofi_errent *ep;
	struct bofi_shadow *dummyhp;
	struct bofi_shadow *hp;
	struct bofi_shadow *hhashp;

	/*
	 * check we really have a dummy shadow for this handle
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hhashp = HDL_HHASH(h);
	for (dummyhp = hhashp->hnext; dummyhp != hhashp;
	    dummyhp = dummyhp->hnext)
		if (dummyhp->hdl.dma_handle == h)
			break;
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	if (dummyhp == hhashp) {
		/*
		 * no dummy shadow - panic
		 */
		panic("driver dvma_unload with no reserve");
	}
	/* let the nexus do the real unload first */
	dummyhp->save.dvma_ops.dvma_unload(h, index, view);
	/*
	 * find real hp
	 */
	hp = dummyhp->hparrayp[index];
	/*
	 * check its not already unloaded
	 */
	if (hp->type == BOFI_NULL)
		panic("driver unloading unloaded dvma");
	/*
	 * free any errdef link structures tagged on to this
	 * shadow handle - do corruption if necessary
	 * (unload with a FORCPU/FORKERNEL view implies a final sync,
	 * so read-type errdefs get one last chance to corrupt)
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	for (lp = hp->link; lp != NULL; ) {
		next_lp = lp->link;
		ep = lp->errentp;
		if ((ep->errdef.access_type & BOFI_DMA_R) &&
		    (view == DDI_DMA_SYNC_FORCPU ||
		    view == DDI_DMA_SYNC_FORKERNEL) &&
		    (ep->state & BOFI_DEV_ACTIVE)) {
			do_dma_corrupt(hp, ep, view, 0, hp->len);
		}
		/* return link structure to the freelist */
		lp->link = bofi_link_freelist;
		bofi_link_freelist = lp;
		lp = next_lp;
	}
	hp->link = NULL;
	hp->type = BOFI_NULL;
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	/*
	 * if there is an explicit sync_for_cpu, then do copy to original
	 */
	if (bofi_sync_check &&
	    (view == DDI_DMA_SYNC_FORCPU || view == DDI_DMA_SYNC_FORKERNEL))
		if (hp->allocaddr)
			xbcopy(hp->addr, hp->origaddr, hp->len);
}
4198 4197
/*
 * our dvma_sync()
 * (NOTE: original header comment said "our dvma_unload()" - copy/paste typo)
 *
 * Like bofi_dma_flush() but for fast dvma handles: nexus sync runs
 * first for FORCPU/FORKERNEL views and last for FORDEV, with errdef
 * corruption and shadow-copy staging in between.
 */
static void
bofi_dvma_sync(ddi_dma_handle_t h, uint_t index, uint_t view)
{
	struct bofi_link *lp;
	struct bofi_errent *ep;
	struct bofi_shadow *hp;
	struct bofi_shadow *dummyhp;
	struct bofi_shadow *hhashp;

	/*
	 * check we really have a dummy shadow for this handle
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	hhashp = HDL_HHASH(h);
	for (dummyhp = hhashp->hnext; dummyhp != hhashp;
	    dummyhp = dummyhp->hnext)
		if (dummyhp->hdl.dma_handle == h)
			break;
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	if (dummyhp == hhashp) {
		/*
		 * no dummy shadow - panic
		 */
		panic("driver dvma_sync with no reserve");
	}
	/*
	 * find real hp
	 */
	hp = dummyhp->hparrayp[index];
	/*
	 * check its already loaded
	 */
	if (hp->type == BOFI_NULL)
		panic("driver syncing unloaded dvma");
	if (view == DDI_DMA_SYNC_FORCPU || view == DDI_DMA_SYNC_FORKERNEL)
		/*
		 * in this case do sync first
		 */
		dummyhp->save.dvma_ops.dvma_sync(h, index, view);
	/*
	 * if there is an explicit sync_for_dev, then do copy from original
	 */
	if (bofi_sync_check && view == DDI_DMA_SYNC_FORDEV) {
		if (hp->allocaddr)
			xbcopy(hp->origaddr, hp->addr, hp->len);
	}
	/*
	 * do corruption if necessary
	 */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	for (lp = hp->link; lp != NULL; lp = lp->link) {
		ep = lp->errentp;
		if ((((ep->errdef.access_type & BOFI_DMA_R) &&
		    (view == DDI_DMA_SYNC_FORCPU ||
		    view == DDI_DMA_SYNC_FORKERNEL)) ||
		    ((ep->errdef.access_type & BOFI_DMA_W) &&
		    (view == DDI_DMA_SYNC_FORDEV))) &&
		    (ep->state & BOFI_DEV_ACTIVE)) {
			do_dma_corrupt(hp, ep, view, 0, hp->len);
		}
	}
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	/*
	 * if there is an explicit sync_for_cpu, then do copy to original
	 */
	if (bofi_sync_check &&
	    (view == DDI_DMA_SYNC_FORCPU || view == DDI_DMA_SYNC_FORKERNEL)) {
		if (hp->allocaddr)
			xbcopy(hp->addr, hp->origaddr, hp->len);
	}
	if (view == DDI_DMA_SYNC_FORDEV)
		/*
		 * in this case do sync last
		 */
		dummyhp->save.dvma_ops.dvma_sync(h, index, view);
}
4282 4281 #endif
4283 4282
/*
 * bofi intercept routine - gets called instead of users interrupt routine
 *
 * xp is really our shadow handle (installed as the handler argument by
 * bofi_intr_ops); consults active errdefs to optionally delay the
 * interrupt, drop it (BOFI_LOSE_INTR) or replay it extra times
 * (BOFI_EXTRA_INTR) before/while calling the driver's real handler.
 */
static uint_t
bofi_intercept_intr(caddr_t xp, caddr_t arg2)
{
	struct bofi_errent *ep;
	struct bofi_link *lp;
	struct bofi_shadow *hp;
	int intr_count = 1;	/* how many times to call the real handler */
	int i;
	uint_t retval = DDI_INTR_UNCLAIMED;
	uint_t result;
	int unclaimed_counter = 0;
	int jabber_detected = 0;

	hp = (struct bofi_shadow *)xp;
	/*
	 * check if nothing to do
	 */
	if (hp->link == NULL)
		return (hp->save.intr.int_handler
		    (hp->save.intr.int_handler_arg1, arg2));
	mutex_enter(&bofi_mutex);
	/*
	 * look for any errdefs
	 */
	for (lp = hp->link; lp != NULL; lp = lp->link) {
		ep = lp->errentp;
		if (ep->state & BOFI_DEV_ACTIVE) {
			/*
			 * got one
			 */
			if ((ep->errdef.access_count ||
			    ep->errdef.fail_count) &&
			    (ep->errdef.access_type & BOFI_LOG))
				log_acc_event(ep, BOFI_INTR, 0, 0, 1, 0);
			if (ep->errdef.access_count > 1) {
				ep->errdef.access_count--;
			} else if (ep->errdef.fail_count > 0) {
				ep->errdef.fail_count--;
				ep->errdef.access_count = 0;
				/*
				 * OK do "corruption"
				 */
				if (ep->errstate.fail_time == 0)
					ep->errstate.fail_time = bofi_gettime();
				switch (ep->errdef.optype) {
				case BOFI_DELAY_INTR:
					/* busy-wait only below hilevel */
					if (!hp->hilevel) {
						drv_usecwait
						    (ep->errdef.operand);
					}
					break;
				case BOFI_LOSE_INTR:
					intr_count = 0;
					break;
				case BOFI_EXTRA_INTR:
					intr_count += ep->errdef.operand;
					break;
				default:
					break;
				}
			}
		}
	}
	mutex_exit(&bofi_mutex);
	/*
	 * send extra or fewer interrupts as requested
	 */
	for (i = 0; i < intr_count; i++) {
		result = hp->save.intr.int_handler
		    (hp->save.intr.int_handler_arg1, arg2);
		/*
		 * jabber heuristic: a run of ~20 consecutive unclaimed
		 * results counts as detected; claims decay the counter
		 */
		if (result == DDI_INTR_CLAIMED)
			unclaimed_counter >>= 1;
		else if (++unclaimed_counter >= 20)
			jabber_detected = 1;
		if (i == 0)
			retval = result;
	}
	/*
	 * if more than 1000 spurious interrupts requested and
	 * jabber not detected - give warning
	 */
	if (intr_count > 1000 && !jabber_detected)
		panic("undetected interrupt jabber: %s%d",
		    hp->name, hp->instance);
	/*
	 * return first response - or "unclaimed" if none
	 */
	return (retval);
}
4376 4375
4377 4376
4378 4377 /*
4379 4378 * our ddi_check_acc_hdl
4380 4379 */
4381 4380 /* ARGSUSED */
4382 4381 static int
4383 4382 bofi_check_acc_hdl(ddi_acc_impl_t *handle)
4384 4383 {
4385 4384 struct bofi_shadow *hp;
4386 4385 struct bofi_link *lp;
4387 4386 uint_t result = 0;
4388 4387
4389 4388 hp = handle->ahi_common.ah_bus_private;
4390 4389 if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
4391 4390 return (0);
4392 4391 }
4393 4392 for (lp = hp->link; lp != NULL; lp = lp->link) {
4394 4393 /*
4395 4394 * OR in error state from all associated
4396 4395 * errdef structures
4397 4396 */
4398 4397 if (lp->errentp->errdef.access_count == 0 &&
4399 4398 (lp->errentp->state & BOFI_DEV_ACTIVE)) {
4400 4399 result = (lp->errentp->errdef.acc_chk & 1);
4401 4400 }
4402 4401 }
4403 4402 mutex_exit(&bofi_mutex);
4404 4403 return (result);
4405 4404 }
4406 4405
4407 4406 /*
4408 4407 * our ddi_check_dma_hdl
4409 4408 */
4410 4409 /* ARGSUSED */
4411 4410 static int
4412 4411 bofi_check_dma_hdl(ddi_dma_impl_t *handle)
4413 4412 {
4414 4413 struct bofi_shadow *hp;
4415 4414 struct bofi_link *lp;
4416 4415 struct bofi_shadow *hhashp;
4417 4416 uint_t result = 0;
4418 4417
4419 4418 if (!mutex_tryenter(&bofi_mutex)) {
4420 4419 return (0);
4421 4420 }
4422 4421 hhashp = HDL_HHASH(handle);
4423 4422 for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
4424 4423 if (hp->hdl.dma_handle == (ddi_dma_handle_t)handle)
4425 4424 break;
4426 4425 if (hp == hhashp) {
4427 4426 mutex_exit(&bofi_mutex);
4428 4427 return (0);
4429 4428 }
4430 4429 if (!hp->link) {
4431 4430 mutex_exit(&bofi_mutex);
4432 4431 return (0);
4433 4432 }
4434 4433 for (lp = hp->link; lp != NULL; lp = lp->link) {
4435 4434 /*
4436 4435 * OR in error state from all associated
4437 4436 * errdef structures
4438 4437 */
4439 4438 if (lp->errentp->errdef.access_count == 0 &&
4440 4439 (lp->errentp->state & BOFI_DEV_ACTIVE)) {
4441 4440 result = ((lp->errentp->errdef.acc_chk & 2) ? 1 : 0);
4442 4441 }
4443 4442 }
4444 4443 mutex_exit(&bofi_mutex);
4445 4444 return (result);
4446 4445 }
4447 4446
4448 4447
/*
 * our bus_post_event routine
 *
 * Intercepts DDI_DEVI_FAULT_EVENT postings: records the fault message
 * and severity on every errdef chained to shadow handles for the
 * faulting dev_info, then passes the event on to the saved nexus op.
 */
/* ARGSUSED */
static int
bofi_post_event(dev_info_t *dip, dev_info_t *rdip,
    ddi_eventcookie_t eventhdl, void *impl_data)
{
	ddi_eventcookie_t ec;
	struct ddi_fault_event_data *arg;
	struct bofi_errent *ep;
	struct bofi_shadow *hp;
	struct bofi_shadow *dhashp;
	struct bofi_link *lp;

	ASSERT(eventhdl);
	if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* not a fault event - just pass it through */
	if (ec != eventhdl)
		return (save_bus_ops.bus_post_event(dip, rdip, eventhdl,
		    impl_data));

	arg = (struct ddi_fault_event_data *)impl_data;
	mutex_enter(&bofi_mutex);
	/*
	 * find shadow handles with appropriate dev_infos
	 * and set error reported on all associated errdef structures
	 */
	dhashp = HDL_DHASH(arg->f_dip);
	for (hp = dhashp->dnext; hp != dhashp; hp = hp->dnext) {
		if (hp->dip == arg->f_dip) {
			for (lp = hp->link; lp != NULL; lp = lp->link) {
				ep = lp->errentp;
				ep->errstate.errmsg_count++;
				/*
				 * record first message, or a more severe
				 * one (lower impact value = more severe)
				 */
				if ((ep->errstate.msg_time == NULL ||
				    ep->errstate.severity > arg->f_impact) &&
				    (ep->state & BOFI_DEV_ACTIVE)) {
					ep->errstate.msg_time = bofi_gettime();
					ep->errstate.severity = arg->f_impact;
					(void) strncpy(ep->errstate.buffer,
					    arg->f_message, ERRMSGSIZE);
					/* wake up anyone waiting in ioctl */
					ddi_trigger_softintr(ep->softintr_id);
				}
			}
		}
	}
	mutex_exit(&bofi_mutex);
	return (save_bus_ops.bus_post_event(dip, rdip, eventhdl, impl_data));
}
4496 4495
/*
 * sysevent callback for FMA ereports
 *
 * Matches the ereport's detector device path against our shadow
 * handles and updates errdef state: service-impact ereports clamp the
 * recorded severity, other ereport classes are captured as the error
 * message text.
 */
/*ARGSUSED*/
static int
bofi_fm_ereport_callback(sysevent_t *ev, void *cookie)
{
	char *class = "";
	char *path = "";
	char *ptr;
	nvlist_t *nvlist;
	nvlist_t *detector;
	ddi_fault_impact_t impact;
	struct bofi_errent *ep;
	struct bofi_shadow *hp;
	struct bofi_link *lp;
	char service_class[FM_MAX_CLASS];
	char hppath[MAXPATHLEN];
	int service_ereport = 0;

	(void) sysevent_get_attr_list(ev, &nvlist);
	(void) nvlist_lookup_string(nvlist, FM_CLASS, &class);
	if (nvlist_lookup_nvlist(nvlist, FM_EREPORT_DETECTOR, &detector) == 0)
		(void) nvlist_lookup_string(detector, FM_FMRI_DEV_PATH, &path);

	/*
	 * build the "ereport.io.service." class prefix; the trailing '.'
	 * is excluded from the comparison via strlen() - 1
	 */
	(void) snprintf(service_class, FM_MAX_CLASS, "%s.%s.%s.",
	    FM_EREPORT_CLASS, DDI_IO_CLASS, DDI_FM_SERVICE_IMPACT);
	if (strncmp(class, service_class, strlen(service_class) - 1) == 0)
		service_ereport = 1;

	mutex_enter(&bofi_mutex);
	/*
	 * find shadow handles with appropriate dev_infos
	 * and set error reported on all associated errdef structures
	 */
	for (hp = shadow_list.next; hp != &shadow_list; hp = hp->next) {
		(void) ddi_pathname(hp->dip, hppath);
		if (strcmp(path, hppath) != 0)
			continue;
		for (lp = hp->link; lp != NULL; lp = lp->link) {
			ep = lp->errentp;
			ep->errstate.errmsg_count++;
			if (!(ep->state & BOFI_DEV_ACTIVE))
				continue;
			/* only the first report is recorded */
			if (ep->errstate.msg_time != NULL)
				continue;
			if (service_ereport) {
				/* suffix after the prefix names the impact */
				ptr = class + strlen(service_class);
				if (strcmp(ptr, DDI_FM_SERVICE_LOST) == 0)
					impact = DDI_SERVICE_LOST;
				else if (strcmp(ptr,
				    DDI_FM_SERVICE_DEGRADED) == 0)
					impact = DDI_SERVICE_DEGRADED;
				else if (strcmp(ptr,
				    DDI_FM_SERVICE_RESTORED) == 0)
					impact = DDI_SERVICE_RESTORED;
				else
					impact = DDI_SERVICE_UNAFFECTED;
				/* keep the most severe impact seen */
				if (ep->errstate.severity > impact)
					ep->errstate.severity = impact;
			} else if (ep->errstate.buffer[0] == '\0') {
				(void) strncpy(ep->errstate.buffer, class,
				    ERRMSGSIZE);
			}
			if (ep->errstate.buffer[0] != '\0' &&
			    ep->errstate.severity < DDI_SERVICE_RESTORED) {
				ep->errstate.msg_time = bofi_gettime();
				/* wake up anyone waiting in ioctl */
				ddi_trigger_softintr(ep->softintr_id);
			}
		}
	}
	nvlist_free(nvlist);
	mutex_exit(&bofi_mutex);
	return (0);
}
4569 4568
/*
 * our intr_ops routine
 *
 * DDI_INTROP_ADDISR: substitutes bofi_intercept_intr (with a freshly
 * allocated shadow handle as its argument) for the driver's handler
 * before calling the nexus, then links the shadow on to the hash/inuse
 * lists and chains any matching BOFI_INTR errdefs.
 * DDI_INTROP_REMISR: calls the nexus, then finds and frees the shadow.
 * Everything else is passed straight through.
 */
static int
bofi_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
    ddi_intr_handle_impl_t *hdlp, void *result)
{
	int retval;
	struct bofi_shadow *hp;
	struct bofi_shadow *dhashp;
	struct bofi_shadow *hhashp;
	struct bofi_errent *ep;
	struct bofi_link *lp, *next_lp;

	switch (intr_op) {
	case DDI_INTROP_ADDISR:
		/*
		 * if driver_list is set, only intercept those drivers
		 */
		if (!driver_under_test(rdip))
			return (save_bus_ops.bus_intr_op(dip, rdip,
			    intr_op, hdlp, result));
		/*
		 * allocate shadow handle structure and fill in
		 */
		hp = kmem_zalloc(sizeof (struct bofi_shadow), KM_SLEEP);
		(void) strncpy(hp->name, ddi_get_name(rdip), NAMESIZE);
		hp->instance = ddi_get_instance(rdip);
		/* save the real handler so the intercept can call it */
		hp->save.intr.int_handler = hdlp->ih_cb_func;
		hp->save.intr.int_handler_arg1 = hdlp->ih_cb_arg1;
		hdlp->ih_cb_func = (ddi_intr_handler_t *)bofi_intercept_intr;
		hdlp->ih_cb_arg1 = (caddr_t)hp;
		hp->bofi_inum = hdlp->ih_inum;
		hp->dip = rdip;
		hp->link = NULL;
		hp->type = BOFI_INT_HDL;
		/*
		 * save whether hilevel or not
		 * (bofi_intercept_intr must not busy-wait at hilevel)
		 */

		if (hdlp->ih_pri >= ddi_intr_get_hilevel_pri())
			hp->hilevel = 1;
		else
			hp->hilevel = 0;

		/*
		 * call nexus to do real work, but specifying our handler, and
		 * our shadow handle as argument
		 */
		retval = save_bus_ops.bus_intr_op(dip, rdip,
		    intr_op, hdlp, result);
		if (retval != DDI_SUCCESS) {
			/* nexus rejected it - undo our allocation */
			kmem_free(hp, sizeof (struct bofi_shadow));
			return (retval);
		}
		/*
		 * add to dhash, hhash and inuse lists
		 */
		mutex_enter(&bofi_low_mutex);
		mutex_enter(&bofi_mutex);
		hp->next = shadow_list.next;
		shadow_list.next->prev = hp;
		hp->prev = &shadow_list;
		shadow_list.next = hp;
		hhashp = HDL_HHASH(hdlp->ih_inum);
		hp->hnext = hhashp->hnext;
		hhashp->hnext->hprev = hp;
		hp->hprev = hhashp;
		hhashp->hnext = hp;
		dhashp = HDL_DHASH(hp->dip);
		hp->dnext = dhashp->dnext;
		dhashp->dnext->dprev = hp;
		hp->dprev = dhashp;
		dhashp->dnext = hp;
		/*
		 * chain on any pre-existing errdefs that apply to this
		 * interrupt handle
		 */
		for (ep = errent_listp; ep != NULL; ep = ep->next) {
			if (ddi_name_to_major(hp->name) ==
			    ddi_name_to_major(ep->name) &&
			    hp->instance == ep->errdef.instance &&
			    (ep->errdef.access_type & BOFI_INTR)) {
				lp = bofi_link_freelist;
				if (lp != NULL) {
					bofi_link_freelist = lp->link;
					lp->errentp = ep;
					lp->link = hp->link;
					hp->link = lp;
				}
			}
		}
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
		return (retval);
	case DDI_INTROP_REMISR:
		/*
		 * call nexus routine first
		 */
		retval = save_bus_ops.bus_intr_op(dip, rdip,
		    intr_op, hdlp, result);
		/*
		 * find shadow handle
		 */
		mutex_enter(&bofi_low_mutex);
		mutex_enter(&bofi_mutex);
		hhashp = HDL_HHASH(hdlp->ih_inum);
		for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext) {
			if (hp->dip == rdip &&
			    hp->type == BOFI_INT_HDL &&
			    hp->bofi_inum == hdlp->ih_inum) {
				break;
			}
		}
		if (hp == hhashp) {
			/* we never shadowed this one */
			mutex_exit(&bofi_mutex);
			mutex_exit(&bofi_low_mutex);
			return (retval);
		}
		/*
		 * found one - remove from dhash, hhash and inuse lists
		 */
		hp->hnext->hprev = hp->hprev;
		hp->hprev->hnext = hp->hnext;
		hp->dnext->dprev = hp->dprev;
		hp->dprev->dnext = hp->dnext;
		hp->next->prev = hp->prev;
		hp->prev->next = hp->next;
		/*
		 * free any errdef link structures
		 * tagged on to this shadow handle
		 */
		for (lp = hp->link; lp != NULL; ) {
			next_lp = lp->link;
			lp->link = bofi_link_freelist;
			bofi_link_freelist = lp;
			lp = next_lp;
		}
		hp->link = NULL;
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
		kmem_free(hp, sizeof (struct bofi_shadow));
		return (retval);
	default:
		return (save_bus_ops.bus_intr_op(dip, rdip,
		    intr_op, hdlp, result));
	}
}
↓ open down ↓ |
4421 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX