/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012 Garrett D'Amore <garrett@damore.org>. All rights reserved.
25 */ 26 27 #include <sys/note.h> 28 #include <sys/types.h> 29 #include <sys/param.h> 30 #include <sys/systm.h> 31 #include <sys/buf.h> 32 #include <sys/uio.h> 33 #include <sys/cred.h> 34 #include <sys/poll.h> 35 #include <sys/mman.h> 36 #include <sys/kmem.h> 37 #include <sys/model.h> 38 #include <sys/file.h> 39 #include <sys/proc.h> 40 #include <sys/open.h> 41 #include <sys/user.h> 42 #include <sys/t_lock.h> 43 #include <sys/vm.h> 44 #include <sys/stat.h> 45 #include <vm/hat.h> 46 #include <vm/seg.h> 47 #include <vm/seg_vn.h> 48 #include <vm/seg_dev.h> 49 #include <vm/as.h> 50 #include <sys/cmn_err.h> 51 #include <sys/cpuvar.h> 52 #include <sys/debug.h> 53 #include <sys/autoconf.h> 54 #include <sys/sunddi.h> 55 #include <sys/esunddi.h> 56 #include <sys/sunndi.h> 57 #include <sys/kstat.h> 58 #include <sys/conf.h> 59 #include <sys/ddi_impldefs.h> /* include implementation structure defs */ 60 #include <sys/ndi_impldefs.h> /* include prototypes */ 61 #include <sys/ddi_periodic.h> 62 #include <sys/hwconf.h> 63 #include <sys/pathname.h> 64 #include <sys/modctl.h> 65 #include <sys/epm.h> 66 #include <sys/devctl.h> 67 #include <sys/callb.h> 68 #include <sys/cladm.h> 69 #include <sys/sysevent.h> 70 #include <sys/dacf_impl.h> 71 #include <sys/ddidevmap.h> 72 #include <sys/bootconf.h> 73 #include <sys/disp.h> 74 #include <sys/atomic.h> 75 #include <sys/promif.h> 76 #include <sys/instance.h> 77 #include <sys/sysevent/eventdefs.h> 78 #include <sys/task.h> 79 #include <sys/project.h> 80 #include <sys/taskq.h> 81 #include <sys/devpolicy.h> 82 #include <sys/ctype.h> 83 #include <net/if.h> 84 #include <sys/rctl.h> 85 #include <sys/zone.h> 86 #include <sys/clock_impl.h> 87 #include <sys/ddi.h> 88 #include <sys/modhash.h> 89 #include <sys/sunldi_impl.h> 90 #include <sys/fs/dv_node.h> 91 #include <sys/fs/snode.h> 92 93 extern pri_t minclsyspri; 94 95 extern rctl_hndl_t rc_project_locked_mem; 96 extern rctl_hndl_t rc_zone_locked_mem; 97 98 #ifdef DEBUG 99 static int sunddi_debug = 0; 
100 #endif /* DEBUG */ 101 102 /* ddi_umem_unlock miscellaneous */ 103 104 static void i_ddi_umem_unlock_thread_start(void); 105 106 static kmutex_t ddi_umem_unlock_mutex; /* unlock list mutex */ 107 static kcondvar_t ddi_umem_unlock_cv; /* unlock list block/unblock */ 108 static kthread_t *ddi_umem_unlock_thread; 109 /* 110 * The ddi_umem_unlock FIFO list. NULL head pointer indicates empty list. 111 */ 112 static struct ddi_umem_cookie *ddi_umem_unlock_head = NULL; 113 static struct ddi_umem_cookie *ddi_umem_unlock_tail = NULL; 114 115 /* 116 * DDI(Sun) Function and flag definitions: 117 */ 118 119 #if defined(__x86) 120 /* 121 * Used to indicate which entries were chosen from a range. 122 */ 123 char *chosen_reg = "chosen-reg"; 124 #endif 125 126 /* 127 * Function used to ring system console bell 128 */ 129 void (*ddi_console_bell_func)(clock_t duration); 130 131 /* 132 * Creating register mappings and handling interrupts: 133 */ 134 135 /* 136 * Generic ddi_map: Call parent to fulfill request... 137 */ 138 139 int 140 ddi_map(dev_info_t *dp, ddi_map_req_t *mp, off_t offset, 141 off_t len, caddr_t *addrp) 142 { 143 dev_info_t *pdip; 144 145 ASSERT(dp); 146 pdip = (dev_info_t *)DEVI(dp)->devi_parent; 147 return ((DEVI(pdip)->devi_ops->devo_bus_ops->bus_map)(pdip, 148 dp, mp, offset, len, addrp)); 149 } 150 151 /* 152 * ddi_apply_range: (Called by nexi only.) 153 * Apply ranges in parent node dp, to child regspec rp... 154 */ 155 156 int 157 ddi_apply_range(dev_info_t *dp, dev_info_t *rdip, struct regspec *rp) 158 { 159 return (i_ddi_apply_range(dp, rdip, rp)); 160 } 161 162 int 163 ddi_map_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset, 164 off_t len) 165 { 166 ddi_map_req_t mr; 167 #if defined(__x86) 168 struct { 169 int bus; 170 int addr; 171 int size; 172 } reg, *reglist; 173 uint_t length; 174 int rc; 175 176 /* 177 * get the 'registers' or the 'reg' property. 178 * We look up the reg property as an array of 179 * int's. 
180 */ 181 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 182 DDI_PROP_DONTPASS, "registers", (int **)®list, &length); 183 if (rc != DDI_PROP_SUCCESS) 184 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 185 DDI_PROP_DONTPASS, "reg", (int **)®list, &length); 186 if (rc == DDI_PROP_SUCCESS) { 187 /* 188 * point to the required entry. 189 */ 190 reg = reglist[rnumber]; 191 reg.addr += offset; 192 if (len != 0) 193 reg.size = len; 194 /* 195 * make a new property containing ONLY the required tuple. 196 */ 197 if (ddi_prop_update_int_array(DDI_DEV_T_NONE, dip, 198 chosen_reg, (int *)®, (sizeof (reg)/sizeof (int))) 199 != DDI_PROP_SUCCESS) { 200 cmn_err(CE_WARN, "%s%d: cannot create '%s' " 201 "property", DEVI(dip)->devi_name, 202 DEVI(dip)->devi_instance, chosen_reg); 203 } 204 /* 205 * free the memory allocated by 206 * ddi_prop_lookup_int_array (). 207 */ 208 ddi_prop_free((void *)reglist); 209 } 210 #endif 211 mr.map_op = DDI_MO_MAP_LOCKED; 212 mr.map_type = DDI_MT_RNUMBER; 213 mr.map_obj.rnumber = rnumber; 214 mr.map_prot = PROT_READ | PROT_WRITE; 215 mr.map_flags = DDI_MF_KERNEL_MAPPING; 216 mr.map_handlep = NULL; 217 mr.map_vers = DDI_MAP_VERSION; 218 219 /* 220 * Call my parent to map in my regs. 221 */ 222 223 return (ddi_map(dip, &mr, offset, len, kaddrp)); 224 } 225 226 void 227 ddi_unmap_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset, 228 off_t len) 229 { 230 ddi_map_req_t mr; 231 232 mr.map_op = DDI_MO_UNMAP; 233 mr.map_type = DDI_MT_RNUMBER; 234 mr.map_flags = DDI_MF_KERNEL_MAPPING; 235 mr.map_prot = PROT_READ | PROT_WRITE; /* who cares? */ 236 mr.map_obj.rnumber = rnumber; 237 mr.map_handlep = NULL; 238 mr.map_vers = DDI_MAP_VERSION; 239 240 /* 241 * Call my parent to unmap my regs. 
242 */ 243 244 (void) ddi_map(dip, &mr, offset, len, kaddrp); 245 *kaddrp = (caddr_t)0; 246 #if defined(__x86) 247 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, chosen_reg); 248 #endif 249 } 250 251 int 252 ddi_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp, 253 off_t offset, off_t len, caddr_t *vaddrp) 254 { 255 return (i_ddi_bus_map(dip, rdip, mp, offset, len, vaddrp)); 256 } 257 258 /* 259 * nullbusmap: The/DDI default bus_map entry point for nexi 260 * not conforming to the reg/range paradigm (i.e. scsi, etc.) 261 * with no HAT/MMU layer to be programmed at this level. 262 * 263 * If the call is to map by rnumber, return an error, 264 * otherwise pass anything else up the tree to my parent. 265 */ 266 int 267 nullbusmap(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp, 268 off_t offset, off_t len, caddr_t *vaddrp) 269 { 270 _NOTE(ARGUNUSED(rdip)) 271 if (mp->map_type == DDI_MT_RNUMBER) 272 return (DDI_ME_UNSUPPORTED); 273 274 return (ddi_map(dip, mp, offset, len, vaddrp)); 275 } 276 277 /* 278 * ddi_rnumber_to_regspec: Not for use by leaf drivers. 279 * Only for use by nexi using the reg/range paradigm. 280 */ 281 struct regspec * 282 ddi_rnumber_to_regspec(dev_info_t *dip, int rnumber) 283 { 284 return (i_ddi_rnumber_to_regspec(dip, rnumber)); 285 } 286 287 288 /* 289 * Note that we allow the dip to be nil because we may be called 290 * prior even to the instantiation of the devinfo tree itself - all 291 * regular leaf and nexus drivers should always use a non-nil dip! 292 * 293 * We treat peek in a somewhat cavalier fashion .. assuming that we'll 294 * simply get a synchronous fault as soon as we touch a missing address. 295 * 296 * Poke is rather more carefully handled because we might poke to a write 297 * buffer, "succeed", then only find some time later that we got an 298 * asynchronous fault that indicated that the address we were writing to 299 * was not really backed by hardware. 
300 */ 301 302 static int 303 i_ddi_peekpoke(dev_info_t *devi, ddi_ctl_enum_t cmd, size_t size, 304 void *addr, void *value_p) 305 { 306 union { 307 uint64_t u64; 308 uint32_t u32; 309 uint16_t u16; 310 uint8_t u8; 311 } peekpoke_value; 312 313 peekpoke_ctlops_t peekpoke_args; 314 uint64_t dummy_result; 315 int rval; 316 317 /* Note: size is assumed to be correct; it is not checked. */ 318 peekpoke_args.size = size; 319 peekpoke_args.dev_addr = (uintptr_t)addr; 320 peekpoke_args.handle = NULL; 321 peekpoke_args.repcount = 1; 322 peekpoke_args.flags = 0; 323 324 if (cmd == DDI_CTLOPS_POKE) { 325 switch (size) { 326 case sizeof (uint8_t): 327 peekpoke_value.u8 = *(uint8_t *)value_p; 328 break; 329 case sizeof (uint16_t): 330 peekpoke_value.u16 = *(uint16_t *)value_p; 331 break; 332 case sizeof (uint32_t): 333 peekpoke_value.u32 = *(uint32_t *)value_p; 334 break; 335 case sizeof (uint64_t): 336 peekpoke_value.u64 = *(uint64_t *)value_p; 337 break; 338 } 339 } 340 341 peekpoke_args.host_addr = (uintptr_t)&peekpoke_value.u64; 342 343 if (devi != NULL) 344 rval = ddi_ctlops(devi, devi, cmd, &peekpoke_args, 345 &dummy_result); 346 else 347 rval = peekpoke_mem(cmd, &peekpoke_args); 348 349 /* 350 * A NULL value_p is permitted by ddi_peek(9F); discard the result. 351 */ 352 if ((cmd == DDI_CTLOPS_PEEK) & (value_p != NULL)) { 353 switch (size) { 354 case sizeof (uint8_t): 355 *(uint8_t *)value_p = peekpoke_value.u8; 356 break; 357 case sizeof (uint16_t): 358 *(uint16_t *)value_p = peekpoke_value.u16; 359 break; 360 case sizeof (uint32_t): 361 *(uint32_t *)value_p = peekpoke_value.u32; 362 break; 363 case sizeof (uint64_t): 364 *(uint64_t *)value_p = peekpoke_value.u64; 365 break; 366 } 367 } 368 369 return (rval); 370 } 371 372 /* 373 * Keep ddi_peek() and ddi_poke() in case 3rd parties are calling this. 374 * they shouldn't be, but the 9f manpage kind of pseudo exposes it. 
375 */ 376 int 377 ddi_peek(dev_info_t *devi, size_t size, void *addr, void *value_p) 378 { 379 switch (size) { 380 case sizeof (uint8_t): 381 case sizeof (uint16_t): 382 case sizeof (uint32_t): 383 case sizeof (uint64_t): 384 break; 385 default: 386 return (DDI_FAILURE); 387 } 388 389 return (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, size, addr, value_p)); 390 } 391 392 int 393 ddi_poke(dev_info_t *devi, size_t size, void *addr, void *value_p) 394 { 395 switch (size) { 396 case sizeof (uint8_t): 397 case sizeof (uint16_t): 398 case sizeof (uint32_t): 399 case sizeof (uint64_t): 400 break; 401 default: 402 return (DDI_FAILURE); 403 } 404 405 return (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, size, addr, value_p)); 406 } 407 408 int 409 ddi_peek8(dev_info_t *dip, int8_t *addr, int8_t *val_p) 410 { 411 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr, 412 val_p)); 413 } 414 415 int 416 ddi_peek16(dev_info_t *dip, int16_t *addr, int16_t *val_p) 417 { 418 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr, 419 val_p)); 420 } 421 422 int 423 ddi_peek32(dev_info_t *dip, int32_t *addr, int32_t *val_p) 424 { 425 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr, 426 val_p)); 427 } 428 429 int 430 ddi_peek64(dev_info_t *dip, int64_t *addr, int64_t *val_p) 431 { 432 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr, 433 val_p)); 434 } 435 436 437 /* 438 * We need to separate the old interfaces from the new ones and leave them 439 * in here for a while. Previous versions of the OS defined the new interfaces 440 * to the old interfaces. This way we can fix things up so that we can 441 * eventually remove these interfaces. 442 * e.g. A 3rd party module/driver using ddi_peek8 and built against S10 443 * or earlier will actually have a reference to ddi_peekc in the binary. 
444 */ 445 #ifdef _ILP32 446 int 447 ddi_peekc(dev_info_t *dip, int8_t *addr, int8_t *val_p) 448 { 449 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr, 450 val_p)); 451 } 452 453 int 454 ddi_peeks(dev_info_t *dip, int16_t *addr, int16_t *val_p) 455 { 456 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr, 457 val_p)); 458 } 459 460 int 461 ddi_peekl(dev_info_t *dip, int32_t *addr, int32_t *val_p) 462 { 463 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr, 464 val_p)); 465 } 466 467 int 468 ddi_peekd(dev_info_t *dip, int64_t *addr, int64_t *val_p) 469 { 470 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr, 471 val_p)); 472 } 473 #endif /* _ILP32 */ 474 475 int 476 ddi_poke8(dev_info_t *dip, int8_t *addr, int8_t val) 477 { 478 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val)); 479 } 480 481 int 482 ddi_poke16(dev_info_t *dip, int16_t *addr, int16_t val) 483 { 484 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val)); 485 } 486 487 int 488 ddi_poke32(dev_info_t *dip, int32_t *addr, int32_t val) 489 { 490 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val)); 491 } 492 493 int 494 ddi_poke64(dev_info_t *dip, int64_t *addr, int64_t val) 495 { 496 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val)); 497 } 498 499 /* 500 * We need to separate the old interfaces from the new ones and leave them 501 * in here for a while. Previous versions of the OS defined the new interfaces 502 * to the old interfaces. This way we can fix things up so that we can 503 * eventually remove these interfaces. 504 * e.g. A 3rd party module/driver using ddi_poke8 and built against S10 505 * or earlier will actually have a reference to ddi_pokec in the binary. 
506 */ 507 #ifdef _ILP32 508 int 509 ddi_pokec(dev_info_t *dip, int8_t *addr, int8_t val) 510 { 511 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val)); 512 } 513 514 int 515 ddi_pokes(dev_info_t *dip, int16_t *addr, int16_t val) 516 { 517 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val)); 518 } 519 520 int 521 ddi_pokel(dev_info_t *dip, int32_t *addr, int32_t val) 522 { 523 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val)); 524 } 525 526 int 527 ddi_poked(dev_info_t *dip, int64_t *addr, int64_t val) 528 { 529 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val)); 530 } 531 #endif /* _ILP32 */ 532 533 /* 534 * ddi_peekpokeio() is used primarily by the mem drivers for moving 535 * data to and from uio structures via peek and poke. Note that we 536 * use "internal" routines ddi_peek and ddi_poke to make this go 537 * slightly faster, avoiding the call overhead .. 538 */ 539 int 540 ddi_peekpokeio(dev_info_t *devi, struct uio *uio, enum uio_rw rw, 541 caddr_t addr, size_t len, uint_t xfersize) 542 { 543 int64_t ibuffer; 544 int8_t w8; 545 size_t sz; 546 int o; 547 548 if (xfersize > sizeof (long)) 549 xfersize = sizeof (long); 550 551 while (len != 0) { 552 if ((len | (uintptr_t)addr) & 1) { 553 sz = sizeof (int8_t); 554 if (rw == UIO_WRITE) { 555 if ((o = uwritec(uio)) == -1) 556 return (DDI_FAILURE); 557 if (ddi_poke8(devi, (int8_t *)addr, 558 (int8_t)o) != DDI_SUCCESS) 559 return (DDI_FAILURE); 560 } else { 561 if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz, 562 (int8_t *)addr, &w8) != DDI_SUCCESS) 563 return (DDI_FAILURE); 564 if (ureadc(w8, uio)) 565 return (DDI_FAILURE); 566 } 567 } else { 568 switch (xfersize) { 569 case sizeof (int64_t): 570 if (((len | (uintptr_t)addr) & 571 (sizeof (int64_t) - 1)) == 0) { 572 sz = xfersize; 573 break; 574 } 575 /*FALLTHROUGH*/ 576 case sizeof (int32_t): 577 if (((len | (uintptr_t)addr) & 578 (sizeof (int32_t) - 1)) == 0) { 579 sz = xfersize; 580 
break; 581 } 582 /*FALLTHROUGH*/ 583 default: 584 /* 585 * This still assumes that we might have an 586 * I/O bus out there that permits 16-bit 587 * transfers (and that it would be upset by 588 * 32-bit transfers from such locations). 589 */ 590 sz = sizeof (int16_t); 591 break; 592 } 593 594 if (rw == UIO_READ) { 595 if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz, 596 addr, &ibuffer) != DDI_SUCCESS) 597 return (DDI_FAILURE); 598 } 599 600 if (uiomove(&ibuffer, sz, rw, uio)) 601 return (DDI_FAILURE); 602 603 if (rw == UIO_WRITE) { 604 if (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, sz, 605 addr, &ibuffer) != DDI_SUCCESS) 606 return (DDI_FAILURE); 607 } 608 } 609 addr += sz; 610 len -= sz; 611 } 612 return (DDI_SUCCESS); 613 } 614 615 /* 616 * These routines are used by drivers that do layered ioctls 617 * On sparc, they're implemented in assembler to avoid spilling 618 * register windows in the common (copyin) case .. 619 */ 620 #if !defined(__sparc) 621 int 622 ddi_copyin(const void *buf, void *kernbuf, size_t size, int flags) 623 { 624 if (flags & FKIOCTL) 625 return (kcopy(buf, kernbuf, size) ? -1 : 0); 626 return (copyin(buf, kernbuf, size)); 627 } 628 629 int 630 ddi_copyout(const void *buf, void *kernbuf, size_t size, int flags) 631 { 632 if (flags & FKIOCTL) 633 return (kcopy(buf, kernbuf, size) ? -1 : 0); 634 return (copyout(buf, kernbuf, size)); 635 } 636 #endif /* !__sparc */ 637 638 /* 639 * Conversions in nexus pagesize units. We don't duplicate the 640 * 'nil dip' semantics of peek/poke because btopr/btop/ptob are DDI/DKI 641 * routines anyway. 
642 */ 643 unsigned long 644 ddi_btop(dev_info_t *dip, unsigned long bytes) 645 { 646 unsigned long pages; 647 648 (void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOP, &bytes, &pages); 649 return (pages); 650 } 651 652 unsigned long 653 ddi_btopr(dev_info_t *dip, unsigned long bytes) 654 { 655 unsigned long pages; 656 657 (void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOPR, &bytes, &pages); 658 return (pages); 659 } 660 661 unsigned long 662 ddi_ptob(dev_info_t *dip, unsigned long pages) 663 { 664 unsigned long bytes; 665 666 (void) ddi_ctlops(dip, dip, DDI_CTLOPS_PTOB, &pages, &bytes); 667 return (bytes); 668 } 669 670 unsigned int 671 ddi_enter_critical(void) 672 { 673 return ((uint_t)spl7()); 674 } 675 676 void 677 ddi_exit_critical(unsigned int spl) 678 { 679 splx((int)spl); 680 } 681 682 /* 683 * Nexus ctlops punter 684 */ 685 686 #if !defined(__sparc) 687 /* 688 * Request bus_ctl parent to handle a bus_ctl request 689 * 690 * (The sparc version is in sparc_ddi.s) 691 */ 692 int 693 ddi_ctlops(dev_info_t *d, dev_info_t *r, ddi_ctl_enum_t op, void *a, void *v) 694 { 695 int (*fp)(); 696 697 if (!d || !r) 698 return (DDI_FAILURE); 699 700 if ((d = (dev_info_t *)DEVI(d)->devi_bus_ctl) == NULL) 701 return (DDI_FAILURE); 702 703 fp = DEVI(d)->devi_ops->devo_bus_ops->bus_ctl; 704 return ((*fp)(d, r, op, a, v)); 705 } 706 707 #endif 708 709 /* 710 * DMA/DVMA setup 711 */ 712 713 #if defined(__sparc) 714 static ddi_dma_lim_t standard_limits = { 715 (uint_t)0, /* addr_t dlim_addr_lo */ 716 (uint_t)-1, /* addr_t dlim_addr_hi */ 717 (uint_t)-1, /* uint_t dlim_cntr_max */ 718 (uint_t)1, /* uint_t dlim_burstsizes */ 719 (uint_t)1, /* uint_t dlim_minxfer */ 720 0 /* uint_t dlim_dmaspeed */ 721 }; 722 #elif defined(__x86) 723 static ddi_dma_lim_t standard_limits = { 724 (uint_t)0, /* addr_t dlim_addr_lo */ 725 (uint_t)0xffffff, /* addr_t dlim_addr_hi */ 726 (uint_t)0, /* uint_t dlim_cntr_max */ 727 (uint_t)0x00000001, /* uint_t dlim_burstsizes */ 728 (uint_t)DMA_UNIT_8, /* uint_t 
dlim_minxfer */ 729 (uint_t)0, /* uint_t dlim_dmaspeed */ 730 (uint_t)0x86<<24+0, /* uint_t dlim_version */ 731 (uint_t)0xffff, /* uint_t dlim_adreg_max */ 732 (uint_t)0xffff, /* uint_t dlim_ctreg_max */ 733 (uint_t)512, /* uint_t dlim_granular */ 734 (int)1, /* int dlim_sgllen */ 735 (uint_t)0xffffffff /* uint_t dlim_reqsizes */ 736 }; 737 738 #endif 739 740 #if !defined(__sparc) 741 /* 742 * Request bus_dma_ctl parent to fiddle with a dma request. 743 * 744 * (The sparc version is in sparc_subr.s) 745 */ 746 int 747 ddi_dma_mctl(dev_info_t *dip, dev_info_t *rdip, 748 ddi_dma_handle_t handle, enum ddi_dma_ctlops request, 749 off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags) 750 { 751 int (*fp)(); 752 753 if (dip != ddi_root_node()) 754 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_ctl; 755 fp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_ctl; 756 return ((*fp) (dip, rdip, handle, request, offp, lenp, objp, flags)); 757 } 758 #endif 759 760 /* 761 * For all DMA control functions, call the DMA control 762 * routine and return status. 763 * 764 * Just plain assume that the parent is to be called. 765 * If a nexus driver or a thread outside the framework 766 * of a nexus driver or a leaf driver calls these functions, 767 * it is up to them to deal with the fact that the parent's 768 * bus_dma_ctl function will be the first one called. 769 */ 770 771 #define HD ((ddi_dma_impl_t *)h)->dmai_rdip 772 773 /* 774 * This routine is left in place to satisfy link dependencies 775 * for any 3rd party nexus drivers that rely on it. It is never 776 * called, though. 777 */ 778 /*ARGSUSED*/ 779 int 780 ddi_dma_map(dev_info_t *dip, dev_info_t *rdip, 781 struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep) 782 { 783 return (DDI_FAILURE); 784 } 785 786 #if !defined(__sparc) 787 788 /* 789 * The SPARC versions of these routines are done in assembler to 790 * save register windows, so they're in sparc_subr.s. 
791 */ 792 793 int 794 ddi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr, 795 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep) 796 { 797 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_attr_t *, 798 int (*)(caddr_t), caddr_t, ddi_dma_handle_t *); 799 800 if (dip != ddi_root_node()) 801 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl; 802 803 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_allochdl; 804 return ((*funcp)(dip, rdip, attr, waitfp, arg, handlep)); 805 } 806 807 int 808 ddi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handlep) 809 { 810 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t); 811 812 if (dip != ddi_root_node()) 813 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl; 814 815 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_freehdl; 816 return ((*funcp)(dip, rdip, handlep)); 817 } 818 819 int 820 ddi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip, 821 ddi_dma_handle_t handle, struct ddi_dma_req *dmareq, 822 ddi_dma_cookie_t *cp, uint_t *ccountp) 823 { 824 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t, 825 struct ddi_dma_req *, ddi_dma_cookie_t *, uint_t *); 826 827 if (dip != ddi_root_node()) 828 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl; 829 830 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_bindhdl; 831 return ((*funcp)(dip, rdip, handle, dmareq, cp, ccountp)); 832 } 833 834 int 835 ddi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, 836 ddi_dma_handle_t handle) 837 { 838 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t); 839 840 if (dip != ddi_root_node()) 841 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl; 842 843 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_unbindhdl; 844 return ((*funcp)(dip, rdip, handle)); 845 } 846 847 848 int 849 ddi_dma_flush(dev_info_t *dip, dev_info_t *rdip, 850 ddi_dma_handle_t handle, off_t off, size_t len, 851 uint_t cache_flags) 852 { 853 int (*funcp)(dev_info_t *, dev_info_t *, 
ddi_dma_handle_t, 854 off_t, size_t, uint_t); 855 856 if (dip != ddi_root_node()) 857 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush; 858 859 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_flush; 860 return ((*funcp)(dip, rdip, handle, off, len, cache_flags)); 861 } 862 863 int 864 ddi_dma_win(dev_info_t *dip, dev_info_t *rdip, 865 ddi_dma_handle_t handle, uint_t win, off_t *offp, 866 size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp) 867 { 868 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t, 869 uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *); 870 871 if (dip != ddi_root_node()) 872 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_win; 873 874 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_win; 875 return ((*funcp)(dip, rdip, handle, win, offp, lenp, 876 cookiep, ccountp)); 877 } 878 879 int 880 ddi_dma_sync(ddi_dma_handle_t h, off_t o, size_t l, uint_t whom) 881 { 882 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h; 883 dev_info_t *dip, *rdip; 884 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t, off_t, 885 size_t, uint_t); 886 887 /* 888 * the DMA nexus driver will set DMP_NOSYNC if the 889 * platform does not require any sync operation. For 890 * example if the memory is uncached or consistent 891 * and without any I/O write buffers involved. 
892 */ 893 if ((hp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC) 894 return (DDI_SUCCESS); 895 896 dip = rdip = hp->dmai_rdip; 897 if (dip != ddi_root_node()) 898 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush; 899 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_flush; 900 return ((*funcp)(dip, rdip, h, o, l, whom)); 901 } 902 903 int 904 ddi_dma_unbind_handle(ddi_dma_handle_t h) 905 { 906 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h; 907 dev_info_t *dip, *rdip; 908 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t); 909 910 dip = rdip = hp->dmai_rdip; 911 if (dip != ddi_root_node()) 912 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl; 913 funcp = DEVI(rdip)->devi_bus_dma_unbindfunc; 914 return ((*funcp)(dip, rdip, h)); 915 } 916 917 #endif /* !__sparc */ 918 919 /* 920 * DMA burst sizes, and transfer minimums 921 */ 922 923 int 924 ddi_dma_burstsizes(ddi_dma_handle_t handle) 925 { 926 ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle; 927 928 if (!dimp) 929 return (0); 930 else 931 return (dimp->dmai_burstsizes); 932 } 933 934 int 935 ddi_iomin(dev_info_t *a, int i, int stream) 936 { 937 int r; 938 939 /* 940 * Make sure that the initial value is sane 941 */ 942 if (i & (i - 1)) 943 return (0); 944 if (i == 0) 945 i = (stream) ? 4 : 1; 946 947 r = ddi_ctlops(a, a, 948 DDI_CTLOPS_IOMIN, (void *)(uintptr_t)stream, (void *)&i); 949 if (r != DDI_SUCCESS || (i & (i - 1))) 950 return (0); 951 return (i); 952 } 953 954 /* 955 * Given two DMA attribute structures, apply the attributes 956 * of one to the other, following the rules of attributes 957 * and the wishes of the caller. 958 * 959 * The rules of DMA attribute structures are that you cannot 960 * make things *less* restrictive as you apply one set 961 * of attributes to another. 
962 * 963 */ 964 void 965 ddi_dma_attr_merge(ddi_dma_attr_t *attr, ddi_dma_attr_t *mod) 966 { 967 attr->dma_attr_addr_lo = 968 MAX(attr->dma_attr_addr_lo, mod->dma_attr_addr_lo); 969 attr->dma_attr_addr_hi = 970 MIN(attr->dma_attr_addr_hi, mod->dma_attr_addr_hi); 971 attr->dma_attr_count_max = 972 MIN(attr->dma_attr_count_max, mod->dma_attr_count_max); 973 attr->dma_attr_align = 974 MAX(attr->dma_attr_align, mod->dma_attr_align); 975 attr->dma_attr_burstsizes = 976 (uint_t)(attr->dma_attr_burstsizes & mod->dma_attr_burstsizes); 977 attr->dma_attr_minxfer = 978 maxbit(attr->dma_attr_minxfer, mod->dma_attr_minxfer); 979 attr->dma_attr_maxxfer = 980 MIN(attr->dma_attr_maxxfer, mod->dma_attr_maxxfer); 981 attr->dma_attr_seg = MIN(attr->dma_attr_seg, mod->dma_attr_seg); 982 attr->dma_attr_sgllen = MIN((uint_t)attr->dma_attr_sgllen, 983 (uint_t)mod->dma_attr_sgllen); 984 attr->dma_attr_granular = 985 MAX(attr->dma_attr_granular, mod->dma_attr_granular); 986 } 987 988 /* 989 * mmap/segmap interface: 990 */ 991 992 /* 993 * ddi_segmap: setup the default segment driver. Calls the drivers 994 * XXmmap routine to validate the range to be mapped. 995 * Return ENXIO of the range is not valid. Create 996 * a seg_dev segment that contains all of the 997 * necessary information and will reference the 998 * default segment driver routines. It returns zero 999 * on success or non-zero on failure. 1000 */ 1001 int 1002 ddi_segmap(dev_t dev, off_t offset, struct as *asp, caddr_t *addrp, off_t len, 1003 uint_t prot, uint_t maxprot, uint_t flags, cred_t *credp) 1004 { 1005 extern int spec_segmap(dev_t, off_t, struct as *, caddr_t *, 1006 off_t, uint_t, uint_t, uint_t, struct cred *); 1007 1008 return (spec_segmap(dev, offset, asp, addrp, len, 1009 prot, maxprot, flags, credp)); 1010 } 1011 1012 /* 1013 * ddi_map_fault: Resolve mappings at fault time. Used by segment 1014 * drivers. 
Allows each successive parent to resolve 1015 * address translations and add its mappings to the 1016 * mapping list supplied in the page structure. It 1017 * returns zero on success or non-zero on failure. 1018 */ 1019 1020 int 1021 ddi_map_fault(dev_info_t *dip, struct hat *hat, struct seg *seg, 1022 caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock) 1023 { 1024 return (i_ddi_map_fault(dip, dip, hat, seg, addr, dp, pfn, prot, lock)); 1025 } 1026 1027 /* 1028 * ddi_device_mapping_check: Called from ddi_segmap_setup. 1029 * Invokes platform specific DDI to determine whether attributes specified 1030 * in attr(9s) are valid for the region of memory that will be made 1031 * available for direct access to user process via the mmap(2) system call. 1032 */ 1033 int 1034 ddi_device_mapping_check(dev_t dev, ddi_device_acc_attr_t *accattrp, 1035 uint_t rnumber, uint_t *hat_flags) 1036 { 1037 ddi_acc_handle_t handle; 1038 ddi_map_req_t mr; 1039 ddi_acc_hdl_t *hp; 1040 int result; 1041 dev_info_t *dip; 1042 1043 /* 1044 * we use e_ddi_hold_devi_by_dev to search for the devi. We 1045 * release it immediately since it should already be held by 1046 * a devfs vnode. 1047 */ 1048 if ((dip = 1049 e_ddi_hold_devi_by_dev(dev, E_DDI_HOLD_DEVI_NOATTACH)) == NULL) 1050 return (-1); 1051 ddi_release_devi(dip); /* for e_ddi_hold_devi_by_dev() */ 1052 1053 /* 1054 * Allocate and initialize the common elements of data 1055 * access handle. 1056 */ 1057 handle = impl_acc_hdl_alloc(KM_SLEEP, NULL); 1058 if (handle == NULL) 1059 return (-1); 1060 1061 hp = impl_acc_hdl_get(handle); 1062 hp->ah_vers = VERS_ACCHDL; 1063 hp->ah_dip = dip; 1064 hp->ah_rnumber = rnumber; 1065 hp->ah_offset = 0; 1066 hp->ah_len = 0; 1067 hp->ah_acc = *accattrp; 1068 1069 /* 1070 * Set up the mapping request and call to parent. 
1071 */ 1072 mr.map_op = DDI_MO_MAP_HANDLE; 1073 mr.map_type = DDI_MT_RNUMBER; 1074 mr.map_obj.rnumber = rnumber; 1075 mr.map_prot = PROT_READ | PROT_WRITE; 1076 mr.map_flags = DDI_MF_KERNEL_MAPPING; 1077 mr.map_handlep = hp; 1078 mr.map_vers = DDI_MAP_VERSION; 1079 result = ddi_map(dip, &mr, 0, 0, NULL); 1080 1081 /* 1082 * Region must be mappable, pick up flags from the framework. 1083 */ 1084 *hat_flags = hp->ah_hat_flags; 1085 1086 impl_acc_hdl_free(handle); 1087 1088 /* 1089 * check for end result. 1090 */ 1091 if (result != DDI_SUCCESS) 1092 return (-1); 1093 return (0); 1094 } 1095 1096 1097 /* 1098 * Property functions: See also, ddipropdefs.h. 1099 * 1100 * These functions are the framework for the property functions, 1101 * i.e. they support software defined properties. All implementation 1102 * specific property handling (i.e.: self-identifying devices and 1103 * PROM defined properties are handled in the implementation specific 1104 * functions (defined in ddi_implfuncs.h). 1105 */ 1106 1107 /* 1108 * nopropop: Shouldn't be called, right? 1109 */ 1110 int 1111 nopropop(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 1112 char *name, caddr_t valuep, int *lengthp) 1113 { 1114 _NOTE(ARGUNUSED(dev, dip, prop_op, mod_flags, name, valuep, lengthp)) 1115 return (DDI_PROP_NOT_FOUND); 1116 } 1117 1118 #ifdef DDI_PROP_DEBUG 1119 int ddi_prop_debug_flag = 0; 1120 1121 int 1122 ddi_prop_debug(int enable) 1123 { 1124 int prev = ddi_prop_debug_flag; 1125 1126 if ((enable != 0) || (prev != 0)) 1127 printf("ddi_prop_debug: debugging %s\n", 1128 enable ? "enabled" : "disabled"); 1129 ddi_prop_debug_flag = enable; 1130 return (prev); 1131 } 1132 1133 #endif /* DDI_PROP_DEBUG */ 1134 1135 /* 1136 * Search a property list for a match, if found return pointer 1137 * to matching prop struct, else return NULL. 
1138 */ 1139 1140 ddi_prop_t * 1141 i_ddi_prop_search(dev_t dev, char *name, uint_t flags, ddi_prop_t **list_head) 1142 { 1143 ddi_prop_t *propp; 1144 1145 /* 1146 * find the property in child's devinfo: 1147 * Search order defined by this search function is first matching 1148 * property with input dev == DDI_DEV_T_ANY matching any dev or 1149 * dev == propp->prop_dev, name == propp->name, and the correct 1150 * data type as specified in the flags. If a DDI_DEV_T_NONE dev 1151 * value made it this far then it implies a DDI_DEV_T_ANY search. 1152 */ 1153 if (dev == DDI_DEV_T_NONE) 1154 dev = DDI_DEV_T_ANY; 1155 1156 for (propp = *list_head; propp != NULL; propp = propp->prop_next) { 1157 1158 if (!DDI_STRSAME(propp->prop_name, name)) 1159 continue; 1160 1161 if ((dev != DDI_DEV_T_ANY) && (propp->prop_dev != dev)) 1162 continue; 1163 1164 if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0) 1165 continue; 1166 1167 return (propp); 1168 } 1169 1170 return ((ddi_prop_t *)0); 1171 } 1172 1173 /* 1174 * Search for property within devnames structures 1175 */ 1176 ddi_prop_t * 1177 i_ddi_search_global_prop(dev_t dev, char *name, uint_t flags) 1178 { 1179 major_t major; 1180 struct devnames *dnp; 1181 ddi_prop_t *propp; 1182 1183 /* 1184 * Valid dev_t value is needed to index into the 1185 * correct devnames entry, therefore a dev_t 1186 * value of DDI_DEV_T_ANY is not appropriate. 
1187 */ 1188 ASSERT(dev != DDI_DEV_T_ANY); 1189 if (dev == DDI_DEV_T_ANY) { 1190 return ((ddi_prop_t *)0); 1191 } 1192 1193 major = getmajor(dev); 1194 dnp = &(devnamesp[major]); 1195 1196 if (dnp->dn_global_prop_ptr == NULL) 1197 return ((ddi_prop_t *)0); 1198 1199 LOCK_DEV_OPS(&dnp->dn_lock); 1200 1201 for (propp = dnp->dn_global_prop_ptr->prop_list; 1202 propp != NULL; 1203 propp = (ddi_prop_t *)propp->prop_next) { 1204 1205 if (!DDI_STRSAME(propp->prop_name, name)) 1206 continue; 1207 1208 if ((!(flags & DDI_PROP_ROOTNEX_GLOBAL)) && 1209 (!(flags & LDI_DEV_T_ANY)) && (propp->prop_dev != dev)) 1210 continue; 1211 1212 if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0) 1213 continue; 1214 1215 /* Property found, return it */ 1216 UNLOCK_DEV_OPS(&dnp->dn_lock); 1217 return (propp); 1218 } 1219 1220 UNLOCK_DEV_OPS(&dnp->dn_lock); 1221 return ((ddi_prop_t *)0); 1222 } 1223 1224 static char prop_no_mem_msg[] = "can't allocate memory for ddi property <%s>"; 1225 1226 /* 1227 * ddi_prop_search_global: 1228 * Search the global property list within devnames 1229 * for the named property. Return the encoded value. 1230 */ 1231 static int 1232 i_ddi_prop_search_global(dev_t dev, uint_t flags, char *name, 1233 void *valuep, uint_t *lengthp) 1234 { 1235 ddi_prop_t *propp; 1236 caddr_t buffer; 1237 1238 propp = i_ddi_search_global_prop(dev, name, flags); 1239 1240 /* Property NOT found, bail */ 1241 if (propp == (ddi_prop_t *)0) 1242 return (DDI_PROP_NOT_FOUND); 1243 1244 if (propp->prop_flags & DDI_PROP_UNDEF_IT) 1245 return (DDI_PROP_UNDEFINED); 1246 1247 if ((buffer = kmem_alloc(propp->prop_len, 1248 (flags & DDI_PROP_CANSLEEP) ? 
KM_SLEEP : KM_NOSLEEP)) == NULL) { 1249 cmn_err(CE_CONT, prop_no_mem_msg, name); 1250 return (DDI_PROP_NO_MEMORY); 1251 } 1252 1253 /* 1254 * Return the encoded data 1255 */ 1256 *(caddr_t *)valuep = buffer; 1257 *lengthp = propp->prop_len; 1258 bcopy(propp->prop_val, buffer, propp->prop_len); 1259 1260 return (DDI_PROP_SUCCESS); 1261 } 1262 1263 /* 1264 * ddi_prop_search_common: Lookup and return the encoded value 1265 */ 1266 int 1267 ddi_prop_search_common(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, 1268 uint_t flags, char *name, void *valuep, uint_t *lengthp) 1269 { 1270 ddi_prop_t *propp; 1271 int i; 1272 caddr_t buffer; 1273 caddr_t prealloc = NULL; 1274 int plength = 0; 1275 dev_info_t *pdip; 1276 int (*bop)(); 1277 1278 /*CONSTANTCONDITION*/ 1279 while (1) { 1280 1281 mutex_enter(&(DEVI(dip)->devi_lock)); 1282 1283 1284 /* 1285 * find the property in child's devinfo: 1286 * Search order is: 1287 * 1. driver defined properties 1288 * 2. system defined properties 1289 * 3. driver global properties 1290 * 4. boot defined properties 1291 */ 1292 1293 propp = i_ddi_prop_search(dev, name, flags, 1294 &(DEVI(dip)->devi_drv_prop_ptr)); 1295 if (propp == NULL) { 1296 propp = i_ddi_prop_search(dev, name, flags, 1297 &(DEVI(dip)->devi_sys_prop_ptr)); 1298 } 1299 if ((propp == NULL) && DEVI(dip)->devi_global_prop_list) { 1300 propp = i_ddi_prop_search(dev, name, flags, 1301 &DEVI(dip)->devi_global_prop_list->prop_list); 1302 } 1303 1304 if (propp == NULL) { 1305 propp = i_ddi_prop_search(dev, name, flags, 1306 &(DEVI(dip)->devi_hw_prop_ptr)); 1307 } 1308 1309 /* 1310 * Software property found? 1311 */ 1312 if (propp != (ddi_prop_t *)0) { 1313 1314 /* 1315 * If explicit undefine, return now. 
1316 */ 1317 if (propp->prop_flags & DDI_PROP_UNDEF_IT) { 1318 mutex_exit(&(DEVI(dip)->devi_lock)); 1319 if (prealloc) 1320 kmem_free(prealloc, plength); 1321 return (DDI_PROP_UNDEFINED); 1322 } 1323 1324 /* 1325 * If we only want to know if it exists, return now 1326 */ 1327 if (prop_op == PROP_EXISTS) { 1328 mutex_exit(&(DEVI(dip)->devi_lock)); 1329 ASSERT(prealloc == NULL); 1330 return (DDI_PROP_SUCCESS); 1331 } 1332 1333 /* 1334 * If length only request or prop length == 0, 1335 * service request and return now. 1336 */ 1337 if ((prop_op == PROP_LEN) ||(propp->prop_len == 0)) { 1338 *lengthp = propp->prop_len; 1339 1340 /* 1341 * if prop_op is PROP_LEN_AND_VAL_ALLOC 1342 * that means prop_len is 0, so set valuep 1343 * also to NULL 1344 */ 1345 if (prop_op == PROP_LEN_AND_VAL_ALLOC) 1346 *(caddr_t *)valuep = NULL; 1347 1348 mutex_exit(&(DEVI(dip)->devi_lock)); 1349 if (prealloc) 1350 kmem_free(prealloc, plength); 1351 return (DDI_PROP_SUCCESS); 1352 } 1353 1354 /* 1355 * If LEN_AND_VAL_ALLOC and the request can sleep, 1356 * drop the mutex, allocate the buffer, and go 1357 * through the loop again. If we already allocated 1358 * the buffer, and the size of the property changed, 1359 * keep trying... 1360 */ 1361 if ((prop_op == PROP_LEN_AND_VAL_ALLOC) && 1362 (flags & DDI_PROP_CANSLEEP)) { 1363 if (prealloc && (propp->prop_len != plength)) { 1364 kmem_free(prealloc, plength); 1365 prealloc = NULL; 1366 } 1367 if (prealloc == NULL) { 1368 plength = propp->prop_len; 1369 mutex_exit(&(DEVI(dip)->devi_lock)); 1370 prealloc = kmem_alloc(plength, 1371 KM_SLEEP); 1372 continue; 1373 } 1374 } 1375 1376 /* 1377 * Allocate buffer, if required. Either way, 1378 * set `buffer' variable. 
1379 */ 1380 i = *lengthp; /* Get callers length */ 1381 *lengthp = propp->prop_len; /* Set callers length */ 1382 1383 switch (prop_op) { 1384 1385 case PROP_LEN_AND_VAL_ALLOC: 1386 1387 if (prealloc == NULL) { 1388 buffer = kmem_alloc(propp->prop_len, 1389 KM_NOSLEEP); 1390 } else { 1391 buffer = prealloc; 1392 } 1393 1394 if (buffer == NULL) { 1395 mutex_exit(&(DEVI(dip)->devi_lock)); 1396 cmn_err(CE_CONT, prop_no_mem_msg, name); 1397 return (DDI_PROP_NO_MEMORY); 1398 } 1399 /* Set callers buf ptr */ 1400 *(caddr_t *)valuep = buffer; 1401 break; 1402 1403 case PROP_LEN_AND_VAL_BUF: 1404 1405 if (propp->prop_len > (i)) { 1406 mutex_exit(&(DEVI(dip)->devi_lock)); 1407 return (DDI_PROP_BUF_TOO_SMALL); 1408 } 1409 1410 buffer = valuep; /* Get callers buf ptr */ 1411 break; 1412 1413 default: 1414 break; 1415 } 1416 1417 /* 1418 * Do the copy. 1419 */ 1420 bcopy(propp->prop_val, buffer, propp->prop_len); 1421 mutex_exit(&(DEVI(dip)->devi_lock)); 1422 return (DDI_PROP_SUCCESS); 1423 } 1424 1425 mutex_exit(&(DEVI(dip)->devi_lock)); 1426 if (prealloc) 1427 kmem_free(prealloc, plength); 1428 prealloc = NULL; 1429 1430 /* 1431 * Prop not found, call parent bus_ops to deal with possible 1432 * h/w layer (possible PROM defined props, etc.) and to 1433 * possibly ascend the hierarchy, if allowed by flags. 1434 */ 1435 pdip = (dev_info_t *)DEVI(dip)->devi_parent; 1436 1437 /* 1438 * One last call for the root driver PROM props? 
1439 */ 1440 if (dip == ddi_root_node()) { 1441 return (ddi_bus_prop_op(dev, dip, dip, prop_op, 1442 flags, name, valuep, (int *)lengthp)); 1443 } 1444 1445 /* 1446 * We may have been called to check for properties 1447 * within a single devinfo node that has no parent - 1448 * see make_prop() 1449 */ 1450 if (pdip == NULL) { 1451 ASSERT((flags & 1452 (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)) == 1453 (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)); 1454 return (DDI_PROP_NOT_FOUND); 1455 } 1456 1457 /* 1458 * Instead of recursing, we do iterative calls up the tree. 1459 * As a bit of optimization, skip the bus_op level if the 1460 * node is a s/w node and if the parent's bus_prop_op function 1461 * is `ddi_bus_prop_op', because we know that in this case, 1462 * this function does nothing. 1463 * 1464 * 4225415: If the parent isn't attached, or the child 1465 * hasn't been named by the parent yet, use the default 1466 * ddi_bus_prop_op as a proxy for the parent. This 1467 * allows property lookups in any child/parent state to 1468 * include 'prom' and inherited properties, even when 1469 * there are no drivers attached to the child or parent. 1470 */ 1471 1472 bop = ddi_bus_prop_op; 1473 if (i_ddi_devi_attached(pdip) && 1474 (i_ddi_node_state(dip) >= DS_INITIALIZED)) 1475 bop = DEVI(pdip)->devi_ops->devo_bus_ops->bus_prop_op; 1476 1477 i = DDI_PROP_NOT_FOUND; 1478 1479 if ((bop != ddi_bus_prop_op) || ndi_dev_is_prom_node(dip)) { 1480 i = (*bop)(dev, pdip, dip, prop_op, 1481 flags | DDI_PROP_DONTPASS, 1482 name, valuep, lengthp); 1483 } 1484 1485 if ((flags & DDI_PROP_DONTPASS) || 1486 (i != DDI_PROP_NOT_FOUND)) 1487 return (i); 1488 1489 dip = pdip; 1490 } 1491 /*NOTREACHED*/ 1492 } 1493 1494 1495 /* 1496 * ddi_prop_op: The basic property operator for drivers. 
1497 * 1498 * In ddi_prop_op, the type of valuep is interpreted based on prop_op: 1499 * 1500 * prop_op valuep 1501 * ------ ------ 1502 * 1503 * PROP_LEN <unused> 1504 * 1505 * PROP_LEN_AND_VAL_BUF Pointer to callers buffer 1506 * 1507 * PROP_LEN_AND_VAL_ALLOC Address of callers pointer (will be set to 1508 * address of allocated buffer, if successful) 1509 */ 1510 int 1511 ddi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 1512 char *name, caddr_t valuep, int *lengthp) 1513 { 1514 int i; 1515 1516 ASSERT((mod_flags & DDI_PROP_TYPE_MASK) == 0); 1517 1518 /* 1519 * If this was originally an LDI prop lookup then we bail here. 1520 * The reason is that the LDI property lookup interfaces first call 1521 * a drivers prop_op() entry point to allow it to override 1522 * properties. But if we've made it here, then the driver hasn't 1523 * overriden any properties. We don't want to continue with the 1524 * property search here because we don't have any type inforamtion. 1525 * When we return failure, the LDI interfaces will then proceed to 1526 * call the typed property interfaces to look up the property. 1527 */ 1528 if (mod_flags & DDI_PROP_DYNAMIC) 1529 return (DDI_PROP_NOT_FOUND); 1530 1531 /* 1532 * check for pre-typed property consumer asking for typed property: 1533 * see e_ddi_getprop_int64. 1534 */ 1535 if (mod_flags & DDI_PROP_CONSUMER_TYPED) 1536 mod_flags |= DDI_PROP_TYPE_INT64; 1537 mod_flags |= DDI_PROP_TYPE_ANY; 1538 1539 i = ddi_prop_search_common(dev, dip, prop_op, 1540 mod_flags, name, valuep, (uint_t *)lengthp); 1541 if (i == DDI_PROP_FOUND_1275) 1542 return (DDI_PROP_SUCCESS); 1543 return (i); 1544 } 1545 1546 /* 1547 * ddi_prop_op_nblocks_blksize: The basic property operator for drivers that 1548 * maintain size in number of blksize blocks. Provides a dynamic property 1549 * implementation for size oriented properties based on nblocks64 and blksize 1550 * values passed in by the driver. 
Fallback to ddi_prop_op if the nblocks64 1551 * is too large. This interface should not be used with a nblocks64 that 1552 * represents the driver's idea of how to represent unknown, if nblocks is 1553 * unknown use ddi_prop_op. 1554 */ 1555 int 1556 ddi_prop_op_nblocks_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, 1557 int mod_flags, char *name, caddr_t valuep, int *lengthp, 1558 uint64_t nblocks64, uint_t blksize) 1559 { 1560 uint64_t size64; 1561 int blkshift; 1562 1563 /* convert block size to shift value */ 1564 ASSERT(BIT_ONLYONESET(blksize)); 1565 blkshift = highbit(blksize) - 1; 1566 1567 /* 1568 * There is no point in supporting nblocks64 values that don't have 1569 * an accurate uint64_t byte count representation. 1570 */ 1571 if (nblocks64 >= (UINT64_MAX >> blkshift)) 1572 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 1573 name, valuep, lengthp)); 1574 1575 size64 = nblocks64 << blkshift; 1576 return (ddi_prop_op_size_blksize(dev, dip, prop_op, mod_flags, 1577 name, valuep, lengthp, size64, blksize)); 1578 } 1579 1580 /* 1581 * ddi_prop_op_nblocks: ddi_prop_op_nblocks_blksize with DEV_BSIZE blksize. 1582 */ 1583 int 1584 ddi_prop_op_nblocks(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, 1585 int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t nblocks64) 1586 { 1587 return (ddi_prop_op_nblocks_blksize(dev, dip, prop_op, 1588 mod_flags, name, valuep, lengthp, nblocks64, DEV_BSIZE)); 1589 } 1590 1591 /* 1592 * ddi_prop_op_size_blksize: The basic property operator for block drivers that 1593 * maintain size in bytes. Provides a of dynamic property implementation for 1594 * size oriented properties based on size64 value and blksize passed in by the 1595 * driver. Fallback to ddi_prop_op if the size64 is too large. This interface 1596 * should not be used with a size64 that represents the driver's idea of how 1597 * to represent unknown, if size is unknown use ddi_prop_op. 
1598 * 1599 * NOTE: the legacy "nblocks"/"size" properties are treated as 32-bit unsigned 1600 * integers. While the most likely interface to request them ([bc]devi_size) 1601 * is declared int (signed) there is no enforcement of this, which means we 1602 * can't enforce limitations here without risking regression. 1603 */ 1604 int 1605 ddi_prop_op_size_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, 1606 int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64, 1607 uint_t blksize) 1608 { 1609 uint64_t nblocks64; 1610 int callers_length; 1611 caddr_t buffer; 1612 int blkshift; 1613 1614 /* 1615 * This is a kludge to support capture of size(9P) pure dynamic 1616 * properties in snapshots for non-cmlb code (without exposing 1617 * i_ddi_prop_dyn changes). When everyone uses cmlb, this code 1618 * should be removed. 1619 */ 1620 if (i_ddi_prop_dyn_driver_get(dip) == NULL) { 1621 static i_ddi_prop_dyn_t prop_dyn_size[] = { 1622 {"Size", DDI_PROP_TYPE_INT64, S_IFCHR}, 1623 {"Nblocks", DDI_PROP_TYPE_INT64, S_IFBLK}, 1624 {NULL} 1625 }; 1626 i_ddi_prop_dyn_driver_set(dip, prop_dyn_size); 1627 } 1628 1629 /* convert block size to shift value */ 1630 ASSERT(BIT_ONLYONESET(blksize)); 1631 blkshift = highbit(blksize) - 1; 1632 1633 /* compute DEV_BSIZE nblocks value */ 1634 nblocks64 = size64 >> blkshift; 1635 1636 /* get callers length, establish length of our dynamic properties */ 1637 callers_length = *lengthp; 1638 1639 if (strcmp(name, "Nblocks") == 0) 1640 *lengthp = sizeof (uint64_t); 1641 else if (strcmp(name, "Size") == 0) 1642 *lengthp = sizeof (uint64_t); 1643 else if ((strcmp(name, "nblocks") == 0) && (nblocks64 < UINT_MAX)) 1644 *lengthp = sizeof (uint32_t); 1645 else if ((strcmp(name, "size") == 0) && (size64 < UINT_MAX)) 1646 *lengthp = sizeof (uint32_t); 1647 else if ((strcmp(name, "blksize") == 0) && (blksize < UINT_MAX)) 1648 *lengthp = sizeof (uint32_t); 1649 else { 1650 /* fallback to ddi_prop_op */ 1651 return (ddi_prop_op(dev, 
dip, prop_op, mod_flags, 1652 name, valuep, lengthp)); 1653 } 1654 1655 /* service request for the length of the property */ 1656 if (prop_op == PROP_LEN) 1657 return (DDI_PROP_SUCCESS); 1658 1659 switch (prop_op) { 1660 case PROP_LEN_AND_VAL_ALLOC: 1661 if ((buffer = kmem_alloc(*lengthp, 1662 (mod_flags & DDI_PROP_CANSLEEP) ? 1663 KM_SLEEP : KM_NOSLEEP)) == NULL) 1664 return (DDI_PROP_NO_MEMORY); 1665 1666 *(caddr_t *)valuep = buffer; /* set callers buf ptr */ 1667 break; 1668 1669 case PROP_LEN_AND_VAL_BUF: 1670 /* the length of the property and the request must match */ 1671 if (callers_length != *lengthp) 1672 return (DDI_PROP_INVAL_ARG); 1673 1674 buffer = valuep; /* get callers buf ptr */ 1675 break; 1676 1677 default: 1678 return (DDI_PROP_INVAL_ARG); 1679 } 1680 1681 /* transfer the value into the buffer */ 1682 if (strcmp(name, "Nblocks") == 0) 1683 *((uint64_t *)buffer) = nblocks64; 1684 else if (strcmp(name, "Size") == 0) 1685 *((uint64_t *)buffer) = size64; 1686 else if (strcmp(name, "nblocks") == 0) 1687 *((uint32_t *)buffer) = (uint32_t)nblocks64; 1688 else if (strcmp(name, "size") == 0) 1689 *((uint32_t *)buffer) = (uint32_t)size64; 1690 else if (strcmp(name, "blksize") == 0) 1691 *((uint32_t *)buffer) = (uint32_t)blksize; 1692 return (DDI_PROP_SUCCESS); 1693 } 1694 1695 /* 1696 * ddi_prop_op_size: ddi_prop_op_size_blksize with DEV_BSIZE block size. 1697 */ 1698 int 1699 ddi_prop_op_size(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, 1700 int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64) 1701 { 1702 return (ddi_prop_op_size_blksize(dev, dip, prop_op, 1703 mod_flags, name, valuep, lengthp, size64, DEV_BSIZE)); 1704 } 1705 1706 /* 1707 * Variable length props... 1708 */ 1709 1710 /* 1711 * ddi_getlongprop: Get variable length property len+val into a buffer 1712 * allocated by property provider via kmem_alloc. Requester 1713 * is responsible for freeing returned property via kmem_free. 
1714 * 1715 * Arguments: 1716 * 1717 * dev_t: Input: dev_t of property. 1718 * dip: Input: dev_info_t pointer of child. 1719 * flags: Input: Possible flag modifiers are: 1720 * DDI_PROP_DONTPASS: Don't pass to parent if prop not found. 1721 * DDI_PROP_CANSLEEP: Memory allocation may sleep. 1722 * name: Input: name of property. 1723 * valuep: Output: Addr of callers buffer pointer. 1724 * lengthp:Output: *lengthp will contain prop length on exit. 1725 * 1726 * Possible Returns: 1727 * 1728 * DDI_PROP_SUCCESS: Prop found and returned. 1729 * DDI_PROP_NOT_FOUND: Prop not found 1730 * DDI_PROP_UNDEFINED: Prop explicitly undefined. 1731 * DDI_PROP_NO_MEMORY: Prop found, but unable to alloc mem. 1732 */ 1733 1734 int 1735 ddi_getlongprop(dev_t dev, dev_info_t *dip, int flags, 1736 char *name, caddr_t valuep, int *lengthp) 1737 { 1738 return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_ALLOC, 1739 flags, name, valuep, lengthp)); 1740 } 1741 1742 /* 1743 * 1744 * ddi_getlongprop_buf: Get long prop into pre-allocated callers 1745 * buffer. (no memory allocation by provider). 1746 * 1747 * dev_t: Input: dev_t of property. 1748 * dip: Input: dev_info_t pointer of child. 1749 * flags: Input: DDI_PROP_DONTPASS or NULL 1750 * name: Input: name of property 1751 * valuep: Input: ptr to callers buffer. 1752 * lengthp:I/O: ptr to length of callers buffer on entry, 1753 * actual length of property on exit. 1754 * 1755 * Possible returns: 1756 * 1757 * DDI_PROP_SUCCESS Prop found and returned 1758 * DDI_PROP_NOT_FOUND Prop not found 1759 * DDI_PROP_UNDEFINED Prop explicitly undefined. 
1760 * DDI_PROP_BUF_TOO_SMALL Prop found, callers buf too small, 1761 * no value returned, but actual prop 1762 * length returned in *lengthp 1763 * 1764 */ 1765 1766 int 1767 ddi_getlongprop_buf(dev_t dev, dev_info_t *dip, int flags, 1768 char *name, caddr_t valuep, int *lengthp) 1769 { 1770 return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF, 1771 flags, name, valuep, lengthp)); 1772 } 1773 1774 /* 1775 * Integer/boolean sized props. 1776 * 1777 * Call is value only... returns found boolean or int sized prop value or 1778 * defvalue if prop not found or is wrong length or is explicitly undefined. 1779 * Only flag is DDI_PROP_DONTPASS... 1780 * 1781 * By convention, this interface returns boolean (0) sized properties 1782 * as value (int)1. 1783 * 1784 * This never returns an error, if property not found or specifically 1785 * undefined, the input `defvalue' is returned. 1786 */ 1787 1788 int 1789 ddi_getprop(dev_t dev, dev_info_t *dip, int flags, char *name, int defvalue) 1790 { 1791 int propvalue = defvalue; 1792 int proplength = sizeof (int); 1793 int error; 1794 1795 error = ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF, 1796 flags, name, (caddr_t)&propvalue, &proplength); 1797 1798 if ((error == DDI_PROP_SUCCESS) && (proplength == 0)) 1799 propvalue = 1; 1800 1801 return (propvalue); 1802 } 1803 1804 /* 1805 * Get prop length interface: flags are 0 or DDI_PROP_DONTPASS 1806 * if returns DDI_PROP_SUCCESS, length returned in *lengthp. 1807 */ 1808 1809 int 1810 ddi_getproplen(dev_t dev, dev_info_t *dip, int flags, char *name, int *lengthp) 1811 { 1812 return (ddi_prop_op(dev, dip, PROP_LEN, flags, name, NULL, lengthp)); 1813 } 1814 1815 /* 1816 * Allocate a struct prop_driver_data, along with 'size' bytes 1817 * for decoded property data. This structure is freed by 1818 * calling ddi_prop_free(9F). 
1819 */ 1820 static void * 1821 ddi_prop_decode_alloc(size_t size, void (*prop_free)(struct prop_driver_data *)) 1822 { 1823 struct prop_driver_data *pdd; 1824 1825 /* 1826 * Allocate a structure with enough memory to store the decoded data. 1827 */ 1828 pdd = kmem_zalloc(sizeof (struct prop_driver_data) + size, KM_SLEEP); 1829 pdd->pdd_size = (sizeof (struct prop_driver_data) + size); 1830 pdd->pdd_prop_free = prop_free; 1831 1832 /* 1833 * Return a pointer to the location to put the decoded data. 1834 */ 1835 return ((void *)((caddr_t)pdd + sizeof (struct prop_driver_data))); 1836 } 1837 1838 /* 1839 * Allocated the memory needed to store the encoded data in the property 1840 * handle. 1841 */ 1842 static int 1843 ddi_prop_encode_alloc(prop_handle_t *ph, size_t size) 1844 { 1845 /* 1846 * If size is zero, then set data to NULL and size to 0. This 1847 * is a boolean property. 1848 */ 1849 if (size == 0) { 1850 ph->ph_size = 0; 1851 ph->ph_data = NULL; 1852 ph->ph_cur_pos = NULL; 1853 ph->ph_save_pos = NULL; 1854 } else { 1855 if (ph->ph_flags == DDI_PROP_DONTSLEEP) { 1856 ph->ph_data = kmem_zalloc(size, KM_NOSLEEP); 1857 if (ph->ph_data == NULL) 1858 return (DDI_PROP_NO_MEMORY); 1859 } else 1860 ph->ph_data = kmem_zalloc(size, KM_SLEEP); 1861 ph->ph_size = size; 1862 ph->ph_cur_pos = ph->ph_data; 1863 ph->ph_save_pos = ph->ph_data; 1864 } 1865 return (DDI_PROP_SUCCESS); 1866 } 1867 1868 /* 1869 * Free the space allocated by the lookup routines. Each lookup routine 1870 * returns a pointer to the decoded data to the driver. The driver then 1871 * passes this pointer back to us. This data actually lives in a struct 1872 * prop_driver_data. We use negative indexing to find the beginning of 1873 * the structure and then free the entire structure using the size and 1874 * the free routine stored in the structure. 
1875 */ 1876 void 1877 ddi_prop_free(void *datap) 1878 { 1879 struct prop_driver_data *pdd; 1880 1881 /* 1882 * Get the structure 1883 */ 1884 pdd = (struct prop_driver_data *) 1885 ((caddr_t)datap - sizeof (struct prop_driver_data)); 1886 /* 1887 * Call the free routine to free it 1888 */ 1889 (*pdd->pdd_prop_free)(pdd); 1890 } 1891 1892 /* 1893 * Free the data associated with an array of ints, 1894 * allocated with ddi_prop_decode_alloc(). 1895 */ 1896 static void 1897 ddi_prop_free_ints(struct prop_driver_data *pdd) 1898 { 1899 kmem_free(pdd, pdd->pdd_size); 1900 } 1901 1902 /* 1903 * Free a single string property or a single string contained within 1904 * the argv style return value of an array of strings. 1905 */ 1906 static void 1907 ddi_prop_free_string(struct prop_driver_data *pdd) 1908 { 1909 kmem_free(pdd, pdd->pdd_size); 1910 1911 } 1912 1913 /* 1914 * Free an array of strings. 1915 */ 1916 static void 1917 ddi_prop_free_strings(struct prop_driver_data *pdd) 1918 { 1919 kmem_free(pdd, pdd->pdd_size); 1920 } 1921 1922 /* 1923 * Free the data associated with an array of bytes. 1924 */ 1925 static void 1926 ddi_prop_free_bytes(struct prop_driver_data *pdd) 1927 { 1928 kmem_free(pdd, pdd->pdd_size); 1929 } 1930 1931 /* 1932 * Reset the current location pointer in the property handle to the 1933 * beginning of the data. 1934 */ 1935 void 1936 ddi_prop_reset_pos(prop_handle_t *ph) 1937 { 1938 ph->ph_cur_pos = ph->ph_data; 1939 ph->ph_save_pos = ph->ph_data; 1940 } 1941 1942 /* 1943 * Restore the current location pointer in the property handle to the 1944 * saved position. 1945 */ 1946 void 1947 ddi_prop_save_pos(prop_handle_t *ph) 1948 { 1949 ph->ph_save_pos = ph->ph_cur_pos; 1950 } 1951 1952 /* 1953 * Save the location that the current location pointer is pointing to.. 
1954 */ 1955 void 1956 ddi_prop_restore_pos(prop_handle_t *ph) 1957 { 1958 ph->ph_cur_pos = ph->ph_save_pos; 1959 } 1960 1961 /* 1962 * Property encode/decode functions 1963 */ 1964 1965 /* 1966 * Decode a single integer property 1967 */ 1968 static int 1969 ddi_prop_fm_decode_int(prop_handle_t *ph, void *data, uint_t *nelements) 1970 { 1971 int i; 1972 int tmp; 1973 1974 /* 1975 * If there is nothing to decode return an error 1976 */ 1977 if (ph->ph_size == 0) 1978 return (DDI_PROP_END_OF_DATA); 1979 1980 /* 1981 * Decode the property as a single integer and return it 1982 * in data if we were able to decode it. 1983 */ 1984 i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, &tmp); 1985 if (i < DDI_PROP_RESULT_OK) { 1986 switch (i) { 1987 case DDI_PROP_RESULT_EOF: 1988 return (DDI_PROP_END_OF_DATA); 1989 1990 case DDI_PROP_RESULT_ERROR: 1991 return (DDI_PROP_CANNOT_DECODE); 1992 } 1993 } 1994 1995 *(int *)data = tmp; 1996 *nelements = 1; 1997 return (DDI_PROP_SUCCESS); 1998 } 1999 2000 /* 2001 * Decode a single 64 bit integer property 2002 */ 2003 static int 2004 ddi_prop_fm_decode_int64(prop_handle_t *ph, void *data, uint_t *nelements) 2005 { 2006 int i; 2007 int64_t tmp; 2008 2009 /* 2010 * If there is nothing to decode return an error 2011 */ 2012 if (ph->ph_size == 0) 2013 return (DDI_PROP_END_OF_DATA); 2014 2015 /* 2016 * Decode the property as a single integer and return it 2017 * in data if we were able to decode it. 
2018 */ 2019 i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, &tmp); 2020 if (i < DDI_PROP_RESULT_OK) { 2021 switch (i) { 2022 case DDI_PROP_RESULT_EOF: 2023 return (DDI_PROP_END_OF_DATA); 2024 2025 case DDI_PROP_RESULT_ERROR: 2026 return (DDI_PROP_CANNOT_DECODE); 2027 } 2028 } 2029 2030 *(int64_t *)data = tmp; 2031 *nelements = 1; 2032 return (DDI_PROP_SUCCESS); 2033 } 2034 2035 /* 2036 * Decode an array of integers property 2037 */ 2038 static int 2039 ddi_prop_fm_decode_ints(prop_handle_t *ph, void *data, uint_t *nelements) 2040 { 2041 int i; 2042 int cnt = 0; 2043 int *tmp; 2044 int *intp; 2045 int n; 2046 2047 /* 2048 * Figure out how many array elements there are by going through the 2049 * data without decoding it first and counting. 2050 */ 2051 for (;;) { 2052 i = DDI_PROP_INT(ph, DDI_PROP_CMD_SKIP, NULL); 2053 if (i < 0) 2054 break; 2055 cnt++; 2056 } 2057 2058 /* 2059 * If there are no elements return an error 2060 */ 2061 if (cnt == 0) 2062 return (DDI_PROP_END_OF_DATA); 2063 2064 /* 2065 * If we cannot skip through the data, we cannot decode it 2066 */ 2067 if (i == DDI_PROP_RESULT_ERROR) 2068 return (DDI_PROP_CANNOT_DECODE); 2069 2070 /* 2071 * Reset the data pointer to the beginning of the encoded data 2072 */ 2073 ddi_prop_reset_pos(ph); 2074 2075 /* 2076 * Allocated memory to store the decoded value in. 2077 */ 2078 intp = ddi_prop_decode_alloc((cnt * sizeof (int)), 2079 ddi_prop_free_ints); 2080 2081 /* 2082 * Decode each element and place it in the space we just allocated 2083 */ 2084 tmp = intp; 2085 for (n = 0; n < cnt; n++, tmp++) { 2086 i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, tmp); 2087 if (i < DDI_PROP_RESULT_OK) { 2088 /* 2089 * Free the space we just allocated 2090 * and return an error. 
2091 */ 2092 ddi_prop_free(intp); 2093 switch (i) { 2094 case DDI_PROP_RESULT_EOF: 2095 return (DDI_PROP_END_OF_DATA); 2096 2097 case DDI_PROP_RESULT_ERROR: 2098 return (DDI_PROP_CANNOT_DECODE); 2099 } 2100 } 2101 } 2102 2103 *nelements = cnt; 2104 *(int **)data = intp; 2105 2106 return (DDI_PROP_SUCCESS); 2107 } 2108 2109 /* 2110 * Decode a 64 bit integer array property 2111 */ 2112 static int 2113 ddi_prop_fm_decode_int64_array(prop_handle_t *ph, void *data, uint_t *nelements) 2114 { 2115 int i; 2116 int n; 2117 int cnt = 0; 2118 int64_t *tmp; 2119 int64_t *intp; 2120 2121 /* 2122 * Count the number of array elements by going 2123 * through the data without decoding it. 2124 */ 2125 for (;;) { 2126 i = DDI_PROP_INT64(ph, DDI_PROP_CMD_SKIP, NULL); 2127 if (i < 0) 2128 break; 2129 cnt++; 2130 } 2131 2132 /* 2133 * If there are no elements return an error 2134 */ 2135 if (cnt == 0) 2136 return (DDI_PROP_END_OF_DATA); 2137 2138 /* 2139 * If we cannot skip through the data, we cannot decode it 2140 */ 2141 if (i == DDI_PROP_RESULT_ERROR) 2142 return (DDI_PROP_CANNOT_DECODE); 2143 2144 /* 2145 * Reset the data pointer to the beginning of the encoded data 2146 */ 2147 ddi_prop_reset_pos(ph); 2148 2149 /* 2150 * Allocate memory to store the decoded value. 2151 */ 2152 intp = ddi_prop_decode_alloc((cnt * sizeof (int64_t)), 2153 ddi_prop_free_ints); 2154 2155 /* 2156 * Decode each element and place it in the space allocated 2157 */ 2158 tmp = intp; 2159 for (n = 0; n < cnt; n++, tmp++) { 2160 i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, tmp); 2161 if (i < DDI_PROP_RESULT_OK) { 2162 /* 2163 * Free the space we just allocated 2164 * and return an error. 
			 */
			ddi_prop_free(intp);
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}
	}

	*nelements = cnt;
	*(int64_t **)data = intp;

	return (DDI_PROP_SUCCESS);
}

/*
 * Encode an array of integers property (Can be one element)
 *
 * 'data' points to the first int of the array; 'nelements' is the
 * element count.  Encoded bytes are stored in the handle's buffer.
 * Returns DDI_PROP_SUCCESS, DDI_PROP_CANNOT_ENCODE,
 * DDI_PROP_END_OF_DATA, or DDI_PROP_NO_MEMORY.
 */
int
ddi_prop_fm_encode_ints(prop_handle_t *ph, void *data, uint_t nelements)
{
	int i;
	int *tmp;
	int cnt;
	int size;

	/*
	 * If there is no data, we cannot do anything
	 */
	if (nelements == 0)
		return (DDI_PROP_CANNOT_ENCODE);

	/*
	 * Get the size of an encoded int.
	 */
	size = DDI_PROP_INT(ph, DDI_PROP_CMD_GET_ESIZE, NULL);

	if (size < DDI_PROP_RESULT_OK) {
		switch (size) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_ENCODE);
		}
	}

	/*
	 * Allocate space in the handle to store the encoded int.
	 */
	if (ddi_prop_encode_alloc(ph, size * nelements) !=
	    DDI_PROP_SUCCESS)
		return (DDI_PROP_NO_MEMORY);

	/*
	 * Encode the array of ints.
	 */
	tmp = (int *)data;
	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
		i = DDI_PROP_INT(ph, DDI_PROP_CMD_ENCODE, tmp);
		if (i < DDI_PROP_RESULT_OK) {
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_ENCODE);
			}
		}
	}

	return (DDI_PROP_SUCCESS);
}


/*
 * Encode a 64 bit integer array property
 *
 * Same contract as ddi_prop_fm_encode_ints(), but for int64_t
 * elements via the handle's 64-bit integer operator.
 */
int
ddi_prop_fm_encode_int64(prop_handle_t *ph, void *data, uint_t nelements)
{
	int i;
	int cnt;
	int size;
	int64_t *tmp;

	/*
	 * If there is no data, we cannot do anything
	 */
	if (nelements == 0)
		return (DDI_PROP_CANNOT_ENCODE);

	/*
	 * Get the size of an encoded 64 bit int.
	 */
	size = DDI_PROP_INT64(ph, DDI_PROP_CMD_GET_ESIZE, NULL);

	if (size < DDI_PROP_RESULT_OK) {
		switch (size) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_ENCODE);
		}
	}

	/*
	 * Allocate space in the handle to store the encoded int.
	 */
	if (ddi_prop_encode_alloc(ph, size * nelements) !=
	    DDI_PROP_SUCCESS)
		return (DDI_PROP_NO_MEMORY);

	/*
	 * Encode the array of ints.
	 */
	tmp = (int64_t *)data;
	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_ENCODE, tmp);
		if (i < DDI_PROP_RESULT_OK) {
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_ENCODE);
			}
		}
	}

	return (DDI_PROP_SUCCESS);
}

/*
 * Decode a single string property
 *
 * On success stores a pointer to the freshly allocated, decoded
 * string through 'data' (caller frees with ddi_prop_free()) and
 * sets *nelements to 1.
 */
static int
ddi_prop_fm_decode_string(prop_handle_t *ph, void *data, uint_t *nelements)
{
	char *tmp;
	char *str;
	int i;
	int size;

	/*
	 * If there is nothing to decode return an error
	 */
	if (ph->ph_size == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * Get the decoded size of the encoded string.
	 */
	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
	if (size < DDI_PROP_RESULT_OK) {
		switch (size) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	/*
	 * Allocated memory to store the decoded value in.
	 * NOTE(review): return value is not checked here; presumably
	 * the decode-side allocation sleeps and cannot fail — confirm
	 * against ddi_prop_decode_alloc().
	 */
	str = ddi_prop_decode_alloc((size_t)size, ddi_prop_free_string);

	ddi_prop_reset_pos(ph);

	/*
	 * Decode the str and place it in the space we just allocated
	 */
	tmp = str;
	i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, tmp);
	if (i < DDI_PROP_RESULT_OK) {
		/*
		 * Free the space we just allocated
		 * and return an error.
		 */
		ddi_prop_free(str);
		switch (i) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	*(char **)data = str;
	*nelements = 1;

	return (DDI_PROP_SUCCESS);
}

/*
 * Decode an array of strings.
 *
 * On success stores a NULL-terminated vector of string pointers
 * through 'data' (a single allocation; caller frees with
 * ddi_prop_free()) and sets *nelements to the string count.
 */
int
ddi_prop_fm_decode_strings(prop_handle_t *ph, void *data, uint_t *nelements)
{
	int cnt = 0;
	char **strs;
	char **tmp;
	char *ptr;
	int i;
	int n;
	int size;
	size_t nbytes;

	/*
	 * Figure out how many array elements there are by going through the
	 * data without decoding it first and counting.
	 */
	for (;;) {
		i = DDI_PROP_STR(ph, DDI_PROP_CMD_SKIP, NULL);
		if (i < 0)
			break;
		cnt++;
	}

	/*
	 * If there are no elements return an error
	 */
	if (cnt == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * If we cannot skip through the data, we cannot decode it
	 */
	if (i == DDI_PROP_RESULT_ERROR)
		return (DDI_PROP_CANNOT_DECODE);

	/*
	 * Reset the data pointer to the beginning of the encoded data
	 */
	ddi_prop_reset_pos(ph);

	/*
	 * Figure out how much memory we need for the sum total:
	 * the pointer vector (plus NULL terminator slot) followed by
	 * the string bytes themselves.
	 */
	nbytes = (cnt + 1) * sizeof (char *);

	for (n = 0; n < cnt; n++) {
		/*
		 * Get the decoded size of the current encoded string.
		 */
		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
		if (size < DDI_PROP_RESULT_OK) {
			switch (size) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}

		nbytes += size;
	}

	/*
	 * Allocate memory in which to store the decoded strings.
	 * NOTE(review): allocation result not checked — presumably the
	 * decode-side allocation sleeps and cannot fail; confirm.
	 */
	strs = ddi_prop_decode_alloc(nbytes, ddi_prop_free_strings);

	/*
	 * Set up pointers for each string by figuring out yet
	 * again how long each string is.
	 */
	ddi_prop_reset_pos(ph);
	ptr = (caddr_t)strs + ((cnt + 1) * sizeof (char *));
	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
		/*
		 * Get the decoded size of the current encoded string.
		 */
		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
		if (size < DDI_PROP_RESULT_OK) {
			ddi_prop_free(strs);
			switch (size) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}

		*tmp = ptr;
		ptr += size;
	}

	/*
	 * String array is terminated by a NULL
	 */
	*tmp = NULL;

	/*
	 * Finally, we can decode each string
	 */
	ddi_prop_reset_pos(ph);
	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
		i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, *tmp);
		if (i < DDI_PROP_RESULT_OK) {
			/*
			 * Free the space we just allocated
			 * and return an error
			 */
			ddi_prop_free(strs);
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}
	}

	*(char ***)data = strs;
	*nelements = cnt;

	return (DDI_PROP_SUCCESS);
}

/*
 * Encode a string.
 *
 * 'data' is a pointer to the (char *) to encode; 'nelements' must
 * be non-zero.
 */
int
ddi_prop_fm_encode_string(prop_handle_t *ph, void *data, uint_t nelements)
{
	char **tmp;
	int size;
	int i;

	/*
	 * If there is no data, we cannot do anything
	 */
	if (nelements == 0)
		return (DDI_PROP_CANNOT_ENCODE);

	/*
	 * Get the size of the encoded string.
	 */
	tmp = (char **)data;
	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
	if (size < DDI_PROP_RESULT_OK) {
		switch (size) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_ENCODE);
		}
	}

	/*
	 * Allocate space in the handle to store the encoded string.
	 */
	if (ddi_prop_encode_alloc(ph, size) != DDI_PROP_SUCCESS)
		return (DDI_PROP_NO_MEMORY);

	ddi_prop_reset_pos(ph);

	/*
	 * Encode the string.
	 */
	tmp = (char **)data;
	i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
	if (i < DDI_PROP_RESULT_OK) {
		switch (i) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_ENCODE);
		}
	}

	return (DDI_PROP_SUCCESS);
}


/*
 * Encode an array of strings.
 *
 * 'data' points to an array of 'nelements' (char *) values.
 */
int
ddi_prop_fm_encode_strings(prop_handle_t *ph, void *data, uint_t nelements)
{
	int cnt = 0;
	char **tmp;
	int size;
	uint_t total_size;
	int i;

	/*
	 * If there is no data, we cannot do anything
	 */
	if (nelements == 0)
		return (DDI_PROP_CANNOT_ENCODE);

	/*
	 * Get the total size required to encode all the strings.
	 */
	total_size = 0;
	tmp = (char **)data;
	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
		if (size < DDI_PROP_RESULT_OK) {
			switch (size) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_ENCODE);
			}
		}
		total_size += (uint_t)size;
	}

	/*
	 * Allocate space in the handle to store the encoded strings.
	 */
	if (ddi_prop_encode_alloc(ph, total_size) != DDI_PROP_SUCCESS)
		return (DDI_PROP_NO_MEMORY);

	ddi_prop_reset_pos(ph);

	/*
	 * Encode the array of strings.
	 */
	tmp = (char **)data;
	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
		i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
		if (i < DDI_PROP_RESULT_OK) {
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_ENCODE);
			}
		}
	}

	return (DDI_PROP_SUCCESS);
}


/*
 * Decode an array of bytes.
 *
 * On success stores a pointer to the freshly allocated byte array
 * through 'data' (caller frees with ddi_prop_free()) and sets
 * *nelements to the byte count.
 */
static int
ddi_prop_fm_decode_bytes(prop_handle_t *ph, void *data, uint_t *nelements)
{
	uchar_t *tmp;
	int nbytes;
	int i;

	/*
	 * If there are no elements return an error
	 */
	if (ph->ph_size == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * Get the size of the encoded array of bytes.
	 */
	nbytes = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_DSIZE,
	    data, ph->ph_size);
	if (nbytes < DDI_PROP_RESULT_OK) {
		switch (nbytes) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	/*
	 * Allocated memory to store the decoded value in.
	 */
	tmp = ddi_prop_decode_alloc(nbytes, ddi_prop_free_bytes);

	/*
	 * Decode each element and place it in the space we just allocated
	 */
	i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_DECODE, tmp, nbytes);
	if (i < DDI_PROP_RESULT_OK) {
		/*
		 * Free the space we just allocated
		 * and return an error
		 */
		ddi_prop_free(tmp);
		switch (i) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	*(uchar_t **)data = tmp;
	*nelements = nbytes;

	return (DDI_PROP_SUCCESS);
}

/*
 * Encode an array of bytes.
2683 */ 2684 int 2685 ddi_prop_fm_encode_bytes(prop_handle_t *ph, void *data, uint_t nelements) 2686 { 2687 int size; 2688 int i; 2689 2690 /* 2691 * If there are no elements, then this is a boolean property, 2692 * so just create a property handle with no data and return. 2693 */ 2694 if (nelements == 0) { 2695 (void) ddi_prop_encode_alloc(ph, 0); 2696 return (DDI_PROP_SUCCESS); 2697 } 2698 2699 /* 2700 * Get the size of the encoded array of bytes. 2701 */ 2702 size = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_ESIZE, (uchar_t *)data, 2703 nelements); 2704 if (size < DDI_PROP_RESULT_OK) { 2705 switch (size) { 2706 case DDI_PROP_RESULT_EOF: 2707 return (DDI_PROP_END_OF_DATA); 2708 2709 case DDI_PROP_RESULT_ERROR: 2710 return (DDI_PROP_CANNOT_DECODE); 2711 } 2712 } 2713 2714 /* 2715 * Allocate space in the handle to store the encoded bytes. 2716 */ 2717 if (ddi_prop_encode_alloc(ph, (uint_t)size) != DDI_PROP_SUCCESS) 2718 return (DDI_PROP_NO_MEMORY); 2719 2720 /* 2721 * Encode the array of bytes. 2722 */ 2723 i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_ENCODE, (uchar_t *)data, 2724 nelements); 2725 if (i < DDI_PROP_RESULT_OK) { 2726 switch (i) { 2727 case DDI_PROP_RESULT_EOF: 2728 return (DDI_PROP_END_OF_DATA); 2729 2730 case DDI_PROP_RESULT_ERROR: 2731 return (DDI_PROP_CANNOT_ENCODE); 2732 } 2733 } 2734 2735 return (DDI_PROP_SUCCESS); 2736 } 2737 2738 /* 2739 * OBP 1275 integer, string and byte operators. 
 *
 * DDI_PROP_CMD_DECODE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot decode the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_OK:			data was decoded
 *
 * DDI_PROP_CMD_ENCODE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot encode the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_OK:			data was encoded
 *
 * DDI_PROP_CMD_SKIP:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot skip the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_OK:			data was skipped
 *
 * DDI_PROP_CMD_GET_ESIZE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot get encoded size
 *	DDI_PROP_RESULT_EOF:		end of data
 *	> 0:				the encoded size
 *
 * DDI_PROP_CMD_GET_DSIZE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot get decoded size
 *	DDI_PROP_RESULT_EOF:		end of data
 *	> 0:				the decoded size
 */

/*
 * OBP 1275 integer operator
 *
 * OBP properties are a byte stream of data, so integers may not be
 * properly aligned. Therefore we need to copy them one byte at a time.
 */
int
ddi_prop_1275_int(prop_handle_t *ph, uint_t cmd, int *data)
{
	int i;

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 *
		 * NOTE(review): ph_size is a byte count, but the bound
		 * checks below add it to an (int *), which scales by
		 * sizeof (int).  This makes the check looser than a
		 * byte-exact bound — confirm intent before changing.
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
			return (DDI_PROP_RESULT_ERROR);
		if (ph->ph_flags & PH_FROM_PROM) {
			i = MIN(ph->ph_size, PROP_1275_INT_SIZE);
			if ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
			    ph->ph_size - i))
				return (DDI_PROP_RESULT_ERROR);
		} else {
			if (ph->ph_size < sizeof (int) ||
			    ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
			    ph->ph_size - sizeof (int))))
				return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Copy the integer, using the implementation-specific
		 * copy function if the property is coming from the PROM.
		 */
		if (ph->ph_flags & PH_FROM_PROM) {
			*data = impl_ddi_prop_int_from_prom(
			    (uchar_t *)ph->ph_cur_pos,
			    (ph->ph_size < PROP_1275_INT_SIZE) ?
			    ph->ph_size : PROP_1275_INT_SIZE);
		} else {
			bcopy(ph->ph_cur_pos, data, sizeof (int));
		}

		/*
		 * Move the current location to the start of the next
		 * bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encoded the data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < PROP_1275_INT_SIZE ||
		    ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
		    ph->ph_size - sizeof (int))))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Encode the integer into the byte stream one byte at a
		 * time.
		 */
		bcopy(data, ph->ph_cur_pos, sizeof (int));

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < PROP_1275_INT_SIZE)
			return (DDI_PROP_RESULT_ERROR);


		if ((caddr_t)ph->ph_cur_pos ==
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		} else if ((caddr_t)ph->ph_cur_pos >
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		}

		/*
		 * Move the current location to the start of the next bit of
		 * undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of an encoded integer on OBP
		 */
		return (PROP_1275_INT_SIZE);

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the size of a decoded integer on the system.
		 */
		return (sizeof (int));

	default:
#ifdef DEBUG
		panic("ddi_prop_1275_int: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}

/*
 * 64 bit integer operator.
 *
 * This is an extension, defined by Sun, to the 1275 integer
 * operator.  This routine handles the encoding/decoding of
 * 64 bit integer properties.  PROM-sourced 64-bit properties are
 * rejected (PH_FROM_PROM always returns an error below).
 */
int
ddi_prop_int64_op(prop_handle_t *ph, uint_t cmd, int64_t *data)
{

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
			return (DDI_PROP_RESULT_ERROR);
		if (ph->ph_flags & PH_FROM_PROM) {
			return (DDI_PROP_RESULT_ERROR);
		} else {
			if (ph->ph_size < sizeof (int64_t) ||
			    ((int64_t *)ph->ph_cur_pos >
			    ((int64_t *)ph->ph_data +
			    ph->ph_size - sizeof (int64_t))))
				return (DDI_PROP_RESULT_ERROR);
		}
		/*
		 * Copy the integer, using the implementation-specific
		 * copy function if the property is coming from the PROM.
		 */
		if (ph->ph_flags & PH_FROM_PROM) {
			return (DDI_PROP_RESULT_ERROR);
		} else {
			bcopy(ph->ph_cur_pos, data, sizeof (int64_t));
		}

		/*
		 * Move the current location to the start of the next
		 * bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encoded the data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < sizeof (int64_t) ||
		    ((int64_t *)ph->ph_cur_pos > ((int64_t *)ph->ph_data +
		    ph->ph_size - sizeof (int64_t))))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Encode the integer into the byte stream one byte at a
		 * time.
		 */
		bcopy(data, ph->ph_cur_pos, sizeof (int64_t));

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < sizeof (int64_t))
			return (DDI_PROP_RESULT_ERROR);

		if ((caddr_t)ph->ph_cur_pos ==
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		} else if ((caddr_t)ph->ph_cur_pos >
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		}

		/*
		 * Move the current location to the start of
		 * the next bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of an encoded integer on OBP
		 */
		return (sizeof (int64_t));

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the size of a decoded integer on the system.
		 */
		return (sizeof (int64_t));

	default:
#ifdef DEBUG
		panic("ddi_prop_int64_op: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}

/*
 * OBP 1275 string operator.
 *
 * OBP strings are NULL terminated.
 * DECODE/SKIP/GET_DSIZE tolerate strings that OBP did not NULL
 * terminate (e.g. 'true'/'false' boolean values) by synthesizing
 * the terminator.
 */
int
ddi_prop_1275_string(prop_handle_t *ph, uint_t cmd, char *data)
{
	int n;
	char *p;
	char *end;

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Match DDI_PROP_CMD_GET_DSIZE logic for when to stop and
		 * how to NULL terminate result.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;
		if (p >= end)
			return (DDI_PROP_RESULT_EOF);

		while (p < end) {
			*data++ = *p;
			if (*p++ == 0) {	/* NULL from OBP */
				ph->ph_cur_pos = p;
				return (DDI_PROP_RESULT_OK);
			}
		}

		/*
		 * If OBP did not NULL terminate string, which happens
		 * (at least) for 'true'/'false' boolean values, account for
		 * the space and store null termination on decode.
		 */
		ph->ph_cur_pos = p;
		*data = 0;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encoded the data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		n = strlen(data) + 1;
		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
		    ph->ph_size - n)) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Copy the NULL terminated string
		 */
		bcopy(data, ph->ph_cur_pos, n);

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + n;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Return the string length plus one for the NULL
		 * We know the size of the property, we need to
		 * ensure that the string is properly formatted,
		 * since we may be looking up random OBP data.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;
		if (p >= end)
			return (DDI_PROP_RESULT_EOF);

		while (p < end) {
			if (*p++ == 0) {	/* NULL from OBP */
				ph->ph_cur_pos = p;
				return (DDI_PROP_RESULT_OK);
			}
		}

		/*
		 * Accommodate the fact that OBP does not always NULL
		 * terminate strings.
		 */
		ph->ph_cur_pos = p;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of the encoded string on OBP.
		 */
		return (strlen(data) + 1);

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the string length plus one for the NULL.
		 * We know the size of the property, we need to
		 * ensure that the string is properly formatted,
		 * since we may be looking up random OBP data.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;
		if (p >= end)
			return (DDI_PROP_RESULT_EOF);

		for (n = 0; p < end; n++) {
			if (*p++ == 0) {	/* NULL from OBP */
				ph->ph_cur_pos = p;
				return (n + 1);
			}
		}

		/*
		 * If OBP did not NULL terminate string, which happens for
		 * 'true'/'false' boolean values, account for the space
		 * to store null termination here.
		 */
		ph->ph_cur_pos = p;
		return (n + 1);

	default:
#ifdef DEBUG
		panic("ddi_prop_1275_string: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}

/*
 * OBP 1275 byte operator
 *
 * Caller must specify the number of bytes to get.  OBP encodes bytes
 * as a byte so there is a 1-to-1 translation.
 */
int
ddi_prop_1275_bytes(prop_handle_t *ph, uint_t cmd, uchar_t *data,
    uint_t nelements)
{
	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < nelements ||
		    ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
		    ph->ph_size - nelements)))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Copy out the bytes
		 */
		bcopy(ph->ph_cur_pos, data, nelements);

		/*
		 * Move the current location
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encode the data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < nelements ||
		    ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
		    ph->ph_size - nelements)))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Copy in the bytes
		 */
		bcopy(data, ph->ph_cur_pos, nelements);

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < nelements)
			return (DDI_PROP_RESULT_ERROR);

		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
		    ph->ph_size - nelements))
			return (DDI_PROP_RESULT_EOF);

		/*
		 * Move the current location
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * The size in bytes of the encoded size is the
		 * same as the decoded size provided by the caller.
		 */
		return (nelements);

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Just return the number of bytes specified by the caller.
		 */
		return (nelements);

	default:
#ifdef DEBUG
		panic("ddi_prop_1275_bytes: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}

/*
 * Used for properties that come from the OBP, hardware configuration files,
 * or that are created by calls to ddi_prop_update(9F).
 */
static struct prop_handle_ops prop_1275_ops = {
	ddi_prop_1275_int,
	ddi_prop_1275_string,
	ddi_prop_1275_bytes,
	ddi_prop_int64_op
};


/*
 * Interface to create/modify a managed property on child's behalf...
 * Flags interpreted are:
 *	DDI_PROP_CANSLEEP:	Allow memory allocation to sleep.
 *	DDI_PROP_SYSTEM_DEF:	Manipulate system list rather than driver list.
 *
 * Use same dev_t when modifying or undefining a property.
 * Search for properties with DDI_DEV_T_ANY to match first named
 * property on the list.
 *
 * Properties are stored LIFO and subsequently will match the first
 * `matching' instance.
 */

/*
 * ddi_prop_add:	Add a software defined property
 */

/*
 * define to get a new ddi_prop_t.
 * km_flags are KM_SLEEP or KM_NOSLEEP.
 */

#define	DDI_NEW_PROP_T(km_flags)	\
	(kmem_zalloc(sizeof (ddi_prop_t), km_flags))

/*
 * Allocate a new ddi_prop_t, copy name/value into it, and link it at
 * the head of the selected per-devinfo property list (driver, system,
 * or hardware list depending on 'flags').  Returns DDI_PROP_SUCCESS,
 * DDI_PROP_INVAL_ARG, or DDI_PROP_NO_MEMORY.
 */
static int
ddi_prop_add(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t value, int length)
{
	ddi_prop_t *new_propp, *propp;
	ddi_prop_t **list_head = &(DEVI(dip)->devi_drv_prop_ptr);
	int km_flags = KM_NOSLEEP;
	int name_buf_len;

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero return error.
	 */

	if (dev == DDI_DEV_T_ANY || name == (char *)0 || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	if (flags & DDI_PROP_CANSLEEP)
		km_flags = KM_SLEEP;

	if (flags & DDI_PROP_SYSTEM_DEF)
		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
	else if (flags & DDI_PROP_HW_DEF)
		list_head = &(DEVI(dip)->devi_hw_prop_ptr);

	if ((new_propp = DDI_NEW_PROP_T(km_flags)) == NULL) {
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}

	/*
	 * If dev is major number 0, then we need to do a ddi_name_to_major
	 * to get the real major number for the device.  This needs to be
	 * done because some drivers need to call ddi_prop_create in their
	 * attach routines but they don't have a dev.  By creating the dev
	 * ourself if the major number is 0, drivers will not have to know what
	 * their major number.  They can just create a dev with major number
	 * 0 and pass it in.  For device 0, we will be doing a little extra
	 * work by recreating the same dev that we already have, but its the
	 * price you pay :-).
	 *
	 * This fixes bug #1098060.
	 */
	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN) {
		new_propp->prop_dev =
		    makedevice(ddi_name_to_major(DEVI(dip)->devi_binding_name),
		    getminor(dev));
	} else
		new_propp->prop_dev = dev;

	/*
	 * Allocate space for property name and copy it in...
	 */

	name_buf_len = strlen(name) + 1;
	new_propp->prop_name = kmem_alloc(name_buf_len, km_flags);
	if (new_propp->prop_name == 0) {
		kmem_free(new_propp, sizeof (ddi_prop_t));
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}
	bcopy(name, new_propp->prop_name, name_buf_len);

	/*
	 * Set the property type
	 */
	new_propp->prop_flags = flags & DDI_PROP_TYPE_MASK;

	/*
	 * Set length and value ONLY if not an explicit property undefine:
	 * NOTE: value and length are zero for explicit undefines.
	 */

	if (flags & DDI_PROP_UNDEF_IT) {
		new_propp->prop_flags |= DDI_PROP_UNDEF_IT;
	} else {
		if ((new_propp->prop_len = length) != 0) {
			new_propp->prop_val = kmem_alloc(length, km_flags);
			if (new_propp->prop_val == 0) {
				kmem_free(new_propp->prop_name, name_buf_len);
				kmem_free(new_propp, sizeof (ddi_prop_t));
				cmn_err(CE_CONT, prop_no_mem_msg, name);
				return (DDI_PROP_NO_MEMORY);
			}
			bcopy(value, new_propp->prop_val, length);
		}
	}

	/*
	 * Link property into beginning of list. (Properties are LIFO order.)
	 */

	mutex_enter(&(DEVI(dip)->devi_lock));
	propp = *list_head;
	new_propp->prop_next = propp;
	*list_head = new_propp;
	mutex_exit(&(DEVI(dip)->devi_lock));
	return (DDI_PROP_SUCCESS);
}


/*
 * ddi_prop_change:	Modify a software managed property value
 *
 *			Set new length and value if found.
 *			returns DDI_PROP_INVAL_ARG if dev is DDI_DEV_T_ANY or
 *			input name is the NULL string.
 *			returns DDI_PROP_NO_MEMORY if unable to allocate memory
 *
 * Note: an undef can be modified to be a define,
 *	(you can't go the other way.)
 */

static int
ddi_prop_change(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t value, int length)
{
	ddi_prop_t *propp;
	ddi_prop_t **ppropp;
	caddr_t p = NULL;

	if ((dev == DDI_DEV_T_ANY) || (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	/*
	 * Preallocate buffer, even if we don't need it...
	 * (done before taking devi_lock so the allocation may sleep)
	 */
	if (length != 0) {
		p = kmem_alloc(length, (flags & DDI_PROP_CANSLEEP) ?
		    KM_SLEEP : KM_NOSLEEP);
		if (p == NULL) {
			cmn_err(CE_CONT, prop_no_mem_msg, name);
			return (DDI_PROP_NO_MEMORY);
		}
	}

	/*
	 * If the dev_t value contains DDI_MAJOR_T_UNKNOWN for the major
	 * number, a real dev_t value should be created based upon the dip's
	 * binding driver.  See ddi_prop_add...
	 */
	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN)
		dev = makedevice(
		    ddi_name_to_major(DEVI(dip)->devi_binding_name),
		    getminor(dev));

	/*
	 * Check to see if the property exists.  If so we modify it.
	 * Else we create it by calling ddi_prop_add().
	 */
	mutex_enter(&(DEVI(dip)->devi_lock));
	ppropp = &DEVI(dip)->devi_drv_prop_ptr;
	if (flags & DDI_PROP_SYSTEM_DEF)
		ppropp = &DEVI(dip)->devi_sys_prop_ptr;
	else if (flags & DDI_PROP_HW_DEF)
		ppropp = &DEVI(dip)->devi_hw_prop_ptr;

	if ((propp = i_ddi_prop_search(dev, name, flags, ppropp)) != NULL) {
		/*
		 * Need to reallocate buffer?  If so, do it
		 * carefully (reuse same space if new prop
		 * is same size and non-NULL sized).
		 */
		if (length != 0)
			bcopy(value, p, length);

		if (propp->prop_len != 0)
			kmem_free(propp->prop_val, propp->prop_len);

		propp->prop_len = length;
		propp->prop_val = p;
		propp->prop_flags &= ~DDI_PROP_UNDEF_IT;
		mutex_exit(&(DEVI(dip)->devi_lock));
		return (DDI_PROP_SUCCESS);
	}

	mutex_exit(&(DEVI(dip)->devi_lock));
	if (length != 0)
		kmem_free(p, length);

	return (ddi_prop_add(dev, dip, flags, name, value, length));
}

/*
 * Common update routine used to update and encode a property.  Creates
 * a property handle, calls the property encode routine, figures out if
 * the property already exists and updates if it does.  Otherwise it
 * creates if it does not exist.
 */
int
ddi_prop_update_common(dev_t match_dev, dev_info_t *dip, int flags,
    char *name, void *data, uint_t nelements,
    int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
{
	prop_handle_t ph;
	int rval;
	uint_t ourflags;

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
	 * return error.
	 */
	if (match_dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	/*
	 * Create the handle
	 */
	ph.ph_data = NULL;
	ph.ph_cur_pos = NULL;
	ph.ph_save_pos = NULL;
	ph.ph_size = 0;
	ph.ph_ops = &prop_1275_ops;

	/*
	 * ourflags:
	 * For compatibility with the old interfaces.  The old interfaces
	 * didn't sleep by default and slept when the flag was set.  These
	 * interfaces to the opposite.  So the old interfaces now set the
	 * DDI_PROP_DONTSLEEP flag by default which tells us not to sleep.
	 *
	 * ph.ph_flags:
	 * Blocked data or unblocked data allocation
	 * for ph.ph_data in ddi_prop_encode_alloc()
	 */
	if (flags & DDI_PROP_DONTSLEEP) {
		ourflags = flags;
		ph.ph_flags = DDI_PROP_DONTSLEEP;
	} else {
		ourflags = flags | DDI_PROP_CANSLEEP;
		ph.ph_flags = DDI_PROP_CANSLEEP;
	}

	/*
	 * Encode the data and store it in the property handle by
	 * calling the prop_encode routine.
	 */
	if ((rval = (*prop_create)(&ph, data, nelements)) !=
	    DDI_PROP_SUCCESS) {
		if (rval == DDI_PROP_NO_MEMORY)
			cmn_err(CE_CONT, prop_no_mem_msg, name);
		if (ph.ph_size != 0)
			kmem_free(ph.ph_data, ph.ph_size);
		return (rval);
	}

	/*
	 * The old interfaces use a stacking approach to creating
	 * properties.  If we are being called from the old interfaces,
	 * the DDI_PROP_STACK_CREATE flag will be set, so we just do a
	 * create without checking.
	 */
	if (flags & DDI_PROP_STACK_CREATE) {
		rval = ddi_prop_add(match_dev, dip,
		    ourflags, name, ph.ph_data, ph.ph_size);
	} else {
		rval = ddi_prop_change(match_dev, dip,
		    ourflags, name, ph.ph_data, ph.ph_size);
	}

	/*
	 * Free the encoded data allocated in the prop_encode routine.
	 */
	if (ph.ph_size != 0)
		kmem_free(ph.ph_data, ph.ph_size);

	return (rval);
}


/*
 * ddi_prop_create:	Define a managed property:
 *			See above for details.
3561 */ 3562 3563 int 3564 ddi_prop_create(dev_t dev, dev_info_t *dip, int flag, 3565 char *name, caddr_t value, int length) 3566 { 3567 if (!(flag & DDI_PROP_CANSLEEP)) { 3568 flag |= DDI_PROP_DONTSLEEP; 3569 #ifdef DDI_PROP_DEBUG 3570 if (length != 0) 3571 cmn_err(CE_NOTE, "!ddi_prop_create: interface obsolete," 3572 "use ddi_prop_update (prop = %s, node = %s%d)", 3573 name, ddi_driver_name(dip), ddi_get_instance(dip)); 3574 #endif /* DDI_PROP_DEBUG */ 3575 } 3576 flag &= ~DDI_PROP_SYSTEM_DEF; 3577 flag |= DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY; 3578 return (ddi_prop_update_common(dev, dip, flag, name, 3579 value, length, ddi_prop_fm_encode_bytes)); 3580 } 3581 3582 int 3583 e_ddi_prop_create(dev_t dev, dev_info_t *dip, int flag, 3584 char *name, caddr_t value, int length) 3585 { 3586 if (!(flag & DDI_PROP_CANSLEEP)) 3587 flag |= DDI_PROP_DONTSLEEP; 3588 flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY; 3589 return (ddi_prop_update_common(dev, dip, flag, 3590 name, value, length, ddi_prop_fm_encode_bytes)); 3591 } 3592 3593 int 3594 ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag, 3595 char *name, caddr_t value, int length) 3596 { 3597 ASSERT((flag & DDI_PROP_TYPE_MASK) == 0); 3598 3599 /* 3600 * If dev_t is DDI_DEV_T_ANY or name's length is zero, 3601 * return error. 
3602 */ 3603 if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0) 3604 return (DDI_PROP_INVAL_ARG); 3605 3606 if (!(flag & DDI_PROP_CANSLEEP)) 3607 flag |= DDI_PROP_DONTSLEEP; 3608 flag &= ~DDI_PROP_SYSTEM_DEF; 3609 if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_NOTPROM), name) == 0) 3610 return (DDI_PROP_NOT_FOUND); 3611 3612 return (ddi_prop_update_common(dev, dip, 3613 (flag | DDI_PROP_TYPE_BYTE), name, 3614 value, length, ddi_prop_fm_encode_bytes)); 3615 } 3616 3617 int 3618 e_ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag, 3619 char *name, caddr_t value, int length) 3620 { 3621 ASSERT((flag & DDI_PROP_TYPE_MASK) == 0); 3622 3623 /* 3624 * If dev_t is DDI_DEV_T_ANY or name's length is zero, 3625 * return error. 3626 */ 3627 if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0) 3628 return (DDI_PROP_INVAL_ARG); 3629 3630 if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_SYSTEM_DEF), name) == 0) 3631 return (DDI_PROP_NOT_FOUND); 3632 3633 if (!(flag & DDI_PROP_CANSLEEP)) 3634 flag |= DDI_PROP_DONTSLEEP; 3635 return (ddi_prop_update_common(dev, dip, 3636 (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE), 3637 name, value, length, ddi_prop_fm_encode_bytes)); 3638 } 3639 3640 3641 /* 3642 * Common lookup routine used to lookup and decode a property. 3643 * Creates a property handle, searches for the raw encoded data, 3644 * fills in the handle, and calls the property decode functions 3645 * passed in. 3646 * 3647 * This routine is not static because ddi_bus_prop_op() which lives in 3648 * ddi_impl.c calls it. No driver should be calling this routine. 
3649 */ 3650 int 3651 ddi_prop_lookup_common(dev_t match_dev, dev_info_t *dip, 3652 uint_t flags, char *name, void *data, uint_t *nelements, 3653 int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements)) 3654 { 3655 int rval; 3656 uint_t ourflags; 3657 prop_handle_t ph; 3658 3659 if ((match_dev == DDI_DEV_T_NONE) || 3660 (name == NULL) || (strlen(name) == 0)) 3661 return (DDI_PROP_INVAL_ARG); 3662 3663 ourflags = (flags & DDI_PROP_DONTSLEEP) ? flags : 3664 flags | DDI_PROP_CANSLEEP; 3665 3666 /* 3667 * Get the encoded data 3668 */ 3669 bzero(&ph, sizeof (prop_handle_t)); 3670 3671 if ((flags & DDI_UNBND_DLPI2) || (flags & DDI_PROP_ROOTNEX_GLOBAL)) { 3672 /* 3673 * For rootnex and unbound dlpi style-2 devices, index into 3674 * the devnames' array and search the global 3675 * property list. 3676 */ 3677 ourflags &= ~DDI_UNBND_DLPI2; 3678 rval = i_ddi_prop_search_global(match_dev, 3679 ourflags, name, &ph.ph_data, &ph.ph_size); 3680 } else { 3681 rval = ddi_prop_search_common(match_dev, dip, 3682 PROP_LEN_AND_VAL_ALLOC, ourflags, name, 3683 &ph.ph_data, &ph.ph_size); 3684 3685 } 3686 3687 if (rval != DDI_PROP_SUCCESS && rval != DDI_PROP_FOUND_1275) { 3688 ASSERT(ph.ph_data == NULL); 3689 ASSERT(ph.ph_size == 0); 3690 return (rval); 3691 } 3692 3693 /* 3694 * If the encoded data came from a OBP or software 3695 * use the 1275 OBP decode/encode routines. 3696 */ 3697 ph.ph_cur_pos = ph.ph_data; 3698 ph.ph_save_pos = ph.ph_data; 3699 ph.ph_ops = &prop_1275_ops; 3700 ph.ph_flags = (rval == DDI_PROP_FOUND_1275) ? PH_FROM_PROM : 0; 3701 3702 rval = (*prop_decoder)(&ph, data, nelements); 3703 3704 /* 3705 * Free the encoded data 3706 */ 3707 if (ph.ph_size != 0) 3708 kmem_free(ph.ph_data, ph.ph_size); 3709 3710 return (rval); 3711 } 3712 3713 /* 3714 * Lookup and return an array of composite properties. The driver must 3715 * provide the decode routine. 
3716 */ 3717 int 3718 ddi_prop_lookup(dev_t match_dev, dev_info_t *dip, 3719 uint_t flags, char *name, void *data, uint_t *nelements, 3720 int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements)) 3721 { 3722 return (ddi_prop_lookup_common(match_dev, dip, 3723 (flags | DDI_PROP_TYPE_COMPOSITE), name, 3724 data, nelements, prop_decoder)); 3725 } 3726 3727 /* 3728 * Return 1 if a property exists (no type checking done). 3729 * Return 0 if it does not exist. 3730 */ 3731 int 3732 ddi_prop_exists(dev_t match_dev, dev_info_t *dip, uint_t flags, char *name) 3733 { 3734 int i; 3735 uint_t x = 0; 3736 3737 i = ddi_prop_search_common(match_dev, dip, PROP_EXISTS, 3738 flags | DDI_PROP_TYPE_MASK, name, NULL, &x); 3739 return (i == DDI_PROP_SUCCESS || i == DDI_PROP_FOUND_1275); 3740 } 3741 3742 3743 /* 3744 * Update an array of composite properties. The driver must 3745 * provide the encode routine. 3746 */ 3747 int 3748 ddi_prop_update(dev_t match_dev, dev_info_t *dip, 3749 char *name, void *data, uint_t nelements, 3750 int (*prop_create)(prop_handle_t *, void *data, uint_t nelements)) 3751 { 3752 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_COMPOSITE, 3753 name, data, nelements, prop_create)); 3754 } 3755 3756 /* 3757 * Get a single integer or boolean property and return it. 3758 * If the property does not exists, or cannot be decoded, 3759 * then return the defvalue passed in. 3760 * 3761 * This routine always succeeds. 
3762 */ 3763 int 3764 ddi_prop_get_int(dev_t match_dev, dev_info_t *dip, uint_t flags, 3765 char *name, int defvalue) 3766 { 3767 int data; 3768 uint_t nelements; 3769 int rval; 3770 3771 if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM | 3772 LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) { 3773 #ifdef DEBUG 3774 if (dip != NULL) { 3775 cmn_err(CE_WARN, "ddi_prop_get_int: invalid flag" 3776 " 0x%x (prop = %s, node = %s%d)", flags, 3777 name, ddi_driver_name(dip), ddi_get_instance(dip)); 3778 } 3779 #endif /* DEBUG */ 3780 flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM | 3781 LDI_DEV_T_ANY | DDI_UNBND_DLPI2; 3782 } 3783 3784 if ((rval = ddi_prop_lookup_common(match_dev, dip, 3785 (flags | DDI_PROP_TYPE_INT), name, &data, &nelements, 3786 ddi_prop_fm_decode_int)) != DDI_PROP_SUCCESS) { 3787 if (rval == DDI_PROP_END_OF_DATA) 3788 data = 1; 3789 else 3790 data = defvalue; 3791 } 3792 return (data); 3793 } 3794 3795 /* 3796 * Get a single 64 bit integer or boolean property and return it. 3797 * If the property does not exists, or cannot be decoded, 3798 * then return the defvalue passed in. 3799 * 3800 * This routine always succeeds. 
/*
 * Get a single 64 bit integer or boolean property and return it.
 * If the property does not exists, or cannot be decoded,
 * then return the defvalue passed in.
 *
 * This routine always succeeds.
 */
int64_t
ddi_prop_get_int64(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int64_t defvalue)
{
	int64_t data;
	uint_t nelements;
	int rval;

	/*
	 * Unlike ddi_prop_get_int(), invalid flags are a hard failure
	 * here; note the error code is returned where a property value
	 * is normally returned.
	 */
	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_get_int64: invalid flag"
			    " 0x%x (prop = %s, node = %s%d)", flags,
			    name, ddi_driver_name(dip), ddi_get_instance(dip));
		}
#endif /* DEBUG */
		return (DDI_PROP_INVAL_ARG);
	}

	/* TYPE_INT64 is never PROM-derived, hence DDI_PROP_NOTPROM. */
	if ((rval = ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
	    name, &data, &nelements, ddi_prop_fm_decode_int64))
	    != DDI_PROP_SUCCESS) {
		/* Existing but valueless (boolean) property reads as 1. */
		if (rval == DDI_PROP_END_OF_DATA)
			data = 1;
		else
			data = defvalue;
	}
	return (data);
}

/*
 * Get an array of integer property
 */
int
ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int **data, uint_t *nelements)
{
	/* Invalid flag bits are warned about (DEBUG) and masked off. */
	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_lookup_int_array: "
			    "invalid flag 0x%x (prop = %s, node = %s%d)",
			    flags, name, ddi_driver_name(dip),
			    ddi_get_instance(dip));
		}
#endif /* DEBUG */
		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
	}

	return (ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_INT), name, data,
	    nelements, ddi_prop_fm_decode_ints));
}

/*
 * Get an array of 64 bit integer properties
 */
int
ddi_prop_lookup_int64_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int64_t **data, uint_t *nelements)
{
	/* Invalid flag bits are a hard failure here (cf. int array). */
	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_lookup_int64_array: "
			    "invalid flag 0x%x (prop = %s, node = %s%d)",
			    flags, name, ddi_driver_name(dip),
			    ddi_get_instance(dip));
		}
#endif /* DEBUG */
		return (DDI_PROP_INVAL_ARG);
	}

	return (ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
	    name, data, nelements, ddi_prop_fm_decode_int64_array));
}

/*
 * Update a single integer property.  If the property exists on the drivers
 * property list it updates, else it creates it.
 */
int
ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
    char *name, int data)
{
	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
	    name, &data, 1, ddi_prop_fm_encode_ints));
}

/*
 * Update a single 64 bit integer property.
 * Update the driver property list if it exists, else create it.
 */
int
ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
    char *name, int64_t data)
{
	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
	    name, &data, 1, ddi_prop_fm_encode_int64));
}

/* System-defined ("e_ddi") variant of ddi_prop_update_int. */
int
e_ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
    char *name, int data)
{
	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
	    name, &data, 1, ddi_prop_fm_encode_ints));
}

/* System-defined ("e_ddi") variant of ddi_prop_update_int64. */
int
e_ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
    char *name, int64_t data)
{
	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
	    name, &data, 1, ddi_prop_fm_encode_int64));
}
3930 */ 3931 int 3932 ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip, 3933 char *name, int *data, uint_t nelements) 3934 { 3935 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT, 3936 name, data, nelements, ddi_prop_fm_encode_ints)); 3937 } 3938 3939 /* 3940 * Update an array of 64 bit integer properties. 3941 * Update the driver property list if it exists, else create it. 3942 */ 3943 int 3944 ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip, 3945 char *name, int64_t *data, uint_t nelements) 3946 { 3947 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64, 3948 name, data, nelements, ddi_prop_fm_encode_int64)); 3949 } 3950 3951 int 3952 e_ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip, 3953 char *name, int64_t *data, uint_t nelements) 3954 { 3955 return (ddi_prop_update_common(match_dev, dip, 3956 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64, 3957 name, data, nelements, ddi_prop_fm_encode_int64)); 3958 } 3959 3960 int 3961 e_ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip, 3962 char *name, int *data, uint_t nelements) 3963 { 3964 return (ddi_prop_update_common(match_dev, dip, 3965 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT, 3966 name, data, nelements, ddi_prop_fm_encode_ints)); 3967 } 3968 3969 /* 3970 * Get a single string property. 
3971 */ 3972 int 3973 ddi_prop_lookup_string(dev_t match_dev, dev_info_t *dip, uint_t flags, 3974 char *name, char **data) 3975 { 3976 uint_t x; 3977 3978 if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM | 3979 LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) { 3980 #ifdef DEBUG 3981 if (dip != NULL) { 3982 cmn_err(CE_WARN, "%s: invalid flag 0x%x " 3983 "(prop = %s, node = %s%d); invalid bits ignored", 3984 "ddi_prop_lookup_string", flags, name, 3985 ddi_driver_name(dip), ddi_get_instance(dip)); 3986 } 3987 #endif /* DEBUG */ 3988 flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM | 3989 LDI_DEV_T_ANY | DDI_UNBND_DLPI2; 3990 } 3991 3992 return (ddi_prop_lookup_common(match_dev, dip, 3993 (flags | DDI_PROP_TYPE_STRING), name, data, 3994 &x, ddi_prop_fm_decode_string)); 3995 } 3996 3997 /* 3998 * Get an array of strings property. 3999 */ 4000 int 4001 ddi_prop_lookup_string_array(dev_t match_dev, dev_info_t *dip, uint_t flags, 4002 char *name, char ***data, uint_t *nelements) 4003 { 4004 if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM | 4005 LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) { 4006 #ifdef DEBUG 4007 if (dip != NULL) { 4008 cmn_err(CE_WARN, "ddi_prop_lookup_string_array: " 4009 "invalid flag 0x%x (prop = %s, node = %s%d)", 4010 flags, name, ddi_driver_name(dip), 4011 ddi_get_instance(dip)); 4012 } 4013 #endif /* DEBUG */ 4014 flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM | 4015 LDI_DEV_T_ANY | DDI_UNBND_DLPI2; 4016 } 4017 4018 return (ddi_prop_lookup_common(match_dev, dip, 4019 (flags | DDI_PROP_TYPE_STRING), name, data, 4020 nelements, ddi_prop_fm_decode_strings)); 4021 } 4022 4023 /* 4024 * Update a single string property. 
4025 */ 4026 int 4027 ddi_prop_update_string(dev_t match_dev, dev_info_t *dip, 4028 char *name, char *data) 4029 { 4030 return (ddi_prop_update_common(match_dev, dip, 4031 DDI_PROP_TYPE_STRING, name, &data, 1, 4032 ddi_prop_fm_encode_string)); 4033 } 4034 4035 int 4036 e_ddi_prop_update_string(dev_t match_dev, dev_info_t *dip, 4037 char *name, char *data) 4038 { 4039 return (ddi_prop_update_common(match_dev, dip, 4040 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING, 4041 name, &data, 1, ddi_prop_fm_encode_string)); 4042 } 4043 4044 4045 /* 4046 * Update an array of strings property. 4047 */ 4048 int 4049 ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip, 4050 char *name, char **data, uint_t nelements) 4051 { 4052 return (ddi_prop_update_common(match_dev, dip, 4053 DDI_PROP_TYPE_STRING, name, data, nelements, 4054 ddi_prop_fm_encode_strings)); 4055 } 4056 4057 int 4058 e_ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip, 4059 char *name, char **data, uint_t nelements) 4060 { 4061 return (ddi_prop_update_common(match_dev, dip, 4062 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING, 4063 name, data, nelements, 4064 ddi_prop_fm_encode_strings)); 4065 } 4066 4067 4068 /* 4069 * Get an array of bytes property. 
4070 */ 4071 int 4072 ddi_prop_lookup_byte_array(dev_t match_dev, dev_info_t *dip, uint_t flags, 4073 char *name, uchar_t **data, uint_t *nelements) 4074 { 4075 if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM | 4076 LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) { 4077 #ifdef DEBUG 4078 if (dip != NULL) { 4079 cmn_err(CE_WARN, "ddi_prop_lookup_byte_array: " 4080 " invalid flag 0x%x (prop = %s, node = %s%d)", 4081 flags, name, ddi_driver_name(dip), 4082 ddi_get_instance(dip)); 4083 } 4084 #endif /* DEBUG */ 4085 flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM | 4086 LDI_DEV_T_ANY | DDI_UNBND_DLPI2; 4087 } 4088 4089 return (ddi_prop_lookup_common(match_dev, dip, 4090 (flags | DDI_PROP_TYPE_BYTE), name, data, 4091 nelements, ddi_prop_fm_decode_bytes)); 4092 } 4093 4094 /* 4095 * Update an array of bytes property. 4096 */ 4097 int 4098 ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip, 4099 char *name, uchar_t *data, uint_t nelements) 4100 { 4101 if (nelements == 0) 4102 return (DDI_PROP_INVAL_ARG); 4103 4104 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_BYTE, 4105 name, data, nelements, ddi_prop_fm_encode_bytes)); 4106 } 4107 4108 4109 int 4110 e_ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip, 4111 char *name, uchar_t *data, uint_t nelements) 4112 { 4113 if (nelements == 0) 4114 return (DDI_PROP_INVAL_ARG); 4115 4116 return (ddi_prop_update_common(match_dev, dip, 4117 DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE, 4118 name, data, nelements, ddi_prop_fm_encode_bytes)); 4119 } 4120 4121 4122 /* 4123 * ddi_prop_remove_common: Undefine a managed property: 4124 * Input dev_t must match dev_t when defined. 4125 * Returns DDI_PROP_NOT_FOUND, possibly. 4126 * DDI_PROP_INVAL_ARG is also possible if dev is 4127 * DDI_DEV_T_ANY or incoming name is the NULL string. 
4128 */ 4129 int 4130 ddi_prop_remove_common(dev_t dev, dev_info_t *dip, char *name, int flag) 4131 { 4132 ddi_prop_t **list_head = &(DEVI(dip)->devi_drv_prop_ptr); 4133 ddi_prop_t *propp; 4134 ddi_prop_t *lastpropp = NULL; 4135 4136 if ((dev == DDI_DEV_T_ANY) || (name == (char *)0) || 4137 (strlen(name) == 0)) { 4138 return (DDI_PROP_INVAL_ARG); 4139 } 4140 4141 if (flag & DDI_PROP_SYSTEM_DEF) 4142 list_head = &(DEVI(dip)->devi_sys_prop_ptr); 4143 else if (flag & DDI_PROP_HW_DEF) 4144 list_head = &(DEVI(dip)->devi_hw_prop_ptr); 4145 4146 mutex_enter(&(DEVI(dip)->devi_lock)); 4147 4148 for (propp = *list_head; propp != NULL; propp = propp->prop_next) { 4149 if (DDI_STRSAME(propp->prop_name, name) && 4150 (dev == propp->prop_dev)) { 4151 /* 4152 * Unlink this propp allowing for it to 4153 * be first in the list: 4154 */ 4155 4156 if (lastpropp == NULL) 4157 *list_head = propp->prop_next; 4158 else 4159 lastpropp->prop_next = propp->prop_next; 4160 4161 mutex_exit(&(DEVI(dip)->devi_lock)); 4162 4163 /* 4164 * Free memory and return... 4165 */ 4166 kmem_free(propp->prop_name, 4167 strlen(propp->prop_name) + 1); 4168 if (propp->prop_len != 0) 4169 kmem_free(propp->prop_val, propp->prop_len); 4170 kmem_free(propp, sizeof (ddi_prop_t)); 4171 return (DDI_PROP_SUCCESS); 4172 } 4173 lastpropp = propp; 4174 } 4175 mutex_exit(&(DEVI(dip)->devi_lock)); 4176 return (DDI_PROP_NOT_FOUND); 4177 } 4178 4179 int 4180 ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name) 4181 { 4182 return (ddi_prop_remove_common(dev, dip, name, 0)); 4183 } 4184 4185 int 4186 e_ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name) 4187 { 4188 return (ddi_prop_remove_common(dev, dip, name, DDI_PROP_SYSTEM_DEF)); 4189 } 4190 4191 /* 4192 * e_ddi_prop_list_delete: remove a list of properties 4193 * Note that the caller needs to provide the required protection 4194 * (eg. 
devi_lock if these properties are still attached to a devi) 4195 */ 4196 void 4197 e_ddi_prop_list_delete(ddi_prop_t *props) 4198 { 4199 i_ddi_prop_list_delete(props); 4200 } 4201 4202 /* 4203 * ddi_prop_remove_all_common: 4204 * Used before unloading a driver to remove 4205 * all properties. (undefines all dev_t's props.) 4206 * Also removes `explicitly undefined' props. 4207 * No errors possible. 4208 */ 4209 void 4210 ddi_prop_remove_all_common(dev_info_t *dip, int flag) 4211 { 4212 ddi_prop_t **list_head; 4213 4214 mutex_enter(&(DEVI(dip)->devi_lock)); 4215 if (flag & DDI_PROP_SYSTEM_DEF) { 4216 list_head = &(DEVI(dip)->devi_sys_prop_ptr); 4217 } else if (flag & DDI_PROP_HW_DEF) { 4218 list_head = &(DEVI(dip)->devi_hw_prop_ptr); 4219 } else { 4220 list_head = &(DEVI(dip)->devi_drv_prop_ptr); 4221 } 4222 i_ddi_prop_list_delete(*list_head); 4223 *list_head = NULL; 4224 mutex_exit(&(DEVI(dip)->devi_lock)); 4225 } 4226 4227 4228 /* 4229 * ddi_prop_remove_all: Remove all driver prop definitions. 4230 */ 4231 4232 void 4233 ddi_prop_remove_all(dev_info_t *dip) 4234 { 4235 i_ddi_prop_dyn_driver_set(dip, NULL); 4236 ddi_prop_remove_all_common(dip, 0); 4237 } 4238 4239 /* 4240 * e_ddi_prop_remove_all: Remove all system prop definitions. 4241 */ 4242 4243 void 4244 e_ddi_prop_remove_all(dev_info_t *dip) 4245 { 4246 ddi_prop_remove_all_common(dip, (int)DDI_PROP_SYSTEM_DEF); 4247 } 4248 4249 4250 /* 4251 * ddi_prop_undefine: Explicitly undefine a property. Property 4252 * searches which match this property return 4253 * the error code DDI_PROP_UNDEFINED. 4254 * 4255 * Use ddi_prop_remove to negate effect of 4256 * ddi_prop_undefine 4257 * 4258 * See above for error returns. 
4259 */ 4260 4261 int 4262 ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name) 4263 { 4264 if (!(flag & DDI_PROP_CANSLEEP)) 4265 flag |= DDI_PROP_DONTSLEEP; 4266 flag |= DDI_PROP_STACK_CREATE | DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY; 4267 return (ddi_prop_update_common(dev, dip, flag, 4268 name, NULL, 0, ddi_prop_fm_encode_bytes)); 4269 } 4270 4271 int 4272 e_ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name) 4273 { 4274 if (!(flag & DDI_PROP_CANSLEEP)) 4275 flag |= DDI_PROP_DONTSLEEP; 4276 flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE | 4277 DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY; 4278 return (ddi_prop_update_common(dev, dip, flag, 4279 name, NULL, 0, ddi_prop_fm_encode_bytes)); 4280 } 4281 4282 /* 4283 * Support for gathering dynamic properties in devinfo snapshot. 4284 */ 4285 void 4286 i_ddi_prop_dyn_driver_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp) 4287 { 4288 DEVI(dip)->devi_prop_dyn_driver = dp; 4289 } 4290 4291 i_ddi_prop_dyn_t * 4292 i_ddi_prop_dyn_driver_get(dev_info_t *dip) 4293 { 4294 return (DEVI(dip)->devi_prop_dyn_driver); 4295 } 4296 4297 void 4298 i_ddi_prop_dyn_parent_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp) 4299 { 4300 DEVI(dip)->devi_prop_dyn_parent = dp; 4301 } 4302 4303 i_ddi_prop_dyn_t * 4304 i_ddi_prop_dyn_parent_get(dev_info_t *dip) 4305 { 4306 return (DEVI(dip)->devi_prop_dyn_parent); 4307 } 4308 4309 void 4310 i_ddi_prop_dyn_cache_invalidate(dev_info_t *dip, i_ddi_prop_dyn_t *dp) 4311 { 4312 /* for now we invalidate the entire cached snapshot */ 4313 if (dip && dp) 4314 i_ddi_di_cache_invalidate(); 4315 } 4316 4317 /* ARGSUSED */ 4318 void 4319 ddi_prop_cache_invalidate(dev_t dev, dev_info_t *dip, char *name, int flags) 4320 { 4321 /* for now we invalidate the entire cached snapshot */ 4322 i_ddi_di_cache_invalidate(); 4323 } 4324 4325 4326 /* 4327 * Code to search hardware layer (PROM), if it exists, on behalf of child. 
4328 * 4329 * if input dip != child_dip, then call is on behalf of child 4330 * to search PROM, do it via ddi_prop_search_common() and ascend only 4331 * if allowed. 4332 * 4333 * if input dip == ch_dip (child_dip), call is on behalf of root driver, 4334 * to search for PROM defined props only. 4335 * 4336 * Note that the PROM search is done only if the requested dev 4337 * is either DDI_DEV_T_ANY or DDI_DEV_T_NONE. PROM properties 4338 * have no associated dev, thus are automatically associated with 4339 * DDI_DEV_T_NONE. 4340 * 4341 * Modifying flag DDI_PROP_NOTPROM inhibits the search in the h/w layer. 4342 * 4343 * Returns DDI_PROP_FOUND_1275 if found to indicate to framework 4344 * that the property resides in the prom. 4345 */ 4346 int 4347 impl_ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip, 4348 ddi_prop_op_t prop_op, int mod_flags, 4349 char *name, caddr_t valuep, int *lengthp) 4350 { 4351 int len; 4352 caddr_t buffer; 4353 4354 /* 4355 * If requested dev is DDI_DEV_T_NONE or DDI_DEV_T_ANY, then 4356 * look in caller's PROM if it's a self identifying device... 4357 * 4358 * Note that this is very similar to ddi_prop_op, but we 4359 * search the PROM instead of the s/w defined properties, 4360 * and we are called on by the parent driver to do this for 4361 * the child. 4362 */ 4363 4364 if (((dev == DDI_DEV_T_NONE) || (dev == DDI_DEV_T_ANY)) && 4365 ndi_dev_is_prom_node(ch_dip) && 4366 ((mod_flags & DDI_PROP_NOTPROM) == 0)) { 4367 len = prom_getproplen((pnode_t)DEVI(ch_dip)->devi_nodeid, name); 4368 if (len == -1) { 4369 return (DDI_PROP_NOT_FOUND); 4370 } 4371 4372 /* 4373 * If exists only request, we're done 4374 */ 4375 if (prop_op == PROP_EXISTS) { 4376 return (DDI_PROP_FOUND_1275); 4377 } 4378 4379 /* 4380 * If length only request or prop length == 0, get out 4381 */ 4382 if ((prop_op == PROP_LEN) || (len == 0)) { 4383 *lengthp = len; 4384 return (DDI_PROP_FOUND_1275); 4385 } 4386 4387 /* 4388 * Allocate buffer if required... 
(either way `buffer' 4389 * is receiving address). 4390 */ 4391 4392 switch (prop_op) { 4393 4394 case PROP_LEN_AND_VAL_ALLOC: 4395 4396 buffer = kmem_alloc((size_t)len, 4397 mod_flags & DDI_PROP_CANSLEEP ? 4398 KM_SLEEP : KM_NOSLEEP); 4399 if (buffer == NULL) { 4400 return (DDI_PROP_NO_MEMORY); 4401 } 4402 *(caddr_t *)valuep = buffer; 4403 break; 4404 4405 case PROP_LEN_AND_VAL_BUF: 4406 4407 if (len > (*lengthp)) { 4408 *lengthp = len; 4409 return (DDI_PROP_BUF_TOO_SMALL); 4410 } 4411 4412 buffer = valuep; 4413 break; 4414 4415 default: 4416 break; 4417 } 4418 4419 /* 4420 * Call the PROM function to do the copy. 4421 */ 4422 (void) prom_getprop((pnode_t)DEVI(ch_dip)->devi_nodeid, 4423 name, buffer); 4424 4425 *lengthp = len; /* return the actual length to the caller */ 4426 (void) impl_fix_props(dip, ch_dip, name, len, buffer); 4427 return (DDI_PROP_FOUND_1275); 4428 } 4429 4430 return (DDI_PROP_NOT_FOUND); 4431 } 4432 4433 /* 4434 * The ddi_bus_prop_op default bus nexus prop op function. 4435 * 4436 * Code to search hardware layer (PROM), if it exists, 4437 * on behalf of child, then, if appropriate, ascend and check 4438 * my own software defined properties... 
4439 */ 4440 int 4441 ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip, 4442 ddi_prop_op_t prop_op, int mod_flags, 4443 char *name, caddr_t valuep, int *lengthp) 4444 { 4445 int error; 4446 4447 error = impl_ddi_bus_prop_op(dev, dip, ch_dip, prop_op, mod_flags, 4448 name, valuep, lengthp); 4449 4450 if (error == DDI_PROP_SUCCESS || error == DDI_PROP_FOUND_1275 || 4451 error == DDI_PROP_BUF_TOO_SMALL) 4452 return (error); 4453 4454 if (error == DDI_PROP_NO_MEMORY) { 4455 cmn_err(CE_CONT, prop_no_mem_msg, name); 4456 return (DDI_PROP_NO_MEMORY); 4457 } 4458 4459 /* 4460 * Check the 'options' node as a last resort 4461 */ 4462 if ((mod_flags & DDI_PROP_DONTPASS) != 0) 4463 return (DDI_PROP_NOT_FOUND); 4464 4465 if (ch_dip == ddi_root_node()) { 4466 /* 4467 * As a last resort, when we've reached 4468 * the top and still haven't found the 4469 * property, see if the desired property 4470 * is attached to the options node. 4471 * 4472 * The options dip is attached right after boot. 4473 */ 4474 ASSERT(options_dip != NULL); 4475 /* 4476 * Force the "don't pass" flag to *just* see 4477 * what the options node has to offer. 4478 */ 4479 return (ddi_prop_search_common(dev, options_dip, prop_op, 4480 mod_flags|DDI_PROP_DONTPASS, name, valuep, 4481 (uint_t *)lengthp)); 4482 } 4483 4484 /* 4485 * Otherwise, continue search with parent's s/w defined properties... 4486 * NOTE: Using `dip' in following call increments the level. 4487 */ 4488 4489 return (ddi_prop_search_common(dev, dip, prop_op, mod_flags, 4490 name, valuep, (uint_t *)lengthp)); 4491 } 4492 4493 /* 4494 * External property functions used by other parts of the kernel... 4495 */ 4496 4497 /* 4498 * e_ddi_getlongprop: See comments for ddi_get_longprop. 
4499 */ 4500 4501 int 4502 e_ddi_getlongprop(dev_t dev, vtype_t type, char *name, int flags, 4503 caddr_t valuep, int *lengthp) 4504 { 4505 _NOTE(ARGUNUSED(type)) 4506 dev_info_t *devi; 4507 ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_ALLOC; 4508 int error; 4509 4510 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) 4511 return (DDI_PROP_NOT_FOUND); 4512 4513 error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp); 4514 ddi_release_devi(devi); 4515 return (error); 4516 } 4517 4518 /* 4519 * e_ddi_getlongprop_buf: See comments for ddi_getlongprop_buf. 4520 */ 4521 4522 int 4523 e_ddi_getlongprop_buf(dev_t dev, vtype_t type, char *name, int flags, 4524 caddr_t valuep, int *lengthp) 4525 { 4526 _NOTE(ARGUNUSED(type)) 4527 dev_info_t *devi; 4528 ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF; 4529 int error; 4530 4531 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) 4532 return (DDI_PROP_NOT_FOUND); 4533 4534 error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp); 4535 ddi_release_devi(devi); 4536 return (error); 4537 } 4538 4539 /* 4540 * e_ddi_getprop: See comments for ddi_getprop. 4541 */ 4542 int 4543 e_ddi_getprop(dev_t dev, vtype_t type, char *name, int flags, int defvalue) 4544 { 4545 _NOTE(ARGUNUSED(type)) 4546 dev_info_t *devi; 4547 ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF; 4548 int propvalue = defvalue; 4549 int proplength = sizeof (int); 4550 int error; 4551 4552 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) 4553 return (defvalue); 4554 4555 error = cdev_prop_op(dev, devi, prop_op, 4556 flags, name, (caddr_t)&propvalue, &proplength); 4557 ddi_release_devi(devi); 4558 4559 if ((error == DDI_PROP_SUCCESS) && (proplength == 0)) 4560 propvalue = 1; 4561 4562 return (propvalue); 4563 } 4564 4565 /* 4566 * e_ddi_getprop_int64: 4567 * 4568 * This is a typed interfaces, but predates typed properties. With the 4569 * introduction of typed properties the framework tries to ensure 4570 * consistent use of typed interfaces. 
This is why TYPE_INT64 is not 4571 * part of TYPE_ANY. E_ddi_getprop_int64 is a special case where a 4572 * typed interface invokes legacy (non-typed) interfaces: 4573 * cdev_prop_op(), prop_op(9E), ddi_prop_op(9F)). In this case the 4574 * fact that TYPE_INT64 is not part of TYPE_ANY matters. To support 4575 * this type of lookup as a single operation we invoke the legacy 4576 * non-typed interfaces with the special CONSUMER_TYPED bit set. The 4577 * framework ddi_prop_op(9F) implementation is expected to check for 4578 * CONSUMER_TYPED and, if set, expand type bits beyond TYPE_ANY 4579 * (currently TYPE_INT64). 4580 */ 4581 int64_t 4582 e_ddi_getprop_int64(dev_t dev, vtype_t type, char *name, 4583 int flags, int64_t defvalue) 4584 { 4585 _NOTE(ARGUNUSED(type)) 4586 dev_info_t *devi; 4587 ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF; 4588 int64_t propvalue = defvalue; 4589 int proplength = sizeof (propvalue); 4590 int error; 4591 4592 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) 4593 return (defvalue); 4594 4595 error = cdev_prop_op(dev, devi, prop_op, flags | 4596 DDI_PROP_CONSUMER_TYPED, name, (caddr_t)&propvalue, &proplength); 4597 ddi_release_devi(devi); 4598 4599 if ((error == DDI_PROP_SUCCESS) && (proplength == 0)) 4600 propvalue = 1; 4601 4602 return (propvalue); 4603 } 4604 4605 /* 4606 * e_ddi_getproplen: See comments for ddi_getproplen. 
4607 */ 4608 int 4609 e_ddi_getproplen(dev_t dev, vtype_t type, char *name, int flags, int *lengthp) 4610 { 4611 _NOTE(ARGUNUSED(type)) 4612 dev_info_t *devi; 4613 ddi_prop_op_t prop_op = PROP_LEN; 4614 int error; 4615 4616 if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) 4617 return (DDI_PROP_NOT_FOUND); 4618 4619 error = cdev_prop_op(dev, devi, prop_op, flags, name, NULL, lengthp); 4620 ddi_release_devi(devi); 4621 return (error); 4622 } 4623 4624 /* 4625 * Routines to get at elements of the dev_info structure 4626 */ 4627 4628 /* 4629 * ddi_binding_name: Return the driver binding name of the devinfo node 4630 * This is the name the OS used to bind the node to a driver. 4631 */ 4632 char * 4633 ddi_binding_name(dev_info_t *dip) 4634 { 4635 return (DEVI(dip)->devi_binding_name); 4636 } 4637 4638 /* 4639 * ddi_driver_major: Return the major number of the driver that 4640 * the supplied devinfo is bound to. If not yet bound, 4641 * DDI_MAJOR_T_NONE. 4642 * 4643 * When used by the driver bound to 'devi', this 4644 * function will reliably return the driver major number. 4645 * Other ways of determining the driver major number, such as 4646 * major = ddi_name_to_major(ddi_get_name(devi)); 4647 * major = ddi_name_to_major(ddi_binding_name(devi)); 4648 * can return a different result as the driver/alias binding 4649 * can change dynamically, and thus should be avoided. 4650 */ 4651 major_t 4652 ddi_driver_major(dev_info_t *devi) 4653 { 4654 return (DEVI(devi)->devi_major); 4655 } 4656 4657 /* 4658 * ddi_driver_name: Return the normalized driver name. this is the 4659 * actual driver name 4660 */ 4661 const char * 4662 ddi_driver_name(dev_info_t *devi) 4663 { 4664 major_t major; 4665 4666 if ((major = ddi_driver_major(devi)) != DDI_MAJOR_T_NONE) 4667 return (ddi_major_to_name(major)); 4668 4669 return (ddi_node_name(devi)); 4670 } 4671 4672 /* 4673 * i_ddi_set_binding_name: Set binding name. 4674 * 4675 * Set the binding name to the given name. 
 * This routine is for use by the ddi implementation, not by drivers.
 */
void
i_ddi_set_binding_name(dev_info_t *dip, char *name)
{
	/* NOTE(review): stores the caller's pointer directly — the name is
	 * presumably expected to outlive the node; confirm with callers. */
	DEVI(dip)->devi_binding_name = name;
}

/*
 * ddi_get_name: A synonym of ddi_binding_name() ... returns a name
 * the implementation has used to bind the node to a driver.
 */
char *
ddi_get_name(dev_info_t *dip)
{
	return (DEVI(dip)->devi_binding_name);
}

/*
 * ddi_node_name: Return the name property of the devinfo node
 *		This may differ from ddi_binding_name if the node name
 *		does not define a binding to a driver (i.e. generic names).
 */
char *
ddi_node_name(dev_info_t *dip)
{
	return (DEVI(dip)->devi_node_name);
}


/*
 * ddi_get_nodeid: Get nodeid stored in dev_info structure.
 */
int
ddi_get_nodeid(dev_info_t *dip)
{
	return (DEVI(dip)->devi_nodeid);
}

/* Return the instance number assigned to this devinfo node. */
int
ddi_get_instance(dev_info_t *dip)
{
	return (DEVI(dip)->devi_instance);
}

/* Return the dev_ops vector the node is bound to (NULL if unbound). */
struct dev_ops *
ddi_get_driver(dev_info_t *dip)
{
	return (DEVI(dip)->devi_ops);
}

/* Bind the node to the given dev_ops vector. Implementation use only. */
void
ddi_set_driver(dev_info_t *dip, struct dev_ops *devo)
{
	DEVI(dip)->devi_ops = devo;
}

/*
 * ddi_set_driver_private/ddi_get_driver_private:
 * Get/set device driver private data in devinfo.
 */
void
ddi_set_driver_private(dev_info_t *dip, void *data)
{
	DEVI(dip)->devi_driver_data = data;
}

void *
ddi_get_driver_private(dev_info_t *dip)
{
	return (DEVI(dip)->devi_driver_data);
}

/*
 * ddi_get_parent, ddi_get_child, ddi_get_next_sibling
 *
 * Simple devinfo tree traversal accessors; each returns the linked
 * node or NULL when there is none.
 */

dev_info_t *
ddi_get_parent(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_parent);
}

dev_info_t *
ddi_get_child(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_child);
}

dev_info_t *
ddi_get_next_sibling(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_sibling);
}

dev_info_t *
ddi_get_next(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_next);
}

void
ddi_set_next(dev_info_t *dip, dev_info_t *nextdip)
{
	DEVI(dip)->devi_next = DEVI(nextdip);
}

/*
 * ddi_root_node: Return root node of devinfo tree
 */

dev_info_t *
ddi_root_node(void)
{
	extern dev_info_t *top_devinfo;

	return (top_devinfo);
}

/*
 * Miscellaneous functions:
 */

/*
 * Implementation specific hooks
 */

void
ddi_report_dev(dev_info_t *d)
{
	char *b;

	(void) ddi_ctlops(d, d, DDI_CTLOPS_REPORTDEV, (void *)0, (void *)0);

	/*
	 * If this devinfo node has cb_ops, it's implicitly accessible from
	 * userland, so we print its full name together with the instance
	 * number 'abbreviation' that the driver may use internally.
	 *
	 * KM_NOSLEEP: reporting is best-effort; if the pathname buffer
	 * cannot be allocated the message is simply skipped.
	 */
	if (DEVI(d)->devi_ops->devo_cb_ops != (struct cb_ops *)0 &&
	    (b = kmem_zalloc(MAXPATHLEN, KM_NOSLEEP))) {
		cmn_err(CE_CONT, "?%s%d is %s\n",
		    ddi_driver_name(d), ddi_get_instance(d),
		    ddi_pathname(d, b));
		kmem_free(b, MAXPATHLEN);
	}
}

/*
 * ddi_ctlops() is described in the assembler not to buy a new register
 * window when it's called and can reduce cost in climbing the device tree
 * without using the tail call optimization.
 */
int
ddi_dev_regsize(dev_info_t *dev, uint_t rnumber, off_t *result)
{
	int ret;

	/* Ask the parent nexus for the size of register set 'rnumber'. */
	ret = ddi_ctlops(dev, dev, DDI_CTLOPS_REGSIZE,
	    (void *)&rnumber, (void *)result);

	return (ret == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
}

int
ddi_dev_nregs(dev_info_t *dev, int *result)
{
	return (ddi_ctlops(dev, dev, DDI_CTLOPS_NREGS, 0, (void *)result));
}

/* Ask the parent whether this is a self-identifying device. */
int
ddi_dev_is_sid(dev_info_t *d)
{
	return (ddi_ctlops(d, d, DDI_CTLOPS_SIDDEV, (void *)0, (void *)0));
}

int
ddi_slaveonly(dev_info_t *d)
{
	return (ddi_ctlops(d, d, DDI_CTLOPS_SLAVEONLY, (void *)0, (void *)0));
}

int
ddi_dev_affinity(dev_info_t *a, dev_info_t *b)
{
	return (ddi_ctlops(a, a, DDI_CTLOPS_AFFINITY, (void *)b, (void *)0));
}

/*
 * Returns DDI_SUCCESS iff the node is attached and its cb_ops has a
 * non-NULL cb_str (STREAMS table), i.e. it is a STREAMS driver.
 */
int
ddi_streams_driver(dev_info_t *dip)
{
	if (i_ddi_devi_attached(dip) &&
	    (DEVI(dip)->devi_ops->devo_cb_ops != NULL) &&
	    (DEVI(dip)->devi_ops->devo_cb_ops->cb_str != NULL))
		return (DDI_SUCCESS);
	return (DDI_FAILURE);
}

/*
 * callback free list
 */

static int ncallbacks;		/* size of the static L2 pool, set at init */
static int nc_low = 170;	/* L2 pool size for small-memory machines */
static int nc_med = 512;
static int nc_high = 2048;
static struct ddi_callback *callbackq;		/* the static L2 pool */
static struct ddi_callback *callbackqfree;	/* free-list head within it */

/*
 * set/run callback lists
 *
 * Named kstat counters exported as unix:0:cbstats; the nc_* macros
 * below give short accessors for the ui32 values.
 */
struct cbstats {
	kstat_named_t cb_asked;
	kstat_named_t cb_new;
	kstat_named_t cb_run;
	kstat_named_t cb_delete;
	kstat_named_t cb_maxreq;
	kstat_named_t cb_maxlist;
	kstat_named_t cb_alloc;
	kstat_named_t cb_runouts;
	kstat_named_t cb_L2;
	kstat_named_t cb_grow;
} cbstats = {
	{"asked", KSTAT_DATA_UINT32},
	{"new", KSTAT_DATA_UINT32},
	{"run", KSTAT_DATA_UINT32},
	{"delete", KSTAT_DATA_UINT32},
	{"maxreq", KSTAT_DATA_UINT32},
	{"maxlist", KSTAT_DATA_UINT32},
	{"alloc", KSTAT_DATA_UINT32},
	{"runouts", KSTAT_DATA_UINT32},
	{"L2", KSTAT_DATA_UINT32},
	{"grow", KSTAT_DATA_UINT32},
};

#define	nc_asked	cb_asked.value.ui32
#define	nc_new		cb_new.value.ui32
#define	nc_run		cb_run.value.ui32
#define	nc_delete	cb_delete.value.ui32
#define	nc_maxreq	cb_maxreq.value.ui32
#define	nc_maxlist	cb_maxlist.value.ui32
#define	nc_alloc	cb_alloc.value.ui32
#define	nc_runouts	cb_runouts.value.ui32
#define	nc_L2		cb_L2.value.ui32
#define	nc_grow		cb_grow.value.ui32

/* Protects the callback lists, the L2 free list, and cbstats. */
static kmutex_t ddi_callback_mutex;

/*
 * callbacks are handled using a L1/L2 cache. The L1 cache
 * comes out of kmem_cache_alloc and can expand/shrink dynamically. If
 * we can't get callbacks from the L1 cache [because pageout is doing
 * I/O at the time freemem is 0], we allocate callbacks out of the
 * L2 cache. The L2 cache is static and depends on the memory size.
 * [We might also count the number of devices at probe time and
 * allocate one structure per device and adjust for deferred attach]
 */
void
impl_ddi_callback_init(void)
{
	int i;
	uint_t physmegs;
	kstat_t *ksp;

	/* Scale the static L2 pool by physical memory size (in MB). */
	physmegs = physmem >> (20 - PAGESHIFT);
	if (physmegs < 48) {
		ncallbacks = nc_low;
	} else if (physmegs < 128) {
		ncallbacks = nc_med;
	} else {
		ncallbacks = nc_high;
	}

	/*
	 * init free list
	 * (kmem_zalloc leaves the last element's c_nfree NULL, which
	 * terminates the list.)
	 */
	callbackq = kmem_zalloc(
	    ncallbacks * sizeof (struct ddi_callback), KM_SLEEP);
	for (i = 0; i < ncallbacks-1; i++)
		callbackq[i].c_nfree = &callbackq[i+1];
	callbackqfree = callbackq;

	/* init kstats */
	if (ksp = kstat_create("unix", 0, "cbstats", "misc", KSTAT_TYPE_NAMED,
	    sizeof (cbstats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) {
		ksp->ks_data = (void *) &cbstats;
		kstat_install(ksp);
	}
}

/*
 * Insert (funcp, arg) on the callback list headed at *listid, or bump
 * the count of an existing identical entry. Allocation falls back from
 * kmem (L1) to the static L2 pool, and finally to kmem_alloc_tryhard
 * with KM_PANIC. Caller must hold ddi_callback_mutex.
 */
static void
callback_insert(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid,
    int count)
{
	struct ddi_callback *list, *marker, *new;
	size_t size = sizeof (struct ddi_callback);

	/* Coalesce with an existing entry for the same (func, arg). */
	list = marker = (struct ddi_callback *)*listid;
	while (list != NULL) {
		if (list->c_call == funcp && list->c_arg == arg) {
			list->c_count += count;
			return;
		}
		marker = list;
		list = list->c_nlist;
	}
	new = kmem_alloc(size, KM_NOSLEEP);
	if (new == NULL) {
		/* L1 failed; try the static L2 pool, then tryhard/panic. */
		new = callbackqfree;
		if (new == NULL) {
			new = kmem_alloc_tryhard(sizeof (struct ddi_callback),
			    &size, KM_NOSLEEP | KM_PANIC);
			cbstats.nc_grow++;
		} else {
			callbackqfree = new->c_nfree;
			cbstats.nc_L2++;
		}
	}
	/* Append to the list tail (marker), or start the list. */
	if (marker != NULL) {
		marker->c_nlist = new;
	} else {
		*listid = (uintptr_t)new;
	}
	new->c_size = size;
	new->c_nlist = NULL;
	new->c_call = funcp;
	new->c_arg = arg;
	new->c_count = count;
	cbstats.nc_new++;
	cbstats.nc_alloc++;
	if (cbstats.nc_alloc > cbstats.nc_maxlist)
		cbstats.nc_maxlist = cbstats.nc_alloc;
}

void
ddi_set_callback(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid)
{
	mutex_enter(&ddi_callback_mutex);
	cbstats.nc_asked++;
	if ((cbstats.nc_asked - cbstats.nc_run) > cbstats.nc_maxreq)
		cbstats.nc_maxreq = (cbstats.nc_asked - cbstats.nc_run);
	(void) callback_insert(funcp, arg, listid, 1);
	mutex_exit(&ddi_callback_mutex);
}

/*
 * Drain the callback list headed at *Queue (run via softcall). Each
 * entry is removed under ddi_callback_mutex, then its function is
 * invoked with the lock dropped; a zero return means "could not make
 * progress" and the entry is re-inserted. Entries from the static L2
 * pool go back on callbackqfree, others to kmem.
 */
static void
real_callback_run(void *Queue)
{
	int (*funcp)(caddr_t);
	caddr_t arg;
	int count, rval;
	uintptr_t *listid;
	struct ddi_callback *list, *marker;
	int check_pending = 1;
	int pending = 0;

	do {
		mutex_enter(&ddi_callback_mutex);
		listid = Queue;
		list = (struct ddi_callback *)*listid;
		if (list == NULL) {
			mutex_exit(&ddi_callback_mutex);
			return;
		}
		/* First pass: total outstanding requests on the list. */
		if (check_pending) {
			marker = list;
			while (marker != NULL) {
				pending += marker->c_count;
				marker = marker->c_nlist;
			}
			check_pending = 0;
		}
		ASSERT(pending > 0);
		ASSERT(list->c_count > 0);
		funcp = list->c_call;
		arg = list->c_arg;
		count = list->c_count;
		*(uintptr_t *)Queue = (uintptr_t)list->c_nlist;
		/* Return L2-pool entries to the pool, others to kmem. */
		if (list >= &callbackq[0] &&
		    list <= &callbackq[ncallbacks-1]) {
			list->c_nfree = callbackqfree;
			callbackqfree = list;
		} else
			kmem_free(list, list->c_size);

		cbstats.nc_delete++;
		cbstats.nc_alloc--;
		mutex_exit(&ddi_callback_mutex);

		/* Invoke the callback without the lock held. */
		do {
			if ((rval = (*funcp)(arg)) == 0) {
				/* No progress: requeue the whole count. */
				pending -= count;
				mutex_enter(&ddi_callback_mutex);
				(void) callback_insert(funcp, arg, listid,
				    count);
				cbstats.nc_runouts++;
			} else {
				pending--;
				mutex_enter(&ddi_callback_mutex);
				cbstats.nc_run++;
			}
			mutex_exit(&ddi_callback_mutex);
		} while (rval != 0 && (--count > 0));
	} while (pending > 0);
}

void
ddi_run_callback(uintptr_t *listid)
{
	/* Defer the drain to softcall context. */
	softcall(real_callback_run, listid);
}

/*
 * ddi_periodic_t
 * ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval,
 *     int level)
 *
 * INTERFACE LEVEL
 *	Solaris DDI specific (Solaris DDI)
 *
 * PARAMETERS
 *	func: the callback function
 *
 *	The callback function will be invoked. The function is invoked
 *	in kernel context if the argument level passed is the zero.
 *	Otherwise it's invoked in interrupt context at the specified
 *	level.
 *
 *	arg: the argument passed to the callback function
 *
 *	interval: interval time
 *
 *	level : callback interrupt level
 *
 *	If the value is the zero, the callback function is invoked
 *	in kernel context. If the value is more than the zero, but
 *	less than or equal to ten, the callback function is invoked in
 *	interrupt context at the specified interrupt level, which may
 *	be used for real time applications.
 *
 *	This value must be in range of 0-10, which can be a numeric
 *	number or a pre-defined macro (DDI_IPL_0, ... , DDI_IPL_10).
 *
 * DESCRIPTION
 *	ddi_periodic_add(9F) schedules the specified function to be
 *	periodically invoked in the interval time.
 *
 *	As well as timeout(9F), the exact time interval over which the function
 *	takes effect cannot be guaranteed, but the value given is a close
 *	approximation.
 *
 *	Drivers waiting on behalf of processes with real-time constraints must
 *	pass non-zero value with the level argument to ddi_periodic_add(9F).
 *
 * RETURN VALUES
 *	ddi_periodic_add(9F) returns a non-zero opaque value (ddi_periodic_t),
 *	which must be used for ddi_periodic_delete(9F) to specify the request.
 *
 * CONTEXT
 *	ddi_periodic_add(9F) can be called in user or kernel context, but
 *	it cannot be called in interrupt context, which is different from
 *	timeout(9F).
 */
ddi_periodic_t
ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval, int level)
{
	/*
	 * Sanity check of the argument level.
	 */
	if (level < DDI_IPL_0 || level > DDI_IPL_10)
		cmn_err(CE_PANIC,
		    "ddi_periodic_add: invalid interrupt level (%d).", level);

	/*
	 * Sanity check of the context. ddi_periodic_add() cannot be
	 * called in either interrupt context or high interrupt context.
	 */
	if (servicing_interrupt())
		cmn_err(CE_PANIC,
		    "ddi_periodic_add: called in (high) interrupt context.");

	/* i_timeout() returns the opaque id ddi_periodic_delete() expects. */
	return ((ddi_periodic_t)i_timeout(func, arg, interval, level));
}

/*
 * void
 * ddi_periodic_delete(ddi_periodic_t req)
 *
 * INTERFACE LEVEL
 *	Solaris DDI specific (Solaris DDI)
 *
 * PARAMETERS
 *	req: ddi_periodic_t opaque value ddi_periodic_add(9F) returned
 *	previously.
 *
 * DESCRIPTION
 *	ddi_periodic_delete(9F) cancels the ddi_periodic_add(9F) request
 *	previously requested.
 *
 *	ddi_periodic_delete(9F) will not return until the pending request
 *	is canceled or executed.
 *
 *	As well as untimeout(9F), calling ddi_periodic_delete(9F) for a
 *	timeout which is either running on another CPU, or has already
 *	completed causes no problems. However, unlike untimeout(9F), there is
 *	no restrictions on the lock which might be held across the call to
 *	ddi_periodic_delete(9F).
 *
 *	Drivers should be structured with the understanding that the arrival of
 *	both an interrupt and a timeout for that interrupt can occasionally
 *	occur, in either order.
 *
 * CONTEXT
 *	ddi_periodic_delete(9F) can be called in user or kernel context, but
 *	it cannot be called in interrupt context, which is different from
 *	untimeout(9F).
 */
void
ddi_periodic_delete(ddi_periodic_t req)
{
	/*
	 * Sanity check of the context. ddi_periodic_delete() cannot be
	 * called in either interrupt context or high interrupt context.
	 */
	if (servicing_interrupt())
		cmn_err(CE_PANIC,
		    "ddi_periodic_delete: called in (high) interrupt context.");

	i_untimeout((timeout_t)req);
}

/* getinfo(9E) helper: there is never a devinfo node for this dev_t. */
dev_info_t *
nodevinfo(dev_t dev, int otyp)
{
	_NOTE(ARGUNUSED(dev, otyp))
	return ((dev_info_t *)0);
}

/*
 * A driver should support its own getinfo(9E) entry point. This function
 * is provided as a convenience for ON drivers that don't expect their
 * getinfo(9E) entry point to be called. A driver that uses this must not
 * call ddi_create_minor_node.
 */
int
ddi_no_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	_NOTE(ARGUNUSED(dip, infocmd, arg, result))
	return (DDI_FAILURE);
}

/*
 * A driver should support its own getinfo(9E) entry point. This function
 * is provided as a convenience for ON drivers that where the minor number
 * is the instance. Drivers that do not have 1:1 mapping must implement
 * their own getinfo(9E) function.
 */
int
ddi_getinfo_1to1(dev_info_t *dip, ddi_info_cmd_t infocmd,
    void *arg, void **result)
{
	_NOTE(ARGUNUSED(dip))
	int instance;

	/* Only the dev_t -> instance translation is supported here. */
	if (infocmd != DDI_INFO_DEVT2INSTANCE)
		return (DDI_FAILURE);

	/* minor number == instance number, by this helper's contract */
	instance = getminor((dev_t)(uintptr_t)arg);
	*result = (void *)(uintptr_t)instance;
	return (DDI_SUCCESS);
}

/* attach(9E)/detach(9E) stub that always fails. */
int
ddifail(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	_NOTE(ARGUNUSED(devi, cmd))
	return (DDI_FAILURE);
}

/*
 * The following ddi_no_dma_* functions are stub bus_ops entries for
 * nexus drivers that do not support DMA; each fails with the error
 * appropriate to its operation.
 */
int
ddi_no_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, dmareqp, handlep))
	return (DDI_DMA_NOMAPPING);
}

int
ddi_no_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, attr, waitfp, arg, handlep))
	return (DDI_DMA_BADATTR);
}

int
ddi_no_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}

int
ddi_no_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, dmareq, cp, ccountp))
	return (DDI_DMA_NOMAPPING);
}

int
ddi_no_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}

int
ddi_no_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, off, len, cache_flags))
	return (DDI_FAILURE);
}

int
ddi_no_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, win, offp, lenp, cookiep, ccountp))
	return (DDI_FAILURE);
}

int
ddi_no_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, request, offp, lenp, objp, flags))
	return (DDI_FAILURE);
}

/* No-op entry point. */
void
ddivoid(void)
{}

/* chpoll(9E) stub for drivers that do not support polling. */
int
nochpoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **pollhdrp)
{
	_NOTE(ARGUNUSED(dev, events, anyyet, reventsp, pollhdrp))
	return (ENXIO);
}

/* Return the credentials of the current thread. */
cred_t *
ddi_get_cred(void)
{
	return (CRED());
}

clock_t
ddi_get_lbolt(void)
{
	return ((clock_t)lbolt_hybrid());
}

int64_t
ddi_get_lbolt64(void)
{
	return (lbolt_hybrid());
}

time_t
ddi_get_time(void)
{
	time_t now;

	/* Fall back to the TOD clock if hrestime is not yet set. */
	if ((now = gethrestime_sec()) == 0) {
		timestruc_t ts;
		mutex_enter(&tod_lock);
		ts = tod_get();
		mutex_exit(&tod_lock);
		return (ts.tv_sec);
	} else {
		return (now);
	}
}

pid_t
ddi_get_pid(void)
{
	return (ttoproc(curthread)->p_pid);
}

kt_did_t
ddi_get_kt_did(void)
{
	return (curthread->t_did);
}

/*
 * This function returns B_TRUE if the caller can reasonably expect that a call
 * to cv_wait_sig(9F), cv_timedwait_sig(9F), or qwait_sig(9F) could be awakened
 * by user-level signal. If it returns B_FALSE, then the caller should use
 * other means to make certain that the wait will not hang "forever."
 *
 * It does not check the signal mask, nor for reception of any particular
 * signal.
5392 * 5393 * Currently, a thread can receive a signal if it's not a kernel thread and it 5394 * is not in the middle of exit(2) tear-down. Threads that are in that 5395 * tear-down effectively convert cv_wait_sig to cv_wait, cv_timedwait_sig to 5396 * cv_timedwait, and qwait_sig to qwait. 5397 */ 5398 boolean_t 5399 ddi_can_receive_sig(void) 5400 { 5401 proc_t *pp; 5402 5403 if (curthread->t_proc_flag & TP_LWPEXIT) 5404 return (B_FALSE); 5405 if ((pp = ttoproc(curthread)) == NULL) 5406 return (B_FALSE); 5407 return (pp->p_as != &kas); 5408 } 5409 5410 /* 5411 * Swap bytes in 16-bit [half-]words 5412 */ 5413 void 5414 swab(void *src, void *dst, size_t nbytes) 5415 { 5416 uchar_t *pf = (uchar_t *)src; 5417 uchar_t *pt = (uchar_t *)dst; 5418 uchar_t tmp; 5419 int nshorts; 5420 5421 nshorts = nbytes >> 1; 5422 5423 while (--nshorts >= 0) { 5424 tmp = *pf++; 5425 *pt++ = *pf++; 5426 *pt++ = tmp; 5427 } 5428 } 5429 5430 static void 5431 ddi_append_minor_node(dev_info_t *ddip, struct ddi_minor_data *dmdp) 5432 { 5433 int circ; 5434 struct ddi_minor_data *dp; 5435 5436 ndi_devi_enter(ddip, &circ); 5437 if ((dp = DEVI(ddip)->devi_minor) == (struct ddi_minor_data *)NULL) { 5438 DEVI(ddip)->devi_minor = dmdp; 5439 } else { 5440 while (dp->next != (struct ddi_minor_data *)NULL) 5441 dp = dp->next; 5442 dp->next = dmdp; 5443 } 5444 ndi_devi_exit(ddip, circ); 5445 } 5446 5447 /* 5448 * Part of the obsolete SunCluster DDI Hooks. 5449 * Keep for binary compatibility 5450 */ 5451 minor_t 5452 ddi_getiminor(dev_t dev) 5453 { 5454 return (getminor(dev)); 5455 } 5456 5457 static int 5458 i_log_devfs_minor_create(dev_info_t *dip, char *minor_name) 5459 { 5460 int se_flag; 5461 int kmem_flag; 5462 int se_err; 5463 char *pathname, *class_name; 5464 sysevent_t *ev = NULL; 5465 sysevent_id_t eid; 5466 sysevent_value_t se_val; 5467 sysevent_attr_list_t *ev_attr_list = NULL; 5468 5469 /* determine interrupt context */ 5470 se_flag = (servicing_interrupt()) ? 
SE_NOSLEEP : SE_SLEEP; 5471 kmem_flag = (se_flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP; 5472 5473 i_ddi_di_cache_invalidate(); 5474 5475 #ifdef DEBUG 5476 if ((se_flag == SE_NOSLEEP) && sunddi_debug) { 5477 cmn_err(CE_CONT, "ddi_create_minor_node: called from " 5478 "interrupt level by driver %s", 5479 ddi_driver_name(dip)); 5480 } 5481 #endif /* DEBUG */ 5482 5483 ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_CREATE, EP_DDI, se_flag); 5484 if (ev == NULL) { 5485 goto fail; 5486 } 5487 5488 pathname = kmem_alloc(MAXPATHLEN, kmem_flag); 5489 if (pathname == NULL) { 5490 sysevent_free(ev); 5491 goto fail; 5492 } 5493 5494 (void) ddi_pathname(dip, pathname); 5495 ASSERT(strlen(pathname)); 5496 se_val.value_type = SE_DATA_TYPE_STRING; 5497 se_val.value.sv_string = pathname; 5498 if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME, 5499 &se_val, se_flag) != 0) { 5500 kmem_free(pathname, MAXPATHLEN); 5501 sysevent_free(ev); 5502 goto fail; 5503 } 5504 kmem_free(pathname, MAXPATHLEN); 5505 5506 /* add the device class attribute */ 5507 if ((class_name = i_ddi_devi_class(dip)) != NULL) { 5508 se_val.value_type = SE_DATA_TYPE_STRING; 5509 se_val.value.sv_string = class_name; 5510 if (sysevent_add_attr(&ev_attr_list, 5511 DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) { 5512 sysevent_free_attr(ev_attr_list); 5513 goto fail; 5514 } 5515 } 5516 5517 /* 5518 * allow for NULL minor names 5519 */ 5520 if (minor_name != NULL) { 5521 se_val.value.sv_string = minor_name; 5522 if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME, 5523 &se_val, se_flag) != 0) { 5524 sysevent_free_attr(ev_attr_list); 5525 sysevent_free(ev); 5526 goto fail; 5527 } 5528 } 5529 5530 if (sysevent_attach_attributes(ev, ev_attr_list) != 0) { 5531 sysevent_free_attr(ev_attr_list); 5532 sysevent_free(ev); 5533 goto fail; 5534 } 5535 5536 if ((se_err = log_sysevent(ev, se_flag, &eid)) != 0) { 5537 if (se_err == SE_NO_TRANSPORT) { 5538 cmn_err(CE_WARN, "/devices or /dev may not be current " 5539 "for driver %s (%s). 
Run devfsadm -i %s", 5540 ddi_driver_name(dip), "syseventd not responding", 5541 ddi_driver_name(dip)); 5542 } else { 5543 sysevent_free(ev); 5544 goto fail; 5545 } 5546 } 5547 5548 sysevent_free(ev); 5549 return (DDI_SUCCESS); 5550 fail: 5551 cmn_err(CE_WARN, "/devices or /dev may not be current " 5552 "for driver %s. Run devfsadm -i %s", 5553 ddi_driver_name(dip), ddi_driver_name(dip)); 5554 return (DDI_SUCCESS); 5555 } 5556 5557 /* 5558 * failing to remove a minor node is not of interest 5559 * therefore we do not generate an error message 5560 */ 5561 static int 5562 i_log_devfs_minor_remove(dev_info_t *dip, char *minor_name) 5563 { 5564 char *pathname, *class_name; 5565 sysevent_t *ev; 5566 sysevent_id_t eid; 5567 sysevent_value_t se_val; 5568 sysevent_attr_list_t *ev_attr_list = NULL; 5569 5570 /* 5571 * only log ddi_remove_minor_node() calls outside the scope 5572 * of attach/detach reconfigurations and when the dip is 5573 * still initialized. 5574 */ 5575 if (DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip) || 5576 (i_ddi_node_state(dip) < DS_INITIALIZED)) { 5577 return (DDI_SUCCESS); 5578 } 5579 5580 i_ddi_di_cache_invalidate(); 5581 5582 ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_REMOVE, EP_DDI, SE_SLEEP); 5583 if (ev == NULL) { 5584 return (DDI_SUCCESS); 5585 } 5586 5587 pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 5588 if (pathname == NULL) { 5589 sysevent_free(ev); 5590 return (DDI_SUCCESS); 5591 } 5592 5593 (void) ddi_pathname(dip, pathname); 5594 ASSERT(strlen(pathname)); 5595 se_val.value_type = SE_DATA_TYPE_STRING; 5596 se_val.value.sv_string = pathname; 5597 if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME, 5598 &se_val, SE_SLEEP) != 0) { 5599 kmem_free(pathname, MAXPATHLEN); 5600 sysevent_free(ev); 5601 return (DDI_SUCCESS); 5602 } 5603 5604 kmem_free(pathname, MAXPATHLEN); 5605 5606 /* 5607 * allow for NULL minor names 5608 */ 5609 if (minor_name != NULL) { 5610 se_val.value.sv_string = minor_name; 5611 if 
(sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME, 5612 &se_val, SE_SLEEP) != 0) { 5613 sysevent_free_attr(ev_attr_list); 5614 goto fail; 5615 } 5616 } 5617 5618 if ((class_name = i_ddi_devi_class(dip)) != NULL) { 5619 /* add the device class, driver name and instance attributes */ 5620 5621 se_val.value_type = SE_DATA_TYPE_STRING; 5622 se_val.value.sv_string = class_name; 5623 if (sysevent_add_attr(&ev_attr_list, 5624 DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) { 5625 sysevent_free_attr(ev_attr_list); 5626 goto fail; 5627 } 5628 5629 se_val.value_type = SE_DATA_TYPE_STRING; 5630 se_val.value.sv_string = (char *)ddi_driver_name(dip); 5631 if (sysevent_add_attr(&ev_attr_list, 5632 DEVFS_DRIVER_NAME, &se_val, SE_SLEEP) != 0) { 5633 sysevent_free_attr(ev_attr_list); 5634 goto fail; 5635 } 5636 5637 se_val.value_type = SE_DATA_TYPE_INT32; 5638 se_val.value.sv_int32 = ddi_get_instance(dip); 5639 if (sysevent_add_attr(&ev_attr_list, 5640 DEVFS_INSTANCE, &se_val, SE_SLEEP) != 0) { 5641 sysevent_free_attr(ev_attr_list); 5642 goto fail; 5643 } 5644 5645 } 5646 5647 if (sysevent_attach_attributes(ev, ev_attr_list) != 0) { 5648 sysevent_free_attr(ev_attr_list); 5649 } else { 5650 (void) log_sysevent(ev, SE_SLEEP, &eid); 5651 } 5652 fail: 5653 sysevent_free(ev); 5654 return (DDI_SUCCESS); 5655 } 5656 5657 /* 5658 * Derive the device class of the node. 5659 * Device class names aren't defined yet. Until this is done we use 5660 * devfs event subclass names as device class names. 
5661 */ 5662 static int 5663 derive_devi_class(dev_info_t *dip, char *node_type, int flag) 5664 { 5665 int rv = DDI_SUCCESS; 5666 5667 if (i_ddi_devi_class(dip) == NULL) { 5668 if (strncmp(node_type, DDI_NT_BLOCK, 5669 sizeof (DDI_NT_BLOCK) - 1) == 0 && 5670 (node_type[sizeof (DDI_NT_BLOCK) - 1] == '\0' || 5671 node_type[sizeof (DDI_NT_BLOCK) - 1] == ':') && 5672 strcmp(node_type, DDI_NT_FD) != 0) { 5673 5674 rv = i_ddi_set_devi_class(dip, ESC_DISK, flag); 5675 5676 } else if (strncmp(node_type, DDI_NT_NET, 5677 sizeof (DDI_NT_NET) - 1) == 0 && 5678 (node_type[sizeof (DDI_NT_NET) - 1] == '\0' || 5679 node_type[sizeof (DDI_NT_NET) - 1] == ':')) { 5680 5681 rv = i_ddi_set_devi_class(dip, ESC_NETWORK, flag); 5682 5683 } else if (strncmp(node_type, DDI_NT_PRINTER, 5684 sizeof (DDI_NT_PRINTER) - 1) == 0 && 5685 (node_type[sizeof (DDI_NT_PRINTER) - 1] == '\0' || 5686 node_type[sizeof (DDI_NT_PRINTER) - 1] == ':')) { 5687 5688 rv = i_ddi_set_devi_class(dip, ESC_PRINTER, flag); 5689 5690 } else if (strncmp(node_type, DDI_PSEUDO, 5691 sizeof (DDI_PSEUDO) -1) == 0 && 5692 (strncmp(ESC_LOFI, ddi_node_name(dip), 5693 sizeof (ESC_LOFI) -1) == 0)) { 5694 rv = i_ddi_set_devi_class(dip, ESC_LOFI, flag); 5695 } 5696 } 5697 5698 return (rv); 5699 } 5700 5701 /* 5702 * Check compliance with PSARC 2003/375: 5703 * 5704 * The name must contain only characters a-z, A-Z, 0-9 or _ and it must not 5705 * exceed IFNAMSIZ (16) characters in length. 5706 */ 5707 static boolean_t 5708 verify_name(char *name) 5709 { 5710 size_t len = strlen(name); 5711 char *cp; 5712 5713 if (len == 0 || len > IFNAMSIZ) 5714 return (B_FALSE); 5715 5716 for (cp = name; *cp != '\0'; cp++) { 5717 if (!isalnum(*cp) && *cp != '_') 5718 return (B_FALSE); 5719 } 5720 5721 return (B_TRUE); 5722 } 5723 5724 /* 5725 * ddi_create_minor_common: Create a ddi_minor_data structure and 5726 * attach it to the given devinfo node. 
 */

int
ddi_create_minor_common(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag, ddi_minor_type mtype,
    const char *read_priv, const char *write_priv, mode_t priv_mode)
{
	struct ddi_minor_data *dmdp;
	major_t major;

	/* only character and block special nodes may be created */
	if (spec_type != S_IFCHR && spec_type != S_IFBLK)
		return (DDI_FAILURE);

	if (name == NULL)
		return (DDI_FAILURE);

	/*
	 * Log a message if the minor number the driver is creating
	 * is not expressible on the on-disk filesystem (currently
	 * this is limited to 18 bits both by UFS). The device can
	 * be opened via devfs, but not by device special files created
	 * via mknod().
	 */
	if (minor_num > L_MAXMIN32) {
		cmn_err(CE_WARN,
		    "%s%d:%s minor 0x%x too big for 32-bit applications",
		    ddi_driver_name(dip), ddi_get_instance(dip),
		    name, minor_num);
		return (DDI_FAILURE);
	}

	/* dip must be bound and attached */
	major = ddi_driver_major(dip);
	ASSERT(major != DDI_MAJOR_T_NONE);

	/*
	 * Default node_type to DDI_PSEUDO and issue notice in debug mode
	 */
	if (node_type == NULL) {
		node_type = DDI_PSEUDO;
		NDI_CONFIG_DEBUG((CE_NOTE, "!illegal node_type NULL for %s%d "
		    " minor node %s; default to DDI_PSEUDO",
		    ddi_driver_name(dip), ddi_get_instance(dip), name));
	}

	/*
	 * If the driver is a network driver, ensure that the name falls within
	 * the interface naming constraints specified by PSARC/2003/375.
	 */
	if (strcmp(node_type, DDI_NT_NET) == 0) {
		if (!verify_name(name))
			return (DDI_FAILURE);

		if (mtype == DDM_MINOR) {
			struct devnames *dnp = &devnamesp[major];

			/* Mark driver as a network driver */
			LOCK_DEV_OPS(&dnp->dn_lock);
			dnp->dn_flags |= DN_NETWORK_DRIVER;

			/*
			 * If this minor node is created during the device
			 * attachment, this is a physical network device.
			 * Mark the driver as a physical network driver.
			 */
			if (DEVI_IS_ATTACHING(dip))
				dnp->dn_flags |= DN_NETWORK_PHYSDRIVER;
			UNLOCK_DEV_OPS(&dnp->dn_lock);
		}
	}

	/* derive the device class (disk/net/printer/lofi) for real minors */
	if (mtype == DDM_MINOR) {
		if (derive_devi_class(dip, node_type, KM_NOSLEEP) !=
		    DDI_SUCCESS)
			return (DDI_FAILURE);
	}

	/*
	 * Take care of minor number information for the node.
	 */

	if ((dmdp = kmem_zalloc(sizeof (struct ddi_minor_data),
	    KM_NOSLEEP)) == NULL) {
		return (DDI_FAILURE);
	}
	if ((dmdp->ddm_name = i_ddi_strdup(name, KM_NOSLEEP)) == NULL) {
		kmem_free(dmdp, sizeof (struct ddi_minor_data));
		return (DDI_FAILURE);
	}
	dmdp->dip = dip;
	dmdp->ddm_dev = makedevice(major, minor_num);
	dmdp->ddm_spec_type = spec_type;
	dmdp->ddm_node_type = node_type;
	dmdp->type = mtype;
	if (flag & CLONE_DEV) {
		/* clone minors encode the real driver's major as the minor */
		dmdp->type = DDM_ALIAS;
		dmdp->ddm_dev = makedevice(ddi_driver_major(clone_dip), major);
	}
	if (flag & PRIVONLY_DEV) {
		dmdp->ddm_flags |= DM_NO_FSPERM;
	}
	if (read_priv || write_priv) {
		dmdp->ddm_node_priv =
		    devpolicy_priv_by_name(read_priv, write_priv);
	}
	dmdp->ddm_priv_mode = priv_mode;

	ddi_append_minor_node(dip, dmdp);

	/*
	 * only log ddi_create_minor_node() calls which occur
	 * outside the scope of attach(9e)/detach(9e) reconfigurations
	 */
	if (!(DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip)) &&
	    mtype != DDM_INTERNAL_PATH) {
		(void) i_log_devfs_minor_create(dip, name);
	}

	/*
	 * Check if any dacf rules match the creation of this minor node
	 */
	dacfc_match_create_minor(name, node_type, dip, dmdp, flag);
	return (DDI_SUCCESS);
}

/* ddi_create_minor_node(9F): create an externally visible minor node */
int
ddi_create_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_MINOR, NULL, NULL, 0));
}

/* create a minor node gated by the given read/write privileges */
int
ddi_create_priv_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag,
    const char *rdpriv, const char *wrpriv, mode_t priv_mode)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_MINOR, rdpriv, wrpriv, priv_mode));
}

/* create a DDM_DEFAULT minor node (the node used when none is named) */
int
ddi_create_default_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_DEFAULT, NULL, NULL, 0));
}

/*
 * Internal (non-ddi) routine for drivers to export names known
 * to the kernel (especially ddi_pathname_to_dev_t and friends)
 * but not exported externally to /dev
 */
int
ddi_create_internal_pathname(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    "internal", 0, DDM_INTERNAL_PATH, NULL, NULL, 0));
}

/*
 * Remove the minor node named 'name' from 'dip', or all minor nodes
 * if 'name' is NULL.  Frees the node's name, privilege data and dacf
 * client data.
 */
void
ddi_remove_minor_node(dev_info_t *dip, char *name)
{
	int circ;
	struct ddi_minor_data *dmdp, *dmdp1;
	struct ddi_minor_data **dmdp_prev;

	ndi_devi_enter(dip, &circ);
	dmdp_prev = &DEVI(dip)->devi_minor;
	dmdp = DEVI(dip)->devi_minor;
	while (dmdp != NULL) {
		dmdp1 = dmdp->next;	/* save next before freeing dmdp */
		if ((name == NULL || (dmdp->ddm_name != NULL &&
		    strcmp(name, dmdp->ddm_name) == 0))) {
			if (dmdp->ddm_name != NULL) {
				if (dmdp->type != DDM_INTERNAL_PATH)
					(void) i_log_devfs_minor_remove(dip,
					    dmdp->ddm_name);
				kmem_free(dmdp->ddm_name,
				    strlen(dmdp->ddm_name) + 1);
			}
			/*
			 * Release device privilege, if any.
			 * Release dacf client data associated with this minor
			 * node by storing NULL.
			 */
			if (dmdp->ddm_node_priv)
				dpfree(dmdp->ddm_node_priv);
			dacf_store_info((dacf_infohdl_t)dmdp, NULL);
			kmem_free(dmdp, sizeof (struct ddi_minor_data));
			*dmdp_prev = dmdp1;
			/*
			 * OK, we found it, so get out now -- if we drive on,
			 * we will strcmp against garbage. See 1139209.
			 */
			if (name != NULL)
				break;
		} else {
			dmdp_prev = &dmdp->next;
		}
		dmdp = dmdp1;
	}
	ndi_devi_exit(dip, circ);
}


/* returns non-zero if the system is currently panicking */
int
ddi_in_panic()
{
	return (panicstr != NULL);
}


/*
 * Find first bit set in a mask (returned counting from 1 up)
 */

int
ddi_ffs(long mask)
{
	return (ffs(mask));
}

/*
 * Find last bit set. Take mask and clear
 * all but the most significant bit, and
 * then let ffs do the rest of the work.
 *
 * Algorithm courtesy of Steve Chessin.
 */

int
ddi_fls(long mask)
{
	while (mask) {
		long nx;

		/* clear the lowest set bit until only one remains */
		if ((nx = (mask & (mask - 1))) == 0)
			break;
		mask = nx;
	}
	return (ffs(mask));
}

/*
 * The ddi_soft_state_* routines comprise generic storage management utilities
 * for driver soft state structures (in "the old days," this was done with
 * statically sized array - big systems and dynamic loading and unloading
 * make heap allocation more attractive).
 */

/*
 * Allocate a set of pointers to 'n_items' objects of size 'size'
 * bytes.  Each pointer is initialized to nil.
5984 * 5985 * The 'size' and 'n_items' values are stashed in the opaque 5986 * handle returned to the caller. 5987 * 5988 * This implementation interprets 'set of pointers' to mean 'array 5989 * of pointers' but note that nothing in the interface definition 5990 * precludes an implementation that uses, for example, a linked list. 5991 * However there should be a small efficiency gain from using an array 5992 * at lookup time. 5993 * 5994 * NOTE As an optimization, we make our growable array allocations in 5995 * powers of two (bytes), since that's how much kmem_alloc (currently) 5996 * gives us anyway. It should save us some free/realloc's .. 5997 * 5998 * As a further optimization, we make the growable array start out 5999 * with MIN_N_ITEMS in it. 6000 */ 6001 6002 #define MIN_N_ITEMS 8 /* 8 void *'s == 32 bytes */ 6003 6004 int 6005 ddi_soft_state_init(void **state_p, size_t size, size_t n_items) 6006 { 6007 i_ddi_soft_state *ss; 6008 6009 if (state_p == NULL || size == 0) 6010 return (EINVAL); 6011 6012 ss = kmem_zalloc(sizeof (*ss), KM_SLEEP); 6013 mutex_init(&ss->lock, NULL, MUTEX_DRIVER, NULL); 6014 ss->size = size; 6015 6016 if (n_items < MIN_N_ITEMS) 6017 ss->n_items = MIN_N_ITEMS; 6018 else { 6019 int bitlog; 6020 6021 if ((bitlog = ddi_fls(n_items)) == ddi_ffs(n_items)) 6022 bitlog--; 6023 ss->n_items = 1 << bitlog; 6024 } 6025 6026 ASSERT(ss->n_items >= n_items); 6027 6028 ss->array = kmem_zalloc(ss->n_items * sizeof (void *), KM_SLEEP); 6029 6030 *state_p = ss; 6031 return (0); 6032 } 6033 6034 /* 6035 * Allocate a state structure of size 'size' to be associated 6036 * with item 'item'. 6037 * 6038 * In this implementation, the array is extended to 6039 * allow the requested offset, if needed. 
 */
int
ddi_soft_state_zalloc(void *state, int item)
{
	i_ddi_soft_state *ss = (i_ddi_soft_state *)state;
	void **array;
	void *new_element;

	if ((state == NULL) || (item < 0))
		return (DDI_FAILURE);

	mutex_enter(&ss->lock);
	if (ss->size == 0) {
		/* size == 0 means the handle was never initialized */
		mutex_exit(&ss->lock);
		cmn_err(CE_WARN, "ddi_soft_state_zalloc: bad handle: %s",
		    mod_containing_pc(caller()));
		return (DDI_FAILURE);
	}

	array = ss->array;	/* NULL if ss->n_items == 0 */
	ASSERT(ss->n_items != 0 && array != NULL);

	/*
	 * refuse to tread on an existing element
	 */
	if (item < ss->n_items && array[item] != NULL) {
		mutex_exit(&ss->lock);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate a new element to plug in
	 */
	new_element = kmem_zalloc(ss->size, KM_SLEEP);

	/*
	 * Check if the array is big enough, if not, grow it.
	 */
	if (item >= ss->n_items) {
		void **new_array;
		size_t new_n_items;
		struct i_ddi_soft_state *dirty;

		/*
		 * Allocate a new array of the right length, copy
		 * all the old pointers to the new array, then
		 * if it exists at all, put the old array on the
		 * dirty list.
		 *
		 * Note that we can't kmem_free() the old array.
		 *
		 * Why -- well the 'get' operation is 'mutex-free', so we
		 * can't easily catch a suspended thread that is just about
		 * to dereference the array we just grew out of.  So we
		 * cons up a header and put it on a list of 'dirty'
		 * pointer arrays.  (Dirty in the sense that there may
		 * be suspended threads somewhere that are in the middle
		 * of referencing them).  Fortunately, we -can- garbage
		 * collect it all at ddi_soft_state_fini time.
		 */
		new_n_items = ss->n_items;
		while (new_n_items < (1 + item))
			new_n_items <<= 1;	/* double array size .. */

		ASSERT(new_n_items >= (1 + item));	/* sanity check! */

		new_array = kmem_zalloc(new_n_items * sizeof (void *),
		    KM_SLEEP);
		/*
		 * Copy the pointers into the new array
		 */
		bcopy(array, new_array, ss->n_items * sizeof (void *));

		/*
		 * Save the old array on the dirty list
		 */
		dirty = kmem_zalloc(sizeof (*dirty), KM_SLEEP);
		dirty->array = ss->array;
		dirty->n_items = ss->n_items;
		dirty->next = ss->next;
		ss->next = dirty;

		/* publish the new array before (not after) new n_items */
		ss->array = (array = new_array);
		ss->n_items = new_n_items;
	}

	ASSERT(array != NULL && item < ss->n_items && array[item] == NULL);

	array[item] = new_element;

	mutex_exit(&ss->lock);
	return (DDI_SUCCESS);
}

/*
 * Fetch a pointer to the allocated soft state structure.
 *
 * This is designed to be cheap.
 *
 * There's an argument that there should be more checking for
 * nil pointers and out of bounds on the array.. but we do a lot
 * of that in the alloc/free routines.
 *
 * An array has the convenience that we don't need to lock read-access
 * to it c.f. a linked list.  However our "expanding array" strategy
 * means that we should hold a readers lock on the i_ddi_soft_state
 * structure.
 *
 * However, from a performance viewpoint, we need to do it without
 * any locks at all -- this also makes it a leaf routine.  The algorithm
 * is 'lock-free' because we only discard the pointer arrays at
 * ddi_soft_state_fini() time.
 */
void *
ddi_get_soft_state(void *state, int item)
{
	i_ddi_soft_state *ss = (i_ddi_soft_state *)state;

	ASSERT((ss != NULL) && (item >= 0));

	/* lock-free read; stale arrays are kept alive on the dirty list */
	if (item < ss->n_items && ss->array != NULL)
		return (ss->array[item]);
	return (NULL);
}

/*
 * Free the state structure corresponding to 'item.'  Freeing an
 * element that has either gone or was never allocated is not
 * considered an error.
   Note that we free the state structure, but
 * we don't shrink our pointer array, or discard 'dirty' arrays,
 * since even a few pointers don't really waste too much memory.
 *
 * Passing an item number that is out of bounds, or a null pointer will
 * provoke an error message.
 */
void
ddi_soft_state_free(void *state, int item)
{
	i_ddi_soft_state *ss = (i_ddi_soft_state *)state;
	void **array;
	void *element;
	static char msg[] = "ddi_soft_state_free:";

	if (ss == NULL) {
		cmn_err(CE_WARN, "%s null handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	element = NULL;

	mutex_enter(&ss->lock);

	if ((array = ss->array) == NULL || ss->size == 0) {
		cmn_err(CE_WARN, "%s bad handle: %s",
		    msg, mod_containing_pc(caller()));
	} else if (item < 0 || item >= ss->n_items) {
		cmn_err(CE_WARN, "%s item %d not in range [0..%lu]: %s",
		    msg, item, ss->n_items - 1, mod_containing_pc(caller()));
	} else if (array[item] != NULL) {
		/* detach the element under the lock, free it after */
		element = array[item];
		array[item] = NULL;
	}

	mutex_exit(&ss->lock);

	/*
	 * NOTE(review): ss->size is read here after dropping the lock;
	 * this relies on size being immutable after init -- confirm.
	 */
	if (element)
		kmem_free(element, ss->size);
}

/*
 * Free the entire set of pointers, and any
 * soft state structures contained therein.
 *
 * Note that we don't grab the ss->lock mutex, even though
 * we're inspecting the various fields of the data structure.
 *
 * There is an implicit assumption that this routine will
 * never run concurrently with any of the above on this
 * particular state structure i.e. by the time the driver
 * calls this routine, there should be no other threads
 * running in the driver.
6222 */ 6223 void 6224 ddi_soft_state_fini(void **state_p) 6225 { 6226 i_ddi_soft_state *ss, *dirty; 6227 int item; 6228 static char msg[] = "ddi_soft_state_fini:"; 6229 6230 if (state_p == NULL || 6231 (ss = (i_ddi_soft_state *)(*state_p)) == NULL) { 6232 cmn_err(CE_WARN, "%s null handle: %s", 6233 msg, mod_containing_pc(caller())); 6234 return; 6235 } 6236 6237 if (ss->size == 0) { 6238 cmn_err(CE_WARN, "%s bad handle: %s", 6239 msg, mod_containing_pc(caller())); 6240 return; 6241 } 6242 6243 if (ss->n_items > 0) { 6244 for (item = 0; item < ss->n_items; item++) 6245 ddi_soft_state_free(ss, item); 6246 kmem_free(ss->array, ss->n_items * sizeof (void *)); 6247 } 6248 6249 /* 6250 * Now delete any dirty arrays from previous 'grow' operations 6251 */ 6252 for (dirty = ss->next; dirty; dirty = ss->next) { 6253 ss->next = dirty->next; 6254 kmem_free(dirty->array, dirty->n_items * sizeof (void *)); 6255 kmem_free(dirty, sizeof (*dirty)); 6256 } 6257 6258 mutex_destroy(&ss->lock); 6259 kmem_free(ss, sizeof (*ss)); 6260 6261 *state_p = NULL; 6262 } 6263 6264 #define SS_N_ITEMS_PER_HASH 16 6265 #define SS_MIN_HASH_SZ 16 6266 #define SS_MAX_HASH_SZ 4096 6267 6268 int 6269 ddi_soft_state_bystr_init(ddi_soft_state_bystr **state_p, size_t size, 6270 int n_items) 6271 { 6272 i_ddi_soft_state_bystr *sss; 6273 int hash_sz; 6274 6275 ASSERT(state_p && size && n_items); 6276 if ((state_p == NULL) || (size == 0) || (n_items == 0)) 6277 return (EINVAL); 6278 6279 /* current implementation is based on hash, convert n_items to hash */ 6280 hash_sz = n_items / SS_N_ITEMS_PER_HASH; 6281 if (hash_sz < SS_MIN_HASH_SZ) 6282 hash_sz = SS_MIN_HASH_SZ; 6283 else if (hash_sz > SS_MAX_HASH_SZ) 6284 hash_sz = SS_MAX_HASH_SZ; 6285 6286 /* allocate soft_state pool */ 6287 sss = kmem_zalloc(sizeof (*sss), KM_SLEEP); 6288 sss->ss_size = size; 6289 sss->ss_mod_hash = mod_hash_create_strhash("soft_state_bystr", 6290 hash_sz, mod_hash_null_valdtor); 6291 *state_p = (ddi_soft_state_bystr *)sss; 6292 
return (0); 6293 } 6294 6295 int 6296 ddi_soft_state_bystr_zalloc(ddi_soft_state_bystr *state, const char *str) 6297 { 6298 i_ddi_soft_state_bystr *sss = (i_ddi_soft_state_bystr *)state; 6299 void *sso; 6300 char *dup_str; 6301 6302 ASSERT(sss && str && sss->ss_mod_hash); 6303 if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL)) 6304 return (DDI_FAILURE); 6305 sso = kmem_zalloc(sss->ss_size, KM_SLEEP); 6306 dup_str = i_ddi_strdup((char *)str, KM_SLEEP); 6307 if (mod_hash_insert(sss->ss_mod_hash, 6308 (mod_hash_key_t)dup_str, (mod_hash_val_t)sso) == 0) 6309 return (DDI_SUCCESS); 6310 6311 /* 6312 * The only error from an strhash insert is caused by a duplicate key. 6313 * We refuse to tread on an existing elements, so free and fail. 6314 */ 6315 kmem_free(dup_str, strlen(dup_str) + 1); 6316 kmem_free(sso, sss->ss_size); 6317 return (DDI_FAILURE); 6318 } 6319 6320 void * 6321 ddi_soft_state_bystr_get(ddi_soft_state_bystr *state, const char *str) 6322 { 6323 i_ddi_soft_state_bystr *sss = (i_ddi_soft_state_bystr *)state; 6324 void *sso; 6325 6326 ASSERT(sss && str && sss->ss_mod_hash); 6327 if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL)) 6328 return (NULL); 6329 6330 if (mod_hash_find(sss->ss_mod_hash, 6331 (mod_hash_key_t)str, (mod_hash_val_t *)&sso) == 0) 6332 return (sso); 6333 return (NULL); 6334 } 6335 6336 void 6337 ddi_soft_state_bystr_free(ddi_soft_state_bystr *state, const char *str) 6338 { 6339 i_ddi_soft_state_bystr *sss = (i_ddi_soft_state_bystr *)state; 6340 void *sso; 6341 6342 ASSERT(sss && str && sss->ss_mod_hash); 6343 if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL)) 6344 return; 6345 6346 (void) mod_hash_remove(sss->ss_mod_hash, 6347 (mod_hash_key_t)str, (mod_hash_val_t *)&sso); 6348 kmem_free(sso, sss->ss_size); 6349 } 6350 6351 void 6352 ddi_soft_state_bystr_fini(ddi_soft_state_bystr **state_p) 6353 { 6354 i_ddi_soft_state_bystr *sss; 6355 6356 ASSERT(state_p); 6357 if (state_p == NULL) 6358 return; 
6359 6360 sss = (i_ddi_soft_state_bystr *)(*state_p); 6361 if (sss == NULL) 6362 return; 6363 6364 ASSERT(sss->ss_mod_hash); 6365 if (sss->ss_mod_hash) { 6366 mod_hash_destroy_strhash(sss->ss_mod_hash); 6367 sss->ss_mod_hash = NULL; 6368 } 6369 6370 kmem_free(sss, sizeof (*sss)); 6371 *state_p = NULL; 6372 } 6373 6374 /* 6375 * The ddi_strid_* routines provide string-to-index management utilities. 6376 */ 6377 /* allocate and initialize an strid set */ 6378 int 6379 ddi_strid_init(ddi_strid **strid_p, int n_items) 6380 { 6381 i_ddi_strid *ss; 6382 int hash_sz; 6383 6384 if (strid_p == NULL) 6385 return (DDI_FAILURE); 6386 6387 /* current implementation is based on hash, convert n_items to hash */ 6388 hash_sz = n_items / SS_N_ITEMS_PER_HASH; 6389 if (hash_sz < SS_MIN_HASH_SZ) 6390 hash_sz = SS_MIN_HASH_SZ; 6391 else if (hash_sz > SS_MAX_HASH_SZ) 6392 hash_sz = SS_MAX_HASH_SZ; 6393 6394 ss = kmem_alloc(sizeof (*ss), KM_SLEEP); 6395 ss->strid_chunksz = n_items; 6396 ss->strid_spacesz = n_items; 6397 ss->strid_space = id_space_create("strid", 1, n_items); 6398 ss->strid_bystr = mod_hash_create_strhash("strid_bystr", hash_sz, 6399 mod_hash_null_valdtor); 6400 ss->strid_byid = mod_hash_create_idhash("strid_byid", hash_sz, 6401 mod_hash_null_valdtor); 6402 *strid_p = (ddi_strid *)ss; 6403 return (DDI_SUCCESS); 6404 } 6405 6406 /* allocate an id mapping within the specified set for str, return id */ 6407 static id_t 6408 i_ddi_strid_alloc(ddi_strid *strid, char *str) 6409 { 6410 i_ddi_strid *ss = (i_ddi_strid *)strid; 6411 id_t id; 6412 char *s; 6413 6414 ASSERT(ss && str); 6415 if ((ss == NULL) || (str == NULL)) 6416 return (0); 6417 6418 /* 6419 * Allocate an id using VM_FIRSTFIT in order to keep allocated id 6420 * range as compressed as possible. This is important to minimize 6421 * the amount of space used when the id is used as a ddi_soft_state 6422 * index by the caller. 
6423 * 6424 * If the id list is exhausted, increase the size of the list 6425 * by the chuck size specified in ddi_strid_init and reattempt 6426 * the allocation 6427 */ 6428 if ((id = id_allocff_nosleep(ss->strid_space)) == (id_t)-1) { 6429 id_space_extend(ss->strid_space, ss->strid_spacesz, 6430 ss->strid_spacesz + ss->strid_chunksz); 6431 ss->strid_spacesz += ss->strid_chunksz; 6432 if ((id = id_allocff_nosleep(ss->strid_space)) == (id_t)-1) 6433 return (0); 6434 } 6435 6436 /* 6437 * NOTE: since we create and destroy in unison we can save space by 6438 * using bystr key as the byid value. This means destroy must occur 6439 * in (byid, bystr) order. 6440 */ 6441 s = i_ddi_strdup(str, KM_SLEEP); 6442 if (mod_hash_insert(ss->strid_bystr, (mod_hash_key_t)s, 6443 (mod_hash_val_t)(intptr_t)id) != 0) { 6444 ddi_strid_free(strid, id); 6445 return (0); 6446 } 6447 if (mod_hash_insert(ss->strid_byid, (mod_hash_key_t)(intptr_t)id, 6448 (mod_hash_val_t)s) != 0) { 6449 ddi_strid_free(strid, id); 6450 return (0); 6451 } 6452 6453 /* NOTE: s if freed on mod_hash_destroy by mod_hash_strval_dtor */ 6454 return (id); 6455 } 6456 6457 /* allocate an id mapping within the specified set for str, return id */ 6458 id_t 6459 ddi_strid_alloc(ddi_strid *strid, char *str) 6460 { 6461 return (i_ddi_strid_alloc(strid, str)); 6462 } 6463 6464 /* return the id within the specified strid given the str */ 6465 id_t 6466 ddi_strid_str2id(ddi_strid *strid, char *str) 6467 { 6468 i_ddi_strid *ss = (i_ddi_strid *)strid; 6469 id_t id = 0; 6470 mod_hash_val_t hv; 6471 6472 ASSERT(ss && str); 6473 if (ss && str && (mod_hash_find(ss->strid_bystr, 6474 (mod_hash_key_t)str, &hv) == 0)) 6475 id = (int)(intptr_t)hv; 6476 return (id); 6477 } 6478 6479 /* return str within the specified strid given the id */ 6480 char * 6481 ddi_strid_id2str(ddi_strid *strid, id_t id) 6482 { 6483 i_ddi_strid *ss = (i_ddi_strid *)strid; 6484 char *str = NULL; 6485 mod_hash_val_t hv; 6486 6487 ASSERT(ss && id > 0); 6488 if 
(ss && (id > 0) && (mod_hash_find(ss->strid_byid, 6489 (mod_hash_key_t)(uintptr_t)id, &hv) == 0)) 6490 str = (char *)hv; 6491 return (str); 6492 } 6493 6494 /* free the id mapping within the specified strid */ 6495 void 6496 ddi_strid_free(ddi_strid *strid, id_t id) 6497 { 6498 i_ddi_strid *ss = (i_ddi_strid *)strid; 6499 char *str; 6500 6501 ASSERT(ss && id > 0); 6502 if ((ss == NULL) || (id <= 0)) 6503 return; 6504 6505 /* bystr key is byid value: destroy order must be (byid, bystr) */ 6506 str = ddi_strid_id2str(strid, id); 6507 (void) mod_hash_destroy(ss->strid_byid, (mod_hash_key_t)(uintptr_t)id); 6508 id_free(ss->strid_space, id); 6509 6510 if (str) 6511 (void) mod_hash_destroy(ss->strid_bystr, (mod_hash_key_t)str); 6512 } 6513 6514 /* destroy the strid set */ 6515 void 6516 ddi_strid_fini(ddi_strid **strid_p) 6517 { 6518 i_ddi_strid *ss; 6519 6520 ASSERT(strid_p); 6521 if (strid_p == NULL) 6522 return; 6523 6524 ss = (i_ddi_strid *)(*strid_p); 6525 if (ss == NULL) 6526 return; 6527 6528 /* bystr key is byid value: destroy order must be (byid, bystr) */ 6529 if (ss->strid_byid) 6530 mod_hash_destroy_hash(ss->strid_byid); 6531 if (ss->strid_byid) 6532 mod_hash_destroy_hash(ss->strid_bystr); 6533 if (ss->strid_space) 6534 id_space_destroy(ss->strid_space); 6535 kmem_free(ss, sizeof (*ss)); 6536 *strid_p = NULL; 6537 } 6538 6539 /* 6540 * This sets the devi_addr entry in the dev_info structure 'dip' to 'name'. 6541 * Storage is double buffered to prevent updates during devi_addr use - 6542 * double buffering is adaquate for reliable ddi_deviname() consumption. 6543 * The double buffer is not freed until dev_info structure destruction 6544 * (by i_ddi_free_node). 
6545 */ 6546 void 6547 ddi_set_name_addr(dev_info_t *dip, char *name) 6548 { 6549 char *buf = DEVI(dip)->devi_addr_buf; 6550 char *newaddr; 6551 6552 if (buf == NULL) { 6553 buf = kmem_zalloc(2 * MAXNAMELEN, KM_SLEEP); 6554 DEVI(dip)->devi_addr_buf = buf; 6555 } 6556 6557 if (name) { 6558 ASSERT(strlen(name) < MAXNAMELEN); 6559 newaddr = (DEVI(dip)->devi_addr == buf) ? 6560 (buf + MAXNAMELEN) : buf; 6561 (void) strlcpy(newaddr, name, MAXNAMELEN); 6562 } else 6563 newaddr = NULL; 6564 6565 DEVI(dip)->devi_addr = newaddr; 6566 } 6567 6568 char * 6569 ddi_get_name_addr(dev_info_t *dip) 6570 { 6571 return (DEVI(dip)->devi_addr); 6572 } 6573 6574 void 6575 ddi_set_parent_data(dev_info_t *dip, void *pd) 6576 { 6577 DEVI(dip)->devi_parent_data = pd; 6578 } 6579 6580 void * 6581 ddi_get_parent_data(dev_info_t *dip) 6582 { 6583 return (DEVI(dip)->devi_parent_data); 6584 } 6585 6586 /* 6587 * ddi_name_to_major: returns the major number of a named module, 6588 * derived from the current driver alias binding. 6589 * 6590 * Caveat: drivers should avoid the use of this function, in particular 6591 * together with ddi_get_name/ddi_binding name, as per 6592 * major = ddi_name_to_major(ddi_get_name(devi)); 6593 * ddi_name_to_major() relies on the state of the device/alias binding, 6594 * which can and does change dynamically as aliases are administered 6595 * over time. An attached device instance cannot rely on the major 6596 * number returned by ddi_name_to_major() to match its own major number. 6597 * 6598 * For driver use, ddi_driver_major() reliably returns the major number 6599 * for the module to which the device was bound at attach time over 6600 * the life of the instance. 6601 * major = ddi_driver_major(dev_info_t *) 6602 */ 6603 major_t 6604 ddi_name_to_major(char *name) 6605 { 6606 return (mod_name_to_major(name)); 6607 } 6608 6609 /* 6610 * ddi_major_to_name: Returns the module name bound to a major number. 
6611 */ 6612 char * 6613 ddi_major_to_name(major_t major) 6614 { 6615 return (mod_major_to_name(major)); 6616 } 6617 6618 /* 6619 * Return the name of the devinfo node pointed at by 'dip' in the buffer 6620 * pointed at by 'name.' A devinfo node is named as a result of calling 6621 * ddi_initchild(). 6622 * 6623 * Note: the driver must be held before calling this function! 6624 */ 6625 char * 6626 ddi_deviname(dev_info_t *dip, char *name) 6627 { 6628 char *addrname; 6629 char none = '\0'; 6630 6631 if (dip == ddi_root_node()) { 6632 *name = '\0'; 6633 return (name); 6634 } 6635 6636 if (i_ddi_node_state(dip) < DS_BOUND) { 6637 addrname = &none; 6638 } else { 6639 /* 6640 * Use ddi_get_name_addr() without checking state so we get 6641 * a unit-address if we are called after ddi_set_name_addr() 6642 * by nexus DDI_CTL_INITCHILD code, but before completing 6643 * node promotion to DS_INITIALIZED. We currently have 6644 * two situations where we are called in this state: 6645 * o For framework processing of a path-oriented alias. 6646 * o If a SCSA nexus driver calls ddi_devid_register() 6647 * from it's tran_tgt_init(9E) implementation. 6648 */ 6649 addrname = ddi_get_name_addr(dip); 6650 if (addrname == NULL) 6651 addrname = &none; 6652 } 6653 6654 if (*addrname == '\0') { 6655 (void) sprintf(name, "/%s", ddi_node_name(dip)); 6656 } else { 6657 (void) sprintf(name, "/%s@%s", ddi_node_name(dip), addrname); 6658 } 6659 6660 return (name); 6661 } 6662 6663 /* 6664 * Spits out the name of device node, typically name@addr, for a given node, 6665 * using the driver name, not the nodename. 6666 * 6667 * Used by match_parent. Not to be used elsewhere. 
6668 */ 6669 char * 6670 i_ddi_parname(dev_info_t *dip, char *name) 6671 { 6672 char *addrname; 6673 6674 if (dip == ddi_root_node()) { 6675 *name = '\0'; 6676 return (name); 6677 } 6678 6679 ASSERT(i_ddi_node_state(dip) >= DS_INITIALIZED); 6680 6681 if (*(addrname = ddi_get_name_addr(dip)) == '\0') 6682 (void) sprintf(name, "%s", ddi_binding_name(dip)); 6683 else 6684 (void) sprintf(name, "%s@%s", ddi_binding_name(dip), addrname); 6685 return (name); 6686 } 6687 6688 static char * 6689 pathname_work(dev_info_t *dip, char *path) 6690 { 6691 char *bp; 6692 6693 if (dip == ddi_root_node()) { 6694 *path = '\0'; 6695 return (path); 6696 } 6697 (void) pathname_work(ddi_get_parent(dip), path); 6698 bp = path + strlen(path); 6699 (void) ddi_deviname(dip, bp); 6700 return (path); 6701 } 6702 6703 char * 6704 ddi_pathname(dev_info_t *dip, char *path) 6705 { 6706 return (pathname_work(dip, path)); 6707 } 6708 6709 char * 6710 ddi_pathname_minor(struct ddi_minor_data *dmdp, char *path) 6711 { 6712 if (dmdp->dip == NULL) 6713 *path = '\0'; 6714 else { 6715 (void) ddi_pathname(dmdp->dip, path); 6716 if (dmdp->ddm_name) { 6717 (void) strcat(path, ":"); 6718 (void) strcat(path, dmdp->ddm_name); 6719 } 6720 } 6721 return (path); 6722 } 6723 6724 static char * 6725 pathname_work_obp(dev_info_t *dip, char *path) 6726 { 6727 char *bp; 6728 char *obp_path; 6729 6730 /* 6731 * look up the "obp-path" property, return the path if it exists 6732 */ 6733 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 6734 "obp-path", &obp_path) == DDI_PROP_SUCCESS) { 6735 (void) strcpy(path, obp_path); 6736 ddi_prop_free(obp_path); 6737 return (path); 6738 } 6739 6740 /* 6741 * stop at root, no obp path 6742 */ 6743 if (dip == ddi_root_node()) { 6744 return (NULL); 6745 } 6746 6747 obp_path = pathname_work_obp(ddi_get_parent(dip), path); 6748 if (obp_path == NULL) 6749 return (NULL); 6750 6751 /* 6752 * append our component to parent's obp path 6753 */ 6754 bp = path + strlen(path); 6755 
if (*(bp - 1) != '/') 6756 (void) strcat(bp++, "/"); 6757 (void) ddi_deviname(dip, bp); 6758 return (path); 6759 } 6760 6761 /* 6762 * return the 'obp-path' based path for the given node, or NULL if the node 6763 * does not have a different obp path. NOTE: Unlike ddi_pathname, this 6764 * function can't be called from interrupt context (since we need to 6765 * lookup a string property). 6766 */ 6767 char * 6768 ddi_pathname_obp(dev_info_t *dip, char *path) 6769 { 6770 ASSERT(!servicing_interrupt()); 6771 if (dip == NULL || path == NULL) 6772 return (NULL); 6773 6774 /* split work into a separate function to aid debugging */ 6775 return (pathname_work_obp(dip, path)); 6776 } 6777 6778 int 6779 ddi_pathname_obp_set(dev_info_t *dip, char *component) 6780 { 6781 dev_info_t *pdip; 6782 char *obp_path = NULL; 6783 int rc = DDI_FAILURE; 6784 6785 if (dip == NULL) 6786 return (DDI_FAILURE); 6787 6788 obp_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 6789 6790 pdip = ddi_get_parent(dip); 6791 6792 if (ddi_pathname_obp(pdip, obp_path) == NULL) { 6793 (void) ddi_pathname(pdip, obp_path); 6794 } 6795 6796 if (component) { 6797 (void) strncat(obp_path, "/", MAXPATHLEN); 6798 (void) strncat(obp_path, component, MAXPATHLEN); 6799 } 6800 rc = ndi_prop_update_string(DDI_DEV_T_NONE, dip, "obp-path", 6801 obp_path); 6802 6803 if (obp_path) 6804 kmem_free(obp_path, MAXPATHLEN); 6805 6806 return (rc); 6807 } 6808 6809 /* 6810 * Given a dev_t, return the pathname of the corresponding device in the 6811 * buffer pointed at by "path." The buffer is assumed to be large enough 6812 * to hold the pathname of the device (MAXPATHLEN). 6813 * 6814 * The pathname of a device is the pathname of the devinfo node to which 6815 * the device "belongs," concatenated with the character ':' and the name 6816 * of the minor node corresponding to the dev_t. If spec_type is 0 then 6817 * just the pathname of the devinfo node is returned without driving attach 6818 * of that node. 
   For a non-zero spec_type, an attach is performed and a
 * search of the minor list occurs.
 *
 * It is possible that the path associated with the dev_t is not
 * currently available in the devinfo tree.  In order to have a
 * dev_t, a device must have been discovered before, which means
 * that the path is always in the instance tree.  The one exception
 * to this is if the dev_t is associated with a pseudo driver, in
 * which case the device must exist on the pseudo branch of the
 * devinfo tree as a result of parsing .conf files.
 */
int
ddi_dev_pathname(dev_t devt, int spec_type, char *path)
{
	int circ;
	major_t major = getmajor(devt);
	int instance;
	dev_info_t *dip;
	char *minorname;
	char *drvname;

	if (major >= devcnt)
		goto fail;
	if (major == clone_major) {
		/* clone has no minor nodes, manufacture the path here */
		if ((drvname = ddi_major_to_name(getminor(devt))) == NULL)
			goto fail;

		(void) snprintf(path, MAXPATHLEN, "%s:%s",
		    CLONE_PATH, drvname);
		return (DDI_SUCCESS);
	}

	/* extract instance from devt (getinfo(9E) DDI_INFO_DEVT2INSTANCE). */
	if ((instance = dev_to_instance(devt)) == -1)
		goto fail;

	/* reconstruct the path given the major/instance */
	if (e_ddi_majorinstance_to_path(major, instance, path) != DDI_SUCCESS)
		goto fail;

	/* if spec_type given we must drive attach and search minor nodes */
	if ((spec_type == S_IFCHR) || (spec_type == S_IFBLK)) {
		/* attach the path so we can search minors */
		if ((dip = e_ddi_hold_devi_by_path(path, 0)) == NULL)
			goto fail;

		/* Add minorname to path. */
		ndi_devi_enter(dip, &circ);
		minorname = i_ddi_devtspectype_to_minorname(dip,
		    devt, spec_type);
		if (minorname) {
			(void) strcat(path, ":");
			(void) strcat(path, minorname);
		}
		ndi_devi_exit(dip, circ);
		ddi_release_devi(dip);
		if (minorname == NULL)
			goto fail;
	}
	ASSERT(strlen(path) < MAXPATHLEN);
	return (DDI_SUCCESS);

fail:	*path = 0;
	return (DDI_FAILURE);
}

/*
 * Given a major number and an instance, return the path.
 * This interface does NOT drive attach.
 */
int
e_ddi_majorinstance_to_path(major_t major, int instance, char *path)
{
	struct devnames *dnp;
	dev_info_t *dip;

	if ((major >= devcnt) || (instance == -1)) {
		*path = 0;
		return (DDI_FAILURE);
	}

	/* look for the major/instance in the instance tree */
	if (e_ddi_instance_majorinstance_to_path(major, instance,
	    path) == DDI_SUCCESS) {
		ASSERT(strlen(path) < MAXPATHLEN);
		return (DDI_SUCCESS);
	}

	/*
	 * Not in instance tree, find the instance on the per driver list and
	 * construct path to instance via ddi_pathname(). This is how paths
	 * down the 'pseudo' branch are constructed.
	 */
	dnp = &(devnamesp[major]);
	LOCK_DEV_OPS(&(dnp->dn_lock));
	for (dip = dnp->dn_head; dip;
	    dip = (dev_info_t *)DEVI(dip)->devi_next) {
		/* Skip if instance does not match. */
		if (DEVI(dip)->devi_instance != instance)
			continue;

		/*
		 * An ndi_hold_devi() does not prevent DS_INITIALIZED->DS_BOUND
		 * node demotion, so it is not an effective way of ensuring
		 * that the ddi_pathname result has a unit-address.  Instead,
		 * we reverify the node state after calling ddi_pathname().
		 */
		if (i_ddi_node_state(dip) >= DS_INITIALIZED) {
			(void) ddi_pathname(dip, path);
			if (i_ddi_node_state(dip) < DS_INITIALIZED)
				continue;
			UNLOCK_DEV_OPS(&(dnp->dn_lock));
			ASSERT(strlen(path) < MAXPATHLEN);
			return (DDI_SUCCESS);
		}
	}
	UNLOCK_DEV_OPS(&(dnp->dn_lock));

	/* can't reconstruct the path */
	*path = 0;
	return (DDI_FAILURE);
}

#define	GLD_DRIVER_PPA "SUNW,gld_v0_ppa"

/*
 * Given the dip for a network interface return the ppa for that interface.
 *
 * In all cases except GLD v0 drivers, the ppa == instance.
 * In the case of GLD v0 drivers, the ppa is equal to the attach order.
 * So for these drivers when the attach routine calls gld_register(),
 * the GLD framework creates an integer property called "gld_driver_ppa"
 * that can be queried here.
 *
 * The only time this function is used is when a system is booting over nfs.
 * In this case the system has to resolve the pathname of the boot device
 * to its ppa.
 */
int
i_ddi_devi_get_ppa(dev_info_t *dip)
{
	return (ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    GLD_DRIVER_PPA, ddi_get_instance(dip)));
}

/*
 * i_ddi_devi_set_ppa() should only be called from gld_register()
 * and only for GLD v0 drivers
 */
void
i_ddi_devi_set_ppa(dev_info_t *dip, int ppa)
{
	(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip, GLD_DRIVER_PPA, ppa);
}


/*
 * Private DDI Console bell functions.
6977 */ 6978 void 6979 ddi_ring_console_bell(clock_t duration) 6980 { 6981 if (ddi_console_bell_func != NULL) 6982 (*ddi_console_bell_func)(duration); 6983 } 6984 6985 void 6986 ddi_set_console_bell(void (*bellfunc)(clock_t duration)) 6987 { 6988 ddi_console_bell_func = bellfunc; 6989 } 6990 6991 int 6992 ddi_dma_alloc_handle(dev_info_t *dip, ddi_dma_attr_t *attr, 6993 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep) 6994 { 6995 int (*funcp)() = ddi_dma_allochdl; 6996 ddi_dma_attr_t dma_attr; 6997 struct bus_ops *bop; 6998 6999 if (attr == (ddi_dma_attr_t *)0) 7000 return (DDI_DMA_BADATTR); 7001 7002 dma_attr = *attr; 7003 7004 bop = DEVI(dip)->devi_ops->devo_bus_ops; 7005 if (bop && bop->bus_dma_allochdl) 7006 funcp = bop->bus_dma_allochdl; 7007 7008 return ((*funcp)(dip, dip, &dma_attr, waitfp, arg, handlep)); 7009 } 7010 7011 void 7012 ddi_dma_free_handle(ddi_dma_handle_t *handlep) 7013 { 7014 ddi_dma_handle_t h = *handlep; 7015 (void) ddi_dma_freehdl(HD, HD, h); 7016 } 7017 7018 static uintptr_t dma_mem_list_id = 0; 7019 7020 7021 int 7022 ddi_dma_mem_alloc(ddi_dma_handle_t handle, size_t length, 7023 ddi_device_acc_attr_t *accattrp, uint_t flags, 7024 int (*waitfp)(caddr_t), caddr_t arg, caddr_t *kaddrp, 7025 size_t *real_length, ddi_acc_handle_t *handlep) 7026 { 7027 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7028 dev_info_t *dip = hp->dmai_rdip; 7029 ddi_acc_hdl_t *ap; 7030 ddi_dma_attr_t *attrp = &hp->dmai_attr; 7031 uint_t sleepflag, xfermodes; 7032 int (*fp)(caddr_t); 7033 int rval; 7034 7035 if (waitfp == DDI_DMA_SLEEP) 7036 fp = (int (*)())KM_SLEEP; 7037 else if (waitfp == DDI_DMA_DONTWAIT) 7038 fp = (int (*)())KM_NOSLEEP; 7039 else 7040 fp = waitfp; 7041 *handlep = impl_acc_hdl_alloc(fp, arg); 7042 if (*handlep == NULL) 7043 return (DDI_FAILURE); 7044 7045 /* check if the cache attributes are supported */ 7046 if (i_ddi_check_cache_attr(flags) == B_FALSE) 7047 return (DDI_FAILURE); 7048 7049 /* 7050 * Transfer the meaningful bits to 
xfermodes. 7051 * Double-check if the 3rd party driver correctly sets the bits. 7052 * If not, set DDI_DMA_STREAMING to keep compatibility. 7053 */ 7054 xfermodes = flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING); 7055 if (xfermodes == 0) { 7056 xfermodes = DDI_DMA_STREAMING; 7057 } 7058 7059 /* 7060 * initialize the common elements of data access handle 7061 */ 7062 ap = impl_acc_hdl_get(*handlep); 7063 ap->ah_vers = VERS_ACCHDL; 7064 ap->ah_dip = dip; 7065 ap->ah_offset = 0; 7066 ap->ah_len = 0; 7067 ap->ah_xfermodes = flags; 7068 ap->ah_acc = *accattrp; 7069 7070 sleepflag = ((waitfp == DDI_DMA_SLEEP) ? 1 : 0); 7071 if (xfermodes == DDI_DMA_CONSISTENT) { 7072 rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag, 7073 flags, accattrp, kaddrp, NULL, ap); 7074 *real_length = length; 7075 } else { 7076 rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag, 7077 flags, accattrp, kaddrp, real_length, ap); 7078 } 7079 if (rval == DDI_SUCCESS) { 7080 ap->ah_len = (off_t)(*real_length); 7081 ap->ah_addr = *kaddrp; 7082 } else { 7083 impl_acc_hdl_free(*handlep); 7084 *handlep = (ddi_acc_handle_t)NULL; 7085 if (waitfp != DDI_DMA_SLEEP && waitfp != DDI_DMA_DONTWAIT) { 7086 ddi_set_callback(waitfp, arg, &dma_mem_list_id); 7087 } 7088 rval = DDI_FAILURE; 7089 } 7090 return (rval); 7091 } 7092 7093 void 7094 ddi_dma_mem_free(ddi_acc_handle_t *handlep) 7095 { 7096 ddi_acc_hdl_t *ap; 7097 7098 ap = impl_acc_hdl_get(*handlep); 7099 ASSERT(ap); 7100 7101 i_ddi_mem_free((caddr_t)ap->ah_addr, ap); 7102 7103 /* 7104 * free the handle 7105 */ 7106 impl_acc_hdl_free(*handlep); 7107 *handlep = (ddi_acc_handle_t)NULL; 7108 7109 if (dma_mem_list_id != 0) { 7110 ddi_run_callback(&dma_mem_list_id); 7111 } 7112 } 7113 7114 int 7115 ddi_dma_buf_bind_handle(ddi_dma_handle_t handle, struct buf *bp, 7116 uint_t flags, int (*waitfp)(caddr_t), caddr_t arg, 7117 ddi_dma_cookie_t *cookiep, uint_t *ccountp) 7118 { 7119 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7120 dev_info_t *dip, *rdip; 7121 
struct ddi_dma_req dmareq; 7122 int (*funcp)(); 7123 7124 dmareq.dmar_flags = flags; 7125 dmareq.dmar_fp = waitfp; 7126 dmareq.dmar_arg = arg; 7127 dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount; 7128 7129 if (bp->b_flags & B_PAGEIO) { 7130 dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES; 7131 dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages; 7132 dmareq.dmar_object.dmao_obj.pp_obj.pp_offset = 7133 (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET); 7134 } else { 7135 dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr; 7136 if (bp->b_flags & B_SHADOW) { 7137 dmareq.dmar_object.dmao_obj.virt_obj.v_priv = 7138 bp->b_shadow; 7139 dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR; 7140 } else { 7141 dmareq.dmar_object.dmao_type = 7142 (bp->b_flags & (B_PHYS | B_REMAPPED)) ? 7143 DMA_OTYP_BUFVADDR : DMA_OTYP_VADDR; 7144 dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL; 7145 } 7146 7147 /* 7148 * If the buffer has no proc pointer, or the proc 7149 * struct has the kernel address space, or the buffer has 7150 * been marked B_REMAPPED (meaning that it is now 7151 * mapped into the kernel's address space), then 7152 * the address space is kas (kernel address space). 
7153 */ 7154 if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) || 7155 (bp->b_flags & B_REMAPPED)) { 7156 dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0; 7157 } else { 7158 dmareq.dmar_object.dmao_obj.virt_obj.v_as = 7159 bp->b_proc->p_as; 7160 } 7161 } 7162 7163 dip = rdip = hp->dmai_rdip; 7164 if (dip != ddi_root_node()) 7165 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl; 7166 funcp = DEVI(rdip)->devi_bus_dma_bindfunc; 7167 return ((*funcp)(dip, rdip, handle, &dmareq, cookiep, ccountp)); 7168 } 7169 7170 int 7171 ddi_dma_addr_bind_handle(ddi_dma_handle_t handle, struct as *as, 7172 caddr_t addr, size_t len, uint_t flags, int (*waitfp)(caddr_t), 7173 caddr_t arg, ddi_dma_cookie_t *cookiep, uint_t *ccountp) 7174 { 7175 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7176 dev_info_t *dip, *rdip; 7177 struct ddi_dma_req dmareq; 7178 int (*funcp)(); 7179 7180 if (len == (uint_t)0) { 7181 return (DDI_DMA_NOMAPPING); 7182 } 7183 dmareq.dmar_flags = flags; 7184 dmareq.dmar_fp = waitfp; 7185 dmareq.dmar_arg = arg; 7186 dmareq.dmar_object.dmao_size = len; 7187 dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR; 7188 dmareq.dmar_object.dmao_obj.virt_obj.v_as = as; 7189 dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr; 7190 dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL; 7191 7192 dip = rdip = hp->dmai_rdip; 7193 if (dip != ddi_root_node()) 7194 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl; 7195 funcp = DEVI(rdip)->devi_bus_dma_bindfunc; 7196 return ((*funcp)(dip, rdip, handle, &dmareq, cookiep, ccountp)); 7197 } 7198 7199 void 7200 ddi_dma_nextcookie(ddi_dma_handle_t handle, ddi_dma_cookie_t *cookiep) 7201 { 7202 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7203 ddi_dma_cookie_t *cp; 7204 7205 cp = hp->dmai_cookie; 7206 ASSERT(cp); 7207 7208 cookiep->dmac_notused = cp->dmac_notused; 7209 cookiep->dmac_type = cp->dmac_type; 7210 cookiep->dmac_address = cp->dmac_address; 7211 cookiep->dmac_size = cp->dmac_size; 7212 hp->dmai_cookie++; 7213 } 7214 7215 int 
7216 ddi_dma_numwin(ddi_dma_handle_t handle, uint_t *nwinp) 7217 { 7218 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7219 if ((hp->dmai_rflags & DDI_DMA_PARTIAL) == 0) { 7220 return (DDI_FAILURE); 7221 } else { 7222 *nwinp = hp->dmai_nwin; 7223 return (DDI_SUCCESS); 7224 } 7225 } 7226 7227 int 7228 ddi_dma_getwin(ddi_dma_handle_t h, uint_t win, off_t *offp, 7229 size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp) 7230 { 7231 int (*funcp)() = ddi_dma_win; 7232 struct bus_ops *bop; 7233 7234 bop = DEVI(HD)->devi_ops->devo_bus_ops; 7235 if (bop && bop->bus_dma_win) 7236 funcp = bop->bus_dma_win; 7237 7238 return ((*funcp)(HD, HD, h, win, offp, lenp, cookiep, ccountp)); 7239 } 7240 7241 int 7242 ddi_dma_set_sbus64(ddi_dma_handle_t h, ulong_t burstsizes) 7243 { 7244 return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SET_SBUS64, 0, 7245 &burstsizes, 0, 0)); 7246 } 7247 7248 int 7249 i_ddi_dma_fault_check(ddi_dma_impl_t *hp) 7250 { 7251 return (hp->dmai_fault); 7252 } 7253 7254 int 7255 ddi_check_dma_handle(ddi_dma_handle_t handle) 7256 { 7257 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7258 int (*check)(ddi_dma_impl_t *); 7259 7260 if ((check = hp->dmai_fault_check) == NULL) 7261 check = i_ddi_dma_fault_check; 7262 7263 return (((*check)(hp) == DDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE); 7264 } 7265 7266 void 7267 i_ddi_dma_set_fault(ddi_dma_handle_t handle) 7268 { 7269 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7270 void (*notify)(ddi_dma_impl_t *); 7271 7272 if (!hp->dmai_fault) { 7273 hp->dmai_fault = 1; 7274 if ((notify = hp->dmai_fault_notify) != NULL) 7275 (*notify)(hp); 7276 } 7277 } 7278 7279 void 7280 i_ddi_dma_clr_fault(ddi_dma_handle_t handle) 7281 { 7282 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 7283 void (*notify)(ddi_dma_impl_t *); 7284 7285 if (hp->dmai_fault) { 7286 hp->dmai_fault = 0; 7287 if ((notify = hp->dmai_fault_notify) != NULL) 7288 (*notify)(hp); 7289 } 7290 } 7291 7292 /* 7293 * register mapping routines. 
7294 */ 7295 int 7296 ddi_regs_map_setup(dev_info_t *dip, uint_t rnumber, caddr_t *addrp, 7297 offset_t offset, offset_t len, ddi_device_acc_attr_t *accattrp, 7298 ddi_acc_handle_t *handle) 7299 { 7300 ddi_map_req_t mr; 7301 ddi_acc_hdl_t *hp; 7302 int result; 7303 7304 /* 7305 * Allocate and initialize the common elements of data access handle. 7306 */ 7307 *handle = impl_acc_hdl_alloc(KM_SLEEP, NULL); 7308 hp = impl_acc_hdl_get(*handle); 7309 hp->ah_vers = VERS_ACCHDL; 7310 hp->ah_dip = dip; 7311 hp->ah_rnumber = rnumber; 7312 hp->ah_offset = offset; 7313 hp->ah_len = len; 7314 hp->ah_acc = *accattrp; 7315 7316 /* 7317 * Set up the mapping request and call to parent. 7318 */ 7319 mr.map_op = DDI_MO_MAP_LOCKED; 7320 mr.map_type = DDI_MT_RNUMBER; 7321 mr.map_obj.rnumber = rnumber; 7322 mr.map_prot = PROT_READ | PROT_WRITE; 7323 mr.map_flags = DDI_MF_KERNEL_MAPPING; 7324 mr.map_handlep = hp; 7325 mr.map_vers = DDI_MAP_VERSION; 7326 result = ddi_map(dip, &mr, offset, len, addrp); 7327 7328 /* 7329 * check for end result 7330 */ 7331 if (result != DDI_SUCCESS) { 7332 impl_acc_hdl_free(*handle); 7333 *handle = (ddi_acc_handle_t)NULL; 7334 } else { 7335 hp->ah_addr = *addrp; 7336 } 7337 7338 return (result); 7339 } 7340 7341 void 7342 ddi_regs_map_free(ddi_acc_handle_t *handlep) 7343 { 7344 ddi_map_req_t mr; 7345 ddi_acc_hdl_t *hp; 7346 7347 hp = impl_acc_hdl_get(*handlep); 7348 ASSERT(hp); 7349 7350 mr.map_op = DDI_MO_UNMAP; 7351 mr.map_type = DDI_MT_RNUMBER; 7352 mr.map_obj.rnumber = hp->ah_rnumber; 7353 mr.map_prot = PROT_READ | PROT_WRITE; 7354 mr.map_flags = DDI_MF_KERNEL_MAPPING; 7355 mr.map_handlep = hp; 7356 mr.map_vers = DDI_MAP_VERSION; 7357 7358 /* 7359 * Call my parent to unmap my regs. 
7360 */ 7361 (void) ddi_map(hp->ah_dip, &mr, hp->ah_offset, 7362 hp->ah_len, &hp->ah_addr); 7363 /* 7364 * free the handle 7365 */ 7366 impl_acc_hdl_free(*handlep); 7367 *handlep = (ddi_acc_handle_t)NULL; 7368 } 7369 7370 int 7371 ddi_device_zero(ddi_acc_handle_t handle, caddr_t dev_addr, size_t bytecount, 7372 ssize_t dev_advcnt, uint_t dev_datasz) 7373 { 7374 uint8_t *b; 7375 uint16_t *w; 7376 uint32_t *l; 7377 uint64_t *ll; 7378 7379 /* check for total byte count is multiple of data transfer size */ 7380 if (bytecount != ((bytecount / dev_datasz) * dev_datasz)) 7381 return (DDI_FAILURE); 7382 7383 switch (dev_datasz) { 7384 case DDI_DATA_SZ01_ACC: 7385 for (b = (uint8_t *)dev_addr; 7386 bytecount != 0; bytecount -= 1, b += dev_advcnt) 7387 ddi_put8(handle, b, 0); 7388 break; 7389 case DDI_DATA_SZ02_ACC: 7390 for (w = (uint16_t *)dev_addr; 7391 bytecount != 0; bytecount -= 2, w += dev_advcnt) 7392 ddi_put16(handle, w, 0); 7393 break; 7394 case DDI_DATA_SZ04_ACC: 7395 for (l = (uint32_t *)dev_addr; 7396 bytecount != 0; bytecount -= 4, l += dev_advcnt) 7397 ddi_put32(handle, l, 0); 7398 break; 7399 case DDI_DATA_SZ08_ACC: 7400 for (ll = (uint64_t *)dev_addr; 7401 bytecount != 0; bytecount -= 8, ll += dev_advcnt) 7402 ddi_put64(handle, ll, 0x0ll); 7403 break; 7404 default: 7405 return (DDI_FAILURE); 7406 } 7407 return (DDI_SUCCESS); 7408 } 7409 7410 int 7411 ddi_device_copy( 7412 ddi_acc_handle_t src_handle, caddr_t src_addr, ssize_t src_advcnt, 7413 ddi_acc_handle_t dest_handle, caddr_t dest_addr, ssize_t dest_advcnt, 7414 size_t bytecount, uint_t dev_datasz) 7415 { 7416 uint8_t *b_src, *b_dst; 7417 uint16_t *w_src, *w_dst; 7418 uint32_t *l_src, *l_dst; 7419 uint64_t *ll_src, *ll_dst; 7420 7421 /* check for total byte count is multiple of data transfer size */ 7422 if (bytecount != ((bytecount / dev_datasz) * dev_datasz)) 7423 return (DDI_FAILURE); 7424 7425 switch (dev_datasz) { 7426 case DDI_DATA_SZ01_ACC: 7427 b_src = (uint8_t *)src_addr; 7428 b_dst = (uint8_t 
*)dest_addr; 7429 7430 for (; bytecount != 0; bytecount -= 1) { 7431 ddi_put8(dest_handle, b_dst, 7432 ddi_get8(src_handle, b_src)); 7433 b_dst += dest_advcnt; 7434 b_src += src_advcnt; 7435 } 7436 break; 7437 case DDI_DATA_SZ02_ACC: 7438 w_src = (uint16_t *)src_addr; 7439 w_dst = (uint16_t *)dest_addr; 7440 7441 for (; bytecount != 0; bytecount -= 2) { 7442 ddi_put16(dest_handle, w_dst, 7443 ddi_get16(src_handle, w_src)); 7444 w_dst += dest_advcnt; 7445 w_src += src_advcnt; 7446 } 7447 break; 7448 case DDI_DATA_SZ04_ACC: 7449 l_src = (uint32_t *)src_addr; 7450 l_dst = (uint32_t *)dest_addr; 7451 7452 for (; bytecount != 0; bytecount -= 4) { 7453 ddi_put32(dest_handle, l_dst, 7454 ddi_get32(src_handle, l_src)); 7455 l_dst += dest_advcnt; 7456 l_src += src_advcnt; 7457 } 7458 break; 7459 case DDI_DATA_SZ08_ACC: 7460 ll_src = (uint64_t *)src_addr; 7461 ll_dst = (uint64_t *)dest_addr; 7462 7463 for (; bytecount != 0; bytecount -= 8) { 7464 ddi_put64(dest_handle, ll_dst, 7465 ddi_get64(src_handle, ll_src)); 7466 ll_dst += dest_advcnt; 7467 ll_src += src_advcnt; 7468 } 7469 break; 7470 default: 7471 return (DDI_FAILURE); 7472 } 7473 return (DDI_SUCCESS); 7474 } 7475 7476 #define swap16(value) \ 7477 ((((value) & 0xff) << 8) | ((value) >> 8)) 7478 7479 #define swap32(value) \ 7480 (((uint32_t)swap16((uint16_t)((value) & 0xffff)) << 16) | \ 7481 (uint32_t)swap16((uint16_t)((value) >> 16))) 7482 7483 #define swap64(value) \ 7484 (((uint64_t)swap32((uint32_t)((value) & 0xffffffff)) \ 7485 << 32) | \ 7486 (uint64_t)swap32((uint32_t)((value) >> 32))) 7487 7488 uint16_t 7489 ddi_swap16(uint16_t value) 7490 { 7491 return (swap16(value)); 7492 } 7493 7494 uint32_t 7495 ddi_swap32(uint32_t value) 7496 { 7497 return (swap32(value)); 7498 } 7499 7500 uint64_t 7501 ddi_swap64(uint64_t value) 7502 { 7503 return (swap64(value)); 7504 } 7505 7506 /* 7507 * Convert a binding name to a driver name. 
7508 * A binding name is the name used to determine the driver for a 7509 * device - it may be either an alias for the driver or the name 7510 * of the driver itself. 7511 */ 7512 char * 7513 i_binding_to_drv_name(char *bname) 7514 { 7515 major_t major_no; 7516 7517 ASSERT(bname != NULL); 7518 7519 if ((major_no = ddi_name_to_major(bname)) == -1) 7520 return (NULL); 7521 return (ddi_major_to_name(major_no)); 7522 } 7523 7524 /* 7525 * Search for minor name that has specified dev_t and spec_type. 7526 * If spec_type is zero then any dev_t match works. Since we 7527 * are returning a pointer to the minor name string, we require the 7528 * caller to do the locking. 7529 */ 7530 char * 7531 i_ddi_devtspectype_to_minorname(dev_info_t *dip, dev_t dev, int spec_type) 7532 { 7533 struct ddi_minor_data *dmdp; 7534 7535 /* 7536 * The did layered driver currently intentionally returns a 7537 * devinfo ptr for an underlying sd instance based on a did 7538 * dev_t. In this case it is not an error. 7539 * 7540 * The did layered driver is associated with Sun Cluster. 7541 */ 7542 ASSERT((ddi_driver_major(dip) == getmajor(dev)) || 7543 (strcmp(ddi_major_to_name(getmajor(dev)), "did") == 0)); 7544 7545 ASSERT(DEVI_BUSY_OWNED(dip)); 7546 for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) { 7547 if (((dmdp->type == DDM_MINOR) || 7548 (dmdp->type == DDM_INTERNAL_PATH) || 7549 (dmdp->type == DDM_DEFAULT)) && 7550 (dmdp->ddm_dev == dev) && 7551 ((((spec_type & (S_IFCHR|S_IFBLK))) == 0) || 7552 (dmdp->ddm_spec_type == spec_type))) 7553 return (dmdp->ddm_name); 7554 } 7555 7556 return (NULL); 7557 } 7558 7559 /* 7560 * Find the devt and spectype of the specified minor_name. 7561 * Return DDI_FAILURE if minor_name not found. Since we are 7562 * returning everything via arguments we can do the locking. 
7563 */ 7564 int 7565 i_ddi_minorname_to_devtspectype(dev_info_t *dip, char *minor_name, 7566 dev_t *devtp, int *spectypep) 7567 { 7568 int circ; 7569 struct ddi_minor_data *dmdp; 7570 7571 /* deal with clone minor nodes */ 7572 if (dip == clone_dip) { 7573 major_t major; 7574 /* 7575 * Make sure minor_name is a STREAMS driver. 7576 * We load the driver but don't attach to any instances. 7577 */ 7578 7579 major = ddi_name_to_major(minor_name); 7580 if (major == DDI_MAJOR_T_NONE) 7581 return (DDI_FAILURE); 7582 7583 if (ddi_hold_driver(major) == NULL) 7584 return (DDI_FAILURE); 7585 7586 if (STREAMSTAB(major) == NULL) { 7587 ddi_rele_driver(major); 7588 return (DDI_FAILURE); 7589 } 7590 ddi_rele_driver(major); 7591 7592 if (devtp) 7593 *devtp = makedevice(clone_major, (minor_t)major); 7594 7595 if (spectypep) 7596 *spectypep = S_IFCHR; 7597 7598 return (DDI_SUCCESS); 7599 } 7600 7601 ndi_devi_enter(dip, &circ); 7602 for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) { 7603 if (((dmdp->type != DDM_MINOR) && 7604 (dmdp->type != DDM_INTERNAL_PATH) && 7605 (dmdp->type != DDM_DEFAULT)) || 7606 strcmp(minor_name, dmdp->ddm_name)) 7607 continue; 7608 7609 if (devtp) 7610 *devtp = dmdp->ddm_dev; 7611 7612 if (spectypep) 7613 *spectypep = dmdp->ddm_spec_type; 7614 7615 ndi_devi_exit(dip, circ); 7616 return (DDI_SUCCESS); 7617 } 7618 ndi_devi_exit(dip, circ); 7619 7620 return (DDI_FAILURE); 7621 } 7622 7623 static kmutex_t devid_gen_mutex; 7624 static short devid_gen_number; 7625 7626 #ifdef DEBUG 7627 7628 static int devid_register_corrupt = 0; 7629 static int devid_register_corrupt_major = 0; 7630 static int devid_register_corrupt_hint = 0; 7631 static int devid_register_corrupt_hint_major = 0; 7632 7633 static int devid_lyr_debug = 0; 7634 7635 #define DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs) \ 7636 if (devid_lyr_debug) \ 7637 ddi_debug_devid_devts(msg, ndevs, devs) 7638 7639 #else 7640 7641 #define DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs) 7642 7643 #endif /* DEBUG 
*/ 7644 7645 7646 #ifdef DEBUG 7647 7648 static void 7649 ddi_debug_devid_devts(char *msg, int ndevs, dev_t *devs) 7650 { 7651 int i; 7652 7653 cmn_err(CE_CONT, "%s:\n", msg); 7654 for (i = 0; i < ndevs; i++) { 7655 cmn_err(CE_CONT, " 0x%lx\n", devs[i]); 7656 } 7657 } 7658 7659 static void 7660 ddi_debug_devid_paths(char *msg, int npaths, char **paths) 7661 { 7662 int i; 7663 7664 cmn_err(CE_CONT, "%s:\n", msg); 7665 for (i = 0; i < npaths; i++) { 7666 cmn_err(CE_CONT, " %s\n", paths[i]); 7667 } 7668 } 7669 7670 static void 7671 ddi_debug_devid_devts_per_path(char *path, int ndevs, dev_t *devs) 7672 { 7673 int i; 7674 7675 cmn_err(CE_CONT, "dev_ts per path %s\n", path); 7676 for (i = 0; i < ndevs; i++) { 7677 cmn_err(CE_CONT, " 0x%lx\n", devs[i]); 7678 } 7679 } 7680 7681 #endif /* DEBUG */ 7682 7683 /* 7684 * Register device id into DDI framework. 7685 * Must be called when the driver is bound. 7686 */ 7687 static int 7688 i_ddi_devid_register(dev_info_t *dip, ddi_devid_t devid) 7689 { 7690 impl_devid_t *i_devid = (impl_devid_t *)devid; 7691 size_t driver_len; 7692 const char *driver_name; 7693 char *devid_str; 7694 major_t major; 7695 7696 if ((dip == NULL) || 7697 ((major = ddi_driver_major(dip)) == DDI_MAJOR_T_NONE)) 7698 return (DDI_FAILURE); 7699 7700 /* verify that the devid is valid */ 7701 if (ddi_devid_valid(devid) != DDI_SUCCESS) 7702 return (DDI_FAILURE); 7703 7704 /* Updating driver name hint in devid */ 7705 driver_name = ddi_driver_name(dip); 7706 driver_len = strlen(driver_name); 7707 if (driver_len > DEVID_HINT_SIZE) { 7708 /* Pick up last four characters of driver name */ 7709 driver_name += driver_len - DEVID_HINT_SIZE; 7710 driver_len = DEVID_HINT_SIZE; 7711 } 7712 bzero(i_devid->did_driver, DEVID_HINT_SIZE); 7713 bcopy(driver_name, i_devid->did_driver, driver_len); 7714 7715 #ifdef DEBUG 7716 /* Corrupt the devid for testing. 
*/ 7717 if (devid_register_corrupt) 7718 i_devid->did_id[0] += devid_register_corrupt; 7719 if (devid_register_corrupt_major && 7720 (major == devid_register_corrupt_major)) 7721 i_devid->did_id[0] += 1; 7722 if (devid_register_corrupt_hint) 7723 i_devid->did_driver[0] += devid_register_corrupt_hint; 7724 if (devid_register_corrupt_hint_major && 7725 (major == devid_register_corrupt_hint_major)) 7726 i_devid->did_driver[0] += 1; 7727 #endif /* DEBUG */ 7728 7729 /* encode the devid as a string */ 7730 if ((devid_str = ddi_devid_str_encode(devid, NULL)) == NULL) 7731 return (DDI_FAILURE); 7732 7733 /* add string as a string property */ 7734 if (ndi_prop_update_string(DDI_DEV_T_NONE, dip, 7735 DEVID_PROP_NAME, devid_str) != DDI_SUCCESS) { 7736 cmn_err(CE_WARN, "%s%d: devid property update failed", 7737 ddi_driver_name(dip), ddi_get_instance(dip)); 7738 ddi_devid_str_free(devid_str); 7739 return (DDI_FAILURE); 7740 } 7741 7742 /* keep pointer to devid string for interrupt context fma code */ 7743 if (DEVI(dip)->devi_devid_str) 7744 ddi_devid_str_free(DEVI(dip)->devi_devid_str); 7745 DEVI(dip)->devi_devid_str = devid_str; 7746 return (DDI_SUCCESS); 7747 } 7748 7749 int 7750 ddi_devid_register(dev_info_t *dip, ddi_devid_t devid) 7751 { 7752 int rval; 7753 7754 rval = i_ddi_devid_register(dip, devid); 7755 if (rval == DDI_SUCCESS) { 7756 /* 7757 * Register devid in devid-to-path cache 7758 */ 7759 if (e_devid_cache_register(dip, devid) == DDI_SUCCESS) { 7760 mutex_enter(&DEVI(dip)->devi_lock); 7761 DEVI(dip)->devi_flags |= DEVI_CACHED_DEVID; 7762 mutex_exit(&DEVI(dip)->devi_lock); 7763 } else if (ddi_get_name_addr(dip)) { 7764 /* 7765 * We only expect cache_register DDI_FAILURE when we 7766 * can't form the full path because of NULL devi_addr. 
7767 */ 7768 cmn_err(CE_WARN, "%s%d: failed to cache devid", 7769 ddi_driver_name(dip), ddi_get_instance(dip)); 7770 } 7771 } else { 7772 cmn_err(CE_WARN, "%s%d: failed to register devid", 7773 ddi_driver_name(dip), ddi_get_instance(dip)); 7774 } 7775 return (rval); 7776 } 7777 7778 /* 7779 * Remove (unregister) device id from DDI framework. 7780 * Must be called when device is detached. 7781 */ 7782 static void 7783 i_ddi_devid_unregister(dev_info_t *dip) 7784 { 7785 if (DEVI(dip)->devi_devid_str) { 7786 ddi_devid_str_free(DEVI(dip)->devi_devid_str); 7787 DEVI(dip)->devi_devid_str = NULL; 7788 } 7789 7790 /* remove the devid property */ 7791 (void) ndi_prop_remove(DDI_DEV_T_NONE, dip, DEVID_PROP_NAME); 7792 } 7793 7794 void 7795 ddi_devid_unregister(dev_info_t *dip) 7796 { 7797 mutex_enter(&DEVI(dip)->devi_lock); 7798 DEVI(dip)->devi_flags &= ~DEVI_CACHED_DEVID; 7799 mutex_exit(&DEVI(dip)->devi_lock); 7800 e_devid_cache_unregister(dip); 7801 i_ddi_devid_unregister(dip); 7802 } 7803 7804 /* 7805 * Allocate and initialize a device id. 
7806 */ 7807 int 7808 ddi_devid_init( 7809 dev_info_t *dip, 7810 ushort_t devid_type, 7811 ushort_t nbytes, 7812 void *id, 7813 ddi_devid_t *ret_devid) 7814 { 7815 impl_devid_t *i_devid; 7816 int sz = sizeof (*i_devid) + nbytes - sizeof (char); 7817 int driver_len; 7818 const char *driver_name; 7819 7820 switch (devid_type) { 7821 case DEVID_SCSI3_WWN: 7822 /*FALLTHRU*/ 7823 case DEVID_SCSI_SERIAL: 7824 /*FALLTHRU*/ 7825 case DEVID_ATA_SERIAL: 7826 /*FALLTHRU*/ 7827 case DEVID_ENCAP: 7828 if (nbytes == 0) 7829 return (DDI_FAILURE); 7830 if (id == NULL) 7831 return (DDI_FAILURE); 7832 break; 7833 case DEVID_FAB: 7834 if (nbytes != 0) 7835 return (DDI_FAILURE); 7836 if (id != NULL) 7837 return (DDI_FAILURE); 7838 nbytes = sizeof (int) + 7839 sizeof (struct timeval32) + sizeof (short); 7840 sz += nbytes; 7841 break; 7842 default: 7843 return (DDI_FAILURE); 7844 } 7845 7846 if ((i_devid = kmem_zalloc(sz, KM_SLEEP)) == NULL) 7847 return (DDI_FAILURE); 7848 7849 i_devid->did_magic_hi = DEVID_MAGIC_MSB; 7850 i_devid->did_magic_lo = DEVID_MAGIC_LSB; 7851 i_devid->did_rev_hi = DEVID_REV_MSB; 7852 i_devid->did_rev_lo = DEVID_REV_LSB; 7853 DEVID_FORMTYPE(i_devid, devid_type); 7854 DEVID_FORMLEN(i_devid, nbytes); 7855 7856 /* Fill in driver name hint */ 7857 driver_name = ddi_driver_name(dip); 7858 driver_len = strlen(driver_name); 7859 if (driver_len > DEVID_HINT_SIZE) { 7860 /* Pick up last four characters of driver name */ 7861 driver_name += driver_len - DEVID_HINT_SIZE; 7862 driver_len = DEVID_HINT_SIZE; 7863 } 7864 7865 bcopy(driver_name, i_devid->did_driver, driver_len); 7866 7867 /* Fill in id field */ 7868 if (devid_type == DEVID_FAB) { 7869 char *cp; 7870 uint32_t hostid; 7871 struct timeval32 timestamp32; 7872 int i; 7873 int *ip; 7874 short gen; 7875 7876 /* increase the generation number */ 7877 mutex_enter(&devid_gen_mutex); 7878 gen = devid_gen_number++; 7879 mutex_exit(&devid_gen_mutex); 7880 7881 cp = i_devid->did_id; 7882 7883 /* Fill in host id (big-endian 
byte ordering) */ 7884 hostid = zone_get_hostid(NULL); 7885 *cp++ = hibyte(hiword(hostid)); 7886 *cp++ = lobyte(hiword(hostid)); 7887 *cp++ = hibyte(loword(hostid)); 7888 *cp++ = lobyte(loword(hostid)); 7889 7890 /* 7891 * Fill in timestamp (big-endian byte ordering) 7892 * 7893 * (Note that the format may have to be changed 7894 * before 2038 comes around, though it's arguably 7895 * unique enough as it is..) 7896 */ 7897 uniqtime32(×tamp32); 7898 ip = (int *)×tamp32; 7899 for (i = 0; 7900 i < sizeof (timestamp32) / sizeof (int); i++, ip++) { 7901 int val; 7902 val = *ip; 7903 *cp++ = hibyte(hiword(val)); 7904 *cp++ = lobyte(hiword(val)); 7905 *cp++ = hibyte(loword(val)); 7906 *cp++ = lobyte(loword(val)); 7907 } 7908 7909 /* fill in the generation number */ 7910 *cp++ = hibyte(gen); 7911 *cp++ = lobyte(gen); 7912 } else 7913 bcopy(id, i_devid->did_id, nbytes); 7914 7915 /* return device id */ 7916 *ret_devid = (ddi_devid_t)i_devid; 7917 return (DDI_SUCCESS); 7918 } 7919 7920 int 7921 ddi_devid_get(dev_info_t *dip, ddi_devid_t *ret_devid) 7922 { 7923 return (i_ddi_devi_get_devid(DDI_DEV_T_ANY, dip, ret_devid)); 7924 } 7925 7926 int 7927 i_ddi_devi_get_devid(dev_t dev, dev_info_t *dip, ddi_devid_t *ret_devid) 7928 { 7929 char *devidstr; 7930 7931 ASSERT(dev != DDI_DEV_T_NONE); 7932 7933 /* look up the property, devt specific first */ 7934 if (ddi_prop_lookup_string(dev, dip, DDI_PROP_DONTPASS, 7935 DEVID_PROP_NAME, &devidstr) != DDI_PROP_SUCCESS) { 7936 if ((dev == DDI_DEV_T_ANY) || 7937 (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 7938 DDI_PROP_DONTPASS, DEVID_PROP_NAME, &devidstr) != 7939 DDI_PROP_SUCCESS)) { 7940 return (DDI_FAILURE); 7941 } 7942 } 7943 7944 /* convert to binary form */ 7945 if (ddi_devid_str_decode(devidstr, ret_devid, NULL) == -1) { 7946 ddi_prop_free(devidstr); 7947 return (DDI_FAILURE); 7948 } 7949 ddi_prop_free(devidstr); 7950 return (DDI_SUCCESS); 7951 } 7952 7953 /* 7954 * Return a copy of the device id for dev_t 7955 */ 7956 int 7957 
ddi_lyr_get_devid(dev_t dev, ddi_devid_t *ret_devid)
{
	dev_info_t	*dip;
	int		rval;

	/* get the dip */
	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (DDI_FAILURE);

	rval = i_ddi_devi_get_devid(dev, dip, ret_devid);

	ddi_release_devi(dip);		/* e_ddi_hold_devi_by_dev() */
	return (rval);
}

/*
 * Return a copy of the minor name for dev_t and spec_type
 */
int
ddi_lyr_get_minor_name(dev_t dev, int spec_type, char **minor_name)
{
	char		*buf;
	int		circ;
	dev_info_t	*dip;
	char		*nm;
	int		rval;

	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) {
		*minor_name = NULL;
		return (DDI_FAILURE);
	}

	/* Find the minor name and copy into max size buf */
	buf = kmem_alloc(MAXNAMELEN, KM_SLEEP);
	ndi_devi_enter(dip, &circ);
	nm = i_ddi_devtspectype_to_minorname(dip, dev, spec_type);
	if (nm)
		(void) strcpy(buf, nm);
	ndi_devi_exit(dip, circ);
	ddi_release_devi(dip);		/* e_ddi_hold_devi_by_dev() */

	if (nm) {
		/* duplicate into min size buf for return result */
		*minor_name = i_ddi_strdup(buf, KM_SLEEP);
		rval = DDI_SUCCESS;
	} else {
		*minor_name = NULL;
		rval = DDI_FAILURE;
	}

	/* free max size buf and return */
	kmem_free(buf, MAXNAMELEN);
	return (rval);
}

/*
 * Return the list of dev_ts associated with the given devid/minor_name,
 * consulting the devid cache first and falling back to device discovery.
 * On DDI_SUCCESS the returned array must be freed with
 * ddi_lyr_free_devlist().
 */
int
ddi_lyr_devid_to_devlist(
	ddi_devid_t	devid,
	char		*minor_name,
	int		*retndevs,
	dev_t		**retdevs)
{
	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);

	/* fast path: the devid cache already knows the answer */
	if (e_devid_cache_to_devt_list(devid, minor_name,
	    retndevs, retdevs) == DDI_SUCCESS) {
		ASSERT(*retndevs > 0);
		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
		    *retndevs, *retdevs);
		return (DDI_SUCCESS);
	}

	if (e_ddi_devid_discovery(devid) == DDI_FAILURE) {
		return (DDI_FAILURE);
	}

	/* retry the cache after discovery may have populated it */
	if (e_devid_cache_to_devt_list(devid, minor_name,
	    retndevs, retdevs) == DDI_SUCCESS) {
		ASSERT(*retndevs > 0);
		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
		    *retndevs, *retdevs);
		return (DDI_SUCCESS);
	}

	return (DDI_FAILURE);
}

/*
 * Free a dev_t list returned by ddi_lyr_devid_to_devlist().
 */
void
ddi_lyr_free_devlist(dev_t *devlist, int ndevs)
{
	kmem_free(devlist, sizeof (dev_t) * ndevs);
}

/*
 * Note: This will need to be fixed if we ever allow processes to
 * have more than one data model per exec.
 */
model_t
ddi_mmap_get_model(void)
{
	return (get_udatamodel());
}

/*
 * Normalize a model value: mask to the model bits and clear the
 * "native" flag so callers see a plain ILP32/LP64 indicator.
 */
model_t
ddi_model_convert_from(model_t model)
{
	return ((model & DDI_MODEL_MASK) & ~DDI_MODEL_NATIVE);
}

/*
 * ddi interfaces managing storage and retrieval of eventcookies.
 */

/*
 * Invoke bus nexus driver's implementation of the
 * (*bus_remove_eventcall)() interface to remove a registered
 * callback handler for "event".
 */
int
ddi_remove_event_handler(ddi_callback_id_t id)
{
	ndi_event_callbacks_t *cb = (ndi_event_callbacks_t *)id;
	dev_info_t *ddip;

	/* guard both DEBUG and non-DEBUG kernels against a NULL id */
	ASSERT(cb);
	if (!cb) {
		return (DDI_FAILURE);
	}

	ddip = NDI_EVENT_DDIP(cb->ndi_evtcb_cookie);
	return (ndi_busop_remove_eventcall(ddip, id));
}

/*
 * Invoke bus nexus driver's implementation of the
 * (*bus_add_eventcall)() interface to register a callback handler
 * for "event".
 */
int
ddi_add_event_handler(dev_info_t *dip, ddi_eventcookie_t event,
    void (*handler)(dev_info_t *, ddi_eventcookie_t, void *, void *),
    void *arg, ddi_callback_id_t *id)
{
	return (ndi_busop_add_eventcall(dip, dip, event, handler, arg, id));
}


/*
 * Return a handle for event "name" by calling up the device tree
 * hierarchy via (*bus_get_eventcookie)() interface until claimed
 * by a bus nexus or top of dev_info tree is reached.
 */
int
ddi_get_eventcookie(dev_info_t *dip, char *name,
    ddi_eventcookie_t *event_cookiep)
{
	return (ndi_busop_get_eventcookie(dip, dip,
	    name, event_cookiep));
}

/*
 * This procedure is provided as the general callback function when
 * umem_lockmemory calls as_add_callback for long term memory locking.
 * When as_unmap, as_setprot, or as_free encounter segments which have
 * locked memory, this callback will be invoked.
 */
void
umem_lock_undo(struct as *as, void *arg, uint_t event)
{
	_NOTE(ARGUNUSED(as, event))
	struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)arg;

	/*
	 * Call the cleanup function.  Decrement the cookie reference
	 * count, if it goes to zero, return the memory for the cookie.
	 * The i_ddi_umem_unlock for this cookie may or may not have been
	 * called already.  It is the responsibility of the caller of
	 * umem_lockmemory to handle the case of the cleanup routine
	 * being called after a ddi_umem_unlock for the cookie
	 * was called.
	 */

	(*cp->callbacks.cbo_umem_lock_cleanup)((ddi_umem_cookie_t)cp);

	/* remove the cookie if reference goes to zero */
	if (atomic_add_long_nv((ulong_t *)(&(cp->cook_refcnt)), -1) == 0) {
		kmem_free(cp, sizeof (struct ddi_umem_cookie));
	}
}

/*
 * The following two Consolidation Private routines provide generic
 * interfaces to increase/decrease the amount of device-locked memory.
 *
 * To keep project_rele and project_hold consistent, i_ddi_decr_locked_memory()
 * must be called every time i_ddi_incr_locked_memory() is called.
 */
int
/* ARGSUSED */
i_ddi_incr_locked_memory(proc_t *procp, rctl_qty_t inc)
{
	ASSERT(procp != NULL);
	mutex_enter(&procp->p_lock);
	/* fails (ENOMEM) when the charge would exceed the rctl limit */
	if (rctl_incr_locked_mem(procp, NULL, inc, 1)) {
		mutex_exit(&procp->p_lock);
		return (ENOMEM);
	}
	mutex_exit(&procp->p_lock);
	return (0);
}

/*
 * To keep project_rele and project_hold consistent, i_ddi_incr_locked_memory()
 * must be called every time i_ddi_decr_locked_memory() is called.
 */
/* ARGSUSED */
void
i_ddi_decr_locked_memory(proc_t *procp, rctl_qty_t dec)
{
	ASSERT(procp != NULL);
	mutex_enter(&procp->p_lock);
	rctl_decr_locked_mem(procp, NULL, dec, 1);
	mutex_exit(&procp->p_lock);
}

/*
 * The cookie->upd_max_lock_rctl flag is used to determine if we should
 * charge device locked memory to the max-locked-memory rctl.  Tracking
 * device locked memory causes the rctl locks to get hot under high-speed
 * I/O such as RDSv3 over IB.  If there is no max-locked-memory rctl limit,
 * we bypass charging the locked memory to the rctl altogether.  The cookie's
 * flag tells us if the rctl value should be updated when unlocking the memory,
 * in case the rctl gets changed after the memory was locked.  Any device
 * locked memory in that rare case will not be counted toward the rctl limit.
 *
 * When tracking the locked memory, the kproject_t parameter is always NULL
 * in the code paths:
 *	i_ddi_incr_locked_memory -> rctl_incr_locked_mem
 *	i_ddi_decr_locked_memory -> rctl_decr_locked_mem
 * Thus, we always use the tk_proj member to check the projp setting.
 */
static void
init_lockedmem_rctl_flag(struct ddi_umem_cookie *cookie)
{
	proc_t		*p;
	kproject_t	*projp;
	zone_t		*zonep;

	ASSERT(cookie);
	p = cookie->procp;
	ASSERT(p);

	zonep = p->p_zone;
	projp = p->p_task->tk_proj;

	ASSERT(zonep);
	ASSERT(projp);

	/* only track locked memory when some rctl limit is actually set */
	if (zonep->zone_locked_mem_ctl == UINT64_MAX &&
	    projp->kpj_data.kpd_locked_mem_ctl == UINT64_MAX)
		cookie->upd_max_lock_rctl = 0;
	else
		cookie->upd_max_lock_rctl = 1;
}

/*
 * This routine checks if the max-locked-memory resource ctl is
 * exceeded, if not increments it, grabs a hold on the project.
 * Returns 0 if successful otherwise returns error code
 */
static int
umem_incr_devlockmem(struct ddi_umem_cookie *cookie)
{
	proc_t		*procp;
	int		ret;

	ASSERT(cookie);
	/* no accounting when no rctl limit is in effect (see above) */
	if (cookie->upd_max_lock_rctl == 0)
		return (0);

	procp = cookie->procp;
	ASSERT(procp);

	if ((ret = i_ddi_incr_locked_memory(procp,
	    cookie->size)) != 0) {
		return (ret);
	}
	return (0);
}

/*
 * Decrements the max-locked-memory resource ctl and releases
 * the hold on the project that was acquired during umem_incr_devlockmem
 */
static void
umem_decr_devlockmem(struct ddi_umem_cookie *cookie)
{
	proc_t		*proc;

	if (cookie->upd_max_lock_rctl == 0)
		return;

	proc = (proc_t *)cookie->procp;
	if (!proc)
		return;

	i_ddi_decr_locked_memory(proc, cookie->size);
}

/*
 * A consolidation private function which is essentially equivalent to
 * ddi_umem_lock but with the addition of arguments ops_vector and procp.
 * A call to as_add_callback is done if DDI_UMEMLOCK_LONGTERM is set, and
 * the ops_vector is valid.
 *
 * Lock the virtual address range in the current process and create a
 * ddi_umem_cookie (of type UMEM_LOCKED).  This can be used to pass to
 * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
 * to user space.
 *
 * Note: The resource control accounting currently uses a full charge model
 * in other words attempts to lock the same/overlapping areas of memory
 * will deduct the full size of the buffer from the projects running
 * counter for the device locked memory.
 *
 * addr, size should be PAGESIZE aligned
 *
 * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
 *	identifies whether the locked memory will be read or written or both
 *	DDI_UMEMLOCK_LONGTERM must be set when the locking will
 *	be maintained for an indefinitely long period (essentially permanent),
 *	rather than for what would be required for a typical I/O completion.
 *	When DDI_UMEMLOCK_LONGTERM is set, umem_lockmemory will return EFAULT
 *	if the memory pertains to a regular file which is mapped MAP_SHARED.
 *	This is to prevent a deadlock if a file truncation is attempted after
 *	after the locking is done.
 *
 * Returns 0 on success
 *	EINVAL - for invalid parameters
 *	EPERM, ENOMEM and other error codes returned by as_pagelock
 *	ENOMEM - is returned if the current request to lock memory exceeds
 *	*.max-locked-memory resource control value.
 *	EFAULT - memory pertains to a regular file mapped shared and
 *	and DDI_UMEMLOCK_LONGTERM flag is set
 *	EAGAIN - could not start the ddi_umem_unlock list processing thread
 */
int
umem_lockmemory(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie,
    struct umem_callback_ops *ops_vector,
    proc_t *procp)
{
	int	error;
	struct ddi_umem_cookie *p;
	void	(*driver_callback)() = NULL;
	struct as *as;
	struct seg		*seg;
	vnode_t			*vp;

	/* Allow device drivers to not have to reference "curproc" */
	if (procp == NULL)
		procp = curproc;
	as = procp->p_as;
	*cookie = NULL;		/* in case of any error return */

	/* These are the only three valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE |
	    DDI_UMEMLOCK_LONGTERM)) != 0)
		return (EINVAL);

	/* At least one (can be both) of the two access flags must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0)
		return (EINVAL);

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0)
		return (EINVAL);

	if ((len & PAGEOFFSET) != 0)
		return (EINVAL);

	/*
	 * For longterm locking a driver callback must be specified; if
	 * not longterm then a callback is optional.
	 */
	if (ops_vector != NULL) {
		if (ops_vector->cbo_umem_callback_version !=
		    UMEM_CALLBACK_VERSION)
			return (EINVAL);
		else
			driver_callback = ops_vector->cbo_umem_lock_cleanup;
	}
	if ((driver_callback == NULL) && (flags & DDI_UMEMLOCK_LONGTERM))
		return (EINVAL);

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie (KM_SLEEP cannot fail) */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store procp in cookie for later iosetup/unlock */
	p->procp = (void *)procp;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = as;

	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;
	init_lockedmem_rctl_flag(p);

	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(as, &(p->pparray), addr, len, p->s_flags);
	if (error != 0) {
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/*
	 * For longterm locking the addr must pertain to a seg_vn segment or
	 * or a seg_spt segment.
	 * If the segment pertains to a regular file, it cannot be
	 * mapped MAP_SHARED.
	 * This is to prevent a deadlock if a file truncation is attempted
	 * after the locking is done.
	 * Doing this after as_pagelock guarantees persistence of the as; if
	 * an unacceptable segment is found, the cleanup includes calling
	 * as_pageunlock before returning EFAULT.
	 *
	 * segdev is allowed here as it is already locked.  This allows
	 * for memory exported by drivers through mmap() (which is already
	 * locked) to be allowed for LONGTERM.
	 */
	if (flags & DDI_UMEMLOCK_LONGTERM) {
		extern	struct seg_ops segspt_shmops;
		extern	struct seg_ops segdev_ops;
		AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
		for (seg = as_segat(as, addr); ; seg = AS_SEGNEXT(as, seg)) {
			if (seg == NULL || seg->s_base > addr + len)
				break;
			/* continue re-runs the increment, advancing seg */
			if (seg->s_ops == &segdev_ops)
				continue;
			if (((seg->s_ops != &segvn_ops) &&
			    (seg->s_ops != &segspt_shmops)) ||
			    ((SEGOP_GETVP(seg, addr, &vp) == 0 &&
			    vp != NULL && vp->v_type == VREG) &&
			    (SEGOP_GETTYPE(seg, addr) & MAP_SHARED))) {
				as_pageunlock(as, p->pparray,
				    addr, len, p->s_flags);
				AS_LOCK_EXIT(as, &as->a_lock);
				umem_decr_devlockmem(p);
				kmem_free(p, sizeof (struct ddi_umem_cookie));
				*cookie = (ddi_umem_cookie_t)NULL;
				return (EFAULT);
			}
		}
		AS_LOCK_EXIT(as, &as->a_lock);
	}


	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	if (driver_callback != NULL) {
		/* i_ddi_umem_unlock and umem_lock_undo may need the cookie */
		p->cook_refcnt = 2;
		p->callbacks = *ops_vector;
	} else {
		/* only i_ddi_umme_unlock needs the cookie */
		p->cook_refcnt = 1;
	}

	*cookie = (ddi_umem_cookie_t)p;

	/*
	 * If a driver callback was specified, add an entry to the
	 * as struct callback list.  The as_pagelock above guarantees
	 * the persistence of as.
	 */
	if (driver_callback) {
		error = as_add_callback(as, umem_lock_undo, p, AS_ALL_EVENT,
		    addr, len, KM_SLEEP);
		if (error != 0) {
			as_pageunlock(as, p->pparray,
			    addr, len, p->s_flags);
			umem_decr_devlockmem(p);
			kmem_free(p, sizeof (struct ddi_umem_cookie));
			*cookie = (ddi_umem_cookie_t)NULL;
		}
	}
	return (error);
}

/*
 * Unlock the pages locked by ddi_umem_lock or umem_lockmemory and free
 * the cookie.  Called from i_ddi_umem_unlock_thread.
 */

static void
i_ddi_umem_unlock(struct ddi_umem_cookie *p)
{
	uint_t	rc;

	/*
	 * There is no way to determine whether a callback to
	 * umem_lock_undo was registered via as_add_callback.
	 * (i.e. umem_lockmemory was called with DDI_MEMLOCK_LONGTERM and
	 * a valid callback function structure.)  as_delete_callback
	 * is called to delete a possible registered callback.  If the
	 * return from as_delete_callbacks is AS_CALLBACK_DELETED, it
	 * indicates that there was a callback registered, and that is was
	 * successfully deleted.  Thus, the cookie reference count
	 * will never be decremented by umem_lock_undo.  Just return the
	 * memory for the cookie, since both users of the cookie are done.
	 * A return of AS_CALLBACK_NOTFOUND indicates a callback was
	 * never registered.  A return of AS_CALLBACK_DELETE_DEFERRED
	 * indicates that callback processing is taking place and, and
	 * umem_lock_undo is, or will be, executing, and thus decrementing
	 * the cookie reference count when it is complete.
	 *
	 * This needs to be done before as_pageunlock so that the
	 * persistence of as is guaranteed because of the locked pages.
	 *
	 */
	rc = as_delete_callback(p->asp, p);


	/*
	 * The proc->p_as will be stale if i_ddi_umem_unlock is called
	 * after relvm is called so use p->asp.
	 */
	as_pageunlock(p->asp, p->pparray, p->cvaddr, p->size, p->s_flags);

	/*
	 * Now that we have unlocked the memory decrement the
	 * *.max-locked-memory rctl
	 */
	umem_decr_devlockmem(p);

	if (rc == AS_CALLBACK_DELETED) {
		/* umem_lock_undo will not happen, return the cookie memory */
		ASSERT(p->cook_refcnt == 2);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
	} else {
		/*
		 * umem_undo_lock may happen if as_delete_callback returned
		 * AS_CALLBACK_DELETE_DEFERRED.  In that case, decrement the
		 * reference count, atomically, and return the cookie
		 * memory if the reference count goes to zero.  The only
		 * other value for rc is AS_CALLBACK_NOTFOUND.  In that
		 * case, just return the cookie memory.
		 */
		if ((rc != AS_CALLBACK_DELETE_DEFERRED) ||
		    (atomic_add_long_nv((ulong_t *)(&(p->cook_refcnt)), -1)
		    == 0)) {
			kmem_free(p, sizeof (struct ddi_umem_cookie));
		}
	}
}

/*
 * i_ddi_umem_unlock_thread - deferred ddi_umem_unlock list handler.
 *
 * Call i_ddi_umem_unlock for entries in the ddi_umem_unlock list
 * until it is empty.  Then, wait for more to be added.  This thread is awoken
 * via calls to ddi_umem_unlock.
 */

static void
i_ddi_umem_unlock_thread(void)
{
	struct ddi_umem_cookie	*ret_cookie;
	callb_cpr_t	cprinfo;

	/* process the ddi_umem_unlock list */
	CALLB_CPR_INIT(&cprinfo, &ddi_umem_unlock_mutex,
	    callb_generic_cpr, "unlock_thread");
	for (;;) {
		mutex_enter(&ddi_umem_unlock_mutex);
		if (ddi_umem_unlock_head != NULL) {	/* list not empty */
			ret_cookie = ddi_umem_unlock_head;
			/* take if off the list */
			if ((ddi_umem_unlock_head =
			    ddi_umem_unlock_head->unl_forw) == NULL) {
				ddi_umem_unlock_tail = NULL;
			}
			mutex_exit(&ddi_umem_unlock_mutex);
			/* unlock the pages in this cookie */
			(void) i_ddi_umem_unlock(ret_cookie);
		} else {	/* list is empty, wait for next ddi_umem_unlock */
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&ddi_umem_unlock_cv, &ddi_umem_unlock_mutex);
			CALLB_CPR_SAFE_END(&cprinfo, &ddi_umem_unlock_mutex);
			mutex_exit(&ddi_umem_unlock_mutex);
		}
	}
	/* ddi_umem_unlock_thread does not exit */
	/* NOTREACHED */
}

/*
 * Start the thread that will process the ddi_umem_unlock list if it is
 * not already started (i_ddi_umem_unlock_thread).
 */
static void
i_ddi_umem_unlock_thread_start(void)
{
	/* the mutex makes the NULL check + create atomic across callers */
	mutex_enter(&ddi_umem_unlock_mutex);
	if (ddi_umem_unlock_thread == NULL) {
		ddi_umem_unlock_thread = thread_create(NULL, 0,
		    i_ddi_umem_unlock_thread, NULL, 0, &p0,
		    TS_RUN, minclsyspri);
	}
	mutex_exit(&ddi_umem_unlock_mutex);
}

/*
 * Lock the virtual address range in the current process and create a
 * ddi_umem_cookie (of type UMEM_LOCKED).  This can be used to pass to
 * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
 * to user space.
 *
 * Note: The resource control accounting currently uses a full charge model
 * in other words attempts to lock the same/overlapping areas of memory
 * will deduct the full size of the buffer from the projects running
 * counter for the device locked memory.  This applies to umem_lockmemory too.
 *
 * addr, size should be PAGESIZE aligned
 * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
 *	identifies whether the locked memory will be read or written or both
 *
 * Returns 0 on success
 *	EINVAL - for invalid parameters
 *	EPERM, ENOMEM and other error codes returned by as_pagelock
 *	ENOMEM - is returned if the current request to lock memory exceeds
 *	*.max-locked-memory resource control value.
 *	EAGAIN - could not start the ddi_umem_unlock list processing thread
 */
int
ddi_umem_lock(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie)
{
	int		error;
	struct ddi_umem_cookie *p;

	*cookie = NULL;		/* in case of any error return */

	/* These are the only two valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) != 0) {
		return (EINVAL);
	}

	/* At least one of the two flags (or both) must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0) {
		return (EINVAL);
	}

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0) {
		return (EINVAL);
	}

	if ((len & PAGEOFFSET) != 0) {
		return (EINVAL);
	}

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store curproc in cookie for later iosetup/unlock */
	p->procp = (void *)curproc;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = curproc->p_as;
	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;
	init_lockedmem_rctl_flag(p);

	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(((proc_t *)p->procp)->p_as, &(p->pparray),
	    addr, len, p->s_flags);
	if (error != 0) {
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	p->cook_refcnt = 1;

	*cookie = (ddi_umem_cookie_t)p;
	return (error);
}

/*
 * Add the cookie to the ddi_umem_unlock list.  Pages will be
 * unlocked by i_ddi_umem_unlock_thread.
 */

void
ddi_umem_unlock(ddi_umem_cookie_t cookie)
{
	struct ddi_umem_cookie	*p = (struct ddi_umem_cookie *)cookie;

	ASSERT(p->type == UMEM_LOCKED);
	ASSERT(CPU_ON_INTR(CPU) == 0); /* cannot be high level */
	ASSERT(ddi_umem_unlock_thread != NULL);

	p->unl_forw = (struct ddi_umem_cookie *)NULL;	/* end of list */
	/*
	 * Queue the unlock request and notify i_ddi_umem_unlock thread
	 * if it's called in the interrupt context.  Otherwise, unlock pages
	 * immediately.
	 */
	if (servicing_interrupt()) {
		/* queue the unlock request and notify the thread */
		mutex_enter(&ddi_umem_unlock_mutex);
		if (ddi_umem_unlock_head == NULL) {
			ddi_umem_unlock_head = ddi_umem_unlock_tail = p;
			cv_broadcast(&ddi_umem_unlock_cv);
		} else {
			ddi_umem_unlock_tail->unl_forw = p;
			ddi_umem_unlock_tail = p;
		}
		mutex_exit(&ddi_umem_unlock_mutex);
	} else {
		/* unlock the pages right away */
		(void) i_ddi_umem_unlock(p);
	}
}

/*
 * Create a buf structure from a ddi_umem_cookie
 * cookie - is a ddi_umem_cookie for from ddi_umem_lock and ddi_umem_alloc
 *		(only UMEM_LOCKED & KMEM_NON_PAGEABLE types supported)
 * off, len - identifies the portion of the memory represented by the cookie
 *		that the buf points to.
 *	NOTE: off, len need to follow the alignment/size restrictions of the
 *		device (dev) that this buf will be passed to.  Some devices
 *		will accept unrestricted alignment/size, whereas others (such as
 *		st) require some block-size alignment/size.  It is the caller's
 *		responsibility to ensure that the alignment/size restrictions
 *		are met (we cannot assert as we do not know the restrictions)
 *
 * direction - is one of B_READ or B_WRITE and needs to be compatible with
 *		the flags used in ddi_umem_lock
 *
 * The following three arguments are used to initialize fields in the
 * buf structure and are uninterpreted by this routine.
 *
 * dev
 * blkno
 * iodone
 *
 * sleepflag - is one of DDI_UMEM_SLEEP or DDI_UMEM_NOSLEEP
 *
 * Returns a buf structure pointer on success (to be freed by freerbuf)
 *	NULL on any parameter error or memory alloc failure
 *
 */
struct buf *
ddi_umem_iosetup(ddi_umem_cookie_t cookie, off_t off, size_t len,
    int direction, dev_t dev, daddr_t blkno,
    int (*iodone)(struct buf *), int sleepflag)
{
	struct ddi_umem_cookie *p = (struct ddi_umem_cookie *)cookie;
	struct buf *bp;

	/*
	 * check for valid cookie offset, len
	 */
	if ((off + len) > p->size) {
		return (NULL);
	}

	if (len > p->size) {
		return (NULL);
	}

	/* direction has to be one of B_READ or B_WRITE */
	if ((direction != B_READ) && (direction != B_WRITE)) {
		return (NULL);
	}

	/* These are the only two valid sleepflags */
	if ((sleepflag != DDI_UMEM_SLEEP) && (sleepflag != DDI_UMEM_NOSLEEP)) {
		return (NULL);
	}

	/*
	 * Only cookies of type UMEM_LOCKED and KMEM_NON_PAGEABLE are supported
	 */
	if ((p->type != UMEM_LOCKED) && (p->type != KMEM_NON_PAGEABLE)) {
		return (NULL);
	}

	/* If type is KMEM_NON_PAGEABLE procp is NULL */
	ASSERT((p->type == KMEM_NON_PAGEABLE) ?
	    (p->procp == NULL) : (p->procp != NULL));

	bp = kmem_alloc(sizeof (struct buf), sleepflag);
	if (bp == NULL) {
		return (NULL);
	}
	bioinit(bp);

	bp->b_flags = B_BUSY | B_PHYS | direction;
	bp->b_edev = dev;
	bp->b_lblkno = blkno;
	bp->b_iodone = iodone;
	bp->b_bcount = len;
	bp->b_proc = (proc_t *)p->procp;
	ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
	bp->b_un.b_addr = (caddr_t)((uintptr_t)(p->cvaddr) + off);
	if (p->pparray != NULL) {
		bp->b_flags |= B_SHADOW;
		ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
		bp->b_shadow = p->pparray + btop(off);
	}
	return (bp);
}

/*
 * Fault-handling and related routines
 */

/*
 * Map the devinfo state flags to the coarse DDI device-state values,
 * most severe condition first.
 */
ddi_devstate_t
ddi_get_devstate(dev_info_t *dip)
{
	if (DEVI_IS_DEVICE_OFFLINE(dip))
		return (DDI_DEVSTATE_OFFLINE);
	else if (DEVI_IS_DEVICE_DOWN(dip) || DEVI_IS_BUS_DOWN(dip))
		return (DDI_DEVSTATE_DOWN);
	else if (DEVI_IS_BUS_QUIESCED(dip))
		return (DDI_DEVSTATE_QUIESCED);
	else if (DEVI_IS_DEVICE_DEGRADED(dip))
		return (DDI_DEVSTATE_DEGRADED);
	else
		return (DDI_DEVSTATE_UP);
}

/*
 * Report a device fault by posting a DDI_DEVI_FAULT_EVENT with the
 * assembled fault data; silently does nothing if no parent nexus
 * supplies the fault event cookie.
 */
void
ddi_dev_report_fault(dev_info_t *dip, ddi_fault_impact_t impact,
    ddi_fault_location_t location, const char *message)
{
	struct ddi_fault_event_data fd;
	ddi_eventcookie_t ec;

	/*
	 * Assemble all the information into a fault-event-data structure
	 */
	fd.f_dip = dip;
	fd.f_impact = impact;
	fd.f_location = location;
	fd.f_message = message;
	fd.f_oldstate = ddi_get_devstate(dip);

	/*
	 * Get eventcookie from defining parent.
	 */
	if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) !=
	    DDI_SUCCESS)
		return;

	(void) ndi_post_event(dip, dip, ec, &fd);
}

/*
 * Return the device-class string stored on the devinfo node.
 */
char *
i_ddi_devi_class(dev_info_t *dip)
{
	return (DEVI(dip)->devi_device_class);
}

/*
 * Replace the device-class string on the devinfo node, freeing any
 * previous value.  Returns DDI_FAILURE if the strdup fails (possible
 * only with a non-sleeping flag).
 */
int
i_ddi_set_devi_class(dev_info_t *dip, char *devi_class, int flag)
{
	struct dev_info *devi = DEVI(dip);

	mutex_enter(&devi->devi_lock);

	if (devi->devi_device_class)
		kmem_free(devi->devi_device_class,
		    strlen(devi->devi_device_class) + 1);

	if ((devi->devi_device_class = i_ddi_strdup(devi_class, flag))
	    != NULL) {
		mutex_exit(&devi->devi_lock);
		return (DDI_SUCCESS);
	}

	mutex_exit(&devi->devi_lock);

	return (DDI_FAILURE);
}


/*
 * Task Queues DDI interfaces.
 */

/*
 * Create a driver task queue.  With a dip the queue name is
 * "<drivername>_<name>" and the queue is instance-numbered; with a
 * NULL dip the given name is used as-is.
 */
/* ARGSUSED */
ddi_taskq_t *
ddi_taskq_create(dev_info_t *dip, const char *name, int nthreads,
    pri_t pri, uint_t cflags)
{
	char full_name[TASKQ_NAMELEN];
	const char *tq_name;
	int nodeid = 0;

	if (dip == NULL)
		tq_name = name;
	else {
		nodeid = ddi_get_instance(dip);

		if (name == NULL)
			name = "tq";

		(void) snprintf(full_name, sizeof (full_name), "%s_%s",
		    ddi_driver_name(dip), name);

		tq_name = full_name;
	}

	return ((ddi_taskq_t *)taskq_create_instance(tq_name, nodeid, nthreads,
	    pri == TASKQ_DEFAULTPRI ? minclsyspri : pri,
	    nthreads, INT_MAX, TASKQ_PREPOPULATE));
}

/*
 * Destroy a task queue created by ddi_taskq_create().
 */
void
ddi_taskq_destroy(ddi_taskq_t *tq)
{
	taskq_destroy((taskq_t *)tq);
}

/*
 * Dispatch func(arg) on the task queue; DDI_SLEEP maps to TQ_SLEEP,
 * anything else to TQ_NOSLEEP.  Returns DDI_FAILURE if dispatch failed.
 */
int
ddi_taskq_dispatch(ddi_taskq_t *tq, void (* func)(void *),
    void *arg, uint_t dflags)
{
	taskqid_t id = taskq_dispatch((taskq_t *)tq, func, arg,
	    dflags == DDI_SLEEP ? TQ_SLEEP : TQ_NOSLEEP);

	return (id != 0 ? DDI_SUCCESS : DDI_FAILURE);
}

/*
 * Wait for all previously dispatched tasks to complete.
 */
void
ddi_taskq_wait(ddi_taskq_t *tq)
{
	taskq_wait((taskq_t *)tq);
}

/*
 * Suspend execution of tasks on the queue.
 */
void
ddi_taskq_suspend(ddi_taskq_t *tq)
{
	taskq_suspend((taskq_t *)tq);
}

/*
 * Return B_TRUE if the task queue is suspended.
 */
boolean_t
ddi_taskq_suspended(ddi_taskq_t *tq)
{
	return (taskq_suspended((taskq_t *)tq));
}

/*
 * Resume execution of tasks on the queue.
 */
void
ddi_taskq_resume(ddi_taskq_t *tq)
{
	taskq_resume((taskq_t *)tq);
}

/*
 * Split an interface name of the form "<alnum><number>" (e.g. "hme0")
 * into its alphanumeric prefix (copied into alnum) and its trailing
 * instance number (stored in *nump).  Fails if there is no trailing
 * number or the name consists only of digits.
 */
int
ddi_parse(
	const char	*ifname,
	char		*alnum,
	uint_t		*nump)
{
	const char	*p;
	int		l;
	ulong_t	num;
	boolean_t	nonum = B_TRUE;
	char		c;

	l = strlen(ifname);
	/* scan backwards over the trailing digits */
	for (p = ifname + l; p != ifname; l--) {
		c = *--p;
		if (!isdigit(c)) {
			/* copy the non-numeric prefix (l bytes + NUL) */
			(void) strlcpy(alnum, ifname, l + 1);
			if (ddi_strtoul(p + 1, NULL, 10, &num) != 0)
				return (DDI_FAILURE);
			break;
		}
		nonum = B_FALSE;
	}
	/* all-digit names (l == 0) or names without digits are invalid */
	if (l == 0 || nonum)
		return (DDI_FAILURE);

	*nump = num;
	return (DDI_SUCCESS);
}

/*
 * Default initialization function for drivers that don't need to quiesce.
 */
/* ARGSUSED */
int
ddi_quiesce_not_needed(dev_info_t *dip)
{
	return (DDI_SUCCESS);
}

/*
 * Initialization function for drivers that should implement quiesce()
 * but haven't yet.
 */
/* ARGSUSED */
int
ddi_quiesce_not_supported(dev_info_t *dip)
{
	return (DDI_FAILURE);
}

/*
 * Duplicate a NUL-terminated string with kmem_alloc; may return NULL
 * only for KM_NOSLEEP.  Free with strfree() or kmem_free(p, strlen+1).
 */
char *
ddi_strdup(const char *str, int flag)
{
	int	n;
	char	*ptr;

	ASSERT(str != NULL);
	ASSERT((flag == KM_SLEEP) || (flag == KM_NOSLEEP));

	n = strlen(str);
	if ((ptr = kmem_alloc(n + 1, flag)) == NULL)
		return (NULL);
	bcopy(str, ptr, n + 1);
	return (ptr);
}

/*
 * Sleeping kernel strdup; never returns NULL.
 */
char *
strdup(const char *str)
{
	return (ddi_strdup(str, KM_SLEEP));
}

/*
 * Free a string allocated by strdup()/ddi_strdup().
 */
void
strfree(char *str)
{
	ASSERT(str != NULL);
	kmem_free(str, strlen(str) + 1);
}

/*
 * Generic DDI callback interfaces.
 */

/*
 * Register a generic DDI callback on dip.  Only one registration per
 * devinfo node is allowed (DDI_EALREADY otherwise).  The returned
 * handle points at the devinfo's devi_cb_p slot.
 */
int
ddi_cb_register(dev_info_t *dip, ddi_cb_flags_t flags, ddi_cb_func_t cbfunc,
    void *arg1, void *arg2, ddi_cb_handle_t *ret_hdlp)
{
	ddi_cb_t	*cbp;

	ASSERT(dip != NULL);
	ASSERT(DDI_CB_FLAG_VALID(flags));
	ASSERT(cbfunc != NULL);
	ASSERT(ret_hdlp != NULL);

	/* Sanity check the context */
	ASSERT(!servicing_interrupt());
	if (servicing_interrupt())
		return (DDI_FAILURE);

	/* Validate parameters */
	if ((dip == NULL) || !DDI_CB_FLAG_VALID(flags) ||
	    (cbfunc == NULL) || (ret_hdlp == NULL))
		return (DDI_EINVAL);

	/* Check for previous registration */
	if (DEVI(dip)->devi_cb_p != NULL)
		return (DDI_EALREADY);

	/* Allocate and initialize callback */
	cbp = kmem_zalloc(sizeof (ddi_cb_t), KM_SLEEP);
	cbp->cb_dip = dip;
	cbp->cb_func = cbfunc;
	cbp->cb_arg1 = arg1;
	cbp->cb_arg2 = arg2;
	cbp->cb_flags = flags;
	DEVI(dip)->devi_cb_p = cbp;

	/* If adding an IRM callback, notify IRM */
	if (flags & DDI_CB_FLAG_INTR)
		i_ddi_irm_set_cb(dip, B_TRUE);

	*ret_hdlp = (ddi_cb_handle_t)&(DEVI(dip)->devi_cb_p);
	return (DDI_SUCCESS);
}

int
ddi_cb_unregister(ddi_cb_handle_t hdl) 9111 { 9112 ddi_cb_t *cbp; 9113 dev_info_t *dip; 9114 9115 ASSERT(hdl != NULL); 9116 9117 /* Sanity check the context */ 9118 ASSERT(!servicing_interrupt()); 9119 if (servicing_interrupt()) 9120 return (DDI_FAILURE); 9121 9122 /* Validate parameters */ 9123 if ((hdl == NULL) || ((cbp = *(ddi_cb_t **)hdl) == NULL) || 9124 ((dip = cbp->cb_dip) == NULL)) 9125 return (DDI_EINVAL); 9126 9127 /* If removing an IRM callback, notify IRM */ 9128 if (cbp->cb_flags & DDI_CB_FLAG_INTR) 9129 i_ddi_irm_set_cb(dip, B_FALSE); 9130 9131 /* Destroy the callback */ 9132 kmem_free(cbp, sizeof (ddi_cb_t)); 9133 DEVI(dip)->devi_cb_p = NULL; 9134 9135 return (DDI_SUCCESS); 9136 } 9137 9138 /* 9139 * Platform independent DR routines 9140 */ 9141 9142 static int 9143 ndi2errno(int n) 9144 { 9145 int err = 0; 9146 9147 switch (n) { 9148 case NDI_NOMEM: 9149 err = ENOMEM; 9150 break; 9151 case NDI_BUSY: 9152 err = EBUSY; 9153 break; 9154 case NDI_FAULT: 9155 err = EFAULT; 9156 break; 9157 case NDI_FAILURE: 9158 err = EIO; 9159 break; 9160 case NDI_SUCCESS: 9161 break; 9162 case NDI_BADHANDLE: 9163 default: 9164 err = EINVAL; 9165 break; 9166 } 9167 return (err); 9168 } 9169 9170 /* 9171 * Prom tree node list 9172 */ 9173 struct ptnode { 9174 pnode_t nodeid; 9175 struct ptnode *next; 9176 }; 9177 9178 /* 9179 * Prom tree walk arg 9180 */ 9181 struct pta { 9182 dev_info_t *pdip; 9183 devi_branch_t *bp; 9184 uint_t flags; 9185 dev_info_t *fdip; 9186 struct ptnode *head; 9187 }; 9188 9189 static void 9190 visit_node(pnode_t nodeid, struct pta *ap) 9191 { 9192 struct ptnode **nextp; 9193 int (*select)(pnode_t, void *, uint_t); 9194 9195 ASSERT(nodeid != OBP_NONODE && nodeid != OBP_BADNODE); 9196 9197 select = ap->bp->create.prom_branch_select; 9198 9199 ASSERT(select); 9200 9201 if (select(nodeid, ap->bp->arg, 0) == DDI_SUCCESS) { 9202 9203 for (nextp = &ap->head; *nextp; nextp = &(*nextp)->next) 9204 ; 9205 9206 *nextp = kmem_zalloc(sizeof (struct ptnode), 
KM_SLEEP); 9207 9208 (*nextp)->nodeid = nodeid; 9209 } 9210 9211 if ((ap->flags & DEVI_BRANCH_CHILD) == DEVI_BRANCH_CHILD) 9212 return; 9213 9214 nodeid = prom_childnode(nodeid); 9215 while (nodeid != OBP_NONODE && nodeid != OBP_BADNODE) { 9216 visit_node(nodeid, ap); 9217 nodeid = prom_nextnode(nodeid); 9218 } 9219 } 9220 9221 /* 9222 * NOTE: The caller of this function must check for device contracts 9223 * or LDI callbacks against this dip before setting the dip offline. 9224 */ 9225 static int 9226 set_infant_dip_offline(dev_info_t *dip, void *arg) 9227 { 9228 char *path = (char *)arg; 9229 9230 ASSERT(dip); 9231 ASSERT(arg); 9232 9233 if (i_ddi_node_state(dip) >= DS_ATTACHED) { 9234 (void) ddi_pathname(dip, path); 9235 cmn_err(CE_WARN, "Attempt to set offline flag on attached " 9236 "node: %s", path); 9237 return (DDI_FAILURE); 9238 } 9239 9240 mutex_enter(&(DEVI(dip)->devi_lock)); 9241 if (!DEVI_IS_DEVICE_OFFLINE(dip)) 9242 DEVI_SET_DEVICE_OFFLINE(dip); 9243 mutex_exit(&(DEVI(dip)->devi_lock)); 9244 9245 return (DDI_SUCCESS); 9246 } 9247 9248 typedef struct result { 9249 char *path; 9250 int result; 9251 } result_t; 9252 9253 static int 9254 dip_set_offline(dev_info_t *dip, void *arg) 9255 { 9256 int end; 9257 result_t *resp = (result_t *)arg; 9258 9259 ASSERT(dip); 9260 ASSERT(resp); 9261 9262 /* 9263 * We stop the walk if e_ddi_offline_notify() returns 9264 * failure, because this implies that one or more consumers 9265 * (either LDI or contract based) has blocked the offline. 9266 * So there is no point in conitnuing the walk 9267 */ 9268 if (e_ddi_offline_notify(dip) == DDI_FAILURE) { 9269 resp->result = DDI_FAILURE; 9270 return (DDI_WALK_TERMINATE); 9271 } 9272 9273 /* 9274 * If set_infant_dip_offline() returns failure, it implies 9275 * that we failed to set a particular dip offline. This 9276 * does not imply that the offline as a whole should fail. 9277 * We want to do the best we can, so we continue the walk. 
9278 */ 9279 if (set_infant_dip_offline(dip, resp->path) == DDI_SUCCESS) 9280 end = DDI_SUCCESS; 9281 else 9282 end = DDI_FAILURE; 9283 9284 e_ddi_offline_finalize(dip, end); 9285 9286 return (DDI_WALK_CONTINUE); 9287 } 9288 9289 /* 9290 * The call to e_ddi_offline_notify() exists for the 9291 * unlikely error case that a branch we are trying to 9292 * create already exists and has device contracts or LDI 9293 * event callbacks against it. 9294 * 9295 * We allow create to succeed for such branches only if 9296 * no constraints block the offline. 9297 */ 9298 static int 9299 branch_set_offline(dev_info_t *dip, char *path) 9300 { 9301 int circ; 9302 int end; 9303 result_t res; 9304 9305 9306 if (e_ddi_offline_notify(dip) == DDI_FAILURE) { 9307 return (DDI_FAILURE); 9308 } 9309 9310 if (set_infant_dip_offline(dip, path) == DDI_SUCCESS) 9311 end = DDI_SUCCESS; 9312 else 9313 end = DDI_FAILURE; 9314 9315 e_ddi_offline_finalize(dip, end); 9316 9317 if (end == DDI_FAILURE) 9318 return (DDI_FAILURE); 9319 9320 res.result = DDI_SUCCESS; 9321 res.path = path; 9322 9323 ndi_devi_enter(dip, &circ); 9324 ddi_walk_devs(ddi_get_child(dip), dip_set_offline, &res); 9325 ndi_devi_exit(dip, circ); 9326 9327 return (res.result); 9328 } 9329 9330 /*ARGSUSED*/ 9331 static int 9332 create_prom_branch(void *arg, int has_changed) 9333 { 9334 int circ; 9335 int exists, rv; 9336 pnode_t nodeid; 9337 struct ptnode *tnp; 9338 dev_info_t *dip; 9339 struct pta *ap = arg; 9340 devi_branch_t *bp; 9341 char *path; 9342 9343 ASSERT(ap); 9344 ASSERT(ap->fdip == NULL); 9345 ASSERT(ap->pdip && ndi_dev_is_prom_node(ap->pdip)); 9346 9347 bp = ap->bp; 9348 9349 nodeid = ddi_get_nodeid(ap->pdip); 9350 if (nodeid == OBP_NONODE || nodeid == OBP_BADNODE) { 9351 cmn_err(CE_WARN, "create_prom_branch: invalid " 9352 "nodeid: 0x%x", nodeid); 9353 return (EINVAL); 9354 } 9355 9356 ap->head = NULL; 9357 9358 nodeid = prom_childnode(nodeid); 9359 while (nodeid != OBP_NONODE && nodeid != OBP_BADNODE) { 9360 
visit_node(nodeid, ap); 9361 nodeid = prom_nextnode(nodeid); 9362 } 9363 9364 if (ap->head == NULL) 9365 return (ENODEV); 9366 9367 path = kmem_alloc(MAXPATHLEN, KM_SLEEP); 9368 rv = 0; 9369 while ((tnp = ap->head) != NULL) { 9370 ap->head = tnp->next; 9371 9372 ndi_devi_enter(ap->pdip, &circ); 9373 9374 /* 9375 * Check if the branch already exists. 9376 */ 9377 exists = 0; 9378 dip = e_ddi_nodeid_to_dip(tnp->nodeid); 9379 if (dip != NULL) { 9380 exists = 1; 9381 9382 /* Parent is held busy, so release hold */ 9383 ndi_rele_devi(dip); 9384 #ifdef DEBUG 9385 cmn_err(CE_WARN, "create_prom_branch: dip(%p) exists" 9386 " for nodeid 0x%x", (void *)dip, tnp->nodeid); 9387 #endif 9388 } else { 9389 dip = i_ddi_create_branch(ap->pdip, tnp->nodeid); 9390 } 9391 9392 kmem_free(tnp, sizeof (struct ptnode)); 9393 9394 /* 9395 * Hold the branch if it is not already held 9396 */ 9397 if (dip && !exists) { 9398 e_ddi_branch_hold(dip); 9399 } 9400 9401 ASSERT(dip == NULL || e_ddi_branch_held(dip)); 9402 9403 /* 9404 * Set all dips in the newly created branch offline so that 9405 * only a "configure" operation can attach 9406 * the branch 9407 */ 9408 if (dip == NULL || branch_set_offline(dip, path) 9409 == DDI_FAILURE) { 9410 ndi_devi_exit(ap->pdip, circ); 9411 rv = EIO; 9412 continue; 9413 } 9414 9415 ASSERT(ddi_get_parent(dip) == ap->pdip); 9416 9417 ndi_devi_exit(ap->pdip, circ); 9418 9419 if (ap->flags & DEVI_BRANCH_CONFIGURE) { 9420 int error = e_ddi_branch_configure(dip, &ap->fdip, 0); 9421 if (error && rv == 0) 9422 rv = error; 9423 } 9424 9425 /* 9426 * Invoke devi_branch_callback() (if it exists) only for 9427 * newly created branches 9428 */ 9429 if (bp->devi_branch_callback && !exists) 9430 bp->devi_branch_callback(dip, bp->arg, 0); 9431 } 9432 9433 kmem_free(path, MAXPATHLEN); 9434 9435 return (rv); 9436 } 9437 9438 static int 9439 sid_node_create(dev_info_t *pdip, devi_branch_t *bp, dev_info_t **rdipp) 9440 { 9441 int rv, circ, len; 9442 int i, flags, ret; 9443 
dev_info_t *dip; 9444 char *nbuf; 9445 char *path; 9446 static const char *noname = "<none>"; 9447 9448 ASSERT(pdip); 9449 ASSERT(DEVI_BUSY_OWNED(pdip)); 9450 9451 flags = 0; 9452 9453 /* 9454 * Creating the root of a branch ? 9455 */ 9456 if (rdipp) { 9457 *rdipp = NULL; 9458 flags = DEVI_BRANCH_ROOT; 9459 } 9460 9461 ndi_devi_alloc_sleep(pdip, (char *)noname, DEVI_SID_NODEID, &dip); 9462 rv = bp->create.sid_branch_create(dip, bp->arg, flags); 9463 9464 nbuf = kmem_alloc(OBP_MAXDRVNAME, KM_SLEEP); 9465 9466 if (rv == DDI_WALK_ERROR) { 9467 cmn_err(CE_WARN, "e_ddi_branch_create: Error setting" 9468 " properties on devinfo node %p", (void *)dip); 9469 goto fail; 9470 } 9471 9472 len = OBP_MAXDRVNAME; 9473 if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 9474 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "name", nbuf, &len) 9475 != DDI_PROP_SUCCESS) { 9476 cmn_err(CE_WARN, "e_ddi_branch_create: devinfo node %p has" 9477 "no name property", (void *)dip); 9478 goto fail; 9479 } 9480 9481 ASSERT(i_ddi_node_state(dip) == DS_PROTO); 9482 if (ndi_devi_set_nodename(dip, nbuf, 0) != NDI_SUCCESS) { 9483 cmn_err(CE_WARN, "e_ddi_branch_create: cannot set name (%s)" 9484 " for devinfo node %p", nbuf, (void *)dip); 9485 goto fail; 9486 } 9487 9488 kmem_free(nbuf, OBP_MAXDRVNAME); 9489 9490 /* 9491 * Ignore bind failures just like boot does 9492 */ 9493 (void) ndi_devi_bind_driver(dip, 0); 9494 9495 switch (rv) { 9496 case DDI_WALK_CONTINUE: 9497 case DDI_WALK_PRUNESIB: 9498 ndi_devi_enter(dip, &circ); 9499 9500 i = DDI_WALK_CONTINUE; 9501 for (; i == DDI_WALK_CONTINUE; ) { 9502 i = sid_node_create(dip, bp, NULL); 9503 } 9504 9505 ASSERT(i == DDI_WALK_ERROR || i == DDI_WALK_PRUNESIB); 9506 if (i == DDI_WALK_ERROR) 9507 rv = i; 9508 /* 9509 * If PRUNESIB stop creating siblings 9510 * of dip's child. Subsequent walk behavior 9511 * is determined by rv returned by dip. 
9512 */ 9513 9514 ndi_devi_exit(dip, circ); 9515 break; 9516 case DDI_WALK_TERMINATE: 9517 /* 9518 * Don't create children and ask our parent 9519 * to not create siblings either. 9520 */ 9521 rv = DDI_WALK_PRUNESIB; 9522 break; 9523 case DDI_WALK_PRUNECHILD: 9524 /* 9525 * Don't create children, but ask parent to continue 9526 * with siblings. 9527 */ 9528 rv = DDI_WALK_CONTINUE; 9529 break; 9530 default: 9531 ASSERT(0); 9532 break; 9533 } 9534 9535 if (rdipp) 9536 *rdipp = dip; 9537 9538 /* 9539 * Set device offline - only the "configure" op should cause an attach. 9540 * Note that it is safe to set the dip offline without checking 9541 * for either device contract or layered driver (LDI) based constraints 9542 * since there cannot be any contracts or LDI opens of this device. 9543 * This is because this node is a newly created dip with the parent busy 9544 * held, so no other thread can come in and attach this dip. A dip that 9545 * has never been attached cannot have contracts since by definition 9546 * a device contract (an agreement between a process and a device minor 9547 * node) can only be created against a device that has minor nodes 9548 * i.e is attached. Similarly an LDI open will only succeed if the 9549 * dip is attached. We assert below that the dip is not attached. 
9550 */ 9551 ASSERT(i_ddi_node_state(dip) < DS_ATTACHED); 9552 path = kmem_alloc(MAXPATHLEN, KM_SLEEP); 9553 ret = set_infant_dip_offline(dip, path); 9554 ASSERT(ret == DDI_SUCCESS); 9555 kmem_free(path, MAXPATHLEN); 9556 9557 return (rv); 9558 fail: 9559 (void) ndi_devi_free(dip); 9560 kmem_free(nbuf, OBP_MAXDRVNAME); 9561 return (DDI_WALK_ERROR); 9562 } 9563 9564 static int 9565 create_sid_branch( 9566 dev_info_t *pdip, 9567 devi_branch_t *bp, 9568 dev_info_t **dipp, 9569 uint_t flags) 9570 { 9571 int rv = 0, state = DDI_WALK_CONTINUE; 9572 dev_info_t *rdip; 9573 9574 while (state == DDI_WALK_CONTINUE) { 9575 int circ; 9576 9577 ndi_devi_enter(pdip, &circ); 9578 9579 state = sid_node_create(pdip, bp, &rdip); 9580 if (rdip == NULL) { 9581 ndi_devi_exit(pdip, circ); 9582 ASSERT(state == DDI_WALK_ERROR); 9583 break; 9584 } 9585 9586 e_ddi_branch_hold(rdip); 9587 9588 ndi_devi_exit(pdip, circ); 9589 9590 if (flags & DEVI_BRANCH_CONFIGURE) { 9591 int error = e_ddi_branch_configure(rdip, dipp, 0); 9592 if (error && rv == 0) 9593 rv = error; 9594 } 9595 9596 /* 9597 * devi_branch_callback() is optional 9598 */ 9599 if (bp->devi_branch_callback) 9600 bp->devi_branch_callback(rdip, bp->arg, 0); 9601 } 9602 9603 ASSERT(state == DDI_WALK_ERROR || state == DDI_WALK_PRUNESIB); 9604 9605 return (state == DDI_WALK_ERROR ? EIO : rv); 9606 } 9607 9608 int 9609 e_ddi_branch_create( 9610 dev_info_t *pdip, 9611 devi_branch_t *bp, 9612 dev_info_t **dipp, 9613 uint_t flags) 9614 { 9615 int prom_devi, sid_devi, error; 9616 9617 if (pdip == NULL || bp == NULL || bp->type == 0) 9618 return (EINVAL); 9619 9620 prom_devi = (bp->type == DEVI_BRANCH_PROM) ? 1 : 0; 9621 sid_devi = (bp->type == DEVI_BRANCH_SID) ? 
1 : 0; 9622 9623 if (prom_devi && bp->create.prom_branch_select == NULL) 9624 return (EINVAL); 9625 else if (sid_devi && bp->create.sid_branch_create == NULL) 9626 return (EINVAL); 9627 else if (!prom_devi && !sid_devi) 9628 return (EINVAL); 9629 9630 if (flags & DEVI_BRANCH_EVENT) 9631 return (EINVAL); 9632 9633 if (prom_devi) { 9634 struct pta pta = {0}; 9635 9636 pta.pdip = pdip; 9637 pta.bp = bp; 9638 pta.flags = flags; 9639 9640 error = prom_tree_access(create_prom_branch, &pta, NULL); 9641 9642 if (dipp) 9643 *dipp = pta.fdip; 9644 else if (pta.fdip) 9645 ndi_rele_devi(pta.fdip); 9646 } else { 9647 error = create_sid_branch(pdip, bp, dipp, flags); 9648 } 9649 9650 return (error); 9651 } 9652 9653 int 9654 e_ddi_branch_configure(dev_info_t *rdip, dev_info_t **dipp, uint_t flags) 9655 { 9656 int rv; 9657 char *devnm; 9658 dev_info_t *pdip; 9659 9660 if (dipp) 9661 *dipp = NULL; 9662 9663 if (rdip == NULL || flags != 0 || (flags & DEVI_BRANCH_EVENT)) 9664 return (EINVAL); 9665 9666 pdip = ddi_get_parent(rdip); 9667 9668 ndi_hold_devi(pdip); 9669 9670 if (!e_ddi_branch_held(rdip)) { 9671 ndi_rele_devi(pdip); 9672 cmn_err(CE_WARN, "e_ddi_branch_configure: " 9673 "dip(%p) not held", (void *)rdip); 9674 return (EINVAL); 9675 } 9676 9677 if (i_ddi_node_state(rdip) < DS_INITIALIZED) { 9678 /* 9679 * First attempt to bind a driver. 
If we fail, return 9680 * success (On some platforms, dips for some device 9681 * types (CPUs) may not have a driver) 9682 */ 9683 if (ndi_devi_bind_driver(rdip, 0) != NDI_SUCCESS) { 9684 ndi_rele_devi(pdip); 9685 return (0); 9686 } 9687 9688 if (ddi_initchild(pdip, rdip) != DDI_SUCCESS) { 9689 rv = NDI_FAILURE; 9690 goto out; 9691 } 9692 } 9693 9694 ASSERT(i_ddi_node_state(rdip) >= DS_INITIALIZED); 9695 9696 devnm = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP); 9697 9698 (void) ddi_deviname(rdip, devnm); 9699 9700 if ((rv = ndi_devi_config_one(pdip, devnm+1, &rdip, 9701 NDI_DEVI_ONLINE | NDI_CONFIG)) == NDI_SUCCESS) { 9702 /* release hold from ndi_devi_config_one() */ 9703 ndi_rele_devi(rdip); 9704 } 9705 9706 kmem_free(devnm, MAXNAMELEN + 1); 9707 out: 9708 if (rv != NDI_SUCCESS && dipp && rdip) { 9709 ndi_hold_devi(rdip); 9710 *dipp = rdip; 9711 } 9712 ndi_rele_devi(pdip); 9713 return (ndi2errno(rv)); 9714 } 9715 9716 void 9717 e_ddi_branch_hold(dev_info_t *rdip) 9718 { 9719 if (e_ddi_branch_held(rdip)) { 9720 cmn_err(CE_WARN, "e_ddi_branch_hold: branch already held"); 9721 return; 9722 } 9723 9724 mutex_enter(&DEVI(rdip)->devi_lock); 9725 if ((DEVI(rdip)->devi_flags & DEVI_BRANCH_HELD) == 0) { 9726 DEVI(rdip)->devi_flags |= DEVI_BRANCH_HELD; 9727 DEVI(rdip)->devi_ref++; 9728 } 9729 ASSERT(DEVI(rdip)->devi_ref > 0); 9730 mutex_exit(&DEVI(rdip)->devi_lock); 9731 } 9732 9733 int 9734 e_ddi_branch_held(dev_info_t *rdip) 9735 { 9736 int rv = 0; 9737 9738 mutex_enter(&DEVI(rdip)->devi_lock); 9739 if ((DEVI(rdip)->devi_flags & DEVI_BRANCH_HELD) && 9740 DEVI(rdip)->devi_ref > 0) { 9741 rv = 1; 9742 } 9743 mutex_exit(&DEVI(rdip)->devi_lock); 9744 9745 return (rv); 9746 } 9747 9748 void 9749 e_ddi_branch_rele(dev_info_t *rdip) 9750 { 9751 mutex_enter(&DEVI(rdip)->devi_lock); 9752 DEVI(rdip)->devi_flags &= ~DEVI_BRANCH_HELD; 9753 DEVI(rdip)->devi_ref--; 9754 mutex_exit(&DEVI(rdip)->devi_lock); 9755 } 9756 9757 int 9758 e_ddi_branch_unconfigure( 9759 dev_info_t *rdip, 9760 
dev_info_t **dipp, 9761 uint_t flags) 9762 { 9763 int circ, rv; 9764 int destroy; 9765 char *devnm; 9766 uint_t nflags; 9767 dev_info_t *pdip; 9768 9769 if (dipp) 9770 *dipp = NULL; 9771 9772 if (rdip == NULL) 9773 return (EINVAL); 9774 9775 pdip = ddi_get_parent(rdip); 9776 9777 ASSERT(pdip); 9778 9779 /* 9780 * Check if caller holds pdip busy - can cause deadlocks during 9781 * devfs_clean() 9782 */ 9783 if (DEVI_BUSY_OWNED(pdip)) { 9784 cmn_err(CE_WARN, "e_ddi_branch_unconfigure: failed: parent" 9785 " devinfo node(%p) is busy held", (void *)pdip); 9786 return (EINVAL); 9787 } 9788 9789 destroy = (flags & DEVI_BRANCH_DESTROY) ? 1 : 0; 9790 9791 devnm = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP); 9792 9793 ndi_devi_enter(pdip, &circ); 9794 (void) ddi_deviname(rdip, devnm); 9795 ndi_devi_exit(pdip, circ); 9796 9797 /* 9798 * ddi_deviname() returns a component name with / prepended. 9799 */ 9800 (void) devfs_clean(pdip, devnm + 1, DV_CLEAN_FORCE); 9801 9802 ndi_devi_enter(pdip, &circ); 9803 9804 /* 9805 * Recreate device name as it may have changed state (init/uninit) 9806 * when parent busy lock was dropped for devfs_clean() 9807 */ 9808 (void) ddi_deviname(rdip, devnm); 9809 9810 if (!e_ddi_branch_held(rdip)) { 9811 kmem_free(devnm, MAXNAMELEN + 1); 9812 ndi_devi_exit(pdip, circ); 9813 cmn_err(CE_WARN, "e_ddi_%s_branch: dip(%p) not held", 9814 destroy ? "destroy" : "unconfigure", (void *)rdip); 9815 return (EINVAL); 9816 } 9817 9818 /* 9819 * Release hold on the branch. This is ok since we are holding the 9820 * parent busy. If rdip is not removed, we must do a hold on the 9821 * branch before returning. 
9822 */ 9823 e_ddi_branch_rele(rdip); 9824 9825 nflags = NDI_DEVI_OFFLINE; 9826 if (destroy || (flags & DEVI_BRANCH_DESTROY)) { 9827 nflags |= NDI_DEVI_REMOVE; 9828 destroy = 1; 9829 } else { 9830 nflags |= NDI_UNCONFIG; /* uninit but don't remove */ 9831 } 9832 9833 if (flags & DEVI_BRANCH_EVENT) 9834 nflags |= NDI_POST_EVENT; 9835 9836 if (i_ddi_devi_attached(pdip) && 9837 (i_ddi_node_state(rdip) >= DS_INITIALIZED)) { 9838 rv = ndi_devi_unconfig_one(pdip, devnm+1, dipp, nflags); 9839 } else { 9840 rv = e_ddi_devi_unconfig(rdip, dipp, nflags); 9841 if (rv == NDI_SUCCESS) { 9842 ASSERT(!destroy || ddi_get_child(rdip) == NULL); 9843 rv = ndi_devi_offline(rdip, nflags); 9844 } 9845 } 9846 9847 if (!destroy || rv != NDI_SUCCESS) { 9848 /* The dip still exists, so do a hold */ 9849 e_ddi_branch_hold(rdip); 9850 } 9851 out: 9852 kmem_free(devnm, MAXNAMELEN + 1); 9853 ndi_devi_exit(pdip, circ); 9854 return (ndi2errno(rv)); 9855 } 9856 9857 int 9858 e_ddi_branch_destroy(dev_info_t *rdip, dev_info_t **dipp, uint_t flag) 9859 { 9860 return (e_ddi_branch_unconfigure(rdip, dipp, 9861 flag|DEVI_BRANCH_DESTROY)); 9862 } 9863 9864 /* 9865 * Number of chains for hash table 9866 */ 9867 #define NUMCHAINS 17 9868 9869 /* 9870 * Devinfo busy arg 9871 */ 9872 struct devi_busy { 9873 int dv_total; 9874 int s_total; 9875 mod_hash_t *dv_hash; 9876 mod_hash_t *s_hash; 9877 int (*callback)(dev_info_t *, void *, uint_t); 9878 void *arg; 9879 }; 9880 9881 static int 9882 visit_dip(dev_info_t *dip, void *arg) 9883 { 9884 uintptr_t sbusy, dvbusy, ref; 9885 struct devi_busy *bsp = arg; 9886 9887 ASSERT(bsp->callback); 9888 9889 /* 9890 * A dip cannot be busy if its reference count is 0 9891 */ 9892 if ((ref = e_ddi_devi_holdcnt(dip)) == 0) { 9893 return (bsp->callback(dip, bsp->arg, 0)); 9894 } 9895 9896 if (mod_hash_find(bsp->dv_hash, dip, (mod_hash_val_t *)&dvbusy)) 9897 dvbusy = 0; 9898 9899 /* 9900 * To catch device opens currently maintained on specfs common snodes. 
9901 */ 9902 if (mod_hash_find(bsp->s_hash, dip, (mod_hash_val_t *)&sbusy)) 9903 sbusy = 0; 9904 9905 #ifdef DEBUG 9906 if (ref < sbusy || ref < dvbusy) { 9907 cmn_err(CE_WARN, "dip(%p): sopen = %lu, dvopen = %lu " 9908 "dip ref = %lu\n", (void *)dip, sbusy, dvbusy, ref); 9909 } 9910 #endif 9911 9912 dvbusy = (sbusy > dvbusy) ? sbusy : dvbusy; 9913 9914 return (bsp->callback(dip, bsp->arg, dvbusy)); 9915 } 9916 9917 static int 9918 visit_snode(struct snode *sp, void *arg) 9919 { 9920 uintptr_t sbusy; 9921 dev_info_t *dip; 9922 int count; 9923 struct devi_busy *bsp = arg; 9924 9925 ASSERT(sp); 9926 9927 /* 9928 * The stable lock is held. This prevents 9929 * the snode and its associated dip from 9930 * going away. 9931 */ 9932 dip = NULL; 9933 count = spec_devi_open_count(sp, &dip); 9934 9935 if (count <= 0) 9936 return (DDI_WALK_CONTINUE); 9937 9938 ASSERT(dip); 9939 9940 if (mod_hash_remove(bsp->s_hash, dip, (mod_hash_val_t *)&sbusy)) 9941 sbusy = count; 9942 else 9943 sbusy += count; 9944 9945 if (mod_hash_insert(bsp->s_hash, dip, (mod_hash_val_t)sbusy)) { 9946 cmn_err(CE_WARN, "%s: s_hash insert failed: dip=0x%p, " 9947 "sbusy = %lu", "e_ddi_branch_referenced", 9948 (void *)dip, sbusy); 9949 } 9950 9951 bsp->s_total += count; 9952 9953 return (DDI_WALK_CONTINUE); 9954 } 9955 9956 static void 9957 visit_dvnode(struct dv_node *dv, void *arg) 9958 { 9959 uintptr_t dvbusy; 9960 uint_t count; 9961 struct vnode *vp; 9962 struct devi_busy *bsp = arg; 9963 9964 ASSERT(dv && dv->dv_devi); 9965 9966 vp = DVTOV(dv); 9967 9968 mutex_enter(&vp->v_lock); 9969 count = vp->v_count; 9970 mutex_exit(&vp->v_lock); 9971 9972 if (!count) 9973 return; 9974 9975 if (mod_hash_remove(bsp->dv_hash, dv->dv_devi, 9976 (mod_hash_val_t *)&dvbusy)) 9977 dvbusy = count; 9978 else 9979 dvbusy += count; 9980 9981 if (mod_hash_insert(bsp->dv_hash, dv->dv_devi, 9982 (mod_hash_val_t)dvbusy)) { 9983 cmn_err(CE_WARN, "%s: dv_hash insert failed: dip=0x%p, " 9984 "dvbusy=%lu", 
"e_ddi_branch_referenced", 9985 (void *)dv->dv_devi, dvbusy); 9986 } 9987 9988 bsp->dv_total += count; 9989 } 9990 9991 /* 9992 * Returns reference count on success or -1 on failure. 9993 */ 9994 int 9995 e_ddi_branch_referenced( 9996 dev_info_t *rdip, 9997 int (*callback)(dev_info_t *dip, void *arg, uint_t ref), 9998 void *arg) 9999 { 10000 int circ; 10001 char *path; 10002 dev_info_t *pdip; 10003 struct devi_busy bsa = {0}; 10004 10005 ASSERT(rdip); 10006 10007 path = kmem_alloc(MAXPATHLEN, KM_SLEEP); 10008 10009 ndi_hold_devi(rdip); 10010 10011 pdip = ddi_get_parent(rdip); 10012 10013 ASSERT(pdip); 10014 10015 /* 10016 * Check if caller holds pdip busy - can cause deadlocks during 10017 * devfs_walk() 10018 */ 10019 if (!e_ddi_branch_held(rdip) || DEVI_BUSY_OWNED(pdip)) { 10020 cmn_err(CE_WARN, "e_ddi_branch_referenced: failed: " 10021 "devinfo branch(%p) not held or parent busy held", 10022 (void *)rdip); 10023 ndi_rele_devi(rdip); 10024 kmem_free(path, MAXPATHLEN); 10025 return (-1); 10026 } 10027 10028 ndi_devi_enter(pdip, &circ); 10029 (void) ddi_pathname(rdip, path); 10030 ndi_devi_exit(pdip, circ); 10031 10032 bsa.dv_hash = mod_hash_create_ptrhash("dv_node busy hash", NUMCHAINS, 10033 mod_hash_null_valdtor, sizeof (struct dev_info)); 10034 10035 bsa.s_hash = mod_hash_create_ptrhash("snode busy hash", NUMCHAINS, 10036 mod_hash_null_valdtor, sizeof (struct snode)); 10037 10038 if (devfs_walk(path, visit_dvnode, &bsa)) { 10039 cmn_err(CE_WARN, "e_ddi_branch_referenced: " 10040 "devfs walk failed for: %s", path); 10041 kmem_free(path, MAXPATHLEN); 10042 bsa.s_total = bsa.dv_total = -1; 10043 goto out; 10044 } 10045 10046 kmem_free(path, MAXPATHLEN); 10047 10048 /* 10049 * Walk the snode table to detect device opens, which are currently 10050 * maintained on specfs common snodes. 
10051 */ 10052 spec_snode_walk(visit_snode, &bsa); 10053 10054 if (callback == NULL) 10055 goto out; 10056 10057 bsa.callback = callback; 10058 bsa.arg = arg; 10059 10060 if (visit_dip(rdip, &bsa) == DDI_WALK_CONTINUE) { 10061 ndi_devi_enter(rdip, &circ); 10062 ddi_walk_devs(ddi_get_child(rdip), visit_dip, &bsa); 10063 ndi_devi_exit(rdip, circ); 10064 } 10065 10066 out: 10067 ndi_rele_devi(rdip); 10068 mod_hash_destroy_ptrhash(bsa.s_hash); 10069 mod_hash_destroy_ptrhash(bsa.dv_hash); 10070 return (bsa.s_total > bsa.dv_total ? bsa.s_total : bsa.dv_total); 10071 }