Print this page
4888 Undocument dma_req(9s)
4884 EOF scsi_hba_attach
4886 EOF ddi_dmae_getlim
4887 EOF ddi_iomin
4634 undocument scsi_hba_attach() and ddi_dma_lim(9s)
4630 clean stale references to ddi_iopb_alloc and ddi_iopb_free
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/os/sunddi.c
+++ new/usr/src/uts/common/os/sunddi.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
↓ open down ↓ |
13 lines elided |
↑ open up ↑ |
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
24 - * Copyright 2012 Garrett D'Amore <garrett@damore.org>. All rights reserved.
24 + * Copyright 2014 Garrett D'Amore <garrett@damore.org>
25 25 */
26 26
27 27 #include <sys/note.h>
28 28 #include <sys/types.h>
29 29 #include <sys/param.h>
30 30 #include <sys/systm.h>
31 31 #include <sys/buf.h>
32 32 #include <sys/uio.h>
33 33 #include <sys/cred.h>
34 34 #include <sys/poll.h>
35 35 #include <sys/mman.h>
36 36 #include <sys/kmem.h>
37 37 #include <sys/model.h>
38 38 #include <sys/file.h>
39 39 #include <sys/proc.h>
40 40 #include <sys/open.h>
41 41 #include <sys/user.h>
42 42 #include <sys/t_lock.h>
43 43 #include <sys/vm.h>
44 44 #include <sys/stat.h>
45 45 #include <vm/hat.h>
46 46 #include <vm/seg.h>
47 47 #include <vm/seg_vn.h>
48 48 #include <vm/seg_dev.h>
49 49 #include <vm/as.h>
50 50 #include <sys/cmn_err.h>
51 51 #include <sys/cpuvar.h>
52 52 #include <sys/debug.h>
53 53 #include <sys/autoconf.h>
54 54 #include <sys/sunddi.h>
55 55 #include <sys/esunddi.h>
56 56 #include <sys/sunndi.h>
57 57 #include <sys/kstat.h>
58 58 #include <sys/conf.h>
59 59 #include <sys/ddi_impldefs.h> /* include implementation structure defs */
60 60 #include <sys/ndi_impldefs.h> /* include prototypes */
61 61 #include <sys/ddi_periodic.h>
62 62 #include <sys/hwconf.h>
63 63 #include <sys/pathname.h>
64 64 #include <sys/modctl.h>
65 65 #include <sys/epm.h>
66 66 #include <sys/devctl.h>
67 67 #include <sys/callb.h>
68 68 #include <sys/cladm.h>
69 69 #include <sys/sysevent.h>
70 70 #include <sys/dacf_impl.h>
71 71 #include <sys/ddidevmap.h>
72 72 #include <sys/bootconf.h>
73 73 #include <sys/disp.h>
74 74 #include <sys/atomic.h>
75 75 #include <sys/promif.h>
76 76 #include <sys/instance.h>
77 77 #include <sys/sysevent/eventdefs.h>
78 78 #include <sys/task.h>
79 79 #include <sys/project.h>
80 80 #include <sys/taskq.h>
81 81 #include <sys/devpolicy.h>
82 82 #include <sys/ctype.h>
83 83 #include <net/if.h>
84 84 #include <sys/rctl.h>
85 85 #include <sys/zone.h>
86 86 #include <sys/clock_impl.h>
87 87 #include <sys/ddi.h>
88 88 #include <sys/modhash.h>
89 89 #include <sys/sunldi_impl.h>
90 90 #include <sys/fs/dv_node.h>
91 91 #include <sys/fs/snode.h>
92 92
93 93 extern pri_t minclsyspri;
94 94
95 95 extern rctl_hndl_t rc_project_locked_mem;
96 96 extern rctl_hndl_t rc_zone_locked_mem;
97 97
98 98 #ifdef DEBUG
99 99 static int sunddi_debug = 0;
100 100 #endif /* DEBUG */
101 101
102 102 /* ddi_umem_unlock miscellaneous */
103 103
104 104 static void i_ddi_umem_unlock_thread_start(void);
105 105
106 106 static kmutex_t ddi_umem_unlock_mutex; /* unlock list mutex */
107 107 static kcondvar_t ddi_umem_unlock_cv; /* unlock list block/unblock */
108 108 static kthread_t *ddi_umem_unlock_thread;
109 109 /*
110 110 * The ddi_umem_unlock FIFO list. NULL head pointer indicates empty list.
111 111 */
112 112 static struct ddi_umem_cookie *ddi_umem_unlock_head = NULL;
113 113 static struct ddi_umem_cookie *ddi_umem_unlock_tail = NULL;
114 114
115 115 /*
116 116 * DDI(Sun) Function and flag definitions:
117 117 */
118 118
119 119 #if defined(__x86)
120 120 /*
121 121 * Used to indicate which entries were chosen from a range.
122 122 */
123 123 char *chosen_reg = "chosen-reg";
124 124 #endif
125 125
126 126 /*
127 127 * Function used to ring system console bell
128 128 */
129 129 void (*ddi_console_bell_func)(clock_t duration);
130 130
131 131 /*
132 132 * Creating register mappings and handling interrupts:
133 133 */
134 134
135 135 /*
136 136 * Generic ddi_map: Call parent to fulfill request...
137 137 */
138 138
139 139 int
140 140 ddi_map(dev_info_t *dp, ddi_map_req_t *mp, off_t offset,
141 141 off_t len, caddr_t *addrp)
142 142 {
143 143 dev_info_t *pdip;
144 144
145 145 ASSERT(dp);
146 146 pdip = (dev_info_t *)DEVI(dp)->devi_parent;
147 147 return ((DEVI(pdip)->devi_ops->devo_bus_ops->bus_map)(pdip,
148 148 dp, mp, offset, len, addrp));
149 149 }
150 150
151 151 /*
152 152 * ddi_apply_range: (Called by nexi only.)
153 153 * Apply ranges in parent node dp, to child regspec rp...
154 154 */
155 155
156 156 int
157 157 ddi_apply_range(dev_info_t *dp, dev_info_t *rdip, struct regspec *rp)
158 158 {
159 159 return (i_ddi_apply_range(dp, rdip, rp));
160 160 }
161 161
162 162 int
163 163 ddi_map_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
164 164 off_t len)
165 165 {
166 166 ddi_map_req_t mr;
167 167 #if defined(__x86)
168 168 struct {
169 169 int bus;
170 170 int addr;
171 171 int size;
172 172 } reg, *reglist;
173 173 uint_t length;
174 174 int rc;
175 175
176 176 /*
177 177 * get the 'registers' or the 'reg' property.
178 178 * We look up the reg property as an array of
179 179 * int's.
180 180 */
181 181 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
 182 182 DDI_PROP_DONTPASS, "registers", (int **)&reglist, &length);
183 183 if (rc != DDI_PROP_SUCCESS)
184 184 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
 185 185 DDI_PROP_DONTPASS, "reg", (int **)&reglist, &length);
186 186 if (rc == DDI_PROP_SUCCESS) {
187 187 /*
188 188 * point to the required entry.
189 189 */
190 190 reg = reglist[rnumber];
191 191 reg.addr += offset;
192 192 if (len != 0)
193 193 reg.size = len;
194 194 /*
195 195 * make a new property containing ONLY the required tuple.
196 196 */
197 197 if (ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
 198 198 chosen_reg, (int *)&reg, (sizeof (reg)/sizeof (int)))
199 199 != DDI_PROP_SUCCESS) {
200 200 cmn_err(CE_WARN, "%s%d: cannot create '%s' "
201 201 "property", DEVI(dip)->devi_name,
202 202 DEVI(dip)->devi_instance, chosen_reg);
203 203 }
204 204 /*
205 205 * free the memory allocated by
206 206 * ddi_prop_lookup_int_array ().
207 207 */
208 208 ddi_prop_free((void *)reglist);
209 209 }
210 210 #endif
211 211 mr.map_op = DDI_MO_MAP_LOCKED;
212 212 mr.map_type = DDI_MT_RNUMBER;
213 213 mr.map_obj.rnumber = rnumber;
214 214 mr.map_prot = PROT_READ | PROT_WRITE;
215 215 mr.map_flags = DDI_MF_KERNEL_MAPPING;
216 216 mr.map_handlep = NULL;
217 217 mr.map_vers = DDI_MAP_VERSION;
218 218
219 219 /*
220 220 * Call my parent to map in my regs.
221 221 */
222 222
223 223 return (ddi_map(dip, &mr, offset, len, kaddrp));
224 224 }
225 225
226 226 void
227 227 ddi_unmap_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
228 228 off_t len)
229 229 {
230 230 ddi_map_req_t mr;
231 231
232 232 mr.map_op = DDI_MO_UNMAP;
233 233 mr.map_type = DDI_MT_RNUMBER;
234 234 mr.map_flags = DDI_MF_KERNEL_MAPPING;
235 235 mr.map_prot = PROT_READ | PROT_WRITE; /* who cares? */
236 236 mr.map_obj.rnumber = rnumber;
237 237 mr.map_handlep = NULL;
238 238 mr.map_vers = DDI_MAP_VERSION;
239 239
240 240 /*
241 241 * Call my parent to unmap my regs.
242 242 */
243 243
244 244 (void) ddi_map(dip, &mr, offset, len, kaddrp);
245 245 *kaddrp = (caddr_t)0;
246 246 #if defined(__x86)
247 247 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, chosen_reg);
248 248 #endif
249 249 }
250 250
251 251 int
252 252 ddi_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
253 253 off_t offset, off_t len, caddr_t *vaddrp)
254 254 {
255 255 return (i_ddi_bus_map(dip, rdip, mp, offset, len, vaddrp));
256 256 }
257 257
258 258 /*
259 259 * nullbusmap: The/DDI default bus_map entry point for nexi
260 260 * not conforming to the reg/range paradigm (i.e. scsi, etc.)
261 261 * with no HAT/MMU layer to be programmed at this level.
262 262 *
263 263 * If the call is to map by rnumber, return an error,
264 264 * otherwise pass anything else up the tree to my parent.
265 265 */
266 266 int
267 267 nullbusmap(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
268 268 off_t offset, off_t len, caddr_t *vaddrp)
269 269 {
270 270 _NOTE(ARGUNUSED(rdip))
271 271 if (mp->map_type == DDI_MT_RNUMBER)
272 272 return (DDI_ME_UNSUPPORTED);
273 273
274 274 return (ddi_map(dip, mp, offset, len, vaddrp));
275 275 }
276 276
277 277 /*
278 278 * ddi_rnumber_to_regspec: Not for use by leaf drivers.
279 279 * Only for use by nexi using the reg/range paradigm.
280 280 */
281 281 struct regspec *
282 282 ddi_rnumber_to_regspec(dev_info_t *dip, int rnumber)
283 283 {
284 284 return (i_ddi_rnumber_to_regspec(dip, rnumber));
285 285 }
286 286
287 287
288 288 /*
289 289 * Note that we allow the dip to be nil because we may be called
290 290 * prior even to the instantiation of the devinfo tree itself - all
291 291 * regular leaf and nexus drivers should always use a non-nil dip!
292 292 *
293 293 * We treat peek in a somewhat cavalier fashion .. assuming that we'll
294 294 * simply get a synchronous fault as soon as we touch a missing address.
295 295 *
296 296 * Poke is rather more carefully handled because we might poke to a write
297 297 * buffer, "succeed", then only find some time later that we got an
298 298 * asynchronous fault that indicated that the address we were writing to
299 299 * was not really backed by hardware.
300 300 */
301 301
302 302 static int
303 303 i_ddi_peekpoke(dev_info_t *devi, ddi_ctl_enum_t cmd, size_t size,
304 304 void *addr, void *value_p)
305 305 {
306 306 union {
307 307 uint64_t u64;
308 308 uint32_t u32;
309 309 uint16_t u16;
310 310 uint8_t u8;
311 311 } peekpoke_value;
312 312
313 313 peekpoke_ctlops_t peekpoke_args;
314 314 uint64_t dummy_result;
315 315 int rval;
316 316
317 317 /* Note: size is assumed to be correct; it is not checked. */
318 318 peekpoke_args.size = size;
319 319 peekpoke_args.dev_addr = (uintptr_t)addr;
320 320 peekpoke_args.handle = NULL;
321 321 peekpoke_args.repcount = 1;
322 322 peekpoke_args.flags = 0;
323 323
324 324 if (cmd == DDI_CTLOPS_POKE) {
325 325 switch (size) {
326 326 case sizeof (uint8_t):
327 327 peekpoke_value.u8 = *(uint8_t *)value_p;
328 328 break;
329 329 case sizeof (uint16_t):
330 330 peekpoke_value.u16 = *(uint16_t *)value_p;
331 331 break;
332 332 case sizeof (uint32_t):
333 333 peekpoke_value.u32 = *(uint32_t *)value_p;
334 334 break;
335 335 case sizeof (uint64_t):
336 336 peekpoke_value.u64 = *(uint64_t *)value_p;
337 337 break;
338 338 }
339 339 }
340 340
341 341 peekpoke_args.host_addr = (uintptr_t)&peekpoke_value.u64;
342 342
343 343 if (devi != NULL)
344 344 rval = ddi_ctlops(devi, devi, cmd, &peekpoke_args,
345 345 &dummy_result);
346 346 else
347 347 rval = peekpoke_mem(cmd, &peekpoke_args);
348 348
349 349 /*
350 350 * A NULL value_p is permitted by ddi_peek(9F); discard the result.
351 351 */
352 352 if ((cmd == DDI_CTLOPS_PEEK) & (value_p != NULL)) {
353 353 switch (size) {
354 354 case sizeof (uint8_t):
355 355 *(uint8_t *)value_p = peekpoke_value.u8;
356 356 break;
357 357 case sizeof (uint16_t):
358 358 *(uint16_t *)value_p = peekpoke_value.u16;
359 359 break;
360 360 case sizeof (uint32_t):
361 361 *(uint32_t *)value_p = peekpoke_value.u32;
362 362 break;
363 363 case sizeof (uint64_t):
364 364 *(uint64_t *)value_p = peekpoke_value.u64;
365 365 break;
366 366 }
367 367 }
368 368
369 369 return (rval);
370 370 }
371 371
372 372 /*
373 373 * Keep ddi_peek() and ddi_poke() in case 3rd parties are calling this.
374 374 * they shouldn't be, but the 9f manpage kind of pseudo exposes it.
375 375 */
376 376 int
377 377 ddi_peek(dev_info_t *devi, size_t size, void *addr, void *value_p)
378 378 {
379 379 switch (size) {
380 380 case sizeof (uint8_t):
381 381 case sizeof (uint16_t):
382 382 case sizeof (uint32_t):
383 383 case sizeof (uint64_t):
384 384 break;
385 385 default:
386 386 return (DDI_FAILURE);
387 387 }
388 388
389 389 return (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, size, addr, value_p));
390 390 }
391 391
392 392 int
393 393 ddi_poke(dev_info_t *devi, size_t size, void *addr, void *value_p)
394 394 {
395 395 switch (size) {
396 396 case sizeof (uint8_t):
397 397 case sizeof (uint16_t):
398 398 case sizeof (uint32_t):
399 399 case sizeof (uint64_t):
400 400 break;
401 401 default:
402 402 return (DDI_FAILURE);
403 403 }
404 404
405 405 return (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, size, addr, value_p));
406 406 }
407 407
408 408 int
409 409 ddi_peek8(dev_info_t *dip, int8_t *addr, int8_t *val_p)
410 410 {
411 411 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
412 412 val_p));
413 413 }
414 414
415 415 int
416 416 ddi_peek16(dev_info_t *dip, int16_t *addr, int16_t *val_p)
417 417 {
418 418 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
419 419 val_p));
420 420 }
421 421
422 422 int
423 423 ddi_peek32(dev_info_t *dip, int32_t *addr, int32_t *val_p)
424 424 {
425 425 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
426 426 val_p));
427 427 }
428 428
429 429 int
430 430 ddi_peek64(dev_info_t *dip, int64_t *addr, int64_t *val_p)
431 431 {
432 432 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
433 433 val_p));
434 434 }
435 435
436 436
437 437 /*
438 438 * We need to separate the old interfaces from the new ones and leave them
439 439 * in here for a while. Previous versions of the OS defined the new interfaces
440 440 * to the old interfaces. This way we can fix things up so that we can
441 441 * eventually remove these interfaces.
442 442 * e.g. A 3rd party module/driver using ddi_peek8 and built against S10
443 443 * or earlier will actually have a reference to ddi_peekc in the binary.
444 444 */
445 445 #ifdef _ILP32
446 446 int
447 447 ddi_peekc(dev_info_t *dip, int8_t *addr, int8_t *val_p)
448 448 {
449 449 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
450 450 val_p));
451 451 }
452 452
453 453 int
454 454 ddi_peeks(dev_info_t *dip, int16_t *addr, int16_t *val_p)
455 455 {
456 456 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
457 457 val_p));
458 458 }
459 459
460 460 int
461 461 ddi_peekl(dev_info_t *dip, int32_t *addr, int32_t *val_p)
462 462 {
463 463 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
464 464 val_p));
465 465 }
466 466
467 467 int
468 468 ddi_peekd(dev_info_t *dip, int64_t *addr, int64_t *val_p)
469 469 {
470 470 return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
471 471 val_p));
472 472 }
473 473 #endif /* _ILP32 */
474 474
475 475 int
476 476 ddi_poke8(dev_info_t *dip, int8_t *addr, int8_t val)
477 477 {
478 478 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
479 479 }
480 480
481 481 int
482 482 ddi_poke16(dev_info_t *dip, int16_t *addr, int16_t val)
483 483 {
484 484 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
485 485 }
486 486
487 487 int
488 488 ddi_poke32(dev_info_t *dip, int32_t *addr, int32_t val)
489 489 {
490 490 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
491 491 }
492 492
493 493 int
494 494 ddi_poke64(dev_info_t *dip, int64_t *addr, int64_t val)
495 495 {
496 496 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
497 497 }
498 498
499 499 /*
500 500 * We need to separate the old interfaces from the new ones and leave them
501 501 * in here for a while. Previous versions of the OS defined the new interfaces
502 502 * to the old interfaces. This way we can fix things up so that we can
503 503 * eventually remove these interfaces.
504 504 * e.g. A 3rd party module/driver using ddi_poke8 and built against S10
505 505 * or earlier will actually have a reference to ddi_pokec in the binary.
506 506 */
507 507 #ifdef _ILP32
508 508 int
509 509 ddi_pokec(dev_info_t *dip, int8_t *addr, int8_t val)
510 510 {
511 511 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
512 512 }
513 513
514 514 int
515 515 ddi_pokes(dev_info_t *dip, int16_t *addr, int16_t val)
516 516 {
517 517 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
518 518 }
519 519
520 520 int
521 521 ddi_pokel(dev_info_t *dip, int32_t *addr, int32_t val)
522 522 {
523 523 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
524 524 }
525 525
526 526 int
527 527 ddi_poked(dev_info_t *dip, int64_t *addr, int64_t val)
528 528 {
529 529 return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
530 530 }
531 531 #endif /* _ILP32 */
532 532
533 533 /*
534 534 * ddi_peekpokeio() is used primarily by the mem drivers for moving
535 535 * data to and from uio structures via peek and poke. Note that we
536 536 * use "internal" routines ddi_peek and ddi_poke to make this go
537 537 * slightly faster, avoiding the call overhead ..
538 538 */
539 539 int
540 540 ddi_peekpokeio(dev_info_t *devi, struct uio *uio, enum uio_rw rw,
541 541 caddr_t addr, size_t len, uint_t xfersize)
542 542 {
543 543 int64_t ibuffer;
544 544 int8_t w8;
545 545 size_t sz;
546 546 int o;
547 547
548 548 if (xfersize > sizeof (long))
549 549 xfersize = sizeof (long);
550 550
551 551 while (len != 0) {
552 552 if ((len | (uintptr_t)addr) & 1) {
553 553 sz = sizeof (int8_t);
554 554 if (rw == UIO_WRITE) {
555 555 if ((o = uwritec(uio)) == -1)
556 556 return (DDI_FAILURE);
557 557 if (ddi_poke8(devi, (int8_t *)addr,
558 558 (int8_t)o) != DDI_SUCCESS)
559 559 return (DDI_FAILURE);
560 560 } else {
561 561 if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
562 562 (int8_t *)addr, &w8) != DDI_SUCCESS)
563 563 return (DDI_FAILURE);
564 564 if (ureadc(w8, uio))
565 565 return (DDI_FAILURE);
566 566 }
567 567 } else {
568 568 switch (xfersize) {
569 569 case sizeof (int64_t):
570 570 if (((len | (uintptr_t)addr) &
571 571 (sizeof (int64_t) - 1)) == 0) {
572 572 sz = xfersize;
573 573 break;
574 574 }
575 575 /*FALLTHROUGH*/
576 576 case sizeof (int32_t):
577 577 if (((len | (uintptr_t)addr) &
578 578 (sizeof (int32_t) - 1)) == 0) {
579 579 sz = xfersize;
580 580 break;
581 581 }
582 582 /*FALLTHROUGH*/
583 583 default:
584 584 /*
585 585 * This still assumes that we might have an
586 586 * I/O bus out there that permits 16-bit
587 587 * transfers (and that it would be upset by
588 588 * 32-bit transfers from such locations).
589 589 */
590 590 sz = sizeof (int16_t);
591 591 break;
592 592 }
593 593
594 594 if (rw == UIO_READ) {
595 595 if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
596 596 addr, &ibuffer) != DDI_SUCCESS)
597 597 return (DDI_FAILURE);
598 598 }
599 599
600 600 if (uiomove(&ibuffer, sz, rw, uio))
601 601 return (DDI_FAILURE);
602 602
603 603 if (rw == UIO_WRITE) {
604 604 if (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, sz,
605 605 addr, &ibuffer) != DDI_SUCCESS)
606 606 return (DDI_FAILURE);
607 607 }
608 608 }
609 609 addr += sz;
610 610 len -= sz;
611 611 }
612 612 return (DDI_SUCCESS);
613 613 }
614 614
615 615 /*
616 616 * These routines are used by drivers that do layered ioctls
617 617 * On sparc, they're implemented in assembler to avoid spilling
618 618 * register windows in the common (copyin) case ..
619 619 */
620 620 #if !defined(__sparc)
621 621 int
622 622 ddi_copyin(const void *buf, void *kernbuf, size_t size, int flags)
623 623 {
624 624 if (flags & FKIOCTL)
625 625 return (kcopy(buf, kernbuf, size) ? -1 : 0);
626 626 return (copyin(buf, kernbuf, size));
627 627 }
628 628
629 629 int
630 630 ddi_copyout(const void *buf, void *kernbuf, size_t size, int flags)
631 631 {
632 632 if (flags & FKIOCTL)
633 633 return (kcopy(buf, kernbuf, size) ? -1 : 0);
634 634 return (copyout(buf, kernbuf, size));
635 635 }
636 636 #endif /* !__sparc */
637 637
638 638 /*
639 639 * Conversions in nexus pagesize units. We don't duplicate the
640 640 * 'nil dip' semantics of peek/poke because btopr/btop/ptob are DDI/DKI
641 641 * routines anyway.
642 642 */
643 643 unsigned long
644 644 ddi_btop(dev_info_t *dip, unsigned long bytes)
645 645 {
646 646 unsigned long pages;
647 647
648 648 (void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOP, &bytes, &pages);
649 649 return (pages);
650 650 }
651 651
652 652 unsigned long
653 653 ddi_btopr(dev_info_t *dip, unsigned long bytes)
654 654 {
655 655 unsigned long pages;
656 656
657 657 (void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOPR, &bytes, &pages);
658 658 return (pages);
659 659 }
660 660
661 661 unsigned long
662 662 ddi_ptob(dev_info_t *dip, unsigned long pages)
663 663 {
664 664 unsigned long bytes;
665 665
666 666 (void) ddi_ctlops(dip, dip, DDI_CTLOPS_PTOB, &pages, &bytes);
667 667 return (bytes);
668 668 }
669 669
670 670 unsigned int
671 671 ddi_enter_critical(void)
672 672 {
673 673 return ((uint_t)spl7());
674 674 }
675 675
676 676 void
677 677 ddi_exit_critical(unsigned int spl)
678 678 {
679 679 splx((int)spl);
680 680 }
681 681
682 682 /*
683 683 * Nexus ctlops punter
684 684 */
685 685
686 686 #if !defined(__sparc)
687 687 /*
688 688 * Request bus_ctl parent to handle a bus_ctl request
689 689 *
690 690 * (The sparc version is in sparc_ddi.s)
691 691 */
692 692 int
693 693 ddi_ctlops(dev_info_t *d, dev_info_t *r, ddi_ctl_enum_t op, void *a, void *v)
694 694 {
695 695 int (*fp)();
696 696
697 697 if (!d || !r)
698 698 return (DDI_FAILURE);
699 699
700 700 if ((d = (dev_info_t *)DEVI(d)->devi_bus_ctl) == NULL)
701 701 return (DDI_FAILURE);
702 702
↓ open down ↓ |
668 lines elided |
↑ open up ↑ |
703 703 fp = DEVI(d)->devi_ops->devo_bus_ops->bus_ctl;
704 704 return ((*fp)(d, r, op, a, v));
705 705 }
706 706
707 707 #endif
708 708
709 709 /*
710 710 * DMA/DVMA setup
711 711 */
712 712
713 -#if defined(__sparc)
714 -static ddi_dma_lim_t standard_limits = {
715 - (uint_t)0, /* addr_t dlim_addr_lo */
716 - (uint_t)-1, /* addr_t dlim_addr_hi */
717 - (uint_t)-1, /* uint_t dlim_cntr_max */
718 - (uint_t)1, /* uint_t dlim_burstsizes */
719 - (uint_t)1, /* uint_t dlim_minxfer */
720 - 0 /* uint_t dlim_dmaspeed */
721 -};
722 -#elif defined(__x86)
723 -static ddi_dma_lim_t standard_limits = {
724 - (uint_t)0, /* addr_t dlim_addr_lo */
725 - (uint_t)0xffffff, /* addr_t dlim_addr_hi */
726 - (uint_t)0, /* uint_t dlim_cntr_max */
727 - (uint_t)0x00000001, /* uint_t dlim_burstsizes */
728 - (uint_t)DMA_UNIT_8, /* uint_t dlim_minxfer */
729 - (uint_t)0, /* uint_t dlim_dmaspeed */
730 - (uint_t)0x86<<24+0, /* uint_t dlim_version */
731 - (uint_t)0xffff, /* uint_t dlim_adreg_max */
732 - (uint_t)0xffff, /* uint_t dlim_ctreg_max */
733 - (uint_t)512, /* uint_t dlim_granular */
734 - (int)1, /* int dlim_sgllen */
735 - (uint_t)0xffffffff /* uint_t dlim_reqsizes */
736 -};
737 -
738 -#endif
739 -
740 713 #if !defined(__sparc)
741 714 /*
742 715 * Request bus_dma_ctl parent to fiddle with a dma request.
743 716 *
744 717 * (The sparc version is in sparc_subr.s)
745 718 */
746 719 int
747 720 ddi_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
748 721 ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
749 722 off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
750 723 {
751 724 int (*fp)();
752 725
753 726 if (dip != ddi_root_node())
754 727 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_ctl;
755 728 fp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_ctl;
756 729 return ((*fp) (dip, rdip, handle, request, offp, lenp, objp, flags));
757 730 }
758 731 #endif
759 732
760 733 /*
761 734 * For all DMA control functions, call the DMA control
762 735 * routine and return status.
763 736 *
764 737 * Just plain assume that the parent is to be called.
765 738 * If a nexus driver or a thread outside the framework
766 739 * of a nexus driver or a leaf driver calls these functions,
767 740 * it is up to them to deal with the fact that the parent's
768 741 * bus_dma_ctl function will be the first one called.
769 742 */
770 743
↓ open down ↓ |
21 lines elided |
↑ open up ↑ |
771 744 #define HD ((ddi_dma_impl_t *)h)->dmai_rdip
772 745
773 746 /*
774 747 * This routine is left in place to satisfy link dependencies
775 748 * for any 3rd party nexus drivers that rely on it. It is never
776 749 * called, though.
777 750 */
778 751 /*ARGSUSED*/
779 752 int
780 753 ddi_dma_map(dev_info_t *dip, dev_info_t *rdip,
781 - struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
754 + struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
782 755 {
783 756 return (DDI_FAILURE);
784 757 }
785 758
786 759 #if !defined(__sparc)
787 760
788 761 /*
789 762 * The SPARC versions of these routines are done in assembler to
790 763 * save register windows, so they're in sparc_subr.s.
791 764 */
792 765
793 766 int
794 767 ddi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
795 768 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
796 769 {
797 770 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_attr_t *,
798 771 int (*)(caddr_t), caddr_t, ddi_dma_handle_t *);
799 772
800 773 if (dip != ddi_root_node())
801 774 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;
802 775
803 776 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_allochdl;
804 777 return ((*funcp)(dip, rdip, attr, waitfp, arg, handlep));
805 778 }
806 779
807 780 int
808 781 ddi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handlep)
809 782 {
810 783 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);
811 784
812 785 if (dip != ddi_root_node())
813 786 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;
814 787
815 788 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_freehdl;
816 789 return ((*funcp)(dip, rdip, handlep));
817 790 }
818 791
819 792 int
820 793 ddi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
821 794 ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
822 795 ddi_dma_cookie_t *cp, uint_t *ccountp)
823 796 {
824 797 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
825 798 struct ddi_dma_req *, ddi_dma_cookie_t *, uint_t *);
826 799
827 800 if (dip != ddi_root_node())
828 801 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
829 802
830 803 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_bindhdl;
831 804 return ((*funcp)(dip, rdip, handle, dmareq, cp, ccountp));
832 805 }
833 806
834 807 int
835 808 ddi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
836 809 ddi_dma_handle_t handle)
837 810 {
838 811 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);
839 812
840 813 if (dip != ddi_root_node())
841 814 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
842 815
843 816 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_unbindhdl;
844 817 return ((*funcp)(dip, rdip, handle));
845 818 }
846 819
847 820
848 821 int
849 822 ddi_dma_flush(dev_info_t *dip, dev_info_t *rdip,
850 823 ddi_dma_handle_t handle, off_t off, size_t len,
851 824 uint_t cache_flags)
852 825 {
853 826 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
854 827 off_t, size_t, uint_t);
855 828
856 829 if (dip != ddi_root_node())
857 830 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
858 831
859 832 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_flush;
860 833 return ((*funcp)(dip, rdip, handle, off, len, cache_flags));
861 834 }
862 835
863 836 int
864 837 ddi_dma_win(dev_info_t *dip, dev_info_t *rdip,
865 838 ddi_dma_handle_t handle, uint_t win, off_t *offp,
866 839 size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
867 840 {
868 841 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
869 842 uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);
870 843
871 844 if (dip != ddi_root_node())
872 845 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_win;
873 846
874 847 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_win;
875 848 return ((*funcp)(dip, rdip, handle, win, offp, lenp,
876 849 cookiep, ccountp));
877 850 }
878 851
879 852 int
880 853 ddi_dma_sync(ddi_dma_handle_t h, off_t o, size_t l, uint_t whom)
881 854 {
882 855 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
883 856 dev_info_t *dip, *rdip;
884 857 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t, off_t,
885 858 size_t, uint_t);
886 859
887 860 /*
888 861 * the DMA nexus driver will set DMP_NOSYNC if the
889 862 * platform does not require any sync operation. For
890 863 * example if the memory is uncached or consistent
891 864 * and without any I/O write buffers involved.
892 865 */
893 866 if ((hp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC)
894 867 return (DDI_SUCCESS);
895 868
896 869 dip = rdip = hp->dmai_rdip;
897 870 if (dip != ddi_root_node())
898 871 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
899 872 funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_flush;
900 873 return ((*funcp)(dip, rdip, h, o, l, whom));
901 874 }
902 875
903 876 int
904 877 ddi_dma_unbind_handle(ddi_dma_handle_t h)
905 878 {
906 879 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
907 880 dev_info_t *dip, *rdip;
908 881 int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);
909 882
910 883 dip = rdip = hp->dmai_rdip;
911 884 if (dip != ddi_root_node())
912 885 dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
913 886 funcp = DEVI(rdip)->devi_bus_dma_unbindfunc;
914 887 return ((*funcp)(dip, rdip, h));
915 888 }
916 889
917 890 #endif /* !__sparc */
918 891
919 892 /*
920 893 * DMA burst sizes, and transfer minimums
921 894 */
922 895
923 896 int
↓ open down ↓ |
132 lines elided |
↑ open up ↑ |
924 897 ddi_dma_burstsizes(ddi_dma_handle_t handle)
925 898 {
926 899 ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
927 900
928 901 if (!dimp)
929 902 return (0);
930 903 else
931 904 return (dimp->dmai_burstsizes);
932 905 }
933 906
934 -int
935 -ddi_iomin(dev_info_t *a, int i, int stream)
936 -{
937 - int r;
938 -
939 - /*
940 - * Make sure that the initial value is sane
941 - */
942 - if (i & (i - 1))
943 - return (0);
944 - if (i == 0)
945 - i = (stream) ? 4 : 1;
946 -
947 - r = ddi_ctlops(a, a,
948 - DDI_CTLOPS_IOMIN, (void *)(uintptr_t)stream, (void *)&i);
949 - if (r != DDI_SUCCESS || (i & (i - 1)))
950 - return (0);
951 - return (i);
952 -}
953 -
954 907 /*
955 908 * Given two DMA attribute structures, apply the attributes
956 909 * of one to the other, following the rules of attributes
957 910 * and the wishes of the caller.
958 911 *
959 912 * The rules of DMA attribute structures are that you cannot
960 913 * make things *less* restrictive as you apply one set
961 914 * of attributes to another.
962 915 *
963 916 */
void
ddi_dma_attr_merge(ddi_dma_attr_t *attr, ddi_dma_attr_t *mod)
{
	/* The addressable window can only shrink. */
	attr->dma_attr_addr_lo =
	    MAX(attr->dma_attr_addr_lo, mod->dma_attr_addr_lo);
	attr->dma_attr_addr_hi =
	    MIN(attr->dma_attr_addr_hi, mod->dma_attr_addr_hi);
	/* The per-cookie count limit can only decrease. */
	attr->dma_attr_count_max =
	    MIN(attr->dma_attr_count_max, mod->dma_attr_count_max);
	/* Alignment requirements can only grow. */
	attr->dma_attr_align =
	    MAX(attr->dma_attr_align, mod->dma_attr_align);
	/* Only burst sizes supported by both sets survive. */
	attr->dma_attr_burstsizes =
	    (uint_t)(attr->dma_attr_burstsizes & mod->dma_attr_burstsizes);
	/* maxbit(): presumably selects the larger minimum-transfer bit — */
	/* TODO(review): confirm against maxbit's definition. */
	attr->dma_attr_minxfer =
	    maxbit(attr->dma_attr_minxfer, mod->dma_attr_minxfer);
	attr->dma_attr_maxxfer =
	    MIN(attr->dma_attr_maxxfer, mod->dma_attr_maxxfer);
	attr->dma_attr_seg = MIN(attr->dma_attr_seg, mod->dma_attr_seg);
	attr->dma_attr_sgllen = MIN((uint_t)attr->dma_attr_sgllen,
	    (uint_t)mod->dma_attr_sgllen);
	/* Granularity can only grow coarser. */
	attr->dma_attr_granular =
	    MAX(attr->dma_attr_granular, mod->dma_attr_granular);
}
987 940
988 941 /*
989 942 * mmap/segmap interface:
990 943 */
991 944
992 945 /*
993 946 * ddi_segmap: setup the default segment driver. Calls the drivers
994 947 * XXmmap routine to validate the range to be mapped.
 *		Return ENXIO if the range is not valid. Create
996 949 * a seg_dev segment that contains all of the
997 950 * necessary information and will reference the
998 951 * default segment driver routines. It returns zero
999 952 * on success or non-zero on failure.
1000 953 */
int
ddi_segmap(dev_t dev, off_t offset, struct as *asp, caddr_t *addrp, off_t len,
    uint_t prot, uint_t maxprot, uint_t flags, cred_t *credp)
{
	/* Local prototype for spec_segmap(), defined outside this file. */
	extern int spec_segmap(dev_t, off_t, struct as *, caddr_t *,
	    off_t, uint_t, uint_t, uint_t, struct cred *);

	/* Delegate the whole segmap request unchanged. */
	return (spec_segmap(dev, offset, asp, addrp, len,
	    prot, maxprot, flags, credp));
}
1011 964
1012 965 /*
1013 966 * ddi_map_fault: Resolve mappings at fault time. Used by segment
1014 967 * drivers. Allows each successive parent to resolve
1015 968 * address translations and add its mappings to the
1016 969 * mapping list supplied in the page structure. It
1017 970 * returns zero on success or non-zero on failure.
1018 971 */
1019 972
int
ddi_map_fault(dev_info_t *dip, struct hat *hat, struct seg *seg,
    caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock)
{
	/* Forward to the implementation, with dip as both rdip and dip. */
	return (i_ddi_map_fault(dip, dip, hat, seg, addr, dp, pfn, prot, lock));
}
1026 979
1027 980 /*
1028 981 * ddi_device_mapping_check: Called from ddi_segmap_setup.
1029 982 * Invokes platform specific DDI to determine whether attributes specified
1030 983 * in attr(9s) are valid for the region of memory that will be made
1031 984 * available for direct access to user process via the mmap(2) system call.
1032 985 */
/*
 * Returns 0 if the region identified by rnumber is mappable with the
 * requested access attributes, -1 otherwise.  On return *hat_flags
 * holds the hat flags chosen by the framework (note: it is written
 * even when the underlying map operation fails).
 */
int
ddi_device_mapping_check(dev_t dev, ddi_device_acc_attr_t *accattrp,
    uint_t rnumber, uint_t *hat_flags)
{
	ddi_acc_handle_t handle;
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;
	dev_info_t *dip;

	/*
	 * we use e_ddi_hold_devi_by_dev to search for the devi.  We
	 * release it immediately since it should already be held by
	 * a devfs vnode.
	 */
	if ((dip =
	    e_ddi_hold_devi_by_dev(dev, E_DDI_HOLD_DEVI_NOATTACH)) == NULL)
		return (-1);
	ddi_release_devi(dip);	/* for e_ddi_hold_devi_by_dev() */

	/*
	 * Allocate and initialize the common elements of data
	 * access handle.
	 */
	handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	if (handle == NULL)
		return (-1);

	hp = impl_acc_hdl_get(handle);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = rnumber;
	hp->ah_offset = 0;
	hp->ah_len = 0;
	hp->ah_acc = *accattrp;

	/*
	 * Set up the mapping request and call to parent.
	 * DDI_MO_MAP_HANDLE only validates/prepares; no user mapping
	 * is actually established here.
	 */
	mr.map_op = DDI_MO_MAP_HANDLE;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;
	result = ddi_map(dip, &mr, 0, 0, NULL);

	/*
	 * Region must be mappable, pick up flags from the framework.
	 */
	*hat_flags = hp->ah_hat_flags;

	impl_acc_hdl_free(handle);

	/*
	 * check for end result.
	 */
	if (result != DDI_SUCCESS)
		return (-1);
	return (0);
}
1095 1048
1096 1049
1097 1050 /*
1098 1051 * Property functions: See also, ddipropdefs.h.
1099 1052 *
1100 1053 * These functions are the framework for the property functions,
1101 1054 * i.e. they support software defined properties. All implementation
1102 1055 * specific property handling (i.e.: self-identifying devices and
1103 1056 * PROM defined properties are handled in the implementation specific
1104 1057 * functions (defined in ddi_implfuncs.h).
1105 1058 */
1106 1059
1107 1060 /*
1108 1061 * nopropop: Shouldn't be called, right?
1109 1062 */
/*
 * Stub prop_op entry point: always reports the property as not found.
 */
int
nopropop(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	_NOTE(ARGUNUSED(dev, dip, prop_op, mod_flags, name, valuep, lengthp))
	return (DDI_PROP_NOT_FOUND);
}
1117 1070
1118 1071 #ifdef DDI_PROP_DEBUG
1119 1072 int ddi_prop_debug_flag = 0;
1120 1073
1121 1074 int
1122 1075 ddi_prop_debug(int enable)
1123 1076 {
1124 1077 int prev = ddi_prop_debug_flag;
1125 1078
1126 1079 if ((enable != 0) || (prev != 0))
1127 1080 printf("ddi_prop_debug: debugging %s\n",
1128 1081 enable ? "enabled" : "disabled");
1129 1082 ddi_prop_debug_flag = enable;
1130 1083 return (prev);
1131 1084 }
1132 1085
1133 1086 #endif /* DDI_PROP_DEBUG */
1134 1087
1135 1088 /*
1136 1089 * Search a property list for a match, if found return pointer
1137 1090 * to matching prop struct, else return NULL.
1138 1091 */
1139 1092
ddi_prop_t *
i_ddi_prop_search(dev_t dev, char *name, uint_t flags, ddi_prop_t **list_head)
{
	ddi_prop_t	*propp;

	/*
	 * find the property in child's devinfo:
	 * Search order defined by this search function is first matching
	 * property with input dev == DDI_DEV_T_ANY matching any dev or
	 * dev == propp->prop_dev, name == propp->name, and the correct
	 * data type as specified in the flags.  If a DDI_DEV_T_NONE dev
	 * value made it this far then it implies a DDI_DEV_T_ANY search.
	 */
	if (dev == DDI_DEV_T_NONE)
		dev = DDI_DEV_T_ANY;

	for (propp = *list_head; propp != NULL; propp = propp->prop_next) {

		if (!DDI_STRSAME(propp->prop_name, name))
			continue;

		if ((dev != DDI_DEV_T_ANY) && (propp->prop_dev != dev))
			continue;

		/* Require at least one requested type bit to match. */
		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
			continue;

		return (propp);
	}

	/* No match anywhere in the list. */
	return ((ddi_prop_t *)0);
}
1172 1125
1173 1126 /*
1174 1127 * Search for property within devnames structures
1175 1128 */
ddi_prop_t *
i_ddi_search_global_prop(dev_t dev, char *name, uint_t flags)
{
	major_t major;
	struct devnames	*dnp;
	ddi_prop_t	*propp;

	/*
	 * Valid dev_t value is needed to index into the
	 * correct devnames entry, therefore a dev_t
	 * value of DDI_DEV_T_ANY is not appropriate.
	 */
	ASSERT(dev != DDI_DEV_T_ANY);
	if (dev == DDI_DEV_T_ANY) {
		return ((ddi_prop_t *)0);
	}

	major = getmajor(dev);
	dnp = &(devnamesp[major]);

	/* No global property list for this driver: nothing to search. */
	if (dnp->dn_global_prop_ptr == NULL)
		return ((ddi_prop_t *)0);

	LOCK_DEV_OPS(&dnp->dn_lock);

	for (propp = dnp->dn_global_prop_ptr->prop_list;
	    propp != NULL;
	    propp = (ddi_prop_t *)propp->prop_next) {

		if (!DDI_STRSAME(propp->prop_name, name))
			continue;

		/*
		 * dev must match unless the caller asked for an
		 * any-dev search (rootnex-global or LDI any-dev).
		 */
		if ((!(flags & DDI_PROP_ROOTNEX_GLOBAL)) &&
		    (!(flags & LDI_DEV_T_ANY)) && (propp->prop_dev != dev))
			continue;

		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
			continue;

		/*
		 * Property found, return it.
		 * NOTE(review): propp is returned after dn_lock is
		 * dropped — presumably the list entry's lifetime is
		 * guaranteed by the caller's context; confirm.
		 */
		UNLOCK_DEV_OPS(&dnp->dn_lock);
		return (propp);
	}

	UNLOCK_DEV_OPS(&dnp->dn_lock);
	return ((ddi_prop_t *)0);
}
1223 1176
1224 1177 static char prop_no_mem_msg[] = "can't allocate memory for ddi property <%s>";
1225 1178
1226 1179 /*
1227 1180 * ddi_prop_search_global:
1228 1181 * Search the global property list within devnames
1229 1182 * for the named property. Return the encoded value.
1230 1183 */
/*
 * Look up `name' in the driver's global (devnames) property list and,
 * on success, return a freshly kmem_alloc'ed copy of the encoded value
 * through *valuep with its length in *lengthp.  The caller owns the
 * returned buffer and must kmem_free(*valuep, *lengthp) it.
 */
static int
i_ddi_prop_search_global(dev_t dev, uint_t flags, char *name,
    void *valuep, uint_t *lengthp)
{
	ddi_prop_t	*propp;
	caddr_t		buffer;

	propp = i_ddi_search_global_prop(dev, name, flags);

	/* Property NOT found, bail */
	if (propp == (ddi_prop_t *)0)
		return (DDI_PROP_NOT_FOUND);

	/* Explicitly undefined properties are reported as such. */
	if (propp->prop_flags & DDI_PROP_UNDEF_IT)
		return (DDI_PROP_UNDEFINED);

	if ((buffer = kmem_alloc(propp->prop_len,
	    (flags & DDI_PROP_CANSLEEP) ? KM_SLEEP : KM_NOSLEEP)) == NULL) {
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}

	/*
	 * Return the encoded data
	 */
	*(caddr_t *)valuep = buffer;
	*lengthp = propp->prop_len;
	bcopy(propp->prop_val, buffer, propp->prop_len);

	return (DDI_PROP_SUCCESS);
}
1262 1215
1263 1216 /*
1264 1217 * ddi_prop_search_common: Lookup and return the encoded value
1265 1218 */
/*
 * Walk the software property lists of dip (and, unless DDI_PROP_DONTPASS
 * is set, its ancestors) looking for `name', and service the request
 * described by prop_op (existence check, length only, copy into caller's
 * buffer, or allocate-and-copy).  The retry loop exists so that a
 * PROP_LEN_AND_VAL_ALLOC request with DDI_PROP_CANSLEEP can drop
 * devi_lock, sleep in kmem_alloc(), and re-verify the property length.
 */
int
ddi_prop_search_common(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    uint_t flags, char *name, void *valuep, uint_t *lengthp)
{
	ddi_prop_t	*propp;
	int		i;
	caddr_t		buffer;
	caddr_t		prealloc = NULL;
	int		plength = 0;
	dev_info_t	*pdip;
	int		(*bop)();

	/*CONSTANTCONDITION*/
	while (1) {

		mutex_enter(&(DEVI(dip)->devi_lock));


		/*
		 * find the property in child's devinfo:
		 * Search order is:
		 *	1. driver defined properties
		 *	2. system defined properties
		 *	3. driver global properties
		 *	4. boot defined properties
		 */

		propp = i_ddi_prop_search(dev, name, flags,
		    &(DEVI(dip)->devi_drv_prop_ptr));
		if (propp == NULL) {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_sys_prop_ptr));
		}
		if ((propp == NULL) && DEVI(dip)->devi_global_prop_list) {
			propp = i_ddi_prop_search(dev, name, flags,
			    &DEVI(dip)->devi_global_prop_list->prop_list);
		}

		if (propp == NULL) {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_hw_prop_ptr));
		}

		/*
		 * Software property found?
		 */
		if (propp != (ddi_prop_t *)0) {

			/*
			 * If explicit undefine, return now.
			 */
			if (propp->prop_flags & DDI_PROP_UNDEF_IT) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_UNDEFINED);
			}

			/*
			 * If we only want to know if it exists, return now
			 */
			if (prop_op == PROP_EXISTS) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				ASSERT(prealloc == NULL);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If length only request or prop length == 0,
			 * service request and return now.
			 */
			if ((prop_op == PROP_LEN) ||(propp->prop_len == 0)) {
				*lengthp = propp->prop_len;

				/*
				 * if prop_op is PROP_LEN_AND_VAL_ALLOC
				 * that means prop_len is 0, so set valuep
				 * also to NULL
				 */
				if (prop_op == PROP_LEN_AND_VAL_ALLOC)
					*(caddr_t *)valuep = NULL;

				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If LEN_AND_VAL_ALLOC and the request can sleep,
			 * drop the mutex, allocate the buffer, and go
			 * through the loop again.  If we already allocated
			 * the buffer, and the size of the property changed,
			 * keep trying...
			 */
			if ((prop_op == PROP_LEN_AND_VAL_ALLOC) &&
			    (flags & DDI_PROP_CANSLEEP)) {
				if (prealloc && (propp->prop_len != plength)) {
					kmem_free(prealloc, plength);
					prealloc = NULL;
				}
				if (prealloc == NULL) {
					plength = propp->prop_len;
					mutex_exit(&(DEVI(dip)->devi_lock));
					prealloc = kmem_alloc(plength,
					    KM_SLEEP);
					/* Re-search with buffer in hand. */
					continue;
				}
			}

			/*
			 * Allocate buffer, if required.  Either way,
			 * set `buffer' variable.
			 */
			i = *lengthp;		/* Get callers length */
			*lengthp = propp->prop_len; /* Set callers length */

			switch (prop_op) {

			case PROP_LEN_AND_VAL_ALLOC:

				if (prealloc == NULL) {
					buffer = kmem_alloc(propp->prop_len,
					    KM_NOSLEEP);
				} else {
					buffer = prealloc;
				}

				if (buffer == NULL) {
					mutex_exit(&(DEVI(dip)->devi_lock));
					cmn_err(CE_CONT, prop_no_mem_msg, name);
					return (DDI_PROP_NO_MEMORY);
				}
				/* Set callers buf ptr */
				*(caddr_t *)valuep = buffer;
				break;

			case PROP_LEN_AND_VAL_BUF:

				/* Caller's buffer must be big enough. */
				if (propp->prop_len > (i)) {
					mutex_exit(&(DEVI(dip)->devi_lock));
					return (DDI_PROP_BUF_TOO_SMALL);
				}

				buffer = valuep;  /* Get callers buf ptr */
				break;

			default:
				break;
			}

			/*
			 * Do the copy.
			 */
			bcopy(propp->prop_val, buffer, propp->prop_len);
			mutex_exit(&(DEVI(dip)->devi_lock));
			return (DDI_PROP_SUCCESS);
		}

		mutex_exit(&(DEVI(dip)->devi_lock));
		if (prealloc)
			kmem_free(prealloc, plength);
		prealloc = NULL;

		/*
		 * Prop not found, call parent bus_ops to deal with possible
		 * h/w layer (possible PROM defined props, etc.) and to
		 * possibly ascend the hierarchy, if allowed by flags.
		 */
		pdip = (dev_info_t *)DEVI(dip)->devi_parent;

		/*
		 * One last call for the root driver PROM props?
		 */
		if (dip == ddi_root_node()) {
			return (ddi_bus_prop_op(dev, dip, dip, prop_op,
			    flags, name, valuep, (int *)lengthp));
		}

		/*
		 * We may have been called to check for properties
		 * within a single devinfo node that has no parent -
		 * see make_prop()
		 */
		if (pdip == NULL) {
			ASSERT((flags &
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)) ==
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM));
			return (DDI_PROP_NOT_FOUND);
		}

		/*
		 * Instead of recursing, we do iterative calls up the tree.
		 * As a bit of optimization, skip the bus_op level if the
		 * node is a s/w node and if the parent's bus_prop_op function
		 * is `ddi_bus_prop_op', because we know that in this case,
		 * this function does nothing.
		 *
		 * 4225415: If the parent isn't attached, or the child
		 * hasn't been named by the parent yet, use the default
		 * ddi_bus_prop_op as a proxy for the parent.  This
		 * allows property lookups in any child/parent state to
		 * include 'prom' and inherited properties, even when
		 * there are no drivers attached to the child or parent.
		 */

		bop = ddi_bus_prop_op;
		if (i_ddi_devi_attached(pdip) &&
		    (i_ddi_node_state(dip) >= DS_INITIALIZED))
			bop = DEVI(pdip)->devi_ops->devo_bus_ops->bus_prop_op;

		i = DDI_PROP_NOT_FOUND;

		if ((bop != ddi_bus_prop_op) || ndi_dev_is_prom_node(dip)) {
			i = (*bop)(dev, pdip, dip, prop_op,
			    flags | DDI_PROP_DONTPASS,
			    name, valuep, lengthp);
		}

		if ((flags & DDI_PROP_DONTPASS) ||
		    (i != DDI_PROP_NOT_FOUND))
			return (i);

		/* Ascend one level and try again. */
		dip = pdip;
	}
	/*NOTREACHED*/
}
1493 1446
1494 1447
1495 1448 /*
1496 1449 * ddi_prop_op: The basic property operator for drivers.
1497 1450 *
1498 1451 * In ddi_prop_op, the type of valuep is interpreted based on prop_op:
1499 1452 *
1500 1453 * prop_op valuep
1501 1454 * ------ ------
1502 1455 *
1503 1456 * PROP_LEN <unused>
1504 1457 *
1505 1458 * PROP_LEN_AND_VAL_BUF Pointer to callers buffer
1506 1459 *
1507 1460 * PROP_LEN_AND_VAL_ALLOC Address of callers pointer (will be set to
1508 1461 * address of allocated buffer, if successful)
1509 1462 */
int
ddi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	int	i;

	/* Callers must not pass explicit type bits; they are set below. */
	ASSERT((mod_flags & DDI_PROP_TYPE_MASK) == 0);

	/*
	 * If this was originally an LDI prop lookup then we bail here.
	 * The reason is that the LDI property lookup interfaces first call
	 * a drivers prop_op() entry point to allow it to override
	 * properties.  But if we've made it here, then the driver hasn't
	 * overridden any properties.  We don't want to continue with the
	 * property search here because we don't have any type information.
	 * When we return failure, the LDI interfaces will then proceed to
	 * call the typed property interfaces to look up the property.
	 */
	if (mod_flags & DDI_PROP_DYNAMIC)
		return (DDI_PROP_NOT_FOUND);

	/*
	 * check for pre-typed property consumer asking for typed property:
	 * see e_ddi_getprop_int64.
	 */
	if (mod_flags & DDI_PROP_CONSUMER_TYPED)
		mod_flags |= DDI_PROP_TYPE_INT64;
	mod_flags |= DDI_PROP_TYPE_ANY;

	i = ddi_prop_search_common(dev, dip, prop_op,
	    mod_flags, name, valuep, (uint_t *)lengthp);
	/* A 1275 (PROM) hit counts as success for this interface. */
	if (i == DDI_PROP_FOUND_1275)
		return (DDI_PROP_SUCCESS);
	return (i);
}
1545 1498
1546 1499 /*
1547 1500 * ddi_prop_op_nblocks_blksize: The basic property operator for drivers that
1548 1501 * maintain size in number of blksize blocks. Provides a dynamic property
1549 1502 * implementation for size oriented properties based on nblocks64 and blksize
1550 1503 * values passed in by the driver. Fallback to ddi_prop_op if the nblocks64
1551 1504 * is too large. This interface should not be used with a nblocks64 that
1552 1505 * represents the driver's idea of how to represent unknown, if nblocks is
1553 1506 * unknown use ddi_prop_op.
1554 1507 */
int
ddi_prop_op_nblocks_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp,
    uint64_t nblocks64, uint_t blksize)
{
	uint64_t size64;
	int blkshift;

	/* convert block size to shift value; blksize must be a power of 2 */
	ASSERT(BIT_ONLYONESET(blksize));
	blkshift = highbit(blksize) - 1;

	/*
	 * There is no point in supporting nblocks64 values that don't have
	 * an accurate uint64_t byte count representation.
	 */
	if (nblocks64 >= (UINT64_MAX >> blkshift))
		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
		    name, valuep, lengthp));

	/* Delegate to the byte-count variant. */
	size64 = nblocks64 << blkshift;
	return (ddi_prop_op_size_blksize(dev, dip, prop_op, mod_flags,
	    name, valuep, lengthp, size64, blksize));
}
1579 1532
1580 1533 /*
1581 1534 * ddi_prop_op_nblocks: ddi_prop_op_nblocks_blksize with DEV_BSIZE blksize.
1582 1535 */
int
ddi_prop_op_nblocks(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t nblocks64)
{
	/* Convenience wrapper: fixed DEV_BSIZE block size. */
	return (ddi_prop_op_nblocks_blksize(dev, dip, prop_op,
	    mod_flags, name, valuep, lengthp, nblocks64, DEV_BSIZE));
}
1590 1543
1591 1544 /*
1592 1545 * ddi_prop_op_size_blksize: The basic property operator for block drivers that
 * maintain size in bytes. Provides a dynamic property implementation for
1594 1547 * size oriented properties based on size64 value and blksize passed in by the
1595 1548 * driver. Fallback to ddi_prop_op if the size64 is too large. This interface
1596 1549 * should not be used with a size64 that represents the driver's idea of how
1597 1550 * to represent unknown, if size is unknown use ddi_prop_op.
1598 1551 *
1599 1552 * NOTE: the legacy "nblocks"/"size" properties are treated as 32-bit unsigned
1600 1553 * integers. While the most likely interface to request them ([bc]devi_size)
1601 1554 * is declared int (signed) there is no enforcement of this, which means we
1602 1555 * can't enforce limitations here without risking regression.
1603 1556 */
int
ddi_prop_op_size_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64,
    uint_t blksize)
{
	uint64_t nblocks64;
	int callers_length;
	caddr_t buffer;
	int blkshift;

	/*
	 * This is a kludge to support capture of size(9P) pure dynamic
	 * properties in snapshots for non-cmlb code (without exposing
	 * i_ddi_prop_dyn changes).  When everyone uses cmlb, this code
	 * should be removed.
	 */
	if (i_ddi_prop_dyn_driver_get(dip) == NULL) {
		static i_ddi_prop_dyn_t prop_dyn_size[] = {
		    {"Size",	DDI_PROP_TYPE_INT64,	S_IFCHR},
		    {"Nblocks",	DDI_PROP_TYPE_INT64,	S_IFBLK},
		    {NULL}
		};
		i_ddi_prop_dyn_driver_set(dip, prop_dyn_size);
	}

	/* convert block size to shift value; blksize must be a power of 2 */
	ASSERT(BIT_ONLYONESET(blksize));
	blkshift = highbit(blksize) - 1;

	/* compute nblocks value in units of blksize blocks */
	nblocks64 = size64 >> blkshift;

	/* get callers length, establish length of our dynamic properties */
	callers_length = *lengthp;

	if (strcmp(name, "Nblocks") == 0)
		*lengthp = sizeof (uint64_t);
	else if (strcmp(name, "Size") == 0)
		*lengthp = sizeof (uint64_t);
	else if ((strcmp(name, "nblocks") == 0) && (nblocks64 < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else if ((strcmp(name, "size") == 0) && (size64 < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else if ((strcmp(name, "blksize") == 0) && (blksize < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else {
		/* fallback to ddi_prop_op */
		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
		    name, valuep, lengthp));
	}

	/* service request for the length of the property */
	if (prop_op == PROP_LEN)
		return (DDI_PROP_SUCCESS);

	switch (prop_op) {
	case PROP_LEN_AND_VAL_ALLOC:
		if ((buffer = kmem_alloc(*lengthp,
		    (mod_flags & DDI_PROP_CANSLEEP) ?
		    KM_SLEEP : KM_NOSLEEP)) == NULL)
			return (DDI_PROP_NO_MEMORY);

		*(caddr_t *)valuep = buffer;	/* set callers buf ptr */
		break;

	case PROP_LEN_AND_VAL_BUF:
		/* the length of the property and the request must match */
		if (callers_length != *lengthp)
			return (DDI_PROP_INVAL_ARG);

		buffer = valuep;		/* get callers buf ptr */
		break;

	default:
		return (DDI_PROP_INVAL_ARG);
	}

	/* transfer the value into the buffer */
	if (strcmp(name, "Nblocks") == 0)
		*((uint64_t *)buffer) = nblocks64;
	else if (strcmp(name, "Size") == 0)
		*((uint64_t *)buffer) = size64;
	else if (strcmp(name, "nblocks") == 0)
		*((uint32_t *)buffer) = (uint32_t)nblocks64;
	else if (strcmp(name, "size") == 0)
		*((uint32_t *)buffer) = (uint32_t)size64;
	else if (strcmp(name, "blksize") == 0)
		*((uint32_t *)buffer) = (uint32_t)blksize;
	return (DDI_PROP_SUCCESS);
}
1694 1647
1695 1648 /*
1696 1649 * ddi_prop_op_size: ddi_prop_op_size_blksize with DEV_BSIZE block size.
1697 1650 */
int
ddi_prop_op_size(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64)
{
	/* Convenience wrapper: fixed DEV_BSIZE block size. */
	return (ddi_prop_op_size_blksize(dev, dip, prop_op,
	    mod_flags, name, valuep, lengthp, size64, DEV_BSIZE));
}
1705 1658
1706 1659 /*
1707 1660 * Variable length props...
1708 1661 */
1709 1662
1710 1663 /*
1711 1664 * ddi_getlongprop: Get variable length property len+val into a buffer
1712 1665 * allocated by property provider via kmem_alloc. Requester
1713 1666 * is responsible for freeing returned property via kmem_free.
1714 1667 *
1715 1668 * Arguments:
1716 1669 *
1717 1670 * dev_t: Input: dev_t of property.
1718 1671 * dip: Input: dev_info_t pointer of child.
1719 1672 * flags: Input: Possible flag modifiers are:
1720 1673 * DDI_PROP_DONTPASS: Don't pass to parent if prop not found.
1721 1674 * DDI_PROP_CANSLEEP: Memory allocation may sleep.
1722 1675 * name: Input: name of property.
1723 1676 * valuep: Output: Addr of callers buffer pointer.
1724 1677 * lengthp:Output: *lengthp will contain prop length on exit.
1725 1678 *
1726 1679 * Possible Returns:
1727 1680 *
1728 1681 * DDI_PROP_SUCCESS: Prop found and returned.
1729 1682 * DDI_PROP_NOT_FOUND: Prop not found
1730 1683 * DDI_PROP_UNDEFINED: Prop explicitly undefined.
1731 1684 * DDI_PROP_NO_MEMORY: Prop found, but unable to alloc mem.
1732 1685 */
1733 1686
int
ddi_getlongprop(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t valuep, int *lengthp)
{
	/* Provider allocates; caller must kmem_free(*valuep, *lengthp). */
	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_ALLOC,
	    flags, name, valuep, lengthp));
}
1741 1694
1742 1695 /*
1743 1696 *
1744 1697 * ddi_getlongprop_buf: Get long prop into pre-allocated callers
1745 1698 * buffer. (no memory allocation by provider).
1746 1699 *
1747 1700 * dev_t: Input: dev_t of property.
1748 1701 * dip: Input: dev_info_t pointer of child.
1749 1702 * flags: Input: DDI_PROP_DONTPASS or NULL
1750 1703 * name: Input: name of property
1751 1704 * valuep: Input: ptr to callers buffer.
1752 1705 * lengthp:I/O: ptr to length of callers buffer on entry,
1753 1706 * actual length of property on exit.
1754 1707 *
1755 1708 * Possible returns:
1756 1709 *
1757 1710 * DDI_PROP_SUCCESS Prop found and returned
1758 1711 * DDI_PROP_NOT_FOUND Prop not found
1759 1712 * DDI_PROP_UNDEFINED Prop explicitly undefined.
1760 1713 * DDI_PROP_BUF_TOO_SMALL Prop found, callers buf too small,
1761 1714 * no value returned, but actual prop
1762 1715 * length returned in *lengthp
1763 1716 *
1764 1717 */
1765 1718
int
ddi_getlongprop_buf(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t valuep, int *lengthp)
{
	/* Copy into caller-supplied buffer; no allocation here. */
	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
	    flags, name, valuep, lengthp));
}
1773 1726
1774 1727 /*
1775 1728 * Integer/boolean sized props.
1776 1729 *
1777 1730 * Call is value only... returns found boolean or int sized prop value or
1778 1731 * defvalue if prop not found or is wrong length or is explicitly undefined.
1779 1732 * Only flag is DDI_PROP_DONTPASS...
1780 1733 *
1781 1734 * By convention, this interface returns boolean (0) sized properties
1782 1735 * as value (int)1.
1783 1736 *
1784 1737 * This never returns an error, if property not found or specifically
1785 1738 * undefined, the input `defvalue' is returned.
1786 1739 */
1787 1740
1788 1741 int
1789 1742 ddi_getprop(dev_t dev, dev_info_t *dip, int flags, char *name, int defvalue)
1790 1743 {
1791 1744 int propvalue = defvalue;
1792 1745 int proplength = sizeof (int);
1793 1746 int error;
1794 1747
1795 1748 error = ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
1796 1749 flags, name, (caddr_t)&propvalue, &proplength);
1797 1750
1798 1751 if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
1799 1752 propvalue = 1;
1800 1753
1801 1754 return (propvalue);
1802 1755 }
1803 1756
1804 1757 /*
1805 1758 * Get prop length interface: flags are 0 or DDI_PROP_DONTPASS
1806 1759 * if returns DDI_PROP_SUCCESS, length returned in *lengthp.
1807 1760 */
1808 1761
int
ddi_getproplen(dev_t dev, dev_info_t *dip, int flags, char *name, int *lengthp)
{
	/* PROP_LEN: no value copied; *lengthp set only on success. */
	return (ddi_prop_op(dev, dip, PROP_LEN, flags, name, NULL, lengthp));
}
1814 1767
1815 1768 /*
1816 1769 * Allocate a struct prop_driver_data, along with 'size' bytes
1817 1770 * for decoded property data. This structure is freed by
1818 1771 * calling ddi_prop_free(9F).
1819 1772 */
static void *
ddi_prop_decode_alloc(size_t size, void (*prop_free)(struct prop_driver_data *))
{
	struct prop_driver_data *pdd;

	/*
	 * Allocate a structure with enough memory to store the decoded data.
	 * The prop_driver_data header precedes the data and records the
	 * total size and the matching free routine for ddi_prop_free().
	 */
	pdd = kmem_zalloc(sizeof (struct prop_driver_data) + size, KM_SLEEP);
	pdd->pdd_size = (sizeof (struct prop_driver_data) + size);
	pdd->pdd_prop_free = prop_free;

	/*
	 * Return a pointer to the location to put the decoded data.
	 */
	return ((void *)((caddr_t)pdd + sizeof (struct prop_driver_data)));
}
1837 1790
1838 1791 /*
1839 1792 * Allocated the memory needed to store the encoded data in the property
1840 1793 * handle.
1841 1794 */
1842 1795 static int
1843 1796 ddi_prop_encode_alloc(prop_handle_t *ph, size_t size)
1844 1797 {
1845 1798 /*
1846 1799 * If size is zero, then set data to NULL and size to 0. This
1847 1800 * is a boolean property.
1848 1801 */
1849 1802 if (size == 0) {
1850 1803 ph->ph_size = 0;
1851 1804 ph->ph_data = NULL;
1852 1805 ph->ph_cur_pos = NULL;
1853 1806 ph->ph_save_pos = NULL;
1854 1807 } else {
1855 1808 if (ph->ph_flags == DDI_PROP_DONTSLEEP) {
1856 1809 ph->ph_data = kmem_zalloc(size, KM_NOSLEEP);
1857 1810 if (ph->ph_data == NULL)
1858 1811 return (DDI_PROP_NO_MEMORY);
1859 1812 } else
1860 1813 ph->ph_data = kmem_zalloc(size, KM_SLEEP);
1861 1814 ph->ph_size = size;
1862 1815 ph->ph_cur_pos = ph->ph_data;
1863 1816 ph->ph_save_pos = ph->ph_data;
1864 1817 }
1865 1818 return (DDI_PROP_SUCCESS);
1866 1819 }
1867 1820
1868 1821 /*
1869 1822 * Free the space allocated by the lookup routines. Each lookup routine
1870 1823 * returns a pointer to the decoded data to the driver. The driver then
1871 1824 * passes this pointer back to us. This data actually lives in a struct
1872 1825 * prop_driver_data. We use negative indexing to find the beginning of
1873 1826 * the structure and then free the entire structure using the size and
1874 1827 * the free routine stored in the structure.
1875 1828 */
void
ddi_prop_free(void *datap)
{
	struct prop_driver_data *pdd;

	/*
	 * Get the structure: the prop_driver_data header sits
	 * immediately before the data pointer handed to the driver.
	 */
	pdd = (struct prop_driver_data *)
	    ((caddr_t)datap - sizeof (struct prop_driver_data));
	/*
	 * Call the free routine to free it
	 */
	(*pdd->pdd_prop_free)(pdd);
}
1891 1844
/*
 * Free the data associated with an array of ints,
 * allocated with ddi_prop_decode_alloc().  Installed as the
 * pdd_prop_free callback; pdd_size covers the header plus the data.
 */
static void
ddi_prop_free_ints(struct prop_driver_data *pdd)
{
	kmem_free(pdd, pdd->pdd_size);
}
1901 1854
/*
 * Free a single string property or a single string contained within
 * the argv style return value of an array of strings.  Installed as
 * the pdd_prop_free callback by ddi_prop_decode_alloc().
 */
static void
ddi_prop_free_string(struct prop_driver_data *pdd)
{
	kmem_free(pdd, pdd->pdd_size);

}
1912 1865
/*
 * Free an array of strings (the single allocation holding both the
 * pointer vector and the string bytes; see ddi_prop_fm_decode_strings).
 */
static void
ddi_prop_free_strings(struct prop_driver_data *pdd)
{
	kmem_free(pdd, pdd->pdd_size);
}
1921 1874
/*
 * Free the data associated with an array of bytes, allocated with
 * ddi_prop_decode_alloc().
 */
static void
ddi_prop_free_bytes(struct prop_driver_data *pdd)
{
	kmem_free(pdd, pdd->pdd_size);
}
1930 1883
/*
 * Reset the current location pointer in the property handle to the
 * beginning of the data, and discard any saved position.
 */
void
ddi_prop_reset_pos(prop_handle_t *ph)
{
	ph->ph_cur_pos = ph->ph_data;
	ph->ph_save_pos = ph->ph_data;
}
1941 1894
/*
 * Save the location that the current location pointer is pointing to,
 * so it can later be restored with ddi_prop_restore_pos().
 * (The previous comment here described the restore operation; the
 * comments on this pair of functions were swapped.)
 */
void
ddi_prop_save_pos(prop_handle_t *ph)
{
	ph->ph_save_pos = ph->ph_cur_pos;
}
1951 1904
/*
 * Restore the current location pointer in the property handle to the
 * position saved by ddi_prop_save_pos().
 * (The previous comment here described the save operation; the
 * comments on this pair of functions were swapped.)
 */
void
ddi_prop_restore_pos(prop_handle_t *ph)
{
	ph->ph_cur_pos = ph->ph_save_pos;
}
1960 1913
1961 1914 /*
1962 1915 * Property encode/decode functions
1963 1916 */
1964 1917
/*
 * Decode a single integer property.
 *
 * On success the decoded value is stored through 'data' (treated as an
 * int *) and *nelements is set to 1.  Returns DDI_PROP_SUCCESS, or
 * DDI_PROP_END_OF_DATA / DDI_PROP_CANNOT_DECODE on failure.
 */
static int
ddi_prop_fm_decode_int(prop_handle_t *ph, void *data, uint_t *nelements)
{
	int i;
	int tmp;

	/*
	 * If there is nothing to decode return an error
	 */
	if (ph->ph_size == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * Decode the property as a single integer and return it
	 * in data if we were able to decode it.
	 */
	i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, &tmp);
	if (i < DDI_PROP_RESULT_OK) {
		switch (i) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	*(int *)data = tmp;
	*nelements = 1;
	return (DDI_PROP_SUCCESS);
}
1999 1952
/*
 * Decode a single 64 bit integer property.
 *
 * On success the decoded value is stored through 'data' (treated as an
 * int64_t *) and *nelements is set to 1.  Returns DDI_PROP_SUCCESS, or
 * DDI_PROP_END_OF_DATA / DDI_PROP_CANNOT_DECODE on failure.
 */
static int
ddi_prop_fm_decode_int64(prop_handle_t *ph, void *data, uint_t *nelements)
{
	int i;
	int64_t tmp;

	/*
	 * If there is nothing to decode return an error
	 */
	if (ph->ph_size == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * Decode the property as a single integer and return it
	 * in data if we were able to decode it.
	 */
	i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, &tmp);
	if (i < DDI_PROP_RESULT_OK) {
		switch (i) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	*(int64_t *)data = tmp;
	*nelements = 1;
	return (DDI_PROP_SUCCESS);
}
2034 1987
/*
 * Decode an array of integers property.
 *
 * Makes two passes over the encoded data: one to count the elements
 * (via DDI_PROP_CMD_SKIP), and one to decode them into a freshly
 * allocated buffer.  On success *(int **)data points at the decoded
 * array (caller frees with ddi_prop_free()) and *nelements holds the
 * count.
 */
static int
ddi_prop_fm_decode_ints(prop_handle_t *ph, void *data, uint_t *nelements)
{
	int i;
	int cnt = 0;
	int *tmp;
	int *intp;
	int n;

	/*
	 * Figure out how many array elements there are by going through the
	 * data without decoding it first and counting.
	 */
	for (;;) {
		i = DDI_PROP_INT(ph, DDI_PROP_CMD_SKIP, NULL);
		if (i < 0)
			break;
		cnt++;
	}

	/*
	 * If there are no elements return an error
	 */
	if (cnt == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * If we cannot skip through the data, we cannot decode it
	 */
	if (i == DDI_PROP_RESULT_ERROR)
		return (DDI_PROP_CANNOT_DECODE);

	/*
	 * Reset the data pointer to the beginning of the encoded data
	 */
	ddi_prop_reset_pos(ph);

	/*
	 * Allocated memory to store the decoded value in.
	 */
	intp = ddi_prop_decode_alloc((cnt * sizeof (int)),
	    ddi_prop_free_ints);

	/*
	 * Decode each element and place it in the space we just allocated
	 */
	tmp = intp;
	for (n = 0; n < cnt; n++, tmp++) {
		i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, tmp);
		if (i < DDI_PROP_RESULT_OK) {
			/*
			 * Free the space we just allocated
			 * and return an error.
			 */
			ddi_prop_free(intp);
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}
	}

	*nelements = cnt;
	*(int **)data = intp;

	return (DDI_PROP_SUCCESS);
}
2108 2061
/*
 * Decode a 64 bit integer array property.
 *
 * Same two-pass structure as ddi_prop_fm_decode_ints(): count the
 * elements with DDI_PROP_CMD_SKIP, then decode into a fresh buffer.
 * On success *(int64_t **)data points at the decoded array (freed by
 * the caller with ddi_prop_free()) and *nelements holds the count.
 */
static int
ddi_prop_fm_decode_int64_array(prop_handle_t *ph, void *data, uint_t *nelements)
{
	int i;
	int n;
	int cnt = 0;
	int64_t *tmp;
	int64_t *intp;

	/*
	 * Count the number of array elements by going
	 * through the data without decoding it.
	 */
	for (;;) {
		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_SKIP, NULL);
		if (i < 0)
			break;
		cnt++;
	}

	/*
	 * If there are no elements return an error
	 */
	if (cnt == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * If we cannot skip through the data, we cannot decode it
	 */
	if (i == DDI_PROP_RESULT_ERROR)
		return (DDI_PROP_CANNOT_DECODE);

	/*
	 * Reset the data pointer to the beginning of the encoded data
	 */
	ddi_prop_reset_pos(ph);

	/*
	 * Allocate memory to store the decoded value.
	 */
	intp = ddi_prop_decode_alloc((cnt * sizeof (int64_t)),
	    ddi_prop_free_ints);

	/*
	 * Decode each element and place it in the space allocated
	 */
	tmp = intp;
	for (n = 0; n < cnt; n++, tmp++) {
		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, tmp);
		if (i < DDI_PROP_RESULT_OK) {
			/*
			 * Free the space we just allocated
			 * and return an error.
			 */
			ddi_prop_free(intp);
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}
	}

	*nelements = cnt;
	*(int64_t **)data = intp;

	return (DDI_PROP_SUCCESS);
}
2182 2135
/*
 * Encode an array of integers property (can be one element).
 *
 * 'data' points to the first of 'nelements' ints.  Allocates the
 * encode buffer in the handle, then encodes each element.  Returns
 * DDI_PROP_SUCCESS, DDI_PROP_NO_MEMORY, DDI_PROP_END_OF_DATA or
 * DDI_PROP_CANNOT_ENCODE.
 */
int
ddi_prop_fm_encode_ints(prop_handle_t *ph, void *data, uint_t nelements)
{
	int i;
	int *tmp;
	int cnt;
	int size;

	/*
	 * If there is no data, we cannot do anything
	 */
	if (nelements == 0)
		return (DDI_PROP_CANNOT_ENCODE);

	/*
	 * Get the size of an encoded int.
	 */
	size = DDI_PROP_INT(ph, DDI_PROP_CMD_GET_ESIZE, NULL);

	if (size < DDI_PROP_RESULT_OK) {
		switch (size) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_ENCODE);
		}
	}

	/*
	 * Allocate space in the handle to store the encoded int.
	 */
	if (ddi_prop_encode_alloc(ph, size * nelements) !=
	    DDI_PROP_SUCCESS)
		return (DDI_PROP_NO_MEMORY);

	/*
	 * Encode the array of ints.
	 */
	tmp = (int *)data;
	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
		i = DDI_PROP_INT(ph, DDI_PROP_CMD_ENCODE, tmp);
		if (i < DDI_PROP_RESULT_OK) {
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_ENCODE);
			}
		}
	}

	return (DDI_PROP_SUCCESS);
}
2241 2194
2242 2195
/*
 * Encode a 64 bit integer array property (can be one element).
 *
 * 'data' points to the first of 'nelements' int64_t values.  Returns
 * DDI_PROP_SUCCESS, DDI_PROP_NO_MEMORY, DDI_PROP_END_OF_DATA or
 * DDI_PROP_CANNOT_ENCODE.
 */
int
ddi_prop_fm_encode_int64(prop_handle_t *ph, void *data, uint_t nelements)
{
	int i;
	int cnt;
	int size;
	int64_t *tmp;

	/*
	 * If there is no data, we cannot do anything
	 */
	if (nelements == 0)
		return (DDI_PROP_CANNOT_ENCODE);

	/*
	 * Get the size of an encoded 64 bit int.
	 */
	size = DDI_PROP_INT64(ph, DDI_PROP_CMD_GET_ESIZE, NULL);

	if (size < DDI_PROP_RESULT_OK) {
		switch (size) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_ENCODE);
		}
	}

	/*
	 * Allocate space in the handle to store the encoded int.
	 */
	if (ddi_prop_encode_alloc(ph, size * nelements) !=
	    DDI_PROP_SUCCESS)
		return (DDI_PROP_NO_MEMORY);

	/*
	 * Encode the array of ints.
	 */
	tmp = (int64_t *)data;
	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_ENCODE, tmp);
		if (i < DDI_PROP_RESULT_OK) {
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_ENCODE);
			}
		}
	}

	return (DDI_PROP_SUCCESS);
}
2301 2254
/*
 * Decode a single string property.
 *
 * Allocates a buffer sized from DDI_PROP_CMD_GET_DSIZE and decodes the
 * string into it.  On success *(char **)data points at the NUL
 * terminated string (caller frees with ddi_prop_free()) and
 * *nelements is set to 1.
 */
static int
ddi_prop_fm_decode_string(prop_handle_t *ph, void *data, uint_t *nelements)
{
	char *tmp;
	char *str;
	int i;
	int size;

	/*
	 * If there is nothing to decode return an error
	 */
	if (ph->ph_size == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * Get the decoded size of the encoded string.
	 */
	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
	if (size < DDI_PROP_RESULT_OK) {
		switch (size) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	/*
	 * Allocate memory to store the decoded value in.
	 */
	str = ddi_prop_decode_alloc((size_t)size, ddi_prop_free_string);

	ddi_prop_reset_pos(ph);

	/*
	 * Decode the str and place it in the space we just allocated
	 */
	tmp = str;
	i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, tmp);
	if (i < DDI_PROP_RESULT_OK) {
		/*
		 * Free the space we just allocated
		 * and return an error.
		 */
		ddi_prop_free(str);
		switch (i) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	*(char **)data = str;
	*nelements = 1;

	return (DDI_PROP_SUCCESS);
}
2365 2318
/*
 * Decode an array of strings.
 *
 * The result is argv-style: one allocation holding (cnt + 1) char
 * pointers followed by the string bytes themselves; the pointer vector
 * is NULL terminated.  On success *(char ***)data points at the vector
 * (caller frees the whole thing with one ddi_prop_free() on the
 * vector) and *nelements holds the string count.
 *
 * Three passes are made over the encoded data: count the strings, sum
 * their decoded sizes / set up the pointer vector, then decode.
 */
int
ddi_prop_fm_decode_strings(prop_handle_t *ph, void *data, uint_t *nelements)
{
	int cnt = 0;
	char **strs;
	char **tmp;
	char *ptr;
	int i;
	int n;
	int size;
	size_t nbytes;

	/*
	 * Figure out how many array elements there are by going through the
	 * data without decoding it first and counting.
	 */
	for (;;) {
		i = DDI_PROP_STR(ph, DDI_PROP_CMD_SKIP, NULL);
		if (i < 0)
			break;
		cnt++;
	}

	/*
	 * If there are no elements return an error
	 */
	if (cnt == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * If we cannot skip through the data, we cannot decode it
	 */
	if (i == DDI_PROP_RESULT_ERROR)
		return (DDI_PROP_CANNOT_DECODE);

	/*
	 * Reset the data pointer to the beginning of the encoded data
	 */
	ddi_prop_reset_pos(ph);

	/*
	 * Figure out how much memory we need for the sum total
	 */
	nbytes = (cnt + 1) * sizeof (char *);

	for (n = 0; n < cnt; n++) {
		/*
		 * Get the decoded size of the current encoded string.
		 */
		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
		if (size < DDI_PROP_RESULT_OK) {
			switch (size) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}

		nbytes += size;
	}

	/*
	 * Allocate memory in which to store the decoded strings.
	 */
	strs = ddi_prop_decode_alloc(nbytes, ddi_prop_free_strings);

	/*
	 * Set up pointers for each string by figuring out yet
	 * again how long each string is.
	 */
	ddi_prop_reset_pos(ph);
	ptr = (caddr_t)strs + ((cnt + 1) * sizeof (char *));
	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
		/*
		 * Get the decoded size of the current encoded string.
		 */
		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
		if (size < DDI_PROP_RESULT_OK) {
			ddi_prop_free(strs);
			switch (size) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}

		*tmp = ptr;
		ptr += size;
	}

	/*
	 * String array is terminated by a NULL
	 */
	*tmp = NULL;

	/*
	 * Finally, we can decode each string
	 */
	ddi_prop_reset_pos(ph);
	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
		i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, *tmp);
		if (i < DDI_PROP_RESULT_OK) {
			/*
			 * Free the space we just allocated
			 * and return an error
			 */
			ddi_prop_free(strs);
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}
	}

	*(char ***)data = strs;
	*nelements = cnt;

	return (DDI_PROP_SUCCESS);
}
2495 2448
/*
 * Encode a string.
 *
 * 'data' is a char ** pointing at the string to encode; nelements must
 * be non-zero.  Returns DDI_PROP_SUCCESS, DDI_PROP_NO_MEMORY,
 * DDI_PROP_END_OF_DATA or DDI_PROP_CANNOT_ENCODE.
 */
int
ddi_prop_fm_encode_string(prop_handle_t *ph, void *data, uint_t nelements)
{
	char **tmp;
	int size;
	int i;

	/*
	 * If there is no data, we cannot do anything
	 */
	if (nelements == 0)
		return (DDI_PROP_CANNOT_ENCODE);

	/*
	 * Get the size of the encoded string.
	 */
	tmp = (char **)data;
	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
	if (size < DDI_PROP_RESULT_OK) {
		switch (size) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_ENCODE);
		}
	}

	/*
	 * Allocate space in the handle to store the encoded string.
	 */
	if (ddi_prop_encode_alloc(ph, size) != DDI_PROP_SUCCESS)
		return (DDI_PROP_NO_MEMORY);

	ddi_prop_reset_pos(ph);

	/*
	 * Encode the string.
	 */
	tmp = (char **)data;
	i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
	if (i < DDI_PROP_RESULT_OK) {
		switch (i) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_ENCODE);
		}
	}

	return (DDI_PROP_SUCCESS);
}
2552 2505
2553 2506
/*
 * Encode an array of strings.
 *
 * 'data' is a char ** pointing at the first of 'nelements' strings.
 * Two passes: sum the encoded sizes, allocate the buffer, then encode
 * each string.  Returns DDI_PROP_SUCCESS, DDI_PROP_NO_MEMORY,
 * DDI_PROP_END_OF_DATA or DDI_PROP_CANNOT_ENCODE.
 */
int
ddi_prop_fm_encode_strings(prop_handle_t *ph, void *data, uint_t nelements)
{
	int cnt = 0;
	char **tmp;
	int size;
	uint_t total_size;
	int i;

	/*
	 * If there is no data, we cannot do anything
	 */
	if (nelements == 0)
		return (DDI_PROP_CANNOT_ENCODE);

	/*
	 * Get the total size required to encode all the strings.
	 */
	total_size = 0;
	tmp = (char **)data;
	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
		if (size < DDI_PROP_RESULT_OK) {
			switch (size) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_ENCODE);
			}
		}
		total_size += (uint_t)size;
	}

	/*
	 * Allocate space in the handle to store the encoded strings.
	 */
	if (ddi_prop_encode_alloc(ph, total_size) != DDI_PROP_SUCCESS)
		return (DDI_PROP_NO_MEMORY);

	ddi_prop_reset_pos(ph);

	/*
	 * Encode the array of strings.
	 */
	tmp = (char **)data;
	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
		i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
		if (i < DDI_PROP_RESULT_OK) {
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_ENCODE);
			}
		}
	}

	return (DDI_PROP_SUCCESS);
}
2618 2571
2619 2572
/*
 * Decode an array of bytes.
 *
 * On success *(uchar_t **)data points at the decoded bytes (caller
 * frees with ddi_prop_free()) and *nelements holds the byte count.
 */
static int
ddi_prop_fm_decode_bytes(prop_handle_t *ph, void *data, uint_t *nelements)
{
	uchar_t *tmp;
	int nbytes;
	int i;

	/*
	 * If there are no elements return an error
	 */
	if (ph->ph_size == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * Get the size of the encoded array of bytes.
	 */
	nbytes = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_DSIZE,
	    data, ph->ph_size);
	if (nbytes < DDI_PROP_RESULT_OK) {
		switch (nbytes) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	/*
	 * Allocate memory to store the decoded value in.
	 */
	tmp = ddi_prop_decode_alloc(nbytes, ddi_prop_free_bytes);

	/*
	 * Decode each element and place it in the space we just allocated
	 */
	i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_DECODE, tmp, nbytes);
	if (i < DDI_PROP_RESULT_OK) {
		/*
		 * Free the space we just allocated
		 * and return an error
		 */
		ddi_prop_free(tmp);
		switch (i) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	*(uchar_t **)data = tmp;
	*nelements = nbytes;

	return (DDI_PROP_SUCCESS);
}
2680 2633
2681 2634 /*
2682 2635 * Encode an array of bytes.
2683 2636 */
2684 2637 int
2685 2638 ddi_prop_fm_encode_bytes(prop_handle_t *ph, void *data, uint_t nelements)
2686 2639 {
2687 2640 int size;
2688 2641 int i;
2689 2642
2690 2643 /*
2691 2644 * If there are no elements, then this is a boolean property,
2692 2645 * so just create a property handle with no data and return.
2693 2646 */
2694 2647 if (nelements == 0) {
2695 2648 (void) ddi_prop_encode_alloc(ph, 0);
2696 2649 return (DDI_PROP_SUCCESS);
2697 2650 }
2698 2651
2699 2652 /*
2700 2653 * Get the size of the encoded array of bytes.
2701 2654 */
2702 2655 size = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_ESIZE, (uchar_t *)data,
2703 2656 nelements);
2704 2657 if (size < DDI_PROP_RESULT_OK) {
2705 2658 switch (size) {
2706 2659 case DDI_PROP_RESULT_EOF:
2707 2660 return (DDI_PROP_END_OF_DATA);
2708 2661
2709 2662 case DDI_PROP_RESULT_ERROR:
2710 2663 return (DDI_PROP_CANNOT_DECODE);
2711 2664 }
2712 2665 }
2713 2666
2714 2667 /*
2715 2668 * Allocate space in the handle to store the encoded bytes.
2716 2669 */
2717 2670 if (ddi_prop_encode_alloc(ph, (uint_t)size) != DDI_PROP_SUCCESS)
2718 2671 return (DDI_PROP_NO_MEMORY);
2719 2672
2720 2673 /*
2721 2674 * Encode the array of bytes.
2722 2675 */
2723 2676 i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_ENCODE, (uchar_t *)data,
2724 2677 nelements);
2725 2678 if (i < DDI_PROP_RESULT_OK) {
2726 2679 switch (i) {
2727 2680 case DDI_PROP_RESULT_EOF:
2728 2681 return (DDI_PROP_END_OF_DATA);
2729 2682
2730 2683 case DDI_PROP_RESULT_ERROR:
2731 2684 return (DDI_PROP_CANNOT_ENCODE);
2732 2685 }
2733 2686 }
2734 2687
2735 2688 return (DDI_PROP_SUCCESS);
2736 2689 }
2737 2690
/*
 * OBP 1275 integer, string and byte operators.
 *
 * DDI_PROP_CMD_DECODE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot decode the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_RESULT_OK:		data was decoded
 *
 * DDI_PROP_CMD_ENCODE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot encode the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_RESULT_OK:		data was encoded
 *
 * DDI_PROP_CMD_SKIP:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot skip the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_RESULT_OK:		data was skipped
 *
 * DDI_PROP_CMD_GET_ESIZE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot get encoded size
 *	DDI_PROP_RESULT_EOF:		end of data
 *	> 0:				the encoded size
 *
 * DDI_PROP_CMD_GET_DSIZE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot get decoded size
 *	DDI_PROP_RESULT_EOF:		end of data
 *	> 0:				the decoded size
 */
2771 2724
/*
 * OBP 1275 integer operator
 *
 * OBP properties are a byte stream of data, so integers may not be
 * properly aligned.  Therefore we need to copy them one byte at a time.
 *
 * Dispatches on 'cmd' (decode / encode / skip / get-esize / get-dsize)
 * per the operator contract documented above.
 */
int
ddi_prop_1275_int(prop_handle_t *ph, uint_t cmd, int *data)
{
	int	i;

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
			return (DDI_PROP_RESULT_ERROR);
		/*
		 * NOTE(review): in both bounds checks below, ph_data is cast
		 * to (int *) before ph_size (a byte count) is added, so the
		 * addition is scaled by sizeof (int) -- the computed limit
		 * lies well past the real end of the buffer.  Preserved
		 * byte-for-byte; confirm intent before changing.
		 */
		if (ph->ph_flags & PH_FROM_PROM) {
			i = MIN(ph->ph_size, PROP_1275_INT_SIZE);
			if ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
			    ph->ph_size - i))
				return (DDI_PROP_RESULT_ERROR);
		} else {
			if (ph->ph_size < sizeof (int) ||
			    ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
			    ph->ph_size - sizeof (int))))
				return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Copy the integer, using the implementation-specific
		 * copy function if the property is coming from the PROM.
		 */
		if (ph->ph_flags & PH_FROM_PROM) {
			*data = impl_ddi_prop_int_from_prom(
			    (uchar_t *)ph->ph_cur_pos,
			    (ph->ph_size < PROP_1275_INT_SIZE) ?
			    ph->ph_size : PROP_1275_INT_SIZE);
		} else {
			bcopy(ph->ph_cur_pos, data, sizeof (int));
		}

		/*
		 * Move the current location to the start of the next
		 * bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encoded the data
		 * (same scaled-pointer caveat as in the DECODE case).
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < PROP_1275_INT_SIZE ||
		    ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
		    ph->ph_size - sizeof (int))))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Encode the integer into the byte stream one byte at a
		 * time.
		 */
		bcopy(data, ph->ph_cur_pos, sizeof (int));

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < PROP_1275_INT_SIZE)
			return (DDI_PROP_RESULT_ERROR);


		if ((caddr_t)ph->ph_cur_pos ==
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		} else if ((caddr_t)ph->ph_cur_pos >
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		}

		/*
		 * Move the current location to the start of the next bit of
		 * undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of an encoded integer on OBP
		 */
		return (PROP_1275_INT_SIZE);

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the size of a decoded integer on the system.
		 */
		return (sizeof (int));

	default:
#ifdef DEBUG
		panic("ddi_prop_1275_int: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}
2891 2844
/*
 * 64 bit integer operator.
 *
 * This is an extension, defined by Sun, to the 1275 integer
 * operator.  This routine handles the encoding/decoding of
 * 64 bit integer properties.  PROM-sourced properties are rejected
 * outright (no 64-bit PROM encoding exists).
 */
int
ddi_prop_int64_op(prop_handle_t *ph, uint_t cmd, int64_t *data)
{

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
			return (DDI_PROP_RESULT_ERROR);
		if (ph->ph_flags & PH_FROM_PROM) {
			return (DDI_PROP_RESULT_ERROR);
		} else {
			/*
			 * NOTE(review): as in ddi_prop_1275_int(), the
			 * (int64_t *) casts scale the byte count ph_size
			 * by sizeof (int64_t) in this bounds check.
			 * Preserved as-is -- confirm intent.
			 */
			if (ph->ph_size < sizeof (int64_t) ||
			    ((int64_t *)ph->ph_cur_pos >
			    ((int64_t *)ph->ph_data +
			    ph->ph_size - sizeof (int64_t))))
				return (DDI_PROP_RESULT_ERROR);
		}
		/*
		 * Copy the integer, using the implementation-specific
		 * copy function if the property is coming from the PROM.
		 * (The PROM case below is unreachable: PH_FROM_PROM was
		 * already rejected above.)
		 */
		if (ph->ph_flags & PH_FROM_PROM) {
			return (DDI_PROP_RESULT_ERROR);
		} else {
			bcopy(ph->ph_cur_pos, data, sizeof (int64_t));
		}

		/*
		 * Move the current location to the start of the next
		 * bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encoded the data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < sizeof (int64_t) ||
		    ((int64_t *)ph->ph_cur_pos > ((int64_t *)ph->ph_data +
		    ph->ph_size - sizeof (int64_t))))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Encode the integer into the byte stream one byte at a
		 * time.
		 */
		bcopy(data, ph->ph_cur_pos, sizeof (int64_t));

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < sizeof (int64_t))
			return (DDI_PROP_RESULT_ERROR);

		if ((caddr_t)ph->ph_cur_pos ==
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		} else if ((caddr_t)ph->ph_cur_pos >
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		}

		/*
		 * Move the current location to the start of
		 * the next bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of an encoded integer on OBP
		 */
		return (sizeof (int64_t));

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the size of a decoded integer on the system.
		 */
		return (sizeof (int64_t));

	default:
#ifdef DEBUG
		panic("ddi_prop_int64_op: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}
3006 2959
/*
 * OBP 1275 string operator.
 *
 * OBP strings are NULL terminated.
 *
 * prop_handle_t operator for NULL terminated string properties:
 * cmd selects decode/encode/skip/size; data is the caller's decoded
 * buffer (DECODE) or the string to encode (ENCODE/GET_ESIZE).
 * Returns a DDI_PROP_RESULT_* code, or a byte count for the size
 * commands.
 */
int
ddi_prop_1275_string(prop_handle_t *ph, uint_t cmd, char *data)
{
	int n;
	char *p;
	char *end;

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Match DDI_PROP_CMD_GET_DSIZE logic for when to stop and
		 * how to NULL terminate result.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;
		if (p >= end)
			return (DDI_PROP_RESULT_EOF);

		/*
		 * Copy bytes up to and including the NULL terminator.
		 * NOTE(review): data is assumed to be at least
		 * GET_DSIZE bytes; there is no bounds check here.
		 */
		while (p < end) {
			*data++ = *p;
			if (*p++ == 0) {	/* NULL from OBP */
				ph->ph_cur_pos = p;
				return (DDI_PROP_RESULT_OK);
			}
		}

		/*
		 * If OBP did not NULL terminate string, which happens
		 * (at least) for 'true'/'false' boolean values, account for
		 * the space and store null termination on decode.
		 */
		ph->ph_cur_pos = p;
		*data = 0;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encode the data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		n = strlen(data) + 1;
		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
		    ph->ph_size - n)) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Copy the NULL terminated string
		 */
		bcopy(data, ph->ph_cur_pos, n);

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + n;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Skip past the string including its NULL terminator.
		 * We know the size of the property; we scan rather than
		 * trusting a terminator to exist, since we may be
		 * looking up random OBP data.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;
		if (p >= end)
			return (DDI_PROP_RESULT_EOF);

		while (p < end) {
			if (*p++ == 0) {	/* NULL from OBP */
				ph->ph_cur_pos = p;
				return (DDI_PROP_RESULT_OK);
			}
		}

		/*
		 * Accommodate the fact that OBP does not always NULL
		 * terminate strings.
		 */
		ph->ph_cur_pos = p;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of the encoded string on OBP.
		 */
		return (strlen(data) + 1);

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the string length plus one for the NULL.
		 * We know the size of the property, we need to
		 * ensure that the string is properly formatted,
		 * since we may be looking up random OBP data.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;
		if (p >= end)
			return (DDI_PROP_RESULT_EOF);

		for (n = 0; p < end; n++) {
			if (*p++ == 0) {	/* NULL from OBP */
				ph->ph_cur_pos = p;
				return (n + 1);
			}
		}

		/*
		 * If OBP did not NULL terminate string, which happens for
		 * 'true'/'false' boolean values, account for the space
		 * to store null termination here.
		 */
		ph->ph_cur_pos = p;
		return (n + 1);

	default:
#ifdef DEBUG
		panic("ddi_prop_1275_string: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif /* DEBUG */
	}
}
3155 3108
/*
 * OBP 1275 byte operator
 *
 * Caller must specify the number of bytes to get. OBP encodes bytes
 * as a byte so there is a 1-to-1 translation.
 *
 * Returns a DDI_PROP_RESULT_* code, or nelements for the two size
 * commands.
 */
int
ddi_prop_1275_bytes(prop_handle_t *ph, uint_t cmd, uchar_t *data,
    uint_t nelements)
{
	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < nelements ||
		    ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
		    ph->ph_size - nelements)))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Copy out the bytes
		 */
		bcopy(ph->ph_cur_pos, data, nelements);

		/*
		 * Move the current location
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encode the data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < nelements ||
		    ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
		    ph->ph_size - nelements)))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Copy in the bytes
		 */
		bcopy(data, ph->ph_cur_pos, nelements);

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < nelements)
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * NOTE(review): running off the end here yields EOF,
		 * while the same overrun in DECODE/ENCODE yields ERROR.
		 * Looks deliberate (SKIP is used to walk to end of the
		 * encoded data), but worth confirming.
		 */
		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
		    ph->ph_size - nelements))
			return (DDI_PROP_RESULT_EOF);

		/*
		 * Move the current location
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * The size in bytes of the encoded size is the
		 * same as the decoded size provided by the caller.
		 */
		return (nelements);

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Just return the number of bytes specified by the caller.
		 */
		return (nelements);

	default:
#ifdef DEBUG
		panic("ddi_prop_1275_bytes: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif /* DEBUG */
	}
}
3250 3203
/*
 * Used for properties that come from the OBP, hardware configuration files,
 * or that are created by calls to ddi_prop_update(9F).
 *
 * Positional initializers: the int, string, byte-array and int64
 * operators, in the order struct prop_handle_ops declares them.
 */
static struct prop_handle_ops prop_1275_ops = {
	ddi_prop_1275_int,
	ddi_prop_1275_string,
	ddi_prop_1275_bytes,
	ddi_prop_int64_op
};
3261 3214
3262 3215
3263 3216 /*
3264 3217 * Interface to create/modify a managed property on child's behalf...
3265 3218 * Flags interpreted are:
3266 3219 * DDI_PROP_CANSLEEP: Allow memory allocation to sleep.
3267 3220 * DDI_PROP_SYSTEM_DEF: Manipulate system list rather than driver list.
3268 3221 *
3269 3222 * Use same dev_t when modifying or undefining a property.
3270 3223 * Search for properties with DDI_DEV_T_ANY to match first named
3271 3224 * property on the list.
3272 3225 *
3273 3226 * Properties are stored LIFO and subsequently will match the first
3274 3227 * `matching' instance.
3275 3228 */
3276 3229
3277 3230 /*
3278 3231 * ddi_prop_add: Add a software defined property
3279 3232 */
3280 3233
3281 3234 /*
3282 3235 * define to get a new ddi_prop_t.
3283 3236 * km_flags are KM_SLEEP or KM_NOSLEEP.
3284 3237 */
3285 3238
3286 3239 #define DDI_NEW_PROP_T(km_flags) \
3287 3240 (kmem_zalloc(sizeof (ddi_prop_t), km_flags))
3288 3241
3289 3242 static int
3290 3243 ddi_prop_add(dev_t dev, dev_info_t *dip, int flags,
3291 3244 char *name, caddr_t value, int length)
3292 3245 {
3293 3246 ddi_prop_t *new_propp, *propp;
3294 3247 ddi_prop_t **list_head = &(DEVI(dip)->devi_drv_prop_ptr);
3295 3248 int km_flags = KM_NOSLEEP;
3296 3249 int name_buf_len;
3297 3250
3298 3251 /*
3299 3252 * If dev_t is DDI_DEV_T_ANY or name's length is zero return error.
3300 3253 */
3301 3254
3302 3255 if (dev == DDI_DEV_T_ANY || name == (char *)0 || strlen(name) == 0)
3303 3256 return (DDI_PROP_INVAL_ARG);
3304 3257
3305 3258 if (flags & DDI_PROP_CANSLEEP)
3306 3259 km_flags = KM_SLEEP;
3307 3260
3308 3261 if (flags & DDI_PROP_SYSTEM_DEF)
3309 3262 list_head = &(DEVI(dip)->devi_sys_prop_ptr);
3310 3263 else if (flags & DDI_PROP_HW_DEF)
3311 3264 list_head = &(DEVI(dip)->devi_hw_prop_ptr);
3312 3265
3313 3266 if ((new_propp = DDI_NEW_PROP_T(km_flags)) == NULL) {
3314 3267 cmn_err(CE_CONT, prop_no_mem_msg, name);
3315 3268 return (DDI_PROP_NO_MEMORY);
3316 3269 }
3317 3270
3318 3271 /*
3319 3272 * If dev is major number 0, then we need to do a ddi_name_to_major
3320 3273 * to get the real major number for the device. This needs to be
3321 3274 * done because some drivers need to call ddi_prop_create in their
3322 3275 * attach routines but they don't have a dev. By creating the dev
3323 3276 * ourself if the major number is 0, drivers will not have to know what
3324 3277 * their major number. They can just create a dev with major number
3325 3278 * 0 and pass it in. For device 0, we will be doing a little extra
3326 3279 * work by recreating the same dev that we already have, but its the
3327 3280 * price you pay :-).
3328 3281 *
3329 3282 * This fixes bug #1098060.
3330 3283 */
3331 3284 if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN) {
3332 3285 new_propp->prop_dev =
3333 3286 makedevice(ddi_name_to_major(DEVI(dip)->devi_binding_name),
3334 3287 getminor(dev));
3335 3288 } else
3336 3289 new_propp->prop_dev = dev;
3337 3290
3338 3291 /*
3339 3292 * Allocate space for property name and copy it in...
3340 3293 */
3341 3294
3342 3295 name_buf_len = strlen(name) + 1;
3343 3296 new_propp->prop_name = kmem_alloc(name_buf_len, km_flags);
3344 3297 if (new_propp->prop_name == 0) {
3345 3298 kmem_free(new_propp, sizeof (ddi_prop_t));
3346 3299 cmn_err(CE_CONT, prop_no_mem_msg, name);
3347 3300 return (DDI_PROP_NO_MEMORY);
3348 3301 }
3349 3302 bcopy(name, new_propp->prop_name, name_buf_len);
3350 3303
3351 3304 /*
3352 3305 * Set the property type
3353 3306 */
3354 3307 new_propp->prop_flags = flags & DDI_PROP_TYPE_MASK;
3355 3308
3356 3309 /*
3357 3310 * Set length and value ONLY if not an explicit property undefine:
3358 3311 * NOTE: value and length are zero for explicit undefines.
3359 3312 */
3360 3313
3361 3314 if (flags & DDI_PROP_UNDEF_IT) {
3362 3315 new_propp->prop_flags |= DDI_PROP_UNDEF_IT;
3363 3316 } else {
3364 3317 if ((new_propp->prop_len = length) != 0) {
3365 3318 new_propp->prop_val = kmem_alloc(length, km_flags);
3366 3319 if (new_propp->prop_val == 0) {
3367 3320 kmem_free(new_propp->prop_name, name_buf_len);
3368 3321 kmem_free(new_propp, sizeof (ddi_prop_t));
3369 3322 cmn_err(CE_CONT, prop_no_mem_msg, name);
3370 3323 return (DDI_PROP_NO_MEMORY);
3371 3324 }
3372 3325 bcopy(value, new_propp->prop_val, length);
3373 3326 }
3374 3327 }
3375 3328
3376 3329 /*
3377 3330 * Link property into beginning of list. (Properties are LIFO order.)
3378 3331 */
3379 3332
3380 3333 mutex_enter(&(DEVI(dip)->devi_lock));
3381 3334 propp = *list_head;
3382 3335 new_propp->prop_next = propp;
3383 3336 *list_head = new_propp;
3384 3337 mutex_exit(&(DEVI(dip)->devi_lock));
3385 3338 return (DDI_PROP_SUCCESS);
3386 3339 }
3387 3340
3388 3341
/*
 * ddi_prop_change: Modify a software managed property value
 *
 * Set new length and value if found.
 * returns DDI_PROP_INVAL_ARG if dev is DDI_DEV_T_ANY or
 * input name is the NULL string.
 * returns DDI_PROP_NO_MEMORY if unable to allocate memory
 *
 * Note: an undef can be modified to be a define,
 * (you can't go the other way.)
 */
static int
ddi_prop_change(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t value, int length)
{
	ddi_prop_t *propp;
	ddi_prop_t **ppropp;
	caddr_t p = NULL;

	if ((dev == DDI_DEV_T_ANY) || (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	/*
	 * Preallocate buffer, even if we don't need it...
	 * (allocated before devi_lock is taken, presumably so a
	 * KM_SLEEP allocation never blocks with the lock held --
	 * confirm against devi_lock rules)
	 */
	if (length != 0) {
		p = kmem_alloc(length, (flags & DDI_PROP_CANSLEEP) ?
		    KM_SLEEP : KM_NOSLEEP);
		if (p == NULL) {
			cmn_err(CE_CONT, prop_no_mem_msg, name);
			return (DDI_PROP_NO_MEMORY);
		}
	}

	/*
	 * If the dev_t value contains DDI_MAJOR_T_UNKNOWN for the major
	 * number, a real dev_t value should be created based upon the dip's
	 * binding driver. See ddi_prop_add...
	 */
	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN)
		dev = makedevice(
		    ddi_name_to_major(DEVI(dip)->devi_binding_name),
		    getminor(dev));

	/*
	 * Check to see if the property exists. If so we modify it.
	 * Else we create it by calling ddi_prop_add().
	 */
	mutex_enter(&(DEVI(dip)->devi_lock));
	ppropp = &DEVI(dip)->devi_drv_prop_ptr;
	if (flags & DDI_PROP_SYSTEM_DEF)
		ppropp = &DEVI(dip)->devi_sys_prop_ptr;
	else if (flags & DDI_PROP_HW_DEF)
		ppropp = &DEVI(dip)->devi_hw_prop_ptr;

	if ((propp = i_ddi_prop_search(dev, name, flags, ppropp)) != NULL) {
		/*
		 * Found it: copy the new value into the preallocated
		 * buffer, swap it in for the old one (freeing that),
		 * and clear any explicit-undefine flag so an undef
		 * becomes a define.
		 */
		if (length != 0)
			bcopy(value, p, length);

		if (propp->prop_len != 0)
			kmem_free(propp->prop_val, propp->prop_len);

		propp->prop_len = length;
		propp->prop_val = p;
		propp->prop_flags &= ~DDI_PROP_UNDEF_IT;
		mutex_exit(&(DEVI(dip)->devi_lock));
		return (DDI_PROP_SUCCESS);
	}

	/* Not found: discard the preallocated buffer and create it. */
	mutex_exit(&(DEVI(dip)->devi_lock));
	if (length != 0)
		kmem_free(p, length);

	return (ddi_prop_add(dev, dip, flags, name, value, length));
}
3470 3423
/*
 * Common update routine used to update and encode a property. Creates
 * a property handle, calls the property encode routine, figures out if
 * the property already exists and updates if it does. Otherwise it
 * creates if it does not exist.
 *
 * prop_create is the type-specific encoder (e.g.
 * ddi_prop_fm_encode_ints); the encoded buffer it allocates into the
 * handle is freed here on every exit path.
 */
int
ddi_prop_update_common(dev_t match_dev, dev_info_t *dip, int flags,
    char *name, void *data, uint_t nelements,
    int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
{
	prop_handle_t ph;
	int rval;
	uint_t ourflags;

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
	 * return error.
	 */
	if (match_dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	/*
	 * Create the handle
	 */
	ph.ph_data = NULL;
	ph.ph_cur_pos = NULL;
	ph.ph_save_pos = NULL;
	ph.ph_size = 0;
	ph.ph_ops = &prop_1275_ops;

	/*
	 * ourflags:
	 * For compatibility with the old interfaces. The old interfaces
	 * didn't sleep by default and slept when the flag was set. These
	 * interfaces do the opposite. So the old interfaces now set the
	 * DDI_PROP_DONTSLEEP flag by default which tells us not to sleep.
	 *
	 * ph.ph_flags:
	 * Blocked data or unblocked data allocation
	 * for ph.ph_data in ddi_prop_encode_alloc()
	 */
	if (flags & DDI_PROP_DONTSLEEP) {
		ourflags = flags;
		ph.ph_flags = DDI_PROP_DONTSLEEP;
	} else {
		ourflags = flags | DDI_PROP_CANSLEEP;
		ph.ph_flags = DDI_PROP_CANSLEEP;
	}

	/*
	 * Encode the data and store it in the property handle by
	 * calling the prop_encode routine.
	 */
	if ((rval = (*prop_create)(&ph, data, nelements)) !=
	    DDI_PROP_SUCCESS) {
		if (rval == DDI_PROP_NO_MEMORY)
			cmn_err(CE_CONT, prop_no_mem_msg, name);
		if (ph.ph_size != 0)
			kmem_free(ph.ph_data, ph.ph_size);
		return (rval);
	}

	/*
	 * The old interfaces use a stacking approach to creating
	 * properties. If we are being called from the old interfaces,
	 * the DDI_PROP_STACK_CREATE flag will be set, so we just do a
	 * create without checking.
	 */
	if (flags & DDI_PROP_STACK_CREATE) {
		rval = ddi_prop_add(match_dev, dip,
		    ourflags, name, ph.ph_data, ph.ph_size);
	} else {
		rval = ddi_prop_change(match_dev, dip,
		    ourflags, name, ph.ph_data, ph.ph_size);
	}

	/*
	 * Free the encoded data allocated in the prop_encode routine.
	 */
	if (ph.ph_size != 0)
		kmem_free(ph.ph_data, ph.ph_size);

	return (rval);
}
3556 3509
3557 3510
3558 3511 /*
3559 3512 * ddi_prop_create: Define a managed property:
3560 3513 * See above for details.
3561 3514 */
3562 3515
3563 3516 int
3564 3517 ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3565 3518 char *name, caddr_t value, int length)
3566 3519 {
3567 3520 if (!(flag & DDI_PROP_CANSLEEP)) {
3568 3521 flag |= DDI_PROP_DONTSLEEP;
3569 3522 #ifdef DDI_PROP_DEBUG
3570 3523 if (length != 0)
3571 3524 cmn_err(CE_NOTE, "!ddi_prop_create: interface obsolete,"
3572 3525 "use ddi_prop_update (prop = %s, node = %s%d)",
3573 3526 name, ddi_driver_name(dip), ddi_get_instance(dip));
3574 3527 #endif /* DDI_PROP_DEBUG */
3575 3528 }
3576 3529 flag &= ~DDI_PROP_SYSTEM_DEF;
3577 3530 flag |= DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
3578 3531 return (ddi_prop_update_common(dev, dip, flag, name,
3579 3532 value, length, ddi_prop_fm_encode_bytes));
3580 3533 }
3581 3534
3582 3535 int
3583 3536 e_ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3584 3537 char *name, caddr_t value, int length)
3585 3538 {
3586 3539 if (!(flag & DDI_PROP_CANSLEEP))
3587 3540 flag |= DDI_PROP_DONTSLEEP;
3588 3541 flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
3589 3542 return (ddi_prop_update_common(dev, dip, flag,
3590 3543 name, value, length, ddi_prop_fm_encode_bytes));
3591 3544 }
3592 3545
/*
 * ddi_prop_modify: Modify an existing software-defined property on the
 * driver property list (obsolete interface).  Unlike ddi_prop_update*,
 * this fails with DDI_PROP_NOT_FOUND if no non-PROM property of that
 * name already exists.
 */
int
ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
    char *name, caddr_t value, int length)
{
	/* Callers must not pass type bits; the type is forced below. */
	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
	 * return error.
	 */
	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	if (!(flag & DDI_PROP_CANSLEEP))
		flag |= DDI_PROP_DONTSLEEP;
	flag &= ~DDI_PROP_SYSTEM_DEF;
	/* Only software-defined (non-PROM) properties may be modified. */
	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_NOTPROM), name) == 0)
		return (DDI_PROP_NOT_FOUND);

	return (ddi_prop_update_common(dev, dip,
	    (flag | DDI_PROP_TYPE_BYTE), name,
	    value, length, ddi_prop_fm_encode_bytes));
}
3616 3569
/*
 * e_ddi_prop_modify: As ddi_prop_modify, but operates on the system
 * property list; fails with DDI_PROP_NOT_FOUND if the system-defined
 * property does not already exist.
 */
int
e_ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
    char *name, caddr_t value, int length)
{
	/* Callers must not pass type bits; the type is forced below. */
	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
	 * return error.
	 */
	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_SYSTEM_DEF), name) == 0)
		return (DDI_PROP_NOT_FOUND);

	if (!(flag & DDI_PROP_CANSLEEP))
		flag |= DDI_PROP_DONTSLEEP;
	return (ddi_prop_update_common(dev, dip,
	    (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE),
	    name, value, length, ddi_prop_fm_encode_bytes));
}
3639 3592
3640 3593
/*
 * Common lookup routine used to lookup and decode a property.
 * Creates a property handle, searches for the raw encoded data,
 * fills in the handle, and calls the property decode functions
 * passed in.
 *
 * This routine is not static because ddi_bus_prop_op() which lives in
 * ddi_impl.c calls it. No driver should be calling this routine.
 */
int
ddi_prop_lookup_common(dev_t match_dev, dev_info_t *dip,
    uint_t flags, char *name, void *data, uint_t *nelements,
    int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
{
	int rval;
	uint_t ourflags;
	prop_handle_t ph;

	if ((match_dev == DDI_DEV_T_NONE) ||
	    (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	/* Lookups sleep by default unless DDI_PROP_DONTSLEEP is set. */
	ourflags = (flags & DDI_PROP_DONTSLEEP) ? flags :
	    flags | DDI_PROP_CANSLEEP;

	/*
	 * Get the encoded data
	 */
	bzero(&ph, sizeof (prop_handle_t));

	if ((flags & DDI_UNBND_DLPI2) || (flags & DDI_PROP_ROOTNEX_GLOBAL)) {
		/*
		 * For rootnex and unbound dlpi style-2 devices, index into
		 * the devnames' array and search the global
		 * property list.
		 */
		ourflags &= ~DDI_UNBND_DLPI2;
		rval = i_ddi_prop_search_global(match_dev,
		    ourflags, name, &ph.ph_data, &ph.ph_size);
	} else {
		rval = ddi_prop_search_common(match_dev, dip,
		    PROP_LEN_AND_VAL_ALLOC, ourflags, name,
		    &ph.ph_data, &ph.ph_size);

	}

	if (rval != DDI_PROP_SUCCESS && rval != DDI_PROP_FOUND_1275) {
		ASSERT(ph.ph_data == NULL);
		ASSERT(ph.ph_size == 0);
		return (rval);
	}

	/*
	 * If the encoded data came from OBP or software,
	 * use the 1275 OBP decode/encode routines.
	 * PH_FROM_PROM selects the PROM copy semantics in the
	 * operator functions.
	 */
	ph.ph_cur_pos = ph.ph_data;
	ph.ph_save_pos = ph.ph_data;
	ph.ph_ops = &prop_1275_ops;
	ph.ph_flags = (rval == DDI_PROP_FOUND_1275) ? PH_FROM_PROM : 0;

	rval = (*prop_decoder)(&ph, data, nelements);

	/*
	 * Free the encoded data
	 */
	if (ph.ph_size != 0)
		kmem_free(ph.ph_data, ph.ph_size);

	return (rval);
}
3712 3665
3713 3666 /*
3714 3667 * Lookup and return an array of composite properties. The driver must
3715 3668 * provide the decode routine.
3716 3669 */
3717 3670 int
3718 3671 ddi_prop_lookup(dev_t match_dev, dev_info_t *dip,
3719 3672 uint_t flags, char *name, void *data, uint_t *nelements,
3720 3673 int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
3721 3674 {
3722 3675 return (ddi_prop_lookup_common(match_dev, dip,
3723 3676 (flags | DDI_PROP_TYPE_COMPOSITE), name,
3724 3677 data, nelements, prop_decoder));
3725 3678 }
3726 3679
3727 3680 /*
3728 3681 * Return 1 if a property exists (no type checking done).
3729 3682 * Return 0 if it does not exist.
3730 3683 */
3731 3684 int
3732 3685 ddi_prop_exists(dev_t match_dev, dev_info_t *dip, uint_t flags, char *name)
3733 3686 {
3734 3687 int i;
3735 3688 uint_t x = 0;
3736 3689
3737 3690 i = ddi_prop_search_common(match_dev, dip, PROP_EXISTS,
3738 3691 flags | DDI_PROP_TYPE_MASK, name, NULL, &x);
3739 3692 return (i == DDI_PROP_SUCCESS || i == DDI_PROP_FOUND_1275);
3740 3693 }
3741 3694
3742 3695
3743 3696 /*
3744 3697 * Update an array of composite properties. The driver must
3745 3698 * provide the encode routine.
3746 3699 */
3747 3700 int
3748 3701 ddi_prop_update(dev_t match_dev, dev_info_t *dip,
3749 3702 char *name, void *data, uint_t nelements,
3750 3703 int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
3751 3704 {
3752 3705 return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_COMPOSITE,
3753 3706 name, data, nelements, prop_create));
3754 3707 }
3755 3708
/*
 * Get a single integer or boolean property and return it.
 * If the property does not exist, or cannot be decoded,
 * then return the defvalue passed in.
 *
 * This routine always succeeds.
 */
int
ddi_prop_get_int(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int defvalue)
{
	int data;
	uint_t nelements;
	int rval;

	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_get_int: invalid flag"
			    " 0x%x (prop = %s, node = %s%d)", flags,
			    name, ddi_driver_name(dip), ddi_get_instance(dip));
		}
#endif /* DEBUG */
		/*
		 * NOTE(review): this sanitizing mask drops
		 * DDI_PROP_ROOTNEX_GLOBAL even though the check above
		 * accepts it as valid -- confirm this is intended.
		 */
		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
	}

	if ((rval = ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_INT), name, &data, &nelements,
	    ddi_prop_fm_decode_int)) != DDI_PROP_SUCCESS) {
		/*
		 * DDI_PROP_END_OF_DATA: the property exists but carries
		 * no value (a boolean property), so report it as 1.
		 */
		if (rval == DDI_PROP_END_OF_DATA)
			data = 1;
		else
			data = defvalue;
	}
	return (data);
}
3794 3747
/*
 * Get a single 64 bit integer or boolean property and return it.
 * If the property does not exist, or cannot be decoded,
 * then return the defvalue passed in.
 *
 * This routine always succeeds.
 * NOTE(review): not strictly -- on an invalid flags argument it
 * returns DDI_PROP_INVAL_ARG rather than defvalue; confirm callers
 * expect this.
 */
int64_t
ddi_prop_get_int64(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int64_t defvalue)
{
	int64_t data;
	uint_t nelements;
	int rval;

	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_get_int64: invalid flag"
			    " 0x%x (prop = %s, node = %s%d)", flags,
			    name, ddi_driver_name(dip), ddi_get_instance(dip));
		}
#endif /* DEBUG */
		return (DDI_PROP_INVAL_ARG);
	}

	/*
	 * DDI_PROP_NOTPROM is forced here -- presumably 64-bit integer
	 * properties never come from the PROM; confirm.
	 */
	if ((rval = ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
	    name, &data, &nelements, ddi_prop_fm_decode_int64))
	    != DDI_PROP_SUCCESS) {
		/* Boolean (valueless) property decodes as 1. */
		if (rval == DDI_PROP_END_OF_DATA)
			data = 1;
		else
			data = defvalue;
	}
	return (data);
}
3833 3786
3834 3787 /*
3835 3788 * Get an array of integer property
3836 3789 */
/*
 * Look up an array of 32-bit integer property values.  On success the
 * decoded array is allocated and returned via *data (caller frees with
 * ddi_prop_free) and *nelements is set to the element count.
 */
int
ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int **data, uint_t *nelements)
{
	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_lookup_int_array: "
			    "invalid flag 0x%x (prop = %s, node = %s%d)",
			    flags, name, ddi_driver_name(dip),
			    ddi_get_instance(dip));
		}
#endif /* DEBUG */
		/*
		 * NOTE(review): as in ddi_prop_get_int, this mask drops
		 * DDI_PROP_ROOTNEX_GLOBAL although the check above
		 * accepts it -- confirm intended.
		 */
		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
	}

	return (ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_INT), name, data,
	    nelements, ddi_prop_fm_decode_ints));
}
3859 3812
3860 3813 /*
3861 3814 * Get an array of 64 bit integer properties
3862 3815 */
/*
 * Look up an array of 64-bit integer property values.  On success the
 * decoded array is allocated and returned via *data (caller frees with
 * ddi_prop_free) and *nelements is set to the element count.  Invalid
 * flags fail with DDI_PROP_INVAL_ARG; PROM lookup is always excluded.
 */
int
ddi_prop_lookup_int64_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int64_t **data, uint_t *nelements)
{
	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_lookup_int64_array: "
			    "invalid flag 0x%x (prop = %s, node = %s%d)",
			    flags, name, ddi_driver_name(dip),
			    ddi_get_instance(dip));
		}
#endif /* DEBUG */
		return (DDI_PROP_INVAL_ARG);
	}

	return (ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
	    name, data, nelements, ddi_prop_fm_decode_int64_array));
}
3884 3837
/*
 * Update a single integer property. If the property exists on the drivers
 * property list it updates, else it creates it.
 */
int
ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
    char *name, int data)
{
	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
	    name, &data, 1, ddi_prop_fm_encode_ints));
}

/*
 * Update a single 64 bit integer property.
 * Update the driver property list if it exists, else create it.
 */
int
ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
    char *name, int64_t data)
{
	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
	    name, &data, 1, ddi_prop_fm_encode_int64));
}

/*
 * e_ddi_prop_update_int: system-defined (DDI_PROP_SYSTEM_DEF) variant of
 * ddi_prop_update_int; updates the devinfo node's system property list.
 */
int
e_ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
    char *name, int data)
{
	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
	    name, &data, 1, ddi_prop_fm_encode_ints));
}

/*
 * e_ddi_prop_update_int64: system-defined variant of ddi_prop_update_int64.
 */
int
e_ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
    char *name, int64_t data)
{
	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
	    name, &data, 1, ddi_prop_fm_encode_int64));
}

/*
 * Update an array of integer property. If the property exists on the drivers
 * property list it updates, else it creates it.
 */
int
ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
    char *name, int *data, uint_t nelements)
{
	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
	    name, data, nelements, ddi_prop_fm_encode_ints));
}

/*
 * Update an array of 64 bit integer properties.
 * Update the driver property list if it exists, else create it.
 */
int
ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
    char *name, int64_t *data, uint_t nelements)
{
	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
	    name, data, nelements, ddi_prop_fm_encode_int64));
}

/*
 * e_ddi_prop_update_int64_array: system-defined variant of
 * ddi_prop_update_int64_array.
 */
int
e_ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
    char *name, int64_t *data, uint_t nelements)
{
	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
	    name, data, nelements, ddi_prop_fm_encode_int64));
}

/*
 * e_ddi_prop_update_int_array: system-defined variant of
 * ddi_prop_update_int_array.
 */
int
e_ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
    char *name, int *data, uint_t nelements)
{
	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
	    name, data, nelements, ddi_prop_fm_encode_ints));
}
3968 3921
/*
 * Get a single string property.
 *
 * On success *data points to a framework-allocated NUL-terminated string;
 * the caller releases it with ddi_prop_free().
 */
int
ddi_prop_lookup_string(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, char **data)
{
	uint_t x;	/* element count; unused by single-string callers */

	/*
	 * Invalid flag bits are warned about (DEBUG only) and then masked
	 * off so the lookup still proceeds -- contrast with the int64
	 * lookup above, which fails outright.
	 */
	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "%s: invalid flag 0x%x "
			    "(prop = %s, node = %s%d); invalid bits ignored",
			    "ddi_prop_lookup_string", flags, name,
			    ddi_driver_name(dip), ddi_get_instance(dip));
		}
#endif /* DEBUG */
		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
	}

	return (ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_STRING), name, data,
	    &x, ddi_prop_fm_decode_string));
}

/*
 * Get an array of strings property.
 *
 * On success *data points to a framework-allocated NULL-terminated vector
 * of strings and *nelements holds the count; free with ddi_prop_free().
 */
int
ddi_prop_lookup_string_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, char ***data, uint_t *nelements)
{
	/* Same invalid-bits-are-masked policy as ddi_prop_lookup_string */
	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_lookup_string_array: "
			    "invalid flag 0x%x (prop = %s, node = %s%d)",
			    flags, name, ddi_driver_name(dip),
			    ddi_get_instance(dip));
		}
#endif /* DEBUG */
		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
	}

	return (ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_STRING), name, data,
	    nelements, ddi_prop_fm_decode_strings));
}
4022 3975
/*
 * Update a single string property.
 */
int
ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
    char *name, char *data)
{
	/* Note &data: the encoder expects a pointer to the string pointer */
	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_TYPE_STRING, name, &data, 1,
	    ddi_prop_fm_encode_string));
}

/*
 * e_ddi_prop_update_string: system-defined (DDI_PROP_SYSTEM_DEF) variant
 * of ddi_prop_update_string.
 */
int
e_ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
    char *name, char *data)
{
	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
	    name, &data, 1, ddi_prop_fm_encode_string));
}


/*
 * Update an array of strings property.
 */
int
ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
    char *name, char **data, uint_t nelements)
{
	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_TYPE_STRING, name, data, nelements,
	    ddi_prop_fm_encode_strings));
}

/*
 * e_ddi_prop_update_string_array: system-defined variant of
 * ddi_prop_update_string_array.
 */
int
e_ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
    char *name, char **data, uint_t nelements)
{
	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
	    name, data, nelements,
	    ddi_prop_fm_encode_strings));
}
4066 4019
4067 4020
4068 4021 /*
4069 4022 * Get an array of bytes property.
4070 4023 */
4071 4024 int
4072 4025 ddi_prop_lookup_byte_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4073 4026 char *name, uchar_t **data, uint_t *nelements)
4074 4027 {
4075 4028 if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4076 4029 LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
4077 4030 #ifdef DEBUG
4078 4031 if (dip != NULL) {
4079 4032 cmn_err(CE_WARN, "ddi_prop_lookup_byte_array: "
4080 4033 " invalid flag 0x%x (prop = %s, node = %s%d)",
4081 4034 flags, name, ddi_driver_name(dip),
4082 4035 ddi_get_instance(dip));
4083 4036 }
4084 4037 #endif /* DEBUG */
4085 4038 flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4086 4039 LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4087 4040 }
4088 4041
4089 4042 return (ddi_prop_lookup_common(match_dev, dip,
4090 4043 (flags | DDI_PROP_TYPE_BYTE), name, data,
4091 4044 nelements, ddi_prop_fm_decode_bytes));
4092 4045 }
4093 4046
/*
 * Update an array of bytes property.
 *
 * A zero-length byte array is rejected; use ddi_prop_undefine()/
 * ddi_prop_update_* with a different type for existence-only properties.
 */
int
ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
    char *name, uchar_t *data, uint_t nelements)
{
	if (nelements == 0)
		return (DDI_PROP_INVAL_ARG);

	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_BYTE,
	    name, data, nelements, ddi_prop_fm_encode_bytes));
}


/*
 * e_ddi_prop_update_byte_array: system-defined (DDI_PROP_SYSTEM_DEF)
 * variant of ddi_prop_update_byte_array; same zero-length restriction.
 */
int
e_ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
    char *name, uchar_t *data, uint_t nelements)
{
	if (nelements == 0)
		return (DDI_PROP_INVAL_ARG);

	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE,
	    name, data, nelements, ddi_prop_fm_encode_bytes));
}
4120 4073
4121 4074
/*
 * ddi_prop_remove_common:	Undefine a managed property:
 *			Input dev_t must match dev_t when defined.
 *			Returns DDI_PROP_NOT_FOUND, possibly.
 *			DDI_PROP_INVAL_ARG is also possible if dev is
 *			DDI_DEV_T_ANY or incoming name is the NULL string.
 *
 * The flag argument selects which per-devinfo list is searched:
 * DDI_PROP_SYSTEM_DEF -> system list, DDI_PROP_HW_DEF -> hardware list,
 * otherwise the driver list.  The matching node is unlinked under
 * devi_lock and freed after the lock is dropped.
 */
int
ddi_prop_remove_common(dev_t dev, dev_info_t *dip, char *name, int flag)
{
	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
	ddi_prop_t	*propp;
	ddi_prop_t	*lastpropp = NULL;

	if ((dev == DDI_DEV_T_ANY) || (name == (char *)0) ||
	    (strlen(name) == 0)) {
		return (DDI_PROP_INVAL_ARG);
	}

	if (flag & DDI_PROP_SYSTEM_DEF)
		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
	else if (flag & DDI_PROP_HW_DEF)
		list_head = &(DEVI(dip)->devi_hw_prop_ptr);

	mutex_enter(&(DEVI(dip)->devi_lock));

	for (propp = *list_head; propp != NULL; propp = propp->prop_next) {
		/* match requires both the name and the defining dev_t */
		if (DDI_STRSAME(propp->prop_name, name) &&
		    (dev == propp->prop_dev)) {
			/*
			 * Unlink this propp allowing for it to
			 * be first in the list:
			 */

			if (lastpropp == NULL)
				*list_head = propp->prop_next;
			else
				lastpropp->prop_next = propp->prop_next;

			mutex_exit(&(DEVI(dip)->devi_lock));

			/*
			 * Free memory and return...
			 * (safe outside the lock: the node is unlinked)
			 */
			kmem_free(propp->prop_name,
			    strlen(propp->prop_name) + 1);
			if (propp->prop_len != 0)
				kmem_free(propp->prop_val, propp->prop_len);
			kmem_free(propp, sizeof (ddi_prop_t));
			return (DDI_PROP_SUCCESS);
		}
		lastpropp = propp;
	}
	mutex_exit(&(DEVI(dip)->devi_lock));
	return (DDI_PROP_NOT_FOUND);
}
4178 4131
/*
 * ddi_prop_remove: remove a driver-defined property (driver list).
 */
int
ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
{
	return (ddi_prop_remove_common(dev, dip, name, 0));
}

/*
 * e_ddi_prop_remove: remove a system-defined property (system list).
 */
int
e_ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
{
	return (ddi_prop_remove_common(dev, dip, name, DDI_PROP_SYSTEM_DEF));
}

/*
 * e_ddi_prop_list_delete: remove a list of properties
 *	Note that the caller needs to provide the required protection
 *	(eg. devi_lock if these properties are still attached to a devi)
 */
void
e_ddi_prop_list_delete(ddi_prop_t *props)
{
	i_ddi_prop_list_delete(props);
}

/*
 * ddi_prop_remove_all_common:
 *	Used before unloading a driver to remove
 *	all properties. (undefines all dev_t's props.)
 *	Also removes `explicitly undefined' props.
 *	No errors possible.
 *
 * flag selects the system, hardware, or (default) driver property list;
 * the whole list is deleted and the head pointer reset under devi_lock.
 */
void
ddi_prop_remove_all_common(dev_info_t *dip, int flag)
{
	ddi_prop_t	**list_head;

	mutex_enter(&(DEVI(dip)->devi_lock));
	if (flag & DDI_PROP_SYSTEM_DEF) {
		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
	} else if (flag & DDI_PROP_HW_DEF) {
		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
	} else {
		list_head = &(DEVI(dip)->devi_drv_prop_ptr);
	}
	i_ddi_prop_list_delete(*list_head);
	*list_head = NULL;
	mutex_exit(&(DEVI(dip)->devi_lock));
}


/*
 * ddi_prop_remove_all:		Remove all driver prop definitions.
 */

void
ddi_prop_remove_all(dev_info_t *dip)
{
	/* also clear any registered dynamic-property descriptor */
	i_ddi_prop_dyn_driver_set(dip, NULL);
	ddi_prop_remove_all_common(dip, 0);
}

/*
 * e_ddi_prop_remove_all:	Remove all system prop definitions.
 */

void
e_ddi_prop_remove_all(dev_info_t *dip)
{
	ddi_prop_remove_all_common(dip, (int)DDI_PROP_SYSTEM_DEF);
}
4248 4201
4249 4202
/*
 * ddi_prop_undefine:	Explicitly undefine a property.  Property
 *			searches which match this property return
 *			the error code DDI_PROP_UNDEFINED.
 *
 *			Use ddi_prop_remove to negate effect of
 *			ddi_prop_undefine
 *
 *			See above for error returns.
 */

int
ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
{
	/* default to non-sleeping allocation unless caller said CANSLEEP */
	if (!(flag & DDI_PROP_CANSLEEP))
		flag |= DDI_PROP_DONTSLEEP;
	/* zero-length STACK_CREATE + UNDEF_IT marks the prop as undefined */
	flag |= DDI_PROP_STACK_CREATE | DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY;
	return (ddi_prop_update_common(dev, dip, flag,
	    name, NULL, 0, ddi_prop_fm_encode_bytes));
}

/*
 * e_ddi_prop_undefine: system-defined (DDI_PROP_SYSTEM_DEF) variant of
 * ddi_prop_undefine.
 */
int
e_ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
{
	if (!(flag & DDI_PROP_CANSLEEP))
		flag |= DDI_PROP_DONTSLEEP;
	flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE |
	    DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY;
	return (ddi_prop_update_common(dev, dip, flag,
	    name, NULL, 0, ddi_prop_fm_encode_bytes));
}
4281 4234
/*
 * Support for gathering dynamic properties in devinfo snapshot.
 *
 * Simple accessors for the per-devinfo dynamic property descriptors
 * (driver-reported vs parent-reported), plus cache invalidation hooks.
 */
void
i_ddi_prop_dyn_driver_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
{
	DEVI(dip)->devi_prop_dyn_driver = dp;
}

i_ddi_prop_dyn_t *
i_ddi_prop_dyn_driver_get(dev_info_t *dip)
{
	return (DEVI(dip)->devi_prop_dyn_driver);
}

void
i_ddi_prop_dyn_parent_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
{
	DEVI(dip)->devi_prop_dyn_parent = dp;
}

i_ddi_prop_dyn_t *
i_ddi_prop_dyn_parent_get(dev_info_t *dip)
{
	return (DEVI(dip)->devi_prop_dyn_parent);
}

void
i_ddi_prop_dyn_cache_invalidate(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
{
	/* for now we invalidate the entire cached snapshot */
	if (dip && dp)
		i_ddi_di_cache_invalidate();
}

/* ARGSUSED */
void
ddi_prop_cache_invalidate(dev_t dev, dev_info_t *dip, char *name, int flags)
{
	/* for now we invalidate the entire cached snapshot */
	i_ddi_di_cache_invalidate();
}
4324 4277
4325 4278
/*
 * Code to search hardware layer (PROM), if it exists, on behalf of child.
 *
 * if input dip != child_dip, then call is on behalf of child
 * to search PROM, do it via ddi_prop_search_common() and ascend only
 * if allowed.
 *
 * if input dip == ch_dip (child_dip), call is on behalf of root driver,
 * to search for PROM defined props only.
 *
 * Note that the PROM search is done only if the requested dev
 * is either DDI_DEV_T_ANY or DDI_DEV_T_NONE. PROM properties
 * have no associated dev, thus are automatically associated with
 * DDI_DEV_T_NONE.
 *
 * Modifying flag DDI_PROP_NOTPROM inhibits the search in the h/w layer.
 *
 * Returns DDI_PROP_FOUND_1275 if found to indicate to framework
 * that the property resides in the prom.
 */
int
impl_ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
    ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	int	len;
	caddr_t buffer;		/* destination for the PROM property value */

	/*
	 * If requested dev is DDI_DEV_T_NONE or DDI_DEV_T_ANY, then
	 * look in caller's PROM if it's a self identifying device...
	 *
	 * Note that this is very similar to ddi_prop_op, but we
	 * search the PROM instead of the s/w defined properties,
	 * and we are called on by the parent driver to do this for
	 * the child.
	 */

	if (((dev == DDI_DEV_T_NONE) || (dev == DDI_DEV_T_ANY)) &&
	    ndi_dev_is_prom_node(ch_dip) &&
	    ((mod_flags & DDI_PROP_NOTPROM) == 0)) {
		len = prom_getproplen((pnode_t)DEVI(ch_dip)->devi_nodeid, name);
		if (len == -1) {
			/* -1 from the PROM means no such property */
			return (DDI_PROP_NOT_FOUND);
		}

		/*
		 * If exists only request, we're done
		 */
		if (prop_op == PROP_EXISTS) {
			return (DDI_PROP_FOUND_1275);
		}

		/*
		 * If length only request or prop length == 0, get out
		 */
		if ((prop_op == PROP_LEN) || (len == 0)) {
			*lengthp = len;
			return (DDI_PROP_FOUND_1275);
		}

		/*
		 * Allocate buffer if required... (either way `buffer'
		 * is receiving address).  Only the two VAL ops reach
		 * here; PROP_EXISTS and PROP_LEN returned above, so
		 * the default case leaves buffer unset only for op
		 * values that should not occur.
		 */

		switch (prop_op) {

		case PROP_LEN_AND_VAL_ALLOC:

			buffer = kmem_alloc((size_t)len,
			    mod_flags & DDI_PROP_CANSLEEP ?
			    KM_SLEEP : KM_NOSLEEP);
			if (buffer == NULL) {
				return (DDI_PROP_NO_MEMORY);
			}
			/* hand ownership of the allocation to the caller */
			*(caddr_t *)valuep = buffer;
			break;

		case PROP_LEN_AND_VAL_BUF:

			if (len > (*lengthp)) {
				/* tell caller how big a buffer is needed */
				*lengthp = len;
				return (DDI_PROP_BUF_TOO_SMALL);
			}

			buffer = valuep;
			break;

		default:
			break;
		}

		/*
		 * Call the PROM function to do the copy.
		 */
		(void) prom_getprop((pnode_t)DEVI(ch_dip)->devi_nodeid,
		    name, buffer);

		*lengthp = len; /* return the actual length to the caller */
		(void) impl_fix_props(dip, ch_dip, name, len, buffer);
		return (DDI_PROP_FOUND_1275);
	}

	return (DDI_PROP_NOT_FOUND);
}
4432 4385
/*
 * The ddi_bus_prop_op default bus nexus prop op function.
 *
 * Code to search hardware layer (PROM), if it exists,
 * on behalf of child, then, if appropriate, ascend and check
 * my own software defined properties...
 *
 * Search order: PROM (via impl_ddi_bus_prop_op), then -- unless
 * DDI_PROP_DONTPASS -- either the 'options' node (once we have reached
 * the root) or this nexus' own software property lists.
 */
int
ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
    ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	int	error;

	error = impl_ddi_bus_prop_op(dev, dip, ch_dip, prop_op, mod_flags,
	    name, valuep, lengthp);

	/*
	 * Any definitive answer from the PROM search (found, or found but
	 * caller's buffer is too small) is returned as-is.
	 */
	if (error == DDI_PROP_SUCCESS || error == DDI_PROP_FOUND_1275 ||
	    error == DDI_PROP_BUF_TOO_SMALL)
		return (error);

	if (error == DDI_PROP_NO_MEMORY) {
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}

	/*
	 * Check the 'options' node as a last resort
	 */
	if ((mod_flags & DDI_PROP_DONTPASS) != 0)
		return (DDI_PROP_NOT_FOUND);

	if (ch_dip == ddi_root_node()) {
		/*
		 * As a last resort, when we've reached
		 * the top and still haven't found the
		 * property, see if the desired property
		 * is attached to the options node.
		 *
		 * The options dip is attached right after boot.
		 */
		ASSERT(options_dip != NULL);
		/*
		 * Force the "don't pass" flag to *just* see
		 * what the options node has to offer.
		 */
		return (ddi_prop_search_common(dev, options_dip, prop_op,
		    mod_flags|DDI_PROP_DONTPASS, name, valuep,
		    (uint_t *)lengthp));
	}

	/*
	 * Otherwise, continue search with parent's s/w defined properties...
	 * NOTE: Using `dip' in following call increments the level.
	 */

	return (ddi_prop_search_common(dev, dip, prop_op, mod_flags,
	    name, valuep, (uint_t *)lengthp));
}
4492 4445
4493 4446 /*
4494 4447 * External property functions used by other parts of the kernel...
4495 4448 */
4496 4449
/*
 * e_ddi_getlongprop: See comments for ddi_get_longprop.
 *
 * Dev_t-based lookup: holds the devinfo node for `dev', routes the
 * request through the driver's prop_op(9E) entry via cdev_prop_op(),
 * and releases the hold.  PROP_LEN_AND_VAL_ALLOC: on success the
 * framework allocates the value buffer for the caller.
 */

int
e_ddi_getlongprop(dev_t dev, vtype_t type, char *name, int flags,
    caddr_t valuep, int *lengthp)
{
	_NOTE(ARGUNUSED(type))
	dev_info_t *devi;
	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_ALLOC;
	int error;

	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (DDI_PROP_NOT_FOUND);

	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
	ddi_release_devi(devi);
	return (error);
}

/*
 * e_ddi_getlongprop_buf:	See comments for ddi_getlongprop_buf.
 *
 * Same as e_ddi_getlongprop but the caller supplies the buffer
 * (PROP_LEN_AND_VAL_BUF); *lengthp is in/out buffer size.
 */

int
e_ddi_getlongprop_buf(dev_t dev, vtype_t type, char *name, int flags,
    caddr_t valuep, int *lengthp)
{
	_NOTE(ARGUNUSED(type))
	dev_info_t *devi;
	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
	int error;

	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (DDI_PROP_NOT_FOUND);

	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
	ddi_release_devi(devi);
	return (error);
}
4538 4491
/*
 * e_ddi_getprop:	See comments for ddi_getprop.
 *
 * Integer property lookup by dev_t.  Returns defvalue if the node
 * cannot be held or the property is not found; a zero-length property
 * that exists is treated as boolean true (returns 1).
 */
int
e_ddi_getprop(dev_t dev, vtype_t type, char *name, int flags, int defvalue)
{
	_NOTE(ARGUNUSED(type))
	dev_info_t *devi;
	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
	int	propvalue = defvalue;
	int	proplength = sizeof (int);
	int	error;

	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (defvalue);

	error = cdev_prop_op(dev, devi, prop_op,
	    flags, name, (caddr_t)&propvalue, &proplength);
	ddi_release_devi(devi);

	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
		propvalue = 1;

	return (propvalue);
}

/*
 * e_ddi_getprop_int64:
 *
 * This is a typed interfaces, but predates typed properties. With the
 * introduction of typed properties the framework tries to ensure
 * consistent use of typed interfaces. This is why TYPE_INT64 is not
 * part of TYPE_ANY. E_ddi_getprop_int64 is a special case where a
 * typed interface invokes legacy (non-typed) interfaces:
 * cdev_prop_op(), prop_op(9E), ddi_prop_op(9F)). In this case the
 * fact that TYPE_INT64 is not part of TYPE_ANY matters.  To support
 * this type of lookup as a single operation we invoke the legacy
 * non-typed interfaces with the special CONSUMER_TYPED bit set. The
 * framework ddi_prop_op(9F) implementation is expected to check for
 * CONSUMER_TYPED and, if set, expand type bits beyond TYPE_ANY
 * (currently TYPE_INT64).
 */
int64_t
e_ddi_getprop_int64(dev_t dev, vtype_t type, char *name,
    int flags, int64_t defvalue)
{
	_NOTE(ARGUNUSED(type))
	dev_info_t	*devi;
	ddi_prop_op_t	prop_op = PROP_LEN_AND_VAL_BUF;
	int64_t		propvalue = defvalue;
	int		proplength = sizeof (propvalue);
	int		error;

	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (defvalue);

	error = cdev_prop_op(dev, devi, prop_op, flags |
	    DDI_PROP_CONSUMER_TYPED, name, (caddr_t)&propvalue, &proplength);
	ddi_release_devi(devi);

	/* zero-length existing property == boolean true */
	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
		propvalue = 1;

	return (propvalue);
}
4604 4557
/*
 * e_ddi_getproplen:	See comments for ddi_getproplen.
 *
 * Length-only (PROP_LEN) lookup by dev_t; on success *lengthp holds
 * the property length in bytes.
 */
int
e_ddi_getproplen(dev_t dev, vtype_t type, char *name, int flags, int *lengthp)
{
	_NOTE(ARGUNUSED(type))
	dev_info_t *devi;
	ddi_prop_op_t prop_op = PROP_LEN;
	int error;

	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (DDI_PROP_NOT_FOUND);

	error = cdev_prop_op(dev, devi, prop_op, flags, name, NULL, lengthp);
	ddi_release_devi(devi);
	return (error);
}
4623 4576
4624 4577 /*
4625 4578 * Routines to get at elements of the dev_info structure
4626 4579 */
4627 4580
/*
 * ddi_binding_name: Return the driver binding name of the devinfo node
 *		This is the name the OS used to bind the node to a driver.
 */
char *
ddi_binding_name(dev_info_t *dip)
{
	return (DEVI(dip)->devi_binding_name);
}

/*
 * ddi_driver_major: Return the major number of the driver that
 *	the supplied devinfo is bound to.  If not yet bound,
 *	DDI_MAJOR_T_NONE.
 *
 * When used by the driver bound to 'devi', this
 * function will reliably return the driver major number.
 * Other ways of determining the driver major number, such as
 *	major = ddi_name_to_major(ddi_get_name(devi));
 *	major = ddi_name_to_major(ddi_binding_name(devi));
 * can return a different result as the driver/alias binding
 * can change dynamically, and thus should be avoided.
 */
major_t
ddi_driver_major(dev_info_t *devi)
{
	return (DEVI(devi)->devi_major);
}

/*
 * ddi_driver_name: Return the normalized driver name. this is the
 *		actual driver name
 *
 * Falls back to the node name when the node is not bound to a driver.
 */
const char *
ddi_driver_name(dev_info_t *devi)
{
	major_t major;

	if ((major = ddi_driver_major(devi)) != DDI_MAJOR_T_NONE)
		return (ddi_major_to_name(major));

	return (ddi_node_name(devi));
}

/*
 * i_ddi_set_binding_name:	Set binding name.
 *
 *	Set the binding name to the given name.
 *	This routine is for use by the ddi implementation, not by drivers.
 */
void
i_ddi_set_binding_name(dev_info_t *dip, char *name)
{
	DEVI(dip)->devi_binding_name = name;

}

/*
 * ddi_get_name: A synonym of ddi_binding_name() ... returns a name
 * the implementation has used to bind the node to a driver.
 */
char *
ddi_get_name(dev_info_t *dip)
{
	return (DEVI(dip)->devi_binding_name);
}

/*
 * ddi_node_name: Return the name property of the devinfo node
 *		This may differ from ddi_binding_name if the node name
 *		does not define a binding to a driver (i.e. generic names).
 */
char *
ddi_node_name(dev_info_t *dip)
{
	return (DEVI(dip)->devi_node_name);
}
4705 4658
4706 4659
/*
 * ddi_get_nodeid:	Get nodeid stored in dev_info structure.
 */
int
ddi_get_nodeid(dev_info_t *dip)
{
	return (DEVI(dip)->devi_nodeid);
}

/* Return the instance number assigned to this devinfo node */
int
ddi_get_instance(dev_info_t *dip)
{
	return (DEVI(dip)->devi_instance);
}

/* Return the dev_ops vector the node is bound to (NULL semantics per DDI) */
struct dev_ops *
ddi_get_driver(dev_info_t *dip)
{
	return (DEVI(dip)->devi_ops);
}

/* Set the dev_ops vector for the node (framework use) */
void
ddi_set_driver(dev_info_t *dip, struct dev_ops *devo)
{
	DEVI(dip)->devi_ops = devo;
}

/*
 * ddi_set_driver_private/ddi_get_driver_private:
 * Get/set device driver private data in devinfo.
 */
void
ddi_set_driver_private(dev_info_t *dip, void *data)
{
	DEVI(dip)->devi_driver_data = data;
}

void *
ddi_get_driver_private(dev_info_t *dip)
{
	return (DEVI(dip)->devi_driver_data);
}

/*
 * ddi_get_parent, ddi_get_child, ddi_get_next_sibling
 *
 * Devinfo tree navigation accessors; each simply returns the
 * corresponding link field of the node.
 */

dev_info_t *
ddi_get_parent(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_parent);
}

dev_info_t *
ddi_get_child(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_child);
}

dev_info_t *
ddi_get_next_sibling(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_sibling);
}

dev_info_t *
ddi_get_next(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_next);
}

void
ddi_set_next(dev_info_t *dip, dev_info_t *nextdip)
{
	DEVI(dip)->devi_next = DEVI(nextdip);
}

/*
 * ddi_root_node:		Return root node of devinfo tree
 */

dev_info_t *
ddi_root_node(void)
{
	extern dev_info_t *top_devinfo;

	return (top_devinfo);
}
4795 4748
4796 4749 /*
4797 4750 * Miscellaneous functions:
4798 4751 */
4799 4752
/*
 * Implementation specific hooks
 */

/*
 * ddi_report_dev: announce a device via the REPORTDEV ctlop, and -- for
 * nodes with cb_ops -- log "<driver><inst> is <path>" to the console
 * message stream.
 */
void
ddi_report_dev(dev_info_t *d)
{
	char *b;

	(void) ddi_ctlops(d, d, DDI_CTLOPS_REPORTDEV, (void *)0, (void *)0);

	/*
	 * If this devinfo node has cb_ops, it's implicitly accessible from
	 * userland, so we print its full name together with the instance
	 * number 'abbreviation' that the driver may use internally.
	 */
	if (DEVI(d)->devi_ops->devo_cb_ops != (struct cb_ops *)0 &&
	    (b = kmem_zalloc(MAXPATHLEN, KM_NOSLEEP))) {
		/* KM_NOSLEEP: silently skip the message if memory is tight */
		cmn_err(CE_CONT, "?%s%d is %s\n",
		    ddi_driver_name(d), ddi_get_instance(d),
		    ddi_pathname(d, b));
		kmem_free(b, MAXPATHLEN);
	}
}
4824 4777
/*
 * ddi_ctlops() is described in the assembler not to buy a new register
 * window when it's called and can reduce cost in climbing the device tree
 * without using the tail call optimization.
 */

/*
 * ddi_dev_regsize: store the size of register set `rnumber' in *result;
 * any non-success ctlops status is normalized to DDI_FAILURE.
 */
int
ddi_dev_regsize(dev_info_t *dev, uint_t rnumber, off_t *result)
{
	int ret;

	ret = ddi_ctlops(dev, dev, DDI_CTLOPS_REGSIZE,
	    (void *)&rnumber, (void *)result);

	return (ret == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
}

/* Store the number of register sets for `dev' in *result */
int
ddi_dev_nregs(dev_info_t *dev, int *result)
{
	return (ddi_ctlops(dev, dev, DDI_CTLOPS_NREGS, 0, (void *)result));
}

/* DDI_SUCCESS if the device is self-identifying (SIDDEV ctlop) */
int
ddi_dev_is_sid(dev_info_t *d)
{
	return (ddi_ctlops(d, d, DDI_CTLOPS_SIDDEV, (void *)0, (void *)0));
}

/* DDI_SUCCESS if the device sits in a slave-only slot (SLAVEONLY ctlop) */
int
ddi_slaveonly(dev_info_t *d)
{
	return (ddi_ctlops(d, d, DDI_CTLOPS_SLAVEONLY, (void *)0, (void *)0));
}

/* Ask the parent nexus whether devices `a' and `b' have affinity */
int
ddi_dev_affinity(dev_info_t *a, dev_info_t *b)
{
	return (ddi_ctlops(a, a, DDI_CTLOPS_AFFINITY, (void *)b, (void *)0));
}

/*
 * ddi_streams_driver: DDI_SUCCESS iff the node is attached and its
 * cb_ops advertises a STREAMS table (cb_str).
 */
int
ddi_streams_driver(dev_info_t *dip)
{
	if (i_ddi_devi_attached(dip) &&
	    (DEVI(dip)->devi_ops->devo_cb_ops != NULL) &&
	    (DEVI(dip)->devi_ops->devo_cb_ops->cb_str != NULL))
		return (DDI_SUCCESS);
	return (DDI_FAILURE);
}
4874 4827
/*
 * callback free list
 *
 * Backing store for the static "L2" pool of struct ddi_callback used by
 * callback_insert() when ordinary kmem allocation fails.  Pool size is
 * chosen at boot by impl_ddi_callback_init() based on physical memory.
 */

static int ncallbacks;			/* chosen size of the static pool */
static int nc_low = 170;		/* pool size when physmem < 48MB */
static int nc_med = 512;		/* pool size when physmem < 128MB */
static int nc_high = 2048;		/* pool size otherwise */
static struct ddi_callback *callbackq;		/* the pool itself */
static struct ddi_callback *callbackqfree;	/* head of its free list */
4885 4838
/*
 * set/run callback lists
 *
 * Counters exported as the "unix:0:cbstats" kstat (see
 * impl_ddi_callback_init()).
 */
struct cbstats {
	kstat_named_t cb_asked;		/* callback requests made */
	kstat_named_t cb_new;		/* new list entries created */
	kstat_named_t cb_run;		/* callbacks run successfully */
	kstat_named_t cb_delete;	/* list entries removed */
	kstat_named_t cb_maxreq;	/* high water: outstanding requests */
	kstat_named_t cb_maxlist;	/* high water: allocated entries */
	kstat_named_t cb_alloc;		/* entries currently allocated */
	kstat_named_t cb_runouts;	/* callbacks that returned 0, requeued */
	kstat_named_t cb_L2;		/* allocations from the static pool */
	kstat_named_t cb_grow;		/* forced kmem_alloc_tryhard allocs */
} cbstats = {
	{"asked",	KSTAT_DATA_UINT32},
	{"new",		KSTAT_DATA_UINT32},
	{"run",		KSTAT_DATA_UINT32},
	{"delete",	KSTAT_DATA_UINT32},
	{"maxreq",	KSTAT_DATA_UINT32},
	{"maxlist",	KSTAT_DATA_UINT32},
	{"alloc",	KSTAT_DATA_UINT32},
	{"runouts",	KSTAT_DATA_UINT32},
	{"L2",		KSTAT_DATA_UINT32},
	{"grow",	KSTAT_DATA_UINT32},
};

/* Shorthand accessors for the ui32 payload of each counter above. */
#define	nc_asked	cb_asked.value.ui32
#define	nc_new		cb_new.value.ui32
#define	nc_run		cb_run.value.ui32
#define	nc_delete	cb_delete.value.ui32
#define	nc_maxreq	cb_maxreq.value.ui32
#define	nc_maxlist	cb_maxlist.value.ui32
#define	nc_alloc	cb_alloc.value.ui32
#define	nc_runouts	cb_runouts.value.ui32
#define	nc_L2		cb_L2.value.ui32
#define	nc_grow		cb_grow.value.ui32

/* Protects the callback lists, the free list, and cbstats. */
static kmutex_t ddi_callback_mutex;
4925 4878
4926 4879 /*
4927 4880 * callbacks are handled using a L1/L2 cache. The L1 cache
4928 4881 * comes out of kmem_cache_alloc and can expand/shrink dynamically. If
4929 4882 * we can't get callbacks from the L1 cache [because pageout is doing
4930 4883 * I/O at the time freemem is 0], we allocate callbacks out of the
4931 4884 * L2 cache. The L2 cache is static and depends on the memory size.
4932 4885 * [We might also count the number of devices at probe time and
4933 4886 * allocate one structure per device and adjust for deferred attach]
4934 4887 */
4935 4888 void
4936 4889 impl_ddi_callback_init(void)
4937 4890 {
4938 4891 int i;
4939 4892 uint_t physmegs;
4940 4893 kstat_t *ksp;
4941 4894
4942 4895 physmegs = physmem >> (20 - PAGESHIFT);
4943 4896 if (physmegs < 48) {
4944 4897 ncallbacks = nc_low;
4945 4898 } else if (physmegs < 128) {
4946 4899 ncallbacks = nc_med;
4947 4900 } else {
4948 4901 ncallbacks = nc_high;
4949 4902 }
4950 4903
4951 4904 /*
4952 4905 * init free list
4953 4906 */
4954 4907 callbackq = kmem_zalloc(
4955 4908 ncallbacks * sizeof (struct ddi_callback), KM_SLEEP);
4956 4909 for (i = 0; i < ncallbacks-1; i++)
4957 4910 callbackq[i].c_nfree = &callbackq[i+1];
4958 4911 callbackqfree = callbackq;
4959 4912
4960 4913 /* init kstats */
4961 4914 if (ksp = kstat_create("unix", 0, "cbstats", "misc", KSTAT_TYPE_NAMED,
4962 4915 sizeof (cbstats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) {
4963 4916 ksp->ks_data = (void *) &cbstats;
4964 4917 kstat_install(ksp);
4965 4918 }
4966 4919
4967 4920 }
4968 4921
/*
 * callback_insert: record a callback request on the list headed by
 * *listid.  If an entry for (funcp, arg) already exists, its count is
 * bumped; otherwise a new entry is allocated — first from kmem, then
 * from the static L2 pool, and as a last resort via
 * kmem_alloc_tryhard(KM_PANIC), which cannot fail.
 *
 * All callers in this file hold ddi_callback_mutex across the call
 * (see ddi_set_callback() and real_callback_run()).
 */
static void
callback_insert(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid,
	int count)
{
	struct ddi_callback *list, *marker, *new;
	size_t size = sizeof (struct ddi_callback);

	/* Coalesce with an existing entry for the same (func, arg). */
	list = marker = (struct ddi_callback *)*listid;
	while (list != NULL) {
		if (list->c_call == funcp && list->c_arg == arg) {
			list->c_count += count;
			return;
		}
		marker = list;
		list = list->c_nlist;
	}
	new = kmem_alloc(size, KM_NOSLEEP);
	if (new == NULL) {
		/* kmem failed: fall back to the static L2 free list. */
		new = callbackqfree;
		if (new == NULL) {
			/* L2 exhausted too: force an allocation. */
			new = kmem_alloc_tryhard(sizeof (struct ddi_callback),
			    &size, KM_NOSLEEP | KM_PANIC);
			cbstats.nc_grow++;
		} else {
			callbackqfree = new->c_nfree;
			cbstats.nc_L2++;
		}
	}
	/* Append at the tail (marker), or start a new list. */
	if (marker != NULL) {
		marker->c_nlist = new;
	} else {
		*listid = (uintptr_t)new;
	}
	new->c_size = size;
	new->c_nlist = NULL;
	new->c_call = funcp;
	new->c_arg = arg;
	new->c_count = count;
	cbstats.nc_new++;
	cbstats.nc_alloc++;
	if (cbstats.nc_alloc > cbstats.nc_maxlist)
		cbstats.nc_maxlist = cbstats.nc_alloc;
}
5012 4965
5013 4966 void
5014 4967 ddi_set_callback(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid)
5015 4968 {
5016 4969 mutex_enter(&ddi_callback_mutex);
5017 4970 cbstats.nc_asked++;
5018 4971 if ((cbstats.nc_asked - cbstats.nc_run) > cbstats.nc_maxreq)
5019 4972 cbstats.nc_maxreq = (cbstats.nc_asked - cbstats.nc_run);
5020 4973 (void) callback_insert(funcp, arg, listid, 1);
5021 4974 mutex_exit(&ddi_callback_mutex);
5022 4975 }
5023 4976
/*
 * real_callback_run: softcall handler that drains the callback list
 * headed by *Queue.  For each entry the callback is invoked up to
 * c_count times; a callback returning 0 means "resources still short",
 * in which case the request is re-queued (nc_runouts).  The loop runs
 * until the total 'pending' count observed on the first pass has been
 * accounted for.
 *
 * ddi_callback_mutex protects the list and cbstats; it is dropped
 * around the callback invocations themselves.
 */
static void
real_callback_run(void *Queue)
{
	int (*funcp)(caddr_t);
	caddr_t arg;
	int count, rval;
	uintptr_t *listid;
	struct ddi_callback *list, *marker;
	int check_pending = 1;
	int pending = 0;

	do {
		mutex_enter(&ddi_callback_mutex);
		listid = Queue;
		list = (struct ddi_callback *)*listid;
		if (list == NULL) {
			mutex_exit(&ddi_callback_mutex);
			return;
		}
		if (check_pending) {
			/* First pass: total up the outstanding requests. */
			marker = list;
			while (marker != NULL) {
				pending += marker->c_count;
				marker = marker->c_nlist;
			}
			check_pending = 0;
		}
		ASSERT(pending > 0);
		ASSERT(list->c_count > 0);
		/* Unlink the head entry and return it to its pool. */
		funcp = list->c_call;
		arg = list->c_arg;
		count = list->c_count;
		*(uintptr_t *)Queue = (uintptr_t)list->c_nlist;
		if (list >= &callbackq[0] &&
		    list <= &callbackq[ncallbacks-1]) {
			/* Entry came from the static L2 pool. */
			list->c_nfree = callbackqfree;
			callbackqfree = list;
		} else
			kmem_free(list, list->c_size);

		cbstats.nc_delete++;
		cbstats.nc_alloc--;
		mutex_exit(&ddi_callback_mutex);

		/* Invoke the callback, unlocked, up to 'count' times. */
		do {
			if ((rval = (*funcp)(arg)) == 0) {
				/* Ran out of resources: requeue request. */
				pending -= count;
				mutex_enter(&ddi_callback_mutex);
				(void) callback_insert(funcp, arg, listid,
				    count);
				cbstats.nc_runouts++;
			} else {
				pending--;
				mutex_enter(&ddi_callback_mutex);
				cbstats.nc_run++;
			}
			mutex_exit(&ddi_callback_mutex);
		} while (rval != 0 && (--count > 0));
	} while (pending > 0);
}
5084 5037
/*
 * ddi_run_callback: schedule the callbacks queued on *listid to be run
 * via softcall (i.e. from soft-interrupt context).
 */
void
ddi_run_callback(uintptr_t *listid)
{
	softcall(real_callback_run, listid);
}
5090 5043
5091 5044 /*
5092 5045 * ddi_periodic_t
5093 5046 * ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval,
5094 5047 * int level)
5095 5048 *
5096 5049 * INTERFACE LEVEL
5097 5050 * Solaris DDI specific (Solaris DDI)
5098 5051 *
5099 5052 * PARAMETERS
5100 5053 * func: the callback function
5101 5054 *
 *	    The callback function will be invoked.  The function is
 *	    invoked in kernel context if the level argument is zero;
 *	    otherwise it is invoked in interrupt context at the
 *	    specified interrupt level.
5106 5059 *
5107 5060 * arg: the argument passed to the callback function
5108 5061 *
5109 5062 * interval: interval time
5110 5063 *
5111 5064 * level : callback interrupt level
5112 5065 *
 *	    If the value is zero, the callback function is invoked in
 *	    kernel context.  If the value is greater than zero but less
 *	    than or equal to ten, the callback function is invoked in
 *	    interrupt context at the specified interrupt level, which may
 *	    be used for real time applications.
5118 5071 *
5119 5072 * This value must be in range of 0-10, which can be a numeric
5120 5073 * number or a pre-defined macro (DDI_IPL_0, ... , DDI_IPL_10).
5121 5074 *
5122 5075 * DESCRIPTION
5123 5076 * ddi_periodic_add(9F) schedules the specified function to be
5124 5077 * periodically invoked in the interval time.
5125 5078 *
5126 5079 * As well as timeout(9F), the exact time interval over which the function
5127 5080 * takes effect cannot be guaranteed, but the value given is a close
5128 5081 * approximation.
5129 5082 *
5130 5083 * Drivers waiting on behalf of processes with real-time constraints must
5131 5084 * pass non-zero value with the level argument to ddi_periodic_add(9F).
5132 5085 *
5133 5086 * RETURN VALUES
5134 5087 * ddi_periodic_add(9F) returns a non-zero opaque value (ddi_periodic_t),
5135 5088 * which must be used for ddi_periodic_delete(9F) to specify the request.
5136 5089 *
5137 5090 * CONTEXT
5138 5091 * ddi_periodic_add(9F) can be called in user or kernel context, but
5139 5092 * it cannot be called in interrupt context, which is different from
5140 5093 * timeout(9F).
5141 5094 */
5142 5095 ddi_periodic_t
5143 5096 ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval, int level)
5144 5097 {
5145 5098 /*
5146 5099 * Sanity check of the argument level.
5147 5100 */
5148 5101 if (level < DDI_IPL_0 || level > DDI_IPL_10)
5149 5102 cmn_err(CE_PANIC,
5150 5103 "ddi_periodic_add: invalid interrupt level (%d).", level);
5151 5104
5152 5105 /*
5153 5106 * Sanity check of the context. ddi_periodic_add() cannot be
5154 5107 * called in either interrupt context or high interrupt context.
5155 5108 */
5156 5109 if (servicing_interrupt())
5157 5110 cmn_err(CE_PANIC,
5158 5111 "ddi_periodic_add: called in (high) interrupt context.");
5159 5112
5160 5113 return ((ddi_periodic_t)i_timeout(func, arg, interval, level));
5161 5114 }
5162 5115
5163 5116 /*
5164 5117 * void
5165 5118 * ddi_periodic_delete(ddi_periodic_t req)
5166 5119 *
5167 5120 * INTERFACE LEVEL
5168 5121 * Solaris DDI specific (Solaris DDI)
5169 5122 *
5170 5123 * PARAMETERS
5171 5124 * req: ddi_periodic_t opaque value ddi_periodic_add(9F) returned
5172 5125 * previously.
5173 5126 *
5174 5127 * DESCRIPTION
5175 5128 * ddi_periodic_delete(9F) cancels the ddi_periodic_add(9F) request
5176 5129 * previously requested.
5177 5130 *
5178 5131 * ddi_periodic_delete(9F) will not return until the pending request
5179 5132 * is canceled or executed.
5180 5133 *
5181 5134 * As well as untimeout(9F), calling ddi_periodic_delete(9F) for a
5182 5135 * timeout which is either running on another CPU, or has already
 * completed causes no problems.  However, unlike untimeout(9F), there are
 * no restrictions on the locks which might be held across the call to
 * ddi_periodic_delete(9F).
5186 5139 *
5187 5140 * Drivers should be structured with the understanding that the arrival of
5188 5141 * both an interrupt and a timeout for that interrupt can occasionally
5189 5142 * occur, in either order.
5190 5143 *
5191 5144 * CONTEXT
5192 5145 * ddi_periodic_delete(9F) can be called in user or kernel context, but
5193 5146 * it cannot be called in interrupt context, which is different from
5194 5147 * untimeout(9F).
5195 5148 */
5196 5149 void
5197 5150 ddi_periodic_delete(ddi_periodic_t req)
5198 5151 {
5199 5152 /*
5200 5153 * Sanity check of the context. ddi_periodic_delete() cannot be
5201 5154 * called in either interrupt context or high interrupt context.
5202 5155 */
5203 5156 if (servicing_interrupt())
5204 5157 cmn_err(CE_PANIC,
5205 5158 "ddi_periodic_delete: called in (high) interrupt context.");
5206 5159
5207 5160 i_untimeout((timeout_t)req);
5208 5161 }
5209 5162
5210 5163 dev_info_t *
5211 5164 nodevinfo(dev_t dev, int otyp)
5212 5165 {
5213 5166 _NOTE(ARGUNUSED(dev, otyp))
5214 5167 return ((dev_info_t *)0);
5215 5168 }
5216 5169
5217 5170 /*
5218 5171 * A driver should support its own getinfo(9E) entry point. This function
5219 5172 * is provided as a convenience for ON drivers that don't expect their
5220 5173 * getinfo(9E) entry point to be called. A driver that uses this must not
5221 5174 * call ddi_create_minor_node.
5222 5175 */
/*
 * ddi_no_info: getinfo(9E) stub that always fails; see the comment
 * above for the constraints on drivers that use it.
 */
int
ddi_no_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	_NOTE(ARGUNUSED(dip, infocmd, arg, result))
	return (DDI_FAILURE);
}
5229 5182
5230 5183 /*
5231 5184 * A driver should support its own getinfo(9E) entry point. This function
5232 5185 * is provided as a convenience for ON drivers that where the minor number
5233 5186 * is the instance. Drivers that do not have 1:1 mapping must implement
5234 5187 * their own getinfo(9E) function.
5235 5188 */
5236 5189 int
5237 5190 ddi_getinfo_1to1(dev_info_t *dip, ddi_info_cmd_t infocmd,
5238 5191 void *arg, void **result)
5239 5192 {
5240 5193 _NOTE(ARGUNUSED(dip))
5241 5194 int instance;
5242 5195
5243 5196 if (infocmd != DDI_INFO_DEVT2INSTANCE)
5244 5197 return (DDI_FAILURE);
5245 5198
5246 5199 instance = getminor((dev_t)(uintptr_t)arg);
5247 5200 *result = (void *)(uintptr_t)instance;
5248 5201 return (DDI_SUCCESS);
5249 5202 }
5250 5203
/*
 * ddifail: attach(9E)-shaped stub that always fails.
 */
int
ddifail(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	_NOTE(ARGUNUSED(devi, cmd))
	return (DDI_FAILURE);
}
5257 5210
/*
 * Default "no DMA" bus_ops stubs.  A nexus that does not support DMA can
 * reference these in its bus_ops; each one fails the corresponding DMA
 * operation with an appropriate error code.
 */

/* Old-style DMA mapping: never maps. */
int
ddi_no_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, dmareqp, handlep))
	return (DDI_DMA_NOMAPPING);
}

/* Handle allocation: every attr is "bad" since DMA is unsupported. */
int
ddi_no_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, attr, waitfp, arg, handlep))
	return (DDI_DMA_BADATTR);
}

/* Handle free: nothing to free, always fails. */
int
ddi_no_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}

/* Bind: no resources can ever be mapped. */
int
ddi_no_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, dmareq, cp, ccountp))
	return (DDI_DMA_NOMAPPING);
}

/* Unbind: always fails. */
int
ddi_no_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}

/* Sync/flush: always fails. */
int
ddi_no_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, off, len, cache_flags))
	return (DDI_FAILURE);
}

/* Window manipulation: always fails. */
int
ddi_no_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, win, offp, lenp, cookiep, ccountp))
	return (DDI_FAILURE);
}

/* Miscellaneous DMA control: always fails. */
int
ddi_no_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, request, offp, lenp, objp, flags))
	return (DDI_FAILURE);
}
5325 5278
/*
 * ddivoid: no-op placeholder for void entry points.
 */
void
ddivoid(void)
{}

/*
 * nochpoll: chpoll(9E) stub for drivers that do not support poll(2);
 * always fails with ENXIO.
 */
int
nochpoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **pollhdrp)
{
	_NOTE(ARGUNUSED(dev, events, anyyet, reventsp, pollhdrp))
	return (ENXIO);
}
5337 5290
/*
 * ddi_get_cred: return the credential of the current thread.
 */
cred_t *
ddi_get_cred(void)
{
	return (CRED());
}

/*
 * ddi_get_lbolt: ticks since boot from the hybrid lbolt implementation,
 * truncated to clock_t.
 */
clock_t
ddi_get_lbolt(void)
{
	return ((clock_t)lbolt_hybrid());
}

/*
 * ddi_get_lbolt64: 64-bit variant of ddi_get_lbolt(), avoiding clock_t
 * truncation on ILP32 kernels.
 */
int64_t
ddi_get_lbolt64(void)
{
	return (lbolt_hybrid());
}
5355 5308
5356 5309 time_t
5357 5310 ddi_get_time(void)
5358 5311 {
5359 5312 time_t now;
5360 5313
5361 5314 if ((now = gethrestime_sec()) == 0) {
5362 5315 timestruc_t ts;
5363 5316 mutex_enter(&tod_lock);
5364 5317 ts = tod_get();
5365 5318 mutex_exit(&tod_lock);
5366 5319 return (ts.tv_sec);
5367 5320 } else {
5368 5321 return (now);
5369 5322 }
5370 5323 }
5371 5324
/*
 * ddi_get_pid: process ID of the process owning the current thread.
 */
pid_t
ddi_get_pid(void)
{
	return (ttoproc(curthread)->p_pid);
}

/*
 * ddi_get_kt_did: kernel thread ID of the current thread.
 */
kt_did_t
ddi_get_kt_did(void)
{
	return (curthread->t_did);
}
5383 5336
5384 5337 /*
5385 5338 * This function returns B_TRUE if the caller can reasonably expect that a call
5386 5339 * to cv_wait_sig(9F), cv_timedwait_sig(9F), or qwait_sig(9F) could be awakened
5387 5340 * by user-level signal. If it returns B_FALSE, then the caller should use
5388 5341 * other means to make certain that the wait will not hang "forever."
5389 5342 *
5390 5343 * It does not check the signal mask, nor for reception of any particular
5391 5344 * signal.
5392 5345 *
5393 5346 * Currently, a thread can receive a signal if it's not a kernel thread and it
5394 5347 * is not in the middle of exit(2) tear-down. Threads that are in that
5395 5348 * tear-down effectively convert cv_wait_sig to cv_wait, cv_timedwait_sig to
5396 5349 * cv_timedwait, and qwait_sig to qwait.
5397 5350 */
5398 5351 boolean_t
5399 5352 ddi_can_receive_sig(void)
5400 5353 {
5401 5354 proc_t *pp;
5402 5355
5403 5356 if (curthread->t_proc_flag & TP_LWPEXIT)
5404 5357 return (B_FALSE);
5405 5358 if ((pp = ttoproc(curthread)) == NULL)
5406 5359 return (B_FALSE);
5407 5360 return (pp->p_as != &kas);
5408 5361 }
5409 5362
/*
 * Swap bytes in 16-bit [half-]words.
 *
 * Copies nbytes / 2 byte pairs from src to dst, exchanging the two
 * bytes of each pair.  A trailing odd byte is ignored.  Works when
 * src == dst (each pair is read before either byte is stored).
 *
 * Fix: the pair count is computed in size_t rather than int, so it is
 * no longer truncated (implementation-defined) for nbytes >= 4GB on
 * LP64 kernels.
 */
void
swab(void *src, void *dst, size_t nbytes)
{
	unsigned char *pf = (unsigned char *)src;
	unsigned char *pt = (unsigned char *)dst;
	size_t nshorts = nbytes >> 1;

	while (nshorts-- != 0) {
		unsigned char tmp = pf[0];

		pt[0] = pf[1];
		pt[1] = tmp;
		pf += 2;
		pt += 2;
	}
}
5429 5382
5430 5383 static void
5431 5384 ddi_append_minor_node(dev_info_t *ddip, struct ddi_minor_data *dmdp)
5432 5385 {
5433 5386 int circ;
5434 5387 struct ddi_minor_data *dp;
5435 5388
5436 5389 ndi_devi_enter(ddip, &circ);
5437 5390 if ((dp = DEVI(ddip)->devi_minor) == (struct ddi_minor_data *)NULL) {
5438 5391 DEVI(ddip)->devi_minor = dmdp;
5439 5392 } else {
5440 5393 while (dp->next != (struct ddi_minor_data *)NULL)
5441 5394 dp = dp->next;
5442 5395 dp->next = dmdp;
5443 5396 }
5444 5397 ndi_devi_exit(ddip, circ);
5445 5398 }
5446 5399
/*
 * Part of the obsolete SunCluster DDI Hooks.
 * Keep for binary compatibility.  Simply the external minor number.
 */
minor_t
ddi_getiminor(dev_t dev)
{
	return (getminor(dev));
}
5456 5409
5457 5410 static int
5458 5411 i_log_devfs_minor_create(dev_info_t *dip, char *minor_name)
5459 5412 {
5460 5413 int se_flag;
5461 5414 int kmem_flag;
5462 5415 int se_err;
5463 5416 char *pathname, *class_name;
5464 5417 sysevent_t *ev = NULL;
5465 5418 sysevent_id_t eid;
5466 5419 sysevent_value_t se_val;
5467 5420 sysevent_attr_list_t *ev_attr_list = NULL;
5468 5421
5469 5422 /* determine interrupt context */
5470 5423 se_flag = (servicing_interrupt()) ? SE_NOSLEEP : SE_SLEEP;
5471 5424 kmem_flag = (se_flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
5472 5425
5473 5426 i_ddi_di_cache_invalidate();
5474 5427
5475 5428 #ifdef DEBUG
5476 5429 if ((se_flag == SE_NOSLEEP) && sunddi_debug) {
5477 5430 cmn_err(CE_CONT, "ddi_create_minor_node: called from "
5478 5431 "interrupt level by driver %s",
5479 5432 ddi_driver_name(dip));
5480 5433 }
5481 5434 #endif /* DEBUG */
5482 5435
5483 5436 ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_CREATE, EP_DDI, se_flag);
5484 5437 if (ev == NULL) {
5485 5438 goto fail;
5486 5439 }
5487 5440
5488 5441 pathname = kmem_alloc(MAXPATHLEN, kmem_flag);
5489 5442 if (pathname == NULL) {
5490 5443 sysevent_free(ev);
5491 5444 goto fail;
5492 5445 }
5493 5446
5494 5447 (void) ddi_pathname(dip, pathname);
5495 5448 ASSERT(strlen(pathname));
5496 5449 se_val.value_type = SE_DATA_TYPE_STRING;
5497 5450 se_val.value.sv_string = pathname;
5498 5451 if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
5499 5452 &se_val, se_flag) != 0) {
5500 5453 kmem_free(pathname, MAXPATHLEN);
5501 5454 sysevent_free(ev);
5502 5455 goto fail;
5503 5456 }
5504 5457 kmem_free(pathname, MAXPATHLEN);
5505 5458
5506 5459 /* add the device class attribute */
5507 5460 if ((class_name = i_ddi_devi_class(dip)) != NULL) {
5508 5461 se_val.value_type = SE_DATA_TYPE_STRING;
5509 5462 se_val.value.sv_string = class_name;
5510 5463 if (sysevent_add_attr(&ev_attr_list,
5511 5464 DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) {
5512 5465 sysevent_free_attr(ev_attr_list);
5513 5466 goto fail;
5514 5467 }
5515 5468 }
5516 5469
5517 5470 /*
5518 5471 * allow for NULL minor names
5519 5472 */
5520 5473 if (minor_name != NULL) {
5521 5474 se_val.value.sv_string = minor_name;
5522 5475 if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
5523 5476 &se_val, se_flag) != 0) {
5524 5477 sysevent_free_attr(ev_attr_list);
5525 5478 sysevent_free(ev);
5526 5479 goto fail;
5527 5480 }
5528 5481 }
5529 5482
5530 5483 if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
5531 5484 sysevent_free_attr(ev_attr_list);
5532 5485 sysevent_free(ev);
5533 5486 goto fail;
5534 5487 }
5535 5488
5536 5489 if ((se_err = log_sysevent(ev, se_flag, &eid)) != 0) {
5537 5490 if (se_err == SE_NO_TRANSPORT) {
5538 5491 cmn_err(CE_WARN, "/devices or /dev may not be current "
5539 5492 "for driver %s (%s). Run devfsadm -i %s",
5540 5493 ddi_driver_name(dip), "syseventd not responding",
5541 5494 ddi_driver_name(dip));
5542 5495 } else {
5543 5496 sysevent_free(ev);
5544 5497 goto fail;
5545 5498 }
5546 5499 }
5547 5500
5548 5501 sysevent_free(ev);
5549 5502 return (DDI_SUCCESS);
5550 5503 fail:
5551 5504 cmn_err(CE_WARN, "/devices or /dev may not be current "
5552 5505 "for driver %s. Run devfsadm -i %s",
5553 5506 ddi_driver_name(dip), ddi_driver_name(dip));
5554 5507 return (DDI_SUCCESS);
5555 5508 }
5556 5509
/*
 * i_log_devfs_minor_remove: post an ESC_DEVFS_MINOR_REMOVE sysevent.
 *
 * Failing to remove a minor node is not of interest, therefore we do
 * not generate an error message; every exit path returns DDI_SUCCESS.
 */
static int
i_log_devfs_minor_remove(dev_info_t *dip, char *minor_name)
{
	char *pathname, *class_name;
	sysevent_t *ev;
	sysevent_id_t eid;
	sysevent_value_t se_val;
	sysevent_attr_list_t *ev_attr_list = NULL;

	/*
	 * only log ddi_remove_minor_node() calls outside the scope
	 * of attach/detach reconfigurations and when the dip is
	 * still initialized.
	 */
	if (DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip) ||
	    (i_ddi_node_state(dip) < DS_INITIALIZED)) {
		return (DDI_SUCCESS);
	}

	i_ddi_di_cache_invalidate();

	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_REMOVE, EP_DDI, SE_SLEEP);
	if (ev == NULL) {
		return (DDI_SUCCESS);
	}

	pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	if (pathname == NULL) {
		sysevent_free(ev);
		return (DDI_SUCCESS);
	}

	/* attach the /devices path of the node */
	(void) ddi_pathname(dip, pathname);
	ASSERT(strlen(pathname));
	se_val.value_type = SE_DATA_TYPE_STRING;
	se_val.value.sv_string = pathname;
	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
	    &se_val, SE_SLEEP) != 0) {
		kmem_free(pathname, MAXPATHLEN);
		sysevent_free(ev);
		return (DDI_SUCCESS);
	}

	kmem_free(pathname, MAXPATHLEN);

	/*
	 * allow for NULL minor names
	 */
	if (minor_name != NULL) {
		/* value_type is still SE_DATA_TYPE_STRING from above */
		se_val.value.sv_string = minor_name;
		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
		    &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}
	}

	if ((class_name = i_ddi_devi_class(dip)) != NULL) {
		/* add the device class, driver name and instance attributes */

		se_val.value_type = SE_DATA_TYPE_STRING;
		se_val.value.sv_string = class_name;
		if (sysevent_add_attr(&ev_attr_list,
		    DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}

		se_val.value_type = SE_DATA_TYPE_STRING;
		se_val.value.sv_string = (char *)ddi_driver_name(dip);
		if (sysevent_add_attr(&ev_attr_list,
		    DEVFS_DRIVER_NAME, &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}

		se_val.value_type = SE_DATA_TYPE_INT32;
		se_val.value.sv_int32 = ddi_get_instance(dip);
		if (sysevent_add_attr(&ev_attr_list,
		    DEVFS_INSTANCE, &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}

	}

	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
		sysevent_free_attr(ev_attr_list);
	} else {
		(void) log_sysevent(ev, SE_SLEEP, &eid);
	}
fail:
	/* 'ev' owns any attached attribute list at this point */
	sysevent_free(ev);
	return (DDI_SUCCESS);
}
5656 5609
/*
 * Derive the device class of the node.
 * Device class names aren't defined yet. Until this is done we use
 * devfs event subclass names as device class names.
 *
 * The node_type match pattern below accepts either an exact match or a
 * "prefix:" qualified form (e.g. DDI_NT_BLOCK covers "ddi_block:...").
 * Only sets a class if the node does not already have one.
 */
static int
derive_devi_class(dev_info_t *dip, char *node_type, int flag)
{
	int rv = DDI_SUCCESS;

	if (i_ddi_devi_class(dip) == NULL) {
		/* block devices (except floppies) -> "disk" class */
		if (strncmp(node_type, DDI_NT_BLOCK,
		    sizeof (DDI_NT_BLOCK) - 1) == 0 &&
		    (node_type[sizeof (DDI_NT_BLOCK) - 1] == '\0' ||
		    node_type[sizeof (DDI_NT_BLOCK) - 1] == ':') &&
		    strcmp(node_type, DDI_NT_FD) != 0) {

			rv = i_ddi_set_devi_class(dip, ESC_DISK, flag);

		/* network devices -> "network" class */
		} else if (strncmp(node_type, DDI_NT_NET,
		    sizeof (DDI_NT_NET) - 1) == 0 &&
		    (node_type[sizeof (DDI_NT_NET) - 1] == '\0' ||
		    node_type[sizeof (DDI_NT_NET) - 1] == ':')) {

			rv = i_ddi_set_devi_class(dip, ESC_NETWORK, flag);

		/* printers -> "printer" class */
		} else if (strncmp(node_type, DDI_NT_PRINTER,
		    sizeof (DDI_NT_PRINTER) - 1) == 0 &&
		    (node_type[sizeof (DDI_NT_PRINTER) - 1] == '\0' ||
		    node_type[sizeof (DDI_NT_PRINTER) - 1] == ':')) {

			rv = i_ddi_set_devi_class(dip, ESC_PRINTER, flag);

		/* pseudo nodes named like lofi -> "lofi" class */
		} else if (strncmp(node_type, DDI_PSEUDO,
		    sizeof (DDI_PSEUDO) -1) == 0 &&
		    (strncmp(ESC_LOFI, ddi_node_name(dip),
		    sizeof (ESC_LOFI) -1) == 0)) {
			rv = i_ddi_set_devi_class(dip, ESC_LOFI, flag);
		}
	}

	return (rv);
}
5700 5653
5701 5654 /*
5702 5655 * Check compliance with PSARC 2003/375:
5703 5656 *
5704 5657 * The name must contain only characters a-z, A-Z, 0-9 or _ and it must not
5705 5658 * exceed IFNAMSIZ (16) characters in length.
5706 5659 */
5707 5660 static boolean_t
5708 5661 verify_name(char *name)
5709 5662 {
5710 5663 size_t len = strlen(name);
5711 5664 char *cp;
5712 5665
5713 5666 if (len == 0 || len > IFNAMSIZ)
5714 5667 return (B_FALSE);
5715 5668
5716 5669 for (cp = name; *cp != '\0'; cp++) {
5717 5670 if (!isalnum(*cp) && *cp != '_')
5718 5671 return (B_FALSE);
5719 5672 }
5720 5673
5721 5674 return (B_TRUE);
5722 5675 }
5723 5676
/*
 * ddi_create_minor_common: Create a ddi_minor_data structure and
 * attach it to the given devinfo node.
 *
 * Common worker for ddi_create_minor_node(), ddi_create_priv_minor_node()
 * and the default-minor variant.  Validates the request, records network
 * driver flags, derives a device class, appends the minor node, and
 * (outside attach/detach) posts a devfs sysevent.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE; failures do not modify the dip.
 */

int
ddi_create_minor_common(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag, ddi_minor_type mtype,
    const char *read_priv, const char *write_priv, mode_t priv_mode)
{
	struct ddi_minor_data *dmdp;
	major_t major;

	/* only character and block special nodes are supported */
	if (spec_type != S_IFCHR && spec_type != S_IFBLK)
		return (DDI_FAILURE);

	if (name == NULL)
		return (DDI_FAILURE);

	/*
	 * Log a message if the minor number the driver is creating
	 * is not expressible on the on-disk filesystem (currently
	 * this is limited to 18 bits both by UFS). The device can
	 * be opened via devfs, but not by device special files created
	 * via mknod().
	 */
	if (minor_num > L_MAXMIN32) {
		cmn_err(CE_WARN,
		    "%s%d:%s minor 0x%x too big for 32-bit applications",
		    ddi_driver_name(dip), ddi_get_instance(dip),
		    name, minor_num);
		return (DDI_FAILURE);
	}

	/* dip must be bound and attached */
	major = ddi_driver_major(dip);
	ASSERT(major != DDI_MAJOR_T_NONE);

	/*
	 * Default node_type to DDI_PSEUDO and issue notice in debug mode
	 */
	if (node_type == NULL) {
		node_type = DDI_PSEUDO;
		NDI_CONFIG_DEBUG((CE_NOTE, "!illegal node_type NULL for %s%d "
		    " minor node %s; default to DDI_PSEUDO",
		    ddi_driver_name(dip), ddi_get_instance(dip), name));
	}

	/*
	 * If the driver is a network driver, ensure that the name falls within
	 * the interface naming constraints specified by PSARC/2003/375.
	 */
	if (strcmp(node_type, DDI_NT_NET) == 0) {
		if (!verify_name(name))
			return (DDI_FAILURE);

		if (mtype == DDM_MINOR) {
			struct devnames *dnp = &devnamesp[major];

			/* Mark driver as a network driver */
			LOCK_DEV_OPS(&dnp->dn_lock);
			dnp->dn_flags |= DN_NETWORK_DRIVER;

			/*
			 * If this minor node is created during the device
			 * attachment, this is a physical network device.
			 * Mark the driver as a physical network driver.
			 */
			if (DEVI_IS_ATTACHING(dip))
				dnp->dn_flags |= DN_NETWORK_PHYSDRIVER;
			UNLOCK_DEV_OPS(&dnp->dn_lock);
		}
	}

	/* derive the device class from node_type for ordinary minors */
	if (mtype == DDM_MINOR) {
		if (derive_devi_class(dip, node_type, KM_NOSLEEP) !=
		    DDI_SUCCESS)
			return (DDI_FAILURE);
	}

	/*
	 * Take care of minor number information for the node.
	 */

	if ((dmdp = kmem_zalloc(sizeof (struct ddi_minor_data),
	    KM_NOSLEEP)) == NULL) {
		return (DDI_FAILURE);
	}
	if ((dmdp->ddm_name = i_ddi_strdup(name, KM_NOSLEEP)) == NULL) {
		kmem_free(dmdp, sizeof (struct ddi_minor_data));
		return (DDI_FAILURE);
	}
	dmdp->dip = dip;
	dmdp->ddm_dev = makedevice(major, minor_num);
	dmdp->ddm_spec_type = spec_type;
	dmdp->ddm_node_type = node_type;
	dmdp->type = mtype;
	if (flag & CLONE_DEV) {
		/* clone devices route opens through the clone driver */
		dmdp->type = DDM_ALIAS;
		dmdp->ddm_dev = makedevice(ddi_driver_major(clone_dip), major);
	}
	if (flag & PRIVONLY_DEV) {
		dmdp->ddm_flags |= DM_NO_FSPERM;
	}
	if (read_priv || write_priv) {
		dmdp->ddm_node_priv =
		    devpolicy_priv_by_name(read_priv, write_priv);
	}
	dmdp->ddm_priv_mode = priv_mode;

	ddi_append_minor_node(dip, dmdp);

	/*
	 * only log ddi_create_minor_node() calls which occur
	 * outside the scope of attach(9e)/detach(9e) reconfigurations
	 */
	if (!(DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip)) &&
	    mtype != DDM_INTERNAL_PATH) {
		(void) i_log_devfs_minor_create(dip, name);
	}

	/*
	 * Check if any dacf rules match the creation of this minor node
	 */
	dacfc_match_create_minor(name, node_type, dip, dmdp, flag);
	return (DDI_SUCCESS);
}
5851 5804
5852 5805 int
5853 5806 ddi_create_minor_node(dev_info_t *dip, char *name, int spec_type,
5854 5807 minor_t minor_num, char *node_type, int flag)
5855 5808 {
5856 5809 return (ddi_create_minor_common(dip, name, spec_type, minor_num,
5857 5810 node_type, flag, DDM_MINOR, NULL, NULL, 0));
5858 5811 }
5859 5812
5860 5813 int
5861 5814 ddi_create_priv_minor_node(dev_info_t *dip, char *name, int spec_type,
5862 5815 minor_t minor_num, char *node_type, int flag,
5863 5816 const char *rdpriv, const char *wrpriv, mode_t priv_mode)
5864 5817 {
5865 5818 return (ddi_create_minor_common(dip, name, spec_type, minor_num,
5866 5819 node_type, flag, DDM_MINOR, rdpriv, wrpriv, priv_mode));
5867 5820 }
5868 5821
5869 5822 int
5870 5823 ddi_create_default_minor_node(dev_info_t *dip, char *name, int spec_type,
5871 5824 minor_t minor_num, char *node_type, int flag)
5872 5825 {
5873 5826 return (ddi_create_minor_common(dip, name, spec_type, minor_num,
5874 5827 node_type, flag, DDM_DEFAULT, NULL, NULL, 0));
5875 5828 }
5876 5829
5877 5830 /*
5878 5831 * Internal (non-ddi) routine for drivers to export names known
5879 5832 * to the kernel (especially ddi_pathname_to_dev_t and friends)
5880 5833 * but not exported externally to /dev
5881 5834 */
5882 5835 int
5883 5836 ddi_create_internal_pathname(dev_info_t *dip, char *name, int spec_type,
5884 5837 minor_t minor_num)
5885 5838 {
5886 5839 return (ddi_create_minor_common(dip, name, spec_type, minor_num,
5887 5840 "internal", 0, DDM_INTERNAL_PATH, NULL, NULL, 0));
5888 5841 }
5889 5842
5890 5843 void
5891 5844 ddi_remove_minor_node(dev_info_t *dip, char *name)
5892 5845 {
5893 5846 int circ;
5894 5847 struct ddi_minor_data *dmdp, *dmdp1;
5895 5848 struct ddi_minor_data **dmdp_prev;
5896 5849
5897 5850 ndi_devi_enter(dip, &circ);
5898 5851 dmdp_prev = &DEVI(dip)->devi_minor;
5899 5852 dmdp = DEVI(dip)->devi_minor;
5900 5853 while (dmdp != NULL) {
5901 5854 dmdp1 = dmdp->next;
5902 5855 if ((name == NULL || (dmdp->ddm_name != NULL &&
5903 5856 strcmp(name, dmdp->ddm_name) == 0))) {
5904 5857 if (dmdp->ddm_name != NULL) {
5905 5858 if (dmdp->type != DDM_INTERNAL_PATH)
5906 5859 (void) i_log_devfs_minor_remove(dip,
5907 5860 dmdp->ddm_name);
5908 5861 kmem_free(dmdp->ddm_name,
5909 5862 strlen(dmdp->ddm_name) + 1);
5910 5863 }
5911 5864 /*
5912 5865 * Release device privilege, if any.
5913 5866 * Release dacf client data associated with this minor
5914 5867 * node by storing NULL.
5915 5868 */
5916 5869 if (dmdp->ddm_node_priv)
5917 5870 dpfree(dmdp->ddm_node_priv);
5918 5871 dacf_store_info((dacf_infohdl_t)dmdp, NULL);
5919 5872 kmem_free(dmdp, sizeof (struct ddi_minor_data));
5920 5873 *dmdp_prev = dmdp1;
5921 5874 /*
5922 5875 * OK, we found it, so get out now -- if we drive on,
5923 5876 * we will strcmp against garbage. See 1139209.
5924 5877 */
5925 5878 if (name != NULL)
5926 5879 break;
5927 5880 } else {
5928 5881 dmdp_prev = &dmdp->next;
5929 5882 }
5930 5883 dmdp = dmdp1;
5931 5884 }
5932 5885 ndi_devi_exit(dip, circ);
5933 5886 }
5934 5887
5935 5888
5936 5889 int
5937 5890 ddi_in_panic()
5938 5891 {
5939 5892 return (panicstr != NULL);
5940 5893 }
5941 5894
5942 5895
/*
 * Find the first (least significant) bit set in 'mask'.
 * Bits are counted from 1 up; 0 means no bit was set.
 */
int
ddi_ffs(long mask)
{
	return (ffs(mask));
}
5952 5905
/*
 * Find the last (most significant) bit set in 'mask', counted from 1 up.
 *
 * Repeatedly clear the lowest set bit (mask & (mask - 1)) until a single
 * bit remains, then let ffs() report its position.
 *
 * Algorithm courtesy of Steve Chessin.
 */
int
ddi_fls(long mask)
{
	while (mask != 0) {
		long stripped = mask & (mask - 1);

		if (stripped == 0)
			break;
		mask = stripped;
	}
	return (ffs(mask));
}
5973 5926
5974 5927 /*
5975 5928 * The ddi_soft_state_* routines comprise generic storage management utilities
5976 5929 * for driver soft state structures (in "the old days," this was done with
5977 5930 * statically sized array - big systems and dynamic loading and unloading
5978 5931 * make heap allocation more attractive).
5979 5932 */
5980 5933
5981 5934 /*
5982 5935 * Allocate a set of pointers to 'n_items' objects of size 'size'
5983 5936 * bytes. Each pointer is initialized to nil.
5984 5937 *
5985 5938 * The 'size' and 'n_items' values are stashed in the opaque
5986 5939 * handle returned to the caller.
5987 5940 *
5988 5941 * This implementation interprets 'set of pointers' to mean 'array
5989 5942 * of pointers' but note that nothing in the interface definition
5990 5943 * precludes an implementation that uses, for example, a linked list.
5991 5944 * However there should be a small efficiency gain from using an array
5992 5945 * at lookup time.
5993 5946 *
5994 5947 * NOTE As an optimization, we make our growable array allocations in
5995 5948 * powers of two (bytes), since that's how much kmem_alloc (currently)
5996 5949 * gives us anyway. It should save us some free/realloc's ..
5997 5950 *
5998 5951 * As a further optimization, we make the growable array start out
5999 5952 * with MIN_N_ITEMS in it.
6000 5953 */
6001 5954
6002 5955 #define MIN_N_ITEMS 8 /* 8 void *'s == 32 bytes */
6003 5956
6004 5957 int
6005 5958 ddi_soft_state_init(void **state_p, size_t size, size_t n_items)
6006 5959 {
6007 5960 i_ddi_soft_state *ss;
6008 5961
6009 5962 if (state_p == NULL || size == 0)
6010 5963 return (EINVAL);
6011 5964
6012 5965 ss = kmem_zalloc(sizeof (*ss), KM_SLEEP);
6013 5966 mutex_init(&ss->lock, NULL, MUTEX_DRIVER, NULL);
6014 5967 ss->size = size;
6015 5968
6016 5969 if (n_items < MIN_N_ITEMS)
6017 5970 ss->n_items = MIN_N_ITEMS;
6018 5971 else {
6019 5972 int bitlog;
6020 5973
6021 5974 if ((bitlog = ddi_fls(n_items)) == ddi_ffs(n_items))
6022 5975 bitlog--;
6023 5976 ss->n_items = 1 << bitlog;
6024 5977 }
6025 5978
6026 5979 ASSERT(ss->n_items >= n_items);
6027 5980
6028 5981 ss->array = kmem_zalloc(ss->n_items * sizeof (void *), KM_SLEEP);
6029 5982
6030 5983 *state_p = ss;
6031 5984 return (0);
6032 5985 }
6033 5986
6034 5987 /*
6035 5988 * Allocate a state structure of size 'size' to be associated
6036 5989 * with item 'item'.
6037 5990 *
6038 5991 * In this implementation, the array is extended to
6039 5992 * allow the requested offset, if needed.
6040 5993 */
6041 5994 int
6042 5995 ddi_soft_state_zalloc(void *state, int item)
6043 5996 {
6044 5997 i_ddi_soft_state *ss = (i_ddi_soft_state *)state;
6045 5998 void **array;
6046 5999 void *new_element;
6047 6000
6048 6001 if ((state == NULL) || (item < 0))
6049 6002 return (DDI_FAILURE);
6050 6003
6051 6004 mutex_enter(&ss->lock);
6052 6005 if (ss->size == 0) {
6053 6006 mutex_exit(&ss->lock);
6054 6007 cmn_err(CE_WARN, "ddi_soft_state_zalloc: bad handle: %s",
6055 6008 mod_containing_pc(caller()));
6056 6009 return (DDI_FAILURE);
6057 6010 }
6058 6011
6059 6012 array = ss->array; /* NULL if ss->n_items == 0 */
6060 6013 ASSERT(ss->n_items != 0 && array != NULL);
6061 6014
6062 6015 /*
6063 6016 * refuse to tread on an existing element
6064 6017 */
6065 6018 if (item < ss->n_items && array[item] != NULL) {
6066 6019 mutex_exit(&ss->lock);
6067 6020 return (DDI_FAILURE);
6068 6021 }
6069 6022
6070 6023 /*
6071 6024 * Allocate a new element to plug in
6072 6025 */
6073 6026 new_element = kmem_zalloc(ss->size, KM_SLEEP);
6074 6027
6075 6028 /*
6076 6029 * Check if the array is big enough, if not, grow it.
6077 6030 */
6078 6031 if (item >= ss->n_items) {
6079 6032 void **new_array;
6080 6033 size_t new_n_items;
6081 6034 struct i_ddi_soft_state *dirty;
6082 6035
6083 6036 /*
6084 6037 * Allocate a new array of the right length, copy
6085 6038 * all the old pointers to the new array, then
6086 6039 * if it exists at all, put the old array on the
6087 6040 * dirty list.
6088 6041 *
6089 6042 * Note that we can't kmem_free() the old array.
6090 6043 *
6091 6044 * Why -- well the 'get' operation is 'mutex-free', so we
6092 6045 * can't easily catch a suspended thread that is just about
6093 6046 * to dereference the array we just grew out of. So we
6094 6047 * cons up a header and put it on a list of 'dirty'
6095 6048 * pointer arrays. (Dirty in the sense that there may
6096 6049 * be suspended threads somewhere that are in the middle
6097 6050 * of referencing them). Fortunately, we -can- garbage
6098 6051 * collect it all at ddi_soft_state_fini time.
6099 6052 */
6100 6053 new_n_items = ss->n_items;
6101 6054 while (new_n_items < (1 + item))
6102 6055 new_n_items <<= 1; /* double array size .. */
6103 6056
6104 6057 ASSERT(new_n_items >= (1 + item)); /* sanity check! */
6105 6058
6106 6059 new_array = kmem_zalloc(new_n_items * sizeof (void *),
6107 6060 KM_SLEEP);
6108 6061 /*
6109 6062 * Copy the pointers into the new array
6110 6063 */
6111 6064 bcopy(array, new_array, ss->n_items * sizeof (void *));
6112 6065
6113 6066 /*
6114 6067 * Save the old array on the dirty list
6115 6068 */
6116 6069 dirty = kmem_zalloc(sizeof (*dirty), KM_SLEEP);
6117 6070 dirty->array = ss->array;
6118 6071 dirty->n_items = ss->n_items;
6119 6072 dirty->next = ss->next;
6120 6073 ss->next = dirty;
6121 6074
6122 6075 ss->array = (array = new_array);
6123 6076 ss->n_items = new_n_items;
6124 6077 }
6125 6078
6126 6079 ASSERT(array != NULL && item < ss->n_items && array[item] == NULL);
6127 6080
6128 6081 array[item] = new_element;
6129 6082
6130 6083 mutex_exit(&ss->lock);
6131 6084 return (DDI_SUCCESS);
6132 6085 }
6133 6086
6134 6087 /*
6135 6088 * Fetch a pointer to the allocated soft state structure.
6136 6089 *
6137 6090 * This is designed to be cheap.
6138 6091 *
6139 6092 * There's an argument that there should be more checking for
6140 6093 * nil pointers and out of bounds on the array.. but we do a lot
6141 6094 * of that in the alloc/free routines.
6142 6095 *
6143 6096 * An array has the convenience that we don't need to lock read-access
6144 6097 * to it c.f. a linked list. However our "expanding array" strategy
6145 6098 * means that we should hold a readers lock on the i_ddi_soft_state
6146 6099 * structure.
6147 6100 *
6148 6101 * However, from a performance viewpoint, we need to do it without
6149 6102 * any locks at all -- this also makes it a leaf routine. The algorithm
6150 6103 * is 'lock-free' because we only discard the pointer arrays at
6151 6104 * ddi_soft_state_fini() time.
6152 6105 */
6153 6106 void *
6154 6107 ddi_get_soft_state(void *state, int item)
6155 6108 {
6156 6109 i_ddi_soft_state *ss = (i_ddi_soft_state *)state;
6157 6110
6158 6111 ASSERT((ss != NULL) && (item >= 0));
6159 6112
6160 6113 if (item < ss->n_items && ss->array != NULL)
6161 6114 return (ss->array[item]);
6162 6115 return (NULL);
6163 6116 }
6164 6117
6165 6118 /*
6166 6119 * Free the state structure corresponding to 'item.' Freeing an
6167 6120 * element that has either gone or was never allocated is not
6168 6121 * considered an error. Note that we free the state structure, but
6169 6122 * we don't shrink our pointer array, or discard 'dirty' arrays,
6170 6123 * since even a few pointers don't really waste too much memory.
6171 6124 *
6172 6125 * Passing an item number that is out of bounds, or a null pointer will
6173 6126 * provoke an error message.
6174 6127 */
6175 6128 void
6176 6129 ddi_soft_state_free(void *state, int item)
6177 6130 {
6178 6131 i_ddi_soft_state *ss = (i_ddi_soft_state *)state;
6179 6132 void **array;
6180 6133 void *element;
6181 6134 static char msg[] = "ddi_soft_state_free:";
6182 6135
6183 6136 if (ss == NULL) {
6184 6137 cmn_err(CE_WARN, "%s null handle: %s",
6185 6138 msg, mod_containing_pc(caller()));
6186 6139 return;
6187 6140 }
6188 6141
6189 6142 element = NULL;
6190 6143
6191 6144 mutex_enter(&ss->lock);
6192 6145
6193 6146 if ((array = ss->array) == NULL || ss->size == 0) {
6194 6147 cmn_err(CE_WARN, "%s bad handle: %s",
6195 6148 msg, mod_containing_pc(caller()));
6196 6149 } else if (item < 0 || item >= ss->n_items) {
6197 6150 cmn_err(CE_WARN, "%s item %d not in range [0..%lu]: %s",
6198 6151 msg, item, ss->n_items - 1, mod_containing_pc(caller()));
6199 6152 } else if (array[item] != NULL) {
6200 6153 element = array[item];
6201 6154 array[item] = NULL;
6202 6155 }
6203 6156
6204 6157 mutex_exit(&ss->lock);
6205 6158
6206 6159 if (element)
6207 6160 kmem_free(element, ss->size);
6208 6161 }
6209 6162
6210 6163 /*
6211 6164 * Free the entire set of pointers, and any
6212 6165 * soft state structures contained therein.
6213 6166 *
6214 6167 * Note that we don't grab the ss->lock mutex, even though
6215 6168 * we're inspecting the various fields of the data structure.
6216 6169 *
6217 6170 * There is an implicit assumption that this routine will
6218 6171 * never run concurrently with any of the above on this
6219 6172 * particular state structure i.e. by the time the driver
6220 6173 * calls this routine, there should be no other threads
6221 6174 * running in the driver.
6222 6175 */
6223 6176 void
6224 6177 ddi_soft_state_fini(void **state_p)
6225 6178 {
6226 6179 i_ddi_soft_state *ss, *dirty;
6227 6180 int item;
6228 6181 static char msg[] = "ddi_soft_state_fini:";
6229 6182
6230 6183 if (state_p == NULL ||
6231 6184 (ss = (i_ddi_soft_state *)(*state_p)) == NULL) {
6232 6185 cmn_err(CE_WARN, "%s null handle: %s",
6233 6186 msg, mod_containing_pc(caller()));
6234 6187 return;
6235 6188 }
6236 6189
6237 6190 if (ss->size == 0) {
6238 6191 cmn_err(CE_WARN, "%s bad handle: %s",
6239 6192 msg, mod_containing_pc(caller()));
6240 6193 return;
6241 6194 }
6242 6195
6243 6196 if (ss->n_items > 0) {
6244 6197 for (item = 0; item < ss->n_items; item++)
6245 6198 ddi_soft_state_free(ss, item);
6246 6199 kmem_free(ss->array, ss->n_items * sizeof (void *));
6247 6200 }
6248 6201
6249 6202 /*
6250 6203 * Now delete any dirty arrays from previous 'grow' operations
6251 6204 */
6252 6205 for (dirty = ss->next; dirty; dirty = ss->next) {
6253 6206 ss->next = dirty->next;
6254 6207 kmem_free(dirty->array, dirty->n_items * sizeof (void *));
6255 6208 kmem_free(dirty, sizeof (*dirty));
6256 6209 }
6257 6210
6258 6211 mutex_destroy(&ss->lock);
6259 6212 kmem_free(ss, sizeof (*ss));
6260 6213
6261 6214 *state_p = NULL;
6262 6215 }
6263 6216
6264 6217 #define SS_N_ITEMS_PER_HASH 16
6265 6218 #define SS_MIN_HASH_SZ 16
6266 6219 #define SS_MAX_HASH_SZ 4096
6267 6220
6268 6221 int
6269 6222 ddi_soft_state_bystr_init(ddi_soft_state_bystr **state_p, size_t size,
6270 6223 int n_items)
6271 6224 {
6272 6225 i_ddi_soft_state_bystr *sss;
6273 6226 int hash_sz;
6274 6227
6275 6228 ASSERT(state_p && size && n_items);
6276 6229 if ((state_p == NULL) || (size == 0) || (n_items == 0))
6277 6230 return (EINVAL);
6278 6231
6279 6232 /* current implementation is based on hash, convert n_items to hash */
6280 6233 hash_sz = n_items / SS_N_ITEMS_PER_HASH;
6281 6234 if (hash_sz < SS_MIN_HASH_SZ)
6282 6235 hash_sz = SS_MIN_HASH_SZ;
6283 6236 else if (hash_sz > SS_MAX_HASH_SZ)
6284 6237 hash_sz = SS_MAX_HASH_SZ;
6285 6238
6286 6239 /* allocate soft_state pool */
6287 6240 sss = kmem_zalloc(sizeof (*sss), KM_SLEEP);
6288 6241 sss->ss_size = size;
6289 6242 sss->ss_mod_hash = mod_hash_create_strhash("soft_state_bystr",
6290 6243 hash_sz, mod_hash_null_valdtor);
6291 6244 *state_p = (ddi_soft_state_bystr *)sss;
6292 6245 return (0);
6293 6246 }
6294 6247
6295 6248 int
6296 6249 ddi_soft_state_bystr_zalloc(ddi_soft_state_bystr *state, const char *str)
6297 6250 {
6298 6251 i_ddi_soft_state_bystr *sss = (i_ddi_soft_state_bystr *)state;
6299 6252 void *sso;
6300 6253 char *dup_str;
6301 6254
6302 6255 ASSERT(sss && str && sss->ss_mod_hash);
6303 6256 if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
6304 6257 return (DDI_FAILURE);
6305 6258 sso = kmem_zalloc(sss->ss_size, KM_SLEEP);
6306 6259 dup_str = i_ddi_strdup((char *)str, KM_SLEEP);
6307 6260 if (mod_hash_insert(sss->ss_mod_hash,
6308 6261 (mod_hash_key_t)dup_str, (mod_hash_val_t)sso) == 0)
6309 6262 return (DDI_SUCCESS);
6310 6263
6311 6264 /*
6312 6265 * The only error from an strhash insert is caused by a duplicate key.
6313 6266 * We refuse to tread on an existing elements, so free and fail.
6314 6267 */
6315 6268 kmem_free(dup_str, strlen(dup_str) + 1);
6316 6269 kmem_free(sso, sss->ss_size);
6317 6270 return (DDI_FAILURE);
6318 6271 }
6319 6272
6320 6273 void *
6321 6274 ddi_soft_state_bystr_get(ddi_soft_state_bystr *state, const char *str)
6322 6275 {
6323 6276 i_ddi_soft_state_bystr *sss = (i_ddi_soft_state_bystr *)state;
6324 6277 void *sso;
6325 6278
6326 6279 ASSERT(sss && str && sss->ss_mod_hash);
6327 6280 if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
6328 6281 return (NULL);
6329 6282
6330 6283 if (mod_hash_find(sss->ss_mod_hash,
6331 6284 (mod_hash_key_t)str, (mod_hash_val_t *)&sso) == 0)
6332 6285 return (sso);
6333 6286 return (NULL);
6334 6287 }
6335 6288
6336 6289 void
6337 6290 ddi_soft_state_bystr_free(ddi_soft_state_bystr *state, const char *str)
6338 6291 {
6339 6292 i_ddi_soft_state_bystr *sss = (i_ddi_soft_state_bystr *)state;
6340 6293 void *sso;
6341 6294
6342 6295 ASSERT(sss && str && sss->ss_mod_hash);
6343 6296 if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
6344 6297 return;
6345 6298
6346 6299 (void) mod_hash_remove(sss->ss_mod_hash,
6347 6300 (mod_hash_key_t)str, (mod_hash_val_t *)&sso);
6348 6301 kmem_free(sso, sss->ss_size);
6349 6302 }
6350 6303
6351 6304 void
6352 6305 ddi_soft_state_bystr_fini(ddi_soft_state_bystr **state_p)
6353 6306 {
6354 6307 i_ddi_soft_state_bystr *sss;
6355 6308
6356 6309 ASSERT(state_p);
6357 6310 if (state_p == NULL)
6358 6311 return;
6359 6312
6360 6313 sss = (i_ddi_soft_state_bystr *)(*state_p);
6361 6314 if (sss == NULL)
6362 6315 return;
6363 6316
6364 6317 ASSERT(sss->ss_mod_hash);
6365 6318 if (sss->ss_mod_hash) {
6366 6319 mod_hash_destroy_strhash(sss->ss_mod_hash);
6367 6320 sss->ss_mod_hash = NULL;
6368 6321 }
6369 6322
6370 6323 kmem_free(sss, sizeof (*sss));
6371 6324 *state_p = NULL;
6372 6325 }
6373 6326
6374 6327 /*
6375 6328 * The ddi_strid_* routines provide string-to-index management utilities.
6376 6329 */
6377 6330 /* allocate and initialize an strid set */
6378 6331 int
6379 6332 ddi_strid_init(ddi_strid **strid_p, int n_items)
6380 6333 {
6381 6334 i_ddi_strid *ss;
6382 6335 int hash_sz;
6383 6336
6384 6337 if (strid_p == NULL)
6385 6338 return (DDI_FAILURE);
6386 6339
6387 6340 /* current implementation is based on hash, convert n_items to hash */
6388 6341 hash_sz = n_items / SS_N_ITEMS_PER_HASH;
6389 6342 if (hash_sz < SS_MIN_HASH_SZ)
6390 6343 hash_sz = SS_MIN_HASH_SZ;
6391 6344 else if (hash_sz > SS_MAX_HASH_SZ)
6392 6345 hash_sz = SS_MAX_HASH_SZ;
6393 6346
6394 6347 ss = kmem_alloc(sizeof (*ss), KM_SLEEP);
6395 6348 ss->strid_chunksz = n_items;
6396 6349 ss->strid_spacesz = n_items;
6397 6350 ss->strid_space = id_space_create("strid", 1, n_items);
6398 6351 ss->strid_bystr = mod_hash_create_strhash("strid_bystr", hash_sz,
6399 6352 mod_hash_null_valdtor);
6400 6353 ss->strid_byid = mod_hash_create_idhash("strid_byid", hash_sz,
6401 6354 mod_hash_null_valdtor);
6402 6355 *strid_p = (ddi_strid *)ss;
6403 6356 return (DDI_SUCCESS);
6404 6357 }
6405 6358
6406 6359 /* allocate an id mapping within the specified set for str, return id */
6407 6360 static id_t
6408 6361 i_ddi_strid_alloc(ddi_strid *strid, char *str)
6409 6362 {
6410 6363 i_ddi_strid *ss = (i_ddi_strid *)strid;
6411 6364 id_t id;
6412 6365 char *s;
6413 6366
6414 6367 ASSERT(ss && str);
6415 6368 if ((ss == NULL) || (str == NULL))
6416 6369 return (0);
6417 6370
6418 6371 /*
6419 6372 * Allocate an id using VM_FIRSTFIT in order to keep allocated id
6420 6373 * range as compressed as possible. This is important to minimize
6421 6374 * the amount of space used when the id is used as a ddi_soft_state
6422 6375 * index by the caller.
6423 6376 *
6424 6377 * If the id list is exhausted, increase the size of the list
6425 6378 * by the chuck size specified in ddi_strid_init and reattempt
6426 6379 * the allocation
6427 6380 */
6428 6381 if ((id = id_allocff_nosleep(ss->strid_space)) == (id_t)-1) {
6429 6382 id_space_extend(ss->strid_space, ss->strid_spacesz,
6430 6383 ss->strid_spacesz + ss->strid_chunksz);
6431 6384 ss->strid_spacesz += ss->strid_chunksz;
6432 6385 if ((id = id_allocff_nosleep(ss->strid_space)) == (id_t)-1)
6433 6386 return (0);
6434 6387 }
6435 6388
6436 6389 /*
6437 6390 * NOTE: since we create and destroy in unison we can save space by
6438 6391 * using bystr key as the byid value. This means destroy must occur
6439 6392 * in (byid, bystr) order.
6440 6393 */
6441 6394 s = i_ddi_strdup(str, KM_SLEEP);
6442 6395 if (mod_hash_insert(ss->strid_bystr, (mod_hash_key_t)s,
6443 6396 (mod_hash_val_t)(intptr_t)id) != 0) {
6444 6397 ddi_strid_free(strid, id);
6445 6398 return (0);
6446 6399 }
6447 6400 if (mod_hash_insert(ss->strid_byid, (mod_hash_key_t)(intptr_t)id,
6448 6401 (mod_hash_val_t)s) != 0) {
6449 6402 ddi_strid_free(strid, id);
6450 6403 return (0);
6451 6404 }
6452 6405
6453 6406 /* NOTE: s if freed on mod_hash_destroy by mod_hash_strval_dtor */
6454 6407 return (id);
6455 6408 }
6456 6409
6457 6410 /* allocate an id mapping within the specified set for str, return id */
6458 6411 id_t
6459 6412 ddi_strid_alloc(ddi_strid *strid, char *str)
6460 6413 {
6461 6414 return (i_ddi_strid_alloc(strid, str));
6462 6415 }
6463 6416
6464 6417 /* return the id within the specified strid given the str */
6465 6418 id_t
6466 6419 ddi_strid_str2id(ddi_strid *strid, char *str)
6467 6420 {
6468 6421 i_ddi_strid *ss = (i_ddi_strid *)strid;
6469 6422 id_t id = 0;
6470 6423 mod_hash_val_t hv;
6471 6424
6472 6425 ASSERT(ss && str);
6473 6426 if (ss && str && (mod_hash_find(ss->strid_bystr,
6474 6427 (mod_hash_key_t)str, &hv) == 0))
6475 6428 id = (int)(intptr_t)hv;
6476 6429 return (id);
6477 6430 }
6478 6431
6479 6432 /* return str within the specified strid given the id */
6480 6433 char *
6481 6434 ddi_strid_id2str(ddi_strid *strid, id_t id)
6482 6435 {
6483 6436 i_ddi_strid *ss = (i_ddi_strid *)strid;
6484 6437 char *str = NULL;
6485 6438 mod_hash_val_t hv;
6486 6439
6487 6440 ASSERT(ss && id > 0);
6488 6441 if (ss && (id > 0) && (mod_hash_find(ss->strid_byid,
6489 6442 (mod_hash_key_t)(uintptr_t)id, &hv) == 0))
6490 6443 str = (char *)hv;
6491 6444 return (str);
6492 6445 }
6493 6446
6494 6447 /* free the id mapping within the specified strid */
6495 6448 void
6496 6449 ddi_strid_free(ddi_strid *strid, id_t id)
6497 6450 {
6498 6451 i_ddi_strid *ss = (i_ddi_strid *)strid;
6499 6452 char *str;
6500 6453
6501 6454 ASSERT(ss && id > 0);
6502 6455 if ((ss == NULL) || (id <= 0))
6503 6456 return;
6504 6457
6505 6458 /* bystr key is byid value: destroy order must be (byid, bystr) */
6506 6459 str = ddi_strid_id2str(strid, id);
6507 6460 (void) mod_hash_destroy(ss->strid_byid, (mod_hash_key_t)(uintptr_t)id);
6508 6461 id_free(ss->strid_space, id);
6509 6462
6510 6463 if (str)
6511 6464 (void) mod_hash_destroy(ss->strid_bystr, (mod_hash_key_t)str);
6512 6465 }
6513 6466
6514 6467 /* destroy the strid set */
6515 6468 void
6516 6469 ddi_strid_fini(ddi_strid **strid_p)
6517 6470 {
6518 6471 i_ddi_strid *ss;
6519 6472
6520 6473 ASSERT(strid_p);
6521 6474 if (strid_p == NULL)
6522 6475 return;
6523 6476
6524 6477 ss = (i_ddi_strid *)(*strid_p);
6525 6478 if (ss == NULL)
6526 6479 return;
6527 6480
6528 6481 /* bystr key is byid value: destroy order must be (byid, bystr) */
6529 6482 if (ss->strid_byid)
6530 6483 mod_hash_destroy_hash(ss->strid_byid);
6531 6484 if (ss->strid_byid)
6532 6485 mod_hash_destroy_hash(ss->strid_bystr);
6533 6486 if (ss->strid_space)
6534 6487 id_space_destroy(ss->strid_space);
6535 6488 kmem_free(ss, sizeof (*ss));
6536 6489 *strid_p = NULL;
6537 6490 }
6538 6491
6539 6492 /*
6540 6493 * This sets the devi_addr entry in the dev_info structure 'dip' to 'name'.
6541 6494 * Storage is double buffered to prevent updates during devi_addr use -
6542 6495 * double buffering is adaquate for reliable ddi_deviname() consumption.
6543 6496 * The double buffer is not freed until dev_info structure destruction
6544 6497 * (by i_ddi_free_node).
6545 6498 */
6546 6499 void
6547 6500 ddi_set_name_addr(dev_info_t *dip, char *name)
6548 6501 {
6549 6502 char *buf = DEVI(dip)->devi_addr_buf;
6550 6503 char *newaddr;
6551 6504
6552 6505 if (buf == NULL) {
6553 6506 buf = kmem_zalloc(2 * MAXNAMELEN, KM_SLEEP);
6554 6507 DEVI(dip)->devi_addr_buf = buf;
6555 6508 }
6556 6509
6557 6510 if (name) {
6558 6511 ASSERT(strlen(name) < MAXNAMELEN);
6559 6512 newaddr = (DEVI(dip)->devi_addr == buf) ?
6560 6513 (buf + MAXNAMELEN) : buf;
6561 6514 (void) strlcpy(newaddr, name, MAXNAMELEN);
6562 6515 } else
6563 6516 newaddr = NULL;
6564 6517
6565 6518 DEVI(dip)->devi_addr = newaddr;
6566 6519 }
6567 6520
6568 6521 char *
6569 6522 ddi_get_name_addr(dev_info_t *dip)
6570 6523 {
6571 6524 return (DEVI(dip)->devi_addr);
6572 6525 }
6573 6526
6574 6527 void
6575 6528 ddi_set_parent_data(dev_info_t *dip, void *pd)
6576 6529 {
6577 6530 DEVI(dip)->devi_parent_data = pd;
6578 6531 }
6579 6532
6580 6533 void *
6581 6534 ddi_get_parent_data(dev_info_t *dip)
6582 6535 {
6583 6536 return (DEVI(dip)->devi_parent_data);
6584 6537 }
6585 6538
6586 6539 /*
6587 6540 * ddi_name_to_major: returns the major number of a named module,
6588 6541 * derived from the current driver alias binding.
6589 6542 *
6590 6543 * Caveat: drivers should avoid the use of this function, in particular
6591 6544 * together with ddi_get_name/ddi_binding name, as per
6592 6545 * major = ddi_name_to_major(ddi_get_name(devi));
6593 6546 * ddi_name_to_major() relies on the state of the device/alias binding,
6594 6547 * which can and does change dynamically as aliases are administered
6595 6548 * over time. An attached device instance cannot rely on the major
6596 6549 * number returned by ddi_name_to_major() to match its own major number.
6597 6550 *
6598 6551 * For driver use, ddi_driver_major() reliably returns the major number
6599 6552 * for the module to which the device was bound at attach time over
6600 6553 * the life of the instance.
6601 6554 * major = ddi_driver_major(dev_info_t *)
6602 6555 */
6603 6556 major_t
6604 6557 ddi_name_to_major(char *name)
6605 6558 {
6606 6559 return (mod_name_to_major(name));
6607 6560 }
6608 6561
6609 6562 /*
6610 6563 * ddi_major_to_name: Returns the module name bound to a major number.
6611 6564 */
6612 6565 char *
6613 6566 ddi_major_to_name(major_t major)
6614 6567 {
6615 6568 return (mod_major_to_name(major));
6616 6569 }
6617 6570
6618 6571 /*
6619 6572 * Return the name of the devinfo node pointed at by 'dip' in the buffer
6620 6573 * pointed at by 'name.' A devinfo node is named as a result of calling
6621 6574 * ddi_initchild().
6622 6575 *
6623 6576 * Note: the driver must be held before calling this function!
6624 6577 */
6625 6578 char *
6626 6579 ddi_deviname(dev_info_t *dip, char *name)
6627 6580 {
6628 6581 char *addrname;
6629 6582 char none = '\0';
6630 6583
6631 6584 if (dip == ddi_root_node()) {
6632 6585 *name = '\0';
6633 6586 return (name);
6634 6587 }
6635 6588
6636 6589 if (i_ddi_node_state(dip) < DS_BOUND) {
6637 6590 addrname = &none;
6638 6591 } else {
6639 6592 /*
6640 6593 * Use ddi_get_name_addr() without checking state so we get
6641 6594 * a unit-address if we are called after ddi_set_name_addr()
6642 6595 * by nexus DDI_CTL_INITCHILD code, but before completing
6643 6596 * node promotion to DS_INITIALIZED. We currently have
6644 6597 * two situations where we are called in this state:
6645 6598 * o For framework processing of a path-oriented alias.
6646 6599 * o If a SCSA nexus driver calls ddi_devid_register()
6647 6600 * from it's tran_tgt_init(9E) implementation.
6648 6601 */
6649 6602 addrname = ddi_get_name_addr(dip);
6650 6603 if (addrname == NULL)
6651 6604 addrname = &none;
6652 6605 }
6653 6606
6654 6607 if (*addrname == '\0') {
6655 6608 (void) sprintf(name, "/%s", ddi_node_name(dip));
6656 6609 } else {
6657 6610 (void) sprintf(name, "/%s@%s", ddi_node_name(dip), addrname);
6658 6611 }
6659 6612
6660 6613 return (name);
6661 6614 }
6662 6615
6663 6616 /*
6664 6617 * Spits out the name of device node, typically name@addr, for a given node,
6665 6618 * using the driver name, not the nodename.
6666 6619 *
6667 6620 * Used by match_parent. Not to be used elsewhere.
6668 6621 */
6669 6622 char *
6670 6623 i_ddi_parname(dev_info_t *dip, char *name)
6671 6624 {
6672 6625 char *addrname;
6673 6626
6674 6627 if (dip == ddi_root_node()) {
6675 6628 *name = '\0';
6676 6629 return (name);
6677 6630 }
6678 6631
6679 6632 ASSERT(i_ddi_node_state(dip) >= DS_INITIALIZED);
6680 6633
6681 6634 if (*(addrname = ddi_get_name_addr(dip)) == '\0')
6682 6635 (void) sprintf(name, "%s", ddi_binding_name(dip));
6683 6636 else
6684 6637 (void) sprintf(name, "%s@%s", ddi_binding_name(dip), addrname);
6685 6638 return (name);
6686 6639 }
6687 6640
6688 6641 static char *
6689 6642 pathname_work(dev_info_t *dip, char *path)
6690 6643 {
6691 6644 char *bp;
6692 6645
6693 6646 if (dip == ddi_root_node()) {
6694 6647 *path = '\0';
6695 6648 return (path);
6696 6649 }
6697 6650 (void) pathname_work(ddi_get_parent(dip), path);
6698 6651 bp = path + strlen(path);
6699 6652 (void) ddi_deviname(dip, bp);
6700 6653 return (path);
6701 6654 }
6702 6655
6703 6656 char *
6704 6657 ddi_pathname(dev_info_t *dip, char *path)
6705 6658 {
6706 6659 return (pathname_work(dip, path));
6707 6660 }
6708 6661
6709 6662 char *
6710 6663 ddi_pathname_minor(struct ddi_minor_data *dmdp, char *path)
6711 6664 {
6712 6665 if (dmdp->dip == NULL)
6713 6666 *path = '\0';
6714 6667 else {
6715 6668 (void) ddi_pathname(dmdp->dip, path);
6716 6669 if (dmdp->ddm_name) {
6717 6670 (void) strcat(path, ":");
6718 6671 (void) strcat(path, dmdp->ddm_name);
6719 6672 }
6720 6673 }
6721 6674 return (path);
6722 6675 }
6723 6676
6724 6677 static char *
6725 6678 pathname_work_obp(dev_info_t *dip, char *path)
6726 6679 {
6727 6680 char *bp;
6728 6681 char *obp_path;
6729 6682
6730 6683 /*
6731 6684 * look up the "obp-path" property, return the path if it exists
6732 6685 */
6733 6686 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
6734 6687 "obp-path", &obp_path) == DDI_PROP_SUCCESS) {
6735 6688 (void) strcpy(path, obp_path);
6736 6689 ddi_prop_free(obp_path);
6737 6690 return (path);
6738 6691 }
6739 6692
6740 6693 /*
6741 6694 * stop at root, no obp path
6742 6695 */
6743 6696 if (dip == ddi_root_node()) {
6744 6697 return (NULL);
6745 6698 }
6746 6699
6747 6700 obp_path = pathname_work_obp(ddi_get_parent(dip), path);
6748 6701 if (obp_path == NULL)
6749 6702 return (NULL);
6750 6703
6751 6704 /*
6752 6705 * append our component to parent's obp path
6753 6706 */
6754 6707 bp = path + strlen(path);
6755 6708 if (*(bp - 1) != '/')
6756 6709 (void) strcat(bp++, "/");
6757 6710 (void) ddi_deviname(dip, bp);
6758 6711 return (path);
6759 6712 }
6760 6713
/*
 * return the 'obp-path' based path for the given node, or NULL if the node
 * does not have a different obp path. NOTE: Unlike ddi_pathname, this
 * function can't be called from interrupt context (since we need to
 * lookup a string property).
 */
char *
ddi_pathname_obp(dev_info_t *dip, char *path)
{
	ASSERT(!servicing_interrupt());

	/* both a node and a result buffer are required */
	if (dip == NULL || path == NULL)
		return (NULL);

	/* split work into a separate function to aid debugging */
	return (pathname_work_obp(dip, path));
}
6777 6730
6778 6731 int
6779 6732 ddi_pathname_obp_set(dev_info_t *dip, char *component)
6780 6733 {
6781 6734 dev_info_t *pdip;
6782 6735 char *obp_path = NULL;
6783 6736 int rc = DDI_FAILURE;
6784 6737
6785 6738 if (dip == NULL)
6786 6739 return (DDI_FAILURE);
6787 6740
6788 6741 obp_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
6789 6742
6790 6743 pdip = ddi_get_parent(dip);
6791 6744
6792 6745 if (ddi_pathname_obp(pdip, obp_path) == NULL) {
6793 6746 (void) ddi_pathname(pdip, obp_path);
6794 6747 }
6795 6748
6796 6749 if (component) {
6797 6750 (void) strncat(obp_path, "/", MAXPATHLEN);
6798 6751 (void) strncat(obp_path, component, MAXPATHLEN);
6799 6752 }
6800 6753 rc = ndi_prop_update_string(DDI_DEV_T_NONE, dip, "obp-path",
6801 6754 obp_path);
6802 6755
6803 6756 if (obp_path)
6804 6757 kmem_free(obp_path, MAXPATHLEN);
6805 6758
6806 6759 return (rc);
6807 6760 }
6808 6761
6809 6762 /*
6810 6763 * Given a dev_t, return the pathname of the corresponding device in the
6811 6764 * buffer pointed at by "path." The buffer is assumed to be large enough
6812 6765 * to hold the pathname of the device (MAXPATHLEN).
6813 6766 *
6814 6767 * The pathname of a device is the pathname of the devinfo node to which
6815 6768 * the device "belongs," concatenated with the character ':' and the name
6816 6769 * of the minor node corresponding to the dev_t. If spec_type is 0 then
6817 6770 * just the pathname of the devinfo node is returned without driving attach
6818 6771 * of that node. For a non-zero spec_type, an attach is performed and a
6819 6772 * search of the minor list occurs.
6820 6773 *
6821 6774 * It is possible that the path associated with the dev_t is not
6822 6775 * currently available in the devinfo tree. In order to have a
6823 6776 * dev_t, a device must have been discovered before, which means
6824 6777 * that the path is always in the instance tree. The one exception
6825 6778 * to this is if the dev_t is associated with a pseudo driver, in
6826 6779 * which case the device must exist on the pseudo branch of the
6827 6780 * devinfo tree as a result of parsing .conf files.
6828 6781 */
int
ddi_dev_pathname(dev_t devt, int spec_type, char *path)
{
	int circ;
	major_t major = getmajor(devt);
	int instance;
	dev_info_t *dip;
	char *minorname;
	char *drvname;

	if (major >= devcnt)
		goto fail;
	if (major == clone_major) {
		/*
		 * clone has no minor nodes, manufacture the path here:
		 * the minor number of a clone dev_t is the major of the
		 * real driver.
		 */
		if ((drvname = ddi_major_to_name(getminor(devt))) == NULL)
			goto fail;

		(void) snprintf(path, MAXPATHLEN, "%s:%s", CLONE_PATH, drvname);
		return (DDI_SUCCESS);
	}

	/* extract instance from devt (getinfo(9E) DDI_INFO_DEVT2INSTANCE). */
	if ((instance = dev_to_instance(devt)) == -1)
		goto fail;

	/* reconstruct the path given the major/instance */
	if (e_ddi_majorinstance_to_path(major, instance, path) != DDI_SUCCESS)
		goto fail;

	/* if spec_type given we must drive attach and search minor nodes */
	if ((spec_type == S_IFCHR) || (spec_type == S_IFBLK)) {
		/* attach the path so we can search minors */
		if ((dip = e_ddi_hold_devi_by_path(path, 0)) == NULL)
			goto fail;

		/* Add minorname to path (minor list walked under busy lock) */
		ndi_devi_enter(dip, &circ);
		minorname = i_ddi_devtspectype_to_minorname(dip,
		    devt, spec_type);
		if (minorname) {
			(void) strcat(path, ":");
			(void) strcat(path, minorname);
		}
		ndi_devi_exit(dip, circ);
		/* release the hold taken by e_ddi_hold_devi_by_path() */
		ddi_release_devi(dip);
		if (minorname == NULL)
			goto fail;
	}
	ASSERT(strlen(path) < MAXPATHLEN);
	return (DDI_SUCCESS);

	/* all failure paths return an empty path */
fail:	*path = 0;
	return (DDI_FAILURE);
}
6883 6836
/*
 * Given a major number and an instance, return the path.
 * This interface does NOT drive attach.
 *
 * Returns DDI_SUCCESS with the path (< MAXPATHLEN) in "path", or
 * DDI_FAILURE with *path set to the empty string.
 */
int
e_ddi_majorinstance_to_path(major_t major, int instance, char *path)
{
	struct devnames *dnp;
	dev_info_t *dip;

	/* reject out-of-range majors and the "no instance" sentinel */
	if ((major >= devcnt) || (instance == -1)) {
		*path = 0;
		return (DDI_FAILURE);
	}

	/* look for the major/instance in the instance tree */
	if (e_ddi_instance_majorinstance_to_path(major, instance,
	    path) == DDI_SUCCESS) {
		ASSERT(strlen(path) < MAXPATHLEN);
		return (DDI_SUCCESS);
	}

	/*
	 * Not in instance tree, find the instance on the per driver list and
	 * construct path to instance via ddi_pathname(). This is how paths
	 * down the 'pseudo' branch are constructed.
	 */
	dnp = &(devnamesp[major]);
	LOCK_DEV_OPS(&(dnp->dn_lock));
	for (dip = dnp->dn_head; dip;
	    dip = (dev_info_t *)DEVI(dip)->devi_next) {
		/* Skip if instance does not match. */
		if (DEVI(dip)->devi_instance != instance)
			continue;

		/*
		 * An ndi_hold_devi() does not prevent DS_INITIALIZED->DS_BOUND
		 * node demotion, so it is not an effective way of ensuring
		 * that the ddi_pathname result has a unit-address. Instead,
		 * we reverify the node state after calling ddi_pathname().
		 */
		if (i_ddi_node_state(dip) >= DS_INITIALIZED) {
			(void) ddi_pathname(dip, path);
			/* demoted while we built the path: keep looking */
			if (i_ddi_node_state(dip) < DS_INITIALIZED)
				continue;
			UNLOCK_DEV_OPS(&(dnp->dn_lock));
			ASSERT(strlen(path) < MAXPATHLEN);
			return (DDI_SUCCESS);
		}
	}
	UNLOCK_DEV_OPS(&(dnp->dn_lock));

	/* can't reconstruct the path */
	*path = 0;
	return (DDI_FAILURE);
}
6940 6893
6941 6894 #define GLD_DRIVER_PPA "SUNW,gld_v0_ppa"
6942 6895
/*
 * Given the dip for a network interface return the ppa for that interface.
 *
 * In all cases except GLD v0 drivers, the ppa == instance.
 * In the case of GLD v0 drivers, the ppa is equal to the attach order.
 * So for these drivers when the attach routine calls gld_register(),
 * the GLD framework creates an integer property called "gld_driver_ppa"
 * that can be queried here.
 *
 * The only time this function is used is when a system is booting over nfs.
 * In this case the system has to resolve the pathname of the boot device
 * to it's ppa.
 */
int
i_ddi_devi_get_ppa(dev_info_t *dip)
{
	/* default to the instance when no GLD v0 ppa property exists */
	return (ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    GLD_DRIVER_PPA, ddi_get_instance(dip)));
}
6963 6916
/*
 * i_ddi_devi_set_ppa() should only be called from gld_register()
 * and only for GLD v0 drivers
 */
void
i_ddi_devi_set_ppa(dev_info_t *dip, int ppa)
{
	/* record the ppa as the property i_ddi_devi_get_ppa() reads back */
	(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip, GLD_DRIVER_PPA, ppa);
}
6973 6926
6974 6927
6975 6928 /*
6976 6929 * Private DDI Console bell functions.
6977 6930 */
6978 6931 void
6979 6932 ddi_ring_console_bell(clock_t duration)
6980 6933 {
6981 6934 if (ddi_console_bell_func != NULL)
6982 6935 (*ddi_console_bell_func)(duration);
6983 6936 }
6984 6937
/*
 * Register (or clear, with NULL) the function ddi_ring_console_bell()
 * uses to sound the console bell.
 */
void
ddi_set_console_bell(void (*bellfunc)(clock_t duration))
{
	ddi_console_bell_func = bellfunc;
}
6990 6943
6991 6944 int
6992 6945 ddi_dma_alloc_handle(dev_info_t *dip, ddi_dma_attr_t *attr,
6993 6946 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
6994 6947 {
6995 6948 int (*funcp)() = ddi_dma_allochdl;
6996 6949 ddi_dma_attr_t dma_attr;
6997 6950 struct bus_ops *bop;
6998 6951
6999 6952 if (attr == (ddi_dma_attr_t *)0)
7000 6953 return (DDI_DMA_BADATTR);
7001 6954
7002 6955 dma_attr = *attr;
7003 6956
7004 6957 bop = DEVI(dip)->devi_ops->devo_bus_ops;
7005 6958 if (bop && bop->bus_dma_allochdl)
7006 6959 funcp = bop->bus_dma_allochdl;
7007 6960
7008 6961 return ((*funcp)(dip, dip, &dma_attr, waitfp, arg, handlep));
7009 6962 }
7010 6963
7011 6964 void
7012 6965 ddi_dma_free_handle(ddi_dma_handle_t *handlep)
7013 6966 {
7014 6967 ddi_dma_handle_t h = *handlep;
7015 6968 (void) ddi_dma_freehdl(HD, HD, h);
7016 6969 }
7017 6970
7018 6971 static uintptr_t dma_mem_list_id = 0;
7019 6972
7020 6973
7021 6974 int
7022 6975 ddi_dma_mem_alloc(ddi_dma_handle_t handle, size_t length,
7023 6976 ddi_device_acc_attr_t *accattrp, uint_t flags,
7024 6977 int (*waitfp)(caddr_t), caddr_t arg, caddr_t *kaddrp,
7025 6978 size_t *real_length, ddi_acc_handle_t *handlep)
7026 6979 {
7027 6980 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7028 6981 dev_info_t *dip = hp->dmai_rdip;
7029 6982 ddi_acc_hdl_t *ap;
7030 6983 ddi_dma_attr_t *attrp = &hp->dmai_attr;
7031 6984 uint_t sleepflag, xfermodes;
7032 6985 int (*fp)(caddr_t);
7033 6986 int rval;
7034 6987
7035 6988 if (waitfp == DDI_DMA_SLEEP)
7036 6989 fp = (int (*)())KM_SLEEP;
7037 6990 else if (waitfp == DDI_DMA_DONTWAIT)
7038 6991 fp = (int (*)())KM_NOSLEEP;
7039 6992 else
7040 6993 fp = waitfp;
7041 6994 *handlep = impl_acc_hdl_alloc(fp, arg);
7042 6995 if (*handlep == NULL)
7043 6996 return (DDI_FAILURE);
7044 6997
7045 6998 /* check if the cache attributes are supported */
7046 6999 if (i_ddi_check_cache_attr(flags) == B_FALSE)
7047 7000 return (DDI_FAILURE);
7048 7001
7049 7002 /*
7050 7003 * Transfer the meaningful bits to xfermodes.
7051 7004 * Double-check if the 3rd party driver correctly sets the bits.
7052 7005 * If not, set DDI_DMA_STREAMING to keep compatibility.
7053 7006 */
7054 7007 xfermodes = flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING);
7055 7008 if (xfermodes == 0) {
7056 7009 xfermodes = DDI_DMA_STREAMING;
7057 7010 }
7058 7011
7059 7012 /*
7060 7013 * initialize the common elements of data access handle
7061 7014 */
7062 7015 ap = impl_acc_hdl_get(*handlep);
7063 7016 ap->ah_vers = VERS_ACCHDL;
7064 7017 ap->ah_dip = dip;
7065 7018 ap->ah_offset = 0;
7066 7019 ap->ah_len = 0;
7067 7020 ap->ah_xfermodes = flags;
7068 7021 ap->ah_acc = *accattrp;
7069 7022
7070 7023 sleepflag = ((waitfp == DDI_DMA_SLEEP) ? 1 : 0);
7071 7024 if (xfermodes == DDI_DMA_CONSISTENT) {
7072 7025 rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
7073 7026 flags, accattrp, kaddrp, NULL, ap);
7074 7027 *real_length = length;
7075 7028 } else {
7076 7029 rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
7077 7030 flags, accattrp, kaddrp, real_length, ap);
7078 7031 }
7079 7032 if (rval == DDI_SUCCESS) {
7080 7033 ap->ah_len = (off_t)(*real_length);
7081 7034 ap->ah_addr = *kaddrp;
7082 7035 } else {
7083 7036 impl_acc_hdl_free(*handlep);
7084 7037 *handlep = (ddi_acc_handle_t)NULL;
7085 7038 if (waitfp != DDI_DMA_SLEEP && waitfp != DDI_DMA_DONTWAIT) {
7086 7039 ddi_set_callback(waitfp, arg, &dma_mem_list_id);
7087 7040 }
7088 7041 rval = DDI_FAILURE;
7089 7042 }
7090 7043 return (rval);
7091 7044 }
7092 7045
7093 7046 void
7094 7047 ddi_dma_mem_free(ddi_acc_handle_t *handlep)
7095 7048 {
7096 7049 ddi_acc_hdl_t *ap;
7097 7050
7098 7051 ap = impl_acc_hdl_get(*handlep);
7099 7052 ASSERT(ap);
7100 7053
7101 7054 i_ddi_mem_free((caddr_t)ap->ah_addr, ap);
7102 7055
7103 7056 /*
7104 7057 * free the handle
7105 7058 */
7106 7059 impl_acc_hdl_free(*handlep);
7107 7060 *handlep = (ddi_acc_handle_t)NULL;
7108 7061
7109 7062 if (dma_mem_list_id != 0) {
7110 7063 ddi_run_callback(&dma_mem_list_id);
7111 7064 }
7112 7065 }
7113 7066
/*
 * Bind the memory described by buf(9S) "bp" to the DMA handle: build a
 * ddi_dma_req describing the buffer's pages or virtual address range and
 * dispatch to the parent nexus bind routine cached on the devinfo node.
 */
int
ddi_dma_buf_bind_handle(ddi_dma_handle_t handle, struct buf *bp,
    uint_t flags, int (*waitfp)(caddr_t), caddr_t arg,
    ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	dev_info_t *dip, *rdip;
	struct ddi_dma_req dmareq;
	int (*funcp)();

	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount;

	if (bp->b_flags & B_PAGEIO) {
		/* paged I/O: describe the object as a page list */
		dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_offset =
		    (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET);
	} else {
		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr;
		if (bp->b_flags & B_SHADOW) {
			/* shadow page list accompanies the virtual address */
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv =
			    bp->b_shadow;
			dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR;
		} else {
			dmareq.dmar_object.dmao_type =
			    (bp->b_flags & (B_PHYS | B_REMAPPED)) ?
			    DMA_OTYP_BUFVADDR : DMA_OTYP_VADDR;
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
		}

		/*
		 * If the buffer has no proc pointer, or the proc
		 * struct has the kernel address space, or the buffer has
		 * been marked B_REMAPPED (meaning that it is now
		 * mapped into the kernel's address space), then
		 * the address space is kas (kernel address space).
		 */
		if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) ||
		    (bp->b_flags & B_REMAPPED)) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as =
			    bp->b_proc->p_as;
		}
	}

	/* dispatch to the bind routine cached when the handle was made */
	dip = rdip = hp->dmai_rdip;
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
	funcp = DEVI(rdip)->devi_bus_dma_bindfunc;
	return ((*funcp)(dip, rdip, handle, &dmareq, cookiep, ccountp));
}
7169 7122
/*
 * Bind the virtual address range [addr, addr + len) in address space
 * "as" to the DMA handle.  A zero-length request is rejected with
 * DDI_DMA_NOMAPPING; otherwise the request is dispatched to the parent
 * nexus bind routine cached on the devinfo node.
 */
int
ddi_dma_addr_bind_handle(ddi_dma_handle_t handle, struct as *as,
    caddr_t addr, size_t len, uint_t flags, int (*waitfp)(caddr_t),
    caddr_t arg, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	dev_info_t *dip, *rdip;
	struct ddi_dma_req dmareq;
	int (*funcp)();

	if (len == (uint_t)0) {
		return (DDI_DMA_NOMAPPING);
	}
	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = len;
	dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
	dmareq.dmar_object.dmao_obj.virt_obj.v_as = as;
	dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr;
	dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;

	/* dispatch to the bind routine cached when the handle was made */
	dip = rdip = hp->dmai_rdip;
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
	funcp = DEVI(rdip)->devi_bus_dma_bindfunc;
	return ((*funcp)(dip, rdip, handle, &dmareq, cookiep, ccountp));
}
7198 7151
7199 7152 void
7200 7153 ddi_dma_nextcookie(ddi_dma_handle_t handle, ddi_dma_cookie_t *cookiep)
7201 7154 {
7202 7155 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7203 7156 ddi_dma_cookie_t *cp;
7204 7157
7205 7158 cp = hp->dmai_cookie;
7206 7159 ASSERT(cp);
7207 7160
7208 7161 cookiep->dmac_notused = cp->dmac_notused;
7209 7162 cookiep->dmac_type = cp->dmac_type;
7210 7163 cookiep->dmac_address = cp->dmac_address;
7211 7164 cookiep->dmac_size = cp->dmac_size;
7212 7165 hp->dmai_cookie++;
7213 7166 }
7214 7167
7215 7168 int
7216 7169 ddi_dma_numwin(ddi_dma_handle_t handle, uint_t *nwinp)
7217 7170 {
7218 7171 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7219 7172 if ((hp->dmai_rflags & DDI_DMA_PARTIAL) == 0) {
7220 7173 return (DDI_FAILURE);
7221 7174 } else {
7222 7175 *nwinp = hp->dmai_nwin;
7223 7176 return (DDI_SUCCESS);
7224 7177 }
7225 7178 }
7226 7179
7227 7180 int
7228 7181 ddi_dma_getwin(ddi_dma_handle_t h, uint_t win, off_t *offp,
7229 7182 size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
7230 7183 {
7231 7184 int (*funcp)() = ddi_dma_win;
7232 7185 struct bus_ops *bop;
7233 7186
7234 7187 bop = DEVI(HD)->devi_ops->devo_bus_ops;
7235 7188 if (bop && bop->bus_dma_win)
7236 7189 funcp = bop->bus_dma_win;
7237 7190
7238 7191 return ((*funcp)(HD, HD, h, win, offp, lenp, cookiep, ccountp));
7239 7192 }
7240 7193
/*
 * Request 64-bit SBus transfers with the given burst sizes for this
 * handle via the DDI_DMA_SET_SBUS64 mctl operation.
 */
int
ddi_dma_set_sbus64(ddi_dma_handle_t h, ulong_t burstsizes)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SET_SBUS64, 0,
	    &burstsizes, 0, 0));
}
7247 7200
/*
 * Default fault check: report the handle's cached fault state
 * (nonzero == faulted), as maintained by i_ddi_dma_set/clr_fault().
 */
int
i_ddi_dma_fault_check(ddi_dma_impl_t *hp)
{
	return (hp->dmai_fault);
}
7253 7206
7254 7207 int
7255 7208 ddi_check_dma_handle(ddi_dma_handle_t handle)
7256 7209 {
7257 7210 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7258 7211 int (*check)(ddi_dma_impl_t *);
7259 7212
7260 7213 if ((check = hp->dmai_fault_check) == NULL)
7261 7214 check = i_ddi_dma_fault_check;
7262 7215
7263 7216 return (((*check)(hp) == DDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
7264 7217 }
7265 7218
7266 7219 void
7267 7220 i_ddi_dma_set_fault(ddi_dma_handle_t handle)
7268 7221 {
7269 7222 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7270 7223 void (*notify)(ddi_dma_impl_t *);
7271 7224
7272 7225 if (!hp->dmai_fault) {
7273 7226 hp->dmai_fault = 1;
7274 7227 if ((notify = hp->dmai_fault_notify) != NULL)
7275 7228 (*notify)(hp);
7276 7229 }
7277 7230 }
7278 7231
7279 7232 void
7280 7233 i_ddi_dma_clr_fault(ddi_dma_handle_t handle)
7281 7234 {
7282 7235 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7283 7236 void (*notify)(ddi_dma_impl_t *);
7284 7237
7285 7238 if (hp->dmai_fault) {
7286 7239 hp->dmai_fault = 0;
7287 7240 if ((notify = hp->dmai_fault_notify) != NULL)
7288 7241 (*notify)(hp);
7289 7242 }
7290 7243 }
7291 7244
7292 7245 /*
7293 7246 * register mapping routines.
7294 7247 */
/*
 * Map register set "rnumber" of "dip" for kernel access.  On success
 * *addrp receives the mapped address and *handle an access handle; on
 * failure the handle is freed and *handle set to NULL.  Undo with
 * ddi_regs_map_free().
 */
int
ddi_regs_map_setup(dev_info_t *dip, uint_t rnumber, caddr_t *addrp,
    offset_t offset, offset_t len, ddi_device_acc_attr_t *accattrp,
    ddi_acc_handle_t *handle)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;

	/*
	 * Allocate and initialize the common elements of data access handle.
	 */
	*handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	hp = impl_acc_hdl_get(*handle);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = rnumber;
	hp->ah_offset = offset;
	hp->ah_len = len;
	hp->ah_acc = *accattrp;

	/*
	 * Set up the mapping request and call to parent.
	 */
	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;
	result = ddi_map(dip, &mr, offset, len, addrp);

	/*
	 * check for end result
	 */
	if (result != DDI_SUCCESS) {
		/* mapping failed: don't hand back a dangling handle */
		impl_acc_hdl_free(*handle);
		*handle = (ddi_acc_handle_t)NULL;
	} else {
		hp->ah_addr = *addrp;
	}

	return (result);
}
7340 7293
/*
 * Unmap a register mapping established by ddi_regs_map_setup() and free
 * the access handle; *handlep is set to NULL.
 */
void
ddi_regs_map_free(ddi_acc_handle_t *handlep)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;

	hp = impl_acc_hdl_get(*handlep);
	ASSERT(hp);

	/* rebuild the request from the parameters cached in the handle */
	mr.map_op = DDI_MO_UNMAP;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = hp->ah_rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;

	/*
	 * Call my parent to unmap my regs.
	 */
	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
	    hp->ah_len, &hp->ah_addr);
	/*
	 * free the handle
	 */
	impl_acc_hdl_free(*handlep);
	*handlep = (ddi_acc_handle_t)NULL;
}
7369 7322
7370 7323 int
7371 7324 ddi_device_zero(ddi_acc_handle_t handle, caddr_t dev_addr, size_t bytecount,
7372 7325 ssize_t dev_advcnt, uint_t dev_datasz)
7373 7326 {
7374 7327 uint8_t *b;
7375 7328 uint16_t *w;
7376 7329 uint32_t *l;
7377 7330 uint64_t *ll;
7378 7331
7379 7332 /* check for total byte count is multiple of data transfer size */
7380 7333 if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
7381 7334 return (DDI_FAILURE);
7382 7335
7383 7336 switch (dev_datasz) {
7384 7337 case DDI_DATA_SZ01_ACC:
7385 7338 for (b = (uint8_t *)dev_addr;
7386 7339 bytecount != 0; bytecount -= 1, b += dev_advcnt)
7387 7340 ddi_put8(handle, b, 0);
7388 7341 break;
7389 7342 case DDI_DATA_SZ02_ACC:
7390 7343 for (w = (uint16_t *)dev_addr;
7391 7344 bytecount != 0; bytecount -= 2, w += dev_advcnt)
7392 7345 ddi_put16(handle, w, 0);
7393 7346 break;
7394 7347 case DDI_DATA_SZ04_ACC:
7395 7348 for (l = (uint32_t *)dev_addr;
7396 7349 bytecount != 0; bytecount -= 4, l += dev_advcnt)
7397 7350 ddi_put32(handle, l, 0);
7398 7351 break;
7399 7352 case DDI_DATA_SZ08_ACC:
7400 7353 for (ll = (uint64_t *)dev_addr;
7401 7354 bytecount != 0; bytecount -= 8, ll += dev_advcnt)
7402 7355 ddi_put64(handle, ll, 0x0ll);
7403 7356 break;
7404 7357 default:
7405 7358 return (DDI_FAILURE);
7406 7359 }
7407 7360 return (DDI_SUCCESS);
7408 7361 }
7409 7362
7410 7363 int
7411 7364 ddi_device_copy(
7412 7365 ddi_acc_handle_t src_handle, caddr_t src_addr, ssize_t src_advcnt,
7413 7366 ddi_acc_handle_t dest_handle, caddr_t dest_addr, ssize_t dest_advcnt,
7414 7367 size_t bytecount, uint_t dev_datasz)
7415 7368 {
7416 7369 uint8_t *b_src, *b_dst;
7417 7370 uint16_t *w_src, *w_dst;
7418 7371 uint32_t *l_src, *l_dst;
7419 7372 uint64_t *ll_src, *ll_dst;
7420 7373
7421 7374 /* check for total byte count is multiple of data transfer size */
7422 7375 if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
7423 7376 return (DDI_FAILURE);
7424 7377
7425 7378 switch (dev_datasz) {
7426 7379 case DDI_DATA_SZ01_ACC:
7427 7380 b_src = (uint8_t *)src_addr;
7428 7381 b_dst = (uint8_t *)dest_addr;
7429 7382
7430 7383 for (; bytecount != 0; bytecount -= 1) {
7431 7384 ddi_put8(dest_handle, b_dst,
7432 7385 ddi_get8(src_handle, b_src));
7433 7386 b_dst += dest_advcnt;
7434 7387 b_src += src_advcnt;
7435 7388 }
7436 7389 break;
7437 7390 case DDI_DATA_SZ02_ACC:
7438 7391 w_src = (uint16_t *)src_addr;
7439 7392 w_dst = (uint16_t *)dest_addr;
7440 7393
7441 7394 for (; bytecount != 0; bytecount -= 2) {
7442 7395 ddi_put16(dest_handle, w_dst,
7443 7396 ddi_get16(src_handle, w_src));
7444 7397 w_dst += dest_advcnt;
7445 7398 w_src += src_advcnt;
7446 7399 }
7447 7400 break;
7448 7401 case DDI_DATA_SZ04_ACC:
7449 7402 l_src = (uint32_t *)src_addr;
7450 7403 l_dst = (uint32_t *)dest_addr;
7451 7404
7452 7405 for (; bytecount != 0; bytecount -= 4) {
7453 7406 ddi_put32(dest_handle, l_dst,
7454 7407 ddi_get32(src_handle, l_src));
7455 7408 l_dst += dest_advcnt;
7456 7409 l_src += src_advcnt;
7457 7410 }
7458 7411 break;
7459 7412 case DDI_DATA_SZ08_ACC:
7460 7413 ll_src = (uint64_t *)src_addr;
7461 7414 ll_dst = (uint64_t *)dest_addr;
7462 7415
7463 7416 for (; bytecount != 0; bytecount -= 8) {
7464 7417 ddi_put64(dest_handle, ll_dst,
7465 7418 ddi_get64(src_handle, ll_src));
7466 7419 ll_dst += dest_advcnt;
7467 7420 ll_src += src_advcnt;
7468 7421 }
7469 7422 break;
7470 7423 default:
7471 7424 return (DDI_FAILURE);
7472 7425 }
7473 7426 return (DDI_SUCCESS);
7474 7427 }
7475 7428
/*
 * Byte-swap helper macros; each larger swap is built from the smaller
 * one.  Arguments are expanded more than once, so they must be free of
 * side effects.
 */
#define	swap16(value)  \
	((((value) & 0xff) << 8) | ((value) >> 8))

#define	swap32(value)	\
	(((uint32_t)swap16((uint16_t)((value) & 0xffff)) << 16) | \
	(uint32_t)swap16((uint16_t)((value) >> 16)))

#define	swap64(value)	\
	(((uint64_t)swap32((uint32_t)((value) & 0xffffffff))	\
	    << 32) | \
	(uint64_t)swap32((uint32_t)((value) >> 32)))
7487 7440
/*
 * Exchange the two bytes of a 16-bit quantity.
 */
uint16_t
ddi_swap16(uint16_t value)
{
	return ((uint16_t)(((value & 0xff) << 8) | (value >> 8)));
}
7493 7446
/*
 * Reverse the byte order of a 32-bit quantity.
 */
uint32_t
ddi_swap32(uint32_t value)
{
	return (((value & 0x000000ffU) << 24) |
	    ((value & 0x0000ff00U) << 8) |
	    ((value & 0x00ff0000U) >> 8) |
	    ((value & 0xff000000U) >> 24));
}
7499 7452
/*
 * Reverse the byte order of a 64-bit quantity by swapping progressively
 * larger lanes: bytes, then 16-bit halves, then 32-bit halves.
 */
uint64_t
ddi_swap64(uint64_t value)
{
	value = ((value & 0x00ff00ff00ff00ffULL) << 8) |
	    ((value >> 8) & 0x00ff00ff00ff00ffULL);
	value = ((value & 0x0000ffff0000ffffULL) << 16) |
	    ((value >> 16) & 0x0000ffff0000ffffULL);
	return ((value << 32) | (value >> 32));
}
7505 7458
7506 7459 /*
7507 7460 * Convert a binding name to a driver name.
7508 7461 * A binding name is the name used to determine the driver for a
7509 7462 * device - it may be either an alias for the driver or the name
7510 7463 * of the driver itself.
7511 7464 */
7512 7465 char *
7513 7466 i_binding_to_drv_name(char *bname)
7514 7467 {
7515 7468 major_t major_no;
7516 7469
7517 7470 ASSERT(bname != NULL);
7518 7471
7519 7472 if ((major_no = ddi_name_to_major(bname)) == -1)
7520 7473 return (NULL);
7521 7474 return (ddi_major_to_name(major_no));
7522 7475 }
7523 7476
/*
 * Search for minor name that has specified dev_t and spec_type.
 * If spec_type is zero then any dev_t match works. Since we
 * are returning a pointer to the minor name string, we require the
 * caller to do the locking.
 */
char *
i_ddi_devtspectype_to_minorname(dev_info_t *dip, dev_t dev, int spec_type)
{
	struct ddi_minor_data *dmdp;

	/*
	 * The did layered driver currently intentionally returns a
	 * devinfo ptr for an underlying sd instance based on a did
	 * dev_t. In this case it is not an error.
	 *
	 * The did layered driver is associated with Sun Cluster.
	 */
	ASSERT((ddi_driver_major(dip) == getmajor(dev)) ||
	    (strcmp(ddi_major_to_name(getmajor(dev)), "did") == 0));

	/* caller must hold the node busy so the minor list is stable */
	ASSERT(DEVI_BUSY_OWNED(dip));
	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
		/* only real minor nodes; alias types are skipped */
		if (((dmdp->type == DDM_MINOR) ||
		    (dmdp->type == DDM_INTERNAL_PATH) ||
		    (dmdp->type == DDM_DEFAULT)) &&
		    (dmdp->ddm_dev == dev) &&
		    ((((spec_type & (S_IFCHR|S_IFBLK))) == 0) ||
		    (dmdp->ddm_spec_type == spec_type)))
			return (dmdp->ddm_name);
	}

	return (NULL);
}
7558 7511
/*
 * Find the devt and spectype of the specified minor_name.
 * Return DDI_FAILURE if minor_name not found. Since we are
 * returning everything via arguments we can do the locking.
 */
int
i_ddi_minorname_to_devtspectype(dev_info_t *dip, char *minor_name,
    dev_t *devtp, int *spectypep)
{
	int circ;
	struct ddi_minor_data *dmdp;

	/* deal with clone minor nodes */
	if (dip == clone_dip) {
		major_t major;
		/*
		 * Make sure minor_name is a STREAMS driver.
		 * We load the driver but don't attach to any instances.
		 */

		major = ddi_name_to_major(minor_name);
		if (major == DDI_MAJOR_T_NONE)
			return (DDI_FAILURE);

		if (ddi_hold_driver(major) == NULL)
			return (DDI_FAILURE);

		/* non-STREAMS drivers cannot be cloned this way */
		if (STREAMSTAB(major) == NULL) {
			ddi_rele_driver(major);
			return (DDI_FAILURE);
		}
		ddi_rele_driver(major);

		/* clone dev_t encodes the target driver's major as minor */
		if (devtp)
			*devtp = makedevice(clone_major, (minor_t)major);

		if (spectypep)
			*spectypep = S_IFCHR;

		return (DDI_SUCCESS);
	}

	/* walk the minor list under the node's busy lock */
	ndi_devi_enter(dip, &circ);
	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
		if (((dmdp->type != DDM_MINOR) &&
		    (dmdp->type != DDM_INTERNAL_PATH) &&
		    (dmdp->type != DDM_DEFAULT)) ||
		    strcmp(minor_name, dmdp->ddm_name))
			continue;

		if (devtp)
			*devtp = dmdp->ddm_dev;

		if (spectypep)
			*spectypep = dmdp->ddm_spec_type;

		ndi_devi_exit(dip, circ);
		return (DDI_SUCCESS);
	}
	ndi_devi_exit(dip, circ);

	return (DDI_FAILURE);
}
7622 7575
/* NOTE(review): used by devid generation code later in this file — confirm */
static kmutex_t devid_gen_mutex;
static short devid_gen_number;

#ifdef DEBUG

/*
 * Debug knobs: when set, deliberately corrupt registered devids and/or
 * their driver-name hints to exercise error paths (see the DEBUG section
 * of i_ddi_devid_register()).
 */
static int devid_register_corrupt = 0;
static int devid_register_corrupt_major = 0;
static int devid_register_corrupt_hint = 0;
static int devid_register_corrupt_hint_major = 0;

/* enables the DDI_DEBUG_DEVID_DEVTS tracing below */
static int devid_lyr_debug = 0;

#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)		\
	if (devid_lyr_debug)					\
		ddi_debug_devid_devts(msg, ndevs, devs)

#else

/* non-DEBUG kernels compile the tracing away entirely */
#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)

#endif /* DEBUG */
7644 7597
7645 7598
7646 7599 #ifdef DEBUG
7647 7600
/* DEBUG only: print each dev_t in "devs" under a "msg" banner. */
static void
ddi_debug_devid_devts(char *msg, int ndevs, dev_t *devs)
{
	int i;

	cmn_err(CE_CONT, "%s:\n", msg);
	for (i = 0; i < ndevs; i++) {
		cmn_err(CE_CONT, " 0x%lx\n", devs[i]);
	}
}
7658 7611
/* DEBUG only: print each path string in "paths" under a "msg" banner. */
static void
ddi_debug_devid_paths(char *msg, int npaths, char **paths)
{
	int i;

	cmn_err(CE_CONT, "%s:\n", msg);
	for (i = 0; i < npaths; i++) {
		cmn_err(CE_CONT, " %s\n", paths[i]);
	}
}
7669 7622
/* DEBUG only: print the dev_ts associated with a single path. */
static void
ddi_debug_devid_devts_per_path(char *path, int ndevs, dev_t *devs)
{
	int i;

	cmn_err(CE_CONT, "dev_ts per path %s\n", path);
	for (i = 0; i < ndevs; i++) {
		cmn_err(CE_CONT, " 0x%lx\n", devs[i]);
	}
}
7680 7633
7681 7634 #endif /* DEBUG */
7682 7635
/*
 * Register device id into DDI framework.
 * Must be called when the driver is bound.
 *
 * Stamps the driver-name hint into the devid, publishes the encoded
 * devid as the DEVID_PROP_NAME string property, and caches the encoded
 * string on the devinfo node for interrupt-context FMA use.
 */
static int
i_ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
{
	impl_devid_t *i_devid = (impl_devid_t *)devid;
	size_t driver_len;
	const char *driver_name;
	char *devid_str;
	major_t major;

	if ((dip == NULL) ||
	    ((major = ddi_driver_major(dip)) == DDI_MAJOR_T_NONE))
		return (DDI_FAILURE);

	/* verify that the devid is valid */
	if (ddi_devid_valid(devid) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* Updating driver name hint in devid */
	driver_name = ddi_driver_name(dip);
	driver_len = strlen(driver_name);
	if (driver_len > DEVID_HINT_SIZE) {
		/* Pick up last four characters of driver name */
		driver_name += driver_len - DEVID_HINT_SIZE;
		driver_len = DEVID_HINT_SIZE;
	}
	/* hint field is fixed-size and not NUL-terminated */
	bzero(i_devid->did_driver, DEVID_HINT_SIZE);
	bcopy(driver_name, i_devid->did_driver, driver_len);

#ifdef DEBUG
	/* Corrupt the devid for testing. */
	if (devid_register_corrupt)
		i_devid->did_id[0] += devid_register_corrupt;
	if (devid_register_corrupt_major &&
	    (major == devid_register_corrupt_major))
		i_devid->did_id[0] += 1;
	if (devid_register_corrupt_hint)
		i_devid->did_driver[0] += devid_register_corrupt_hint;
	if (devid_register_corrupt_hint_major &&
	    (major == devid_register_corrupt_hint_major))
		i_devid->did_driver[0] += 1;
#endif /* DEBUG */

	/* encode the devid as a string */
	if ((devid_str = ddi_devid_str_encode(devid, NULL)) == NULL)
		return (DDI_FAILURE);

	/* add string as a string property */
	if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
	    DEVID_PROP_NAME, devid_str) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: devid property update failed",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		ddi_devid_str_free(devid_str);
		return (DDI_FAILURE);
	}

	/* keep pointer to devid string for interrupt context fma code */
	if (DEVI(dip)->devi_devid_str)
		ddi_devid_str_free(DEVI(dip)->devi_devid_str);
	DEVI(dip)->devi_devid_str = devid_str;
	return (DDI_SUCCESS);
}
7748 7701
7749 7702 int
7750 7703 ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
7751 7704 {
7752 7705 int rval;
7753 7706
7754 7707 rval = i_ddi_devid_register(dip, devid);
7755 7708 if (rval == DDI_SUCCESS) {
7756 7709 /*
7757 7710 * Register devid in devid-to-path cache
7758 7711 */
7759 7712 if (e_devid_cache_register(dip, devid) == DDI_SUCCESS) {
7760 7713 mutex_enter(&DEVI(dip)->devi_lock);
7761 7714 DEVI(dip)->devi_flags |= DEVI_CACHED_DEVID;
7762 7715 mutex_exit(&DEVI(dip)->devi_lock);
7763 7716 } else if (ddi_get_name_addr(dip)) {
7764 7717 /*
7765 7718 * We only expect cache_register DDI_FAILURE when we
7766 7719 * can't form the full path because of NULL devi_addr.
7767 7720 */
7768 7721 cmn_err(CE_WARN, "%s%d: failed to cache devid",
7769 7722 ddi_driver_name(dip), ddi_get_instance(dip));
7770 7723 }
7771 7724 } else {
7772 7725 cmn_err(CE_WARN, "%s%d: failed to register devid",
7773 7726 ddi_driver_name(dip), ddi_get_instance(dip));
7774 7727 }
7775 7728 return (rval);
7776 7729 }
7777 7730
7778 7731 /*
7779 7732 * Remove (unregister) device id from DDI framework.
7780 7733 * Must be called when device is detached.
7781 7734 */
7782 7735 static void
7783 7736 i_ddi_devid_unregister(dev_info_t *dip)
7784 7737 {
7785 7738 if (DEVI(dip)->devi_devid_str) {
7786 7739 ddi_devid_str_free(DEVI(dip)->devi_devid_str);
7787 7740 DEVI(dip)->devi_devid_str = NULL;
7788 7741 }
7789 7742
7790 7743 /* remove the devid property */
7791 7744 (void) ndi_prop_remove(DDI_DEV_T_NONE, dip, DEVID_PROP_NAME);
7792 7745 }
7793 7746
/*
 * Public devid unregistration entry point; undoes ddi_devid_register():
 * clears the cached-devid flag, removes the devid from the devid-to-path
 * cache, then removes the devid itself from the node.
 */
void
ddi_devid_unregister(dev_info_t *dip)
{
	/* clear the cached-devid flag under devi_lock */
	mutex_enter(&DEVI(dip)->devi_lock);
	DEVI(dip)->devi_flags &= ~DEVI_CACHED_DEVID;
	mutex_exit(&DEVI(dip)->devi_lock);

	/* drop the cache entry first, then the devid property/string */
	e_devid_cache_unregister(dip);
	i_ddi_devid_unregister(dip);
}
7803 7756
/*
 * Allocate and initialize a device id.
 *
 * For the caller-supplied types (SCSI3 WWN, SCSI/ATA serial, ENCAP) the
 * id bytes are copied from "id".  For DEVID_FAB the id is fabricated
 * here from the host id, a timestamp and a generation number; the caller
 * must pass nbytes == 0 and id == NULL.  On success *ret_devid holds the
 * newly allocated devid and DDI_SUCCESS is returned.
 */
int
ddi_devid_init(
	dev_info_t	*dip,
	ushort_t	devid_type,
	ushort_t	nbytes,
	void		*id,
	ddi_devid_t	*ret_devid)
{
	impl_devid_t	*i_devid;
	/* header already contains one id byte, hence the "- sizeof (char)" */
	int		sz = sizeof (*i_devid) + nbytes - sizeof (char);
	int		driver_len;
	const char	*driver_name;

	switch (devid_type) {
	case DEVID_SCSI3_WWN:
		/*FALLTHRU*/
	case DEVID_SCSI_SERIAL:
		/*FALLTHRU*/
	case DEVID_ATA_SERIAL:
		/*FALLTHRU*/
	case DEVID_ENCAP:
		/* caller-supplied types require non-empty id bytes */
		if (nbytes == 0)
			return (DDI_FAILURE);
		if (id == NULL)
			return (DDI_FAILURE);
		break;
	case DEVID_FAB:
		/* fabricated ids are generated below; none may be passed in */
		if (nbytes != 0)
			return (DDI_FAILURE);
		if (id != NULL)
			return (DDI_FAILURE);
		/* hostid + timestamp + generation number */
		nbytes = sizeof (int) +
		    sizeof (struct timeval32) + sizeof (short);
		sz += nbytes;
		break;
	default:
		return (DDI_FAILURE);
	}

	if ((i_devid = kmem_zalloc(sz, KM_SLEEP)) == NULL)
		return (DDI_FAILURE);

	/* fill in the fixed devid header */
	i_devid->did_magic_hi = DEVID_MAGIC_MSB;
	i_devid->did_magic_lo = DEVID_MAGIC_LSB;
	i_devid->did_rev_hi = DEVID_REV_MSB;
	i_devid->did_rev_lo = DEVID_REV_LSB;
	DEVID_FORMTYPE(i_devid, devid_type);
	DEVID_FORMLEN(i_devid, nbytes);

	/* Fill in driver name hint */
	driver_name = ddi_driver_name(dip);
	driver_len = strlen(driver_name);
	if (driver_len > DEVID_HINT_SIZE) {
		/* Pick up last four characters of driver name */
		driver_name += driver_len - DEVID_HINT_SIZE;
		driver_len = DEVID_HINT_SIZE;
	}

	bcopy(driver_name, i_devid->did_driver, driver_len);

	/* Fill in id field */
	if (devid_type == DEVID_FAB) {
		char *cp;
		uint32_t hostid;
		struct timeval32 timestamp32;
		int i;
		int *ip;
		short gen;

		/* increase the generation number */
		mutex_enter(&devid_gen_mutex);
		gen = devid_gen_number++;
		mutex_exit(&devid_gen_mutex);

		cp = i_devid->did_id;

		/* Fill in host id (big-endian byte ordering) */
		hostid = zone_get_hostid(NULL);
		*cp++ = hibyte(hiword(hostid));
		*cp++ = lobyte(hiword(hostid));
		*cp++ = hibyte(loword(hostid));
		*cp++ = lobyte(loword(hostid));

		/*
		 * Fill in timestamp (big-endian byte ordering)
		 *
		 * (Note that the format may have to be changed
		 * before 2038 comes around, though it's arguably
		 * unique enough as it is..)
		 */
		uniqtime32(&timestamp32);
		ip = (int *)&timestamp32;
		for (i = 0;
		    i < sizeof (timestamp32) / sizeof (int); i++, ip++) {
			int val;
			val = *ip;
			*cp++ = hibyte(hiword(val));
			*cp++ = lobyte(hiword(val));
			*cp++ = hibyte(loword(val));
			*cp++ = lobyte(loword(val));
		}

		/* fill in the generation number */
		*cp++ = hibyte(gen);
		*cp++ = lobyte(gen);
	} else
		bcopy(id, i_devid->did_id, nbytes);

	/* return device id */
	*ret_devid = (ddi_devid_t)i_devid;
	return (DDI_SUCCESS);
}
7919 7872
7920 7873 int
7921 7874 ddi_devid_get(dev_info_t *dip, ddi_devid_t *ret_devid)
7922 7875 {
7923 7876 return (i_ddi_devi_get_devid(DDI_DEV_T_ANY, dip, ret_devid));
7924 7877 }
7925 7878
7926 7879 int
7927 7880 i_ddi_devi_get_devid(dev_t dev, dev_info_t *dip, ddi_devid_t *ret_devid)
7928 7881 {
7929 7882 char *devidstr;
7930 7883
7931 7884 ASSERT(dev != DDI_DEV_T_NONE);
7932 7885
7933 7886 /* look up the property, devt specific first */
7934 7887 if (ddi_prop_lookup_string(dev, dip, DDI_PROP_DONTPASS,
7935 7888 DEVID_PROP_NAME, &devidstr) != DDI_PROP_SUCCESS) {
7936 7889 if ((dev == DDI_DEV_T_ANY) ||
7937 7890 (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
7938 7891 DDI_PROP_DONTPASS, DEVID_PROP_NAME, &devidstr) !=
7939 7892 DDI_PROP_SUCCESS)) {
7940 7893 return (DDI_FAILURE);
7941 7894 }
7942 7895 }
7943 7896
7944 7897 /* convert to binary form */
7945 7898 if (ddi_devid_str_decode(devidstr, ret_devid, NULL) == -1) {
7946 7899 ddi_prop_free(devidstr);
7947 7900 return (DDI_FAILURE);
7948 7901 }
7949 7902 ddi_prop_free(devidstr);
7950 7903 return (DDI_SUCCESS);
7951 7904 }
7952 7905
7953 7906 /*
7954 7907 * Return a copy of the device id for dev_t
7955 7908 */
7956 7909 int
7957 7910 ddi_lyr_get_devid(dev_t dev, ddi_devid_t *ret_devid)
7958 7911 {
7959 7912 dev_info_t *dip;
7960 7913 int rval;
7961 7914
7962 7915 /* get the dip */
7963 7916 if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
7964 7917 return (DDI_FAILURE);
7965 7918
7966 7919 rval = i_ddi_devi_get_devid(dev, dip, ret_devid);
7967 7920
7968 7921 ddi_release_devi(dip); /* e_ddi_hold_devi_by_dev() */
7969 7922 return (rval);
7970 7923 }
7971 7924
/*
 * Return a copy of the minor name for dev_t and spec_type
 *
 * On success, *minor_name points to a freshly allocated copy (caller
 * frees) and DDI_SUCCESS is returned; on failure *minor_name is NULL.
 */
int
ddi_lyr_get_minor_name(dev_t dev, int spec_type, char **minor_name)
{
	char		*buf;
	int		circ;
	dev_info_t	*dip;
	char		*nm;
	int		rval;

	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) {
		*minor_name = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Find the minor name and copy into max size buf.  The copy is
	 * made inside ndi_devi_enter/exit — presumably because the name
	 * storage is only stable while the node is locked (verify).
	 */
	buf = kmem_alloc(MAXNAMELEN, KM_SLEEP);
	ndi_devi_enter(dip, &circ);
	nm = i_ddi_devtspectype_to_minorname(dip, dev, spec_type);
	if (nm)
		(void) strcpy(buf, nm);
	ndi_devi_exit(dip, circ);
	ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */

	if (nm) {
		/* duplicate into min size buf for return result */
		*minor_name = i_ddi_strdup(buf, KM_SLEEP);
		rval = DDI_SUCCESS;
	} else {
		*minor_name = NULL;
		rval = DDI_FAILURE;
	}

	/* free max size buf and return */
	kmem_free(buf, MAXNAMELEN);
	return (rval);
}
8011 7964
8012 7965 int
8013 7966 ddi_lyr_devid_to_devlist(
8014 7967 ddi_devid_t devid,
8015 7968 char *minor_name,
8016 7969 int *retndevs,
8017 7970 dev_t **retdevs)
8018 7971 {
8019 7972 ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);
8020 7973
8021 7974 if (e_devid_cache_to_devt_list(devid, minor_name,
8022 7975 retndevs, retdevs) == DDI_SUCCESS) {
8023 7976 ASSERT(*retndevs > 0);
8024 7977 DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
8025 7978 *retndevs, *retdevs);
8026 7979 return (DDI_SUCCESS);
8027 7980 }
8028 7981
8029 7982 if (e_ddi_devid_discovery(devid) == DDI_FAILURE) {
8030 7983 return (DDI_FAILURE);
8031 7984 }
8032 7985
8033 7986 if (e_devid_cache_to_devt_list(devid, minor_name,
8034 7987 retndevs, retdevs) == DDI_SUCCESS) {
8035 7988 ASSERT(*retndevs > 0);
8036 7989 DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
8037 7990 *retndevs, *retdevs);
8038 7991 return (DDI_SUCCESS);
8039 7992 }
8040 7993
8041 7994 return (DDI_FAILURE);
8042 7995 }
8043 7996
/*
 * Free a dev_t list returned by ddi_lyr_devid_to_devlist().
 */
void
ddi_lyr_free_devlist(dev_t *devlist, int ndevs)
{
	kmem_free(devlist, sizeof (dev_t) * ndevs);
}
8049 8002
/*
 * Return the data model of the current process (for mmap/devmap use).
 *
 * Note: This will need to be fixed if we ever allow processes to
 * have more than one data model per exec.
 */
model_t
ddi_mmap_get_model(void)
{
	return (get_udatamodel());
}
8059 8012
/*
 * Normalize a model value: mask to the model bits and strip the
 * DDI_MODEL_NATIVE flag.
 */
model_t
ddi_model_convert_from(model_t model)
{
	return ((model & DDI_MODEL_MASK) & ~DDI_MODEL_NATIVE);
}
8065 8018
8066 8019 /*
8067 8020 * ddi interfaces managing storage and retrieval of eventcookies.
8068 8021 */
8069 8022
8070 8023 /*
8071 8024 * Invoke bus nexus driver's implementation of the
8072 8025 * (*bus_remove_eventcall)() interface to remove a registered
8073 8026 * callback handler for "event".
8074 8027 */
8075 8028 int
8076 8029 ddi_remove_event_handler(ddi_callback_id_t id)
8077 8030 {
8078 8031 ndi_event_callbacks_t *cb = (ndi_event_callbacks_t *)id;
8079 8032 dev_info_t *ddip;
8080 8033
8081 8034 ASSERT(cb);
8082 8035 if (!cb) {
8083 8036 return (DDI_FAILURE);
8084 8037 }
8085 8038
8086 8039 ddip = NDI_EVENT_DDIP(cb->ndi_evtcb_cookie);
8087 8040 return (ndi_busop_remove_eventcall(ddip, id));
8088 8041 }
8089 8042
/*
 * Invoke bus nexus driver's implementation of the
 * (*bus_add_eventcall)() interface to register a callback handler
 * for "event".  On success *id receives the callback id used by
 * ddi_remove_event_handler().
 */
int
ddi_add_event_handler(dev_info_t *dip, ddi_eventcookie_t event,
    void (*handler)(dev_info_t *, ddi_eventcookie_t, void *, void *),
    void *arg, ddi_callback_id_t *id)
{
	/* the caller's dip is both the requesting and the registering dip */
	return (ndi_busop_add_eventcall(dip, dip, event, handler, arg, id));
}
8102 8055
8103 8056
/*
 * Return a handle for event "name" by calling up the device tree
 * hierarchy via (*bus_get_eventcookie)() interface until claimed
 * by a bus nexus or top of dev_info tree is reached.
 */
int
ddi_get_eventcookie(dev_info_t *dip, char *name,
    ddi_eventcookie_t *event_cookiep)
{
	/* the caller's dip is both the requesting and the starting dip */
	return (ndi_busop_get_eventcookie(dip, dip,
	    name, event_cookiep));
}
8116 8069
/*
 * This procedure is provided as the general callback function when
 * umem_lockmemory calls as_add_callback for long term memory locking.
 * When as_unmap, as_setprot, or as_free encounter segments which have
 * locked memory, this callback will be invoked.
 */
void
umem_lock_undo(struct as *as, void *arg, uint_t event)
{
	_NOTE(ARGUNUSED(as, event))
	struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)arg;

	/*
	 * Call the cleanup function.  Decrement the cookie reference
	 * count, if it goes to zero, return the memory for the cookie.
	 * The i_ddi_umem_unlock for this cookie may or may not have been
	 * called already.  It is the responsibility of the caller of
	 * umem_lockmemory to handle the case of the cleanup routine
	 * being called after a ddi_umem_unlock for the cookie
	 * was called.
	 */

	(*cp->callbacks.cbo_umem_lock_cleanup)((ddi_umem_cookie_t)cp);

	/*
	 * Remove the cookie if reference goes to zero.  The refcnt is
	 * shared with i_ddi_umem_unlock, hence the atomic decrement.
	 */
	if (atomic_add_long_nv((ulong_t *)(&(cp->cook_refcnt)), -1) == 0) {
		kmem_free(cp, sizeof (struct ddi_umem_cookie));
	}
}
8146 8099
8147 8100 /*
8148 8101 * The following two Consolidation Private routines provide generic
8149 8102 * interfaces to increase/decrease the amount of device-locked memory.
8150 8103 *
8151 8104 * To keep project_rele and project_hold consistent, i_ddi_decr_locked_memory()
8152 8105 * must be called every time i_ddi_incr_locked_memory() is called.
8153 8106 */
8154 8107 int
8155 8108 /* ARGSUSED */
8156 8109 i_ddi_incr_locked_memory(proc_t *procp, rctl_qty_t inc)
8157 8110 {
8158 8111 ASSERT(procp != NULL);
8159 8112 mutex_enter(&procp->p_lock);
8160 8113 if (rctl_incr_locked_mem(procp, NULL, inc, 1)) {
8161 8114 mutex_exit(&procp->p_lock);
8162 8115 return (ENOMEM);
8163 8116 }
8164 8117 mutex_exit(&procp->p_lock);
8165 8118 return (0);
8166 8119 }
8167 8120
/*
 * To keep project_rele and project_hold consistent, i_ddi_incr_locked_memory()
 * must be called every time i_ddi_decr_locked_memory() is called.
 */
/* ARGSUSED */
void
i_ddi_decr_locked_memory(proc_t *procp, rctl_qty_t dec)
{
	ASSERT(procp != NULL);

	/* return the decrement to the process' locked-mem rctl, under p_lock */
	mutex_enter(&procp->p_lock);
	rctl_decr_locked_mem(procp, NULL, dec, 1);
	mutex_exit(&procp->p_lock);
}
8181 8134
8182 8135 /*
8183 8136 * The cookie->upd_max_lock_rctl flag is used to determine if we should
8184 8137 * charge device locked memory to the max-locked-memory rctl. Tracking
8185 8138 * device locked memory causes the rctl locks to get hot under high-speed
8186 8139 * I/O such as RDSv3 over IB. If there is no max-locked-memory rctl limit,
8187 8140 * we bypass charging the locked memory to the rctl altogether. The cookie's
8188 8141 * flag tells us if the rctl value should be updated when unlocking the memory,
8189 8142 * in case the rctl gets changed after the memory was locked. Any device
8190 8143 * locked memory in that rare case will not be counted toward the rctl limit.
8191 8144 *
8192 8145 * When tracking the locked memory, the kproject_t parameter is always NULL
8193 8146 * in the code paths:
8194 8147 * i_ddi_incr_locked_memory -> rctl_incr_locked_mem
8195 8148 * i_ddi_decr_locked_memory -> rctl_decr_locked_mem
8196 8149 * Thus, we always use the tk_proj member to check the projp setting.
8197 8150 */
8198 8151 static void
8199 8152 init_lockedmem_rctl_flag(struct ddi_umem_cookie *cookie)
8200 8153 {
8201 8154 proc_t *p;
8202 8155 kproject_t *projp;
8203 8156 zone_t *zonep;
8204 8157
8205 8158 ASSERT(cookie);
8206 8159 p = cookie->procp;
8207 8160 ASSERT(p);
8208 8161
8209 8162 zonep = p->p_zone;
8210 8163 projp = p->p_task->tk_proj;
8211 8164
8212 8165 ASSERT(zonep);
8213 8166 ASSERT(projp);
8214 8167
8215 8168 if (zonep->zone_locked_mem_ctl == UINT64_MAX &&
8216 8169 projp->kpj_data.kpd_locked_mem_ctl == UINT64_MAX)
8217 8170 cookie->upd_max_lock_rctl = 0;
8218 8171 else
8219 8172 cookie->upd_max_lock_rctl = 1;
8220 8173 }
8221 8174
8222 8175 /*
8223 8176 * This routine checks if the max-locked-memory resource ctl is
8224 8177 * exceeded, if not increments it, grabs a hold on the project.
8225 8178 * Returns 0 if successful otherwise returns error code
8226 8179 */
8227 8180 static int
8228 8181 umem_incr_devlockmem(struct ddi_umem_cookie *cookie)
8229 8182 {
8230 8183 proc_t *procp;
8231 8184 int ret;
8232 8185
8233 8186 ASSERT(cookie);
8234 8187 if (cookie->upd_max_lock_rctl == 0)
8235 8188 return (0);
8236 8189
8237 8190 procp = cookie->procp;
8238 8191 ASSERT(procp);
8239 8192
8240 8193 if ((ret = i_ddi_incr_locked_memory(procp,
8241 8194 cookie->size)) != 0) {
8242 8195 return (ret);
8243 8196 }
8244 8197 return (0);
8245 8198 }
8246 8199
8247 8200 /*
8248 8201 * Decrements the max-locked-memory resource ctl and releases
8249 8202 * the hold on the project that was acquired during umem_incr_devlockmem
8250 8203 */
8251 8204 static void
8252 8205 umem_decr_devlockmem(struct ddi_umem_cookie *cookie)
8253 8206 {
8254 8207 proc_t *proc;
8255 8208
8256 8209 if (cookie->upd_max_lock_rctl == 0)
8257 8210 return;
8258 8211
8259 8212 proc = (proc_t *)cookie->procp;
8260 8213 if (!proc)
8261 8214 return;
8262 8215
8263 8216 i_ddi_decr_locked_memory(proc, cookie->size);
8264 8217 }
8265 8218
/*
 * A consolidation private function which is essentially equivalent to
 * ddi_umem_lock but with the addition of arguments ops_vector and procp.
 * A call to as_add_callback is done if DDI_UMEMLOCK_LONGTERM is set, and
 * the ops_vector is valid.
 *
 * Lock the virtual address range in the current process and create a
 * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
 * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
 * to user space.
 *
 * Note: The resource control accounting currently uses a full charge model
 * in other words attempts to lock the same/overlapping areas of memory
 * will deduct the full size of the buffer from the projects running
 * counter for the device locked memory.
 *
 * addr, size should be PAGESIZE aligned
 *
 * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
 *	identifies whether the locked memory will be read or written or both
 *	DDI_UMEMLOCK_LONGTERM must be set when the locking will
 * be maintained for an indefinitely long period (essentially permanent),
 * rather than for what would be required for a typical I/O completion.
 * When DDI_UMEMLOCK_LONGTERM is set, umem_lockmemory will return EFAULT
 * if the memory pertains to a regular file which is mapped MAP_SHARED.
 * This is to prevent a deadlock if a file truncation is attempted
 * after the locking is done.
 *
 * Returns 0 on success
 *	EINVAL - for invalid parameters
 *	EPERM, ENOMEM and other error codes returned by as_pagelock
 *	ENOMEM - is returned if the current request to lock memory exceeds
 *	*.max-locked-memory resource control value.
 *	EFAULT - memory pertains to a regular file mapped shared and
 *	DDI_UMEMLOCK_LONGTERM flag is set
 *	EAGAIN - could not start the ddi_umem_unlock list processing thread
 */
int
umem_lockmemory(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie,
    struct umem_callback_ops *ops_vector,
    proc_t *procp)
{
	int	error;
	struct ddi_umem_cookie *p;
	void	(*driver_callback)() = NULL;
	struct as *as;
	struct seg	*seg;
	vnode_t		*vp;

	/* Allow device drivers to not have to reference "curproc" */
	if (procp == NULL)
		procp = curproc;
	as = procp->p_as;
	*cookie = NULL;		/* in case of any error return */

	/* These are the only three valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE |
	    DDI_UMEMLOCK_LONGTERM)) != 0)
		return (EINVAL);

	/* At least one (can be both) of the two access flags must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0)
		return (EINVAL);

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0)
		return (EINVAL);

	if ((len & PAGEOFFSET) != 0)
		return (EINVAL);

	/*
	 * For longterm locking a driver callback must be specified; if
	 * not longterm then a callback is optional.
	 */
	if (ops_vector != NULL) {
		if (ops_vector->cbo_umem_callback_version !=
		    UMEM_CALLBACK_VERSION)
			return (EINVAL);
		else
			driver_callback = ops_vector->cbo_umem_lock_cleanup;
	}
	if ((driver_callback == NULL) && (flags & DDI_UMEMLOCK_LONGTERM))
		return (EINVAL);

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store procp in cookie for later iosetup/unlock */
	p->procp = (void *)procp;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = as;

	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;
	init_lockedmem_rctl_flag(p);

	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(as, &(p->pparray), addr, len, p->s_flags);
	if (error != 0) {
		/* undo the rctl charge before bailing out */
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/*
	 * For longterm locking the addr must pertain to a seg_vn segment or
	 * a seg_spt segment.
	 * If the segment pertains to a regular file, it cannot be
	 * mapped MAP_SHARED.
	 * This is to prevent a deadlock if a file truncation is attempted
	 * after the locking is done.
	 * Doing this after as_pagelock guarantees persistence of the as; if
	 * an unacceptable segment is found, the cleanup includes calling
	 * as_pageunlock before returning EFAULT.
	 *
	 * segdev is allowed here as it is already locked.  This allows
	 * for memory exported by drivers through mmap() (which is already
	 * locked) to be allowed for LONGTERM.
	 */
	if (flags & DDI_UMEMLOCK_LONGTERM) {
		extern struct seg_ops segspt_shmops;
		extern struct seg_ops segdev_ops;
		AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
		for (seg = as_segat(as, addr); ; seg = AS_SEGNEXT(as, seg)) {
			if (seg == NULL || seg->s_base > addr + len)
				break;
			if (seg->s_ops == &segdev_ops)
				continue;
			if (((seg->s_ops != &segvn_ops) &&
			    (seg->s_ops != &segspt_shmops)) ||
			    ((SEGOP_GETVP(seg, addr, &vp) == 0 &&
			    vp != NULL && vp->v_type == VREG) &&
			    (SEGOP_GETTYPE(seg, addr) & MAP_SHARED))) {
				as_pageunlock(as, p->pparray,
				    addr, len, p->s_flags);
				AS_LOCK_EXIT(as, &as->a_lock);
				umem_decr_devlockmem(p);
				kmem_free(p, sizeof (struct ddi_umem_cookie));
				*cookie = (ddi_umem_cookie_t)NULL;
				return (EFAULT);
			}
		}
		AS_LOCK_EXIT(as, &as->a_lock);
	}


	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	if (driver_callback != NULL) {
		/* i_ddi_umem_unlock and umem_lock_undo may need the cookie */
		p->cook_refcnt = 2;
		p->callbacks = *ops_vector;
	} else {
		/* only i_ddi_umem_unlock needs the cookie */
		p->cook_refcnt = 1;
	}

	*cookie = (ddi_umem_cookie_t)p;

	/*
	 * If a driver callback was specified, add an entry to the
	 * as struct callback list.  The as_pagelock above guarantees
	 * the persistence of as.
	 */
	if (driver_callback) {
		error = as_add_callback(as, umem_lock_undo, p, AS_ALL_EVENT,
		    addr, len, KM_SLEEP);
		if (error != 0) {
			as_pageunlock(as, p->pparray,
			    addr, len, p->s_flags);
			umem_decr_devlockmem(p);
			kmem_free(p, sizeof (struct ddi_umem_cookie));
			*cookie = (ddi_umem_cookie_t)NULL;
		}
	}
	return (error);
}
8476 8429
/*
 * Unlock the pages locked by ddi_umem_lock or umem_lockmemory and free
 * the cookie. Called from i_ddi_umem_unlock_thread.
 */
static void
i_ddi_umem_unlock(struct ddi_umem_cookie *p)
{
	uint_t	rc;

	/*
	 * There is no way to determine whether a callback to
	 * umem_lock_undo was registered via as_add_callback.
	 * (i.e. umem_lockmemory was called with DDI_MEMLOCK_LONGTERM and
	 * a valid callback function structure.)  as_delete_callback
	 * is called to delete a possible registered callback.  If the
	 * return from as_delete_callbacks is AS_CALLBACK_DELETED, it
	 * indicates that there was a callback registered, and that it was
	 * successfully deleted.  Thus, the cookie reference count
	 * will never be decremented by umem_lock_undo.  Just return the
	 * memory for the cookie, since both users of the cookie are done.
	 * A return of AS_CALLBACK_NOTFOUND indicates a callback was
	 * never registered.  A return of AS_CALLBACK_DELETE_DEFERRED
	 * indicates that callback processing is taking place and
	 * umem_lock_undo is, or will be, executing, and thus decrementing
	 * the cookie reference count when it is complete.
	 *
	 * This needs to be done before as_pageunlock so that the
	 * persistence of as is guaranteed because of the locked pages.
	 *
	 */
	rc = as_delete_callback(p->asp, p);


	/*
	 * The proc->p_as will be stale if i_ddi_umem_unlock is called
	 * after relvm is called so use p->asp.
	 */
	as_pageunlock(p->asp, p->pparray, p->cvaddr, p->size, p->s_flags);

	/*
	 * Now that we have unlocked the memory decrement the
	 * *.max-locked-memory rctl
	 */
	umem_decr_devlockmem(p);

	if (rc == AS_CALLBACK_DELETED) {
		/* umem_lock_undo will not happen, return the cookie memory */
		ASSERT(p->cook_refcnt == 2);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
	} else {
		/*
		 * umem_undo_lock may happen if as_delete_callback returned
		 * AS_CALLBACK_DELETE_DEFERRED.  In that case, decrement the
		 * reference count, atomically, and return the cookie
		 * memory if the reference count goes to zero.  The only
		 * other value for rc is AS_CALLBACK_NOTFOUND.  In that
		 * case, just return the cookie memory.
		 */
		if ((rc != AS_CALLBACK_DELETE_DEFERRED) ||
		    (atomic_add_long_nv((ulong_t *)(&(p->cook_refcnt)), -1)
		    == 0)) {
			kmem_free(p, sizeof (struct ddi_umem_cookie));
		}
	}
}
8543 8496
8544 8497 /*
8545 8498 * i_ddi_umem_unlock_thread - deferred ddi_umem_unlock list handler.
8546 8499 *
8547 8500 * Call i_ddi_umem_unlock for entries in the ddi_umem_unlock list
8548 8501 * until it is empty. Then, wait for more to be added. This thread is awoken
8549 8502 * via calls to ddi_umem_unlock.
8550 8503 */
8551 8504
8552 8505 static void
8553 8506 i_ddi_umem_unlock_thread(void)
8554 8507 {
8555 8508 struct ddi_umem_cookie *ret_cookie;
8556 8509 callb_cpr_t cprinfo;
8557 8510
8558 8511 /* process the ddi_umem_unlock list */
8559 8512 CALLB_CPR_INIT(&cprinfo, &ddi_umem_unlock_mutex,
8560 8513 callb_generic_cpr, "unlock_thread");
8561 8514 for (;;) {
8562 8515 mutex_enter(&ddi_umem_unlock_mutex);
8563 8516 if (ddi_umem_unlock_head != NULL) { /* list not empty */
8564 8517 ret_cookie = ddi_umem_unlock_head;
8565 8518 /* take if off the list */
8566 8519 if ((ddi_umem_unlock_head =
8567 8520 ddi_umem_unlock_head->unl_forw) == NULL) {
8568 8521 ddi_umem_unlock_tail = NULL;
8569 8522 }
8570 8523 mutex_exit(&ddi_umem_unlock_mutex);
8571 8524 /* unlock the pages in this cookie */
8572 8525 (void) i_ddi_umem_unlock(ret_cookie);
8573 8526 } else { /* list is empty, wait for next ddi_umem_unlock */
8574 8527 CALLB_CPR_SAFE_BEGIN(&cprinfo);
8575 8528 cv_wait(&ddi_umem_unlock_cv, &ddi_umem_unlock_mutex);
8576 8529 CALLB_CPR_SAFE_END(&cprinfo, &ddi_umem_unlock_mutex);
8577 8530 mutex_exit(&ddi_umem_unlock_mutex);
8578 8531 }
8579 8532 }
8580 8533 /* ddi_umem_unlock_thread does not exit */
8581 8534 /* NOTREACHED */
8582 8535 }
8583 8536
/*
 * Start the thread that will process the ddi_umem_unlock list if it is
 * not already started (i_ddi_umem_unlock_thread).
 *
 * ddi_umem_unlock_mutex serializes the check-and-create so that at most
 * one unlock thread is ever created.
 */
static void
i_ddi_umem_unlock_thread_start(void)
{
	mutex_enter(&ddi_umem_unlock_mutex);
	if (ddi_umem_unlock_thread == NULL) {
		ddi_umem_unlock_thread = thread_create(NULL, 0,
		    i_ddi_umem_unlock_thread, NULL, 0, &p0,
		    TS_RUN, minclsyspri);
	}
	mutex_exit(&ddi_umem_unlock_mutex);
}
8599 8552
/*
 * Lock the virtual address range in the current process and create a
 * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
 * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
 * to user space.
 *
 * Note: The resource control accounting currently uses a full charge model
 * in other words attempts to lock the same/overlapping areas of memory
 * will deduct the full size of the buffer from the projects running
 * counter for the device locked memory. This applies to umem_lockmemory too.
 *
 * addr, size should be PAGESIZE aligned
 * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
 *	identifies whether the locked memory will be read or written or both
 *
 * Returns 0 on success
 *	EINVAL - for invalid parameters
 *	EPERM, ENOMEM and other error codes returned by as_pagelock
 *	ENOMEM - is returned if the current request to lock memory exceeds
 *	*.max-locked-memory resource control value.
 *	EAGAIN - could not start the ddi_umem_unlock list processing thread
 */
int
ddi_umem_lock(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie)
{
	int	error;
	struct ddi_umem_cookie *p;

	*cookie = NULL;		/* in case of any error return */

	/* These are the only two valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) != 0) {
		return (EINVAL);
	}

	/* At least one of the two flags (or both) must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0) {
		return (EINVAL);
	}

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0) {
		return (EINVAL);
	}

	if ((len & PAGEOFFSET) != 0) {
		return (EINVAL);
	}

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store curproc in cookie for later iosetup/unlock */
	p->procp = (void *)curproc;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock. The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = curproc->p_as;
	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;
	init_lockedmem_rctl_flag(p);

	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(((proc_t *)p->procp)->p_as, &(p->pparray),
	    addr, len, p->s_flags);
	if (error != 0) {
		/* roll back the accounting charge before failing */
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	p->cook_refcnt = 1;

	*cookie = (ddi_umem_cookie_t)p;
	return (error);
}
8708 8661
/*
 * Release a cookie obtained from ddi_umem_lock.
 *
 * In interrupt context the cookie is queued on the ddi_umem_unlock list
 * and the pages are unlocked later by i_ddi_umem_unlock_thread; otherwise
 * the pages are unlocked immediately.  Must not be called at high-level
 * interrupt (asserted below).
 */

void
ddi_umem_unlock(ddi_umem_cookie_t cookie)
{
	struct ddi_umem_cookie	*p = (struct ddi_umem_cookie *)cookie;

	ASSERT(p->type == UMEM_LOCKED);
	ASSERT(CPU_ON_INTR(CPU) == 0); /* cannot be high level */
	ASSERT(ddi_umem_unlock_thread != NULL);

	p->unl_forw = (struct ddi_umem_cookie *)NULL;	/* end of list */
	/*
	 * Queue the unlock request and notify i_ddi_umem_unlock thread
	 * if it's called in the interrupt context. Otherwise, unlock pages
	 * immediately.
	 */
	if (servicing_interrupt()) {
		/* queue the unlock request and notify the thread */
		mutex_enter(&ddi_umem_unlock_mutex);
		if (ddi_umem_unlock_head == NULL) {
			ddi_umem_unlock_head = ddi_umem_unlock_tail = p;
			/* list was empty: the thread may be waiting */
			cv_broadcast(&ddi_umem_unlock_cv);
		} else {
			ddi_umem_unlock_tail->unl_forw = p;
			ddi_umem_unlock_tail = p;
		}
		mutex_exit(&ddi_umem_unlock_mutex);
	} else {
		/* unlock the pages right away */
		(void) i_ddi_umem_unlock(p);
	}
}
8745 8698
/*
 * Create a buf structure from a ddi_umem_cookie
 * cookie - is a ddi_umem_cookie from ddi_umem_lock and ddi_umem_alloc
 *		(only UMEM_LOCKED & KMEM_NON_PAGEABLE types supported)
 * off, len - identifies the portion of the memory represented by the cookie
 *		that the buf points to.
 *	NOTE: off, len need to follow the alignment/size restrictions of the
 *		device (dev) that this buf will be passed to. Some devices
 *		will accept unrestricted alignment/size, whereas others (such as
 *		st) require some block-size alignment/size. It is the caller's
 *		responsibility to ensure that the alignment/size restrictions
 *		are met (we cannot assert as we do not know the restrictions)
 *
 * direction - is one of B_READ or B_WRITE and needs to be compatible with
 *		the flags used in ddi_umem_lock
 *
 * The following three arguments are used to initialize fields in the
 * buf structure and are uninterpreted by this routine.
 *
 * dev
 * blkno
 * iodone
 *
 * sleepflag - is one of DDI_UMEM_SLEEP or DDI_UMEM_NOSLEEP
 *
 * Returns a buf structure pointer on success (to be freed by freerbuf)
 *	NULL on any parameter error or memory alloc failure
 *
 */
struct buf *
ddi_umem_iosetup(ddi_umem_cookie_t cookie, off_t off, size_t len,
    int direction, dev_t dev, daddr_t blkno,
    int (*iodone)(struct buf *), int sleepflag)
{
	struct ddi_umem_cookie *p = (struct ddi_umem_cookie *)cookie;
	struct buf *bp;

	/*
	 * check for valid cookie offset, len
	 */
	if ((off + len) > p->size) {
		return (NULL);
	}

	if (len > p->size) {
		return (NULL);
	}

	/* direction has to be one of B_READ or B_WRITE */
	if ((direction != B_READ) && (direction != B_WRITE)) {
		return (NULL);
	}

	/* These are the only two valid sleepflags */
	if ((sleepflag != DDI_UMEM_SLEEP) && (sleepflag != DDI_UMEM_NOSLEEP)) {
		return (NULL);
	}

	/*
	 * Only cookies of type UMEM_LOCKED and KMEM_NON_PAGEABLE are supported
	 */
	if ((p->type != UMEM_LOCKED) && (p->type != KMEM_NON_PAGEABLE)) {
		return (NULL);
	}

	/* If type is KMEM_NON_PAGEABLE procp is NULL */
	ASSERT((p->type == KMEM_NON_PAGEABLE) ?
	    (p->procp == NULL) : (p->procp != NULL));

	bp = kmem_alloc(sizeof (struct buf), sleepflag);
	if (bp == NULL) {
		return (NULL);
	}
	bioinit(bp);

	bp->b_flags = B_BUSY | B_PHYS | direction;
	bp->b_edev = dev;
	bp->b_lblkno = blkno;
	bp->b_iodone = iodone;
	bp->b_bcount = len;
	bp->b_proc = (proc_t *)p->procp;
	ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
	bp->b_un.b_addr = (caddr_t)((uintptr_t)(p->cvaddr) + off);
	if (p->pparray != NULL) {
		/* locked cookies carry a page array: hand it to the buf */
		bp->b_flags |= B_SHADOW;
		ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
		bp->b_shadow = p->pparray + btop(off);
	}
	return (bp);
}
8836 8789
8837 8790 /*
8838 8791 * Fault-handling and related routines
8839 8792 */
8840 8793
8841 8794 ddi_devstate_t
8842 8795 ddi_get_devstate(dev_info_t *dip)
8843 8796 {
8844 8797 if (DEVI_IS_DEVICE_OFFLINE(dip))
8845 8798 return (DDI_DEVSTATE_OFFLINE);
8846 8799 else if (DEVI_IS_DEVICE_DOWN(dip) || DEVI_IS_BUS_DOWN(dip))
8847 8800 return (DDI_DEVSTATE_DOWN);
8848 8801 else if (DEVI_IS_BUS_QUIESCED(dip))
8849 8802 return (DDI_DEVSTATE_QUIESCED);
8850 8803 else if (DEVI_IS_DEVICE_DEGRADED(dip))
8851 8804 return (DDI_DEVSTATE_DEGRADED);
8852 8805 else
8853 8806 return (DDI_DEVSTATE_UP);
8854 8807 }
8855 8808
/*
 * Report a device fault: bundle the impact, location, message and the
 * device's pre-fault state into a ddi_fault_event_data and post it as a
 * DDI_DEVI_FAULT_EVENT against the node.  If no parent defines the fault
 * event cookie, the report is silently dropped.
 */
void
ddi_dev_report_fault(dev_info_t *dip, ddi_fault_impact_t impact,
    ddi_fault_location_t location, const char *message)
{
	struct ddi_fault_event_data fd;
	ddi_eventcookie_t ec;

	/*
	 * Assemble all the information into a fault-event-data structure
	 */
	fd.f_dip = dip;
	fd.f_impact = impact;
	fd.f_location = location;
	fd.f_message = message;
	fd.f_oldstate = ddi_get_devstate(dip);

	/*
	 * Get eventcookie from defining parent.
	 */
	if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) !=
	    DDI_SUCCESS)
		return;

	(void) ndi_post_event(dip, dip, ec, &fd);
}
8881 8834
8882 8835 char *
8883 8836 i_ddi_devi_class(dev_info_t *dip)
8884 8837 {
8885 8838 return (DEVI(dip)->devi_device_class);
8886 8839 }
8887 8840
/*
 * Replace the device-class string on a devinfo node.  Any existing class
 * string is freed and the new one is duplicated with i_ddi_strdup() using
 * the given kmem flag.  Returns DDI_SUCCESS on success; DDI_FAILURE if the
 * duplication fails, in which case devi_device_class is left NULL.
 * devi_lock is held across the free/replace so readers see a consistent
 * pointer.
 */
int
i_ddi_set_devi_class(dev_info_t *dip, char *devi_class, int flag)
{
	struct dev_info *devi = DEVI(dip);

	mutex_enter(&devi->devi_lock);

	if (devi->devi_device_class)
		kmem_free(devi->devi_device_class,
		    strlen(devi->devi_device_class) + 1);

	if ((devi->devi_device_class = i_ddi_strdup(devi_class, flag))
	    != NULL) {
		mutex_exit(&devi->devi_lock);
		return (DDI_SUCCESS);
	}

	mutex_exit(&devi->devi_lock);

	return (DDI_FAILURE);
}
8909 8862
8910 8863
8911 8864 /*
8912 8865 * Task Queues DDI interfaces.
8913 8866 */
8914 8867
8915 8868 /* ARGSUSED */
8916 8869 ddi_taskq_t *
8917 8870 ddi_taskq_create(dev_info_t *dip, const char *name, int nthreads,
8918 8871 pri_t pri, uint_t cflags)
8919 8872 {
8920 8873 char full_name[TASKQ_NAMELEN];
8921 8874 const char *tq_name;
8922 8875 int nodeid = 0;
8923 8876
8924 8877 if (dip == NULL)
8925 8878 tq_name = name;
8926 8879 else {
8927 8880 nodeid = ddi_get_instance(dip);
8928 8881
8929 8882 if (name == NULL)
8930 8883 name = "tq";
8931 8884
8932 8885 (void) snprintf(full_name, sizeof (full_name), "%s_%s",
8933 8886 ddi_driver_name(dip), name);
8934 8887
8935 8888 tq_name = full_name;
8936 8889 }
8937 8890
8938 8891 return ((ddi_taskq_t *)taskq_create_instance(tq_name, nodeid, nthreads,
8939 8892 pri == TASKQ_DEFAULTPRI ? minclsyspri : pri,
8940 8893 nthreads, INT_MAX, TASKQ_PREPOPULATE));
8941 8894 }
8942 8895
8943 8896 void
8944 8897 ddi_taskq_destroy(ddi_taskq_t *tq)
8945 8898 {
8946 8899 taskq_destroy((taskq_t *)tq);
8947 8900 }
8948 8901
8949 8902 int
8950 8903 ddi_taskq_dispatch(ddi_taskq_t *tq, void (* func)(void *),
8951 8904 void *arg, uint_t dflags)
8952 8905 {
8953 8906 taskqid_t id = taskq_dispatch((taskq_t *)tq, func, arg,
8954 8907 dflags == DDI_SLEEP ? TQ_SLEEP : TQ_NOSLEEP);
8955 8908
8956 8909 return (id != 0 ? DDI_SUCCESS : DDI_FAILURE);
8957 8910 }
8958 8911
8959 8912 void
8960 8913 ddi_taskq_wait(ddi_taskq_t *tq)
8961 8914 {
8962 8915 taskq_wait((taskq_t *)tq);
8963 8916 }
8964 8917
8965 8918 void
8966 8919 ddi_taskq_suspend(ddi_taskq_t *tq)
8967 8920 {
8968 8921 taskq_suspend((taskq_t *)tq);
8969 8922 }
8970 8923
8971 8924 boolean_t
8972 8925 ddi_taskq_suspended(ddi_taskq_t *tq)
8973 8926 {
8974 8927 return (taskq_suspended((taskq_t *)tq));
8975 8928 }
8976 8929
8977 8930 void
8978 8931 ddi_taskq_resume(ddi_taskq_t *tq)
8979 8932 {
8980 8933 taskq_resume((taskq_t *)tq);
8981 8934 }
8982 8935
/*
 * Split a name of the form "<prefix><decimal-number>" (e.g. "bge0") into
 * its alphanumeric prefix and trailing number.
 *
 *	ifname - name to parse
 *	alnum  - receives the NUL-terminated prefix; caller supplies the
 *	         buffer (assumed large enough to hold the prefix -- not
 *	         checked here, TODO confirm against callers)
 *	nump   - receives the trailing number
 *
 * Returns DDI_FAILURE if the name has no trailing digits, consists only
 * of digits, or the digits fail to convert; DDI_SUCCESS otherwise.
 */
int
ddi_parse(
	const char	*ifname,
	char		*alnum,
	uint_t		*nump)
{
	const char	*p;
	int		l;
	ulong_t		num;
	boolean_t	nonum = B_TRUE;
	char		c;

	l = strlen(ifname);
	/* scan backwards over the trailing run of digits */
	for (p = ifname + l; p != ifname; l--) {
		c = *--p;
		if (!isdigit(c)) {
			/* copy the prefix (l chars), then convert the rest */
			(void) strlcpy(alnum, ifname, l + 1);
			if (ddi_strtoul(p + 1, NULL, 10, &num) != 0)
				return (DDI_FAILURE);
			break;
		}
		nonum = B_FALSE;
	}
	/* l == 0: name was all digits; nonum: name had no trailing digits */
	if (l == 0 || nonum)
		return (DDI_FAILURE);

	*nump = num;
	return (DDI_SUCCESS);
}
9012 8965
9013 8966 /*
9014 8967 * Default initialization function for drivers that don't need to quiesce.
9015 8968 */
9016 8969 /* ARGSUSED */
9017 8970 int
9018 8971 ddi_quiesce_not_needed(dev_info_t *dip)
9019 8972 {
9020 8973 return (DDI_SUCCESS);
9021 8974 }
9022 8975
9023 8976 /*
9024 8977 * Initialization function for drivers that should implement quiesce()
9025 8978 * but haven't yet.
9026 8979 */
9027 8980 /* ARGSUSED */
9028 8981 int
9029 8982 ddi_quiesce_not_supported(dev_info_t *dip)
9030 8983 {
9031 8984 return (DDI_FAILURE);
9032 8985 }
9033 8986
9034 8987 char *
9035 8988 ddi_strdup(const char *str, int flag)
9036 8989 {
9037 8990 int n;
9038 8991 char *ptr;
9039 8992
9040 8993 ASSERT(str != NULL);
9041 8994 ASSERT((flag == KM_SLEEP) || (flag == KM_NOSLEEP));
9042 8995
9043 8996 n = strlen(str);
9044 8997 if ((ptr = kmem_alloc(n + 1, flag)) == NULL)
9045 8998 return (NULL);
9046 8999 bcopy(str, ptr, n + 1);
9047 9000 return (ptr);
9048 9001 }
9049 9002
9050 9003 char *
9051 9004 strdup(const char *str)
9052 9005 {
9053 9006 return (ddi_strdup(str, KM_SLEEP));
9054 9007 }
9055 9008
/*
 * Free a string allocated by strdup()/ddi_strdup().
 */
void
strfree(char *str)
{
	size_t nbytes;

	ASSERT(str != NULL);
	nbytes = strlen(str) + 1;
	kmem_free(str, nbytes);
}
9062 9015
9063 9016 /*
9064 9017 * Generic DDI callback interfaces.
9065 9018 */
9066 9019
/*
 * Register the (single) generic DDI callback for a devinfo node.
 *
 * Only one callback may be registered per node (DDI_EALREADY otherwise).
 * Must not be called from interrupt context.  If the callback includes
 * DDI_CB_FLAG_INTR, the interrupt resource manager (IRM) is notified.
 * On success a handle referring to the node's callback slot is returned
 * through ret_hdlp.
 */
int
ddi_cb_register(dev_info_t *dip, ddi_cb_flags_t flags, ddi_cb_func_t cbfunc,
    void *arg1, void *arg2, ddi_cb_handle_t *ret_hdlp)
{
	ddi_cb_t	*cbp;

	ASSERT(dip != NULL);
	ASSERT(DDI_CB_FLAG_VALID(flags));
	ASSERT(cbfunc != NULL);
	ASSERT(ret_hdlp != NULL);

	/* Sanity check the context */
	ASSERT(!servicing_interrupt());
	if (servicing_interrupt())
		return (DDI_FAILURE);

	/* Validate parameters */
	if ((dip == NULL) || !DDI_CB_FLAG_VALID(flags) ||
	    (cbfunc == NULL) || (ret_hdlp == NULL))
		return (DDI_EINVAL);

	/* Check for previous registration */
	if (DEVI(dip)->devi_cb_p != NULL)
		return (DDI_EALREADY);

	/* Allocate and initialize callback */
	cbp = kmem_zalloc(sizeof (ddi_cb_t), KM_SLEEP);
	cbp->cb_dip = dip;
	cbp->cb_func = cbfunc;
	cbp->cb_arg1 = arg1;
	cbp->cb_arg2 = arg2;
	cbp->cb_flags = flags;
	DEVI(dip)->devi_cb_p = cbp;

	/* If adding an IRM callback, notify IRM */
	if (flags & DDI_CB_FLAG_INTR)
		i_ddi_irm_set_cb(dip, B_TRUE);

	/* the handle points at the node's callback slot, not the cb itself */
	*ret_hdlp = (ddi_cb_handle_t)&(DEVI(dip)->devi_cb_p);
	return (DDI_SUCCESS);
}
9108 9061
/*
 * Unregister a generic DDI callback previously registered with
 * ddi_cb_register().  Must not be called from interrupt context.
 * If the callback was an IRM callback, IRM is notified of the removal
 * before the callback structure is destroyed.
 */
int
ddi_cb_unregister(ddi_cb_handle_t hdl)
{
	ddi_cb_t	*cbp;
	dev_info_t	*dip;

	ASSERT(hdl != NULL);

	/* Sanity check the context */
	ASSERT(!servicing_interrupt());
	if (servicing_interrupt())
		return (DDI_FAILURE);

	/* Validate parameters */
	if ((hdl == NULL) || ((cbp = *(ddi_cb_t **)hdl) == NULL) ||
	    ((dip = cbp->cb_dip) == NULL))
		return (DDI_EINVAL);

	/* If removing an IRM callback, notify IRM */
	if (cbp->cb_flags & DDI_CB_FLAG_INTR)
		i_ddi_irm_set_cb(dip, B_FALSE);

	/* Destroy the callback */
	kmem_free(cbp, sizeof (ddi_cb_t));
	DEVI(dip)->devi_cb_p = NULL;

	return (DDI_SUCCESS);
}
9137 9090
9138 9091 /*
9139 9092 * Platform independent DR routines
9140 9093 */
9141 9094
9142 9095 static int
9143 9096 ndi2errno(int n)
9144 9097 {
9145 9098 int err = 0;
9146 9099
9147 9100 switch (n) {
9148 9101 case NDI_NOMEM:
9149 9102 err = ENOMEM;
9150 9103 break;
9151 9104 case NDI_BUSY:
9152 9105 err = EBUSY;
9153 9106 break;
9154 9107 case NDI_FAULT:
9155 9108 err = EFAULT;
9156 9109 break;
9157 9110 case NDI_FAILURE:
9158 9111 err = EIO;
9159 9112 break;
9160 9113 case NDI_SUCCESS:
9161 9114 break;
9162 9115 case NDI_BADHANDLE:
9163 9116 default:
9164 9117 err = EINVAL;
9165 9118 break;
9166 9119 }
9167 9120 return (err);
9168 9121 }
9169 9122
/*
 * Prom tree node list: singly-linked list of PROM nodes selected
 * during a branch-creation walk (see visit_node()).
 */
struct ptnode {
	pnode_t		nodeid;		/* PROM node id of selected node */
	struct ptnode	*next;		/* next list entry, NULL at tail */
};
9177 9130
/*
 * Prom tree walk arg: state shared across create_prom_branch() and
 * visit_node().
 */
struct pta {
	dev_info_t	*pdip;	/* parent under which branches are created */
	devi_branch_t	*bp;	/* caller's branch descriptor/callbacks */
	uint_t		flags;	/* DEVI_BRANCH_* flags */
	dev_info_t	*fdip;	/* first configured dip (output) */
	struct ptnode	*head;	/* list of selected PROM nodes */
};
9188 9141
/*
 * Depth-first walk of the PROM subtree rooted at nodeid.  Each node is
 * offered to the caller's prom_branch_select callback; accepted nodes
 * are appended to the tail of the list at ap->head.  If
 * DEVI_BRANCH_CHILD is set, children of the node are not descended into.
 */
static void
visit_node(pnode_t nodeid, struct pta *ap)
{
	struct ptnode	**nextp;
	int		(*select)(pnode_t, void *, uint_t);

	ASSERT(nodeid != OBP_NONODE && nodeid != OBP_BADNODE);

	select = ap->bp->create.prom_branch_select;

	ASSERT(select);

	if (select(nodeid, ap->bp->arg, 0) == DDI_SUCCESS) {

		/* find the tail of the list */
		for (nextp = &ap->head; *nextp; nextp = &(*nextp)->next)
			;

		*nextp = kmem_zalloc(sizeof (struct ptnode), KM_SLEEP);

		(*nextp)->nodeid = nodeid;
	}

	if ((ap->flags & DEVI_BRANCH_CHILD) == DEVI_BRANCH_CHILD)
		return;

	/* recurse over all children of this node */
	nodeid = prom_childnode(nodeid);
	while (nodeid != OBP_NONODE && nodeid != OBP_BADNODE) {
		visit_node(nodeid, ap);
		nodeid = prom_nextnode(nodeid);
	}
}
9220 9173
/*
 * Mark a not-yet-attached ("infant") dip offline.
 *
 * arg is a caller-supplied MAXPATHLEN scratch buffer used only for the
 * warning message.  Fails (with a warning) if the node is already
 * attached.
 *
 * NOTE: The caller of this function must check for device contracts
 * or LDI callbacks against this dip before setting the dip offline.
 */
static int
set_infant_dip_offline(dev_info_t *dip, void *arg)
{
	char	*path = (char *)arg;

	ASSERT(dip);
	ASSERT(arg);

	if (i_ddi_node_state(dip) >= DS_ATTACHED) {
		(void) ddi_pathname(dip, path);
		cmn_err(CE_WARN, "Attempt to set offline flag on attached "
		    "node: %s", path);
		return (DDI_FAILURE);
	}

	mutex_enter(&(DEVI(dip)->devi_lock));
	if (!DEVI_IS_DEVICE_OFFLINE(dip))
		DEVI_SET_DEVICE_OFFLINE(dip);
	mutex_exit(&(DEVI(dip)->devi_lock));

	return (DDI_SUCCESS);
}
9247 9200
/* walk-callback state for dip_set_offline() */
typedef struct result {
	char	*path;		/* scratch pathname buffer */
	int	result;		/* overall DDI_SUCCESS/DDI_FAILURE */
} result_t;
9252 9205
/*
 * ddi_walk_devs() callback: offline a single dip, honoring the
 * offline-notify/finalize protocol.  The walk terminates only if a
 * consumer blocks the offline; a per-node offline failure is recorded
 * via finalize but does not stop the walk.
 */
static int
dip_set_offline(dev_info_t *dip, void *arg)
{
	int end;
	result_t *resp = (result_t *)arg;

	ASSERT(dip);
	ASSERT(resp);

	/*
	 * We stop the walk if e_ddi_offline_notify() returns
	 * failure, because this implies that one or more consumers
	 * (either LDI or contract based) has blocked the offline.
	 * So there is no point in continuing the walk
	 */
	if (e_ddi_offline_notify(dip) == DDI_FAILURE) {
		resp->result = DDI_FAILURE;
		return (DDI_WALK_TERMINATE);
	}

	/*
	 * If set_infant_dip_offline() returns failure, it implies
	 * that we failed to set a particular dip offline. This
	 * does not imply that the offline as a whole should fail.
	 * We want to do the best we can, so we continue the walk.
	 */
	if (set_infant_dip_offline(dip, resp->path) == DDI_SUCCESS)
		end = DDI_SUCCESS;
	else
		end = DDI_FAILURE;

	e_ddi_offline_finalize(dip, end);

	return (DDI_WALK_CONTINUE);
}
9288 9241
/*
 * Offline a dip and its entire subtree, observing the
 * offline-notify/finalize protocol at each node.
 *
 * The call to e_ddi_offline_notify() exists for the
 * unlikely error case that a branch we are trying to
 * create already exists and has device contracts or LDI
 * event callbacks against it.
 *
 * We allow create to succeed for such branches only if
 * no constraints block the offline.
 */
static int
branch_set_offline(dev_info_t *dip, char *path)
{
	int		circ;
	int		end;
	result_t	res;


	if (e_ddi_offline_notify(dip) == DDI_FAILURE) {
		return (DDI_FAILURE);
	}

	if (set_infant_dip_offline(dip, path) == DDI_SUCCESS)
		end = DDI_SUCCESS;
	else
		end = DDI_FAILURE;

	e_ddi_offline_finalize(dip, end);

	if (end == DDI_FAILURE)
		return (DDI_FAILURE);

	/* now walk the children under the branch root */
	res.result = DDI_SUCCESS;
	res.path = path;

	ndi_devi_enter(dip, &circ);
	ddi_walk_devs(ddi_get_child(dip), dip_set_offline, &res);
	ndi_devi_exit(dip, circ);

	return (res.result);
}
9329 9282
/*
 * prom_tree_access() callback: build devinfo branches for every PROM
 * node under ap->pdip accepted by the caller's select callback.  Each
 * selected node becomes a branch rooted at a (held) dip, set offline
 * so that only an explicit "configure" can attach it; branches are
 * optionally configured (DEVI_BRANCH_CONFIGURE) and reported via the
 * optional devi_branch_callback.  Returns 0 or the first errno seen.
 */
/*ARGSUSED*/
static int
create_prom_branch(void *arg, int has_changed)
{
	int		circ;
	int		exists, rv;
	pnode_t		nodeid;
	struct ptnode	*tnp;
	dev_info_t	*dip;
	struct pta	*ap = arg;
	devi_branch_t	*bp;
	char		*path;

	ASSERT(ap);
	ASSERT(ap->fdip == NULL);
	ASSERT(ap->pdip && ndi_dev_is_prom_node(ap->pdip));

	bp = ap->bp;

	nodeid = ddi_get_nodeid(ap->pdip);
	if (nodeid == OBP_NONODE || nodeid == OBP_BADNODE) {
		cmn_err(CE_WARN, "create_prom_branch: invalid "
		    "nodeid: 0x%x", nodeid);
		return (EINVAL);
	}

	/* collect the list of selected PROM nodes */
	ap->head = NULL;

	nodeid = prom_childnode(nodeid);
	while (nodeid != OBP_NONODE && nodeid != OBP_BADNODE) {
		visit_node(nodeid, ap);
		nodeid = prom_nextnode(nodeid);
	}

	if (ap->head == NULL)
		return (ENODEV);

	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	rv = 0;
	while ((tnp = ap->head) != NULL) {
		ap->head = tnp->next;

		ndi_devi_enter(ap->pdip, &circ);

		/*
		 * Check if the branch already exists.
		 */
		exists = 0;
		dip = e_ddi_nodeid_to_dip(tnp->nodeid);
		if (dip != NULL) {
			exists = 1;

			/* Parent is held busy, so release hold */
			ndi_rele_devi(dip);
#ifdef	DEBUG
			cmn_err(CE_WARN, "create_prom_branch: dip(%p) exists"
			    " for nodeid 0x%x", (void *)dip, tnp->nodeid);
#endif
		} else {
			dip = i_ddi_create_branch(ap->pdip, tnp->nodeid);
		}

		kmem_free(tnp, sizeof (struct ptnode));

		/*
		 * Hold the branch if it is not already held
		 */
		if (dip && !exists) {
			e_ddi_branch_hold(dip);
		}

		ASSERT(dip == NULL || e_ddi_branch_held(dip));

		/*
		 * Set all dips in the newly created branch offline so that
		 * only a "configure" operation can attach
		 * the branch
		 */
		if (dip == NULL || branch_set_offline(dip, path)
		    == DDI_FAILURE) {
			ndi_devi_exit(ap->pdip, circ);
			rv = EIO;
			continue;
		}

		ASSERT(ddi_get_parent(dip) == ap->pdip);

		ndi_devi_exit(ap->pdip, circ);

		if (ap->flags & DEVI_BRANCH_CONFIGURE) {
			int error = e_ddi_branch_configure(dip, &ap->fdip, 0);
			if (error && rv == 0)
				rv = error;
		}

		/*
		 * Invoke devi_branch_callback() (if it exists) only for
		 * newly created branches
		 */
		if (bp->devi_branch_callback && !exists)
			bp->devi_branch_callback(dip, bp->arg, 0);
	}

	kmem_free(path, MAXPATHLEN);

	return (rv);
}
9437 9390
/*
 * Recursively create one self-identifying (SID) devinfo node and its
 * children under pdip, driven by the caller's sid_branch_create
 * callback.  When rdipp is non-NULL this call creates a branch root
 * (and returns it through rdipp); recursive calls pass NULL.  The
 * callback's DDI_WALK_* return value steers the recursion (see the
 * switch below).  Returns a DDI_WALK_* code for the caller's walk.
 */
static int
sid_node_create(dev_info_t *pdip, devi_branch_t *bp, dev_info_t **rdipp)
{
	int			rv, circ, len;
	int			i, flags, ret;
	dev_info_t		*dip;
	char			*nbuf;
	char			*path;
	static const char	*noname = "<none>";

	ASSERT(pdip);
	ASSERT(DEVI_BUSY_OWNED(pdip));

	flags = 0;

	/*
	 * Creating the root of a branch ?
	 */
	if (rdipp) {
		*rdipp = NULL;
		flags = DEVI_BRANCH_ROOT;
	}

	/* allocate a proto node; the callback is expected to name it */
	ndi_devi_alloc_sleep(pdip, (char *)noname, DEVI_SID_NODEID, &dip);
	rv = bp->create.sid_branch_create(dip, bp->arg, flags);

	nbuf = kmem_alloc(OBP_MAXDRVNAME, KM_SLEEP);

	if (rv == DDI_WALK_ERROR) {
		cmn_err(CE_WARN, "e_ddi_branch_create: Error setting"
		    " properties on devinfo node %p", (void *)dip);
		goto fail;
	}

	len = OBP_MAXDRVNAME;
	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "name", nbuf, &len)
	    != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "e_ddi_branch_create: devinfo node %p has"
		    "no name property", (void *)dip);
		goto fail;
	}

	ASSERT(i_ddi_node_state(dip) == DS_PROTO);
	if (ndi_devi_set_nodename(dip, nbuf, 0) != NDI_SUCCESS) {
		cmn_err(CE_WARN, "e_ddi_branch_create: cannot set name (%s)"
		    " for devinfo node %p", nbuf, (void *)dip);
		goto fail;
	}

	kmem_free(nbuf, OBP_MAXDRVNAME);

	/*
	 * Ignore bind failures just like boot does
	 */
	(void) ndi_devi_bind_driver(dip, 0);

	switch (rv) {
	case DDI_WALK_CONTINUE:
	case DDI_WALK_PRUNESIB:
		ndi_devi_enter(dip, &circ);

		/* create children until one asks us to stop */
		i = DDI_WALK_CONTINUE;
		for (; i == DDI_WALK_CONTINUE; ) {
			i = sid_node_create(dip, bp, NULL);
		}

		ASSERT(i == DDI_WALK_ERROR || i == DDI_WALK_PRUNESIB);
		if (i == DDI_WALK_ERROR)
			rv = i;
		/*
		 * If PRUNESIB stop creating siblings
		 * of dip's child. Subsequent walk behavior
		 * is determined by rv returned by dip.
		 */

		ndi_devi_exit(dip, circ);
		break;
	case DDI_WALK_TERMINATE:
		/*
		 * Don't create children and ask our parent
		 * to not create siblings either.
		 */
		rv = DDI_WALK_PRUNESIB;
		break;
	case DDI_WALK_PRUNECHILD:
		/*
		 * Don't create children, but ask parent to continue
		 * with siblings.
		 */
		rv = DDI_WALK_CONTINUE;
		break;
	default:
		ASSERT(0);
		break;
	}

	if (rdipp)
		*rdipp = dip;

	/*
	 * Set device offline - only the "configure" op should cause an attach.
	 * Note that it is safe to set the dip offline without checking
	 * for either device contract or layered driver (LDI) based constraints
	 * since there cannot be any contracts or LDI opens of this device.
	 * This is because this node is a newly created dip with the parent busy
	 * held, so no other thread can come in and attach this dip. A dip that
	 * has never been attached cannot have contracts since by definition
	 * a device contract (an agreement between a process and a device minor
	 * node) can only be created against a device that has minor nodes
	 * i.e is attached. Similarly an LDI open will only succeed if the
	 * dip is attached. We assert below that the dip is not attached.
	 */
	ASSERT(i_ddi_node_state(dip) < DS_ATTACHED);
	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	ret = set_infant_dip_offline(dip, path);
	ASSERT(ret == DDI_SUCCESS);
	kmem_free(path, MAXPATHLEN);

	return (rv);
fail:
	(void) ndi_devi_free(dip);
	kmem_free(nbuf, OBP_MAXDRVNAME);
	return (DDI_WALK_ERROR);
}
9563 9516
/*
 * Repeatedly create SID branches under pdip until the caller's
 * sid_branch_create callback ends the walk (PRUNESIB) or errors out.
 * Each created branch root is held, optionally configured
 * (DEVI_BRANCH_CONFIGURE) and reported via the optional
 * devi_branch_callback.  Returns 0, EIO on walk error, or the first
 * configure errno.
 */
static int
create_sid_branch(
	dev_info_t	*pdip,
	devi_branch_t	*bp,
	dev_info_t	**dipp,
	uint_t		flags)
{
	int		rv = 0, state = DDI_WALK_CONTINUE;
	dev_info_t	*rdip;

	while (state == DDI_WALK_CONTINUE) {
		int	circ;

		ndi_devi_enter(pdip, &circ);

		state = sid_node_create(pdip, bp, &rdip);
		if (rdip == NULL) {
			ndi_devi_exit(pdip, circ);
			ASSERT(state == DDI_WALK_ERROR);
			break;
		}

		e_ddi_branch_hold(rdip);

		ndi_devi_exit(pdip, circ);

		if (flags & DEVI_BRANCH_CONFIGURE) {
			int error = e_ddi_branch_configure(rdip, dipp, 0);
			if (error && rv == 0)
				rv = error;
		}

		/*
		 * devi_branch_callback() is optional
		 */
		if (bp->devi_branch_callback)
			bp->devi_branch_callback(rdip, bp->arg, 0);
	}

	ASSERT(state == DDI_WALK_ERROR || state == DDI_WALK_PRUNESIB);

	return (state == DDI_WALK_ERROR ? EIO : rv);
}
9607 9560
/*
 * Create a branch of devinfo nodes under pdip, driven either by the
 * PROM device tree (DEVI_BRANCH_PROM, via prom_branch_select) or by a
 * caller callback (DEVI_BRANCH_SID, via sid_branch_create).  Exactly
 * one branch type must be set, with its matching callback non-NULL;
 * DEVI_BRANCH_EVENT is not supported.  For PROM branches the first
 * configured dip is returned (held) through dipp if supplied, else
 * released.  Returns 0 or an errno.
 */
int
e_ddi_branch_create(
	dev_info_t	*pdip,
	devi_branch_t	*bp,
	dev_info_t	**dipp,
	uint_t		flags)
{
	int	prom_devi, sid_devi, error;

	if (pdip == NULL || bp == NULL || bp->type == 0)
		return (EINVAL);

	prom_devi = (bp->type == DEVI_BRANCH_PROM) ? 1 : 0;
	sid_devi = (bp->type == DEVI_BRANCH_SID) ? 1 : 0;

	if (prom_devi && bp->create.prom_branch_select == NULL)
		return (EINVAL);
	else if (sid_devi && bp->create.sid_branch_create == NULL)
		return (EINVAL);
	else if (!prom_devi && !sid_devi)
		return (EINVAL);

	if (flags & DEVI_BRANCH_EVENT)
		return (EINVAL);

	if (prom_devi) {
		struct pta pta = {0};

		pta.pdip = pdip;
		pta.bp = bp;
		pta.flags = flags;

		error = prom_tree_access(create_prom_branch, &pta, NULL);

		if (dipp)
			*dipp = pta.fdip;
		else if (pta.fdip)
			ndi_rele_devi(pta.fdip);
	} else {
		error = create_sid_branch(pdip, bp, dipp, flags);
	}

	return (error);
}
9652 9605
9653 9606 int
9654 9607 e_ddi_branch_configure(dev_info_t *rdip, dev_info_t **dipp, uint_t flags)
9655 9608 {
9656 9609 int rv;
9657 9610 char *devnm;
9658 9611 dev_info_t *pdip;
9659 9612
9660 9613 if (dipp)
9661 9614 *dipp = NULL;
9662 9615
9663 9616 if (rdip == NULL || flags != 0 || (flags & DEVI_BRANCH_EVENT))
9664 9617 return (EINVAL);
9665 9618
9666 9619 pdip = ddi_get_parent(rdip);
9667 9620
9668 9621 ndi_hold_devi(pdip);
9669 9622
9670 9623 if (!e_ddi_branch_held(rdip)) {
9671 9624 ndi_rele_devi(pdip);
9672 9625 cmn_err(CE_WARN, "e_ddi_branch_configure: "
9673 9626 "dip(%p) not held", (void *)rdip);
9674 9627 return (EINVAL);
9675 9628 }
9676 9629
9677 9630 if (i_ddi_node_state(rdip) < DS_INITIALIZED) {
9678 9631 /*
9679 9632 * First attempt to bind a driver. If we fail, return
9680 9633 * success (On some platforms, dips for some device
9681 9634 * types (CPUs) may not have a driver)
9682 9635 */
9683 9636 if (ndi_devi_bind_driver(rdip, 0) != NDI_SUCCESS) {
9684 9637 ndi_rele_devi(pdip);
9685 9638 return (0);
9686 9639 }
9687 9640
9688 9641 if (ddi_initchild(pdip, rdip) != DDI_SUCCESS) {
9689 9642 rv = NDI_FAILURE;
9690 9643 goto out;
9691 9644 }
9692 9645 }
9693 9646
9694 9647 ASSERT(i_ddi_node_state(rdip) >= DS_INITIALIZED);
9695 9648
9696 9649 devnm = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
9697 9650
9698 9651 (void) ddi_deviname(rdip, devnm);
9699 9652
9700 9653 if ((rv = ndi_devi_config_one(pdip, devnm+1, &rdip,
9701 9654 NDI_DEVI_ONLINE | NDI_CONFIG)) == NDI_SUCCESS) {
9702 9655 /* release hold from ndi_devi_config_one() */
9703 9656 ndi_rele_devi(rdip);
9704 9657 }
9705 9658
9706 9659 kmem_free(devnm, MAXNAMELEN + 1);
9707 9660 out:
9708 9661 if (rv != NDI_SUCCESS && dipp && rdip) {
9709 9662 ndi_hold_devi(rdip);
9710 9663 *dipp = rdip;
9711 9664 }
9712 9665 ndi_rele_devi(pdip);
9713 9666 return (ndi2errno(rv));
9714 9667 }
9715 9668
9716 9669 void
9717 9670 e_ddi_branch_hold(dev_info_t *rdip)
9718 9671 {
9719 9672 if (e_ddi_branch_held(rdip)) {
9720 9673 cmn_err(CE_WARN, "e_ddi_branch_hold: branch already held");
9721 9674 return;
9722 9675 }
9723 9676
9724 9677 mutex_enter(&DEVI(rdip)->devi_lock);
9725 9678 if ((DEVI(rdip)->devi_flags & DEVI_BRANCH_HELD) == 0) {
9726 9679 DEVI(rdip)->devi_flags |= DEVI_BRANCH_HELD;
9727 9680 DEVI(rdip)->devi_ref++;
9728 9681 }
9729 9682 ASSERT(DEVI(rdip)->devi_ref > 0);
9730 9683 mutex_exit(&DEVI(rdip)->devi_lock);
9731 9684 }
9732 9685
9733 9686 int
9734 9687 e_ddi_branch_held(dev_info_t *rdip)
9735 9688 {
9736 9689 int rv = 0;
9737 9690
9738 9691 mutex_enter(&DEVI(rdip)->devi_lock);
9739 9692 if ((DEVI(rdip)->devi_flags & DEVI_BRANCH_HELD) &&
9740 9693 DEVI(rdip)->devi_ref > 0) {
9741 9694 rv = 1;
9742 9695 }
9743 9696 mutex_exit(&DEVI(rdip)->devi_lock);
9744 9697
9745 9698 return (rv);
9746 9699 }
9747 9700
9748 9701 void
9749 9702 e_ddi_branch_rele(dev_info_t *rdip)
9750 9703 {
9751 9704 mutex_enter(&DEVI(rdip)->devi_lock);
9752 9705 DEVI(rdip)->devi_flags &= ~DEVI_BRANCH_HELD;
9753 9706 DEVI(rdip)->devi_ref--;
9754 9707 mutex_exit(&DEVI(rdip)->devi_lock);
9755 9708 }
9756 9709
9757 9710 int
9758 9711 e_ddi_branch_unconfigure(
9759 9712 dev_info_t *rdip,
9760 9713 dev_info_t **dipp,
9761 9714 uint_t flags)
9762 9715 {
9763 9716 int circ, rv;
9764 9717 int destroy;
9765 9718 char *devnm;
9766 9719 uint_t nflags;
9767 9720 dev_info_t *pdip;
9768 9721
9769 9722 if (dipp)
9770 9723 *dipp = NULL;
9771 9724
9772 9725 if (rdip == NULL)
9773 9726 return (EINVAL);
9774 9727
9775 9728 pdip = ddi_get_parent(rdip);
9776 9729
9777 9730 ASSERT(pdip);
9778 9731
9779 9732 /*
9780 9733 * Check if caller holds pdip busy - can cause deadlocks during
9781 9734 * devfs_clean()
9782 9735 */
9783 9736 if (DEVI_BUSY_OWNED(pdip)) {
9784 9737 cmn_err(CE_WARN, "e_ddi_branch_unconfigure: failed: parent"
9785 9738 " devinfo node(%p) is busy held", (void *)pdip);
9786 9739 return (EINVAL);
9787 9740 }
9788 9741
9789 9742 destroy = (flags & DEVI_BRANCH_DESTROY) ? 1 : 0;
9790 9743
9791 9744 devnm = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
9792 9745
9793 9746 ndi_devi_enter(pdip, &circ);
9794 9747 (void) ddi_deviname(rdip, devnm);
9795 9748 ndi_devi_exit(pdip, circ);
9796 9749
9797 9750 /*
9798 9751 * ddi_deviname() returns a component name with / prepended.
9799 9752 */
9800 9753 (void) devfs_clean(pdip, devnm + 1, DV_CLEAN_FORCE);
9801 9754
9802 9755 ndi_devi_enter(pdip, &circ);
9803 9756
9804 9757 /*
9805 9758 * Recreate device name as it may have changed state (init/uninit)
9806 9759 * when parent busy lock was dropped for devfs_clean()
9807 9760 */
9808 9761 (void) ddi_deviname(rdip, devnm);
9809 9762
9810 9763 if (!e_ddi_branch_held(rdip)) {
9811 9764 kmem_free(devnm, MAXNAMELEN + 1);
9812 9765 ndi_devi_exit(pdip, circ);
9813 9766 cmn_err(CE_WARN, "e_ddi_%s_branch: dip(%p) not held",
9814 9767 destroy ? "destroy" : "unconfigure", (void *)rdip);
9815 9768 return (EINVAL);
9816 9769 }
9817 9770
9818 9771 /*
9819 9772 * Release hold on the branch. This is ok since we are holding the
9820 9773 * parent busy. If rdip is not removed, we must do a hold on the
9821 9774 * branch before returning.
9822 9775 */
9823 9776 e_ddi_branch_rele(rdip);
9824 9777
9825 9778 nflags = NDI_DEVI_OFFLINE;
9826 9779 if (destroy || (flags & DEVI_BRANCH_DESTROY)) {
9827 9780 nflags |= NDI_DEVI_REMOVE;
9828 9781 destroy = 1;
9829 9782 } else {
9830 9783 nflags |= NDI_UNCONFIG; /* uninit but don't remove */
9831 9784 }
9832 9785
9833 9786 if (flags & DEVI_BRANCH_EVENT)
9834 9787 nflags |= NDI_POST_EVENT;
9835 9788
9836 9789 if (i_ddi_devi_attached(pdip) &&
9837 9790 (i_ddi_node_state(rdip) >= DS_INITIALIZED)) {
9838 9791 rv = ndi_devi_unconfig_one(pdip, devnm+1, dipp, nflags);
9839 9792 } else {
9840 9793 rv = e_ddi_devi_unconfig(rdip, dipp, nflags);
9841 9794 if (rv == NDI_SUCCESS) {
9842 9795 ASSERT(!destroy || ddi_get_child(rdip) == NULL);
9843 9796 rv = ndi_devi_offline(rdip, nflags);
9844 9797 }
9845 9798 }
9846 9799
9847 9800 if (!destroy || rv != NDI_SUCCESS) {
9848 9801 /* The dip still exists, so do a hold */
9849 9802 e_ddi_branch_hold(rdip);
9850 9803 }
9851 9804 out:
9852 9805 kmem_free(devnm, MAXNAMELEN + 1);
9853 9806 ndi_devi_exit(pdip, circ);
9854 9807 return (ndi2errno(rv));
9855 9808 }
9856 9809
9857 9810 int
9858 9811 e_ddi_branch_destroy(dev_info_t *rdip, dev_info_t **dipp, uint_t flag)
9859 9812 {
9860 9813 return (e_ddi_branch_unconfigure(rdip, dipp,
9861 9814 flag|DEVI_BRANCH_DESTROY));
9862 9815 }
9863 9816
/*
 * Number of chains for hash table
 */
#define	NUMCHAINS	17

/*
 * Devinfo busy arg
 *
 * Accumulator shared by the visit_* callbacks below and by
 * e_ddi_branch_referenced(): per-dip open/hold counts are tallied into
 * two mod_hash tables keyed by dev_info pointer, one fed from the devfs
 * dv_node walk and one from the specfs snode walk.
 */
struct devi_busy {
	int dv_total;		/* running total of dv_node vnode holds */
	int s_total;		/* running total of snode device opens */
	mod_hash_t *dv_hash;	/* per-dip dv_node hold counts */
	mod_hash_t *s_hash;	/* per-dip snode open counts */
	int (*callback)(dev_info_t *, void *, uint_t);	/* user callback */
	void *arg;		/* opaque argument for callback */
};
9880 9833
9881 9834 static int
9882 9835 visit_dip(dev_info_t *dip, void *arg)
9883 9836 {
9884 9837 uintptr_t sbusy, dvbusy, ref;
9885 9838 struct devi_busy *bsp = arg;
9886 9839
9887 9840 ASSERT(bsp->callback);
9888 9841
9889 9842 /*
9890 9843 * A dip cannot be busy if its reference count is 0
9891 9844 */
9892 9845 if ((ref = e_ddi_devi_holdcnt(dip)) == 0) {
9893 9846 return (bsp->callback(dip, bsp->arg, 0));
9894 9847 }
9895 9848
9896 9849 if (mod_hash_find(bsp->dv_hash, dip, (mod_hash_val_t *)&dvbusy))
9897 9850 dvbusy = 0;
9898 9851
9899 9852 /*
9900 9853 * To catch device opens currently maintained on specfs common snodes.
9901 9854 */
9902 9855 if (mod_hash_find(bsp->s_hash, dip, (mod_hash_val_t *)&sbusy))
9903 9856 sbusy = 0;
9904 9857
9905 9858 #ifdef DEBUG
9906 9859 if (ref < sbusy || ref < dvbusy) {
9907 9860 cmn_err(CE_WARN, "dip(%p): sopen = %lu, dvopen = %lu "
9908 9861 "dip ref = %lu\n", (void *)dip, sbusy, dvbusy, ref);
9909 9862 }
9910 9863 #endif
9911 9864
9912 9865 dvbusy = (sbusy > dvbusy) ? sbusy : dvbusy;
9913 9866
9914 9867 return (bsp->callback(dip, bsp->arg, dvbusy));
9915 9868 }
9916 9869
/*
 * specfs snode-walk callback: tally device opens (which are maintained
 * on specfs common snodes) into the per-dip s_hash table and the
 * running s_total.  Always returns DDI_WALK_CONTINUE.
 */
static int
visit_snode(struct snode *sp, void *arg)
{
	uintptr_t sbusy;
	dev_info_t *dip;
	int count;
	struct devi_busy *bsp = arg;

	ASSERT(sp);

	/*
	 * The stable lock is held. This prevents
	 * the snode and its associated dip from
	 * going away.
	 */
	dip = NULL;
	count = spec_devi_open_count(sp, &dip);

	if (count <= 0)
		return (DDI_WALK_CONTINUE);

	ASSERT(dip);

	/*
	 * Update idiom: pull any prior tally for this dip out of the
	 * hash, add this snode's opens, and re-insert.  A failed remove
	 * means this is the first snode seen for the dip.
	 */
	if (mod_hash_remove(bsp->s_hash, dip, (mod_hash_val_t *)&sbusy))
		sbusy = count;
	else
		sbusy += count;

	if (mod_hash_insert(bsp->s_hash, dip, (mod_hash_val_t)sbusy)) {
		/* Tally for this dip is lost; warn but keep walking */
		cmn_err(CE_WARN, "%s: s_hash insert failed: dip=0x%p, "
		    "sbusy = %lu", "e_ddi_branch_referenced",
		    (void *)dip, sbusy);
	}

	bsp->s_total += count;

	return (DDI_WALK_CONTINUE);
}
9955 9908
9956 9909 static void
9957 9910 visit_dvnode(struct dv_node *dv, void *arg)
9958 9911 {
9959 9912 uintptr_t dvbusy;
9960 9913 uint_t count;
9961 9914 struct vnode *vp;
9962 9915 struct devi_busy *bsp = arg;
9963 9916
9964 9917 ASSERT(dv && dv->dv_devi);
9965 9918
9966 9919 vp = DVTOV(dv);
9967 9920
9968 9921 mutex_enter(&vp->v_lock);
9969 9922 count = vp->v_count;
9970 9923 mutex_exit(&vp->v_lock);
9971 9924
9972 9925 if (!count)
9973 9926 return;
9974 9927
9975 9928 if (mod_hash_remove(bsp->dv_hash, dv->dv_devi,
9976 9929 (mod_hash_val_t *)&dvbusy))
9977 9930 dvbusy = count;
9978 9931 else
9979 9932 dvbusy += count;
9980 9933
9981 9934 if (mod_hash_insert(bsp->dv_hash, dv->dv_devi,
9982 9935 (mod_hash_val_t)dvbusy)) {
9983 9936 cmn_err(CE_WARN, "%s: dv_hash insert failed: dip=0x%p, "
9984 9937 "dvbusy=%lu", "e_ddi_branch_referenced",
9985 9938 (void *)dv->dv_devi, dvbusy);
9986 9939 }
9987 9940
9988 9941 bsp->dv_total += count;
9989 9942 }
9990 9943
/*
 * Returns reference count on success or -1 on failure.
 *
 * Walks the devfs dv_nodes and specfs snodes under the branch rooted at
 * rdip, tallying per-dip open/hold counts into hash tables, and, if a
 * callback is supplied, invokes callback(dip, arg, busy) for rdip and
 * every dip below it with that dip's busy count.  The branch must be
 * held (see e_ddi_branch_hold()) and the caller must not hold the
 * parent busy, or devfs_walk() could deadlock.
 */
int
e_ddi_branch_referenced(
	dev_info_t *rdip,
	int (*callback)(dev_info_t *dip, void *arg, uint_t ref),
	void *arg)
{
	int circ;
	char *path;
	dev_info_t *pdip;
	struct devi_busy bsa = {0};

	ASSERT(rdip);

	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);

	ndi_hold_devi(rdip);

	pdip = ddi_get_parent(rdip);

	ASSERT(pdip);

	/*
	 * Check if caller holds pdip busy - can cause deadlocks during
	 * devfs_walk()
	 */
	if (!e_ddi_branch_held(rdip) || DEVI_BUSY_OWNED(pdip)) {
		cmn_err(CE_WARN, "e_ddi_branch_referenced: failed: "
		    "devinfo branch(%p) not held or parent busy held",
		    (void *)rdip);
		ndi_rele_devi(rdip);
		kmem_free(path, MAXPATHLEN);
		return (-1);
	}

	/* Snapshot the branch root's path while holding the parent busy */
	ndi_devi_enter(pdip, &circ);
	(void) ddi_pathname(rdip, path);
	ndi_devi_exit(pdip, circ);

	bsa.dv_hash = mod_hash_create_ptrhash("dv_node busy hash", NUMCHAINS,
	    mod_hash_null_valdtor, sizeof (struct dev_info));

	bsa.s_hash = mod_hash_create_ptrhash("snode busy hash", NUMCHAINS,
	    mod_hash_null_valdtor, sizeof (struct snode));

	if (devfs_walk(path, visit_dvnode, &bsa)) {
		cmn_err(CE_WARN, "e_ddi_branch_referenced: "
		    "devfs walk failed for: %s", path);
		kmem_free(path, MAXPATHLEN);
		/* -1 totals propagate a -1 return through the max below */
		bsa.s_total = bsa.dv_total = -1;
		goto out;
	}

	kmem_free(path, MAXPATHLEN);

	/*
	 * Walk the snode table to detect device opens, which are currently
	 * maintained on specfs common snodes.
	 */
	spec_snode_walk(visit_snode, &bsa);

	/* Without a callback, only the aggregate count is wanted */
	if (callback == NULL)
		goto out;

	bsa.callback = callback;
	bsa.arg = arg;

	/* Visit the root first; descend only if it says to continue */
	if (visit_dip(rdip, &bsa) == DDI_WALK_CONTINUE) {
		ndi_devi_enter(rdip, &circ);
		ddi_walk_devs(ddi_get_child(rdip), visit_dip, &bsa);
		ndi_devi_exit(rdip, circ);
	}

out:
	ndi_rele_devi(rdip);
	mod_hash_destroy_ptrhash(bsa.s_hash);
	mod_hash_destroy_ptrhash(bsa.dv_hash);
	return (bsa.s_total > bsa.dv_total ? bsa.s_total : bsa.dv_total);
}
↓ open down ↓ |
9108 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX