1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25 /*
26 * Layered driver support.
27 */
28
29 #include <sys/atomic.h>
30 #include <sys/types.h>
31 #include <sys/t_lock.h>
32 #include <sys/param.h>
33 #include <sys/conf.h>
34 #include <sys/systm.h>
35 #include <sys/sysmacros.h>
36 #include <sys/buf.h>
37 #include <sys/cred.h>
38 #include <sys/uio.h>
39 #include <sys/vnode.h>
40 #include <sys/fs/snode.h>
41 #include <sys/open.h>
42 #include <sys/kmem.h>
43 #include <sys/file.h>
44 #include <sys/bootconf.h>
45 #include <sys/pathname.h>
46 #include <sys/bitmap.h>
47 #include <sys/stat.h>
48 #include <sys/dditypes.h>
49 #include <sys/ddi_impldefs.h>
50 #include <sys/ddi.h>
51 #include <sys/sunddi.h>
52 #include <sys/sunndi.h>
53 #include <sys/esunddi.h>
54 #include <sys/autoconf.h>
55 #include <sys/sunldi.h>
56 #include <sys/sunldi_impl.h>
57 #include <sys/errno.h>
58 #include <sys/debug.h>
59 #include <sys/modctl.h>
60 #include <sys/var.h>
61 #include <vm/seg_vn.h>
62
63 #include <sys/stropts.h>
64 #include <sys/strsubr.h>
65 #include <sys/socket.h>
66 #include <sys/socketvar.h>
67 #include <sys/kstr.h>
68
69 /*
70 * Device contract related
71 */
72 #include <sys/contract_impl.h>
73 #include <sys/contract/device_impl.h>
74
75 /*
76 * Define macros to manipulate snode, vnode, and open device flags
77 */
78 #define VTYP_VALID(i) (((i) == VCHR) || ((i) == VBLK))
79 #define VTYP_TO_OTYP(i) (((i) == VCHR) ? OTYP_CHR : OTYP_BLK)
80 #define VTYP_TO_STYP(i) (((i) == VCHR) ? S_IFCHR : S_IFBLK)
81
82 #define OTYP_VALID(i) (((i) == OTYP_CHR) || ((i) == OTYP_BLK))
83 #define OTYP_TO_VTYP(i) (((i) == OTYP_CHR) ? VCHR : VBLK)
84 #define OTYP_TO_STYP(i) (((i) == OTYP_CHR) ? S_IFCHR : S_IFBLK)
85
86 #define STYP_VALID(i) (((i) == S_IFCHR) || ((i) == S_IFBLK))
87 #define STYP_TO_VTYP(i) (((i) == S_IFCHR) ? VCHR : VBLK)
88
89 /*
90 * Define macros for accessing layered driver hash structures
91 */
92 #define LH_HASH(vp) (handle_hash_func(vp) % LH_HASH_SZ)
93 #define LI_HASH(mid, dip, dev) (ident_hash_func(mid, dip, dev) % LI_HASH_SZ)
94
95 /*
96 * Define layered handle flags used in the lh_type field
97 */
98 #define LH_STREAM (0x1) /* handle to a streams device */
99 #define LH_CBDEV (0x2) /* handle to a char/block device */
100
101 /*
102 * Define macro for devid property lookups
103 */
104 #define DEVID_PROP_FLAGS (DDI_PROP_DONTPASS | \
105 DDI_PROP_TYPE_STRING|DDI_PROP_CANSLEEP)
106
107 /*
108 * Dummy string for NDI events
109 */
110 #define NDI_EVENT_SERVICE "NDI_EVENT_SERVICE"
111
112 static void ldi_ev_lock(void);
113 static void ldi_ev_unlock(void);
114
115 #ifdef LDI_OBSOLETE_EVENT
116 int ldi_remove_event_handler(ldi_handle_t lh, ldi_callback_id_t id);
117 #endif
118
119
120 /*
121 * globals
122 */
123 static kmutex_t ldi_ident_hash_lock[LI_HASH_SZ];
124 static struct ldi_ident *ldi_ident_hash[LI_HASH_SZ];
125
126 static kmutex_t ldi_handle_hash_lock[LH_HASH_SZ];
127 static struct ldi_handle *ldi_handle_hash[LH_HASH_SZ];
128 static size_t ldi_handle_hash_count;
129
130 static struct ldi_ev_callback_list ldi_ev_callback_list;
131
132 static uint32_t ldi_ev_id_pool = 0;
133
134 struct ldi_ev_cookie {
135 char *ck_evname;
136 uint_t ck_sync;
137 uint_t ck_ctype;
138 };
139
140 static struct ldi_ev_cookie ldi_ev_cookies[] = {
141 { LDI_EV_OFFLINE, 1, CT_DEV_EV_OFFLINE},
142 { LDI_EV_DEGRADE, 0, CT_DEV_EV_DEGRADED},
143 { LDI_EV_DEVICE_REMOVE, 0, 0},
144 { NULL} /* must terminate list */
145 };
146
147 static ldi_ev_callback_impl_t *walker_next = NULL;
148 static ldi_ev_callback_impl_t *walker_prev = NULL;
149
/*
 * One-time initialization of the LDI subsystem: sets up the per-bucket
 * locks for the handle and ident hashes and initializes the LDI event
 * callback list.  Called once during system startup.
 */
void
ldi_init(void)
{
	int i;

	/* initialize the layered handle hash and its per-bucket locks */
	ldi_handle_hash_count = 0;
	for (i = 0; i < LH_HASH_SZ; i++) {
		mutex_init(&ldi_handle_hash_lock[i], NULL, MUTEX_DEFAULT, NULL);
		ldi_handle_hash[i] = NULL;
	}

	/* initialize the layered ident hash and its per-bucket locks */
	for (i = 0; i < LI_HASH_SZ; i++) {
		mutex_init(&ldi_ident_hash_lock[i], NULL, MUTEX_DEFAULT, NULL);
		ldi_ident_hash[i] = NULL;
	}

	/*
	 * Initialize the LDI event subsystem
	 */
	mutex_init(&ldi_ev_callback_list.le_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&ldi_ev_callback_list.le_cv, NULL, CV_DEFAULT, NULL);
	ldi_ev_callback_list.le_busy = 0;
	ldi_ev_callback_list.le_thread = NULL;
	list_create(&ldi_ev_callback_list.le_head,
	    sizeof (ldi_ev_callback_impl_t),
	    offsetof(ldi_ev_callback_impl_t, lec_list));
}
176
177 /*
178 * LDI ident manipulation functions
179 */
180 static uint_t
181 ident_hash_func(modid_t modid, dev_info_t *dip, dev_t dev)
182 {
183 if (dip != NULL) {
184 uintptr_t k = (uintptr_t)dip;
185 k >>= (int)highbit(sizeof (struct dev_info));
186 return ((uint_t)k);
187 } else if (dev != DDI_DEV_T_NONE) {
188 return (modid + getminor(dev) + getmajor(dev));
189 } else {
190 return (modid);
191 }
192 }
193
194 static struct ldi_ident **
195 ident_find_ref_nolock(modid_t modid, dev_info_t *dip, dev_t dev, major_t major)
196 {
197 struct ldi_ident **lipp = NULL;
198 uint_t index = LI_HASH(modid, dip, dev);
199
200 ASSERT(MUTEX_HELD(&ldi_ident_hash_lock[index]));
201
202 for (lipp = &(ldi_ident_hash[index]);
203 (*lipp != NULL);
204 lipp = &((*lipp)->li_next)) {
205 if (((*lipp)->li_modid == modid) &&
206 ((*lipp)->li_major == major) &&
207 ((*lipp)->li_dip == dip) &&
208 ((*lipp)->li_dev == dev))
209 break;
210 }
211
212 ASSERT(lipp != NULL);
213 return (lipp);
214 }
215
/*
 * Return an ident for (mod_name, dip, dev, major): either an existing
 * ident from the hash with its refcount bumped, or a freshly inserted
 * one with a refcount of 1.  Never fails (allocation sleeps).
 */
static struct ldi_ident *
ident_alloc(char *mod_name, dev_info_t *dip, dev_t dev, major_t major)
{
	struct ldi_ident *lip, **lipp, *retlip;
	modid_t modid;
	uint_t index;

	ASSERT(mod_name != NULL);

	/* get the module id */
	modid = mod_name_to_modid(mod_name);
	ASSERT(modid != -1);

	/*
	 * allocate a new ident in case we need it.  done before taking
	 * the bucket lock because the allocation may sleep.
	 */
	lip = kmem_zalloc(sizeof (*lip), KM_SLEEP);

	/* search the hash for a matching ident */
	index = LI_HASH(modid, dip, dev);
	mutex_enter(&ldi_ident_hash_lock[index]);
	lipp = ident_find_ref_nolock(modid, dip, dev, major);

	if (*lipp != NULL) {
		/* we found an ident in the hash; reuse it and discard ours */
		ASSERT(strcmp((*lipp)->li_modname, mod_name) == 0);
		(*lipp)->li_ref++;
		retlip = *lipp;
		mutex_exit(&ldi_ident_hash_lock[index]);
		kmem_free(lip, sizeof (struct ldi_ident));
		return (retlip);
	}

	/* initialize the new ident */
	lip->li_next = NULL;
	lip->li_ref = 1;
	lip->li_modid = modid;
	lip->li_major = major;
	lip->li_dip = dip;
	lip->li_dev = dev;
	/* kmem_zalloc above guarantees li_modname stays NUL terminated */
	(void) strncpy(lip->li_modname, mod_name, sizeof (lip->li_modname) - 1);

	/* add it to the head of its ident hash bucket */
	lip->li_next = ldi_ident_hash[index];
	ldi_ident_hash[index] = lip;

	mutex_exit(&ldi_ident_hash_lock[index]);
	return (lip);
}
263
264 static void
265 ident_hold(struct ldi_ident *lip)
266 {
267 uint_t index;
268
269 ASSERT(lip != NULL);
270 index = LI_HASH(lip->li_modid, lip->li_dip, lip->li_dev);
271 mutex_enter(&ldi_ident_hash_lock[index]);
272 ASSERT(lip->li_ref > 0);
273 lip->li_ref++;
274 mutex_exit(&ldi_ident_hash_lock[index]);
275 }
276
/*
 * Drop a reference on an ident.  When the last reference goes away the
 * ident is unlinked from its hash bucket and freed.
 */
static void
ident_release(struct ldi_ident *lip)
{
	struct ldi_ident **lipp;
	uint_t index;

	ASSERT(lip != NULL);
	index = LI_HASH(lip->li_modid, lip->li_dip, lip->li_dev);
	mutex_enter(&ldi_ident_hash_lock[index]);

	ASSERT(lip->li_ref > 0);
	if (--lip->li_ref > 0) {
		/* there are more references to this ident */
		mutex_exit(&ldi_ident_hash_lock[index]);
		return;
	}

	/* this was the last reference/open for this ident. free it. */
	lipp = ident_find_ref_nolock(
	    lip->li_modid, lip->li_dip, lip->li_dev, lip->li_major);

	ASSERT((lipp != NULL) && (*lipp != NULL));
	*lipp = lip->li_next;
	mutex_exit(&ldi_ident_hash_lock[index]);
	kmem_free(lip, sizeof (struct ldi_ident));
}
303
304 /*
305 * LDI handle manipulation functions
306 */
307 static uint_t
308 handle_hash_func(void *vp)
309 {
310 uintptr_t k = (uintptr_t)vp;
311 k >>= (int)highbit(sizeof (vnode_t));
312 return ((uint_t)k);
313 }
314
315 static struct ldi_handle **
316 handle_find_ref_nolock(vnode_t *vp, struct ldi_ident *ident)
317 {
318 struct ldi_handle **lhpp = NULL;
319 uint_t index = LH_HASH(vp);
320
321 ASSERT(MUTEX_HELD(&ldi_handle_hash_lock[index]));
322
323 for (lhpp = &(ldi_handle_hash[index]);
324 (*lhpp != NULL);
325 lhpp = &((*lhpp)->lh_next)) {
326 if (((*lhpp)->lh_ident == ident) &&
327 ((*lhpp)->lh_vp == vp))
328 break;
329 }
330
331 ASSERT(lhpp != NULL);
332 return (lhpp);
333 }
334
335 static struct ldi_handle *
336 handle_find(vnode_t *vp, struct ldi_ident *ident)
337 {
338 struct ldi_handle **lhpp, *retlhp;
339 int index = LH_HASH(vp);
340
341 mutex_enter(&ldi_handle_hash_lock[index]);
342 lhpp = handle_find_ref_nolock(vp, ident);
343 retlhp = *lhpp;
344 mutex_exit(&ldi_handle_hash_lock[index]);
345 return (retlhp);
346 }
347
/*
 * Return a layered handle for (vp, ident): either an existing handle
 * from the hash with its refcount bumped, or a newly inserted one.
 * A new handle takes its own holds on both the ident and the vnode.
 */
static struct ldi_handle *
handle_alloc(vnode_t *vp, struct ldi_ident *ident)
{
	struct ldi_handle *lhp, **lhpp, *retlhp;
	uint_t index;

	ASSERT((vp != NULL) && (ident != NULL));

	/*
	 * allocate a new handle in case we need it.  done before taking
	 * the bucket lock because the allocation may sleep.
	 */
	lhp = kmem_zalloc(sizeof (*lhp), KM_SLEEP);

	/* search the hash for a matching handle */
	index = LH_HASH(vp);
	mutex_enter(&ldi_handle_hash_lock[index]);
	lhpp = handle_find_ref_nolock(vp, ident);

	if (*lhpp != NULL) {
		/* we found a handle in the hash; reuse it, discard ours */
		(*lhpp)->lh_ref++;
		retlhp = *lhpp;
		mutex_exit(&ldi_handle_hash_lock[index]);

		LDI_ALLOCFREE((CE_WARN, "ldi handle alloc: dup "
		    "lh=0x%p, ident=0x%p, vp=0x%p, drv=%s, minor=0x%x",
		    (void *)retlhp, (void *)ident, (void *)vp,
		    mod_major_to_name(getmajor(vp->v_rdev)),
		    getminor(vp->v_rdev)));

		kmem_free(lhp, sizeof (struct ldi_handle));
		return (retlhp);
	}

	/* initialize the new handle */
	lhp->lh_ref = 1;
	lhp->lh_vp = vp;
	lhp->lh_ident = ident;
#ifdef LDI_OBSOLETE_EVENT
	mutex_init(lhp->lh_lock, NULL, MUTEX_DEFAULT, NULL);
#endif

	/* set the device type for this handle (streams vs char/block) */
	lhp->lh_type = 0;
	if (vp->v_stream) {
		ASSERT(vp->v_type == VCHR);
		lhp->lh_type |= LH_STREAM;
	} else {
		lhp->lh_type |= LH_CBDEV;
	}

	/* get holds on other objects; both are released in handle_release() */
	ident_hold(ident);
	ASSERT(vp->v_count >= 1);
	VN_HOLD(vp);

	/* add it to the head of its handle hash bucket */
	lhp->lh_next = ldi_handle_hash[index];
	ldi_handle_hash[index] = lhp;
	atomic_add_long(&ldi_handle_hash_count, 1);

	LDI_ALLOCFREE((CE_WARN, "ldi handle alloc: new "
	    "lh=0x%p, ident=0x%p, vp=0x%p, drv=%s, minor=0x%x",
	    (void *)lhp, (void *)ident, (void *)vp,
	    mod_major_to_name(getmajor(vp->v_rdev)),
	    getminor(vp->v_rdev)));

	mutex_exit(&ldi_handle_hash_lock[index]);
	return (lhp);
}
416
/*
 * Drop a reference on a layered handle.  When the last reference goes
 * away the handle is unlinked from its hash bucket, its vnode and
 * ident holds are released, and the handle is freed.
 */
static void
handle_release(struct ldi_handle *lhp)
{
	struct ldi_handle **lhpp;
	uint_t index;

	ASSERT(lhp != NULL);

	index = LH_HASH(lhp->lh_vp);
	mutex_enter(&ldi_handle_hash_lock[index]);

	LDI_ALLOCFREE((CE_WARN, "ldi handle release: "
	    "lh=0x%p, ident=0x%p, vp=0x%p, drv=%s, minor=0x%x",
	    (void *)lhp, (void *)lhp->lh_ident, (void *)lhp->lh_vp,
	    mod_major_to_name(getmajor(lhp->lh_vp->v_rdev)),
	    getminor(lhp->lh_vp->v_rdev)));

	ASSERT(lhp->lh_ref > 0);
	if (--lhp->lh_ref > 0) {
		/* there are more references to this handle */
		mutex_exit(&ldi_handle_hash_lock[index]);
		return;
	}

	/* this was the last reference/open for this handle. free it. */
	lhpp = handle_find_ref_nolock(lhp->lh_vp, lhp->lh_ident);
	ASSERT((lhpp != NULL) && (*lhpp != NULL));
	*lhpp = lhp->lh_next;
	atomic_add_long(&ldi_handle_hash_count, -1);
	mutex_exit(&ldi_handle_hash_lock[index]);

	/* release the holds taken in handle_alloc() */
	VN_RELE(lhp->lh_vp);
	ident_release(lhp->lh_ident);
#ifdef LDI_OBSOLETE_EVENT
	mutex_destroy(lhp->lh_lock);
#endif
	kmem_free(lhp, sizeof (struct ldi_handle));
}
455
456 #ifdef LDI_OBSOLETE_EVENT
457 /*
458 * LDI event manipulation functions
459 */
460 static void
461 handle_event_add(ldi_event_t *lep)
462 {
463 struct ldi_handle *lhp = lep->le_lhp;
464
465 ASSERT(lhp != NULL);
466
467 mutex_enter(lhp->lh_lock);
468 if (lhp->lh_events == NULL) {
469 lhp->lh_events = lep;
470 mutex_exit(lhp->lh_lock);
471 return;
472 }
473
474 lep->le_next = lhp->lh_events;
475 lhp->lh_events->le_prev = lep;
476 lhp->lh_events = lep;
477 mutex_exit(lhp->lh_lock);
478 }
479
480 static void
481 handle_event_remove(ldi_event_t *lep)
482 {
483 struct ldi_handle *lhp = lep->le_lhp;
484
485 ASSERT(lhp != NULL);
486
487 mutex_enter(lhp->lh_lock);
488 if (lep->le_prev)
489 lep->le_prev->le_next = lep->le_next;
490 if (lep->le_next)
491 lep->le_next->le_prev = lep->le_prev;
492 if (lhp->lh_events == lep)
493 lhp->lh_events = lep->le_next;
494 mutex_exit(lhp->lh_lock);
495
496 }
497
/*
 * NDI event callback shim: translates a bus/NDI event notification into
 * a call to the LDI consumer's registered handler.  'arg' is the
 * ldi_event_t supplied when the handler was registered.
 */
static void
i_ldi_callback(dev_info_t *dip, ddi_eventcookie_t event_cookie,
    void *arg, void *bus_impldata)
{
	ldi_event_t *lep = (ldi_event_t *)arg;

	ASSERT(lep != NULL);

	LDI_EVENTCB((CE_NOTE, "%s: dip=0x%p, "
	    "event_cookie=0x%p, ldi_eventp=0x%p", "i_ldi_callback",
	    (void *)dip, (void *)event_cookie, (void *)lep));

	/* forward the event to the consumer's handler */
	lep->le_handler(lep->le_lhp, event_cookie, lep->le_arg, bus_impldata);
}
512 #endif
513
514 /*
515 * LDI open helper functions
516 */
517
518 /* get a vnode to a device by dev_t and otyp */
/*
 * Get a held vnode for a device identified by dev_t and open type.
 * On success *vpp holds a new spec vnode associated with the device's
 * dip; the caller is responsible for the vnode hold.  Returns 0,
 * EINVAL on bad arguments, or ENODEV if the device can't be held.
 */
static int
ldi_vp_from_dev(dev_t dev, int otyp, vnode_t **vpp)
{
	dev_info_t *dip;
	vnode_t *vp;

	/* sanity check required input parameters */
	if ((dev == DDI_DEV_T_NONE) || (!OTYP_VALID(otyp)) || (vpp == NULL))
		return (EINVAL);

	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (ENODEV);

	/* the vnode keeps the device association; the dip hold can go */
	vp = makespecvp(dev, OTYP_TO_VTYP(otyp));
	spec_assoc_vp_with_devi(vp, dip);
	ddi_release_devi(dip);	/* from e_ddi_hold_devi_by_dev */

	*vpp = vp;
	return (0);
}
539
540 /* get a vnode to a device by pathname */
/*
 * Get a held vnode for a device identified by an absolute pathname.
 * If the root filesystem is mounted the path is resolved through the
 * namespace (always in the global zone, with kcred); otherwise, or if
 * that fails to produce a vnode, the path is resolved directly against
 * the device tree via resolve_pathname().  Returns 0, EINVAL, ENXIO if
 * the path names a non-device, or ENODEV if resolution fails.
 */
int
ldi_vp_from_name(char *path, vnode_t **vpp)
{
	vnode_t *vp = NULL;
	int ret;

	/* sanity check required input parameters */
	if ((path == NULL) || (vpp == NULL))
		return (EINVAL);

	if (modrootloaded) {
		cred_t *saved_cred = curthread->t_cred;

		/* we don't want lookupname to fail because of credentials */
		curthread->t_cred = kcred;

		/*
		 * all lookups should be done in the global zone.  but
		 * lookupnameat() won't actually do this if an absolute
		 * path is passed in.  since the ldi interfaces require an
		 * absolute path we pass lookupnameat() a pointer to
		 * the character after the leading '/' and tell it to
		 * start searching at the current system root directory.
		 */
		ASSERT(*path == '/');
		ret = lookupnameat(path + 1, UIO_SYSSPACE, FOLLOW, NULLVPP,
		    &vp, rootdir);

		/* restore this thread's credentials */
		curthread->t_cred = saved_cred;

		if (ret == 0) {
			/* the path must name a char or block device node */
			if (!vn_matchops(vp, spec_getvnodeops()) ||
			    !VTYP_VALID(vp->v_type)) {
				VN_RELE(vp);
				return (ENXIO);
			}
		}
	}

	if (vp == NULL) {
		dev_info_t *dip;
		dev_t dev;
		int spec_type;

		/*
		 * Root is not mounted, the minor node is not specified,
		 * or an OBP path has been specified.
		 */

		/*
		 * Determine if path can be pruned to produce an
		 * OBP or devfs path for resolve_pathname.
		 */
		if (strncmp(path, "/devices/", 9) == 0)
			path += strlen("/devices");

		/*
		 * if no minor node was specified the DEFAULT minor node
		 * will be returned.  if there is no DEFAULT minor node
		 * one will be fabricated of type S_IFCHR with the minor
		 * number equal to the instance number.
		 */
		ret = resolve_pathname(path, &dip, &dev, &spec_type);
		if (ret != 0)
			return (ENODEV);

		/* resolve_pathname() returned a held dip; transfer to vp */
		ASSERT(STYP_VALID(spec_type));
		vp = makespecvp(dev, STYP_TO_VTYP(spec_type));
		spec_assoc_vp_with_devi(vp, dip);
		ddi_release_devi(dip);
	}

	*vpp = vp;
	return (0);
}
617
618 static int
619 ldi_devid_match(ddi_devid_t devid, dev_info_t *dip, dev_t dev)
620 {
621 char *devidstr;
622 ddi_prop_t *propp;
623
624 /* convert devid as a string property */
625 if ((devidstr = ddi_devid_str_encode(devid, NULL)) == NULL)
626 return (0);
627
628 /*
629 * Search for the devid. For speed and ease in locking this
630 * code directly uses the property implementation. See
631 * ddi_common_devid_to_devlist() for a comment as to why.
632 */
633 mutex_enter(&(DEVI(dip)->devi_lock));
634
635 /* check if there is a DDI_DEV_T_NONE devid property */
636 propp = i_ddi_prop_search(DDI_DEV_T_NONE,
637 DEVID_PROP_NAME, DEVID_PROP_FLAGS, &DEVI(dip)->devi_hw_prop_ptr);
638 if (propp != NULL) {
639 if (ddi_devid_str_compare(propp->prop_val, devidstr) == 0) {
640 /* a DDI_DEV_T_NONE devid exists and matchs */
641 mutex_exit(&(DEVI(dip)->devi_lock));
642 ddi_devid_str_free(devidstr);
643 return (1);
644 } else {
645 /* a DDI_DEV_T_NONE devid exists and doesn't match */
646 mutex_exit(&(DEVI(dip)->devi_lock));
647 ddi_devid_str_free(devidstr);
648 return (0);
649 }
650 }
651
652 /* check if there is a devt specific devid property */
653 propp = i_ddi_prop_search(dev,
654 DEVID_PROP_NAME, DEVID_PROP_FLAGS, &(DEVI(dip)->devi_hw_prop_ptr));
655 if (propp != NULL) {
656 if (ddi_devid_str_compare(propp->prop_val, devidstr) == 0) {
657 /* a devt specific devid exists and matchs */
658 mutex_exit(&(DEVI(dip)->devi_lock));
659 ddi_devid_str_free(devidstr);
660 return (1);
661 } else {
662 /* a devt specific devid exists and doesn't match */
663 mutex_exit(&(DEVI(dip)->devi_lock));
664 ddi_devid_str_free(devidstr);
665 return (0);
666 }
667 }
668
669 /* we didn't find any devids associated with the device */
670 mutex_exit(&(DEVI(dip)->devi_lock));
671 ddi_devid_str_free(devidstr);
672 return (0);
673 }
674
675 /* get a handle to a device by devid and minor name */
/*
 * Get a held vnode for a device identified by devid and minor name.
 * Walks the devt list for the devid and picks the first devt whose
 * device can be held and whose devid still matches.  Returns 0,
 * EINVAL on bad arguments, or ENODEV if no matching device is found.
 */
int
ldi_vp_from_devid(ddi_devid_t devid, char *minor_name, vnode_t **vpp)
{
	dev_info_t *dip;
	vnode_t *vp;
	int ret, i, ndevs, styp;
	dev_t dev, *devs;

	/* sanity check required input parameters */
	if ((devid == NULL) || (minor_name == NULL) || (vpp == NULL))
		return (EINVAL);

	ret = ddi_lyr_devid_to_devlist(devid, minor_name, &ndevs, &devs);
	if ((ret != DDI_SUCCESS) || (ndevs <= 0))
		return (ENODEV);

	for (i = 0; i < ndevs; i++) {
		dev = devs[i];

		if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
			continue;

		/*
		 * now we have to verify that the devid of the disk
		 * still matches what was requested.
		 *
		 * we have to do this because the devid could have
		 * changed between the call to ddi_lyr_devid_to_devlist()
		 * and e_ddi_hold_devi_by_dev().  this is because when
		 * ddi_lyr_devid_to_devlist() returns a list of devts
		 * there is no kind of hold on those devts so a device
		 * could have been replaced out from under us in the
		 * interim.
		 */
		/* on success styp is set and dip stays held past the loop */
		if ((i_ddi_minorname_to_devtspectype(dip, minor_name,
		    NULL, &styp) == DDI_SUCCESS) &&
		    ldi_devid_match(devid, dip, dev))
			break;

		ddi_release_devi(dip);	/* from e_ddi_hold_devi_by_dev() */
	}

	ddi_lyr_free_devlist(devs, ndevs);

	if (i == ndevs)
		return (ENODEV);

	ASSERT(STYP_VALID(styp));
	vp = makespecvp(dev, STYP_TO_VTYP(styp));
	spec_assoc_vp_with_devi(vp, dip);
	ddi_release_devi(dip);	/* from e_ddi_hold_devi_by_dev */

	*vpp = vp;
	return (0);
}
731
732 /* given a vnode, open a device */
/*
 * Open the device named by *vpp and return a layered handle for it.
 * On success *vpp may point at a different vnode than was passed in
 * (clone opens), and *lhp holds the new layered handle.  Returns 0,
 * ENXIO if the vnode isn't an openable device, or the VOP_OPEN error.
 */
static int
ldi_open_by_vp(vnode_t **vpp, int flag, cred_t *cr,
    ldi_handle_t *lhp, struct ldi_ident *li)
{
	struct ldi_handle *nlhp;
	vnode_t *vp;
	int err;

	ASSERT((vpp != NULL) && (*vpp != NULL));
	ASSERT((lhp != NULL) && (li != NULL));

	vp = *vpp;
	/* if the vnode passed in is not a device, then bail */
	if (!vn_matchops(vp, spec_getvnodeops()) || !VTYP_VALID(vp->v_type))
		return (ENXIO);

	/*
	 * the caller may have specified a node that
	 * doesn't have cb_ops defined.  the ldi doesn't yet
	 * support opening devices without a valid cb_ops.
	 */
	if (devopsp[getmajor(vp->v_rdev)]->devo_cb_ops == NULL)
		return (ENXIO);

	/* open the device; FKLYR marks this as a layered open */
	if ((err = VOP_OPEN(&vp, flag | FKLYR, cr, NULL)) != 0)
		return (err);

	/* possible clone open, make sure that we still have a spec node */
	ASSERT(vn_matchops(vp, spec_getvnodeops()));

	nlhp = handle_alloc(vp, li);

	if (vp != *vpp) {
		/*
		 * allocating the layered handle took a new hold on the vnode
		 * so we can release the hold that was returned by the clone
		 * open
		 */
		LDI_OPENCLOSE((CE_WARN, "%s: lh=0x%p",
		    "ldi clone open", (void *)nlhp));
	} else {
		LDI_OPENCLOSE((CE_WARN, "%s: lh=0x%p",
		    "ldi open", (void *)nlhp));
	}

	*vpp = vp;
	*lhp = (ldi_handle_t)nlhp;
	return (0);
}
783
784 /* Call a drivers prop_op(9E) interface */
/* Call a driver's prop_op(9E) interface */
/*
 * Thin wrapper around cdev_prop_op() that validates the driver's
 * cb_prop_op entry point and translates the LDI_DEV_T_ANY flag into a
 * DDI_DEV_T_ANY query.  Returns a DDI_PROP_* status code.
 */
static int
i_ldi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int flags, char *name, caddr_t valuep, int *lengthp)
{
	struct dev_ops *ops = NULL;
	int res;

	ASSERT((dip != NULL) && (name != NULL));
	ASSERT((prop_op == PROP_LEN) || (valuep != NULL));
	ASSERT(lengthp != NULL);

	/*
	 * we can only be invoked after a driver has been opened and
	 * someone has a layered handle to it, so there had better be
	 * a valid ops vector.
	 */
	ops = DEVI(dip)->devi_ops;
	ASSERT(ops && ops->devo_cb_ops);

	/*
	 * Some nexus drivers incorrectly set cb_prop_op to nodev,
	 * nulldev or even NULL.
	 */
	if ((ops->devo_cb_ops->cb_prop_op == nodev) ||
	    (ops->devo_cb_ops->cb_prop_op == nulldev) ||
	    (ops->devo_cb_ops->cb_prop_op == NULL)) {
		return (DDI_PROP_NOT_FOUND);
	}

	/* check if this is actually a DDI_DEV_T_ANY query */
	if (flags & LDI_DEV_T_ANY) {
		flags &= ~LDI_DEV_T_ANY;
		dev = DDI_DEV_T_ANY;
	}

	res = cdev_prop_op(dev, dip, prop_op, flags, name, valuep, lengthp);
	return (res);
}
823
/*
 * ddi_prop_free() callback that releases a property buffer allocated
 * by i_ldi_prop_op_alloc().
 */
static void
i_ldi_prop_op_free(struct prop_driver_data *pdd)
{
	kmem_free(pdd, pdd->pdd_size);
}
829
830 static caddr_t
831 i_ldi_prop_op_alloc(int prop_len)
832 {
833 struct prop_driver_data *pdd;
834 int pdd_size;
835
836 pdd_size = sizeof (struct prop_driver_data) + prop_len;
837 pdd = kmem_alloc(pdd_size, KM_SLEEP);
838 pdd->pdd_size = pdd_size;
839 pdd->pdd_prop_free = i_ldi_prop_op_free;
840 return ((caddr_t)&pdd[1]);
841 }
842
843 /*
844 * i_ldi_prop_op_typed() is a wrapper for i_ldi_prop_op that is used
845 * by the typed ldi property lookup interfaces.
846 */
/*
 * i_ldi_prop_op_typed() is a wrapper for i_ldi_prop_op that is used
 * by the typed ldi property lookup interfaces.  It looks up the
 * property length, allocates a ddi_prop_free()-able buffer, fetches
 * the value, and sanity checks the length (non-zero and a multiple of
 * elem_size, when elem_size is non-zero) on both passes.
 */
static int
i_ldi_prop_op_typed(dev_t dev, dev_info_t *dip, int flags, char *name,
    caddr_t *datap, int *lengthp, int elem_size)
{
	caddr_t prop_val;
	int prop_len, res;

	ASSERT((dip != NULL) && (name != NULL));
	ASSERT((datap != NULL) && (lengthp != NULL));

	/*
	 * first call the driver's prop_op() interface to allow it
	 * to override default property values.
	 */
	res = i_ldi_prop_op(dev, dip, PROP_LEN,
	    flags | DDI_PROP_DYNAMIC, name, NULL, &prop_len);
	if (res != DDI_PROP_SUCCESS)
		return (DDI_PROP_NOT_FOUND);

	/* sanity check the property length */
	if (prop_len == 0) {
		/*
		 * the ddi typed interfaces don't allow drivers to
		 * create properties with a length of 0.  so we should
		 * prevent drivers from returning 0 length dynamic
		 * properties for typed property lookups.
		 */
		return (DDI_PROP_NOT_FOUND);
	}

	/* sanity check the property length against the element size */
	if (elem_size && ((prop_len % elem_size) != 0))
		return (DDI_PROP_NOT_FOUND);

	/*
	 * got it.  now allocate a prop_driver_data struct so that the
	 * user can free the property via ddi_prop_free().
	 */
	prop_val = i_ldi_prop_op_alloc(prop_len);

	/* lookup the property again, this time get the value */
	res = i_ldi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
	    flags | DDI_PROP_DYNAMIC, name, prop_val, &prop_len);
	if (res != DDI_PROP_SUCCESS) {
		ddi_prop_free(prop_val);
		return (DDI_PROP_NOT_FOUND);
	}

	/* re-check: the length may have changed between the two lookups */
	if (prop_len == 0) {
		ddi_prop_free(prop_val);
		return (DDI_PROP_NOT_FOUND);
	}

	/* sanity check the property length against the element size */
	if (elem_size && ((prop_len % elem_size) != 0)) {
		ddi_prop_free(prop_val);
		return (DDI_PROP_NOT_FOUND);
	}

	/*
	 * return the prop_driver_data struct and, optionally, the length
	 * of the data.
	 */
	*datap = prop_val;
	*lengthp = prop_len;

	return (DDI_PROP_SUCCESS);
}
916
917 /*
918 * i_check_string looks at a string property and makes sure its
919 * a valid null terminated string
920 */
921 static int
922 i_check_string(char *str, int prop_len)
923 {
924 int i;
925
926 ASSERT(str != NULL);
927
928 for (i = 0; i < prop_len; i++) {
929 if (str[i] == '\0')
930 return (0);
931 }
932 return (1);
933 }
934
935 /*
936 * i_pack_string_array takes a a string array property that is represented
937 * as a concatenation of strings (with the NULL character included for
938 * each string) and converts it into a format that can be returned by
939 * ldi_prop_lookup_string_array.
940 */
941 static int
942 i_pack_string_array(char *str_concat, int prop_len,
943 char ***str_arrayp, int *nelemp)
944 {
945 int i, nelem, pack_size;
946 char **str_array, *strptr;
947
948 /*
949 * first we need to sanity check the input string array.
950 * in essence this can be done my making sure that the last
951 * character of the array passed in is null. (meaning the last
952 * string in the array is NULL terminated.
953 */
954 if (str_concat[prop_len - 1] != '\0')
955 return (1);
956
957 /* now let's count the number of strings in the array */
958 for (nelem = i = 0; i < prop_len; i++)
959 if (str_concat[i] == '\0')
960 nelem++;
961 ASSERT(nelem >= 1);
962
963 /* now let's allocate memory for the new packed property */
964 pack_size = (sizeof (char *) * (nelem + 1)) + prop_len;
965 str_array = (char **)i_ldi_prop_op_alloc(pack_size);
966
967 /* let's copy the actual string data into the new property */
968 strptr = (char *)&(str_array[nelem + 1]);
969 bcopy(str_concat, strptr, prop_len);
970
971 /* now initialize the string array pointers */
972 for (i = 0; i < nelem; i++) {
973 str_array[i] = strptr;
974 strptr += strlen(strptr) + 1;
975 }
976 str_array[nelem] = NULL;
977
978 /* set the return values */
979 *str_arrayp = str_array;
980 *nelemp = nelem;
981
982 return (0);
983 }
984
985
986 /*
987 * LDI Project private device usage interfaces
988 */
989
990 /*
991 * Get a count of how many devices are currentl open by different consumers
992 */
/*
 * Return the number of layered handles currently allocated.  This is
 * an unlocked snapshot; the count may change as soon as it is read.
 */
int
ldi_usage_count()
{
	return (ldi_handle_hash_count);
}
998
/*
 * Fill in the target (opened device) half of an ldi_usage_t from the
 * handle's vnode.  If a dip is found it is returned held in tgt_dip;
 * the caller is responsible for releasing that hold.
 */
static void
ldi_usage_walker_tgt_helper(ldi_usage_t *ldi_usage, vnode_t *vp)
{
	dev_info_t *dip;
	dev_t dev;

	ASSERT(STYP_VALID(VTYP_TO_STYP(vp->v_type)));

	/* get the target devt */
	dev = vp->v_rdev;

	/* try to get the target dip, preferring the snode's association */
	dip = VTOCS(vp)->s_dip;
	if (dip != NULL) {
		e_ddi_hold_devi(dip);
	} else if (dev != DDI_DEV_T_NONE) {
		dip = e_ddi_hold_devi_by_dev(dev, 0);
	}

	/* set the target information */
	ldi_usage->tgt_name = mod_major_to_name(getmajor(dev));
	ldi_usage->tgt_modid = mod_name_to_modid(ldi_usage->tgt_name);
	ldi_usage->tgt_devt = dev;
	ldi_usage->tgt_spec_type = VTYP_TO_STYP(vp->v_type);
	ldi_usage->tgt_dip = dip;
}
1025
1026
/*
 * Report usage information for one (ident, vnode) pair via callback().
 * When the source ident identifies a specific device instance (or a
 * plain kernel module) a single callback is made; otherwise every
 * instance of the source driver is reported as a potential consumer of
 * the target device (see the long comment below).  Returns the last
 * callback return value (LDI_USAGE_CONTINUE to keep walking).
 */
static int
ldi_usage_walker_helper(struct ldi_ident *lip, vnode_t *vp,
    void *arg, int (*callback)(const ldi_usage_t *, void *))
{
	ldi_usage_t ldi_usage;
	struct devnames *dnp;
	dev_info_t *dip;
	major_t major;
	dev_t dev;
	int ret = LDI_USAGE_CONTINUE;

	/* set the target device information (takes a hold on tgt_dip) */
	ldi_usage_walker_tgt_helper(&ldi_usage, vp);

	/* get the source devt */
	dev = lip->li_dev;

	/* try to get the source dip */
	dip = lip->li_dip;
	if (dip != NULL) {
		e_ddi_hold_devi(dip);
	} else if (dev != DDI_DEV_T_NONE) {
		dip = e_ddi_hold_devi_by_dev(dev, 0);
	}

	/* set the valid source information */
	ldi_usage.src_modid = lip->li_modid;
	ldi_usage.src_name = lip->li_modname;
	ldi_usage.src_devt = dev;
	ldi_usage.src_dip = dip;

	/*
	 * if the source ident represents either:
	 *
	 * - a kernel module (and not a device or device driver)
	 * - a device node
	 *
	 * then we currently have all the info we need to report the
	 * usage information so invoke the callback function.
	 */
	if (((lip->li_major == -1) && (dev == DDI_DEV_T_NONE)) ||
	    (dip != NULL)) {
		ret = callback(&ldi_usage, arg);
		if (dip != NULL)
			ddi_release_devi(dip);
		if (ldi_usage.tgt_dip != NULL)
			ddi_release_devi(ldi_usage.tgt_dip);
		return (ret);
	}

	/*
	 * now this is kinda gross.
	 *
	 * what we do here is attempt to associate every device instance
	 * of the source driver on the system with the open target driver.
	 * we do this because we don't know which instance of the device
	 * could potentially access the lower device so we assume that all
	 * the instances could access it.
	 *
	 * there are two ways we could have gotten here:
	 *
	 * 1) this layered ident represents one created using only a
	 *    major number or a driver module name.  this means that when
	 *    it was created we could not associate it with a particular
	 *    dev_t or device instance.
	 *
	 *    when could this possibly happen you ask?
	 *
	 *    a perfect example of this is streams persistent links.
	 *    when a persistent streams link is formed we can't associate
	 *    the lower device stream with any particular upper device
	 *    stream or instance.  this is because any particular upper
	 *    device stream could be closed, then another could be
	 *    opened with a different dev_t and device instance, and it
	 *    would still have access to the lower linked stream.
	 *
	 *    since any instance of the upper streams driver could
	 *    potentially access the lower stream whenever it wants,
	 *    we represent that here by associating the opened lower
	 *    device with every existing device instance of the upper
	 *    streams driver.
	 *
	 * 2) This case should really never happen but we'll include it
	 *    for completeness.
	 *
	 *    it's possible that we could have gotten here because we
	 *    have a dev_t for the upper device but we couldn't find a
	 *    dip associated with that dev_t.
	 *
	 *    the only types of devices that have dev_t without an
	 *    associated dip are unbound DLPIv2 network devices.  These
	 *    types of devices exist to be able to attach a stream to any
	 *    instance of a hardware network device.  since these types of
	 *    devices are usually hardware devices they should never
	 *    really have other devices open.
	 */
	if (dev != DDI_DEV_T_NONE)
		major = getmajor(dev);
	else
		major = lip->li_major;

	ASSERT((major >= 0) && (major < devcnt));

	/* walk every attached instance of the source driver */
	dnp = &devnamesp[major];
	LOCK_DEV_OPS(&dnp->dn_lock);
	dip = dnp->dn_head;
	while ((dip) && (ret == LDI_USAGE_CONTINUE)) {
		/* hold the dip and drop the lock across the callback */
		e_ddi_hold_devi(dip);
		UNLOCK_DEV_OPS(&dnp->dn_lock);

		/* set the source dip */
		ldi_usage.src_dip = dip;

		/* invoke the callback function */
		ret = callback(&ldi_usage, arg);

		LOCK_DEV_OPS(&dnp->dn_lock);
		ddi_release_devi(dip);
		dip = ddi_get_next(dip);
	}
	UNLOCK_DEV_OPS(&dnp->dn_lock);

	/* if there was a target dip, release it */
	if (ldi_usage.tgt_dip != NULL)
		ddi_release_devi(ldi_usage.tgt_dip);

	return (ret);
}
1155
1156 /*
1157 * ldi_usage_walker() - this walker reports LDI kernel device usage
1158 * information via the callback() callback function. the LDI keeps track
1159 * of what devices are being accessed in its own internal data structures.
1160 * this function walks those data structures to determine device usage.
1161 */
1162 void
1163 ldi_usage_walker(void *arg, int (*callback)(const ldi_usage_t *, void *))
1164 {
1165 struct ldi_handle *lhp;
1166 struct ldi_ident *lip;
1167 vnode_t *vp;
1168 int i;
1169 int ret = LDI_USAGE_CONTINUE;
1170
1171 for (i = 0; i < LH_HASH_SZ; i++) {
1172 mutex_enter(&ldi_handle_hash_lock[i]);
1173
1174 lhp = ldi_handle_hash[i];
1175 while ((lhp != NULL) && (ret == LDI_USAGE_CONTINUE)) {
1176 lip = lhp->lh_ident;
1177 vp = lhp->lh_vp;
1178
1179 /* invoke the devinfo callback function */
1180 ret = ldi_usage_walker_helper(lip, vp, arg, callback);
1181
1182 lhp = lhp->lh_next;
1183 }
1184 mutex_exit(&ldi_handle_hash_lock[i]);
1185
1186 if (ret != LDI_USAGE_CONTINUE)
1187 break;
1188 }
1189 }
1190
1191 /*
1192 * LDI Project private interfaces (streams linking interfaces)
1193 *
1194 * Streams supports a type of built in device layering via linking.
1195 * Certain types of streams drivers can be streams multiplexors.
1196 * A streams multiplexor supports the I_LINK/I_PLINK operation.
 * These operations allow other streams devices to be linked under the
1198 * multiplexor. By definition all streams multiplexors are devices
1199 * so this linking is a type of device layering where the multiplexor
1200 * device is layered on top of the device linked below it.
1201 */
1202
1203 /*
1204 * ldi_mlink_lh() is invoked when streams are linked using LDI handles.
1205 * It is not used for normal I_LINKs and I_PLINKs using file descriptors.
1206 *
1207 * The streams framework keeps track of links via the file_t of the lower
1208 * stream. The LDI keeps track of devices using a vnode. In the case
1209 * of a streams link created via an LDI handle, fnk_lh() allocates
1210 * a file_t that the streams framework can use to track the linkage.
1211 */
int
ldi_mlink_lh(vnode_t *vp, int cmd, intptr_t arg, cred_t *crp, int *rvalp)
{
	/* arg carries a layered handle, not a file descriptor */
	struct ldi_handle	*lhp = (struct ldi_handle *)arg;
	vnode_t			*vpdown;
	file_t			*fpdown;
	int			err;

	if (lhp == NULL)
		return (EINVAL);

	/* the handle's vnode must be a device (specfs) vnode */
	vpdown = lhp->lh_vp;
	ASSERT(vn_matchops(vpdown, spec_getvnodeops()));
	ASSERT(cmd == _I_PLINK_LH);

	/*
	 * create a new lower vnode and a file_t that points to it,
	 * streams linking requires a file_t.  falloc() returns with
	 * fpdown locked, so drop f_tlock before handing it off.
	 */
	VN_HOLD(vpdown);
	(void) falloc(vpdown, FREAD|FWRITE, &fpdown, NULL);
	mutex_exit(&fpdown->f_tlock);

	/* try to establish the link */
	err = mlink_file(vp, I_PLINK, fpdown, crp, rvalp, 1);

	if (err != 0) {
		/* the link failed, free the file_t and release the vnode */
		mutex_enter(&fpdown->f_tlock);
		unfalloc(fpdown);
		VN_RELE(vpdown);
	}

	return (err);
}
1248
1249 /*
1250 * ldi_mlink_fp() is invoked for all successful streams linkages created
1251 * via I_LINK and I_PLINK. ldi_mlink_fp() records the linkage information
1252 * in its internal state so that the devinfo snapshot code has some
1253 * observability into streams device linkage information.
1254 */
void
ldi_mlink_fp(struct stdata *stp, file_t *fpdown, int lhlink, int type)
{
	vnode_t			*vp = fpdown->f_vnode;
	struct snode		*sp, *csp;
	ldi_ident_t		li;
	major_t			major;
	int			ret;

	/* if the lower stream is not a device then return */
	if (!vn_matchops(vp, spec_getvnodeops()))
		return;

	ASSERT(!servicing_interrupt());

	LDI_STREAMS_LNK((CE_NOTE, "%s: linking streams "
	    "stp=0x%p, fpdown=0x%p", "ldi_mlink_fp",
	    (void *)stp, (void *)fpdown));

	sp = VTOS(vp);
	csp = VTOS(sp->s_commonvp);

	/* check if this was a plink via a layered handle */
	if (lhlink) {
		/*
		 * increment the common snode s_count.
		 *
		 * this is done because after the link operation there
		 * are two ways that s_count can be decremented.
		 *
		 * when the layered handle used to create the link is
		 * closed, spec_close() is called and it will decrement
		 * s_count in the common snode.  if we don't increment
		 * s_count here then this could cause spec_close() to
		 * actually close the device while it's still linked
		 * under a multiplexer.
		 *
		 * also, when the lower stream is unlinked, closef() is
		 * called for the file_t associated with this snode.
		 * closef() will call spec_close(), which will decrement
		 * s_count.  if we don't increment s_count here then this
		 * could cause spec_close() to actually close the device
		 * while there may still be valid layered handles
		 * pointing to it.
		 */
		mutex_enter(&csp->s_lock);
		ASSERT(csp->s_count >= 1);
		csp->s_count++;
		mutex_exit(&csp->s_lock);

		/*
		 * decrement the f_count.
		 * this is done because the layered driver framework does
		 * not actually cache a copy of the file_t allocated to
		 * do the link.  this is done here instead of in ldi_mlink_lh()
		 * because there is a window in ldi_mlink_lh() between where
		 * mlink_file() returns and we would decrement the f_count
		 * when the stream could be unlinked.
		 */
		mutex_enter(&fpdown->f_tlock);
		fpdown->f_count--;
		mutex_exit(&fpdown->f_tlock);
	}

	/*
	 * NOTE: here we rely on the streams subsystem not allowing
	 * a stream to be multiplexed more than once.  if this
	 * changes, we break.
	 *
	 * mark the snode/stream as multiplexed
	 */
	mutex_enter(&sp->s_lock);
	ASSERT(!(sp->s_flag & SMUXED));
	sp->s_flag |= SMUXED;
	mutex_exit(&sp->s_lock);

	/* get a layered ident for the upper stream */
	if (type == LINKNORMAL) {
		/*
		 * if the link is not persistent then we can associate
		 * the upper stream with a dev_t.  this is because the
		 * upper stream is associated with a vnode, which is
		 * associated with a dev_t and this binding can't change
		 * during the life of the stream.  since the link isn't
		 * persistent once the stream is destroyed the link is
		 * destroyed.  so the dev_t will be valid for the life
		 * of the link.
		 */
		ret = ldi_ident_from_stream(getendq(stp->sd_wrq), &li);
	} else {
		/*
		 * if the link is persistent we can only associate the
		 * link with a driver (and not a dev_t.)  this is
		 * because subsequent opens of the upper device may result
		 * in a different stream (and dev_t) having access to
		 * the lower stream.
		 *
		 * for example, if the upper stream is closed after the
		 * persistent link operation is completed, a subsequent
		 * open of the upper device will create a new stream which
		 * may have a different dev_t and an unlink operation
		 * can be performed using this new upper stream.
		 */
		ASSERT(type == LINKPERSIST);
		major = getmajor(stp->sd_vnode->v_rdev);
		ret = ldi_ident_from_major(major, &li);
	}

	ASSERT(ret == 0);
	/* record the linkage by allocating a handle against the lower vp */
	(void) handle_alloc(vp, (struct ldi_ident *)li);
	ldi_ident_release(li);
}
1367
1368 void
1369 ldi_munlink_fp(struct stdata *stp, file_t *fpdown, int type)
1370 {
1371 struct ldi_handle *lhp;
1372 vnode_t *vp = (vnode_t *)fpdown->f_vnode;
1373 struct snode *sp;
1374 ldi_ident_t li;
1375 major_t major;
1376 int ret;
1377
1378 /* if the lower stream is not a device then return */
1379 if (!vn_matchops(vp, spec_getvnodeops()))
1380 return;
1381
1382 ASSERT(!servicing_interrupt());
1383 ASSERT((type == LINKNORMAL) || (type == LINKPERSIST));
1384
1385 LDI_STREAMS_LNK((CE_NOTE, "%s: unlinking streams "
1386 "stp=0x%p, fpdown=0x%p", "ldi_munlink_fp",
1387 (void *)stp, (void *)fpdown));
1388
1389 /*
1390 * NOTE: here we rely on the streams subsystem not allowing
1391 * a stream to be multiplexed more than once. if this
1392 * changes, we break.
1393 *
1394 * mark the snode/stream as not multiplexed
1395 */
1396 sp = VTOS(vp);
1397 mutex_enter(&sp->s_lock);
1398 ASSERT(sp->s_flag & SMUXED);
1399 sp->s_flag &= ~SMUXED;
1400 mutex_exit(&sp->s_lock);
1401
1402 /*
1403 * clear the owner for this snode
1404 * see the comment in ldi_mlink_fp() for information about how
1405 * the ident is allocated
1406 */
1407 if (type == LINKNORMAL) {
1408 ret = ldi_ident_from_stream(getendq(stp->sd_wrq), &li);
1409 } else {
1410 ASSERT(type == LINKPERSIST);
1411 major = getmajor(stp->sd_vnode->v_rdev);
1412 ret = ldi_ident_from_major(major, &li);
1413 }
1414
1415 ASSERT(ret == 0);
1416 lhp = handle_find(vp, (struct ldi_ident *)li);
1417 handle_release(lhp);
1418 ldi_ident_release(li);
1419 }
1420
1421 /*
1422 * LDI Consolidation private interfaces
1423 */
1424 int
1425 ldi_ident_from_mod(struct modlinkage *modlp, ldi_ident_t *lip)
1426 {
1427 struct modctl *modp;
1428 major_t major;
1429 char *name;
1430
1431 if ((modlp == NULL) || (lip == NULL))
1432 return (EINVAL);
1433
1434 ASSERT(!servicing_interrupt());
1435
1436 modp = mod_getctl(modlp);
1437 if (modp == NULL)
1438 return (EINVAL);
1439 name = modp->mod_modname;
1440 if (name == NULL)
1441 return (EINVAL);
1442 major = mod_name_to_major(name);
1443
1444 *lip = (ldi_ident_t)ident_alloc(name, NULL, DDI_DEV_T_NONE, major);
1445
1446 LDI_ALLOCFREE((CE_WARN, "%s: li=0x%p, mod=%s",
1447 "ldi_ident_from_mod", (void *)*lip, name));
1448
1449 return (0);
1450 }
1451
1452 ldi_ident_t
1453 ldi_ident_from_anon()
1454 {
1455 ldi_ident_t lip;
1456
1457 ASSERT(!servicing_interrupt());
1458
1459 lip = (ldi_ident_t)ident_alloc("genunix", NULL, DDI_DEV_T_NONE, -1);
1460
1461 LDI_ALLOCFREE((CE_WARN, "%s: li=0x%p, mod=%s",
1462 "ldi_ident_from_anon", (void *)lip, "genunix"));
1463
1464 return (lip);
1465 }
1466
1467
1468 /*
1469 * LDI Public interfaces
1470 */
1471 int
1472 ldi_ident_from_stream(struct queue *sq, ldi_ident_t *lip)
1473 {
1474 struct stdata *stp;
1475 dev_t dev;
1476 char *name;
1477
1478 if ((sq == NULL) || (lip == NULL))
1479 return (EINVAL);
1480
1481 ASSERT(!servicing_interrupt());
1482
1483 stp = sq->q_stream;
1484 if (!vn_matchops(stp->sd_vnode, spec_getvnodeops()))
1485 return (EINVAL);
1486
1487 dev = stp->sd_vnode->v_rdev;
1488 name = mod_major_to_name(getmajor(dev));
1489 if (name == NULL)
1490 return (EINVAL);
1491 *lip = (ldi_ident_t)ident_alloc(name, NULL, dev, -1);
1492
1493 LDI_ALLOCFREE((CE_WARN,
1494 "%s: li=0x%p, mod=%s, minor=0x%x, stp=0x%p",
1495 "ldi_ident_from_stream", (void *)*lip, name, getminor(dev),
1496 (void *)stp));
1497
1498 return (0);
1499 }
1500
1501 int
1502 ldi_ident_from_dev(dev_t dev, ldi_ident_t *lip)
1503 {
1504 char *name;
1505
1506 if (lip == NULL)
1507 return (EINVAL);
1508
1509 ASSERT(!servicing_interrupt());
1510
1511 name = mod_major_to_name(getmajor(dev));
1512 if (name == NULL)
1513 return (EINVAL);
1514 *lip = (ldi_ident_t)ident_alloc(name, NULL, dev, -1);
1515
1516 LDI_ALLOCFREE((CE_WARN,
1517 "%s: li=0x%p, mod=%s, minor=0x%x",
1518 "ldi_ident_from_dev", (void *)*lip, name, getminor(dev)));
1519
1520 return (0);
1521 }
1522
1523 int
1524 ldi_ident_from_dip(dev_info_t *dip, ldi_ident_t *lip)
1525 {
1526 struct dev_info *devi = (struct dev_info *)dip;
1527 char *name;
1528
1529 if ((dip == NULL) || (lip == NULL))
1530 return (EINVAL);
1531
1532 ASSERT(!servicing_interrupt());
1533
1534 name = mod_major_to_name(devi->devi_major);
1535 if (name == NULL)
1536 return (EINVAL);
1537 *lip = (ldi_ident_t)ident_alloc(name, dip, DDI_DEV_T_NONE, -1);
1538
1539 LDI_ALLOCFREE((CE_WARN,
1540 "%s: li=0x%p, mod=%s, dip=0x%p",
1541 "ldi_ident_from_dip", (void *)*lip, name, (void *)devi));
1542
1543 return (0);
1544 }
1545
1546 int
1547 ldi_ident_from_major(major_t major, ldi_ident_t *lip)
1548 {
1549 char *name;
1550
1551 if (lip == NULL)
1552 return (EINVAL);
1553
1554 ASSERT(!servicing_interrupt());
1555
1556 name = mod_major_to_name(major);
1557 if (name == NULL)
1558 return (EINVAL);
1559 *lip = (ldi_ident_t)ident_alloc(name, NULL, DDI_DEV_T_NONE, major);
1560
1561 LDI_ALLOCFREE((CE_WARN,
1562 "%s: li=0x%p, mod=%s",
1563 "ldi_ident_from_major", (void *)*lip, name));
1564
1565 return (0);
1566 }
1567
1568 void
1569 ldi_ident_release(ldi_ident_t li)
1570 {
1571 struct ldi_ident *ident = (struct ldi_ident *)li;
1572 char *name;
1573
1574 if (li == NULL)
1575 return;
1576
1577 ASSERT(!servicing_interrupt());
1578
1579 name = ident->li_modname;
1580
1581 LDI_ALLOCFREE((CE_WARN,
1582 "%s: li=0x%p, mod=%s",
1583 "ldi_ident_release", (void *)li, name));
1584
1585 ident_release((struct ldi_ident *)li);
1586 }
1587
1588 /* get a handle to a device by dev_t and otyp */
1589 int
1590 ldi_open_by_dev(dev_t *devp, int otyp, int flag, cred_t *cr,
1591 ldi_handle_t *lhp, ldi_ident_t li)
1592 {
1593 struct ldi_ident *lip = (struct ldi_ident *)li;
1594 int ret;
1595 vnode_t *vp;
1596
1597 /* sanity check required input parameters */
1598 if ((devp == NULL) || (!OTYP_VALID(otyp)) || (cr == NULL) ||
1599 (lhp == NULL) || (lip == NULL))
1600 return (EINVAL);
1601
1602 ASSERT(!servicing_interrupt());
1603
1604 if ((ret = ldi_vp_from_dev(*devp, otyp, &vp)) != 0)
1605 return (ret);
1606
1607 if ((ret = ldi_open_by_vp(&vp, flag, cr, lhp, lip)) == 0) {
1608 *devp = vp->v_rdev;
1609 }
1610 VN_RELE(vp);
1611
1612 return (ret);
1613 }
1614
1615 /* get a handle to a device by pathname */
1616 int
1617 ldi_open_by_name(char *pathname, int flag, cred_t *cr,
1618 ldi_handle_t *lhp, ldi_ident_t li)
1619 {
1620 struct ldi_ident *lip = (struct ldi_ident *)li;
1621 int ret;
1622 vnode_t *vp;
1623
1624 /* sanity check required input parameters */
1625 if ((pathname == NULL) || (*pathname != '/') ||
1626 (cr == NULL) || (lhp == NULL) || (lip == NULL))
1627 return (EINVAL);
1628
1629 ASSERT(!servicing_interrupt());
1630
1631 if ((ret = ldi_vp_from_name(pathname, &vp)) != 0)
1632 return (ret);
1633
1634 ret = ldi_open_by_vp(&vp, flag, cr, lhp, lip);
1635 VN_RELE(vp);
1636
1637 return (ret);
1638 }
1639
1640 /* get a handle to a device by devid and minor_name */
1641 int
1642 ldi_open_by_devid(ddi_devid_t devid, char *minor_name,
1643 int flag, cred_t *cr, ldi_handle_t *lhp, ldi_ident_t li)
1644 {
1645 struct ldi_ident *lip = (struct ldi_ident *)li;
1646 int ret;
1647 vnode_t *vp;
1648
1649 /* sanity check required input parameters */
1650 if ((minor_name == NULL) || (cr == NULL) ||
1651 (lhp == NULL) || (lip == NULL))
1652 return (EINVAL);
1653
1654 ASSERT(!servicing_interrupt());
1655
1656 if ((ret = ldi_vp_from_devid(devid, minor_name, &vp)) != 0)
1657 return (ret);
1658
1659 ret = ldi_open_by_vp(&vp, flag, cr, lhp, lip);
1660 VN_RELE(vp);
1661
1662 return (ret);
1663 }
1664
1665 int
1666 ldi_close(ldi_handle_t lh, int flag, cred_t *cr)
1667 {
1668 struct ldi_handle *handlep = (struct ldi_handle *)lh;
1669 struct ldi_event *lep;
1670 int err = 0;
1671 int notify = 0;
1672 list_t *listp;
1673 ldi_ev_callback_impl_t *lecp;
1674
1675 if (lh == NULL)
1676 return (EINVAL);
1677
1678 ASSERT(!servicing_interrupt());
1679
1680 #ifdef LDI_OBSOLETE_EVENT
1681
1682 /*
1683 * Any event handlers should have been unregistered by the
1684 * time ldi_close() is called. If they haven't then it's a
1685 * bug.
1686 *
1687 * In a debug kernel we'll panic to make the problem obvious.
1688 */
1689 ASSERT(handlep->lh_events == NULL);
1690
1691 /*
1692 * On a production kernel we'll "do the right thing" (unregister
1693 * the event handlers) and then complain about having to do the
1694 * work ourselves.
1695 */
1696 while ((lep = handlep->lh_events) != NULL) {
1697 err = 1;
1698 (void) ldi_remove_event_handler(lh, (ldi_callback_id_t)lep);
1699 }
1700 if (err) {
1701 struct ldi_ident *lip = handlep->lh_ident;
1702 ASSERT(lip != NULL);
1703 cmn_err(CE_NOTE, "ldi err: %s "
1704 "failed to unregister layered event handlers before "
1705 "closing devices", lip->li_modname);
1706 }
1707 #endif
1708
1709 /* do a layered close on the device */
1710 err = VOP_CLOSE(handlep->lh_vp, flag | FKLYR, 1, (offset_t)0, cr, NULL);
1711
1712 LDI_OPENCLOSE((CE_WARN, "%s: lh=0x%p", "ldi close", (void *)lh));
1713
1714 /*
1715 * Search the event callback list for callbacks with this
1716 * handle. There are 2 cases
1717 * 1. Called in the context of a notify. The handle consumer
1718 * is releasing its hold on the device to allow a reconfiguration
1719 * of the device. Simply NULL out the handle and the notify callback.
1720 * The finalize callback is still available so that the consumer
1721 * knows of the final disposition of the device.
1722 * 2. Not called in the context of notify. NULL out the handle as well
1723 * as the notify and finalize callbacks. Since the consumer has
1724 * closed the handle, we assume it is not interested in the
1725 * notify and finalize callbacks.
1726 */
1727 ldi_ev_lock();
1728
1729 if (handlep->lh_flags & LH_FLAGS_NOTIFY)
1730 notify = 1;
1731 listp = &ldi_ev_callback_list.le_head;
1732 for (lecp = list_head(listp); lecp; lecp = list_next(listp, lecp)) {
1733 if (lecp->lec_lhp != handlep)
1734 continue;
1735 lecp->lec_lhp = NULL;
1736 lecp->lec_notify = NULL;
1737 LDI_EVDBG((CE_NOTE, "ldi_close: NULLed lh and notify"));
1738 if (!notify) {
1739 LDI_EVDBG((CE_NOTE, "ldi_close: NULLed finalize"));
1740 lecp->lec_finalize = NULL;
1741 }
1742 }
1743
1744 if (notify)
1745 handlep->lh_flags &= ~LH_FLAGS_NOTIFY;
1746 ldi_ev_unlock();
1747
1748 /*
1749 * Free the handle even if the device close failed. why?
1750 *
1751 * If the device close failed we can't really make assumptions
1752 * about the devices state so we shouldn't allow access to the
1753 * device via this handle any more. If the device consumer wants
1754 * to access the device again they should open it again.
1755 *
1756 * This is the same way file/device close failures are handled
1757 * in other places like spec_close() and closeandsetf().
1758 */
1759 handle_release(handlep);
1760 return (err);
1761 }
1762
1763 int
1764 ldi_read(ldi_handle_t lh, struct uio *uiop, cred_t *credp)
1765 {
1766 struct ldi_handle *handlep = (struct ldi_handle *)lh;
1767 vnode_t *vp;
1768 dev_t dev;
1769 int ret;
1770
1771 if (lh == NULL)
1772 return (EINVAL);
1773
1774 vp = handlep->lh_vp;
1775 dev = vp->v_rdev;
1776 if (handlep->lh_type & LH_CBDEV) {
1777 ret = cdev_read(dev, uiop, credp);
1778 } else if (handlep->lh_type & LH_STREAM) {
1779 ret = strread(vp, uiop, credp);
1780 } else {
1781 return (ENOTSUP);
1782 }
1783 return (ret);
1784 }
1785
1786 int
1787 ldi_write(ldi_handle_t lh, struct uio *uiop, cred_t *credp)
1788 {
1789 struct ldi_handle *handlep = (struct ldi_handle *)lh;
1790 vnode_t *vp;
1791 dev_t dev;
1792 int ret;
1793
1794 if (lh == NULL)
1795 return (EINVAL);
1796
1797 vp = handlep->lh_vp;
1798 dev = vp->v_rdev;
1799 if (handlep->lh_type & LH_CBDEV) {
1800 ret = cdev_write(dev, uiop, credp);
1801 } else if (handlep->lh_type & LH_STREAM) {
1802 ret = strwrite(vp, uiop, credp);
1803 } else {
1804 return (ENOTSUP);
1805 }
1806 return (ret);
1807 }
1808
1809 int
1810 ldi_get_size(ldi_handle_t lh, uint64_t *sizep)
1811 {
1812 int otyp;
1813 uint_t value;
1814 int64_t drv_prop64;
1815 struct ldi_handle *handlep = (struct ldi_handle *)lh;
1816 uint_t blksize;
1817 int blkshift;
1818
1819
1820 if ((lh == NULL) || (sizep == NULL))
1821 return (DDI_FAILURE);
1822
1823 if (handlep->lh_type & LH_STREAM)
1824 return (DDI_FAILURE);
1825
1826 /*
1827 * Determine device type (char or block).
1828 * Character devices support Size/size
1829 * property value. Block devices may support
1830 * Nblocks/nblocks or Size/size property value.
1831 */
1832 if ((ldi_get_otyp(lh, &otyp)) != 0)
1833 return (DDI_FAILURE);
1834
1835 if (otyp == OTYP_BLK) {
1836 if (ldi_prop_exists(lh,
1837 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "Nblocks")) {
1838
1839 drv_prop64 = ldi_prop_get_int64(lh,
1840 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1841 "Nblocks", 0);
1842 blksize = ldi_prop_get_int(lh,
1843 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1844 "blksize", DEV_BSIZE);
1845 if (blksize == DEV_BSIZE)
1846 blksize = ldi_prop_get_int(lh, LDI_DEV_T_ANY |
1847 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1848 "device-blksize", DEV_BSIZE);
1849
1850 /* blksize must be a power of two */
1851 ASSERT(BIT_ONLYONESET(blksize));
1852 blkshift = highbit(blksize) - 1;
1853
1854 /*
1855 * We don't support Nblocks values that don't have
1856 * an accurate uint64_t byte count representation.
1857 */
1858 if ((uint64_t)drv_prop64 >= (UINT64_MAX >> blkshift))
1859 return (DDI_FAILURE);
1860
1861 *sizep = (uint64_t)
1862 (((u_offset_t)drv_prop64) << blkshift);
1863 return (DDI_SUCCESS);
1864 }
1865
1866 if (ldi_prop_exists(lh,
1867 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "nblocks")) {
1868
1869 value = ldi_prop_get_int(lh,
1870 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1871 "nblocks", 0);
1872 blksize = ldi_prop_get_int(lh,
1873 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1874 "blksize", DEV_BSIZE);
1875 if (blksize == DEV_BSIZE)
1876 blksize = ldi_prop_get_int(lh, LDI_DEV_T_ANY |
1877 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1878 "device-blksize", DEV_BSIZE);
1879
1880 /* blksize must be a power of two */
1881 ASSERT(BIT_ONLYONESET(blksize));
1882 blkshift = highbit(blksize) - 1;
1883
1884 /*
1885 * We don't support nblocks values that don't have an
1886 * accurate uint64_t byte count representation.
1887 */
1888 if ((uint64_t)value >= (UINT64_MAX >> blkshift))
1889 return (DDI_FAILURE);
1890
1891 *sizep = (uint64_t)
1892 (((u_offset_t)value) << blkshift);
1893 return (DDI_SUCCESS);
1894 }
1895 }
1896
1897 if (ldi_prop_exists(lh,
1898 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "Size")) {
1899
1900 drv_prop64 = ldi_prop_get_int64(lh,
1901 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "Size", 0);
1902 *sizep = (uint64_t)drv_prop64;
1903 return (DDI_SUCCESS);
1904 }
1905
1906 if (ldi_prop_exists(lh,
1907 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "size")) {
1908
1909 value = ldi_prop_get_int(lh,
1910 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "size", 0);
1911 *sizep = (uint64_t)value;
1912 return (DDI_SUCCESS);
1913 }
1914
1915 /* unable to determine device size */
1916 return (DDI_FAILURE);
1917 }
1918
1919 int
1920 ldi_ioctl(ldi_handle_t lh, int cmd, intptr_t arg, int mode,
1921 cred_t *cr, int *rvalp)
1922 {
1923 struct ldi_handle *handlep = (struct ldi_handle *)lh;
1924 vnode_t *vp;
1925 dev_t dev;
1926 int ret, copymode, unused;
1927
1928 if (lh == NULL)
1929 return (EINVAL);
1930
1931 /*
1932 * if the data pointed to by arg is located in the kernel then
1933 * make sure the FNATIVE flag is set.
1934 */
1935 if (mode & FKIOCTL)
1936 mode = (mode & ~FMODELS) | FNATIVE | FKIOCTL;
1937
1938 /*
1939 * Some drivers assume that rvalp will always be non-NULL, so in
1940 * an attempt to avoid panics if the caller passed in a NULL
1941 * value, update rvalp to point to a temporary variable.
1942 */
1943 if (rvalp == NULL)
1944 rvalp = &unused;
1945 vp = handlep->lh_vp;
1946 dev = vp->v_rdev;
1947 if (handlep->lh_type & LH_CBDEV) {
1948 ret = cdev_ioctl(dev, cmd, arg, mode, cr, rvalp);
1949 } else if (handlep->lh_type & LH_STREAM) {
1950 copymode = (mode & FKIOCTL) ? K_TO_K : U_TO_K;
1951
1952 /*
1953 * if we get an I_PLINK from within the kernel the
1954 * arg is a layered handle pointer instead of
1955 * a file descriptor, so we translate this ioctl
1956 * into a private one that can handle this.
1957 */
1958 if ((mode & FKIOCTL) && (cmd == I_PLINK))
1959 cmd = _I_PLINK_LH;
1960
1961 ret = strioctl(vp, cmd, arg, mode, copymode, cr, rvalp);
1962 } else {
1963 return (ENOTSUP);
1964 }
1965
1966 return (ret);
1967 }
1968
1969 int
1970 ldi_poll(ldi_handle_t lh, short events, int anyyet, short *reventsp,
1971 struct pollhead **phpp)
1972 {
1973 struct ldi_handle *handlep = (struct ldi_handle *)lh;
1974 vnode_t *vp;
1975 dev_t dev;
1976 int ret;
1977
1978 if (lh == NULL)
1979 return (EINVAL);
1980
1981 vp = handlep->lh_vp;
1982 dev = vp->v_rdev;
1983 if (handlep->lh_type & LH_CBDEV) {
1984 ret = cdev_poll(dev, events, anyyet, reventsp, phpp);
1985 } else if (handlep->lh_type & LH_STREAM) {
1986 ret = strpoll(vp->v_stream, events, anyyet, reventsp, phpp);
1987 } else {
1988 return (ENOTSUP);
1989 }
1990
1991 return (ret);
1992 }
1993
int
ldi_prop_op(ldi_handle_t lh, ddi_prop_op_t prop_op,
    int flags, char *name, caddr_t valuep, int *length)
{
	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
	dev_t			dev;
	dev_info_t		*dip;
	int			ret;
	struct snode		*csp;

	/* sanity check: need a handle and a non-empty property name */
	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	/* every operation except PROP_LEN must supply a value buffer */
	if ((prop_op != PROP_LEN) && (valuep == NULL))
		return (DDI_PROP_INVAL_ARG);

	if (length == NULL)
		return (DDI_PROP_INVAL_ARG);

	/*
	 * try to find the associated dip,
	 * this places a hold on the driver
	 */
	dev = handlep->lh_vp->v_rdev;

	/* prefer the dip cached on the common snode, if any */
	csp = VTOCS(handlep->lh_vp);
	mutex_enter(&csp->s_lock);
	if ((dip = csp->s_dip) != NULL)
		e_ddi_hold_devi(dip);
	mutex_exit(&csp->s_lock);
	/* otherwise fall back to a dev_t based lookup */
	if (dip == NULL)
		dip = e_ddi_hold_devi_by_dev(dev, 0);

	if (dip == NULL)
		return (DDI_PROP_NOT_FOUND);

	ret = i_ldi_prop_op(dev, dip, prop_op, flags, name, valuep, length);
	ddi_release_devi(dip);

	return (ret);
}
2035
2036 int
2037 ldi_strategy(ldi_handle_t lh, struct buf *bp)
2038 {
2039 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2040 dev_t dev;
2041
2042 if ((lh == NULL) || (bp == NULL))
2043 return (EINVAL);
2044
2045 /* this entry point is only supported for cb devices */
2046 dev = handlep->lh_vp->v_rdev;
2047 if (!(handlep->lh_type & LH_CBDEV))
2048 return (ENOTSUP);
2049
2050 bp->b_edev = dev;
2051 bp->b_dev = cmpdev(dev);
2052 return (bdev_strategy(bp));
2053 }
2054
2055 int
2056 ldi_dump(ldi_handle_t lh, caddr_t addr, daddr_t blkno, int nblk)
2057 {
2058 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2059 dev_t dev;
2060
2061 if (lh == NULL)
2062 return (EINVAL);
2063
2064 /* this entry point is only supported for cb devices */
2065 dev = handlep->lh_vp->v_rdev;
2066 if (!(handlep->lh_type & LH_CBDEV))
2067 return (ENOTSUP);
2068
2069 return (bdev_dump(dev, addr, blkno, nblk));
2070 }
2071
2072 int
2073 ldi_devmap(ldi_handle_t lh, devmap_cookie_t dhp, offset_t off,
2074 size_t len, size_t *maplen, uint_t model)
2075 {
2076 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2077 dev_t dev;
2078
2079 if (lh == NULL)
2080 return (EINVAL);
2081
2082 /* this entry point is only supported for cb devices */
2083 dev = handlep->lh_vp->v_rdev;
2084 if (!(handlep->lh_type & LH_CBDEV))
2085 return (ENOTSUP);
2086
2087 return (cdev_devmap(dev, dhp, off, len, maplen, model));
2088 }
2089
2090 int
2091 ldi_aread(ldi_handle_t lh, struct aio_req *aio_reqp, cred_t *cr)
2092 {
2093 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2094 dev_t dev;
2095 struct cb_ops *cb;
2096
2097 if (lh == NULL)
2098 return (EINVAL);
2099
2100 /* this entry point is only supported for cb devices */
2101 if (!(handlep->lh_type & LH_CBDEV))
2102 return (ENOTSUP);
2103
2104 /*
2105 * Kaio is only supported on block devices.
2106 */
2107 dev = handlep->lh_vp->v_rdev;
2108 cb = devopsp[getmajor(dev)]->devo_cb_ops;
2109 if (cb->cb_strategy == nodev || cb->cb_strategy == NULL)
2110 return (ENOTSUP);
2111
2112 if (cb->cb_aread == NULL)
2113 return (ENOTSUP);
2114
2115 return (cb->cb_aread(dev, aio_reqp, cr));
2116 }
2117
2118 int
2119 ldi_awrite(ldi_handle_t lh, struct aio_req *aio_reqp, cred_t *cr)
2120 {
2121 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2122 struct cb_ops *cb;
2123 dev_t dev;
2124
2125 if (lh == NULL)
2126 return (EINVAL);
2127
2128 /* this entry point is only supported for cb devices */
2129 if (!(handlep->lh_type & LH_CBDEV))
2130 return (ENOTSUP);
2131
2132 /*
2133 * Kaio is only supported on block devices.
2134 */
2135 dev = handlep->lh_vp->v_rdev;
2136 cb = devopsp[getmajor(dev)]->devo_cb_ops;
2137 if (cb->cb_strategy == nodev || cb->cb_strategy == NULL)
2138 return (ENOTSUP);
2139
2140 if (cb->cb_awrite == NULL)
2141 return (ENOTSUP);
2142
2143 return (cb->cb_awrite(dev, aio_reqp, cr));
2144 }
2145
int
ldi_putmsg(ldi_handle_t lh, mblk_t *smp)
{
	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
	int			ret;

	if ((lh == NULL) || (smp == NULL))
		return (EINVAL);

	/* only stream handles accept messages; we own smp, so free it */
	if (!(handlep->lh_type & LH_STREAM)) {
		freemsg(smp);
		return (ENOTSUP);
	}

	/*
	 * If we don't have db_credp, set it. Note that we can not be called
	 * from interrupt context.
	 */
	if (msg_getcred(smp, NULL) == NULL)
		mblk_setcred(smp, CRED(), curproc->p_pid);

	/* Send message while honoring flow control */
	ret = kstrputmsg(handlep->lh_vp, smp, NULL, 0, 0,
	    MSG_BAND | MSG_HOLDSIG | MSG_IGNERROR, 0);

	return (ret);
}
2173
2174 int
2175 ldi_getmsg(ldi_handle_t lh, mblk_t **rmp, timestruc_t *timeo)
2176 {
2177 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2178 clock_t timout; /* milliseconds */
2179 uchar_t pri;
2180 rval_t rval;
2181 int ret, pflag;
2182
2183
2184 if (lh == NULL)
2185 return (EINVAL);
2186
2187 if (!(handlep->lh_type & LH_STREAM))
2188 return (ENOTSUP);
2189
2190 /* Convert from nanoseconds to milliseconds */
2191 if (timeo != NULL) {
2192 timout = timeo->tv_sec * 1000 + timeo->tv_nsec / 1000000;
2193 if (timout > INT_MAX)
2194 return (EINVAL);
2195 } else
2196 timout = -1;
2197
2198 /* Wait for timeout millseconds for a message */
2199 pflag = MSG_ANY;
2200 pri = 0;
2201 *rmp = NULL;
2202 ret = kstrgetmsg(handlep->lh_vp,
2203 rmp, NULL, &pri, &pflag, timout, &rval);
2204 return (ret);
2205 }
2206
2207 int
2208 ldi_get_dev(ldi_handle_t lh, dev_t *devp)
2209 {
2210 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2211
2212 if ((lh == NULL) || (devp == NULL))
2213 return (EINVAL);
2214
2215 *devp = handlep->lh_vp->v_rdev;
2216 return (0);
2217 }
2218
2219 int
2220 ldi_get_otyp(ldi_handle_t lh, int *otyp)
2221 {
2222 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2223
2224 if ((lh == NULL) || (otyp == NULL))
2225 return (EINVAL);
2226
2227 *otyp = VTYP_TO_OTYP(handlep->lh_vp->v_type);
2228 return (0);
2229 }
2230
2231 int
2232 ldi_get_devid(ldi_handle_t lh, ddi_devid_t *devid)
2233 {
2234 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2235 int ret;
2236 dev_t dev;
2237
2238 if ((lh == NULL) || (devid == NULL))
2239 return (EINVAL);
2240
2241 dev = handlep->lh_vp->v_rdev;
2242
2243 ret = ddi_lyr_get_devid(dev, devid);
2244 if (ret != DDI_SUCCESS)
2245 return (ENOTSUP);
2246
2247 return (0);
2248 }
2249
2250 int
2251 ldi_get_minor_name(ldi_handle_t lh, char **minor_name)
2252 {
2253 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2254 int ret, otyp;
2255 dev_t dev;
2256
2257 if ((lh == NULL) || (minor_name == NULL))
2258 return (EINVAL);
2259
2260 dev = handlep->lh_vp->v_rdev;
2261 otyp = VTYP_TO_OTYP(handlep->lh_vp->v_type);
2262
2263 ret = ddi_lyr_get_minor_name(dev, OTYP_TO_STYP(otyp), minor_name);
2264 if (ret != DDI_SUCCESS)
2265 return (ENOTSUP);
2266
2267 return (0);
2268 }
2269
/*
 * Look up an int-array property on the device associated with an LDI
 * handle.  The driver's own prop_op(9E) entry point (via
 * i_ldi_prop_op_typed()) is given first chance to supply the value;
 * otherwise we fall back to the generic DDI property interfaces.
 * Returns a DDI_PROP_* code; on success the caller must free *data
 * with ddi_prop_free().
 */
int
ldi_prop_lookup_int_array(ldi_handle_t lh,
    uint_t flags, char *name, int **data, uint_t *nelements)
{
	struct ldi_handle *handlep = (struct ldi_handle *)lh;
	dev_info_t *dip;
	dev_t dev;
	int res;
	struct snode *csp;

	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	dev = handlep->lh_vp->v_rdev;

	/*
	 * Hold the devinfo node: prefer the one cached on the common
	 * snode, otherwise look it up (and hold it) by dev_t.
	 */
	csp = VTOCS(handlep->lh_vp);
	mutex_enter(&csp->s_lock);
	if ((dip = csp->s_dip) != NULL)
		e_ddi_hold_devi(dip);
	mutex_exit(&csp->s_lock);
	if (dip == NULL)
		dip = e_ddi_hold_devi_by_dev(dev, 0);

	if (dip == NULL) {
		/* no devinfo node; allow lookup for unbound DLPI2 drivers */
		flags |= DDI_UNBND_DLPI2;
	} else if (flags & LDI_DEV_T_ANY) {
		/* translate the LDI wildcard flag to the DDI wildcard dev */
		flags &= ~LDI_DEV_T_ANY;
		dev = DDI_DEV_T_ANY;
	}

	if (dip != NULL) {
		int *prop_val, prop_len;

		res = i_ldi_prop_op_typed(dev, dip, flags, name,
		    (caddr_t *)&prop_val, &prop_len, sizeof (int));

		/* if we got it then return it */
		if (res == DDI_PROP_SUCCESS) {
			*nelements = prop_len / sizeof (int);
			*data = prop_val;

			ddi_release_devi(dip);
			return (res);
		}
	}

	/* call the normal property interfaces */
	res = ddi_prop_lookup_int_array(dev, dip, flags,
	    name, data, nelements);

	if (dip != NULL)
		ddi_release_devi(dip);

	return (res);
}
2325
/*
 * Look up an int64-array property on the device associated with an LDI
 * handle.  Same strategy as ldi_prop_lookup_int_array(): try the
 * driver's prop_op(9E) first, then the generic DDI interfaces.
 * Returns a DDI_PROP_* code; on success the caller must free *data
 * with ddi_prop_free().
 */
int
ldi_prop_lookup_int64_array(ldi_handle_t lh,
    uint_t flags, char *name, int64_t **data, uint_t *nelements)
{
	struct ldi_handle *handlep = (struct ldi_handle *)lh;
	dev_info_t *dip;
	dev_t dev;
	int res;
	struct snode *csp;

	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	dev = handlep->lh_vp->v_rdev;

	/*
	 * Hold the devinfo node: prefer the one cached on the common
	 * snode, otherwise look it up (and hold it) by dev_t.
	 */
	csp = VTOCS(handlep->lh_vp);
	mutex_enter(&csp->s_lock);
	if ((dip = csp->s_dip) != NULL)
		e_ddi_hold_devi(dip);
	mutex_exit(&csp->s_lock);
	if (dip == NULL)
		dip = e_ddi_hold_devi_by_dev(dev, 0);

	if (dip == NULL) {
		/* no devinfo node; allow lookup for unbound DLPI2 drivers */
		flags |= DDI_UNBND_DLPI2;
	} else if (flags & LDI_DEV_T_ANY) {
		/* translate the LDI wildcard flag to the DDI wildcard dev */
		flags &= ~LDI_DEV_T_ANY;
		dev = DDI_DEV_T_ANY;
	}

	if (dip != NULL) {
		int64_t *prop_val;
		int prop_len;

		res = i_ldi_prop_op_typed(dev, dip, flags, name,
		    (caddr_t *)&prop_val, &prop_len, sizeof (int64_t));

		/* if we got it then return it */
		if (res == DDI_PROP_SUCCESS) {
			*nelements = prop_len / sizeof (int64_t);
			*data = prop_val;

			ddi_release_devi(dip);
			return (res);
		}
	}

	/* call the normal property interfaces */
	res = ddi_prop_lookup_int64_array(dev, dip, flags,
	    name, data, nelements);

	if (dip != NULL)
		ddi_release_devi(dip);

	return (res);
}
2382
/*
 * Look up a string-array property on the device associated with an LDI
 * handle.  A value obtained from the driver's prop_op(9E) arrives as
 * one buffer of consecutive NUL-terminated strings and is repacked by
 * i_pack_string_array() into the char ** vector callers expect.
 * Returns a DDI_PROP_* code; on success the caller must free *data
 * with ddi_prop_free().
 */
int
ldi_prop_lookup_string_array(ldi_handle_t lh,
    uint_t flags, char *name, char ***data, uint_t *nelements)
{
	struct ldi_handle *handlep = (struct ldi_handle *)lh;
	dev_info_t *dip;
	dev_t dev;
	int res;
	struct snode *csp;

	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	dev = handlep->lh_vp->v_rdev;

	/*
	 * Hold the devinfo node: prefer the one cached on the common
	 * snode, otherwise look it up (and hold it) by dev_t.
	 */
	csp = VTOCS(handlep->lh_vp);
	mutex_enter(&csp->s_lock);
	if ((dip = csp->s_dip) != NULL)
		e_ddi_hold_devi(dip);
	mutex_exit(&csp->s_lock);
	if (dip == NULL)
		dip = e_ddi_hold_devi_by_dev(dev, 0);

	if (dip == NULL) {
		/* no devinfo node; allow lookup for unbound DLPI2 drivers */
		flags |= DDI_UNBND_DLPI2;
	} else if (flags & LDI_DEV_T_ANY) {
		/* translate the LDI wildcard flag to the DDI wildcard dev */
		flags &= ~LDI_DEV_T_ANY;
		dev = DDI_DEV_T_ANY;
	}

	if (dip != NULL) {
		char *prop_val;
		int prop_len;

		/* element size 0: strings have no fixed element width */
		res = i_ldi_prop_op_typed(dev, dip, flags, name,
		    (caddr_t *)&prop_val, &prop_len, 0);

		/* if we got it then return it */
		if (res == DDI_PROP_SUCCESS) {
			char **str_array;
			int nelem;

			/*
			 * pack the returned string array into the format
			 * our callers expect
			 */
			if (i_pack_string_array(prop_val, prop_len,
			    &str_array, &nelem) == 0) {

				*data = str_array;
				*nelements = nelem;

				ddi_prop_free(prop_val);
				ddi_release_devi(dip);
				return (res);
			}

			/*
			 * the format of the returned property must have
			 * been bad so throw it out
			 */
			ddi_prop_free(prop_val);
		}
	}

	/* call the normal property interfaces */
	res = ddi_prop_lookup_string_array(dev, dip, flags,
	    name, data, nelements);

	if (dip != NULL)
		ddi_release_devi(dip);

	return (res);
}
2457
/*
 * Look up a string property on the device associated with an LDI
 * handle.  The driver's prop_op(9E) is tried first; any string it
 * returns is validated with i_check_string() before being handed
 * back.  Returns a DDI_PROP_* code; on success the caller must free
 * *data with ddi_prop_free().
 */
int
ldi_prop_lookup_string(ldi_handle_t lh,
    uint_t flags, char *name, char **data)
{
	struct ldi_handle *handlep = (struct ldi_handle *)lh;
	dev_info_t *dip;
	dev_t dev;
	int res;
	struct snode *csp;

	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	dev = handlep->lh_vp->v_rdev;

	/*
	 * Hold the devinfo node: prefer the one cached on the common
	 * snode, otherwise look it up (and hold it) by dev_t.
	 */
	csp = VTOCS(handlep->lh_vp);
	mutex_enter(&csp->s_lock);
	if ((dip = csp->s_dip) != NULL)
		e_ddi_hold_devi(dip);
	mutex_exit(&csp->s_lock);
	if (dip == NULL)
		dip = e_ddi_hold_devi_by_dev(dev, 0);

	if (dip == NULL) {
		/* no devinfo node; allow lookup for unbound DLPI2 drivers */
		flags |= DDI_UNBND_DLPI2;
	} else if (flags & LDI_DEV_T_ANY) {
		/* translate the LDI wildcard flag to the DDI wildcard dev */
		flags &= ~LDI_DEV_T_ANY;
		dev = DDI_DEV_T_ANY;
	}

	if (dip != NULL) {
		char *prop_val;
		int prop_len;

		res = i_ldi_prop_op_typed(dev, dip, flags, name,
		    (caddr_t *)&prop_val, &prop_len, 0);

		/* if we got it then return it */
		if (res == DDI_PROP_SUCCESS) {
			/*
			 * sanity check the value returned.
			 */
			if (i_check_string(prop_val, prop_len)) {
				/* malformed string; fall through below */
				ddi_prop_free(prop_val);
			} else {
				*data = prop_val;
				ddi_release_devi(dip);
				return (res);
			}
		}
	}

	/* call the normal property interfaces */
	res = ddi_prop_lookup_string(dev, dip, flags, name, data);

	if (dip != NULL)
		ddi_release_devi(dip);

#ifdef DEBUG
	if (res == DDI_PROP_SUCCESS) {
		/*
		 * keep ourselves honest
		 * make sure the framework returns strings in the
		 * same format as we're demanding from drivers.
		 */
		struct prop_driver_data *pdd;
		int pdd_prop_size;

		pdd = ((struct prop_driver_data *)(*data)) - 1;
		pdd_prop_size = pdd->pdd_size -
		    sizeof (struct prop_driver_data);
		ASSERT(i_check_string(*data, pdd_prop_size) == 0);
	}
#endif /* DEBUG */

	return (res);
}
2535
/*
 * Look up a byte-array property on the device associated with an LDI
 * handle.  Same strategy as the other ldi_prop_lookup_*() routines:
 * driver prop_op(9E) first, generic DDI interfaces second.  Returns a
 * DDI_PROP_* code; on success the caller must free *data with
 * ddi_prop_free().
 */
int
ldi_prop_lookup_byte_array(ldi_handle_t lh,
    uint_t flags, char *name, uchar_t **data, uint_t *nelements)
{
	struct ldi_handle *handlep = (struct ldi_handle *)lh;
	dev_info_t *dip;
	dev_t dev;
	int res;
	struct snode *csp;

	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	dev = handlep->lh_vp->v_rdev;

	/*
	 * Hold the devinfo node: prefer the one cached on the common
	 * snode, otherwise look it up (and hold it) by dev_t.
	 */
	csp = VTOCS(handlep->lh_vp);
	mutex_enter(&csp->s_lock);
	if ((dip = csp->s_dip) != NULL)
		e_ddi_hold_devi(dip);
	mutex_exit(&csp->s_lock);
	if (dip == NULL)
		dip = e_ddi_hold_devi_by_dev(dev, 0);

	if (dip == NULL) {
		/* no devinfo node; allow lookup for unbound DLPI2 drivers */
		flags |= DDI_UNBND_DLPI2;
	} else if (flags & LDI_DEV_T_ANY) {
		/* translate the LDI wildcard flag to the DDI wildcard dev */
		flags &= ~LDI_DEV_T_ANY;
		dev = DDI_DEV_T_ANY;
	}

	if (dip != NULL) {
		uchar_t *prop_val;
		int prop_len;

		res = i_ldi_prop_op_typed(dev, dip, flags, name,
		    (caddr_t *)&prop_val, &prop_len, sizeof (uchar_t));

		/* if we got it then return it */
		if (res == DDI_PROP_SUCCESS) {
			*nelements = prop_len / sizeof (uchar_t);
			*data = prop_val;

			ddi_release_devi(dip);
			return (res);
		}
	}

	/* call the normal property interfaces */
	res = ddi_prop_lookup_byte_array(dev, dip, flags,
	    name, data, nelements);

	if (dip != NULL)
		ddi_release_devi(dip);

	return (res);
}
2592
/*
 * Get an int property from the device associated with an LDI handle,
 * returning defvalue if the property cannot be found.  The driver's
 * prop_op(9E) is consulted first (with DDI_PROP_DYNAMIC) so it can
 * override default property values.
 */
int
ldi_prop_get_int(ldi_handle_t lh,
    uint_t flags, char *name, int defvalue)
{
	struct ldi_handle *handlep = (struct ldi_handle *)lh;
	dev_info_t *dip;
	dev_t dev;
	int res;
	struct snode *csp;

	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
		return (defvalue);

	dev = handlep->lh_vp->v_rdev;

	/*
	 * Hold the devinfo node: prefer the one cached on the common
	 * snode, otherwise look it up (and hold it) by dev_t.
	 */
	csp = VTOCS(handlep->lh_vp);
	mutex_enter(&csp->s_lock);
	if ((dip = csp->s_dip) != NULL)
		e_ddi_hold_devi(dip);
	mutex_exit(&csp->s_lock);
	if (dip == NULL)
		dip = e_ddi_hold_devi_by_dev(dev, 0);

	if (dip == NULL) {
		/* no devinfo node; allow lookup for unbound DLPI2 drivers */
		flags |= DDI_UNBND_DLPI2;
	} else if (flags & LDI_DEV_T_ANY) {
		/* translate the LDI wildcard flag to the DDI wildcard dev */
		flags &= ~LDI_DEV_T_ANY;
		dev = DDI_DEV_T_ANY;
	}

	if (dip != NULL) {
		int prop_val;
		int prop_len;

		/*
		 * first call the drivers prop_op interface to allow it
		 * to override default property values.
		 */
		prop_len = sizeof (int);
		res = i_ldi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
		    flags | DDI_PROP_DYNAMIC, name,
		    (caddr_t)&prop_val, &prop_len);

		/* if we got it then return it */
		if ((res == DDI_PROP_SUCCESS) &&
		    (prop_len == sizeof (int))) {
			res = prop_val;
			ddi_release_devi(dip);
			return (res);
		}
	}

	/* call the normal property interfaces */
	res = ddi_prop_get_int(dev, dip, flags, name, defvalue);

	if (dip != NULL)
		ddi_release_devi(dip);

	return (res);
}
2653
/*
 * Get an int64 property from the device associated with an LDI handle,
 * returning defvalue if the property cannot be found.  The driver's
 * prop_op(9E) is consulted first (with DDI_PROP_DYNAMIC) so it can
 * override default property values.
 */
int64_t
ldi_prop_get_int64(ldi_handle_t lh,
    uint_t flags, char *name, int64_t defvalue)
{
	struct ldi_handle *handlep = (struct ldi_handle *)lh;
	dev_info_t *dip;
	dev_t dev;
	int64_t res;
	struct snode *csp;

	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
		return (defvalue);

	dev = handlep->lh_vp->v_rdev;

	/*
	 * Hold the devinfo node: prefer the one cached on the common
	 * snode, otherwise look it up (and hold it) by dev_t.
	 */
	csp = VTOCS(handlep->lh_vp);
	mutex_enter(&csp->s_lock);
	if ((dip = csp->s_dip) != NULL)
		e_ddi_hold_devi(dip);
	mutex_exit(&csp->s_lock);
	if (dip == NULL)
		dip = e_ddi_hold_devi_by_dev(dev, 0);

	if (dip == NULL) {
		/* no devinfo node; allow lookup for unbound DLPI2 drivers */
		flags |= DDI_UNBND_DLPI2;
	} else if (flags & LDI_DEV_T_ANY) {
		/* translate the LDI wildcard flag to the DDI wildcard dev */
		flags &= ~LDI_DEV_T_ANY;
		dev = DDI_DEV_T_ANY;
	}

	if (dip != NULL) {
		int64_t prop_val;
		int prop_len;

		/*
		 * first call the drivers prop_op interface to allow it
		 * to override default property values.
		 */
		prop_len = sizeof (int64_t);
		res = i_ldi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
		    flags | DDI_PROP_DYNAMIC, name,
		    (caddr_t)&prop_val, &prop_len);

		/* if we got it then return it */
		if ((res == DDI_PROP_SUCCESS) &&
		    (prop_len == sizeof (int64_t))) {
			res = prop_val;
			ddi_release_devi(dip);
			return (res);
		}
	}

	/* call the normal property interfaces */
	res = ddi_prop_get_int64(dev, dip, flags, name, defvalue);

	if (dip != NULL)
		ddi_release_devi(dip);

	return (res);
}
2714
/*
 * Return 1 if the named property exists on the device associated with
 * an LDI handle, 0 otherwise.  Checks the driver's prop_op(9E) (with
 * DDI_PROP_DYNAMIC) before the generic DDI interfaces.  A device with
 * no devinfo node is reported as having no properties.
 */
int
ldi_prop_exists(ldi_handle_t lh, uint_t flags, char *name)
{
	struct ldi_handle *handlep = (struct ldi_handle *)lh;
	dev_info_t *dip;
	dev_t dev;
	int res, prop_len;
	struct snode *csp;

	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
		return (0);

	dev = handlep->lh_vp->v_rdev;

	/*
	 * Hold the devinfo node: prefer the one cached on the common
	 * snode, otherwise look it up (and hold it) by dev_t.
	 */
	csp = VTOCS(handlep->lh_vp);
	mutex_enter(&csp->s_lock);
	if ((dip = csp->s_dip) != NULL)
		e_ddi_hold_devi(dip);
	mutex_exit(&csp->s_lock);
	if (dip == NULL)
		dip = e_ddi_hold_devi_by_dev(dev, 0);

	/* if NULL dip, prop does NOT exist */
	if (dip == NULL)
		return (0);

	if (flags & LDI_DEV_T_ANY) {
		/* translate the LDI wildcard flag to the DDI wildcard dev */
		flags &= ~LDI_DEV_T_ANY;
		dev = DDI_DEV_T_ANY;
	}

	/*
	 * first call the drivers prop_op interface to allow it
	 * to override default property values.
	 */
	res = i_ldi_prop_op(dev, dip, PROP_LEN,
	    flags | DDI_PROP_DYNAMIC, name, NULL, &prop_len);

	if (res == DDI_PROP_SUCCESS) {
		ddi_release_devi(dip);
		return (1);
	}

	/* call the normal property interfaces */
	res = ddi_prop_exists(dev, dip, flags, name);

	ddi_release_devi(dip);
	return (res);
}
2764
2765 #ifdef LDI_OBSOLETE_EVENT
2766
/*
 * Obsolete (LDI_OBSOLETE_EVENT) interface: translate an event name to
 * an NDI event cookie for the device associated with an LDI handle.
 * Returns DDI_SUCCESS/DDI_FAILURE.
 */
int
ldi_get_eventcookie(ldi_handle_t lh, char *name, ddi_eventcookie_t *ecp)
{
	struct ldi_handle *handlep = (struct ldi_handle *)lh;
	dev_info_t *dip;
	dev_t dev;
	int res;
	struct snode *csp;

	if ((lh == NULL) || (name == NULL) ||
	    (strlen(name) == 0) || (ecp == NULL)) {
		return (DDI_FAILURE);
	}

	/* event registration is not allowed from interrupt context */
	ASSERT(!servicing_interrupt());

	dev = handlep->lh_vp->v_rdev;

	/*
	 * Hold the devinfo node: prefer the one cached on the common
	 * snode, otherwise look it up (and hold it) by dev_t.
	 */
	csp = VTOCS(handlep->lh_vp);
	mutex_enter(&csp->s_lock);
	if ((dip = csp->s_dip) != NULL)
		e_ddi_hold_devi(dip);
	mutex_exit(&csp->s_lock);
	if (dip == NULL)
		dip = e_ddi_hold_devi_by_dev(dev, 0);

	if (dip == NULL)
		return (DDI_FAILURE);

	LDI_EVENTCB((CE_NOTE, "%s: event_name=%s, "
	    "dip=0x%p, event_cookiep=0x%p", "ldi_get_eventcookie",
	    name, (void *)dip, (void *)ecp));

	res = ddi_get_eventcookie(dip, name, ecp);

	ddi_release_devi(dip);
	return (res);
}
2805
2806 int
2807 ldi_add_event_handler(ldi_handle_t lh, ddi_eventcookie_t ec,
2808 void (*handler)(ldi_handle_t, ddi_eventcookie_t, void *, void *),
2809 void *arg, ldi_callback_id_t *id)
2810 {
2811 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2812 struct ldi_event *lep;
2813 dev_info_t *dip;
2814 dev_t dev;
2815 int res;
2816 struct snode *csp;
2817
2818 if ((lh == NULL) || (ec == NULL) || (handler == NULL) || (id == NULL))
2819 return (DDI_FAILURE);
2820
2821 ASSERT(!servicing_interrupt());
2822
2823 dev = handlep->lh_vp->v_rdev;
2824
2825 csp = VTOCS(handlep->lh_vp);
2826 mutex_enter(&csp->s_lock);
2827 if ((dip = csp->s_dip) != NULL)
2828 e_ddi_hold_devi(dip);
2829 mutex_exit(&csp->s_lock);
2830 if (dip == NULL)
2831 dip = e_ddi_hold_devi_by_dev(dev, 0);
2832
2833 if (dip == NULL)
2834 return (DDI_FAILURE);
2835
2836 lep = kmem_zalloc(sizeof (struct ldi_event), KM_SLEEP);
2837 lep->le_lhp = handlep;
2838 lep->le_arg = arg;
2839 lep->le_handler = handler;
2840
2841 if ((res = ddi_add_event_handler(dip, ec, i_ldi_callback,
2842 (void *)lep, &lep->le_id)) != DDI_SUCCESS) {
2843 LDI_EVENTCB((CE_WARN, "%s: unable to add"
2844 "event callback", "ldi_add_event_handler"));
2845 ddi_release_devi(dip);
2846 kmem_free(lep, sizeof (struct ldi_event));
2847 return (res);
2848 }
2849
2850 *id = (ldi_callback_id_t)lep;
2851
2852 LDI_EVENTCB((CE_NOTE, "%s: dip=0x%p, event=0x%p, "
2853 "ldi_eventp=0x%p, cb_id=0x%p", "ldi_add_event_handler",
2854 (void *)dip, (void *)ec, (void *)lep, (void *)id));
2855
2856 handle_event_add(lep);
2857 ddi_release_devi(dip);
2858 return (res);
2859 }
2860
2861 int
2862 ldi_remove_event_handler(ldi_handle_t lh, ldi_callback_id_t id)
2863 {
2864 ldi_event_t *lep = (ldi_event_t *)id;
2865 int res;
2866
2867 if ((lh == NULL) || (id == NULL))
2868 return (DDI_FAILURE);
2869
2870 ASSERT(!servicing_interrupt());
2871
2872 if ((res = ddi_remove_event_handler(lep->le_id))
2873 != DDI_SUCCESS) {
2874 LDI_EVENTCB((CE_WARN, "%s: unable to remove "
2875 "event callback", "ldi_remove_event_handler"));
2876 return (res);
2877 }
2878
2879 handle_event_remove(lep);
2880 kmem_free(lep, sizeof (struct ldi_event));
2881 return (res);
2882 }
2883
2884 #endif
2885
2886 /*
2887 * Here are some definitions of terms used in the following LDI events
2888 * code:
2889 *
2890 * "LDI events" AKA "native events": These are events defined by the
2891 * "new" LDI event framework. These events are serviced by the LDI event
2892 * framework itself and thus are native to it.
2893 *
2894 * "LDI contract events": These are contract events that correspond to the
2895 * LDI events. This mapping of LDI events to contract events is defined by
2896 * the ldi_ev_cookies[] array above.
2897 *
2898 * NDI events: These are events which are serviced by the NDI event subsystem.
2899 * LDI subsystem just provides a thin wrapper around the NDI event interfaces
2900 * These events are therefore *not* native events.
2901 */
2902
2903 static int
2904 ldi_native_event(const char *evname)
2905 {
2906 int i;
2907
2908 LDI_EVTRC((CE_NOTE, "ldi_native_event: entered: ev=%s", evname));
2909
2910 for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2911 if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0)
2912 return (1);
2913 }
2914
2915 return (0);
2916 }
2917
2918 static uint_t
2919 ldi_ev_sync_event(const char *evname)
2920 {
2921 int i;
2922
2923 ASSERT(ldi_native_event(evname));
2924
2925 LDI_EVTRC((CE_NOTE, "ldi_ev_sync_event: entered: %s", evname));
2926
2927 for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2928 if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0)
2929 return (ldi_ev_cookies[i].ck_sync);
2930 }
2931
2932 /*
2933 * This should never happen until non-contract based
2934 * LDI events are introduced. If that happens, we will
2935 * use a "special" token to indicate that there are no
2936 * contracts corresponding to this LDI event.
2937 */
2938 cmn_err(CE_PANIC, "Unknown LDI event: %s", evname);
2939
2940 return (0);
2941 }
2942
2943 static uint_t
2944 ldi_contract_event(const char *evname)
2945 {
2946 int i;
2947
2948 ASSERT(ldi_native_event(evname));
2949
2950 LDI_EVTRC((CE_NOTE, "ldi_contract_event: entered: %s", evname));
2951
2952 for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2953 if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0)
2954 return (ldi_ev_cookies[i].ck_ctype);
2955 }
2956
2957 /*
2958 * This should never happen until non-contract based
2959 * LDI events are introduced. If that happens, we will
2960 * use a "special" token to indicate that there are no
2961 * contracts corresponding to this LDI event.
2962 */
2963 cmn_err(CE_PANIC, "Unknown LDI event: %s", evname);
2964
2965 return (0);
2966 }
2967
2968 char *
2969 ldi_ev_get_type(ldi_ev_cookie_t cookie)
2970 {
2971 int i;
2972 struct ldi_ev_cookie *cookie_impl = (struct ldi_ev_cookie *)cookie;
2973
2974 for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2975 if (&ldi_ev_cookies[i] == cookie_impl) {
2976 LDI_EVTRC((CE_NOTE, "ldi_ev_get_type: LDI: %s",
2977 ldi_ev_cookies[i].ck_evname));
2978 return (ldi_ev_cookies[i].ck_evname);
2979 }
2980 }
2981
2982 /*
2983 * Not an LDI native event. Must be NDI event service.
2984 * Just return a generic string
2985 */
2986 LDI_EVTRC((CE_NOTE, "ldi_ev_get_type: is NDI"));
2987 return (NDI_EVENT_SERVICE);
2988 }
2989
2990 static int
2991 ldi_native_cookie(ldi_ev_cookie_t cookie)
2992 {
2993 int i;
2994 struct ldi_ev_cookie *cookie_impl = (struct ldi_ev_cookie *)cookie;
2995
2996 for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2997 if (&ldi_ev_cookies[i] == cookie_impl) {
2998 LDI_EVTRC((CE_NOTE, "ldi_native_cookie: native LDI"));
2999 return (1);
3000 }
3001 }
3002
3003 LDI_EVTRC((CE_NOTE, "ldi_native_cookie: is NDI"));
3004 return (0);
3005 }
3006
3007 static ldi_ev_cookie_t
3008 ldi_get_native_cookie(const char *evname)
3009 {
3010 int i;
3011
3012 for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
3013 if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0) {
3014 LDI_EVTRC((CE_NOTE, "ldi_get_native_cookie: found"));
3015 return ((ldi_ev_cookie_t)&ldi_ev_cookies[i]);
3016 }
3017 }
3018
3019 LDI_EVTRC((CE_NOTE, "ldi_get_native_cookie: NOT found"));
3020 return (NULL);
3021 }
3022
/*
 * ldi_ev_lock() needs to be recursive, since layered drivers may call
 * other LDI interfaces (such as ldi_close()) from within the context of
 * a notify callback. Since the notify callback is called with the
 * ldi_ev_lock() held and ldi_close() also grabs ldi_ev_lock, the lock needs
 * to be recursive.
 */
3030 static void
3031 ldi_ev_lock(void)
3032 {
3033 LDI_EVTRC((CE_NOTE, "ldi_ev_lock: entered"));
3034
3035 mutex_enter(&ldi_ev_callback_list.le_lock);
3036 if (ldi_ev_callback_list.le_thread == curthread) {
3037 ASSERT(ldi_ev_callback_list.le_busy >= 1);
3038 ldi_ev_callback_list.le_busy++;
3039 } else {
3040 while (ldi_ev_callback_list.le_busy)
3041 cv_wait(&ldi_ev_callback_list.le_cv,
3042 &ldi_ev_callback_list.le_lock);
3043 ASSERT(ldi_ev_callback_list.le_thread == NULL);
3044 ldi_ev_callback_list.le_busy = 1;
3045 ldi_ev_callback_list.le_thread = curthread;
3046 }
3047 mutex_exit(&ldi_ev_callback_list.le_lock);
3048
3049 LDI_EVTRC((CE_NOTE, "ldi_ev_lock: exit"));
3050 }
3051
3052 static void
3053 ldi_ev_unlock(void)
3054 {
3055 LDI_EVTRC((CE_NOTE, "ldi_ev_unlock: entered"));
3056 mutex_enter(&ldi_ev_callback_list.le_lock);
3057 ASSERT(ldi_ev_callback_list.le_thread == curthread);
3058 ASSERT(ldi_ev_callback_list.le_busy >= 1);
3059
3060 ldi_ev_callback_list.le_busy--;
3061 if (ldi_ev_callback_list.le_busy == 0) {
3062 ldi_ev_callback_list.le_thread = NULL;
3063 cv_signal(&ldi_ev_callback_list.le_cv);
3064 }
3065 mutex_exit(&ldi_ev_callback_list.le_lock);
3066 LDI_EVTRC((CE_NOTE, "ldi_ev_unlock: exit"));
3067 }
3068
/*
 * Translate an event name into an event cookie for the device
 * associated with an LDI handle.  Native LDI events are resolved
 * against ldi_ev_cookies[] first; everything else is tried against
 * the NDI event service.  Returns LDI_EV_SUCCESS/LDI_EV_FAILURE.
 */
int
ldi_ev_get_cookie(ldi_handle_t lh, char *evname, ldi_ev_cookie_t *cookiep)
{
	struct ldi_handle *handlep = (struct ldi_handle *)lh;
	dev_info_t *dip;
	dev_t dev;
	int res;
	struct snode *csp;
	ddi_eventcookie_t ddi_cookie;
	ldi_ev_cookie_t tcookie;

	LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: entered: evname=%s",
	    evname ? evname : "<NULL>"));

	if (lh == NULL || evname == NULL ||
	    strlen(evname) == 0 || cookiep == NULL) {
		LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: invalid args"));
		return (LDI_EV_FAILURE);
	}

	*cookiep = NULL;

	/*
	 * First check if it is a LDI native event
	 */
	tcookie = ldi_get_native_cookie(evname);
	if (tcookie) {
		LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: got native cookie"));
		*cookiep = tcookie;
		return (LDI_EV_SUCCESS);
	}

	/*
	 * Not a LDI native event. Try NDI event services
	 */

	dev = handlep->lh_vp->v_rdev;

	/*
	 * Hold the devinfo node: prefer the one cached on the common
	 * snode, otherwise look it up (and hold it) by dev_t.
	 */
	csp = VTOCS(handlep->lh_vp);
	mutex_enter(&csp->s_lock);
	if ((dip = csp->s_dip) != NULL)
		e_ddi_hold_devi(dip);
	mutex_exit(&csp->s_lock);
	if (dip == NULL)
		dip = e_ddi_hold_devi_by_dev(dev, 0);

	if (dip == NULL) {
		cmn_err(CE_WARN, "ldi_ev_get_cookie: No devinfo node for LDI "
		    "handle: %p", (void *)handlep);
		return (LDI_EV_FAILURE);
	}

	LDI_EVDBG((CE_NOTE, "Calling ddi_get_eventcookie: dip=%p, ev=%s",
	    (void *)dip, evname));

	res = ddi_get_eventcookie(dip, evname, &ddi_cookie);

	ddi_release_devi(dip);

	if (res == DDI_SUCCESS) {
		LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: NDI cookie found"));
		*cookiep = (ldi_ev_cookie_t)ddi_cookie;
		return (LDI_EV_SUCCESS);
	} else {
		LDI_EVDBG((CE_WARN, "ldi_ev_get_cookie: NDI cookie: failed"));
		return (LDI_EV_FAILURE);
	}
}
3137
3138 /*ARGSUSED*/
3139 static void
3140 i_ldi_ev_callback(dev_info_t *dip, ddi_eventcookie_t event_cookie,
3141 void *arg, void *ev_data)
3142 {
3143 ldi_ev_callback_impl_t *lecp = (ldi_ev_callback_impl_t *)arg;
3144
3145 ASSERT(lecp != NULL);
3146 ASSERT(!ldi_native_cookie(lecp->lec_cookie));
3147 ASSERT(lecp->lec_lhp);
3148 ASSERT(lecp->lec_notify == NULL);
3149 ASSERT(lecp->lec_finalize);
3150
3151 LDI_EVDBG((CE_NOTE, "i_ldi_ev_callback: ldh=%p, cookie=%p, arg=%p, "
3152 "ev_data=%p", (void *)lecp->lec_lhp, (void *)event_cookie,
3153 (void *)lecp->lec_arg, (void *)ev_data));
3154
3155 lecp->lec_finalize(lecp->lec_lhp, (ldi_ev_cookie_t)event_cookie,
3156 lecp->lec_arg, ev_data);
3157 }
3158
/*
 * Register notify/finalize callbacks for an event on the device
 * associated with an LDI handle.  Native LDI events are tracked
 * entirely on the LDI callback list; NDI events are additionally
 * registered with the NDI event service (finalize only).  On success
 * *id receives an opaque id for ldi_ev_remove_callbacks().
 * Returns LDI_EV_SUCCESS/LDI_EV_FAILURE.
 */
int
ldi_ev_register_callbacks(ldi_handle_t lh, ldi_ev_cookie_t cookie,
    ldi_ev_callback_t *callb, void *arg, ldi_callback_id_t *id)
{
	struct ldi_handle *lhp = (struct ldi_handle *)lh;
	ldi_ev_callback_impl_t *lecp;
	dev_t dev;
	struct snode *csp;
	dev_info_t *dip;
	int ddi_event;

	ASSERT(!servicing_interrupt());

	if (lh == NULL || cookie == NULL || callb == NULL || id == NULL) {
		LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: Invalid args"));
		return (LDI_EV_FAILURE);
	}

	/* reject callers built against an unknown callback-struct version */
	if (callb->cb_vers != LDI_EV_CB_VERS) {
		LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: Invalid vers"));
		return (LDI_EV_FAILURE);
	}

	if (callb->cb_notify == NULL && callb->cb_finalize == NULL) {
		LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: NULL callb"));
		return (LDI_EV_FAILURE);
	}

	*id = 0;

	/*
	 * Hold the devinfo node: prefer the one cached on the common
	 * snode, otherwise look it up (and hold it) by dev_t.
	 */
	dev = lhp->lh_vp->v_rdev;
	csp = VTOCS(lhp->lh_vp);
	mutex_enter(&csp->s_lock);
	if ((dip = csp->s_dip) != NULL)
		e_ddi_hold_devi(dip);
	mutex_exit(&csp->s_lock);
	if (dip == NULL)
		dip = e_ddi_hold_devi_by_dev(dev, 0);

	if (dip == NULL) {
		cmn_err(CE_WARN, "ldi_ev_register: No devinfo node for "
		    "LDI handle: %p", (void *)lhp);
		return (LDI_EV_FAILURE);
	}

	lecp = kmem_zalloc(sizeof (ldi_ev_callback_impl_t), KM_SLEEP);

	ddi_event = 0;
	if (!ldi_native_cookie(cookie)) {
		if (callb->cb_notify || callb->cb_finalize == NULL) {
			/*
			 * NDI event services only accept finalize
			 */
			cmn_err(CE_WARN, "%s: module: %s: NDI event cookie. "
			    "Only finalize"
			    " callback supported with this cookie",
			    "ldi_ev_register_callbacks",
			    lhp->lh_ident->li_modname);
			kmem_free(lecp, sizeof (ldi_ev_callback_impl_t));
			ddi_release_devi(dip);
			return (LDI_EV_FAILURE);
		}

		/* route the NDI event through i_ldi_ev_callback() */
		if (ddi_add_event_handler(dip, (ddi_eventcookie_t)cookie,
		    i_ldi_ev_callback, (void *)lecp,
		    (ddi_callback_id_t *)&lecp->lec_id)
		    != DDI_SUCCESS) {
			kmem_free(lecp, sizeof (ldi_ev_callback_impl_t));
			ddi_release_devi(dip);
			LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks(): "
			    "ddi_add_event_handler failed"));
			return (LDI_EV_FAILURE);
		}
		ddi_event = 1;
		LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks(): "
		    "ddi_add_event_handler success"));
	}



	ldi_ev_lock();

	/*
	 * Add the notify/finalize callback to the LDI's list of callbacks.
	 */
	lecp->lec_lhp = lhp;
	lecp->lec_dev = lhp->lh_vp->v_rdev;
	lecp->lec_spec = VTYP_TO_STYP(lhp->lh_vp->v_type);
	lecp->lec_notify = callb->cb_notify;
	lecp->lec_finalize = callb->cb_finalize;
	lecp->lec_arg = arg;
	lecp->lec_cookie = cookie;
	/* native events get a fresh id; NDI events keep the NDI-assigned id */
	if (!ddi_event)
		lecp->lec_id = (void *)(uintptr_t)(++ldi_ev_id_pool);
	else
		ASSERT(lecp->lec_id);
	lecp->lec_dip = dip;
	list_insert_tail(&ldi_ev_callback_list.le_head, lecp);

	*id = (ldi_callback_id_t)lecp->lec_id;

	ldi_ev_unlock();

	ddi_release_devi(dip);

	LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: registered "
	    "notify/finalize"));

	return (LDI_EV_SUCCESS);
}
3269
3270 static int
3271 ldi_ev_device_match(ldi_ev_callback_impl_t *lecp, dev_info_t *dip,
3272 dev_t dev, int spec_type)
3273 {
3274 ASSERT(lecp);
3275 ASSERT(dip);
3276 ASSERT(dev != DDI_DEV_T_NONE);
3277 ASSERT(dev != NODEV);
3278 ASSERT((dev == DDI_DEV_T_ANY && spec_type == 0) ||
3279 (spec_type == S_IFCHR || spec_type == S_IFBLK));
3280 ASSERT(lecp->lec_dip);
3281 ASSERT(lecp->lec_spec == S_IFCHR || lecp->lec_spec == S_IFBLK);
3282 ASSERT(lecp->lec_dev != DDI_DEV_T_ANY);
3283 ASSERT(lecp->lec_dev != DDI_DEV_T_NONE);
3284 ASSERT(lecp->lec_dev != NODEV);
3285
3286 if (dip != lecp->lec_dip)
3287 return (0);
3288
3289 if (dev != DDI_DEV_T_ANY) {
3290 if (dev != lecp->lec_dev || spec_type != lecp->lec_spec)
3291 return (0);
3292 }
3293
3294 LDI_EVTRC((CE_NOTE, "ldi_ev_device_match: MATCH dip=%p", (void *)dip));
3295
3296 return (1);
3297 }
3298
3299 /*
3300 * LDI framework function to post a "notify" event to all layered drivers
3301 * that have registered for that event
3302 *
3303 * Returns:
3304 * LDI_EV_SUCCESS - registered callbacks allow event
3305 * LDI_EV_FAILURE - registered callbacks block event
3306 * LDI_EV_NONE - No matching LDI callbacks
3307 *
3308 * This function is *not* to be called by layered drivers. It is for I/O
3309 * framework code in Solaris, such as the I/O retire code and DR code
3310 * to call while servicing a device event such as offline or degraded.
3311 */
int
ldi_invoke_notify(dev_info_t *dip, dev_t dev, int spec_type, char *event,
    void *ev_data)
{
	ldi_ev_callback_impl_t *lecp;
	list_t *listp;
	int ret;
	char *lec_event;

	ASSERT(dip);
	ASSERT(dev != DDI_DEV_T_NONE);
	ASSERT(dev != NODEV);
	ASSERT((dev == DDI_DEV_T_ANY && spec_type == 0) ||
	    (spec_type == S_IFCHR || spec_type == S_IFBLK));
	ASSERT(event);
	ASSERT(ldi_native_event(event));
	ASSERT(ldi_ev_sync_event(event));

	LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): entered: dip=%p, ev=%s",
	    (void *)dip, event));

	ret = LDI_EV_NONE;
	ldi_ev_lock();

	/*
	 * walker_next/walker_prev are file-scope walk cursors held
	 * under ldi_ev_lock(); NOTE(review): presumably callback
	 * removal code elsewhere in this file adjusts them so entries
	 * can be unlinked safely mid-walk -- confirm against the
	 * remove path, which is not visible here.
	 */
	VERIFY(walker_next == NULL);
	listp = &ldi_ev_callback_list.le_head;
	for (lecp = list_head(listp); lecp; lecp = walker_next) {
		walker_next = list_next(listp, lecp);

		/* Check if matching device */
		if (!ldi_ev_device_match(lecp, dip, dev, spec_type))
			continue;

		if (lecp->lec_lhp == NULL) {
			/*
			 * Consumer has unregistered the handle and so
			 * is no longer interested in notify events.
			 */
			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): No LDI "
			    "handle, skipping"));
			continue;
		}

		if (lecp->lec_notify == NULL) {
			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): No notify "
			    "callback. skipping"));
			continue; /* not interested in notify */
		}

		/*
		 * Check if matching event
		 */
		lec_event = ldi_ev_get_type(lecp->lec_cookie);
		if (strcmp(event, lec_event) != 0) {
			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): Not matching"
			    " event {%s,%s}. skipping", event, lec_event));
			continue;
		}

		/* mark the handle so close can tell a notify is in flight */
		lecp->lec_lhp->lh_flags |= LH_FLAGS_NOTIFY;
		if (lecp->lec_notify(lecp->lec_lhp, lecp->lec_cookie,
		    lecp->lec_arg, ev_data) != LDI_EV_SUCCESS) {
			/* one veto blocks the event; stop and undo below */
			ret = LDI_EV_FAILURE;
			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): notify"
			    " FAILURE"));
			break;
		}

		/* We have a matching callback that allows the event to occur */
		ret = LDI_EV_SUCCESS;

		LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): 1 consumer success"));
	}

	if (ret != LDI_EV_FAILURE)
		goto out;

	LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): undoing notify"));

	/*
	 * Undo notifies already sent
	 */
	lecp = list_prev(listp, lecp);
	VERIFY(walker_prev == NULL);
	for (; lecp; lecp = walker_prev) {
		walker_prev = list_prev(listp, lecp);

		/*
		 * Check if matching device
		 */
		if (!ldi_ev_device_match(lecp, dip, dev, spec_type))
			continue;


		if (lecp->lec_finalize == NULL) {
			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): no finalize, "
			    "skipping"));
			continue; /* not interested in finalize */
		}

		/*
		 * it is possible that in response to a notify event a
		 * layered driver closed its LDI handle so it is ok
		 * to have a NULL LDI handle for finalize. The layered
		 * driver is expected to maintain state in its "arg"
		 * parameter to keep track of the closed device.
		 */

		/* Check if matching event */
		lec_event = ldi_ev_get_type(lecp->lec_cookie);
		if (strcmp(event, lec_event) != 0) {
			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): not matching "
			    "event: %s,%s, skipping", event, lec_event));
			continue;
		}

		LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): calling finalize"));

		lecp->lec_finalize(lecp->lec_lhp, lecp->lec_cookie,
		    LDI_EV_FAILURE, lecp->lec_arg, ev_data);

		/*
		 * If LDI native event and LDI handle closed in context
		 * of notify, NULL out the finalize callback as we have
		 * already called the 1 finalize above allowed in this situation
		 */
		if (lecp->lec_lhp == NULL &&
		    ldi_native_cookie(lecp->lec_cookie)) {
			LDI_EVDBG((CE_NOTE,
			    "ldi_invoke_notify(): NULL-ing finalize after "
			    "calling 1 finalize following ldi_close"));
			lecp->lec_finalize = NULL;
		}
	}

out:
	/* reset the shared walk cursors before dropping the event lock */
	walker_next = NULL;
	walker_prev = NULL;
	ldi_ev_unlock();

	if (ret == LDI_EV_NONE) {
		LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): no matching "
		    "LDI callbacks"));
	}

	return (ret);
}
3458
3459 /*
3460 * Framework function to be called from a layered driver to propagate
3461 * LDI "notify" events to exported minors.
3462 *
3463 * This function is a public interface exported by the LDI framework
3464 * for use by layered drivers to propagate device events up the software
3465 * stack.
3466 */
3467 int
3468 ldi_ev_notify(dev_info_t *dip, minor_t minor, int spec_type,
3469 ldi_ev_cookie_t cookie, void *ev_data)
3470 {
3471 char *evname = ldi_ev_get_type(cookie);
3472 uint_t ct_evtype;
3473 dev_t dev;
3474 major_t major;
3475 int retc;
3476 int retl;
3477
3478 ASSERT(spec_type == S_IFBLK || spec_type == S_IFCHR);
3479 ASSERT(dip);
3480 ASSERT(ldi_native_cookie(cookie));
3481
3482 LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): entered: event=%s, dip=%p",
3483 evname, (void *)dip));
3484
3485 if (!ldi_ev_sync_event(evname)) {
3486 cmn_err(CE_PANIC, "ldi_ev_notify(): %s not a "
3487 "negotiatable event", evname);
3488 return (LDI_EV_SUCCESS);
3489 }
3490
3491 major = ddi_driver_major(dip);
3492 if (major == DDI_MAJOR_T_NONE) {
3493 char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3494 (void) ddi_pathname(dip, path);
3495 cmn_err(CE_WARN, "ldi_ev_notify: cannot derive major number "
3496 "for device %s", path);
3497 kmem_free(path, MAXPATHLEN);
3498 return (LDI_EV_FAILURE);
3499 }
3500 dev = makedevice(major, minor);
3501
3502 /*
3503 * Generate negotiation contract events on contracts (if any) associated
3504 * with this minor.
3505 */
3506 LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): calling contract nego."));
3507 ct_evtype = ldi_contract_event(evname);
3508 retc = contract_device_negotiate(dip, dev, spec_type, ct_evtype);
3509 if (retc == CT_NACK) {
3510 LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): contract neg. NACK"));
3511 return (LDI_EV_FAILURE);
3512 }
3513
3514 LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): LDI invoke notify"));
3515 retl = ldi_invoke_notify(dip, dev, spec_type, evname, ev_data);
3516 if (retl == LDI_EV_FAILURE) {
3517 LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): ldi_invoke_notify "
3518 "returned FAILURE. Calling contract negend"));
3519 contract_device_negend(dip, dev, spec_type, CT_EV_FAILURE);
3520 return (LDI_EV_FAILURE);
3521 }
3522
3523 /*
3524 * The very fact that we are here indicates that there is a
3525 * LDI callback (and hence a constraint) for the retire of the
3526 * HW device. So we just return success even if there are no
3527 * contracts or LDI callbacks against the minors layered on top
3528 * of the HW minors
3529 */
3530 LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): returning SUCCESS"));
3531 return (LDI_EV_SUCCESS);
3532 }
3533
3534 /*
3535 * LDI framework function to invoke "finalize" callbacks for all layered
3536 * drivers that have registered callbacks for that event.
3537 *
3538 * This function is *not* to be called by layered drivers. It is for I/O
3539 * framework code in Solaris, such as the I/O retire code and DR code
3540 * to call while servicing a device event such as offline or degraded.
3541 */
void
ldi_invoke_finalize(dev_info_t *dip, dev_t dev, int spec_type, char *event,
    int ldi_result, void *ev_data)
{
	ldi_ev_callback_impl_t *lecp;
	list_t *listp;
	char *lec_event;
	/* set once at least one matching finalize callback has been called */
	int found = 0;

	ASSERT(dip);
	ASSERT(dev != DDI_DEV_T_NONE);
	ASSERT(dev != NODEV);
	/* DDI_DEV_T_ANY (match any minor) requires spec_type == 0 */
	ASSERT((dev == DDI_DEV_T_ANY && spec_type == 0) ||
	    (spec_type == S_IFCHR || spec_type == S_IFBLK));
	ASSERT(event);
	ASSERT(ldi_native_event(event));
	ASSERT(ldi_result == LDI_EV_SUCCESS || ldi_result == LDI_EV_FAILURE);

	LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): entered: dip=%p, result=%d"
	    " event=%s", (void *)dip, ldi_result, event));

	/*
	 * Walk the registered callbacks under ldi_ev_lock(). The walk
	 * cursor (walker_next) is file-scope rather than a local so that
	 * ldi_ev_remove_callbacks() can advance it if the entry the walk
	 * is currently parked on gets unlinked from the list.
	 */
	ldi_ev_lock();
	VERIFY(walker_next == NULL);
	listp = &ldi_ev_callback_list.le_head;
	for (lecp = list_head(listp); lecp; lecp = walker_next) {
		/* capture successor first; lecp may be removed under us */
		walker_next = list_next(listp, lecp);

		if (lecp->lec_finalize == NULL) {
			LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): No "
			    "finalize. Skipping"));
			continue;	/* Not interested in finalize */
		}

		/*
		 * Check if matching device
		 */
		if (!ldi_ev_device_match(lecp, dip, dev, spec_type))
			continue;

		/*
		 * It is valid for the LDI handle to be NULL during finalize.
		 * The layered driver may have done an LDI close in the notify
		 * callback.
		 */

		/*
		 * Check if matching event
		 */
		lec_event = ldi_ev_get_type(lecp->lec_cookie);
		if (strcmp(event, lec_event) != 0) {
			LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): Not "
			    "matching event {%s,%s}. Skipping",
			    event, lec_event));
			continue;
		}

		LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): calling finalize"));

		found = 1;

		lecp->lec_finalize(lecp->lec_lhp, lecp->lec_cookie,
		    ldi_result, lecp->lec_arg, ev_data);

		/*
		 * If LDI native event and LDI handle closed in context
		 * of notify, NULL out the finalize callback as we have
		 * already called the 1 finalize above allowed in this situation
		 */
		if (lecp->lec_lhp == NULL &&
		    ldi_native_cookie(lecp->lec_cookie)) {
			LDI_EVDBG((CE_NOTE,
			    "ldi_invoke_finalize(): NULLing finalize after "
			    "calling 1 finalize following ldi_close"));
			lecp->lec_finalize = NULL;
		}
	}
	/* Walk complete: reset the cursor before dropping the lock */
	walker_next = NULL;
	ldi_ev_unlock();

	if (found)
		return;

	LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): no matching callbacks"));
}
3626
3627 /*
3628 * Framework function to be called from a layered driver to propagate
3629 * LDI "finalize" events to exported minors.
3630 *
3631 * This function is a public interface exported by the LDI framework
3632 * for use by layered drivers to propagate device events up the software
3633 * stack.
3634 */
3635 void
3636 ldi_ev_finalize(dev_info_t *dip, minor_t minor, int spec_type, int ldi_result,
3637 ldi_ev_cookie_t cookie, void *ev_data)
3638 {
3639 dev_t dev;
3640 major_t major;
3641 char *evname;
3642 int ct_result = (ldi_result == LDI_EV_SUCCESS) ?
3643 CT_EV_SUCCESS : CT_EV_FAILURE;
3644 uint_t ct_evtype;
3645
3646 ASSERT(dip);
3647 ASSERT(spec_type == S_IFBLK || spec_type == S_IFCHR);
3648 ASSERT(ldi_result == LDI_EV_SUCCESS || ldi_result == LDI_EV_FAILURE);
3649 ASSERT(ldi_native_cookie(cookie));
3650
3651 LDI_EVDBG((CE_NOTE, "ldi_ev_finalize: entered: dip=%p", (void *)dip));
3652
3653 major = ddi_driver_major(dip);
3654 if (major == DDI_MAJOR_T_NONE) {
3655 char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3656 (void) ddi_pathname(dip, path);
3657 cmn_err(CE_WARN, "ldi_ev_finalize: cannot derive major number "
3658 "for device %s", path);
3659 kmem_free(path, MAXPATHLEN);
3660 return;
3661 }
3662 dev = makedevice(major, minor);
3663
3664 evname = ldi_ev_get_type(cookie);
3665
3666 LDI_EVDBG((CE_NOTE, "ldi_ev_finalize: calling contracts"));
3667 ct_evtype = ldi_contract_event(evname);
3668 contract_device_finalize(dip, dev, spec_type, ct_evtype, ct_result);
3669
3670 LDI_EVDBG((CE_NOTE, "ldi_ev_finalize: calling ldi_invoke_finalize"));
3671 ldi_invoke_finalize(dip, dev, spec_type, evname, ldi_result, ev_data);
3672 }
3673
/*
 * Remove a previously registered LDI event callback identified by 'id'.
 * Returns LDI_EV_SUCCESS on removal (or if no callback with that id
 * exists), LDI_EV_FAILURE if the backing NDI handler could not be removed.
 */
int
ldi_ev_remove_callbacks(ldi_callback_id_t id)
{
	ldi_ev_callback_impl_t *lecp;
	ldi_ev_callback_impl_t *next;
	ldi_ev_callback_impl_t *found;
	list_t *listp;

	/* removal may block (locks, NDI teardown); not for interrupt ctx */
	ASSERT(!servicing_interrupt());

	if (id == 0) {
		cmn_err(CE_WARN, "ldi_ev_remove_callbacks: Invalid ID 0");
		return (LDI_EV_FAILURE);
	}

	LDI_EVDBG((CE_NOTE, "ldi_ev_remove_callbacks: entered: id=%p",
	    (void *)id));

	ldi_ev_lock();

	/*
	 * Unlink the entry whose lec_id matches. The ASSERT encodes the
	 * expectation that ids are unique, so at most one entry matches.
	 */
	listp = &ldi_ev_callback_list.le_head;
	next = found = NULL;
	for (lecp = list_head(listp); lecp; lecp = next) {
		next = list_next(listp, lecp);
		if (lecp->lec_id == id) {
			ASSERT(found == NULL);

			/* If there is a walk in progress, move it along... */
			if (walker_next == lecp)
				walker_next = next;
			if (walker_prev == lecp)
				walker_prev = list_prev(listp, walker_prev);

			list_remove(listp, lecp);
			found = lecp;
		}
	}
	ldi_ev_unlock();

	/*
	 * Removing a non-existent callback is not an error: warn and
	 * report success.
	 */
	if (found == NULL) {
		cmn_err(CE_WARN, "No LDI event handler for id (%p)",
		    (void *)id);
		return (LDI_EV_SUCCESS);
	}

	/*
	 * A non-native cookie means the callback is backed by an NDI
	 * event registration that must be torn down as well. If that
	 * teardown fails, re-insert the entry so overall state is
	 * unchanged and report failure.
	 */
	if (!ldi_native_cookie(found->lec_cookie)) {
		ASSERT(found->lec_notify == NULL);
		if (ddi_remove_event_handler((ddi_callback_id_t)id)
		    != DDI_SUCCESS) {
			cmn_err(CE_WARN, "failed to remove NDI event handler "
			    "for id (%p)", (void *)id);
			ldi_ev_lock();
			list_insert_tail(listp, found);
			ldi_ev_unlock();
			return (LDI_EV_FAILURE);
		}
		LDI_EVDBG((CE_NOTE, "ldi_ev_remove_callbacks: NDI event "
		    "service removal succeeded"));
	} else {
		LDI_EVDBG((CE_NOTE, "ldi_ev_remove_callbacks: removed "
		    "LDI native callbacks"));
	}
	kmem_free(found, sizeof (ldi_ev_callback_impl_t));

	return (LDI_EV_SUCCESS);
}