LOCAL: LDI -- make walks removal-safe
--- old/usr/src/uts/common/os/driver_lyr.c
+++ new/usr/src/uts/common/os/driver_lyr.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 /*
26 26 * Layered driver support.
27 27 */
28 28
29 29 #include <sys/atomic.h>
30 30 #include <sys/types.h>
31 31 #include <sys/t_lock.h>
32 32 #include <sys/param.h>
33 33 #include <sys/conf.h>
34 34 #include <sys/systm.h>
35 35 #include <sys/sysmacros.h>
36 36 #include <sys/buf.h>
37 37 #include <sys/cred.h>
38 38 #include <sys/uio.h>
39 39 #include <sys/vnode.h>
40 40 #include <sys/fs/snode.h>
41 41 #include <sys/open.h>
42 42 #include <sys/kmem.h>
43 43 #include <sys/file.h>
44 44 #include <sys/bootconf.h>
45 45 #include <sys/pathname.h>
46 46 #include <sys/bitmap.h>
47 47 #include <sys/stat.h>
48 48 #include <sys/dditypes.h>
49 49 #include <sys/ddi_impldefs.h>
50 50 #include <sys/ddi.h>
51 51 #include <sys/sunddi.h>
52 52 #include <sys/sunndi.h>
53 53 #include <sys/esunddi.h>
54 54 #include <sys/autoconf.h>
55 55 #include <sys/sunldi.h>
56 56 #include <sys/sunldi_impl.h>
57 57 #include <sys/errno.h>
58 58 #include <sys/debug.h>
59 59 #include <sys/modctl.h>
60 60 #include <sys/var.h>
61 61 #include <vm/seg_vn.h>
62 62
63 63 #include <sys/stropts.h>
64 64 #include <sys/strsubr.h>
65 65 #include <sys/socket.h>
66 66 #include <sys/socketvar.h>
67 67 #include <sys/kstr.h>
68 68
69 69 /*
70 70 * Device contract related
71 71 */
72 72 #include <sys/contract_impl.h>
73 73 #include <sys/contract/device_impl.h>
74 74
75 75 /*
76 76 * Define macros to manipulate snode, vnode, and open device flags
77 77 */
78 78 #define VTYP_VALID(i) (((i) == VCHR) || ((i) == VBLK))
79 79 #define VTYP_TO_OTYP(i) (((i) == VCHR) ? OTYP_CHR : OTYP_BLK)
80 80 #define VTYP_TO_STYP(i) (((i) == VCHR) ? S_IFCHR : S_IFBLK)
81 81
82 82 #define OTYP_VALID(i) (((i) == OTYP_CHR) || ((i) == OTYP_BLK))
83 83 #define OTYP_TO_VTYP(i) (((i) == OTYP_CHR) ? VCHR : VBLK)
84 84 #define OTYP_TO_STYP(i) (((i) == OTYP_CHR) ? S_IFCHR : S_IFBLK)
85 85
86 86 #define STYP_VALID(i) (((i) == S_IFCHR) || ((i) == S_IFBLK))
87 87 #define STYP_TO_VTYP(i) (((i) == S_IFCHR) ? VCHR : VBLK)
88 88
89 89 /*
90 90 * Define macros for accessing layered driver hash structures
91 91 */
92 92 #define LH_HASH(vp) (handle_hash_func(vp) % LH_HASH_SZ)
93 93 #define LI_HASH(mid, dip, dev) (ident_hash_func(mid, dip, dev) % LI_HASH_SZ)
94 94
95 95 /*
96 96 * Define layered handle flags used in the lh_type field
97 97 */
98 98 #define LH_STREAM (0x1) /* handle to a streams device */
99 99 #define LH_CBDEV (0x2) /* handle to a char/block device */
100 100
101 101 /*
102 102 * Define macro for devid property lookups
103 103 */
104 104 #define DEVID_PROP_FLAGS (DDI_PROP_DONTPASS | \
105 105 DDI_PROP_TYPE_STRING|DDI_PROP_CANSLEEP)
106 106
107 107 /*
108 108 * Dummy string for NDI events
109 109 */
110 110 #define NDI_EVENT_SERVICE "NDI_EVENT_SERVICE"
111 111
112 112 static void ldi_ev_lock(void);
113 113 static void ldi_ev_unlock(void);
114 114
115 115 #ifdef LDI_OBSOLETE_EVENT
116 116 int ldi_remove_event_handler(ldi_handle_t lh, ldi_callback_id_t id);
117 117 #endif
118 118
119 119
120 120 /*
121 121 * globals
122 122 */
123 123 static kmutex_t ldi_ident_hash_lock[LI_HASH_SZ];
124 124 static struct ldi_ident *ldi_ident_hash[LI_HASH_SZ];
125 125
126 126 static kmutex_t ldi_handle_hash_lock[LH_HASH_SZ];
127 127 static struct ldi_handle *ldi_handle_hash[LH_HASH_SZ];
128 128 static size_t ldi_handle_hash_count;
129 129
130 130 static struct ldi_ev_callback_list ldi_ev_callback_list;
131 131
132 132 static uint32_t ldi_ev_id_pool = 0;
133 133
134 134 struct ldi_ev_cookie {
135 135 char *ck_evname;
136 136 uint_t ck_sync;
137 137 uint_t ck_ctype;
138 138 };
139 139
140 140 static struct ldi_ev_cookie ldi_ev_cookies[] = {
141 141 { LDI_EV_OFFLINE, 1, CT_DEV_EV_OFFLINE},
142 142 { LDI_EV_DEGRADE, 0, CT_DEV_EV_DEGRADED},
143 143 { LDI_EV_DEVICE_REMOVE, 0, 0},
144 144 { NULL} /* must terminate list */
145 145 };
146 146
147 +static ldi_ev_callback_impl_t *walker_next = NULL;
148 +static ldi_ev_callback_impl_t *walker_prev = NULL;
149 +
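The two walker cursors added above are the core of the removal-safe scheme named in the title: a walk records the element it intends to visit next in these globals, and any code that removes a callback while a walk is in progress advances the cursors past the element being freed. Below is a minimal sketch of that pattern, assuming only the illumos list_t interfaces (list_head/list_next/list_prev/list_remove) and the le_head list created in ldi_init() below; the function names removal_safe_walk() and removal_safe_remove() are illustrative and are not the names used elsewhere in this change.

    /* Illustrative only: a walk over the callback list that survives removals. */
    static void
    removal_safe_walk(void (*visit)(ldi_ev_callback_impl_t *))
    {
            ldi_ev_callback_impl_t *lecp;

            ldi_ev_lock();
            walker_next = list_head(&ldi_ev_callback_list.le_head);
            while ((lecp = walker_next) != NULL) {
                    /* advance first so a removal during visit() can't strand us */
                    walker_next = list_next(&ldi_ev_callback_list.le_head, lecp);
                    ldi_ev_unlock();
                    visit(lecp);            /* may remove entries, including lecp */
                    ldi_ev_lock();
            }
            walker_next = NULL;
            ldi_ev_unlock();
    }

    /* Illustrative only: a removal that keeps an in-progress walk consistent. */
    static void
    removal_safe_remove(ldi_ev_callback_impl_t *lecp)
    {
            ldi_ev_lock();
            if (walker_next == lecp)
                    walker_next = list_next(&ldi_ev_callback_list.le_head, lecp);
            if (walker_prev == lecp)
                    walker_prev = list_prev(&ldi_ev_callback_list.le_head, lecp);
            list_remove(&ldi_ev_callback_list.le_head, lecp);
            kmem_free(lecp, sizeof (*lecp));
            ldi_ev_unlock();
    }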
147 150 void
148 151 ldi_init(void)
149 152 {
150 153 int i;
151 154
152 155 ldi_handle_hash_count = 0;
153 156 for (i = 0; i < LH_HASH_SZ; i++) {
154 157 mutex_init(&ldi_handle_hash_lock[i], NULL, MUTEX_DEFAULT, NULL);
155 158 ldi_handle_hash[i] = NULL;
156 159 }
157 160 for (i = 0; i < LI_HASH_SZ; i++) {
158 161 mutex_init(&ldi_ident_hash_lock[i], NULL, MUTEX_DEFAULT, NULL);
159 162 ldi_ident_hash[i] = NULL;
160 163 }
161 164
162 165 /*
163 166 * Initialize the LDI event subsystem
164 167 */
165 168 mutex_init(&ldi_ev_callback_list.le_lock, NULL, MUTEX_DEFAULT, NULL);
166 169 cv_init(&ldi_ev_callback_list.le_cv, NULL, CV_DEFAULT, NULL);
167 170 ldi_ev_callback_list.le_busy = 0;
168 171 ldi_ev_callback_list.le_thread = NULL;
169 172 list_create(&ldi_ev_callback_list.le_head,
170 173 sizeof (ldi_ev_callback_impl_t),
171 174 offsetof(ldi_ev_callback_impl_t, lec_list));
172 175 }
173 176
174 177 /*
175 178 * LDI ident manipulation functions
176 179 */
177 180 static uint_t
178 181 ident_hash_func(modid_t modid, dev_info_t *dip, dev_t dev)
179 182 {
180 183 if (dip != NULL) {
181 184 uintptr_t k = (uintptr_t)dip;
182 185 k >>= (int)highbit(sizeof (struct dev_info));
183 186 return ((uint_t)k);
184 187 } else if (dev != DDI_DEV_T_NONE) {
185 188 return (modid + getminor(dev) + getmajor(dev));
186 189 } else {
187 190 return (modid);
188 191 }
189 192 }
190 193
191 194 static struct ldi_ident **
192 195 ident_find_ref_nolock(modid_t modid, dev_info_t *dip, dev_t dev, major_t major)
193 196 {
194 197 struct ldi_ident **lipp = NULL;
195 198 uint_t index = LI_HASH(modid, dip, dev);
196 199
197 200 ASSERT(MUTEX_HELD(&ldi_ident_hash_lock[index]));
198 201
199 202 for (lipp = &(ldi_ident_hash[index]);
200 203 (*lipp != NULL);
201 204 lipp = &((*lipp)->li_next)) {
202 205 if (((*lipp)->li_modid == modid) &&
203 206 ((*lipp)->li_major == major) &&
204 207 ((*lipp)->li_dip == dip) &&
205 208 ((*lipp)->li_dev == dev))
206 209 break;
207 210 }
208 211
209 212 ASSERT(lipp != NULL);
210 213 return (lipp);
211 214 }
212 215
213 216 static struct ldi_ident *
214 217 ident_alloc(char *mod_name, dev_info_t *dip, dev_t dev, major_t major)
215 218 {
216 219 struct ldi_ident *lip, **lipp, *retlip;
217 220 modid_t modid;
218 221 uint_t index;
219 222
220 223 ASSERT(mod_name != NULL);
221 224
222 225 /* get the module id */
223 226 modid = mod_name_to_modid(mod_name);
224 227 ASSERT(modid != -1);
225 228
226 229 /* allocate a new ident in case we need it */
227 230 lip = kmem_zalloc(sizeof (*lip), KM_SLEEP);
228 231
229 232 /* search the hash for a matching ident */
230 233 index = LI_HASH(modid, dip, dev);
231 234 mutex_enter(&ldi_ident_hash_lock[index]);
232 235 lipp = ident_find_ref_nolock(modid, dip, dev, major);
233 236
234 237 if (*lipp != NULL) {
235 238 /* we found an ident in the hash */
236 239 ASSERT(strcmp((*lipp)->li_modname, mod_name) == 0);
237 240 (*lipp)->li_ref++;
238 241 retlip = *lipp;
239 242 mutex_exit(&ldi_ident_hash_lock[index]);
240 243 kmem_free(lip, sizeof (struct ldi_ident));
241 244 return (retlip);
242 245 }
243 246
244 247 /* initialize the new ident */
245 248 lip->li_next = NULL;
246 249 lip->li_ref = 1;
247 250 lip->li_modid = modid;
248 251 lip->li_major = major;
249 252 lip->li_dip = dip;
250 253 lip->li_dev = dev;
251 254 (void) strncpy(lip->li_modname, mod_name, sizeof (lip->li_modname) - 1);
252 255
253 256 /* add it to the ident hash */
254 257 lip->li_next = ldi_ident_hash[index];
255 258 ldi_ident_hash[index] = lip;
256 259
257 260 mutex_exit(&ldi_ident_hash_lock[index]);
258 261 return (lip);
259 262 }
260 263
261 264 static void
262 265 ident_hold(struct ldi_ident *lip)
263 266 {
264 267 uint_t index;
265 268
266 269 ASSERT(lip != NULL);
267 270 index = LI_HASH(lip->li_modid, lip->li_dip, lip->li_dev);
268 271 mutex_enter(&ldi_ident_hash_lock[index]);
269 272 ASSERT(lip->li_ref > 0);
270 273 lip->li_ref++;
271 274 mutex_exit(&ldi_ident_hash_lock[index]);
272 275 }
273 276
274 277 static void
275 278 ident_release(struct ldi_ident *lip)
276 279 {
277 280 struct ldi_ident **lipp;
278 281 uint_t index;
279 282
280 283 ASSERT(lip != NULL);
281 284 index = LI_HASH(lip->li_modid, lip->li_dip, lip->li_dev);
282 285 mutex_enter(&ldi_ident_hash_lock[index]);
283 286
284 287 ASSERT(lip->li_ref > 0);
285 288 if (--lip->li_ref > 0) {
286 289 /* there are more references to this ident */
287 290 mutex_exit(&ldi_ident_hash_lock[index]);
288 291 return;
289 292 }
290 293
291 294 /* this was the last reference/open for this ident. free it. */
292 295 lipp = ident_find_ref_nolock(
293 296 lip->li_modid, lip->li_dip, lip->li_dev, lip->li_major);
294 297
295 298 ASSERT((lipp != NULL) && (*lipp != NULL));
296 299 *lipp = lip->li_next;
297 300 mutex_exit(&ldi_ident_hash_lock[index]);
298 301 kmem_free(lip, sizeof (struct ldi_ident));
299 302 }
300 303
301 304 /*
302 305 * LDI handle manipulation functions
303 306 */
304 307 static uint_t
305 308 handle_hash_func(void *vp)
306 309 {
307 310 uintptr_t k = (uintptr_t)vp;
308 311 k >>= (int)highbit(sizeof (vnode_t));
309 312 return ((uint_t)k);
310 313 }
311 314
312 315 static struct ldi_handle **
313 316 handle_find_ref_nolock(vnode_t *vp, struct ldi_ident *ident)
314 317 {
315 318 struct ldi_handle **lhpp = NULL;
316 319 uint_t index = LH_HASH(vp);
317 320
318 321 ASSERT(MUTEX_HELD(&ldi_handle_hash_lock[index]));
319 322
320 323 for (lhpp = &(ldi_handle_hash[index]);
321 324 (*lhpp != NULL);
322 325 lhpp = &((*lhpp)->lh_next)) {
323 326 if (((*lhpp)->lh_ident == ident) &&
324 327 ((*lhpp)->lh_vp == vp))
325 328 break;
326 329 }
327 330
328 331 ASSERT(lhpp != NULL);
329 332 return (lhpp);
330 333 }
331 334
332 335 static struct ldi_handle *
333 336 handle_find(vnode_t *vp, struct ldi_ident *ident)
334 337 {
335 338 struct ldi_handle **lhpp, *retlhp;
336 339 int index = LH_HASH(vp);
337 340
338 341 mutex_enter(&ldi_handle_hash_lock[index]);
339 342 lhpp = handle_find_ref_nolock(vp, ident);
340 343 retlhp = *lhpp;
341 344 mutex_exit(&ldi_handle_hash_lock[index]);
342 345 return (retlhp);
343 346 }
344 347
345 348 static struct ldi_handle *
346 349 handle_alloc(vnode_t *vp, struct ldi_ident *ident)
347 350 {
348 351 struct ldi_handle *lhp, **lhpp, *retlhp;
349 352 uint_t index;
350 353
351 354 ASSERT((vp != NULL) && (ident != NULL));
352 355
353 356 /* allocate a new handle in case we need it */
354 357 lhp = kmem_zalloc(sizeof (*lhp), KM_SLEEP);
355 358
356 359 /* search the hash for a matching handle */
357 360 index = LH_HASH(vp);
358 361 mutex_enter(&ldi_handle_hash_lock[index]);
359 362 lhpp = handle_find_ref_nolock(vp, ident);
360 363
361 364 if (*lhpp != NULL) {
362 365 /* we found a handle in the hash */
363 366 (*lhpp)->lh_ref++;
364 367 retlhp = *lhpp;
365 368 mutex_exit(&ldi_handle_hash_lock[index]);
366 369
367 370 LDI_ALLOCFREE((CE_WARN, "ldi handle alloc: dup "
368 371 "lh=0x%p, ident=0x%p, vp=0x%p, drv=%s, minor=0x%x",
369 372 (void *)retlhp, (void *)ident, (void *)vp,
370 373 mod_major_to_name(getmajor(vp->v_rdev)),
371 374 getminor(vp->v_rdev)));
372 375
373 376 kmem_free(lhp, sizeof (struct ldi_handle));
374 377 return (retlhp);
375 378 }
376 379
377 380 /* initialize the new handle */
378 381 lhp->lh_ref = 1;
379 382 lhp->lh_vp = vp;
380 383 lhp->lh_ident = ident;
381 384 #ifdef LDI_OBSOLETE_EVENT
382 385 mutex_init(lhp->lh_lock, NULL, MUTEX_DEFAULT, NULL);
383 386 #endif
384 387
385 388 /* set the device type for this handle */
386 389 lhp->lh_type = 0;
387 390 if (vp->v_stream) {
388 391 ASSERT(vp->v_type == VCHR);
389 392 lhp->lh_type |= LH_STREAM;
390 393 } else {
391 394 lhp->lh_type |= LH_CBDEV;
392 395 }
393 396
394 397 /* get holds on other objects */
395 398 ident_hold(ident);
396 399 ASSERT(vp->v_count >= 1);
397 400 VN_HOLD(vp);
398 401
399 402 /* add it to the handle hash */
400 403 lhp->lh_next = ldi_handle_hash[index];
401 404 ldi_handle_hash[index] = lhp;
402 405 atomic_add_long(&ldi_handle_hash_count, 1);
403 406
404 407 LDI_ALLOCFREE((CE_WARN, "ldi handle alloc: new "
405 408 "lh=0x%p, ident=0x%p, vp=0x%p, drv=%s, minor=0x%x",
406 409 (void *)lhp, (void *)ident, (void *)vp,
407 410 mod_major_to_name(getmajor(vp->v_rdev)),
408 411 getminor(vp->v_rdev)));
409 412
410 413 mutex_exit(&ldi_handle_hash_lock[index]);
411 414 return (lhp);
412 415 }
413 416
414 417 static void
415 418 handle_release(struct ldi_handle *lhp)
416 419 {
417 420 struct ldi_handle **lhpp;
418 421 uint_t index;
419 422
420 423 ASSERT(lhp != NULL);
421 424
422 425 index = LH_HASH(lhp->lh_vp);
423 426 mutex_enter(&ldi_handle_hash_lock[index]);
424 427
425 428 LDI_ALLOCFREE((CE_WARN, "ldi handle release: "
426 429 "lh=0x%p, ident=0x%p, vp=0x%p, drv=%s, minor=0x%x",
427 430 (void *)lhp, (void *)lhp->lh_ident, (void *)lhp->lh_vp,
428 431 mod_major_to_name(getmajor(lhp->lh_vp->v_rdev)),
429 432 getminor(lhp->lh_vp->v_rdev)));
430 433
431 434 ASSERT(lhp->lh_ref > 0);
432 435 if (--lhp->lh_ref > 0) {
433 436 /* there are more references to this handle */
434 437 mutex_exit(&ldi_handle_hash_lock[index]);
435 438 return;
436 439 }
437 440
438 441 /* this was the last reference/open for this handle. free it. */
439 442 lhpp = handle_find_ref_nolock(lhp->lh_vp, lhp->lh_ident);
440 443 ASSERT((lhpp != NULL) && (*lhpp != NULL));
441 444 *lhpp = lhp->lh_next;
442 445 atomic_add_long(&ldi_handle_hash_count, -1);
443 446 mutex_exit(&ldi_handle_hash_lock[index]);
444 447
445 448 VN_RELE(lhp->lh_vp);
446 449 ident_release(lhp->lh_ident);
447 450 #ifdef LDI_OBSOLETE_EVENT
448 451 mutex_destroy(lhp->lh_lock);
449 452 #endif
450 453 kmem_free(lhp, sizeof (struct ldi_handle));
451 454 }
452 455
453 456 #ifdef LDI_OBSOLETE_EVENT
454 457 /*
455 458 * LDI event manipulation functions
456 459 */
457 460 static void
458 461 handle_event_add(ldi_event_t *lep)
459 462 {
460 463 struct ldi_handle *lhp = lep->le_lhp;
461 464
462 465 ASSERT(lhp != NULL);
463 466
464 467 mutex_enter(lhp->lh_lock);
465 468 if (lhp->lh_events == NULL) {
466 469 lhp->lh_events = lep;
467 470 mutex_exit(lhp->lh_lock);
468 471 return;
469 472 }
470 473
471 474 lep->le_next = lhp->lh_events;
472 475 lhp->lh_events->le_prev = lep;
473 476 lhp->lh_events = lep;
474 477 mutex_exit(lhp->lh_lock);
475 478 }
476 479
477 480 static void
478 481 handle_event_remove(ldi_event_t *lep)
479 482 {
480 483 struct ldi_handle *lhp = lep->le_lhp;
481 484
482 485 ASSERT(lhp != NULL);
483 486
484 487 mutex_enter(lhp->lh_lock);
485 488 if (lep->le_prev)
486 489 lep->le_prev->le_next = lep->le_next;
487 490 if (lep->le_next)
488 491 lep->le_next->le_prev = lep->le_prev;
489 492 if (lhp->lh_events == lep)
490 493 lhp->lh_events = lep->le_next;
491 494 mutex_exit(lhp->lh_lock);
492 495
493 496 }
494 497
495 498 static void
496 499 i_ldi_callback(dev_info_t *dip, ddi_eventcookie_t event_cookie,
497 500 void *arg, void *bus_impldata)
498 501 {
499 502 ldi_event_t *lep = (ldi_event_t *)arg;
500 503
501 504 ASSERT(lep != NULL);
502 505
503 506 LDI_EVENTCB((CE_NOTE, "%s: dip=0x%p, "
504 507 "event_cookie=0x%p, ldi_eventp=0x%p", "i_ldi_callback",
505 508 (void *)dip, (void *)event_cookie, (void *)lep));
506 509
507 510 lep->le_handler(lep->le_lhp, event_cookie, lep->le_arg, bus_impldata);
508 511 }
509 512 #endif
510 513
511 514 /*
512 515 * LDI open helper functions
513 516 */
514 517
515 518 /* get a vnode to a device by dev_t and otyp */
516 519 static int
517 520 ldi_vp_from_dev(dev_t dev, int otyp, vnode_t **vpp)
518 521 {
519 522 dev_info_t *dip;
520 523 vnode_t *vp;
521 524
522 525 /* sanity check required input parameters */
523 526 if ((dev == DDI_DEV_T_NONE) || (!OTYP_VALID(otyp)) || (vpp == NULL))
524 527 return (EINVAL);
525 528
526 529 if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
527 530 return (ENODEV);
528 531
529 532 vp = makespecvp(dev, OTYP_TO_VTYP(otyp));
530 533 spec_assoc_vp_with_devi(vp, dip);
531 534 ddi_release_devi(dip); /* from e_ddi_hold_devi_by_dev */
532 535
533 536 *vpp = vp;
534 537 return (0);
535 538 }
536 539
537 540 /* get a vnode to a device by pathname */
538 541 int
539 542 ldi_vp_from_name(char *path, vnode_t **vpp)
540 543 {
541 544 vnode_t *vp = NULL;
542 545 int ret;
543 546
544 547 /* sanity check required input parameters */
545 548 if ((path == NULL) || (vpp == NULL))
546 549 return (EINVAL);
547 550
548 551 if (modrootloaded) {
549 552 cred_t *saved_cred = curthread->t_cred;
550 553
551 554 /* we don't want lookupname to fail because of credentials */
552 555 curthread->t_cred = kcred;
553 556
554 557 /*
555 558 * all lookups should be done in the global zone. but
556 559 * lookupnameat() won't actually do this if an absolute
557 560 * path is passed in. since the ldi interfaces require an
558 561 * absolute path we pass lookupnameat() a pointer to
559 562 * the character after the leading '/' and tell it to
560 563 * start searching at the current system root directory.
561 564 */
562 565 ASSERT(*path == '/');
563 566 ret = lookupnameat(path + 1, UIO_SYSSPACE, FOLLOW, NULLVPP,
564 567 &vp, rootdir);
565 568
566 569 /* restore this threads credentials */
567 570 curthread->t_cred = saved_cred;
568 571
569 572 if (ret == 0) {
570 573 if (!vn_matchops(vp, spec_getvnodeops()) ||
571 574 !VTYP_VALID(vp->v_type)) {
572 575 VN_RELE(vp);
573 576 return (ENXIO);
574 577 }
575 578 }
576 579 }
577 580
578 581 if (vp == NULL) {
579 582 dev_info_t *dip;
580 583 dev_t dev;
581 584 int spec_type;
582 585
583 586 /*
584 587 * Root is not mounted, the minor node is not specified,
585 588 * or an OBP path has been specified.
586 589 */
587 590
588 591 /*
589 592 * Determine if path can be pruned to produce an
590 593 * OBP or devfs path for resolve_pathname.
591 594 */
592 595 if (strncmp(path, "/devices/", 9) == 0)
593 596 path += strlen("/devices");
594 597
595 598 /*
596 599 * if no minor node was specified the DEFAULT minor node
597 600 * will be returned. if there is no DEFAULT minor node
598 601 * one will be fabricated of type S_IFCHR with the minor
599 602 * number equal to the instance number.
600 603 */
601 604 ret = resolve_pathname(path, &dip, &dev, &spec_type);
602 605 if (ret != 0)
603 606 return (ENODEV);
604 607
605 608 ASSERT(STYP_VALID(spec_type));
606 609 vp = makespecvp(dev, STYP_TO_VTYP(spec_type));
607 610 spec_assoc_vp_with_devi(vp, dip);
608 611 ddi_release_devi(dip);
609 612 }
610 613
611 614 *vpp = vp;
612 615 return (0);
613 616 }
614 617
615 618 static int
616 619 ldi_devid_match(ddi_devid_t devid, dev_info_t *dip, dev_t dev)
617 620 {
618 621 char *devidstr;
619 622 ddi_prop_t *propp;
620 623
621 624 /* convert devid as a string property */
622 625 if ((devidstr = ddi_devid_str_encode(devid, NULL)) == NULL)
623 626 return (0);
624 627
625 628 /*
626 629 * Search for the devid. For speed and ease in locking this
627 630 * code directly uses the property implementation. See
628 631 * ddi_common_devid_to_devlist() for a comment as to why.
629 632 */
630 633 mutex_enter(&(DEVI(dip)->devi_lock));
631 634
632 635 /* check if there is a DDI_DEV_T_NONE devid property */
633 636 propp = i_ddi_prop_search(DDI_DEV_T_NONE,
634 637 DEVID_PROP_NAME, DEVID_PROP_FLAGS, &DEVI(dip)->devi_hw_prop_ptr);
635 638 if (propp != NULL) {
636 639 if (ddi_devid_str_compare(propp->prop_val, devidstr) == 0) {
637 640 /* a DDI_DEV_T_NONE devid exists and matches */
638 641 mutex_exit(&(DEVI(dip)->devi_lock));
639 642 ddi_devid_str_free(devidstr);
640 643 return (1);
641 644 } else {
642 645 /* a DDI_DEV_T_NONE devid exists and doesn't match */
643 646 mutex_exit(&(DEVI(dip)->devi_lock));
644 647 ddi_devid_str_free(devidstr);
645 648 return (0);
646 649 }
647 650 }
648 651
649 652 /* check if there is a devt specific devid property */
650 653 propp = i_ddi_prop_search(dev,
651 654 DEVID_PROP_NAME, DEVID_PROP_FLAGS, &(DEVI(dip)->devi_hw_prop_ptr));
652 655 if (propp != NULL) {
653 656 if (ddi_devid_str_compare(propp->prop_val, devidstr) == 0) {
654 657 /* a devt specific devid exists and matches */
655 658 mutex_exit(&(DEVI(dip)->devi_lock));
656 659 ddi_devid_str_free(devidstr);
657 660 return (1);
658 661 } else {
659 662 /* a devt specific devid exists and doesn't match */
660 663 mutex_exit(&(DEVI(dip)->devi_lock));
661 664 ddi_devid_str_free(devidstr);
662 665 return (0);
663 666 }
664 667 }
665 668
666 669 /* we didn't find any devids associated with the device */
667 670 mutex_exit(&(DEVI(dip)->devi_lock));
668 671 ddi_devid_str_free(devidstr);
669 672 return (0);
670 673 }
671 674
672 675 /* get a handle to a device by devid and minor name */
673 676 int
674 677 ldi_vp_from_devid(ddi_devid_t devid, char *minor_name, vnode_t **vpp)
675 678 {
676 679 dev_info_t *dip;
677 680 vnode_t *vp;
678 681 int ret, i, ndevs, styp;
679 682 dev_t dev, *devs;
680 683
681 684 /* sanity check required input parameters */
682 685 if ((devid == NULL) || (minor_name == NULL) || (vpp == NULL))
683 686 return (EINVAL);
684 687
685 688 ret = ddi_lyr_devid_to_devlist(devid, minor_name, &ndevs, &devs);
686 689 if ((ret != DDI_SUCCESS) || (ndevs <= 0))
687 690 return (ENODEV);
688 691
689 692 for (i = 0; i < ndevs; i++) {
690 693 dev = devs[i];
691 694
692 695 if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
693 696 continue;
694 697
695 698 /*
696 699 * now we have to verify that the devid of the disk
697 700 * still matches what was requested.
698 701 *
699 702 * we have to do this because the devid could have
700 703 * changed between the call to ddi_lyr_devid_to_devlist()
701 704 * and e_ddi_hold_devi_by_dev(). this is because when
702 705 * ddi_lyr_devid_to_devlist() returns a list of devts
703 706 * there is no kind of hold on those devts so a device
704 707 * could have been replaced out from under us in the
705 708 * interim.
706 709 */
707 710 if ((i_ddi_minorname_to_devtspectype(dip, minor_name,
708 711 NULL, &styp) == DDI_SUCCESS) &&
709 712 ldi_devid_match(devid, dip, dev))
710 713 break;
711 714
712 715 ddi_release_devi(dip); /* from e_ddi_hold_devi_by_dev() */
713 716 }
714 717
715 718 ddi_lyr_free_devlist(devs, ndevs);
716 719
717 720 if (i == ndevs)
718 721 return (ENODEV);
719 722
720 723 ASSERT(STYP_VALID(styp));
721 724 vp = makespecvp(dev, STYP_TO_VTYP(styp));
722 725 spec_assoc_vp_with_devi(vp, dip);
723 726 ddi_release_devi(dip); /* from e_ddi_hold_devi_by_dev */
724 727
725 728 *vpp = vp;
726 729 return (0);
727 730 }
728 731
729 732 /* given a vnode, open a device */
730 733 static int
731 734 ldi_open_by_vp(vnode_t **vpp, int flag, cred_t *cr,
732 735 ldi_handle_t *lhp, struct ldi_ident *li)
733 736 {
734 737 struct ldi_handle *nlhp;
735 738 vnode_t *vp;
736 739 int err;
737 740
738 741 ASSERT((vpp != NULL) && (*vpp != NULL));
739 742 ASSERT((lhp != NULL) && (li != NULL));
740 743
741 744 vp = *vpp;
742 745 /* if the vnode passed in is not a device, then bail */
743 746 if (!vn_matchops(vp, spec_getvnodeops()) || !VTYP_VALID(vp->v_type))
744 747 return (ENXIO);
745 748
746 749 /*
747 750 * the caller may have specified a node that
748 751 * doesn't have cb_ops defined. the ldi doesn't yet
749 752 * support opening devices without a valid cb_ops.
750 753 */
751 754 if (devopsp[getmajor(vp->v_rdev)]->devo_cb_ops == NULL)
752 755 return (ENXIO);
753 756
754 757 /* open the device */
755 758 if ((err = VOP_OPEN(&vp, flag | FKLYR, cr, NULL)) != 0)
756 759 return (err);
757 760
758 761 /* possible clone open, make sure that we still have a spec node */
759 762 ASSERT(vn_matchops(vp, spec_getvnodeops()));
760 763
761 764 nlhp = handle_alloc(vp, li);
762 765
763 766 if (vp != *vpp) {
764 767 /*
765 768 * allocating the layered handle took a new hold on the vnode
766 769 * so we can release the hold that was returned by the clone
767 770 * open
768 771 */
769 772 LDI_OPENCLOSE((CE_WARN, "%s: lh=0x%p",
770 773 "ldi clone open", (void *)nlhp));
771 774 } else {
772 775 LDI_OPENCLOSE((CE_WARN, "%s: lh=0x%p",
773 776 "ldi open", (void *)nlhp));
774 777 }
775 778
776 779 *vpp = vp;
777 780 *lhp = (ldi_handle_t)nlhp;
778 781 return (0);
779 782 }
780 783
781 784 /* Call a drivers prop_op(9E) interface */
782 785 static int
783 786 i_ldi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
784 787 int flags, char *name, caddr_t valuep, int *lengthp)
785 788 {
786 789 struct dev_ops *ops = NULL;
787 790 int res;
788 791
789 792 ASSERT((dip != NULL) && (name != NULL));
790 793 ASSERT((prop_op == PROP_LEN) || (valuep != NULL));
791 794 ASSERT(lengthp != NULL);
792 795
793 796 /*
794 797 * we can only be invoked after a driver has been opened and
795 798 * someone has a layered handle to it, so there had better be
796 799 * a valid ops vector.
797 800 */
798 801 ops = DEVI(dip)->devi_ops;
799 802 ASSERT(ops && ops->devo_cb_ops);
800 803
801 804 /*
802 805 * Some nexus drivers incorrectly set cb_prop_op to nodev,
803 806 * nulldev or even NULL.
804 807 */
805 808 if ((ops->devo_cb_ops->cb_prop_op == nodev) ||
806 809 (ops->devo_cb_ops->cb_prop_op == nulldev) ||
807 810 (ops->devo_cb_ops->cb_prop_op == NULL)) {
808 811 return (DDI_PROP_NOT_FOUND);
809 812 }
810 813
811 814 /* check if this is actually DDI_DEV_T_ANY query */
812 815 if (flags & LDI_DEV_T_ANY) {
813 816 flags &= ~LDI_DEV_T_ANY;
814 817 dev = DDI_DEV_T_ANY;
815 818 }
816 819
817 820 res = cdev_prop_op(dev, dip, prop_op, flags, name, valuep, lengthp);
818 821 return (res);
819 822 }
820 823
821 824 static void
822 825 i_ldi_prop_op_free(struct prop_driver_data *pdd)
823 826 {
824 827 kmem_free(pdd, pdd->pdd_size);
825 828 }
826 829
827 830 static caddr_t
828 831 i_ldi_prop_op_alloc(int prop_len)
829 832 {
830 833 struct prop_driver_data *pdd;
831 834 int pdd_size;
832 835
833 836 pdd_size = sizeof (struct prop_driver_data) + prop_len;
834 837 pdd = kmem_alloc(pdd_size, KM_SLEEP);
835 838 pdd->pdd_size = pdd_size;
836 839 pdd->pdd_prop_free = i_ldi_prop_op_free;
837 840 return ((caddr_t)&pdd[1]);
838 841 }
839 842
840 843 /*
841 844 * i_ldi_prop_op_typed() is a wrapper for i_ldi_prop_op that is used
842 845 * by the typed ldi property lookup interfaces.
843 846 */
844 847 static int
845 848 i_ldi_prop_op_typed(dev_t dev, dev_info_t *dip, int flags, char *name,
846 849 caddr_t *datap, int *lengthp, int elem_size)
847 850 {
848 851 caddr_t prop_val;
849 852 int prop_len, res;
850 853
851 854 ASSERT((dip != NULL) && (name != NULL));
852 855 ASSERT((datap != NULL) && (lengthp != NULL));
853 856
854 857 /*
855 858 * first call the driver's prop_op() interface to allow it
856 859 * to override default property values.
857 860 */
858 861 res = i_ldi_prop_op(dev, dip, PROP_LEN,
859 862 flags | DDI_PROP_DYNAMIC, name, NULL, &prop_len);
860 863 if (res != DDI_PROP_SUCCESS)
861 864 return (DDI_PROP_NOT_FOUND);
862 865
863 866 /* sanity check the property length */
864 867 if (prop_len == 0) {
865 868 /*
866 869 * the ddi typed interfaces don't allow a driver to
867 870 * create properties with a length of 0. so we should
868 871 * prevent drivers from returning 0 length dynamic
869 872 * properties for typed property lookups.
870 873 */
871 874 return (DDI_PROP_NOT_FOUND);
872 875 }
873 876
874 877 /* sanity check the property length against the element size */
875 878 if (elem_size && ((prop_len % elem_size) != 0))
876 879 return (DDI_PROP_NOT_FOUND);
877 880
878 881 /*
879 882 * got it. now allocate a prop_driver_data struct so that the
880 883 * user can free the property via ddi_prop_free().
881 884 */
882 885 prop_val = i_ldi_prop_op_alloc(prop_len);
883 886
884 887 /* lookup the property again, this time get the value */
885 888 res = i_ldi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
886 889 flags | DDI_PROP_DYNAMIC, name, prop_val, &prop_len);
887 890 if (res != DDI_PROP_SUCCESS) {
888 891 ddi_prop_free(prop_val);
889 892 return (DDI_PROP_NOT_FOUND);
890 893 }
891 894
892 895 /* sanity check the property length */
893 896 if (prop_len == 0) {
894 897 ddi_prop_free(prop_val);
895 898 return (DDI_PROP_NOT_FOUND);
896 899 }
897 900
898 901 /* sanity check the property length against the element size */
899 902 if (elem_size && ((prop_len % elem_size) != 0)) {
900 903 ddi_prop_free(prop_val);
901 904 return (DDI_PROP_NOT_FOUND);
902 905 }
903 906
904 907 /*
905 908 * return the prop_driver_data struct and, optionally, the length
906 909 * of the data.
907 910 */
908 911 *datap = prop_val;
909 912 *lengthp = prop_len;
910 913
911 914 return (DDI_PROP_SUCCESS);
912 915 }
913 916
914 917 /*
915 918 * i_check_string looks at a string property and makes sure it's
916 919 * a valid null terminated string
917 920 */
918 921 static int
919 922 i_check_string(char *str, int prop_len)
920 923 {
921 924 int i;
922 925
923 926 ASSERT(str != NULL);
924 927
925 928 for (i = 0; i < prop_len; i++) {
926 929 if (str[i] == '\0')
927 930 return (0);
928 931 }
929 932 return (1);
930 933 }
931 934
932 935 /*
933 936 * i_pack_string_array takes a string array property that is represented
934 937 * as a concatenation of strings (with the NULL character included for
935 938 * each string) and converts it into a format that can be returned by
936 939 * ldi_prop_lookup_string_array.
937 940 */
938 941 static int
939 942 i_pack_string_array(char *str_concat, int prop_len,
940 943 char ***str_arrayp, int *nelemp)
941 944 {
942 945 int i, nelem, pack_size;
943 946 char **str_array, *strptr;
944 947
945 948 /*
946 949 * first we need to sanity check the input string array.
947 950 * in essence this can be done by making sure that the last
948 951 * character of the array passed in is null. (meaning the last
949 952 * string in the array is NULL terminated.)
950 953 */
951 954 if (str_concat[prop_len - 1] != '\0')
952 955 return (1);
953 956
954 957 /* now let's count the number of strings in the array */
955 958 for (nelem = i = 0; i < prop_len; i++)
956 959 if (str_concat[i] == '\0')
957 960 nelem++;
958 961 ASSERT(nelem >= 1);
959 962
960 963 /* now let's allocate memory for the new packed property */
961 964 pack_size = (sizeof (char *) * (nelem + 1)) + prop_len;
962 965 str_array = (char **)i_ldi_prop_op_alloc(pack_size);
963 966
964 967 /* let's copy the actual string data into the new property */
965 968 strptr = (char *)&(str_array[nelem + 1]);
966 969 bcopy(str_concat, strptr, prop_len);
967 970
968 971 /* now initialize the string array pointers */
969 972 for (i = 0; i < nelem; i++) {
970 973 str_array[i] = strptr;
971 974 strptr += strlen(strptr) + 1;
972 975 }
973 976 str_array[nelem] = NULL;
974 977
975 978 /* set the return values */
976 979 *str_arrayp = str_array;
977 980 *nelemp = nelem;
978 981
979 982 return (0);
980 983 }
981 984
982 985
983 986 /*
984 987 * LDI Project private device usage interfaces
985 988 */
986 989
987 990 /*
988 991 * Get a count of how many devices are currently open by different consumers
989 992 */
990 993 int
991 994 ldi_usage_count()
992 995 {
993 996 return (ldi_handle_hash_count);
994 997 }
995 998
996 999 static void
997 1000 ldi_usage_walker_tgt_helper(ldi_usage_t *ldi_usage, vnode_t *vp)
998 1001 {
999 1002 dev_info_t *dip;
1000 1003 dev_t dev;
1001 1004
1002 1005 ASSERT(STYP_VALID(VTYP_TO_STYP(vp->v_type)));
1003 1006
1004 1007 /* get the target devt */
1005 1008 dev = vp->v_rdev;
1006 1009
1007 1010 /* try to get the target dip */
1008 1011 dip = VTOCS(vp)->s_dip;
1009 1012 if (dip != NULL) {
1010 1013 e_ddi_hold_devi(dip);
1011 1014 } else if (dev != DDI_DEV_T_NONE) {
1012 1015 dip = e_ddi_hold_devi_by_dev(dev, 0);
1013 1016 }
1014 1017
1015 1018 /* set the target information */
1016 1019 ldi_usage->tgt_name = mod_major_to_name(getmajor(dev));
1017 1020 ldi_usage->tgt_modid = mod_name_to_modid(ldi_usage->tgt_name);
1018 1021 ldi_usage->tgt_devt = dev;
1019 1022 ldi_usage->tgt_spec_type = VTYP_TO_STYP(vp->v_type);
1020 1023 ldi_usage->tgt_dip = dip;
1021 1024 }
1022 1025
1023 1026
1024 1027 static int
1025 1028 ldi_usage_walker_helper(struct ldi_ident *lip, vnode_t *vp,
1026 1029 void *arg, int (*callback)(const ldi_usage_t *, void *))
1027 1030 {
1028 1031 ldi_usage_t ldi_usage;
1029 1032 struct devnames *dnp;
1030 1033 dev_info_t *dip;
1031 1034 major_t major;
1032 1035 dev_t dev;
1033 1036 int ret = LDI_USAGE_CONTINUE;
1034 1037
1035 1038 /* set the target device information */
1036 1039 ldi_usage_walker_tgt_helper(&ldi_usage, vp);
1037 1040
1038 1041 /* get the source devt */
1039 1042 dev = lip->li_dev;
1040 1043
1041 1044 /* try to get the source dip */
1042 1045 dip = lip->li_dip;
1043 1046 if (dip != NULL) {
1044 1047 e_ddi_hold_devi(dip);
1045 1048 } else if (dev != DDI_DEV_T_NONE) {
1046 1049 dip = e_ddi_hold_devi_by_dev(dev, 0);
1047 1050 }
1048 1051
1049 1052 /* set the valid source information */
1050 1053 ldi_usage.src_modid = lip->li_modid;
1051 1054 ldi_usage.src_name = lip->li_modname;
1052 1055 ldi_usage.src_devt = dev;
1053 1056 ldi_usage.src_dip = dip;
1054 1057
1055 1058 /*
1056 1059 * if the source ident represents either:
1057 1060 *
1058 1061 * - a kernel module (and not a device or device driver)
1059 1062 * - a device node
1060 1063 *
1061 1064 * then we currently have all the info we need to report the
1062 1065 * usage information so invoke the callback function.
1063 1066 */
1064 1067 if (((lip->li_major == -1) && (dev == DDI_DEV_T_NONE)) ||
1065 1068 (dip != NULL)) {
1066 1069 ret = callback(&ldi_usage, arg);
1067 1070 if (dip != NULL)
1068 1071 ddi_release_devi(dip);
1069 1072 if (ldi_usage.tgt_dip != NULL)
1070 1073 ddi_release_devi(ldi_usage.tgt_dip);
1071 1074 return (ret);
1072 1075 }
1073 1076
1074 1077 /*
1075 1078 * now this is kinda gross.
1076 1079 *
1077 1080 * what we do here is attempt to associate every device instance
1078 1081 * of the source driver on the system with the open target driver.
1079 1082 * we do this because we don't know which instance of the device
1080 1083 * could potentially access the lower device so we assume that all
1081 1084 * the instances could access it.
1082 1085 *
1083 1086 * there are two ways we could have gotten here:
1084 1087 *
1085 1088 * 1) this layered ident represents one created using only a
1086 1089 * major number or a driver module name. this means that when
1087 1090 * it was created we could not associate it with a particular
1088 1091 * dev_t or device instance.
1089 1092 *
1090 1093 * when could this possibly happen you ask?
1091 1094 *
1092 1095 * a perfect example of this is streams persistent links.
1093 1096 * when a persistent streams link is formed we can't associate
1094 1097 * the lower device stream with any particular upper device
1095 1098 * stream or instance. this is because any particular upper
1096 1099 * device stream could be closed, then another could be
1097 1100 * opened with a different dev_t and device instance, and it
1098 1101 * would still have access to the lower linked stream.
1099 1102 *
1100 1103 * since any instance of the upper streams driver could
1101 1104 * potentially access the lower stream whenever it wants,
1102 1105 * we represent that here by associating the opened lower
1103 1106 * device with every existing device instance of the upper
1104 1107 * streams driver.
1105 1108 *
1106 1109 * 2) This case should really never happen but we'll include it
1107 1110 * for completeness.
1108 1111 *
1109 1112 * it's possible that we could have gotten here because we
1110 1113 * have a dev_t for the upper device but we couldn't find a
1111 1114 * dip associated with that dev_t.
1112 1115 *
1113 1116 * the only types of devices that have dev_t without an
1114 1117 * associated dip are unbound DLPIv2 network devices. These
1115 1118 * types of devices exist to be able to attach a stream to any
1116 1119 * instance of a hardware network device. since these types of
1117 1120 * devices are usually hardware devices they should never
1118 1121 * really have other devices open.
1119 1122 */
1120 1123 if (dev != DDI_DEV_T_NONE)
1121 1124 major = getmajor(dev);
1122 1125 else
1123 1126 major = lip->li_major;
1124 1127
1125 1128 ASSERT((major >= 0) && (major < devcnt));
1126 1129
1127 1130 dnp = &devnamesp[major];
1128 1131 LOCK_DEV_OPS(&dnp->dn_lock);
1129 1132 dip = dnp->dn_head;
1130 1133 while ((dip) && (ret == LDI_USAGE_CONTINUE)) {
1131 1134 e_ddi_hold_devi(dip);
1132 1135 UNLOCK_DEV_OPS(&dnp->dn_lock);
1133 1136
1134 1137 /* set the source dip */
1135 1138 ldi_usage.src_dip = dip;
1136 1139
1137 1140 /* invoke the callback function */
1138 1141 ret = callback(&ldi_usage, arg);
1139 1142
1140 1143 LOCK_DEV_OPS(&dnp->dn_lock);
1141 1144 ddi_release_devi(dip);
1142 1145 dip = ddi_get_next(dip);
1143 1146 }
1144 1147 UNLOCK_DEV_OPS(&dnp->dn_lock);
1145 1148
1146 1149 /* if there was a target dip, release it */
1147 1150 if (ldi_usage.tgt_dip != NULL)
1148 1151 ddi_release_devi(ldi_usage.tgt_dip);
1149 1152
1150 1153 return (ret);
1151 1154 }
1152 1155
1153 1156 /*
1154 1157 * ldi_usage_walker() - this walker reports LDI kernel device usage
1155 1158 * information via the callback() callback function. the LDI keeps track
1156 1159 * of what devices are being accessed in its own internal data structures.
1157 1160 * this function walks those data structures to determine device usage.
1158 1161 */
1159 1162 void
1160 1163 ldi_usage_walker(void *arg, int (*callback)(const ldi_usage_t *, void *))
1161 1164 {
1162 1165 struct ldi_handle *lhp;
1163 1166 struct ldi_ident *lip;
1164 1167 vnode_t *vp;
1165 1168 int i;
1166 1169 int ret = LDI_USAGE_CONTINUE;
1167 1170
1168 1171 for (i = 0; i < LH_HASH_SZ; i++) {
1169 1172 mutex_enter(&ldi_handle_hash_lock[i]);
1170 1173
1171 1174 lhp = ldi_handle_hash[i];
1172 1175 while ((lhp != NULL) && (ret == LDI_USAGE_CONTINUE)) {
1173 1176 lip = lhp->lh_ident;
1174 1177 vp = lhp->lh_vp;
1175 1178
1176 1179 /* invoke the devinfo callback function */
1177 1180 ret = ldi_usage_walker_helper(lip, vp, arg, callback);
1178 1181
1179 1182 lhp = lhp->lh_next;
1180 1183 }
1181 1184 mutex_exit(&ldi_handle_hash_lock[i]);
1182 1185
1183 1186 if (ret != LDI_USAGE_CONTINUE)
1184 1187 break;
1185 1188 }
1186 1189 }
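For reference, a walker callback receives each source/target pair in an ldi_usage_t and returns LDI_USAGE_CONTINUE to keep the walk going; returning anything else stops it, as the loops above show. A minimal sketch of a consumer follows, using only fields set by the helpers above; usage_print_cb() is an illustrative name, not part of this file.

    /* Illustrative only: print every LDI source/target usage pair. */
    static int
    usage_print_cb(const ldi_usage_t *lu, void *arg)
    {
            /* arg is the cookie passed to ldi_usage_walker(); unused here */
            cmn_err(CE_CONT, "ldi usage: %s -> %s\n",
                lu->src_name,
                lu->tgt_name != NULL ? lu->tgt_name : "<unknown>");
            return (LDI_USAGE_CONTINUE);
    }

A caller would then invoke ldi_usage_walker(NULL, usage_print_cb) to have the callback applied to every tracked handle.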
1187 1190
1188 1191 /*
1189 1192 * LDI Project private interfaces (streams linking interfaces)
1190 1193 *
1191 1194 * Streams supports a type of built in device layering via linking.
1192 1195 * Certain types of streams drivers can be streams multiplexors.
1193 1196 * A streams multiplexor supports the I_LINK/I_PLINK operation.
1194 1197 * These operations allows other streams devices to be linked under the
1195 1198 * multiplexor. By definition all streams multiplexors are devices
1196 1199 * so this linking is a type of device layering where the multiplexor
1197 1200 * device is layered on top of the device linked below it.
1198 1201 */
1199 1202
1200 1203 /*
1201 1204 * ldi_mlink_lh() is invoked when streams are linked using LDI handles.
1202 1205 * It is not used for normal I_LINKs and I_PLINKs using file descriptors.
1203 1206 *
1204 1207 * The streams framework keeps track of links via the file_t of the lower
1205 1208 * stream. The LDI keeps track of devices using a vnode. In the case
1206 1209 * of a streams link created via an LDI handle, ldi_mlink_lh() allocates
1207 1210 * a file_t that the streams framework can use to track the linkage.
1208 1211 */
1209 1212 int
1210 1213 ldi_mlink_lh(vnode_t *vp, int cmd, intptr_t arg, cred_t *crp, int *rvalp)
1211 1214 {
1212 1215 struct ldi_handle *lhp = (struct ldi_handle *)arg;
1213 1216 vnode_t *vpdown;
1214 1217 file_t *fpdown;
1215 1218 int err;
1216 1219
1217 1220 if (lhp == NULL)
1218 1221 return (EINVAL);
1219 1222
1220 1223 vpdown = lhp->lh_vp;
1221 1224 ASSERT(vn_matchops(vpdown, spec_getvnodeops()));
1222 1225 ASSERT(cmd == _I_PLINK_LH);
1223 1226
1224 1227 /*
1225 1228 * create a new lower vnode and a file_t that points to it,
1226 1229 * streams linking requires a file_t. falloc() returns with
1227 1230 * fpdown locked.
1228 1231 */
1229 1232 VN_HOLD(vpdown);
1230 1233 (void) falloc(vpdown, FREAD|FWRITE, &fpdown, NULL);
1231 1234 mutex_exit(&fpdown->f_tlock);
1232 1235
1233 1236 /* try to establish the link */
1234 1237 err = mlink_file(vp, I_PLINK, fpdown, crp, rvalp, 1);
1235 1238
1236 1239 if (err != 0) {
1237 1240 /* the link failed, free the file_t and release the vnode */
1238 1241 mutex_enter(&fpdown->f_tlock);
1239 1242 unfalloc(fpdown);
1240 1243 VN_RELE(vpdown);
1241 1244 }
1242 1245
1243 1246 return (err);
1244 1247 }
1245 1248
1246 1249 /*
1247 1250 * ldi_mlink_fp() is invoked for all successful streams linkages created
1248 1251 * via I_LINK and I_PLINK. ldi_mlink_fp() records the linkage information
1249 1252 * in its internal state so that the devinfo snapshot code has some
1250 1253 * observability into streams device linkage information.
1251 1254 */
1252 1255 void
1253 1256 ldi_mlink_fp(struct stdata *stp, file_t *fpdown, int lhlink, int type)
1254 1257 {
1255 1258 vnode_t *vp = fpdown->f_vnode;
1256 1259 struct snode *sp, *csp;
1257 1260 ldi_ident_t li;
1258 1261 major_t major;
1259 1262 int ret;
1260 1263
1261 1264 /* if the lower stream is not a device then return */
1262 1265 if (!vn_matchops(vp, spec_getvnodeops()))
1263 1266 return;
1264 1267
1265 1268 ASSERT(!servicing_interrupt());
1266 1269
1267 1270 LDI_STREAMS_LNK((CE_NOTE, "%s: linking streams "
1268 1271 "stp=0x%p, fpdown=0x%p", "ldi_mlink_fp",
1269 1272 (void *)stp, (void *)fpdown));
1270 1273
1271 1274 sp = VTOS(vp);
1272 1275 csp = VTOS(sp->s_commonvp);
1273 1276
1274 1277 /* check if this was a plink via a layered handle */
1275 1278 if (lhlink) {
1276 1279 /*
1277 1280 * increment the common snode s_count.
1278 1281 *
1279 1282 * this is done because after the link operation there
1280 1283 * are two ways that s_count can be decremented.
1281 1284 *
1282 1285 * when the layered handle used to create the link is
1283 1286 * closed, spec_close() is called and it will decrement
1284 1287 * s_count in the common snode. if we don't increment
1285 1288 * s_count here then this could cause spec_close() to
1286 1289 * actually close the device while it's still linked
1287 1290 * under a multiplexer.
1288 1291 *
1289 1292 * also, when the lower stream is unlinked, closef() is
1290 1293 * called for the file_t associated with this snode.
1291 1294 * closef() will call spec_close(), which will decrement
1292 1295 * s_count. if we don't increment s_count here then this
1293 1296 * could cause spec_close() to actually close the device
1294 1297 * while there may still be valid layered handles
1295 1298 * pointing to it.
1296 1299 */
1297 1300 mutex_enter(&csp->s_lock);
1298 1301 ASSERT(csp->s_count >= 1);
1299 1302 csp->s_count++;
1300 1303 mutex_exit(&csp->s_lock);
1301 1304
1302 1305 /*
1303 1306 * decrement the f_count.
1304 1307 * this is done because the layered driver framework does
1305 1308 * not actually cache a copy of the file_t allocated to
1306 1309 * do the link. this is done here instead of in ldi_mlink_lh()
1307 1310 * because there is a window in ldi_mlink_lh() between where
1308 1311 * mlink_file() returns and we would decrement the f_count
1309 1312 * when the stream could be unlinked.
1310 1313 */
1311 1314 mutex_enter(&fpdown->f_tlock);
1312 1315 fpdown->f_count--;
1313 1316 mutex_exit(&fpdown->f_tlock);
1314 1317 }
1315 1318
1316 1319 /*
1317 1320 * NOTE: here we rely on the streams subsystem not allowing
1318 1321 * a stream to be multiplexed more than once. if this
1319 1322 * changes, we break.
1320 1323 *
1321 1324 * mark the snode/stream as multiplexed
1322 1325 */
1323 1326 mutex_enter(&sp->s_lock);
1324 1327 ASSERT(!(sp->s_flag & SMUXED));
1325 1328 sp->s_flag |= SMUXED;
1326 1329 mutex_exit(&sp->s_lock);
1327 1330
1328 1331 /* get a layered ident for the upper stream */
1329 1332 if (type == LINKNORMAL) {
1330 1333 /*
1331 1334 * if the link is not persistent then we can associate
1332 1335 * the upper stream with a dev_t. this is because the
1333 1336 * upper stream is associated with a vnode, which is
1334 1337 * associated with a dev_t and this binding can't change
1335 1338 * during the life of the stream. since the link isn't
1336 1339 * persistent once the stream is destroyed the link is
1337 1340 * destroyed. so the dev_t will be valid for the life
1338 1341 * of the link.
1339 1342 */
1340 1343 ret = ldi_ident_from_stream(getendq(stp->sd_wrq), &li);
1341 1344 } else {
1342 1345 /*
1343 1346 * if the link is persistent we can only associate the
1344 1347 * link with a driver (and not a dev_t.) this is
1345 1348 * because subsequent opens of the upper device may result
1346 1349 * in a different stream (and dev_t) having access to
1347 1350 * the lower stream.
1348 1351 *
1349 1352 * for example, if the upper stream is closed after the
1350 1353 * persistent link operation is completed, a subsequent
1351 1354 * open of the upper device will create a new stream which
1352 1355 * may have a different dev_t and an unlink operation
1353 1356 * can be performed using this new upper stream.
1354 1357 */
1355 1358 ASSERT(type == LINKPERSIST);
1356 1359 major = getmajor(stp->sd_vnode->v_rdev);
1357 1360 ret = ldi_ident_from_major(major, &li);
1358 1361 }
1359 1362
1360 1363 ASSERT(ret == 0);
1361 1364 (void) handle_alloc(vp, (struct ldi_ident *)li);
1362 1365 ldi_ident_release(li);
1363 1366 }
1364 1367
1365 1368 void
1366 1369 ldi_munlink_fp(struct stdata *stp, file_t *fpdown, int type)
1367 1370 {
1368 1371 struct ldi_handle *lhp;
1369 1372 vnode_t *vp = (vnode_t *)fpdown->f_vnode;
1370 1373 struct snode *sp;
1371 1374 ldi_ident_t li;
1372 1375 major_t major;
1373 1376 int ret;
1374 1377
1375 1378 /* if the lower stream is not a device then return */
1376 1379 if (!vn_matchops(vp, spec_getvnodeops()))
1377 1380 return;
1378 1381
1379 1382 ASSERT(!servicing_interrupt());
1380 1383 ASSERT((type == LINKNORMAL) || (type == LINKPERSIST));
1381 1384
1382 1385 LDI_STREAMS_LNK((CE_NOTE, "%s: unlinking streams "
1383 1386 "stp=0x%p, fpdown=0x%p", "ldi_munlink_fp",
1384 1387 (void *)stp, (void *)fpdown));
1385 1388
1386 1389 /*
1387 1390 * NOTE: here we rely on the streams subsystem not allowing
1388 1391 * a stream to be multiplexed more than once. if this
1389 1392 * changes, we break.
1390 1393 *
1391 1394 * mark the snode/stream as not multiplexed
1392 1395 */
1393 1396 sp = VTOS(vp);
1394 1397 mutex_enter(&sp->s_lock);
1395 1398 ASSERT(sp->s_flag & SMUXED);
1396 1399 sp->s_flag &= ~SMUXED;
1397 1400 mutex_exit(&sp->s_lock);
1398 1401
1399 1402 /*
1400 1403 * clear the owner for this snode
1401 1404 * see the comment in ldi_mlink_fp() for information about how
1402 1405 * the ident is allocated
1403 1406 */
1404 1407 if (type == LINKNORMAL) {
1405 1408 ret = ldi_ident_from_stream(getendq(stp->sd_wrq), &li);
1406 1409 } else {
1407 1410 ASSERT(type == LINKPERSIST);
1408 1411 major = getmajor(stp->sd_vnode->v_rdev);
1409 1412 ret = ldi_ident_from_major(major, &li);
1410 1413 }
1411 1414
1412 1415 ASSERT(ret == 0);
1413 1416 lhp = handle_find(vp, (struct ldi_ident *)li);
1414 1417 handle_release(lhp);
1415 1418 ldi_ident_release(li);
1416 1419 }
1417 1420
1418 1421 /*
1419 1422 * LDI Consolidation private interfaces
1420 1423 */
1421 1424 int
1422 1425 ldi_ident_from_mod(struct modlinkage *modlp, ldi_ident_t *lip)
1423 1426 {
1424 1427 struct modctl *modp;
1425 1428 major_t major;
1426 1429 char *name;
1427 1430
1428 1431 if ((modlp == NULL) || (lip == NULL))
1429 1432 return (EINVAL);
1430 1433
1431 1434 ASSERT(!servicing_interrupt());
1432 1435
1433 1436 modp = mod_getctl(modlp);
1434 1437 if (modp == NULL)
1435 1438 return (EINVAL);
1436 1439 name = modp->mod_modname;
1437 1440 if (name == NULL)
1438 1441 return (EINVAL);
1439 1442 major = mod_name_to_major(name);
1440 1443
1441 1444 *lip = (ldi_ident_t)ident_alloc(name, NULL, DDI_DEV_T_NONE, major);
1442 1445
1443 1446 LDI_ALLOCFREE((CE_WARN, "%s: li=0x%p, mod=%s",
1444 1447 "ldi_ident_from_mod", (void *)*lip, name));
1445 1448
1446 1449 return (0);
1447 1450 }
1448 1451
1449 1452 ldi_ident_t
1450 1453 ldi_ident_from_anon()
1451 1454 {
1452 1455 ldi_ident_t lip;
1453 1456
1454 1457 ASSERT(!servicing_interrupt());
1455 1458
1456 1459 lip = (ldi_ident_t)ident_alloc("genunix", NULL, DDI_DEV_T_NONE, -1);
1457 1460
1458 1461 LDI_ALLOCFREE((CE_WARN, "%s: li=0x%p, mod=%s",
1459 1462 "ldi_ident_from_anon", (void *)lip, "genunix"));
1460 1463
1461 1464 return (lip);
1462 1465 }
1463 1466
1464 1467
1465 1468 /*
1466 1469 * LDI Public interfaces
1467 1470 */
1468 1471 int
1469 1472 ldi_ident_from_stream(struct queue *sq, ldi_ident_t *lip)
1470 1473 {
1471 1474 struct stdata *stp;
1472 1475 dev_t dev;
1473 1476 char *name;
1474 1477
1475 1478 if ((sq == NULL) || (lip == NULL))
1476 1479 return (EINVAL);
1477 1480
1478 1481 ASSERT(!servicing_interrupt());
1479 1482
1480 1483 stp = sq->q_stream;
1481 1484 if (!vn_matchops(stp->sd_vnode, spec_getvnodeops()))
1482 1485 return (EINVAL);
1483 1486
1484 1487 dev = stp->sd_vnode->v_rdev;
1485 1488 name = mod_major_to_name(getmajor(dev));
1486 1489 if (name == NULL)
1487 1490 return (EINVAL);
1488 1491 *lip = (ldi_ident_t)ident_alloc(name, NULL, dev, -1);
1489 1492
1490 1493 LDI_ALLOCFREE((CE_WARN,
1491 1494 "%s: li=0x%p, mod=%s, minor=0x%x, stp=0x%p",
1492 1495 "ldi_ident_from_stream", (void *)*lip, name, getminor(dev),
1493 1496 (void *)stp));
1494 1497
1495 1498 return (0);
1496 1499 }
1497 1500
1498 1501 int
1499 1502 ldi_ident_from_dev(dev_t dev, ldi_ident_t *lip)
1500 1503 {
1501 1504 char *name;
1502 1505
1503 1506 if (lip == NULL)
1504 1507 return (EINVAL);
1505 1508
1506 1509 ASSERT(!servicing_interrupt());
1507 1510
1508 1511 name = mod_major_to_name(getmajor(dev));
1509 1512 if (name == NULL)
1510 1513 return (EINVAL);
1511 1514 *lip = (ldi_ident_t)ident_alloc(name, NULL, dev, -1);
1512 1515
1513 1516 LDI_ALLOCFREE((CE_WARN,
1514 1517 "%s: li=0x%p, mod=%s, minor=0x%x",
1515 1518 "ldi_ident_from_dev", (void *)*lip, name, getminor(dev)));
1516 1519
1517 1520 return (0);
1518 1521 }
1519 1522
1520 1523 int
1521 1524 ldi_ident_from_dip(dev_info_t *dip, ldi_ident_t *lip)
1522 1525 {
1523 1526 struct dev_info *devi = (struct dev_info *)dip;
1524 1527 char *name;
1525 1528
1526 1529 if ((dip == NULL) || (lip == NULL))
1527 1530 return (EINVAL);
1528 1531
1529 1532 ASSERT(!servicing_interrupt());
1530 1533
1531 1534 name = mod_major_to_name(devi->devi_major);
1532 1535 if (name == NULL)
1533 1536 return (EINVAL);
1534 1537 *lip = (ldi_ident_t)ident_alloc(name, dip, DDI_DEV_T_NONE, -1);
1535 1538
1536 1539 LDI_ALLOCFREE((CE_WARN,
1537 1540 "%s: li=0x%p, mod=%s, dip=0x%p",
1538 1541 "ldi_ident_from_dip", (void *)*lip, name, (void *)devi));
1539 1542
1540 1543 return (0);
1541 1544 }
1542 1545
1543 1546 int
1544 1547 ldi_ident_from_major(major_t major, ldi_ident_t *lip)
1545 1548 {
1546 1549 char *name;
1547 1550
1548 1551 if (lip == NULL)
1549 1552 return (EINVAL);
1550 1553
1551 1554 ASSERT(!servicing_interrupt());
1552 1555
1553 1556 name = mod_major_to_name(major);
1554 1557 if (name == NULL)
1555 1558 return (EINVAL);
1556 1559 *lip = (ldi_ident_t)ident_alloc(name, NULL, DDI_DEV_T_NONE, major);
1557 1560
1558 1561 LDI_ALLOCFREE((CE_WARN,
1559 1562 "%s: li=0x%p, mod=%s",
1560 1563 "ldi_ident_from_major", (void *)*lip, name));
1561 1564
1562 1565 return (0);
1563 1566 }
1564 1567
1565 1568 void
1566 1569 ldi_ident_release(ldi_ident_t li)
1567 1570 {
1568 1571 struct ldi_ident *ident = (struct ldi_ident *)li;
1569 1572 char *name;
1570 1573
1571 1574 if (li == NULL)
1572 1575 return;
1573 1576
1574 1577 ASSERT(!servicing_interrupt());
1575 1578
1576 1579 name = ident->li_modname;
1577 1580
1578 1581 LDI_ALLOCFREE((CE_WARN,
1579 1582 "%s: li=0x%p, mod=%s",
1580 1583 "ldi_ident_release", (void *)li, name));
1581 1584
1582 1585 ident_release((struct ldi_ident *)li);
1583 1586 }
1584 1587
1585 1588 /* get a handle to a device by dev_t and otyp */
1586 1589 int
1587 1590 ldi_open_by_dev(dev_t *devp, int otyp, int flag, cred_t *cr,
1588 1591 ldi_handle_t *lhp, ldi_ident_t li)
1589 1592 {
1590 1593 struct ldi_ident *lip = (struct ldi_ident *)li;
1591 1594 int ret;
1592 1595 vnode_t *vp;
1593 1596
1594 1597 /* sanity check required input parameters */
1595 1598 if ((devp == NULL) || (!OTYP_VALID(otyp)) || (cr == NULL) ||
1596 1599 (lhp == NULL) || (lip == NULL))
1597 1600 return (EINVAL);
1598 1601
1599 1602 ASSERT(!servicing_interrupt());
1600 1603
1601 1604 if ((ret = ldi_vp_from_dev(*devp, otyp, &vp)) != 0)
1602 1605 return (ret);
1603 1606
1604 1607 if ((ret = ldi_open_by_vp(&vp, flag, cr, lhp, lip)) == 0) {
1605 1608 *devp = vp->v_rdev;
1606 1609 }
1607 1610 VN_RELE(vp);
1608 1611
1609 1612 return (ret);
1610 1613 }
1611 1614
1612 1615 /* get a handle to a device by pathname */
1613 1616 int
1614 1617 ldi_open_by_name(char *pathname, int flag, cred_t *cr,
1615 1618 ldi_handle_t *lhp, ldi_ident_t li)
1616 1619 {
1617 1620 struct ldi_ident *lip = (struct ldi_ident *)li;
1618 1621 int ret;
1619 1622 vnode_t *vp;
1620 1623
1621 1624 /* sanity check required input parameters */
1622 1625 if ((pathname == NULL) || (*pathname != '/') ||
1623 1626 (cr == NULL) || (lhp == NULL) || (lip == NULL))
1624 1627 return (EINVAL);
1625 1628
1626 1629 ASSERT(!servicing_interrupt());
1627 1630
1628 1631 if ((ret = ldi_vp_from_name(pathname, &vp)) != 0)
1629 1632 return (ret);
1630 1633
1631 1634 ret = ldi_open_by_vp(&vp, flag, cr, lhp, lip);
1632 1635 VN_RELE(vp);
1633 1636
1634 1637 return (ret);
1635 1638 }
1636 1639
1637 1640 /* get a handle to a device by devid and minor_name */
1638 1641 int
1639 1642 ldi_open_by_devid(ddi_devid_t devid, char *minor_name,
1640 1643 int flag, cred_t *cr, ldi_handle_t *lhp, ldi_ident_t li)
1641 1644 {
1642 1645 struct ldi_ident *lip = (struct ldi_ident *)li;
1643 1646 int ret;
1644 1647 vnode_t *vp;
1645 1648
1646 1649 /* sanity check required input parameters */
1647 1650 if ((minor_name == NULL) || (cr == NULL) ||
1648 1651 (lhp == NULL) || (lip == NULL))
1649 1652 return (EINVAL);
1650 1653
1651 1654 ASSERT(!servicing_interrupt());
1652 1655
1653 1656 if ((ret = ldi_vp_from_devid(devid, minor_name, &vp)) != 0)
1654 1657 return (ret);
1655 1658
1656 1659 ret = ldi_open_by_vp(&vp, flag, cr, lhp, lip);
1657 1660 VN_RELE(vp);
1658 1661
1659 1662 return (ret);
1660 1663 }
1661 1664
1662 1665 int
1663 1666 ldi_close(ldi_handle_t lh, int flag, cred_t *cr)
1664 1667 {
1665 1668 struct ldi_handle *handlep = (struct ldi_handle *)lh;
1666 1669 struct ldi_event *lep;
1667 1670 int err = 0;
1668 1671 int notify = 0;
1669 1672 list_t *listp;
1670 1673 ldi_ev_callback_impl_t *lecp;
1671 1674
1672 1675 if (lh == NULL)
1673 1676 return (EINVAL);
1674 1677
1675 1678 ASSERT(!servicing_interrupt());
1676 1679
1677 1680 #ifdef LDI_OBSOLETE_EVENT
1678 1681
1679 1682 /*
1680 1683 * Any event handlers should have been unregistered by the
1681 1684 * time ldi_close() is called. If they haven't then it's a
1682 1685 * bug.
1683 1686 *
1684 1687 * In a debug kernel we'll panic to make the problem obvious.
1685 1688 */
1686 1689 ASSERT(handlep->lh_events == NULL);
1687 1690
1688 1691 /*
1689 1692 * On a production kernel we'll "do the right thing" (unregister
1690 1693 * the event handlers) and then complain about having to do the
1691 1694 * work ourselves.
1692 1695 */
1693 1696 while ((lep = handlep->lh_events) != NULL) {
1694 1697 err = 1;
1695 1698 (void) ldi_remove_event_handler(lh, (ldi_callback_id_t)lep);
1696 1699 }
1697 1700 if (err) {
1698 1701 struct ldi_ident *lip = handlep->lh_ident;
1699 1702 ASSERT(lip != NULL);
1700 1703 cmn_err(CE_NOTE, "ldi err: %s "
1701 1704 "failed to unregister layered event handlers before "
1702 1705 "closing devices", lip->li_modname);
1703 1706 }
1704 1707 #endif
1705 1708
1706 1709 /* do a layered close on the device */
1707 1710 err = VOP_CLOSE(handlep->lh_vp, flag | FKLYR, 1, (offset_t)0, cr, NULL);
1708 1711
1709 1712 LDI_OPENCLOSE((CE_WARN, "%s: lh=0x%p", "ldi close", (void *)lh));
1710 1713
1711 1714 /*
1712 1715 * Search the event callback list for callbacks with this
1713 1716 * handle. There are 2 cases
1714 1717 * 1. Called in the context of a notify. The handle consumer
1715 1718 * is releasing its hold on the device to allow a reconfiguration
1716 1719 * of the device. Simply NULL out the handle and the notify callback.
1717 1720 * The finalize callback is still available so that the consumer
1718 1721 * knows of the final disposition of the device.
1719 1722 * 2. Not called in the context of notify. NULL out the handle as well
1720 1723 * as the notify and finalize callbacks. Since the consumer has
1721 1724 * closed the handle, we assume it is not interested in the
1722 1725 * notify and finalize callbacks.
1723 1726 */
1724 1727 ldi_ev_lock();
1725 1728
1726 1729 if (handlep->lh_flags & LH_FLAGS_NOTIFY)
1727 1730 notify = 1;
1728 1731 listp = &ldi_ev_callback_list.le_head;
1729 1732 for (lecp = list_head(listp); lecp; lecp = list_next(listp, lecp)) {
1730 1733 if (lecp->lec_lhp != handlep)
1731 1734 continue;
1732 1735 lecp->lec_lhp = NULL;
1733 1736 lecp->lec_notify = NULL;
1734 1737 LDI_EVDBG((CE_NOTE, "ldi_close: NULLed lh and notify"));
1735 1738 if (!notify) {
1736 1739 LDI_EVDBG((CE_NOTE, "ldi_close: NULLed finalize"));
1737 1740 lecp->lec_finalize = NULL;
1738 1741 }
1739 1742 }
1740 1743
1741 1744 if (notify)
1742 1745 handlep->lh_flags &= ~LH_FLAGS_NOTIFY;
1743 1746 ldi_ev_unlock();
1744 1747
1745 1748 /*
1746 1749 * Free the handle even if the device close failed. Why?
1747 1750 *
1748 1751 * If the device close failed, we can't really make assumptions
1749 1752 * about the device's state, so we shouldn't allow access to the
1750 1753 * device via this handle any more. If the device consumer wants
1751 1754 * to access the device again they should open it again.
1752 1755 *
1753 1756 * This is the same way file/device close failures are handled
1754 1757 * in other places like spec_close() and closeandsetf().
1755 1758 */
1756 1759 handle_release(handlep);
1757 1760 return (err);
1758 1761 }
1759 1762
1760 1763 int
1761 1764 ldi_read(ldi_handle_t lh, struct uio *uiop, cred_t *credp)
1762 1765 {
1763 1766 struct ldi_handle *handlep = (struct ldi_handle *)lh;
1764 1767 vnode_t *vp;
1765 1768 dev_t dev;
1766 1769 int ret;
1767 1770
1768 1771 if (lh == NULL)
1769 1772 return (EINVAL);
1770 1773
1771 1774 vp = handlep->lh_vp;
1772 1775 dev = vp->v_rdev;
1773 1776 if (handlep->lh_type & LH_CBDEV) {
1774 1777 ret = cdev_read(dev, uiop, credp);
1775 1778 } else if (handlep->lh_type & LH_STREAM) {
1776 1779 ret = strread(vp, uiop, credp);
1777 1780 } else {
1778 1781 return (ENOTSUP);
1779 1782 }
1780 1783 return (ret);
1781 1784 }
1782 1785
1783 1786 int
1784 1787 ldi_write(ldi_handle_t lh, struct uio *uiop, cred_t *credp)
1785 1788 {
1786 1789 struct ldi_handle *handlep = (struct ldi_handle *)lh;
1787 1790 vnode_t *vp;
1788 1791 dev_t dev;
1789 1792 int ret;
1790 1793
1791 1794 if (lh == NULL)
1792 1795 return (EINVAL);
1793 1796
1794 1797 vp = handlep->lh_vp;
1795 1798 dev = vp->v_rdev;
1796 1799 if (handlep->lh_type & LH_CBDEV) {
1797 1800 ret = cdev_write(dev, uiop, credp);
1798 1801 } else if (handlep->lh_type & LH_STREAM) {
1799 1802 ret = strwrite(vp, uiop, credp);
1800 1803 } else {
1801 1804 return (ENOTSUP);
1802 1805 }
1803 1806 return (ret);
1804 1807 }
1805 1808
1806 1809 int
1807 1810 ldi_get_size(ldi_handle_t lh, uint64_t *sizep)
1808 1811 {
1809 1812 int otyp;
1810 1813 uint_t value;
1811 1814 int64_t drv_prop64;
1812 1815 struct ldi_handle *handlep = (struct ldi_handle *)lh;
1813 1816 uint_t blksize;
1814 1817 int blkshift;
1815 1818
1816 1819
1817 1820 if ((lh == NULL) || (sizep == NULL))
1818 1821 return (DDI_FAILURE);
1819 1822
1820 1823 if (handlep->lh_type & LH_STREAM)
1821 1824 return (DDI_FAILURE);
1822 1825
1823 1826 /*
1824 1827 * Determine device type (char or block).
1825 1828 * Character devices support Size/size
1826 1829 * property value. Block devices may support
1827 1830 * Nblocks/nblocks or Size/size property value.
1828 1831 */
1829 1832 if ((ldi_get_otyp(lh, &otyp)) != 0)
1830 1833 return (DDI_FAILURE);
1831 1834
1832 1835 if (otyp == OTYP_BLK) {
1833 1836 if (ldi_prop_exists(lh,
1834 1837 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "Nblocks")) {
1835 1838
1836 1839 drv_prop64 = ldi_prop_get_int64(lh,
1837 1840 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1838 1841 "Nblocks", 0);
1839 1842 blksize = ldi_prop_get_int(lh,
1840 1843 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1841 1844 "blksize", DEV_BSIZE);
1842 1845 if (blksize == DEV_BSIZE)
1843 1846 blksize = ldi_prop_get_int(lh, LDI_DEV_T_ANY |
1844 1847 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1845 1848 "device-blksize", DEV_BSIZE);
1846 1849
1847 1850 /* blksize must be a power of two */
1848 1851 ASSERT(BIT_ONLYONESET(blksize));
1849 1852 blkshift = highbit(blksize) - 1;
1850 1853
1851 1854 /*
1852 1855 * We don't support Nblocks values that don't have
1853 1856 * an accurate uint64_t byte count representation.
1854 1857 */
1855 1858 if ((uint64_t)drv_prop64 >= (UINT64_MAX >> blkshift))
1856 1859 return (DDI_FAILURE);
1857 1860
1858 1861 *sizep = (uint64_t)
1859 1862 (((u_offset_t)drv_prop64) << blkshift);
1860 1863 return (DDI_SUCCESS);
1861 1864 }
1862 1865
1863 1866 if (ldi_prop_exists(lh,
1864 1867 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "nblocks")) {
1865 1868
1866 1869 value = ldi_prop_get_int(lh,
1867 1870 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1868 1871 "nblocks", 0);
1869 1872 blksize = ldi_prop_get_int(lh,
1870 1873 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1871 1874 "blksize", DEV_BSIZE);
1872 1875 if (blksize == DEV_BSIZE)
1873 1876 blksize = ldi_prop_get_int(lh, LDI_DEV_T_ANY |
1874 1877 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1875 1878 "device-blksize", DEV_BSIZE);
1876 1879
1877 1880 /* blksize must be a power of two */
1878 1881 ASSERT(BIT_ONLYONESET(blksize));
1879 1882 blkshift = highbit(blksize) - 1;
1880 1883
1881 1884 /*
1882 1885 * We don't support nblocks values that don't have an
1883 1886 * accurate uint64_t byte count representation.
1884 1887 */
1885 1888 if ((uint64_t)value >= (UINT64_MAX >> blkshift))
1886 1889 return (DDI_FAILURE);
1887 1890
1888 1891 *sizep = (uint64_t)
1889 1892 (((u_offset_t)value) << blkshift);
1890 1893 return (DDI_SUCCESS);
1891 1894 }
1892 1895 }
1893 1896
1894 1897 if (ldi_prop_exists(lh,
1895 1898 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "Size")) {
1896 1899
1897 1900 drv_prop64 = ldi_prop_get_int64(lh,
1898 1901 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "Size", 0);
1899 1902 *sizep = (uint64_t)drv_prop64;
1900 1903 return (DDI_SUCCESS);
1901 1904 }
1902 1905
1903 1906 if (ldi_prop_exists(lh,
1904 1907 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "size")) {
1905 1908
1906 1909 value = ldi_prop_get_int(lh,
1907 1910 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "size", 0);
1908 1911 *sizep = (uint64_t)value;
1909 1912 return (DDI_SUCCESS);
1910 1913 }
1911 1914
1912 1915 /* unable to determine device size */
1913 1916 return (DDI_FAILURE);
1914 1917 }
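
For context, a minimal consumer-side sketch of how a layered driver might use ldi_get_size() on an already-open handle. The handle name and error handling below are illustrative and not taken from this file:

	/*
	 * Hypothetical caller: "lh" was obtained earlier from
	 * ldi_open_by_name() or ldi_open_by_devid().
	 */
	uint64_t size;

	if (ldi_get_size(lh, &size) != DDI_SUCCESS) {
		/* device exports neither Nblocks/nblocks nor Size/size */
		return (EIO);
	}
	/* "size" now holds the device length in bytes */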
1915 1918
1916 1919 int
1917 1920 ldi_ioctl(ldi_handle_t lh, int cmd, intptr_t arg, int mode,
1918 1921 cred_t *cr, int *rvalp)
1919 1922 {
1920 1923 struct ldi_handle *handlep = (struct ldi_handle *)lh;
1921 1924 vnode_t *vp;
1922 1925 dev_t dev;
1923 1926 int ret, copymode, unused;
1924 1927
1925 1928 if (lh == NULL)
1926 1929 return (EINVAL);
1927 1930
1928 1931 /*
1929 1932 * if the data pointed to by arg is located in the kernel then
1930 1933 * make sure the FNATIVE flag is set.
1931 1934 */
1932 1935 if (mode & FKIOCTL)
1933 1936 mode = (mode & ~FMODELS) | FNATIVE | FKIOCTL;
1934 1937
1935 1938 /*
1936 1939 * Some drivers assume that rvalp will always be non-NULL, so in
1937 1940 * an attempt to avoid panics if the caller passed in a NULL
1938 1941 * value, update rvalp to point to a temporary variable.
1939 1942 */
1940 1943 if (rvalp == NULL)
1941 1944 rvalp = &unused;
1942 1945 vp = handlep->lh_vp;
1943 1946 dev = vp->v_rdev;
1944 1947 if (handlep->lh_type & LH_CBDEV) {
1945 1948 ret = cdev_ioctl(dev, cmd, arg, mode, cr, rvalp);
1946 1949 } else if (handlep->lh_type & LH_STREAM) {
1947 1950 copymode = (mode & FKIOCTL) ? K_TO_K : U_TO_K;
1948 1951
1949 1952 /*
1950 1953 * if we get an I_PLINK from within the kernel the
1951 1954 * arg is a layered handle pointer instead of
1952 1955 * a file descriptor, so we translate this ioctl
1953 1956 * into a private one that can handle this.
1954 1957 */
1955 1958 if ((mode & FKIOCTL) && (cmd == I_PLINK))
1956 1959 cmd = _I_PLINK_LH;
1957 1960
1958 1961 ret = strioctl(vp, cmd, arg, mode, copymode, cr, rvalp);
1959 1962 } else {
1960 1963 return (ENOTSUP);
1961 1964 }
1962 1965
1963 1966 return (ret);
1964 1967 }
1965 1968
1966 1969 int
1967 1970 ldi_poll(ldi_handle_t lh, short events, int anyyet, short *reventsp,
1968 1971 struct pollhead **phpp)
1969 1972 {
1970 1973 struct ldi_handle *handlep = (struct ldi_handle *)lh;
1971 1974 vnode_t *vp;
1972 1975 dev_t dev;
1973 1976 int ret;
1974 1977
1975 1978 if (lh == NULL)
1976 1979 return (EINVAL);
1977 1980
1978 1981 vp = handlep->lh_vp;
1979 1982 dev = vp->v_rdev;
1980 1983 if (handlep->lh_type & LH_CBDEV) {
1981 1984 ret = cdev_poll(dev, events, anyyet, reventsp, phpp);
1982 1985 } else if (handlep->lh_type & LH_STREAM) {
1983 1986 ret = strpoll(vp->v_stream, events, anyyet, reventsp, phpp);
1984 1987 } else {
1985 1988 return (ENOTSUP);
1986 1989 }
1987 1990
1988 1991 return (ret);
1989 1992 }
1990 1993
1991 1994 int
1992 1995 ldi_prop_op(ldi_handle_t lh, ddi_prop_op_t prop_op,
1993 1996 int flags, char *name, caddr_t valuep, int *length)
1994 1997 {
1995 1998 struct ldi_handle *handlep = (struct ldi_handle *)lh;
1996 1999 dev_t dev;
1997 2000 dev_info_t *dip;
1998 2001 int ret;
1999 2002 struct snode *csp;
2000 2003
2001 2004 if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2002 2005 return (DDI_PROP_INVAL_ARG);
2003 2006
2004 2007 if ((prop_op != PROP_LEN) && (valuep == NULL))
2005 2008 return (DDI_PROP_INVAL_ARG);
2006 2009
2007 2010 if (length == NULL)
2008 2011 return (DDI_PROP_INVAL_ARG);
2009 2012
2010 2013 /*
2011 2014 * try to find the associated dip,
2012 2015 * this places a hold on the driver
2013 2016 */
2014 2017 dev = handlep->lh_vp->v_rdev;
2015 2018
2016 2019 csp = VTOCS(handlep->lh_vp);
2017 2020 mutex_enter(&csp->s_lock);
2018 2021 if ((dip = csp->s_dip) != NULL)
2019 2022 e_ddi_hold_devi(dip);
2020 2023 mutex_exit(&csp->s_lock);
2021 2024 if (dip == NULL)
2022 2025 dip = e_ddi_hold_devi_by_dev(dev, 0);
2023 2026
2024 2027 if (dip == NULL)
2025 2028 return (DDI_PROP_NOT_FOUND);
2026 2029
2027 2030 ret = i_ldi_prop_op(dev, dip, prop_op, flags, name, valuep, length);
2028 2031 ddi_release_devi(dip);
2029 2032
2030 2033 return (ret);
2031 2034 }
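
The dip lookup in ldi_prop_op() above (prefer the snode's cached s_dip, otherwise take a hold by dev_t) is repeated by each of the property routines that follow. A hypothetical helper expressing that idiom might look like the sketch below; it is not part of this file and the name is made up, it simply restates the pattern:

	/*
	 * Hypothetical helper (not in this file): return a held devinfo
	 * node for a layered handle, or NULL.  The caller is responsible
	 * for calling ddi_release_devi() when done.
	 */
	static dev_info_t *
	example_hold_devi(struct ldi_handle *handlep)
	{
		struct snode	*csp = VTOCS(handlep->lh_vp);
		dev_info_t	*dip;

		mutex_enter(&csp->s_lock);
		if ((dip = csp->s_dip) != NULL)
			e_ddi_hold_devi(dip);	/* use the cached dip */
		mutex_exit(&csp->s_lock);
		if (dip == NULL)
			dip = e_ddi_hold_devi_by_dev(handlep->lh_vp->v_rdev, 0);
		return (dip);
	}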
2032 2035
2033 2036 int
2034 2037 ldi_strategy(ldi_handle_t lh, struct buf *bp)
2035 2038 {
2036 2039 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2037 2040 dev_t dev;
2038 2041
2039 2042 if ((lh == NULL) || (bp == NULL))
2040 2043 return (EINVAL);
2041 2044
2042 2045 /* this entry point is only supported for cb devices */
2043 2046 dev = handlep->lh_vp->v_rdev;
2044 2047 if (!(handlep->lh_type & LH_CBDEV))
2045 2048 return (ENOTSUP);
2046 2049
2047 2050 bp->b_edev = dev;
2048 2051 bp->b_dev = cmpdev(dev);
2049 2052 return (bdev_strategy(bp));
2050 2053 }
2051 2054
2052 2055 int
2053 2056 ldi_dump(ldi_handle_t lh, caddr_t addr, daddr_t blkno, int nblk)
2054 2057 {
2055 2058 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2056 2059 dev_t dev;
2057 2060
2058 2061 if (lh == NULL)
2059 2062 return (EINVAL);
2060 2063
2061 2064 /* this entry point is only supported for cb devices */
2062 2065 dev = handlep->lh_vp->v_rdev;
2063 2066 if (!(handlep->lh_type & LH_CBDEV))
2064 2067 return (ENOTSUP);
2065 2068
2066 2069 return (bdev_dump(dev, addr, blkno, nblk));
2067 2070 }
2068 2071
2069 2072 int
2070 2073 ldi_devmap(ldi_handle_t lh, devmap_cookie_t dhp, offset_t off,
2071 2074 size_t len, size_t *maplen, uint_t model)
2072 2075 {
2073 2076 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2074 2077 dev_t dev;
2075 2078
2076 2079 if (lh == NULL)
2077 2080 return (EINVAL);
2078 2081
2079 2082 /* this entry point is only supported for cb devices */
2080 2083 dev = handlep->lh_vp->v_rdev;
2081 2084 if (!(handlep->lh_type & LH_CBDEV))
2082 2085 return (ENOTSUP);
2083 2086
2084 2087 return (cdev_devmap(dev, dhp, off, len, maplen, model));
2085 2088 }
2086 2089
2087 2090 int
2088 2091 ldi_aread(ldi_handle_t lh, struct aio_req *aio_reqp, cred_t *cr)
2089 2092 {
2090 2093 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2091 2094 dev_t dev;
2092 2095 struct cb_ops *cb;
2093 2096
2094 2097 if (lh == NULL)
2095 2098 return (EINVAL);
2096 2099
2097 2100 /* this entry point is only supported for cb devices */
2098 2101 if (!(handlep->lh_type & LH_CBDEV))
2099 2102 return (ENOTSUP);
2100 2103
2101 2104 /*
2102 2105 * Kaio is only supported on block devices.
2103 2106 */
2104 2107 dev = handlep->lh_vp->v_rdev;
2105 2108 cb = devopsp[getmajor(dev)]->devo_cb_ops;
2106 2109 if (cb->cb_strategy == nodev || cb->cb_strategy == NULL)
2107 2110 return (ENOTSUP);
2108 2111
2109 2112 if (cb->cb_aread == NULL)
2110 2113 return (ENOTSUP);
2111 2114
2112 2115 return (cb->cb_aread(dev, aio_reqp, cr));
2113 2116 }
2114 2117
2115 2118 int
2116 2119 ldi_awrite(ldi_handle_t lh, struct aio_req *aio_reqp, cred_t *cr)
2117 2120 {
2118 2121 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2119 2122 struct cb_ops *cb;
2120 2123 dev_t dev;
2121 2124
2122 2125 if (lh == NULL)
2123 2126 return (EINVAL);
2124 2127
2125 2128 /* this entry point is only supported for cb devices */
2126 2129 if (!(handlep->lh_type & LH_CBDEV))
2127 2130 return (ENOTSUP);
2128 2131
2129 2132 /*
2130 2133 * Kaio is only supported on block devices.
2131 2134 */
2132 2135 dev = handlep->lh_vp->v_rdev;
2133 2136 cb = devopsp[getmajor(dev)]->devo_cb_ops;
2134 2137 if (cb->cb_strategy == nodev || cb->cb_strategy == NULL)
2135 2138 return (ENOTSUP);
2136 2139
2137 2140 if (cb->cb_awrite == NULL)
2138 2141 return (ENOTSUP);
2139 2142
2140 2143 return (cb->cb_awrite(dev, aio_reqp, cr));
2141 2144 }
2142 2145
2143 2146 int
2144 2147 ldi_putmsg(ldi_handle_t lh, mblk_t *smp)
2145 2148 {
2146 2149 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2147 2150 int ret;
2148 2151
2149 2152 if ((lh == NULL) || (smp == NULL))
2150 2153 return (EINVAL);
2151 2154
2152 2155 if (!(handlep->lh_type & LH_STREAM)) {
2153 2156 freemsg(smp);
2154 2157 return (ENOTSUP);
2155 2158 }
2156 2159
2157 2160 /*
2158 2161 * If we don't have db_credp, set it. Note that we can not be called
2159 2162 * from interrupt context.
2160 2163 */
2161 2164 if (msg_getcred(smp, NULL) == NULL)
2162 2165 mblk_setcred(smp, CRED(), curproc->p_pid);
2163 2166
2164 2167 /* Send message while honoring flow control */
2165 2168 ret = kstrputmsg(handlep->lh_vp, smp, NULL, 0, 0,
2166 2169 MSG_BAND | MSG_HOLDSIG | MSG_IGNERROR, 0);
2167 2170
2168 2171 return (ret);
2169 2172 }
2170 2173
2171 2174 int
2172 2175 ldi_getmsg(ldi_handle_t lh, mblk_t **rmp, timestruc_t *timeo)
2173 2176 {
2174 2177 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2175 2178 clock_t timout; /* milliseconds */
2176 2179 uchar_t pri;
2177 2180 rval_t rval;
2178 2181 int ret, pflag;
2179 2182
2180 2183
2181 2184 if (lh == NULL)
2182 2185 return (EINVAL);
2183 2186
2184 2187 if (!(handlep->lh_type & LH_STREAM))
2185 2188 return (ENOTSUP);
2186 2189
2187 2190 /* Convert from nanoseconds to milliseconds */
2188 2191 if (timeo != NULL) {
2189 2192 timout = timeo->tv_sec * 1000 + timeo->tv_nsec / 1000000;
2190 2193 if (timout > INT_MAX)
2191 2194 return (EINVAL);
2192 2195 } else
2193 2196 timout = -1;
2194 2197
2195 2198 /* Wait for timeout milliseconds for a message */
2196 2199 pflag = MSG_ANY;
2197 2200 pri = 0;
2198 2201 *rmp = NULL;
2199 2202 ret = kstrgetmsg(handlep->lh_vp,
2200 2203 rmp, NULL, &pri, &pflag, timout, &rval);
2201 2204 return (ret);
2202 2205 }
2203 2206
2204 2207 int
2205 2208 ldi_get_dev(ldi_handle_t lh, dev_t *devp)
2206 2209 {
2207 2210 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2208 2211
2209 2212 if ((lh == NULL) || (devp == NULL))
2210 2213 return (EINVAL);
2211 2214
2212 2215 *devp = handlep->lh_vp->v_rdev;
2213 2216 return (0);
2214 2217 }
2215 2218
2216 2219 int
2217 2220 ldi_get_otyp(ldi_handle_t lh, int *otyp)
2218 2221 {
2219 2222 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2220 2223
2221 2224 if ((lh == NULL) || (otyp == NULL))
2222 2225 return (EINVAL);
2223 2226
2224 2227 *otyp = VTYP_TO_OTYP(handlep->lh_vp->v_type);
2225 2228 return (0);
2226 2229 }
2227 2230
2228 2231 int
2229 2232 ldi_get_devid(ldi_handle_t lh, ddi_devid_t *devid)
2230 2233 {
2231 2234 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2232 2235 int ret;
2233 2236 dev_t dev;
2234 2237
2235 2238 if ((lh == NULL) || (devid == NULL))
2236 2239 return (EINVAL);
2237 2240
2238 2241 dev = handlep->lh_vp->v_rdev;
2239 2242
2240 2243 ret = ddi_lyr_get_devid(dev, devid);
2241 2244 if (ret != DDI_SUCCESS)
2242 2245 return (ENOTSUP);
2243 2246
2244 2247 return (0);
2245 2248 }
2246 2249
2247 2250 int
2248 2251 ldi_get_minor_name(ldi_handle_t lh, char **minor_name)
2249 2252 {
2250 2253 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2251 2254 int ret, otyp;
2252 2255 dev_t dev;
2253 2256
2254 2257 if ((lh == NULL) || (minor_name == NULL))
2255 2258 return (EINVAL);
2256 2259
2257 2260 dev = handlep->lh_vp->v_rdev;
2258 2261 otyp = VTYP_TO_OTYP(handlep->lh_vp->v_type);
2259 2262
2260 2263 ret = ddi_lyr_get_minor_name(dev, OTYP_TO_STYP(otyp), minor_name);
2261 2264 if (ret != DDI_SUCCESS)
2262 2265 return (ENOTSUP);
2263 2266
2264 2267 return (0);
2265 2268 }
2266 2269
2267 2270 int
2268 2271 ldi_prop_lookup_int_array(ldi_handle_t lh,
2269 2272 uint_t flags, char *name, int **data, uint_t *nelements)
2270 2273 {
2271 2274 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2272 2275 dev_info_t *dip;
2273 2276 dev_t dev;
2274 2277 int res;
2275 2278 struct snode *csp;
2276 2279
2277 2280 if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2278 2281 return (DDI_PROP_INVAL_ARG);
2279 2282
2280 2283 dev = handlep->lh_vp->v_rdev;
2281 2284
2282 2285 csp = VTOCS(handlep->lh_vp);
2283 2286 mutex_enter(&csp->s_lock);
2284 2287 if ((dip = csp->s_dip) != NULL)
2285 2288 e_ddi_hold_devi(dip);
2286 2289 mutex_exit(&csp->s_lock);
2287 2290 if (dip == NULL)
2288 2291 dip = e_ddi_hold_devi_by_dev(dev, 0);
2289 2292
2290 2293 if (dip == NULL) {
2291 2294 flags |= DDI_UNBND_DLPI2;
2292 2295 } else if (flags & LDI_DEV_T_ANY) {
2293 2296 flags &= ~LDI_DEV_T_ANY;
2294 2297 dev = DDI_DEV_T_ANY;
2295 2298 }
2296 2299
2297 2300 if (dip != NULL) {
2298 2301 int *prop_val, prop_len;
2299 2302
2300 2303 res = i_ldi_prop_op_typed(dev, dip, flags, name,
2301 2304 (caddr_t *)&prop_val, &prop_len, sizeof (int));
2302 2305
2303 2306 /* if we got it then return it */
2304 2307 if (res == DDI_PROP_SUCCESS) {
2305 2308 *nelements = prop_len / sizeof (int);
2306 2309 *data = prop_val;
2307 2310
2308 2311 ddi_release_devi(dip);
2309 2312 return (res);
2310 2313 }
2311 2314 }
2312 2315
2313 2316 /* call the normal property interfaces */
2314 2317 res = ddi_prop_lookup_int_array(dev, dip, flags,
2315 2318 name, data, nelements);
2316 2319
2317 2320 if (dip != NULL)
2318 2321 ddi_release_devi(dip);
2319 2322
2320 2323 return (res);
2321 2324 }
2322 2325
2323 2326 int
2324 2327 ldi_prop_lookup_int64_array(ldi_handle_t lh,
2325 2328 uint_t flags, char *name, int64_t **data, uint_t *nelements)
2326 2329 {
2327 2330 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2328 2331 dev_info_t *dip;
2329 2332 dev_t dev;
2330 2333 int res;
2331 2334 struct snode *csp;
2332 2335
2333 2336 if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2334 2337 return (DDI_PROP_INVAL_ARG);
2335 2338
2336 2339 dev = handlep->lh_vp->v_rdev;
2337 2340
2338 2341 csp = VTOCS(handlep->lh_vp);
2339 2342 mutex_enter(&csp->s_lock);
2340 2343 if ((dip = csp->s_dip) != NULL)
2341 2344 e_ddi_hold_devi(dip);
2342 2345 mutex_exit(&csp->s_lock);
2343 2346 if (dip == NULL)
2344 2347 dip = e_ddi_hold_devi_by_dev(dev, 0);
2345 2348
2346 2349 if (dip == NULL) {
2347 2350 flags |= DDI_UNBND_DLPI2;
2348 2351 } else if (flags & LDI_DEV_T_ANY) {
2349 2352 flags &= ~LDI_DEV_T_ANY;
2350 2353 dev = DDI_DEV_T_ANY;
2351 2354 }
2352 2355
2353 2356 if (dip != NULL) {
2354 2357 int64_t *prop_val;
2355 2358 int prop_len;
2356 2359
2357 2360 res = i_ldi_prop_op_typed(dev, dip, flags, name,
2358 2361 (caddr_t *)&prop_val, &prop_len, sizeof (int64_t));
2359 2362
2360 2363 /* if we got it then return it */
2361 2364 if (res == DDI_PROP_SUCCESS) {
2362 2365 *nelements = prop_len / sizeof (int64_t);
2363 2366 *data = prop_val;
2364 2367
2365 2368 ddi_release_devi(dip);
2366 2369 return (res);
2367 2370 }
2368 2371 }
2369 2372
2370 2373 /* call the normal property interfaces */
2371 2374 res = ddi_prop_lookup_int64_array(dev, dip, flags,
2372 2375 name, data, nelements);
2373 2376
2374 2377 if (dip != NULL)
2375 2378 ddi_release_devi(dip);
2376 2379
2377 2380 return (res);
2378 2381 }
2379 2382
2380 2383 int
2381 2384 ldi_prop_lookup_string_array(ldi_handle_t lh,
2382 2385 uint_t flags, char *name, char ***data, uint_t *nelements)
2383 2386 {
2384 2387 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2385 2388 dev_info_t *dip;
2386 2389 dev_t dev;
2387 2390 int res;
2388 2391 struct snode *csp;
2389 2392
2390 2393 if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2391 2394 return (DDI_PROP_INVAL_ARG);
2392 2395
2393 2396 dev = handlep->lh_vp->v_rdev;
2394 2397
2395 2398 csp = VTOCS(handlep->lh_vp);
2396 2399 mutex_enter(&csp->s_lock);
2397 2400 if ((dip = csp->s_dip) != NULL)
2398 2401 e_ddi_hold_devi(dip);
2399 2402 mutex_exit(&csp->s_lock);
2400 2403 if (dip == NULL)
2401 2404 dip = e_ddi_hold_devi_by_dev(dev, 0);
2402 2405
2403 2406 if (dip == NULL) {
2404 2407 flags |= DDI_UNBND_DLPI2;
2405 2408 } else if (flags & LDI_DEV_T_ANY) {
2406 2409 flags &= ~LDI_DEV_T_ANY;
2407 2410 dev = DDI_DEV_T_ANY;
2408 2411 }
2409 2412
2410 2413 if (dip != NULL) {
2411 2414 char *prop_val;
2412 2415 int prop_len;
2413 2416
2414 2417 res = i_ldi_prop_op_typed(dev, dip, flags, name,
2415 2418 (caddr_t *)&prop_val, &prop_len, 0);
2416 2419
2417 2420 /* if we got it then return it */
2418 2421 if (res == DDI_PROP_SUCCESS) {
2419 2422 char **str_array;
2420 2423 int nelem;
2421 2424
2422 2425 /*
2423 2426 * pack the returned string array into the format
2424 2427 * our callers expect
2425 2428 */
2426 2429 if (i_pack_string_array(prop_val, prop_len,
2427 2430 &str_array, &nelem) == 0) {
2428 2431
2429 2432 *data = str_array;
2430 2433 *nelements = nelem;
2431 2434
2432 2435 ddi_prop_free(prop_val);
2433 2436 ddi_release_devi(dip);
2434 2437 return (res);
2435 2438 }
2436 2439
2437 2440 /*
2438 2441 * the format of the returned property must have
2439 2442 * been bad so throw it out
2440 2443 */
2441 2444 ddi_prop_free(prop_val);
2442 2445 }
2443 2446 }
2444 2447
2445 2448 /* call the normal property interfaces */
2446 2449 res = ddi_prop_lookup_string_array(dev, dip, flags,
2447 2450 name, data, nelements);
2448 2451
2449 2452 if (dip != NULL)
2450 2453 ddi_release_devi(dip);
2451 2454
2452 2455 return (res);
2453 2456 }
2454 2457
2455 2458 int
2456 2459 ldi_prop_lookup_string(ldi_handle_t lh,
2457 2460 uint_t flags, char *name, char **data)
2458 2461 {
2459 2462 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2460 2463 dev_info_t *dip;
2461 2464 dev_t dev;
2462 2465 int res;
2463 2466 struct snode *csp;
2464 2467
2465 2468 if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2466 2469 return (DDI_PROP_INVAL_ARG);
2467 2470
2468 2471 dev = handlep->lh_vp->v_rdev;
2469 2472
2470 2473 csp = VTOCS(handlep->lh_vp);
2471 2474 mutex_enter(&csp->s_lock);
2472 2475 if ((dip = csp->s_dip) != NULL)
2473 2476 e_ddi_hold_devi(dip);
2474 2477 mutex_exit(&csp->s_lock);
2475 2478 if (dip == NULL)
2476 2479 dip = e_ddi_hold_devi_by_dev(dev, 0);
2477 2480
2478 2481 if (dip == NULL) {
2479 2482 flags |= DDI_UNBND_DLPI2;
2480 2483 } else if (flags & LDI_DEV_T_ANY) {
2481 2484 flags &= ~LDI_DEV_T_ANY;
2482 2485 dev = DDI_DEV_T_ANY;
2483 2486 }
2484 2487
2485 2488 if (dip != NULL) {
2486 2489 char *prop_val;
2487 2490 int prop_len;
2488 2491
2489 2492 res = i_ldi_prop_op_typed(dev, dip, flags, name,
2490 2493 (caddr_t *)&prop_val, &prop_len, 0);
2491 2494
2492 2495 /* if we got it then return it */
2493 2496 if (res == DDI_PROP_SUCCESS) {
2494 2497 /*
2495 2498 * sanity check the value returned.
2496 2499 */
2497 2500 if (i_check_string(prop_val, prop_len)) {
2498 2501 ddi_prop_free(prop_val);
2499 2502 } else {
2500 2503 *data = prop_val;
2501 2504 ddi_release_devi(dip);
2502 2505 return (res);
2503 2506 }
2504 2507 }
2505 2508 }
2506 2509
2507 2510 /* call the normal property interfaces */
2508 2511 res = ddi_prop_lookup_string(dev, dip, flags, name, data);
2509 2512
2510 2513 if (dip != NULL)
2511 2514 ddi_release_devi(dip);
2512 2515
2513 2516 #ifdef DEBUG
2514 2517 if (res == DDI_PROP_SUCCESS) {
2515 2518 /*
2516 2519 * keep ourselves honest
2517 2520 * make sure the framework returns strings in the
2518 2521 * same format as we're demanding from drivers.
2519 2522 */
2520 2523 struct prop_driver_data *pdd;
2521 2524 int pdd_prop_size;
2522 2525
2523 2526 pdd = ((struct prop_driver_data *)(*data)) - 1;
2524 2527 pdd_prop_size = pdd->pdd_size -
2525 2528 sizeof (struct prop_driver_data);
2526 2529 ASSERT(i_check_string(*data, pdd_prop_size) == 0);
2527 2530 }
2528 2531 #endif /* DEBUG */
2529 2532
2530 2533 return (res);
2531 2534 }
2532 2535
2533 2536 int
2534 2537 ldi_prop_lookup_byte_array(ldi_handle_t lh,
2535 2538 uint_t flags, char *name, uchar_t **data, uint_t *nelements)
2536 2539 {
2537 2540 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2538 2541 dev_info_t *dip;
2539 2542 dev_t dev;
2540 2543 int res;
2541 2544 struct snode *csp;
2542 2545
2543 2546 if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2544 2547 return (DDI_PROP_INVAL_ARG);
2545 2548
2546 2549 dev = handlep->lh_vp->v_rdev;
2547 2550
2548 2551 csp = VTOCS(handlep->lh_vp);
2549 2552 mutex_enter(&csp->s_lock);
2550 2553 if ((dip = csp->s_dip) != NULL)
2551 2554 e_ddi_hold_devi(dip);
2552 2555 mutex_exit(&csp->s_lock);
2553 2556 if (dip == NULL)
2554 2557 dip = e_ddi_hold_devi_by_dev(dev, 0);
2555 2558
2556 2559 if (dip == NULL) {
2557 2560 flags |= DDI_UNBND_DLPI2;
2558 2561 } else if (flags & LDI_DEV_T_ANY) {
2559 2562 flags &= ~LDI_DEV_T_ANY;
2560 2563 dev = DDI_DEV_T_ANY;
2561 2564 }
2562 2565
2563 2566 if (dip != NULL) {
2564 2567 uchar_t *prop_val;
2565 2568 int prop_len;
2566 2569
2567 2570 res = i_ldi_prop_op_typed(dev, dip, flags, name,
2568 2571 (caddr_t *)&prop_val, &prop_len, sizeof (uchar_t));
2569 2572
2570 2573 /* if we got it then return it */
2571 2574 if (res == DDI_PROP_SUCCESS) {
2572 2575 *nelements = prop_len / sizeof (uchar_t);
2573 2576 *data = prop_val;
2574 2577
2575 2578 ddi_release_devi(dip);
2576 2579 return (res);
2577 2580 }
2578 2581 }
2579 2582
2580 2583 /* call the normal property interfaces */
2581 2584 res = ddi_prop_lookup_byte_array(dev, dip, flags,
2582 2585 name, data, nelements);
2583 2586
2584 2587 if (dip != NULL)
2585 2588 ddi_release_devi(dip);
2586 2589
2587 2590 return (res);
2588 2591 }
2589 2592
2590 2593 int
2591 2594 ldi_prop_get_int(ldi_handle_t lh,
2592 2595 uint_t flags, char *name, int defvalue)
2593 2596 {
2594 2597 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2595 2598 dev_info_t *dip;
2596 2599 dev_t dev;
2597 2600 int res;
2598 2601 struct snode *csp;
2599 2602
2600 2603 if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2601 2604 return (defvalue);
2602 2605
2603 2606 dev = handlep->lh_vp->v_rdev;
2604 2607
2605 2608 csp = VTOCS(handlep->lh_vp);
2606 2609 mutex_enter(&csp->s_lock);
2607 2610 if ((dip = csp->s_dip) != NULL)
2608 2611 e_ddi_hold_devi(dip);
2609 2612 mutex_exit(&csp->s_lock);
2610 2613 if (dip == NULL)
2611 2614 dip = e_ddi_hold_devi_by_dev(dev, 0);
2612 2615
2613 2616 if (dip == NULL) {
2614 2617 flags |= DDI_UNBND_DLPI2;
2615 2618 } else if (flags & LDI_DEV_T_ANY) {
2616 2619 flags &= ~LDI_DEV_T_ANY;
2617 2620 dev = DDI_DEV_T_ANY;
2618 2621 }
2619 2622
2620 2623 if (dip != NULL) {
2621 2624 int prop_val;
2622 2625 int prop_len;
2623 2626
2624 2627 /*
2625 2628 * first call the driver's prop_op interface to allow it
2626 2629 * to override default property values.
2627 2630 */
2628 2631 prop_len = sizeof (int);
2629 2632 res = i_ldi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
2630 2633 flags | DDI_PROP_DYNAMIC, name,
2631 2634 (caddr_t)&prop_val, &prop_len);
2632 2635
2633 2636 /* if we got it then return it */
2634 2637 if ((res == DDI_PROP_SUCCESS) &&
2635 2638 (prop_len == sizeof (int))) {
2636 2639 res = prop_val;
2637 2640 ddi_release_devi(dip);
2638 2641 return (res);
2639 2642 }
2640 2643 }
2641 2644
2642 2645 /* call the normal property interfaces */
2643 2646 res = ddi_prop_get_int(dev, dip, flags, name, defvalue);
2644 2647
2645 2648 if (dip != NULL)
2646 2649 ddi_release_devi(dip);
2647 2650
2648 2651 return (res);
2649 2652 }
2650 2653
2651 2654 int64_t
2652 2655 ldi_prop_get_int64(ldi_handle_t lh,
2653 2656 uint_t flags, char *name, int64_t defvalue)
2654 2657 {
2655 2658 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2656 2659 dev_info_t *dip;
2657 2660 dev_t dev;
2658 2661 int64_t res;
2659 2662 struct snode *csp;
2660 2663
2661 2664 if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2662 2665 return (defvalue);
2663 2666
2664 2667 dev = handlep->lh_vp->v_rdev;
2665 2668
2666 2669 csp = VTOCS(handlep->lh_vp);
2667 2670 mutex_enter(&csp->s_lock);
2668 2671 if ((dip = csp->s_dip) != NULL)
2669 2672 e_ddi_hold_devi(dip);
2670 2673 mutex_exit(&csp->s_lock);
2671 2674 if (dip == NULL)
2672 2675 dip = e_ddi_hold_devi_by_dev(dev, 0);
2673 2676
2674 2677 if (dip == NULL) {
2675 2678 flags |= DDI_UNBND_DLPI2;
2676 2679 } else if (flags & LDI_DEV_T_ANY) {
2677 2680 flags &= ~LDI_DEV_T_ANY;
2678 2681 dev = DDI_DEV_T_ANY;
2679 2682 }
2680 2683
2681 2684 if (dip != NULL) {
2682 2685 int64_t prop_val;
2683 2686 int prop_len;
2684 2687
2685 2688 /*
2686 2689 * first call the driver's prop_op interface to allow it
2687 2690 * to override default property values.
2688 2691 */
2689 2692 prop_len = sizeof (int64_t);
2690 2693 res = i_ldi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
2691 2694 flags | DDI_PROP_DYNAMIC, name,
2692 2695 (caddr_t)&prop_val, &prop_len);
2693 2696
2694 2697 /* if we got it then return it */
2695 2698 if ((res == DDI_PROP_SUCCESS) &&
2696 2699 (prop_len == sizeof (int64_t))) {
2697 2700 res = prop_val;
2698 2701 ddi_release_devi(dip);
2699 2702 return (res);
2700 2703 }
2701 2704 }
2702 2705
2703 2706 /* call the normal property interfaces */
2704 2707 res = ddi_prop_get_int64(dev, dip, flags, name, defvalue);
2705 2708
2706 2709 if (dip != NULL)
2707 2710 ddi_release_devi(dip);
2708 2711
2709 2712 return (res);
2710 2713 }
2711 2714
2712 2715 int
2713 2716 ldi_prop_exists(ldi_handle_t lh, uint_t flags, char *name)
2714 2717 {
2715 2718 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2716 2719 dev_info_t *dip;
2717 2720 dev_t dev;
2718 2721 int res, prop_len;
2719 2722 struct snode *csp;
2720 2723
2721 2724 if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2722 2725 return (0);
2723 2726
2724 2727 dev = handlep->lh_vp->v_rdev;
2725 2728
2726 2729 csp = VTOCS(handlep->lh_vp);
2727 2730 mutex_enter(&csp->s_lock);
2728 2731 if ((dip = csp->s_dip) != NULL)
2729 2732 e_ddi_hold_devi(dip);
2730 2733 mutex_exit(&csp->s_lock);
2731 2734 if (dip == NULL)
2732 2735 dip = e_ddi_hold_devi_by_dev(dev, 0);
2733 2736
2734 2737 /* if NULL dip, prop does NOT exist */
2735 2738 if (dip == NULL)
2736 2739 return (0);
2737 2740
2738 2741 if (flags & LDI_DEV_T_ANY) {
2739 2742 flags &= ~LDI_DEV_T_ANY;
2740 2743 dev = DDI_DEV_T_ANY;
2741 2744 }
2742 2745
2743 2746 /*
2744 2747 * first call the driver's prop_op interface to allow it
2745 2748 * to override default property values.
2746 2749 */
2747 2750 res = i_ldi_prop_op(dev, dip, PROP_LEN,
2748 2751 flags | DDI_PROP_DYNAMIC, name, NULL, &prop_len);
2749 2752
2750 2753 if (res == DDI_PROP_SUCCESS) {
2751 2754 ddi_release_devi(dip);
2752 2755 return (1);
2753 2756 }
2754 2757
2755 2758 /* call the normal property interfaces */
2756 2759 res = ddi_prop_exists(dev, dip, flags, name);
2757 2760
2758 2761 ddi_release_devi(dip);
2759 2762 return (res);
2760 2763 }
2761 2764
2762 2765 #ifdef LDI_OBSOLETE_EVENT
2763 2766
2764 2767 int
2765 2768 ldi_get_eventcookie(ldi_handle_t lh, char *name, ddi_eventcookie_t *ecp)
2766 2769 {
2767 2770 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2768 2771 dev_info_t *dip;
2769 2772 dev_t dev;
2770 2773 int res;
2771 2774 struct snode *csp;
2772 2775
2773 2776 if ((lh == NULL) || (name == NULL) ||
2774 2777 (strlen(name) == 0) || (ecp == NULL)) {
2775 2778 return (DDI_FAILURE);
2776 2779 }
2777 2780
2778 2781 ASSERT(!servicing_interrupt());
2779 2782
2780 2783 dev = handlep->lh_vp->v_rdev;
2781 2784
2782 2785 csp = VTOCS(handlep->lh_vp);
2783 2786 mutex_enter(&csp->s_lock);
2784 2787 if ((dip = csp->s_dip) != NULL)
2785 2788 e_ddi_hold_devi(dip);
2786 2789 mutex_exit(&csp->s_lock);
2787 2790 if (dip == NULL)
2788 2791 dip = e_ddi_hold_devi_by_dev(dev, 0);
2789 2792
2790 2793 if (dip == NULL)
2791 2794 return (DDI_FAILURE);
2792 2795
2793 2796 LDI_EVENTCB((CE_NOTE, "%s: event_name=%s, "
2794 2797 "dip=0x%p, event_cookiep=0x%p", "ldi_get_eventcookie",
2795 2798 name, (void *)dip, (void *)ecp));
2796 2799
2797 2800 res = ddi_get_eventcookie(dip, name, ecp);
2798 2801
2799 2802 ddi_release_devi(dip);
2800 2803 return (res);
2801 2804 }
2802 2805
2803 2806 int
2804 2807 ldi_add_event_handler(ldi_handle_t lh, ddi_eventcookie_t ec,
2805 2808 void (*handler)(ldi_handle_t, ddi_eventcookie_t, void *, void *),
2806 2809 void *arg, ldi_callback_id_t *id)
2807 2810 {
2808 2811 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2809 2812 struct ldi_event *lep;
2810 2813 dev_info_t *dip;
2811 2814 dev_t dev;
2812 2815 int res;
2813 2816 struct snode *csp;
2814 2817
2815 2818 if ((lh == NULL) || (ec == NULL) || (handler == NULL) || (id == NULL))
2816 2819 return (DDI_FAILURE);
2817 2820
2818 2821 ASSERT(!servicing_interrupt());
2819 2822
2820 2823 dev = handlep->lh_vp->v_rdev;
2821 2824
2822 2825 csp = VTOCS(handlep->lh_vp);
2823 2826 mutex_enter(&csp->s_lock);
2824 2827 if ((dip = csp->s_dip) != NULL)
2825 2828 e_ddi_hold_devi(dip);
2826 2829 mutex_exit(&csp->s_lock);
2827 2830 if (dip == NULL)
2828 2831 dip = e_ddi_hold_devi_by_dev(dev, 0);
2829 2832
2830 2833 if (dip == NULL)
2831 2834 return (DDI_FAILURE);
2832 2835
2833 2836 lep = kmem_zalloc(sizeof (struct ldi_event), KM_SLEEP);
2834 2837 lep->le_lhp = handlep;
2835 2838 lep->le_arg = arg;
2836 2839 lep->le_handler = handler;
2837 2840
2838 2841 if ((res = ddi_add_event_handler(dip, ec, i_ldi_callback,
2839 2842 (void *)lep, &lep->le_id)) != DDI_SUCCESS) {
2840 2843 LDI_EVENTCB((CE_WARN, "%s: unable to add "
2841 2844 "event callback", "ldi_add_event_handler"));
2842 2845 ddi_release_devi(dip);
2843 2846 kmem_free(lep, sizeof (struct ldi_event));
2844 2847 return (res);
2845 2848 }
2846 2849
2847 2850 *id = (ldi_callback_id_t)lep;
2848 2851
2849 2852 LDI_EVENTCB((CE_NOTE, "%s: dip=0x%p, event=0x%p, "
2850 2853 "ldi_eventp=0x%p, cb_id=0x%p", "ldi_add_event_handler",
2851 2854 (void *)dip, (void *)ec, (void *)lep, (void *)id));
2852 2855
2853 2856 handle_event_add(lep);
2854 2857 ddi_release_devi(dip);
2855 2858 return (res);
2856 2859 }
2857 2860
2858 2861 int
2859 2862 ldi_remove_event_handler(ldi_handle_t lh, ldi_callback_id_t id)
2860 2863 {
2861 2864 ldi_event_t *lep = (ldi_event_t *)id;
2862 2865 int res;
2863 2866
2864 2867 if ((lh == NULL) || (id == NULL))
2865 2868 return (DDI_FAILURE);
2866 2869
2867 2870 ASSERT(!servicing_interrupt());
2868 2871
2869 2872 if ((res = ddi_remove_event_handler(lep->le_id))
2870 2873 != DDI_SUCCESS) {
2871 2874 LDI_EVENTCB((CE_WARN, "%s: unable to remove "
2872 2875 "event callback", "ldi_remove_event_handler"));
2873 2876 return (res);
2874 2877 }
2875 2878
2876 2879 handle_event_remove(lep);
2877 2880 kmem_free(lep, sizeof (struct ldi_event));
2878 2881 return (res);
2879 2882 }
2880 2883
2881 2884 #endif
2882 2885
2883 2886 /*
2884 2887 * Here are some definitions of terms used in the following LDI events
2885 2888 * code:
2886 2889 *
2887 2890 * "LDI events" AKA "native events": These are events defined by the
2888 2891 * "new" LDI event framework. These events are serviced by the LDI event
2889 2892 * framework itself and thus are native to it.
2890 2893 *
2891 2894 * "LDI contract events": These are contract events that correspond to the
2892 2895 * LDI events. This mapping of LDI events to contract events is defined by
2893 2896 * the ldi_ev_cookies[] array above.
2894 2897 *
2895 2898 * NDI events: These are events which are serviced by the NDI event subsystem.
2896 2899 * The LDI subsystem just provides a thin wrapper around the NDI event interfaces.
2897 2900 * These events are therefore *not* native events.
2898 2901 */
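
The ldi_ev_cookies[] array referenced above is defined earlier in the file and is not part of this hunk. Going only by the accessors below (ck_evname, ck_sync, ck_ctype), each entry has roughly the following shape; the struct may carry additional fields, and the values shown are placeholders, not the real table contents:

	/*
	 * Illustrative sketch only -- the real table lives earlier in this
	 * file.  ck_evname is the LDI event name, ck_sync marks the event
	 * as negotiable (synchronous), and ck_ctype is the corresponding
	 * device contract event type.  A NULL ck_evname ends the list.
	 */
	static struct ldi_ev_cookie example_cookies[] = {
		{ .ck_evname = "LDI:EVENT:EXAMPLE", .ck_sync = 1, .ck_ctype = 0 },
		{ .ck_evname = NULL }
	};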
2899 2902
2900 2903 static int
2901 2904 ldi_native_event(const char *evname)
2902 2905 {
2903 2906 int i;
2904 2907
2905 2908 LDI_EVTRC((CE_NOTE, "ldi_native_event: entered: ev=%s", evname));
2906 2909
2907 2910 for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2908 2911 if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0)
2909 2912 return (1);
2910 2913 }
2911 2914
2912 2915 return (0);
2913 2916 }
2914 2917
2915 2918 static uint_t
2916 2919 ldi_ev_sync_event(const char *evname)
2917 2920 {
2918 2921 int i;
2919 2922
2920 2923 ASSERT(ldi_native_event(evname));
2921 2924
2922 2925 LDI_EVTRC((CE_NOTE, "ldi_ev_sync_event: entered: %s", evname));
2923 2926
2924 2927 for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2925 2928 if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0)
2926 2929 return (ldi_ev_cookies[i].ck_sync);
2927 2930 }
2928 2931
2929 2932 /*
2930 2933 * This should never happen until non-contract based
2931 2934 * LDI events are introduced. If that happens, we will
2932 2935 * use a "special" token to indicate that there are no
2933 2936 * contracts corresponding to this LDI event.
2934 2937 */
2935 2938 cmn_err(CE_PANIC, "Unknown LDI event: %s", evname);
2936 2939
2937 2940 return (0);
2938 2941 }
2939 2942
2940 2943 static uint_t
2941 2944 ldi_contract_event(const char *evname)
2942 2945 {
2943 2946 int i;
2944 2947
2945 2948 ASSERT(ldi_native_event(evname));
2946 2949
2947 2950 LDI_EVTRC((CE_NOTE, "ldi_contract_event: entered: %s", evname));
2948 2951
2949 2952 for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2950 2953 if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0)
2951 2954 return (ldi_ev_cookies[i].ck_ctype);
2952 2955 }
2953 2956
2954 2957 /*
2955 2958 * This should never happen until non-contract based
2956 2959 * LDI events are introduced. If that happens, we will
2957 2960 * use a "special" token to indicate that there are no
2958 2961 * contracts corresponding to this LDI event.
2959 2962 */
2960 2963 cmn_err(CE_PANIC, "Unknown LDI event: %s", evname);
2961 2964
2962 2965 return (0);
2963 2966 }
2964 2967
2965 2968 char *
2966 2969 ldi_ev_get_type(ldi_ev_cookie_t cookie)
2967 2970 {
2968 2971 int i;
2969 2972 struct ldi_ev_cookie *cookie_impl = (struct ldi_ev_cookie *)cookie;
2970 2973
2971 2974 for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2972 2975 if (&ldi_ev_cookies[i] == cookie_impl) {
2973 2976 LDI_EVTRC((CE_NOTE, "ldi_ev_get_type: LDI: %s",
2974 2977 ldi_ev_cookies[i].ck_evname));
2975 2978 return (ldi_ev_cookies[i].ck_evname);
2976 2979 }
2977 2980 }
2978 2981
2979 2982 /*
2980 2983 * Not an LDI native event. Must be NDI event service.
2981 2984 * Just return a generic string
2982 2985 */
2983 2986 LDI_EVTRC((CE_NOTE, "ldi_ev_get_type: is NDI"));
2984 2987 return (NDI_EVENT_SERVICE);
2985 2988 }
2986 2989
2987 2990 static int
2988 2991 ldi_native_cookie(ldi_ev_cookie_t cookie)
2989 2992 {
2990 2993 int i;
2991 2994 struct ldi_ev_cookie *cookie_impl = (struct ldi_ev_cookie *)cookie;
2992 2995
2993 2996 for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2994 2997 if (&ldi_ev_cookies[i] == cookie_impl) {
2995 2998 LDI_EVTRC((CE_NOTE, "ldi_native_cookie: native LDI"));
2996 2999 return (1);
2997 3000 }
2998 3001 }
2999 3002
3000 3003 LDI_EVTRC((CE_NOTE, "ldi_native_cookie: is NDI"));
3001 3004 return (0);
3002 3005 }
3003 3006
3004 3007 static ldi_ev_cookie_t
3005 3008 ldi_get_native_cookie(const char *evname)
3006 3009 {
3007 3010 int i;
3008 3011
3009 3012 for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
3010 3013 if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0) {
3011 3014 LDI_EVTRC((CE_NOTE, "ldi_get_native_cookie: found"));
3012 3015 return ((ldi_ev_cookie_t)&ldi_ev_cookies[i]);
3013 3016 }
3014 3017 }
3015 3018
3016 3019 LDI_EVTRC((CE_NOTE, "ldi_get_native_cookie: NOT found"));
3017 3020 return (NULL);
3018 3021 }
3019 3022
3020 3023 /*
3021 3024 * ldi_ev_lock() needs to be recursive, since layered drivers may call
3022 3025 * other LDI interfaces (such as ldi_close() from within the context of
3023 3026 * a notify callback. Since the notify callback is called with the
3024 3027 * ldi_ev_lock() held and ldi_close() also grabs ldi_ev_lock, the lock needs
3025 3028 * to be recursive.
3026 3029 */
3027 3030 static void
3028 3031 ldi_ev_lock(void)
3029 3032 {
3030 3033 LDI_EVTRC((CE_NOTE, "ldi_ev_lock: entered"));
3031 3034
3032 3035 mutex_enter(&ldi_ev_callback_list.le_lock);
3033 3036 if (ldi_ev_callback_list.le_thread == curthread) {
3034 3037 ASSERT(ldi_ev_callback_list.le_busy >= 1);
3035 3038 ldi_ev_callback_list.le_busy++;
3036 3039 } else {
3037 3040 while (ldi_ev_callback_list.le_busy)
3038 3041 cv_wait(&ldi_ev_callback_list.le_cv,
3039 3042 &ldi_ev_callback_list.le_lock);
3040 3043 ASSERT(ldi_ev_callback_list.le_thread == NULL);
3041 3044 ldi_ev_callback_list.le_busy = 1;
3042 3045 ldi_ev_callback_list.le_thread = curthread;
3043 3046 }
3044 3047 mutex_exit(&ldi_ev_callback_list.le_lock);
3045 3048
3046 3049 LDI_EVTRC((CE_NOTE, "ldi_ev_lock: exit"));
3047 3050 }
3048 3051
3049 3052 static void
3050 3053 ldi_ev_unlock(void)
3051 3054 {
3052 3055 LDI_EVTRC((CE_NOTE, "ldi_ev_unlock: entered"));
3053 3056 mutex_enter(&ldi_ev_callback_list.le_lock);
3054 3057 ASSERT(ldi_ev_callback_list.le_thread == curthread);
3055 3058 ASSERT(ldi_ev_callback_list.le_busy >= 1);
3056 3059
3057 3060 ldi_ev_callback_list.le_busy--;
3058 3061 if (ldi_ev_callback_list.le_busy == 0) {
3059 3062 ldi_ev_callback_list.le_thread = NULL;
3060 3063 cv_signal(&ldi_ev_callback_list.le_cv);
3061 3064 }
3062 3065 mutex_exit(&ldi_ev_callback_list.le_lock);
3063 3066 LDI_EVTRC((CE_NOTE, "ldi_ev_unlock: exit"));
3064 3067 }
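
To make the recursion described above concrete, here is a hypothetical layered-driver notify callback that drops its handle from within the callback; the ldi_close() call re-enters ldi_ev_lock() on the same thread, which is why the lock counts re-entries instead of blocking. The callback name and its state structure are illustrative only:

	/*
	 * Hypothetical consumer callback (not part of this file).  It runs
	 * with ldi_ev_lock() held by ldi_invoke_notify(); the ldi_close()
	 * below re-acquires the same lock recursively on this thread.
	 */
	static int
	example_notify(ldi_handle_t lh, ldi_ev_cookie_t cookie, void *arg,
	    void *ev_data)
	{
		struct example_state *sp = arg;	/* hypothetical consumer state */

		/* stop issuing I/O and drop our hold on the device */
		(void) ldi_close(lh, FREAD | FWRITE, kcred);
		sp->es_closed = B_TRUE;	/* remembered for the finalize call */

		return (LDI_EV_SUCCESS);	/* allow the event to proceed */
	}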
3065 3068
3066 3069 int
3067 3070 ldi_ev_get_cookie(ldi_handle_t lh, char *evname, ldi_ev_cookie_t *cookiep)
3068 3071 {
3069 3072 struct ldi_handle *handlep = (struct ldi_handle *)lh;
3070 3073 dev_info_t *dip;
3071 3074 dev_t dev;
3072 3075 int res;
3073 3076 struct snode *csp;
3074 3077 ddi_eventcookie_t ddi_cookie;
3075 3078 ldi_ev_cookie_t tcookie;
3076 3079
3077 3080 LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: entered: evname=%s",
3078 3081 evname ? evname : "<NULL>"));
3079 3082
3080 3083 if (lh == NULL || evname == NULL ||
3081 3084 strlen(evname) == 0 || cookiep == NULL) {
3082 3085 LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: invalid args"));
3083 3086 return (LDI_EV_FAILURE);
3084 3087 }
3085 3088
3086 3089 *cookiep = NULL;
3087 3090
3088 3091 /*
3089 3092 * First check if it is a LDI native event
3090 3093 */
3091 3094 tcookie = ldi_get_native_cookie(evname);
3092 3095 if (tcookie) {
3093 3096 LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: got native cookie"));
3094 3097 *cookiep = tcookie;
3095 3098 return (LDI_EV_SUCCESS);
3096 3099 }
3097 3100
3098 3101 /*
3099 3102 * Not a LDI native event. Try NDI event services
3100 3103 */
3101 3104
3102 3105 dev = handlep->lh_vp->v_rdev;
3103 3106
3104 3107 csp = VTOCS(handlep->lh_vp);
3105 3108 mutex_enter(&csp->s_lock);
3106 3109 if ((dip = csp->s_dip) != NULL)
3107 3110 e_ddi_hold_devi(dip);
3108 3111 mutex_exit(&csp->s_lock);
3109 3112 if (dip == NULL)
3110 3113 dip = e_ddi_hold_devi_by_dev(dev, 0);
3111 3114
3112 3115 if (dip == NULL) {
3113 3116 cmn_err(CE_WARN, "ldi_ev_get_cookie: No devinfo node for LDI "
3114 3117 "handle: %p", (void *)handlep);
3115 3118 return (LDI_EV_FAILURE);
3116 3119 }
3117 3120
3118 3121 LDI_EVDBG((CE_NOTE, "Calling ddi_get_eventcookie: dip=%p, ev=%s",
3119 3122 (void *)dip, evname));
3120 3123
3121 3124 res = ddi_get_eventcookie(dip, evname, &ddi_cookie);
3122 3125
3123 3126 ddi_release_devi(dip);
3124 3127
3125 3128 if (res == DDI_SUCCESS) {
3126 3129 LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: NDI cookie found"));
3127 3130 *cookiep = (ldi_ev_cookie_t)ddi_cookie;
3128 3131 return (LDI_EV_SUCCESS);
3129 3132 } else {
3130 3133 LDI_EVDBG((CE_WARN, "ldi_ev_get_cookie: NDI cookie: failed"));
3131 3134 return (LDI_EV_FAILURE);
3132 3135 }
3133 3136 }
3134 3137
3135 3138 /*ARGSUSED*/
3136 3139 static void
3137 3140 i_ldi_ev_callback(dev_info_t *dip, ddi_eventcookie_t event_cookie,
3138 3141 void *arg, void *ev_data)
3139 3142 {
3140 3143 ldi_ev_callback_impl_t *lecp = (ldi_ev_callback_impl_t *)arg;
3141 3144
3142 3145 ASSERT(lecp != NULL);
3143 3146 ASSERT(!ldi_native_cookie(lecp->lec_cookie));
3144 3147 ASSERT(lecp->lec_lhp);
3145 3148 ASSERT(lecp->lec_notify == NULL);
3146 3149 ASSERT(lecp->lec_finalize);
3147 3150
3148 3151 LDI_EVDBG((CE_NOTE, "i_ldi_ev_callback: ldh=%p, cookie=%p, arg=%p, "
3149 3152 "ev_data=%p", (void *)lecp->lec_lhp, (void *)event_cookie,
3150 3153 (void *)lecp->lec_arg, (void *)ev_data));
3151 3154
3152 3155 lecp->lec_finalize(lecp->lec_lhp, (ldi_ev_cookie_t)event_cookie,
3153 3156 lecp->lec_arg, ev_data);
3154 3157 }
3155 3158
3156 3159 int
3157 3160 ldi_ev_register_callbacks(ldi_handle_t lh, ldi_ev_cookie_t cookie,
3158 3161 ldi_ev_callback_t *callb, void *arg, ldi_callback_id_t *id)
3159 3162 {
3160 3163 struct ldi_handle *lhp = (struct ldi_handle *)lh;
3161 3164 ldi_ev_callback_impl_t *lecp;
3162 3165 dev_t dev;
3163 3166 struct snode *csp;
3164 3167 dev_info_t *dip;
3165 3168 int ddi_event;
3166 3169
3167 3170 ASSERT(!servicing_interrupt());
3168 3171
3169 3172 if (lh == NULL || cookie == NULL || callb == NULL || id == NULL) {
3170 3173 LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: Invalid args"));
3171 3174 return (LDI_EV_FAILURE);
3172 3175 }
3173 3176
3174 3177 if (callb->cb_vers != LDI_EV_CB_VERS) {
3175 3178 LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: Invalid vers"));
3176 3179 return (LDI_EV_FAILURE);
3177 3180 }
3178 3181
3179 3182 if (callb->cb_notify == NULL && callb->cb_finalize == NULL) {
3180 3183 LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: NULL callb"));
3181 3184 return (LDI_EV_FAILURE);
3182 3185 }
3183 3186
3184 3187 *id = 0;
3185 3188
3186 3189 dev = lhp->lh_vp->v_rdev;
3187 3190 csp = VTOCS(lhp->lh_vp);
3188 3191 mutex_enter(&csp->s_lock);
3189 3192 if ((dip = csp->s_dip) != NULL)
3190 3193 e_ddi_hold_devi(dip);
3191 3194 mutex_exit(&csp->s_lock);
3192 3195 if (dip == NULL)
3193 3196 dip = e_ddi_hold_devi_by_dev(dev, 0);
3194 3197
3195 3198 if (dip == NULL) {
3196 3199 cmn_err(CE_WARN, "ldi_ev_register: No devinfo node for "
3197 3200 "LDI handle: %p", (void *)lhp);
3198 3201 return (LDI_EV_FAILURE);
3199 3202 }
3200 3203
3201 3204 lecp = kmem_zalloc(sizeof (ldi_ev_callback_impl_t), KM_SLEEP);
3202 3205
3203 3206 ddi_event = 0;
3204 3207 if (!ldi_native_cookie(cookie)) {
3205 3208 if (callb->cb_notify || callb->cb_finalize == NULL) {
3206 3209 /*
3207 3210 * NDI event services only accept finalize
3208 3211 */
3209 3212 cmn_err(CE_WARN, "%s: module: %s: NDI event cookie. "
3210 3213 "Only finalize"
3211 3214 " callback supported with this cookie",
3212 3215 "ldi_ev_register_callbacks",
3213 3216 lhp->lh_ident->li_modname);
3214 3217 kmem_free(lecp, sizeof (ldi_ev_callback_impl_t));
3215 3218 ddi_release_devi(dip);
3216 3219 return (LDI_EV_FAILURE);
3217 3220 }
3218 3221
3219 3222 if (ddi_add_event_handler(dip, (ddi_eventcookie_t)cookie,
3220 3223 i_ldi_ev_callback, (void *)lecp,
3221 3224 (ddi_callback_id_t *)&lecp->lec_id)
3222 3225 != DDI_SUCCESS) {
3223 3226 kmem_free(lecp, sizeof (ldi_ev_callback_impl_t));
3224 3227 ddi_release_devi(dip);
3225 3228 LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks(): "
3226 3229 "ddi_add_event_handler failed"));
3227 3230 return (LDI_EV_FAILURE);
3228 3231 }
3229 3232 ddi_event = 1;
3230 3233 LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks(): "
3231 3234 "ddi_add_event_handler success"));
3232 3235 }
3233 3236
3234 3237
3235 3238
3236 3239 ldi_ev_lock();
3237 3240
3238 3241 /*
3239 3242 * Add the notify/finalize callback to the LDI's list of callbacks.
3240 3243 */
3241 3244 lecp->lec_lhp = lhp;
3242 3245 lecp->lec_dev = lhp->lh_vp->v_rdev;
3243 3246 lecp->lec_spec = VTYP_TO_STYP(lhp->lh_vp->v_type);
3244 3247 lecp->lec_notify = callb->cb_notify;
3245 3248 lecp->lec_finalize = callb->cb_finalize;
3246 3249 lecp->lec_arg = arg;
3247 3250 lecp->lec_cookie = cookie;
3248 3251 if (!ddi_event)
3249 3252 lecp->lec_id = (void *)(uintptr_t)(++ldi_ev_id_pool);
3250 3253 else
3251 3254 ASSERT(lecp->lec_id);
3252 3255 lecp->lec_dip = dip;
3253 3256 list_insert_tail(&ldi_ev_callback_list.le_head, lecp);
3254 3257
3255 3258 *id = (ldi_callback_id_t)lecp->lec_id;
3256 3259
3257 3260 ldi_ev_unlock();
3258 3261
3259 3262 ddi_release_devi(dip);
3260 3263
3261 3264 LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: registered "
3262 3265 "notify/finalize"));
3263 3266
3264 3267 return (LDI_EV_SUCCESS);
3265 3268 }
3266 3269
3267 3270 static int
3268 3271 ldi_ev_device_match(ldi_ev_callback_impl_t *lecp, dev_info_t *dip,
3269 3272 dev_t dev, int spec_type)
3270 3273 {
3271 3274 ASSERT(lecp);
3272 3275 ASSERT(dip);
3273 3276 ASSERT(dev != DDI_DEV_T_NONE);
3274 3277 ASSERT(dev != NODEV);
3275 3278 ASSERT((dev == DDI_DEV_T_ANY && spec_type == 0) ||
3276 3279 (spec_type == S_IFCHR || spec_type == S_IFBLK));
3277 3280 ASSERT(lecp->lec_dip);
3278 3281 ASSERT(lecp->lec_spec == S_IFCHR || lecp->lec_spec == S_IFBLK);
3279 3282 ASSERT(lecp->lec_dev != DDI_DEV_T_ANY);
3280 3283 ASSERT(lecp->lec_dev != DDI_DEV_T_NONE);
3281 3284 ASSERT(lecp->lec_dev != NODEV);
3282 3285
3283 3286 if (dip != lecp->lec_dip)
3284 3287 return (0);
3285 3288
3286 3289 if (dev != DDI_DEV_T_ANY) {
3287 3290 if (dev != lecp->lec_dev || spec_type != lecp->lec_spec)
3288 3291 return (0);
3289 3292 }
3290 3293
3291 3294 LDI_EVTRC((CE_NOTE, "ldi_ev_device_match: MATCH dip=%p", (void *)dip));
3292 3295
3293 3296 return (1);
3294 3297 }
3295 3298
3296 3299 /*
3297 3300 * LDI framework function to post a "notify" event to all layered drivers
3298 3301 * that have registered for that event
3299 3302 *
3300 3303 * Returns:
3301 3304 * LDI_EV_SUCCESS - registered callbacks allow event
3302 3305 * LDI_EV_FAILURE - registered callbacks block event
3303 3306 * LDI_EV_NONE - No matching LDI callbacks
3304 3307 *
3305 3308 * This function is *not* to be called by layered drivers. It is for I/O
3306 3309 * framework code in Solaris, such as the I/O retire code and DR code
3307 3310 * to call while servicing a device event such as offline or degraded.
3308 3311 */
3309 3312 int
3310 3313 ldi_invoke_notify(dev_info_t *dip, dev_t dev, int spec_type, char *event,
3311 3314 void *ev_data)
3312 3315 {
3313 3316 ldi_ev_callback_impl_t *lecp;
3314 3317 list_t *listp;
3315 3318 int ret;
3316 3319 char *lec_event;
3317 3320
3318 3321 ASSERT(dip);
3319 3322 ASSERT(dev != DDI_DEV_T_NONE);
3320 3323 ASSERT(dev != NODEV);
3321 3324 ASSERT((dev == DDI_DEV_T_ANY && spec_type == 0) ||
3322 3325 (spec_type == S_IFCHR || spec_type == S_IFBLK));
3323 3326 ASSERT(event);
3324 3327 ASSERT(ldi_native_event(event));
3325 3328 ASSERT(ldi_ev_sync_event(event));
3326 3329
3327 3330 LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): entered: dip=%p, ev=%s",
3328 3331 (void *)dip, event));
3329 3332
3330 3333 ret = LDI_EV_NONE;
3331 3334 ldi_ev_lock();
3335 + VERIFY(walker_next == NULL);
3332 3336 listp = &ldi_ev_callback_list.le_head;
3333 - for (lecp = list_head(listp); lecp; lecp = list_next(listp, lecp)) {
3337 + for (lecp = list_head(listp); lecp; lecp = walker_next) {
3338 + walker_next = list_next(listp, lecp);
3334 3339
3335 3340 /* Check if matching device */
3336 3341 if (!ldi_ev_device_match(lecp, dip, dev, spec_type))
3337 3342 continue;
3338 3343
3339 3344 if (lecp->lec_lhp == NULL) {
3340 3345 /*
3341 3346 * Consumer has unregistered the handle and so
3342 3347 * is no longer interested in notify events.
3343 3348 */
3344 3349 LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): No LDI "
3345 3350 "handle, skipping"));
3346 3351 continue;
3347 3352 }
3348 3353
3349 3354 if (lecp->lec_notify == NULL) {
3350 3355 LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): No notify "
3351 3356 "callback. skipping"));
3352 3357 continue; /* not interested in notify */
3353 3358 }
3354 3359
3355 3360 /*
3356 3361 * Check if matching event
3357 3362 */
3358 3363 lec_event = ldi_ev_get_type(lecp->lec_cookie);
3359 3364 if (strcmp(event, lec_event) != 0) {
3360 3365 LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): Not matching"
3361 3366 " event {%s,%s}. skipping", event, lec_event));
3362 3367 continue;
3363 3368 }
3364 3369
3365 3370 lecp->lec_lhp->lh_flags |= LH_FLAGS_NOTIFY;
3366 3371 if (lecp->lec_notify(lecp->lec_lhp, lecp->lec_cookie,
3367 3372 lecp->lec_arg, ev_data) != LDI_EV_SUCCESS) {
3368 3373 ret = LDI_EV_FAILURE;
3369 3374 LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): notify"
3370 3375 " FAILURE"));
3371 3376 break;
3372 3377 }
3373 3378
3374 3379 /* We have a matching callback that allows the event to occur */
3375 3380 ret = LDI_EV_SUCCESS;
3376 3381
3377 3382 LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): 1 consumer success"));
3378 3383 }
3379 3384
3380 3385 if (ret != LDI_EV_FAILURE)
3381 3386 goto out;
3382 3387
3383 3388 LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): undoing notify"));
3384 3389
3385 3390 /*
3386 3391 * Undo notifies already sent
3387 3392 */
3388 3393 lecp = list_prev(listp, lecp);
3389 - for (; lecp; lecp = list_prev(listp, lecp)) {
3394 + VERIFY(walker_prev == NULL);
3395 + for (; lecp; lecp = walker_prev) {
3396 + walker_prev = list_prev(listp, lecp);
3390 3397
3391 3398 /*
3392 3399 * Check if matching device
3393 3400 */
3394 3401 if (!ldi_ev_device_match(lecp, dip, dev, spec_type))
3395 3402 continue;
3396 3403
3397 3404
3398 3405 if (lecp->lec_finalize == NULL) {
3399 3406 LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): no finalize, "
3400 3407 "skipping"));
3401 3408 continue; /* not interested in finalize */
3402 3409 }
3403 3410
3404 3411 /*
3405 3412 * it is possible that in response to a notify event a
3406 3413 * layered driver closed its LDI handle so it is ok
3407 3414 * to have a NULL LDI handle for finalize. The layered
3408 3415 * driver is expected to maintain state in its "arg"
3409 3416 * parameter to keep track of the closed device.
3410 3417 */
3411 3418
3412 3419 /* Check if matching event */
3413 3420 lec_event = ldi_ev_get_type(lecp->lec_cookie);
3414 3421 if (strcmp(event, lec_event) != 0) {
3415 3422 LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): not matching "
3416 3423 "event: %s,%s, skipping", event, lec_event));
3417 3424 continue;
3418 3425 }
3419 3426
3420 3427 LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): calling finalize"));
3421 3428
3422 3429 lecp->lec_finalize(lecp->lec_lhp, lecp->lec_cookie,
3423 3430 LDI_EV_FAILURE, lecp->lec_arg, ev_data);
3424 3431
3425 3432 /*
3426 3433 * If this is an LDI native event and the LDI handle was closed in
3427 3434 * the context of notify, NULL out the finalize callback: we have
3428 3435 * already made the one finalize call allowed in this situation.
3429 3436 */
3430 3437 if (lecp->lec_lhp == NULL &&
3431 3438 ldi_native_cookie(lecp->lec_cookie)) {
3432 3439 LDI_EVDBG((CE_NOTE,
3433 3440 "ldi_invoke_notify(): NULL-ing finalize after "
3434 3441 "calling 1 finalize following ldi_close"));
3435 3442 lecp->lec_finalize = NULL;
3436 3443 }
3437 3444 }
3438 3445
3439 3446 out:
3447 + walker_next = NULL;
3448 + walker_prev = NULL;
3440 3449 ldi_ev_unlock();
3441 3450
3442 3451 if (ret == LDI_EV_NONE) {
3443 3452 LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): no matching "
3444 3453 "LDI callbacks"));
3445 3454 }
3446 3455
3447 3456 return (ret);
3448 3457 }
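
The walker_next/walker_prev cursors added above are the removal-safety mechanism this change introduces: the next (or previous) entry is captured before each callback is invoked, and the path that removes entries from ldi_ev_callback_list is presumably expected to advance these shared cursors when it unlinks the entry a walk would visit next. That removal-side fixup is not visible in this hunk; a sketch of what it might look like, assuming walker_next and walker_prev are file-scope cursors manipulated under ldi_ev_lock(), is:

	/*
	 * Sketch only -- assumes walker_next/walker_prev are the shared
	 * cursors used by ldi_invoke_notify() and that the caller holds
	 * ldi_ev_lock().  If the entry being removed is the one a walk
	 * would visit next (or previous), step the cursor past it first.
	 */
	static void
	example_remove_callback(ldi_ev_callback_impl_t *lecp)
	{
		list_t *listp = &ldi_ev_callback_list.le_head;

		if (walker_next == lecp)
			walker_next = list_next(listp, lecp);
		if (walker_prev == lecp)
			walker_prev = list_prev(listp, lecp);
		list_remove(listp, lecp);
	}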
3449 3458
3450 3459 /*
3451 3460 * Framework function to be called from a layered driver to propagate
3452 3461 * LDI "notify" events to exported minors.
3453 3462 *
3454 3463 * This function is a public interface exported by the LDI framework
3455 3464 * for use by layered drivers to propagate device events up the software
3456 3465 * stack.
3457 3466 */
3458 3467 int
3459 3468 ldi_ev_notify(dev_info_t *dip, minor_t minor, int spec_type,
3460 3469 ldi_ev_cookie_t cookie, void *ev_data)
3461 3470 {
3462 3471 char *evname = ldi_ev_get_type(cookie);
3463 3472 uint_t ct_evtype;
3464 3473 dev_t dev;
3465 3474 major_t major;
3466 3475 int retc;
3467 3476 int retl;
3468 3477
3469 3478 ASSERT(spec_type == S_IFBLK || spec_type == S_IFCHR);
3470 3479 ASSERT(dip);
3471 3480 ASSERT(ldi_native_cookie(cookie));
3472 3481
3473 3482 LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): entered: event=%s, dip=%p",
3474 3483 evname, (void *)dip));
3475 3484
3476 3485 if (!ldi_ev_sync_event(evname)) {
3477 3486 cmn_err(CE_PANIC, "ldi_ev_notify(): %s not a "
3478 3487 "negotiatable event", evname);
3479 3488 return (LDI_EV_SUCCESS);
3480 3489 }
3481 3490
3482 3491 major = ddi_driver_major(dip);
3483 3492 if (major == DDI_MAJOR_T_NONE) {
3484 3493 char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3485 3494 (void) ddi_pathname(dip, path);
3486 3495 cmn_err(CE_WARN, "ldi_ev_notify: cannot derive major number "
3487 3496 "for device %s", path);
3488 3497 kmem_free(path, MAXPATHLEN);
3489 3498 return (LDI_EV_FAILURE);
3490 3499 }
3491 3500 dev = makedevice(major, minor);
3492 3501
3493 3502 /*
3494 3503 * Generate negotiation contract events on contracts (if any) associated
3495 3504 * with this minor.
3496 3505 */
3497 3506 LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): calling contract nego."));
3498 3507 ct_evtype = ldi_contract_event(evname);
3499 3508 retc = contract_device_negotiate(dip, dev, spec_type, ct_evtype);
3500 3509 if (retc == CT_NACK) {
3501 3510 LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): contract neg. NACK"));
3502 3511 return (LDI_EV_FAILURE);
3503 3512 }
3504 3513
3505 3514 LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): LDI invoke notify"));
3506 3515 retl = ldi_invoke_notify(dip, dev, spec_type, evname, ev_data);
3507 3516 if (retl == LDI_EV_FAILURE) {
3508 3517 LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): ldi_invoke_notify "
3509 3518 "returned FAILURE. Calling contract negend"));
3510 3519 contract_device_negend(dip, dev, spec_type, CT_EV_FAILURE);
3511 3520 return (LDI_EV_FAILURE);
3512 3521 }
3513 3522
3514 3523 /*
3515 3524 * The very fact that we are here indicates that there is a
3516 3525 * LDI callback (and hence a constraint) for the retire of the
3517 3526 * HW device. So we just return success even if there are no
3518 3527 * contracts or LDI callbacks against the minors layered on top
3519 3528 * of the HW minors
3520 3529 */
3521 3530 LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): returning SUCCESS"));
3522 3531 return (LDI_EV_SUCCESS);
3523 3532 }
3524 3533
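ldi_ev_notify() is the public mirror of ldi_invoke_notify(): a layered driver that exports minors of its own calls it from its notify callback so the negotiation keeps moving up the stack. A hedged sketch of that consumer side follows; the xx_* driver, its state structure, and its fields are assumptions, while the callback signature and the LDI calls are the real interfaces.

    /* Notify callback registered via ldi_ev_register_callbacks(9F). */
    static int
    xx_notify(ldi_handle_t lh, ldi_ev_cookie_t cookie, void *arg,
        void *ev_data)
    {
        xx_state_t *xsp = arg;      /* hypothetical per-instance state */

        /* Veto the event if this driver cannot give the device up. */
        if (xsp->xx_busy)
            return (LDI_EV_FAILURE);

        /*
         * Propagate the event to whoever has this driver's own
         * exported minor open; dip/minor here name our minor node,
         * not the underlying device.
         */
        return (ldi_ev_notify(xsp->xx_dip, xsp->xx_minor, S_IFCHR,
            cookie, ev_data));
    }

If any consumer above vetoes, ldi_ev_notify() returns LDI_EV_FAILURE, which this callback returns in turn, and the framework later runs the finalize pass with LDI_EV_FAILURE.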
3525 3534 /*
3526 3535 * LDI framework function to invoke "finalize" callbacks for all layered
3527 3536 * drivers that have registered callbacks for that event.
3528 3537 *
3529 3538 * This function is *not* to be called by layered drivers. It is for I/O
3530 3539 * framework code in Solaris, such as the I/O retire code and DR code
3531 3540 * to call while servicing a device event such as offline or degraded.
3532 3541 */
3533 3542 void
3534 3543 ldi_invoke_finalize(dev_info_t *dip, dev_t dev, int spec_type, char *event,
3535 3544 int ldi_result, void *ev_data)
3536 3545 {
3537 3546 ldi_ev_callback_impl_t *lecp;
3538 3547 list_t *listp;
3539 3548 char *lec_event;
3540 3549 int found = 0;
3541 3550
3542 3551 ASSERT(dip);
3543 3552 ASSERT(dev != DDI_DEV_T_NONE);
3544 3553 ASSERT(dev != NODEV);
3545 3554 ASSERT((dev == DDI_DEV_T_ANY && spec_type == 0) ||
3546 3555 (spec_type == S_IFCHR || spec_type == S_IFBLK));
3547 3556 ASSERT(event);
3548 3557 ASSERT(ldi_native_event(event));
3549 3558 ASSERT(ldi_result == LDI_EV_SUCCESS || ldi_result == LDI_EV_FAILURE);
3550 3559
3551 3560 LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): entered: dip=%p, result=%d"
3552 3561 " event=%s", (void *)dip, ldi_result, event));
3553 3562
3554 3563 ldi_ev_lock();
3564 + VERIFY(walker_next == NULL);
3555 3565 listp = &ldi_ev_callback_list.le_head;
3556 - for (lecp = list_head(listp); lecp; lecp = list_next(listp, lecp)) {
3566 + for (lecp = list_head(listp); lecp; lecp = walker_next) {
3567 + walker_next = list_next(listp, lecp);
3557 3568
3558 3569 if (lecp->lec_finalize == NULL) {
3559 3570 LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): No "
3560 3571 "finalize. Skipping"));
3561 3572 continue; /* Not interested in finalize */
3562 3573 }
3563 3574
3564 3575 /*
3565 3576 * Check if matching device
3566 3577 */
3567 3578 if (!ldi_ev_device_match(lecp, dip, dev, spec_type))
3568 3579 continue;
3569 3580
3570 3581 /*
3571 3582 * It is valid for the LDI handle to be NULL during finalize.
3572 3583 * The layered driver may have done an LDI close in the notify
3573 3584 * callback.
3574 3585 */
3575 3586
3576 3587 /*
3577 3588 * Check if matching event
3578 3589 */
3579 3590 lec_event = ldi_ev_get_type(lecp->lec_cookie);
3580 3591 if (strcmp(event, lec_event) != 0) {
3581 3592 LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): Not "
3582 3593 "matching event {%s,%s}. Skipping",
3583 3594 event, lec_event));
3584 3595 continue;
3585 3596 }
3586 3597
3587 3598 LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): calling finalize"));
3588 3599
3589 3600 found = 1;
3590 3601
3591 3602 lecp->lec_finalize(lecp->lec_lhp, lecp->lec_cookie,
3592 3603 ldi_result, lecp->lec_arg, ev_data);
3593 3604
3594 3605 /*
3595 3606 * If LDI native event and LDI handle closed in context
3596 3607 * of notify, NULL out the finalize callback as we have
3597 3608 * already called the 1 finalize above allowed in this situation
3598 3609 */
3599 3610 if (lecp->lec_lhp == NULL &&
3600 3611 ldi_native_cookie(lecp->lec_cookie)) {
3601 3612 LDI_EVDBG((CE_NOTE,
3602 3613 "ldi_invoke_finalize(): NULLing finalize after "
3603 3614 "calling 1 finalize following ldi_close"));
3604 3615 lecp->lec_finalize = NULL;
3605 3616 }
3606 3617 }
3618 + walker_next = NULL;
3607 3619 ldi_ev_unlock();
3608 3620
3609 3621 if (found)
3610 3622 return;
3611 3623
3612 3624 LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): no matching callbacks"));
3613 3625 }
3614 3626
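ldi_invoke_finalize() only has work to do if a layered driver registered a finalize callback in the first place. Here is a hedged sketch of that registration, assuming the hypothetical xx_notify/xx_finalize callbacks and xx_state_t fields used in the other sketches and that they are declared earlier in the same file; ldi_ev_get_cookie(9F) and ldi_ev_register_callbacks(9F) are the real interfaces.

    static ldi_ev_callback_t xx_cb = {
        .cb_vers = LDI_EV_CB_VERS,
        .cb_notify = xx_notify,
        .cb_finalize = xx_finalize
    };

    static int
    xx_watch_device(xx_state_t *xsp)
    {
        ldi_ev_cookie_t cookie;

        /* Look up the offline event cookie on the already-open handle. */
        if (ldi_ev_get_cookie(xsp->xx_lh, LDI_EV_OFFLINE, &cookie) !=
            LDI_EV_SUCCESS)
            return (LDI_EV_FAILURE);

        /* The id filled in here is what ldi_ev_remove_callbacks() takes. */
        return (ldi_ev_register_callbacks(xsp->xx_lh, cookie, &xx_cb,
            xsp, &xsp->xx_cb_id));
    }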
3615 3627 /*
3616 3628 * Framework function to be called from a layered driver to propagate
3617 3629 * LDI "finalize" events to exported minors.
3618 3630 *
3619 3631 * This function is a public interface exported by the LDI framework
3620 3632 * for use by layered drivers to propagate device events up the software
3621 3633 * stack.
3622 3634 */
3623 3635 void
3624 3636 ldi_ev_finalize(dev_info_t *dip, minor_t minor, int spec_type, int ldi_result,
3625 3637 ldi_ev_cookie_t cookie, void *ev_data)
3626 3638 {
3627 3639 dev_t dev;
3628 3640 major_t major;
3629 3641 char *evname;
3630 3642 int ct_result = (ldi_result == LDI_EV_SUCCESS) ?
3631 3643 CT_EV_SUCCESS : CT_EV_FAILURE;
3632 3644 uint_t ct_evtype;
3633 3645
3634 3646 ASSERT(dip);
3635 3647 ASSERT(spec_type == S_IFBLK || spec_type == S_IFCHR);
3636 3648 ASSERT(ldi_result == LDI_EV_SUCCESS || ldi_result == LDI_EV_FAILURE);
3637 3649 ASSERT(ldi_native_cookie(cookie));
3638 3650
3639 3651 LDI_EVDBG((CE_NOTE, "ldi_ev_finalize: entered: dip=%p", (void *)dip));
3640 3652
3641 3653 major = ddi_driver_major(dip);
3642 3654 if (major == DDI_MAJOR_T_NONE) {
3643 3655 char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3644 3656 (void) ddi_pathname(dip, path);
3645 3657 cmn_err(CE_WARN, "ldi_ev_finalize: cannot derive major number "
3646 3658 "for device %s", path);
3647 3659 kmem_free(path, MAXPATHLEN);
3648 3660 return;
3649 3661 }
3650 3662 dev = makedevice(major, minor);
3651 3663
3652 3664 evname = ldi_ev_get_type(cookie);
3653 3665
3654 3666 LDI_EVDBG((CE_NOTE, "ldi_ev_finalize: calling contracts"));
3655 3667 ct_evtype = ldi_contract_event(evname);
3656 3668 contract_device_finalize(dip, dev, spec_type, ct_evtype, ct_result);
3657 3669
3658 3670 LDI_EVDBG((CE_NOTE, "ldi_ev_finalize: calling ldi_invoke_finalize"));
3659 3671 ldi_invoke_finalize(dip, dev, spec_type, evname, ldi_result, ev_data);
3660 3672 }
3661 3673
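The consumer-side counterpart of ldi_ev_finalize() is the finalize callback itself, and this is also where the comment above about a NULL LDI handle matters: if the notify callback closed the handle and the event then failed, the driver is expected to recover using the state it kept in its arg. A hedged sketch, reusing the hypothetical xx_* names; ldi_open_by_name(9F) and ldi_ev_finalize(9F) are the real interfaces.

    static void
    xx_finalize(ldi_handle_t lh, ldi_ev_cookie_t cookie, int ldi_result,
        void *arg, void *ev_data)
    {
        xx_state_t *xsp = arg;      /* hypothetical per-instance state */

        /*
         * The event was vetoed somewhere below and our notify callback
         * had already closed the handle: reopen the underlying device
         * from the path we stashed away.
         */
        if (ldi_result == LDI_EV_FAILURE && lh == NULL) {
            (void) ldi_open_by_name(xsp->xx_path, FREAD | FWRITE,
                kcred, &xsp->xx_lh, xsp->xx_li);
        }

        /* Propagate the outcome to consumers of our exported minor. */
        ldi_ev_finalize(xsp->xx_dip, xsp->xx_minor, S_IFCHR, ldi_result,
            cookie, ev_data);
    }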
3662 3674 int
3663 3675 ldi_ev_remove_callbacks(ldi_callback_id_t id)
3664 3676 {
3665 3677 ldi_ev_callback_impl_t *lecp;
3666 3678 ldi_ev_callback_impl_t *next;
3667 3679 ldi_ev_callback_impl_t *found;
3668 3680 list_t *listp;
3669 3681
3670 3682 ASSERT(!servicing_interrupt());
3671 3683
3672 3684 if (id == 0) {
3673 3685 cmn_err(CE_WARN, "ldi_ev_remove_callbacks: Invalid ID 0");
3674 3686 return (LDI_EV_FAILURE);
3675 3687 }
3676 3688
3677 3689 LDI_EVDBG((CE_NOTE, "ldi_ev_remove_callbacks: entered: id=%p",
3678 3690 (void *)id));
3679 3691
3680 3692 ldi_ev_lock();
3681 3693
3682 3694 listp = &ldi_ev_callback_list.le_head;
3683 3695 next = found = NULL;
3684 3696 for (lecp = list_head(listp); lecp; lecp = next) {
3685 3697 next = list_next(listp, lecp);
3686 3698 if (lecp->lec_id == id) {
3687 3699 ASSERT(found == NULL);
3700 +
3701 + /* If there is a walk in progress, move it along... */
3702 + if (walker_next == lecp)
3703 + walker_next = next;
3704 + if (walker_prev == lecp)
3705 + walker_prev = list_prev(listp, walker_prev);
3706 +
3688 3707 list_remove(listp, lecp);
3689 3708 found = lecp;
3690 3709 }
3691 3710 }
3692 3711 ldi_ev_unlock();
3693 3712
3694 3713 if (found == NULL) {
3695 3714 cmn_err(CE_WARN, "No LDI event handler for id (%p)",
3696 3715 (void *)id);
3697 3716 return (LDI_EV_SUCCESS);
3698 3717 }
3699 3718
3700 3719 if (!ldi_native_cookie(found->lec_cookie)) {
3701 3720 ASSERT(found->lec_notify == NULL);
3702 3721 if (ddi_remove_event_handler((ddi_callback_id_t)id)
3703 3722 != DDI_SUCCESS) {
3704 3723 cmn_err(CE_WARN, "failed to remove NDI event handler "
3705 3724 "for id (%p)", (void *)id);
3706 3725 ldi_ev_lock();
3707 3726 list_insert_tail(listp, found);
3708 3727 ldi_ev_unlock();
3709 3728 return (LDI_EV_FAILURE);
3710 3729 }
3711 3730 LDI_EVDBG((CE_NOTE, "ldi_ev_remove_callbacks: NDI event "
3712 3731 "service removal succeeded"));
3713 3732 } else {
3714 3733 LDI_EVDBG((CE_NOTE, "ldi_ev_remove_callbacks: removed "
3715 3734 "LDI native callbacks"));
3716 3735 }
3717 3736 kmem_free(found, sizeof (ldi_ev_callback_impl_t));
3718 3737
3719 3738 return (LDI_EV_SUCCESS);
3720 3739 }
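Finally, the walker fix-up added to ldi_ev_remove_callbacks() is what makes the walks above safe when a registration disappears underneath them: before list_remove() runs, an in-progress walk is stepped past the entry being unlinked, with walker_next advanced forward and walker_prev moved backward via list_prev() so each cursor keeps travelling in its own walk's direction. From the consumer's point of view nothing changes; removal still looks like the usual detach-time teardown, sketched below with the same hypothetical xx_* names (ldi_ev_remove_callbacks(9F) and ldi_close(9F) are the real interfaces).

    static int
    xx_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
    {
        xx_state_t *xsp =
            ddi_get_soft_state(xx_softc, ddi_get_instance(dip));

        if (cmd != DDI_DETACH)
            return (DDI_FAILURE);

        /* Drop the registration made by xx_watch_device(). */
        if (ldi_ev_remove_callbacks(xsp->xx_cb_id) != LDI_EV_SUCCESS)
            return (DDI_FAILURE);

        (void) ldi_close(xsp->xx_lh, FREAD | FWRITE, kcred);
        return (DDI_SUCCESS);
    }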