OS-1988 Make ldi_ev_remove_callbacks safe to use in LDI callbacks
--- old/usr/src/uts/common/os/driver_lyr.c
+++ new/usr/src/uts/common/os/driver_lyr.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 +/*
25 + * Copyright (c) 2013, Joyent, Inc. All rights reserved.
26 + */
24 27
25 28 /*
26 29 * Layered driver support.
27 30 */
28 31
29 32 #include <sys/atomic.h>
30 33 #include <sys/types.h>
31 34 #include <sys/t_lock.h>
32 35 #include <sys/param.h>
33 36 #include <sys/conf.h>
34 37 #include <sys/systm.h>
35 38 #include <sys/sysmacros.h>
36 39 #include <sys/buf.h>
37 40 #include <sys/cred.h>
38 41 #include <sys/uio.h>
39 42 #include <sys/vnode.h>
40 43 #include <sys/fs/snode.h>
41 44 #include <sys/open.h>
42 45 #include <sys/kmem.h>
43 46 #include <sys/file.h>
44 47 #include <sys/bootconf.h>
45 48 #include <sys/pathname.h>
46 49 #include <sys/bitmap.h>
47 50 #include <sys/stat.h>
48 51 #include <sys/dditypes.h>
49 52 #include <sys/ddi_impldefs.h>
50 53 #include <sys/ddi.h>
51 54 #include <sys/sunddi.h>
52 55 #include <sys/sunndi.h>
53 56 #include <sys/esunddi.h>
54 57 #include <sys/autoconf.h>
55 58 #include <sys/sunldi.h>
56 59 #include <sys/sunldi_impl.h>
57 60 #include <sys/errno.h>
58 61 #include <sys/debug.h>
59 62 #include <sys/modctl.h>
60 63 #include <sys/var.h>
61 64 #include <vm/seg_vn.h>
62 65
63 66 #include <sys/stropts.h>
64 67 #include <sys/strsubr.h>
65 68 #include <sys/socket.h>
66 69 #include <sys/socketvar.h>
67 70 #include <sys/kstr.h>
68 71
69 72 /*
70 73 * Device contract related
71 74 */
72 75 #include <sys/contract_impl.h>
73 76 #include <sys/contract/device_impl.h>
74 77
75 78 /*
76 79 * Define macros to manipulate snode, vnode, and open device flags
77 80 */
78 81 #define VTYP_VALID(i) (((i) == VCHR) || ((i) == VBLK))
79 82 #define VTYP_TO_OTYP(i) (((i) == VCHR) ? OTYP_CHR : OTYP_BLK)
80 83 #define VTYP_TO_STYP(i) (((i) == VCHR) ? S_IFCHR : S_IFBLK)
81 84
82 85 #define OTYP_VALID(i) (((i) == OTYP_CHR) || ((i) == OTYP_BLK))
83 86 #define OTYP_TO_VTYP(i) (((i) == OTYP_CHR) ? VCHR : VBLK)
84 87 #define OTYP_TO_STYP(i) (((i) == OTYP_CHR) ? S_IFCHR : S_IFBLK)
85 88
86 89 #define STYP_VALID(i) (((i) == S_IFCHR) || ((i) == S_IFBLK))
87 90 #define STYP_TO_VTYP(i) (((i) == S_IFCHR) ? VCHR : VBLK)
88 91
89 92 /*
90 93 * Define macros for accessing layered driver hash structures
91 94 */
92 95 #define LH_HASH(vp) (handle_hash_func(vp) % LH_HASH_SZ)
93 96 #define LI_HASH(mid, dip, dev) (ident_hash_func(mid, dip, dev) % LI_HASH_SZ)
94 97
95 98 /*
96 99 * Define layered handle flags used in the lh_type field
97 100 */
98 101 #define LH_STREAM (0x1) /* handle to a streams device */
99 102 #define LH_CBDEV (0x2) /* handle to a char/block device */
100 103
101 104 /*
102 105 * Define macro for devid property lookups
103 106 */
104 107 #define DEVID_PROP_FLAGS (DDI_PROP_DONTPASS | \
105 108 DDI_PROP_TYPE_STRING|DDI_PROP_CANSLEEP)
106 109
107 110 /*
108 111 * Dummy string for NDI events
109 112 */
110 113 #define NDI_EVENT_SERVICE "NDI_EVENT_SERVICE"
111 114
112 115 static void ldi_ev_lock(void);
113 116 static void ldi_ev_unlock(void);
114 117
115 118 #ifdef LDI_OBSOLETE_EVENT
116 119 int ldi_remove_event_handler(ldi_handle_t lh, ldi_callback_id_t id);
117 120 #endif
118 121
119 122
120 123 /*
121 124 * globals
122 125 */
123 126 static kmutex_t ldi_ident_hash_lock[LI_HASH_SZ];
124 127 static struct ldi_ident *ldi_ident_hash[LI_HASH_SZ];
125 128
126 129 static kmutex_t ldi_handle_hash_lock[LH_HASH_SZ];
127 130 static struct ldi_handle *ldi_handle_hash[LH_HASH_SZ];
128 131 static size_t ldi_handle_hash_count;
129 132
133 +/*
134 + * Use of "ldi_ev_callback_list" must be protected by ldi_ev_lock()
135 + * and ldi_ev_unlock().
136 + */
130 137 static struct ldi_ev_callback_list ldi_ev_callback_list;
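A minimal sketch (illustration only, not part of this diff) of the locking discipline the new comment documents: any traversal of le_head is bracketed by ldi_ev_lock()/ldi_ev_unlock(), the same pattern ldi_close() uses later in this file. The helper name is hypothetical.

	/*
	 * Hypothetical example: count the callbacks registered against a
	 * given layered handle.  The list may only be walked while the
	 * event lock is held.
	 */
	static int
	ldi_ev_count_callbacks(struct ldi_handle *lhp)
	{
		ldi_ev_callback_impl_t *lecp;
		list_t *listp = &ldi_ev_callback_list.le_head;
		int cnt = 0;

		ldi_ev_lock();
		for (lecp = list_head(listp); lecp != NULL;
		    lecp = list_next(listp, lecp)) {
			if (lecp->lec_lhp == lhp)
				cnt++;
		}
		ldi_ev_unlock();
		return (cnt);
	}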
131 138
132 139 static uint32_t ldi_ev_id_pool = 0;
133 140
134 141 struct ldi_ev_cookie {
135 142 char *ck_evname;
136 143 uint_t ck_sync;
137 144 uint_t ck_ctype;
138 145 };
139 146
140 147 static struct ldi_ev_cookie ldi_ev_cookies[] = {
141 148 { LDI_EV_OFFLINE, 1, CT_DEV_EV_OFFLINE},
142 149 { LDI_EV_DEGRADE, 0, CT_DEV_EV_DEGRADED},
143 150 { LDI_EV_DEVICE_REMOVE, 0, 0},
144 151 { NULL} /* must terminate list */
145 152 };
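For reference, a hedged consumer-side sketch of how these named events are used through the public interfaces ldi_ev_get_cookie(9F), ldi_ev_register_callbacks(9F) and ldi_ev_remove_callbacks(9F), which is the path OS-1988 is concerned with. The driver-side names (xx_offline_notify, xx_watch_offline, xx_busy) are hypothetical, and the callback-vector layout is recalled from sys/sunldi.h, so treat this as a sketch rather than the authoritative definition.

	/* hypothetical notify routine: veto an offline while the device is busy */
	static int
	xx_offline_notify(ldi_handle_t lh, ldi_ev_cookie_t cookie, void *arg,
	    void *ev_data)
	{
		return (xx_busy(arg) ? LDI_EV_FAILURE : LDI_EV_SUCCESS);
	}

	static void
	xx_watch_offline(ldi_handle_t lh, void *xx_state)
	{
		ldi_ev_cookie_t cookie;
		ldi_callback_id_t id;
		ldi_ev_callback_t cb = {
			.cb_vers = LDI_EV_CB_VERS,
			.cb_notify = xx_offline_notify,
			.cb_finalize = NULL
		};

		if (ldi_ev_get_cookie(lh, LDI_EV_OFFLINE, &cookie) !=
		    LDI_EV_SUCCESS)
			return;
		if (ldi_ev_register_callbacks(lh, cookie, &cb, xx_state,
		    &id) != LDI_EV_SUCCESS)
			return;
		/*
		 * "id" may later be passed to ldi_ev_remove_callbacks(),
		 * per OS-1988 even from within one of the callbacks.
		 */
	}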
146 153
147 154 void
148 155 ldi_init(void)
149 156 {
150 157 int i;
151 158
152 159 ldi_handle_hash_count = 0;
153 160 for (i = 0; i < LH_HASH_SZ; i++) {
154 161 mutex_init(&ldi_handle_hash_lock[i], NULL, MUTEX_DEFAULT, NULL);
155 162 ldi_handle_hash[i] = NULL;
156 163 }
157 164 for (i = 0; i < LI_HASH_SZ; i++) {
158 165 mutex_init(&ldi_ident_hash_lock[i], NULL, MUTEX_DEFAULT, NULL);
159 166 ldi_ident_hash[i] = NULL;
160 167 }
161 168
162 169 /*
163 170 * Initialize the LDI event subsystem
164 171 */
165 172 mutex_init(&ldi_ev_callback_list.le_lock, NULL, MUTEX_DEFAULT, NULL);
166 173 cv_init(&ldi_ev_callback_list.le_cv, NULL, CV_DEFAULT, NULL);
167 174 ldi_ev_callback_list.le_busy = 0;
168 175 ldi_ev_callback_list.le_thread = NULL;
176 + ldi_ev_callback_list.le_walker_next = NULL;
177 + ldi_ev_callback_list.le_walker_prev = NULL;
169 178 list_create(&ldi_ev_callback_list.le_head,
170 179 sizeof (ldi_ev_callback_impl_t),
171 180 offsetof(ldi_ev_callback_impl_t, lec_list));
172 181 }
173 182
174 183 /*
175 184 * LDI ident manipulation functions
176 185 */
177 186 static uint_t
178 187 ident_hash_func(modid_t modid, dev_info_t *dip, dev_t dev)
179 188 {
180 189 if (dip != NULL) {
181 190 uintptr_t k = (uintptr_t)dip;
182 191 k >>= (int)highbit(sizeof (struct dev_info));
183 192 return ((uint_t)k);
184 193 } else if (dev != DDI_DEV_T_NONE) {
185 194 return (modid + getminor(dev) + getmajor(dev));
186 195 } else {
187 196 return (modid);
188 197 }
189 198 }
190 199
191 200 static struct ldi_ident **
192 201 ident_find_ref_nolock(modid_t modid, dev_info_t *dip, dev_t dev, major_t major)
193 202 {
194 203 struct ldi_ident **lipp = NULL;
195 204 uint_t index = LI_HASH(modid, dip, dev);
196 205
197 206 ASSERT(MUTEX_HELD(&ldi_ident_hash_lock[index]));
198 207
199 208 for (lipp = &(ldi_ident_hash[index]);
200 209 (*lipp != NULL);
201 210 lipp = &((*lipp)->li_next)) {
202 211 if (((*lipp)->li_modid == modid) &&
203 212 ((*lipp)->li_major == major) &&
204 213 ((*lipp)->li_dip == dip) &&
205 214 ((*lipp)->li_dev == dev))
206 215 break;
207 216 }
208 217
209 218 ASSERT(lipp != NULL);
210 219 return (lipp);
211 220 }
212 221
213 222 static struct ldi_ident *
214 223 ident_alloc(char *mod_name, dev_info_t *dip, dev_t dev, major_t major)
215 224 {
216 225 struct ldi_ident *lip, **lipp, *retlip;
217 226 modid_t modid;
218 227 uint_t index;
219 228
220 229 ASSERT(mod_name != NULL);
221 230
222 231 /* get the module id */
223 232 modid = mod_name_to_modid(mod_name);
224 233 ASSERT(modid != -1);
225 234
226 235 /* allocate a new ident in case we need it */
227 236 lip = kmem_zalloc(sizeof (*lip), KM_SLEEP);
228 237
229 238 /* search the hash for a matching ident */
230 239 index = LI_HASH(modid, dip, dev);
231 240 mutex_enter(&ldi_ident_hash_lock[index]);
232 241 lipp = ident_find_ref_nolock(modid, dip, dev, major);
233 242
234 243 if (*lipp != NULL) {
235 244 /* we found an ident in the hash */
236 245 ASSERT(strcmp((*lipp)->li_modname, mod_name) == 0);
237 246 (*lipp)->li_ref++;
238 247 retlip = *lipp;
239 248 mutex_exit(&ldi_ident_hash_lock[index]);
240 249 kmem_free(lip, sizeof (struct ldi_ident));
241 250 return (retlip);
242 251 }
243 252
244 253 /* initialize the new ident */
245 254 lip->li_next = NULL;
246 255 lip->li_ref = 1;
247 256 lip->li_modid = modid;
248 257 lip->li_major = major;
249 258 lip->li_dip = dip;
250 259 lip->li_dev = dev;
251 260 (void) strncpy(lip->li_modname, mod_name, sizeof (lip->li_modname) - 1);
252 261
253 262 /* add it to the ident hash */
254 263 lip->li_next = ldi_ident_hash[index];
255 264 ldi_ident_hash[index] = lip;
256 265
257 266 mutex_exit(&ldi_ident_hash_lock[index]);
258 267 return (lip);
259 268 }
260 269
261 270 static void
262 271 ident_hold(struct ldi_ident *lip)
263 272 {
264 273 uint_t index;
265 274
266 275 ASSERT(lip != NULL);
267 276 index = LI_HASH(lip->li_modid, lip->li_dip, lip->li_dev);
268 277 mutex_enter(&ldi_ident_hash_lock[index]);
269 278 ASSERT(lip->li_ref > 0);
270 279 lip->li_ref++;
271 280 mutex_exit(&ldi_ident_hash_lock[index]);
272 281 }
273 282
274 283 static void
275 284 ident_release(struct ldi_ident *lip)
276 285 {
277 286 struct ldi_ident **lipp;
278 287 uint_t index;
279 288
280 289 ASSERT(lip != NULL);
281 290 index = LI_HASH(lip->li_modid, lip->li_dip, lip->li_dev);
282 291 mutex_enter(&ldi_ident_hash_lock[index]);
283 292
284 293 ASSERT(lip->li_ref > 0);
285 294 if (--lip->li_ref > 0) {
286 295 /* there are more references to this ident */
287 296 mutex_exit(&ldi_ident_hash_lock[index]);
288 297 return;
289 298 }
290 299
291 300 /* this was the last reference/open for this ident. free it. */
292 301 lipp = ident_find_ref_nolock(
293 302 lip->li_modid, lip->li_dip, lip->li_dev, lip->li_major);
294 303
295 304 ASSERT((lipp != NULL) && (*lipp != NULL));
296 305 *lipp = lip->li_next;
297 306 mutex_exit(&ldi_ident_hash_lock[index]);
298 307 kmem_free(lip, sizeof (struct ldi_ident));
299 308 }
300 309
301 310 /*
302 311 * LDI handle manipulation functions
303 312 */
304 313 static uint_t
305 314 handle_hash_func(void *vp)
306 315 {
307 316 uintptr_t k = (uintptr_t)vp;
308 317 k >>= (int)highbit(sizeof (vnode_t));
309 318 return ((uint_t)k);
310 319 }
311 320
312 321 static struct ldi_handle **
313 322 handle_find_ref_nolock(vnode_t *vp, struct ldi_ident *ident)
314 323 {
315 324 struct ldi_handle **lhpp = NULL;
316 325 uint_t index = LH_HASH(vp);
317 326
318 327 ASSERT(MUTEX_HELD(&ldi_handle_hash_lock[index]));
319 328
320 329 for (lhpp = &(ldi_handle_hash[index]);
321 330 (*lhpp != NULL);
322 331 lhpp = &((*lhpp)->lh_next)) {
323 332 if (((*lhpp)->lh_ident == ident) &&
324 333 ((*lhpp)->lh_vp == vp))
325 334 break;
326 335 }
327 336
328 337 ASSERT(lhpp != NULL);
329 338 return (lhpp);
330 339 }
331 340
332 341 static struct ldi_handle *
333 342 handle_find(vnode_t *vp, struct ldi_ident *ident)
334 343 {
335 344 struct ldi_handle **lhpp, *retlhp;
336 345 int index = LH_HASH(vp);
337 346
338 347 mutex_enter(&ldi_handle_hash_lock[index]);
339 348 lhpp = handle_find_ref_nolock(vp, ident);
340 349 retlhp = *lhpp;
341 350 mutex_exit(&ldi_handle_hash_lock[index]);
342 351 return (retlhp);
343 352 }
344 353
345 354 static struct ldi_handle *
346 355 handle_alloc(vnode_t *vp, struct ldi_ident *ident)
347 356 {
348 357 struct ldi_handle *lhp, **lhpp, *retlhp;
349 358 uint_t index;
350 359
351 360 ASSERT((vp != NULL) && (ident != NULL));
352 361
353 362 /* allocate a new handle in case we need it */
354 363 lhp = kmem_zalloc(sizeof (*lhp), KM_SLEEP);
355 364
356 365 /* search the hash for a matching handle */
357 366 index = LH_HASH(vp);
358 367 mutex_enter(&ldi_handle_hash_lock[index]);
359 368 lhpp = handle_find_ref_nolock(vp, ident);
360 369
361 370 if (*lhpp != NULL) {
362 371 /* we found a handle in the hash */
363 372 (*lhpp)->lh_ref++;
364 373 retlhp = *lhpp;
365 374 mutex_exit(&ldi_handle_hash_lock[index]);
366 375
367 376 LDI_ALLOCFREE((CE_WARN, "ldi handle alloc: dup "
368 377 "lh=0x%p, ident=0x%p, vp=0x%p, drv=%s, minor=0x%x",
369 378 (void *)retlhp, (void *)ident, (void *)vp,
370 379 mod_major_to_name(getmajor(vp->v_rdev)),
371 380 getminor(vp->v_rdev)));
372 381
373 382 kmem_free(lhp, sizeof (struct ldi_handle));
374 383 return (retlhp);
375 384 }
376 385
377 386 /* initialize the new handle */
378 387 lhp->lh_ref = 1;
379 388 lhp->lh_vp = vp;
380 389 lhp->lh_ident = ident;
381 390 #ifdef LDI_OBSOLETE_EVENT
382 391 mutex_init(lhp->lh_lock, NULL, MUTEX_DEFAULT, NULL);
383 392 #endif
384 393
385 394 /* set the device type for this handle */
386 395 lhp->lh_type = 0;
387 396 if (vp->v_stream) {
388 397 ASSERT(vp->v_type == VCHR);
389 398 lhp->lh_type |= LH_STREAM;
390 399 } else {
391 400 lhp->lh_type |= LH_CBDEV;
392 401 }
393 402
394 403 /* get holds on other objects */
395 404 ident_hold(ident);
396 405 ASSERT(vp->v_count >= 1);
397 406 VN_HOLD(vp);
398 407
399 408 /* add it to the handle hash */
400 409 lhp->lh_next = ldi_handle_hash[index];
401 410 ldi_handle_hash[index] = lhp;
402 411 atomic_add_long(&ldi_handle_hash_count, 1);
403 412
404 413 LDI_ALLOCFREE((CE_WARN, "ldi handle alloc: new "
405 414 "lh=0x%p, ident=0x%p, vp=0x%p, drv=%s, minor=0x%x",
406 415 (void *)lhp, (void *)ident, (void *)vp,
407 416 mod_major_to_name(getmajor(vp->v_rdev)),
408 417 getminor(vp->v_rdev)));
409 418
410 419 mutex_exit(&ldi_handle_hash_lock[index]);
411 420 return (lhp);
412 421 }
413 422
414 423 static void
415 424 handle_release(struct ldi_handle *lhp)
416 425 {
417 426 struct ldi_handle **lhpp;
418 427 uint_t index;
419 428
420 429 ASSERT(lhp != NULL);
421 430
422 431 index = LH_HASH(lhp->lh_vp);
423 432 mutex_enter(&ldi_handle_hash_lock[index]);
424 433
425 434 LDI_ALLOCFREE((CE_WARN, "ldi handle release: "
426 435 "lh=0x%p, ident=0x%p, vp=0x%p, drv=%s, minor=0x%x",
427 436 (void *)lhp, (void *)lhp->lh_ident, (void *)lhp->lh_vp,
428 437 mod_major_to_name(getmajor(lhp->lh_vp->v_rdev)),
429 438 getminor(lhp->lh_vp->v_rdev)));
430 439
431 440 ASSERT(lhp->lh_ref > 0);
432 441 if (--lhp->lh_ref > 0) {
433 442 /* there are more references to this handle */
434 443 mutex_exit(&ldi_handle_hash_lock[index]);
435 444 return;
436 445 }
437 446
438 447 /* this was the last reference/open for this handle. free it. */
439 448 lhpp = handle_find_ref_nolock(lhp->lh_vp, lhp->lh_ident);
440 449 ASSERT((lhpp != NULL) && (*lhpp != NULL));
441 450 *lhpp = lhp->lh_next;
442 451 atomic_add_long(&ldi_handle_hash_count, -1);
443 452 mutex_exit(&ldi_handle_hash_lock[index]);
444 453
445 454 VN_RELE(lhp->lh_vp);
446 455 ident_release(lhp->lh_ident);
447 456 #ifdef LDI_OBSOLETE_EVENT
448 457 mutex_destroy(lhp->lh_lock);
449 458 #endif
450 459 kmem_free(lhp, sizeof (struct ldi_handle));
451 460 }
452 461
453 462 #ifdef LDI_OBSOLETE_EVENT
454 463 /*
455 464 * LDI event manipulation functions
456 465 */
457 466 static void
458 467 handle_event_add(ldi_event_t *lep)
459 468 {
460 469 struct ldi_handle *lhp = lep->le_lhp;
461 470
462 471 ASSERT(lhp != NULL);
463 472
464 473 mutex_enter(lhp->lh_lock);
465 474 if (lhp->lh_events == NULL) {
466 475 lhp->lh_events = lep;
467 476 mutex_exit(lhp->lh_lock);
468 477 return;
469 478 }
470 479
471 480 lep->le_next = lhp->lh_events;
472 481 lhp->lh_events->le_prev = lep;
473 482 lhp->lh_events = lep;
474 483 mutex_exit(lhp->lh_lock);
475 484 }
476 485
477 486 static void
478 487 handle_event_remove(ldi_event_t *lep)
479 488 {
480 489 struct ldi_handle *lhp = lep->le_lhp;
481 490
482 491 ASSERT(lhp != NULL);
483 492
484 493 mutex_enter(lhp->lh_lock);
485 494 if (lep->le_prev)
486 495 lep->le_prev->le_next = lep->le_next;
487 496 if (lep->le_next)
488 497 lep->le_next->le_prev = lep->le_prev;
489 498 if (lhp->lh_events == lep)
490 499 lhp->lh_events = lep->le_next;
491 500 mutex_exit(lhp->lh_lock);
492 501
493 502 }
494 503
495 504 static void
496 505 i_ldi_callback(dev_info_t *dip, ddi_eventcookie_t event_cookie,
497 506 void *arg, void *bus_impldata)
498 507 {
499 508 ldi_event_t *lep = (ldi_event_t *)arg;
500 509
501 510 ASSERT(lep != NULL);
502 511
503 512 LDI_EVENTCB((CE_NOTE, "%s: dip=0x%p, "
504 513 "event_cookie=0x%p, ldi_eventp=0x%p", "i_ldi_callback",
505 514 (void *)dip, (void *)event_cookie, (void *)lep));
506 515
507 516 lep->le_handler(lep->le_lhp, event_cookie, lep->le_arg, bus_impldata);
508 517 }
509 518 #endif
510 519
511 520 /*
512 521 * LDI open helper functions
513 522 */
514 523
515 524 /* get a vnode to a device by dev_t and otyp */
516 525 static int
517 526 ldi_vp_from_dev(dev_t dev, int otyp, vnode_t **vpp)
518 527 {
519 528 dev_info_t *dip;
520 529 vnode_t *vp;
521 530
522 531 /* sanity check required input parameters */
523 532 if ((dev == DDI_DEV_T_NONE) || (!OTYP_VALID(otyp)) || (vpp == NULL))
524 533 return (EINVAL);
525 534
526 535 if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
527 536 return (ENODEV);
528 537
529 538 vp = makespecvp(dev, OTYP_TO_VTYP(otyp));
530 539 spec_assoc_vp_with_devi(vp, dip);
531 540 ddi_release_devi(dip); /* from e_ddi_hold_devi_by_dev */
532 541
533 542 *vpp = vp;
534 543 return (0);
535 544 }
536 545
537 546 /* get a vnode to a device by pathname */
538 547 int
539 548 ldi_vp_from_name(char *path, vnode_t **vpp)
540 549 {
541 550 vnode_t *vp = NULL;
542 551 int ret;
543 552
544 553 /* sanity check required input parameters */
545 554 if ((path == NULL) || (vpp == NULL))
546 555 return (EINVAL);
547 556
548 557 if (modrootloaded) {
549 558 cred_t *saved_cred = curthread->t_cred;
550 559
551 560 /* we don't want lookupname to fail because of credentials */
552 561 curthread->t_cred = kcred;
553 562
554 563 /*
555 564 * all lookups should be done in the global zone. but
556 565 * lookupnameat() won't actually do this if an absolute
557 566 * path is passed in. since the ldi interfaces require an
558 567 * absolute path we pass lookupnameat() a pointer to
559 568 * the character after the leading '/' and tell it to
560 569 * start searching at the current system root directory.
561 570 */
562 571 ASSERT(*path == '/');
563 572 ret = lookupnameat(path + 1, UIO_SYSSPACE, FOLLOW, NULLVPP,
564 573 &vp, rootdir);
565 574
566 575 /* restore this threads credentials */
567 576 curthread->t_cred = saved_cred;
568 577
569 578 if (ret == 0) {
570 579 if (!vn_matchops(vp, spec_getvnodeops()) ||
571 580 !VTYP_VALID(vp->v_type)) {
572 581 VN_RELE(vp);
573 582 return (ENXIO);
574 583 }
575 584 }
576 585 }
577 586
578 587 if (vp == NULL) {
579 588 dev_info_t *dip;
580 589 dev_t dev;
581 590 int spec_type;
582 591
583 592 /*
584 593 * Root is not mounted, the minor node is not specified,
585 594 * or an OBP path has been specified.
586 595 */
587 596
588 597 /*
589 598 * Determine if path can be pruned to produce an
590 599 * OBP or devfs path for resolve_pathname.
591 600 */
592 601 if (strncmp(path, "/devices/", 9) == 0)
593 602 path += strlen("/devices");
594 603
595 604 /*
596 605 * if no minor node was specified the DEFAULT minor node
597 606 * will be returned. if there is no DEFAULT minor node
598 607 * one will be fabricated of type S_IFCHR with the minor
599 608 * number equal to the instance number.
600 609 */
601 610 ret = resolve_pathname(path, &dip, &dev, &spec_type);
602 611 if (ret != 0)
603 612 return (ENODEV);
604 613
605 614 ASSERT(STYP_VALID(spec_type));
606 615 vp = makespecvp(dev, STYP_TO_VTYP(spec_type));
607 616 spec_assoc_vp_with_devi(vp, dip);
608 617 ddi_release_devi(dip);
609 618 }
610 619
611 620 *vpp = vp;
612 621 return (0);
613 622 }
614 623
615 624 static int
616 625 ldi_devid_match(ddi_devid_t devid, dev_info_t *dip, dev_t dev)
617 626 {
618 627 char *devidstr;
619 628 ddi_prop_t *propp;
620 629
621 630 /* convert devid as a string property */
622 631 if ((devidstr = ddi_devid_str_encode(devid, NULL)) == NULL)
623 632 return (0);
624 633
625 634 /*
626 635 * Search for the devid. For speed and ease in locking this
627 636 * code directly uses the property implementation. See
628 637 * ddi_common_devid_to_devlist() for a comment as to why.
629 638 */
630 639 mutex_enter(&(DEVI(dip)->devi_lock));
631 640
632 641 /* check if there is a DDI_DEV_T_NONE devid property */
633 642 propp = i_ddi_prop_search(DDI_DEV_T_NONE,
634 643 DEVID_PROP_NAME, DEVID_PROP_FLAGS, &DEVI(dip)->devi_hw_prop_ptr);
635 644 if (propp != NULL) {
636 645 if (ddi_devid_str_compare(propp->prop_val, devidstr) == 0) {
637 646 			/* a DDI_DEV_T_NONE devid exists and matches */
638 647 mutex_exit(&(DEVI(dip)->devi_lock));
639 648 ddi_devid_str_free(devidstr);
640 649 return (1);
641 650 } else {
642 651 /* a DDI_DEV_T_NONE devid exists and doesn't match */
643 652 mutex_exit(&(DEVI(dip)->devi_lock));
644 653 ddi_devid_str_free(devidstr);
645 654 return (0);
646 655 }
647 656 }
648 657
649 658 /* check if there is a devt specific devid property */
650 659 propp = i_ddi_prop_search(dev,
651 660 DEVID_PROP_NAME, DEVID_PROP_FLAGS, &(DEVI(dip)->devi_hw_prop_ptr));
652 661 if (propp != NULL) {
653 662 if (ddi_devid_str_compare(propp->prop_val, devidstr) == 0) {
654 663 			/* a devt specific devid exists and matches */
655 664 mutex_exit(&(DEVI(dip)->devi_lock));
656 665 ddi_devid_str_free(devidstr);
657 666 return (1);
658 667 } else {
659 668 /* a devt specific devid exists and doesn't match */
660 669 mutex_exit(&(DEVI(dip)->devi_lock));
661 670 ddi_devid_str_free(devidstr);
662 671 return (0);
663 672 }
664 673 }
665 674
666 675 /* we didn't find any devids associated with the device */
667 676 mutex_exit(&(DEVI(dip)->devi_lock));
668 677 ddi_devid_str_free(devidstr);
669 678 return (0);
670 679 }
671 680
672 681 /* get a handle to a device by devid and minor name */
673 682 int
674 683 ldi_vp_from_devid(ddi_devid_t devid, char *minor_name, vnode_t **vpp)
675 684 {
676 685 dev_info_t *dip;
677 686 vnode_t *vp;
678 687 int ret, i, ndevs, styp;
679 688 dev_t dev, *devs;
680 689
681 690 /* sanity check required input parameters */
682 691 if ((devid == NULL) || (minor_name == NULL) || (vpp == NULL))
683 692 return (EINVAL);
684 693
685 694 ret = ddi_lyr_devid_to_devlist(devid, minor_name, &ndevs, &devs);
686 695 if ((ret != DDI_SUCCESS) || (ndevs <= 0))
687 696 return (ENODEV);
688 697
689 698 for (i = 0; i < ndevs; i++) {
690 699 dev = devs[i];
691 700
692 701 if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
693 702 continue;
694 703
695 704 /*
696 705 * now we have to verify that the devid of the disk
697 706 * still matches what was requested.
698 707 *
699 708 * we have to do this because the devid could have
700 709 * changed between the call to ddi_lyr_devid_to_devlist()
701 710 * and e_ddi_hold_devi_by_dev(). this is because when
702 711 * ddi_lyr_devid_to_devlist() returns a list of devts
703 712 * there is no kind of hold on those devts so a device
704 713 * could have been replaced out from under us in the
705 714 * interim.
706 715 */
707 716 if ((i_ddi_minorname_to_devtspectype(dip, minor_name,
708 717 NULL, &styp) == DDI_SUCCESS) &&
709 718 ldi_devid_match(devid, dip, dev))
710 719 break;
711 720
712 721 ddi_release_devi(dip); /* from e_ddi_hold_devi_by_dev() */
713 722 }
714 723
715 724 ddi_lyr_free_devlist(devs, ndevs);
716 725
717 726 if (i == ndevs)
718 727 return (ENODEV);
719 728
720 729 ASSERT(STYP_VALID(styp));
721 730 vp = makespecvp(dev, STYP_TO_VTYP(styp));
722 731 spec_assoc_vp_with_devi(vp, dip);
723 732 ddi_release_devi(dip); /* from e_ddi_hold_devi_by_dev */
724 733
725 734 *vpp = vp;
726 735 return (0);
727 736 }
728 737
729 738 /* given a vnode, open a device */
730 739 static int
731 740 ldi_open_by_vp(vnode_t **vpp, int flag, cred_t *cr,
732 741 ldi_handle_t *lhp, struct ldi_ident *li)
733 742 {
734 743 struct ldi_handle *nlhp;
735 744 vnode_t *vp;
736 745 int err;
737 746
738 747 ASSERT((vpp != NULL) && (*vpp != NULL));
739 748 ASSERT((lhp != NULL) && (li != NULL));
740 749
741 750 vp = *vpp;
742 751 /* if the vnode passed in is not a device, then bail */
743 752 if (!vn_matchops(vp, spec_getvnodeops()) || !VTYP_VALID(vp->v_type))
744 753 return (ENXIO);
745 754
746 755 /*
747 756 * the caller may have specified a node that
748 757 * doesn't have cb_ops defined. the ldi doesn't yet
749 758 * support opening devices without a valid cb_ops.
750 759 */
751 760 if (devopsp[getmajor(vp->v_rdev)]->devo_cb_ops == NULL)
752 761 return (ENXIO);
753 762
754 763 /* open the device */
755 764 if ((err = VOP_OPEN(&vp, flag | FKLYR, cr, NULL)) != 0)
756 765 return (err);
757 766
758 767 /* possible clone open, make sure that we still have a spec node */
759 768 ASSERT(vn_matchops(vp, spec_getvnodeops()));
760 769
761 770 nlhp = handle_alloc(vp, li);
762 771
763 772 if (vp != *vpp) {
764 773 /*
765 774 * allocating the layered handle took a new hold on the vnode
766 775 * so we can release the hold that was returned by the clone
767 776 * open
768 777 */
769 778 LDI_OPENCLOSE((CE_WARN, "%s: lh=0x%p",
770 779 "ldi clone open", (void *)nlhp));
771 780 } else {
772 781 LDI_OPENCLOSE((CE_WARN, "%s: lh=0x%p",
773 782 "ldi open", (void *)nlhp));
774 783 }
775 784
776 785 *vpp = vp;
777 786 *lhp = (ldi_handle_t)nlhp;
778 787 return (0);
779 788 }
780 789
781 790 /* Call a driver's prop_op(9E) interface */
782 791 static int
783 792 i_ldi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
784 793 int flags, char *name, caddr_t valuep, int *lengthp)
785 794 {
786 795 struct dev_ops *ops = NULL;
787 796 int res;
788 797
789 798 ASSERT((dip != NULL) && (name != NULL));
790 799 ASSERT((prop_op == PROP_LEN) || (valuep != NULL));
791 800 ASSERT(lengthp != NULL);
792 801
793 802 /*
794 803 * we can only be invoked after a driver has been opened and
795 804 * someone has a layered handle to it, so there had better be
796 805 * a valid ops vector.
797 806 */
798 807 ops = DEVI(dip)->devi_ops;
799 808 ASSERT(ops && ops->devo_cb_ops);
800 809
801 810 /*
802 811 * Some nexus drivers incorrectly set cb_prop_op to nodev,
803 812 * nulldev or even NULL.
804 813 */
805 814 if ((ops->devo_cb_ops->cb_prop_op == nodev) ||
806 815 (ops->devo_cb_ops->cb_prop_op == nulldev) ||
807 816 (ops->devo_cb_ops->cb_prop_op == NULL)) {
808 817 return (DDI_PROP_NOT_FOUND);
809 818 }
810 819
811 820 /* check if this is actually DDI_DEV_T_ANY query */
812 821 if (flags & LDI_DEV_T_ANY) {
813 822 flags &= ~LDI_DEV_T_ANY;
814 823 dev = DDI_DEV_T_ANY;
815 824 }
816 825
817 826 res = cdev_prop_op(dev, dip, prop_op, flags, name, valuep, lengthp);
818 827 return (res);
819 828 }
820 829
821 830 static void
822 831 i_ldi_prop_op_free(struct prop_driver_data *pdd)
823 832 {
824 833 kmem_free(pdd, pdd->pdd_size);
825 834 }
826 835
827 836 static caddr_t
828 837 i_ldi_prop_op_alloc(int prop_len)
829 838 {
830 839 struct prop_driver_data *pdd;
831 840 int pdd_size;
832 841
833 842 pdd_size = sizeof (struct prop_driver_data) + prop_len;
834 843 pdd = kmem_alloc(pdd_size, KM_SLEEP);
835 844 pdd->pdd_size = pdd_size;
836 845 pdd->pdd_prop_free = i_ldi_prop_op_free;
837 846 return ((caddr_t)&pdd[1]);
838 847 }
839 848
840 849 /*
841 850 * i_ldi_prop_op_typed() is a wrapper for i_ldi_prop_op that is used
842 851 * by the typed ldi property lookup interfaces.
843 852 */
844 853 static int
845 854 i_ldi_prop_op_typed(dev_t dev, dev_info_t *dip, int flags, char *name,
846 855 caddr_t *datap, int *lengthp, int elem_size)
847 856 {
848 857 caddr_t prop_val;
849 858 int prop_len, res;
850 859
851 860 ASSERT((dip != NULL) && (name != NULL));
852 861 ASSERT((datap != NULL) && (lengthp != NULL));
853 862
854 863 /*
855 864 	 * first call the driver's prop_op() interface to allow it
856 865 	 * to override default property values.
857 866 */
858 867 res = i_ldi_prop_op(dev, dip, PROP_LEN,
859 868 flags | DDI_PROP_DYNAMIC, name, NULL, &prop_len);
860 869 if (res != DDI_PROP_SUCCESS)
861 870 return (DDI_PROP_NOT_FOUND);
862 871
863 872 /* sanity check the property length */
864 873 if (prop_len == 0) {
865 874 /*
866 875 	 * the ddi typed interfaces don't allow drivers to
867 876 * create properties with a length of 0. so we should
868 877 * prevent drivers from returning 0 length dynamic
869 878 * properties for typed property lookups.
870 879 */
871 880 return (DDI_PROP_NOT_FOUND);
872 881 }
873 882
874 883 /* sanity check the property length against the element size */
875 884 if (elem_size && ((prop_len % elem_size) != 0))
876 885 return (DDI_PROP_NOT_FOUND);
877 886
878 887 /*
879 888 * got it. now allocate a prop_driver_data struct so that the
880 889 * user can free the property via ddi_prop_free().
881 890 */
882 891 prop_val = i_ldi_prop_op_alloc(prop_len);
883 892
884 893 /* lookup the property again, this time get the value */
885 894 res = i_ldi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
886 895 flags | DDI_PROP_DYNAMIC, name, prop_val, &prop_len);
887 896 if (res != DDI_PROP_SUCCESS) {
888 897 ddi_prop_free(prop_val);
889 898 return (DDI_PROP_NOT_FOUND);
890 899 }
891 900
892 901 /* sanity check the property length */
893 902 if (prop_len == 0) {
894 903 ddi_prop_free(prop_val);
895 904 return (DDI_PROP_NOT_FOUND);
896 905 }
897 906
898 907 /* sanity check the property length against the element size */
899 908 if (elem_size && ((prop_len % elem_size) != 0)) {
900 909 ddi_prop_free(prop_val);
901 910 return (DDI_PROP_NOT_FOUND);
902 911 }
903 912
904 913 /*
905 914 * return the prop_driver_data struct and, optionally, the length
906 915 * of the data.
907 916 */
908 917 *datap = prop_val;
909 918 *lengthp = prop_len;
910 919
911 920 return (DDI_PROP_SUCCESS);
912 921 }
913 922
914 923 /*
915 924  * i_check_string looks at a string property and makes sure it's
916 925  * a valid null-terminated string
917 926 */
918 927 static int
919 928 i_check_string(char *str, int prop_len)
920 929 {
921 930 int i;
922 931
923 932 ASSERT(str != NULL);
924 933
925 934 for (i = 0; i < prop_len; i++) {
926 935 if (str[i] == '\0')
927 936 return (0);
928 937 }
929 938 return (1);
930 939 }
931 940
932 941 /*
933 942  * i_pack_string_array takes a string array property that is represented
934 943 * as a concatenation of strings (with the NULL character included for
935 944 * each string) and converts it into a format that can be returned by
936 945 * ldi_prop_lookup_string_array.
937 946 */
938 947 static int
939 948 i_pack_string_array(char *str_concat, int prop_len,
940 949 char ***str_arrayp, int *nelemp)
941 950 {
942 951 int i, nelem, pack_size;
943 952 char **str_array, *strptr;
944 953
945 954 /*
946 955 * first we need to sanity check the input string array.
947 956 	 * in essence this can be done by making sure that the last
948 957 	 * character of the array passed in is null. (meaning the last
949 958 	 * string in the array is NULL terminated.)
950 959 */
951 960 if (str_concat[prop_len - 1] != '\0')
952 961 return (1);
953 962
954 963 /* now let's count the number of strings in the array */
955 964 for (nelem = i = 0; i < prop_len; i++)
956 965 if (str_concat[i] == '\0')
957 966 nelem++;
958 967 ASSERT(nelem >= 1);
959 968
960 969 /* now let's allocate memory for the new packed property */
961 970 pack_size = (sizeof (char *) * (nelem + 1)) + prop_len;
962 971 str_array = (char **)i_ldi_prop_op_alloc(pack_size);
963 972
964 973 /* let's copy the actual string data into the new property */
965 974 strptr = (char *)&(str_array[nelem + 1]);
966 975 bcopy(str_concat, strptr, prop_len);
967 976
968 977 /* now initialize the string array pointers */
969 978 for (i = 0; i < nelem; i++) {
970 979 str_array[i] = strptr;
971 980 strptr += strlen(strptr) + 1;
972 981 }
973 982 str_array[nelem] = NULL;
974 983
975 984 /* set the return values */
976 985 *str_arrayp = str_array;
977 986 *nelemp = nelem;
978 987
979 988 return (0);
980 989 }
981 990
982 991
983 992 /*
984 993 * LDI Project private device usage interfaces
985 994 */
986 995
987 996 /*
988 997  * Get a count of how many devices are currently open by different consumers
989 998 */
990 999 int
991 1000 ldi_usage_count()
992 1001 {
993 1002 return (ldi_handle_hash_count);
994 1003 }
995 1004
996 1005 static void
997 1006 ldi_usage_walker_tgt_helper(ldi_usage_t *ldi_usage, vnode_t *vp)
998 1007 {
999 1008 dev_info_t *dip;
1000 1009 dev_t dev;
1001 1010
1002 1011 ASSERT(STYP_VALID(VTYP_TO_STYP(vp->v_type)));
1003 1012
1004 1013 /* get the target devt */
1005 1014 dev = vp->v_rdev;
1006 1015
1007 1016 /* try to get the target dip */
1008 1017 dip = VTOCS(vp)->s_dip;
1009 1018 if (dip != NULL) {
1010 1019 e_ddi_hold_devi(dip);
1011 1020 } else if (dev != DDI_DEV_T_NONE) {
1012 1021 dip = e_ddi_hold_devi_by_dev(dev, 0);
1013 1022 }
1014 1023
1015 1024 /* set the target information */
1016 1025 ldi_usage->tgt_name = mod_major_to_name(getmajor(dev));
1017 1026 ldi_usage->tgt_modid = mod_name_to_modid(ldi_usage->tgt_name);
1018 1027 ldi_usage->tgt_devt = dev;
1019 1028 ldi_usage->tgt_spec_type = VTYP_TO_STYP(vp->v_type);
1020 1029 ldi_usage->tgt_dip = dip;
1021 1030 }
1022 1031
1023 1032
1024 1033 static int
1025 1034 ldi_usage_walker_helper(struct ldi_ident *lip, vnode_t *vp,
1026 1035 void *arg, int (*callback)(const ldi_usage_t *, void *))
1027 1036 {
1028 1037 ldi_usage_t ldi_usage;
1029 1038 struct devnames *dnp;
1030 1039 dev_info_t *dip;
1031 1040 major_t major;
1032 1041 dev_t dev;
1033 1042 int ret = LDI_USAGE_CONTINUE;
1034 1043
1035 1044 /* set the target device information */
1036 1045 ldi_usage_walker_tgt_helper(&ldi_usage, vp);
1037 1046
1038 1047 /* get the source devt */
1039 1048 dev = lip->li_dev;
1040 1049
1041 1050 /* try to get the source dip */
1042 1051 dip = lip->li_dip;
1043 1052 if (dip != NULL) {
1044 1053 e_ddi_hold_devi(dip);
1045 1054 } else if (dev != DDI_DEV_T_NONE) {
1046 1055 dip = e_ddi_hold_devi_by_dev(dev, 0);
1047 1056 }
1048 1057
1049 1058 /* set the valid source information */
1050 1059 ldi_usage.src_modid = lip->li_modid;
1051 1060 ldi_usage.src_name = lip->li_modname;
1052 1061 ldi_usage.src_devt = dev;
1053 1062 ldi_usage.src_dip = dip;
1054 1063
1055 1064 /*
1056 1065 * if the source ident represents either:
1057 1066 *
1058 1067 * - a kernel module (and not a device or device driver)
1059 1068 * - a device node
1060 1069 *
1061 1070 * then we currently have all the info we need to report the
1062 1071 * usage information so invoke the callback function.
1063 1072 */
1064 1073 if (((lip->li_major == -1) && (dev == DDI_DEV_T_NONE)) ||
1065 1074 (dip != NULL)) {
1066 1075 ret = callback(&ldi_usage, arg);
1067 1076 if (dip != NULL)
1068 1077 ddi_release_devi(dip);
1069 1078 if (ldi_usage.tgt_dip != NULL)
1070 1079 ddi_release_devi(ldi_usage.tgt_dip);
1071 1080 return (ret);
1072 1081 }
1073 1082
1074 1083 /*
1075 1084 * now this is kinda gross.
1076 1085 *
1077 1086 * what we do here is attempt to associate every device instance
1078 1087 * of the source driver on the system with the open target driver.
1079 1088 * we do this because we don't know which instance of the device
1080 1089 * could potentially access the lower device so we assume that all
1081 1090 * the instances could access it.
1082 1091 *
1083 1092 * there are two ways we could have gotten here:
1084 1093 *
1085 1094 * 1) this layered ident represents one created using only a
1086 1095 * major number or a driver module name. this means that when
1087 1096 * it was created we could not associate it with a particular
1088 1097 * dev_t or device instance.
1089 1098 *
1090 1099 * when could this possibly happen you ask?
1091 1100 *
1092 1101 * a perfect example of this is streams persistent links.
1093 1102 	 * when a persistent streams link is formed we can't associate
1094 1103 * the lower device stream with any particular upper device
1095 1104 * stream or instance. this is because any particular upper
1096 1105 * device stream could be closed, then another could be
1097 1106 * opened with a different dev_t and device instance, and it
1098 1107 * would still have access to the lower linked stream.
1099 1108 *
1100 1109 * since any instance of the upper streams driver could
1101 1110 * potentially access the lower stream whenever it wants,
1102 1111 * we represent that here by associating the opened lower
1103 1112 * device with every existing device instance of the upper
1104 1113 * streams driver.
1105 1114 *
1106 1115 * 2) This case should really never happen but we'll include it
1107 1116 * for completeness.
1108 1117 *
1109 1118 * it's possible that we could have gotten here because we
1110 1119 * have a dev_t for the upper device but we couldn't find a
1111 1120 * dip associated with that dev_t.
1112 1121 *
1113 1122 * the only types of devices that have dev_t without an
1114 1123 * associated dip are unbound DLPIv2 network devices. These
1115 1124 * types of devices exist to be able to attach a stream to any
1116 1125 * instance of a hardware network device. since these types of
1117 1126 * devices are usually hardware devices they should never
1118 1127 * really have other devices open.
1119 1128 */
1120 1129 if (dev != DDI_DEV_T_NONE)
1121 1130 major = getmajor(dev);
1122 1131 else
1123 1132 major = lip->li_major;
1124 1133
1125 1134 ASSERT((major >= 0) && (major < devcnt));
1126 1135
1127 1136 dnp = &devnamesp[major];
1128 1137 LOCK_DEV_OPS(&dnp->dn_lock);
1129 1138 dip = dnp->dn_head;
1130 1139 while ((dip) && (ret == LDI_USAGE_CONTINUE)) {
1131 1140 e_ddi_hold_devi(dip);
1132 1141 UNLOCK_DEV_OPS(&dnp->dn_lock);
1133 1142
1134 1143 /* set the source dip */
1135 1144 ldi_usage.src_dip = dip;
1136 1145
1137 1146 /* invoke the callback function */
1138 1147 ret = callback(&ldi_usage, arg);
1139 1148
1140 1149 LOCK_DEV_OPS(&dnp->dn_lock);
1141 1150 ddi_release_devi(dip);
1142 1151 dip = ddi_get_next(dip);
1143 1152 }
1144 1153 UNLOCK_DEV_OPS(&dnp->dn_lock);
1145 1154
1146 1155 /* if there was a target dip, release it */
1147 1156 if (ldi_usage.tgt_dip != NULL)
1148 1157 ddi_release_devi(ldi_usage.tgt_dip);
1149 1158
1150 1159 return (ret);
1151 1160 }
1152 1161
1153 1162 /*
1154 1163 * ldi_usage_walker() - this walker reports LDI kernel device usage
1155 1164 * information via the callback() callback function. the LDI keeps track
1156 1165 * of what devices are being accessed in its own internal data structures.
1157 1166 * this function walks those data structures to determine device usage.
1158 1167 */
1159 1168 void
1160 1169 ldi_usage_walker(void *arg, int (*callback)(const ldi_usage_t *, void *))
1161 1170 {
1162 1171 struct ldi_handle *lhp;
1163 1172 struct ldi_ident *lip;
1164 1173 vnode_t *vp;
1165 1174 int i;
1166 1175 int ret = LDI_USAGE_CONTINUE;
1167 1176
1168 1177 for (i = 0; i < LH_HASH_SZ; i++) {
1169 1178 mutex_enter(&ldi_handle_hash_lock[i]);
1170 1179
1171 1180 lhp = ldi_handle_hash[i];
1172 1181 while ((lhp != NULL) && (ret == LDI_USAGE_CONTINUE)) {
1173 1182 lip = lhp->lh_ident;
1174 1183 vp = lhp->lh_vp;
1175 1184
1176 1185 /* invoke the devinfo callback function */
1177 1186 ret = ldi_usage_walker_helper(lip, vp, arg, callback);
1178 1187
1179 1188 lhp = lhp->lh_next;
1180 1189 }
1181 1190 mutex_exit(&ldi_handle_hash_lock[i]);
1182 1191
1183 1192 if (ret != LDI_USAGE_CONTINUE)
1184 1193 break;
1185 1194 }
1186 1195 }
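A hedged sketch of the callback contract ldi_usage_walker() expects; the callback name is illustrative, and the structure members (src_name, src_modid, tgt_name) are the ones filled in by the helpers above.

	static int
	xx_usage_cb(const ldi_usage_t *up, void *arg)
	{
		cmn_err(CE_CONT, "%s (modid %d) has %s open\n",
		    up->src_name, up->src_modid, up->tgt_name);
		return (LDI_USAGE_CONTINUE);	/* keep walking */
	}

	...
	ldi_usage_walker(NULL, xx_usage_cb);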
1187 1196
1188 1197 /*
1189 1198 * LDI Project private interfaces (streams linking interfaces)
1190 1199 *
1191 1200 * Streams supports a type of built in device layering via linking.
1192 1201 * Certain types of streams drivers can be streams multiplexors.
1193 1202 * A streams multiplexor supports the I_LINK/I_PLINK operation.
1194 1203  * These operations allow other streams devices to be linked under the
1195 1204 * multiplexor. By definition all streams multiplexors are devices
1196 1205 * so this linking is a type of device layering where the multiplexor
1197 1206 * device is layered on top of the device linked below it.
1198 1207 */
1199 1208
1200 1209 /*
1201 1210 * ldi_mlink_lh() is invoked when streams are linked using LDI handles.
1202 1211 * It is not used for normal I_LINKs and I_PLINKs using file descriptors.
1203 1212 *
1204 1213 * The streams framework keeps track of links via the file_t of the lower
1205 1214 * stream. The LDI keeps track of devices using a vnode. In the case
1206 1215  * of a streams link created via an LDI handle, ldi_mlink_lh() allocates
1207 1216 * a file_t that the streams framework can use to track the linkage.
1208 1217 */
1209 1218 int
1210 1219 ldi_mlink_lh(vnode_t *vp, int cmd, intptr_t arg, cred_t *crp, int *rvalp)
1211 1220 {
1212 1221 struct ldi_handle *lhp = (struct ldi_handle *)arg;
1213 1222 vnode_t *vpdown;
1214 1223 file_t *fpdown;
1215 1224 int err;
1216 1225
1217 1226 if (lhp == NULL)
1218 1227 return (EINVAL);
1219 1228
1220 1229 vpdown = lhp->lh_vp;
1221 1230 ASSERT(vn_matchops(vpdown, spec_getvnodeops()));
1222 1231 ASSERT(cmd == _I_PLINK_LH);
1223 1232
1224 1233 /*
1225 1234 * create a new lower vnode and a file_t that points to it,
1226 1235 * streams linking requires a file_t. falloc() returns with
1227 1236 * fpdown locked.
1228 1237 */
1229 1238 VN_HOLD(vpdown);
1230 1239 (void) falloc(vpdown, FREAD|FWRITE, &fpdown, NULL);
1231 1240 mutex_exit(&fpdown->f_tlock);
1232 1241
1233 1242 /* try to establish the link */
1234 1243 err = mlink_file(vp, I_PLINK, fpdown, crp, rvalp, 1);
1235 1244
1236 1245 if (err != 0) {
1237 1246 /* the link failed, free the file_t and release the vnode */
1238 1247 mutex_enter(&fpdown->f_tlock);
1239 1248 unfalloc(fpdown);
1240 1249 VN_RELE(vpdown);
1241 1250 }
1242 1251
1243 1252 return (err);
1244 1253 }
1245 1254
1246 1255 /*
1247 1256 * ldi_mlink_fp() is invoked for all successful streams linkages created
1248 1257 * via I_LINK and I_PLINK. ldi_mlink_fp() records the linkage information
1249 1258 * in its internal state so that the devinfo snapshot code has some
1250 1259 * observability into streams device linkage information.
1251 1260 */
1252 1261 void
1253 1262 ldi_mlink_fp(struct stdata *stp, file_t *fpdown, int lhlink, int type)
1254 1263 {
1255 1264 vnode_t *vp = fpdown->f_vnode;
1256 1265 struct snode *sp, *csp;
1257 1266 ldi_ident_t li;
1258 1267 major_t major;
1259 1268 int ret;
1260 1269
1261 1270 /* if the lower stream is not a device then return */
1262 1271 if (!vn_matchops(vp, spec_getvnodeops()))
1263 1272 return;
1264 1273
1265 1274 ASSERT(!servicing_interrupt());
1266 1275
1267 1276 LDI_STREAMS_LNK((CE_NOTE, "%s: linking streams "
1268 1277 "stp=0x%p, fpdown=0x%p", "ldi_mlink_fp",
1269 1278 (void *)stp, (void *)fpdown));
1270 1279
1271 1280 sp = VTOS(vp);
1272 1281 csp = VTOS(sp->s_commonvp);
1273 1282
1274 1283 /* check if this was a plink via a layered handle */
1275 1284 if (lhlink) {
1276 1285 /*
1277 1286 * increment the common snode s_count.
1278 1287 *
1279 1288 * this is done because after the link operation there
1280 1289 * are two ways that s_count can be decremented.
1281 1290 *
1282 1291 * when the layered handle used to create the link is
1283 1292 * closed, spec_close() is called and it will decrement
1284 1293 * s_count in the common snode. if we don't increment
1285 1294 * s_count here then this could cause spec_close() to
1286 1295 * actually close the device while it's still linked
1287 1296 * under a multiplexer.
1288 1297 *
1289 1298 * also, when the lower stream is unlinked, closef() is
1290 1299 * called for the file_t associated with this snode.
1291 1300 * closef() will call spec_close(), which will decrement
1292 1301 	 * s_count. if we don't increment s_count here then this
1293 1302 * could cause spec_close() to actually close the device
1294 1303 * while there may still be valid layered handles
1295 1304 * pointing to it.
1296 1305 */
1297 1306 mutex_enter(&csp->s_lock);
1298 1307 ASSERT(csp->s_count >= 1);
1299 1308 csp->s_count++;
1300 1309 mutex_exit(&csp->s_lock);
1301 1310
1302 1311 /*
1303 1312 * decrement the f_count.
1304 1313 * this is done because the layered driver framework does
1305 1314 * not actually cache a copy of the file_t allocated to
1306 1315 * do the link. this is done here instead of in ldi_mlink_lh()
1307 1316 * because there is a window in ldi_mlink_lh() between where
1308 1317 	 * mlink_file() returns and we would decrement the f_count
1309 1318 * when the stream could be unlinked.
1310 1319 */
1311 1320 mutex_enter(&fpdown->f_tlock);
1312 1321 fpdown->f_count--;
1313 1322 mutex_exit(&fpdown->f_tlock);
1314 1323 }
1315 1324
1316 1325 /*
1317 1326 * NOTE: here we rely on the streams subsystem not allowing
1318 1327 * a stream to be multiplexed more than once. if this
1319 1328 * changes, we break.
1320 1329 *
1321 1330 * mark the snode/stream as multiplexed
1322 1331 */
1323 1332 mutex_enter(&sp->s_lock);
1324 1333 ASSERT(!(sp->s_flag & SMUXED));
1325 1334 sp->s_flag |= SMUXED;
1326 1335 mutex_exit(&sp->s_lock);
1327 1336
1328 1337 /* get a layered ident for the upper stream */
1329 1338 if (type == LINKNORMAL) {
1330 1339 /*
1331 1340 		 * if the link is not persistent then we can associate
1332 1341 * the upper stream with a dev_t. this is because the
1333 1342 * upper stream is associated with a vnode, which is
1334 1343 * associated with a dev_t and this binding can't change
1335 1344 * during the life of the stream. since the link isn't
1336 1345 		 * persistent once the stream is destroyed the link is
1337 1346 * destroyed. so the dev_t will be valid for the life
1338 1347 * of the link.
1339 1348 */
1340 1349 ret = ldi_ident_from_stream(getendq(stp->sd_wrq), &li);
1341 1350 } else {
1342 1351 /*
1343 1352 		 * if the link is persistent we can only associate the
1344 1353 * link with a driver (and not a dev_t.) this is
1345 1354 * because subsequent opens of the upper device may result
1346 1355 * in a different stream (and dev_t) having access to
1347 1356 * the lower stream.
1348 1357 *
1349 1358 * for example, if the upper stream is closed after the
1350 1359 		 * persistent link operation is completed, a subsequent
1351 1360 * open of the upper device will create a new stream which
1352 1361 * may have a different dev_t and an unlink operation
1353 1362 * can be performed using this new upper stream.
1354 1363 */
1355 1364 ASSERT(type == LINKPERSIST);
1356 1365 major = getmajor(stp->sd_vnode->v_rdev);
1357 1366 ret = ldi_ident_from_major(major, &li);
1358 1367 }
1359 1368
1360 1369 ASSERT(ret == 0);
1361 1370 (void) handle_alloc(vp, (struct ldi_ident *)li);
1362 1371 ldi_ident_release(li);
1363 1372 }
1364 1373
1365 1374 void
1366 1375 ldi_munlink_fp(struct stdata *stp, file_t *fpdown, int type)
1367 1376 {
1368 1377 struct ldi_handle *lhp;
1369 1378 vnode_t *vp = (vnode_t *)fpdown->f_vnode;
1370 1379 struct snode *sp;
1371 1380 ldi_ident_t li;
1372 1381 major_t major;
1373 1382 int ret;
1374 1383
1375 1384 /* if the lower stream is not a device then return */
1376 1385 if (!vn_matchops(vp, spec_getvnodeops()))
1377 1386 return;
1378 1387
1379 1388 ASSERT(!servicing_interrupt());
1380 1389 ASSERT((type == LINKNORMAL) || (type == LINKPERSIST));
1381 1390
1382 1391 LDI_STREAMS_LNK((CE_NOTE, "%s: unlinking streams "
1383 1392 "stp=0x%p, fpdown=0x%p", "ldi_munlink_fp",
1384 1393 (void *)stp, (void *)fpdown));
1385 1394
1386 1395 /*
1387 1396 * NOTE: here we rely on the streams subsystem not allowing
1388 1397 * a stream to be multiplexed more than once. if this
1389 1398 * changes, we break.
1390 1399 *
1391 1400 * mark the snode/stream as not multiplexed
1392 1401 */
1393 1402 sp = VTOS(vp);
1394 1403 mutex_enter(&sp->s_lock);
1395 1404 ASSERT(sp->s_flag & SMUXED);
1396 1405 sp->s_flag &= ~SMUXED;
1397 1406 mutex_exit(&sp->s_lock);
1398 1407
1399 1408 /*
1400 1409 * clear the owner for this snode
1401 1410 * see the comment in ldi_mlink_fp() for information about how
1402 1411 * the ident is allocated
1403 1412 */
1404 1413 if (type == LINKNORMAL) {
1405 1414 ret = ldi_ident_from_stream(getendq(stp->sd_wrq), &li);
1406 1415 } else {
1407 1416 ASSERT(type == LINKPERSIST);
1408 1417 major = getmajor(stp->sd_vnode->v_rdev);
1409 1418 ret = ldi_ident_from_major(major, &li);
1410 1419 }
1411 1420
1412 1421 ASSERT(ret == 0);
1413 1422 lhp = handle_find(vp, (struct ldi_ident *)li);
1414 1423 handle_release(lhp);
1415 1424 ldi_ident_release(li);
1416 1425 }
1417 1426
1418 1427 /*
1419 1428 * LDI Consolidation private interfaces
1420 1429 */
1421 1430 int
1422 1431 ldi_ident_from_mod(struct modlinkage *modlp, ldi_ident_t *lip)
1423 1432 {
1424 1433 struct modctl *modp;
1425 1434 major_t major;
1426 1435 char *name;
1427 1436
1428 1437 if ((modlp == NULL) || (lip == NULL))
1429 1438 return (EINVAL);
1430 1439
1431 1440 ASSERT(!servicing_interrupt());
1432 1441
1433 1442 modp = mod_getctl(modlp);
1434 1443 if (modp == NULL)
1435 1444 return (EINVAL);
1436 1445 name = modp->mod_modname;
1437 1446 if (name == NULL)
1438 1447 return (EINVAL);
1439 1448 major = mod_name_to_major(name);
1440 1449
1441 1450 *lip = (ldi_ident_t)ident_alloc(name, NULL, DDI_DEV_T_NONE, major);
1442 1451
1443 1452 LDI_ALLOCFREE((CE_WARN, "%s: li=0x%p, mod=%s",
1444 1453 "ldi_ident_from_mod", (void *)*lip, name));
1445 1454
1446 1455 return (0);
1447 1456 }
1448 1457
1449 1458 ldi_ident_t
1450 1459 ldi_ident_from_anon()
1451 1460 {
1452 1461 ldi_ident_t lip;
1453 1462
1454 1463 ASSERT(!servicing_interrupt());
1455 1464
1456 1465 lip = (ldi_ident_t)ident_alloc("genunix", NULL, DDI_DEV_T_NONE, -1);
1457 1466
1458 1467 LDI_ALLOCFREE((CE_WARN, "%s: li=0x%p, mod=%s",
1459 1468 "ldi_ident_from_anon", (void *)lip, "genunix"));
1460 1469
1461 1470 return (lip);
1462 1471 }
1463 1472
1464 1473
1465 1474 /*
1466 1475 * LDI Public interfaces
1467 1476 */
1468 1477 int
1469 1478 ldi_ident_from_stream(struct queue *sq, ldi_ident_t *lip)
1470 1479 {
1471 1480 struct stdata *stp;
1472 1481 dev_t dev;
1473 1482 char *name;
1474 1483
1475 1484 if ((sq == NULL) || (lip == NULL))
1476 1485 return (EINVAL);
1477 1486
1478 1487 ASSERT(!servicing_interrupt());
1479 1488
1480 1489 stp = sq->q_stream;
1481 1490 if (!vn_matchops(stp->sd_vnode, spec_getvnodeops()))
1482 1491 return (EINVAL);
1483 1492
1484 1493 dev = stp->sd_vnode->v_rdev;
1485 1494 name = mod_major_to_name(getmajor(dev));
1486 1495 if (name == NULL)
1487 1496 return (EINVAL);
1488 1497 *lip = (ldi_ident_t)ident_alloc(name, NULL, dev, -1);
1489 1498
1490 1499 LDI_ALLOCFREE((CE_WARN,
1491 1500 "%s: li=0x%p, mod=%s, minor=0x%x, stp=0x%p",
1492 1501 "ldi_ident_from_stream", (void *)*lip, name, getminor(dev),
1493 1502 (void *)stp));
1494 1503
1495 1504 return (0);
1496 1505 }
1497 1506
1498 1507 int
1499 1508 ldi_ident_from_dev(dev_t dev, ldi_ident_t *lip)
1500 1509 {
1501 1510 char *name;
1502 1511
1503 1512 if (lip == NULL)
1504 1513 return (EINVAL);
1505 1514
1506 1515 ASSERT(!servicing_interrupt());
1507 1516
1508 1517 name = mod_major_to_name(getmajor(dev));
1509 1518 if (name == NULL)
1510 1519 return (EINVAL);
1511 1520 *lip = (ldi_ident_t)ident_alloc(name, NULL, dev, -1);
1512 1521
1513 1522 LDI_ALLOCFREE((CE_WARN,
1514 1523 "%s: li=0x%p, mod=%s, minor=0x%x",
1515 1524 "ldi_ident_from_dev", (void *)*lip, name, getminor(dev)));
1516 1525
1517 1526 return (0);
1518 1527 }
1519 1528
1520 1529 int
1521 1530 ldi_ident_from_dip(dev_info_t *dip, ldi_ident_t *lip)
1522 1531 {
1523 1532 struct dev_info *devi = (struct dev_info *)dip;
1524 1533 char *name;
1525 1534
1526 1535 if ((dip == NULL) || (lip == NULL))
1527 1536 return (EINVAL);
1528 1537
1529 1538 ASSERT(!servicing_interrupt());
1530 1539
1531 1540 name = mod_major_to_name(devi->devi_major);
1532 1541 if (name == NULL)
1533 1542 return (EINVAL);
1534 1543 *lip = (ldi_ident_t)ident_alloc(name, dip, DDI_DEV_T_NONE, -1);
1535 1544
1536 1545 LDI_ALLOCFREE((CE_WARN,
1537 1546 "%s: li=0x%p, mod=%s, dip=0x%p",
1538 1547 "ldi_ident_from_dip", (void *)*lip, name, (void *)devi));
1539 1548
1540 1549 return (0);
1541 1550 }
1542 1551
1543 1552 int
1544 1553 ldi_ident_from_major(major_t major, ldi_ident_t *lip)
1545 1554 {
1546 1555 char *name;
1547 1556
1548 1557 if (lip == NULL)
1549 1558 return (EINVAL);
1550 1559
1551 1560 ASSERT(!servicing_interrupt());
1552 1561
1553 1562 name = mod_major_to_name(major);
1554 1563 if (name == NULL)
1555 1564 return (EINVAL);
1556 1565 *lip = (ldi_ident_t)ident_alloc(name, NULL, DDI_DEV_T_NONE, major);
1557 1566
1558 1567 LDI_ALLOCFREE((CE_WARN,
1559 1568 "%s: li=0x%p, mod=%s",
1560 1569 "ldi_ident_from_major", (void *)*lip, name));
1561 1570
1562 1571 return (0);
1563 1572 }
1564 1573
1565 1574 void
1566 1575 ldi_ident_release(ldi_ident_t li)
1567 1576 {
1568 1577 struct ldi_ident *ident = (struct ldi_ident *)li;
1569 1578 char *name;
1570 1579
1571 1580 if (li == NULL)
1572 1581 return;
1573 1582
1574 1583 ASSERT(!servicing_interrupt());
1575 1584
1576 1585 name = ident->li_modname;
1577 1586
1578 1587 LDI_ALLOCFREE((CE_WARN,
1579 1588 "%s: li=0x%p, mod=%s",
1580 1589 "ldi_ident_release", (void *)li, name));
1581 1590
1582 1591 ident_release((struct ldi_ident *)li);
1583 1592 }
1584 1593
1585 1594 /* get a handle to a device by dev_t and otyp */
1586 1595 int
1587 1596 ldi_open_by_dev(dev_t *devp, int otyp, int flag, cred_t *cr,
1588 1597 ldi_handle_t *lhp, ldi_ident_t li)
1589 1598 {
1590 1599 struct ldi_ident *lip = (struct ldi_ident *)li;
1591 1600 int ret;
1592 1601 vnode_t *vp;
1593 1602
1594 1603 /* sanity check required input parameters */
1595 1604 if ((devp == NULL) || (!OTYP_VALID(otyp)) || (cr == NULL) ||
1596 1605 (lhp == NULL) || (lip == NULL))
1597 1606 return (EINVAL);
1598 1607
1599 1608 ASSERT(!servicing_interrupt());
1600 1609
1601 1610 if ((ret = ldi_vp_from_dev(*devp, otyp, &vp)) != 0)
1602 1611 return (ret);
1603 1612
1604 1613 if ((ret = ldi_open_by_vp(&vp, flag, cr, lhp, lip)) == 0) {
1605 1614 *devp = vp->v_rdev;
1606 1615 }
1607 1616 VN_RELE(vp);
1608 1617
1609 1618 return (ret);
1610 1619 }
1611 1620
1612 1621 /* get a handle to a device by pathname */
1613 1622 int
1614 1623 ldi_open_by_name(char *pathname, int flag, cred_t *cr,
1615 1624 ldi_handle_t *lhp, ldi_ident_t li)
1616 1625 {
1617 1626 struct ldi_ident *lip = (struct ldi_ident *)li;
1618 1627 int ret;
1619 1628 vnode_t *vp;
1620 1629
1621 1630 /* sanity check required input parameters */
1622 1631 if ((pathname == NULL) || (*pathname != '/') ||
1623 1632 (cr == NULL) || (lhp == NULL) || (lip == NULL))
1624 1633 return (EINVAL);
1625 1634
1626 1635 ASSERT(!servicing_interrupt());
1627 1636
1628 1637 if ((ret = ldi_vp_from_name(pathname, &vp)) != 0)
1629 1638 return (ret);
1630 1639
1631 1640 ret = ldi_open_by_vp(&vp, flag, cr, lhp, lip);
1632 1641 VN_RELE(vp);
1633 1642
1634 1643 return (ret);
1635 1644 }
1636 1645
1637 1646 /* get a handle to a device by devid and minor_name */
1638 1647 int
1639 1648 ldi_open_by_devid(ddi_devid_t devid, char *minor_name,
1640 1649 int flag, cred_t *cr, ldi_handle_t *lhp, ldi_ident_t li)
1641 1650 {
1642 1651 struct ldi_ident *lip = (struct ldi_ident *)li;
1643 1652 int ret;
1644 1653 vnode_t *vp;
1645 1654
1646 1655 /* sanity check required input parameters */
1647 1656 if ((minor_name == NULL) || (cr == NULL) ||
1648 1657 (lhp == NULL) || (lip == NULL))
1649 1658 return (EINVAL);
1650 1659
1651 1660 ASSERT(!servicing_interrupt());
1652 1661
1653 1662 if ((ret = ldi_vp_from_devid(devid, minor_name, &vp)) != 0)
1654 1663 return (ret);
1655 1664
1656 1665 ret = ldi_open_by_vp(&vp, flag, cr, lhp, lip);
1657 1666 VN_RELE(vp);
1658 1667
1659 1668 return (ret);
1660 1669 }
1661 1670
1662 1671 int
1663 1672 ldi_close(ldi_handle_t lh, int flag, cred_t *cr)
1664 1673 {
1665 1674 struct ldi_handle *handlep = (struct ldi_handle *)lh;
1666 1675 struct ldi_event *lep;
1667 1676 int err = 0;
1668 1677 int notify = 0;
1669 1678 list_t *listp;
1670 1679 ldi_ev_callback_impl_t *lecp;
1671 1680
1672 1681 if (lh == NULL)
1673 1682 return (EINVAL);
1674 1683
1675 1684 ASSERT(!servicing_interrupt());
1676 1685
1677 1686 #ifdef LDI_OBSOLETE_EVENT
1678 1687
1679 1688 /*
1680 1689 * Any event handlers should have been unregistered by the
1681 1690 * time ldi_close() is called. If they haven't then it's a
1682 1691 * bug.
1683 1692 *
1684 1693 * In a debug kernel we'll panic to make the problem obvious.
1685 1694 */
1686 1695 ASSERT(handlep->lh_events == NULL);
1687 1696
1688 1697 /*
1689 1698 * On a production kernel we'll "do the right thing" (unregister
1690 1699 * the event handlers) and then complain about having to do the
1691 1700 * work ourselves.
1692 1701 */
1693 1702 while ((lep = handlep->lh_events) != NULL) {
1694 1703 err = 1;
1695 1704 (void) ldi_remove_event_handler(lh, (ldi_callback_id_t)lep);
1696 1705 }
1697 1706 if (err) {
1698 1707 struct ldi_ident *lip = handlep->lh_ident;
1699 1708 ASSERT(lip != NULL);
1700 1709 cmn_err(CE_NOTE, "ldi err: %s "
1701 1710 "failed to unregister layered event handlers before "
1702 1711 "closing devices", lip->li_modname);
1703 1712 }
1704 1713 #endif
1705 1714
1706 1715 /* do a layered close on the device */
1707 1716 err = VOP_CLOSE(handlep->lh_vp, flag | FKLYR, 1, (offset_t)0, cr, NULL);
1708 1717
1709 1718 LDI_OPENCLOSE((CE_WARN, "%s: lh=0x%p", "ldi close", (void *)lh));
1710 1719
1711 1720 /*
1712 1721 * Search the event callback list for callbacks with this
1713 1722 * handle. There are 2 cases
1714 1723 * 1. Called in the context of a notify. The handle consumer
1715 1724 * is releasing its hold on the device to allow a reconfiguration
1716 1725 * of the device. Simply NULL out the handle and the notify callback.
1717 1726 * The finalize callback is still available so that the consumer
1718 1727 * knows of the final disposition of the device.
1719 1728 * 2. Not called in the context of notify. NULL out the handle as well
1720 1729 * as the notify and finalize callbacks. Since the consumer has
1721 1730 * closed the handle, we assume it is not interested in the
1722 1731 * notify and finalize callbacks.
1723 1732 */
1724 1733 ldi_ev_lock();
1725 1734
1726 1735 if (handlep->lh_flags & LH_FLAGS_NOTIFY)
1727 1736 notify = 1;
1728 1737 listp = &ldi_ev_callback_list.le_head;
1729 1738 for (lecp = list_head(listp); lecp; lecp = list_next(listp, lecp)) {
1730 1739 if (lecp->lec_lhp != handlep)
1731 1740 continue;
1732 1741 lecp->lec_lhp = NULL;
1733 1742 lecp->lec_notify = NULL;
1734 1743 LDI_EVDBG((CE_NOTE, "ldi_close: NULLed lh and notify"));
1735 1744 if (!notify) {
1736 1745 LDI_EVDBG((CE_NOTE, "ldi_close: NULLed finalize"));
1737 1746 lecp->lec_finalize = NULL;
1738 1747 }
1739 1748 }
1740 1749
1741 1750 if (notify)
1742 1751 handlep->lh_flags &= ~LH_FLAGS_NOTIFY;
1743 1752 ldi_ev_unlock();
1744 1753
1745 1754 /*
1746 1755	 * Free the handle even if the device close failed. Why?
1747 1756 *
1748 1757 * If the device close failed we can't really make assumptions
1749 1758	 * about the device's state so we shouldn't allow access to the
1750 1759 * device via this handle any more. If the device consumer wants
1751 1760 * to access the device again they should open it again.
1752 1761 *
1753 1762 * This is the same way file/device close failures are handled
1754 1763 * in other places like spec_close() and closeandsetf().
1755 1764 */
1756 1765 handle_release(handlep);
1757 1766 return (err);
1758 1767 }
1759 1768
1760 1769 int
1761 1770 ldi_read(ldi_handle_t lh, struct uio *uiop, cred_t *credp)
1762 1771 {
1763 1772 struct ldi_handle *handlep = (struct ldi_handle *)lh;
1764 1773 vnode_t *vp;
1765 1774 dev_t dev;
1766 1775 int ret;
1767 1776
1768 1777 if (lh == NULL)
1769 1778 return (EINVAL);
1770 1779
1771 1780 vp = handlep->lh_vp;
1772 1781 dev = vp->v_rdev;
1773 1782 if (handlep->lh_type & LH_CBDEV) {
1774 1783 ret = cdev_read(dev, uiop, credp);
1775 1784 } else if (handlep->lh_type & LH_STREAM) {
1776 1785 ret = strread(vp, uiop, credp);
1777 1786 } else {
1778 1787 return (ENOTSUP);
1779 1788 }
1780 1789 return (ret);
1781 1790 }
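/*
 * Illustrative sketch (not part of this change): building a single-vector
 * uio for a kernel buffer and reading from the layered device through
 * ldi_read().  The xx_ name and the buf/len/off parameters are hypothetical.
 */
#if 0	/* example only */
static int
xx_read_at(ldi_handle_t lh, caddr_t buf, size_t len, offset_t off)
{
	struct iovec iov;
	struct uio uio;

	bzero(&uio, sizeof (uio));
	iov.iov_base = buf;
	iov.iov_len = len;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_loffset = off;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_resid = len;

	return (ldi_read(lh, &uio, kcred));
}
#endif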
1782 1791
1783 1792 int
1784 1793 ldi_write(ldi_handle_t lh, struct uio *uiop, cred_t *credp)
1785 1794 {
1786 1795 struct ldi_handle *handlep = (struct ldi_handle *)lh;
1787 1796 vnode_t *vp;
1788 1797 dev_t dev;
1789 1798 int ret;
1790 1799
1791 1800 if (lh == NULL)
1792 1801 return (EINVAL);
1793 1802
1794 1803 vp = handlep->lh_vp;
1795 1804 dev = vp->v_rdev;
1796 1805 if (handlep->lh_type & LH_CBDEV) {
1797 1806 ret = cdev_write(dev, uiop, credp);
1798 1807 } else if (handlep->lh_type & LH_STREAM) {
1799 1808 ret = strwrite(vp, uiop, credp);
1800 1809 } else {
1801 1810 return (ENOTSUP);
1802 1811 }
1803 1812 return (ret);
1804 1813 }
1805 1814
1806 1815 int
1807 1816 ldi_get_size(ldi_handle_t lh, uint64_t *sizep)
1808 1817 {
1809 1818 int otyp;
1810 1819 uint_t value;
1811 1820 int64_t drv_prop64;
1812 1821 struct ldi_handle *handlep = (struct ldi_handle *)lh;
1813 1822 uint_t blksize;
1814 1823 int blkshift;
1815 1824
1816 1825
1817 1826 if ((lh == NULL) || (sizep == NULL))
1818 1827 return (DDI_FAILURE);
1819 1828
1820 1829 if (handlep->lh_type & LH_STREAM)
1821 1830 return (DDI_FAILURE);
1822 1831
1823 1832 /*
1824 1833 * Determine device type (char or block).
1825 1834 * Character devices support Size/size
1826 1835 * property value. Block devices may support
1827 1836 * Nblocks/nblocks or Size/size property value.
1828 1837 */
1829 1838 if ((ldi_get_otyp(lh, &otyp)) != 0)
1830 1839 return (DDI_FAILURE);
1831 1840
1832 1841 if (otyp == OTYP_BLK) {
1833 1842 if (ldi_prop_exists(lh,
1834 1843 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "Nblocks")) {
1835 1844
1836 1845 drv_prop64 = ldi_prop_get_int64(lh,
1837 1846 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1838 1847 "Nblocks", 0);
1839 1848 blksize = ldi_prop_get_int(lh,
1840 1849 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1841 1850 "blksize", DEV_BSIZE);
1842 1851 if (blksize == DEV_BSIZE)
1843 1852 blksize = ldi_prop_get_int(lh, LDI_DEV_T_ANY |
1844 1853 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1845 1854 "device-blksize", DEV_BSIZE);
1846 1855
1847 1856 /* blksize must be a power of two */
1848 1857 ASSERT(BIT_ONLYONESET(blksize));
1849 1858 blkshift = highbit(blksize) - 1;
1850 1859
1851 1860 /*
1852 1861 * We don't support Nblocks values that don't have
1853 1862 * an accurate uint64_t byte count representation.
1854 1863 */
1855 1864 if ((uint64_t)drv_prop64 >= (UINT64_MAX >> blkshift))
1856 1865 return (DDI_FAILURE);
1857 1866
1858 1867 *sizep = (uint64_t)
1859 1868 (((u_offset_t)drv_prop64) << blkshift);
1860 1869 return (DDI_SUCCESS);
1861 1870 }
1862 1871
1863 1872 if (ldi_prop_exists(lh,
1864 1873 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "nblocks")) {
1865 1874
1866 1875 value = ldi_prop_get_int(lh,
1867 1876 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1868 1877 "nblocks", 0);
1869 1878 blksize = ldi_prop_get_int(lh,
1870 1879 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1871 1880 "blksize", DEV_BSIZE);
1872 1881 if (blksize == DEV_BSIZE)
1873 1882 blksize = ldi_prop_get_int(lh, LDI_DEV_T_ANY |
1874 1883 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1875 1884 "device-blksize", DEV_BSIZE);
1876 1885
1877 1886 /* blksize must be a power of two */
1878 1887 ASSERT(BIT_ONLYONESET(blksize));
1879 1888 blkshift = highbit(blksize) - 1;
1880 1889
1881 1890 /*
1882 1891 * We don't support nblocks values that don't have an
1883 1892 * accurate uint64_t byte count representation.
1884 1893 */
1885 1894 if ((uint64_t)value >= (UINT64_MAX >> blkshift))
1886 1895 return (DDI_FAILURE);
1887 1896
1888 1897 *sizep = (uint64_t)
1889 1898 (((u_offset_t)value) << blkshift);
1890 1899 return (DDI_SUCCESS);
1891 1900 }
1892 1901 }
1893 1902
1894 1903 if (ldi_prop_exists(lh,
1895 1904 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "Size")) {
1896 1905
1897 1906 drv_prop64 = ldi_prop_get_int64(lh,
1898 1907 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "Size", 0);
1899 1908 *sizep = (uint64_t)drv_prop64;
1900 1909 return (DDI_SUCCESS);
1901 1910 }
1902 1911
1903 1912 if (ldi_prop_exists(lh,
1904 1913 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "size")) {
1905 1914
1906 1915 value = ldi_prop_get_int(lh,
1907 1916 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "size", 0);
1908 1917 *sizep = (uint64_t)value;
1909 1918 return (DDI_SUCCESS);
1910 1919 }
1911 1920
1912 1921 /* unable to determine device size */
1913 1922 return (DDI_FAILURE);
1914 1923 }
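/*
 * Illustrative sketch (not part of this change): a caller retrieving the
 * device size in bytes and converting it to DEV_BSIZE blocks.  With the
 * code above, an "Nblocks" value of 2097152 and a 512-byte block size
 * (blkshift of 9) yields a size of 1 GiB.
 */
#if 0	/* example only */
	uint64_t size, nblks;

	if (ldi_get_size(lh, &size) != DDI_SUCCESS)
		return (EIO);
	nblks = size >> DEV_BSHIFT;	/* number of DEV_BSIZE blocks */
#endif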
1915 1924
1916 1925 int
1917 1926 ldi_ioctl(ldi_handle_t lh, int cmd, intptr_t arg, int mode,
1918 1927 cred_t *cr, int *rvalp)
1919 1928 {
1920 1929 struct ldi_handle *handlep = (struct ldi_handle *)lh;
1921 1930 vnode_t *vp;
1922 1931 dev_t dev;
1923 1932 int ret, copymode, unused;
1924 1933
1925 1934 if (lh == NULL)
1926 1935 return (EINVAL);
1927 1936
1928 1937 /*
1929 1938 * if the data pointed to by arg is located in the kernel then
1930 1939 * make sure the FNATIVE flag is set.
1931 1940 */
1932 1941 if (mode & FKIOCTL)
1933 1942 mode = (mode & ~FMODELS) | FNATIVE | FKIOCTL;
1934 1943
1935 1944 /*
1936 1945 * Some drivers assume that rvalp will always be non-NULL, so in
1937 1946 * an attempt to avoid panics if the caller passed in a NULL
1938 1947 * value, update rvalp to point to a temporary variable.
1939 1948 */
1940 1949 if (rvalp == NULL)
1941 1950 rvalp = &unused;
1942 1951 vp = handlep->lh_vp;
1943 1952 dev = vp->v_rdev;
1944 1953 if (handlep->lh_type & LH_CBDEV) {
1945 1954 ret = cdev_ioctl(dev, cmd, arg, mode, cr, rvalp);
1946 1955 } else if (handlep->lh_type & LH_STREAM) {
1947 1956 copymode = (mode & FKIOCTL) ? K_TO_K : U_TO_K;
1948 1957
1949 1958 /*
1950 1959 * if we get an I_PLINK from within the kernel the
1951 1960 * arg is a layered handle pointer instead of
1952 1961 * a file descriptor, so we translate this ioctl
1953 1962 * into a private one that can handle this.
1954 1963 */
1955 1964 if ((mode & FKIOCTL) && (cmd == I_PLINK))
1956 1965 cmd = _I_PLINK_LH;
1957 1966
1958 1967 ret = strioctl(vp, cmd, arg, mode, copymode, cr, rvalp);
1959 1968 } else {
1960 1969 return (ENOTSUP);
1961 1970 }
1962 1971
1963 1972 return (ret);
1964 1973 }
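/*
 * Illustrative sketch (not part of this change): issuing an ioctl from
 * kernel context.  The dk_minfo structure (from <sys/dkio.h>) lives in
 * kernel memory, so FKIOCTL is passed and the code above forces FNATIVE,
 * causing the target driver to perform kernel-to-kernel copies.
 */
#if 0	/* example only */
	struct dk_minfo minfo;
	int rval;

	if (ldi_ioctl(lh, DKIOCGMEDIAINFO, (intptr_t)&minfo,
	    FKIOCTL | FREAD, kcred, &rval) == 0) {
		/* minfo.dki_lbsize and minfo.dki_capacity are now valid */
	}
#endif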
1965 1974
1966 1975 int
1967 1976 ldi_poll(ldi_handle_t lh, short events, int anyyet, short *reventsp,
1968 1977 struct pollhead **phpp)
1969 1978 {
1970 1979 struct ldi_handle *handlep = (struct ldi_handle *)lh;
1971 1980 vnode_t *vp;
1972 1981 dev_t dev;
1973 1982 int ret;
1974 1983
1975 1984 if (lh == NULL)
1976 1985 return (EINVAL);
1977 1986
1978 1987 vp = handlep->lh_vp;
1979 1988 dev = vp->v_rdev;
1980 1989 if (handlep->lh_type & LH_CBDEV) {
1981 1990 ret = cdev_poll(dev, events, anyyet, reventsp, phpp);
1982 1991 } else if (handlep->lh_type & LH_STREAM) {
1983 1992 ret = strpoll(vp->v_stream, events, anyyet, reventsp, phpp);
1984 1993 } else {
1985 1994 return (ENOTSUP);
1986 1995 }
1987 1996
1988 1997 return (ret);
1989 1998 }
1990 1999
1991 2000 int
1992 2001 ldi_prop_op(ldi_handle_t lh, ddi_prop_op_t prop_op,
1993 2002 int flags, char *name, caddr_t valuep, int *length)
1994 2003 {
1995 2004 struct ldi_handle *handlep = (struct ldi_handle *)lh;
1996 2005 dev_t dev;
1997 2006 dev_info_t *dip;
1998 2007 int ret;
1999 2008 struct snode *csp;
2000 2009
2001 2010 if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2002 2011 return (DDI_PROP_INVAL_ARG);
2003 2012
2004 2013 if ((prop_op != PROP_LEN) && (valuep == NULL))
2005 2014 return (DDI_PROP_INVAL_ARG);
2006 2015
2007 2016 if (length == NULL)
2008 2017 return (DDI_PROP_INVAL_ARG);
2009 2018
2010 2019 /*
2011 2020	 * try to find the associated dip;
2012 2021 * this places a hold on the driver
2013 2022 */
2014 2023 dev = handlep->lh_vp->v_rdev;
2015 2024
2016 2025 csp = VTOCS(handlep->lh_vp);
2017 2026 mutex_enter(&csp->s_lock);
2018 2027 if ((dip = csp->s_dip) != NULL)
2019 2028 e_ddi_hold_devi(dip);
2020 2029 mutex_exit(&csp->s_lock);
2021 2030 if (dip == NULL)
2022 2031 dip = e_ddi_hold_devi_by_dev(dev, 0);
2023 2032
2024 2033 if (dip == NULL)
2025 2034 return (DDI_PROP_NOT_FOUND);
2026 2035
2027 2036 ret = i_ldi_prop_op(dev, dip, prop_op, flags, name, valuep, length);
2028 2037 ddi_release_devi(dip);
2029 2038
2030 2039 return (ret);
2031 2040 }
2032 2041
2033 2042 int
2034 2043 ldi_strategy(ldi_handle_t lh, struct buf *bp)
2035 2044 {
2036 2045 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2037 2046 dev_t dev;
2038 2047
2039 2048 if ((lh == NULL) || (bp == NULL))
2040 2049 return (EINVAL);
2041 2050
2042 2051 /* this entry point is only supported for cb devices */
2043 2052 dev = handlep->lh_vp->v_rdev;
2044 2053 if (!(handlep->lh_type & LH_CBDEV))
2045 2054 return (ENOTSUP);
2046 2055
2047 2056 bp->b_edev = dev;
2048 2057 bp->b_dev = cmpdev(dev);
2049 2058 return (bdev_strategy(bp));
2050 2059 }
2051 2060
2052 2061 int
2053 2062 ldi_dump(ldi_handle_t lh, caddr_t addr, daddr_t blkno, int nblk)
2054 2063 {
2055 2064 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2056 2065 dev_t dev;
2057 2066
2058 2067 if (lh == NULL)
2059 2068 return (EINVAL);
2060 2069
2061 2070 /* this entry point is only supported for cb devices */
2062 2071 dev = handlep->lh_vp->v_rdev;
2063 2072 if (!(handlep->lh_type & LH_CBDEV))
2064 2073 return (ENOTSUP);
2065 2074
2066 2075 return (bdev_dump(dev, addr, blkno, nblk));
2067 2076 }
2068 2077
2069 2078 int
2070 2079 ldi_devmap(ldi_handle_t lh, devmap_cookie_t dhp, offset_t off,
2071 2080 size_t len, size_t *maplen, uint_t model)
2072 2081 {
2073 2082 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2074 2083 dev_t dev;
2075 2084
2076 2085 if (lh == NULL)
2077 2086 return (EINVAL);
2078 2087
2079 2088 /* this entry point is only supported for cb devices */
2080 2089 dev = handlep->lh_vp->v_rdev;
2081 2090 if (!(handlep->lh_type & LH_CBDEV))
2082 2091 return (ENOTSUP);
2083 2092
2084 2093 return (cdev_devmap(dev, dhp, off, len, maplen, model));
2085 2094 }
2086 2095
2087 2096 int
2088 2097 ldi_aread(ldi_handle_t lh, struct aio_req *aio_reqp, cred_t *cr)
2089 2098 {
2090 2099 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2091 2100 dev_t dev;
2092 2101 struct cb_ops *cb;
2093 2102
2094 2103 if (lh == NULL)
2095 2104 return (EINVAL);
2096 2105
2097 2106 /* this entry point is only supported for cb devices */
2098 2107 if (!(handlep->lh_type & LH_CBDEV))
2099 2108 return (ENOTSUP);
2100 2109
2101 2110 /*
2102 2111 * Kaio is only supported on block devices.
2103 2112 */
2104 2113 dev = handlep->lh_vp->v_rdev;
2105 2114 cb = devopsp[getmajor(dev)]->devo_cb_ops;
2106 2115 if (cb->cb_strategy == nodev || cb->cb_strategy == NULL)
2107 2116 return (ENOTSUP);
2108 2117
2109 2118 if (cb->cb_aread == NULL)
2110 2119 return (ENOTSUP);
2111 2120
2112 2121 return (cb->cb_aread(dev, aio_reqp, cr));
2113 2122 }
2114 2123
2115 2124 int
2116 2125 ldi_awrite(ldi_handle_t lh, struct aio_req *aio_reqp, cred_t *cr)
2117 2126 {
2118 2127 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2119 2128 struct cb_ops *cb;
2120 2129 dev_t dev;
2121 2130
2122 2131 if (lh == NULL)
2123 2132 return (EINVAL);
2124 2133
2125 2134 /* this entry point is only supported for cb devices */
2126 2135 if (!(handlep->lh_type & LH_CBDEV))
2127 2136 return (ENOTSUP);
2128 2137
2129 2138 /*
2130 2139 * Kaio is only supported on block devices.
2131 2140 */
2132 2141 dev = handlep->lh_vp->v_rdev;
2133 2142 cb = devopsp[getmajor(dev)]->devo_cb_ops;
2134 2143 if (cb->cb_strategy == nodev || cb->cb_strategy == NULL)
2135 2144 return (ENOTSUP);
2136 2145
2137 2146 if (cb->cb_awrite == NULL)
2138 2147 return (ENOTSUP);
2139 2148
2140 2149 return (cb->cb_awrite(dev, aio_reqp, cr));
2141 2150 }
2142 2151
2143 2152 int
2144 2153 ldi_putmsg(ldi_handle_t lh, mblk_t *smp)
2145 2154 {
2146 2155 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2147 2156 int ret;
2148 2157
2149 2158 if ((lh == NULL) || (smp == NULL))
2150 2159 return (EINVAL);
2151 2160
2152 2161 if (!(handlep->lh_type & LH_STREAM)) {
2153 2162 freemsg(smp);
2154 2163 return (ENOTSUP);
2155 2164 }
2156 2165
2157 2166 /*
2158 2167 * If we don't have db_credp, set it. Note that we can not be called
2159 2168 * from interrupt context.
2160 2169 */
2161 2170 if (msg_getcred(smp, NULL) == NULL)
2162 2171 mblk_setcred(smp, CRED(), curproc->p_pid);
2163 2172
2164 2173 /* Send message while honoring flow control */
2165 2174 ret = kstrputmsg(handlep->lh_vp, smp, NULL, 0, 0,
2166 2175 MSG_BAND | MSG_HOLDSIG | MSG_IGNERROR, 0);
2167 2176
2168 2177 return (ret);
2169 2178 }
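/*
 * Illustrative sketch (not part of this change): sending a data block down
 * a STREAMS device opened through LDI.  Note that the ENOTSUP path above
 * frees the message itself, so the caller should not reuse the mblk after
 * this call.  The buf and len variables are hypothetical.
 */
#if 0	/* example only */
	mblk_t *mp;
	int err;

	if ((mp = allocb(len, BPRI_MED)) == NULL)
		return (ENOMEM);
	bcopy(buf, mp->b_wptr, len);
	mp->b_wptr += len;
	err = ldi_putmsg(lh, mp);
#endif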
2170 2179
2171 2180 int
2172 2181 ldi_getmsg(ldi_handle_t lh, mblk_t **rmp, timestruc_t *timeo)
2173 2182 {
2174 2183 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2175 2184 clock_t timout; /* milliseconds */
2176 2185 uchar_t pri;
2177 2186 rval_t rval;
2178 2187 int ret, pflag;
2179 2188
2180 2189
2181 2190 if (lh == NULL)
2182 2191 return (EINVAL);
2183 2192
2184 2193 if (!(handlep->lh_type & LH_STREAM))
2185 2194 return (ENOTSUP);
2186 2195
2187 2196 /* Convert from nanoseconds to milliseconds */
2188 2197 if (timeo != NULL) {
2189 2198 timout = timeo->tv_sec * 1000 + timeo->tv_nsec / 1000000;
2190 2199 if (timout > INT_MAX)
2191 2200 return (EINVAL);
2192 2201 } else
2193 2202 timout = -1;
2194 2203
2195 2204	/* Wait for timeout milliseconds for a message */
2196 2205 pflag = MSG_ANY;
2197 2206 pri = 0;
2198 2207 *rmp = NULL;
2199 2208 ret = kstrgetmsg(handlep->lh_vp,
2200 2209 rmp, NULL, &pri, &pflag, timout, &rval);
2201 2210 return (ret);
2202 2211 }
2203 2212
2204 2213 int
2205 2214 ldi_get_dev(ldi_handle_t lh, dev_t *devp)
2206 2215 {
2207 2216 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2208 2217
2209 2218 if ((lh == NULL) || (devp == NULL))
2210 2219 return (EINVAL);
2211 2220
2212 2221 *devp = handlep->lh_vp->v_rdev;
2213 2222 return (0);
2214 2223 }
2215 2224
2216 2225 int
2217 2226 ldi_get_otyp(ldi_handle_t lh, int *otyp)
2218 2227 {
2219 2228 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2220 2229
2221 2230 if ((lh == NULL) || (otyp == NULL))
2222 2231 return (EINVAL);
2223 2232
2224 2233 *otyp = VTYP_TO_OTYP(handlep->lh_vp->v_type);
2225 2234 return (0);
2226 2235 }
2227 2236
2228 2237 int
2229 2238 ldi_get_devid(ldi_handle_t lh, ddi_devid_t *devid)
2230 2239 {
2231 2240 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2232 2241 int ret;
2233 2242 dev_t dev;
2234 2243
2235 2244 if ((lh == NULL) || (devid == NULL))
2236 2245 return (EINVAL);
2237 2246
2238 2247 dev = handlep->lh_vp->v_rdev;
2239 2248
2240 2249 ret = ddi_lyr_get_devid(dev, devid);
2241 2250 if (ret != DDI_SUCCESS)
2242 2251 return (ENOTSUP);
2243 2252
2244 2253 return (0);
2245 2254 }
2246 2255
2247 2256 int
2248 2257 ldi_get_minor_name(ldi_handle_t lh, char **minor_name)
2249 2258 {
2250 2259 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2251 2260 int ret, otyp;
2252 2261 dev_t dev;
2253 2262
2254 2263 if ((lh == NULL) || (minor_name == NULL))
2255 2264 return (EINVAL);
2256 2265
2257 2266 dev = handlep->lh_vp->v_rdev;
2258 2267 otyp = VTYP_TO_OTYP(handlep->lh_vp->v_type);
2259 2268
2260 2269 ret = ddi_lyr_get_minor_name(dev, OTYP_TO_STYP(otyp), minor_name);
2261 2270 if (ret != DDI_SUCCESS)
2262 2271 return (ENOTSUP);
2263 2272
2264 2273 return (0);
2265 2274 }
2266 2275
2267 2276 int
2268 2277 ldi_prop_lookup_int_array(ldi_handle_t lh,
2269 2278 uint_t flags, char *name, int **data, uint_t *nelements)
2270 2279 {
2271 2280 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2272 2281 dev_info_t *dip;
2273 2282 dev_t dev;
2274 2283 int res;
2275 2284 struct snode *csp;
2276 2285
2277 2286 if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2278 2287 return (DDI_PROP_INVAL_ARG);
2279 2288
2280 2289 dev = handlep->lh_vp->v_rdev;
2281 2290
2282 2291 csp = VTOCS(handlep->lh_vp);
2283 2292 mutex_enter(&csp->s_lock);
2284 2293 if ((dip = csp->s_dip) != NULL)
2285 2294 e_ddi_hold_devi(dip);
2286 2295 mutex_exit(&csp->s_lock);
2287 2296 if (dip == NULL)
2288 2297 dip = e_ddi_hold_devi_by_dev(dev, 0);
2289 2298
2290 2299 if (dip == NULL) {
2291 2300 flags |= DDI_UNBND_DLPI2;
2292 2301 } else if (flags & LDI_DEV_T_ANY) {
2293 2302 flags &= ~LDI_DEV_T_ANY;
2294 2303 dev = DDI_DEV_T_ANY;
2295 2304 }
2296 2305
2297 2306 if (dip != NULL) {
2298 2307 int *prop_val, prop_len;
2299 2308
2300 2309 res = i_ldi_prop_op_typed(dev, dip, flags, name,
2301 2310 (caddr_t *)&prop_val, &prop_len, sizeof (int));
2302 2311
2303 2312 /* if we got it then return it */
2304 2313 if (res == DDI_PROP_SUCCESS) {
2305 2314 *nelements = prop_len / sizeof (int);
2306 2315 *data = prop_val;
2307 2316
2308 2317 ddi_release_devi(dip);
2309 2318 return (res);
2310 2319 }
2311 2320 }
2312 2321
2313 2322 /* call the normal property interfaces */
2314 2323 res = ddi_prop_lookup_int_array(dev, dip, flags,
2315 2324 name, data, nelements);
2316 2325
2317 2326 if (dip != NULL)
2318 2327 ddi_release_devi(dip);
2319 2328
2320 2329 return (res);
2321 2330 }
2322 2331
2323 2332 int
2324 2333 ldi_prop_lookup_int64_array(ldi_handle_t lh,
2325 2334 uint_t flags, char *name, int64_t **data, uint_t *nelements)
2326 2335 {
2327 2336 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2328 2337 dev_info_t *dip;
2329 2338 dev_t dev;
2330 2339 int res;
2331 2340 struct snode *csp;
2332 2341
2333 2342 if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2334 2343 return (DDI_PROP_INVAL_ARG);
2335 2344
2336 2345 dev = handlep->lh_vp->v_rdev;
2337 2346
2338 2347 csp = VTOCS(handlep->lh_vp);
2339 2348 mutex_enter(&csp->s_lock);
2340 2349 if ((dip = csp->s_dip) != NULL)
2341 2350 e_ddi_hold_devi(dip);
2342 2351 mutex_exit(&csp->s_lock);
2343 2352 if (dip == NULL)
2344 2353 dip = e_ddi_hold_devi_by_dev(dev, 0);
2345 2354
2346 2355 if (dip == NULL) {
2347 2356 flags |= DDI_UNBND_DLPI2;
2348 2357 } else if (flags & LDI_DEV_T_ANY) {
2349 2358 flags &= ~LDI_DEV_T_ANY;
2350 2359 dev = DDI_DEV_T_ANY;
2351 2360 }
2352 2361
2353 2362 if (dip != NULL) {
2354 2363 int64_t *prop_val;
2355 2364 int prop_len;
2356 2365
2357 2366 res = i_ldi_prop_op_typed(dev, dip, flags, name,
2358 2367 (caddr_t *)&prop_val, &prop_len, sizeof (int64_t));
2359 2368
2360 2369 /* if we got it then return it */
2361 2370 if (res == DDI_PROP_SUCCESS) {
2362 2371 *nelements = prop_len / sizeof (int64_t);
2363 2372 *data = prop_val;
2364 2373
2365 2374 ddi_release_devi(dip);
2366 2375 return (res);
2367 2376 }
2368 2377 }
2369 2378
2370 2379 /* call the normal property interfaces */
2371 2380 res = ddi_prop_lookup_int64_array(dev, dip, flags,
2372 2381 name, data, nelements);
2373 2382
2374 2383 if (dip != NULL)
2375 2384 ddi_release_devi(dip);
2376 2385
2377 2386 return (res);
2378 2387 }
2379 2388
2380 2389 int
2381 2390 ldi_prop_lookup_string_array(ldi_handle_t lh,
2382 2391 uint_t flags, char *name, char ***data, uint_t *nelements)
2383 2392 {
2384 2393 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2385 2394 dev_info_t *dip;
2386 2395 dev_t dev;
2387 2396 int res;
2388 2397 struct snode *csp;
2389 2398
2390 2399 if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2391 2400 return (DDI_PROP_INVAL_ARG);
2392 2401
2393 2402 dev = handlep->lh_vp->v_rdev;
2394 2403
2395 2404 csp = VTOCS(handlep->lh_vp);
2396 2405 mutex_enter(&csp->s_lock);
2397 2406 if ((dip = csp->s_dip) != NULL)
2398 2407 e_ddi_hold_devi(dip);
2399 2408 mutex_exit(&csp->s_lock);
2400 2409 if (dip == NULL)
2401 2410 dip = e_ddi_hold_devi_by_dev(dev, 0);
2402 2411
2403 2412 if (dip == NULL) {
2404 2413 flags |= DDI_UNBND_DLPI2;
2405 2414 } else if (flags & LDI_DEV_T_ANY) {
2406 2415 flags &= ~LDI_DEV_T_ANY;
2407 2416 dev = DDI_DEV_T_ANY;
2408 2417 }
2409 2418
2410 2419 if (dip != NULL) {
2411 2420 char *prop_val;
2412 2421 int prop_len;
2413 2422
2414 2423 res = i_ldi_prop_op_typed(dev, dip, flags, name,
2415 2424 (caddr_t *)&prop_val, &prop_len, 0);
2416 2425
2417 2426 /* if we got it then return it */
2418 2427 if (res == DDI_PROP_SUCCESS) {
2419 2428 char **str_array;
2420 2429 int nelem;
2421 2430
2422 2431 /*
2423 2432 * pack the returned string array into the format
2424 2433 * our callers expect
2425 2434 */
2426 2435 if (i_pack_string_array(prop_val, prop_len,
2427 2436 &str_array, &nelem) == 0) {
2428 2437
2429 2438 *data = str_array;
2430 2439 *nelements = nelem;
2431 2440
2432 2441 ddi_prop_free(prop_val);
2433 2442 ddi_release_devi(dip);
2434 2443 return (res);
2435 2444 }
2436 2445
2437 2446 /*
2438 2447 * the format of the returned property must have
2439 2448 * been bad so throw it out
2440 2449 */
2441 2450 ddi_prop_free(prop_val);
2442 2451 }
2443 2452 }
2444 2453
2445 2454 /* call the normal property interfaces */
2446 2455 res = ddi_prop_lookup_string_array(dev, dip, flags,
2447 2456 name, data, nelements);
2448 2457
2449 2458 if (dip != NULL)
2450 2459 ddi_release_devi(dip);
2451 2460
2452 2461 return (res);
2453 2462 }
2454 2463
2455 2464 int
2456 2465 ldi_prop_lookup_string(ldi_handle_t lh,
2457 2466 uint_t flags, char *name, char **data)
2458 2467 {
2459 2468 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2460 2469 dev_info_t *dip;
2461 2470 dev_t dev;
2462 2471 int res;
2463 2472 struct snode *csp;
2464 2473
2465 2474 if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2466 2475 return (DDI_PROP_INVAL_ARG);
2467 2476
2468 2477 dev = handlep->lh_vp->v_rdev;
2469 2478
2470 2479 csp = VTOCS(handlep->lh_vp);
2471 2480 mutex_enter(&csp->s_lock);
2472 2481 if ((dip = csp->s_dip) != NULL)
2473 2482 e_ddi_hold_devi(dip);
2474 2483 mutex_exit(&csp->s_lock);
2475 2484 if (dip == NULL)
2476 2485 dip = e_ddi_hold_devi_by_dev(dev, 0);
2477 2486
2478 2487 if (dip == NULL) {
2479 2488 flags |= DDI_UNBND_DLPI2;
2480 2489 } else if (flags & LDI_DEV_T_ANY) {
2481 2490 flags &= ~LDI_DEV_T_ANY;
2482 2491 dev = DDI_DEV_T_ANY;
2483 2492 }
2484 2493
2485 2494 if (dip != NULL) {
2486 2495 char *prop_val;
2487 2496 int prop_len;
2488 2497
2489 2498 res = i_ldi_prop_op_typed(dev, dip, flags, name,
2490 2499 (caddr_t *)&prop_val, &prop_len, 0);
2491 2500
2492 2501 /* if we got it then return it */
2493 2502 if (res == DDI_PROP_SUCCESS) {
2494 2503 /*
2495 2504			 * sanity check the value returned.
2496 2505 */
2497 2506 if (i_check_string(prop_val, prop_len)) {
2498 2507 ddi_prop_free(prop_val);
2499 2508 } else {
2500 2509 *data = prop_val;
2501 2510 ddi_release_devi(dip);
2502 2511 return (res);
2503 2512 }
2504 2513 }
2505 2514 }
2506 2515
2507 2516 /* call the normal property interfaces */
2508 2517 res = ddi_prop_lookup_string(dev, dip, flags, name, data);
2509 2518
2510 2519 if (dip != NULL)
2511 2520 ddi_release_devi(dip);
2512 2521
2513 2522 #ifdef DEBUG
2514 2523 if (res == DDI_PROP_SUCCESS) {
2515 2524 /*
2516 2525 * keep ourselves honest
2517 2526 * make sure the framework returns strings in the
2518 2527 * same format as we're demanding from drivers.
2519 2528 */
2520 2529 struct prop_driver_data *pdd;
2521 2530 int pdd_prop_size;
2522 2531
2523 2532 pdd = ((struct prop_driver_data *)(*data)) - 1;
2524 2533 pdd_prop_size = pdd->pdd_size -
2525 2534 sizeof (struct prop_driver_data);
2526 2535 ASSERT(i_check_string(*data, pdd_prop_size) == 0);
2527 2536 }
2528 2537 #endif /* DEBUG */
2529 2538
2530 2539 return (res);
2531 2540 }
2532 2541
2533 2542 int
2534 2543 ldi_prop_lookup_byte_array(ldi_handle_t lh,
2535 2544 uint_t flags, char *name, uchar_t **data, uint_t *nelements)
2536 2545 {
2537 2546 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2538 2547 dev_info_t *dip;
2539 2548 dev_t dev;
2540 2549 int res;
2541 2550 struct snode *csp;
2542 2551
2543 2552 if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2544 2553 return (DDI_PROP_INVAL_ARG);
2545 2554
2546 2555 dev = handlep->lh_vp->v_rdev;
2547 2556
2548 2557 csp = VTOCS(handlep->lh_vp);
2549 2558 mutex_enter(&csp->s_lock);
2550 2559 if ((dip = csp->s_dip) != NULL)
2551 2560 e_ddi_hold_devi(dip);
2552 2561 mutex_exit(&csp->s_lock);
2553 2562 if (dip == NULL)
2554 2563 dip = e_ddi_hold_devi_by_dev(dev, 0);
2555 2564
2556 2565 if (dip == NULL) {
2557 2566 flags |= DDI_UNBND_DLPI2;
2558 2567 } else if (flags & LDI_DEV_T_ANY) {
2559 2568 flags &= ~LDI_DEV_T_ANY;
2560 2569 dev = DDI_DEV_T_ANY;
2561 2570 }
2562 2571
2563 2572 if (dip != NULL) {
2564 2573 uchar_t *prop_val;
2565 2574 int prop_len;
2566 2575
2567 2576 res = i_ldi_prop_op_typed(dev, dip, flags, name,
2568 2577 (caddr_t *)&prop_val, &prop_len, sizeof (uchar_t));
2569 2578
2570 2579 /* if we got it then return it */
2571 2580 if (res == DDI_PROP_SUCCESS) {
2572 2581 *nelements = prop_len / sizeof (uchar_t);
2573 2582 *data = prop_val;
2574 2583
2575 2584 ddi_release_devi(dip);
2576 2585 return (res);
2577 2586 }
2578 2587 }
2579 2588
2580 2589 /* call the normal property interfaces */
2581 2590 res = ddi_prop_lookup_byte_array(dev, dip, flags,
2582 2591 name, data, nelements);
2583 2592
2584 2593 if (dip != NULL)
2585 2594 ddi_release_devi(dip);
2586 2595
2587 2596 return (res);
2588 2597 }
2589 2598
2590 2599 int
2591 2600 ldi_prop_get_int(ldi_handle_t lh,
2592 2601 uint_t flags, char *name, int defvalue)
2593 2602 {
2594 2603 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2595 2604 dev_info_t *dip;
2596 2605 dev_t dev;
2597 2606 int res;
2598 2607 struct snode *csp;
2599 2608
2600 2609 if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2601 2610 return (defvalue);
2602 2611
2603 2612 dev = handlep->lh_vp->v_rdev;
2604 2613
2605 2614 csp = VTOCS(handlep->lh_vp);
2606 2615 mutex_enter(&csp->s_lock);
2607 2616 if ((dip = csp->s_dip) != NULL)
2608 2617 e_ddi_hold_devi(dip);
2609 2618 mutex_exit(&csp->s_lock);
2610 2619 if (dip == NULL)
2611 2620 dip = e_ddi_hold_devi_by_dev(dev, 0);
2612 2621
2613 2622 if (dip == NULL) {
2614 2623 flags |= DDI_UNBND_DLPI2;
2615 2624 } else if (flags & LDI_DEV_T_ANY) {
2616 2625 flags &= ~LDI_DEV_T_ANY;
2617 2626 dev = DDI_DEV_T_ANY;
2618 2627 }
2619 2628
2620 2629 if (dip != NULL) {
2621 2630 int prop_val;
2622 2631 int prop_len;
2623 2632
2624 2633 /*
2625 2634		 * first call the driver's prop_op interface to allow it
2626 2635		 * to override default property values.
2627 2636 */
2628 2637 prop_len = sizeof (int);
2629 2638 res = i_ldi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
2630 2639 flags | DDI_PROP_DYNAMIC, name,
2631 2640 (caddr_t)&prop_val, &prop_len);
2632 2641
2633 2642 /* if we got it then return it */
2634 2643 if ((res == DDI_PROP_SUCCESS) &&
2635 2644 (prop_len == sizeof (int))) {
2636 2645 res = prop_val;
2637 2646 ddi_release_devi(dip);
2638 2647 return (res);
2639 2648 }
2640 2649 }
2641 2650
2642 2651 /* call the normal property interfaces */
2643 2652 res = ddi_prop_get_int(dev, dip, flags, name, defvalue);
2644 2653
2645 2654 if (dip != NULL)
2646 2655 ddi_release_devi(dip);
2647 2656
2648 2657 return (res);
2649 2658 }
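/*
 * Illustrative sketch (not part of this change): a consumer looking up a
 * property on the device it has open, matching any dev_t and not searching
 * parent nodes.  The "xx-retries" property name and default are
 * hypothetical.
 */
#if 0	/* example only */
	int nretries;

	nretries = ldi_prop_get_int(lh,
	    LDI_DEV_T_ANY | DDI_PROP_DONTPASS, "xx-retries", 3);
#endif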
2650 2659
2651 2660 int64_t
2652 2661 ldi_prop_get_int64(ldi_handle_t lh,
2653 2662 uint_t flags, char *name, int64_t defvalue)
2654 2663 {
2655 2664 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2656 2665 dev_info_t *dip;
2657 2666 dev_t dev;
2658 2667 int64_t res;
2659 2668 struct snode *csp;
2660 2669
2661 2670 if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2662 2671 return (defvalue);
2663 2672
2664 2673 dev = handlep->lh_vp->v_rdev;
2665 2674
2666 2675 csp = VTOCS(handlep->lh_vp);
2667 2676 mutex_enter(&csp->s_lock);
2668 2677 if ((dip = csp->s_dip) != NULL)
2669 2678 e_ddi_hold_devi(dip);
2670 2679 mutex_exit(&csp->s_lock);
2671 2680 if (dip == NULL)
2672 2681 dip = e_ddi_hold_devi_by_dev(dev, 0);
2673 2682
2674 2683 if (dip == NULL) {
2675 2684 flags |= DDI_UNBND_DLPI2;
2676 2685 } else if (flags & LDI_DEV_T_ANY) {
2677 2686 flags &= ~LDI_DEV_T_ANY;
2678 2687 dev = DDI_DEV_T_ANY;
2679 2688 }
2680 2689
2681 2690 if (dip != NULL) {
2682 2691 int64_t prop_val;
2683 2692 int prop_len;
2684 2693
2685 2694 /*
2686 2695		 * first call the driver's prop_op interface to allow it
2687 2696		 * to override default property values.
2688 2697 */
2689 2698 prop_len = sizeof (int64_t);
2690 2699 res = i_ldi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
2691 2700 flags | DDI_PROP_DYNAMIC, name,
2692 2701 (caddr_t)&prop_val, &prop_len);
2693 2702
2694 2703 /* if we got it then return it */
2695 2704 if ((res == DDI_PROP_SUCCESS) &&
2696 2705 (prop_len == sizeof (int64_t))) {
2697 2706 res = prop_val;
2698 2707 ddi_release_devi(dip);
2699 2708 return (res);
2700 2709 }
2701 2710 }
2702 2711
2703 2712 /* call the normal property interfaces */
2704 2713 res = ddi_prop_get_int64(dev, dip, flags, name, defvalue);
2705 2714
2706 2715 if (dip != NULL)
2707 2716 ddi_release_devi(dip);
2708 2717
2709 2718 return (res);
2710 2719 }
2711 2720
2712 2721 int
2713 2722 ldi_prop_exists(ldi_handle_t lh, uint_t flags, char *name)
2714 2723 {
2715 2724 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2716 2725 dev_info_t *dip;
2717 2726 dev_t dev;
2718 2727 int res, prop_len;
2719 2728 struct snode *csp;
2720 2729
2721 2730 if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2722 2731 return (0);
2723 2732
2724 2733 dev = handlep->lh_vp->v_rdev;
2725 2734
2726 2735 csp = VTOCS(handlep->lh_vp);
2727 2736 mutex_enter(&csp->s_lock);
2728 2737 if ((dip = csp->s_dip) != NULL)
2729 2738 e_ddi_hold_devi(dip);
2730 2739 mutex_exit(&csp->s_lock);
2731 2740 if (dip == NULL)
2732 2741 dip = e_ddi_hold_devi_by_dev(dev, 0);
2733 2742
2734 2743 /* if NULL dip, prop does NOT exist */
2735 2744 if (dip == NULL)
2736 2745 return (0);
2737 2746
2738 2747 if (flags & LDI_DEV_T_ANY) {
2739 2748 flags &= ~LDI_DEV_T_ANY;
2740 2749 dev = DDI_DEV_T_ANY;
2741 2750 }
2742 2751
2743 2752 /*
2744 2753	 * first call the driver's prop_op interface to allow it
2745 2754	 * to override default property values.
2746 2755 */
2747 2756 res = i_ldi_prop_op(dev, dip, PROP_LEN,
2748 2757 flags | DDI_PROP_DYNAMIC, name, NULL, &prop_len);
2749 2758
2750 2759 if (res == DDI_PROP_SUCCESS) {
2751 2760 ddi_release_devi(dip);
2752 2761 return (1);
2753 2762 }
2754 2763
2755 2764 /* call the normal property interfaces */
2756 2765 res = ddi_prop_exists(dev, dip, flags, name);
2757 2766
2758 2767 ddi_release_devi(dip);
2759 2768 return (res);
2760 2769 }
2761 2770
2762 2771 #ifdef LDI_OBSOLETE_EVENT
2763 2772
2764 2773 int
2765 2774 ldi_get_eventcookie(ldi_handle_t lh, char *name, ddi_eventcookie_t *ecp)
2766 2775 {
2767 2776 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2768 2777 dev_info_t *dip;
2769 2778 dev_t dev;
2770 2779 int res;
2771 2780 struct snode *csp;
2772 2781
2773 2782 if ((lh == NULL) || (name == NULL) ||
2774 2783 (strlen(name) == 0) || (ecp == NULL)) {
2775 2784 return (DDI_FAILURE);
2776 2785 }
2777 2786
2778 2787 ASSERT(!servicing_interrupt());
2779 2788
2780 2789 dev = handlep->lh_vp->v_rdev;
2781 2790
2782 2791 csp = VTOCS(handlep->lh_vp);
2783 2792 mutex_enter(&csp->s_lock);
2784 2793 if ((dip = csp->s_dip) != NULL)
2785 2794 e_ddi_hold_devi(dip);
2786 2795 mutex_exit(&csp->s_lock);
2787 2796 if (dip == NULL)
2788 2797 dip = e_ddi_hold_devi_by_dev(dev, 0);
2789 2798
2790 2799 if (dip == NULL)
2791 2800 return (DDI_FAILURE);
2792 2801
2793 2802 LDI_EVENTCB((CE_NOTE, "%s: event_name=%s, "
2794 2803 "dip=0x%p, event_cookiep=0x%p", "ldi_get_eventcookie",
2795 2804 name, (void *)dip, (void *)ecp));
2796 2805
2797 2806 res = ddi_get_eventcookie(dip, name, ecp);
2798 2807
2799 2808 ddi_release_devi(dip);
2800 2809 return (res);
2801 2810 }
2802 2811
2803 2812 int
2804 2813 ldi_add_event_handler(ldi_handle_t lh, ddi_eventcookie_t ec,
2805 2814 void (*handler)(ldi_handle_t, ddi_eventcookie_t, void *, void *),
2806 2815 void *arg, ldi_callback_id_t *id)
2807 2816 {
2808 2817 struct ldi_handle *handlep = (struct ldi_handle *)lh;
2809 2818 struct ldi_event *lep;
2810 2819 dev_info_t *dip;
2811 2820 dev_t dev;
2812 2821 int res;
2813 2822 struct snode *csp;
2814 2823
2815 2824 if ((lh == NULL) || (ec == NULL) || (handler == NULL) || (id == NULL))
2816 2825 return (DDI_FAILURE);
2817 2826
2818 2827 ASSERT(!servicing_interrupt());
2819 2828
2820 2829 dev = handlep->lh_vp->v_rdev;
2821 2830
2822 2831 csp = VTOCS(handlep->lh_vp);
2823 2832 mutex_enter(&csp->s_lock);
2824 2833 if ((dip = csp->s_dip) != NULL)
2825 2834 e_ddi_hold_devi(dip);
2826 2835 mutex_exit(&csp->s_lock);
2827 2836 if (dip == NULL)
2828 2837 dip = e_ddi_hold_devi_by_dev(dev, 0);
2829 2838
2830 2839 if (dip == NULL)
2831 2840 return (DDI_FAILURE);
2832 2841
2833 2842 lep = kmem_zalloc(sizeof (struct ldi_event), KM_SLEEP);
2834 2843 lep->le_lhp = handlep;
2835 2844 lep->le_arg = arg;
2836 2845 lep->le_handler = handler;
2837 2846
2838 2847 if ((res = ddi_add_event_handler(dip, ec, i_ldi_callback,
2839 2848 (void *)lep, &lep->le_id)) != DDI_SUCCESS) {
2840 2849		LDI_EVENTCB((CE_WARN, "%s: unable to add "
2841 2850 "event callback", "ldi_add_event_handler"));
2842 2851 ddi_release_devi(dip);
2843 2852 kmem_free(lep, sizeof (struct ldi_event));
2844 2853 return (res);
2845 2854 }
2846 2855
2847 2856 *id = (ldi_callback_id_t)lep;
2848 2857
2849 2858 LDI_EVENTCB((CE_NOTE, "%s: dip=0x%p, event=0x%p, "
2850 2859 "ldi_eventp=0x%p, cb_id=0x%p", "ldi_add_event_handler",
2851 2860 (void *)dip, (void *)ec, (void *)lep, (void *)id));
2852 2861
2853 2862 handle_event_add(lep);
2854 2863 ddi_release_devi(dip);
2855 2864 return (res);
2856 2865 }
2857 2866
2858 2867 int
2859 2868 ldi_remove_event_handler(ldi_handle_t lh, ldi_callback_id_t id)
2860 2869 {
2861 2870 ldi_event_t *lep = (ldi_event_t *)id;
2862 2871 int res;
2863 2872
2864 2873 if ((lh == NULL) || (id == NULL))
2865 2874 return (DDI_FAILURE);
2866 2875
2867 2876 ASSERT(!servicing_interrupt());
2868 2877
2869 2878 if ((res = ddi_remove_event_handler(lep->le_id))
2870 2879 != DDI_SUCCESS) {
2871 2880 LDI_EVENTCB((CE_WARN, "%s: unable to remove "
2872 2881 "event callback", "ldi_remove_event_handler"));
2873 2882 return (res);
2874 2883 }
2875 2884
2876 2885 handle_event_remove(lep);
2877 2886 kmem_free(lep, sizeof (struct ldi_event));
2878 2887 return (res);
2879 2888 }
2880 2889
2881 2890 #endif
2882 2891
2883 2892 /*
2884 2893 * Here are some definitions of terms used in the following LDI events
2885 2894 * code:
2886 2895 *
2887 2896 * "LDI events" AKA "native events": These are events defined by the
2888 2897 * "new" LDI event framework. These events are serviced by the LDI event
2889 2898 * framework itself and thus are native to it.
2890 2899 *
2891 2900 * "LDI contract events": These are contract events that correspond to the
2892 2901 * LDI events. This mapping of LDI events to contract events is defined by
2893 2902 * the ldi_ev_cookies[] array above.
2894 2903 *
2895 2904 * NDI events: These are events which are serviced by the NDI event subsystem.
2896 2905 * LDI subsystem just provides a thin wrapper around the NDI event interfaces.
2897 2906 * These events are therefore *not* native events.
2898 2907 */
2899 2908
2900 2909 static int
2901 2910 ldi_native_event(const char *evname)
2902 2911 {
2903 2912 int i;
2904 2913
2905 2914 LDI_EVTRC((CE_NOTE, "ldi_native_event: entered: ev=%s", evname));
2906 2915
2907 2916 for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2908 2917 if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0)
2909 2918 return (1);
2910 2919 }
2911 2920
2912 2921 return (0);
2913 2922 }
2914 2923
2915 2924 static uint_t
2916 2925 ldi_ev_sync_event(const char *evname)
2917 2926 {
2918 2927 int i;
2919 2928
2920 2929 ASSERT(ldi_native_event(evname));
2921 2930
2922 2931 LDI_EVTRC((CE_NOTE, "ldi_ev_sync_event: entered: %s", evname));
2923 2932
2924 2933 for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2925 2934 if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0)
2926 2935 return (ldi_ev_cookies[i].ck_sync);
2927 2936 }
2928 2937
2929 2938 /*
2930 2939 * This should never happen until non-contract based
2931 2940 * LDI events are introduced. If that happens, we will
2932 2941 * use a "special" token to indicate that there are no
2933 2942 * contracts corresponding to this LDI event.
2934 2943 */
2935 2944 cmn_err(CE_PANIC, "Unknown LDI event: %s", evname);
2936 2945
2937 2946 return (0);
2938 2947 }
2939 2948
2940 2949 static uint_t
2941 2950 ldi_contract_event(const char *evname)
2942 2951 {
2943 2952 int i;
2944 2953
2945 2954 ASSERT(ldi_native_event(evname));
2946 2955
2947 2956 LDI_EVTRC((CE_NOTE, "ldi_contract_event: entered: %s", evname));
2948 2957
2949 2958 for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2950 2959 if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0)
2951 2960 return (ldi_ev_cookies[i].ck_ctype);
2952 2961 }
2953 2962
2954 2963 /*
2955 2964 * This should never happen until non-contract based
2956 2965 * LDI events are introduced. If that happens, we will
2957 2966 * use a "special" token to indicate that there are no
2958 2967 * contracts corresponding to this LDI event.
2959 2968 */
2960 2969 cmn_err(CE_PANIC, "Unknown LDI event: %s", evname);
2961 2970
2962 2971 return (0);
2963 2972 }
2964 2973
2965 2974 char *
2966 2975 ldi_ev_get_type(ldi_ev_cookie_t cookie)
2967 2976 {
2968 2977 int i;
2969 2978 struct ldi_ev_cookie *cookie_impl = (struct ldi_ev_cookie *)cookie;
2970 2979
2971 2980 for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2972 2981 if (&ldi_ev_cookies[i] == cookie_impl) {
2973 2982 LDI_EVTRC((CE_NOTE, "ldi_ev_get_type: LDI: %s",
2974 2983 ldi_ev_cookies[i].ck_evname));
2975 2984 return (ldi_ev_cookies[i].ck_evname);
2976 2985 }
2977 2986 }
2978 2987
2979 2988 /*
2980 2989 * Not an LDI native event. Must be NDI event service.
2981 2990 * Just return a generic string
2982 2991 */
2983 2992 LDI_EVTRC((CE_NOTE, "ldi_ev_get_type: is NDI"));
2984 2993 return (NDI_EVENT_SERVICE);
2985 2994 }
2986 2995
2987 2996 static int
2988 2997 ldi_native_cookie(ldi_ev_cookie_t cookie)
2989 2998 {
2990 2999 int i;
2991 3000 struct ldi_ev_cookie *cookie_impl = (struct ldi_ev_cookie *)cookie;
2992 3001
2993 3002 for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2994 3003 if (&ldi_ev_cookies[i] == cookie_impl) {
2995 3004 LDI_EVTRC((CE_NOTE, "ldi_native_cookie: native LDI"));
2996 3005 return (1);
2997 3006 }
2998 3007 }
2999 3008
3000 3009 LDI_EVTRC((CE_NOTE, "ldi_native_cookie: is NDI"));
3001 3010 return (0);
3002 3011 }
3003 3012
3004 3013 static ldi_ev_cookie_t
3005 3014 ldi_get_native_cookie(const char *evname)
3006 3015 {
3007 3016 int i;
3008 3017
3009 3018 for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
3010 3019 if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0) {
3011 3020 LDI_EVTRC((CE_NOTE, "ldi_get_native_cookie: found"));
3012 3021 return ((ldi_ev_cookie_t)&ldi_ev_cookies[i]);
3013 3022 }
3014 3023 }
3015 3024
3016 3025 LDI_EVTRC((CE_NOTE, "ldi_get_native_cookie: NOT found"));
3017 3026 return (NULL);
3018 3027 }
3019 3028
3020 3029 /*
3021 3030 * ldi_ev_lock() needs to be recursive, since layered drivers may call
3022 3031 * other LDI interfaces (such as ldi_close()) from within the context of
3023 3032 * a notify callback. Since the notify callback is called with the
3024 3033 * ldi_ev_lock() held and ldi_close() also grabs ldi_ev_lock, the lock needs
3025 3034 * to be recursive.
3026 3035 */
3027 3036 static void
3028 3037 ldi_ev_lock(void)
3029 3038 {
3030 3039 LDI_EVTRC((CE_NOTE, "ldi_ev_lock: entered"));
3031 3040
3032 3041 mutex_enter(&ldi_ev_callback_list.le_lock);
3033 3042 if (ldi_ev_callback_list.le_thread == curthread) {
3034 3043 ASSERT(ldi_ev_callback_list.le_busy >= 1);
3035 3044 ldi_ev_callback_list.le_busy++;
3036 3045 } else {
3037 3046 while (ldi_ev_callback_list.le_busy)
3038 3047 cv_wait(&ldi_ev_callback_list.le_cv,
3039 3048 &ldi_ev_callback_list.le_lock);
3040 3049 ASSERT(ldi_ev_callback_list.le_thread == NULL);
3041 3050 ldi_ev_callback_list.le_busy = 1;
3042 3051 ldi_ev_callback_list.le_thread = curthread;
3043 3052 }
3044 3053 mutex_exit(&ldi_ev_callback_list.le_lock);
3045 3054
3046 3055 LDI_EVTRC((CE_NOTE, "ldi_ev_lock: exit"));
3047 3056 }
3048 3057
3049 3058 static void
3050 3059 ldi_ev_unlock(void)
3051 3060 {
3052 3061 LDI_EVTRC((CE_NOTE, "ldi_ev_unlock: entered"));
3053 3062 mutex_enter(&ldi_ev_callback_list.le_lock);
3054 3063 ASSERT(ldi_ev_callback_list.le_thread == curthread);
3055 3064 ASSERT(ldi_ev_callback_list.le_busy >= 1);
3056 3065
3057 3066 ldi_ev_callback_list.le_busy--;
3058 3067 if (ldi_ev_callback_list.le_busy == 0) {
3059 3068 ldi_ev_callback_list.le_thread = NULL;
3060 3069 cv_signal(&ldi_ev_callback_list.le_cv);
3061 3070 }
3062 3071 mutex_exit(&ldi_ev_callback_list.le_lock);
3063 3072 LDI_EVTRC((CE_NOTE, "ldi_ev_unlock: exit"));
3064 3073 }
3065 3074
3066 3075 int
3067 3076 ldi_ev_get_cookie(ldi_handle_t lh, char *evname, ldi_ev_cookie_t *cookiep)
3068 3077 {
3069 3078 struct ldi_handle *handlep = (struct ldi_handle *)lh;
3070 3079 dev_info_t *dip;
3071 3080 dev_t dev;
3072 3081 int res;
3073 3082 struct snode *csp;
3074 3083 ddi_eventcookie_t ddi_cookie;
3075 3084 ldi_ev_cookie_t tcookie;
3076 3085
3077 3086 LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: entered: evname=%s",
3078 3087 evname ? evname : "<NULL>"));
3079 3088
3080 3089 if (lh == NULL || evname == NULL ||
3081 3090 strlen(evname) == 0 || cookiep == NULL) {
3082 3091 LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: invalid args"));
3083 3092 return (LDI_EV_FAILURE);
3084 3093 }
3085 3094
3086 3095 *cookiep = NULL;
3087 3096
3088 3097 /*
3089 3098 * First check if it is a LDI native event
3090 3099 */
3091 3100 tcookie = ldi_get_native_cookie(evname);
3092 3101 if (tcookie) {
3093 3102 LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: got native cookie"));
3094 3103 *cookiep = tcookie;
3095 3104 return (LDI_EV_SUCCESS);
3096 3105 }
3097 3106
3098 3107 /*
3099 3108 * Not a LDI native event. Try NDI event services
3100 3109 */
3101 3110
3102 3111 dev = handlep->lh_vp->v_rdev;
3103 3112
3104 3113 csp = VTOCS(handlep->lh_vp);
3105 3114 mutex_enter(&csp->s_lock);
3106 3115 if ((dip = csp->s_dip) != NULL)
3107 3116 e_ddi_hold_devi(dip);
3108 3117 mutex_exit(&csp->s_lock);
3109 3118 if (dip == NULL)
3110 3119 dip = e_ddi_hold_devi_by_dev(dev, 0);
3111 3120
3112 3121 if (dip == NULL) {
3113 3122 cmn_err(CE_WARN, "ldi_ev_get_cookie: No devinfo node for LDI "
3114 3123 "handle: %p", (void *)handlep);
3115 3124 return (LDI_EV_FAILURE);
3116 3125 }
3117 3126
3118 3127 LDI_EVDBG((CE_NOTE, "Calling ddi_get_eventcookie: dip=%p, ev=%s",
3119 3128 (void *)dip, evname));
3120 3129
3121 3130 res = ddi_get_eventcookie(dip, evname, &ddi_cookie);
3122 3131
3123 3132 ddi_release_devi(dip);
3124 3133
3125 3134 if (res == DDI_SUCCESS) {
3126 3135 LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: NDI cookie found"));
3127 3136 *cookiep = (ldi_ev_cookie_t)ddi_cookie;
3128 3137 return (LDI_EV_SUCCESS);
3129 3138 } else {
3130 3139 LDI_EVDBG((CE_WARN, "ldi_ev_get_cookie: NDI cookie: failed"));
3131 3140 return (LDI_EV_FAILURE);
3132 3141 }
3133 3142 }
3134 3143
3135 3144 /*ARGSUSED*/
3136 3145 static void
3137 3146 i_ldi_ev_callback(dev_info_t *dip, ddi_eventcookie_t event_cookie,
3138 3147 void *arg, void *ev_data)
3139 3148 {
3140 3149 ldi_ev_callback_impl_t *lecp = (ldi_ev_callback_impl_t *)arg;
3141 3150
3142 3151 ASSERT(lecp != NULL);
3143 3152 ASSERT(!ldi_native_cookie(lecp->lec_cookie));
3144 3153 ASSERT(lecp->lec_lhp);
3145 3154 ASSERT(lecp->lec_notify == NULL);
3146 3155 ASSERT(lecp->lec_finalize);
3147 3156
3148 3157 LDI_EVDBG((CE_NOTE, "i_ldi_ev_callback: ldh=%p, cookie=%p, arg=%p, "
3149 3158 "ev_data=%p", (void *)lecp->lec_lhp, (void *)event_cookie,
3150 3159 (void *)lecp->lec_arg, (void *)ev_data));
3151 3160
3152 3161 lecp->lec_finalize(lecp->lec_lhp, (ldi_ev_cookie_t)event_cookie,
3153 3162 lecp->lec_arg, ev_data);
3154 3163 }
3155 3164
3156 3165 int
3157 3166 ldi_ev_register_callbacks(ldi_handle_t lh, ldi_ev_cookie_t cookie,
3158 3167 ldi_ev_callback_t *callb, void *arg, ldi_callback_id_t *id)
3159 3168 {
3160 3169 struct ldi_handle *lhp = (struct ldi_handle *)lh;
3161 3170 ldi_ev_callback_impl_t *lecp;
3162 3171 dev_t dev;
3163 3172 struct snode *csp;
3164 3173 dev_info_t *dip;
3165 3174 int ddi_event;
3166 3175
3167 3176 ASSERT(!servicing_interrupt());
3168 3177
3169 3178 if (lh == NULL || cookie == NULL || callb == NULL || id == NULL) {
3170 3179 LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: Invalid args"));
3171 3180 return (LDI_EV_FAILURE);
3172 3181 }
3173 3182
3174 3183 if (callb->cb_vers != LDI_EV_CB_VERS) {
3175 3184 LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: Invalid vers"));
3176 3185 return (LDI_EV_FAILURE);
3177 3186 }
3178 3187
3179 3188 if (callb->cb_notify == NULL && callb->cb_finalize == NULL) {
3180 3189 LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: NULL callb"));
3181 3190 return (LDI_EV_FAILURE);
3182 3191 }
3183 3192
3184 3193 *id = 0;
3185 3194
3186 3195 dev = lhp->lh_vp->v_rdev;
3187 3196 csp = VTOCS(lhp->lh_vp);
3188 3197 mutex_enter(&csp->s_lock);
3189 3198 if ((dip = csp->s_dip) != NULL)
3190 3199 e_ddi_hold_devi(dip);
3191 3200 mutex_exit(&csp->s_lock);
3192 3201 if (dip == NULL)
3193 3202 dip = e_ddi_hold_devi_by_dev(dev, 0);
3194 3203
3195 3204 if (dip == NULL) {
3196 3205 cmn_err(CE_WARN, "ldi_ev_register: No devinfo node for "
3197 3206 "LDI handle: %p", (void *)lhp);
3198 3207 return (LDI_EV_FAILURE);
3199 3208 }
3200 3209
3201 3210 lecp = kmem_zalloc(sizeof (ldi_ev_callback_impl_t), KM_SLEEP);
3202 3211
3203 3212 ddi_event = 0;
3204 3213 if (!ldi_native_cookie(cookie)) {
3205 3214 if (callb->cb_notify || callb->cb_finalize == NULL) {
3206 3215 /*
3207 3216 * NDI event services only accept finalize
3208 3217 */
3209 3218 cmn_err(CE_WARN, "%s: module: %s: NDI event cookie. "
3210 3219 "Only finalize"
3211 3220 " callback supported with this cookie",
3212 3221 "ldi_ev_register_callbacks",
3213 3222 lhp->lh_ident->li_modname);
3214 3223 kmem_free(lecp, sizeof (ldi_ev_callback_impl_t));
3215 3224 ddi_release_devi(dip);
3216 3225 return (LDI_EV_FAILURE);
3217 3226 }
3218 3227
3219 3228 if (ddi_add_event_handler(dip, (ddi_eventcookie_t)cookie,
3220 3229 i_ldi_ev_callback, (void *)lecp,
3221 3230 (ddi_callback_id_t *)&lecp->lec_id)
3222 3231 != DDI_SUCCESS) {
3223 3232 kmem_free(lecp, sizeof (ldi_ev_callback_impl_t));
3224 3233 ddi_release_devi(dip);
3225 3234 LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks(): "
3226 3235 "ddi_add_event_handler failed"));
3227 3236 return (LDI_EV_FAILURE);
3228 3237 }
3229 3238 ddi_event = 1;
3230 3239 LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks(): "
3231 3240 "ddi_add_event_handler success"));
3232 3241 }
3233 3242
3234 3243
3235 3244
3236 3245 ldi_ev_lock();
3237 3246
3238 3247 /*
3239 3248 * Add the notify/finalize callback to the LDI's list of callbacks.
3240 3249 */
3241 3250 lecp->lec_lhp = lhp;
3242 3251 lecp->lec_dev = lhp->lh_vp->v_rdev;
3243 3252 lecp->lec_spec = VTYP_TO_STYP(lhp->lh_vp->v_type);
3244 3253 lecp->lec_notify = callb->cb_notify;
3245 3254 lecp->lec_finalize = callb->cb_finalize;
3246 3255 lecp->lec_arg = arg;
3247 3256 lecp->lec_cookie = cookie;
3248 3257 if (!ddi_event)
3249 3258 lecp->lec_id = (void *)(uintptr_t)(++ldi_ev_id_pool);
3250 3259 else
3251 3260 ASSERT(lecp->lec_id);
3252 3261 lecp->lec_dip = dip;
3253 3262 list_insert_tail(&ldi_ev_callback_list.le_head, lecp);
3254 3263
3255 3264 *id = (ldi_callback_id_t)lecp->lec_id;
3256 3265
3257 3266 ldi_ev_unlock();
3258 3267
3259 3268 ddi_release_devi(dip);
3260 3269
3261 3270 LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: registered "
3262 3271 "notify/finalize"));
3263 3272
3264 3273 return (LDI_EV_SUCCESS);
3265 3274 }
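/*
 * Illustrative sketch (not part of this change): a layered consumer
 * registering notify/finalize callbacks for the LDI "offline" event.  The
 * xx_ names are hypothetical; the callback signatures follow
 * ldi_ev_register_callbacks(9F).
 */
#if 0	/* example only */
static int
xx_notify(ldi_handle_t lh, ldi_ev_cookie_t cookie, void *arg, void *ev_data)
{
	/* Returning LDI_EV_FAILURE here would veto the state change. */
	return (LDI_EV_SUCCESS);
}

static void
xx_finalize(ldi_handle_t lh, ldi_ev_cookie_t cookie, int ldi_result,
    void *arg, void *ev_data)
{
	/* ldi_result is LDI_EV_SUCCESS or LDI_EV_FAILURE. */
}

static int
xx_watch_offline(ldi_handle_t lh, void *xx_state, ldi_callback_id_t *idp)
{
	ldi_ev_callback_t callb;
	ldi_ev_cookie_t cookie;

	if (ldi_ev_get_cookie(lh, LDI_EV_OFFLINE, &cookie) != LDI_EV_SUCCESS)
		return (LDI_EV_FAILURE);

	callb.cb_vers = LDI_EV_CB_VERS;
	callb.cb_notify = xx_notify;
	callb.cb_finalize = xx_finalize;

	return (ldi_ev_register_callbacks(lh, cookie, &callb, xx_state, idp));
}
#endif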
3266 3275
3267 3276 static int
3268 3277 ldi_ev_device_match(ldi_ev_callback_impl_t *lecp, dev_info_t *dip,
3269 3278 dev_t dev, int spec_type)
3270 3279 {
3271 3280 ASSERT(lecp);
3272 3281 ASSERT(dip);
3273 3282 ASSERT(dev != DDI_DEV_T_NONE);
3274 3283 ASSERT(dev != NODEV);
3275 3284 ASSERT((dev == DDI_DEV_T_ANY && spec_type == 0) ||
3276 3285 (spec_type == S_IFCHR || spec_type == S_IFBLK));
3277 3286 ASSERT(lecp->lec_dip);
3278 3287 ASSERT(lecp->lec_spec == S_IFCHR || lecp->lec_spec == S_IFBLK);
3279 3288 ASSERT(lecp->lec_dev != DDI_DEV_T_ANY);
3280 3289 ASSERT(lecp->lec_dev != DDI_DEV_T_NONE);
3281 3290 ASSERT(lecp->lec_dev != NODEV);
3282 3291
3283 3292 if (dip != lecp->lec_dip)
3284 3293 return (0);
3285 3294
3286 3295 if (dev != DDI_DEV_T_ANY) {
3287 3296 if (dev != lecp->lec_dev || spec_type != lecp->lec_spec)
3288 3297 return (0);
3289 3298 }
3290 3299
3291 3300 LDI_EVTRC((CE_NOTE, "ldi_ev_device_match: MATCH dip=%p", (void *)dip));
3292 3301
3293 3302 return (1);
3294 3303 }
3295 3304
3296 3305 /*
3297 3306 * LDI framework function to post a "notify" event to all layered drivers
3298 3307 * that have registered for that event
3299 3308 *
3300 3309 * Returns:
3301 3310 * LDI_EV_SUCCESS - registered callbacks allow event
3302 3311 * LDI_EV_FAILURE - registered callbacks block event
3303 3312 * LDI_EV_NONE - No matching LDI callbacks
3304 3313 *
3305 3314 * This function is *not* to be called by layered drivers. It is for I/O
3306 3315 * framework code in Solaris, such as the I/O retire code and DR code
3307 3316 * to call while servicing a device event such as offline or degraded.
3308 3317 */
3309 3318 int
3310 3319 ldi_invoke_notify(dev_info_t *dip, dev_t dev, int spec_type, char *event,
3311 3320 void *ev_data)
3312 3321 {
3313 3322 ldi_ev_callback_impl_t *lecp;
3314 3323 list_t *listp;
3315 3324 int ret;
3316 3325 char *lec_event;
3317 3326
3318 3327 ASSERT(dip);
3319 3328 ASSERT(dev != DDI_DEV_T_NONE);
3320 3329 ASSERT(dev != NODEV);
3321 3330 ASSERT((dev == DDI_DEV_T_ANY && spec_type == 0) ||
3322 3331 (spec_type == S_IFCHR || spec_type == S_IFBLK));
3323 3332 ASSERT(event);
3324 3333 ASSERT(ldi_native_event(event));
3325 3334 ASSERT(ldi_ev_sync_event(event));
3326 3335
3327 3336 LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): entered: dip=%p, ev=%s",
3328 3337 (void *)dip, event));
3329 3338
3330 3339 ret = LDI_EV_NONE;
3331 3340 ldi_ev_lock();
3341 +
3342 + VERIFY(ldi_ev_callback_list.le_walker_next == NULL);
3332 3343 listp = &ldi_ev_callback_list.le_head;
3333 - for (lecp = list_head(listp); lecp; lecp = list_next(listp, lecp)) {
3344 + for (lecp = list_head(listp); lecp; lecp =
3345 + ldi_ev_callback_list.le_walker_next) {
3346 + ldi_ev_callback_list.le_walker_next = list_next(listp, lecp);
3334 3347
3335 3348 /* Check if matching device */
3336 3349 if (!ldi_ev_device_match(lecp, dip, dev, spec_type))
3337 3350 continue;
3338 3351
3339 3352 if (lecp->lec_lhp == NULL) {
3340 3353 /*
3341 3354 * Consumer has unregistered the handle and so
3342 3355 * is no longer interested in notify events.
3343 3356 */
3344 3357 LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): No LDI "
3345 3358 "handle, skipping"));
3346 3359 continue;
3347 3360 }
3348 3361
3349 3362 if (lecp->lec_notify == NULL) {
3350 3363 LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): No notify "
3351 3364 "callback. skipping"));
3352 3365 continue; /* not interested in notify */
3353 3366 }
3354 3367
3355 3368 /*
3356 3369 * Check if matching event
3357 3370 */
3358 3371 lec_event = ldi_ev_get_type(lecp->lec_cookie);
3359 3372 if (strcmp(event, lec_event) != 0) {
3360 3373 LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): Not matching"
3361 3374 " event {%s,%s}. skipping", event, lec_event));
3362 3375 continue;
3363 3376 }
3364 3377
3365 3378 lecp->lec_lhp->lh_flags |= LH_FLAGS_NOTIFY;
3366 3379 if (lecp->lec_notify(lecp->lec_lhp, lecp->lec_cookie,
3367 3380 lecp->lec_arg, ev_data) != LDI_EV_SUCCESS) {
3368 3381 ret = LDI_EV_FAILURE;
3369 3382 LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): notify"
3370 3383 " FAILURE"));
3371 3384 break;
3372 3385 }
3373 3386
3374 3387 /* We have a matching callback that allows the event to occur */
3375 3388 ret = LDI_EV_SUCCESS;
3376 3389
3377 3390 LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): 1 consumer success"));
3378 3391 }
3379 3392
3380 3393 if (ret != LDI_EV_FAILURE)
3381 3394 goto out;
3382 3395
3383 3396 LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): undoing notify"));
3384 3397
3385 3398 /*
3386 3399 * Undo notifies already sent
3387 3400 */
3388 3401 lecp = list_prev(listp, lecp);
3389 - for (; lecp; lecp = list_prev(listp, lecp)) {
3402 + VERIFY(ldi_ev_callback_list.le_walker_prev == NULL);
3403 + for (; lecp; lecp = ldi_ev_callback_list.le_walker_prev) {
3404 + ldi_ev_callback_list.le_walker_prev = list_prev(listp, lecp);
3390 3405
3391 3406 /*
3392 3407 * Check if matching device
3393 3408 */
3394 3409 if (!ldi_ev_device_match(lecp, dip, dev, spec_type))
3395 3410 continue;
3396 3411
3397 3412
3398 3413 if (lecp->lec_finalize == NULL) {
3399 3414 LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): no finalize, "
3400 3415 "skipping"));
3401 3416 continue; /* not interested in finalize */
3402 3417 }
3403 3418
3404 3419 /*
3405 3420 * it is possible that in response to a notify event a
3406 3421 * layered driver closed its LDI handle so it is ok
3407 3422 * to have a NULL LDI handle for finalize. The layered
3408 3423 * driver is expected to maintain state in its "arg"
3409 3424 * parameter to keep track of the closed device.
3410 3425 */
3411 3426
3412 3427 /* Check if matching event */
3413 3428 lec_event = ldi_ev_get_type(lecp->lec_cookie);
3414 3429 if (strcmp(event, lec_event) != 0) {
3415 3430 LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): not matching "
3416 3431 "event: %s,%s, skipping", event, lec_event));
3417 3432 continue;
3418 3433 }
3419 3434
3420 3435 LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): calling finalize"));
3421 3436
3422 3437 lecp->lec_finalize(lecp->lec_lhp, lecp->lec_cookie,
3423 3438 LDI_EV_FAILURE, lecp->lec_arg, ev_data);
3424 3439
3425 3440 /*
3426 3441 * If LDI native event and LDI handle closed in context
3427 3442 * of notify, NULL out the finalize callback as we have
3428 3443			 * already called the one finalize above that is allowed in this situation
3429 3444 */
3430 3445 if (lecp->lec_lhp == NULL &&
3431 3446 ldi_native_cookie(lecp->lec_cookie)) {
3432 3447 LDI_EVDBG((CE_NOTE,
3433 3448 "ldi_invoke_notify(): NULL-ing finalize after "
3434 3449 "calling 1 finalize following ldi_close"));
3435 3450 lecp->lec_finalize = NULL;
3436 3451 }
3437 3452 }
3438 3453
3439 3454 out:
3455 + ldi_ev_callback_list.le_walker_next = NULL;
3456 + ldi_ev_callback_list.le_walker_prev = NULL;
3440 3457 ldi_ev_unlock();
3441 3458
3442 3459 if (ret == LDI_EV_NONE) {
3443 3460 LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): no matching "
3444 3461 "LDI callbacks"));
3445 3462 }
3446 3463
3447 3464 return (ret);
3448 3465 }
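
Editor's note: the undo loop above walks backwards from the consumer that vetoed the event and delivers a finalize with LDI_EV_FAILURE to every consumer that was already notified. Because a consumer may have closed its LDI handle inside its notify callback, a finalize callback has to tolerate a NULL handle and rely on the state it keeps in its "arg" parameter. A minimal sketch of such a consumer-side finalize callback follows; my_state_t, ms_closed_in_notify and my_finalize are illustrative names, not part of the LDI interface (the callback signature matches how lec_finalize is invoked above).

	/*
	 * Hypothetical consumer state: the driver records whether it
	 * closed its handle during the notify callback.
	 */
	typedef struct my_state {
		ldi_handle_t	ms_lh;			/* NULL once closed */
		boolean_t	ms_closed_in_notify;
	} my_state_t;

	static void
	my_finalize(ldi_handle_t lh, ldi_ev_cookie_t cookie, int result,
	    void *arg, void *ev_data)
	{
		my_state_t *msp = arg;

		if (result == LDI_EV_FAILURE && msp->ms_closed_in_notify) {
			/*
			 * The event was vetoed after we already closed our
			 * handle in notify; "lh" is NULL here, so recover
			 * using the state kept in "arg", not the handle.
			 */
			return;
		}
		/* normal success/failure handling ... */
	}
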
3449 3466
3450 3467 /*
3451 3468 * Framework function to be called from a layered driver to propagate
3452 3469 * LDI "notify" events to exported minors.
3453 3470 *
3454 3471 * This function is a public interface exported by the LDI framework
3455 3472 * for use by layered drivers to propagate device events up the software
3456 3473 * stack.
3457 3474 */
3458 3475 int
3459 3476 ldi_ev_notify(dev_info_t *dip, minor_t minor, int spec_type,
3460 3477 ldi_ev_cookie_t cookie, void *ev_data)
3461 3478 {
3462 3479 char *evname = ldi_ev_get_type(cookie);
3463 3480 uint_t ct_evtype;
3464 3481 dev_t dev;
3465 3482 major_t major;
3466 3483 int retc;
3467 3484 int retl;
3468 3485
3469 3486 ASSERT(spec_type == S_IFBLK || spec_type == S_IFCHR);
3470 3487 ASSERT(dip);
3471 3488 ASSERT(ldi_native_cookie(cookie));
3472 3489
3473 3490 LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): entered: event=%s, dip=%p",
3474 3491 evname, (void *)dip));
3475 3492
3476 3493 if (!ldi_ev_sync_event(evname)) {
3477 3494 cmn_err(CE_PANIC, "ldi_ev_notify(): %s not a "
3478 3495 "negotiatable event", evname);
3479 3496 return (LDI_EV_SUCCESS);
3480 3497 }
3481 3498
3482 3499 major = ddi_driver_major(dip);
3483 3500 if (major == DDI_MAJOR_T_NONE) {
3484 3501 char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3485 3502 (void) ddi_pathname(dip, path);
3486 3503 cmn_err(CE_WARN, "ldi_ev_notify: cannot derive major number "
3487 3504 "for device %s", path);
3488 3505 kmem_free(path, MAXPATHLEN);
3489 3506 return (LDI_EV_FAILURE);
3490 3507 }
3491 3508 dev = makedevice(major, minor);
3492 3509
3493 3510 /*
3494 3511 * Generate negotiation contract events on contracts (if any) associated
3495 3512 * with this minor.
3496 3513 */
3497 3514 LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): calling contract nego."));
3498 3515 ct_evtype = ldi_contract_event(evname);
3499 3516 retc = contract_device_negotiate(dip, dev, spec_type, ct_evtype);
3500 3517 if (retc == CT_NACK) {
3501 3518 LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): contract neg. NACK"));
3502 3519 return (LDI_EV_FAILURE);
3503 3520 }
3504 3521
3505 3522 LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): LDI invoke notify"));
3506 3523 retl = ldi_invoke_notify(dip, dev, spec_type, evname, ev_data);
3507 3524 if (retl == LDI_EV_FAILURE) {
3508 3525 LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): ldi_invoke_notify "
3509 3526 "returned FAILURE. Calling contract negend"));
3510 3527 contract_device_negend(dip, dev, spec_type, CT_EV_FAILURE);
3511 3528 return (LDI_EV_FAILURE);
3512 3529 }
3513 3530
3514 3531 /*
3515 3532 * The very fact that we are here indicates that there is a
3516 3533 * LDI callback (and hence a constraint) for the retire of the
3517 3534 * HW device. So we just return success even if there are no
3518 3535 * contracts or LDI callbacks against the minors layered on top
3519 3536 * of the HW minors
3520 3537 */
3521 3538 LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): returning SUCCESS"));
3522 3539 return (LDI_EV_SUCCESS);
3523 3540 }
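
Editor's note: ldi_ev_notify() is the public entry point a layered driver uses to pass a negotiable (sync) event up to the minors it exports; it first negotiates with any device contracts on the minor and only then invokes the LDI notify callbacks registered against it. A hedged sketch of a layered driver forwarding the event from its own notify callback is below; my_notify, my_softc_t, sc_dip and sc_minor are illustrative names only.

	static int
	my_notify(ldi_handle_t lh, ldi_ev_cookie_t cookie, void *arg,
	    void *ev_data)
	{
		my_softc_t *sc = arg;

		/*
		 * Ask our own consumers (contracts and LDI callbacks on
		 * the minor we export) whether the event may proceed.
		 */
		if (ldi_ev_notify(sc->sc_dip, sc->sc_minor, S_IFCHR,
		    cookie, ev_data) != LDI_EV_SUCCESS)
			return (LDI_EV_FAILURE);	/* veto propagates */

		return (LDI_EV_SUCCESS);
	}
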
3524 3541
3525 3542 /*
3526 3543 * LDI framework function to invoke "finalize" callbacks for all layered
3527 3544 * drivers that have registered callbacks for that event.
3528 3545 *
3529 3546 * This function is *not* to be called by layered drivers. It is for I/O
3530 3547 * framework code in Solaris, such as the I/O retire code and DR code
3531 3548 * to call while servicing a device event such as offline or degraded.
3532 3549 */
3533 3550 void
3534 3551 ldi_invoke_finalize(dev_info_t *dip, dev_t dev, int spec_type, char *event,
3535 3552 int ldi_result, void *ev_data)
3536 3553 {
3537 3554 ldi_ev_callback_impl_t *lecp;
3538 3555 list_t *listp;
3539 3556 char *lec_event;
3540 3557 int found = 0;
3541 3558
3542 3559 ASSERT(dip);
3543 3560 ASSERT(dev != DDI_DEV_T_NONE);
3544 3561 ASSERT(dev != NODEV);
3545 3562 ASSERT((dev == DDI_DEV_T_ANY && spec_type == 0) ||
3546 3563 (spec_type == S_IFCHR || spec_type == S_IFBLK));
3547 3564 ASSERT(event);
3548 3565 ASSERT(ldi_native_event(event));
3549 3566 ASSERT(ldi_result == LDI_EV_SUCCESS || ldi_result == LDI_EV_FAILURE);
3550 3567
3551 3568 LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): entered: dip=%p, result=%d"
3552 3569 " event=%s", (void *)dip, ldi_result, event));
3553 3570
3554 3571 ldi_ev_lock();
3572 + VERIFY(ldi_ev_callback_list.le_walker_next == NULL);
3555 3573 listp = &ldi_ev_callback_list.le_head;
3556 - for (lecp = list_head(listp); lecp; lecp = list_next(listp, lecp)) {
3574 + for (lecp = list_head(listp); lecp; lecp =
3575 + ldi_ev_callback_list.le_walker_next) {
3576 + ldi_ev_callback_list.le_walker_next = list_next(listp, lecp);
3557 3577
3558 3578 if (lecp->lec_finalize == NULL) {
3559 3579 LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): No "
3560 3580 "finalize. Skipping"));
3561 3581 continue; /* Not interested in finalize */
3562 3582 }
3563 3583
3564 3584 /*
3565 3585 * Check if matching device
3566 3586 */
3567 3587 if (!ldi_ev_device_match(lecp, dip, dev, spec_type))
3568 3588 continue;
3569 3589
3570 3590 /*
3571 3591 * It is valid for the LDI handle to be NULL during finalize.
3572 3592 * The layered driver may have done an LDI close in the notify
3573 3593 * callback.
3574 3594 */
3575 3595
3576 3596 /*
3577 3597 * Check if matching event
3578 3598 */
3579 3599 lec_event = ldi_ev_get_type(lecp->lec_cookie);
3580 3600 if (strcmp(event, lec_event) != 0) {
3581 3601 LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): Not "
3582 3602 "matching event {%s,%s}. Skipping",
3583 3603 event, lec_event));
3584 3604 continue;
3585 3605 }
3586 3606
3587 3607 LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): calling finalize"));
3588 3608
3589 3609 found = 1;
3590 3610
3591 3611 lecp->lec_finalize(lecp->lec_lhp, lecp->lec_cookie,
3592 3612 ldi_result, lecp->lec_arg, ev_data);
3593 3613
3594 3614 /*
3595 3615 * If LDI native event and LDI handle closed in context
3596 3616 * of notify, NULL out the finalize callback as we have
3597 3617 * already called the 1 finalize above allowed in this situation
3598 3618 */
3599 3619 if (lecp->lec_lhp == NULL &&
3600 3620 ldi_native_cookie(lecp->lec_cookie)) {
3601 3621 LDI_EVDBG((CE_NOTE,
3602 3622 "ldi_invoke_finalize(): NULLing finalize after "
3603 3623 "calling 1 finalize following ldi_close"));
3604 3624 lecp->lec_finalize = NULL;
3605 3625 }
3606 3626 }
3627 + ldi_ev_callback_list.le_walker_next = NULL;
3607 3628 ldi_ev_unlock();
3608 3629
3609 3630 if (found)
3610 3631 return;
3611 3632
3612 3633 LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): no matching callbacks"));
3613 3634 }
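
Editor's note: both walk loops in this change publish their cursor in ldi_ev_callback_list (le_walker_next for the forward walks, le_walker_prev for the backward undo walk) before calling into a callback, so that ldi_ev_remove_callbacks() can advance the cursor past an element it is about to free. Reduced to a self-contained sketch outside the LDI code (entry_t, walk_all and remove_entry are illustrative; list_t/list_node_t are from <sys/list.h> and callers are assumed to hold one lock, analogous to ldi_ev_lock()):

	typedef struct entry {
		list_node_t	e_node;
		void		(*e_cb)(struct entry *);
	} entry_t;

	static entry_t *walker_next;	/* cursor published to removers */

	static void
	walk_all(list_t *lp)
	{
		entry_t *ep;

		for (ep = list_head(lp); ep != NULL; ep = walker_next) {
			/*
			 * Publish the successor before calling out, so a
			 * callback that removes entries can move it along.
			 */
			walker_next = list_next(lp, ep);
			ep->e_cb(ep);
		}
		walker_next = NULL;
	}

	static void
	remove_entry(list_t *lp, entry_t *ep)
	{
		/* Step an in-progress walk past the entry being freed. */
		if (walker_next == ep)
			walker_next = list_next(lp, ep);
		list_remove(lp, ep);
		kmem_free(ep, sizeof (*ep));
	}
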
3614 3635
3615 3636 /*
3616 3637 * Framework function to be called from a layered driver to propagate
3617 3638 * LDI "finalize" events to exported minors.
3618 3639 *
3619 3640 * This function is a public interface exported by the LDI framework
3620 3641 * for use by layered drivers to propagate device events up the software
3621 3642 * stack.
3622 3643 */
3623 3644 void
3624 3645 ldi_ev_finalize(dev_info_t *dip, minor_t minor, int spec_type, int ldi_result,
3625 3646 ldi_ev_cookie_t cookie, void *ev_data)
3626 3647 {
3627 3648 dev_t dev;
3628 3649 major_t major;
3629 3650 char *evname;
3630 3651 int ct_result = (ldi_result == LDI_EV_SUCCESS) ?
3631 3652 CT_EV_SUCCESS : CT_EV_FAILURE;
3632 3653 uint_t ct_evtype;
3633 3654
3634 3655 ASSERT(dip);
3635 3656 ASSERT(spec_type == S_IFBLK || spec_type == S_IFCHR);
3636 3657 ASSERT(ldi_result == LDI_EV_SUCCESS || ldi_result == LDI_EV_FAILURE);
3637 3658 ASSERT(ldi_native_cookie(cookie));
3638 3659
3639 3660 LDI_EVDBG((CE_NOTE, "ldi_ev_finalize: entered: dip=%p", (void *)dip));
3640 3661
3641 3662 major = ddi_driver_major(dip);
3642 3663 if (major == DDI_MAJOR_T_NONE) {
3643 3664 char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3644 3665 (void) ddi_pathname(dip, path);
3645 3666 cmn_err(CE_WARN, "ldi_ev_finalize: cannot derive major number "
3646 3667 "for device %s", path);
3647 3668 kmem_free(path, MAXPATHLEN);
3648 3669 return;
3649 3670 }
3650 3671 dev = makedevice(major, minor);
3651 3672
3652 3673 evname = ldi_ev_get_type(cookie);
3653 3674
3654 3675 LDI_EVDBG((CE_NOTE, "ldi_ev_finalize: calling contracts"));
3655 3676 ct_evtype = ldi_contract_event(evname);
3656 3677 contract_device_finalize(dip, dev, spec_type, ct_evtype, ct_result);
3657 3678
3658 3679 LDI_EVDBG((CE_NOTE, "ldi_ev_finalize: calling ldi_invoke_finalize"));
3659 3680 ldi_invoke_finalize(dip, dev, spec_type, evname, ldi_result, ev_data);
3660 3681 }
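
Editor's note: ldi_ev_finalize() mirrors ldi_ev_notify(): a layered driver calls it from its own finalize callback to push the final disposition of the event to the contracts and LDI callbacks on the minors it exports. A hedged usage sketch (my_finalize_up, my_softc_t, sc_dip and sc_minor are illustrative names, not part of the interface):

	static void
	my_finalize_up(ldi_handle_t lh, ldi_ev_cookie_t cookie, int result,
	    void *arg, void *ev_data)
	{
		my_softc_t *sc = arg;

		/* Pass the outcome (LDI_EV_SUCCESS/LDI_EV_FAILURE) upward. */
		ldi_ev_finalize(sc->sc_dip, sc->sc_minor, S_IFCHR, result,
		    cookie, ev_data);
	}
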
3661 3682
3662 3683 int
3663 3684 ldi_ev_remove_callbacks(ldi_callback_id_t id)
3664 3685 {
3665 3686 ldi_ev_callback_impl_t *lecp;
3666 3687 ldi_ev_callback_impl_t *next;
3667 3688 ldi_ev_callback_impl_t *found;
3668 3689 list_t *listp;
3669 3690
3670 3691 ASSERT(!servicing_interrupt());
3671 3692
3672 3693 if (id == 0) {
3673 3694 cmn_err(CE_WARN, "ldi_ev_remove_callbacks: Invalid ID 0");
3674 3695 return (LDI_EV_FAILURE);
3675 3696 }
3676 3697
3677 3698 LDI_EVDBG((CE_NOTE, "ldi_ev_remove_callbacks: entered: id=%p",
3678 3699 (void *)id));
3679 3700
3680 3701 ldi_ev_lock();
3681 3702
3682 3703 listp = &ldi_ev_callback_list.le_head;
3683 3704 next = found = NULL;
3684 3705 for (lecp = list_head(listp); lecp; lecp = next) {
3685 3706 next = list_next(listp, lecp);
3686 3707 if (lecp->lec_id == id) {
3687 - ASSERT(found == NULL);
3708 + VERIFY(found == NULL);
3709 +
3710 + /*
3711 + * If there is a walk in progress, shift that walk
3712 + * along to the next element so that we can remove
3713 + * this one. This allows us to unregister an arbitrary
3714 + * number of callbacks from within a callback.
3715 + *
3716 + * See the struct definition (in sunldi_impl.h) for
3717 + * more information.
3718 + */
3719 + if (ldi_ev_callback_list.le_walker_next == lecp)
3720 + ldi_ev_callback_list.le_walker_next = next;
3721 + if (ldi_ev_callback_list.le_walker_prev == lecp)
3722 + ldi_ev_callback_list.le_walker_prev = list_prev(
3723 + listp, ldi_ev_callback_list.le_walker_prev);
3724 +
3688 3725 list_remove(listp, lecp);
3689 3726 found = lecp;
3690 3727 }
3691 3728 }
3692 3729 ldi_ev_unlock();
3693 3730
3694 3731 if (found == NULL) {
3695 3732 cmn_err(CE_WARN, "No LDI event handler for id (%p)",
3696 3733 (void *)id);
3697 3734 return (LDI_EV_SUCCESS);
3698 3735 }
3699 3736
3700 3737 if (!ldi_native_cookie(found->lec_cookie)) {
3701 3738 ASSERT(found->lec_notify == NULL);
3702 3739 if (ddi_remove_event_handler((ddi_callback_id_t)id)
3703 3740 != DDI_SUCCESS) {
3704 3741 cmn_err(CE_WARN, "failed to remove NDI event handler "
3705 3742 "for id (%p)", (void *)id);
3706 3743 ldi_ev_lock();
3707 3744 list_insert_tail(listp, found);
3708 3745 ldi_ev_unlock();
3709 3746 return (LDI_EV_FAILURE);
3710 3747 }
3711 3748 LDI_EVDBG((CE_NOTE, "ldi_ev_remove_callbacks: NDI event "
3712 3749 "service removal succeeded"));
3713 3750 } else {
3714 3751 LDI_EVDBG((CE_NOTE, "ldi_ev_remove_callbacks: removed "
3715 3752 "LDI native callbacks"));
3716 3753 }
3717 3754 kmem_free(found, sizeof (ldi_ev_callback_impl_t));
3718 3755
3719 3756 return (LDI_EV_SUCCESS);
3720 3757 }
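
Editor's note: with the walker bookkeeping above, ldi_ev_remove_callbacks() becomes safe to call from within a notify or finalize callback (the point of OS-1988): if the entry being removed is the walk's published cursor, the cursor is stepped to a neighbour before the entry is unlinked and freed, and ldi_ev_lock() is already held recursively by the walking thread. For example, a one-shot consumer could unregister itself from its own finalize callback; a hedged sketch in which my_oneshot_finalize, my_state_t and ms_callback_id are illustrative names:

	static void
	my_oneshot_finalize(ldi_handle_t lh, ldi_ev_cookie_t cookie,
	    int result, void *arg, void *ev_data)
	{
		my_state_t *msp = arg;

		/* ... handle the event outcome ... */

		/*
		 * Safe even though we are running under the framework's
		 * callback walk: the walk's cursor is advanced past our
		 * entry before it is freed.
		 */
		(void) ldi_ev_remove_callbacks(msp->ms_callback_id);
	}
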