7127 remove -Wno-missing-braces from Makefile.uts
--- old/usr/src/uts/common/io/lvm/notify/md_notify.c
+++ new/usr/src/uts/common/io/lvm/notify/md_notify.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
25 25 */
26 26
27 27 #include <sys/systm.h>
28 28 #include <sys/cmn_err.h>
29 29 #include <sys/errno.h>
30 30 #include <sys/ddi.h>
31 31 #include <sys/sunddi.h>
32 32 #include <sys/signal.h>
33 33 #include <sys/modctl.h>
34 34 #include <sys/proc.h>
35 35 #include <sys/lvm/mdvar.h>
36 36
37 37 md_ops_t event_md_ops;
38 38 #ifndef lint
39 39 md_ops_t *md_interface_ops = &event_md_ops;
40 40 #endif
41 41
42 42 extern void sigintr();
43 43 extern void sigunintr();
44 44 extern md_set_t md_set[];
45 45
46 46 extern kmutex_t md_mx; /* protects md global state */
47 47 extern kcondvar_t md_cv; /* md_status events */
48 48 extern int md_status;
49 49 extern clock_t md_hz;
50 50 extern md_event_queue_t *md_event_queue;
51 51 static void md_reaper();
52 52 extern void md_clear_named_service();
53 53
54 54 /* event handler stuff */
55 55 kmutex_t md_eventq_mx;
56 56 int md_reap_count = 32; /* check for pid alive */
57 57 int md_reap = 0;
58 58 int md_max_notify_queue = 512;
59 59 int md_reap_off = 0; /* non-zero turns off reap */
60 60 /* don't allow module to be unloaded until all pending ops are complete */
61 61 int global_lock_wait_cnt = 0;
62 62
63 63 static int
64 64 md_flush_queue(md_event_queue_t *queue)
65 65 {
66 66 md_event_t *element, *next_element;
67 67 /*
68 68 * If a process is still waiting on this queue, it cannot be
69 69 * torn down yet; tell the caller so the waiter can be signalled
70 70 * and the cleanup finished later.
71 71 */
72 72 if (queue->mdn_waiting)
73 73 return (1);
74 74 /*
75 75 * This pid no longer exists, so blow the queue away:
76 76 * free any queued entries here; the caller then unlinks
77 77 * and frees the queue itself.
78 78 */
79 79 element = queue->mdn_front;
80 80 while (element) {
81 81 next_element = element->mdn_next;
82 82 kmem_free(element, sizeof (md_event_t));
83 83 element = next_element;
84 84 }
85 85 queue->mdn_front = queue->mdn_tail = NULL;
86 86 return (0);
87 87
88 88 }
89 89
90 90 static void
91 91 md_put_event(md_tags_t tag, set_t sp, md_dev64_t dev, int event,
92 92 u_longlong_t user)
93 93 {
94 94
95 95 md_event_queue_t *queue;
96 96 md_event_t *entry;
97 97
98 98 if (!md_event_queue)
99 99 return;
100 100
101 101 mutex_enter(&md_eventq_mx);
102 102 for (queue = md_event_queue; queue; queue = queue->mdn_nextq) {
103 103 if (queue->mdn_size >= md_max_notify_queue) {
104 104 ASSERT(queue->mdn_front != NULL);
105 105 ASSERT(queue->mdn_front->mdn_next != NULL);
106 106 entry = queue->mdn_front;
107 107 queue->mdn_front = entry->mdn_next;
108 108 queue->mdn_size--;
109 109 queue->mdn_flags |= MD_EVENT_QUEUE_FULL;
110 110 } else
111 111 entry = (md_event_t *)kmem_alloc(sizeof (md_event_t),
112 112 KM_NOSLEEP);
113 113 if (entry == NULL) {
114 114 queue->mdn_flags |= MD_EVENT_QUEUE_INVALID;
115 115 continue;
116 116 }
117 117 entry->mdn_tag = tag;
118 118 entry->mdn_set = sp;
119 119 entry->mdn_dev = dev;
120 120 entry->mdn_event = event;
121 121 entry->mdn_user = user;
122 122 entry->mdn_next = NULL;
123 123 uniqtime(&entry->mdn_time);
124 124 if (queue->mdn_front == NULL) {
125 125 queue->mdn_front = entry;
126 126 queue->mdn_tail = entry;
127 127 } else {
128 128 queue->mdn_tail->mdn_next = entry;
129 129 queue->mdn_tail = entry;
130 130 }
131 131 if (queue->mdn_waiting)
132 132 cv_signal(&queue->mdn_cv);
133 133
134 134 queue->mdn_size++;
135 135 }
136 136 md_reap++;
137 137 mutex_exit(&md_eventq_mx);
138 138
139 139 if (md_reap > md_reap_count)
140 140 md_reaper();
141 141 }
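
md_put_event above fans each event out to every registered queue. When a queue has already reached md_max_notify_queue entries it does not allocate: it recycles the oldest entry from the front of the queue and raises MD_EVENT_QUEUE_FULL so the consumer can tell that events were lost, while a failed KM_NOSLEEP allocation marks the queue MD_EVENT_QUEUE_INVALID instead. A minimal user-level sketch of the same oldest-drop policy (all names hypothetical; this is not the kernel code):

	#include <stdlib.h>

	struct ev  { int val; struct ev *next; };
	struct evq { struct ev *front, *tail; int size, max, lost; };

	static void
	evq_put(struct evq *q, int val)
	{
		struct ev *e;

		if (q->size >= q->max) {	/* full: recycle the oldest */
			e = q->front;
			q->front = e->next;
			q->size--;
			q->lost = 1;		/* ~ MD_EVENT_QUEUE_FULL */
		} else if ((e = malloc(sizeof (*e))) == NULL) {
			q->lost = 1;		/* ~ MD_EVENT_QUEUE_INVALID */
			return;
		}
		e->val = val;
		e->next = NULL;
		if (q->front == NULL)		/* append at the tail */
			q->front = e;
		else
			q->tail->next = e;
		q->tail = e;
		q->size++;
	}
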
142 142
143 143 static void
144 144 md_reaper()
145 145 {
146 146 md_event_queue_t *next = md_event_queue;
147 147 md_event_queue_t *present, *last = NULL;
148 148
149 149 if (md_event_queue == NULL || md_reap_off)
150 150 return;
151 151
152 152 mutex_enter(&md_eventq_mx);
153 153 while (next) {
154 154 present = next;
155 155 next = present->mdn_nextq;
156 156
157 157 /* check for long term event queue */
158 158 if (present->mdn_flags & MD_EVENT_QUEUE_PERM) {
159 159 last = present;
160 160 continue;
161 161 }
162 162
163 163 /* check to see if the pid is still alive */
164 164 if (!md_checkpid(present->mdn_pid, present->mdn_proc))
165 165 present->mdn_flags |= MD_EVENT_QUEUE_DESTROY;
166 166
167 167 /* skip queues not marked for destruction */
168 168 if (! (present->mdn_flags & MD_EVENT_QUEUE_DESTROY)) {
169 169 last = present;
170 170 continue;
171 171 }
172 172
173 173 /* yeeeha blow this one away */
174 174 present->mdn_pid = 0;
175 175 present->mdn_proc = NULL;
176 176 /*
177 177 * If something is still waiting on the queue even though the
178 178 * process/pid no longer exists, wake the defunct waiter and
179 179 * leave the queue marked; it is cleaned up on a later pass.
180 180 */
181 181 if (md_flush_queue(present)) {
182 182 present->mdn_flags = MD_EVENT_QUEUE_DESTROY;
183 183 cv_broadcast(&present->mdn_cv);
184 184 last = present;
185 185 continue;
186 186 }
187 187 /* remove the entry */
188 188 if (last == NULL)
189 189 md_event_queue = next;
190 190 else
191 191 last->mdn_nextq = next;
192 192 cv_destroy(&present->mdn_cv);
193 193 kmem_free(present, sizeof (md_event_queue_t));
194 194 }
195 195 md_reap = 0;
196 196 mutex_exit(&md_eventq_mx);
197 197 }
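
md_reaper walks the singly linked list of queues with the classic trailing-pointer idiom: last stays one node behind present, so unlinking a dead queue is either a head update or a single next-pointer splice. The same idiom in isolation (hypothetical types, user-level C):

	#include <stdlib.h>

	struct node { int dead; struct node *next; };

	static struct node *
	reap(struct node *head)
	{
		struct node *n = head, *last = NULL, *next;

		while (n != NULL) {
			next = n->next;
			if (!n->dead) {
				last = n;		/* keep: trail behind */
			} else {
				if (last == NULL)
					head = next;	/* unlink the head */
				else
					last->next = next; /* splice out */
				free(n);
			}
			n = next;
		}
		return (head);
	}
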
198 198
199 199 /* ARGSUSED */
200 200 static int
201 201 notify_halt(md_haltcmd_t cmd, set_t setno)
202 202 {
203 203 md_event_queue_t *orig_queue, *queue, *queue_free;
204 204 int i;
205 205
206 206
207 207 switch (cmd) {
208 208 case MD_HALT_CLOSE:
209 209 case MD_HALT_OPEN:
210 210 case MD_HALT_DOIT:
211 211 case MD_HALT_CHECK:
212 212
213 213 return (0);
214 214
215 215 case MD_HALT_UNLOAD:
216 216 if (setno != MD_LOCAL_SET)
217 217 return (1);
218 218 mutex_enter(&md_eventq_mx);
219 219 if (md_event_queue == NULL) {
220 220 mutex_exit(&md_eventq_mx);
221 221 return (0);
222 222 }
223 223
224 224 orig_queue = md_event_queue;
225 225 md_event_queue = NULL;
226 226 for (i = 0; i < MD_NOTIFY_HALT_TRIES; i++) {
227 227 for (queue = orig_queue; queue;
228 228 queue = queue->mdn_nextq) {
229 229 if (queue->mdn_waiting == 0) {
230 230 continue;
231 231 }
232 232 queue->mdn_flags = MD_EVENT_QUEUE_DESTROY;
233 233 mutex_exit(&md_eventq_mx);
234 234 cv_broadcast(&queue->mdn_cv);
235 235 delay(md_hz);
236 236 mutex_enter(&md_eventq_mx);
237 237 }
238 238 }
239 239 for (queue = orig_queue; queue; ) {
240 240 if (md_flush_queue(queue)) {
241 241 cmn_err(CE_WARN, "md: queue not freed");
242 242 mutex_exit(&md_eventq_mx);
243 243 return (1);
244 244 }
245 245 queue_free = queue;
246 246 queue = queue->mdn_nextq;
247 247 kmem_free(queue_free, sizeof (md_event_queue_t));
248 248 }
249 249 md_event_queue = NULL;
250 250 mutex_exit(&md_eventq_mx);
251 251 return (0);
252 252
253 253 default:
254 254 return (1);
255 255 }
256 256 }
257 257
258 258 static md_event_queue_t *
259 259 md_find_event_queue(char *q_name, int lock)
260 260 {
261 261 md_event_queue_t *event_q = md_event_queue;
262 262
263 263 if (lock)
264 264 mutex_enter(&md_eventq_mx);
265 265 ASSERT(MUTEX_HELD(&md_eventq_mx));
266 266 while (event_q) {
267 267 if ((*event_q->mdn_name != *q_name) ||
268 268 (event_q->mdn_flags & MD_EVENT_QUEUE_DESTROY)) {
269 269 event_q = event_q->mdn_nextq;
270 270 continue;
271 271 }
272 272
273 273 if (bcmp(q_name, event_q->mdn_name, MD_NOTIFY_NAME_SIZE) == 0)
274 274 break;
275 275 event_q = event_q->mdn_nextq;
276 276 }
277 277 if (lock)
278 278 mutex_exit(&md_eventq_mx);
279 279
280 280 return ((md_event_queue_t *)event_q);
281 281 }
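
md_find_event_queue cheaply rejects most non-matches by comparing the first byte of the name before paying for a full bcmp over MD_NOTIFY_NAME_SIZE bytes. The same fast-path idiom in isolation (hypothetical helper, user-level C):

	#include <string.h>

	/* first-byte fast path in front of a fixed-width compare */
	static int
	name_matches(const char *want, const char *have, size_t width)
	{
		if (*want != *have)		/* cheap reject */
			return (0);
		return (memcmp(want, have, width) == 0);
	}
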
282 282
283 283 static intptr_t
284 284 notify_interface(md_event_cmds_t cmd, md_tags_t tag, set_t set, md_dev64_t dev,
285 285 md_event_type_t event)
286 286 {
287 287 switch (cmd) {
288 288 case EQ_PUT:
289 289 md_put_event(tag, set, dev, event, (u_longlong_t)0);
290 290 break;
291 291 default:
292 292 return (-1);
293 293 }
294 294 return (0);
295 295 }
296 296
297 297 static int
298 298 notify_fillin_empty_ioctl(void *data, void *ioctl_in, size_t sz,
299 299 int mode)
300 300 {
301 301
302 302 int err;
303 303 md_event_ioctl_t *ioctl = (md_event_ioctl_t *)data;
304 304
305 305
306 306 ioctl->mdn_event = EQ_EMPTY;
307 307 ioctl->mdn_tag = TAG_EMPTY;
308 308 ioctl->mdn_set = MD_ALLSETS;
309 309 ioctl->mdn_dev = MD_ALLDEVS;
310 310 uniqtime32(&ioctl->mdn_time);
311 311 ioctl->mdn_user = (u_longlong_t)0;
312 312 err = ddi_copyout(data, ioctl_in, sz, mode);
313 313 return (err);
314 314 }
315 315
316 316 /*
317 317 * md_wait_for_event:
318 318 * IOLOCK_RETURN, which drops md_ioctl_lock, is called in this
319 319 * routine to enable other md ioctls to enter the kernel while this
320 320 * thread of execution waits on an event. When that event occurs, the
321 321 * stopped thread wakes and continues, and md_ioctl_lock must be
322 322 * reacquired. Even though md_ioctl_lock is interruptible, we choose
323 323 * to ignore EINTR. Returning without reacquiring md_ioctl_lock is
324 324 * catastrophic since it breaks down ioctl single threading.
325 325 *
326 326 * Return: 0      md_eventq_mx held
327 327 *         EINTR  md_eventq_mx not held
328 328 * Always returns with the ioctl lock held.
329 329 */
330 330
331 331 static int
332 332 md_wait_for_event(md_event_queue_t *event_queue, void *ioctl_in,
333 333 md_event_ioctl_t *ioctl, size_t sz,
334 334 int mode, IOLOCK *lockp)
335 335 {
336 336 int rval = 0;
337 337
338 338 while (event_queue->mdn_front == NULL) {
339 339 event_queue->mdn_waiting++;
340 340 (void) IOLOCK_RETURN(0, lockp);
341 341 rval = cv_wait_sig(&event_queue->mdn_cv, &md_eventq_mx);
342 342 event_queue->mdn_waiting--;
343 343 if ((rval == 0) || (event_queue->mdn_flags &
344 344 MD_EVENT_QUEUE_DESTROY)) {
345 345 global_lock_wait_cnt++;
346 346 mutex_exit(&md_eventq_mx);
347 347 /* reenable single threading of ioctls */
348 348 while (md_ioctl_lock_enter() == EINTR);
349 349
350 350 (void) notify_fillin_empty_ioctl
351 351 ((void *)ioctl, ioctl_in, sz, mode);
352 352 mutex_enter(&md_eventq_mx);
353 353 global_lock_wait_cnt--;
354 354 mutex_exit(&md_eventq_mx);
355 355 return (EINTR);
356 356 }
357 357 /*
358 358 * reacquire single threading ioctls. Drop eventq_mutex
359 359 * since md_ioctl_lock_enter can sleep.
360 360 */
361 361 global_lock_wait_cnt++;
362 362 mutex_exit(&md_eventq_mx);
363 363 while (md_ioctl_lock_enter() == EINTR);
364 364 mutex_enter(&md_eventq_mx);
365 365 global_lock_wait_cnt--;
366 366 }
367 367 return (0);
368 368 }
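
The central rule in md_wait_for_event is lock ordering: the outer ioctl lock is dropped before sleeping so other md ioctls can run, and on every exit path, even the EINTR one, it is reacquired before md_eventq_mx, never the other way around. A user-level pthreads analogue of the hand-off (names hypothetical; a sketch, not the kernel mechanism):

	#include <pthread.h>

	static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER; /* ~ md ioctl lock */
	static pthread_mutex_t qmx = PTHREAD_MUTEX_INITIALIZER;   /* ~ md_eventq_mx */
	static pthread_cond_t qcv = PTHREAD_COND_INITIALIZER;

	/* entered with both locks held; always returns with both held */
	static void
	wait_for_work(const int *have_work)
	{
		while (!*have_work) {
			pthread_mutex_unlock(&outer);	/* let other "ioctls" run */
			pthread_cond_wait(&qcv, &qmx);	/* atomically drops/retakes qmx */
			pthread_mutex_unlock(&qmx);	/* drop before the blocking acquire */
			pthread_mutex_lock(&outer);	/* reacquire the outer lock first */
			pthread_mutex_lock(&qmx);	/* then the queue mutex */
		}
	}
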
369 369
370 370 /* ARGSUSED */
371 371 static int
372 372 notify_ioctl(dev_t dev, int icmd, void *ioctl_in, int mode, IOLOCK *lockp)
373 373 {
374 374 int cmd;
375 375 pid_t pid;
376 376 md_event_queue_t *event_queue;
377 377 md_event_t *event;
378 378 cred_t *credp;
379 379 char *q_name;
380 380 int err = 0;
381 381 size_t sz = 0;
382 382 md_event_ioctl_t *ioctl;
383 383
384 384 sz = sizeof (*ioctl);
385 385 ioctl = kmem_zalloc(sz, KM_SLEEP);
386 386
387 387 if (ddi_copyin(ioctl_in, (void *)ioctl, sz, mode)) {
388 388 err = EFAULT;
389 389 goto out;
390 390 }
391 391
392 392 if (ioctl->mdn_rev != MD_NOTIFY_REVISION) {
393 393 err = EINVAL;
394 394 goto out;
395 395 }
396 396 if (ioctl->mdn_magic != MD_EVENT_ID) {
397 397 err = EINVAL;
398 398 goto out;
399 399 }
400 400
401 401 pid = md_getpid();
402 402 cmd = ioctl->mdn_cmd;
403 403 q_name = ioctl->mdn_name;
404 404
405 405 if (((cmd != EQ_OFF) && (cmd != EQ_ON)) && (md_reap >= md_reap_count))
406 406 md_reaper();
407 407
408 408 if ((cmd != EQ_ON) && (cmd != EQ_PUT)) {
409 409 mutex_enter(&md_eventq_mx);
410 410 if ((event_queue = md_find_event_queue(q_name, 0)) == NULL) {
411 411 mutex_exit(&md_eventq_mx);
412 412 (void) notify_fillin_empty_ioctl
413 413 ((void *)ioctl, ioctl_in, sz, mode);
414 414 err = ENOENT;
415 415 goto out;
416 416 }
417 417 }
418 418
419 419 switch (cmd) {
420 420 case EQ_ON:
421 421
422 422 md_reaper();
423 423
424 424 mutex_enter(&md_eventq_mx);
425 425 if (md_find_event_queue(q_name, 0) != NULL) {
426 426 mutex_exit(&md_eventq_mx);
427 427 err = EEXIST;
428 428 break;
429 429 }
430 430
431 431 /* allocate and initialize queue head */
432 432 event_queue = (md_event_queue_t *)
433 433 kmem_alloc(sizeof (md_event_queue_t), KM_NOSLEEP);
434 434 if (event_queue == NULL) {
435 435 mutex_exit(&md_eventq_mx);
436 436 err = ENOMEM;
437 437 break;
438 438 }
439 439
440 440 cv_init(&event_queue->mdn_cv, NULL, CV_DEFAULT, NULL);
441 441
442 442 event_queue->mdn_flags = 0;
443 443 event_queue->mdn_pid = pid;
444 444 event_queue->mdn_proc = md_getproc();
445 445 event_queue->mdn_size = 0;
446 446 event_queue->mdn_front = NULL;
447 447 event_queue->mdn_tail = NULL;
448 448 event_queue->mdn_waiting = 0;
449 449 event_queue->mdn_nextq = NULL;
450 450 credp = ddi_get_cred();
451 451 event_queue->mdn_uid = crgetuid(credp);
452 452 bcopy(q_name, event_queue->mdn_name,
453 453 MD_NOTIFY_NAME_SIZE);
454 454 if (ioctl->mdn_flags & EQ_Q_PERM)
455 455 event_queue->mdn_flags |= MD_EVENT_QUEUE_PERM;
456 456
457 457 /* link into the list of event queues */
458 458 if (md_event_queue != NULL)
459 459 event_queue->mdn_nextq = md_event_queue;
460 460 md_event_queue = event_queue;
461 461 mutex_exit(&md_eventq_mx);
462 462 err = 0;
463 463 break;
464 464
465 465 case EQ_OFF:
466 466
467 467 if (md_event_queue == NULL)
468 468 return (ENOENT);
469 469
470 470 event_queue->mdn_flags = MD_EVENT_QUEUE_DESTROY;
471 471 event_queue->mdn_pid = 0;
472 472 event_queue->mdn_proc = NULL;
473 473
474 474 if (event_queue->mdn_waiting != 0)
475 475 cv_broadcast(&event_queue->mdn_cv);
476 476
477 477 /*
478 478 * force the reaper to delete this when it has no process
479 479 * waiting on it.
480 480 */
481 481 mutex_exit(&md_eventq_mx);
482 482 md_reaper();
483 483 err = 0;
484 484 break;
485 485
486 486 case EQ_GET_NOWAIT:
487 487 case EQ_GET_WAIT:
488 488 if (cmd == EQ_GET_WAIT) {
489 489 err = md_wait_for_event(event_queue, ioctl_in,
490 490 ioctl, sz, mode, lockp);
491 491 if (err == EINTR)
492 492 goto out;
493 493 }
494 494 ASSERT(MUTEX_HELD(&md_eventq_mx));
495 495 if (event_queue->mdn_flags &
496 496 (MD_EVENT_QUEUE_INVALID | MD_EVENT_QUEUE_FULL)) {
497 497 event_queue->mdn_flags &=
498 498 ~(MD_EVENT_QUEUE_INVALID | MD_EVENT_QUEUE_FULL);
499 499 mutex_exit(&md_eventq_mx);
500 500 err = notify_fillin_empty_ioctl
501 501 ((void *)ioctl, ioctl_in, sz, mode);
502 502 ioctl->mdn_event = EQ_NOTIFY_LOST;
503 503 err = ddi_copyout((void *)ioctl, ioctl_in, sz, mode);
504 504 if (err)
505 505 err = EFAULT;
506 506 goto out;
507 507 }
508 508 if (event_queue->mdn_front != NULL) {
509 509 event = event_queue->mdn_front;
510 510 event_queue->mdn_front = event->mdn_next;
511 511 event_queue->mdn_size--;
512 512 if (event_queue->mdn_front == NULL)
513 513 event_queue->mdn_tail = NULL;
514 514 mutex_exit(&md_eventq_mx);
515 515 ioctl->mdn_tag = event->mdn_tag;
516 516 ioctl->mdn_set = event->mdn_set;
517 517 ioctl->mdn_dev = event->mdn_dev;
518 518 ioctl->mdn_event = event->mdn_event;
519 519 ioctl->mdn_user = event->mdn_user;
520 520 ioctl->mdn_time.tv_sec = event->mdn_time.tv_sec;
521 521 ioctl->mdn_time.tv_usec =
522 522 event->mdn_time.tv_usec;
523 523 kmem_free(event, sizeof (md_event_t));
524 524 err = ddi_copyout((void *)ioctl, ioctl_in, sz, mode);
525 525 if (err)
526 526 err = EFAULT;
527 527 goto out;
528 528 } else { /* no elements on queue */
529 529 mutex_exit(&md_eventq_mx);
530 530 err = notify_fillin_empty_ioctl
531 531 ((void *)ioctl, ioctl_in, sz, mode);
532 532 if (err)
533 533 err = EFAULT;
534 534 }
535 535
536 536 if (cmd == EQ_GET_NOWAIT)
537 537 err = EAGAIN;
538 538 goto out;
539 539
540 540 case EQ_PUT:
541 541
542 542 if (!md_event_queue) {
543 543 err = ENOENT;
544 544 break;
545 545 }
546 546 md_put_event(ioctl->mdn_tag,
547 547 ioctl->mdn_set, ioctl->mdn_dev,
548 548 ioctl->mdn_event, ioctl->mdn_user);
549 549 err = 0;
550 550 goto out;
551 551
552 552 default:
553 553 err = EINVAL;
554 554 goto out;
555 555 }
556 556
557 557 out:
558 558 kmem_free(ioctl, sz);
559 559 return (err);
560 560 }
561 561
562 562 /*
563 563 * Turn orphaned queue off for testing purposes.
564 564 */
565 565
566 566 static intptr_t
567 567 notify_reap_off()
568 568 {
569 569 md_reap_off = 1;
570 570 return (0);
571 571 }
572 572
573 573 /*
574 574 * Turn reaping back on.
575 575 */
576 576
577 577 static intptr_t
578 578 notify_reap_on()
579 579 {
580 580 md_reap_off = 0;
581 581 return (0);
582 582 }
583 583
584 584 /*
585 585 * Return information that is used to test the notification feature.
586 586 */
587 587
588 588 static intptr_t
589 589 notify_test_stats(md_notify_stats_t *stats)
590 590 {
591 591 stats->mds_eventq_mx = &md_eventq_mx;
592 592 stats->mds_reap_count = md_reap_count;
593 593 stats->mds_reap = md_reap;
594 594 stats->mds_max_queue = md_max_notify_queue;
595 595 stats->mds_reap_off = md_reap_off;
596 596 return (0);
597 597 }
598 598
599 599 /*
600 600 * put this stuff at end so we don't have to create forward
601 601 * references for everything
602 602 */
603 603 static struct modlmisc modlmisc = {
604 604 &mod_miscops,
605 605 "Solaris Volume Manager notification module"
606 606 };
607 607
608 608 static struct modlinkage modlinkage = {
609 - MODREV_1, (void *)&modlmisc, NULL
609 + MODREV_1, { (void *)&modlmisc, NULL }
610 610 };
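
This hunk is the only change in the file, and it is what removing -Wno-missing-braces from Makefile.uts is about: the second member of struct modlinkage is an array (ml_linkage[] in sys/modctl.h), so the old flat initializer relied on C's brace elision and tripped gcc's -Wmissing-braces. A minimal reproduction with a hypothetical stand-in struct:

	/* compile with: gcc -c -Wmissing-braces braces.c */
	struct linkage_like {		/* stand-in for struct modlinkage */
		int	rev;
		void	*linkage[7];	/* array member, like ml_linkage */
	};

	static int x;
	struct linkage_like flat   = { 1, (void *)&x, 0 };	/* warns */
	struct linkage_like braced = { 1, { (void *)&x, 0 } };	/* clean */

Both forms initialize the same object; the fully braced form simply makes the array sub-initializer explicit so the warning can stay enabled build-wide.
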
611 611
612 612 static md_named_services_t notify_services[] = {
613 613 {notify_interface, "notify interface"},
614 614 {notify_reap_off, MD_NOTIFY_REAP_OFF},
615 615 {notify_reap_on, MD_NOTIFY_REAP_ON},
616 616 {notify_test_stats, MD_NOTIFY_TEST_STATS},
617 617 {NULL, 0}
618 618 };
619 619
620 620 md_ops_t event_md_ops = {
621 621 NULL, /* open */
622 622 NULL, /* close */
623 623 NULL, /* strategy */
624 624 NULL, /* print */
625 625 NULL, /* dump */
626 626 NULL, /* read */
627 627 NULL, /* write */
628 628 notify_ioctl, /* event_ioctls, */
629 629 NULL, /* snarf */
630 630 notify_halt, /* halt */
631 631 NULL, /* aread */
632 632 NULL, /* awrite */
633 633 NULL, /* import set */
634 634 notify_services /* named_services */
635 635 };
636 636
637 637 int
638 638 _init()
639 639 {
640 640 md_event_queue = NULL;
641 641 mutex_init(&md_eventq_mx, NULL, MUTEX_DEFAULT, NULL);
642 642 return (mod_install(&modlinkage));
643 643 }
644 644
645 645 int
646 646 _fini()
647 647 {
648 648 int err = 0;
649 649
650 650 /*
651 651 * Don't allow the module to be unloaded while there is a thread
652 652 * of execution that is waiting for a global lock.
653 653 */
654 654 if (global_lock_wait_cnt > 0)
655 655 return (EBUSY);
656 656
657 657 if ((err = mod_remove(&modlinkage)) != 0)
658 658 return (err);
659 659
660 660 md_clear_named_service();
661 661 mutex_destroy(&md_eventq_mx);
662 662 return (err);
663 663 }
664 664
665 665 int
666 666 _info(struct modinfo *modinfop)
667 667 {
668 668 return (mod_info(&modlinkage, modinfop));
669 669 }