/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include <fs/fs_subr.h>
#include <sys/atomic.h>
#include <sys/cmn_err.h>
#include <sys/dirent.h>
#include <sys/fs/fifonode.h>
#include <sys/modctl.h>
#include <sys/mount.h>
#include <sys/policy.h>
#include <sys/sunddi.h>

#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <sys/vfs_opreg.h>

#include <sys/lx_autofs_impl.h>

/*
 * External functions
 */
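/*
 * space_fetch() and space_store() access a simple kernel-wide
 * key/value registry that survives module unload/reload.  We use it
 * in lx_autofs_init() to keep a stable device major number across
 * reloads of this module.
 */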
extern uintptr_t space_fetch(char *key);
extern int space_store(char *key, uintptr_t ptr);

/*
 * Globals
 */
static vfsops_t *lx_autofs_vfsops;
static vnodeops_t *lx_autofs_vn_ops = NULL;
static int lx_autofs_fstype;
static major_t lx_autofs_major;
static minor_t lx_autofs_minor = 0;

/*
 * Support functions
 */
static void
i_strfree(char *str)
{
	kmem_free(str, strlen(str) + 1);
}

static char *
i_strdup(char *str)
{
	int n = strlen(str);
	char *ptr = kmem_alloc(n + 1, KM_SLEEP);
	bcopy(str, ptr, n + 1);
	return (ptr);
}

static int
i_str_to_int(char *str, int *val)
{
	long res;

	if (str == NULL)
		return (-1);

	if ((ddi_strtol(str, NULL, 10, &res) != 0) ||
	    (res < INT_MIN) || (res > INT_MAX))
		return (-1);

	*val = res;
	return (0);
}

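/*
 * A trivial LIFO stack of (ptr1, ptr2, ptr3) tuples built on list_t.
 * i_bs_destroy() uses these stacks to walk directory trees iteratively
 * rather than recursing on a small kernel stack.
 */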
static void
i_stack_init(list_t *lp)
{
	list_create(lp,
	    sizeof (stack_elem_t), offsetof(stack_elem_t, se_list));
}

static void
i_stack_fini(list_t *lp)
{
	ASSERT(list_head(lp) == NULL);
	list_destroy(lp);
}

static void
i_stack_push(list_t *lp, caddr_t ptr1, caddr_t ptr2, caddr_t ptr3)
{
	stack_elem_t *se;

	se = kmem_alloc(sizeof (*se), KM_SLEEP);
	se->se_ptr1 = ptr1;
	se->se_ptr2 = ptr2;
	se->se_ptr3 = ptr3;
	list_insert_head(lp, se);
}

static int
i_stack_pop(list_t *lp, caddr_t *ptr1, caddr_t *ptr2, caddr_t *ptr3)
{
	stack_elem_t *se;

	if ((se = list_head(lp)) == NULL)
		return (-1);
	list_remove(lp, se);
	if (ptr1 != NULL)
		*ptr1 = se->se_ptr1;
	if (ptr2 != NULL)
		*ptr2 = se->se_ptr2;
	if (ptr3 != NULL)
		*ptr3 = se->se_ptr3;
	kmem_free(se, sizeof (*se));
	return (0);
}

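/*
 * Return the vnode for the other end of a fifo.  The two ends of a
 * pipe are linked via the fifonode_t fn_dest pointers.
 */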
static vnode_t *
fifo_peer_vp(vnode_t *vp)
{
	fifonode_t *fnp = VTOF(vp);
	fifonode_t *fn_dest = fnp->fn_dest;
	return (FTOV(fn_dest));
}

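/*
 * Return the lx_autofs vnode shadowing the underlying vnode uvp,
 * allocating a new one and adding it to the vnode hash if one doesn't
 * already exist.  Either way, the caller's hold on uvp is consumed.
 */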
static vnode_t *
i_vn_alloc(vfs_t *vfsp, vnode_t *uvp)
{
	lx_autofs_vfs_t *data = vfsp->vfs_data;
	vnode_t *vp, *vp_old;

	/* Allocate a new vnode structure in case we need it. */
	vp = vn_alloc(KM_SLEEP);
	vn_setops(vp, lx_autofs_vn_ops);
	VN_SET_VFS_TYPE_DEV(vp, vfsp, uvp->v_type, uvp->v_rdev);
	vp->v_data = uvp;
	ASSERT(vp->v_count == 1);

	/*
	 * Take a hold on the vfs structure.  This is how unmount will
	 * determine if there are any active vnodes in the file system.
	 */
	VFS_HOLD(vfsp);

	/*
	 * Check if we already have a vnode allocated for this underlying
	 * vnode_t.
	 */
	mutex_enter(&data->lav_lock);
	if (mod_hash_find(data->lav_vn_hash,
	    (mod_hash_key_t)uvp, (mod_hash_val_t *)&vp_old) != 0) {

		/*
		 * Didn't find an existing node.
		 * Add this node to the hash and return.
		 */
		VERIFY(mod_hash_insert(data->lav_vn_hash,
		    (mod_hash_key_t)uvp,
		    (mod_hash_val_t)vp) == 0);
		mutex_exit(&data->lav_lock);
		return (vp);
	}

	/* Get a hold on the existing vnode. */
	VN_HOLD(vp_old);
	mutex_exit(&data->lav_lock);

	/* Free up the new vnode we allocated. */
	VN_RELE(uvp);
	VFS_RELE(vfsp);
	vn_invalid(vp);
	vn_free(vp);

	return (vp_old);
}

static void
i_vn_free(vnode_t *vp)
{
	vfs_t *vfsp = vp->v_vfsp;
	lx_autofs_vfs_t *data = vfsp->vfs_data;
	vnode_t *uvp = vp->v_data;
	vnode_t *vp_tmp;

	ASSERT(MUTEX_HELD(&data->lav_lock));
	ASSERT(MUTEX_HELD(&vp->v_lock));

	ASSERT(vp->v_count == 0);

	/* We're about to free this vnode so take it out of the hash. */
	(void) mod_hash_remove(data->lav_vn_hash,
	    (mod_hash_key_t)uvp, (mod_hash_val_t)&vp_tmp);

	/*
	 * No one else can look up this vnode any more, so there's no need
	 * to hold locks.
	 */
	mutex_exit(&data->lav_lock);
	mutex_exit(&vp->v_lock);

	/* Release the underlying vnode. */
	VN_RELE(uvp);
	VFS_RELE(vfsp);
	vn_invalid(vp);
	vn_free(vp);
}

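/*
 * Allocate a lookup request for the directory entry nm, or return an
 * existing request for the same name with its reference count bumped.
 * The embedded packet is modeled on the "missing" request of the
 * Linux kernel automount protocol (v2) that gets written to the
 * automounter's pipe.
 */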
static lx_autofs_lookup_req_t *
i_lalr_alloc(lx_autofs_vfs_t *data, int *dup_request, char *nm)
{
	lx_autofs_lookup_req_t *lalr, *lalr_dup;

	/* Pre-allocate a new automounter request before grabbing locks. */
	lalr = kmem_zalloc(sizeof (*lalr), KM_SLEEP);
	mutex_init(&lalr->lalr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&lalr->lalr_cv, NULL, CV_DEFAULT, NULL);
	lalr->lalr_ref = 1;
	lalr->lalr_pkt.lap_protover = LX_AUTOFS_PROTO_VERSION;

	/* Assign a unique id for this request. */
	lalr->lalr_pkt.lap_id = id_alloc(data->lav_ids);

	/*
	 * The token expected by the Linux automounter is the name of
	 * the directory entry to look up.  (And not the entire
	 * path that is being accessed.)
	 */
	lalr->lalr_pkt.lap_name_len = strlen(nm);
	if (lalr->lalr_pkt.lap_name_len >
	    (sizeof (lalr->lalr_pkt.lap_name) - 1)) {
		zcmn_err(getzoneid(), CE_NOTE,
		    "invalid autofs lookup: \"%s\"", nm);
		id_free(data->lav_ids, lalr->lalr_pkt.lap_id);
		kmem_free(lalr, sizeof (*lalr));
		return (NULL);
	}
	(void) strlcpy(lalr->lalr_pkt.lap_name, nm,
	    sizeof (lalr->lalr_pkt.lap_name));

	/* Check for an outstanding request for this path. */
	mutex_enter(&data->lav_lock);
	if (mod_hash_find(data->lav_path_hash,
	    (mod_hash_key_t)nm, (mod_hash_val_t *)&lalr_dup) == 0) {
		/*
		 * There's already an outstanding request for this
		 * path so we don't need a new one.
		 */
		id_free(data->lav_ids, lalr->lalr_pkt.lap_id);
		kmem_free(lalr, sizeof (*lalr));
		lalr = lalr_dup;

		/* Bump the ref count on the old request. */
		atomic_add_int(&lalr->lalr_ref, 1);

		*dup_request = 1;
	} else {
		/* Add it to the hashes. */
		VERIFY(mod_hash_insert(data->lav_id_hash,
		    (mod_hash_key_t)(uintptr_t)lalr->lalr_pkt.lap_id,
		    (mod_hash_val_t)lalr) == 0);
		VERIFY(mod_hash_insert(data->lav_path_hash,
		    (mod_hash_key_t)i_strdup(nm),
		    (mod_hash_val_t)lalr) == 0);

		*dup_request = 0;
	}
	mutex_exit(&data->lav_lock);

	return (lalr);
}

static lx_autofs_lookup_req_t *
i_lalr_find(lx_autofs_vfs_t *data, int id)
{
	lx_autofs_lookup_req_t *lalr;

	/* Check for an outstanding request for this id. */
	mutex_enter(&data->lav_lock);
	if (mod_hash_find(data->lav_id_hash, (mod_hash_key_t)(uintptr_t)id,
	    (mod_hash_val_t *)&lalr) != 0) {
		mutex_exit(&data->lav_lock);
		return (NULL);
	}
	atomic_add_int(&lalr->lalr_ref, 1);
	mutex_exit(&data->lav_lock);
	return (lalr);
}

static void
i_lalr_complete(lx_autofs_vfs_t *data, lx_autofs_lookup_req_t *lalr)
{
	lx_autofs_lookup_req_t *lalr_tmp;

	/* Remove this request from the hashes so no one can look it up. */
	mutex_enter(&data->lav_lock);
	(void) mod_hash_remove(data->lav_id_hash,
	    (mod_hash_key_t)(uintptr_t)lalr->lalr_pkt.lap_id,
	    (mod_hash_val_t)&lalr_tmp);
	(void) mod_hash_remove(data->lav_path_hash,
	    (mod_hash_key_t)lalr->lalr_pkt.lap_name,
	    (mod_hash_val_t)&lalr_tmp);
	mutex_exit(&data->lav_lock);

	/* Mark this request as complete and wake up anyone waiting on it. */
	mutex_enter(&lalr->lalr_lock);
	lalr->lalr_complete = 1;
	cv_broadcast(&lalr->lalr_cv);
	mutex_exit(&lalr->lalr_lock);
}

static void
i_lalr_release(lx_autofs_vfs_t *data, lx_autofs_lookup_req_t *lalr)
{
	ASSERT(!MUTEX_HELD(&lalr->lalr_lock));
	if (atomic_add_int_nv(&lalr->lalr_ref, -1) > 0)
		return;
	ASSERT(lalr->lalr_ref == 0);
	id_free(data->lav_ids, lalr->lalr_pkt.lap_id);
	kmem_free(lalr, sizeof (*lalr));
}

static void
i_lalr_abort(lx_autofs_vfs_t *data, lx_autofs_lookup_req_t *lalr)
{
	lx_autofs_lookup_req_t *lalr_tmp;

	/*
	 * This is a little tricky.  We're aborting the wait for this
	 * request.  So if anyone else is waiting for this request we
	 * can't free it, but if no one else is waiting for the request
	 * we should free it.
	 */
	mutex_enter(&data->lav_lock);
	if (atomic_add_int_nv(&lalr->lalr_ref, -1) > 0) {
		mutex_exit(&data->lav_lock);
		return;
	}
	ASSERT(lalr->lalr_ref == 0);

	/* Remove this request from the hashes so no one can look it up. */
	(void) mod_hash_remove(data->lav_id_hash,
	    (mod_hash_key_t)(uintptr_t)lalr->lalr_pkt.lap_id,
	    (mod_hash_val_t)&lalr_tmp);
	(void) mod_hash_remove(data->lav_path_hash,
	    (mod_hash_key_t)lalr->lalr_pkt.lap_name,
	    (mod_hash_val_t)&lalr_tmp);
	mutex_exit(&data->lav_lock);

	/* It's ok to free this now because the ref count was zero. */
	id_free(data->lav_ids, lalr->lalr_pkt.lap_id);
	kmem_free(lalr, sizeof (*lalr));
}

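/*
 * Track down the fifos used to talk to the automounter.  pgrp is the
 * automounter's process group (whose leader is the automounter itself)
 * and fd is the automounter's file descriptor for the write end of
 * the fifo.  On success, held file pointers for both ends are returned.
 */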
static int
i_fifo_lookup(pid_t pgrp, int fd, file_t **fpp_wr, file_t **fpp_rd)
{
	proc_t *prp;
	uf_info_t *fip;
	uf_entry_t *ufp_wr, *ufp_rd = NULL;
	file_t *fp_wr, *fp_rd = NULL;
	vnode_t *vp_wr, *vp_rd;
	int i;

	/*
	 * sprlock() is zone aware, so assuming this mount call was
	 * initiated by a process in a zone, if it tries to specify
	 * a pgrp outside of its zone this call will fail.
	 *
	 * Also, we want to grab hold of the main automounter process,
	 * and it's going to be the group leader for pgrp, so its
	 * pid will be equal to pgrp.
	 */
	prp = sprlock(pgrp);
	if (prp == NULL)
		return (-1);
	mutex_exit(&prp->p_lock);

	/* Now we want to access the process's open file descriptors. */
	fip = P_FINFO(prp);
	mutex_enter(&fip->fi_lock);

	/* Sanity check the fifo write fd. */
	if (fd >= fip->fi_nfiles) {
		mutex_exit(&fip->fi_lock);
		mutex_enter(&prp->p_lock);
		sprunlock(prp);
		return (-1);
	}

	/* Get a pointer to the write fifo. */
	UF_ENTER(ufp_wr, fip, fd);
	if (((fp_wr = ufp_wr->uf_file) == NULL) ||
	    ((vp_wr = fp_wr->f_vnode) == NULL) || (vp_wr->v_type != VFIFO)) {
		/* Invalid fifo fd. */
		UF_EXIT(ufp_wr);
		mutex_exit(&fip->fi_lock);
		mutex_enter(&prp->p_lock);
		sprunlock(prp);
		return (-1);
	}

	/*
	 * Now we need to find the read end of the fifo (for reasons
	 * explained below.)  We assume that the read end of the fifo
	 * is in the same process as the write end.
	 */
	vp_rd = fifo_peer_vp(fp_wr->f_vnode);
	for (i = 0; i < fip->fi_nfiles; i++) {
		UF_ENTER(ufp_rd, fip, i);
		if (((fp_rd = ufp_rd->uf_file) != NULL) &&
		    (fp_rd->f_vnode == vp_rd))
			break;
		UF_EXIT(ufp_rd);
	}
	if (i == fip->fi_nfiles) {
		/* Didn't find it. */
		UF_EXIT(ufp_wr);
		mutex_exit(&fip->fi_lock);
		mutex_enter(&prp->p_lock);
		sprunlock(prp);
		return (-1);
	}

	/*
	 * We need to drop fi_lock before we can try to acquire f_tlock.
	 * The good news is that the file pointers are protected because
	 * we're still holding uf_lock.
	 */
	mutex_exit(&fip->fi_lock);

	/*
	 * Here we bump the open counts on the fifos.  The reason
	 * that we do this is because when we go to write to the
	 * fifo we want to ensure that both ends are actually open (and
	 * not in the process of being closed) without having to
	 * stop the automounter.  (If the write end of the fifo
	 * were closed and we tried to write to it we would panic.
	 * If the read end of the fifo were closed and we tried to
	 * write to the other end, the process that invoked the
	 * lookup operation would get an unexpected SIGPIPE.)
	 */
	mutex_enter(&fp_wr->f_tlock);
	fp_wr->f_count++;
	ASSERT(fp_wr->f_count >= 2);
	mutex_exit(&fp_wr->f_tlock);

	mutex_enter(&fp_rd->f_tlock);
	fp_rd->f_count++;
	ASSERT(fp_rd->f_count >= 2);
	mutex_exit(&fp_rd->f_tlock);

	/* Release all our locks. */
	UF_EXIT(ufp_wr);
	UF_EXIT(ufp_rd);
	mutex_enter(&prp->p_lock);
	sprunlock(prp);

	/* Return the file pointers. */
	*fpp_rd = fp_rd;
	*fpp_wr = fp_wr;
	return (0);
}

static uint_t
/*ARGSUSED*/
i_fifo_close_cb(mod_hash_key_t key, mod_hash_val_t *val, void *arg)
{
	int *id = (int *)arg;
	/* Return the key and terminate the walk. */
	*id = (uintptr_t)key;
	return (MH_WALK_TERMINATE);
}

static void
i_fifo_close(lx_autofs_vfs_t *data)
{
	/*
	 * Close the fifo to prevent any future requests from
	 * getting sent to the automounter.
	 */
	mutex_enter(&data->lav_lock);
	if (data->lav_fifo_wr != NULL) {
		(void) closef(data->lav_fifo_wr);
		data->lav_fifo_wr = NULL;
	}
	if (data->lav_fifo_rd != NULL) {
		(void) closef(data->lav_fifo_rd);
		data->lav_fifo_rd = NULL;
	}
	mutex_exit(&data->lav_lock);

	/*
	 * Wake up any threads currently waiting for the automounter.
	 * Note that it's possible for multiple threads to have entered
	 * this function and to be doing the work below simultaneously.
	 */
	for (;;) {
		lx_autofs_lookup_req_t *lalr;
		int id;

		/* Look up the first entry in the hash. */
		id = -1;
		mod_hash_walk(data->lav_id_hash,
		    i_fifo_close_cb, &id);
		if (id == -1) {
			/* No more ids in the hash. */
			break;
		}
		if ((lalr = i_lalr_find(data, id)) == NULL) {
			/* Someone else beat us to it. */
			continue;
		}

		/* Mark the request as complete and release it. */
		i_lalr_complete(data, lalr);
		i_lalr_release(data, lalr);
	}
}

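/*
 * Check that the automounter still has the read end of its fifo open.
 * Returns 0 if it does, -1 if the fifo has already been closed down or
 * the automounter process is gone.
 */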
static int
i_fifo_verify_rd(lx_autofs_vfs_t *data)
{
	proc_t *prp;
	uf_info_t *fip;
	uf_entry_t *ufp_rd = NULL;
	file_t *fp_rd = NULL;
	vnode_t *vp_rd;
	int i;

	ASSERT(MUTEX_HELD(&data->lav_lock));

	/* Check if we've already been shut down. */
	if (data->lav_fifo_wr == NULL) {
		ASSERT(data->lav_fifo_rd == NULL);
		return (-1);
	}
	vp_rd = fifo_peer_vp(data->lav_fifo_wr->f_vnode);

	/*
	 * sprlock() is zone aware, so assuming this mount call was
	 * initiated by a process in a zone, if it tries to specify
	 * a pgrp outside of its zone this call will fail.
	 *
	 * Also, we want to grab hold of the main automounter process,
	 * and it's going to be the group leader for pgrp, so its
	 * pid will be equal to pgrp.
	 */
	prp = sprlock(data->lav_pgrp);
	if (prp == NULL)
		return (-1);
	mutex_exit(&prp->p_lock);

	/* Now we want to access the process's open file descriptors. */
	fip = P_FINFO(prp);
	mutex_enter(&fip->fi_lock);

	/*
	 * Now we need to find the read end of the fifo (for the reasons
	 * explained in i_fifo_lookup().)  We assume that the read end of
	 * the fifo is in the same process as the write end.
	 */
	for (i = 0; i < fip->fi_nfiles; i++) {
		UF_ENTER(ufp_rd, fip, i);
		if (((fp_rd = ufp_rd->uf_file) != NULL) &&
		    (fp_rd->f_vnode == vp_rd))
			break;
		UF_EXIT(ufp_rd);
	}
	if (i == fip->fi_nfiles) {
		/* Didn't find it. */
		mutex_exit(&fip->fi_lock);
		mutex_enter(&prp->p_lock);
		sprunlock(prp);
		return (-1);
	}

	/*
	 * It seems the automounter still has the read end of the fifo
	 * open, so we're done here.  Release all our locks and exit.
	 */
	mutex_exit(&fip->fi_lock);
	UF_EXIT(ufp_rd);
	mutex_enter(&prp->p_lock);
	sprunlock(prp);

	return (0);
}

static int
i_fifo_write(lx_autofs_vfs_t *data, lx_autofs_pkt_t *lap)
{
	struct uio uio;
	struct iovec iov;
	file_t *fp_wr, *fp_rd;
	int error;

	/*
	 * The catch here is we need to make sure _we_ don't close
	 * the fifo while writing to it.  (Another thread could come
	 * along and realize the automounter process is gone and close
	 * the fifo.)  To do this we bump the open count before we
	 * write to the fifo.
	 */
	mutex_enter(&data->lav_lock);
	if (data->lav_fifo_wr == NULL) {
		ASSERT(data->lav_fifo_rd == NULL);
		mutex_exit(&data->lav_lock);
		return (ENOENT);
	}
	fp_wr = data->lav_fifo_wr;
	fp_rd = data->lav_fifo_rd;

	/* Bump the open count on the write fifo. */
	mutex_enter(&fp_wr->f_tlock);
	fp_wr->f_count++;
	mutex_exit(&fp_wr->f_tlock);

	/* Bump the open count on the read fifo. */
	mutex_enter(&fp_rd->f_tlock);
	fp_rd->f_count++;
	mutex_exit(&fp_rd->f_tlock);

	mutex_exit(&data->lav_lock);

	iov.iov_base = (caddr_t)lap;
	iov.iov_len = sizeof (*lap);
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_loffset = 0;
	uio.uio_segflg = (short)UIO_SYSSPACE;
	uio.uio_resid = sizeof (*lap);
	uio.uio_llimit = 0;
	uio.uio_fmode = FWRITE | FNDELAY | FNONBLOCK;

	error = VOP_WRITE(fp_wr->f_vnode, &uio, 0, kcred, NULL);
	(void) closef(fp_wr);
	(void) closef(fp_rd);

	/*
	 * After every write we verify that the automounter still has
	 * these files open.
	 */
	mutex_enter(&data->lav_lock);
	if (i_fifo_verify_rd(data) != 0) {
		/*
		 * Something happened to the automounter.
		 * Close down the communication pipe we set up.
		 */
		mutex_exit(&data->lav_lock);
		i_fifo_close(data);
		if (error != 0)
			return (error);
		return (ENOENT);
	}
	mutex_exit(&data->lav_lock);

	return (error);
}

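/*
 * Read every entry in the directory dvp, pushing subdirectories onto
 * dir_stack and everything else onto file_stack (each entry saves the
 * parent, a held vnode, and a copy of the name).  Passing NULL for a
 * stack discards entries of that type.
 */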
static int
i_bs_readdir(vnode_t *dvp, list_t *dir_stack, list_t *file_stack)
{
	struct iovec iov;
	struct uio uio;
	dirent64_t *dp, *dbuf;
	vnode_t *vp;
	size_t dlen, dbuflen;
	int eof, error, ndirents = 64;
	char *nm;

	dlen = ndirents * (sizeof (*dbuf));
	dbuf = kmem_alloc(dlen, KM_SLEEP);

	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_fmode = 0;
	uio.uio_extflg = UIO_COPY_CACHED;
	uio.uio_loffset = 0;
	uio.uio_llimit = MAXOFFSET_T;

	eof = 0;
	error = 0;
	while (!error && !eof) {
		uio.uio_resid = dlen;
		iov.iov_base = (char *)dbuf;
		iov.iov_len = dlen;

		(void) VOP_RWLOCK(dvp, V_WRITELOCK_FALSE, NULL);
		if (VOP_READDIR(dvp, &uio, kcred, &eof, NULL, 0) != 0) {
			VOP_RWUNLOCK(dvp, V_WRITELOCK_FALSE, NULL);
			kmem_free(dbuf, dlen);
			return (-1);
		}
		VOP_RWUNLOCK(dvp, V_WRITELOCK_FALSE, NULL);

		if ((dbuflen = dlen - uio.uio_resid) == 0) {
			/* We're done. */
			break;
		}

		for (dp = dbuf; ((intptr_t)dp < (intptr_t)dbuf + dbuflen);
		    dp = (dirent64_t *)((intptr_t)dp + dp->d_reclen)) {

			nm = dp->d_name;

			if (strcmp(nm, ".") == 0 || strcmp(nm, "..") == 0)
				continue;

			if (VOP_LOOKUP(dvp, nm, &vp, NULL, 0, NULL, kcred,
			    NULL, NULL, NULL) != 0) {
				kmem_free(dbuf, dlen);
				return (-1);
			}
			if (vp->v_type == VDIR) {
				if (dir_stack != NULL) {
					i_stack_push(dir_stack, (caddr_t)dvp,
					    (caddr_t)vp, i_strdup(nm));
				} else {
					VN_RELE(vp);
				}
			} else {
				if (file_stack != NULL) {
					i_stack_push(file_stack, (caddr_t)dvp,
					    (caddr_t)vp, i_strdup(nm));
				} else {
					VN_RELE(vp);
				}
			}
		}
	}
	kmem_free(dbuf, dlen);
	return (0);
}

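/*
 * Delete the directory entry path under dvp, descending into any
 * subdirectories via the explicit stacks above so that we never go
 * more than one frame deep on the kernel stack.
 */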
static void
i_bs_destroy(vnode_t *dvp, char *path)
{
	list_t search_stack;
	list_t dir_stack;
	list_t file_stack;
	vnode_t *pdvp, *vp;
	char *dpath, *fpath;
	int ret;

	if (VOP_LOOKUP(dvp, path, &vp, NULL, 0, NULL, kcred,
	    NULL, NULL, NULL) != 0) {
		/* A directory entry with this name doesn't actually exist. */
		return;
	}

	if (vp->v_type != VDIR) {
		/* Easy, the directory entry is a file so delete it. */
		VN_RELE(vp);
		(void) VOP_REMOVE(dvp, path, kcred, NULL, 0);
		return;
	}

	/*
	 * The directory entry is a subdirectory, now we have a bit more
	 * work to do.  (We'll have to recurse into the subdirectory.)
	 * It would have been much easier to do this recursively but kernel
	 * stacks are notoriously small.
	 */
	i_stack_init(&search_stack);
	i_stack_init(&dir_stack);
	i_stack_init(&file_stack);

	/* Save our newfound subdirectory into a list. */
	i_stack_push(&search_stack, (caddr_t)dvp, (caddr_t)vp, i_strdup(path));

	/* Do a depth-first search into the subdirectories. */
	while (i_stack_pop(&search_stack,
	    (caddr_t *)&pdvp, (caddr_t *)&dvp, &dpath) == 0) {

		/* Get a list of the subdirectories in this directory. */
		if (i_bs_readdir(dvp, &search_stack, NULL) != 0)
			goto exit;

		/* Save the current directory on a separate stack. */
		i_stack_push(&dir_stack, (caddr_t)pdvp, (caddr_t)dvp, dpath);
	}

	/*
	 * Now dir_stack contains a list of directories, the deepest paths
	 * are at the top of the list.  So let's go through and process them.
	 */
	while (i_stack_pop(&dir_stack,
	    (caddr_t *)&pdvp, (caddr_t *)&dvp, &dpath) == 0) {

		/* Get a list of the files in this directory. */
		if (i_bs_readdir(dvp, NULL, &file_stack) != 0) {
			VN_RELE(dvp);
			i_strfree(dpath);
			goto exit;
		}

		/* Delete all the files in this directory. */
		while (i_stack_pop(&file_stack,
		    NULL, (caddr_t *)&vp, &fpath) == 0) {
			VN_RELE(vp);
			ret = VOP_REMOVE(dvp, fpath, kcred, NULL, 0);
			i_strfree(fpath);
			if (ret != 0) {
				i_strfree(dpath);
				goto exit;
			}
		}

		/* Delete this directory. */
		VN_RELE(dvp);
		ret = VOP_RMDIR(pdvp, dpath, pdvp, kcred, NULL, 0);
		i_strfree(dpath);
		if (ret != 0)
			goto exit;
	}

exit:
	while (
	    (i_stack_pop(&search_stack, NULL, (caddr_t *)&vp, &path) == 0) ||
	    (i_stack_pop(&dir_stack, NULL, (caddr_t *)&vp, &path) == 0) ||
	    (i_stack_pop(&file_stack, NULL, (caddr_t *)&vp, &path) == 0)) {
		VN_RELE(vp);
		i_strfree(path);
	}
	i_stack_fini(&search_stack);
	i_stack_fini(&dir_stack);
	i_stack_fini(&file_stack);
}

static vnode_t *
i_bs_create(vnode_t *dvp, char *bs_name)
{
	vnode_t *vp;
	vattr_t vattr;

	/*
	 * After looking at the mkdir syscall path it seems we don't need
	 * to initialize all of the vattr_t structure.
	 */
	bzero(&vattr, sizeof (vattr));
	vattr.va_type = VDIR;
	vattr.va_mode = 0755; /* u+rwx,og=rx */
	vattr.va_mask = AT_TYPE|AT_MODE;

	if (VOP_MKDIR(dvp, bs_name, &vattr, &vp, kcred, NULL, 0, NULL) != 0)
		return (NULL);
	return (vp);
}

static int
i_automounter_call(vnode_t *dvp, char *nm)
{
	lx_autofs_lookup_req_t *lalr;
	lx_autofs_vfs_t *data;
	int error, dup_request;

	/* Get a pointer to the vfs mount data. */
	data = dvp->v_vfsp->vfs_data;

	/* The automounter only supports queries in the root directory. */
	if (dvp != data->lav_root)
		return (ENOENT);

	/*
	 * Check if the current process is in the automounter's process
	 * group.  (If it is, the current process is either the automounter
	 * itself or one of its forked child processes.)  If so, don't
	 * redirect this lookup back into the automounter because we'll
	 * hang.
	 */
	mutex_enter(&pidlock);
	if (data->lav_pgrp == curproc->p_pgrp) {
		mutex_exit(&pidlock);
		return (ENOENT);
	}
	mutex_exit(&pidlock);

	/* Verify that the automount process pipe still exists. */
	mutex_enter(&data->lav_lock);
	if (data->lav_fifo_wr == NULL) {
		ASSERT(data->lav_fifo_rd == NULL);
		mutex_exit(&data->lav_lock);
		return (ENOENT);
	}
	mutex_exit(&data->lav_lock);

	/* Allocate an automounter request structure. */
	if ((lalr = i_lalr_alloc(data, &dup_request, nm)) == NULL)
		return (ENOENT);

	/*
	 * If we were the first one to allocate this request then we
	 * need to send it to the automounter.
	 */
	if ((!dup_request) &&
	    ((error = i_fifo_write(data, &lalr->lalr_pkt)) != 0)) {
		/*
		 * Unable to send the request to the automounter.
		 * Unblock any other threads waiting on the request
		 * and release the request.
		 */
		i_lalr_complete(data, lalr);
		i_lalr_release(data, lalr);
		return (error);
	}

	/* Wait for someone to signal us that this request has completed. */
	mutex_enter(&lalr->lalr_lock);
	while (!lalr->lalr_complete) {
		if (cv_wait_sig(&lalr->lalr_cv, &lalr->lalr_lock) == 0) {
			/* We got a signal, abort this lookup. */
			mutex_exit(&lalr->lalr_lock);
			i_lalr_abort(data, lalr);
			return (EINTR);
		}
	}
	mutex_exit(&lalr->lalr_lock);
	i_lalr_release(data, lalr);

	return (0);
}

static int
i_automounter_ioctl(vnode_t *vp, int cmd, intptr_t arg)
{
	lx_autofs_vfs_t *data = (lx_autofs_vfs_t *)vp->v_vfsp->vfs_data;

	/*
	 * Be strict.
	 * We only accept ioctls from the automounter process group.
	 */
	mutex_enter(&pidlock);
	if (data->lav_pgrp != curproc->p_pgrp) {
		mutex_exit(&pidlock);
		return (ENOENT);
	}
	mutex_exit(&pidlock);

	if ((cmd == LX_AUTOFS_IOC_READY) || (cmd == LX_AUTOFS_IOC_FAIL)) {
		lx_autofs_lookup_req_t *lalr;
		int id = arg;

		/*
		 * We don't actually care if the request failed or succeeded.
		 * We do the same thing either way.
		 */
		if ((lalr = i_lalr_find(data, id)) == NULL)
			return (ENXIO);

		/* Mark the request as complete and release it. */
		i_lalr_complete(data, lalr);
		i_lalr_release(data, lalr);
		return (0);
	}
	if (cmd == LX_AUTOFS_IOC_CATATONIC) {
		/* The automounter is shutting down. */
		i_fifo_close(data);
		return (0);
	}
	return (ENOTSUP);
}

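/*
 * The mount options come straight from the Linux automounter, which
 * typically passes an option string of the form:
 *
 *	fd=<pipe fd>,pgrp=<pgrp>,minproto=<n>,maxproto=<n>
 *
 * (This is an illustration of the expected format, not a parsing
 * requirement; the options may appear in any order.)
 */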
static int
i_parse_mntopt(vfs_t *vfsp, lx_autofs_vfs_t *data)
{
	char *fd_str, *pgrp_str, *minproto_str, *maxproto_str;
	int fd, pgrp, minproto, maxproto;
	file_t *fp_wr, *fp_rd;

	/* Require all options to be present. */
	if ((vfs_optionisset(vfsp, LX_MNTOPT_FD, &fd_str) != 1) ||
	    (vfs_optionisset(vfsp, LX_MNTOPT_PGRP, &pgrp_str) != 1) ||
	    (vfs_optionisset(vfsp, LX_MNTOPT_MINPROTO, &minproto_str) != 1) ||
	    (vfs_optionisset(vfsp, LX_MNTOPT_MAXPROTO, &maxproto_str) != 1))
		return (EINVAL);

	/* Get the values for each parameter. */
	if ((i_str_to_int(fd_str, &fd) != 0) ||
	    (i_str_to_int(pgrp_str, &pgrp) != 0) ||
	    (i_str_to_int(minproto_str, &minproto) != 0) ||
	    (i_str_to_int(maxproto_str, &maxproto) != 0))
		return (EINVAL);

	/*
	 * We support v2 of the Linux kernel automounter protocol.
	 * Make sure the mount request we got indicates support
	 * for this version of the protocol.
	 */
	if ((minproto > 2) || (maxproto < 2))
		return (EINVAL);

	/*
	 * Now we need to look up the fifos we'll be using
	 * to talk to the userland automounter process.
	 */
	if (i_fifo_lookup(pgrp, fd, &fp_wr, &fp_rd) != 0)
		return (EINVAL);

	/* Save the mount options and fifo pointers. */
	data->lav_fd = fd;
	data->lav_pgrp = pgrp;
	data->lav_fifo_rd = fp_rd;
	data->lav_fifo_wr = fp_wr;
	return (0);
}

/*
 * VFS entry points
 */
static int
lx_autofs_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr)
{
	lx_autofs_vfs_t *data;
	dev_t dev;
	char name[40];
	int error;

	if (secpolicy_fs_mount(cr, mvp, vfsp) != 0)
		return (EPERM);

	if (mvp->v_type != VDIR)
		return (ENOTDIR);

	if ((uap->flags & MS_OVERLAY) == 0 &&
	    (mvp->v_count > 1 || (mvp->v_flag & VROOT)))
		return (EBUSY);

	/* We don't support mounts in the global zone. */
	if (getzoneid() == GLOBAL_ZONEID)
		return (EPERM);

	/* We don't support mounting on top of ourselves. */
	if (vn_matchops(mvp, lx_autofs_vn_ops))
		return (EPERM);

	/* Allocate a vfs struct. */
	data = kmem_zalloc(sizeof (lx_autofs_vfs_t), KM_SLEEP);

	/* Parse mount options. */
	if ((error = i_parse_mntopt(vfsp, data)) != 0) {
		kmem_free(data, sizeof (lx_autofs_vfs_t));
		return (error);
	}

	/* Initialize the backing store. */
	i_bs_destroy(mvp, LX_AUTOFS_BS_DIR);
	if ((data->lav_bs_vp = i_bs_create(mvp, LX_AUTOFS_BS_DIR)) == NULL) {
		kmem_free(data, sizeof (lx_autofs_vfs_t));
		return (EBUSY);
	}
	data->lav_bs_name = LX_AUTOFS_BS_DIR;

	/* We have to hold the underlying vnode we're mounted on. */
	data->lav_mvp = mvp;
	VN_HOLD(mvp);

	/* Initialize vfs fields */
	vfsp->vfs_bsize = DEV_BSIZE;
	vfsp->vfs_fstype = lx_autofs_fstype;
	vfsp->vfs_data = data;

	/* Invent a dev_t (sigh) */
	do {
		dev = makedevice(lx_autofs_major,
		    atomic_add_32_nv(&lx_autofs_minor, 1) & L_MAXMIN32);
	} while (vfs_devismounted(dev));
	vfsp->vfs_dev = dev;
	vfs_make_fsid(&vfsp->vfs_fsid, dev, lx_autofs_fstype);

	/* Create an id space arena for automounter requests. */
	(void) snprintf(name, sizeof (name), "lx_autofs_id_%d",
	    getminor(vfsp->vfs_dev));
	data->lav_ids = id_space_create(name, 1, INT_MAX);

	/* Create hashes to keep track of automounter requests. */
	mutex_init(&data->lav_lock, NULL, MUTEX_DEFAULT, NULL);
	(void) snprintf(name, sizeof (name), "lx_autofs_path_hash_%d",
	    getminor(vfsp->vfs_dev));
	data->lav_path_hash = mod_hash_create_strhash(name,
	    LX_AUTOFS_VFS_PATH_HASH_SIZE, mod_hash_null_valdtor);
	(void) snprintf(name, sizeof (name), "lx_autofs_id_hash_%d",
	    getminor(vfsp->vfs_dev));
	data->lav_id_hash = mod_hash_create_idhash(name,
	    LX_AUTOFS_VFS_ID_HASH_SIZE, mod_hash_null_valdtor);

	/* Create a hash to keep track of vnodes. */
	(void) snprintf(name, sizeof (name), "lx_autofs_vn_hash_%d",
	    getminor(vfsp->vfs_dev));
	data->lav_vn_hash = mod_hash_create_ptrhash(name,
	    LX_AUTOFS_VFS_VN_HASH_SIZE, mod_hash_null_valdtor,
	    sizeof (vnode_t));

	/* Create root vnode */
	data->lav_root = i_vn_alloc(vfsp, data->lav_bs_vp);
	data->lav_root->v_flag |=
	    VROOT | VNOCACHE | VNOMAP | VNOSWAP | VNOMOUNT;

	return (0);
}

static int
lx_autofs_unmount(vfs_t *vfsp, int flag, struct cred *cr)
{
	lx_autofs_vfs_t *data;

	if (secpolicy_fs_unmount(cr, vfsp) != 0)
		return (EPERM);

	/* We do not currently support forced unmounts. */
	if (flag & MS_FORCE)
		return (ENOTSUP);

	/*
	 * We should never have a reference count of less than 2: one for the
	 * caller, one for the root vnode.
	 */
	ASSERT(vfsp->vfs_count >= 2);

	/* If there are any outstanding vnodes, we can't unmount. */
	if (vfsp->vfs_count > 2)
		return (EBUSY);

	/* Check for any remaining holds on the root vnode. */
	data = vfsp->vfs_data;
	ASSERT(data->lav_root->v_vfsp == vfsp);
	if (data->lav_root->v_count > 1)
		return (EBUSY);

	/* Close the fifo to the automount process. */
	if (data->lav_fifo_wr != NULL)
		(void) closef(data->lav_fifo_wr);
	if (data->lav_fifo_rd != NULL)
		(void) closef(data->lav_fifo_rd);

	/*
	 * We have to release our hold on our root vnode before we can
	 * delete the backing store.  (Since the root vnode is linked
	 * to the backing store.)
	 */
	VN_RELE(data->lav_root);

	/* Cleanup the backing store. */
	i_bs_destroy(data->lav_mvp, data->lav_bs_name);
	VN_RELE(data->lav_mvp);

	/* Cleanup our remaining data structures. */
	mod_hash_destroy_strhash(data->lav_path_hash);
	mod_hash_destroy_idhash(data->lav_id_hash);
	mod_hash_destroy_ptrhash(data->lav_vn_hash);
	id_space_destroy(data->lav_ids);
	kmem_free(data, sizeof (lx_autofs_vfs_t));

	return (0);
}

static int
lx_autofs_root(vfs_t *vfsp, vnode_t **vpp)
{
	lx_autofs_vfs_t *data = vfsp->vfs_data;

	*vpp = data->lav_root;
	VN_HOLD(*vpp);

	return (0);
}

static int
lx_autofs_statvfs(vfs_t *vfsp, statvfs64_t *sp)
{
	lx_autofs_vfs_t *data = vfsp->vfs_data;
	vnode_t *urvp = data->lav_root->v_data;
	dev32_t d32;
	int error;

	if ((error = VFS_STATVFS(urvp->v_vfsp, sp)) != 0)
		return (error);

	/* Update some of the values before returning. */
	(void) cmpldev(&d32, vfsp->vfs_dev);
	sp->f_fsid = d32;
	(void) strlcpy(sp->f_basetype, vfssw[vfsp->vfs_fstype].vsw_name,
	    sizeof (sp->f_basetype));
	sp->f_flag = vf_to_stf(vfsp->vfs_flag);
	bzero(sp->f_fstr, sizeof (sp->f_fstr));
	return (0);
}

static const fs_operation_def_t lx_autofs_vfstops[] = {
	{ VFSNAME_MOUNT,	{ .vfs_mount = lx_autofs_mount } },
	{ VFSNAME_UNMOUNT,	{ .vfs_unmount = lx_autofs_unmount } },
	{ VFSNAME_ROOT,		{ .vfs_root = lx_autofs_root } },
	{ VFSNAME_STATVFS,	{ .vfs_statvfs = lx_autofs_statvfs } },
	{ NULL, NULL }
};

/*
 * VOP entry points - simple passthrough
 *
 * For most VOP entry points we can simply pass the request on to
 * the underlying filesystem we're mounted on.
 */
static int
lx_autofs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr,
    caller_context_t *ctp)
{
	vnode_t *uvp = vp->v_data;
	return (VOP_CLOSE(uvp, flag, count, offset, cr, ctp));
}

static int
lx_autofs_readdir(vnode_t *vp, uio_t *uiop, cred_t *cr, int *eofp,
    caller_context_t *ctp, int flags)
{
	vnode_t *uvp = vp->v_data;
	return (VOP_READDIR(uvp, uiop, cr, eofp, ctp, flags));
}

static int
lx_autofs_access(vnode_t *vp, int mode, int flags, cred_t *cr,
    caller_context_t *ctp)
{
	vnode_t *uvp = vp->v_data;
	return (VOP_ACCESS(uvp, mode, flags, cr, ctp));
}

static int
lx_autofs_rwlock(struct vnode *vp, int write_lock, caller_context_t *ctp)
{
	vnode_t *uvp = vp->v_data;
	return (VOP_RWLOCK(uvp, write_lock, ctp));
}

static void
lx_autofs_rwunlock(struct vnode *vp, int write_lock, caller_context_t *ctp)
{
	vnode_t *uvp = vp->v_data;
	VOP_RWUNLOCK(uvp, write_lock, ctp);
}

/*ARGSUSED*/
static int
lx_autofs_rmdir(vnode_t *dvp, char *nm, vnode_t *cdir, cred_t *cr,
    caller_context_t *ctp, int flags)
{
	vnode_t *udvp = dvp->v_data;

	/*
	 * cdir is the calling process's current directory.
	 * If cdir is an lx_autofs vnode then get its real underlying
	 * vnode ptr.  (It seems like the only thing cdir is
	 * ever used for is to make sure the user doesn't delete
	 * their current directory.)
	 */
	if (vn_matchops(cdir, lx_autofs_vn_ops)) {
		vnode_t *ucdir = cdir->v_data;
		return (VOP_RMDIR(udvp, nm, ucdir, cr, ctp, flags));
	}

	return (VOP_RMDIR(udvp, nm, cdir, cr, ctp, flags));
}

/*
 * VOP entry points - special passthrough
 *
 * For some VOP entry points we will first pass the request on to
 * the underlying filesystem we're mounted on.  If there's an error
 * then we immediately return the error, but if the request succeeds
 * we have to do some extra work before returning.
 */
static int
lx_autofs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ctp)
{
	vnode_t *ovp = *vpp;
	vnode_t *uvp = ovp->v_data;
	int error;

	if ((error = VOP_OPEN(&uvp, flag, cr, ctp)) != 0)
		return (error);

	/* If the open didn't give us back a new (cloned) vnode, we're done. */
	if (uvp == ovp->v_data)
		return (0);

	/* Deal with clone opens by returning a new vnode. */
	*vpp = i_vn_alloc(ovp->v_vfsp, uvp);
	VN_RELE(ovp);
	return (0);
}

static int
lx_autofs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
    caller_context_t *ctp)
{
	vnode_t *uvp = vp->v_data;
	int error;

	if ((error = VOP_GETATTR(uvp, vap, flags, cr, ctp)) != 0)
		return (error);

	/* Update the attributes with our filesystem id. */
	vap->va_fsid = vp->v_vfsp->vfs_dev;
	return (0);
}

static int
lx_autofs_mkdir(vnode_t *dvp, char *nm, struct vattr *vap, vnode_t **vpp,
    cred_t *cr, caller_context_t *ctp, int flags, vsecattr_t *vsecp)
{
	vnode_t *udvp = dvp->v_data;
	vnode_t *uvp = NULL;
	int error;

	if ((error = VOP_MKDIR(udvp, nm, vap, &uvp, cr,
	    ctp, flags, vsecp)) != 0)
		return (error);

	/* Update the attributes with our filesystem id. */
	vap->va_fsid = dvp->v_vfsp->vfs_dev;

	/* Allocate a new vnode. */
	*vpp = i_vn_alloc(dvp->v_vfsp, uvp);
	return (0);
}

/*
 * VOP entry points - custom
 */
/*ARGSUSED*/
static void
lx_autofs_inactive(struct vnode *vp, struct cred *cr, caller_context_t *ctp)
{
	lx_autofs_vfs_t *data = vp->v_vfsp->vfs_data;

	/*
	 * We need to hold the vfs lock because if we're going to free
	 * this vnode we have to prevent anyone from looking it up
	 * in the vnode hash.
	 */
	mutex_enter(&data->lav_lock);
	mutex_enter(&vp->v_lock);

	if (vp->v_count < 1) {
		panic("lx_autofs_inactive: bad v_count");
		/*NOTREACHED*/
	}

	/* Drop the temporary hold by vn_rele now. */
	if (--vp->v_count > 0) {
		mutex_exit(&vp->v_lock);
		mutex_exit(&data->lav_lock);
		return;
	}

	/*
	 * No one should have been blocked on this lock because we're
	 * about to free this vnode.
	 */
	i_vn_free(vp);
}

static int
lx_autofs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp,
    int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ctp,
    int *direntflags, pathname_t *realpnp)
{
	vnode_t *udvp = dvp->v_data;
	vnode_t *uvp = NULL;
	int error;

	/* First check whether this path component already exists. */
	if ((error = VOP_LOOKUP(udvp, nm, &uvp, pnp, flags, rdir, cr, ctp,
	    direntflags, realpnp)) == 0) {
		*vpp = i_vn_alloc(dvp->v_vfsp, uvp);
		return (0);
	}

	/* Only query the automounter if the path does not exist. */
	if (error != ENOENT)
		return (error);

	/* Refer the lookup to the automounter. */
	if ((error = i_automounter_call(dvp, nm)) != 0)
		return (error);

	/* Retry the lookup operation. */
	if ((error = VOP_LOOKUP(udvp, nm, &uvp, pnp, flags, rdir, cr, ctp,
	    direntflags, realpnp)) == 0) {
		*vpp = i_vn_alloc(dvp->v_vfsp, uvp);
		return (0);
	}
	return (error);
}

/*ARGSUSED*/
static int
lx_autofs_ioctl(vnode_t *vp, int cmd, intptr_t arg, int mode, cred_t *cr,
    int *rvalp, caller_context_t *ctp)
{
	vnode_t *uvp = vp->v_data;

	/* Intercept certain ioctls. */
	switch ((uint_t)cmd) {
	case LX_AUTOFS_IOC_READY:
	case LX_AUTOFS_IOC_FAIL:
	case LX_AUTOFS_IOC_CATATONIC:
	case LX_AUTOFS_IOC_EXPIRE:
	case LX_AUTOFS_IOC_PROTOVER:
	case LX_AUTOFS_IOC_SETTIMEOUT:
		return (i_automounter_ioctl(vp, cmd, arg));
	}

	/* Pass any remaining ioctl on. */
	return (VOP_IOCTL(uvp, cmd, arg, mode, cr, rvalp, ctp));
}

/*
 * VOP entry points definitions
 */
static const fs_operation_def_t lx_autofs_tops_root[] = {
	{ VOPNAME_OPEN,		{ .vop_open = lx_autofs_open } },
	{ VOPNAME_CLOSE,	{ .vop_close = lx_autofs_close } },
	{ VOPNAME_IOCTL,	{ .vop_ioctl = lx_autofs_ioctl } },
	{ VOPNAME_RWLOCK,	{ .vop_rwlock = lx_autofs_rwlock } },
	{ VOPNAME_RWUNLOCK,	{ .vop_rwunlock = lx_autofs_rwunlock } },
	{ VOPNAME_GETATTR,	{ .vop_getattr = lx_autofs_getattr } },
	{ VOPNAME_ACCESS,	{ .vop_access = lx_autofs_access } },
	{ VOPNAME_READDIR,	{ .vop_readdir = lx_autofs_readdir } },
	{ VOPNAME_LOOKUP,	{ .vop_lookup = lx_autofs_lookup } },
	{ VOPNAME_INACTIVE,	{ .vop_inactive = lx_autofs_inactive } },
	{ VOPNAME_MKDIR,	{ .vop_mkdir = lx_autofs_mkdir } },
	{ VOPNAME_RMDIR,	{ .vop_rmdir = lx_autofs_rmdir } },
	{ NULL }
};

/*
 * lx_autofs_init() gets invoked via the mod_install() call in
 * this module's _init() routine.  Therefore, the code that cleans
 * up the structures we allocate below is actually found in
 * our _fini() routine.
 */
/* ARGSUSED */
static int
lx_autofs_init(int fstype, char *name)
{
	int error;

	if ((lx_autofs_major =
	    (major_t)space_fetch(LX_AUTOFS_SPACE_KEY_UDEV)) == 0) {

		if ((lx_autofs_major = getudev()) == (major_t)-1) {
			cmn_err(CE_WARN, "lx_autofs_init: "
			    "can't get unique device number");
			return (EAGAIN);
		}

		if (space_store(LX_AUTOFS_SPACE_KEY_UDEV,
		    (uintptr_t)lx_autofs_major) != 0) {
			cmn_err(CE_WARN, "lx_autofs_init: "
			    "can't save unique device number");
			return (EAGAIN);
		}
	}

	lx_autofs_fstype = fstype;
	if ((error = vfs_setfsops(
	    fstype, lx_autofs_vfstops, &lx_autofs_vfsops)) != 0) {
		cmn_err(CE_WARN, "lx_autofs_init: bad vfs ops template");
		return (error);
	}

	if ((error = vn_make_ops("lx_autofs vnode ops",
	    lx_autofs_tops_root, &lx_autofs_vn_ops)) != 0) {
		VERIFY(vfs_freevfsops_by_type(fstype) == 0);
		lx_autofs_vn_ops = NULL;
		return (error);
	}

	return (0);
}

/*
 * Module linkage
 */
static mntopt_t lx_autofs_mntopt[] = {
	{ LX_MNTOPT_FD,		NULL,	0,	MO_HASVALUE },
	{ LX_MNTOPT_PGRP,	NULL,	0,	MO_HASVALUE },
	{ LX_MNTOPT_MINPROTO,	NULL,	0,	MO_HASVALUE },
	{ LX_MNTOPT_MAXPROTO,	NULL,	0,	MO_HASVALUE }
};

static mntopts_t lx_autofs_mntopts = {
	sizeof (lx_autofs_mntopt) / sizeof (mntopt_t),
	lx_autofs_mntopt
};

static vfsdef_t vfw = {
	VFSDEF_VERSION,
	LX_AUTOFS_NAME,
	lx_autofs_init,
	VSW_HASPROTO | VSW_VOLATILEDEV,
	&lx_autofs_mntopts
};

extern struct mod_ops mod_fsops;

static struct modlfs modlfs = {
	&mod_fsops, "linux autofs filesystem", &vfw
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modlfs, NULL
};

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	int error;

	if ((error = mod_remove(&modlinkage)) != 0)
		return (error);

	if (lx_autofs_vn_ops != NULL) {
		vn_freevnodeops(lx_autofs_vn_ops);
		lx_autofs_vn_ops = NULL;
	}

	/*
	 * In our init routine, if we get an error after calling
	 * vfs_setfsops() we cleanup by calling vfs_freevfsops_by_type().
	 * But we don't need to call vfs_freevfsops_by_type() here
	 * because the fs framework did this for us as part of the
	 * mod_remove() call above.
	 */
	return (0);
}