/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * This file implements /dev filesystem operations for non-global
 * instances. Three major entry points:
 *	devname_profile_update()
 *		Update matching rules determining which names to export
 *	prof_readdir()
 *		Return the list of exported names
 *	prof_lookup()
 *		Implements lookup
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/dirent.h>
#include <sys/pathname.h>
#include <sys/fs/dv_node.h>
#include <sys/fs/sdev_impl.h>
#include <sys/sunndi.h>
#include <sys/modctl.h>

enum {
	PROFILE_TYPE_INCLUDE,
	PROFILE_TYPE_EXCLUDE,
	PROFILE_TYPE_MAP,
	PROFILE_TYPE_SYMLINK
};

enum {
	WALK_DIR_CONTINUE = 0,
	WALK_DIR_TERMINATE
};

static const char *sdev_nvp_val_err = "nvpair_value error %d, %s\n";

static void process_rule(struct sdev_node *, struct sdev_node *,
    char *, char *, int);
static void walk_dir(struct vnode *, void *, int (*)(char *, void *));

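/*
 * Resolve attributes for 'name' under directory 'dir': prefer the entry
 * in the attribute backing store (shadow), creating a shadow directory
 * when the corresponding global node is a directory; otherwise fall back
 * to the default device attributes supplied by devfs.
 */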
static void
prof_getattr(struct sdev_node *dir, char *name, struct vnode *gdv,
    struct vattr *vap, struct vnode **avpp, int *no_fs_perm)
{
	struct vnode *advp;

	/* get attribute from shadow, if present; else get default */
	advp = dir->sdev_attrvp;
	if (advp && VOP_LOOKUP(advp, name, avpp, NULL, 0, NULL, kcred,
	    NULL, NULL, NULL) == 0) {
		(void) VOP_GETATTR(*avpp, vap, 0, kcred, NULL);
	} else if (gdv == NULL || gdv->v_type == VDIR) {
		/* always create shadow directory */
		*vap = sdev_vattr_dir;
		if (advp && VOP_MKDIR(advp, name, &sdev_vattr_dir,
		    avpp, kcred, NULL, 0, NULL) != 0) {
			*avpp = NULLVP;
			sdcmn_err10(("prof_getattr: failed to create "
			    "shadow directory %s/%s\n", dir->sdev_path, name));
		}
	} else {
		/*
		 * get default permission from devfs
		 * Before calling devfs_get_defattr, we need to get
		 * the realvp (the dv_node). If realvp is not a dv_node,
		 * devfs_get_defattr() will return a system-wide default
		 * attr for device nodes.
		 */
		struct vnode *rvp;
		if (VOP_REALVP(gdv, &rvp, NULL) != 0)
			rvp = gdv;
		devfs_get_defattr(rvp, vap, no_fs_perm);
		*avpp = NULLVP;
	}

	/* ignore dev_t and vtype from backing store */
	if (gdv) {
		vap->va_type = gdv->v_type;
		vap->va_rdev = gdv->v_rdev;
	}
}

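/*
 * If the child directory 'cdir' matches one of the parent's directory
 * glob include patterns, apply the remainder of that pattern's path to
 * the child.
 */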
static void
apply_glob_pattern(struct sdev_node *pdir, struct sdev_node *cdir)
{
	char *name;
	nvpair_t *nvp = NULL;
	nvlist_t *nvl;
	struct vnode *vp = SDEVTOV(cdir);
	int rv = 0;

	if (vp->v_type != VDIR)
		return;
	name = cdir->sdev_name;
	nvl = pdir->sdev_prof.dev_glob_incdir;
	while (nvp = nvlist_next_nvpair(nvl, nvp)) {
		char *pathleft;
		char *expr = nvpair_name(nvp);
		if (!gmatch(name, expr))
			continue;
		rv = nvpair_value_string(nvp, &pathleft);
		if (rv != 0) {
			cmn_err(CE_WARN, sdev_nvp_val_err,
			    rv, nvpair_name(nvp));
			break;
		}
		process_rule(cdir, cdir->sdev_origin,
		    pathleft, NULL, PROFILE_TYPE_INCLUDE);
	}
}

/*
 * Some commonality here with sdev_mknode(), could be simplified.
 * NOTE: prof_mknode returns with *newdv held once, if success.
 */
static int
prof_mknode(struct sdev_node *dir, char *name, struct sdev_node **newdv,
    vattr_t *vap, vnode_t *avp, void *arg, cred_t *cred)
{
	struct sdev_node *dv;
	int rv;

	ASSERT(RW_WRITE_HELD(&dir->sdev_contents));

	/* check cache first */
	if (dv = sdev_cache_lookup(dir, name)) {
		*newdv = dv;
		return (0);
	}

	/* allocate node and insert into cache */
	rv = sdev_nodeinit(dir, name, &dv, NULL);
	if (rv != 0) {
		*newdv = NULL;
		return (rv);
	}

	sdev_cache_update(dir, &dv, name, SDEV_CACHE_ADD);
	*newdv = dv;

	/* put it in ready state */
	rv = sdev_nodeready(*newdv, vap, avp, arg, cred);

	/* handle glob pattern in the middle of a path */
	if (rv == 0) {
		if (SDEVTOV(*newdv)->v_type == VDIR)
			sdcmn_err10(("sdev_origin for %s set to 0x%p\n",
			    name, arg));
		apply_glob_pattern(dir, *newdv);
	} else {
		sdev_cache_update(dir, &dv, name, SDEV_CACHE_DELETE);
		SDEV_RELE(dv);
	}
	return (rv);
}

/*
 * Create a directory node in a non-global dev instance.
 * Always create shadow vnode. Set sdev_origin to the corresponding
 * global directory sdev_node if it exists. This facilitates the
 * lookup operation.
 */
static int
prof_make_dir(char *name, struct sdev_node **gdirp, struct sdev_node **dirp)
{
	struct sdev_node *dir = *dirp;
	struct sdev_node *gdir = *gdirp;
	struct sdev_node *newdv;
	struct vnode *avp, *gnewdir = NULL;
	struct vattr vattr;
	int error;

	/* see if name already exists */
	rw_enter(&dir->sdev_contents, RW_READER);
	if (newdv = sdev_cache_lookup(dir, name)) {
		*dirp = newdv;
		*gdirp = newdv->sdev_origin;
		rw_exit(&dir->sdev_contents);
		SDEV_RELE(dir);
		return (0);
	}
	rw_exit(&dir->sdev_contents);

	/* find corresponding dir node in global dev */
	if (gdir) {
		error = VOP_LOOKUP(SDEVTOV(gdir), name, &gnewdir,
		    NULL, 0, NULL, kcred, NULL, NULL, NULL);
		if (error == 0) {
			*gdirp = VTOSDEV(gnewdir);
		} else { /* it's ok if there's no global dir */
			*gdirp = NULL;
		}
	}

	/* get attribute from shadow, also create shadow dir */
	prof_getattr(dir, name, gnewdir, &vattr, &avp, NULL);

	/* create dev directory vnode */
	rw_enter(&dir->sdev_contents, RW_WRITER);
	error = prof_mknode(dir, name, &newdv, &vattr, avp, (void *)*gdirp,
	    kcred);
	rw_exit(&dir->sdev_contents);
	if (error == 0) {
		ASSERT(newdv);
		*dirp = newdv;
	}
	SDEV_RELE(dir);
	return (error);
}

/*
 * Look up a logical name in the global zone.
 * Provides the ability to map the global zone's device name
 * to an alternate name within a zone. The primary example
 * is the virtual console device /dev/zcons/[zonename]/zconsole
 * mapped to /[zonename]/root/dev/zconsole.
 */
static void
prof_lookup_globaldev(struct sdev_node *dir, struct sdev_node *gdir,
    char *name, char *rename)
{
	int error;
	struct vnode *avp, *gdv, *gddv;
	struct sdev_node *newdv;
	struct vattr vattr = {0};
	struct pathname pn;

	/* check if node already exists */
	newdv = sdev_cache_lookup(dir, rename);
	if (newdv) {
		ASSERT(newdv->sdev_state != SDEV_ZOMBIE);
		SDEV_SIMPLE_RELE(newdv);
		return;
	}

	/* sanity check arguments */
	if (!gdir || pn_get(name, UIO_SYSSPACE, &pn))
		return;

	/* perform a relative lookup of the global /dev instance */
	gddv = SDEVTOV(gdir);
	VN_HOLD(gddv);
	error = lookuppnvp(&pn, NULL, FOLLOW, NULLVPP, &gdv,
	    rootdir, gddv, kcred);
	pn_free(&pn);
	if (error) {
		sdcmn_err10(("prof_lookup_globaldev: %s not found\n", name));
		return;
	}
	ASSERT(gdv && gdv->v_type != VLNK);

	/*
	 * Found the entry in the global /dev.  Figure out its attributes
	 * by looking at the backing store, falling back to devfs for the
	 * default.  Note that a mapped device is persisted under the new
	 * name.
	 */
	prof_getattr(dir, rename, gdv, &vattr, &avp, NULL);

	if (gdv->v_type != VDIR) {
		VN_RELE(gdv);
		gdir = NULL;
	} else
		gdir = VTOSDEV(gdv);

	if (prof_mknode(dir, rename, &newdv, &vattr, avp,
	    (void *)gdir, kcred) == 0) {
		ASSERT(newdv->sdev_state != SDEV_ZOMBIE);
		SDEV_SIMPLE_RELE(newdv);
	}
}

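/* Create a symlink node 'lnm' with target 'tgt' in directory 'dir' */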
static void
prof_make_sym(struct sdev_node *dir, char *lnm, char *tgt)
{
	struct sdev_node *newdv;

	if (prof_mknode(dir, lnm, &newdv, &sdev_vattr_lnk, NULL,
	    (void *)tgt, kcred) == 0) {
		ASSERT(newdv->sdev_state != SDEV_ZOMBIE);
		SDEV_SIMPLE_RELE(newdv);
	}
}

/*
 * Create symlinks in the current directory based on profile
 */
static void
prof_make_symlinks(struct sdev_node *dir)
{
	char *tgt, *lnm;
	nvpair_t *nvp = NULL;
	nvlist_t *nvl = dir->sdev_prof.dev_symlink;
	int rv;

	ASSERT(RW_WRITE_HELD(&dir->sdev_contents));

	if (nvl == NULL)
		return;

	while (nvp = nvlist_next_nvpair(nvl, nvp)) {
		lnm = nvpair_name(nvp);
		rv = nvpair_value_string(nvp, &tgt);
		if (rv != 0) {
			cmn_err(CE_WARN, sdev_nvp_val_err,
			    rv, nvpair_name(nvp));
			break;
		}
		prof_make_sym(dir, lnm, tgt);
	}
}

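/*
 * Create mapped device nodes based on the profile.  Each nvpair maps a
 * name in the global /dev instance to a (possibly different) local name.
 */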
static void
prof_make_maps(struct sdev_node *dir)
{
	nvpair_t *nvp = NULL;
	nvlist_t *nvl = dir->sdev_prof.dev_map;
	int rv;

	ASSERT(RW_WRITE_HELD(&dir->sdev_contents));

	if (nvl == NULL)
		return;

	while (nvp = nvlist_next_nvpair(nvl, nvp)) {
		char *name;
		char *rename = nvpair_name(nvp);
		rv = nvpair_value_string(nvp, &name);
		if (rv != 0) {
			cmn_err(CE_WARN, sdev_nvp_val_err,
			    rv, nvpair_name(nvp));
			break;
		}
		sdcmn_err10(("map %s -> %s\n", name, rename));
		(void) prof_lookup_globaldev(dir, sdev_origins->sdev_root,
		    name, rename);
	}
}

struct match_arg {
	char *expr;
	int match;
};

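/* walk_dir() callback: stop the walk once a name matches the pattern */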
static int
match_name(char *name, void *arg)
{
	struct match_arg *margp = (struct match_arg *)arg;

	if (gmatch(name, margp->expr)) {
		margp->match = 1;
		return (WALK_DIR_TERMINATE);
	}
	return (WALK_DIR_CONTINUE);
}

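/*
 * Check whether the directory 'name' in the global instance contains at
 * least one entry matching the first component of 'pathleft'.
 */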
static int
is_nonempty_dir(char *name, char *pathleft, struct sdev_node *dir)
{
	struct match_arg marg;
	struct pathname pn;
	struct vnode *gvp;
	struct sdev_node *gdir = dir->sdev_origin;

	if (VOP_LOOKUP(SDEVTOV(gdir), name, &gvp, NULL, 0, NULL, kcred,
	    NULL, NULL, NULL) != 0)
		return (0);

	if (gvp->v_type != VDIR) {
		VN_RELE(gvp);
		return (0);
	}

	if (pn_get(pathleft, UIO_SYSSPACE, &pn) != 0) {
		VN_RELE(gvp);
		return (0);
	}

	marg.expr = kmem_alloc(MAXNAMELEN, KM_SLEEP);
	(void) pn_getcomponent(&pn, marg.expr);
	marg.match = 0;

	walk_dir(gvp, &marg, match_name);
	VN_RELE(gvp);
	kmem_free(marg.expr, MAXNAMELEN);
	pn_free(&pn);

	return (marg.match);
}


/* Check if name passes matching rules */
static int
prof_name_matched(char *name, struct sdev_node *dir)
{
	int type, match = 0;
	char *expr;
	nvlist_t *nvl;
	nvpair_t *nvp = NULL;
	int rv;

	/* check against nvlist for leaf include/exclude */
	nvl = dir->sdev_prof.dev_name;
	while (nvp = nvlist_next_nvpair(nvl, nvp)) {
		expr = nvpair_name(nvp);
		rv = nvpair_value_int32(nvp, &type);
		if (rv != 0) {
			cmn_err(CE_WARN, sdev_nvp_val_err,
			    rv, nvpair_name(nvp));
			break;
		}

		if (type == PROFILE_TYPE_EXCLUDE) {
			if (gmatch(name, expr))
				return (0); /* excluded */
		} else if (!match) {
			match = gmatch(name, expr);
		}
	}
	if (match) {
		sdcmn_err10(("prof_name_matched: %s\n", name));
		return (match);
	}

	/* check for match against directory globbing pattern */
	nvl = dir->sdev_prof.dev_glob_incdir;
	while (nvp = nvlist_next_nvpair(nvl, nvp)) {
		char *pathleft;
		expr = nvpair_name(nvp);
		if (gmatch(name, expr) == 0)
			continue;
		rv = nvpair_value_string(nvp, &pathleft);
		if (rv != 0) {
			cmn_err(CE_WARN, sdev_nvp_val_err,
			    rv, nvpair_name(nvp));
			break;
		}
		if (is_nonempty_dir(name, pathleft, dir)) {
			sdcmn_err10(("prof_name_matched: dir %s\n", name));
			return (1);
		}
	}

	return (0);
}

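/*
 * Walk the entries of a directory vnode, invoking 'callback' on each name
 * (skipping "." and "..") until the callback returns WALK_DIR_TERMINATE
 * or the directory is exhausted.
 */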
static void
walk_dir(struct vnode *dvp, void *arg, int (*callback)(char *, void *))
{
	char *nm;
	int eof, error;
	struct iovec iov;
	struct uio uio;
	struct dirent64 *dp;
	dirent64_t *dbuf;
	size_t dbuflen, dlen;

	ASSERT(dvp);

	dlen = 4096;
	dbuf = kmem_zalloc(dlen, KM_SLEEP);

	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_fmode = 0;
	uio.uio_extflg = UIO_COPY_CACHED;
	uio.uio_loffset = 0;
	uio.uio_llimit = MAXOFFSET_T;

	eof = 0;
	error = 0;
	while (!error && !eof) {
		uio.uio_resid = dlen;
		iov.iov_base = (char *)dbuf;
		iov.iov_len = dlen;
		(void) VOP_RWLOCK(dvp, V_WRITELOCK_FALSE, NULL);
		error = VOP_READDIR(dvp, &uio, kcred, &eof, NULL, 0);
		VOP_RWUNLOCK(dvp, V_WRITELOCK_FALSE, NULL);

		dbuflen = dlen - uio.uio_resid;
		if (error || dbuflen == 0)
			break;
		for (dp = dbuf; ((intptr_t)dp <
		    (intptr_t)dbuf + dbuflen);
		    dp = (dirent64_t *)((intptr_t)dp + dp->d_reclen)) {
			nm = dp->d_name;

			if (strcmp(nm, ".") == 0 ||
			    strcmp(nm, "..") == 0)
				continue;

			if (callback(nm, arg) == WALK_DIR_TERMINATE)
				goto end;
		}
	}

end:
	kmem_free(dbuf, dlen);
}

/*
 * Last chance for a zone to see a node. If our parent dir is
 * SDEV_ZONED, then we look up the "zone" property for the node. If the
 * property is found and matches the current zone name, we allow it.
 * Note that this isn't quite correct for the global zone peeking inside
 * a zone's /dev - for that to work, we'd have to have a per-dev-mount
 * zone ref squirreled away.
 */
static int
prof_zone_matched(char *name, struct sdev_node *dir)
{
	vnode_t *gvn = SDEVTOV(dir->sdev_origin);
	struct pathname pn;
	vnode_t *vn = NULL;
	char zonename[ZONENAME_MAX];
	int znlen = ZONENAME_MAX;
	int ret;

	ASSERT((dir->sdev_flags & SDEV_ZONED) != 0);

	sdcmn_err10(("sdev_node %p is zoned, looking for %s\n",
	    (void *)dir, name));

	if (pn_get(name, UIO_SYSSPACE, &pn))
		return (0);

	VN_HOLD(gvn);

	ret = lookuppnvp(&pn, NULL, FOLLOW, NULLVPP, &vn, rootdir, gvn, kcred);

	pn_free(&pn);

	if (ret != 0) {
		sdcmn_err10(("prof_zone_matched: %s not found\n", name));
		return (0);
	}

	/*
	 * VBLK doesn't matter, and the property name is in fact treated
	 * as a const char *.
	 */
	ret = e_ddi_getlongprop_buf(vn->v_rdev, VBLK, (char *)"zone",
	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, (caddr_t)zonename, &znlen);

	VN_RELE(vn);

	if (ret == DDI_PROP_NOT_FOUND) {
		sdcmn_err10(("vnode %p: no zone prop\n", (void *)vn));
		return (0);
	} else if (ret != DDI_PROP_SUCCESS) {
		sdcmn_err10(("vnode %p: zone prop error: %d\n",
		    (void *)vn, ret));
		return (0);
	}

	sdcmn_err10(("vnode %p zone prop: %s\n", (void *)vn, zonename));
	return (strcmp(zonename, curproc->p_zone->zone_name) == 0);
}

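/* walk_dir() callback: export names that pass the profile matching rules */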
static int
prof_make_name_glob(char *nm, void *arg)
{
	struct sdev_node *ddv = (struct sdev_node *)arg;

	if (prof_name_matched(nm, ddv))
		prof_lookup_globaldev(ddv, ddv->sdev_origin, nm, nm);

	return (WALK_DIR_CONTINUE);
}

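/* walk_dir() callback: export names zoned to the current zone */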
static int
prof_make_name_zone(char *nm, void *arg)
{
	struct sdev_node *ddv = (struct sdev_node *)arg;

	if (prof_zone_matched(nm, ddv))
		prof_lookup_globaldev(ddv, ddv->sdev_origin, nm, nm);

	return (WALK_DIR_CONTINUE);
}

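/* Walk the corresponding global /dev directory, applying 'cb' to each name */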
static void
prof_make_names_walk(struct sdev_node *ddv, int (*cb)(char *, void *))
{
	struct sdev_node *gdir;

	gdir = ddv->sdev_origin;
	if (gdir == NULL)
		return;
	walk_dir(SDEVTOV(gdir), (void *)ddv, cb);
}

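/*
 * Populate the directory with the names permitted by the profile, either
 * by walking the global instance (zoned directories and glob profiles) or
 * by looking up each explicitly included name.
 */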
static void
prof_make_names(struct sdev_node *dir)
{
	char *name;
	nvpair_t *nvp = NULL;
	nvlist_t *nvl = dir->sdev_prof.dev_name;
	int rv;

	ASSERT(RW_WRITE_HELD(&dir->sdev_contents));

	if ((dir->sdev_flags & SDEV_ZONED) != 0)
		prof_make_names_walk(dir, prof_make_name_zone);

	if (nvl == NULL)
		return;

	if (dir->sdev_prof.has_glob) {
		prof_make_names_walk(dir, prof_make_name_glob);
		return;
	}

	/* Walk the nvlist and look up each device in the global instance */
	while (nvp = nvlist_next_nvpair(nvl, nvp)) {
		int type;
		rv = nvpair_value_int32(nvp, &type);
		if (rv != 0) {
			cmn_err(CE_WARN, sdev_nvp_val_err,
			    rv, nvpair_name(nvp));
			break;
		}
		if (type == PROFILE_TYPE_EXCLUDE)
			continue;
		name = nvpair_name(nvp);
		(void) prof_lookup_globaldev(dir, dir->sdev_origin,
		    name, name);
	}
}

/*
 * Build directory vnodes based on the profile and the global
 * dev instance.
 */
void
prof_filldir(struct sdev_node *ddv)
{
	int firsttime = 1;
	struct sdev_node *gdir = ddv->sdev_origin;

	ASSERT(RW_READ_HELD(&ddv->sdev_contents));

	/*
	 * We need to rebuild the directory content if
	 * - SDEV_BUILD is set
	 * - The device tree generation number has changed
	 * - The corresponding /dev namespace has been updated
	 */
check_build:
	if ((ddv->sdev_flags & SDEV_BUILD) == 0 &&
	    ddv->sdev_devtree_gen == devtree_gen &&
	    (gdir == NULL || ddv->sdev_ldir_gen
	    == gdir->sdev_gdir_gen))
		return; /* already up to date */

	/* We may have become a zombie (across a tryupgrade retry) */
	if (ddv->sdev_state == SDEV_ZOMBIE)
		return;

	if (firsttime && rw_tryupgrade(&ddv->sdev_contents) == 0) {
		rw_exit(&ddv->sdev_contents);
		firsttime = 0;
		rw_enter(&ddv->sdev_contents, RW_WRITER);
		goto check_build;
	}
	sdcmn_err10(("devtree_gen (%s): %ld -> %ld\n",
	    ddv->sdev_path, ddv->sdev_devtree_gen, devtree_gen));
	if (gdir)
		sdcmn_err10(("sdev_dir_gen (%s): %ld -> %ld\n",
		    ddv->sdev_path, ddv->sdev_ldir_gen,
		    gdir->sdev_gdir_gen));

	/* update flags and generation number so next filldir is quick */
	ddv->sdev_flags &= ~SDEV_BUILD;
	ddv->sdev_devtree_gen = devtree_gen;
	if (gdir)
		ddv->sdev_ldir_gen = gdir->sdev_gdir_gen;

	prof_make_symlinks(ddv);
	prof_make_maps(ddv);
	prof_make_names(ddv);
	rw_downgrade(&ddv->sdev_contents);
}

/* apply include/exclude pattern to existing directory content */
static void
apply_dir_pattern(struct sdev_node *dir, char *expr, char *pathleft, int type)
{
	struct sdev_node *dv;

	/* leaf pattern */
	if (pathleft == NULL) {
		if (type == PROFILE_TYPE_INCLUDE)
			return; /* nothing to do for include */
		(void) sdev_cleandir(dir, expr, SDEV_ENFORCE);
		return;
	}

	/* directory pattern */
	rw_enter(&dir->sdev_contents, RW_WRITER);

	for (dv = SDEV_FIRST_ENTRY(dir); dv; dv = SDEV_NEXT_ENTRY(dir, dv)) {
		if (gmatch(dv->sdev_name, expr) == 0 ||
		    SDEVTOV(dv)->v_type != VDIR)
			continue;
		process_rule(dv, dv->sdev_origin,
		    pathleft, NULL, type);
	}
	rw_exit(&dir->sdev_contents);
}

/*
 * Add a profile rule.
 * tgt represents a device name matching expression;
 * matching device names are to be either included or excluded.
 */
static void
prof_add_rule(char *name, char *tgt, struct sdev_node *dir, int type)
{
	int error;
	nvlist_t **nvlp = NULL;
	int rv;

	ASSERT(SDEVTOV(dir)->v_type == VDIR);

	rw_enter(&dir->sdev_contents, RW_WRITER);

	switch (type) {
	case PROFILE_TYPE_INCLUDE:
		if (tgt)
			nvlp = &(dir->sdev_prof.dev_glob_incdir);
		else
			nvlp = &(dir->sdev_prof.dev_name);
		break;
	case PROFILE_TYPE_EXCLUDE:
		if (tgt)
			nvlp = &(dir->sdev_prof.dev_glob_excdir);
		else
			nvlp = &(dir->sdev_prof.dev_name);
		break;
	case PROFILE_TYPE_MAP:
		nvlp = &(dir->sdev_prof.dev_map);
		break;
	case PROFILE_TYPE_SYMLINK:
		nvlp = &(dir->sdev_prof.dev_symlink);
		break;
	};

	/* initialize nvlist */
	if (*nvlp == NULL) {
		error = nvlist_alloc(nvlp, NV_UNIQUE_NAME, KM_SLEEP);
		ASSERT(error == 0);
	}

	if (tgt) {
		rv = nvlist_add_string(*nvlp, name, tgt);
	} else {
		rv = nvlist_add_int32(*nvlp, name, type);
	}
	ASSERT(rv == 0);
	/* rebuild directory content */
	dir->sdev_flags |= SDEV_BUILD;

	if ((type == PROFILE_TYPE_INCLUDE) &&
	    (strpbrk(name, "*?[]") != NULL)) {
		dir->sdev_prof.has_glob = 1;
	}

	rw_exit(&dir->sdev_contents);

	/* apply the new include/exclude rule to existing directory content */
	switch (type) {
	case PROFILE_TYPE_INCLUDE:
	case PROFILE_TYPE_EXCLUDE:
		apply_dir_pattern(dir, name, tgt, type);
		break;
	};
}

/*
 * Parse path components and apply requested matching rule at
 * directory level.
 */
static void
process_rule(struct sdev_node *dir, struct sdev_node *gdir,
    char *path, char *tgt, int type)
{
	char *name;
	struct pathname pn;
	int rv = 0;

	if ((strlen(path) > 5) && (strncmp(path, "/dev/", 5) == 0)) {
		path += 5;
	}

	if (pn_get(path, UIO_SYSSPACE, &pn) != 0)
		return;

	name = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	(void) pn_getcomponent(&pn, name);
	pn_skipslash(&pn);
	SDEV_HOLD(dir);

	while (pn_pathleft(&pn)) {
		/* If this component is a pattern, just add the pattern */
		if (strpbrk(name, "*?[]") != NULL &&
		    (type == PROFILE_TYPE_INCLUDE ||
		    type == PROFILE_TYPE_EXCLUDE)) {
			ASSERT(tgt == NULL);
			tgt = pn.pn_path;
			break;
		}
		if ((rv = prof_make_dir(name, &gdir, &dir)) != 0) {
			cmn_err(CE_CONT, "process_rule: %s error %d\n",
			    path, rv);
			break;
		}
		(void) pn_getcomponent(&pn, name);
		pn_skipslash(&pn);
	}

	/* process the leaf component */
	if (rv == 0) {
		prof_add_rule(name, tgt, dir, type);
		SDEV_SIMPLE_RELE(dir);
	}

	kmem_free(name, MAXPATHLEN);
	pn_free(&pn);
}

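/* Copy in a packed profile nvlist from user space and unpack it */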
static int
copyin_nvlist(char *packed_usr, size_t packed_sz, nvlist_t **nvlp)
{
	int err = 0;
	char *packed;
	nvlist_t *profile = NULL;

	/* simple sanity check */
	if (packed_usr == NULL || packed_sz == 0)
		return (EINVAL);

	/* copyin packed profile nvlist */
	packed = kmem_alloc(packed_sz, KM_NOSLEEP);
	if (packed == NULL)
		return (ENOMEM);
	err = copyin(packed_usr, packed, packed_sz);

	/* unpack packed profile nvlist */
	if (err)
		cmn_err(CE_WARN, "copyin_nvlist: copyin failed with "
		    "err %d\n", err);
	else if (err = nvlist_unpack(packed, packed_sz, &profile, KM_NOSLEEP))
		cmn_err(CE_WARN, "copyin_nvlist: nvlist_unpack "
		    "failed with err %d\n", err);

	kmem_free(packed, packed_sz);
	if (err == 0)
		*nvlp = profile;
	return (err);
}

/*
 * Process profile passed down from libdevinfo. There are four types
 * of matching rules:
 *	include: export a name or names matching a pattern
 *	exclude: exclude a name or names matching a pattern
 *	symlink: create a local symlink
 *	map: export a device with a name different from the global zone
 * Note: We may consider supporting VOP_SYMLINK in non-global instances,
 * because it does not present any security risk. For now, the fs
 * instance is read only.
 */
static void
sdev_process_profile(struct sdev_data *sdev_data, nvlist_t *profile)
{
	nvpair_t *nvpair;
	char *nvname, *dname;
	struct sdev_node *dir, *gdir;
	char **pair; /* for symlinks and maps */
	uint_t nelem;
	int rv;

	gdir = sdev_origins->sdev_root;	/* root of global /dev */
	dir = sdev_data->sdev_root;	/* root of current instance */

	ASSERT(profile);

	/* process nvpairs in the list */
	nvpair = NULL;
	while (nvpair = nvlist_next_nvpair(profile, nvpair)) {
		nvname = nvpair_name(nvpair);
		ASSERT(nvname != NULL);

		if (strcmp(nvname, SDEV_NVNAME_INCLUDE) == 0) {
			rv = nvpair_value_string(nvpair, &dname);
			if (rv != 0) {
				cmn_err(CE_WARN, sdev_nvp_val_err,
				    rv, nvpair_name(nvpair));
				break;
			}
			process_rule(dir, gdir, dname, NULL,
			    PROFILE_TYPE_INCLUDE);
		} else if (strcmp(nvname, SDEV_NVNAME_EXCLUDE) == 0) {
			rv = nvpair_value_string(nvpair, &dname);
			if (rv != 0) {
				cmn_err(CE_WARN, sdev_nvp_val_err,
				    rv, nvpair_name(nvpair));
				break;
			}
			process_rule(dir, gdir, dname, NULL,
			    PROFILE_TYPE_EXCLUDE);
		} else if (strcmp(nvname, SDEV_NVNAME_SYMLINK) == 0) {
			rv = nvpair_value_string_array(nvpair, &pair, &nelem);
			if (rv != 0) {
				cmn_err(CE_WARN, sdev_nvp_val_err,
				    rv, nvpair_name(nvpair));
				break;
			}
			ASSERT(nelem == 2);
			process_rule(dir, gdir, pair[0], pair[1],
			    PROFILE_TYPE_SYMLINK);
		} else if (strcmp(nvname, SDEV_NVNAME_MAP) == 0) {
			rv = nvpair_value_string_array(nvpair, &pair, &nelem);
			if (rv != 0) {
				cmn_err(CE_WARN, sdev_nvp_val_err,
				    rv, nvpair_name(nvpair));
				break;
			}
			process_rule(dir, gdir, pair[1], pair[0],
			    PROFILE_TYPE_MAP);
		} else if (strcmp(nvname, SDEV_NVNAME_MOUNTPT) != 0) {
			cmn_err(CE_WARN, "sdev_process_profile: invalid "
			    "nvpair %s\n", nvname);
		}
	}
}

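/*
 * Lookup a name in a non-global /dev instance.  The directory is filled
 * on demand from the profile before the cache is consulted again.
 */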
/*ARGSUSED*/
int
prof_lookup(vnode_t *dvp, char *nm, struct vnode **vpp, struct cred *cred)
{
	struct sdev_node *ddv = VTOSDEV(dvp);
	struct sdev_node *dv;
	int nmlen;

	/*
	 * Empty name or ., return node itself.
	 */
	nmlen = strlen(nm);
	if ((nmlen == 0) || ((nmlen == 1) && (nm[0] == '.'))) {
		*vpp = SDEVTOV(ddv);
		VN_HOLD(*vpp);
		return (0);
	}

	/*
	 * .., return the parent directory
	 */
	if ((nmlen == 2) && (strcmp(nm, "..") == 0)) {
		*vpp = SDEVTOV(ddv->sdev_dotdot);
		VN_HOLD(*vpp);
		return (0);
	}

	rw_enter(&ddv->sdev_contents, RW_READER);
	dv = sdev_cache_lookup(ddv, nm);
	if (dv == NULL) {
		prof_filldir(ddv);
		dv = sdev_cache_lookup(ddv, nm);
	}
	rw_exit(&ddv->sdev_contents);
	if (dv == NULL) {
		sdcmn_err10(("prof_lookup: %s not found\n", nm));
		return (ENOENT);
	}

	return (sdev_to_vp(dv, vpp));
}

/*
 * This is invoked after a new filesystem is mounted to define the
 * name space. It is also invoked during normal system operation
 * to update the name space.
 *
 * Applications call di_prof_commit() in libdevinfo, which invokes
 * modctl(). modctl calls this function. The input is a packed nvlist.
 */
int
devname_profile_update(char *packed, size_t packed_sz)
{
	char *mntpt;
	nvlist_t *nvl;
	nvpair_t *nvp;
	struct sdev_data *mntinfo;
	int err;
	int rv;

	nvl = NULL;
	if ((err = copyin_nvlist(packed, packed_sz, &nvl)) != 0)
		return (err);
	ASSERT(nvl);

	/* The first nvpair must be the mount point */
	nvp = nvlist_next_nvpair(nvl, NULL);
	if (strcmp(nvpair_name(nvp), SDEV_NVNAME_MOUNTPT) != 0) {
		cmn_err(CE_NOTE,
		    "devname_profile_update: mount point not specified");
		nvlist_free(nvl);
		return (EINVAL);
	}

	/* find the matching filesystem instance */
	rv = nvpair_value_string(nvp, &mntpt);
	if (rv != 0) {
		cmn_err(CE_WARN, sdev_nvp_val_err,
		    rv, nvpair_name(nvp));
	} else {
		mntinfo = sdev_find_mntinfo(mntpt);
		if (mntinfo == NULL) {
			cmn_err(CE_NOTE, "devname_profile_update: "
			    "mount point %s not found", mntpt);
			nvlist_free(nvl);
			return (EINVAL);
		}

		/* now do the hard work of processing the profile */
		sdev_process_profile(mntinfo, nvl);

		sdev_mntinfo_rele(mntinfo);
	}

	nvlist_free(nvl);
	return (0);
}