fsh, fsd, libfsd, fsdadm from Sep 3rd webrev
--- old/usr/src/uts/common/fs/vfs.c
+++ new/usr/src/uts/common/fs/vfs.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1988, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
26 26 /* All Rights Reserved */
27 27
28 28 /*
29 29 * University Copyright- Copyright (c) 1982, 1986, 1988
30 30 * The Regents of the University of California
31 31 * All Rights Reserved
32 32 *
33 33 * University Acknowledgment- Portions of this document are derived from
34 34 * software developed by the University of California, Berkeley, and its
35 35 * contributors.
36 36 */
37 37
38 38 #include <sys/types.h>
39 39 #include <sys/t_lock.h>
40 40 #include <sys/param.h>
41 41 #include <sys/errno.h>
42 42 #include <sys/user.h>
43 43 #include <sys/fstyp.h>
44 44 #include <sys/kmem.h>
45 45 #include <sys/systm.h>
46 46 #include <sys/proc.h>
47 47 #include <sys/mount.h>
48 48 #include <sys/vfs.h>
49 49 #include <sys/vfs_opreg.h>
50 50 #include <sys/fem.h>
51 51 #include <sys/mntent.h>
52 52 #include <sys/stat.h>
53 53 #include <sys/statvfs.h>
54 54 #include <sys/statfs.h>
55 55 #include <sys/cred.h>
56 56 #include <sys/vnode.h>
57 57 #include <sys/rwstlock.h>
58 58 #include <sys/dnlc.h>
59 59 #include <sys/file.h>
60 60 #include <sys/time.h>
61 61 #include <sys/atomic.h>
62 62 #include <sys/cmn_err.h>
63 63 #include <sys/buf.h>
64 64 #include <sys/swap.h>
65 65 #include <sys/debug.h>
66 66 #include <sys/vnode.h>
67 67 #include <sys/modctl.h>
68 68 #include <sys/ddi.h>
69 69 #include <sys/pathname.h>
70 70 #include <sys/bootconf.h>
71 71 #include <sys/dumphdr.h>
72 72 #include <sys/dc_ki.h>
73 73 #include <sys/poll.h>
74 74 #include <sys/sunddi.h>
75 75 #include <sys/sysmacros.h>
76 76 #include <sys/zone.h>
77 77 #include <sys/policy.h>
78 78 #include <sys/ctfs.h>
79 79 #include <sys/objfs.h>
80 80 #include <sys/console.h>
81 81 #include <sys/reboot.h>
82 82 #include <sys/attr.h>
83 83 #include <sys/zio.h>
84 84 #include <sys/spa.h>
85 85 #include <sys/lofi.h>
86 86 #include <sys/bootprops.h>
87 +#include <sys/fsh.h>
88 +#include <sys/fsh_impl.h>
87 89
88 90 #include <vm/page.h>
89 91
90 92 #include <fs/fs_subr.h>
91 93 /* Private interfaces to create vopstats-related data structures */
92 94 extern void initialize_vopstats(vopstats_t *);
93 95 extern vopstats_t *get_fstype_vopstats(struct vfs *, struct vfssw *);
94 96 extern vsk_anchor_t *get_vskstat_anchor(struct vfs *);
95 97
96 98 static void vfs_clearmntopt_nolock(mntopts_t *, const char *, int);
97 99 static void vfs_setmntopt_nolock(mntopts_t *, const char *,
98 100 const char *, int, int);
99 101 static int vfs_optionisset_nolock(const mntopts_t *, const char *, char **);
100 102 static void vfs_freemnttab(struct vfs *);
101 103 static void vfs_freeopt(mntopt_t *);
102 104 static void vfs_swapopttbl_nolock(mntopts_t *, mntopts_t *);
103 105 static void vfs_swapopttbl(mntopts_t *, mntopts_t *);
104 106 static void vfs_copyopttbl_extend(const mntopts_t *, mntopts_t *, int);
105 107 static void vfs_createopttbl_extend(mntopts_t *, const char *,
106 108 const mntopts_t *);
107 109 static char **vfs_copycancelopt_extend(char **const, int);
108 110 static void vfs_freecancelopt(char **);
109 111 static void getrootfs(char **, char **);
110 112 static int getmacpath(dev_info_t *, void *);
111 113 static void vfs_mnttabvp_setup(void);
112 114
113 115 struct ipmnt {
114 116 struct ipmnt *mip_next;
115 117 dev_t mip_dev;
116 118 struct vfs *mip_vfsp;
117 119 };
118 120
119 121 static kmutex_t vfs_miplist_mutex;
120 122 static struct ipmnt *vfs_miplist = NULL;
121 123 static struct ipmnt *vfs_miplist_end = NULL;
122 124
123 125 static kmem_cache_t *vfs_cache; /* Pointer to VFS kmem cache */
124 126
125 127 /*
126 128 * VFS global data.
127 129 */
128 130 vnode_t *rootdir; /* pointer to root inode vnode. */
129 131 vnode_t *devicesdir; /* pointer to inode of devices root */
130 132 vnode_t *devdir; /* pointer to inode of dev root */
131 133
132 134 char *server_rootpath; /* root path for diskless clients */
133 135 char *server_hostname; /* hostname of diskless server */
134 136
135 137 static struct vfs root;
136 138 static struct vfs devices;
137 139 static struct vfs dev;
138 140 struct vfs *rootvfs = &root; /* pointer to root vfs; head of VFS list. */
139 141 rvfs_t *rvfs_list; /* array of vfs ptrs for vfs hash list */
140 142 int vfshsz = 512; /* # of heads/locks in vfs hash arrays */
141 143 /* must be power of 2! */
142 144 timespec_t vfs_mnttab_ctime; /* mnttab created time */
143 145 timespec_t vfs_mnttab_mtime; /* mnttab last modified time */
144 146 char *vfs_dummyfstype = "\0";
145 147 struct pollhead vfs_pollhd; /* for mnttab pollers */
146 148 struct vnode *vfs_mntdummyvp; /* to fake mnttab read/write for file events */
147 149 int mntfstype; /* will be set once mnt fs is mounted */
148 150
149 151 /*
150 152 * Table for generic options recognized in the VFS layer and acted
151 153 * on at this level before parsing file system specific options.
152 154 * The nosuid option is stronger than any of the devices and setuid
153 155 * options, so those are canceled when nosuid is seen.
154 156 *
155 157 * All options which are added here need to be added to the
156 158 * list of standard options in usr/src/cmd/fs.d/fslib.c as well.
157 159 */
158 160 /*
159 161 * VFS Mount options table
160 162 */
161 163 static char *ro_cancel[] = { MNTOPT_RW, NULL };
162 164 static char *rw_cancel[] = { MNTOPT_RO, NULL };
163 165 static char *suid_cancel[] = { MNTOPT_NOSUID, NULL };
164 166 static char *nosuid_cancel[] = { MNTOPT_SUID, MNTOPT_DEVICES, MNTOPT_NODEVICES,
165 167 MNTOPT_NOSETUID, MNTOPT_SETUID, NULL };
166 168 static char *devices_cancel[] = { MNTOPT_NODEVICES, NULL };
167 169 static char *nodevices_cancel[] = { MNTOPT_DEVICES, NULL };
168 170 static char *setuid_cancel[] = { MNTOPT_NOSETUID, NULL };
169 171 static char *nosetuid_cancel[] = { MNTOPT_SETUID, NULL };
170 172 static char *nbmand_cancel[] = { MNTOPT_NONBMAND, NULL };
171 173 static char *nonbmand_cancel[] = { MNTOPT_NBMAND, NULL };
172 174 static char *exec_cancel[] = { MNTOPT_NOEXEC, NULL };
173 175 static char *noexec_cancel[] = { MNTOPT_EXEC, NULL };
174 176
175 177 static const mntopt_t mntopts[] = {
176 178 /*
177 179 * option name cancel options default arg flags
178 180 */
179 181 { MNTOPT_REMOUNT, NULL, NULL,
180 182 MO_NODISPLAY, (void *)0 },
181 183 { MNTOPT_RO, ro_cancel, NULL, 0,
182 184 (void *)0 },
183 185 { MNTOPT_RW, rw_cancel, NULL, 0,
184 186 (void *)0 },
185 187 { MNTOPT_SUID, suid_cancel, NULL, 0,
186 188 (void *)0 },
187 189 { MNTOPT_NOSUID, nosuid_cancel, NULL, 0,
188 190 (void *)0 },
189 191 { MNTOPT_DEVICES, devices_cancel, NULL, 0,
190 192 (void *)0 },
191 193 { MNTOPT_NODEVICES, nodevices_cancel, NULL, 0,
192 194 (void *)0 },
193 195 { MNTOPT_SETUID, setuid_cancel, NULL, 0,
194 196 (void *)0 },
195 197 { MNTOPT_NOSETUID, nosetuid_cancel, NULL, 0,
196 198 (void *)0 },
197 199 { MNTOPT_NBMAND, nbmand_cancel, NULL, 0,
198 200 (void *)0 },
199 201 { MNTOPT_NONBMAND, nonbmand_cancel, NULL, 0,
200 202 (void *)0 },
201 203 { MNTOPT_EXEC, exec_cancel, NULL, 0,
202 204 (void *)0 },
203 205 { MNTOPT_NOEXEC, noexec_cancel, NULL, 0,
204 206 (void *)0 },
205 207 };
206 208
207 209 const mntopts_t vfs_mntopts = {
208 210 sizeof (mntopts) / sizeof (mntopt_t),
209 211 (mntopt_t *)&mntopts[0]
210 212 };
211 213
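Each entry in mntopts[] above carries a cancel list: when one of these generic options is turned on for a mount, every option named in its cancel list is cleared from the same table, so for example "nosuid" wins over "suid", "devices"/"nodevices" and "setuid"/"nosetuid". A minimal sketch of the effect, using the vfs option helpers defined in this file (hypothetical caller; vfsp is assumed to be an already initialized vfs):

	vfs_setmntopt(vfsp, MNTOPT_SETUID, NULL, 0);
	ASSERT(vfs_optionisset(vfsp, MNTOPT_SETUID, NULL));

	/* Turning on nosuid cancels everything listed in nosuid_cancel[]. */
	vfs_setmntopt(vfsp, MNTOPT_NOSUID, NULL, 0);
	ASSERT(vfs_optionisset(vfsp, MNTOPT_NOSUID, NULL));
	ASSERT(!vfs_optionisset(vfsp, MNTOPT_SETUID, NULL));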
212 214 /*
213 215 * File system operation dispatch functions.
214 216 */
215 217
216 218 int
217 219 fsop_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr)
218 220 {
219 - return (*(vfsp)->vfs_op->vfs_mount)(vfsp, mvp, uap, cr);
221 + return (fsh_mount(vfsp, mvp, uap, cr));
220 222 }
221 223
222 224 int
223 225 fsop_unmount(vfs_t *vfsp, int flag, cred_t *cr)
224 226 {
225 - return (*(vfsp)->vfs_op->vfs_unmount)(vfsp, flag, cr);
227 + return (fsh_unmount(vfsp, flag, cr));
226 228 }
227 229
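These two dispatchers no longer call the filesystem's vfs_op vector directly: VFS_MOUNT and VFS_UNMOUNT are now routed through the new fsh (filesystem hook) layer pulled in via sys/fsh.h / sys/fsh_impl.h above. The hook implementation is not part of this file, so the following is only a sketch of the assumed shape -- installed hooks run around the underlying operation:

	/* Sketch only; the real fsh_mount() lives in the fsh module. */
	int
	fsh_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr)
	{
		int ret;

		/* any pre-mount hooks installed on vfsp would run here */
		ret = (*(vfsp)->vfs_op->vfs_mount)(vfsp, mvp, uap, cr);
		/* post-mount hooks would run here and get to observe ret */
		return (ret);
	}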
228 230 int
229 231 fsop_root(vfs_t *vfsp, vnode_t **vpp)
230 232 {
231 233 refstr_t *mntpt;
232 234 int ret = (*(vfsp)->vfs_op->vfs_root)(vfsp, vpp);
233 235 /*
234 236 * Make sure this root has a path. With lofs, it is possible to have
235 237 * a NULL mountpoint.
236 238 */
237 239 if (ret == 0 && vfsp->vfs_mntpt != NULL && (*vpp)->v_path == NULL) {
238 240 mntpt = vfs_getmntpoint(vfsp);
239 241 vn_setpath_str(*vpp, refstr_value(mntpt),
240 242 strlen(refstr_value(mntpt)));
241 243 refstr_rele(mntpt);
242 244 }
243 245
244 246 return (ret);
245 247 }
246 248
247 249 int
248 250 fsop_statfs(vfs_t *vfsp, statvfs64_t *sp)
249 251 {
250 252 return (*(vfsp)->vfs_op->vfs_statvfs)(vfsp, sp);
251 253 }
252 254
253 255 int
254 256 fsop_sync(vfs_t *vfsp, short flag, cred_t *cr)
255 257 {
256 258 return (*(vfsp)->vfs_op->vfs_sync)(vfsp, flag, cr);
257 259 }
258 260
259 261 int
260 262 fsop_vget(vfs_t *vfsp, vnode_t **vpp, fid_t *fidp)
261 263 {
262 264 /*
263 265 * In order to handle system attribute fids in a manner
264 266 * transparent to the underlying fs, we embed the fid for
265 267 * the sysattr parent object in the sysattr fid and tack on
266 268 * some extra bytes that only the sysattr layer knows about.
267 269 *
268 270 * This guarantees that sysattr fids are larger than other fids
269 271 * for this vfs. If the vfs supports the sysattr view interface
270 272 * (as indicated by VFSFT_SYSATTR_VIEWS), we cannot have a size
271 273 * collision with XATTR_FIDSZ.
272 274 */
273 275 if (vfs_has_feature(vfsp, VFSFT_SYSATTR_VIEWS) &&
274 276 fidp->fid_len == XATTR_FIDSZ)
275 277 return (xattr_dir_vget(vfsp, vpp, fidp));
276 278
277 279 return (*(vfsp)->vfs_op->vfs_vget)(vfsp, vpp, fidp);
278 280 }
279 281
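The size trick described in the comment above is what lets fsop_vget() steer system-attribute fids to xattr_dir_vget() without the underlying filesystem's cooperation. Purely for illustration (the real encoding is private to the sysattr layer), a sysattr fid can be thought of as:

	/* Hypothetical layout -- illustration only. */
	struct sysattr_fid_sketch {
		ushort_t sf_len;		/* always XATTR_FIDSZ */
		char	sf_parent[MAXFIDSZ];	/* embedded parent-object fid */
		/* ...extra bytes that only the sysattr layer interprets... */
	};

Because XATTR_FIDSZ exceeds any fid the filesystem itself hands out when it advertises VFSFT_SYSATTR_VIEWS, fid_len alone is enough to dispatch.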
280 282 int
281 283 fsop_mountroot(vfs_t *vfsp, enum whymountroot reason)
282 284 {
283 285 return (*(vfsp)->vfs_op->vfs_mountroot)(vfsp, reason);
284 286 }
285 287
286 288 void
287 289 fsop_freefs(vfs_t *vfsp)
288 290 {
289 291 (*(vfsp)->vfs_op->vfs_freevfs)(vfsp);
290 292 }
291 293
292 294 int
293 295 fsop_vnstate(vfs_t *vfsp, vnode_t *vp, vntrans_t nstate)
294 296 {
295 297 return ((*(vfsp)->vfs_op->vfs_vnstate)(vfsp, vp, nstate));
296 298 }
297 299
298 300 int
299 301 fsop_sync_by_kind(int fstype, short flag, cred_t *cr)
300 302 {
301 303 ASSERT((fstype >= 0) && (fstype < nfstype));
302 304
303 305 if (ALLOCATED_VFSSW(&vfssw[fstype]) && VFS_INSTALLED(&vfssw[fstype]))
304 306 return (*vfssw[fstype].vsw_vfsops.vfs_sync) (NULL, flag, cr);
305 307 else
306 308 return (ENOTSUP);
307 309 }
308 310
309 311 /*
310 312 * File system initialization. vfs_setfsops() must be called from a file
311 313 * system's init routine.
312 314 */
313 315
314 316 static int
315 317 fs_copyfsops(const fs_operation_def_t *template, vfsops_t *actual,
316 318 int *unused_ops)
317 319 {
318 320 static const fs_operation_trans_def_t vfs_ops_table[] = {
319 321 VFSNAME_MOUNT, offsetof(vfsops_t, vfs_mount),
320 322 fs_nosys, fs_nosys,
321 323
322 324 VFSNAME_UNMOUNT, offsetof(vfsops_t, vfs_unmount),
323 325 fs_nosys, fs_nosys,
324 326
325 327 VFSNAME_ROOT, offsetof(vfsops_t, vfs_root),
326 328 fs_nosys, fs_nosys,
327 329
328 330 VFSNAME_STATVFS, offsetof(vfsops_t, vfs_statvfs),
329 331 fs_nosys, fs_nosys,
330 332
331 333 VFSNAME_SYNC, offsetof(vfsops_t, vfs_sync),
332 334 (fs_generic_func_p) fs_sync,
333 335 (fs_generic_func_p) fs_sync, /* No errors allowed */
334 336
335 337 VFSNAME_VGET, offsetof(vfsops_t, vfs_vget),
336 338 fs_nosys, fs_nosys,
337 339
338 340 VFSNAME_MOUNTROOT, offsetof(vfsops_t, vfs_mountroot),
339 341 fs_nosys, fs_nosys,
340 342
341 343 VFSNAME_FREEVFS, offsetof(vfsops_t, vfs_freevfs),
342 344 (fs_generic_func_p)fs_freevfs,
343 345 (fs_generic_func_p)fs_freevfs, /* Shouldn't fail */
344 346
345 347 VFSNAME_VNSTATE, offsetof(vfsops_t, vfs_vnstate),
346 348 (fs_generic_func_p)fs_nosys,
347 349 (fs_generic_func_p)fs_nosys,
348 350
349 351 NULL, 0, NULL, NULL
350 352 };
351 353
352 354 return (fs_build_vector(actual, unused_ops, vfs_ops_table, template));
353 355 }
354 356
355 357 void
356 358 zfs_boot_init() {
357 359
358 360 if (strcmp(rootfs.bo_fstype, MNTTYPE_ZFS) == 0)
359 361 spa_boot_init();
360 362 }
361 363
362 364 int
363 365 vfs_setfsops(int fstype, const fs_operation_def_t *template, vfsops_t **actual)
364 366 {
365 367 int error;
366 368 int unused_ops;
367 369
368 370 /*
369 371 * Verify that fstype refers to a valid fs. Note that
370 372 * 0 is valid since it's used to set "stray" ops.
371 373 */
372 374 if ((fstype < 0) || (fstype >= nfstype))
373 375 return (EINVAL);
374 376
375 377 if (!ALLOCATED_VFSSW(&vfssw[fstype]))
376 378 return (EINVAL);
377 379
378 380 /* Set up the operations vector. */
379 381
380 382 error = fs_copyfsops(template, &vfssw[fstype].vsw_vfsops, &unused_ops);
381 383
382 384 if (error != 0)
383 385 return (error);
384 386
385 387 vfssw[fstype].vsw_flag |= VSW_INSTALLED;
386 388
387 389 if (actual != NULL)
388 390 *actual = &vfssw[fstype].vsw_vfsops;
389 391
390 392 #if DEBUG
391 393 if (unused_ops != 0)
392 394 cmn_err(CE_WARN, "vfs_setfsops: %s: %d operations supplied "
393 395 "but not used", vfssw[fstype].vsw_name, unused_ops);
394 396 #endif
395 397
396 398 return (0);
397 399 }
398 400
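For reference, a filesystem's init routine supplies the template that fs_copyfsops() walks. The names below are hypothetical (compare tmpfs or any other bundled filesystem); entries omitted from the template are filled in from vfs_ops_table with fs_nosys or the listed defaults:

	static int
	myfs_init(int fstype, char *name)
	{
		static const fs_operation_def_t myfs_vfsops_template[] = {
			VFSNAME_MOUNT,		{ .vfs_mount = myfs_mount },
			VFSNAME_UNMOUNT,	{ .vfs_unmount = myfs_unmount },
			VFSNAME_ROOT,		{ .vfs_root = myfs_root },
			VFSNAME_STATVFS,	{ .vfs_statvfs = myfs_statvfs },
			NULL,			NULL
		};
		int error;

		if ((error = vfs_setfsops(fstype, myfs_vfsops_template,
		    NULL)) != 0) {
			cmn_err(CE_WARN, "myfs_init: bad vfs ops template");
			return (error);
		}
		return (0);
	}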
399 401 int
400 402 vfs_makefsops(const fs_operation_def_t *template, vfsops_t **actual)
401 403 {
402 404 int error;
403 405 int unused_ops;
404 406
405 407 *actual = (vfsops_t *)kmem_alloc(sizeof (vfsops_t), KM_SLEEP);
406 408
407 409 error = fs_copyfsops(template, *actual, &unused_ops);
408 410 if (error != 0) {
409 411 kmem_free(*actual, sizeof (vfsops_t));
410 412 *actual = NULL;
411 413 return (error);
412 414 }
413 415
414 416 return (0);
415 417 }
416 418
417 419 /*
418 420 * Free a vfsops structure created as a result of vfs_makefsops().
419 421 * NOTE: For a vfsops structure initialized by vfs_setfsops(), use
420 422 * vfs_freevfsops_by_type().
421 423 */
422 424 void
423 425 vfs_freevfsops(vfsops_t *vfsops)
424 426 {
425 427 kmem_free(vfsops, sizeof (vfsops_t));
426 428 }
427 429
428 430 /*
429 431 * Since the vfsops structure is part of the vfssw table and wasn't
430 432 * really allocated, we're not really freeing anything. We keep
431 433 * the name for consistency with vfs_freevfsops(). We do, however,
432 434 * need to take care of a little bookkeeping.
433 435 * NOTE: For a vfsops structure created by vfs_setfsops(), use
434 436 * vfs_freevfsops_by_type().
435 437 */
436 438 int
437 439 vfs_freevfsops_by_type(int fstype)
438 440 {
439 441
440 442 /* Verify that fstype refers to a loaded fs (and not fsid 0). */
441 443 if ((fstype <= 0) || (fstype >= nfstype))
442 444 return (EINVAL);
443 445
444 446 WLOCK_VFSSW();
445 447 if ((vfssw[fstype].vsw_flag & VSW_INSTALLED) == 0) {
446 448 WUNLOCK_VFSSW();
447 449 return (EINVAL);
448 450 }
449 451
450 452 vfssw[fstype].vsw_flag &= ~VSW_INSTALLED;
451 453 WUNLOCK_VFSSW();
452 454
453 455 return (0);
454 456 }
455 457
456 458 /* Support routines used to reference vfs_op */
457 459
458 460 /* Set the operations vector for a vfs */
459 461 void
460 462 vfs_setops(vfs_t *vfsp, vfsops_t *vfsops)
461 463 {
462 464 vfsops_t *op;
463 465
464 466 ASSERT(vfsp != NULL);
465 467 ASSERT(vfsops != NULL);
466 468
467 469 op = vfsp->vfs_op;
468 470 membar_consumer();
469 471 if (vfsp->vfs_femhead == NULL &&
470 472 casptr(&vfsp->vfs_op, op, vfsops) == op) {
471 473 return;
472 474 }
473 475 fsem_setvfsops(vfsp, vfsops);
474 476 }
475 477
476 478 /* Retrieve the operations vector for a vfs */
477 479 vfsops_t *
478 480 vfs_getops(vfs_t *vfsp)
479 481 {
480 482 vfsops_t *op;
481 483
482 484 ASSERT(vfsp != NULL);
483 485
484 486 op = vfsp->vfs_op;
485 487 membar_consumer();
486 488 if (vfsp->vfs_femhead == NULL && op == vfsp->vfs_op) {
487 489 return (op);
488 490 } else {
489 491 return (fsem_getvfsops(vfsp));
490 492 }
491 493 }
492 494
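vfs_setops() and vfs_getops() avoid taking a lock on the common path: they snapshot vfs_op, issue membar_consumer(), and as long as no fem/fsem head is installed they rely on casptr() or pointer equality; otherwise they fall back to the fsem_* slow path, which serializes against monitor installation. Callers are expected to go through these accessors (or vfs_matchops()) rather than touch vfs_op directly, e.g. (hypothetical check):

	/* Does vfsp belong to a given registered filesystem type? */
	if (vfs_matchops(vfsp, &vfssw[fstype].vsw_vfsops)) {
		/* vfsp is served by vfssw[fstype] */
	}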
493 495 /*
494 496 * Returns non-zero (1) if the vfsops matches that of the vfs.
495 497 * Returns zero (0) if not.
496 498 */
497 499 int
498 500 vfs_matchops(vfs_t *vfsp, vfsops_t *vfsops)
499 501 {
500 502 return (vfs_getops(vfsp) == vfsops);
501 503 }
502 504
503 505 /*
504 506 * Returns non-zero (1) if the file system has installed a non-default,
505 507 * non-error vfs_sync routine. Returns zero (0) otherwise.
506 508 */
507 509 int
508 510 vfs_can_sync(vfs_t *vfsp)
509 511 {
510 512 /* vfs_sync() routine is not the default/error function */
511 513 return (vfs_getops(vfsp)->vfs_sync != fs_sync);
512 514 }
513 515
514 516 /*
515 517 * Initialize a vfs structure.
516 518 */
517 519 void
518 520 vfs_init(vfs_t *vfsp, vfsops_t *op, void *data)
519 521 {
520 522 /* Other initialization has been moved to vfs_alloc() */
521 523 vfsp->vfs_count = 0;
522 524 vfsp->vfs_next = vfsp;
523 525 vfsp->vfs_prev = vfsp;
524 526 vfsp->vfs_zone_next = vfsp;
525 527 vfsp->vfs_zone_prev = vfsp;
526 528 vfsp->vfs_lofi_minor = 0;
527 529 sema_init(&vfsp->vfs_reflock, 1, NULL, SEMA_DEFAULT, NULL);
528 530 vfsimpl_setup(vfsp);
529 531 vfsp->vfs_data = (data);
530 532 vfs_setops((vfsp), (op));
531 533 }
532 534
533 535 /*
534 536 * Allocate and initialize the vfs implementation private data
535 537 * structure, vfs_impl_t.
536 538 */
537 539 void
538 540 vfsimpl_setup(vfs_t *vfsp)
539 541 {
540 542 int i;
541 543
542 544 if (vfsp->vfs_implp != NULL) {
543 545 return;
544 546 }
545 547
546 548 vfsp->vfs_implp = kmem_alloc(sizeof (vfs_impl_t), KM_SLEEP);
547 549 /* Note that these are #define'd in vfs.h */
548 550 vfsp->vfs_vskap = NULL;
549 551 vfsp->vfs_fstypevsp = NULL;
550 552
551 553 /* Set size of counted array, then zero the array */
552 554 vfsp->vfs_featureset[0] = VFS_FEATURE_MAXSZ - 1;
553 555 for (i = 1; i < VFS_FEATURE_MAXSZ; i++) {
554 556 vfsp->vfs_featureset[i] = 0;
555 557 }
556 558 }
557 559
558 560 /*
559 561 * Release the vfs_impl_t structure, if it exists. Some unbundled
560 562 * filesystems may not use the newer version of vfs and thus
561 563 * would not contain this implementation private data structure.
562 564 */
563 565 void
564 566 vfsimpl_teardown(vfs_t *vfsp)
565 567 {
566 568 vfs_impl_t *vip = vfsp->vfs_implp;
567 569
568 570 if (vip == NULL)
569 571 return;
570 572
571 573 kmem_free(vfsp->vfs_implp, sizeof (vfs_impl_t));
572 574 vfsp->vfs_implp = NULL;
573 575 }
574 576
575 577 /*
576 578 * VFS system calls: mount, umount, syssync, statfs, fstatfs, statvfs,
577 579 * fstatvfs, and sysfs moved to common/syscall.
578 580 */
579 581
580 582 /*
581 583 * Update every mounted file system. We call the vfs_sync operation of
582 584 * each file system type, passing it a NULL vfsp to indicate that all
583 585 * mounted file systems of that type should be updated.
584 586 */
585 587 void
586 588 vfs_sync(int flag)
587 589 {
588 590 struct vfssw *vswp;
589 591 RLOCK_VFSSW();
590 592 for (vswp = &vfssw[1]; vswp < &vfssw[nfstype]; vswp++) {
591 593 if (ALLOCATED_VFSSW(vswp) && VFS_INSTALLED(vswp)) {
592 594 vfs_refvfssw(vswp);
593 595 RUNLOCK_VFSSW();
594 596 (void) (*vswp->vsw_vfsops.vfs_sync)(NULL, flag,
595 597 CRED());
596 598 vfs_unrefvfssw(vswp);
597 599 RLOCK_VFSSW();
598 600 }
599 601 }
600 602 RUNLOCK_VFSSW();
601 603 }
602 604
603 605 void
604 606 sync(void)
605 607 {
606 608 vfs_sync(0);
607 609 }
608 610
609 611 /*
610 612 * External routines.
611 613 */
612 614
613 615 krwlock_t vfssw_lock; /* lock accesses to vfssw */
614 616
615 617 /*
616 618 * Lock for accessing the vfs linked list. Initialized in vfs_mountroot(),
617 619 * but otherwise should be accessed only via vfs_list_lock() and
618 620 * vfs_list_unlock(). Also used to protect the timestamp for mods to the list.
619 621 */
620 622 static krwlock_t vfslist;
621 623
622 624 /*
623 625 * Mount devfs on /devices. This is done right after root is mounted
624 626 * to provide device access support for the system
625 627 */
626 628 static void
627 629 vfs_mountdevices(void)
628 630 {
629 631 struct vfssw *vsw;
630 632 struct vnode *mvp;
631 633 struct mounta mounta = { /* fake mounta for devfs_mount() */
632 634 NULL,
633 635 NULL,
634 636 MS_SYSSPACE,
635 637 NULL,
636 638 NULL,
637 639 0,
638 640 NULL,
639 641 0
640 642 };
641 643
642 644 /*
643 645 * _init devfs module to fill in the vfssw
644 646 */
645 647 if (modload("fs", "devfs") == -1)
646 648 panic("Cannot _init devfs module");
647 649
648 650 /*
649 651 * Hold vfs
650 652 */
651 653 RLOCK_VFSSW();
652 654 vsw = vfs_getvfsswbyname("devfs");
653 655 VFS_INIT(&devices, &vsw->vsw_vfsops, NULL);
654 656 VFS_HOLD(&devices);
655 657
656 658 /*
657 659 * Locate mount point
658 660 */
659 661 if (lookupname("/devices", UIO_SYSSPACE, FOLLOW, NULLVPP, &mvp))
660 662 panic("Cannot find /devices");
661 663
662 664 /*
663 665 * Perform the mount of /devices
664 666 */
665 667 if (VFS_MOUNT(&devices, mvp, &mounta, CRED()))
666 668 panic("Cannot mount /devices");
667 669
668 670 RUNLOCK_VFSSW();
669 671
670 672 /*
671 673 * Set appropriate members and add to vfs list for mnttab display
672 674 */
673 675 vfs_setresource(&devices, "/devices", 0);
674 676 vfs_setmntpoint(&devices, "/devices", 0);
675 677
676 678 /*
677 679 * Hold the root of /devices so it won't go away
678 680 */
679 681 if (VFS_ROOT(&devices, &devicesdir))
680 682 panic("vfs_mountdevices: not devices root");
681 683
682 684 if (vfs_lock(&devices) != 0) {
683 685 VN_RELE(devicesdir);
684 686 cmn_err(CE_NOTE, "Cannot acquire vfs_lock of /devices");
685 687 return;
686 688 }
687 689
688 690 if (vn_vfswlock(mvp) != 0) {
689 691 vfs_unlock(&devices);
690 692 VN_RELE(devicesdir);
691 693 cmn_err(CE_NOTE, "Cannot acquire vfswlock of /devices");
692 694 return;
693 695 }
694 696
695 697 vfs_add(mvp, &devices, 0);
696 698 vn_vfsunlock(mvp);
697 699 vfs_unlock(&devices);
698 700 VN_RELE(devicesdir);
699 701 }
700 702
701 703 /*
702 704 * mount the first instance of /dev to root and remain mounted
703 705 */
704 706 static void
705 707 vfs_mountdev1(void)
706 708 {
707 709 struct vfssw *vsw;
708 710 struct vnode *mvp;
709 711 struct mounta mounta = { /* fake mounta for sdev_mount() */
710 712 NULL,
711 713 NULL,
712 714 MS_SYSSPACE | MS_OVERLAY,
713 715 NULL,
714 716 NULL,
715 717 0,
716 718 NULL,
717 719 0
718 720 };
719 721
720 722 /*
721 723 * _init dev module to fill in the vfssw
722 724 */
723 725 if (modload("fs", "dev") == -1)
724 726 cmn_err(CE_PANIC, "Cannot _init dev module\n");
725 727
726 728 /*
727 729 * Hold vfs
728 730 */
729 731 RLOCK_VFSSW();
730 732 vsw = vfs_getvfsswbyname("dev");
731 733 VFS_INIT(&dev, &vsw->vsw_vfsops, NULL);
732 734 VFS_HOLD(&dev);
733 735
734 736 /*
735 737 * Locate mount point
736 738 */
737 739 if (lookupname("/dev", UIO_SYSSPACE, FOLLOW, NULLVPP, &mvp))
738 740 cmn_err(CE_PANIC, "Cannot find /dev\n");
739 741
740 742 /*
741 743 * Perform the mount of /dev
742 744 */
743 745 if (VFS_MOUNT(&dev, mvp, &mounta, CRED()))
744 746 cmn_err(CE_PANIC, "Cannot mount /dev 1\n");
745 747
746 748 RUNLOCK_VFSSW();
747 749
748 750 /*
749 751 * Set appropriate members and add to vfs list for mnttab display
750 752 */
751 753 vfs_setresource(&dev, "/dev", 0);
752 754 vfs_setmntpoint(&dev, "/dev", 0);
753 755
754 756 /*
755 757 * Hold the root of /dev so it won't go away
756 758 */
757 759 if (VFS_ROOT(&dev, &devdir))
758 760 cmn_err(CE_PANIC, "vfs_mountdev1: not dev root");
759 761
760 762 if (vfs_lock(&dev) != 0) {
761 763 VN_RELE(devdir);
762 764 cmn_err(CE_NOTE, "Cannot acquire vfs_lock of /dev");
763 765 return;
764 766 }
765 767
766 768 if (vn_vfswlock(mvp) != 0) {
767 769 vfs_unlock(&dev);
768 770 VN_RELE(devdir);
769 771 cmn_err(CE_NOTE, "Cannot acquire vfswlock of /dev");
770 772 return;
771 773 }
772 774
773 775 vfs_add(mvp, &dev, 0);
774 776 vn_vfsunlock(mvp);
775 777 vfs_unlock(&dev);
776 778 VN_RELE(devdir);
777 779 }
778 780
779 781 /*
780 782 * Mount required filesystem. This is done right after root is mounted.
781 783 */
782 784 static void
783 785 vfs_mountfs(char *module, char *spec, char *path)
784 786 {
785 787 struct vnode *mvp;
786 788 struct mounta mounta;
787 789 vfs_t *vfsp;
788 790
789 791 mounta.flags = MS_SYSSPACE | MS_DATA;
790 792 mounta.fstype = module;
791 793 mounta.spec = spec;
792 794 mounta.dir = path;
793 795 if (lookupname(path, UIO_SYSSPACE, FOLLOW, NULLVPP, &mvp)) {
794 796 cmn_err(CE_WARN, "Cannot find %s", path);
795 797 return;
796 798 }
797 799 if (domount(NULL, &mounta, mvp, CRED(), &vfsp))
798 800 cmn_err(CE_WARN, "Cannot mount %s", path);
799 801 else
800 802 VFS_RELE(vfsp);
801 803 VN_RELE(mvp);
802 804 }
803 805
804 806 /*
805 807 * vfs_mountroot is called by main() to mount the root filesystem.
806 808 */
807 809 void
808 810 vfs_mountroot(void)
809 811 {
810 812 struct vnode *rvp = NULL;
811 813 char *path;
812 814 size_t plen;
813 815 struct vfssw *vswp;
814 816 proc_t *p;
815 817
816 818 rw_init(&vfssw_lock, NULL, RW_DEFAULT, NULL);
817 819 rw_init(&vfslist, NULL, RW_DEFAULT, NULL);
818 820
819 821 /*
820 822 * Alloc the vfs hash bucket array and locks
821 823 */
822 824 rvfs_list = kmem_zalloc(vfshsz * sizeof (rvfs_t), KM_SLEEP);
823 825
824 826 /*
825 827 * Call machine-dependent routine "rootconf" to choose a root
826 828 * file system type.
827 829 */
828 830 if (rootconf())
829 831 panic("vfs_mountroot: cannot mount root");
830 832 /*
831 833 * Get vnode for '/'. Set up rootdir, u.u_rdir and u.u_cdir
832 834 * to point to it. These are used by lookuppn() so that it
833 835 * knows where to start from ('/' or '.').
834 836 */
835 837 vfs_setmntpoint(rootvfs, "/", 0);
836 838 if (VFS_ROOT(rootvfs, &rootdir))
837 839 panic("vfs_mountroot: no root vnode");
838 840
839 841 /*
840 842 * At this point, the process tree consists of p0 and possibly some
841 843 * direct children of p0. (i.e. there are no grandchildren)
842 844 *
843 845 * Walk through them all, setting their current directory.
844 846 */
845 847 mutex_enter(&pidlock);
846 848 for (p = practive; p != NULL; p = p->p_next) {
847 849 ASSERT(p == &p0 || p->p_parent == &p0);
848 850
849 851 PTOU(p)->u_cdir = rootdir;
850 852 VN_HOLD(PTOU(p)->u_cdir);
851 853 PTOU(p)->u_rdir = NULL;
852 854 }
853 855 mutex_exit(&pidlock);
854 856
855 857 /*
856 858 * Setup the global zone's rootvp, now that it exists.
857 859 */
858 860 global_zone->zone_rootvp = rootdir;
859 861 VN_HOLD(global_zone->zone_rootvp);
860 862
861 863 /*
862 864 * Notify the module code that it can begin using the
863 865 * root filesystem instead of the boot program's services.
864 866 */
865 867 modrootloaded = 1;
866 868
867 869 /*
868 870 * Special handling for a ZFS root file system.
869 871 */
870 872 zfs_boot_init();
871 873
872 874 /*
873 875 * Set up mnttab information for root
874 876 */
875 877 vfs_setresource(rootvfs, rootfs.bo_name, 0);
876 878
877 879 /*
878 880 * Notify cluster software that the root filesystem is available.
879 881 */
880 882 clboot_mountroot();
881 883
882 884 /* Now that we're all done with the root FS, set up its vopstats */
883 885 if ((vswp = vfs_getvfsswbyvfsops(vfs_getops(rootvfs))) != NULL) {
884 886 /* Set flag for statistics collection */
885 887 if (vswp->vsw_flag & VSW_STATS) {
886 888 initialize_vopstats(&rootvfs->vfs_vopstats);
887 889 rootvfs->vfs_flag |= VFS_STATS;
888 890 rootvfs->vfs_fstypevsp =
889 891 get_fstype_vopstats(rootvfs, vswp);
890 892 rootvfs->vfs_vskap = get_vskstat_anchor(rootvfs);
891 893 }
892 894 vfs_unrefvfssw(vswp);
893 895 }
894 896
895 897 /*
896 898 * Mount /devices, /dev instance 1, /system/contract, /etc/mnttab,
897 899 * /etc/svc/volatile, /etc/dfs/sharetab, /system/object, and /proc.
898 900 */
899 901 vfs_mountdevices();
900 902 vfs_mountdev1();
901 903
902 904 vfs_mountfs("ctfs", "ctfs", CTFS_ROOT);
903 905 vfs_mountfs("proc", "/proc", "/proc");
904 906 vfs_mountfs("mntfs", "/etc/mnttab", "/etc/mnttab");
905 907 vfs_mountfs("tmpfs", "/etc/svc/volatile", "/etc/svc/volatile");
906 908 vfs_mountfs("objfs", "objfs", OBJFS_ROOT);
907 909
908 910 if (getzoneid() == GLOBAL_ZONEID) {
909 911 vfs_mountfs("sharefs", "sharefs", "/etc/dfs/sharetab");
910 912 }
911 913
912 914 #ifdef __sparc
913 915 /*
914 916 * This bit of magic can go away when we convert sparc to
915 917 * the new boot architecture based on ramdisk.
916 918 *
917 919 * Booting off a mirrored root volume:
918 920 * At this point, we have booted and mounted root on a
919 921 * single component of the mirror. Complete the boot
920 922 * by configuring SVM and converting the root to the
921 923 * dev_t of the mirrored root device. This dev_t conversion
922 924 * only works because the underlying device doesn't change.
923 925 */
924 926 if (root_is_svm) {
925 927 if (svm_rootconf()) {
926 928 panic("vfs_mountroot: cannot remount root");
927 929 }
928 930
929 931 /*
930 932 * mnttab should reflect the new root device
931 933 */
932 934 vfs_lock_wait(rootvfs);
933 935 vfs_setresource(rootvfs, rootfs.bo_name, 0);
934 936 vfs_unlock(rootvfs);
935 937 }
936 938 #endif /* __sparc */
937 939
938 940 if (strcmp(rootfs.bo_fstype, "zfs") != 0) {
939 941 /*
940 942 * Look up the root device via devfs so that a dv_node is
941 943 * created for it. The vnode is never VN_RELE()ed.
942 944 * We allocate more than MAXPATHLEN so that the
943 945 * buffer passed to i_ddi_prompath_to_devfspath() is
944 946 * exactly MAXPATHLEN (the function expects a buffer
945 947 * of that length).
946 948 */
947 949 plen = strlen("/devices");
948 950 path = kmem_alloc(plen + MAXPATHLEN, KM_SLEEP);
949 951 (void) strcpy(path, "/devices");
950 952
951 953 if (i_ddi_prompath_to_devfspath(rootfs.bo_name, path + plen)
952 954 != DDI_SUCCESS ||
953 955 lookupname(path, UIO_SYSSPACE, FOLLOW, NULLVPP, &rvp)) {
954 956
955 957 /* NUL terminate in case "path" has garbage */
956 958 path[plen + MAXPATHLEN - 1] = '\0';
957 959 #ifdef DEBUG
958 960 cmn_err(CE_WARN, "!Cannot lookup root device: %s",
959 961 path);
960 962 #endif
961 963 }
962 964 kmem_free(path, plen + MAXPATHLEN);
963 965 }
964 966
965 967 vfs_mnttabvp_setup();
966 968 }
967 969
968 970 /*
969 971 * Check to see if our "block device" is actually a file. If so,
970 972 * automatically add a lofi device, and keep track of this fact.
971 973 */
972 974 static int
973 975 lofi_add(const char *fsname, struct vfs *vfsp,
974 976 mntopts_t *mntopts, struct mounta *uap)
975 977 {
976 978 int fromspace = (uap->flags & MS_SYSSPACE) ?
977 979 UIO_SYSSPACE : UIO_USERSPACE;
978 980 struct lofi_ioctl *li = NULL;
979 981 struct vnode *vp = NULL;
980 982 struct pathname pn = { NULL };
981 983 ldi_ident_t ldi_id;
982 984 ldi_handle_t ldi_hdl;
983 985 vfssw_t *vfssw;
984 986 int minor;
985 987 int err = 0;
986 988
987 989 if ((vfssw = vfs_getvfssw(fsname)) == NULL)
988 990 return (0);
989 991
990 992 if (!(vfssw->vsw_flag & VSW_CANLOFI)) {
991 993 vfs_unrefvfssw(vfssw);
992 994 return (0);
993 995 }
994 996
995 997 vfs_unrefvfssw(vfssw);
996 998 vfssw = NULL;
997 999
998 1000 if (pn_get(uap->spec, fromspace, &pn) != 0)
999 1001 return (0);
1000 1002
1001 1003 if (lookupname(uap->spec, fromspace, FOLLOW, NULL, &vp) != 0)
1002 1004 goto out;
1003 1005
1004 1006 if (vp->v_type != VREG)
1005 1007 goto out;
1006 1008
1007 1009 /* OK, this is a lofi mount. */
1008 1010
1009 1011 if ((uap->flags & (MS_REMOUNT|MS_GLOBAL)) ||
1010 1012 vfs_optionisset_nolock(mntopts, MNTOPT_SUID, NULL) ||
1011 1013 vfs_optionisset_nolock(mntopts, MNTOPT_SETUID, NULL) ||
1012 1014 vfs_optionisset_nolock(mntopts, MNTOPT_DEVICES, NULL)) {
1013 1015 err = EINVAL;
1014 1016 goto out;
1015 1017 }
1016 1018
1017 1019 ldi_id = ldi_ident_from_anon();
1018 1020 li = kmem_zalloc(sizeof (*li), KM_SLEEP);
1019 1021 (void) strlcpy(li->li_filename, pn.pn_path, MAXPATHLEN);
1020 1022
1021 1023 err = ldi_open_by_name("/dev/lofictl", FREAD | FWRITE, kcred,
1022 1024 &ldi_hdl, ldi_id);
1023 1025
1024 1026 if (err)
1025 1027 goto out2;
1026 1028
1027 1029 err = ldi_ioctl(ldi_hdl, LOFI_MAP_FILE, (intptr_t)li,
1028 1030 FREAD | FWRITE | FKIOCTL, kcred, &minor);
1029 1031
1030 1032 (void) ldi_close(ldi_hdl, FREAD | FWRITE, kcred);
1031 1033
1032 1034 if (!err)
1033 1035 vfsp->vfs_lofi_minor = minor;
1034 1036
1035 1037 out2:
1036 1038 ldi_ident_release(ldi_id);
1037 1039 out:
1038 1040 if (li != NULL)
1039 1041 kmem_free(li, sizeof (*li));
1040 1042 if (vp != NULL)
1041 1043 VN_RELE(vp);
1042 1044 pn_free(&pn);
1043 1045 return (err);
1044 1046 }
1045 1047
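lofi_add() is what makes "mounting a regular file" work without an explicit lofiadm step: if the block device argument turns out to be a VREG file and the filesystem advertises VSW_CANLOFI, the file is mapped through /dev/lofictl before the fs-specific mount runs, and the resulting minor is remembered in vfs_lofi_minor so lofi_remove() can undo it at unmount. A hedged user-space sketch of the same mapping ioctl (roughly what lofiadm(1M) issues; assumes the <sys/lofi.h> names):

	#include <sys/lofi.h>
	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <stropts.h>

	int
	map_file_to_lofi(const char *path)
	{
		struct lofi_ioctl li;
		int fd, minor;

		(void) memset(&li, 0, sizeof (li));
		(void) strlcpy(li.li_filename, path, sizeof (li.li_filename));

		if ((fd = open("/dev/lofictl", O_RDWR)) < 0)
			return (-1);
		/* On success the ioctl's return value is the new lofi minor. */
		minor = ioctl(fd, LOFI_MAP_FILE, &li);
		(void) close(fd);
		return (minor);
	}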
1046 1048 static void
1047 1049 lofi_remove(struct vfs *vfsp)
1048 1050 {
1049 1051 struct lofi_ioctl *li = NULL;
1050 1052 ldi_ident_t ldi_id;
1051 1053 ldi_handle_t ldi_hdl;
1052 1054 int err;
1053 1055
1054 1056 if (vfsp->vfs_lofi_minor == 0)
1055 1057 return;
1056 1058
1057 1059 ldi_id = ldi_ident_from_anon();
1058 1060
1059 1061 li = kmem_zalloc(sizeof (*li), KM_SLEEP);
1060 1062 li->li_minor = vfsp->vfs_lofi_minor;
1061 1063 li->li_cleanup = B_TRUE;
1062 1064
1063 1065 err = ldi_open_by_name("/dev/lofictl", FREAD | FWRITE, kcred,
1064 1066 &ldi_hdl, ldi_id);
1065 1067
1066 1068 if (err)
1067 1069 goto out;
1068 1070
1069 1071 err = ldi_ioctl(ldi_hdl, LOFI_UNMAP_FILE_MINOR, (intptr_t)li,
1070 1072 FREAD | FWRITE | FKIOCTL, kcred, NULL);
1071 1073
1072 1074 (void) ldi_close(ldi_hdl, FREAD | FWRITE, kcred);
1073 1075
1074 1076 if (!err)
1075 1077 vfsp->vfs_lofi_minor = 0;
1076 1078
1077 1079 out:
1078 1080 ldi_ident_release(ldi_id);
1079 1081 if (li != NULL)
1080 1082 kmem_free(li, sizeof (*li));
1081 1083 }
1082 1084
1083 1085 /*
1084 1086 * Common mount code. Called from the system call entry point, from autofs,
1085 1087 * nfsv4 trigger mounts, and from pxfs.
1086 1088 *
1087 1089 * Takes the effective file system type, mount arguments, the mount point
1088 1090 * vnode, flags specifying whether the mount is a remount and whether it
1089 1091 * should be entered into the vfs list, and credentials. Fills in its vfspp
1090 1092 * parameter with the mounted file system instance's vfs.
1091 1093 *
1092 1094 * Note that the effective file system type is specified as a string. It may
1093 1095 * be null, in which case it's determined from the mount arguments, and may
1094 1096 * differ from the type specified in the mount arguments; this is a hook to
1095 1097 * allow interposition when instantiating file system instances.
1096 1098 *
1097 1099 * The caller is responsible for releasing its own hold on the mount point
1098 1100 * vp (this routine does its own hold when necessary).
1099 1101 * Also note that for remounts, the mount point vp should be the vnode for
1100 1102 * the root of the file system rather than the vnode that the file system
1101 1103 * is mounted on top of.
1102 1104 */
1103 1105 int
1104 1106 domount(char *fsname, struct mounta *uap, vnode_t *vp, struct cred *credp,
1105 1107 struct vfs **vfspp)
1106 1108 {
1107 1109 struct vfssw *vswp;
1108 1110 vfsops_t *vfsops;
1109 1111 struct vfs *vfsp;
1110 1112 struct vnode *bvp;
1111 1113 dev_t bdev = 0;
1112 1114 mntopts_t mnt_mntopts;
1113 1115 int error = 0;
1114 1116 int copyout_error = 0;
1115 1117 int ovflags;
1116 1118 char *opts = uap->optptr;
1117 1119 char *inargs = opts;
1118 1120 int optlen = uap->optlen;
1119 1121 int remount;
1120 1122 int rdonly;
1121 1123 int nbmand = 0;
1122 1124 int delmip = 0;
1123 1125 int addmip = 0;
1124 1126 int splice = ((uap->flags & MS_NOSPLICE) == 0);
1125 1127 int fromspace = (uap->flags & MS_SYSSPACE) ?
1126 1128 UIO_SYSSPACE : UIO_USERSPACE;
1127 1129 char *resource = NULL, *mountpt = NULL;
1128 1130 refstr_t *oldresource, *oldmntpt;
1129 1131 struct pathname pn, rpn;
1130 1132 vsk_anchor_t *vskap;
1131 1133 char fstname[FSTYPSZ];
1132 1134
1133 1135 /*
1134 1136 * The v_flag value for the mount point vp is permanently set
1135 1137 * to VVFSLOCK so that no one bypasses the vn_vfs*locks routine
1136 1138 * for mount point locking.
1137 1139 */
1138 1140 mutex_enter(&vp->v_lock);
1139 1141 vp->v_flag |= VVFSLOCK;
1140 1142 mutex_exit(&vp->v_lock);
1141 1143
1142 1144 mnt_mntopts.mo_count = 0;
1143 1145 /*
1144 1146 * Find the ops vector to use to invoke the file system-specific mount
1145 1147 * method. If the fsname argument is non-NULL, use it directly.
1146 1148 * Otherwise, dig the file system type information out of the mount
1147 1149 * arguments.
1148 1150 *
1149 1151 * A side effect is to hold the vfssw entry.
1150 1152 *
1151 1153 * Mount arguments can be specified in several ways, which are
1152 1154 * distinguished by flag bit settings. The preferred way is to set
1153 1155 * MS_OPTIONSTR, indicating an 8 argument mount with the file system
1154 1156 * type supplied as a character string and the last two arguments
1155 1157 * being a pointer to a character buffer and the size of the buffer.
1156 1158 * On entry, the buffer holds a null terminated list of options; on
1157 1159 * return, the string is the list of options the file system
1158 1160 * recognized. If MS_DATA is set arguments five and six point to a
1159 1161 * block of binary data which the file system interprets.
1160 1162 * A further wrinkle is that some callers don't set MS_FSS and MS_DATA
1161 1163 * consistently with these conventions. To handle them, we check to
1162 1164 * see whether the pointer to the file system name has a numeric value
1163 1165 * less than 256. If so, we treat it as an index.
1164 1166 */
1165 1167 if (fsname != NULL) {
1166 1168 if ((vswp = vfs_getvfssw(fsname)) == NULL) {
1167 1169 return (EINVAL);
1168 1170 }
1169 1171 } else if (uap->flags & (MS_OPTIONSTR | MS_DATA | MS_FSS)) {
1170 1172 size_t n;
1171 1173 uint_t fstype;
1172 1174
1173 1175 fsname = fstname;
1174 1176
1175 1177 if ((fstype = (uintptr_t)uap->fstype) < 256) {
1176 1178 RLOCK_VFSSW();
1177 1179 if (fstype == 0 || fstype >= nfstype ||
1178 1180 !ALLOCATED_VFSSW(&vfssw[fstype])) {
1179 1181 RUNLOCK_VFSSW();
1180 1182 return (EINVAL);
1181 1183 }
1182 1184 (void) strcpy(fsname, vfssw[fstype].vsw_name);
1183 1185 RUNLOCK_VFSSW();
1184 1186 if ((vswp = vfs_getvfssw(fsname)) == NULL)
1185 1187 return (EINVAL);
1186 1188 } else {
1187 1189 /*
1188 1190 * Handle either kernel or user address space.
1189 1191 */
1190 1192 if (uap->flags & MS_SYSSPACE) {
1191 1193 error = copystr(uap->fstype, fsname,
1192 1194 FSTYPSZ, &n);
1193 1195 } else {
1194 1196 error = copyinstr(uap->fstype, fsname,
1195 1197 FSTYPSZ, &n);
1196 1198 }
1197 1199 if (error) {
1198 1200 if (error == ENAMETOOLONG)
1199 1201 return (EINVAL);
1200 1202 return (error);
1201 1203 }
1202 1204 if ((vswp = vfs_getvfssw(fsname)) == NULL)
1203 1205 return (EINVAL);
1204 1206 }
1205 1207 } else {
1206 1208 if ((vswp = vfs_getvfsswbyvfsops(vfs_getops(rootvfs))) == NULL)
1207 1209 return (EINVAL);
1208 1210 fsname = vswp->vsw_name;
1209 1211 }
1210 1212 if (!VFS_INSTALLED(vswp))
1211 1213 return (EINVAL);
1212 1214
1213 1215 if ((error = secpolicy_fs_allowed_mount(fsname)) != 0) {
1214 1216 vfs_unrefvfssw(vswp);
1215 1217 return (error);
1216 1218 }
1217 1219
1218 1220 vfsops = &vswp->vsw_vfsops;
1219 1221
1220 1222 vfs_copyopttbl(&vswp->vsw_optproto, &mnt_mntopts);
1221 1223 /*
1222 1224 * Fetch mount options and parse them for generic vfs options
1223 1225 */
1224 1226 if (uap->flags & MS_OPTIONSTR) {
1225 1227 /*
1226 1228 * Limit the buffer size
1227 1229 */
1228 1230 if (optlen < 0 || optlen > MAX_MNTOPT_STR) {
1229 1231 error = EINVAL;
1230 1232 goto errout;
1231 1233 }
1232 1234 if ((uap->flags & MS_SYSSPACE) == 0) {
1233 1235 inargs = kmem_alloc(MAX_MNTOPT_STR, KM_SLEEP);
1234 1236 inargs[0] = '\0';
1235 1237 if (optlen) {
1236 1238 error = copyinstr(opts, inargs, (size_t)optlen,
1237 1239 NULL);
1238 1240 if (error) {
1239 1241 goto errout;
1240 1242 }
1241 1243 }
1242 1244 }
1243 1245 vfs_parsemntopts(&mnt_mntopts, inargs, 0);
1244 1246 }
1245 1247 /*
1246 1248 * Flag bits override the options string.
1247 1249 */
1248 1250 if (uap->flags & MS_REMOUNT)
1249 1251 vfs_setmntopt_nolock(&mnt_mntopts, MNTOPT_REMOUNT, NULL, 0, 0);
1250 1252 if (uap->flags & MS_RDONLY)
1251 1253 vfs_setmntopt_nolock(&mnt_mntopts, MNTOPT_RO, NULL, 0, 0);
1252 1254 if (uap->flags & MS_NOSUID)
1253 1255 vfs_setmntopt_nolock(&mnt_mntopts, MNTOPT_NOSUID, NULL, 0, 0);
1254 1256
1255 1257 /*
1256 1258 * Check if this is a remount; must be set in the option string and
1257 1259 * the file system must support a remount option.
1258 1260 */
1259 1261 if (remount = vfs_optionisset_nolock(&mnt_mntopts,
1260 1262 MNTOPT_REMOUNT, NULL)) {
1261 1263 if (!(vswp->vsw_flag & VSW_CANREMOUNT)) {
1262 1264 error = ENOTSUP;
1263 1265 goto errout;
1264 1266 }
1265 1267 uap->flags |= MS_REMOUNT;
1266 1268 }
1267 1269
1268 1270 /*
1269 1271 * uap->flags and vfs_optionisset() should agree.
1270 1272 */
1271 1273 if (rdonly = vfs_optionisset_nolock(&mnt_mntopts, MNTOPT_RO, NULL)) {
1272 1274 uap->flags |= MS_RDONLY;
1273 1275 }
1274 1276 if (vfs_optionisset_nolock(&mnt_mntopts, MNTOPT_NOSUID, NULL)) {
1275 1277 uap->flags |= MS_NOSUID;
1276 1278 }
1277 1279 nbmand = vfs_optionisset_nolock(&mnt_mntopts, MNTOPT_NBMAND, NULL);
1278 1280 ASSERT(splice || !remount);
1279 1281 /*
1280 1282 * If we are splicing the fs into the namespace,
1281 1283 * perform mount point checks.
1282 1284 *
1283 1285 * We want to resolve the path for the mount point to eliminate
1284 1286 * '.' and ".." and symlinks in mount points; we can't do the
1285 1287 * same for the resource string, since it would turn
1286 1288 * "/dev/dsk/c0t0d0s0" into "/devices/pci@...". We need to do
1287 1289 * this before grabbing vn_vfswlock(), because otherwise we
1288 1290 * would deadlock with lookuppn().
1289 1291 */
1290 1292 if (splice) {
1291 1293 ASSERT(vp->v_count > 0);
1292 1294
1293 1295 /*
1294 1296 * Pick up mount point and device from appropriate space.
1295 1297 */
1296 1298 if (pn_get(uap->spec, fromspace, &pn) == 0) {
1297 1299 resource = kmem_alloc(pn.pn_pathlen + 1,
1298 1300 KM_SLEEP);
1299 1301 (void) strcpy(resource, pn.pn_path);
1300 1302 pn_free(&pn);
1301 1303 }
1302 1304 /*
1303 1305 * Do a lookupname prior to taking the
1304 1306 * writelock. Mark this as completed if
1305 1307 * successful for later cleanup and addition to
1306 1308 * the mount in progress table.
1307 1309 */
1308 1310 if ((uap->flags & MS_GLOBAL) == 0 &&
1309 1311 lookupname(uap->spec, fromspace,
1310 1312 FOLLOW, NULL, &bvp) == 0) {
1311 1313 addmip = 1;
1312 1314 }
1313 1315
1314 1316 if ((error = pn_get(uap->dir, fromspace, &pn)) == 0) {
1315 1317 pathname_t *pnp;
1316 1318
1317 1319 if (*pn.pn_path != '/') {
1318 1320 error = EINVAL;
1319 1321 pn_free(&pn);
1320 1322 goto errout;
1321 1323 }
1322 1324 pn_alloc(&rpn);
1323 1325 /*
1324 1326 * Kludge to prevent autofs from deadlocking with
1325 1327 * itself when it calls domount().
1326 1328 *
1327 1329 * If autofs is calling, it is because it is doing
1328 1330 * (autofs) mounts in the process of an NFS mount. A
1329 1331 * lookuppn() here would cause us to block waiting for
1330 1332 * said NFS mount to complete, which can't since this
1331 1333 * is the thread that was supposed to doing it.
1332 1334 */
1333 1335 if (fromspace == UIO_USERSPACE) {
1334 1336 if ((error = lookuppn(&pn, &rpn, FOLLOW, NULL,
1335 1337 NULL)) == 0) {
1336 1338 pnp = &rpn;
1337 1339 } else {
1338 1340 /*
1339 1341 * The file disappeared or otherwise
1340 1342 * became inaccessible since we opened
1341 1343 * it; might as well fail the mount
1342 1344 * since the mount point is no longer
1343 1345 * accessible.
1344 1346 */
1345 1347 pn_free(&rpn);
1346 1348 pn_free(&pn);
1347 1349 goto errout;
1348 1350 }
1349 1351 } else {
1350 1352 pnp = &pn;
1351 1353 }
1352 1354 mountpt = kmem_alloc(pnp->pn_pathlen + 1, KM_SLEEP);
1353 1355 (void) strcpy(mountpt, pnp->pn_path);
1354 1356
1355 1357 /*
1356 1358 * If the addition of the zone's rootpath
1357 1359 * would push us over a total path length
1358 1360 * of MAXPATHLEN, we fail the mount with
1359 1361 * ENAMETOOLONG, which is what we would have
1360 1362 * gotten if we were trying to perform the same
1361 1363 * mount in the global zone.
1362 1364 *
1363 1365 * strlen() doesn't count the trailing
1364 1366 * '\0', but zone_rootpathlen counts both a
1365 1367 * trailing '/' and the terminating '\0'.
1366 1368 */
1367 1369 if ((curproc->p_zone->zone_rootpathlen - 1 +
1368 1370 strlen(mountpt)) > MAXPATHLEN ||
1369 1371 (resource != NULL &&
1370 1372 (curproc->p_zone->zone_rootpathlen - 1 +
1371 1373 strlen(resource)) > MAXPATHLEN)) {
1372 1374 error = ENAMETOOLONG;
1373 1375 }
1374 1376
1375 1377 pn_free(&rpn);
1376 1378 pn_free(&pn);
1377 1379 }
1378 1380
1379 1381 if (error)
1380 1382 goto errout;
1381 1383
1382 1384 /*
1383 1385 * Prevent path name resolution from proceeding past
1384 1386 * the mount point.
1385 1387 */
1386 1388 if (vn_vfswlock(vp) != 0) {
1387 1389 error = EBUSY;
1388 1390 goto errout;
1389 1391 }
1390 1392
1391 1393 /*
1392 1394 * Verify that it's legitimate to establish a mount on
1393 1395 * the prospective mount point.
1394 1396 */
1395 1397 if (vn_mountedvfs(vp) != NULL) {
1396 1398 /*
1397 1399 * The mount point lock was obtained after some
1398 1400 * other thread raced through and established a mount.
1399 1401 */
1400 1402 vn_vfsunlock(vp);
1401 1403 error = EBUSY;
1402 1404 goto errout;
1403 1405 }
1404 1406 if (vp->v_flag & VNOMOUNT) {
1405 1407 vn_vfsunlock(vp);
1406 1408 error = EINVAL;
1407 1409 goto errout;
1408 1410 }
1409 1411 }
1410 1412 if ((uap->flags & (MS_DATA | MS_OPTIONSTR)) == 0) {
1411 1413 uap->dataptr = NULL;
1412 1414 uap->datalen = 0;
1413 1415 }
1414 1416
1415 1417 /*
1416 1418 * If this is a remount, we don't want to create a new VFS.
1417 1419 * Instead, we pass the existing one with a remount flag.
1418 1420 */
1419 1421 if (remount) {
1420 1422 /*
1421 1423 * Confirm that the mount point is the root vnode of the
1422 1424 * file system that is being remounted.
1423 1425 * This can happen if the user specifies a different
1424 1426 * mount point directory pathname in the (re)mount command.
1425 1427 *
1426 1428 * Code below can only be reached if splice is true, so it's
1427 1429 * safe to do vn_vfsunlock() here.
1428 1430 */
1429 1431 if ((vp->v_flag & VROOT) == 0) {
1430 1432 vn_vfsunlock(vp);
1431 1433 error = ENOENT;
1432 1434 goto errout;
1433 1435 }
1434 1436 /*
1435 1437 * Disallow making file systems read-only unless file system
1436 1438 * explicitly allows it in its vfssw. Ignore other flags.
1437 1439 */
1438 1440 if (rdonly && vn_is_readonly(vp) == 0 &&
1439 1441 (vswp->vsw_flag & VSW_CANRWRO) == 0) {
1440 1442 vn_vfsunlock(vp);
1441 1443 error = EINVAL;
1442 1444 goto errout;
1443 1445 }
1444 1446 /*
1445 1447 * Disallow changing the NBMAND disposition of the file
1446 1448 * system on remounts.
1447 1449 */
1448 1450 if ((nbmand && ((vp->v_vfsp->vfs_flag & VFS_NBMAND) == 0)) ||
1449 1451 (!nbmand && (vp->v_vfsp->vfs_flag & VFS_NBMAND))) {
1450 1452 vn_vfsunlock(vp);
1451 1453 error = EINVAL;
1452 1454 goto errout;
1453 1455 }
1454 1456 vfsp = vp->v_vfsp;
1455 1457 ovflags = vfsp->vfs_flag;
1456 1458 vfsp->vfs_flag |= VFS_REMOUNT;
1457 1459 vfsp->vfs_flag &= ~VFS_RDONLY;
1458 1460 } else {
1459 1461 vfsp = vfs_alloc(KM_SLEEP);
1460 1462 VFS_INIT(vfsp, vfsops, NULL);
1461 1463 }
1462 1464
1463 1465 VFS_HOLD(vfsp);
1464 1466
1465 1467 if ((error = lofi_add(fsname, vfsp, &mnt_mntopts, uap)) != 0) {
1466 1468 if (!remount) {
1467 1469 if (splice)
1468 1470 vn_vfsunlock(vp);
1469 1471 vfs_free(vfsp);
1470 1472 } else {
1471 1473 vn_vfsunlock(vp);
1472 1474 VFS_RELE(vfsp);
1473 1475 }
1474 1476 goto errout;
1475 1477 }
1476 1478
1477 1479 /*
1478 1480 * PRIV_SYS_MOUNT doesn't mean you can become root.
1479 1481 */
1480 1482 if (vfsp->vfs_lofi_minor != 0) {
1481 1483 uap->flags |= MS_NOSUID;
1482 1484 vfs_setmntopt_nolock(&mnt_mntopts, MNTOPT_NOSUID, NULL, 0, 0);
1483 1485 }
1484 1486
1485 1487 /*
1486 1488  	 * The vfs_reflock is not used any more; the code below explicitly
1487 1489  	 * holds it, preventing others from accessing it directly.
1488 1490 */
1489 1491 if ((sema_tryp(&vfsp->vfs_reflock) == 0) &&
1490 1492 !(vfsp->vfs_flag & VFS_REMOUNT))
1491 1493 cmn_err(CE_WARN,
1492 1494 "mount type %s couldn't get vfs_reflock", vswp->vsw_name);
1493 1495
1494 1496 /*
1495 1497 * Lock the vfs. If this is a remount we want to avoid spurious umount
1496 1498 * failures that happen as a side-effect of fsflush() and other mount
1497 1499 * and unmount operations that might be going on simultaneously and
1498 1500 * may have locked the vfs currently. To not return EBUSY immediately
1499 1501  	 * here we use vfs_lock_wait() instead of vfs_lock() for the remount case.
1500 1502 */
1501 1503 if (!remount) {
1502 1504 if (error = vfs_lock(vfsp)) {
1503 1505 vfsp->vfs_flag = ovflags;
1504 1506
1505 1507 lofi_remove(vfsp);
1506 1508
1507 1509 if (splice)
1508 1510 vn_vfsunlock(vp);
1509 1511 vfs_free(vfsp);
1510 1512 goto errout;
1511 1513 }
1512 1514 } else {
1513 1515 vfs_lock_wait(vfsp);
1514 1516 }
1515 1517
1516 1518 /*
1517 1519 * Add device to mount in progress table, global mounts require special
1518 1520 * handling. It is possible that we have already done the lookupname
1519 1521 * on a spliced, non-global fs. If so, we don't want to do it again
1520 1522 * since we cannot do a lookupname after taking the
1521 1523 * wlock above. This case is for a non-spliced, non-global filesystem.
1522 1524 */
1523 1525 if (!addmip) {
1524 1526 if ((uap->flags & MS_GLOBAL) == 0 &&
1525 1527 lookupname(uap->spec, fromspace, FOLLOW, NULL, &bvp) == 0) {
1526 1528 addmip = 1;
1527 1529 }
1528 1530 }
1529 1531
1530 1532 if (addmip) {
1531 1533 vnode_t *lvp = NULL;
1532 1534
1533 1535 error = vfs_get_lofi(vfsp, &lvp);
1534 1536 if (error > 0) {
1535 1537 lofi_remove(vfsp);
1536 1538
1537 1539 if (splice)
1538 1540 vn_vfsunlock(vp);
1539 1541 vfs_unlock(vfsp);
1540 1542
1541 1543 if (remount) {
1542 1544 VFS_RELE(vfsp);
1543 1545 } else {
1544 1546 vfs_free(vfsp);
1545 1547 }
1546 1548
1547 1549 goto errout;
1548 1550 } else if (error == -1) {
1549 1551 bdev = bvp->v_rdev;
1550 1552 VN_RELE(bvp);
1551 1553 } else {
1552 1554 bdev = lvp->v_rdev;
1553 1555 VN_RELE(lvp);
1554 1556 VN_RELE(bvp);
1555 1557 }
1556 1558
1557 1559 vfs_addmip(bdev, vfsp);
1558 1560 addmip = 0;
1559 1561 delmip = 1;
1560 1562 }
1561 1563 /*
1562 1564 * Invalidate cached entry for the mount point.
1563 1565 */
1564 1566 if (splice)
1565 1567 dnlc_purge_vp(vp);
1566 1568
1567 1569 /*
1568 1570  	 * If we have an option string but the filesystem doesn't supply a
1569 1571 * prototype options table, create a table with the global
1570 1572 * options and sufficient room to accept all the options in the
1571 1573 * string. Then parse the passed in option string
1572 1574 * accepting all the options in the string. This gives us an
1573 1575 * option table with all the proper cancel properties for the
1574 1576 * global options.
1575 1577 *
1576 1578 * Filesystems that supply a prototype options table are handled
1577 1579 * earlier in this function.
1578 1580 */
1579 1581 if (uap->flags & MS_OPTIONSTR) {
1580 1582 if (!(vswp->vsw_flag & VSW_HASPROTO)) {
1581 1583 mntopts_t tmp_mntopts;
1582 1584
1583 1585 tmp_mntopts.mo_count = 0;
1584 1586 vfs_createopttbl_extend(&tmp_mntopts, inargs,
1585 1587 &mnt_mntopts);
1586 1588 vfs_parsemntopts(&tmp_mntopts, inargs, 1);
1587 1589 vfs_swapopttbl_nolock(&mnt_mntopts, &tmp_mntopts);
1588 1590 vfs_freeopttbl(&tmp_mntopts);
1589 1591 }
1590 1592 }
1591 1593
1592 1594 /*
1593 1595 * Serialize with zone creations.
1594 1596 */
1595 1597 mount_in_progress();
1596 1598 /*
1597 1599 * Instantiate (or reinstantiate) the file system. If appropriate,
1598 1600 * splice it into the file system name space.
1599 1601 *
1600 1602 * We want VFS_MOUNT() to be able to override the vfs_resource
1601 1603 * string if necessary (ie, mntfs), and also for a remount to
1602 1604 * change the same (necessary when remounting '/' during boot).
1603 1605 * So we set up vfs_mntpt and vfs_resource to what we think they
1604 1606 * should be, then hand off control to VFS_MOUNT() which can
1605 1607 * override this.
1606 1608 *
1607 1609 * For safety's sake, when changing vfs_resource or vfs_mntpt of
1608 1610 * a vfs which is on the vfs list (i.e. during a remount), we must
1609 1611 * never set those fields to NULL. Several bits of code make
1610 1612 * assumptions that the fields are always valid.
1611 1613 */
1612 1614 vfs_swapopttbl(&mnt_mntopts, &vfsp->vfs_mntopts);
1613 1615 if (remount) {
1614 1616 if ((oldresource = vfsp->vfs_resource) != NULL)
1615 1617 refstr_hold(oldresource);
1616 1618 if ((oldmntpt = vfsp->vfs_mntpt) != NULL)
1617 1619 refstr_hold(oldmntpt);
1618 1620 }
1619 1621 vfs_setresource(vfsp, resource, 0);
1620 1622 vfs_setmntpoint(vfsp, mountpt, 0);
1621 1623
1622 1624 /*
1623 1625 * going to mount on this vnode, so notify.
1624 1626 */
1625 1627 vnevent_mountedover(vp, NULL);
1626 1628 error = VFS_MOUNT(vfsp, vp, uap, credp);
1627 1629
1628 1630 if (uap->flags & MS_RDONLY)
1629 1631 vfs_setmntopt(vfsp, MNTOPT_RO, NULL, 0);
1630 1632 if (uap->flags & MS_NOSUID)
1631 1633 vfs_setmntopt(vfsp, MNTOPT_NOSUID, NULL, 0);
1632 1634 if (uap->flags & MS_GLOBAL)
1633 1635 vfs_setmntopt(vfsp, MNTOPT_GLOBAL, NULL, 0);
1634 1636
1635 1637 if (error) {
1636 1638 lofi_remove(vfsp);
1637 1639
1638 1640 if (remount) {
1639 1641 /* put back pre-remount options */
1640 1642 vfs_swapopttbl(&mnt_mntopts, &vfsp->vfs_mntopts);
1641 1643 vfs_setmntpoint(vfsp, refstr_value(oldmntpt),
1642 1644 VFSSP_VERBATIM);
1643 1645 if (oldmntpt)
1644 1646 refstr_rele(oldmntpt);
1645 1647 vfs_setresource(vfsp, refstr_value(oldresource),
1646 1648 VFSSP_VERBATIM);
1647 1649 if (oldresource)
1648 1650 refstr_rele(oldresource);
1649 1651 vfsp->vfs_flag = ovflags;
1650 1652 vfs_unlock(vfsp);
1651 1653 VFS_RELE(vfsp);
1652 1654 } else {
1653 1655 vfs_unlock(vfsp);
1654 1656 vfs_freemnttab(vfsp);
1655 1657 vfs_free(vfsp);
1656 1658 }
1657 1659 } else {
1658 1660 /*
1659 1661 * Set the mount time to now
1660 1662 */
1661 1663 vfsp->vfs_mtime = ddi_get_time();
1662 1664 if (remount) {
1663 1665 vfsp->vfs_flag &= ~VFS_REMOUNT;
1664 1666 if (oldresource)
1665 1667 refstr_rele(oldresource);
1666 1668 if (oldmntpt)
1667 1669 refstr_rele(oldmntpt);
1668 1670 } else if (splice) {
1669 1671 /*
1670 1672 * Link vfsp into the name space at the mount
1671 1673 * point. Vfs_add() is responsible for
1672 1674 * holding the mount point which will be
1673 1675 * released when vfs_remove() is called.
1674 1676 */
1675 1677 vfs_add(vp, vfsp, uap->flags);
1676 1678 } else {
1677 1679 /*
1678 1680 * Hold the reference to file system which is
1679 1681 * not linked into the name space.
1680 1682 */
1681 1683 vfsp->vfs_zone = NULL;
1682 1684 VFS_HOLD(vfsp);
1683 1685 vfsp->vfs_vnodecovered = NULL;
1684 1686 }
1685 1687 /*
1686 1688 * Set flags for global options encountered
1687 1689 */
1688 1690 if (vfs_optionisset(vfsp, MNTOPT_RO, NULL))
1689 1691 vfsp->vfs_flag |= VFS_RDONLY;
1690 1692 else
1691 1693 vfsp->vfs_flag &= ~VFS_RDONLY;
1692 1694 if (vfs_optionisset(vfsp, MNTOPT_NOSUID, NULL)) {
1693 1695 vfsp->vfs_flag |= (VFS_NOSETUID|VFS_NODEVICES);
1694 1696 } else {
1695 1697 if (vfs_optionisset(vfsp, MNTOPT_NODEVICES, NULL))
1696 1698 vfsp->vfs_flag |= VFS_NODEVICES;
1697 1699 else
1698 1700 vfsp->vfs_flag &= ~VFS_NODEVICES;
1699 1701 if (vfs_optionisset(vfsp, MNTOPT_NOSETUID, NULL))
1700 1702 vfsp->vfs_flag |= VFS_NOSETUID;
1701 1703 else
1702 1704 vfsp->vfs_flag &= ~VFS_NOSETUID;
1703 1705 }
1704 1706 if (vfs_optionisset(vfsp, MNTOPT_NBMAND, NULL))
1705 1707 vfsp->vfs_flag |= VFS_NBMAND;
1706 1708 else
1707 1709 vfsp->vfs_flag &= ~VFS_NBMAND;
1708 1710
1709 1711 if (vfs_optionisset(vfsp, MNTOPT_XATTR, NULL))
1710 1712 vfsp->vfs_flag |= VFS_XATTR;
1711 1713 else
1712 1714 vfsp->vfs_flag &= ~VFS_XATTR;
1713 1715
1714 1716 if (vfs_optionisset(vfsp, MNTOPT_NOEXEC, NULL))
1715 1717 vfsp->vfs_flag |= VFS_NOEXEC;
1716 1718 else
1717 1719 vfsp->vfs_flag &= ~VFS_NOEXEC;
1718 1720
1719 1721 /*
1720 1722 * Now construct the output option string of options
1721 1723 * we recognized.
1722 1724 */
1723 1725 if (uap->flags & MS_OPTIONSTR) {
1724 1726 vfs_list_read_lock();
1725 1727 copyout_error = vfs_buildoptionstr(
1726 1728 &vfsp->vfs_mntopts, inargs, optlen);
1727 1729 vfs_list_unlock();
1728 1730 if (copyout_error == 0 &&
1729 1731 (uap->flags & MS_SYSSPACE) == 0) {
1730 1732 copyout_error = copyoutstr(inargs, opts,
1731 1733 optlen, NULL);
1732 1734 }
1733 1735 }
1734 1736
1735 1737 /*
1736 1738 * If this isn't a remount, set up the vopstats before
1737 1739 * anyone can touch this. We only allow spliced file
1738 1740 * systems (file systems which are in the namespace) to
1739 1741 * have the VFS_STATS flag set.
1740 1742 * NOTE: PxFS mounts the underlying file system with
1741 1743 * MS_NOSPLICE set and copies those vfs_flags to its private
1742 1744 * vfs structure. As a result, PxFS should never have
1743 1745 * the VFS_STATS flag or else we might access the vfs
1744 1746 * statistics-related fields prior to them being
1745 1747 * properly initialized.
1746 1748 */
1747 1749 if (!remount && (vswp->vsw_flag & VSW_STATS) && splice) {
1748 1750 initialize_vopstats(&vfsp->vfs_vopstats);
1749 1751 /*
1750 1752 * We need to set vfs_vskap to NULL because there's
1751 1753 * a chance it won't be set below. This is checked
1752 1754 * in teardown_vopstats() so we can't have garbage.
1753 1755 */
1754 1756 vfsp->vfs_vskap = NULL;
1755 1757 vfsp->vfs_flag |= VFS_STATS;
1756 1758 vfsp->vfs_fstypevsp = get_fstype_vopstats(vfsp, vswp);
1757 1759 }
1758 1760
1759 1761 if (vswp->vsw_flag & VSW_XID)
1760 1762 vfsp->vfs_flag |= VFS_XID;
1761 1763
1762 1764 vfs_unlock(vfsp);
1763 1765 }
1764 1766 mount_completed();
1765 1767 if (splice)
1766 1768 vn_vfsunlock(vp);
1767 1769
1768 1770 if ((error == 0) && (copyout_error == 0)) {
1769 1771 if (!remount) {
1770 1772 /*
1771 1773 * Don't call get_vskstat_anchor() while holding
1772 1774 * locks since it allocates memory and calls
1773 1775 * VFS_STATVFS(). For NFS, the latter can generate
1774 1776 * an over-the-wire call.
1775 1777 */
1776 1778 vskap = get_vskstat_anchor(vfsp);
1777 1779 /* Only take the lock if we have something to do */
1778 1780 if (vskap != NULL) {
1779 1781 vfs_lock_wait(vfsp);
1780 1782 if (vfsp->vfs_flag & VFS_STATS) {
1781 1783 vfsp->vfs_vskap = vskap;
1782 1784 }
1783 1785 vfs_unlock(vfsp);
1784 1786 }
1785 1787 }
1786 1788 /* Return vfsp to caller. */
1787 1789 *vfspp = vfsp;
1790 + fsh_exec_mount_callbacks(vfsp);
1788 1791 }
1789 1792 errout:
1790 1793 vfs_freeopttbl(&mnt_mntopts);
1791 1794 if (resource != NULL)
1792 1795 kmem_free(resource, strlen(resource) + 1);
1793 1796 if (mountpt != NULL)
1794 1797 kmem_free(mountpt, strlen(mountpt) + 1);
1795 1798 /*
1796 1799 * It is possible we errored prior to adding to mount in progress
1797 1800 * table. Must free vnode we acquired with successful lookupname.
1798 1801 */
1799 1802 if (addmip)
1800 1803 VN_RELE(bvp);
1801 1804 if (delmip)
1802 1805 vfs_delmip(vfsp);
1803 1806 ASSERT(vswp != NULL);
1804 1807 vfs_unrefvfssw(vswp);
1805 1808 if (inargs != opts)
1806 1809 kmem_free(inargs, MAX_MNTOPT_STR);
1807 1810 if (copyout_error) {
1808 1811 lofi_remove(vfsp);
1809 1812 VFS_RELE(vfsp);
1810 1813 error = copyout_error;
1811 1814 }
1812 1815 return (error);
1813 1816 }
1814 1817
1815 1818 static void
1816 1819 vfs_setpath(
1817 1820 struct vfs *vfsp, /* vfs being updated */
1818 1821 refstr_t **refp, /* Ref-count string to contain the new path */
1819 1822 const char *newpath, /* Path to add to refp (above) */
1820 1823 uint32_t flag) /* flag */
1821 1824 {
1822 1825 size_t len;
1823 1826 refstr_t *ref;
1824 1827 zone_t *zone = curproc->p_zone;
1825 1828 char *sp;
1826 1829 int have_list_lock = 0;
1827 1830
1828 1831 ASSERT(!VFS_ON_LIST(vfsp) || vfs_lock_held(vfsp));
1829 1832
1830 1833 /*
1831 1834 * New path must be less than MAXPATHLEN because mntfs
1832 1835 * will only display up to MAXPATHLEN bytes. This is currently
1833 1836 * safe, because domount() uses pn_get(), and other callers
1834 1837 * similarly cap the size to fewer than MAXPATHLEN bytes.
1835 1838 */
1836 1839
1837 1840 ASSERT(strlen(newpath) < MAXPATHLEN);
1838 1841
1839 1842 /* mntfs requires consistency while vfs list lock is held */
1840 1843
1841 1844 if (VFS_ON_LIST(vfsp)) {
1842 1845 have_list_lock = 1;
1843 1846 vfs_list_lock();
1844 1847 }
1845 1848
1846 1849 if (*refp != NULL)
1847 1850 refstr_rele(*refp);
1848 1851
1849 1852 /*
1850 1853 * If we are in a non-global zone then we prefix the supplied path,
1851 1854 * newpath, with the zone's root path, with two exceptions. The first
1852 1855 * is where we have been explicitly directed to avoid doing so; this
1853 1856 * will be the case following a failed remount, where the path supplied
1854 1857 * will be a saved version which must now be restored. The second
1855 1858 * exception is where newpath is not a pathname but a descriptive name,
1856 1859 * e.g. "procfs".
1857 1860 */
1858 1861 if (zone == global_zone || (flag & VFSSP_VERBATIM) || *newpath != '/') {
1859 1862 ref = refstr_alloc(newpath);
1860 1863 goto out;
1861 1864 }
1862 1865
1863 1866 /*
1864 1867 * Truncate the trailing '/' in the zoneroot, and merge
1865 1868 * in the zone's rootpath with the "newpath" (resource
1866 1869 * or mountpoint) passed in.
1867 1870 *
1868 1871 * The size of the required buffer is thus the size of
1869 1872 * the buffer required for the passed-in newpath
1870 1873 * (strlen(newpath) + 1), plus the size of the buffer
1871 1874 * required to hold zone_rootpath (zone_rootpathlen)
1872 1875 * minus one for one of the now-superfluous NUL
1873 1876 * terminations, minus one for the trailing '/'.
1874 1877 *
1875 1878 * That gives us:
1876 1879 *
1877 1880 * (strlen(newpath) + 1) + zone_rootpathlen - 1 - 1
1878 1881 *
1879 1882 * Which is what we have below.
1880 1883 */
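
	/*
	 * Worked example (hypothetical values): with zone_rootpath
	 * "/zones/z1/" (zone_rootpathlen == 11, counting the NUL) and
	 * newpath "/export" (strlen == 7), len is 7 + 11 - 1 == 17,
	 * which exactly holds the merged string "/zones/z1" + "/export"
	 * (16 characters plus the terminating NUL).
	 */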
1881 1884
1882 1885 len = strlen(newpath) + zone->zone_rootpathlen - 1;
1883 1886 sp = kmem_alloc(len, KM_SLEEP);
1884 1887
1885 1888 /*
1886 1889 * Copy everything including the trailing slash, which
1887 1890 * we then overwrite with the NUL character.
1888 1891 */
1889 1892
1890 1893 (void) strcpy(sp, zone->zone_rootpath);
1891 1894 sp[zone->zone_rootpathlen - 2] = '\0';
1892 1895 (void) strcat(sp, newpath);
1893 1896
1894 1897 ref = refstr_alloc(sp);
1895 1898 kmem_free(sp, len);
1896 1899 out:
1897 1900 *refp = ref;
1898 1901
1899 1902 if (have_list_lock) {
1900 1903 vfs_mnttab_modtimeupd();
1901 1904 vfs_list_unlock();
1902 1905 }
1903 1906 }
1904 1907
1905 1908 /*
1906 1909 * Record a mounted resource name in a vfs structure.
1907 1910 * If vfsp is already mounted, caller must hold the vfs lock.
1908 1911 */
1909 1912 void
1910 1913 vfs_setresource(struct vfs *vfsp, const char *resource, uint32_t flag)
1911 1914 {
1912 1915 if (resource == NULL || resource[0] == '\0')
1913 1916 resource = VFS_NORESOURCE;
1914 1917 vfs_setpath(vfsp, &vfsp->vfs_resource, resource, flag);
1915 1918 }
1916 1919
1917 1920 /*
1918 1921 * Record a mount point name in a vfs structure.
1919 1922 * If vfsp is already mounted, caller must hold the vfs lock.
1920 1923 */
1921 1924 void
1922 1925 vfs_setmntpoint(struct vfs *vfsp, const char *mntpt, uint32_t flag)
1923 1926 {
1924 1927 if (mntpt == NULL || mntpt[0] == '\0')
1925 1928 mntpt = VFS_NOMNTPT;
1926 1929 vfs_setpath(vfsp, &vfsp->vfs_mntpt, mntpt, flag);
1927 1930 }
1928 1931
1929 1932 /* Returns the vfs_resource. Caller must call refstr_rele() when finished. */
1930 1933
1931 1934 refstr_t *
1932 1935 vfs_getresource(const struct vfs *vfsp)
1933 1936 {
1934 1937 refstr_t *resource;
1935 1938
1936 1939 vfs_list_read_lock();
1937 1940 resource = vfsp->vfs_resource;
1938 1941 refstr_hold(resource);
1939 1942 vfs_list_unlock();
1940 1943
1941 1944 return (resource);
1942 1945 }
1943 1946
1944 1947 /* Returns the vfs_mntpt. Caller must call refstr_rele() when finished. */
1945 1948
1946 1949 refstr_t *
1947 1950 vfs_getmntpoint(const struct vfs *vfsp)
1948 1951 {
1949 1952 refstr_t *mntpt;
1950 1953
1951 1954 vfs_list_read_lock();
1952 1955 mntpt = vfsp->vfs_mntpt;
1953 1956 refstr_hold(mntpt);
1954 1957 vfs_list_unlock();
1955 1958
1956 1959 return (mntpt);
1957 1960 }
1958 1961
1959 1962 /*
1960 1963 * Create an empty options table with enough empty slots to hold all
1961 1964  * the options in the options string passed as an argument.
1962 1965 * Potentially prepend another options table.
1963 1966 *
1964 1967 * Note: caller is responsible for locking the vfs list, if needed,
1965 1968 * to protect mops.
1966 1969 */
1967 1970 static void
1968 1971 vfs_createopttbl_extend(mntopts_t *mops, const char *opts,
1969 1972 const mntopts_t *mtmpl)
1970 1973 {
1971 1974 const char *s = opts;
1972 1975 uint_t count;
1973 1976
1974 1977 if (opts == NULL || *opts == '\0') {
1975 1978 count = 0;
1976 1979 } else {
1977 1980 count = 1;
1978 1981
1979 1982 /*
1980 1983 * Count number of options in the string
1981 1984 */
1982 1985 for (s = strchr(s, ','); s != NULL; s = strchr(s, ',')) {
1983 1986 count++;
1984 1987 s++;
1985 1988 }
1986 1989 }
1987 1990 vfs_copyopttbl_extend(mtmpl, mops, count);
1988 1991 }
1989 1992
1990 1993 /*
1991 1994 * Create an empty options table with enough empty slots to hold all
1992 1995  * the options in the options string passed as an argument.
1993 1996 *
1994 1997 * This function is *not* for general use by filesystems.
1995 1998 *
1996 1999 * Note: caller is responsible for locking the vfs list, if needed,
1997 2000 * to protect mops.
1998 2001 */
1999 2002 void
2000 2003 vfs_createopttbl(mntopts_t *mops, const char *opts)
2001 2004 {
2002 2005 vfs_createopttbl_extend(mops, opts, NULL);
2003 2006 }
2004 2007
2005 2008
2006 2009 /*
2007 2010 * Swap two mount options tables
2008 2011 */
2009 2012 static void
2010 2013 vfs_swapopttbl_nolock(mntopts_t *optbl1, mntopts_t *optbl2)
2011 2014 {
2012 2015 uint_t tmpcnt;
2013 2016 mntopt_t *tmplist;
2014 2017
2015 2018 tmpcnt = optbl2->mo_count;
2016 2019 tmplist = optbl2->mo_list;
2017 2020 optbl2->mo_count = optbl1->mo_count;
2018 2021 optbl2->mo_list = optbl1->mo_list;
2019 2022 optbl1->mo_count = tmpcnt;
2020 2023 optbl1->mo_list = tmplist;
2021 2024 }
2022 2025
2023 2026 static void
2024 2027 vfs_swapopttbl(mntopts_t *optbl1, mntopts_t *optbl2)
2025 2028 {
2026 2029 vfs_list_lock();
2027 2030 vfs_swapopttbl_nolock(optbl1, optbl2);
2028 2031 vfs_mnttab_modtimeupd();
2029 2032 vfs_list_unlock();
2030 2033 }
2031 2034
2032 2035 static char **
2033 2036 vfs_copycancelopt_extend(char **const moc, int extend)
2034 2037 {
2035 2038 int i = 0;
2036 2039 int j;
2037 2040 char **result;
2038 2041
2039 2042 if (moc != NULL) {
2040 2043 for (; moc[i] != NULL; i++)
2041 2044 /* count number of options to cancel */;
2042 2045 }
2043 2046
2044 2047 if (i + extend == 0)
2045 2048 return (NULL);
2046 2049
2047 2050 result = kmem_alloc((i + extend + 1) * sizeof (char *), KM_SLEEP);
2048 2051
2049 2052 for (j = 0; j < i; j++) {
2050 2053 result[j] = kmem_alloc(strlen(moc[j]) + 1, KM_SLEEP);
2051 2054 (void) strcpy(result[j], moc[j]);
2052 2055 }
2053 2056 for (; j <= i + extend; j++)
2054 2057 result[j] = NULL;
2055 2058
2056 2059 return (result);
2057 2060 }
2058 2061
2059 2062 static void
2060 2063 vfs_copyopt(const mntopt_t *s, mntopt_t *d)
2061 2064 {
2062 2065 char *sp, *dp;
2063 2066
2064 2067 d->mo_flags = s->mo_flags;
2065 2068 d->mo_data = s->mo_data;
2066 2069 sp = s->mo_name;
2067 2070 if (sp != NULL) {
2068 2071 dp = kmem_alloc(strlen(sp) + 1, KM_SLEEP);
2069 2072 (void) strcpy(dp, sp);
2070 2073 d->mo_name = dp;
2071 2074 } else {
2072 2075 d->mo_name = NULL; /* should never happen */
2073 2076 }
2074 2077
2075 2078 d->mo_cancel = vfs_copycancelopt_extend(s->mo_cancel, 0);
2076 2079
2077 2080 sp = s->mo_arg;
2078 2081 if (sp != NULL) {
2079 2082 dp = kmem_alloc(strlen(sp) + 1, KM_SLEEP);
2080 2083 (void) strcpy(dp, sp);
2081 2084 d->mo_arg = dp;
2082 2085 } else {
2083 2086 d->mo_arg = NULL;
2084 2087 }
2085 2088 }
2086 2089
2087 2090 /*
2088 2091 * Copy a mount options table, possibly allocating some spare
2089 2092 * slots at the end. It is permissible to copy_extend the NULL table.
2090 2093 */
2091 2094 static void
2092 2095 vfs_copyopttbl_extend(const mntopts_t *smo, mntopts_t *dmo, int extra)
2093 2096 {
2094 2097 uint_t i, count;
2095 2098 mntopt_t *motbl;
2096 2099
2097 2100 /*
2098 2101 * Clear out any existing stuff in the options table being initialized
2099 2102 */
2100 2103 vfs_freeopttbl(dmo);
2101 2104 count = (smo == NULL) ? 0 : smo->mo_count;
2102 2105 if ((count + extra) == 0) /* nothing to do */
2103 2106 return;
2104 2107 dmo->mo_count = count + extra;
2105 2108 motbl = kmem_zalloc((count + extra) * sizeof (mntopt_t), KM_SLEEP);
2106 2109 dmo->mo_list = motbl;
2107 2110 for (i = 0; i < count; i++) {
2108 2111 vfs_copyopt(&smo->mo_list[i], &motbl[i]);
2109 2112 }
2110 2113 for (i = count; i < count + extra; i++) {
2111 2114 motbl[i].mo_flags = MO_EMPTY;
2112 2115 }
2113 2116 }
2114 2117
2115 2118 /*
2116 2119 * Copy a mount options table.
2117 2120 *
2118 2121 * This function is *not* for general use by filesystems.
2119 2122 *
2120 2123 * Note: caller is responsible for locking the vfs list, if needed,
2121 2124 * to protect smo and dmo.
2122 2125 */
2123 2126 void
2124 2127 vfs_copyopttbl(const mntopts_t *smo, mntopts_t *dmo)
2125 2128 {
2126 2129 vfs_copyopttbl_extend(smo, dmo, 0);
2127 2130 }
2128 2131
2129 2132 static char **
2130 2133 vfs_mergecancelopts(const mntopt_t *mop1, const mntopt_t *mop2)
2131 2134 {
2132 2135 int c1 = 0;
2133 2136 int c2 = 0;
2134 2137 char **result;
2135 2138 char **sp1, **sp2, **dp;
2136 2139
2137 2140 /*
2138 2141 * First we count both lists of cancel options.
2139 2142 * If either is NULL or has no elements, we return a copy of
2140 2143 * the other.
2141 2144 */
2142 2145 if (mop1->mo_cancel != NULL) {
2143 2146 for (; mop1->mo_cancel[c1] != NULL; c1++)
2144 2147 /* count cancel options in mop1 */;
2145 2148 }
2146 2149
2147 2150 if (c1 == 0)
2148 2151 return (vfs_copycancelopt_extend(mop2->mo_cancel, 0));
2149 2152
2150 2153 if (mop2->mo_cancel != NULL) {
2151 2154 for (; mop2->mo_cancel[c2] != NULL; c2++)
2152 2155 /* count cancel options in mop2 */;
2153 2156 }
2154 2157
2155 2158 result = vfs_copycancelopt_extend(mop1->mo_cancel, c2);
2156 2159
2157 2160 if (c2 == 0)
2158 2161 return (result);
2159 2162
2160 2163 /*
2161 2164 * When we get here, we've got two sets of cancel options;
2162 2165 * we need to merge the two sets. We know that the result
2163 2166 * array has "c1+c2+1" entries and in the end we might shrink
2164 2167 * it.
2165 2168 * Result now has a copy of the c1 entries from mop1; we'll
2166 2169 	 * now look up each entry of mop2 in mop1 and copy it only if
2167 2170 	 * it is not already present.
2168 2171 * This operation is O(n^2) but it's only called once per
2169 2172 * filesystem per duplicate option. This is a situation
2170 2173 * which doesn't arise with the filesystems in ON and
2171 2174 * n is generally 1.
2172 2175 */
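
	/*
	 * Example (hypothetical cancel lists): if mop1 cancels { "a", "b" }
	 * and mop2 cancels { "b", "c" }, result starts out as { "a", "b" }
	 * with room for two more entries; only "c" is copied from mop2, and
	 * the array is then shrunk from c1 + c2 + 1 == 5 slots to the 4
	 * actually used ("a", "b", "c" and the terminating NULL).
	 */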
2173 2176
2174 2177 dp = &result[c1];
2175 2178 for (sp2 = mop2->mo_cancel; *sp2 != NULL; sp2++) {
2176 2179 for (sp1 = mop1->mo_cancel; *sp1 != NULL; sp1++) {
2177 2180 if (strcmp(*sp1, *sp2) == 0)
2178 2181 break;
2179 2182 }
2180 2183 if (*sp1 == NULL) {
2181 2184 /*
2182 2185 * Option *sp2 not found in mop1, so copy it.
2183 2186 * The calls to vfs_copycancelopt_extend()
2184 2187 * guarantee that there's enough room.
2185 2188 */
2186 2189 *dp = kmem_alloc(strlen(*sp2) + 1, KM_SLEEP);
2187 2190 (void) strcpy(*dp++, *sp2);
2188 2191 }
2189 2192 }
2190 2193 if (dp != &result[c1+c2]) {
2191 2194 size_t bytes = (dp - result + 1) * sizeof (char *);
2192 2195 char **nres = kmem_alloc(bytes, KM_SLEEP);
2193 2196
2194 2197 bcopy(result, nres, bytes);
2195 2198 kmem_free(result, (c1 + c2 + 1) * sizeof (char *));
2196 2199 result = nres;
2197 2200 }
2198 2201 return (result);
2199 2202 }
2200 2203
2201 2204 /*
2202 2205 * Merge two mount option tables (outer and inner) into one. This is very
2203 2206 * similar to "merging" global variables and automatic variables in C.
2204 2207 *
2205 2208 * This isn't (and doesn't have to be) fast.
2206 2209 *
2207 2210 * This function is *not* for general use by filesystems.
2208 2211 *
2209 2212 * Note: caller is responsible for locking the vfs list, if needed,
2210 2213 * to protect omo, imo & dmo.
2211 2214 */
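
/*
 * Example (hypothetical option names): if omo defines "atime" and
 * "quota" while imo defines "quota" and "logging", dmo ends up with
 * "atime", "quota" and "logging"; the inner definition of "quota"
 * overrides the outer one, and the cancel lists of the two "quota"
 * entries are merged.
 */
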
2212 2215 void
2213 2216 vfs_mergeopttbl(const mntopts_t *omo, const mntopts_t *imo, mntopts_t *dmo)
2214 2217 {
2215 2218 uint_t i, count;
2216 2219 mntopt_t *mop, *motbl;
2217 2220 uint_t freeidx;
2218 2221
2219 2222 /*
2220 2223 * First determine how much space we need to allocate.
2221 2224 */
2222 2225 count = omo->mo_count;
2223 2226 for (i = 0; i < imo->mo_count; i++) {
2224 2227 if (imo->mo_list[i].mo_flags & MO_EMPTY)
2225 2228 continue;
2226 2229 if (vfs_hasopt(omo, imo->mo_list[i].mo_name) == NULL)
2227 2230 count++;
2228 2231 }
2229 2232 ASSERT(count >= omo->mo_count &&
2230 2233 count <= omo->mo_count + imo->mo_count);
2231 2234 motbl = kmem_alloc(count * sizeof (mntopt_t), KM_SLEEP);
2232 2235 for (i = 0; i < omo->mo_count; i++)
2233 2236 vfs_copyopt(&omo->mo_list[i], &motbl[i]);
2234 2237 freeidx = omo->mo_count;
2235 2238 for (i = 0; i < imo->mo_count; i++) {
2236 2239 if (imo->mo_list[i].mo_flags & MO_EMPTY)
2237 2240 continue;
2238 2241 if ((mop = vfs_hasopt(omo, imo->mo_list[i].mo_name)) != NULL) {
2239 2242 char **newcanp;
2240 2243 uint_t index = mop - omo->mo_list;
2241 2244
2242 2245 newcanp = vfs_mergecancelopts(mop, &motbl[index]);
2243 2246
2244 2247 vfs_freeopt(&motbl[index]);
2245 2248 vfs_copyopt(&imo->mo_list[i], &motbl[index]);
2246 2249
2247 2250 vfs_freecancelopt(motbl[index].mo_cancel);
2248 2251 motbl[index].mo_cancel = newcanp;
2249 2252 } else {
2250 2253 /*
2251 2254 * If it's a new option, just copy it over to the first
2252 2255 * free location.
2253 2256 */
2254 2257 vfs_copyopt(&imo->mo_list[i], &motbl[freeidx++]);
2255 2258 }
2256 2259 }
2257 2260 dmo->mo_count = count;
2258 2261 dmo->mo_list = motbl;
2259 2262 }
2260 2263
2261 2264 /*
2262 2265 * Functions to set and clear mount options in a mount options table.
2263 2266 */
2264 2267
2265 2268 /*
2266 2269 * Clear a mount option, if it exists.
2267 2270 *
2268 2271 * The update_mnttab arg indicates whether mops is part of a vfs that is on
2269 2272 * the vfs list.
2270 2273 */
2271 2274 static void
2272 2275 vfs_clearmntopt_nolock(mntopts_t *mops, const char *opt, int update_mnttab)
2273 2276 {
2274 2277 struct mntopt *mop;
2275 2278 uint_t i, count;
2276 2279
2277 2280 ASSERT(!update_mnttab || RW_WRITE_HELD(&vfslist));
2278 2281
2279 2282 count = mops->mo_count;
2280 2283 for (i = 0; i < count; i++) {
2281 2284 mop = &mops->mo_list[i];
2282 2285
2283 2286 if (mop->mo_flags & MO_EMPTY)
2284 2287 continue;
2285 2288 if (strcmp(opt, mop->mo_name))
2286 2289 continue;
2287 2290 mop->mo_flags &= ~MO_SET;
2288 2291 if (mop->mo_arg != NULL) {
2289 2292 kmem_free(mop->mo_arg, strlen(mop->mo_arg) + 1);
2290 2293 }
2291 2294 mop->mo_arg = NULL;
2292 2295 if (update_mnttab)
2293 2296 vfs_mnttab_modtimeupd();
2294 2297 break;
2295 2298 }
2296 2299 }
2297 2300
2298 2301 void
2299 2302 vfs_clearmntopt(struct vfs *vfsp, const char *opt)
2300 2303 {
2301 2304 int gotlock = 0;
2302 2305
2303 2306 if (VFS_ON_LIST(vfsp)) {
2304 2307 gotlock = 1;
2305 2308 vfs_list_lock();
2306 2309 }
2307 2310 vfs_clearmntopt_nolock(&vfsp->vfs_mntopts, opt, gotlock);
2308 2311 if (gotlock)
2309 2312 vfs_list_unlock();
2310 2313 }
2311 2314
2312 2315
2313 2316 /*
2314 2317 * Set a mount option on. If it's not found in the table, it's silently
2315 2318 * ignored. If the option has MO_IGNORE set, it is still set unless the
2316 2319 * VFS_NOFORCEOPT bit is set in the flags. Also, VFS_DISPLAY/VFS_NODISPLAY flag
2317 2320 * bits can be used to toggle the MO_NODISPLAY bit for the option.
2318 2321  * If the VFS_CREATEOPT flag bit is set, the option passed in is created
2319 2322  * in the first option slot marked MO_EMPTY.
2320 2323 *
2321 2324 * The update_mnttab arg indicates whether mops is part of a vfs that is on
2322 2325 * the vfs list.
2323 2326 */
2324 2327 static void
2325 2328 vfs_setmntopt_nolock(mntopts_t *mops, const char *opt,
2326 2329 const char *arg, int flags, int update_mnttab)
2327 2330 {
2328 2331 mntopt_t *mop;
2329 2332 uint_t i, count;
2330 2333 char *sp;
2331 2334
2332 2335 ASSERT(!update_mnttab || RW_WRITE_HELD(&vfslist));
2333 2336
2334 2337 if (flags & VFS_CREATEOPT) {
2335 2338 if (vfs_hasopt(mops, opt) != NULL) {
2336 2339 flags &= ~VFS_CREATEOPT;
2337 2340 }
2338 2341 }
2339 2342 count = mops->mo_count;
2340 2343 for (i = 0; i < count; i++) {
2341 2344 mop = &mops->mo_list[i];
2342 2345
2343 2346 if (mop->mo_flags & MO_EMPTY) {
2344 2347 if ((flags & VFS_CREATEOPT) == 0)
2345 2348 continue;
2346 2349 sp = kmem_alloc(strlen(opt) + 1, KM_SLEEP);
2347 2350 (void) strcpy(sp, opt);
2348 2351 mop->mo_name = sp;
2349 2352 if (arg != NULL)
2350 2353 mop->mo_flags = MO_HASVALUE;
2351 2354 else
2352 2355 mop->mo_flags = 0;
2353 2356 } else if (strcmp(opt, mop->mo_name)) {
2354 2357 continue;
2355 2358 }
2356 2359 if ((mop->mo_flags & MO_IGNORE) && (flags & VFS_NOFORCEOPT))
2357 2360 break;
2358 2361 if (arg != NULL && (mop->mo_flags & MO_HASVALUE) != 0) {
2359 2362 sp = kmem_alloc(strlen(arg) + 1, KM_SLEEP);
2360 2363 (void) strcpy(sp, arg);
2361 2364 } else {
2362 2365 sp = NULL;
2363 2366 }
2364 2367 if (mop->mo_arg != NULL)
2365 2368 kmem_free(mop->mo_arg, strlen(mop->mo_arg) + 1);
2366 2369 mop->mo_arg = sp;
2367 2370 if (flags & VFS_DISPLAY)
2368 2371 mop->mo_flags &= ~MO_NODISPLAY;
2369 2372 if (flags & VFS_NODISPLAY)
2370 2373 mop->mo_flags |= MO_NODISPLAY;
2371 2374 mop->mo_flags |= MO_SET;
2372 2375 if (mop->mo_cancel != NULL) {
2373 2376 char **cp;
2374 2377
2375 2378 for (cp = mop->mo_cancel; *cp != NULL; cp++)
2376 2379 vfs_clearmntopt_nolock(mops, *cp, 0);
2377 2380 }
2378 2381 if (update_mnttab)
2379 2382 vfs_mnttab_modtimeupd();
2380 2383 break;
2381 2384 }
2382 2385 }
2383 2386
2384 2387 void
2385 2388 vfs_setmntopt(struct vfs *vfsp, const char *opt, const char *arg, int flags)
2386 2389 {
2387 2390 int gotlock = 0;
2388 2391
2389 2392 if (VFS_ON_LIST(vfsp)) {
2390 2393 gotlock = 1;
2391 2394 vfs_list_lock();
2392 2395 }
2393 2396 vfs_setmntopt_nolock(&vfsp->vfs_mntopts, opt, arg, flags, gotlock);
2394 2397 if (gotlock)
2395 2398 vfs_list_unlock();
2396 2399 }
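
/*
 * Minimal usage sketch (hypothetical caller): a file system that
 * discovers its backing store is write-protected could force the
 * read-only option on its own vfs, mirroring what domount() does:
 *
 *	vfs_setmntopt(vfsp, MNTOPT_RO, NULL, 0);
 *	vfsp->vfs_flag |= VFS_RDONLY;
 *
 * vfs_setmntopt() takes the vfs list lock itself when the vfs is
 * already on the list, so no extra locking is needed for the options
 * table.
 */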
2397 2400
2398 2401
2399 2402 /*
2400 2403 * Add a "tag" option to a mounted file system's options list.
2401 2404 *
2402 2405 * Note: caller is responsible for locking the vfs list, if needed,
2403 2406 * to protect mops.
2404 2407 */
2405 2408 static mntopt_t *
2406 2409 vfs_addtag(mntopts_t *mops, const char *tag)
2407 2410 {
2408 2411 uint_t count;
2409 2412 mntopt_t *mop, *motbl;
2410 2413
2411 2414 count = mops->mo_count + 1;
2412 2415 motbl = kmem_zalloc(count * sizeof (mntopt_t), KM_SLEEP);
2413 2416 if (mops->mo_count) {
2414 2417 size_t len = (count - 1) * sizeof (mntopt_t);
2415 2418
2416 2419 bcopy(mops->mo_list, motbl, len);
2417 2420 kmem_free(mops->mo_list, len);
2418 2421 }
2419 2422 mops->mo_count = count;
2420 2423 mops->mo_list = motbl;
2421 2424 mop = &motbl[count - 1];
2422 2425 mop->mo_flags = MO_TAG;
2423 2426 mop->mo_name = kmem_alloc(strlen(tag) + 1, KM_SLEEP);
2424 2427 (void) strcpy(mop->mo_name, tag);
2425 2428 return (mop);
2426 2429 }
2427 2430
2428 2431 /*
2429 2432 * Allow users to set arbitrary "tags" in a vfs's mount options.
2430 2433 * Broader use within the kernel is discouraged.
2431 2434 */
2432 2435 int
2433 2436 vfs_settag(uint_t major, uint_t minor, const char *mntpt, const char *tag,
2434 2437 cred_t *cr)
2435 2438 {
2436 2439 vfs_t *vfsp;
2437 2440 mntopts_t *mops;
2438 2441 mntopt_t *mop;
2439 2442 int found = 0;
2440 2443 dev_t dev = makedevice(major, minor);
2441 2444 int err = 0;
2442 2445 char *buf = kmem_alloc(MAX_MNTOPT_STR, KM_SLEEP);
2443 2446
2444 2447 /*
2445 2448 * Find the desired mounted file system
2446 2449 */
2447 2450 vfs_list_lock();
2448 2451 vfsp = rootvfs;
2449 2452 do {
2450 2453 if (vfsp->vfs_dev == dev &&
2451 2454 strcmp(mntpt, refstr_value(vfsp->vfs_mntpt)) == 0) {
2452 2455 found = 1;
2453 2456 break;
2454 2457 }
2455 2458 vfsp = vfsp->vfs_next;
2456 2459 } while (vfsp != rootvfs);
2457 2460
2458 2461 if (!found) {
2459 2462 err = EINVAL;
2460 2463 goto out;
2461 2464 }
2462 2465 err = secpolicy_fs_config(cr, vfsp);
2463 2466 if (err != 0)
2464 2467 goto out;
2465 2468
2466 2469 mops = &vfsp->vfs_mntopts;
2467 2470 /*
2468 2471 * Add tag if it doesn't already exist
2469 2472 */
2470 2473 if ((mop = vfs_hasopt(mops, tag)) == NULL) {
2471 2474 int len;
2472 2475
2473 2476 (void) vfs_buildoptionstr(mops, buf, MAX_MNTOPT_STR);
2474 2477 len = strlen(buf);
2475 2478 if (len + strlen(tag) + 2 > MAX_MNTOPT_STR) {
2476 2479 err = ENAMETOOLONG;
2477 2480 goto out;
2478 2481 }
2479 2482 mop = vfs_addtag(mops, tag);
2480 2483 }
2481 2484 if ((mop->mo_flags & MO_TAG) == 0) {
2482 2485 err = EINVAL;
2483 2486 goto out;
2484 2487 }
2485 2488 vfs_setmntopt_nolock(mops, tag, NULL, 0, 1);
2486 2489 out:
2487 2490 vfs_list_unlock();
2488 2491 kmem_free(buf, MAX_MNTOPT_STR);
2489 2492 return (err);
2490 2493 }
2491 2494
2492 2495 /*
2493 2496 * Allow users to remove arbitrary "tags" in a vfs's mount options.
2494 2497 * Broader use within the kernel is discouraged.
2495 2498 */
2496 2499 int
2497 2500 vfs_clrtag(uint_t major, uint_t minor, const char *mntpt, const char *tag,
2498 2501 cred_t *cr)
2499 2502 {
2500 2503 vfs_t *vfsp;
2501 2504 mntopt_t *mop;
2502 2505 int found = 0;
2503 2506 dev_t dev = makedevice(major, minor);
2504 2507 int err = 0;
2505 2508
2506 2509 /*
2507 2510 * Find the desired mounted file system
2508 2511 */
2509 2512 vfs_list_lock();
2510 2513 vfsp = rootvfs;
2511 2514 do {
2512 2515 if (vfsp->vfs_dev == dev &&
2513 2516 strcmp(mntpt, refstr_value(vfsp->vfs_mntpt)) == 0) {
2514 2517 found = 1;
2515 2518 break;
2516 2519 }
2517 2520 vfsp = vfsp->vfs_next;
2518 2521 } while (vfsp != rootvfs);
2519 2522
2520 2523 if (!found) {
2521 2524 err = EINVAL;
2522 2525 goto out;
2523 2526 }
2524 2527 err = secpolicy_fs_config(cr, vfsp);
2525 2528 if (err != 0)
2526 2529 goto out;
2527 2530
2528 2531 if ((mop = vfs_hasopt(&vfsp->vfs_mntopts, tag)) == NULL) {
2529 2532 err = EINVAL;
2530 2533 goto out;
2531 2534 }
2532 2535 if ((mop->mo_flags & MO_TAG) == 0) {
2533 2536 err = EINVAL;
2534 2537 goto out;
2535 2538 }
2536 2539 vfs_clearmntopt_nolock(&vfsp->vfs_mntopts, tag, 1);
2537 2540 out:
2538 2541 vfs_list_unlock();
2539 2542 return (err);
2540 2543 }
2541 2544
2542 2545 /*
2543 2546 * Function to parse an option string and fill in a mount options table.
2544 2547 * Unknown options are silently ignored. The input option string is modified
2545 2548 * by replacing separators with nulls. If the create flag is set, options
2546 2549 * not found in the table are just added on the fly. The table must have
2547 2550 * an option slot marked MO_EMPTY to add an option on the fly.
2548 2551 *
2549 2552 * This function is *not* for general use by filesystems.
2550 2553 *
2551 2554 * Note: caller is responsible for locking the vfs list, if needed,
2552 2555  * to protect mops.
2553 2556 */
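
/*
 * Worked example (hypothetical option names): given osp = "opt1,opt2=val",
 * the loop below first isolates "opt1" (no value) and then "opt2" with
 * value "val", calling vfs_setmntopt_nolock() for each.  The ',' and '='
 * separators are overwritten with NULs only temporarily and restored
 * before moving on, so osp reads the same on return.
 */
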
2554 2557 void
2555 2558 vfs_parsemntopts(mntopts_t *mops, char *osp, int create)
2556 2559 {
2557 2560 char *s = osp, *p, *nextop, *valp, *cp, *ep;
2558 2561 int setflg = VFS_NOFORCEOPT;
2559 2562
2560 2563 if (osp == NULL)
2561 2564 return;
2562 2565 while (*s != '\0') {
2563 2566 p = strchr(s, ','); /* find next option */
2564 2567 if (p == NULL) {
2565 2568 cp = NULL;
2566 2569 p = s + strlen(s);
2567 2570 } else {
2568 2571 cp = p; /* save location of comma */
2569 2572 *p++ = '\0'; /* mark end and point to next option */
2570 2573 }
2571 2574 nextop = p;
2572 2575 p = strchr(s, '='); /* look for value */
2573 2576 if (p == NULL) {
2574 2577 valp = NULL; /* no value supplied */
2575 2578 } else {
2576 2579 ep = p; /* save location of equals */
2577 2580 *p++ = '\0'; /* end option and point to value */
2578 2581 valp = p;
2579 2582 }
2580 2583 /*
2581 2584 * set option into options table
2582 2585 */
2583 2586 if (create)
2584 2587 setflg |= VFS_CREATEOPT;
2585 2588 vfs_setmntopt_nolock(mops, s, valp, setflg, 0);
2586 2589 if (cp != NULL)
2587 2590 *cp = ','; /* restore the comma */
2588 2591 if (valp != NULL)
2589 2592 *ep = '='; /* restore the equals */
2590 2593 s = nextop;
2591 2594 }
2592 2595 }
2593 2596
2594 2597 /*
2595 2598 * Function to inquire if an option exists in a mount options table.
2596 2599 * Returns a pointer to the option if it exists, else NULL.
2597 2600 *
2598 2601 * This function is *not* for general use by filesystems.
2599 2602 *
2600 2603 * Note: caller is responsible for locking the vfs list, if needed,
2601 2604 * to protect mops.
2602 2605 */
2603 2606 struct mntopt *
2604 2607 vfs_hasopt(const mntopts_t *mops, const char *opt)
2605 2608 {
2606 2609 struct mntopt *mop;
2607 2610 uint_t i, count;
2608 2611
2609 2612 count = mops->mo_count;
2610 2613 for (i = 0; i < count; i++) {
2611 2614 mop = &mops->mo_list[i];
2612 2615
2613 2616 if (mop->mo_flags & MO_EMPTY)
2614 2617 continue;
2615 2618 if (strcmp(opt, mop->mo_name) == 0)
2616 2619 return (mop);
2617 2620 }
2618 2621 return (NULL);
2619 2622 }
2620 2623
2621 2624 /*
2622 2625 * Function to inquire if an option is set in a mount options table.
2623 2626 * Returns non-zero if set and fills in the arg pointer with a pointer to
2624 2627 * the argument string or NULL if there is no argument string.
2625 2628 */
2626 2629 static int
2627 2630 vfs_optionisset_nolock(const mntopts_t *mops, const char *opt, char **argp)
2628 2631 {
2629 2632 struct mntopt *mop;
2630 2633 uint_t i, count;
2631 2634
2632 2635 count = mops->mo_count;
2633 2636 for (i = 0; i < count; i++) {
2634 2637 mop = &mops->mo_list[i];
2635 2638
2636 2639 if (mop->mo_flags & MO_EMPTY)
2637 2640 continue;
2638 2641 if (strcmp(opt, mop->mo_name))
2639 2642 continue;
2640 2643 if ((mop->mo_flags & MO_SET) == 0)
2641 2644 return (0);
2642 2645 if (argp != NULL && (mop->mo_flags & MO_HASVALUE) != 0)
2643 2646 *argp = mop->mo_arg;
2644 2647 return (1);
2645 2648 }
2646 2649 return (0);
2647 2650 }
2648 2651
2649 2652
2650 2653 int
2651 2654 vfs_optionisset(const struct vfs *vfsp, const char *opt, char **argp)
2652 2655 {
2653 2656 int ret;
2654 2657
2655 2658 vfs_list_read_lock();
2656 2659 ret = vfs_optionisset_nolock(&vfsp->vfs_mntopts, opt, argp);
2657 2660 vfs_list_unlock();
2658 2661 return (ret);
2659 2662 }
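
/*
 * Minimal usage sketch: this is how domount() above tests the global
 * options, e.g.
 *
 *	if (vfs_optionisset(vfsp, MNTOPT_NBMAND, NULL))
 *		vfsp->vfs_flag |= VFS_NBMAND;
 *
 * A caller interested in an option's value passes a char ** as the third
 * argument; it is filled in only when the option was set with a value
 * (MO_HASVALUE), and the string remains owned by the options table.
 */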
2660 2663
2661 2664
2662 2665 /*
2663 2666 * Construct a comma separated string of the options set in the given
2664 2667  * mount table and return the string in the given buffer. Return non-zero if
2665 2668 * the buffer would overflow.
2666 2669 *
2667 2670 * This function is *not* for general use by filesystems.
2668 2671 *
2669 2672 * Note: caller is responsible for locking the vfs list, if needed,
2670 2673 * to protect mp.
2671 2674 */
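
/*
 * Example: for a table in which "ro" and "xattr" are the only options
 * set (neither with a value), the resulting string is "ro,xattr"; an
 * option set with a value appears as "name=value".  On overflow the
 * function returns EOVERFLOW and buf is left partially filled.
 */
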
2672 2675 int
2673 2676 vfs_buildoptionstr(const mntopts_t *mp, char *buf, int len)
2674 2677 {
2675 2678 char *cp;
2676 2679 uint_t i;
2677 2680
2678 2681 buf[0] = '\0';
2679 2682 cp = buf;
2680 2683 for (i = 0; i < mp->mo_count; i++) {
2681 2684 struct mntopt *mop;
2682 2685
2683 2686 mop = &mp->mo_list[i];
2684 2687 if (mop->mo_flags & MO_SET) {
2685 2688 int optlen, comma = 0;
2686 2689
2687 2690 if (buf[0] != '\0')
2688 2691 comma = 1;
2689 2692 optlen = strlen(mop->mo_name);
2690 2693 if (strlen(buf) + comma + optlen + 1 > len)
2691 2694 goto err;
2692 2695 if (comma)
2693 2696 *cp++ = ',';
2694 2697 (void) strcpy(cp, mop->mo_name);
2695 2698 cp += optlen;
2696 2699 /*
2697 2700 * Append option value if there is one
2698 2701 */
2699 2702 if (mop->mo_arg != NULL) {
2700 2703 int arglen;
2701 2704
2702 2705 arglen = strlen(mop->mo_arg);
2703 2706 if (strlen(buf) + arglen + 2 > len)
2704 2707 goto err;
2705 2708 *cp++ = '=';
2706 2709 (void) strcpy(cp, mop->mo_arg);
2707 2710 cp += arglen;
2708 2711 }
2709 2712 }
2710 2713 }
2711 2714 return (0);
2712 2715 err:
2713 2716 return (EOVERFLOW);
2714 2717 }
2715 2718
2716 2719 static void
2717 2720 vfs_freecancelopt(char **moc)
2718 2721 {
2719 2722 if (moc != NULL) {
2720 2723 int ccnt = 0;
2721 2724 char **cp;
2722 2725
2723 2726 for (cp = moc; *cp != NULL; cp++) {
2724 2727 kmem_free(*cp, strlen(*cp) + 1);
2725 2728 ccnt++;
2726 2729 }
2727 2730 kmem_free(moc, (ccnt + 1) * sizeof (char *));
2728 2731 }
2729 2732 }
2730 2733
2731 2734 static void
2732 2735 vfs_freeopt(mntopt_t *mop)
2733 2736 {
2734 2737 if (mop->mo_name != NULL)
2735 2738 kmem_free(mop->mo_name, strlen(mop->mo_name) + 1);
2736 2739
2737 2740 vfs_freecancelopt(mop->mo_cancel);
2738 2741
2739 2742 if (mop->mo_arg != NULL)
2740 2743 kmem_free(mop->mo_arg, strlen(mop->mo_arg) + 1);
2741 2744 }
2742 2745
2743 2746 /*
2744 2747 * Free a mount options table
2745 2748 *
2746 2749 * This function is *not* for general use by filesystems.
2747 2750 *
2748 2751 * Note: caller is responsible for locking the vfs list, if needed,
2749 2752 * to protect mp.
2750 2753 */
2751 2754 void
2752 2755 vfs_freeopttbl(mntopts_t *mp)
2753 2756 {
2754 2757 uint_t i, count;
2755 2758
2756 2759 count = mp->mo_count;
2757 2760 for (i = 0; i < count; i++) {
2758 2761 vfs_freeopt(&mp->mo_list[i]);
2759 2762 }
2760 2763 if (count) {
2761 2764 kmem_free(mp->mo_list, sizeof (mntopt_t) * count);
2762 2765 mp->mo_count = 0;
2763 2766 mp->mo_list = NULL;
2764 2767 }
2765 2768 }
2766 2769
2767 2770
2768 2771 /* ARGSUSED */
2769 2772 static int
2770 2773 vfs_mntdummyread(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cred,
2771 2774 caller_context_t *ct)
2772 2775 {
2773 2776 return (0);
2774 2777 }
2775 2778
2776 2779 /* ARGSUSED */
2777 2780 static int
2778 2781 vfs_mntdummywrite(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cred,
2779 2782 caller_context_t *ct)
2780 2783 {
2781 2784 return (0);
2782 2785 }
2783 2786
2784 2787 /*
2785 2788 * The dummy vnode is currently used only by file events notification
2786 2789 * module which is just interested in the timestamps.
2787 2790 */
2788 2791 /* ARGSUSED */
2789 2792 static int
2790 2793 vfs_mntdummygetattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
2791 2794 caller_context_t *ct)
2792 2795 {
2793 2796 bzero(vap, sizeof (vattr_t));
2794 2797 vap->va_type = VREG;
2795 2798 vap->va_nlink = 1;
2796 2799 vap->va_ctime = vfs_mnttab_ctime;
2797 2800 /*
2798 2801 * it is ok to just copy mtime as the time will be monotonically
2799 2802 * increasing.
2800 2803 */
2801 2804 vap->va_mtime = vfs_mnttab_mtime;
2802 2805 vap->va_atime = vap->va_mtime;
2803 2806 return (0);
2804 2807 }
2805 2808
2806 2809 static void
2807 2810 vfs_mnttabvp_setup(void)
2808 2811 {
2809 2812 vnode_t *tvp;
2810 2813 vnodeops_t *vfs_mntdummyvnops;
2811 2814 const fs_operation_def_t mnt_dummyvnodeops_template[] = {
2812 2815 VOPNAME_READ, { .vop_read = vfs_mntdummyread },
2813 2816 VOPNAME_WRITE, { .vop_write = vfs_mntdummywrite },
2814 2817 VOPNAME_GETATTR, { .vop_getattr = vfs_mntdummygetattr },
2815 2818 VOPNAME_VNEVENT, { .vop_vnevent = fs_vnevent_support },
2816 2819 NULL, NULL
2817 2820 };
2818 2821
2819 2822 if (vn_make_ops("mnttab", mnt_dummyvnodeops_template,
2820 2823 &vfs_mntdummyvnops) != 0) {
2821 2824 cmn_err(CE_WARN, "vfs_mnttabvp_setup: vn_make_ops failed");
2822 2825 /* Shouldn't happen, but not bad enough to panic */
2823 2826 return;
2824 2827 }
2825 2828
2826 2829 /*
2827 2830 * A global dummy vnode is allocated to represent mntfs files.
2828 2831 * The mntfs file (/etc/mnttab) can be monitored for file events
2829 2832 * and receive an event when mnttab changes. Dummy VOP calls
2830 2833 * will be made on this vnode. The file events notification module
2831 2834 * intercepts this vnode and delivers relevant events.
2832 2835 */
2833 2836 tvp = vn_alloc(KM_SLEEP);
2834 2837 tvp->v_flag = VNOMOUNT|VNOMAP|VNOSWAP|VNOCACHE;
2835 2838 vn_setops(tvp, vfs_mntdummyvnops);
2836 2839 tvp->v_type = VREG;
2837 2840 /*
2838 2841 * The mnt dummy ops do not reference v_data.
2839 2842 * No other module intercepting this vnode should either.
2840 2843 * Just set it to point to itself.
2841 2844 */
2842 2845 tvp->v_data = (caddr_t)tvp;
2843 2846 tvp->v_vfsp = rootvfs;
2844 2847 vfs_mntdummyvp = tvp;
2845 2848 }
2846 2849
2847 2850 /*
2848 2851 * performs fake read/write ops
2849 2852 */
2850 2853 static void
2851 2854 vfs_mnttab_rwop(int rw)
2852 2855 {
2853 2856 struct uio uio;
2854 2857 struct iovec iov;
2855 2858 char buf[1];
2856 2859
2857 2860 if (vfs_mntdummyvp == NULL)
2858 2861 return;
2859 2862
2860 2863 bzero(&uio, sizeof (uio));
2861 2864 bzero(&iov, sizeof (iov));
2862 2865 iov.iov_base = buf;
2863 2866 iov.iov_len = 0;
2864 2867 uio.uio_iov = &iov;
2865 2868 uio.uio_iovcnt = 1;
2866 2869 uio.uio_loffset = 0;
2867 2870 uio.uio_segflg = UIO_SYSSPACE;
2868 2871 uio.uio_resid = 0;
2869 2872 if (rw) {
2870 2873 (void) VOP_WRITE(vfs_mntdummyvp, &uio, 0, kcred, NULL);
2871 2874 } else {
2872 2875 (void) VOP_READ(vfs_mntdummyvp, &uio, 0, kcred, NULL);
2873 2876 }
2874 2877 }
2875 2878
2876 2879 /*
2877 2880 * Generate a write operation.
2878 2881 */
2879 2882 void
2880 2883 vfs_mnttab_writeop(void)
2881 2884 {
2882 2885 vfs_mnttab_rwop(1);
2883 2886 }
2884 2887
2885 2888 /*
2886 2889 * Generate a read operation.
2887 2890 */
2888 2891 void
2889 2892 vfs_mnttab_readop(void)
2890 2893 {
2891 2894 vfs_mnttab_rwop(0);
2892 2895 }
2893 2896
2894 2897 /*
2895 2898 * Free any mnttab information recorded in the vfs struct.
2896 2899 * The vfs must not be on the vfs list.
2897 2900 */
2898 2901 static void
2899 2902 vfs_freemnttab(struct vfs *vfsp)
2900 2903 {
2901 2904 ASSERT(!VFS_ON_LIST(vfsp));
2902 2905
2903 2906 /*
2904 2907 * Free device and mount point information
2905 2908 */
2906 2909 if (vfsp->vfs_mntpt != NULL) {
2907 2910 refstr_rele(vfsp->vfs_mntpt);
2908 2911 vfsp->vfs_mntpt = NULL;
2909 2912 }
2910 2913 if (vfsp->vfs_resource != NULL) {
2911 2914 refstr_rele(vfsp->vfs_resource);
2912 2915 vfsp->vfs_resource = NULL;
2913 2916 }
2914 2917 /*
2915 2918 * Now free mount options information
2916 2919 */
2917 2920 vfs_freeopttbl(&vfsp->vfs_mntopts);
2918 2921 }
2919 2922
2920 2923 /*
2921 2924 * Return the last mnttab modification time
2922 2925 */
2923 2926 void
2924 2927 vfs_mnttab_modtime(timespec_t *ts)
2925 2928 {
2926 2929 ASSERT(RW_LOCK_HELD(&vfslist));
2927 2930 *ts = vfs_mnttab_mtime;
2928 2931 }
2929 2932
2930 2933 /*
2931 2934 * See if mnttab is changed
2932 2935 */
2933 2936 void
2934 2937 vfs_mnttab_poll(timespec_t *old, struct pollhead **phpp)
2935 2938 {
2936 2939 int changed;
2937 2940
2938 2941 *phpp = (struct pollhead *)NULL;
2939 2942
2940 2943 /*
2941 2944 	 * Note: don't grab the vfs list lock before accessing vfs_mnttab_mtime;
2942 2945 	 * doing so can deadlock against vfs_mnttab_modtimeupd(). It is safe
2943 2946 	 * not to grab the vfs list lock because tv_sec is monotonically
2944 2947 	 * increasing.
2945 2948 */
2946 2949
2947 2950 changed = (old->tv_nsec != vfs_mnttab_mtime.tv_nsec) ||
2948 2951 (old->tv_sec != vfs_mnttab_mtime.tv_sec);
2949 2952 if (!changed) {
2950 2953 *phpp = &vfs_pollhd;
2951 2954 }
2952 2955 }
2953 2956
2954 2957 /* Provide a unique and monotonically-increasing timestamp. */
2955 2958 void
2956 2959 vfs_mono_time(timespec_t *ts)
2957 2960 {
2958 2961 static volatile hrtime_t hrt; /* The saved time. */
2959 2962 hrtime_t newhrt, oldhrt; /* For effecting the CAS. */
2960 2963 timespec_t newts;
2961 2964
2962 2965 /*
2963 2966 * Try gethrestime() first, but be prepared to fabricate a sensible
2964 2967 * answer at the first sign of any trouble.
2965 2968 */
2966 2969 gethrestime(&newts);
2967 2970 newhrt = ts2hrt(&newts);
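
	/*
	 * The compare-and-swap loop below publishes max(newhrt, hrt + 1):
	 * if another thread updates hrt first, the CAS fails, hrt is
	 * re-read and the computation retried, so every caller returns a
	 * value strictly greater than any previously returned one.
	 */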
2968 2971 for (;;) {
2969 2972 oldhrt = hrt;
2970 2973 if (newhrt <= hrt)
2971 2974 newhrt = hrt + 1;
2972 2975 if (cas64((uint64_t *)&hrt, oldhrt, newhrt) == oldhrt)
2973 2976 break;
2974 2977 }
2975 2978 hrt2ts(newhrt, ts);
2976 2979 }
2977 2980
2978 2981 /*
2979 2982 * Update the mnttab modification time and wake up any waiters for
2980 2983 * mnttab changes
2981 2984 */
2982 2985 void
2983 2986 vfs_mnttab_modtimeupd()
2984 2987 {
2985 2988 hrtime_t oldhrt, newhrt;
2986 2989
2987 2990 ASSERT(RW_WRITE_HELD(&vfslist));
2988 2991 oldhrt = ts2hrt(&vfs_mnttab_mtime);
2989 2992 gethrestime(&vfs_mnttab_mtime);
2990 2993 newhrt = ts2hrt(&vfs_mnttab_mtime);
2991 2994 if (oldhrt == (hrtime_t)0)
2992 2995 vfs_mnttab_ctime = vfs_mnttab_mtime;
2993 2996 /*
2994 2997 	 * Attempt to provide a unique mtime (like uniqtime(), but without using it).
2995 2998 */
2996 2999 if (newhrt == oldhrt) {
2997 3000 newhrt++;
2998 3001 hrt2ts(newhrt, &vfs_mnttab_mtime);
2999 3002 }
3000 3003 pollwakeup(&vfs_pollhd, (short)POLLRDBAND);
3001 3004 vfs_mnttab_writeop();
3002 3005 }
3003 3006
3004 3007 int
3005 3008 dounmount(struct vfs *vfsp, int flag, cred_t *cr)
3006 3009 {
3007 3010 vnode_t *coveredvp;
3008 3011 int error;
3009 3012 extern void teardown_vopstats(vfs_t *);
3010 3013
3011 3014 /*
3012 3015 * Get covered vnode. This will be NULL if the vfs is not linked
3013 3016 	 * into the file system name space (i.e., domount() with MS_NOSPLICE).
3014 3017 */
3015 3018 coveredvp = vfsp->vfs_vnodecovered;
3016 3019 ASSERT(coveredvp == NULL || vn_vfswlock_held(coveredvp));
3017 3020
3018 3021 /*
3019 3022 * Purge all dnlc entries for this vfs.
3020 3023 */
3021 3024 (void) dnlc_purge_vfsp(vfsp, 0);
3022 3025
3023 3026 /* For forcible umount, skip VFS_SYNC() since it may hang */
3024 3027 if ((flag & MS_FORCE) == 0)
3025 3028 (void) VFS_SYNC(vfsp, 0, cr);
3026 3029
3027 3030 /*
3028 3031 * Lock the vfs to maintain fs status quo during unmount. This
3029 3032 * has to be done after the sync because ufs_update tries to acquire
3030 3033 * the vfs_reflock.
3031 3034 */
3032 3035 vfs_lock_wait(vfsp);
3033 3036
3034 3037 if (error = VFS_UNMOUNT(vfsp, flag, cr)) {
3035 3038 vfs_unlock(vfsp);
3036 3039 if (coveredvp != NULL)
3037 3040 vn_vfsunlock(coveredvp);
3038 3041 } else if (coveredvp != NULL) {
3039 3042 teardown_vopstats(vfsp);
3040 3043 /*
3041 3044 * vfs_remove() will do a VN_RELE(vfsp->vfs_vnodecovered)
3042 3045 * when it frees vfsp so we do a VN_HOLD() so we can
3043 3046 * continue to use coveredvp afterwards.
3044 3047 */
3045 3048 VN_HOLD(coveredvp);
3046 3049 vfs_remove(vfsp);
3047 3050 vn_vfsunlock(coveredvp);
3048 3051 VN_RELE(coveredvp);
3049 3052 } else {
3050 3053 teardown_vopstats(vfsp);
3051 3054 /*
3052 3055 * Release the reference to vfs that is not linked
3053 3056 * into the name space.
3054 3057 */
3055 3058 vfs_unlock(vfsp);
3056 3059 VFS_RELE(vfsp);
3057 3060 }
3058 3061 return (error);
3059 3062 }
3060 3063
3061 3064
3062 3065 /*
3063 3066 * Vfs_unmountall() is called by uadmin() to unmount all
3064 3067 * mounted file systems (except the root file system) during shutdown.
3065 3068 * It follows the existing locking protocol when traversing the vfs list
3066 3069 * to sync and unmount vfses. Even though there should be no
3067 3070 * other thread running while the system is shutting down, it is prudent
3068 3071 * to still follow the locking protocol.
3069 3072 */
3070 3073 void
3071 3074 vfs_unmountall(void)
3072 3075 {
3073 3076 struct vfs *vfsp;
3074 3077 struct vfs *prev_vfsp = NULL;
3075 3078 int error;
3076 3079
3077 3080 /*
3078 3081 * Toss all dnlc entries now so that the per-vfs sync
3079 3082 * and unmount operations don't have to slog through
3080 3083 * a bunch of uninteresting vnodes over and over again.
3081 3084 */
3082 3085 dnlc_purge();
3083 3086
3084 3087 vfs_list_lock();
3085 3088 for (vfsp = rootvfs->vfs_prev; vfsp != rootvfs; vfsp = prev_vfsp) {
3086 3089 prev_vfsp = vfsp->vfs_prev;
3087 3090
3088 3091 if (vfs_lock(vfsp) != 0)
3089 3092 continue;
3090 3093 error = vn_vfswlock(vfsp->vfs_vnodecovered);
3091 3094 vfs_unlock(vfsp);
3092 3095 if (error)
3093 3096 continue;
3094 3097
3095 3098 vfs_list_unlock();
3096 3099
3097 3100 (void) VFS_SYNC(vfsp, SYNC_CLOSE, CRED());
3098 3101 (void) dounmount(vfsp, 0, CRED());
3099 3102
3100 3103 /*
3101 3104 * Since we dropped the vfslist lock above we must
3102 3105 		 * verify that prev_vfsp still exists, else start over.
3103 3106 */
3104 3107 vfs_list_lock();
3105 3108 for (vfsp = rootvfs->vfs_prev;
3106 3109 vfsp != rootvfs; vfsp = vfsp->vfs_prev)
3107 3110 if (vfsp == prev_vfsp)
3108 3111 break;
3109 3112 if (vfsp == rootvfs && prev_vfsp != rootvfs)
3110 3113 prev_vfsp = rootvfs->vfs_prev;
3111 3114 }
3112 3115 vfs_list_unlock();
3113 3116 }
3114 3117
3115 3118 /*
3116 3119 * Called to add an entry to the end of the vfs mount in progress list
3117 3120 */
3118 3121 void
3119 3122 vfs_addmip(dev_t dev, struct vfs *vfsp)
3120 3123 {
3121 3124 struct ipmnt *mipp;
3122 3125
3123 3126 mipp = (struct ipmnt *)kmem_alloc(sizeof (struct ipmnt), KM_SLEEP);
3124 3127 mipp->mip_next = NULL;
3125 3128 mipp->mip_dev = dev;
3126 3129 mipp->mip_vfsp = vfsp;
3127 3130 mutex_enter(&vfs_miplist_mutex);
3128 3131 if (vfs_miplist_end != NULL)
3129 3132 vfs_miplist_end->mip_next = mipp;
3130 3133 else
3131 3134 vfs_miplist = mipp;
3132 3135 vfs_miplist_end = mipp;
3133 3136 mutex_exit(&vfs_miplist_mutex);
3134 3137 }
3135 3138
3136 3139 /*
3137 3140  * Called to remove an entry from the mount in progress list,
3138 3141  * either because the mount completed or because it failed.
3139 3142 */
3140 3143 void
3141 3144 vfs_delmip(struct vfs *vfsp)
3142 3145 {
3143 3146 struct ipmnt *mipp, *mipprev;
3144 3147
3145 3148 mutex_enter(&vfs_miplist_mutex);
3146 3149 mipprev = NULL;
3147 3150 for (mipp = vfs_miplist;
3148 3151 mipp && mipp->mip_vfsp != vfsp; mipp = mipp->mip_next) {
3149 3152 mipprev = mipp;
3150 3153 }
3151 3154 if (mipp == NULL)
3152 3155 return; /* shouldn't happen */
3153 3156 if (mipp == vfs_miplist_end)
3154 3157 vfs_miplist_end = mipprev;
3155 3158 if (mipprev == NULL)
3156 3159 vfs_miplist = mipp->mip_next;
3157 3160 else
3158 3161 mipprev->mip_next = mipp->mip_next;
3159 3162 mutex_exit(&vfs_miplist_mutex);
3160 3163 kmem_free(mipp, sizeof (struct ipmnt));
3161 3164 }
3162 3165
3163 3166 /*
3164 3167 * vfs_add is called by a specific filesystem's mount routine to add
3165 3168 * the new vfs into the vfs list/hash and to cover the mounted-on vnode.
3166 3169 * The vfs should already have been locked by the caller.
3167 3170 *
3168 3171 * coveredvp is NULL if this is the root.
3169 3172 */
3170 3173 void
3171 3174 vfs_add(vnode_t *coveredvp, struct vfs *vfsp, int mflag)
3172 3175 {
3173 3176 int newflag;
3174 3177
3175 3178 ASSERT(vfs_lock_held(vfsp));
3176 3179 VFS_HOLD(vfsp);
3177 3180 newflag = vfsp->vfs_flag;
3178 3181 if (mflag & MS_RDONLY)
3179 3182 newflag |= VFS_RDONLY;
3180 3183 else
3181 3184 newflag &= ~VFS_RDONLY;
3182 3185 if (mflag & MS_NOSUID)
3183 3186 newflag |= (VFS_NOSETUID|VFS_NODEVICES);
3184 3187 else
3185 3188 newflag &= ~(VFS_NOSETUID|VFS_NODEVICES);
3186 3189 if (mflag & MS_NOMNTTAB)
3187 3190 newflag |= VFS_NOMNTTAB;
3188 3191 else
3189 3192 newflag &= ~VFS_NOMNTTAB;
3190 3193
3191 3194 if (coveredvp != NULL) {
3192 3195 ASSERT(vn_vfswlock_held(coveredvp));
3193 3196 coveredvp->v_vfsmountedhere = vfsp;
3194 3197 VN_HOLD(coveredvp);
3195 3198 }
3196 3199 vfsp->vfs_vnodecovered = coveredvp;
3197 3200 vfsp->vfs_flag = newflag;
3198 3201
3199 3202 vfs_list_add(vfsp);
3200 3203 }
3201 3204
3202 3205 /*
3203 3206 * Remove a vfs from the vfs list, null out the pointer from the
3204 3207 * covered vnode to the vfs (v_vfsmountedhere), and null out the pointer
3205 3208 * from the vfs to the covered vnode (vfs_vnodecovered). Release the
3206 3209 * reference to the vfs and to the covered vnode.
3207 3210 *
3208 3211 * Called from dounmount after it's confirmed with the file system
3209 3212 * that the unmount is legal.
3210 3213 */
3211 3214 void
3212 3215 vfs_remove(struct vfs *vfsp)
3213 3216 {
3214 3217 vnode_t *vp;
3215 3218
3216 3219 ASSERT(vfs_lock_held(vfsp));
3217 3220
3218 3221 /*
3219 3222 * Can't unmount root. Should never happen because fs will
3220 3223 * be busy.
3221 3224 */
3222 3225 if (vfsp == rootvfs)
3223 3226 panic("vfs_remove: unmounting root");
3224 3227
3225 3228 vfs_list_remove(vfsp);
3226 3229
3227 3230 /*
3228 3231 * Unhook from the file system name space.
3229 3232 */
3230 3233 vp = vfsp->vfs_vnodecovered;
3231 3234 ASSERT(vn_vfswlock_held(vp));
3232 3235 vp->v_vfsmountedhere = NULL;
3233 3236 vfsp->vfs_vnodecovered = NULL;
3234 3237 VN_RELE(vp);
3235 3238
3236 3239 /*
3237 3240 * Release lock and wakeup anybody waiting.
3238 3241 */
3239 3242 vfs_unlock(vfsp);
3240 3243 VFS_RELE(vfsp);
3241 3244 }
3242 3245
3243 3246 /*
3244 3247 * Lock a filesystem to prevent access to it while mounting,
3245 3248 * unmounting and syncing. Return EBUSY immediately if lock
3246 3249 * can't be acquired.
3247 3250 */
3248 3251 int
3249 3252 vfs_lock(vfs_t *vfsp)
3250 3253 {
3251 3254 vn_vfslocks_entry_t *vpvfsentry;
3252 3255
3253 3256 vpvfsentry = vn_vfslocks_getlock(vfsp);
3254 3257 if (rwst_tryenter(&vpvfsentry->ve_lock, RW_WRITER))
3255 3258 return (0);
3256 3259
3257 3260 vn_vfslocks_rele(vpvfsentry);
3258 3261 return (EBUSY);
3259 3262 }
3260 3263
3261 3264 int
3262 3265 vfs_rlock(vfs_t *vfsp)
3263 3266 {
3264 3267 vn_vfslocks_entry_t *vpvfsentry;
3265 3268
3266 3269 vpvfsentry = vn_vfslocks_getlock(vfsp);
3267 3270
3268 3271 if (rwst_tryenter(&vpvfsentry->ve_lock, RW_READER))
3269 3272 return (0);
3270 3273
3271 3274 vn_vfslocks_rele(vpvfsentry);
3272 3275 return (EBUSY);
3273 3276 }
3274 3277
3275 3278 void
3276 3279 vfs_lock_wait(vfs_t *vfsp)
3277 3280 {
3278 3281 vn_vfslocks_entry_t *vpvfsentry;
3279 3282
3280 3283 vpvfsentry = vn_vfslocks_getlock(vfsp);
3281 3284 rwst_enter(&vpvfsentry->ve_lock, RW_WRITER);
3282 3285 }
3283 3286
3284 3287 void
3285 3288 vfs_rlock_wait(vfs_t *vfsp)
3286 3289 {
3287 3290 vn_vfslocks_entry_t *vpvfsentry;
3288 3291
3289 3292 vpvfsentry = vn_vfslocks_getlock(vfsp);
3290 3293 rwst_enter(&vpvfsentry->ve_lock, RW_READER);
3291 3294 }
3292 3295
3293 3296 /*
3294 3297 * Unlock a locked filesystem.
3295 3298 */
3296 3299 void
3297 3300 vfs_unlock(vfs_t *vfsp)
3298 3301 {
3299 3302 vn_vfslocks_entry_t *vpvfsentry;
3300 3303
3301 3304 /*
3302 3305 * vfs_unlock will mimic sema_v behaviour to fix 4748018.
3303 3306 	 * These changes should remain as they are in patch releases.
3304 3307 */
3305 3308 if (panicstr)
3306 3309 return;
3307 3310
3308 3311 /*
3309 3312 * ve_refcount needs to be dropped twice here.
3310 3313 	 * 1. To release the reference taken by vn_vfslocks_getlock() below.
3311 3314 	 * 2. To release the reference taken earlier by the locking routines
3312 3315 	 *    (vfs_rlock_wait()/vfs_lock_wait()/vfs_lock(), etc.).
3313 3316 */
3314 3317
3315 3318 vpvfsentry = vn_vfslocks_getlock(vfsp);
3316 3319 vn_vfslocks_rele(vpvfsentry);
3317 3320
3318 3321 rwst_exit(&vpvfsentry->ve_lock);
3319 3322 vn_vfslocks_rele(vpvfsentry);
3320 3323 }
3321 3324
3322 3325 /*
3323 3326 * Utility routine that allows a filesystem to construct its
3324 3327 * fsid in "the usual way" - by munging some underlying dev_t and
3325 3328 * the filesystem type number into the 64-bit fsid. Note that
3326 3329 * this implicitly relies on dev_t persistence to make filesystem
3327 3330 * id's persistent.
3328 3331 *
3329 3332 * There's nothing to prevent an individual fs from constructing its
3330 3333 * fsid in a different way, and indeed they should.
3331 3334 *
3332 3335 * Since we want fsids to be 32-bit quantities (so that they can be
3333 3336 * exported identically by either 32-bit or 64-bit APIs, as well as
3334 3337 * the fact that fsid's are "known" to NFS), we compress the device
3335 3338 * number given down to 32-bits, and panic if that isn't possible.
3336 3339 */
3337 3340 void
3338 3341 vfs_make_fsid(fsid_t *fsi, dev_t dev, int val)
3339 3342 {
3340 3343 if (!cmpldev((dev32_t *)&fsi->val[0], dev))
3341 3344 panic("device number too big for fsid!");
3342 3345 fsi->val[1] = val;
3343 3346 }
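
/*
 * Minimal usage sketch (hypothetical caller): a file system's mount
 * routine would typically construct its fsid from the backing device
 * and its fstype index, e.g.
 *
 *	vfs_make_fsid(&vfsp->vfs_fsid, dev, vfsp->vfs_fstype);
 *
 * where dev is the dev_t of the device being mounted.
 */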
3344 3347
3345 3348 int
3346 3349 vfs_lock_held(vfs_t *vfsp)
3347 3350 {
3348 3351 int held;
3349 3352 vn_vfslocks_entry_t *vpvfsentry;
3350 3353
3351 3354 /*
3352 3355 * vfs_lock_held will mimic sema_held behaviour
3353 3356 	 * if panicstr is set. These changes should remain as they are
3354 3357 	 * in patch releases.
3355 3358 */
3356 3359 if (panicstr)
3357 3360 return (1);
3358 3361
3359 3362 vpvfsentry = vn_vfslocks_getlock(vfsp);
3360 3363 held = rwst_lock_held(&vpvfsentry->ve_lock, RW_WRITER);
3361 3364
3362 3365 vn_vfslocks_rele(vpvfsentry);
3363 3366 return (held);
3364 3367 }
3365 3368
3366 3369 struct _kthread *
3367 3370 vfs_lock_owner(vfs_t *vfsp)
3368 3371 {
3369 3372 struct _kthread *owner;
3370 3373 vn_vfslocks_entry_t *vpvfsentry;
3371 3374
3372 3375 /*
3373 3376 	 * vfs_lock_owner will mimic sema_held behaviour
3374 3377 	 * if panicstr is set. These changes should remain as they are
3375 3378 	 * in patch releases.
3376 3379 */
3377 3380 if (panicstr)
3378 3381 return (NULL);
3379 3382
3380 3383 vpvfsentry = vn_vfslocks_getlock(vfsp);
3381 3384 owner = rwst_owner(&vpvfsentry->ve_lock);
3382 3385
3383 3386 vn_vfslocks_rele(vpvfsentry);
3384 3387 return (owner);
3385 3388 }
3386 3389
3387 3390 /*
3388 3391 * vfs list locking.
3389 3392 *
3390 3393 * Rather than manipulate the vfslist lock directly, we abstract into lock
3391 3394 * and unlock routines to allow the locking implementation to be changed for
3392 3395 * clustering.
3393 3396 *
3394 3397 * Whenever the vfs list is modified through its hash links, the overall list
3395 3398 * lock must be obtained before locking the relevant hash bucket. But to see
3396 3399 * whether a given vfs is on the list, it suffices to obtain the lock for the
3397 3400 * hash bucket without getting the overall list lock. (See getvfs() below.)
3398 3401 */
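
/*
 * Typical traversal sketch (vfs_settag() above walks the list this way
 * under the write lock; read-only walkers take the read lock instead):
 *
 *	vfs_list_read_lock();
 *	vfsp = rootvfs;
 *	do {
 *		... examine vfsp ...
 *		vfsp = vfsp->vfs_next;
 *	} while (vfsp != rootvfs);
 *	vfs_list_unlock();
 */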
3399 3402
3400 3403 void
3401 3404 vfs_list_lock()
3402 3405 {
3403 3406 rw_enter(&vfslist, RW_WRITER);
3404 3407 }
3405 3408
3406 3409 void
3407 3410 vfs_list_read_lock()
3408 3411 {
3409 3412 rw_enter(&vfslist, RW_READER);
3410 3413 }
3411 3414
3412 3415 void
3413 3416 vfs_list_unlock()
3414 3417 {
3415 3418 rw_exit(&vfslist);
3416 3419 }
3417 3420
3418 3421 /*
3419 3422 * Low level worker routines for adding entries to and removing entries from
3420 3423 * the vfs list.
3421 3424 */
3422 3425
3423 3426 static void
3424 3427 vfs_hash_add(struct vfs *vfsp, int insert_at_head)
3425 3428 {
3426 3429 int vhno;
3427 3430 struct vfs **hp;
3428 3431 dev_t dev;
3429 3432
3430 3433 ASSERT(RW_WRITE_HELD(&vfslist));
3431 3434
3432 3435 dev = expldev(vfsp->vfs_fsid.val[0]);
3433 3436 vhno = VFSHASH(getmajor(dev), getminor(dev));
3434 3437
3435 3438 mutex_enter(&rvfs_list[vhno].rvfs_lock);
3436 3439
3437 3440 /*
3438 3441 * Link into the hash table, inserting it at the end, so that LOFS
3439 3442 * with the same fsid as UFS (or other) file systems will not hide the
3440 3443 * UFS.
3441 3444 */
3442 3445 if (insert_at_head) {
3443 3446 vfsp->vfs_hash = rvfs_list[vhno].rvfs_head;
3444 3447 rvfs_list[vhno].rvfs_head = vfsp;
3445 3448 } else {
3446 3449 for (hp = &rvfs_list[vhno].rvfs_head; *hp != NULL;
3447 3450 hp = &(*hp)->vfs_hash)
3448 3451 continue;
3449 3452 /*
3450 3453 * hp now contains the address of the pointer to update
3451 3454 * to effect the insertion.
3452 3455 */
3453 3456 vfsp->vfs_hash = NULL;
3454 3457 *hp = vfsp;
3455 3458 }
3456 3459
3457 3460 rvfs_list[vhno].rvfs_len++;
3458 3461 mutex_exit(&rvfs_list[vhno].rvfs_lock);
3459 3462 }
3460 3463
3461 3464
3462 3465 static void
3463 3466 vfs_hash_remove(struct vfs *vfsp)
3464 3467 {
3465 3468 int vhno;
3466 3469 struct vfs *tvfsp;
3467 3470 dev_t dev;
3468 3471
3469 3472 ASSERT(RW_WRITE_HELD(&vfslist));
3470 3473
3471 3474 dev = expldev(vfsp->vfs_fsid.val[0]);
3472 3475 vhno = VFSHASH(getmajor(dev), getminor(dev));
3473 3476
3474 3477 mutex_enter(&rvfs_list[vhno].rvfs_lock);
3475 3478
3476 3479 /*
3477 3480 * Remove from hash.
3478 3481 */
3479 3482 if (rvfs_list[vhno].rvfs_head == vfsp) {
3480 3483 rvfs_list[vhno].rvfs_head = vfsp->vfs_hash;
3481 3484 rvfs_list[vhno].rvfs_len--;
3482 3485 goto foundit;
3483 3486 }
3484 3487 for (tvfsp = rvfs_list[vhno].rvfs_head; tvfsp != NULL;
3485 3488 tvfsp = tvfsp->vfs_hash) {
3486 3489 if (tvfsp->vfs_hash == vfsp) {
3487 3490 tvfsp->vfs_hash = vfsp->vfs_hash;
3488 3491 rvfs_list[vhno].rvfs_len--;
3489 3492 goto foundit;
3490 3493 }
3491 3494 }
3492 3495 cmn_err(CE_WARN, "vfs_list_remove: vfs not found in hash");
3493 3496
3494 3497 foundit:
3495 3498
3496 3499 mutex_exit(&rvfs_list[vhno].rvfs_lock);
3497 3500 }
3498 3501
3499 3502
3500 3503 void
3501 3504 vfs_list_add(struct vfs *vfsp)
3502 3505 {
3503 3506 zone_t *zone;
3504 3507
3505 3508 /*
3506 3509 * Typically, the vfs_t will have been created on behalf of the file
3507 3510 * system in vfs_init, where it will have been provided with a
3508 3511 * vfs_impl_t. This, however, might be lacking if the vfs_t was created
3509 3512	 * by an unbundled file system. We therefore check for this case
3510 3513 * before stamping the vfs_t with its creation time for the benefit of
3511 3514 * mntfs.
3512 3515 */
3513 3516 if (vfsp->vfs_implp == NULL)
3514 3517 vfsimpl_setup(vfsp);
3515 3518 vfs_mono_time(&vfsp->vfs_hrctime);
3516 3519
3517 3520 /*
3518 3521 * The zone that owns the mount is the one that performed the mount.
3519 3522 * Note that this isn't necessarily the same as the zone mounted into.
3520 3523 * The corresponding zone_rele_ref() will be done when the vfs_t
3521 3524 * is being free'd.
3522 3525 */
3523 3526 vfsp->vfs_zone = curproc->p_zone;
3524 3527 zone_init_ref(&vfsp->vfs_implp->vi_zone_ref);
3525 3528 zone_hold_ref(vfsp->vfs_zone, &vfsp->vfs_implp->vi_zone_ref,
3526 3529 ZONE_REF_VFS);
3527 3530
3528 3531 /*
3529 3532 * Find the zone mounted into, and put this mount on its vfs list.
3530 3533 */
3531 3534 zone = zone_find_by_path(refstr_value(vfsp->vfs_mntpt));
3532 3535 ASSERT(zone != NULL);
3533 3536 /*
3534 3537 * Special casing for the root vfs. This structure is allocated
3535 3538 * statically and hooked onto rootvfs at link time. During the
3536 3539 * vfs_mountroot call at system startup time, the root file system's
3537 3540 * VFS_MOUNTROOT routine will call vfs_add with this root vfs struct
3538 3541 * as argument. The code below must detect and handle this special
3539 3542 * case. The only apparent justification for this special casing is
3540 3543 * to ensure that the root file system appears at the head of the
3541 3544 * list.
3542 3545 *
3543 3546 * XXX: I'm assuming that it's ok to do normal list locking when
3544 3547 * adding the entry for the root file system (this used to be
3545 3548 * done with no locks held).
3546 3549 */
3547 3550 vfs_list_lock();
3548 3551 /*
3549 3552 * Link into the vfs list proper.
3550 3553 */
3551 3554 if (vfsp == &root) {
3552 3555 /*
3553 3556 * Assert: This vfs is already on the list as its first entry.
3554 3557 * Thus, there's nothing to do.
3555 3558 */
3556 3559 ASSERT(rootvfs == vfsp);
3557 3560 /*
3558 3561 * Add it to the head of the global zone's vfslist.
3559 3562 */
3560 3563 ASSERT(zone == global_zone);
3561 3564 ASSERT(zone->zone_vfslist == NULL);
3562 3565 zone->zone_vfslist = vfsp;
3563 3566 } else {
3564 3567 /*
3565 3568 * Link to end of list using vfs_prev (as rootvfs is now a
3566 3569 * doubly linked circular list) so list is in mount order for
3567 3570 * mnttab use.
3568 3571 */
3569 3572 rootvfs->vfs_prev->vfs_next = vfsp;
3570 3573 vfsp->vfs_prev = rootvfs->vfs_prev;
3571 3574 rootvfs->vfs_prev = vfsp;
3572 3575 vfsp->vfs_next = rootvfs;
3573 3576
3574 3577 /*
3575 3578 * Do it again for the zone-private list (which may be NULL).
3576 3579 */
3577 3580 if (zone->zone_vfslist == NULL) {
3578 3581 ASSERT(zone != global_zone);
3579 3582 zone->zone_vfslist = vfsp;
3580 3583 } else {
3581 3584 zone->zone_vfslist->vfs_zone_prev->vfs_zone_next = vfsp;
3582 3585 vfsp->vfs_zone_prev = zone->zone_vfslist->vfs_zone_prev;
3583 3586 zone->zone_vfslist->vfs_zone_prev = vfsp;
3584 3587 vfsp->vfs_zone_next = zone->zone_vfslist;
3585 3588 }
3586 3589 }
3587 3590
3588 3591 /*
3589 3592 * Link into the hash table, inserting it at the end, so that LOFS
3590 3593 * with the same fsid as UFS (or other) file systems will not hide
3591 3594 * the UFS.
3592 3595 */
3593 3596 vfs_hash_add(vfsp, 0);
3594 3597
3595 3598 /*
3596 3599 * update the mnttab modification time
3597 3600 */
3598 3601 vfs_mnttab_modtimeupd();
3599 3602 vfs_list_unlock();
3600 3603 zone_rele(zone);
3601 3604 }
3602 3605
3603 3606 void
3604 3607 vfs_list_remove(struct vfs *vfsp)
3605 3608 {
3606 3609 zone_t *zone;
3607 3610
3608 3611 zone = zone_find_by_path(refstr_value(vfsp->vfs_mntpt));
3609 3612 ASSERT(zone != NULL);
3610 3613 /*
3611 3614 * Callers are responsible for preventing attempts to unmount the
3612 3615 * root.
3613 3616 */
3614 3617 ASSERT(vfsp != rootvfs);
3615 3618
3616 3619 vfs_list_lock();
3617 3620
3618 3621 /*
3619 3622 * Remove from hash.
3620 3623 */
3621 3624 vfs_hash_remove(vfsp);
3622 3625
3623 3626 /*
3624 3627 * Remove from vfs list.
3625 3628 */
3626 3629 vfsp->vfs_prev->vfs_next = vfsp->vfs_next;
3627 3630 vfsp->vfs_next->vfs_prev = vfsp->vfs_prev;
3628 3631 vfsp->vfs_next = vfsp->vfs_prev = NULL;
3629 3632
3630 3633 /*
3631 3634 * Remove from zone-specific vfs list.
3632 3635 */
3633 3636 if (zone->zone_vfslist == vfsp)
3634 3637 zone->zone_vfslist = vfsp->vfs_zone_next;
3635 3638
3636 3639 if (vfsp->vfs_zone_next == vfsp) {
3637 3640 ASSERT(vfsp->vfs_zone_prev == vfsp);
3638 3641 ASSERT(zone->zone_vfslist == vfsp);
3639 3642 zone->zone_vfslist = NULL;
3640 3643 }
3641 3644
3642 3645 vfsp->vfs_zone_prev->vfs_zone_next = vfsp->vfs_zone_next;
3643 3646 vfsp->vfs_zone_next->vfs_zone_prev = vfsp->vfs_zone_prev;
3644 3647 vfsp->vfs_zone_next = vfsp->vfs_zone_prev = NULL;
3645 3648
3646 3649 /*
3647 3650 * update the mnttab modification time
3648 3651 */
3649 3652 vfs_mnttab_modtimeupd();
3650 3653 vfs_list_unlock();
3651 3654 zone_rele(zone);
3652 3655 }
3653 3656
3654 3657 struct vfs *
3655 3658 getvfs(fsid_t *fsid)
3656 3659 {
3657 3660 struct vfs *vfsp;
3658 3661 int val0 = fsid->val[0];
3659 3662 int val1 = fsid->val[1];
3660 3663 dev_t dev = expldev(val0);
3661 3664 int vhno = VFSHASH(getmajor(dev), getminor(dev));
3662 3665 kmutex_t *hmp = &rvfs_list[vhno].rvfs_lock;
3663 3666
3664 3667 mutex_enter(hmp);
3665 3668 for (vfsp = rvfs_list[vhno].rvfs_head; vfsp; vfsp = vfsp->vfs_hash) {
3666 3669 if (vfsp->vfs_fsid.val[0] == val0 &&
3667 3670 vfsp->vfs_fsid.val[1] == val1) {
3668 3671 VFS_HOLD(vfsp);
3669 3672 mutex_exit(hmp);
3670 3673 return (vfsp);
3671 3674 }
3672 3675 }
3673 3676 mutex_exit(hmp);
3674 3677 return (NULL);
3675 3678 }
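/*
 * A minimal caller sketch (hypothetical helper): getvfs() returns the
 * vfs with a hold taken, so a successful lookup must be balanced with
 * VFS_RELE().
 */
static int
example_fsid_is_readonly(fsid_t *fsid)
{
	struct vfs *vfsp;
	int rdonly;

	if ((vfsp = getvfs(fsid)) == NULL)
		return (0);
	rdonly = (vfsp->vfs_flag & VFS_RDONLY) != 0;
	VFS_RELE(vfsp);
	return (rdonly);
}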
3676 3679
3677 3680 /*
3678 3681  * Search the vfs mount-in-progress list for a specified device/vfs
3679 3682  * entry. Returns 0 if the first entry in the list that matches the
3680 3683  * device also has the given vfs pointer. Returns 1 if an entry
3681 3684  * matching the device is encountered in the list before the given
3682 3685  * vfs pointer.
3683 3686 */
3684 3687
3685 3688 int
3686 3689 vfs_devmounting(dev_t dev, struct vfs *vfsp)
3687 3690 {
3688 3691 int retval = 0;
3689 3692 struct ipmnt *mipp;
3690 3693
3691 3694 mutex_enter(&vfs_miplist_mutex);
3692 3695 for (mipp = vfs_miplist; mipp != NULL; mipp = mipp->mip_next) {
3693 3696 if (mipp->mip_dev == dev) {
3694 3697 if (mipp->mip_vfsp != vfsp)
3695 3698 retval = 1;
3696 3699 break;
3697 3700 }
3698 3701 }
3699 3702 mutex_exit(&vfs_miplist_mutex);
3700 3703 return (retval);
3701 3704 }
3702 3705
3703 3706 /*
3704 3707  * Search the vfs list for a specified device. Returns 1 if an entry
3705 3708  * is found, or 0 if no suitable entry is found.
3706 3709 */
3707 3710
3708 3711 int
3709 3712 vfs_devismounted(dev_t dev)
3710 3713 {
3711 3714 struct vfs *vfsp;
3712 3715 int found;
3713 3716
3714 3717 vfs_list_read_lock();
3715 3718 vfsp = rootvfs;
3716 3719 found = 0;
3717 3720 do {
3718 3721 if (vfsp->vfs_dev == dev) {
3719 3722 found = 1;
3720 3723 break;
3721 3724 }
3722 3725 vfsp = vfsp->vfs_next;
3723 3726 } while (vfsp != rootvfs);
3724 3727
3725 3728 vfs_list_unlock();
3726 3729 return (found);
3727 3730 }
3728 3731
3729 3732 /*
3730 3733 * Search the vfs list for a specified device. Returns a pointer to it
3731 3734 * or NULL if no suitable entry is found. The caller of this routine
3732 3735 * is responsible for releasing the returned vfs pointer.
3733 3736 */
3734 3737 struct vfs *
3735 3738 vfs_dev2vfsp(dev_t dev)
3736 3739 {
3737 3740 struct vfs *vfsp;
3738 3741 int found;
3739 3742
3740 3743 vfs_list_read_lock();
3741 3744 vfsp = rootvfs;
3742 3745 found = 0;
3743 3746 do {
3744 3747 /*
3745 3748 * The following could be made more efficient by making
3746 3749 * the entire loop use vfs_zone_next if the call is from
3747 3750 * a zone. The only callers, however, ustat(2) and
3748 3751 * umount2(2), don't seem to justify the added
3749 3752 * complexity at present.
3750 3753 */
3751 3754 if (vfsp->vfs_dev == dev &&
3752 3755 ZONE_PATH_VISIBLE(refstr_value(vfsp->vfs_mntpt),
3753 3756 curproc->p_zone)) {
3754 3757 VFS_HOLD(vfsp);
3755 3758 found = 1;
3756 3759 break;
3757 3760 }
3758 3761 vfsp = vfsp->vfs_next;
3759 3762 } while (vfsp != rootvfs);
3760 3763 vfs_list_unlock();
3761 3764 return (found ? vfsp: NULL);
3762 3765 }
3763 3766
3764 3767 /*
3765 3768 * Search the vfs list for a specified mntpoint. Returns a pointer to it
3766 3769 * or NULL if no suitable entry is found. The caller of this routine
3767 3770 * is responsible for releasing the returned vfs pointer.
3768 3771 *
3769 3772 * Note that if multiple mntpoints match, the last one matching is
3770 3773 * returned in an attempt to return the "top" mount when overlay
3771 3774 * mounts are covering the same mount point. This is accomplished by starting
3772 3775 * at the end of the list and working our way backwards, stopping at the first
3773 3776 * matching mount.
3774 3777 */
3775 3778 struct vfs *
3776 3779 vfs_mntpoint2vfsp(const char *mp)
3777 3780 {
3778 3781 struct vfs *vfsp;
3779 3782 struct vfs *retvfsp = NULL;
3780 3783 zone_t *zone = curproc->p_zone;
3781 3784 struct vfs *list;
3782 3785
3783 3786 vfs_list_read_lock();
3784 3787 if (getzoneid() == GLOBAL_ZONEID) {
3785 3788 /*
3786 3789 * The global zone may see filesystems in any zone.
3787 3790 */
3788 3791 vfsp = rootvfs->vfs_prev;
3789 3792 do {
3790 3793 if (strcmp(refstr_value(vfsp->vfs_mntpt), mp) == 0) {
3791 3794 retvfsp = vfsp;
3792 3795 break;
3793 3796 }
3794 3797 vfsp = vfsp->vfs_prev;
3795 3798 } while (vfsp != rootvfs->vfs_prev);
3796 3799 } else if ((list = zone->zone_vfslist) != NULL) {
3797 3800 const char *mntpt;
3798 3801
3799 3802 vfsp = list->vfs_zone_prev;
3800 3803 do {
3801 3804 mntpt = refstr_value(vfsp->vfs_mntpt);
3802 3805 mntpt = ZONE_PATH_TRANSLATE(mntpt, zone);
3803 3806 if (strcmp(mntpt, mp) == 0) {
3804 3807 retvfsp = vfsp;
3805 3808 break;
3806 3809 }
3807 3810 vfsp = vfsp->vfs_zone_prev;
3808 3811 } while (vfsp != list->vfs_zone_prev);
3809 3812 }
3810 3813 if (retvfsp)
3811 3814 VFS_HOLD(retvfsp);
3812 3815 vfs_list_unlock();
3813 3816 return (retvfsp);
3814 3817 }
3815 3818
3816 3819 /*
3817 3820 * Search the vfs list for a specified vfsops.
3818 3821  * Return 1 if a vfs entry is found, else 0.
3819 3822 */
3820 3823 int
3821 3824 vfs_opsinuse(vfsops_t *ops)
3822 3825 {
3823 3826 struct vfs *vfsp;
3824 3827 int found;
3825 3828
3826 3829 vfs_list_read_lock();
3827 3830 vfsp = rootvfs;
3828 3831 found = 0;
3829 3832 do {
3830 3833 if (vfs_getops(vfsp) == ops) {
3831 3834 found = 1;
3832 3835 break;
3833 3836 }
3834 3837 vfsp = vfsp->vfs_next;
3835 3838 } while (vfsp != rootvfs);
3836 3839 vfs_list_unlock();
3837 3840 return (found);
3838 3841 }
3839 3842
3840 3843 /*
3841 3844 * Allocate an entry in vfssw for a file system type
3842 3845 */
3843 3846 struct vfssw *
3844 3847 allocate_vfssw(const char *type)
3845 3848 {
3846 3849 struct vfssw *vswp;
3847 3850
3848 3851 if (type[0] == '\0' || strlen(type) + 1 > _ST_FSTYPSZ) {
3849 3852 /*
3850 3853 * The vfssw table uses the empty string to identify an
3851 3854 * available entry; we cannot add any type which has
3852 3855 * a leading NUL. The string length is limited to
3853 3856 * the size of the st_fstype array in struct stat.
3854 3857 */
3855 3858 return (NULL);
3856 3859 }
3857 3860
3858 3861 ASSERT(VFSSW_WRITE_LOCKED());
3859 3862 for (vswp = &vfssw[1]; vswp < &vfssw[nfstype]; vswp++)
3860 3863 if (!ALLOCATED_VFSSW(vswp)) {
3861 3864 vswp->vsw_name = kmem_alloc(strlen(type) + 1, KM_SLEEP);
3862 3865 (void) strcpy(vswp->vsw_name, type);
3863 3866 ASSERT(vswp->vsw_count == 0);
3864 3867 vswp->vsw_count = 1;
3865 3868 mutex_init(&vswp->vsw_lock, NULL, MUTEX_DEFAULT, NULL);
3866 3869 return (vswp);
3867 3870 }
3868 3871 return (NULL);
3869 3872 }
3870 3873
3871 3874 /*
3872 3875  * Impose an additional layer of translation between vfstype names
3873 3876 * and module names in the filesystem.
3874 3877 */
3875 3878 static const char *
3876 3879 vfs_to_modname(const char *vfstype)
3877 3880 {
3878 3881 if (strcmp(vfstype, "proc") == 0) {
3879 3882 vfstype = "procfs";
3880 3883 } else if (strcmp(vfstype, "fd") == 0) {
3881 3884 vfstype = "fdfs";
3882 3885 } else if (strncmp(vfstype, "nfs", 3) == 0) {
3883 3886 vfstype = "nfs";
3884 3887 }
3885 3888
3886 3889 return (vfstype);
3887 3890 }
3888 3891
3889 3892 /*
3890 3893 * Find a vfssw entry given a file system type name.
3891 3894 * Try to autoload the filesystem if it's not found.
3892 3895 * If it's installed, return the vfssw locked to prevent unloading.
3893 3896 */
3894 3897 struct vfssw *
3895 3898 vfs_getvfssw(const char *type)
3896 3899 {
3897 3900 struct vfssw *vswp;
3898 3901 const char *modname;
3899 3902
3900 3903 RLOCK_VFSSW();
3901 3904 vswp = vfs_getvfsswbyname(type);
3902 3905 modname = vfs_to_modname(type);
3903 3906
3904 3907 if (rootdir == NULL) {
3905 3908 /*
3906 3909 * If we haven't yet loaded the root file system, then our
3907 3910 * _init won't be called until later. Allocate vfssw entry,
3908 3911 * because mod_installfs won't be called.
3909 3912 */
3910 3913 if (vswp == NULL) {
3911 3914 RUNLOCK_VFSSW();
3912 3915 WLOCK_VFSSW();
3913 3916 if ((vswp = vfs_getvfsswbyname(type)) == NULL) {
3914 3917 if ((vswp = allocate_vfssw(type)) == NULL) {
3915 3918 WUNLOCK_VFSSW();
3916 3919 return (NULL);
3917 3920 }
3918 3921 }
3919 3922 WUNLOCK_VFSSW();
3920 3923 RLOCK_VFSSW();
3921 3924 }
3922 3925 if (!VFS_INSTALLED(vswp)) {
3923 3926 RUNLOCK_VFSSW();
3924 3927 (void) modloadonly("fs", modname);
3925 3928 } else
3926 3929 RUNLOCK_VFSSW();
3927 3930 return (vswp);
3928 3931 }
3929 3932
3930 3933 /*
3931 3934 * Try to load the filesystem. Before calling modload(), we drop
3932 3935 * our lock on the VFS switch table, and pick it up after the
3933 3936 * module is loaded. However, there is a potential race: the
3934 3937 * module could be unloaded after the call to modload() completes
3935 3938 * but before we pick up the lock and drive on. Therefore,
3936 3939 * we keep reloading the module until we've loaded the module
3937 3940 * _and_ we have the lock on the VFS switch table.
3938 3941 */
3939 3942 while (vswp == NULL || !VFS_INSTALLED(vswp)) {
3940 3943 RUNLOCK_VFSSW();
3941 3944 if (modload("fs", modname) == -1)
3942 3945 return (NULL);
3943 3946 RLOCK_VFSSW();
3944 3947 if (vswp == NULL)
3945 3948 if ((vswp = vfs_getvfsswbyname(type)) == NULL)
3946 3949 break;
3947 3950 }
3948 3951 RUNLOCK_VFSSW();
3949 3952
3950 3953 return (vswp);
3951 3954 }
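/*
 * A minimal caller sketch (hypothetical helper): a successful
 * vfs_getvfssw() leaves the entry referenced, so the caller drops the
 * reference with vfs_unrefvfssw() when done; rootconf() below follows
 * the same pattern with vfs_getvfsswbyname().
 */
static int
example_fstype_is_installed(const char *type)
{
	struct vfssw *vswp;
	int installed;

	if ((vswp = vfs_getvfssw(type)) == NULL)
		return (0);
	installed = VFS_INSTALLED(vswp) ? 1 : 0;
	vfs_unrefvfssw(vswp);
	return (installed);
}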
3952 3955
3953 3956 /*
3954 3957 * Find a vfssw entry given a file system type name.
3955 3958 */
3956 3959 struct vfssw *
3957 3960 vfs_getvfsswbyname(const char *type)
3958 3961 {
3959 3962 struct vfssw *vswp;
3960 3963
3961 3964 ASSERT(VFSSW_LOCKED());
3962 3965 if (type == NULL || *type == '\0')
3963 3966 return (NULL);
3964 3967
3965 3968 for (vswp = &vfssw[1]; vswp < &vfssw[nfstype]; vswp++) {
3966 3969 if (strcmp(type, vswp->vsw_name) == 0) {
3967 3970 vfs_refvfssw(vswp);
3968 3971 return (vswp);
3969 3972 }
3970 3973 }
3971 3974
3972 3975 return (NULL);
3973 3976 }
3974 3977
3975 3978 /*
3976 3979 * Find a vfssw entry given a set of vfsops.
3977 3980 */
3978 3981 struct vfssw *
3979 3982 vfs_getvfsswbyvfsops(vfsops_t *vfsops)
3980 3983 {
3981 3984 struct vfssw *vswp;
3982 3985
3983 3986 RLOCK_VFSSW();
3984 3987 for (vswp = &vfssw[1]; vswp < &vfssw[nfstype]; vswp++) {
3985 3988 if (ALLOCATED_VFSSW(vswp) && &vswp->vsw_vfsops == vfsops) {
3986 3989 vfs_refvfssw(vswp);
3987 3990 RUNLOCK_VFSSW();
3988 3991 return (vswp);
3989 3992 }
3990 3993 }
3991 3994 RUNLOCK_VFSSW();
3992 3995
3993 3996 return (NULL);
3994 3997 }
3995 3998
3996 3999 /*
3997 4000 * Reference a vfssw entry.
3998 4001 */
3999 4002 void
4000 4003 vfs_refvfssw(struct vfssw *vswp)
4001 4004 {
4002 4005
4003 4006 mutex_enter(&vswp->vsw_lock);
4004 4007 vswp->vsw_count++;
4005 4008 mutex_exit(&vswp->vsw_lock);
4006 4009 }
4007 4010
4008 4011 /*
4009 4012 * Unreference a vfssw entry.
4010 4013 */
4011 4014 void
4012 4015 vfs_unrefvfssw(struct vfssw *vswp)
4013 4016 {
4014 4017
4015 4018 mutex_enter(&vswp->vsw_lock);
4016 4019 vswp->vsw_count--;
4017 4020 mutex_exit(&vswp->vsw_lock);
4018 4021 }
4019 4022
4020 4023 int sync_timeout = 30; /* timeout for syncing a page during panic */
4021 4024 int sync_timeleft; /* portion of sync_timeout remaining */
4022 4025
4023 4026 static int sync_retries = 20; /* number of retries when not making progress */
4024 4027 static int sync_triesleft; /* portion of sync_retries remaining */
4025 4028
4026 4029 static pgcnt_t old_pgcnt, new_pgcnt;
4027 4030 static int new_bufcnt, old_bufcnt;
4028 4031
4029 4032 /*
4030 4033 * Sync all of the mounted filesystems, and then wait for the actual i/o to
4031 4034 * complete. We wait by counting the number of dirty pages and buffers,
4032 4035 * pushing them out using bio_busy() and page_busy(), and then counting again.
4033 4036  * This routine is used during both the uadmin A_SHUTDOWN code and
4034 4037 * the SYNC phase of the panic code (see comments in panic.c). It should only
4035 4038 * be used after some higher-level mechanism has quiesced the system so that
4036 4039 * new writes are not being initiated while we are waiting for completion.
4037 4040 *
4038 4041 * To ensure finite running time, our algorithm uses two timeout mechanisms:
4039 4042 * sync_timeleft (a timer implemented by the omnipresent deadman() cyclic), and
4040 4043 * sync_triesleft (a progress counter used by the vfs_syncall() loop below).
4041 4044 * Together these ensure that syncing completes if our i/o paths are stuck.
4042 4045 * The counters are declared above so they can be found easily in the debugger.
4043 4046 *
4044 4047 * The sync_timeleft counter is reset by bio_busy() and page_busy() using the
4045 4048 * vfs_syncprogress() subroutine whenever we make progress through the lists of
4046 4049 * pages and buffers. It is decremented and expired by the deadman() cyclic.
4047 4050 * When vfs_syncall() decides it is done, we disable the deadman() counter by
4048 4051 * setting sync_timeleft to zero. This timer guards against vfs_syncall()
4049 4052 * deadlocking or hanging inside of a broken filesystem or driver routine.
4050 4053 *
4051 4054 * The sync_triesleft counter is updated by vfs_syncall() itself. If we make
4052 4055 * sync_retries consecutive calls to bio_busy() and page_busy() without
4053 4056 * decreasing either the number of dirty buffers or dirty pages below the
4054 4057 * lowest count we have seen so far, we give up and return from vfs_syncall().
4055 4058 *
4056 4059 * Each loop iteration ends with a call to delay() one second to allow time for
4057 4060 * i/o completion and to permit the user time to read our progress messages.
4058 4061 */
4059 4062 void
4060 4063 vfs_syncall(void)
4061 4064 {
4062 4065 if (rootdir == NULL && !modrootloaded)
4063 4066 return; /* panic during boot - no filesystems yet */
4064 4067
4065 4068 printf("syncing file systems...");
4066 4069 vfs_syncprogress();
4067 4070 sync();
4068 4071
4069 4072 vfs_syncprogress();
4070 4073 sync_triesleft = sync_retries;
4071 4074
4072 4075 old_bufcnt = new_bufcnt = INT_MAX;
4073 4076 old_pgcnt = new_pgcnt = ULONG_MAX;
4074 4077
4075 4078 while (sync_triesleft > 0) {
4076 4079 old_bufcnt = MIN(old_bufcnt, new_bufcnt);
4077 4080 old_pgcnt = MIN(old_pgcnt, new_pgcnt);
4078 4081
4079 4082 new_bufcnt = bio_busy(B_TRUE);
4080 4083 new_pgcnt = page_busy(B_TRUE);
4081 4084 vfs_syncprogress();
4082 4085
4083 4086 if (new_bufcnt == 0 && new_pgcnt == 0)
4084 4087 break;
4085 4088
4086 4089 if (new_bufcnt < old_bufcnt || new_pgcnt < old_pgcnt)
4087 4090 sync_triesleft = sync_retries;
4088 4091 else
4089 4092 sync_triesleft--;
4090 4093
4091 4094 if (new_bufcnt)
4092 4095 printf(" [%d]", new_bufcnt);
4093 4096 if (new_pgcnt)
4094 4097 printf(" %lu", new_pgcnt);
4095 4098
4096 4099 delay(hz);
4097 4100 }
4098 4101
4099 4102 if (new_bufcnt != 0 || new_pgcnt != 0)
4100 4103 printf(" done (not all i/o completed)\n");
4101 4104 else
4102 4105 printf(" done\n");
4103 4106
4104 4107 sync_timeleft = 0;
4105 4108 delay(hz);
4106 4109 }
4107 4110
4108 4111 /*
4109 4112 * If we are in the middle of the sync phase of panic, reset sync_timeleft to
4110 4113 * sync_timeout to indicate that we are making progress and the deadman()
4111 4114 * omnipresent cyclic should not yet time us out. Note that it is safe to
4112 4115 * store to sync_timeleft here since the deadman() is firing at high-level
4113 4116 * on top of us. If we are racing with the deadman(), either the deadman()
4114 4117 * will decrement the old value and then we will reset it, or we will
4115 4118 * reset it and then the deadman() will immediately decrement it. In either
4116 4119 * case, correct behavior results.
4117 4120 */
4118 4121 void
4119 4122 vfs_syncprogress(void)
4120 4123 {
4121 4124 if (panicstr)
4122 4125 sync_timeleft = sync_timeout;
4123 4126 }
4124 4127
4125 4128 /*
4126 4129 * Map VFS flags to statvfs flags. These shouldn't really be separate
4127 4130 * flags at all.
4128 4131 */
4129 4132 uint_t
4130 4133 vf_to_stf(uint_t vf)
4131 4134 {
4132 4135 uint_t stf = 0;
4133 4136
4134 4137 if (vf & VFS_RDONLY)
4135 4138 stf |= ST_RDONLY;
4136 4139 if (vf & VFS_NOSETUID)
4137 4140 stf |= ST_NOSUID;
4138 4141 if (vf & VFS_NOTRUNC)
4139 4142 stf |= ST_NOTRUNC;
4140 4143
4141 4144 return (stf);
4142 4145 }
4143 4146
4144 4147 /*
4145 4148 * Entries for (illegal) fstype 0.
4146 4149 */
4147 4150 /* ARGSUSED */
4148 4151 int
4149 4152 vfsstray_sync(struct vfs *vfsp, short arg, struct cred *cr)
4150 4153 {
4151 4154 cmn_err(CE_PANIC, "stray vfs operation");
4152 4155 return (0);
4153 4156 }
4154 4157
4155 4158 /*
4156 4159 * Entries for (illegal) fstype 0.
4157 4160 */
4158 4161 int
4159 4162 vfsstray(void)
4160 4163 {
4161 4164 cmn_err(CE_PANIC, "stray vfs operation");
4162 4165 return (0);
4163 4166 }
4164 4167
4165 4168 /*
4166 4169 * Support for dealing with forced UFS unmount and its interaction with
4167 4170 * LOFS. Could be used by any filesystem.
4168 4171 * See bug 1203132.
4169 4172 */
4170 4173 int
4171 4174 vfs_EIO(void)
4172 4175 {
4173 4176 return (EIO);
4174 4177 }
4175 4178
4176 4179 /*
4177 4180 * We've gotta define the op for sync separately, since the compiler gets
4178 4181 * confused if we mix and match ANSI and normal style prototypes when
4179 4182 * a "short" argument is present and spits out a warning.
4180 4183 */
4181 4184 /*ARGSUSED*/
4182 4185 int
4183 4186 vfs_EIO_sync(struct vfs *vfsp, short arg, struct cred *cr)
4184 4187 {
4185 4188 return (EIO);
4186 4189 }
4187 4190
4188 4191 vfs_t EIO_vfs;
4189 4192 vfsops_t *EIO_vfsops;
4190 4193
4191 4194 /*
4192 4195 * Called from startup() to initialize all loaded vfs's
4193 4196 */
4194 4197 void
4195 4198 vfsinit(void)
4196 4199 {
4197 4200 struct vfssw *vswp;
4198 4201 int error;
4199 4202 extern int vopstats_enabled;
4200 4203 extern void vopstats_startup();
4201 4204
4202 4205 static const fs_operation_def_t EIO_vfsops_template[] = {
4203 4206 VFSNAME_MOUNT, { .error = vfs_EIO },
4204 4207 VFSNAME_UNMOUNT, { .error = vfs_EIO },
4205 4208 VFSNAME_ROOT, { .error = vfs_EIO },
4206 4209 VFSNAME_STATVFS, { .error = vfs_EIO },
4207 4210 VFSNAME_SYNC, { .vfs_sync = vfs_EIO_sync },
4208 4211 VFSNAME_VGET, { .error = vfs_EIO },
4209 4212 VFSNAME_MOUNTROOT, { .error = vfs_EIO },
4210 4213 VFSNAME_FREEVFS, { .error = vfs_EIO },
4211 4214 VFSNAME_VNSTATE, { .error = vfs_EIO },
4212 4215 NULL, NULL
4213 4216 };
4214 4217
4215 4218 static const fs_operation_def_t stray_vfsops_template[] = {
4216 4219 VFSNAME_MOUNT, { .error = vfsstray },
4217 4220 VFSNAME_UNMOUNT, { .error = vfsstray },
4218 4221 VFSNAME_ROOT, { .error = vfsstray },
4219 4222 VFSNAME_STATVFS, { .error = vfsstray },
4220 4223 VFSNAME_SYNC, { .vfs_sync = vfsstray_sync },
4221 4224 VFSNAME_VGET, { .error = vfsstray },
4222 4225 VFSNAME_MOUNTROOT, { .error = vfsstray },
4223 4226 VFSNAME_FREEVFS, { .error = vfsstray },
4224 4227 VFSNAME_VNSTATE, { .error = vfsstray },
4225 4228 NULL, NULL
4226 4229 };
4227 4230
4228 4231 /* Create vfs cache */
4229 4232 vfs_cache = kmem_cache_create("vfs_cache", sizeof (struct vfs),
4230 4233 sizeof (uintptr_t), NULL, NULL, NULL, NULL, NULL, 0);
4231 4234
4232 4235 /* Initialize the vnode cache (file systems may use it during init). */
4233 4236 vn_create_cache();
4234 4237
4235 4238 /* Setup event monitor framework */
4236 4239 fem_init();
4237 4240
4241 + /* Setup filesystem hook framework */
4242 + fsh_init();
4243 +
4238 4244 /* Initialize the dummy stray file system type. */
4239 4245 error = vfs_setfsops(0, stray_vfsops_template, NULL);
4240 4246
4241 4247 /* Initialize the dummy EIO file system. */
4242 4248 error = vfs_makefsops(EIO_vfsops_template, &EIO_vfsops);
4243 4249 if (error != 0) {
4244 4250 cmn_err(CE_WARN, "vfsinit: bad EIO vfs ops template");
4245 4251 /* Shouldn't happen, but not bad enough to panic */
4246 4252 }
4247 4253
4248 4254 VFS_INIT(&EIO_vfs, EIO_vfsops, (caddr_t)NULL);
4249 4255
4250 4256 /*
4251 4257 * Default EIO_vfs.vfs_flag to VFS_UNMOUNTED so a lookup
4252 4258 * on this vfs can immediately notice it's invalid.
4253 4259 */
4254 4260 EIO_vfs.vfs_flag |= VFS_UNMOUNTED;
4255 4261
4256 4262 /*
4257 4263 * Call the init routines of non-loadable filesystems only.
4258 4264 * Filesystems which are loaded as separate modules will be
4259 4265 * initialized by the module loading code instead.
4260 4266 */
4261 4267
4262 4268 for (vswp = &vfssw[1]; vswp < &vfssw[nfstype]; vswp++) {
4263 4269 RLOCK_VFSSW();
4264 4270 if (vswp->vsw_init != NULL)
4265 4271 (*vswp->vsw_init)(vswp - vfssw, vswp->vsw_name);
4266 4272 RUNLOCK_VFSSW();
4267 4273 }
4268 4274
4269 4275 vopstats_startup();
4270 4276
4271 4277 if (vopstats_enabled) {
4272 4278 /* EIO_vfs can collect stats, but we don't retrieve them */
4273 4279 initialize_vopstats(&EIO_vfs.vfs_vopstats);
4274 4280 EIO_vfs.vfs_fstypevsp = NULL;
4275 4281 EIO_vfs.vfs_vskap = NULL;
4276 4282 EIO_vfs.vfs_flag |= VFS_STATS;
4277 4283 }
4278 4284
4279 4285 xattr_init();
4280 4286
4281 4287 reparse_point_init();
4282 4288 }
4283 4289
4284 4290 vfs_t *
4285 4291 vfs_alloc(int kmflag)
4286 4292 {
4287 4293 vfs_t *vfsp;
4288 4294
4289 4295 vfsp = kmem_cache_alloc(vfs_cache, kmflag);
4290 4296
4291 4297 /*
4292 4298 * Do the simplest initialization here.
4293 4299 * Everything else gets done in vfs_init()
4294 4300 */
4295 4301 bzero(vfsp, sizeof (vfs_t));
4296 4302 return (vfsp);
4297 4303 }
4298 4304
4299 4305 void
4300 4306 vfs_free(vfs_t *vfsp)
4301 4307 {
4302 4308 /*
4303 4309 * One would be tempted to assert that "vfsp->vfs_count == 0".
4304 4310 * The problem is that this gets called out of domount() with
4305 4311 * a partially initialized vfs and a vfs_count of 1. This is
4306 4312 * also called from vfs_rele() with a vfs_count of 0. We can't
4307 4313 * call VFS_RELE() from domount() if VFS_MOUNT() hasn't successfully
4308 4314 * returned. This is because VFS_MOUNT() fully initializes the
4309 4315 * vfs structure and its associated data. VFS_RELE() will call
4310 4316 * VFS_FREEVFS() which may panic the system if the data structures
4311 4317  * aren't fully initialized from a successful VFS_MOUNT().
4312 4318 */
4313 4319
4314 4320 /* If FEM was in use, make sure everything gets cleaned up */
4315 4321 if (vfsp->vfs_femhead) {
4316 4322 ASSERT(vfsp->vfs_femhead->femh_list == NULL);
4317 4323 mutex_destroy(&vfsp->vfs_femhead->femh_lock);
4318 4324 kmem_free(vfsp->vfs_femhead, sizeof (*(vfsp->vfs_femhead)));
4319 4325 vfsp->vfs_femhead = NULL;
4320 4326 }
4321 4327
4328 + /*
4329 + * fsh cleanup
4330 + * There's no need here to use atomic operations on vfs_fshrecord.
4331 + */
4332 + if (vfsp->vfs_fshrecord != NULL) {
4333 + fsh_fsrec_destroy(vfsp->vfs_fshrecord);
4334 + vfsp->vfs_fshrecord = NULL;
4335 + }
4336 +
4322 4337 if (vfsp->vfs_implp)
4323 4338 vfsimpl_teardown(vfsp);
4324 4339 sema_destroy(&vfsp->vfs_reflock);
4325 4340 kmem_cache_free(vfs_cache, vfsp);
4326 4341 }
4327 4342
4328 4343 /*
4329 4344 * Increments the vfs reference count by one atomically.
4330 4345 */
4331 4346 void
4332 4347 vfs_hold(vfs_t *vfsp)
4333 4348 {
4334 4349 atomic_add_32(&vfsp->vfs_count, 1);
4335 4350 ASSERT(vfsp->vfs_count != 0);
4336 4351 }
4337 4352
4338 4353 /*
4339 4354 * Decrements the vfs reference count by one atomically. When
4340 4355 * vfs reference count becomes zero, it calls the file system
4341 4356 * specific vfs_freevfs() to free up the resources.
4342 4357 */
4343 4358 void
4344 4359 vfs_rele(vfs_t *vfsp)
4345 4360 {
4346 4361 ASSERT(vfsp->vfs_count != 0);
4347 4362 if (atomic_add_32_nv(&vfsp->vfs_count, -1) == 0) {
4363 + fsh_exec_free_callbacks(vfsp);
4348 4364 VFS_FREEVFS(vfsp);
4349 4365 lofi_remove(vfsp);
4350 4366 if (vfsp->vfs_zone)
4351 4367 zone_rele_ref(&vfsp->vfs_implp->vi_zone_ref,
4352 4368 ZONE_REF_VFS);
4353 4369 vfs_freemnttab(vfsp);
4354 4370 vfs_free(vfsp);
4355 4371 }
4356 4372 }
4357 4373
4358 4374 /*
4359 4375 * Generic operations vector support.
4360 4376 *
4361 4377 * This is used to build operations vectors for both the vfs and vnode.
4362 4378 * It's normally called only when a file system is loaded.
4363 4379 *
4364 4380 * There are many possible algorithms for this, including the following:
4365 4381 *
4366 4382 * (1) scan the list of known operations; for each, see if the file system
4367 4383 * includes an entry for it, and fill it in as appropriate.
4368 4384 *
4369 4385 * (2) set up defaults for all known operations. scan the list of ops
4370 4386 * supplied by the file system; for each which is both supplied and
4371 4387 * known, fill it in.
4372 4388 *
4373 4389 * (3) sort the lists of known ops & supplied ops; scan the list, filling
4374 4390 * in entries as we go.
4375 4391 *
4376 4392 * we choose (1) for simplicity, and because performance isn't critical here.
4377 4393 * note that (2) could be sped up using a precomputed hash table on known ops.
4378 4394 * (3) could be faster than either, but only if the lists were very large or
4379 4395 * supplied in sorted order.
4380 4396 *
4381 4397 */
4382 4398
4383 4399 int
4384 4400 fs_build_vector(void *vector, int *unused_ops,
4385 4401 const fs_operation_trans_def_t *translation,
4386 4402 const fs_operation_def_t *operations)
4387 4403 {
4388 4404 int i, num_trans, num_ops, used;
4389 4405
4390 4406 /*
4391 4407 * Count the number of translations and the number of supplied
4392 4408 * operations.
4393 4409 */
4394 4410
4395 4411 {
4396 4412 const fs_operation_trans_def_t *p;
4397 4413
4398 4414 for (num_trans = 0, p = translation;
4399 4415 p->name != NULL;
4400 4416 num_trans++, p++)
4401 4417 ;
4402 4418 }
4403 4419
4404 4420 {
4405 4421 const fs_operation_def_t *p;
4406 4422
4407 4423 for (num_ops = 0, p = operations;
4408 4424 p->name != NULL;
4409 4425 num_ops++, p++)
4410 4426 ;
4411 4427 }
4412 4428
4413 4429 /* Walk through each operation known to our caller. There will be */
4414 4430 /* one entry in the supplied "translation table" for each. */
4415 4431
4416 4432 used = 0;
4417 4433
4418 4434 for (i = 0; i < num_trans; i++) {
4419 4435 int j, found;
4420 4436 char *curname;
4421 4437 fs_generic_func_p result;
4422 4438 fs_generic_func_p *location;
4423 4439
4424 4440 curname = translation[i].name;
4425 4441
4426 4442 /* Look for a matching operation in the list supplied by the */
4427 4443 /* file system. */
4428 4444
4429 4445 found = 0;
4430 4446
4431 4447 for (j = 0; j < num_ops; j++) {
4432 4448 if (strcmp(operations[j].name, curname) == 0) {
4433 4449 used++;
4434 4450 found = 1;
4435 4451 break;
4436 4452 }
4437 4453 }
4438 4454
4439 4455 /*
4440 4456 * If the file system is using a "placeholder" for default
4441 4457 * or error functions, grab the appropriate function out of
4442 4458 * the translation table. If the file system didn't supply
4443 4459 * this operation at all, use the default function.
4444 4460 */
4445 4461
4446 4462 if (found) {
4447 4463 result = operations[j].func.fs_generic;
4448 4464 if (result == fs_default) {
4449 4465 result = translation[i].defaultFunc;
4450 4466 } else if (result == fs_error) {
4451 4467 result = translation[i].errorFunc;
4452 4468 } else if (result == NULL) {
4453 4469 /* Null values are PROHIBITED */
4454 4470 return (EINVAL);
4455 4471 }
4456 4472 } else {
4457 4473 result = translation[i].defaultFunc;
4458 4474 }
4459 4475
4460 4476 /* Now store the function into the operations vector. */
4461 4477
4462 4478 location = (fs_generic_func_p *)
4463 4479 (((char *)vector) + translation[i].offset);
4464 4480
4465 4481 *location = result;
4466 4482 }
4467 4483
4468 4484 *unused_ops = num_ops - used;
4469 4485
4470 4486 return (0);
4471 4487 }
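/*
 * A minimal sketch of the provider side (hypothetical file system
 * "examplefs"): a table of fs_operation_def_t entries is handed to
 * vfs_makefsops(), which uses fs_build_vector() above to fill in a
 * vfsops_t; operations that are not supplied fall back to the defaults
 * from the translation table.
 */
/* ARGSUSED */
static int
example_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr)
{
	return (ENOTSUP);	/* a real file system sets up vfsp here */
}

/* ARGSUSED */
static int
example_unmount(vfs_t *vfsp, int flag, cred_t *cr)
{
	return (ENOTSUP);
}

static const fs_operation_def_t example_vfsops_template[] = {
	VFSNAME_MOUNT,		{ .vfs_mount = example_mount },
	VFSNAME_UNMOUNT,	{ .vfs_unmount = example_unmount },
	NULL,			NULL
};

static vfsops_t *example_vfsops;

static int
example_init_vfsops(void)
{
	return (vfs_makefsops(example_vfsops_template, &example_vfsops));
}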
4472 4488
4473 4489 /* Placeholder functions, should never be called. */
4474 4490
4475 4491 int
4476 4492 fs_error(void)
4477 4493 {
4478 4494 cmn_err(CE_PANIC, "fs_error called");
4479 4495 return (0);
4480 4496 }
4481 4497
4482 4498 int
4483 4499 fs_default(void)
4484 4500 {
4485 4501 cmn_err(CE_PANIC, "fs_default called");
4486 4502 return (0);
4487 4503 }
4488 4504
4489 4505 #ifdef __sparc
4490 4506
4491 4507 /*
4492 4508 * Part of the implementation of booting off a mirrored root
4493 4509 * involves a change of dev_t for the root device. To
4494 4510 * accomplish this, first remove the existing hash table
4495 4511 * entry for the root device, convert to the new dev_t,
4496 4512 * then re-insert in the hash table at the head of the list.
4497 4513 */
4498 4514 void
4499 4515 vfs_root_redev(vfs_t *vfsp, dev_t ndev, int fstype)
4500 4516 {
4501 4517 vfs_list_lock();
4502 4518
4503 4519 vfs_hash_remove(vfsp);
4504 4520
4505 4521 vfsp->vfs_dev = ndev;
4506 4522 vfs_make_fsid(&vfsp->vfs_fsid, ndev, fstype);
4507 4523
4508 4524 vfs_hash_add(vfsp, 1);
4509 4525
4510 4526 vfs_list_unlock();
4511 4527 }
4512 4528
4513 4529 #else /* x86 NEWBOOT */
4514 4530
4515 4531 #if defined(__x86)
4516 4532 extern int hvmboot_rootconf();
4517 4533 #endif /* __x86 */
4518 4534
4519 4535 extern ib_boot_prop_t *iscsiboot_prop;
4520 4536
4521 4537 int
4522 4538 rootconf()
4523 4539 {
4524 4540 int error;
4525 4541 struct vfssw *vsw;
4526 4542 extern void pm_init();
4527 4543 char *fstyp, *fsmod;
4528 4544 int ret = -1;
4529 4545
4530 4546 getrootfs(&fstyp, &fsmod);
4531 4547
4532 4548 #if defined(__x86)
4533 4549 /*
4534 4550 * hvmboot_rootconf() is defined in the hvm_bootstrap misc module,
4535 4551 * which lives in /platform/i86hvm, and hence is only available when
4536 4552 * booted in an x86 hvm environment. If the hvm_bootstrap misc module
4537 4553 * is not available then the modstub for this function will return 0.
4538 4554 * If the hvm_bootstrap misc module is available it will be loaded
4539 4555 * and hvmboot_rootconf() will be invoked.
4540 4556 */
4541 4557 if (error = hvmboot_rootconf())
4542 4558 return (error);
4543 4559 #endif /* __x86 */
4544 4560
4545 4561 if (error = clboot_rootconf())
4546 4562 return (error);
4547 4563
4548 4564 if (modload("fs", fsmod) == -1)
4549 4565 panic("Cannot _init %s module", fsmod);
4550 4566
4551 4567 RLOCK_VFSSW();
4552 4568 vsw = vfs_getvfsswbyname(fstyp);
4553 4569 RUNLOCK_VFSSW();
4554 4570 if (vsw == NULL) {
4555 4571 cmn_err(CE_CONT, "Cannot find %s filesystem\n", fstyp);
4556 4572 return (ENXIO);
4557 4573 }
4558 4574 VFS_INIT(rootvfs, &vsw->vsw_vfsops, 0);
4559 4575 VFS_HOLD(rootvfs);
4560 4576
4561 4577 /* always mount readonly first */
4562 4578 rootvfs->vfs_flag |= VFS_RDONLY;
4563 4579
4564 4580 pm_init();
4565 4581
4566 4582 if (netboot && iscsiboot_prop) {
4567 4583 cmn_err(CE_WARN, "NFS boot and iSCSI boot"
4568 4584	    " shouldn't happen at the same time");
4569 4585 return (EINVAL);
4570 4586 }
4571 4587
4572 4588 if (netboot || iscsiboot_prop) {
4573 4589 ret = strplumb();
4574 4590 if (ret != 0) {
4575 4591 cmn_err(CE_WARN, "Cannot plumb network device %d", ret);
4576 4592 return (EFAULT);
4577 4593 }
4578 4594 }
4579 4595
4580 4596 if ((ret == 0) && iscsiboot_prop) {
4581 4597 ret = modload("drv", "iscsi");
4582 4598 /* -1 indicates fail */
4583 4599 if (ret == -1) {
4584 4600 cmn_err(CE_WARN, "Failed to load iscsi module");
4585 4601 iscsi_boot_prop_free();
4586 4602 return (EINVAL);
4587 4603 } else {
4588 4604 if (!i_ddi_attach_pseudo_node("iscsi")) {
4589 4605 cmn_err(CE_WARN,
4590 4606 "Failed to attach iscsi driver");
4591 4607 iscsi_boot_prop_free();
4592 4608 return (ENODEV);
4593 4609 }
4594 4610 }
4595 4611 }
4596 4612
4597 4613 error = VFS_MOUNTROOT(rootvfs, ROOT_INIT);
4598 4614 vfs_unrefvfssw(vsw);
4599 4615 rootdev = rootvfs->vfs_dev;
4600 4616
4601 4617 if (error)
4602 4618 cmn_err(CE_CONT, "Cannot mount root on %s fstype %s\n",
4603 4619 rootfs.bo_name, fstyp);
4604 4620 else
4605 4621 cmn_err(CE_CONT, "?root on %s fstype %s\n",
4606 4622 rootfs.bo_name, fstyp);
4607 4623 return (error);
4608 4624 }
4609 4625
4610 4626 /*
4611 4627  * XXX this is called by nfs only and should probably be removed.
4612 4628 * If booted with ASKNAME, prompt on the console for a filesystem
4613 4629 * name and return it.
4614 4630 */
4615 4631 void
4616 4632 getfsname(char *askfor, char *name, size_t namelen)
4617 4633 {
4618 4634 if (boothowto & RB_ASKNAME) {
4619 4635 printf("%s name: ", askfor);
4620 4636 console_gets(name, namelen);
4621 4637 }
4622 4638 }
4623 4639
4624 4640 /*
4625 4641 * Init the root filesystem type (rootfs.bo_fstype) from the "fstype"
4626 4642 * property.
4627 4643 *
4628 4644 * Filesystem types starting with the prefix "nfs" are diskless clients;
4629 4645  * init the root file name (rootfs.bo_name), too.
4630 4646 *
4631 4647 * If we are booting via NFS we currently have these options:
4632 4648 * nfs - dynamically choose NFS V2, V3, or V4 (default)
4633 4649 * nfs2 - force NFS V2
4634 4650 * nfs3 - force NFS V3
4635 4651 * nfs4 - force NFS V4
4636 4652 * Because we need to maintain backward compatibility with the naming
4637 4653 * convention that the NFS V2 filesystem name is "nfs" (see vfs_conf.c)
4638 4654 * we need to map "nfs" => "nfsdyn" and "nfs2" => "nfs". The dynamic
4639 4655 * nfs module will map the type back to either "nfs", "nfs3", or "nfs4".
4640 4656 * This is only for root filesystems, all other uses such as cachefs
4641 4657 * will expect that "nfs" == NFS V2.
4642 4658 */
4643 4659 static void
4644 4660 getrootfs(char **fstypp, char **fsmodp)
4645 4661 {
4646 4662 extern char *strplumb_get_netdev_path(void);
4647 4663 char *propstr = NULL;
4648 4664
4649 4665 /*
4650 4666 * Check fstype property; for diskless it should be one of "nfs",
4651 4667 * "nfs2", "nfs3" or "nfs4".
4652 4668 */
4653 4669 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
4654 4670 DDI_PROP_DONTPASS, "fstype", &propstr)
4655 4671 == DDI_SUCCESS) {
4656 4672 (void) strncpy(rootfs.bo_fstype, propstr, BO_MAXFSNAME);
4657 4673 ddi_prop_free(propstr);
4658 4674
4659 4675 /*
4660 4676 * if the boot property 'fstype' is not set, but 'zfs-bootfs' is set,
4661 4677 * assume the type of this root filesystem is 'zfs'.
4662 4678 */
4663 4679 } else if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
4664 4680 DDI_PROP_DONTPASS, "zfs-bootfs", &propstr)
4665 4681 == DDI_SUCCESS) {
4666 4682 (void) strncpy(rootfs.bo_fstype, "zfs", BO_MAXFSNAME);
4667 4683 ddi_prop_free(propstr);
4668 4684 }
4669 4685
4670 4686 if (strncmp(rootfs.bo_fstype, "nfs", 3) != 0) {
4671 4687 *fstypp = *fsmodp = rootfs.bo_fstype;
4672 4688 return;
4673 4689 }
4674 4690
4675 4691 ++netboot;
4676 4692
4677 4693 if (strcmp(rootfs.bo_fstype, "nfs2") == 0)
4678 4694 (void) strcpy(rootfs.bo_fstype, "nfs");
4679 4695 else if (strcmp(rootfs.bo_fstype, "nfs") == 0)
4680 4696 (void) strcpy(rootfs.bo_fstype, "nfsdyn");
4681 4697
4682 4698 /*
4683 4699 * check if path to network interface is specified in bootpath
4684 4700 * or by a hypervisor domain configuration file.
4685 4701  * XXPV - enable strplumb_get_netdev_path()
4686 4702 */
4687 4703 if (ddi_prop_exists(DDI_DEV_T_ANY, ddi_root_node(), DDI_PROP_DONTPASS,
4688 4704 "xpv-nfsroot")) {
4689 4705 (void) strcpy(rootfs.bo_name, "/xpvd/xnf@0");
4690 4706 } else if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
4691 4707 DDI_PROP_DONTPASS, "bootpath", &propstr)
4692 4708 == DDI_SUCCESS) {
4693 4709 (void) strncpy(rootfs.bo_name, propstr, BO_MAXOBJNAME);
4694 4710 ddi_prop_free(propstr);
4695 4711 } else {
4696 4712 /* attempt to determine netdev_path via boot_mac address */
4697 4713 netdev_path = strplumb_get_netdev_path();
4698 4714 if (netdev_path == NULL)
4699 4715 panic("cannot find boot network interface");
4700 4716 (void) strncpy(rootfs.bo_name, netdev_path, BO_MAXOBJNAME);
4701 4717 }
4702 4718 *fstypp = rootfs.bo_fstype;
4703 4719 *fsmodp = "nfs";
4704 4720 }
4705 4721 #endif
4706 4722
4707 4723 /*
4708 4724 * VFS feature routines
4709 4725 */
4710 4726
4711 4727 #define VFTINDEX(feature) (((feature) >> 32) & 0xFFFFFFFF)
4712 4728 #define VFTBITS(feature) ((feature) & 0xFFFFFFFFLL)
4713 4729
4714 4730 /* Register a feature in the vfs */
4715 4731 void
4716 4732 vfs_set_feature(vfs_t *vfsp, vfs_feature_t feature)
4717 4733 {
4718 4734 /* Note that vfs_featureset[] is found in *vfsp->vfs_implp */
4719 4735 if (vfsp->vfs_implp == NULL)
4720 4736 return;
4721 4737
4722 4738 vfsp->vfs_featureset[VFTINDEX(feature)] |= VFTBITS(feature);
4723 4739 }
4724 4740
4725 4741 void
4726 4742 vfs_clear_feature(vfs_t *vfsp, vfs_feature_t feature)
4727 4743 {
4728 4744 /* Note that vfs_featureset[] is found in *vfsp->vfs_implp */
4729 4745 if (vfsp->vfs_implp == NULL)
4730 4746 return;
4731 4747 vfsp->vfs_featureset[VFTINDEX(feature)] &= VFTBITS(~feature);
4732 4748 }
4733 4749
4734 4750 /*
4735 4751 * Query a vfs for a feature.
4736 4752 * Returns 1 if feature is present, 0 if not
4737 4753 */
4738 4754 int
4739 4755 vfs_has_feature(vfs_t *vfsp, vfs_feature_t feature)
4740 4756 {
4741 4757 int ret = 0;
4742 4758
4743 4759 /* Note that vfs_featureset[] is found in *vfsp->vfs_implp */
4744 4760 if (vfsp->vfs_implp == NULL)
4745 4761 return (ret);
4746 4762
4747 4763 if (vfsp->vfs_featureset[VFTINDEX(feature)] & VFTBITS(feature))
4748 4764 ret = 1;
4749 4765
4750 4766 return (ret);
4751 4767 }
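/*
 * A minimal usage sketch (hypothetical helpers): a file system
 * advertises a feature at mount time and consumers test for it before
 * relying on it. VFSFT_XVATTR is assumed to be one of the vfs_feature_t
 * constants from <sys/vfs.h>.
 */
static void
example_advertise_xvattr(vfs_t *vfsp)
{
	vfs_set_feature(vfsp, VFSFT_XVATTR);
}

static int
example_supports_xvattr(vfs_t *vfsp)
{
	return (vfs_has_feature(vfsp, VFSFT_XVATTR));
}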
4752 4768
4753 4769 /*
4754 4770 * Propagate feature set from one vfs to another
4755 4771 */
4756 4772 void
4757 4773 vfs_propagate_features(vfs_t *from, vfs_t *to)
4758 4774 {
4759 4775 int i;
4760 4776
4761 4777 if (to->vfs_implp == NULL || from->vfs_implp == NULL)
4762 4778 return;
4763 4779
4764 4780 for (i = 1; i <= to->vfs_featureset[0]; i++) {
4765 4781 to->vfs_featureset[i] = from->vfs_featureset[i];
4766 4782 }
4767 4783 }
4768 4784
4769 4785 #define LOFINODE_PATH "/dev/lofi/%d"
4770 4786
4771 4787 /*
4772 4788 * Return the vnode for the lofi node if there's a lofi mount in place.
4773 4789 * Returns -1 when there's no lofi node, 0 on success, and > 0 on
4774 4790 * failure.
4775 4791 */
4776 4792 int
4777 4793 vfs_get_lofi(vfs_t *vfsp, vnode_t **vpp)
4778 4794 {
4779 4795 char *path = NULL;
4780 4796 int strsize;
4781 4797 int err;
4782 4798
4783 4799 if (vfsp->vfs_lofi_minor == 0) {
4784 4800 *vpp = NULL;
4785 4801 return (-1);
4786 4802 }
4787 4803
4788 4804 strsize = snprintf(NULL, 0, LOFINODE_PATH, vfsp->vfs_lofi_minor);
4789 4805 path = kmem_alloc(strsize + 1, KM_SLEEP);
4790 4806 (void) snprintf(path, strsize + 1, LOFINODE_PATH, vfsp->vfs_lofi_minor);
4791 4807
4792 4808 /*
4793 4809 * We may be inside a zone, so we need to use the /dev path, but
4794 4810 * it's created asynchronously, so we wait here.
4795 4811 */
4796 4812 for (;;) {
4797 4813 err = lookupname(path, UIO_SYSSPACE, FOLLOW, NULLVPP, vpp);
4798 4814
4799 4815 if (err != ENOENT)
4800 4816 break;
4801 4817
4802 4818 if ((err = delay_sig(hz / 8)) == EINTR)
4803 4819 break;
4804 4820 }
4805 4821
4806 4822 if (err)
4807 4823 *vpp = NULL;
4808 4824
4809 4825 kmem_free(path, strsize + 1);
4810 4826 return (err);
4811 4827 }
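/*
 * A minimal caller sketch (hypothetical helper): on success,
 * vfs_get_lofi() hands back the lofi vnode held by lookupname(), so the
 * caller releases it with VN_RELE() when finished.
 */
static void
example_with_lofi_vnode(vfs_t *vfsp)
{
	vnode_t *vp;

	if (vfs_get_lofi(vfsp, &vp) == 0) {
		/* ... use vp here, e.g. pass it to VOP_* routines ... */
		VN_RELE(vp);
	}
}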