6198 Let's EOL cachefs
--- old/usr/src/uts/common/fs/vfs.c
+++ new/usr/src/uts/common/fs/vfs.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1988, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
24 + * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
24 25 */
25 26
26 27 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
27 28 /* All Rights Reserved */
28 29
29 30 /*
30 31 * University Copyright- Copyright (c) 1982, 1986, 1988
31 32 * The Regents of the University of California
32 33 * All Rights Reserved
33 34 *
34 35 * University Acknowledgment- Portions of this document are derived from
35 36 * software developed by the University of California, Berkeley, and its
36 37 * contributors.
37 38 */
38 39
39 40 #include <sys/types.h>
40 41 #include <sys/t_lock.h>
41 42 #include <sys/param.h>
42 43 #include <sys/errno.h>
43 44 #include <sys/user.h>
44 45 #include <sys/fstyp.h>
45 46 #include <sys/kmem.h>
46 47 #include <sys/systm.h>
47 48 #include <sys/proc.h>
48 49 #include <sys/mount.h>
49 50 #include <sys/vfs.h>
50 51 #include <sys/vfs_opreg.h>
51 52 #include <sys/fem.h>
52 53 #include <sys/mntent.h>
53 54 #include <sys/stat.h>
54 55 #include <sys/statvfs.h>
55 56 #include <sys/statfs.h>
56 57 #include <sys/cred.h>
57 58 #include <sys/vnode.h>
58 59 #include <sys/rwstlock.h>
59 60 #include <sys/dnlc.h>
60 61 #include <sys/file.h>
61 62 #include <sys/time.h>
62 63 #include <sys/atomic.h>
63 64 #include <sys/cmn_err.h>
64 65 #include <sys/buf.h>
65 66 #include <sys/swap.h>
66 67 #include <sys/debug.h>
67 68 #include <sys/vnode.h>
68 69 #include <sys/modctl.h>
69 70 #include <sys/ddi.h>
70 71 #include <sys/pathname.h>
71 72 #include <sys/bootconf.h>
72 73 #include <sys/dumphdr.h>
73 74 #include <sys/dc_ki.h>
74 75 #include <sys/poll.h>
75 76 #include <sys/sunddi.h>
76 77 #include <sys/sysmacros.h>
77 78 #include <sys/zone.h>
78 79 #include <sys/policy.h>
79 80 #include <sys/ctfs.h>
80 81 #include <sys/objfs.h>
81 82 #include <sys/console.h>
82 83 #include <sys/reboot.h>
83 84 #include <sys/attr.h>
84 85 #include <sys/zio.h>
85 86 #include <sys/spa.h>
86 87 #include <sys/lofi.h>
87 88 #include <sys/bootprops.h>
88 89
89 90 #include <vm/page.h>
90 91
91 92 #include <fs/fs_subr.h>
92 93 /* Private interfaces to create vopstats-related data structures */
93 94 extern void initialize_vopstats(vopstats_t *);
94 95 extern vopstats_t *get_fstype_vopstats(struct vfs *, struct vfssw *);
95 96 extern vsk_anchor_t *get_vskstat_anchor(struct vfs *);
96 97
97 98 static void vfs_clearmntopt_nolock(mntopts_t *, const char *, int);
98 99 static void vfs_setmntopt_nolock(mntopts_t *, const char *,
99 100 const char *, int, int);
100 101 static int vfs_optionisset_nolock(const mntopts_t *, const char *, char **);
101 102 static void vfs_freemnttab(struct vfs *);
102 103 static void vfs_freeopt(mntopt_t *);
103 104 static void vfs_swapopttbl_nolock(mntopts_t *, mntopts_t *);
104 105 static void vfs_swapopttbl(mntopts_t *, mntopts_t *);
105 106 static void vfs_copyopttbl_extend(const mntopts_t *, mntopts_t *, int);
106 107 static void vfs_createopttbl_extend(mntopts_t *, const char *,
107 108 const mntopts_t *);
108 109 static char **vfs_copycancelopt_extend(char **const, int);
109 110 static void vfs_freecancelopt(char **);
110 111 static void getrootfs(char **, char **);
111 112 static int getmacpath(dev_info_t *, void *);
112 113 static void vfs_mnttabvp_setup(void);
113 114
114 115 struct ipmnt {
115 116 struct ipmnt *mip_next;
116 117 dev_t mip_dev;
117 118 struct vfs *mip_vfsp;
118 119 };
119 120
120 121 static kmutex_t vfs_miplist_mutex;
121 122 static struct ipmnt *vfs_miplist = NULL;
122 123 static struct ipmnt *vfs_miplist_end = NULL;
123 124
124 125 static kmem_cache_t *vfs_cache; /* Pointer to VFS kmem cache */
125 126
126 127 /*
127 128 * VFS global data.
128 129 */
129 130 vnode_t *rootdir; /* pointer to root inode vnode. */
130 131 vnode_t *devicesdir; /* pointer to inode of devices root */
131 132 vnode_t *devdir; /* pointer to inode of dev root */
132 133
133 134 char *server_rootpath; /* root path for diskless clients */
134 135 char *server_hostname; /* hostname of diskless server */
135 136
136 137 static struct vfs root;
137 138 static struct vfs devices;
138 139 static struct vfs dev;
139 140 struct vfs *rootvfs = &root; /* pointer to root vfs; head of VFS list. */
140 141 rvfs_t *rvfs_list; /* array of vfs ptrs for vfs hash list */
141 142 int vfshsz = 512; /* # of heads/locks in vfs hash arrays */
142 143 /* must be power of 2! */
143 144 timespec_t vfs_mnttab_ctime; /* mnttab created time */
144 145 timespec_t vfs_mnttab_mtime; /* mnttab last modified time */
145 146 char *vfs_dummyfstype = "\0";
146 147 struct pollhead vfs_pollhd; /* for mnttab pollers */
147 148 struct vnode *vfs_mntdummyvp; /* to fake mnttab read/write for file events */
148 149 int mntfstype; /* will be set once mnt fs is mounted */
149 150
150 151 /*
151 152 * Table for generic options recognized in the VFS layer and acted
152 153 * on at this level before parsing file system specific options.
153 154 * The nosuid option is stronger than any of the devices and setuid
154 155 * options, so those are canceled when nosuid is seen.
155 156 *
156 157 * All options which are added here need to be added to the
157 158 * list of standard options in usr/src/cmd/fs.d/fslib.c as well.
158 159 */
159 160 /*
160 161 * VFS Mount options table
161 162 */
162 163 static char *ro_cancel[] = { MNTOPT_RW, NULL };
163 164 static char *rw_cancel[] = { MNTOPT_RO, NULL };
164 165 static char *suid_cancel[] = { MNTOPT_NOSUID, NULL };
165 166 static char *nosuid_cancel[] = { MNTOPT_SUID, MNTOPT_DEVICES, MNTOPT_NODEVICES,
166 167 MNTOPT_NOSETUID, MNTOPT_SETUID, NULL };
167 168 static char *devices_cancel[] = { MNTOPT_NODEVICES, NULL };
168 169 static char *nodevices_cancel[] = { MNTOPT_DEVICES, NULL };
169 170 static char *setuid_cancel[] = { MNTOPT_NOSETUID, NULL };
170 171 static char *nosetuid_cancel[] = { MNTOPT_SETUID, NULL };
171 172 static char *nbmand_cancel[] = { MNTOPT_NONBMAND, NULL };
172 173 static char *nonbmand_cancel[] = { MNTOPT_NBMAND, NULL };
173 174 static char *exec_cancel[] = { MNTOPT_NOEXEC, NULL };
174 175 static char *noexec_cancel[] = { MNTOPT_EXEC, NULL };
175 176
176 177 static const mntopt_t mntopts[] = {
177 178 /*
178 179 * option name cancel options default arg flags
179 180 */
180 181 { MNTOPT_REMOUNT, NULL, NULL,
181 182 MO_NODISPLAY, (void *)0 },
182 183 { MNTOPT_RO, ro_cancel, NULL, 0,
183 184 (void *)0 },
184 185 { MNTOPT_RW, rw_cancel, NULL, 0,
185 186 (void *)0 },
186 187 { MNTOPT_SUID, suid_cancel, NULL, 0,
187 188 (void *)0 },
188 189 { MNTOPT_NOSUID, nosuid_cancel, NULL, 0,
189 190 (void *)0 },
190 191 { MNTOPT_DEVICES, devices_cancel, NULL, 0,
191 192 (void *)0 },
192 193 { MNTOPT_NODEVICES, nodevices_cancel, NULL, 0,
193 194 (void *)0 },
194 195 { MNTOPT_SETUID, setuid_cancel, NULL, 0,
195 196 (void *)0 },
196 197 { MNTOPT_NOSETUID, nosetuid_cancel, NULL, 0,
197 198 (void *)0 },
198 199 { MNTOPT_NBMAND, nbmand_cancel, NULL, 0,
199 200 (void *)0 },
200 201 { MNTOPT_NONBMAND, nonbmand_cancel, NULL, 0,
201 202 (void *)0 },
202 203 { MNTOPT_EXEC, exec_cancel, NULL, 0,
203 204 (void *)0 },
204 205 { MNTOPT_NOEXEC, noexec_cancel, NULL, 0,
205 206 (void *)0 },
206 207 };
207 208
208 209 const mntopts_t vfs_mntopts = {
209 210 sizeof (mntopts) / sizeof (mntopt_t),
210 211 (mntopt_t *)&mntopts[0]
211 212 };
212 213
213 214 /*
214 215 * File system operation dispatch functions.
215 216 */
216 217
217 218 int
218 219 fsop_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr)
219 220 {
220 221 return (*(vfsp)->vfs_op->vfs_mount)(vfsp, mvp, uap, cr);
221 222 }
222 223
223 224 int
224 225 fsop_unmount(vfs_t *vfsp, int flag, cred_t *cr)
225 226 {
226 227 return (*(vfsp)->vfs_op->vfs_unmount)(vfsp, flag, cr);
227 228 }
228 229
229 230 int
230 231 fsop_root(vfs_t *vfsp, vnode_t **vpp)
231 232 {
232 233 refstr_t *mntpt;
233 234 int ret = (*(vfsp)->vfs_op->vfs_root)(vfsp, vpp);
234 235 /*
235 236 * Make sure this root has a path. With lofs, it is possible to have
236 237 * a NULL mountpoint.
237 238 */
238 239 if (ret == 0 && vfsp->vfs_mntpt != NULL && (*vpp)->v_path == NULL) {
239 240 mntpt = vfs_getmntpoint(vfsp);
240 241 vn_setpath_str(*vpp, refstr_value(mntpt),
241 242 strlen(refstr_value(mntpt)));
242 243 refstr_rele(mntpt);
243 244 }
244 245
245 246 return (ret);
246 247 }
247 248
248 249 int
249 250 fsop_statfs(vfs_t *vfsp, statvfs64_t *sp)
250 251 {
251 252 return (*(vfsp)->vfs_op->vfs_statvfs)(vfsp, sp);
252 253 }
253 254
254 255 int
255 256 fsop_sync(vfs_t *vfsp, short flag, cred_t *cr)
256 257 {
257 258 return (*(vfsp)->vfs_op->vfs_sync)(vfsp, flag, cr);
258 259 }
259 260
260 261 int
261 262 fsop_vget(vfs_t *vfsp, vnode_t **vpp, fid_t *fidp)
262 263 {
263 264 /*
264 265 * In order to handle system attribute fids in a manner
265 266 * transparent to the underlying fs, we embed the fid for
266 267 * the sysattr parent object in the sysattr fid and tack on
267 268 * some extra bytes that only the sysattr layer knows about.
268 269 *
269 270 * This guarantees that sysattr fids are larger than other fids
270 271 * for this vfs. If the vfs supports the sysattr view interface
271 272 * (as indicated by VFSFT_SYSATTR_VIEWS), we cannot have a size
272 273 * collision with XATTR_FIDSZ.
273 274 */
274 275 if (vfs_has_feature(vfsp, VFSFT_SYSATTR_VIEWS) &&
275 276 fidp->fid_len == XATTR_FIDSZ)
276 277 return (xattr_dir_vget(vfsp, vpp, fidp));
277 278
278 279 return (*(vfsp)->vfs_op->vfs_vget)(vfsp, vpp, fidp);
279 280 }
280 281
281 282 int
282 283 fsop_mountroot(vfs_t *vfsp, enum whymountroot reason)
283 284 {
284 285 return (*(vfsp)->vfs_op->vfs_mountroot)(vfsp, reason);
285 286 }
286 287
287 288 void
288 289 fsop_freefs(vfs_t *vfsp)
289 290 {
290 291 (*(vfsp)->vfs_op->vfs_freevfs)(vfsp);
291 292 }
292 293
293 294 int
294 295 fsop_vnstate(vfs_t *vfsp, vnode_t *vp, vntrans_t nstate)
295 296 {
296 297 return ((*(vfsp)->vfs_op->vfs_vnstate)(vfsp, vp, nstate));
297 298 }
298 299
299 300 int
300 301 fsop_sync_by_kind(int fstype, short flag, cred_t *cr)
301 302 {
302 303 ASSERT((fstype >= 0) && (fstype < nfstype));
303 304
304 305 if (ALLOCATED_VFSSW(&vfssw[fstype]) && VFS_INSTALLED(&vfssw[fstype]))
305 306 return (*vfssw[fstype].vsw_vfsops.vfs_sync) (NULL, flag, cr);
306 307 else
307 308 return (ENOTSUP);
308 309 }
309 310
310 311 /*
311 312 * File system initialization. vfs_setfsops() must be called from a file
312 313 * system's init routine.
313 314 */
314 315
315 316 static int
316 317 fs_copyfsops(const fs_operation_def_t *template, vfsops_t *actual,
317 318 int *unused_ops)
318 319 {
319 320 static const fs_operation_trans_def_t vfs_ops_table[] = {
320 321 VFSNAME_MOUNT, offsetof(vfsops_t, vfs_mount),
321 322 fs_nosys, fs_nosys,
322 323
323 324 VFSNAME_UNMOUNT, offsetof(vfsops_t, vfs_unmount),
324 325 fs_nosys, fs_nosys,
325 326
326 327 VFSNAME_ROOT, offsetof(vfsops_t, vfs_root),
327 328 fs_nosys, fs_nosys,
328 329
329 330 VFSNAME_STATVFS, offsetof(vfsops_t, vfs_statvfs),
330 331 fs_nosys, fs_nosys,
331 332
332 333 VFSNAME_SYNC, offsetof(vfsops_t, vfs_sync),
333 334 (fs_generic_func_p) fs_sync,
334 335 (fs_generic_func_p) fs_sync, /* No errors allowed */
335 336
336 337 VFSNAME_VGET, offsetof(vfsops_t, vfs_vget),
337 338 fs_nosys, fs_nosys,
338 339
339 340 VFSNAME_MOUNTROOT, offsetof(vfsops_t, vfs_mountroot),
340 341 fs_nosys, fs_nosys,
341 342
342 343 VFSNAME_FREEVFS, offsetof(vfsops_t, vfs_freevfs),
343 344 (fs_generic_func_p)fs_freevfs,
344 345 (fs_generic_func_p)fs_freevfs, /* Shouldn't fail */
345 346
346 347 VFSNAME_VNSTATE, offsetof(vfsops_t, vfs_vnstate),
347 348 (fs_generic_func_p)fs_nosys,
348 349 (fs_generic_func_p)fs_nosys,
349 350
350 351 NULL, 0, NULL, NULL
351 352 };
352 353
353 354 return (fs_build_vector(actual, unused_ops, vfs_ops_table, template));
354 355 }
355 356
356 357 void
357 358 zfs_boot_init() {
358 359
359 360 if (strcmp(rootfs.bo_fstype, MNTTYPE_ZFS) == 0)
360 361 spa_boot_init();
361 362 }
362 363
363 364 int
364 365 vfs_setfsops(int fstype, const fs_operation_def_t *template, vfsops_t **actual)
365 366 {
366 367 int error;
367 368 int unused_ops;
368 369
369 370 /*
370 371 * Verify that fstype refers to a valid fs. Note that
371 372 * 0 is valid since it's used to set "stray" ops.
372 373 */
373 374 if ((fstype < 0) || (fstype >= nfstype))
374 375 return (EINVAL);
375 376
376 377 if (!ALLOCATED_VFSSW(&vfssw[fstype]))
377 378 return (EINVAL);
378 379
379 380 /* Set up the operations vector. */
380 381
381 382 error = fs_copyfsops(template, &vfssw[fstype].vsw_vfsops, &unused_ops);
382 383
383 384 if (error != 0)
384 385 return (error);
385 386
386 387 vfssw[fstype].vsw_flag |= VSW_INSTALLED;
387 388
388 389 if (actual != NULL)
389 390 *actual = &vfssw[fstype].vsw_vfsops;
390 391
391 392 #if DEBUG
392 393 if (unused_ops != 0)
393 394 cmn_err(CE_WARN, "vfs_setfsops: %s: %d operations supplied "
394 395 "but not used", vfssw[fstype].vsw_name, unused_ops);
395 396 #endif
396 397
397 398 return (0);
398 399 }
399 400
400 401 int
401 402 vfs_makefsops(const fs_operation_def_t *template, vfsops_t **actual)
402 403 {
403 404 int error;
404 405 int unused_ops;
405 406
406 407 *actual = (vfsops_t *)kmem_alloc(sizeof (vfsops_t), KM_SLEEP);
407 408
408 409 error = fs_copyfsops(template, *actual, &unused_ops);
409 410 if (error != 0) {
410 411 kmem_free(*actual, sizeof (vfsops_t));
411 412 *actual = NULL;
412 413 return (error);
413 414 }
414 415
415 416 return (0);
416 417 }
417 418
418 419 /*
419 420 * Free a vfsops structure created as a result of vfs_makefsops().
420 421 * NOTE: For a vfsops structure initialized by vfs_setfsops(), use
421 422 * vfs_freevfsops_by_type().
422 423 */
423 424 void
424 425 vfs_freevfsops(vfsops_t *vfsops)
425 426 {
426 427 kmem_free(vfsops, sizeof (vfsops_t));
427 428 }
428 429
429 430 /*
430 431 * Since the vfsops structure is part of the vfssw table and wasn't
431 432 * really allocated, we're not really freeing anything. We keep
432 433 * the name for consistency with vfs_freevfsops(). We do, however,
433 434 * need to take care of a little bookkeeping.
434 435 * NOTE: For a vfsops structure created by vfs_setfsops(), use
435 436 * vfs_freevfsops_by_type().
436 437 */
437 438 int
438 439 vfs_freevfsops_by_type(int fstype)
439 440 {
440 441
441 442 /* Verify that fstype refers to a loaded fs (and not fsid 0). */
442 443 if ((fstype <= 0) || (fstype >= nfstype))
443 444 return (EINVAL);
444 445
445 446 WLOCK_VFSSW();
446 447 if ((vfssw[fstype].vsw_flag & VSW_INSTALLED) == 0) {
447 448 WUNLOCK_VFSSW();
448 449 return (EINVAL);
449 450 }
450 451
451 452 vfssw[fstype].vsw_flag &= ~VSW_INSTALLED;
452 453 WUNLOCK_VFSSW();
453 454
454 455 return (0);
455 456 }
456 457
457 458 /* Support routines used to reference vfs_op */
458 459
459 460 /* Set the operations vector for a vfs */
460 461 void
461 462 vfs_setops(vfs_t *vfsp, vfsops_t *vfsops)
462 463 {
463 464 vfsops_t *op;
464 465
465 466 ASSERT(vfsp != NULL);
466 467 ASSERT(vfsops != NULL);
467 468
468 469 op = vfsp->vfs_op;
469 470 membar_consumer();
470 471 if (vfsp->vfs_femhead == NULL &&
471 472 atomic_cas_ptr(&vfsp->vfs_op, op, vfsops) == op) {
472 473 return;
473 474 }
474 475 fsem_setvfsops(vfsp, vfsops);
475 476 }
476 477
477 478 /* Retrieve the operations vector for a vfs */
478 479 vfsops_t *
479 480 vfs_getops(vfs_t *vfsp)
480 481 {
481 482 vfsops_t *op;
482 483
483 484 ASSERT(vfsp != NULL);
484 485
485 486 op = vfsp->vfs_op;
486 487 membar_consumer();
487 488 if (vfsp->vfs_femhead == NULL && op == vfsp->vfs_op) {
488 489 return (op);
489 490 } else {
490 491 return (fsem_getvfsops(vfsp));
491 492 }
492 493 }
493 494
494 495 /*
495 496 * Returns non-zero (1) if the vfsops matches that of the vfs.
496 497 * Returns zero (0) if not.
497 498 */
498 499 int
499 500 vfs_matchops(vfs_t *vfsp, vfsops_t *vfsops)
500 501 {
501 502 return (vfs_getops(vfsp) == vfsops);
502 503 }
503 504
504 505 /*
505 506 * Returns non-zero (1) if the file system has installed a non-default,
506 507 * non-error vfs_sync routine. Returns zero (0) otherwise.
507 508 */
508 509 int
509 510 vfs_can_sync(vfs_t *vfsp)
510 511 {
511 512 /* vfs_sync() routine is not the default/error function */
512 513 return (vfs_getops(vfsp)->vfs_sync != fs_sync);
513 514 }
514 515
515 516 /*
516 517 * Initialize a vfs structure.
517 518 */
518 519 void
519 520 vfs_init(vfs_t *vfsp, vfsops_t *op, void *data)
520 521 {
521 522 /* Other initialization has been moved to vfs_alloc() */
522 523 vfsp->vfs_count = 0;
523 524 vfsp->vfs_next = vfsp;
524 525 vfsp->vfs_prev = vfsp;
525 526 vfsp->vfs_zone_next = vfsp;
526 527 vfsp->vfs_zone_prev = vfsp;
527 528 vfsp->vfs_lofi_minor = 0;
528 529 sema_init(&vfsp->vfs_reflock, 1, NULL, SEMA_DEFAULT, NULL);
529 530 vfsimpl_setup(vfsp);
530 531 vfsp->vfs_data = (data);
531 532 vfs_setops((vfsp), (op));
532 533 }
533 534
534 535 /*
535 536 * Allocate and initialize the vfs implementation private data
536 537 * structure, vfs_impl_t.
537 538 */
538 539 void
539 540 vfsimpl_setup(vfs_t *vfsp)
540 541 {
541 542 int i;
542 543
543 544 if (vfsp->vfs_implp != NULL) {
544 545 return;
545 546 }
546 547
547 548 vfsp->vfs_implp = kmem_alloc(sizeof (vfs_impl_t), KM_SLEEP);
548 549 /* Note that these are #define'd in vfs.h */
549 550 vfsp->vfs_vskap = NULL;
550 551 vfsp->vfs_fstypevsp = NULL;
551 552
552 553 /* Set size of counted array, then zero the array */
553 554 vfsp->vfs_featureset[0] = VFS_FEATURE_MAXSZ - 1;
554 555 for (i = 1; i < VFS_FEATURE_MAXSZ; i++) {
555 556 vfsp->vfs_featureset[i] = 0;
556 557 }
557 558 }
558 559
559 560 /*
560 561 * Release the vfs_impl_t structure, if it exists. Some unbundled
561 562 * filesystems may not use the newer version of vfs and thus
562 563 * would not contain this implementation private data structure.
563 564 */
564 565 void
565 566 vfsimpl_teardown(vfs_t *vfsp)
566 567 {
567 568 vfs_impl_t *vip = vfsp->vfs_implp;
568 569
569 570 if (vip == NULL)
570 571 return;
571 572
572 573 kmem_free(vfsp->vfs_implp, sizeof (vfs_impl_t));
573 574 vfsp->vfs_implp = NULL;
574 575 }
575 576
576 577 /*
577 578 * VFS system calls: mount, umount, syssync, statfs, fstatfs, statvfs,
578 579 * fstatvfs, and sysfs moved to common/syscall.
579 580 */
580 581
581 582 /*
582 583 * Update every mounted file system. We call the vfs_sync operation of
583 584 * each file system type, passing it a NULL vfsp to indicate that all
584 585 * mounted file systems of that type should be updated.
585 586 */
586 587 void
587 588 vfs_sync(int flag)
588 589 {
589 590 struct vfssw *vswp;
590 591 RLOCK_VFSSW();
591 592 for (vswp = &vfssw[1]; vswp < &vfssw[nfstype]; vswp++) {
592 593 if (ALLOCATED_VFSSW(vswp) && VFS_INSTALLED(vswp)) {
593 594 vfs_refvfssw(vswp);
594 595 RUNLOCK_VFSSW();
595 596 (void) (*vswp->vsw_vfsops.vfs_sync)(NULL, flag,
596 597 CRED());
597 598 vfs_unrefvfssw(vswp);
598 599 RLOCK_VFSSW();
599 600 }
600 601 }
601 602 RUNLOCK_VFSSW();
602 603 }
603 604
604 605 void
605 606 sync(void)
606 607 {
607 608 vfs_sync(0);
608 609 }
609 610
610 611 /*
611 612 * External routines.
612 613 */
613 614
614 615 krwlock_t vfssw_lock; /* lock accesses to vfssw */
615 616
616 617 /*
617 618 * Lock for accessing the vfs linked list. Initialized in vfs_mountroot(),
618 619 * but otherwise should be accessed only via vfs_list_lock() and
619 620 * vfs_list_unlock(). Also used to protect the timestamp for mods to the list.
620 621 */
621 622 static krwlock_t vfslist;
622 623
623 624 /*
624 625 * Mount devfs on /devices. This is done right after root is mounted
625 626 * to provide device access support for the system
626 627 */
627 628 static void
628 629 vfs_mountdevices(void)
629 630 {
630 631 struct vfssw *vsw;
631 632 struct vnode *mvp;
632 633 struct mounta mounta = { /* fake mounta for devfs_mount() */
633 634 NULL,
634 635 NULL,
635 636 MS_SYSSPACE,
636 637 NULL,
637 638 NULL,
638 639 0,
639 640 NULL,
640 641 0
641 642 };
642 643
643 644 /*
644 645 * _init devfs module to fill in the vfssw
645 646 */
646 647 if (modload("fs", "devfs") == -1)
647 648 panic("Cannot _init devfs module");
648 649
649 650 /*
650 651 * Hold vfs
651 652 */
652 653 RLOCK_VFSSW();
653 654 vsw = vfs_getvfsswbyname("devfs");
654 655 VFS_INIT(&devices, &vsw->vsw_vfsops, NULL);
655 656 VFS_HOLD(&devices);
656 657
657 658 /*
658 659 * Locate mount point
659 660 */
660 661 if (lookupname("/devices", UIO_SYSSPACE, FOLLOW, NULLVPP, &mvp))
661 662 panic("Cannot find /devices");
662 663
663 664 /*
664 665 * Perform the mount of /devices
665 666 */
666 667 if (VFS_MOUNT(&devices, mvp, &mounta, CRED()))
667 668 panic("Cannot mount /devices");
668 669
669 670 RUNLOCK_VFSSW();
670 671
671 672 /*
672 673 * Set appropriate members and add to vfs list for mnttab display
673 674 */
674 675 vfs_setresource(&devices, "/devices", 0);
675 676 vfs_setmntpoint(&devices, "/devices", 0);
676 677
677 678 /*
678 679 * Hold the root of /devices so it won't go away
679 680 */
680 681 if (VFS_ROOT(&devices, &devicesdir))
681 682 panic("vfs_mountdevices: not devices root");
682 683
683 684 if (vfs_lock(&devices) != 0) {
684 685 VN_RELE(devicesdir);
685 686 cmn_err(CE_NOTE, "Cannot acquire vfs_lock of /devices");
686 687 return;
687 688 }
688 689
689 690 if (vn_vfswlock(mvp) != 0) {
690 691 vfs_unlock(&devices);
691 692 VN_RELE(devicesdir);
692 693 cmn_err(CE_NOTE, "Cannot acquire vfswlock of /devices");
693 694 return;
694 695 }
695 696
696 697 vfs_add(mvp, &devices, 0);
697 698 vn_vfsunlock(mvp);
698 699 vfs_unlock(&devices);
699 700 VN_RELE(devicesdir);
700 701 }
701 702
702 703 /*
703 704 * mount the first instance of /dev to root and remain mounted
704 705 */
705 706 static void
706 707 vfs_mountdev1(void)
707 708 {
708 709 struct vfssw *vsw;
709 710 struct vnode *mvp;
710 711 struct mounta mounta = { /* fake mounta for sdev_mount() */
711 712 NULL,
712 713 NULL,
713 714 MS_SYSSPACE | MS_OVERLAY,
714 715 NULL,
715 716 NULL,
716 717 0,
717 718 NULL,
718 719 0
719 720 };
720 721
721 722 /*
722 723 * _init dev module to fill in the vfssw
723 724 */
724 725 if (modload("fs", "dev") == -1)
725 726 cmn_err(CE_PANIC, "Cannot _init dev module\n");
726 727
727 728 /*
728 729 * Hold vfs
729 730 */
730 731 RLOCK_VFSSW();
731 732 vsw = vfs_getvfsswbyname("dev");
732 733 VFS_INIT(&dev, &vsw->vsw_vfsops, NULL);
733 734 VFS_HOLD(&dev);
734 735
735 736 /*
736 737 * Locate mount point
737 738 */
738 739 if (lookupname("/dev", UIO_SYSSPACE, FOLLOW, NULLVPP, &mvp))
739 740 cmn_err(CE_PANIC, "Cannot find /dev\n");
740 741
741 742 /*
742 743 * Perform the mount of /dev
743 744 */
744 745 if (VFS_MOUNT(&dev, mvp, &mounta, CRED()))
745 746 cmn_err(CE_PANIC, "Cannot mount /dev 1\n");
746 747
747 748 RUNLOCK_VFSSW();
748 749
749 750 /*
750 751 * Set appropriate members and add to vfs list for mnttab display
751 752 */
752 753 vfs_setresource(&dev, "/dev", 0);
753 754 vfs_setmntpoint(&dev, "/dev", 0);
754 755
755 756 /*
756 757 * Hold the root of /dev so it won't go away
757 758 */
758 759 if (VFS_ROOT(&dev, &devdir))
759 760 cmn_err(CE_PANIC, "vfs_mountdev1: not dev root");
760 761
761 762 if (vfs_lock(&dev) != 0) {
762 763 VN_RELE(devdir);
763 764 cmn_err(CE_NOTE, "Cannot acquire vfs_lock of /dev");
764 765 return;
765 766 }
766 767
767 768 if (vn_vfswlock(mvp) != 0) {
768 769 vfs_unlock(&dev);
769 770 VN_RELE(devdir);
770 771 cmn_err(CE_NOTE, "Cannot acquire vfswlock of /dev");
771 772 return;
772 773 }
773 774
774 775 vfs_add(mvp, &dev, 0);
775 776 vn_vfsunlock(mvp);
776 777 vfs_unlock(&dev);
777 778 VN_RELE(devdir);
778 779 }
779 780
780 781 /*
781 782 * Mount required filesystem. This is done right after root is mounted.
782 783 */
783 784 static void
784 785 vfs_mountfs(char *module, char *spec, char *path)
785 786 {
786 787 struct vnode *mvp;
787 788 struct mounta mounta;
788 789 vfs_t *vfsp;
789 790
790 791 mounta.flags = MS_SYSSPACE | MS_DATA;
791 792 mounta.fstype = module;
792 793 mounta.spec = spec;
793 794 mounta.dir = path;
794 795 if (lookupname(path, UIO_SYSSPACE, FOLLOW, NULLVPP, &mvp)) {
795 796 cmn_err(CE_WARN, "Cannot find %s", path);
796 797 return;
797 798 }
798 799 if (domount(NULL, &mounta, mvp, CRED(), &vfsp))
799 800 cmn_err(CE_WARN, "Cannot mount %s", path);
800 801 else
801 802 VFS_RELE(vfsp);
802 803 VN_RELE(mvp);
803 804 }
804 805
805 806 /*
806 807 * vfs_mountroot is called by main() to mount the root filesystem.
807 808 */
808 809 void
809 810 vfs_mountroot(void)
810 811 {
811 812 struct vnode *rvp = NULL;
812 813 char *path;
813 814 size_t plen;
814 815 struct vfssw *vswp;
815 816 proc_t *p;
816 817
817 818 rw_init(&vfssw_lock, NULL, RW_DEFAULT, NULL);
818 819 rw_init(&vfslist, NULL, RW_DEFAULT, NULL);
819 820
820 821 /*
821 822 * Alloc the vfs hash bucket array and locks
822 823 */
823 824 rvfs_list = kmem_zalloc(vfshsz * sizeof (rvfs_t), KM_SLEEP);
824 825
825 826 /*
826 827 * Call machine-dependent routine "rootconf" to choose a root
827 828 * file system type.
828 829 */
829 830 if (rootconf())
830 831 panic("vfs_mountroot: cannot mount root");
831 832 /*
832 833 * Get vnode for '/'. Set up rootdir, u.u_rdir and u.u_cdir
833 834 * to point to it. These are used by lookuppn() so that it
834 835 * knows where to start from ('/' or '.').
835 836 */
836 837 vfs_setmntpoint(rootvfs, "/", 0);
837 838 if (VFS_ROOT(rootvfs, &rootdir))
838 839 panic("vfs_mountroot: no root vnode");
839 840
840 841 /*
841 842 * At this point, the process tree consists of p0 and possibly some
842 843 * direct children of p0. (i.e. there are no grandchildren)
843 844 *
844 845 * Walk through them all, setting their current directory.
845 846 */
846 847 mutex_enter(&pidlock);
847 848 for (p = practive; p != NULL; p = p->p_next) {
848 849 ASSERT(p == &p0 || p->p_parent == &p0);
849 850
850 851 PTOU(p)->u_cdir = rootdir;
851 852 VN_HOLD(PTOU(p)->u_cdir);
852 853 PTOU(p)->u_rdir = NULL;
853 854 }
854 855 mutex_exit(&pidlock);
855 856
856 857 /*
857 858 * Setup the global zone's rootvp, now that it exists.
858 859 */
859 860 global_zone->zone_rootvp = rootdir;
860 861 VN_HOLD(global_zone->zone_rootvp);
861 862
862 863 /*
863 864 * Notify the module code that it can begin using the
864 865 * root filesystem instead of the boot program's services.
865 866 */
866 867 modrootloaded = 1;
867 868
868 869 /*
869 870 * Special handling for a ZFS root file system.
870 871 */
871 872 zfs_boot_init();
872 873
873 874 /*
874 875 * Set up mnttab information for root
875 876 */
876 877 vfs_setresource(rootvfs, rootfs.bo_name, 0);
877 878
878 879 /*
879 880 * Notify cluster software that the root filesystem is available.
880 881 */
881 882 clboot_mountroot();
882 883
883 884 /* Now that we're all done with the root FS, set up its vopstats */
884 885 if ((vswp = vfs_getvfsswbyvfsops(vfs_getops(rootvfs))) != NULL) {
885 886 /* Set flag for statistics collection */
886 887 if (vswp->vsw_flag & VSW_STATS) {
887 888 initialize_vopstats(&rootvfs->vfs_vopstats);
888 889 rootvfs->vfs_flag |= VFS_STATS;
889 890 rootvfs->vfs_fstypevsp =
890 891 get_fstype_vopstats(rootvfs, vswp);
891 892 rootvfs->vfs_vskap = get_vskstat_anchor(rootvfs);
892 893 }
893 894 vfs_unrefvfssw(vswp);
894 895 }
895 896
896 897 /*
897 898 * Mount /devices, /dev instance 1, /system/contract, /etc/mnttab,
898 899 * /etc/svc/volatile, /etc/dfs/sharetab, /system/object, and /proc.
899 900 */
900 901 vfs_mountdevices();
901 902 vfs_mountdev1();
902 903
903 904 vfs_mountfs("ctfs", "ctfs", CTFS_ROOT);
904 905 vfs_mountfs("proc", "/proc", "/proc");
905 906 vfs_mountfs("mntfs", "/etc/mnttab", "/etc/mnttab");
906 907 vfs_mountfs("tmpfs", "/etc/svc/volatile", "/etc/svc/volatile");
907 908 vfs_mountfs("objfs", "objfs", OBJFS_ROOT);
908 909
909 910 if (getzoneid() == GLOBAL_ZONEID) {
910 911 vfs_mountfs("sharefs", "sharefs", "/etc/dfs/sharetab");
911 912 }
912 913
913 914 #ifdef __sparc
914 915 /*
915 916 * This bit of magic can go away when we convert sparc to
916 917 * the new boot architecture based on ramdisk.
917 918 *
918 919 * Booting off a mirrored root volume:
919 920 * At this point, we have booted and mounted root on a
920 921 * single component of the mirror. Complete the boot
921 922 * by configuring SVM and converting the root to the
922 923 * dev_t of the mirrored root device. This dev_t conversion
923 924 * only works because the underlying device doesn't change.
924 925 */
925 926 if (root_is_svm) {
926 927 if (svm_rootconf()) {
927 928 panic("vfs_mountroot: cannot remount root");
928 929 }
929 930
930 931 /*
931 932 * mnttab should reflect the new root device
932 933 */
933 934 vfs_lock_wait(rootvfs);
934 935 vfs_setresource(rootvfs, rootfs.bo_name, 0);
935 936 vfs_unlock(rootvfs);
936 937 }
937 938 #endif /* __sparc */
938 939
939 940 if (strcmp(rootfs.bo_fstype, "zfs") != 0) {
940 941 /*
941 942 * Look up the root device via devfs so that a dv_node is
942 943 * created for it. The vnode is never VN_RELE()ed.
943 944 * We allocate more than MAXPATHLEN so that the
944 945 * buffer passed to i_ddi_prompath_to_devfspath() is
945 946 * exactly MAXPATHLEN (the function expects a buffer
946 947 * of that length).
947 948 */
948 949 plen = strlen("/devices");
949 950 path = kmem_alloc(plen + MAXPATHLEN, KM_SLEEP);
950 951 (void) strcpy(path, "/devices");
951 952
952 953 if (i_ddi_prompath_to_devfspath(rootfs.bo_name, path + plen)
953 954 != DDI_SUCCESS ||
954 955 lookupname(path, UIO_SYSSPACE, FOLLOW, NULLVPP, &rvp)) {
955 956
956 957 /* NUL terminate in case "path" has garbage */
957 958 path[plen + MAXPATHLEN - 1] = '\0';
958 959 #ifdef DEBUG
959 960 cmn_err(CE_WARN, "!Cannot lookup root device: %s",
960 961 path);
961 962 #endif
962 963 }
963 964 kmem_free(path, plen + MAXPATHLEN);
964 965 }
965 966
966 967 vfs_mnttabvp_setup();
967 968 }
968 969
969 970 /*
970 971 * Check to see if our "block device" is actually a file. If so,
971 972 * automatically add a lofi device, and keep track of this fact.
972 973 */
973 974 static int
974 975 lofi_add(const char *fsname, struct vfs *vfsp,
975 976 mntopts_t *mntopts, struct mounta *uap)
976 977 {
977 978 int fromspace = (uap->flags & MS_SYSSPACE) ?
978 979 UIO_SYSSPACE : UIO_USERSPACE;
979 980 struct lofi_ioctl *li = NULL;
980 981 struct vnode *vp = NULL;
981 982 struct pathname pn = { NULL };
982 983 ldi_ident_t ldi_id;
983 984 ldi_handle_t ldi_hdl;
984 985 vfssw_t *vfssw;
985 986 int minor;
986 987 int err = 0;
987 988
988 989 if ((vfssw = vfs_getvfssw(fsname)) == NULL)
989 990 return (0);
990 991
991 992 if (!(vfssw->vsw_flag & VSW_CANLOFI)) {
992 993 vfs_unrefvfssw(vfssw);
993 994 return (0);
994 995 }
995 996
996 997 vfs_unrefvfssw(vfssw);
997 998 vfssw = NULL;
998 999
999 1000 if (pn_get(uap->spec, fromspace, &pn) != 0)
1000 1001 return (0);
1001 1002
1002 1003 if (lookupname(uap->spec, fromspace, FOLLOW, NULL, &vp) != 0)
1003 1004 goto out;
1004 1005
1005 1006 if (vp->v_type != VREG)
1006 1007 goto out;
1007 1008
1008 1009 /* OK, this is a lofi mount. */
1009 1010
1010 1011 if ((uap->flags & (MS_REMOUNT|MS_GLOBAL)) ||
1011 1012 vfs_optionisset_nolock(mntopts, MNTOPT_SUID, NULL) ||
1012 1013 vfs_optionisset_nolock(mntopts, MNTOPT_SETUID, NULL) ||
1013 1014 vfs_optionisset_nolock(mntopts, MNTOPT_DEVICES, NULL)) {
1014 1015 err = EINVAL;
1015 1016 goto out;
1016 1017 }
1017 1018
1018 1019 ldi_id = ldi_ident_from_anon();
1019 1020 li = kmem_zalloc(sizeof (*li), KM_SLEEP);
1020 1021 (void) strlcpy(li->li_filename, pn.pn_path, MAXPATHLEN);
1021 1022
1022 1023 err = ldi_open_by_name("/dev/lofictl", FREAD | FWRITE, kcred,
1023 1024 &ldi_hdl, ldi_id);
1024 1025
1025 1026 if (err)
1026 1027 goto out2;
1027 1028
1028 1029 err = ldi_ioctl(ldi_hdl, LOFI_MAP_FILE, (intptr_t)li,
1029 1030 FREAD | FWRITE | FKIOCTL, kcred, &minor);
1030 1031
1031 1032 (void) ldi_close(ldi_hdl, FREAD | FWRITE, kcred);
1032 1033
1033 1034 if (!err)
1034 1035 vfsp->vfs_lofi_minor = minor;
1035 1036
1036 1037 out2:
1037 1038 ldi_ident_release(ldi_id);
1038 1039 out:
1039 1040 if (li != NULL)
1040 1041 kmem_free(li, sizeof (*li));
1041 1042 if (vp != NULL)
1042 1043 VN_RELE(vp);
1043 1044 pn_free(&pn);
1044 1045 return (err);
1045 1046 }
1046 1047
1047 1048 static void
1048 1049 lofi_remove(struct vfs *vfsp)
1049 1050 {
1050 1051 struct lofi_ioctl *li = NULL;
1051 1052 ldi_ident_t ldi_id;
1052 1053 ldi_handle_t ldi_hdl;
1053 1054 int err;
1054 1055
1055 1056 if (vfsp->vfs_lofi_minor == 0)
1056 1057 return;
1057 1058
1058 1059 ldi_id = ldi_ident_from_anon();
1059 1060
1060 1061 li = kmem_zalloc(sizeof (*li), KM_SLEEP);
1061 1062 li->li_minor = vfsp->vfs_lofi_minor;
1062 1063 li->li_cleanup = B_TRUE;
1063 1064
1064 1065 err = ldi_open_by_name("/dev/lofictl", FREAD | FWRITE, kcred,
1065 1066 &ldi_hdl, ldi_id);
1066 1067
1067 1068 if (err)
1068 1069 goto out;
1069 1070
1070 1071 err = ldi_ioctl(ldi_hdl, LOFI_UNMAP_FILE_MINOR, (intptr_t)li,
1071 1072 FREAD | FWRITE | FKIOCTL, kcred, NULL);
1072 1073
1073 1074 (void) ldi_close(ldi_hdl, FREAD | FWRITE, kcred);
1074 1075
1075 1076 if (!err)
1076 1077 vfsp->vfs_lofi_minor = 0;
1077 1078
1078 1079 out:
1079 1080 ldi_ident_release(ldi_id);
1080 1081 if (li != NULL)
1081 1082 kmem_free(li, sizeof (*li));
1082 1083 }
1083 1084
1084 1085 /*
1085 1086 * Common mount code. Called from the system call entry point, from autofs,
1086 1087 * nfsv4 trigger mounts, and from pxfs.
1087 1088 *
1088 1089 * Takes the effective file system type, mount arguments, the mount point
1089 1090 * vnode, flags specifying whether the mount is a remount and whether it
1090 1091 * should be entered into the vfs list, and credentials. Fills in its vfspp
1091 1092 * parameter with the mounted file system instance's vfs.
1092 1093 *
1093 1094 * Note that the effective file system type is specified as a string. It may
1094 1095 * be null, in which case it's determined from the mount arguments, and may
1095 1096 * differ from the type specified in the mount arguments; this is a hook to
1096 1097 * allow interposition when instantiating file system instances.
1097 1098 *
1098 1099 * The caller is responsible for releasing its own hold on the mount point
1099 1100 * vp (this routine does its own hold when necessary).
1100 1101 * Also note that for remounts, the mount point vp should be the vnode for
1101 1102 * the root of the file system rather than the vnode that the file system
1102 1103 * is mounted on top of.
1103 1104 */
1104 1105 int
1105 1106 domount(char *fsname, struct mounta *uap, vnode_t *vp, struct cred *credp,
1106 1107 struct vfs **vfspp)
1107 1108 {
1108 1109 struct vfssw *vswp;
1109 1110 vfsops_t *vfsops;
1110 1111 struct vfs *vfsp;
1111 1112 struct vnode *bvp;
1112 1113 dev_t bdev = 0;
1113 1114 mntopts_t mnt_mntopts;
1114 1115 int error = 0;
1115 1116 int copyout_error = 0;
1116 1117 int ovflags;
1117 1118 char *opts = uap->optptr;
1118 1119 char *inargs = opts;
1119 1120 int optlen = uap->optlen;
1120 1121 int remount;
1121 1122 int rdonly;
1122 1123 int nbmand = 0;
1123 1124 int delmip = 0;
1124 1125 int addmip = 0;
1125 1126 int splice = ((uap->flags & MS_NOSPLICE) == 0);
1126 1127 int fromspace = (uap->flags & MS_SYSSPACE) ?
1127 1128 UIO_SYSSPACE : UIO_USERSPACE;
1128 1129 char *resource = NULL, *mountpt = NULL;
1129 1130 refstr_t *oldresource, *oldmntpt;
1130 1131 struct pathname pn, rpn;
1131 1132 vsk_anchor_t *vskap;
1132 1133 char fstname[FSTYPSZ];
1133 1134 zone_t *zone;
1134 1135
1135 1136 /*
1136 1137 * The v_flag value for the mount point vp is permanently set
1137 1138 * to VVFSLOCK so that no one bypasses the vn_vfs*locks routine
1138 1139 * for mount point locking.
1139 1140 */
1140 1141 mutex_enter(&vp->v_lock);
1141 1142 vp->v_flag |= VVFSLOCK;
1142 1143 mutex_exit(&vp->v_lock);
1143 1144
1144 1145 mnt_mntopts.mo_count = 0;
1145 1146 /*
1146 1147 * Find the ops vector to use to invoke the file system-specific mount
1147 1148 * method. If the fsname argument is non-NULL, use it directly.
1148 1149 * Otherwise, dig the file system type information out of the mount
1149 1150 * arguments.
1150 1151 *
1151 1152 * A side effect is to hold the vfssw entry.
1152 1153 *
1153 1154 * Mount arguments can be specified in several ways, which are
1154 1155 * distinguished by flag bit settings. The preferred way is to set
1155 1156 * MS_OPTIONSTR, indicating an 8 argument mount with the file system
1156 1157 * type supplied as a character string and the last two arguments
1157 1158 * being a pointer to a character buffer and the size of the buffer.
1158 1159 * On entry, the buffer holds a null terminated list of options; on
1159 1160 * return, the string is the list of options the file system
1160 1161 * recognized. If MS_DATA is set arguments five and six point to a
1161 1162 * block of binary data which the file system interprets.
1162 1163 * A further wrinkle is that some callers don't set MS_FSS and MS_DATA
1163 1164 * consistently with these conventions. To handle them, we check to
1164 1165 * see whether the pointer to the file system name has a numeric value
1165 1166 * less than 256. If so, we treat it as an index.
1166 1167 */
1167 1168 if (fsname != NULL) {
1168 1169 if ((vswp = vfs_getvfssw(fsname)) == NULL) {
1169 1170 return (EINVAL);
1170 1171 }
1171 1172 } else if (uap->flags & (MS_OPTIONSTR | MS_DATA | MS_FSS)) {
1172 1173 size_t n;
1173 1174 uint_t fstype;
1174 1175
1175 1176 fsname = fstname;
1176 1177
1177 1178 if ((fstype = (uintptr_t)uap->fstype) < 256) {
1178 1179 RLOCK_VFSSW();
1179 1180 if (fstype == 0 || fstype >= nfstype ||
1180 1181 !ALLOCATED_VFSSW(&vfssw[fstype])) {
1181 1182 RUNLOCK_VFSSW();
1182 1183 return (EINVAL);
1183 1184 }
1184 1185 (void) strcpy(fsname, vfssw[fstype].vsw_name);
1185 1186 RUNLOCK_VFSSW();
1186 1187 if ((vswp = vfs_getvfssw(fsname)) == NULL)
1187 1188 return (EINVAL);
1188 1189 } else {
1189 1190 /*
1190 1191 * Handle either kernel or user address space.
1191 1192 */
1192 1193 if (uap->flags & MS_SYSSPACE) {
1193 1194 error = copystr(uap->fstype, fsname,
1194 1195 FSTYPSZ, &n);
1195 1196 } else {
1196 1197 error = copyinstr(uap->fstype, fsname,
1197 1198 FSTYPSZ, &n);
1198 1199 }
1199 1200 if (error) {
1200 1201 if (error == ENAMETOOLONG)
1201 1202 return (EINVAL);
1202 1203 return (error);
1203 1204 }
1204 1205 if ((vswp = vfs_getvfssw(fsname)) == NULL)
1205 1206 return (EINVAL);
1206 1207 }
1207 1208 } else {
1208 1209 if ((vswp = vfs_getvfsswbyvfsops(vfs_getops(rootvfs))) == NULL)
1209 1210 return (EINVAL);
1210 1211 fsname = vswp->vsw_name;
1211 1212 }
1212 1213 if (!VFS_INSTALLED(vswp))
1213 1214 return (EINVAL);
1214 1215
1215 1216 if ((error = secpolicy_fs_allowed_mount(fsname)) != 0) {
1216 1217 vfs_unrefvfssw(vswp);
1217 1218 return (error);
1218 1219 }
1219 1220
1220 1221 vfsops = &vswp->vsw_vfsops;
1221 1222
1222 1223 vfs_copyopttbl(&vswp->vsw_optproto, &mnt_mntopts);
1223 1224 /*
1224 1225 * Fetch mount options and parse them for generic vfs options
1225 1226 */
1226 1227 if (uap->flags & MS_OPTIONSTR) {
1227 1228 /*
1228 1229 * Limit the buffer size
1229 1230 */
1230 1231 if (optlen < 0 || optlen > MAX_MNTOPT_STR) {
1231 1232 error = EINVAL;
1232 1233 goto errout;
1233 1234 }
1234 1235 if ((uap->flags & MS_SYSSPACE) == 0) {
1235 1236 inargs = kmem_alloc(MAX_MNTOPT_STR, KM_SLEEP);
1236 1237 inargs[0] = '\0';
1237 1238 if (optlen) {
1238 1239 error = copyinstr(opts, inargs, (size_t)optlen,
1239 1240 NULL);
1240 1241 if (error) {
1241 1242 goto errout;
1242 1243 }
1243 1244 }
1244 1245 }
1245 1246 vfs_parsemntopts(&mnt_mntopts, inargs, 0);
1246 1247 }
1247 1248 /*
1248 1249 * Flag bits override the options string.
1249 1250 */
1250 1251 if (uap->flags & MS_REMOUNT)
1251 1252 vfs_setmntopt_nolock(&mnt_mntopts, MNTOPT_REMOUNT, NULL, 0, 0);
1252 1253 if (uap->flags & MS_RDONLY)
1253 1254 vfs_setmntopt_nolock(&mnt_mntopts, MNTOPT_RO, NULL, 0, 0);
1254 1255 if (uap->flags & MS_NOSUID)
1255 1256 vfs_setmntopt_nolock(&mnt_mntopts, MNTOPT_NOSUID, NULL, 0, 0);
1256 1257
1257 1258 /*
1258 1259 * Check if this is a remount; must be set in the option string and
1259 1260 * the file system must support a remount option.
1260 1261 */
1261 1262 if (remount = vfs_optionisset_nolock(&mnt_mntopts,
1262 1263 MNTOPT_REMOUNT, NULL)) {
1263 1264 if (!(vswp->vsw_flag & VSW_CANREMOUNT)) {
1264 1265 error = ENOTSUP;
1265 1266 goto errout;
1266 1267 }
1267 1268 uap->flags |= MS_REMOUNT;
1268 1269 }
1269 1270
1270 1271 /*
1271 1272 * uap->flags and vfs_optionisset() should agree.
1272 1273 */
1273 1274 if (rdonly = vfs_optionisset_nolock(&mnt_mntopts, MNTOPT_RO, NULL)) {
1274 1275 uap->flags |= MS_RDONLY;
1275 1276 }
1276 1277 if (vfs_optionisset_nolock(&mnt_mntopts, MNTOPT_NOSUID, NULL)) {
1277 1278 uap->flags |= MS_NOSUID;
1278 1279 }
1279 1280 nbmand = vfs_optionisset_nolock(&mnt_mntopts, MNTOPT_NBMAND, NULL);
1280 1281 ASSERT(splice || !remount);
1281 1282 /*
1282 1283 * If we are splicing the fs into the namespace,
1283 1284 * perform mount point checks.
1284 1285 *
1285 1286 * We want to resolve the path for the mount point to eliminate
1286 1287 * '.' and ".." and symlinks in mount points; we can't do the
1287 1288 * same for the resource string, since it would turn
1288 1289 * "/dev/dsk/c0t0d0s0" into "/devices/pci@...". We need to do
1289 1290 * this before grabbing vn_vfswlock(), because otherwise we
1290 1291 * would deadlock with lookuppn().
1291 1292 */
1292 1293 if (splice) {
1293 1294 ASSERT(vp->v_count > 0);
1294 1295
1295 1296 /*
1296 1297 * Pick up mount point and device from appropriate space.
1297 1298 */
1298 1299 if (pn_get(uap->spec, fromspace, &pn) == 0) {
1299 1300 resource = kmem_alloc(pn.pn_pathlen + 1,
1300 1301 KM_SLEEP);
1301 1302 (void) strcpy(resource, pn.pn_path);
1302 1303 pn_free(&pn);
1303 1304 }
1304 1305 /*
1305 1306 * Do a lookupname prior to taking the
1306 1307 * writelock. Mark this as completed if
1307 1308 * successful for later cleanup and addition to
1308 1309 * the mount in progress table.
1309 1310 */
1310 1311 if ((uap->flags & MS_GLOBAL) == 0 &&
1311 1312 lookupname(uap->spec, fromspace,
1312 1313 FOLLOW, NULL, &bvp) == 0) {
1313 1314 addmip = 1;
1314 1315 }
1315 1316
1316 1317 if ((error = pn_get(uap->dir, fromspace, &pn)) == 0) {
1317 1318 pathname_t *pnp;
1318 1319
1319 1320 if (*pn.pn_path != '/') {
1320 1321 error = EINVAL;
1321 1322 pn_free(&pn);
1322 1323 goto errout;
1323 1324 }
1324 1325 pn_alloc(&rpn);
1325 1326 /*
1326 1327 * Kludge to prevent autofs from deadlocking with
1327 1328 * itself when it calls domount().
1328 1329 *
1329 1330 * If autofs is calling, it is because it is doing
1330 1331 * (autofs) mounts in the process of an NFS mount. A
1331 1332 * lookuppn() here would cause us to block waiting for
1332 1333 * said NFS mount to complete, which can't since this
1333 1334 * is the thread that was supposed to doing it.
1334 1335 */
1335 1336 if (fromspace == UIO_USERSPACE) {
1336 1337 if ((error = lookuppn(&pn, &rpn, FOLLOW, NULL,
1337 1338 NULL)) == 0) {
1338 1339 pnp = &rpn;
1339 1340 } else {
1340 1341 /*
1341 1342 * The file disappeared or otherwise
1342 1343 * became inaccessible since we opened
1343 1344 * it; might as well fail the mount
1344 1345 * since the mount point is no longer
1345 1346 * accessible.
1346 1347 */
1347 1348 pn_free(&rpn);
1348 1349 pn_free(&pn);
1349 1350 goto errout;
1350 1351 }
1351 1352 } else {
1352 1353 pnp = &pn;
1353 1354 }
1354 1355 mountpt = kmem_alloc(pnp->pn_pathlen + 1, KM_SLEEP);
1355 1356 (void) strcpy(mountpt, pnp->pn_path);
1356 1357
1357 1358 /*
1358 1359 * If the addition of the zone's rootpath
1359 1360 * would push us over a total path length
1360 1361 * of MAXPATHLEN, we fail the mount with
1361 1362 * ENAMETOOLONG, which is what we would have
1362 1363 * gotten if we were trying to perform the same
1363 1364 * mount in the global zone.
1364 1365 *
1365 1366 * strlen() doesn't count the trailing
1366 1367 * '\0', but zone_rootpathlen counts both a
1367 1368 * trailing '/' and the terminating '\0'.
1368 1369 */
1369 1370 if ((curproc->p_zone->zone_rootpathlen - 1 +
1370 1371 strlen(mountpt)) > MAXPATHLEN ||
1371 1372 (resource != NULL &&
1372 1373 (curproc->p_zone->zone_rootpathlen - 1 +
1373 1374 strlen(resource)) > MAXPATHLEN)) {
1374 1375 error = ENAMETOOLONG;
1375 1376 }
1376 1377
1377 1378 pn_free(&rpn);
1378 1379 pn_free(&pn);
1379 1380 }
1380 1381
1381 1382 if (error)
1382 1383 goto errout;
1383 1384
1384 1385 /*
1385 1386 * Prevent path name resolution from proceeding past
1386 1387 * the mount point.
1387 1388 */
1388 1389 if (vn_vfswlock(vp) != 0) {
1389 1390 error = EBUSY;
1390 1391 goto errout;
1391 1392 }
1392 1393
1393 1394 /*
1394 1395 * Verify that it's legitimate to establish a mount on
1395 1396 * the prospective mount point.
1396 1397 */
1397 1398 if (vn_mountedvfs(vp) != NULL) {
1398 1399 /*
1399 1400 * The mount point lock was obtained after some
1400 1401 * other thread raced through and established a mount.
1401 1402 */
1402 1403 vn_vfsunlock(vp);
1403 1404 error = EBUSY;
1404 1405 goto errout;
1405 1406 }
1406 1407 if (vp->v_flag & VNOMOUNT) {
1407 1408 vn_vfsunlock(vp);
1408 1409 error = EINVAL;
1409 1410 goto errout;
1410 1411 }
1411 1412 }
1412 1413 if ((uap->flags & (MS_DATA | MS_OPTIONSTR)) == 0) {
1413 1414 uap->dataptr = NULL;
1414 1415 uap->datalen = 0;
1415 1416 }
1416 1417
1417 1418 /*
1418 1419 * If this is a remount, we don't want to create a new VFS.
1419 1420 * Instead, we pass the existing one with a remount flag.
1420 1421 */
1421 1422 if (remount) {
1422 1423 /*
1423 1424 * Confirm that the mount point is the root vnode of the
1424 1425 * file system that is being remounted.
1425 1426 * This can happen if the user specifies a different
1426 1427 * mount point directory pathname in the (re)mount command.
1427 1428 *
1428 1429 * Code below can only be reached if splice is true, so it's
1429 1430 * safe to do vn_vfsunlock() here.
1430 1431 */
1431 1432 if ((vp->v_flag & VROOT) == 0) {
1432 1433 vn_vfsunlock(vp);
1433 1434 error = ENOENT;
1434 1435 goto errout;
1435 1436 }
1436 1437 /*
1437 1438 * Disallow making file systems read-only unless file system
1438 1439 * explicitly allows it in its vfssw. Ignore other flags.
1439 1440 */
1440 1441 if (rdonly && vn_is_readonly(vp) == 0 &&
1441 1442 (vswp->vsw_flag & VSW_CANRWRO) == 0) {
1442 1443 vn_vfsunlock(vp);
1443 1444 error = EINVAL;
1444 1445 goto errout;
1445 1446 }
1446 1447 /*
1447 1448 * Disallow changing the NBMAND disposition of the file
1448 1449 * system on remounts.
1449 1450 */
1450 1451 if ((nbmand && ((vp->v_vfsp->vfs_flag & VFS_NBMAND) == 0)) ||
1451 1452 (!nbmand && (vp->v_vfsp->vfs_flag & VFS_NBMAND))) {
1452 1453 vn_vfsunlock(vp);
1453 1454 error = EINVAL;
1454 1455 goto errout;
1455 1456 }
1456 1457 vfsp = vp->v_vfsp;
1457 1458 ovflags = vfsp->vfs_flag;
1458 1459 vfsp->vfs_flag |= VFS_REMOUNT;
1459 1460 vfsp->vfs_flag &= ~VFS_RDONLY;
1460 1461 } else {
1461 1462 vfsp = vfs_alloc(KM_SLEEP);
1462 1463 VFS_INIT(vfsp, vfsops, NULL);
1463 1464 }
1464 1465
1465 1466 VFS_HOLD(vfsp);
1466 1467
1467 1468 if ((error = lofi_add(fsname, vfsp, &mnt_mntopts, uap)) != 0) {
1468 1469 if (!remount) {
1469 1470 if (splice)
1470 1471 vn_vfsunlock(vp);
1471 1472 vfs_free(vfsp);
1472 1473 } else {
1473 1474 vn_vfsunlock(vp);
1474 1475 VFS_RELE(vfsp);
1475 1476 }
1476 1477 goto errout;
1477 1478 }
1478 1479
1479 1480 /*
1480 1481 * PRIV_SYS_MOUNT doesn't mean you can become root.
1481 1482 */
1482 1483 if (vfsp->vfs_lofi_minor != 0) {
1483 1484 uap->flags |= MS_NOSUID;
1484 1485 vfs_setmntopt_nolock(&mnt_mntopts, MNTOPT_NOSUID, NULL, 0, 0);
1485 1486 }
1486 1487
1487 1488 /*
1488 1489 	 * The vfs_reflock is not used anymore; the code below explicitly
1489 1490 	 * holds it, preventing others from accessing it directly.
1490 1491 */
1491 1492 if ((sema_tryp(&vfsp->vfs_reflock) == 0) &&
1492 1493 !(vfsp->vfs_flag & VFS_REMOUNT))
1493 1494 cmn_err(CE_WARN,
1494 1495 "mount type %s couldn't get vfs_reflock", vswp->vsw_name);
1495 1496
1496 1497 /*
1497 1498 * Lock the vfs. If this is a remount we want to avoid spurious umount
1498 1499 * failures that happen as a side-effect of fsflush() and other mount
1499 1500 * and unmount operations that might be going on simultaneously and
1500 1501 * may have locked the vfs currently. To not return EBUSY immediately
1501 1502 * here we use vfs_lock_wait() instead vfs_lock() for the remount case.
1502 1503 */
1503 1504 if (!remount) {
1504 1505 if (error = vfs_lock(vfsp)) {
1505 1506 vfsp->vfs_flag = ovflags;
1506 1507
1507 1508 lofi_remove(vfsp);
1508 1509
1509 1510 if (splice)
1510 1511 vn_vfsunlock(vp);
1511 1512 vfs_free(vfsp);
1512 1513 goto errout;
1513 1514 }
1514 1515 } else {
1515 1516 vfs_lock_wait(vfsp);
1516 1517 }
1517 1518
1518 1519 /*
1519 1520 	 * Add device to mount in progress table; global mounts require special
1520 1521 * handling. It is possible that we have already done the lookupname
1521 1522 * on a spliced, non-global fs. If so, we don't want to do it again
1522 1523 * since we cannot do a lookupname after taking the
1523 1524 * wlock above. This case is for a non-spliced, non-global filesystem.
1524 1525 */
1525 1526 if (!addmip) {
1526 1527 if ((uap->flags & MS_GLOBAL) == 0 &&
1527 1528 lookupname(uap->spec, fromspace, FOLLOW, NULL, &bvp) == 0) {
1528 1529 addmip = 1;
1529 1530 }
1530 1531 }
1531 1532
1532 1533 if (addmip) {
1533 1534 vnode_t *lvp = NULL;
1534 1535
1535 1536 error = vfs_get_lofi(vfsp, &lvp);
1536 1537 if (error > 0) {
1537 1538 lofi_remove(vfsp);
1538 1539
1539 1540 if (splice)
1540 1541 vn_vfsunlock(vp);
1541 1542 vfs_unlock(vfsp);
1542 1543
1543 1544 if (remount) {
1544 1545 VFS_RELE(vfsp);
1545 1546 } else {
1546 1547 vfs_free(vfsp);
1547 1548 }
1548 1549
1549 1550 goto errout;
1550 1551 } else if (error == -1) {
1551 1552 bdev = bvp->v_rdev;
1552 1553 VN_RELE(bvp);
1553 1554 } else {
1554 1555 bdev = lvp->v_rdev;
1555 1556 VN_RELE(lvp);
1556 1557 VN_RELE(bvp);
1557 1558 }
1558 1559
1559 1560 vfs_addmip(bdev, vfsp);
1560 1561 addmip = 0;
1561 1562 delmip = 1;
1562 1563 }
1563 1564 /*
1564 1565 * Invalidate cached entry for the mount point.
1565 1566 */
1566 1567 if (splice)
1567 1568 dnlc_purge_vp(vp);
1568 1569
1569 1570 /*
1570 1571 	 * If we have an option string but the filesystem doesn't supply a
1571 1572 * prototype options table, create a table with the global
1572 1573 * options and sufficient room to accept all the options in the
1573 1574 * string. Then parse the passed in option string
1574 1575 * accepting all the options in the string. This gives us an
1575 1576 * option table with all the proper cancel properties for the
1576 1577 * global options.
1577 1578 *
1578 1579 * Filesystems that supply a prototype options table are handled
1579 1580 * earlier in this function.
1580 1581 */
1581 1582 if (uap->flags & MS_OPTIONSTR) {
1582 1583 if (!(vswp->vsw_flag & VSW_HASPROTO)) {
1583 1584 mntopts_t tmp_mntopts;
1584 1585
1585 1586 tmp_mntopts.mo_count = 0;
1586 1587 vfs_createopttbl_extend(&tmp_mntopts, inargs,
1587 1588 &mnt_mntopts);
1588 1589 vfs_parsemntopts(&tmp_mntopts, inargs, 1);
1589 1590 vfs_swapopttbl_nolock(&mnt_mntopts, &tmp_mntopts);
1590 1591 vfs_freeopttbl(&tmp_mntopts);
1591 1592 }
1592 1593 }
1593 1594
1594 1595 /*
1595 1596 * Serialize with zone state transitions.
1596 1597 * See vfs_list_add; zone mounted into is:
1597 1598 * zone_find_by_path(refstr_value(vfsp->vfs_mntpt))
1598 1599 * not the zone doing the mount (curproc->p_zone), but if we're already
1599 1600 * inside a NGZ, then we know what zone we are.
1600 1601 */
1601 1602 if (INGLOBALZONE(curproc)) {
1602 1603 zone = zone_find_by_path(mountpt);
1603 1604 ASSERT(zone != NULL);
1604 1605 } else {
1605 1606 zone = curproc->p_zone;
1606 1607 /*
1607 1608 * zone_find_by_path does a hold, so do one here too so that
1608 1609 * we can do a zone_rele after mount_completed.
1609 1610 */
1610 1611 zone_hold(zone);
1611 1612 }
1612 1613 mount_in_progress(zone);
1613 1614 /*
1614 1615 * Instantiate (or reinstantiate) the file system. If appropriate,
1615 1616 * splice it into the file system name space.
1616 1617 *
1617 1618 * We want VFS_MOUNT() to be able to override the vfs_resource
1618 1619 * string if necessary (ie, mntfs), and also for a remount to
1619 1620 * change the same (necessary when remounting '/' during boot).
1620 1621 * So we set up vfs_mntpt and vfs_resource to what we think they
1621 1622 * should be, then hand off control to VFS_MOUNT() which can
1622 1623 * override this.
1623 1624 *
1624 1625 * For safety's sake, when changing vfs_resource or vfs_mntpt of
1625 1626 * a vfs which is on the vfs list (i.e. during a remount), we must
1626 1627 * never set those fields to NULL. Several bits of code make
1627 1628 * assumptions that the fields are always valid.
1628 1629 */
1629 1630 vfs_swapopttbl(&mnt_mntopts, &vfsp->vfs_mntopts);
1630 1631 if (remount) {
1631 1632 if ((oldresource = vfsp->vfs_resource) != NULL)
1632 1633 refstr_hold(oldresource);
1633 1634 if ((oldmntpt = vfsp->vfs_mntpt) != NULL)
1634 1635 refstr_hold(oldmntpt);
1635 1636 }
1636 1637 vfs_setresource(vfsp, resource, 0);
1637 1638 vfs_setmntpoint(vfsp, mountpt, 0);
1638 1639
1639 1640 /*
1640 1641 * going to mount on this vnode, so notify.
1641 1642 */
1642 1643 vnevent_mountedover(vp, NULL);
1643 1644 error = VFS_MOUNT(vfsp, vp, uap, credp);
1644 1645
1645 1646 if (uap->flags & MS_RDONLY)
1646 1647 vfs_setmntopt(vfsp, MNTOPT_RO, NULL, 0);
1647 1648 if (uap->flags & MS_NOSUID)
1648 1649 vfs_setmntopt(vfsp, MNTOPT_NOSUID, NULL, 0);
1649 1650 if (uap->flags & MS_GLOBAL)
1650 1651 vfs_setmntopt(vfsp, MNTOPT_GLOBAL, NULL, 0);
1651 1652
1652 1653 if (error) {
1653 1654 lofi_remove(vfsp);
1654 1655
1655 1656 if (remount) {
1656 1657 /* put back pre-remount options */
1657 1658 vfs_swapopttbl(&mnt_mntopts, &vfsp->vfs_mntopts);
1658 1659 vfs_setmntpoint(vfsp, refstr_value(oldmntpt),
1659 1660 VFSSP_VERBATIM);
1660 1661 if (oldmntpt)
1661 1662 refstr_rele(oldmntpt);
1662 1663 vfs_setresource(vfsp, refstr_value(oldresource),
1663 1664 VFSSP_VERBATIM);
1664 1665 if (oldresource)
1665 1666 refstr_rele(oldresource);
1666 1667 vfsp->vfs_flag = ovflags;
1667 1668 vfs_unlock(vfsp);
1668 1669 VFS_RELE(vfsp);
1669 1670 } else {
1670 1671 vfs_unlock(vfsp);
1671 1672 vfs_freemnttab(vfsp);
1672 1673 vfs_free(vfsp);
1673 1674 }
1674 1675 } else {
1675 1676 /*
1676 1677 * Set the mount time to now
1677 1678 */
1678 1679 vfsp->vfs_mtime = ddi_get_time();
1679 1680 if (remount) {
1680 1681 vfsp->vfs_flag &= ~VFS_REMOUNT;
1681 1682 if (oldresource)
1682 1683 refstr_rele(oldresource);
1683 1684 if (oldmntpt)
1684 1685 refstr_rele(oldmntpt);
1685 1686 } else if (splice) {
1686 1687 /*
1687 1688 * Link vfsp into the name space at the mount
1688 1689 * point. Vfs_add() is responsible for
1689 1690 * holding the mount point which will be
1690 1691 * released when vfs_remove() is called.
1691 1692 */
1692 1693 vfs_add(vp, vfsp, uap->flags);
1693 1694 } else {
1694 1695 /*
1695 1696 * Hold the reference to file system which is
1696 1697 * not linked into the name space.
1697 1698 */
1698 1699 vfsp->vfs_zone = NULL;
1699 1700 VFS_HOLD(vfsp);
1700 1701 vfsp->vfs_vnodecovered = NULL;
1701 1702 }
1702 1703 /*
1703 1704 * Set flags for global options encountered
1704 1705 */
1705 1706 if (vfs_optionisset(vfsp, MNTOPT_RO, NULL))
1706 1707 vfsp->vfs_flag |= VFS_RDONLY;
1707 1708 else
1708 1709 vfsp->vfs_flag &= ~VFS_RDONLY;
1709 1710 if (vfs_optionisset(vfsp, MNTOPT_NOSUID, NULL)) {
1710 1711 vfsp->vfs_flag |= (VFS_NOSETUID|VFS_NODEVICES);
1711 1712 } else {
1712 1713 if (vfs_optionisset(vfsp, MNTOPT_NODEVICES, NULL))
1713 1714 vfsp->vfs_flag |= VFS_NODEVICES;
1714 1715 else
1715 1716 vfsp->vfs_flag &= ~VFS_NODEVICES;
1716 1717 if (vfs_optionisset(vfsp, MNTOPT_NOSETUID, NULL))
1717 1718 vfsp->vfs_flag |= VFS_NOSETUID;
1718 1719 else
1719 1720 vfsp->vfs_flag &= ~VFS_NOSETUID;
1720 1721 }
1721 1722 if (vfs_optionisset(vfsp, MNTOPT_NBMAND, NULL))
1722 1723 vfsp->vfs_flag |= VFS_NBMAND;
1723 1724 else
1724 1725 vfsp->vfs_flag &= ~VFS_NBMAND;
1725 1726
1726 1727 if (vfs_optionisset(vfsp, MNTOPT_XATTR, NULL))
1727 1728 vfsp->vfs_flag |= VFS_XATTR;
1728 1729 else
1729 1730 vfsp->vfs_flag &= ~VFS_XATTR;
1730 1731
1731 1732 if (vfs_optionisset(vfsp, MNTOPT_NOEXEC, NULL))
1732 1733 vfsp->vfs_flag |= VFS_NOEXEC;
1733 1734 else
1734 1735 vfsp->vfs_flag &= ~VFS_NOEXEC;
1735 1736
1736 1737 /*
1737 1738 * Now construct the output option string of options
1738 1739 * we recognized.
1739 1740 */
1740 1741 if (uap->flags & MS_OPTIONSTR) {
1741 1742 vfs_list_read_lock();
1742 1743 copyout_error = vfs_buildoptionstr(
1743 1744 &vfsp->vfs_mntopts, inargs, optlen);
1744 1745 vfs_list_unlock();
1745 1746 if (copyout_error == 0 &&
1746 1747 (uap->flags & MS_SYSSPACE) == 0) {
1747 1748 copyout_error = copyoutstr(inargs, opts,
1748 1749 optlen, NULL);
1749 1750 }
1750 1751 }
1751 1752
1752 1753 /*
1753 1754 * If this isn't a remount, set up the vopstats before
1754 1755 * anyone can touch this. We only allow spliced file
1755 1756 * systems (file systems which are in the namespace) to
1756 1757 * have the VFS_STATS flag set.
1757 1758 * NOTE: PxFS mounts the underlying file system with
1758 1759 * MS_NOSPLICE set and copies those vfs_flags to its private
1759 1760 * vfs structure. As a result, PxFS should never have
1760 1761 * the VFS_STATS flag or else we might access the vfs
1761 1762 * statistics-related fields prior to them being
1762 1763 * properly initialized.
1763 1764 */
1764 1765 if (!remount && (vswp->vsw_flag & VSW_STATS) && splice) {
1765 1766 initialize_vopstats(&vfsp->vfs_vopstats);
1766 1767 /*
1767 1768 * We need to set vfs_vskap to NULL because there's
1768 1769 * a chance it won't be set below. This is checked
1769 1770 * in teardown_vopstats() so we can't have garbage.
1770 1771 */
1771 1772 vfsp->vfs_vskap = NULL;
1772 1773 vfsp->vfs_flag |= VFS_STATS;
1773 1774 vfsp->vfs_fstypevsp = get_fstype_vopstats(vfsp, vswp);
1774 1775 }
1775 1776
1776 1777 if (vswp->vsw_flag & VSW_XID)
1777 1778 vfsp->vfs_flag |= VFS_XID;
1778 1779
1779 1780 vfs_unlock(vfsp);
1780 1781 }
1781 1782 mount_completed(zone);
1782 1783 zone_rele(zone);
1783 1784 if (splice)
1784 1785 vn_vfsunlock(vp);
1785 1786
1786 1787 if ((error == 0) && (copyout_error == 0)) {
1787 1788 if (!remount) {
1788 1789 /*
1789 1790 * Don't call get_vskstat_anchor() while holding
1790 1791 * locks since it allocates memory and calls
1791 1792 * VFS_STATVFS(). For NFS, the latter can generate
1792 1793 * an over-the-wire call.
1793 1794 */
1794 1795 vskap = get_vskstat_anchor(vfsp);
1795 1796 /* Only take the lock if we have something to do */
1796 1797 if (vskap != NULL) {
1797 1798 vfs_lock_wait(vfsp);
1798 1799 if (vfsp->vfs_flag & VFS_STATS) {
1799 1800 vfsp->vfs_vskap = vskap;
1800 1801 }
1801 1802 vfs_unlock(vfsp);
1802 1803 }
1803 1804 }
1804 1805 /* Return vfsp to caller. */
1805 1806 *vfspp = vfsp;
1806 1807 }
1807 1808 errout:
1808 1809 vfs_freeopttbl(&mnt_mntopts);
1809 1810 if (resource != NULL)
1810 1811 kmem_free(resource, strlen(resource) + 1);
1811 1812 if (mountpt != NULL)
1812 1813 kmem_free(mountpt, strlen(mountpt) + 1);
1813 1814 /*
1814 1815 * It is possible we errored prior to adding to mount in progress
1815 1816 * table. Must free vnode we acquired with successful lookupname.
1816 1817 */
1817 1818 if (addmip)
1818 1819 VN_RELE(bvp);
1819 1820 if (delmip)
1820 1821 vfs_delmip(vfsp);
1821 1822 ASSERT(vswp != NULL);
1822 1823 vfs_unrefvfssw(vswp);
1823 1824 if (inargs != opts)
1824 1825 kmem_free(inargs, MAX_MNTOPT_STR);
1825 1826 if (copyout_error) {
1826 1827 lofi_remove(vfsp);
1827 1828 VFS_RELE(vfsp);
1828 1829 error = copyout_error;
1829 1830 }
1830 1831 return (error);
1831 1832 }
1832 1833
1833 1834 static void
1834 1835 vfs_setpath(
1835 1836 struct vfs *vfsp, /* vfs being updated */
1836 1837 refstr_t **refp, /* Ref-count string to contain the new path */
1837 1838 const char *newpath, /* Path to add to refp (above) */
1838 1839 uint32_t flag) /* flag */
1839 1840 {
1840 1841 size_t len;
1841 1842 refstr_t *ref;
1842 1843 zone_t *zone = curproc->p_zone;
1843 1844 char *sp;
1844 1845 int have_list_lock = 0;
1845 1846
1846 1847 ASSERT(!VFS_ON_LIST(vfsp) || vfs_lock_held(vfsp));
1847 1848
1848 1849 /*
1849 1850 * New path must be less than MAXPATHLEN because mntfs
1850 1851 * will only display up to MAXPATHLEN bytes. This is currently
1851 1852 * safe, because domount() uses pn_get(), and other callers
1852 1853 * similarly cap the size to fewer than MAXPATHLEN bytes.
1853 1854 */
1854 1855
1855 1856 ASSERT(strlen(newpath) < MAXPATHLEN);
1856 1857
1857 1858 /* mntfs requires consistency while vfs list lock is held */
1858 1859
1859 1860 if (VFS_ON_LIST(vfsp)) {
1860 1861 have_list_lock = 1;
1861 1862 vfs_list_lock();
1862 1863 }
1863 1864
1864 1865 if (*refp != NULL)
1865 1866 refstr_rele(*refp);
1866 1867
1867 1868 /*
1868 1869 * If we are in a non-global zone then we prefix the supplied path,
1869 1870 * newpath, with the zone's root path, with two exceptions. The first
1870 1871 * is where we have been explicitly directed to avoid doing so; this
1871 1872 * will be the case following a failed remount, where the path supplied
1872 1873 * will be a saved version which must now be restored. The second
1873 1874 * exception is where newpath is not a pathname but a descriptive name,
1874 1875 * e.g. "procfs".
1875 1876 */
1876 1877 if (zone == global_zone || (flag & VFSSP_VERBATIM) || *newpath != '/') {
1877 1878 ref = refstr_alloc(newpath);
1878 1879 goto out;
1879 1880 }
1880 1881
1881 1882 /*
1882 1883 * Truncate the trailing '/' in the zoneroot, and merge
1883 1884 * in the zone's rootpath with the "newpath" (resource
1884 1885 * or mountpoint) passed in.
1885 1886 *
1886 1887 * The size of the required buffer is thus the size of
1887 1888 * the buffer required for the passed-in newpath
1888 1889 * (strlen(newpath) + 1), plus the size of the buffer
1889 1890 * required to hold zone_rootpath (zone_rootpathlen)
1890 1891 * minus one for one of the now-superfluous NUL
1891 1892 * terminations, minus one for the trailing '/'.
1892 1893 *
1893 1894 * That gives us:
1894 1895 *
1895 1896 * (strlen(newpath) + 1) + zone_rootpathlen - 1 - 1
1896 1897 *
1897 1898 * Which is what we have below.
1898 1899 */
1899 1900
1900 1901 len = strlen(newpath) + zone->zone_rootpathlen - 1;
1901 1902 sp = kmem_alloc(len, KM_SLEEP);
1902 1903
1903 1904 /*
1904 1905 * Copy everything including the trailing slash, which
1905 1906 * we then overwrite with the NUL character.
1906 1907 */
1907 1908
1908 1909 (void) strcpy(sp, zone->zone_rootpath);
1909 1910 sp[zone->zone_rootpathlen - 2] = '\0';
1910 1911 (void) strcat(sp, newpath);
1911 1912
1912 1913 ref = refstr_alloc(sp);
1913 1914 kmem_free(sp, len);
1914 1915 out:
1915 1916 *refp = ref;
1916 1917
1917 1918 if (have_list_lock) {
1918 1919 vfs_mnttab_modtimeupd();
1919 1920 vfs_list_unlock();
1920 1921 }
1921 1922 }
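/*
 * A worked example of the length arithmetic above, using hypothetical
 * values for illustration only: with a zone whose zone_rootpath is
 * "/zones/z1/root/" (zone_rootpathlen == 16, counting the NUL) and
 * newpath == "/export",
 *
 *	len = strlen("/export") + 16 - 1 = 22
 *
 * and sp becomes "/zones/z1/root" followed by "/export", i.e.
 * "/zones/z1/root/export": 21 characters plus the terminating NUL,
 * exactly filling the len bytes allocated.
 */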
1922 1923
1923 1924 /*
1924 1925 * Record a mounted resource name in a vfs structure.
1925 1926 * If vfsp is already mounted, caller must hold the vfs lock.
1926 1927 */
1927 1928 void
1928 1929 vfs_setresource(struct vfs *vfsp, const char *resource, uint32_t flag)
1929 1930 {
1930 1931 if (resource == NULL || resource[0] == '\0')
1931 1932 resource = VFS_NORESOURCE;
1932 1933 vfs_setpath(vfsp, &vfsp->vfs_resource, resource, flag);
1933 1934 }
1934 1935
1935 1936 /*
1936 1937 * Record a mount point name in a vfs structure.
1937 1938 * If vfsp is already mounted, caller must hold the vfs lock.
1938 1939 */
1939 1940 void
1940 1941 vfs_setmntpoint(struct vfs *vfsp, const char *mntpt, uint32_t flag)
1941 1942 {
1942 1943 if (mntpt == NULL || mntpt[0] == '\0')
1943 1944 mntpt = VFS_NOMNTPT;
1944 1945 vfs_setpath(vfsp, &vfsp->vfs_mntpt, mntpt, flag);
1945 1946 }
1946 1947
1947 1948 /* Returns the vfs_resource. Caller must call refstr_rele() when finished. */
1948 1949
1949 1950 refstr_t *
1950 1951 vfs_getresource(const struct vfs *vfsp)
1951 1952 {
1952 1953 refstr_t *resource;
1953 1954
1954 1955 vfs_list_read_lock();
1955 1956 resource = vfsp->vfs_resource;
1956 1957 refstr_hold(resource);
1957 1958 vfs_list_unlock();
1958 1959
1959 1960 return (resource);
1960 1961 }
1961 1962
1962 1963 /* Returns the vfs_mntpt. Caller must call refstr_rele() when finished. */
1963 1964
1964 1965 refstr_t *
1965 1966 vfs_getmntpoint(const struct vfs *vfsp)
1966 1967 {
1967 1968 refstr_t *mntpt;
1968 1969
1969 1970 vfs_list_read_lock();
1970 1971 mntpt = vfsp->vfs_mntpt;
1971 1972 refstr_hold(mntpt);
1972 1973 vfs_list_unlock();
1973 1974
1974 1975 return (mntpt);
1975 1976 }
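/*
 * A minimal usage sketch for the two accessors above (illustrative
 * only): the returned refstr_t is held on the caller's behalf and must
 * be released with refstr_rele() once the string is no longer needed:
 *
 *	refstr_t *mntpt = vfs_getmntpoint(vfsp);
 *	cmn_err(CE_CONT, "mounted at %s\n", refstr_value(mntpt));
 *	refstr_rele(mntpt);
 */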
1976 1977
1977 1978 /*
1978 1979 * Create an empty options table with enough empty slots to hold all
1979 1980  * the options in the options string passed as an argument.
1980 1981 * Potentially prepend another options table.
1981 1982 *
1982 1983 * Note: caller is responsible for locking the vfs list, if needed,
1983 1984 * to protect mops.
1984 1985 */
1985 1986 static void
1986 1987 vfs_createopttbl_extend(mntopts_t *mops, const char *opts,
1987 1988 const mntopts_t *mtmpl)
1988 1989 {
1989 1990 const char *s = opts;
1990 1991 uint_t count;
1991 1992
1992 1993 if (opts == NULL || *opts == '\0') {
1993 1994 count = 0;
1994 1995 } else {
1995 1996 count = 1;
1996 1997
1997 1998 /*
1998 1999 * Count number of options in the string
1999 2000 */
2000 2001 for (s = strchr(s, ','); s != NULL; s = strchr(s, ',')) {
2001 2002 count++;
2002 2003 s++;
2003 2004 }
2004 2005 }
2005 2006 vfs_copyopttbl_extend(mtmpl, mops, count);
2006 2007 }
2007 2008
2008 2009 /*
2009 2010 * Create an empty options table with enough empty slots to hold all
2010 2011  * the options in the options string passed as an argument.
2011 2012 *
2012 2013 * This function is *not* for general use by filesystems.
2013 2014 *
2014 2015 * Note: caller is responsible for locking the vfs list, if needed,
2015 2016 * to protect mops.
2016 2017 */
2017 2018 void
2018 2019 vfs_createopttbl(mntopts_t *mops, const char *opts)
2019 2020 {
2020 2021 vfs_createopttbl_extend(mops, opts, NULL);
2021 2022 }
2022 2023
2023 2024
2024 2025 /*
2025 2026 * Swap two mount options tables
2026 2027 */
2027 2028 static void
2028 2029 vfs_swapopttbl_nolock(mntopts_t *optbl1, mntopts_t *optbl2)
2029 2030 {
2030 2031 uint_t tmpcnt;
2031 2032 mntopt_t *tmplist;
2032 2033
2033 2034 tmpcnt = optbl2->mo_count;
2034 2035 tmplist = optbl2->mo_list;
2035 2036 optbl2->mo_count = optbl1->mo_count;
2036 2037 optbl2->mo_list = optbl1->mo_list;
2037 2038 optbl1->mo_count = tmpcnt;
2038 2039 optbl1->mo_list = tmplist;
2039 2040 }
2040 2041
2041 2042 static void
2042 2043 vfs_swapopttbl(mntopts_t *optbl1, mntopts_t *optbl2)
2043 2044 {
2044 2045 vfs_list_lock();
2045 2046 vfs_swapopttbl_nolock(optbl1, optbl2);
2046 2047 vfs_mnttab_modtimeupd();
2047 2048 vfs_list_unlock();
2048 2049 }
2049 2050
2050 2051 static char **
2051 2052 vfs_copycancelopt_extend(char **const moc, int extend)
2052 2053 {
2053 2054 int i = 0;
2054 2055 int j;
2055 2056 char **result;
2056 2057
2057 2058 if (moc != NULL) {
2058 2059 for (; moc[i] != NULL; i++)
2059 2060 /* count number of options to cancel */;
2060 2061 }
2061 2062
2062 2063 if (i + extend == 0)
2063 2064 return (NULL);
2064 2065
2065 2066 result = kmem_alloc((i + extend + 1) * sizeof (char *), KM_SLEEP);
2066 2067
2067 2068 for (j = 0; j < i; j++) {
2068 2069 result[j] = kmem_alloc(strlen(moc[j]) + 1, KM_SLEEP);
2069 2070 (void) strcpy(result[j], moc[j]);
2070 2071 }
2071 2072 for (; j <= i + extend; j++)
2072 2073 result[j] = NULL;
2073 2074
2074 2075 return (result);
2075 2076 }
2076 2077
2077 2078 static void
2078 2079 vfs_copyopt(const mntopt_t *s, mntopt_t *d)
2079 2080 {
2080 2081 char *sp, *dp;
2081 2082
2082 2083 d->mo_flags = s->mo_flags;
2083 2084 d->mo_data = s->mo_data;
2084 2085 sp = s->mo_name;
2085 2086 if (sp != NULL) {
2086 2087 dp = kmem_alloc(strlen(sp) + 1, KM_SLEEP);
2087 2088 (void) strcpy(dp, sp);
2088 2089 d->mo_name = dp;
2089 2090 } else {
2090 2091 d->mo_name = NULL; /* should never happen */
2091 2092 }
2092 2093
2093 2094 d->mo_cancel = vfs_copycancelopt_extend(s->mo_cancel, 0);
2094 2095
2095 2096 sp = s->mo_arg;
2096 2097 if (sp != NULL) {
2097 2098 dp = kmem_alloc(strlen(sp) + 1, KM_SLEEP);
2098 2099 (void) strcpy(dp, sp);
2099 2100 d->mo_arg = dp;
2100 2101 } else {
2101 2102 d->mo_arg = NULL;
2102 2103 }
2103 2104 }
2104 2105
2105 2106 /*
2106 2107 * Copy a mount options table, possibly allocating some spare
2107 2108 * slots at the end. It is permissible to copy_extend the NULL table.
2108 2109 */
2109 2110 static void
2110 2111 vfs_copyopttbl_extend(const mntopts_t *smo, mntopts_t *dmo, int extra)
2111 2112 {
2112 2113 uint_t i, count;
2113 2114 mntopt_t *motbl;
2114 2115
2115 2116 /*
2116 2117 * Clear out any existing stuff in the options table being initialized
2117 2118 */
2118 2119 vfs_freeopttbl(dmo);
2119 2120 count = (smo == NULL) ? 0 : smo->mo_count;
2120 2121 if ((count + extra) == 0) /* nothing to do */
2121 2122 return;
2122 2123 dmo->mo_count = count + extra;
2123 2124 motbl = kmem_zalloc((count + extra) * sizeof (mntopt_t), KM_SLEEP);
2124 2125 dmo->mo_list = motbl;
2125 2126 for (i = 0; i < count; i++) {
2126 2127 vfs_copyopt(&smo->mo_list[i], &motbl[i]);
2127 2128 }
2128 2129 for (i = count; i < count + extra; i++) {
2129 2130 motbl[i].mo_flags = MO_EMPTY;
2130 2131 }
2131 2132 }
2132 2133
2133 2134 /*
2134 2135 * Copy a mount options table.
2135 2136 *
2136 2137 * This function is *not* for general use by filesystems.
2137 2138 *
2138 2139 * Note: caller is responsible for locking the vfs list, if needed,
2139 2140 * to protect smo and dmo.
2140 2141 */
2141 2142 void
2142 2143 vfs_copyopttbl(const mntopts_t *smo, mntopts_t *dmo)
2143 2144 {
2144 2145 vfs_copyopttbl_extend(smo, dmo, 0);
2145 2146 }
2146 2147
2147 2148 static char **
2148 2149 vfs_mergecancelopts(const mntopt_t *mop1, const mntopt_t *mop2)
2149 2150 {
2150 2151 int c1 = 0;
2151 2152 int c2 = 0;
2152 2153 char **result;
2153 2154 char **sp1, **sp2, **dp;
2154 2155
2155 2156 /*
2156 2157 * First we count both lists of cancel options.
2157 2158 * If either is NULL or has no elements, we return a copy of
2158 2159 * the other.
2159 2160 */
2160 2161 if (mop1->mo_cancel != NULL) {
2161 2162 for (; mop1->mo_cancel[c1] != NULL; c1++)
2162 2163 /* count cancel options in mop1 */;
2163 2164 }
2164 2165
2165 2166 if (c1 == 0)
2166 2167 return (vfs_copycancelopt_extend(mop2->mo_cancel, 0));
2167 2168
2168 2169 if (mop2->mo_cancel != NULL) {
2169 2170 for (; mop2->mo_cancel[c2] != NULL; c2++)
2170 2171 /* count cancel options in mop2 */;
2171 2172 }
2172 2173
2173 2174 result = vfs_copycancelopt_extend(mop1->mo_cancel, c2);
2174 2175
2175 2176 if (c2 == 0)
2176 2177 return (result);
2177 2178
2178 2179 /*
2179 2180 * When we get here, we've got two sets of cancel options;
2180 2181 * we need to merge the two sets. We know that the result
2181 2182 * array has "c1+c2+1" entries and in the end we might shrink
2182 2183 * it.
2183 2184 * Result now has a copy of the c1 entries from mop1; we'll
2184 2185  * now look up each entry of mop2 in mop1 and copy it only if
2185 2186  * it is unique.
2186 2187 * This operation is O(n^2) but it's only called once per
2187 2188 * filesystem per duplicate option. This is a situation
2188 2189 * which doesn't arise with the filesystems in ON and
2189 2190 * n is generally 1.
2190 2191 */
2191 2192
2192 2193 dp = &result[c1];
2193 2194 for (sp2 = mop2->mo_cancel; *sp2 != NULL; sp2++) {
2194 2195 for (sp1 = mop1->mo_cancel; *sp1 != NULL; sp1++) {
2195 2196 if (strcmp(*sp1, *sp2) == 0)
2196 2197 break;
2197 2198 }
2198 2199 if (*sp1 == NULL) {
2199 2200 /*
2200 2201 * Option *sp2 not found in mop1, so copy it.
2201 2202 * The calls to vfs_copycancelopt_extend()
2202 2203 * guarantee that there's enough room.
2203 2204 */
2204 2205 *dp = kmem_alloc(strlen(*sp2) + 1, KM_SLEEP);
2205 2206 (void) strcpy(*dp++, *sp2);
2206 2207 }
2207 2208 }
2208 2209 if (dp != &result[c1+c2]) {
2209 2210 size_t bytes = (dp - result + 1) * sizeof (char *);
2210 2211 char **nres = kmem_alloc(bytes, KM_SLEEP);
2211 2212
2212 2213 bcopy(result, nres, bytes);
2213 2214 kmem_free(result, (c1 + c2 + 1) * sizeof (char *));
2214 2215 result = nres;
2215 2216 }
2216 2217 return (result);
2217 2218 }
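/*
 * For illustration, with hypothetical cancel lists: merging an option
 * whose mo_cancel is { "ro", NULL } with one whose mo_cancel is
 * { "ro", "nosuid", NULL } allocates a four-slot result, copies "ro",
 * skips the duplicate "ro" from the second list, appends "nosuid", and
 * finally shrinks the array to the three slots actually used, giving
 * { "ro", "nosuid", NULL }.
 */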
2218 2219
2219 2220 /*
2220 2221 * Merge two mount option tables (outer and inner) into one. This is very
2221 2222 * similar to "merging" global variables and automatic variables in C.
2222 2223 *
2223 2224 * This isn't (and doesn't have to be) fast.
2224 2225 *
2225 2226 * This function is *not* for general use by filesystems.
2226 2227 *
2227 2228 * Note: caller is responsible for locking the vfs list, if needed,
2228 2229 * to protect omo, imo & dmo.
2229 2230 */
2230 2231 void
2231 2232 vfs_mergeopttbl(const mntopts_t *omo, const mntopts_t *imo, mntopts_t *dmo)
2232 2233 {
2233 2234 uint_t i, count;
2234 2235 mntopt_t *mop, *motbl;
2235 2236 uint_t freeidx;
2236 2237
2237 2238 /*
2238 2239 * First determine how much space we need to allocate.
2239 2240 */
2240 2241 count = omo->mo_count;
2241 2242 for (i = 0; i < imo->mo_count; i++) {
2242 2243 if (imo->mo_list[i].mo_flags & MO_EMPTY)
2243 2244 continue;
2244 2245 if (vfs_hasopt(omo, imo->mo_list[i].mo_name) == NULL)
2245 2246 count++;
2246 2247 }
2247 2248 ASSERT(count >= omo->mo_count &&
2248 2249 count <= omo->mo_count + imo->mo_count);
2249 2250 motbl = kmem_alloc(count * sizeof (mntopt_t), KM_SLEEP);
2250 2251 for (i = 0; i < omo->mo_count; i++)
2251 2252 vfs_copyopt(&omo->mo_list[i], &motbl[i]);
2252 2253 freeidx = omo->mo_count;
2253 2254 for (i = 0; i < imo->mo_count; i++) {
2254 2255 if (imo->mo_list[i].mo_flags & MO_EMPTY)
2255 2256 continue;
2256 2257 if ((mop = vfs_hasopt(omo, imo->mo_list[i].mo_name)) != NULL) {
2257 2258 char **newcanp;
2258 2259 uint_t index = mop - omo->mo_list;
2259 2260
2260 2261 newcanp = vfs_mergecancelopts(mop, &motbl[index]);
2261 2262
2262 2263 vfs_freeopt(&motbl[index]);
2263 2264 vfs_copyopt(&imo->mo_list[i], &motbl[index]);
2264 2265
2265 2266 vfs_freecancelopt(motbl[index].mo_cancel);
2266 2267 motbl[index].mo_cancel = newcanp;
2267 2268 } else {
2268 2269 /*
2269 2270 * If it's a new option, just copy it over to the first
2270 2271 * free location.
2271 2272 */
2272 2273 vfs_copyopt(&imo->mo_list[i], &motbl[freeidx++]);
2273 2274 }
2274 2275 }
2275 2276 dmo->mo_count = count;
2276 2277 dmo->mo_list = motbl;
2277 2278 }
2278 2279
2279 2280 /*
2280 2281 * Functions to set and clear mount options in a mount options table.
2281 2282 */
2282 2283
2283 2284 /*
2284 2285 * Clear a mount option, if it exists.
2285 2286 *
2286 2287 * The update_mnttab arg indicates whether mops is part of a vfs that is on
2287 2288 * the vfs list.
2288 2289 */
2289 2290 static void
2290 2291 vfs_clearmntopt_nolock(mntopts_t *mops, const char *opt, int update_mnttab)
2291 2292 {
2292 2293 struct mntopt *mop;
2293 2294 uint_t i, count;
2294 2295
2295 2296 ASSERT(!update_mnttab || RW_WRITE_HELD(&vfslist));
2296 2297
2297 2298 count = mops->mo_count;
2298 2299 for (i = 0; i < count; i++) {
2299 2300 mop = &mops->mo_list[i];
2300 2301
2301 2302 if (mop->mo_flags & MO_EMPTY)
2302 2303 continue;
2303 2304 if (strcmp(opt, mop->mo_name))
2304 2305 continue;
2305 2306 mop->mo_flags &= ~MO_SET;
2306 2307 if (mop->mo_arg != NULL) {
2307 2308 kmem_free(mop->mo_arg, strlen(mop->mo_arg) + 1);
2308 2309 }
2309 2310 mop->mo_arg = NULL;
2310 2311 if (update_mnttab)
2311 2312 vfs_mnttab_modtimeupd();
2312 2313 break;
2313 2314 }
2314 2315 }
2315 2316
2316 2317 void
2317 2318 vfs_clearmntopt(struct vfs *vfsp, const char *opt)
2318 2319 {
2319 2320 int gotlock = 0;
2320 2321
2321 2322 if (VFS_ON_LIST(vfsp)) {
2322 2323 gotlock = 1;
2323 2324 vfs_list_lock();
2324 2325 }
2325 2326 vfs_clearmntopt_nolock(&vfsp->vfs_mntopts, opt, gotlock);
2326 2327 if (gotlock)
2327 2328 vfs_list_unlock();
2328 2329 }
2329 2330
2330 2331
2331 2332 /*
2332 2333 * Set a mount option on. If it's not found in the table, it's silently
2333 2334 * ignored. If the option has MO_IGNORE set, it is still set unless the
2334 2335 * VFS_NOFORCEOPT bit is set in the flags. Also, VFS_DISPLAY/VFS_NODISPLAY flag
2335 2336 * bits can be used to toggle the MO_NODISPLAY bit for the option.
2336 2337 * If the VFS_CREATEOPT flag bit is set then the first option slot with
2337 2338 * MO_EMPTY set is created as the option passed in.
2338 2339 *
2339 2340 * The update_mnttab arg indicates whether mops is part of a vfs that is on
2340 2341 * the vfs list.
2341 2342 */
2342 2343 static void
2343 2344 vfs_setmntopt_nolock(mntopts_t *mops, const char *opt,
2344 2345 const char *arg, int flags, int update_mnttab)
2345 2346 {
2346 2347 mntopt_t *mop;
2347 2348 uint_t i, count;
2348 2349 char *sp;
2349 2350
2350 2351 ASSERT(!update_mnttab || RW_WRITE_HELD(&vfslist));
2351 2352
2352 2353 if (flags & VFS_CREATEOPT) {
2353 2354 if (vfs_hasopt(mops, opt) != NULL) {
2354 2355 flags &= ~VFS_CREATEOPT;
2355 2356 }
2356 2357 }
2357 2358 count = mops->mo_count;
2358 2359 for (i = 0; i < count; i++) {
2359 2360 mop = &mops->mo_list[i];
2360 2361
2361 2362 if (mop->mo_flags & MO_EMPTY) {
2362 2363 if ((flags & VFS_CREATEOPT) == 0)
2363 2364 continue;
2364 2365 sp = kmem_alloc(strlen(opt) + 1, KM_SLEEP);
2365 2366 (void) strcpy(sp, opt);
2366 2367 mop->mo_name = sp;
2367 2368 if (arg != NULL)
2368 2369 mop->mo_flags = MO_HASVALUE;
2369 2370 else
2370 2371 mop->mo_flags = 0;
2371 2372 } else if (strcmp(opt, mop->mo_name)) {
2372 2373 continue;
2373 2374 }
2374 2375 if ((mop->mo_flags & MO_IGNORE) && (flags & VFS_NOFORCEOPT))
2375 2376 break;
2376 2377 if (arg != NULL && (mop->mo_flags & MO_HASVALUE) != 0) {
2377 2378 sp = kmem_alloc(strlen(arg) + 1, KM_SLEEP);
2378 2379 (void) strcpy(sp, arg);
2379 2380 } else {
2380 2381 sp = NULL;
2381 2382 }
2382 2383 if (mop->mo_arg != NULL)
2383 2384 kmem_free(mop->mo_arg, strlen(mop->mo_arg) + 1);
2384 2385 mop->mo_arg = sp;
2385 2386 if (flags & VFS_DISPLAY)
2386 2387 mop->mo_flags &= ~MO_NODISPLAY;
2387 2388 if (flags & VFS_NODISPLAY)
2388 2389 mop->mo_flags |= MO_NODISPLAY;
2389 2390 mop->mo_flags |= MO_SET;
2390 2391 if (mop->mo_cancel != NULL) {
2391 2392 char **cp;
2392 2393
2393 2394 for (cp = mop->mo_cancel; *cp != NULL; cp++)
2394 2395 vfs_clearmntopt_nolock(mops, *cp, 0);
2395 2396 }
2396 2397 if (update_mnttab)
2397 2398 vfs_mnttab_modtimeupd();
2398 2399 break;
2399 2400 }
2400 2401 }
2401 2402
2402 2403 void
2403 2404 vfs_setmntopt(struct vfs *vfsp, const char *opt, const char *arg, int flags)
2404 2405 {
2405 2406 int gotlock = 0;
2406 2407
2407 2408 if (VFS_ON_LIST(vfsp)) {
2408 2409 gotlock = 1;
2409 2410 vfs_list_lock();
2410 2411 }
2411 2412 vfs_setmntopt_nolock(&vfsp->vfs_mntopts, opt, arg, flags, gotlock);
2412 2413 if (gotlock)
2413 2414 vfs_list_unlock();
2414 2415 }
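/*
 * A minimal sketch of typical use, assuming an option table in which
 * "ro" and "rw" name each other in their mo_cancel arrays (as the
 * common filesystem option tables do):
 *
 *	vfs_setmntopt(vfsp, MNTOPT_RO, NULL, 0);
 *
 * sets MO_SET on "ro" and, through the mo_cancel walk above, clears a
 * previously set "rw", so the two are never reported together.
 */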
2415 2416
2416 2417
2417 2418 /*
2418 2419 * Add a "tag" option to a mounted file system's options list.
2419 2420 *
2420 2421 * Note: caller is responsible for locking the vfs list, if needed,
2421 2422 * to protect mops.
2422 2423 */
2423 2424 static mntopt_t *
2424 2425 vfs_addtag(mntopts_t *mops, const char *tag)
2425 2426 {
2426 2427 uint_t count;
2427 2428 mntopt_t *mop, *motbl;
2428 2429
2429 2430 count = mops->mo_count + 1;
2430 2431 motbl = kmem_zalloc(count * sizeof (mntopt_t), KM_SLEEP);
2431 2432 if (mops->mo_count) {
2432 2433 size_t len = (count - 1) * sizeof (mntopt_t);
2433 2434
2434 2435 bcopy(mops->mo_list, motbl, len);
2435 2436 kmem_free(mops->mo_list, len);
2436 2437 }
2437 2438 mops->mo_count = count;
2438 2439 mops->mo_list = motbl;
2439 2440 mop = &motbl[count - 1];
2440 2441 mop->mo_flags = MO_TAG;
2441 2442 mop->mo_name = kmem_alloc(strlen(tag) + 1, KM_SLEEP);
2442 2443 (void) strcpy(mop->mo_name, tag);
2443 2444 return (mop);
2444 2445 }
2445 2446
2446 2447 /*
2447 2448 * Allow users to set arbitrary "tags" in a vfs's mount options.
2448 2449 * Broader use within the kernel is discouraged.
2449 2450 */
2450 2451 int
2451 2452 vfs_settag(uint_t major, uint_t minor, const char *mntpt, const char *tag,
2452 2453 cred_t *cr)
2453 2454 {
2454 2455 vfs_t *vfsp;
2455 2456 mntopts_t *mops;
2456 2457 mntopt_t *mop;
2457 2458 int found = 0;
2458 2459 dev_t dev = makedevice(major, minor);
2459 2460 int err = 0;
2460 2461 char *buf = kmem_alloc(MAX_MNTOPT_STR, KM_SLEEP);
2461 2462
2462 2463 /*
2463 2464 * Find the desired mounted file system
2464 2465 */
2465 2466 vfs_list_lock();
2466 2467 vfsp = rootvfs;
2467 2468 do {
2468 2469 if (vfsp->vfs_dev == dev &&
2469 2470 strcmp(mntpt, refstr_value(vfsp->vfs_mntpt)) == 0) {
2470 2471 found = 1;
2471 2472 break;
2472 2473 }
2473 2474 vfsp = vfsp->vfs_next;
2474 2475 } while (vfsp != rootvfs);
2475 2476
2476 2477 if (!found) {
2477 2478 err = EINVAL;
2478 2479 goto out;
2479 2480 }
2480 2481 err = secpolicy_fs_config(cr, vfsp);
2481 2482 if (err != 0)
2482 2483 goto out;
2483 2484
2484 2485 mops = &vfsp->vfs_mntopts;
2485 2486 /*
2486 2487 * Add tag if it doesn't already exist
2487 2488 */
2488 2489 if ((mop = vfs_hasopt(mops, tag)) == NULL) {
2489 2490 int len;
2490 2491
2491 2492 (void) vfs_buildoptionstr(mops, buf, MAX_MNTOPT_STR);
2492 2493 len = strlen(buf);
2493 2494 if (len + strlen(tag) + 2 > MAX_MNTOPT_STR) {
2494 2495 err = ENAMETOOLONG;
2495 2496 goto out;
2496 2497 }
2497 2498 mop = vfs_addtag(mops, tag);
2498 2499 }
2499 2500 if ((mop->mo_flags & MO_TAG) == 0) {
2500 2501 err = EINVAL;
2501 2502 goto out;
2502 2503 }
2503 2504 vfs_setmntopt_nolock(mops, tag, NULL, 0, 1);
2504 2505 out:
2505 2506 vfs_list_unlock();
2506 2507 kmem_free(buf, MAX_MNTOPT_STR);
2507 2508 return (err);
2508 2509 }
2509 2510
2510 2511 /*
2511 2512 * Allow users to remove arbitrary "tags" in a vfs's mount options.
2512 2513 * Broader use within the kernel is discouraged.
2513 2514 */
2514 2515 int
2515 2516 vfs_clrtag(uint_t major, uint_t minor, const char *mntpt, const char *tag,
2516 2517 cred_t *cr)
2517 2518 {
2518 2519 vfs_t *vfsp;
2519 2520 mntopt_t *mop;
2520 2521 int found = 0;
2521 2522 dev_t dev = makedevice(major, minor);
2522 2523 int err = 0;
2523 2524
2524 2525 /*
2525 2526 * Find the desired mounted file system
2526 2527 */
2527 2528 vfs_list_lock();
2528 2529 vfsp = rootvfs;
2529 2530 do {
2530 2531 if (vfsp->vfs_dev == dev &&
2531 2532 strcmp(mntpt, refstr_value(vfsp->vfs_mntpt)) == 0) {
2532 2533 found = 1;
2533 2534 break;
2534 2535 }
2535 2536 vfsp = vfsp->vfs_next;
2536 2537 } while (vfsp != rootvfs);
2537 2538
2538 2539 if (!found) {
2539 2540 err = EINVAL;
2540 2541 goto out;
2541 2542 }
2542 2543 err = secpolicy_fs_config(cr, vfsp);
2543 2544 if (err != 0)
2544 2545 goto out;
2545 2546
2546 2547 if ((mop = vfs_hasopt(&vfsp->vfs_mntopts, tag)) == NULL) {
2547 2548 err = EINVAL;
2548 2549 goto out;
2549 2550 }
2550 2551 if ((mop->mo_flags & MO_TAG) == 0) {
2551 2552 err = EINVAL;
2552 2553 goto out;
2553 2554 }
2554 2555 vfs_clearmntopt_nolock(&vfsp->vfs_mntopts, tag, 1);
2555 2556 out:
2556 2557 vfs_list_unlock();
2557 2558 return (err);
2558 2559 }
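/*
 * Usage sketch for the tag interfaces above, with a hypothetical mount
 * point and tag name: a caller that knows the mount's device and mount
 * point can attach and later remove a private marker option:
 *
 *	err = vfs_settag(getmajor(dev), getminor(dev), "/export/home",
 *	    "mymarker", cr);
 *	...
 *	err = vfs_clrtag(getmajor(dev), getminor(dev), "/export/home",
 *	    "mymarker", cr);
 *
 * Both return EINVAL if no mounted filesystem matches the device and
 * mount point, or if the named option exists but is not a MO_TAG entry.
 */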
2559 2560
2560 2561 /*
2561 2562 * Function to parse an option string and fill in a mount options table.
2562 2563 * Unknown options are silently ignored. The input option string is modified
2563 2564 * by replacing separators with nulls. If the create flag is set, options
2564 2565 * not found in the table are just added on the fly. The table must have
2565 2566 * an option slot marked MO_EMPTY to add an option on the fly.
2566 2567 *
2567 2568 * This function is *not* for general use by filesystems.
2568 2569 *
2569 2570 * Note: caller is responsible for locking the vfs list, if needed,
2570 2571  * to protect mops.
2571 2572 */
2572 2573 void
2573 2574 vfs_parsemntopts(mntopts_t *mops, char *osp, int create)
2574 2575 {
2575 2576 char *s = osp, *p, *nextop, *valp, *cp, *ep;
2576 2577 int setflg = VFS_NOFORCEOPT;
2577 2578
2578 2579 if (osp == NULL)
2579 2580 return;
2580 2581 while (*s != '\0') {
2581 2582 p = strchr(s, ','); /* find next option */
2582 2583 if (p == NULL) {
2583 2584 cp = NULL;
2584 2585 p = s + strlen(s);
2585 2586 } else {
2586 2587 cp = p; /* save location of comma */
2587 2588 *p++ = '\0'; /* mark end and point to next option */
2588 2589 }
2589 2590 nextop = p;
2590 2591 p = strchr(s, '='); /* look for value */
2591 2592 if (p == NULL) {
2592 2593 valp = NULL; /* no value supplied */
2593 2594 } else {
2594 2595 ep = p; /* save location of equals */
2595 2596 *p++ = '\0'; /* end option and point to value */
2596 2597 valp = p;
2597 2598 }
2598 2599 /*
2599 2600 * set option into options table
2600 2601 */
2601 2602 if (create)
2602 2603 setflg |= VFS_CREATEOPT;
2603 2604 vfs_setmntopt_nolock(mops, s, valp, setflg, 0);
2604 2605 if (cp != NULL)
2605 2606 *cp = ','; /* restore the comma */
2606 2607 if (valp != NULL)
2607 2608 *ep = '='; /* restore the equals */
2608 2609 s = nextop;
2609 2610 }
2610 2611 }
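/*
 * Parsing example with a hypothetical option string: given osp of
 * "ro,size=512", the loop above temporarily writes NULs over the ','
 * and '=' separators, effectively performing
 *
 *	vfs_setmntopt_nolock(mops, "ro", NULL, VFS_NOFORCEOPT, 0);
 *	vfs_setmntopt_nolock(mops, "size", "512", VFS_NOFORCEOPT, 0);
 *
 * and then restores the separators, so osp reads "ro,size=512" again
 * on return (with VFS_CREATEOPT also passed when create is nonzero).
 */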
2611 2612
2612 2613 /*
2613 2614 * Function to inquire if an option exists in a mount options table.
2614 2615 * Returns a pointer to the option if it exists, else NULL.
2615 2616 *
2616 2617 * This function is *not* for general use by filesystems.
2617 2618 *
2618 2619 * Note: caller is responsible for locking the vfs list, if needed,
2619 2620 * to protect mops.
2620 2621 */
2621 2622 struct mntopt *
2622 2623 vfs_hasopt(const mntopts_t *mops, const char *opt)
2623 2624 {
2624 2625 struct mntopt *mop;
2625 2626 uint_t i, count;
2626 2627
2627 2628 count = mops->mo_count;
2628 2629 for (i = 0; i < count; i++) {
2629 2630 mop = &mops->mo_list[i];
2630 2631
2631 2632 if (mop->mo_flags & MO_EMPTY)
2632 2633 continue;
2633 2634 if (strcmp(opt, mop->mo_name) == 0)
2634 2635 return (mop);
2635 2636 }
2636 2637 return (NULL);
2637 2638 }
2638 2639
2639 2640 /*
2640 2641 * Function to inquire if an option is set in a mount options table.
2641 2642 * Returns non-zero if set and fills in the arg pointer with a pointer to
2642 2643 * the argument string or NULL if there is no argument string.
2643 2644 */
2644 2645 static int
2645 2646 vfs_optionisset_nolock(const mntopts_t *mops, const char *opt, char **argp)
2646 2647 {
2647 2648 struct mntopt *mop;
2648 2649 uint_t i, count;
2649 2650
2650 2651 count = mops->mo_count;
2651 2652 for (i = 0; i < count; i++) {
2652 2653 mop = &mops->mo_list[i];
2653 2654
2654 2655 if (mop->mo_flags & MO_EMPTY)
2655 2656 continue;
2656 2657 if (strcmp(opt, mop->mo_name))
2657 2658 continue;
2658 2659 if ((mop->mo_flags & MO_SET) == 0)
2659 2660 return (0);
2660 2661 if (argp != NULL && (mop->mo_flags & MO_HASVALUE) != 0)
2661 2662 *argp = mop->mo_arg;
2662 2663 return (1);
2663 2664 }
2664 2665 return (0);
2665 2666 }
2666 2667
2667 2668
2668 2669 int
2669 2670 vfs_optionisset(const struct vfs *vfsp, const char *opt, char **argp)
2670 2671 {
2671 2672 int ret;
2672 2673
2673 2674 vfs_list_read_lock();
2674 2675 ret = vfs_optionisset_nolock(&vfsp->vfs_mntopts, opt, argp);
2675 2676 vfs_list_unlock();
2676 2677 return (ret);
2677 2678 }
2678 2679
2679 2680
2680 2681 /*
2681 2682 * Construct a comma separated string of the options set in the given
2682 2683 * mount table, return the string in the given buffer. Return non-zero if
2683 2684 * the buffer would overflow.
2684 2685 *
2685 2686 * This function is *not* for general use by filesystems.
2686 2687 *
2687 2688 * Note: caller is responsible for locking the vfs list, if needed,
2688 2689 * to protect mp.
2689 2690 */
2690 2691 int
2691 2692 vfs_buildoptionstr(const mntopts_t *mp, char *buf, int len)
2692 2693 {
2693 2694 char *cp;
2694 2695 uint_t i;
2695 2696
2696 2697 buf[0] = '\0';
2697 2698 cp = buf;
2698 2699 for (i = 0; i < mp->mo_count; i++) {
2699 2700 struct mntopt *mop;
2700 2701
2701 2702 mop = &mp->mo_list[i];
2702 2703 if (mop->mo_flags & MO_SET) {
2703 2704 int optlen, comma = 0;
2704 2705
2705 2706 if (buf[0] != '\0')
2706 2707 comma = 1;
2707 2708 optlen = strlen(mop->mo_name);
2708 2709 if (strlen(buf) + comma + optlen + 1 > len)
2709 2710 goto err;
2710 2711 if (comma)
2711 2712 *cp++ = ',';
2712 2713 (void) strcpy(cp, mop->mo_name);
2713 2714 cp += optlen;
2714 2715 /*
2715 2716 * Append option value if there is one
2716 2717 */
2717 2718 if (mop->mo_arg != NULL) {
2718 2719 int arglen;
2719 2720
2720 2721 arglen = strlen(mop->mo_arg);
2721 2722 if (strlen(buf) + arglen + 2 > len)
2722 2723 goto err;
2723 2724 *cp++ = '=';
2724 2725 (void) strcpy(cp, mop->mo_arg);
2725 2726 cp += arglen;
2726 2727 }
2727 2728 }
2728 2729 }
2729 2730 return (0);
2730 2731 err:
2731 2732 return (EOVERFLOW);
2732 2733 }
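/*
 * For illustration: with a table in which "ro" is set with no argument
 * and "size" is set with mo_arg "512", the routine above writes
 * "ro,size=512" into buf. If at any point the next option or argument
 * would not fit within len bytes (including the NUL), it returns
 * EOVERFLOW, possibly leaving a partial option string in buf.
 */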
2733 2734
2734 2735 static void
2735 2736 vfs_freecancelopt(char **moc)
2736 2737 {
2737 2738 if (moc != NULL) {
2738 2739 int ccnt = 0;
2739 2740 char **cp;
2740 2741
2741 2742 for (cp = moc; *cp != NULL; cp++) {
2742 2743 kmem_free(*cp, strlen(*cp) + 1);
2743 2744 ccnt++;
2744 2745 }
2745 2746 kmem_free(moc, (ccnt + 1) * sizeof (char *));
2746 2747 }
2747 2748 }
2748 2749
2749 2750 static void
2750 2751 vfs_freeopt(mntopt_t *mop)
2751 2752 {
2752 2753 if (mop->mo_name != NULL)
2753 2754 kmem_free(mop->mo_name, strlen(mop->mo_name) + 1);
2754 2755
2755 2756 vfs_freecancelopt(mop->mo_cancel);
2756 2757
2757 2758 if (mop->mo_arg != NULL)
2758 2759 kmem_free(mop->mo_arg, strlen(mop->mo_arg) + 1);
2759 2760 }
2760 2761
2761 2762 /*
2762 2763 * Free a mount options table
2763 2764 *
2764 2765 * This function is *not* for general use by filesystems.
2765 2766 *
2766 2767 * Note: caller is responsible for locking the vfs list, if needed,
2767 2768 * to protect mp.
2768 2769 */
2769 2770 void
2770 2771 vfs_freeopttbl(mntopts_t *mp)
2771 2772 {
2772 2773 uint_t i, count;
2773 2774
2774 2775 count = mp->mo_count;
2775 2776 for (i = 0; i < count; i++) {
2776 2777 vfs_freeopt(&mp->mo_list[i]);
2777 2778 }
2778 2779 if (count) {
2779 2780 kmem_free(mp->mo_list, sizeof (mntopt_t) * count);
2780 2781 mp->mo_count = 0;
2781 2782 mp->mo_list = NULL;
2782 2783 }
2783 2784 }
2784 2785
2785 2786
2786 2787 /* ARGSUSED */
2787 2788 static int
2788 2789 vfs_mntdummyread(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cred,
2789 2790 caller_context_t *ct)
2790 2791 {
2791 2792 return (0);
2792 2793 }
2793 2794
2794 2795 /* ARGSUSED */
2795 2796 static int
2796 2797 vfs_mntdummywrite(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cred,
2797 2798 caller_context_t *ct)
2798 2799 {
2799 2800 return (0);
2800 2801 }
2801 2802
2802 2803 /*
2803 2804 * The dummy vnode is currently used only by file events notification
2804 2805  * The dummy vnode is currently used only by the file events notification
2805 2806  * module, which is interested only in the timestamps.
2806 2807 /* ARGSUSED */
2807 2808 static int
2808 2809 vfs_mntdummygetattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
2809 2810 caller_context_t *ct)
2810 2811 {
2811 2812 bzero(vap, sizeof (vattr_t));
2812 2813 vap->va_type = VREG;
2813 2814 vap->va_nlink = 1;
2814 2815 vap->va_ctime = vfs_mnttab_ctime;
2815 2816 /*
2816 2817 * it is ok to just copy mtime as the time will be monotonically
2817 2818 * increasing.
2818 2819 */
2819 2820 vap->va_mtime = vfs_mnttab_mtime;
2820 2821 vap->va_atime = vap->va_mtime;
2821 2822 return (0);
2822 2823 }
2823 2824
2824 2825 static void
2825 2826 vfs_mnttabvp_setup(void)
2826 2827 {
2827 2828 vnode_t *tvp;
2828 2829 vnodeops_t *vfs_mntdummyvnops;
2829 2830 const fs_operation_def_t mnt_dummyvnodeops_template[] = {
2830 2831 VOPNAME_READ, { .vop_read = vfs_mntdummyread },
2831 2832 VOPNAME_WRITE, { .vop_write = vfs_mntdummywrite },
2832 2833 VOPNAME_GETATTR, { .vop_getattr = vfs_mntdummygetattr },
2833 2834 VOPNAME_VNEVENT, { .vop_vnevent = fs_vnevent_support },
2834 2835 NULL, NULL
2835 2836 };
2836 2837
2837 2838 if (vn_make_ops("mnttab", mnt_dummyvnodeops_template,
2838 2839 &vfs_mntdummyvnops) != 0) {
2839 2840 cmn_err(CE_WARN, "vfs_mnttabvp_setup: vn_make_ops failed");
2840 2841 /* Shouldn't happen, but not bad enough to panic */
2841 2842 return;
2842 2843 }
2843 2844
2844 2845 /*
2845 2846 * A global dummy vnode is allocated to represent mntfs files.
2846 2847 * The mntfs file (/etc/mnttab) can be monitored for file events
2847 2848 * and receive an event when mnttab changes. Dummy VOP calls
2848 2849 * will be made on this vnode. The file events notification module
2849 2850 * intercepts this vnode and delivers relevant events.
2850 2851 */
2851 2852 tvp = vn_alloc(KM_SLEEP);
2852 2853 tvp->v_flag = VNOMOUNT|VNOMAP|VNOSWAP|VNOCACHE;
2853 2854 vn_setops(tvp, vfs_mntdummyvnops);
2854 2855 tvp->v_type = VREG;
2855 2856 /*
2856 2857 * The mnt dummy ops do not reference v_data.
2857 2858 * No other module intercepting this vnode should either.
2858 2859 * Just set it to point to itself.
2859 2860 */
2860 2861 tvp->v_data = (caddr_t)tvp;
2861 2862 tvp->v_vfsp = rootvfs;
2862 2863 vfs_mntdummyvp = tvp;
2863 2864 }
2864 2865
2865 2866 /*
2866 2867 * performs fake read/write ops
2867 2868 */
2868 2869 static void
2869 2870 vfs_mnttab_rwop(int rw)
2870 2871 {
2871 2872 struct uio uio;
2872 2873 struct iovec iov;
2873 2874 char buf[1];
2874 2875
2875 2876 if (vfs_mntdummyvp == NULL)
2876 2877 return;
2877 2878
2878 2879 bzero(&uio, sizeof (uio));
2879 2880 bzero(&iov, sizeof (iov));
2880 2881 iov.iov_base = buf;
2881 2882 iov.iov_len = 0;
2882 2883 uio.uio_iov = &iov;
2883 2884 uio.uio_iovcnt = 1;
2884 2885 uio.uio_loffset = 0;
2885 2886 uio.uio_segflg = UIO_SYSSPACE;
2886 2887 uio.uio_resid = 0;
2887 2888 if (rw) {
2888 2889 (void) VOP_WRITE(vfs_mntdummyvp, &uio, 0, kcred, NULL);
2889 2890 } else {
2890 2891 (void) VOP_READ(vfs_mntdummyvp, &uio, 0, kcred, NULL);
2891 2892 }
2892 2893 }
2893 2894
2894 2895 /*
2895 2896 * Generate a write operation.
2896 2897 */
2897 2898 void
2898 2899 vfs_mnttab_writeop(void)
2899 2900 {
2900 2901 vfs_mnttab_rwop(1);
2901 2902 }
2902 2903
2903 2904 /*
2904 2905 * Generate a read operation.
2905 2906 */
2906 2907 void
2907 2908 vfs_mnttab_readop(void)
2908 2909 {
2909 2910 vfs_mnttab_rwop(0);
2910 2911 }
2911 2912
2912 2913 /*
2913 2914 * Free any mnttab information recorded in the vfs struct.
2914 2915 * The vfs must not be on the vfs list.
2915 2916 */
2916 2917 static void
2917 2918 vfs_freemnttab(struct vfs *vfsp)
2918 2919 {
2919 2920 ASSERT(!VFS_ON_LIST(vfsp));
2920 2921
2921 2922 /*
2922 2923 * Free device and mount point information
2923 2924 */
2924 2925 if (vfsp->vfs_mntpt != NULL) {
2925 2926 refstr_rele(vfsp->vfs_mntpt);
2926 2927 vfsp->vfs_mntpt = NULL;
2927 2928 }
2928 2929 if (vfsp->vfs_resource != NULL) {
2929 2930 refstr_rele(vfsp->vfs_resource);
2930 2931 vfsp->vfs_resource = NULL;
2931 2932 }
2932 2933 /*
2933 2934 * Now free mount options information
2934 2935 */
2935 2936 vfs_freeopttbl(&vfsp->vfs_mntopts);
2936 2937 }
2937 2938
2938 2939 /*
2939 2940 * Return the last mnttab modification time
2940 2941 */
2941 2942 void
2942 2943 vfs_mnttab_modtime(timespec_t *ts)
2943 2944 {
2944 2945 ASSERT(RW_LOCK_HELD(&vfslist));
2945 2946 *ts = vfs_mnttab_mtime;
2946 2947 }
2947 2948
2948 2949 /*
2949 2950 * See if mnttab is changed
2950 2951 */
2951 2952 void
2952 2953 vfs_mnttab_poll(timespec_t *old, struct pollhead **phpp)
2953 2954 {
2954 2955 int changed;
2955 2956
2956 2957 *phpp = (struct pollhead *)NULL;
2957 2958
2958 2959 /*
2959 2960  * Note: don't grab the vfs list lock before accessing vfs_mnttab_mtime;
2960 2961  * doing so can lead to deadlock against vfs_mnttab_modtimeupd(). It is safe
2961 2962 * to not grab the vfs list lock because tv_sec is monotonically
2962 2963 * increasing.
2963 2964 */
2964 2965
2965 2966 changed = (old->tv_nsec != vfs_mnttab_mtime.tv_nsec) ||
2966 2967 (old->tv_sec != vfs_mnttab_mtime.tv_sec);
2967 2968 if (!changed) {
2968 2969 *phpp = &vfs_pollhd;
2969 2970 }
2970 2971 }
2971 2972
2972 2973 /* Provide a unique and monotonically-increasing timestamp. */
2973 2974 void
2974 2975 vfs_mono_time(timespec_t *ts)
2975 2976 {
2976 2977 static volatile hrtime_t hrt; /* The saved time. */
2977 2978 hrtime_t newhrt, oldhrt; /* For effecting the CAS. */
2978 2979 timespec_t newts;
2979 2980
2980 2981 /*
2981 2982 * Try gethrestime() first, but be prepared to fabricate a sensible
2982 2983 * answer at the first sign of any trouble.
2983 2984 */
2984 2985 gethrestime(&newts);
2985 2986 newhrt = ts2hrt(&newts);
2986 2987 for (;;) {
2987 2988 oldhrt = hrt;
2988 2989 if (newhrt <= hrt)
2989 2990 newhrt = hrt + 1;
2990 2991 if (atomic_cas_64((uint64_t *)&hrt, oldhrt, newhrt) == oldhrt)
2991 2992 break;
2992 2993 }
2993 2994 hrt2ts(newhrt, ts);
2994 2995 }
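/*
 * Behaviour sketch: if two threads call vfs_mono_time() and
 * gethrestime() hands the same value t to both, the CAS loop above
 * ensures one caller returns t (or later) and the other t + 1ns (or
 * later); the saved hrt only ever moves forward, so the timestamps
 * returned are strictly increasing even across concurrent callers.
 */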
2995 2996
2996 2997 /*
2997 2998 * Update the mnttab modification time and wake up any waiters for
2998 2999 * mnttab changes
2999 3000 */
3000 3001 void
3001 3002 vfs_mnttab_modtimeupd()
3002 3003 {
3003 3004 hrtime_t oldhrt, newhrt;
3004 3005
3005 3006 ASSERT(RW_WRITE_HELD(&vfslist));
3006 3007 oldhrt = ts2hrt(&vfs_mnttab_mtime);
3007 3008 gethrestime(&vfs_mnttab_mtime);
3008 3009 newhrt = ts2hrt(&vfs_mnttab_mtime);
3009 3010 if (oldhrt == (hrtime_t)0)
3010 3011 vfs_mnttab_ctime = vfs_mnttab_mtime;
3011 3012 /*
3012 3013 * Attempt to provide unique mtime (like uniqtime but not).
3013 3014 */
3014 3015 if (newhrt == oldhrt) {
3015 3016 newhrt++;
3016 3017 hrt2ts(newhrt, &vfs_mnttab_mtime);
3017 3018 }
3018 3019 pollwakeup(&vfs_pollhd, (short)POLLRDBAND);
3019 3020 vfs_mnttab_writeop();
3020 3021 }
3021 3022
3022 3023 int
3023 3024 dounmount(struct vfs *vfsp, int flag, cred_t *cr)
3024 3025 {
3025 3026 vnode_t *coveredvp;
3026 3027 int error;
3027 3028 extern void teardown_vopstats(vfs_t *);
3028 3029
3029 3030 /*
3030 3031 * Get covered vnode. This will be NULL if the vfs is not linked
3031 3032  * into the file system name space (i.e., domount() with MS_NOSPLICE).
3032 3033 */
3033 3034 coveredvp = vfsp->vfs_vnodecovered;
3034 3035 ASSERT(coveredvp == NULL || vn_vfswlock_held(coveredvp));
3035 3036
3036 3037 /*
3037 3038 * Purge all dnlc entries for this vfs.
3038 3039 */
3039 3040 (void) dnlc_purge_vfsp(vfsp, 0);
3040 3041
3041 3042 /* For forcible umount, skip VFS_SYNC() since it may hang */
3042 3043 if ((flag & MS_FORCE) == 0)
3043 3044 (void) VFS_SYNC(vfsp, 0, cr);
3044 3045
3045 3046 /*
3046 3047 * Lock the vfs to maintain fs status quo during unmount. This
3047 3048 * has to be done after the sync because ufs_update tries to acquire
3048 3049 * the vfs_reflock.
3049 3050 */
3050 3051 vfs_lock_wait(vfsp);
3051 3052
3052 3053 if (error = VFS_UNMOUNT(vfsp, flag, cr)) {
3053 3054 vfs_unlock(vfsp);
3054 3055 if (coveredvp != NULL)
3055 3056 vn_vfsunlock(coveredvp);
3056 3057 } else if (coveredvp != NULL) {
3057 3058 teardown_vopstats(vfsp);
3058 3059 /*
3059 3060 * vfs_remove() will do a VN_RELE(vfsp->vfs_vnodecovered)
3060 3061 * when it frees vfsp so we do a VN_HOLD() so we can
3061 3062 * continue to use coveredvp afterwards.
3062 3063 */
3063 3064 VN_HOLD(coveredvp);
3064 3065 vfs_remove(vfsp);
3065 3066 vn_vfsunlock(coveredvp);
3066 3067 VN_RELE(coveredvp);
3067 3068 } else {
3068 3069 teardown_vopstats(vfsp);
3069 3070 /*
3070 3071 * Release the reference to vfs that is not linked
3071 3072 * into the name space.
3072 3073 */
3073 3074 vfs_unlock(vfsp);
3074 3075 VFS_RELE(vfsp);
3075 3076 }
3076 3077 return (error);
3077 3078 }
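/*
 * Caller sketch (illustrative): callers take the covered vnode's vfs
 * write lock before calling in, much as vfs_unmountall() does below:
 *
 *	if (vn_vfswlock(vfsp->vfs_vnodecovered) != 0)
 *		return (EBUSY);
 *	error = dounmount(vfsp, flag, CRED());
 *
 * dounmount() drops that lock itself (via vn_vfsunlock()) on both the
 * success and failure paths, so the caller must not unlock it again.
 */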
3078 3079
3079 3080
3080 3081 /*
3081 3082 * Vfs_unmountall() is called by uadmin() to unmount all
3082 3083 * mounted file systems (except the root file system) during shutdown.
3083 3084 * It follows the existing locking protocol when traversing the vfs list
3084 3085 * to sync and unmount vfses. Even though there should be no
3085 3086 * other thread running while the system is shutting down, it is prudent
3086 3087 * to still follow the locking protocol.
3087 3088 */
3088 3089 void
3089 3090 vfs_unmountall(void)
3090 3091 {
3091 3092 struct vfs *vfsp;
3092 3093 struct vfs *prev_vfsp = NULL;
3093 3094 int error;
3094 3095
3095 3096 /*
3096 3097 * Toss all dnlc entries now so that the per-vfs sync
3097 3098 * and unmount operations don't have to slog through
3098 3099 * a bunch of uninteresting vnodes over and over again.
3099 3100 */
3100 3101 dnlc_purge();
3101 3102
3102 3103 vfs_list_lock();
3103 3104 for (vfsp = rootvfs->vfs_prev; vfsp != rootvfs; vfsp = prev_vfsp) {
3104 3105 prev_vfsp = vfsp->vfs_prev;
3105 3106
3106 3107 if (vfs_lock(vfsp) != 0)
3107 3108 continue;
3108 3109 error = vn_vfswlock(vfsp->vfs_vnodecovered);
3109 3110 vfs_unlock(vfsp);
3110 3111 if (error)
3111 3112 continue;
3112 3113
3113 3114 vfs_list_unlock();
3114 3115
3115 3116 (void) VFS_SYNC(vfsp, SYNC_CLOSE, CRED());
3116 3117 (void) dounmount(vfsp, 0, CRED());
3117 3118
3118 3119 /*
3119 3120 * Since we dropped the vfslist lock above we must
3120 3121  * verify that prev_vfsp still exists, else start over.
3121 3122 */
3122 3123 vfs_list_lock();
3123 3124 for (vfsp = rootvfs->vfs_prev;
3124 3125 vfsp != rootvfs; vfsp = vfsp->vfs_prev)
3125 3126 if (vfsp == prev_vfsp)
3126 3127 break;
3127 3128 if (vfsp == rootvfs && prev_vfsp != rootvfs)
3128 3129 prev_vfsp = rootvfs->vfs_prev;
3129 3130 }
3130 3131 vfs_list_unlock();
3131 3132 }
3132 3133
3133 3134 /*
3134 3135 * Called to add an entry to the end of the vfs mount in progress list
3135 3136 */
3136 3137 void
3137 3138 vfs_addmip(dev_t dev, struct vfs *vfsp)
3138 3139 {
3139 3140 struct ipmnt *mipp;
3140 3141
3141 3142 mipp = (struct ipmnt *)kmem_alloc(sizeof (struct ipmnt), KM_SLEEP);
3142 3143 mipp->mip_next = NULL;
3143 3144 mipp->mip_dev = dev;
3144 3145 mipp->mip_vfsp = vfsp;
3145 3146 mutex_enter(&vfs_miplist_mutex);
3146 3147 if (vfs_miplist_end != NULL)
3147 3148 vfs_miplist_end->mip_next = mipp;
3148 3149 else
3149 3150 vfs_miplist = mipp;
3150 3151 vfs_miplist_end = mipp;
3151 3152 mutex_exit(&vfs_miplist_mutex);
3152 3153 }
3153 3154
3154 3155 /*
3155 3156  * Called to remove an entry from the mount in progress list,
3156 3157  * either because the mount completed or because it failed.
3157 3158 */
3158 3159 void
3159 3160 vfs_delmip(struct vfs *vfsp)
3160 3161 {
3161 3162 struct ipmnt *mipp, *mipprev;
3162 3163
3163 3164 mutex_enter(&vfs_miplist_mutex);
3164 3165 mipprev = NULL;
3165 3166 for (mipp = vfs_miplist;
3166 3167 mipp && mipp->mip_vfsp != vfsp; mipp = mipp->mip_next) {
3167 3168 mipprev = mipp;
3168 3169 }
3169 3170 if (mipp == NULL)
3170 3171 return; /* shouldn't happen */
3171 3172 if (mipp == vfs_miplist_end)
3172 3173 vfs_miplist_end = mipprev;
3173 3174 if (mipprev == NULL)
3174 3175 vfs_miplist = mipp->mip_next;
3175 3176 else
3176 3177 mipprev->mip_next = mipp->mip_next;
3177 3178 mutex_exit(&vfs_miplist_mutex);
3178 3179 kmem_free(mipp, sizeof (struct ipmnt));
3179 3180 }
3180 3181
3181 3182 /*
3182 3183 * vfs_add is called by a specific filesystem's mount routine to add
3183 3184 * the new vfs into the vfs list/hash and to cover the mounted-on vnode.
3184 3185 * The vfs should already have been locked by the caller.
3185 3186 *
3186 3187 * coveredvp is NULL if this is the root.
3187 3188 */
3188 3189 void
3189 3190 vfs_add(vnode_t *coveredvp, struct vfs *vfsp, int mflag)
3190 3191 {
3191 3192 int newflag;
3192 3193
3193 3194 ASSERT(vfs_lock_held(vfsp));
3194 3195 VFS_HOLD(vfsp);
3195 3196 newflag = vfsp->vfs_flag;
3196 3197 if (mflag & MS_RDONLY)
3197 3198 newflag |= VFS_RDONLY;
3198 3199 else
3199 3200 newflag &= ~VFS_RDONLY;
3200 3201 if (mflag & MS_NOSUID)
3201 3202 newflag |= (VFS_NOSETUID|VFS_NODEVICES);
3202 3203 else
3203 3204 newflag &= ~(VFS_NOSETUID|VFS_NODEVICES);
3204 3205 if (mflag & MS_NOMNTTAB)
3205 3206 newflag |= VFS_NOMNTTAB;
3206 3207 else
3207 3208 newflag &= ~VFS_NOMNTTAB;
3208 3209
3209 3210 if (coveredvp != NULL) {
3210 3211 ASSERT(vn_vfswlock_held(coveredvp));
3211 3212 coveredvp->v_vfsmountedhere = vfsp;
3212 3213 VN_HOLD(coveredvp);
3213 3214 }
3214 3215 vfsp->vfs_vnodecovered = coveredvp;
3215 3216 vfsp->vfs_flag = newflag;
3216 3217
3217 3218 vfs_list_add(vfsp);
3218 3219 }
3219 3220
3220 3221 /*
3221 3222 * Remove a vfs from the vfs list, null out the pointer from the
3222 3223 * covered vnode to the vfs (v_vfsmountedhere), and null out the pointer
3223 3224 * from the vfs to the covered vnode (vfs_vnodecovered). Release the
3224 3225 * reference to the vfs and to the covered vnode.
3225 3226 *
3226 3227 * Called from dounmount after it's confirmed with the file system
3227 3228 * that the unmount is legal.
3228 3229 */
3229 3230 void
3230 3231 vfs_remove(struct vfs *vfsp)
3231 3232 {
3232 3233 vnode_t *vp;
3233 3234
3234 3235 ASSERT(vfs_lock_held(vfsp));
3235 3236
3236 3237 /*
3237 3238 * Can't unmount root. Should never happen because fs will
3238 3239 * be busy.
3239 3240 */
3240 3241 if (vfsp == rootvfs)
3241 3242 panic("vfs_remove: unmounting root");
3242 3243
3243 3244 vfs_list_remove(vfsp);
3244 3245
3245 3246 /*
3246 3247 * Unhook from the file system name space.
3247 3248 */
3248 3249 vp = vfsp->vfs_vnodecovered;
3249 3250 ASSERT(vn_vfswlock_held(vp));
3250 3251 vp->v_vfsmountedhere = NULL;
3251 3252 vfsp->vfs_vnodecovered = NULL;
3252 3253 VN_RELE(vp);
3253 3254
3254 3255 /*
3255 3256 * Release lock and wakeup anybody waiting.
3256 3257 */
3257 3258 vfs_unlock(vfsp);
3258 3259 VFS_RELE(vfsp);
3259 3260 }
3260 3261
3261 3262 /*
3262 3263 * Lock a filesystem to prevent access to it while mounting,
3263 3264 * unmounting and syncing. Return EBUSY immediately if lock
3264 3265 * can't be acquired.
3265 3266 */
3266 3267 int
3267 3268 vfs_lock(vfs_t *vfsp)
3268 3269 {
3269 3270 vn_vfslocks_entry_t *vpvfsentry;
3270 3271
3271 3272 vpvfsentry = vn_vfslocks_getlock(vfsp);
3272 3273 if (rwst_tryenter(&vpvfsentry->ve_lock, RW_WRITER))
3273 3274 return (0);
3274 3275
3275 3276 vn_vfslocks_rele(vpvfsentry);
3276 3277 return (EBUSY);
3277 3278 }
3278 3279
3279 3280 int
3280 3281 vfs_rlock(vfs_t *vfsp)
3281 3282 {
3282 3283 vn_vfslocks_entry_t *vpvfsentry;
3283 3284
3284 3285 vpvfsentry = vn_vfslocks_getlock(vfsp);
3285 3286
3286 3287 if (rwst_tryenter(&vpvfsentry->ve_lock, RW_READER))
3287 3288 return (0);
3288 3289
3289 3290 vn_vfslocks_rele(vpvfsentry);
3290 3291 return (EBUSY);
3291 3292 }
3292 3293
3293 3294 void
3294 3295 vfs_lock_wait(vfs_t *vfsp)
3295 3296 {
3296 3297 vn_vfslocks_entry_t *vpvfsentry;
3297 3298
3298 3299 vpvfsentry = vn_vfslocks_getlock(vfsp);
3299 3300 rwst_enter(&vpvfsentry->ve_lock, RW_WRITER);
3300 3301 }
3301 3302
3302 3303 void
3303 3304 vfs_rlock_wait(vfs_t *vfsp)
3304 3305 {
3305 3306 vn_vfslocks_entry_t *vpvfsentry;
3306 3307
3307 3308 vpvfsentry = vn_vfslocks_getlock(vfsp);
3308 3309 rwst_enter(&vpvfsentry->ve_lock, RW_READER);
3309 3310 }
3310 3311
3311 3312 /*
3312 3313 * Unlock a locked filesystem.
3313 3314 */
3314 3315 void
3315 3316 vfs_unlock(vfs_t *vfsp)
3316 3317 {
3317 3318 vn_vfslocks_entry_t *vpvfsentry;
3318 3319
3319 3320 /*
3320 3321 * vfs_unlock will mimic sema_v behaviour to fix 4748018.
3321 3322  * These changes should remain in place for patch releases as well.
3322 3323 */
3323 3324 if (panicstr)
3324 3325 return;
3325 3326
3326 3327 /*
3327 3328 * ve_refcount needs to be dropped twice here.
3328 3329  * 1. To release the reference from the call to vn_vfslocks_getlock()
3329 3330  * 2. To release the reference from the locking routines like
3330 3331  *    vfs_rlock_wait()/vfs_lock_wait()/vfs_lock(), etc.
3331 3332 */
3332 3333
3333 3334 vpvfsentry = vn_vfslocks_getlock(vfsp);
3334 3335 vn_vfslocks_rele(vpvfsentry);
3335 3336
3336 3337 rwst_exit(&vpvfsentry->ve_lock);
3337 3338 vn_vfslocks_rele(vpvfsentry);
3338 3339 }
3339 3340
3340 3341 /*
3341 3342 * Utility routine that allows a filesystem to construct its
3342 3343 * fsid in "the usual way" - by munging some underlying dev_t and
3343 3344 * the filesystem type number into the 64-bit fsid. Note that
3344 3345 * this implicitly relies on dev_t persistence to make filesystem
3345 3346 * id's persistent.
3346 3347 *
3347 3348 * There's nothing to prevent an individual fs from constructing its
3348 3349 * fsid in a different way, and indeed they should.
3349 3350 *
3350 3351 * Since we want fsids to be 32-bit quantities (so that they can be
3351 3352 * exported identically by either 32-bit or 64-bit APIs, as well as
3352 3353 * the fact that fsid's are "known" to NFS), we compress the device
3353 3354 * number given down to 32-bits, and panic if that isn't possible.
3354 3355 */
3355 3356 void
3356 3357 vfs_make_fsid(fsid_t *fsi, dev_t dev, int val)
3357 3358 {
3358 3359 if (!cmpldev((dev32_t *)&fsi->val[0], dev))
3359 3360 panic("device number too big for fsid!");
3360 3361 fsi->val[1] = val;
3361 3362 }
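/*
 * Typical usage sketch (illustrative): a filesystem's mount routine
 * passes the backing device and its index in the vfssw table, e.g.
 *
 *	vfs_make_fsid(&vfsp->vfs_fsid, dev, vfstype);
 *
 * which compresses dev into vfs_fsid.val[0] and records the type
 * index in vfs_fsid.val[1].
 */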
3362 3363
3363 3364 int
3364 3365 vfs_lock_held(vfs_t *vfsp)
3365 3366 {
3366 3367 int held;
3367 3368 vn_vfslocks_entry_t *vpvfsentry;
3368 3369
3369 3370 /*
3370 3371 * vfs_lock_held will mimic sema_held behaviour
3371 3372  * if panicstr is set. These changes should remain in place
3372 3373  * for patch releases as well.
3373 3374 */
3374 3375 if (panicstr)
3375 3376 return (1);
3376 3377
3377 3378 vpvfsentry = vn_vfslocks_getlock(vfsp);
3378 3379 held = rwst_lock_held(&vpvfsentry->ve_lock, RW_WRITER);
3379 3380
3380 3381 vn_vfslocks_rele(vpvfsentry);
3381 3382 return (held);
3382 3383 }
3383 3384
3384 3385 struct _kthread *
3385 3386 vfs_lock_owner(vfs_t *vfsp)
3386 3387 {
3387 3388 struct _kthread *owner;
3388 3389 vn_vfslocks_entry_t *vpvfsentry;
3389 3390
3390 3391 /*
3391 3392  * vfs_lock_owner will mimic sema_held behaviour
3392 3393  * if panicstr is set. These changes should remain in place
3393 3394  * for patch releases as well.
3394 3395 */
3395 3396 if (panicstr)
3396 3397 return (NULL);
3397 3398
3398 3399 vpvfsentry = vn_vfslocks_getlock(vfsp);
3399 3400 owner = rwst_owner(&vpvfsentry->ve_lock);
3400 3401
3401 3402 vn_vfslocks_rele(vpvfsentry);
3402 3403 return (owner);
3403 3404 }
3404 3405
3405 3406 /*
3406 3407 * vfs list locking.
3407 3408 *
3408 3409 * Rather than manipulate the vfslist lock directly, we abstract into lock
3409 3410 * and unlock routines to allow the locking implementation to be changed for
3410 3411 * clustering.
3411 3412 *
3412 3413 * Whenever the vfs list is modified through its hash links, the overall list
3413 3414 * lock must be obtained before locking the relevant hash bucket. But to see
3414 3415 * whether a given vfs is on the list, it suffices to obtain the lock for the
3415 3416 * hash bucket without getting the overall list lock. (See getvfs() below.)
3416 3417 */
3417 3418
3418 3419 void
3419 3420 vfs_list_lock()
3420 3421 {
3421 3422 rw_enter(&vfslist, RW_WRITER);
3422 3423 }
3423 3424
3424 3425 void
3425 3426 vfs_list_read_lock()
3426 3427 {
3427 3428 rw_enter(&vfslist, RW_READER);
3428 3429 }
3429 3430
3430 3431 void
3431 3432 vfs_list_unlock()
3432 3433 {
3433 3434 rw_exit(&vfslist);
3434 3435 }
3435 3436
3436 3437 /*
3437 3438 * Low level worker routines for adding entries to and removing entries from
3438 3439 * the vfs list.
3439 3440 */
3440 3441
3441 3442 static void
3442 3443 vfs_hash_add(struct vfs *vfsp, int insert_at_head)
3443 3444 {
3444 3445 int vhno;
3445 3446 struct vfs **hp;
3446 3447 dev_t dev;
3447 3448
3448 3449 ASSERT(RW_WRITE_HELD(&vfslist));
3449 3450
3450 3451 dev = expldev(vfsp->vfs_fsid.val[0]);
3451 3452 vhno = VFSHASH(getmajor(dev), getminor(dev));
3452 3453
3453 3454 mutex_enter(&rvfs_list[vhno].rvfs_lock);
3454 3455
3455 3456 /*
3456 3457 * Link into the hash table, inserting it at the end, so that LOFS
3457 3458 * with the same fsid as UFS (or other) file systems will not hide the
3458 3459 * UFS.
3459 3460 */
3460 3461 if (insert_at_head) {
3461 3462 vfsp->vfs_hash = rvfs_list[vhno].rvfs_head;
3462 3463 rvfs_list[vhno].rvfs_head = vfsp;
3463 3464 } else {
3464 3465 for (hp = &rvfs_list[vhno].rvfs_head; *hp != NULL;
3465 3466 hp = &(*hp)->vfs_hash)
3466 3467 continue;
3467 3468 /*
3468 3469 * hp now contains the address of the pointer to update
3469 3470 * to effect the insertion.
3470 3471 */
3471 3472 vfsp->vfs_hash = NULL;
3472 3473 *hp = vfsp;
3473 3474 }
3474 3475
3475 3476 rvfs_list[vhno].rvfs_len++;
3476 3477 mutex_exit(&rvfs_list[vhno].rvfs_lock);
3477 3478 }
3478 3479
3479 3480
3480 3481 static void
3481 3482 vfs_hash_remove(struct vfs *vfsp)
3482 3483 {
3483 3484 int vhno;
3484 3485 struct vfs *tvfsp;
3485 3486 dev_t dev;
3486 3487
3487 3488 ASSERT(RW_WRITE_HELD(&vfslist));
3488 3489
3489 3490 dev = expldev(vfsp->vfs_fsid.val[0]);
3490 3491 vhno = VFSHASH(getmajor(dev), getminor(dev));
3491 3492
3492 3493 mutex_enter(&rvfs_list[vhno].rvfs_lock);
3493 3494
3494 3495 /*
3495 3496 * Remove from hash.
3496 3497 */
3497 3498 if (rvfs_list[vhno].rvfs_head == vfsp) {
3498 3499 rvfs_list[vhno].rvfs_head = vfsp->vfs_hash;
3499 3500 rvfs_list[vhno].rvfs_len--;
3500 3501 goto foundit;
3501 3502 }
3502 3503 for (tvfsp = rvfs_list[vhno].rvfs_head; tvfsp != NULL;
3503 3504 tvfsp = tvfsp->vfs_hash) {
3504 3505 if (tvfsp->vfs_hash == vfsp) {
3505 3506 tvfsp->vfs_hash = vfsp->vfs_hash;
3506 3507 rvfs_list[vhno].rvfs_len--;
3507 3508 goto foundit;
3508 3509 }
3509 3510 }
3510 3511 cmn_err(CE_WARN, "vfs_list_remove: vfs not found in hash");
3511 3512
3512 3513 foundit:
3513 3514
3514 3515 mutex_exit(&rvfs_list[vhno].rvfs_lock);
3515 3516 }
3516 3517
3517 3518
3518 3519 void
3519 3520 vfs_list_add(struct vfs *vfsp)
3520 3521 {
3521 3522 zone_t *zone;
3522 3523
3523 3524 /*
3524 3525 * Typically, the vfs_t will have been created on behalf of the file
3525 3526 * system in vfs_init, where it will have been provided with a
3526 3527 * vfs_impl_t. This, however, might be lacking if the vfs_t was created
3527 3528 	 * by an unbundled file system. We therefore check for this case
3528 3529 * before stamping the vfs_t with its creation time for the benefit of
3529 3530 * mntfs.
3530 3531 */
3531 3532 if (vfsp->vfs_implp == NULL)
3532 3533 vfsimpl_setup(vfsp);
3533 3534 vfs_mono_time(&vfsp->vfs_hrctime);
3534 3535
3535 3536 /*
3536 3537 * The zone that owns the mount is the one that performed the mount.
3537 3538 * Note that this isn't necessarily the same as the zone mounted into.
3538 3539 * The corresponding zone_rele_ref() will be done when the vfs_t
3539 3540 * is being free'd.
3540 3541 */
3541 3542 vfsp->vfs_zone = curproc->p_zone;
3542 3543 zone_init_ref(&vfsp->vfs_implp->vi_zone_ref);
3543 3544 zone_hold_ref(vfsp->vfs_zone, &vfsp->vfs_implp->vi_zone_ref,
3544 3545 ZONE_REF_VFS);
3545 3546
3546 3547 /*
3547 3548 * Find the zone mounted into, and put this mount on its vfs list.
3548 3549 */
3549 3550 zone = zone_find_by_path(refstr_value(vfsp->vfs_mntpt));
3550 3551 ASSERT(zone != NULL);
3551 3552 /*
3552 3553 * Special casing for the root vfs. This structure is allocated
3553 3554 * statically and hooked onto rootvfs at link time. During the
3554 3555 * vfs_mountroot call at system startup time, the root file system's
3555 3556 * VFS_MOUNTROOT routine will call vfs_add with this root vfs struct
3556 3557 * as argument. The code below must detect and handle this special
3557 3558 * case. The only apparent justification for this special casing is
3558 3559 * to ensure that the root file system appears at the head of the
3559 3560 * list.
3560 3561 *
3561 3562 * XXX: I'm assuming that it's ok to do normal list locking when
3562 3563 * adding the entry for the root file system (this used to be
3563 3564 * done with no locks held).
3564 3565 */
3565 3566 vfs_list_lock();
3566 3567 /*
3567 3568 * Link into the vfs list proper.
3568 3569 */
3569 3570 if (vfsp == &root) {
3570 3571 /*
3571 3572 * Assert: This vfs is already on the list as its first entry.
3572 3573 * Thus, there's nothing to do.
3573 3574 */
3574 3575 ASSERT(rootvfs == vfsp);
3575 3576 /*
3576 3577 * Add it to the head of the global zone's vfslist.
3577 3578 */
3578 3579 ASSERT(zone == global_zone);
3579 3580 ASSERT(zone->zone_vfslist == NULL);
3580 3581 zone->zone_vfslist = vfsp;
3581 3582 } else {
3582 3583 /*
3583 3584 * Link to end of list using vfs_prev (as rootvfs is now a
3584 3585 * doubly linked circular list) so list is in mount order for
3585 3586 * mnttab use.
3586 3587 */
3587 3588 rootvfs->vfs_prev->vfs_next = vfsp;
3588 3589 vfsp->vfs_prev = rootvfs->vfs_prev;
3589 3590 rootvfs->vfs_prev = vfsp;
3590 3591 vfsp->vfs_next = rootvfs;
3591 3592
3592 3593 /*
3593 3594 * Do it again for the zone-private list (which may be NULL).
3594 3595 */
3595 3596 if (zone->zone_vfslist == NULL) {
3596 3597 ASSERT(zone != global_zone);
3597 3598 zone->zone_vfslist = vfsp;
3598 3599 } else {
3599 3600 zone->zone_vfslist->vfs_zone_prev->vfs_zone_next = vfsp;
3600 3601 vfsp->vfs_zone_prev = zone->zone_vfslist->vfs_zone_prev;
3601 3602 zone->zone_vfslist->vfs_zone_prev = vfsp;
3602 3603 vfsp->vfs_zone_next = zone->zone_vfslist;
3603 3604 }
3604 3605 }
3605 3606
3606 3607 /*
3607 3608 * Link into the hash table, inserting it at the end, so that LOFS
3608 3609 * with the same fsid as UFS (or other) file systems will not hide
3609 3610 * the UFS.
3610 3611 */
3611 3612 vfs_hash_add(vfsp, 0);
3612 3613
3613 3614 /*
3614 3615 * update the mnttab modification time
3615 3616 */
3616 3617 vfs_mnttab_modtimeupd();
3617 3618 vfs_list_unlock();
3618 3619 zone_rele(zone);
3619 3620 }
3620 3621
3621 3622 void
3622 3623 vfs_list_remove(struct vfs *vfsp)
3623 3624 {
3624 3625 zone_t *zone;
3625 3626
3626 3627 zone = zone_find_by_path(refstr_value(vfsp->vfs_mntpt));
3627 3628 ASSERT(zone != NULL);
3628 3629 /*
3629 3630 * Callers are responsible for preventing attempts to unmount the
3630 3631 * root.
3631 3632 */
3632 3633 ASSERT(vfsp != rootvfs);
3633 3634
3634 3635 vfs_list_lock();
3635 3636
3636 3637 /*
3637 3638 * Remove from hash.
3638 3639 */
3639 3640 vfs_hash_remove(vfsp);
3640 3641
3641 3642 /*
3642 3643 * Remove from vfs list.
3643 3644 */
3644 3645 vfsp->vfs_prev->vfs_next = vfsp->vfs_next;
3645 3646 vfsp->vfs_next->vfs_prev = vfsp->vfs_prev;
3646 3647 vfsp->vfs_next = vfsp->vfs_prev = NULL;
3647 3648
3648 3649 /*
3649 3650 * Remove from zone-specific vfs list.
3650 3651 */
3651 3652 if (zone->zone_vfslist == vfsp)
3652 3653 zone->zone_vfslist = vfsp->vfs_zone_next;
3653 3654
3654 3655 if (vfsp->vfs_zone_next == vfsp) {
3655 3656 ASSERT(vfsp->vfs_zone_prev == vfsp);
3656 3657 ASSERT(zone->zone_vfslist == vfsp);
3657 3658 zone->zone_vfslist = NULL;
3658 3659 }
3659 3660
3660 3661 vfsp->vfs_zone_prev->vfs_zone_next = vfsp->vfs_zone_next;
3661 3662 vfsp->vfs_zone_next->vfs_zone_prev = vfsp->vfs_zone_prev;
3662 3663 vfsp->vfs_zone_next = vfsp->vfs_zone_prev = NULL;
3663 3664
3664 3665 /*
3665 3666 * update the mnttab modification time
3666 3667 */
3667 3668 vfs_mnttab_modtimeupd();
3668 3669 vfs_list_unlock();
3669 3670 zone_rele(zone);
3670 3671 }
3671 3672
3672 3673 struct vfs *
3673 3674 getvfs(fsid_t *fsid)
3674 3675 {
3675 3676 struct vfs *vfsp;
3676 3677 int val0 = fsid->val[0];
3677 3678 int val1 = fsid->val[1];
3678 3679 dev_t dev = expldev(val0);
3679 3680 int vhno = VFSHASH(getmajor(dev), getminor(dev));
3680 3681 kmutex_t *hmp = &rvfs_list[vhno].rvfs_lock;
3681 3682
3682 3683 mutex_enter(hmp);
3683 3684 for (vfsp = rvfs_list[vhno].rvfs_head; vfsp; vfsp = vfsp->vfs_hash) {
3684 3685 if (vfsp->vfs_fsid.val[0] == val0 &&
3685 3686 vfsp->vfs_fsid.val[1] == val1) {
3686 3687 VFS_HOLD(vfsp);
3687 3688 mutex_exit(hmp);
3688 3689 return (vfsp);
3689 3690 }
3690 3691 }
3691 3692 mutex_exit(hmp);
3692 3693 return (NULL);
3693 3694 }
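getvfs() returns the vfs with a hold, so the caller must release it. A hypothetical sketch of a lookup by device, using vfs_make_fsid() (the same helper vfs_root_redev() uses later in this file) to construct the fsid:

	static struct vfs *
	example_lookup_by_dev(dev_t dev, int fstype)	/* hypothetical helper */
	{
		fsid_t fsid;
		struct vfs *vfsp;

		vfs_make_fsid(&fsid, dev, fstype);
		vfsp = getvfs(&fsid);		/* returns a held vfs, or NULL */
		/* caller must VFS_RELE(vfsp) once it is done with the mount */
		return (vfsp);
	}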
3694 3695
3695 3696 /*
3696 3697 * Search the vfs mount in progress list for a specified device/vfs entry.
3697 3698  * Returns 0 if the first entry in the list whose device matches also
3698 3699  * carries the given vfs pointer.  Returns 1 if an entry with the same
3699 3700  * device but a different vfs pointer is encountered before the given
3700 3701  * vfs entry.
3701 3702 */
3702 3703
3703 3704 int
3704 3705 vfs_devmounting(dev_t dev, struct vfs *vfsp)
3705 3706 {
3706 3707 int retval = 0;
3707 3708 struct ipmnt *mipp;
3708 3709
3709 3710 mutex_enter(&vfs_miplist_mutex);
3710 3711 for (mipp = vfs_miplist; mipp != NULL; mipp = mipp->mip_next) {
3711 3712 if (mipp->mip_dev == dev) {
3712 3713 if (mipp->mip_vfsp != vfsp)
3713 3714 retval = 1;
3714 3715 break;
3715 3716 }
3716 3717 }
3717 3718 mutex_exit(&vfs_miplist_mutex);
3718 3719 return (retval);
3719 3720 }
3720 3721
3721 3722 /*
3722 3723 * Search the vfs list for a specified device. Returns 1, if entry is found
3723 3724 * or 0 if no suitable entry is found.
3724 3725 */
3725 3726
3726 3727 int
3727 3728 vfs_devismounted(dev_t dev)
3728 3729 {
3729 3730 struct vfs *vfsp;
3730 3731 int found;
3731 3732
3732 3733 vfs_list_read_lock();
3733 3734 vfsp = rootvfs;
3734 3735 found = 0;
3735 3736 do {
3736 3737 if (vfsp->vfs_dev == dev) {
3737 3738 found = 1;
3738 3739 break;
3739 3740 }
3740 3741 vfsp = vfsp->vfs_next;
3741 3742 } while (vfsp != rootvfs);
3742 3743
3743 3744 vfs_list_unlock();
3744 3745 return (found);
3745 3746 }
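A hypothetical caller would typically use this to refuse an operation on a device that currently backs a mounted file system, for example:

	if (vfs_devismounted(dev))
		return (EBUSY);		/* device is in use by a mounted fs */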
3746 3747
3747 3748 /*
3748 3749 * Search the vfs list for a specified device. Returns a pointer to it
3749 3750 * or NULL if no suitable entry is found. The caller of this routine
3750 3751 * is responsible for releasing the returned vfs pointer.
3751 3752 */
3752 3753 struct vfs *
3753 3754 vfs_dev2vfsp(dev_t dev)
3754 3755 {
3755 3756 struct vfs *vfsp;
3756 3757 int found;
3757 3758
3758 3759 vfs_list_read_lock();
3759 3760 vfsp = rootvfs;
3760 3761 found = 0;
3761 3762 do {
3762 3763 /*
3763 3764 * The following could be made more efficient by making
3764 3765 * the entire loop use vfs_zone_next if the call is from
3765 3766 * a zone. The only callers, however, ustat(2) and
3766 3767 * umount2(2), don't seem to justify the added
3767 3768 * complexity at present.
3768 3769 */
3769 3770 if (vfsp->vfs_dev == dev &&
3770 3771 ZONE_PATH_VISIBLE(refstr_value(vfsp->vfs_mntpt),
3771 3772 curproc->p_zone)) {
3772 3773 VFS_HOLD(vfsp);
3773 3774 found = 1;
3774 3775 break;
3775 3776 }
3776 3777 vfsp = vfsp->vfs_next;
3777 3778 } while (vfsp != rootvfs);
3778 3779 vfs_list_unlock();
3779 3780 	return (found ? vfsp : NULL);
3780 3781 }
3781 3782
3782 3783 /*
3783 3784 * Search the vfs list for a specified mntpoint. Returns a pointer to it
3784 3785 * or NULL if no suitable entry is found. The caller of this routine
3785 3786 * is responsible for releasing the returned vfs pointer.
3786 3787 *
3787 3788 * Note that if multiple mntpoints match, the last one matching is
3788 3789 * returned in an attempt to return the "top" mount when overlay
3789 3790 * mounts are covering the same mount point. This is accomplished by starting
3790 3791 * at the end of the list and working our way backwards, stopping at the first
3791 3792 * matching mount.
3792 3793 */
3793 3794 struct vfs *
3794 3795 vfs_mntpoint2vfsp(const char *mp)
3795 3796 {
3796 3797 struct vfs *vfsp;
3797 3798 struct vfs *retvfsp = NULL;
3798 3799 zone_t *zone = curproc->p_zone;
3799 3800 struct vfs *list;
3800 3801
3801 3802 vfs_list_read_lock();
3802 3803 if (getzoneid() == GLOBAL_ZONEID) {
3803 3804 /*
3804 3805 * The global zone may see filesystems in any zone.
3805 3806 */
3806 3807 vfsp = rootvfs->vfs_prev;
3807 3808 do {
3808 3809 if (strcmp(refstr_value(vfsp->vfs_mntpt), mp) == 0) {
3809 3810 retvfsp = vfsp;
3810 3811 break;
3811 3812 }
3812 3813 vfsp = vfsp->vfs_prev;
3813 3814 } while (vfsp != rootvfs->vfs_prev);
3814 3815 } else if ((list = zone->zone_vfslist) != NULL) {
3815 3816 const char *mntpt;
3816 3817
3817 3818 vfsp = list->vfs_zone_prev;
3818 3819 do {
3819 3820 mntpt = refstr_value(vfsp->vfs_mntpt);
3820 3821 mntpt = ZONE_PATH_TRANSLATE(mntpt, zone);
3821 3822 if (strcmp(mntpt, mp) == 0) {
3822 3823 retvfsp = vfsp;
3823 3824 break;
3824 3825 }
3825 3826 vfsp = vfsp->vfs_zone_prev;
3826 3827 } while (vfsp != list->vfs_zone_prev);
3827 3828 }
3828 3829 if (retvfsp)
3829 3830 VFS_HOLD(retvfsp);
3830 3831 vfs_list_unlock();
3831 3832 return (retvfsp);
3832 3833 }
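Since the returned vfs is held, callers must pair the lookup with a release. A minimal hypothetical usage (the mount point path is illustrative only):

	struct vfs *vfsp;

	if ((vfsp = vfs_mntpoint2vfsp("/export/home")) != NULL) {
		/* examine the topmost mount covering this mount point */
		VFS_RELE(vfsp);		/* drop the hold taken by the lookup */
	}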
3833 3834
3834 3835 /*
3835 3836 * Search the vfs list for a specified vfsops.
3836 3837  * Returns 1 if a matching vfs entry is found, 0 otherwise.
3837 3838 */
3838 3839 int
3839 3840 vfs_opsinuse(vfsops_t *ops)
3840 3841 {
3841 3842 struct vfs *vfsp;
3842 3843 int found;
3843 3844
3844 3845 vfs_list_read_lock();
3845 3846 vfsp = rootvfs;
3846 3847 found = 0;
3847 3848 do {
3848 3849 if (vfs_getops(vfsp) == ops) {
3849 3850 found = 1;
3850 3851 break;
3851 3852 }
3852 3853 vfsp = vfsp->vfs_next;
3853 3854 } while (vfsp != rootvfs);
3854 3855 vfs_list_unlock();
3855 3856 return (found);
3856 3857 }
3857 3858
3858 3859 /*
3859 3860 * Allocate an entry in vfssw for a file system type
3860 3861 */
3861 3862 struct vfssw *
3862 3863 allocate_vfssw(const char *type)
3863 3864 {
3864 3865 struct vfssw *vswp;
3865 3866
3866 3867 if (type[0] == '\0' || strlen(type) + 1 > _ST_FSTYPSZ) {
3867 3868 /*
3868 3869 * The vfssw table uses the empty string to identify an
3869 3870 * available entry; we cannot add any type which has
3870 3871 * a leading NUL. The string length is limited to
3871 3872 * the size of the st_fstype array in struct stat.
3872 3873 */
3873 3874 return (NULL);
3874 3875 }
3875 3876
3876 3877 ASSERT(VFSSW_WRITE_LOCKED());
3877 3878 for (vswp = &vfssw[1]; vswp < &vfssw[nfstype]; vswp++)
3878 3879 if (!ALLOCATED_VFSSW(vswp)) {
3879 3880 vswp->vsw_name = kmem_alloc(strlen(type) + 1, KM_SLEEP);
3880 3881 (void) strcpy(vswp->vsw_name, type);
3881 3882 ASSERT(vswp->vsw_count == 0);
3882 3883 vswp->vsw_count = 1;
3883 3884 mutex_init(&vswp->vsw_lock, NULL, MUTEX_DEFAULT, NULL);
3884 3885 return (vswp);
3885 3886 }
3886 3887 return (NULL);
3887 3888 }
3888 3889
3889 3890 /*
3890 3891 * Impose additional layer of translation between vfstype names
3891 3892 * and module names in the filesystem.
3892 3893 */
3893 3894 static const char *
3894 3895 vfs_to_modname(const char *vfstype)
3895 3896 {
3896 3897 if (strcmp(vfstype, "proc") == 0) {
3897 3898 vfstype = "procfs";
3898 3899 } else if (strcmp(vfstype, "fd") == 0) {
3899 3900 vfstype = "fdfs";
3900 3901 } else if (strncmp(vfstype, "nfs", 3) == 0) {
3901 3902 vfstype = "nfs";
3902 3903 }
3903 3904
3904 3905 return (vfstype);
3905 3906 }
3906 3907
3907 3908 /*
3908 3909 * Find a vfssw entry given a file system type name.
3909 3910 * Try to autoload the filesystem if it's not found.
3910 3911 * If it's installed, return the vfssw locked to prevent unloading.
3911 3912 */
3912 3913 struct vfssw *
3913 3914 vfs_getvfssw(const char *type)
3914 3915 {
3915 3916 struct vfssw *vswp;
3916 3917 const char *modname;
3917 3918
3918 3919 RLOCK_VFSSW();
3919 3920 vswp = vfs_getvfsswbyname(type);
3920 3921 modname = vfs_to_modname(type);
3921 3922
3922 3923 if (rootdir == NULL) {
3923 3924 /*
3924 3925 * If we haven't yet loaded the root file system, then our
3925 3926 * _init won't be called until later. Allocate vfssw entry,
3926 3927 * because mod_installfs won't be called.
3927 3928 */
3928 3929 if (vswp == NULL) {
3929 3930 RUNLOCK_VFSSW();
3930 3931 WLOCK_VFSSW();
3931 3932 if ((vswp = vfs_getvfsswbyname(type)) == NULL) {
3932 3933 if ((vswp = allocate_vfssw(type)) == NULL) {
3933 3934 WUNLOCK_VFSSW();
3934 3935 return (NULL);
3935 3936 }
3936 3937 }
3937 3938 WUNLOCK_VFSSW();
3938 3939 RLOCK_VFSSW();
3939 3940 }
3940 3941 if (!VFS_INSTALLED(vswp)) {
3941 3942 RUNLOCK_VFSSW();
3942 3943 (void) modloadonly("fs", modname);
3943 3944 } else
3944 3945 RUNLOCK_VFSSW();
3945 3946 return (vswp);
3946 3947 }
3947 3948
3948 3949 /*
3949 3950 * Try to load the filesystem. Before calling modload(), we drop
3950 3951 * our lock on the VFS switch table, and pick it up after the
3951 3952 * module is loaded. However, there is a potential race: the
3952 3953 * module could be unloaded after the call to modload() completes
3953 3954 * but before we pick up the lock and drive on. Therefore,
3954 3955 * we keep reloading the module until we've loaded the module
3955 3956 * _and_ we have the lock on the VFS switch table.
3956 3957 */
3957 3958 while (vswp == NULL || !VFS_INSTALLED(vswp)) {
3958 3959 RUNLOCK_VFSSW();
3959 3960 if (modload("fs", modname) == -1)
3960 3961 return (NULL);
3961 3962 RLOCK_VFSSW();
3962 3963 if (vswp == NULL)
3963 3964 if ((vswp = vfs_getvfsswbyname(type)) == NULL)
3964 3965 break;
3965 3966 }
3966 3967 RUNLOCK_VFSSW();
3967 3968
3968 3969 return (vswp);
3969 3970 }
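The entry comes back referenced (via vfs_refvfssw(), or with vsw_count preset to 1 by allocate_vfssw()), so callers balance it with vfs_unrefvfssw(), as rootconf() does later in this file. A hypothetical sketch:

	struct vfssw *vswp;

	if ((vswp = vfs_getvfssw("ufs")) == NULL)
		return (ENODEV);	/* type unknown and could not be loaded */
	/* ... use vswp->vsw_vfsops, e.g. to VFS_INIT() a new vfs ... */
	vfs_unrefvfssw(vswp);		/* balance the reference taken above */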
3970 3971
3971 3972 /*
3972 3973 * Find a vfssw entry given a file system type name.
3973 3974 */
3974 3975 struct vfssw *
3975 3976 vfs_getvfsswbyname(const char *type)
3976 3977 {
3977 3978 struct vfssw *vswp;
3978 3979
3979 3980 ASSERT(VFSSW_LOCKED());
3980 3981 if (type == NULL || *type == '\0')
3981 3982 return (NULL);
3982 3983
3983 3984 for (vswp = &vfssw[1]; vswp < &vfssw[nfstype]; vswp++) {
3984 3985 if (strcmp(type, vswp->vsw_name) == 0) {
3985 3986 vfs_refvfssw(vswp);
3986 3987 return (vswp);
3987 3988 }
3988 3989 }
3989 3990
3990 3991 return (NULL);
3991 3992 }
3992 3993
3993 3994 /*
3994 3995 * Find a vfssw entry given a set of vfsops.
3995 3996 */
3996 3997 struct vfssw *
3997 3998 vfs_getvfsswbyvfsops(vfsops_t *vfsops)
3998 3999 {
3999 4000 struct vfssw *vswp;
4000 4001
4001 4002 RLOCK_VFSSW();
4002 4003 for (vswp = &vfssw[1]; vswp < &vfssw[nfstype]; vswp++) {
4003 4004 if (ALLOCATED_VFSSW(vswp) && &vswp->vsw_vfsops == vfsops) {
4004 4005 vfs_refvfssw(vswp);
4005 4006 RUNLOCK_VFSSW();
4006 4007 return (vswp);
4007 4008 }
4008 4009 }
4009 4010 RUNLOCK_VFSSW();
4010 4011
4011 4012 return (NULL);
4012 4013 }
4013 4014
4014 4015 /*
4015 4016 * Reference a vfssw entry.
4016 4017 */
4017 4018 void
4018 4019 vfs_refvfssw(struct vfssw *vswp)
4019 4020 {
4020 4021
4021 4022 mutex_enter(&vswp->vsw_lock);
4022 4023 vswp->vsw_count++;
4023 4024 mutex_exit(&vswp->vsw_lock);
4024 4025 }
4025 4026
4026 4027 /*
4027 4028 * Unreference a vfssw entry.
4028 4029 */
4029 4030 void
4030 4031 vfs_unrefvfssw(struct vfssw *vswp)
4031 4032 {
4032 4033
4033 4034 mutex_enter(&vswp->vsw_lock);
4034 4035 vswp->vsw_count--;
4035 4036 mutex_exit(&vswp->vsw_lock);
4036 4037 }
4037 4038
4038 4039 int sync_timeout = 30; /* timeout for syncing a page during panic */
4039 4040 int sync_timeleft; /* portion of sync_timeout remaining */
4040 4041
4041 4042 static int sync_retries = 20; /* number of retries when not making progress */
4042 4043 static int sync_triesleft; /* portion of sync_retries remaining */
4043 4044
4044 4045 static pgcnt_t old_pgcnt, new_pgcnt;
4045 4046 static int new_bufcnt, old_bufcnt;
4046 4047
4047 4048 /*
4048 4049 * Sync all of the mounted filesystems, and then wait for the actual i/o to
4049 4050 * complete. We wait by counting the number of dirty pages and buffers,
4050 4051 * pushing them out using bio_busy() and page_busy(), and then counting again.
4051 4052  * This routine is used both during the uadmin A_SHUTDOWN code and during
4052 4053 * the SYNC phase of the panic code (see comments in panic.c). It should only
4053 4054 * be used after some higher-level mechanism has quiesced the system so that
4054 4055 * new writes are not being initiated while we are waiting for completion.
4055 4056 *
4056 4057 * To ensure finite running time, our algorithm uses two timeout mechanisms:
4057 4058 * sync_timeleft (a timer implemented by the omnipresent deadman() cyclic), and
4058 4059 * sync_triesleft (a progress counter used by the vfs_syncall() loop below).
4059 4060 * Together these ensure that syncing completes if our i/o paths are stuck.
4060 4061 * The counters are declared above so they can be found easily in the debugger.
4061 4062 *
4062 4063 * The sync_timeleft counter is reset by bio_busy() and page_busy() using the
4063 4064 * vfs_syncprogress() subroutine whenever we make progress through the lists of
4064 4065 * pages and buffers. It is decremented and expired by the deadman() cyclic.
4065 4066 * When vfs_syncall() decides it is done, we disable the deadman() counter by
4066 4067 * setting sync_timeleft to zero. This timer guards against vfs_syncall()
4067 4068 * deadlocking or hanging inside of a broken filesystem or driver routine.
4068 4069 *
4069 4070 * The sync_triesleft counter is updated by vfs_syncall() itself. If we make
4070 4071 * sync_retries consecutive calls to bio_busy() and page_busy() without
4071 4072 * decreasing either the number of dirty buffers or dirty pages below the
4072 4073 * lowest count we have seen so far, we give up and return from vfs_syncall().
4073 4074 *
4074 4075 * Each loop iteration ends with a call to delay() one second to allow time for
4075 4076 * i/o completion and to permit the user time to read our progress messages.
4076 4077 */
4077 4078 void
4078 4079 vfs_syncall(void)
4079 4080 {
4080 4081 if (rootdir == NULL && !modrootloaded)
4081 4082 return; /* panic during boot - no filesystems yet */
4082 4083
4083 4084 printf("syncing file systems...");
4084 4085 vfs_syncprogress();
4085 4086 sync();
4086 4087
4087 4088 vfs_syncprogress();
4088 4089 sync_triesleft = sync_retries;
4089 4090
4090 4091 old_bufcnt = new_bufcnt = INT_MAX;
4091 4092 old_pgcnt = new_pgcnt = ULONG_MAX;
4092 4093
4093 4094 while (sync_triesleft > 0) {
4094 4095 old_bufcnt = MIN(old_bufcnt, new_bufcnt);
4095 4096 old_pgcnt = MIN(old_pgcnt, new_pgcnt);
4096 4097
4097 4098 new_bufcnt = bio_busy(B_TRUE);
4098 4099 new_pgcnt = page_busy(B_TRUE);
4099 4100 vfs_syncprogress();
4100 4101
4101 4102 if (new_bufcnt == 0 && new_pgcnt == 0)
4102 4103 break;
4103 4104
4104 4105 if (new_bufcnt < old_bufcnt || new_pgcnt < old_pgcnt)
4105 4106 sync_triesleft = sync_retries;
4106 4107 else
4107 4108 sync_triesleft--;
4108 4109
4109 4110 if (new_bufcnt)
4110 4111 printf(" [%d]", new_bufcnt);
4111 4112 if (new_pgcnt)
4112 4113 printf(" %lu", new_pgcnt);
4113 4114
4114 4115 delay(hz);
4115 4116 }
4116 4117
4117 4118 if (new_bufcnt != 0 || new_pgcnt != 0)
4118 4119 printf(" done (not all i/o completed)\n");
4119 4120 else
4120 4121 printf(" done\n");
4121 4122
4122 4123 sync_timeleft = 0;
4123 4124 delay(hz);
4124 4125 }
4125 4126
4126 4127 /*
4127 4128 * If we are in the middle of the sync phase of panic, reset sync_timeleft to
4128 4129 * sync_timeout to indicate that we are making progress and the deadman()
4129 4130 * omnipresent cyclic should not yet time us out. Note that it is safe to
4130 4131 * store to sync_timeleft here since the deadman() is firing at high-level
4131 4132 * on top of us. If we are racing with the deadman(), either the deadman()
4132 4133 * will decrement the old value and then we will reset it, or we will
4133 4134 * reset it and then the deadman() will immediately decrement it. In either
4134 4135 * case, correct behavior results.
4135 4136 */
4136 4137 void
4137 4138 vfs_syncprogress(void)
4138 4139 {
4139 4140 if (panicstr)
4140 4141 sync_timeleft = sync_timeout;
4141 4142 }
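For illustration only, the consumer side of sync_timeleft described in the comments above might look like the following sketch; deadman_sync_tick() is a hypothetical name and an intentionally simplified stand-in, not the actual deadman() implementation:

	static void
	deadman_sync_tick(void)		/* hypothetical sketch of the deadman() side */
	{
		if (sync_timeleft == 0)
			return;		/* timer disarmed, or sync phase complete */
		if (--sync_timeleft == 0) {
			/* no sync progress within sync_timeout intervals; stop waiting */
			cmn_err(CE_WARN, "sync not making progress");
		}
	}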
4142 4143
4143 4144 /*
4144 4145 * Map VFS flags to statvfs flags. These shouldn't really be separate
4145 4146 * flags at all.
4146 4147 */
4147 4148 uint_t
4148 4149 vf_to_stf(uint_t vf)
4149 4150 {
4150 4151 uint_t stf = 0;
4151 4152
4152 4153 if (vf & VFS_RDONLY)
4153 4154 stf |= ST_RDONLY;
4154 4155 if (vf & VFS_NOSETUID)
4155 4156 stf |= ST_NOSUID;
4156 4157 if (vf & VFS_NOTRUNC)
4157 4158 stf |= ST_NOTRUNC;
4158 4159
4159 4160 return (stf);
4160 4161 }
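A typical (hypothetical) use is filling in the f_flag member of a statvfs structure after a VFS_STATVFS() call, e.g.:

	struct statvfs64 sb;

	bzero(&sb, sizeof (sb));
	/* block/file counts would come from VFS_STATVFS(vfsp, &sb) */
	sb.f_flag = vf_to_stf(vfsp->vfs_flag);	/* translate VFS_* to ST_* */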
4161 4162
4162 4163 /*
4163 4164 * Entries for (illegal) fstype 0.
4164 4165 */
4165 4166 /* ARGSUSED */
4166 4167 int
4167 4168 vfsstray_sync(struct vfs *vfsp, short arg, struct cred *cr)
4168 4169 {
4169 4170 cmn_err(CE_PANIC, "stray vfs operation");
4170 4171 return (0);
4171 4172 }
4172 4173
4173 4174 /*
4174 4175 * Entries for (illegal) fstype 0.
4175 4176 */
4176 4177 int
4177 4178 vfsstray(void)
4178 4179 {
4179 4180 cmn_err(CE_PANIC, "stray vfs operation");
4180 4181 return (0);
4181 4182 }
4182 4183
4183 4184 /*
4184 4185 * Support for dealing with forced UFS unmount and its interaction with
4185 4186 * LOFS. Could be used by any filesystem.
4186 4187 * See bug 1203132.
4187 4188 */
4188 4189 int
4189 4190 vfs_EIO(void)
4190 4191 {
4191 4192 return (EIO);
4192 4193 }
4193 4194
4194 4195 /*
4195 4196 * We've gotta define the op for sync separately, since the compiler gets
4196 4197 * confused if we mix and match ANSI and normal style prototypes when
4197 4198 * a "short" argument is present and spits out a warning.
4198 4199 */
4199 4200 /*ARGSUSED*/
4200 4201 int
4201 4202 vfs_EIO_sync(struct vfs *vfsp, short arg, struct cred *cr)
4202 4203 {
4203 4204 return (EIO);
4204 4205 }
4205 4206
4206 4207 vfs_t EIO_vfs;
4207 4208 vfsops_t *EIO_vfsops;
4208 4209
4209 4210 /*
4210 4211 * Called from startup() to initialize all loaded vfs's
4211 4212 */
4212 4213 void
4213 4214 vfsinit(void)
4214 4215 {
4215 4216 struct vfssw *vswp;
4216 4217 int error;
4217 4218 extern int vopstats_enabled;
4218 4219 extern void vopstats_startup();
4219 4220
4220 4221 static const fs_operation_def_t EIO_vfsops_template[] = {
4221 4222 VFSNAME_MOUNT, { .error = vfs_EIO },
4222 4223 VFSNAME_UNMOUNT, { .error = vfs_EIO },
4223 4224 VFSNAME_ROOT, { .error = vfs_EIO },
4224 4225 VFSNAME_STATVFS, { .error = vfs_EIO },
4225 4226 VFSNAME_SYNC, { .vfs_sync = vfs_EIO_sync },
4226 4227 VFSNAME_VGET, { .error = vfs_EIO },
4227 4228 VFSNAME_MOUNTROOT, { .error = vfs_EIO },
4228 4229 VFSNAME_FREEVFS, { .error = vfs_EIO },
4229 4230 VFSNAME_VNSTATE, { .error = vfs_EIO },
4230 4231 NULL, NULL
4231 4232 };
4232 4233
4233 4234 static const fs_operation_def_t stray_vfsops_template[] = {
4234 4235 VFSNAME_MOUNT, { .error = vfsstray },
4235 4236 VFSNAME_UNMOUNT, { .error = vfsstray },
4236 4237 VFSNAME_ROOT, { .error = vfsstray },
4237 4238 VFSNAME_STATVFS, { .error = vfsstray },
4238 4239 VFSNAME_SYNC, { .vfs_sync = vfsstray_sync },
4239 4240 VFSNAME_VGET, { .error = vfsstray },
4240 4241 VFSNAME_MOUNTROOT, { .error = vfsstray },
4241 4242 VFSNAME_FREEVFS, { .error = vfsstray },
4242 4243 VFSNAME_VNSTATE, { .error = vfsstray },
4243 4244 NULL, NULL
4244 4245 };
4245 4246
4246 4247 /* Create vfs cache */
4247 4248 vfs_cache = kmem_cache_create("vfs_cache", sizeof (struct vfs),
4248 4249 sizeof (uintptr_t), NULL, NULL, NULL, NULL, NULL, 0);
4249 4250
4250 4251 /* Initialize the vnode cache (file systems may use it during init). */
4251 4252 vn_create_cache();
4252 4253
4253 4254 /* Setup event monitor framework */
4254 4255 fem_init();
4255 4256
4256 4257 /* Initialize the dummy stray file system type. */
4257 4258 error = vfs_setfsops(0, stray_vfsops_template, NULL);
4258 4259
4259 4260 /* Initialize the dummy EIO file system. */
4260 4261 error = vfs_makefsops(EIO_vfsops_template, &EIO_vfsops);
4261 4262 if (error != 0) {
4262 4263 cmn_err(CE_WARN, "vfsinit: bad EIO vfs ops template");
4263 4264 /* Shouldn't happen, but not bad enough to panic */
4264 4265 }
4265 4266
4266 4267 VFS_INIT(&EIO_vfs, EIO_vfsops, (caddr_t)NULL);
4267 4268
4268 4269 /*
4269 4270 * Default EIO_vfs.vfs_flag to VFS_UNMOUNTED so a lookup
4270 4271 * on this vfs can immediately notice it's invalid.
4271 4272 */
4272 4273 EIO_vfs.vfs_flag |= VFS_UNMOUNTED;
4273 4274
4274 4275 /*
4275 4276 * Call the init routines of non-loadable filesystems only.
4276 4277 * Filesystems which are loaded as separate modules will be
4277 4278 * initialized by the module loading code instead.
4278 4279 */
4279 4280
4280 4281 for (vswp = &vfssw[1]; vswp < &vfssw[nfstype]; vswp++) {
4281 4282 RLOCK_VFSSW();
4282 4283 if (vswp->vsw_init != NULL)
4283 4284 (*vswp->vsw_init)(vswp - vfssw, vswp->vsw_name);
4284 4285 RUNLOCK_VFSSW();
4285 4286 }
4286 4287
4287 4288 vopstats_startup();
4288 4289
4289 4290 if (vopstats_enabled) {
4290 4291 /* EIO_vfs can collect stats, but we don't retrieve them */
4291 4292 initialize_vopstats(&EIO_vfs.vfs_vopstats);
4292 4293 EIO_vfs.vfs_fstypevsp = NULL;
4293 4294 EIO_vfs.vfs_vskap = NULL;
4294 4295 EIO_vfs.vfs_flag |= VFS_STATS;
4295 4296 }
4296 4297
4297 4298 xattr_init();
4298 4299
4299 4300 reparse_point_init();
4300 4301 }
4301 4302
4302 4303 vfs_t *
4303 4304 vfs_alloc(int kmflag)
4304 4305 {
4305 4306 vfs_t *vfsp;
4306 4307
4307 4308 vfsp = kmem_cache_alloc(vfs_cache, kmflag);
4308 4309
4309 4310 /*
4310 4311 * Do the simplest initialization here.
4311 4312 * Everything else gets done in vfs_init()
4312 4313 */
4313 4314 bzero(vfsp, sizeof (vfs_t));
4314 4315 return (vfsp);
4315 4316 }
4316 4317
4317 4318 void
4318 4319 vfs_free(vfs_t *vfsp)
4319 4320 {
4320 4321 /*
4321 4322 * One would be tempted to assert that "vfsp->vfs_count == 0".
4322 4323 * The problem is that this gets called out of domount() with
4323 4324 * a partially initialized vfs and a vfs_count of 1. This is
4324 4325 * also called from vfs_rele() with a vfs_count of 0. We can't
4325 4326 * call VFS_RELE() from domount() if VFS_MOUNT() hasn't successfully
4326 4327 * returned. This is because VFS_MOUNT() fully initializes the
4327 4328 * vfs structure and its associated data. VFS_RELE() will call
4328 4329 * VFS_FREEVFS() which may panic the system if the data structures
4329 4330 	 * aren't fully initialized from a successful VFS_MOUNT().
4330 4331 */
4331 4332
4332 4333 /* If FEM was in use, make sure everything gets cleaned up */
4333 4334 if (vfsp->vfs_femhead) {
4334 4335 ASSERT(vfsp->vfs_femhead->femh_list == NULL);
4335 4336 mutex_destroy(&vfsp->vfs_femhead->femh_lock);
4336 4337 kmem_free(vfsp->vfs_femhead, sizeof (*(vfsp->vfs_femhead)));
4337 4338 vfsp->vfs_femhead = NULL;
4338 4339 }
4339 4340
4340 4341 if (vfsp->vfs_implp)
4341 4342 vfsimpl_teardown(vfsp);
4342 4343 sema_destroy(&vfsp->vfs_reflock);
4343 4344 kmem_cache_free(vfs_cache, vfsp);
4344 4345 }
4345 4346
4346 4347 /*
4347 4348 * Increments the vfs reference count by one atomically.
4348 4349 */
4349 4350 void
4350 4351 vfs_hold(vfs_t *vfsp)
4351 4352 {
4352 4353 atomic_inc_32(&vfsp->vfs_count);
4353 4354 ASSERT(vfsp->vfs_count != 0);
4354 4355 }
4355 4356
4356 4357 /*
4357 4358 * Decrements the vfs reference count by one atomically. When
4358 4359 * vfs reference count becomes zero, it calls the file system
4359 4360 * specific vfs_freevfs() to free up the resources.
4360 4361 */
4361 4362 void
4362 4363 vfs_rele(vfs_t *vfsp)
4363 4364 {
4364 4365 ASSERT(vfsp->vfs_count != 0);
4365 4366 if (atomic_dec_32_nv(&vfsp->vfs_count) == 0) {
4366 4367 VFS_FREEVFS(vfsp);
4367 4368 lofi_remove(vfsp);
4368 4369 if (vfsp->vfs_zone)
4369 4370 zone_rele_ref(&vfsp->vfs_implp->vi_zone_ref,
4370 4371 ZONE_REF_VFS);
4371 4372 vfs_freemnttab(vfsp);
4372 4373 vfs_free(vfsp);
4373 4374 }
4374 4375 }
4375 4376
4376 4377 /*
4377 4378 * Generic operations vector support.
4378 4379 *
4379 4380 * This is used to build operations vectors for both the vfs and vnode.
4380 4381 * It's normally called only when a file system is loaded.
4381 4382 *
4382 4383 * There are many possible algorithms for this, including the following:
4383 4384 *
4384 4385 * (1) scan the list of known operations; for each, see if the file system
4385 4386 * includes an entry for it, and fill it in as appropriate.
4386 4387 *
4387 4388 * (2) set up defaults for all known operations. scan the list of ops
4388 4389 * supplied by the file system; for each which is both supplied and
4389 4390 * known, fill it in.
4390 4391 *
4391 4392 * (3) sort the lists of known ops & supplied ops; scan the list, filling
4392 4393 * in entries as we go.
4393 4394 *
4394 4395 * we choose (1) for simplicity, and because performance isn't critical here.
4395 4396 * note that (2) could be sped up using a precomputed hash table on known ops.
4396 4397 * (3) could be faster than either, but only if the lists were very large or
4397 4398 * supplied in sorted order.
4398 4399 *
4399 4400 */
4400 4401
4401 4402 int
4402 4403 fs_build_vector(void *vector, int *unused_ops,
4403 4404 const fs_operation_trans_def_t *translation,
4404 4405 const fs_operation_def_t *operations)
4405 4406 {
4406 4407 int i, num_trans, num_ops, used;
4407 4408
4408 4409 /*
4409 4410 * Count the number of translations and the number of supplied
4410 4411 * operations.
4411 4412 */
4412 4413
4413 4414 {
4414 4415 const fs_operation_trans_def_t *p;
4415 4416
4416 4417 for (num_trans = 0, p = translation;
4417 4418 p->name != NULL;
4418 4419 num_trans++, p++)
4419 4420 ;
4420 4421 }
4421 4422
4422 4423 {
4423 4424 const fs_operation_def_t *p;
4424 4425
4425 4426 for (num_ops = 0, p = operations;
4426 4427 p->name != NULL;
4427 4428 num_ops++, p++)
4428 4429 ;
4429 4430 }
4430 4431
4431 4432 /* Walk through each operation known to our caller. There will be */
4432 4433 /* one entry in the supplied "translation table" for each. */
4433 4434
4434 4435 used = 0;
4435 4436
4436 4437 for (i = 0; i < num_trans; i++) {
4437 4438 int j, found;
4438 4439 char *curname;
4439 4440 fs_generic_func_p result;
4440 4441 fs_generic_func_p *location;
4441 4442
4442 4443 curname = translation[i].name;
4443 4444
4444 4445 /* Look for a matching operation in the list supplied by the */
4445 4446 /* file system. */
4446 4447
4447 4448 found = 0;
4448 4449
4449 4450 for (j = 0; j < num_ops; j++) {
4450 4451 if (strcmp(operations[j].name, curname) == 0) {
4451 4452 used++;
4452 4453 found = 1;
4453 4454 break;
4454 4455 }
4455 4456 }
4456 4457
4457 4458 /*
4458 4459 * If the file system is using a "placeholder" for default
4459 4460 * or error functions, grab the appropriate function out of
4460 4461 * the translation table. If the file system didn't supply
4461 4462 * this operation at all, use the default function.
4462 4463 */
4463 4464
4464 4465 if (found) {
4465 4466 result = operations[j].func.fs_generic;
4466 4467 if (result == fs_default) {
4467 4468 result = translation[i].defaultFunc;
4468 4469 } else if (result == fs_error) {
4469 4470 result = translation[i].errorFunc;
4470 4471 } else if (result == NULL) {
4471 4472 /* Null values are PROHIBITED */
4472 4473 return (EINVAL);
4473 4474 }
4474 4475 } else {
4475 4476 result = translation[i].defaultFunc;
4476 4477 }
4477 4478
4478 4479 /* Now store the function into the operations vector. */
4479 4480
4480 4481 location = (fs_generic_func_p *)
4481 4482 (((char *)vector) + translation[i].offset);
4482 4483
4483 4484 *location = result;
4484 4485 }
4485 4486
4486 4487 *unused_ops = num_ops - used;
4487 4488
4488 4489 return (0);
4489 4490 }
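For context, file systems typically reach fs_build_vector() through vfs_makefsops() or vfs_setfsops() with a template in the same style as the EIO and stray templates in vfsinit() above. A hedged sketch of what such a registration might look like (all myfs_* names are hypothetical):

	static vfsops_t *myfs_vfsops;		/* filled in by vfs_makefsops() */

	static int myfs_mount(vfs_t *, vnode_t *, struct mounta *, cred_t *);
	static int myfs_unmount(vfs_t *, int, cred_t *);

	static const fs_operation_def_t myfs_vfsops_template[] = {
		VFSNAME_MOUNT,		{ .vfs_mount = myfs_mount },
		VFSNAME_UNMOUNT,	{ .vfs_unmount = myfs_unmount },
		NULL,			NULL
	};

	static int
	myfs_init_vfsops(void)			/* hypothetical init-path helper */
	{
		int error;

		error = vfs_makefsops(myfs_vfsops_template, &myfs_vfsops);
		if (error != 0)
			cmn_err(CE_WARN, "myfs: bad vfs ops template");
		return (error);
	}

Operations left out of the template fall back to the defaults supplied by the translation table, per case (1) of the algorithm described above.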
4490 4491
4491 4492 /* Placeholder functions, should never be called. */
4492 4493
4493 4494 int
4494 4495 fs_error(void)
4495 4496 {
4496 4497 cmn_err(CE_PANIC, "fs_error called");
4497 4498 return (0);
4498 4499 }
4499 4500
4500 4501 int
4501 4502 fs_default(void)
4502 4503 {
4503 4504 cmn_err(CE_PANIC, "fs_default called");
4504 4505 return (0);
4505 4506 }
4506 4507
4507 4508 #ifdef __sparc
4508 4509
4509 4510 /*
4510 4511 * Part of the implementation of booting off a mirrored root
4511 4512 * involves a change of dev_t for the root device. To
4512 4513 * accomplish this, first remove the existing hash table
4513 4514 * entry for the root device, convert to the new dev_t,
4514 4515 * then re-insert in the hash table at the head of the list.
4515 4516 */
4516 4517 void
4517 4518 vfs_root_redev(vfs_t *vfsp, dev_t ndev, int fstype)
4518 4519 {
4519 4520 vfs_list_lock();
4520 4521
4521 4522 vfs_hash_remove(vfsp);
4522 4523
4523 4524 vfsp->vfs_dev = ndev;
4524 4525 vfs_make_fsid(&vfsp->vfs_fsid, ndev, fstype);
4525 4526
4526 4527 vfs_hash_add(vfsp, 1);
4527 4528
4528 4529 vfs_list_unlock();
4529 4530 }
4530 4531
4531 4532 #else /* x86 NEWBOOT */
4532 4533
4533 4534 #if defined(__x86)
4534 4535 extern int hvmboot_rootconf();
4535 4536 #endif /* __x86 */
4536 4537
4537 4538 extern ib_boot_prop_t *iscsiboot_prop;
4538 4539
4539 4540 int
4540 4541 rootconf()
4541 4542 {
4542 4543 int error;
4543 4544 struct vfssw *vsw;
4544 4545 extern void pm_init();
4545 4546 char *fstyp, *fsmod;
4546 4547 int ret = -1;
4547 4548
4548 4549 getrootfs(&fstyp, &fsmod);
4549 4550
4550 4551 #if defined(__x86)
4551 4552 /*
4552 4553 * hvmboot_rootconf() is defined in the hvm_bootstrap misc module,
4553 4554 * which lives in /platform/i86hvm, and hence is only available when
4554 4555 * booted in an x86 hvm environment. If the hvm_bootstrap misc module
4555 4556 * is not available then the modstub for this function will return 0.
4556 4557 * If the hvm_bootstrap misc module is available it will be loaded
4557 4558 * and hvmboot_rootconf() will be invoked.
4558 4559 */
4559 4560 if (error = hvmboot_rootconf())
4560 4561 return (error);
4561 4562 #endif /* __x86 */
4562 4563
4563 4564 if (error = clboot_rootconf())
4564 4565 return (error);
4565 4566
4566 4567 if (modload("fs", fsmod) == -1)
4567 4568 panic("Cannot _init %s module", fsmod);
4568 4569
4569 4570 RLOCK_VFSSW();
4570 4571 vsw = vfs_getvfsswbyname(fstyp);
4571 4572 RUNLOCK_VFSSW();
4572 4573 if (vsw == NULL) {
4573 4574 cmn_err(CE_CONT, "Cannot find %s filesystem\n", fstyp);
4574 4575 return (ENXIO);
4575 4576 }
4576 4577 VFS_INIT(rootvfs, &vsw->vsw_vfsops, 0);
4577 4578 VFS_HOLD(rootvfs);
4578 4579
4579 4580 /* always mount readonly first */
4580 4581 rootvfs->vfs_flag |= VFS_RDONLY;
4581 4582
4582 4583 pm_init();
4583 4584
4584 4585 if (netboot && iscsiboot_prop) {
4585 4586 cmn_err(CE_WARN, "NFS boot and iSCSI boot"
4586 4587 		    " shouldn't happen at the same time");
4587 4588 return (EINVAL);
4588 4589 }
4589 4590
4590 4591 if (netboot || iscsiboot_prop) {
4591 4592 ret = strplumb();
4592 4593 if (ret != 0) {
4593 4594 cmn_err(CE_WARN, "Cannot plumb network device %d", ret);
4594 4595 return (EFAULT);
4595 4596 }
4596 4597 }
4597 4598
4598 4599 if ((ret == 0) && iscsiboot_prop) {
4599 4600 ret = modload("drv", "iscsi");
4600 4601 /* -1 indicates fail */
4601 4602 if (ret == -1) {
4602 4603 cmn_err(CE_WARN, "Failed to load iscsi module");
4603 4604 iscsi_boot_prop_free();
4604 4605 return (EINVAL);
4605 4606 } else {
4606 4607 if (!i_ddi_attach_pseudo_node("iscsi")) {
4607 4608 cmn_err(CE_WARN,
4608 4609 "Failed to attach iscsi driver");
4609 4610 iscsi_boot_prop_free();
4610 4611 return (ENODEV);
4611 4612 }
4612 4613 }
4613 4614 }
4614 4615
4615 4616 error = VFS_MOUNTROOT(rootvfs, ROOT_INIT);
4616 4617 vfs_unrefvfssw(vsw);
4617 4618 rootdev = rootvfs->vfs_dev;
4618 4619
4619 4620 if (error)
4620 4621 cmn_err(CE_CONT, "Cannot mount root on %s fstype %s\n",
4621 4622 rootfs.bo_name, fstyp);
4622 4623 else
4623 4624 cmn_err(CE_CONT, "?root on %s fstype %s\n",
4624 4625 rootfs.bo_name, fstyp);
4625 4626 return (error);
4626 4627 }
4627 4628
4628 4629 /*
4629 4630 * XXX this is called by nfs only and should probably be removed
4630 4631 * If booted with ASKNAME, prompt on the console for a filesystem
4631 4632 * name and return it.
4632 4633 */
4633 4634 void
4634 4635 getfsname(char *askfor, char *name, size_t namelen)
4635 4636 {
4636 4637 if (boothowto & RB_ASKNAME) {
4637 4638 printf("%s name: ", askfor);
4638 4639 console_gets(name, namelen);
4639 4640 }
4640 4641 }
4641 4642
4642 4643 /*
4643 4644 * Init the root filesystem type (rootfs.bo_fstype) from the "fstype"
4644 4645 * property.
4645 4646 *
4646 4647 * Filesystem types starting with the prefix "nfs" are diskless clients;
4647 4648  * init the root file name (rootfs.bo_name), too.
4648 4649 *
4649 4650 * If we are booting via NFS we currently have these options:
4650 4651 * nfs - dynamically choose NFS V2, V3, or V4 (default)
4651 4652 * nfs2 - force NFS V2
4652 4653 * nfs3 - force NFS V3
4653 4654 * nfs4 - force NFS V4
4654 4655 * Because we need to maintain backward compatibility with the naming
4655 4656 * convention that the NFS V2 filesystem name is "nfs" (see vfs_conf.c)
4656 4657 * we need to map "nfs" => "nfsdyn" and "nfs2" => "nfs". The dynamic
4657 4658 * nfs module will map the type back to either "nfs", "nfs3", or "nfs4".
4658 - * This is only for root filesystems, all other uses such as cachefs
4659 - * will expect that "nfs" == NFS V2.
4659 + * This is only for root filesystems; all other uses will expect
4660 + * that "nfs" == NFS V2.
4660 4661 */
4661 4662 static void
4662 4663 getrootfs(char **fstypp, char **fsmodp)
4663 4664 {
4664 4665 extern char *strplumb_get_netdev_path(void);
4665 4666 char *propstr = NULL;
4666 4667
4667 4668 /*
4668 4669 * Check fstype property; for diskless it should be one of "nfs",
4669 4670 * "nfs2", "nfs3" or "nfs4".
4670 4671 */
4671 4672 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
4672 4673 DDI_PROP_DONTPASS, "fstype", &propstr)
4673 4674 == DDI_SUCCESS) {
4674 4675 (void) strncpy(rootfs.bo_fstype, propstr, BO_MAXFSNAME);
4675 4676 ddi_prop_free(propstr);
4676 4677
4677 4678 /*
4678 4679 * if the boot property 'fstype' is not set, but 'zfs-bootfs' is set,
4679 4680 * assume the type of this root filesystem is 'zfs'.
4680 4681 */
4681 4682 } else if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
4682 4683 DDI_PROP_DONTPASS, "zfs-bootfs", &propstr)
4683 4684 == DDI_SUCCESS) {
4684 4685 (void) strncpy(rootfs.bo_fstype, "zfs", BO_MAXFSNAME);
4685 4686 ddi_prop_free(propstr);
4686 4687 }
4687 4688
4688 4689 if (strncmp(rootfs.bo_fstype, "nfs", 3) != 0) {
4689 4690 *fstypp = *fsmodp = rootfs.bo_fstype;
4690 4691 return;
4691 4692 }
4692 4693
4693 4694 ++netboot;
4694 4695
4695 4696 if (strcmp(rootfs.bo_fstype, "nfs2") == 0)
4696 4697 (void) strcpy(rootfs.bo_fstype, "nfs");
4697 4698 else if (strcmp(rootfs.bo_fstype, "nfs") == 0)
4698 4699 (void) strcpy(rootfs.bo_fstype, "nfsdyn");
4699 4700
4700 4701 /*
4701 4702 * check if path to network interface is specified in bootpath
4702 4703 * or by a hypervisor domain configuration file.
4703 4704 * XXPV - enable strlumb_get_netdev_path()
4704 4705 */
4705 4706 if (ddi_prop_exists(DDI_DEV_T_ANY, ddi_root_node(), DDI_PROP_DONTPASS,
4706 4707 "xpv-nfsroot")) {
4707 4708 (void) strcpy(rootfs.bo_name, "/xpvd/xnf@0");
4708 4709 } else if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
4709 4710 DDI_PROP_DONTPASS, "bootpath", &propstr)
4710 4711 == DDI_SUCCESS) {
4711 4712 (void) strncpy(rootfs.bo_name, propstr, BO_MAXOBJNAME);
4712 4713 ddi_prop_free(propstr);
4713 4714 } else {
4714 4715 /* attempt to determine netdev_path via boot_mac address */
4715 4716 netdev_path = strplumb_get_netdev_path();
4716 4717 if (netdev_path == NULL)
4717 4718 panic("cannot find boot network interface");
4718 4719 (void) strncpy(rootfs.bo_name, netdev_path, BO_MAXOBJNAME);
4719 4720 }
4720 4721 *fstypp = rootfs.bo_fstype;
4721 4722 *fsmodp = "nfs";
4722 4723 }
4723 4724 #endif
4724 4725
4725 4726 /*
4726 4727 * VFS feature routines
4727 4728 */
4728 4729
4729 4730 #define VFTINDEX(feature) (((feature) >> 32) & 0xFFFFFFFF)
4730 4731 #define VFTBITS(feature) ((feature) & 0xFFFFFFFFLL)
4731 4732
4732 4733 /* Register a feature in the vfs */
4733 4734 void
4734 4735 vfs_set_feature(vfs_t *vfsp, vfs_feature_t feature)
4735 4736 {
4736 4737 /* Note that vfs_featureset[] is found in *vfsp->vfs_implp */
4737 4738 if (vfsp->vfs_implp == NULL)
4738 4739 return;
4739 4740
4740 4741 vfsp->vfs_featureset[VFTINDEX(feature)] |= VFTBITS(feature);
4741 4742 }
4742 4743
4743 4744 void
4744 4745 vfs_clear_feature(vfs_t *vfsp, vfs_feature_t feature)
4745 4746 {
4746 4747 /* Note that vfs_featureset[] is found in *vfsp->vfs_implp */
4747 4748 if (vfsp->vfs_implp == NULL)
4748 4749 return;
4749 4750 vfsp->vfs_featureset[VFTINDEX(feature)] &= VFTBITS(~feature);
4750 4751 }
4751 4752
4752 4753 /*
4753 4754 * Query a vfs for a feature.
4754 4755 * Returns 1 if feature is present, 0 if not
4755 4756 */
4756 4757 int
4757 4758 vfs_has_feature(vfs_t *vfsp, vfs_feature_t feature)
4758 4759 {
4759 4760 int ret = 0;
4760 4761
4761 4762 /* Note that vfs_featureset[] is found in *vfsp->vfs_implp */
4762 4763 if (vfsp->vfs_implp == NULL)
4763 4764 return (ret);
4764 4765
4765 4766 if (vfsp->vfs_featureset[VFTINDEX(feature)] & VFTBITS(feature))
4766 4767 ret = 1;
4767 4768
4768 4769 return (ret);
4769 4770 }
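A feature constant packs an index into vfs_featureset[] in its upper 32 bits and a bit mask in its lower 32 bits, which is exactly what VFTINDEX() and VFTBITS() extract. A short hypothetical usage, assuming the VFSFT_XVATTR flag from sys/vfs.h:

	/* at mount time, a file system advertises a capability */
	vfs_set_feature(vfsp, VFSFT_XVATTR);

	/* consumers test for it before relying on the behavior */
	if (vfs_has_feature(vfsp, VFSFT_XVATTR)) {
		/* ... take the xvattr-aware path ... */
	}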
4770 4771
4771 4772 /*
4772 4773 * Propagate feature set from one vfs to another
4773 4774 */
4774 4775 void
4775 4776 vfs_propagate_features(vfs_t *from, vfs_t *to)
4776 4777 {
4777 4778 int i;
4778 4779
4779 4780 if (to->vfs_implp == NULL || from->vfs_implp == NULL)
4780 4781 return;
4781 4782
4782 4783 for (i = 1; i <= to->vfs_featureset[0]; i++) {
4783 4784 to->vfs_featureset[i] = from->vfs_featureset[i];
4784 4785 }
4785 4786 }
4786 4787
4787 4788 #define LOFINODE_PATH "/dev/lofi/%d"
4788 4789
4789 4790 /*
4790 4791 * Return the vnode for the lofi node if there's a lofi mount in place.
4791 4792 * Returns -1 when there's no lofi node, 0 on success, and > 0 on
4792 4793 * failure.
4793 4794 */
4794 4795 int
4795 4796 vfs_get_lofi(vfs_t *vfsp, vnode_t **vpp)
4796 4797 {
4797 4798 char *path = NULL;
4798 4799 int strsize;
4799 4800 int err;
4800 4801
4801 4802 if (vfsp->vfs_lofi_minor == 0) {
4802 4803 *vpp = NULL;
4803 4804 return (-1);
4804 4805 }
4805 4806
4806 4807 strsize = snprintf(NULL, 0, LOFINODE_PATH, vfsp->vfs_lofi_minor);
4807 4808 path = kmem_alloc(strsize + 1, KM_SLEEP);
4808 4809 (void) snprintf(path, strsize + 1, LOFINODE_PATH, vfsp->vfs_lofi_minor);
4809 4810
4810 4811 /*
4811 4812 * We may be inside a zone, so we need to use the /dev path, but
4812 4813 * it's created asynchronously, so we wait here.
4813 4814 */
4814 4815 for (;;) {
4815 4816 err = lookupname(path, UIO_SYSSPACE, FOLLOW, NULLVPP, vpp);
4816 4817
4817 4818 if (err != ENOENT)
4818 4819 break;
4819 4820
4820 4821 if ((err = delay_sig(hz / 8)) == EINTR)
4821 4822 break;
4822 4823 }
4823 4824
4824 4825 if (err)
4825 4826 *vpp = NULL;
4826 4827
4827 4828 kmem_free(path, strsize + 1);
4828 4829 return (err);
4829 4830 }
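On success the vnode is returned held by lookupname(), so a caller is expected to VN_RELE() it when finished; a minimal hypothetical usage:

	vnode_t *lvp;
	int err;

	err = vfs_get_lofi(vfsp, &lvp);
	if (err > 0)
		return (err);	/* lookup failed or the wait was interrupted */
	if (err == 0) {
		/* ... use the /dev/lofi/<minor> vnode ... */
		VN_RELE(lvp);	/* drop the hold from lookupname() */
	}
	/* err == -1: not a lofi mount, nothing to release */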