8115 parallel zfs mount
--- old/usr/src/uts/common/fs/vfs.c
+++ new/usr/src/uts/common/fs/vfs.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 1988, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 * Copyright 2016 Joyent, Inc.
25 25 * Copyright 2016 Toomas Soome <tsoome@me.com>
26 - * Copyright (c) 2016 by Delphix. All rights reserved.
26 + * Copyright (c) 2016, 2017 by Delphix. All rights reserved.
27 27 * Copyright 2016 Nexenta Systems, Inc.
28 28 * Copyright 2017 RackTop Systems.
29 29 */
30 30
31 31 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
32 32 /* All Rights Reserved */
33 33
34 34 /*
35 35 * University Copyright- Copyright (c) 1982, 1986, 1988
36 36 * The Regents of the University of California
37 37 * All Rights Reserved
38 38 *
39 39 * University Acknowledgment- Portions of this document are derived from
40 40 * software developed by the University of California, Berkeley, and its
41 41 * contributors.
42 42 */
43 43
44 44 #include <sys/types.h>
45 45 #include <sys/t_lock.h>
46 46 #include <sys/param.h>
47 47 #include <sys/errno.h>
48 48 #include <sys/user.h>
49 49 #include <sys/fstyp.h>
50 50 #include <sys/kmem.h>
51 51 #include <sys/systm.h>
52 52 #include <sys/proc.h>
53 53 #include <sys/mount.h>
54 54 #include <sys/vfs.h>
55 55 #include <sys/vfs_opreg.h>
56 56 #include <sys/fem.h>
57 57 #include <sys/mntent.h>
58 58 #include <sys/stat.h>
59 59 #include <sys/statvfs.h>
60 60 #include <sys/statfs.h>
61 61 #include <sys/cred.h>
62 62 #include <sys/vnode.h>
63 63 #include <sys/rwstlock.h>
64 64 #include <sys/dnlc.h>
65 65 #include <sys/file.h>
66 66 #include <sys/time.h>
67 67 #include <sys/atomic.h>
68 68 #include <sys/cmn_err.h>
69 69 #include <sys/buf.h>
70 70 #include <sys/swap.h>
71 71 #include <sys/debug.h>
72 72 #include <sys/vnode.h>
73 73 #include <sys/modctl.h>
74 74 #include <sys/ddi.h>
75 75 #include <sys/pathname.h>
76 76 #include <sys/bootconf.h>
77 77 #include <sys/dumphdr.h>
78 78 #include <sys/dc_ki.h>
79 79 #include <sys/poll.h>
80 80 #include <sys/sunddi.h>
81 81 #include <sys/sysmacros.h>
82 82 #include <sys/zone.h>
83 83 #include <sys/policy.h>
84 84 #include <sys/ctfs.h>
85 85 #include <sys/objfs.h>
86 86 #include <sys/console.h>
87 87 #include <sys/reboot.h>
88 88 #include <sys/attr.h>
89 89 #include <sys/zio.h>
90 90 #include <sys/spa.h>
91 91 #include <sys/lofi.h>
92 92 #include <sys/bootprops.h>
93 93
94 94 #include <vm/page.h>
95 95
96 96 #include <fs/fs_subr.h>
97 97 /* Private interfaces to create vopstats-related data structures */
98 98 extern void initialize_vopstats(vopstats_t *);
99 99 extern vopstats_t *get_fstype_vopstats(struct vfs *, struct vfssw *);
100 100 extern vsk_anchor_t *get_vskstat_anchor(struct vfs *);
101 101
102 102 static void vfs_clearmntopt_nolock(mntopts_t *, const char *, int);
103 103 static void vfs_setmntopt_nolock(mntopts_t *, const char *,
104 104 const char *, int, int);
105 105 static int vfs_optionisset_nolock(const mntopts_t *, const char *, char **);
106 106 static void vfs_freemnttab(struct vfs *);
107 107 static void vfs_freeopt(mntopt_t *);
108 108 static void vfs_swapopttbl_nolock(mntopts_t *, mntopts_t *);
109 109 static void vfs_swapopttbl(mntopts_t *, mntopts_t *);
110 110 static void vfs_copyopttbl_extend(const mntopts_t *, mntopts_t *, int);
111 111 static void vfs_createopttbl_extend(mntopts_t *, const char *,
112 112 const mntopts_t *);
113 113 static char **vfs_copycancelopt_extend(char **const, int);
114 114 static void vfs_freecancelopt(char **);
115 115 static void getrootfs(char **, char **);
116 116 static int getmacpath(dev_info_t *, void *);
117 117 static void vfs_mnttabvp_setup(void);
118 118
119 119 struct ipmnt {
120 120 struct ipmnt *mip_next;
121 121 dev_t mip_dev;
122 122 struct vfs *mip_vfsp;
123 123 };
124 124
125 125 static kmutex_t vfs_miplist_mutex;
126 126 static struct ipmnt *vfs_miplist = NULL;
127 127 static struct ipmnt *vfs_miplist_end = NULL;
128 128
129 129 static kmem_cache_t *vfs_cache; /* Pointer to VFS kmem cache */
130 130
131 131 /*
132 132 * VFS global data.
133 133 */
134 134 vnode_t *rootdir; /* pointer to root inode vnode. */
135 135 vnode_t *devicesdir; /* pointer to inode of devices root */
136 136 vnode_t *devdir; /* pointer to inode of dev root */
137 137
138 138 char *server_rootpath; /* root path for diskless clients */
139 139 char *server_hostname; /* hostname of diskless server */
140 140
141 141 static struct vfs root;
142 142 static struct vfs devices;
143 143 static struct vfs dev;
144 144 struct vfs *rootvfs = &root; /* pointer to root vfs; head of VFS list. */
145 145 rvfs_t *rvfs_list; /* array of vfs ptrs for vfs hash list */
146 146 int vfshsz = 512; /* # of heads/locks in vfs hash arrays */
147 147 /* must be power of 2! */
148 148 timespec_t vfs_mnttab_ctime; /* mnttab created time */
149 149 timespec_t vfs_mnttab_mtime; /* mnttab last modified time */
150 150 char *vfs_dummyfstype = "\0";
151 151 struct pollhead vfs_pollhd; /* for mnttab pollers */
152 152 struct vnode *vfs_mntdummyvp; /* to fake mnttab read/write for file events */
153 153 int mntfstype; /* will be set once mnt fs is mounted */
154 154
155 155 /*
156 156 * Table for generic options recognized in the VFS layer and acted
157 157 * on at this level before parsing file system specific options.
158 158 * The nosuid option is stronger than any of the devices and setuid
159 159 * options, so those are canceled when nosuid is seen.
160 160 *
161 161 * All options which are added here need to be added to the
162 162 * list of standard options in usr/src/cmd/fs.d/fslib.c as well.
163 163 */
164 164 /*
165 165 * VFS Mount options table
166 166 */
167 167 static char *ro_cancel[] = { MNTOPT_RW, NULL };
168 168 static char *rw_cancel[] = { MNTOPT_RO, NULL };
169 169 static char *suid_cancel[] = { MNTOPT_NOSUID, NULL };
170 170 static char *nosuid_cancel[] = { MNTOPT_SUID, MNTOPT_DEVICES, MNTOPT_NODEVICES,
171 171 MNTOPT_NOSETUID, MNTOPT_SETUID, NULL };
172 172 static char *devices_cancel[] = { MNTOPT_NODEVICES, NULL };
173 173 static char *nodevices_cancel[] = { MNTOPT_DEVICES, NULL };
174 174 static char *setuid_cancel[] = { MNTOPT_NOSETUID, NULL };
175 175 static char *nosetuid_cancel[] = { MNTOPT_SETUID, NULL };
176 176 static char *nbmand_cancel[] = { MNTOPT_NONBMAND, NULL };
177 177 static char *nonbmand_cancel[] = { MNTOPT_NBMAND, NULL };
178 178 static char *exec_cancel[] = { MNTOPT_NOEXEC, NULL };
179 179 static char *noexec_cancel[] = { MNTOPT_EXEC, NULL };
180 180
181 181 static const mntopt_t mntopts[] = {
182 182 /*
183 183 * option name cancel options default arg flags
184 184 */
185 185 { MNTOPT_REMOUNT, NULL, NULL,
186 186 MO_NODISPLAY, (void *)0 },
187 187 { MNTOPT_RO, ro_cancel, NULL, 0,
188 188 (void *)0 },
189 189 { MNTOPT_RW, rw_cancel, NULL, 0,
190 190 (void *)0 },
191 191 { MNTOPT_SUID, suid_cancel, NULL, 0,
192 192 (void *)0 },
193 193 { MNTOPT_NOSUID, nosuid_cancel, NULL, 0,
194 194 (void *)0 },
195 195 { MNTOPT_DEVICES, devices_cancel, NULL, 0,
196 196 (void *)0 },
197 197 { MNTOPT_NODEVICES, nodevices_cancel, NULL, 0,
198 198 (void *)0 },
199 199 { MNTOPT_SETUID, setuid_cancel, NULL, 0,
200 200 (void *)0 },
201 201 { MNTOPT_NOSETUID, nosetuid_cancel, NULL, 0,
202 202 (void *)0 },
203 203 { MNTOPT_NBMAND, nbmand_cancel, NULL, 0,
204 204 (void *)0 },
205 205 { MNTOPT_NONBMAND, nonbmand_cancel, NULL, 0,
206 206 (void *)0 },
207 207 { MNTOPT_EXEC, exec_cancel, NULL, 0,
208 208 (void *)0 },
209 209 { MNTOPT_NOEXEC, noexec_cancel, NULL, 0,
210 210 (void *)0 },
211 211 };
212 212
213 213 const mntopts_t vfs_mntopts = {
214 214 sizeof (mntopts) / sizeof (mntopt_t),
215 215 (mntopt_t *)&mntopts[0]
216 216 };
217 217
218 218 /*
219 219 * File system operation dispatch functions.
220 220 */
221 221
222 222 int
223 223 fsop_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr)
224 224 {
225 225 return (*(vfsp)->vfs_op->vfs_mount)(vfsp, mvp, uap, cr);
226 226 }
227 227
228 228 int
229 229 fsop_unmount(vfs_t *vfsp, int flag, cred_t *cr)
230 230 {
231 231 return (*(vfsp)->vfs_op->vfs_unmount)(vfsp, flag, cr);
232 232 }
233 233
234 234 int
235 235 fsop_root(vfs_t *vfsp, vnode_t **vpp)
236 236 {
237 237 refstr_t *mntpt;
238 238 int ret = (*(vfsp)->vfs_op->vfs_root)(vfsp, vpp);
239 239 /*
240 240 * Make sure this root has a path. With lofs, it is possible to have
241 241 * a NULL mountpoint.
242 242 */
243 243 if (ret == 0 && vfsp->vfs_mntpt != NULL &&
244 244 (*vpp)->v_path == vn_vpath_empty) {
245 245 const char *path;
246 246
247 247 mntpt = vfs_getmntpoint(vfsp);
248 248 path = refstr_value(mntpt);
249 249 vn_setpath_str(*vpp, path, strlen(path));
250 250 refstr_rele(mntpt);
251 251 }
252 252
253 253 return (ret);
254 254 }
255 255
256 256 int
257 257 fsop_statfs(vfs_t *vfsp, statvfs64_t *sp)
258 258 {
259 259 return (*(vfsp)->vfs_op->vfs_statvfs)(vfsp, sp);
260 260 }
261 261
262 262 int
263 263 fsop_sync(vfs_t *vfsp, short flag, cred_t *cr)
264 264 {
265 265 return (*(vfsp)->vfs_op->vfs_sync)(vfsp, flag, cr);
266 266 }
267 267
268 268 int
269 269 fsop_vget(vfs_t *vfsp, vnode_t **vpp, fid_t *fidp)
270 270 {
271 271 /*
272 272 * In order to handle system attribute fids in a manner
273 273 * transparent to the underlying fs, we embed the fid for
274 274 * the sysattr parent object in the sysattr fid and tack on
275 275 * some extra bytes that only the sysattr layer knows about.
276 276 *
277 277 * This guarantees that sysattr fids are larger than other fids
278 278 * for this vfs. If the vfs supports the sysattr view interface
279 279 * (as indicated by VFSFT_SYSATTR_VIEWS), we cannot have a size
280 280 * collision with XATTR_FIDSZ.
281 281 */
282 282 if (vfs_has_feature(vfsp, VFSFT_SYSATTR_VIEWS) &&
283 283 fidp->fid_len == XATTR_FIDSZ)
284 284 return (xattr_dir_vget(vfsp, vpp, fidp));
285 285
286 286 return (*(vfsp)->vfs_op->vfs_vget)(vfsp, vpp, fidp);
287 287 }
288 288
289 289 int
290 290 fsop_mountroot(vfs_t *vfsp, enum whymountroot reason)
291 291 {
292 292 return (*(vfsp)->vfs_op->vfs_mountroot)(vfsp, reason);
293 293 }
294 294
295 295 void
296 296 fsop_freefs(vfs_t *vfsp)
297 297 {
298 298 (*(vfsp)->vfs_op->vfs_freevfs)(vfsp);
299 299 }
300 300
301 301 int
302 302 fsop_vnstate(vfs_t *vfsp, vnode_t *vp, vntrans_t nstate)
303 303 {
304 304 return ((*(vfsp)->vfs_op->vfs_vnstate)(vfsp, vp, nstate));
305 305 }
306 306
307 307 int
308 308 fsop_sync_by_kind(int fstype, short flag, cred_t *cr)
309 309 {
310 310 ASSERT((fstype >= 0) && (fstype < nfstype));
311 311
312 312 if (ALLOCATED_VFSSW(&vfssw[fstype]) && VFS_INSTALLED(&vfssw[fstype]))
313 313 return (*vfssw[fstype].vsw_vfsops.vfs_sync) (NULL, flag, cr);
314 314 else
315 315 return (ENOTSUP);
316 316 }
317 317
318 318 /*
319 319 * File system initialization. vfs_setfsops() must be called from a file
320 320 * system's init routine.
321 321 */
322 322
323 323 static int
324 324 fs_copyfsops(const fs_operation_def_t *template, vfsops_t *actual,
325 325 int *unused_ops)
326 326 {
327 327 static const fs_operation_trans_def_t vfs_ops_table[] = {
328 328 VFSNAME_MOUNT, offsetof(vfsops_t, vfs_mount),
329 329 fs_nosys, fs_nosys,
330 330
331 331 VFSNAME_UNMOUNT, offsetof(vfsops_t, vfs_unmount),
332 332 fs_nosys, fs_nosys,
333 333
334 334 VFSNAME_ROOT, offsetof(vfsops_t, vfs_root),
335 335 fs_nosys, fs_nosys,
336 336
337 337 VFSNAME_STATVFS, offsetof(vfsops_t, vfs_statvfs),
338 338 fs_nosys, fs_nosys,
339 339
340 340 VFSNAME_SYNC, offsetof(vfsops_t, vfs_sync),
341 341 (fs_generic_func_p) fs_sync,
342 342 (fs_generic_func_p) fs_sync, /* No errors allowed */
343 343
344 344 VFSNAME_VGET, offsetof(vfsops_t, vfs_vget),
345 345 fs_nosys, fs_nosys,
346 346
347 347 VFSNAME_MOUNTROOT, offsetof(vfsops_t, vfs_mountroot),
348 348 fs_nosys, fs_nosys,
349 349
350 350 VFSNAME_FREEVFS, offsetof(vfsops_t, vfs_freevfs),
351 351 (fs_generic_func_p)fs_freevfs,
352 352 (fs_generic_func_p)fs_freevfs, /* Shouldn't fail */
353 353
354 354 VFSNAME_VNSTATE, offsetof(vfsops_t, vfs_vnstate),
355 355 (fs_generic_func_p)fs_nosys,
356 356 (fs_generic_func_p)fs_nosys,
357 357
358 358 NULL, 0, NULL, NULL
359 359 };
360 360
361 361 return (fs_build_vector(actual, unused_ops, vfs_ops_table, template));
362 362 }
363 363
364 364 void
365 365 zfs_boot_init(void)
366 366 {
367 367 if (strcmp(rootfs.bo_fstype, MNTTYPE_ZFS) == 0)
368 368 spa_boot_init();
369 369 }
370 370
371 371 int
372 372 vfs_setfsops(int fstype, const fs_operation_def_t *template, vfsops_t **actual)
373 373 {
374 374 int error;
375 375 int unused_ops;
376 376
377 377 /*
378 378 * Verify that fstype refers to a valid fs. Note that
379 379 * 0 is valid since it's used to set "stray" ops.
380 380 */
381 381 if ((fstype < 0) || (fstype >= nfstype))
382 382 return (EINVAL);
383 383
384 384 if (!ALLOCATED_VFSSW(&vfssw[fstype]))
385 385 return (EINVAL);
386 386
387 387 /* Set up the operations vector. */
388 388
389 389 error = fs_copyfsops(template, &vfssw[fstype].vsw_vfsops, &unused_ops);
390 390
391 391 if (error != 0)
392 392 return (error);
393 393
394 394 vfssw[fstype].vsw_flag |= VSW_INSTALLED;
395 395
396 396 if (actual != NULL)
397 397 *actual = &vfssw[fstype].vsw_vfsops;
398 398
399 399 #if DEBUG
400 400 if (unused_ops != 0)
401 401 cmn_err(CE_WARN, "vfs_setfsops: %s: %d operations supplied "
402 402 "but not used", vfssw[fstype].vsw_name, unused_ops);
403 403 #endif
404 404
405 405 return (0);
406 406 }
407 407
408 408 int
409 409 vfs_makefsops(const fs_operation_def_t *template, vfsops_t **actual)
410 410 {
411 411 int error;
412 412 int unused_ops;
413 413
414 414 *actual = (vfsops_t *)kmem_alloc(sizeof (vfsops_t), KM_SLEEP);
415 415
416 416 error = fs_copyfsops(template, *actual, &unused_ops);
417 417 if (error != 0) {
418 418 kmem_free(*actual, sizeof (vfsops_t));
419 419 *actual = NULL;
420 420 return (error);
421 421 }
422 422
423 423 return (0);
424 424 }
425 425
426 426 /*
427 427 * Free a vfsops structure created as a result of vfs_makefsops().
428 428 * NOTE: For a vfsops structure initialized by vfs_setfsops(), use
429 429 * vfs_freevfsops_by_type().
430 430 */
431 431 void
432 432 vfs_freevfsops(vfsops_t *vfsops)
433 433 {
434 434 kmem_free(vfsops, sizeof (vfsops_t));
435 435 }
436 436
437 437 /*
438 438 * Since the vfsops structure is part of the vfssw table and wasn't
439 439 * really allocated, we're not really freeing anything. We keep
440 440 * the name for consistency with vfs_freevfsops(). We do, however,
441 441 * need to take care of a little bookkeeping.
442 442 * NOTE: For a vfsops structure created by vfs_setfsops(), use
443 443 * vfs_freevfsops_by_type().
444 444 */
445 445 int
446 446 vfs_freevfsops_by_type(int fstype)
447 447 {
448 448
449 449 /* Verify that fstype refers to a loaded fs (and not fsid 0). */
450 450 if ((fstype <= 0) || (fstype >= nfstype))
451 451 return (EINVAL);
452 452
453 453 WLOCK_VFSSW();
454 454 if ((vfssw[fstype].vsw_flag & VSW_INSTALLED) == 0) {
455 455 WUNLOCK_VFSSW();
456 456 return (EINVAL);
457 457 }
458 458
459 459 vfssw[fstype].vsw_flag &= ~VSW_INSTALLED;
460 460 WUNLOCK_VFSSW();
461 461
462 462 return (0);
463 463 }
464 464
465 465 /* Support routines used to reference vfs_op */
466 466
467 467 /* Set the operations vector for a vfs */
468 468 void
469 469 vfs_setops(vfs_t *vfsp, vfsops_t *vfsops)
470 470 {
471 471 vfsops_t *op;
472 472
473 473 ASSERT(vfsp != NULL);
474 474 ASSERT(vfsops != NULL);
475 475
476 476 op = vfsp->vfs_op;
477 477 membar_consumer();
478 478 if (vfsp->vfs_femhead == NULL &&
479 479 atomic_cas_ptr(&vfsp->vfs_op, op, vfsops) == op) {
480 480 return;
481 481 }
482 482 fsem_setvfsops(vfsp, vfsops);
483 483 }
484 484
485 485 /* Retrieve the operations vector for a vfs */
486 486 vfsops_t *
487 487 vfs_getops(vfs_t *vfsp)
488 488 {
489 489 vfsops_t *op;
490 490
491 491 ASSERT(vfsp != NULL);
492 492
493 493 op = vfsp->vfs_op;
494 494 membar_consumer();
495 495 if (vfsp->vfs_femhead == NULL && op == vfsp->vfs_op) {
496 496 return (op);
497 497 } else {
498 498 return (fsem_getvfsops(vfsp));
499 499 }
500 500 }
501 501
502 502 /*
503 503 * Returns non-zero (1) if the vfsops matches that of the vfs.
504 504 * Returns zero (0) if not.
505 505 */
506 506 int
507 507 vfs_matchops(vfs_t *vfsp, vfsops_t *vfsops)
508 508 {
509 509 return (vfs_getops(vfsp) == vfsops);
510 510 }
511 511
512 512 /*
513 513 * Returns non-zero (1) if the file system has installed a non-default,
514 514 * non-error vfs_sync routine. Returns zero (0) otherwise.
515 515 */
516 516 int
517 517 vfs_can_sync(vfs_t *vfsp)
518 518 {
519 519 /* vfs_sync() routine is not the default/error function */
520 520 return (vfs_getops(vfsp)->vfs_sync != fs_sync);
521 521 }
522 522
523 523 /*
524 524 * Initialize a vfs structure.
525 525 */
526 526 void
527 527 vfs_init(vfs_t *vfsp, vfsops_t *op, void *data)
528 528 {
529 529 /* Other initialization has been moved to vfs_alloc() */
530 530 vfsp->vfs_count = 0;
531 531 vfsp->vfs_next = vfsp;
532 532 vfsp->vfs_prev = vfsp;
533 533 vfsp->vfs_zone_next = vfsp;
534 534 vfsp->vfs_zone_prev = vfsp;
535 535 vfsp->vfs_lofi_id = 0;
536 536 sema_init(&vfsp->vfs_reflock, 1, NULL, SEMA_DEFAULT, NULL);
537 537 vfsimpl_setup(vfsp);
538 538 vfsp->vfs_data = (data);
539 539 vfs_setops((vfsp), (op));
540 540 }
541 541
542 542 /*
543 543 * Allocate and initialize the vfs implementation private data
544 544 * structure, vfs_impl_t.
545 545 */
546 546 void
547 547 vfsimpl_setup(vfs_t *vfsp)
548 548 {
549 549 int i;
550 550
551 551 if (vfsp->vfs_implp != NULL) {
552 552 return;
553 553 }
554 554
555 555 vfsp->vfs_implp = kmem_alloc(sizeof (vfs_impl_t), KM_SLEEP);
556 556 /* Note that these are #define'd in vfs.h */
557 557 vfsp->vfs_vskap = NULL;
558 558 vfsp->vfs_fstypevsp = NULL;
559 559
560 560 /* Set size of counted array, then zero the array */
561 561 vfsp->vfs_featureset[0] = VFS_FEATURE_MAXSZ - 1;
562 562 for (i = 1; i < VFS_FEATURE_MAXSZ; i++) {
563 563 vfsp->vfs_featureset[i] = 0;
564 564 }
565 565 }
566 566
567 567 /*
568 568 * Release the vfs_impl_t structure, if it exists. Some unbundled
569 569 * filesystems may not use the newer version of vfs and thus
570 570 * would not contain this implementation private data structure.
571 571 */
572 572 void
573 573 vfsimpl_teardown(vfs_t *vfsp)
574 574 {
575 575 vfs_impl_t *vip = vfsp->vfs_implp;
576 576
577 577 if (vip == NULL)
578 578 return;
579 579
580 580 kmem_free(vfsp->vfs_implp, sizeof (vfs_impl_t));
581 581 vfsp->vfs_implp = NULL;
582 582 }
583 583
584 584 /*
585 585 * VFS system calls: mount, umount, syssync, statfs, fstatfs, statvfs,
586 586 * fstatvfs, and sysfs moved to common/syscall.
587 587 */
588 588
589 589 /*
590 590 * Update every mounted file system. We call the vfs_sync operation of
591 591 * each file system type, passing it a NULL vfsp to indicate that all
592 592 * mounted file systems of that type should be updated.
593 593 */
594 594 void
595 595 vfs_sync(int flag)
596 596 {
597 597 struct vfssw *vswp;
598 598 RLOCK_VFSSW();
599 599 for (vswp = &vfssw[1]; vswp < &vfssw[nfstype]; vswp++) {
600 600 if (ALLOCATED_VFSSW(vswp) && VFS_INSTALLED(vswp)) {
601 601 vfs_refvfssw(vswp);
602 602 RUNLOCK_VFSSW();
603 603 (void) (*vswp->vsw_vfsops.vfs_sync)(NULL, flag,
604 604 CRED());
605 605 vfs_unrefvfssw(vswp);
606 606 RLOCK_VFSSW();
607 607 }
608 608 }
609 609 RUNLOCK_VFSSW();
610 610 }
611 611
612 612 void
613 613 sync(void)
614 614 {
615 615 vfs_sync(0);
616 616 }
617 617
618 618 /*
619 619 * External routines.
620 620 */
621 621
622 622 krwlock_t vfssw_lock; /* lock accesses to vfssw */
623 623
624 624 /*
625 625 * Lock for accessing the vfs linked list. Initialized in vfs_mountroot(),
626 626 * but otherwise should be accessed only via vfs_list_lock() and
627 627 * vfs_list_unlock(). Also used to protect the timestamp for mods to the list.
628 628 */
629 629 static krwlock_t vfslist;
630 630
631 631 /*
632 632 * Mount devfs on /devices. This is done right after root is mounted
633 633 * to provide device access support for the system
634 634 */
635 635 static void
636 636 vfs_mountdevices(void)
637 637 {
638 638 struct vfssw *vsw;
639 639 struct vnode *mvp;
640 640 struct mounta mounta = { /* fake mounta for devfs_mount() */
641 641 NULL,
642 642 NULL,
643 643 MS_SYSSPACE,
644 644 NULL,
645 645 NULL,
646 646 0,
647 647 NULL,
648 648 0
649 649 };
650 650
651 651 /*
652 652 * _init devfs module to fill in the vfssw
653 653 */
654 654 if (modload("fs", "devfs") == -1)
655 655 panic("Cannot _init devfs module");
656 656
657 657 /*
658 658 * Hold vfs
659 659 */
660 660 RLOCK_VFSSW();
661 661 vsw = vfs_getvfsswbyname("devfs");
662 662 VFS_INIT(&devices, &vsw->vsw_vfsops, NULL);
663 663 VFS_HOLD(&devices);
664 664
665 665 /*
666 666 * Locate mount point
667 667 */
668 668 if (lookupname("/devices", UIO_SYSSPACE, FOLLOW, NULLVPP, &mvp))
669 669 panic("Cannot find /devices");
670 670
671 671 /*
672 672 * Perform the mount of /devices
673 673 */
674 674 if (VFS_MOUNT(&devices, mvp, &mounta, CRED()))
675 675 panic("Cannot mount /devices");
676 676
677 677 RUNLOCK_VFSSW();
678 678
679 679 /*
680 680 * Set appropriate members and add to vfs list for mnttab display
681 681 */
682 682 vfs_setresource(&devices, "/devices", 0);
683 683 vfs_setmntpoint(&devices, "/devices", 0);
684 684
685 685 /*
686 686 * Hold the root of /devices so it won't go away
687 687 */
688 688 if (VFS_ROOT(&devices, &devicesdir))
689 689 panic("vfs_mountdevices: not devices root");
690 690
691 691 if (vfs_lock(&devices) != 0) {
692 692 VN_RELE(devicesdir);
693 693 cmn_err(CE_NOTE, "Cannot acquire vfs_lock of /devices");
694 694 return;
695 695 }
696 696
697 697 if (vn_vfswlock(mvp) != 0) {
698 698 vfs_unlock(&devices);
699 699 VN_RELE(devicesdir);
700 700 cmn_err(CE_NOTE, "Cannot acquire vfswlock of /devices");
701 701 return;
702 702 }
703 703
704 704 vfs_add(mvp, &devices, 0);
705 705 vn_vfsunlock(mvp);
706 706 vfs_unlock(&devices);
707 707 VN_RELE(devicesdir);
708 708 }
709 709
710 710 /*
711 711 * mount the first instance of /dev to root and remain mounted
712 712 */
713 713 static void
714 714 vfs_mountdev1(void)
715 715 {
716 716 struct vfssw *vsw;
717 717 struct vnode *mvp;
718 718 struct mounta mounta = { /* fake mounta for sdev_mount() */
719 719 NULL,
720 720 NULL,
721 721 MS_SYSSPACE | MS_OVERLAY,
722 722 NULL,
723 723 NULL,
724 724 0,
725 725 NULL,
726 726 0
727 727 };
728 728
729 729 /*
730 730 * _init dev module to fill in the vfssw
731 731 */
732 732 if (modload("fs", "dev") == -1)
733 733 cmn_err(CE_PANIC, "Cannot _init dev module\n");
734 734
735 735 /*
736 736 * Hold vfs
737 737 */
738 738 RLOCK_VFSSW();
739 739 vsw = vfs_getvfsswbyname("dev");
740 740 VFS_INIT(&dev, &vsw->vsw_vfsops, NULL);
741 741 VFS_HOLD(&dev);
742 742
743 743 /*
744 744 * Locate mount point
745 745 */
746 746 if (lookupname("/dev", UIO_SYSSPACE, FOLLOW, NULLVPP, &mvp))
747 747 cmn_err(CE_PANIC, "Cannot find /dev\n");
748 748
749 749 /*
750 750 * Perform the mount of /dev
751 751 */
752 752 if (VFS_MOUNT(&dev, mvp, &mounta, CRED()))
753 753 cmn_err(CE_PANIC, "Cannot mount /dev 1\n");
754 754
755 755 RUNLOCK_VFSSW();
756 756
757 757 /*
758 758 * Set appropriate members and add to vfs list for mnttab display
759 759 */
760 760 vfs_setresource(&dev, "/dev", 0);
761 761 vfs_setmntpoint(&dev, "/dev", 0);
762 762
763 763 /*
764 764 * Hold the root of /dev so it won't go away
765 765 */
766 766 if (VFS_ROOT(&dev, &devdir))
767 767 cmn_err(CE_PANIC, "vfs_mountdev1: not dev root");
768 768
769 769 if (vfs_lock(&dev) != 0) {
770 770 VN_RELE(devdir);
771 771 cmn_err(CE_NOTE, "Cannot acquire vfs_lock of /dev");
772 772 return;
773 773 }
774 774
775 775 if (vn_vfswlock(mvp) != 0) {
776 776 vfs_unlock(&dev);
777 777 VN_RELE(devdir);
778 778 cmn_err(CE_NOTE, "Cannot acquire vfswlock of /dev");
779 779 return;
780 780 }
781 781
782 782 vfs_add(mvp, &dev, 0);
783 783 vn_vfsunlock(mvp);
784 784 vfs_unlock(&dev);
785 785 VN_RELE(devdir);
786 786 }
787 787
788 788 /*
789 789 * Mount required filesystem. This is done right after root is mounted.
790 790 */
791 791 static void
792 792 vfs_mountfs(char *module, char *spec, char *path)
793 793 {
794 794 struct vnode *mvp;
795 795 struct mounta mounta;
796 796 vfs_t *vfsp;
797 797
798 798 bzero(&mounta, sizeof (mounta));
799 799 mounta.flags = MS_SYSSPACE | MS_DATA;
800 800 mounta.fstype = module;
801 801 mounta.spec = spec;
802 802 mounta.dir = path;
803 803 if (lookupname(path, UIO_SYSSPACE, FOLLOW, NULLVPP, &mvp)) {
804 804 cmn_err(CE_WARN, "Cannot find %s", path);
805 805 return;
806 806 }
807 807 if (domount(NULL, &mounta, mvp, CRED(), &vfsp))
808 808 cmn_err(CE_WARN, "Cannot mount %s", path);
809 809 else
810 810 VFS_RELE(vfsp);
811 811 VN_RELE(mvp);
812 812 }
813 813
814 814 /*
815 815 * vfs_mountroot is called by main() to mount the root filesystem.
816 816 */
817 817 void
818 818 vfs_mountroot(void)
819 819 {
820 820 struct vnode *rvp = NULL;
821 821 char *path;
822 822 size_t plen;
823 823 struct vfssw *vswp;
824 824 proc_t *p;
825 825
826 826 rw_init(&vfssw_lock, NULL, RW_DEFAULT, NULL);
827 827 rw_init(&vfslist, NULL, RW_DEFAULT, NULL);
828 828
829 829 /*
830 830 * Alloc the vfs hash bucket array and locks
831 831 */
832 832 rvfs_list = kmem_zalloc(vfshsz * sizeof (rvfs_t), KM_SLEEP);
833 833
834 834 /*
835 835 * Call machine-dependent routine "rootconf" to choose a root
836 836 * file system type.
837 837 */
838 838 if (rootconf())
839 839 panic("vfs_mountroot: cannot mount root");
840 840 /*
841 841 * Get vnode for '/'. Set up rootdir, u.u_rdir and u.u_cdir
842 842 * to point to it. These are used by lookuppn() so that it
843 843 * knows where to start from ('/' or '.').
844 844 */
845 845 vfs_setmntpoint(rootvfs, "/", 0);
846 846 if (VFS_ROOT(rootvfs, &rootdir))
847 847 panic("vfs_mountroot: no root vnode");
848 848
849 849 /*
850 850 * At this point, the process tree consists of p0 and possibly some
851 851 * direct children of p0. (i.e. there are no grandchildren)
852 852 *
853 853 * Walk through them all, setting their current directory.
854 854 */
855 855 mutex_enter(&pidlock);
856 856 for (p = practive; p != NULL; p = p->p_next) {
857 857 ASSERT(p == &p0 || p->p_parent == &p0);
858 858
859 859 PTOU(p)->u_cdir = rootdir;
860 860 VN_HOLD(PTOU(p)->u_cdir);
861 861 PTOU(p)->u_rdir = NULL;
862 862 }
863 863 mutex_exit(&pidlock);
864 864
865 865 /*
866 866 * Setup the global zone's rootvp, now that it exists.
867 867 */
868 868 global_zone->zone_rootvp = rootdir;
869 869 VN_HOLD(global_zone->zone_rootvp);
870 870
871 871 /*
872 872 * Notify the module code that it can begin using the
873 873 * root filesystem instead of the boot program's services.
874 874 */
875 875 modrootloaded = 1;
876 876
877 877 /*
878 878 * Special handling for a ZFS root file system.
879 879 */
880 880 zfs_boot_init();
881 881
882 882 /*
883 883 * Set up mnttab information for root
884 884 */
885 885 vfs_setresource(rootvfs, rootfs.bo_name, 0);
886 886
887 887 /*
888 888 * Notify cluster software that the root filesystem is available.
889 889 */
890 890 clboot_mountroot();
891 891
892 892 /* Now that we're all done with the root FS, set up its vopstats */
893 893 if ((vswp = vfs_getvfsswbyvfsops(vfs_getops(rootvfs))) != NULL) {
894 894 /* Set flag for statistics collection */
895 895 if (vswp->vsw_flag & VSW_STATS) {
896 896 initialize_vopstats(&rootvfs->vfs_vopstats);
897 897 rootvfs->vfs_flag |= VFS_STATS;
898 898 rootvfs->vfs_fstypevsp =
899 899 get_fstype_vopstats(rootvfs, vswp);
900 900 rootvfs->vfs_vskap = get_vskstat_anchor(rootvfs);
901 901 }
902 902 vfs_unrefvfssw(vswp);
903 903 }
904 904
905 905 /*
906 906 * Mount /devices, /dev instance 1, /system/contract, /etc/mnttab,
907 907 * /etc/svc/volatile, /etc/dfs/sharetab, /system/object, and /proc.
908 908 */
909 909 vfs_mountdevices();
910 910 vfs_mountdev1();
911 911
912 912 vfs_mountfs("ctfs", "ctfs", CTFS_ROOT);
913 913 vfs_mountfs("proc", "/proc", "/proc");
914 914 vfs_mountfs("mntfs", "/etc/mnttab", "/etc/mnttab");
915 915 vfs_mountfs("tmpfs", "/etc/svc/volatile", "/etc/svc/volatile");
916 916 vfs_mountfs("objfs", "objfs", OBJFS_ROOT);
917 917 vfs_mountfs("bootfs", "bootfs", "/system/boot");
918 918
919 919 if (getzoneid() == GLOBAL_ZONEID) {
920 920 vfs_mountfs("sharefs", "sharefs", "/etc/dfs/sharetab");
921 921 }
922 922
923 923 if (strcmp(rootfs.bo_fstype, "zfs") != 0) {
924 924 /*
925 925 * Look up the root device via devfs so that a dv_node is
926 926 * created for it. The vnode is never VN_RELE()ed.
927 927 * We allocate more than MAXPATHLEN so that the
928 928 * buffer passed to i_ddi_prompath_to_devfspath() is
929 929 * exactly MAXPATHLEN (the function expects a buffer
930 930 * of that length).
931 931 */
932 932 plen = strlen("/devices");
933 933 path = kmem_alloc(plen + MAXPATHLEN, KM_SLEEP);
934 934 (void) strcpy(path, "/devices");
935 935
936 936 if (i_ddi_prompath_to_devfspath(rootfs.bo_name, path + plen)
937 937 != DDI_SUCCESS ||
938 938 lookupname(path, UIO_SYSSPACE, FOLLOW, NULLVPP, &rvp)) {
939 939
940 940 /* NUL terminate in case "path" has garbage */
941 941 path[plen + MAXPATHLEN - 1] = '\0';
942 942 #ifdef DEBUG
943 943 cmn_err(CE_WARN, "!Cannot lookup root device: %s",
944 944 path);
945 945 #endif
946 946 }
947 947 kmem_free(path, plen + MAXPATHLEN);
948 948 }
949 949
950 950 vfs_mnttabvp_setup();
951 951 }
952 952
953 953 /*
954 954 * Check to see if our "block device" is actually a file. If so,
955 955 * automatically add a lofi device, and keep track of this fact.
956 956 */
957 957 static int
958 958 lofi_add(const char *fsname, struct vfs *vfsp,
959 959 mntopts_t *mntopts, struct mounta *uap)
960 960 {
961 961 int fromspace = (uap->flags & MS_SYSSPACE) ?
962 962 UIO_SYSSPACE : UIO_USERSPACE;
963 963 struct lofi_ioctl *li = NULL;
964 964 struct vnode *vp = NULL;
965 965 struct pathname pn = { NULL };
966 966 ldi_ident_t ldi_id;
967 967 ldi_handle_t ldi_hdl;
968 968 vfssw_t *vfssw;
969 969 int id;
970 970 int err = 0;
971 971
972 972 if ((vfssw = vfs_getvfssw(fsname)) == NULL)
973 973 return (0);
974 974
975 975 if (!(vfssw->vsw_flag & VSW_CANLOFI)) {
976 976 vfs_unrefvfssw(vfssw);
977 977 return (0);
978 978 }
979 979
980 980 vfs_unrefvfssw(vfssw);
981 981 vfssw = NULL;
982 982
983 983 if (pn_get(uap->spec, fromspace, &pn) != 0)
984 984 return (0);
985 985
986 986 if (lookupname(uap->spec, fromspace, FOLLOW, NULL, &vp) != 0)
987 987 goto out;
988 988
989 989 if (vp->v_type != VREG)
990 990 goto out;
991 991
992 992 /* OK, this is a lofi mount. */
993 993
994 994 if ((uap->flags & (MS_REMOUNT|MS_GLOBAL)) ||
995 995 vfs_optionisset_nolock(mntopts, MNTOPT_SUID, NULL) ||
996 996 vfs_optionisset_nolock(mntopts, MNTOPT_SETUID, NULL) ||
997 997 vfs_optionisset_nolock(mntopts, MNTOPT_DEVICES, NULL)) {
998 998 err = EINVAL;
999 999 goto out;
1000 1000 }
1001 1001
1002 1002 ldi_id = ldi_ident_from_anon();
1003 1003 li = kmem_zalloc(sizeof (*li), KM_SLEEP);
1004 1004 (void) strlcpy(li->li_filename, pn.pn_path, MAXPATHLEN);
1005 1005
1006 1006 err = ldi_open_by_name("/dev/lofictl", FREAD | FWRITE, kcred,
1007 1007 &ldi_hdl, ldi_id);
1008 1008
1009 1009 if (err)
1010 1010 goto out2;
1011 1011
1012 1012 err = ldi_ioctl(ldi_hdl, LOFI_MAP_FILE, (intptr_t)li,
1013 1013 FREAD | FWRITE | FKIOCTL, kcred, &id);
1014 1014
1015 1015 (void) ldi_close(ldi_hdl, FREAD | FWRITE, kcred);
1016 1016
1017 1017 if (!err)
1018 1018 vfsp->vfs_lofi_id = id;
1019 1019
1020 1020 out2:
1021 1021 ldi_ident_release(ldi_id);
1022 1022 out:
1023 1023 if (li != NULL)
1024 1024 kmem_free(li, sizeof (*li));
1025 1025 if (vp != NULL)
1026 1026 VN_RELE(vp);
1027 1027 pn_free(&pn);
1028 1028 return (err);
1029 1029 }
1030 1030
1031 1031 static void
1032 1032 lofi_remove(struct vfs *vfsp)
1033 1033 {
1034 1034 struct lofi_ioctl *li = NULL;
1035 1035 ldi_ident_t ldi_id;
1036 1036 ldi_handle_t ldi_hdl;
1037 1037 int err;
1038 1038
1039 1039 if (vfsp->vfs_lofi_id == 0)
1040 1040 return;
1041 1041
1042 1042 ldi_id = ldi_ident_from_anon();
1043 1043
1044 1044 li = kmem_zalloc(sizeof (*li), KM_SLEEP);
1045 1045 li->li_id = vfsp->vfs_lofi_id;
1046 1046 li->li_cleanup = B_TRUE;
1047 1047
1048 1048 err = ldi_open_by_name("/dev/lofictl", FREAD | FWRITE, kcred,
1049 1049 &ldi_hdl, ldi_id);
1050 1050
1051 1051 if (err)
1052 1052 goto out;
1053 1053
1054 1054 err = ldi_ioctl(ldi_hdl, LOFI_UNMAP_FILE_MINOR, (intptr_t)li,
1055 1055 FREAD | FWRITE | FKIOCTL, kcred, NULL);
1056 1056
1057 1057 (void) ldi_close(ldi_hdl, FREAD | FWRITE, kcred);
1058 1058
1059 1059 if (!err)
1060 1060 vfsp->vfs_lofi_id = 0;
1061 1061
1062 1062 out:
1063 1063 ldi_ident_release(ldi_id);
1064 1064 if (li != NULL)
1065 1065 kmem_free(li, sizeof (*li));
1066 1066 }
1067 1067
1068 1068 /*
1069 1069 * Common mount code. Called from the system call entry point, from autofs,
1070 1070 * nfsv4 trigger mounts, and from pxfs.
1071 1071 *
1072 1072 * Takes the effective file system type, mount arguments, the mount point
1073 1073 * vnode, flags specifying whether the mount is a remount and whether it
1074 1074 * should be entered into the vfs list, and credentials. Fills in its vfspp
1075 1075 * parameter with the mounted file system instance's vfs.
1076 1076 *
1077 1077 * Note that the effective file system type is specified as a string. It may
1078 1078 * be null, in which case it's determined from the mount arguments, and may
1079 1079 * differ from the type specified in the mount arguments; this is a hook to
1080 1080 * allow interposition when instantiating file system instances.
1081 1081 *
1082 1082 * The caller is responsible for releasing its own hold on the mount point
1083 1083 * vp (this routine does its own hold when necessary).
1084 1084 * Also note that for remounts, the mount point vp should be the vnode for
1085 1085 * the root of the file system rather than the vnode that the file system
1086 1086 * is mounted on top of.
1087 1087 */
1088 1088 int
1089 1089 domount(char *fsname, struct mounta *uap, vnode_t *vp, struct cred *credp,
1090 1090 struct vfs **vfspp)
1091 1091 {
1092 1092 struct vfssw *vswp;
1093 1093 vfsops_t *vfsops;
1094 1094 struct vfs *vfsp;
1095 1095 struct vnode *bvp;
1096 1096 dev_t bdev = 0;
1097 1097 mntopts_t mnt_mntopts;
1098 1098 int error = 0;
1099 1099 int copyout_error = 0;
1100 1100 int ovflags;
1101 1101 char *opts = uap->optptr;
1102 1102 char *inargs = opts;
1103 1103 int optlen = uap->optlen;
1104 1104 int remount;
1105 1105 int rdonly;
1106 1106 int nbmand = 0;
1107 1107 int delmip = 0;
1108 1108 int addmip = 0;
1109 1109 int splice = ((uap->flags & MS_NOSPLICE) == 0);
1110 1110 int fromspace = (uap->flags & MS_SYSSPACE) ?
1111 1111 UIO_SYSSPACE : UIO_USERSPACE;
1112 1112 char *resource = NULL, *mountpt = NULL;
1113 1113 refstr_t *oldresource, *oldmntpt;
1114 1114 struct pathname pn, rpn;
1115 1115 vsk_anchor_t *vskap;
1116 1116 char fstname[FSTYPSZ];
1117 1117 zone_t *zone;
1118 1118
1119 1119 /*
1120 1120 * The v_flag value for the mount point vp is permanently set
1121 1121 * to VVFSLOCK so that no one bypasses the vn_vfs*locks routine
1122 1122 * for mount point locking.
1123 1123 */
1124 1124 mutex_enter(&vp->v_lock);
1125 1125 vp->v_flag |= VVFSLOCK;
1126 1126 mutex_exit(&vp->v_lock);
1127 1127
1128 1128 mnt_mntopts.mo_count = 0;
1129 1129 /*
1130 1130 * Find the ops vector to use to invoke the file system-specific mount
1131 1131 * method. If the fsname argument is non-NULL, use it directly.
1132 1132 * Otherwise, dig the file system type information out of the mount
1133 1133 * arguments.
1134 1134 *
1135 1135 * A side effect is to hold the vfssw entry.
1136 1136 *
1137 1137 * Mount arguments can be specified in several ways, which are
1138 1138 * distinguished by flag bit settings. The preferred way is to set
1139 1139 * MS_OPTIONSTR, indicating an 8 argument mount with the file system
1140 1140 * type supplied as a character string and the last two arguments
1141 1141 * being a pointer to a character buffer and the size of the buffer.
1142 1142 * On entry, the buffer holds a null terminated list of options; on
1143 1143 * return, the string is the list of options the file system
1144 1144 * recognized. If MS_DATA is set arguments five and six point to a
1145 1145 * block of binary data which the file system interprets.
1146 1146 * A further wrinkle is that some callers don't set MS_FSS and MS_DATA
1147 1147 * consistently with these conventions. To handle them, we check to
1148 1148 * see whether the pointer to the file system name has a numeric value
1149 1149 * less than 256. If so, we treat it as an index.
1150 1150 */
1151 1151 if (fsname != NULL) {
1152 1152 if ((vswp = vfs_getvfssw(fsname)) == NULL) {
1153 1153 return (EINVAL);
1154 1154 }
1155 1155 } else if (uap->flags & (MS_OPTIONSTR | MS_DATA | MS_FSS)) {
1156 1156 size_t n;
1157 1157 uint_t fstype;
1158 1158
1159 1159 fsname = fstname;
1160 1160
1161 1161 if ((fstype = (uintptr_t)uap->fstype) < 256) {
1162 1162 RLOCK_VFSSW();
1163 1163 if (fstype == 0 || fstype >= nfstype ||
1164 1164 !ALLOCATED_VFSSW(&vfssw[fstype])) {
1165 1165 RUNLOCK_VFSSW();
1166 1166 return (EINVAL);
1167 1167 }
1168 1168 (void) strcpy(fsname, vfssw[fstype].vsw_name);
1169 1169 RUNLOCK_VFSSW();
1170 1170 if ((vswp = vfs_getvfssw(fsname)) == NULL)
1171 1171 return (EINVAL);
1172 1172 } else {
1173 1173 /*
1174 1174 * Handle either kernel or user address space.
1175 1175 */
1176 1176 if (uap->flags & MS_SYSSPACE) {
1177 1177 error = copystr(uap->fstype, fsname,
1178 1178 FSTYPSZ, &n);
1179 1179 } else {
1180 1180 error = copyinstr(uap->fstype, fsname,
1181 1181 FSTYPSZ, &n);
1182 1182 }
1183 1183 if (error) {
1184 1184 if (error == ENAMETOOLONG)
1185 1185 return (EINVAL);
1186 1186 return (error);
1187 1187 }
1188 1188 if ((vswp = vfs_getvfssw(fsname)) == NULL)
1189 1189 return (EINVAL);
1190 1190 }
1191 1191 } else {
1192 1192 if ((vswp = vfs_getvfsswbyvfsops(vfs_getops(rootvfs))) == NULL)
1193 1193 return (EINVAL);
1194 1194 fsname = vswp->vsw_name;
1195 1195 }
1196 1196 if (!VFS_INSTALLED(vswp))
1197 1197 return (EINVAL);
1198 1198
1199 1199 if ((error = secpolicy_fs_allowed_mount(fsname)) != 0) {
1200 1200 vfs_unrefvfssw(vswp);
1201 1201 return (error);
1202 1202 }
1203 1203
1204 1204 vfsops = &vswp->vsw_vfsops;
1205 1205
1206 1206 vfs_copyopttbl(&vswp->vsw_optproto, &mnt_mntopts);
1207 1207 /*
1208 1208 * Fetch mount options and parse them for generic vfs options
1209 1209 */
1210 1210 if (uap->flags & MS_OPTIONSTR) {
1211 1211 /*
1212 1212 * Limit the buffer size
1213 1213 */
1214 1214 if (optlen < 0 || optlen > MAX_MNTOPT_STR) {
1215 1215 error = EINVAL;
1216 1216 goto errout;
1217 1217 }
1218 1218 if ((uap->flags & MS_SYSSPACE) == 0) {
1219 1219 inargs = kmem_alloc(MAX_MNTOPT_STR, KM_SLEEP);
1220 1220 inargs[0] = '\0';
1221 1221 if (optlen) {
1222 1222 error = copyinstr(opts, inargs, (size_t)optlen,
1223 1223 NULL);
1224 1224 if (error) {
1225 1225 goto errout;
1226 1226 }
1227 1227 }
1228 1228 }
1229 1229 vfs_parsemntopts(&mnt_mntopts, inargs, 0);
1230 1230 }
1231 1231 /*
1232 1232 * Flag bits override the options string.
1233 1233 */
1234 1234 if (uap->flags & MS_REMOUNT)
1235 1235 vfs_setmntopt_nolock(&mnt_mntopts, MNTOPT_REMOUNT, NULL, 0, 0);
1236 1236 if (uap->flags & MS_RDONLY)
1237 1237 vfs_setmntopt_nolock(&mnt_mntopts, MNTOPT_RO, NULL, 0, 0);
1238 1238 if (uap->flags & MS_NOSUID)
1239 1239 vfs_setmntopt_nolock(&mnt_mntopts, MNTOPT_NOSUID, NULL, 0, 0);
1240 1240
1241 1241 /*
1242 1242 * Check if this is a remount; must be set in the option string and
1243 1243 * the file system must support a remount option.
1244 1244 */
1245 1245 if (remount = vfs_optionisset_nolock(&mnt_mntopts,
1246 1246 MNTOPT_REMOUNT, NULL)) {
1247 1247 if (!(vswp->vsw_flag & VSW_CANREMOUNT)) {
1248 1248 error = ENOTSUP;
1249 1249 goto errout;
1250 1250 }
1251 1251 uap->flags |= MS_REMOUNT;
1252 1252 }
1253 1253
1254 1254 /*
1255 1255 * uap->flags and vfs_optionisset() should agree.
1256 1256 */
1257 1257 if (rdonly = vfs_optionisset_nolock(&mnt_mntopts, MNTOPT_RO, NULL)) {
1258 1258 uap->flags |= MS_RDONLY;
1259 1259 }
1260 1260 if (vfs_optionisset_nolock(&mnt_mntopts, MNTOPT_NOSUID, NULL)) {
1261 1261 uap->flags |= MS_NOSUID;
1262 1262 }
1263 1263 nbmand = vfs_optionisset_nolock(&mnt_mntopts, MNTOPT_NBMAND, NULL);
1264 1264 ASSERT(splice || !remount);
1265 1265 /*
1266 1266 * If we are splicing the fs into the namespace,
1267 1267 * perform mount point checks.
1268 1268 *
1269 1269 * We want to resolve the path for the mount point to eliminate
1270 1270 * '.' and ".." and symlinks in mount points; we can't do the
1271 1271 * same for the resource string, since it would turn
1272 1272 * "/dev/dsk/c0t0d0s0" into "/devices/pci@...". We need to do
1273 1273 * this before grabbing vn_vfswlock(), because otherwise we
1274 1274 * would deadlock with lookuppn().
1275 1275 */
1276 1276 if (splice) {
1277 1277 ASSERT(vp->v_count > 0);
1278 1278
1279 1279 /*
1280 1280 * Pick up mount point and device from appropriate space.
1281 1281 */
1282 1282 if (pn_get(uap->spec, fromspace, &pn) == 0) {
1283 1283 resource = kmem_alloc(pn.pn_pathlen + 1,
1284 1284 KM_SLEEP);
1285 1285 (void) strcpy(resource, pn.pn_path);
1286 1286 pn_free(&pn);
1287 1287 }
1288 1288 /*
1289 1289 * Do a lookupname prior to taking the
1290 1290 * writelock. Mark this as completed if
1291 1291 * successful for later cleanup and addition to
1292 1292 * the mount in progress table.
1293 1293 */
1294 - if ((uap->flags & MS_GLOBAL) == 0 &&
1294 + if ((vswp->vsw_flag & VSW_MOUNTDEV) &&
1295 + (uap->flags & MS_GLOBAL) == 0 &&
1295 1296 lookupname(uap->spec, fromspace,
1296 1297 FOLLOW, NULL, &bvp) == 0) {
1297 1298 addmip = 1;
1298 1299 }
1299 1300
1300 1301 if ((error = pn_get(uap->dir, fromspace, &pn)) == 0) {
1301 1302 pathname_t *pnp;
1302 1303
1303 1304 if (*pn.pn_path != '/') {
1304 1305 error = EINVAL;
1305 1306 pn_free(&pn);
1306 1307 goto errout;
1307 1308 }
1308 1309 pn_alloc(&rpn);
1309 1310 /*
1310 1311 * Kludge to prevent autofs from deadlocking with
1311 1312 * itself when it calls domount().
1312 1313 *
1313 1314 * If autofs is calling, it is because it is doing
1314 1315 * (autofs) mounts in the process of an NFS mount. A
1315 1316 * lookuppn() here would cause us to block waiting for
1316 1317 * said NFS mount to complete, which can't since this
1317 1318 * is the thread that was supposed to doing it.
1318 1319 */
1319 1320 if (fromspace == UIO_USERSPACE) {
1320 1321 if ((error = lookuppn(&pn, &rpn, FOLLOW, NULL,
1321 1322 NULL)) == 0) {
1322 1323 pnp = &rpn;
1323 1324 } else {
1324 1325 /*
1325 1326 * The file disappeared or otherwise
1326 1327 * became inaccessible since we opened
1327 1328 * it; might as well fail the mount
1328 1329 * since the mount point is no longer
1329 1330 * accessible.
1330 1331 */
1331 1332 pn_free(&rpn);
1332 1333 pn_free(&pn);
1333 1334 goto errout;
1334 1335 }
1335 1336 } else {
1336 1337 pnp = &pn;
1337 1338 }
1338 1339 mountpt = kmem_alloc(pnp->pn_pathlen + 1, KM_SLEEP);
1339 1340 (void) strcpy(mountpt, pnp->pn_path);
1340 1341
1341 1342 /*
1342 1343 * If the addition of the zone's rootpath
1343 1344 * would push us over a total path length
1344 1345 * of MAXPATHLEN, we fail the mount with
1345 1346 * ENAMETOOLONG, which is what we would have
1346 1347 * gotten if we were trying to perform the same
1347 1348 * mount in the global zone.
1348 1349 *
1349 1350 * strlen() doesn't count the trailing
1350 1351 * '\0', but zone_rootpathlen counts both a
1351 1352 * trailing '/' and the terminating '\0'.
1352 1353 */
1353 1354 if ((curproc->p_zone->zone_rootpathlen - 1 +
1354 1355 strlen(mountpt)) > MAXPATHLEN ||
1355 1356 (resource != NULL &&
1356 1357 (curproc->p_zone->zone_rootpathlen - 1 +
1357 1358 strlen(resource)) > MAXPATHLEN)) {
1358 1359 error = ENAMETOOLONG;
1359 1360 }
1360 1361
1361 1362 pn_free(&rpn);
1362 1363 pn_free(&pn);
1363 1364 }
1364 1365
1365 1366 if (error)
1366 1367 goto errout;
1367 1368
1368 1369 /*
1369 1370 * Prevent path name resolution from proceeding past
1370 1371 * the mount point.
1371 1372 */
1372 1373 if (vn_vfswlock(vp) != 0) {
1373 1374 error = EBUSY;
1374 1375 goto errout;
1375 1376 }
1376 1377
1377 1378 /*
1378 1379 * Verify that it's legitimate to establish a mount on
1379 1380 * the prospective mount point.
1380 1381 */
1381 1382 if (vn_mountedvfs(vp) != NULL) {
1382 1383 /*
1383 1384 * The mount point lock was obtained after some
1384 1385 * other thread raced through and established a mount.
1385 1386 */
1386 1387 vn_vfsunlock(vp);
1387 1388 error = EBUSY;
1388 1389 goto errout;
1389 1390 }
1390 1391 if (vp->v_flag & VNOMOUNT) {
1391 1392 vn_vfsunlock(vp);
1392 1393 error = EINVAL;
1393 1394 goto errout;
1394 1395 }
1395 1396 }
1396 1397 if ((uap->flags & (MS_DATA | MS_OPTIONSTR)) == 0) {
1397 1398 uap->dataptr = NULL;
1398 1399 uap->datalen = 0;
1399 1400 }
1400 1401
1401 1402 /*
1402 1403 * If this is a remount, we don't want to create a new VFS.
1403 1404 * Instead, we pass the existing one with a remount flag.
1404 1405 */
1405 1406 if (remount) {
1406 1407 /*
1407 1408 * Confirm that the mount point is the root vnode of the
1408 1409 * file system that is being remounted.
1409 1410 * This can happen if the user specifies a different
1410 1411 * mount point directory pathname in the (re)mount command.
1411 1412 *
1412 1413 * Code below can only be reached if splice is true, so it's
1413 1414 * safe to do vn_vfsunlock() here.
1414 1415 */
1415 1416 if ((vp->v_flag & VROOT) == 0) {
1416 1417 vn_vfsunlock(vp);
1417 1418 error = ENOENT;
1418 1419 goto errout;
1419 1420 }
1420 1421 /*
1421 1422 * Disallow making file systems read-only unless file system
1422 1423 * explicitly allows it in its vfssw. Ignore other flags.
1423 1424 */
1424 1425 if (rdonly && vn_is_readonly(vp) == 0 &&
1425 1426 (vswp->vsw_flag & VSW_CANRWRO) == 0) {
1426 1427 vn_vfsunlock(vp);
1427 1428 error = EINVAL;
1428 1429 goto errout;
1429 1430 }
1430 1431 /*
1431 1432 * Disallow changing the NBMAND disposition of the file
1432 1433 * system on remounts.
1433 1434 */
1434 1435 if ((nbmand && ((vp->v_vfsp->vfs_flag & VFS_NBMAND) == 0)) ||
1435 1436 (!nbmand && (vp->v_vfsp->vfs_flag & VFS_NBMAND))) {
1436 1437 vn_vfsunlock(vp);
1437 1438 error = EINVAL;
1438 1439 goto errout;
1439 1440 }
1440 1441 vfsp = vp->v_vfsp;
1441 1442 ovflags = vfsp->vfs_flag;
1442 1443 vfsp->vfs_flag |= VFS_REMOUNT;
1443 1444 vfsp->vfs_flag &= ~VFS_RDONLY;
1444 1445 } else {
1445 1446 vfsp = vfs_alloc(KM_SLEEP);
1446 1447 VFS_INIT(vfsp, vfsops, NULL);
1447 1448 }
1448 1449
1449 1450 VFS_HOLD(vfsp);
1450 1451
1451 1452 if ((error = lofi_add(fsname, vfsp, &mnt_mntopts, uap)) != 0) {
1452 1453 if (!remount) {
1453 1454 if (splice)
1454 1455 vn_vfsunlock(vp);
1455 1456 vfs_free(vfsp);
1456 1457 } else {
1457 1458 vn_vfsunlock(vp);
1458 1459 VFS_RELE(vfsp);
1459 1460 }
1460 1461 goto errout;
1461 1462 }
1462 1463
1463 1464 /*
1464 1465 * PRIV_SYS_MOUNT doesn't mean you can become root.
1465 1466 */
1466 1467 if (vfsp->vfs_lofi_id != 0) {
1467 1468 uap->flags |= MS_NOSUID;
1468 1469 vfs_setmntopt_nolock(&mnt_mntopts, MNTOPT_NOSUID, NULL, 0, 0);
1469 1470 }
1470 1471
1471 1472 /*
1472 1473 * The vfs_reflock is not used anymore; the code below explicitly
1473 1474 * holds it, preventing others from accessing it directly.
1474 1475 */
1475 1476 if ((sema_tryp(&vfsp->vfs_reflock) == 0) &&
1476 1477 !(vfsp->vfs_flag & VFS_REMOUNT))
1477 1478 cmn_err(CE_WARN,
1478 1479 "mount type %s couldn't get vfs_reflock", vswp->vsw_name);
1479 1480
1480 1481 /*
1481 1482 * Lock the vfs. If this is a remount we want to avoid spurious umount
1482 1483 * failures that happen as a side-effect of fsflush() and other mount
1483 1484 * and unmount operations that might be going on simultaneously and
1484 1485 * may have locked the vfs currently. To not return EBUSY immediately
1485 1486 * here we use vfs_lock_wait() instead vfs_lock() for the remount case.
1486 1487 */
1487 1488 if (!remount) {
1488 1489 if (error = vfs_lock(vfsp)) {
1489 1490 vfsp->vfs_flag = ovflags;
1490 1491
1491 1492 lofi_remove(vfsp);
1492 1493
1493 1494 if (splice)
1494 1495 vn_vfsunlock(vp);
1495 1496 vfs_free(vfsp);
1496 1497 goto errout;
1497 1498 }
1498 1499 } else {
1499 1500 vfs_lock_wait(vfsp);
1500 1501 }
1501 1502
1502 1503 /*
1503 1504 * Add device to mount in progress table, global mounts require special
1504 1505 * handling. It is possible that we have already done the lookupname
1505 1506 * on a spliced, non-global fs. If so, we don't want to do it again
1506 1507 * since we cannot do a lookupname after taking the
1507 1508 * wlock above. This case is for a non-spliced, non-global filesystem.
1508 1509 */
1509 1510 if (!addmip) {
1510 - if ((uap->flags & MS_GLOBAL) == 0 &&
1511 + if ((vswp->vsw_flag & VSW_MOUNTDEV) &&
1512 + (uap->flags & MS_GLOBAL) == 0 &&
1511 1513 lookupname(uap->spec, fromspace, FOLLOW, NULL, &bvp) == 0) {
1512 1514 addmip = 1;
1513 1515 }
1514 1516 }
1515 1517
1516 1518 if (addmip) {
1517 1519 vnode_t *lvp = NULL;
1518 1520
1519 1521 error = vfs_get_lofi(vfsp, &lvp);
1520 1522 if (error > 0) {
1521 1523 lofi_remove(vfsp);
1522 1524
1523 1525 if (splice)
1524 1526 vn_vfsunlock(vp);
1525 1527 vfs_unlock(vfsp);
1526 1528
1527 1529 if (remount) {
1528 1530 VFS_RELE(vfsp);
1529 1531 } else {
1530 1532 vfs_free(vfsp);
1531 1533 }
1532 1534
1533 1535 goto errout;
1534 1536 } else if (error == -1) {
1535 1537 bdev = bvp->v_rdev;
1536 1538 VN_RELE(bvp);
1537 1539 } else {
1538 1540 bdev = lvp->v_rdev;
1539 1541 VN_RELE(lvp);
1540 1542 VN_RELE(bvp);
1541 1543 }
1542 1544
1543 1545 vfs_addmip(bdev, vfsp);
1544 1546 addmip = 0;
1545 1547 delmip = 1;
1546 1548 }
1547 1549 /*
1548 1550 * Invalidate cached entry for the mount point.
1549 1551 */
1550 1552 if (splice)
1551 1553 dnlc_purge_vp(vp);
1552 1554
1553 1555 /*
1554 1556 * If we have an option string but the filesystem doesn't supply a
1555 1557 * prototype options table, create a table with the global
1556 1558 * options and sufficient room to accept all the options in the
1557 1559 * string. Then parse the passed in option string
1558 1560 * accepting all the options in the string. This gives us an
1559 1561 * option table with all the proper cancel properties for the
1560 1562 * global options.
1561 1563 *
1562 1564 * Filesystems that supply a prototype options table are handled
1563 1565 * earlier in this function.
1564 1566 */
1565 1567 if (uap->flags & MS_OPTIONSTR) {
1566 1568 if (!(vswp->vsw_flag & VSW_HASPROTO)) {
1567 1569 mntopts_t tmp_mntopts;
1568 1570
1569 1571 tmp_mntopts.mo_count = 0;
1570 1572 vfs_createopttbl_extend(&tmp_mntopts, inargs,
1571 1573 &mnt_mntopts);
1572 1574 vfs_parsemntopts(&tmp_mntopts, inargs, 1);
1573 1575 vfs_swapopttbl_nolock(&mnt_mntopts, &tmp_mntopts);
1574 1576 vfs_freeopttbl(&tmp_mntopts);
1575 1577 }
1576 1578 }
1577 1579
1578 1580 /*
1579 1581 * Serialize with zone state transitions.
1580 1582 * See vfs_list_add; zone mounted into is:
1581 1583 * zone_find_by_path(refstr_value(vfsp->vfs_mntpt))
1582 1584 * not the zone doing the mount (curproc->p_zone), but if we're already
1583 1585 * inside a NGZ, then we know what zone we are.
1584 1586 */
1585 1587 if (INGLOBALZONE(curproc)) {
1586 1588 zone = zone_find_by_path(mountpt);
1587 1589 ASSERT(zone != NULL);
1588 1590 } else {
1589 1591 zone = curproc->p_zone;
1590 1592 /*
1591 1593 * zone_find_by_path does a hold, so do one here too so that
1592 1594 * we can do a zone_rele after mount_completed.
1593 1595 */
1594 1596 zone_hold(zone);
1595 1597 }
1596 1598 mount_in_progress(zone);
1597 1599 /*
1598 1600 * Instantiate (or reinstantiate) the file system. If appropriate,
1599 1601 * splice it into the file system name space.
1600 1602 *
1601 1603 * We want VFS_MOUNT() to be able to override the vfs_resource
1602 1604 * string if necessary (ie, mntfs), and also for a remount to
1603 1605 * change the same (necessary when remounting '/' during boot).
1604 1606 * So we set up vfs_mntpt and vfs_resource to what we think they
1605 1607 * should be, then hand off control to VFS_MOUNT() which can
1606 1608 * override this.
1607 1609 *
1608 1610 * For safety's sake, when changing vfs_resource or vfs_mntpt of
1609 1611 * a vfs which is on the vfs list (i.e. during a remount), we must
1610 1612 * never set those fields to NULL. Several bits of code make
1611 1613 * assumptions that the fields are always valid.
1612 1614 */
1613 1615 vfs_swapopttbl(&mnt_mntopts, &vfsp->vfs_mntopts);
1614 1616 if (remount) {
1615 1617 if ((oldresource = vfsp->vfs_resource) != NULL)
1616 1618 refstr_hold(oldresource);
1617 1619 if ((oldmntpt = vfsp->vfs_mntpt) != NULL)
1618 1620 refstr_hold(oldmntpt);
1619 1621 }
1620 1622 vfs_setresource(vfsp, resource, 0);
1621 1623 vfs_setmntpoint(vfsp, mountpt, 0);
1622 1624
1623 1625 /*
1624 1626 * going to mount on this vnode, so notify.
1625 1627 */
1626 1628 vnevent_mountedover(vp, NULL);
1627 1629 error = VFS_MOUNT(vfsp, vp, uap, credp);
1628 1630
1629 1631 if (uap->flags & MS_RDONLY)
1630 1632 vfs_setmntopt(vfsp, MNTOPT_RO, NULL, 0);
1631 1633 if (uap->flags & MS_NOSUID)
1632 1634 vfs_setmntopt(vfsp, MNTOPT_NOSUID, NULL, 0);
1633 1635 if (uap->flags & MS_GLOBAL)
1634 1636 vfs_setmntopt(vfsp, MNTOPT_GLOBAL, NULL, 0);
1635 1637
1636 1638 if (error) {
1637 1639 lofi_remove(vfsp);
1638 1640
1639 1641 if (remount) {
1640 1642 /* put back pre-remount options */
1641 1643 vfs_swapopttbl(&mnt_mntopts, &vfsp->vfs_mntopts);
1642 1644 vfs_setmntpoint(vfsp, refstr_value(oldmntpt),
1643 1645 VFSSP_VERBATIM);
1644 1646 if (oldmntpt)
1645 1647 refstr_rele(oldmntpt);
1646 1648 vfs_setresource(vfsp, refstr_value(oldresource),
1647 1649 VFSSP_VERBATIM);
1648 1650 if (oldresource)
1649 1651 refstr_rele(oldresource);
1650 1652 vfsp->vfs_flag = ovflags;
1651 1653 vfs_unlock(vfsp);
1652 1654 VFS_RELE(vfsp);
1653 1655 } else {
1654 1656 vfs_unlock(vfsp);
1655 1657 vfs_freemnttab(vfsp);
1656 1658 vfs_free(vfsp);
1657 1659 }
1658 1660 } else {
1659 1661 /*
1660 1662 * Set the mount time to now
1661 1663 */
1662 1664 vfsp->vfs_mtime = ddi_get_time();
1663 1665 if (remount) {
1664 1666 vfsp->vfs_flag &= ~VFS_REMOUNT;
1665 1667 if (oldresource)
1666 1668 refstr_rele(oldresource);
1667 1669 if (oldmntpt)
1668 1670 refstr_rele(oldmntpt);
1669 1671 } else if (splice) {
1670 1672 /*
1671 1673 * Link vfsp into the name space at the mount
1672 1674 * point. Vfs_add() is responsible for
1673 1675 * holding the mount point which will be
1674 1676 * released when vfs_remove() is called.
1675 1677 */
1676 1678 vfs_add(vp, vfsp, uap->flags);
1677 1679 } else {
1678 1680 /*
1679 1681 * Hold the reference to file system which is
1680 1682 * not linked into the name space.
1681 1683 */
1682 1684 vfsp->vfs_zone = NULL;
1683 1685 VFS_HOLD(vfsp);
1684 1686 vfsp->vfs_vnodecovered = NULL;
1685 1687 }
1686 1688 /*
1687 1689 * Set flags for global options encountered
1688 1690 */
1689 1691 if (vfs_optionisset(vfsp, MNTOPT_RO, NULL))
1690 1692 vfsp->vfs_flag |= VFS_RDONLY;
1691 1693 else
1692 1694 vfsp->vfs_flag &= ~VFS_RDONLY;
1693 1695 if (vfs_optionisset(vfsp, MNTOPT_NOSUID, NULL)) {
1694 1696 vfsp->vfs_flag |= (VFS_NOSETUID|VFS_NODEVICES);
1695 1697 } else {
1696 1698 if (vfs_optionisset(vfsp, MNTOPT_NODEVICES, NULL))
1697 1699 vfsp->vfs_flag |= VFS_NODEVICES;
1698 1700 else
1699 1701 vfsp->vfs_flag &= ~VFS_NODEVICES;
1700 1702 if (vfs_optionisset(vfsp, MNTOPT_NOSETUID, NULL))
1701 1703 vfsp->vfs_flag |= VFS_NOSETUID;
1702 1704 else
1703 1705 vfsp->vfs_flag &= ~VFS_NOSETUID;
1704 1706 }
1705 1707 if (vfs_optionisset(vfsp, MNTOPT_NBMAND, NULL))
1706 1708 vfsp->vfs_flag |= VFS_NBMAND;
1707 1709 else
1708 1710 vfsp->vfs_flag &= ~VFS_NBMAND;
1709 1711
1710 1712 if (vfs_optionisset(vfsp, MNTOPT_XATTR, NULL))
1711 1713 vfsp->vfs_flag |= VFS_XATTR;
1712 1714 else
1713 1715 vfsp->vfs_flag &= ~VFS_XATTR;
1714 1716
1715 1717 if (vfs_optionisset(vfsp, MNTOPT_NOEXEC, NULL))
1716 1718 vfsp->vfs_flag |= VFS_NOEXEC;
1717 1719 else
1718 1720 vfsp->vfs_flag &= ~VFS_NOEXEC;
1719 1721
1720 1722 /*
1721 1723 * Now construct the output option string of options
1722 1724 * we recognized.
1723 1725 */
1724 1726 if (uap->flags & MS_OPTIONSTR) {
1725 1727 vfs_list_read_lock();
1726 1728 copyout_error = vfs_buildoptionstr(
1727 1729 &vfsp->vfs_mntopts, inargs, optlen);
1728 1730 vfs_list_unlock();
1729 1731 if (copyout_error == 0 &&
1730 1732 (uap->flags & MS_SYSSPACE) == 0) {
1731 1733 copyout_error = copyoutstr(inargs, opts,
1732 1734 optlen, NULL);
1733 1735 }
1734 1736 }
1735 1737
1736 1738 /*
1737 1739 * If this isn't a remount, set up the vopstats before
1738 1740 * anyone can touch this. We only allow spliced file
1739 1741 * systems (file systems which are in the namespace) to
1740 1742 * have the VFS_STATS flag set.
1741 1743 * NOTE: PxFS mounts the underlying file system with
1742 1744 * MS_NOSPLICE set and copies those vfs_flags to its private
1743 1745 * vfs structure. As a result, PxFS should never have
1744 1746 * the VFS_STATS flag or else we might access the vfs
1745 1747 * statistics-related fields prior to them being
1746 1748 * properly initialized.
1747 1749 */
1748 1750 if (!remount && (vswp->vsw_flag & VSW_STATS) && splice) {
1749 1751 initialize_vopstats(&vfsp->vfs_vopstats);
1750 1752 /*
1751 1753 * We need to set vfs_vskap to NULL because there's
1752 1754 * a chance it won't be set below. This is checked
1753 1755 * in teardown_vopstats() so we can't have garbage.
1754 1756 */
1755 1757 vfsp->vfs_vskap = NULL;
1756 1758 vfsp->vfs_flag |= VFS_STATS;
1757 1759 vfsp->vfs_fstypevsp = get_fstype_vopstats(vfsp, vswp);
1758 1760 }
1759 1761
1760 1762 if (vswp->vsw_flag & VSW_XID)
1761 1763 vfsp->vfs_flag |= VFS_XID;
1762 1764
1763 1765 vfs_unlock(vfsp);
1764 1766 }
1765 1767 mount_completed(zone);
1766 1768 zone_rele(zone);
1767 1769 if (splice)
1768 1770 vn_vfsunlock(vp);
1769 1771
1770 1772 if ((error == 0) && (copyout_error == 0)) {
1771 1773 if (!remount) {
1772 1774 /*
1773 1775 * Don't call get_vskstat_anchor() while holding
1774 1776 * locks since it allocates memory and calls
1775 1777 * VFS_STATVFS(). For NFS, the latter can generate
1776 1778 * an over-the-wire call.
1777 1779 */
1778 1780 vskap = get_vskstat_anchor(vfsp);
1779 1781 /* Only take the lock if we have something to do */
1780 1782 if (vskap != NULL) {
1781 1783 vfs_lock_wait(vfsp);
1782 1784 if (vfsp->vfs_flag & VFS_STATS) {
1783 1785 vfsp->vfs_vskap = vskap;
1784 1786 }
1785 1787 vfs_unlock(vfsp);
1786 1788 }
1787 1789 }
1788 1790 /* Return vfsp to caller. */
1789 1791 *vfspp = vfsp;
1790 1792 }
1791 1793 errout:
1792 1794 vfs_freeopttbl(&mnt_mntopts);
1793 1795 if (resource != NULL)
1794 1796 kmem_free(resource, strlen(resource) + 1);
1795 1797 if (mountpt != NULL)
1796 1798 kmem_free(mountpt, strlen(mountpt) + 1);
1797 1799 /*
1798 1800 	 * It is possible we errored prior to adding to the mount-in-progress
1799 1801 	 * table. We must free the vnode we acquired with a successful lookupname.
1800 1802 */
1801 1803 if (addmip)
1802 1804 VN_RELE(bvp);
1803 1805 if (delmip)
1804 1806 vfs_delmip(vfsp);
1805 1807 ASSERT(vswp != NULL);
1806 1808 vfs_unrefvfssw(vswp);
1807 1809 if (inargs != opts)
1808 1810 kmem_free(inargs, MAX_MNTOPT_STR);
1809 1811 if (copyout_error) {
1810 1812 lofi_remove(vfsp);
1811 1813 VFS_RELE(vfsp);
1812 1814 error = copyout_error;
1813 1815 }
1814 1816 return (error);
1815 1817 }
1816 1818
1817 1819 static void
1818 1820 vfs_setpath(
1819 1821 struct vfs *vfsp, /* vfs being updated */
1820 1822 refstr_t **refp, /* Ref-count string to contain the new path */
1821 1823 const char *newpath, /* Path to add to refp (above) */
1822 1824 uint32_t flag) /* flag */
1823 1825 {
1824 1826 size_t len;
1825 1827 refstr_t *ref;
1826 1828 zone_t *zone = curproc->p_zone;
1827 1829 char *sp;
1828 1830 int have_list_lock = 0;
1829 1831
1830 1832 ASSERT(!VFS_ON_LIST(vfsp) || vfs_lock_held(vfsp));
1831 1833
1832 1834 /*
1833 1835 * New path must be less than MAXPATHLEN because mntfs
1834 1836 * will only display up to MAXPATHLEN bytes. This is currently
1835 1837 * safe, because domount() uses pn_get(), and other callers
1836 1838 * similarly cap the size to fewer than MAXPATHLEN bytes.
1837 1839 */
1838 1840
1839 1841 ASSERT(strlen(newpath) < MAXPATHLEN);
1840 1842
1841 1843 /* mntfs requires consistency while vfs list lock is held */
1842 1844
1843 1845 if (VFS_ON_LIST(vfsp)) {
1844 1846 have_list_lock = 1;
1845 1847 vfs_list_lock();
1846 1848 }
1847 1849
1848 1850 if (*refp != NULL)
1849 1851 refstr_rele(*refp);
1850 1852
1851 1853 /*
1852 1854 * If we are in a non-global zone then we prefix the supplied path,
1853 1855 * newpath, with the zone's root path, with two exceptions. The first
1854 1856 * is where we have been explicitly directed to avoid doing so; this
1855 1857 * will be the case following a failed remount, where the path supplied
1856 1858 * will be a saved version which must now be restored. The second
1857 1859 * exception is where newpath is not a pathname but a descriptive name,
1858 1860 * e.g. "procfs".
1859 1861 */
1860 1862 if (zone == global_zone || (flag & VFSSP_VERBATIM) || *newpath != '/') {
1861 1863 ref = refstr_alloc(newpath);
1862 1864 goto out;
1863 1865 }
1864 1866
1865 1867 /*
1866 1868 * Truncate the trailing '/' in the zoneroot, and merge
1867 1869 * in the zone's rootpath with the "newpath" (resource
1868 1870 * or mountpoint) passed in.
1869 1871 *
1870 1872 * The size of the required buffer is thus the size of
1871 1873 * the buffer required for the passed-in newpath
1872 1874 * (strlen(newpath) + 1), plus the size of the buffer
1873 1875 * required to hold zone_rootpath (zone_rootpathlen)
1874 1876 * minus one for one of the now-superfluous NUL
1875 1877 * terminations, minus one for the trailing '/'.
1876 1878 *
1877 1879 * That gives us:
1878 1880 *
1879 1881 * (strlen(newpath) + 1) + zone_rootpathlen - 1 - 1
1880 1882 *
1881 1883 * Which is what we have below.
1882 1884 */
1883 1885
1884 1886 len = strlen(newpath) + zone->zone_rootpathlen - 1;
1885 1887 sp = kmem_alloc(len, KM_SLEEP);
1886 1888
1887 1889 /*
1888 1890 * Copy everything including the trailing slash, which
1889 1891 * we then overwrite with the NUL character.
1890 1892 */
1891 1893
1892 1894 (void) strcpy(sp, zone->zone_rootpath);
1893 1895 sp[zone->zone_rootpathlen - 2] = '\0';
1894 1896 (void) strcat(sp, newpath);
1895 1897
1896 1898 ref = refstr_alloc(sp);
1897 1899 kmem_free(sp, len);
1898 1900 out:
1899 1901 *refp = ref;
1900 1902
1901 1903 if (have_list_lock) {
1902 1904 vfs_mnttab_modtimeupd();
1903 1905 vfs_list_unlock();
1904 1906 }
1905 1907 }
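
To make the buffer-length arithmetic in vfs_setpath() concrete, the following is a minimal user-land sketch; the zone root ("/zones/z1/") and mount point ("/export") are hypothetical example values, not taken from the source above.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Sketch of the zone-root prefixing done by vfs_setpath(): truncate the
 * trailing '/' of the zone root, then append the supplied path.
 */
int
main(void)
{
	const char *zone_rootpath = "/zones/z1/";		/* ends in '/' */
	size_t zone_rootpathlen = strlen(zone_rootpath) + 1;	/* 11, counts the NUL */
	const char *newpath = "/export";

	/* (strlen(newpath) + 1) + zone_rootpathlen - 1 - 1 = 8 + 11 - 2 = 17 */
	size_t len = strlen(newpath) + zone_rootpathlen - 1;
	char *sp = malloc(len);

	if (sp == NULL)
		return (1);
	(void) strcpy(sp, zone_rootpath);
	sp[zone_rootpathlen - 2] = '\0';	/* overwrite the trailing '/' */
	(void) strcat(sp, newpath);
	(void) printf("%s\n", sp);		/* "/zones/z1/export": 16 chars + NUL */
	free(sp);
	return (0);
}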
1906 1908
1907 1909 /*
1908 1910 * Record a mounted resource name in a vfs structure.
1909 1911 * If vfsp is already mounted, caller must hold the vfs lock.
1910 1912 */
1911 1913 void
1912 1914 vfs_setresource(struct vfs *vfsp, const char *resource, uint32_t flag)
1913 1915 {
1914 1916 if (resource == NULL || resource[0] == '\0')
1915 1917 resource = VFS_NORESOURCE;
1916 1918 vfs_setpath(vfsp, &vfsp->vfs_resource, resource, flag);
1917 1919 }
1918 1920
1919 1921 /*
1920 1922 * Record a mount point name in a vfs structure.
1921 1923 * If vfsp is already mounted, caller must hold the vfs lock.
1922 1924 */
1923 1925 void
1924 1926 vfs_setmntpoint(struct vfs *vfsp, const char *mntpt, uint32_t flag)
1925 1927 {
1926 1928 if (mntpt == NULL || mntpt[0] == '\0')
1927 1929 mntpt = VFS_NOMNTPT;
1928 1930 vfs_setpath(vfsp, &vfsp->vfs_mntpt, mntpt, flag);
1929 1931 }
1930 1932
1931 1933 /* Returns the vfs_resource. Caller must call refstr_rele() when finished. */
1932 1934
1933 1935 refstr_t *
1934 1936 vfs_getresource(const struct vfs *vfsp)
1935 1937 {
1936 1938 refstr_t *resource;
1937 1939
1938 1940 vfs_list_read_lock();
1939 1941 resource = vfsp->vfs_resource;
1940 1942 refstr_hold(resource);
1941 1943 vfs_list_unlock();
1942 1944
1943 1945 return (resource);
1944 1946 }
1945 1947
1946 1948 /* Returns the vfs_mntpt. Caller must call refstr_rele() when finished. */
1947 1949
1948 1950 refstr_t *
1949 1951 vfs_getmntpoint(const struct vfs *vfsp)
1950 1952 {
1951 1953 refstr_t *mntpt;
1952 1954
1953 1955 vfs_list_read_lock();
1954 1956 mntpt = vfsp->vfs_mntpt;
1955 1957 refstr_hold(mntpt);
1956 1958 vfs_list_unlock();
1957 1959
1958 1960 return (mntpt);
1959 1961 }
1960 1962
1961 1963 /*
1962 1964 * Create an empty options table with enough empty slots to hold all
1963 1965  * the options in the options string passed as an argument.
1964 1966 * Potentially prepend another options table.
1965 1967 *
1966 1968 * Note: caller is responsible for locking the vfs list, if needed,
1967 1969 * to protect mops.
1968 1970 */
1969 1971 static void
1970 1972 vfs_createopttbl_extend(mntopts_t *mops, const char *opts,
1971 1973 const mntopts_t *mtmpl)
1972 1974 {
1973 1975 const char *s = opts;
1974 1976 uint_t count;
1975 1977
1976 1978 if (opts == NULL || *opts == '\0') {
1977 1979 count = 0;
1978 1980 } else {
1979 1981 count = 1;
1980 1982
1981 1983 /*
1982 1984 * Count number of options in the string
1983 1985 */
1984 1986 for (s = strchr(s, ','); s != NULL; s = strchr(s, ',')) {
1985 1987 count++;
1986 1988 s++;
1987 1989 }
1988 1990 }
1989 1991 vfs_copyopttbl_extend(mtmpl, mops, count);
1990 1992 }
1991 1993
1992 1994 /*
1993 1995 * Create an empty options table with enough empty slots to hold all
1994 1996  * the options in the options string passed as an argument.
1995 1997 *
1996 1998 * This function is *not* for general use by filesystems.
1997 1999 *
1998 2000 * Note: caller is responsible for locking the vfs list, if needed,
1999 2001 * to protect mops.
2000 2002 */
2001 2003 void
2002 2004 vfs_createopttbl(mntopts_t *mops, const char *opts)
2003 2005 {
2004 2006 vfs_createopttbl_extend(mops, opts, NULL);
2005 2007 }
2006 2008
2007 2009
2008 2010 /*
2009 2011 * Swap two mount options tables
2010 2012 */
2011 2013 static void
2012 2014 vfs_swapopttbl_nolock(mntopts_t *optbl1, mntopts_t *optbl2)
2013 2015 {
2014 2016 uint_t tmpcnt;
2015 2017 mntopt_t *tmplist;
2016 2018
2017 2019 tmpcnt = optbl2->mo_count;
2018 2020 tmplist = optbl2->mo_list;
2019 2021 optbl2->mo_count = optbl1->mo_count;
2020 2022 optbl2->mo_list = optbl1->mo_list;
2021 2023 optbl1->mo_count = tmpcnt;
2022 2024 optbl1->mo_list = tmplist;
2023 2025 }
2024 2026
2025 2027 static void
2026 2028 vfs_swapopttbl(mntopts_t *optbl1, mntopts_t *optbl2)
2027 2029 {
2028 2030 vfs_list_lock();
2029 2031 vfs_swapopttbl_nolock(optbl1, optbl2);
2030 2032 vfs_mnttab_modtimeupd();
2031 2033 vfs_list_unlock();
2032 2034 }
2033 2035
2034 2036 static char **
2035 2037 vfs_copycancelopt_extend(char **const moc, int extend)
2036 2038 {
2037 2039 int i = 0;
2038 2040 int j;
2039 2041 char **result;
2040 2042
2041 2043 if (moc != NULL) {
2042 2044 for (; moc[i] != NULL; i++)
2043 2045 /* count number of options to cancel */;
2044 2046 }
2045 2047
2046 2048 if (i + extend == 0)
2047 2049 return (NULL);
2048 2050
2049 2051 result = kmem_alloc((i + extend + 1) * sizeof (char *), KM_SLEEP);
2050 2052
2051 2053 for (j = 0; j < i; j++) {
2052 2054 result[j] = kmem_alloc(strlen(moc[j]) + 1, KM_SLEEP);
2053 2055 (void) strcpy(result[j], moc[j]);
2054 2056 }
2055 2057 for (; j <= i + extend; j++)
2056 2058 result[j] = NULL;
2057 2059
2058 2060 return (result);
2059 2061 }
2060 2062
2061 2063 static void
2062 2064 vfs_copyopt(const mntopt_t *s, mntopt_t *d)
2063 2065 {
2064 2066 char *sp, *dp;
2065 2067
2066 2068 d->mo_flags = s->mo_flags;
2067 2069 d->mo_data = s->mo_data;
2068 2070 sp = s->mo_name;
2069 2071 if (sp != NULL) {
2070 2072 dp = kmem_alloc(strlen(sp) + 1, KM_SLEEP);
2071 2073 (void) strcpy(dp, sp);
2072 2074 d->mo_name = dp;
2073 2075 } else {
2074 2076 d->mo_name = NULL; /* should never happen */
2075 2077 }
2076 2078
2077 2079 d->mo_cancel = vfs_copycancelopt_extend(s->mo_cancel, 0);
2078 2080
2079 2081 sp = s->mo_arg;
2080 2082 if (sp != NULL) {
2081 2083 dp = kmem_alloc(strlen(sp) + 1, KM_SLEEP);
2082 2084 (void) strcpy(dp, sp);
2083 2085 d->mo_arg = dp;
2084 2086 } else {
2085 2087 d->mo_arg = NULL;
2086 2088 }
2087 2089 }
2088 2090
2089 2091 /*
2090 2092 * Copy a mount options table, possibly allocating some spare
2091 2093 * slots at the end. It is permissible to copy_extend the NULL table.
2092 2094 */
2093 2095 static void
2094 2096 vfs_copyopttbl_extend(const mntopts_t *smo, mntopts_t *dmo, int extra)
2095 2097 {
2096 2098 uint_t i, count;
2097 2099 mntopt_t *motbl;
2098 2100
2099 2101 /*
2100 2102 * Clear out any existing stuff in the options table being initialized
2101 2103 */
2102 2104 vfs_freeopttbl(dmo);
2103 2105 count = (smo == NULL) ? 0 : smo->mo_count;
2104 2106 if ((count + extra) == 0) /* nothing to do */
2105 2107 return;
2106 2108 dmo->mo_count = count + extra;
2107 2109 motbl = kmem_zalloc((count + extra) * sizeof (mntopt_t), KM_SLEEP);
2108 2110 dmo->mo_list = motbl;
2109 2111 for (i = 0; i < count; i++) {
2110 2112 vfs_copyopt(&smo->mo_list[i], &motbl[i]);
2111 2113 }
2112 2114 for (i = count; i < count + extra; i++) {
2113 2115 motbl[i].mo_flags = MO_EMPTY;
2114 2116 }
2115 2117 }
2116 2118
2117 2119 /*
2118 2120 * Copy a mount options table.
2119 2121 *
2120 2122 * This function is *not* for general use by filesystems.
2121 2123 *
2122 2124 * Note: caller is responsible for locking the vfs list, if needed,
2123 2125 * to protect smo and dmo.
2124 2126 */
2125 2127 void
2126 2128 vfs_copyopttbl(const mntopts_t *smo, mntopts_t *dmo)
2127 2129 {
2128 2130 vfs_copyopttbl_extend(smo, dmo, 0);
2129 2131 }
2130 2132
2131 2133 static char **
2132 2134 vfs_mergecancelopts(const mntopt_t *mop1, const mntopt_t *mop2)
2133 2135 {
2134 2136 int c1 = 0;
2135 2137 int c2 = 0;
2136 2138 char **result;
2137 2139 char **sp1, **sp2, **dp;
2138 2140
2139 2141 /*
2140 2142 * First we count both lists of cancel options.
2141 2143 * If either is NULL or has no elements, we return a copy of
2142 2144 * the other.
2143 2145 */
2144 2146 if (mop1->mo_cancel != NULL) {
2145 2147 for (; mop1->mo_cancel[c1] != NULL; c1++)
2146 2148 /* count cancel options in mop1 */;
2147 2149 }
2148 2150
2149 2151 if (c1 == 0)
2150 2152 return (vfs_copycancelopt_extend(mop2->mo_cancel, 0));
2151 2153
2152 2154 if (mop2->mo_cancel != NULL) {
2153 2155 for (; mop2->mo_cancel[c2] != NULL; c2++)
2154 2156 /* count cancel options in mop2 */;
2155 2157 }
2156 2158
2157 2159 result = vfs_copycancelopt_extend(mop1->mo_cancel, c2);
2158 2160
2159 2161 if (c2 == 0)
2160 2162 return (result);
2161 2163
2162 2164 /*
2163 2165 * When we get here, we've got two sets of cancel options;
2164 2166 * we need to merge the two sets. We know that the result
2165 2167 * array has "c1+c2+1" entries and in the end we might shrink
2166 2168 * it.
2167 2169 	 * Result now has a copy of the c1 entries from mop1; we'll
2168 2170 	 * now look up each entry of mop2 in mop1 and copy it only
2169 2171 	 * if it is unique.
2170 2172 * This operation is O(n^2) but it's only called once per
2171 2173 * filesystem per duplicate option. This is a situation
2172 2174 * which doesn't arise with the filesystems in ON and
2173 2175 * n is generally 1.
2174 2176 */
2175 2177
2176 2178 dp = &result[c1];
2177 2179 for (sp2 = mop2->mo_cancel; *sp2 != NULL; sp2++) {
2178 2180 for (sp1 = mop1->mo_cancel; *sp1 != NULL; sp1++) {
2179 2181 if (strcmp(*sp1, *sp2) == 0)
2180 2182 break;
2181 2183 }
2182 2184 if (*sp1 == NULL) {
2183 2185 /*
2184 2186 * Option *sp2 not found in mop1, so copy it.
2185 2187 * The calls to vfs_copycancelopt_extend()
2186 2188 * guarantee that there's enough room.
2187 2189 */
2188 2190 *dp = kmem_alloc(strlen(*sp2) + 1, KM_SLEEP);
2189 2191 (void) strcpy(*dp++, *sp2);
2190 2192 }
2191 2193 }
2192 2194 if (dp != &result[c1+c2]) {
2193 2195 size_t bytes = (dp - result + 1) * sizeof (char *);
2194 2196 char **nres = kmem_alloc(bytes, KM_SLEEP);
2195 2197
2196 2198 bcopy(result, nres, bytes);
2197 2199 kmem_free(result, (c1 + c2 + 1) * sizeof (char *));
2198 2200 result = nres;
2199 2201 }
2200 2202 return (result);
2201 2203 }
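
As an illustration of the dedup step above, here is a minimal user-land sketch that merges two NULL-terminated cancel lists the same way, copying from the second list only the entries not already present in the first. The option names are hypothetical examples.

#include <stdio.h>
#include <string.h>

int
main(void)
{
	const char *list1[] = { "rw", "foreground", NULL };
	const char *list2[] = { "rw", "largefiles", NULL };
	const char *result[8];
	const char **sp1, **sp2;
	int n = 0;

	/* Copy all of list1 first, as vfs_copycancelopt_extend() does. */
	for (sp1 = list1; *sp1 != NULL; sp1++)
		result[n++] = *sp1;

	/* Append each entry of list2 that is not already in list1. */
	for (sp2 = list2; *sp2 != NULL; sp2++) {
		for (sp1 = list1; *sp1 != NULL; sp1++) {
			if (strcmp(*sp1, *sp2) == 0)
				break;
		}
		if (*sp1 == NULL)
			result[n++] = *sp2;
	}
	result[n] = NULL;

	/* Prints: rw foreground largefiles */
	for (n = 0; result[n] != NULL; n++)
		(void) printf("%s ", result[n]);
	(void) printf("\n");
	return (0);
}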
2202 2204
2203 2205 /*
2204 2206 * Merge two mount option tables (outer and inner) into one. This is very
2205 2207 * similar to "merging" global variables and automatic variables in C.
2206 2208 *
2207 2209 * This isn't (and doesn't have to be) fast.
2208 2210 *
2209 2211 * This function is *not* for general use by filesystems.
2210 2212 *
2211 2213 * Note: caller is responsible for locking the vfs list, if needed,
2212 2214 * to protect omo, imo & dmo.
2213 2215 */
2214 2216 void
2215 2217 vfs_mergeopttbl(const mntopts_t *omo, const mntopts_t *imo, mntopts_t *dmo)
2216 2218 {
2217 2219 uint_t i, count;
2218 2220 mntopt_t *mop, *motbl;
2219 2221 uint_t freeidx;
2220 2222
2221 2223 /*
2222 2224 * First determine how much space we need to allocate.
2223 2225 */
2224 2226 count = omo->mo_count;
2225 2227 for (i = 0; i < imo->mo_count; i++) {
2226 2228 if (imo->mo_list[i].mo_flags & MO_EMPTY)
2227 2229 continue;
2228 2230 if (vfs_hasopt(omo, imo->mo_list[i].mo_name) == NULL)
2229 2231 count++;
2230 2232 }
2231 2233 ASSERT(count >= omo->mo_count &&
2232 2234 count <= omo->mo_count + imo->mo_count);
2233 2235 motbl = kmem_alloc(count * sizeof (mntopt_t), KM_SLEEP);
2234 2236 for (i = 0; i < omo->mo_count; i++)
2235 2237 vfs_copyopt(&omo->mo_list[i], &motbl[i]);
2236 2238 freeidx = omo->mo_count;
2237 2239 for (i = 0; i < imo->mo_count; i++) {
2238 2240 if (imo->mo_list[i].mo_flags & MO_EMPTY)
2239 2241 continue;
2240 2242 if ((mop = vfs_hasopt(omo, imo->mo_list[i].mo_name)) != NULL) {
2241 2243 char **newcanp;
2242 2244 uint_t index = mop - omo->mo_list;
2243 2245
2244 2246 newcanp = vfs_mergecancelopts(mop, &motbl[index]);
2245 2247
2246 2248 vfs_freeopt(&motbl[index]);
2247 2249 vfs_copyopt(&imo->mo_list[i], &motbl[index]);
2248 2250
2249 2251 vfs_freecancelopt(motbl[index].mo_cancel);
2250 2252 motbl[index].mo_cancel = newcanp;
2251 2253 } else {
2252 2254 /*
2253 2255 * If it's a new option, just copy it over to the first
2254 2256 * free location.
2255 2257 */
2256 2258 vfs_copyopt(&imo->mo_list[i], &motbl[freeidx++]);
2257 2259 }
2258 2260 }
2259 2261 dmo->mo_count = count;
2260 2262 dmo->mo_list = motbl;
2261 2263 }
2262 2264
2263 2265 /*
2264 2266 * Functions to set and clear mount options in a mount options table.
2265 2267 */
2266 2268
2267 2269 /*
2268 2270 * Clear a mount option, if it exists.
2269 2271 *
2270 2272 * The update_mnttab arg indicates whether mops is part of a vfs that is on
2271 2273 * the vfs list.
2272 2274 */
2273 2275 static void
2274 2276 vfs_clearmntopt_nolock(mntopts_t *mops, const char *opt, int update_mnttab)
2275 2277 {
2276 2278 struct mntopt *mop;
2277 2279 uint_t i, count;
2278 2280
2279 2281 ASSERT(!update_mnttab || RW_WRITE_HELD(&vfslist));
2280 2282
2281 2283 count = mops->mo_count;
2282 2284 for (i = 0; i < count; i++) {
2283 2285 mop = &mops->mo_list[i];
2284 2286
2285 2287 if (mop->mo_flags & MO_EMPTY)
2286 2288 continue;
2287 2289 if (strcmp(opt, mop->mo_name))
2288 2290 continue;
2289 2291 mop->mo_flags &= ~MO_SET;
2290 2292 if (mop->mo_arg != NULL) {
2291 2293 kmem_free(mop->mo_arg, strlen(mop->mo_arg) + 1);
2292 2294 }
2293 2295 mop->mo_arg = NULL;
2294 2296 if (update_mnttab)
2295 2297 vfs_mnttab_modtimeupd();
2296 2298 break;
2297 2299 }
2298 2300 }
2299 2301
2300 2302 void
2301 2303 vfs_clearmntopt(struct vfs *vfsp, const char *opt)
2302 2304 {
2303 2305 int gotlock = 0;
2304 2306
2305 2307 if (VFS_ON_LIST(vfsp)) {
2306 2308 gotlock = 1;
2307 2309 vfs_list_lock();
2308 2310 }
2309 2311 vfs_clearmntopt_nolock(&vfsp->vfs_mntopts, opt, gotlock);
2310 2312 if (gotlock)
2311 2313 vfs_list_unlock();
2312 2314 }
2313 2315
2314 2316
2315 2317 /*
2316 2318 * Set a mount option on. If it's not found in the table, it's silently
2317 2319 * ignored. If the option has MO_IGNORE set, it is still set unless the
2318 2320 * VFS_NOFORCEOPT bit is set in the flags. Also, VFS_DISPLAY/VFS_NODISPLAY flag
2319 2321 * bits can be used to toggle the MO_NODISPLAY bit for the option.
2320 2322 * If the VFS_CREATEOPT flag bit is set then the first option slot with
2321 2323 * MO_EMPTY set is created as the option passed in.
2322 2324 *
2323 2325 * The update_mnttab arg indicates whether mops is part of a vfs that is on
2324 2326 * the vfs list.
2325 2327 */
2326 2328 static void
2327 2329 vfs_setmntopt_nolock(mntopts_t *mops, const char *opt,
2328 2330 const char *arg, int flags, int update_mnttab)
2329 2331 {
2330 2332 mntopt_t *mop;
2331 2333 uint_t i, count;
2332 2334 char *sp;
2333 2335
2334 2336 ASSERT(!update_mnttab || RW_WRITE_HELD(&vfslist));
2335 2337
2336 2338 if (flags & VFS_CREATEOPT) {
2337 2339 if (vfs_hasopt(mops, opt) != NULL) {
2338 2340 flags &= ~VFS_CREATEOPT;
2339 2341 }
2340 2342 }
2341 2343 count = mops->mo_count;
2342 2344 for (i = 0; i < count; i++) {
2343 2345 mop = &mops->mo_list[i];
2344 2346
2345 2347 if (mop->mo_flags & MO_EMPTY) {
2346 2348 if ((flags & VFS_CREATEOPT) == 0)
2347 2349 continue;
2348 2350 sp = kmem_alloc(strlen(opt) + 1, KM_SLEEP);
2349 2351 (void) strcpy(sp, opt);
2350 2352 mop->mo_name = sp;
2351 2353 if (arg != NULL)
2352 2354 mop->mo_flags = MO_HASVALUE;
2353 2355 else
2354 2356 mop->mo_flags = 0;
2355 2357 } else if (strcmp(opt, mop->mo_name)) {
2356 2358 continue;
2357 2359 }
2358 2360 if ((mop->mo_flags & MO_IGNORE) && (flags & VFS_NOFORCEOPT))
2359 2361 break;
2360 2362 if (arg != NULL && (mop->mo_flags & MO_HASVALUE) != 0) {
2361 2363 sp = kmem_alloc(strlen(arg) + 1, KM_SLEEP);
2362 2364 (void) strcpy(sp, arg);
2363 2365 } else {
2364 2366 sp = NULL;
2365 2367 }
2366 2368 if (mop->mo_arg != NULL)
2367 2369 kmem_free(mop->mo_arg, strlen(mop->mo_arg) + 1);
2368 2370 mop->mo_arg = sp;
2369 2371 if (flags & VFS_DISPLAY)
2370 2372 mop->mo_flags &= ~MO_NODISPLAY;
2371 2373 if (flags & VFS_NODISPLAY)
2372 2374 mop->mo_flags |= MO_NODISPLAY;
2373 2375 mop->mo_flags |= MO_SET;
2374 2376 if (mop->mo_cancel != NULL) {
2375 2377 char **cp;
2376 2378
2377 2379 for (cp = mop->mo_cancel; *cp != NULL; cp++)
2378 2380 vfs_clearmntopt_nolock(mops, *cp, 0);
2379 2381 }
2380 2382 if (update_mnttab)
2381 2383 vfs_mnttab_modtimeupd();
2382 2384 break;
2383 2385 }
2384 2386 }
2385 2387
2386 2388 void
2387 2389 vfs_setmntopt(struct vfs *vfsp, const char *opt, const char *arg, int flags)
2388 2390 {
2389 2391 int gotlock = 0;
2390 2392
2391 2393 if (VFS_ON_LIST(vfsp)) {
2392 2394 gotlock = 1;
2393 2395 vfs_list_lock();
2394 2396 }
2395 2397 vfs_setmntopt_nolock(&vfsp->vfs_mntopts, opt, arg, flags, gotlock);
2396 2398 if (gotlock)
2397 2399 vfs_list_unlock();
2398 2400 }
2399 2401
2400 2402
2401 2403 /*
2402 2404 * Add a "tag" option to a mounted file system's options list.
2403 2405 *
2404 2406 * Note: caller is responsible for locking the vfs list, if needed,
2405 2407 * to protect mops.
2406 2408 */
2407 2409 static mntopt_t *
2408 2410 vfs_addtag(mntopts_t *mops, const char *tag)
2409 2411 {
2410 2412 uint_t count;
2411 2413 mntopt_t *mop, *motbl;
2412 2414
2413 2415 count = mops->mo_count + 1;
2414 2416 motbl = kmem_zalloc(count * sizeof (mntopt_t), KM_SLEEP);
2415 2417 if (mops->mo_count) {
2416 2418 size_t len = (count - 1) * sizeof (mntopt_t);
2417 2419
2418 2420 bcopy(mops->mo_list, motbl, len);
2419 2421 kmem_free(mops->mo_list, len);
2420 2422 }
2421 2423 mops->mo_count = count;
2422 2424 mops->mo_list = motbl;
2423 2425 mop = &motbl[count - 1];
2424 2426 mop->mo_flags = MO_TAG;
2425 2427 mop->mo_name = kmem_alloc(strlen(tag) + 1, KM_SLEEP);
2426 2428 (void) strcpy(mop->mo_name, tag);
2427 2429 return (mop);
2428 2430 }
2429 2431
2430 2432 /*
2431 2433 * Allow users to set arbitrary "tags" in a vfs's mount options.
2432 2434 * Broader use within the kernel is discouraged.
2433 2435 */
2434 2436 int
2435 2437 vfs_settag(uint_t major, uint_t minor, const char *mntpt, const char *tag,
2436 2438 cred_t *cr)
2437 2439 {
2438 2440 vfs_t *vfsp;
2439 2441 mntopts_t *mops;
2440 2442 mntopt_t *mop;
2441 2443 int found = 0;
2442 2444 dev_t dev = makedevice(major, minor);
2443 2445 int err = 0;
2444 2446 char *buf = kmem_alloc(MAX_MNTOPT_STR, KM_SLEEP);
2445 2447
2446 2448 /*
2447 2449 * Find the desired mounted file system
2448 2450 */
2449 2451 vfs_list_lock();
2450 2452 vfsp = rootvfs;
2451 2453 do {
2452 2454 if (vfsp->vfs_dev == dev &&
2453 2455 strcmp(mntpt, refstr_value(vfsp->vfs_mntpt)) == 0) {
2454 2456 found = 1;
2455 2457 break;
2456 2458 }
2457 2459 vfsp = vfsp->vfs_next;
2458 2460 } while (vfsp != rootvfs);
2459 2461
2460 2462 if (!found) {
2461 2463 err = EINVAL;
2462 2464 goto out;
2463 2465 }
2464 2466 err = secpolicy_fs_config(cr, vfsp);
2465 2467 if (err != 0)
2466 2468 goto out;
2467 2469
2468 2470 mops = &vfsp->vfs_mntopts;
2469 2471 /*
2470 2472 * Add tag if it doesn't already exist
2471 2473 */
2472 2474 if ((mop = vfs_hasopt(mops, tag)) == NULL) {
2473 2475 int len;
2474 2476
2475 2477 (void) vfs_buildoptionstr(mops, buf, MAX_MNTOPT_STR);
2476 2478 len = strlen(buf);
2477 2479 if (len + strlen(tag) + 2 > MAX_MNTOPT_STR) {
2478 2480 err = ENAMETOOLONG;
2479 2481 goto out;
2480 2482 }
2481 2483 mop = vfs_addtag(mops, tag);
2482 2484 }
2483 2485 if ((mop->mo_flags & MO_TAG) == 0) {
2484 2486 err = EINVAL;
2485 2487 goto out;
2486 2488 }
2487 2489 vfs_setmntopt_nolock(mops, tag, NULL, 0, 1);
2488 2490 out:
2489 2491 vfs_list_unlock();
2490 2492 kmem_free(buf, MAX_MNTOPT_STR);
2491 2493 return (err);
2492 2494 }
2493 2495
2494 2496 /*
2495 2497 * Allow users to remove arbitrary "tags" in a vfs's mount options.
2496 2498 * Broader use within the kernel is discouraged.
2497 2499 */
2498 2500 int
2499 2501 vfs_clrtag(uint_t major, uint_t minor, const char *mntpt, const char *tag,
2500 2502 cred_t *cr)
2501 2503 {
2502 2504 vfs_t *vfsp;
2503 2505 mntopt_t *mop;
2504 2506 int found = 0;
2505 2507 dev_t dev = makedevice(major, minor);
2506 2508 int err = 0;
2507 2509
2508 2510 /*
2509 2511 * Find the desired mounted file system
2510 2512 */
2511 2513 vfs_list_lock();
2512 2514 vfsp = rootvfs;
2513 2515 do {
2514 2516 if (vfsp->vfs_dev == dev &&
2515 2517 strcmp(mntpt, refstr_value(vfsp->vfs_mntpt)) == 0) {
2516 2518 found = 1;
2517 2519 break;
2518 2520 }
2519 2521 vfsp = vfsp->vfs_next;
2520 2522 } while (vfsp != rootvfs);
2521 2523
2522 2524 if (!found) {
2523 2525 err = EINVAL;
2524 2526 goto out;
2525 2527 }
2526 2528 err = secpolicy_fs_config(cr, vfsp);
2527 2529 if (err != 0)
2528 2530 goto out;
2529 2531
2530 2532 if ((mop = vfs_hasopt(&vfsp->vfs_mntopts, tag)) == NULL) {
2531 2533 err = EINVAL;
2532 2534 goto out;
2533 2535 }
2534 2536 if ((mop->mo_flags & MO_TAG) == 0) {
2535 2537 err = EINVAL;
2536 2538 goto out;
2537 2539 }
2538 2540 vfs_clearmntopt_nolock(&vfsp->vfs_mntopts, tag, 1);
2539 2541 out:
2540 2542 vfs_list_unlock();
2541 2543 return (err);
2542 2544 }
2543 2545
2544 2546 /*
2545 2547 * Function to parse an option string and fill in a mount options table.
2546 2548  * Unknown options are silently ignored. Separators in the input string are
2547 2549  * temporarily replaced with NULs, then restored. If the create flag is set, options
2548 2550 * not found in the table are just added on the fly. The table must have
2549 2551 * an option slot marked MO_EMPTY to add an option on the fly.
2550 2552 *
2551 2553 * This function is *not* for general use by filesystems.
2552 2554 *
2553 2555 * Note: caller is responsible for locking the vfs list, if needed,
2554 2556  * to protect mops.
2555 2557 */
2556 2558 void
2557 2559 vfs_parsemntopts(mntopts_t *mops, char *osp, int create)
2558 2560 {
2559 2561 char *s = osp, *p, *nextop, *valp, *cp, *ep;
2560 2562 int setflg = VFS_NOFORCEOPT;
2561 2563
2562 2564 if (osp == NULL)
2563 2565 return;
2564 2566 while (*s != '\0') {
2565 2567 p = strchr(s, ','); /* find next option */
2566 2568 if (p == NULL) {
2567 2569 cp = NULL;
2568 2570 p = s + strlen(s);
2569 2571 } else {
2570 2572 cp = p; /* save location of comma */
2571 2573 *p++ = '\0'; /* mark end and point to next option */
2572 2574 }
2573 2575 nextop = p;
2574 2576 p = strchr(s, '='); /* look for value */
2575 2577 if (p == NULL) {
2576 2578 valp = NULL; /* no value supplied */
2577 2579 } else {
2578 2580 ep = p; /* save location of equals */
2579 2581 *p++ = '\0'; /* end option and point to value */
2580 2582 valp = p;
2581 2583 }
2582 2584 /*
2583 2585 * set option into options table
2584 2586 */
2585 2587 if (create)
2586 2588 setflg |= VFS_CREATEOPT;
2587 2589 vfs_setmntopt_nolock(mops, s, valp, setflg, 0);
2588 2590 if (cp != NULL)
2589 2591 *cp = ','; /* restore the comma */
2590 2592 if (valp != NULL)
2591 2593 *ep = '='; /* restore the equals */
2592 2594 s = nextop;
2593 2595 }
2594 2596 }
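
The separator handling above can be seen in isolation in this minimal user-land sketch; the option string "ro,dev=1234,nosuid" and the set_option() helper are hypothetical, standing in for vfs_setmntopt_nolock().

#include <stdio.h>
#include <string.h>

static void
set_option(const char *opt, const char *val)
{
	(void) printf("option \"%s\" value \"%s\"\n", opt, val ? val : "(none)");
}

int
main(void)
{
	char osp[] = "ro,dev=1234,nosuid";
	char *s = osp, *p, *nextop, *valp, *cp, *ep = NULL;

	while (*s != '\0') {
		p = strchr(s, ',');		/* find next option */
		if (p == NULL) {
			cp = NULL;
			p = s + strlen(s);
		} else {
			cp = p;			/* save location of comma */
			*p++ = '\0';		/* mark end, point to next option */
		}
		nextop = p;
		p = strchr(s, '=');		/* look for a value */
		if (p == NULL) {
			valp = NULL;		/* no value supplied */
		} else {
			ep = p;			/* save location of equals */
			*p++ = '\0';
			valp = p;
		}
		set_option(s, valp);
		if (cp != NULL)
			*cp = ',';		/* restore the comma */
		if (valp != NULL)
			*ep = '=';		/* restore the equals */
		s = nextop;
	}
	(void) printf("restored string: %s\n", osp);	/* unchanged */
	return (0);
}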
2595 2597
2596 2598 /*
2597 2599 * Function to inquire if an option exists in a mount options table.
2598 2600 * Returns a pointer to the option if it exists, else NULL.
2599 2601 *
2600 2602 * This function is *not* for general use by filesystems.
2601 2603 *
2602 2604 * Note: caller is responsible for locking the vfs list, if needed,
2603 2605 * to protect mops.
2604 2606 */
2605 2607 struct mntopt *
2606 2608 vfs_hasopt(const mntopts_t *mops, const char *opt)
2607 2609 {
2608 2610 struct mntopt *mop;
2609 2611 uint_t i, count;
2610 2612
2611 2613 count = mops->mo_count;
2612 2614 for (i = 0; i < count; i++) {
2613 2615 mop = &mops->mo_list[i];
2614 2616
2615 2617 if (mop->mo_flags & MO_EMPTY)
2616 2618 continue;
2617 2619 if (strcmp(opt, mop->mo_name) == 0)
2618 2620 return (mop);
2619 2621 }
2620 2622 return (NULL);
2621 2623 }
2622 2624
2623 2625 /*
2624 2626 * Function to inquire if an option is set in a mount options table.
2625 2627 * Returns non-zero if set and fills in the arg pointer with a pointer to
2626 2628 * the argument string or NULL if there is no argument string.
2627 2629 */
2628 2630 static int
2629 2631 vfs_optionisset_nolock(const mntopts_t *mops, const char *opt, char **argp)
2630 2632 {
2631 2633 struct mntopt *mop;
2632 2634 uint_t i, count;
2633 2635
2634 2636 count = mops->mo_count;
2635 2637 for (i = 0; i < count; i++) {
2636 2638 mop = &mops->mo_list[i];
2637 2639
2638 2640 if (mop->mo_flags & MO_EMPTY)
2639 2641 continue;
2640 2642 if (strcmp(opt, mop->mo_name))
2641 2643 continue;
2642 2644 if ((mop->mo_flags & MO_SET) == 0)
2643 2645 return (0);
2644 2646 if (argp != NULL && (mop->mo_flags & MO_HASVALUE) != 0)
2645 2647 *argp = mop->mo_arg;
2646 2648 return (1);
2647 2649 }
2648 2650 return (0);
2649 2651 }
2650 2652
2651 2653
2652 2654 int
2653 2655 vfs_optionisset(const struct vfs *vfsp, const char *opt, char **argp)
2654 2656 {
2655 2657 int ret;
2656 2658
2657 2659 vfs_list_read_lock();
2658 2660 ret = vfs_optionisset_nolock(&vfsp->vfs_mntopts, opt, argp);
2659 2661 vfs_list_unlock();
2660 2662 return (ret);
2661 2663 }
2662 2664
2663 2665
2664 2666 /*
2665 2667 * Construct a comma separated string of the options set in the given
2666 2668 * mount table, return the string in the given buffer. Return non-zero if
2667 2669 * the buffer would overflow.
2668 2670 *
2669 2671 * This function is *not* for general use by filesystems.
2670 2672 *
2671 2673 * Note: caller is responsible for locking the vfs list, if needed,
2672 2674 * to protect mp.
2673 2675 */
2674 2676 int
2675 2677 vfs_buildoptionstr(const mntopts_t *mp, char *buf, int len)
2676 2678 {
2677 2679 char *cp;
2678 2680 uint_t i;
2679 2681
2680 2682 buf[0] = '\0';
2681 2683 cp = buf;
2682 2684 for (i = 0; i < mp->mo_count; i++) {
2683 2685 struct mntopt *mop;
2684 2686
2685 2687 mop = &mp->mo_list[i];
2686 2688 if (mop->mo_flags & MO_SET) {
2687 2689 int optlen, comma = 0;
2688 2690
2689 2691 if (buf[0] != '\0')
2690 2692 comma = 1;
2691 2693 optlen = strlen(mop->mo_name);
2692 2694 if (strlen(buf) + comma + optlen + 1 > len)
2693 2695 goto err;
2694 2696 if (comma)
2695 2697 *cp++ = ',';
2696 2698 (void) strcpy(cp, mop->mo_name);
2697 2699 cp += optlen;
2698 2700 /*
2699 2701 * Append option value if there is one
2700 2702 */
2701 2703 if (mop->mo_arg != NULL) {
2702 2704 int arglen;
2703 2705
2704 2706 arglen = strlen(mop->mo_arg);
2705 2707 if (strlen(buf) + arglen + 2 > len)
2706 2708 goto err;
2707 2709 *cp++ = '=';
2708 2710 (void) strcpy(cp, mop->mo_arg);
2709 2711 cp += arglen;
2710 2712 }
2711 2713 }
2712 2714 }
2713 2715 return (0);
2714 2716 err:
2715 2717 return (EOVERFLOW);
2716 2718 }
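
A typical caller snapshots the option table into a MAX_MNTOPT_STR buffer while holding the vfs list lock, as domount() and vfs_settag() do above. The helper below is a hypothetical in-kernel sketch, not part of this file.

/*
 * Hypothetical helper: log the current option string of a mounted vfs,
 * e.g. "ro,nosuid,xattr". Sketch only.
 */
static void
dump_mntopts(struct vfs *vfsp)
{
	char *buf = kmem_alloc(MAX_MNTOPT_STR, KM_SLEEP);

	vfs_list_read_lock();
	if (vfs_buildoptionstr(&vfsp->vfs_mntopts, buf, MAX_MNTOPT_STR) == 0)
		cmn_err(CE_NOTE, "options: %s", buf);
	else
		cmn_err(CE_WARN, "option string would overflow");
	vfs_list_unlock();
	kmem_free(buf, MAX_MNTOPT_STR);
}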
2717 2719
2718 2720 static void
2719 2721 vfs_freecancelopt(char **moc)
2720 2722 {
2721 2723 if (moc != NULL) {
2722 2724 int ccnt = 0;
2723 2725 char **cp;
2724 2726
2725 2727 for (cp = moc; *cp != NULL; cp++) {
2726 2728 kmem_free(*cp, strlen(*cp) + 1);
2727 2729 ccnt++;
2728 2730 }
2729 2731 kmem_free(moc, (ccnt + 1) * sizeof (char *));
2730 2732 }
2731 2733 }
2732 2734
2733 2735 static void
2734 2736 vfs_freeopt(mntopt_t *mop)
2735 2737 {
2736 2738 if (mop->mo_name != NULL)
2737 2739 kmem_free(mop->mo_name, strlen(mop->mo_name) + 1);
2738 2740
2739 2741 vfs_freecancelopt(mop->mo_cancel);
2740 2742
2741 2743 if (mop->mo_arg != NULL)
2742 2744 kmem_free(mop->mo_arg, strlen(mop->mo_arg) + 1);
2743 2745 }
2744 2746
2745 2747 /*
2746 2748 * Free a mount options table
2747 2749 *
2748 2750 * This function is *not* for general use by filesystems.
2749 2751 *
2750 2752 * Note: caller is responsible for locking the vfs list, if needed,
2751 2753 * to protect mp.
2752 2754 */
2753 2755 void
2754 2756 vfs_freeopttbl(mntopts_t *mp)
2755 2757 {
2756 2758 uint_t i, count;
2757 2759
2758 2760 count = mp->mo_count;
2759 2761 for (i = 0; i < count; i++) {
2760 2762 vfs_freeopt(&mp->mo_list[i]);
2761 2763 }
2762 2764 if (count) {
2763 2765 kmem_free(mp->mo_list, sizeof (mntopt_t) * count);
2764 2766 mp->mo_count = 0;
2765 2767 mp->mo_list = NULL;
2766 2768 }
2767 2769 }
2768 2770
2769 2771
2770 2772 /* ARGSUSED */
2771 2773 static int
2772 2774 vfs_mntdummyread(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cred,
2773 2775 caller_context_t *ct)
2774 2776 {
2775 2777 return (0);
2776 2778 }
2777 2779
2778 2780 /* ARGSUSED */
2779 2781 static int
2780 2782 vfs_mntdummywrite(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cred,
2781 2783 caller_context_t *ct)
2782 2784 {
2783 2785 return (0);
2784 2786 }
2785 2787
2786 2788 /*
2787 2789  * The dummy vnode is currently used only by the file events
2788 2790  * notification module, which is interested only in the timestamps.
2789 2791 */
2790 2792 /* ARGSUSED */
2791 2793 static int
2792 2794 vfs_mntdummygetattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
2793 2795 caller_context_t *ct)
2794 2796 {
2795 2797 bzero(vap, sizeof (vattr_t));
2796 2798 vap->va_type = VREG;
2797 2799 vap->va_nlink = 1;
2798 2800 vap->va_ctime = vfs_mnttab_ctime;
2799 2801 /*
2800 2802 	 * It is OK to just copy mtime, as the time will be monotonically
2801 2803 * increasing.
2802 2804 */
2803 2805 vap->va_mtime = vfs_mnttab_mtime;
2804 2806 vap->va_atime = vap->va_mtime;
2805 2807 return (0);
2806 2808 }
2807 2809
2808 2810 static void
2809 2811 vfs_mnttabvp_setup(void)
2810 2812 {
2811 2813 vnode_t *tvp;
2812 2814 vnodeops_t *vfs_mntdummyvnops;
2813 2815 const fs_operation_def_t mnt_dummyvnodeops_template[] = {
2814 2816 VOPNAME_READ, { .vop_read = vfs_mntdummyread },
2815 2817 VOPNAME_WRITE, { .vop_write = vfs_mntdummywrite },
2816 2818 VOPNAME_GETATTR, { .vop_getattr = vfs_mntdummygetattr },
2817 2819 VOPNAME_VNEVENT, { .vop_vnevent = fs_vnevent_support },
2818 2820 NULL, NULL
2819 2821 };
2820 2822
2821 2823 if (vn_make_ops("mnttab", mnt_dummyvnodeops_template,
2822 2824 &vfs_mntdummyvnops) != 0) {
2823 2825 cmn_err(CE_WARN, "vfs_mnttabvp_setup: vn_make_ops failed");
2824 2826 /* Shouldn't happen, but not bad enough to panic */
2825 2827 return;
2826 2828 }
2827 2829
2828 2830 /*
2829 2831 * A global dummy vnode is allocated to represent mntfs files.
2830 2832 * The mntfs file (/etc/mnttab) can be monitored for file events
2831 2833 * and receive an event when mnttab changes. Dummy VOP calls
2832 2834 * will be made on this vnode. The file events notification module
2833 2835 * intercepts this vnode and delivers relevant events.
2834 2836 */
2835 2837 tvp = vn_alloc(KM_SLEEP);
2836 2838 tvp->v_flag = VNOMOUNT|VNOMAP|VNOSWAP|VNOCACHE;
2837 2839 vn_setops(tvp, vfs_mntdummyvnops);
2838 2840 tvp->v_type = VREG;
2839 2841 /*
2840 2842 * The mnt dummy ops do not reference v_data.
2841 2843 * No other module intercepting this vnode should either.
2842 2844 * Just set it to point to itself.
2843 2845 */
2844 2846 tvp->v_data = (caddr_t)tvp;
2845 2847 tvp->v_vfsp = rootvfs;
2846 2848 vfs_mntdummyvp = tvp;
2847 2849 }
2848 2850
2849 2851 /*
2850 2852  * Perform fake read/write ops on the mnttab dummy vnode.
2851 2853 */
2852 2854 static void
2853 2855 vfs_mnttab_rwop(int rw)
2854 2856 {
2855 2857 struct uio uio;
2856 2858 struct iovec iov;
2857 2859 char buf[1];
2858 2860
2859 2861 if (vfs_mntdummyvp == NULL)
2860 2862 return;
2861 2863
2862 2864 bzero(&uio, sizeof (uio));
2863 2865 bzero(&iov, sizeof (iov));
2864 2866 iov.iov_base = buf;
2865 2867 iov.iov_len = 0;
2866 2868 uio.uio_iov = &iov;
2867 2869 uio.uio_iovcnt = 1;
2868 2870 uio.uio_loffset = 0;
2869 2871 uio.uio_segflg = UIO_SYSSPACE;
2870 2872 uio.uio_resid = 0;
2871 2873 if (rw) {
2872 2874 (void) VOP_WRITE(vfs_mntdummyvp, &uio, 0, kcred, NULL);
2873 2875 } else {
2874 2876 (void) VOP_READ(vfs_mntdummyvp, &uio, 0, kcred, NULL);
2875 2877 }
2876 2878 }
2877 2879
2878 2880 /*
2879 2881 * Generate a write operation.
2880 2882 */
2881 2883 void
2882 2884 vfs_mnttab_writeop(void)
2883 2885 {
2884 2886 vfs_mnttab_rwop(1);
2885 2887 }
2886 2888
2887 2889 /*
2888 2890 * Generate a read operation.
2889 2891 */
2890 2892 void
2891 2893 vfs_mnttab_readop(void)
2892 2894 {
2893 2895 vfs_mnttab_rwop(0);
2894 2896 }
2895 2897
2896 2898 /*
2897 2899 * Free any mnttab information recorded in the vfs struct.
2898 2900 * The vfs must not be on the vfs list.
2899 2901 */
2900 2902 static void
2901 2903 vfs_freemnttab(struct vfs *vfsp)
2902 2904 {
2903 2905 ASSERT(!VFS_ON_LIST(vfsp));
2904 2906
2905 2907 /*
2906 2908 * Free device and mount point information
2907 2909 */
2908 2910 if (vfsp->vfs_mntpt != NULL) {
2909 2911 refstr_rele(vfsp->vfs_mntpt);
2910 2912 vfsp->vfs_mntpt = NULL;
2911 2913 }
2912 2914 if (vfsp->vfs_resource != NULL) {
2913 2915 refstr_rele(vfsp->vfs_resource);
2914 2916 vfsp->vfs_resource = NULL;
2915 2917 }
2916 2918 /*
2917 2919 * Now free mount options information
2918 2920 */
2919 2921 vfs_freeopttbl(&vfsp->vfs_mntopts);
2920 2922 }
2921 2923
2922 2924 /*
2923 2925 * Return the last mnttab modification time
2924 2926 */
2925 2927 void
2926 2928 vfs_mnttab_modtime(timespec_t *ts)
2927 2929 {
2928 2930 ASSERT(RW_LOCK_HELD(&vfslist));
2929 2931 *ts = vfs_mnttab_mtime;
2930 2932 }
2931 2933
2932 2934 /*
2933 2935 * See if mnttab is changed
2934 2936 */
2935 2937 void
2936 2938 vfs_mnttab_poll(timespec_t *old, struct pollhead **phpp)
2937 2939 {
2938 2940 int changed;
2939 2941
2940 2942 *phpp = (struct pollhead *)NULL;
2941 2943
2942 2944 /*
2943 2945 * Note: don't grab vfs list lock before accessing vfs_mnttab_mtime.
2944 2946 	 * Doing so can lead to deadlock against vfs_mnttab_modtimeupd(). It is safe
2945 2947 * to not grab the vfs list lock because tv_sec is monotonically
2946 2948 * increasing.
2947 2949 */
2948 2950
2949 2951 changed = (old->tv_nsec != vfs_mnttab_mtime.tv_nsec) ||
2950 2952 (old->tv_sec != vfs_mnttab_mtime.tv_sec);
2951 2953 if (!changed) {
2952 2954 *phpp = &vfs_pollhd;
2953 2955 }
2954 2956 }
2955 2957
2956 2958 /* Provide a unique and monotonically-increasing timestamp. */
2957 2959 void
2958 2960 vfs_mono_time(timespec_t *ts)
2959 2961 {
2960 2962 static volatile hrtime_t hrt; /* The saved time. */
2961 2963 hrtime_t newhrt, oldhrt; /* For effecting the CAS. */
2962 2964 timespec_t newts;
2963 2965
2964 2966 /*
2965 2967 * Try gethrestime() first, but be prepared to fabricate a sensible
2966 2968 * answer at the first sign of any trouble.
2967 2969 */
2968 2970 gethrestime(&newts);
2969 2971 newhrt = ts2hrt(&newts);
2970 2972 for (;;) {
2971 2973 oldhrt = hrt;
2972 2974 if (newhrt <= hrt)
2973 2975 newhrt = hrt + 1;
2974 2976 if (atomic_cas_64((uint64_t *)&hrt, oldhrt, newhrt) == oldhrt)
2975 2977 break;
2976 2978 }
2977 2979 hrt2ts(newhrt, ts);
2978 2980 }
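
The compare-and-swap loop above guarantees each caller a value strictly greater than the last one handed out, even under concurrency. The following is a user-land sketch of the same idea using C11 atomics; all names here are illustrative, not kernel interfaces.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static _Atomic uint64_t saved_ns;	/* last timestamp handed out */

static uint64_t
mono_stamp(void)
{
	struct timespec ts;
	uint64_t newv, oldv;

	(void) clock_gettime(CLOCK_REALTIME, &ts);
	newv = (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
	for (;;) {
		oldv = atomic_load(&saved_ns);
		if (newv <= oldv)		/* never go backwards or repeat */
			newv = oldv + 1;
		if (atomic_compare_exchange_strong(&saved_ns, &oldv, newv))
			break;			/* we published newv */
	}
	return (newv);
}

int
main(void)
{
	/* Two successive calls always print strictly increasing values. */
	(void) printf("%llu\n%llu\n",
	    (unsigned long long)mono_stamp(),
	    (unsigned long long)mono_stamp());
	return (0);
}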
2979 2981
2980 2982 /*
2981 2983 * Update the mnttab modification time and wake up any waiters for
2982 2984 * mnttab changes
2983 2985 */
2984 2986 void
2985 2987 vfs_mnttab_modtimeupd()
2986 2988 {
2987 2989 hrtime_t oldhrt, newhrt;
2988 2990
2989 2991 ASSERT(RW_WRITE_HELD(&vfslist));
2990 2992 oldhrt = ts2hrt(&vfs_mnttab_mtime);
2991 2993 gethrestime(&vfs_mnttab_mtime);
2992 2994 newhrt = ts2hrt(&vfs_mnttab_mtime);
2993 2995 if (oldhrt == (hrtime_t)0)
2994 2996 vfs_mnttab_ctime = vfs_mnttab_mtime;
2995 2997 /*
2996 2998 * Attempt to provide unique mtime (like uniqtime but not).
2997 2999 */
2998 3000 if (newhrt == oldhrt) {
2999 3001 newhrt++;
3000 3002 hrt2ts(newhrt, &vfs_mnttab_mtime);
3001 3003 }
3002 3004 pollwakeup(&vfs_pollhd, (short)POLLRDBAND);
3003 3005 vfs_mnttab_writeop();
3004 3006 }
3005 3007
3006 3008 int
3007 3009 dounmount(struct vfs *vfsp, int flag, cred_t *cr)
3008 3010 {
3009 3011 vnode_t *coveredvp;
3010 3012 int error;
3011 3013 extern void teardown_vopstats(vfs_t *);
3012 3014
3013 3015 /*
3014 3016 * Get covered vnode. This will be NULL if the vfs is not linked
3015 3017 	 * into the file system name space (i.e., domount() with MS_NOSPLICE).
3016 3018 */
3017 3019 coveredvp = vfsp->vfs_vnodecovered;
3018 3020 ASSERT(coveredvp == NULL || vn_vfswlock_held(coveredvp));
3019 3021
3020 3022 /*
3021 3023 * Purge all dnlc entries for this vfs.
3022 3024 */
3023 3025 (void) dnlc_purge_vfsp(vfsp, 0);
3024 3026
3025 3027 /* For forcible umount, skip VFS_SYNC() since it may hang */
3026 3028 if ((flag & MS_FORCE) == 0)
3027 3029 (void) VFS_SYNC(vfsp, 0, cr);
3028 3030
3029 3031 /*
3030 3032 * Lock the vfs to maintain fs status quo during unmount. This
3031 3033 * has to be done after the sync because ufs_update tries to acquire
3032 3034 * the vfs_reflock.
3033 3035 */
3034 3036 vfs_lock_wait(vfsp);
3035 3037
3036 3038 if (error = VFS_UNMOUNT(vfsp, flag, cr)) {
3037 3039 vfs_unlock(vfsp);
3038 3040 if (coveredvp != NULL)
3039 3041 vn_vfsunlock(coveredvp);
3040 3042 } else if (coveredvp != NULL) {
3041 3043 teardown_vopstats(vfsp);
3042 3044 /*
3043 3045 * vfs_remove() will do a VN_RELE(vfsp->vfs_vnodecovered)
3044 3046 * when it frees vfsp so we do a VN_HOLD() so we can
3045 3047 * continue to use coveredvp afterwards.
3046 3048 */
3047 3049 VN_HOLD(coveredvp);
3048 3050 vfs_remove(vfsp);
3049 3051 vn_vfsunlock(coveredvp);
3050 3052 VN_RELE(coveredvp);
3051 3053 } else {
3052 3054 teardown_vopstats(vfsp);
3053 3055 /*
3054 3056 * Release the reference to vfs that is not linked
3055 3057 * into the name space.
3056 3058 */
3057 3059 vfs_unlock(vfsp);
3058 3060 VFS_RELE(vfsp);
3059 3061 }
3060 3062 return (error);
3061 3063 }
3062 3064
3063 3065
3064 3066 /*
3065 3067 * Vfs_unmountall() is called by uadmin() to unmount all
3066 3068 * mounted file systems (except the root file system) during shutdown.
3067 3069 * It follows the existing locking protocol when traversing the vfs list
3068 3070 * to sync and unmount vfses. Even though there should be no
3069 3071 * other thread running while the system is shutting down, it is prudent
3070 3072 * to still follow the locking protocol.
3071 3073 */
3072 3074 void
3073 3075 vfs_unmountall(void)
3074 3076 {
3075 3077 struct vfs *vfsp;
3076 3078 struct vfs *prev_vfsp = NULL;
3077 3079 int error;
3078 3080
3079 3081 /*
3080 3082 * Toss all dnlc entries now so that the per-vfs sync
3081 3083 * and unmount operations don't have to slog through
3082 3084 * a bunch of uninteresting vnodes over and over again.
3083 3085 */
3084 3086 dnlc_purge();
3085 3087
3086 3088 vfs_list_lock();
3087 3089 for (vfsp = rootvfs->vfs_prev; vfsp != rootvfs; vfsp = prev_vfsp) {
3088 3090 prev_vfsp = vfsp->vfs_prev;
3089 3091
3090 3092 if (vfs_lock(vfsp) != 0)
3091 3093 continue;
3092 3094 error = vn_vfswlock(vfsp->vfs_vnodecovered);
3093 3095 vfs_unlock(vfsp);
3094 3096 if (error)
3095 3097 continue;
3096 3098
3097 3099 vfs_list_unlock();
3098 3100
3099 3101 (void) VFS_SYNC(vfsp, SYNC_CLOSE, CRED());
3100 3102 (void) dounmount(vfsp, 0, CRED());
3101 3103
3102 3104 /*
3103 3105 * Since we dropped the vfslist lock above we must
3104 3106 		 * verify that prev_vfsp still exists, else start over.
3105 3107 */
3106 3108 vfs_list_lock();
3107 3109 for (vfsp = rootvfs->vfs_prev;
3108 3110 vfsp != rootvfs; vfsp = vfsp->vfs_prev)
3109 3111 if (vfsp == prev_vfsp)
3110 3112 break;
3111 3113 if (vfsp == rootvfs && prev_vfsp != rootvfs)
3112 3114 prev_vfsp = rootvfs->vfs_prev;
3113 3115 }
3114 3116 vfs_list_unlock();
3115 3117 }
3116 3118
3117 3119 /*
3118 3120 * Called to add an entry to the end of the vfs mount in progress list
3119 3121 */
3120 3122 void
3121 3123 vfs_addmip(dev_t dev, struct vfs *vfsp)
3122 3124 {
3123 3125 struct ipmnt *mipp;
3124 3126
3125 3127 mipp = (struct ipmnt *)kmem_alloc(sizeof (struct ipmnt), KM_SLEEP);
3126 3128 mipp->mip_next = NULL;
3127 3129 mipp->mip_dev = dev;
3128 3130 mipp->mip_vfsp = vfsp;
3129 3131 mutex_enter(&vfs_miplist_mutex);
3130 3132 if (vfs_miplist_end != NULL)
3131 3133 vfs_miplist_end->mip_next = mipp;
3132 3134 else
3133 3135 vfs_miplist = mipp;
3134 3136 vfs_miplist_end = mipp;
3135 3137 mutex_exit(&vfs_miplist_mutex);
3136 3138 }
3137 3139
3138 3140 /*
3139 3141 * Called to remove an entry from the mount in progress list
3140 3142 * Either because the mount completed or it failed.
3141 3143 */
3142 3144 void
3143 3145 vfs_delmip(struct vfs *vfsp)
3144 3146 {
3145 3147 struct ipmnt *mipp, *mipprev;
3146 3148
3147 3149 mutex_enter(&vfs_miplist_mutex);
3148 3150 mipprev = NULL;
3149 3151 for (mipp = vfs_miplist;
3150 3152 mipp && mipp->mip_vfsp != vfsp; mipp = mipp->mip_next) {
3151 3153 mipprev = mipp;
3152 3154 }
3153 3155 if (mipp == NULL)
3154 3156 return; /* shouldn't happen */
3155 3157 if (mipp == vfs_miplist_end)
3156 3158 vfs_miplist_end = mipprev;
3157 3159 if (mipprev == NULL)
3158 3160 vfs_miplist = mipp->mip_next;
3159 3161 else
3160 3162 mipprev->mip_next = mipp->mip_next;
3161 3163 mutex_exit(&vfs_miplist_mutex);
3162 3164 kmem_free(mipp, sizeof (struct ipmnt));
3163 3165 }
3164 3166
3165 3167 /*
3166 3168 * vfs_add is called by a specific filesystem's mount routine to add
3167 3169 * the new vfs into the vfs list/hash and to cover the mounted-on vnode.
3168 3170 * The vfs should already have been locked by the caller.
3169 3171 *
3170 3172 * coveredvp is NULL if this is the root.
3171 3173 */
3172 3174 void
3173 3175 vfs_add(vnode_t *coveredvp, struct vfs *vfsp, int mflag)
3174 3176 {
3175 3177 int newflag;
3176 3178
3177 3179 ASSERT(vfs_lock_held(vfsp));
3178 3180 VFS_HOLD(vfsp);
3179 3181 newflag = vfsp->vfs_flag;
3180 3182 if (mflag & MS_RDONLY)
3181 3183 newflag |= VFS_RDONLY;
3182 3184 else
3183 3185 newflag &= ~VFS_RDONLY;
3184 3186 if (mflag & MS_NOSUID)
3185 3187 newflag |= (VFS_NOSETUID|VFS_NODEVICES);
3186 3188 else
3187 3189 newflag &= ~(VFS_NOSETUID|VFS_NODEVICES);
3188 3190 if (mflag & MS_NOMNTTAB)
3189 3191 newflag |= VFS_NOMNTTAB;
3190 3192 else
3191 3193 newflag &= ~VFS_NOMNTTAB;
3192 3194
3193 3195 if (coveredvp != NULL) {
3194 3196 ASSERT(vn_vfswlock_held(coveredvp));
3195 3197 coveredvp->v_vfsmountedhere = vfsp;
3196 3198 VN_HOLD(coveredvp);
3197 3199 }
3198 3200 vfsp->vfs_vnodecovered = coveredvp;
3199 3201 vfsp->vfs_flag = newflag;
3200 3202
3201 3203 vfs_list_add(vfsp);
3202 3204 }
3203 3205
3204 3206 /*
3205 3207 * Remove a vfs from the vfs list, null out the pointer from the
3206 3208 * covered vnode to the vfs (v_vfsmountedhere), and null out the pointer
3207 3209 * from the vfs to the covered vnode (vfs_vnodecovered). Release the
3208 3210 * reference to the vfs and to the covered vnode.
3209 3211 *
3210 3212 * Called from dounmount after it's confirmed with the file system
3211 3213 * that the unmount is legal.
3212 3214 */
3213 3215 void
3214 3216 vfs_remove(struct vfs *vfsp)
3215 3217 {
3216 3218 vnode_t *vp;
3217 3219
3218 3220 ASSERT(vfs_lock_held(vfsp));
3219 3221
3220 3222 /*
3221 3223 * Can't unmount root. Should never happen because fs will
3222 3224 * be busy.
3223 3225 */
3224 3226 if (vfsp == rootvfs)
3225 3227 panic("vfs_remove: unmounting root");
3226 3228
3227 3229 vfs_list_remove(vfsp);
3228 3230
3229 3231 /*
3230 3232 * Unhook from the file system name space.
3231 3233 */
3232 3234 vp = vfsp->vfs_vnodecovered;
3233 3235 ASSERT(vn_vfswlock_held(vp));
3234 3236 vp->v_vfsmountedhere = NULL;
3235 3237 vfsp->vfs_vnodecovered = NULL;
3236 3238 VN_RELE(vp);
3237 3239
3238 3240 /*
3239 3241 * Release lock and wakeup anybody waiting.
3240 3242 */
3241 3243 vfs_unlock(vfsp);
3242 3244 VFS_RELE(vfsp);
3243 3245 }
3244 3246
3245 3247 /*
3246 3248 * Lock a filesystem to prevent access to it while mounting,
3247 3249 * unmounting and syncing. Return EBUSY immediately if lock
3248 3250 * can't be acquired.
3249 3251 */
3250 3252 int
3251 3253 vfs_lock(vfs_t *vfsp)
3252 3254 {
3253 3255 vn_vfslocks_entry_t *vpvfsentry;
3254 3256
3255 3257 vpvfsentry = vn_vfslocks_getlock(vfsp);
3256 3258 if (rwst_tryenter(&vpvfsentry->ve_lock, RW_WRITER))
3257 3259 return (0);
3258 3260
3259 3261 vn_vfslocks_rele(vpvfsentry);
3260 3262 return (EBUSY);
3261 3263 }
3262 3264
3263 3265 int
3264 3266 vfs_rlock(vfs_t *vfsp)
3265 3267 {
3266 3268 vn_vfslocks_entry_t *vpvfsentry;
3267 3269
3268 3270 vpvfsentry = vn_vfslocks_getlock(vfsp);
3269 3271
3270 3272 if (rwst_tryenter(&vpvfsentry->ve_lock, RW_READER))
3271 3273 return (0);
3272 3274
3273 3275 vn_vfslocks_rele(vpvfsentry);
3274 3276 return (EBUSY);
3275 3277 }
3276 3278
3277 3279 void
3278 3280 vfs_lock_wait(vfs_t *vfsp)
3279 3281 {
3280 3282 vn_vfslocks_entry_t *vpvfsentry;
3281 3283
3282 3284 vpvfsentry = vn_vfslocks_getlock(vfsp);
3283 3285 rwst_enter(&vpvfsentry->ve_lock, RW_WRITER);
3284 3286 }
3285 3287
3286 3288 void
3287 3289 vfs_rlock_wait(vfs_t *vfsp)
3288 3290 {
3289 3291 vn_vfslocks_entry_t *vpvfsentry;
3290 3292
3291 3293 vpvfsentry = vn_vfslocks_getlock(vfsp);
3292 3294 rwst_enter(&vpvfsentry->ve_lock, RW_READER);
3293 3295 }
3294 3296
3295 3297 /*
3296 3298 * Unlock a locked filesystem.
3297 3299 */
3298 3300 void
3299 3301 vfs_unlock(vfs_t *vfsp)
3300 3302 {
3301 3303 vn_vfslocks_entry_t *vpvfsentry;
3302 3304
3303 3305 /*
3304 3306 * vfs_unlock will mimic sema_v behaviour to fix 4748018.
3305 3307 	 * These changes should remain as they are for patch releases.
3306 3308 */
3307 3309 if (panicstr)
3308 3310 return;
3309 3311
3310 3312 /*
3311 3313 * ve_refcount needs to be dropped twice here.
3312 3314 	 * 1. To release the reference taken by the call to vn_vfslocks_getlock()
3313 3315 	 * 2. To release the reference taken by the locking routines like
3314 3316 	 * vfs_rlock_wait/vfs_lock_wait/vfs_lock, etc.
3315 3317 */
3316 3318
3317 3319 vpvfsentry = vn_vfslocks_getlock(vfsp);
3318 3320 vn_vfslocks_rele(vpvfsentry);
3319 3321
3320 3322 rwst_exit(&vpvfsentry->ve_lock);
3321 3323 vn_vfslocks_rele(vpvfsentry);
3322 3324 }
3323 3325
3324 3326 /*
3325 3327 * Utility routine that allows a filesystem to construct its
3326 3328 * fsid in "the usual way" - by munging some underlying dev_t and
3327 3329 * the filesystem type number into the 64-bit fsid. Note that
3328 3330 * this implicitly relies on dev_t persistence to make filesystem
3329 3331 * id's persistent.
3330 3332 *
3331 3333 * There's nothing to prevent an individual fs from constructing its
3332 3334 * fsid in a different way, and indeed they should.
3333 3335 *
3334 3336 * Since we want fsids to be 32-bit quantities (so that they can be
3335 3337 * exported identically by either 32-bit or 64-bit APIs, as well as
3336 3338 * the fact that fsid's are "known" to NFS), we compress the device
3337 3339 * number given down to 32-bits, and panic if that isn't possible.
3338 3340 */
3339 3341 void
3340 3342 vfs_make_fsid(fsid_t *fsi, dev_t dev, int val)
3341 3343 {
3342 3344 if (!cmpldev((dev32_t *)&fsi->val[0], dev))
3343 3345 panic("device number too big for fsid!");
3344 3346 fsi->val[1] = val;
3345 3347 }
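
As a usage note, a file system's mount routine would typically construct its fsid "the usual way" from its backing device and its fstype index; the fragment below is a hypothetical sketch of such a caller, not code from this file.

	/* Hypothetical mount-time usage of vfs_make_fsid(). */
	vfs_make_fsid(&vfsp->vfs_fsid, dev, vfsp->vfs_fstype);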
3346 3348
3347 3349 int
3348 3350 vfs_lock_held(vfs_t *vfsp)
3349 3351 {
3350 3352 int held;
3351 3353 vn_vfslocks_entry_t *vpvfsentry;
3352 3354
3353 3355 /*
3354 3356 * vfs_lock_held will mimic sema_held behaviour
3355 3357 	 * if panicstr is set. These changes should remain
3356 3358 	 * as they are for patch releases.
3357 3359 */
3358 3360 if (panicstr)
3359 3361 return (1);
3360 3362
3361 3363 vpvfsentry = vn_vfslocks_getlock(vfsp);
3362 3364 held = rwst_lock_held(&vpvfsentry->ve_lock, RW_WRITER);
3363 3365
3364 3366 vn_vfslocks_rele(vpvfsentry);
3365 3367 return (held);
3366 3368 }
3367 3369
3368 3370 struct _kthread *
3369 3371 vfs_lock_owner(vfs_t *vfsp)
3370 3372 {
3371 3373 struct _kthread *owner;
3372 3374 vn_vfslocks_entry_t *vpvfsentry;
3373 3375
3374 3376 /*
3375 3377 	 * vfs_lock_owner will mimic sema_held behaviour
3376 3378 	 * if panicstr is set. These changes should remain
3377 3379 	 * as they are for patch releases.
3378 3380 */
3379 3381 if (panicstr)
3380 3382 return (NULL);
3381 3383
3382 3384 vpvfsentry = vn_vfslocks_getlock(vfsp);
3383 3385 owner = rwst_owner(&vpvfsentry->ve_lock);
3384 3386
3385 3387 vn_vfslocks_rele(vpvfsentry);
3386 3388 return (owner);
3387 3389 }
3388 3390
3389 3391 /*
3390 3392 * vfs list locking.
3391 3393 *
3392 3394 * Rather than manipulate the vfslist lock directly, we abstract into lock
3393 3395 * and unlock routines to allow the locking implementation to be changed for
3394 3396 * clustering.
3395 3397 *
3396 3398 * Whenever the vfs list is modified through its hash links, the overall list
3397 3399 * lock must be obtained before locking the relevant hash bucket. But to see
3398 3400 * whether a given vfs is on the list, it suffices to obtain the lock for the
3399 3401 * hash bucket without getting the overall list lock. (See getvfs() below.)
3400 3402 */
3401 3403
3402 3404 void
3403 3405 vfs_list_lock()
3404 3406 {
3405 3407 rw_enter(&vfslist, RW_WRITER);
3406 3408 }
3407 3409
3408 3410 void
3409 3411 vfs_list_read_lock()
3410 3412 {
3411 3413 rw_enter(&vfslist, RW_READER);
3412 3414 }
3413 3415
3414 3416 void
3415 3417 vfs_list_unlock()
3416 3418 {
3417 3419 rw_exit(&vfslist);
3418 3420 }
3419 3421
3420 3422 /*
3421 3423 * Low level worker routines for adding entries to and removing entries from
3422 3424 * the vfs list.
3423 3425 */
3424 3426
3425 3427 static void
3426 3428 vfs_hash_add(struct vfs *vfsp, int insert_at_head)
3427 3429 {
3428 3430 int vhno;
3429 3431 struct vfs **hp;
3430 3432 dev_t dev;
3431 3433
3432 3434 ASSERT(RW_WRITE_HELD(&vfslist));
3433 3435
3434 3436 dev = expldev(vfsp->vfs_fsid.val[0]);
3435 3437 vhno = VFSHASH(getmajor(dev), getminor(dev));
3436 3438
3437 3439 mutex_enter(&rvfs_list[vhno].rvfs_lock);
3438 3440
3439 3441 /*
3440 3442 * Link into the hash table, inserting it at the end, so that LOFS
3441 3443 * with the same fsid as UFS (or other) file systems will not hide the
3442 3444 * UFS.
3443 3445 */
3444 3446 if (insert_at_head) {
3445 3447 vfsp->vfs_hash = rvfs_list[vhno].rvfs_head;
3446 3448 rvfs_list[vhno].rvfs_head = vfsp;
3447 3449 } else {
3448 3450 for (hp = &rvfs_list[vhno].rvfs_head; *hp != NULL;
3449 3451 hp = &(*hp)->vfs_hash)
3450 3452 continue;
3451 3453 /*
3452 3454 * hp now contains the address of the pointer to update
3453 3455 * to effect the insertion.
3454 3456 */
3455 3457 vfsp->vfs_hash = NULL;
3456 3458 *hp = vfsp;
3457 3459 }
3458 3460
3459 3461 rvfs_list[vhno].rvfs_len++;
3460 3462 mutex_exit(&rvfs_list[vhno].rvfs_lock);
3461 3463 }
3462 3464
3463 3465
3464 3466 static void
3465 3467 vfs_hash_remove(struct vfs *vfsp)
3466 3468 {
3467 3469 int vhno;
3468 3470 struct vfs *tvfsp;
3469 3471 dev_t dev;
3470 3472
3471 3473 ASSERT(RW_WRITE_HELD(&vfslist));
3472 3474
3473 3475 dev = expldev(vfsp->vfs_fsid.val[0]);
3474 3476 vhno = VFSHASH(getmajor(dev), getminor(dev));
3475 3477
3476 3478 mutex_enter(&rvfs_list[vhno].rvfs_lock);
3477 3479
3478 3480 /*
3479 3481 * Remove from hash.
3480 3482 */
3481 3483 if (rvfs_list[vhno].rvfs_head == vfsp) {
3482 3484 rvfs_list[vhno].rvfs_head = vfsp->vfs_hash;
3483 3485 rvfs_list[vhno].rvfs_len--;
3484 3486 goto foundit;
3485 3487 }
3486 3488 for (tvfsp = rvfs_list[vhno].rvfs_head; tvfsp != NULL;
3487 3489 tvfsp = tvfsp->vfs_hash) {
3488 3490 if (tvfsp->vfs_hash == vfsp) {
3489 3491 tvfsp->vfs_hash = vfsp->vfs_hash;
3490 3492 rvfs_list[vhno].rvfs_len--;
3491 3493 goto foundit;
3492 3494 }
3493 3495 }
3494 3496 cmn_err(CE_WARN, "vfs_list_remove: vfs not found in hash");
3495 3497
3496 3498 foundit:
3497 3499
3498 3500 mutex_exit(&rvfs_list[vhno].rvfs_lock);
3499 3501 }
3500 3502
3501 3503
3502 3504 void
3503 3505 vfs_list_add(struct vfs *vfsp)
3504 3506 {
3505 3507 zone_t *zone;
3506 3508
3507 3509 /*
3508 3510 * Typically, the vfs_t will have been created on behalf of the file
3509 3511 * system in vfs_init, where it will have been provided with a
3510 3512 * vfs_impl_t. This, however, might be lacking if the vfs_t was created
3511 3513	 * by an unbundled file system. We therefore check for this case
3512 3514 * before stamping the vfs_t with its creation time for the benefit of
3513 3515 * mntfs.
3514 3516 */
3515 3517 if (vfsp->vfs_implp == NULL)
3516 3518 vfsimpl_setup(vfsp);
3517 3519 vfs_mono_time(&vfsp->vfs_hrctime);
3518 3520
3519 3521 /*
3520 3522 * The zone that owns the mount is the one that performed the mount.
3521 3523 * Note that this isn't necessarily the same as the zone mounted into.
3522 3524 * The corresponding zone_rele_ref() will be done when the vfs_t
3523 3525 * is being free'd.
3524 3526 */
3525 3527 vfsp->vfs_zone = curproc->p_zone;
3526 3528 zone_init_ref(&vfsp->vfs_implp->vi_zone_ref);
3527 3529 zone_hold_ref(vfsp->vfs_zone, &vfsp->vfs_implp->vi_zone_ref,
3528 3530 ZONE_REF_VFS);
3529 3531
3530 3532 /*
3531 3533 * Find the zone mounted into, and put this mount on its vfs list.
3532 3534 */
3533 3535 zone = zone_find_by_path(refstr_value(vfsp->vfs_mntpt));
3534 3536 ASSERT(zone != NULL);
3535 3537 /*
3536 3538 * Special casing for the root vfs. This structure is allocated
3537 3539 * statically and hooked onto rootvfs at link time. During the
3538 3540 * vfs_mountroot call at system startup time, the root file system's
3539 3541 * VFS_MOUNTROOT routine will call vfs_add with this root vfs struct
3540 3542 * as argument. The code below must detect and handle this special
3541 3543 * case. The only apparent justification for this special casing is
3542 3544 * to ensure that the root file system appears at the head of the
3543 3545 * list.
3544 3546 *
3545 3547 * XXX: I'm assuming that it's ok to do normal list locking when
3546 3548 * adding the entry for the root file system (this used to be
3547 3549 * done with no locks held).
3548 3550 */
3549 3551 vfs_list_lock();
3550 3552 /*
3551 3553 * Link into the vfs list proper.
3552 3554 */
3553 3555 if (vfsp == &root) {
3554 3556 /*
3555 3557 * Assert: This vfs is already on the list as its first entry.
3556 3558 * Thus, there's nothing to do.
3557 3559 */
3558 3560 ASSERT(rootvfs == vfsp);
3559 3561 /*
3560 3562 * Add it to the head of the global zone's vfslist.
3561 3563 */
3562 3564 ASSERT(zone == global_zone);
3563 3565 ASSERT(zone->zone_vfslist == NULL);
3564 3566 zone->zone_vfslist = vfsp;
3565 3567 } else {
3566 3568 /*
3567 3569 * Link to end of list using vfs_prev (as rootvfs is now a
3568 3570 * doubly linked circular list) so list is in mount order for
3569 3571 * mnttab use.
3570 3572 */
3571 3573 rootvfs->vfs_prev->vfs_next = vfsp;
3572 3574 vfsp->vfs_prev = rootvfs->vfs_prev;
3573 3575 rootvfs->vfs_prev = vfsp;
3574 3576 vfsp->vfs_next = rootvfs;
3575 3577
3576 3578 /*
3577 3579 * Do it again for the zone-private list (which may be NULL).
3578 3580 */
3579 3581 if (zone->zone_vfslist == NULL) {
3580 3582 ASSERT(zone != global_zone);
3581 3583 zone->zone_vfslist = vfsp;
3582 3584 } else {
3583 3585 zone->zone_vfslist->vfs_zone_prev->vfs_zone_next = vfsp;
3584 3586 vfsp->vfs_zone_prev = zone->zone_vfslist->vfs_zone_prev;
3585 3587 zone->zone_vfslist->vfs_zone_prev = vfsp;
3586 3588 vfsp->vfs_zone_next = zone->zone_vfslist;
3587 3589 }
3588 3590 }
3589 3591
3590 3592 /*
3591 3593 * Link into the hash table, inserting it at the end, so that LOFS
3592 3594 * with the same fsid as UFS (or other) file systems will not hide
3593 3595 * the UFS.
3594 3596 */
3595 3597 vfs_hash_add(vfsp, 0);
3596 3598
3597 3599 /*
3598 3600 * update the mnttab modification time
3599 3601 */
3600 3602 vfs_mnttab_modtimeupd();
3601 3603 vfs_list_unlock();
3602 3604 zone_rele(zone);
3603 3605 }
3604 3606
3605 3607 void
3606 3608 vfs_list_remove(struct vfs *vfsp)
3607 3609 {
3608 3610 zone_t *zone;
3609 3611
3610 3612 zone = zone_find_by_path(refstr_value(vfsp->vfs_mntpt));
3611 3613 ASSERT(zone != NULL);
3612 3614 /*
3613 3615 * Callers are responsible for preventing attempts to unmount the
3614 3616 * root.
3615 3617 */
3616 3618 ASSERT(vfsp != rootvfs);
3617 3619
3618 3620 vfs_list_lock();
3619 3621
3620 3622 /*
3621 3623 * Remove from hash.
3622 3624 */
3623 3625 vfs_hash_remove(vfsp);
3624 3626
3625 3627 /*
3626 3628 * Remove from vfs list.
3627 3629 */
3628 3630 vfsp->vfs_prev->vfs_next = vfsp->vfs_next;
3629 3631 vfsp->vfs_next->vfs_prev = vfsp->vfs_prev;
3630 3632 vfsp->vfs_next = vfsp->vfs_prev = NULL;
3631 3633
3632 3634 /*
3633 3635 * Remove from zone-specific vfs list.
3634 3636 */
3635 3637 if (zone->zone_vfslist == vfsp)
3636 3638 zone->zone_vfslist = vfsp->vfs_zone_next;
3637 3639
3638 3640 if (vfsp->vfs_zone_next == vfsp) {
3639 3641 ASSERT(vfsp->vfs_zone_prev == vfsp);
3640 3642 ASSERT(zone->zone_vfslist == vfsp);
3641 3643 zone->zone_vfslist = NULL;
3642 3644 }
3643 3645
3644 3646 vfsp->vfs_zone_prev->vfs_zone_next = vfsp->vfs_zone_next;
3645 3647 vfsp->vfs_zone_next->vfs_zone_prev = vfsp->vfs_zone_prev;
3646 3648 vfsp->vfs_zone_next = vfsp->vfs_zone_prev = NULL;
3647 3649
3648 3650 /*
3649 3651 * update the mnttab modification time
3650 3652 */
3651 3653 vfs_mnttab_modtimeupd();
3652 3654 vfs_list_unlock();
3653 3655 zone_rele(zone);
3654 3656 }
3655 3657
3656 3658 struct vfs *
3657 3659 getvfs(fsid_t *fsid)
3658 3660 {
3659 3661 struct vfs *vfsp;
3660 3662 int val0 = fsid->val[0];
3661 3663 int val1 = fsid->val[1];
3662 3664 dev_t dev = expldev(val0);
3663 3665 int vhno = VFSHASH(getmajor(dev), getminor(dev));
3664 3666 kmutex_t *hmp = &rvfs_list[vhno].rvfs_lock;
3665 3667
3666 3668 mutex_enter(hmp);
3667 3669 for (vfsp = rvfs_list[vhno].rvfs_head; vfsp; vfsp = vfsp->vfs_hash) {
3668 3670 if (vfsp->vfs_fsid.val[0] == val0 &&
3669 3671 vfsp->vfs_fsid.val[1] == val1) {
3670 3672 VFS_HOLD(vfsp);
3671 3673 mutex_exit(hmp);
3672 3674 return (vfsp);
3673 3675 }
3674 3676 }
3675 3677 mutex_exit(hmp);
3676 3678 return (NULL);
3677 3679 }
3678 3680
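/*
 * Illustrative sketch, not from the original file: a hypothetical caller
 * mapping an fsid back to a held vfs_t with getvfs().  The returned vfs
 * carries a hold, so the caller must VFS_RELE() it; ESTALE as the failure
 * errno is an assumption.
 */
static int
example_lookup_by_fsid(fsid_t *fsid)
{
	struct vfs *vfsp;

	if ((vfsp = getvfs(fsid)) == NULL)
		return (ESTALE);

	/* ... use vfsp while the hold is in place ... */

	VFS_RELE(vfsp);
	return (0);
}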
3679 3681 /*
3680 3682 * Search the vfs mount in progress list for a specified device/vfs entry.
3681 3683 * Returns 0 if the first entry in the list that the device matches has the
3682 3684 * given vfs pointer as well. If the device matches but a different vfs
3683 3685 * pointer is encountered in the list before the given vfs pointer then
3684 3686 * a 1 is returned.
3685 3687 */
3686 3688
3687 3689 int
3688 3690 vfs_devmounting(dev_t dev, struct vfs *vfsp)
3689 3691 {
3690 3692 int retval = 0;
3691 3693 struct ipmnt *mipp;
3692 3694
3693 3695 mutex_enter(&vfs_miplist_mutex);
3694 3696 for (mipp = vfs_miplist; mipp != NULL; mipp = mipp->mip_next) {
3695 3697 if (mipp->mip_dev == dev) {
3696 3698 if (mipp->mip_vfsp != vfsp)
3697 3699 retval = 1;
3698 3700 break;
3699 3701 }
3700 3702 }
3701 3703 mutex_exit(&vfs_miplist_mutex);
3702 3704 return (retval);
3703 3705 }
3704 3706
3705 3707 /*
3706 3708	 * Search the vfs list for a specified device. Returns 1 if an entry is
3707 3709	 * found, or 0 if no suitable entry is found.
3708 3710 */
3709 3711
3710 3712 int
3711 3713 vfs_devismounted(dev_t dev)
3712 3714 {
3713 3715 struct vfs *vfsp;
3714 3716 int found;
3715 3717
3716 3718 vfs_list_read_lock();
3717 3719 vfsp = rootvfs;
3718 3720 found = 0;
3719 3721 do {
3720 3722 if (vfsp->vfs_dev == dev) {
3721 3723 found = 1;
3722 3724 break;
3723 3725 }
3724 3726 vfsp = vfsp->vfs_next;
3725 3727 } while (vfsp != rootvfs);
3726 3728
3727 3729 vfs_list_unlock();
3728 3730 return (found);
3729 3731 }
3730 3732
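/*
 * Illustrative sketch, not from the original file: a hypothetical
 * pre-mount check that combines vfs_devmounting() and vfs_devismounted()
 * to refuse a device that is already mounted, or that another vfs is in
 * the middle of mounting.  EBUSY as the errno is an assumption.
 */
static int
example_device_available(dev_t dev, struct vfs *vfsp)
{
	if (vfs_devmounting(dev, vfsp))
		return (EBUSY);	/* another vfs is mounting this device */
	if (vfs_devismounted(dev))
		return (EBUSY);	/* device is already mounted */
	return (0);
}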
3731 3733 /*
3732 3734 * Search the vfs list for a specified device. Returns a pointer to it
3733 3735 * or NULL if no suitable entry is found. The caller of this routine
3734 3736 * is responsible for releasing the returned vfs pointer.
3735 3737 */
3736 3738 struct vfs *
3737 3739 vfs_dev2vfsp(dev_t dev)
3738 3740 {
3739 3741 struct vfs *vfsp;
3740 3742 int found;
3741 3743
3742 3744 vfs_list_read_lock();
3743 3745 vfsp = rootvfs;
3744 3746 found = 0;
3745 3747 do {
3746 3748 /*
3747 3749 * The following could be made more efficient by making
3748 3750 * the entire loop use vfs_zone_next if the call is from
3749 3751 * a zone. The only callers, however, ustat(2) and
3750 3752 * umount2(2), don't seem to justify the added
3751 3753 * complexity at present.
3752 3754 */
3753 3755 if (vfsp->vfs_dev == dev &&
3754 3756 ZONE_PATH_VISIBLE(refstr_value(vfsp->vfs_mntpt),
3755 3757 curproc->p_zone)) {
3756 3758 VFS_HOLD(vfsp);
3757 3759 found = 1;
3758 3760 break;
3759 3761 }
3760 3762 vfsp = vfsp->vfs_next;
3761 3763 } while (vfsp != rootvfs);
3762 3764 vfs_list_unlock();
3763 3765 return (found ? vfsp: NULL);
3764 3766 }
3765 3767
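/*
 * Illustrative sketch, not from the original file: a hypothetical
 * ustat(2)-style caller of vfs_dev2vfsp().  The returned vfs is held, so
 * the caller is responsible for the matching VFS_RELE(); the errno is an
 * assumption.
 */
static int
example_statvfs_by_dev(dev_t dev, struct statvfs64 *sbp)
{
	struct vfs *vfsp;
	int error;

	if ((vfsp = vfs_dev2vfsp(dev)) == NULL)
		return (EINVAL);

	error = VFS_STATVFS(vfsp, sbp);
	VFS_RELE(vfsp);
	return (error);
}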
3766 3768 /*
3767 3769 * Search the vfs list for a specified mntpoint. Returns a pointer to it
3768 3770 * or NULL if no suitable entry is found. The caller of this routine
3769 3771 * is responsible for releasing the returned vfs pointer.
3770 3772 *
3771 3773 * Note that if multiple mntpoints match, the last one matching is
3772 3774 * returned in an attempt to return the "top" mount when overlay
3773 3775 * mounts are covering the same mount point. This is accomplished by starting
3774 3776 * at the end of the list and working our way backwards, stopping at the first
3775 3777 * matching mount.
3776 3778 */
3777 3779 struct vfs *
3778 3780 vfs_mntpoint2vfsp(const char *mp)
3779 3781 {
3780 3782 struct vfs *vfsp;
3781 3783 struct vfs *retvfsp = NULL;
3782 3784 zone_t *zone = curproc->p_zone;
3783 3785 struct vfs *list;
3784 3786
3785 3787 vfs_list_read_lock();
3786 3788 if (getzoneid() == GLOBAL_ZONEID) {
3787 3789 /*
3788 3790 * The global zone may see filesystems in any zone.
3789 3791 */
3790 3792 vfsp = rootvfs->vfs_prev;
3791 3793 do {
3792 3794 if (strcmp(refstr_value(vfsp->vfs_mntpt), mp) == 0) {
3793 3795 retvfsp = vfsp;
3794 3796 break;
3795 3797 }
3796 3798 vfsp = vfsp->vfs_prev;
3797 3799 } while (vfsp != rootvfs->vfs_prev);
3798 3800 } else if ((list = zone->zone_vfslist) != NULL) {
3799 3801 const char *mntpt;
3800 3802
3801 3803 vfsp = list->vfs_zone_prev;
3802 3804 do {
3803 3805 mntpt = refstr_value(vfsp->vfs_mntpt);
3804 3806 mntpt = ZONE_PATH_TRANSLATE(mntpt, zone);
3805 3807 if (strcmp(mntpt, mp) == 0) {
3806 3808 retvfsp = vfsp;
3807 3809 break;
3808 3810 }
3809 3811 vfsp = vfsp->vfs_zone_prev;
3810 3812 } while (vfsp != list->vfs_zone_prev);
3811 3813 }
3812 3814 if (retvfsp)
3813 3815 VFS_HOLD(retvfsp);
3814 3816 vfs_list_unlock();
3815 3817 return (retvfsp);
3816 3818 }
3817 3819
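/*
 * Illustrative sketch, not from the original file: resolving a mount
 * point string to its covering ("top") vfs.  As noted above, the hold
 * taken by vfs_mntpoint2vfsp() must be dropped by the caller.  The
 * function name and the -1 sentinel are assumptions.
 */
static int
example_mount_is_readonly(const char *mntpt)
{
	struct vfs *vfsp;
	int rdonly;

	if ((vfsp = vfs_mntpoint2vfsp(mntpt)) == NULL)
		return (-1);	/* no such mount visible to this zone */

	rdonly = (vfsp->vfs_flag & VFS_RDONLY) != 0;
	VFS_RELE(vfsp);
	return (rdonly);
}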
3818 3820 /*
3819 3821 * Search the vfs list for a specified vfsops.
3820 3822	 * If a vfs entry is found, return 1; otherwise return 0.
3821 3823 */
3822 3824 int
3823 3825 vfs_opsinuse(vfsops_t *ops)
3824 3826 {
3825 3827 struct vfs *vfsp;
3826 3828 int found;
3827 3829
3828 3830 vfs_list_read_lock();
3829 3831 vfsp = rootvfs;
3830 3832 found = 0;
3831 3833 do {
3832 3834 if (vfs_getops(vfsp) == ops) {
3833 3835 found = 1;
3834 3836 break;
3835 3837 }
3836 3838 vfsp = vfsp->vfs_next;
3837 3839 } while (vfsp != rootvfs);
3838 3840 vfs_list_unlock();
3839 3841 return (found);
3840 3842 }
3841 3843
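/*
 * Illustrative sketch, not from the original file: a hypothetical file
 * system module's _fini() path would typically refuse to tear down while
 * vfs_opsinuse() reports that its vfsops are still referenced by a
 * mounted file system.  EBUSY as the errno is an assumption.
 */
static int
example_fs_fini(vfsops_t *ops)
{
	if (vfs_opsinuse(ops))
		return (EBUSY);	/* still mounted somewhere */

	/* ... safe to free the operations vector and module state ... */
	return (0);
}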
3842 3844 /*
3843 3845 * Allocate an entry in vfssw for a file system type
3844 3846 */
3845 3847 struct vfssw *
3846 3848 allocate_vfssw(const char *type)
3847 3849 {
3848 3850 struct vfssw *vswp;
3849 3851
3850 3852 if (type[0] == '\0' || strlen(type) + 1 > _ST_FSTYPSZ) {
3851 3853 /*
3852 3854 * The vfssw table uses the empty string to identify an
3853 3855 * available entry; we cannot add any type which has
3854 3856 * a leading NUL. The string length is limited to
3855 3857 * the size of the st_fstype array in struct stat.
3856 3858 */
3857 3859 return (NULL);
3858 3860 }
3859 3861
3860 3862 ASSERT(VFSSW_WRITE_LOCKED());
3861 3863 for (vswp = &vfssw[1]; vswp < &vfssw[nfstype]; vswp++)
3862 3864 if (!ALLOCATED_VFSSW(vswp)) {
3863 3865 vswp->vsw_name = kmem_alloc(strlen(type) + 1, KM_SLEEP);
3864 3866 (void) strcpy(vswp->vsw_name, type);
3865 3867 ASSERT(vswp->vsw_count == 0);
3866 3868 vswp->vsw_count = 1;
3867 3869 mutex_init(&vswp->vsw_lock, NULL, MUTEX_DEFAULT, NULL);
3868 3870 return (vswp);
3869 3871 }
3870 3872 return (NULL);
3871 3873 }
3872 3874
3873 3875 /*
3874 3876 * Impose additional layer of translation between vfstype names
3875 3877 * and module names in the filesystem.
3876 3878 */
3877 3879 static const char *
3878 3880 vfs_to_modname(const char *vfstype)
3879 3881 {
3880 3882 if (strcmp(vfstype, "proc") == 0) {
3881 3883 vfstype = "procfs";
3882 3884 } else if (strcmp(vfstype, "fd") == 0) {
3883 3885 vfstype = "fdfs";
3884 3886 } else if (strncmp(vfstype, "nfs", 3) == 0) {
3885 3887 vfstype = "nfs";
3886 3888 }
3887 3889
3888 3890 return (vfstype);
3889 3891 }
3890 3892
3891 3893 /*
3892 3894 * Find a vfssw entry given a file system type name.
3893 3895 * Try to autoload the filesystem if it's not found.
3894 3896 * If it's installed, return the vfssw locked to prevent unloading.
3895 3897 */
3896 3898 struct vfssw *
3897 3899 vfs_getvfssw(const char *type)
3898 3900 {
3899 3901 struct vfssw *vswp;
3900 3902 const char *modname;
3901 3903
3902 3904 RLOCK_VFSSW();
3903 3905 vswp = vfs_getvfsswbyname(type);
3904 3906 modname = vfs_to_modname(type);
3905 3907
3906 3908 if (rootdir == NULL) {
3907 3909 /*
3908 3910 * If we haven't yet loaded the root file system, then our
3909 3911 * _init won't be called until later. Allocate vfssw entry,
3910 3912 * because mod_installfs won't be called.
3911 3913 */
3912 3914 if (vswp == NULL) {
3913 3915 RUNLOCK_VFSSW();
3914 3916 WLOCK_VFSSW();
3915 3917 if ((vswp = vfs_getvfsswbyname(type)) == NULL) {
3916 3918 if ((vswp = allocate_vfssw(type)) == NULL) {
3917 3919 WUNLOCK_VFSSW();
3918 3920 return (NULL);
3919 3921 }
3920 3922 }
3921 3923 WUNLOCK_VFSSW();
3922 3924 RLOCK_VFSSW();
3923 3925 }
3924 3926 if (!VFS_INSTALLED(vswp)) {
3925 3927 RUNLOCK_VFSSW();
3926 3928 (void) modloadonly("fs", modname);
3927 3929 } else
3928 3930 RUNLOCK_VFSSW();
3929 3931 return (vswp);
3930 3932 }
3931 3933
3932 3934 /*
3933 3935 * Try to load the filesystem. Before calling modload(), we drop
3934 3936 * our lock on the VFS switch table, and pick it up after the
3935 3937 * module is loaded. However, there is a potential race: the
3936 3938 * module could be unloaded after the call to modload() completes
3937 3939 * but before we pick up the lock and drive on. Therefore,
3938 3940 * we keep reloading the module until we've loaded the module
3939 3941 * _and_ we have the lock on the VFS switch table.
3940 3942 */
3941 3943 while (vswp == NULL || !VFS_INSTALLED(vswp)) {
3942 3944 RUNLOCK_VFSSW();
3943 3945 if (modload("fs", modname) == -1)
3944 3946 return (NULL);
3945 3947 RLOCK_VFSSW();
3946 3948 if (vswp == NULL)
3947 3949 if ((vswp = vfs_getvfsswbyname(type)) == NULL)
3948 3950 break;
3949 3951 }
3950 3952 RUNLOCK_VFSSW();
3951 3953
3952 3954 return (vswp);
3953 3955 }
3954 3956
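/*
 * Illustrative sketch, not from the original file: the hold/release
 * discipline for vfssw entries.  vfs_getvfssw() autoloads the module if
 * necessary and returns the entry with a reference, which the caller
 * drops with vfs_unrefvfssw().  The function name is an assumption.
 */
static int
example_fstype_available(const char *type)
{
	struct vfssw *vswp;

	if ((vswp = vfs_getvfssw(type)) == NULL)
		return (0);	/* unknown or unloadable file system type */

	vfs_unrefvfssw(vswp);
	return (1);
}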
3955 3957 /*
3956 3958 * Find a vfssw entry given a file system type name.
3957 3959 */
3958 3960 struct vfssw *
3959 3961 vfs_getvfsswbyname(const char *type)
3960 3962 {
3961 3963 struct vfssw *vswp;
3962 3964
3963 3965 ASSERT(VFSSW_LOCKED());
3964 3966 if (type == NULL || *type == '\0')
3965 3967 return (NULL);
3966 3968
3967 3969 for (vswp = &vfssw[1]; vswp < &vfssw[nfstype]; vswp++) {
3968 3970 if (strcmp(type, vswp->vsw_name) == 0) {
3969 3971 vfs_refvfssw(vswp);
3970 3972 return (vswp);
3971 3973 }
3972 3974 }
3973 3975
3974 3976 return (NULL);
3975 3977 }
3976 3978
3977 3979 /*
3978 3980 * Find a vfssw entry given a set of vfsops.
3979 3981 */
3980 3982 struct vfssw *
3981 3983 vfs_getvfsswbyvfsops(vfsops_t *vfsops)
3982 3984 {
3983 3985 struct vfssw *vswp;
3984 3986
3985 3987 RLOCK_VFSSW();
3986 3988 for (vswp = &vfssw[1]; vswp < &vfssw[nfstype]; vswp++) {
3987 3989 if (ALLOCATED_VFSSW(vswp) && &vswp->vsw_vfsops == vfsops) {
3988 3990 vfs_refvfssw(vswp);
3989 3991 RUNLOCK_VFSSW();
3990 3992 return (vswp);
3991 3993 }
3992 3994 }
3993 3995 RUNLOCK_VFSSW();
3994 3996
3995 3997 return (NULL);
3996 3998 }
3997 3999
3998 4000 /*
3999 4001 * Reference a vfssw entry.
4000 4002 */
4001 4003 void
4002 4004 vfs_refvfssw(struct vfssw *vswp)
4003 4005 {
4004 4006
4005 4007 mutex_enter(&vswp->vsw_lock);
4006 4008 vswp->vsw_count++;
4007 4009 mutex_exit(&vswp->vsw_lock);
4008 4010 }
4009 4011
4010 4012 /*
4011 4013 * Unreference a vfssw entry.
4012 4014 */
4013 4015 void
4014 4016 vfs_unrefvfssw(struct vfssw *vswp)
4015 4017 {
4016 4018
4017 4019 mutex_enter(&vswp->vsw_lock);
4018 4020 vswp->vsw_count--;
4019 4021 mutex_exit(&vswp->vsw_lock);
4020 4022 }
4021 4023
4022 4024 static int sync_retries = 20; /* number of retries when not making progress */
4023 4025 static int sync_triesleft; /* portion of sync_retries remaining */
4024 4026
4025 4027 static pgcnt_t old_pgcnt, new_pgcnt;
4026 4028 static int new_bufcnt, old_bufcnt;
4027 4029
4028 4030 /*
4029 4031 * Sync all of the mounted filesystems, and then wait for the actual i/o to
4030 4032 * complete. We wait by counting the number of dirty pages and buffers,
4031 4033 * pushing them out using bio_busy() and page_busy(), and then counting again.
4032 4034 * This routine is used during the uadmin A_SHUTDOWN code. It should only
4033 4035 * be used after some higher-level mechanism has quiesced the system so that
4034 4036 * new writes are not being initiated while we are waiting for completion.
4035 4037 *
4036 4038 * To ensure finite running time, our algorithm uses sync_triesleft (a progress
4037 4039 * counter used by the vfs_syncall() loop below). It is declared above so
4038 4040 * it can be found easily in the debugger.
4039 4041 *
4040 4042 * The sync_triesleft counter is updated by vfs_syncall() itself. If we make
4041 4043 * sync_retries consecutive calls to bio_busy() and page_busy() without
4042 4044 * decreasing either the number of dirty buffers or dirty pages below the
4043 4045 * lowest count we have seen so far, we give up and return from vfs_syncall().
4044 4046 *
4045 4047 * Each loop iteration ends with a call to delay() one second to allow time for
4046 4048 * i/o completion and to permit the user time to read our progress messages.
4047 4049 */
4048 4050 void
4049 4051 vfs_syncall(void)
4050 4052 {
4051 4053 if (rootdir == NULL && !modrootloaded)
4052 4054 return; /* no filesystems have been loaded yet */
4053 4055
4054 4056 printf("syncing file systems...");
4055 4057 sync();
4056 4058
4057 4059 sync_triesleft = sync_retries;
4058 4060
4059 4061 old_bufcnt = new_bufcnt = INT_MAX;
4060 4062 old_pgcnt = new_pgcnt = ULONG_MAX;
4061 4063
4062 4064 while (sync_triesleft > 0) {
4063 4065 old_bufcnt = MIN(old_bufcnt, new_bufcnt);
4064 4066 old_pgcnt = MIN(old_pgcnt, new_pgcnt);
4065 4067
4066 4068 new_bufcnt = bio_busy(B_TRUE);
4067 4069 new_pgcnt = page_busy(B_TRUE);
4068 4070
4069 4071 if (new_bufcnt == 0 && new_pgcnt == 0)
4070 4072 break;
4071 4073
4072 4074 if (new_bufcnt < old_bufcnt || new_pgcnt < old_pgcnt)
4073 4075 sync_triesleft = sync_retries;
4074 4076 else
4075 4077 sync_triesleft--;
4076 4078
4077 4079 if (new_bufcnt)
4078 4080 printf(" [%d]", new_bufcnt);
4079 4081 if (new_pgcnt)
4080 4082 printf(" %lu", new_pgcnt);
4081 4083
4082 4084 delay(hz);
4083 4085 }
4084 4086
4085 4087 if (new_bufcnt != 0 || new_pgcnt != 0)
4086 4088 printf(" done (not all i/o completed)\n");
4087 4089 else
4088 4090 printf(" done\n");
4089 4091
4090 4092 delay(hz);
4091 4093 }
4092 4094
4093 4095 /*
4094 4096 * Map VFS flags to statvfs flags. These shouldn't really be separate
4095 4097 * flags at all.
4096 4098 */
4097 4099 uint_t
4098 4100 vf_to_stf(uint_t vf)
4099 4101 {
4100 4102 uint_t stf = 0;
4101 4103
4102 4104 if (vf & VFS_RDONLY)
4103 4105 stf |= ST_RDONLY;
4104 4106 if (vf & VFS_NOSETUID)
4105 4107 stf |= ST_NOSUID;
4106 4108 if (vf & VFS_NOTRUNC)
4107 4109 stf |= ST_NOTRUNC;
4108 4110
4109 4111 return (stf);
4110 4112 }
4111 4113
4112 4114 /*
4113 4115 * Entries for (illegal) fstype 0.
4114 4116 */
4115 4117 /* ARGSUSED */
4116 4118 int
4117 4119 vfsstray_sync(struct vfs *vfsp, short arg, struct cred *cr)
4118 4120 {
4119 4121 cmn_err(CE_PANIC, "stray vfs operation");
4120 4122 return (0);
4121 4123 }
4122 4124
4123 4125 /*
4124 4126 * Entries for (illegal) fstype 0.
4125 4127 */
4126 4128 int
4127 4129 vfsstray(void)
4128 4130 {
4129 4131 cmn_err(CE_PANIC, "stray vfs operation");
4130 4132 return (0);
4131 4133 }
4132 4134
4133 4135 /*
4134 4136 * Support for dealing with forced UFS unmount and its interaction with
4135 4137 * LOFS. Could be used by any filesystem.
4136 4138 * See bug 1203132.
4137 4139 */
4138 4140 int
4139 4141 vfs_EIO(void)
4140 4142 {
4141 4143 return (EIO);
4142 4144 }
4143 4145
4144 4146 /*
4145 4147 * We've gotta define the op for sync separately, since the compiler gets
4146 4148 * confused if we mix and match ANSI and normal style prototypes when
4147 4149 * a "short" argument is present and spits out a warning.
4148 4150 */
4149 4151 /*ARGSUSED*/
4150 4152 int
4151 4153 vfs_EIO_sync(struct vfs *vfsp, short arg, struct cred *cr)
4152 4154 {
4153 4155 return (EIO);
4154 4156 }
4155 4157
4156 4158 vfs_t EIO_vfs;
4157 4159 vfsops_t *EIO_vfsops;
4158 4160
4159 4161 /*
4160 4162 * Called from startup() to initialize all loaded vfs's
4161 4163 */
4162 4164 void
4163 4165 vfsinit(void)
4164 4166 {
4165 4167 struct vfssw *vswp;
4166 4168 int error;
4167 4169 extern int vopstats_enabled;
4168 4170 extern void vopstats_startup();
4169 4171
4170 4172 static const fs_operation_def_t EIO_vfsops_template[] = {
4171 4173 VFSNAME_MOUNT, { .error = vfs_EIO },
4172 4174 VFSNAME_UNMOUNT, { .error = vfs_EIO },
4173 4175 VFSNAME_ROOT, { .error = vfs_EIO },
4174 4176 VFSNAME_STATVFS, { .error = vfs_EIO },
4175 4177 VFSNAME_SYNC, { .vfs_sync = vfs_EIO_sync },
4176 4178 VFSNAME_VGET, { .error = vfs_EIO },
4177 4179 VFSNAME_MOUNTROOT, { .error = vfs_EIO },
4178 4180 VFSNAME_FREEVFS, { .error = vfs_EIO },
4179 4181 VFSNAME_VNSTATE, { .error = vfs_EIO },
4180 4182 NULL, NULL
4181 4183 };
4182 4184
4183 4185 static const fs_operation_def_t stray_vfsops_template[] = {
4184 4186 VFSNAME_MOUNT, { .error = vfsstray },
4185 4187 VFSNAME_UNMOUNT, { .error = vfsstray },
4186 4188 VFSNAME_ROOT, { .error = vfsstray },
4187 4189 VFSNAME_STATVFS, { .error = vfsstray },
4188 4190 VFSNAME_SYNC, { .vfs_sync = vfsstray_sync },
4189 4191 VFSNAME_VGET, { .error = vfsstray },
4190 4192 VFSNAME_MOUNTROOT, { .error = vfsstray },
4191 4193 VFSNAME_FREEVFS, { .error = vfsstray },
4192 4194 VFSNAME_VNSTATE, { .error = vfsstray },
4193 4195 NULL, NULL
4194 4196 };
4195 4197
4196 4198 /* Create vfs cache */
4197 4199 vfs_cache = kmem_cache_create("vfs_cache", sizeof (struct vfs),
4198 4200 sizeof (uintptr_t), NULL, NULL, NULL, NULL, NULL, 0);
4199 4201
4200 4202 /* Initialize the vnode cache (file systems may use it during init). */
4201 4203 vn_create_cache();
4202 4204
4203 4205 /* Setup event monitor framework */
4204 4206 fem_init();
4205 4207
4206 4208 /* Initialize the dummy stray file system type. */
4207 4209 error = vfs_setfsops(0, stray_vfsops_template, NULL);
4208 4210
4209 4211 /* Initialize the dummy EIO file system. */
4210 4212 error = vfs_makefsops(EIO_vfsops_template, &EIO_vfsops);
4211 4213 if (error != 0) {
4212 4214 cmn_err(CE_WARN, "vfsinit: bad EIO vfs ops template");
4213 4215 /* Shouldn't happen, but not bad enough to panic */
4214 4216 }
4215 4217
4216 4218 VFS_INIT(&EIO_vfs, EIO_vfsops, (caddr_t)NULL);
4217 4219
4218 4220 /*
4219 4221 * Default EIO_vfs.vfs_flag to VFS_UNMOUNTED so a lookup
4220 4222 * on this vfs can immediately notice it's invalid.
4221 4223 */
4222 4224 EIO_vfs.vfs_flag |= VFS_UNMOUNTED;
4223 4225
4224 4226 /*
4225 4227 * Call the init routines of non-loadable filesystems only.
4226 4228 * Filesystems which are loaded as separate modules will be
4227 4229 * initialized by the module loading code instead.
4228 4230 */
4229 4231
4230 4232 for (vswp = &vfssw[1]; vswp < &vfssw[nfstype]; vswp++) {
4231 4233 RLOCK_VFSSW();
4232 4234 if (vswp->vsw_init != NULL)
4233 4235 (*vswp->vsw_init)(vswp - vfssw, vswp->vsw_name);
4234 4236 RUNLOCK_VFSSW();
4235 4237 }
4236 4238
4237 4239 vopstats_startup();
4238 4240
4239 4241 if (vopstats_enabled) {
4240 4242 /* EIO_vfs can collect stats, but we don't retrieve them */
4241 4243 initialize_vopstats(&EIO_vfs.vfs_vopstats);
4242 4244 EIO_vfs.vfs_fstypevsp = NULL;
4243 4245 EIO_vfs.vfs_vskap = NULL;
4244 4246 EIO_vfs.vfs_flag |= VFS_STATS;
4245 4247 }
4246 4248
4247 4249 xattr_init();
4248 4250
4249 4251 reparse_point_init();
4250 4252 }
4251 4253
4252 4254 vfs_t *
4253 4255 vfs_alloc(int kmflag)
4254 4256 {
4255 4257 vfs_t *vfsp;
4256 4258
4257 4259 vfsp = kmem_cache_alloc(vfs_cache, kmflag);
4258 4260
4259 4261 /*
4260 4262 * Do the simplest initialization here.
4261 4263 * Everything else gets done in vfs_init()
4262 4264 */
4263 4265 bzero(vfsp, sizeof (vfs_t));
4264 4266 return (vfsp);
4265 4267 }
4266 4268
4267 4269 void
4268 4270 vfs_free(vfs_t *vfsp)
4269 4271 {
4270 4272 /*
4271 4273 * One would be tempted to assert that "vfsp->vfs_count == 0".
4272 4274 * The problem is that this gets called out of domount() with
4273 4275 * a partially initialized vfs and a vfs_count of 1. This is
4274 4276 * also called from vfs_rele() with a vfs_count of 0. We can't
4275 4277 * call VFS_RELE() from domount() if VFS_MOUNT() hasn't successfully
4276 4278 * returned. This is because VFS_MOUNT() fully initializes the
4277 4279 * vfs structure and its associated data. VFS_RELE() will call
4278 4280 * VFS_FREEVFS() which may panic the system if the data structures
4279 4281	 * aren't fully initialized from a successful VFS_MOUNT().
4280 4282 */
4281 4283
4282 4284 /* If FEM was in use, make sure everything gets cleaned up */
4283 4285 if (vfsp->vfs_femhead) {
4284 4286 ASSERT(vfsp->vfs_femhead->femh_list == NULL);
4285 4287 mutex_destroy(&vfsp->vfs_femhead->femh_lock);
4286 4288 kmem_free(vfsp->vfs_femhead, sizeof (*(vfsp->vfs_femhead)));
4287 4289 vfsp->vfs_femhead = NULL;
4288 4290 }
4289 4291
4290 4292 if (vfsp->vfs_implp)
4291 4293 vfsimpl_teardown(vfsp);
4292 4294 sema_destroy(&vfsp->vfs_reflock);
4293 4295 kmem_cache_free(vfs_cache, vfsp);
4294 4296 }
4295 4297
4296 4298 /*
4297 4299 * Increments the vfs reference count by one atomically.
4298 4300 */
4299 4301 void
4300 4302 vfs_hold(vfs_t *vfsp)
4301 4303 {
4302 4304 atomic_inc_32(&vfsp->vfs_count);
4303 4305 ASSERT(vfsp->vfs_count != 0);
4304 4306 }
4305 4307
4306 4308 /*
4307 4309 * Decrements the vfs reference count by one atomically. When
4308 4310 * vfs reference count becomes zero, it calls the file system
4309 4311 * specific vfs_freevfs() to free up the resources.
4310 4312 */
4311 4313 void
4312 4314 vfs_rele(vfs_t *vfsp)
4313 4315 {
4314 4316 ASSERT(vfsp->vfs_count != 0);
4315 4317 if (atomic_dec_32_nv(&vfsp->vfs_count) == 0) {
4316 4318 VFS_FREEVFS(vfsp);
4317 4319 lofi_remove(vfsp);
4318 4320 if (vfsp->vfs_zone)
4319 4321 zone_rele_ref(&vfsp->vfs_implp->vi_zone_ref,
4320 4322 ZONE_REF_VFS);
4321 4323 vfs_freemnttab(vfsp);
4322 4324 vfs_free(vfsp);
4323 4325 }
4324 4326 }
4325 4327
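/*
 * Illustrative sketch, not from the original file: the basic
 * vfs_hold()/vfs_rele() pairing.  A hypothetical caller that caches a
 * vfs pointer takes a hold first and releases it when the pointer is
 * dropped; the final vfs_rele() is what triggers VFS_FREEVFS() above.
 */
static struct vfs *example_cached_vfs;

static void
example_cache_vfs(struct vfs *vfsp)
{
	vfs_hold(vfsp);			/* keep vfsp valid while cached */
	example_cached_vfs = vfsp;
}

static void
example_uncache_vfs(void)
{
	if (example_cached_vfs != NULL) {
		vfs_rele(example_cached_vfs);
		example_cached_vfs = NULL;
	}
}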
4326 4328 /*
4327 4329 * Generic operations vector support.
4328 4330 *
4329 4331 * This is used to build operations vectors for both the vfs and vnode.
4330 4332 * It's normally called only when a file system is loaded.
4331 4333 *
4332 4334 * There are many possible algorithms for this, including the following:
4333 4335 *
4334 4336 * (1) scan the list of known operations; for each, see if the file system
4335 4337 * includes an entry for it, and fill it in as appropriate.
4336 4338 *
4337 4339 * (2) set up defaults for all known operations. scan the list of ops
4338 4340 * supplied by the file system; for each which is both supplied and
4339 4341 * known, fill it in.
4340 4342 *
4341 4343 * (3) sort the lists of known ops & supplied ops; scan the list, filling
4342 4344 * in entries as we go.
4343 4345 *
4344 4346 * we choose (1) for simplicity, and because performance isn't critical here.
4345 4347 * note that (2) could be sped up using a precomputed hash table on known ops.
4346 4348 * (3) could be faster than either, but only if the lists were very large or
4347 4349 * supplied in sorted order.
4348 4350 *
4349 4351 */
4350 4352
4351 4353 int
4352 4354 fs_build_vector(void *vector, int *unused_ops,
4353 4355 const fs_operation_trans_def_t *translation,
4354 4356 const fs_operation_def_t *operations)
4355 4357 {
4356 4358 int i, num_trans, num_ops, used;
4357 4359
4358 4360 /*
4359 4361 * Count the number of translations and the number of supplied
4360 4362 * operations.
4361 4363 */
4362 4364
4363 4365 {
4364 4366 const fs_operation_trans_def_t *p;
4365 4367
4366 4368 for (num_trans = 0, p = translation;
4367 4369 p->name != NULL;
4368 4370 num_trans++, p++)
4369 4371 ;
4370 4372 }
4371 4373
4372 4374 {
4373 4375 const fs_operation_def_t *p;
4374 4376
4375 4377 for (num_ops = 0, p = operations;
4376 4378 p->name != NULL;
4377 4379 num_ops++, p++)
4378 4380 ;
4379 4381 }
4380 4382
4381 4383 /* Walk through each operation known to our caller. There will be */
4382 4384 /* one entry in the supplied "translation table" for each. */
4383 4385
4384 4386 used = 0;
4385 4387
4386 4388 for (i = 0; i < num_trans; i++) {
4387 4389 int j, found;
4388 4390 char *curname;
4389 4391 fs_generic_func_p result;
4390 4392 fs_generic_func_p *location;
4391 4393
4392 4394 curname = translation[i].name;
4393 4395
4394 4396 /* Look for a matching operation in the list supplied by the */
4395 4397 /* file system. */
4396 4398
4397 4399 found = 0;
4398 4400
4399 4401 for (j = 0; j < num_ops; j++) {
4400 4402 if (strcmp(operations[j].name, curname) == 0) {
4401 4403 used++;
4402 4404 found = 1;
4403 4405 break;
4404 4406 }
4405 4407 }
4406 4408
4407 4409 /*
4408 4410 * If the file system is using a "placeholder" for default
4409 4411 * or error functions, grab the appropriate function out of
4410 4412 * the translation table. If the file system didn't supply
4411 4413 * this operation at all, use the default function.
4412 4414 */
4413 4415
4414 4416 if (found) {
4415 4417 result = operations[j].func.fs_generic;
4416 4418 if (result == fs_default) {
4417 4419 result = translation[i].defaultFunc;
4418 4420 } else if (result == fs_error) {
4419 4421 result = translation[i].errorFunc;
4420 4422 } else if (result == NULL) {
4421 4423 /* Null values are PROHIBITED */
4422 4424 return (EINVAL);
4423 4425 }
4424 4426 } else {
4425 4427 result = translation[i].defaultFunc;
4426 4428 }
4427 4429
4428 4430 /* Now store the function into the operations vector. */
4429 4431
4430 4432 location = (fs_generic_func_p *)
4431 4433 (((char *)vector) + translation[i].offset);
4432 4434
4433 4435 *location = result;
4434 4436 }
4435 4437
4436 4438 *unused_ops = num_ops - used;
4437 4439
4438 4440 return (0);
4439 4441 }
4440 4442
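/*
 * Illustrative sketch, not from the original file: the kind of operations
 * template a file system hands to this machinery, in the same
 * fs_operation_def_t initializer style as the templates in vfsinit()
 * above.  "examplefs" and its functions are hypothetical; the vsw_init
 * callback of a real file system passes such a template to vfs_setfsops().
 */
static int examplefs_mount(vfs_t *, vnode_t *, struct mounta *, cred_t *);
static int examplefs_unmount(vfs_t *, int, cred_t *);

static const fs_operation_def_t examplefs_vfsops_template[] = {
	VFSNAME_MOUNT,		{ .vfs_mount = examplefs_mount },
	VFSNAME_UNMOUNT,	{ .vfs_unmount = examplefs_unmount },
	NULL,			NULL
};

/* ARGSUSED */
static int
examplefs_init(int fstype, char *name)
{
	return (vfs_setfsops(fstype, examplefs_vfsops_template, NULL));
}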
4441 4443 /* Placeholder functions, should never be called. */
4442 4444
4443 4445 int
4444 4446 fs_error(void)
4445 4447 {
4446 4448 cmn_err(CE_PANIC, "fs_error called");
4447 4449 return (0);
4448 4450 }
4449 4451
4450 4452 int
4451 4453 fs_default(void)
4452 4454 {
4453 4455 cmn_err(CE_PANIC, "fs_default called");
4454 4456 return (0);
4455 4457 }
4456 4458
4457 4459 #ifdef __sparc
4458 4460
4459 4461 /*
4460 4462 * Part of the implementation of booting off a mirrored root
4461 4463 * involves a change of dev_t for the root device. To
4462 4464 * accomplish this, first remove the existing hash table
4463 4465 * entry for the root device, convert to the new dev_t,
4464 4466 * then re-insert in the hash table at the head of the list.
4465 4467 */
4466 4468 void
4467 4469 vfs_root_redev(vfs_t *vfsp, dev_t ndev, int fstype)
4468 4470 {
4469 4471 vfs_list_lock();
4470 4472
4471 4473 vfs_hash_remove(vfsp);
4472 4474
4473 4475 vfsp->vfs_dev = ndev;
4474 4476 vfs_make_fsid(&vfsp->vfs_fsid, ndev, fstype);
4475 4477
4476 4478 vfs_hash_add(vfsp, 1);
4477 4479
4478 4480 vfs_list_unlock();
4479 4481 }
4480 4482
4481 4483 #else /* x86 NEWBOOT */
4482 4484
4483 4485 #if defined(__x86)
4484 4486 extern int hvmboot_rootconf();
4485 4487 #endif /* __x86 */
4486 4488
4487 4489 extern ib_boot_prop_t *iscsiboot_prop;
4488 4490
4489 4491 int
4490 4492 rootconf()
4491 4493 {
4492 4494 int error;
4493 4495 struct vfssw *vsw;
4494 4496 extern void pm_init();
4495 4497 char *fstyp, *fsmod;
4496 4498 int ret = -1;
4497 4499
4498 4500 getrootfs(&fstyp, &fsmod);
4499 4501
4500 4502 #if defined(__x86)
4501 4503 /*
4502 4504 * hvmboot_rootconf() is defined in the hvm_bootstrap misc module,
4503 4505 * which lives in /platform/i86hvm, and hence is only available when
4504 4506 * booted in an x86 hvm environment. If the hvm_bootstrap misc module
4505 4507 * is not available then the modstub for this function will return 0.
4506 4508 * If the hvm_bootstrap misc module is available it will be loaded
4507 4509 * and hvmboot_rootconf() will be invoked.
4508 4510 */
4509 4511 if (error = hvmboot_rootconf())
4510 4512 return (error);
4511 4513 #endif /* __x86 */
4512 4514
4513 4515 if (error = clboot_rootconf())
4514 4516 return (error);
4515 4517
4516 4518 if (modload("fs", fsmod) == -1)
4517 4519 panic("Cannot _init %s module", fsmod);
4518 4520
4519 4521 RLOCK_VFSSW();
4520 4522 vsw = vfs_getvfsswbyname(fstyp);
4521 4523 RUNLOCK_VFSSW();
4522 4524 if (vsw == NULL) {
4523 4525 cmn_err(CE_CONT, "Cannot find %s filesystem\n", fstyp);
4524 4526 return (ENXIO);
4525 4527 }
4526 4528 VFS_INIT(rootvfs, &vsw->vsw_vfsops, 0);
4527 4529 VFS_HOLD(rootvfs);
4528 4530
4529 4531 /* always mount readonly first */
4530 4532 rootvfs->vfs_flag |= VFS_RDONLY;
4531 4533
4532 4534 pm_init();
4533 4535
4534 4536 if (netboot && iscsiboot_prop) {
4535 4537 cmn_err(CE_WARN, "NFS boot and iSCSI boot"
4536 4538	    " shouldn't happen at the same time");
4537 4539 return (EINVAL);
4538 4540 }
4539 4541
4540 4542 if (netboot || iscsiboot_prop) {
4541 4543 ret = strplumb();
4542 4544 if (ret != 0) {
4543 4545 cmn_err(CE_WARN, "Cannot plumb network device %d", ret);
4544 4546 return (EFAULT);
4545 4547 }
4546 4548 }
4547 4549
4548 4550 if ((ret == 0) && iscsiboot_prop) {
4549 4551 ret = modload("drv", "iscsi");
4550 4552 /* -1 indicates fail */
4551 4553 if (ret == -1) {
4552 4554 cmn_err(CE_WARN, "Failed to load iscsi module");
4553 4555 iscsi_boot_prop_free();
4554 4556 return (EINVAL);
4555 4557 } else {
4556 4558 if (!i_ddi_attach_pseudo_node("iscsi")) {
4557 4559 cmn_err(CE_WARN,
4558 4560 "Failed to attach iscsi driver");
4559 4561 iscsi_boot_prop_free();
4560 4562 return (ENODEV);
4561 4563 }
4562 4564 }
4563 4565 }
4564 4566
4565 4567 error = VFS_MOUNTROOT(rootvfs, ROOT_INIT);
4566 4568 vfs_unrefvfssw(vsw);
4567 4569 rootdev = rootvfs->vfs_dev;
4568 4570
4569 4571 if (error)
4570 4572 cmn_err(CE_CONT, "Cannot mount root on %s fstype %s\n",
4571 4573 rootfs.bo_name, fstyp);
4572 4574 else
4573 4575 cmn_err(CE_CONT, "?root on %s fstype %s\n",
4574 4576 rootfs.bo_name, fstyp);
4575 4577 return (error);
4576 4578 }
4577 4579
4578 4580 /*
4579 4581 * XXX this is called by nfs only and should probably be removed
4580 4582 * If booted with ASKNAME, prompt on the console for a filesystem
4581 4583 * name and return it.
4582 4584 */
4583 4585 void
4584 4586 getfsname(char *askfor, char *name, size_t namelen)
4585 4587 {
4586 4588 if (boothowto & RB_ASKNAME) {
4587 4589 printf("%s name: ", askfor);
4588 4590 console_gets(name, namelen);
4589 4591 }
4590 4592 }
4591 4593
4592 4594 /*
4593 4595 * Init the root filesystem type (rootfs.bo_fstype) from the "fstype"
4594 4596 * property.
4595 4597 *
4596 4598 * Filesystem types starting with the prefix "nfs" are diskless clients;
4597 4599	 * init the root filename (rootfs.bo_name), too.
4598 4600 *
4599 4601 * If we are booting via NFS we currently have these options:
4600 4602 * nfs - dynamically choose NFS V2, V3, or V4 (default)
4601 4603 * nfs2 - force NFS V2
4602 4604 * nfs3 - force NFS V3
4603 4605 * nfs4 - force NFS V4
4604 4606 * Because we need to maintain backward compatibility with the naming
4605 4607 * convention that the NFS V2 filesystem name is "nfs" (see vfs_conf.c)
4606 4608 * we need to map "nfs" => "nfsdyn" and "nfs2" => "nfs". The dynamic
4607 4609 * nfs module will map the type back to either "nfs", "nfs3", or "nfs4".
4608 4610 * This is only for root filesystems, all other uses will expect
4609 4611 * that "nfs" == NFS V2.
4610 4612 */
4611 4613 static void
4612 4614 getrootfs(char **fstypp, char **fsmodp)
4613 4615 {
4614 4616 char *propstr = NULL;
4615 4617
4616 4618 /*
4617 4619 * Check fstype property; for diskless it should be one of "nfs",
4618 4620 * "nfs2", "nfs3" or "nfs4".
4619 4621 */
4620 4622 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
4621 4623 DDI_PROP_DONTPASS, "fstype", &propstr)
4622 4624 == DDI_SUCCESS) {
4623 4625 (void) strncpy(rootfs.bo_fstype, propstr, BO_MAXFSNAME);
4624 4626 ddi_prop_free(propstr);
4625 4627
4626 4628 /*
4627 4629 * if the boot property 'fstype' is not set, but 'zfs-bootfs' is set,
4628 4630 * assume the type of this root filesystem is 'zfs'.
4629 4631 */
4630 4632 } else if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
4631 4633 DDI_PROP_DONTPASS, "zfs-bootfs", &propstr)
4632 4634 == DDI_SUCCESS) {
4633 4635 (void) strncpy(rootfs.bo_fstype, "zfs", BO_MAXFSNAME);
4634 4636 ddi_prop_free(propstr);
4635 4637 }
4636 4638
4637 4639 if (strncmp(rootfs.bo_fstype, "nfs", 3) != 0) {
4638 4640 *fstypp = *fsmodp = rootfs.bo_fstype;
4639 4641 return;
4640 4642 }
4641 4643
4642 4644 ++netboot;
4643 4645
4644 4646 if (strcmp(rootfs.bo_fstype, "nfs2") == 0)
4645 4647 (void) strcpy(rootfs.bo_fstype, "nfs");
4646 4648 else if (strcmp(rootfs.bo_fstype, "nfs") == 0)
4647 4649 (void) strcpy(rootfs.bo_fstype, "nfsdyn");
4648 4650
4649 4651 /*
4650 4652 * check if path to network interface is specified in bootpath
4651 4653 * or by a hypervisor domain configuration file.
4652 4654	 * XXPV - enable strplumb_get_netdev_path()
4653 4655 */
4654 4656 if (ddi_prop_exists(DDI_DEV_T_ANY, ddi_root_node(), DDI_PROP_DONTPASS,
4655 4657 "xpv-nfsroot")) {
4656 4658 (void) strcpy(rootfs.bo_name, "/xpvd/xnf@0");
4657 4659 } else if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
4658 4660 DDI_PROP_DONTPASS, "bootpath", &propstr)
4659 4661 == DDI_SUCCESS) {
4660 4662 (void) strncpy(rootfs.bo_name, propstr, BO_MAXOBJNAME);
4661 4663 ddi_prop_free(propstr);
4662 4664 } else {
4663 4665 rootfs.bo_name[0] = '\0';
4664 4666 }
4665 4667 *fstypp = rootfs.bo_fstype;
4666 4668 *fsmodp = "nfs";
4667 4669 }
4668 4670 #endif
4669 4671
4670 4672 /*
4671 4673 * VFS feature routines
4672 4674 */
4673 4675
4674 4676 #define VFTINDEX(feature) (((feature) >> 32) & 0xFFFFFFFF)
4675 4677 #define VFTBITS(feature) ((feature) & 0xFFFFFFFFLL)
4676 4678
4677 4679 /* Register a feature in the vfs */
4678 4680 void
4679 4681 vfs_set_feature(vfs_t *vfsp, vfs_feature_t feature)
4680 4682 {
4681 4683 /* Note that vfs_featureset[] is found in *vfsp->vfs_implp */
4682 4684 if (vfsp->vfs_implp == NULL)
4683 4685 return;
4684 4686
4685 4687 vfsp->vfs_featureset[VFTINDEX(feature)] |= VFTBITS(feature);
4686 4688 }
4687 4689
4688 4690 void
4689 4691 vfs_clear_feature(vfs_t *vfsp, vfs_feature_t feature)
4690 4692 {
4691 4693 /* Note that vfs_featureset[] is found in *vfsp->vfs_implp */
4692 4694 if (vfsp->vfs_implp == NULL)
4693 4695 return;
4694 4696 vfsp->vfs_featureset[VFTINDEX(feature)] &= VFTBITS(~feature);
4695 4697 }
4696 4698
4697 4699 /*
4698 4700 * Query a vfs for a feature.
4699 4701 * Returns 1 if feature is present, 0 if not
4700 4702 */
4701 4703 int
4702 4704 vfs_has_feature(vfs_t *vfsp, vfs_feature_t feature)
4703 4705 {
4704 4706 int ret = 0;
4705 4707
4706 4708 /* Note that vfs_featureset[] is found in *vfsp->vfs_implp */
4707 4709 if (vfsp->vfs_implp == NULL)
4708 4710 return (ret);
4709 4711
4710 4712 if (vfsp->vfs_featureset[VFTINDEX(feature)] & VFTBITS(feature))
4711 4713 ret = 1;
4712 4714
4713 4715 return (ret);
4714 4716 }
4715 4717
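/*
 * Illustrative sketch, not from the original file: a feature value packs
 * an index into vfs_featureset[] in its upper 32 bits and a bit mask in
 * its lower 32 bits, which VFTINDEX() and VFTBITS() decode.  VFSFT_XVATTR
 * is used as a representative feature flag; whether a particular file
 * system advertises it is an assumption.
 */
static void
example_register_xvattr(vfs_t *vfsp)
{
	/* typically done by the file system at mount time */
	vfs_set_feature(vfsp, VFSFT_XVATTR);

	ASSERT(vfs_has_feature(vfsp, VFSFT_XVATTR));
}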
4716 4718 /*
4717 4719 * Propagate feature set from one vfs to another
4718 4720 */
4719 4721 void
4720 4722 vfs_propagate_features(vfs_t *from, vfs_t *to)
4721 4723 {
4722 4724 int i;
4723 4725
4724 4726 if (to->vfs_implp == NULL || from->vfs_implp == NULL)
4725 4727 return;
4726 4728
4727 4729 for (i = 1; i <= to->vfs_featureset[0]; i++) {
4728 4730 to->vfs_featureset[i] = from->vfs_featureset[i];
4729 4731 }
4730 4732 }
4731 4733
4732 4734 #define LOFINODE_PATH "/dev/lofi/%d"
4733 4735
4734 4736 /*
4735 4737 * Return the vnode for the lofi node if there's a lofi mount in place.
4736 4738 * Returns -1 when there's no lofi node, 0 on success, and > 0 on
4737 4739 * failure.
4738 4740 */
4739 4741 int
4740 4742 vfs_get_lofi(vfs_t *vfsp, vnode_t **vpp)
4741 4743 {
4742 4744 char *path = NULL;
4743 4745 int strsize;
4744 4746 int err;
4745 4747
4746 4748 if (vfsp->vfs_lofi_id == 0) {
4747 4749 *vpp = NULL;
4748 4750 return (-1);
4749 4751 }
4750 4752
4751 4753 strsize = snprintf(NULL, 0, LOFINODE_PATH, vfsp->vfs_lofi_id);
4752 4754 path = kmem_alloc(strsize + 1, KM_SLEEP);
4753 4755 (void) snprintf(path, strsize + 1, LOFINODE_PATH, vfsp->vfs_lofi_id);
4754 4756
4755 4757 /*
4756 4758 * We may be inside a zone, so we need to use the /dev path, but
4757 4759 * it's created asynchronously, so we wait here.
4758 4760 */
4759 4761 for (;;) {
4760 4762 err = lookupname(path, UIO_SYSSPACE, FOLLOW, NULLVPP, vpp);
4761 4763
4762 4764 if (err != ENOENT)
4763 4765 break;
4764 4766
4765 4767 if ((err = delay_sig(hz / 8)) == EINTR)
4766 4768 break;
4767 4769 }
4768 4770
4769 4771 if (err)
4770 4772 *vpp = NULL;
4771 4773
4772 4774 kmem_free(path, strsize + 1);
4773 4775 return (err);
4774 4776 }
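/*
 * Illustrative sketch, not from the original file: handling the
 * three-way return of vfs_get_lofi().  A negative value means the mount
 * is not lofi-backed, zero hands back a held vnode that the caller must
 * VN_RELE(), and a positive value is an errno.  The function name is an
 * assumption.
 */
static int
example_inspect_lofi_backing(vfs_t *vfsp)
{
	vnode_t *vp;
	int err;

	err = vfs_get_lofi(vfsp, &vp);
	if (err < 0)
		return (0);	/* not a lofi mount; nothing to do */
	if (err > 0)
		return (err);	/* lookup of /dev/lofi node failed */

	/* ... examine vp, e.g. compare against another device ... */

	VN_RELE(vp);
	return (0);
}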
↓ open down ↓ |
3254 lines elided |
↑ open up ↑ |