1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
25 */
26
27 #include <sys/types.h>
28 #include <sys/t_lock.h>
29 #include <sys/param.h>
30 #include <sys/time.h>
31 #include <sys/systm.h>
32 #include <sys/sysmacros.h>
33 #include <sys/resource.h>
34 #include <sys/signal.h>
35 #include <sys/cred.h>
36 #include <sys/user.h>
37 #include <sys/buf.h>
38 #include <sys/vfs.h>
39 #include <sys/vfs_opreg.h>
40 #include <sys/stat.h>
41 #include <sys/vnode.h>
42 #include <sys/mode.h>
43 #include <sys/proc.h>
44 #include <sys/disp.h>
45 #include <sys/file.h>
46 #include <sys/fcntl.h>
47 #include <sys/flock.h>
48 #include <sys/kmem.h>
49 #include <sys/uio.h>
50 #include <sys/dnlc.h>
51 #include <sys/conf.h>
52 #include <sys/errno.h>
53 #include <sys/mman.h>
54 #include <sys/fbuf.h>
55 #include <sys/pathname.h>
56 #include <sys/debug.h>
57 #include <sys/vmsystm.h>
58 #include <sys/cmn_err.h>
59 #include <sys/dirent.h>
61 #include <sys/modctl.h>
62 #include <sys/statvfs.h>
63 #include <sys/mount.h>
64 #include <sys/sunddi.h>
65 #include <sys/bootconf.h>
66 #include <sys/policy.h>
67
68 #include <vm/hat.h>
69 #include <vm/page.h>
70 #include <vm/pvn.h>
71 #include <vm/as.h>
72 #include <vm/seg.h>
73 #include <vm/seg_map.h>
74 #include <vm/seg_kmem.h>
75 #include <vm/seg_vn.h>
76 #include <vm/rm.h>
78 #include <sys/swap.h>
79 #include <sys/mntent.h>
80
81
82 #include <fs/fs_subr.h>
83
84
85 #include <sys/fs/udf_volume.h>
86 #include <sys/fs/udf_inode.h>
87
88
89 extern struct vnode *common_specvp(struct vnode *vp);
90
91 extern kmutex_t ud_sync_busy;
92 static int32_t ud_mountfs(struct vfs *,
93 enum whymountroot, dev_t, char *, struct cred *, int32_t);
94 static struct udf_vfs *ud_validate_and_fill_superblock(dev_t,
95 int32_t, uint32_t);
96 void ud_destroy_fsp(struct udf_vfs *);
97 void ud_convert_to_superblock(struct udf_vfs *,
98 struct log_vol_int_desc *);
99 void ud_update_superblock(struct vfs *);
100 int32_t ud_get_last_block(dev_t, daddr_t *);
101 static int32_t ud_val_get_vat(struct udf_vfs *,
102 dev_t, daddr_t, struct ud_map *);
103 int32_t ud_read_sparing_tbls(struct udf_vfs *,
104 dev_t, struct ud_map *, struct pmap_typ2 *);
105 uint32_t ud_get_lbsize(dev_t, uint32_t *);
106
107 static int32_t udf_mount(struct vfs *,
108 struct vnode *, struct mounta *, struct cred *);
109 static int32_t udf_unmount(struct vfs *, int, struct cred *);
110 static int32_t udf_root(struct vfs *, struct vnode **);
111 static int32_t udf_statvfs(struct vfs *, struct statvfs64 *);
112 static int32_t udf_sync(struct vfs *, int16_t, struct cred *);
113 static int32_t udf_vget(struct vfs *, struct vnode **, struct fid *);
114 static int32_t udf_mountroot(struct vfs *vfsp, enum whymountroot);
115
116 static int udfinit(int, char *);
117
118 static mntopts_t udfs_mntopts;
119
120 static vfsdef_t vfw = {
121 VFSDEF_VERSION,
122 "udfs",
123 udfinit,
124 VSW_HASPROTO|VSW_CANREMOUNT|VSW_STATS|VSW_CANLOFI,
125 &udfs_mntopts
126 };
127
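/*
 * udfs defines no file-system-specific mount options, so the option
 * table is left empty.
 */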
128 static mntopts_t udfs_mntopts = {
129 0,
130 NULL
131 };
132
133 /*
134 * Module linkage information for the kernel.
135 */
136 extern struct mod_ops mod_fsops;
137
138 static struct modlfs modlfs = {
139 &mod_fsops, "filesystem for UDFS", &vfw
140 };
141
142 static struct modlinkage modlinkage = {
143 MODREV_1, (void *)&modlfs, NULL
144 };
145
146 int32_t udf_fstype = -1;
147
148 int
149 _init()
150 {
151 return (mod_install(&modlinkage));
152 }
153
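/*
 * Always refuse the unload request so that the udfs module stays
 * resident once it has been installed.
 */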
154 int
155 _fini()
156 {
157 return (EBUSY);
158 }
159
160 int
161 _info(struct modinfo *modinfop)
162 {
163 return (mod_info(&modlinkage, modinfop));
164 }
165
166
167 /* -------------------- vfs routines -------------------- */
168
169 /*
170 * XXX - this appears only to be used by the VM code to handle the case where
171 * UNIX is running off the mini-root. That probably wants to be done
172 * differently.
173 */
174 struct vnode *rootvp;
175
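/*
 * VFS_MOUNT entry point. Verify the caller's privileges and the
 * mount point, resolve the special (or lofi) device, make sure the
 * device is not already mounted unless this is a remount, apply the
 * ro/nosuid options and let ud_mountfs() do the real work.
 */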
176 static int32_t
177 udf_mount(struct vfs *vfsp, struct vnode *mvp,
178 struct mounta *uap, struct cred *cr)
179 {
180 dev_t dev;
181 struct vnode *lvp = NULL;
182 struct vnode *svp = NULL;
183 struct pathname dpn;
184 int32_t error;
185 enum whymountroot why;
186 int oflag, aflag;
187
188 ud_printf("udf_mount\n");
189
190 if ((error = secpolicy_fs_mount(cr, mvp, vfsp)) != 0) {
191 return (error);
192 }
193
194 if (mvp->v_type != VDIR) {
195 return (ENOTDIR);
196 }
197
198 mutex_enter(&mvp->v_lock);
199 if ((uap->flags & MS_REMOUNT) == 0 &&
200 (uap->flags & MS_OVERLAY) == 0 &&
201 (mvp->v_count != 1 || (mvp->v_flag & VROOT))) {
202 mutex_exit(&mvp->v_lock);
203 return (EBUSY);
204 }
205 mutex_exit(&mvp->v_lock);
206
207 if (error = pn_get(uap->dir, UIO_USERSPACE, &dpn)) {
208 return (error);
209 }
210
211 /*
212 * Resolve path name of the file being mounted.
213 */
214 if (error = lookupname(uap->spec, UIO_USERSPACE, FOLLOW, NULLVPP,
215 &svp)) {
216 pn_free(&dpn);
217 return (error);
218 }
219
220 error = vfs_get_lofi(vfsp, &lvp);
221
222 if (error > 0) {
223 if (error == ENOENT)
224 error = ENODEV;
225 goto out;
226 } else if (error == 0) {
227 dev = lvp->v_rdev;
228 } else {
229 dev = svp->v_rdev;
230
231 if (svp->v_type != VBLK) {
232 error = ENOTBLK;
233 goto out;
234 }
235 }
236
237 /*
238 * Ensure that this device isn't already mounted,
239 * unless this is a REMOUNT request
240 */
241 if (vfs_devmounting(dev, vfsp)) {
242 error = EBUSY;
243 goto out;
244 }
245 if (vfs_devismounted(dev)) {
246 if (uap->flags & MS_REMOUNT) {
247 why = ROOT_REMOUNT;
248 } else {
249 error = EBUSY;
250 goto out;
251 }
252 } else {
253 why = ROOT_INIT;
254 }
255 if (getmajor(dev) >= devcnt) {
256 error = ENXIO;
257 goto out;
258 }
259
260 /*
261 * If the device is a tape, mount it read only
262 */
263 if (devopsp[getmajor(dev)]->devo_cb_ops->cb_flag & D_TAPE) {
264 vfsp->vfs_flag |= VFS_RDONLY;
265 }
266
267 if (uap->flags & MS_RDONLY) {
268 vfsp->vfs_flag |= VFS_RDONLY;
269 }
270
271 /*
272 * Set mount options.
273 */
274 if (uap->flags & MS_RDONLY) {
275 vfs_setmntopt(vfsp, MNTOPT_RO, NULL, 0);
276 }
277 if (uap->flags & MS_NOSUID) {
278 vfs_setmntopt(vfsp, MNTOPT_NOSUID, NULL, 0);
279 }
280
281 /*
282 * Verify that the caller can open the device special file as
283 * required. It is not until this moment that we know whether
284 * we're mounting "ro" or not.
285 */
286 if ((vfsp->vfs_flag & VFS_RDONLY) != 0) {
287 oflag = FREAD;
288 aflag = VREAD;
289 } else {
290 oflag = FREAD | FWRITE;
291 aflag = VREAD | VWRITE;
292 }
293
294 if (lvp == NULL &&
295 (error = secpolicy_spec_open(cr, svp, oflag)) != 0)
296 goto out;
297
298 if ((error = VOP_ACCESS(svp, aflag, 0, cr, NULL)) != 0)
299 goto out;
300
301 /*
302 * Mount the filesystem.
303 */
304 error = ud_mountfs(vfsp, why, dev, dpn.pn_path, cr, 0);
305 out:
306 VN_RELE(svp);
307 if (lvp != NULL)
308 VN_RELE(lvp);
309 pn_free(&dpn);
310 return (error);
311 }
312
313
314
315 /*
316 * Unmount the file system pointed
317 * to by vfsp
318 */
319 /* ARGSUSED */
320 static int32_t
321 udf_unmount(struct vfs *vfsp, int fflag, struct cred *cr)
322 {
323 struct udf_vfs *udf_vfsp;
324 struct vnode *bvp, *rvp;
325 struct ud_inode *rip;
326 int32_t flag;
327
328 ud_printf("udf_unmount\n");
329
330 if (secpolicy_fs_unmount(cr, vfsp) != 0) {
331 return (EPERM);
332 }
333
334 /*
335 * Forced unmount is not supported by this file system,
336 * so ENOTSUP is returned.
337 */
338 if (fflag & MS_FORCE)
339 return (ENOTSUP);
340
341 udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
342 flag = !(udf_vfsp->udf_flags & UDF_FL_RDONLY);
343 bvp = udf_vfsp->udf_devvp;
344
345 rvp = udf_vfsp->udf_root;
346 ASSERT(rvp != NULL);
347 rip = VTOI(rvp);
348
349 (void) ud_release_cache(udf_vfsp);
350
351
352 /* Flush all inodes except root */
353 if (ud_iflush(vfsp) < 0) {
354 return (EBUSY);
355 }
356
357 rw_enter(&rip->i_contents, RW_WRITER);
358 (void) ud_syncip(rip, B_INVAL, I_SYNC);
359 rw_exit(&rip->i_contents);
360
361 mutex_enter(&ud_sync_busy);
362 if ((udf_vfsp->udf_flags & UDF_FL_RDONLY) == 0) {
363 bflush(vfsp->vfs_dev);
364 mutex_enter(&udf_vfsp->udf_lock);
365 udf_vfsp->udf_clean = UDF_CLEAN;
366 mutex_exit(&udf_vfsp->udf_lock);
367 ud_update_superblock(vfsp);
368 }
369 mutex_exit(&ud_sync_busy);
370
371 mutex_destroy(&udf_vfsp->udf_lock);
372 mutex_destroy(&udf_vfsp->udf_rename_lck);
373
374 ud_delcache(rip);
375 ITIMES(rip);
376 VN_RELE(rvp);
377
378 ud_destroy_fsp(udf_vfsp);
379
380 (void) VOP_PUTPAGE(bvp, (offset_t)0, (uint32_t)0, B_INVAL, cr, NULL);
381 (void) VOP_CLOSE(bvp, flag, 1, (offset_t)0, cr, NULL);
382
383 (void) bfinval(vfsp->vfs_dev, 1);
384 VN_RELE(bvp);
385
386
387 return (0);
388 }
389
390
391 /*
392 * Get the root vp for the
393 * file system
394 */
395 static int32_t
396 udf_root(struct vfs *vfsp, struct vnode **vpp)
397 {
398 struct udf_vfs *udf_vfsp;
399 struct vnode *vp;
400
401 ud_printf("udf_root\n");
402
403 udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
404
405 ASSERT(udf_vfsp != NULL);
406 ASSERT(udf_vfsp->udf_root != NULL);
407
408 vp = udf_vfsp->udf_root;
409 VN_HOLD(vp);
410 *vpp = vp;
411 return (0);
412 }
413
414
415 /*
416 * Get file system statistics.
417 */
418 static int32_t
419 udf_statvfs(struct vfs *vfsp, struct statvfs64 *sp)
420 {
421 struct udf_vfs *udf_vfsp;
422 struct ud_part *parts;
423 dev32_t d32;
424 int32_t index;
425
426 ud_printf("udf_statvfs\n");
427
428 udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
429 (void) bzero(sp, sizeof (struct statvfs64));
430
431 mutex_enter(&udf_vfsp->udf_lock);
432 sp->f_bsize = udf_vfsp->udf_lbsize;
433 sp->f_frsize = udf_vfsp->udf_lbsize;
434 sp->f_blocks = 0;
435 sp->f_bfree = 0;
436 parts = udf_vfsp->udf_parts;
437 for (index = 0; index < udf_vfsp->udf_npart; index++) {
438 sp->f_blocks += parts->udp_nblocks;
439 sp->f_bfree += parts->udp_nfree;
440 parts++;
441 }
442 sp->f_bavail = sp->f_bfree;
443
444 /*
445 * Since there are no real inodes allocated,
446 * approximate the space that
447 * each new file will occupy:
448 * 38 (overhead per dirent) + MAXNAMLEN / 2 + inode_size (== block size)
449 */
450 sp->f_ffree = sp->f_favail =
451 (sp->f_bavail * sp->f_bsize) / (146 + sp->f_bsize);
452
453 /*
454 * The total number of inodes is
455 * the sum of files + directories + free inodes
456 */
457 sp->f_files = sp->f_ffree + udf_vfsp->udf_nfiles + udf_vfsp->udf_ndirs;
458 (void) cmpldev(&d32, vfsp->vfs_dev);
459 sp->f_fsid = d32;
460 (void) strcpy(sp->f_basetype, vfssw[vfsp->vfs_fstype].vsw_name);
461 sp->f_flag = vf_to_stf(vfsp->vfs_flag);
462 sp->f_namemax = MAXNAMLEN;
463 (void) strcpy(sp->f_fstr, udf_vfsp->udf_volid);
464
465 mutex_exit(&udf_vfsp->udf_lock);
466
467 return (0);
468 }
469
470
471 /*
472 * Flush any pending I/O to file system vfsp.
473 * The ud_update() routine will only flush *all* udf files.
474 */
475 /* ARGSUSED */
477 static int32_t
478 udf_sync(struct vfs *vfsp, int16_t flag, struct cred *cr)
479 {
480 ud_printf("udf_sync\n");
481
482 ud_update(flag);
483 return (0);
484 }
485
486
487
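/*
 * Convert a file handle (struct udf_fid) back into a vnode: read the
 * inode with ud_iget() and verify that the unique id and partition
 * reference number recorded in the fid still match the inode, so
 * that stale handles are rejected with EINVAL.
 */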
488 /* ARGSUSED */
489 static int32_t
490 udf_vget(struct vfs *vfsp,
491 struct vnode **vpp, struct fid *fidp)
492 {
493 int32_t error = 0;
494 struct udf_fid *udfid;
495 struct udf_vfs *udf_vfsp;
496 struct ud_inode *ip;
497
498 ud_printf("udf_vget\n");
499
500 udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
501 if (udf_vfsp == NULL) {
502 *vpp = NULL;
503 return (0);
504 }
505
506 udfid = (struct udf_fid *)fidp;
507 if ((error = ud_iget(vfsp, udfid->udfid_prn,
508 udfid->udfid_icb_lbn, &ip, NULL, CRED())) != 0) {
509 *vpp = NULL;
510 return (error);
511 }
512
513 rw_enter(&ip->i_contents, RW_READER);
514 if ((udfid->udfid_uinq_lo != (ip->i_uniqid & 0xffffffff)) ||
515 (udfid->udfid_prn != ip->i_icb_prn)) {
516 rw_exit(&ip->i_contents);
517 VN_RELE(ITOV(ip));
518 *vpp = NULL;
519 return (EINVAL);
520 }
521 rw_exit(&ip->i_contents);
522
523 *vpp = ITOV(ip);
524 return (0);
525 }
526
527
528 /*
529 * Mount root file system.
530 * "why" is ROOT_INIT on initial call, ROOT_REMOUNT if called to
531 * remount the root file system, and ROOT_UNMOUNT if called to
532 * unmount the root (e.g., as part of a system shutdown).
533 *
534 * XXX - this may be partially machine-dependent; it, along with the VFS_SWAPVP
535 * operation, goes along with auto-configuration. A mechanism should be
536 * provided by which machine-INdependent code in the kernel can say "get me the
537 * right root file system" and "get me the right initial swap area", and have
538 * that done in what may well be a machine-dependent fashion.
539 * Unfortunately, it is also file-system-type dependent (NFS gets it via
540 * bootparams calls, UFS gets it from various and sundry machine-dependent
541 * mechanisms, as SPECFS does for swap).
542 */
543 /* ARGSUSED */
544 static int32_t
545 udf_mountroot(struct vfs *vfsp, enum whymountroot why)
546 {
547 dev_t rootdev;
548 static int32_t udf_rootdone = 0;
549 struct vnode *vp = NULL;
550 int32_t ovflags, error;
551 ud_printf("udf_mountroot\n");
552
553 if (why == ROOT_INIT) {
554 if (udf_rootdone++) {
555 return (EBUSY);
556 }
557 rootdev = getrootdev();
558 if (rootdev == (dev_t)NODEV) {
559 return (ENODEV);
560 }
561 vfsp->vfs_dev = rootdev;
562 vfsp->vfs_flag |= VFS_RDONLY;
563 } else if (why == ROOT_REMOUNT) {
564 vp = ((struct udf_vfs *)vfsp->vfs_data)->udf_devvp;
565 (void) dnlc_purge_vfsp(vfsp, 0);
566 vp = common_specvp(vp);
567 (void) VOP_PUTPAGE(vp, (offset_t)0,
568 (uint32_t)0, B_INVAL, CRED(), NULL);
569 binval(vfsp->vfs_dev);
570
571 ovflags = vfsp->vfs_flag;
572 vfsp->vfs_flag &= ~VFS_RDONLY;
573 vfsp->vfs_flag |= VFS_REMOUNT;
574 rootdev = vfsp->vfs_dev;
575 } else if (why == ROOT_UNMOUNT) {
576 ud_update(0);
577 vp = ((struct udf_vfs *)vfsp->vfs_data)->udf_devvp;
578 (void) VOP_CLOSE(vp, FREAD|FWRITE, 1,
579 (offset_t)0, CRED(), NULL);
580 return (0);
581 }
582
583 if ((error = vfs_lock(vfsp)) != 0) {
584 return (error);
585 }
586
587 error = ud_mountfs(vfsp, why, rootdev, "/", CRED(), 1);
588 if (error) {
589 vfs_unlock(vfsp);
590 if (why == ROOT_REMOUNT) {
591 vfsp->vfs_flag = ovflags;
592 }
593 if (rootvp) {
594 VN_RELE(rootvp);
595 rootvp = (struct vnode *)0;
596 }
597 return (error);
598 }
599
600 if (why == ROOT_INIT) {
601 vfs_add((struct vnode *)0, vfsp,
602 (vfsp->vfs_flag & VFS_RDONLY) ? MS_RDONLY : 0);
603 }
604 vfs_unlock(vfsp);
605 return (0);
606 }
607
608
609 /* ------------------------- local routines ------------------------- */
610
611
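/*
 * Common mount code shared by udf_mount() and udf_mountroot().
 * For an initial mount, open the device, validate the UDF volume
 * structures via ud_validate_and_fill_superblock(), fill in the vfs
 * private data and read the root directory inode. For a remount,
 * only re-verify the integrity sequence and switch the file system
 * from read-only to read/write.
 */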
612 static int32_t
613 ud_mountfs(struct vfs *vfsp,
614 enum whymountroot why, dev_t dev, char *name,
615 struct cred *cr, int32_t isroot)
616 {
617 struct vnode *devvp = NULL;
618 int32_t error = 0;
619 int32_t needclose = 0;
620 struct udf_vfs *udf_vfsp = NULL;
621 struct log_vol_int_desc *lvid;
622 struct ud_inode *rip = NULL;
623 struct vnode *rvp = NULL;
624 int32_t i, lbsize;
625 uint32_t avd_loc;
626 struct ud_map *map;
627 int32_t desc_len;
628
629 ud_printf("ud_mountfs\n");
630
631 if (why == ROOT_INIT) {
632 /*
633 * Open the device.
634 */
635 devvp = makespecvp(dev, VBLK);
636
637 /*
638 * Open block device mounted on.
639 * When bio is fixed for vnodes this can all be vnode
640 * operations.
641 */
642 error = VOP_OPEN(&devvp,
643 (vfsp->vfs_flag & VFS_RDONLY) ? FREAD : FREAD|FWRITE,
644 cr, NULL);
645 if (error) {
646 goto out;
647 }
648 needclose = 1;
649
650 /*
651 * Refuse to go any further if this
652 * device is being used for swapping.
653 */
654 if (IS_SWAPVP(devvp)) {
655 error = EBUSY;
656 goto out;
657 }
658 }
659
660 /*
661 * check for dev already mounted on
662 */
663 if (vfsp->vfs_flag & VFS_REMOUNT) {
664 struct tag *ttag;
665 int32_t index, count;
666 struct buf *tpt = 0;
667 caddr_t addr;
668
669
670 /* cannot remount to RDONLY */
671 if (vfsp->vfs_flag & VFS_RDONLY) {
672 return (EINVAL);
673 }
674
675 if (vfsp->vfs_dev != dev) {
676 return (EINVAL);
677 }
678
679 udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
680 devvp = udf_vfsp->udf_devvp;
681
682 /*
683 * fsck may have altered the file system; discard
684 * as much incore data as possible. Don't flush
685 * if this is a rw to rw remount; it's just resetting
686 * the options.
687 */
688 if (udf_vfsp->udf_flags & UDF_FL_RDONLY) {
689 (void) dnlc_purge_vfsp(vfsp, 0);
690 (void) VOP_PUTPAGE(devvp, (offset_t)0, (uint_t)0,
691 B_INVAL, CRED(), NULL);
692 (void) ud_iflush(vfsp);
693 bflush(dev);
694 binval(dev);
695 }
696
697 /*
698 * We can read and write UDF 1.50 only;
699 * disallow mount of any higher version.
700 */
701 if ((udf_vfsp->udf_miread > UDF_150) ||
702 (udf_vfsp->udf_miwrite > UDF_150)) {
703 error = EINVAL;
704 goto remountout;
705 }
706
707 /*
708 * read/write to read/write; all done
709 */
710 if (udf_vfsp->udf_flags & UDF_FL_RW) {
711 goto remountout;
712 }
713
714 /*
715 * Does the media type allow a writable mount
716 */
717 if (udf_vfsp->udf_mtype != UDF_MT_OW) {
718 error = EINVAL;
719 goto remountout;
720 }
721
722 /*
723 * Read the metadata
724 * and check if it is possible to
725 * mount in rw mode
726 */
727 tpt = ud_bread(vfsp->vfs_dev,
728 udf_vfsp->udf_iseq_loc << udf_vfsp->udf_l2d_shift,
729 udf_vfsp->udf_iseq_len);
730 if (tpt->b_flags & B_ERROR) {
731 error = EIO;
732 goto remountout;
733 }
734 count = udf_vfsp->udf_iseq_len / DEV_BSIZE;
735 addr = tpt->b_un.b_addr;
736 for (index = 0; index < count; index ++) {
737 ttag = (struct tag *)(addr + index * DEV_BSIZE);
738 desc_len = udf_vfsp->udf_iseq_len - (index * DEV_BSIZE);
739 if (ud_verify_tag_and_desc(ttag, UD_LOG_VOL_INT,
740 udf_vfsp->udf_iseq_loc +
741 (index >> udf_vfsp->udf_l2d_shift),
742 1, desc_len) == 0) {
743 struct log_vol_int_desc *lvid;
744
745 lvid = (struct log_vol_int_desc *)ttag;
746
747 if (SWAP_32(lvid->lvid_int_type) !=
748 LOG_VOL_CLOSE_INT) {
749 error = EINVAL;
750 goto remountout;
751 }
752
753 /*
754 * Copy new data to old data
755 */
756 bcopy(udf_vfsp->udf_iseq->b_un.b_addr,
757 tpt->b_un.b_addr, udf_vfsp->udf_iseq_len);
758 break;
759 }
760 }
761
762 udf_vfsp->udf_flags = UDF_FL_RW;
763
764 mutex_enter(&udf_vfsp->udf_lock);
765 ud_sbwrite(udf_vfsp);
766 mutex_exit(&udf_vfsp->udf_lock);
767 remountout:
768 if (tpt != NULL) {
769 tpt->b_flags = B_AGE | B_STALE;
770 brelse(tpt);
771 }
772 return (error);
773 }
774
775 ASSERT(devvp != 0);
776 /*
777 * Flush back any dirty pages on the block device to
778 * try and keep the buffer cache in sync with the page
779 * cache if someone is trying to use block devices when
780 * they really should be using the raw device.
781 */
782 (void) VOP_PUTPAGE(common_specvp(devvp), (offset_t)0,
783 (uint32_t)0, B_INVAL, cr, NULL);
784
785
786 /*
787 * Check if the file system
788 * is a valid udfs and fill
789 * the required fields in udf_vfs
790 */
791 if ((lbsize = ud_get_lbsize(dev, &avd_loc)) == 0) {
792 error = EINVAL;
793 goto out;
794 }
795
796 udf_vfsp = ud_validate_and_fill_superblock(dev, lbsize, avd_loc);
797 if (udf_vfsp == NULL) {
798 error = EINVAL;
799 goto out;
800 }
801
802 /*
803 * Fill in vfs private data
804 */
805 vfsp->vfs_fstype = udf_fstype;
806 vfs_make_fsid(&vfsp->vfs_fsid, dev, udf_fstype);
807 vfsp->vfs_data = (caddr_t)udf_vfsp;
808 vfsp->vfs_dev = dev;
809 vfsp->vfs_flag |= VFS_NOTRUNC;
810 udf_vfsp->udf_devvp = devvp;
811
812 udf_vfsp->udf_fsmnt = kmem_zalloc(strlen(name) + 1, KM_SLEEP);
813 (void) strcpy(udf_vfsp->udf_fsmnt, name);
814
815 udf_vfsp->udf_vfs = vfsp;
816 udf_vfsp->udf_rdclustsz = udf_vfsp->udf_wrclustsz = maxphys;
817
818 udf_vfsp->udf_mod = 0;
819
820
821 lvid = udf_vfsp->udf_lvid;
822 if (vfsp->vfs_flag & VFS_RDONLY) {
823 /*
824 * We can read only UDF 1.50;
825 * disallow mount of any higher version.
826 */
827 if (udf_vfsp->udf_miread > UDF_150) {
828 error = EINVAL;
829 goto out;
830 }
831 udf_vfsp->udf_flags = UDF_FL_RDONLY;
832 if (SWAP_32(lvid->lvid_int_type) == LOG_VOL_CLOSE_INT) {
833 udf_vfsp->udf_clean = UDF_CLEAN;
834 } else {
835 /* Do we have a VAT at the end of the recorded media */
836 map = udf_vfsp->udf_maps;
837 for (i = 0; i < udf_vfsp->udf_nmaps; i++) {
838 if (map->udm_flags & UDM_MAP_VPM) {
839 break;
840 }
841 map++;
842 }
843 if (i == udf_vfsp->udf_nmaps) {
844 error = ENOSPC;
845 goto out;
846 }
847 udf_vfsp->udf_clean = UDF_CLEAN;
848 }
849 } else {
850 /*
851 * We can read and write UDF 1.50 only;
852 * disallow mount of any higher version.
853 */
854 if ((udf_vfsp->udf_miread > UDF_150) ||
855 (udf_vfsp->udf_miwrite > UDF_150)) {
856 error = EINVAL;
857 goto out;
858 }
859 /*
860 * Check if the media allows
861 * us to mount read/write
862 */
863 if (udf_vfsp->udf_mtype != UDF_MT_OW) {
864 error = EACCES;
865 goto out;
866 }
867
868 /*
869 * Check if we have a VAT on writable media.
870 * We cannot use the media in the presence of a VAT,
871 * so deny the RW mount.
872 */
873 map = udf_vfsp->udf_maps;
874 ASSERT(map != NULL);
875 for (i = 0; i < udf_vfsp->udf_nmaps; i++) {
876 if (map->udm_flags & UDM_MAP_VPM) {
877 error = EACCES;
878 goto out;
879 }
880 map++;
881 }
882
883 /*
884 * Check if the domain Id allows
885 * us to write
886 */
887 if (udf_vfsp->udf_lvd->lvd_dom_id.reg_ids[2] & 0x3) {
888 error = EACCES;
889 goto out;
890 }
891 udf_vfsp->udf_flags = UDF_FL_RW;
892
893 if (SWAP_32(lvid->lvid_int_type) == LOG_VOL_CLOSE_INT) {
894 udf_vfsp->udf_clean = UDF_CLEAN;
895 } else {
896 if (isroot) {
897 udf_vfsp->udf_clean = UDF_DIRTY;
898 } else {
899 error = ENOSPC;
900 goto out;
901 }
902 }
903 }
904
905 mutex_init(&udf_vfsp->udf_lock, NULL, MUTEX_DEFAULT, NULL);
906
907 mutex_init(&udf_vfsp->udf_rename_lck, NULL, MUTEX_DEFAULT, NULL);
908
909 if (error = ud_iget(vfsp, udf_vfsp->udf_ricb_prn,
910 udf_vfsp->udf_ricb_loc, &rip, NULL, cr)) {
911 mutex_destroy(&udf_vfsp->udf_lock);
912 goto out;
913 }
914
915
916 /*
917 * Get the root inode and
918 * initialize the root vnode
919 */
920 rvp = ITOV(rip);
921 mutex_enter(&rvp->v_lock);
922 rvp->v_flag |= VROOT;
923 mutex_exit(&rvp->v_lock);
924 udf_vfsp->udf_root = rvp;
925
926
927 if (why == ROOT_INIT && isroot)
928 rootvp = devvp;
929
930 ud_vfs_add(udf_vfsp);
931
932 if (udf_vfsp->udf_flags == UDF_FL_RW) {
933 udf_vfsp->udf_clean = UDF_DIRTY;
934 ud_update_superblock(vfsp);
935 }
936
937 return (0);
938
939 out:
940 ud_destroy_fsp(udf_vfsp);
941 if (needclose) {
942 (void) VOP_CLOSE(devvp, (vfsp->vfs_flag & VFS_RDONLY) ?
943 FREAD : FREAD|FWRITE, 1, (offset_t)0, cr, NULL);
944 bflush(dev);
945 binval(dev);
946 }
947 VN_RELE(devvp);
948
949 return (error);
950 }
951
952
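/*
 * Read and validate the on-disk UDF structures and build the in-core
 * superblock: read the Anchor Volume Descriptor Pointer at avd_loc,
 * walk the Main (or Reserve) Volume Descriptor Sequence collecting
 * the primary volume, logical volume and partition descriptors
 * (keeping the ones with the highest sequence numbers), build the
 * partition maps (type 1, virtual and sparable), then process the
 * Logical Volume Integrity sequence and the File Set Descriptor to
 * locate the root directory ICB. Returns NULL on any failure.
 */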
953 static struct udf_vfs *
954 ud_validate_and_fill_superblock(dev_t dev, int32_t bsize, uint32_t avd_loc)
955 {
956 int32_t error, count, index, shift;
957 uint32_t dummy, vds_loc;
958 caddr_t addr;
959 daddr_t blkno, lblkno;
960 struct buf *secbp, *bp;
961 struct tag *ttag;
962 struct anch_vol_desc_ptr *avdp;
963 struct file_set_desc *fsd;
964 struct udf_vfs *udf_vfsp = NULL;
965 struct pmap_hdr *hdr;
966 struct pmap_typ1 *typ1;
967 struct pmap_typ2 *typ2;
968 struct ud_map *map;
969 int32_t desc_len;
970
971 ud_printf("ud_validate_and_fill_superblock\n");
972
973 if (bsize < DEV_BSIZE) {
974 return (NULL);
975 }
976 shift = 0;
977 while ((bsize >> shift) > DEV_BSIZE) {
978 shift++;
979 }
980
981 /*
982 * Read Anchor Volume Descriptor
983 * Verify it and get the location of
984 * Main Volume Descriptor Sequence
985 */
986 secbp = ud_bread(dev, avd_loc << shift, ANCHOR_VOL_DESC_LEN);
987 if ((error = geterror(secbp)) != 0) {
988 cmn_err(CE_NOTE, "udfs : Could not read Anchor Volume Desc %x",
989 error);
990 brelse(secbp);
991 return (NULL);
992 }
993 avdp = (struct anch_vol_desc_ptr *)secbp->b_un.b_addr;
994 if (ud_verify_tag_and_desc(&avdp->avd_tag, UD_ANCH_VOL_DESC,
995 avd_loc, 1, ANCHOR_VOL_DESC_LEN) != 0) {
996 brelse(secbp);
997 return (NULL);
998 }
999 udf_vfsp = (struct udf_vfs *)
1000 kmem_zalloc(sizeof (struct udf_vfs), KM_SLEEP);
1001 udf_vfsp->udf_mvds_loc = SWAP_32(avdp->avd_main_vdse.ext_loc);
1002 udf_vfsp->udf_mvds_len = SWAP_32(avdp->avd_main_vdse.ext_len);
1003 udf_vfsp->udf_rvds_loc = SWAP_32(avdp->avd_res_vdse.ext_loc);
1004 udf_vfsp->udf_rvds_len = SWAP_32(avdp->avd_res_vdse.ext_len);
1005 secbp->b_flags = B_AGE | B_STALE;
1006 brelse(secbp);
1007
1008 /*
1009 * Read Main Volume Descriptor Sequence
1010 * and process it
1011 */
1012 vds_loc = udf_vfsp->udf_mvds_loc;
1013 secbp = ud_bread(dev, vds_loc << shift,
1014 udf_vfsp->udf_mvds_len);
1015 if ((error = geterror(secbp)) != 0) {
1016 brelse(secbp);
1017 cmn_err(CE_NOTE, "udfs : Could not read Main Volume Desc %x",
1018 error);
1019
1020 vds_loc = udf_vfsp->udf_rvds_loc;
1021 secbp = ud_bread(dev, vds_loc << shift,
1022 udf_vfsp->udf_rvds_len);
1023 if ((error = geterror(secbp)) != 0) {
1024 brelse(secbp);
1025 cmn_err(CE_NOTE,
1026 "udfs : Could not read Res Volume Desc %x", error);
1027 return (NULL);
1028 }
1029 }
1030
1031 udf_vfsp->udf_vds = ngeteblk(udf_vfsp->udf_mvds_len);
1032 bp = udf_vfsp->udf_vds;
1033 bp->b_edev = dev;
1034 bp->b_dev = cmpdev(dev);
1035 bp->b_blkno = vds_loc << shift;
1036 bp->b_bcount = udf_vfsp->udf_mvds_len;
1037 bcopy(secbp->b_un.b_addr, bp->b_un.b_addr, udf_vfsp->udf_mvds_len);
1038 secbp->b_flags |= B_STALE | B_AGE;
1039 brelse(secbp);
1040
1041
1042 count = udf_vfsp->udf_mvds_len / DEV_BSIZE;
1043 addr = bp->b_un.b_addr;
1044 for (index = 0; index < count; index ++) {
1045 ttag = (struct tag *)(addr + index * DEV_BSIZE);
1046 desc_len = udf_vfsp->udf_mvds_len - (index * DEV_BSIZE);
1047 if (ud_verify_tag_and_desc(ttag, UD_PRI_VOL_DESC,
1048 vds_loc + (index >> shift),
1049 1, desc_len) == 0) {
1050 if (udf_vfsp->udf_pvd == NULL) {
1051 udf_vfsp->udf_pvd =
1052 (struct pri_vol_desc *)ttag;
1053 } else {
1054 struct pri_vol_desc *opvd, *npvd;
1055
1056 opvd = udf_vfsp->udf_pvd;
1057 npvd = (struct pri_vol_desc *)ttag;
1058
1059 if ((strncmp(opvd->pvd_vsi,
1060 npvd->pvd_vsi, 128) == 0) &&
1061 (strncmp(opvd->pvd_vol_id,
1062 npvd->pvd_vol_id, 32) == 0) &&
1063 (strncmp((caddr_t)&opvd->pvd_desc_cs,
1064 (caddr_t)&npvd->pvd_desc_cs,
1065 sizeof (charspec_t)) == 0)) {
1066
1067 if (SWAP_32(opvd->pvd_vdsn) <
1068 SWAP_32(npvd->pvd_vdsn)) {
1069 udf_vfsp->udf_pvd = npvd;
1070 }
1071 } else {
1072 goto out;
1073 }
1074 }
1075 } else if (ud_verify_tag_and_desc(ttag, UD_LOG_VOL_DESC,
1076 vds_loc + (index >> shift),
1077 1, desc_len) == 0) {
1078 struct log_vol_desc *lvd;
1079
1080 lvd = (struct log_vol_desc *)ttag;
1081 if (strncmp(lvd->lvd_dom_id.reg_id,
1082 UDF_DOMAIN_NAME, 23) != 0) {
1083 printf("Domain ID in lvd is not valid\n");
1084 goto out;
1085 }
1086
1087 if (udf_vfsp->udf_lvd == NULL) {
1088 udf_vfsp->udf_lvd = lvd;
1089 } else {
1090 struct log_vol_desc *olvd;
1091
1092 olvd = udf_vfsp->udf_lvd;
1093 if ((strncmp((caddr_t)&olvd->lvd_desc_cs,
1094 (caddr_t)&lvd->lvd_desc_cs,
1095 sizeof (charspec_t)) == 0) &&
1096 (strncmp(olvd->lvd_lvid,
1097 lvd->lvd_lvid, 128) == 0)) {
1098 if (SWAP_32(olvd->lvd_vdsn) <
1099 SWAP_32(lvd->lvd_vdsn)) {
1100 udf_vfsp->udf_lvd = lvd;
1101 }
1102 } else {
1103 goto out;
1104 }
1105 }
1106 } else if (ud_verify_tag_and_desc(ttag, UD_PART_DESC,
1107 vds_loc + (index >> shift),
1108 1, desc_len) == 0) {
1109 int32_t i;
1110 struct phdr_desc *hdr;
1111 struct part_desc *pdesc;
1112 struct ud_part *pnew, *pold, *part;
1113
1114 pdesc = (struct part_desc *)ttag;
1115 pold = udf_vfsp->udf_parts;
1116 for (i = 0; i < udf_vfsp->udf_npart; i++) {
1117 if (pold->udp_number !=
1118 SWAP_16(pdesc->pd_pnum)) {
1119 pold++;
1120 continue;
1121 }
1122
1123 if (SWAP_32(pdesc->pd_vdsn) >
1124 pold->udp_seqno) {
1125 pold->udp_seqno =
1126 SWAP_32(pdesc->pd_vdsn);
1127 pold->udp_access =
1128 SWAP_32(pdesc->pd_acc_type);
1129 pold->udp_start =
1130 SWAP_32(pdesc->pd_part_start);
1131 pold->udp_length =
1132 SWAP_32(pdesc->pd_part_length);
1133 }
1134 goto loop_end;
1135 }
1136 pold = udf_vfsp->udf_parts;
1137 udf_vfsp->udf_npart++;
1138 pnew = kmem_zalloc(udf_vfsp->udf_npart *
1139 sizeof (struct ud_part), KM_SLEEP);
1140 udf_vfsp->udf_parts = pnew;
1141 if (pold) {
1142 bcopy(pold, pnew,
1143 sizeof (struct ud_part) *
1144 (udf_vfsp->udf_npart - 1));
1145 kmem_free(pold,
1146 sizeof (struct ud_part) *
1147 (udf_vfsp->udf_npart - 1));
1148 }
1149 part = pnew + (udf_vfsp->udf_npart - 1);
1150 part->udp_number = SWAP_16(pdesc->pd_pnum);
1151 part->udp_seqno = SWAP_32(pdesc->pd_vdsn);
1152 part->udp_access = SWAP_32(pdesc->pd_acc_type);
1153 part->udp_start = SWAP_32(pdesc->pd_part_start);
1154 part->udp_length = SWAP_32(pdesc->pd_part_length);
1155 part->udp_last_alloc = 0;
1156
1157 /*
1158 * Figure out space bitmaps
1159 * or space tables
1160 */
1161 hdr = (struct phdr_desc *)pdesc->pd_pc_use;
1162 if (hdr->phdr_ust.sad_ext_len) {
1163 part->udp_flags = UDP_SPACETBLS;
1164 part->udp_unall_loc =
1165 SWAP_32(hdr->phdr_ust.sad_ext_loc);
1166 part->udp_unall_len =
1167 SWAP_32(hdr->phdr_ust.sad_ext_len);
1168 part->udp_freed_loc =
1169 SWAP_32(hdr->phdr_fst.sad_ext_loc);
1170 part->udp_freed_len =
1171 SWAP_32(hdr->phdr_fst.sad_ext_len);
1172 } else {
1173 part->udp_flags = UDP_BITMAPS;
1174 part->udp_unall_loc =
1175 SWAP_32(hdr->phdr_usb.sad_ext_loc);
1176 part->udp_unall_len =
1177 SWAP_32(hdr->phdr_usb.sad_ext_len);
1178 part->udp_freed_loc =
1179 SWAP_32(hdr->phdr_fsb.sad_ext_loc);
1180 part->udp_freed_len =
1181 SWAP_32(hdr->phdr_fsb.sad_ext_len);
1182 }
1183 } else if (ud_verify_tag_and_desc(ttag, UD_TERM_DESC,
1184 vds_loc + (index >> shift),
1185 1, desc_len) == 0) {
1186
1187 break;
1188 }
1189 loop_end:
1190 ;
1191 }
1192 if ((udf_vfsp->udf_pvd == NULL) ||
1193 (udf_vfsp->udf_lvd == NULL) ||
1194 (udf_vfsp->udf_parts == NULL)) {
1195 goto out;
1196 }
1197
1198 /*
1199 * Process Primary Volume Descriptor
1200 */
1201 (void) strncpy(udf_vfsp->udf_volid, udf_vfsp->udf_pvd->pvd_vol_id, 32);
1202 udf_vfsp->udf_volid[31] = '\0';
1203 udf_vfsp->udf_tsno = SWAP_16(udf_vfsp->udf_pvd->pvd_tag.tag_sno);
1204
1205 /*
1206 * Process Logical Volume Descriptor
1207 */
1208 udf_vfsp->udf_lbsize =
1209 SWAP_32(udf_vfsp->udf_lvd->lvd_log_bsize);
1210 udf_vfsp->udf_lbmask = udf_vfsp->udf_lbsize - 1;
1211 udf_vfsp->udf_l2d_shift = shift;
1212 udf_vfsp->udf_l2b_shift = shift + DEV_BSHIFT;
1213
1214 /*
1215 * Check if the media is in
1216 * the proper domain.
1217 */
1218 if (strcmp(udf_vfsp->udf_lvd->lvd_dom_id.reg_id,
1219 UDF_DOMAIN_NAME) != 0) {
1220 goto out;
1221 }
1222
1223 /*
1224 * The AVDS offset does not match the lbsize
1225 * in the lvd; bail out.
1226 */
1227 if (udf_vfsp->udf_lbsize != bsize) {
1228 goto out;
1229 }
1230
1231 udf_vfsp->udf_iseq_loc =
1232 SWAP_32(udf_vfsp->udf_lvd->lvd_int_seq_ext.ext_loc);
1233 udf_vfsp->udf_iseq_len =
1234 SWAP_32(udf_vfsp->udf_lvd->lvd_int_seq_ext.ext_len);
1235
1236 udf_vfsp->udf_fsd_prn =
1237 SWAP_16(udf_vfsp->udf_lvd->lvd_lvcu.lad_ext_prn);
1238 udf_vfsp->udf_fsd_loc =
1239 SWAP_32(udf_vfsp->udf_lvd->lvd_lvcu.lad_ext_loc);
1240 udf_vfsp->udf_fsd_len =
1241 SWAP_32(udf_vfsp->udf_lvd->lvd_lvcu.lad_ext_len);
1242
1243
1244 /*
1245 * process partitions
1246 */
1247 udf_vfsp->udf_mtype = udf_vfsp->udf_parts[0].udp_access;
1248 for (index = 0; index < udf_vfsp->udf_npart; index ++) {
1249 if (udf_vfsp->udf_parts[index].udp_access <
1250 udf_vfsp->udf_mtype) {
1251 udf_vfsp->udf_mtype =
1252 udf_vfsp->udf_parts[index].udp_access;
1253 }
1254 }
1255 if ((udf_vfsp->udf_mtype < UDF_MT_RO) ||
1256 (udf_vfsp->udf_mtype > UDF_MT_OW)) {
1257 udf_vfsp->udf_mtype = UDF_MT_RO;
1258 }
1259
1260 udf_vfsp->udf_nmaps = 0;
1261 hdr = (struct pmap_hdr *)udf_vfsp->udf_lvd->lvd_pmaps;
1262 count = SWAP_32(udf_vfsp->udf_lvd->lvd_num_pmaps);
1263 for (index = 0; index < count; index++) {
1264
1265 if ((hdr->maph_type == MAP_TYPE1) &&
1266 (hdr->maph_length == MAP_TYPE1_LEN)) {
1267 typ1 = (struct pmap_typ1 *)hdr;
1268
1269 map = udf_vfsp->udf_maps;
1270 udf_vfsp->udf_maps =
1271 kmem_zalloc(sizeof (struct ud_map) *
1272 (udf_vfsp->udf_nmaps + 1), KM_SLEEP);
1273 if (map != NULL) {
1274 bcopy(map, udf_vfsp->udf_maps,
1275 sizeof (struct ud_map) *
1276 udf_vfsp->udf_nmaps);
1277 kmem_free(map, sizeof (struct ud_map) *
1278 udf_vfsp->udf_nmaps);
1279 }
1280 map = udf_vfsp->udf_maps + udf_vfsp->udf_nmaps;
1281 map->udm_flags = UDM_MAP_NORM;
1282 map->udm_vsn = SWAP_16(typ1->map1_vsn);
1283 map->udm_pn = SWAP_16(typ1->map1_pn);
1284 udf_vfsp->udf_nmaps ++;
1285 } else if ((hdr->maph_type == MAP_TYPE2) &&
1286 (hdr->maph_length == MAP_TYPE2_LEN)) {
1287 typ2 = (struct pmap_typ2 *)hdr;
1288
1289 if (strncmp(typ2->map2_pti.reg_id,
1290 UDF_VIRT_PART, 23) == 0) {
1291 /*
1292 * Add this to the normal
1293 * partition table so that
1294 * we do not
1295 */
1296 map = udf_vfsp->udf_maps;
1297 udf_vfsp->udf_maps =
1298 kmem_zalloc(sizeof (struct ud_map) *
1299 (udf_vfsp->udf_nmaps + 1), KM_SLEEP);
1300 if (map != NULL) {
1301 bcopy(map, udf_vfsp->udf_maps,
1302 sizeof (struct ud_map) *
1303 udf_vfsp->udf_nmaps);
1304 kmem_free(map,
1305 sizeof (struct ud_map) *
1306 udf_vfsp->udf_nmaps);
1307 }
1308 map = udf_vfsp->udf_maps + udf_vfsp->udf_nmaps;
1309 map->udm_flags = UDM_MAP_VPM;
1310 map->udm_vsn = SWAP_16(typ2->map2_vsn);
1311 map->udm_pn = SWAP_16(typ2->map2_pn);
1312 udf_vfsp->udf_nmaps ++;
1313 if (error = ud_get_last_block(dev, &lblkno)) {
1314 goto out;
1315 }
1316 if (error = ud_val_get_vat(udf_vfsp, dev,
1317 lblkno, map)) {
1318 goto out;
1319 }
1320 } else if (strncmp(typ2->map2_pti.reg_id,
1321 UDF_SPAR_PART, 23) == 0) {
1322
1323 if (SWAP_16(typ2->map2_pl) != 32) {
1324 printf(
1325 "Packet Length is not valid %x\n",
1326 SWAP_16(typ2->map2_pl));
1327 goto out;
1328 }
1329 if ((typ2->map2_nst < 1) ||
1330 (typ2->map2_nst > 4)) {
1331 goto out;
1332 }
1333 map = udf_vfsp->udf_maps;
1334 udf_vfsp->udf_maps =
1335 kmem_zalloc(sizeof (struct ud_map) *
1336 (udf_vfsp->udf_nmaps + 1),
1337 KM_SLEEP);
1338 if (map != NULL) {
1339 bcopy(map, udf_vfsp->udf_maps,
1340 sizeof (struct ud_map) *
1341 udf_vfsp->udf_nmaps);
1342 kmem_free(map,
1343 sizeof (struct ud_map) *
1344 udf_vfsp->udf_nmaps);
1345 }
1346 map = udf_vfsp->udf_maps + udf_vfsp->udf_nmaps;
1347 map->udm_flags = UDM_MAP_SPM;
1348 map->udm_vsn = SWAP_16(typ2->map2_vsn);
1349 map->udm_pn = SWAP_16(typ2->map2_pn);
1350
1351 udf_vfsp->udf_nmaps ++;
1352
1353 if (error = ud_read_sparing_tbls(udf_vfsp,
1354 dev, map, typ2)) {
1355 goto out;
1356 }
1357 } else {
1358 /*
1359 * Unknown type of partition
1360 * Bail out
1361 */
1362 goto out;
1363 }
1364 } else {
1365 /*
1366 * Unknown type of partition
1367 * Bail out
1368 */
1369 goto out;
1370 }
1371 hdr = (struct pmap_hdr *)(((uint8_t *)hdr) + hdr->maph_length);
1372 }
1373
1374
1375 /*
1376 * Read Logical Volume Integrity Sequence
1377 * and process it
1378 */
1379 secbp = ud_bread(dev, udf_vfsp->udf_iseq_loc << shift,
1380 udf_vfsp->udf_iseq_len);
1381 if ((error = geterror(secbp)) != 0) {
1382 cmn_err(CE_NOTE,
1383 "udfs : Could not read Logical Volume Integrity Sequence %x",
1384 error);
1385 brelse(secbp);
1386 goto out;
1387 }
1388 udf_vfsp->udf_iseq = ngeteblk(udf_vfsp->udf_iseq_len);
1389 bp = udf_vfsp->udf_iseq;
1390 bp->b_edev = dev;
1391 bp->b_dev = cmpdev(dev);
1392 bp->b_blkno = udf_vfsp->udf_iseq_loc << shift;
1393 bp->b_bcount = udf_vfsp->udf_iseq_len;
1394 bcopy(secbp->b_un.b_addr, bp->b_un.b_addr, udf_vfsp->udf_iseq_len);
1395 secbp->b_flags |= B_STALE | B_AGE;
1396 brelse(secbp);
1397
1398 count = udf_vfsp->udf_iseq_len / DEV_BSIZE;
1399 addr = bp->b_un.b_addr;
1400 for (index = 0; index < count; index ++) {
1401 ttag = (struct tag *)(addr + index * DEV_BSIZE);
1402 desc_len = udf_vfsp->udf_iseq_len - (index * DEV_BSIZE);
1403 if (ud_verify_tag_and_desc(ttag, UD_LOG_VOL_INT,
1404 udf_vfsp->udf_iseq_loc + (index >> shift),
1405 1, desc_len) == 0) {
1406
1407 struct log_vol_int_desc *lvid;
1408
1409 lvid = (struct log_vol_int_desc *)ttag;
1410 udf_vfsp->udf_lvid = lvid;
1411
1412 if (SWAP_32(lvid->lvid_int_type) == LOG_VOL_CLOSE_INT) {
1413 udf_vfsp->udf_clean = UDF_CLEAN;
1414 } else {
1415 udf_vfsp->udf_clean = UDF_DIRTY;
1416 }
1417
1418 /*
1419 * update superblock with the metadata
1420 */
1421 ud_convert_to_superblock(udf_vfsp, lvid);
1422 break;
1423 }
1424 }
1425
1426 if (udf_vfsp->udf_lvid == NULL) {
1427 goto out;
1428 }
1429
1430 if ((blkno = ud_xlate_to_daddr(udf_vfsp,
1431 udf_vfsp->udf_fsd_prn, udf_vfsp->udf_fsd_loc,
1432 1, &dummy)) == 0) {
1433 goto out;
1434 }
1435 secbp = ud_bread(dev, blkno << shift, udf_vfsp->udf_fsd_len);
1436 if ((error = geterror(secbp)) != 0) {
1437 cmn_err(CE_NOTE,
1438 "udfs : Could not read File Set Descriptor %x", error);
1439 brelse(secbp);
1440 goto out;
1441 }
1442 fsd = (struct file_set_desc *)secbp->b_un.b_addr;
1443 if (ud_verify_tag_and_desc(&fsd->fsd_tag, UD_FILE_SET_DESC,
1444 udf_vfsp->udf_fsd_loc,
1445 1, udf_vfsp->udf_fsd_len) != 0) {
1446 secbp->b_flags = B_AGE | B_STALE;
1447 brelse(secbp);
1448 goto out;
1449 }
1450 udf_vfsp->udf_ricb_prn = SWAP_16(fsd->fsd_root_icb.lad_ext_prn);
1451 udf_vfsp->udf_ricb_loc = SWAP_32(fsd->fsd_root_icb.lad_ext_loc);
1452 udf_vfsp->udf_ricb_len = SWAP_32(fsd->fsd_root_icb.lad_ext_len);
1453 secbp->b_flags = B_AGE | B_STALE;
1454 brelse(secbp);
1455 udf_vfsp->udf_root_blkno = ud_xlate_to_daddr(udf_vfsp,
1456 udf_vfsp->udf_ricb_prn, udf_vfsp->udf_ricb_loc,
1457 1, &dummy);
1458
1459 return (udf_vfsp);
1460 out:
1461 ud_destroy_fsp(udf_vfsp);
1462
1463 return (NULL);
1464 }
1465
1466 /*
1467 * release/free resources from one ud_map; map data was zalloc'd in
1468 * ud_validate_and_fill_superblock() and fields may later point to
1469 * valid data
1470 */
1471 static void
1472 ud_free_map(struct ud_map *map)
1473 {
1474 uint32_t n;
1475
1476 if (map->udm_flags & UDM_MAP_VPM) {
1477 if (map->udm_count) {
1478 kmem_free(map->udm_count,
1479 map->udm_nent * sizeof (*map->udm_count));
1480 map->udm_count = NULL;
1481 }
1482 if (map->udm_bp) {
1483 for (n = 0; n < map->udm_nent; n++) {
1484 if (map->udm_bp[n])
1485 brelse(map->udm_bp[n]);
1486 }
1487 kmem_free(map->udm_bp,
1488 map->udm_nent * sizeof (*map->udm_bp));
1489 map->udm_bp = NULL;
1490 }
1491 if (map->udm_addr) {
1492 kmem_free(map->udm_addr,
1493 map->udm_nent * sizeof (*map->udm_addr));
1494 map->udm_addr = NULL;
1495 }
1496 }
1497 if (map->udm_flags & UDM_MAP_SPM) {
1498 for (n = 0; n < MAX_SPM; n++) {
1499 if (map->udm_sbp[n]) {
1500 brelse(map->udm_sbp[n]);
1501 map->udm_sbp[n] = NULL;
1502 map->udm_spaddr[n] = NULL;
1503 }
1504 }
1505 }
1506 }
1507
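/*
 * Tear down an in-core superblock: release the partition maps and
 * partition array, the cached descriptor buffers and the mount point
 * name, detach from the vfs list and free the udf_vfs itself.
 */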
1508 void
1509 ud_destroy_fsp(struct udf_vfs *udf_vfsp)
1510 {
1511 int32_t i;
1512
1513 ud_printf("ud_destroy_fsp\n");
1514 if (udf_vfsp == NULL)
1515 return;
1516
1517 if (udf_vfsp->udf_maps) {
1518 for (i = 0; i < udf_vfsp->udf_nmaps; i++)
1519 ud_free_map(&udf_vfsp->udf_maps[i]);
1520
1521 kmem_free(udf_vfsp->udf_maps,
1522 udf_vfsp->udf_nmaps * sizeof (*udf_vfsp->udf_maps));
1523 }
1524
1525 if (udf_vfsp->udf_parts) {
1526 kmem_free(udf_vfsp->udf_parts,
1527 udf_vfsp->udf_npart * sizeof (*udf_vfsp->udf_parts));
1528 }
1529 if (udf_vfsp->udf_iseq) {
1530 udf_vfsp->udf_iseq->b_flags |= (B_STALE|B_AGE);
1531 brelse(udf_vfsp->udf_iseq);
1532 }
1533 if (udf_vfsp->udf_vds) {
1534 udf_vfsp->udf_vds->b_flags |= (B_STALE|B_AGE);
1535 brelse(udf_vfsp->udf_vds);
1536 }
1537 if (udf_vfsp->udf_vfs)
1538 ud_vfs_remove(udf_vfsp);
1539 if (udf_vfsp->udf_fsmnt) {
1540 kmem_free(udf_vfsp->udf_fsmnt,
1541 strlen(udf_vfsp->udf_fsmnt) + 1);
1542 }
1543 kmem_free(udf_vfsp, sizeof (*udf_vfsp));
1544 }
1545
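/*
 * Pull the interesting fields of a Logical Volume Integrity
 * Descriptor into the in-core superblock: the next unique id, the
 * per-partition free and total block counts, and the implementation
 * use area carrying the file/directory counts and the minimum and
 * maximum UDF read/write revisions.
 */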
1546 void
1547 ud_convert_to_superblock(struct udf_vfs *udf_vfsp,
1548 struct log_vol_int_desc *lvid)
1549 {
1550 int32_t i, c;
1551 uint32_t *temp;
1552 struct ud_part *ud_part;
1553 struct lvid_iu *iu;
1554
1555 udf_vfsp->udf_maxuniq = SWAP_64(lvid->lvid_uniqid);
1556 temp = lvid->lvid_fst;
1557 c = SWAP_32(lvid->lvid_npart);
1558 ud_part = udf_vfsp->udf_parts;
1559 for (i = 0; i < c; i++) {
1560 if (i >= udf_vfsp->udf_npart) {
1561 continue;
1562 }
1563 ud_part->udp_nfree = SWAP_32(temp[i]);
1564 ud_part->udp_nblocks = SWAP_32(temp[c + i]);
1565 udf_vfsp->udf_freeblks += SWAP_32(temp[i]);
1566 udf_vfsp->udf_totalblks += SWAP_32(temp[c + i]);
1567 ud_part++;
1568 }
1569
1570 iu = (struct lvid_iu *)(temp + c * 2);
1571 udf_vfsp->udf_nfiles = SWAP_32(iu->lvidiu_nfiles);
1572 udf_vfsp->udf_ndirs = SWAP_32(iu->lvidiu_ndirs);
1573 udf_vfsp->udf_miread = BCD2HEX_16(SWAP_16(iu->lvidiu_mread));
1574 udf_vfsp->udf_miwrite = BCD2HEX_16(SWAP_16(iu->lvidiu_mwrite));
1575 udf_vfsp->udf_mawrite = BCD2HEX_16(SWAP_16(iu->lvidiu_maxwr));
1576 }
1577
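/*
 * Write the in-core integrity data back to disk under udf_lock.
 */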
1578 void
1579 ud_update_superblock(struct vfs *vfsp)
1580 {
1581 struct udf_vfs *udf_vfsp;
1582
1583 ud_printf("ud_update_superblock\n");
1584
1585 udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
1586
1587 mutex_enter(&udf_vfsp->udf_lock);
1588 ud_sbwrite(udf_vfsp);
1589 mutex_exit(&udf_vfsp->udf_lock);
1590 }
1591
1592
1593 #include <sys/dkio.h>
1594 #include <sys/cdio.h>
1595 #include <sys/vtoc.h>
1596
1597 /*
1598 * This part of the code is known
1599 * to work only on SPARC. It needs
1600 * to be evaluated before using it on x86.
1601 */
1602 int32_t
1603 ud_get_last_block(dev_t dev, daddr_t *blkno)
1604 {
1605 struct vtoc vtoc;
1606 struct dk_cinfo dki_info;
1607 int32_t rval, error;
1608
1609 if ((error = cdev_ioctl(dev, DKIOCGVTOC, (intptr_t)&vtoc,
1610 FKIOCTL|FREAD|FNATIVE, CRED(), &rval)) != 0) {
1611 cmn_err(CE_NOTE, "Could not get the vtoc information");
1612 return (error);
1613 }
1614
1615 if (vtoc.v_sanity != VTOC_SANE) {
1616 return (EINVAL);
1617 }
1618 if ((error = cdev_ioctl(dev, DKIOCINFO, (intptr_t)&dki_info,
1619 FKIOCTL|FREAD|FNATIVE, CRED(), &rval)) != 0) {
1620 cmn_err(CE_NOTE, "Could not get the slice information");
1621 return (error);
1622 }
1623
1624 if (dki_info.dki_partition > V_NUMPAR) {
1625 return (EINVAL);
1626 }
1627
1628
1629 *blkno = vtoc.v_part[dki_info.dki_partition].p_size;
1630
1631 return (0);
1632 }
1633
1634 /* Search sequentially N - 152, N - 150, N - 2, N for the VAT ICB */
1635 /*
1636 * int32_t ud_sub_blks[] = {2, 0, 152, 150};
1637 */
1638 int32_t ud_sub_blks[] = {152, 150, 2, 0};
1639 int32_t ud_sub_count = 4;
1640
1641 /*
1642 * Validate the VAT ICB
1643 */
1644 static int32_t
1645 ud_val_get_vat(struct udf_vfs *udf_vfsp, dev_t dev,
1646 daddr_t blkno, struct ud_map *udm)
1647 {
1648 struct buf *secbp;
1649 struct file_entry *fe;
1650 int32_t end_loc, i, j, ad_type;
1651 struct short_ad *sad;
1652 struct long_ad *lad;
1653 uint32_t count, blk;
1654 struct ud_part *ud_part;
1655 int err = 0;
1656
1657 end_loc = (blkno >> udf_vfsp->udf_l2d_shift) - 1;
1658
1659 for (i = 0; i < ud_sub_count; i++) {
1660 udm->udm_vat_icb = end_loc - ud_sub_blks[i];
1661
1662 secbp = ud_bread(dev,
1663 udm->udm_vat_icb << udf_vfsp->udf_l2d_shift,
1664 udf_vfsp->udf_lbsize);
1665 ASSERT(secbp->b_un.b_addr);
1666
1667 fe = (struct file_entry *)secbp->b_un.b_addr;
1668 if (ud_verify_tag_and_desc(&fe->fe_tag, UD_FILE_ENTRY, 0,
1669 0, 0) == 0) {
1670 if (ud_verify_tag_and_desc(&fe->fe_tag, UD_FILE_ENTRY,
1671 SWAP_32(fe->fe_tag.tag_loc),
1672 1, udf_vfsp->udf_lbsize) == 0) {
1673 if (fe->fe_icb_tag.itag_ftype == 0) {
1674 break;
1675 }
1676 }
1677 }
1678 secbp->b_flags |= B_AGE | B_STALE;
1679 brelse(secbp);
1680 }
1681 if (i == ud_sub_count) {
1682 return (EINVAL);
1683 }
1684
1685 ad_type = SWAP_16(fe->fe_icb_tag.itag_flags) & 0x3;
1686 if (ad_type == ICB_FLAG_ONE_AD) {
1687 udm->udm_nent = 1;
1688 } else if (ad_type == ICB_FLAG_SHORT_AD) {
1689 udm->udm_nent =
1690 SWAP_32(fe->fe_len_adesc) / sizeof (struct short_ad);
1691 } else if (ad_type == ICB_FLAG_LONG_AD) {
1692 udm->udm_nent =
1693 SWAP_32(fe->fe_len_adesc) / sizeof (struct long_ad);
1694 } else {
1695 err = EINVAL;
1696 goto end;
1697 }
1698
1699 udm->udm_count = kmem_zalloc(udm->udm_nent * sizeof (*udm->udm_count),
1700 KM_SLEEP);
1701 udm->udm_bp = kmem_zalloc(udm->udm_nent * sizeof (*udm->udm_bp),
1702 KM_SLEEP);
1703 udm->udm_addr = kmem_zalloc(udm->udm_nent * sizeof (*udm->udm_addr),
1704 KM_SLEEP);
1705
1706 if (ad_type == ICB_FLAG_ONE_AD) {
1707 udm->udm_count[0] = (SWAP_64(fe->fe_info_len) - 36) /
1708 sizeof (uint32_t);
1709 udm->udm_bp[0] = secbp;
1710 udm->udm_addr[0] = (uint32_t *)
1711 &fe->fe_spec[SWAP_32(fe->fe_len_ear)];
1712 return (0);
1713 }
1714 for (i = 0; i < udm->udm_nent; i++) {
1715 if (ad_type == ICB_FLAG_SHORT_AD) {
1716 sad = (struct short_ad *)
1717 (fe->fe_spec + SWAP_32(fe->fe_len_ear));
1718 sad += i;
1719 count = SWAP_32(sad->sad_ext_len);
1720 blk = SWAP_32(sad->sad_ext_loc);
1721 } else {
1722 lad = (struct long_ad *)
1723 (fe->fe_spec + SWAP_32(fe->fe_len_ear));
1724 lad += i;
1725 count = SWAP_32(lad->lad_ext_len);
1726 blk = SWAP_32(lad->lad_ext_loc);
1727 ASSERT(SWAP_16(lad->lad_ext_prn) == udm->udm_pn);
1728 }
1729 if ((count & 0x3FFFFFFF) == 0) {
1730 break;
1731 }
1732 if (i < udm->udm_nent - 1) {
1733 udm->udm_count[i] = count / 4;
1734 } else {
1735 udm->udm_count[i] = (count - 36) / 4;
1736 }
1737 ud_part = udf_vfsp->udf_parts;
1738 for (j = 0; j < udf_vfsp->udf_npart; j++) {
1739 if (udm->udm_pn == ud_part->udp_number) {
1740 blk = ud_part->udp_start + blk;
1741 break;
1742 }
1743 }
1744 if (j == udf_vfsp->udf_npart) {
1745 err = EINVAL;
1746 break;
1747 }
1748
1749 count = (count + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
1750 udm->udm_bp[i] = ud_bread(dev,
1751 blk << udf_vfsp->udf_l2d_shift, count);
1752 if ((udm->udm_bp[i]->b_error != 0) ||
1753 (udm->udm_bp[i]->b_resid)) {
1754 err = EINVAL;
1755 break;
1756 }
1757 udm->udm_addr[i] = (uint32_t *)udm->udm_bp[i]->b_un.b_addr;
1758 }
1759
1760 end:
1761 if (err)
1762 ud_free_map(udm);
1763 secbp->b_flags |= B_AGE | B_STALE;
1764 brelse(secbp);
1765 return (err);
1766 }
1767
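/*
 * Read the sparing tables named by a type 2 sparable partition map.
 * Each listed copy is read and checked against UDF_SPAR_TBL; buffers
 * for the valid copies are kept in the map. At least one valid copy
 * is required, otherwise EINVAL is returned.
 */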
1768 int32_t
1769 ud_read_sparing_tbls(struct udf_vfs *udf_vfsp,
1770 dev_t dev, struct ud_map *map, struct pmap_typ2 *typ2)
1771 {
1772 int32_t index, valid = 0;
1773 uint32_t sz;
1774 struct buf *bp;
1775 struct stbl *stbl;
1776
1777 map->udm_plen = SWAP_16(typ2->map2_pl);
1778 map->udm_nspm = typ2->map2_nst;
1779 map->udm_spsz = SWAP_32(typ2->map2_sest);
1780 sz = (map->udm_spsz + udf_vfsp->udf_lbmask) & ~udf_vfsp->udf_lbmask;
1781 if (sz == 0) {
1782 return (0);
1783 }
1784
1785 for (index = 0; index < map->udm_nspm; index++) {
1786 map->udm_loc[index] = SWAP_32(typ2->map2_st[index]);
1787
1788 bp = ud_bread(dev,
1789 map->udm_loc[index] << udf_vfsp->udf_l2d_shift, sz);
1790 if ((bp->b_error != 0) || (bp->b_resid)) {
1791 brelse(bp);
1792 continue;
1793 }
1794 stbl = (struct stbl *)bp->b_un.b_addr;
1795 if (strncmp(stbl->stbl_si.reg_id, UDF_SPAR_TBL, 23) != 0) {
1796 printf("Sparing Identifier does not match\n");
1797 bp->b_flags |= B_AGE | B_STALE;
1798 brelse(bp);
1799 continue;
1800 }
1801 map->udm_sbp[index] = bp;
1802 map->udm_spaddr[index] = bp->b_un.b_addr;
1803 #ifdef UNDEF
1804 {
1805 struct stbl_entry *te;
1806 int32_t i, tbl_len;
1807
1808 te = (struct stbl_entry *)&stbl->stbl_entry;
1809 tbl_len = SWAP_16(stbl->stbl_len);
1810
1811 printf("%x %x\n", tbl_len, SWAP_32(stbl->stbl_seqno));
1812 printf("%x %x\n", bp->b_un.b_addr, te);
1813
1814 for (i = 0; i < tbl_len; i++) {
1815 printf("%x %x\n", SWAP_32(te->sent_ol), SWAP_32(te->sent_ml));
1816 te ++;
1817 }
1818 }
1819 #endif
1820 valid ++;
1821 }
1822
1823 if (valid) {
1824 return (0);
1825 }
1826 return (EINVAL);
1827 }
1828
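/*
 * Probe for the logical block size of the media by looking for a
 * valid Anchor Volume Descriptor Pointer. For each candidate block
 * size from DEV_BSIZE up to MAXBSIZE, try the standard AVDP location
 * at block 256 (adjusted by any CD-ROM session offset) and, when the
 * last block can be determined, alternate locations relative to the
 * end of the media. On success the AVDP location is returned through
 * *loc and the block size is the return value; 0 means no AVDP was
 * found.
 */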
1829 uint32_t
1830 ud_get_lbsize(dev_t dev, uint32_t *loc)
1831 {
1832 int32_t bsize, shift, index, end_index;
1833 daddr_t last_block;
1834 uint32_t avd_loc;
1835 struct buf *bp;
1836 struct anch_vol_desc_ptr *avdp;
1837 uint32_t session_offset = 0;
1838 int32_t rval;
1839
1840 if (ud_get_last_block(dev, &last_block) != 0) {
1841 end_index = 1;
1842 } else {
1843 end_index = 3;
1844 }
1845
1846 if (cdev_ioctl(dev, CDROMREADOFFSET, (intptr_t)&session_offset,
1847 FKIOCTL|FREAD|FNATIVE, CRED(), &rval) != 0) {
1848 session_offset = 0;
1849 }
1850
1851 for (index = 0; index < end_index; index++) {
1852
1853 for (bsize = DEV_BSIZE, shift = 0;
1854 bsize <= MAXBSIZE; bsize <<= 1, shift++) {
1855
1856 if (index == 0) {
1857 avd_loc = 256;
1858 if (bsize <= 2048) {
1859 avd_loc +=
1860 session_offset * 2048 / bsize;
1861 } else {
1862 avd_loc +=
1863 session_offset / (bsize / 2048);
1864 }
1865 } else if (index == 1) {
1866 avd_loc = last_block - (1 << shift);
1867 } else {
1868 avd_loc = last_block - (256 << shift);
1869 }
1870
1871 bp = ud_bread(dev, avd_loc << shift,
1872 ANCHOR_VOL_DESC_LEN);
1873 if (geterror(bp) != 0) {
1874 brelse(bp);
1875 continue;
1876 }
1877
1878 /*
1879 * Verify that we have an AVDP here
1880 */
1881 avdp = (struct anch_vol_desc_ptr *)bp->b_un.b_addr;
1882 if (ud_verify_tag_and_desc(&avdp->avd_tag,
1883 UD_ANCH_VOL_DESC, avd_loc,
1884 1, ANCHOR_VOL_DESC_LEN) != 0) {
1885 bp->b_flags |= B_AGE | B_STALE;
1886 brelse(bp);
1887 continue;
1888 }
1889 bp->b_flags |= B_AGE | B_STALE;
1890 brelse(bp);
1891 *loc = avd_loc;
1892 return (bsize);
1893 }
1894 }
1895
1896 /*
1897 * Did not find an AVDP at any of the locations
1898 */
1899 return (0);
1900 }
1901
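/*
 * File system type initialization routine registered through the
 * vfsdef_t above. Install the udfs vfs and vnode operation
 * templates, remember the assigned file system type and initialize
 * the in-core inode cache.
 */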
1902 static int
1903 udfinit(int fstype, char *name)
1904 {
1905 static const fs_operation_def_t udf_vfsops_template[] = {
1906 VFSNAME_MOUNT, { .vfs_mount = udf_mount },
1907 VFSNAME_UNMOUNT, { .vfs_unmount = udf_unmount },
1908 VFSNAME_ROOT, { .vfs_root = udf_root },
1909 VFSNAME_STATVFS, { .vfs_statvfs = udf_statvfs },
1910 VFSNAME_SYNC, { .vfs_sync = udf_sync },
1911 VFSNAME_VGET, { .vfs_vget = udf_vget },
1912 VFSNAME_MOUNTROOT, { .vfs_mountroot = udf_mountroot },
1913 NULL, NULL
1914 };
1915 extern struct vnodeops *udf_vnodeops;
1916 extern const fs_operation_def_t udf_vnodeops_template[];
1917 int error;
1918
1919 ud_printf("udfinit\n");
1920
1921 error = vfs_setfsops(fstype, udf_vfsops_template, NULL);
1922 if (error != 0) {
1923 cmn_err(CE_WARN, "udfinit: bad vfs ops template");
1924 return (error);
1925 }
1926
1927 error = vn_make_ops(name, udf_vnodeops_template, &udf_vnodeops);
1928 if (error != 0) {
1929 (void) vfs_freevfsops_by_type(fstype);
1930 cmn_err(CE_WARN, "udfinit: bad vnode ops template");
1931 return (error);
1932 }
1933
1934 udf_fstype = fstype;
1935
1936 ud_init_inodes();
1937
1938 return (0);
1939 }