1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2017 by Delphix. All rights reserved.
24 */
25
26 #include <sys/types.h>
27 #include <sys/t_lock.h>
28 #include <sys/param.h>
29 #include <sys/time.h>
30 #include <sys/systm.h>
31 #include <sys/sysmacros.h>
32 #include <sys/resource.h>
33 #include <sys/signal.h>
34 #include <sys/cred.h>
35 #include <sys/user.h>
36 #include <sys/buf.h>
37 #include <sys/vfs.h>
38 #include <sys/stat.h>
39 #include <sys/vnode.h>
40 #include <sys/mode.h>
41 #include <sys/proc.h>
42 #include <sys/disp.h>
43 #include <sys/file.h>
44 #include <sys/fcntl.h>
45 #include <sys/flock.h>
46 #include <sys/kmem.h>
47 #include <sys/uio.h>
48 #include <sys/dnlc.h>
49 #include <sys/conf.h>
50 #include <sys/errno.h>
51 #include <sys/mman.h>
52 #include <sys/fbuf.h>
53 #include <sys/pathname.h>
54 #include <sys/debug.h>
55 #include <sys/vmsystm.h>
56 #include <sys/cmn_err.h>
57 #include <sys/dirent.h>
58 #include <sys/errno.h>
59 #include <sys/modctl.h>
60 #include <sys/statvfs.h>
61 #include <sys/mount.h>
62 #include <sys/sunddi.h>
63 #include <sys/bootconf.h>
64 #include <sys/policy.h>
65
66 #include <vm/hat.h>
67 #include <vm/page.h>
68 #include <vm/pvn.h>
69 #include <vm/as.h>
70 #include <vm/seg.h>
71 #include <vm/seg_map.h>
72 #include <vm/seg_kmem.h>
73 #include <vm/seg_vn.h>
74 #include <vm/rm.h>
75 #include <vm/page.h>
76 #include <sys/swap.h>
77
78
79 #include <fs/fs_subr.h>
80
81
82 #include <sys/fs/udf_volume.h>
83 #include <sys/fs/udf_inode.h>
84
85 extern struct vnodeops *udf_vnodeops;
86
87 kmutex_t ud_sync_busy;
88 /*
89 * udf_vfs list manipulation routines
90 */
91 kmutex_t udf_vfs_mutex;
92 struct udf_vfs *udf_vfs_instances;
93
94 union ihead ud_ihead[UD_HASH_SZ];
95 kmutex_t ud_icache_lock;
96
97 #define UD_BEGIN 0x0
98 #define UD_END 0x1
99 #define UD_UNKN 0x2
100 struct ud_inode *udf_ifreeh, *udf_ifreet;
101 kmutex_t udf_ifree_lock;
102
103 kmutex_t ud_nino_lock;
104 int32_t ud_max_inodes = 512;
105 int32_t ud_cur_inodes = 0;
106
107 uid_t ud_default_uid = 0;
108 gid_t ud_default_gid = 3;
109
110 int32_t ud_updat_ext4(struct ud_inode *, struct file_entry *);
111 int32_t ud_updat_ext4096(struct ud_inode *, struct file_entry *);
112 void ud_make_sad(struct icb_ext *, struct short_ad *, int32_t);
113 void ud_make_lad(struct icb_ext *, struct long_ad *, int32_t);
114 void ud_trunc_ext4(struct ud_inode *, u_offset_t);
115 void ud_trunc_ext4096(struct ud_inode *, u_offset_t);
116 void ud_add_to_free_list(struct ud_inode *, uint32_t);
117 void ud_remove_from_free_list(struct ud_inode *, uint32_t);
118
119
120 #ifdef DEBUG
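/*
 * ud_search_icache() - DEBUG-only helper that searches the in-core
 * inode cache for the inode whose ICB is at (prn, ploc) on the file
 * system described by vfsp.  Returns a pointer to the cached inode,
 * or NULL if it is not cached.  No hold is placed on the inode.
 */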
121 struct ud_inode *
122 ud_search_icache(struct vfs *vfsp, uint16_t prn, uint32_t ploc)
123 {
124 int32_t hno;
125 union ihead *ih;
126 struct ud_inode *ip;
127 struct udf_vfs *udf_vfsp;
128 uint32_t loc, dummy;
129
130 udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
131 loc = ud_xlate_to_daddr(udf_vfsp, prn, ploc, 1, &dummy);
132
133 mutex_enter(&ud_icache_lock);
134 hno = UD_INOHASH(vfsp->vfs_dev, loc);
135 ih = &ud_ihead[hno];
136 for (ip = ih->ih_chain[0];
137 ip != (struct ud_inode *)ih;
138 ip = ip->i_forw) {
139 if ((prn == ip->i_icb_prn) && (ploc == ip->i_icb_block) &&
140 (vfsp->vfs_dev == ip->i_dev)) {
141 mutex_exit(&ud_icache_lock);
142 return (ip);
143 }
144 }
145 mutex_exit(&ud_icache_lock);
146 return (0);
147 }
148 #endif
149
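/*
 * ud_iget() - find the in-core inode for the ICB at (prn, ploc) on the
 * file system vfsp, or construct one if it is not cached.  A reusable
 * inode is taken from the free list when possible; otherwise a new one
 * is allocated.  The file entry is then read from disk (or taken from
 * the caller-supplied buffer pbp) and the in-core fields are filled in.
 * On success, 0 is returned and *ipp points to an inode whose vnode
 * has a hold placed on it.
 */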
150 /* ARGSUSED */
151 int
152 ud_iget(struct vfs *vfsp, uint16_t prn, uint32_t ploc, struct ud_inode **ipp,
153 struct buf *pbp, struct cred *cred)
154 {
155 int32_t hno, nomem = 0, icb_tag_flags;
156 union ihead *ih;
157 struct ud_inode *ip;
158 struct vnode *vp;
159 struct buf *bp = NULL;
160 struct file_entry *fe;
161 struct udf_vfs *udf_vfsp;
162 struct ext_attr_hdr *eah;
163 struct attr_hdr *ah;
164 int32_t ea_len, ea_off;
165 daddr_t loc;
166 uint64_t offset = 0;
167 struct icb_ext *iext, *con;
168 uint32_t length, dummy;
169 int32_t ndesc, ftype;
170 uint16_t old_prn;
171 uint32_t old_block, old_lbano;
172
173 ud_printf("ud_iget\n");
174 udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
175 old_prn = 0;
176 old_block = old_lbano = 0;
177 ftype = 0;
178 loc = ud_xlate_to_daddr(udf_vfsp, prn, ploc, 1, &dummy);
179 loop:
180 mutex_enter(&ud_icache_lock);
181 hno = UD_INOHASH(vfsp->vfs_dev, loc);
182
183 ih = &ud_ihead[hno];
184 for (ip = ih->ih_chain[0];
185 ip != (struct ud_inode *)ih;
186 ip = ip->i_forw) {
187
188 if ((prn == ip->i_icb_prn) &&
189 (ploc == ip->i_icb_block) &&
190 (vfsp->vfs_dev == ip->i_dev)) {
191
192 vp = ITOV(ip);
193 VN_HOLD(vp);
194 mutex_exit(&ud_icache_lock);
195
196 rw_enter(&ip->i_contents, RW_READER);
197 mutex_enter(&ip->i_tlock);
198 if ((ip->i_flag & IREF) == 0) {
199 mutex_enter(&udf_ifree_lock);
200 ud_remove_from_free_list(ip, UD_UNKN);
201 mutex_exit(&udf_ifree_lock);
202 }
203 ip->i_flag |= IREF;
204 mutex_exit(&ip->i_tlock);
205 rw_exit(&ip->i_contents);
206
207 *ipp = ip;
208
209 if (pbp != NULL) {
210 brelse(pbp);
211 }
212
213 return (0);
214 }
215 }
216
217 /*
218 * We don't have it in the cache
219 * Allocate a new entry
220 */
221 tryagain:
222 mutex_enter(&udf_ifree_lock);
223 mutex_enter(&ud_nino_lock);
224 if (ud_cur_inodes > ud_max_inodes) {
225 int32_t purged;
226
227 mutex_exit(&ud_nino_lock);
228 while (udf_ifreeh == NULL ||
229 vn_has_cached_data(ITOV(udf_ifreeh))) {
230 /*
231 * Try to purge a dnlc entry so that an inode
232 * held only by the dnlc lands on the freelist.
233 */
234 mutex_exit(&udf_ifree_lock);
235 purged = dnlc_fs_purge1(udf_vnodeops);
236 mutex_enter(&udf_ifree_lock);
237 if (!purged) {
238 break;
239 }
240 }
241 mutex_enter(&ud_nino_lock);
242 }
243
244 /*
245 * If there's a free one available and it has no pages attached
246 * take it. If we're over the high water mark, take it even if
247 * it has attached pages. Otherwise, make a new one.
248 */
249 if (udf_ifreeh &&
250 (nomem || !vn_has_cached_data(ITOV(udf_ifreeh)) ||
251 ud_cur_inodes >= ud_max_inodes)) {
252
253 mutex_exit(&ud_nino_lock);
254 ip = udf_ifreeh;
255 vp = ITOV(ip);
256
257 ud_remove_from_free_list(ip, UD_BEGIN);
258
259 mutex_exit(&udf_ifree_lock);
260 if (ip->i_flag & IREF) {
261 cmn_err(CE_WARN, "ud_iget: bad i_flag\n");
262 mutex_exit(&ud_icache_lock);
263 if (pbp != NULL) {
264 brelse(pbp);
265 }
266 return (EINVAL);
267 }
268 rw_enter(&ip->i_contents, RW_WRITER);
269
270 /*
271 * We call ud_syncip() to synchronously destroy all pages
272 * associated with the vnode before re-using it. The pageout
273 * thread may have beaten us to this page, so our v_count can
274 * be > 0 at this point even though we are on the freelist.
275 */
276 mutex_enter(&ip->i_tlock);
277 ip->i_flag = (ip->i_flag & IMODTIME) | IREF;
278 mutex_exit(&ip->i_tlock);
279
280 VN_HOLD(vp);
281 if (ud_syncip(ip, B_INVAL, I_SYNC) != 0) {
282 ud_idrop(ip);
283 rw_exit(&ip->i_contents);
284 mutex_exit(&ud_icache_lock);
285 goto loop;
286 }
287
288 mutex_enter(&ip->i_tlock);
289 ip->i_flag &= ~IMODTIME;
290 mutex_exit(&ip->i_tlock);
291
292 if (ip->i_ext) {
293 kmem_free(ip->i_ext,
294 sizeof (struct icb_ext) * ip->i_ext_count);
295 ip->i_ext = 0;
296 ip->i_ext_count = ip->i_ext_used = 0;
297 }
298
299 if (ip->i_con) {
300 kmem_free(ip->i_con,
301 sizeof (struct icb_ext) * ip->i_con_count);
302 ip->i_con = 0;
303 ip->i_con_count = ip->i_con_used = ip->i_con_read = 0;
304 }
305
306 /*
307 * The pageout thread may not have had a chance to release
308 * its hold on the vnode (if it was active with this vp),
309 * but the pages should all be invalidated.
310 */
311 } else {
312 mutex_exit(&ud_nino_lock);
313 mutex_exit(&udf_ifree_lock);
314 /*
315 * Try to get memory for this inode without blocking.
316 * If we can't and there is something on the freelist,
317 * go ahead and use it; otherwise block waiting for
318 * memory while holding the icache hash lock. We expose
319 * a potential deadlock if all users of memory have to do
320 * a ud_iget() before releasing memory.
321 */
322 ip = (struct ud_inode *)kmem_zalloc(sizeof (struct ud_inode),
323 KM_NOSLEEP);
324 vp = vn_alloc(KM_NOSLEEP);
325 if ((ip == NULL) || (vp == NULL)) {
326 mutex_enter(&udf_ifree_lock);
327 if (udf_ifreeh) {
328 mutex_exit(&udf_ifree_lock);
329 if (ip != NULL)
330 kmem_free(ip, sizeof (struct ud_inode));
331 if (vp != NULL)
332 vn_free(vp);
333 nomem = 1;
334 goto tryagain;
335 } else {
336 mutex_exit(&udf_ifree_lock);
337 if (ip == NULL)
338 ip = (struct ud_inode *)
339 kmem_zalloc(
340 sizeof (struct ud_inode),
341 KM_SLEEP);
342 if (vp == NULL)
343 vp = vn_alloc(KM_SLEEP);
344 }
345 }
346 ip->i_vnode = vp;
347
348 ip->i_marker1 = (uint32_t)0xAAAAAAAA;
349 ip->i_marker2 = (uint32_t)0xBBBBBBBB;
350 ip->i_marker3 = (uint32_t)0xCCCCCCCC;
351
352 rw_init(&ip->i_rwlock, NULL, RW_DEFAULT, NULL);
353 rw_init(&ip->i_contents, NULL, RW_DEFAULT, NULL);
354 mutex_init(&ip->i_tlock, NULL, MUTEX_DEFAULT, NULL);
355
356 ip->i_forw = ip;
357 ip->i_back = ip;
358 vp->v_data = (caddr_t)ip;
359 vn_setops(vp, udf_vnodeops);
360 ip->i_flag = IREF;
361 cv_init(&ip->i_wrcv, NULL, CV_DRIVER, NULL);
362 mutex_enter(&ud_nino_lock);
363 ud_cur_inodes++;
364 mutex_exit(&ud_nino_lock);
365
366 rw_enter(&ip->i_contents, RW_WRITER);
367 }
368
369 if (vp->v_count < 1) {
370 cmn_err(CE_WARN, "ud_iget: v_count < 1\n");
371 mutex_exit(&ud_icache_lock);
372 rw_exit(&ip->i_contents);
373 if (pbp != NULL) {
374 brelse(pbp);
375 }
376 return (EINVAL);
377 }
378 if (vn_has_cached_data(vp)) {
379 cmn_err(CE_WARN, "ud_iget: v_pages not NULL\n");
380 mutex_exit(&ud_icache_lock);
381 rw_exit(&ip->i_contents);
382 if (pbp != NULL) {
383 brelse(pbp);
384 }
385 return (EINVAL);
386 }
387
388 /*
389 * Move the inode on the chain for its new (ino, dev) pair
390 */
391 remque(ip);
392 ip->i_forw = ip;
393 ip->i_back = ip;
394 insque(ip, ih);
395
396 ip->i_dev = vfsp->vfs_dev;
397 ip->i_udf = udf_vfsp;
398 ip->i_diroff = 0;
399 ip->i_devvp = ip->i_udf->udf_devvp;
400 ip->i_icb_prn = prn;
401 ip->i_icb_block = ploc;
402 ip->i_icb_lbano = loc;
403 ip->i_nextr = 0;
404 ip->i_seq = 0;
405 mutex_exit(&ud_icache_lock);
406
407 read_de:
408 if (pbp != NULL) {
409 /*
410 * The assumption is that we will not
411 * create a strategy-type-4096 file.
412 */
413 bp = pbp;
414 } else {
415 bp = ud_bread(ip->i_dev,
416 ip->i_icb_lbano << udf_vfsp->udf_l2d_shift,
417 udf_vfsp->udf_lbsize);
418 }
419
420 /*
421 * Check I/O errors
422 */
423 fe = (struct file_entry *)bp->b_un.b_addr;
424 if ((bp->b_flags & B_ERROR) ||
425 (ud_verify_tag_and_desc(&fe->fe_tag, UD_FILE_ENTRY,
426 ip->i_icb_block, 1, udf_vfsp->udf_lbsize) != 0)) {
427
428 if (((bp->b_flags & B_ERROR) == 0) &&
429 (ftype == STRAT_TYPE4096)) {
430 if (ud_check_te_unrec(udf_vfsp,
431 bp->b_un.b_addr, ip->i_icb_block) == 0) {
432
433 brelse(bp);
434
435 /*
436 * restore old file entry location
437 */
438 ip->i_icb_prn = old_prn;
439 ip->i_icb_block = old_block;
440 ip->i_icb_lbano = old_lbano;
441
442 /*
443 * reread old file entry
444 */
445 bp = ud_bread(ip->i_dev,
446 old_lbano << udf_vfsp->udf_l2d_shift,
447 udf_vfsp->udf_lbsize);
448 if ((bp->b_flags & B_ERROR) == 0) {
449 fe = (struct file_entry *)
450 bp->b_un.b_addr;
451 if (ud_verify_tag_and_desc(&fe->fe_tag,
452 UD_FILE_ENTRY, ip->i_icb_block, 1,
453 udf_vfsp->udf_lbsize) == 0) {
454 goto end_4096;
455 }
456 }
457 }
458 }
459 error_ret:
460 brelse(bp);
461 /*
462 * The inode may not contain anything useful. Mark it as
463 * having an error and let anyone else who was waiting for
464 * this know there was an error. Callers waiting for
465 * access to this inode in ud_iget will find
466 * the i_icb_lbano == 0, so there won't be a match.
467 * It remains in the cache. Put it back on the freelist.
468 */
469 mutex_enter(&vp->v_lock);
470 VN_RELE_LOCKED(vp);
471 mutex_exit(&vp->v_lock);
472 ip->i_icb_lbano = 0;
473
474 /*
475 * The following two lines make it
476 * impossible for anyone to do a
477 * VN_HOLD and then a VN_RELE,
478 * thus avoiding a ud_iinactive().
479 */
480 ip->i_icb_prn = 0xffff;
481 ip->i_icb_block = 0;
482
483 /*
484 * Remove the bad inode from the hash chains
485 * so that we will not encounter this inode
486 * during unmount.
487 */
488 mutex_enter(&ud_icache_lock);
489 remque(ip);
490 ip->i_forw = ip;
491 ip->i_back = ip;
492 mutex_exit(&ud_icache_lock);
493
494 /* Put the inode at the front of the freelist */
495 mutex_enter(&ip->i_tlock);
496 mutex_enter(&udf_ifree_lock);
497 ud_add_to_free_list(ip, UD_BEGIN);
498 mutex_exit(&udf_ifree_lock);
499 ip->i_flag = 0;
500 mutex_exit(&ip->i_tlock);
501 rw_exit(&ip->i_contents);
502 return (EIO);
503 }
504
505 if (fe->fe_icb_tag.itag_strategy == SWAP_16(STRAT_TYPE4096)) {
506 struct buf *ibp = NULL;
507 struct indirect_entry *ie;
508
509 /*
510 * save old file_entry location
511 */
512 old_prn = ip->i_icb_prn;
513 old_block = ip->i_icb_block;
514 old_lbano = ip->i_icb_lbano;
515
516 ftype = STRAT_TYPE4096;
517
518 /*
519 * If the allocation strategy is 4096,
520 * different versions of the file may exist
521 * on the media; we are supposed to locate
522 * the latest version of the file.
523 */
524 
525 /*
526 * The IE is supposed to be in the block
527 * following the DE.
528 */
529 ibp = ud_bread(ip->i_dev,
530 (ip->i_icb_lbano + 1) << udf_vfsp->udf_l2d_shift,
531 udf_vfsp->udf_lbsize);
532 if (ibp->b_flags & B_ERROR) {
533 /*
534 * Get rid of current ibp and
535 * then goto error on DE's bp
536 */
537 ie_error:
538 brelse(ibp);
539 goto error_ret;
540 }
541
542 ie = (struct indirect_entry *)ibp->b_un.b_addr;
543 if (ud_verify_tag_and_desc(&ie->ie_tag,
544 UD_INDIRECT_ENT, ip->i_icb_block + 1,
545 1, udf_vfsp->udf_lbsize) == 0) {
546 struct long_ad *lad;
547
548 lad = &ie->ie_indirecticb;
549 ip->i_icb_prn = SWAP_16(lad->lad_ext_prn);
550 ip->i_icb_block = SWAP_32(lad->lad_ext_loc);
551 ip->i_icb_lbano = ud_xlate_to_daddr(udf_vfsp,
552 ip->i_icb_prn, ip->i_icb_block,
553 1, &dummy);
554 brelse(ibp);
555 brelse(bp);
556 goto read_de;
557 }
558
559 /*
560 * If this block is a TE or is unrecorded,
561 * we are at the last entry.
562 */
563 if (ud_check_te_unrec(udf_vfsp, ibp->b_un.b_addr,
564 ip->i_icb_block + 1) != 0) {
565 /*
566 * This block is neither a valid IE
567 * (checked above) nor a TE or an
568 * unrecorded block, so treat it
569 * as an error.
570 */
571 goto ie_error;
572 }
573 /*
574 * ud_check_te_unrec() returned "0", so this
575 * is the last entry in the chain, i.e. the
576 * latest file_entry.
577 */
578 brelse(ibp);
579 }
580
581 end_4096:
582
583 ip->i_uid = SWAP_32(fe->fe_uid);
584 if (ip->i_uid == -1) {
585 ip->i_uid = ud_default_uid;
586 }
587 ip->i_gid = SWAP_32(fe->fe_gid);
588 if (ip->i_gid == -1) {
589 ip->i_gid = ud_default_gid;
590 }
591 ip->i_perm = SWAP_32(fe->fe_perms) & 0xFFFF;
592 if (fe->fe_icb_tag.itag_strategy == SWAP_16(STRAT_TYPE4096)) {
593 ip->i_perm &= ~(IWRITE | (IWRITE >> 5) | (IWRITE >> 10));
594 }
595
596 ip->i_nlink = SWAP_16(fe->fe_lcount);
597 ip->i_size = SWAP_64(fe->fe_info_len);
598 ip->i_lbr = SWAP_64(fe->fe_lbr);
599
600 ud_dtime2utime(&ip->i_atime, &fe->fe_acc_time);
601 ud_dtime2utime(&ip->i_mtime, &fe->fe_mod_time);
602 ud_dtime2utime(&ip->i_ctime, &fe->fe_attr_time);
603
604
605 ip->i_uniqid = SWAP_64(fe->fe_uniq_id);
606 icb_tag_flags = SWAP_16(fe->fe_icb_tag.itag_flags);
607
608 if ((fe->fe_icb_tag.itag_ftype == FTYPE_CHAR_DEV) ||
609 (fe->fe_icb_tag.itag_ftype == FTYPE_BLOCK_DEV)) {
610
611 eah = (struct ext_attr_hdr *)fe->fe_spec;
612 ea_off = GET_32(&eah->eah_ial);
613 ea_len = GET_32(&fe->fe_len_ear);
614 if (ea_len && (ud_verify_tag_and_desc(&eah->eah_tag,
615 UD_EXT_ATTR_HDR, ip->i_icb_block, 1,
616 sizeof (struct file_entry) -
617 offsetof(struct file_entry, fe_spec)) == 0)) {
618
619 while (ea_off < ea_len) {
620 /*
621 * We now check the validity of ea_off.
622 * (ea_len - ea_off) should be large enough
623 * to hold at least the attribute header.
624 */
625 if ((ea_len - ea_off) <
626 sizeof (struct attr_hdr)) {
627 cmn_err(CE_NOTE,
628 "ea_len(0x%x) - ea_off(0x%x) is "
629 "too small to hold attr. info. "
630 "blockno 0x%x\n",
631 ea_len, ea_off, ip->i_icb_block);
632 goto error_ret;
633 }
634 ah = (struct attr_hdr *)&fe->fe_spec[ea_off];
635
636 /*
637 * Device Specification EA
638 */
639 if ((GET_32(&ah->ahdr_atype) == 12) &&
640 (ah->ahdr_astype == 1)) {
641 struct dev_spec_ear *ds;
642
643 if ((ea_len - ea_off) <
644 sizeof (struct dev_spec_ear)) {
645 cmn_err(CE_NOTE,
646 "ea_len(0x%x) - "
647 "ea_off(0x%x) is too small "
648 "to hold dev_spec_ear."
649 " blockno 0x%x\n",
650 ea_len, ea_off,
651 ip->i_icb_block);
652 goto error_ret;
653 }
654 ds = (struct dev_spec_ear *)ah;
655 ip->i_major = GET_32(&ds->ds_major_id);
656 ip->i_minor = GET_32(&ds->ds_minor_id);
657 }
658
659 /*
660 * Impl Use EA
661 */
662 if ((GET_32(&ah->ahdr_atype) == 2048) &&
663 (ah->ahdr_astype == 1)) {
664 struct iu_ea *iuea;
665 struct copy_mgt_info *cmi;
666
667 if ((ea_len - ea_off) <
668 sizeof (struct iu_ea)) {
669 cmn_err(CE_NOTE,
670 "ea_len(0x%x) - ea_off(0x%x) is too small to hold iu_ea. blockno 0x%x\n",
671 ea_len, ea_off,
672 ip->i_icb_block);
673 goto error_ret;
674 }
675 iuea = (struct iu_ea *)ah;
676 if (strncmp(iuea->iuea_ii.reg_id,
677 UDF_FREEEASPACE,
678 sizeof (iuea->iuea_ii.reg_id))
679 == 0) {
680 /* skip it */
681 iuea = iuea;
682 } else if (strncmp(iuea->iuea_ii.reg_id,
683 UDF_CGMS_INFO,
684 sizeof (iuea->iuea_ii.reg_id))
685 == 0) {
686 cmi = (struct copy_mgt_info *)
687 iuea->iuea_iu;
688 cmi = cmi;
689 }
690 }
691 /* ??? PARANOIA */
692 if (GET_32(&ah->ahdr_length) == 0) {
693 break;
694 }
695 ea_off += GET_32(&ah->ahdr_length);
696 }
697 }
698 }
699
700 ip->i_nextr = 0;
701
702 ip->i_maxent = SWAP_16(fe->fe_icb_tag.itag_max_ent);
703 ip->i_astrat = SWAP_16(fe->fe_icb_tag.itag_strategy);
704
705 ip->i_desc_type = icb_tag_flags & 0x7;
706
707 /* Strictly Paranoia */
708 ip->i_ext = NULL;
709 ip->i_ext_count = ip->i_ext_used = 0;
710 ip->i_con = 0;
711 ip->i_con_count = ip->i_con_used = ip->i_con_read = 0;
712
713 ip->i_data_off = 0xB0 + SWAP_32(fe->fe_len_ear);
714 ip->i_max_emb = udf_vfsp->udf_lbsize - ip->i_data_off;
715 if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
716 /* Short allocation desc */
717 struct short_ad *sad;
718
719 ip->i_ext_used = 0;
720 ip->i_ext_count = ndesc =
721 SWAP_32(fe->fe_len_adesc) / sizeof (struct short_ad);
722 ip->i_ext_count =
723 ((ip->i_ext_count / EXT_PER_MALLOC) + 1) * EXT_PER_MALLOC;
724 ip->i_ext = (struct icb_ext *)kmem_zalloc(ip->i_ext_count *
725 sizeof (struct icb_ext), KM_SLEEP);
726 ip->i_cur_max_ext = ip->i_max_emb / sizeof (struct short_ad);
727 ip->i_cur_max_ext --;
728
729 if ((ip->i_astrat != STRAT_TYPE4) &&
730 (ip->i_astrat != STRAT_TYPE4096)) {
731 goto error_ret;
732 }
733
734 sad = (struct short_ad *)
735 (fe->fe_spec + SWAP_32(fe->fe_len_ear));
736 iext = ip->i_ext;
737 while (ndesc --) {
738 length = SWAP_32(sad->sad_ext_len);
739 if ((length & 0x3FFFFFFF) == 0) {
740 break;
741 }
742 if (((length >> 30) & IB_MASK) == IB_CON) {
743 if (ip->i_con == NULL) {
744 ip->i_con_count = EXT_PER_MALLOC;
745 ip->i_con_used = 0;
746 ip->i_con_read = 0;
747 ip->i_con = kmem_zalloc(
748 ip->i_con_count *
749 sizeof (struct icb_ext),
750 KM_SLEEP);
751 }
752 con = &ip->i_con[ip->i_con_used];
753 con->ib_prn = 0;
754 con->ib_block = SWAP_32(sad->sad_ext_loc);
755 con->ib_count = length & 0x3FFFFFFF;
756 con->ib_flags = (length >> 30) & IB_MASK;
757 ip->i_con_used++;
758 sad ++;
759 break;
760 }
761 iext->ib_prn = 0;
762 iext->ib_block = SWAP_32(sad->sad_ext_loc);
763 length = SWAP_32(sad->sad_ext_len);
764 iext->ib_count = length & 0x3FFFFFFF;
765 iext->ib_offset = offset;
766 iext->ib_marker1 = (uint32_t)0xAAAAAAAA;
767 iext->ib_marker2 = (uint32_t)0xBBBBBBBB;
768 offset += (iext->ib_count + udf_vfsp->udf_lbmask) &
769 (~udf_vfsp->udf_lbmask);
770
771 iext->ib_flags = (length >> 30) & IB_MASK;
772
773 ip->i_ext_used++;
774 iext++;
775 sad ++;
776 }
777 } else if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
778 /* Long allocation desc */
779 struct long_ad *lad;
780
781 ip->i_ext_used = 0;
782 ip->i_ext_count = ndesc =
783 SWAP_32(fe->fe_len_adesc) / sizeof (struct long_ad);
784 ip->i_ext_count =
785 ((ip->i_ext_count / EXT_PER_MALLOC) + 1) * EXT_PER_MALLOC;
786 ip->i_ext = (struct icb_ext *)kmem_zalloc(ip->i_ext_count *
787 sizeof (struct icb_ext), KM_SLEEP);
788
789 ip->i_cur_max_ext = ip->i_max_emb / sizeof (struct long_ad);
790 ip->i_cur_max_ext --;
791
792 if ((ip->i_astrat != STRAT_TYPE4) &&
793 (ip->i_astrat != STRAT_TYPE4096)) {
794 goto error_ret;
795 }
796
797 lad = (struct long_ad *)
798 (fe->fe_spec + SWAP_32(fe->fe_len_ear));
799 iext = ip->i_ext;
800 while (ndesc --) {
801 length = SWAP_32(lad->lad_ext_len);
802 if ((length & 0x3FFFFFFF) == 0) {
803 break;
804 }
805 if (((length >> 30) & IB_MASK) == IB_CON) {
806 if (ip->i_con == NULL) {
807 ip->i_con_count = EXT_PER_MALLOC;
808 ip->i_con_used = 0;
809 ip->i_con_read = 0;
810 ip->i_con = kmem_zalloc(
811 ip->i_con_count *
812 sizeof (struct icb_ext),
813 KM_SLEEP);
814 }
815 con = &ip->i_con[ip->i_con_used];
816 con->ib_prn = SWAP_16(lad->lad_ext_prn);
817 con->ib_block = SWAP_32(lad->lad_ext_loc);
818 con->ib_count = length & 0x3FFFFFFF;
819 con->ib_flags = (length >> 30) & IB_MASK;
820 ip->i_con_used++;
821 lad ++;
822 break;
823 }
824 iext->ib_prn = SWAP_16(lad->lad_ext_prn);
825 iext->ib_block = SWAP_32(lad->lad_ext_loc);
826 iext->ib_count = length & 0x3FFFFFFF;
827 iext->ib_offset = offset;
828 iext->ib_marker1 = (uint32_t)0xAAAAAAAA;
829 iext->ib_marker2 = (uint32_t)0xBBBBBBBB;
830 offset += (iext->ib_count + udf_vfsp->udf_lbmask) &
831 (~udf_vfsp->udf_lbmask);
832
833 iext->ib_flags = (length >> 30) & IB_MASK;
834
835 ip->i_ext_used++;
836 iext++;
837 lad ++;
838 }
839 } else if (ip->i_desc_type == ICB_FLAG_ONE_AD) {
840 ASSERT(SWAP_32(fe->fe_len_ear) < udf_vfsp->udf_lbsize);
841
842 if (SWAP_32(fe->fe_len_ear) > udf_vfsp->udf_lbsize) {
843 goto error_ret;
844 }
845 } else {
846 /* Not to be used in UDF 1.50 */
847 cmn_err(CE_NOTE, "Invalid Allocation Descriptor type %x\n",
848 ip->i_desc_type);
849 goto error_ret;
850 }
851
852
853 if (icb_tag_flags & ICB_FLAG_SETUID) {
854 ip->i_char = ISUID;
855 } else {
856 ip->i_char = 0;
857 }
858 if (icb_tag_flags & ICB_FLAG_SETGID) {
859 ip->i_char |= ISGID;
860 }
861 if (icb_tag_flags & ICB_FLAG_STICKY) {
862 ip->i_char |= ISVTX;
863 }
864 switch (fe->fe_icb_tag.itag_ftype) {
865 case FTYPE_DIRECTORY :
866 ip->i_type = VDIR;
867 break;
868 case FTYPE_FILE :
869 ip->i_type = VREG;
870 break;
871 case FTYPE_BLOCK_DEV :
872 ip->i_type = VBLK;
873 break;
874 case FTYPE_CHAR_DEV :
875 ip->i_type = VCHR;
876 break;
877 case FTYPE_FIFO :
878 ip->i_type = VFIFO;
879 break;
880 case FTYPE_C_ISSOCK :
881 ip->i_type = VSOCK;
882 break;
883 case FTYPE_SYMLINK :
884 ip->i_type = VLNK;
885 break;
886 default :
887 ip->i_type = VNON;
888 break;
889 }
890
891 if (ip->i_type == VBLK || ip->i_type == VCHR) {
892 ip->i_rdev = makedevice(ip->i_major, ip->i_minor);
893 }
894
895 /*
896 * Fill in the rest. Don't bother with the vnode lock because nobody
897 * should be looking at this vnode. We have already invalidated the
898 * pages if it had any, so pageout shouldn't be referencing this
899 * vnode, and we are holding the contents lock as writer so a lookup
900 * can't use the vnode.
901 */
902 vp->v_vfsp = vfsp;
903 vp->v_type = ip->i_type;
904 vp->v_rdev = ip->i_rdev;
905 if (ip->i_udf->udf_root_blkno == loc) {
906 vp->v_flag = VROOT;
907 } else {
908 vp->v_flag = 0;
909 }
910
911 brelse(bp);
912 *ipp = ip;
913 rw_exit(&ip->i_contents);
914 vn_exists(vp);
915 return (0);
916 }
917
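/*
 * ud_iinactive() - called when the last hold on the vnode is released.
 * Frees the file's blocks if the link count has dropped to zero, writes
 * back a dirty inode, and then either places the inode on the free list
 * or, if we are over the inode limit, destroys it outright.
 */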
918 void
919 ud_iinactive(struct ud_inode *ip, struct cred *cr)
920 {
921 int32_t busy = 0;
922 struct vnode *vp;
923 vtype_t type;
924 caddr_t addr, addr1;
925 size_t size, size1;
926
927
928 ud_printf("ud_iinactive\n");
929
930 /*
931 * Get exclusive access to inode data.
932 */
933 rw_enter(&ip->i_contents, RW_WRITER);
934
935 /*
936 * Make sure no one reclaimed the inode before we put
937 * it on the freelist or destroy it. We keep our 'hold'
938 * on the vnode from vn_rele until we are ready to
939 * do something with the inode (freelist/destroy).
940 *
941 * Pageout may put a VN_HOLD/VN_RELE at any time during this
942 * operation via an async putpage, so we must make sure
943 * we don't free/destroy the inode more than once. ud_iget
944 * may also put a VN_HOLD on the inode before it grabs
945 * the i_contents lock. This is done so we don't kmem_free
946 * an inode that a thread is waiting on.
947 */
948 vp = ITOV(ip);
949
950 mutex_enter(&vp->v_lock);
951 if (vp->v_count < 1) {
952 cmn_err(CE_WARN, "ud_iinactive: v_count < 1\n");
953 return;
954 }
955 if ((vp->v_count > 1) || ((ip->i_flag & IREF) == 0)) {
956 VN_RELE_LOCKED(vp);
957 mutex_exit(&vp->v_lock);
958 rw_exit(&ip->i_contents);
959 return;
960 }
961 mutex_exit(&vp->v_lock);
962
963 /*
964 * For forced umount case: if i_udf is NULL, the contents of
965 * the inode and all the pages have already been pushed back
966 * to disk. It can be safely destroyed.
967 */
968 if (ip->i_udf == NULL) {
969 addr = (caddr_t)ip->i_ext;
970 size = sizeof (struct icb_ext) * ip->i_ext_count;
971 ip->i_ext = 0;
972 ip->i_ext_count = ip->i_ext_used = 0;
973 addr1 = (caddr_t)ip->i_con;
974 size1 = sizeof (struct icb_ext) * ip->i_con_count;
975 ip->i_con = 0;
976 ip->i_con_count = ip->i_con_used = ip->i_con_read = 0;
977 rw_exit(&ip->i_contents);
978 vn_invalid(vp);
979
980 mutex_enter(&ud_nino_lock);
981 ud_cur_inodes--;
982 mutex_exit(&ud_nino_lock);
983
984 cv_destroy(&ip->i_wrcv); /* throttling */
985 rw_destroy(&ip->i_rwlock);
986 rw_exit(&ip->i_contents);
987 rw_destroy(&ip->i_contents);
988 kmem_free(addr, size);
989 kmem_free(addr1, size1);
990 vn_free(vp);
991 kmem_free(ip, sizeof (struct ud_inode));
992 return;
993 }
994
995 if ((ip->i_udf->udf_flags & UDF_FL_RDONLY) == 0) {
996 if (ip->i_nlink <= 0) {
997 ip->i_marker3 = (uint32_t)0xDDDD0000;
998 ip->i_nlink = 1; /* prevent free-ing twice */
999 (void) ud_itrunc(ip, 0, 0, cr);
1000 type = ip->i_type;
1001 ip->i_perm = 0;
1002 ip->i_uid = 0;
1003 ip->i_gid = 0;
1004 ip->i_rdev = 0; /* Zero in core version of rdev */
1005 mutex_enter(&ip->i_tlock);
1006 ip->i_flag |= IUPD|ICHG;
1007 mutex_exit(&ip->i_tlock);
1008 ud_ifree(ip, type);
1009 ip->i_icb_prn = 0xFFFF;
1010 } else if (!IS_SWAPVP(vp)) {
1011 /*
1012 * Write the inode out if dirty. Pages are
1013 * written back and put on the freelist.
1014 */
1015 (void) ud_syncip(ip, B_FREE | B_ASYNC, 0);
1016 /*
1017 * Do nothing if inode is now busy -- inode may
1018 * have gone busy because ud_syncip
1019 * releases/reacquires the i_contents lock
1020 */
1021 mutex_enter(&vp->v_lock);
1022 if (vp->v_count > 1) {
1023 VN_RELE_LOCKED(vp);
1024 mutex_exit(&vp->v_lock);
1025 rw_exit(&ip->i_contents);
1026 return;
1027 }
1028 mutex_exit(&vp->v_lock);
1029 } else {
1030 ud_iupdat(ip, 0);
1031 }
1032 }
1033
1034
1035 /*
1036 * Put the inode at the end of the free list.
1037 * In some cases it might be better to put the
1038 * inode at the head of the free list
1039 * (e.g. where i_perm == 0 || i_number == 0),
1040 * but that is left for later.
1041 * (i_number is rarely 0 - only after an i/o error in ud_iget;
1042 * where i_perm == 0, the inode will probably be wanted
1043 * again soon for an ialloc, so possibly we should keep it.)
1044 */
1045 /*
1046 * If inode is invalid or there is no page associated with
1047 * this inode, put the inode in the front of the free list.
1048 * Since we have a VN_HOLD on the vnode, and checked that it
1049 * wasn't already on the freelist when we entered, we can safely
1050 * put it on the freelist even if another thread puts a VN_HOLD
1051 * on it (pageout/ud_iget).
1052 */
1053 tryagain:
1054 mutex_enter(&ud_nino_lock);
1055 if (vn_has_cached_data(vp)) {
1056 mutex_exit(&ud_nino_lock);
1057 mutex_enter(&vp->v_lock);
1058 VN_RELE_LOCKED(vp);
1059 mutex_exit(&vp->v_lock);
1060 mutex_enter(&ip->i_tlock);
1061 mutex_enter(&udf_ifree_lock);
1062 ud_add_to_free_list(ip, UD_END);
1063 mutex_exit(&udf_ifree_lock);
1064 ip->i_flag &= IMODTIME;
1065 mutex_exit(&ip->i_tlock);
1066 rw_exit(&ip->i_contents);
1067 } else if (busy || ud_cur_inodes < ud_max_inodes) {
1068 mutex_exit(&ud_nino_lock);
1069 /*
1070 * We're not over our high water mark, or it's
1071 * not safe to kmem_free the inode, so put it
1072 * on the freelist.
1073 */
1074 mutex_enter(&vp->v_lock);
1075 if (vn_has_cached_data(vp)) {
1076 cmn_err(CE_WARN, "ud_iinactive: v_pages not NULL\n");
1077 }
1078 VN_RELE_LOCKED(vp);
1079 mutex_exit(&vp->v_lock);
1080
1081 mutex_enter(&ip->i_tlock);
1082 mutex_enter(&udf_ifree_lock);
1083 ud_add_to_free_list(ip, UD_BEGIN);
1084 mutex_exit(&udf_ifree_lock);
1085 ip->i_flag &= IMODTIME;
1086 mutex_exit(&ip->i_tlock);
1087 rw_exit(&ip->i_contents);
1088 } else {
1089 mutex_exit(&ud_nino_lock);
1090 if (vn_has_cached_data(vp)) {
1091 cmn_err(CE_WARN, "ud_iinactive: v_pages not NULL\n");
1092 }
1093 /*
1094 * Try to free the inode. We must make sure
1095 * it's o.k. to destroy this inode. We can't destroy
1096 * if a thread is waiting for this inode. If we can't get the
1097 * cache now, put it back on the freelist.
1098 */
1099 if (!mutex_tryenter(&ud_icache_lock)) {
1100 busy = 1;
1101 goto tryagain;
1102 }
1103 mutex_enter(&vp->v_lock);
1104 if (vp->v_count > 1) {
1105 /* inode is wanted in ud_iget */
1106 busy = 1;
1107 mutex_exit(&vp->v_lock);
1108 mutex_exit(&ud_icache_lock);
1109 goto tryagain;
1110 }
1111 mutex_exit(&vp->v_lock);
1112 remque(ip);
1113 ip->i_forw = ip;
1114 ip->i_back = ip;
1115 mutex_enter(&ud_nino_lock);
1116 ud_cur_inodes--;
1117 mutex_exit(&ud_nino_lock);
1118 mutex_exit(&ud_icache_lock);
1119 if (ip->i_icb_prn != 0xFFFF) {
1120 ud_iupdat(ip, 0);
1121 }
1122 addr = (caddr_t)ip->i_ext;
1123 size = sizeof (struct icb_ext) * ip->i_ext_count;
1124 ip->i_ext = 0;
1125 ip->i_ext_count = ip->i_ext_used = 0;
1126 addr1 = (caddr_t)ip->i_con;
1127 size1 = sizeof (struct icb_ext) * ip->i_con_count;
1128 ip->i_con = 0;
1129 ip->i_con_count = ip->i_con_used = ip->i_con_read = 0;
1130 cv_destroy(&ip->i_wrcv); /* throttling */
1131 rw_destroy(&ip->i_rwlock);
1132 rw_exit(&ip->i_contents);
1133 rw_destroy(&ip->i_contents);
1134 kmem_free(addr, size);
1135 kmem_free(addr1, size1);
1136 ip->i_marker3 = (uint32_t)0xDDDDDDDD;
1137 vn_free(vp);
1138 kmem_free(ip, sizeof (struct ud_inode));
1139 }
1140 }
1141
1142
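/*
 * ud_iupdat() - write the in-core inode back to its on-disk file entry
 * if any of the update flags (IUPD/IACC/ICHG/IMOD/IMODACC) are set.
 * "waitfor" selects a synchronous write (BWRITE) over a delayed write
 * (bdwrite); a synchronous request also flushes any earlier delayed
 * write that is still outstanding (IBDWRITE).  The caller must hold
 * i_contents as writer.
 */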
1143 void
1144 ud_iupdat(struct ud_inode *ip, int32_t waitfor)
1145 {
1146 uint16_t flag, tag_flags;
1147 int32_t error;
1148 struct buf *bp;
1149 struct udf_vfs *udf_vfsp;
1150 struct file_entry *fe;
1151 uint16_t crc_len = 0;
1152
1153 ASSERT(RW_WRITE_HELD(&ip->i_contents));
1154
1155 ud_printf("ud_iupdat\n");
1156 /*
1157 * Return if file system has been forcibly umounted.
1158 */
1159 if (ip->i_udf == NULL) {
1160 return;
1161 }
1162
1163 udf_vfsp = ip->i_udf;
1164 flag = ip->i_flag; /* Atomic read */
1165 if ((flag & (IUPD|IACC|ICHG|IMOD|IMODACC)) != 0) {
1166 if (udf_vfsp->udf_flags & UDF_FL_RDONLY) {
1167 ip->i_flag &= ~(IUPD|IACC|ICHG|IMOD|IMODACC|IATTCHG);
1168 return;
1169 }
1170
1171 bp = ud_bread(ip->i_dev,
1172 ip->i_icb_lbano << udf_vfsp->udf_l2d_shift,
1173 ip->i_udf->udf_lbsize);
1174 if (bp->b_flags & B_ERROR) {
1175 brelse(bp);
1176 return;
1177 }
1178 fe = (struct file_entry *)bp->b_un.b_addr;
1179 if (ud_verify_tag_and_desc(&fe->fe_tag, UD_FILE_ENTRY,
1180 ip->i_icb_block,
1181 1, ip->i_udf->udf_lbsize) != 0) {
1182 brelse(bp);
1183 return;
1184 }
1185
1186 mutex_enter(&ip->i_tlock);
1187 if (ip->i_flag & (IUPD|IACC|ICHG)) {
1188 IMARK(ip);
1189 }
1190 ip->i_flag &= ~(IUPD|IACC|ICHG|IMOD|IMODACC);
1191 mutex_exit(&ip->i_tlock);
1192
1193 fe->fe_uid = SWAP_32(ip->i_uid);
1194 fe->fe_gid = SWAP_32(ip->i_gid);
1195
1196 fe->fe_perms = SWAP_32(ip->i_perm);
1197
1198 fe->fe_lcount = SWAP_16(ip->i_nlink);
1199 fe->fe_info_len = SWAP_64(ip->i_size);
1200 fe->fe_lbr = SWAP_64(ip->i_lbr);
1201
1202 ud_utime2dtime(&ip->i_atime, &fe->fe_acc_time);
1203 ud_utime2dtime(&ip->i_mtime, &fe->fe_mod_time);
1204 ud_utime2dtime(&ip->i_ctime, &fe->fe_attr_time);
1205
1206 if (ip->i_char & ISUID) {
1207 tag_flags = ICB_FLAG_SETUID;
1208 } else {
1209 tag_flags = 0;
1210 }
1211 if (ip->i_char & ISGID) {
1212 tag_flags |= ICB_FLAG_SETGID;
1213 }
1214 if (ip->i_char & ISVTX) {
1215 tag_flags |= ICB_FLAG_STICKY;
1216 }
1217 tag_flags |= ip->i_desc_type;
1218
1219 /*
1220 * Removed - the file is no longer guaranteed contiguous:
1221 * if (ip->i_astrat == STRAT_TYPE4) {
1222 *	tag_flags |= ICB_FLAG_CONTIG;
1223 * }
1224 */
1225
1226 fe->fe_icb_tag.itag_flags &= ~SWAP_16((uint16_t)0x3C3);
1227 fe->fe_icb_tag.itag_strategy = SWAP_16(ip->i_astrat);
1228 fe->fe_icb_tag.itag_flags |= SWAP_16(tag_flags);
1229
1230 ud_update_regid(&fe->fe_impl_id);
1231
1232 crc_len = offsetof(struct file_entry, fe_spec) +
1233 SWAP_32(fe->fe_len_ear);
1234 if (ip->i_desc_type == ICB_FLAG_ONE_AD) {
1235 crc_len += ip->i_size;
1236 fe->fe_len_adesc = SWAP_32(((uint32_t)ip->i_size));
1237 } else if ((ip->i_size != 0) && (ip->i_ext != NULL) &&
1238 (ip->i_ext_used != 0)) {
1239
1240 if ((error = ud_read_icb_till_off(ip,
1241 ip->i_size)) == 0) {
1242 if (ip->i_astrat == STRAT_TYPE4) {
1243 error = ud_updat_ext4(ip, fe);
1244 } else if (ip->i_astrat == STRAT_TYPE4096) {
1245 error = ud_updat_ext4096(ip, fe);
1246 }
1247 if (error) {
1248 udf_vfsp->udf_mark_bad = 1;
1249 }
1250 }
1251 crc_len += SWAP_32(fe->fe_len_adesc);
1252 } else {
1253 fe->fe_len_adesc = 0;
1254 }
1255
1256 /*
1257 * Zero out the rest of the block
1258 */
1259 bzero(bp->b_un.b_addr + crc_len,
1260 ip->i_udf->udf_lbsize - crc_len);
1261
1262 ud_make_tag(ip->i_udf, &fe->fe_tag,
1263 UD_FILE_ENTRY, ip->i_icb_block, crc_len);
1264
1265
1266 if (waitfor) {
1267 BWRITE(bp);
1268
1269 /*
1270 * The synchronous write has guaranteed that the
1271 * inode has been written to disk, so clear the flag.
1272 */
1273 ip->i_flag &= ~(IBDWRITE);
1274 } else {
1275 bdwrite(bp);
1276
1277 /*
1278 * This write hasn't guaranteed that the inode has
1279 * been written to disk.
1280 * Since all update flags on the inode are cleared,
1281 * we must remember the condition in case the inode
1282 * is to be updated synchronously later (e.g.
1283 * fsync()/fdatasync()) and has not been modified yet.
1284 */
1285 ip->i_flag |= (IBDWRITE);
1286 }
1287 } else {
1288 /*
1289 * In case the previous inode update was done asynchronously
1290 * (IBDWRITE) and this inode update request wants a guaranteed
1291 * (synchronous) disk update, flush the inode.
1292 */
1293 if (waitfor && (flag & IBDWRITE)) {
1294 blkflush(ip->i_dev,
1295 (daddr_t)fsbtodb(udf_vfsp, ip->i_icb_lbano));
1296 ip->i_flag &= ~(IBDWRITE);
1297 }
1298 }
1299 }
1300
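/*
 * ud_updat_ext4() - write the strategy-type-4 allocation descriptors
 * for ip back into the file entry and into any continuation (alloc
 * extent descriptor) blocks, freeing continuation blocks that are no
 * longer needed.
 */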
1301 int32_t
1302 ud_updat_ext4(struct ud_inode *ip, struct file_entry *fe)
1303 {
1304 uint32_t dummy;
1305 int32_t elen, ndent, index, count, con_index;
1306 daddr_t bno;
1307 struct buf *bp;
1308 struct short_ad *sad;
1309 struct long_ad *lad;
1310 struct icb_ext *iext, *icon;
1311
1312
1313 ASSERT(ip);
1314 ASSERT(fe);
1315 ASSERT((ip->i_desc_type == ICB_FLAG_SHORT_AD) ||
1316 (ip->i_desc_type == ICB_FLAG_LONG_AD));
1317
1318 if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
1319 elen = sizeof (struct short_ad);
1320 sad = (struct short_ad *)
1321 (fe->fe_spec + SWAP_32(fe->fe_len_ear));
1322 } else if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
1323 elen = sizeof (struct long_ad);
1324 lad = (struct long_ad *)
1325 (fe->fe_spec + SWAP_32(fe->fe_len_ear));
1326 } else {
1327 /* This cannot happen; return an error */
1328 return (EINVAL);
1329 }
1330
1331 ndent = ip->i_max_emb / elen;
1332
1333 if (ip->i_ext_used < ndent) {
1334
1335 if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
1336 ud_make_sad(ip->i_ext, sad, ip->i_ext_used);
1337 } else {
1338 ud_make_lad(ip->i_ext, lad, ip->i_ext_used);
1339 }
1340 fe->fe_len_adesc = SWAP_32(ip->i_ext_used * elen);
1341 con_index = 0;
1342 } else {
1343
1344 con_index = index = 0;
1345
1346 while (index < ip->i_ext_used) {
1347 if (index == 0) {
1348 /*
1349 * The file_entry buffer has already been
1350 * read; the first few extents go into
1351 * the file_entry itself.
1352 */
1353 count = ndent - 1;
1354 fe->fe_len_adesc = SWAP_32(ndent * elen);
1355 bp = NULL;
1356
1357 /*
1358 * The last entry will be a continuation extent.
1359 */
1360 icon = &ip->i_con[con_index];
1361 } else {
1362 /*
1363 * Read the buffer
1364 */
1365 icon = &ip->i_con[con_index];
1366
1367 bno = ud_xlate_to_daddr(ip->i_udf,
1368 icon->ib_prn, icon->ib_block,
1369 icon->ib_count >> ip->i_udf->udf_l2d_shift,
1370 &dummy);
1371 bp = ud_bread(ip->i_dev,
1372 bno << ip->i_udf->udf_l2d_shift,
1373 ip->i_udf->udf_lbsize);
1374 if (bp->b_flags & B_ERROR) {
1375 brelse(bp);
1376 return (EIO);
1377 }
1378
1379 /*
1380 * Figure out how many extents
1381 * fit in this block.
1382 */
1383 count = (bp->b_bcount -
1384 sizeof (struct alloc_ext_desc)) / elen;
1385 if (count > (ip->i_ext_used - index)) {
1386 count = ip->i_ext_used - index;
1387 } else {
1388 count --;
1389 }
1390 con_index++;
1391 if (con_index >= ip->i_con_used) {
1392 icon = NULL;
1393 } else {
1394 icon = &ip->i_con[con_index];
1395 }
1396 }
1397
1398
1399
1400 /*
1401 * convert to on disk form and
1402 * update
1403 */
1404 iext = &ip->i_ext[index];
1405 if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
1406 if (index != 0) {
1407 sad = (struct short_ad *)
1408 (bp->b_un.b_addr +
1409 sizeof (struct alloc_ext_desc));
1410 }
1411 ud_make_sad(iext, sad, count);
1412 sad += count;
1413 if (icon != NULL) {
1414 ud_make_sad(icon, sad, 1);
1415 }
1416 } else {
1417 if (index != 0) {
1418 lad = (struct long_ad *)
1419 (bp->b_un.b_addr +
1420 sizeof (struct alloc_ext_desc));
1421 }
1422 ud_make_lad(iext, lad, count);
1423 lad += count;
1424 if (icon != NULL) {
1425 ud_make_lad(icon, lad, 1);
1426 }
1427 }
1428
1429 if (con_index != 0) {
1430 struct alloc_ext_desc *aed;
1431 int32_t sz;
1432 struct icb_ext *oicon;
1433
1434 oicon = &ip->i_con[con_index - 1];
1435 sz = count * elen;
1436 if (icon != NULL) {
1437 sz += elen;
1438 }
1439 aed = (struct alloc_ext_desc *)bp->b_un.b_addr;
1440 aed->aed_len_aed = SWAP_32(sz);
1441 if (con_index == 1) {
1442 aed->aed_rev_ael =
1443 SWAP_32(ip->i_icb_block);
1444 } else {
1445 aed->aed_rev_ael =
1446 SWAP_32(oicon->ib_block);
1447 }
1448 sz += sizeof (struct alloc_ext_desc);
1449 ud_make_tag(ip->i_udf, &aed->aed_tag,
1450 UD_ALLOC_EXT_DESC, oicon->ib_block, sz);
1451 }
1452
1453 /*
1454 * Write back to disk
1455 */
1456 if (bp != NULL) {
1457 BWRITE(bp);
1458 }
1459 index += count;
1460 }
1461
1462 }
1463
1464 if (con_index != ip->i_con_used) {
1465 int32_t lbmask, l2b, temp;
1466
1467 temp = con_index;
1468 lbmask = ip->i_udf->udf_lbmask;
1469 l2b = ip->i_udf->udf_l2b_shift;
1470 /*
1471 * Free unused continuation extents
1472 */
1473 for (; con_index < ip->i_con_used; con_index++) {
1474 icon = &ip->i_con[con_index];
1475 count = (icon->ib_count + lbmask) >> l2b;
1476 ud_free_space(ip->i_udf->udf_vfs, icon->ib_prn,
1477 icon->ib_block, count);
1478 count = (count << l2b) - sizeof (struct alloc_ext_desc);
1479 ip->i_cur_max_ext -= (count / elen) - 1;
1480 }
1481 ip->i_con_used = temp;
1482 }
1483 return (0);
1484 }
1485
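/*
 * ud_updat_ext4096() - updating the allocation descriptors of a
 * strategy-type-4096 file is not implemented; such files have their
 * write permission bits cleared in ud_iget(), so just return an error.
 */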
1486 /* ARGSUSED */
1487 int32_t
1488 ud_updat_ext4096(struct ud_inode *ip, struct file_entry *fe)
1489 {
1490 return (ENXIO);
1491 }
1492
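/*
 * ud_make_sad() - convert "count" in-core extents (icb_ext) into
 * on-disk short allocation descriptors at sad.
 */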
1493 void
1494 ud_make_sad(struct icb_ext *iext, struct short_ad *sad, int32_t count)
1495 {
1496 int32_t index = 0, scount;
1497
1498 ASSERT(iext);
1499 ASSERT(sad);
1500
1501 if (count != 0) {
1502 ASSERT(count > 0);
1503 while (index < count) {
1504 scount = (iext->ib_count & 0x3FFFFFFF) |
1505 (iext->ib_flags << 30);
1506 sad->sad_ext_len = SWAP_32(scount);
1507 sad->sad_ext_loc = SWAP_32(iext->ib_block);
1508 sad++;
1509 iext++;
1510 index++;
1511 }
1512 }
1513 }
1514
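/*
 * ud_make_lad() - convert "count" in-core extents (icb_ext) into
 * on-disk long allocation descriptors at lad.
 */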
1515 void
1516 ud_make_lad(struct icb_ext *iext, struct long_ad *lad, int32_t count)
1517 {
1518 int32_t index = 0, scount;
1519
1520 ASSERT(iext);
1521 ASSERT(lad);
1522
1523 if (count != 0) {
1524 ASSERT(count > 0);
1525
1526 while (index < count) {
1527 lad->lad_ext_prn = SWAP_16(iext->ib_prn);
1528 scount = (iext->ib_count & 0x3FFFFFFF) |
1529 (iext->ib_flags << 30);
1530 lad->lad_ext_len = SWAP_32(scount);
1531 lad->lad_ext_loc = SWAP_32(iext->ib_block);
1532 lad++;
1533 iext++;
1534 index++;
1535 }
1536 }
1537 }
1538
1539 /*
1540 * Truncate the inode ip to at most length size.
1541 * Free affected disk blocks -- the blocks of the
1542 * file are removed in reverse order.
1543 */
1544 /* ARGSUSED */
1545 int
1546 ud_itrunc(struct ud_inode *oip, u_offset_t length,
1547 int32_t flags, struct cred *cr)
1548 {
1549 int32_t error, boff;
1550 off_t bsize;
1551 mode_t mode;
1552 struct udf_vfs *udf_vfsp;
1553
1554 ud_printf("ud_itrunc\n");
1555
1556 ASSERT(RW_WRITE_HELD(&oip->i_contents));
1557 udf_vfsp = oip->i_udf;
1558 bsize = udf_vfsp->udf_lbsize;
1559
1560 /*
1561 * We only allow truncation of regular files and directories
1562 * to arbitrary lengths here. In addition, we allow symbolic
1563 * links to be truncated only to zero length. Other inode
1564 * types cannot have their length set here.
1565 */
1566 mode = oip->i_type;
1567 if (mode == VFIFO) {
1568 return (0);
1569 }
1570 if ((mode != VREG) && (mode != VDIR) &&
1571 (!(mode == VLNK && length == 0))) {
1572 return (EINVAL);
1573 }
1574 if (length == oip->i_size) {
1575 /* update ctime and mtime to please POSIX tests */
1576 mutex_enter(&oip->i_tlock);
1577 oip->i_flag |= ICHG |IUPD;
1578 mutex_exit(&oip->i_tlock);
1579 return (0);
1580 }
1581
1582 boff = blkoff(udf_vfsp, length);
1583
1584 if (length > oip->i_size) {
1585 /*
1586 * Truncate-up case. ud_bmap_write will ensure that the right
1587 * blocks are allocated. This includes doing any work needed
1588 * for allocating the last block.
1589 */
1590 if (boff == 0) {
1591 error = ud_bmap_write(oip, length - 1,
1592 (int)bsize, 0, cr);
1593 } else {
1594 error = ud_bmap_write(oip, length - 1, boff, 0, cr);
1595 }
1596 if (error == 0) {
1597 u_offset_t osize = oip->i_size;
1598 oip->i_size = length;
1599
1600 /*
1601 * Make sure we zero out the remaining bytes of
1602 * the page in case a mmap scribbled on it. We
1603 * can't prevent a mmap from writing beyond EOF
1604 * on the last page of a file.
1605 */
1606 if ((boff = blkoff(udf_vfsp, osize)) != 0) {
1607 pvn_vpzero(ITOV(oip), osize,
1608 (uint32_t)(bsize - boff));
1609 }
1610 mutex_enter(&oip->i_tlock);
1611 oip->i_flag |= ICHG;
1612 ITIMES_NOLOCK(oip);
1613 mutex_exit(&oip->i_tlock);
1614 }
1615 return (error);
1616 }
1617
1618 /*
1619 * Update the pages of the file. If the file is not being
1620 * truncated to a block boundary, the contents of the
1621 * pages following the end of the file must be zeroed
1622 * in case they ever become accessible again because
1623 * of subsequent file growth.
1624 */
1625 if (boff == 0) {
1626 (void) pvn_vplist_dirty(ITOV(oip), length,
1627 ud_putapage, B_INVAL | B_TRUNC, CRED());
1628 } else {
1629 /*
1630 * Make sure that the last block is properly allocated.
1631 * We only really have to do this if the last block is
1632 * actually allocated. Just to be sure, we do it now
1633 * independent of current allocation.
1634 */
1635 error = ud_bmap_write(oip, length - 1, boff, 0, cr);
1636 if (error) {
1637 return (error);
1638 }
1639
1640 pvn_vpzero(ITOV(oip), length, (uint32_t)(bsize - boff));
1641
1642 (void) pvn_vplist_dirty(ITOV(oip), length,
1643 ud_putapage, B_INVAL | B_TRUNC, CRED());
1644 }
1645
1646
1647 /* Free the blocks */
1648 if (oip->i_desc_type == ICB_FLAG_ONE_AD) {
1649 if (length > oip->i_max_emb) {
1650 return (EFBIG);
1651 }
1652 oip->i_size = length;
1653 mutex_enter(&oip->i_tlock);
1654 oip->i_flag |= ICHG|IUPD;
1655 mutex_exit(&oip->i_tlock);
1656 ud_iupdat(oip, 1);
1657 } else {
1658 if ((error = ud_read_icb_till_off(oip, oip->i_size)) != 0) {
1659 return (error);
1660 }
1661
1662 if (oip->i_astrat == STRAT_TYPE4) {
1663 ud_trunc_ext4(oip, length);
1664 } else if (oip->i_astrat == STRAT_TYPE4096) {
1665 ud_trunc_ext4096(oip, length);
1666 }
1667 }
1668
1669 done:
1670 return (0);
1671 }
1672
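/*
 * ud_trunc_ext4() - truncate a strategy-type-4 file to "length" bytes:
 * shorten the extent containing the new EOF, free the blocks of all
 * later extents, write the inode, and release any continuation blocks
 * that are no longer needed.
 */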
1673 void
1674 ud_trunc_ext4(struct ud_inode *ip, u_offset_t length)
1675 {
1676 int32_t index, l2b, count, ecount;
1677 int32_t elen, ndent, nient;
1678 u_offset_t ext_beg, ext_end;
1679 struct icb_ext *iext, *icon;
1680 int32_t lbmask, ext_used;
1681 uint32_t loc;
1682 struct icb_ext text;
1683 uint32_t con_freed;
1684
1685 ASSERT((ip->i_desc_type == ICB_FLAG_SHORT_AD) ||
1686 (ip->i_desc_type == ICB_FLAG_LONG_AD));
1687
1688 if (ip->i_ext_used == 0) {
1689 return;
1690 }
1691
1692 ext_used = ip->i_ext_used;
1693
1694 lbmask = ip->i_udf->udf_lbmask;
1695 l2b = ip->i_udf->udf_l2b_shift;
1696
1697 ASSERT(ip->i_ext);
1698
1699 ip->i_lbr = 0;
1700 for (index = 0; index < ext_used; index++) {
1701 iext = &ip->i_ext[index];
1702
1703 /*
1704 * Find the beginning and end
1705 * of the current extent.
1706 */
1707 ext_beg = iext->ib_offset;
1708 ext_end = iext->ib_offset +
1709 ((iext->ib_count + lbmask) & ~lbmask);
1710
1711 /*
1712 * This is the extent that contains offset "length".
1713 * Make a copy of this extent and
1714 * remember the index; we can use
1715 * it to free blocks.
1716 */
1717 if ((length <= ext_end) && (length >= ext_beg)) {
1718 text = *iext;
1719
1720 iext->ib_count = length - ext_beg;
1721 ip->i_ext_used = index + 1;
1722 break;
1723 }
1724 if (iext->ib_flags != IB_UN_RE_AL) {
1725 ip->i_lbr += iext->ib_count >> l2b;
1726 }
1727 }
1728 if (ip->i_ext_used != index) {
1729 if (iext->ib_flags != IB_UN_RE_AL) {
1730 ip->i_lbr +=
1731 ((iext->ib_count + lbmask) & ~lbmask) >> l2b;
1732 }
1733 }
1734
1735 ip->i_size = length;
1736 mutex_enter(&ip->i_tlock);
1737 ip->i_flag |= ICHG|IUPD;
1738 mutex_exit(&ip->i_tlock);
1739 ud_iupdat(ip, 1);
1740
1741 /*
1742 * Free the unused space
1743 */
1744 if (text.ib_flags != IB_UN_RE_AL) {
1745 count = (ext_end - length) >> l2b;
1746 if (count) {
1747 loc = text.ib_block +
1748 (((length - text.ib_offset) + lbmask) >> l2b);
1749 ud_free_space(ip->i_udf->udf_vfs, text.ib_prn,
1750 loc, count);
1751 }
1752 }
1753 for (index = ip->i_ext_used; index < ext_used; index++) {
1754 iext = &ip->i_ext[index];
1755 if (iext->ib_flags != IB_UN_RE_AL) {
1756 count = (iext->ib_count + lbmask) >> l2b;
1757 ud_free_space(ip->i_udf->udf_vfs, iext->ib_prn,
1758 iext->ib_block, count);
1759 }
1760 bzero(iext, sizeof (struct icb_ext));
1761 continue;
1762 }
1763
1764 /*
1765 * release any continuation blocks
1766 */
1767 if (ip->i_con) {
1768
1769 ASSERT(ip->i_con_count >= ip->i_con_used);
1770
1771 /*
1772 * Find out how many indirect blocks
1773 * are required and release the rest
1774 */
1775 if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
1776 elen = sizeof (struct short_ad);
1777 } else if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
1778 elen = sizeof (struct long_ad);
1779 }
1780 ndent = ip->i_max_emb / elen;
1781 if (ip->i_ext_used > ndent) {
1782 ecount = ip->i_ext_used - ndent;
1783 } else {
1784 ecount = 0;
1785 }
1786 con_freed = 0;
1787 for (index = 0; index < ip->i_con_used; index++) {
1788 icon = &ip->i_con[index];
1789 nient = icon->ib_count -
1790 (sizeof (struct alloc_ext_desc) + elen);
1791 /* Header + 1 indirect extent */
1792 nient /= elen;
1793 if (ecount) {
1794 if (ecount > nient) {
1795 ecount -= nient;
1796 } else {
1797 ecount = 0;
1798 }
1799 } else {
1800 count = ((icon->ib_count + lbmask) &
1801 ~lbmask) >> l2b;
1802 ud_free_space(ip->i_udf->udf_vfs,
1803 icon->ib_prn, icon->ib_block, count);
1804 con_freed++;
1805 ip->i_cur_max_ext -= nient;
1806 }
1807 }
1808 /*
1809 * Set the count of continuation extents used (i_con_used)
1810 * to the correct value. It is possible for i_con_used to be
1811 * zero if we free up all continuation extents. This happens
1812 * when ecount is 0 before entering the for loop above.
1813 */
1814 ip->i_con_used -= con_freed;
1815 if (ip->i_con_read > ip->i_con_used) {
1816 ip->i_con_read = ip->i_con_used;
1817 }
1818 }
1819 }
1820
1821 void
1822 ud_trunc_ext4096(struct ud_inode *ip, u_offset_t length)
1823 {
1824 /*
1825 * The truncate code is the same for
1826 * files of strategy type 4 and 4096.
1827 */
1828 ud_trunc_ext4(ip, length);
1829 }
1830
1831 /*
1832 * Remove any inodes in the inode cache belonging to dev.
1833 *
1834 * There should not be any active ones; return an error if any are found,
1835 * but still invalidate the others (N.B.: this is a user error, not a
1836 * system error).
1837 *
1838 * The root inode is not invalidated here; it is left for the caller to
1839 * handle. A hold on it (beyond the mount's own reference), like any other
1840 * referenced inode, just causes the "busy" return value.
1841 */
1842 int32_t
1843 ud_iflush(struct vfs *vfsp)
1844 {
1845 int32_t index, busy = 0;
1846 union ihead *ih;
1847 struct udf_vfs *udf_vfsp;
1848 dev_t dev;
1849 struct vnode *rvp, *vp;
1850 struct ud_inode *ip, *next;
1851
1852 ud_printf("ud_iflush\n");
1853 udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
1854 rvp = udf_vfsp->udf_root;
1855 dev = vfsp->vfs_dev;
1856
1857 mutex_enter(&ud_icache_lock);
1858 for (index = 0; index < UD_HASH_SZ; index++) {
1859 ih = &ud_ihead[index];
1860
1861 next = ih->ih_chain[0];
1862 while (next != (struct ud_inode *)ih) {
1863 ip = next;
1864 next = ip->i_forw;
1865 if (ip->i_dev != dev) {
1866 continue;
1867 }
1868 vp = ITOV(ip);
1869 /*
1870 * root inode is processed by the caller
1871 */
1872 if (vp == rvp) {
1873 if (vp->v_count > 1) {
1874 busy = -1;
1875 }
1876 continue;
1877 }
1878 if (ip->i_flag & IREF) {
1879 /*
1880 * Set error indicator for return value,
1881 * but continue invalidating other
1882 * inodes.
1883 */
1884 busy = -1;
1885 continue;
1886 }
1887
1888 rw_enter(&ip->i_contents, RW_WRITER);
1889 remque(ip);
1890 ip->i_forw = ip;
1891 ip->i_back = ip;
1892 /*
1893 * Hold the vnode since it's not done
1894 * in VOP_PUTPAGE anymore.
1895 */
1896 VN_HOLD(vp);
1897 /*
1898 * XXX Synchronous write holding
1899 * cache lock
1900 */
1901 (void) ud_syncip(ip, B_INVAL, I_SYNC);
1902 rw_exit(&ip->i_contents);
1903 VN_RELE(vp);
1904 }
1905 }
1906 mutex_exit(&ud_icache_lock);
1907
1908 return (busy);
1909 }
1910
1911
1912 /*
1913 * Check mode permission on inode. Mode is READ, WRITE or EXEC.
1914 * In the case of WRITE, the read-only status of the file system
1915 * is checked. The applicable mode bits are compared with the
1916 * requested form of access. If bits are missing, the secpolicy
1917 * function will check for privileges.
1918 */
1919 int
1920 ud_iaccess(struct ud_inode *ip, int32_t mode, struct cred *cr, int dolock)
1921 {
1922 int shift = 0;
1923 int ret = 0;
1924
1925 if (dolock)
1926 rw_enter(&ip->i_contents, RW_READER);
1927 ASSERT(RW_LOCK_HELD(&ip->i_contents));
1928
1929 ud_printf("ud_iaccess\n");
1930 if (mode & IWRITE) {
1931 /*
1932 * Disallow write attempts on read-only
1933 * file systems, unless the file is a block
1934 * or character device or a FIFO.
1935 */
1936 if (ip->i_udf->udf_flags & UDF_FL_RDONLY) {
1937 if ((ip->i_type != VCHR) &&
1938 (ip->i_type != VBLK) &&
1939 (ip->i_type != VFIFO)) {
1940 ret = EROFS;
1941 goto out;
1942 }
1943 }
1944 }
1945
1946 /*
1947 * Access check is based on only
1948 * one of owner, group, public.
1949 * If not owner, then check group.
1950 * If not a member of the group, then
1951 * check public access.
1952 */
1953 if (crgetuid(cr) != ip->i_uid) {
1954 shift += 5;
1955 if (!groupmember((uid_t)ip->i_gid, cr))
1956 shift += 5;
1957 }
1958
1959 ret = secpolicy_vnode_access2(cr, ITOV(ip), ip->i_uid,
1960 UD2VA_PERM(ip->i_perm << shift), UD2VA_PERM(mode));
1961
1962 out:
1963 if (dolock)
1964 rw_exit(&ip->i_contents);
1965 return (ret);
1966 }
1967
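/*
 * ud_imark() - stamp the current time into the inode's access,
 * modification and/or change times, as selected by the IACC, IUPD
 * and ICHG flags.
 */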
1968 void
1969 ud_imark(struct ud_inode *ip)
1970 {
1971 timestruc_t now;
1972
1973 gethrestime(&now);
1974 ud_printf("ud_imark\n");
1975 if (ip->i_flag & IACC) {
1976 ip->i_atime.tv_sec = now.tv_sec;
1977 ip->i_atime.tv_nsec = now.tv_nsec;
1978 }
1979 if (ip->i_flag & IUPD) {
1980 ip->i_mtime.tv_sec = now.tv_sec;
1981 ip->i_mtime.tv_nsec = now.tv_nsec;
1982 ip->i_flag |= IMODTIME;
1983 }
1984 if (ip->i_flag & ICHG) {
1985 ip->i_diroff = 0;
1986 ip->i_ctime.tv_sec = now.tv_sec;
1987 ip->i_ctime.tv_nsec = now.tv_nsec;
1988 }
1989 }
1990
1991
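/*
 * ud_itimes_nolock() - apply any pending time updates (IACC/IUPD/ICHG)
 * to the inode and note whether the on-disk copy now needs a full
 * update (IMOD) or only an access-time update (IMODACC).  The function
 * itself takes no locks (hence the name).
 */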
1992 void
1993 ud_itimes_nolock(struct ud_inode *ip)
1994 {
1995 ud_printf("ud_itimes_nolock\n");
1996
1997 if (ip->i_flag & (IUPD|IACC|ICHG)) {
1998 if (ip->i_flag & ICHG) {
1999 ip->i_flag |= IMOD;
2000 } else {
2001 ip->i_flag |= IMODACC;
2002 }
2003 ud_imark(ip);
2004 ip->i_flag &= ~(IACC|IUPD|ICHG);
2005 }
2006 }
2007
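/*
 * ud_delcache() - remove the inode from the icache hash chain so that
 * it can no longer be found by ud_iget().
 */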
2008 void
2009 ud_delcache(struct ud_inode *ip)
2010 {
2011 ud_printf("ud_delcache\n");
2012
2013 mutex_enter(&ud_icache_lock);
2014 remque(ip);
2015 ip->i_forw = ip;
2016 ip->i_back = ip;
2017 mutex_exit(&ud_icache_lock);
2018 }
2019
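/*
 * ud_idrop() - drop one hold on the inode's vnode; when the last hold
 * goes away, put the inode back on the free list (at the front if it
 * has no cached pages or its permissions have been cleared, otherwise
 * at the end).
 */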
2020 void
2021 ud_idrop(struct ud_inode *ip)
2022 {
2023 struct vnode *vp = ITOV(ip);
2024
2025 ASSERT(RW_WRITE_HELD(&ip->i_contents));
2026
2027 ud_printf("ud_idrop\n");
2028
2029 mutex_enter(&vp->v_lock);
2030 VN_RELE_LOCKED(vp);
2031 if (vp->v_count > 0) {
2032 mutex_exit(&vp->v_lock);
2033 return;
2034 }
2035 mutex_exit(&vp->v_lock);
2036
2037 /*
2038 * If the inode is invalid or there are no pages associated
2039 * with it, put the inode at the front of the free list.
2040 */
2041 mutex_enter(&ip->i_tlock);
2042 mutex_enter(&udf_ifree_lock);
2043 if (!vn_has_cached_data(vp) || ip->i_perm == 0) {
2044 ud_add_to_free_list(ip, UD_BEGIN);
2045 } else {
2046 /*
2047 * Otherwise, put the inode back on the end of the free list.
2048 */
2049 ud_add_to_free_list(ip, UD_END);
2050 }
2051 mutex_exit(&udf_ifree_lock);
2052 ip->i_flag &= IMODTIME;
2053 mutex_exit(&ip->i_tlock);
2054 }
2055
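/*
 * ud_add_to_free_list() - link the inode onto the free list, either at
 * the beginning (UD_BEGIN) or at the end (UD_END).  The caller must
 * hold udf_ifree_lock.
 */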
2056 void
2057 ud_add_to_free_list(struct ud_inode *ip, uint32_t at)
2058 {
2059 ASSERT(ip);
2060 ASSERT(mutex_owned(&udf_ifree_lock));
2061
2062 #ifdef DEBUG
2063 /* Search if the element is already in the list */
2064 if (udf_ifreeh != NULL) {
2065 struct ud_inode *iq;
2066
2067 iq = udf_ifreeh;
2068 while (iq) {
2069 if (iq == ip) {
2070 cmn_err(CE_WARN, "Duplicate %p\n", (void *)ip);
2071 }
2072 iq = iq->i_freef;
2073 }
2074 }
2075 #endif
2076
2077 ip->i_freef = NULL;
2078 ip->i_freeb = NULL;
2079 if (udf_ifreeh == NULL) {
2080 /*
2081 * Nothing on the list; just add it.
2082 */
2083 udf_ifreeh = ip;
2084 udf_ifreet = ip;
2085 } else {
2086 if (at == UD_BEGIN) {
2087 /*
2088 * Add at the beginning of the list.
2089 */
2090 ip->i_freef = udf_ifreeh;
2091 udf_ifreeh->i_freeb = ip;
2092 udf_ifreeh = ip;
2093 } else {
2094 /*
2095 * Add at the end of the list
2096 */
2097 ip->i_freeb = udf_ifreet;
2098 udf_ifreet->i_freef = ip;
2099 udf_ifreet = ip;
2100 }
2101 }
2102 }
2103
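/*
 * ud_remove_from_free_list() - unlink the inode from the free list.
 * The caller must hold udf_ifree_lock.  The "at" argument is a hint:
 * UD_BEGIN means the inode is known to be at the head of the list.
 */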
2104 void
2105 ud_remove_from_free_list(struct ud_inode *ip, uint32_t at)
2106 {
2107 ASSERT(ip);
2108 ASSERT(mutex_owned(&udf_ifree_lock));
2109
2110 #ifdef DEBUG
2111 {
2112 struct ud_inode *iq;
2113 uint32_t found = 0;
2114
2115 iq = udf_ifreeh;
2116 while (iq) {
2117 if (iq == ip) {
2118 found++;
2119 }
2120 iq = iq->i_freef;
2121 }
2122 if (found != 1) {
2123 cmn_err(CE_WARN, "ip %p is found %x times\n",
2124 (void *)ip, found);
2125 }
2126 }
2127 #endif
2128
2129 if ((ip->i_freef == NULL) && (ip->i_freeb == NULL)) {
2130 if (ip != udf_ifreeh) {
2131 return;
2132 }
2133 }
2134
2135 if ((at == UD_BEGIN) || (ip == udf_ifreeh)) {
2136 udf_ifreeh = ip->i_freef;
2137 if (ip->i_freef == NULL) {
2138 udf_ifreet = NULL;
2139 } else {
2140 udf_ifreeh->i_freeb = NULL;
2141 }
2142 } else {
2143 ip->i_freeb->i_freef = ip->i_freef;
2144 if (ip->i_freef) {
2145 ip->i_freef->i_freeb = ip->i_freeb;
2146 } else {
2147 udf_ifreet = ip->i_freeb;
2148 }
2149 }
2150 ip->i_freef = NULL;
2151 ip->i_freeb = NULL;
2152 }
2153
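/*
 * ud_init_inodes() - one-time initialization of the inode hash chains,
 * the free list, and the global locks used by the UDF inode cache.
 */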
2154 void
2155 ud_init_inodes(void)
2156 {
2157 union ihead *ih = ud_ihead;
2158 int index;
2159
2160 for (index = 0; index < UD_HASH_SZ; index++, ih++) {
2161 ih->ih_head[0] = ih;
2162 ih->ih_head[1] = ih;
2163 }
2164 mutex_init(&ud_icache_lock, NULL, MUTEX_DEFAULT, NULL);
2165 mutex_init(&ud_nino_lock, NULL, MUTEX_DEFAULT, NULL);
2166
2167 udf_ifreeh = NULL;
2168 udf_ifreet = NULL;
2169 mutex_init(&udf_ifree_lock, NULL, MUTEX_DEFAULT, NULL);
2170
2171 mutex_init(&ud_sync_busy, NULL, MUTEX_DEFAULT, NULL);
2172 udf_vfs_instances = NULL;
2173 mutex_init(&udf_vfs_mutex, NULL, MUTEX_DEFAULT, NULL);
2174 }