/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017 by Delphix. All rights reserved.
 */

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/signal.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/buf.h>
#include <sys/vfs.h>
#include <sys/stat.h>
#include <sys/vnode.h>
#include <sys/mode.h>
#include <sys/proc.h>
#include <sys/disp.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/flock.h>
#include <sys/kmem.h>
#include <sys/uio.h>
#include <sys/dnlc.h>
#include <sys/conf.h>
#include <sys/errno.h>
#include <sys/mman.h>
#include <sys/fbuf.h>
#include <sys/pathname.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>
#include <sys/cmn_err.h>
#include <sys/dirent.h>
#include <sys/modctl.h>
#include <sys/statvfs.h>
#include <sys/mount.h>
#include <sys/sunddi.h>
#include <sys/bootconf.h>
#include <sys/policy.h>

#include <vm/hat.h>
#include <vm/page.h>
#include <vm/pvn.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_map.h>
#include <vm/seg_kmem.h>
#include <vm/seg_vn.h>
#include <vm/rm.h>
#include <sys/swap.h>


#include <fs/fs_subr.h>


#include <sys/fs/udf_volume.h>
#include <sys/fs/udf_inode.h>

extern struct vnodeops *udf_vnodeops;

kmutex_t ud_sync_busy;
/*
 * udf_vfs list manipulation routines
 */
kmutex_t udf_vfs_mutex;
struct udf_vfs *udf_vfs_instances;
#ifndef __lint
_NOTE(MUTEX_PROTECTS_DATA(udf_vfs_mutex, udf_vfs_instances))
#endif

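/*
 * In-core inode cache: hash chains keyed on (device, disk address of
 * the inode's ICB), protected by ud_icache_lock.
 */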
union ihead ud_ihead[UD_HASH_SZ];
kmutex_t ud_icache_lock;

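/*
 * Position hints for the inode freelist routines: insert or remove at
 * the head (UD_BEGIN) or tail (UD_END) of the freelist; UD_UNKN means
 * the position is unknown and the list must be searched.
 */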
#define UD_BEGIN        0x0
#define UD_END          0x1
#define UD_UNKN         0x2
struct ud_inode *udf_ifreeh, *udf_ifreet;
kmutex_t udf_ifree_lock;
#ifndef __lint
_NOTE(MUTEX_PROTECTS_DATA(udf_ifree_lock, udf_ifreeh))
_NOTE(MUTEX_PROTECTS_DATA(udf_ifree_lock, udf_ifreet))
#endif

kmutex_t ud_nino_lock;
int32_t ud_max_inodes = 512;
int32_t ud_cur_inodes = 0;
#ifndef __lint
_NOTE(MUTEX_PROTECTS_DATA(ud_nino_lock, ud_cur_inodes))
#endif

uid_t ud_default_uid = 0;
gid_t ud_default_gid = 3;

int32_t ud_updat_ext4(struct ud_inode *, struct file_entry *);
int32_t ud_updat_ext4096(struct ud_inode *, struct file_entry *);
void ud_make_sad(struct icb_ext *, struct short_ad *, int32_t);
void ud_make_lad(struct icb_ext *, struct long_ad *, int32_t);
void ud_trunc_ext4(struct ud_inode *, u_offset_t);
void ud_trunc_ext4096(struct ud_inode *, u_offset_t);
void ud_add_to_free_list(struct ud_inode *, uint32_t);
void ud_remove_from_free_list(struct ud_inode *, uint32_t);


#ifdef  DEBUG
struct ud_inode *
ud_search_icache(struct vfs *vfsp, uint16_t prn, uint32_t ploc)
{
        int32_t hno;
        union ihead *ih;
        struct ud_inode *ip;
        struct udf_vfs *udf_vfsp;
        uint32_t loc, dummy;

        udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
        loc = ud_xlate_to_daddr(udf_vfsp, prn, ploc, 1, &dummy);

        mutex_enter(&ud_icache_lock);
        hno = UD_INOHASH(vfsp->vfs_dev, loc);
        ih = &ud_ihead[hno];
        for (ip = ih->ih_chain[0];
            ip != (struct ud_inode *)ih;
            ip = ip->i_forw) {
                if ((prn == ip->i_icb_prn) && (ploc == ip->i_icb_block) &&
                    (vfsp->vfs_dev == ip->i_dev)) {
                        mutex_exit(&ud_icache_lock);
                        return (ip);
                }
        }
        mutex_exit(&ud_icache_lock);
        return (0);
}
#endif

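/*
 * Read in the inode identified by (prn, ploc) on vfsp and return it
 * held in *ipp.  The in-core inode cache is searched first; on a miss
 * an inode is recycled from the freelist or freshly allocated, and the
 * file_entry is read from disk.  If pbp is supplied it is used as the
 * already-read file_entry buffer; it is always released before
 * returning.
 */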
/* ARGSUSED */
int
ud_iget(struct vfs *vfsp, uint16_t prn, uint32_t ploc, struct ud_inode **ipp,
    struct buf *pbp, struct cred *cred)
{
        int32_t hno, nomem = 0, icb_tag_flags;
        union ihead *ih;
        struct ud_inode *ip;
        struct vnode *vp;
        struct buf *bp = NULL;
        struct file_entry *fe;
        struct udf_vfs *udf_vfsp;
        struct ext_attr_hdr *eah;
        struct attr_hdr *ah;
        int32_t ea_len, ea_off;
        daddr_t loc;
        uint64_t offset = 0;
        struct icb_ext *iext, *con;
        uint32_t length, dummy;
        int32_t ndesc, ftype;
        uint16_t old_prn;
        uint32_t old_block, old_lbano;

        ud_printf("ud_iget\n");
        udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
        old_prn = 0;
        old_block = old_lbano = 0;
        ftype = 0;
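        /* Translate the ICB's (partition, block) address to a disk address */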
        loc = ud_xlate_to_daddr(udf_vfsp, prn, ploc, 1, &dummy);
loop:
        mutex_enter(&ud_icache_lock);
        hno = UD_INOHASH(vfsp->vfs_dev, loc);

        ih = &ud_ihead[hno];
        for (ip = ih->ih_chain[0];
            ip != (struct ud_inode *)ih;
            ip = ip->i_forw) {

                if ((prn == ip->i_icb_prn) &&
                    (ploc == ip->i_icb_block) &&
                    (vfsp->vfs_dev == ip->i_dev)) {

                        vp = ITOV(ip);
                        VN_HOLD(vp);
                        mutex_exit(&ud_icache_lock);

                        rw_enter(&ip->i_contents, RW_READER);
                        mutex_enter(&ip->i_tlock);
                        if ((ip->i_flag & IREF) == 0) {
                                mutex_enter(&udf_ifree_lock);
                                ud_remove_from_free_list(ip, UD_UNKN);
                                mutex_exit(&udf_ifree_lock);
                        }
                        ip->i_flag |= IREF;
                        mutex_exit(&ip->i_tlock);
                        rw_exit(&ip->i_contents);

                        *ipp = ip;

                        if (pbp != NULL) {
                                brelse(pbp);
                        }

                        return (0);
                }
        }

        /*
         * We don't have it in the cache
         * Allocate a new entry
         */
tryagain:
        mutex_enter(&udf_ifree_lock);
        mutex_enter(&ud_nino_lock);
        if (ud_cur_inodes > ud_max_inodes) {
                int32_t purged;

                mutex_exit(&ud_nino_lock);
                while (udf_ifreeh == NULL ||
                    vn_has_cached_data(ITOV(udf_ifreeh))) {
                        /*
                         * Try to put an inode on the freelist that's
                         * sitting in the dnlc.
                         */
                        mutex_exit(&udf_ifree_lock);
                        purged = dnlc_fs_purge1(udf_vnodeops);
                        mutex_enter(&udf_ifree_lock);
                        if (!purged) {
                                break;
                        }
                }
                mutex_enter(&ud_nino_lock);
        }

        /*
         * If there's a free one available and it has no pages attached
         * take it. If we're over the high water mark, take it even if
         * it has attached pages. Otherwise, make a new one.
         */
        if (udf_ifreeh &&
            (nomem || !vn_has_cached_data(ITOV(udf_ifreeh)) ||
            ud_cur_inodes >= ud_max_inodes)) {

                mutex_exit(&ud_nino_lock);
                ip = udf_ifreeh;
                vp = ITOV(ip);

                ud_remove_from_free_list(ip, UD_BEGIN);

                mutex_exit(&udf_ifree_lock);
                if (ip->i_flag & IREF) {
                        cmn_err(CE_WARN, "ud_iget: bad i_flag\n");
                        mutex_exit(&ud_icache_lock);
                        if (pbp != NULL) {
                                brelse(pbp);
                        }
                        return (EINVAL);
                }
                rw_enter(&ip->i_contents, RW_WRITER);

                /*
                 * We call udf_syncip() to synchronously destroy all pages
                 * associated with the vnode before re-using it. The pageout
                 * thread may have beat us to this page so our v_count can
                 * be > 0 at this point even though we are on the freelist.
                 */
                mutex_enter(&ip->i_tlock);
                ip->i_flag = (ip->i_flag & IMODTIME) | IREF;
                mutex_exit(&ip->i_tlock);

                VN_HOLD(vp);
                if (ud_syncip(ip, B_INVAL, I_SYNC) != 0) {
                        ud_idrop(ip);
                        rw_exit(&ip->i_contents);
                        mutex_exit(&ud_icache_lock);
                        goto loop;
                }

                mutex_enter(&ip->i_tlock);
                ip->i_flag &= ~IMODTIME;
                mutex_exit(&ip->i_tlock);

                if (ip->i_ext) {
                        kmem_free(ip->i_ext,
                            sizeof (struct icb_ext) * ip->i_ext_count);
                        ip->i_ext = 0;
                        ip->i_ext_count = ip->i_ext_used = 0;
                }

                if (ip->i_con) {
                        kmem_free(ip->i_con,
                            sizeof (struct icb_ext) * ip->i_con_count);
                        ip->i_con = 0;
                        ip->i_con_count = ip->i_con_used = ip->i_con_read = 0;
                }

                /*
                 * The pageout thread may not have had a chance to release
                 * its hold on the vnode (if it was active with this vp),
                 * but the pages should all be invalidated.
                 */
        } else {
                mutex_exit(&ud_nino_lock);
                mutex_exit(&udf_ifree_lock);
                /*
                 * Try to get memory for this inode without blocking.
                 * If we can't and there is something on the freelist,
                 * go ahead and use it, otherwise block waiting for
                 * memory holding the hash_lock. We expose a potential
                 * deadlock if all users of memory have to do a ud_iget()
                 * before releasing memory.
                 */
                ip = (struct ud_inode *)kmem_zalloc(sizeof (struct ud_inode),
                    KM_NOSLEEP);
                vp = vn_alloc(KM_NOSLEEP);
                if ((ip == NULL) || (vp == NULL)) {
                        mutex_enter(&udf_ifree_lock);
                        if (udf_ifreeh) {
                                mutex_exit(&udf_ifree_lock);
                                if (ip != NULL)
                                        kmem_free(ip, sizeof (struct ud_inode));
                                if (vp != NULL)
                                        vn_free(vp);
                                nomem = 1;
                                goto tryagain;
                        } else {
                                mutex_exit(&udf_ifree_lock);
                                if (ip == NULL)
                                        ip = (struct ud_inode *)
                                            kmem_zalloc(
                                            sizeof (struct ud_inode),
                                            KM_SLEEP);
                                if (vp == NULL)
                                        vp = vn_alloc(KM_SLEEP);
                        }
                }
                ip->i_vnode = vp;

                ip->i_marker1 = (uint32_t)0xAAAAAAAA;
                ip->i_marker2 = (uint32_t)0xBBBBBBBB;
                ip->i_marker3 = (uint32_t)0xCCCCCCCC;

                rw_init(&ip->i_rwlock, NULL, RW_DEFAULT, NULL);
                rw_init(&ip->i_contents, NULL, RW_DEFAULT, NULL);
                mutex_init(&ip->i_tlock, NULL, MUTEX_DEFAULT, NULL);

                ip->i_forw = ip;
                ip->i_back = ip;
                vp->v_data = (caddr_t)ip;
                vn_setops(vp, udf_vnodeops);
                ip->i_flag = IREF;
                cv_init(&ip->i_wrcv, NULL, CV_DRIVER, NULL);
                mutex_enter(&ud_nino_lock);
                ud_cur_inodes++;
                mutex_exit(&ud_nino_lock);

                rw_enter(&ip->i_contents, RW_WRITER);
        }

        if (vp->v_count < 1) {
                cmn_err(CE_WARN, "ud_iget: v_count < 1\n");
                mutex_exit(&ud_icache_lock);
                rw_exit(&ip->i_contents);
                if (pbp != NULL) {
                        brelse(pbp);
                }
                return (EINVAL);
        }
        if (vn_has_cached_data(vp)) {
                cmn_err(CE_WARN, "ud_iget: v_pages not NULL\n");
                mutex_exit(&ud_icache_lock);
                rw_exit(&ip->i_contents);
                if (pbp != NULL) {
                        brelse(pbp);
                }
                return (EINVAL);
        }

        /*
         * Move the inode on the chain for its new (ino, dev) pair
         */
        remque(ip);
        ip->i_forw = ip;
        ip->i_back = ip;
        insque(ip, ih);

        ip->i_dev = vfsp->vfs_dev;
        ip->i_udf = udf_vfsp;
        ip->i_diroff = 0;
        ip->i_devvp = ip->i_udf->udf_devvp;
        ip->i_icb_prn = prn;
        ip->i_icb_block = ploc;
        ip->i_icb_lbano = loc;
        ip->i_nextr = 0;
        ip->i_seq = 0;
        mutex_exit(&ud_icache_lock);

read_de:
        if (pbp != NULL) {
                /*
                 * assumption is that we will not
                 * create a 4096 file
                 */
                bp = pbp;
        } else {
                bp = ud_bread(ip->i_dev,
                    ip->i_icb_lbano << udf_vfsp->udf_l2d_shift,
                    udf_vfsp->udf_lbsize);
        }

        /*
         * Check I/O errors
         */
        fe = (struct file_entry *)bp->b_un.b_addr;
        if ((bp->b_flags & B_ERROR) ||
            (ud_verify_tag_and_desc(&fe->fe_tag, UD_FILE_ENTRY,
            ip->i_icb_block, 1, udf_vfsp->udf_lbsize) != 0)) {

                if (((bp->b_flags & B_ERROR) == 0) &&
                    (ftype == STRAT_TYPE4096)) {
                        if (ud_check_te_unrec(udf_vfsp,
                            bp->b_un.b_addr, ip->i_icb_block) == 0) {

                                brelse(bp);

                                /*
                                 * restore old file entry location
                                 */
                                ip->i_icb_prn = old_prn;
                                ip->i_icb_block = old_block;
                                ip->i_icb_lbano = old_lbano;

                                /*
                                 * reread old file entry
                                 */
                                bp = ud_bread(ip->i_dev,
                                    old_lbano << udf_vfsp->udf_l2d_shift,
                                    udf_vfsp->udf_lbsize);
                                if ((bp->b_flags & B_ERROR) == 0) {
                                        fe = (struct file_entry *)
                                            bp->b_un.b_addr;
                                        if (ud_verify_tag_and_desc(&fe->fe_tag,
                                            UD_FILE_ENTRY, ip->i_icb_block, 1,
                                            udf_vfsp->udf_lbsize) == 0) {
                                                goto end_4096;
                                        }
                                }
                        }
                }
error_ret:
                brelse(bp);
                /*
                 * The inode may not contain anything useful. Mark it as
                 * having an error and let anyone else who was waiting for
                 * this know there was an error. Callers waiting for
                 * access to this inode in ud_iget will find
                 * the i_icb_lbano == 0, so there won't be a match.
                 * It remains in the cache. Put it back on the freelist.
                 */
                mutex_enter(&vp->v_lock);
                VN_RELE_LOCKED(vp);
                mutex_exit(&vp->v_lock);
                ip->i_icb_lbano = 0;

                /*
                 * The following two lines make it impossible
                 * for anyone to do a VN_HOLD followed by a
                 * VN_RELE, thus avoiding a ud_iinactive
                 */
                ip->i_icb_prn = 0xffff;
                ip->i_icb_block = 0;

                /*
                 * remove the bad inode from hash chains
                 * so that during unmount we will not
                 * go through this inode
                 */
                mutex_enter(&ud_icache_lock);
                remque(ip);
                ip->i_forw = ip;
                ip->i_back = ip;
                mutex_exit(&ud_icache_lock);

                /* Put the inode at the front of the freelist */
                mutex_enter(&ip->i_tlock);
                mutex_enter(&udf_ifree_lock);
                ud_add_to_free_list(ip, UD_BEGIN);
                mutex_exit(&udf_ifree_lock);
                ip->i_flag = 0;
                mutex_exit(&ip->i_tlock);
                rw_exit(&ip->i_contents);
                return (EIO);
        }

        if (fe->fe_icb_tag.itag_strategy == SWAP_16(STRAT_TYPE4096)) {
                struct buf *ibp = NULL;
                struct indirect_entry *ie;

                /*
                 * save old file_entry location
                 */
                old_prn = ip->i_icb_prn;
                old_block = ip->i_icb_block;
                old_lbano = ip->i_icb_lbano;

                ftype = STRAT_TYPE4096;

                /*
                 * If astrat is 4096, different versions of the
                 * file exist on the media; we are supposed to
                 * get to the latest version of the file.
                 */

                /*
                 * The IE is supposed to be in the block
                 * following the DE.
                 */
                ibp = ud_bread(ip->i_dev,
                    (ip->i_icb_lbano + 1) << udf_vfsp->udf_l2d_shift,
                    udf_vfsp->udf_lbsize);
                if (ibp->b_flags & B_ERROR) {
                        /*
                         * Get rid of current ibp and
                         * then goto error on DE's bp
                         */
ie_error:
                        brelse(ibp);
                        goto error_ret;
                }

                ie = (struct indirect_entry *)ibp->b_un.b_addr;
                if (ud_verify_tag_and_desc(&ie->ie_tag,
                    UD_INDIRECT_ENT, ip->i_icb_block + 1,
                    1, udf_vfsp->udf_lbsize) == 0) {
                        struct long_ad *lad;

                        lad = &ie->ie_indirecticb;
                        ip->i_icb_prn = SWAP_16(lad->lad_ext_prn);
                        ip->i_icb_block = SWAP_32(lad->lad_ext_loc);
                        ip->i_icb_lbano = ud_xlate_to_daddr(udf_vfsp,
                            ip->i_icb_prn, ip->i_icb_block,
                            1, &dummy);
                        brelse(ibp);
                        brelse(bp);
                        goto read_de;
                }

                /*
                 * If this block is TE or unrecorded we
                 * are at the last entry
                 */
                if (ud_check_te_unrec(udf_vfsp, ibp->b_un.b_addr,
                    ip->i_icb_block + 1) != 0) {
                        /*
                         * This is neither a valid IE (checked
                         * above) nor a TE/unrecorded block, so
                         * treat it as an error.
                         */
                        goto ie_error;
                }
                /*
                 * If ud_check_te_unrec() returns 0,
                 * this is the last entry in the chain,
                 * i.e. the latest file_entry.
                 */
                brelse(ibp);
        }

end_4096:

        ip->i_uid = SWAP_32(fe->fe_uid);
        if (ip->i_uid == -1) {
                ip->i_uid = ud_default_uid;
        }
        ip->i_gid = SWAP_32(fe->fe_gid);
        if (ip->i_gid == -1) {
                ip->i_gid = ud_default_gid;
        }
        ip->i_perm = SWAP_32(fe->fe_perms) & 0xFFFF;
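        /*
         * UDF permissions carry five bits for each identity class
         * (other, group, owner), so shifting the owner write bit
         * right by 5 and 10 yields the group and other write bits.
         * Strategy-4096 media are treated as read-only here, so all
         * three write bits are cleared.
         */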
        if (fe->fe_icb_tag.itag_strategy == SWAP_16(STRAT_TYPE4096)) {
                ip->i_perm &= ~(IWRITE | (IWRITE >> 5) | (IWRITE >> 10));
        }

        ip->i_nlink = SWAP_16(fe->fe_lcount);
        ip->i_size = SWAP_64(fe->fe_info_len);
        ip->i_lbr = SWAP_64(fe->fe_lbr);

        ud_dtime2utime(&ip->i_atime, &fe->fe_acc_time);
        ud_dtime2utime(&ip->i_mtime, &fe->fe_mod_time);
        ud_dtime2utime(&ip->i_ctime, &fe->fe_attr_time);


        ip->i_uniqid = SWAP_64(fe->fe_uniq_id);
        icb_tag_flags = SWAP_16(fe->fe_icb_tag.itag_flags);

        if ((fe->fe_icb_tag.itag_ftype == FTYPE_CHAR_DEV) ||
            (fe->fe_icb_tag.itag_ftype == FTYPE_BLOCK_DEV)) {

                eah = (struct ext_attr_hdr *)fe->fe_spec;
                ea_off = GET_32(&eah->eah_ial);
                ea_len = GET_32(&fe->fe_len_ear);
                if (ea_len && (ud_verify_tag_and_desc(&eah->eah_tag,
                    UD_EXT_ATTR_HDR, ip->i_icb_block, 1,
                    sizeof (struct file_entry) -
                    offsetof(struct file_entry, fe_spec)) == 0)) {

                        while (ea_off < ea_len) {
                                /*
                                 * We now check the validity of ea_off:
                                 * (ea_len - ea_off) should be large enough to
                                 * hold at least the attribute header.
                                 */
                                if ((ea_len - ea_off) <
                                    sizeof (struct attr_hdr)) {
                                        cmn_err(CE_NOTE,
                                            "ea_len(0x%x) - ea_off(0x%x) is "
                                            "too small to hold attr. info. "
                                            "blockno 0x%x\n",
                                            ea_len, ea_off, ip->i_icb_block);
                                        goto error_ret;
                                }
                                ah = (struct attr_hdr *)&fe->fe_spec[ea_off];

                                /*
                                 * Device Specification EA
                                 */
                                if ((GET_32(&ah->ahdr_atype) == 12) &&
                                    (ah->ahdr_astype == 1)) {
                                        struct dev_spec_ear *ds;

                                        if ((ea_len - ea_off) <
                                            sizeof (struct dev_spec_ear)) {
                                                cmn_err(CE_NOTE,
                                                    "ea_len(0x%x) - "
                                                    "ea_off(0x%x) is too small "
                                                    "to hold dev_spec_ear."
                                                    " blockno 0x%x\n",
                                                    ea_len, ea_off,
                                                    ip->i_icb_block);
                                                goto error_ret;
                                        }
                                        ds = (struct dev_spec_ear *)ah;
                                        ip->i_major = GET_32(&ds->ds_major_id);
                                        ip->i_minor = GET_32(&ds->ds_minor_id);
                                }

                                /*
                                 * Impl Use EA
                                 */
                                if ((GET_32(&ah->ahdr_atype) == 2048) &&
                                    (ah->ahdr_astype == 1)) {
                                        struct iu_ea *iuea;
                                        struct copy_mgt_info *cmi;

                                        if ((ea_len - ea_off) <
                                            sizeof (struct iu_ea)) {
                                                cmn_err(CE_NOTE,
"ea_len(0x%x) - ea_off(0x%x) is too small to hold iu_ea. blockno 0x%x\n",
                                                    ea_len, ea_off,
                                                    ip->i_icb_block);
                                                goto error_ret;
                                        }
                                        iuea = (struct iu_ea *)ah;
                                        if (strncmp(iuea->iuea_ii.reg_id,
                                            UDF_FREEEASPACE,
                                            sizeof (iuea->iuea_ii.reg_id))
                                            == 0) {
                                                /* skip it */
                                                iuea = iuea;
                                        } else if (strncmp(iuea->iuea_ii.reg_id,
                                            UDF_CGMS_INFO,
                                            sizeof (iuea->iuea_ii.reg_id))
                                            == 0) {
                                                cmi = (struct copy_mgt_info *)
                                                        iuea->iuea_iu;
                                                cmi = cmi;
                                        }
                                }
                                /* ??? PARANOIA */
                                if (GET_32(&ah->ahdr_length) == 0) {
                                        break;
                                }
                                ea_off += GET_32(&ah->ahdr_length);
                        }
                }
        }

        ip->i_nextr = 0;

        ip->i_maxent = SWAP_16(fe->fe_icb_tag.itag_max_ent);
        ip->i_astrat = SWAP_16(fe->fe_icb_tag.itag_strategy);

        ip->i_desc_type = icb_tag_flags & 0x7;

        /* Strictly Paranoia */
        ip->i_ext = NULL;
        ip->i_ext_count = ip->i_ext_used = 0;
        ip->i_con = 0;
        ip->i_con_count = ip->i_con_used = ip->i_con_read = 0;

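        /*
         * 0xB0 is the size of the fixed portion of the file_entry,
         * i.e. offsetof(struct file_entry, fe_spec); the extended
         * attributes follow it and the allocation descriptors (or
         * embedded data) come after those.  i_max_emb is the room
         * left in the block for the embedded descriptors/data.
         */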
        ip->i_data_off = 0xB0 + SWAP_32(fe->fe_len_ear);
        ip->i_max_emb = udf_vfsp->udf_lbsize - ip->i_data_off;
        if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
                /* Short allocation desc */
                struct short_ad *sad;

                ip->i_ext_used = 0;
                ip->i_ext_count = ndesc =
                    SWAP_32(fe->fe_len_adesc) / sizeof (struct short_ad);
                ip->i_ext_count =
                    ((ip->i_ext_count / EXT_PER_MALLOC) + 1) * EXT_PER_MALLOC;
                ip->i_ext = (struct icb_ext *)kmem_zalloc(ip->i_ext_count *
                    sizeof (struct icb_ext), KM_SLEEP);
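                /*
                 * Number of descriptors that fit in the embedded area;
                 * one slot is presumably held back for a continuation
                 * extent.
                 */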
                ip->i_cur_max_ext = ip->i_max_emb / sizeof (struct short_ad);
                ip->i_cur_max_ext--;

                if ((ip->i_astrat != STRAT_TYPE4) &&
                    (ip->i_astrat != STRAT_TYPE4096)) {
                        goto error_ret;
                }

                sad = (struct short_ad *)
                    (fe->fe_spec + SWAP_32(fe->fe_len_ear));
                iext = ip->i_ext;
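                /*
                 * Walk the on-disk allocation descriptors.  Per
                 * ECMA-167, the top two bits of the extent length
                 * field hold the extent type (recorded,
                 * allocated-unrecorded, unrecorded, or continuation)
                 * and the low 30 bits hold the length in bytes.  A
                 * zero length terminates the list; an IB_CON entry
                 * locates the next block of descriptors and ends this
                 * one.  ib_offset is the extent's byte offset in the
                 * file, each extent being rounded up to a logical
                 * block boundary.
                 */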
                while (ndesc--) {
                        length = SWAP_32(sad->sad_ext_len);
                        if ((length & 0x3FFFFFFF) == 0) {
                                break;
                        }
                        if (((length >> 30) & IB_MASK) == IB_CON) {
                                if (ip->i_con == NULL) {
                                        ip->i_con_count = EXT_PER_MALLOC;
                                        ip->i_con_used = 0;
                                        ip->i_con_read = 0;
                                        ip->i_con = kmem_zalloc(
                                            ip->i_con_count *
                                            sizeof (struct icb_ext),
                                            KM_SLEEP);
                                }
                                con = &ip->i_con[ip->i_con_used];
                                con->ib_prn = 0;
                                con->ib_block = SWAP_32(sad->sad_ext_loc);
                                con->ib_count = length & 0x3FFFFFFF;
                                con->ib_flags = (length >> 30) & IB_MASK;
                                ip->i_con_used++;
                                sad++;
                                break;
                        }
                        iext->ib_prn = 0;
                        iext->ib_block = SWAP_32(sad->sad_ext_loc);
                        length = SWAP_32(sad->sad_ext_len);
                        iext->ib_count = length & 0x3FFFFFFF;
                        iext->ib_offset = offset;
                        iext->ib_marker1 = (uint32_t)0xAAAAAAAA;
                        iext->ib_marker2 = (uint32_t)0xBBBBBBBB;
                        offset += (iext->ib_count + udf_vfsp->udf_lbmask) &
                            (~udf_vfsp->udf_lbmask);

                        iext->ib_flags = (length >> 30) & IB_MASK;

                        ip->i_ext_used++;
                        iext++;
                        sad++;
                }
        } else if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
                /* Long allocation desc */
                struct long_ad *lad;

                ip->i_ext_used = 0;
                ip->i_ext_count = ndesc =
                    SWAP_32(fe->fe_len_adesc) / sizeof (struct long_ad);
                ip->i_ext_count =
                    ((ip->i_ext_count / EXT_PER_MALLOC) + 1) * EXT_PER_MALLOC;
                ip->i_ext = (struct icb_ext *)kmem_zalloc(ip->i_ext_count *
                    sizeof (struct icb_ext), KM_SLEEP);

                ip->i_cur_max_ext = ip->i_max_emb / sizeof (struct long_ad);
                ip->i_cur_max_ext--;

                if ((ip->i_astrat != STRAT_TYPE4) &&
                    (ip->i_astrat != STRAT_TYPE4096)) {
                        goto error_ret;
                }

                lad = (struct long_ad *)
                    (fe->fe_spec + SWAP_32(fe->fe_len_ear));
                iext = ip->i_ext;
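                /*
                 * Same decoding as the short_ad case above, except
                 * that each long_ad also carries the partition
                 * reference number for its extent.
                 */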
                while (ndesc--) {
                        length = SWAP_32(lad->lad_ext_len);
                        if ((length & 0x3FFFFFFF) == 0) {
                                break;
                        }
                        if (((length >> 30) & IB_MASK) == IB_CON) {
                                if (ip->i_con == NULL) {
                                        ip->i_con_count = EXT_PER_MALLOC;
                                        ip->i_con_used = 0;
                                        ip->i_con_read = 0;
                                        ip->i_con = kmem_zalloc(
                                            ip->i_con_count *
                                            sizeof (struct icb_ext),
                                            KM_SLEEP);
                                }
                                con = &ip->i_con[ip->i_con_used];
                                con->ib_prn = SWAP_16(lad->lad_ext_prn);
                                con->ib_block = SWAP_32(lad->lad_ext_loc);
                                con->ib_count = length & 0x3FFFFFFF;
                                con->ib_flags = (length >> 30) & IB_MASK;
                                ip->i_con_used++;
                                lad++;
                                break;
                        }
                        iext->ib_prn = SWAP_16(lad->lad_ext_prn);
                        iext->ib_block = SWAP_32(lad->lad_ext_loc);
                        iext->ib_count = length & 0x3FFFFFFF;
                        iext->ib_offset = offset;
                        iext->ib_marker1 = (uint32_t)0xAAAAAAAA;
                        iext->ib_marker2 = (uint32_t)0xBBBBBBBB;
                        offset += (iext->ib_count + udf_vfsp->udf_lbmask) &
                            (~udf_vfsp->udf_lbmask);

                        iext->ib_flags = (length >> 30) & IB_MASK;

                        ip->i_ext_used++;
                        iext++;
                        lad++;
                }
        } else if (ip->i_desc_type == ICB_FLAG_ONE_AD) {
                ASSERT(SWAP_32(fe->fe_len_ear) < udf_vfsp->udf_lbsize);

                if (SWAP_32(fe->fe_len_ear) > udf_vfsp->udf_lbsize) {
                        goto error_ret;
                }
        } else {
                /* Not to be used in UDF 1.50 */
                cmn_err(CE_NOTE, "Invalid Allocation Descriptor type %x\n",
                    ip->i_desc_type);
                goto error_ret;
        }


        if (icb_tag_flags & ICB_FLAG_SETUID) {
                ip->i_char = ISUID;
        } else {
                ip->i_char = 0;
        }
        if (icb_tag_flags & ICB_FLAG_SETGID) {
                ip->i_char |= ISGID;
        }
        if (icb_tag_flags & ICB_FLAG_STICKY) {
                ip->i_char |= ISVTX;
        }
        switch (fe->fe_icb_tag.itag_ftype) {
                case FTYPE_DIRECTORY :
                        ip->i_type = VDIR;
                        break;
                case FTYPE_FILE :
                        ip->i_type = VREG;
                        break;
                case FTYPE_BLOCK_DEV :
                        ip->i_type = VBLK;
                        break;
                case FTYPE_CHAR_DEV :
                        ip->i_type = VCHR;
                        break;
                case FTYPE_FIFO :
                        ip->i_type = VFIFO;
                        break;
                case FTYPE_C_ISSOCK :
                        ip->i_type = VSOCK;
                        break;
                case FTYPE_SYMLINK :
                        ip->i_type = VLNK;
                        break;
                default :
                        ip->i_type = VNON;
                        break;
        }

        if (ip->i_type == VBLK || ip->i_type == VCHR) {
                ip->i_rdev = makedevice(ip->i_major, ip->i_minor);
        }

        /*
         * Fill in the rest.  Don't bother with the vnode lock because nobody
         * should be looking at this vnode.  We have already invalidated the
         * pages if it had any so pageout shouldn't be referencing this vnode
         * and we are holding the write contents lock so a look up can't use
         * the vnode.
         */
        vp->v_vfsp = vfsp;
        vp->v_type = ip->i_type;
        vp->v_rdev = ip->i_rdev;
        if (ip->i_udf->udf_root_blkno == loc) {
                vp->v_flag = VROOT;
        } else {
                vp->v_flag = 0;
        }

        brelse(bp);
        *ipp = ip;
        rw_exit(&ip->i_contents);
        vn_exists(vp);
        return (0);
}

void
ud_iinactive(struct ud_inode *ip, struct cred *cr)
{
        int32_t busy = 0;
        struct vnode *vp;
        vtype_t type;
        caddr_t addr, addr1;
        size_t size, size1;


        ud_printf("ud_iinactive\n");

        /*
         * Get exclusive access to inode data.
         */
        rw_enter(&ip->i_contents, RW_WRITER);

        /*
         * Make sure no one reclaimed the inode before we put
         * it on the freelist or destroy it. We keep our 'hold'
         * on the vnode from vn_rele until we are ready to
         * do something with the inode (freelist/destroy).
         *
         * Pageout may put a VN_HOLD/VN_RELE at anytime during this
         * operation via an async putpage, so we must make sure
         * we don't free/destroy the inode more than once. ud_iget
         * may also put a VN_HOLD on the inode before it grabs
         * the i_contents lock. This is done so we don't kmem_free
         * an inode that a thread is waiting on.
         */
        vp = ITOV(ip);

        mutex_enter(&vp->v_lock);
        if (vp->v_count < 1) {
                cmn_err(CE_WARN, "ud_iinactive: v_count < 1\n");
                mutex_exit(&vp->v_lock);
                rw_exit(&ip->i_contents);
                return;
        }
        if ((vp->v_count > 1) || ((ip->i_flag & IREF) == 0)) {
                VN_RELE_LOCKED(vp);
                mutex_exit(&vp->v_lock);
                rw_exit(&ip->i_contents);
                return;
        }
        mutex_exit(&vp->v_lock);

        /*
         * For forced umount case: if i_udf is NULL, the contents of
         * the inode and all the pages have already been pushed back
         * to disk. It can be safely destroyed.
         */
        if (ip->i_udf == NULL) {
                addr = (caddr_t)ip->i_ext;
                size = sizeof (struct icb_ext) * ip->i_ext_count;
                ip->i_ext = 0;
                ip->i_ext_count = ip->i_ext_used = 0;
                addr1 = (caddr_t)ip->i_con;
                size1 = sizeof (struct icb_ext) * ip->i_con_count;
                ip->i_con = 0;
                ip->i_con_count = ip->i_con_used = ip->i_con_read = 0;
                rw_exit(&ip->i_contents);
                vn_invalid(vp);

                mutex_enter(&ud_nino_lock);
                ud_cur_inodes--;
                mutex_exit(&ud_nino_lock);

                cv_destroy(&ip->i_wrcv);  /* throttling */
                rw_destroy(&ip->i_rwlock);
                rw_destroy(&ip->i_contents);
                kmem_free(addr, size);
                kmem_free(addr1, size1);
                vn_free(vp);
                kmem_free(ip, sizeof (struct ud_inode));
                return;
        }

        if ((ip->i_udf->udf_flags & UDF_FL_RDONLY) == 0) {
                if (ip->i_nlink <= 0) {
                        ip->i_marker3 = (uint32_t)0xDDDD0000;
                        ip->i_nlink = 1;     /* prevent free-ing twice */
                        (void) ud_itrunc(ip, 0, 0, cr);
                        type = ip->i_type;
                        ip->i_perm = 0;
                        ip->i_uid = 0;
                        ip->i_gid = 0;
                        ip->i_rdev = 0;      /* Zero in core version of rdev */
                        mutex_enter(&ip->i_tlock);
                        ip->i_flag |= IUPD|ICHG;
                        mutex_exit(&ip->i_tlock);
                        ud_ifree(ip, type);
                        ip->i_icb_prn = 0xFFFF;
                } else if (!IS_SWAPVP(vp)) {
                        /*
                         * Write the inode out if dirty. Pages are
                         * written back and put on the freelist.
                         */
                        (void) ud_syncip(ip, B_FREE | B_ASYNC, 0);
                        /*
                         * Do nothing if inode is now busy -- inode may
                         * have gone busy because ud_syncip
                         * releases/reacquires the i_contents lock
                         */
                        mutex_enter(&vp->v_lock);
                        if (vp->v_count > 1) {
                                VN_RELE_LOCKED(vp);
                                mutex_exit(&vp->v_lock);
                                rw_exit(&ip->i_contents);
                                return;
                        }
                        mutex_exit(&vp->v_lock);
                } else {
                        ud_iupdat(ip, 0);
                }
        }


        /*
         * Put the inode on the end of the free list.
         * Possibly in some cases it would be better to
         * put the inode at the head of the free list,
         * (e.g.: where i_perm == 0 || i_number == 0)
         * but I will think about that later.
         * (i_number is rarely 0 - only after an i/o error in ud_iget,
         * where i_perm == 0, the inode will probably be wanted
         * again soon for an ialloc, so possibly we should keep it)
         */
        /*
         * If inode is invalid or there is no page associated with
         * this inode, put the inode in the front of the free list.
         * Since we have a VN_HOLD on the vnode, and checked that it
         * wasn't already on the freelist when we entered, we can safely
         * put it on the freelist even if another thread puts a VN_HOLD
         * on it (pageout/ud_iget).
         */
tryagain:
        mutex_enter(&ud_nino_lock);
        if (vn_has_cached_data(vp)) {
                mutex_exit(&ud_nino_lock);
                mutex_enter(&vp->v_lock);
                VN_RELE_LOCKED(vp);
                mutex_exit(&vp->v_lock);
                mutex_enter(&ip->i_tlock);
                mutex_enter(&udf_ifree_lock);
                ud_add_to_free_list(ip, UD_END);
                mutex_exit(&udf_ifree_lock);
                ip->i_flag &= IMODTIME;
                mutex_exit(&ip->i_tlock);
                rw_exit(&ip->i_contents);
        } else if (busy || ud_cur_inodes < ud_max_inodes) {
                mutex_exit(&ud_nino_lock);
                /*
                 * We're not over our high water mark, or it's
                 * not safe to kmem_free the inode, so put it
                 * on the freelist.
                 */
                mutex_enter(&vp->v_lock);
                if (vn_has_cached_data(vp)) {
                        cmn_err(CE_WARN, "ud_iinactive: v_pages not NULL\n");
                }
                VN_RELE_LOCKED(vp);
                mutex_exit(&vp->v_lock);

                mutex_enter(&ip->i_tlock);
                mutex_enter(&udf_ifree_lock);
                ud_add_to_free_list(ip, UD_BEGIN);
                mutex_exit(&udf_ifree_lock);
                ip->i_flag &= IMODTIME;
                mutex_exit(&ip->i_tlock);
                rw_exit(&ip->i_contents);
        } else {
                mutex_exit(&ud_nino_lock);
                if (vn_has_cached_data(vp)) {
                        cmn_err(CE_WARN, "ud_iinactive: v_pages not NULL\n");
                }
                /*
                 * Try to free the inode. We must make sure
                 * it's o.k. to destroy this inode. We can't destroy
                 * if a thread is waiting for this inode. If we can't get the
                 * cache now, put it back on the freelist.
                 */
                if (!mutex_tryenter(&ud_icache_lock)) {
                        busy = 1;
                        goto tryagain;
                }
                mutex_enter(&vp->v_lock);
                if (vp->v_count > 1) {
                        /* inode is wanted in ud_iget */
                        busy = 1;
                        mutex_exit(&vp->v_lock);
                        mutex_exit(&ud_icache_lock);
                        goto tryagain;
                }
                mutex_exit(&vp->v_lock);
                remque(ip);
                ip->i_forw = ip;
                ip->i_back = ip;
                mutex_enter(&ud_nino_lock);
                ud_cur_inodes--;
                mutex_exit(&ud_nino_lock);
                mutex_exit(&ud_icache_lock);
                if (ip->i_icb_prn != 0xFFFF) {
                        ud_iupdat(ip, 0);
                }
                addr = (caddr_t)ip->i_ext;
                size = sizeof (struct icb_ext) * ip->i_ext_count;
                ip->i_ext = 0;
                ip->i_ext_count = ip->i_ext_used = 0;
                addr1 = (caddr_t)ip->i_con;
                size1 = sizeof (struct icb_ext) * ip->i_con_count;
                ip->i_con = 0;
                ip->i_con_count = ip->i_con_used = ip->i_con_read = 0;
                cv_destroy(&ip->i_wrcv);  /* throttling */
                rw_destroy(&ip->i_rwlock);
                rw_exit(&ip->i_contents);
                rw_destroy(&ip->i_contents);
                kmem_free(addr, size);
                kmem_free(addr1, size1);
                ip->i_marker3 = (uint32_t)0xDDDDDDDD;
                vn_free(vp);
                kmem_free(ip, sizeof (struct ud_inode));
        }
}


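/*
 * Write the in-core inode back to its on-disk file_entry.  If any of
 * the update flags are set, the file_entry is read, refreshed from the
 * in-core fields, and written back; waitfor != 0 forces a synchronous
 * write.  Otherwise the buffer is delayed-written and IBDWRITE is set
 * so that a later synchronous update knows to flush it.
 */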
void
ud_iupdat(struct ud_inode *ip, int32_t waitfor)
{
        uint16_t flag, tag_flags;
        int32_t error;
        struct buf *bp;
        struct udf_vfs *udf_vfsp;
        struct file_entry *fe;
        uint16_t crc_len = 0;

        ASSERT(RW_WRITE_HELD(&ip->i_contents));

        ud_printf("ud_iupdat\n");
        /*
         * Return if file system has been forcibly umounted.
         */
        if (ip->i_udf == NULL) {
                return;
        }

        udf_vfsp = ip->i_udf;
        flag = ip->i_flag;   /* Atomic read */
        if ((flag & (IUPD|IACC|ICHG|IMOD|IMODACC)) != 0) {
                if (udf_vfsp->udf_flags & UDF_FL_RDONLY) {
                        ip->i_flag &= ~(IUPD|IACC|ICHG|IMOD|IMODACC|IATTCHG);
                        return;
                }

                bp = ud_bread(ip->i_dev,
                    ip->i_icb_lbano << udf_vfsp->udf_l2d_shift,
                    ip->i_udf->udf_lbsize);
                if (bp->b_flags & B_ERROR) {
                        brelse(bp);
                        return;
                }
                fe = (struct file_entry *)bp->b_un.b_addr;
                if (ud_verify_tag_and_desc(&fe->fe_tag, UD_FILE_ENTRY,
                    ip->i_icb_block,
                    1, ip->i_udf->udf_lbsize) != 0) {
                        brelse(bp);
                        return;
                }

                mutex_enter(&ip->i_tlock);
                if (ip->i_flag & (IUPD|IACC|ICHG)) {
                        IMARK(ip);
                }
                ip->i_flag &= ~(IUPD|IACC|ICHG|IMOD|IMODACC);
                mutex_exit(&ip->i_tlock);

                fe->fe_uid = SWAP_32(ip->i_uid);
                fe->fe_gid = SWAP_32(ip->i_gid);

                fe->fe_perms = SWAP_32(ip->i_perm);

                fe->fe_lcount = SWAP_16(ip->i_nlink);
                fe->fe_info_len = SWAP_64(ip->i_size);
                fe->fe_lbr = SWAP_64(ip->i_lbr);

                ud_utime2dtime(&ip->i_atime, &fe->fe_acc_time);
                ud_utime2dtime(&ip->i_mtime, &fe->fe_mod_time);
                ud_utime2dtime(&ip->i_ctime, &fe->fe_attr_time);

                if (ip->i_char & ISUID) {
                        tag_flags = ICB_FLAG_SETUID;
                } else {
                        tag_flags = 0;
                }
                if (ip->i_char & ISGID) {
                        tag_flags |= ICB_FLAG_SETGID;
                }
                if (ip->i_char & ISVTX) {
                        tag_flags |= ICB_FLAG_STICKY;
                }
                tag_flags |= ip->i_desc_type;

                /*
                 * The following was removed, since the file is no
                 * longer guaranteed to be contiguous:
                 * if (ip->i_astrat  == STRAT_TYPE4) {
                 *	tag_flags |= ICB_FLAG_CONTIG;
                 * }
                 */

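                /*
                 * 0x3C3 masks the fields recomputed here: the low two
                 * allocation descriptor type bits (0x3) and the setuid,
                 * setgid, sticky and (formerly) contiguous flag bits
                 * (0x3C0); all other itag_flags bits are preserved.
                 */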
                fe->fe_icb_tag.itag_flags &= ~SWAP_16((uint16_t)0x3C3);
                fe->fe_icb_tag.itag_strategy = SWAP_16(ip->i_astrat);
                fe->fe_icb_tag.itag_flags |= SWAP_16(tag_flags);

                ud_update_regid(&fe->fe_impl_id);

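                /*
                 * The descriptor CRC covers everything after the tag:
                 * the fixed part of the file_entry up to fe_spec, the
                 * extended attributes, and either the embedded data
                 * (ICB_FLAG_ONE_AD) or the allocation descriptors.
                 */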
                crc_len = offsetof(struct file_entry, fe_spec) +
                    SWAP_32(fe->fe_len_ear);
                if (ip->i_desc_type == ICB_FLAG_ONE_AD) {
                        crc_len += ip->i_size;
                        fe->fe_len_adesc = SWAP_32(((uint32_t)ip->i_size));
                } else if ((ip->i_size != 0) && (ip->i_ext != NULL) &&
                    (ip->i_ext_used != 0)) {

                        if ((error = ud_read_icb_till_off(ip,
                            ip->i_size)) == 0) {
                                if (ip->i_astrat == STRAT_TYPE4) {
                                        error = ud_updat_ext4(ip, fe);
                                } else if (ip->i_astrat == STRAT_TYPE4096) {
                                        error = ud_updat_ext4096(ip, fe);
                                }
                                if (error) {
                                        udf_vfsp->udf_mark_bad = 1;
                                }
                        }
                        crc_len += SWAP_32(fe->fe_len_adesc);
                } else {
                        fe->fe_len_adesc = 0;
                }

                /*
                 * Zero out the rest of the block
                 */
                bzero(bp->b_un.b_addr + crc_len,
                    ip->i_udf->udf_lbsize - crc_len);

                ud_make_tag(ip->i_udf, &fe->fe_tag,
                    UD_FILE_ENTRY, ip->i_icb_block, crc_len);


                if (waitfor) {
                        BWRITE(bp);

                        /*
                         * Synchronous write has guaranteed that inode
                         * has been written on disk so clear the flag
                         */
                        ip->i_flag &= ~(IBDWRITE);
                } else {
                        bdwrite(bp);

                        /*
                         * This write hasn't guaranteed that the inode has
                         * been written to disk.  Since all update flags on
                         * the inode are cleared, we must remember the
                         * condition in case the inode is to be updated
                         * synchronously later (e.g. fsync()/fdatasync())
                         * and has not been modified in the meantime.
                         */
1295                         ip->i_flag |= (IBDWRITE);
1296                 }
1297         } else {
1298                 /*
1299                  * In case previous inode update was done asynchronously
1300                  * (IBDWRITE) and this inode update request wants guaranteed
1301                  * (synchronous) disk update, flush the inode.
1302                  */
1303                 if (waitfor && (flag & IBDWRITE)) {
1304                         blkflush(ip->i_dev,
1305                             (daddr_t)fsbtodb(udf_vfsp, ip->i_icb_lbano));
1306                         ip->i_flag &= ~(IBDWRITE);
1307                 }
1308         }
1309 }
1310 
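     /*
      * Write the in-core extent list (ip->i_ext) back in on-disk form.
      * The first few allocation descriptors are embedded in the
      * file_entry itself; the rest are written to the continuation
      * blocks recorded in ip->i_con, each headed by an alloc_ext_desc.
      * Continuation blocks that are no longer needed are freed at the
      * end.
      */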
1311 int32_t
1312 ud_updat_ext4(struct ud_inode *ip, struct file_entry *fe)
1313 {
1314         uint32_t dummy;
1315         int32_t elen, ndent, index, count, con_index;
1316         daddr_t bno;
1317         struct buf *bp;
1318         struct short_ad *sad;
1319         struct long_ad *lad;
1320         struct icb_ext *iext, *icon;
1321 
1323         ASSERT(ip);
1324         ASSERT(fe);
1325         ASSERT((ip->i_desc_type == ICB_FLAG_SHORT_AD) ||
1326             (ip->i_desc_type == ICB_FLAG_LONG_AD));
1327 
1328         if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
1329                 elen = sizeof (struct short_ad);
1330                 sad = (struct short_ad *)
1331                     (fe->fe_spec + SWAP_32(fe->fe_len_ear));
1332         } else if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
1333                 elen = sizeof (struct long_ad);
1334                 lad = (struct long_ad *)
1335                     (fe->fe_spec + SWAP_32(fe->fe_len_ear));
1336         } else {
1337                 /* This cannot happen; return an error. */
1338                 return (EINVAL);
1339         }
1340 
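         /*
          * Number of allocation descriptors that fit in the space
          * embedded in the file_entry.
          */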
1341         ndent = ip->i_max_emb / elen;
1342 
1343         if (ip->i_ext_used < ndent) {
1344 
1345                 if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
1346                         ud_make_sad(ip->i_ext, sad, ip->i_ext_used);
1347                 } else {
1348                         ud_make_lad(ip->i_ext, lad, ip->i_ext_used);
1349                 }
1350                 fe->fe_len_adesc = SWAP_32(ip->i_ext_used * elen);
1351                 con_index = 0;
1352         } else {
1353 
1354                 con_index = index = 0;
1355 
1356                 while (index < ip->i_ext_used) {
1357                         if (index == 0) {
1358                                 /*
1359                                  * bp has already been read in;
1360                                  * the first few extents go into
1361                                  * the file_entry itself
1362                                  */
1363                                 count = ndent - 1;
1364                                 fe->fe_len_adesc = SWAP_32(ndent * elen);
1365                                 bp = NULL;
1366 
1367                                 /*
1368                                  * The last slot is reserved for the continuation extent
1369                                  */
1370                                 icon = &ip->i_con[con_index];
1371                         } else {
1372                                 /*
1373                                  * Read the buffer
1374                                  */
1375                                 icon = &ip->i_con[con_index];
1376 
1377                                 bno = ud_xlate_to_daddr(ip->i_udf,
1378                                     icon->ib_prn, icon->ib_block,
1379                                     icon->ib_count >> ip->i_udf->udf_l2d_shift,
1380                                     &dummy);
1381                                 bp = ud_bread(ip->i_dev,
1382                                     bno << ip->i_udf->udf_l2d_shift,
1383                                     ip->i_udf->udf_lbsize);
1384                                 if (bp->b_flags & B_ERROR) {
1385                                         brelse(bp);
1386                                         return (EIO);
1387                                 }
1388 
1389                                 /*
1390                                  * Figure out how many extents fit
1391                                  * in this block
1392                                  */
1393                                 count = (bp->b_bcount -
1394                                     sizeof (struct alloc_ext_desc)) / elen;
1395                                 if (count > (ip->i_ext_used - index)) {
1396                                         count = ip->i_ext_used - index;
1397                                 } else {
1398                                         count--;
1399                                 }
1400                                 con_index++;
1401                                 if (con_index >= ip->i_con_used) {
1402                                         icon = NULL;
1403                                 } else {
1404                                         icon = &ip->i_con[con_index];
1405                                 }
1406                         }
1407 
1410                         /*
1411                          * Convert to on-disk form and
1412                          * update the allocation descriptors
1413                          */
1414                         iext = &ip->i_ext[index];
1415                         if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
1416                                 if (index != 0) {
1417                                         sad = (struct short_ad *)
1418                                             (bp->b_un.b_addr +
1419                                             sizeof (struct alloc_ext_desc));
1420                                 }
1421                                 ud_make_sad(iext, sad, count);
1422                                 sad += count;
1423                                 if (icon != NULL) {
1424                                         ud_make_sad(icon, sad, 1);
1425                                 }
1426                         } else {
1427                                 if (index != 0) {
1428                                         lad = (struct long_ad *)
1429                                             (bp->b_un.b_addr +
1430                                             sizeof (struct alloc_ext_desc));
1431                                 }
1432                                 ud_make_lad(iext, lad, count);
1433                                 lad += count;
1434                                 if (icon != NULL) {
1435                                         ud_make_lad(icon, lad, 1);
1436                                 }
1437                         }
1438 
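                             /*
                              * Complete the continuation block just filled:
                              * record the total length of its descriptors,
                              * point aed_rev_ael back at the previous
                              * allocation extent location, and stamp the
                              * descriptor tag.
                              */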
1439                         if (con_index != 0) {
1440                                 struct alloc_ext_desc *aed;
1441                                 int32_t sz;
1442                                 struct icb_ext *oicon;
1443 
1444                                 oicon = &ip->i_con[con_index - 1];
1445                                 sz = count * elen;
1446                                 if (icon != NULL) {
1447                                         sz += elen;
1448                                 }
1449                                 aed = (struct alloc_ext_desc *)bp->b_un.b_addr;
1450                                 aed->aed_len_aed = SWAP_32(sz);
1451                                 if (con_index == 1) {
1452                                         aed->aed_rev_ael =
1453                                             SWAP_32(ip->i_icb_block);
1454                                 } else {
1455                                         aed->aed_rev_ael =
1456                                             SWAP_32(oicon->ib_block);
1457                                 }
1458                                 sz += sizeof (struct alloc_ext_desc);
1459                                 ud_make_tag(ip->i_udf, &aed->aed_tag,
1460                                     UD_ALLOC_EXT_DESC, oicon->ib_block, sz);
1461                         }
1462 
1463                         /*
1464                          * Write back to disk
1465                          */
1466                         if (bp != NULL) {
1467                                 BWRITE(bp);
1468                         }
1469                         index += count;
1470                 }
1471 
1472         }
1473 
1474         if (con_index != ip->i_con_used) {
1475                 int32_t lbmask, l2b, temp;
1476 
1477                 temp = con_index;
1478                 lbmask = ip->i_udf->udf_lbmask;
1479                 l2b = ip->i_udf->udf_l2b_shift;
1480                 /*
1481                  * Free unused continuation extents
1482                  */
1483                 for (; con_index < ip->i_con_used; con_index++) {
1484                         icon = &ip->i_con[con_index];
1485                         count = (icon->ib_count + lbmask) >> l2b;
1486                         ud_free_space(ip->i_udf->udf_vfs, icon->ib_prn,
1487                             icon->ib_block, count);
1488                         count = (count << l2b) - sizeof (struct alloc_ext_desc);
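                             /*
                              * Shrink the in-core extent capacity by the
                              * number of descriptors the freed block held,
                              * less the one slot reserved for chaining to
                              * the next continuation block.
                              */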
1489                         ip->i_cur_max_ext -= (count / elen) - 1;
1490                 }
1491                 ip->i_con_used = temp;
1492         }
1493         return (0);
1494 }
1495 
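     /*
      * Writing back strategy-type-4096 extent lists is not
      * implemented; on the resulting error the caller sets
      * udf_mark_bad on the file system.
      */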
1496 /* ARGSUSED */
1497 int32_t
1498 ud_updat_ext4096(struct ud_inode *ip, struct file_entry *fe)
1499 {
1500         return (ENXIO);
1501 }
1502 
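     /*
      * Convert an array of in-core icb_ext entries to on-disk
      * short_ad allocation descriptors.  Per ECMA-167, the low 30
      * bits of the extent length field carry the length in bytes
      * and the top two bits carry the extent type (ib_flags).
      */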
1503 void
1504 ud_make_sad(struct icb_ext *iext, struct short_ad *sad, int32_t count)
1505 {
1506         int32_t index = 0, scount;
1507 
1508         ASSERT(iext);
1509         ASSERT(sad);
1510 
1511         if (count != 0) {
1512                 ASSERT(count > 0);
1513                 while (index < count) {
1514                         scount = (iext->ib_count & 0x3FFFFFFF) |
1515                             (iext->ib_flags << 30);
1516                         sad->sad_ext_len = SWAP_32(scount);
1517                         sad->sad_ext_loc = SWAP_32(iext->ib_block);
1518                         sad++;
1519                         iext++;
1520                         index++;
1521                 }
1522         }
1523 }
1524 
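     /*
      * Same conversion as ud_make_sad(), but to long_ad descriptors,
      * which also record the partition reference number.
      */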
1525 void
1526 ud_make_lad(struct icb_ext *iext, struct long_ad *lad, int32_t count)
1527 {
1528         int32_t index = 0, scount;
1529 
1530         ASSERT(iext);
1531         ASSERT(lad);
1532 
1533         if (count != 0) {
1534                 ASSERT(count > 0);
1535 
1536                 while (index < count) {
1537                         lad->lad_ext_prn = SWAP_16(iext->ib_prn);
1538                         scount = (iext->ib_count & 0x3FFFFFFF) |
1539                             (iext->ib_flags << 30);
1540                         lad->lad_ext_len = SWAP_32(scount);
1541                         lad->lad_ext_loc = SWAP_32(iext->ib_block);
1542                         lad++;
1543                         iext++;
1544                         index++;
1545                 }
1546         }
1547 }
1548 
1549 /*
1550  * Set the size of inode ip to `length' bytes, freeing
1551  * any disk blocks that fall beyond the new end of file.
1553  */
1554 /* ARGSUSED */
1555 int
1556 ud_itrunc(struct ud_inode *oip, u_offset_t length,
1557     int32_t flags, struct cred *cr)
1558 {
1559         int32_t error, boff;
1560         off_t bsize;
1561         mode_t mode;
1562         struct udf_vfs *udf_vfsp;
1563 
1564         ud_printf("ud_itrunc\n");
1565 
1566         ASSERT(RW_WRITE_HELD(&oip->i_contents));
1567         udf_vfsp = oip->i_udf;
1568         bsize = udf_vfsp->udf_lbsize;
1569 
1570         /*
1571          * We only allow truncation of regular files and directories
1572  * to arbitrary lengths here.  In addition, we allow symbolic
1573          * links to be truncated only to zero length.  Other inode
1574          * types cannot have their length set here.
1575          */
1576         mode = oip->i_type;
1577         if (mode == VFIFO) {
1578                 return (0);
1579         }
1580         if ((mode != VREG) && (mode != VDIR) &&
1581             (!(mode == VLNK && length == 0))) {
1582                 return (EINVAL);
1583         }
1584         if (length == oip->i_size) {
1585                 /* update ctime and mtime to please POSIX tests */
1586                 mutex_enter(&oip->i_tlock);
1587                 oip->i_flag |= ICHG|IUPD;
1588                 mutex_exit(&oip->i_tlock);
1589                 return (0);
1590         }
1591 
1592         boff = blkoff(udf_vfsp, length);
1593 
1594         if (length > oip->i_size) {
1595                 /*
1596          * Truncate-up case.  ud_bmap_write will ensure that the right blocks
1597                  * are allocated.  This includes doing any work needed for
1598                  * allocating the last block.
1599                  */
1600                 if (boff == 0) {
1601                         error = ud_bmap_write(oip, length - 1,
1602                             (int)bsize, 0, cr);
1603                 } else {
1604                         error = ud_bmap_write(oip, length - 1, boff, 0, cr);
1605                 }
1606                 if (error == 0) {
1607                         u_offset_t osize = oip->i_size;
1608                         oip->i_size  = length;
1609 
1610                         /*
1611                          * Make sure we zero out the remaining bytes of
1612                          * the page in case a mmap scribbled on it. We
1613                          * can't prevent a mmap from writing beyond EOF
1614                          * on the last page of a file.
1615                          */
1616                         if ((boff = blkoff(udf_vfsp, osize)) != 0) {
1617                                 pvn_vpzero(ITOV(oip), osize,
1618                                     (uint32_t)(bsize - boff));
1619                         }
1620                         mutex_enter(&oip->i_tlock);
1621                         oip->i_flag |= ICHG;
1622                         ITIMES_NOLOCK(oip);
1623                         mutex_exit(&oip->i_tlock);
1624                 }
1625                 return (error);
1626         }
1627 
1628         /*
1629          * Update the pages of the file.  If the file is not being
1630          * truncated to a block boundary, the contents of the
1631          * pages following the end of the file must be zeroed
1632          * in case they ever become accessible again because
1633          * of subsequent file growth.
1634          */
1635         if (boff == 0) {
1636                 (void) pvn_vplist_dirty(ITOV(oip), length,
1637                     ud_putapage, B_INVAL | B_TRUNC, CRED());
1638         } else {
1639                 /*
1640                  * Make sure that the last block is properly allocated.
1641                  * We only really have to do this if the last block is
1642                  * actually allocated.  Just to be sure, we do it now
1643                  * independent of current allocation.
1644                  */
1645                 error = ud_bmap_write(oip, length - 1, boff, 0, cr);
1646                 if (error) {
1647                         return (error);
1648                 }
1649 
1650                 pvn_vpzero(ITOV(oip), length, (uint32_t)(bsize - boff));
1651 
1652                 (void) pvn_vplist_dirty(ITOV(oip), length,
1653                     ud_putapage, B_INVAL | B_TRUNC, CRED());
1654         }
1655 
1657         /* Free the blocks */
1658         if (oip->i_desc_type == ICB_FLAG_ONE_AD) {
1659                 if (length > oip->i_max_emb) {
1660                         return (EFBIG);
1661                 }
1662                 oip->i_size = length;
1663                 mutex_enter(&oip->i_tlock);
1664                 oip->i_flag |= ICHG|IUPD;
1665                 mutex_exit(&oip->i_tlock);
1666                 ud_iupdat(oip, 1);
1667         } else {
1668                 if ((error = ud_read_icb_till_off(oip, oip->i_size)) != 0) {
1669                         return (error);
1670                 }
1671 
1672                 if (oip->i_astrat == STRAT_TYPE4) {
1673                         ud_trunc_ext4(oip, length);
1674                 } else if (oip->i_astrat == STRAT_TYPE4096) {
1675                         ud_trunc_ext4096(oip, length);
1676                 }
1677         }
1678 
1679         return (0);
1681 }
1682 
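     /*
      * Truncate a strategy-type-4 extent list to "length" bytes:
      * find the extent containing the new end of file and trim it,
      * free the blocks of every later extent, and finally release
      * any continuation blocks the shortened list no longer needs.
      */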
1683 void
1684 ud_trunc_ext4(struct ud_inode *ip, u_offset_t length)
1685 {
1686         int32_t index, l2b, count, ecount;
1687         int32_t elen, ndent, nient;
1688         u_offset_t ext_beg, ext_end;
1689         struct icb_ext *iext, *icon;
1690         int32_t lbmask, ext_used;
1691         uint32_t loc;
1692         struct icb_ext text;
1693         uint32_t con_freed;
1694 
1695         ASSERT((ip->i_desc_type == ICB_FLAG_SHORT_AD) ||
1696             (ip->i_desc_type == ICB_FLAG_LONG_AD));
1697 
1698         if (ip->i_ext_used == 0) {
1699                 return;
1700         }
1701 
1702         ext_used = ip->i_ext_used;
1703 
1704         lbmask = ip->i_udf->udf_lbmask;
1705         l2b = ip->i_udf->udf_l2b_shift;
1706 
1707         ASSERT(ip->i_ext);
1708 
1709         ip->i_lbr = 0;
1710         for (index = 0; index < ext_used; index++) {
1711                 iext = &ip->i_ext[index];
1712 
1713                 /*
1714                  * Find the beginning and end
1715                  * of the current extent
1716                  */
1717                 ext_beg = iext->ib_offset;
1718                 ext_end = iext->ib_offset +
1719                     ((iext->ib_count + lbmask) & ~lbmask);
1720 
1721                 /*
1722                  * This is the extent that contains offset "length";
1723                  * make a copy of it and remember its
1724                  * index, so we can use the copy to
1725                  * free the trailing blocks below.
1726                  */
1727                 if ((length <= ext_end) && (length >= ext_beg)) {
1728                         text = *iext;
1729 
1730                         iext->ib_count = length - ext_beg;
1731                         ip->i_ext_used = index + 1;
1732                         break;
1733                 }
1734                 if (iext->ib_flags != IB_UN_RE_AL) {
1735                         ip->i_lbr += iext->ib_count >> l2b;
1736                 }
1737         }
1738         if (ip->i_ext_used != index) {
1739                 if (iext->ib_flags != IB_UN_RE_AL) {
1740                         ip->i_lbr +=
1741                             ((iext->ib_count + lbmask) & ~lbmask) >> l2b;
1742                 }
1743         }
1744 
1745         ip->i_size = length;
1746         mutex_enter(&ip->i_tlock);
1747         ip->i_flag |= ICHG|IUPD;
1748         mutex_exit(&ip->i_tlock);
1749         ud_iupdat(ip, 1);
1750 
1751         /*
1752          * Free the unused space
1753          */
1754         if (text.ib_flags != IB_UN_RE_AL) {
1755                 count = (ext_end - length) >> l2b;
1756                 if (count) {
1757                         loc = text.ib_block +
1758                             (((length - text.ib_offset) + lbmask) >> l2b);
1759                         ud_free_space(ip->i_udf->udf_vfs, text.ib_prn,
1760                             loc, count);
1761                 }
1762         }
1763         for (index = ip->i_ext_used; index < ext_used; index++) {
1764                 iext = &ip->i_ext[index];
1765                 if (iext->ib_flags != IB_UN_RE_AL) {
1766                         count = (iext->ib_count + lbmask) >> l2b;
1767                         ud_free_space(ip->i_udf->udf_vfs, iext->ib_prn,
1768                             iext->ib_block, count);
1769                 }
1770                 bzero(iext, sizeof (struct icb_ext));
1772         }
1773 
1774         /*
1775          * release any continuation blocks
1776          */
1777         if (ip->i_con) {
1778 
1779                 ASSERT(ip->i_con_count >= ip->i_con_used);
1780 
1781                 /*
1782                  * Find out how many continuation blocks
1783                  * are still required and release the rest
1784                  */
1785                 if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
1786                         elen = sizeof (struct short_ad);
1787                 } else if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
1788                         elen = sizeof (struct long_ad);
1789                 }
1790                 ndent = ip->i_max_emb / elen;
1791                 if (ip->i_ext_used > ndent) {
1792                         ecount = ip->i_ext_used - ndent;
1793                 } else {
1794                         ecount = 0;
1795                 }
1796                 con_freed = 0;
1797                 for (index = 0; index < ip->i_con_used; index++) {
1798                         icon = &ip->i_con[index];
1799                         nient = icon->ib_count -
1800                             (sizeof (struct alloc_ext_desc) + elen);
1801                         /* minus the header and the reserved continuation slot */
1802                         nient /= elen;
1803                         if (ecount) {
1804                                 if (ecount > nient) {
1805                                         ecount -= nient;
1806                                 } else {
1807                                         ecount = 0;
1808                                 }
1809                         } else {
1810                                 count = ((icon->ib_count + lbmask) &
1811                                     ~lbmask) >> l2b;
1812                                 ud_free_space(ip->i_udf->udf_vfs,
1813                                     icon->ib_prn, icon->ib_block, count);
1814                                 con_freed++;
1815                                 ip->i_cur_max_ext -= nient;
1816                         }
1817                 }
1818                 /*
1819                  * set the continuation extents used(i_con_used)i to correct
1820                  * value. It is possible for i_con_used to be zero,
1821                  * if we free up all continuation extents. This happens
1822                  * when ecount is 0 before entering the for loop above.
1823                  */
1824                 ip->i_con_used -= con_freed;
1825                 if (ip->i_con_read > ip->i_con_used) {
1826                         ip->i_con_read = ip->i_con_used;
1827                 }
1828         }
1829 }
1830 
1831 void
1832 ud_trunc_ext4096(struct ud_inode *ip, u_offset_t length)
1833 {
1834         /*
1835          * Truncate code is the same for
1836          * both file of type 4 and 4096
1837          */
1838         ud_trunc_ext4(ip, length);
1839 }
1840 
1841 /*
1842  * Remove any inodes in the inode cache belonging to dev.
1843  *
1844  * There should not be any active ones; return an error if any are
1845  * found, but still invalidate the others (N.B.: this is a user
1846  * error, not a system error).
1847  */
1852 int32_t
1853 ud_iflush(struct vfs *vfsp)
1854 {
1855         int32_t index, busy = 0;
1856         union ihead *ih;
1857         struct udf_vfs *udf_vfsp;
1858         dev_t dev;
1859         struct vnode *rvp, *vp;
1860         struct ud_inode *ip, *next;
1861 
1862         ud_printf("ud_iflush\n");
1863         udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
1864         rvp = udf_vfsp->udf_root;
1865         dev = vfsp->vfs_dev;
1866 
1867         mutex_enter(&ud_icache_lock);
1868         for (index = 0; index < UD_HASH_SZ; index++) {
1869                 ih = &ud_ihead[index];
1870 
1871                 next = ih->ih_chain[0];
1872                 while (next != (struct ud_inode *)ih) {
1873                         ip = next;
1874                         next = ip->i_forw;
1875                         if (ip->i_dev != dev) {
1876                                 continue;
1877                         }
1878                         vp = ITOV(ip);
1879                         /*
1880                          * root inode is processed by the caller
1881                          */
1882                         if (vp == rvp) {
1883                                 if (vp->v_count > 1) {
1884                                         busy = -1;
1885                                 }
1886                                 continue;
1887                         }
1888                         if (ip->i_flag & IREF) {
1889                                 /*
1890                                  * Set error indicator for return value,
1891                                  * but continue invalidating other
1892                                  * inodes.
1893                                  */
1894                                 busy = -1;
1895                                 continue;
1896                         }
1897 
1898                         rw_enter(&ip->i_contents, RW_WRITER);
1899                         remque(ip);
1900                         ip->i_forw = ip;
1901                         ip->i_back = ip;
1902                         /*
1903                          * Hold the vnode, since this is no longer
1904                          * done in VOP_PUTPAGE.
1905                          */
1906                         VN_HOLD(vp);
1907                         /*
1908                          * XXX Synchronous write holding
1909                          * cache lock
1910                          */
1911                         (void) ud_syncip(ip, B_INVAL, I_SYNC);
1912                         rw_exit(&ip->i_contents);
1913                         VN_RELE(vp);
1914                 }
1915         }
1916         mutex_exit(&ud_icache_lock);
1917 
1918         return (busy);
1919 }
1920 
1921 
1922 /*
1923  * Check mode permission on inode.  Mode is READ, WRITE or EXEC.
1924  * In the case of WRITE, the read-only status of the file system
1925  * is checked.  The applicable mode bits are compared with the
1926  * requested form of access.  If bits are missing, the secpolicy
1927  * function will check for privileges.
1928  */
1929 int
1930 ud_iaccess(struct ud_inode *ip, int32_t mode, struct cred *cr, int dolock)
1931 {
1932         int shift = 0;
1933         int ret = 0;
1934 
1935         if (dolock)
1936                 rw_enter(&ip->i_contents, RW_READER);
1937         ASSERT(RW_LOCK_HELD(&ip->i_contents));
1938 
1939         ud_printf("ud_iaccess\n");
1940         if (mode & IWRITE) {
1941                 /*
1942                  * Disallow write attempts on read-only
1943                  * file systems, unless the file is a block
1944                  * or character device or a FIFO.
1945                  */
1946                 if (ip->i_udf->udf_flags & UDF_FL_RDONLY) {
1947                         if ((ip->i_type != VCHR) &&
1948                             (ip->i_type != VBLK) &&
1949                             (ip->i_type != VFIFO)) {
1950                                 ret = EROFS;
1951                                 goto out;
1952                         }
1953                 }
1954         }
1955 
1956         /*
1957          * Access check is based on only
1958          * one of owner, group, public.
1959          * If not owner, then check group.
1960          * If not a member of the group, then
1961          * check public access.
1962          */
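         /*
          * UDF records five permission bits per class (read, write,
          * execute, plus change-attribute and delete), so each step
          * from owner to group to public shifts by 5 bits rather
          * than the usual 3.
          */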
1963         if (crgetuid(cr) != ip->i_uid) {
1964                 shift += 5;
1965                 if (!groupmember((uid_t)ip->i_gid, cr))
1966                         shift += 5;
1967         }
1968 
1969         ret = secpolicy_vnode_access2(cr, ITOV(ip), ip->i_uid,
1970             UD2VA_PERM(ip->i_perm << shift), UD2VA_PERM(mode));
1971 
1972 out:
1973         if (dolock)
1974                 rw_exit(&ip->i_contents);
1975         return (ret);
1976 }
1977 
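     /*
      * Fold pending access/update/change events into the in-core
      * timestamps: IACC updates atime, IUPD updates mtime (and sets
      * IMODTIME), and ICHG updates ctime and invalidates the cached
      * directory offset (i_diroff).
      */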
1978 void
1979 ud_imark(struct ud_inode *ip)
1980 {
1981         timestruc_t     now;
1982 
1983         gethrestime(&now);
1984         ud_printf("ud_imark\n");
1985         if (ip->i_flag & IACC) {
1986                 ip->i_atime.tv_sec = now.tv_sec;
1987                 ip->i_atime.tv_nsec = now.tv_nsec;
1988         }
1989         if (ip->i_flag & IUPD) {
1990                 ip->i_mtime.tv_sec = now.tv_sec;
1991                 ip->i_mtime.tv_nsec = now.tv_nsec;
1992                 ip->i_flag |= IMODTIME;
1993         }
1994         if (ip->i_flag & ICHG) {
1995                 ip->i_diroff = 0;
1996                 ip->i_ctime.tv_sec = now.tv_sec;
1997                 ip->i_ctime.tv_nsec = now.tv_nsec;
1998         }
1999 }
2000 
2001 
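     /*
      * Mark the inode modified and fold in any pending timestamp
      * flags.  This variant assumes the caller already holds
      * i_tlock (see the ITIMES_NOLOCK use in ud_itrunc() above).
      */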
2002 void
2003 ud_itimes_nolock(struct ud_inode *ip)
2004 {
2005         ud_printf("ud_itimes_nolock\n");
2006 
2007         if (ip->i_flag & (IUPD|IACC|ICHG)) {
2008                 if (ip->i_flag & ICHG) {
2009                         ip->i_flag |= IMOD;
2010                 } else {
2011                         ip->i_flag |= IMODACC;
2012                 }
2013                 ud_imark(ip);
2014                 ip->i_flag &= ~(IACC|IUPD|ICHG);
2015         }
2016 }
2017 
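     /*
      * Remove the inode from the hash cache; the self-referencing
      * forward and back pointers mark it as unhashed.
      */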
2018 void
2019 ud_delcache(struct ud_inode *ip)
2020 {
2021         ud_printf("ud_delcache\n");
2022 
2023         mutex_enter(&ud_icache_lock);
2024         remque(ip);
2025         ip->i_forw = ip;
2026         ip->i_back = ip;
2027         mutex_exit(&ud_icache_lock);
2028 }
2029 
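     /*
      * Release a reference on the inode.  When the last reference is
      * dropped the inode is placed on the free list, at the front or
      * the back depending on how soon it is likely to be reused (see
      * below).
      */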
2030 void
2031 ud_idrop(struct ud_inode *ip)
2032 {
2033         struct vnode *vp = ITOV(ip);
2034 
2035         ASSERT(RW_WRITE_HELD(&ip->i_contents));
2036 
2037         ud_printf("ud_idrop\n");
2038 
2039         mutex_enter(&vp->v_lock);
2040         VN_RELE_LOCKED(vp);
2041         if (vp->v_count > 0) {
2042                 mutex_exit(&vp->v_lock);
2043                 return;
2044         }
2045         mutex_exit(&vp->v_lock);
2046 
2047         /*
2048          *  If the inode is invalid or has no pages associated with it,
2049          *  put the inode at the front of the free list
2050          */
2051         mutex_enter(&ip->i_tlock);
2052         mutex_enter(&udf_ifree_lock);
2053         if (!vn_has_cached_data(vp) || ip->i_perm == 0) {
2054                 ud_add_to_free_list(ip, UD_BEGIN);
2055         } else {
2056                 /*
2057                  * Otherwise, put the inode back at the end of the free list.
2058                  */
2059                 ud_add_to_free_list(ip, UD_END);
2060         }
2061         mutex_exit(&udf_ifree_lock);
2062         ip->i_flag &= IMODTIME;
2063         mutex_exit(&ip->i_tlock);
2064 }
2065 
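     /*
      * Insert the inode at the head (UD_BEGIN) or tail (UD_END) of
      * the doubly linked inode free list.  The caller must hold
      * udf_ifree_lock.
      */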
2066 void
2067 ud_add_to_free_list(struct ud_inode *ip, uint32_t at)
2068 {
2069         ASSERT(ip);
2070         ASSERT(mutex_owned(&udf_ifree_lock));
2071 
2072 #ifdef  DEBUG
2073         /* Search if the element is already in the list */
2074         if (udf_ifreeh != NULL) {
2075                 struct ud_inode *iq;
2076 
2077                 iq = udf_ifreeh;
2078                 while (iq) {
2079                         if (iq == ip) {
2080                                 cmn_err(CE_WARN, "Duplicate %p\n", (void *)ip);
2081                         }
2082                         iq = iq->i_freef;
2083                 }
2084         }
2085 #endif
2086 
2087         ip->i_freef = NULL;
2088         ip->i_freeb = NULL;
2089         if (udf_ifreeh == NULL) {
2090                 /*
2091                  * Nothing on the list; just add it
2092                  */
2093                 udf_ifreeh = ip;
2094                 udf_ifreet = ip;
2095         } else {
2096                 if (at == UD_BEGIN) {
2097                         /*
2098                          * Add at the beginning of the list
2099                          */
2100                         ip->i_freef = udf_ifreeh;
2101                         udf_ifreeh->i_freeb = ip;
2102                         udf_ifreeh = ip;
2103                 } else {
2104                         /*
2105                          * Add at the end of the list
2106                          */
2107                         ip->i_freeb = udf_ifreet;
2108                         udf_ifreet->i_freef = ip;
2109                         udf_ifreet = ip;
2110                 }
2111         }
2112 }
2113 
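     /*
      * Unlink the inode from the free list, if it is on it.  Callers
      * pass UD_BEGIN when they know the inode is at the head of the
      * list; otherwise it is unlinked from wherever it sits.  The
      * caller must hold udf_ifree_lock.
      */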
2114 void
2115 ud_remove_from_free_list(struct ud_inode *ip, uint32_t at)
2116 {
2117         ASSERT(ip);
2118         ASSERT(mutex_owned(&udf_ifree_lock));
2119 
2120 #ifdef  DEBUG
2121         {
2122                 struct ud_inode *iq;
2123                 uint32_t found = 0;
2124 
2125                 iq = udf_ifreeh;
2126                 while (iq) {
2127                         if (iq == ip) {
2128                                 found++;
2129                         }
2130                         iq = iq->i_freef;
2131                 }
2132                 if (found != 1) {
2133                         cmn_err(CE_WARN, "ip %p is found %x times\n",
2134                             (void *)ip,  found);
2135                 }
2136         }
2137 #endif
2138 
2139         if ((ip->i_freef == NULL) && (ip->i_freeb == NULL)) {
2140                 if (ip != udf_ifreeh) {
2141                         return;
2142                 }
2143         }
2144 
2145         if ((at == UD_BEGIN) || (ip == udf_ifreeh)) {
2146                 udf_ifreeh = ip->i_freef;
2147                 if (ip->i_freef == NULL) {
2148                         udf_ifreet = NULL;
2149                 } else {
2150                         udf_ifreeh->i_freeb = NULL;
2151                 }
2152         } else {
2153                 ip->i_freeb->i_freef = ip->i_freef;
2154                 if (ip->i_freef) {
2155                         ip->i_freef->i_freeb = ip->i_freeb;
2156                 } else {
2157                         udf_ifreet = ip->i_freeb;
2158                 }
2159         }
2160         ip->i_freef = NULL;
2161         ip->i_freeb = NULL;
2162 }
2163 
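     /*
      * One-time initialization of the inode hash chains, the inode
      * free list, and the module-global locks.  This runs before any
      * competing threads exist (see the _NOTE annotations below).
      */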
2164 void
2165 ud_init_inodes(void)
2166 {
2167         union ihead *ih = ud_ihead;
2168         int index;
2169 
2170 #ifndef __lint
2171         _NOTE(NO_COMPETING_THREADS_NOW);
2172 #endif
2173         for (index = 0; index < UD_HASH_SZ; index++, ih++) {
2174                 ih->ih_head[0] = ih;
2175                 ih->ih_head[1] = ih;
2176         }
2177         mutex_init(&ud_icache_lock, NULL, MUTEX_DEFAULT, NULL);
2178         mutex_init(&ud_nino_lock, NULL, MUTEX_DEFAULT, NULL);
2179 
2180         udf_ifreeh = NULL;
2181         udf_ifreet = NULL;
2182         mutex_init(&udf_ifree_lock, NULL, MUTEX_DEFAULT, NULL);
2183 
2184         mutex_init(&ud_sync_busy, NULL, MUTEX_DEFAULT, NULL);
2185         udf_vfs_instances = NULL;
2186         mutex_init(&udf_vfs_mutex, NULL, MUTEX_DEFAULT, NULL);
2187 
2188 #ifndef __lint
2189         _NOTE(COMPETING_THREADS_NOW);
2190 #endif
2191 }