/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1984, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2015, Joyent, Inc.
 */

/*      Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
/*        All Rights Reserved   */

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/ksynch.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/signal.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/buf.h>
#include <sys/vfs.h>
#include <sys/vfs_opreg.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/disp.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/flock.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/uio.h>
#include <sys/dnlc.h>
#include <sys/conf.h>
#include <sys/mman.h>
#include <sys/pathname.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>
#include <sys/cmn_err.h>
#include <sys/filio.h>
#include <sys/policy.h>

#include <sys/fs/ufs_fs.h>
#include <sys/fs/ufs_lockfs.h>
#include <sys/fs/ufs_filio.h>
#include <sys/fs/ufs_inode.h>
#include <sys/fs/ufs_fsdir.h>
#include <sys/fs/ufs_quota.h>
#include <sys/fs/ufs_log.h>
#include <sys/fs/ufs_snap.h>
#include <sys/fs/ufs_trans.h>
#include <sys/fs/ufs_panic.h>
#include <sys/fs/ufs_bio.h>
#include <sys/dirent.h>           /* must be AFTER <sys/fs/fsdir.h>! */
#include <sys/errno.h>
#include <sys/fssnap_if.h>
#include <sys/unistd.h>
#include <sys/sunddi.h>

#include <sys/filio.h>            /* _FIOIO */

#include <vm/hat.h>
#include <vm/page.h>
#include <vm/pvn.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_map.h>
#include <vm/seg_vn.h>
#include <vm/seg_kmem.h>
#include <vm/rm.h>
#include <sys/swap.h>

#include <fs/fs_subr.h>

#include <sys/fs/decomp.h>

static struct instats ins;

static  int ufs_getpage_ra(struct vnode *, u_offset_t, struct seg *, caddr_t);
static  int ufs_getpage_miss(struct vnode *, u_offset_t, size_t, struct seg *,
                caddr_t, struct page **, size_t, enum seg_rw, int);
static  int ufs_open(struct vnode **, int, struct cred *, caller_context_t *);
static  int ufs_close(struct vnode *, int, int, offset_t, struct cred *,
                caller_context_t *);
static  int ufs_read(struct vnode *, struct uio *, int, struct cred *,
                struct caller_context *);
static  int ufs_write(struct vnode *, struct uio *, int, struct cred *,
                struct caller_context *);
static  int ufs_ioctl(struct vnode *, int, intptr_t, int, struct cred *,
                int *, caller_context_t *);
static  int ufs_getattr(struct vnode *, struct vattr *, int, struct cred *,
                caller_context_t *);
static  int ufs_setattr(struct vnode *, struct vattr *, int, struct cred *,
                caller_context_t *);
static  int ufs_access(struct vnode *, int, int, struct cred *,
                caller_context_t *);
static  int ufs_lookup(struct vnode *, char *, struct vnode **,
                struct pathname *, int, struct vnode *, struct cred *,
                caller_context_t *, int *, pathname_t *);
static  int ufs_create(struct vnode *, char *, struct vattr *, enum vcexcl,
                int, struct vnode **, struct cred *, int,
                caller_context_t *, vsecattr_t  *);
static  int ufs_remove(struct vnode *, char *, struct cred *,
                caller_context_t *, int);
static  int ufs_link(struct vnode *, struct vnode *, char *, struct cred *,
                caller_context_t *, int);
static  int ufs_rename(struct vnode *, char *, struct vnode *, char *,
                struct cred *, caller_context_t *, int);
static  int ufs_mkdir(struct vnode *, char *, struct vattr *, struct vnode **,
                struct cred *, caller_context_t *, int, vsecattr_t *);
static  int ufs_rmdir(struct vnode *, char *, struct vnode *, struct cred *,
                caller_context_t *, int);
static  int ufs_readdir(struct vnode *, struct uio *, struct cred *, int *,
                caller_context_t *, int);
static  int ufs_symlink(struct vnode *, char *, struct vattr *, char *,
                struct cred *, caller_context_t *, int);
static  int ufs_readlink(struct vnode *, struct uio *, struct cred *,
                caller_context_t *);
static  int ufs_fsync(struct vnode *, int, struct cred *, caller_context_t *);
static  void ufs_inactive(struct vnode *, struct cred *, caller_context_t *);
static  int ufs_fid(struct vnode *, struct fid *, caller_context_t *);
static  int ufs_rwlock(struct vnode *, int, caller_context_t *);
static  void ufs_rwunlock(struct vnode *, int, caller_context_t *);
static  int ufs_seek(struct vnode *, offset_t, offset_t *, caller_context_t *);
static  int ufs_frlock(struct vnode *, int, struct flock64 *, int, offset_t,
                struct flk_callback *, struct cred *,
                caller_context_t *);
static  int ufs_space(struct vnode *, int, struct flock64 *, int, offset_t,
                cred_t *, caller_context_t *);
static  int ufs_getpage(struct vnode *, offset_t, size_t, uint_t *,
                struct page **, size_t, struct seg *, caddr_t,
                enum seg_rw, struct cred *, caller_context_t *);
static  int ufs_putpage(struct vnode *, offset_t, size_t, int, struct cred *,
                caller_context_t *);
static  int ufs_putpages(struct vnode *, offset_t, size_t, int, struct cred *);
static  int ufs_map(struct vnode *, offset_t, struct as *, caddr_t *, size_t,
                uchar_t, uchar_t, uint_t, struct cred *, caller_context_t *);
static  int ufs_addmap(struct vnode *, offset_t, struct as *, caddr_t,  size_t,
                uchar_t, uchar_t, uint_t, struct cred *, caller_context_t *);
static  int ufs_delmap(struct vnode *, offset_t, struct as *, caddr_t,  size_t,
                uint_t, uint_t, uint_t, struct cred *, caller_context_t *);
static  int ufs_poll(vnode_t *, short, int, short *, struct pollhead **,
                caller_context_t *);
static  int ufs_dump(vnode_t *, caddr_t, offset_t, offset_t,
    caller_context_t *);
static  int ufs_l_pathconf(struct vnode *, int, ulong_t *, struct cred *,
                caller_context_t *);
static  int ufs_pageio(struct vnode *, struct page *, u_offset_t, size_t, int,
                struct cred *, caller_context_t *);
static  int ufs_dumpctl(vnode_t *, int, offset_t *, caller_context_t *);
static  daddr32_t *save_dblks(struct inode *, struct ufsvfs *, daddr32_t *,
                daddr32_t *, int, int);
static  int ufs_getsecattr(struct vnode *, vsecattr_t *, int, struct cred *,
                caller_context_t *);
static  int ufs_setsecattr(struct vnode *, vsecattr_t *, int, struct cred *,
                caller_context_t *);
static  int ufs_priv_access(void *, int, struct cred *);
static  int ufs_eventlookup(struct vnode *, char *, struct cred *,
    struct vnode **);
extern int as_map_locked(struct as *, caddr_t, size_t, int ((*)()), void *);

/*
 * For lockfs: ulockfs begin/end is now inlined in the ufs_xxx functions.
 *
 * XXX - ULOCKFS in fs_pathconf and ufs_ioctl is not inlined yet.
 */
struct vnodeops *ufs_vnodeops;

/* NOTE: "not blkd" below  means that the operation isn't blocked by lockfs */
const fs_operation_def_t ufs_vnodeops_template[] = {
        { VOPNAME_OPEN,         { .vop_open = ufs_open } },     /* not blkd */
        { VOPNAME_CLOSE,        { .vop_close = ufs_close } },   /* not blkd */
        { VOPNAME_READ,         { .vop_read = ufs_read } },
        { VOPNAME_WRITE,        { .vop_write = ufs_write } },
        { VOPNAME_IOCTL,        { .vop_ioctl = ufs_ioctl } },
        { VOPNAME_GETATTR,      { .vop_getattr = ufs_getattr } },
        { VOPNAME_SETATTR,      { .vop_setattr = ufs_setattr } },
        { VOPNAME_ACCESS,       { .vop_access = ufs_access } },
        { VOPNAME_LOOKUP,       { .vop_lookup = ufs_lookup } },
        { VOPNAME_CREATE,       { .vop_create = ufs_create } },
        { VOPNAME_REMOVE,       { .vop_remove = ufs_remove } },
        { VOPNAME_LINK,         { .vop_link = ufs_link } },
        { VOPNAME_RENAME,       { .vop_rename = ufs_rename } },
        { VOPNAME_MKDIR,        { .vop_mkdir = ufs_mkdir } },
        { VOPNAME_RMDIR,        { .vop_rmdir = ufs_rmdir } },
        { VOPNAME_READDIR,      { .vop_readdir = ufs_readdir } },
        { VOPNAME_SYMLINK,      { .vop_symlink = ufs_symlink } },
        { VOPNAME_READLINK,     { .vop_readlink = ufs_readlink } },
        { VOPNAME_FSYNC,        { .vop_fsync = ufs_fsync } },
        { VOPNAME_INACTIVE,     { .vop_inactive = ufs_inactive } }, /* !blkd */
        { VOPNAME_FID,          { .vop_fid = ufs_fid } },
        { VOPNAME_RWLOCK,       { .vop_rwlock = ufs_rwlock } }, /* not blkd */
        { VOPNAME_RWUNLOCK,     { .vop_rwunlock = ufs_rwunlock } }, /* !blkd */
        { VOPNAME_SEEK,         { .vop_seek = ufs_seek } },
        { VOPNAME_FRLOCK,       { .vop_frlock = ufs_frlock } },
        { VOPNAME_SPACE,        { .vop_space = ufs_space } },
        { VOPNAME_GETPAGE,      { .vop_getpage = ufs_getpage } },
        { VOPNAME_PUTPAGE,      { .vop_putpage = ufs_putpage } },
        { VOPNAME_MAP,          { .vop_map = ufs_map } },
        { VOPNAME_ADDMAP,       { .vop_addmap = ufs_addmap } }, /* not blkd */
        { VOPNAME_DELMAP,       { .vop_delmap = ufs_delmap } }, /* not blkd */
        { VOPNAME_POLL,         { .vop_poll = ufs_poll } },     /* not blkd */
        { VOPNAME_DUMP,         { .vop_dump = ufs_dump } },
        { VOPNAME_PATHCONF,     { .vop_pathconf = ufs_l_pathconf } },
        { VOPNAME_PAGEIO,       { .vop_pageio = ufs_pageio } },
        { VOPNAME_DUMPCTL,      { .vop_dumpctl = ufs_dumpctl } },
        { VOPNAME_GETSECATTR,   { .vop_getsecattr = ufs_getsecattr } },
        { VOPNAME_SETSECATTR,   { .vop_setsecattr = ufs_setsecattr } },
        { VOPNAME_VNEVENT,      { .vop_vnevent = fs_vnevent_support } },
        { NULL,                 { NULL } }
};

#define MAX_BACKFILE_COUNT      9999

/*
 * Created by ufs_dumpctl() to store a file's disk block info into memory.
 * Used by ufs_dump() to dump data to disk directly.
 */
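/*
 * Note: dblk[] is declared with a single element but is used as a
 * variable-length array; the structure is presumably allocated by
 * ufs_dumpctl() with enough trailing space to hold the file's full
 * list of disk block numbers.
 */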
struct dump {
        struct inode    *ip;            /* the file we contain */
        daddr_t         fsbs;           /* number of blocks stored */
        struct timeval32 time;          /* time stamp for the struct */
        daddr32_t       dblk[1];        /* place holder for block info */
};

static struct dump *dump_info = NULL;

/*
 * Previously there was no special action required for ordinary files.
 * (Devices are handled through the device file system.)
 * Now that we support large files, the large file API requires open to
 * fail if the file is large.
 * We could prevent data corruption by doing an atomic check of the size
 * and truncating if the file is opened with the FTRUNC flag set, but
 * traditionally this is done by the vfs/vnode layers. Handling truncation
 * here would change the existing semantics of VOP_OPEN, so we chose not
 * to implement anything here. The check for file size > 2GB is done at
 * the vfs layer in the routine vn_open().
 */

/* ARGSUSED */
static int
ufs_open(struct vnode **vpp, int flag, struct cred *cr, caller_context_t *ct)
{
        return (0);
}

/*ARGSUSED*/
static int
ufs_close(struct vnode *vp, int flag, int count, offset_t offset,
        struct cred *cr, caller_context_t *ct)
{
        cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
        cleanshares(vp, ttoproc(curthread)->p_pid);

        /*
         * Push partially filled cluster at last close.
         * ``last close'' is approximated because the dnlc
         * may have a hold on the vnode.
         * Checking for VBAD here will also act as a forced umount check.
         */
        if (vp->v_count <= 2 && vp->v_type != VBAD) {
                struct inode *ip = VTOI(vp);
                if (ip->i_delaylen) {
                        ins.in_poc.value.ul++;
                        (void) ufs_putpages(vp, ip->i_delayoff, ip->i_delaylen,
                            B_ASYNC | B_FREE, cr);
                        ip->i_delaylen = 0;
                }
        }

        return (0);
}

/*ARGSUSED*/
static int
ufs_read(struct vnode *vp, struct uio *uiop, int ioflag, struct cred *cr,
        struct caller_context *ct)
{
        struct inode *ip = VTOI(vp);
        struct ufsvfs *ufsvfsp;
        struct ulockfs *ulp = NULL;
        int error = 0;
        int intrans = 0;

        ASSERT(RW_READ_HELD(&ip->i_rwlock));

        /*
         * Mandatory locking needs to be done before ufs_lockfs_begin()
         * and TRANS_BEGIN_SYNC() calls since mandatory locks can sleep.
         */
        if (MANDLOCK(vp, ip->i_mode)) {
                /*
                 * ufs_getattr ends up being called by chklock
                 */
                error = chklock(vp, FREAD, uiop->uio_loffset,
                    uiop->uio_resid, uiop->uio_fmode, ct);
                if (error)
                        goto out;
        }

        ufsvfsp = ip->i_ufsvfs;
        error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_READ_MASK);
        if (error)
                goto out;

        /*
         * In the case that a directory is opened for reading as a file
         * (e.g. "cat .") with the O_RSYNC, O_SYNC and O_DSYNC flags set,
         * the locking order had to be changed to avoid a deadlock with
         * an update taking place on that directory at the same time.
         */
        if ((ip->i_mode & IFMT) == IFDIR) {

                rw_enter(&ip->i_contents, RW_READER);
                error = rdip(ip, uiop, ioflag, cr);
                rw_exit(&ip->i_contents);

                if (error) {
                        if (ulp)
                                ufs_lockfs_end(ulp);
                        goto out;
                }

                if (ulp && (ioflag & FRSYNC) && (ioflag & (FSYNC | FDSYNC)) &&
                    TRANS_ISTRANS(ufsvfsp)) {
                        rw_exit(&ip->i_rwlock);
                        TRANS_BEGIN_SYNC(ufsvfsp, TOP_READ_SYNC, TOP_READ_SIZE,
                            error);
                        ASSERT(!error);
                        TRANS_END_SYNC(ufsvfsp, error, TOP_READ_SYNC,
                            TOP_READ_SIZE);
                        rw_enter(&ip->i_rwlock, RW_READER);
                }
        } else {
                /*
                 * Only transact reads to files opened for sync-read and
                 * sync-write on a file system that is not write locked.
                 *
                 * The ``not write locked'' check prevents problems with
                 * enabling/disabling logging on a busy file system.  E.g.,
                 * logging exists at the beginning of the read but no
                 * longer exists at the end.
                 */
                if (ulp && (ioflag & FRSYNC) && (ioflag & (FSYNC | FDSYNC)) &&
                    TRANS_ISTRANS(ufsvfsp)) {
                        TRANS_BEGIN_SYNC(ufsvfsp, TOP_READ_SYNC, TOP_READ_SIZE,
                            error);
                        ASSERT(!error);
                        intrans = 1;
                }

                rw_enter(&ip->i_contents, RW_READER);
                error = rdip(ip, uiop, ioflag, cr);
                rw_exit(&ip->i_contents);

                if (intrans) {
                        TRANS_END_SYNC(ufsvfsp, error, TOP_READ_SYNC,
                            TOP_READ_SIZE);
                }
        }

        if (ulp) {
                ufs_lockfs_end(ulp);
        }
out:

        return (error);
}

extern  int     ufs_HW;         /* high water mark */
extern  int     ufs_LW;         /* low water mark */
int     ufs_WRITES = 1;         /* XXX - enable/disable */
int     ufs_throttles = 0;      /* throttling count */
int     ufs_allow_shared_writes = 1;    /* directio shared writes */

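/*
 * ufs_check_rewrite() determines whether a write can be treated as a
 * concurrent "rewrite" of blocks that are already allocated.  ufs_write()
 * uses it to gate the directio rewrite fast path and, together with
 * FDSYNC, the decision to skip logging a delta for the write.
 */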
static int
ufs_check_rewrite(struct inode *ip, struct uio *uiop, int ioflag)
{
        int     shared_write;

        /*
         * If the FDSYNC flag is set then ignore the global
         * ufs_allow_shared_writes in this case.
         */
        shared_write = (ioflag & FDSYNC) | ufs_allow_shared_writes;

        /*
         * Filter to determine if this request is suitable as a
         * concurrent rewrite. This write must not allocate blocks
         * by extending the file or filling in holes. There is no
         * point trying this for FSYNC descriptors, as the inode
         * will be updated synchronously after the write anyway.
         * The uio structure has not yet been checked for sanity,
         * so assume nothing.
         */
        return (((ip->i_mode & IFMT) == IFREG) && !(ioflag & FAPPEND) &&
            (uiop->uio_loffset >= (offset_t)0) &&
            (uiop->uio_loffset < ip->i_size) && (uiop->uio_resid > 0) &&
            ((ip->i_size - uiop->uio_loffset) >= uiop->uio_resid) &&
            !(ioflag & FSYNC) && !bmap_has_holes(ip) &&
            shared_write);
}

/*ARGSUSED*/
static int
ufs_write(struct vnode *vp, struct uio *uiop, int ioflag, cred_t *cr,
        caller_context_t *ct)
{
        struct inode *ip = VTOI(vp);
        struct ufsvfs *ufsvfsp;
        struct ulockfs *ulp;
        int retry = 1;
        int error, resv, resid = 0;
        int directio_status;
        int exclusive;
        int rewriteflg;
        long start_resid = uiop->uio_resid;

        ASSERT(RW_LOCK_HELD(&ip->i_rwlock));

retry_mandlock:
        /*
         * Mandatory locking needs to be done before ufs_lockfs_begin()
         * and TRANS_BEGIN_[A]SYNC() calls since mandatory locks can sleep.
         * Check for forced unmounts normally done in ufs_lockfs_begin().
         */
        if ((ufsvfsp = ip->i_ufsvfs) == NULL) {
                error = EIO;
                goto out;
        }
        if (MANDLOCK(vp, ip->i_mode)) {

                ASSERT(RW_WRITE_HELD(&ip->i_rwlock));

                /*
                 * ufs_getattr ends up being called by chklock
                 */
                error = chklock(vp, FWRITE, uiop->uio_loffset,
                    uiop->uio_resid, uiop->uio_fmode, ct);
                if (error)
                        goto out;
        }

        /* i_rwlock can change in chklock */
        exclusive = rw_write_held(&ip->i_rwlock);
        rewriteflg = ufs_check_rewrite(ip, uiop, ioflag);

        /*
         * Check for fast-path special case of directio re-writes.
         */
        if ((ip->i_flag & IDIRECTIO || ufsvfsp->vfs_forcedirectio) &&
            !exclusive && rewriteflg) {

                error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_WRITE_MASK);
                if (error)
                        goto out;

                rw_enter(&ip->i_contents, RW_READER);
                error = ufs_directio_write(ip, uiop, ioflag, 1, cr,
                    &directio_status);
                if (directio_status == DIRECTIO_SUCCESS) {
                        uint_t i_flag_save;

                        if (start_resid != uiop->uio_resid)
                                error = 0;
                        /*
                         * Special treatment of access times for re-writes.
                         * If IMOD is not already set, then convert it
                         * to IMODACC for this operation. This defers
                         * entering a delta into the log until the inode
                         * is flushed. This mimics what is done for read
                         * operations and inode access time.
                         */
                        mutex_enter(&ip->i_tlock);
                        i_flag_save = ip->i_flag;
                        ip->i_flag |= IUPD | ICHG;
                        ip->i_seq++;
                        ITIMES_NOLOCK(ip);
                        if ((i_flag_save & IMOD) == 0) {
                                ip->i_flag &= ~IMOD;
                                ip->i_flag |= IMODACC;
                        }
                        mutex_exit(&ip->i_tlock);
                        rw_exit(&ip->i_contents);
                        if (ulp)
                                ufs_lockfs_end(ulp);
                        goto out;
                }
                rw_exit(&ip->i_contents);
                if (ulp)
                        ufs_lockfs_end(ulp);
        }

        if (!exclusive && !rw_tryupgrade(&ip->i_rwlock)) {
                rw_exit(&ip->i_rwlock);
                rw_enter(&ip->i_rwlock, RW_WRITER);
                /*
                 * Mandatory locking could have been enabled
                 * after dropping the i_rwlock.
                 */
                if (MANDLOCK(vp, ip->i_mode))
                        goto retry_mandlock;
        }

        error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_WRITE_MASK);
        if (error)
                goto out;

        /*
         * Amount of log space needed for this write
         */
        if (!rewriteflg || !(ioflag & FDSYNC))
                TRANS_WRITE_RESV(ip, uiop, ulp, &resv, &resid);

        /*
         * Throttle writes.
         */
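        /*
         * If write throttling is enabled (ufs_WRITES) and this inode's
         * outstanding write bytes (i_writes) exceed the high-water mark
         * (ufs_HW), sleep on i_wrcv until the write-completion path signals
         * that the backlog has drained (presumably once it falls back below
         * the low-water mark, ufs_LW).
         */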
        if (ufs_WRITES && (ip->i_writes > ufs_HW)) {
                mutex_enter(&ip->i_tlock);
                while (ip->i_writes > ufs_HW) {
                        ufs_throttles++;
                        cv_wait(&ip->i_wrcv, &ip->i_tlock);
                }
                mutex_exit(&ip->i_tlock);
        }

        /*
         * Enter Transaction
         *
         * If the write is a rewrite and the FDSYNC flag is set but not
         * FSYNC, there is no need to open a transaction.  In this case
         * just set the IMODACC flag so the inode update is done at a
         * later time, avoiding the overhead of a logging transaction
         * that is not required.
         */
        if (ioflag & (FSYNC|FDSYNC)) {
                if (ulp) {
                        if (rewriteflg) {
                                uint_t i_flag_save;

                                rw_enter(&ip->i_contents, RW_READER);
                                mutex_enter(&ip->i_tlock);
                                i_flag_save = ip->i_flag;
                                ip->i_flag |= IUPD | ICHG;
                                ip->i_seq++;
                                ITIMES_NOLOCK(ip);
                                if ((i_flag_save & IMOD) == 0) {
                                        ip->i_flag &= ~IMOD;
                                        ip->i_flag |= IMODACC;
                                }
                                mutex_exit(&ip->i_tlock);
                                rw_exit(&ip->i_contents);
                        } else {
                                int terr = 0;
                                TRANS_BEGIN_SYNC(ufsvfsp, TOP_WRITE_SYNC, resv,
                                    terr);
                                ASSERT(!terr);
                        }
                }
        } else {
                if (ulp)
                        TRANS_BEGIN_ASYNC(ufsvfsp, TOP_WRITE, resv);
        }

        /*
         * Write the file
         */
        rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
        rw_enter(&ip->i_contents, RW_WRITER);
        if ((ioflag & FAPPEND) != 0 && (ip->i_mode & IFMT) == IFREG) {
                /*
                 * In append mode start at end of file.
                 */
                uiop->uio_loffset = ip->i_size;
        }

        /*
         * Mild optimisation: don't call ufs_trans_write() unless we have to.
         * Also, suppress file system full messages if we will retry.
         */
        if (retry)
                ip->i_flag |= IQUIET;
        if (resid) {
                TRANS_WRITE(ip, uiop, ioflag, error, ulp, cr, resv, resid);
        } else {
                error = wrip(ip, uiop, ioflag, cr);
        }
        ip->i_flag &= ~IQUIET;

        rw_exit(&ip->i_contents);
        rw_exit(&ufsvfsp->vfs_dqrwlock);

        /*
         * Leave Transaction
         */
        if (ulp) {
                if (ioflag & (FSYNC|FDSYNC)) {
                        if (!rewriteflg) {
                                int terr = 0;

                                TRANS_END_SYNC(ufsvfsp, terr, TOP_WRITE_SYNC,
                                    resv);
                                if (error == 0)
                                        error = terr;
                        }
                } else {
                        TRANS_END_ASYNC(ufsvfsp, TOP_WRITE, resv);
                }
                ufs_lockfs_end(ulp);
        }
out:
        if ((error == ENOSPC) && retry && TRANS_ISTRANS(ufsvfsp)) {
                /*
                 * Any blocks tied up in pending deletes?
                 */
                ufs_delete_drain_wait(ufsvfsp, 1);
                retry = 0;
                goto retry_mandlock;
        }

        if (error == ENOSPC && (start_resid != uiop->uio_resid))
                error = 0;

        return (error);
}

/*
 * Don't cache write blocks to files with the sticky bit set.
 * Used to keep swap files from blowing the page cache on a server.
 */
int stickyhack = 1;

/*
 * Free behind hacks.  The pager is busted.
 * XXX - need to pass the information down to writedone() in a flag like B_SEQ
 * or B_FREE_IF_TIGHT_ON_MEMORY.
 */
int     freebehind = 1;
int     smallfile = 0;
u_offset_t smallfile64 = 32 * 1024;

/*
 * While we should, in most cases, cache the pages for write, we
 * may also want to cache the pages for read as long as they are
 * frequently re-usable.
 *
 * If cache_read_ahead = 1, the pages for read will go to the tail
 * of the cache list when they are released, otherwise go to the head.
 */
int     cache_read_ahead = 0;

/*
 * Freebehind exists so that as we read large files sequentially we
 * don't consume most of memory with pages from a few files. It takes
 * longer to re-read multiple small files from disk than it does to read
 * one large file sequentially.  As system memory grows, customers need
 * to retain bigger chunks of files in memory.  The advent of the
 * cachelist opens up the possibility of freeing pages to the head or
 * tail of the list.
 *
 * Not freeing a page is a bet that the page will be read again before
 * its segmap slot is needed for something else. If we lose the bet,
 * it means some other thread is burdened with the page free we did
 * not do. If we win we save a free and reclaim.
 *
 * Freeing a page at the tail vs the head of the cachelist is a bet
 * that the page will survive until the next read.  It's also saying
 * that this page is more likely to be re-used than a page freed some
 * time ago and never reclaimed.
 *
 * Freebehind maintains a range of file offsets [smallfile1; smallfile2]
 *
 *            0 < offset < smallfile1 : pages are not freed.
 *   smallfile1 < offset < smallfile2 : pages freed to tail of cachelist.
 *   smallfile2 < offset              : pages freed to head of cachelist.
 *
 * The range is computed at most once per second and depends on
 * freemem and ncpus_online.  Both parameters are bounded to be
 * >= smallfile && >= smallfile64.
 *
 * smallfile1 = (free memory / ncpu) / 1000
 * smallfile2 = (free memory / ncpu) / 10
 *
 * A few example values:
 *
 *       Free Mem (in Bytes) [smallfile1; smallfile2]  [smallfile1; smallfile2]
 *                                 ncpus_online = 4          ncpus_online = 64
 *       ------------------  -----------------------   -----------------------
 *             1G                   [256K;  25M]               [32K; 1.5M]
 *            10G                   [2.5M; 250M]              [156K; 15M]
 *           100G                    [25M; 2.5G]              [1.5M; 150M]
 *
 */

#define SMALLFILE1_D 1000
#define SMALLFILE2_D 10
static u_offset_t smallfile1 = 32 * 1024;
static u_offset_t smallfile2 = 32 * 1024;
static clock_t smallfile_update = 0; /* lbolt value of when to recompute */
uint_t smallfile1_d = SMALLFILE1_D;
uint_t smallfile2_d = SMALLFILE2_D;
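
/*
 * Worked example of the formulas above: with 1GB free and 4 CPUs online,
 * smallfile1 = (1G / 4) / 1000 is roughly 256K and smallfile2 =
 * (1G / 4) / 10 is roughly 25M, matching the first row of the table.
 */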

/*
 * wrip does the real work of write requests for ufs.
 */
int
wrip(struct inode *ip, struct uio *uio, int ioflag, struct cred *cr)
{
        rlim64_t limit = uio->uio_llimit;
        u_offset_t off;
        u_offset_t old_i_size;
        struct fs *fs;
        struct vnode *vp;
        struct ufsvfs *ufsvfsp;
        caddr_t base;
        long start_resid = uio->uio_resid;   /* save starting resid */
        long premove_resid;                     /* resid before uiomove() */
        uint_t flags;
        int newpage;
        int iupdat_flag, directio_status;
        int n, on, mapon;
        int error, pagecreate;
        int do_dqrwlock;                /* drop/reacquire vfs_dqrwlock */
        int32_t iblocks;
        int     new_iblocks;

        /*
         * ip->i_size is incremented before the uiomove
         * is done on a write.  If the move fails (bad user
         * address) reset ip->i_size.
         * The better way would be to increment ip->i_size
         * only if the uiomove succeeds.
         */
        int i_size_changed = 0;
        o_mode_t type;
        int i_seq_needed = 0;

        vp = ITOV(ip);

        /*
         * check for forced unmount - should not happen as
         * the request passed the lockfs checks.
         */
        if ((ufsvfsp = ip->i_ufsvfs) == NULL)
                return (EIO);

        fs = ip->i_fs;

        ASSERT(RW_WRITE_HELD(&ip->i_contents));

        /* check for valid filetype */
        type = ip->i_mode & IFMT;
        if ((type != IFREG) && (type != IFDIR) && (type != IFATTRDIR) &&
            (type != IFLNK) && (type != IFSHAD)) {
                return (EIO);
        }

        /*
         * the actual limit of UFS file size
         * is UFS_MAXOFFSET_T
         */
        if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
                limit = MAXOFFSET_T;

        if (uio->uio_loffset >= limit) {
                proc_t *p = ttoproc(curthread);

                mutex_enter(&p->p_lock);
                (void) rctl_action(rctlproc_legacy[RLIMIT_FSIZE], p->p_rctls,
                    p, RCA_UNSAFE_SIGINFO);
                mutex_exit(&p->p_lock);
                return (EFBIG);
        }

        /*
         * if largefiles are disallowed, the limit is
         * the pre-largefiles value of 2GB
         */
        if (ufsvfsp->vfs_lfflags & UFS_LARGEFILES)
                limit = MIN(UFS_MAXOFFSET_T, limit);
        else
                limit = MIN(MAXOFF32_T, limit);

        if (uio->uio_loffset < (offset_t)0) {
                return (EINVAL);
        }
        if (uio->uio_resid == 0) {
                return (0);
        }

        if (uio->uio_loffset >= limit)
                return (EFBIG);

        ip->i_flag |= INOACC;        /* don't update ref time in getpage */

        if (ioflag & (FSYNC|FDSYNC)) {
                ip->i_flag |= ISYNC;
                iupdat_flag = 1;
        }
        /*
         * Try to go direct
         */
        if (ip->i_flag & IDIRECTIO || ufsvfsp->vfs_forcedirectio) {
                uio->uio_llimit = limit;
                error = ufs_directio_write(ip, uio, ioflag, 0, cr,
                    &directio_status);
                /*
                 * If ufs_directio wrote to the file or set the flags,
                 * we need to update i_seq, but it may be deferred.
                 */
                if (start_resid != uio->uio_resid ||
                    (ip->i_flag & (ICHG|IUPD))) {
                        i_seq_needed = 1;
                        ip->i_flag |= ISEQ;
                }
                if (directio_status == DIRECTIO_SUCCESS)
                        goto out;
        }

        /*
         * Behavior with respect to dropping/reacquiring vfs_dqrwlock:
         *
         * o shadow inodes: vfs_dqrwlock is not held at all
         * o quota updates: vfs_dqrwlock is read or write held
         * o other updates: vfs_dqrwlock is read held
         *
         * The first case is the only one where we do not hold
         * vfs_dqrwlock at all while entering wrip().
         * We must make sure not to downgrade/drop vfs_dqrwlock if we
         * have it as writer, i.e. if we are updating the quota inode.
         * There is no potential deadlock scenario in this case as
         * ufs_getpage() takes care of this and avoids reacquiring
         * vfs_dqrwlock in that case.
         *
         * This check is done here since the above conditions do not change
         * and we possibly loop below, so save a few cycles.
         */
        if ((type == IFSHAD) ||
            (rw_owner(&ufsvfsp->vfs_dqrwlock) == curthread)) {
                do_dqrwlock = 0;
        } else {
                do_dqrwlock = 1;
        }

        /*
         * Large Files: We cast MAXBMASK to offset_t
         * in order to mask out the higher bits. Since offset_t
         * is a signed value, the high order bit set in MAXBMASK
         * value makes it do the right thing by having all bits 1
         * in the higher word. May be removed for _SOLARIS64_.
         */

        fs = ip->i_fs;
        do {
                u_offset_t uoff = uio->uio_loffset;
                off = uoff & (offset_t)MAXBMASK;
                mapon = (int)(uoff & (offset_t)MAXBOFFSET);
                on = (int)blkoff(fs, uoff);
                n = (int)MIN(fs->fs_bsize - on, uio->uio_resid);
                new_iblocks = 1;

                if (type == IFREG && uoff + n >= limit) {
                        if (uoff >= limit) {
                                error = EFBIG;
                                goto out;
                        }
                        /*
                         * since uoff + n >= limit,
                         * therefore n >= limit - uoff, and n is an int
                         * so it is safe to cast it to an int
                         */
                        n = (int)(limit - (rlim64_t)uoff);
                }
                if (uoff + n > ip->i_size) {
                        /*
                         * We are extending the length of the file.
                         * bmap is used so that we are sure that
                         * if we need to allocate new blocks, that it
                         * is done here before we up the file size.
                         */
                        error = bmap_write(ip, uoff, (int)(on + n),
                            mapon == 0, NULL, cr);
                        /*
                         * bmap_write never drops i_contents so if
                         * the flags are set it changed the file.
                         */
                        if (ip->i_flag & (ICHG|IUPD)) {
                                i_seq_needed = 1;
                                ip->i_flag |= ISEQ;
                        }
                        if (error)
                                break;
                        /*
                         * There is a window of vulnerability here.
                         * The sequence of operations: allocate file
                         * system blocks, uiomove the data into pages,
                         * and then update the size of the file in the
                         * inode, must happen atomically.  However, due
                         * to current locking constraints, this can not
                         * be done.
                         */
                        ASSERT(ip->i_writer == NULL);
                        ip->i_writer = curthread;
                        i_size_changed = 1;
                        /*
                         * If we are writing from the beginning of
                         * the mapping, we can just create the
                         * pages without having to read them.
                         */
                        pagecreate = (mapon == 0);
                } else if (n == MAXBSIZE) {
                        /*
                         * Going to do a whole mapping's worth,
                         * so we can just create the pages w/o
                         * having to read them in.  But before
                         * we do that, we need to make sure any
                         * needed blocks are allocated first.
                         */
                        iblocks = ip->i_blocks;
                        error = bmap_write(ip, uoff, (int)(on + n),
                            BI_ALLOC_ONLY, NULL, cr);
                        /*
                         * bmap_write never drops i_contents so if
                         * the flags are set it changed the file.
                         */
                        if (ip->i_flag & (ICHG|IUPD)) {
                                i_seq_needed = 1;
                                ip->i_flag |= ISEQ;
                        }
                        if (error)
                                break;
                        pagecreate = 1;
                        /*
                         * Check if the newly created page needed the
                         * allocation of new disk blocks.
                         */
                        if (iblocks == ip->i_blocks)
                                new_iblocks = 0; /* no new blocks allocated */
                } else {
                        pagecreate = 0;
                        /*
                         * In sync mode flush the indirect blocks which
                         * may have been allocated and not written on
                         * disk. In the above cases bmap_write will
                         * allocate in sync mode.
                         */
                        if (ioflag & (FSYNC|FDSYNC)) {
                                error = ufs_indirblk_sync(ip, uoff);
                                if (error)
                                        break;
                        }
                }

                /*
                 * At this point we can enter ufs_getpage() in one
                 * of two ways:
                 * 1) segmap_getmapflt() calls ufs_getpage() when the
                 *    forcefault parameter is true (pagecreate == 0)
                 * 2) uiomove() causes a page fault.
                 *
                 * We have to drop the contents lock to prevent the VM
                 * system from trying to reacquire it in ufs_getpage()
                 * should the uiomove cause a pagefault.
                 *
                 * We have to drop the reader vfs_dqrwlock here as well.
                 */
                rw_exit(&ip->i_contents);
                if (do_dqrwlock) {
                        ASSERT(RW_LOCK_HELD(&ufsvfsp->vfs_dqrwlock));
                        ASSERT(!(RW_WRITE_HELD(&ufsvfsp->vfs_dqrwlock)));
                        rw_exit(&ufsvfsp->vfs_dqrwlock);
                }

                newpage = 0;
                premove_resid = uio->uio_resid;

                /*
                 * Touch the page and fault it in if it is not in core
                 * before segmap_getmapflt or vpm_data_copy can lock it.
                 * This avoids a deadlock when the user buffer is mapped
                 * via mmap to the same file that we are writing.
                 */
                uio_prefaultpages((long)n, uio);

                if (vpm_enable) {
                        /*
                         * Copy data. If new pages are created, part of
                         * the page that is not written will be initialized
                         * with zeros.
                         */
                        error = vpm_data_copy(vp, (off + mapon), (uint_t)n,
                            uio, !pagecreate, &newpage, 0, S_WRITE);
                } else {

                        base = segmap_getmapflt(segkmap, vp, (off + mapon),
                            (uint_t)n, !pagecreate, S_WRITE);

                        /*
                         * segmap_pagecreate() returns 1 if it calls
                         * page_create_va() to allocate any pages.
                         */

                        if (pagecreate)
                                newpage = segmap_pagecreate(segkmap, base,
                                    (size_t)n, 0);

                        error = uiomove(base + mapon, (long)n, UIO_WRITE, uio);
                }

                /*
                 * If "newpage" is set, then a new page was created and it
                 * does not contain valid data, so it needs to be initialized
                 * at this point.
                 * Otherwise the page contains old data, which was overwritten
                 * partially or as a whole in uiomove.
                 * If there is only one iovec structure within uio, then
                 * on error uiomove will not be able to update uio->uio_loffset
                 * and we would zero the whole page here!
                 *
                 * If uiomove fails because of an error, the old valid data
                 * is kept instead of filling the rest of the page with zero's.
                 */
                if (!vpm_enable && newpage &&
                    uio->uio_loffset < roundup(off + mapon + n, PAGESIZE)) {
                        /*
                         * We created pages w/o initializing them completely,
                         * thus we need to zero the part that wasn't set up.
                         * This happens on most EOF write cases and if
                         * we had some sort of error during the uiomove.
                         */
                        int nzero, nmoved;

                        nmoved = (int)(uio->uio_loffset - (off + mapon));
                        ASSERT(nmoved >= 0 && nmoved <= n);
                        nzero = roundup(on + n, PAGESIZE) - nmoved;
                        ASSERT(nzero > 0 && mapon + nmoved + nzero <= MAXBSIZE);
                        (void) kzero(base + mapon + nmoved, (uint_t)nzero);
                }

                /*
                 * Unlock the pages allocated by page_create_va()
                 * in segmap_pagecreate()
                 */
                if (!vpm_enable && newpage)
                        segmap_pageunlock(segkmap, base, (size_t)n, S_WRITE);

                /*
                 * If the size of the file changed, then update the
                 * size field in the inode now.  This can't be done
                 * before the call to segmap_pageunlock or there is
                 * a potential deadlock with callers to ufs_putpage().
                 * They will be holding i_contents and trying to lock
                 * a page, while this thread is holding a page locked
                 * and trying to acquire i_contents.
                 */
                if (i_size_changed) {
                        rw_enter(&ip->i_contents, RW_WRITER);
                        old_i_size = ip->i_size;
                        UFS_SET_ISIZE(uoff + n, ip);
                        TRANS_INODE(ufsvfsp, ip);
                        /*
                         * file has grown larger than 2GB. Set flag
                         * in superblock to indicate this, if it
                         * is not already set.
                         */
                        if ((ip->i_size > MAXOFF32_T) &&
                            !(fs->fs_flags & FSLARGEFILES)) {
                                ASSERT(ufsvfsp->vfs_lfflags & UFS_LARGEFILES);
                                mutex_enter(&ufsvfsp->vfs_lock);
                                fs->fs_flags |= FSLARGEFILES;
                                ufs_sbwrite(ufsvfsp);
                                mutex_exit(&ufsvfsp->vfs_lock);
                        }
                        mutex_enter(&ip->i_tlock);
                        ip->i_writer = NULL;
                        cv_broadcast(&ip->i_wrcv);
                        mutex_exit(&ip->i_tlock);
                        rw_exit(&ip->i_contents);
                }

                if (error) {
                        /*
                         * If we failed on a write, we may have already
                         * allocated file blocks as well as pages.  It's
                         * hard to undo the block allocation, but we must
                         * be sure to invalidate any pages that may have
                         * been allocated.
                         *
                         * If the page was created without initialization
                         * then we must check if it should be possible
                         * to destroy the new page and to keep the old data
                         * on the disk.
                         *
                         * It is possible to destroy the page without
                         * having to write back its contents only when
                         * - the size of the file remains unchanged
                         * - bmap_write() did not allocate new disk blocks
                         *   it is possible to create big files using "seek" and
                         *   write to the end of the file. A "write" to a
                         *   position before the end of the file would not
                         *   change the size of the file but it would allocate
                         *   new disk blocks.
                         * - uiomove intended to overwrite the whole page.
                         * - a new page was created (newpage == 1).
                         */

                        if (i_size_changed == 0 && new_iblocks == 0 &&
                            newpage) {

                                /* unwind what uiomove eventually last did */
                                uio->uio_resid = premove_resid;

                                /*
                                 * destroy the page, do not write ambiguous
                                 * data to the disk.
                                 */
                                flags = SM_DESTROY;
                        } else {
                                /*
                                 * write the page back to the disk, if dirty,
                                 * and remove the page from the cache.
                                 */
                                flags = SM_INVAL;
                        }

                        if (vpm_enable) {
                                /*
                                 *  Flush pages.
                                 */
                                (void) vpm_sync_pages(vp, off, n, flags);
                        } else {
                                (void) segmap_release(segkmap, base, flags);
                        }
                } else {
                        flags = 0;
                        /*
                         * Force write back for synchronous write cases.
                         */
                        if ((ioflag & (FSYNC|FDSYNC)) || type == IFDIR) {
                                /*
                                 * If the sticky bit is set but the
                                 * execute bit is not set, we do a
                                 * synchronous write back and free
                                 * the page when done.  We set up swap
                                 * files to be handled this way to
                                 * prevent servers from keeping around
                                 * the client's swap pages too long.
                                 * XXX - there ought to be a better way.
                                 */
                                if (IS_SWAPVP(vp)) {
                                        flags = SM_WRITE | SM_FREE |
                                            SM_DONTNEED;
                                        iupdat_flag = 0;
                                } else {
                                        flags = SM_WRITE;
                                }
                        } else if (n + on == MAXBSIZE || IS_SWAPVP(vp)) {
                                /*
                                 * Have written a whole block.
                                 * Start an asynchronous write and
                                 * mark the buffer to indicate that
                                 * it won't be needed again soon.
                                 */
                                flags = SM_WRITE | SM_ASYNC | SM_DONTNEED;
                        }
                        if (vpm_enable) {
                                /*
                                 * Flush pages.
                                 */
                                error = vpm_sync_pages(vp, off, n, flags);
                        } else {
                                error = segmap_release(segkmap, base, flags);
                        }
                        /*
                         * If the operation failed and is synchronous,
                         * then we need to unwind what uiomove() last
                         * did so we can potentially return an error to
                         * the caller.  If this write operation was
                         * done in two pieces and the first succeeded,
                         * then we won't return an error for the second
                         * piece that failed.  However, we only want to
                         * return a resid value that reflects what was
                         * really done.
                         *
                         * Failures for non-synchronous operations can
                         * be ignored since the page subsystem will
                         * retry the operation until it succeeds or the
                         * file system is unmounted.
                         */
                        if (error) {
                                if ((ioflag & (FSYNC | FDSYNC)) ||
                                    type == IFDIR) {
                                        uio->uio_resid = premove_resid;
                                } else {
                                        error = 0;
                                }
                        }
                }

                /*
                 * Re-acquire contents lock.
                 * If it was dropped, reacquire reader vfs_dqrwlock as well.
                 */
                if (do_dqrwlock)
                        rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
                rw_enter(&ip->i_contents, RW_WRITER);

                /*
                 * If the uiomove() failed or if a synchronous
                 * page push failed, fix up i_size.
                 */
                if (error) {
                        if (i_size_changed) {
                                /*
                                 * The uiomove failed, and we
1243                                  * allocated blocks, so get rid
1244                                  * of them.
1245                                  */
1246                                 (void) ufs_itrunc(ip, old_i_size, 0, cr);
1247                         }
1248                 } else {
1249                         /*
1250                          * XXX - Can this be out of the loop?
1251                          */
1252                         ip->i_flag |= IUPD | ICHG;
1253                         /*
1254                          * Only do one increase of i_seq for multiple
1255                          * pieces.  Because we drop locks, record
1256                          * the fact that we changed the timestamp and
1257                          * are deferring the increase in case another thread
1258                          * pushes our timestamp update.
1259                          */
1260                         i_seq_needed = 1;
1261                         ip->i_flag |= ISEQ;
1262                         if (i_size_changed)
1263                                 ip->i_flag |= IATTCHG;
1264                         if ((ip->i_mode & (IEXEC | (IEXEC >> 3) |
1265                             (IEXEC >> 6))) != 0 &&
1266                             (ip->i_mode & (ISUID | ISGID)) != 0 &&
1267                             secpolicy_vnode_setid_retain(cr,
1268                             (ip->i_mode & ISUID) != 0 && ip->i_uid == 0) != 0) {
1269                                 /*
1270                                  * Clear Set-UID & Set-GID bits on
1271                                  * successful write if not privileged
1272                                  * and at least one of the execute bits
1273                                  * is set.  If we always clear Set-GID,
1274                                  * mandatory file and record locking is
1275                                  * unusable.
1276                                  */
1277                                 ip->i_mode &= ~(ISUID | ISGID);
1278                         }
1279                 }
1280                 /*
1281                  * If the FDSYNC flag is set and this is a
1282                  * "rewrite", we won't log a delta.
1283                  * The FSYNC flag overrides all cases.
1284                  */
1285                 if (!ufs_check_rewrite(ip, uio, ioflag) || !(ioflag & FDSYNC)) {
1286                         TRANS_INODE(ufsvfsp, ip);
1287                 }
1288         } while (error == 0 && uio->uio_resid > 0 && n != 0);
1289 
1290 out:
1291         /*
1292          * Make sure i_seq is increased at least once per write
1293          */
1294         if (i_seq_needed) {
1295                 ip->i_seq++;
1296                 ip->i_flag &= ~ISEQ;     /* no longer deferred */
1297         }
1298 
1299         /*
1300          * Inode is updated according to this table -
1301          *
1302          *   FSYNC        FDSYNC(posix.4)
1303          *   --------------------------
1304          *   always@      IATTCHG|IBDWRITE
1305          *
1306          * @ -  If we are doing a synchronous write, the only time we should
1307          *      not be sync'ing the ip here is if we have the stickyhack
1308          *      activated, the file is marked with the sticky bit and
1309          *      no exec bit, the file length has not been changed, and
1310          *      no new blocks have been allocated during this write.
1311          */
1312 
1313         if ((ip->i_flag & ISYNC) != 0) {
1314                 /*
1315                  * we have eliminated nosync
1316                  */
1317                 if ((ip->i_flag & (IATTCHG|IBDWRITE)) ||
1318                     ((ioflag & FSYNC) && iupdat_flag)) {
1319                         ufs_iupdat(ip, 1);
1320                 }
1321         }
1322 
1323         /*
1324          * If we've already done a partial write, terminate
1325          * the write but return no error, unless the error is ENOSPC:
1326          * the caller can detect ENOSPC, free resources, and
1327          * try again.
1328          */
1329         if ((start_resid != uio->uio_resid) && (error != ENOSPC))
1330                 error = 0;
1331 
1332         ip->i_flag &= ~(INOACC | ISYNC);
1333         ITIMES_NOLOCK(ip);
1334         return (error);
1335 }
1336 
1337 /*
1338  * rdip does the real work of read requests for ufs.
1339  */
1340 int
1341 rdip(struct inode *ip, struct uio *uio, int ioflag, cred_t *cr)
1342 {
1343         u_offset_t off;
1344         caddr_t base;
1345         struct fs *fs;
1346         struct ufsvfs *ufsvfsp;
1347         struct vnode *vp;
1348         long oresid = uio->uio_resid;
1349         u_offset_t n, on, mapon;
1350         int error = 0;
1351         int doupdate = 1;
1352         uint_t flags;
1353         int dofree, directio_status;
1354         krw_t rwtype;
1355         o_mode_t type;
1356         clock_t now;
1357 
1358         vp = ITOV(ip);
1359 
1360         ASSERT(RW_LOCK_HELD(&ip->i_contents));
1361 
1362         ufsvfsp = ip->i_ufsvfs;
1363 
1364         if (ufsvfsp == NULL)
1365                 return (EIO);
1366 
1367         fs = ufsvfsp->vfs_fs;
1368 
1369         /* check for valid filetype */
1370         type = ip->i_mode & IFMT;
1371         if ((type != IFREG) && (type != IFDIR) && (type != IFATTRDIR) &&
1372             (type != IFLNK) && (type != IFSHAD)) {
1373                 return (EIO);
1374         }
1375 
1376         if (uio->uio_loffset > UFS_MAXOFFSET_T) {
1377                 error = 0;
1378                 goto out;
1379         }
1380         if (uio->uio_loffset < (offset_t)0) {
1381                 return (EINVAL);
1382         }
1383         if (uio->uio_resid == 0) {
1384                 return (0);
1385         }
1386 
1387         if (!ULOCKFS_IS_NOIACC(ITOUL(ip)) && (fs->fs_ronly == 0) &&
1388             (!ufsvfsp->vfs_noatime)) {
1389                 mutex_enter(&ip->i_tlock);
1390                 ip->i_flag |= IACC;
1391                 mutex_exit(&ip->i_tlock);
1392         }
1393         /*
1394          * Try to go direct
1395          */
1396         if (ip->i_flag & IDIRECTIO || ufsvfsp->vfs_forcedirectio) {
1397                 error = ufs_directio_read(ip, uio, cr, &directio_status);
1398                 if (directio_status == DIRECTIO_SUCCESS)
1399                         goto out;
1400         }
1401 
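             /*
              * Note how i_contents is currently held; a reader hold is
              * dropped and reacquired around the page copy loop below,
              * while a writer hold is kept across it.
              */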
1402         rwtype = (rw_write_held(&ip->i_contents) ? RW_WRITER : RW_READER);
1403 
1404         do {
1405                 offset_t diff;
1406                 u_offset_t uoff = uio->uio_loffset;
1407                 off = uoff & (offset_t)MAXBMASK;
1408                 mapon = (u_offset_t)(uoff & (offset_t)MAXBOFFSET);
1409                 on = (u_offset_t)blkoff(fs, uoff);
1410                 n = MIN((u_offset_t)fs->fs_bsize - on,
1411                     (u_offset_t)uio->uio_resid);
1412 
1413                 diff = ip->i_size - uoff;
1414 
1415                 if (diff <= (offset_t)0) {
1416                         error = 0;
1417                         goto out;
1418                 }
1419                 if (diff < (offset_t)n)
1420                         n = (int)diff;
1421 
1422                 /*
1423                  * We update smallfile2 and smallfile1 at most every second.
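                      * They scale with the amount of free memory per online
                      * CPU; smallfile1 is floored at the static smallfile and
                      * smallfile64 values, and smallfile2 is never smaller
                      * than smallfile1.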
1424                  */
1425                 now = ddi_get_lbolt();
1426                 if (now >= smallfile_update) {
1427                         uint64_t percpufreeb;
1428                         if (smallfile1_d == 0) smallfile1_d = SMALLFILE1_D;
1429                         if (smallfile2_d == 0) smallfile2_d = SMALLFILE2_D;
1430                         percpufreeb = ptob((uint64_t)freemem) / ncpus_online;
1431                         smallfile1 = percpufreeb / smallfile1_d;
1432                         smallfile2 = percpufreeb / smallfile2_d;
1433                         smallfile1 = MAX(smallfile1, smallfile);
1434                         smallfile1 = MAX(smallfile1, smallfile64);
1435                         smallfile2 = MAX(smallfile1, smallfile2);
1436                         smallfile_update = now + hz;
1437                 }
1438 
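                     /*
                      * Free-behind is only considered when the read is
                      * strictly sequential (the offset matches i_nextr)
                      * and the offset is already past smallfile1.
                      */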
1439                 dofree = freebehind &&
1440                     ip->i_nextr == (off & PAGEMASK) && off > smallfile1;
1441 
1442                 /*
1443                  * At this point we can enter ufs_getpage() in one of two
1444                  * ways:
1445                  * 1) segmap_getmapflt() calls ufs_getpage() when the
1446                  *    forcefault parameter is true (value of 1 is passed)
1447                  * 2) uiomove() causes a page fault.
1448                  *
1449                  * We cannot hold onto an i_contents reader lock without
1450                  * risking deadlock in ufs_getpage(), so we drop a reader lock.
1451                  * The ufs_getpage() dolock logic already allows a
1452                  * thread holding i_contents as writer to work properly,
1453                  * so we keep a writer lock.
1454                  */
1455                 if (rwtype == RW_READER)
1456                         rw_exit(&ip->i_contents);
1457 
1458                 if (vpm_enable) {
1459                         /*
1460                          * Copy data.
1461                          */
1462                         error = vpm_data_copy(vp, (off + mapon), (uint_t)n,
1463                             uio, 1, NULL, 0, S_READ);
1464                 } else {
1465                         base = segmap_getmapflt(segkmap, vp, (off + mapon),
1466                             (uint_t)n, 1, S_READ);
1467                         error = uiomove(base + mapon, (long)n, UIO_READ, uio);
1468                 }
1469 
1470                 flags = 0;
1471                 if (!error) {
1472                         /*
1473                          * If reading sequentially we won't need this
1474                          * buffer again soon.  For offsets in the range
1475                          * [smallfile1, smallfile2] release the pages at
1476                          * the tail of the cache list; larger offsets
1477                          * are released at the head.
1478                          */
1479                         if (dofree) {
1480                                 flags = SM_FREE | SM_ASYNC;
1481                                 if ((cache_read_ahead == 0) &&
1482                                     (off > smallfile2))
1483                                         flags |=  SM_DONTNEED;
1484                         }
1485                         /*
1486                          * In POSIX SYNC (FSYNC and FDSYNC) read mode,
1487                          * we want to make sure that the page which has
1488                          * been read is written to disk if it is dirty,
1489                          * and that the corresponding indirect blocks
1490                          * are flushed out as well.
1491                          */
1492                         if ((ioflag & FRSYNC) && (ioflag & (FSYNC|FDSYNC))) {
1493                                 flags &= ~SM_ASYNC;
1494                                 flags |= SM_WRITE;
1495                         }
1496                         if (vpm_enable) {
1497                                 error = vpm_sync_pages(vp, off, n, flags);
1498                         } else {
1499                                 error = segmap_release(segkmap, base, flags);
1500                         }
1501                 } else {
1502                         if (vpm_enable) {
1503                                 (void) vpm_sync_pages(vp, off, n, flags);
1504                         } else {
1505                                 (void) segmap_release(segkmap, base, flags);
1506                         }
1507                 }
1508 
1509                 if (rwtype == RW_READER)
1510                         rw_enter(&ip->i_contents, rwtype);
1511         } while (error == 0 && uio->uio_resid > 0 && n != 0);
1512 out:
1513         /*
1514          * Inode is updated according to this table if FRSYNC is set.
1515          *
1516          *   FSYNC        FDSYNC(posix.4)
1517          *   --------------------------
1518          *   always       IATTCHG|IBDWRITE
1519          */
1520         /*
1521          * The inode is not updated if we're logging and the inode is a
1522          * directory with FRSYNC, FSYNC and FDSYNC flags set.
1523          */
1524         if (ioflag & FRSYNC) {
1525                 if (TRANS_ISTRANS(ufsvfsp) && ((ip->i_mode & IFMT) == IFDIR)) {
1526                         doupdate = 0;
1527                 }
1528                 if (doupdate) {
1529                         if ((ioflag & FSYNC) ||
1530                             ((ioflag & FDSYNC) &&
1531                             (ip->i_flag & (IATTCHG|IBDWRITE)))) {
1532                                 ufs_iupdat(ip, 1);
1533                         }
1534                 }
1535         }
1536         /*
1537          * If we've already done a partial read, terminate
1538          * the read but return no error.
1539          */
1540         if (oresid != uio->uio_resid)
1541                 error = 0;
1542         ITIMES(ip);
1543 
1544         return (error);
1545 }
1546 
1547 /* ARGSUSED */
1548 static int
1549 ufs_ioctl(
1550         struct vnode    *vp,
1551         int             cmd,
1552         intptr_t        arg,
1553         int             flag,
1554         struct cred     *cr,
1555         int             *rvalp,
1556         caller_context_t *ct)
1557 {
1558         struct lockfs   lockfs, lockfs_out;
1559         struct ufsvfs   *ufsvfsp = VTOI(vp)->i_ufsvfs;
1560         char            *comment, *original_comment;
1561         struct fs       *fs;
1562         struct ulockfs  *ulp;
1563         offset_t        off;
1564         extern int      maxphys;
1565         int             error;
1566         int             issync;
1567         int             trans_size;
1568 
1569 
1570         /*
1571          * forcibly unmounted
1572          */
1573         if (ufsvfsp == NULL || vp->v_vfsp == NULL ||
1574             vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
1575                 return (EIO);
1576         fs = ufsvfsp->vfs_fs;
1577 
1578         if (cmd == Q_QUOTACTL) {
1579                 error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_QUOTA_MASK);
1580                 if (error)
1581                         return (error);
1582 
1583                 if (ulp) {
1584                         TRANS_BEGIN_ASYNC(ufsvfsp, TOP_QUOTA,
1585                             TOP_SETQUOTA_SIZE(fs));
1586                 }
1587 
1588                 error = quotactl(vp, arg, flag, cr);
1589 
1590                 if (ulp) {
1591                         TRANS_END_ASYNC(ufsvfsp, TOP_QUOTA,
1592                             TOP_SETQUOTA_SIZE(fs));
1593                         ufs_lockfs_end(ulp);
1594                 }
1595                 return (error);
1596         }
1597 
1598         switch (cmd) {
1599                 case _FIOLFS:
1600                         /*
1601                          * file system locking
1602                          */
1603                         if (secpolicy_fs_config(cr, ufsvfsp->vfs_vfs) != 0)
1604                                 return (EPERM);
1605 
1606                         if ((flag & DATAMODEL_MASK) == DATAMODEL_NATIVE) {
1607                                 if (copyin((caddr_t)arg, &lockfs,
1608                                     sizeof (struct lockfs)))
1609                                         return (EFAULT);
1610                         }
1611 #ifdef _SYSCALL32_IMPL
1612                         else {
1613                                 struct lockfs32 lockfs32;
1614                                 /* Translate ILP32 lockfs to LP64 lockfs */
1615                                 if (copyin((caddr_t)arg, &lockfs32,
1616                                     sizeof (struct lockfs32)))
1617                                         return (EFAULT);
1618                                 lockfs.lf_lock = (ulong_t)lockfs32.lf_lock;
1619                                 lockfs.lf_flags = (ulong_t)lockfs32.lf_flags;
1620                                 lockfs.lf_key = (ulong_t)lockfs32.lf_key;
1621                                 lockfs.lf_comlen = (ulong_t)lockfs32.lf_comlen;
1622                                 lockfs.lf_comment =
1623                                     (caddr_t)(uintptr_t)lockfs32.lf_comment;
1624                         }
1625 #endif /* _SYSCALL32_IMPL */
1626 
1627                         if (lockfs.lf_comlen) {
1628                                 if (lockfs.lf_comlen > LOCKFS_MAXCOMMENTLEN)
1629                                         return (ENAMETOOLONG);
1630                                 comment =
1631                                     kmem_alloc(lockfs.lf_comlen, KM_SLEEP);
1632                                 if (copyin(lockfs.lf_comment, comment,
1633                                     lockfs.lf_comlen)) {
1634                                         kmem_free(comment, lockfs.lf_comlen);
1635                                         return (EFAULT);
1636                                 }
1637                                 original_comment = lockfs.lf_comment;
1638                                 lockfs.lf_comment = comment;
1639                         }
1640                         if ((error = ufs_fiolfs(vp, &lockfs, 0)) == 0) {
1641                                 lockfs.lf_comment = original_comment;
1642 
1643                                 if ((flag & DATAMODEL_MASK) ==
1644                                     DATAMODEL_NATIVE) {
1645                                         (void) copyout(&lockfs, (caddr_t)arg,
1646                                             sizeof (struct lockfs));
1647                                 }
1648 #ifdef _SYSCALL32_IMPL
1649                                 else {
1650                                         struct lockfs32 lockfs32;
1651                                         /* Translate LP64 to ILP32 lockfs */
1652                                         lockfs32.lf_lock =
1653                                             (uint32_t)lockfs.lf_lock;
1654                                         lockfs32.lf_flags =
1655                                             (uint32_t)lockfs.lf_flags;
1656                                         lockfs32.lf_key =
1657                                             (uint32_t)lockfs.lf_key;
1658                                         lockfs32.lf_comlen =
1659                                             (uint32_t)lockfs.lf_comlen;
1660                                         lockfs32.lf_comment =
1661                                             (uint32_t)(uintptr_t)
1662                                             lockfs.lf_comment;
1663                                         (void) copyout(&lockfs32, (caddr_t)arg,
1664                                             sizeof (struct lockfs32));
1665                                 }
1666 #endif /* _SYSCALL32_IMPL */
1667 
1668                         } else {
1669                                 if (lockfs.lf_comlen)
1670                                         kmem_free(comment, lockfs.lf_comlen);
1671                         }
1672                         return (error);
1673 
1674                 case _FIOLFSS:
1675                         /*
1676                          * get file system locking status
1677                          */
1678 
1679                         if ((flag & DATAMODEL_MASK) == DATAMODEL_NATIVE) {
1680                                 if (copyin((caddr_t)arg, &lockfs,
1681                                     sizeof (struct lockfs)))
1682                                         return (EFAULT);
1683                         }
1684 #ifdef _SYSCALL32_IMPL
1685                         else {
1686                                 struct lockfs32 lockfs32;
1687                                 /* Translate ILP32 lockfs to LP64 lockfs */
1688                                 if (copyin((caddr_t)arg, &lockfs32,
1689                                     sizeof (struct lockfs32)))
1690                                         return (EFAULT);
1691                                 lockfs.lf_lock = (ulong_t)lockfs32.lf_lock;
1692                                 lockfs.lf_flags = (ulong_t)lockfs32.lf_flags;
1693                                 lockfs.lf_key = (ulong_t)lockfs32.lf_key;
1694                                 lockfs.lf_comlen = (ulong_t)lockfs32.lf_comlen;
1695                                 lockfs.lf_comment =
1696                                     (caddr_t)(uintptr_t)lockfs32.lf_comment;
1697                         }
1698 #endif /* _SYSCALL32_IMPL */
1699 
1700                         if (error = ufs_fiolfss(vp, &lockfs_out))
1701                                 return (error);
1702                         lockfs.lf_lock = lockfs_out.lf_lock;
1703                         lockfs.lf_key = lockfs_out.lf_key;
1704                         lockfs.lf_flags = lockfs_out.lf_flags;
1705                         lockfs.lf_comlen = MIN(lockfs.lf_comlen,
1706                             lockfs_out.lf_comlen);
1707 
1708                         if ((flag & DATAMODEL_MASK) == DATAMODEL_NATIVE) {
1709                                 if (copyout(&lockfs, (caddr_t)arg,
1710                                     sizeof (struct lockfs)))
1711                                         return (EFAULT);
1712                         }
1713 #ifdef _SYSCALL32_IMPL
1714                         else {
1715                                 /* Translate LP64 to ILP32 lockfs */
1716                                 struct lockfs32 lockfs32;
1717                                 lockfs32.lf_lock = (uint32_t)lockfs.lf_lock;
1718                                 lockfs32.lf_flags = (uint32_t)lockfs.lf_flags;
1719                                 lockfs32.lf_key = (uint32_t)lockfs.lf_key;
1720                                 lockfs32.lf_comlen = (uint32_t)lockfs.lf_comlen;
1721                                 lockfs32.lf_comment =
1722                                     (uint32_t)(uintptr_t)lockfs.lf_comment;
1723                                 if (copyout(&lockfs32, (caddr_t)arg,
1724                                     sizeof (struct lockfs32)))
1725                                         return (EFAULT);
1726                         }
1727 #endif /* _SYSCALL32_IMPL */
1728 
1729                         if (lockfs.lf_comlen &&
1730                             lockfs.lf_comment && lockfs_out.lf_comment)
1731                                 if (copyout(lockfs_out.lf_comment,
1732                                     lockfs.lf_comment, lockfs.lf_comlen))
1733                                         return (EFAULT);
1734                         return (0);
1735 
1736                 case _FIOSATIME:
1737                         /*
1738                          * set access time
1739                          */
1740 
1741                         /*
1742                          * if mounted w/o atime, return quietly.
1743                          * I briefly thought about returning ENOSYS, but
1744                          * figured that most apps would consider this fatal;
1745                          * the idea is to make this as seamless as possible.
1746                          */
1747                         if (ufsvfsp->vfs_noatime)
1748                                 return (0);
1749 
1750                         error = ufs_lockfs_begin(ufsvfsp, &ulp,
1751                             ULOCKFS_SETATTR_MASK);
1752                         if (error)
1753                                 return (error);
1754 
1755                         if (ulp) {
1756                                 trans_size = (int)TOP_SETATTR_SIZE(VTOI(vp));
1757                                 TRANS_BEGIN_CSYNC(ufsvfsp, issync,
1758                                     TOP_SETATTR, trans_size);
1759                         }
1760 
1761                         error = ufs_fiosatime(vp, (struct timeval *)arg,
1762                             flag, cr);
1763 
1764                         if (ulp) {
1765                                 TRANS_END_CSYNC(ufsvfsp, error, issync,
1766                                     TOP_SETATTR, trans_size);
1767                                 ufs_lockfs_end(ulp);
1768                         }
1769                         return (error);
1770 
1771                 case _FIOSDIO:
1772                         /*
1773                          * set delayed-io
1774                          */
1775                         return (ufs_fiosdio(vp, (uint_t *)arg, flag, cr));
1776 
1777                 case _FIOGDIO:
1778                         /*
1779                          * get delayed-io
1780                          */
1781                         return (ufs_fiogdio(vp, (uint_t *)arg, flag, cr));
1782 
1783                 case _FIOIO:
1784                         /*
1785                          * inode open
1786                          */
1787                         error = ufs_lockfs_begin(ufsvfsp, &ulp,
1788                             ULOCKFS_VGET_MASK);
1789                         if (error)
1790                                 return (error);
1791 
1792                         error = ufs_fioio(vp, (struct fioio *)arg, flag, cr);
1793 
1794                         if (ulp) {
1795                                 ufs_lockfs_end(ulp);
1796                         }
1797                         return (error);
1798 
1799                 case _FIOFFS:
1800                         /*
1801                          * file system flush (push w/invalidate)
1802                          */
1803                         if ((caddr_t)arg != NULL)
1804                                 return (EINVAL);
1805                         return (ufs_fioffs(vp, NULL, cr));
1806 
1807                 case _FIOISBUSY:
1808                         /*
1809                          * Contract-private interface for Legato.
1810                          * Purge this vnode from the DNLC and decide
1811                          * whether this vnode is busy (*arg == 1) or not
1812                          * (*arg == 0).
1813                          */
1814                         if (secpolicy_fs_config(cr, ufsvfsp->vfs_vfs) != 0)
1815                                 return (EPERM);
1816                         error = ufs_fioisbusy(vp, (int *)arg, cr);
1817                         return (error);
1818 
1819                 case _FIODIRECTIO:
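                             /*
                              * enable/disable directio for this file
                              */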
1820                         return (ufs_fiodirectio(vp, (int)arg, cr));
1821 
1822                 case _FIOTUNE:
1823                         /*
1824                          * Tune the file system (aka setting fs attributes)
1825                          */
1826                         error = ufs_lockfs_begin(ufsvfsp, &ulp,
1827                             ULOCKFS_SETATTR_MASK);
1828                         if (error)
1829                                 return (error);
1830 
1831                         error = ufs_fiotune(vp, (struct fiotune *)arg, cr);
1832 
1833                         if (ulp)
1834                                 ufs_lockfs_end(ulp);
1835                         return (error);
1836 
1837                 case _FIOLOGENABLE:
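                             /*
                              * enable logging on this file system
                              */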
1838                         if (secpolicy_fs_config(cr, ufsvfsp->vfs_vfs) != 0)
1839                                 return (EPERM);
1840                         return (ufs_fiologenable(vp, (void *)arg, cr, flag));
1841 
1842                 case _FIOLOGDISABLE:
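                             /*
                              * disable logging on this file system
                              */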
1843                         if (secpolicy_fs_config(cr, ufsvfsp->vfs_vfs) != 0)
1844                                 return (EPERM);
1845                         return (ufs_fiologdisable(vp, (void *)arg, cr, flag));
1846 
1847                 case _FIOISLOG:
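                             /*
                              * is logging enabled on this file system?
                              */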
1848                         return (ufs_fioislog(vp, (void *)arg, cr, flag));
1849 
1850                 case _FIOSNAPSHOTCREATE_MULTI:
1851                 {
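                             /*
                              * create a file system snapshot with multiple
                              * backing files
                              */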
1852                         struct fiosnapcreate_multi      fc, *fcp;
1853                         size_t  fcm_size;
1854 
1855                         if (copyin((void *)arg, &fc, sizeof (fc)))
1856                                 return (EFAULT);
1857                         if (fc.backfilecount > MAX_BACKFILE_COUNT)
1858                                 return (EINVAL);
1859                         fcm_size = sizeof (struct fiosnapcreate_multi) +
1860                             (fc.backfilecount - 1) * sizeof (int);
1861                         fcp = (struct fiosnapcreate_multi *)
1862                             kmem_alloc(fcm_size, KM_SLEEP);
1863                         if (copyin((void *)arg, fcp, fcm_size)) {
1864                                 kmem_free(fcp, fcm_size);
1865                                 return (EFAULT);
1866                         }
1867                         error = ufs_snap_create(vp, fcp, cr);
1868                         /*
1869                          * Do the copyout even if there is an error because
1870                          * the details of the error are stored in fcp.
1871                          */
1872                         if (copyout(fcp, (void *)arg, fcm_size))
1873                                 error = EFAULT;
1874                         kmem_free(fcp, fcm_size);
1875                         return (error);
1876                 }
1877 
1878                 case _FIOSNAPSHOTDELETE:
1879                 {
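                             /*
                              * delete a file system snapshot
                              */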
1880                         struct fiosnapdelete    fc;
1881 
1882                         if (copyin((void *)arg, &fc, sizeof (fc)))
1883                                 return (EFAULT);
1884                         error = ufs_snap_delete(vp, &fc, cr);
1885                         if (!error && copyout(&fc, (void *)arg, sizeof (fc)))
1886                                 error = EFAULT;
1887                         return (error);
1888                 }
1889 
1890                 case _FIOGETSUPERBLOCK:
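                             /*
                              * return a copy of the superblock
                              */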
1891                         if (copyout(fs, (void *)arg, SBSIZE))
1892                                 return (EFAULT);
1893                         return (0);
1894 
1895                 case _FIOGETMAXPHYS:
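                             /*
                              * return the maximum physical transfer size
                              */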
1896                         if (copyout(&maxphys, (void *)arg, sizeof (maxphys)))
1897                                 return (EFAULT);
1898                         return (0);
1899 
1900                 /*
1901                  * The following 3 ioctls are for TSufs support,
1902                  * although they could potentially be used elsewhere.
1903                  */
1904                 case _FIO_SET_LUFS_DEBUG:
1905                         if (secpolicy_fs_config(cr, ufsvfsp->vfs_vfs) != 0)
1906                                 return (EPERM);
1907                         lufs_debug = (uint32_t)arg;
1908                         return (0);
1909 
1910                 case _FIO_SET_LUFS_ERROR:
1911                         if (secpolicy_fs_config(cr, ufsvfsp->vfs_vfs) != 0)
1912                                 return (EPERM);
1913                         TRANS_SETERROR(ufsvfsp);
1914                         return (0);
1915 
1916                 case _FIO_GET_TOP_STATS:
1917                 {
1918                         fio_lufs_stats_t *ls;
1919                         ml_unit_t *ul = ufsvfsp->vfs_log;
1920 
1921                         ls = kmem_zalloc(sizeof (*ls), KM_SLEEP);
1922                         ls->ls_debug = ul->un_debug; /* return debug value */
1923                         /* Copy structure if statistics are being kept */
1924                         if (ul->un_logmap->mtm_tops) {
1925                                 ls->ls_topstats = *(ul->un_logmap->mtm_tops);
1926                         }
1927                         error = 0;
1928                         if (copyout(ls, (void *)arg, sizeof (*ls)))
1929                                 error = EFAULT;
1930                         kmem_free(ls, sizeof (*ls));
1931                         return (error);
1932                 }
1933 
1934                 case _FIO_SEEK_DATA:
1935                 case _FIO_SEEK_HOLE:
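                             /*
                              * find the next data region or hole in the file
                              */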
1936                         if (ddi_copyin((void *)arg, &off, sizeof (off), flag))
1937                                 return (EFAULT);
1938                         /* offset parameter is in/out */
1939                         error = ufs_fio_holey(vp, cmd, &off);
1940                         if (error)
1941                                 return (error);
1942                         if (ddi_copyout(&off, (void *)arg, sizeof (off), flag))
1943                                 return (EFAULT);
1944                         return (0);
1945 
1946                 case _FIO_COMPRESSED:
1947                 {
1948                         /*
1949                          * This is a project-private ufs ioctl() to mark
1950                          * the inode as belonging to a compressed file.
1951                          * This is used to mark individual
1952                          * compressed files in a miniroot archive.
1953                          * The files compressed in this manner are
1954                          * automatically decompressed by the dcfs filesystem
1955                          * (via an interception in ufs_lookup - see decompvp())
1956                          * which is layered on top of ufs on a system running
1957                          * from the archive. See uts/common/fs/dcfs for details.
1958                          * This ioctl only marks the file as compressed - the
1959                          * actual compression is done by fiocompress (a
1960                          * userland utility) which invokes this ioctl().
1961                          */
1962                         struct inode *ip = VTOI(vp);
1963 
1964                         error = ufs_lockfs_begin(ufsvfsp, &ulp,
1965                             ULOCKFS_SETATTR_MASK);
1966                         if (error)
1967                                 return (error);
1968 
1969                         if (ulp) {
1970                                 TRANS_BEGIN_ASYNC(ufsvfsp, TOP_IUPDAT,
1971                                     TOP_IUPDAT_SIZE(ip));
1972                         }
1973 
1974                         error = ufs_mark_compressed(vp);
1975 
1976                         if (ulp) {
1977                                 TRANS_END_ASYNC(ufsvfsp, TOP_IUPDAT,
1978                                     TOP_IUPDAT_SIZE(ip));
1979                                 ufs_lockfs_end(ulp);
1980                         }
1981 
1982                         return (error);
1983 
1984                 }
1985 
1986                 default:
1987                         return (ENOTTY);
1988         }
1989 }
1990 
1991 
1992 /* ARGSUSED */
1993 static int
1994 ufs_getattr(struct vnode *vp, struct vattr *vap, int flags,
1995         struct cred *cr, caller_context_t *ct)
1996 {
1997         struct inode *ip = VTOI(vp);
1998         struct ufsvfs *ufsvfsp;
1999         int err;
2000 
2001         if (vap->va_mask == AT_SIZE) {
2002                 /*
2003                  * for performance, if only the size is requested, don't bother
2004                  * with anything else.
2005                  */
2006                 UFS_GET_ISIZE(&vap->va_size, ip);
2007                 return (0);
2008         }
2009 
2010         /*
2011          * inlined lockfs checks
2012          */
2013         ufsvfsp = ip->i_ufsvfs;
2014         if ((ufsvfsp == NULL) || ULOCKFS_IS_HLOCK(&ufsvfsp->vfs_ulockfs)) {
2015                 err = EIO;
2016                 goto out;
2017         }
2018 
2019         rw_enter(&ip->i_contents, RW_READER);
2020         /*
2021          * Return all the attributes.  This should be refined so
2022          * that it only returns what's asked for.
2023          */
2024 
2025         /*
2026          * Copy from inode table.
2027          */
2028         vap->va_type = vp->v_type;
2029         vap->va_mode = ip->i_mode & MODEMASK;
2030         /*
2031          * If there is an ACL and there is a mask entry, then do the
2032          * extra work that completes the equivalent of an acltomode(3)
2033          * call.  According to POSIX P1003.1e, the acl mask should be
2034          * returned in the group permissions field.
2035          *
2036          * - start with the original permission and mode bits (from above)
2037          * - clear the group owner bits
2038          * - add in the mask bits.
2039          */
2040         if (ip->i_ufs_acl && ip->i_ufs_acl->aclass.acl_ismask) {
2041                 vap->va_mode &= ~((VREAD | VWRITE | VEXEC) >> 3);
2042                 vap->va_mode |=
2043                     (ip->i_ufs_acl->aclass.acl_maskbits & PERMMASK) << 3;
2044         }
2045         vap->va_uid = ip->i_uid;
2046         vap->va_gid = ip->i_gid;
2047         vap->va_fsid = ip->i_dev;
2048         vap->va_nodeid = (ino64_t)ip->i_number;
2049         vap->va_nlink = ip->i_nlink;
2050         vap->va_size = ip->i_size;
2051         if (vp->v_type == VCHR || vp->v_type == VBLK)
2052                 vap->va_rdev = ip->i_rdev;
2053         else
2054                 vap->va_rdev = 0;    /* not a block or char special */
2055         mutex_enter(&ip->i_tlock);
2056         ITIMES_NOLOCK(ip);      /* mark correct time in inode */
2057         vap->va_seq = ip->i_seq;
2058         vap->va_atime.tv_sec = (time_t)ip->i_atime.tv_sec;
2059         vap->va_atime.tv_nsec = ip->i_atime.tv_usec*1000;
2060         vap->va_mtime.tv_sec = (time_t)ip->i_mtime.tv_sec;
2061         vap->va_mtime.tv_nsec = ip->i_mtime.tv_usec*1000;
2062         vap->va_ctime.tv_sec = (time_t)ip->i_ctime.tv_sec;
2063         vap->va_ctime.tv_nsec = ip->i_ctime.tv_usec*1000;
2064         mutex_exit(&ip->i_tlock);
2065 
2066         switch (ip->i_mode & IFMT) {
2067 
2068         case IFBLK:
2069                 vap->va_blksize = MAXBSIZE;          /* was BLKDEV_IOSIZE */
2070                 break;
2071 
2072         case IFCHR:
2073                 vap->va_blksize = MAXBSIZE;
2074                 break;
2075 
2076         default:
2077                 vap->va_blksize = ip->i_fs->fs_bsize;
2078                 break;
2079         }
2080         vap->va_nblocks = (fsblkcnt64_t)ip->i_blocks;
2081         rw_exit(&ip->i_contents);
2082         err = 0;
2083 
2084 out:
2085         return (err);
2086 }
2087 
2088 /*
2089  * Special wrapper to provide a callback for secpolicy_vnode_setattr().
2090  * The i_contents lock is already held by the caller and we need to
2091  * declare the inode as 'void *' argument.
2092  */
2093 static int
2094 ufs_priv_access(void *vip, int mode, struct cred *cr)
2095 {
2096         struct inode *ip = vip;
2097 
2098         return (ufs_iaccess(ip, mode, cr, 0));
2099 }
2100 
2101 /*ARGSUSED4*/
2102 static int
2103 ufs_setattr(
2104         struct vnode *vp,
2105         struct vattr *vap,
2106         int flags,
2107         struct cred *cr,
2108         caller_context_t *ct)
2109 {
2110         struct inode *ip = VTOI(vp);
2111         struct ufsvfs *ufsvfsp = ip->i_ufsvfs;
2112         struct fs *fs;
2113         struct ulockfs *ulp;
2114         char *errmsg1;
2115         char *errmsg2;
2116         long blocks;
2117         long int mask = vap->va_mask;
2118         size_t len1, len2;
2119         int issync;
2120         int trans_size;
2121         int dotrans;
2122         int dorwlock;
2123         int error;
2124         int owner_change;
2125         int dodqlock;
2126         timestruc_t now;
2127         vattr_t oldva;
2128         int retry = 1;
2129         int indeadlock;
2130 
2131         /*
2132          * Cannot set these attributes.
2133          */
2134         if ((mask & AT_NOSET) || (mask & AT_XVATTR))
2135                 return (EINVAL);
2136 
2137         /*
2138          * check for forced unmount
2139          */
2140         if (ufsvfsp == NULL)
2141                 return (EIO);
2142 
2143         fs = ufsvfsp->vfs_fs;
2144         if (fs->fs_ronly != 0)
2145                 return (EROFS);
2146 
2147 again:
2148         errmsg1 = NULL;
2149         errmsg2 = NULL;
2150         dotrans = 0;
2151         dorwlock = 0;
2152         dodqlock = 0;
2153 
2154         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_SETATTR_MASK);
2155         if (error)
2156                 goto out;
2157 
2158         /*
2159          * Acquire i_rwlock before TRANS_BEGIN_CSYNC() if this is a file.
2160          * This follows the protocol for read()/write().
2161          */
2162         if (vp->v_type != VDIR) {
2163                 /*
2164                  * ufs_tryirwlock uses rw_tryenter and checks for SLOCK to
2165                  * avoid an i_rwlock/ufs_lockfs_begin deadlock.  If a deadlock
2166                  * is possible, the operation is retried.
2167                  */
2168                 ufs_tryirwlock(&ip->i_rwlock, RW_WRITER, retry_file);
2169                 if (indeadlock) {
2170                         if (ulp)
2171                                 ufs_lockfs_end(ulp);
2172                         goto again;
2173                 }
2174                 dorwlock = 1;
2175         }
2176 
2177         /*
2178          * Truncate file.  Must have write permission and not be a directory.
2179          */
2180         if (mask & AT_SIZE) {
2181                 rw_enter(&ip->i_contents, RW_WRITER);
2182                 if (vp->v_type == VDIR) {
2183                         error = EISDIR;
2184                         goto update_inode;
2185                 }
2186                 if (error = ufs_iaccess(ip, IWRITE, cr, 0))
2187                         goto update_inode;
2188 
2189                 rw_exit(&ip->i_contents);
2190                 error = TRANS_ITRUNC(ip, vap->va_size, 0, cr);
2191                 if (error) {
2192                         rw_enter(&ip->i_contents, RW_WRITER);
2193                         goto update_inode;
2194                 }
2195 
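                     /*
                      * Notify any interested file event watchers that the
                      * file has been truncated.
                      */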
2196                 if (error == 0 && vap->va_size)
2197                         vnevent_truncate(vp, ct);
2198         }
2199 
2200         if (ulp) {
2201                 trans_size = (int)TOP_SETATTR_SIZE(ip);
2202                 TRANS_BEGIN_CSYNC(ufsvfsp, issync, TOP_SETATTR, trans_size);
2203                 ++dotrans;
2204         }
2205 
2206         /*
2207          * Acquire i_rwlock after TRANS_BEGIN_CSYNC() if this is a directory.
2208          * This follows the protocol established by
2209          * ufs_link/create/remove/rename/mkdir/rmdir/symlink.
2210          */
2211         if (vp->v_type == VDIR) {
2212                 ufs_tryirwlock_trans(&ip->i_rwlock, RW_WRITER, TOP_SETATTR,
2213                     retry_dir);
2214                 if (indeadlock)
2215                         goto again;
2216                 dorwlock = 1;
2217         }
2218 
2219         /*
2220          * Grab quota lock if we are changing the file's owner.
2221          */
2222         if (mask & AT_UID) {
2223                 rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
2224                 dodqlock = 1;
2225         }
2226         rw_enter(&ip->i_contents, RW_WRITER);
2227 
2228         oldva.va_mode = ip->i_mode;
2229         oldva.va_uid = ip->i_uid;
2230         oldva.va_gid = ip->i_gid;
2231 
2232         vap->va_mask &= ~AT_SIZE;
2233 
2234         error = secpolicy_vnode_setattr(cr, vp, vap, &oldva, flags,
2235             ufs_priv_access, ip);
2236         if (error)
2237                 goto update_inode;
2238 
2239         mask = vap->va_mask;
2240 
2241         /*
2242          * Change file access modes.
2243          */
2244         if (mask & AT_MODE) {
2245                 ip->i_mode = (ip->i_mode & IFMT) | (vap->va_mode & ~IFMT);
2246                 TRANS_INODE(ufsvfsp, ip);
2247                 ip->i_flag |= ICHG;
2248                 if (stickyhack) {
2249                         mutex_enter(&vp->v_lock);
2250                         if ((ip->i_mode & (ISVTX | IEXEC | IFDIR)) == ISVTX)
2251                                 vp->v_flag |= VSWAPLIKE;
2252                         else
2253                                 vp->v_flag &= ~VSWAPLIKE;
2254                         mutex_exit(&vp->v_lock);
2255                 }
2256         }
2257         if (mask & (AT_UID|AT_GID)) {
2258                 if (mask & AT_UID) {
2259                         /*
2260                          * Don't change ownership of the quota inode.
2261                          */
2262                         if (ufsvfsp->vfs_qinod == ip) {
2263                                 ASSERT(ufsvfsp->vfs_qflags & MQ_ENABLED);
2264                                 error = EINVAL;
2265                                 goto update_inode;
2266                         }
2267 
2268                         /*
2269                          * No real ownership change.
2270                          */
2271                         if (ip->i_uid == vap->va_uid) {
2272                                 blocks = 0;
2273                                 owner_change = 0;
2274                         }
2275                         /*
2276                          * Remove the blocks and the file from the old user's
2277                          * quota.
2278                          */
2279                         else {
2280                                 blocks = ip->i_blocks;
2281                                 owner_change = 1;
2282 
2283                                 (void) chkdq(ip, -blocks, /* force */ 1, cr,
2284                                     (char **)NULL, (size_t *)NULL);
2285                                 (void) chkiq(ufsvfsp, /* change */ -1, ip,
2286                                     (uid_t)ip->i_uid, /* force */ 1, cr,
2287                                     (char **)NULL, (size_t *)NULL);
2288                                 dqrele(ip->i_dquot);
2289                         }
2290 
2291                         ip->i_uid = vap->va_uid;
2292 
2293                         /*
2294                          * There is a real ownership change.
2295                          */
2296                         if (owner_change) {
2297                                 /*
2298                                  * Add the blocks and the file to the new
2299                                  * user's quota.
2300                                  */
2301                                 ip->i_dquot = getinoquota(ip);
2302                                 (void) chkdq(ip, blocks, /* force */ 1, cr,
2303                                     &errmsg1, &len1);
2304                                 (void) chkiq(ufsvfsp, /* change */ 1,
2305                                     (struct inode *)NULL, (uid_t)ip->i_uid,
2306                                     /* force */ 1, cr, &errmsg2, &len2);
2307                         }
2308                 }
2309                 if (mask & AT_GID) {
2310                         ip->i_gid = vap->va_gid;
2311                 }
2312                 TRANS_INODE(ufsvfsp, ip);
2313                 ip->i_flag |= ICHG;
2314         }
2315         /*
2316          * Change file access or modified times.
2317          */
2318         if (mask & (AT_ATIME|AT_MTIME)) {
2319                 /* Check that the time value is within ufs range */
2320                 if (((mask & AT_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) ||
2321                     ((mask & AT_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) {
2322                         error = EOVERFLOW;
2323                         goto update_inode;
2324                 }
2325 
2326                 /*
2327                  * if the "noatime" mount option is set and only an atime
2328                  * update is requested, do nothing.  No error is returned.
2329                  */
2330                 if ((ufsvfsp->vfs_noatime) &&
2331                     ((mask & (AT_ATIME|AT_MTIME)) == AT_ATIME))
2332                         goto skip_atime;
2333 
2334                 if (mask & AT_ATIME) {
2335                         ip->i_atime.tv_sec = vap->va_atime.tv_sec;
2336                         ip->i_atime.tv_usec = vap->va_atime.tv_nsec / 1000;
2337                         ip->i_flag &= ~IACC;
2338                 }
2339                 if (mask & AT_MTIME) {
2340                         ip->i_mtime.tv_sec = vap->va_mtime.tv_sec;
2341                         ip->i_mtime.tv_usec = vap->va_mtime.tv_nsec / 1000;
2342                         gethrestime(&now);
2343                         if (now.tv_sec > TIME32_MAX) {
2344                                 /*
2345                                  * In 2038, ctime sticks forever.
2346                                  */
2347                                 ip->i_ctime.tv_sec = TIME32_MAX;
2348                                 ip->i_ctime.tv_usec = 0;
2349                         } else {
2350                                 ip->i_ctime.tv_sec = now.tv_sec;
2351                                 ip->i_ctime.tv_usec = now.tv_nsec / 1000;
2352                         }
2353                         ip->i_flag &= ~(IUPD|ICHG);
2354                         ip->i_flag |= IMODTIME;
2355                 }
2356                 TRANS_INODE(ufsvfsp, ip);
2357                 ip->i_flag |= IMOD;
2358         }
2359 
2360 skip_atime:
2361         /*
2362          * The presence of a shadow inode may indicate an ACL, but it does
2363          * not guarantee one.  Future FSD types should be handled here too
2364          * and check for the presence of the attribute-specific data
2365          * before referencing it.
2366          */
2367         if (ip->i_shadow) {
2368                 /*
2369                  * XXX if ufs_iupdat is changed to sandbagged write fix
2370                  * ufs_acl_setattr to push ip to keep acls consistent
2371                  *
2372                  * Suppress out of inodes messages if we will retry.
2373                  */
2374                 if (retry)
2375                         ip->i_flag |= IQUIET;
2376                 error = ufs_acl_setattr(ip, vap, cr);
2377                 ip->i_flag &= ~IQUIET;
2378         }
2379 
2380 update_inode:
2381         /*
2382          * Setattr always increases the sequence number
2383          */
2384         ip->i_seq++;
2385 
2386         /*
2387          * if nfsd and not logging, push synchronously
2388          */
2389         if ((curthread->t_flag & T_DONTPEND) && !TRANS_ISTRANS(ufsvfsp)) {
2390                 ufs_iupdat(ip, 1);
2391         } else {
2392                 ITIMES_NOLOCK(ip);
2393         }
2394 
2395         rw_exit(&ip->i_contents);
2396         if (dodqlock) {
2397                 rw_exit(&ufsvfsp->vfs_dqrwlock);
2398         }
2399         if (dorwlock)
2400                 rw_exit(&ip->i_rwlock);
2401 
2402         if (ulp) {
2403                 if (dotrans) {
2404                         int terr = 0;
2405                         TRANS_END_CSYNC(ufsvfsp, terr, issync, TOP_SETATTR,
2406                             trans_size);
2407                         if (error == 0)
2408                                 error = terr;
2409                 }
2410                 ufs_lockfs_end(ulp);
2411         }
2412 out:
2413         /*
2414          * If out of inodes or blocks, see if we can free something
2415          * up from the delete queue.
2416          */
2417         if ((error == ENOSPC) && retry && TRANS_ISTRANS(ufsvfsp)) {
2418                 ufs_delete_drain_wait(ufsvfsp, 1);
2419                 retry = 0;
2420                 if (errmsg1 != NULL)
2421                         kmem_free(errmsg1, len1);
2422                 if (errmsg2 != NULL)
2423                         kmem_free(errmsg2, len2);
2424                 goto again;
2425         }
2426         if (errmsg1 != NULL) {
2427                 uprintf(errmsg1);
2428                 kmem_free(errmsg1, len1);
2429         }
2430         if (errmsg2 != NULL) {
2431                 uprintf(errmsg2);
2432                 kmem_free(errmsg2, len2);
2433         }
2434         return (error);
2435 }
2436 
2437 /*ARGSUSED*/
2438 static int
2439 ufs_access(struct vnode *vp, int mode, int flags, struct cred *cr,
2440         caller_context_t *ct)
2441 {
2442         struct inode *ip = VTOI(vp);
2443 
2444         if (ip->i_ufsvfs == NULL)
2445                 return (EIO);
2446 
2447         /*
2448          * The ufs_iaccess function wants to be called with
2449          * mode bits expressed as "ufs specific" bits.
2450          * I.e., VWRITE|VREAD|VEXEC do not make sense to
2451          * ufs_iaccess() but IWRITE|IREAD|IEXEC do.
2452          * But since they're the same, we just pass the vnode mode
2453          * bits and verify that assumption at compile time.
2454          */
2455 #if IWRITE != VWRITE || IREAD != VREAD || IEXEC != VEXEC
2456 #error "ufs_access needs to map Vmodes to Imodes"
2457 #endif
2458         return (ufs_iaccess(ip, mode, cr, 1));
2459 }
2460 
2461 /* ARGSUSED */
2462 static int
2463 ufs_readlink(struct vnode *vp, struct uio *uiop, struct cred *cr,
2464         caller_context_t *ct)
2465 {
2466         struct inode *ip = VTOI(vp);
2467         struct ufsvfs *ufsvfsp;
2468         struct ulockfs *ulp;
2469         int error;
2470         int fastsymlink;
2471 
2472         if (vp->v_type != VLNK) {
2473                 error = EINVAL;
2474                 goto nolockout;
2475         }
2476 
2477         /*
2478          * If the symbolic link is empty there is nothing to read.
2479          * Fast-track these empty symbolic links.
2480          */
2481         if (ip->i_size == 0) {
2482                 error = 0;
2483                 goto nolockout;
2484         }
2485 
2486         ufsvfsp = ip->i_ufsvfs;
2487         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_READLINK_MASK);
2488         if (error)
2489                 goto nolockout;
2490         /*
2491          * The ip->i_rwlock protects the data blocks used for FASTSYMLINK
2492          */
2493 again:
2494         fastsymlink = 0;
2495         if (ip->i_flag & IFASTSYMLNK) {
2496                 rw_enter(&ip->i_rwlock, RW_READER);
2497                 rw_enter(&ip->i_contents, RW_READER);
2498                 if (ip->i_flag & IFASTSYMLNK) {
2499                         if (!ULOCKFS_IS_NOIACC(ITOUL(ip)) &&
2500                             (ip->i_fs->fs_ronly == 0) &&
2501                             (!ufsvfsp->vfs_noatime)) {
2502                                 mutex_enter(&ip->i_tlock);
2503                                 ip->i_flag |= IACC;
2504                                 mutex_exit(&ip->i_tlock);
2505                         }
2506                         error = uiomove((caddr_t)&ip->i_db[1],
2507                             MIN(ip->i_size, uiop->uio_resid),
2508                             UIO_READ, uiop);
2509                         ITIMES(ip);
2510                         ++fastsymlink;
2511                 }
2512                 rw_exit(&ip->i_contents);
2513                 rw_exit(&ip->i_rwlock);
2514         }
2515         if (!fastsymlink) {
2516                 ssize_t size;   /* number of bytes read  */
2517                 caddr_t basep;  /* pointer to input data */
2518                 ino_t ino;
2519                 long  igen;
2520                 struct uio tuio;        /* temp uio struct */
2521                 struct uio *tuiop;
2522                 iovec_t tiov;           /* temp iovec struct */
2523                 char kbuf[FSL_SIZE];    /* buffer to hold fast symlink */
2524                 int tflag = 0;          /* flag to indicate temp vars used */
2525 
2526                 ino = ip->i_number;
2527                 igen = ip->i_gen;
2528                 size = uiop->uio_resid;
2529                 basep = uiop->uio_iov->iov_base;
2530                 tuiop = uiop;
2531 
2532                 rw_enter(&ip->i_rwlock, RW_WRITER);
2533                 rw_enter(&ip->i_contents, RW_WRITER);
2534                 if (ip->i_flag & IFASTSYMLNK) {
2535                         rw_exit(&ip->i_contents);
2536                         rw_exit(&ip->i_rwlock);
2537                         goto again;
2538                 }
2539 
2540                 /* can this be a fast symlink and is it a user buffer? */
2541                 if (ip->i_size <= FSL_SIZE &&
2542                     (uiop->uio_segflg == UIO_USERSPACE ||
2543                     uiop->uio_segflg == UIO_USERISPACE)) {
2544 
2545                         bzero(&tuio, sizeof (struct uio));
2546                         /*
2547                          * Set up a kernel buffer to read the link into.
2548                          * This closes a race where the user buffer could be
2549                          * corrupted before it is copied into the inode.
2550                          */
2551                         size = ip->i_size;
2552                         tiov.iov_len = size;
2553                         tiov.iov_base = kbuf;
2554                         tuio.uio_iov = &tiov;
2555                         tuio.uio_iovcnt = 1;
2556                         tuio.uio_offset = uiop->uio_offset;
2557                         tuio.uio_segflg = UIO_SYSSPACE;
2558                         tuio.uio_fmode = uiop->uio_fmode;
2559                         tuio.uio_extflg = uiop->uio_extflg;
2560                         tuio.uio_limit = uiop->uio_limit;
2561                         tuio.uio_resid = size;
2562 
2563                         basep = tuio.uio_iov->iov_base;
2564                         tuiop = &tuio;
2565                         tflag = 1;
2566                 }
2567 
2568                 error = rdip(ip, tuiop, 0, cr);
2569                 if (!(error == 0 && ip->i_number == ino && ip->i_gen == igen)) {
2570                         rw_exit(&ip->i_contents);
2571                         rw_exit(&ip->i_rwlock);
2572                         goto out;
2573                 }
2574 
2575                 if (tflag == 0)
2576                         size -= uiop->uio_resid;
2577 
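                     /*
                      * Convert to a fast symlink only if the entire target
                      * was read successfully and it fits within the inode.
                      */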
2578                 if ((tflag == 0 && ip->i_size <= FSL_SIZE &&
2579                     ip->i_size == size) || (tflag == 1 &&
2580                     tuio.uio_resid == 0)) {
2581                         error = kcopy(basep, &ip->i_db[1], ip->i_size);
2582                         if (error == 0) {
2583                                 ip->i_flag |= IFASTSYMLNK;
2584                                 /*
2585                                  * free page
2586                                  */
2587                                 (void) VOP_PUTPAGE(ITOV(ip),
2588                                     (offset_t)0, PAGESIZE,
2589                                     (B_DONTNEED | B_FREE | B_FORCE | B_ASYNC),
2590                                     cr, ct);
2591                         } else {
2592                                 int i;
2593                                 /* error, clear garbage left behind */
2594                                 for (i = 1; i < NDADDR; i++)
2595                                         ip->i_db[i] = 0;
2596                                 for (i = 0; i < NIADDR; i++)
2597                                         ip->i_ib[i] = 0;
2598                         }
2599                 }
2600                 if (tflag == 1) {
2601                         /* now, copy it into the user buffer */
2602                         error = uiomove((caddr_t)kbuf,
2603                             MIN(size, uiop->uio_resid),
2604                             UIO_READ, uiop);
2605                 }
2606                 rw_exit(&ip->i_contents);
2607                 rw_exit(&ip->i_rwlock);
2608         }
2609 out:
2610         if (ulp) {
2611                 ufs_lockfs_end(ulp);
2612         }
2613 nolockout:
2614         return (error);
2615 }
2616 
2617 /* ARGSUSED */
2618 static int
2619 ufs_fsync(struct vnode *vp, int syncflag, struct cred *cr,
2620         caller_context_t *ct)
2621 {
2622         struct inode *ip = VTOI(vp);
2623         struct ufsvfs *ufsvfsp = ip->i_ufsvfs;
2624         struct ulockfs *ulp;
2625         int error;
2626 
2627         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_FSYNC_MASK);
2628         if (error)
2629                 return (error);
2630 
2631         if (TRANS_ISTRANS(ufsvfsp)) {
2632                 /*
2633                  * First push out any data pages
2634                  */
2635                 if (vn_has_cached_data(vp) && !(syncflag & FNODSYNC) &&
2636                     (vp->v_type != VCHR) && !(IS_SWAPVP(vp))) {
2637                         error = VOP_PUTPAGE(vp, (offset_t)0, (size_t)0,
2638                             0, CRED(), ct);
2639                         if (error)
2640                                 goto out;
2641                 }
2642 
2643                 /*
2644                  * Delta any delayed inode time updates
2645                  * and push inode to log.
2646                  * All other inode deltas will have already been delta'd
2647                  * and will be pushed during the commit.
2648                  */
2649                 if (!(syncflag & FDSYNC) &&
2650                     ((ip->i_flag & (IMOD|IMODACC)) == IMODACC)) {
2651                         if (ulp) {
2652                                 TRANS_BEGIN_ASYNC(ufsvfsp, TOP_FSYNC,
2653                                     TOP_SYNCIP_SIZE);
2654                         }
2655                         rw_enter(&ip->i_contents, RW_READER);
2656                         mutex_enter(&ip->i_tlock);
2657                         ip->i_flag &= ~IMODTIME;
2658                         mutex_exit(&ip->i_tlock);
2659                         ufs_iupdat(ip, I_SYNC);
2660                         rw_exit(&ip->i_contents);
2661                         if (ulp) {
2662                                 TRANS_END_ASYNC(ufsvfsp, TOP_FSYNC,
2663                                     TOP_SYNCIP_SIZE);
2664                         }
2665                 }
2666 
2667                 /*
2668                  * Commit the Moby transaction
2669                  *
2670                  * Deltas have already been made so we just need to
2671                  * commit them with a synchronous transaction.
2672                  * TRANS_BEGIN_SYNC() will return an error
2673                  * if there are no deltas to commit (i.e., the
2674                  * transaction is empty).
2675                  */
2676                 if (ulp) {
2677                         TRANS_BEGIN_SYNC(ufsvfsp, TOP_FSYNC, TOP_COMMIT_SIZE,
2678                             error);
2679                         if (error) {
2680                                 error = 0; /* commit wasn't needed */
2681                                 goto out;
2682                         }
2683                         TRANS_END_SYNC(ufsvfsp, error, TOP_FSYNC,
2684                             TOP_COMMIT_SIZE);
2685                 }
2686         } else {        /* not logging */
2687                 if (!(IS_SWAPVP(vp)))
2688                         if (syncflag & FNODSYNC) {
2689                                 /* Just update the inode only */
2690                                 TRANS_IUPDAT(ip, 1);
2691                                 error = 0;
2692                         } else if (syncflag & FDSYNC)
2693                                 /* Do data-synchronous writes */
2694                                 error = TRANS_SYNCIP(ip, 0, I_DSYNC, TOP_FSYNC);
2695                         else
2696                                 /* Do synchronous writes */
2697                                 error = TRANS_SYNCIP(ip, 0, I_SYNC, TOP_FSYNC);
2698 
2699                 rw_enter(&ip->i_contents, RW_WRITER);
2700                 if (!error)
2701                         error = ufs_sync_indir(ip);
2702                 rw_exit(&ip->i_contents);
2703         }
2704 out:
2705         if (ulp) {
2706                 ufs_lockfs_end(ulp);
2707         }
2708         return (error);
2709 }
2710 
2711 /*ARGSUSED*/
2712 static void
2713 ufs_inactive(struct vnode *vp, struct cred *cr, caller_context_t *ct)
2714 {
2715         ufs_iinactive(VTOI(vp));
2716 }
2717 
2718 /*
2719  * Unix file system operations having to do with directory manipulation.
2720  */
2721 int ufs_lookup_idle_count = 2;  /* Number of inodes to idle each time */
2722 /* ARGSUSED */
2723 static int
2724 ufs_lookup(struct vnode *dvp, char *nm, struct vnode **vpp,
2725         struct pathname *pnp, int flags, struct vnode *rdir, struct cred *cr,
2726         caller_context_t *ct, int *direntflags, pathname_t *realpnp)
2727 {
2728         struct inode *ip;
2729         struct inode *sip;
2730         struct inode *xip;
2731         struct ufsvfs *ufsvfsp;
2732         struct ulockfs *ulp;
2733         struct vnode *vp;
2734         int error;
2735 
2736         /*
2737          * Check flags for type of lookup (regular file or attribute file)
2738          */
2739 
2740         ip = VTOI(dvp);
2741 
2742         if (flags & LOOKUP_XATTR) {
2743 
2744                 /*
2745                  * If not mounted with XATTR support then return EINVAL
2746                  */
2747 
2748                 if (!(ip->i_ufsvfs->vfs_vfs->vfs_flag & VFS_XATTR))
2749                         return (EINVAL);
2750                 /*
2751                  * We don't allow recursive attributes...
2752                  * Maybe someday we will.
2753                  */
2754                 if ((ip->i_cflags & IXATTR)) {
2755                         return (EINVAL);
2756                 }
2757 
2758                 if ((vp = dnlc_lookup(dvp, XATTR_DIR_NAME)) == NULL) {
2759                         error = ufs_xattr_getattrdir(dvp, &sip, flags, cr);
2760                         if (error) {
2761                                 *vpp = NULL;
2762                                 goto out;
2763                         }
2764 
2765                         vp = ITOV(sip);
2766                         dnlc_update(dvp, XATTR_DIR_NAME, vp);
2767                 }
2768 
2769                 /*
2770                  * Check accessibility of directory.
2771                  */
2772                 if (vp == DNLC_NO_VNODE) {
2773                         VN_RELE(vp);
2774                         error = ENOENT;
2775                         goto out;
2776                 }
2777                 if ((error = ufs_iaccess(VTOI(vp), IEXEC, cr, 1)) != 0) {
2778                         VN_RELE(vp);
2779                         goto out;
2780                 }
2781 
2782                 *vpp = vp;
2783                 return (0);
2784         }
2785 
2786         /*
2787          * Check for a null component, which we should treat as
2788          * looking at dvp from within its parent, so we don't
2789          * need a call to ufs_iaccess(), as it has already been
2790          * done.
2791          */
2792         if (nm[0] == 0) {
2793                 VN_HOLD(dvp);
2794                 error = 0;
2795                 *vpp = dvp;
2796                 goto out;
2797         }
2798 
2799         /*
2800          * Check for ".", i.e., the directory itself. This is a quick
2801          * check and avoids adding "." into the dnlc (which has been
2802          * seen to occupy >10% of the cache).
2803          */
2804         if ((nm[0] == '.') && (nm[1] == 0)) {
2805                 /*
2806                  * Don't return without checking accessibility
2807                  * of the directory. We only need the hold if
2808                  * we are going to return it.
2809                  */
2810                 if ((error = ufs_iaccess(ip, IEXEC, cr, 1)) == 0) {
2811                         VN_HOLD(dvp);
2812                         *vpp = dvp;
2813                 }
2814                 goto out;
2815         }
2816 
2817         /*
2818          * Fast path: Check the directory name lookup cache.
2819          */
2820         if (vp = dnlc_lookup(dvp, nm)) {
2821                 /*
2822                  * Check accessibility of directory.
2823                  */
2824                 if ((error = ufs_iaccess(ip, IEXEC, cr, 1)) != 0) {
2825                         VN_RELE(vp);
2826                         goto out;
2827                 }
2828                 if (vp == DNLC_NO_VNODE) {
2829                         VN_RELE(vp);
2830                         error = ENOENT;
2831                         goto out;
2832                 }
2833                 xip = VTOI(vp);
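                     /*
                      * ufs_lockfs_begin() was not called on this path; clear
                      * ulp so the code after "fastpath" skips ufs_lockfs_end().
                      */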
2834                 ulp = NULL;
2835                 goto fastpath;
2836         }
2837 
2838         /*
2839          * Keep the idle queue from getting too long by
2840          * idling two inodes before attempting to allocate another.
2841  * This operation must be performed before entering
2842  * lockfs or a transaction.
2843          */
2844         if (ufs_idle_q.uq_ne > ufs_idle_q.uq_hiwat)
2845                 if ((curthread->t_flag & T_DONTBLOCK) == 0) {
2846                         ins.in_lidles.value.ul += ufs_lookup_idle_count;
2847                         ufs_idle_some(ufs_lookup_idle_count);
2848                 }
2849 
2850 retry_lookup:
2851         /*
2852          * Check accessibility of directory.
2853          */
2854         if (error = ufs_diraccess(ip, IEXEC, cr))
2855                 goto out;
2856 
2857         ufsvfsp = ip->i_ufsvfs;
2858         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_LOOKUP_MASK);
2859         if (error)
2860                 goto out;
2861 
2862         error = ufs_dirlook(ip, nm, &xip, cr, 1, 0);
2863 
2864 fastpath:
2865         if (error == 0) {
2866                 ip = xip;
2867                 *vpp = ITOV(ip);
2868 
2869                 /*
2870                  * If vnode is a device return special vnode instead.
2871                  */
2872                 if (IS_DEVVP(*vpp)) {
2873                         struct vnode *newvp;
2874 
2875                         newvp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type,
2876                             cr);
2877                         VN_RELE(*vpp);
2878                         if (newvp == NULL)
2879                                 error = ENOSYS;
2880                         else
2881                                 *vpp = newvp;
2882                 } else if (ip->i_cflags & ICOMPRESS) {
2883                         struct vnode *newvp;
2884 
2885                         /*
2886                          * Compressed file, substitute dcfs vnode
2887                          */
2888                         newvp = decompvp(*vpp, cr, ct);
2889                         VN_RELE(*vpp);
2890                         if (newvp == NULL)
2891                                 error = ENOSYS;
2892                         else
2893                                 *vpp = newvp;
2894                 }
2895         }
2896         if (ulp) {
2897                 ufs_lockfs_end(ulp);
2898         }
2899 
2900         if (error == EAGAIN)
2901                 goto retry_lookup;
2902 
2903 out:
2904         return (error);
2905 }
2906 
2907 /*ARGSUSED*/
2908 static int
2909 ufs_create(struct vnode *dvp, char *name, struct vattr *vap, enum vcexcl excl,
2910         int mode, struct vnode **vpp, struct cred *cr, int flag,
2911         caller_context_t *ct, vsecattr_t *vsecp)
2912 {
2913         struct inode *ip;
2914         struct inode *xip;
2915         struct inode *dip;
2916         struct vnode *xvp;
2917         struct ufsvfs *ufsvfsp;
2918         struct ulockfs *ulp;
2919         int error;
2920         int issync;
2921         int truncflag;
2922         int trans_size;
2923         int noentry;
2924         int defer_dip_seq_update = 0;   /* need to defer update of dip->i_seq */
2925         int retry = 1;
2926         int indeadlock;
2927 
2928 again:
2929         ip = VTOI(dvp);
2930         ufsvfsp = ip->i_ufsvfs;
2931         truncflag = 0;
2932 
2933         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_CREATE_MASK);
2934         if (error)
2935                 goto out;
2936 
2937         if (ulp) {
2938                 trans_size = (int)TOP_CREATE_SIZE(ip);
2939                 TRANS_BEGIN_CSYNC(ufsvfsp, issync, TOP_CREATE, trans_size);
2940         }
2941 
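             /*
              * Strip the sticky bit from the requested mode unless the
              * caller is privileged to set it.
              */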
2942         if ((vap->va_mode & VSVTX) && secpolicy_vnode_stky_modify(cr) != 0)
2943                 vap->va_mode &= ~VSVTX;
2944 
2945         if (*name == '\0') {
2946                 /*
2947                  * Null component name refers to the directory itself.
2948                  */
2949                 VN_HOLD(dvp);
2950                 /*
2951                  * Even though this is an error case, we need to grab the
2952                  * quota lock since the error handling code below is common.
2953                  */
2954                 rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
2955                 rw_enter(&ip->i_contents, RW_WRITER);
2956                 error = EEXIST;
2957         } else {
2958                 xip = NULL;
2959                 noentry = 0;
2960                 /*
2961                  * ufs_tryirwlock_trans uses rw_tryenter and checks for SLOCK
2962                  * to avoid a deadlock between i_rwlock and ufs_lockfs_begin.
2963                  * If a deadlock is possible, the operation is retried.
2964                  */
2965                 ufs_tryirwlock_trans(&ip->i_rwlock, RW_WRITER, TOP_CREATE,
2966                     retry_dir);
2967                 if (indeadlock)
2968                         goto again;
2969 
2970                 xvp = dnlc_lookup(dvp, name);
2971                 if (xvp == DNLC_NO_VNODE) {
2972                         noentry = 1;
2973                         VN_RELE(xvp);
2974                         xvp = NULL;
2975                 }
2976                 if (xvp) {
2977                         rw_exit(&ip->i_rwlock);
2978                         if (error = ufs_iaccess(ip, IEXEC, cr, 1)) {
2979                                 VN_RELE(xvp);
2980                         } else {
2981                                 error = EEXIST;
2982                                 xip = VTOI(xvp);
2983                         }
2984                 } else {
2985                         /*
2986                          * Suppress file system full message if we will retry
2987                          */
2988                         error = ufs_direnter_cm(ip, name, DE_CREATE,
2989                             vap, &xip, cr, (noentry | (retry ? IQUIET : 0)));
2990                         if (error == EAGAIN) {
2991                                 if (ulp) {
2992                                         TRANS_END_CSYNC(ufsvfsp, error, issync,
2993                                             TOP_CREATE, trans_size);
2994                                         ufs_lockfs_end(ulp);
2995                                 }
2996                                 goto again;
2997                         }
2998                         rw_exit(&ip->i_rwlock);
2999                 }
3000                 ip = xip;
3001                 if (ip != NULL) {
3002                         rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
3003                         rw_enter(&ip->i_contents, RW_WRITER);
3004                 }
3005         }
3006 
3007         /*
3008          * If the file already exists and this is a non-exclusive create,
3009          * check permissions and allow access for non-directories.
3010          * Read-only create of an existing directory is also allowed.
3011          * We fail an exclusive create of anything which already exists.
3012          */
3013         if (error == EEXIST) {
3014                 dip = VTOI(dvp);
3015                 if (excl == NONEXCL) {
3016                         if ((((ip->i_mode & IFMT) == IFDIR) ||
3017                             ((ip->i_mode & IFMT) == IFATTRDIR)) &&
3018                             (mode & IWRITE))
3019                                 error = EISDIR;
3020                         else if (mode)
3021                                 error = ufs_iaccess(ip, mode, cr, 0);
3022                         else
3023                                 error = 0;
3024                 }
3025                 if (error) {
3026                         rw_exit(&ip->i_contents);
3027                         rw_exit(&ufsvfsp->vfs_dqrwlock);
3028                         VN_RELE(ITOV(ip));
3029                         goto unlock;
3030                 }
3031                 /*
3032                  * If the error EEXIST was set, then i_seq can not
3033                  * have been updated. The sequence number interface
3034                  * is defined such that a non-error VOP_CREATE must
3035                  * increase the dir va_seq by at least one. If we
3036                  * have cleared the error, increase i_seq. Note that
3037                  * we are increasing the dir i_seq and in rare cases
3038                  * ip may actually be from the dvp, so we already have
3039                  * the locks and it will not be subject to truncation.
3040                  * In case we have to update i_seq of the parent
3041                  * directory dip, we have to defer it till we have
3042                  * released our locks on ip due to lock ordering requirements.
3043                  */
3044                 if (ip != dip)
3045                         defer_dip_seq_update = 1;
3046                 else
3047                         ip->i_seq++;
3048 
3049                 if (((ip->i_mode & IFMT) == IFREG) &&
3050                     (vap->va_mask & AT_SIZE) && vap->va_size == 0) {
3051                         /*
3052                          * Truncate regular files, if requested by caller.
3053                          * Grab i_rwlock to make sure no one else is
3054                          * currently writing to the file (we promised
3055                          * bmap we would do this).
3056                          * Must get the locks in the correct order.
3057                          */
3058                         if (ip->i_size == 0) {
3059                                 ip->i_flag |= ICHG | IUPD;
3060                                 ip->i_seq++;
3061                                 TRANS_INODE(ufsvfsp, ip);
3062                         } else {
3063                                 /*
3064                                  * Large Files: why check here?
3065                                  * Though we do it in vn_create(), we really
3066                                  * want to guarantee that we do not destroy
3067                                  * large-file data by atomically checking
3068                                  * the size while holding the contents
3069                                  * lock.
3070                                  */
3071                                 if (flag && !(flag & FOFFMAX) &&
3072                                     ((ip->i_mode & IFMT) == IFREG) &&
3073                                     (ip->i_size > (offset_t)MAXOFF32_T)) {
3074                                         rw_exit(&ip->i_contents);
3075                                         rw_exit(&ufsvfsp->vfs_dqrwlock);
3076                                         error = EOVERFLOW;
3077                                         goto unlock;
3078                                 }
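                                     /*
                                      * With logging, defer the truncation
                                      * until after the transaction ends;
                                      * see the truncflag handling below.
                                      */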
3079                                 if (TRANS_ISTRANS(ufsvfsp))
3080                                         truncflag++;
3081                                 else {
3082                                         rw_exit(&ip->i_contents);
3083                                         rw_exit(&ufsvfsp->vfs_dqrwlock);
3084                                         ufs_tryirwlock_trans(&ip->i_rwlock,
3085                                             RW_WRITER, TOP_CREATE,
3086                                             retry_file);
3087                                         if (indeadlock) {
3088                                                 VN_RELE(ITOV(ip));
3089                                                 goto again;
3090                                         }
3091                                         rw_enter(&ufsvfsp->vfs_dqrwlock,
3092                                             RW_READER);
3093                                         rw_enter(&ip->i_contents, RW_WRITER);
3094                                         (void) ufs_itrunc(ip, (u_offset_t)0, 0,
3095                                             cr);
3096                                         rw_exit(&ip->i_rwlock);
3097                                 }
3098 
3099                         }
3100                         if (error == 0) {
3101                                 vnevent_create(ITOV(ip), ct);
3102                         }
3103                 }
3104         }
3105 
3106         if (error) {
3107                 if (ip != NULL) {
3108                         rw_exit(&ufsvfsp->vfs_dqrwlock);
3109                         rw_exit(&ip->i_contents);
3110                 }
3111                 goto unlock;
3112         }
3113 
3114         *vpp = ITOV(ip);
3115         ITIMES(ip);
3116         rw_exit(&ip->i_contents);
3117         rw_exit(&ufsvfsp->vfs_dqrwlock);
3118 
3119         /*
3120          * If vnode is a device return special vnode instead.
3121          */
3122         if (!error && IS_DEVVP(*vpp)) {
3123                 struct vnode *newvp;
3124 
3125                 newvp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cr);
3126                 VN_RELE(*vpp);
3127                 if (newvp == NULL) {
3128                         error = ENOSYS;
3129                         goto unlock;
3130                 }
3131                 truncflag = 0;
3132                 *vpp = newvp;
3133         }
3134 unlock:
3135 
3136         /*
3137          * Do the deferred update of the parent directory's sequence
3138          * number now.
3139          */
3140         if (defer_dip_seq_update == 1) {
3141                 rw_enter(&dip->i_contents, RW_READER);
3142                 mutex_enter(&dip->i_tlock);
3143                 dip->i_seq++;
3144                 mutex_exit(&dip->i_tlock);
3145                 rw_exit(&dip->i_contents);
3146         }
3147 
3148         if (ulp) {
3149                 int terr = 0;
3150 
3151                 TRANS_END_CSYNC(ufsvfsp, terr, issync, TOP_CREATE,
3152                     trans_size);
3153 
3154                 /*
3155                  * If we haven't had a more interesting failure
3156                  * already, then anything that might've happened
3157                  * here should be reported.
3158                  */
3159                 if (error == 0)
3160                         error = terr;
3161         }
3162 
3163         if (!error && truncflag) {
3164                 ufs_tryirwlock(&ip->i_rwlock, RW_WRITER, retry_trunc);
3165                 if (indeadlock) {
3166                         if (ulp)
3167                                 ufs_lockfs_end(ulp);
3168                         VN_RELE(ITOV(ip));
3169                         goto again;
3170                 }
3171                 (void) TRANS_ITRUNC(ip, (u_offset_t)0, 0, cr);
3172                 rw_exit(&ip->i_rwlock);
3173         }
3174 
3175         if (ulp)
3176                 ufs_lockfs_end(ulp);
3177 
3178         /*
3179  * If no inodes are available, try to free one up out of the
3180          * pending delete queue.
3181          */
3182         if ((error == ENOSPC) && retry && TRANS_ISTRANS(ufsvfsp)) {
3183                 ufs_delete_drain_wait(ufsvfsp, 1);
3184                 retry = 0;
3185                 goto again;
3186         }
3187 
3188 out:
3189         return (error);
3190 }
3191 
3192 extern int ufs_idle_max;
3193 /*ARGSUSED*/
3194 static int
3195 ufs_remove(struct vnode *vp, char *nm, struct cred *cr,
3196         caller_context_t *ct, int flags)
3197 {
3198         struct inode *ip = VTOI(vp);
3199         struct ufsvfs *ufsvfsp  = ip->i_ufsvfs;
3200         struct ulockfs *ulp;
3201         vnode_t *rmvp = NULL;   /* Vnode corresponding to name being removed */
3202         int indeadlock;
3203         int error;
3204         int issync;
3205         int trans_size;
3206 
3207         /*
3208          * don't let the delete queue get too long
3209          */
3210         if (ufsvfsp == NULL) {
3211                 error = EIO;
3212                 goto out;
3213         }
3214         if (ufsvfsp->vfs_delete.uq_ne > ufs_idle_max)
3215                 ufs_delete_drain(vp->v_vfsp, 1, 1);
3216 
3217         error = ufs_eventlookup(vp, nm, cr, &rmvp);
3218         if (rmvp != NULL) {
3219                 /* Only send the event if there were no errors */
3220                 if (error == 0)
3221                         vnevent_remove(rmvp, vp, nm, ct);
3222                 VN_RELE(rmvp);
3223         }
3224 
3225 retry_remove:
3226         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_REMOVE_MASK);
3227         if (error)
3228                 goto out;
3229 
3230         if (ulp)
3231                 TRANS_BEGIN_CSYNC(ufsvfsp, issync, TOP_REMOVE,
3232                     trans_size = (int)TOP_REMOVE_SIZE(VTOI(vp)));
3233 
3234         /*
3235          * ufs_tryirwlock_trans uses rw_tryenter and checks for SLOCK
3236          * to avoid a deadlock between i_rwlock and ufs_lockfs_begin.
3237          * If a deadlock is possible, the operation is retried.
3238          */
3239         ufs_tryirwlock_trans(&ip->i_rwlock, RW_WRITER, TOP_REMOVE, retry);
3240         if (indeadlock)
3241                 goto retry_remove;
3242         error = ufs_dirremove(ip, nm, (struct inode *)0, (struct vnode *)0,
3243             DR_REMOVE, cr);
3244         rw_exit(&ip->i_rwlock);
3245 
3246         if (ulp) {
3247                 TRANS_END_CSYNC(ufsvfsp, error, issync, TOP_REMOVE, trans_size);
3248                 ufs_lockfs_end(ulp);
3249         }
3250 
3251 out:
3252         return (error);
3253 }
3254 
3255 /*
3256  * Link a file or a directory.  Only privileged processes are allowed to
3257  * make links to directories.
3258  */
3259 /*ARGSUSED*/
3260 static int
3261 ufs_link(struct vnode *tdvp, struct vnode *svp, char *tnm, struct cred *cr,
3262         caller_context_t *ct, int flags)
3263 {
3264         struct inode *sip;
3265         struct inode *tdp = VTOI(tdvp);
3266         struct ufsvfs *ufsvfsp = tdp->i_ufsvfs;
3267         struct ulockfs *ulp;
3268         struct vnode *realvp;
3269         int error;
3270         int issync;
3271         int trans_size;
3272         int isdev;
3273         int indeadlock;
3274 
3275 retry_link:
3276         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_LINK_MASK);
3277         if (error)
3278                 goto out;
3279 
3280         if (ulp)
3281                 TRANS_BEGIN_CSYNC(ufsvfsp, issync, TOP_LINK,
3282                     trans_size = (int)TOP_LINK_SIZE(VTOI(tdvp)));
3283 
3284         if (VOP_REALVP(svp, &realvp, ct) == 0)
3285                 svp = realvp;
3286 
3287         /*
3288          * Make sure link for extended attributes is valid
3289          * We only support hard linking of attr in ATTRDIR to ATTRDIR
3290          *
3291          * Make certain we don't attempt to look at a device node as
3292          * a ufs inode.
3293          */
3294 
3295         isdev = IS_DEVVP(svp);
3296         if (((isdev == 0) && ((VTOI(svp)->i_cflags & IXATTR) == 0) &&
3297             ((tdp->i_mode & IFMT) == IFATTRDIR)) ||
3298             ((isdev == 0) && (VTOI(svp)->i_cflags & IXATTR) &&
3299             ((tdp->i_mode & IFMT) == IFDIR))) {
3300                 error = EINVAL;
3301                 goto unlock;
3302         }
3303 
3304         sip = VTOI(svp);
3305         if ((svp->v_type == VDIR &&
3306             secpolicy_fs_linkdir(cr, ufsvfsp->vfs_vfs) != 0) ||
3307             (sip->i_uid != crgetuid(cr) && secpolicy_basic_link(cr) != 0)) {
3308                 error = EPERM;
3309                 goto unlock;
3310         }
3311 
3312         /*
3313          * ufs_tryirwlock_trans uses rw_tryenter and checks for SLOCK
3314          * to avoid a deadlock between i_rwlock and ufs_lockfs_begin.
3315          * If a deadlock is possible, the operation is retried.
3316          */
3317         ufs_tryirwlock_trans(&tdp->i_rwlock, RW_WRITER, TOP_LINK, retry);
3318         if (indeadlock)
3319                 goto retry_link;
3320         error = ufs_direnter_lr(tdp, tnm, DE_LINK, (struct inode *)0,
3321             sip, cr);
3322         rw_exit(&tdp->i_rwlock);
3323 
3324 unlock:
3325         if (ulp) {
3326                 TRANS_END_CSYNC(ufsvfsp, error, issync, TOP_LINK, trans_size);
3327                 ufs_lockfs_end(ulp);
3328         }
3329 
3330         if (!error) {
3331                 vnevent_link(svp, ct);
3332         }
3333 out:
3334         return (error);
3335 }
3336 
3337 uint64_t ufs_rename_retry_cnt;
3338 uint64_t ufs_rename_upgrade_retry_cnt;
3339 uint64_t ufs_rename_dircheck_retry_cnt;
3340 clock_t  ufs_rename_backoff_delay = 1;
3341 
3342 /*
3343  * Rename a file or directory.
3344  * We are given the vnode and entry string of the source and the
3345  * vnode and entry string of the place we want to move the source
3346  * to (the target). The essential operation is:
3347  *      unlink(target);
3348  *      link(source, target);
3349  *      unlink(source);
3350  * but "atomically".  Can't do full commit without saving state in
3351  * the inode on disk, which isn't feasible at this time.  Best we
3352  * can do is always guarantee that the TARGET exists.
3353  */
3354 
3355 /*ARGSUSED*/
3356 static int
3357 ufs_rename(
3358         struct vnode *sdvp,             /* old (source) parent vnode */
3359         char *snm,                      /* old (source) entry name */
3360         struct vnode *tdvp,             /* new (target) parent vnode */
3361         char *tnm,                      /* new (target) entry name */
3362         struct cred *cr,
3363         caller_context_t *ct,
3364         int flags)
3365 {
3366         struct inode *sip = NULL;       /* source inode */
3367         struct inode *ip = NULL;        /* check inode */
3368         struct inode *sdp;              /* old (source) parent inode */
3369         struct inode *tdp;              /* new (target) parent inode */
3370         struct vnode *svp = NULL;       /* source vnode */
3371         struct vnode *tvp = NULL;       /* target vnode, if it exists */
3372         struct vnode *realvp;
3373         struct ufsvfs *ufsvfsp;
3374         struct ulockfs *ulp = NULL;
3375         struct ufs_slot slot;
3376         timestruc_t now;
3377         int error;
3378         int issync;
3379         int trans_size;
3380         krwlock_t *first_lock;
3381         krwlock_t *second_lock;
3382         krwlock_t *reverse_lock;
3383         int serr, terr;
3384 
3385         sdp = VTOI(sdvp);
3386         slot.fbp = NULL;
3387         ufsvfsp = sdp->i_ufsvfs;
3388 
3389         if (VOP_REALVP(tdvp, &realvp, ct) == 0)
3390                 tdvp = realvp;
3391 
3392         /* Must do this before taking locks in case of DNLC miss */
3393         terr = ufs_eventlookup(tdvp, tnm, cr, &tvp);
3394         serr = ufs_eventlookup(sdvp, snm, cr, &svp);
3395 
3396         if ((serr == 0) && ((terr == 0) || (terr == ENOENT))) {
3397                 if (tvp != NULL)
3398                         vnevent_pre_rename_dest(tvp, tdvp, tnm, ct);
3399 
3400                 /*
3401                  * Notify the target directory of the rename event
3402                  * if source and target directories are not the same.
3403                  */
3404                 if (sdvp != tdvp)
3405                         vnevent_pre_rename_dest_dir(tdvp, svp, tnm, ct);
3406 
3407                 if (svp != NULL)
3408                         vnevent_pre_rename_src(svp, sdvp, snm, ct);
3409         }
3410 
3411         if (svp != NULL)
3412                 VN_RELE(svp);
3413 
3414 retry_rename:
3415         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_RENAME_MASK);
3416         if (error)
3417                 goto unlock;
3418 
3419         if (ulp)
3420                 TRANS_BEGIN_CSYNC(ufsvfsp, issync, TOP_RENAME,
3421                     trans_size = (int)TOP_RENAME_SIZE(sdp));
3422 
3423         if (VOP_REALVP(tdvp, &realvp, ct) == 0)
3424                 tdvp = realvp;
3425 
3426         tdp = VTOI(tdvp);
3427 
3428         /*
3429          * We only allow renaming of attributes from ATTRDIR to ATTRDIR.
3430          */
3431         if ((tdp->i_mode & IFMT) != (sdp->i_mode & IFMT)) {
3432                 error = EINVAL;
3433                 goto unlock;
3434         }
3435 
3436         /*
3437          * Check accessibility of directory.
3438          */
3439         if (error = ufs_diraccess(sdp, IEXEC, cr))
3440                 goto unlock;
3441 
3442         /*
3443          * Look up inode of file we're supposed to rename.
3444          */
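             /*
              * Sample the current time before the lookup; it is compared
              * against sip's ctime later to detect concurrent changes to
              * the source entry once all the directory locks are held.
              */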
3445         gethrestime(&now);
3446         if (error = ufs_dirlook(sdp, snm, &sip, cr, 0, 0)) {
3447                 if (error == EAGAIN) {
3448                         if (ulp) {
3449                                 TRANS_END_CSYNC(ufsvfsp, error, issync,
3450                                     TOP_RENAME, trans_size);
3451                                 ufs_lockfs_end(ulp);
3452                         }
3453                         goto retry_rename;
3454                 }
3455 
3456                 goto unlock;
3457         }
3458 
3459         /*
3460          * Lock both the source and target directories (they may be
3461          * the same) to provide the atomicity semantics that were
3462          * previously provided by the per-file-system vfs_rename_lock.
3463          *
3464          * With vfs_rename_lock removed to allow simultaneous renames
3465          * within a file system, ufs_dircheckpath can deadlock while
3466          * traversing back to ensure that source is not a parent directory
3467          * of target parent directory. This is because we get into
3468          * ufs_dircheckpath with the sdp and tdp locks held as RW_WRITER.
3469          * If the tdp and sdp of the simultaneous renames happen to be
3470          * in the path of each other, it can lead to a deadlock. This
3471          * can be avoided by getting the locks as RW_READER here and then
3472          * upgrading to RW_WRITER after completing the ufs_dircheckpath.
3473          *
3474          * We hold the target directory's i_rwlock after calling
3475          * ufs_lockfs_begin but in many other operations (like ufs_readdir)
3476          * VOP_RWLOCK is explicitly called by the filesystem independent code
3477          * before calling the file system operation. In these cases the order
3478          * is reversed (i.e i_rwlock is taken first and then ufs_lockfs_begin
3479          * is called). This is fine as long as ufs_lockfs_begin acts as a VOP
3480          * counter but with ufs_quiesce setting the SLOCK bit this becomes a
3481          * synchronizing object which might lead to a deadlock. So we use
3482          * rw_tryenter instead of rw_enter. If we fail to get this lock and
3483          * find that SLOCK bit is set, we call ufs_lockfs_end and restart the
3484          * operation.
3485          */
3486 retry:
3487         first_lock = &tdp->i_rwlock;
3488         second_lock = &sdp->i_rwlock;
3489 retry_firstlock:
3490         if (!rw_tryenter(first_lock, RW_READER)) {
3491                 /*
3492                  * We didn't get the lock. Check if the SLOCK is set in the
3493                  * ufsvfs. If yes, we might be in a deadlock. Safer to give up
3494                  * and wait for SLOCK to be cleared.
3495                  */
3496 
3497                 if (ulp && ULOCKFS_IS_SLOCK(ulp)) {
3498                         TRANS_END_CSYNC(ufsvfsp, error, issync, TOP_RENAME,
3499                             trans_size);
3500                         ufs_lockfs_end(ulp);
3501                         goto retry_rename;
3502 
3503                 } else {
3504                         /*
3505                          * SLOCK isn't set so this is a genuine synchronization
3506                          * case. Let's try again after giving them a breather.
3507                          */
3508                         delay(RETRY_LOCK_DELAY);
3509                         goto  retry_firstlock;
3510                 }
3511         }
3512         /*
3513          * Need to check whether tdp and sdp are the same.
3514          */
3515         if ((tdp != sdp) && (!rw_tryenter(second_lock, RW_READER))) {
3516                 /*
3517                  * We didn't get the lock. Check if the SLOCK is set in the
3518                  * ufsvfs. If yes, we might be in a deadlock. Safer to give up
3519                  * and wait for SLOCK to be cleared.
3520                  */
3521 
3522                 rw_exit(first_lock);
3523                 if (ulp && ULOCKFS_IS_SLOCK(ulp)) {
3524                         TRANS_END_CSYNC(ufsvfsp, error, issync, TOP_RENAME,
3525                             trans_size);
3526                         ufs_lockfs_end(ulp);
3527                         goto retry_rename;
3528 
3529                 } else {
3530                         /*
3531                          * So we couldn't get the second level peer lock *and*
3532                          * the SLOCK bit isn't set. We may simply be
3533                          * contending with someone who wants these locks the
3534                          * other way round. Reverse the locks in case there is
3535                          * heavy contention for the second level lock.
3536                          */
3537                         reverse_lock = first_lock;
3538                         first_lock = second_lock;
3539                         second_lock = reverse_lock;
3540                         ufs_rename_retry_cnt++;
3541                         goto  retry_firstlock;
3542                 }
3543         }
3544 
3545         if (sip == tdp) {
3546                 error = EINVAL;
3547                 goto errout;
3548         }
3549         /*
3550          * Make sure we can delete the source entry.  This requires
3551          * write permission on the containing directory.
3552          * Check for sticky directories.
3553          */
3554         rw_enter(&sdp->i_contents, RW_READER);
3555         rw_enter(&sip->i_contents, RW_READER);
3556         if ((error = ufs_iaccess(sdp, IWRITE, cr, 0)) != 0 ||
3557             (error = ufs_sticky_remove_access(sdp, sip, cr)) != 0) {
3558                 rw_exit(&sip->i_contents);
3559                 rw_exit(&sdp->i_contents);
3560                 goto errout;
3561         }
3562 
3563         /*
3564          * If this is a rename of a directory and the parent is
3565          * different (".." must be changed), then the source
3566          * directory must not be in the directory hierarchy
3567          * above the target, as this would orphan everything
3568          * below the source directory.  Also the user must have
3569          * write permission in the source so as to be able to
3570          * change "..".
3571          */
3572         if ((((sip->i_mode & IFMT) == IFDIR) ||
3573             ((sip->i_mode & IFMT) == IFATTRDIR)) && sdp != tdp) {
3574                 ino_t   inum;
3575 
3576                 if (error = ufs_iaccess(sip, IWRITE, cr, 0)) {
3577                         rw_exit(&sip->i_contents);
3578                         rw_exit(&sdp->i_contents);
3579                         goto errout;
3580                 }
3581                 inum = sip->i_number;
3582                 rw_exit(&sip->i_contents);
3583                 rw_exit(&sdp->i_contents);
3584                 if ((error = ufs_dircheckpath(inum, tdp, sdp, cr))) {
3585                         /*
3586                          * If we got EAGAIN ufs_dircheckpath detected a
3587                          * potential deadlock and backed out. We need
3588                          * to retry the operation since sdp and tdp have
3589                          * to be released to avoid the deadlock.
3590                          */
3591                         if (error == EAGAIN) {
3592                                 rw_exit(&tdp->i_rwlock);
3593                                 if (tdp != sdp)
3594                                         rw_exit(&sdp->i_rwlock);
3595                                 delay(ufs_rename_backoff_delay);
3596                                 ufs_rename_dircheck_retry_cnt++;
3597                                 goto retry;
3598                         }
3599                         goto errout;
3600                 }
3601         } else {
3602                 rw_exit(&sip->i_contents);
3603                 rw_exit(&sdp->i_contents);
3604         }
3605 
3606 
3607         /*
3608          * Check for renaming '.' or '..' or alias of '.'
3609          */
3610         if (strcmp(snm, ".") == 0 || strcmp(snm, "..") == 0 || sdp == sip) {
3611                 error = EINVAL;
3612                 goto errout;
3613         }
3614 
3615         /*
3616          * Simultaneous renames can deadlock in ufs_dircheckpath since it
3617          * tries to traverse back the file tree with both tdp and sdp held
3618          * as RW_WRITER. To avoid that we have to hold the tdp and sdp locks
3619          * as RW_READER till ufs_dircheckpath is done.
3620          * Now that ufs_dircheckpath is done, we can upgrade the locks
3621          * to RW_WRITER.
3622          */
3623         if (!rw_tryupgrade(&tdp->i_rwlock)) {
3624                 /*
3625                  * The upgrade failed. We have to give up the lock
3626                  * so as to avoid deadlocking with someone else who is
3627                  * waiting for the writer lock. With the lock gone, we
3628                  * cannot be sure the checks done above will hold
3629                  * good when we eventually get them back as writer.
3630                  * So if we can't upgrade, we drop the locks and
3631                  * retry everything again.
3632                  */
3633                 rw_exit(&tdp->i_rwlock);
3634                 if (tdp != sdp)
3635                         rw_exit(&sdp->i_rwlock);
3636                 delay(ufs_rename_backoff_delay);
3637                 ufs_rename_upgrade_retry_cnt++;
3638                 goto retry;
3639         }
3640         if (tdp != sdp) {
3641                 if (!rw_tryupgrade(&sdp->i_rwlock)) {
3642                         /*
3643                          * The upgrade failed. We have to give up the lock
3644                          * so as to avoid deadlocking with someone else who is
3645                          * waiting for the writer lock. With the lock gone, we
3646                          * cannot be sure the checks done above will hold
3647                          * good when we eventually get them back as writer.
3648                          * So if we can't upgrade, we drop the locks and
3649                          * retry everything again.
3650                          */
3651                         rw_exit(&tdp->i_rwlock);
3652                         rw_exit(&sdp->i_rwlock);
3653                         delay(ufs_rename_backoff_delay);
3654                         ufs_rename_upgrade_retry_cnt++;
3655                         goto retry;
3656                 }
3657         }
3658 
3659         /*
3660          * Now that all the locks are held, check to make sure another thread
3661          * didn't slip in and take out the sip.
3662          */
3663         slot.status = NONE;
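             /*
              * If sip's ctime is newer than the time sampled before the
              * lookup, the directory may have changed; re-scan the source
              * directory to make sure the entry has not been removed.
              */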
3664         if ((sip->i_ctime.tv_usec * 1000) > now.tv_nsec ||
3665             sip->i_ctime.tv_sec > now.tv_sec) {
3666                 rw_enter(&sdp->i_ufsvfs->vfs_dqrwlock, RW_READER);
3667                 rw_enter(&sdp->i_contents, RW_WRITER);
3668                 error = ufs_dircheckforname(sdp, snm, strlen(snm), &slot,
3669                     &ip, cr, 0);
3670                 rw_exit(&sdp->i_contents);
3671                 rw_exit(&sdp->i_ufsvfs->vfs_dqrwlock);
3672                 if (error) {
3673                         goto errout;
3674                 }
3675                 if (ip == NULL) {
3676                         error = ENOENT;
3677                         goto errout;
3678                 } else {
3679                         /*
3680                          * If the inode was found, we need to drop the v_count
3681                          * so as not to keep the filesystem from being
3682                          * unmounted at a later time.
3683                          */
3684                         VN_RELE(ITOV(ip));
3685                 }
3686 
3687                 /*
3688                  * Release the slot.fbp that has the page mapped and
3689                  * locked SE_SHARED, and could be used in
3690                  * ufs_direnter_lr(), which needs to get the SE_EXCL lock
3691                  * on said page.
3692                  */
3693                 if (slot.fbp) {
3694                         fbrelse(slot.fbp, S_OTHER);
3695                         slot.fbp = NULL;
3696                 }
3697         }
3698 
3699         /*
3700          * Link source to the target.
3701          */
3702         if (error = ufs_direnter_lr(tdp, tnm, DE_RENAME, sdp, sip, cr)) {
3703                 /*
3704                  * ESAME isn't really an error; it indicates that the
3705                  * operation should not be done because the source and target
3706                  * are the same file, but that no error should be reported.
3707                  */
3708                 if (error == ESAME)
3709                         error = 0;
3710                 goto errout;
3711         }
3712 
3713         if (error == 0 && tvp != NULL)
3714                 vnevent_rename_dest(tvp, tdvp, tnm, ct);
3715 
3716         /*
3717          * Unlink the source.
3718          * Remove the source entry.  ufs_dirremove() checks that the entry
3719          * still reflects sip, and returns an error if it doesn't.
3720          * If the entry has changed just forget about it.  Release
3721          * the source inode.
3722          */
3723         if ((error = ufs_dirremove(sdp, snm, sip, (struct vnode *)0,
3724             DR_RENAME, cr)) == ENOENT)
3725                 error = 0;
3726 
3727         if (error == 0) {
3728                 vnevent_rename_src(ITOV(sip), sdvp, snm, ct);
3729                 /*
3730                  * Notify the target directory of the rename event
3731                  * if source and target directories are not the same.
3732                  */
3733                 if (sdvp != tdvp)
3734                         vnevent_rename_dest_dir(tdvp, ct);
3735         }
3736 
3737 errout:
3738         if (slot.fbp)
3739                 fbrelse(slot.fbp, S_OTHER);
3740 
3741         rw_exit(&tdp->i_rwlock);
3742         if (sdp != tdp) {
3743                 rw_exit(&sdp->i_rwlock);
3744         }
3745 
3746 unlock:
3747         if (tvp != NULL)
3748                 VN_RELE(tvp);
3749         if (sip != NULL)
3750                 VN_RELE(ITOV(sip));
3751 
3752         if (ulp) {
3753                 TRANS_END_CSYNC(ufsvfsp, error, issync, TOP_RENAME, trans_size);
3754                 ufs_lockfs_end(ulp);
3755         }
3756 
3757         return (error);
3758 }
3759 
3760 /*ARGSUSED*/
3761 static int
3762 ufs_mkdir(struct vnode *dvp, char *dirname, struct vattr *vap,
3763         struct vnode **vpp, struct cred *cr, caller_context_t *ct, int flags,
3764         vsecattr_t *vsecp)
3765 {
3766         struct inode *ip;
3767         struct inode *xip;
3768         struct ufsvfs *ufsvfsp;
3769         struct ulockfs *ulp;
3770         int error;
3771         int issync;
3772         int trans_size;
3773         int indeadlock;
3774         int retry = 1;
3775 
3776         ASSERT((vap->va_mask & (AT_TYPE|AT_MODE)) == (AT_TYPE|AT_MODE));
3777 
3778         /*
3779          * Can't make directory in attr hidden dir
3780          */
3781         if ((VTOI(dvp)->i_mode & IFMT) == IFATTRDIR)
3782                 return (EINVAL);
3783 
3784 again:
3785         ip = VTOI(dvp);
3786         ufsvfsp = ip->i_ufsvfs;
3787         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_MKDIR_MASK);
3788         if (error)
3789                 goto out;
3790         if (ulp)
3791                 TRANS_BEGIN_CSYNC(ufsvfsp, issync, TOP_MKDIR,
3792                     trans_size = (int)TOP_MKDIR_SIZE(ip));
3793 
3794         /*
3795          * ufs_tryirwlock_trans uses rw_tryenter and checks for SLOCK
3796          * to avoid a deadlock between i_rwlock and ufs_lockfs_begin.
3797          * If a deadlock is possible, the operation is retried.
3798          */
3799         ufs_tryirwlock_trans(&ip->i_rwlock, RW_WRITER, TOP_MKDIR, retry);
3800         if (indeadlock)
3801                 goto again;
3802 
3803         error = ufs_direnter_cm(ip, dirname, DE_MKDIR, vap, &xip, cr,
3804             (retry ? IQUIET : 0));
3805         if (error == EAGAIN) {
3806                 if (ulp) {
3807                         TRANS_END_CSYNC(ufsvfsp, error, issync, TOP_MKDIR,
3808                             trans_size);
3809                         ufs_lockfs_end(ulp);
3810                 }
3811                 goto again;
3812         }
3813 
3814         rw_exit(&ip->i_rwlock);
3815         if (error == 0) {
3816                 ip = xip;
3817                 *vpp = ITOV(ip);
3818         } else if (error == EEXIST)
3819                 VN_RELE(ITOV(xip));
3820 
3821         if (ulp) {
3822                 int terr = 0;
3823                 TRANS_END_CSYNC(ufsvfsp, terr, issync, TOP_MKDIR, trans_size);
3824                 ufs_lockfs_end(ulp);
3825                 if (error == 0)
3826                         error = terr;
3827         }
3828 out:
3829         if ((error == ENOSPC) && retry && TRANS_ISTRANS(ufsvfsp)) {
3830                 ufs_delete_drain_wait(ufsvfsp, 1);
3831                 retry = 0;
3832                 goto again;
3833         }
3834 
3835         return (error);
3836 }
3837 
3838 /*ARGSUSED*/
3839 static int
3840 ufs_rmdir(struct vnode *vp, char *nm, struct vnode *cdir, struct cred *cr,
3841         caller_context_t *ct, int flags)
3842 {
3843         struct inode *ip = VTOI(vp);
3844         struct ufsvfs *ufsvfsp = ip->i_ufsvfs;
3845         struct ulockfs *ulp;
3846         vnode_t *rmvp = NULL;   /* Vnode of removed directory */
3847         int error;
3848         int issync;
3849         int trans_size;
3850         int indeadlock;
3851 
3852         /*
3853          * don't let the delete queue get too long
3854          */
3855         if (ufsvfsp == NULL) {
3856                 error = EIO;
3857                 goto out;
3858         }
3859         if (ufsvfsp->vfs_delete.uq_ne > ufs_idle_max)
3860                 ufs_delete_drain(vp->v_vfsp, 1, 1);
3861 
3862         error = ufs_eventlookup(vp, nm, cr, &rmvp);
3863         if (rmvp != NULL) {
3864                 /* Only send the event if there were no errors */
3865                 if (error == 0)
3866                         vnevent_rmdir(rmvp, vp, nm, ct);
3867                 VN_RELE(rmvp);
3868         }
3869 
3870 retry_rmdir:
3871         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_RMDIR_MASK);
3872         if (error)
3873                 goto out;
3874 
3875         if (ulp)
3876                 TRANS_BEGIN_CSYNC(ufsvfsp, issync, TOP_RMDIR,
3877                     trans_size = TOP_RMDIR_SIZE);
3878 
3879         /*
3880          * ufs_tryirwlock_trans uses rw_tryenter and checks for SLOCK
3881          * to avoid a deadlock between i_rwlock and ufs_lockfs_begin.
3882          * If a deadlock is possible, the operation is retried.
3883          */
3884         ufs_tryirwlock_trans(&ip->i_rwlock, RW_WRITER, TOP_RMDIR, retry);
3885         if (indeadlock)
3886                 goto retry_rmdir;
3887         error = ufs_dirremove(ip, nm, (struct inode *)0, cdir, DR_RMDIR, cr);
3888 
3889         rw_exit(&ip->i_rwlock);
3890 
3891         if (ulp) {
3892                 TRANS_END_CSYNC(ufsvfsp, error, issync, TOP_RMDIR,
3893                     trans_size);
3894                 ufs_lockfs_end(ulp);
3895         }
3896 
3897 out:
3898         return (error);
3899 }
3900 
3901 /* ARGSUSED */
3902 static int
3903 ufs_readdir(
3904         struct vnode *vp,
3905         struct uio *uiop,
3906         struct cred *cr,
3907         int *eofp,
3908         caller_context_t *ct,
3909         int flags)
3910 {
3911         struct iovec *iovp;
3912         struct inode *ip;
3913         struct direct *idp;
3914         struct dirent64 *odp;
3915         struct fbuf *fbp;
3916         struct ufsvfs *ufsvfsp;
3917         struct ulockfs *ulp;
3918         caddr_t outbuf;
3919         size_t bufsize;
3920         uint_t offset;
3921         uint_t bytes_wanted, total_bytes_wanted;
3922         int incount = 0;
3923         int outcount = 0;
3924         int error;
3925 
3926         ip = VTOI(vp);
3927         ASSERT(RW_READ_HELD(&ip->i_rwlock));
3928 
3929         if (uiop->uio_loffset >= MAXOFF32_T) {
3930                 if (eofp)
3931                         *eofp = 1;
3932                 return (0);
3933         }
3934 
3935         /*
3936          * Check if we have been called with a valid iov_len
3937          * and bail out if not; otherwise we may loop
3938          * forever further down.
3939          */
3940         if (uiop->uio_iov->iov_len <= 0) {
3941                 error = EINVAL;
3942                 goto out;
3943         }
3944 
3945         /*
3946          * Large Files: When we come here we are guaranteed that
3947          * uio_offset can be used safely. The high word is zero.
3948          */
3949 
3950         ufsvfsp = ip->i_ufsvfs;
3951         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_READDIR_MASK);
3952         if (error)
3953                 goto out;
3954 
3955         iovp = uiop->uio_iov;
3956         total_bytes_wanted = iovp->iov_len;
3957 
3958         /* Large Files: directory files should not be "large" */
3959 
3960         ASSERT(ip->i_size <= MAXOFF32_T);
3961 
3962         /* Force offset to be valid (to guard against bogus lseek() values) */
3963         offset = (uint_t)uiop->uio_offset & ~(DIRBLKSIZ - 1);
3964 
3965         /* Quit if at end of file or link count of zero (posix) */
3966         if (offset >= (uint_t)ip->i_size || ip->i_nlink <= 0) {
3967                 if (eofp)
3968                         *eofp = 1;
3969                 error = 0;
3970                 goto unlock;
3971         }
3972 
3973         /*
3974          * Get space to change directory entries into fs independent format.
3975          * Do fast alloc for the most commonly used request size (filesystem
3976          * block size).
3977          */
3978         if (uiop->uio_segflg != UIO_SYSSPACE || uiop->uio_iovcnt != 1) {
3979                 bufsize = total_bytes_wanted;
3980                 outbuf = kmem_alloc(bufsize, KM_SLEEP);
3981                 odp = (struct dirent64 *)outbuf;
3982         } else {
3983                 bufsize = total_bytes_wanted;
3984                 odp = (struct dirent64 *)iovp->iov_base;
3985         }
3986 
3987 nextblk:
3988         bytes_wanted = total_bytes_wanted;
3989 
3990         /* Truncate request to file size */
3991         if (offset + bytes_wanted > (int)ip->i_size)
3992                 bytes_wanted = (int)(ip->i_size - offset);
3993 
3994         /* Comply with MAXBSIZE boundary restrictions of fbread() */
3995         if ((offset & MAXBOFFSET) + bytes_wanted > MAXBSIZE)
3996                 bytes_wanted = MAXBSIZE - (offset & MAXBOFFSET);
3997 
3998         /*
3999          * Read in the next chunk.
4000          * We are still holding the i_rwlock.
4001          */
4002         error = fbread(vp, (offset_t)offset, bytes_wanted, S_OTHER, &fbp);
4003 
4004         if (error)
4005                 goto update_inode;
4006         if (!ULOCKFS_IS_NOIACC(ITOUL(ip)) && (ip->i_fs->fs_ronly == 0) &&
4007             (!ufsvfsp->vfs_noatime)) {
4008                 ip->i_flag |= IACC;
4009         }
4010         incount = 0;
4011         idp = (struct direct *)fbp->fb_addr;
4012         if (idp->d_ino == 0 && idp->d_reclen == 0 && idp->d_namlen == 0) {
4013                 cmn_err(CE_WARN, "ufs_readdir: bad dir, inumber = %llu, "
4014                     "fs = %s\n",
4015                     (u_longlong_t)ip->i_number, ufsvfsp->vfs_fs->fs_fsmnt);
4016                 fbrelse(fbp, S_OTHER);
4017                 error = ENXIO;
4018                 goto update_inode;
4019         }
4020         /* Transform to file-system independent format */
4021         while (incount < bytes_wanted) {
4022                 /*
4023                  * If the current directory entry is mangled, then skip
4024                  * to the next block.  It would be nice to set the FSBAD
4025                  * flag in the super-block so that a fsck is forced on
4026                  * next reboot, but locking is a problem.
4027                  */
4028                 if (idp->d_reclen & 0x3) {
4029                         offset = (offset + DIRBLKSIZ) & ~(DIRBLKSIZ-1);
4030                         break;
4031                 }
4032 
4033                 /* Skip to requested offset and skip empty entries */
4034                 if (idp->d_ino != 0 && offset >= (uint_t)uiop->uio_offset) {
4035                         ushort_t this_reclen =
4036                             DIRENT64_RECLEN(idp->d_namlen);
4037                         /* Buffer too small for any entries */
4038                         if (!outcount && this_reclen > bufsize) {
4039                                 fbrelse(fbp, S_OTHER);
4040                                 error = EINVAL;
4041                                 goto update_inode;
4042                         }
4043                         /* If would overrun the buffer, quit */
4044                         if (outcount + this_reclen > bufsize) {
4045                                 break;
4046                         }
4047                         /* Take this entry */
4048                         odp->d_ino = (ino64_t)idp->d_ino;
4049                         odp->d_reclen = (ushort_t)this_reclen;
4050                         odp->d_off = (offset_t)(offset + idp->d_reclen);
4051 
4052                         /* use strncpy(9f) to zero out uninitialized bytes */
4053 
4054                         ASSERT(strlen(idp->d_name) + 1 <=
4055                             DIRENT64_NAMELEN(this_reclen));
4056                         (void) strncpy(odp->d_name, idp->d_name,
4057                             DIRENT64_NAMELEN(this_reclen));
4058                         outcount += odp->d_reclen;
4059                         odp = (struct dirent64 *)
4060                             ((intptr_t)odp + odp->d_reclen);
4061                         ASSERT(outcount <= bufsize);
4062                 }
4063                 if (idp->d_reclen) {
4064                         incount += idp->d_reclen;
4065                         offset += idp->d_reclen;
4066                         idp = (struct direct *)((intptr_t)idp + idp->d_reclen);
4067                 } else {
4068                         offset = (offset + DIRBLKSIZ) & ~(DIRBLKSIZ-1);
4069                         break;
4070                 }
4071         }
4072         /* Release the chunk */
4073         fbrelse(fbp, S_OTHER);
4074 
4075         /* Read whole block, but got no entries, read another if not eof */
4076 
4077         /*
4078          * Large Files: casting i_size to int here is not a problem
4079          * because directory sizes are always less than MAXOFF32_T.
4080          * See assertion above.
4081          */
4082 
4083         if (offset < (int)ip->i_size && !outcount)
4084                 goto nextblk;
4085 
4086         /* Copy out the entry data */
4087         if (uiop->uio_segflg == UIO_SYSSPACE && uiop->uio_iovcnt == 1) {
4088                 iovp->iov_base += outcount;
4089                 iovp->iov_len -= outcount;
4090                 uiop->uio_resid -= outcount;
4091                 uiop->uio_offset = offset;
4092         } else if ((error = uiomove(outbuf, (long)outcount, UIO_READ,
4093             uiop)) == 0)
4094                 uiop->uio_offset = offset;
4095 update_inode:
4096         ITIMES(ip);
4097         if (uiop->uio_segflg != UIO_SYSSPACE || uiop->uio_iovcnt != 1)
4098                 kmem_free(outbuf, bufsize);
4099 
4100         if (eofp && error == 0)
4101                 *eofp = (uiop->uio_offset >= (int)ip->i_size);
4102 unlock:
4103         if (ulp) {
4104                 ufs_lockfs_end(ulp);
4105         }
4106 out:
4107         return (error);
4108 }
4109 
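     /*
      * Create a symbolic link.  The inode is created before the
      * directory entry to avoid racing with readlink(), the target
      * path is written out synchronously, and a target short enough
      * to fit in the inode is cached there as a "fast" symbolic link.
      */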
4110 /*ARGSUSED*/
4111 static int
4112 ufs_symlink(
4113         struct vnode *dvp,              /* ptr to parent dir vnode */
4114         char *linkname,                 /* name of symbolic link */
4115         struct vattr *vap,              /* attributes */
4116         char *target,                   /* target path */
4117         struct cred *cr,                /* user credentials */
4118         caller_context_t *ct,
4119         int flags)
4120 {
4121         struct inode *ip, *dip = VTOI(dvp);
4122         struct ufsvfs *ufsvfsp = dip->i_ufsvfs;
4123         struct ulockfs *ulp;
4124         int error;
4125         int issync;
4126         int trans_size;
4127         int residual;
4128         int ioflag;
4129         int retry = 1;
4130 
4131         /*
4132          * No symlinks in attrdirs at this time
4133          */
4134         if ((VTOI(dvp)->i_mode & IFMT) == IFATTRDIR)
4135                 return (EINVAL);
4136 
4137 again:
4138         ip = (struct inode *)NULL;
4139         vap->va_type = VLNK;
4140         vap->va_rdev = 0;
4141 
4142         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_SYMLINK_MASK);
4143         if (error)
4144                 goto out;
4145 
4146         if (ulp)
4147                 TRANS_BEGIN_CSYNC(ufsvfsp, issync, TOP_SYMLINK,
4148                     trans_size = (int)TOP_SYMLINK_SIZE(dip));
4149 
4150         /*
4151          * We must create the inode before the directory entry, to avoid
4152          * racing with readlink().  ufs_dirmakeinode requires that we
4153          * hold the quota lock as reader, and directory locks as writer.
4154          */
4155 
4156         rw_enter(&dip->i_rwlock, RW_WRITER);
4157         rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
4158         rw_enter(&dip->i_contents, RW_WRITER);
4159 
4160         /*
4161          * Suppress any out of inodes messages if we will retry on
4162          * ENOSPC
4163          */
4164         if (retry)
4165                 dip->i_flag |= IQUIET;
4166 
4167         error = ufs_dirmakeinode(dip, &ip, vap, DE_SYMLINK, cr);
4168 
4169         dip->i_flag &= ~IQUIET;
4170 
4171         rw_exit(&dip->i_contents);
4172         rw_exit(&ufsvfsp->vfs_dqrwlock);
4173         rw_exit(&dip->i_rwlock);
4174 
4175         if (error)
4176                 goto unlock;
4177 
4178         /*
4179          * OK.  The inode has been created.  Write out the data of the
4180          * symbolic link.  Since symbolic links are metadata, and should
4181          * remain consistent across a system crash, we need to force the
4182          * data out synchronously.
4183          *
4184          * (This is a change from the semantics in earlier releases, which
4185          * only created symbolic links synchronously if the semi-documented
4186          * 'syncdir' option was set, or if we were being invoked by the NFS
4187          * server, which requires symbolic links to be created synchronously.)
4188          *
4189          * We need to pass in a pointer for the residual length; otherwise
4190          * ufs_rdwri() will always return EIO if it can't write the data,
4191          * even if the error was really ENOSPC or EDQUOT.
4192          */
4193 
4194         ioflag = FWRITE | FDSYNC;
4195         residual = 0;
4196 
4197         rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
4198         rw_enter(&ip->i_contents, RW_WRITER);
4199 
4200         /*
4201          * Suppress file system full messages if we will retry
4202          */
4203         if (retry)
4204                 ip->i_flag |= IQUIET;
4205 
4206         error = ufs_rdwri(UIO_WRITE, ioflag, ip, target, strlen(target),
4207             (offset_t)0, UIO_SYSSPACE, &residual, cr);
4208 
4209         ip->i_flag &= ~IQUIET;
4210 
4211         if (error) {
4212                 rw_exit(&ip->i_contents);
4213                 rw_exit(&ufsvfsp->vfs_dqrwlock);
4214                 goto remove;
4215         }
4216 
4217         /*
4218          * If the link's data is small enough, we can cache it in the inode.
4219          * This is a "fast symbolic link".  We don't use the first direct
4220          * block because that's actually used to point at the symbolic link's
4221          * contents on disk; but we know that none of the other direct or
4222          * indirect blocks can be used because symbolic links are restricted
4223          * to be smaller than a file system block.
4224          */
4225 
4226         ASSERT(MAXPATHLEN <= VBSIZE(ITOV(ip)));
4227 
4228         if (ip->i_size > 0 && ip->i_size <= FSL_SIZE) {
4229                 if (kcopy(target, &ip->i_db[1], ip->i_size) == 0) {
4230                         ip->i_flag |= IFASTSYMLNK;
4231                 } else {
4232                         int i;
4233                         /* error, clear garbage left behind */
4234                         for (i = 1; i < NDADDR; i++)
4235                                 ip->i_db[i] = 0;
4236                         for (i = 0; i < NIADDR; i++)
4237                                 ip->i_ib[i] = 0;
4238                 }
4239         }
4240 
4241         rw_exit(&ip->i_contents);
4242         rw_exit(&ufsvfsp->vfs_dqrwlock);
4243 
4244         /*
4245          * OK.  We've successfully created the symbolic link.  All that
4246          * remains is to insert it into the appropriate directory.
4247          */
4248 
4249         rw_enter(&dip->i_rwlock, RW_WRITER);
4250         error = ufs_direnter_lr(dip, linkname, DE_SYMLINK, NULL, ip, cr);
4251         rw_exit(&dip->i_rwlock);
4252 
4253         /*
4254          * Fall through into remove-on-error code.  We're either done, or we
4255          * need to remove the inode (if we couldn't insert it).
4256          */
4257 
4258 remove:
4259         if (error && (ip != NULL)) {
4260                 rw_enter(&ip->i_contents, RW_WRITER);
4261                 ip->i_nlink--;
4262                 ip->i_flag |= ICHG;
4263                 ip->i_seq++;
4264                 ufs_setreclaim(ip);
4265                 rw_exit(&ip->i_contents);
4266         }
4267 
4268 unlock:
4269         if (ip != NULL)
4270                 VN_RELE(ITOV(ip));
4271 
4272         if (ulp) {
4273                 int terr = 0;
4274 
4275                 TRANS_END_CSYNC(ufsvfsp, terr, issync, TOP_SYMLINK,
4276                     trans_size);
4277                 ufs_lockfs_end(ulp);
4278                 if (error == 0)
4279                         error = terr;
4280         }
4281 
4282         /*
4283          * We may have failed due to lack of an inode or of a block to
4284          * store the target in.  Try flushing the delete queue to free up
4285          * logically-available resources and try again.
4286          */
4287         if ((error == ENOSPC) && retry && TRANS_ISTRANS(ufsvfsp)) {
4288                 ufs_delete_drain_wait(ufsvfsp, 1);
4289                 retry = 0;
4290                 goto again;
4291         }
4292 
4293 out:
4294         return (error);
4295 }
4296 
4297 /*
4298  * Ufs specific routine used to do ufs io.
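      * Builds a uio/iovec around the caller's buffer and hands it to
      * wrip() or rdip().  The caller must hold i_contents.  If aresid is
      * non-NULL the untransferred byte count is returned there; otherwise
      * a short transfer is reported as EIO.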
4299  */
4300 int
4301 ufs_rdwri(enum uio_rw rw, int ioflag, struct inode *ip, caddr_t base,
4302         ssize_t len, offset_t offset, enum uio_seg seg, int *aresid,
4303         struct cred *cr)
4304 {
4305         struct uio auio;
4306         struct iovec aiov;
4307         int error;
4308 
4309         ASSERT(RW_LOCK_HELD(&ip->i_contents));
4310 
4311         bzero((caddr_t)&auio, sizeof (uio_t));
4312         bzero((caddr_t)&aiov, sizeof (iovec_t));
4313 
4314         aiov.iov_base = base;
4315         aiov.iov_len = len;
4316         auio.uio_iov = &aiov;
4317         auio.uio_iovcnt = 1;
4318         auio.uio_loffset = offset;
4319         auio.uio_segflg = (short)seg;
4320         auio.uio_resid = len;
4321 
4322         if (rw == UIO_WRITE) {
4323                 auio.uio_fmode = FWRITE;
4324                 auio.uio_extflg = UIO_COPY_DEFAULT;
4325                 auio.uio_llimit = curproc->p_fsz_ctl;
4326                 error = wrip(ip, &auio, ioflag, cr);
4327         } else {
4328                 auio.uio_fmode = FREAD;
4329                 auio.uio_extflg = UIO_COPY_CACHED;
4330                 auio.uio_llimit = MAXOFFSET_T;
4331                 error = rdip(ip, &auio, ioflag, cr);
4332         }
4333 
4334         if (aresid) {
4335                 *aresid = auio.uio_resid;
4336         } else if (auio.uio_resid) {
4337                 error = EIO;
4338         }
4339         return (error);
4340 }
4341 
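     /*
      * Construct a file identifier from the inode number and generation
      * (e.g. for NFS file handles).  If the caller's fid buffer is too
      * small, the required length is returned in fid_len along with
      * ENOSPC.
      */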
4342 /*ARGSUSED*/
4343 static int
4344 ufs_fid(struct vnode *vp, struct fid *fidp, caller_context_t *ct)
4345 {
4346         struct ufid *ufid;
4347         struct inode *ip = VTOI(vp);
4348 
4349         if (ip->i_ufsvfs == NULL)
4350                 return (EIO);
4351 
4352         if (fidp->fid_len < (sizeof (struct ufid) - sizeof (ushort_t))) {
4353                 fidp->fid_len = sizeof (struct ufid) - sizeof (ushort_t);
4354                 return (ENOSPC);
4355         }
4356 
4357         ufid = (struct ufid *)fidp;
4358         bzero((char *)ufid, sizeof (struct ufid));
4359         ufid->ufid_len = sizeof (struct ufid) - sizeof (ushort_t);
4360         ufid->ufid_ino = ip->i_number;
4361         ufid->ufid_gen = ip->i_gen;
4362 
4363         return (0);
4364 }
4365 
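     /*
      * Acquire i_rwlock on behalf of a VOP.  Readers always get the
      * lock shared.  For writers the lock is also returned shared when
      * directio is in effect and mandatory locking is not, so that
      * concurrent writes remain possible; otherwise it is taken
      * exclusively.
      */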
4366 /* ARGSUSED2 */
4367 static int
4368 ufs_rwlock(struct vnode *vp, int write_lock, caller_context_t *ctp)
4369 {
4370         struct inode    *ip = VTOI(vp);
4371         struct ufsvfs   *ufsvfsp;
4372         int             forcedirectio;
4373 
4374         /*
4375          * Read case is easy.
4376          */
4377         if (!write_lock) {
4378                 rw_enter(&ip->i_rwlock, RW_READER);
4379                 return (V_WRITELOCK_FALSE);
4380         }
4381 
4382         /*
4383          * Caller has requested a writer lock, but that inhibits any
4384          * concurrency in the VOPs that follow. Acquire the lock shared
4385          * and defer exclusive access until it is known to be needed in
4386          * other VOP handlers. Some cases can be determined here.
4387          */
4388 
4389         /*
4390          * If directio is not set, there is no chance of concurrency,
4391          * so just acquire the lock exclusive. Beware of a forced
4392          * unmount before looking at the mount option.
4393          */
4394         ufsvfsp = ip->i_ufsvfs;
4395         forcedirectio = ufsvfsp ? ufsvfsp->vfs_forcedirectio : 0;
4396         if (!(ip->i_flag & IDIRECTIO || forcedirectio) ||
4397             !ufs_allow_shared_writes) {
4398                 rw_enter(&ip->i_rwlock, RW_WRITER);
4399                 return (V_WRITELOCK_TRUE);
4400         }
4401 
4402         /*
4403          * Mandatory locking forces acquiring i_rwlock exclusive.
4404          */
4405         if (MANDLOCK(vp, ip->i_mode)) {
4406                 rw_enter(&ip->i_rwlock, RW_WRITER);
4407                 return (V_WRITELOCK_TRUE);
4408         }
4409 
4410         /*
4411          * Acquire the lock shared in case a concurrent write follows.
4412          * Mandatory locking could have become enabled before the lock
4413          * was acquired. Re-check and upgrade if needed.
4414          */
4415         rw_enter(&ip->i_rwlock, RW_READER);
4416         if (MANDLOCK(vp, ip->i_mode)) {
4417                 rw_exit(&ip->i_rwlock);
4418                 rw_enter(&ip->i_rwlock, RW_WRITER);
4419                 return (V_WRITELOCK_TRUE);
4420         }
4421         return (V_WRITELOCK_FALSE);
4422 }
4423 
4424 /*ARGSUSED*/
4425 static void
4426 ufs_rwunlock(struct vnode *vp, int write_lock, caller_context_t *ctp)
4427 {
4428         struct inode    *ip = VTOI(vp);
4429 
4430         rw_exit(&ip->i_rwlock);
4431 }
4432 
4433 /* ARGSUSED */
4434 static int
4435 ufs_seek(struct vnode *vp, offset_t ooff, offset_t *noffp,
4436         caller_context_t *ct)
4437 {
4438         return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
4439 }
4440 
4441 /* ARGSUSED */
4442 static int
4443 ufs_frlock(struct vnode *vp, int cmd, struct flock64 *bfp, int flag,
4444         offset_t offset, struct flk_callback *flk_cbp, struct cred *cr,
4445         caller_context_t *ct)
4446 {
4447         struct inode *ip = VTOI(vp);
4448 
4449         if (ip->i_ufsvfs == NULL)
4450                 return (EIO);
4451 
4452         /*
4453          * If file is being mapped, disallow frlock.
4454          * XXX I am not holding tlock while checking i_mapcnt because the
4455          * current locking strategy drops all locks before calling fs_frlock.
4456          * So, mapcnt could change before we enter fs_frlock, making it
4457          * meaningless to have held tlock in the first place.
4458          */
4459         if (ip->i_mapcnt > 0 && MANDLOCK(vp, ip->i_mode))
4460                 return (EAGAIN);
4461         return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
4462 }
4463 
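     /*
      * Free (F_FREESP) or preallocate (F_ALLOCSP) file space under the
      * lockfs protocol.  A free that starts at offset zero also posts a
      * truncate vnevent.
      */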
4464 /* ARGSUSED */
4465 static int
4466 ufs_space(struct vnode *vp, int cmd, struct flock64 *bfp, int flag,
4467         offset_t offset, cred_t *cr, caller_context_t *ct)
4468 {
4469         struct ufsvfs *ufsvfsp = VTOI(vp)->i_ufsvfs;
4470         struct ulockfs *ulp;
4471         int error;
4472 
4473         if ((error = convoff(vp, bfp, 0, offset)) == 0) {
4474                 if (cmd == F_FREESP) {
4475                         error = ufs_lockfs_begin(ufsvfsp, &ulp,
4476                             ULOCKFS_SPACE_MASK);
4477                         if (error)
4478                                 return (error);
4479                         error = ufs_freesp(vp, bfp, flag, cr);
4480 
4481                         if (error == 0 && bfp->l_start == 0)
4482                                 vnevent_truncate(vp, ct);
4483                 } else if (cmd == F_ALLOCSP) {
4484                         error = ufs_lockfs_begin(ufsvfsp, &ulp,
4485                             ULOCKFS_FALLOCATE_MASK);
4486                         if (error)
4487                                 return (error);
4488                         error = ufs_allocsp(vp, bfp, cr);
4489                 } else
4490                         return (EINVAL); /* Command not handled here */
4491 
4492                 if (ulp)
4493                         ufs_lockfs_end(ulp);
4494 
4495         }
4496         return (error);
4497 }
4498 
4499 /*
4500  * Used to determine if read ahead should be done. Also used to
4501  * determine when write back occurs.
4502  */
4503 #define CLUSTSZ(ip)             ((ip)->i_ufsvfs->vfs_ioclustsz)
4504 
4505 /*
4506  * A faster version of ufs_getpage.
4507  *
4508  * We optimize by inlining the pvn_getpages iterator, eliminating
4509  * calls to bmap_read if file doesn't have UFS holes, and avoiding
4510  * the overhead of page_exists().
4511  *
4512  * When a file has UFS holes and ufs_getpage is called with S_READ,
4513  * we remove PROT_WRITE from *protp to avoid calling bmap_read. This
4514  * hurts performance when a file with UFS holes is faulted first in
4515  * S_READ mode and then in S_WRITE mode, since we will take two MMU
4516  * faults in that case.
4517  *
4518  * XXX - the inode fields which control the sequential mode are not
4519  *       protected by any mutex. The read ahead will act wild if
4520  *       multiple processes access the file concurrently and some
4521  *       of them do so in sequential mode. One particularly bad case
4522  *       is when another thread changes the value of i_nextrio between
4523  *       the time this thread tests i_nextrio and the time it reads it
4524  *       again to use it as the offset for the read ahead.
4525  */
4526 /*ARGSUSED*/
4527 static int
4528 ufs_getpage(struct vnode *vp, offset_t off, size_t len, uint_t *protp,
4529         page_t *plarr[], size_t plsz, struct seg *seg, caddr_t addr,
4530         enum seg_rw rw, struct cred *cr, caller_context_t *ct)
4531 {
4532         u_offset_t      uoff = (u_offset_t)off; /* type conversion */
4533         u_offset_t      pgoff;
4534         u_offset_t      eoff;
4535         struct inode    *ip = VTOI(vp);
4536         struct ufsvfs   *ufsvfsp = ip->i_ufsvfs;
4537         struct fs       *fs;
4538         struct ulockfs  *ulp;
4539         page_t          **pl;
4540         caddr_t         pgaddr;
4541         krw_t           rwtype;
4542         int             err;
4543         int             has_holes;
4544         int             beyond_eof;
4545         int             seqmode;
4546         int             pgsize = PAGESIZE;
4547         int             dolock;
4548         int             do_qlock;
4549         int             trans_size;
4550 
4551         ASSERT((uoff & PAGEOFFSET) == 0);
4552 
4553         if (protp)
4554                 *protp = PROT_ALL;
4555 
4556         /*
4557          * Obey the lockfs protocol
4558          */
4559         err = ufs_lockfs_begin_getpage(ufsvfsp, &ulp, seg,
4560             rw == S_READ || rw == S_EXEC, protp);
4561         if (err)
4562                 goto out;
4563 
4564         fs = ufsvfsp->vfs_fs;
4565 
4566         if (ulp && (rw == S_CREATE || rw == S_WRITE) &&
4567             !(vp->v_flag & VISSWAP)) {
4568                 /*
4569                  * Try to start a transaction; this fails with EWOULDBLOCK
4570                  * if blocking is expected to occur and the address space is
4571                  * not the kernel address space.
4572                  */
4573                 trans_size = TOP_GETPAGE_SIZE(ip);
4574                 if (seg->s_as != &kas) {
4575                         TRANS_TRY_BEGIN_ASYNC(ufsvfsp, TOP_GETPAGE,
4576                             trans_size, err)
4577                         if (err == EWOULDBLOCK) {
4578                                 /*
4579                                  * Use EDEADLK here because the VM code
4580                                  * can normally never see this error.
4581                                  */
4582                                 err = EDEADLK;
4583                                 ufs_lockfs_end(ulp);
4584                                 goto out;
4585                         }
4586                 } else {
4587                         TRANS_BEGIN_ASYNC(ufsvfsp, TOP_GETPAGE, trans_size);
4588                 }
4589         }
4590 
4591         if (vp->v_flag & VNOMAP) {
4592                 err = ENOSYS;
4593                 goto unlock;
4594         }
4595 
4596         seqmode = ip->i_nextr == uoff && rw != S_CREATE;
4597 
4598         rwtype = RW_READER;             /* start as a reader */
4599         dolock = (rw_owner(&ip->i_contents) != curthread);
4600         /*
4601          * If this thread owns the lock, i.e., this thread grabbed it
4602          * as writer somewhere above, then we don't need to grab the
4603          * lock as reader in this routine.
4604          */
4605         do_qlock = (rw_owner(&ufsvfsp->vfs_dqrwlock) != curthread);
4606 
4607 retrylock:
4608         if (dolock) {
4609                 /*
4610                  * Grab the quota lock if we need to call
4611                  * bmap_write() below (with i_contents as writer).
4612                  */
4613                 if (do_qlock && rwtype == RW_WRITER)
4614                         rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
4615                 rw_enter(&ip->i_contents, rwtype);
4616         }
4617 
4618         /*
4619          * We may be getting called as a side effect of a bmap using
4620          * fbread() when the blocks might be being allocated and the
4621          * size has not yet been up'ed.  In this case we want to be
4622          * able to return zero pages if we get back UFS_HOLE from
4623          * calling bmap for a non write case here.  We also might have
4624          * to read some frags from the disk into a page if we are
4625          * extending the number of frags for a given lbn in bmap().
4626          * Large Files: The read of i_size here is atomic because
4627          * i_contents is held here. If dolock is zero, the lock
4628          * is held in bmap routines.
4629          */
4630         beyond_eof = uoff + len >
4631             P2ROUNDUP_TYPED(ip->i_size, PAGESIZE, u_offset_t);
4632         if (beyond_eof && seg != segkmap) {
4633                 if (dolock) {
4634                         rw_exit(&ip->i_contents);
4635                         if (do_qlock && rwtype == RW_WRITER)
4636                                 rw_exit(&ufsvfsp->vfs_dqrwlock);
4637                 }
4638                 err = EFAULT;
4639                 goto unlock;
4640         }
4641 
4642         /*
4643          * Must hold i_contents lock throughout the call to pvn_getpages
4644          * since locked pages are returned from each call to ufs_getapage.
4645          * Must *not* return locked pages and then try for contents lock
4646          * due to lock ordering requirements (inode > page)
4647          */
4648 
4649         has_holes = bmap_has_holes(ip);
4650 
4651         if ((rw == S_WRITE || rw == S_CREATE) && has_holes && !beyond_eof) {
4652                 int     blk_size;
4653                 u_offset_t offset;
4654 
4655                 /*
4656                  * We must acquire the RW_WRITER lock in order to
4657                  * call bmap_write().
4658                  */
4659                 if (dolock && rwtype == RW_READER) {
4660                         rwtype = RW_WRITER;
4661 
4662                         /*
4663                          * Grab the quota lock before
4664                          * upgrading i_contents, but if we can't grab it
4665                          * don't wait here due to lock order:
4666                          * vfs_dqrwlock > i_contents.
4667                          */
4668                         if (do_qlock &&
4669                             rw_tryenter(&ufsvfsp->vfs_dqrwlock, RW_READER)
4670                             == 0) {
4671                                 rw_exit(&ip->i_contents);
4672                                 goto retrylock;
4673                         }
4674                         if (!rw_tryupgrade(&ip->i_contents)) {
4675                                 rw_exit(&ip->i_contents);
4676                                 if (do_qlock)
4677                                         rw_exit(&ufsvfsp->vfs_dqrwlock);
4678                                 goto retrylock;
4679                         }
4680                 }
4681 
4682                 /*
4683                  * May be allocating disk blocks for holes here as
4684                  * a result of mmap faults. write(2) does the bmap_write
4685                  * in rdip/wrip, not here. We are not dealing with frags
4686                  * in this case.
4687                  */
4688                 /*
4689                  * Large Files: We cast fs_bmask field to offset_t
4690                  * just as we do for MAXBMASK because uoff is a 64-bit
4691                  * data type. fs_bmask will still be a 32-bit type
4692                  * as we cannot change any ondisk data structures.
4693                  */
4694 
4695                 offset = uoff & (offset_t)fs->fs_bmask;
4696                 while (offset < uoff + len) {
4697                         blk_size = (int)blksize(fs, ip, lblkno(fs, offset));
4698                         err = bmap_write(ip, offset, blk_size,
4699                             BI_NORMAL, NULL, cr);
4700                         if (ip->i_flag & (ICHG|IUPD))
4701                                 ip->i_seq++;
4702                         if (err)
4703                                 goto update_inode;
4704                         offset += blk_size; /* XXX - make this contig */
4705                 }
4706         }
4707 
4708         /*
4709          * Can be a reader from now on.
4710          */
4711         if (dolock && rwtype == RW_WRITER) {
4712                 rw_downgrade(&ip->i_contents);
4713                 /*
4714                  * We can release vfs_dqrwlock early so do it, but make
4715                  * sure we don't try to release it again at the bottom.
4716                  */
4717                 if (do_qlock) {
4718                         rw_exit(&ufsvfsp->vfs_dqrwlock);
4719                         do_qlock = 0;
4720                 }
4721         }
4722 
4723         /*
4724          * We remove PROT_WRITE in cases when the file has UFS holes
4725          * because we don't want to call bmap_read() to check whether
4726          * each page is backed with a disk block.
4727          */
4728         if (protp && has_holes && rw != S_WRITE && rw != S_CREATE)
4729                 *protp &= ~PROT_WRITE;
4730 
4731         err = 0;
4732 
4733         /*
4734          * The loop looks up pages in the range [off, off + len).
4735          * For each page, we first check if we should initiate an asynchronous
4736          * read ahead before we call page_lookup (we may sleep in page_lookup
4737          * for a previously initiated disk read).
4738          */
4739         eoff = (uoff + len);
4740         for (pgoff = uoff, pgaddr = addr, pl = plarr;
4741             pgoff < eoff; /* empty */) {
4742                 page_t  *pp;
4743                 u_offset_t      nextrio;
4744                 se_t    se;
4745                 int retval;
4746 
4747                 se = ((rw == S_CREATE || rw == S_OTHER) ? SE_EXCL : SE_SHARED);
4748 
4749                 /* Handle async getpage (faultahead) */
4750                 if (plarr == NULL) {
4751                         ip->i_nextrio = pgoff;
4752                         (void) ufs_getpage_ra(vp, pgoff, seg, pgaddr);
4753                         pgoff += pgsize;
4754                         pgaddr += pgsize;
4755                         continue;
4756                 }
4757                 /*
4758                  * Check if we should initiate read ahead of next cluster.
4759                  * We call page_exists only when we need to confirm that
4760                  * we have the current page before we initiate the read ahead.
4761                  */
4762                 nextrio = ip->i_nextrio;
4763                 if (seqmode &&
4764                     pgoff + CLUSTSZ(ip) >= nextrio && pgoff <= nextrio &&
4765                     nextrio < ip->i_size && page_exists(vp, pgoff)) {
4766                         retval = ufs_getpage_ra(vp, pgoff, seg, pgaddr);
4767                         /*
4768                          * We always read ahead the next cluster of data
4769                          * starting from i_nextrio. If the page (vp,nextrio)
4770                          * is actually in core at this point, the routine
4771                          * ufs_getpage_ra() will stop pre-fetching data
4772                          * until we read that page in a synchronized manner
4773                          * through ufs_getpage_miss(). So, we should increase
4774                          * i_nextrio if the page (vp, nextrio) exists.
4775                          */
4776                         if ((retval == 0) && page_exists(vp, nextrio)) {
4777                                 ip->i_nextrio = nextrio + pgsize;
4778                         }
4779                 }
4780 
4781                 if ((pp = page_lookup(vp, pgoff, se)) != NULL) {
4782                         /*
4783                          * We found the page in the page cache.
4784                          */
4785                         *pl++ = pp;
4786                         pgoff += pgsize;
4787                         pgaddr += pgsize;
4788                         len -= pgsize;
4789                         plsz -= pgsize;
4790                 } else  {
4791                         /*
4792                          * We have to create the page, or read it from disk.
4793                          */
4794                         if (err = ufs_getpage_miss(vp, pgoff, len, seg, pgaddr,
4795                             pl, plsz, rw, seqmode))
4796                                 goto error;
4797 
4798                         while (*pl != NULL) {
4799                                 pl++;
4800                                 pgoff += pgsize;
4801                                 pgaddr += pgsize;
4802                                 len -= pgsize;
4803                                 plsz -= pgsize;
4804                         }
4805                 }
4806         }
4807 
4808         /*
4809          * Return pages up to plsz if they are in the page cache.
4810          * We cannot return pages if there is a chance that they are
4811          * backed with a UFS hole and rw is S_WRITE or S_CREATE.
4812          */
4813         if (plarr && !(has_holes && (rw == S_WRITE || rw == S_CREATE))) {
4814 
4815                 ASSERT((protp == NULL) ||
4816                     !(has_holes && (*protp & PROT_WRITE)));
4817 
4818                 eoff = pgoff + plsz;
4819                 while (pgoff < eoff) {
4820                         page_t          *pp;
4821 
4822                         if ((pp = page_lookup_nowait(vp, pgoff,
4823                             SE_SHARED)) == NULL)
4824                                 break;
4825 
4826                         *pl++ = pp;
4827                         pgoff += pgsize;
4828                         plsz -= pgsize;
4829                 }
4830         }
4831 
4832         if (plarr)
4833                 *pl = NULL;                     /* Terminate page list */
4834         ip->i_nextr = pgoff;
4835 
4836 error:
4837         if (err && plarr) {
4838                 /*
4839                  * Release any pages we have locked.
4840                  */
4841                 while (pl > &plarr[0])
4842                         page_unlock(*--pl);
4843 
4844                 plarr[0] = NULL;
4845         }
4846 
4847 update_inode:
4848         /*
4849          * If the inode is not already marked for IACC (in rdip() for read)
4850          * and the inode is not marked for no access time update (in wrip()
4851          * for write) then update the inode access time and mod time now.
4852          */
4853         if ((ip->i_flag & (IACC | INOACC)) == 0) {
4854                 if ((rw != S_OTHER) && (ip->i_mode & IFMT) != IFDIR) {
4855                         if (!ULOCKFS_IS_NOIACC(ITOUL(ip)) &&
4856                             (fs->fs_ronly == 0) &&
4857                             (!ufsvfsp->vfs_noatime)) {
4858                                 mutex_enter(&ip->i_tlock);
4859                                 ip->i_flag |= IACC;
4860                                 ITIMES_NOLOCK(ip);
4861                                 mutex_exit(&ip->i_tlock);
4862                         }
4863                 }
4864         }
4865 
4866         if (dolock) {
4867                 rw_exit(&ip->i_contents);
4868                 if (do_qlock && rwtype == RW_WRITER)
4869                         rw_exit(&ufsvfsp->vfs_dqrwlock);
4870         }
4871 
4872 unlock:
4873         if (ulp) {
4874                 if ((rw == S_CREATE || rw == S_WRITE) &&
4875                     !(vp->v_flag & VISSWAP)) {
4876                         TRANS_END_ASYNC(ufsvfsp, TOP_GETPAGE, trans_size);
4877                 }
4878                 ufs_lockfs_end(ulp);
4879         }
4880 out:
4881         return (err);
4882 }
4883 
4884 /*
4885  * ufs_getpage_miss is called when ufs_getpage missed the page in the page
4886  * cache. The page is either read from the disk, or it's created.
4887  * A page is created (without disk read) if rw == S_CREATE, or if
4888  * the page is not backed with a real disk block (UFS hole).
4889  */
4890 /* ARGSUSED */
4891 static int
4892 ufs_getpage_miss(struct vnode *vp, u_offset_t off, size_t len, struct seg *seg,
4893         caddr_t addr, page_t *pl[], size_t plsz, enum seg_rw rw, int seq)
4894 {
4895         struct inode    *ip = VTOI(vp);
4896         page_t          *pp;
4897         daddr_t         bn;
4898         size_t          io_len;
4899         int             crpage = 0;
4900         int             err;
4901         int             contig;
4902         int             bsize = ip->i_fs->fs_bsize;
4903 
4904         /*
4905          * Figure out whether the page can be created, or must be
4906          * read from the disk.
4907          */
4908         if (rw == S_CREATE)
4909                 crpage = 1;
4910         else {
4911                 contig = 0;
4912                 if (err = bmap_read(ip, off, &bn, &contig))
4913                         return (err);
4914 
4915                 crpage = (bn == UFS_HOLE);
4916 
4917                 /*
4918                  * If it's also a fallocated block that hasn't been written to
4919                  * yet, we will treat it just like a UFS_HOLE and create
4920                  * a zero page for it
4921                  */
4922                 if (ISFALLOCBLK(ip, bn))
4923                         crpage = 1;
4924         }
4925 
4926         if (crpage) {
4927                 if ((pp = page_create_va(vp, off, PAGESIZE, PG_WAIT, seg,
4928                     addr)) == NULL) {
4929                         return (ufs_fault(vp,
4930                             "ufs_getpage_miss: page_create == NULL"));
4931                 }
4932 
4933                 if (rw != S_CREATE)
4934                         pagezero(pp, 0, PAGESIZE);
4935 
4936                 io_len = PAGESIZE;
4937         } else {
4938                 u_offset_t      io_off;
4939                 uint_t  xlen;
4940                 struct buf      *bp;
4941                 ufsvfs_t        *ufsvfsp = ip->i_ufsvfs;
4942 
4943                 /*
4944                  * If access is not in sequential order, we read from disk
4945                  * in bsize units.
4946                  *
4947                  * We limit the size of the transfer to bsize if we are reading
4948                  * from the beginning of the file. Note in this situation we
4949                  * will hedge our bets and initiate an async read ahead of
4950                  * the second block.
4951                  */
4952                 if (!seq || off == 0)
4953                         contig = MIN(contig, bsize);
4954 
4955                 pp = pvn_read_kluster(vp, off, seg, addr, &io_off,
4956                     &io_len, off, contig, 0);
4957 
4958                 /*
4959                  * Some other thread has entered the page.
4960                  * ufs_getpage will retry page_lookup.
4961                  */
4962                 if (pp == NULL) {
4963                         pl[0] = NULL;
4964                         return (0);
4965                 }
4966 
4967                 /*
4968                  * Zero part of the page which we are not
4969                  * going to read from the disk.
4970                  */
4971                 xlen = io_len & PAGEOFFSET;
4972                 if (xlen != 0)
4973                         pagezero(pp->p_prev, xlen, PAGESIZE - xlen);
4974 
4975                 bp = pageio_setup(pp, io_len, ip->i_devvp, B_READ);
4976                 bp->b_edev = ip->i_dev;
4977                 bp->b_dev = cmpdev(ip->i_dev);
4978                 bp->b_blkno = bn;
4979                 bp->b_un.b_addr = (caddr_t)0;
4980                 bp->b_file = ip->i_vnode;
4981                 bp->b_offset = off;
4982 
4983                 if (ufsvfsp->vfs_log) {
4984                         lufs_read_strategy(ufsvfsp->vfs_log, bp);
4985                 } else if (ufsvfsp->vfs_snapshot) {
4986                         fssnap_strategy(&ufsvfsp->vfs_snapshot, bp);
4987                 } else {
4988                         ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
4989                         ub.ub_getpages.value.ul++;
4990                         (void) bdev_strategy(bp);
4991                         lwp_stat_update(LWP_STAT_INBLK, 1);
4992                 }
4993 
4994                 ip->i_nextrio = off + ((io_len + PAGESIZE - 1) & PAGEMASK);
4995 
4996                 /*
4997                  * If the file access is sequential, initiate read ahead
4998                  * of the next cluster.
4999                  */
5000                 if (seq && ip->i_nextrio < ip->i_size)
5001                         (void) ufs_getpage_ra(vp, off, seg, addr);
5002                 err = biowait(bp);
5003                 pageio_done(bp);
5004 
5005                 if (err) {
5006                         pvn_read_done(pp, B_ERROR);
5007                         return (err);
5008                 }
5009         }
5010 
5011         pvn_plist_init(pp, pl, plsz, off, io_len, rw);
5012         return (0);
5013 }
5014 
5015 /*
5016  * Read ahead a cluster from the disk. Returns the length in bytes.
5017  */
5018 static int
5019 ufs_getpage_ra(struct vnode *vp, u_offset_t off, struct seg *seg, caddr_t addr)
5020 {
5021         struct inode    *ip = VTOI(vp);
5022         page_t          *pp;
5023         u_offset_t      io_off = ip->i_nextrio;
5024         ufsvfs_t        *ufsvfsp;
5025         caddr_t         addr2 = addr + (io_off - off);
5026         struct buf      *bp;
5027         daddr_t         bn;
5028         size_t          io_len;
5029         int             err;
5030         int             contig;
5031         int             xlen;
5032         int             bsize = ip->i_fs->fs_bsize;
5033 
5034         /*
5035          * If the directio advisory is in effect on this file,
5036          * then do not do buffered read ahead. Read ahead makes
5037          * it more difficult on threads using directio as they
5038          * will be forced to flush the pages from this vnode.
5039          */
5040         if ((ufsvfsp = ip->i_ufsvfs) == NULL)
5041                 return (0);
5042         if (ip->i_flag & IDIRECTIO || ufsvfsp->vfs_forcedirectio)
5043                 return (0);
5044 
5045         /*
5046          * Is this test needed?
5047          */
5048         if (addr2 >= seg->s_base + seg->s_size)
5049                 return (0);
5050 
5051         contig = 0;
5052         err = bmap_read(ip, io_off, &bn, &contig);
5053         /*
5054          * If it's a UFS_HOLE or a fallocated block, do not perform
5055          * any read aheads since there probably is nothing to read ahead
5056          */
5057         if (err || bn == UFS_HOLE || ISFALLOCBLK(ip, bn))
5058                 return (0);
5059 
5060         /*
5061          * Limit the transfer size to bsize if this is the 2nd block.
5062          */
5063         if (io_off == (u_offset_t)bsize)
5064                 contig = MIN(contig, bsize);
5065 
5066         if ((pp = pvn_read_kluster(vp, io_off, seg, addr2, &io_off,
5067             &io_len, io_off, contig, 1)) == NULL)
5068                 return (0);
5069 
5070         /*
5071          * Zero part of page which we are not going to read from disk
5072          */
5073         if ((xlen = (io_len & PAGEOFFSET)) > 0)
5074                 pagezero(pp->p_prev, xlen, PAGESIZE - xlen);
5075 
5076         ip->i_nextrio = (io_off + io_len + PAGESIZE - 1) & PAGEMASK;
5077 
5078         bp = pageio_setup(pp, io_len, ip->i_devvp, B_READ | B_ASYNC);
5079         bp->b_edev = ip->i_dev;
5080         bp->b_dev = cmpdev(ip->i_dev);
5081         bp->b_blkno = bn;
5082         bp->b_un.b_addr = (caddr_t)0;
5083         bp->b_file = ip->i_vnode;
5084         bp->b_offset = off;
5085 
5086         if (ufsvfsp->vfs_log) {
5087                 lufs_read_strategy(ufsvfsp->vfs_log, bp);
5088         } else if (ufsvfsp->vfs_snapshot) {
5089                 fssnap_strategy(&ufsvfsp->vfs_snapshot, bp);
5090         } else {
5091                 ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
5092                 ub.ub_getras.value.ul++;
5093                 (void) bdev_strategy(bp);
5094                 lwp_stat_update(LWP_STAT_INBLK, 1);
5095         }
5096 
5097         return (io_len);
5098 }
5099 
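     /*
      * When set (the default), small asynchronous putpage requests are
      * accumulated into clusters (see the B_ASYNC handling in
      * ufs_putpage() below) before being pushed out.
      */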
5100 int     ufs_delay = 1;
5101 /*
5102  * Flags are composed of {B_INVAL, B_FREE, B_DONTNEED, B_FORCE, B_ASYNC}
5103  *
5104  * LMXXX - the inode really ought to contain a pointer to one of these
5105  * async args.  Stuff gunk in there and just hand the whole mess off.
5106  * This would replace i_delaylen, i_delayoff.
5107  */
5108 /*ARGSUSED*/
5109 static int
5110 ufs_putpage(struct vnode *vp, offset_t off, size_t len, int flags,
5111         struct cred *cr, caller_context_t *ct)
5112 {
5113         struct inode *ip = VTOI(vp);
5114         int err = 0;
5115 
5116         if (vp->v_count == 0) {
5117                 return (ufs_fault(vp, "ufs_putpage: bad v_count == 0"));
5118         }
5119 
5120         /*
5121          * XXX - Why should this check be made here?
5122          */
5123         if (vp->v_flag & VNOMAP) {
5124                 err = ENOSYS;
5125                 goto errout;
5126         }
5127 
5128         if (ip->i_ufsvfs == NULL) {
5129                 err = EIO;
5130                 goto errout;
5131         }
5132 
5133         if (flags & B_ASYNC) {
5134                 if (ufs_delay && len &&
5135                     (flags & ~(B_ASYNC|B_DONTNEED|B_FREE)) == 0) {
5136                         mutex_enter(&ip->i_tlock);
5137                         /*
5138                          * If nobody stalled, start a new cluster.
5139                          */
5140                         if (ip->i_delaylen == 0) {
5141                                 ip->i_delayoff = off;
5142                                 ip->i_delaylen = len;
5143                                 mutex_exit(&ip->i_tlock);
5144                                 goto errout;
5145                         }
5146                         /*
5147                          * If we have a full cluster or they are not contig,
5148                          * then push last cluster and start over.
5149                          */
5150                         if (ip->i_delaylen >= CLUSTSZ(ip) ||
5151                             ip->i_delayoff + ip->i_delaylen != off) {
5152                                 u_offset_t doff;
5153                                 size_t dlen;
5154 
5155                                 doff = ip->i_delayoff;
5156                                 dlen = ip->i_delaylen;
5157                                 ip->i_delayoff = off;
5158                                 ip->i_delaylen = len;
5159                                 mutex_exit(&ip->i_tlock);
5160                                 err = ufs_putpages(vp, doff, dlen,
5161                                     flags, cr);
5162                                 /* LMXXX - flags are new val, not old */
5163                                 goto errout;
5164                         }
5165                         /*
5166                          * There is something there, it's not full, and
5167                          * it is contig.
5168                          */
5169                         ip->i_delaylen += len;
5170                         mutex_exit(&ip->i_tlock);
5171                         goto errout;
5172                 }
5173                 /*
5174                  * Must have weird flags or we are not clustering.
5175                  */
5176         }
5177 
5178         err = ufs_putpages(vp, off, len, flags, cr);
5179 
5180 errout:
5181         return (err);
5182 }
5183 
5184 /*
5185  * If len == 0, do from off to EOF.
5186  *
5187  * The normal cases should be len == 0 & off == 0 (entire vp list),
5188  * len == MAXBSIZE (from segmap_release actions), and len == PAGESIZE
5189  * (from pageout).
5190  */
5191 /*ARGSUSED*/
5192 static int
5193 ufs_putpages(
5194         struct vnode *vp,
5195         offset_t off,
5196         size_t len,
5197         int flags,
5198         struct cred *cr)
5199 {
5200         u_offset_t io_off;
5201         u_offset_t eoff;
5202         struct inode *ip = VTOI(vp);
5203         page_t *pp;
5204         size_t io_len;
5205         int err = 0;
5206         int dolock;
5207 
5208         if (vp->v_count == 0)
5209                 return (ufs_fault(vp, "ufs_putpages: v_count == 0"));
5210         /*
5211          * Acquire the reader/writer inode lock before locking
5212          * any pages in this inode.
5213          * The inode lock is held during i/o.
5214          */
5215         if (len == 0) {
5216                 mutex_enter(&ip->i_tlock);
5217                 ip->i_delayoff = ip->i_delaylen = 0;
5218                 mutex_exit(&ip->i_tlock);
5219         }
5220         dolock = (rw_owner(&ip->i_contents) != curthread);
5221         if (dolock) {
5222                 /*
5223                  * Must synchronize this thread and any possible thread
5224                  * operating in the window of vulnerability in wrip().
5225                  * It is dangerous to allow both a thread doing a putpage
5226                  * and a thread writing, so serialize them.  The exception
5227                  * is when the thread in wrip() does something which causes
5228                  * a putpage operation.  Then, the thread must be allowed
5229                  * to continue.  It may encounter a bmap_read problem in
5230                  * ufs_putapage, but that is handled in ufs_putapage.
5231                  * Allow async writers to proceed, we don't want to block
5232                  * the pageout daemon.
5233                  */
5234                 if (ip->i_writer == curthread)
5235                         rw_enter(&ip->i_contents, RW_READER);
5236                 else {
5237                         for (;;) {
5238                                 rw_enter(&ip->i_contents, RW_READER);
5239                                 mutex_enter(&ip->i_tlock);
5240                                 /*
5241                                  * If there is no thread in the critical
5242                                  * section of wrip(), then proceed.
5243                                  * Otherwise, wait until there isn't one.
5244                                  */
5245                                 if (ip->i_writer == NULL) {
5246                                         mutex_exit(&ip->i_tlock);
5247                                         break;
5248                                 }
5249                                 rw_exit(&ip->i_contents);
5250                                 /*
5251                                  * Bounce async writers when we have a writer
5252                                  * working on this file so we don't deadlock
5253                                  * the pageout daemon.
5254                                  */
5255                                 if (flags & B_ASYNC) {
5256                                         mutex_exit(&ip->i_tlock);
5257                                         return (0);
5258                                 }
5259                                 cv_wait(&ip->i_wrcv, &ip->i_tlock);
5260                                 mutex_exit(&ip->i_tlock);
5261                         }
5262                 }
5263         }
5264 
5265         if (!vn_has_cached_data(vp)) {
5266                 if (dolock)
5267                         rw_exit(&ip->i_contents);
5268                 return (0);
5269         }
5270 
5271         if (len == 0) {
5272                 /*
5273                  * Search the entire vp list for pages >= off.
5274                  */
5275                 err = pvn_vplist_dirty(vp, (u_offset_t)off, ufs_putapage,
5276                     flags, cr);
5277         } else {
5278                 /*
5279                  * Loop over all offsets in the range looking for
5280                  * pages to deal with.
5281                  */
5282                 if ((eoff = blkroundup(ip->i_fs, ip->i_size)) != 0)
5283                         eoff = MIN(off + len, eoff);
5284                 else
5285                         eoff = off + len;
5286 
5287                 for (io_off = off; io_off < eoff; io_off += io_len) {
5288                         /*
5289                          * If we are not invalidating, synchronously
5290                          * freeing or writing pages, use the routine
5291                          * page_lookup_nowait() to prevent reclaiming
5292                          * them from the free list.
5293                          */
5294                         if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
5295                                 pp = page_lookup(vp, io_off,
5296                                     (flags & (B_INVAL | B_FREE)) ?
5297                                     SE_EXCL : SE_SHARED);
5298                         } else {
5299                                 pp = page_lookup_nowait(vp, io_off,
5300                                     (flags & B_FREE) ? SE_EXCL : SE_SHARED);
5301                         }
5302 
5303                         if (pp == NULL || pvn_getdirty(pp, flags) == 0)
5304                                 io_len = PAGESIZE;
5305                         else {
5306                                 u_offset_t *io_offp = &io_off;
5307 
5308                                 err = ufs_putapage(vp, pp, io_offp, &io_len,
5309                                     flags, cr);
5310                                 if (err != 0)
5311                                         break;
5312                                 /*
5313                                  * "io_off" and "io_len" are returned as
5314                                  * the range of pages we actually wrote.
5315                                  * This allows us to skip ahead more quickly
5316                                  * since several pages may have been dealt
5317                                  * with by this iteration of the loop.
5318                                  */
5319                         }
5320                 }
5321         }
5322         if (err == 0 && off == 0 && (len == 0 || len >= ip->i_size)) {
5323                 /*
5324                  * We have just sync'ed back all the pages on
5325                  * the inode; turn off the IMODTIME flag.
5326                  */
5327                 mutex_enter(&ip->i_tlock);
5328                 ip->i_flag &= ~IMODTIME;
5329                 mutex_exit(&ip->i_tlock);
5330         }
5331         if (dolock)
5332                 rw_exit(&ip->i_contents);
5333         return (err);
5334 }
5335 
5336 static void
5337 ufs_iodone(buf_t *bp)
5338 {
5339         struct inode *ip;
5340 
5341         ASSERT((bp->b_pages->p_vnode != NULL) && !(bp->b_flags & B_READ));
5342 
5343         bp->b_iodone = NULL;
5344 
5345         ip = VTOI(bp->b_pages->p_vnode);
5346 
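        /*
         * Credit the completed bytes back against the per-inode write
         * throttle; if the outstanding count has dropped back to ufs_LW,
         * wake up any threads throttled on i_wrcv.
         */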
5347         mutex_enter(&ip->i_tlock);
5348         if (ip->i_writes >= ufs_LW) {
5349                 if ((ip->i_writes -= bp->b_bcount) <= ufs_LW)
5350                         if (ufs_WRITES)
5351                                 cv_broadcast(&ip->i_wrcv); /* wake all up */
5352         } else {
5353                 ip->i_writes -= bp->b_bcount;
5354         }
5355 
5356         mutex_exit(&ip->i_tlock);
5357         iodone(bp);
5358 }
5359 
5360 /*
5361  * Write out a single page, possibly klustering adjacent
5362  * dirty pages.  The inode lock must be held.
5363  *
5364  * LMXXX - bsize < pagesize not done.
5365  */
5366 /*ARGSUSED*/
5367 int
5368 ufs_putapage(
5369         struct vnode *vp,
5370         page_t *pp,
5371         u_offset_t *offp,
5372         size_t *lenp,           /* return values */
5373         int flags,
5374         struct cred *cr)
5375 {
5376         u_offset_t io_off;
5377         u_offset_t off;
5378         struct inode *ip = VTOI(vp);
5379         struct ufsvfs *ufsvfsp = ip->i_ufsvfs;
5380         struct fs *fs;
5381         struct buf *bp;
5382         size_t io_len;
5383         daddr_t bn;
5384         int err;
5385         int contig;
5386         int dotrans;
5387 
5388         ASSERT(RW_LOCK_HELD(&ip->i_contents));
5389 
5390         if (ufsvfsp == NULL) {
5391                 err = EIO;
5392                 goto out_trace;
5393         }
5394 
5395         fs = ip->i_fs;
5396         ASSERT(fs->fs_ronly == 0);
5397 
5398         /*
5399          * If the modified time on the inode has not already been
5400          * set elsewhere (e.g. for write/setattr) we set the time now.
5401          * This gives us approximate modified times for mmap'ed files
5402          * which are modified via stores in the user address space.
5403          */
5404         if ((ip->i_flag & IMODTIME) == 0) {
5405                 mutex_enter(&ip->i_tlock);
5406                 ip->i_flag |= IUPD;
5407                 ip->i_seq++;
5408                 ITIMES_NOLOCK(ip);
5409                 mutex_exit(&ip->i_tlock);
5410         }
5411 
5412         /*
5413          * Align the request to a block boundary (for old file systems),
5414          * and go ask bmap() how contiguous things are for this file.
5415          */
5416         off = pp->p_offset & (offset_t)fs->fs_bmask;  /* block align it */
5417         contig = 0;
5418         err = bmap_read(ip, off, &bn, &contig);
5419         if (err)
5420                 goto out;
5421         if (bn == UFS_HOLE) {                   /* putpage never allocates */
5422                 /*
5423                  * logging device is in error mode; simply return EIO
5424                  */
5425                 if (TRANS_ISERROR(ufsvfsp)) {
5426                         err = EIO;
5427                         goto out;
5428                 }
5429                 /*
5430                  * Oops, the thread in the window in wrip() did some
5431                  * sort of operation which caused a putpage in the bad
5432                  * range.  In this case, just return an error which will
5433                  * cause the software modified bit on the page to be set
5434                  * and the page will get written out again later.
5435                  */
5436                 if (ip->i_writer == curthread) {
5437                         err = EIO;
5438                         goto out;
5439                 }
5440                 /*
5441                  * If the pager is trying to push a page in the bad range
5442                  * just tell it to try again later when things are better.
5443                  */
5444                 if (flags & B_ASYNC) {
5445                         err = EAGAIN;
5446                         goto out;
5447                 }
5448                 err = ufs_fault(ITOV(ip), "ufs_putapage: bn == UFS_HOLE");
5449                 goto out;
5450         }
5451 
5452         /*
5453          * If it is a fallocate'd block (stored as a negative block
5454          * number), make it positive again since we are now writing to it.
5455          */
5456         if (ISFALLOCBLK(ip, bn)) {
5457                 err = bmap_set_bn(vp, off, dbtofsb(fs, -bn));
5458                 if (err)
5459                         goto out;
5460 
5461                 bn = -bn;
5462         }
5463 
5464         /*
5465          * Take the length (of contiguous bytes) passed back from bmap()
5466          * and try to get a set of pages covering that extent.
5467          */
5468         pp = pvn_write_kluster(vp, pp, &io_off, &io_len, off, contig, flags);
5469 
5470         /*
5471          * May have run out of memory and not clustered backwards.
5472          * off          p_offset
5473          * [  pp - 1  ][   pp   ]
5474          * [    block           ]
5475          * We told bmap off, so we have to adjust the bn accordingly.
5476          */
5477         if (io_off > off) {
5478                 bn += btod(io_off - off);
5479                 contig -= (io_off - off);
5480         }
5481 
5482         /*
5483          * bmap was careful to tell us the right size, so use that.
5484          * There might be unallocated frags at the end.
5485          * LMXXX - bzero the end of the page?  We must be writing after EOF.
5486          */
5487         if (io_len > contig) {
5488                 ASSERT(io_len - contig < fs->fs_bsize);
5489                 io_len -= (io_len - contig);
5490         }
5491 
5492         /*
5493          * Handle the case where we are writing the last page after EOF.
5494          *
5495          * XXX - just a patch for i-mt3.
5496          */
5497         if (io_len == 0) {
5498                 ASSERT(pp->p_offset >=
5499                     (u_offset_t)(roundup(ip->i_size, PAGESIZE)));
5500                 io_len = PAGESIZE;
5501         }
5502 
5503         bp = pageio_setup(pp, io_len, ip->i_devvp, B_WRITE | flags);
5504 
5505         ULOCKFS_SET_MOD(ITOUL(ip));
5506 
5507         bp->b_edev = ip->i_dev;
5508         bp->b_dev = cmpdev(ip->i_dev);
5509         bp->b_blkno = bn;
5510         bp->b_un.b_addr = (caddr_t)0;
5511         bp->b_file = ip->i_vnode;
5512 
5513         /*
5514          * File contents of shadow or quota inodes are metadata, and updates
5515          * to these need to be put into a logging transaction. All direct
5516          * callers in UFS do that, but fsflush can come here _before_ the
5517          * normal codepath. An example would be updating ACL information, for
5518          * which the normal codepath would be:
5519          *      ufs_si_store()
5520          *      ufs_rdwri()
5521          *      wrip()
5522          *      segmap_release()
5523          *      VOP_PUTPAGE()
5524          * Here, fsflush can pick up the dirty page before segmap_release()
5525          * forces it out. If that happens, there's no transaction.
5526          * We therefore need to test whether a transaction exists and,
5527          * if not, create one for fsflush.
5528          */
5529         dotrans =
5530             (((ip->i_mode & IFMT) == IFSHAD || ufsvfsp->vfs_qinod == ip) &&
5531             ((curthread->t_flag & T_DONTBLOCK) == 0) &&
5532             (TRANS_ISTRANS(ufsvfsp)));
5533 
5534         if (dotrans) {
5535                 curthread->t_flag |= T_DONTBLOCK;
5536                 TRANS_BEGIN_ASYNC(ufsvfsp, TOP_PUTPAGE, TOP_PUTPAGE_SIZE(ip));
5537         }
5538         if (TRANS_ISTRANS(ufsvfsp)) {
5539                 if ((ip->i_mode & IFMT) == IFSHAD) {
5540                         TRANS_BUF(ufsvfsp, 0, io_len, bp, DT_SHAD);
5541                 } else if (ufsvfsp->vfs_qinod == ip) {
5542                         TRANS_DELTA(ufsvfsp, ldbtob(bn), bp->b_bcount, DT_QR,
5543                             0, 0);
5544                 }
5545         }
5546         if (dotrans) {
5547                 TRANS_END_ASYNC(ufsvfsp, TOP_PUTPAGE, TOP_PUTPAGE_SIZE(ip));
5548                 curthread->t_flag &= ~T_DONTBLOCK;
5549         }
5550 
5551         /* write throttle: ufs_iodone() credits these bytes back on completion */
5552 
5553         ASSERT(bp->b_iodone == NULL);
5554         bp->b_iodone = (int (*)())ufs_iodone;
5555         mutex_enter(&ip->i_tlock);
5556         ip->i_writes += bp->b_bcount;
5557         mutex_exit(&ip->i_tlock);
5558 
5559         if (bp->b_flags & B_ASYNC) {
5560                 if (ufsvfsp->vfs_log) {
5561                         lufs_write_strategy(ufsvfsp->vfs_log, bp);
5562                 } else if (ufsvfsp->vfs_snapshot) {
5563                         fssnap_strategy(&ufsvfsp->vfs_snapshot, bp);
5564                 } else {
5565                         ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
5566                         ub.ub_putasyncs.value.ul++;
5567                         (void) bdev_strategy(bp);
5568                         lwp_stat_update(LWP_STAT_OUBLK, 1);
5569                 }
5570         } else {
5571                 if (ufsvfsp->vfs_log) {
5572                         lufs_write_strategy(ufsvfsp->vfs_log, bp);
5573                 } else if (ufsvfsp->vfs_snapshot) {
5574                         fssnap_strategy(&ufsvfsp->vfs_snapshot, bp);
5575                 } else {
5576                         ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
5577                         ub.ub_putsyncs.value.ul++;
5578                         (void) bdev_strategy(bp);
5579                         lwp_stat_update(LWP_STAT_OUBLK, 1);
5580                 }
5581                 err = biowait(bp);
5582                 pageio_done(bp);
5583                 pvn_write_done(pp, ((err) ? B_ERROR : 0) | B_WRITE | flags);
5584         }
5585 
5586         pp = NULL;
5587 
5588 out:
5589         if (err != 0 && pp != NULL)
5590                 pvn_write_done(pp, B_ERROR | B_WRITE | flags);
5591 
5592         if (offp)
5593                 *offp = io_off;
5594         if (lenp)
5595                 *lenp = io_len;
5596 out_trace:
5597         return (err);
5598 }
5599 
5600 uint64_t ufs_map_alock_retry_cnt;
5601 uint64_t ufs_map_lockfs_retry_cnt;
5602 
5603 /* ARGSUSED */
5604 static int
5605 ufs_map(struct vnode *vp,
5606         offset_t off,
5607         struct as *as,
5608         caddr_t *addrp,
5609         size_t len,
5610         uchar_t prot,
5611         uchar_t maxprot,
5612         uint_t flags,
5613         struct cred *cr,
5614         caller_context_t *ct)
5615 {
5616         struct segvn_crargs vn_a;
5617         struct ufsvfs *ufsvfsp = VTOI(vp)->i_ufsvfs;
5618         struct ulockfs *ulp;
5619         int error, sig;
5620         k_sigset_t smask;
5621         caddr_t hint = *addrp;
5622 
5623         if (vp->v_flag & VNOMAP) {
5624                 error = ENOSYS;
5625                 goto out;
5626         }
5627 
5628         if (off < (offset_t)0 || (offset_t)(off + len) < (offset_t)0) {
5629                 error = ENXIO;
5630                 goto out;
5631         }
5632 
5633         if (vp->v_type != VREG) {
5634                 error = ENODEV;
5635                 goto out;
5636         }
5637 
5638 retry_map:
5639         *addrp = hint;
5640         /*
5641          * If file is being locked, disallow mapping.
5642          */
5643         if (vn_has_mandatory_locks(vp, VTOI(vp)->i_mode)) {
5644                 error = EAGAIN;
5645                 goto out;
5646         }
5647 
5648         as_rangelock(as);
5649         /*
5650          * Note that if we are retrying (because ufs_lockfs_trybegin failed in
5651          * the previous attempt), some other thread could have grabbed
5652          * the same VA range if MAP_FIXED is set. In that case, choose_addr
5653          * would unmap the valid VA range, which is OK.
5654          */
5655         error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
5656         if (error != 0) {
5657                 as_rangeunlock(as);
5658                 goto out;
5659         }
5660 
5661         /*
5662          * a_lock has to be acquired before entering the lockfs protocol
5663          * because that is the order in which pagefault works. Also we cannot
5664          * block on a_lock here because this waiting writer will prevent
5665          * further readers like ufs_read from progressing and could cause
5666          * deadlock between ufs_read/ufs_map/pagefault when a quiesce is
5667          * pending.
5668          */
5669         while (!AS_LOCK_TRYENTER(as, RW_WRITER)) {
5670                 ufs_map_alock_retry_cnt++;
5671                 delay(RETRY_LOCK_DELAY);
5672         }
5673 
5674         /*
5675          * We can't hold as->a_lock and wait for lockfs to succeed because
5676          * the proc tools might hang on a_lock, so call ufs_lockfs_trybegin()
5677          * instead.
5678          */
5679         if (error = ufs_lockfs_trybegin(ufsvfsp, &ulp, ULOCKFS_MAP_MASK)) {
5680                 /*
5681                  * ufs_lockfs_trybegin() did not succeed. It is safer to give up
5682                  * as->a_lock and wait for ulp->ul_fs_lock status to change.
5683                  */
5684                 ufs_map_lockfs_retry_cnt++;
5685                 AS_LOCK_EXIT(as);
5686                 as_rangeunlock(as);
5687                 if (error == EIO)
5688                         goto out;
5689 
5690                 mutex_enter(&ulp->ul_lock);
5691                 while (ulp->ul_fs_lock & ULOCKFS_MAP_MASK) {
5692                         if (ULOCKFS_IS_SLOCK(ulp) || ufsvfsp->vfs_nointr) {
5693                                 cv_wait(&ulp->ul_cv, &ulp->ul_lock);
5694                         } else {
5695                                 sigintr(&smask, 1);
5696                                 sig = cv_wait_sig(&ulp->ul_cv, &ulp->ul_lock);
5697                                 sigunintr(&smask);
5698                                 if (((ulp->ul_fs_lock & ULOCKFS_MAP_MASK) &&
5699                                     !sig) || ufsvfsp->vfs_dontblock) {
5700                                         mutex_exit(&ulp->ul_lock);
5701                                         return (EINTR);
5702                                 }
5703                         }
5704                 }
5705                 mutex_exit(&ulp->ul_lock);
5706                 goto retry_map;
5707         }
5708 
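        /*
         * Fill in the segvn creation arguments and create the mapping.
         * as_map_locked() is used because we already hold the address
         * space lock, taken with AS_LOCK_TRYENTER() above.
         */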
5709         vn_a.vp = vp;
5710         vn_a.offset = (u_offset_t)off;
5711         vn_a.type = flags & MAP_TYPE;
5712         vn_a.prot = prot;
5713         vn_a.maxprot = maxprot;
5714         vn_a.cred = cr;
5715         vn_a.amp = NULL;
5716         vn_a.flags = flags & ~MAP_TYPE;
5717         vn_a.szc = 0;
5718         vn_a.lgrp_mem_policy_flags = 0;
5719 
5720         error = as_map_locked(as, *addrp, len, segvn_create, &vn_a);
5721         if (ulp)
5722                 ufs_lockfs_end(ulp);
5723         as_rangeunlock(as);
5724 out:
5725         return (error);
5726 }
5727 
5728 /* ARGSUSED */
5729 static int
5730 ufs_addmap(struct vnode *vp,
5731         offset_t off,
5732         struct as *as,
5733         caddr_t addr,
5734         size_t  len,
5735         uchar_t  prot,
5736         uchar_t  maxprot,
5737         uint_t    flags,
5738         struct cred *cr,
5739         caller_context_t *ct)
5740 {
5741         struct inode *ip = VTOI(vp);
5742 
5743         if (vp->v_flag & VNOMAP) {
5744                 return (ENOSYS);
5745         }
5746 
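        /*
         * i_mapcnt tracks the number of pages currently mapped into
         * user address spaces.
         */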
5747         mutex_enter(&ip->i_tlock);
5748         ip->i_mapcnt += btopr(len);
5749         mutex_exit(&ip->i_tlock);
5750         return (0);
5751 }
5752 
5753 /*ARGSUSED*/
5754 static int
5755 ufs_delmap(struct vnode *vp, offset_t off, struct as *as, caddr_t addr,
5756         size_t len, uint_t prot,  uint_t maxprot,  uint_t flags,
5757         struct cred *cr, caller_context_t *ct)
5758 {
5759         struct inode *ip = VTOI(vp);
5760 
5761         if (vp->v_flag & VNOMAP) {
5762                 return (ENOSYS);
5763         }
5764 
5765         mutex_enter(&ip->i_tlock);
5766         ip->i_mapcnt -= btopr(len);  /* Count released mappings */
5767         ASSERT(ip->i_mapcnt >= 0);
5768         mutex_exit(&ip->i_tlock);
5769         return (0);
5770 }
5771 /*
5772  * Return the poll(2) events requested for non-device files.
5773  */
5774 struct pollhead ufs_pollhd;
5775 
5776 /* ARGSUSED */
5777 int
5778 ufs_poll(vnode_t *vp, short ev, int any, short *revp, struct pollhead **phpp,
5779         caller_context_t *ct)
5780 {
5781         struct ufsvfs   *ufsvfsp;
5782 
5783         *revp = 0;
5784         ufsvfsp = VTOI(vp)->i_ufsvfs;
5785 
5786         if (!ufsvfsp) {
5787                 *revp = POLLHUP;
5788                 goto out;
5789         }
5790 
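        /*
         * A hard-locked or error-locked file system cannot make progress,
         * so report an error condition.
         */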
5791         if (ULOCKFS_IS_HLOCK(&ufsvfsp->vfs_ulockfs) ||
5792             ULOCKFS_IS_ELOCK(&ufsvfsp->vfs_ulockfs)) {
5793                 *revp |= POLLERR;
5794 
5795         } else {
5796                 if ((ev & POLLOUT) && !ufsvfsp->vfs_fs->fs_ronly &&
5797                     !ULOCKFS_IS_WLOCK(&ufsvfsp->vfs_ulockfs))
5798                         *revp |= POLLOUT;
5799 
5800                 if ((ev & POLLWRBAND) && !ufsvfsp->vfs_fs->fs_ronly &&
5801                     !ULOCKFS_IS_WLOCK(&ufsvfsp->vfs_ulockfs))
5802                         *revp |= POLLWRBAND;
5803 
5804                 if (ev & POLLIN)
5805                         *revp |= POLLIN;
5806 
5807                 if (ev & POLLRDNORM)
5808                         *revp |= POLLRDNORM;
5809 
5810                 if (ev & POLLRDBAND)
5811                         *revp |= POLLRDBAND;
5812         }
5813 
5814         if ((ev & POLLPRI) && (*revp & (POLLERR|POLLHUP)))
5815                 *revp |= POLLPRI;
5816 out:
5817         *phpp = !any && !*revp ? &ufs_pollhd : (struct pollhead *)NULL;
5818 
5819         return (0);
5820 }
5821 
5822 /* ARGSUSED */
5823 static int
5824 ufs_l_pathconf(struct vnode *vp, int cmd, ulong_t *valp, struct cred *cr,
5825         caller_context_t *ct)
5826 {
5827         struct ufsvfs   *ufsvfsp = VTOI(vp)->i_ufsvfs;
5828         struct ulockfs  *ulp = NULL;
5829         struct inode    *sip = NULL;
5830         int             error;
5831         struct inode    *ip = VTOI(vp);
5832         int             issync;
5833 
5834         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_PATHCONF_MASK);
5835         if (error)
5836                 return (error);
5837 
5838         switch (cmd) {
5839                 /*
5840                  * Have to handle _PC_NAME_MAX here, because the normal way
5841                  * [fs_pathconf() -> VOP_STATVFS() -> ufs_statvfs()]
5842                  * results in a lock ordering reversal between
5843                  * ufs_lockfs_{begin,end}() and
5844                  * ufs_thread_{suspend,continue}().
5845                  *
5846                  * Keep in sync with ufs_statvfs().
5847                  */
5848         case _PC_NAME_MAX:
5849                 *valp = MAXNAMLEN;
5850                 break;
5851 
5852         case _PC_FILESIZEBITS:
5853                 if (ufsvfsp->vfs_lfflags & UFS_LARGEFILES)
5854                         *valp = UFS_FILESIZE_BITS;
5855                 else
5856                         *valp = 32;
5857                 break;
5858 
5859         case _PC_XATTR_EXISTS:
5860                 if (vp->v_vfsp->vfs_flag & VFS_XATTR) {
5861 
5862                         error =
5863                             ufs_xattr_getattrdir(vp, &sip, LOOKUP_XATTR, cr);
5864                         if (error ==  0 && sip != NULL) {
5865                                 /* Start transaction */
5866                                 if (ulp) {
5867                                         TRANS_BEGIN_CSYNC(ufsvfsp, issync,
5868                                             TOP_RMDIR, TOP_RMDIR_SIZE);
5869                                 }
5870                                 /*
5871                                  * Is the directory empty?
5872                                  */
5873                                 rw_enter(&sip->i_rwlock, RW_WRITER);
5874                                 rw_enter(&sip->i_contents, RW_WRITER);
5875                                 if (ufs_xattrdirempty(sip,
5876                                     sip->i_number, CRED())) {
5877                                         rw_enter(&ip->i_contents, RW_WRITER);
5878                                         ufs_unhook_shadow(ip, sip);
5879                                         rw_exit(&ip->i_contents);
5880 
5881                                         *valp = 0;
5882 
5883                                 } else
5884                                         *valp = 1;
5885                                 rw_exit(&sip->i_contents);
5886                                 rw_exit(&sip->i_rwlock);
5887                                 if (ulp) {
5888                                         TRANS_END_CSYNC(ufsvfsp, error, issync,
5889                                             TOP_RMDIR, TOP_RMDIR_SIZE);
5890                                 }
5891                                 VN_RELE(ITOV(sip));
5892                         } else if (error == ENOENT) {
5893                                 *valp = 0;
5894                                 error = 0;
5895                         }
5896                 } else {
5897                         error = fs_pathconf(vp, cmd, valp, cr, ct);
5898                 }
5899                 break;
5900 
5901         case _PC_ACL_ENABLED:
5902                 *valp = _ACL_ACLENT_ENABLED;
5903                 break;
5904 
5905         case _PC_MIN_HOLE_SIZE:
5906                 *valp = (ulong_t)ip->i_fs->fs_bsize;
5907                 break;
5908 
5909         case _PC_SATTR_ENABLED:
5910         case _PC_SATTR_EXISTS:
5911                 *valp = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
5912                     (vp->v_type == VREG || vp->v_type == VDIR);
5913                 break;
5914 
5915         case _PC_TIMESTAMP_RESOLUTION:
5916                 /*
5917                  * UFS keeps only microsecond timestamp resolution.
5918                  * This is historical and will probably never change.
5919                  */
5920                 *valp = 1000L;
5921                 break;
5922 
5923         default:
5924                 error = fs_pathconf(vp, cmd, valp, cr, ct);
5925                 break;
5926         }
5927 
5928         if (ulp != NULL) {
5929                 ufs_lockfs_end(ulp);
5930         }
5931         return (error);
5932 }
5933 
5934 int ufs_pageio_writes, ufs_pageio_reads;
5935 
5936 /*ARGSUSED*/
5937 static int
5938 ufs_pageio(struct vnode *vp, page_t *pp, u_offset_t io_off, size_t io_len,
5939         int flags, struct cred *cr, caller_context_t *ct)
5940 {
5941         struct inode *ip = VTOI(vp);
5942         struct ufsvfs *ufsvfsp;
5943         page_t *npp = NULL, *opp = NULL, *cpp = pp;
5944         struct buf *bp;
5945         daddr_t bn;
5946         size_t done_len = 0, cur_len = 0;
5947         int err = 0;
5948         int contig = 0;
5949         int dolock;
5950         int vmpss = 0;
5951         struct ulockfs *ulp;
5952 
5953         if ((flags & B_READ) && pp != NULL && pp->p_vnode == vp &&
5954             vp->v_mpssdata != NULL) {
5955                 vmpss = 1;
5956         }
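        /*
         * vmpss: a read issued by segvn to handle a fault against a
         * large-page mapping; the pages are already locked SE_EXCL.
         */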
5957 
5958         dolock = (rw_owner(&ip->i_contents) != curthread);
5959         /*
5960          * We need a better check.  Ideally, we would use a different
5961          * set of vnodeops so that hlocked and forcibly unmounted file
5962          * systems would return EIO where appropriate and without the
5963          * need for these checks.
5964          */
5965         if ((ufsvfsp = ip->i_ufsvfs) == NULL)
5966                 return (EIO);
5967 
5968         /*
5969          * For vmpss (pp can be NULL) case respect the quiesce protocol.
5970          * ul_lock must be taken before locking pages so we can't use it here
5971          * if pp is non-NULL because segvn has already locked the pages
5972          * SE_EXCL. Instead we rely on the fact that a forced umount or
5973          * applying a filesystem lock via ufs_fiolfs() will block in the
5974          * implicit call to ufs_flush() until we unlock the pages after the
5975          * return to segvn. Other ufs_quiesce() callers keep ufs_quiesce_pend
5976          * above 0 until they are done. We have to be careful not to increment
5977          * ul_vnops_cnt here after forceful unmount hlocks the file system.
5978          *
5979          * If pp is NULL use ul_lock to make sure we don't increment
5980          * ul_vnops_cnt after forceful unmount hlocks the file system.
5981          */
5982         if (vmpss || pp == NULL) {
5983                 ulp = &ufsvfsp->vfs_ulockfs;
5984                 if (pp == NULL)
5985                         mutex_enter(&ulp->ul_lock);
5986                 if (ulp->ul_fs_lock & ULOCKFS_GETREAD_MASK) {
5987                         if (pp == NULL) {
5988                                 mutex_exit(&ulp->ul_lock);
5989                         }
5990                         return (vmpss ? EIO : EINVAL);
5991                 }
5992                 atomic_inc_ulong(&ulp->ul_vnops_cnt);
5993                 if (pp == NULL)
5994                         mutex_exit(&ulp->ul_lock);
5995                 if (ufs_quiesce_pend) {
5996                         if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
5997                                 cv_broadcast(&ulp->ul_cv);
5998                         return (vmpss ? EIO : EINVAL);
5999                 }
6000         }
6001 
6002         if (dolock) {
6003                 /*
6004                  * segvn may call VOP_PAGEIO() instead of VOP_GETPAGE() to
6005                  * handle a fault against a segment that maps vnode pages with
6006                  * large mappings.  Segvn creates pages and holds them locked
6007                  * SE_EXCL during VOP_PAGEIO() call. In this case we have to
6008                  * use rw_tryenter() to avoid a potential deadlock since in
6009                  * lock order i_contents needs to be taken first.
6010                  * Segvn will retry via VOP_GETPAGE() if VOP_PAGEIO() fails.
6011                  */
6012                 if (!vmpss) {
6013                         rw_enter(&ip->i_contents, RW_READER);
6014                 } else if (!rw_tryenter(&ip->i_contents, RW_READER)) {
6015                         if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
6016                                 cv_broadcast(&ulp->ul_cv);
6017                         return (EDEADLK);
6018                 }
6019         }
6020 
6021         /*
6022          * Return an error to segvn because the pagefault request is beyond
6023          * PAGESIZE rounded EOF.
6024          */
6025         if (vmpss && btopr(io_off + io_len) > btopr(ip->i_size)) {
6026                 if (dolock)
6027                         rw_exit(&ip->i_contents);
6028                 if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
6029                         cv_broadcast(&ulp->ul_cv);
6030                 return (EFAULT);
6031         }
6032 
6033         if (pp == NULL) {
6034                 if (bmap_has_holes(ip)) {
6035                         err = ENOSYS;
6036                 } else {
6037                         err = EINVAL;
6038                 }
6039                 if (dolock)
6040                         rw_exit(&ip->i_contents);
6041                 if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
6042                         cv_broadcast(&ulp->ul_cv);
6043                 return (err);
6044         }
6045 
6046         /*
6047          * Break the io request into chunks, one for each contiguous
6048          * stretch of disk blocks in the target file.
6049          */
6050         while (done_len < io_len) {
6051                 ASSERT(cpp);
6052                 contig = 0;
6053                 if (err = bmap_read(ip, (u_offset_t)(io_off + done_len),
6054                     &bn, &contig))
6055                         break;
6056 
6057                 if (bn == UFS_HOLE) {   /* No holey swapfiles */
6058                         if (vmpss) {
6059                                 err = EFAULT;
6060                                 break;
6061                         }
6062                         err = ufs_fault(ITOV(ip), "ufs_pageio: bn == UFS_HOLE");
6063                         break;
6064                 }
6065 
6066                 cur_len = MIN(io_len - done_len, contig);
6067                 /*
6068                  * Zero out the part of the page beyond EOF when the last
6069                  * block of a file is a UFS fragment, so that ufs_pageio() can
6070                  * be used instead of ufs_getpage() to handle faults against
6071                  * segvn segments that use large pages.
6072                  */
6073                 page_list_break(&cpp, &npp, btopr(cur_len));
6074                 if ((flags & B_READ) && (cur_len & PAGEOFFSET)) {
6075                         size_t xlen = cur_len & PAGEOFFSET;
6076                         pagezero(cpp->p_prev, xlen, PAGESIZE - xlen);
6077                 }
6078 
6079                 bp = pageio_setup(cpp, cur_len, ip->i_devvp, flags);
6080                 ASSERT(bp != NULL);
6081 
6082                 bp->b_edev = ip->i_dev;
6083                 bp->b_dev = cmpdev(ip->i_dev);
6084                 bp->b_blkno = bn;
6085                 bp->b_un.b_addr = (caddr_t)0;
6086                 bp->b_file = ip->i_vnode;
6087 
6088                 ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
6089                 ub.ub_pageios.value.ul++;
6090                 if (ufsvfsp->vfs_snapshot)
6091                         fssnap_strategy(&(ufsvfsp->vfs_snapshot), bp);
6092                 else
6093                         (void) bdev_strategy(bp);
6094 
6095                 if (flags & B_READ)
6096                         ufs_pageio_reads++;
6097                 else
6098                         ufs_pageio_writes++;
6099                 if (flags & B_READ)
6100                         lwp_stat_update(LWP_STAT_INBLK, 1);
6101                 else
6102                         lwp_stat_update(LWP_STAT_OUBLK, 1);
6103                 /*
6104                  * If the request is not B_ASYNC, wait for i/o to complete
6105                  * and re-assemble the page list to return to the caller.
6106                  * If it is B_ASYNC we leave the page list in pieces and
6107                  * cleanup() will dispose of them.
6108                  */
6109                 if ((flags & B_ASYNC) == 0) {
6110                         err = biowait(bp);
6111                         pageio_done(bp);
6112                         if (err)
6113                                 break;
6114                         page_list_concat(&opp, &cpp);
6115                 }
6116                 cpp = npp;
6117                 npp = NULL;
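                /*
                 * For reads, the last chunk may end mid-page (a fragment at
                 * EOF); round cur_len up so done_len advances over the pages
                 * actually consumed from the list.
                 */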
6118                 if (flags & B_READ)
6119                         cur_len = P2ROUNDUP_TYPED(cur_len, PAGESIZE, size_t);
6120                 done_len += cur_len;
6121         }
6122         ASSERT(err || (cpp == NULL && npp == NULL && done_len == io_len));
6123         if (err) {
6124                 if (flags & B_ASYNC) {
6125                         /* Cleanup unprocessed parts of list */
6126                         page_list_concat(&cpp, &npp);
6127                         if (flags & B_READ)
6128                                 pvn_read_done(cpp, B_ERROR);
6129                         else
6130                                 pvn_write_done(cpp, B_ERROR);
6131                 } else {
6132                         /* Re-assemble list and let caller clean up */
6133                         page_list_concat(&opp, &cpp);
6134                         page_list_concat(&opp, &npp);
6135                 }
6136         }
6137 
6138         if (vmpss && !(ip->i_flag & IACC) && !ULOCKFS_IS_NOIACC(ulp) &&
6139             ufsvfsp->vfs_fs->fs_ronly == 0 && !ufsvfsp->vfs_noatime) {
6140                 mutex_enter(&ip->i_tlock);
6141                 ip->i_flag |= IACC;
6142                 ITIMES_NOLOCK(ip);
6143                 mutex_exit(&ip->i_tlock);
6144         }
6145 
6146         if (dolock)
6147                 rw_exit(&ip->i_contents);
6148         if (vmpss && !atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
6149                 cv_broadcast(&ulp->ul_cv);
6150         return (err);
6151 }
6152 
6153 /*
6154  * Called when the kernel is in a frozen state to dump data
6155  * directly to the device. It uses a private dump data structure,
6156  * set up by ufs_dumpctl(), to locate the correct disk block to which to dump.
6157  */
6158 /*ARGSUSED*/
6159 static int
6160 ufs_dump(vnode_t *vp, caddr_t addr, offset_t ldbn, offset_t dblks,
6161     caller_context_t *ct)
6162 {
6163         u_offset_t      file_size;
6164         struct inode    *ip = VTOI(vp);
6165         struct fs       *fs = ip->i_fs;
6166         daddr_t         dbn, lfsbn;
6167         int             disk_blks = fs->fs_bsize >> DEV_BSHIFT;
6168         int             error = 0;
6169         int             ndbs, nfsbs;
6170 
6171         /*
6172          * forced unmount case
6173          */
6174         if (ip->i_ufsvfs == NULL)
6175                 return (EIO);
6176         /*
6177          * Validate that the inode has not been modified since
6178          * the dump structure was allocated.
6179          */
6180         mutex_enter(&ip->i_tlock);
6181         if ((dump_info == NULL) ||
6182             (dump_info->ip != ip) ||
6183             (dump_info->time.tv_sec != ip->i_mtime.tv_sec) ||
6184             (dump_info->time.tv_usec != ip->i_mtime.tv_usec)) {
6185                 mutex_exit(&ip->i_tlock);
6186                 return (-1);
6187         }
6188         mutex_exit(&ip->i_tlock);
6189 
6190         /*
6191          * See that the file has room for this write
6192          */
6193         UFS_GET_ISIZE(&file_size, ip);
6194 
6195         if (ldbtob(ldbn + dblks) > file_size)
6196                 return (ENOSPC);
6197 
6198         /*
6199          * Find the physical disk block numbers from the dump
6200          * private data structure directly and write out the data
6201          * in contiguous block lumps
6202          */
6203         while (dblks > 0 && !error) {
6204                 lfsbn = (daddr_t)lblkno(fs, ldbtob(ldbn));
6205                 dbn = fsbtodb(fs, dump_info->dblk[lfsbn]) + ldbn % disk_blks;
6206                 nfsbs = 1;
6207                 ndbs = disk_blks - ldbn % disk_blks;
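                /*
                 * Extend the run for as long as the next file system block
                 * is physically contiguous on disk.
                 */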
6208                 while (ndbs < dblks && fsbtodb(fs, dump_info->dblk[lfsbn +
6209                     nfsbs]) == dbn + ndbs) {
6210                         nfsbs++;
6211                         ndbs += disk_blks;
6212                 }
6213                 if (ndbs > dblks)
6214                         ndbs = dblks;
6215                 error = bdev_dump(ip->i_dev, addr, dbn, ndbs);
6216                 addr += ldbtob((offset_t)ndbs);
6217                 dblks -= ndbs;
6218                 ldbn += ndbs;
6219         }
6220         return (error);
6221 
6222 }
6223 
6224 /*
6225  * Prepare the file system before and after the dump operation.
6226  *
6227  * action = DUMP_ALLOC:
6228  * Preparation before dump, allocate dump private data structure
6229  * to hold all the direct and indirect block info for dump.
6230  *
6231  * action = DUMP_FREE:
6232  * Clean up after dump, deallocate the dump private data structure.
6233  *
6234  * action = DUMP_SCAN:
6235  * Scan dump_info for *blkp DEV_BSIZE blocks of contig fs space;
6236  * if found, the starting file-relative DEV_BSIZE lbn is written
6237  * to *blkp; that lbn is intended for use with VOP_DUMP().
6238  */
6239 /*ARGSUSED*/
6240 static int
6241 ufs_dumpctl(vnode_t *vp, int action, offset_t *blkp, caller_context_t *ct)
6242 {
6243         struct inode    *ip = VTOI(vp);
6244         ufsvfs_t        *ufsvfsp = ip->i_ufsvfs;
6245         struct fs       *fs;
6246         daddr32_t       *dblk, *storeblk;
6247         daddr32_t       *nextblk, *endblk;
6248         struct buf      *bp;
6249         int             i, entry, entries;
6250         int             n, ncontig;
6251 
6252         /*
6253          * check for forced unmount
6254          */
6255         if (ufsvfsp == NULL)
6256                 return (EIO);
6257 
6258         if (action == DUMP_ALLOC) {
6259                 /*
6260                  * alloc and record dump_info
6261                  */
6262                 if (dump_info != NULL)
6263                         return (EINVAL);
6264 
6265                 ASSERT(vp->v_type == VREG);
6266                 fs = ufsvfsp->vfs_fs;
6267 
6268                 rw_enter(&ip->i_contents, RW_READER);
6269 
6270                 if (bmap_has_holes(ip)) {
6271                         rw_exit(&ip->i_contents);
6272                         return (EFAULT);
6273                 }
6274 
6275                 /*
6276                  * calculate and allocate space needed according to i_size
6277                  */
6278                 entries = (int)lblkno(fs, blkroundup(fs, ip->i_size));
6279                 dump_info = kmem_alloc(sizeof (struct dump) +
6280                     (entries - 1) * sizeof (daddr32_t), KM_NOSLEEP);
6281                 if (dump_info == NULL) {
6282                         rw_exit(&ip->i_contents);
6283                         return (ENOMEM);
6284                 }
6285 
6286                 /* Start saving the info */
6287                 dump_info->fsbs = entries;
6288                 dump_info->ip = ip;
6289                 storeblk = &dump_info->dblk[0];
6290 
6291                 /* Direct Blocks */
6292                 for (entry = 0; entry < NDADDR && entry < entries; entry++)
6293                         *storeblk++ = ip->i_db[entry];
6294 
6295                 /* Indirect Blocks */
6296                 for (i = 0; i < NIADDR; i++) {
6297                         int error = 0;
6298 
6299                         bp = UFS_BREAD(ufsvfsp,
6300                             ip->i_dev, fsbtodb(fs, ip->i_ib[i]), fs->fs_bsize);
6301                         if (bp->b_flags & B_ERROR)
6302                                 error = EIO;
6303                         else {
6304                                 dblk = bp->b_un.b_daddr;
6305                                 if ((storeblk = save_dblks(ip, ufsvfsp,
6306                                     storeblk, dblk, i, entries)) == NULL)
6307                                         error = EIO;
6308                         }
6309 
6310                         brelse(bp);
6311 
6312                         if (error != 0) {
6313                                 kmem_free(dump_info, sizeof (struct dump) +
6314                                     (entries - 1) * sizeof (daddr32_t));
6315                                 rw_exit(&ip->i_contents);
6316                                 dump_info = NULL;
6317                                 return (error);
6318                         }
6319                 }
6320                 /* and time stamp the information */
6321                 mutex_enter(&ip->i_tlock);
6322                 dump_info->time = ip->i_mtime;
6323                 mutex_exit(&ip->i_tlock);
6324 
6325                 rw_exit(&ip->i_contents);
6326         } else if (action == DUMP_FREE) {
6327                 /*
6328                  * free dump_info
6329                  */
6330                 if (dump_info == NULL)
6331                         return (EINVAL);
6332                 entries = dump_info->fsbs - 1;
6333                 kmem_free(dump_info, sizeof (struct dump) +
6334                     entries * sizeof (daddr32_t));
6335                 dump_info = NULL;
6336         } else if (action == DUMP_SCAN) {
6337                 /*
6338                  * scan dump_info
6339                  */
6340                 if (dump_info == NULL)
6341                         return (EINVAL);
6342 
6343                 dblk = dump_info->dblk;
6344                 nextblk = dblk + 1;
6345                 endblk = dblk + dump_info->fsbs - 1;
6346                 fs = ufsvfsp->vfs_fs;
6347                 ncontig = *blkp >> (fs->fs_bshift - DEV_BSHIFT);
6348 
6349                 /*
6350                  * scan dblk[] entries; contig fs space is found when:
6351                  * ((current blkno + frags per block) == next blkno)
6352                  */
6353                 n = 0;
6354                 while (n < ncontig && dblk < endblk) {
6355                         if ((*dblk + fs->fs_frag) == *nextblk)
6356                                 n++;
6357                         else
6358                                 n = 0;
6359                         dblk++;
6360                         nextblk++;
6361                 }
6362 
6363                 /*
6364                  * index is where size bytes of contig space begins;
6365                  * conversion from index to the file's DEV_BSIZE lbn
6366                  * is equivalent to:  (index * fs_bsize) / DEV_BSIZE
6367                  */
6368                 if (n == ncontig) {
6369                         i = (dblk - dump_info->dblk) - ncontig;
6370                         *blkp = i << (fs->fs_bshift - DEV_BSHIFT);
6371                 } else
6372                         return (EFAULT);
6373         }
6374         return (0);
6375 }
6376 
6377 /*
6378  * Recursive helper function for ufs_dumpctl().  It follows the indirect file
6379  * system blocks until it reaches the disk block addresses, which are
6380  * then stored into the given buffer, storeblk.
6381  */
6382 static daddr32_t *
6383 save_dblks(struct inode *ip, struct ufsvfs *ufsvfsp,  daddr32_t *storeblk,
6384     daddr32_t *dblk, int level, int entries)
6385 {
6386         struct fs       *fs = ufsvfsp->vfs_fs;
6387         struct buf      *bp;
6388         int             i;
6389 
6390         if (level == 0) {
6391                 for (i = 0; i < NINDIR(fs); i++) {
6392                         if (storeblk - dump_info->dblk >= entries)
6393                                 break;
6394                         *storeblk++ = dblk[i];
6395                 }
6396                 return (storeblk);
6397         }
6398         for (i = 0; i < NINDIR(fs); i++) {
6399                 if (storeblk - dump_info->dblk >= entries)
6400                         break;
6401                 bp = UFS_BREAD(ufsvfsp,
6402                     ip->i_dev, fsbtodb(fs, dblk[i]), fs->fs_bsize);
6403                 if (bp->b_flags & B_ERROR) {
6404                         brelse(bp);
6405                         return (NULL);
6406                 }
6407                 storeblk = save_dblks(ip, ufsvfsp, storeblk, bp->b_un.b_daddr,
6408                     level - 1, entries);
6409                 brelse(bp);
6410 
6411                 if (storeblk == NULL)
6412                         return (NULL);
6413         }
6414         return (storeblk);
6415 }
6416 
6417 /* ARGSUSED */
6418 static int
6419 ufs_getsecattr(struct vnode *vp, vsecattr_t *vsap, int flag,
6420         struct cred *cr, caller_context_t *ct)
6421 {
6422         struct inode    *ip = VTOI(vp);
6423         struct ulockfs  *ulp;
6424         struct ufsvfs   *ufsvfsp = ip->i_ufsvfs;
6425         ulong_t         vsa_mask = vsap->vsa_mask;
6426         int             err = EINVAL;
6427 
6428         vsa_mask &= (VSA_ACL | VSA_ACLCNT | VSA_DFACL | VSA_DFACLCNT);
6429 
6430         /*
6431          * Only grab locks if needed; they are not needed to check vsa_mask,
6432          * or if the mask contains no ACL flags.
6433          */
6434         if (vsa_mask != 0) {
6435                 if (err = ufs_lockfs_begin(ufsvfsp, &ulp,
6436                     ULOCKFS_GETATTR_MASK))
6437                         return (err);
6438 
6439                 rw_enter(&ip->i_contents, RW_READER);
6440                 err = ufs_acl_get(ip, vsap, flag, cr);
6441                 rw_exit(&ip->i_contents);
6442 
6443                 if (ulp)
6444                         ufs_lockfs_end(ulp);
6445         }
6446         return (err);
6447 }
6448 
6449 /* ARGSUSED */
6450 static int
6451 ufs_setsecattr(struct vnode *vp, vsecattr_t *vsap, int flag, struct cred *cr,
6452         caller_context_t *ct)
6453 {
6454         struct inode    *ip = VTOI(vp);
6455         struct ulockfs  *ulp = NULL;
6456         struct ufsvfs   *ufsvfsp = VTOI(vp)->i_ufsvfs;
6457         ulong_t         vsa_mask = vsap->vsa_mask;
6458         int             err;
6459         int             haverwlock = 1;
6460         int             trans_size;
6461         int             donetrans = 0;
6462         int             retry = 1;
6463 
6464         ASSERT(RW_LOCK_HELD(&ip->i_rwlock));
6465 
6466         /* Abort now if the request is either empty or invalid. */
6467         vsa_mask &= (VSA_ACL | VSA_ACLCNT | VSA_DFACL | VSA_DFACLCNT);
6468         if ((vsa_mask == 0) ||
6469             ((vsap->vsa_aclentp == NULL) &&
6470             (vsap->vsa_dfaclentp == NULL))) {
6471                 err = EINVAL;
6472                 goto out;
6473         }
6474 
6475         /*
6476          * Following convention, if this is a directory then we acquire the
6477          * inode's i_rwlock after starting a UFS logging transaction;
6478          * otherwise, we acquire it beforehand. Since we were called (and
6479          * must therefore return) with the lock held, we will have to drop it,
6480          * and later reacquire it, if operating on a directory.
6481          */
6482         if (vp->v_type == VDIR) {
6483                 rw_exit(&ip->i_rwlock);
6484                 haverwlock = 0;
6485         } else {
6486                 /* Upgrade the lock if required. */
6487                 if (!rw_write_held(&ip->i_rwlock)) {
6488                         rw_exit(&ip->i_rwlock);
6489                         rw_enter(&ip->i_rwlock, RW_WRITER);
6490                 }
6491         }
6492 
6493 again:
6494         ASSERT(!(vp->v_type == VDIR && haverwlock));
6495         if (err = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_SETATTR_MASK)) {
6496                 ulp = NULL;
6497                 retry = 0;
6498                 goto out;
6499         }
6500 
6501         /*
6502          * Check that the file system supports this operation. Note that
6503          * ufs_lockfs_begin() will have checked that the file system had
6504          * not been forcibly unmounted.
6505          */
6506         if (ufsvfsp->vfs_fs->fs_ronly) {
6507                 err = EROFS;
6508                 goto out;
6509         }
6510         if (ufsvfsp->vfs_nosetsec) {
6511                 err = ENOSYS;
6512                 goto out;
6513         }
6514 
6515         if (ulp) {
6516                 TRANS_BEGIN_ASYNC(ufsvfsp, TOP_SETSECATTR,
6517                     trans_size = TOP_SETSECATTR_SIZE(VTOI(vp)));
6518                 donetrans = 1;
6519         }
6520 
6521         if (vp->v_type == VDIR) {
6522                 rw_enter(&ip->i_rwlock, RW_WRITER);
6523                 haverwlock = 1;
6524         }
6525 
6526         ASSERT(haverwlock);
6527 
6528         /* Do the actual work. */
6529         rw_enter(&ip->i_contents, RW_WRITER);
6530         /*
6531          * Suppress out-of-inodes messages if we will retry.
6532          */
6533         if (retry)
6534                 ip->i_flag |= IQUIET;
6535         err = ufs_acl_set(ip, vsap, flag, cr);
6536         ip->i_flag &= ~IQUIET;
6537         rw_exit(&ip->i_contents);
6538 
6539 out:
6540         if (ulp) {
6541                 if (donetrans) {
6542                         /*
6543                          * top_end_async() can eventually call
6544                          * top_end_sync(), which can block. We must
6545                          * therefore observe the lock-ordering protocol
6546                          * here as well.
6547                          */
6548                         if (vp->v_type == VDIR) {
6549                                 rw_exit(&ip->i_rwlock);
6550                                 haverwlock = 0;
6551                         }
6552                         TRANS_END_ASYNC(ufsvfsp, TOP_SETSECATTR, trans_size);
6553                 }
6554                 ufs_lockfs_end(ulp);
6555         }
6556         /*
6557          * If no inodes are available, try scaring a logically-
6558          * free one out of the delete queue to someplace
6559          * where we can find it.
6560          */
6561         if ((err == ENOSPC) && retry && TRANS_ISTRANS(ufsvfsp)) {
6562                 ufs_delete_drain_wait(ufsvfsp, 1);
6563                 retry = 0;
6564                 if (vp->v_type == VDIR && haverwlock) {
6565                         rw_exit(&ip->i_rwlock);
6566                         haverwlock = 0;
6567                 }
6568                 goto again;
6569         }
6570         /*
6571          * If we need to reacquire the lock then it is safe to do so
6572          * as a reader. This is because ufs_rwunlock(), which will be
6573          * called by our caller after we return, does not differentiate
6574          * between shared and exclusive locks.
6575          */
6576         if (!haverwlock) {
6577                 ASSERT(vp->v_type == VDIR);
6578                 rw_enter(&ip->i_rwlock, RW_READER);
6579         }
6580 
6581         return (err);
6582 }
6583 
6584 /*
6585  * Locate the vnode to be used for an event notification. As this will
6586  * be called prior to the name space change, perform basic verification
6587  * that the change will be allowed.
6588  */
6589 
6590 static int
6591 ufs_eventlookup(struct vnode *dvp, char *nm, struct cred *cr,
6592     struct vnode **vpp)
6593 {
6594         int     namlen;
6595         int     error;
6596         struct vnode    *vp;
6597         struct inode    *ip;
6598         struct inode    *xip;
6599         struct ufsvfs   *ufsvfsp;
6600         struct ulockfs  *ulp;
6601 
6602         ip = VTOI(dvp);
6603         *vpp = NULL;
6604 
6605         if ((namlen = strlen(nm)) == 0)
6606                 return (EINVAL);
6607 
6608         if (nm[0] == '.') {
6609                 if (namlen == 1)
6610                         return (EINVAL);
6611                 else if ((namlen == 2) && nm[1] == '.') {
6612                         return (EEXIST);
6613                 }
6614         }
6615 
6616         /*
6617          * Check accessibility and write access of parent directory as we
6618          * only want to post the event if we're able to make a change.
6619          */
6620         if (error = ufs_diraccess(ip, IEXEC|IWRITE, cr))
6621                 return (error);
6622 
6623         if (vp = dnlc_lookup(dvp, nm)) {
6624                 if (vp == DNLC_NO_VNODE) {
6625                         VN_RELE(vp);
6626                         return (ENOENT);
6627                 }
6628 
6629                 *vpp = vp;
6630                 return (0);
6631         }
6632 
6633         /*
6634          * Keep the idle queue from getting too long by idling two
6635          * inodes before attempting to allocate another.
6636          * This operation must be performed before entering lockfs
6637          * or a transaction.
6638          */
6639         if (ufs_idle_q.uq_ne > ufs_idle_q.uq_hiwat)
6640                 if ((curthread->t_flag & T_DONTBLOCK) == 0) {
6641                         ins.in_lidles.value.ul += ufs_lookup_idle_count;
6642                         ufs_idle_some(ufs_lookup_idle_count);
6643                 }
6644 
6645         ufsvfsp = ip->i_ufsvfs;
6646 
6647 retry_lookup:
6648         if (error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_LOOKUP_MASK))
6649                 return (error);
6650 
6651         if ((error = ufs_dirlook(ip, nm, &xip, cr, 1, 1)) == 0) {
6652                 vp = ITOV(xip);
6653                 *vpp = vp;
6654         }
6655 
6656         if (ulp) {
6657                 ufs_lockfs_end(ulp);
6658         }
6659 
6660         if (error == EAGAIN)
6661                 goto retry_lookup;
6662 
6663         return (error);
6664 }