/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2015, Joyent, Inc. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/user.h>
#include <sys/time.h>
#include <sys/vfs.h>
#include <sys/vfs_opreg.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/flock.h>
#include <sys/kmem.h>
#include <sys/uio.h>
#include <sys/errno.h>
#include <sys/stat.h>
#include <sys/cred.h>
#include <sys/dirent.h>
#include <sys/pathname.h>
#include <sys/vmsystm.h>
#include <sys/fs/tmp.h>
#include <sys/fs/tmpnode.h>
#include <sys/mman.h>
#include <vm/hat.h>
#include <vm/seg_vn.h>
#include <vm/seg_map.h>
#include <vm/seg.h>
#include <vm/anon.h>
#include <vm/as.h>
#include <vm/page.h>
#include <vm/pvn.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/swap.h>
#include <sys/buf.h>
#include <sys/vm.h>
#include <sys/vtrace.h>
#include <sys/policy.h>
#include <fs/fs_subr.h>

static int      tmp_getapage(struct vnode *, u_offset_t, size_t, uint_t *,
        page_t **, size_t, struct seg *, caddr_t, enum seg_rw, struct cred *);
static int      tmp_putapage(struct vnode *, page_t *, u_offset_t *, size_t *,
        int, struct cred *);

/* ARGSUSED1 */
static int
tmp_open(struct vnode **vpp, int flag, struct cred *cred, caller_context_t *ct)
{
        /*
         * swapon to a tmpfs file is not supported, so access
         * is denied on open if VISSWAP is set.
         */
        if ((*vpp)->v_flag & VISSWAP)
                return (EINVAL);
        return (0);
}

/* ARGSUSED1 */
static int
tmp_close(
        struct vnode *vp,
        int flag,
        int count,
        offset_t offset,
        struct cred *cred,
        caller_context_t *ct)
{
        cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
        cleanshares(vp, ttoproc(curthread)->p_pid);
        return (0);
}

/*
 * wrtmp does the real work of write requests for tmpfs.
 */
static int
wrtmp(
        struct tmount *tm,
        struct tmpnode *tp,
        struct uio *uio,
        struct cred *cr,
        struct caller_context *ct)
{
        pgcnt_t pageoffset;     /* byte offset into the current page */
        ulong_t segmap_offset;  /* pagesize byte offset into segmap */
        caddr_t base;           /* base of segmap */
        ssize_t bytes;          /* bytes to uiomove */
        pfn_t pagenumber;       /* offset in pages into tmp file */
        struct vnode *vp;
        int error = 0;
        int     pagecreate;     /* == 1 if we allocated a page */
        int     newpage;
        rlim64_t limit = uio->uio_llimit;
        long oresid = uio->uio_resid;
        timestruc_t now;

        long tn_size_changed = 0;
        long old_tn_size;
        long new_tn_size;

        vp = TNTOV(tp);
        ASSERT(vp->v_type == VREG);

        TRACE_1(TR_FAC_TMPFS, TR_TMPFS_RWTMP_START,
            "tmp_wrtmp_start:vp %p", vp);

        ASSERT(RW_WRITE_HELD(&tp->tn_contents));
        ASSERT(RW_WRITE_HELD(&tp->tn_rwlock));

        if (MANDLOCK(vp, tp->tn_mode)) {
                rw_exit(&tp->tn_contents);
                /*
                 * tmp_getattr ends up being called by chklock
                 */
                error = chklock(vp, FWRITE, uio->uio_loffset, uio->uio_resid,
                    uio->uio_fmode, ct);
                rw_enter(&tp->tn_contents, RW_WRITER);
                if (error != 0) {
                        TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
                            "tmp_wrtmp_end:vp %p error %d", vp, error);
                        return (error);
                }
        }

        if (uio->uio_loffset < 0)
                return (EINVAL);

        if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
                limit = MAXOFFSET_T;

        if (uio->uio_loffset >= limit) {
                proc_t *p = ttoproc(curthread);

                mutex_enter(&p->p_lock);
                (void) rctl_action(rctlproc_legacy[RLIMIT_FSIZE], p->p_rctls,
                    p, RCA_UNSAFE_SIGINFO);
                mutex_exit(&p->p_lock);
                return (EFBIG);
        }

        if (uio->uio_loffset >= MAXOFF_T) {
                TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
                    "tmp_wrtmp_end:vp %p error %d", vp, EFBIG);
                return (EFBIG);
        }

        if (uio->uio_resid == 0) {
                TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
                    "tmp_wrtmp_end:vp %p error %d", vp, 0);
                return (0);
        }

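        /*
         * tmpfs does not support large files, so clamp the effective
         * limit to the largest 32-bit file offset.
         */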
        if (limit > MAXOFF_T)
                limit = MAXOFF_T;

        do {
                long    offset;
                long    delta;

                offset = (long)uio->uio_offset;
                pageoffset = offset & PAGEOFFSET;
                /*
                 * A maximum of PAGESIZE bytes of data is transferred
                 * each pass through this loop
                 */
                bytes = MIN(PAGESIZE - pageoffset, uio->uio_resid);

                if (offset + bytes >= limit) {
                        if (offset >= limit) {
                                error = EFBIG;
                                goto out;
                        }
                        bytes = limit - offset;
                }
                pagenumber = btop(offset);

                /*
                 * delta is the amount of anonymous memory
                 * to reserve for the file.
                 * We always reserve in pagesize increments so
                 * unless we're extending the file into a new page,
                 * we don't need to call tmp_resv.
                 */
                delta = offset + bytes -
                    P2ROUNDUP_TYPED(tp->tn_size, PAGESIZE, u_offset_t);
                if (delta > 0) {
                        pagecreate = 1;
                        if (tmp_resv(tm, tp, delta, pagecreate)) {
                                /*
                                 * Log file system full in the zone that owns
                                 * the tmpfs mount, as well as in the global
                                 * zone if necessary.
                                 */
                                zcmn_err(tm->tm_vfsp->vfs_zone->zone_id,
                                    CE_WARN, "%s: File system full, "
                                    "swap space limit exceeded",
                                    tm->tm_mntpath);

                                if (tm->tm_vfsp->vfs_zone->zone_id !=
                                    GLOBAL_ZONEID) {

                                        vfs_t *vfs = tm->tm_vfsp;

                                        zcmn_err(GLOBAL_ZONEID,
                                            CE_WARN, "%s: File system full, "
                                            "swap space limit exceeded",
                                            vfs->vfs_vnodecovered->v_path);
                                }
                                error = ENOSPC;
                                break;
                        }
                        tmpnode_growmap(tp, (ulong_t)offset + bytes);
                }
                /* grow the file to the new length */
                if (offset + bytes > tp->tn_size) {
                        tn_size_changed = 1;
                        old_tn_size = tp->tn_size;
                        /*
                         * Postpone updating tp->tn_size until uiomove() is
                         * done.
                         */
                        new_tn_size = offset + bytes;
                }
                if (bytes == PAGESIZE) {
                        /*
                         * Writing a whole page, so reading from disk
                         * is a waste
                         */
                        pagecreate = 1;
                } else {
                        pagecreate = 0;
                }
                /*
                 * If writing past EOF or filling in a hole
                 * we need to allocate an anon slot.
                 */
                if (anon_get_ptr(tp->tn_anon, pagenumber) == NULL) {
                        (void) anon_set_ptr(tp->tn_anon, pagenumber,
                            anon_alloc(vp, ptob(pagenumber)), ANON_SLEEP);
                        pagecreate = 1;
                        tp->tn_nblocks++;
                }

                /*
                 * We have to drop the contents lock to allow the VM
                 * system to reacquire it in tmp_getpage()
                 */
                rw_exit(&tp->tn_contents);

                /*
                 * Touch the page and fault it in if it is not in core
                 * before segmap_getmapflt or vpm_data_copy can lock it.
                 * This is to avoid the deadlock if the buffer is mapped
                 * to the same file through mmap which we want to write.
                 */
                uio_prefaultpages((long)bytes, uio);

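                /*
                 * Copy the data in, either through the VPM interface or
                 * through a transient segmap mapping of the page.
                 */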
                newpage = 0;
                if (vpm_enable) {
                        /*
                         * Copy data. If new pages are created, the part of
                         * the page that is not written will be initialized
                         * with zeros.
                         */
                        error = vpm_data_copy(vp, offset, bytes, uio,
                            !pagecreate, &newpage, 1, S_WRITE);
                } else {
                        /* Get offset within the segmap mapping */
                        segmap_offset = (offset & PAGEMASK) & MAXBOFFSET;
                        base = segmap_getmapflt(segkmap, vp,
                            (offset &  MAXBMASK), PAGESIZE, !pagecreate,
                            S_WRITE);
                }

                if (!vpm_enable && pagecreate) {
                        /*
                         * segmap_pagecreate() returns 1 if it calls
                         * page_create_va() to allocate any pages.
                         */
                        newpage = segmap_pagecreate(segkmap,
                            base + segmap_offset, (size_t)PAGESIZE, 0);
                        /*
                         * Clear from the beginning of the page to the starting
                         * offset of the data.
                         */
                        if (pageoffset != 0)
                                (void) kzero(base + segmap_offset,
                                    (size_t)pageoffset);
                }

                if (!vpm_enable) {
                        error = uiomove(base + segmap_offset + pageoffset,
                            (long)bytes, UIO_WRITE, uio);
                }

                if (!vpm_enable && pagecreate &&
                    uio->uio_offset < P2ROUNDUP(offset + bytes, PAGESIZE)) {
                        long    zoffset; /* zero from offset into page */
                        /*
                         * We created pages w/o initializing them completely,
                         * thus we need to zero the part that wasn't set up.
                         * This happens on most EOF write cases and if
                         * we had some sort of error during the uiomove.
                         */
                        long nmoved;

                        nmoved = uio->uio_offset - offset;
                        ASSERT((nmoved + pageoffset) <= PAGESIZE);

                        /*
                         * Zero from the end of data in the page to the
                         * end of the page.
                         */
                        if ((zoffset = pageoffset + nmoved) < PAGESIZE)
                                (void) kzero(base + segmap_offset + zoffset,
                                    (size_t)PAGESIZE - zoffset);
                }

                /*
                 * Unlock the pages which have been allocated by
                 * page_create_va() in segmap_pagecreate()
                 */
                if (!vpm_enable && newpage) {
                        segmap_pageunlock(segkmap, base + segmap_offset,
                            (size_t)PAGESIZE, S_WRITE);
                }

                if (error) {
                        /*
                         * If we failed on a write, we must
                         * be sure to invalidate any pages that may have
                         * been allocated.
                         */
                        if (vpm_enable) {
                                (void) vpm_sync_pages(vp, offset, PAGESIZE,
                                    SM_INVAL);
                        } else {
                                (void) segmap_release(segkmap, base, SM_INVAL);
                        }
                } else {
                        if (vpm_enable) {
                                error = vpm_sync_pages(vp, offset, PAGESIZE,
                                    0);
                        } else {
                                error = segmap_release(segkmap, base, 0);
                        }
                }

                /*
                 * Re-acquire contents lock.
                 */
                rw_enter(&tp->tn_contents, RW_WRITER);

                /*
                 * Update tn_size.
                 */
                if (tn_size_changed)
                        tp->tn_size = new_tn_size;

                /*
                 * If the uiomove failed, fix up tn_size.
                 */
                if (error) {
                        if (tn_size_changed) {
                                /*
                                 * The uiomove failed, and we
                                 * allocated blocks, so get rid
                                 * of them.
                                 */
                                (void) tmpnode_trunc(tm, tp,
                                    (ulong_t)old_tn_size);
                        }
                } else {
                        /*
                         * XXX - Can this be out of the loop?
                         */
                        if ((tp->tn_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) &&
                            (tp->tn_mode & (S_ISUID | S_ISGID)) &&
                            secpolicy_vnode_setid_retain(cr,
                            (tp->tn_mode & S_ISUID) != 0 && tp->tn_uid == 0)) {
                                /*
                                 * Clear Set-UID & Set-GID bits on
                                 * successful write if not privileged
                                 * and at least one of the execute bits
                                 * is set.  If we always clear Set-GID,
                                 * mandatory file and record locking is
                                 * unusable.
                                 */
                                tp->tn_mode &= ~(S_ISUID | S_ISGID);
                        }
                        gethrestime(&now);
                        tp->tn_mtime = now;
                        tp->tn_ctime = now;
                }
        } while (error == 0 && uio->uio_resid > 0 && bytes != 0);

out:
        /*
         * If we've already done a partial write, terminate
         * the write but return no error.
         */
        if (oresid != uio->uio_resid)
                error = 0;
        TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
            "tmp_wrtmp_end:vp %p error %d", vp, error);
        return (error);
}

/*
 * rdtmp does the real work of read requests for tmpfs.
 */
static int
rdtmp(
        struct tmount *tm,
        struct tmpnode *tp,
        struct uio *uio,
        struct caller_context *ct)
{
        ulong_t pageoffset;     /* byte offset into the current page */
        ulong_t segmap_offset;  /* pagesize byte offset into segmap */
        caddr_t base;           /* base of segmap */
        ssize_t bytes;          /* bytes to uiomove */
        struct vnode *vp;
        int error;
        long oresid = uio->uio_resid;

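/* Silence lint's unused-argument warning for tm. */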
#if defined(lint)
        tm = tm;
#endif
        vp = TNTOV(tp);

        TRACE_1(TR_FAC_TMPFS, TR_TMPFS_RWTMP_START, "tmp_rdtmp_start:vp %p",
            vp);

        ASSERT(RW_LOCK_HELD(&tp->tn_contents));

        if (MANDLOCK(vp, tp->tn_mode)) {
                rw_exit(&tp->tn_contents);
                /*
                 * tmp_getattr ends up being called by chklock
                 */
                error = chklock(vp, FREAD, uio->uio_loffset, uio->uio_resid,
                    uio->uio_fmode, ct);
                rw_enter(&tp->tn_contents, RW_READER);
                if (error != 0) {
                        TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
                            "tmp_rdtmp_end:vp %p error %d", vp, error);
                        return (error);
                }
        }
        ASSERT(tp->tn_type == VREG);

        if (uio->uio_loffset >= MAXOFF_T) {
                TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
                    "tmp_rdtmp_end:vp %p error %d", vp, 0);
                return (0);
        }
        if (uio->uio_loffset < 0)
                return (EINVAL);
        if (uio->uio_resid == 0) {
                TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
                    "tmp_rdtmp_end:vp %p error %d", vp, 0);
                return (0);
        }

        vp = TNTOV(tp);

        do {
                long diff;
                long offset;

                offset = uio->uio_offset;
                pageoffset = offset & PAGEOFFSET;
                bytes = MIN(PAGESIZE - pageoffset, uio->uio_resid);

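                /*
                 * diff is the number of bytes left in the file; a read
                 * entirely past EOF simply returns success with no data.
                 */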
                diff = tp->tn_size - offset;

                if (diff <= 0) {
                        error = 0;
                        goto out;
                }
                if (diff < bytes)
                        bytes = diff;

                /*
                 * We have to drop the contents lock to allow the VM system
                 * to reacquire it in tmp_getpage() should the uiomove cause a
                 * pagefault.
                 */
                rw_exit(&tp->tn_contents);

                if (vpm_enable) {
                        /*
                         * Copy data.
                         */
                        error = vpm_data_copy(vp, offset, bytes, uio, 1, NULL,
                            0, S_READ);
                } else {
                        segmap_offset = (offset & PAGEMASK) & MAXBOFFSET;
                        base = segmap_getmapflt(segkmap, vp, offset & MAXBMASK,
                            bytes, 1, S_READ);

                        error = uiomove(base + segmap_offset + pageoffset,
                            (long)bytes, UIO_READ, uio);
                }

                if (error) {
                        if (vpm_enable) {
                                (void) vpm_sync_pages(vp, offset, PAGESIZE, 0);
                        } else {
                                (void) segmap_release(segkmap, base, 0);
                        }
                } else {
                        if (vpm_enable) {
                                error = vpm_sync_pages(vp, offset, PAGESIZE,
                                    0);
                        } else {
                                error = segmap_release(segkmap, base, 0);
                        }
                }

                /*
                 * Re-acquire contents lock.
                 */
                rw_enter(&tp->tn_contents, RW_READER);

        } while (error == 0 && uio->uio_resid > 0);

out:
        gethrestime(&tp->tn_atime);

        /*
         * If we've already done a partial read, terminate
         * the read but return no error.
         */
        if (oresid != uio->uio_resid)
                error = 0;

        TRACE_2(TR_FAC_TMPFS, TR_TMPFS_RWTMP_END,
            "tmp_rdtmp_end:vp %p error %d", vp, error);
        return (error);
}

/* ARGSUSED2 */
static int
tmp_read(struct vnode *vp, struct uio *uiop, int ioflag, cred_t *cred,
    struct caller_context *ct)
{
        struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
        struct tmount *tm = (struct tmount *)VTOTM(vp);
        int error;

        /*
         * We don't currently support reading non-regular files
         */
        if (vp->v_type == VDIR)
                return (EISDIR);
        if (vp->v_type != VREG)
                return (EINVAL);
        /*
         * tmp_rwlock should have already been called from layers above
         */
        ASSERT(RW_READ_HELD(&tp->tn_rwlock));

        rw_enter(&tp->tn_contents, RW_READER);

        error = rdtmp(tm, tp, uiop, ct);

        rw_exit(&tp->tn_contents);

        return (error);
}

static int
tmp_write(struct vnode *vp, struct uio *uiop, int ioflag, struct cred *cred,
    struct caller_context *ct)
{
        struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
        struct tmount *tm = (struct tmount *)VTOTM(vp);
        int error;

        /*
         * We don't currently support writing to non-regular files
         */
        if (vp->v_type != VREG)
                return (EINVAL);        /* XXX EISDIR? */

        /*
         * tmp_rwlock should have already been called from layers above
         */
        ASSERT(RW_WRITE_HELD(&tp->tn_rwlock));

        rw_enter(&tp->tn_contents, RW_WRITER);

        if (ioflag & FAPPEND) {
                /*
                 * In append mode start at end of file.
                 */
                uiop->uio_loffset = tp->tn_size;
        }

        error = wrtmp(tm, tp, uiop, cred, ct);

        rw_exit(&tp->tn_contents);

        return (error);
}

/* ARGSUSED */
static int
tmp_ioctl(
        struct vnode *vp,
        int com,
        intptr_t data,
        int flag,
        struct cred *cred,
        int *rvalp,
        caller_context_t *ct)
{
        return (ENOTTY);
}

/* ARGSUSED2 */
static int
tmp_getattr(
        struct vnode *vp,
        struct vattr *vap,
        int flags,
        struct cred *cred,
        caller_context_t *ct)
{
        struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
        struct vnode *mvp;
        struct vattr va;
        int attrs = 1;

        /*
         * A special case to handle the root tnode on a diskless nfs
         * client that may have had its uid and gid inherited
         * from an nfs vnode with nobody ownership, likely the
         * root filesystem.  Once nfs is fully functional the uid/gid
         * may be mappable, so ask again.
         * vfsp can't get unmounted because we hold vp.
         */
        if (vp->v_flag & VROOT &&
            (mvp = vp->v_vfsp->vfs_vnodecovered) != NULL) {
                mutex_enter(&tp->tn_tlock);
                if (tp->tn_uid == UID_NOBODY || tp->tn_gid == GID_NOBODY) {
                        mutex_exit(&tp->tn_tlock);
                        bzero(&va, sizeof (struct vattr));
                        va.va_mask = AT_UID|AT_GID;
                        attrs = VOP_GETATTR(mvp, &va, 0, cred, ct);
                } else {
                        mutex_exit(&tp->tn_tlock);
                }
        }
        mutex_enter(&tp->tn_tlock);
        if (attrs == 0) {
                tp->tn_uid = va.va_uid;
                tp->tn_gid = va.va_gid;
        }
        vap->va_type = vp->v_type;
        vap->va_mode = tp->tn_mode & MODEMASK;
        vap->va_uid = tp->tn_uid;
        vap->va_gid = tp->tn_gid;
        vap->va_fsid = tp->tn_fsid;
        vap->va_nodeid = (ino64_t)tp->tn_nodeid;
        vap->va_nlink = tp->tn_nlink;
        vap->va_size = (u_offset_t)tp->tn_size;
        vap->va_atime = tp->tn_atime;
        vap->va_mtime = tp->tn_mtime;
        vap->va_ctime = tp->tn_ctime;
        vap->va_blksize = PAGESIZE;
        vap->va_rdev = tp->tn_rdev;
        vap->va_seq = tp->tn_seq;

        /*
         * XXX Holes are not taken into account.  We could take the time to
         * run through the anon array looking for allocated slots...
         */
        vap->va_nblocks = (fsblkcnt64_t)btodb(ptob(btopr(vap->va_size)));
        mutex_exit(&tp->tn_tlock);
        return (0);
}

/*ARGSUSED4*/
static int
tmp_setattr(
        struct vnode *vp,
        struct vattr *vap,
        int flags,
        struct cred *cred,
        caller_context_t *ct)
{
        struct tmount *tm = (struct tmount *)VTOTM(vp);
        struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
        int error = 0;
        struct vattr *get;
        long mask;

        /*
         * Cannot set these attributes
         */
        if ((vap->va_mask & AT_NOSET) || (vap->va_mask & AT_XVATTR))
                return (EINVAL);

        mutex_enter(&tp->tn_tlock);

        get = &tp->tn_attr;
        /*
         * Change file access modes. Must be owner or have sufficient
         * privileges.
         */
        error = secpolicy_vnode_setattr(cred, vp, vap, get, flags, tmp_taccess,
            tp);

        if (error)
                goto out;

        mask = vap->va_mask;

        if (mask & AT_MODE) {
                get->va_mode &= S_IFMT;
                get->va_mode |= vap->va_mode & ~S_IFMT;
        }

        if (mask & AT_UID)
                get->va_uid = vap->va_uid;
        if (mask & AT_GID)
                get->va_gid = vap->va_gid;
        if (mask & AT_ATIME)
                get->va_atime = vap->va_atime;
        if (mask & AT_MTIME)
                get->va_mtime = vap->va_mtime;

        if (mask & (AT_UID | AT_GID | AT_MODE | AT_MTIME))
                gethrestime(&tp->tn_ctime);

        if (mask & AT_SIZE) {
                ASSERT(vp->v_type != VDIR);

                /* Don't support large files. */
                if (vap->va_size > MAXOFF_T) {
                        error = EFBIG;
                        goto out;
                }
                mutex_exit(&tp->tn_tlock);

                rw_enter(&tp->tn_rwlock, RW_WRITER);
                rw_enter(&tp->tn_contents, RW_WRITER);
                error = tmpnode_trunc(tm, tp, (ulong_t)vap->va_size);
                rw_exit(&tp->tn_contents);
                rw_exit(&tp->tn_rwlock);

                if (error == 0 && vap->va_size == 0)
                        vnevent_truncate(vp, ct);

                goto out1;
        }
out:
        mutex_exit(&tp->tn_tlock);
out1:
        return (error);
}

/* ARGSUSED2 */
static int
tmp_access(
        struct vnode *vp,
        int mode,
        int flags,
        struct cred *cred,
        caller_context_t *ct)
{
        struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
        int error;

        mutex_enter(&tp->tn_tlock);
        error = tmp_taccess(tp, mode, cred);
        mutex_exit(&tp->tn_tlock);
        return (error);
}

/* ARGSUSED3 */
static int
tmp_lookup(
        struct vnode *dvp,
        char *nm,
        struct vnode **vpp,
        struct pathname *pnp,
        int flags,
        struct vnode *rdir,
        struct cred *cred,
        caller_context_t *ct,
        int *direntflags,
        pathname_t *realpnp)
{
        struct tmpnode *tp = (struct tmpnode *)VTOTN(dvp);
        struct tmpnode *ntp = NULL;
        int error;

        /* allow cd into @ dir */
        if (flags & LOOKUP_XATTR) {
                struct tmpnode *xdp;
                struct tmount *tm;

                /*
                 * don't allow attributes if the filesystem is not
                 * mounted with XATTR support
                 */
                if (!(dvp->v_vfsp->vfs_flag & VFS_XATTR))
                        return (EINVAL);

                if (tp->tn_flags & ISXATTR)
                        /* No attributes on attributes */
                        return (EINVAL);

                rw_enter(&tp->tn_rwlock, RW_WRITER);
                if (tp->tn_xattrdp == NULL) {
                        if (!(flags & CREATE_XATTR_DIR)) {
                                rw_exit(&tp->tn_rwlock);
                                return (ENOENT);
                        }

                        /*
                         * No attribute directory exists for this
                         * node - create the attr dir as a side effect
                         * of this lookup.
                         */

                        /*
                         * Make sure we have adequate permission...
                         */

                        if ((error = tmp_taccess(tp, VWRITE, cred)) != 0) {
                                rw_exit(&tp->tn_rwlock);
                                return (error);
                        }

                        xdp = tmp_memalloc(sizeof (struct tmpnode),
                            TMP_MUSTHAVE);
                        tm = VTOTM(dvp);
                        tmpnode_init(tm, xdp, &tp->tn_attr, NULL);
                        /*
                         * Fix-up fields unique to attribute directories.
                         */
                        xdp->tn_flags = ISXATTR;
                        xdp->tn_type = VDIR;
                        if (tp->tn_type == VDIR) {
                                xdp->tn_mode = tp->tn_attr.va_mode;
                        } else {
                                xdp->tn_mode = 0700;
                                if (tp->tn_attr.va_mode & 0040)
                                        xdp->tn_mode |= 0750;
                                if (tp->tn_attr.va_mode & 0004)
                                        xdp->tn_mode |= 0705;
                        }
                        xdp->tn_vnode->v_type = VDIR;
                        xdp->tn_vnode->v_flag |= V_XATTRDIR;
                        tdirinit(tp, xdp);
                        tp->tn_xattrdp = xdp;
                } else {
                        VN_HOLD(tp->tn_xattrdp->tn_vnode);
                }
                *vpp = TNTOV(tp->tn_xattrdp);
                rw_exit(&tp->tn_rwlock);
                return (0);
        }

        /*
         * Null component name is a synonym for directory being searched.
         */
        if (*nm == '\0') {
                VN_HOLD(dvp);
                *vpp = dvp;
                return (0);
        }
        ASSERT(tp);

        error = tdirlookup(tp, nm, &ntp, cred);

        if (error == 0) {
                ASSERT(ntp);
                *vpp = TNTOV(ntp);
                /*
                 * If vnode is a device return special vnode instead
                 */
                if (IS_DEVVP(*vpp)) {
                        struct vnode *newvp;

                        newvp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type,
                            cred);
                        VN_RELE(*vpp);
                        *vpp = newvp;
                }
        }
        TRACE_4(TR_FAC_TMPFS, TR_TMPFS_LOOKUP,
            "tmpfs lookup:vp %p name %s vpp %p error %d",
            dvp, nm, vpp, error);
        return (error);
}

/*ARGSUSED7*/
static int
tmp_create(
        struct vnode *dvp,
        char *nm,
        struct vattr *vap,
        enum vcexcl exclusive,
        int mode,
        struct vnode **vpp,
        struct cred *cred,
        int flag,
        caller_context_t *ct,
        vsecattr_t *vsecp)
{
        struct tmpnode *parent;
        struct tmount *tm;
        struct tmpnode *self;
        int error;
        struct tmpnode *oldtp;

again:
        parent = (struct tmpnode *)VTOTN(dvp);
        tm = (struct tmount *)VTOTM(dvp);
        self = NULL;
        error = 0;
        oldtp = NULL;

        /* device files not allowed in ext. attr dirs */
        if ((parent->tn_flags & ISXATTR) &&
            (vap->va_type == VBLK || vap->va_type == VCHR ||
            vap->va_type == VFIFO || vap->va_type == VDOOR ||
            vap->va_type == VSOCK || vap->va_type == VPORT))
                return (EINVAL);

        if (vap->va_type == VREG && (vap->va_mode & VSVTX)) {
                /* Must be privileged to set sticky bit */
                if (secpolicy_vnode_stky_modify(cred))
                        vap->va_mode &= ~VSVTX;
        } else if (vap->va_type == VNON) {
                return (EINVAL);
        }

        /*
         * Null component name is a synonym for directory being searched.
         */
        if (*nm == '\0') {
                VN_HOLD(dvp);
                oldtp = parent;
        } else {
                error = tdirlookup(parent, nm, &oldtp, cred);
        }

        if (error == 0) {       /* name found */
                boolean_t trunc = B_FALSE;

                ASSERT(oldtp);

                rw_enter(&oldtp->tn_rwlock, RW_WRITER);

                /*
                 * if create/read-only an existing
                 * directory, allow it
                 */
                if (exclusive == EXCL)
                        error = EEXIST;
                else if ((oldtp->tn_type == VDIR) && (mode & VWRITE))
                        error = EISDIR;
                else {
                        error = tmp_taccess(oldtp, mode, cred);
                }

                if (error) {
                        rw_exit(&oldtp->tn_rwlock);
                        tmpnode_rele(oldtp);
                        return (error);
                }
                *vpp = TNTOV(oldtp);
                if ((*vpp)->v_type == VREG && (vap->va_mask & AT_SIZE) &&
                    vap->va_size == 0) {
                        rw_enter(&oldtp->tn_contents, RW_WRITER);
                        (void) tmpnode_trunc(tm, oldtp, 0);
                        rw_exit(&oldtp->tn_contents);
                        trunc = B_TRUE;
                }
                rw_exit(&oldtp->tn_rwlock);
                if (IS_DEVVP(*vpp)) {
                        struct vnode *newvp;

                        newvp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type,
                            cred);
                        VN_RELE(*vpp);
                        if (newvp == NULL) {
                                return (ENOSYS);
                        }
                        *vpp = newvp;
                }

                if (trunc)
                        vnevent_create(*vpp, ct);

                return (0);
        }

        if (error != ENOENT)
                return (error);

        rw_enter(&parent->tn_rwlock, RW_WRITER);
        error = tdirenter(tm, parent, nm, DE_CREATE,
            (struct tmpnode *)NULL, (struct tmpnode *)NULL,
            vap, &self, cred, ct);
        rw_exit(&parent->tn_rwlock);

        if (error) {
                if (self)
                        tmpnode_rele(self);

                if (error == EEXIST) {
                        /*
                         * This means that the file was created sometime
                         * after we checked and did not find it, but before
                         * we went to create it.
                         * Since creat() is supposed to truncate a file
                         * that already exists, go back to the beginning
                         * of the function. This time we will find it
                         * and go down the tmp_trunc() path.
                         */
                        goto again;
                }
                return (error);
        }

        *vpp = TNTOV(self);

        if (!error && IS_DEVVP(*vpp)) {
                struct vnode *newvp;

                newvp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cred);
                VN_RELE(*vpp);
                if (newvp == NULL)
                        return (ENOSYS);
                *vpp = newvp;
        }
        TRACE_3(TR_FAC_TMPFS, TR_TMPFS_CREATE,
            "tmpfs create:dvp %p nm %s vpp %p", dvp, nm, vpp);
        return (0);
}

/* ARGSUSED3 */
static int
tmp_remove(
        struct vnode *dvp,
        char *nm,
        struct cred *cred,
        caller_context_t *ct,
        int flags)
{
        struct tmpnode *parent = (struct tmpnode *)VTOTN(dvp);
        int error;
        struct tmpnode *tp = NULL;

        error = tdirlookup(parent, nm, &tp, cred);
        if (error)
                return (error);

        ASSERT(tp);
        rw_enter(&parent->tn_rwlock, RW_WRITER);
        rw_enter(&tp->tn_rwlock, RW_WRITER);

        if (tp->tn_type != VDIR ||
            (error = secpolicy_fs_linkdir(cred, dvp->v_vfsp)) == 0)
                error = tdirdelete(parent, tp, nm, DR_REMOVE, cred);

        rw_exit(&tp->tn_rwlock);
        rw_exit(&parent->tn_rwlock);
        vnevent_remove(TNTOV(tp), dvp, nm, ct);
        tmpnode_rele(tp);

        TRACE_3(TR_FAC_TMPFS, TR_TMPFS_REMOVE,
            "tmpfs remove:dvp %p nm %s error %d", dvp, nm, error);
        return (error);
}

/* ARGSUSED4 */
static int
tmp_link(
        struct vnode *dvp,
        struct vnode *srcvp,
        char *tnm,
        struct cred *cred,
        caller_context_t *ct,
        int flags)
{
        struct tmpnode *parent;
        struct tmpnode *from;
        struct tmount *tm = (struct tmount *)VTOTM(dvp);
        int error;
        struct tmpnode *found = NULL;
        struct vnode *realvp;

        if (VOP_REALVP(srcvp, &realvp, ct) == 0)
                srcvp = realvp;

        parent = (struct tmpnode *)VTOTN(dvp);
        from = (struct tmpnode *)VTOTN(srcvp);

        if ((srcvp->v_type == VDIR &&
            secpolicy_fs_linkdir(cred, dvp->v_vfsp)) ||
            (from->tn_uid != crgetuid(cred) && secpolicy_basic_link(cred)))
                return (EPERM);

        /*
         * Make sure the link for extended attributes is valid.
         * We only support hard linking of xattrs in an xattrdir to an
         * xattrdir.
         */
        if ((from->tn_flags & ISXATTR) != (parent->tn_flags & ISXATTR))
                return (EINVAL);

        error = tdirlookup(parent, tnm, &found, cred);
        if (error == 0) {
                ASSERT(found);
                tmpnode_rele(found);
                return (EEXIST);
        }

        if (error != ENOENT)
                return (error);

        rw_enter(&parent->tn_rwlock, RW_WRITER);
        error = tdirenter(tm, parent, tnm, DE_LINK, (struct tmpnode *)NULL,
            from, NULL, (struct tmpnode **)NULL, cred, ct);
        rw_exit(&parent->tn_rwlock);
        if (error == 0) {
                vnevent_link(srcvp, ct);
        }
        return (error);
}

/* ARGSUSED5 */
static int
tmp_rename(
        struct vnode *odvp,     /* source parent vnode */
        char *onm,              /* source name */
        struct vnode *ndvp,     /* destination parent vnode */
        char *nnm,              /* destination name */
        struct cred *cred,
        caller_context_t *ct,
        int flags)
{
        struct tmpnode *fromparent;
        struct tmpnode *toparent;
        struct tmpnode *fromtp = NULL;  /* source tmpnode */
        struct tmpnode *totp;           /* target tmpnode */
        struct tmount *tm = (struct tmount *)VTOTM(odvp);
        int error;
        int samedir = 0;        /* set if odvp == ndvp */
        struct vnode *realvp;

        if (VOP_REALVP(ndvp, &realvp, ct) == 0)
                ndvp = realvp;

        fromparent = (struct tmpnode *)VTOTN(odvp);
        toparent = (struct tmpnode *)VTOTN(ndvp);

        if ((fromparent->tn_flags & ISXATTR) != (toparent->tn_flags & ISXATTR))
                return (EINVAL);

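        /* Only one rename at a time on this mount; take the rename lock. */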
        mutex_enter(&tm->tm_renamelck);

        /*
         * Look up tmpnode of file we're supposed to rename.
         */
        error = tdirlookup(fromparent, onm, &fromtp, cred);
        if (error) {
                mutex_exit(&tm->tm_renamelck);
                return (error);
        }

        /*
         * Make sure we can delete the old (source) entry.  This
         * requires write permission on the containing directory.  If
         * that directory is "sticky" it requires further checks.
         */
        if (((error = tmp_taccess(fromparent, VWRITE, cred)) != 0) ||
            (error = tmp_sticky_remove_access(fromparent, fromtp, cred)) != 0)
                goto done;

        /*
         * Check for renaming to or from '.' or '..' or that
         * fromtp == fromparent
         */
        if ((onm[0] == '.' &&
            (onm[1] == '\0' || (onm[1] == '.' && onm[2] == '\0'))) ||
            (nnm[0] == '.' &&
            (nnm[1] == '\0' || (nnm[1] == '.' && nnm[2] == '\0'))) ||
            (fromparent == fromtp)) {
                error = EINVAL;
                goto done;
        }

        samedir = (fromparent == toparent);
        /*
         * Make sure we can search and rename into the new
         * (destination) directory.
         */
        if (!samedir) {
                error = tmp_taccess(toparent, VEXEC|VWRITE, cred);
                if (error)
                        goto done;
        }

        if (tdirlookup(toparent, nnm, &totp, cred) == 0) {
                vnevent_pre_rename_dest(TNTOV(totp), ndvp, nnm, ct);
                tmpnode_rele(totp);
        }

        /* Notify the target dir. if not the same as the source dir. */
        if (ndvp != odvp) {
                vnevent_pre_rename_dest_dir(ndvp, TNTOV(fromtp), nnm, ct);
        }

        vnevent_pre_rename_src(TNTOV(fromtp), odvp, onm, ct);

        /*
         * Link source to new target
         */
        rw_enter(&toparent->tn_rwlock, RW_WRITER);
        error = tdirenter(tm, toparent, nnm, DE_RENAME,
            fromparent, fromtp, (struct vattr *)NULL,
            (struct tmpnode **)NULL, cred, ct);
        rw_exit(&toparent->tn_rwlock);

        if (error) {
                /*
                 * ESAME isn't really an error; it indicates that the
                 * operation should not be done because the source and target
                 * are the same file, but that no error should be reported.
                 */
                if (error == ESAME)
                        error = 0;
                goto done;
        }

        /*
         * Unlink from source.
         */
        rw_enter(&fromparent->tn_rwlock, RW_WRITER);
        rw_enter(&fromtp->tn_rwlock, RW_WRITER);

        error = tdirdelete(fromparent, fromtp, onm, DR_RENAME, cred);

        /*
         * The following handles the case where our source tmpnode was
         * removed before we got to it.
         *
         * XXX We should also cleanup properly in the case where tdirdelete
         * fails for some other reason.  Currently this case shouldn't happen.
         * (see 1184991).
         */
        if (error == ENOENT)
                error = 0;

        rw_exit(&fromtp->tn_rwlock);
        rw_exit(&fromparent->tn_rwlock);

        if (error == 0) {
                vnevent_rename_src(TNTOV(fromtp), odvp, onm, ct);
                /*
                 * vnevent_rename_dest is called in tdirenter().
                 * Notify the target dir if not same as source dir.
                 */
                if (ndvp != odvp)
                        vnevent_rename_dest_dir(ndvp, ct);
        }

done:
        tmpnode_rele(fromtp);
        mutex_exit(&tm->tm_renamelck);

        TRACE_5(TR_FAC_TMPFS, TR_TMPFS_RENAME,
            "tmpfs rename:ovp %p onm %s nvp %p nnm %s error %d", odvp, onm,
            ndvp, nnm, error);
        return (error);
}

/* ARGSUSED5 */
static int
tmp_mkdir(
        struct vnode *dvp,
        char *nm,
        struct vattr *va,
        struct vnode **vpp,
        struct cred *cred,
        caller_context_t *ct,
        int flags,
        vsecattr_t *vsecp)
{
        struct tmpnode *parent = (struct tmpnode *)VTOTN(dvp);
        struct tmpnode *self = NULL;
        struct tmount *tm = (struct tmount *)VTOTM(dvp);
        int error;

        /* no new dirs allowed in xattr dirs */
        if (parent->tn_flags & ISXATTR)
                return (EINVAL);

        /*
         * Might be dangling directory.  Catch it here,
         * because an ENOENT return from tdirlookup() is
         * an "o.k. return".
         */
        if (parent->tn_nlink == 0)
                return (ENOENT);

        error = tdirlookup(parent, nm, &self, cred);
        if (error == 0) {
                ASSERT(self);
                tmpnode_rele(self);
                return (EEXIST);
        }
        if (error != ENOENT)
                return (error);

        rw_enter(&parent->tn_rwlock, RW_WRITER);
        error = tdirenter(tm, parent, nm, DE_MKDIR, (struct tmpnode *)NULL,
            (struct tmpnode *)NULL, va, &self, cred, ct);
        if (error) {
                rw_exit(&parent->tn_rwlock);
                if (self)
                        tmpnode_rele(self);
                return (error);
        }
        rw_exit(&parent->tn_rwlock);
        *vpp = TNTOV(self);
        return (0);
}

/* ARGSUSED4 */
static int
tmp_rmdir(
        struct vnode *dvp,
        char *nm,
        struct vnode *cdir,
        struct cred *cred,
        caller_context_t *ct,
        int flags)
{
        struct tmpnode *parent = (struct tmpnode *)VTOTN(dvp);
        struct tmpnode *self = NULL;
        struct vnode *vp;
        int error = 0;

        /*
         * Return error when removing . and ..
         */
        if (strcmp(nm, ".") == 0)
                return (EINVAL);
        if (strcmp(nm, "..") == 0)
                return (EEXIST); /* Should be ENOTEMPTY */
        error = tdirlookup(parent, nm, &self, cred);
        if (error)
                return (error);

        rw_enter(&parent->tn_rwlock, RW_WRITER);
        rw_enter(&self->tn_rwlock, RW_WRITER);

        vp = TNTOV(self);
        if (vp == dvp || vp == cdir) {
                error = EINVAL;
                goto done1;
        }
        if (self->tn_type != VDIR) {
                error = ENOTDIR;
                goto done1;
        }

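        /*
         * A directory's link count is two plus one for each subdirectory
         * (whose ".." entry references it), so a count above two means
         * the directory still has subdirectories.
         */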
        mutex_enter(&self->tn_tlock);
        if (self->tn_nlink > 2) {
                mutex_exit(&self->tn_tlock);
                error = EEXIST;
                goto done1;
        }
        mutex_exit(&self->tn_tlock);

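        /*
         * Take the vfs lock so the directory can't become a mount point
         * while we're removing it.
         */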
        if (vn_vfswlock(vp)) {
                error = EBUSY;
                goto done1;
        }
        if (vn_mountedvfs(vp) != NULL) {
                error = EBUSY;
                goto done;
        }

        /*
         * Check for an empty directory,
         * i.e. one that only includes entries for "." and ".."
         */
        if (self->tn_dirents > 2) {
                error = EEXIST;         /* SIGH should be ENOTEMPTY */
                /*
                 * Update atime because checking tn_dirents is logically
                 * equivalent to reading the directory
                 */
                gethrestime(&self->tn_atime);
                goto done;
        }

        error = tdirdelete(parent, self, nm, DR_RMDIR, cred);
done:
        vn_vfsunlock(vp);
done1:
        rw_exit(&self->tn_rwlock);
        rw_exit(&parent->tn_rwlock);
        vnevent_rmdir(TNTOV(self), dvp, nm, ct);
        tmpnode_rele(self);

        return (error);
}

/* ARGSUSED2 */
static int
tmp_readdir(
        struct vnode *vp,
        struct uio *uiop,
        struct cred *cred,
        int *eofp,
        caller_context_t *ct,
        int flags)
{
        struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
        struct tdirent *tdp;
        int error = 0;
        size_t namelen;
        struct dirent64 *dp;
        ulong_t offset;
        ulong_t total_bytes_wanted;
        long outcount = 0;
        long bufsize;
        int reclen;
        caddr_t outbuf;

        if (uiop->uio_loffset >= MAXOFF_T) {
                if (eofp)
                        *eofp = 1;
                return (0);
        }
        /*
         * assuming the system call has already called tmp_rwlock
         */
        ASSERT(RW_READ_HELD(&tp->tn_rwlock));

        if (uiop->uio_iovcnt != 1)
                return (EINVAL);

        if (vp->v_type != VDIR)
                return (ENOTDIR);

        /*
         * There's a window here where someone could have removed
         * all the entries in the directory after we put a hold on the
         * vnode but before we grabbed the rwlock.  Just return.
         */
        if (tp->tn_dir == NULL) {
                if (tp->tn_nlink) {
                        panic("empty directory 0x%p", (void *)tp);
                        /*NOTREACHED*/
                }
                return (0);
        }

        /*
         * Get space for multiple directory entries
         */
        total_bytes_wanted = uiop->uio_iov->iov_len;
        bufsize = total_bytes_wanted + sizeof (struct dirent64);
        outbuf = kmem_alloc(bufsize, KM_SLEEP);

        dp = (struct dirent64 *)outbuf;

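        /*
         * Walk the in-core directory list, copying out every entry whose
         * offset is at or past the requested directory offset.
         */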
1516         offset = 0;
1517         tdp = tp->tn_dir;
1518         while (tdp) {
1519                 namelen = strlen(tdp->td_name);      /* no +1 needed */
1520                 offset = tdp->td_offset;
1521                 if (offset >= uiop->uio_offset) {
1522                         reclen = (int)DIRENT64_RECLEN(namelen);
1523                         if (outcount + reclen > total_bytes_wanted) {
1524                                 if (!outcount)
1525                                         /*
1526                                          * Buffer too small for any entries.
1527                                          */
1528                                         error = EINVAL;
1529                                 break;
1530                         }
1531                         ASSERT(tdp->td_tmpnode != NULL);
1532 
1533                         /* use strncpy(9f) to zero out uninitialized bytes */
1534 
1535                         (void) strncpy(dp->d_name, tdp->td_name,
1536                             DIRENT64_NAMELEN(reclen));
1537                         dp->d_reclen = (ushort_t)reclen;
1538                         dp->d_ino = (ino64_t)tdp->td_tmpnode->tn_nodeid;
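                        /*
                         * d_off is the cookie at which a later readdir
                         * should resume: just past this entry.
                         */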
1539                         dp->d_off = (offset_t)tdp->td_offset + 1;
1540                         dp = (struct dirent64 *)
1541                             ((uintptr_t)dp + dp->d_reclen);
1542                         outcount += reclen;
1543                         ASSERT(outcount <= bufsize);
1544                 }
1545                 tdp = tdp->td_next;
1546         }
1547 
1548         if (!error)
1549                 error = uiomove(outbuf, outcount, UIO_READ, uiop);
1550 
1551         if (!error) {
                /*
                 * If we reached the end of the list, our offset
                 * should now be just past the end.
                 */
1554                 if (!tdp) {
1555                         offset += 1;
1556                         if (eofp)
1557                                 *eofp = 1;
1558                 } else if (eofp)
1559                         *eofp = 0;
1560                 uiop->uio_offset = offset;
1561         }
1562         gethrestime(&tp->tn_atime);
1563         kmem_free(outbuf, bufsize);
1564         return (error);
1565 }
1566 
1567 /* ARGSUSED5 */
1568 static int
1569 tmp_symlink(
1570         struct vnode *dvp,
1571         char *lnm,
1572         struct vattr *tva,
1573         char *tnm,
1574         struct cred *cred,
1575         caller_context_t *ct,
1576         int flags)
1577 {
1578         struct tmpnode *parent = (struct tmpnode *)VTOTN(dvp);
1579         struct tmpnode *self = (struct tmpnode *)NULL;
1580         struct tmount *tm = (struct tmount *)VTOTM(dvp);
1581         char *cp = NULL;
1582         int error;
1583         size_t len;
1584 
1585         /* no symlinks allowed to files in xattr dirs */
1586         if (parent->tn_flags & ISXATTR)
1587                 return (EINVAL);
1588 
1589         error = tdirlookup(parent, lnm, &self, cred);
1590         if (error == 0) {
1591                 /*
1592                  * The entry already exists
1593                  */
1594                 tmpnode_rele(self);
1595                 return (EEXIST);        /* was 0 */
1596         }
1597 
1598         if (error != ENOENT) {
1599                 if (self != NULL)
1600                         tmpnode_rele(self);
1601                 return (error);
1602         }
1603 
1604         rw_enter(&parent->tn_rwlock, RW_WRITER);
1605         error = tdirenter(tm, parent, lnm, DE_CREATE, (struct tmpnode *)NULL,
1606             (struct tmpnode *)NULL, tva, &self, cred, ct);
1607         rw_exit(&parent->tn_rwlock);
1608 
1609         if (error) {
1610                 if (self)
1611                         tmpnode_rele(self);
1612                 return (error);
1613         }
1614         len = strlen(tnm) + 1;
1615         cp = tmp_memalloc(len, 0);
1616         if (cp == NULL) {
1617                 tmpnode_rele(self);
1618                 return (ENOSPC);
1619         }
1620         (void) strcpy(cp, tnm);
1621 
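        /*
         * tn_size is strlen(tnm); the stored string keeps its trailing
         * NUL, which is why tmp_inactive() frees the symlink as
         * tn_size + 1 bytes.
         */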
1622         self->tn_symlink = cp;
1623         self->tn_size = len - 1;
1624         tmpnode_rele(self);
1625         return (error);
1626 }
1627 
1628 /* ARGSUSED2 */
1629 static int
1630 tmp_readlink(
1631         struct vnode *vp,
1632         struct uio *uiop,
1633         struct cred *cred,
1634         caller_context_t *ct)
1635 {
1636         struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
1637         int error = 0;
1638 
1639         if (vp->v_type != VLNK)
1640                 return (EINVAL);
1641 
1642         rw_enter(&tp->tn_rwlock, RW_READER);
1643         rw_enter(&tp->tn_contents, RW_READER);
1644         error = uiomove(tp->tn_symlink, tp->tn_size, UIO_READ, uiop);
1645         gethrestime(&tp->tn_atime);
1646         rw_exit(&tp->tn_contents);
1647         rw_exit(&tp->tn_rwlock);
1648         return (error);
1649 }
1650 
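/*
 * tmpfs file contents live only in memory (or on swap), so there is no
 * stable storage to flush to and fsync is a no-op.
 */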
1651 /* ARGSUSED */
1652 static int
1653 tmp_fsync(
1654         struct vnode *vp,
1655         int syncflag,
1656         struct cred *cred,
1657         caller_context_t *ct)
1658 {
1659         return (0);
1660 }
1661 
1662 /* ARGSUSED */
1663 static void
1664 tmp_inactive(struct vnode *vp, struct cred *cred, caller_context_t *ct)
1665 {
1666         struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
1667         struct tmount *tm = (struct tmount *)VFSTOTM(vp->v_vfsp);
1668 
1669         rw_enter(&tp->tn_rwlock, RW_WRITER);
1670 top:
1671         mutex_enter(&tp->tn_tlock);
1672         mutex_enter(&vp->v_lock);
1673         ASSERT(vp->v_count >= 1);
1674 
1675         /*
1676          * If we don't have the last hold or the link count is non-zero,
1677          * there's little to do -- just drop our hold.
1678          */
1679         if (vp->v_count > 1 || tp->tn_nlink != 0) {
1680                 vp->v_count--;
1681                 mutex_exit(&vp->v_lock);
1682                 mutex_exit(&tp->tn_tlock);
1683                 rw_exit(&tp->tn_rwlock);
1684                 return;
1685         }
1686 
1687         /*
1688          * We have the last hold *and* the link count is zero, so this
1689          * tmpnode is dead from the filesystem's viewpoint.  However,
1690          * if the tmpnode has any pages associated with it (i.e. if it's
1691          * a normal file with non-zero size), the tmpnode can still be
1692          * discovered by pageout or fsflush via the page vnode pointers.
1693          * In this case we must drop all our locks, truncate the tmpnode,
1694          * and try the whole dance again.
1695          */
1696         if (tp->tn_size != 0) {
1697                 if (tp->tn_type == VREG) {
1698                         mutex_exit(&vp->v_lock);
1699                         mutex_exit(&tp->tn_tlock);
1700                         rw_enter(&tp->tn_contents, RW_WRITER);
1701                         (void) tmpnode_trunc(tm, tp, 0);
1702                         rw_exit(&tp->tn_contents);
1703                         ASSERT(tp->tn_size == 0);
1704                         ASSERT(tp->tn_nblocks == 0);
1705                         goto top;
1706                 }
1707                 if (tp->tn_type == VLNK)
1708                         tmp_memfree(tp->tn_symlink, tp->tn_size + 1);
1709         }
1710 
1711         /*
1712          * Remove normal file/dir's xattr dir and xattrs.
1713          */
1714         if (tp->tn_xattrdp) {
1715                 struct tmpnode *xtp = tp->tn_xattrdp;
1716 
1717                 ASSERT(xtp->tn_flags & ISXATTR);
1718                 tmpnode_hold(xtp);
1719                 rw_enter(&xtp->tn_rwlock, RW_WRITER);
1720                 tdirtrunc(xtp);
1721                 DECR_COUNT(&xtp->tn_nlink, &xtp->tn_tlock);
1722                 tp->tn_xattrdp = NULL;
1723                 rw_exit(&xtp->tn_rwlock);
1724                 tmpnode_rele(xtp);
1725         }
1726 
1727         mutex_exit(&vp->v_lock);
1728         mutex_exit(&tp->tn_tlock);
        /* Here's our chance to send the invalid event while between locks. */
1730         vn_invalid(TNTOV(tp));
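        /* Unlink this tmpnode from the per-mount list of tmpnodes. */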
1731         mutex_enter(&tm->tm_contents);
1732         if (tp->tn_forw == NULL)
1733                 tm->tm_rootnode->tn_back = tp->tn_back;
1734         else
1735                 tp->tn_forw->tn_back = tp->tn_back;
1736         tp->tn_back->tn_forw = tp->tn_forw;
1737         mutex_exit(&tm->tm_contents);
1738         rw_exit(&tp->tn_rwlock);
1739         rw_destroy(&tp->tn_rwlock);
1740         mutex_destroy(&tp->tn_tlock);
1741         vn_free(TNTOV(tp));
1742         tmp_memfree(tp, sizeof (struct tmpnode));
1743 }
1744 
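/*
 * Encode a file identifier (node id plus generation) so that services
 * such as NFS can build stable file handles for tmpfs files.
 */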
1745 /* ARGSUSED2 */
1746 static int
1747 tmp_fid(struct vnode *vp, struct fid *fidp, caller_context_t *ct)
1748 {
1749         struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
1750         struct tfid *tfid;
1751 
1752         if (fidp->fid_len < (sizeof (struct tfid) - sizeof (ushort_t))) {
1753                 fidp->fid_len = sizeof (struct tfid) - sizeof (ushort_t);
1754                 return (ENOSPC);
1755         }
1756 
1757         tfid = (struct tfid *)fidp;
1758         bzero(tfid, sizeof (struct tfid));
1759         tfid->tfid_len = (int)sizeof (struct tfid) - sizeof (ushort_t);
1760 
1761         tfid->tfid_ino = tp->tn_nodeid;
1762         tfid->tfid_gen = tp->tn_gen;
1763 
1764         return (0);
1765 }

/*
 * Return all the pages from [off..off+len] in the given file.
 */
1771 /* ARGSUSED */
1772 static int
1773 tmp_getpage(
1774         struct vnode *vp,
1775         offset_t off,
1776         size_t len,
1777         uint_t *protp,
1778         page_t *pl[],
1779         size_t plsz,
1780         struct seg *seg,
1781         caddr_t addr,
1782         enum seg_rw rw,
1783         struct cred *cr,
1784         caller_context_t *ct)
1785 {
1786         int err = 0;
1787         struct tmpnode *tp = VTOTN(vp);
1788         anoff_t toff = (anoff_t)off;
1789         size_t tlen = len;
1790         u_offset_t tmpoff;
1791         timestruc_t now;
1792 
1793         rw_enter(&tp->tn_contents, RW_READER);
1794 
        if (off + len > tp->tn_size + PAGEOFFSET) {
1796                 err = EFAULT;
1797                 goto out;
1798         }
        /*
         * Look for holes (no anon slot) in the faulting range. If there
         * are holes, we have to switch to a write lock and fill them in,
         * since allocating anon slots modifies the anon map. Swap space
         * for holes was already reserved when the file was grown.
         */
1804         tmpoff = toff;
1805         if (non_anon(tp->tn_anon, btop(off), &tmpoff, &tlen)) {
1806                 if (!rw_tryupgrade(&tp->tn_contents)) {
1807                         rw_exit(&tp->tn_contents);
1808                         rw_enter(&tp->tn_contents, RW_WRITER);
1809                         /* Size may have changed when lock was dropped */
                        if (off + len > tp->tn_size + PAGEOFFSET) {
1811                                 err = EFAULT;
1812                                 goto out;
1813                         }
1814                 }
1815                 for (toff = (anoff_t)off; toff < (anoff_t)off + len;
1816                     toff += PAGESIZE) {
1817                         if (anon_get_ptr(tp->tn_anon, btop(toff)) == NULL) {
1818                                 /* XXX - may allocate mem w. write lock held */
1819                                 (void) anon_set_ptr(tp->tn_anon, btop(toff),
1820                                     anon_alloc(vp, toff), ANON_SLEEP);
1821                                 tp->tn_nblocks++;
1822                         }
1823                 }
1824                 rw_downgrade(&tp->tn_contents);
1825         }
1826 
1828         err = pvn_getpages(tmp_getapage, vp, (u_offset_t)off, len, protp,
1829             pl, plsz, seg, addr, rw, cr);
1830 
1831         gethrestime(&now);
1832         tp->tn_atime = now;
1833         if (rw == S_WRITE)
1834                 tp->tn_mtime = now;
1835 
1836 out:
1837         rw_exit(&tp->tn_contents);
1838         return (err);
1839 }
1840 
1841 /*
1842  * Called from pvn_getpages to get a particular page.
1843  */
1844 /*ARGSUSED*/
1845 static int
1846 tmp_getapage(
1847         struct vnode *vp,
1848         u_offset_t off,
1849         size_t len,
1850         uint_t *protp,
1851         page_t *pl[],
1852         size_t plsz,
1853         struct seg *seg,
1854         caddr_t addr,
1855         enum seg_rw rw,
1856         struct cred *cr)
1857 {
1858         struct page *pp;
1859         int flags;
1860         int err = 0;
1861         struct vnode *pvp;
1862         u_offset_t poff;
1863 
1864         if (protp != NULL)
1865                 *protp = PROT_ALL;
1866 again:
1867         if (pp = page_lookup(vp, off, rw == S_CREATE ? SE_EXCL : SE_SHARED)) {
1868                 if (pl) {
1869                         pl[0] = pp;
1870                         pl[1] = NULL;
1871                 } else {
1872                         page_unlock(pp);
1873                 }
1874         } else {
1875                 pp = page_create_va(vp, off, PAGESIZE,
1876                     PG_WAIT | PG_EXCL, seg, addr);
1877                 /*
1878                  * Someone raced in and created the page after we did the
1879                  * lookup but before we did the create, so go back and
1880                  * try to look it up again.
1881                  */
1882                 if (pp == NULL)
1883                         goto again;
1884                 /*
1885                  * Fill page from backing store, if any. If none, then
1886                  * either this is a newly filled hole or page must have
1887                  * been unmodified and freed so just zero it out.
1888                  */
1889                 err = swap_getphysname(vp, off, &pvp, &poff);
1890                 if (err) {
1891                         panic("tmp_getapage: no anon slot vp %p "
1892                             "off %llx pp %p\n", (void *)vp, off, (void *)pp);
1893                 }
1894                 if (pvp) {
1895                         flags = (pl == NULL ? B_ASYNC|B_READ : B_READ);
1896                         err = VOP_PAGEIO(pvp, pp, (u_offset_t)poff, PAGESIZE,
1897                             flags, cr, NULL);
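                        /*
                         * For an async read the page is handed off to
                         * the i/o subsystem; clear pp so the completion
                         * logic below does not touch it.
                         */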
1898                         if (flags & B_ASYNC)
1899                                 pp = NULL;
1900                 } else if (rw != S_CREATE) {
1901                         pagezero(pp, 0, PAGESIZE);
1902                 }
1903                 if (err && pp)
1904                         pvn_read_done(pp, B_ERROR);
1905                 if (err == 0) {
1906                         if (pl)
1907                                 pvn_plist_init(pp, pl, plsz, off, PAGESIZE, rw);
1908                         else
1909                                 pvn_io_done(pp);
1910                 }
1911         }
1912         return (err);
1913 }
1914 
1916 /*
 * Flags are composed of {B_INVAL, B_DIRTY, B_FREE, B_DONTNEED}.
1918  * If len == 0, do from off to EOF.
1919  */
1920 static int tmp_nopage = 0;      /* Don't do tmp_putpage's if set */
1921 
1922 /* ARGSUSED */
1923 int
1924 tmp_putpage(
1925         register struct vnode *vp,
1926         offset_t off,
1927         size_t len,
1928         int flags,
1929         struct cred *cr,
1930         caller_context_t *ct)
1931 {
1932         register page_t *pp;
1933         u_offset_t io_off;
1934         size_t io_len = 0;
1935         int err = 0;
1936         struct tmpnode *tp = VTOTN(vp);
1937         int dolock;
1938 
1939         if (tmp_nopage)
1940                 return (0);
1941 
1942         ASSERT(vp->v_count != 0);
1943 
1944         if (vp->v_flag & VNOMAP)
1945                 return (ENOSYS);
1946 
1947         /*
1948          * This being tmpfs, we don't ever do i/o unless we really
1949          * have to (when we're low on memory and pageout calls us
1950          * with B_ASYNC | B_FREE or the user explicitly asks for it with
1951          * B_DONTNEED).
         * XXX To approximately track the mod time as ufs does, we should
         * update the times here. The problem is that once someone does a
         * store, we never clear the mod bit and do i/o, so fsflush would
         * keep calling us every 30 seconds to do the i/o and we'd
         * continually update the mod time. At least we update the mod time
         * on the first store, because that results in a call to getpage.
1958          */
1959         if (flags != (B_ASYNC | B_FREE) && (flags & B_INVAL) == 0 &&
1960             (flags & B_DONTNEED) == 0)
1961                 return (0);
1962         /*
1963          * If this thread owns the lock, i.e., this thread grabbed it
1964          * as writer somewhere above, then we don't need to grab the
1965          * lock as reader in this routine.
1966          */
1967         dolock = (rw_owner(&tp->tn_contents) != curthread);
1968 
        /*
         * If this is pageout, don't block on the lock, as that could
         * deadlock when freemem == 0 (another thread holds the read lock
         * and is blocked creating a page, and a third thread is waiting
         * for the writer lock; waiting-writer priority blocks us from
         * getting the read lock). Of course, if the only freeable pages
         * are on this tmpnode, we're hosed anyway. A better solution
         * might be a new lock type. Note: ufs has the same problem.
         */
1978         if (curproc == proc_pageout) {
1979                 if (!rw_tryenter(&tp->tn_contents, RW_READER))
1980                         return (ENOMEM);
1981         } else if (dolock)
1982                 rw_enter(&tp->tn_contents, RW_READER);
1983 
1984         if (!vn_has_cached_data(vp))
1985                 goto out;
1986 
1987         if (len == 0) {
1988                 if (curproc == proc_pageout) {
1989                         panic("tmp: pageout can't block");
1990                         /*NOTREACHED*/
1991                 }
1992 
1993                 /* Search the entire vp list for pages >= off. */
1994                 err = pvn_vplist_dirty(vp, (u_offset_t)off, tmp_putapage,
1995                     flags, cr);
1996         } else {
1997                 u_offset_t eoff;
1998 
1999                 /*
2000                  * Loop over all offsets in the range [off...off + len]
2001                  * looking for pages to deal with.
2002                  */
2003                 eoff = MIN(off + len, tp->tn_size);
2004                 for (io_off = off; io_off < eoff; io_off += io_len) {
2005                         /*
2006                          * If we are not invalidating, synchronously
2007                          * freeing or writing pages use the routine
2008                          * page_lookup_nowait() to prevent reclaiming
2009                          * them from the free list.
2010                          */
2011                         if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
2012                                 pp = page_lookup(vp, io_off,
2013                                     (flags & (B_INVAL | B_FREE)) ?
2014                                     SE_EXCL : SE_SHARED);
2015                         } else {
2016                                 pp = page_lookup_nowait(vp, io_off,
2017                                     (flags & B_FREE) ? SE_EXCL : SE_SHARED);
2018                         }
2019 
2020                         if (pp == NULL || pvn_getdirty(pp, flags) == 0)
2021                                 io_len = PAGESIZE;
2022                         else {
2023                                 err = tmp_putapage(vp, pp, &io_off, &io_len,
2024                                     flags, cr);
2025                                 if (err != 0)
2026                                         break;
2027                         }
2028                 }
2029         }
2030         /* If invalidating, verify all pages on vnode list are gone. */
2031         if (err == 0 && off == 0 && len == 0 &&
2032             (flags & B_INVAL) && vn_has_cached_data(vp)) {
2033                 panic("tmp_putpage: B_INVAL, pages not gone");
2034                 /*NOTREACHED*/
2035         }
2036 out:
2037         if ((curproc == proc_pageout) || dolock)
2038                 rw_exit(&tp->tn_contents);
        /*
         * The only reason putapage would return SE_NOSWAP is that we
         * asked for a page to be written to physical backing store and
         * there was none. Ignore it, since we may be dealing with a
         * swap page that has no backing store on disk; we won't see
         * this error in any other case.
         */
2046         if (err == SE_NOSWAP)
2047                 err = 0;
2048         return (err);
2049 }
2050 
2051 long tmp_putpagecnt, tmp_pagespushed;
2052 
2053 /*
2054  * Write out a single page.
2055  * For tmpfs this means choose a physical swap slot and write the page
2056  * out using VOP_PAGEIO. For performance, we attempt to kluster; i.e.,
2057  * we try to find a bunch of other dirty pages adjacent in the file
2058  * and a bunch of contiguous swap slots, and then write all the pages
2059  * out in a single i/o.
2060  */
2061 /*ARGSUSED*/
2062 static int
2063 tmp_putapage(
2064         struct vnode *vp,
2065         page_t *pp,
2066         u_offset_t *offp,
2067         size_t *lenp,
2068         int flags,
2069         struct cred *cr)
2070 {
2071         int err;
2072         ulong_t klstart, kllen;
2073         page_t *pplist, *npplist;
2074         extern int klustsize;
2075         long tmp_klustsize;
2076         struct tmpnode *tp;
2077         size_t pp_off, pp_len;
2078         u_offset_t io_off;
2079         size_t io_len;
2080         struct vnode *pvp;
2081         u_offset_t pstart;
2082         u_offset_t offset;
2083         u_offset_t tmpoff;
2084 
2085         ASSERT(PAGE_LOCKED(pp));
2086 
2087         /* Kluster in tmp_klustsize chunks */
2088         tp = VTOTN(vp);
2089         tmp_klustsize = klustsize;
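        /*
         * Compute a kluster window aligned on a tmp_klustsize boundary,
         * clamped so it does not extend past end-of-file.
         */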
2090         offset = pp->p_offset;
2091         klstart = (offset / tmp_klustsize) * tmp_klustsize;
2092         kllen = MIN(tmp_klustsize, tp->tn_size - klstart);
2093 
2094         /* Get a kluster of pages */
2095         pplist =
2096             pvn_write_kluster(vp, pp, &tmpoff, &pp_len, klstart, kllen, flags);
2097 
2098         pp_off = (size_t)tmpoff;
2099 
2100         /*
2101          * Get a cluster of physical offsets for the pages; the amount we
2102          * get may be some subrange of what we ask for (io_off, io_len).
2103          */
2104         io_off = pp_off;
2105         io_len = pp_len;
2106         err = swap_newphysname(vp, offset, &io_off, &io_len, &pvp, &pstart);
2107         ASSERT(err != SE_NOANON); /* anon slot must have been filled */
2108         if (err) {
2109                 pvn_write_done(pplist, B_ERROR | B_WRITE | flags);
                /*
                 * If this routine is called as a result of a segvn_sync
                 * operation and we have no physical swap, we can get an
                 * error here; in that case swap_newphysname() returns
                 * SE_NOSWAP, which is the only error we expect at this
                 * point.
                 */
2116                 ASSERT(err == SE_NOSWAP);
2117                 if (flags & B_INVAL)
2118                         err = ENOMEM;
2119                 goto out;
2120         }
2121         ASSERT(pp_off <= io_off && io_off + io_len <= pp_off + pp_len);
2122         ASSERT(io_off <= offset && offset < io_off + io_len);
2123 
        /*
         * Toss pages at the front/rear that we couldn't get physical
         * backing store for.
         */
2125         if (io_off != pp_off) {
2126                 npplist = NULL;
2127                 page_list_break(&pplist, &npplist, btop(io_off - pp_off));
2128                 ASSERT(pplist->p_offset == pp_off);
2129                 ASSERT(pplist->p_prev->p_offset == io_off - PAGESIZE);
2130                 pvn_write_done(pplist, B_ERROR | B_WRITE | flags);
2131                 pplist = npplist;
2132         }
2133         if (io_off + io_len < pp_off + pp_len) {
2134                 npplist = NULL;
2135                 page_list_break(&pplist, &npplist, btop(io_len));
2136                 ASSERT(npplist->p_offset == io_off + io_len);
2137                 ASSERT(npplist->p_prev->p_offset == pp_off + pp_len - PAGESIZE);
2138                 pvn_write_done(npplist, B_ERROR | B_WRITE | flags);
2139         }
2140 
2141         ASSERT(pplist->p_offset == io_off);
2142         ASSERT(pplist->p_prev->p_offset == io_off + io_len - PAGESIZE);
2143         ASSERT(btopr(io_len) <= btopr(kllen));
2144 
2145         /* Do i/o on the remaining kluster */
2146         err = VOP_PAGEIO(pvp, pplist, (u_offset_t)pstart, io_len,
2147             B_WRITE | flags, cr, NULL);
2148 
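        /*
         * For an async write, pvn_write_done() is called from the i/o
         * completion path rather than here.
         */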
2149         if ((flags & B_ASYNC) == 0) {
2150                 pvn_write_done(pplist, ((err) ? B_ERROR : 0) | B_WRITE | flags);
2151         }
2152 out:
2153         if (!err) {
2154                 if (offp)
2155                         *offp = io_off;
2156                 if (lenp)
2157                         *lenp = io_len;
2158                 tmp_putpagecnt++;
2159                 tmp_pagespushed += btop(io_len);
2160         }
2161         if (err && err != ENOMEM && err != SE_NOSWAP)
2162                 cmn_err(CE_WARN, "tmp_putapage: err %d\n", err);
2163         return (err);
2164 }
2165 
2166 /* ARGSUSED */
2167 static int
2168 tmp_map(
2169         struct vnode *vp,
2170         offset_t off,
2171         struct as *as,
2172         caddr_t *addrp,
2173         size_t len,
2174         uchar_t prot,
2175         uchar_t maxprot,
2176         uint_t flags,
2177         struct cred *cred,
2178         caller_context_t *ct)
2179 {
2180         struct segvn_crargs vn_a;
2181         struct tmpnode *tp = (struct tmpnode *)VTOTN(vp);
2182         int error;
2183 
2184 #ifdef _ILP32
2185         if (len > MAXOFF_T)
2186                 return (ENOMEM);
2187 #endif
2188 
2189         if (vp->v_flag & VNOMAP)
2190                 return (ENOSYS);
2191 
2192         if (off < 0 || (offset_t)(off + len) < 0 ||
2193             off > MAXOFF_T || (off + len) > MAXOFF_T)
2194                 return (ENXIO);
2195 
2196         if (vp->v_type != VREG)
2197                 return (ENODEV);
2198 
2199         /*
2200          * Don't allow mapping to locked file
2201          */
2202         if (vn_has_mandatory_locks(vp, tp->tn_mode)) {
2203                 return (EAGAIN);
2204         }
2205 
2206         as_rangelock(as);
2207         error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
2208         if (error != 0) {
2209                 as_rangeunlock(as);
2210                 return (error);
2211         }
2212 
2213         vn_a.vp = vp;
2214         vn_a.offset = (u_offset_t)off;
2215         vn_a.type = flags & MAP_TYPE;
2216         vn_a.prot = prot;
2217         vn_a.maxprot = maxprot;
2218         vn_a.flags = flags & ~MAP_TYPE;
2219         vn_a.cred = cred;
2220         vn_a.amp = NULL;
2221         vn_a.szc = 0;
2222         vn_a.lgrp_mem_policy_flags = 0;
2223 
2224         error = as_map(as, *addrp, len, segvn_create, &vn_a);
2225         as_rangeunlock(as);
2226         return (error);
2227 }
2228 
2229 /*
2230  * tmp_addmap and tmp_delmap can't be called since the vp
2231  * maintained in the segvn mapping is NULL.
2232  */
2233 /* ARGSUSED */
2234 static int
2235 tmp_addmap(
2236         struct vnode *vp,
2237         offset_t off,
2238         struct as *as,
2239         caddr_t addr,
2240         size_t len,
2241         uchar_t prot,
2242         uchar_t maxprot,
2243         uint_t flags,
2244         struct cred *cred,
2245         caller_context_t *ct)
2246 {
2247         return (0);
2248 }
2249 
2250 /* ARGSUSED */
2251 static int
2252 tmp_delmap(
2253         struct vnode *vp,
2254         offset_t off,
2255         struct as *as,
2256         caddr_t addr,
2257         size_t len,
2258         uint_t prot,
2259         uint_t maxprot,
2260         uint_t flags,
2261         struct cred *cred,
2262         caller_context_t *ct)
2263 {
2264         return (0);
2265 }
2266 
2267 static int
2268 tmp_freesp(struct vnode *vp, struct flock64 *lp, int flag)
2269 {
2270         register int i;
2271         register struct tmpnode *tp = VTOTN(vp);
2272         int error;
2273 
2274         ASSERT(vp->v_type == VREG);
2275         ASSERT(lp->l_start >= 0);
2276 
2277         if (lp->l_len != 0)
2278                 return (EINVAL);
2279 
2280         rw_enter(&tp->tn_rwlock, RW_WRITER);
2281         if (tp->tn_size == lp->l_start) {
2282                 rw_exit(&tp->tn_rwlock);
2283                 return (0);
2284         }
2285 
2286         /*
2287          * Check for any mandatory locks on the range
2288          */
2289         if (MANDLOCK(vp, tp->tn_mode)) {
2290                 long save_start;
2291 
2292                 save_start = lp->l_start;
2293 
2294                 if (tp->tn_size < lp->l_start) {
2295                         /*
2296                          * "Truncate up" case: need to make sure there
2297                          * is no lock beyond current end-of-file. To
2298                          * do so, we need to set l_start to the size
2299                          * of the file temporarily.
2300                          */
2301                         lp->l_start = tp->tn_size;
2302                 }
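                /*
                 * Probe for conflicts with a write lock covering
                 * [l_start, EOF]; reclock() sets l_type to F_UNLCK
                 * only when no conflicting lock exists.
                 */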
2303                 lp->l_type = F_WRLCK;
2304                 lp->l_sysid = 0;
2305                 lp->l_pid = ttoproc(curthread)->p_pid;
2306                 i = (flag & (FNDELAY|FNONBLOCK)) ? 0 : SLPFLCK;
2307                 if ((i = reclock(vp, lp, i, 0, lp->l_start, NULL)) != 0 ||
2308                     lp->l_type != F_UNLCK) {
2309                         rw_exit(&tp->tn_rwlock);
2310                         return (i ? i : EAGAIN);
2311                 }
2312 
2313                 lp->l_start = save_start;
2314         }
2316 
2317         rw_enter(&tp->tn_contents, RW_WRITER);
2318         error = tmpnode_trunc((struct tmount *)VFSTOTM(vp->v_vfsp),
2319             tp, (ulong_t)lp->l_start);
2320         rw_exit(&tp->tn_contents);
2321         rw_exit(&tp->tn_rwlock);
2322         return (error);
2323 }
2324 
2325 /* ARGSUSED */
2326 static int
2327 tmp_space(
2328         struct vnode *vp,
2329         int cmd,
2330         struct flock64 *bfp,
2331         int flag,
2332         offset_t offset,
2333         cred_t *cred,
2334         caller_context_t *ct)
2335 {
2336         int error;
2337 
2338         if (cmd != F_FREESP)
2339                 return (EINVAL);
2340         if ((error = convoff(vp, bfp, 0, (offset_t)offset)) == 0) {
2341                 if ((bfp->l_start > MAXOFF_T) || (bfp->l_len > MAXOFF_T))
2342                         return (EFBIG);
2343                 error = tmp_freesp(vp, bfp, flag);
2344 
2345                 if (error == 0 && bfp->l_start == 0)
2346                         vnevent_truncate(vp, ct);
2347         }
2348         return (error);
2349 }
2350 
2351 /* ARGSUSED */
2352 static int
2353 tmp_seek(
2354         struct vnode *vp,
2355         offset_t ooff,
2356         offset_t *noffp,
2357         caller_context_t *ct)
2358 {
2359         return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
2360 }
2361 
2362 /* ARGSUSED2 */
2363 static int
2364 tmp_rwlock(struct vnode *vp, int write_lock, caller_context_t *ctp)
2365 {
2366         struct tmpnode *tp = VTOTN(vp);
2367 
2368         if (write_lock) {
2369                 rw_enter(&tp->tn_rwlock, RW_WRITER);
2370         } else {
2371                 rw_enter(&tp->tn_rwlock, RW_READER);
2372         }
2373         return (write_lock);
2374 }
2375 
2376 /* ARGSUSED1 */
2377 static void
2378 tmp_rwunlock(struct vnode *vp, int write_lock, caller_context_t *ctp)
2379 {
2380         struct tmpnode *tp = VTOTN(vp);
2381 
2382         rw_exit(&tp->tn_rwlock);
2383 }
2384 
2385 static int
2386 tmp_pathconf(
2387         struct vnode *vp,
2388         int cmd,
2389         ulong_t *valp,
2390         cred_t *cr,
2391         caller_context_t *ct)
2392 {
2393         struct tmpnode *tp = NULL;
2394         int error;
2395 
2396         switch (cmd) {
2397         case _PC_XATTR_EXISTS:
2398                 if (vp->v_vfsp->vfs_flag & VFS_XATTR) {
2399                         *valp = 0;      /* assume no attributes */
2400                         error = 0;      /* okay to ask */
2401                         tp = VTOTN(vp);
2402                         rw_enter(&tp->tn_rwlock, RW_READER);
2403                         if (tp->tn_xattrdp) {
2404                                 rw_enter(&tp->tn_xattrdp->tn_rwlock, RW_READER);
2405                                 /* do not count "." and ".." */
2406                                 if (tp->tn_xattrdp->tn_dirents > 2)
2407                                         *valp = 1;
2408                                 rw_exit(&tp->tn_xattrdp->tn_rwlock);
2409                         }
2410                         rw_exit(&tp->tn_rwlock);
2411                 } else {
2412                         error = EINVAL;
2413                 }
2414                 break;
2415         case _PC_SATTR_ENABLED:
2416         case _PC_SATTR_EXISTS:
2417                 *valp = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
2418                     (vp->v_type == VREG || vp->v_type == VDIR);
2419                 error = 0;
2420                 break;
2421         case _PC_TIMESTAMP_RESOLUTION:
2422                 /* nanosecond timestamp resolution */
2423                 *valp = 1L;
2424                 error = 0;
2425                 break;
2426         default:
2427                 error = fs_pathconf(vp, cmd, valp, cr, ct);
2428         }
2429         return (error);
2430 }
2431 
2433 struct vnodeops *tmp_vnodeops;
2434 
2435 const fs_operation_def_t tmp_vnodeops_template[] = {
2436         VOPNAME_OPEN,           { .vop_open = tmp_open },
2437         VOPNAME_CLOSE,          { .vop_close = tmp_close },
2438         VOPNAME_READ,           { .vop_read = tmp_read },
2439         VOPNAME_WRITE,          { .vop_write = tmp_write },
2440         VOPNAME_IOCTL,          { .vop_ioctl = tmp_ioctl },
2441         VOPNAME_GETATTR,        { .vop_getattr = tmp_getattr },
2442         VOPNAME_SETATTR,        { .vop_setattr = tmp_setattr },
2443         VOPNAME_ACCESS,         { .vop_access = tmp_access },
2444         VOPNAME_LOOKUP,         { .vop_lookup = tmp_lookup },
2445         VOPNAME_CREATE,         { .vop_create = tmp_create },
2446         VOPNAME_REMOVE,         { .vop_remove = tmp_remove },
2447         VOPNAME_LINK,           { .vop_link = tmp_link },
2448         VOPNAME_RENAME,         { .vop_rename = tmp_rename },
2449         VOPNAME_MKDIR,          { .vop_mkdir = tmp_mkdir },
2450         VOPNAME_RMDIR,          { .vop_rmdir = tmp_rmdir },
2451         VOPNAME_READDIR,        { .vop_readdir = tmp_readdir },
2452         VOPNAME_SYMLINK,        { .vop_symlink = tmp_symlink },
2453         VOPNAME_READLINK,       { .vop_readlink = tmp_readlink },
2454         VOPNAME_FSYNC,          { .vop_fsync = tmp_fsync },
2455         VOPNAME_INACTIVE,       { .vop_inactive = tmp_inactive },
2456         VOPNAME_FID,            { .vop_fid = tmp_fid },
2457         VOPNAME_RWLOCK,         { .vop_rwlock = tmp_rwlock },
2458         VOPNAME_RWUNLOCK,       { .vop_rwunlock = tmp_rwunlock },
2459         VOPNAME_SEEK,           { .vop_seek = tmp_seek },
2460         VOPNAME_SPACE,          { .vop_space = tmp_space },
2461         VOPNAME_GETPAGE,        { .vop_getpage = tmp_getpage },
2462         VOPNAME_PUTPAGE,        { .vop_putpage = tmp_putpage },
2463         VOPNAME_MAP,            { .vop_map = tmp_map },
2464         VOPNAME_ADDMAP,         { .vop_addmap = tmp_addmap },
2465         VOPNAME_DELMAP,         { .vop_delmap = tmp_delmap },
2466         VOPNAME_PATHCONF,       { .vop_pathconf = tmp_pathconf },
2467         VOPNAME_VNEVENT,        { .vop_vnevent = fs_vnevent_support },
2468         NULL,                   NULL
2469 };