          --- old/usr/src/uts/common/fs/smbclnt/smbfs/smbfs_vnops.c
          +++ new/usr/src/uts/common/fs/smbclnt/smbfs/smbfs_vnops.c
(43 lines elided)
  44   44  #include <sys/uio.h>
  45   45  #include <sys/dirent.h>
  46   46  #include <sys/errno.h>
  47   47  #include <sys/sunddi.h>
  48   48  #include <sys/sysmacros.h>
  49   49  #include <sys/kmem.h>
  50   50  #include <sys/cmn_err.h>
  51   51  #include <sys/vfs_opreg.h>
  52   52  #include <sys/policy.h>
  53   53  
       54 +#include <sys/param.h>
       55 +#include <sys/vm.h>
       56 +#include <vm/seg_vn.h>
       57 +#include <vm/pvn.h>
       58 +#include <vm/as.h>
       59 +#include <vm/hat.h>
       60 +#include <vm/page.h>
       61 +#include <vm/seg.h>
       62 +#include <vm/seg_map.h>
       63 +#include <vm/seg_kmem.h>
       64 +#include <vm/seg_kpm.h>
       65 +
  54   66  #include <netsmb/smb_osdep.h>
  55   67  #include <netsmb/smb.h>
  56   68  #include <netsmb/smb_conn.h>
  57   69  #include <netsmb/smb_subr.h>
  58   70  
  59   71  #include <smbfs/smbfs.h>
  60   72  #include <smbfs/smbfs_node.h>
  61   73  #include <smbfs/smbfs_subr.h>
  62   74  
  63   75  #include <sys/fs/smbfs_ioctl.h>
(104 lines elided)
 168  180                          cred_t *, caller_context_t *);
 169  181  static int      smbfs_pathconf(vnode_t *, int, ulong_t *, cred_t *,
 170  182                          caller_context_t *);
 171  183  static int      smbfs_setsecattr(vnode_t *, vsecattr_t *, int, cred_t *,
 172  184                          caller_context_t *);
 173  185  static int      smbfs_getsecattr(vnode_t *, vsecattr_t *, int, cred_t *,
 174  186                          caller_context_t *);
 175  187  static int      smbfs_shrlock(vnode_t *, int, struct shrlock *, int, cred_t *,
 176  188                          caller_context_t *);
 177  189  
      190 +static int uio_page_mapin(uio_t *uiop, page_t *pp);
      191 +
      192 +static void uio_page_mapout(uio_t *uiop, page_t *pp);
      193 +
      194 +static int smbfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
      195 +        size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
      196 +        caller_context_t *ct);
      197 +
      198 +static int smbfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
      199 +        size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
      200 +        caller_context_t *ct);
      201 +
      202 +static int smbfs_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
      203 +        size_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cr,
      204 +        caller_context_t *ct);
      205 +
      206 +static int smbfs_putpage(vnode_t *vp, offset_t off, size_t len, int flags,
      207 +        cred_t *cr, caller_context_t *ct);
      208 +
      209 +static int smbfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp, size_t *lenp,
      210 +        int flags, cred_t *cr);
      211 +
      212 +static int smbfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp,
      213 +        page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
      214 +        enum seg_rw rw, cred_t *cr, caller_context_t *ct);
      215 +
      216 +static int smbfs_getapage(vnode_t *vp, u_offset_t off, size_t len,
      217 +        uint_t *protp, page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
      218 +        enum seg_rw rw, cred_t *cr);
      219 +
      220 +
      221 +
 178  222  /* Dummy function to use until correct function is ported in */
 179  223  int noop_vnodeop() {
 180  224          return (0);
 181  225  }
 182  226  
 183  227  struct vnodeops *smbfs_vnodeops = NULL;
 184  228  
 185  229  /*
 186  230   * Most unimplemented ops will return ENOSYS because of fs_nosys().
 187  231   * The only ops where that won't work are ACCESS (due to open(2)
(20 lines elided)
 208  252          { VOPNAME_READLINK,     { .error = fs_nosys } }, /* smbfs_readlink, */
 209  253          { VOPNAME_FSYNC,        { .vop_fsync = smbfs_fsync } },
 210  254          { VOPNAME_INACTIVE,     { .vop_inactive = smbfs_inactive } },
 211  255          { VOPNAME_FID,          { .error = fs_nosys } }, /* smbfs_fid, */
 212  256          { VOPNAME_RWLOCK,       { .vop_rwlock = smbfs_rwlock } },
 213  257          { VOPNAME_RWUNLOCK,     { .vop_rwunlock = smbfs_rwunlock } },
 214  258          { VOPNAME_SEEK,         { .vop_seek = smbfs_seek } },
 215  259          { VOPNAME_FRLOCK,       { .vop_frlock = smbfs_frlock } },
 216  260          { VOPNAME_SPACE,        { .vop_space = smbfs_space } },
 217  261          { VOPNAME_REALVP,       { .error = fs_nosys } }, /* smbfs_realvp, */
 218      -        { VOPNAME_GETPAGE,      { .error = fs_nosys } }, /* smbfs_getpage, */
 219      -        { VOPNAME_PUTPAGE,      { .error = fs_nosys } }, /* smbfs_putpage, */
 220      -        { VOPNAME_MAP,          { .error = fs_nosys } }, /* smbfs_map, */
 221      -        { VOPNAME_ADDMAP,       { .error = fs_nosys } }, /* smbfs_addmap, */
 222      -        { VOPNAME_DELMAP,       { .error = fs_nosys } }, /* smbfs_delmap, */
       262 +        { VOPNAME_GETPAGE,      { .vop_getpage = smbfs_getpage } },
       263 +        { VOPNAME_PUTPAGE,      { .vop_putpage = smbfs_putpage } },
       264 +        { VOPNAME_MAP,          { .vop_map = smbfs_map } },
       265 +        { VOPNAME_ADDMAP,       { .vop_addmap = smbfs_addmap } },
       266 +        { VOPNAME_DELMAP,       { .vop_delmap = smbfs_delmap } },
       267 +        { VOPNAME_DISPOSE,      { .vop_dispose = fs_dispose } },
 223  268          { VOPNAME_DUMP,         { .error = fs_nosys } }, /* smbfs_dump, */
 224  269          { VOPNAME_PATHCONF,     { .vop_pathconf = smbfs_pathconf } },
 225  270          { VOPNAME_PAGEIO,       { .error = fs_nosys } }, /* smbfs_pageio, */
 226  271          { VOPNAME_SETSECATTR,   { .vop_setsecattr = smbfs_setsecattr } },
 227  272          { VOPNAME_GETSECATTR,   { .vop_getsecattr = smbfs_getsecattr } },
 228  273          { VOPNAME_SHRLOCK,      { .vop_shrlock = smbfs_shrlock } },
 229  274          { NULL, NULL }
 230  275  };
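
For context (not part of this change): the template above only takes effect once it is compiled into a vnodeops_t. A minimal sketch of the usual illumos registration path, assuming the array is named smbfs_vnodeops_template and that this runs from the module's init code (the actual call site in smbfs is elsewhere):

    /*
     * Sketch: build smbfs_vnodeops from the fs_operation_def_t template.
     * Entries left as fs_nosys keep returning ENOSYS; the new
     * .vop_getpage/.vop_putpage/.vop_map entries become live VOPs.
     */
    static int
    smbfs_vnodeops_init(void)
    {
            int error;

            error = vn_make_ops("smbfs", smbfs_vnodeops_template,
                &smbfs_vnodeops);
            if (error != 0)
                    cmn_err(CE_WARN, "smbfs: bad vnode ops template");
            return (error);
    }
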
 231  276  
 232  277  /*
(246 lines elided)
 479  524          /*
 480  525           * Decrement the reference count for the FID
 481  526           * and possibly do the OtW close.
 482  527           *
 483  528           * Exclusive lock for modifying n_fid stuff.
 484  529           * Don't want this one ever interruptible.
 485  530           */
 486  531          (void) smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, 0);
 487  532          smb_credinit(&scred, cr);
 488  533  
 489      -        smbfs_rele_fid(np, &scred);
       534 +        /*
       535 +         * If the FID ref. count is 1 but the count of mmapped
       536 +         * pages is not 0, don't call smbfs_rele_fid(), because
       537 +         * that would result in the OtW close.  A non-zero mapped-
       538 +         * page count means the pages may still be accessed after
       539 +         * close(), so the FID must stay valid, i.e. no OtW close.
       540 +         * The FID won't leak: when the vnode's reference count
       541 +         * drops to 0, smbfs_inactive() will release the FID and
       542 +         * eventually do the OtW close.
       543 +         */
      544 +        if (np->n_fidrefs > 1) {
      545 +                smbfs_rele_fid(np, &scred);
      546 +        } else if (np->r_mapcnt == 0) {
       547 +                /*
       548 +                 * Before the OtW close, make sure dirty pages are written back.
       549 +                 */
       550 +                if ((flag & FWRITE) && vn_has_cached_data(vp)) {
       551 +                        /* smbfs_putapage() takes the shared lock, so
       552 +                         * drop the exclusive lock temporarily.
       553 +                         */
       554 +                        smbfs_rw_exit(&np->r_lkserlock);
       555 +
       556 +                        (void) smbfs_putpage(vp, (offset_t)0, 0, B_INVAL | B_ASYNC, cr, ct);
       557 +
       558 +                        /* Reacquire the exclusive lock. */
      559 +                        (void) smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, 0);
      560 +                }
      561 +                smbfs_rele_fid(np, &scred);
      562 +        }
 490  563  
 491  564          smb_credrele(&scred);
 492  565          smbfs_rw_exit(&np->r_lkserlock);
 493  566  
 494  567          return (0);
 495  568  }
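
Why the r_mapcnt check matters: POSIX lets a mapping outlive the file descriptor it came from, so the OtW close must be deferred while pages are still mapped. A minimal userland sketch of the pattern being protected (hypothetical path on an smbfs mount; error checks omitted):

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int
    main(void)
    {
            int fd = open("/mnt/smb/f", O_RDWR);    /* hypothetical path */
            char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                MAP_SHARED, fd, 0);

            (void) close(fd);   /* fd is gone, but the mapping survives */
            p[0] = 'x';         /* must still reach the file, so smbfs
                                   keeps the FID until smbfs_inactive() */
            (void) munmap(p, 4096);
            return (0);
    }
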
 496  569  
 497  570  /*
 498  571   * Helper for smbfs_close.  Decrement the reference count
 499  572   * for an SMB-level file or directory ID, and when the last
(858 lines elided)
1358 1431                  /* Force last close. */
1359 1432                  np->n_dirrefs = 1;
1360 1433                  smbfs_rele_fid(np, &scred);
1361 1434                  break;
1362 1435  
1363 1436          case VREG:
1364 1437                  if (np->n_fidrefs == 0)
1365 1438                          break;
1366 1439                  SMBVDEBUG("open file: refs %d id 0x%x path %s\n",
1367 1440                      np->n_fidrefs, np->n_fid, np->n_rpath);
      1441 +                /*
      1442 +                 * Before the OtW close, make sure dirty pages are written back.
      1443 +                 */
      1444 +                if (vn_has_cached_data(vp)) {
      1445 +                        /* smbfs_putapage() takes the shared lock, so
      1446 +                         * drop the exclusive lock temporarily.
      1447 +                         */
      1448 +                        smbfs_rw_exit(&np->r_lkserlock);
      1449 +
      1450 +                        (void) smbfs_putpage(vp, (offset_t)0, 0, B_INVAL | B_ASYNC, cr, ct);
      1451 +
      1452 +                        /* Reacquire the exclusive lock. */
     1453 +                        (void) smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, 0);
     1454 +                }
1368 1455                  /* Force last close. */
1369 1456                  np->n_fidrefs = 1;
1370 1457                  smbfs_rele_fid(np, &scred);
1371 1458                  break;
1372 1459  
1373 1460          default:
1374 1461                  SMBVDEBUG("bad n_ovtype %d\n", np->n_ovtype);
1375 1462                  np->n_ovtype = VNON;
1376 1463                  break;
1377 1464          }
(1725 lines elided)
3103 3190          caller_context_t *ct)
3104 3191  {
3105 3192          if (curproc->p_zone != VTOSMI(vp)->smi_zone_ref.zref_zone)
3106 3193                  return (EIO);
3107 3194  
3108 3195          if (VTOSMI(vp)->smi_flags & SMI_LLOCK)
3109 3196                  return (fs_shrlock(vp, cmd, shr, flag, cr, ct));
3110 3197          else
3111 3198                  return (ENOSYS);
3112 3199  }
     3200 +
     3201 +static int uio_page_mapin(uio_t *uiop, page_t *pp) {
     3202 +    u_offset_t off;
     3203 +    size_t size;
     3204 +    pgcnt_t npages;
     3205 +    caddr_t kaddr;
     3206 +    pfn_t pfnum;
     3207 +
     3208 +    off = (uintptr_t) uiop->uio_loffset & PAGEOFFSET;
     3209 +    size = P2ROUNDUP(uiop->uio_resid + off, PAGESIZE);
     3210 +    npages = btop(size);
     3211 +
     3212 +    ASSERT(pp != NULL);
     3213 +
     3214 +    if (npages == 1 && kpm_enable) {
     3215 +        kaddr = hat_kpm_mapin(pp, NULL);
     3216 +        if (kaddr == NULL)
     3217 +            return (EFAULT);
     3218 +
     3219 +        uiop->uio_iov->iov_base = kaddr + off;
     3220 +        uiop->uio_iov->iov_len = PAGESIZE - off;
     3221 +
     3222 +    } else {
     3223 +        kaddr = vmem_xalloc(heap_arena, size, PAGESIZE, 0, 0, NULL, NULL, VM_SLEEP);
     3224 +        if (kaddr == NULL)
     3225 +            return (EFAULT);
     3226 +
     3227 +        uiop->uio_iov->iov_base = kaddr + off;
     3228 +        uiop->uio_iov->iov_len = size - off;
     3229 +
      3230 +        /* Map the pages in at kaddr. */
     3231 +        uint_t attr = PROT_READ | PROT_WRITE | HAT_NOSYNC;
     3232 +        while (npages-- > 0) {
     3233 +            pfnum = pp->p_pagenum;
     3234 +            pp = pp->p_next;
     3235 +
     3236 +            hat_devload(kas.a_hat, kaddr, PAGESIZE, pfnum, attr, HAT_LOAD_LOCK);
     3237 +            kaddr += PAGESIZE;
     3238 +        }
     3239 +    }
     3240 +    return (0);
     3241 +}
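
The arithmetic at the top of uio_page_mapin() is easiest to see with concrete numbers; a worked example, assuming 4K pages:

    /*
     * uio_loffset = 0x1234, uio_resid = 0x2000 (PAGESIZE = 0x1000):
     *   off    = 0x1234 & PAGEOFFSET                  = 0x234
     *   size   = P2ROUNDUP(0x2000 + 0x234, PAGESIZE)  = 0x3000
     *   npages = btop(0x3000)                         = 3
     * A two-page transfer that starts mid-page spans three pages, so
     * the kpm fast path (single page) can't be used and the
     * heap_arena path must map in all three.
     */
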
     3242 +
     3243 +static void uio_page_mapout(uio_t *uiop, page_t *pp) {
     3244 +    u_offset_t off;
     3245 +    size_t size;
     3246 +    pgcnt_t npages;
     3247 +    caddr_t kaddr;
     3248 +
     3249 +    kaddr = uiop->uio_iov->iov_base;
     3250 +    off = (uintptr_t) kaddr & PAGEOFFSET;
     3251 +    size = P2ROUNDUP(uiop->uio_iov->iov_len + off, PAGESIZE);
     3252 +    npages = btop(size);
     3253 +
     3254 +    ASSERT(pp != NULL);
     3255 +
     3256 +    kaddr = (caddr_t) ((uintptr_t) kaddr & MMU_PAGEMASK);
     3257 +
     3258 +    if (npages == 1 && kpm_enable) {
     3259 +        hat_kpm_mapout(pp, NULL, kaddr);
     3260 +
     3261 +    } else {
     3262 +        hat_unload(kas.a_hat, (void*) kaddr, size,
     3263 +                HAT_UNLOAD_NOSYNC | HAT_UNLOAD_UNLOCK);
     3264 +        vmem_free(heap_arena, (void*) kaddr, size);
     3265 +    }
     3266 +    uiop->uio_iov->iov_base = 0;
     3267 +    uiop->uio_iov->iov_len = 0;
     3268 +}
     3269 +
     3270 +static int smbfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
     3271 +        size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
     3272 +        caller_context_t *ct) {
     3273 +    smbnode_t *np;
     3274 +    smbmntinfo_t *smi;
     3275 +    struct vattr va;
     3276 +    segvn_crargs_t vn_a;
     3277 +    int error;
     3278 +
     3279 +    np = VTOSMB(vp);
     3280 +    smi = VTOSMI(vp);
     3281 +
     3282 +    if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
     3283 +        return (EIO);
     3284 +
     3285 +    if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
     3286 +        return (EIO);
     3287 +
     3288 +    if (vp->v_flag & VNOMAP || vp->v_flag & VNOCACHE)
     3289 +        return (EAGAIN);
     3290 +
     3291 +    if (vp->v_type != VREG)
     3292 +        return (ENODEV);
     3293 +
     3294 +    va.va_mask = AT_ALL;
      3295 +    if ((error = smbfsgetattr(vp, &va, cr)) != 0)
     3296 +        return (error);
     3297 +
     3298 +    if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, SMBINTR(vp)))
     3299 +        return (EINTR);
     3300 +
     3301 +    if (MANDLOCK(vp, va.va_mode)) {
     3302 +        error = EAGAIN;
     3303 +        goto out;
     3304 +    }
     3305 +
     3306 +    as_rangelock(as);
     3307 +    error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
     3308 +
     3309 +    if (error != 0) {
     3310 +        as_rangeunlock(as);
     3311 +        goto out;
     3312 +    }
     3313 +
     3314 +    vn_a.vp = vp;
     3315 +    vn_a.offset = off;
     3316 +    vn_a.type = flags & MAP_TYPE;
     3317 +    vn_a.prot = prot;
     3318 +    vn_a.maxprot = maxprot;
     3319 +    vn_a.flags = flags & ~MAP_TYPE;
     3320 +    vn_a.cred = cr;
     3321 +    vn_a.amp = NULL;
     3322 +    vn_a.szc = 0;
     3323 +    vn_a.lgrp_mem_policy_flags = 0;
     3324 +
     3325 +    error = as_map(as, *addrp, len, segvn_create, &vn_a);
     3326 +
     3327 +    as_rangeunlock(as);
     3328 +
     3329 +out:
     3330 +    smbfs_rw_exit(&np->r_lkserlock);
     3331 +
     3332 +    return (error);
     3333 +}
     3334 +
     3335 +static int smbfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
     3336 +        size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
     3337 +        caller_context_t *ct) {
      3338 +    atomic_add_long((ulong_t *)&VTOSMB(vp)->r_mapcnt, btopr(len));
     3339 +    return (0);
     3340 +}
     3341 +
     3342 +static int smbfs_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
     3343 +        size_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cr,
     3344 +        caller_context_t *ct) {
     3345 +
     3346 +    smbnode_t *np;
     3347 +
      3348 +    atomic_add_long((ulong_t *)&VTOSMB(vp)->r_mapcnt, -btopr(len));
      3349 +
      3350 +    /* Mark RDIRTY; used to check whether the file is dirty at unmount. */
      3351 +    if (vn_has_cached_data(vp) && !vn_is_readonly(vp) && (maxprot & PROT_WRITE) && flags == MAP_SHARED) {
     3352 +        np = VTOSMB(vp);
     3353 +        mutex_enter(&np->r_statelock);
     3354 +        np->r_flags |= RDIRTY;
     3355 +        mutex_exit(&np->r_statelock);
     3356 +    }
     3357 +    return (0);
     3358 +}
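
A sketch of the case the RDIRTY marking covers: a MAP_SHARED writer that unmaps without msync(2), leaving dirty pages that only the VM layer knows about (hypothetical path; error checks omitted):

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int
    main(void)
    {
            int fd = open("/mnt/smb/f", O_RDWR);    /* hypothetical path */
            char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                MAP_SHARED, fd, 0);

            p[0] = 'x';             /* dirties the page; no write(2) */
            (void) munmap(p, 4096); /* smbfs_delmap() sets RDIRTY */
            (void) close(fd);       /* pages flushed later, e.g. at
                                       last close or unmount */
            return (0);
    }
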
     3359 +
     3360 +static int smbfs_putpage(vnode_t *vp, offset_t off, size_t len, int flags,
     3361 +        cred_t *cr, caller_context_t *ct) {
     3362 +
     3363 +    smbnode_t *np;
     3364 +    size_t io_len;
     3365 +    u_offset_t io_off;
     3366 +    u_offset_t eoff;
     3367 +    int error = 0;
     3368 +    page_t *pp;
     3369 +
     3370 +    np = VTOSMB(vp);
     3371 +
     3372 +    if (len == 0) {
     3373 +        /* will flush the file, so clear RDIRTY */
     3374 +        if (off == (u_offset_t) 0 && (np->r_flags & RDIRTY)) {
     3375 +            mutex_enter(&np->r_statelock);
     3376 +            np->r_flags &= ~RDIRTY;
     3377 +            mutex_exit(&np->r_statelock);
     3378 +        }
     3379 +
     3380 +        error = pvn_vplist_dirty(vp, off, smbfs_putapage, flags, cr);
     3381 +    } else {
     3382 +
     3383 +        eoff = off + len;
     3384 +
     3385 +        mutex_enter(&np->r_statelock);
     3386 +        if (eoff > np->r_size)
     3387 +            eoff = np->r_size;
     3388 +        mutex_exit(&np->r_statelock);
     3389 +
     3390 +        for (io_off = off; io_off < eoff; io_off += io_len) {
     3391 +            if ((flags & B_INVAL) || (flags & B_ASYNC) == 0) {
     3392 +                pp = page_lookup(vp, io_off,
     3393 +                        (flags & (B_INVAL | B_FREE) ? SE_EXCL : SE_SHARED));
     3394 +            } else {
     3395 +                pp = page_lookup_nowait(vp, io_off,
     3396 +                        (flags & B_FREE) ? SE_EXCL : SE_SHARED);
     3397 +            }
     3398 +
      3399 +            if (pp == NULL || !pvn_getdirty(pp, flags)) {
      3400 +                io_len = PAGESIZE;
      3401 +            } else {
      3402 +                error = smbfs_putapage(vp, pp, &io_off, &io_len, flags, cr);
      3403 +            }
     3404 +        }
     3405 +
     3406 +    }
     3407 +
     3408 +    return (error);
     3409 +}
     3410 +
     3411 +static int smbfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp, size_t *lenp,
     3412 +        int flags, cred_t *cr) {
     3413 +
     3414 +    struct smb_cred scred;
     3415 +    smbnode_t *np;
     3416 +    smbmntinfo_t *smi;
     3417 +    smb_share_t *ssp;
     3418 +    uio_t uio;
     3419 +    iovec_t uiov, uiov_bak;
     3420 +
     3421 +    size_t io_len;
     3422 +    u_offset_t io_off;
     3423 +    size_t bsize;
     3424 +    size_t blksize;
     3425 +    u_offset_t blkoff;
     3426 +    int error;
     3427 +
     3428 +    np = VTOSMB(vp);
     3429 +    smi = VTOSMI(vp);
     3430 +    ssp = smi->smi_share;
     3431 +
      3432 +    /* Do block I/O: get a kluster of dirty pages in one block. */
     3433 +    bsize = MAX(vp->v_vfsp->vfs_bsize, PAGESIZE);
     3434 +    blkoff = pp->p_offset / bsize;
     3435 +    blkoff *= bsize;
     3436 +    blksize = roundup(bsize, PAGESIZE);
     3437 +
     3438 +    pp = pvn_write_kluster(vp, pp, &io_off, &io_len, blkoff, blksize, flags);
     3439 +
     3440 +    ASSERT(pp->p_offset >= blkoff);
     3441 +
     3442 +    if (io_off + io_len > blkoff + blksize) {
      3443 +        ASSERT((io_off + io_len) - (blkoff + blksize) < PAGESIZE);
     3444 +        io_len = blkoff + blksize - io_off;
     3445 +    }
     3446 +
      3447 +    /* Currently we don't allow putting pages beyond EOF, unless
      3448 +     * smbfs_read/smbfs_write can do I/O through segkpm or vpm. */
     3449 +    mutex_enter(&np->r_statelock);
     3450 +    if (io_off >= np->r_size) {
     3451 +        mutex_exit(&np->r_statelock);
     3452 +        error = 0;
     3453 +        goto out;
     3454 +    } else if (io_off + io_len > np->r_size) {
     3455 +        int npages = btopr(np->r_size - io_off);
     3456 +        page_t *trunc;
     3457 +        page_list_break(&pp, &trunc, npages);
     3458 +        if (trunc)
     3459 +            pvn_write_done(trunc, flags);
     3460 +        io_len = np->r_size - io_off;
     3461 +    }
     3462 +    mutex_exit(&np->r_statelock);
     3463 +
      3464 +    if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER, SMBINTR(vp)))
      3465 +        { error = EINTR; goto out; }    /* don't leak the locked kluster */
     3466 +    smb_credinit(&scred, cr);
     3467 +
     3468 +    if (np->n_vcgenid != ssp->ss_vcgenid)
     3469 +        error = ESTALE;
     3470 +    else {
      3471 +        /* Use a uio instead of a buf, since smb_rwuio() needs a uio. */
     3472 +        uiov.iov_base = 0;
     3473 +        uiov.iov_len = 0;
     3474 +        uio.uio_iov = &uiov;
     3475 +        uio.uio_iovcnt = 1;
     3476 +        uio.uio_loffset = io_off;
     3477 +        uio.uio_resid = io_len;
     3478 +        uio.uio_segflg = UIO_SYSSPACE;
     3479 +        uio.uio_llimit = MAXOFFSET_T;
      3480 +        /* Map the pages into kernel address space and set up the uio. */
     3481 +        error = uio_page_mapin(&uio, pp);
     3482 +        if (error == 0) {
     3483 +            uiov_bak.iov_base = uiov.iov_base;
     3484 +            uiov_bak.iov_len = uiov.iov_len;
     3485 +            error = smb_rwuio(ssp, np->n_fid, UIO_WRITE, &uio, &scred, smb_timo_write);
     3486 +            if (error == 0) {
     3487 +                mutex_enter(&np->r_statelock);
     3488 +                np->n_flag |= (NFLUSHWIRE | NATTRCHANGED);
     3489 +                mutex_exit(&np->r_statelock);
     3490 +                (void) smbfs_smb_flush(np, &scred);
     3491 +            }
      3492 +            /* Unmap the pages from kernel address space. */
     3493 +            uio.uio_iov = &uiov_bak;
     3494 +            uio_page_mapout(&uio, pp);
     3495 +        }
     3496 +    }
     3497 +
     3498 +    smb_credrele(&scred);
     3499 +    smbfs_rw_exit(&np->r_lkserlock);
     3500 +
     3501 +out:
     3502 +    pvn_write_done(pp, ((error) ? B_ERROR : 0) | B_WRITE | flags);
     3503 +
     3504 +    if (offp)
     3505 +        *offp = io_off;
     3506 +    if (lenp)
     3507 +        *lenp = io_len;
     3508 +
     3509 +    return (error);
     3510 +}
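
The klustering arithmetic above rounds the target page down to a block boundary; a worked example, assuming vfs_bsize = 32K and 4K pages:

    /*
     * pp->p_offset = 0x15000 (vfs_bsize = 0x8000, PAGESIZE = 0x1000):
     *   bsize   = MAX(0x8000, 0x1000)          = 0x8000
     *   blkoff  = (0x15000 / 0x8000) * 0x8000  = 0x10000
     *   blksize = roundup(0x8000, 0x1000)      = 0x8000
     * pvn_write_kluster() may then gather dirty pages anywhere in
     * [0x10000, 0x18000), and the io_len clamp that follows trims
     * any overshoot back to that block.
     */
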
     3511 +
     3512 +static int smbfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp,
     3513 +        page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
     3514 +        enum seg_rw rw, cred_t *cr, caller_context_t *ct) {
     3515 +
     3516 +    int error;
     3517 +
      3518 +    /* These pages have all protections. */
     3519 +    if (protp)
     3520 +        *protp = PROT_ALL;
     3521 +
     3522 +    if (len <= PAGESIZE) {
     3523 +        error = smbfs_getapage(vp, off, len, protp, pl, plsz, seg, addr, rw,
     3524 +                cr);
     3525 +    } else {
     3526 +        error = pvn_getpages(smbfs_getapage, vp, off, len, protp, pl, plsz, seg,
     3527 +                addr, rw, cr);
     3528 +    }
     3529 +
     3530 +    return (error);
     3531 +}
     3532 +
     3533 +static int smbfs_getapage(vnode_t *vp, u_offset_t off, size_t len,
     3534 +        uint_t *protp, page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
     3535 +        enum seg_rw rw, cred_t *cr) {
     3536 +
     3537 +    smbnode_t *np;
     3538 +    smbmntinfo_t *smi;
     3539 +    smb_share_t *ssp;
     3540 +    smb_cred_t scred;
     3541 +
     3542 +    page_t *pp;
     3543 +    uio_t uio;
     3544 +    iovec_t uiov, uiov_bak;
     3545 +
     3546 +    u_offset_t blkoff;
     3547 +    size_t bsize;
     3548 +    size_t blksize;
     3549 +
     3550 +    u_offset_t io_off;
     3551 +    size_t io_len;
     3552 +    size_t pages_len;
     3553 +
     3554 +    int error = 0;
     3555 +
     3556 +    np = VTOSMB(vp);
     3557 +    smi = VTOSMI(vp);
     3558 +    ssp = smi->smi_share;
     3559 +
      3560 +    /* A NULL page list is not supported. */
     3561 +    if (pl == NULL)
     3562 +        return (EFAULT);
     3563 +
     3564 +again:
     3565 +    if (page_exists(vp, off) == NULL) {
     3566 +        if (rw == S_CREATE) {
      3567 +            /* Just return an empty page if asked to create one. */
     3568 +            if ((pp = page_create_va(vp, off, PAGESIZE, PG_WAIT | PG_EXCL, seg, addr)) == NULL)
     3569 +                goto again;
     3570 +            pages_len = PAGESIZE;
     3571 +        } else {
     3572 +
      3573 +            /* Do block I/O: get a kluster of non-existent pages in one block. */
     3574 +            bsize = MAX(vp->v_vfsp->vfs_bsize, PAGESIZE);
     3575 +            blkoff = off / bsize;
     3576 +            blkoff *= bsize;
     3577 +            blksize = roundup(bsize, PAGESIZE);
     3578 +
     3579 +            pp = pvn_read_kluster(vp, off, seg, addr, &io_off, &io_len, blkoff, blksize, 0);
     3580 +
     3581 +            if (pp == NULL)
     3582 +                goto again;
     3583 +
     3584 +            pages_len = io_len;
     3585 +
      3586 +            /* Currently we don't allow getting pages beyond EOF, unless
      3587 +             * smbfs_read/smbfs_write can do I/O through segkpm or vpm. */
     3588 +            mutex_enter(&np->r_statelock);
     3589 +            if (io_off >= np->r_size) {
     3590 +                mutex_exit(&np->r_statelock);
     3591 +                error = 0;
     3592 +                goto out;
     3593 +            } else if (io_off + io_len > np->r_size) {
     3594 +                int npages = btopr(np->r_size - io_off);
     3595 +                page_t *trunc;
     3596 +
     3597 +                page_list_break(&pp, &trunc, npages);
     3598 +                if (trunc)
     3599 +                    pvn_read_done(trunc, 0);
     3600 +                io_len = np->r_size - io_off;
     3601 +            }
     3602 +            mutex_exit(&np->r_statelock);
     3603 +
      3604 +            if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER, SMBINTR(vp)))
      3605 +                { error = EINTR; goto out; }    /* don't leak the locked kluster */
     3606 +            smb_credinit(&scred, cr);
     3607 +
      3608 +            /* Use a uio instead of a buf, since smb_rwuio() needs a uio. */
     3609 +            uiov.iov_base = 0;
     3610 +            uiov.iov_len = 0;
     3611 +            uio.uio_iov = &uiov;
     3612 +            uio.uio_iovcnt = 1;
     3613 +            uio.uio_loffset = io_off;
     3614 +            uio.uio_resid = io_len;
     3615 +            uio.uio_segflg = UIO_SYSSPACE;
     3616 +            uio.uio_llimit = MAXOFFSET_T;
     3617 +
      3618 +            /* Map the pages into kernel address space and set up the uio. */
     3619 +            error = uio_page_mapin(&uio, pp);
     3620 +            if (error == 0) {
     3621 +                uiov_bak.iov_base = uiov.iov_base;
     3622 +                uiov_bak.iov_len = uiov.iov_len;
     3623 +                error = smb_rwuio(ssp, np->n_fid, UIO_READ, &uio, &scred, smb_timo_read);
      3624 +                /* Unmap the pages from kernel address space. */
     3625 +                uio.uio_iov = &uiov_bak;
     3626 +                uio_page_mapout(&uio, pp);
     3627 +            }
     3628 +
     3629 +            smb_credrele(&scred);
     3630 +            smbfs_rw_exit(&np->r_lkserlock);
     3631 +        }
     3632 +    } else {
      3633 +        se_t se = (rw == S_CREATE) ? SE_EXCL : SE_SHARED;
      3634 +        if ((pp = page_lookup(vp, off, se)) == NULL)
      3635 +            goto again;
      3636 +        pages_len = PAGESIZE;   /* for pvn_plist_init() below */
     3637 +    }
     3638 +
     3639 +out:
     3640 +    if (pp) {
     3641 +        if (error) {
     3642 +            pvn_read_done(pp, B_ERROR);
     3643 +        } else {
      3644 +            /* Initialize the page list and unlock the pages. */
     3645 +            pvn_plist_init(pp, pl, plsz, off, pages_len, rw);
     3646 +        }
     3647 +    }
     3648 +
     3649 +    return (error);
     3650 +}
     3651 +
     3652 +
     3653 +void smbfs_invalidate_pages(vnode_t *vp, u_offset_t off, cred_t *cr) {
     3654 +
     3655 +    smbnode_t *np;
     3656 +
     3657 +    np = VTOSMB(vp);
     3658 +    /* will flush the file, so clear RDIRTY */
     3659 +    if (off == (u_offset_t) 0 && (np->r_flags & RDIRTY)) {
     3660 +        mutex_enter(&np->r_statelock);
     3661 +        np->r_flags &= ~RDIRTY;
     3662 +        mutex_exit(&np->r_statelock);
     3663 +    }
     3664 +
     3665 +    (void) pvn_vplist_dirty(vp, off, smbfs_putapage, B_INVAL | B_TRUNC, cr);
     3666 +}
     3667 +
     3668 +
    