158 static int smbfs_rmdir(vnode_t *, char *, vnode_t *, cred_t *,
159 caller_context_t *, int);
160 static int smbfs_readdir(vnode_t *, struct uio *, cred_t *, int *,
161 caller_context_t *, int);
162 static int smbfs_rwlock(vnode_t *, int, caller_context_t *);
163 static void smbfs_rwunlock(vnode_t *, int, caller_context_t *);
164 static int smbfs_seek(vnode_t *, offset_t, offset_t *, caller_context_t *);
165 static int smbfs_frlock(vnode_t *, int, struct flock64 *, int, offset_t,
166 struct flk_callback *, cred_t *, caller_context_t *);
167 static int smbfs_space(vnode_t *, int, struct flock64 *, int, offset_t,
168 cred_t *, caller_context_t *);
169 static int smbfs_pathconf(vnode_t *, int, ulong_t *, cred_t *,
170 caller_context_t *);
171 static int smbfs_setsecattr(vnode_t *, vsecattr_t *, int, cred_t *,
172 caller_context_t *);
173 static int smbfs_getsecattr(vnode_t *, vsecattr_t *, int, cred_t *,
174 caller_context_t *);
175 static int smbfs_shrlock(vnode_t *, int, struct shrlock *, int, cred_t *,
176 caller_context_t *);
177
178 /* Dummy function to use until correct function is ported in */
179 int noop_vnodeop() {
180 return (0);
181 }
182
183 struct vnodeops *smbfs_vnodeops = NULL;
184
185 /*
186 * Most unimplemented ops will return ENOSYS because of fs_nosys().
187 * The only ops where that won't work are ACCESS (due to open(2)
188 * failures) and ... (anything else left?)
189 */
190 const fs_operation_def_t smbfs_vnodeops_template[] = {
191 { VOPNAME_OPEN, { .vop_open = smbfs_open } },
192 { VOPNAME_CLOSE, { .vop_close = smbfs_close } },
193 { VOPNAME_READ, { .vop_read = smbfs_read } },
194 { VOPNAME_WRITE, { .vop_write = smbfs_write } },
195 { VOPNAME_IOCTL, { .vop_ioctl = smbfs_ioctl } },
196 { VOPNAME_GETATTR, { .vop_getattr = smbfs_getattr } },
197 { VOPNAME_SETATTR, { .vop_setattr = smbfs_setattr } },
198 { VOPNAME_ACCESS, { .vop_access = smbfs_access } },
199 { VOPNAME_LOOKUP, { .vop_lookup = smbfs_lookup } },
200 { VOPNAME_CREATE, { .vop_create = smbfs_create } },
201 { VOPNAME_REMOVE, { .vop_remove = smbfs_remove } },
202 { VOPNAME_LINK, { .error = fs_nosys } }, /* smbfs_link, */
203 { VOPNAME_RENAME, { .vop_rename = smbfs_rename } },
204 { VOPNAME_MKDIR, { .vop_mkdir = smbfs_mkdir } },
205 { VOPNAME_RMDIR, { .vop_rmdir = smbfs_rmdir } },
206 { VOPNAME_READDIR, { .vop_readdir = smbfs_readdir } },
207 { VOPNAME_SYMLINK, { .error = fs_nosys } }, /* smbfs_symlink, */
208 { VOPNAME_READLINK, { .error = fs_nosys } }, /* smbfs_readlink, */
209 { VOPNAME_FSYNC, { .vop_fsync = smbfs_fsync } },
210 { VOPNAME_INACTIVE, { .vop_inactive = smbfs_inactive } },
211 { VOPNAME_FID, { .error = fs_nosys } }, /* smbfs_fid, */
212 { VOPNAME_RWLOCK, { .vop_rwlock = smbfs_rwlock } },
213 { VOPNAME_RWUNLOCK, { .vop_rwunlock = smbfs_rwunlock } },
214 { VOPNAME_SEEK, { .vop_seek = smbfs_seek } },
215 { VOPNAME_FRLOCK, { .vop_frlock = smbfs_frlock } },
216 { VOPNAME_SPACE, { .vop_space = smbfs_space } },
217 { VOPNAME_REALVP, { .error = fs_nosys } }, /* smbfs_realvp, */
218 { VOPNAME_GETPAGE, { .error = fs_nosys } }, /* smbfs_getpage, */
219 { VOPNAME_PUTPAGE, { .error = fs_nosys } }, /* smbfs_putpage, */
220 { VOPNAME_MAP, { .error = fs_nosys } }, /* smbfs_map, */
221 { VOPNAME_ADDMAP, { .error = fs_nosys } }, /* smbfs_addmap, */
222 { VOPNAME_DELMAP, { .error = fs_nosys } }, /* smbfs_delmap, */
223 { VOPNAME_DUMP, { .error = fs_nosys } }, /* smbfs_dump, */
224 { VOPNAME_PATHCONF, { .vop_pathconf = smbfs_pathconf } },
225 { VOPNAME_PAGEIO, { .error = fs_nosys } }, /* smbfs_pageio, */
226 { VOPNAME_SETSECATTR, { .vop_setsecattr = smbfs_setsecattr } },
227 { VOPNAME_GETSECATTR, { .vop_getsecattr = smbfs_getsecattr } },
228 { VOPNAME_SHRLOCK, { .vop_shrlock = smbfs_shrlock } },
229 { NULL, NULL }
230 };
231
232 /*
233 * XXX
234 * When new and relevant functionality is enabled, we should be
235 * calling vfs_set_feature() to inform callers that pieces of
236 * functionality are available, per PSARC 2007/227.
237 */
238 /* ARGSUSED */
239 static int
240 smbfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
241 {
242 smbnode_t *np;
3093 return (error);
3094 }
3095
3096
3097 /*
3098 * XXX
3099 * This op should eventually support PSARC 2007/268.
3100 */
3101 static int
3102 smbfs_shrlock(vnode_t *vp, int cmd, struct shrlock *shr, int flag, cred_t *cr,
3103 caller_context_t *ct)
3104 {
3105 if (curproc->p_zone != VTOSMI(vp)->smi_zone_ref.zref_zone)
3106 return (EIO);
3107
3108 if (VTOSMI(vp)->smi_flags & SMI_LLOCK)
3109 return (fs_shrlock(vp, cmd, shr, flag, cr, ct));
3110 else
3111 return (ENOSYS);
3112 }
/*
 * Above: the original smbfs_vnops.c excerpts, in which the paging and
 * mapping operations (GETPAGE, PUTPAGE, MAP, ADDMAP, DELMAP) are still
 * stubbed out with fs_nosys.  Below: the modified excerpts that add
 * memory-mapped file support.
 */
158 static int smbfs_rmdir(vnode_t *, char *, vnode_t *, cred_t *,
159 caller_context_t *, int);
160 static int smbfs_readdir(vnode_t *, struct uio *, cred_t *, int *,
161 caller_context_t *, int);
162 static int smbfs_rwlock(vnode_t *, int, caller_context_t *);
163 static void smbfs_rwunlock(vnode_t *, int, caller_context_t *);
164 static int smbfs_seek(vnode_t *, offset_t, offset_t *, caller_context_t *);
165 static int smbfs_frlock(vnode_t *, int, struct flock64 *, int, offset_t,
166 struct flk_callback *, cred_t *, caller_context_t *);
167 static int smbfs_space(vnode_t *, int, struct flock64 *, int, offset_t,
168 cred_t *, caller_context_t *);
169 static int smbfs_pathconf(vnode_t *, int, ulong_t *, cred_t *,
170 caller_context_t *);
171 static int smbfs_setsecattr(vnode_t *, vsecattr_t *, int, cred_t *,
172 caller_context_t *);
173 static int smbfs_getsecattr(vnode_t *, vsecattr_t *, int, cred_t *,
174 caller_context_t *);
175 static int smbfs_shrlock(vnode_t *, int, struct shrlock *, int, cred_t *,
176 caller_context_t *);
177
178 static int smbfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
179 size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
180 caller_context_t *ct);
181
182 static int smbfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
183 size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
184 caller_context_t *ct);
185
186 static int smbfs_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
187 size_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cr,
188 caller_context_t *ct);
189
190 static int smbfs_putpage(vnode_t *vp, offset_t off, size_t len, int flags,
191 cred_t *cr, caller_context_t *ct);
192
193 static int smbfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp, size_t *lenp,
194 int flags, cred_t *cr);
195
196 static int up_mapin(uio_t *uiop, page_t *pp);
197
198 static int up_mapout(uio_t *uiop, page_t *pp);
199
200 static int smbfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp,
201 page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr, enum seg_rw rw,
202 cred_t *cr, caller_context_t *ct);
203 static int smbfs_getapage(vnode_t *vp, u_offset_t off, size_t len, uint_t *protp,
204 page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr, enum seg_rw rw,
205 cred_t *cr);
206
207
208 /* Dummy function to use until correct function is ported in */
209 int noop_vnodeop() {
210 return (0);
211 }
212
213 struct vnodeops *smbfs_vnodeops = NULL;
214
215 /*
216 * Most unimplemented ops will return ENOSYS because of fs_nosys().
217 * The only ops where that won't work are ACCESS (due to open(2)
218 * failures) and ... (anything else left?)
219 */
220 const fs_operation_def_t smbfs_vnodeops_template[] = {
221 { VOPNAME_OPEN, { .vop_open = smbfs_open } },
222 { VOPNAME_CLOSE, { .vop_close = smbfs_close } },
223 { VOPNAME_READ, { .vop_read = smbfs_read } },
224 { VOPNAME_WRITE, { .vop_write = smbfs_write } },
225 { VOPNAME_IOCTL, { .vop_ioctl = smbfs_ioctl } },
226 { VOPNAME_GETATTR, { .vop_getattr = smbfs_getattr } },
227 { VOPNAME_SETATTR, { .vop_setattr = smbfs_setattr } },
228 { VOPNAME_ACCESS, { .vop_access = smbfs_access } },
229 { VOPNAME_LOOKUP, { .vop_lookup = smbfs_lookup } },
230 { VOPNAME_CREATE, { .vop_create = smbfs_create } },
231 { VOPNAME_REMOVE, { .vop_remove = smbfs_remove } },
232 { VOPNAME_LINK, { .error = fs_nosys } }, /* smbfs_link, */
233 { VOPNAME_RENAME, { .vop_rename = smbfs_rename } },
234 { VOPNAME_MKDIR, { .vop_mkdir = smbfs_mkdir } },
235 { VOPNAME_RMDIR, { .vop_rmdir = smbfs_rmdir } },
236 { VOPNAME_READDIR, { .vop_readdir = smbfs_readdir } },
237 { VOPNAME_SYMLINK, { .error = fs_nosys } }, /* smbfs_symlink, */
238 { VOPNAME_READLINK, { .error = fs_nosys } }, /* smbfs_readlink, */
239 { VOPNAME_FSYNC, { .vop_fsync = smbfs_fsync } },
240 { VOPNAME_INACTIVE, { .vop_inactive = smbfs_inactive } },
241 { VOPNAME_FID, { .error = fs_nosys } }, /* smbfs_fid, */
242 { VOPNAME_RWLOCK, { .vop_rwlock = smbfs_rwlock } },
243 { VOPNAME_RWUNLOCK, { .vop_rwunlock = smbfs_rwunlock } },
244 { VOPNAME_SEEK, { .vop_seek = smbfs_seek } },
245 { VOPNAME_FRLOCK, { .vop_frlock = smbfs_frlock } },
246 { VOPNAME_SPACE, { .vop_space = smbfs_space } },
247 { VOPNAME_REALVP, { .error = fs_nosys } }, /* smbfs_realvp, */
248 { VOPNAME_GETPAGE, { .vop_getpage = smbfs_getpage } },
249 { VOPNAME_PUTPAGE, { .vop_putpage = smbfs_putpage } },
250 { VOPNAME_MAP, { .vop_map = smbfs_map } },
251 { VOPNAME_ADDMAP, { .vop_addmap = smbfs_addmap } },
252 { VOPNAME_DELMAP, { .vop_delmap = smbfs_delmap } },
253 { VOPNAME_DUMP, { .error = fs_nosys } }, /* smbfs_dump, */
254 { VOPNAME_PATHCONF, { .vop_pathconf = smbfs_pathconf } },
255 { VOPNAME_PAGEIO, { .error = fs_nosys } }, /* smbfs_pageio, */
256 { VOPNAME_SETSECATTR, { .vop_setsecattr = smbfs_setsecattr } },
257 { VOPNAME_GETSECATTR, { .vop_getsecattr = smbfs_getsecattr } },
258 { VOPNAME_SHRLOCK, { .vop_shrlock = smbfs_shrlock } },
259 { NULL, NULL }
260 };
261
262 /*
263 * XXX
264 * When new and relevant functionality is enabled, we should be
265 * calling vfs_set_feature() to inform callers that pieces of
266 * functionality are available, per PSARC 2007/227.
267 */
268 /* ARGSUSED */
269 static int
270 smbfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
271 {
272 smbnode_t *np;
3123 return (error);
3124 }
3125
3126
3127 /*
3128 * XXX
3129 * This op should eventually support PSARC 2007/268.
3130 */
3131 static int
3132 smbfs_shrlock(vnode_t *vp, int cmd, struct shrlock *shr, int flag, cred_t *cr,
3133 caller_context_t *ct)
3134 {
3135 if (curproc->p_zone != VTOSMI(vp)->smi_zone_ref.zref_zone)
3136 return (EIO);
3137
3138 if (VTOSMI(vp)->smi_flags & SMI_LLOCK)
3139 return (fs_shrlock(vp, cmd, shr, flag, cr, ct));
3140 else
3141 return (ENOSYS);
3142 }
3143
3144
3145
3146
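/*
 * Memory-mapped file support.
 *
 * smbfs_map() validates the request (correct zone, share not dead or
 * unmounted, no VNOMAP/VNOCACHE, regular file, current attributes
 * fetchable, no mandatory locking), then reserves an address range with
 * choose_addr() and creates a segvn segment backed by this vnode.
 */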
3147 static int smbfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
3148 size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
3149 caller_context_t *ct) {
3150 smbnode_t *np;
3151 smbmntinfo_t *smi;
3152 struct vattr va;
3153 segvn_crargs_t vn_a;
3154 int error;
3155
3156 np = VTOSMB(vp);
3157 smi = VTOSMI(vp);
3158
3159 if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
3160 return (EIO);
3161
3162 if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
3163 return (EIO);
3164
3165 if (vp->v_flag & VNOMAP || vp->v_flag & VNOCACHE)
3166 return (EAGAIN);
3167
3168 if (vp->v_type != VREG)
3169 return (ENODEV);
3170
3171 va.va_mask = AT_ALL;
3172
3173 if ((error = smbfsgetattr(vp, &va, cr)) != 0)
3174 return (error);
3175
3176 if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, SMBINTR(vp)))
3177 return (EINTR);
3178
3179 if (MANDLOCK(vp, va.va_mode)) {
3180 error = EAGAIN;
3181 goto out;
3182 }
3183
3184 as_rangelock(as);
3185 error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
3186
3187 if (error != 0) {
3188 as_rangeunlock(as);
3189 goto out;
3190 }
3191
3192 vn_a.vp = vp;
3193 vn_a.offset = (u_offset_t) off;
3194 vn_a.type = flags & MAP_TYPE;
3195 vn_a.prot = (uchar_t) prot;
3196 vn_a.maxprot = (uchar_t) maxprot;
3197 vn_a.cred = cr;
3198 vn_a.amp = NULL;
3199 vn_a.flags = flags & ~MAP_TYPE;
3200 vn_a.szc = 0;
3201 vn_a.lgrp_mem_policy_flags = 0;
3202
3203 error = as_map(as, *addrp, len, segvn_create, &vn_a);
3204
3205 as_rangeunlock(as);
3206
3207 out:
3208 smbfs_rw_exit(&np->r_lkserlock);
3209
3210 return (error);
3211 }
3212
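/*
 * smbfs_addmap() is called once a mapping has been established; it only
 * bumps r_mapcnt by the number of pages mapped.
 */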
3213 static int smbfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
3214 size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
3215 caller_context_t *ct) {
3216 atomic_add_long((ulong_t *)&VTOSMB(vp)->r_mapcnt, btopr(len));
3217 return (0);
3218 }
3219
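/*
 * smbfs_delmap() undoes the r_mapcnt accounting when a mapping is torn down.
 */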
3220 static int smbfs_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
3221 size_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cr,
3222 caller_context_t *ct) {
3223 atomic_add_long((ulong_t *)&VTOSMB(vp)->r_mapcnt, -btopr(len));
3224 return (0);
3225 }
3226
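/*
 * smbfs_putpage() flushes dirty pages to the server.  A zero length means
 * "the whole file" and is handed to pvn_vplist_dirty(); otherwise the
 * requested range (clamped to the file size) is walked one page at a time
 * and each dirty page is pushed out through smbfs_putapage().
 */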
3227 static int smbfs_putpage(vnode_t *vp, offset_t off, size_t len, int flags,
3228 cred_t *cr, caller_context_t *ct) {
3229
3230 smbnode_t *np;
3231 size_t io_len;
3232 u_offset_t io_off;
3233 u_offset_t eoff;
3234 int error = 0;
3235 page_t *pp;
3236
3237 np = VTOSMB(vp);
3238
3239 if (len == 0) {
3240 error = pvn_vplist_dirty(vp, off, smbfs_putapage, flags, cr);
3241 } else {
3242
3243 eoff = off + len;
3244
3245 mutex_enter(&np->r_statelock);
3246 if (eoff > np->r_size)
3247 eoff = np->r_size;
3248 mutex_exit(&np->r_statelock);
3249
3250 for (io_off = off; io_off < eoff; io_off += io_len) {
3251 if ((flags & B_INVAL) || (flags & B_ASYNC) == 0) {
3252 pp = page_lookup(vp, io_off,
3253 (flags & (B_INVAL | B_FREE) ? SE_EXCL : SE_SHARED));
3254 } else {
3255 pp = page_lookup_nowait(vp, io_off,
3256 (flags & B_FREE) ? SE_EXCL : SE_SHARED);
3257 }
3258
3259 if (pp == NULL || !pvn_getdirty(pp, flags))
3260 io_len = PAGESIZE;
3261 else {
3262 error = smbfs_putapage(vp, pp, &io_off, &io_len, flags, cr);
3263 }
3264 }
3265
3266 }
3267
3268 return (error);
3269 }
3270
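/*
 * Write one page back over SMB.  The page is mapped into kernel address
 * space with up_mapin(), written with smb_rwuio(), flushed on the server,
 * and then released with pvn_write_done().  Pages entirely beyond EOF are
 * simply discarded, and a write that straddles EOF is shortened to r_size.
 */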
3271 static int smbfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp, size_t *lenp,
3272 int flags, cred_t *cr) {
3273
3274 struct smb_cred scred;
3275 smbnode_t *np;
3276 smbmntinfo_t *smi;
3277 smb_share_t *ssp;
3278 uio_t uio;
3279 iovec_t uiov;
3280
3281 u_offset_t off;
3282 size_t len;
3283 int error, timo;
3284
3285 np = VTOSMB(vp);
3286 smi = VTOSMI(vp);
3287 ssp = smi->smi_share;
3288
3289 off = pp->p_offset;
3290 len = PAGESIZE;
3291
3292 if (off >= np->r_size) {
3293 error = 0;
3294 goto out;
3295 } else if (off + len > np->r_size) {
3296 int npages = btopr(np->r_size - off);
3297 page_t *trunc;
3298
3299 page_list_break(&pp, &trunc, npages);
3300 if (trunc)
3301 pvn_write_done(trunc, flags);
3302 len = np->r_size - off;
3303 }
3304
3305 timo = smb_timo_write;
3306
if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER, SMBINTR(vp))) {
/* Dispose of the page via out: instead of leaking it when interrupted. */
error = EINTR;
goto out;
}
3309 smb_credinit(&scred, cr);
3310
3311 if (np->n_vcgenid != ssp->ss_vcgenid)
3312 error = ESTALE;
3313 else {
3314 uiov.iov_base = 0;
3315 uiov.iov_len = 0;
3316 uio.uio_iov = &uiov;
3317 uio.uio_iovcnt = 1;
3318 uio.uio_loffset = off;
3319 uio.uio_resid = len;
3320 uio.uio_segflg = UIO_SYSSPACE;
3321 uio.uio_llimit = MAXOFFSET_T;
3322 error = up_mapin(&uio, pp);
3323 if (error == 0) {
3324 error = smb_rwuio(ssp, np->n_fid, UIO_WRITE, &uio, &scred, timo);
3325 if (error == 0) {
3326 mutex_enter(&np->r_statelock);
3327 np->n_flag |= (NFLUSHWIRE | NATTRCHANGED);
3328 mutex_exit(&np->r_statelock);
3329 (void) smbfs_smb_flush(np, &scred);
3330 }
3331 up_mapout(&uio, pp);
3332 }
3333 }
3334 smb_credrele(&scred);
3335 smbfs_rw_exit(&np->r_lkserlock);
3336
out:
pvn_write_done(pp, B_WRITE | (error ? B_ERROR : 0) | flags);

/* Report back what was handled; this routine always disposes of one page. */
if (offp != NULL)
*offp = off;
if (lenp != NULL)
*lenp = PAGESIZE;

return (error);
3341 }
3342
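/*
 * Helpers that map a single page into kernel virtual address space through
 * the kernel physical mapping (kpm) framework so smb_rwuio() can treat it
 * as an ordinary UIO_SYSSPACE buffer.  Only the single-page, kpm_enable
 * case is handled; anything else fails with EFAULT (no fallback mapping
 * path is provided here).
 */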
3343 static int up_mapin(uio_t *uiop, page_t *pp) {
3344 u_offset_t off;
3345 size_t size;
3346 pgcnt_t npages;
3347 caddr_t kaddr;
3348
3349 off = (uintptr_t) uiop->uio_loffset & PAGEOFFSET;
3350 size = P2ROUNDUP(uiop->uio_resid + off, PAGESIZE);
3351 npages = btop(size);
3352
3353 if (npages == 1 && kpm_enable) {
3354 kaddr = hat_kpm_mapin(pp, NULL);
3355 uiop->uio_iov->iov_base = kaddr;
3356 uiop->uio_iov->iov_len = PAGESIZE;
3357 return (0);
3358 }
3359 return (EFAULT);
3360 }
3361
3362 static int up_mapout(uio_t *uiop, page_t *pp) {
3363 u_offset_t off;
3364 size_t size;
3365 pgcnt_t npages;
3366 caddr_t kaddr;
3367
3368 kaddr = uiop->uio_iov->iov_base;
3369 off = (uintptr_t) kaddr & PAGEOFFSET;
3370 size = P2ROUNDUP(uiop->uio_iov->iov_len + off, PAGESIZE);
3371 npages = btop(size);
3372
3373 if (npages == 1 && kpm_enable) {
3374 kaddr = (caddr_t) ((uintptr_t) kaddr & MMU_PAGEMASK);
3375 hat_kpm_mapout(pp, NULL, kaddr);
3376 uiop->uio_iov->iov_base = 0;
3377 uiop->uio_iov->iov_len = 0;
3378 return (0);
3379 }
3380 return (EFAULT);
3381 }
3382
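/*
 * smbfs_getpage() is the VOP_GETPAGE entry point.  Requests beyond EOF
 * fault (except through segkmap); single-page requests go straight to
 * smbfs_getapage(), and larger ones are broken up by pvn_getpages().
 */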
3383 static int smbfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp,
3384 page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
3385 enum seg_rw rw, cred_t *cr, caller_context_t *ct) {
3386 int error;
3387 smbnode_t *np;
3388
3389 np = VTOSMB(vp);
3390
3391 mutex_enter(&np->r_statelock);
3392 if (off + len > np->r_size + PAGEOFFSET && seg != segkmap) {
3393 mutex_exit(&np->r_statelock);
3394 return (EFAULT);
3395 }
3396 mutex_exit(&np->r_statelock);
3397 
/* As in other filesystems, report full access and let VOP_ACCESS police it. */
if (protp != NULL)
*protp = PROT_ALL;

3398 if (len <= PAGESIZE) {
3399 error = smbfs_getapage(vp, off, len, protp, pl, plsz, seg, addr, rw,
3400 cr);
3401 } else {
3402 error = pvn_getpages(smbfs_getapage, vp, off, len, protp, pl, plsz, seg,
3403 addr, rw, cr);
3404 }
3405 return (error);
3406 }
3407
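/*
 * Fill in (at most) one page.  If the page is not already cached, a new
 * page is created and, unless rw is S_CREATE, its contents are read from
 * the server with smb_rwuio().  A page found in the cache is just locked
 * and handed back.  Note that a short read near EOF leaves the tail of the
 * page unzeroed; requests beyond EOF are rejected in smbfs_getpage().
 */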
3408 static int smbfs_getapage(vnode_t *vp, u_offset_t off, size_t len,
3409 uint_t *protp, page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
3410 enum seg_rw rw, cred_t *cr) {
3411
3412 smbnode_t *np;
3413 smbmntinfo_t *smi;
3414 smb_share_t *ssp;
3415 smb_cred_t scred;
3416
3417 page_t *pagefound, *pp;
3418 uio_t uio;
3419 iovec_t uiov;
3420
3421 int error = 0, timo;
3422
3423 np = VTOSMB(vp);
3424 smi = VTOSMI(vp);
3425 ssp = smi->smi_share;
3426
3427 if (len > PAGESIZE)
3428 return (EFAULT);
3429 len = PAGESIZE;
3430
3431 if (pl == NULL)
3432 return (EFAULT);
3433
3434 if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER, SMBINTR(vp)))
3435 return (EINTR);
3436
3437 smb_credinit(&scred, cr);
3438
3439 again:
3440 if ((pagefound = page_exists(vp, off)) == NULL) {
3441 if ((pp = page_create_va(vp, off, PAGESIZE, PG_WAIT | PG_EXCL, seg, addr)) == NULL)
3442 goto again;
3443 if (rw == S_CREATE) {
3444 goto out;
3445 } else {
3446 timo = smb_timo_read;
3447
3448 uiov.iov_base = 0;
3449 uiov.iov_len = 0;
3450 uio.uio_iov = &uiov;
3451 uio.uio_iovcnt = 1;
3452 uio.uio_loffset = off;
3453 uio.uio_resid = len;
3454 uio.uio_segflg = UIO_SYSSPACE;
3455 uio.uio_llimit = MAXOFFSET_T;
3456 error = up_mapin(&uio, pp);
3457 if (error == 0) {
3458 error = smb_rwuio(ssp, np->n_fid, UIO_READ, &uio, &scred, timo);
3459 up_mapout(&uio, pp);
3460 }
3461 }
} else {
se_t se = rw == S_CREATE ? SE_EXCL : SE_SHARED;
if ((pp = page_lookup(vp, off, se)) == NULL)
goto again;
/* The page was already cached; just hand it back. */
pl[0] = pp;
pl[1] = NULL;
goto done;
}

out:
if (error == 0 && pp != NULL) {
/* Pages from page_create_va() are io-locked; pvn_plist_init() unlocks them. */
pvn_plist_init(pp, pl, plsz, off, PAGESIZE, rw);
} else if (pp != NULL) {
/* The read failed; throw away the page we created. */
pvn_read_done(pp, B_ERROR);
}

done:
smb_credrele(&scred);
smbfs_rw_exit(&np->r_lkserlock);

return (error);
}