/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/systm.h>
#include <sys/cred.h>
#include <sys/vnode.h>
#include <sys/vfs.h>
#include <sys/filio.h>
#include <sys/uio.h>
#include <sys/dirent.h>
#include <sys/errno.h>
#include <sys/sunddi.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/vfs_opreg.h>
#include <sys/policy.h>

#include <sys/param.h>
#include <sys/vm.h>
#include <vm/seg_vn.h>
#include <vm/pvn.h>
#include <vm/as.h>
#include <vm/hat.h>
#include <vm/page.h>
#include <vm/seg.h>
#include <vm/seg_map.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>

#include <netsmb/smb_osdep.h>
#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_subr.h>

#include <smbfs/smbfs.h>
#include <smbfs/smbfs_node.h>
#include <smbfs/smbfs_subr.h>

#include <sys/fs/smbfs_ioctl.h>
#include <fs/fs_subr.h>

/*
 * We assign directory offsets like the NFS client, where the
 * offset increments by _one_ after each directory entry.
 * Further, the entries "." and ".." are always at offsets
 * zero and one (respectively) and the "real" entries from
 * the server appear at offsets starting with two.  This
 * macro is used to initialize the n_dirofs field after
 * setting n_dirseq with a _findopen call.
 */
static int smbfs_rmdir(vnode_t *, char *, vnode_t *, cred_t *,
    caller_context_t *, int);
static int smbfs_readdir(vnode_t *, struct uio *, cred_t *, int *,
    caller_context_t *, int);
static int smbfs_rwlock(vnode_t *, int, caller_context_t *);
static void smbfs_rwunlock(vnode_t *, int, caller_context_t *);
static int smbfs_seek(vnode_t *, offset_t, offset_t *, caller_context_t *);
static int smbfs_frlock(vnode_t *, int, struct flock64 *, int, offset_t,
    struct flk_callback *, cred_t *, caller_context_t *);
static int smbfs_space(vnode_t *, int, struct flock64 *, int, offset_t,
    cred_t *, caller_context_t *);
static int smbfs_pathconf(vnode_t *, int, ulong_t *, cred_t *,
    caller_context_t *);
static int smbfs_setsecattr(vnode_t *, vsecattr_t *, int, cred_t *,
    caller_context_t *);
static int smbfs_getsecattr(vnode_t *, vsecattr_t *, int, cred_t *,
    caller_context_t *);
static int smbfs_shrlock(vnode_t *, int, struct shrlock *, int, cred_t *,
    caller_context_t *);

static int uio_page_mapin(uio_t *, page_t *);
static void uio_page_mapout(uio_t *, page_t *);
static int smbfs_map(vnode_t *, offset_t, struct as *, caddr_t *, size_t,
    uchar_t, uchar_t, uint_t, cred_t *, caller_context_t *);
static int smbfs_addmap(vnode_t *, offset_t, struct as *, caddr_t, size_t,
    uchar_t, uchar_t, uint_t, cred_t *, caller_context_t *);
static int smbfs_delmap(vnode_t *, offset_t, struct as *, caddr_t, size_t,
    uint_t, uint_t, uint_t, cred_t *, caller_context_t *);
static int smbfs_putpage(vnode_t *, offset_t, size_t, int, cred_t *,
    caller_context_t *);
static int smbfs_putapage(vnode_t *, page_t *, u_offset_t *, size_t *,
    int, cred_t *);
static int smbfs_getpage(vnode_t *, offset_t, size_t, uint_t *, page_t *[],
    size_t, struct seg *, caddr_t, enum seg_rw, cred_t *,
    caller_context_t *);
static int smbfs_getapage(vnode_t *, u_offset_t, size_t, uint_t *,
    page_t *[], size_t, struct seg *, caddr_t, enum seg_rw, cred_t *);

/* Dummy function to use until correct function is ported in */
int noop_vnodeop() {
        return (0);
}

struct vnodeops *smbfs_vnodeops = NULL;

/*
 * Most unimplemented ops will return ENOSYS because of fs_nosys().
 * The only ops where that won't work are ACCESS (due to open(2)
 * failures) and ... (anything else left?)
 */
const fs_operation_def_t smbfs_vnodeops_template[] = {
        { VOPNAME_OPEN,         { .vop_open = smbfs_open } },
        { VOPNAME_CLOSE,        { .vop_close = smbfs_close } },
        { VOPNAME_READ,         { .vop_read = smbfs_read } },
        { VOPNAME_WRITE,        { .vop_write = smbfs_write } },
        { VOPNAME_IOCTL,        { .vop_ioctl = smbfs_ioctl } },
        { VOPNAME_GETATTR,      { .vop_getattr = smbfs_getattr } },
        { VOPNAME_SETATTR,      { .vop_setattr = smbfs_setattr } },
        { VOPNAME_ACCESS,       { .vop_access = smbfs_access } },
        { VOPNAME_LOOKUP,       { .vop_lookup = smbfs_lookup } },
        { VOPNAME_CREATE,       { .vop_create = smbfs_create } },
        { VOPNAME_REMOVE,       { .vop_remove = smbfs_remove } },
        { VOPNAME_LINK,         { .error = fs_nosys } }, /* smbfs_link, */
        { VOPNAME_RENAME,       { .vop_rename = smbfs_rename } },
        { VOPNAME_MKDIR,        { .vop_mkdir = smbfs_mkdir } },
        { VOPNAME_RMDIR,        { .vop_rmdir = smbfs_rmdir } },
        { VOPNAME_READDIR,      { .vop_readdir = smbfs_readdir } },
        { VOPNAME_SYMLINK,      { .error = fs_nosys } }, /* smbfs_symlink, */
        { VOPNAME_READLINK,     { .error = fs_nosys } }, /* smbfs_readlink, */
        { VOPNAME_FSYNC,        { .vop_fsync = smbfs_fsync } },
        { VOPNAME_INACTIVE,     { .vop_inactive = smbfs_inactive } },
        { VOPNAME_FID,          { .error = fs_nosys } }, /* smbfs_fid, */
        { VOPNAME_RWLOCK,       { .vop_rwlock = smbfs_rwlock } },
        { VOPNAME_RWUNLOCK,     { .vop_rwunlock = smbfs_rwunlock } },
        { VOPNAME_SEEK,         { .vop_seek = smbfs_seek } },
        { VOPNAME_FRLOCK,       { .vop_frlock = smbfs_frlock } },
        { VOPNAME_SPACE,        { .vop_space = smbfs_space } },
        { VOPNAME_REALVP,       { .error = fs_nosys } }, /* smbfs_realvp, */
        { VOPNAME_GETPAGE,      { .vop_getpage = smbfs_getpage } },
        { VOPNAME_PUTPAGE,      { .vop_putpage = smbfs_putpage } },
        { VOPNAME_MAP,          { .vop_map = smbfs_map } },
        { VOPNAME_ADDMAP,       { .vop_addmap = smbfs_addmap } },
        { VOPNAME_DELMAP,       { .vop_delmap = smbfs_delmap } },
        { VOPNAME_DISPOSE,      { .vop_dispose = fs_dispose } },
        { VOPNAME_DUMP,         { .error = fs_nosys } }, /* smbfs_dump, */
        { VOPNAME_PATHCONF,     { .vop_pathconf = smbfs_pathconf } },
        { VOPNAME_PAGEIO,       { .error = fs_nosys } }, /* smbfs_pageio, */
        { VOPNAME_SETSECATTR,   { .vop_setsecattr = smbfs_setsecattr } },
        { VOPNAME_GETSECATTR,   { .vop_getsecattr = smbfs_getsecattr } },
        { VOPNAME_SHRLOCK,      { .vop_shrlock = smbfs_shrlock } },
        { NULL, NULL }
};

/*
 * XXX
 * When new and relevant functionality is enabled, we should be
 * calling vfs_set_feature() to inform callers that pieces of
 * functionality are available, per PSARC 2007/227.
 */
/* ARGSUSED */
static int
smbfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
{
        smbnode_t *np;
        }

        /*
         * This (passed in) count is the ref. count from the
         * user's file_t before the closef call (fio.c).
         * We only care when the reference goes away.
         */
        if (count > 1)
                return (0);

        /*
         * Decrement the reference count for the FID
         * and possibly do the OtW close.
         *
         * Exclusive lock for modifying n_fid stuff.
         * Don't want this one ever interruptible.
         */
        (void) smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, 0);
        smb_credinit(&scred, cr);

        /*
         * If the FID ref. count is 1 and the count of mapped
         * pages is not zero, don't call smbfs_rele_fid() here,
         * because that would do the OtW close.  The mapped pages
         * may still be accessed after close(), so keep the FID
         * valid, i.e. don't do the OtW close yet.  The FID is
         * not leaked: once the vnode's ref. count drops to zero,
         * smbfs_inactive() will release the FID and eventually
         * do the OtW close.
         */
        if (np->n_fidrefs > 1) {
                smbfs_rele_fid(np, &scred);
        } else if (np->r_mapcnt == 0) {
                /*
                 * Before the OtW close, make sure dirty pages
                 * are written back.
                 */
                if ((flag & FWRITE) && vn_has_cached_data(vp)) {
                        /*
                         * smbfs_putapage() takes r_lkserlock as
                         * READER, so drop the exclusive lock
                         * temporarily.
                         */
                        smbfs_rw_exit(&np->r_lkserlock);

                        (void) smbfs_putpage(vp, (offset_t)0, 0,
                            B_INVAL | B_ASYNC, cr, ct);

                        /* Reacquire the exclusive lock. */
                        (void) smbfs_rw_enter_sig(&np->r_lkserlock,
                            RW_WRITER, 0);
                }
                smbfs_rele_fid(np, &scred);
        }

        smb_credrele(&scred);
        smbfs_rw_exit(&np->r_lkserlock);

        return (0);
}

/*
 * Helper for smbfs_close.  Decrement the reference count
 * for an SMB-level file or directory ID, and when the last
 * reference for the fid goes away, do the OtW close.
 * Also called in smbfs_inactive (defensive cleanup).
 */
static void
smbfs_rele_fid(smbnode_t *np, struct smb_cred *scred)
{
        smb_share_t *ssp;
        cred_t *oldcr;
        struct smbfs_fctx *fctx;
        int error;
        switch (np->n_ovtype) {
        case VNON:
                /* not open (OK) */
                break;

        case VDIR:
                if (np->n_dirrefs == 0)
                        break;
                SMBVDEBUG("open dir: refs %d path %s\n",
                    np->n_dirrefs, np->n_rpath);
                /* Force last close. */
                np->n_dirrefs = 1;
                smbfs_rele_fid(np, &scred);
                break;

        case VREG:
                if (np->n_fidrefs == 0)
                        break;
                SMBVDEBUG("open file: refs %d id 0x%x path %s\n",
                    np->n_fidrefs, np->n_fid, np->n_rpath);
                /*
                 * Before the OtW close, make sure dirty pages
                 * are written back.
                 */
                if (vn_has_cached_data(vp)) {
                        /*
                         * smbfs_putapage() takes r_lkserlock as
                         * READER, so drop the exclusive lock
                         * temporarily.
                         */
                        smbfs_rw_exit(&np->r_lkserlock);

                        (void) smbfs_putpage(vp, (offset_t)0, 0,
                            B_INVAL | B_ASYNC, cr, ct);

                        /* Reacquire the exclusive lock. */
                        (void) smbfs_rw_enter_sig(&np->r_lkserlock,
                            RW_WRITER, 0);
                }
                /* Force last close. */
                np->n_fidrefs = 1;
                smbfs_rele_fid(np, &scred);
                break;

        default:
                SMBVDEBUG("bad n_ovtype %d\n", np->n_ovtype);
                np->n_ovtype = VNON;
                break;
        }

        smb_credrele(&scred);
        smbfs_rw_exit(&np->r_lkserlock);

        smbfs_addfree(np);
}

/*
 * Remote file system operations having to do with directory manipulation.
 */
        return (error);
}


/*
 * XXX
 * This op should eventually support PSARC 2007/268.
 */
static int
smbfs_shrlock(vnode_t *vp, int cmd, struct shrlock *shr, int flag, cred_t *cr,
    caller_context_t *ct)
{
        if (curproc->p_zone != VTOSMI(vp)->smi_zone_ref.zref_zone)
                return (EIO);

        if (VTOSMI(vp)->smi_flags & SMI_LLOCK)
                return (fs_shrlock(vp, cmd, shr, flag, cr, ct));
        else
                return (ENOSYS);
}

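/*
 * Map the pages of a kluster into kernel virtual address space
 * and point the (single-iovec) uio at the mapping, so the page
 * list can be handed to smb_rwuio() as ordinary UIO_SYSSPACE I/O.
 * A single page is mapped through segkpm when available (cheap,
 * no allocation); otherwise a virtual range is carved out of
 * heap_arena and each page is loaded into it with hat_devload().
 */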
static int
uio_page_mapin(uio_t *uiop, page_t *pp)
{
        u_offset_t off;
        size_t size;
        pgcnt_t npages;
        caddr_t kaddr;
        pfn_t pfnum;
        uint_t attr;

        off = (uintptr_t)uiop->uio_loffset & PAGEOFFSET;
        size = P2ROUNDUP(uiop->uio_resid + off, PAGESIZE);
        npages = btop(size);

        ASSERT(pp != NULL);

        if (npages == 1 && kpm_enable) {
                kaddr = hat_kpm_mapin(pp, NULL);
                if (kaddr == NULL)
                        return (EFAULT);

                uiop->uio_iov->iov_base = kaddr + off;
                uiop->uio_iov->iov_len = PAGESIZE - off;
        } else {
                kaddr = vmem_xalloc(heap_arena, size, PAGESIZE, 0, 0,
                    NULL, NULL, VM_SLEEP);
                if (kaddr == NULL)
                        return (EFAULT);

                uiop->uio_iov->iov_base = kaddr + off;
                uiop->uio_iov->iov_len = size - off;

                /* Map each page into the kernel virtual range. */
                attr = PROT_READ | PROT_WRITE | HAT_NOSYNC;
                while (npages-- > 0) {
                        pfnum = pp->p_pagenum;
                        pp = pp->p_next;

                        hat_devload(kas.a_hat, kaddr, PAGESIZE, pfnum,
                            attr, HAT_LOAD_LOCK);
                        kaddr += PAGESIZE;
                }
        }
        return (0);
}

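/*
 * Undo uio_page_mapin(): unmap the pages from kernel address
 * space (segkpm for the single-page case, otherwise hat_unload()
 * plus freeing the heap_arena range) and clear the iovec.
 */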
static void
uio_page_mapout(uio_t *uiop, page_t *pp)
{
        u_offset_t off;
        size_t size;
        pgcnt_t npages;
        caddr_t kaddr;

        kaddr = uiop->uio_iov->iov_base;
        off = (uintptr_t)kaddr & PAGEOFFSET;
        size = P2ROUNDUP(uiop->uio_iov->iov_len + off, PAGESIZE);
        npages = btop(size);

        ASSERT(pp != NULL);

        kaddr = (caddr_t)((uintptr_t)kaddr & MMU_PAGEMASK);

        if (npages == 1 && kpm_enable) {
                hat_kpm_mapout(pp, NULL, kaddr);
        } else {
                hat_unload(kas.a_hat, (void *)kaddr, size,
                    HAT_UNLOAD_NOSYNC | HAT_UNLOAD_UNLOCK);
                vmem_free(heap_arena, (void *)kaddr, size);
        }
        uiop->uio_iov->iov_base = 0;
        uiop->uio_iov->iov_len = 0;
}

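/*
 * Set up a memory mapping: after the usual sanity checks
 * (zone, dead mount, VNOMAP/VNOCACHE, regular file, mandatory
 * locking), pick an address with choose_addr() and let
 * as_map()/segvn_create() build the segment over this vnode.
 */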
static int
smbfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
    size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
    caller_context_t *ct)
{
        smbnode_t *np;
        smbmntinfo_t *smi;
        struct vattr va;
        segvn_crargs_t vn_a;
        int error;

        np = VTOSMB(vp);
        smi = VTOSMI(vp);

        if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
                return (EIO);

        if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
                return (EIO);

        if (vp->v_flag & VNOMAP || vp->v_flag & VNOCACHE)
                return (EAGAIN);

        if (vp->v_type != VREG)
                return (ENODEV);

        va.va_mask = AT_ALL;
        if ((error = smbfsgetattr(vp, &va, cr)) != 0)
                return (error);

        if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, SMBINTR(vp)))
                return (EINTR);

        if (MANDLOCK(vp, va.va_mode)) {
                error = EAGAIN;
                goto out;
        }

        as_rangelock(as);
        error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
        if (error != 0) {
                as_rangeunlock(as);
                goto out;
        }

        vn_a.vp = vp;
        vn_a.offset = off;
        vn_a.type = flags & MAP_TYPE;
        vn_a.prot = prot;
        vn_a.maxprot = maxprot;
        vn_a.flags = flags & ~MAP_TYPE;
        vn_a.cred = cr;
        vn_a.amp = NULL;
        vn_a.szc = 0;
        vn_a.lgrp_mem_policy_flags = 0;

        error = as_map(as, *addrp, len, segvn_create, &vn_a);
        as_rangeunlock(as);

out:
        smbfs_rw_exit(&np->r_lkserlock);
        return (error);
}

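/*
 * Track the number of pages mapped over this vnode.
 * smbfs_close() consults r_mapcnt to decide whether the OtW
 * close must be deferred until smbfs_inactive().
 */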
static int
smbfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
    size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
    caller_context_t *ct)
{
        atomic_add_long((ulong_t *)&VTOSMB(vp)->r_mapcnt, btopr(len));
        return (0);
}

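/*
 * On unmap, drop the mapped-page count, and remember (RDIRTY)
 * that a writable MAP_SHARED mapping may have left dirty pages
 * behind that still need to be flushed.
 */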
static int
smbfs_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
    size_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cr,
    caller_context_t *ct)
{
        smbnode_t *np;

        atomic_add_long((ulong_t *)&VTOSMB(vp)->r_mapcnt, -btopr(len));

        /*
         * Mark RDIRTY here; it is used to check whether a file
         * is dirty when unmounting smbfs.
         */
        if (vn_has_cached_data(vp) && !vn_is_readonly(vp) &&
            (maxprot & PROT_WRITE) && flags == MAP_SHARED) {
                np = VTOSMB(vp);
                mutex_enter(&np->r_statelock);
                np->r_flags |= RDIRTY;
                mutex_exit(&np->r_statelock);
        }
        return (0);
}

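/*
 * Flush dirty pages.  A zero length means the whole file, so
 * use pvn_vplist_dirty() to walk every cached page.  Otherwise
 * walk the requested range (clipped to EOF) and push each dirty
 * kluster found there through smbfs_putapage().
 */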
static int
smbfs_putpage(vnode_t *vp, offset_t off, size_t len, int flags,
    cred_t *cr, caller_context_t *ct)
{
        smbnode_t *np;
        size_t io_len;
        u_offset_t io_off;
        u_offset_t eoff;
        int error = 0;
        page_t *pp;

        np = VTOSMB(vp);

        if (len == 0) {
                /* Will flush the whole file, so clear RDIRTY. */
                if (off == (u_offset_t)0 && (np->r_flags & RDIRTY)) {
                        mutex_enter(&np->r_statelock);
                        np->r_flags &= ~RDIRTY;
                        mutex_exit(&np->r_statelock);
                }

                error = pvn_vplist_dirty(vp, off, smbfs_putapage, flags, cr);
        } else {
                eoff = off + len;

                mutex_enter(&np->r_statelock);
                if (eoff > np->r_size)
                        eoff = np->r_size;
                mutex_exit(&np->r_statelock);

                for (io_off = off; io_off < eoff; io_off += io_len) {
                        if ((flags & B_INVAL) || (flags & B_ASYNC) == 0) {
                                pp = page_lookup(vp, io_off,
                                    (flags & (B_INVAL | B_FREE) ?
                                    SE_EXCL : SE_SHARED));
                        } else {
                                pp = page_lookup_nowait(vp, io_off,
                                    (flags & B_FREE) ? SE_EXCL : SE_SHARED);
                        }

                        if (pp == NULL || !pvn_getdirty(pp, flags))
                                io_len = PAGESIZE;
                        else {
                                error = smbfs_putapage(vp, pp, &io_off,
                                    &io_len, flags, cr);
                        }
                }
        }

        return (error);
}

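/*
 * Write out one dirty kluster of pages.  The kluster is built
 * around the passed-in page with pvn_write_kluster(), clipped
 * to both the block and the file size, mapped into kernel
 * address space, and written OtW with smb_rwuio().  Pages
 * wholly beyond EOF are just completed; no I/O is done past EOF.
 */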
static int
smbfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp, size_t *lenp,
    int flags, cred_t *cr)
{
        struct smb_cred scred;
        smbnode_t *np;
        smbmntinfo_t *smi;
        smb_share_t *ssp;
        uio_t uio;
        iovec_t uiov, uiov_bak;

        size_t io_len;
        u_offset_t io_off;
        size_t bsize;
        size_t blksize;
        u_offset_t blkoff;
        int error;

        np = VTOSMB(vp);
        smi = VTOSMI(vp);
        ssp = smi->smi_share;

        /* Do block I/O: get a kluster of dirty pages in a block. */
        bsize = MAX(vp->v_vfsp->vfs_bsize, PAGESIZE);
        blkoff = pp->p_offset / bsize;
        blkoff *= bsize;
        blksize = roundup(bsize, PAGESIZE);

        pp = pvn_write_kluster(vp, pp, &io_off, &io_len, blkoff,
            blksize, flags);

        ASSERT(pp->p_offset >= blkoff);

        if (io_off + io_len > blkoff + blksize) {
                ASSERT((io_off + io_len) - (blkoff + blksize) < PAGESIZE);
                io_len = blkoff + blksize - io_off;
        }

        /*
         * For now, don't put pages beyond EOF, unless
         * smbfs_read()/smbfs_write() can do I/O through
         * segkpm or vpm.
         */
        mutex_enter(&np->r_statelock);
        if (io_off >= np->r_size) {
                mutex_exit(&np->r_statelock);
                error = 0;
                goto out;
        } else if (io_off + io_len > np->r_size) {
                int npages = btopr(np->r_size - io_off);
                page_t *trunc;

                page_list_break(&pp, &trunc, npages);
                if (trunc)
                        pvn_write_done(trunc, flags);
                io_len = np->r_size - io_off;
        }
        mutex_exit(&np->r_statelock);

        if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER, SMBINTR(vp))) {
                error = EINTR;
                goto out;
        }
        smb_credinit(&scred, cr);

        if (np->n_vcgenid != ssp->ss_vcgenid)
                error = ESTALE;
        else {
                /* Use a uio instead of a buf, since smb_rwuio needs a uio. */
                uiov.iov_base = 0;
                uiov.iov_len = 0;
                uio.uio_iov = &uiov;
                uio.uio_iovcnt = 1;
                uio.uio_loffset = io_off;
                uio.uio_resid = io_len;
                uio.uio_segflg = UIO_SYSSPACE;
                uio.uio_llimit = MAXOFFSET_T;
                /* Map pages into kernel address space; set up the uio. */
                error = uio_page_mapin(&uio, pp);
                if (error == 0) {
                        uiov_bak.iov_base = uiov.iov_base;
                        uiov_bak.iov_len = uiov.iov_len;
                        error = smb_rwuio(ssp, np->n_fid, UIO_WRITE, &uio,
                            &scred, smb_timo_write);
                        if (error == 0) {
                                mutex_enter(&np->r_statelock);
                                np->n_flag |= (NFLUSHWIRE | NATTRCHANGED);
                                mutex_exit(&np->r_statelock);
                                (void) smbfs_smb_flush(np, &scred);
                        }
                        /* Unmap pages from kernel address space. */
                        uio.uio_iov = &uiov_bak;
                        uio_page_mapout(&uio, pp);
                }
        }

        smb_credrele(&scred);
        smbfs_rw_exit(&np->r_lkserlock);

out:
        pvn_write_done(pp, ((error) ? B_ERROR : 0) | B_WRITE | flags);

        if (offp)
                *offp = io_off;
        if (lenp)
                *lenp = io_len;

        return (error);
}

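/*
 * Fill in pages for a fault.  Single-page requests go straight
 * to smbfs_getapage(); larger requests go through pvn_getpages(),
 * which calls smbfs_getapage() once per page.
 */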
static int
smbfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp,
    page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
    enum seg_rw rw, cred_t *cr, caller_context_t *ct)
{
        int error;

        /* These pages have all protections. */
        if (protp)
                *protp = PROT_ALL;

        if (len <= PAGESIZE) {
                error = smbfs_getapage(vp, off, len, protp, pl, plsz,
                    seg, addr, rw, cr);
        } else {
                error = pvn_getpages(smbfs_getapage, vp, off, len, protp,
                    pl, plsz, seg, addr, rw, cr);
        }

        return (error);
}

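/*
 * Get one page.  If the page is not already cached, kluster a
 * block's worth of missing pages with pvn_read_kluster(), clip
 * the kluster to EOF, map it into kernel address space, and
 * read it OtW with smb_rwuio().  An S_CREATE request just
 * creates an empty page.
 */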
static int
smbfs_getapage(vnode_t *vp, u_offset_t off, size_t len,
    uint_t *protp, page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
    enum seg_rw rw, cred_t *cr)
{
        smbnode_t *np;
        smbmntinfo_t *smi;
        smb_share_t *ssp;
        smb_cred_t scred;

        page_t *pp;
        uio_t uio;
        iovec_t uiov, uiov_bak;

        u_offset_t blkoff;
        size_t bsize;
        size_t blksize;

        u_offset_t io_off;
        size_t io_len;
        size_t pages_len;

        int error = 0;

        np = VTOSMB(vp);
        smi = VTOSMI(vp);
        ssp = smi->smi_share;

        /* If pl is NULL, the request is meaningless. */
        if (pl == NULL)
                return (EFAULT);

again:
        if (page_exists(vp, off) == NULL) {
                if (rw == S_CREATE) {
                        /* Just return an empty page if asked to create. */
                        if ((pp = page_create_va(vp, off, PAGESIZE,
                            PG_WAIT | PG_EXCL, seg, addr)) == NULL)
                                goto again;
                        pages_len = PAGESIZE;
                } else {
                        /*
                         * Do block I/O: get a kluster of nonexistent
                         * pages in a block.
                         */
                        bsize = MAX(vp->v_vfsp->vfs_bsize, PAGESIZE);
                        blkoff = off / bsize;
                        blkoff *= bsize;
                        blksize = roundup(bsize, PAGESIZE);

                        pp = pvn_read_kluster(vp, off, seg, addr, &io_off,
                            &io_len, blkoff, blksize, 0);

                        if (pp == NULL)
                                goto again;

                        pages_len = io_len;

                        /*
                         * For now, don't get pages beyond EOF, unless
                         * smbfs_read()/smbfs_write() can do I/O through
                         * segkpm or vpm.
                         */
                        mutex_enter(&np->r_statelock);
                        if (io_off >= np->r_size) {
                                mutex_exit(&np->r_statelock);
                                error = 0;
                                goto out;
                        } else if (io_off + io_len > np->r_size) {
                                int npages = btopr(np->r_size - io_off);
                                page_t *trunc;

                                page_list_break(&pp, &trunc, npages);
                                if (trunc)
                                        pvn_read_done(trunc, 0);
                                io_len = np->r_size - io_off;
                        }
                        mutex_exit(&np->r_statelock);

                        if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER,
                            SMBINTR(vp))) {
                                error = EINTR;
                                goto out;
                        }
                        smb_credinit(&scred, cr);

                        /*
                         * Use a uio instead of a buf, since smb_rwuio
                         * needs a uio.
                         */
                        uiov.iov_base = 0;
                        uiov.iov_len = 0;
                        uio.uio_iov = &uiov;
                        uio.uio_iovcnt = 1;
                        uio.uio_loffset = io_off;
                        uio.uio_resid = io_len;
                        uio.uio_segflg = UIO_SYSSPACE;
                        uio.uio_llimit = MAXOFFSET_T;

                        /*
                         * Map pages into kernel address space;
                         * set up the uio.
                         */
                        error = uio_page_mapin(&uio, pp);
                        if (error == 0) {
                                uiov_bak.iov_base = uiov.iov_base;
                                uiov_bak.iov_len = uiov.iov_len;
                                error = smb_rwuio(ssp, np->n_fid, UIO_READ,
                                    &uio, &scred, smb_timo_read);
                                /* Unmap pages from kernel address space. */
                                uio.uio_iov = &uiov_bak;
                                uio_page_mapout(&uio, pp);
                        }

                        smb_credrele(&scred);
                        smbfs_rw_exit(&np->r_lkserlock);
                }
        } else {
                se_t se = (rw == S_CREATE) ? SE_EXCL : SE_SHARED;
                if ((pp = page_lookup(vp, off, se)) == NULL)
                        goto again;
                pages_len = PAGESIZE;
        }

out:
        if (pp) {
                if (error) {
                        pvn_read_done(pp, B_ERROR);
                } else {
                        /* Initialize the page list; unlock the pages. */
                        pvn_plist_init(pp, pl, plsz, off, pages_len, rw);
                }
        }

        return (error);
}

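/*
 * Invalidate cached pages for this vnode: clear RDIRTY when
 * flushing from offset zero, then push out and free the pages
 * via pvn_vplist_dirty() with B_INVAL | B_TRUNC.
 */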
void
smbfs_invalidate_pages(vnode_t *vp, u_offset_t off, cred_t *cr)
{
        smbnode_t *np;

        np = VTOSMB(vp);
        /* Will flush the whole file, so clear RDIRTY. */
        if (off == (u_offset_t)0 && (np->r_flags & RDIRTY)) {
                mutex_enter(&np->r_statelock);
                np->r_flags &= ~RDIRTY;
                mutex_exit(&np->r_statelock);
        }

        (void) pvn_vplist_dirty(vp, off, smbfs_putapage,
            B_INVAL | B_TRUNC, cr);
}