cstyle
Implement ioctl _FIODIRECTIO
Lots of comment cleanup
5404 smbfs needs mmap support
Portions contributed by: Gordon Ross <gordon.w.ross@gmail.com>
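
For context (not part of the change itself): with paging support in place, an ordinary
application can map a file on an smbfs mount with mmap(2). A minimal user-level sketch,
assuming a hypothetical smbfs mount at /mnt/smb:

	#include <sys/mman.h>
	#include <sys/stat.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int
	main(void)
	{
		/* Hypothetical file on an smbfs mount. */
		int fd = open("/mnt/smb/testfile", O_RDWR);
		struct stat st;
		char *addr;

		if (fd < 0 || fstat(fd, &st) < 0) {
			perror("open/fstat");
			return (1);
		}

		/* Map the whole file shared; dirty pages are written back by the FS. */
		addr = mmap(NULL, (size_t)st.st_size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, 0);
		if (addr == MAP_FAILED) {
			perror("mmap");
			return (1);
		}

		addr[0] = 'x';				/* dirty a page */
		(void) msync(addr, (size_t)st.st_size, MS_SYNC);
		(void) munmap(addr, (size_t)st.st_size);
		(void) close(fd);
		return (0);
	}

Dirty pages created through such a mapping are what the new smbfs_rflush() below has to
find (r_mapcnt > 0 or RDIRTY) and push back to the server.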

*** 42,51 ****
--- 42,52 ----
  #include <sys/bitmap.h>
  #include <sys/dnlc.h>
  #include <sys/kmem.h>
  #include <sys/sunddi.h>
  #include <sys/sysmacros.h>
+ #include <sys/fcntl.h>
  
  #include <netsmb/smb_osdep.h>
  
  #include <netsmb/smb.h>
  #include <netsmb/smb_conn.h>
*** 149,170 ****
  /*
   * Free the resources associated with an smbnode.
   * Note: This is different from smbfs_inactive
   *
!  * NFS: nfs_subr.c:rinactive
   */
  static void
  sn_inactive(smbnode_t *np)
  {
  	vsecattr_t	ovsa;
  	cred_t		*oldcr;
  	char		*orpath;
  	int		orplen;
  
  	/*
! 	 * Flush and invalidate all pages (todo)
  	 * Free any held credentials and caches...
  	 * etc.  (See NFS code)
  	 */
  	mutex_enter(&np->r_statelock);
--- 150,173 ----
  /*
   * Free the resources associated with an smbnode.
   * Note: This is different from smbfs_inactive
   *
!  * From NFS: nfs_subr.c:rinactive
   */
  static void
  sn_inactive(smbnode_t *np)
  {
  	vsecattr_t	ovsa;
  	cred_t		*oldcr;
  	char		*orpath;
  	int		orplen;
+ 	vnode_t		*vp;
  
  	/*
! 	 * Here NFS has:
! 	 * Flush and invalidate all pages (done by caller)
  	 * Free any held credentials and caches...
  	 * etc.  (See NFS code)
  	 */
  	mutex_enter(&np->r_statelock);
*** 180,189 ****
--- 183,197 ----
  	np->n_rpath = NULL;
  	np->n_rplen = 0;
  
  	mutex_exit(&np->r_statelock);
  
+ 	vp = SMBTOV(np);
+ 	if (vn_has_cached_data(vp)) {
+ 		ASSERT3P(vp, ==, NULL);
+ 	}
+ 
  	if (ovsa.vsa_aclentp != NULL)
  		kmem_free(ovsa.vsa_aclentp, ovsa.vsa_aclentsz);
  
  	if (oldcr != NULL)
  		crfree(oldcr);
*** 202,212 ****
   * Callers that need a node created but don't have the
   * real attributes pass smbfs_fattr0 to force creation.
   *
   * Note: make_smbnode() may upgrade the "hash" lock to exclusive.
   *
!  * NFS: nfs_subr.c:makenfsnode
   */
  smbnode_t *
  smbfs_node_findcreate(
  	smbmntinfo_t *mi,
  	const char *dirnm,
--- 210,220 ----
   * Callers that need a node created but don't have the
   * real attributes pass smbfs_fattr0 to force creation.
   *
   * Note: make_smbnode() may upgrade the "hash" lock to exclusive.
   *
!  * Based on NFS: nfs_subr.c:makenfsnode
   */
  smbnode_t *
  smbfs_node_findcreate(
  	smbmntinfo_t *mi,
  	const char *dirnm,
*** 284,300 ****
  	/*
  	 * Apply the given attributes to this node,
  	 * dealing with any cache impact, etc.
  	 */
  	vp = SMBTOV(np);
- 	if (!newnode) {
- 		/*
- 		 * Found an existing node.
- 		 * Maybe purge caches...
- 		 */
- 		smbfs_cache_check(vp, fap);
- 	}
  	smbfs_attrcache_fa(vp, fap);
  
  	/*
  	 * Note NFS sets vp->v_type here, assuming it
  	 * can never change for the life of a node.
--- 292,301 ----
*** 303,319 ****
   */
  	return (np);
  }
  
  /*
!  * NFS: nfs_subr.c:rtablehash
   * We use smbfs_hash().
   */
  
  /*
   * Find or create an smbnode.
!  * NFS: nfs_subr.c:make_rnode
   */
  static smbnode_t *
  make_smbnode(
  	smbmntinfo_t *mi,
  	const char *rpath,
--- 304,320 ----
   */
  	return (np);
  }
  
  /*
!  * Here NFS has: nfs_subr.c:rtablehash
   * We use smbfs_hash().
   */
  
  /*
   * Find or create an smbnode.
!  * From NFS: nfs_subr.c:make_rnode
   */
  static smbnode_t *
  make_smbnode(
  	smbmntinfo_t *mi,
  	const char *rpath,
*** 432,449 ****
  	np->n_fid = SMB_FID_UNUSED;
  	np->n_uid = mi->smi_uid;
  	np->n_gid = mi->smi_gid;
  	/* Leave attributes "stale." */
  
- #if 0 /* XXX dircache */
  	/*
! 	 * We don't know if it's a directory yet.
! 	 * Let the caller do this? XXX
  	 */
- 	avl_create(&np->r_dir, compar, sizeof (rddir_cache),
- 	    offsetof(rddir_cache, tree));
- #endif
  
  	/* Now fill in the vnode. */
  	vn_setops(vp, smbfs_vnodeops);
  	vp->v_data = (caddr_t)np;
  	VFS_HOLD(vfsp);
--- 433,446 ----
  	np->n_fid = SMB_FID_UNUSED;
  	np->n_uid = mi->smi_uid;
  	np->n_gid = mi->smi_gid;
  	/* Leave attributes "stale." */
  
  	/*
! 	 * Here NFS has avl_create(&np->r_dir, ...)
! 	 * for the readdir cache (not used here).
  	 */
  
  	/* Now fill in the vnode. */
  	vn_setops(vp, smbfs_vnodeops);
  	vp->v_data = (caddr_t)np;
  	VFS_HOLD(vfsp);
*** 497,507 ****
   * destroy immediately when we have too many smbnodes, etc.
   *
   * Normally called by smbfs_inactive, but also
   * called in here during cleanup operations.
   *
!  * NFS: nfs_subr.c:rp_addfree
   */
  void
  smbfs_addfree(smbnode_t *np)
  {
  	vnode_t *vp;
--- 494,504 ----
   * destroy immediately when we have too many smbnodes, etc.
   *
   * Normally called by smbfs_inactive, but also
   * called in here during cleanup operations.
   *
!  * From NFS: nfs_subr.c:rp_addfree
   */
  void
  smbfs_addfree(smbnode_t *np)
  {
  	vnode_t *vp;
*** 625,635 ****
   * Remove an smbnode from the free list.
   *
   * The caller must be holding smbfreelist_lock and the smbnode
   * must be on the freelist.
   *
!  * NFS: nfs_subr.c:rp_rmfree
   */
  static void
  sn_rmfree(smbnode_t *np)
  {
--- 622,632 ----
   * Remove an smbnode from the free list.
   *
   * The caller must be holding smbfreelist_lock and the smbnode
   * must be on the freelist.
   *
!  * From NFS: nfs_subr.c:rp_rmfree
   */
  static void
  sn_rmfree(smbnode_t *np)
  {
*** 651,661 ****
  /*
   * Put an smbnode in the "hash" AVL tree.
   *
   * The caller must hold the rwlock as writer.
   *
!  * NFS: nfs_subr.c:rp_addhash
   */
  static void
  sn_addhash_locked(smbnode_t *np, avl_index_t where)
  {
  	smbmntinfo_t *mi = np->n_mount;
--- 648,658 ----
  /*
   * Put an smbnode in the "hash" AVL tree.
   *
   * The caller must hold the rwlock as writer.
   *
!  * From NFS: nfs_subr.c:rp_addhash
   */
  static void
  sn_addhash_locked(smbnode_t *np, avl_index_t where)
  {
  	smbmntinfo_t *mi = np->n_mount;
*** 673,683 ****
  /*
   * Remove an smbnode from the "hash" AVL tree.
   *
   * The caller must hold the rwlock as writer.
   *
!  * NFS: nfs_subr.c:rp_rmhash_locked
   */
  static void
  sn_rmhash_locked(smbnode_t *np)
  {
  	smbmntinfo_t *mi = np->n_mount;
--- 670,680 ----
  /*
   * Remove an smbnode from the "hash" AVL tree.
   *
   * The caller must hold the rwlock as writer.
   *
!  * From NFS: nfs_subr.c:rp_rmhash_locked
   */
  static void
  sn_rmhash_locked(smbnode_t *np)
  {
  	smbmntinfo_t *mi = np->n_mount;
*** 710,720 ****
  /*
   * Lookup an smbnode by remote pathname
   *
   * The caller must be holding the AVL rwlock, either shared or exclusive.
   *
!  * NFS: nfs_subr.c:rfind
   */
  static smbnode_t *
  sn_hashfind(
  	smbmntinfo_t *mi,
  	const char *rpath,
--- 707,717 ----
  /*
   * Lookup an smbnode by remote pathname
   *
   * The caller must be holding the AVL rwlock, either shared or exclusive.
   *
!  * From NFS: nfs_subr.c:rfind
   */
  static smbnode_t *
  sn_hashfind(
  	smbmntinfo_t *mi,
  	const char *rpath,
*** 865,875 ****
   * Several of these checks are done without holding the usual
   * locks.  This is safe because destroy_smbtable(), smbfs_addfree(),
   * etc. will redo the necessary checks before actually destroying
   * any smbnodes.
   *
!  * NFS: nfs_subr.c:check_rtable
   *
   * Debugging changes here relative to NFS.
   * Relatively harmless, so left 'em in.
   */
  int
--- 862,872 ----
   * Several of these checks are done without holding the usual
   * locks.  This is safe because destroy_smbtable(), smbfs_addfree(),
   * etc. will redo the necessary checks before actually destroying
   * any smbnodes.
   *
!  * From NFS: nfs_subr.c:check_rtable
   *
   * Debugging changes here relative to NFS.
   * Relatively harmless, so left 'em in.
   */
  int
*** 924,934 ****
  /*
   * Destroy inactive vnodes from the AVL tree which belong to this
   * vfs.  It is essential that we destroy all inactive vnodes during a
   * forced unmount as well as during a normal unmount.
   *
!  * NFS: nfs_subr.c:destroy_rtable
   *
   * In here, we're normally destroying all or most of the AVL tree,
   * so the natural choice is to use avl_destroy_nodes.  However,
   * there may be a few busy nodes that should remain in the AVL
   * tree when we're done.  The solution: use a temporary tree to
--- 921,931 ----
  /*
   * Destroy inactive vnodes from the AVL tree which belong to this
   * vfs.  It is essential that we destroy all inactive vnodes during a
   * forced unmount as well as during a normal unmount.
   *
!  * Based on NFS: nfs_subr.c:destroy_rtable
   *
   * In here, we're normally destroying all or most of the AVL tree,
   * so the natural choice is to use avl_destroy_nodes.  However,
   * there may be a few busy nodes that should remain in the AVL
   * tree when we're done.  The solution: use a temporary tree to
*** 1009,1019 ****
  /*
   * This routine destroys all the resources associated with the smbnode
   * and then the smbnode itself.  Note: sn_inactive has been called.
   *
!  * NFS: nfs_subr.c:destroy_rnode
   */
  static void
  sn_destroy_node(smbnode_t *np)
  {
  	vnode_t *vp;
--- 1006,1016 ----
  /*
   * This routine destroys all the resources associated with the smbnode
   * and then the smbnode itself.  Note: sn_inactive has been called.
   *
!  * From NFS: nfs_subr.c:destroy_rnode
   */
  static void
  sn_destroy_node(smbnode_t *np)
  {
  	vnode_t *vp;
*** 1036,1064 ****
  	kmem_cache_free(smbnode_cache, np);
  	VFS_RELE(vfsp);
  }
  
  /*
   * Flush all vnodes in this (or every) vfs.
!  * Used by nfs_sync and by nfs_unmount.
   */
  /*ARGSUSED*/
  void
  smbfs_rflush(struct vfs *vfsp, cred_t *cr)
  {
! 	/* Todo: mmap support. */
  }
  
! /* access cache (nfs_subr.c) not used here */
  
  static kmutex_t smbfs_newnum_lock;
  static uint32_t smbfs_newnum_val = 0;
  
  /*
   * Return a number 0..0xffffffff that's different from the last
   * 0xffffffff numbers this returned.  Used for unlinked files.
!  * (This too was copied from nfs_subr.c)
   */
  uint32_t
  smbfs_newnum(void)
  {
  	uint32_t id;
--- 1033,1190 ----
  	kmem_cache_free(smbnode_cache, np);
  	VFS_RELE(vfsp);
  }
  
  /*
+  * From NFS rflush()
   * Flush all vnodes in this (or every) vfs.
!  * Used by smbfs_sync and by smbfs_unmount.
   */
  /*ARGSUSED*/
  void
  smbfs_rflush(struct vfs *vfsp, cred_t *cr)
  {
! 	smbmntinfo_t *mi;
! 	smbnode_t *np;
! 	vnode_t *vp, **vplist;
! 	long num, cnt;
! 
! 	mi = VFTOSMI(vfsp);
! 
! 	/*
! 	 * Check to see whether there is anything to do.
! 	 */
! 	num = avl_numnodes(&mi->smi_hash_avl);
! 	if (num == 0)
! 		return;
! 
! 	/*
! 	 * Allocate a slot for all currently active rnodes on the
! 	 * supposition that they all may need flushing.
! 	 */
! 	vplist = kmem_alloc(num * sizeof (*vplist), KM_SLEEP);
! 	cnt = 0;
! 
! 	/*
! 	 * Walk the AVL tree looking for rnodes with page
! 	 * lists associated with them.  Make a list of these
! 	 * files.
! 	 */
! 	rw_enter(&mi->smi_hash_lk, RW_READER);
! 	for (np = avl_first(&mi->smi_hash_avl); np != NULL;
! 	    np = avl_walk(&mi->smi_hash_avl, np, AVL_AFTER)) {
! 		vp = SMBTOV(np);
! 		/*
! 		 * Don't bother sync'ing a vp if it
! 		 * is part of virtual swap device or
! 		 * if VFS is read-only
! 		 */
! 		if (IS_SWAPVP(vp) || vn_is_readonly(vp))
! 			continue;
! 		/*
! 		 * If the vnode has pages and is marked as either
! 		 * dirty or mmap'd, hold and add this vnode to the
! 		 * list of vnodes to flush.
! 		 */
! 		if (vn_has_cached_data(vp) &&
! 		    ((np->r_flags & RDIRTY) || np->r_mapcnt > 0)) {
! 			VN_HOLD(vp);
! 			vplist[cnt++] = vp;
! 			if (cnt == num)
! 				break;
! 		}
! 	}
! 	rw_exit(&mi->smi_hash_lk);
! 
! 	/*
! 	 * Flush and release all of the files on the list.
! 	 */
! 	while (cnt-- > 0) {
! 		vp = vplist[cnt];
! 		(void) VOP_PUTPAGE(vp, (u_offset_t)0, 0, B_ASYNC, cr, NULL);
! 		VN_RELE(vp);
! 	}
! 
! 	kmem_free(vplist, num * sizeof (vnode_t *));
  }
  
! /* Here NFS has access cache stuff (nfs_subr.c) not used here */
  
+ /*
+  * Set or Clear direct I/O flag
+  * VOP_RWLOCK() is held for write access to prevent a race condition
+  * which would occur if a process is in the middle of a write when
+  * directio flag gets set. It is possible that all pages may not get flushed.
+  * From nfs_common.c
+  */
+ 
+ /* ARGSUSED */
+ int
+ smbfs_directio(vnode_t *vp, int cmd, cred_t *cr)
+ {
+ 	int	error = 0;
+ 	smbnode_t	*np;
+ 
+ 	np = VTOSMB(vp);
+ 
+ 	if (cmd == DIRECTIO_ON) {
+ 
+ 		if (np->r_flags & RDIRECTIO)
+ 			return (0);
+ 
+ 		/*
+ 		 * Flush the page cache.
+ 		 */
+ 
+ 		(void) VOP_RWLOCK(vp, V_WRITELOCK_TRUE, NULL);
+ 
+ 		if (np->r_flags & RDIRECTIO) {
+ 			VOP_RWUNLOCK(vp, V_WRITELOCK_TRUE, NULL);
+ 			return (0);
+ 		}
+ 
+ 		/* Here NFS also checks ->r_awcount */
+ 		if (vn_has_cached_data(vp) &&
+ 		    (np->r_flags & RDIRTY) != 0) {
+ 			error = VOP_PUTPAGE(vp, (offset_t)0, (uint_t)0,
+ 			    B_INVAL, cr, NULL);
+ 			if (error) {
+ 				if (error == ENOSPC || error == EDQUOT) {
+ 					mutex_enter(&np->r_statelock);
+ 					if (!np->r_error)
+ 						np->r_error = error;
+ 					mutex_exit(&np->r_statelock);
+ 				}
+ 				VOP_RWUNLOCK(vp, V_WRITELOCK_TRUE, NULL);
+ 				return (error);
+ 			}
+ 		}
+ 
+ 		mutex_enter(&np->r_statelock);
+ 		np->r_flags |= RDIRECTIO;
+ 		mutex_exit(&np->r_statelock);
+ 		VOP_RWUNLOCK(vp, V_WRITELOCK_TRUE, NULL);
+ 		return (0);
+ 	}
+ 
+ 	if (cmd == DIRECTIO_OFF) {
+ 		mutex_enter(&np->r_statelock);
+ 		np->r_flags &= ~RDIRECTIO;	/* disable direct mode */
+ 		mutex_exit(&np->r_statelock);
+ 		return (0);
+ 	}
+ 
+ 	return (EINVAL);
+ }
+ 
  static kmutex_t smbfs_newnum_lock;
  static uint32_t smbfs_newnum_val = 0;
  
  /*
   * Return a number 0..0xffffffff that's different from the last
   * 0xffffffff numbers this returned.  Used for unlinked files.
!  * From NFS nfs_subr.c newnum
   */
  uint32_t
  smbfs_newnum(void)
  {
  	uint32_t id;
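
To show how the new _FIODIRECTIO path is reached from user land (not part of the change
itself): on illumos, directio(3C) issues the _FIODIRECTIO ioctl with DIRECTIO_ON or
DIRECTIO_OFF, which can now be handled by smbfs_directio() above; the ioctl dispatch
itself is not in this file. A minimal sketch, with a hypothetical file path:

	#include <sys/types.h>
	#include <sys/fcntl.h>		/* directio(), DIRECTIO_ON/DIRECTIO_OFF */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int
	main(void)
	{
		/* Hypothetical file on an smbfs mount. */
		int fd = open("/mnt/smb/bigfile", O_RDWR);

		if (fd < 0) {
			perror("open");
			return (1);
		}

		/*
		 * Ask the file system to bypass the page cache for this file.
		 * smbfs_directio() flushes any dirty cached pages (B_INVAL)
		 * before it sets RDIRECTIO.
		 */
		if (directio(fd, DIRECTIO_ON) < 0)
			perror("directio on");

		/* ... reads and writes ... */

		(void) directio(fd, DIRECTIO_OFF);
		(void) close(fd);
		return (0);
	}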
*** 1088,1098 ****
  /*
   * initialize resources that are used by smbfs_subr.c
   * this is called from the _init() routine (by way of smbfs_clntinit())
   *
!  * NFS: nfs_subr.c:nfs_subrinit
   */
  int
  smbfs_subrinit(void)
  {
  	ulong_t nsmbnode_max;
--- 1214,1224 ----
  /*
   * initialize resources that are used by smbfs_subr.c
   * this is called from the _init() routine (by way of smbfs_clntinit())
   *
!  * From NFS: nfs_subr.c:nfs_subrinit
   */
  int
  smbfs_subrinit(void)
  {
  	ulong_t nsmbnode_max;
*** 1132,1142 ****
  	return (0);
  }
  
  /*
   * free smbfs hash table, etc.
!  * NFS: nfs_subr.c:nfs_subrfini
   */
  void
  smbfs_subrfini(void)
  {
--- 1258,1268 ----
  	return (0);
  }
  
  /*
   * free smbfs hash table, etc.
!  * From NFS: nfs_subr.c:nfs_subrfini
   */
  void
  smbfs_subrfini(void)
  {
*** 1207,1213 ****
  smbfs_kmem_reclaim(void *cdrarg)
  {
  	smbfs_node_reclaim();
  }
  
! /* nfs failover stuff */
! /* nfs_rw_xxx - see smbfs_rwlock.c */
--- 1333,1341 ----
  smbfs_kmem_reclaim(void *cdrarg)
  {
  	smbfs_node_reclaim();
  }
  
! /*
!  * Here NFS has failover stuff and
!  * nfs_rw_xxx - see smbfs_rwlock.c
!  */