@@ -155,13 +155,14 @@
{
vsecattr_t ovsa;
cred_t *oldcr;
char *orpath;
int orplen;
+	vnode_t *vp;
/*
- * Flush and invalidate all pages (todo)
+ * Flush and invalidate all pages
* Free any held credentials and caches...
* etc. (See NFS code)
*/
mutex_enter(&np->r_statelock);
@@ -177,10 +178,15 @@
np->n_rpath = NULL;
np->n_rplen = 0;
mutex_exit(&np->r_statelock);
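+	/*
+	 * If this node has cached pages, invalidate them
+	 * now, while we still hold the old credentials.
+	 */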
+	vp = SMBTOV(np);
+	if (vn_has_cached_data(vp)) {
+		smbfs_invalidate_pages(vp, (u_offset_t)0, oldcr);
+	}
+
if (ovsa.vsa_aclentp != NULL)
kmem_free(ovsa.vsa_aclentp, ovsa.vsa_aclentsz);
if (oldcr != NULL)
crfree(oldcr);
@@ -1033,18 +1039,60 @@
kmem_cache_free(smbnode_cache, np);
VFS_RELE(vfsp);
}
/*
+ * Corresponds to rflush() in NFS.
* Flush all vnodes in this (or every) vfs.
- * Used by nfs_sync and by nfs_unmount.
+ * Used by smbfs_sync and by smbfs_unmount.
*/
/*ARGSUSED*/
void
-smbfs_rflush(struct vfs *vfsp, cred_t *cr)
-{
- /* Todo: mmap support. */
+smbfs_rflush(struct vfs *vfsp, cred_t *cr)
+{
+	smbmntinfo_t *mi;
+	smbnode_t *np;
+	vnode_t *vp, **vplist;
+	long num, cnt;
+
+	if (vfsp == NULL)
+		return;
+
+	mi = VFTOSMI(vfsp);
+
+	/*
+	 * See whether there is anything to do.
+	 */
+	num = mi->smi_hash_avl.avl_numnodes;
+	if (num == 0)
+		return;
+
+	/*
+	 * Allocate a slot for every currently active node,
+	 * on the assumption that all of them may need flushing.
+	 */
+	vplist = kmem_alloc(num * sizeof (vnode_t *), KM_SLEEP);
+	cnt = 0;
+
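+	/*
+	 * Walk the list of smbnodes on this mount, looking
+	 * for ones with pages that need to be flushed.
+	 */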
+	rw_enter(&mi->smi_hash_lk, RW_READER);
+	for (np = avl_first(&mi->smi_hash_avl); np != NULL;
+	    np = avl_walk(&mi->smi_hash_avl, np, AVL_AFTER)) {
+		vp = SMBTOV(np);
+
+		/*
+		 * Don't bother with vnodes on read-only mounts.
+		 */
+		if (vn_is_readonly(vp))
+			continue;
+
+		/*
+		 * If the vnode has pages and is either dirty or
+		 * mmap'd, hold it and add it to the list.
+		 */
+		if (vn_has_cached_data(vp) &&
+		    ((np->r_flags & RDIRTY) || np->r_mapcnt > 0)) {
+			VN_HOLD(vp);
+			vplist[cnt++] = vp;
+			if (cnt == num)
+				break;
+		}
+	}
+	rw_exit(&mi->smi_hash_lk);
+
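+	/*
+	 * Flush and release all of the files on the list.
+	 */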
+	while (cnt-- > 0) {
+		vp = vplist[cnt];
+		(void) VOP_PUTPAGE(vp, (u_offset_t)0, 0, 0, cr, NULL);
+		VN_RELE(vp);
+	}
+
+	/*
+	 * Free the space allocated to hold the list.
+	 */
+	kmem_free(vplist, num * sizeof (vnode_t *));
}
/* access cache */
/* client handles */