*** 155,167 ****
{
	vsecattr_t ovsa;
	cred_t *oldcr;
	char *orpath;
	int orplen;
	/*
! 	 * Flush and invalidate all pages (todo)
	 * Free any held credentials and caches...
	 * etc. (See NFS code)
	 */
	mutex_enter(&np->r_statelock);
--- 155,168 ----
{
	vsecattr_t ovsa;
	cred_t *oldcr;
	char *orpath;
	int orplen;
+ 	vnode_t *vp;
	/*
! 	 * Flush and invalidate all pages
	 * Free any held credentials and caches...
	 * etc. (See NFS code)
	 */
	mutex_enter(&np->r_statelock);
*** 177,186 ****
--- 178,192 ----
	np->n_rpath = NULL;
	np->n_rplen = 0;
	mutex_exit(&np->r_statelock);
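+ 	/*
+ 	 * The node is going away; invalidate any pages still
+ 	 * cached for its vnode.
+ 	 */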
+ 	vp = SMBTOV(np);
+ 	if (vn_has_cached_data(vp)) {
+ 		smbfs_invalidate_pages(vp, (u_offset_t)0, oldcr);
+ 	}
+
	if (ovsa.vsa_aclentp != NULL)
		kmem_free(ovsa.vsa_aclentp, ovsa.vsa_aclentsz);
	if (oldcr != NULL)
		crfree(oldcr);
*** 1038,1050 ****
 * Flush all vnodes in this (or every) vfs.
 * Used by nfs_sync and by nfs_unmount.
 */
/*ARGSUSED*/
void
! smbfs_rflush(struct vfs *vfsp, cred_t *cr)
! {
! 	/* Todo: mmap support. */
}
/* access cache */
/* client handles */
--- 1044,1092 ----
 * Flush all vnodes in this (or every) vfs.
 * Used by nfs_sync and by nfs_unmount.
 */
/*ARGSUSED*/
void
! smbfs_rflush(struct vfs *vfsp, cred_t *cr)
! {
! 	smbmntinfo_t *mi;
! 	smbnode_t *np;
! 	vnode_t *vp;
! 	long num, cnt;
! 	vnode_t **vplist;
!
! 	mi = VFTOSMI(vfsp);
!
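! 	/*
! 	 * Make room for every node currently in this mount's
! 	 * AVL tree, on the assumption that all of them may
! 	 * need flushing.
! 	 */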
! 	cnt = 0;
! 	num = mi->smi_hash_avl.avl_numnodes;
! 	vplist = kmem_alloc(num * sizeof (vnode_t *), KM_SLEEP);
!
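! 	/*
! 	 * Walk the node tree, making a list of the files that
! 	 * have cached pages and are either dirty or mmap'd.
! 	 * Skip vnodes on read-only mounts.
! 	 */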
! 	rw_enter(&mi->smi_hash_lk, RW_READER);
! 	for (np = avl_first(&mi->smi_hash_avl); np != NULL;
! 	    np = avl_walk(&mi->smi_hash_avl, np, AVL_AFTER)) {
! 		vp = SMBTOV(np);
! 		if (vn_is_readonly(vp))
! 			continue;
!
! 		if (vn_has_cached_data(vp) &&
! 		    ((np->r_flags & RDIRTY) || np->r_mapcnt > 0)) {
! 			VN_HOLD(vp);
! 			vplist[cnt++] = vp;
! 			if (cnt == num)
! 				break;
! 		}
! 	}
! 	rw_exit(&mi->smi_hash_lk);
!
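! 	/*
! 	 * Flush and release each of the files found above.
! 	 */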
! 	while (cnt-- > 0) {
! 		vp = vplist[cnt];
! 		(void) VOP_PUTPAGE(vp, 0, 0, 0, cr, NULL);
! 		VN_RELE(vp);
! 	}
!
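! 	/*
! 	 * Free the space used to hold the list.
! 	 */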
! 	kmem_free(vplist, num * sizeof (vnode_t *));
}
/* access cache */
/* client handles */