 */
/*
 * Copyright (c) 2017 by Delphix. All rights reserved.
 */

/*
 * Node hash implementation initially borrowed from NFS (nfs_subr.c)
 * but then heavily modified. It's no longer an array of hash lists,
 * but an AVL tree per mount point. More on this below.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/bitmap.h>
#include <sys/dnlc.h>
#include <sys/kmem.h>
#include <sys/sunddi.h>
#include <sys/sysmacros.h>
#include <sys/fcntl.h>

#include <netsmb/smb_osdep.h>

#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_subr.h>
#include <netsmb/smb_rq.h>

#include <smbfs/smbfs.h>
#include <smbfs/smbfs_node.h>
#include <smbfs/smbfs_subr.h>

/*
 * The AVL trees (now per-mount) allow finding an smbfs node by its
 * full remote path name. It also allows easy traversal of all nodes
 * below (path wise) any given node. A reader/writer lock for each
 * (per mount) AVL tree is used to control access and to synchronize
 * lookups, additions, and deletions from that AVL tree.
 *
 * Previously, this code used a global array of hash chains, each with
 * Local functions.
 * SN for Smb Node
 */
static void	sn_rmfree(smbnode_t *);
static void	sn_inactive(smbnode_t *);
static void	sn_addhash_locked(smbnode_t *, avl_index_t);
static void	sn_rmhash_locked(smbnode_t *);
static void	sn_destroy_node(smbnode_t *);
void		smbfs_kmem_reclaim(void *cdrarg);

static smbnode_t *
sn_hashfind(smbmntinfo_t *, const char *, int, avl_index_t *);

static smbnode_t *
make_smbnode(smbmntinfo_t *, const char *, int, int *);

/*
 * Free the resources associated with an smbnode.
 * Note: This is different from smbfs_inactive
 *
 * From NFS: nfs_subr.c:rinactive
 */
static void
sn_inactive(smbnode_t *np)
{
	vsecattr_t ovsa;
	cred_t *oldcr;
	char *orpath;
	int orplen;
	vnode_t *vp;

	/*
	 * Here NFS has:
	 * Flush and invalidate all pages (done by caller)
	 * Free any held credentials and caches...
	 * etc. (See NFS code)
	 */
	mutex_enter(&np->r_statelock);

	ovsa = np->r_secattr;
	np->r_secattr = smbfs_vsa0;
	np->r_sectime = 0;

	oldcr = np->r_cred;
	np->r_cred = NULL;

	orpath = np->n_rpath;
	orplen = np->n_rplen;
	np->n_rpath = NULL;
	np->n_rplen = 0;

	mutex_exit(&np->r_statelock);

	vp = SMBTOV(np);
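	/*
	 * Page flushing is "done by caller" (see above), so no
	 * cached pages should remain by the time we get here;
	 * the always-false assert below panics a DEBUG kernel
	 * if any still do.
	 */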
	if (vn_has_cached_data(vp)) {
		ASSERT3P(vp, ==, NULL);
	}

	if (ovsa.vsa_aclentp != NULL)
		kmem_free(ovsa.vsa_aclentp, ovsa.vsa_aclentsz);

	if (oldcr != NULL)
		crfree(oldcr);

	if (orpath != NULL)
		kmem_free(orpath, orplen + 1);
}

/*
 * Find and optionally create an smbnode for the passed
 * mountinfo, directory, separator, and name. If the
 * desired smbnode already exists, return a reference.
 * If the file attributes pointer is non-null, the node
 * is created if necessary and linked into the AVL tree.
 *
 * Callers that need a node created but don't have the
 * real attributes pass smbfs_fattr0 to force creation.
 *
 * Note: make_smbnode() may upgrade the "hash" lock to exclusive.
 *
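 * For example (illustrative values), a lookup of "foo" under
 * the directory "\dir" would pass dirnm="\dir", dirlen=4,
 * name="foo", nmlen=3, sep='\\', and find or create the node
 * for the remote path "\dir\foo".
 *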
 * Based on NFS: nfs_subr.c:makenfsnode
 */
smbnode_t *
smbfs_node_findcreate(
	smbmntinfo_t *mi,
	const char *dirnm,
	int dirlen,
	const char *name,
	int nmlen,
	char sep,
	struct smbfattr *fap)
{
	char tmpbuf[256];
	size_t rpalloc;
	char *p, *rpath;
	int rplen;
	smbnode_t *np;
	vnode_t *vp;
	int newnode;

	/*
		 * Caller is "just looking" (no create)
		 * so np may or may not be NULL here.
		 * Either way, we're done.
		 */
		return (np);
	}

	/*
	 * We should have a node, possibly created.
	 * Do we have (real) attributes to apply?
	 */
	ASSERT(np != NULL);
	if (fap == &smbfs_fattr0)
		return (np);

	/*
	 * Apply the given attributes to this node,
	 * dealing with any cache impact, etc.
	 */
	vp = SMBTOV(np);
	smbfs_attrcache_fa(vp, fap);

	/*
	 * Note NFS sets vp->v_type here, assuming it
	 * can never change for the life of a node.
	 * We allow v_type to change, and set it in
	 * smbfs_attrcache(). Also: mode, uid, gid
	 */
	return (np);
}

/*
 * Here NFS has: nfs_subr.c:rtablehash
 * We use smbfs_hash().
 */

/*
 * Find or create an smbnode.
 * From NFS: nfs_subr.c:make_rnode
 */
static smbnode_t *
make_smbnode(
	smbmntinfo_t *mi,
	const char *rpath,
	int rplen,
	int *newnode)
{
	smbnode_t *np;
	smbnode_t *tnp;
	vnode_t *vp;
	vfs_t *vfsp;
	avl_index_t where;
	char *new_rpath = NULL;

	ASSERT(RW_READ_HELD(&mi->smi_hash_lk));
	vfsp = mi->smi_vfsp;

start:
	np = sn_hashfind(mi, rpath, rplen, NULL);
	bcopy(rpath, new_rpath, rplen);
	new_rpath[rplen] = '\0';

	/* Initialize smbnode_t */
	bzero(np, sizeof (*np));

	smbfs_rw_init(&np->r_rwlock, NULL, RW_DEFAULT, NULL);
	smbfs_rw_init(&np->r_lkserlock, NULL, RW_DEFAULT, NULL);
	mutex_init(&np->r_statelock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&np->r_cv, NULL, CV_DEFAULT, NULL);
	/* cv_init(&np->r_commit.c_cv, NULL, CV_DEFAULT, NULL); */

	np->r_vnode = vp;
	np->n_mount = mi;

	np->n_fid = SMB_FID_UNUSED;
	np->n_uid = mi->smi_uid;
	np->n_gid = mi->smi_gid;
	/* Leave attributes "stale." */

	/*
	 * Here NFS has avl_create(&np->r_dir, ...)
	 * for the readdir cache (not used here).
	 */

	/* Now fill in the vnode. */
	vn_setops(vp, smbfs_vnodeops);
	vp->v_data = (caddr_t)np;
	VFS_HOLD(vfsp);
	vp->v_vfsp = vfsp;
	vp->v_type = VNON;

	/*
	 * We entered with mi->smi_hash_lk held (reader).
	 * Retake it now, (as the writer).
	 * Will return with it held.
	 */
	rw_enter(&mi->smi_hash_lk, RW_WRITER);

	/*
	 * There is a race condition where someone else
	 * may alloc the smbnode while no locks are held,
	 * so check again and recover if found.
	 */
	 * this node into the node cache (AVL tree).
	 */
	np->n_rpath = new_rpath;
	np->n_rplen = rplen;
	np->n_ino = smbfs_gethash(new_rpath, rplen);

	sn_addhash_locked(np, where);
	*newnode = 1;
	return (np);
}

/*
 * smbfs_addfree
 * Put an smbnode on the free list, or destroy it immediately
 * if it offers no value were it to be reclaimed later. Also
 * destroy immediately when we have too many smbnodes, etc.
 *
 * Normally called by smbfs_inactive, but also
 * called in here during cleanup operations.
 *
 * From NFS: nfs_subr.c:rp_addfree
 */
void
smbfs_addfree(smbnode_t *np)
{
	vnode_t *vp;
	struct vfs *vfsp;
	smbmntinfo_t *mi;

	ASSERT(np->r_freef == NULL && np->r_freeb == NULL);

	vp = SMBTOV(np);
	ASSERT(vp->v_count >= 1);

	vfsp = vp->v_vfsp;
	mi = VFTOSMI(vfsp);

	/*
	 * If there are no more references to this smbnode and:
	 * we have too many smbnodes allocated, or if the node
	 * is no longer accessible via the AVL tree (!RHASHED),
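	/*
	 * Put np on the free list: if the list is empty, make
	 * np a ring of one; otherwise link it in at the tail
	 * (smbfreelist is the head, head->r_freeb the tail).
	 */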
		np->r_freef = np;
		np->r_freeb = np;
		smbfreelist = np;
	} else {
		np->r_freef = smbfreelist;
		np->r_freeb = smbfreelist->r_freeb;
		smbfreelist->r_freeb->r_freef = np;
		smbfreelist->r_freeb = np;
	}
	mutex_exit(&smbfreelist_lock);

	rw_exit(&mi->smi_hash_lk);
}

/*
 * Remove an smbnode from the free list.
 *
 * The caller must be holding smbfreelist_lock and the smbnode
 * must be on the freelist.
 *
 * From NFS: nfs_subr.c:rp_rmfree
 */
static void
sn_rmfree(smbnode_t *np)
{

	ASSERT(MUTEX_HELD(&smbfreelist_lock));
	ASSERT(np->r_freef != NULL && np->r_freeb != NULL);

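	/*
	 * If np is the head of the list, advance the head;
	 * if np was the only node, the list becomes empty.
	 */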
	if (np == smbfreelist) {
		smbfreelist = np->r_freef;
		if (np == smbfreelist)
			smbfreelist = NULL;
	}

	np->r_freeb->r_freef = np->r_freef;
	np->r_freef->r_freeb = np->r_freeb;

	np->r_freef = np->r_freeb = NULL;
}

/*
 * Put an smbnode in the "hash" AVL tree.
 *
 * The caller must hold the rwlock as writer.
 *
 * From NFS: nfs_subr.c:rp_addhash
 */
static void
sn_addhash_locked(smbnode_t *np, avl_index_t where)
{
	smbmntinfo_t *mi = np->n_mount;

	ASSERT(RW_WRITE_HELD(&mi->smi_hash_lk));

	mutex_enter(&np->r_statelock);
	if ((np->r_flags & RHASHED) == 0) {
		avl_insert(&mi->smi_hash_avl, np, where);
		np->r_flags |= RHASHED;
	}
	mutex_exit(&np->r_statelock);
}

/*
 * Remove an smbnode from the "hash" AVL tree.
 *
 * The caller must hold the rwlock as writer.
 *
 * From NFS: nfs_subr.c:rp_rmhash_locked
 */
static void
sn_rmhash_locked(smbnode_t *np)
{
	smbmntinfo_t *mi = np->n_mount;

	ASSERT(RW_WRITE_HELD(&mi->smi_hash_lk));

	mutex_enter(&np->r_statelock);
	if ((np->r_flags & RHASHED) != 0) {
		np->r_flags &= ~RHASHED;
		avl_remove(&mi->smi_hash_avl, np);
	}
	mutex_exit(&np->r_statelock);
}

/*
 * Remove an smbnode from the "hash" AVL tree.
 *
 * The caller must not be holding the rwlock.
 */
void
smbfs_rmhash(smbnode_t *np)
{
	smbmntinfo_t *mi = np->n_mount;

	rw_enter(&mi->smi_hash_lk, RW_WRITER);
	sn_rmhash_locked(np);
	rw_exit(&mi->smi_hash_lk);
}

/*
 * Lookup an smbnode by remote pathname
 *
 * The caller must be holding the AVL rwlock, either shared or exclusive.
 *
 * From NFS: nfs_subr.c:rfind
 */
static smbnode_t *
sn_hashfind(
	smbmntinfo_t *mi,
	const char *rpath,
	int rplen,
	avl_index_t *pwhere) /* optional */
{
	smbfs_node_hdr_t nhdr;
	smbnode_t *np;
	vnode_t *vp;

	ASSERT(RW_LOCK_HELD(&mi->smi_hash_lk));

	bzero(&nhdr, sizeof (nhdr));
	nhdr.hdr_n_rpath = (char *)rpath;
	nhdr.hdr_n_rplen = rplen;
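	/*
	 * A stack-local smbfs_node_hdr_t works as the AVL search
	 * key here because smbfs_node_cmp (below) compares only
	 * hdr_n_rpath and hdr_n_rplen.
	 */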

	/* See smbfs_node_cmp below. */
	np = avl_find(&mi->smi_hash_avl, &nhdr, pwhere);

	rw_exit(&mi->smi_hash_lk);
}

#ifdef SMB_VNODE_DEBUG
int smbfs_check_table_debug = 1;
#else /* SMB_VNODE_DEBUG */
int smbfs_check_table_debug = 0;
#endif /* SMB_VNODE_DEBUG */


/*
 * Return nonzero if there is an active vnode belonging to this vfs in the
 * smbnode cache.
 *
 * Several of these checks are done without holding the usual
 * locks. This is safe because destroy_smbtable(), smbfs_addfree(),
 * etc. will redo the necessary checks before actually destroying
 * any smbnodes.
 *
 * From NFS: nfs_subr.c:check_rtable
 *
 * Debugging changes here relative to NFS.
 * Relatively harmless, so left 'em in.
 */
int
smbfs_check_table(struct vfs *vfsp, smbnode_t *rtnp)
{
	smbmntinfo_t *mi;
	smbnode_t *np;
	vnode_t *vp;
	int busycnt = 0;

	mi = VFTOSMI(vfsp);
	rw_enter(&mi->smi_hash_lk, RW_READER);
	for (np = avl_first(&mi->smi_hash_avl); np != NULL;
	    np = avl_walk(&mi->smi_hash_avl, np, AVL_AFTER)) {

		if (np == rtnp)
			continue; /* skip the root */
		vp = SMBTOV(np);
		if (np->r_count > 0) {
			SMBVDEBUG("+r_count: node=0x%p, rpath=%s\n",
			    (void *)np, np->n_rpath);
			busycnt++;
		}

		if (busycnt && !smbfs_check_table_debug)
			break;

	}
	rw_exit(&mi->smi_hash_lk);

	return (busycnt);
}

/*
 * Destroy inactive vnodes from the AVL tree which belong to this
 * vfs. It is essential that we destroy all inactive vnodes during a
 * forced unmount as well as during a normal unmount.
 *
 * Based on NFS: nfs_subr.c:destroy_rtable
 *
 * In here, we're normally destroying all or most of the AVL tree,
 * so the natural choice is to use avl_destroy_nodes. However,
 * there may be a few busy nodes that should remain in the AVL
 * tree when we're done. The solution: use a temporary tree to
 * hold the busy nodes until we're done destroying the old tree,
 * then copy the temporary tree over the (now empty) real tree.
 */
void
smbfs_destroy_table(struct vfs *vfsp)
{
	avl_tree_t tmp_avl;
	smbmntinfo_t *mi;
	smbnode_t *np;
	smbnode_t *rlist;
	void *v;

	mi = VFTOSMI(vfsp);
	rlist = NULL;
	smbfs_init_hash_avl(&tmp_avl);
	 */
	mi->smi_hash_avl = tmp_avl;
	rw_exit(&mi->smi_hash_lk);

	/*
	 * Now destroy the nodes on our temporary list (rlist).
	 * This call to smbfs_addfree will end up destroying the
	 * smbnode, but in a safe way with the appropriate set
	 * of checks done.
	 */
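	/*
	 * Note: rlist is chained through r_avl_node.avl_child[0],
	 * which is safe to reuse as a "next" pointer here because
	 * these nodes have already been removed from the AVL tree.
	 */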
	while ((np = rlist) != NULL) {
		rlist = (smbnode_t *)np->r_avl_node.avl_child[0];
		smbfs_addfree(np);
	}
}

/*
 * This routine destroys all the resources associated with the smbnode
 * and then the smbnode itself. Note: sn_inactive has been called.
 *
 * From NFS: nfs_subr.c:destroy_rnode
 */
static void
sn_destroy_node(smbnode_t *np)
{
	vnode_t *vp;
	vfs_t *vfsp;

	vp = SMBTOV(np);
	vfsp = vp->v_vfsp;

	ASSERT(vp->v_count == 1);
	ASSERT(np->r_count == 0);
	ASSERT(np->r_mapcnt == 0);
	ASSERT(np->r_secattr.vsa_aclentp == NULL);
	ASSERT(np->r_cred == NULL);
	ASSERT(np->n_rpath == NULL);
	ASSERT(!(np->r_flags & RHASHED));
	ASSERT(np->r_freef == NULL && np->r_freeb == NULL);
	atomic_dec_ulong((ulong_t *)&smbnodenew);
	vn_invalid(vp);
	vn_free(vp);
	kmem_cache_free(smbnode_cache, np);
	VFS_RELE(vfsp);
}

/*
 * From NFS rflush()
 * Flush all vnodes in this (or every) vfs.
 * Used by smbfs_sync and by smbfs_unmount.
 */
/*ARGSUSED*/
void
smbfs_rflush(struct vfs *vfsp, cred_t *cr)
{
	smbmntinfo_t *mi;
	smbnode_t *np;
	vnode_t *vp, **vplist;
	long num, cnt;

	mi = VFTOSMI(vfsp);

	/*
	 * Check to see whether there is anything to do.
	 */
	num = avl_numnodes(&mi->smi_hash_avl);
	if (num == 0)
		return;

	/*
	 * Allocate a slot for all currently active smbnodes on the
	 * supposition that they all may need flushing.
	 */
	vplist = kmem_alloc(num * sizeof (*vplist), KM_SLEEP);
	cnt = 0;

	/*
	 * Walk the AVL tree looking for smbnodes with page
	 * lists associated with them. Make a list of these
	 * files.
	 */
	rw_enter(&mi->smi_hash_lk, RW_READER);
	for (np = avl_first(&mi->smi_hash_avl); np != NULL;
	    np = avl_walk(&mi->smi_hash_avl, np, AVL_AFTER)) {
		vp = SMBTOV(np);
		/*
		 * Don't bother sync'ing a vp if it
		 * is part of virtual swap device or
		 * if VFS is read-only
		 */
		if (IS_SWAPVP(vp) || vn_is_readonly(vp))
			continue;
		/*
		 * If the vnode has pages and is marked as either
		 * dirty or mmap'd, hold and add this vnode to the
		 * list of vnodes to flush.
		 */
		if (vn_has_cached_data(vp) &&
		    ((np->r_flags & RDIRTY) || np->r_mapcnt > 0)) {
			VN_HOLD(vp);
			vplist[cnt++] = vp;
			if (cnt == num)
				break;
		}
	}
	rw_exit(&mi->smi_hash_lk);

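	/*
	 * The hash lock is dropped now; the VN_HOLDs taken above
	 * keep these vnodes alive until we release them below.
	 */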
	/*
	 * Flush and release all of the files on the list.
	 */
	while (cnt-- > 0) {
		vp = vplist[cnt];
		(void) VOP_PUTPAGE(vp, (u_offset_t)0, 0, B_ASYNC, cr, NULL);
		VN_RELE(vp);
	}

	kmem_free(vplist, num * sizeof (vnode_t *));
}

/* Here NFS has access cache stuff (nfs_subr.c), not used here. */

/*
 * Set or Clear direct I/O flag
 * VOP_RWLOCK() is held for write access to prevent a race condition
 * which would occur if a process is in the middle of a write when the
 * directio flag gets set. It is possible that all pages may not get flushed.
 * From nfs_common.c
 */

/* ARGSUSED */
int
smbfs_directio(vnode_t *vp, int cmd, cred_t *cr)
{
	int error = 0;
	smbnode_t *np;

	np = VTOSMB(vp);

	if (cmd == DIRECTIO_ON) {

		if (np->r_flags & RDIRECTIO)
			return (0);

		/*
		 * Flush the page cache.
		 */

		(void) VOP_RWLOCK(vp, V_WRITELOCK_TRUE, NULL);

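		/*
		 * Re-check under the write lock; another thread
		 * may have enabled direct I/O while we waited.
		 */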
		if (np->r_flags & RDIRECTIO) {
			VOP_RWUNLOCK(vp, V_WRITELOCK_TRUE, NULL);
			return (0);
		}

		/* Here NFS also checks ->r_awcount */
		if (vn_has_cached_data(vp) &&
		    (np->r_flags & RDIRTY) != 0) {
			error = VOP_PUTPAGE(vp, (offset_t)0, (uint_t)0,
			    B_INVAL, cr, NULL);
			if (error) {
				if (error == ENOSPC || error == EDQUOT) {
					mutex_enter(&np->r_statelock);
					if (!np->r_error)
						np->r_error = error;
					mutex_exit(&np->r_statelock);
				}
				VOP_RWUNLOCK(vp, V_WRITELOCK_TRUE, NULL);
				return (error);
			}
		}

		mutex_enter(&np->r_statelock);
		np->r_flags |= RDIRECTIO;
		mutex_exit(&np->r_statelock);
		VOP_RWUNLOCK(vp, V_WRITELOCK_TRUE, NULL);
		return (0);
	}

	if (cmd == DIRECTIO_OFF) {
		mutex_enter(&np->r_statelock);
		np->r_flags &= ~RDIRECTIO;	/* disable direct mode */
		mutex_exit(&np->r_statelock);
		return (0);
	}

	return (EINVAL);
}

static kmutex_t smbfs_newnum_lock;
static uint32_t smbfs_newnum_val = 0;

/*
 * Return a number 0..0xffffffff that's different from the last
 * 0xffffffff numbers this returned. Used for unlinked files.
 * From NFS: nfs_subr.c:newnum
 */
uint32_t
smbfs_newnum(void)
{
	uint32_t id;

	mutex_enter(&smbfs_newnum_lock);
	if (smbfs_newnum_val == 0)
		smbfs_newnum_val = (uint32_t)gethrestime_sec();
	id = smbfs_newnum_val++;
	mutex_exit(&smbfs_newnum_lock);
	return (id);
}

/*
 * Fill in a temporary name at buf
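 * (e.g., id 0x5A3C0F12 yields "~$smbfs5A3C0F12")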
 */
int
smbfs_newname(char *buf, size_t buflen)
{
	uint_t id;
	int n;

	id = smbfs_newnum();
	n = snprintf(buf, buflen, "~$smbfs%08X", id);
	return (n);
}


/*
 * Initialize resources that are used by smbfs_subr.c.
 * This is called from the _init() routine (by way of smbfs_clntinit()).
 *
 * From NFS: nfs_subr.c:nfs_subrinit
 */
int
smbfs_subrinit(void)
{
	ulong_t nsmbnode_max;

	/*
	 * Allocate and initialize the smbnode cache
	 */
	if (nsmbnode <= 0)
		nsmbnode = ncsize; /* dnlc.h */
	nsmbnode_max = (ulong_t)((kmem_maxavail() >> 2) /
	    sizeof (struct smbnode));
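	/*
	 * That cap allows the node cache to use at most one
	 * quarter of currently available kernel memory.
	 */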
	if (nsmbnode > nsmbnode_max || (nsmbnode == 0 && ncsize == 0)) {
		zcmn_err(GLOBAL_ZONEID, CE_NOTE,
		    "setting nsmbnode to max value of %ld", nsmbnode_max);
		nsmbnode = nsmbnode_max;
	}

	smbnode_cache = kmem_cache_create("smbnode_cache", sizeof (smbnode_t),
	 * Initialize the various mutexes and reader/writer locks
	 */
	mutex_init(&smbfreelist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&smbfs_minor_lock, NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Assign unique major number for all smbfs mounts
	 */
	if ((smbfs_major = getudev()) == -1) {
		zcmn_err(GLOBAL_ZONEID, CE_WARN,
		    "smbfs: init: can't get unique device number");
		smbfs_major = 0;
	}
	smbfs_minor = 0;

	return (0);
}

/*
 * free smbfs hash table, etc.
 * From NFS: nfs_subr.c:nfs_subrfini
 */
void
smbfs_subrfini(void)
{

	/*
	 * Destroy the smbnode cache
	 */
	kmem_cache_destroy(smbnode_cache);

	/*
	 * Destroy the various mutexes and reader/writer locks
	 */
	mutex_destroy(&smbfreelist_lock);
	mutex_destroy(&smbfs_minor_lock);
}

/* rddir_cache ? */

/*
 */
		smbfs_addfree(np);
		mutex_enter(&smbfreelist_lock);
	}
	mutex_exit(&smbfreelist_lock);
}

/*
 * Called by the kmem subsystem when it wants to ask us:
 * "Please give back some memory!"
 *
 * Todo: dump nodes from the free list?
 */
/*ARGSUSED*/
void
smbfs_kmem_reclaim(void *cdrarg)
{
	smbfs_node_reclaim();
}

/*
 * Here NFS has failover stuff and
 * nfs_rw_xxx - see smbfs_rwlock.c
 */