5404 smbfs needs mmap support
Implement ioctl _FIODIRECTIO
Lots of comment cleanup
cstyle
Portions contributed by: Gordon Ross <gordon.w.ross@gmail.com>
--- old/usr/src/uts/common/fs/smbclnt/smbfs/smbfs_subr2.c
+++ new/usr/src/uts/common/fs/smbclnt/smbfs/smbfs_subr2.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 *
25 25 * Copyright (c) 1983,1984,1985,1986,1987,1988,1989 AT&T.
26 26 * All rights reserved.
27 27 */
28 28 /*
29 29 * Copyright (c) 2017 by Delphix. All rights reserved.
30 30 */
31 31
32 32 /*
33 33 * Node hash implementation initially borrowed from NFS (nfs_subr.c)
34 34 * but then heavily modified. It's no longer an array of hash lists,
35 35 * but an AVL tree per mount point. More on this below.
36 36 */
37 37
38 38 #include <sys/param.h>
39 39 #include <sys/systm.h>
40 40 #include <sys/time.h>
41 41 #include <sys/vnode.h>
42 42 #include <sys/bitmap.h>
43 43 #include <sys/dnlc.h>
44 44 #include <sys/kmem.h>
45 45 #include <sys/sunddi.h>
46 46 #include <sys/sysmacros.h>
47 +#include <sys/fcntl.h>
47 48
48 49 #include <netsmb/smb_osdep.h>
49 50
50 51 #include <netsmb/smb.h>
51 52 #include <netsmb/smb_conn.h>
52 53 #include <netsmb/smb_subr.h>
53 54 #include <netsmb/smb_rq.h>
54 55
55 56 #include <smbfs/smbfs.h>
56 57 #include <smbfs/smbfs_node.h>
57 58 #include <smbfs/smbfs_subr.h>
58 59
59 60 /*
60 61 * The AVL trees (now per-mount) allow finding an smbfs node by its
61 62 * full remote path name. It also allows easy traversal of all nodes
62 63 * below (path wise) any given node. A reader/writer lock for each
63 64 * (per mount) AVL tree is used to control access and to synchronize
64 65 * lookups, additions, and deletions from that AVL tree.
65 66 *
66 67 * Previously, this code used a global array of hash chains, each with
67 68 * its own rwlock. A few struct members, functions, and comments may
68 69 * still refer to a "hash", and those should all now be considered to
69 70 * refer to the per-mount AVL tree that replaced the old hash chains.
70 71 * (i.e. member smi_hash_lk, function sn_hashfind, etc.)
71 72 *
72 73 * The smbnode freelist is organized as a doubly linked list with
73 74 * a head pointer. Additions and deletions are synchronized via
74 75 * a single mutex.
75 76 *
76 77 * In order to add an smbnode to the free list, it must be linked into
77 78 * the mount's AVL tree and the exclusive lock for the AVL must be held.
78 79 * If an smbnode is not linked into the AVL tree, then it is destroyed
79 80 * because it represents no valuable information that can be reused
80 81 * about the file. The exclusive lock for the AVL tree must be held
81 82 * in order to prevent a lookup in the AVL tree from finding the
82 83 * smbnode and using it and assuming that the smbnode is not on the
83 84 * freelist. The lookup in the AVL tree will have the AVL tree lock
84 85 * held, either exclusive or shared.
85 86 *
86 87 * The vnode reference count for each smbnode is not allowed to drop
87 88 * below 1. This prevents external entities, such as the VM
88 89 * subsystem, from acquiring references to vnodes already on the
89 90 * freelist and then trying to place them back on the freelist
90 91 * when their reference is released. This means that when an
91 92 * smbnode is looked up in the AVL tree, then either the smbnode
92 93 * is removed from the freelist and that reference is transferred to
93 94 * the new reference or the vnode reference count must be incremented
94 95 * accordingly. The mutex for the freelist must be held in order to
95 96 * accurately test to see if the smbnode is on the freelist or not.
96 97 * The AVL tree lock might be held shared and it is possible that
97 98 * two different threads may race to remove the smbnode from the
98 99 * freelist. This race can be resolved by holding the mutex for the
99 100 * freelist. Please note that the mutex for the freelist does not
100 101 * need to be held if the smbnode is not on the freelist. It cannot be
101 102 * placed on the freelist due to the requirement that the thread
102 103 * putting the smbnode on the freelist must hold the exclusive lock
103 104 * for the AVL tree and the thread doing the lookup in the AVL tree
104 105 * is holding either a shared or exclusive lock for the AVL tree.
105 106 *
106 107 * The lock ordering is:
107 108 *
108 109 * AVL tree lock -> vnode lock
109 110 * AVL tree lock -> freelist lock
110 111 */
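
To make the ordering rule concrete, here is a minimal sketch of a path
that needs both locks (a hypothetical helper, not part of this change);
taking the two locks in the opposite order could deadlock against code
like smbfs_addfree():

	/*
	 * Illustration only: the per-mount AVL tree lock is
	 * always taken before the global freelist lock.
	 */
	static void
	example_lock_order(smbmntinfo_t *mi)
	{
		rw_enter(&mi->smi_hash_lk, RW_WRITER);	/* AVL tree lock first */
		mutex_enter(&smbfreelist_lock);		/* then freelist lock */
		/* ... change freelist membership of some smbnode ... */
		mutex_exit(&smbfreelist_lock);
		rw_exit(&mi->smi_hash_lk);
	}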
111 112
112 113 static kmutex_t smbfreelist_lock;
113 114 static smbnode_t *smbfreelist = NULL;
114 115 static ulong_t smbnodenew = 0;
115 116 long nsmbnode = 0;
116 117
117 118 static struct kmem_cache *smbnode_cache;
118 119
119 120 static const vsecattr_t smbfs_vsa0 = { 0 };
120 121
121 122 /*
122 123 * Mutex to protect the following variables:
123 124 * smbfs_major
124 125 * smbfs_minor
125 126 */
126 127 kmutex_t smbfs_minor_lock;
127 128 int smbfs_major;
128 129 int smbfs_minor;
129 130
130 131 /* See smbfs_node_findcreate() */
131 132 struct smbfattr smbfs_fattr0;
132 133
133 134 /*
134 135 * Local functions.
135 136 * SN for Smb Node
136 137 */
137 138 static void sn_rmfree(smbnode_t *);
138 139 static void sn_inactive(smbnode_t *);
139 140 static void sn_addhash_locked(smbnode_t *, avl_index_t);
140 141 static void sn_rmhash_locked(smbnode_t *);
141 142 static void sn_destroy_node(smbnode_t *);
142 143 void smbfs_kmem_reclaim(void *cdrarg);
143 144
144 145 static smbnode_t *
145 146 sn_hashfind(smbmntinfo_t *, const char *, int, avl_index_t *);
146 147
147 148 static smbnode_t *
148 149 make_smbnode(smbmntinfo_t *, const char *, int, int *);
149 150
150 151 /*
151 152 * Free the resources associated with an smbnode.
152 153 * Note: This is different from smbfs_inactive
153 154 *
154 - * NFS: nfs_subr.c:rinactive
155 + * From NFS: nfs_subr.c:rinactive
155 156 */
156 157 static void
157 158 sn_inactive(smbnode_t *np)
158 159 {
159 160 vsecattr_t ovsa;
160 161 cred_t *oldcr;
161 162 char *orpath;
162 163 int orplen;
164 + vnode_t *vp;
163 165
164 166 /*
165 - * Flush and invalidate all pages (todo)
167 + * Here NFS has:
168 + * Flush and invalidate all pages (done by caller)
166 169 * Free any held credentials and caches...
167 170 * etc. (See NFS code)
168 171 */
169 172 mutex_enter(&np->r_statelock);
170 173
171 174 ovsa = np->r_secattr;
172 175 np->r_secattr = smbfs_vsa0;
173 176 np->r_sectime = 0;
174 177
175 178 oldcr = np->r_cred;
176 179 np->r_cred = NULL;
177 180
178 181 orpath = np->n_rpath;
179 182 orplen = np->n_rplen;
180 183 np->n_rpath = NULL;
181 184 np->n_rplen = 0;
182 185
183 186 mutex_exit(&np->r_statelock);
184 187
188 + vp = SMBTOV(np);
189 + if (vn_has_cached_data(vp)) {
190 + ASSERT3P(vp, ==, NULL); /* should never have pages here */
191 + }
192 +
185 193 if (ovsa.vsa_aclentp != NULL)
186 194 kmem_free(ovsa.vsa_aclentp, ovsa.vsa_aclentsz);
187 195
188 196 if (oldcr != NULL)
189 197 crfree(oldcr);
190 198
191 199 if (orpath != NULL)
192 200 kmem_free(orpath, orplen + 1);
193 201 }
194 202
195 203 /*
196 204 * Find and optionally create an smbnode for the passed
197 205 * mountinfo, directory, separator, and name. If the
198 206 * desired smbnode already exists, return a reference.
199 207 * If the file attributes pointer is non-null, the node
200 208 * is created if necessary and linked into the AVL tree.
201 209 *
202 210 * Callers that need a node created but don't have the
203 211 * real attributes pass smbfs_fattr0 to force creation.
204 212 *
205 213 * Note: make_smbnode() may upgrade the "hash" lock to exclusive.
206 214 *
207 - * NFS: nfs_subr.c:makenfsnode
215 + * Based on NFS: nfs_subr.c:makenfsnode
208 216 */
209 217 smbnode_t *
210 218 smbfs_node_findcreate(
211 219 smbmntinfo_t *mi,
212 220 const char *dirnm,
213 221 int dirlen,
214 222 const char *name,
215 223 int nmlen,
216 224 char sep,
217 225 struct smbfattr *fap)
218 226 {
219 227 char tmpbuf[256];
220 228 size_t rpalloc;
221 229 char *p, *rpath;
222 230 int rplen;
223 231 smbnode_t *np;
224 232 vnode_t *vp;
225 233 int newnode;
226 234
227 235 /*
228 236 * Build the search string, either in tmpbuf or
229 237 * in allocated memory if larger than tmpbuf.
230 238 */
231 239 rplen = dirlen;
232 240 if (sep != '\0')
233 241 rplen++;
234 242 rplen += nmlen;
235 243 if (rplen < sizeof (tmpbuf)) {
236 244 /* use tmpbuf */
237 245 rpalloc = 0;
238 246 rpath = tmpbuf;
239 247 } else {
240 248 rpalloc = rplen + 1;
241 249 rpath = kmem_alloc(rpalloc, KM_SLEEP);
242 250 }
243 251 p = rpath;
244 252 bcopy(dirnm, p, dirlen);
245 253 p += dirlen;
246 254 if (sep != '\0')
247 255 *p++ = sep;
248 256 if (name != NULL) {
249 257 bcopy(name, p, nmlen);
250 258 p += nmlen;
251 259 }
252 260 ASSERT(p == rpath + rplen);
253 261
254 262 /*
255 263 * Find or create a node with this path.
256 264 */
257 265 rw_enter(&mi->smi_hash_lk, RW_READER);
258 266 if (fap == NULL)
259 267 np = sn_hashfind(mi, rpath, rplen, NULL);
260 268 else
261 269 np = make_smbnode(mi, rpath, rplen, &newnode);
262 270 rw_exit(&mi->smi_hash_lk);
263 271
264 272 if (rpalloc)
265 273 kmem_free(rpath, rpalloc);
266 274
267 275 if (fap == NULL) {
268 276 /*
269 277 * Caller is "just looking" (no create)
270 278 * so np may or may not be NULL here.
271 279 * Either way, we're done.
272 280 */
273 281 return (np);
274 282 }
275 283
276 284 /*
277 285 * We should have a node, possibly created.
278 286 * Do we have (real) attributes to apply?
279 287 */
280 288 ASSERT(np != NULL);
281 289 if (fap == &smbfs_fattr0)
282 290 return (np);
283 291
284 292 /*
285 293 * Apply the given attributes to this node,
286 294 * dealing with any cache impact, etc.
287 295 */
288 296 vp = SMBTOV(np);
289 - if (!newnode) {
290 - /*
291 - * Found an existing node.
292 - * Maybe purge caches...
293 - */
294 - smbfs_cache_check(vp, fap);
295 - }
296 297 smbfs_attrcache_fa(vp, fap);
297 298
298 299 /*
299 300 * Note NFS sets vp->v_type here, assuming it
300 301 * can never change for the life of a node.
301 302 * We allow v_type to change, and set it in
302 303 * smbfs_attrcache(). Also: mode, uid, gid
303 304 */
304 305 return (np);
305 306 }
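
A minimal usage sketch (hypothetical caller, assuming a held parent
directory node dnp and the usual '\\' separator): passing a NULL fap
asks for a cache-only lookup, and any node returned comes with a
vnode hold that the caller must release.

	smbnode_t *np;

	/* Look in the node cache for "dnp\name"; do not create. */
	np = smbfs_node_findcreate(dnp->n_mount,
	    dnp->n_rpath, dnp->n_rplen, name, nmlen, '\\', NULL);
	if (np != NULL) {
		/* Found: use SMBTOV(np), then drop the hold. */
		VN_RELE(SMBTOV(np));
	}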
306 307
307 308 /*
308 - * NFS: nfs_subr.c:rtablehash
309 + * Here NFS has: nfs_subr.c:rtablehash
309 310 * We use smbfs_hash().
310 311 */
311 312
312 313 /*
313 314 * Find or create an smbnode.
314 - * NFS: nfs_subr.c:make_rnode
315 + * From NFS: nfs_subr.c:make_rnode
315 316 */
316 317 static smbnode_t *
317 318 make_smbnode(
318 319 smbmntinfo_t *mi,
319 320 const char *rpath,
320 321 int rplen,
321 322 int *newnode)
322 323 {
323 324 smbnode_t *np;
324 325 smbnode_t *tnp;
325 326 vnode_t *vp;
326 327 vfs_t *vfsp;
327 328 avl_index_t where;
328 329 char *new_rpath = NULL;
329 330
330 331 ASSERT(RW_READ_HELD(&mi->smi_hash_lk));
331 332 vfsp = mi->smi_vfsp;
332 333
333 334 start:
334 335 np = sn_hashfind(mi, rpath, rplen, NULL);
335 336 if (np != NULL) {
336 337 *newnode = 0;
337 338 return (np);
338 339 }
339 340
340 341 /* Note: will retake this lock below. */
341 342 rw_exit(&mi->smi_hash_lk);
342 343
343 344 /*
344 345 * see if we can find something on the freelist
345 346 */
346 347 mutex_enter(&smbfreelist_lock);
347 348 if (smbfreelist != NULL && smbnodenew >= nsmbnode) {
348 349 np = smbfreelist;
349 350 sn_rmfree(np);
350 351 mutex_exit(&smbfreelist_lock);
351 352
352 353 vp = SMBTOV(np);
353 354
354 355 if (np->r_flags & RHASHED) {
355 356 smbmntinfo_t *tmp_mi = np->n_mount;
356 357 ASSERT(tmp_mi != NULL);
357 358 rw_enter(&tmp_mi->smi_hash_lk, RW_WRITER);
358 359 mutex_enter(&vp->v_lock);
359 360 if (vp->v_count > 1) {
360 361 VN_RELE_LOCKED(vp);
361 362 mutex_exit(&vp->v_lock);
362 363 rw_exit(&tmp_mi->smi_hash_lk);
363 364 /* start over */
364 365 rw_enter(&mi->smi_hash_lk, RW_READER);
365 366 goto start;
366 367 }
367 368 mutex_exit(&vp->v_lock);
368 369 sn_rmhash_locked(np);
369 370 rw_exit(&tmp_mi->smi_hash_lk);
370 371 }
371 372
372 373 sn_inactive(np);
373 374
374 375 mutex_enter(&vp->v_lock);
375 376 if (vp->v_count > 1) {
376 377 VN_RELE_LOCKED(vp);
377 378 mutex_exit(&vp->v_lock);
378 379 rw_enter(&mi->smi_hash_lk, RW_READER);
379 380 goto start;
380 381 }
381 382 mutex_exit(&vp->v_lock);
382 383 vn_invalid(vp);
383 384 /*
384 385 * destroy old locks before bzero'ing and
385 386 * recreating the locks below.
386 387 */
387 388 smbfs_rw_destroy(&np->r_rwlock);
388 389 smbfs_rw_destroy(&np->r_lkserlock);
389 390 mutex_destroy(&np->r_statelock);
390 391 cv_destroy(&np->r_cv);
391 392 /*
392 393 * Make sure that if smbnode is recycled then
393 394 * VFS count is decremented properly before
394 395 * reuse.
395 396 */
396 397 VFS_RELE(vp->v_vfsp);
397 398 vn_reinit(vp);
398 399 } else {
399 400 /*
400 401 * allocate and initialize a new smbnode
401 402 */
402 403 vnode_t *new_vp;
403 404
404 405 mutex_exit(&smbfreelist_lock);
405 406
406 407 np = kmem_cache_alloc(smbnode_cache, KM_SLEEP);
407 408 new_vp = vn_alloc(KM_SLEEP);
408 409
409 410 atomic_inc_ulong((ulong_t *)&smbnodenew);
410 411 vp = new_vp;
411 412 }
412 413
413 414 /*
414 415 * Allocate and copy the rpath we'll need below.
415 416 */
416 417 new_rpath = kmem_alloc(rplen + 1, KM_SLEEP);
417 418 bcopy(rpath, new_rpath, rplen);
418 419 new_rpath[rplen] = '\0';
419 420
420 421 /* Initialize smbnode_t */
421 422 bzero(np, sizeof (*np));
422 423
423 424 smbfs_rw_init(&np->r_rwlock, NULL, RW_DEFAULT, NULL);
424 425 smbfs_rw_init(&np->r_lkserlock, NULL, RW_DEFAULT, NULL);
425 426 mutex_init(&np->r_statelock, NULL, MUTEX_DEFAULT, NULL);
426 427 cv_init(&np->r_cv, NULL, CV_DEFAULT, NULL);
427 428 /* cv_init(&np->r_commit.c_cv, NULL, CV_DEFAULT, NULL); */
428 429
429 430 np->r_vnode = vp;
430 431 np->n_mount = mi;
431 432
432 433 np->n_fid = SMB_FID_UNUSED;
433 434 np->n_uid = mi->smi_uid;
434 435 np->n_gid = mi->smi_gid;
435 436 /* Leave attributes "stale." */
436 437
437 -#if 0 /* XXX dircache */
438 438 /*
439 - * We don't know if it's a directory yet.
440 - * Let the caller do this? XXX
439 + * Here NFS has avl_create(&np->r_dir, ...)
440 + * for the readdir cache (not used here).
441 441 */
442 - avl_create(&np->r_dir, compar, sizeof (rddir_cache),
443 - offsetof(rddir_cache, tree));
444 -#endif
445 442
446 443 /* Now fill in the vnode. */
447 444 vn_setops(vp, smbfs_vnodeops);
448 445 vp->v_data = (caddr_t)np;
449 446 VFS_HOLD(vfsp);
450 447 vp->v_vfsp = vfsp;
451 448 vp->v_type = VNON;
452 449
453 450 /*
454 451 * We entered with mi->smi_hash_lk held (reader).
455 452 * Retake it now, (as the writer).
456 453 * Will return with it held.
457 454 */
458 455 rw_enter(&mi->smi_hash_lk, RW_WRITER);
459 456
460 457 /*
461 458 * There is a race condition where someone else
462 459 * may alloc the smbnode while no locks are held,
463 460 * so check again and recover if found.
464 461 */
465 462 tnp = sn_hashfind(mi, rpath, rplen, &where);
466 463 if (tnp != NULL) {
467 464 /*
468 465 * Lost the race. Put the node we were building
469 466 * on the free list and return the one we found.
470 467 */
471 468 rw_exit(&mi->smi_hash_lk);
472 469 kmem_free(new_rpath, rplen + 1);
473 470 smbfs_addfree(np);
474 471 rw_enter(&mi->smi_hash_lk, RW_READER);
475 472 *newnode = 0;
476 473 return (tnp);
477 474 }
478 475
479 476 /*
480 477 * Hash search identifies nodes by the remote path
481 478 * (n_rpath) so fill that in now, before linking
482 479 * this node into the node cache (AVL tree).
483 480 */
484 481 np->n_rpath = new_rpath;
485 482 np->n_rplen = rplen;
486 483 np->n_ino = smbfs_gethash(new_rpath, rplen);
487 484
488 485 sn_addhash_locked(np, where);
489 486 *newnode = 1;
490 487 return (np);
491 488 }
492 489
493 490 /*
494 491 * smbfs_addfree
495 492 * Put an smbnode on the free list, or destroy it immediately
496 493 * if it offers no value were it to be reclaimed later. Also
497 494 * destroy immediately when we have too many smbnodes, etc.
498 495 *
499 496 * Normally called by smbfs_inactive, but also
500 497 * called in here during cleanup operations.
501 498 *
502 - * NFS: nfs_subr.c:rp_addfree
499 + * From NFS: nfs_subr.c:rp_addfree
503 500 */
504 501 void
505 502 smbfs_addfree(smbnode_t *np)
506 503 {
507 504 vnode_t *vp;
508 505 struct vfs *vfsp;
509 506 smbmntinfo_t *mi;
510 507
511 508 ASSERT(np->r_freef == NULL && np->r_freeb == NULL);
512 509
513 510 vp = SMBTOV(np);
514 511 ASSERT(vp->v_count >= 1);
515 512
516 513 vfsp = vp->v_vfsp;
517 514 mi = VFTOSMI(vfsp);
518 515
519 516 /*
520 517 * If there are no more references to this smbnode and:
521 518 * we have too many smbnodes allocated, or if the node
522 519 * is no longer accessible via the AVL tree (!RHASHED),
523 520 * or an i/o error occurred while writing to the file,
524 521 * or it's part of an unmounted FS, then try to destroy
525 522 * it instead of putting it on the smbnode freelist.
526 523 */
527 524 if (np->r_count == 0 && (
528 525 (np->r_flags & RHASHED) == 0 ||
529 526 (np->r_error != 0) ||
530 527 (vfsp->vfs_flag & VFS_UNMOUNTED) ||
531 528 (smbnodenew > nsmbnode))) {
532 529
533 530 /* Try to destroy this node. */
534 531
535 532 if (np->r_flags & RHASHED) {
536 533 rw_enter(&mi->smi_hash_lk, RW_WRITER);
537 534 mutex_enter(&vp->v_lock);
538 535 if (vp->v_count > 1) {
539 536 VN_RELE_LOCKED(vp);
540 537 mutex_exit(&vp->v_lock);
541 538 rw_exit(&mi->smi_hash_lk);
542 539 return;
543 540 /*
544 541 * Will get another call later,
545 542 * via smbfs_inactive.
546 543 */
547 544 }
548 545 mutex_exit(&vp->v_lock);
549 546 sn_rmhash_locked(np);
550 547 rw_exit(&mi->smi_hash_lk);
551 548 }
552 549
553 550 sn_inactive(np);
554 551
555 552 /*
556 553 * Recheck the vnode reference count. We need to
557 554 * make sure that another reference has not been
558 555 * acquired while we were not holding v_lock. The
559 556 * smbnode is not in the smbnode "hash" AVL tree, so
560 557 * the only way for a reference to have been acquired
561 558 * is for a VOP_PUTPAGE because the smbnode was marked
562 559 * with RDIRTY or for a modified page. This vnode
563 560 * reference may have been acquired before our call
564 561 * to sn_inactive. The i/o may have been completed,
565 562 * thus allowing sn_inactive to complete, but the
566 563 * reference to the vnode may not have been released
567 564 * yet. In any case, the smbnode can not be destroyed
568 565 * until the other references to this vnode have been
569 566 * released. The other references will take care of
570 567 * either destroying the smbnode or placing it on the
571 568 * smbnode freelist. If there are no other references,
572 569 * then the smbnode may be safely destroyed.
573 570 */
574 571 mutex_enter(&vp->v_lock);
575 572 if (vp->v_count > 1) {
576 573 VN_RELE_LOCKED(vp);
577 574 mutex_exit(&vp->v_lock);
578 575 return;
579 576 }
580 577 mutex_exit(&vp->v_lock);
581 578
582 579 sn_destroy_node(np);
583 580 return;
584 581 }
585 582
586 583 /*
587 584 * Lock the AVL tree and then recheck the reference count
588 585 * to ensure that no other threads have acquired a reference
589 586 * to indicate that the smbnode should not be placed on the
590 587 * freelist. If another reference has been acquired, then
591 588 * just release this one and let the other thread complete
592 589 * the processing of adding this smbnode to the freelist.
593 590 */
594 591 rw_enter(&mi->smi_hash_lk, RW_WRITER);
595 592
596 593 mutex_enter(&vp->v_lock);
597 594 if (vp->v_count > 1) {
598 595 VN_RELE_LOCKED(vp);
599 596 mutex_exit(&vp->v_lock);
600 597 rw_exit(&mi->smi_hash_lk);
601 598 return;
602 599 }
603 600 mutex_exit(&vp->v_lock);
604 601
605 602 /*
606 603 * Put this node on the free list.
607 604 */
608 605 mutex_enter(&smbfreelist_lock);
609 606 if (smbfreelist == NULL) {
610 607 np->r_freef = np;
611 608 np->r_freeb = np;
612 609 smbfreelist = np;
613 610 } else {
614 611 np->r_freef = smbfreelist;
615 612 np->r_freeb = smbfreelist->r_freeb;
616 613 smbfreelist->r_freeb->r_freef = np;
617 614 smbfreelist->r_freeb = np;
618 615 }
619 616 mutex_exit(&smbfreelist_lock);
620 617
621 618 rw_exit(&mi->smi_hash_lk);
622 619 }
623 620
624 621 /*
625 622 * Remove an smbnode from the free list.
626 623 *
627 624 * The caller must be holding smbfreelist_lock and the smbnode
628 625 * must be on the freelist.
629 626 *
630 - * NFS: nfs_subr.c:rp_rmfree
627 + * From NFS: nfs_subr.c:rp_rmfree
631 628 */
632 629 static void
633 630 sn_rmfree(smbnode_t *np)
634 631 {
635 632
636 633 ASSERT(MUTEX_HELD(&smbfreelist_lock));
637 634 ASSERT(np->r_freef != NULL && np->r_freeb != NULL);
638 635
639 636 if (np == smbfreelist) {
640 637 smbfreelist = np->r_freef;
641 638 if (np == smbfreelist)
642 639 smbfreelist = NULL;
643 640 }
644 641
645 642 np->r_freeb->r_freef = np->r_freef;
646 643 np->r_freef->r_freeb = np->r_freeb;
647 644
648 645 np->r_freef = np->r_freeb = NULL;
649 646 }
650 647
651 648 /*
652 649 * Put an smbnode in the "hash" AVL tree.
653 650 *
654 651 * The caller must hold the rwlock as writer.
655 652 *
656 - * NFS: nfs_subr.c:rp_addhash
653 + * From NFS: nfs_subr.c:rp_addhash
657 654 */
658 655 static void
659 656 sn_addhash_locked(smbnode_t *np, avl_index_t where)
660 657 {
661 658 smbmntinfo_t *mi = np->n_mount;
662 659
663 660 ASSERT(RW_WRITE_HELD(&mi->smi_hash_lk));
664 661
665 662 mutex_enter(&np->r_statelock);
666 663 if ((np->r_flags & RHASHED) == 0) {
667 664 avl_insert(&mi->smi_hash_avl, np, where);
668 665 np->r_flags |= RHASHED;
669 666 }
670 667 mutex_exit(&np->r_statelock);
671 668 }
672 669
673 670 /*
674 671 * Remove an smbnode from the "hash" AVL tree.
675 672 *
676 673 * The caller must hold the rwlock as writer.
677 674 *
678 - * NFS: nfs_subr.c:rp_rmhash_locked
675 + * From NFS: nfs_subr.c:rp_rmhash_locked
679 676 */
680 677 static void
681 678 sn_rmhash_locked(smbnode_t *np)
682 679 {
683 680 smbmntinfo_t *mi = np->n_mount;
684 681
685 682 ASSERT(RW_WRITE_HELD(&mi->smi_hash_lk));
686 683
687 684 mutex_enter(&np->r_statelock);
688 685 if ((np->r_flags & RHASHED) != 0) {
689 686 np->r_flags &= ~RHASHED;
690 687 avl_remove(&mi->smi_hash_avl, np);
691 688 }
692 689 mutex_exit(&np->r_statelock);
693 690 }
694 691
695 692 /*
696 693 * Remove an smbnode from the "hash" AVL tree.
697 694 *
698 695 * The caller must not be holding the rwlock.
699 696 */
700 697 void
701 698 smbfs_rmhash(smbnode_t *np)
702 699 {
703 700 smbmntinfo_t *mi = np->n_mount;
704 701
705 702 rw_enter(&mi->smi_hash_lk, RW_WRITER);
706 703 sn_rmhash_locked(np);
707 704 rw_exit(&mi->smi_hash_lk);
708 705 }
709 706
710 707 /*
711 708 * Lookup an smbnode by remote pathname
712 709 *
713 710 * The caller must be holding the AVL rwlock, either shared or exclusive.
714 711 *
715 - * NFS: nfs_subr.c:rfind
712 + * From NFS: nfs_subr.c:rfind
716 713 */
717 714 static smbnode_t *
718 715 sn_hashfind(
719 716 smbmntinfo_t *mi,
720 717 const char *rpath,
721 718 int rplen,
722 719 avl_index_t *pwhere) /* optional */
723 720 {
724 721 smbfs_node_hdr_t nhdr;
725 722 smbnode_t *np;
726 723 vnode_t *vp;
727 724
728 725 ASSERT(RW_LOCK_HELD(&mi->smi_hash_lk));
729 726
730 727 bzero(&nhdr, sizeof (nhdr));
731 728 nhdr.hdr_n_rpath = (char *)rpath;
732 729 nhdr.hdr_n_rplen = rplen;
733 730
734 731 /* See smbfs_node_cmp below. */
735 732 np = avl_find(&mi->smi_hash_avl, &nhdr, pwhere);
736 733
737 734 if (np == NULL)
738 735 return (NULL);
739 736
740 737 /*
741 738 * Found it in the "hash" AVL tree.
742 739 * Remove from free list, if necessary.
743 740 */
744 741 vp = SMBTOV(np);
745 742 if (np->r_freef != NULL) {
746 743 mutex_enter(&smbfreelist_lock);
747 744 /*
748 745 * If the smbnode is on the freelist,
749 746 * then remove it and use that reference
750 747 * as the new reference. Otherwise,
751 748 * need to increment the reference count.
752 749 */
753 750 if (np->r_freef != NULL) {
754 751 sn_rmfree(np);
755 752 mutex_exit(&smbfreelist_lock);
756 753 } else {
757 754 mutex_exit(&smbfreelist_lock);
758 755 VN_HOLD(vp);
759 756 }
760 757 } else
761 758 VN_HOLD(vp);
762 759
763 760 return (np);
764 761 }
765 762
766 763 static int
767 764 smbfs_node_cmp(const void *va, const void *vb)
768 765 {
769 766 const smbfs_node_hdr_t *a = va;
770 767 const smbfs_node_hdr_t *b = vb;
771 768 int clen, diff;
772 769
773 770 /*
774 771 * Same semantics as strcmp, but does not
775 772 * assume the strings are null terminated.
776 773 */
777 774 clen = (a->hdr_n_rplen < b->hdr_n_rplen) ?
778 775 a->hdr_n_rplen : b->hdr_n_rplen;
779 776 diff = strncmp(a->hdr_n_rpath, b->hdr_n_rpath, clen);
780 777 if (diff < 0)
781 778 return (-1);
782 779 if (diff > 0)
783 780 return (1);
784 781 /* they match through clen */
785 782 if (b->hdr_n_rplen > clen)
786 783 return (-1);
787 784 if (a->hdr_n_rplen > clen)
788 785 return (1);
789 786 return (0);
790 787 }
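
A small worked example of the resulting order (hypothetical values):
because ' ' (0x20) compares less than '\\' (0x5c), a sibling name
containing a space sorts between a directory and its children, which
is exactly the case smbfs_attrcache_prune() below has to skip over.

	/*
	 * Byte-wise AVL ordering, e.g.:
	 *	"foo"     < "foo bar"	(prefix sorts first)
	 *	"foo bar" < "foo\bar"	(' ' < '\')
	 */
	smbfs_node_hdr_t a = { .hdr_n_rpath = "foo", .hdr_n_rplen = 3 };
	smbfs_node_hdr_t b = { .hdr_n_rpath = "foo bar", .hdr_n_rplen = 7 };
	ASSERT(smbfs_node_cmp(&a, &b) < 0);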
791 788
792 789 /*
793 790 * Setup the "hash" AVL tree used for our node cache.
794 791 * See: smbfs_mount, smbfs_destroy_table.
795 792 */
796 793 void
797 794 smbfs_init_hash_avl(avl_tree_t *avl)
798 795 {
799 796 avl_create(avl, smbfs_node_cmp, sizeof (smbnode_t),
800 797 offsetof(smbnode_t, r_avl_node));
801 798 }
802 799
803 800 /*
804 801 * Invalidate the cached attributes for all nodes "under" the
805 802 * passed-in node. Note: the passed-in node is NOT affected by
806 803 * this call. This is used both for files under some directory
807 804 * after the directory is deleted or renamed, and for extended
808 805 * attribute files (named streams) under a plain file after that
809 806 * file is renamed or deleted.
810 807 *
811 808 * Do this by walking the AVL tree starting at the passed in node,
812 809 * and continuing while the visited nodes have a path prefix matching
813 810 * the entire path of the passed-in node, and a separator just after
814 811 * that matching path prefix. Watch out for cases where the AVL tree
815 812 * order may not exactly match the order of an FS walk, i.e.
816 813 * consider this sequence:
817 814 * "foo" (directory)
818 815 * "foo bar" (name containing a space)
819 816 * "foo/bar"
820 817 * The walk needs to skip "foo bar" and keep going until it finds
821 818 * something that doesn't match the "foo" name prefix.
822 819 */
823 820 void
824 821 smbfs_attrcache_prune(smbnode_t *top_np)
825 822 {
826 823 smbmntinfo_t *mi;
827 824 smbnode_t *np;
828 825 char *rpath;
829 826 int rplen;
830 827
831 828 mi = top_np->n_mount;
832 829 rw_enter(&mi->smi_hash_lk, RW_READER);
833 830
834 831 np = top_np;
835 832 rpath = top_np->n_rpath;
836 833 rplen = top_np->n_rplen;
837 834 for (;;) {
838 835 np = avl_walk(&mi->smi_hash_avl, np, AVL_AFTER);
839 836 if (np == NULL)
840 837 break;
841 838 if (np->n_rplen < rplen)
842 839 break;
843 840 if (0 != strncmp(np->n_rpath, rpath, rplen))
844 841 break;
845 842 if (np->n_rplen > rplen && (
846 843 np->n_rpath[rplen] == ':' ||
847 844 np->n_rpath[rplen] == '\\'))
848 845 smbfs_attrcache_remove(np);
849 846 }
850 847
851 848 rw_exit(&mi->smi_hash_lk);
852 849 }
853 850
854 851 #ifdef SMB_VNODE_DEBUG
855 852 int smbfs_check_table_debug = 1;
856 853 #else /* SMB_VNODE_DEBUG */
857 854 int smbfs_check_table_debug = 0;
858 855 #endif /* SMB_VNODE_DEBUG */
859 856
860 857
861 858 /*
862 859 * Return nonzero if there is an active vnode belonging to this vfs in the
863 860 * smbnode cache.
864 861 *
865 862 * Several of these checks are done without holding the usual
866 863 * locks. This is safe because destroy_smbtable(), smbfs_addfree(),
867 864 * etc. will redo the necessary checks before actually destroying
868 865 * any smbnodes.
869 866 *
870 - * NFS: nfs_subr.c:check_rtable
867 + * From NFS: nfs_subr.c:check_rtable
871 868 *
872 869 * Debugging changes here relative to NFS.
873 870 * Relatively harmless, so left 'em in.
874 871 */
875 872 int
876 873 smbfs_check_table(struct vfs *vfsp, smbnode_t *rtnp)
877 874 {
878 875 smbmntinfo_t *mi;
879 876 smbnode_t *np;
880 877 vnode_t *vp;
881 878 int busycnt = 0;
882 879
883 880 mi = VFTOSMI(vfsp);
884 881 rw_enter(&mi->smi_hash_lk, RW_READER);
885 882 for (np = avl_first(&mi->smi_hash_avl); np != NULL;
886 883 np = avl_walk(&mi->smi_hash_avl, np, AVL_AFTER)) {
887 884
888 885 if (np == rtnp)
889 886 continue; /* skip the root */
890 887 vp = SMBTOV(np);
891 888
892 889 /* Now the 'busy' checks: */
893 890 /* Not on the free list? */
894 891 if (np->r_freef == NULL) {
895 892 SMBVDEBUG("!r_freef: node=0x%p, rpath=%s\n",
896 893 (void *)np, np->n_rpath);
897 894 busycnt++;
898 895 }
899 896
900 897 /* Has dirty pages? */
901 898 if (vn_has_cached_data(vp) &&
902 899 (np->r_flags & RDIRTY)) {
903 900 SMBVDEBUG("is dirty: node=0x%p, rpath=%s\n",
904 901 (void *)np, np->n_rpath);
905 902 busycnt++;
906 903 }
907 904
908 905 /* Other refs? (not reflected in v_count) */
909 906 if (np->r_count > 0) {
910 907 SMBVDEBUG("+r_count: node=0x%p, rpath=%s\n",
911 908 (void *)np, np->n_rpath);
912 909 busycnt++;
913 910 }
914 911
915 912 if (busycnt && !smbfs_check_table_debug)
916 913 break;
917 914
918 915 }
919 916 rw_exit(&mi->smi_hash_lk);
920 917
921 918 return (busycnt);
922 919 }
923 920
924 921 /*
925 922 * Destroy inactive vnodes from the AVL tree which belong to this
926 923 * vfs. It is essential that we destroy all inactive vnodes during a
927 924 * forced unmount as well as during a normal unmount.
928 925 *
929 - * NFS: nfs_subr.c:destroy_rtable
926 + * Based on NFS: nfs_subr.c:destroy_rtable
930 927 *
931 928 * In here, we're normally destroying all or most of the AVL tree,
932 929 * so the natural choice is to use avl_destroy_nodes. However,
933 930 * there may be a few busy nodes that should remain in the AVL
934 931 * tree when we're done. The solution: use a temporary tree to
935 932 * hold the busy nodes until we're done destroying the old tree,
936 933 * then copy the temporary tree over the (now emtpy) real tree.
937 934 * then copy the temporary tree over the (now empty) real tree.
938 935 void
939 936 smbfs_destroy_table(struct vfs *vfsp)
940 937 {
941 938 avl_tree_t tmp_avl;
942 939 smbmntinfo_t *mi;
943 940 smbnode_t *np;
944 941 smbnode_t *rlist;
945 942 void *v;
946 943
947 944 mi = VFTOSMI(vfsp);
948 945 rlist = NULL;
949 946 smbfs_init_hash_avl(&tmp_avl);
950 947
951 948 rw_enter(&mi->smi_hash_lk, RW_WRITER);
952 949 v = NULL;
953 950 while ((np = avl_destroy_nodes(&mi->smi_hash_avl, &v)) != NULL) {
954 951
955 952 mutex_enter(&smbfreelist_lock);
956 953 if (np->r_freef == NULL) {
957 954 /*
958 955 * Busy node (not on the free list).
959 956 * Will keep in the final AVL tree.
960 957 */
961 958 mutex_exit(&smbfreelist_lock);
962 959 avl_add(&tmp_avl, np);
963 960 } else {
964 961 /*
965 962 * It's on the free list. Remove and
966 963 * arrange for it to be destroyed.
967 964 */
968 965 sn_rmfree(np);
969 966 mutex_exit(&smbfreelist_lock);
970 967
971 968 /*
972 969 * Last part of sn_rmhash_locked().
973 970 * NB: avl_destroy_nodes has already
974 971 * removed this from the "hash" AVL.
975 972 */
976 973 mutex_enter(&np->r_statelock);
977 974 np->r_flags &= ~RHASHED;
978 975 mutex_exit(&np->r_statelock);
979 976
980 977 /*
981 978 * Add to the list of nodes to destroy.
982 979 * Borrowing avl_child[0] for this list.
983 980 */
984 981 np->r_avl_node.avl_child[0] =
985 982 (struct avl_node *)rlist;
986 983 rlist = np;
987 984 }
988 985 }
989 986 avl_destroy(&mi->smi_hash_avl);
990 987
991 988 /*
992 989 * Replace the (now destroyed) "hash" AVL with the
993 990 * temporary AVL, which restores the busy nodes.
994 991 */
995 992 mi->smi_hash_avl = tmp_avl;
996 993 rw_exit(&mi->smi_hash_lk);
997 994
998 995 /*
999 996 * Now destroy the nodes on our temporary list (rlist).
1000 997 * This call to smbfs_addfree will end up destroying the
1001 998 * smbnode, but in a safe way with the appropriate set
1002 999 * of checks done.
1003 1000 */
1004 1001 while ((np = rlist) != NULL) {
1005 1002 rlist = (smbnode_t *)np->r_avl_node.avl_child[0];
1006 1003 smbfs_addfree(np);
1007 1004 }
1008 1005 }
1009 1006
1010 1007 /*
1011 1008 * This routine destroys all the resources associated with the smbnode
1012 1009 * and then the smbnode itself. Note: sn_inactive has been called.
1013 1010 *
1014 - * NFS: nfs_subr.c:destroy_rnode
1011 + * From NFS: nfs_subr.c:destroy_rnode
1015 1012 */
1016 1013 static void
1017 1014 sn_destroy_node(smbnode_t *np)
1018 1015 {
1019 1016 vnode_t *vp;
1020 1017 vfs_t *vfsp;
1021 1018
1022 1019 vp = SMBTOV(np);
1023 1020 vfsp = vp->v_vfsp;
1024 1021
1025 1022 ASSERT(vp->v_count == 1);
1026 1023 ASSERT(np->r_count == 0);
1027 1024 ASSERT(np->r_mapcnt == 0);
1028 1025 ASSERT(np->r_secattr.vsa_aclentp == NULL);
1029 1026 ASSERT(np->r_cred == NULL);
1030 1027 ASSERT(np->n_rpath == NULL);
1031 1028 ASSERT(!(np->r_flags & RHASHED));
1032 1029 ASSERT(np->r_freef == NULL && np->r_freeb == NULL);
1033 1030 atomic_dec_ulong((ulong_t *)&smbnodenew);
1034 1031 vn_invalid(vp);
1035 1032 vn_free(vp);
1036 1033 kmem_cache_free(smbnode_cache, np);
1037 1034 VFS_RELE(vfsp);
1038 1035 }
1039 1036
1040 1037 /*
1038 + * From NFS rflush()
1041 1039 * Flush all vnodes in this (or every) vfs.
1042 - * Used by nfs_sync and by nfs_unmount.
1040 + * Used by smbfs_sync and by smbfs_unmount.
1043 1041 */
1044 1042 /*ARGSUSED*/
1045 1043 void
1046 1044 smbfs_rflush(struct vfs *vfsp, cred_t *cr)
1047 1045 {
1048 - /* Todo: mmap support. */
1046 + smbmntinfo_t *mi;
1047 + smbnode_t *np;
1048 + vnode_t *vp, **vplist;
1049 + long num, cnt;
1050 +
1051 + mi = VFTOSMI(vfsp);
1052 +
1053 + /*
1054 + * Check to see whether there is anything to do.
1055 + */
1056 + num = avl_numnodes(&mi->smi_hash_avl);
1057 + if (num == 0)
1058 + return;
1059 +
1060 + /*
1061 + * Allocate a slot for all currently active smbnodes on the
1062 + * supposition that they all may need flushing.
1063 + */
1064 + vplist = kmem_alloc(num * sizeof (*vplist), KM_SLEEP);
1065 + cnt = 0;
1066 +
1067 + /*
1068 + * Walk the AVL tree looking for smbnodes with page
1069 + * lists associated with them. Make a list of these
1070 + * files.
1071 + */
1072 + rw_enter(&mi->smi_hash_lk, RW_READER);
1073 + for (np = avl_first(&mi->smi_hash_avl); np != NULL;
1074 + np = avl_walk(&mi->smi_hash_avl, np, AVL_AFTER)) {
1075 + vp = SMBTOV(np);
1076 + /*
1077 + * Don't bother sync'ing a vp if it
1078 + * is part of virtual swap device or
1079 + * if VFS is read-only
1080 + */
1081 + if (IS_SWAPVP(vp) || vn_is_readonly(vp))
1082 + continue;
1083 + /*
1084 + * If the vnode has pages and is marked as either
1085 + * dirty or mmap'd, hold and add this vnode to the
1086 + * list of vnodes to flush.
1087 + */
1088 + if (vn_has_cached_data(vp) &&
1089 + ((np->r_flags & RDIRTY) || np->r_mapcnt > 0)) {
1090 + VN_HOLD(vp);
1091 + vplist[cnt++] = vp;
1092 + if (cnt == num)
1093 + break;
1094 + }
1095 + }
1096 + rw_exit(&mi->smi_hash_lk);
1097 +
1098 + /*
1099 + * Flush and release all of the files on the list.
1100 + */
1101 + while (cnt-- > 0) {
1102 + vp = vplist[cnt];
1103 + (void) VOP_PUTPAGE(vp, (u_offset_t)0, 0, B_ASYNC, cr, NULL);
1104 + VN_RELE(vp);
1105 + }
1106 +
1107 + kmem_free(vplist, num * sizeof (vnode_t *));
1049 1108 }
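
For context, a minimal sketch of a caller (assumed shape only; the
real callers are smbfs_sync and smbfs_unmount, per the comment above):

	/*ARGSUSED*/
	static int
	example_sync(vfs_t *vfsp, short flag, cred_t *cr)
	{
		smbfs_rflush(vfsp, cr);
		return (0);
	}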
1050 1109
1051 -/* access cache (nfs_subr.c) not used here */
1110 +/* Here NFS has access cache stuff (nfs_subr.c) not used here */
1052 1111
1112 +/*
1113 + * Set or clear the direct I/O flag.
1114 + * VOP_RWLOCK() is held for write access to prevent a race condition
1115 + * which would occur if a process is in the middle of a write when the
1116 + * directio flag gets set. It is possible that all pages may not get flushed.
1117 + * From nfs_common.c
1118 + */
1119 +
1120 +/* ARGSUSED */
1121 +int
1122 +smbfs_directio(vnode_t *vp, int cmd, cred_t *cr)
1123 +{
1124 + int error = 0;
1125 + smbnode_t *np;
1126 +
1127 + np = VTOSMB(vp);
1128 +
1129 + if (cmd == DIRECTIO_ON) {
1130 +
1131 + if (np->r_flags & RDIRECTIO)
1132 + return (0);
1133 +
1134 + /*
1135 + * Flush the page cache.
1136 + */
1137 +
1138 + (void) VOP_RWLOCK(vp, V_WRITELOCK_TRUE, NULL);
1139 +
1140 + if (np->r_flags & RDIRECTIO) {
1141 + VOP_RWUNLOCK(vp, V_WRITELOCK_TRUE, NULL);
1142 + return (0);
1143 + }
1144 +
1145 + /* Here NFS also checks ->r_awcount */
1146 + if (vn_has_cached_data(vp) &&
1147 + (np->r_flags & RDIRTY) != 0) {
1148 + error = VOP_PUTPAGE(vp, (offset_t)0, (uint_t)0,
1149 + B_INVAL, cr, NULL);
1150 + if (error) {
1151 + if (error == ENOSPC || error == EDQUOT) {
1152 + mutex_enter(&np->r_statelock);
1153 + if (!np->r_error)
1154 + np->r_error = error;
1155 + mutex_exit(&np->r_statelock);
1156 + }
1157 + VOP_RWUNLOCK(vp, V_WRITELOCK_TRUE, NULL);
1158 + return (error);
1159 + }
1160 + }
1161 +
1162 + mutex_enter(&np->r_statelock);
1163 + np->r_flags |= RDIRECTIO;
1164 + mutex_exit(&np->r_statelock);
1165 + VOP_RWUNLOCK(vp, V_WRITELOCK_TRUE, NULL);
1166 + return (0);
1167 + }
1168 +
1169 + if (cmd == DIRECTIO_OFF) {
1170 + mutex_enter(&np->r_statelock);
1171 + np->r_flags &= ~RDIRECTIO; /* disable direct mode */
1172 + mutex_exit(&np->r_statelock);
1173 + return (0);
1174 + }
1175 +
1176 + return (EINVAL);
1177 +}
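
From userland, this is reached via the new _FIODIRECTIO ioctl, which
is what directio(3C) issues; a hypothetical application-side example
(the path name is made up):

	#include <sys/types.h>
	#include <sys/fcntl.h>
	#include <fcntl.h>
	#include <stdio.h>

	int
	main(void)
	{
		int fd = open("/mnt/smb/bigfile", O_RDWR);

		/* Ask smbfs to bypass the page cache for this file. */
		if (fd == -1 || directio(fd, DIRECTIO_ON) != 0)
			perror("directio");
		return (0);
	}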
1178 +
1053 1179 static kmutex_t smbfs_newnum_lock;
1054 1180 static uint32_t smbfs_newnum_val = 0;
1055 1181
1056 1182 /*
1057 1183 * Return a number 0..0xffffffff that's different from the last
1058 1184 * 0xffffffff numbers this returned. Used for unlinked files.
1059 - * (This too was copied from nfs_subr.c)
1185 + * From NFS nfs_subr.c newnum
1060 1186 */
1061 1187 uint32_t
1062 1188 smbfs_newnum(void)
1063 1189 {
1064 1190 uint32_t id;
1065 1191
1066 1192 mutex_enter(&smbfs_newnum_lock);
1067 1193 if (smbfs_newnum_val == 0)
1068 1194 smbfs_newnum_val = (uint32_t)gethrestime_sec();
1069 1195 id = smbfs_newnum_val++;
1070 1196 mutex_exit(&smbfs_newnum_lock);
1071 1197 return (id);
1072 1198 }
1073 1199
1074 1200 /*
1075 1201 * Fill in a temporary name at buf
1076 1202 */
1077 1203 int
1078 1204 smbfs_newname(char *buf, size_t buflen)
1079 1205 {
1080 1206 uint_t id;
1081 1207 int n;
1082 1208
1083 1209 id = smbfs_newnum();
1084 1210 n = snprintf(buf, buflen, "~$smbfs%08X", id);
1085 1211 return (n);
1086 1212 }
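
A hypothetical caller sketch: the generated name always has the fixed
form "~$smbfs" plus eight hex digits, so 16 bytes suffice.

	char tmpname[16];	/* "~$smbfs" (7) + 8 hex digits + NUL */

	(void) smbfs_newname(tmpname, sizeof (tmpname));
	/* e.g. tmpname is now something like "~$smbfs4F2A10C3" */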
1087 1213
1088 1214
1089 1215 /*
1090 1216 * initialize resources that are used by smbfs_subr.c
1091 1217 * this is called from the _init() routine (by the way of smbfs_clntinit())
1092 1218 *
1093 - * NFS: nfs_subr.c:nfs_subrinit
1219 + * From NFS: nfs_subr.c:nfs_subrinit
1094 1220 */
1095 1221 int
1096 1222 smbfs_subrinit(void)
1097 1223 {
1098 1224 ulong_t nsmbnode_max;
1099 1225
1100 1226 /*
1101 1227 * Allocate and initialize the smbnode cache
1102 1228 */
1103 1229 if (nsmbnode <= 0)
1104 1230 nsmbnode = ncsize; /* dnlc.h */
1105 1231 nsmbnode_max = (ulong_t)((kmem_maxavail() >> 2) /
1106 1232 sizeof (struct smbnode));
1107 1233 if (nsmbnode > nsmbnode_max || (nsmbnode == 0 && ncsize == 0)) {
1108 1234 zcmn_err(GLOBAL_ZONEID, CE_NOTE,
1109 1235 "setting nsmbnode to max value of %ld", nsmbnode_max);
1110 1236 nsmbnode = nsmbnode_max;
1111 1237 }
1112 1238
1113 1239 smbnode_cache = kmem_cache_create("smbnode_cache", sizeof (smbnode_t),
1114 1240 0, NULL, NULL, smbfs_kmem_reclaim, NULL, NULL, 0);
1115 1241
1116 1242 /*
1117 1243 * Initialize the various mutexes and reader/writer locks
1118 1244 */
1119 1245 mutex_init(&smbfreelist_lock, NULL, MUTEX_DEFAULT, NULL);
1120 1246 mutex_init(&smbfs_minor_lock, NULL, MUTEX_DEFAULT, NULL);
1121 1247
1122 1248 /*
1123 1249 * Assign unique major number for all smbfs mounts
1124 1250 */
1125 1251 if ((smbfs_major = getudev()) == -1) {
1126 1252 zcmn_err(GLOBAL_ZONEID, CE_WARN,
1127 1253 "smbfs: init: can't get unique device number");
1128 1254 smbfs_major = 0;
1129 1255 }
1130 1256 smbfs_minor = 0;
1131 1257
1132 1258 return (0);
1133 1259 }
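
As a rough worked example of the cap computed above (all numbers
hypothetical): if kmem_maxavail() reports 4 GB, the cap is one quarter
of that (1 GB) divided by sizeof (struct smbnode), so with a node size
near 1 KB the cap is on the order of a million nodes, and nsmbnode
normally just inherits ncsize.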
1134 1260
1135 1261 /*
1136 1262 * free smbfs hash table, etc.
1137 - * NFS: nfs_subr.c:nfs_subrfini
1263 + * From NFS: nfs_subr.c:nfs_subrfini
1138 1264 */
1139 1265 void
1140 1266 smbfs_subrfini(void)
1141 1267 {
1142 1268
1143 1269 /*
1144 1270 * Destroy the smbnode cache
1145 1271 */
1146 1272 kmem_cache_destroy(smbnode_cache);
1147 1273
1148 1274 /*
1149 1275 * Destroy the various mutexes and reader/writer locks
1150 1276 */
1151 1277 mutex_destroy(&smbfreelist_lock);
1152 1278 mutex_destroy(&smbfs_minor_lock);
1153 1279 }
1154 1280
1155 1281 /* rddir_cache ? */
1156 1282
1157 1283 /*
1158 1284 * Support functions for smbfs_kmem_reclaim
1159 1285 */
1160 1286
1161 1287 static void
1162 1288 smbfs_node_reclaim(void)
1163 1289 {
1164 1290 smbmntinfo_t *mi;
1165 1291 smbnode_t *np;
1166 1292 vnode_t *vp;
1167 1293
1168 1294 mutex_enter(&smbfreelist_lock);
1169 1295 while ((np = smbfreelist) != NULL) {
1170 1296 sn_rmfree(np);
1171 1297 mutex_exit(&smbfreelist_lock);
1172 1298 if (np->r_flags & RHASHED) {
1173 1299 vp = SMBTOV(np);
1174 1300 mi = np->n_mount;
1175 1301 rw_enter(&mi->smi_hash_lk, RW_WRITER);
1176 1302 mutex_enter(&vp->v_lock);
1177 1303 if (vp->v_count > 1) {
1178 1304 VN_RELE_LOCKED(vp);
1179 1305 mutex_exit(&vp->v_lock);
1180 1306 rw_exit(&mi->smi_hash_lk);
1181 1307 mutex_enter(&smbfreelist_lock);
1182 1308 continue;
1183 1309 }
1184 1310 mutex_exit(&vp->v_lock);
1185 1311 sn_rmhash_locked(np);
1186 1312 rw_exit(&mi->smi_hash_lk);
1187 1313 }
1188 1314 /*
1189 1315 * This call to smbfs_addfree will end up destroying the
1190 1316 * smbnode, but in a safe way with the appropriate set
1191 1317 * of checks done.
1192 1318 */
1193 1319 smbfs_addfree(np);
1194 1320 mutex_enter(&smbfreelist_lock);
1195 1321 }
1196 1322 mutex_exit(&smbfreelist_lock);
1197 1323 }
1198 1324
1199 1325 /*
1200 1326 * Called by the kmem subsystem to ask us if we could
1201 1327 * "Please give back some memory!"
1202 1328 *
1203 1329 * Todo: dump nodes from the free list?
1204 1330 */
1205 1331 /*ARGSUSED*/
1206 1332 void
1207 1333 smbfs_kmem_reclaim(void *cdrarg)
1208 1334 {
1209 1335 smbfs_node_reclaim();
1210 1336 }
1211 1337
1212 -/* nfs failover stuff */
1213 -/* nfs_rw_xxx - see smbfs_rwlock.c */
1338 +/*
1339 + * Here NFS has failover stuff and
1340 + * nfs_rw_xxx - see smbfs_rwlock.c
1341 + */