--- old/usr/src/uts/common/fs/smbclnt/smbfs/smbfs_subr2.c
+++ new/usr/src/uts/common/fs/smbclnt/smbfs/smbfs_subr2.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 *
25 25 * Copyright (c) 1983,1984,1985,1986,1987,1988,1989 AT&T.
26 26 * All rights reserved.
27 27 */
28 28
29 29 /*
30 30 * Node hash implementation initially borrowed from NFS (nfs_subr.c)
31 31 * but then heavily modified. It's no longer an array of hash lists,
32 32 * but an AVL tree per mount point. More on this below.
33 33 */
34 34
35 35 #include <sys/param.h>
36 36 #include <sys/systm.h>
37 37 #include <sys/time.h>
38 38 #include <sys/vnode.h>
39 39 #include <sys/bitmap.h>
40 40 #include <sys/dnlc.h>
41 41 #include <sys/kmem.h>
42 42 #include <sys/sunddi.h>
43 43 #include <sys/sysmacros.h>
44 44
45 45 #include <netsmb/smb_osdep.h>
46 46
47 47 #include <netsmb/smb.h>
48 48 #include <netsmb/smb_conn.h>
49 49 #include <netsmb/smb_subr.h>
50 50 #include <netsmb/smb_rq.h>
51 51
52 52 #include <smbfs/smbfs.h>
53 53 #include <smbfs/smbfs_node.h>
54 54 #include <smbfs/smbfs_subr.h>
55 55
56 56 /*
57 57 * The AVL trees (now per-mount) allow finding an smbfs node by its
58 58 * full remote path name. They also allow easy traversal of all nodes
59 59 * below (path wise) any given node. A reader/writer lock for each
60 60 * (per mount) AVL tree is used to control access and to synchronize
61 61 * lookups, additions, and deletions from that AVL tree.
62 62 *
63 63 * Previously, this code used a global array of hash chains, each with
64 64 * its own rwlock. A few struct members, functions, and comments may
65 65 * still refer to a "hash", and those should all now be considered to
66 66 * refer to the per-mount AVL tree that replaced the old hash chains.
67 67 * (i.e. member smi_hash_lk, function sn_hashfind, etc.)
68 68 *
69 69 * The smbnode freelist is organized as a doubly linked list with
70 70 * a head pointer. Additions and deletions are synchronized via
71 71 * a single mutex.
72 72 *
73 73 * In order to add an smbnode to the free list, it must be linked into
74 74 * the mount's AVL tree and the exclusive lock for the AVL must be held.
75 75 * If an smbnode is not linked into the AVL tree, then it is destroyed
76 76 * because it holds no valuable information about the file that
77 77 * could be reused. The exclusive lock for the AVL tree must be held
78 78 * in order to prevent a lookup in the AVL tree from finding the
79 79 * smbnode and using it and assuming that the smbnode is not on the
80 80 * freelist. The lookup in the AVL tree will have the AVL tree lock
81 81 * held, either exclusive or shared.
82 82 *
83 83 * The vnode reference count for each smbnode is not allowed to drop
84 84 * below 1. This prevents external entities, such as the VM
85 85 * subsystem, from acquiring references to vnodes already on the
86 86 * freelist and then trying to place them back on the freelist
87 87 * when their reference is released. This means that when an
88 88 * smbnode is looked up in the AVL tree, either the smbnode
89 89 * is removed from the freelist and that reference is transferred to
90 90 * the new reference or the vnode reference count must be incremented
91 91 * accordingly. The mutex for the freelist must be held in order to
92 92 * accurately test to see if the smbnode is on the freelist or not.
93 93 * The AVL tree lock might be held shared and it is possible that
94 94 * two different threads may race to remove the smbnode from the
95 95 * freelist. This race can be resolved by holding the mutex for the
96 96 * freelist. Please note that the mutex for the freelist does not
97 97 * need to be held if the smbnode is not on the freelist. It cannot be
98 98 * placed on the freelist due to the requirement that the thread
99 99 * putting the smbnode on the freelist must hold the exclusive lock
100 100 * for the AVL tree and the thread doing the lookup in the AVL tree
101 101 * is holding either a shared or exclusive lock for the AVL tree.
102 102 *
103 103 * The lock ordering is:
104 104 *
105 105 * AVL tree lock -> vnode lock
106 106 * AVL tree lock -> freelist lock
107 107 */
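/*
 * A minimal sketch of the ordering rules above (illustrative only;
 * the real code paths are make_smbnode() and smbfs_addfree() below):
 *
 *	rw_enter(&mi->smi_hash_lk, RW_WRITER);	// AVL tree lock first
 *	mutex_enter(&smbfreelist_lock);		// then the freelist lock
 *	// ... link the node onto smbfreelist ...
 *	mutex_exit(&smbfreelist_lock);
 *	rw_exit(&mi->smi_hash_lk);
 *
 * Acquiring smbfreelist_lock (or a vnode's v_lock) first and then
 * blocking on the AVL lock would invert this order and risk deadlock.
 */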
108 108
109 109 static kmutex_t smbfreelist_lock;
110 110 static smbnode_t *smbfreelist = NULL;
111 111 static ulong_t smbnodenew = 0;
112 112 long nsmbnode = 0;
113 113
114 114 static struct kmem_cache *smbnode_cache;
115 115
116 116 static const vsecattr_t smbfs_vsa0 = { 0 };
117 117
118 118 /*
119 119 * Mutex to protect the following variables:
120 120 * smbfs_major
121 121 * smbfs_minor
122 122 */
123 123 kmutex_t smbfs_minor_lock;
124 124 int smbfs_major;
125 125 int smbfs_minor;
126 126
127 127 /* See smbfs_node_findcreate() */
128 128 struct smbfattr smbfs_fattr0;
129 129
130 130 /*
131 131 * Local functions.
132 132 * SN for Smb Node
133 133 */
134 134 static void sn_rmfree(smbnode_t *);
135 135 static void sn_inactive(smbnode_t *);
136 136 static void sn_addhash_locked(smbnode_t *, avl_index_t);
137 137 static void sn_rmhash_locked(smbnode_t *);
138 138 static void sn_destroy_node(smbnode_t *);
139 139 void smbfs_kmem_reclaim(void *cdrarg);
140 140
141 141 static smbnode_t *
142 142 sn_hashfind(smbmntinfo_t *, const char *, int, avl_index_t *);
143 143
144 144 static smbnode_t *
145 145 make_smbnode(smbmntinfo_t *, const char *, int, int *);
146 146
147 147 /*
148 148 * Free the resources associated with an smbnode.
149 149 * Note: This is different from smbfs_inactive
150 150 *
151 151 * NFS: nfs_subr.c:rinactive
152 152 */
153 153 static void
154 154 sn_inactive(smbnode_t *np)
155 155 {
156 156 vsecattr_t ovsa;
157 157 cred_t *oldcr;
158 158 char *orpath;
159 159 int orplen;
160 + vnode_t *vp;
160 161
161 162 /*
162 - * Flush and invalidate all pages (todo)
163 + * Flush and invalidate all pages
163 164 * Free any held credentials and caches...
164 165 * etc. (See NFS code)
165 166 */
166 167 mutex_enter(&np->r_statelock);
167 168
168 169 ovsa = np->r_secattr;
169 170 np->r_secattr = smbfs_vsa0;
170 171 np->r_sectime = 0;
171 172
172 173 oldcr = np->r_cred;
173 174 np->r_cred = NULL;
174 175
175 176 orpath = np->n_rpath;
176 177 orplen = np->n_rplen;
177 178 np->n_rpath = NULL;
178 179 np->n_rplen = 0;
179 180
180 181 mutex_exit(&np->r_statelock);
181 182
183 + vp = SMBTOV(np);
184 + if (vn_has_cached_data(vp)) {
185 +		smbfs_invalidate_pages(vp, (u_offset_t)0, oldcr);
186 + }
187 +
182 188 if (ovsa.vsa_aclentp != NULL)
183 189 kmem_free(ovsa.vsa_aclentp, ovsa.vsa_aclentsz);
184 190
185 191 if (oldcr != NULL)
186 192 crfree(oldcr);
187 193
188 194 if (orpath != NULL)
189 195 kmem_free(orpath, orplen + 1);
190 196 }
191 197
192 198 /*
193 199 * Find and optionally create an smbnode for the passed
194 200 * mountinfo, directory, separator, and name. If the
195 201 * desired smbnode already exists, return a reference.
196 202 * If the file attributes pointer is non-null, the node
197 203 * is created if necessary and linked into the AVL tree.
198 204 *
199 205 * Callers that need a node created but don't have the
200 206 * real attributes pass smbfs_fattr0 to force creation.
201 207 *
202 208 * Note: make_smbnode() may upgrade the "hash" lock to exclusive.
203 209 *
204 210 * NFS: nfs_subr.c:makenfsnode
205 211 */
206 212 smbnode_t *
207 213 smbfs_node_findcreate(
208 214 smbmntinfo_t *mi,
209 215 const char *dirnm,
210 216 int dirlen,
211 217 const char *name,
212 218 int nmlen,
213 219 char sep,
214 220 struct smbfattr *fap)
215 221 {
216 222 char tmpbuf[256];
217 223 size_t rpalloc;
218 224 char *p, *rpath;
219 225 int rplen;
220 226 smbnode_t *np;
221 227 vnode_t *vp;
222 228 int newnode;
223 229
224 230 /*
225 231 * Build the search string, either in tmpbuf or
226 232 * in allocated memory if larger than tmpbuf.
227 233 */
228 234 rplen = dirlen;
229 235 if (sep != '\0')
230 236 rplen++;
231 237 rplen += nmlen;
232 238 if (rplen < sizeof (tmpbuf)) {
233 239 /* use tmpbuf */
234 240 rpalloc = 0;
235 241 rpath = tmpbuf;
236 242 } else {
237 243 rpalloc = rplen + 1;
238 244 rpath = kmem_alloc(rpalloc, KM_SLEEP);
239 245 }
240 246 p = rpath;
241 247 bcopy(dirnm, p, dirlen);
242 248 p += dirlen;
243 249 if (sep != '\0')
244 250 *p++ = sep;
245 251 if (name != NULL) {
246 252 bcopy(name, p, nmlen);
247 253 p += nmlen;
248 254 }
249 255 ASSERT(p == rpath + rplen);
250 256
251 257 /*
252 258 * Find or create a node with this path.
253 259 */
254 260 rw_enter(&mi->smi_hash_lk, RW_READER);
255 261 if (fap == NULL)
256 262 np = sn_hashfind(mi, rpath, rplen, NULL);
257 263 else
258 264 np = make_smbnode(mi, rpath, rplen, &newnode);
259 265 rw_exit(&mi->smi_hash_lk);
260 266
261 267 if (rpalloc)
262 268 kmem_free(rpath, rpalloc);
263 269
264 270 if (fap == NULL) {
265 271 /*
266 272 * Caller is "just looking" (no create)
267 273 * so np may or may not be NULL here.
268 274 * Either way, we're done.
269 275 */
270 276 return (np);
271 277 }
272 278
273 279 /*
274 280 * We should have a node, possibly created.
275 281 * Do we have (real) attributes to apply?
276 282 */
277 283 ASSERT(np != NULL);
278 284 if (fap == &smbfs_fattr0)
279 285 return (np);
280 286
281 287 /*
282 288 * Apply the given attributes to this node,
283 289 * dealing with any cache impact, etc.
284 290 */
285 291 vp = SMBTOV(np);
286 292 if (!newnode) {
287 293 /*
288 294 * Found an existing node.
289 295 * Maybe purge caches...
290 296 */
291 297 smbfs_cache_check(vp, fap);
292 298 }
293 299 smbfs_attrcache_fa(vp, fap);
294 300
295 301 /*
296 302 * Note NFS sets vp->v_type here, assuming it
297 303 * can never change for the life of a node.
298 304 * We allow v_type to change, and set it in
299 305 * smbfs_attrcache(). Also: mode, uid, gid
300 306 */
301 307 return (np);
302 308 }
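/*
 * Usage sketch for smbfs_node_findcreate(); the mount point "mi",
 * path, and name here are hypothetical, shown only to illustrate
 * the two calling modes described above:
 *
 *	// Lookup only: fap == NULL, returns NULL if not cached.
 *	np = smbfs_node_findcreate(mi, "\\dir", 4, "file", 4,
 *	    '\\', NULL);
 *
 *	// Create if needed, when real attributes aren't known yet.
 *	np = smbfs_node_findcreate(mi, "\\dir", 4, "file", 4,
 *	    '\\', &smbfs_fattr0);
 */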
303 309
304 310 /*
305 311 * NFS: nfs_subr.c:rtablehash
306 312 * We use smbfs_hash().
307 313 */
308 314
309 315 /*
310 316 * Find or create an smbnode.
311 317 * NFS: nfs_subr.c:make_rnode
312 318 */
313 319 static smbnode_t *
314 320 make_smbnode(
315 321 smbmntinfo_t *mi,
316 322 const char *rpath,
317 323 int rplen,
318 324 int *newnode)
319 325 {
320 326 smbnode_t *np;
321 327 smbnode_t *tnp;
322 328 vnode_t *vp;
323 329 vfs_t *vfsp;
324 330 avl_index_t where;
325 331 char *new_rpath = NULL;
326 332
327 333 ASSERT(RW_READ_HELD(&mi->smi_hash_lk));
328 334 vfsp = mi->smi_vfsp;
329 335
330 336 start:
331 337 np = sn_hashfind(mi, rpath, rplen, NULL);
332 338 if (np != NULL) {
333 339 *newnode = 0;
334 340 return (np);
335 341 }
336 342
337 343 /* Note: will retake this lock below. */
338 344 rw_exit(&mi->smi_hash_lk);
339 345
340 346 /*
341 347 * see if we can find something on the freelist
342 348 */
343 349 mutex_enter(&smbfreelist_lock);
344 350 if (smbfreelist != NULL && smbnodenew >= nsmbnode) {
345 351 np = smbfreelist;
346 352 sn_rmfree(np);
347 353 mutex_exit(&smbfreelist_lock);
348 354
349 355 vp = SMBTOV(np);
350 356
351 357 if (np->r_flags & RHASHED) {
352 358 smbmntinfo_t *tmp_mi = np->n_mount;
353 359 ASSERT(tmp_mi != NULL);
354 360 rw_enter(&tmp_mi->smi_hash_lk, RW_WRITER);
355 361 mutex_enter(&vp->v_lock);
356 362 if (vp->v_count > 1) {
357 363 vp->v_count--;
358 364 mutex_exit(&vp->v_lock);
359 365 rw_exit(&tmp_mi->smi_hash_lk);
360 366 /* start over */
361 367 rw_enter(&mi->smi_hash_lk, RW_READER);
362 368 goto start;
363 369 }
364 370 mutex_exit(&vp->v_lock);
365 371 sn_rmhash_locked(np);
366 372 rw_exit(&tmp_mi->smi_hash_lk);
367 373 }
368 374
369 375 sn_inactive(np);
370 376
371 377 mutex_enter(&vp->v_lock);
372 378 if (vp->v_count > 1) {
373 379 vp->v_count--;
374 380 mutex_exit(&vp->v_lock);
375 381 rw_enter(&mi->smi_hash_lk, RW_READER);
376 382 goto start;
377 383 }
378 384 mutex_exit(&vp->v_lock);
379 385 vn_invalid(vp);
380 386 /*
381 387 * destroy old locks before bzero'ing and
382 388 * recreating the locks below.
383 389 */
384 390 smbfs_rw_destroy(&np->r_rwlock);
385 391 smbfs_rw_destroy(&np->r_lkserlock);
386 392 mutex_destroy(&np->r_statelock);
387 393 cv_destroy(&np->r_cv);
388 394 /*
389 395 * Make sure that if smbnode is recycled then
390 396 * VFS count is decremented properly before
391 397 * reuse.
392 398 */
393 399 VFS_RELE(vp->v_vfsp);
394 400 vn_reinit(vp);
395 401 } else {
396 402 /*
397 403 * allocate and initialize a new smbnode
398 404 */
399 405 vnode_t *new_vp;
400 406
401 407 mutex_exit(&smbfreelist_lock);
402 408
403 409 np = kmem_cache_alloc(smbnode_cache, KM_SLEEP);
404 410 new_vp = vn_alloc(KM_SLEEP);
405 411
406 412 atomic_add_long((ulong_t *)&smbnodenew, 1);
407 413 vp = new_vp;
408 414 }
409 415
410 416 /*
411 417 * Allocate and copy the rpath we'll need below.
412 418 */
413 419 new_rpath = kmem_alloc(rplen + 1, KM_SLEEP);
414 420 bcopy(rpath, new_rpath, rplen);
415 421 new_rpath[rplen] = '\0';
416 422
417 423 /* Initialize smbnode_t */
418 424 bzero(np, sizeof (*np));
419 425
420 426 smbfs_rw_init(&np->r_rwlock, NULL, RW_DEFAULT, NULL);
421 427 smbfs_rw_init(&np->r_lkserlock, NULL, RW_DEFAULT, NULL);
422 428 mutex_init(&np->r_statelock, NULL, MUTEX_DEFAULT, NULL);
423 429 cv_init(&np->r_cv, NULL, CV_DEFAULT, NULL);
424 430 /* cv_init(&np->r_commit.c_cv, NULL, CV_DEFAULT, NULL); */
425 431
426 432 np->r_vnode = vp;
427 433 np->n_mount = mi;
428 434
429 435 np->n_fid = SMB_FID_UNUSED;
430 436 np->n_uid = mi->smi_uid;
431 437 np->n_gid = mi->smi_gid;
432 438 /* Leave attributes "stale." */
433 439
434 440 #if 0 /* XXX dircache */
435 441 /*
436 442 * We don't know if it's a directory yet.
437 443 * Let the caller do this? XXX
438 444 */
439 445 avl_create(&np->r_dir, compar, sizeof (rddir_cache),
440 446 offsetof(rddir_cache, tree));
441 447 #endif
442 448
443 449 /* Now fill in the vnode. */
444 450 vn_setops(vp, smbfs_vnodeops);
445 451 vp->v_data = (caddr_t)np;
446 452 VFS_HOLD(vfsp);
447 453 vp->v_vfsp = vfsp;
448 454 vp->v_type = VNON;
449 455
450 456 /*
451 457 * We entered with mi->smi_hash_lk held (reader).
452 458 * Retake it now, (as the writer).
453 459 * Will return with it held.
454 460 */
455 461 rw_enter(&mi->smi_hash_lk, RW_WRITER);
456 462
457 463 /*
458 464 * There is a race condition where someone else
459 465 * may alloc the smbnode while no locks are held,
460 466 * so check again and recover if found.
461 467 */
462 468 tnp = sn_hashfind(mi, rpath, rplen, &where);
463 469 if (tnp != NULL) {
464 470 /*
465 471 * Lost the race. Put the node we were building
466 472 * on the free list and return the one we found.
467 473 */
468 474 rw_exit(&mi->smi_hash_lk);
469 475 kmem_free(new_rpath, rplen + 1);
470 476 smbfs_addfree(np);
471 477 rw_enter(&mi->smi_hash_lk, RW_READER);
472 478 *newnode = 0;
473 479 return (tnp);
474 480 }
475 481
476 482 /*
477 483 * Hash search identifies nodes by the remote path
478 484 * (n_rpath) so fill that in now, before linking
479 485 * this node into the node cache (AVL tree).
480 486 */
481 487 np->n_rpath = new_rpath;
482 488 np->n_rplen = rplen;
483 489 np->n_ino = smbfs_gethash(new_rpath, rplen);
484 490
485 491 sn_addhash_locked(np, where);
486 492 *newnode = 1;
487 493 return (np);
488 494 }
489 495
490 496 /*
491 497 * smbfs_addfree
492 498 * Put an smbnode on the free list, or destroy it immediately
493 499 * if it offers no value were it to be reclaimed later. Also
494 500 * destroy immediately when we have too many smbnodes, etc.
495 501 *
496 502 * Normally called by smbfs_inactive, but also
497 503 * called in here during cleanup operations.
498 504 *
499 505 * NFS: nfs_subr.c:rp_addfree
500 506 */
501 507 void
502 508 smbfs_addfree(smbnode_t *np)
503 509 {
504 510 vnode_t *vp;
505 511 struct vfs *vfsp;
506 512 smbmntinfo_t *mi;
507 513
508 514 ASSERT(np->r_freef == NULL && np->r_freeb == NULL);
509 515
510 516 vp = SMBTOV(np);
511 517 ASSERT(vp->v_count >= 1);
512 518
513 519 vfsp = vp->v_vfsp;
514 520 mi = VFTOSMI(vfsp);
515 521
516 522 /*
517 523 * If there are no more references to this smbnode and:
518 524 * we have too many smbnodes allocated, or if the node
519 525 * is no longer accessible via the AVL tree (!RHASHED),
520 526 * or an i/o error occurred while writing to the file,
521 527 * or it's part of an unmounted FS, then try to destroy
522 528 * it instead of putting it on the smbnode freelist.
523 529 */
524 530 if (np->r_count == 0 && (
525 531 (np->r_flags & RHASHED) == 0 ||
526 532 (np->r_error != 0) ||
527 533 (vfsp->vfs_flag & VFS_UNMOUNTED) ||
528 534 (smbnodenew > nsmbnode))) {
529 535
530 536 /* Try to destroy this node. */
531 537
532 538 if (np->r_flags & RHASHED) {
533 539 rw_enter(&mi->smi_hash_lk, RW_WRITER);
534 540 mutex_enter(&vp->v_lock);
535 541 if (vp->v_count > 1) {
536 542 vp->v_count--;
537 543 mutex_exit(&vp->v_lock);
538 544 rw_exit(&mi->smi_hash_lk);
539 545 return;
540 546 /*
541 547 * Will get another call later,
542 548 * via smbfs_inactive.
543 549 */
544 550 }
545 551 mutex_exit(&vp->v_lock);
546 552 sn_rmhash_locked(np);
547 553 rw_exit(&mi->smi_hash_lk);
548 554 }
549 555
550 556 sn_inactive(np);
551 557
552 558 /*
553 559 * Recheck the vnode reference count. We need to
554 560 * make sure that another reference has not been
555 561 * acquired while we were not holding v_lock. The
556 562 * smbnode is not in the smbnode "hash" AVL tree, so
557 563 * the only way for a reference to have been acquired
558 564 * is for a VOP_PUTPAGE because the smbnode was marked
559 565 * with RDIRTY or for a modified page. This vnode
560 566 * reference may have been acquired before our call
561 567 * to sn_inactive. The i/o may have been completed,
562 568 * thus allowing sn_inactive to complete, but the
563 569 * reference to the vnode may not have been released
564 570 * yet. In any case, the smbnode can not be destroyed
565 571 * until the other references to this vnode have been
566 572 * released. The other references will take care of
567 573 * either destroying the smbnode or placing it on the
568 574 * smbnode freelist. If there are no other references,
569 575 * then the smbnode may be safely destroyed.
570 576 */
571 577 mutex_enter(&vp->v_lock);
572 578 if (vp->v_count > 1) {
573 579 vp->v_count--;
574 580 mutex_exit(&vp->v_lock);
575 581 return;
576 582 }
577 583 mutex_exit(&vp->v_lock);
578 584
579 585 sn_destroy_node(np);
580 586 return;
581 587 }
582 588
583 589 /*
584 590 * Lock the AVL tree and then recheck the reference count
585 591 * to ensure that no other threads have acquired a reference,
586 592 * which would indicate that the smbnode should not be placed on the
587 593 * freelist. If another reference has been acquired, then
588 594 * just release this one and let the other thread complete
589 595 * the processing of adding this smbnode to the freelist.
590 596 */
591 597 rw_enter(&mi->smi_hash_lk, RW_WRITER);
592 598
593 599 mutex_enter(&vp->v_lock);
594 600 if (vp->v_count > 1) {
595 601 vp->v_count--;
596 602 mutex_exit(&vp->v_lock);
597 603 rw_exit(&mi->smi_hash_lk);
598 604 return;
599 605 }
600 606 mutex_exit(&vp->v_lock);
601 607
602 608 /*
603 609 * Put this node on the free list.
604 610 */
605 611 mutex_enter(&smbfreelist_lock);
606 612 if (smbfreelist == NULL) {
607 613 np->r_freef = np;
608 614 np->r_freeb = np;
609 615 smbfreelist = np;
610 616 } else {
611 617 np->r_freef = smbfreelist;
612 618 np->r_freeb = smbfreelist->r_freeb;
613 619 smbfreelist->r_freeb->r_freef = np;
614 620 smbfreelist->r_freeb = np;
615 621 }
616 622 mutex_exit(&smbfreelist_lock);
617 623
618 624 rw_exit(&mi->smi_hash_lk);
619 625 }
620 626
621 627 /*
622 628 * Remove an smbnode from the free list.
623 629 *
624 630 * The caller must be holding smbfreelist_lock and the smbnode
625 631 * must be on the freelist.
626 632 *
627 633 * NFS: nfs_subr.c:rp_rmfree
628 634 */
629 635 static void
630 636 sn_rmfree(smbnode_t *np)
631 637 {
632 638
633 639 ASSERT(MUTEX_HELD(&smbfreelist_lock));
634 640 ASSERT(np->r_freef != NULL && np->r_freeb != NULL);
635 641
636 642 if (np == smbfreelist) {
637 643 smbfreelist = np->r_freef;
638 644 if (np == smbfreelist)
639 645 smbfreelist = NULL;
640 646 }
641 647
642 648 np->r_freeb->r_freef = np->r_freef;
643 649 np->r_freef->r_freeb = np->r_freeb;
644 650
645 651 np->r_freef = np->r_freeb = NULL;
646 652 }
647 653
648 654 /*
649 655 * Put an smbnode in the "hash" AVL tree.
650 656 *
651 657 * The caller must hold the rwlock as writer.
652 658 *
653 659 * NFS: nfs_subr.c:rp_addhash
654 660 */
655 661 static void
656 662 sn_addhash_locked(smbnode_t *np, avl_index_t where)
657 663 {
658 664 smbmntinfo_t *mi = np->n_mount;
659 665
660 666 ASSERT(RW_WRITE_HELD(&mi->smi_hash_lk));
661 667 ASSERT(!(np->r_flags & RHASHED));
662 668
663 669 avl_insert(&mi->smi_hash_avl, np, where);
664 670
665 671 mutex_enter(&np->r_statelock);
666 672 np->r_flags |= RHASHED;
667 673 mutex_exit(&np->r_statelock);
668 674 }
669 675
670 676 /*
671 677 * Remove an smbnode from the "hash" AVL tree.
672 678 *
673 679 * The caller must hold the rwlock as writer.
674 680 *
675 681 * NFS: nfs_subr.c:rp_rmhash_locked
676 682 */
677 683 static void
678 684 sn_rmhash_locked(smbnode_t *np)
679 685 {
680 686 smbmntinfo_t *mi = np->n_mount;
681 687
682 688 ASSERT(RW_WRITE_HELD(&mi->smi_hash_lk));
683 689 ASSERT(np->r_flags & RHASHED);
684 690
685 691 avl_remove(&mi->smi_hash_avl, np);
686 692
687 693 mutex_enter(&np->r_statelock);
688 694 np->r_flags &= ~RHASHED;
689 695 mutex_exit(&np->r_statelock);
690 696 }
691 697
692 698 /*
693 699 * Remove an smbnode from the "hash" AVL tree.
694 700 *
695 701 * The caller must not be holding the rwlock.
696 702 */
697 703 void
698 704 smbfs_rmhash(smbnode_t *np)
699 705 {
700 706 smbmntinfo_t *mi = np->n_mount;
701 707
702 708 rw_enter(&mi->smi_hash_lk, RW_WRITER);
703 709 sn_rmhash_locked(np);
704 710 rw_exit(&mi->smi_hash_lk);
705 711 }
706 712
707 713 /*
708 714 * Lookup an smbnode by remote pathname
709 715 *
710 716 * The caller must be holding the AVL rwlock, either shared or exclusive.
711 717 *
712 718 * NFS: nfs_subr.c:rfind
713 719 */
714 720 static smbnode_t *
715 721 sn_hashfind(
716 722 smbmntinfo_t *mi,
717 723 const char *rpath,
718 724 int rplen,
719 725 avl_index_t *pwhere) /* optional */
720 726 {
721 727 smbfs_node_hdr_t nhdr;
722 728 smbnode_t *np;
723 729 vnode_t *vp;
724 730
725 731 ASSERT(RW_LOCK_HELD(&mi->smi_hash_lk));
726 732
727 733 bzero(&nhdr, sizeof (nhdr));
728 734 nhdr.hdr_n_rpath = (char *)rpath;
729 735 nhdr.hdr_n_rplen = rplen;
730 736
731 737 /* See smbfs_node_cmp below. */
732 738 np = avl_find(&mi->smi_hash_avl, &nhdr, pwhere);
733 739
734 740 if (np == NULL)
735 741 return (NULL);
736 742
737 743 /*
738 744 * Found it in the "hash" AVL tree.
739 745 * Remove from free list, if necessary.
740 746 */
741 747 vp = SMBTOV(np);
742 748 if (np->r_freef != NULL) {
743 749 mutex_enter(&smbfreelist_lock);
744 750 /*
745 751 * If the smbnode is on the freelist,
746 752 * then remove it and use that reference
747 753 * as the new reference. Otherwise,
748 754 * need to increment the reference count.
749 755 */
750 756 if (np->r_freef != NULL) {
751 757 sn_rmfree(np);
752 758 mutex_exit(&smbfreelist_lock);
753 759 } else {
754 760 mutex_exit(&smbfreelist_lock);
755 761 VN_HOLD(vp);
756 762 }
757 763 } else
758 764 VN_HOLD(vp);
759 765
760 766 return (np);
761 767 }
762 768
763 769 static int
764 770 smbfs_node_cmp(const void *va, const void *vb)
765 771 {
766 772 const smbfs_node_hdr_t *a = va;
767 773 const smbfs_node_hdr_t *b = vb;
768 774 int clen, diff;
769 775
770 776 /*
771 777 * Same semantics as strcmp, but does not
772 778 * assume the strings are null terminated.
773 779 */
774 780 clen = (a->hdr_n_rplen < b->hdr_n_rplen) ?
775 781 a->hdr_n_rplen : b->hdr_n_rplen;
776 782 diff = strncmp(a->hdr_n_rpath, b->hdr_n_rpath, clen);
777 783 if (diff < 0)
778 784 return (-1);
779 785 if (diff > 0)
780 786 return (1);
781 787 /* they match through clen */
782 788 if (b->hdr_n_rplen > clen)
783 789 return (-1);
784 790 if (a->hdr_n_rplen > clen)
785 791 return (1);
786 792 return (0);
787 793 }
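/*
 * Example of the resulting AVL ordering (byte-wise, with a shorter
 * string sorting before any longer string it prefixes):
 *
 *	"foo"  <  "foo bar"  <  "foo\bar"
 *
 * since ' ' (0x20) sorts before '\' (0x5c).  This is the ordering
 * that smbfs_attrcache_prune() below has to cope with.
 */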
788 794
789 795 /*
790 796 * Setup the "hash" AVL tree used for our node cache.
791 797 * See: smbfs_mount, smbfs_destroy_table.
792 798 */
793 799 void
794 800 smbfs_init_hash_avl(avl_tree_t *avl)
795 801 {
796 802 avl_create(avl, smbfs_node_cmp, sizeof (smbnode_t),
797 803 offsetof(smbnode_t, r_avl_node));
798 804 }
799 805
800 806 /*
801 807 * Invalidate the cached attributes for all nodes "under" the
802 808 * passed-in node. Note: the passed-in node is NOT affected by
803 809 * this call. This is used both for files under some directory
804 810 * after the directory is deleted or renamed, and for extended
805 811 * attribute files (named streams) under a plain file after that
806 812 * file is renamed or deleted.
807 813 *
808 814 * Do this by walking the AVL tree starting at the passed in node,
809 815 * and continuing while the visited nodes have a path prefix matching
810 816 * the entire path of the passed-in node, and a separator just after
811 817 * that matching path prefix. Watch out for cases where the AVL tree
812 818 * order may not exactly match the order of an FS walk, i.e.
813 819 * consider this sequence:
814 820 * "foo" (directory)
815 821 * "foo bar" (name containing a space)
816 822 * "foo/bar"
817 823 * The walk needs to skip "foo bar" and keep going until it finds
818 824 * something that doesn't match the "foo" name prefix.
819 825 */
820 826 void
821 827 smbfs_attrcache_prune(smbnode_t *top_np)
822 828 {
823 829 smbmntinfo_t *mi;
824 830 smbnode_t *np;
825 831 char *rpath;
826 832 int rplen;
827 833
828 834 mi = top_np->n_mount;
829 835 rw_enter(&mi->smi_hash_lk, RW_READER);
830 836
831 837 np = top_np;
832 838 rpath = top_np->n_rpath;
833 839 rplen = top_np->n_rplen;
834 840 for (;;) {
835 841 np = avl_walk(&mi->smi_hash_avl, np, AVL_AFTER);
836 842 if (np == NULL)
837 843 break;
838 844 if (np->n_rplen < rplen)
839 845 break;
840 846 if (0 != strncmp(np->n_rpath, rpath, rplen))
841 847 break;
842 848 if (np->n_rplen > rplen && (
843 849 np->n_rpath[rplen] == ':' ||
844 850 np->n_rpath[rplen] == '\\'))
845 851 smbfs_attrcache_remove(np);
846 852 }
847 853
848 854 rw_exit(&mi->smi_hash_lk);
849 855 }
850 856
851 857 #ifdef SMB_VNODE_DEBUG
852 858 int smbfs_check_table_debug = 1;
853 859 #else /* SMB_VNODE_DEBUG */
854 860 int smbfs_check_table_debug = 0;
855 861 #endif /* SMB_VNODE_DEBUG */
856 862
857 863
858 864 /*
859 865 * Return nonzero if there is an active vnode belonging to this vfs in the
860 866 * smbnode cache.
861 867 *
862 868 * Several of these checks are done without holding the usual
863 869 * locks. This is safe because smbfs_destroy_table(), smbfs_addfree(),
864 870 * etc. will redo the necessary checks before actually destroying
865 871 * any smbnodes.
866 872 *
867 873 * NFS: nfs_subr.c:check_rtable
868 874 *
869 875 * Debugging changes here relative to NFS.
870 876 * Relatively harmless, so left 'em in.
871 877 */
872 878 int
873 879 smbfs_check_table(struct vfs *vfsp, smbnode_t *rtnp)
874 880 {
875 881 smbmntinfo_t *mi;
876 882 smbnode_t *np;
877 883 vnode_t *vp;
878 884 int busycnt = 0;
879 885
880 886 mi = VFTOSMI(vfsp);
881 887 rw_enter(&mi->smi_hash_lk, RW_READER);
882 888 for (np = avl_first(&mi->smi_hash_avl); np != NULL;
883 889 np = avl_walk(&mi->smi_hash_avl, np, AVL_AFTER)) {
884 890
885 891 if (np == rtnp)
886 892 continue; /* skip the root */
887 893 vp = SMBTOV(np);
888 894
889 895 /* Now the 'busy' checks: */
890 896 /* Not on the free list? */
891 897 if (np->r_freef == NULL) {
892 898 SMBVDEBUG("!r_freef: node=0x%p, rpath=%s\n",
893 899 (void *)np, np->n_rpath);
894 900 busycnt++;
895 901 }
896 902
897 903 /* Has dirty pages? */
898 904 if (vn_has_cached_data(vp) &&
899 905 (np->r_flags & RDIRTY)) {
900 906 SMBVDEBUG("is dirty: node=0x%p, rpath=%s\n",
901 907 (void *)np, np->n_rpath);
902 908 busycnt++;
903 909 }
904 910
905 911 /* Other refs? (not reflected in v_count) */
906 912 if (np->r_count > 0) {
907 913 SMBVDEBUG("+r_count: node=0x%p, rpath=%s\n",
908 914 (void *)np, np->n_rpath);
909 915 busycnt++;
910 916 }
911 917
912 918 if (busycnt && !smbfs_check_table_debug)
913 919 break;
914 920
915 921 }
916 922 rw_exit(&mi->smi_hash_lk);
917 923
918 924 return (busycnt);
919 925 }
920 926
921 927 /*
922 928 * Destroy inactive vnodes from the AVL tree which belong to this
923 929 * vfs. It is essential that we destroy all inactive vnodes during a
924 930 * forced unmount as well as during a normal unmount.
925 931 *
926 932 * NFS: nfs_subr.c:destroy_rtable
927 933 *
928 934 * In here, we're normally destroying all or most of the AVL tree,
929 935 * so the natural choice is to use avl_destroy_nodes. However,
930 936 * there may be a few busy nodes that should remain in the AVL
931 937 * tree when we're done. The solution: use a temporary tree to
932 938 * hold the busy nodes until we're done destroying the old tree,
933 939 * then copy the temporary tree over the (now empty) real tree.
934 940 */
935 941 void
936 942 smbfs_destroy_table(struct vfs *vfsp)
937 943 {
938 944 avl_tree_t tmp_avl;
939 945 smbmntinfo_t *mi;
940 946 smbnode_t *np;
941 947 smbnode_t *rlist;
942 948 void *v;
943 949
944 950 mi = VFTOSMI(vfsp);
945 951 rlist = NULL;
946 952 smbfs_init_hash_avl(&tmp_avl);
947 953
948 954 rw_enter(&mi->smi_hash_lk, RW_WRITER);
949 955 v = NULL;
950 956 while ((np = avl_destroy_nodes(&mi->smi_hash_avl, &v)) != NULL) {
951 957
952 958 mutex_enter(&smbfreelist_lock);
953 959 if (np->r_freef == NULL) {
954 960 /*
955 961 * Busy node (not on the free list).
956 962 * Will keep in the final AVL tree.
957 963 */
958 964 mutex_exit(&smbfreelist_lock);
959 965 avl_add(&tmp_avl, np);
960 966 } else {
961 967 /*
962 968 * It's on the free list. Remove and
963 969 * arrange for it to be destroyed.
964 970 */
965 971 sn_rmfree(np);
966 972 mutex_exit(&smbfreelist_lock);
967 973
968 974 /*
969 975 * Last part of sn_rmhash_locked().
970 976 * NB: avl_destroy_nodes has already
971 977 * removed this from the "hash" AVL.
972 978 */
973 979 mutex_enter(&np->r_statelock);
974 980 np->r_flags &= ~RHASHED;
975 981 mutex_exit(&np->r_statelock);
976 982
977 983 /*
978 984 * Add to the list of nodes to destroy.
979 985 * Borrowing avl_child[0] for this list.
980 986 */
981 987 np->r_avl_node.avl_child[0] =
982 988 (struct avl_node *)rlist;
983 989 rlist = np;
984 990 }
985 991 }
986 992 avl_destroy(&mi->smi_hash_avl);
987 993
988 994 /*
989 995 * Replace the (now destroyed) "hash" AVL with the
990 996 * temporary AVL, which restores the busy nodes.
991 997 */
992 998 mi->smi_hash_avl = tmp_avl;
993 999 rw_exit(&mi->smi_hash_lk);
994 1000
995 1001 /*
996 1002 * Now destroy the nodes on our temporary list (rlist).
997 1003 * This call to smbfs_addfree will end up destroying the
998 1004 * smbnode, but in a safe way with the appropriate set
999 1005 * of checks done.
1000 1006 */
1001 1007 while ((np = rlist) != NULL) {
1002 1008 rlist = (smbnode_t *)np->r_avl_node.avl_child[0];
1003 1009 smbfs_addfree(np);
1004 1010 }
1005 1011 }
1006 1012
1007 1013 /*
1008 1014 * This routine destroys all the resources associated with the smbnode
1009 1015 * and then the smbnode itself. Note: sn_inactive has been called.
1010 1016 *
1011 1017 * NFS: nfs_subr.c:destroy_rnode
1012 1018 */
1013 1019 static void
1014 1020 sn_destroy_node(smbnode_t *np)
1015 1021 {
1016 1022 vnode_t *vp;
1017 1023 vfs_t *vfsp;
1018 1024
1019 1025 vp = SMBTOV(np);
1020 1026 vfsp = vp->v_vfsp;
1021 1027
1022 1028 ASSERT(vp->v_count == 1);
1023 1029 ASSERT(np->r_count == 0);
1024 1030 ASSERT(np->r_mapcnt == 0);
1025 1031 ASSERT(np->r_secattr.vsa_aclentp == NULL);
1026 1032 ASSERT(np->r_cred == NULL);
1027 1033 ASSERT(np->n_rpath == NULL);
1028 1034 ASSERT(!(np->r_flags & RHASHED));
1029 1035 ASSERT(np->r_freef == NULL && np->r_freeb == NULL);
1030 1036 atomic_add_long((ulong_t *)&smbnodenew, -1);
1031 1037 vn_invalid(vp);
1032 1038 vn_free(vp);
1033 1039 kmem_cache_free(smbnode_cache, np);
1034 1040 VFS_RELE(vfsp);
1035 1041 }
1036 1042
1037 1043 /*
1038 1044 * Flush all dirty vnodes in this vfs.
1039 1045 * Used by smbfs_sync and by smbfs_unmount.
1040 1046 */
1041 1047 /*ARGSUSED*/
1042 1048 void
1043 1049 smbfs_rflush(struct vfs *vfsp, cred_t *cr)
1044 1050 {
1045      -	/* Todo: mmap support. */
1051 + smbmntinfo_t *mi;
1052 + smbnode_t *np;
1053 + vnode_t *vp;
1054 +
1055 + long num, cnt;
1056 +
1057 + vnode_t **vplist;
1058 +
1059 + mi = VFTOSMI(vfsp);
1060 +
1061 + cnt = 0;
1062 + num = mi->smi_hash_avl.avl_numnodes;
1063 +	vplist = kmem_alloc(num * sizeof (vnode_t *), KM_SLEEP);
1064 +
1065 + rw_enter(&mi->smi_hash_lk, RW_READER);
1066 + for (np = avl_first(&mi->smi_hash_avl); np != NULL;
1067 + np = avl_walk(&mi->smi_hash_avl, np, AVL_AFTER)) {
1068 + vp = SMBTOV(np);
1069 + if (vn_is_readonly(vp))
1070 + continue;
1071 +
1072 +		if (vn_has_cached_data(vp) && ((np->r_flags & RDIRTY) || np->r_mapcnt > 0)) {
1073 + VN_HOLD(vp);
1074 + vplist[cnt++] = vp;
1075 + if (cnt == num)
1076 + break;
1077 + }
1078 + }
1079 + rw_exit(&mi->smi_hash_lk);
1080 +
1081 + while (cnt-- > 0) {
1082 + vp = vplist[cnt];
1083 + (void) VOP_PUTPAGE(vp, 0, 0, 0, cr, NULL);
1084 + VN_RELE(vp);
1085 + }
1086 +
1087 +	kmem_free(vplist, num * sizeof (vnode_t *));
1046 1088 }
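/*
 * A note on the smbfs_rflush pattern above (compare NFS:
 * nfs_subr.c:rflush): dirty vnodes are collected under smi_hash_lk
 * as READER and flushed only after that lock is dropped, because
 * VOP_PUTPAGE can block on long-running I/O and may re-enter the
 * node cache.  The VN_HOLD taken during the scan keeps each vnode
 * from being recycled before its VOP_PUTPAGE call.
 */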
1047 1089
1048 1090 /* access cache */
1049 1091 /* client handles */
1050 1092
1051 1093 /*
1052 1094 * initialize resources that are used by smbfs_subr.c
1053 1095 * this is called from the _init() routine (by way of smbfs_clntinit())
1054 1096 *
1055 1097 * NFS: nfs_subr.c:nfs_subrinit
1056 1098 */
1057 1099 int
1058 1100 smbfs_subrinit(void)
1059 1101 {
1060 1102 ulong_t nsmbnode_max;
1061 1103
1062 1104 /*
1063 1105 * Allocate and initialize the smbnode cache
1064 1106 */
1065 1107 if (nsmbnode <= 0)
1066 1108 nsmbnode = ncsize; /* dnlc.h */
1067 1109 nsmbnode_max = (ulong_t)((kmem_maxavail() >> 2) /
1068 1110 sizeof (struct smbnode));
1069 1111 if (nsmbnode > nsmbnode_max || (nsmbnode == 0 && ncsize == 0)) {
1070 1112 zcmn_err(GLOBAL_ZONEID, CE_NOTE,
1071 1113 "setting nsmbnode to max value of %ld", nsmbnode_max);
1072 1114 nsmbnode = nsmbnode_max;
1073 1115 }
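/*
 * Illustrative numbers for the clamp above: the node cache is
 * allowed to consume at most a quarter of available kernel memory,
 * e.g. with kmem_maxavail() near 4 GB the cap would be roughly
 * 1 GB / sizeof (struct smbnode) nodes.
 */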
1074 1116
1075 1117 smbnode_cache = kmem_cache_create("smbnode_cache", sizeof (smbnode_t),
1076 1118 0, NULL, NULL, smbfs_kmem_reclaim, NULL, NULL, 0);
1077 1119
1078 1120 /*
1079 1121 * Initialize the various mutexes and reader/writer locks
1080 1122 */
1081 1123 mutex_init(&smbfreelist_lock, NULL, MUTEX_DEFAULT, NULL);
1082 1124 mutex_init(&smbfs_minor_lock, NULL, MUTEX_DEFAULT, NULL);
1083 1125
1084 1126 /*
1085 1127 * Assign unique major number for all smbfs mounts
1086 1128 */
1087 1129 if ((smbfs_major = getudev()) == -1) {
1088 1130 zcmn_err(GLOBAL_ZONEID, CE_WARN,
1089 1131 "smbfs: init: can't get unique device number");
1090 1132 smbfs_major = 0;
1091 1133 }
1092 1134 smbfs_minor = 0;
1093 1135
1094 1136 return (0);
1095 1137 }
1096 1138
1097 1139 /*
1098 1140 * free smbfs hash table, etc.
1099 1141 * NFS: nfs_subr.c:nfs_subrfini
1100 1142 */
1101 1143 void
1102 1144 smbfs_subrfini(void)
1103 1145 {
1104 1146
1105 1147 /*
1106 1148 * Destroy the smbnode cache
1107 1149 */
1108 1150 kmem_cache_destroy(smbnode_cache);
1109 1151
1110 1152 /*
1111 1153 * Destroy the various mutexes and reader/writer locks
1112 1154 */
1113 1155 mutex_destroy(&smbfreelist_lock);
1114 1156 mutex_destroy(&smbfs_minor_lock);
1115 1157 }
1116 1158
1117 1159 /* rddir_cache ? */
1118 1160
1119 1161 /*
1120 1162 * Support functions for smbfs_kmem_reclaim
1121 1163 */
1122 1164
1123 1165 static void
1124 1166 smbfs_node_reclaim(void)
1125 1167 {
1126 1168 smbmntinfo_t *mi;
1127 1169 smbnode_t *np;
1128 1170 vnode_t *vp;
1129 1171
1130 1172 mutex_enter(&smbfreelist_lock);
1131 1173 while ((np = smbfreelist) != NULL) {
1132 1174 sn_rmfree(np);
1133 1175 mutex_exit(&smbfreelist_lock);
1134 1176 if (np->r_flags & RHASHED) {
1135 1177 vp = SMBTOV(np);
1136 1178 mi = np->n_mount;
1137 1179 rw_enter(&mi->smi_hash_lk, RW_WRITER);
1138 1180 mutex_enter(&vp->v_lock);
1139 1181 if (vp->v_count > 1) {
1140 1182 vp->v_count--;
1141 1183 mutex_exit(&vp->v_lock);
1142 1184 rw_exit(&mi->smi_hash_lk);
1143 1185 mutex_enter(&smbfreelist_lock);
1144 1186 continue;
1145 1187 }
1146 1188 mutex_exit(&vp->v_lock);
1147 1189 sn_rmhash_locked(np);
1148 1190 rw_exit(&mi->smi_hash_lk);
1149 1191 }
1150 1192 /*
1151 1193 * This call to smbfs_addfree will end up destroying the
1152 1194 * smbnode, but in a safe way with the appropriate set
1153 1195 * of checks done.
1154 1196 */
1155 1197 smbfs_addfree(np);
1156 1198 mutex_enter(&smbfreelist_lock);
1157 1199 }
1158 1200 mutex_exit(&smbfreelist_lock);
1159 1201 }
1160 1202
1161 1203 /*
1162 1204 * Called by kmem_cache_alloc to ask us:
1163 1205 * "Please give back some memory!"
1164 1206 *
1165 1207 * Todo: dump nodes from the free list?
1166 1208 */
1167 1209 /*ARGSUSED*/
1168 1210 void
1169 1211 smbfs_kmem_reclaim(void *cdrarg)
1170 1212 {
1171 1213 smbfs_node_reclaim();
1172 1214 }
1173 1215
1174 1216 /* nfs failover stuff */
1175 1217 /* nfs_rw_xxx - see smbfs_rwlock.c */