--- old/usr/src/uts/common/klm/nlm_service.c
+++ new/usr/src/uts/common/klm/nlm_service.c
1 1 /*
2 2 * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
3 3 * Authors: Doug Rabson <dfr@rabson.org>
4 4 * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
5 5 *
6 6 * Redistribution and use in source and binary forms, with or without
7 7 * modification, are permitted provided that the following conditions
8 8 * are met:
9 9 * 1. Redistributions of source code must retain the above copyright
10 10 * notice, this list of conditions and the following disclaimer.
11 11 * 2. Redistributions in binary form must reproduce the above copyright
12 12 * notice, this list of conditions and the following disclaimer in the
13 13 * documentation and/or other materials provided with the distribution.
14 14 *
15 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 25 * SUCH DAMAGE.
26 26 */
27 27
28 28 /*
29 29 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
30 30 * Copyright (c) 2012 by Delphix. All rights reserved.
31 31 */
32 32
33 33 /*
34 34 * NFS Lock Manager service functions (nlm_do_...)
35 35 * Called from nlm_rpc_svc.c wrappers.
36 36 *
37 37 * Source code derived from FreeBSD nlm_prot_impl.c
38 38 */
39 39
40 40 #include <sys/param.h>
41 41 #include <sys/systm.h>
42 42 #include <sys/thread.h>
43 43 #include <sys/fcntl.h>
44 44 #include <sys/flock.h>
45 45 #include <sys/mount.h>
46 46 #include <sys/priv.h>
47 47 #include <sys/proc.h>
48 48 #include <sys/share.h>
49 49 #include <sys/socket.h>
50 50 #include <sys/syscall.h>
51 51 #include <sys/syslog.h>
52 52 #include <sys/systm.h>
53 53 #include <sys/taskq.h>
54 54 #include <sys/unistd.h>
55 55 #include <sys/vnode.h>
56 56 #include <sys/vfs.h>
57 57 #include <sys/queue.h>
58 58 #include <sys/sdt.h>
59 59 #include <netinet/in.h>
60 60
61 61 #include <rpc/rpc.h>
62 62 #include <rpc/xdr.h>
63 63 #include <rpc/pmap_prot.h>
64 64 #include <rpc/pmap_clnt.h>
65 65 #include <rpc/rpcb_prot.h>
66 66
67 67 #include <rpcsvc/nlm_prot.h>
68 68 #include <rpcsvc/sm_inter.h>
69 69
70 70 #include <nfs/nfs.h>
71 71 #include <nfs/nfs_clnt.h>
72 72 #include <nfs/export.h>
73 73 #include <nfs/rnode.h>
74 74
75 75 #include "nlm_impl.h"
76 76
77 77 #define NLM_IN_GRACE(g) (ddi_get_lbolt() < (g)->grace_threshold)
78 78
79 79 struct nlm_block_cb_data {
80 80 struct nlm_host *hostp;
81 81 struct nlm_vhold *nvp;
82 82 struct flock64 *flp;
83 83 };
84 84
85 85 /*
86 86 * Invoke an asynchronous RPC callback
87 87 * (used when the NLM server needs to reply to a MSG NLM procedure).
88 88 */
89 89 #define NLM_INVOKE_CALLBACK(descr, rpcp, resp, callb) \
90 90 do { \
91 91 enum clnt_stat _stat; \
92 92 \
93 93 _stat = (*(callb))(resp, NULL, (rpcp)->nr_handle); \
94 94 if (_stat != RPC_SUCCESS && _stat != RPC_TIMEDOUT) { \
95 95 struct rpc_err _err; \
96 96 \
97 97 CLNT_GETERR((rpcp)->nr_handle, &_err); \
98 98 NLM_ERR("NLM: %s callback failed: " \
99 99 "stat %d, err %d\n", descr, _stat, \
100 100 _err.re_errno); \
101 101 } \
102 102 \
103 103 _NOTE(CONSTCOND) } while (0)
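/*
 * [Illustrative sketch, not part of this file] The macro above uses the
 * standard "do { ... } while (0)" idiom so that a multi-statement macro
 * behaves as a single statement at its call site. A minimal userland
 * example of why the idiom matters; all names here are hypothetical.
 */
#include <stdio.h>

#define LOG_TWICE_BAD(msg)	{ puts(msg); puts(msg); }
#define LOG_TWICE_OK(msg)	do { puts(msg); puts(msg); } while (0)

static void
log_example(int cond)
{
	/*
	 * With LOG_TWICE_BAD, the ';' after the expanded braces would
	 * leave the following "else" without a matching "if" and the
	 * code would not compile; the do/while form absorbs the ';'.
	 */
	if (cond)
		LOG_TWICE_OK("condition held");
	else
		LOG_TWICE_OK("condition did not hold");
}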
104 104
105 105 static void nlm_block(
106 106 nlm4_lockargs *lockargs,
107 107 struct nlm_host *host,
108 108 struct nlm_vhold *nvp,
109 109 nlm_rpc_t *rpcp,
110 110 struct flock64 *fl,
111 111 nlm_testargs_cb grant_cb);
112 112
113 113 static vnode_t *nlm_fh_to_vp(struct netobj *);
114 114 static struct nlm_vhold *nlm_fh_to_vhold(struct nlm_host *, struct netobj *);
115 115 static void nlm_init_shrlock(struct shrlock *, nlm4_share *, struct nlm_host *);
116 116 static callb_cpr_t *nlm_block_callback(flk_cb_when_t, void *);
117 117 static int nlm_vop_frlock(vnode_t *, int, flock64_t *, int, offset_t,
118 118 struct flk_callback *, cred_t *, caller_context_t *);
119 119
120 120 /*
121 121 * Convert a lock from network to local form, and
122 122 * check for valid range (no overflow).
123 123 */
124 124 static int
125 125 nlm_init_flock(struct flock64 *fl, struct nlm4_lock *nl,
126 126 struct nlm_host *host, rpcvers_t vers, short type)
127 127 {
128 128 uint64_t off, len;
129 129
130 130 bzero(fl, sizeof (*fl));
131 131 off = nl->l_offset;
132 132 len = nl->l_len;
133 133
134 134 if (vers < NLM4_VERS) {
135 - /*
136 - * Make sure range is valid for 32-bit client.
137 - * Also allow len == ~0 to mean lock to EOF,
138 - * which is supposed to be l_len == 0.
139 - */
140 - if (len == MAX_UOFF32)
141 - len = 0;
142 135 if (off > MAX_UOFF32 || len > MAX_UOFF32)
143 136 return (EINVAL);
144 137 if (off + len > MAX_UOFF32 + 1)
145 138 return (EINVAL);
146 139 } else {
147 140 /*
148 141 * Check range for 64-bit client (no overflow).
149 142 * Again allow len == ~0 to mean lock to EOF.
150 143 */
151 144 if (len == MAX_U_OFFSET_T)
152 145 len = 0;
153 146 if (len != 0 && off + (len - 1) < off)
154 147 return (EINVAL);
155 148 }
156 149
157 150 fl->l_type = type;
158 151 fl->l_whence = SEEK_SET;
159 152 fl->l_start = off;
160 153 fl->l_len = len;
161 154 fl->l_sysid = host->nh_sysid;
162 155 fl->l_pid = nl->svid;
163 156 /* l_pad */
164 157
165 158 return (0);
166 159 }
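/*
 * [Illustrative sketch, not part of this file] The range checks done by
 * nlm_init_flock() above, as standalone arithmetic on stdint types. The
 * constants and function name are hypothetical stand-ins for MAX_UOFF32,
 * MAX_U_OFFSET_T and the kernel code.
 */
#include <stdint.h>
#include <errno.h>

#define EX_MAX_UOFF32	0xffffffffULL		/* largest 32-bit offset */
#define EX_MAX_UOFF64	0xffffffffffffffffULL	/* largest 64-bit offset */

static int
ex_check_lock_range(uint64_t off, uint64_t len, int is_v4)
{
	if (!is_v4) {
		/*
		 * Pre-v4 (32-bit) client: offset and length must each
		 * fit in 32 bits, and the end of the range must not
		 * pass 2^32.
		 */
		if (off > EX_MAX_UOFF32 || len > EX_MAX_UOFF32)
			return (EINVAL);
		if (off + len > EX_MAX_UOFF32 + 1)
			return (EINVAL);
	} else {
		/*
		 * NLM4 (64-bit) client: len == ~0 means "lock to EOF",
		 * otherwise reject ranges whose last byte wraps past
		 * the end of the 64-bit offset space.
		 */
		if (len == EX_MAX_UOFF64)
			len = 0;
		if (len != 0 && off + (len - 1) < off)
			return (EINVAL);
	}
	return (0);
}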
167 160
168 161 /*
169 162 * Gets the vnode from the client's filehandle.
170 163 * NOTE: Holds the vnode; it _must_ be explicitly
171 164 * released by VN_RELE().
172 165 */
173 166 static vnode_t *
174 167 nlm_fh_to_vp(struct netobj *fh)
175 168 {
176 169 fhandle_t *fhp;
177 170
178 171 /*
179 172 * Get a vnode pointer for the given NFS file handle.
180 173 * Note that it could be an NFSv2 or NFSv3 handle,
181 174 * which means the size might vary. (don't copy)
182 175 */
183 176 if (fh->n_len < sizeof (*fhp))
184 177 return (NULL);
185 178
186 179 /* We know this is aligned (kmem_alloc) */
187 180 /* LINTED E_BAD_PTR_CAST_ALIGN */
188 181 fhp = (fhandle_t *)fh->n_bytes;
189 182 return (lm_fhtovp(fhp));
190 183 }
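/*
 * [Illustrative sketch, not part of this file] The pattern used by
 * nlm_fh_to_vp() above: an opaque byte buffer is cast to a structure
 * only after its length has been checked. The types and names below
 * are hypothetical.
 */
#include <stddef.h>
#include <stdint.h>

struct ex_fhandle {
	uint32_t fh_data[8];		/* stand-in for fhandle_t */
};

static const struct ex_fhandle *
ex_bytes_to_fh(const void *bytes, size_t len)
{
	/* Reject buffers too short to contain a whole handle. */
	if (len < sizeof (struct ex_fhandle))
		return (NULL);

	/* Caller guarantees suitable alignment (e.g. heap allocation). */
	return ((const struct ex_fhandle *)bytes);
}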
191 184
192 185 /*
193 186 * Get a vhold from the client's filehandle; in contrast to
194 187 * nlm_fh_to_vp(), it also tries to check some access rights.
195 188 *
196 189 * NOTE: vhold object _must_ be explicitly released by
197 190 * nlm_vhold_release().
198 191 */
199 192 static struct nlm_vhold *
200 193 nlm_fh_to_vhold(struct nlm_host *hostp, struct netobj *fh)
201 194 {
202 195 vnode_t *vp;
203 196 struct nlm_vhold *nvp;
204 197
205 198 vp = nlm_fh_to_vp(fh);
206 199 if (vp == NULL)
207 200 return (NULL);
208 201
209 202
210 203 nvp = nlm_vhold_get(hostp, vp);
211 204
212 205 /*
213 206 * Both nlm_fh_to_vp() and nlm_vhold_get()
214 207 * do VN_HOLD(), so we need to drop one
215 208 * reference on vnode.
216 209 */
217 210 VN_RELE(vp);
218 211 return (nvp);
219 212 }
220 213
221 214 /* ******************************************************************* */
222 215
223 216 /*
224 217 * NLM implementation details, called from the RPC svc code.
225 218 */
226 219
227 220 /*
228 221 * Call-back from NFS statd, used to notify that one of our
229 222 * hosts had a status change. The host can be either an
230 223 * NFS client, NFS server or both.
231 224 * According to NSM protocol description, the state is a
232 225 * number that increases monotonically each time the
233 226 * state of the host changes. An even number indicates that
234 227 * the host is down, while an odd number indicates that
235 228 * the host is up.
236 229 *
237 230 * Here we ignore this even/odd distinction of the status
238 231 * number reported by the NSM and launch notification handlers
239 232 * every time the state changes. The reason we do so is
240 233 * that the client and server can talk to each other using a
241 234 * connectionless transport, so it's easy to lose a packet
242 235 * containing an NSM notification with a status number update.
243 236 *
244 237 * In nlm_host_monitor(), we put the sysid in the private data
245 238 * that statd carries in this callback, so we can easily find
246 239 * the host this call applies to.
247 240 */
248 241 /* ARGSUSED */
249 242 void
250 243 nlm_do_notify1(nlm_sm_status *argp, void *res, struct svc_req *sr)
251 244 {
252 245 struct nlm_globals *g;
253 246 struct nlm_host *host;
254 247 uint16_t sysid;
255 248
256 249 g = zone_getspecific(nlm_zone_key, curzone);
257 250 bcopy(&argp->priv, &sysid, sizeof (sysid));
258 251
259 252 DTRACE_PROBE2(nsm__notify, uint16_t, sysid,
260 253 int, argp->state);
261 254
262 255 host = nlm_host_find_by_sysid(g, (sysid_t)sysid);
263 256 if (host == NULL)
264 257 return;
265 258
266 259 nlm_host_notify_server(host, argp->state);
267 260 nlm_host_notify_client(host, argp->state);
268 261 nlm_host_release(g, host);
269 262 }
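/*
 * [Illustrative sketch, not part of this file] The NSM conventions
 * described in the comment above, in standalone form: the 16-bit sysid
 * is copied out of the opaque priv bytes just as nlm_do_notify1() does,
 * and the low bit of the state distinguishes up from down. All names
 * here are hypothetical.
 */
#include <stdint.h>
#include <string.h>

struct ex_sm_status {
	uint8_t	priv[16];	/* opaque; first two bytes carry the sysid */
	int	state;		/* monotonically increasing NSM state */
};

static uint16_t
ex_status_sysid(const struct ex_sm_status *st)
{
	uint16_t sysid;

	/* A byte copy avoids alignment assumptions about priv[]. */
	memcpy(&sysid, st->priv, sizeof (sysid));
	return (sysid);
}

static int
ex_host_is_up(const struct ex_sm_status *st)
{
	/* Even state number => host down, odd => host up. */
	return (st->state & 1);
}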
270 263
271 264 /*
272 265 * Another available call-back for NFS statd.
273 266 * Not currently used.
274 267 */
275 268 /* ARGSUSED */
276 269 void
277 270 nlm_do_notify2(nlm_sm_status *argp, void *res, struct svc_req *sr)
278 271 {
279 272 ASSERT(0);
280 273 }
281 274
282 275
283 276 /*
284 277 * NLM_TEST, NLM_TEST_MSG,
285 278 * NLM4_TEST, NLM4_TEST_MSG,
286 279 * Client inquiry about locks, non-blocking.
287 280 */
288 281 void
289 282 nlm_do_test(nlm4_testargs *argp, nlm4_testres *resp,
290 283 struct svc_req *sr, nlm_testres_cb cb)
291 284 {
292 285 struct nlm_globals *g;
293 286 struct nlm_host *host;
294 287 struct nlm4_holder *lh;
295 288 struct nlm_owner_handle *oh;
296 289 nlm_rpc_t *rpcp = NULL;
297 290 vnode_t *vp = NULL;
298 291 struct netbuf *addr;
299 292 char *netid;
300 293 char *name;
301 294 int error;
302 295 struct flock64 fl;
303 296
304 297 nlm_copy_netobj(&resp->cookie, &argp->cookie);
305 298
306 299 name = argp->alock.caller_name;
307 300 netid = svc_getnetid(sr->rq_xprt);
308 301 addr = svc_getrpccaller(sr->rq_xprt);
309 302
310 303 g = zone_getspecific(nlm_zone_key, curzone);
311 304 host = nlm_host_findcreate(g, name, netid, addr);
312 305 if (host == NULL) {
313 306 resp->stat.stat = nlm4_denied_nolocks;
314 307 return;
315 308 }
316 309 if (cb != NULL) {
317 310 error = nlm_host_get_rpc(host, sr->rq_vers, &rpcp);
318 311 if (error != 0) {
319 312 resp->stat.stat = nlm4_denied_nolocks;
320 313 goto out;
321 314 }
322 315 }
323 316
324 317 vp = nlm_fh_to_vp(&argp->alock.fh);
325 318 if (vp == NULL) {
326 319 resp->stat.stat = nlm4_stale_fh;
327 320 goto out;
328 321 }
329 322
330 323 if (NLM_IN_GRACE(g)) {
331 324 resp->stat.stat = nlm4_denied_grace_period;
332 325 goto out;
333 326 }
334 327
335 328 /* Convert to local form. */
336 329 error = nlm_init_flock(&fl, &argp->alock, host, sr->rq_vers,
337 330 (argp->exclusive) ? F_WRLCK : F_RDLCK);
338 331 if (error) {
339 332 resp->stat.stat = nlm4_failed;
340 333 goto out;
341 334 }
342 335
343 336 /* BSD: VOP_ADVLOCK(nv->nv_vp, NULL, F_GETLK, &fl, F_REMOTE); */
344 337 error = nlm_vop_frlock(vp, F_GETLK, &fl,
345 338 F_REMOTELOCK | FREAD | FWRITE,
346 339 (u_offset_t)0, NULL, CRED(), NULL);
347 340 if (error) {
348 341 resp->stat.stat = nlm4_failed;
349 342 goto out;
350 343 }
351 344
352 345 if (fl.l_type == F_UNLCK) {
353 346 resp->stat.stat = nlm4_granted;
354 347 goto out;
355 348 }
356 349 resp->stat.stat = nlm4_denied;
357 350
358 351 /*
359 352 * This lock "test" fails due to a conflicting lock.
360 353 *
361 354 * If this is a v1 client, make sure the conflicting
362 355 * lock range we report can be expressed with 32-bit
363 356 * offsets. The lock range requested was expressed
364 357 * as 32-bit offset and length, so at least part of
365 358 * the conflicting lock should lie below MAX_UOFF32.
366 359 * If the conflicting lock extends past that, we'll
367 360 * trim the range to end at MAX_UOFF32 so this lock
368 361 * can be represented in a 32-bit response. Check
369 362 * the start also (paranoid, but a low cost check).
370 363 */
371 364 if (sr->rq_vers < NLM4_VERS) {
372 365 uint64 maxlen;
373 366 if (fl.l_start > MAX_UOFF32)
374 367 fl.l_start = MAX_UOFF32;
375 368 maxlen = MAX_UOFF32 + 1 - fl.l_start;
376 369 if (fl.l_len > maxlen)
377 370 fl.l_len = maxlen;
378 371 }
379 372
380 373 /*
381 374 * Build the nlm4_holder result structure.
382 375 *
383 376 * Note that lh->oh is freed via xdr_free,
384 377 * xdr_nlm4_holder, xdr_netobj, xdr_bytes.
385 378 */
386 379 oh = kmem_zalloc(sizeof (*oh), KM_SLEEP);
387 380 oh->oh_sysid = (sysid_t)fl.l_sysid;
388 381 lh = &resp->stat.nlm4_testrply_u.holder;
389 382 lh->exclusive = (fl.l_type == F_WRLCK);
390 383 lh->svid = fl.l_pid;
391 384 lh->oh.n_len = sizeof (*oh);
392 385 lh->oh.n_bytes = (void *)oh;
393 386 lh->l_offset = fl.l_start;
394 387 lh->l_len = fl.l_len;
395 388
396 389 out:
397 390 /*
398 391 * If we have a callback function, use that to
399 392 * deliver the response via another RPC call.
400 393 */
401 394 if (cb != NULL && rpcp != NULL)
402 395 NLM_INVOKE_CALLBACK("test", rpcp, resp, cb);
403 396
404 397 if (vp != NULL)
405 398 VN_RELE(vp);
406 399 if (rpcp != NULL)
407 400 nlm_host_rele_rpc(host, rpcp);
408 401
409 402 nlm_host_release(g, host);
410 403 }
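/*
 * [Illustrative sketch, not part of this file] The 32-bit trimming that
 * nlm_do_test() applies to a conflicting lock for pre-v4 clients, as
 * standalone arithmetic. For example, a conflicting lock with start
 * 0xfffffff0 and len 0x100 is reported as start 0xfffffff0, len 0x10,
 * so it still fits in 32-bit offset/length fields. Names below are
 * hypothetical.
 */
#include <stdint.h>

#define EX_MAX_UOFF32	0xffffffffULL

static void
ex_trim_to_32bit(uint64_t *startp, uint64_t *lenp)
{
	uint64_t maxlen;

	if (*startp > EX_MAX_UOFF32)
		*startp = EX_MAX_UOFF32;
	maxlen = EX_MAX_UOFF32 + 1 - *startp;
	if (*lenp > maxlen)
		*lenp = maxlen;
}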
411 404
412 405 /*
413 406 * NLM_LOCK, NLM_LOCK_MSG, NLM_NM_LOCK
414 407 * NLM4_LOCK, NLM4_LOCK_MSG, NLM4_NM_LOCK
415 408 *
416 409 * Client request to set a lock, possibly blocking.
417 410 *
418 411 * If the lock needs to block, we return status blocked to
419 412 * this RPC call, and then later call back the client with
420 413 * a "granted" callback. Tricky aspects of this include:
421 414 * sending a reply before this function returns, and then
422 415 * borrowing this thread from the RPC service pool for the
423 416 * wait on the lock and doing the later granted callback.
424 417 *
425 418 * We also have to keep a list of locks (pending + granted)
426 419 * both to handle retransmitted requests, and to keep the
427 420 * vnodes for those locks active.
428 421 */
429 422 void
430 423 nlm_do_lock(nlm4_lockargs *argp, nlm4_res *resp, struct svc_req *sr,
431 424 nlm_reply_cb reply_cb, nlm_res_cb res_cb, nlm_testargs_cb grant_cb)
432 425 {
433 426 struct nlm_globals *g;
434 427 struct flock64 fl;
435 428 struct nlm_host *host = NULL;
436 429 struct netbuf *addr;
437 430 struct nlm_vhold *nvp = NULL;
438 431 nlm_rpc_t *rpcp = NULL;
439 432 char *netid;
440 433 char *name;
441 434 int error, flags;
442 435 bool_t do_blocking = FALSE;
443 436 bool_t do_mon_req = FALSE;
444 437 enum nlm4_stats status;
445 438
446 439 nlm_copy_netobj(&resp->cookie, &argp->cookie);
447 440
448 441 name = argp->alock.caller_name;
449 442 netid = svc_getnetid(sr->rq_xprt);
450 443 addr = svc_getrpccaller(sr->rq_xprt);
451 444
452 445 g = zone_getspecific(nlm_zone_key, curzone);
453 446 host = nlm_host_findcreate(g, name, netid, addr);
454 447 if (host == NULL) {
455 448 DTRACE_PROBE4(no__host, struct nlm_globals *, g,
456 449 char *, name, char *, netid, struct netbuf *, addr);
457 450 status = nlm4_denied_nolocks;
458 451 goto doreply;
459 452 }
460 453
461 454 DTRACE_PROBE3(start, struct nlm_globals *, g,
462 455 struct nlm_host *, host, nlm4_lockargs *, argp);
463 456
464 457 /*
465 458 * If we may need to do a _msg_ call needing an RPC
466 459 * callback, get the RPC client handle now,
467 460 * so we know if we can bind to the NLM service on
468 461 * this client.
469 462 *
470 463 * Note: host object carries transport type.
471 464 * One client using multiple transports gets
472 465 * separate sysids for each of its transports.
473 466 */
474 467 if (res_cb != NULL || (grant_cb != NULL && argp->block == TRUE)) {
475 468 error = nlm_host_get_rpc(host, sr->rq_vers, &rpcp);
476 469 if (error != 0) {
477 470 status = nlm4_denied_nolocks;
478 471 goto doreply;
479 472 }
480 473 }
481 474
482 475 /*
483 476 * During the "grace period", only allow reclaim.
484 477 */
485 478 if (argp->reclaim == 0 && NLM_IN_GRACE(g)) {
486 479 status = nlm4_denied_grace_period;
487 480 goto doreply;
488 481 }
489 482
490 483 /*
491 484 * Check whether we missed host shutdown event
492 485 */
493 486 if (nlm_host_get_state(host) != argp->state)
494 487 nlm_host_notify_server(host, argp->state);
495 488
496 489 /*
497 490 * Get a hold on the vnode for a lock operation.
498 491 * Only lock() and share() need vhold objects.
499 492 */
500 493 nvp = nlm_fh_to_vhold(host, &argp->alock.fh);
501 494 if (nvp == NULL) {
502 495 status = nlm4_stale_fh;
503 496 goto doreply;
504 497 }
505 498
506 499 /* Convert to local form. */
507 500 error = nlm_init_flock(&fl, &argp->alock, host, sr->rq_vers,
508 501 (argp->exclusive) ? F_WRLCK : F_RDLCK);
509 502 if (error) {
510 503 status = nlm4_failed;
511 504 goto doreply;
512 505 }
513 506
514 507 /*
515 508 * Try to lock non-blocking first. If we succeed
516 509 * getting the lock, we can reply with the granted
517 510 * status directly and avoid the complications of
518 511 * making the "granted" RPC callback later.
519 512 *
520 513 * This also lets us find out now about some
521 514 * possible errors like EROFS, etc.
522 515 */
523 516 flags = F_REMOTELOCK | FREAD | FWRITE;
524 517 error = nlm_vop_frlock(nvp->nv_vp, F_SETLK, &fl, flags,
525 518 (u_offset_t)0, NULL, CRED(), NULL);
526 519
527 520 DTRACE_PROBE3(setlk__res, struct flock64 *, &fl,
528 521 int, flags, int, error);
529 522
530 523 switch (error) {
531 524 case 0:
532 525 /* Got it without waiting! */
533 526 status = nlm4_granted;
534 527 do_mon_req = TRUE;
535 528 break;
536 529
537 530 /* EINPROGRESS too? */
538 531 case EAGAIN:
539 532 /* We did not get the lock. Should we block? */
540 533 if (argp->block == FALSE || grant_cb == NULL) {
541 534 status = nlm4_denied;
542 535 break;
543 536 }
544 537 /*
545 538 * Should block. Try to reserve this thread
546 539 * so we can use it to wait for the lock and
547 540 * later send the granted message. If this
548 541 * reservation fails, say "no resources".
549 542 */
550 543 if (!svc_reserve_thread(sr->rq_xprt)) {
551 544 status = nlm4_denied_nolocks;
552 545 break;
553 546 }
554 547 /*
555 548 * OK, can detach this thread, so this call
556 549 * will block below (after we reply).
557 550 */
558 551 status = nlm4_blocked;
559 552 do_blocking = TRUE;
560 553 do_mon_req = TRUE;
561 554 break;
562 555
563 556 case ENOLCK:
564 557 /* Failed for lack of resources. */
565 558 status = nlm4_denied_nolocks;
566 559 break;
567 560
568 561 case EROFS:
569 562 /* read-only file system */
570 563 status = nlm4_rofs;
571 564 break;
572 565
573 566 case EFBIG:
574 567 /* file too big */
575 568 status = nlm4_fbig;
576 569 break;
577 570
578 571 case EDEADLK:
579 572 /* dead lock condition */
580 573 status = nlm4_deadlck;
581 574 break;
582 575
583 576 default:
584 577 status = nlm4_denied;
585 578 break;
586 579 }
587 580
588 581 doreply:
589 582 resp->stat.stat = status;
590 583
591 584 /*
592 585 * We get one of two function pointers; one for a
593 586 * normal RPC reply, and another for doing an RPC
594 587 * "callback" _res reply for a _msg function.
595 588 * Use either of those to send the reply now.
596 589 *
597 590 * If sending this reply fails, just leave the
598 591 * lock in the list for retransmitted requests.
599 592 * Cleanup is via unlock or host rele (statmon).
600 593 */
601 594 if (reply_cb != NULL) {
602 595 /* i.e. nlm_lock_1_reply */
603 596 if (!(*reply_cb)(sr->rq_xprt, resp))
604 597 svcerr_systemerr(sr->rq_xprt);
605 598 }
606 599 if (res_cb != NULL && rpcp != NULL)
607 600 NLM_INVOKE_CALLBACK("lock", rpcp, resp, res_cb);
608 601
609 602 /*
610 603 * The reply has been sent to the client.
611 604 * Start monitoring this client (maybe).
612 605 *
613 606 * Note that the non-monitored (NM) calls pass grant_cb=NULL
614 607 * indicating that the client doesn't support RPC callbacks.
615 608 * No monitoring for these (lame) clients.
616 609 */
617 610 if (do_mon_req && grant_cb != NULL)
618 611 nlm_host_monitor(g, host, argp->state);
619 612
620 613 if (do_blocking) {
621 614 /*
622 615 * We need to block on this lock, and when that
623 616 * completes, do the granted RPC call. Note that
624 617 * we "reserved" this thread above, so we can now
625 618 * "detach" it from the RPC SVC pool, allowing it
626 619 * to block indefinitely if needed.
627 620 */
628 621 ASSERT(rpcp != NULL);
629 622 (void) svc_detach_thread(sr->rq_xprt);
630 623 nlm_block(argp, host, nvp, rpcp, &fl, grant_cb);
631 624 }
632 625
633 626 DTRACE_PROBE3(lock__end, struct nlm_globals *, g,
634 627 struct nlm_host *, host, nlm4_res *, resp);
635 628
636 629 if (rpcp != NULL)
637 630 nlm_host_rele_rpc(host, rpcp);
638 631
639 632 nlm_vhold_release(host, nvp);
640 633 nlm_host_release(g, host);
641 634 }
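/*
 * [Illustrative sketch, not part of this file] A rough userland analogue
 * of the control flow in nlm_do_lock() above -- try a non-blocking lock
 * first and fall back to a blocking wait only on conflict -- expressed
 * with POSIX fcntl() record locks rather than the kernel's VOP_FRLOCK
 * interface. This is only an analogy, not the server's code path.
 */
#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>

static int
ex_lock_range(int fd, off_t start, off_t len, int may_block)
{
	struct flock fl;

	fl.l_type = F_WRLCK;
	fl.l_whence = SEEK_SET;
	fl.l_start = start;
	fl.l_len = len;

	/* First try without blocking (like the F_SETLK attempt above). */
	if (fcntl(fd, F_SETLK, &fl) == 0)
		return (0);			/* "granted" right away */

	if ((errno != EAGAIN && errno != EACCES) || !may_block)
		return (-1);			/* "denied" or a hard error */

	/* Conflict: now wait for the lock (like the nlm_block() path). */
	return (fcntl(fd, F_SETLKW, &fl));
}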
642 635
643 636 /*
644 637 * Helper for nlm_do_lock(), partly for observability,
645 638 * (we'll see a call blocked in this function) and
646 639 * because nlm_do_lock() was getting quite long.
647 640 */
648 641 static void
649 642 nlm_block(nlm4_lockargs *lockargs,
650 643 struct nlm_host *host,
651 644 struct nlm_vhold *nvp,
652 645 nlm_rpc_t *rpcp,
653 646 struct flock64 *flp,
654 647 nlm_testargs_cb grant_cb)
655 648 {
656 649 nlm4_testargs args;
657 650 int error;
658 651 flk_callback_t flk_cb;
659 652 struct nlm_block_cb_data cb_data;
660 653
661 654 /*
662 655 * Keep a list of blocked locks on nh_pending, and use it
663 656 * to cancel these threads in nlm_destroy_client_pending.
664 657 *
665 658 * Check to see if this lock is already in the list
666 659 * and if not, add an entry for it. Allocate first,
667 660 * then if we don't insert, free the new one.
668 661 * Caller already has vp held.
669 662 */
670 663
671 664 error = nlm_slreq_register(host, nvp, flp);
672 665 if (error != 0) {
673 666 /*
674 667 * A sleeping lock request with the given fl is already
675 668 * registered by someone else. This means that some
676 669 * other thread is handling the request; let it do
677 670 * its work.
678 671 */
679 672 ASSERT(error == EEXIST);
680 673 return;
681 674 }
682 675
683 676 cb_data.hostp = host;
684 677 cb_data.nvp = nvp;
685 678 cb_data.flp = flp;
686 679 flk_init_callback(&flk_cb, nlm_block_callback, &cb_data);
687 680
688 681 /* BSD: VOP_ADVLOCK(vp, NULL, F_SETLK, fl, F_REMOTE); */
689 682 error = nlm_vop_frlock(nvp->nv_vp, F_SETLKW, flp,
690 683 F_REMOTELOCK | FREAD | FWRITE,
691 684 (u_offset_t)0, &flk_cb, CRED(), NULL);
692 685
693 686 if (error != 0) {
694 687 /*
695 688 * We failed getting the lock, but have no way to
696 689 * tell the client about that. Let 'em time out.
697 690 */
698 691 (void) nlm_slreq_unregister(host, nvp, flp);
699 692 return;
700 693 }
701 694
702 695 /*
703 696 * Do the "granted" call-back to the client.
704 697 */
705 698 args.cookie = lockargs->cookie;
706 699 args.exclusive = lockargs->exclusive;
707 700 args.alock = lockargs->alock;
708 701
709 702 NLM_INVOKE_CALLBACK("grant", rpcp, &args, grant_cb);
710 703 }
711 704
712 705 /*
713 706 * The function that is used as flk callback when NLM server
714 707 * sets new sleeping lock. The function unregisters NLM
715 708 * sleeping lock request (nlm_slreq) associated with the
716 709 * sleeping lock _before_ lock becomes active. It prevents
717 710 * potential race condition between nlm_block() and
718 711 * nlm_do_cancel().
719 712 */
720 713 static callb_cpr_t *
721 714 nlm_block_callback(flk_cb_when_t when, void *data)
722 715 {
723 716 struct nlm_block_cb_data *cb_data;
724 717
725 718 cb_data = (struct nlm_block_cb_data *)data;
726 719 if (when == FLK_AFTER_SLEEP) {
727 720 (void) nlm_slreq_unregister(cb_data->hostp,
728 721 cb_data->nvp, cb_data->flp);
729 722 }
730 723
731 724 return (0);
732 725 }
733 726
734 727 /*
735 728 * NLM_CANCEL, NLM_CANCEL_MSG,
736 729 * NLM4_CANCEL, NLM4_CANCEL_MSG,
737 730 * Client gives up waiting for a blocking lock.
738 731 */
739 732 void
740 733 nlm_do_cancel(nlm4_cancargs *argp, nlm4_res *resp,
741 734 struct svc_req *sr, nlm_res_cb cb)
742 735 {
743 736 struct nlm_globals *g;
744 737 struct nlm_host *host;
745 738 struct netbuf *addr;
746 739 struct nlm_vhold *nvp = NULL;
747 740 nlm_rpc_t *rpcp = NULL;
748 741 char *netid;
749 742 char *name;
750 743 int error;
751 744 struct flock64 fl;
752 745
753 746 nlm_copy_netobj(&resp->cookie, &argp->cookie);
754 747 netid = svc_getnetid(sr->rq_xprt);
755 748 addr = svc_getrpccaller(sr->rq_xprt);
756 749 name = argp->alock.caller_name;
757 750
758 751 g = zone_getspecific(nlm_zone_key, curzone);
759 752 host = nlm_host_findcreate(g, name, netid, addr);
760 753 if (host == NULL) {
761 754 resp->stat.stat = nlm4_denied_nolocks;
762 755 return;
763 756 }
764 757 if (cb != NULL) {
765 758 error = nlm_host_get_rpc(host, sr->rq_vers, &rpcp);
766 759 if (error != 0) {
767 760 resp->stat.stat = nlm4_denied_nolocks;
768 761 return;
769 762 }
770 763 }
771 764
772 765 DTRACE_PROBE3(start, struct nlm_globals *, g,
773 766 struct nlm_host *, host, nlm4_cancargs *, argp);
774 767
775 768 if (NLM_IN_GRACE(g)) {
776 769 resp->stat.stat = nlm4_denied_grace_period;
777 770 goto out;
778 771 }
779 772
780 773 nvp = nlm_fh_to_vhold(host, &argp->alock.fh);
781 774 if (nvp == NULL) {
782 775 resp->stat.stat = nlm4_stale_fh;
783 776 goto out;
784 777 }
785 778
786 779 /* Convert to local form. */
787 780 error = nlm_init_flock(&fl, &argp->alock, host, sr->rq_vers,
788 781 (argp->exclusive) ? F_WRLCK : F_RDLCK);
789 782 if (error) {
790 783 resp->stat.stat = nlm4_failed;
791 784 goto out;
792 785 }
793 786
794 787 error = nlm_slreq_unregister(host, nvp, &fl);
795 788 if (error != 0) {
796 789 /*
797 790 * There's no sleeping lock request corresponding
798 791 * to the lock, i.e. the requested sleeping lock
799 792 * doesn't exist.
800 793 */
801 794 resp->stat.stat = nlm4_denied;
802 795 goto out;
803 796 }
804 797
805 798 fl.l_type = F_UNLCK;
806 799 error = nlm_vop_frlock(nvp->nv_vp, F_SETLK, &fl,
807 800 F_REMOTELOCK | FREAD | FWRITE,
808 801 (u_offset_t)0, NULL, CRED(), NULL);
809 802
810 803 resp->stat.stat = (error == 0) ?
811 804 nlm4_granted : nlm4_denied;
812 805
813 806 out:
814 807 /*
815 808 * If we have a callback function, use that to
816 809 * deliver the response via another RPC call.
817 810 */
818 811 if (cb != NULL && rpcp != NULL)
819 812 NLM_INVOKE_CALLBACK("cancel", rpcp, resp, cb);
820 813
821 814 DTRACE_PROBE3(cancel__end, struct nlm_globals *, g,
822 815 struct nlm_host *, host, nlm4_res *, resp);
823 816
824 817 if (rpcp != NULL)
825 818 nlm_host_rele_rpc(host, rpcp);
826 819
827 820 nlm_vhold_release(host, nvp);
828 821 nlm_host_release(g, host);
829 822 }
830 823
831 824 /*
832 825 * NLM_UNLOCK, NLM_UNLOCK_MSG,
833 826 * NLM4_UNLOCK, NLM4_UNLOCK_MSG,
834 827 * Client removes one of their locks.
835 828 */
836 829 void
837 830 nlm_do_unlock(nlm4_unlockargs *argp, nlm4_res *resp,
838 831 struct svc_req *sr, nlm_res_cb cb)
839 832 {
840 833 struct nlm_globals *g;
841 834 struct nlm_host *host;
842 835 struct netbuf *addr;
843 836 nlm_rpc_t *rpcp = NULL;
844 837 vnode_t *vp = NULL;
845 838 char *netid;
846 839 char *name;
847 840 int error;
848 841 struct flock64 fl;
849 842
850 843 nlm_copy_netobj(&resp->cookie, &argp->cookie);
851 844
852 845 netid = svc_getnetid(sr->rq_xprt);
853 846 addr = svc_getrpccaller(sr->rq_xprt);
854 847 name = argp->alock.caller_name;
855 848
856 849 /*
857 850 * The NLM_UNLOCK operation doesn't have an error code
858 851 * denoting that the operation failed, so we always
859 852 * return nlm4_granted except when the server is
860 853 * in a grace period.
861 854 */
862 855 resp->stat.stat = nlm4_granted;
863 856
864 857 g = zone_getspecific(nlm_zone_key, curzone);
865 858 host = nlm_host_findcreate(g, name, netid, addr);
866 859 if (host == NULL)
867 860 return;
868 861
869 862 if (cb != NULL) {
870 863 error = nlm_host_get_rpc(host, sr->rq_vers, &rpcp);
871 864 if (error != 0)
872 865 goto out;
873 866 }
874 867
875 868 DTRACE_PROBE3(start, struct nlm_globals *, g,
876 869 struct nlm_host *, host, nlm4_unlockargs *, argp);
877 870
878 871 if (NLM_IN_GRACE(g)) {
879 872 resp->stat.stat = nlm4_denied_grace_period;
880 873 goto out;
881 874 }
882 875
883 876 vp = nlm_fh_to_vp(&argp->alock.fh);
884 877 if (vp == NULL)
885 878 goto out;
886 879
887 880 /* Convert to local form. */
888 881 error = nlm_init_flock(&fl, &argp->alock, host, sr->rq_vers, F_UNLCK);
889 882 if (error)
890 883 goto out;
891 884
892 885 /* BSD: VOP_ADVLOCK(nv->nv_vp, NULL, F_UNLCK, &fl, F_REMOTE); */
893 886 error = nlm_vop_frlock(vp, F_SETLK, &fl,
894 887 F_REMOTELOCK | FREAD | FWRITE,
895 888 (u_offset_t)0, NULL, CRED(), NULL);
896 889
897 890 DTRACE_PROBE1(unlock__res, int, error);
898 891 out:
899 892 /*
900 893 * If we have a callback function, use that to
901 894 * deliver the response via another RPC call.
902 895 */
903 896 if (cb != NULL && rpcp != NULL)
904 897 NLM_INVOKE_CALLBACK("unlock", rpcp, resp, cb);
905 898
906 899 DTRACE_PROBE3(unlock__end, struct nlm_globals *, g,
907 900 struct nlm_host *, host, nlm4_res *, resp);
908 901
909 902 if (vp != NULL)
910 903 VN_RELE(vp);
911 904 if (rpcp != NULL)
912 905 nlm_host_rele_rpc(host, rpcp);
913 906
914 907 nlm_host_release(g, host);
915 908 }
916 909
917 910 /*
918 911 * NLM_GRANTED, NLM_GRANTED_MSG,
919 912 * NLM4_GRANTED, NLM4_GRANTED_MSG,
920 913 *
921 914 * This service routine is special. It's the only one that's
922 915 * really part of our NLM _client_ support, used by _servers_
923 916 * to "call back" when a blocking lock from this NLM client
924 917 * is granted by the server. In this case, we _know_ there is
925 918 * already an nlm_host allocated and held by the client code.
926 919 * We want to find that nlm_host here.
927 920 *
928 921 * Over in nlm_call_lock(), the client encoded the sysid for this
929 922 * server in the "owner handle" netbuf sent with our lock request.
930 923 * We can now use that to find the nlm_host object we used there.
931 924 * (NB: The owner handle is opaque to the server.)
932 925 */
933 926 void
934 927 nlm_do_granted(nlm4_testargs *argp, nlm4_res *resp,
935 928 struct svc_req *sr, nlm_res_cb cb)
936 929 {
937 930 struct nlm_globals *g;
938 931 struct nlm_owner_handle *oh;
939 932 struct nlm_host *host;
940 933 nlm_rpc_t *rpcp = NULL;
941 934 int error;
942 935
943 936 nlm_copy_netobj(&resp->cookie, &argp->cookie);
944 937 resp->stat.stat = nlm4_denied;
945 938
946 939 g = zone_getspecific(nlm_zone_key, curzone);
947 940 oh = (void *) argp->alock.oh.n_bytes;
948 941 if (oh == NULL)
949 942 return;
950 943
951 944 host = nlm_host_find_by_sysid(g, oh->oh_sysid);
952 945 if (host == NULL)
953 946 return;
954 947
955 948 if (cb != NULL) {
956 949 error = nlm_host_get_rpc(host, sr->rq_vers, &rpcp);
957 950 if (error != 0)
958 951 goto out;
959 952 }
960 953
961 954 if (NLM_IN_GRACE(g)) {
962 955 resp->stat.stat = nlm4_denied_grace_period;
963 956 goto out;
964 957 }
965 958
966 959 error = nlm_slock_grant(g, host, &argp->alock);
967 960 if (error == 0)
968 961 resp->stat.stat = nlm4_granted;
969 962
970 963 out:
971 964 /*
972 965 * If we have a callback function, use that to
973 966 * deliver the response via another RPC call.
974 967 */
975 968 if (cb != NULL && rpcp != NULL)
976 969 NLM_INVOKE_CALLBACK("do_granted", rpcp, resp, cb);
977 970
978 971 if (rpcp != NULL)
979 972 nlm_host_rele_rpc(host, rpcp);
980 973
981 974 nlm_host_release(g, host);
982 975 }
983 976
984 977 /*
985 978 * NLM_FREE_ALL, NLM4_FREE_ALL
986 979 *
987 980 * Destroy all lock state for the calling client.
988 981 */
989 982 void
990 983 nlm_do_free_all(nlm4_notify *argp, void *res, struct svc_req *sr)
991 984 {
992 985 struct nlm_globals *g;
993 986 struct nlm_host_list host_list;
994 987 struct nlm_host *hostp;
995 988
996 989 TAILQ_INIT(&host_list);
997 990 g = zone_getspecific(nlm_zone_key, curzone);
998 991
999 992 /* Serialize calls to clean locks. */
1000 993 mutex_enter(&g->clean_lock);
1001 994
1002 995 /*
1003 996 * Find all hosts that have the given node name and put them on a
1004 997 * local list.
1005 998 */
1006 999 mutex_enter(&g->lock);
1007 1000 for (hostp = avl_first(&g->nlm_hosts_tree); hostp != NULL;
1008 1001 hostp = AVL_NEXT(&g->nlm_hosts_tree, hostp)) {
1009 1002 if (strcasecmp(hostp->nh_name, argp->name) == 0) {
1010 1003 /*
1011 1004 * If needed take the host out of the idle list since
1012 1005 * we are taking a reference.
1013 1006 */
1014 1007 if (hostp->nh_flags & NLM_NH_INIDLE) {
1015 1008 TAILQ_REMOVE(&g->nlm_idle_hosts, hostp, nh_link);
1016 1009 hostp->nh_flags &= ~NLM_NH_INIDLE;
1017 1010 }
1018 1011 hostp->nh_refs++;
1019 1012
1020 1013 TAILQ_INSERT_TAIL(&host_list, hostp, nh_link);
1021 1014 }
1022 1015 }
1023 1016 mutex_exit(&g->lock);
1024 1017
1025 1018 /* Free locks for all hosts on the local list. */
1026 1019 while (!TAILQ_EMPTY(&host_list)) {
1027 1020 hostp = TAILQ_FIRST(&host_list);
1028 1021 TAILQ_REMOVE(&host_list, hostp, nh_link);
1029 1022
1030 1023 /*
1031 1024 * Note that this does not do client-side cleanup.
1032 1025 * We want to do that ONLY if statd tells us the
1033 1026 * server has restarted.
1034 1027 */
1035 1028 nlm_host_notify_server(hostp, argp->state);
1036 1029 nlm_host_release(g, hostp);
1037 1030 }
1038 1031
1039 1032 mutex_exit(&g->clean_lock);
1040 1033
1041 1034 (void) res;
1042 1035 (void) sr;
1043 1036 }
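/*
 * [Illustrative sketch, not part of this file] The locking pattern used
 * by nlm_do_free_all() above: take references to the matching hosts
 * while the global lock is held, then do the slow per-host work only
 * after the lock has been dropped. A minimal userland sketch using
 * pthreads; all names here are hypothetical.
 */
#include <pthread.h>
#include <strings.h>

#define EX_MAX_MATCH	64

struct ex_host {
	const char	*name;
	int		refs;
	struct ex_host	*next;
};

static pthread_mutex_t ex_hosts_lock = PTHREAD_MUTEX_INITIALIZER;
static struct ex_host *ex_hosts;	/* global host list */

static void
ex_notify_matching(const char *name, void (*notify)(struct ex_host *))
{
	struct ex_host *work[EX_MAX_MATCH];
	struct ex_host *h;
	int n = 0, i;

	/* Phase 1: under the lock, just take references (cheap). */
	pthread_mutex_lock(&ex_hosts_lock);
	for (h = ex_hosts; h != NULL && n < EX_MAX_MATCH; h = h->next) {
		if (strcasecmp(h->name, name) == 0) {
			h->refs++;		/* keep the host alive */
			work[n++] = h;
		}
	}
	pthread_mutex_unlock(&ex_hosts_lock);

	/* Phase 2: slow per-host work with the global lock dropped. */
	for (i = 0; i < n; i++) {
		notify(work[i]);
		pthread_mutex_lock(&ex_hosts_lock);
		work[i]->refs--;		/* drop our reference */
		pthread_mutex_unlock(&ex_hosts_lock);
	}
}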
1044 1037
1045 1038 static void
1046 1039 nlm_init_shrlock(struct shrlock *shr,
1047 1040 nlm4_share *nshare, struct nlm_host *host)
1048 1041 {
1049 1042
1050 1043 switch (nshare->access) {
1051 1044 default:
1052 1045 case fsa_NONE:
1053 1046 shr->s_access = 0;
1054 1047 break;
1055 1048 case fsa_R:
1056 1049 shr->s_access = F_RDACC;
1057 1050 break;
1058 1051 case fsa_W:
1059 1052 shr->s_access = F_WRACC;
1060 1053 break;
1061 1054 case fsa_RW:
1062 1055 shr->s_access = F_RWACC;
1063 1056 break;
1064 1057 }
1065 1058
1066 1059 switch (nshare->mode) {
1067 1060 default:
1068 1061 case fsm_DN:
1069 1062 shr->s_deny = F_NODNY;
1070 1063 break;
1071 1064 case fsm_DR:
1072 1065 shr->s_deny = F_RDDNY;
1073 1066 break;
1074 1067 case fsm_DW:
1075 1068 shr->s_deny = F_WRDNY;
1076 1069 break;
1077 1070 case fsm_DRW:
1078 1071 shr->s_deny = F_RWDNY;
1079 1072 break;
1080 1073 }
1081 1074
1082 1075 shr->s_sysid = host->nh_sysid;
1083 1076 shr->s_pid = 0;
1084 1077 shr->s_own_len = nshare->oh.n_len;
1085 1078 shr->s_owner = nshare->oh.n_bytes;
1086 1079 }
1087 1080
1088 1081 /*
1089 1082 * NLM_SHARE, NLM4_SHARE
1090 1083 *
1091 1084 * Request a DOS-style share reservation
1092 1085 */
1093 1086 void
1094 1087 nlm_do_share(nlm4_shareargs *argp, nlm4_shareres *resp, struct svc_req *sr)
1095 1088 {
1096 1089 struct nlm_globals *g;
1097 1090 struct nlm_host *host;
1098 1091 struct netbuf *addr;
1099 1092 struct nlm_vhold *nvp = NULL;
1100 1093 char *netid;
1101 1094 char *name;
1102 1095 int error;
1103 1096 struct shrlock shr;
1104 1097
1105 1098 nlm_copy_netobj(&resp->cookie, &argp->cookie);
1106 1099
1107 1100 name = argp->share.caller_name;
1108 1101 netid = svc_getnetid(sr->rq_xprt);
1109 1102 addr = svc_getrpccaller(sr->rq_xprt);
1110 1103
1111 1104 g = zone_getspecific(nlm_zone_key, curzone);
1112 1105 host = nlm_host_findcreate(g, name, netid, addr);
1113 1106 if (host == NULL) {
1114 1107 resp->stat = nlm4_denied_nolocks;
1115 1108 return;
1116 1109 }
1117 1110
1118 1111 DTRACE_PROBE3(share__start, struct nlm_globals *, g,
1119 1112 struct nlm_host *, host, nlm4_shareargs *, argp);
1120 1113
1121 1114 if (argp->reclaim == 0 && NLM_IN_GRACE(g)) {
1122 1115 resp->stat = nlm4_denied_grace_period;
1123 1116 goto out;
1124 1117 }
1125 1118
1126 1119 * Get a hold on the vnode for the share operation.
1127 1120 * Get holded vnode when on lock operation.
1128 1121 * Only lock() and share() need vhold objects.
1129 1122 */
1130 1123 nvp = nlm_fh_to_vhold(host, &argp->share.fh);
1131 1124 if (nvp == NULL) {
1132 1125 resp->stat = nlm4_stale_fh;
1133 1126 goto out;
1134 1127 }
1135 1128
1136 1129 /* Convert to local form. */
1137 1130 nlm_init_shrlock(&shr, &argp->share, host);
1138 1131 error = VOP_SHRLOCK(nvp->nv_vp, F_SHARE, &shr,
1139 1132 FREAD | FWRITE, CRED(), NULL);
1140 1133
1141 1134 if (error == 0) {
1142 1135 resp->stat = nlm4_granted;
1143 1136 nlm_host_monitor(g, host, 0);
1144 1137 } else {
1145 1138 resp->stat = nlm4_denied;
1146 1139 }
1147 1140
1148 1141 out:
1149 1142 DTRACE_PROBE3(share__end, struct nlm_globals *, g,
1150 1143 struct nlm_host *, host, nlm4_shareres *, resp);
1151 1144
1152 1145 nlm_vhold_release(host, nvp);
1153 1146 nlm_host_release(g, host);
1154 1147 }
1155 1148
1156 1149 /*
1157 1150 * NLM_UNSHARE, NLM4_UNSHARE
1158 1151 *
1159 1152 * Release a DOS-style share reservation
1160 1153 */
1161 1154 void
1162 1155 nlm_do_unshare(nlm4_shareargs *argp, nlm4_shareres *resp, struct svc_req *sr)
1163 1156 {
1164 1157 struct nlm_globals *g;
1165 1158 struct nlm_host *host;
1166 1159 struct netbuf *addr;
1167 1160 vnode_t *vp = NULL;
1168 1161 char *netid;
1169 1162 int error;
1170 1163 struct shrlock shr;
1171 1164
1172 1165 nlm_copy_netobj(&resp->cookie, &argp->cookie);
1173 1166
1174 1167 netid = svc_getnetid(sr->rq_xprt);
1175 1168 addr = svc_getrpccaller(sr->rq_xprt);
1176 1169
1177 1170 g = zone_getspecific(nlm_zone_key, curzone);
1178 1171 host = nlm_host_find(g, netid, addr);
1179 1172 if (host == NULL) {
1180 1173 resp->stat = nlm4_denied_nolocks;
1181 1174 return;
1182 1175 }
1183 1176
1184 1177 DTRACE_PROBE3(unshare__start, struct nlm_globals *, g,
1185 1178 struct nlm_host *, host, nlm4_shareargs *, argp);
1186 1179
1187 1180 if (NLM_IN_GRACE(g)) {
1188 1181 resp->stat = nlm4_denied_grace_period;
1189 1182 goto out;
1190 1183 }
1191 1184
1192 1185 vp = nlm_fh_to_vp(&argp->share.fh);
1193 1186 if (vp == NULL) {
1194 1187 resp->stat = nlm4_stale_fh;
1195 1188 goto out;
1196 1189 }
1197 1190
1198 1191 /* Convert to local form. */
1199 1192 nlm_init_shrlock(&shr, &argp->share, host);
1200 1193 error = VOP_SHRLOCK(vp, F_UNSHARE, &shr,
1201 1194 FREAD | FWRITE, CRED(), NULL);
1202 1195
1203 1196 (void) error;
1204 1197 resp->stat = nlm4_granted;
1205 1198
1206 1199 out:
1207 1200 DTRACE_PROBE3(unshare__end, struct nlm_globals *, g,
1208 1201 struct nlm_host *, host, nlm4_shareres *, resp);
1209 1202
1210 1203 if (vp != NULL)
1211 1204 VN_RELE(vp);
1212 1205
1213 1206 nlm_host_release(g, host);
1214 1207 }
1215 1208
1216 1209 /*
1217 1210 * NLM wrapper to VOP_FRLOCK that checks the validity of the lock before
1218 1211 * invoking the vnode operation.
1219 1212 */
1220 1213 static int
1221 1214 nlm_vop_frlock(vnode_t *vp, int cmd, flock64_t *bfp, int flag, offset_t offset,
1222 1215 struct flk_callback *flk_cbp, cred_t *cr, caller_context_t *ct)
1223 1216 {
1224 1217 if (bfp->l_len != 0 && bfp->l_start + (bfp->l_len - 1)
1225 1218 < bfp->l_start) {
1226 1219 return (EOVERFLOW);
1227 1220 }
1228 1221
1229 1222 return (VOP_FRLOCK(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
1230 1223 }