Print this page
XXXX adding PID information to netstat output (NOTE: "XXXX" is a bug-ID placeholder — fill in the real issue number before integration)
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/syscall/fcntl.c
+++ new/usr/src/uts/common/syscall/fcntl.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 * Copyright (c) 2013, OmniTI Computer Consulting, Inc. All rights reserved.
25 25 */
26 26
27 27 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
28 28 /* All Rights Reserved */
29 29
30 30 /*
31 31 * Portions of this source code were derived from Berkeley 4.3 BSD
32 32 * under license from the Regents of the University of California.
33 33 */
34 34
35 35
36 36 #include <sys/param.h>
37 37 #include <sys/isa_defs.h>
38 38 #include <sys/types.h>
39 39 #include <sys/sysmacros.h>
40 40 #include <sys/systm.h>
41 41 #include <sys/errno.h>
42 42 #include <sys/fcntl.h>
43 43 #include <sys/flock.h>
44 44 #include <sys/vnode.h>
45 45 #include <sys/file.h>
46 46 #include <sys/mode.h>
47 47 #include <sys/proc.h>
48 48 #include <sys/filio.h>
49 49 #include <sys/share.h>
50 50 #include <sys/debug.h>
51 51 #include <sys/rctl.h>
52 52 #include <sys/nbmlock.h>
53 53
54 54 #include <sys/cmn_err.h>
55 55
56 56 static int flock_check(vnode_t *, flock64_t *, offset_t, offset_t);
57 57 static int flock_get_start(vnode_t *, flock64_t *, offset_t, u_offset_t *);
58 58 static void fd_too_big(proc_t *);
59 59
60 60 /*
61 61 * File control.
62 62 */
63 63 int
64 64 fcntl(int fdes, int cmd, intptr_t arg)
65 65 {
66 66 int iarg;
67 67 int error = 0;
68 68 int retval;
69 69 proc_t *p;
70 70 file_t *fp;
71 71 vnode_t *vp;
72 72 u_offset_t offset;
73 73 u_offset_t start;
74 74 struct vattr vattr;
75 75 int in_crit;
76 76 int flag;
77 77 struct flock sbf;
78 78 struct flock64 bf;
79 79 struct o_flock obf;
80 80 struct flock64_32 bf64_32;
81 81 struct fshare fsh;
82 82 struct shrlock shr;
83 83 struct shr_locowner shr_own;
84 84 offset_t maxoffset;
85 85 model_t datamodel;
86 86 int fdres;
87 87
88 88 #if defined(_ILP32) && !defined(lint) && defined(_SYSCALL32)
89 89 ASSERT(sizeof (struct flock) == sizeof (struct flock32));
90 90 ASSERT(sizeof (struct flock64) == sizeof (struct flock64_32));
91 91 #endif
92 92 #if defined(_LP64) && !defined(lint) && defined(_SYSCALL32)
93 93 ASSERT(sizeof (struct flock) == sizeof (struct flock64_64));
94 94 ASSERT(sizeof (struct flock64) == sizeof (struct flock64_64));
95 95 #endif
96 96
97 97 /*
98 98 * First, for speed, deal with the subset of cases
99 99 * that do not require getf() / releasef().
100 100 */
101 101 switch (cmd) {
102 102 case F_GETFD:
103 103 if ((error = f_getfd_error(fdes, &flag)) == 0)
104 104 retval = flag;
105 105 goto out;
106 106
107 107 case F_SETFD:
108 108 error = f_setfd_error(fdes, (int)arg);
109 109 retval = 0;
110 110 goto out;
111 111
112 112 case F_GETFL:
113 113 if ((error = f_getfl(fdes, &flag)) == 0) {
114 114 retval = (flag & (FMASK | FASYNC));
115 115 if ((flag & (FSEARCH | FEXEC)) == 0)
116 116 retval += FOPEN;
117 117 else
118 118 retval |= (flag & (FSEARCH | FEXEC));
119 119 }
120 120 goto out;
121 121
122 122 case F_GETXFL:
123 123 if ((error = f_getfl(fdes, &flag)) == 0) {
124 124 retval = flag;
125 125 if ((flag & (FSEARCH | FEXEC)) == 0)
126 126 retval += FOPEN;
127 127 }
128 128 goto out;
129 129
130 130 case F_BADFD:
131 131 if ((error = f_badfd(fdes, &fdres, (int)arg)) == 0)
132 132 retval = fdres;
133 133 goto out;
134 134 }
135 135
136 136 /*
137 137 * Second, for speed, deal with the subset of cases that
138 138 * require getf() / releasef() but do not require copyin.
139 139 */
140 140 if ((fp = getf(fdes)) == NULL) {
141 141 error = EBADF;
142 142 goto out;
143 143 }
144 144 iarg = (int)arg;
145 145
146 146 switch (cmd) {
147 147 case F_DUPFD:
148 148 case F_DUPFD_CLOEXEC:
149 149 p = curproc;
150 150 if ((uint_t)iarg >= p->p_fno_ctl) {
151 151 if (iarg >= 0)
152 152 fd_too_big(p);
153 153 error = EINVAL;
154 154 goto done;
155 155 }
156 156 /*
157 157 * We need to increment the f_count reference counter
158 158 * before allocating a new file descriptor.
159 159 * Doing it other way round opens a window for race condition
160 160 * with closeandsetf() on the target file descriptor which can
161 161 * close the file still referenced by the original
162 162 * file descriptor.
163 163 */
164 164 mutex_enter(&fp->f_tlock);
165 165 fp->f_count++;
166 166 mutex_exit(&fp->f_tlock);
167 167 if ((retval = ufalloc_file(iarg, fp)) == -1) {
168 168 /*
169 169 * New file descriptor can't be allocated.
170 170 * Revert the reference count.
↓ open down ↓ |
170 lines elided |
↑ open up ↑ |
171 171 */
172 172 mutex_enter(&fp->f_tlock);
173 173 fp->f_count--;
174 174 mutex_exit(&fp->f_tlock);
175 175 error = EMFILE;
176 176 } else {
177 177 if (cmd == F_DUPFD_CLOEXEC) {
178 178 f_setfd(retval, FD_CLOEXEC);
179 179 }
180 180 }
181 +
182 + if (error == 0 && fp->f_vnode != NULL) {
183 + (void) VOP_IOCTL(fp->f_vnode, F_ASSOCI_PID,
184 + (intptr_t)p->p_pidp->pid_id, FKIOCTL, kcred,
185 + NULL, NULL);
186 + }
187 +
181 188 goto done;
182 189
183 190 case F_DUP2FD_CLOEXEC:
184 191 if (fdes == iarg) {
185 192 error = EINVAL;
186 193 goto done;
187 194 }
188 195
189 196 /*FALLTHROUGH*/
190 197
191 198 case F_DUP2FD:
192 199 p = curproc;
193 200 if (fdes == iarg) {
194 201 retval = iarg;
195 202 } else if ((uint_t)iarg >= p->p_fno_ctl) {
196 203 if (iarg >= 0)
197 204 fd_too_big(p);
198 205 error = EBADF;
199 206 } else {
200 207 /*
201 208 * We can't hold our getf(fdes) across the call to
202 209 * closeandsetf() because it creates a window for
203 210 * deadlock: if one thread is doing dup2(a, b) while
204 211 * another is doing dup2(b, a), each one will block
205 212 * waiting for the other to call releasef(). The
206 213 * solution is to increment the file reference count
↓ open down ↓ |
16 lines elided |
↑ open up ↑ |
207 214 * (which we have to do anyway), then releasef(fdes),
208 215 * then closeandsetf(). Incrementing f_count ensures
209 216 * that fp won't disappear after we call releasef().
210 217 * When closeandsetf() fails, we try avoid calling
211 218 * closef() because of all the side effects.
212 219 */
213 220 mutex_enter(&fp->f_tlock);
214 221 fp->f_count++;
215 222 mutex_exit(&fp->f_tlock);
216 223 releasef(fdes);
224 +
225 + /* assume we have forked successfully */
226 + if (fp->f_vnode != NULL) {
227 + (void) VOP_IOCTL(fp->f_vnode, F_ASSOCI_PID,
228 + (intptr_t)p->p_pidp->pid_id, FKIOCTL,
229 + kcred, NULL, NULL);
230 + }
231 +
217 232 if ((error = closeandsetf(iarg, fp)) == 0) {
218 233 if (cmd == F_DUP2FD_CLOEXEC) {
219 234 f_setfd(iarg, FD_CLOEXEC);
220 235 }
221 236 retval = iarg;
222 237 } else {
223 238 mutex_enter(&fp->f_tlock);
224 239 if (fp->f_count > 1) {
225 240 fp->f_count--;
226 241 mutex_exit(&fp->f_tlock);
242 + if (fp->f_vnode != NULL) {
243 + (void) VOP_IOCTL(fp->f_vnode,
244 + F_DASSOC_PID,
245 + (intptr_t)p->p_pidp->pid_id,
246 + FKIOCTL, kcred, NULL, NULL);
247 + }
248 +
227 249 } else {
228 250 mutex_exit(&fp->f_tlock);
229 251 (void) closef(fp);
230 252 }
231 253 }
232 254 goto out;
233 255 }
234 256 goto done;
235 257
236 258 case F_SETFL:
237 259 vp = fp->f_vnode;
238 260 flag = fp->f_flag;
239 261 if ((iarg & (FNONBLOCK|FNDELAY)) == (FNONBLOCK|FNDELAY))
240 262 iarg &= ~FNDELAY;
241 263 if ((error = VOP_SETFL(vp, flag, iarg, fp->f_cred, NULL)) ==
242 264 0) {
243 265 iarg &= FMASK;
244 266 mutex_enter(&fp->f_tlock);
245 267 fp->f_flag &= ~FMASK | (FREAD|FWRITE);
246 268 fp->f_flag |= (iarg - FOPEN) & ~(FREAD|FWRITE);
247 269 mutex_exit(&fp->f_tlock);
248 270 }
249 271 retval = 0;
250 272 goto done;
251 273 }
252 274
253 275 /*
254 276 * Finally, deal with the expensive cases.
255 277 */
256 278 retval = 0;
257 279 in_crit = 0;
258 280 maxoffset = MAXOFF_T;
259 281 datamodel = DATAMODEL_NATIVE;
260 282 #if defined(_SYSCALL32_IMPL)
261 283 if ((datamodel = get_udatamodel()) == DATAMODEL_ILP32)
262 284 maxoffset = MAXOFF32_T;
263 285 #endif
264 286
265 287 vp = fp->f_vnode;
266 288 flag = fp->f_flag;
267 289 offset = fp->f_offset;
268 290
269 291 switch (cmd) {
270 292 /*
271 293 * The file system and vnode layers understand and implement
272 294 * locking with flock64 structures. So here once we pass through
273 295 * the test for compatibility as defined by LFS API, (for F_SETLK,
274 296 * F_SETLKW, F_GETLK, F_GETLKW, F_FREESP) we transform
275 297 * the flock structure to a flock64 structure and send it to the
276 298 * lower layers. Similarly in case of GETLK the returned flock64
277 299 * structure is transformed to a flock structure if everything fits
278 300 * in nicely, otherwise we return EOVERFLOW.
279 301 */
280 302
281 303 case F_GETLK:
282 304 case F_O_GETLK:
283 305 case F_SETLK:
284 306 case F_SETLKW:
285 307 case F_SETLK_NBMAND:
286 308
287 309 /*
288 310 * Copy in input fields only.
289 311 */
290 312
291 313 if (cmd == F_O_GETLK) {
292 314 if (datamodel != DATAMODEL_ILP32) {
293 315 error = EINVAL;
294 316 break;
295 317 }
296 318
297 319 if (copyin((void *)arg, &obf, sizeof (obf))) {
298 320 error = EFAULT;
299 321 break;
300 322 }
301 323 bf.l_type = obf.l_type;
302 324 bf.l_whence = obf.l_whence;
303 325 bf.l_start = (off64_t)obf.l_start;
304 326 bf.l_len = (off64_t)obf.l_len;
305 327 bf.l_sysid = (int)obf.l_sysid;
306 328 bf.l_pid = obf.l_pid;
307 329 } else if (datamodel == DATAMODEL_NATIVE) {
308 330 if (copyin((void *)arg, &sbf, sizeof (sbf))) {
309 331 error = EFAULT;
310 332 break;
311 333 }
312 334 /*
313 335 * XXX In an LP64 kernel with an LP64 application
314 336 * there's no need to do a structure copy here
315 337 * struct flock == struct flock64. However,
316 338 * we did it this way to avoid more conditional
317 339 * compilation.
318 340 */
319 341 bf.l_type = sbf.l_type;
320 342 bf.l_whence = sbf.l_whence;
321 343 bf.l_start = (off64_t)sbf.l_start;
322 344 bf.l_len = (off64_t)sbf.l_len;
323 345 bf.l_sysid = sbf.l_sysid;
324 346 bf.l_pid = sbf.l_pid;
325 347 }
326 348 #if defined(_SYSCALL32_IMPL)
327 349 else {
328 350 struct flock32 sbf32;
329 351 if (copyin((void *)arg, &sbf32, sizeof (sbf32))) {
330 352 error = EFAULT;
331 353 break;
332 354 }
333 355 bf.l_type = sbf32.l_type;
334 356 bf.l_whence = sbf32.l_whence;
335 357 bf.l_start = (off64_t)sbf32.l_start;
336 358 bf.l_len = (off64_t)sbf32.l_len;
337 359 bf.l_sysid = sbf32.l_sysid;
338 360 bf.l_pid = sbf32.l_pid;
339 361 }
340 362 #endif /* _SYSCALL32_IMPL */
341 363
342 364 /*
343 365 * 64-bit support: check for overflow for 32-bit lock ops
344 366 */
345 367 if ((error = flock_check(vp, &bf, offset, maxoffset)) != 0)
346 368 break;
347 369
348 370 /*
349 371 * Not all of the filesystems understand F_O_GETLK, and
350 372 * there's no need for them to know. Map it to F_GETLK.
351 373 */
352 374 if ((error = VOP_FRLOCK(vp, (cmd == F_O_GETLK) ? F_GETLK : cmd,
353 375 &bf, flag, offset, NULL, fp->f_cred, NULL)) != 0)
354 376 break;
355 377
356 378 /*
357 379 * If command is GETLK and no lock is found, only
358 380 * the type field is changed.
359 381 */
360 382 if ((cmd == F_O_GETLK || cmd == F_GETLK) &&
361 383 bf.l_type == F_UNLCK) {
362 384 /* l_type always first entry, always a short */
363 385 if (copyout(&bf.l_type, &((struct flock *)arg)->l_type,
364 386 sizeof (bf.l_type)))
365 387 error = EFAULT;
366 388 break;
367 389 }
368 390
369 391 if (cmd == F_O_GETLK) {
370 392 /*
371 393 * Return an SVR3 flock structure to the user.
372 394 */
373 395 obf.l_type = (int16_t)bf.l_type;
374 396 obf.l_whence = (int16_t)bf.l_whence;
375 397 obf.l_start = (int32_t)bf.l_start;
376 398 obf.l_len = (int32_t)bf.l_len;
377 399 if (bf.l_sysid > SHRT_MAX || bf.l_pid > SHRT_MAX) {
378 400 /*
379 401 * One or both values for the above fields
380 402 * is too large to store in an SVR3 flock
381 403 * structure.
382 404 */
383 405 error = EOVERFLOW;
384 406 break;
385 407 }
386 408 obf.l_sysid = (int16_t)bf.l_sysid;
387 409 obf.l_pid = (int16_t)bf.l_pid;
388 410 if (copyout(&obf, (void *)arg, sizeof (obf)))
389 411 error = EFAULT;
390 412 } else if (cmd == F_GETLK) {
391 413 /*
392 414 * Copy out SVR4 flock.
393 415 */
394 416 int i;
395 417
396 418 if (bf.l_start > maxoffset || bf.l_len > maxoffset) {
397 419 error = EOVERFLOW;
398 420 break;
399 421 }
400 422
401 423 if (datamodel == DATAMODEL_NATIVE) {
402 424 for (i = 0; i < 4; i++)
403 425 sbf.l_pad[i] = 0;
404 426 /*
405 427 * XXX In an LP64 kernel with an LP64
406 428 * application there's no need to do a
407 429 * structure copy here as currently
408 430 * struct flock == struct flock64.
409 431 * We did it this way to avoid more
410 432 * conditional compilation.
411 433 */
412 434 sbf.l_type = bf.l_type;
413 435 sbf.l_whence = bf.l_whence;
414 436 sbf.l_start = (off_t)bf.l_start;
415 437 sbf.l_len = (off_t)bf.l_len;
416 438 sbf.l_sysid = bf.l_sysid;
417 439 sbf.l_pid = bf.l_pid;
418 440 if (copyout(&sbf, (void *)arg, sizeof (sbf)))
419 441 error = EFAULT;
420 442 }
421 443 #if defined(_SYSCALL32_IMPL)
422 444 else {
423 445 struct flock32 sbf32;
424 446 if (bf.l_start > MAXOFF32_T ||
425 447 bf.l_len > MAXOFF32_T) {
426 448 error = EOVERFLOW;
427 449 break;
428 450 }
429 451 for (i = 0; i < 4; i++)
430 452 sbf32.l_pad[i] = 0;
431 453 sbf32.l_type = (int16_t)bf.l_type;
432 454 sbf32.l_whence = (int16_t)bf.l_whence;
433 455 sbf32.l_start = (off32_t)bf.l_start;
434 456 sbf32.l_len = (off32_t)bf.l_len;
435 457 sbf32.l_sysid = (int32_t)bf.l_sysid;
436 458 sbf32.l_pid = (pid32_t)bf.l_pid;
437 459 if (copyout(&sbf32,
438 460 (void *)arg, sizeof (sbf32)))
439 461 error = EFAULT;
440 462 }
441 463 #endif
442 464 }
443 465 break;
444 466
445 467 case F_CHKFL:
446 468 /*
447 469 * This is for internal use only, to allow the vnode layer
448 470 * to validate a flags setting before applying it. User
449 471 * programs can't issue it.
450 472 */
451 473 error = EINVAL;
452 474 break;
453 475
454 476 case F_ALLOCSP:
455 477 case F_FREESP:
456 478 case F_ALLOCSP64:
457 479 case F_FREESP64:
458 480 /*
459 481 * Test for not-a-regular-file (and returning EINVAL)
460 482 * before testing for open-for-writing (and returning EBADF).
461 483 * This is relied upon by posix_fallocate() in libc.
462 484 */
463 485 if (vp->v_type != VREG) {
464 486 error = EINVAL;
465 487 break;
466 488 }
467 489
468 490 if ((flag & FWRITE) == 0) {
469 491 error = EBADF;
470 492 break;
471 493 }
472 494
473 495 if (datamodel != DATAMODEL_ILP32 &&
474 496 (cmd == F_ALLOCSP64 || cmd == F_FREESP64)) {
475 497 error = EINVAL;
476 498 break;
477 499 }
478 500
479 501 #if defined(_ILP32) || defined(_SYSCALL32_IMPL)
480 502 if (datamodel == DATAMODEL_ILP32 &&
481 503 (cmd == F_ALLOCSP || cmd == F_FREESP)) {
482 504 struct flock32 sbf32;
483 505 /*
484 506 * For compatibility we overlay an SVR3 flock on an SVR4
485 507 * flock. This works because the input field offsets
486 508 * in "struct flock" were preserved.
487 509 */
488 510 if (copyin((void *)arg, &sbf32, sizeof (sbf32))) {
489 511 error = EFAULT;
490 512 break;
491 513 } else {
492 514 bf.l_type = sbf32.l_type;
493 515 bf.l_whence = sbf32.l_whence;
494 516 bf.l_start = (off64_t)sbf32.l_start;
495 517 bf.l_len = (off64_t)sbf32.l_len;
496 518 bf.l_sysid = sbf32.l_sysid;
497 519 bf.l_pid = sbf32.l_pid;
498 520 }
499 521 }
500 522 #endif /* _ILP32 || _SYSCALL32_IMPL */
501 523
502 524 #if defined(_LP64)
503 525 if (datamodel == DATAMODEL_LP64 &&
504 526 (cmd == F_ALLOCSP || cmd == F_FREESP)) {
505 527 if (copyin((void *)arg, &bf, sizeof (bf))) {
506 528 error = EFAULT;
507 529 break;
508 530 }
509 531 }
510 532 #endif /* defined(_LP64) */
511 533
512 534 #if !defined(_LP64) || defined(_SYSCALL32_IMPL)
513 535 if (datamodel == DATAMODEL_ILP32 &&
514 536 (cmd == F_ALLOCSP64 || cmd == F_FREESP64)) {
515 537 if (copyin((void *)arg, &bf64_32, sizeof (bf64_32))) {
516 538 error = EFAULT;
517 539 break;
518 540 } else {
519 541 /*
520 542 * Note that the size of flock64 is different in
521 543 * the ILP32 and LP64 models, due to the l_pad
522 544 * field. We do not want to assume that the
523 545 * flock64 structure is laid out the same in
524 546 * ILP32 and LP64 environments, so we will
525 547 * copy in the ILP32 version of flock64
526 548 * explicitly and copy it to the native
527 549 * flock64 structure.
528 550 */
529 551 bf.l_type = (short)bf64_32.l_type;
530 552 bf.l_whence = (short)bf64_32.l_whence;
531 553 bf.l_start = bf64_32.l_start;
532 554 bf.l_len = bf64_32.l_len;
533 555 bf.l_sysid = (int)bf64_32.l_sysid;
534 556 bf.l_pid = (pid_t)bf64_32.l_pid;
535 557 }
536 558 }
537 559 #endif /* !defined(_LP64) || defined(_SYSCALL32_IMPL) */
538 560
539 561 if (cmd == F_ALLOCSP || cmd == F_FREESP)
540 562 error = flock_check(vp, &bf, offset, maxoffset);
541 563 else if (cmd == F_ALLOCSP64 || cmd == F_FREESP64)
542 564 error = flock_check(vp, &bf, offset, MAXOFFSET_T);
543 565 if (error)
544 566 break;
545 567
546 568 if (vp->v_type == VREG && bf.l_len == 0 &&
547 569 bf.l_start > OFFSET_MAX(fp)) {
548 570 error = EFBIG;
549 571 break;
550 572 }
551 573
552 574 /*
553 575 * Make sure that there are no conflicting non-blocking
554 576 * mandatory locks in the region being manipulated. If
555 577 * there are such locks then return EACCES.
556 578 */
557 579 if ((error = flock_get_start(vp, &bf, offset, &start)) != 0)
558 580 break;
559 581
560 582 if (nbl_need_check(vp)) {
561 583 u_offset_t begin;
562 584 ssize_t length;
563 585
564 586 nbl_start_crit(vp, RW_READER);
565 587 in_crit = 1;
566 588 vattr.va_mask = AT_SIZE;
567 589 if ((error = VOP_GETATTR(vp, &vattr, 0, CRED(), NULL))
568 590 != 0)
569 591 break;
570 592 begin = start > vattr.va_size ? vattr.va_size : start;
571 593 length = vattr.va_size > start ? vattr.va_size - start :
572 594 start - vattr.va_size;
573 595 if (nbl_conflict(vp, NBL_WRITE, begin, length, 0,
574 596 NULL)) {
575 597 error = EACCES;
576 598 break;
577 599 }
578 600 }
579 601
580 602 if (cmd == F_ALLOCSP64)
581 603 cmd = F_ALLOCSP;
582 604 else if (cmd == F_FREESP64)
583 605 cmd = F_FREESP;
584 606
585 607 error = VOP_SPACE(vp, cmd, &bf, flag, offset, fp->f_cred, NULL);
586 608
587 609 break;
588 610
589 611 #if !defined(_LP64) || defined(_SYSCALL32_IMPL)
590 612 case F_GETLK64:
591 613 case F_SETLK64:
592 614 case F_SETLKW64:
593 615 case F_SETLK64_NBMAND:
594 616 /*
595 617 * Large Files: Here we set cmd as *LK and send it to
596 618 * lower layers. *LK64 is only for the user land.
597 619 * Most of the comments described above for F_SETLK
598 620 * applies here too.
599 621 * Large File support is only needed for ILP32 apps!
600 622 */
601 623 if (datamodel != DATAMODEL_ILP32) {
602 624 error = EINVAL;
603 625 break;
604 626 }
605 627
606 628 if (cmd == F_GETLK64)
607 629 cmd = F_GETLK;
608 630 else if (cmd == F_SETLK64)
609 631 cmd = F_SETLK;
610 632 else if (cmd == F_SETLKW64)
611 633 cmd = F_SETLKW;
612 634 else if (cmd == F_SETLK64_NBMAND)
613 635 cmd = F_SETLK_NBMAND;
614 636
615 637 /*
616 638 * Note that the size of flock64 is different in the ILP32
617 639 * and LP64 models, due to the sucking l_pad field.
618 640 * We do not want to assume that the flock64 structure is
619 641 * laid out in the same in ILP32 and LP64 environments, so
620 642 * we will copy in the ILP32 version of flock64 explicitly
621 643 * and copy it to the native flock64 structure.
622 644 */
623 645
624 646 if (copyin((void *)arg, &bf64_32, sizeof (bf64_32))) {
625 647 error = EFAULT;
626 648 break;
627 649 }
628 650
629 651 bf.l_type = (short)bf64_32.l_type;
630 652 bf.l_whence = (short)bf64_32.l_whence;
631 653 bf.l_start = bf64_32.l_start;
632 654 bf.l_len = bf64_32.l_len;
633 655 bf.l_sysid = (int)bf64_32.l_sysid;
634 656 bf.l_pid = (pid_t)bf64_32.l_pid;
635 657
636 658 if ((error = flock_check(vp, &bf, offset, MAXOFFSET_T)) != 0)
637 659 break;
638 660
639 661 if ((error = VOP_FRLOCK(vp, cmd, &bf, flag, offset,
640 662 NULL, fp->f_cred, NULL)) != 0)
641 663 break;
642 664
643 665 if ((cmd == F_GETLK) && bf.l_type == F_UNLCK) {
644 666 if (copyout(&bf.l_type, &((struct flock *)arg)->l_type,
645 667 sizeof (bf.l_type)))
646 668 error = EFAULT;
647 669 break;
648 670 }
649 671
650 672 if (cmd == F_GETLK) {
651 673 int i;
652 674
653 675 /*
654 676 * We do not want to assume that the flock64 structure
655 677 * is laid out in the same in ILP32 and LP64
656 678 * environments, so we will copy out the ILP32 version
657 679 * of flock64 explicitly after copying the native
658 680 * flock64 structure to it.
659 681 */
660 682 for (i = 0; i < 4; i++)
661 683 bf64_32.l_pad[i] = 0;
662 684 bf64_32.l_type = (int16_t)bf.l_type;
663 685 bf64_32.l_whence = (int16_t)bf.l_whence;
664 686 bf64_32.l_start = bf.l_start;
665 687 bf64_32.l_len = bf.l_len;
666 688 bf64_32.l_sysid = (int32_t)bf.l_sysid;
667 689 bf64_32.l_pid = (pid32_t)bf.l_pid;
668 690 if (copyout(&bf64_32, (void *)arg, sizeof (bf64_32)))
669 691 error = EFAULT;
670 692 }
671 693 break;
672 694 #endif /* !defined(_LP64) || defined(_SYSCALL32_IMPL) */
673 695
674 696 case F_SHARE:
675 697 case F_SHARE_NBMAND:
676 698 case F_UNSHARE:
677 699
678 700 /*
679 701 * Copy in input fields only.
680 702 */
681 703 if (copyin((void *)arg, &fsh, sizeof (fsh))) {
682 704 error = EFAULT;
683 705 break;
684 706 }
685 707
686 708 /*
687 709 * Local share reservations always have this simple form
688 710 */
689 711 shr.s_access = fsh.f_access;
690 712 shr.s_deny = fsh.f_deny;
691 713 shr.s_sysid = 0;
692 714 shr.s_pid = ttoproc(curthread)->p_pid;
693 715 shr_own.sl_pid = shr.s_pid;
694 716 shr_own.sl_id = fsh.f_id;
695 717 shr.s_own_len = sizeof (shr_own);
696 718 shr.s_owner = (caddr_t)&shr_own;
697 719 error = VOP_SHRLOCK(vp, cmd, &shr, flag, fp->f_cred, NULL);
698 720 break;
699 721
700 722 default:
701 723 error = EINVAL;
702 724 break;
703 725 }
704 726
705 727 if (in_crit)
706 728 nbl_end_crit(vp);
707 729
708 730 done:
709 731 releasef(fdes);
710 732 out:
711 733 if (error)
712 734 return (set_errno(error));
713 735 return (retval);
714 736 }
715 737
716 738 int
717 739 flock_check(vnode_t *vp, flock64_t *flp, offset_t offset, offset_t max)
718 740 {
719 741 struct vattr vattr;
720 742 int error;
721 743 u_offset_t start, end;
722 744
723 745 /*
724 746 * Determine the starting point of the request
725 747 */
726 748 switch (flp->l_whence) {
727 749 case 0: /* SEEK_SET */
728 750 start = (u_offset_t)flp->l_start;
729 751 if (start > max)
730 752 return (EINVAL);
731 753 break;
732 754 case 1: /* SEEK_CUR */
733 755 if (flp->l_start > (max - offset))
734 756 return (EOVERFLOW);
735 757 start = (u_offset_t)(flp->l_start + offset);
736 758 if (start > max)
737 759 return (EINVAL);
738 760 break;
739 761 case 2: /* SEEK_END */
740 762 vattr.va_mask = AT_SIZE;
741 763 if (error = VOP_GETATTR(vp, &vattr, 0, CRED(), NULL))
742 764 return (error);
743 765 if (flp->l_start > (max - (offset_t)vattr.va_size))
744 766 return (EOVERFLOW);
745 767 start = (u_offset_t)(flp->l_start + (offset_t)vattr.va_size);
746 768 if (start > max)
747 769 return (EINVAL);
748 770 break;
749 771 default:
750 772 return (EINVAL);
751 773 }
752 774
753 775 /*
754 776 * Determine the range covered by the request.
755 777 */
756 778 if (flp->l_len == 0)
757 779 end = MAXEND;
758 780 else if ((offset_t)flp->l_len > 0) {
759 781 if (flp->l_len > (max - start + 1))
760 782 return (EOVERFLOW);
761 783 end = (u_offset_t)(start + (flp->l_len - 1));
762 784 ASSERT(end <= max);
763 785 } else {
764 786 /*
765 787 * Negative length; why do we even allow this ?
766 788 * Because this allows easy specification of
767 789 * the last n bytes of the file.
768 790 */
769 791 end = start;
770 792 start += (u_offset_t)flp->l_len;
771 793 (start)++;
772 794 if (start > max)
773 795 return (EINVAL);
774 796 ASSERT(end <= max);
775 797 }
776 798 ASSERT(start <= max);
777 799 if (flp->l_type == F_UNLCK && flp->l_len > 0 &&
778 800 end == (offset_t)max) {
779 801 flp->l_len = 0;
780 802 }
781 803 if (start > end)
782 804 return (EINVAL);
783 805 return (0);
784 806 }
785 807
786 808 static int
787 809 flock_get_start(vnode_t *vp, flock64_t *flp, offset_t offset, u_offset_t *start)
788 810 {
789 811 struct vattr vattr;
790 812 int error;
791 813
792 814 /*
793 815 * Determine the starting point of the request. Assume that it is
794 816 * a valid starting point.
795 817 */
796 818 switch (flp->l_whence) {
797 819 case 0: /* SEEK_SET */
798 820 *start = (u_offset_t)flp->l_start;
799 821 break;
800 822 case 1: /* SEEK_CUR */
801 823 *start = (u_offset_t)(flp->l_start + offset);
802 824 break;
803 825 case 2: /* SEEK_END */
804 826 vattr.va_mask = AT_SIZE;
805 827 if (error = VOP_GETATTR(vp, &vattr, 0, CRED(), NULL))
806 828 return (error);
807 829 *start = (u_offset_t)(flp->l_start + (offset_t)vattr.va_size);
808 830 break;
809 831 default:
810 832 return (EINVAL);
811 833 }
812 834
813 835 return (0);
814 836 }
815 837
816 838 /*
817 839 * Take rctl action when the requested file descriptor is too big.
818 840 */
819 841 static void
820 842 fd_too_big(proc_t *p)
821 843 {
822 844 mutex_enter(&p->p_lock);
823 845 (void) rctl_action(rctlproc_legacy[RLIMIT_NOFILE],
824 846 p->p_rctls, p, RCA_SAFE);
825 847 mutex_exit(&p->p_lock);
826 848 }
↓ open down ↓ |
590 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX