/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

/*
 * Inter-Process Communication Shared Memory Facility.
 *
 * See os/ipc.c for a description of common IPC functionality.
 *
 * Resource controls
 * -----------------
 *
 * Control:	zone.max-shm-ids (rc_zone_shmmni)
 * Description:	Maximum number of shared memory ids allowed a zone.
 *
 *   When shmget() is used to allocate a shared memory segment, one id
 *   is allocated.  If the id allocation doesn't succeed, shmget()
 *   fails and errno is set to ENOSPC.  Upon successful shmctl(,
 *   IPC_RMID) the id is deallocated.
 *
 * Control:	project.max-shm-ids (rc_project_shmmni)
 * Description:	Maximum number of shared memory ids allowed a project.
 *
 *   When shmget() is used to allocate a shared memory segment, one id
 *   is allocated.  If the id allocation doesn't succeed, shmget()
 *   fails and errno is set to ENOSPC.  Upon successful shmctl(,
 *   IPC_RMID) the id is deallocated.
 *
 * Control:	zone.max-shm-memory (rc_zone_shmmax)
 * Description:	Total amount of shared memory allowed a zone.
 *
 *   When shmget() is used to allocate a shared memory segment, the
 *   segment's size is allocated against this limit.  If the space
 *   allocation doesn't succeed, shmget() fails and errno is set to
 *   EINVAL.  The size will be deallocated once the last process has
 *   detached the segment and the segment has been successfully
 *   shmctl(, IPC_RMID)ed.
 *
 * Control:	project.max-shm-memory (rc_project_shmmax)
 * Description:	Total amount of shared memory allowed a project.
 *
 *   When shmget() is used to allocate a shared memory segment, the
 *   segment's size is allocated against this limit.  If the space
 *   allocation doesn't succeed, shmget() fails and errno is set to
 *   EINVAL.  The size will be deallocated once the last process has
 *   detached the segment and the segment has been successfully
 *   shmctl(, IPC_RMID)ed.
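 *
 * As an illustrative sketch (not part of the original commentary), a
 * userland program observes these controls through the standard SysV
 * API; the error values above are what such a program would see:
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	if (id == -1) {
 *		// ENOSPC: an id limit was hit; EINVAL: a memory limit
 *	}
 *	void *p = shmat(id, NULL, 0);	// attach; takes one reference
 *	(void) shmdt(p);		// detach
 *	(void) shmctl(id, IPC_RMID, NULL); // releases the id and the size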
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/cred.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/kmem.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/prsystm.h>
#include <sys/sysmacros.h>
#include <sys/tuneable.h>
#include <sys/vm.h>
#include <sys/mman.h>
#include <sys/swap.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/lwpchan_impl.h>
#include <sys/avl.h>
#include <sys/modctl.h>
#include <sys/syscall.h>
#include <sys/task.h>
#include <sys/project.h>
#include <sys/policy.h>
#include <sys/zone.h>
#include <sys/rctl.h>

#include <sys/ipc.h>
#include <sys/ipc_impl.h>
#include <sys/shm.h>
#include <sys/shm_impl.h>

#include <vm/hat.h>
#include <vm/seg.h>
#include <vm/as.h>
#include <vm/seg_vn.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <vm/vpage.h>
#include <vm/seg_spt.h>

#include <c2/audit.h>

static int shmem_lock(kshmid_t *sp, struct anon_map *amp);
static void shmem_unlock(kshmid_t *sp, struct anon_map *amp);
static void sa_add(struct proc *pp, caddr_t addr, size_t len, ulong_t flags,
	kshmid_t *id);
static void shm_rm_amp(kshmid_t *sp);
static void shm_dtor(kipc_perm_t *);
static void shm_rmid(kipc_perm_t *);
static void shm_remove_zone(zoneid_t, void *);

/*
 * Semantics for share_page_table and ism_off:
 *
 * These are hooks in /etc/system, intended only for internal testing
 * purposes.
 *
 * Setting share_page_table automatically turns on the SHM_SHARE_MMU (ISM)
 * flag in a call to shmat(2).  In other words, with share_page_table set,
 * you always get ISM, even if, say, DISM is specified.  It should really
 * be called "ism_on".
 *
 * Setting ism_off turns off the SHM_SHARE_MMU flag from the flags passed
 * to shmat(2).
 *
 * If both share_page_table and ism_off are set, share_page_table prevails.
 *
 * Although these tunables should probably be removed, they do have some
 * external exposure; as long as they exist, they should at least work
 * sensibly.
 */

int share_page_table;
int ism_off;

/*
 * The following tunables are obsolete.  Though for compatibility we
 * still read and interpret shminfo_shmmax and shminfo_shmmni (see
 * os/project.c), the preferred mechanism for administrating the IPC
 * Shared Memory facility is through the resource controls described at
 * the top of this file.
 */
size_t	shminfo_shmmax = 0x800000;	/* (obsolete) */
int	shminfo_shmmni = 100;		/* (obsolete) */
size_t	shminfo_shmmin = 1;		/* (obsolete) */
int	shminfo_shmseg = 6;		/* (obsolete) */

extern rctl_hndl_t rc_zone_shmmax;
extern rctl_hndl_t rc_zone_shmmni;
extern rctl_hndl_t rc_project_shmmax;
extern rctl_hndl_t rc_project_shmmni;
static ipc_service_t *shm_svc;
static zone_key_t shm_zone_key;

/*
 * Module linkage information for the kernel.
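 *
 * All five operations (SHMAT, SHMCTL, SHMDT, SHMGET, SHMIDS) are
 * multiplexed through the single shmsys() system call entry declared
 * below; when _SYSCALL32_IMPL is defined, a separate 32-bit sysent is
 * installed alongside the native one.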
 */
static uintptr_t shmsys(int, uintptr_t, uintptr_t, uintptr_t);

static struct sysent ipcshm_sysent = {
	4,
#ifdef	_SYSCALL32_IMPL
	SE_ARGC | SE_NOUNLOAD | SE_64RVAL,
#else	/* _SYSCALL32_IMPL */
	SE_ARGC | SE_NOUNLOAD | SE_32RVAL1,
#endif	/* _SYSCALL32_IMPL */
	(int (*)())shmsys
};

#ifdef	_SYSCALL32_IMPL
static struct sysent ipcshm_sysent32 = {
	4,
	SE_ARGC | SE_NOUNLOAD | SE_32RVAL1,
	(int (*)())shmsys
};
#endif	/* _SYSCALL32_IMPL */

static struct modlsys modlsys = {
	&mod_syscallops, "System V shared memory", &ipcshm_sysent
};

#ifdef	_SYSCALL32_IMPL
static struct modlsys modlsys32 = {
	&mod_syscallops32, "32-bit System V shared memory", &ipcshm_sysent32
};
#endif	/* _SYSCALL32_IMPL */

static struct modlinkage modlinkage = {
	MODREV_1,
	{ &modlsys,
#ifdef	_SYSCALL32_IMPL
	&modlsys32,
#endif
	NULL
	}
};


int
_init(void)
{
	int result;

	shm_svc = ipcs_create("shmids", rc_project_shmmni, rc_zone_shmmni,
	    sizeof (kshmid_t), shm_dtor, shm_rmid, AT_IPC_SHM,
	    offsetof(ipc_rqty_t, ipcq_shmmni));
	zone_key_create(&shm_zone_key, NULL, shm_remove_zone, NULL);

	if ((result = mod_install(&modlinkage)) == 0)
		return (0);

	(void) zone_key_delete(shm_zone_key);
	ipcs_destroy(shm_svc);

	return (result);
}

int
_fini(void)
{
	return (EBUSY);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Shmat (attach shared segment) system call.
 */
static int
shmat(int shmid, caddr_t uaddr, int uflags, uintptr_t *rvp)
{
	kshmid_t *sp;	/* shared memory header ptr */
	size_t	size;
	int	error = 0;
	proc_t *pp = curproc;
	struct as *as = pp->p_as;
	struct segvn_crargs	crargs;	/* segvn create arguments */
	kmutex_t	*lock;
	struct seg	*segspt = NULL;
	caddr_t		addr = uaddr;
	int		flags = (uflags & SHMAT_VALID_FLAGS_MASK);
	int		useISM;
	uchar_t		prot = PROT_ALL;
	int result;

	if ((lock = ipc_lookup(shm_svc, shmid, (kipc_perm_t **)&sp)) == NULL)
		return (EINVAL);
	if (error = ipcperm_access(&sp->shm_perm, SHM_R, CRED()))
		goto errret;
	if ((flags & SHM_RDONLY) == 0 &&
	    (error = ipcperm_access(&sp->shm_perm, SHM_W, CRED())))
		goto errret;
	if (spt_invalid(flags)) {
		error = EINVAL;
		goto errret;
	}
	if (ism_off)
		flags = flags & ~SHM_SHARE_MMU;
	if (share_page_table) {
		flags = flags & ~SHM_PAGEABLE;
		flags = flags | SHM_SHARE_MMU;
	}
	useISM = (spt_locked(flags) || spt_pageable(flags));
	if (useISM && (error = ipcperm_access(&sp->shm_perm, SHM_W, CRED())))
		goto errret;
	if (useISM && isspt(sp)) {
		uint_t newsptflags = flags | spt_flags(sp->shm_sptseg);
		/*
		 * If trying to change an existing {D}ISM segment from ISM
		 * to DISM or vice versa, return error.  Note that this
		 * validation of flags needs to be done after the effect of
		 * tunables such as ism_off and share_page_table, for
		 * semantics that are consistent with the tunables' settings.
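		 * (ISM is requested with SHM_SHARE_MMU and is locked down;
		 * DISM is requested with SHM_PAGEABLE and remains pageable.
		 * Since spt_invalid() rejects the combination of both
		 * flags, a union of the segment's existing flags and the
		 * requested flags fails exactly when the attach types mix.)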
		 */
		if (spt_invalid(newsptflags)) {
			error = EINVAL;
			goto errret;
		}
	}
	ANON_LOCK_ENTER(&sp->shm_amp->a_rwlock, RW_WRITER);
	size = sp->shm_amp->size;
	ANON_LOCK_EXIT(&sp->shm_amp->a_rwlock);

	/* somewhere to record spt info for final detach */
	if (sp->shm_sptinfo == NULL)
		sp->shm_sptinfo = kmem_zalloc(sizeof (sptinfo_t), KM_SLEEP);

	as_rangelock(as);

	if (useISM) {
		/*
		 * Handle ISM
		 */
		uint_t	share_szc;
		size_t	share_size;
		struct	shm_data ssd;
		uintptr_t align_hint;

		/*
		 * Pick a share pagesize to use, if (!isspt(sp)).
		 * Otherwise use the already chosen page size.
		 *
		 * For the initial shmat (!isspt(sp)), where sptcreate is
		 * called, map_pgsz is called to recommend a [D]ISM pagesize,
		 * important for systems which offer more than one potential
		 * [D]ISM pagesize.
		 * If the shmat is just to attach to an already created
		 * [D]ISM segment, then use the previously selected page size.
		 */
		if (!isspt(sp)) {
			share_size = map_pgsz(MAPPGSZ_ISM, pp, addr, size, 0);
			if (share_size == 0) {
				as_rangeunlock(as);
				error = EINVAL;
				goto errret;
			}
			share_szc = page_szc(share_size);
		} else {
			share_szc = sp->shm_sptseg->s_szc;
			share_size = page_get_pagesize(share_szc);
		}
		size = P2ROUNDUP(size, share_size);

		align_hint = share_size;
#if defined(__i386) || defined(__amd64)
		/*
		 * For x86, we want to share as much of the page table tree
		 * as possible.  We use a large align_hint at first, but
		 * if that fails, then the code below retries with align_hint
		 * set to share_size.
		 *
		 * The explicit extern here is due to the difficulties
		 * of getting to platform dependent includes.  When/if the
		 * platform dependent bits of this function are cleaned up,
		 * another way of doing this should be found.
		 */
		{
			extern uint_t ptes_per_table;

			while (size >= ptes_per_table * (uint64_t)align_hint)
				align_hint *= ptes_per_table;
		}
#endif /* __i386 || __amd64 */

#if defined(__sparcv9)
		if (addr == 0 &&
		    pp->p_model == DATAMODEL_LP64 && AS_TYPE_64BIT(as)) {
			/*
			 * If no address has been passed in, and this is a
			 * 64-bit process, we'll try to find an address
			 * in the predict-ISM zone.
			 */
			caddr_t predbase = (caddr_t)PREDISM_1T_BASE;
			size_t len = PREDISM_BOUND - PREDISM_1T_BASE;

			as_purge(as);
			if (as_gap(as, size + share_size, &predbase, &len,
			    AH_LO, (caddr_t)NULL) != -1) {
				/*
				 * We found an address which looks like a
				 * candidate.  We want to round it up, and
				 * then check that it's a valid user range.
				 * This assures that we won't fail below.
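				 * (The gap was requested with an extra
				 * share_size bytes of slack, so rounding the
				 * base up to a share_size boundary cannot
				 * push the mapping past the gap just found.)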
				 */
				addr = (caddr_t)P2ROUNDUP((uintptr_t)predbase,
				    share_size);

				if (valid_usr_range(addr, size, prot,
				    as, as->a_userlimit) != RANGE_OKAY) {
					addr = 0;
				}
			}
		}
#endif /* __sparcv9 */

		if (addr == 0) {
			for (;;) {
				addr = (caddr_t)align_hint;
				map_addr(&addr, size, 0ll, 1, MAP_ALIGN);
				if (addr != NULL || align_hint == share_size)
					break;
				align_hint = share_size;
			}
			if (addr == NULL) {
				as_rangeunlock(as);
				error = ENOMEM;
				goto errret;
			}
			ASSERT(((uintptr_t)addr & (align_hint - 1)) == 0);
		} else {
			/* Use the user-supplied attach address */
			caddr_t base;
			size_t len;

			/*
			 * Check that the address range
			 *  1) is properly aligned
			 *  2) is correct in unix terms
			 *  3) is within an unmapped address segment
			 */
			base = addr;
			len = size;		/* use spt aligned size */
			/* XXX - in SunOS, is sp->shm_segsz */
			if ((uintptr_t)base & (share_size - 1)) {
				error = EINVAL;
				as_rangeunlock(as);
				goto errret;
			}
			result = valid_usr_range(base, len, prot, as,
			    as->a_userlimit);
			if (result == RANGE_BADPROT) {
				/*
				 * We try to accommodate processors which
				 * may not support execute permissions on
				 * all ISM segments by trying the check
				 * again but without PROT_EXEC.
				 */
				prot &= ~PROT_EXEC;
				result = valid_usr_range(base, len, prot, as,
				    as->a_userlimit);
			}
			as_purge(as);
			if (result != RANGE_OKAY ||
			    as_gap(as, len, &base, &len, AH_LO,
			    (caddr_t)NULL) != 0) {
				error = EINVAL;
				as_rangeunlock(as);
				goto errret;
			}
		}

		if (!isspt(sp)) {
			error = sptcreate(size, &segspt, sp->shm_amp, prot,
			    flags, share_szc);
			if (error) {
				as_rangeunlock(as);
				goto errret;
			}
			sp->shm_sptinfo->sptas = segspt->s_as;
			sp->shm_sptseg = segspt;
			sp->shm_sptprot = prot;
		} else if ((prot & sp->shm_sptprot) != sp->shm_sptprot) {
			/*
			 * Ensure we're attaching to an ISM segment with
			 * fewer or equal permissions than what we're
			 * allowed.  Fail if the segment has more
			 * permissions than what we're allowed.
			 */
			error = EACCES;
			as_rangeunlock(as);
			goto errret;
		}

		ssd.shm_sptseg = sp->shm_sptseg;
		ssd.shm_sptas = sp->shm_sptinfo->sptas;
		ssd.shm_amp = sp->shm_amp;
		error = as_map(as, addr, size, segspt_shmattach, &ssd);
		if (error == 0)
			sp->shm_ismattch++; /* keep count of ISM attaches */
	} else {

		/*
		 * Normal case.
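		 * A non-ISM attach maps the segment's anon_map into the
		 * process with an ordinary MAP_SHARED segvn segment; page
		 * tables are not shared and the mapping stays pageable
		 * unless later locked with SHM_LOCK.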
		 */
		if (flags & SHM_RDONLY)
			prot &= ~PROT_WRITE;

		if (addr == 0) {
			/* Let the system pick the attach address */
			map_addr(&addr, size, 0ll, 1, 0);
			if (addr == NULL) {
				as_rangeunlock(as);
				error = ENOMEM;
				goto errret;
			}
		} else {
			/* Use the user-supplied attach address */
			caddr_t base;
			size_t len;

			if (flags & SHM_RND)
				addr = (caddr_t)((uintptr_t)addr &
				    ~(SHMLBA - 1));
			/*
			 * Check that the address range
			 *  1) is properly aligned
			 *  2) is correct in unix terms
			 *  3) is within an unmapped address segment
			 */
			base = addr;
			len = size;		/* use aligned size */
			/* XXX - in SunOS, is sp->shm_segsz */
			if ((uintptr_t)base & PAGEOFFSET) {
				error = EINVAL;
				as_rangeunlock(as);
				goto errret;
			}
			result = valid_usr_range(base, len, prot, as,
			    as->a_userlimit);
			if (result == RANGE_BADPROT) {
				prot &= ~PROT_EXEC;
				result = valid_usr_range(base, len, prot, as,
				    as->a_userlimit);
			}
			as_purge(as);
			if (result != RANGE_OKAY ||
			    as_gap(as, len, &base, &len,
			    AH_LO, (caddr_t)NULL) != 0) {
				error = EINVAL;
				as_rangeunlock(as);
				goto errret;
			}
		}

		/* Initialize the create arguments and map the segment */
		crargs = *(struct segvn_crargs *)zfod_argsp;
		crargs.offset = 0;
		crargs.type = MAP_SHARED;
		crargs.amp = sp->shm_amp;
		crargs.prot = prot;
		crargs.maxprot = crargs.prot;
		crargs.flags = 0;

		error = as_map(as, addr, size, segvn_create, &crargs);
	}

	as_rangeunlock(as);
	if (error)
		goto errret;

	/* record shmem range for the detach */
	sa_add(pp, addr, (size_t)size, useISM ? SHMSA_ISM : 0, sp);
	*rvp = (uintptr_t)addr;

	sp->shm_atime = gethrestime_sec();
	sp->shm_lpid = pp->p_pid;
	ipc_hold(shm_svc, (kipc_perm_t *)sp);

	/*
	 * Tell machine specific code that lwp has mapped shared memory
	 */
	LWP_MMODEL_SHARED_AS(addr, size);

errret:
	mutex_exit(lock);
	return (error);
}

static void
shm_dtor(kipc_perm_t *perm)
{
	kshmid_t *sp = (kshmid_t *)perm;
	uint_t cnt;
	size_t rsize;

	ANON_LOCK_ENTER(&sp->shm_amp->a_rwlock, RW_WRITER);
	anonmap_purge(sp->shm_amp);
	ANON_LOCK_EXIT(&sp->shm_amp->a_rwlock);

	if (sp->shm_sptinfo) {
		if (isspt(sp)) {
			sptdestroy(sp->shm_sptinfo->sptas, sp->shm_amp);
			sp->shm_lkcnt = 0;
		}
		kmem_free(sp->shm_sptinfo, sizeof (sptinfo_t));
	}

	if (sp->shm_lkcnt > 0) {
		shmem_unlock(sp, sp->shm_amp);
		sp->shm_lkcnt = 0;
	}

	ANON_LOCK_ENTER(&sp->shm_amp->a_rwlock, RW_WRITER);
	cnt = --sp->shm_amp->refcnt;
	ANON_LOCK_EXIT(&sp->shm_amp->a_rwlock);
	ASSERT(cnt == 0);
	shm_rm_amp(sp);

	if (sp->shm_perm.ipc_id != IPC_ID_INVAL) {
		rsize = ptob(btopr(sp->shm_segsz));
		ipcs_lock(shm_svc);
		sp->shm_perm.ipc_proj->kpj_data.kpd_shmmax -= rsize;
		sp->shm_perm.ipc_zone_ref.zref_zone->zone_shmmax -= rsize;
		ipcs_unlock(shm_svc);
	}
}

/* ARGSUSED */
static void
shm_rmid(kipc_perm_t *perm)
{
	/* nothing to do */
}

/*
 * Shmctl system call.
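 *
 * Handles IPC_SET, IPC_STAT, IPC_SET64 and IPC_STAT64 (get/set of
 * permissions and statistics), IPC_RMID (removal), and SHM_LOCK and
 * SHM_UNLOCK (wiring the segment's pages down and releasing them).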
 */
/* ARGSUSED */
static int
shmctl(int shmid, int cmd, void *arg)
{
	kshmid_t		*sp;	/* shared memory header ptr */
	STRUCT_DECL(shmid_ds, ds);	/* for SVR4 IPC_SET */
	int			error = 0;
	struct cred		*cr = CRED();
	kmutex_t		*lock;
	model_t			mdl = get_udatamodel();
	struct shmid_ds64	ds64;
	shmatt_t		nattch;

	STRUCT_INIT(ds, mdl);

	/*
	 * Perform pre- or non-lookup actions (e.g. copyins, RMID).
	 */
	switch (cmd) {
	case IPC_SET:
		if (copyin(arg, STRUCT_BUF(ds), STRUCT_SIZE(ds)))
			return (EFAULT);
		break;

	case IPC_SET64:
		if (copyin(arg, &ds64, sizeof (struct shmid_ds64)))
			return (EFAULT);
		break;

	case IPC_RMID:
		return (ipc_rmid(shm_svc, shmid, cr));
	}

	if ((lock = ipc_lookup(shm_svc, shmid, (kipc_perm_t **)&sp)) == NULL)
		return (EINVAL);

	switch (cmd) {
	/* Set ownership and permissions. */
	case IPC_SET:
		if (error = ipcperm_set(shm_svc, cr, &sp->shm_perm,
		    &STRUCT_BUF(ds)->shm_perm, mdl))
			break;
		sp->shm_ctime = gethrestime_sec();
		break;

	case IPC_STAT:
		if (error = ipcperm_access(&sp->shm_perm, SHM_R, cr))
			break;

		nattch = sp->shm_perm.ipc_ref - 1;

		ipcperm_stat(&STRUCT_BUF(ds)->shm_perm, &sp->shm_perm, mdl);
		STRUCT_FSET(ds, shm_segsz, sp->shm_segsz);
		STRUCT_FSETP(ds, shm_amp, NULL);	/* kernel addr */
		STRUCT_FSET(ds, shm_lkcnt, sp->shm_lkcnt);
		STRUCT_FSET(ds, shm_lpid, sp->shm_lpid);
		STRUCT_FSET(ds, shm_cpid, sp->shm_cpid);
		STRUCT_FSET(ds, shm_nattch, nattch);
		STRUCT_FSET(ds, shm_cnattch, sp->shm_ismattch);
		STRUCT_FSET(ds, shm_atime, sp->shm_atime);
		STRUCT_FSET(ds, shm_dtime, sp->shm_dtime);
		STRUCT_FSET(ds, shm_ctime, sp->shm_ctime);

		mutex_exit(lock);
		if (copyout(STRUCT_BUF(ds), arg, STRUCT_SIZE(ds)))
			return (EFAULT);

		return (0);

	case IPC_SET64:
		if (error = ipcperm_set64(shm_svc, cr,
		    &sp->shm_perm, &ds64.shmx_perm))
			break;
		sp->shm_ctime = gethrestime_sec();
		break;

	case IPC_STAT64:
		nattch = sp->shm_perm.ipc_ref - 1;

		ipcperm_stat64(&ds64.shmx_perm, &sp->shm_perm);
		ds64.shmx_segsz = sp->shm_segsz;
		ds64.shmx_lkcnt = sp->shm_lkcnt;
		ds64.shmx_lpid = sp->shm_lpid;
		ds64.shmx_cpid = sp->shm_cpid;
		ds64.shmx_nattch = nattch;
		ds64.shmx_cnattch = sp->shm_ismattch;
		ds64.shmx_atime = sp->shm_atime;
		ds64.shmx_dtime = sp->shm_dtime;
		ds64.shmx_ctime = sp->shm_ctime;

		mutex_exit(lock);
		if (copyout(&ds64, arg, sizeof (struct shmid_ds64)))
			return (EFAULT);

		return (0);

	/* Lock segment in memory */
	case SHM_LOCK:
		if ((error = secpolicy_lock_memory(cr)) != 0)
			break;

		/* protect against overflow */
		if (sp->shm_lkcnt >= USHRT_MAX) {
			error = ENOMEM;
			break;
		}
		if (!isspt(sp) && (sp->shm_lkcnt++ == 0)) {
			if (error = shmem_lock(sp, sp->shm_amp)) {
				ANON_LOCK_ENTER(&sp->shm_amp->a_rwlock,
				    RW_WRITER);
				cmn_err(CE_NOTE, "shmctl - couldn't lock %ld"
				    " pages into memory", sp->shm_amp->size);
				ANON_LOCK_EXIT(&sp->shm_amp->a_rwlock);
				error = ENOMEM;
				sp->shm_lkcnt--;
			}
		}
		break;

	/* Unlock segment */
	case SHM_UNLOCK:
		if ((error = secpolicy_lock_memory(cr)) != 0)
			break;

		if (sp->shm_lkcnt && (--sp->shm_lkcnt == 0)) {
			shmem_unlock(sp, sp->shm_amp);
		}
		break;

	default:
		error = EINVAL;
		break;
	}
	mutex_exit(lock);
	return (error);
}

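/*
 * Detach a single segacct record from a process: tear down any lwpchan
 * mappings and the address space mapping, then update the segment's
 * detach-time accounting and release the hold taken at attach time.
 */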
static void
shm_detach(proc_t *pp, segacct_t *sap)
{
	kshmid_t	*sp = sap->sa_id;
	size_t		len = sap->sa_len;
	caddr_t		addr = sap->sa_addr;

	/*
	 * Discard lwpchan mappings.
	 */
	if (pp->p_lcp != NULL)
		lwpchan_delete_mapping(pp, addr, addr + len);
	(void) as_unmap(pp->p_as, addr, len);

	/*
	 * Perform some detach-time accounting.
	 */
	(void) ipc_lock(shm_svc, sp->shm_perm.ipc_id);
	if (sap->sa_flags & SHMSA_ISM)
		sp->shm_ismattch--;
	sp->shm_dtime = gethrestime_sec();
	sp->shm_lpid = pp->p_pid;
	ipc_rele(shm_svc, (kipc_perm_t *)sp);	/* Drops lock */

	kmem_free(sap, sizeof (segacct_t));
}

static int
shmdt(caddr_t addr)
{
	proc_t *pp = curproc;
	segacct_t *sap, template;

	mutex_enter(&pp->p_lock);
	prbarrier(pp);	/* block /proc.  See shmgetid(). */

	template.sa_addr = addr;
	template.sa_len = 0;
	if ((pp->p_segacct == NULL) ||
	    ((sap = avl_find(pp->p_segacct, &template, NULL)) == NULL)) {
		mutex_exit(&pp->p_lock);
		return (EINVAL);
	}
	if (sap->sa_addr != addr) {
		mutex_exit(&pp->p_lock);
		return (EINVAL);
	}
	avl_remove(pp->p_segacct, sap);
	mutex_exit(&pp->p_lock);

	shm_detach(pp, sap);

	return (0);
}

/*
 * Remove all shared memory segments associated with a given zone.
 * Called by zone_shutdown when the zone is halted.
 */
/*ARGSUSED1*/
static void
shm_remove_zone(zoneid_t zoneid, void *arg)
{
	ipc_remove_zone(shm_svc, zoneid);
}

/*
 * Shmget (create new shmem) system call.
 */
static int
shmget(key_t key, size_t size, int shmflg, uintptr_t *rvp)
{
	proc_t		*pp = curproc;
	kshmid_t	*sp;
	kmutex_t	*lock;
	int		error;

top:
	if (error = ipc_get(shm_svc, key, shmflg, (kipc_perm_t **)&sp, &lock))
		return (error);

	if (!IPC_FREE(&sp->shm_perm)) {
		/*
		 * A segment with the requested key exists.
		 */
		if (size > sp->shm_segsz) {
			mutex_exit(lock);
			return (EINVAL);
		}
	} else {
		/*
		 * A new segment should be created.
		 */
		size_t npages = btopr(size);
		size_t rsize = ptob(npages);

		/*
		 * Check rsize and the per-project and per-zone limit on
		 * shared memory.  Checking rsize handles both the size == 0
		 * case and the size < ULONG_MAX & PAGEMASK case (i.e.
		 * rounding up wraps a size_t).
		 */
		if (rsize == 0 ||
		    (rctl_test(rc_project_shmmax,
		    pp->p_task->tk_proj->kpj_rctls, pp, rsize,
		    RCA_SAFE) & RCT_DENY) ||
		    (rctl_test(rc_zone_shmmax,
		    pp->p_zone->zone_rctls, pp, rsize,
		    RCA_SAFE) & RCT_DENY)) {

			mutex_exit(&pp->p_lock);
			mutex_exit(lock);
			ipc_cleanup(shm_svc, (kipc_perm_t *)sp);
			return (EINVAL);
		}
		mutex_exit(&pp->p_lock);
		mutex_exit(lock);

		if (anon_resv(rsize) == 0) {
			ipc_cleanup(shm_svc, (kipc_perm_t *)sp);
			return (ENOMEM);
		}

		/*
		 * If any new failure points are introduced between the
		 * above anon_resv() and the below ipc_commit_begin(),
		 * these failure points will need to unreserve the anon
		 * reserved using anon_unresv().
		 *
		 * Once ipc_commit_begin() is called, the anon reserved
		 * above will be automatically unreserved by future calls to
		 * ipcs_cleanup() -> shm_dtor() -> shm_rm_amp().  If
		 * ipc_commit_begin() fails, it internally calls shm_dtor(),
		 * unreserving the above anon, and freeing the below amp.
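		 *
		 * (For example, with 4K pages a one-byte request reserves
		 * rsize = 4096 bytes against the limits, while a size whose
		 * page round-up wraps a size_t yields rsize == 0 and fails
		 * with EINVAL, as described above.)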
		 */

		sp->shm_amp = anonmap_alloc(rsize, rsize, ANON_SLEEP);
		sp->shm_amp->a_sp = sp;
		/*
		 * Store the original user's requested size, in bytes,
		 * rather than the page-aligned size.  The former is
		 * used for IPC_STAT and shmget() lookups.  The latter
		 * is saved in the anon_map structure and is used for
		 * calls to the vm layer.
		 */
		sp->shm_segsz = size;
		sp->shm_atime = sp->shm_dtime = 0;
		sp->shm_ctime = gethrestime_sec();
		sp->shm_lpid = (pid_t)0;
		sp->shm_cpid = curproc->p_pid;
		sp->shm_ismattch = 0;
		sp->shm_sptinfo = NULL;
		/*
		 * Check limits one last time, push id into global
		 * visibility, and update resource usage counts.
		 */
		if (error = ipc_commit_begin(shm_svc, key, shmflg,
		    (kipc_perm_t *)sp)) {
			if (error == EAGAIN)
				goto top;
			return (error);
		}

		if ((rctl_test(rc_project_shmmax,
		    sp->shm_perm.ipc_proj->kpj_rctls, pp, rsize,
		    RCA_SAFE) & RCT_DENY) ||
		    (rctl_test(rc_zone_shmmax,
		    sp->shm_perm.ipc_zone_ref.zref_zone->zone_rctls, pp, rsize,
		    RCA_SAFE) & RCT_DENY)) {
			ipc_cleanup(shm_svc, (kipc_perm_t *)sp);
			return (EINVAL);
		}
		sp->shm_perm.ipc_proj->kpj_data.kpd_shmmax += rsize;
		sp->shm_perm.ipc_zone_ref.zref_zone->zone_shmmax += rsize;

		lock = ipc_commit_end(shm_svc, &sp->shm_perm);
	}

	if (AU_AUDITING())
		audit_ipcget(AT_IPC_SHM, (void *)sp);

	*rvp = (uintptr_t)(sp->shm_perm.ipc_id);

	mutex_exit(lock);
	return (0);
}

/*
 * shmids system call.
 */
static int
shmids(int *buf, uint_t nids, uint_t *pnids)
{
	return (ipc_ids(shm_svc, buf, nids, pnids));
}

/*
 * System entry point for shmat, shmctl, shmdt, and shmget system calls.
 */
static uintptr_t
shmsys(int opcode, uintptr_t a0, uintptr_t a1, uintptr_t a2)
{
	int	error;
	uintptr_t r_val = 0;

	switch (opcode) {
	case SHMAT:
		error = shmat((int)a0, (caddr_t)a1, (int)a2, &r_val);
		break;
	case SHMCTL:
		error = shmctl((int)a0, (int)a1, (void *)a2);
		break;
	case SHMDT:
		error = shmdt((caddr_t)a0);
		break;
	case SHMGET:
		error = shmget((key_t)a0, (size_t)a1, (int)a2, &r_val);
		break;
	case SHMIDS:
		error = shmids((int *)a0, (uint_t)a1, (uint_t *)a2);
		break;
	default:
		error = EINVAL;
		break;
	}

	if (error)
		return ((uintptr_t)set_errno(error));

	return (r_val);
}

/*
 * segacct_t comparator
 * This works as expected, with one minor change: the first of two real
 * segments with equal addresses is considered to be 'greater than' the
 * second.  We only return equal when searching using a template, in
 * which case we explicitly set the template segment's length to 0
 * (which is invalid for a real segment).
 */
static int
shm_sacompar(const void *x, const void *y)
{
	segacct_t	*sa1 = (segacct_t *)x;
	segacct_t	*sa2 = (segacct_t *)y;

	if (sa1->sa_addr < sa2->sa_addr) {
		return (-1);
	} else if (sa2->sa_len != 0) {
		if (sa1->sa_addr >= sa2->sa_addr + sa2->sa_len) {
			return (1);
		} else if (sa1->sa_len != 0) {
			return (1);
		} else {
			return (0);
		}
	} else if (sa1->sa_addr > sa2->sa_addr) {
		return (1);
	} else {
		return (0);
	}
}

/*
 * add this record to the segacct list.
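 * The tree is allocated lazily on first use; the speculative allocation
 * below keeps the KM_SLEEP kmem_alloc() outside of p_lock, which cannot
 * be held across a sleeping allocation.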
 */
static void
sa_add(struct proc *pp, caddr_t addr, size_t len, ulong_t flags, kshmid_t *id)
{
	segacct_t *nsap;
	avl_tree_t *tree = NULL;
	avl_index_t where;

	nsap = kmem_alloc(sizeof (segacct_t), KM_SLEEP);
	nsap->sa_addr = addr;
	nsap->sa_len = len;
	nsap->sa_flags = flags;
	nsap->sa_id = id;

	if (pp->p_segacct == NULL)
		tree = kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);

	mutex_enter(&pp->p_lock);
	prbarrier(pp);	/* block /proc.  See shmgetid(). */

	if (pp->p_segacct == NULL) {
		avl_create(tree, shm_sacompar, sizeof (segacct_t),
		    offsetof(segacct_t, sa_tree));
		pp->p_segacct = tree;
	} else if (tree) {
		kmem_free(tree, sizeof (avl_tree_t));
	}

	/*
	 * We can ignore the result of avl_find, as the comparator will
	 * never return equal for segments with non-zero length.  This
	 * is a necessary hack to get around the fact that we do, in
	 * fact, have duplicate keys.
	 */
	(void) avl_find(pp->p_segacct, nsap, &where);
	avl_insert(pp->p_segacct, nsap, where);

	mutex_exit(&pp->p_lock);
}

/*
 * Duplicate parent's segacct records in child.
 */
void
shmfork(struct proc *ppp, struct proc *cpp)
{
	segacct_t *sap;
	kshmid_t *sp;
	kmutex_t *mp;

	ASSERT(ppp->p_segacct != NULL);

	/*
	 * We are the only lwp running in the parent so nobody can
	 * mess with our p_segacct list.  Thus it is safe to traverse
	 * the list without holding p_lock.  This is essential because
	 * we can't hold p_lock during a KM_SLEEP allocation.
	 */
	for (sap = (segacct_t *)avl_first(ppp->p_segacct); sap != NULL;
	    sap = (segacct_t *)AVL_NEXT(ppp->p_segacct, sap)) {
		sa_add(cpp, sap->sa_addr, sap->sa_len, sap->sa_flags,
		    sap->sa_id);
		sp = sap->sa_id;
		mp = ipc_lock(shm_svc, sp->shm_perm.ipc_id);
		if (sap->sa_flags & SHMSA_ISM)
			sp->shm_ismattch++;
		ipc_hold(shm_svc, (kipc_perm_t *)sp);
		mutex_exit(mp);
	}
}

/*
 * Detach shared memory segments from exiting process.
 */
void
shmexit(struct proc *pp)
{
	segacct_t	*sap;
	avl_tree_t	*tree;
	void		*cookie = NULL;

	ASSERT(pp->p_segacct != NULL);

	mutex_enter(&pp->p_lock);
	prbarrier(pp);
	tree = pp->p_segacct;
	pp->p_segacct = NULL;
	mutex_exit(&pp->p_lock);

	while ((sap = avl_destroy_nodes(tree, &cookie)) != NULL)
		shm_detach(pp, sap);

	avl_destroy(tree);
	kmem_free(tree, sizeof (avl_tree_t));
}

/*
 * At this time pages should be in memory, so just lock them.
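 * Each existing page gets an additional p_lckcnt hold via
 * page_pp_lock(); the caller (shmem_lock()) has already faulted the
 * pages in and taken the first lock through MC_LOCK.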
 */
static void
lock_again(size_t npages, kshmid_t *sp, struct anon_map *amp)
{
	struct anon *ap;
	struct page *pp;
	struct vnode *vp;
	u_offset_t off;
	ulong_t anon_idx;
	anon_sync_obj_t cookie;

	mutex_enter(&sp->shm_mlock);
	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
	for (anon_idx = 0; npages != 0; anon_idx++, npages--) {

		anon_array_enter(amp, anon_idx, &cookie);
		ap = anon_get_ptr(amp->ahp, anon_idx);
		ASSERT(ap != NULL);
		swap_xlate(ap, &vp, &off);
		anon_array_exit(&cookie);

		pp = page_lookup(vp, off, SE_SHARED);
		if (pp == NULL) {
			panic("lock_again: page not in the system");
			/*NOTREACHED*/
		}
		/* page should already be locked by caller */
		ASSERT(pp->p_lckcnt > 0);
		(void) page_pp_lock(pp, 0, 0);
		page_unlock(pp);
	}
	ANON_LOCK_EXIT(&amp->a_rwlock);
	mutex_exit(&sp->shm_mlock);
}

/*
 * Attach the shared memory segment to the process
 * address space and lock the pages.
 */
static int
shmem_lock(kshmid_t *sp, struct anon_map *amp)
{
	size_t npages = btopr(amp->size);
	struct as *as;
	struct segvn_crargs crargs;
	uint_t error;

	/*
	 * A later ISM/DISM attach may increase the size of the amp, so
	 * cache the number of pages locked for the future shmem_unlock()
	 */
	sp->shm_lkpages = npages;

	as = as_alloc();
	/* Initialize the create arguments and map the segment */
	crargs = *(struct segvn_crargs *)zfod_argsp;	/* structure copy */
	crargs.offset = (u_offset_t)0;
	crargs.type = MAP_SHARED;
	crargs.amp = amp;
	crargs.prot = PROT_ALL;
	crargs.maxprot = crargs.prot;
	crargs.flags = 0;
	error = as_map(as, 0x0, amp->size, segvn_create, &crargs);
	if (!error) {
		if ((error = as_ctl(as, 0x0, amp->size, MC_LOCK, 0, 0,
		    NULL, 0)) == 0) {
			lock_again(npages, sp, amp);
		}
		(void) as_unmap(as, 0x0, amp->size);
	}
	as_free(as);
	return (error);
}


/*
 * Unlock shared memory
 */
static void
shmem_unlock(kshmid_t *sp, struct anon_map *amp)
{
	struct anon *ap;
	pgcnt_t npages = sp->shm_lkpages;
	struct vnode *vp;
	struct page *pp;
	u_offset_t off;
	ulong_t anon_idx;
	size_t unlocked_bytes = 0;
	kproject_t *proj;
	anon_sync_obj_t cookie;

	proj = sp->shm_perm.ipc_proj;
	mutex_enter(&sp->shm_mlock);
	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
	for (anon_idx = 0; anon_idx < npages; anon_idx++) {

		anon_array_enter(amp, anon_idx, &cookie);
		if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) {
			panic("shmem_unlock: null ap");
			/*NOTREACHED*/
		}
		swap_xlate(ap, &vp, &off);
		anon_array_exit(&cookie);
		pp = page_lookup(vp, off, SE_SHARED);
		if (pp == NULL) {
			panic("shmem_unlock: page not in the system");
			/*NOTREACHED*/
		}
		/*
		 * The page should still hold at least one lock from the
		 * previous shmem_lock().
		 */
		ASSERT(pp->p_lckcnt > 0);
		page_pp_unlock(pp, 0, 0);
		if (pp->p_lckcnt == 0)
			unlocked_bytes += PAGESIZE;

		page_unlock(pp);
	}

	if (unlocked_bytes > 0) {
		rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0);
	}

	ANON_LOCK_EXIT(&amp->a_rwlock);
	mutex_exit(&sp->shm_mlock);
}

/*
 * We call this routine when we have removed all references to this
 * amp.  This means all shmdt()s and the IPC_RMID have been done.
 */
static void
shm_rm_amp(kshmid_t *sp)
{
	struct anon_map *amp = sp->shm_amp;
	zone_t *zone;

	zone = sp->shm_perm.ipc_zone_ref.zref_zone;
	ASSERT(zone != NULL);
	/*
	 * Free up the anon_map.
	 */
	lgrp_shm_policy_fini(amp, NULL);
	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
	if (amp->a_szc != 0) {
		anon_shmap_free_pages(amp, 0, amp->size);
	} else {
		anon_free(amp->ahp, 0, amp->size);
	}
	ANON_LOCK_EXIT(&amp->a_rwlock);
	anon_unresv_zone(amp->swresv, zone);
	anonmap_free(amp);
}

/*
 * Return the shared memory id for the process's virtual address.
 * Return SHMID_NONE if addr is not within a SysV shared memory segment.
 * Return SHMID_FREE if addr's SysV shared memory segment's id has been freed.
 *
 * shmgetid() is called from code in /proc with the process locked but
 * with pp->p_lock not held.  The address space lock is held, so we
 * cannot grab pp->p_lock here due to lock-ordering constraints.
 * Because of all this, modifications to the p_segacct list must only
 * be made after calling prbarrier() to ensure the process is not locked.
 * See shmdt() and sa_add(), above.  shmgetid() may also be called on a
 * thread's own process without the process locked.
 */
int
shmgetid(proc_t *pp, caddr_t addr)
{
	segacct_t *sap, template;

	ASSERT(MUTEX_NOT_HELD(&pp->p_lock));
	ASSERT((pp->p_proc_flag & P_PR_LOCK) || pp == curproc);

	if (pp->p_segacct == NULL)
		return (SHMID_NONE);

	template.sa_addr = addr;
	template.sa_len = 0;
	if ((sap = avl_find(pp->p_segacct, &template, NULL)) == NULL)
		return (SHMID_NONE);

	if (IPC_FREE(&sap->sa_id->shm_perm))
		return (SHMID_FREE);

	return (sap->sa_id->shm_perm.ipc_id);
}