1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright 2018 Joyent, Inc.
24 * Copyright (c) 2016 by Delphix. All rights reserved.
25 */
26
27 #include <sys/param.h>
28 #include <sys/user.h>
29 #include <sys/mman.h>
30 #include <sys/kmem.h>
31 #include <sys/sysmacros.h>
32 #include <sys/cmn_err.h>
33 #include <sys/systm.h>
34 #include <sys/tuneable.h>
35 #include <vm/hat.h>
36 #include <vm/seg.h>
37 #include <vm/as.h>
38 #include <vm/anon.h>
39 #include <vm/page.h>
40 #include <sys/buf.h>
41 #include <sys/swap.h>
42 #include <sys/atomic.h>
43 #include <vm/seg_spt.h>
44 #include <sys/debug.h>
45 #include <sys/vtrace.h>
46 #include <sys/shm.h>
47 #include <sys/shm_impl.h>
48 #include <sys/lgrp.h>
49 #include <sys/vmsystm.h>
50 #include <sys/policy.h>
51 #include <sys/project.h>
52 #include <sys/tnf_probe.h>
53 #include <sys/zone.h>
54
55 #define SEGSPTADDR (caddr_t)0x0
56
57 /*
58 * # pages used for spt
59 */
60 size_t spt_used;
61
62 /*
63  * segspt_minfree is the memory left for the system after ISM
64  * locks its pages; it is set to 5% of availrmem in sptcreate()
65  * when ISM is created. ISM should not use more than ~90% of
66  * availrmem; if it does, the performance of the system may
67  * decrease. Machines with large memories may be able to use
68  * more memory for ISM, so we set the default segspt_minfree
69  * to 5% (which gives ISM at most 95% of availrmem).
70  * If somebody wants even more memory for ISM (risking hanging
71  * the system) they can patch segspt_minfree to a smaller number.
72  */
73 pgcnt_t segspt_minfree = 0;
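/*
 * A worked example (illustrative only, not a tunable recommendation):
 * with the default left at 0, sptcreate() sets
 * segspt_minfree = availrmem / 20. If availrmem corresponds to 4 GB,
 * roughly 205 MB stays reserved for the rest of the system and ISM may
 * lock up to the remaining ~95% (about 3.8 GB).
 */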
74
75 static int segspt_create(struct seg **segpp, void *argsp);
76 static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize);
77 static void segspt_free(struct seg *seg);
78 static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len);
79 static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr);
80
81 /* ARGSUSED */
82 __NORETURN static int
83 segspt_badop_dup(struct seg *seg __unused, struct seg *newseg __unused)
84 {
85 panic("%s called", __func__);
86 }
87
88 /* ARGSUSED */
89 __NORETURN static faultcode_t
90 segspt_badop_fault(struct hat *hat, struct seg *seg, caddr_t addr,
91 size_t len, enum fault_type type, enum seg_rw rw)
92 {
93 panic("%s called", __func__);
94 }
95
96 /* ARGSUSED */
97 __NORETURN static faultcode_t
98 segspt_badop_faulta(struct seg *seg __unused, caddr_t addr __unused)
99 {
100 panic("%s called", __func__);
101 }
102
103 /* ARGSUSED */
104 __NORETURN static int
105 segspt_badop_prot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
106 {
107 panic("%s called", __func__);
108 }
109
110 /* ARGSUSED */
111 __NORETURN static int
112 segspt_badop_checkprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
113 {
114 panic("%s called", __func__);
115 }
116
117 /* ARGSUSED */
118 __NORETURN static int
119 segspt_badop_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
120 {
121 panic("%s called", __func__);
122 }
123
124 /* ARGSUSED */
125 __NORETURN static size_t
126 segspt_badop_swapout(struct seg *seg)
127 {
128 panic("%s called", __func__);
129 }
130
131 /* ARGSUSED */
132 __NORETURN static int
133 segspt_badop_sync(struct seg *seg, caddr_t addr, size_t len, int attr,
134 uint_t flags)
135 {
136 panic("%s called", __func__);
137 }
138
139 /* ARGSUSED */
140 __NORETURN
141 static size_t
142 segspt_badop_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
143 {
144 panic("%s called", __func__);
145 }
146
147 /* ARGSUSED */
148 __NORETURN static int
149 segspt_badop_lockop(struct seg *seg, caddr_t addr, size_t len, int attr,
150 int op, ulong_t *lockmap, size_t pos)
151 {
152 panic("%s called", __func__);
153 }
154
155 /* ARGSUSED */
156 __NORETURN static int
157 segspt_badop_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
158 {
159 panic("%s called", __func__);
160 }
161
162 /* ARGSUSED */
163 __NORETURN static u_offset_t
164 segspt_badop_getoffset(struct seg *seg, caddr_t addr)
165 {
166 panic("%s called", __func__);
167 }
168
169 /* ARGSUSED */
170 __NORETURN static int
171 segspt_badop_gettype(struct seg *seg, caddr_t addr)
172 {
173 panic("%s called", __func__);
174 }
175
176 /* ARGSUSED */
177 __NORETURN static int
178 segspt_badop_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
179 {
180 panic("%s called", __func__);
181 }
182
183 /* ARGSUSED */
184 __NORETURN static int
185 segspt_badop_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
186 {
187 panic("%s called", __func__);
188 }
189
190 /* ARGSUSED */
191 __NORETURN static void
192 segspt_badop_dump(struct seg *seg)
193 {
194 panic("%s called", __func__);
195 }
196
197 /* ARGSUSED */
198 __NORETURN static int
199 segspt_badop_pagelock(struct seg *seg, caddr_t addr, size_t len,
200 struct page ***ppp, enum lock_type type, enum seg_rw rw)
201 {
202 panic("%s called", __func__);
203 }
204
205 /* ARGSUSED */
206 __NORETURN static int
207 segspt_badop_setpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
208 {
209 panic("%s called", __func__);
210 }
211
212 /* ARGSUSED */
213 __NORETURN static int
214 segspt_badop_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
215 {
216 panic("%s called", __func__);
217 }
218
219 /* ARGSUSED */
220 __NORETURN static int
221 segspt_badop_capable(struct seg *seg, segcapability_t capability)
222 {
223 panic("%s called", __func__);
224 }
225
226 struct seg_ops segspt_ops = {
227 segspt_badop_dup, /* dup */
228 segspt_unmap,
229 segspt_free,
230 segspt_badop_fault, /* fault */
231 segspt_badop_faulta, /* faulta */
232 segspt_badop_prot, /* setprot */
233 segspt_badop_checkprot, /* checkprot */
234 segspt_badop_kluster, /* kluster */
235 segspt_badop_swapout, /* swapout */
236 segspt_badop_sync, /* sync */
237 segspt_badop_incore, /* incore */
238 segspt_badop_lockop, /* lockop */
239 segspt_badop_getprot, /* getprot */
240 segspt_badop_getoffset, /* getoffset */
241 segspt_badop_gettype, /* gettype */
242 segspt_badop_getvp, /* getvp */
243 segspt_badop_advise, /* advise */
244 segspt_badop_dump, /* dump */
245 segspt_badop_pagelock, /* pagelock */
246 segspt_badop_setpgsz, /* setpgsz */
247 segspt_badop_getmemid, /* getmemid */
248 segspt_getpolicy, /* getpolicy */
249 segspt_badop_capable, /* capable */
250 seg_inherit_notsup /* inherit */
251 };
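/*
 * segspt_ops backs the "dummy" spt segment that sptcreate() maps into its
 * own private address space. User processes never operate on that segment
 * directly (their mappings use segspt_shmops below), so only unmap, free
 * and getpolicy point at real implementations; every other entry is a
 * badop that panics if it is ever reached (inherit uses the generic
 * seg_inherit_notsup).
 */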
252
253 static int segspt_shmdup(struct seg *seg, struct seg *newseg);
254 static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize);
255 static void segspt_shmfree(struct seg *seg);
256 static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg,
257 caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw);
258 static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
259 static int segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len,
260 uint_t prot);
261 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
262 uint_t prot);
263 static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
264 static size_t segspt_shmswapout(struct seg *seg);
265 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
266 char *vec);
267 static int segspt_shmsync(struct seg *seg, caddr_t addr, size_t len,
268 int attr, uint_t flags);
269 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
270 int attr, int op, ulong_t *lockmap, size_t pos);
271 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
272 uint_t *protv);
273 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
274 static int segspt_shmgettype(struct seg *seg, caddr_t addr);
275 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
276 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
277 uint_t behav);
278 static void segspt_shmdump(struct seg *seg);
279 static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
280 struct page ***, enum lock_type, enum seg_rw);
281 static int segspt_shmsetpgsz(struct seg *, caddr_t, size_t, uint_t);
282 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
283 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
284 static int segspt_shmcapable(struct seg *, segcapability_t);
285
286 struct seg_ops segspt_shmops = {
287 segspt_shmdup,
288 segspt_shmunmap,
289 segspt_shmfree,
290 segspt_shmfault,
291 segspt_shmfaulta,
292 segspt_shmsetprot,
293 segspt_shmcheckprot,
294 segspt_shmkluster,
295 segspt_shmswapout,
296 segspt_shmsync,
297 segspt_shmincore,
298 segspt_shmlockop,
299 segspt_shmgetprot,
300 segspt_shmgetoffset,
301 segspt_shmgettype,
302 segspt_shmgetvp,
303 segspt_shmadvise, /* advise */
304 segspt_shmdump,
305 segspt_shmpagelock,
306 segspt_shmsetpgsz,
307 segspt_shmgetmemid,
308 segspt_shmgetpolicy,
309 segspt_shmcapable,
310 seg_inherit_notsup
311 };
312
313 static void segspt_purge(struct seg *seg);
314 static int segspt_reclaim(void *, caddr_t, size_t, struct page **,
315 enum seg_rw, int);
316 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
317 page_t **ppa);
318
319
320
321 /*ARGSUSED*/
322 int
323 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
324 uint_t prot, uint_t flags, uint_t share_szc)
325 {
326 int err;
327 struct as *newas;
328 struct segspt_crargs sptcargs;
329
330 #ifdef DEBUG
331 TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */,
332 tnf_ulong, size, size );
333 #endif
334 	if (segspt_minfree == 0)	/* leave min 5% of availrmem */
335 		segspt_minfree = availrmem/20;	/* for the system */
336
337 if (!hat_supported(HAT_SHARED_PT, (void *)0))
338 return (EINVAL);
339
340 /*
341 * get a new as for this shared memory segment
342 */
343 newas = as_alloc();
344 newas->a_proc = NULL;
345 sptcargs.amp = amp;
346 sptcargs.prot = prot;
347 sptcargs.flags = flags;
348 sptcargs.szc = share_szc;
349 /*
350 * create a shared page table (spt) segment
351 */
352
353 if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) {
354 as_free(newas);
355 return (err);
356 }
357 *sptseg = sptcargs.seg_spt;
358 return (0);
359 }
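/*
 * A minimal usage sketch (hypothetical caller, loosely modeled on the
 * shmat() path in shm.c; variable names are illustrative):
 *
 *	struct seg *sptseg;
 *	int error;
 *
 *	error = sptcreate(size, &sptseg, amp, prot, flags, share_szc);
 *	if (error == 0) {
 *		... attach user mappings against sptseg->s_as ...
 *		sptdestroy(sptseg->s_as, amp);
 *	}
 *
 * On success the dummy address space holding the spt segment is reachable
 * through sptseg->s_as; sptdestroy() tears it down when the shared
 * segment is destroyed.
 */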
360
361 void
362 sptdestroy(struct as *as, struct anon_map *amp)
363 {
364
365 #ifdef DEBUG
366 TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */);
367 #endif
368 (void) as_unmap(as, SEGSPTADDR, amp->size);
369 as_free(as);
370 }
371
372 /*
373  * Called from seg_free().
374  * Free (i.e., unlock, unmap, return to the free list)
375  * all the pages in the given seg.
376 */
377 void
378 segspt_free(struct seg *seg)
379 {
380 struct spt_data *sptd = (struct spt_data *)seg->s_data;
381
382 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
383
384 if (sptd != NULL) {
385 if (sptd->spt_realsize)
386 segspt_free_pages(seg, seg->s_base, sptd->spt_realsize);
387
388 if (sptd->spt_ppa_lckcnt) {
389 kmem_free(sptd->spt_ppa_lckcnt,
390 sizeof (*sptd->spt_ppa_lckcnt)
391 * btopr(sptd->spt_amp->size));
392 }
393 kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp));
394 cv_destroy(&sptd->spt_cv);
395 mutex_destroy(&sptd->spt_lock);
396 kmem_free(sptd, sizeof (*sptd));
397 }
398 }
399
400 /*ARGSUSED*/
401 static int
402 segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr,
403 uint_t flags)
404 {
405 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
406
407 return (0);
408 }
409
410 /*ARGSUSED*/
411 static size_t
412 segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec)
413 {
414 caddr_t eo_seg;
415 pgcnt_t npages;
416 struct shm_data *shmd = (struct shm_data *)seg->s_data;
417 struct seg *sptseg;
418 struct spt_data *sptd;
419
420 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
421 #ifdef lint
422 seg = seg;
423 #endif
424 sptseg = shmd->shm_sptseg;
425 sptd = sptseg->s_data;
426
427 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
428 eo_seg = addr + len;
429 while (addr < eo_seg) {
430 /* page exists, and it's locked. */
431 *vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED |
432 SEG_PAGE_ANON;
433 addr += PAGESIZE;
434 }
435 return (len);
436 } else {
437 struct anon_map *amp = shmd->shm_amp;
438 struct anon *ap;
439 page_t *pp;
440 pgcnt_t anon_index;
441 struct vnode *vp;
442 u_offset_t off;
443 ulong_t i;
444 int ret;
445 anon_sync_obj_t cookie;
446
447 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
448 anon_index = seg_page(seg, addr);
449 npages = btopr(len);
450 if (anon_index + npages > btopr(shmd->shm_amp->size)) {
451 return (EINVAL);
452 }
453 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
454 for (i = 0; i < npages; i++, anon_index++) {
455 ret = 0;
456 anon_array_enter(amp, anon_index, &cookie);
457 ap = anon_get_ptr(amp->ahp, anon_index);
458 if (ap != NULL) {
459 swap_xlate(ap, &vp, &off);
460 anon_array_exit(&cookie);
461 pp = page_lookup_nowait(vp, off, SE_SHARED);
462 if (pp != NULL) {
463 ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON;
464 page_unlock(pp);
465 }
466 } else {
467 anon_array_exit(&cookie);
468 }
469 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
470 ret |= SEG_PAGE_LOCKED;
471 }
472 *vec++ = (char)ret;
473 }
474 		ANON_LOCK_EXIT(&amp->a_rwlock);
475 return (len);
476 }
477 }
478
479 static int
480 segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
481 {
482 size_t share_size;
483
484 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
485
486 /*
487 * seg.s_size may have been rounded up to the largest page size
488 * in shmat().
489 	 * XXX This should be cleaned up. sptdestroy should take a length
490 * argument which should be the same as sptcreate. Then
491 * this rounding would not be needed (or is done in shm.c)
492 * Only the check for full segment will be needed.
493 *
494 * XXX -- shouldn't raddr == 0 always? These tests don't seem
495 * to be useful at all.
496 */
497 share_size = page_get_pagesize(seg->s_szc);
498 ssize = P2ROUNDUP(ssize, share_size);
499
500 if (raddr == seg->s_base && ssize == seg->s_size) {
501 seg_free(seg);
502 return (0);
503 } else
504 return (EINVAL);
505 }
506
507 int
508 segspt_create(struct seg **segpp, void *argsp)
509 {
510 struct seg *seg = *segpp;
511 int err;
512 caddr_t addr = seg->s_base;
513 struct spt_data *sptd;
514 struct segspt_crargs *sptcargs = (struct segspt_crargs *)argsp;
515 struct anon_map *amp = sptcargs->amp;
516 struct kshmid *sp = amp->a_sp;
517 struct cred *cred = CRED();
518 ulong_t i, j, anon_index = 0;
519 pgcnt_t npages = btopr(amp->size);
520 struct vnode *vp;
521 page_t **ppa;
522 uint_t hat_flags;
523 size_t pgsz;
524 pgcnt_t pgcnt;
525 caddr_t a;
526 pgcnt_t pidx;
527 size_t sz;
528 proc_t *procp = curproc;
529 rctl_qty_t lockedbytes = 0;
530 kproject_t *proj;
531
532 /*
533 * We are holding the a_lock on the underlying dummy as,
534 * so we can make calls to the HAT layer.
535 */
536 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
537 ASSERT(sp != NULL);
538
539 #ifdef DEBUG
540 TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */,
541 tnf_opaque, addr, addr, tnf_ulong, len, seg->s_size);
542 #endif
543 if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
544 if (err = anon_swap_adjust(npages))
545 return (err);
546 }
547 err = ENOMEM;
548
549 if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL)
550 goto out1;
551
552 ppa = NULL;
553 if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
554 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages),
555 KM_NOSLEEP)) == NULL)
556 goto out2;
557 }
558
559 mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL);
560
561 if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL)
562 goto out3;
563
564 seg->s_ops = &segspt_ops;
565 sptd->spt_vp = vp;
566 sptd->spt_amp = amp;
567 sptd->spt_prot = sptcargs->prot;
568 sptd->spt_flags = sptcargs->flags;
569 seg->s_data = (caddr_t)sptd;
570 sptd->spt_ppa = NULL;
571 sptd->spt_ppa_lckcnt = NULL;
572 seg->s_szc = sptcargs->szc;
573 cv_init(&sptd->spt_cv, NULL, CV_DEFAULT, NULL);
574 sptd->spt_gen = 0;
575
576 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
577 if (seg->s_szc > amp->a_szc) {
578 amp->a_szc = seg->s_szc;
579 }
580 	ANON_LOCK_EXIT(&amp->a_rwlock);
581
582 /*
583 * Set policy to affect initial allocation of pages in
584 * anon_map_createpages()
585 */
586 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index,
587 NULL, 0, ptob(npages));
588
589 if (sptcargs->flags & SHM_PAGEABLE) {
590 size_t share_sz;
591 pgcnt_t new_npgs, more_pgs;
592 struct anon_hdr *nahp;
593 zone_t *zone;
594
595 share_sz = page_get_pagesize(seg->s_szc);
596 if (!IS_P2ALIGNED(amp->size, share_sz)) {
597 /*
598 			 * We are rounding up the size of the anon array
599 			 * on a 4M boundary because we always create 4M
600 			 * of pages when locking and faulting, so we
601 			 * don't have to check for all corner cases, e.g.
602 			 * whether there is enough space to allocate a
603 			 * 4M page.
604 */
605 new_npgs = btop(P2ROUNDUP(amp->size, share_sz));
606 more_pgs = new_npgs - npages;
607
608 /*
609 * The zone will never be NULL, as a fully created
610 * shm always has an owning zone.
611 */
612 zone = sp->shm_perm.ipc_zone_ref.zref_zone;
613 ASSERT(zone != NULL);
614 if (anon_resv_zone(ptob(more_pgs), zone) == 0) {
615 err = ENOMEM;
616 goto out4;
617 }
618
619 nahp = anon_create(new_npgs, ANON_SLEEP);
620 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
621 (void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages,
622 ANON_SLEEP);
623 anon_release(amp->ahp, npages);
624 amp->ahp = nahp;
625 ASSERT(amp->swresv == ptob(npages));
626 amp->swresv = amp->size = ptob(new_npgs);
627 			ANON_LOCK_EXIT(&amp->a_rwlock);
628 npages = new_npgs;
629 }
630
631 sptd->spt_ppa_lckcnt = kmem_zalloc(npages *
632 sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP);
633 sptd->spt_pcachecnt = 0;
634 sptd->spt_realsize = ptob(npages);
635 sptcargs->seg_spt = seg;
636 return (0);
637 }
638
639 /*
640 * get array of pages for each anon slot in amp
641 */
642 if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa,
643 seg, addr, S_CREATE, cred)) != 0)
644 goto out4;
645
646 mutex_enter(&sp->shm_mlock);
647
648 	/* May be partially locked, so count bytes to charge for locking */
649 for (i = 0; i < npages; i++)
650 if (ppa[i]->p_lckcnt == 0)
651 lockedbytes += PAGESIZE;
652
653 proj = sp->shm_perm.ipc_proj;
654
655 if (lockedbytes > 0) {
656 mutex_enter(&procp->p_lock);
657 if (rctl_incr_locked_mem(procp, proj, lockedbytes, 0)) {
658 mutex_exit(&procp->p_lock);
659 mutex_exit(&sp->shm_mlock);
660 for (i = 0; i < npages; i++)
661 page_unlock(ppa[i]);
662 err = ENOMEM;
663 goto out4;
664 }
665 mutex_exit(&procp->p_lock);
666 }
667
668 /*
669 	 * addr is the address corresponding to the first page in the ppa list
670 */
671 for (i = 0; i < npages; i++) {
672 /* attempt to lock all pages */
673 if (page_pp_lock(ppa[i], 0, 1) == 0) {
674 /*
675 * if unable to lock any page, unlock all
676 * of them and return error
677 */
678 for (j = 0; j < i; j++)
679 page_pp_unlock(ppa[j], 0, 1);
680 for (i = 0; i < npages; i++)
681 page_unlock(ppa[i]);
682 rctl_decr_locked_mem(NULL, proj, lockedbytes, 0);
683 mutex_exit(&sp->shm_mlock);
684 err = ENOMEM;
685 goto out4;
686 }
687 }
688 mutex_exit(&sp->shm_mlock);
689
690 /*
691 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
692 	 * for the entire life of the segment, for example, platforms
693 	 * that do not support Dynamic Reconfiguration.
694 */
695 hat_flags = HAT_LOAD_SHARE;
696 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))
697 hat_flags |= HAT_LOAD_LOCK;
698
699 /*
700 	 * Load translations one large page at a time
701 	 * to make sure we don't create mappings bigger than
702 	 * the segment's size code in case the underlying pages
703 	 * are shared with a segvn segment that uses a bigger
704 	 * size code than we do.
705 */
706 pgsz = page_get_pagesize(seg->s_szc);
707 pgcnt = page_get_pagecnt(seg->s_szc);
708 for (a = addr, pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) {
709 sz = MIN(pgsz, ptob(npages - pidx));
710 hat_memload_array(seg->s_as->a_hat, a, sz,
711 &ppa[pidx], sptd->spt_prot, hat_flags);
712 }
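	/*
	 * Worked example (illustrative sizes): with a 4M size code on an
	 * 8K base page size, pgsz is 4M and pgcnt is 512, so each pass of
	 * the loop above loads one 512-entry slice of ppa[]. The MIN() on
	 * sz keeps the final pass from mapping past the end of the amp
	 * when npages is not an exact multiple of pgcnt.
	 */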
713
714 /*
715 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
716 * we will leave the pages locked SE_SHARED for the life
717 * of the ISM segment. This will prevent any calls to
718 * hat_pageunload() on this ISM segment for those platforms.
719 */
720 if (!(hat_flags & HAT_LOAD_LOCK)) {
721 /*
722 * On platforms that support HAT_DYNAMIC_ISM_UNMAP,
723 * we no longer need to hold the SE_SHARED lock on the pages,
724 * since L_PAGELOCK and F_SOFTLOCK calls will grab the
725 * SE_SHARED lock on the pages as necessary.
726 */
727 for (i = 0; i < npages; i++)
728 page_unlock(ppa[i]);
729 }
730 sptd->spt_pcachecnt = 0;
731 kmem_free(ppa, ((sizeof (page_t *)) * npages));
732 sptd->spt_realsize = ptob(npages);
733 atomic_add_long(&spt_used, npages);
734 sptcargs->seg_spt = seg;
735 return (0);
736
737 out4:
738 seg->s_data = NULL;
739 kmem_free(vp, sizeof (*vp));
740 cv_destroy(&sptd->spt_cv);
741 out3:
742 mutex_destroy(&sptd->spt_lock);
743 if ((sptcargs->flags & SHM_PAGEABLE) == 0)
744 kmem_free(ppa, (sizeof (*ppa) * npages));
745 out2:
746 kmem_free(sptd, sizeof (*sptd));
747 out1:
748 if ((sptcargs->flags & SHM_PAGEABLE) == 0)
749 anon_swap_restore(npages);
750 return (err);
751 }
752
753 /*ARGSUSED*/
754 void
755 segspt_free_pages(struct seg *seg, caddr_t addr, size_t len)
756 {
757 struct page *pp;
758 struct spt_data *sptd = (struct spt_data *)seg->s_data;
759 pgcnt_t npages;
760 ulong_t anon_idx;
761 struct anon_map *amp;
762 struct anon *ap;
763 struct vnode *vp;
764 u_offset_t off;
765 uint_t hat_flags;
766 int root = 0;
767 pgcnt_t pgs, curnpgs = 0;
768 page_t *rootpp;
769 rctl_qty_t unlocked_bytes = 0;
770 kproject_t *proj;
771 kshmid_t *sp;
772
773 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
774
775 len = P2ROUNDUP(len, PAGESIZE);
776
777 npages = btop(len);
778
779 hat_flags = HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP;
780 if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) ||
781 (sptd->spt_flags & SHM_PAGEABLE)) {
782 hat_flags = HAT_UNLOAD_UNMAP;
783 }
784
785 hat_unload(seg->s_as->a_hat, addr, len, hat_flags);
786
787 amp = sptd->spt_amp;
788 if (sptd->spt_flags & SHM_PAGEABLE)
789 npages = btop(amp->size);
790
791 ASSERT(amp != NULL);
792
793 proj = NULL;
794 rootpp = NULL;
795 sp = NULL;
796 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
797 sp = amp->a_sp;
798 proj = sp->shm_perm.ipc_proj;
799 mutex_enter(&sp->shm_mlock);
800 }
801 for (anon_idx = 0; anon_idx < npages; anon_idx++) {
802 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
803 if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) {
804 panic("segspt_free_pages: null app");
805 /*NOTREACHED*/
806 }
807 } else {
808 if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx))
809 == NULL)
810 continue;
811 }
812 ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0);
813 swap_xlate(ap, &vp, &off);
814
815 /*
816 * If this platform supports HAT_DYNAMIC_ISM_UNMAP,
817 		 * the pages won't have the SE_SHARED lock held at this
818 		 * point.
819 		 *
820 		 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
821 		 * the pages are still held SE_SHARED locked from the
822 		 * original segspt_create().
823 		 *
824 		 * Our goal is to get an SE_EXCL lock on each page, remove
825 		 * the permanent lock on it and invalidate the page.
826 */
827 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
828 if (hat_flags == HAT_UNLOAD_UNMAP)
829 pp = page_lookup(vp, off, SE_EXCL);
830 else {
831 if ((pp = page_find(vp, off)) == NULL) {
832 panic("segspt_free_pages: "
833 "page not locked");
834 /*NOTREACHED*/
835 }
836 if (!page_tryupgrade(pp)) {
837 page_unlock(pp);
838 pp = page_lookup(vp, off, SE_EXCL);
839 }
840 }
841 if (pp == NULL) {
842 panic("segspt_free_pages: "
843 "page not in the system");
844 /*NOTREACHED*/
845 }
846 ASSERT(pp->p_lckcnt > 0);
847 page_pp_unlock(pp, 0, 1);
848 if (pp->p_lckcnt == 0)
849 unlocked_bytes += PAGESIZE;
850 } else {
851 if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL)
852 continue;
853 }
854 /*
855 * It's logical to invalidate the pages here as in most cases
856 * these were created by segspt.
857 */
858 if (pp->p_szc != 0) {
859 if (root == 0) {
860 ASSERT(curnpgs == 0);
861 root = 1;
862 rootpp = pp;
863 pgs = curnpgs = page_get_pagecnt(pp->p_szc);
864 ASSERT(pgs > 1);
865 ASSERT(IS_P2ALIGNED(pgs, pgs));
866 ASSERT(!(page_pptonum(pp) & (pgs - 1)));
867 curnpgs--;
868 } else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) {
869 ASSERT(curnpgs == 1);
870 ASSERT(page_pptonum(pp) ==
871 page_pptonum(rootpp) + (pgs - 1));
872 page_destroy_pages(rootpp);
873 root = 0;
874 curnpgs = 0;
875 } else {
876 ASSERT(curnpgs > 1);
877 ASSERT(page_pptonum(pp) ==
878 page_pptonum(rootpp) + (pgs - curnpgs));
879 curnpgs--;
880 }
881 } else {
882 if (root != 0 || curnpgs != 0) {
883 panic("segspt_free_pages: bad large page");
884 /*NOTREACHED*/
885 }
886 /*
887 * Before destroying the pages, we need to take care
888 * of the rctl locked memory accounting. For that
889 			 * we need to calculate the unlocked_bytes.
890 */
891 if (pp->p_lckcnt > 0)
892 unlocked_bytes += PAGESIZE;
893 /*LINTED: constant in conditional context */
894 VN_DISPOSE(pp, B_INVAL, 0, kcred);
895 }
896 }
897 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
898 if (unlocked_bytes > 0)
899 rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0);
900 mutex_exit(&sp->shm_mlock);
901 }
902 if (root != 0 || curnpgs != 0) {
903 panic("segspt_free_pages: bad large page");
904 /*NOTREACHED*/
905 }
906
907 /*
908 * mark that pages have been released
909 */
910 sptd->spt_realsize = 0;
911
912 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
913 atomic_add_long(&spt_used, -npages);
914 anon_swap_restore(npages);
915 }
916 }
917
918 /*
919 * Get memory allocation policy info for specified address in given segment
920 */
921 static lgrp_mem_policy_info_t *
922 segspt_getpolicy(struct seg *seg, caddr_t addr)
923 {
924 struct anon_map *amp;
925 ulong_t anon_index;
926 lgrp_mem_policy_info_t *policy_info;
927 struct spt_data *spt_data;
928
929 ASSERT(seg != NULL);
930
931 /*
932 * Get anon_map from segspt
933 *
934 * Assume that no lock needs to be held on anon_map, since
935 * it should be protected by its reference count which must be
936 * nonzero for an existing segment
937 * Need to grab readers lock on policy tree though
938 */
939 spt_data = (struct spt_data *)seg->s_data;
940 if (spt_data == NULL)
941 return (NULL);
942 amp = spt_data->spt_amp;
943 ASSERT(amp->refcnt != 0);
944
945 /*
946 * Get policy info
947 *
948 * Assume starting anon index of 0
949 */
950 anon_index = seg_page(seg, addr);
951 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
952
953 return (policy_info);
954 }
955
956 /*
957 * DISM only.
958 * Return locked pages over a given range.
959 *
960 * We will cache all DISM locked pages and save the pplist for the
961 * entire segment in the ppa field of the underlying DISM segment structure.
962 * Later, during a call to segspt_reclaim() we will use this ppa array
963 * to page_unlock() all of the pages and then we will free this ppa list.
964 */
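/*
 * Sketch of the expected calling pattern (hypothetical caller; the real
 * entry points are as_pagelock() and as_pageunlock(), which reach this
 * routine through the segment ops vector):
 *
 *	page_t **pl;
 *
 *	if (as_pagelock(as, &pl, uaddr, iolen, S_WRITE) == 0) {
 *		... do I/O directly against the locked pages ...
 *		as_pageunlock(as, pl, uaddr, iolen, S_WRITE);
 *	}
 *
 * L_PAGELOCK corresponds to the as_pagelock() side, L_PAGEUNLOCK to the
 * as_pageunlock() side; an ENOTSUP return sends the caller down the
 * slower F_SOFTLOCK path instead.
 */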
965 /*ARGSUSED*/
966 static int
967 segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len,
968 struct page ***ppp, enum lock_type type, enum seg_rw rw)
969 {
970 struct shm_data *shmd = (struct shm_data *)seg->s_data;
971 struct seg *sptseg = shmd->shm_sptseg;
972 struct spt_data *sptd = sptseg->s_data;
973 pgcnt_t pg_idx, npages, tot_npages, npgs;
974 struct page **pplist, **pl, **ppa, *pp;
975 struct anon_map *amp;
976 spgcnt_t an_idx;
977 int ret = ENOTSUP;
978 uint_t pl_built = 0;
979 struct anon *ap;
980 struct vnode *vp;
981 u_offset_t off;
982 pgcnt_t claim_availrmem = 0;
983 uint_t szc;
984
985 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
986 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
987
988 /*
989 * We want to lock/unlock the entire ISM segment. Therefore,
990 	 * we will be using the underlying sptseg and its base address
991 * and length for the caching arguments.
992 */
993 ASSERT(sptseg);
994 ASSERT(sptd);
995
996 pg_idx = seg_page(seg, addr);
997 npages = btopr(len);
998
999 /*
1000 	 * check if the request is larger than the number of pages covered
1001 * by amp
1002 */
1003 if (pg_idx + npages > btopr(sptd->spt_amp->size)) {
1004 *ppp = NULL;
1005 return (ENOTSUP);
1006 }
1007
1008 if (type == L_PAGEUNLOCK) {
1009 ASSERT(sptd->spt_ppa != NULL);
1010
1011 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
1012 sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1013
1014 /*
1015 * If someone is blocked while unmapping, we purge
1016 * segment page cache and thus reclaim pplist synchronously
1017 * without waiting for seg_pasync_thread. This speeds up
1018 * unmapping in cases where munmap(2) is called, while
1019 * raw async i/o is still in progress or where a thread
1020 * exits on data fault in a multithreaded application.
1021 */
1022 if ((sptd->spt_flags & DISM_PPA_CHANGED) ||
1023 (AS_ISUNMAPWAIT(seg->s_as) &&
1024 shmd->shm_softlockcnt > 0)) {
1025 segspt_purge(seg);
1026 }
1027 return (0);
1028 }
1029
1030 /* The L_PAGELOCK case ... */
1031
1032 if (sptd->spt_flags & DISM_PPA_CHANGED) {
1033 segspt_purge(seg);
1034 		/*
1035 		 * For DISM the ppa[] needs to be rebuilt since the
1036 		 * number of locked pages could have changed.
1037 		 */
1038 *ppp = NULL;
1039 return (ENOTSUP);
1040 }
1041
1042 /*
1043 * First try to find pages in segment page cache, without
1044 * holding the segment lock.
1045 */
1046 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1047 S_WRITE, SEGP_FORCE_WIRED);
1048 if (pplist != NULL) {
1049 ASSERT(sptd->spt_ppa != NULL);
1050 ASSERT(sptd->spt_ppa == pplist);
1051 ppa = sptd->spt_ppa;
1052 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
1053 if (ppa[an_idx] == NULL) {
1054 seg_pinactive(seg, NULL, seg->s_base,
1055 sptd->spt_amp->size, ppa,
1056 S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1057 *ppp = NULL;
1058 return (ENOTSUP);
1059 }
1060 if ((szc = ppa[an_idx]->p_szc) != 0) {
1061 npgs = page_get_pagecnt(szc);
1062 an_idx = P2ROUNDUP(an_idx + 1, npgs);
1063 } else {
1064 an_idx++;
1065 }
1066 }
1067 /*
1068 * Since we cache the entire DISM segment, we want to
1069 * set ppp to point to the first slot that corresponds
1070 * to the requested addr, i.e. pg_idx.
1071 */
1072 *ppp = &(sptd->spt_ppa[pg_idx]);
1073 return (0);
1074 }
1075
1076 mutex_enter(&sptd->spt_lock);
1077 /*
1078 * try to find pages in segment page cache with mutex
1079 */
1080 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1081 S_WRITE, SEGP_FORCE_WIRED);
1082 if (pplist != NULL) {
1083 ASSERT(sptd->spt_ppa != NULL);
1084 ASSERT(sptd->spt_ppa == pplist);
1085 ppa = sptd->spt_ppa;
1086 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
1087 if (ppa[an_idx] == NULL) {
1088 mutex_exit(&sptd->spt_lock);
1089 seg_pinactive(seg, NULL, seg->s_base,
1090 sptd->spt_amp->size, ppa,
1091 S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1092 *ppp = NULL;
1093 return (ENOTSUP);
1094 }
1095 if ((szc = ppa[an_idx]->p_szc) != 0) {
1096 npgs = page_get_pagecnt(szc);
1097 an_idx = P2ROUNDUP(an_idx + 1, npgs);
1098 } else {
1099 an_idx++;
1100 }
1101 }
1102 /*
1103 * Since we cache the entire DISM segment, we want to
1104 * set ppp to point to the first slot that corresponds
1105 * to the requested addr, i.e. pg_idx.
1106 */
1107 mutex_exit(&sptd->spt_lock);
1108 *ppp = &(sptd->spt_ppa[pg_idx]);
1109 return (0);
1110 }
1111 if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
1112 SEGP_FORCE_WIRED) == SEGP_FAIL) {
1113 mutex_exit(&sptd->spt_lock);
1114 *ppp = NULL;
1115 return (ENOTSUP);
1116 }
1117
1118 /*
1119 * No need to worry about protections because DISM pages are always rw.
1120 */
1121 pl = pplist = NULL;
1122 amp = sptd->spt_amp;
1123
1124 /*
1125 * Do we need to build the ppa array?
1126 */
1127 if (sptd->spt_ppa == NULL) {
1128 pgcnt_t lpg_cnt = 0;
1129
1130 pl_built = 1;
1131 tot_npages = btopr(sptd->spt_amp->size);
1132
1133 ASSERT(sptd->spt_pcachecnt == 0);
1134 pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP);
1135 pl = pplist;
1136
1137 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1138 for (an_idx = 0; an_idx < tot_npages; ) {
1139 ap = anon_get_ptr(amp->ahp, an_idx);
1140 /*
1141 			 * Cache only mlocked pages. For large pages,
1142 			 * if one (constituent) page is mlocked then
1143 			 * all constituent pages of that large page
1144 			 * are cached as well. This allows for quick
1145 			 * lookups in the ppa array.
1146 */
1147 if ((ap != NULL) && (lpg_cnt != 0 ||
1148 (sptd->spt_ppa_lckcnt[an_idx] != 0))) {
1149
1150 swap_xlate(ap, &vp, &off);
1151 pp = page_lookup(vp, off, SE_SHARED);
1152 ASSERT(pp != NULL);
1153 if (lpg_cnt == 0) {
1154 lpg_cnt++;
1155 /*
1156 * For a small page, we are done --
1157 					 * lpg_cnt is reset to 0 below.
1158 *
1159 * For a large page, we are guaranteed
1160 * to find the anon structures of all
1161 * constituent pages and a non-zero
1162 * lpg_cnt ensures that we don't test
1163 * for mlock for these. We are done
1164 					 * when lpg_cnt reaches (npgs + 1).
1165 * If we are not the first constituent
1166 * page, restart at the first one.
1167 */
1168 npgs = page_get_pagecnt(pp->p_szc);
1169 if (!IS_P2ALIGNED(an_idx, npgs)) {
1170 an_idx = P2ALIGN(an_idx, npgs);
1171 page_unlock(pp);
1172 continue;
1173 }
1174 }
1175 if (++lpg_cnt > npgs)
1176 lpg_cnt = 0;
1177
1178 /*
1179 * availrmem is decremented only
1180 * for unlocked pages
1181 */
1182 if (sptd->spt_ppa_lckcnt[an_idx] == 0)
1183 claim_availrmem++;
1184 pplist[an_idx] = pp;
1185 }
1186 an_idx++;
1187 }
1188 		ANON_LOCK_EXIT(&amp->a_rwlock);
1189
1190 if (claim_availrmem) {
1191 mutex_enter(&freemem_lock);
1192 if (availrmem < tune.t_minarmem + claim_availrmem) {
1193 mutex_exit(&freemem_lock);
1194 ret = ENOTSUP;
1195 claim_availrmem = 0;
1196 goto insert_fail;
1197 } else {
1198 availrmem -= claim_availrmem;
1199 }
1200 mutex_exit(&freemem_lock);
1201 }
1202
1203 sptd->spt_ppa = pl;
1204 } else {
1205 /*
1206 * We already have a valid ppa[].
1207 */
1208 pl = sptd->spt_ppa;
1209 }
1210
1211 ASSERT(pl != NULL);
1212
1213 ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1214 sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1215 segspt_reclaim);
1216 if (ret == SEGP_FAIL) {
1217 /*
1218 * seg_pinsert failed. We return
1219 * ENOTSUP, so that the as_pagelock() code will
1220 * then try the slower F_SOFTLOCK path.
1221 */
1222 if (pl_built) {
1223 /*
1224 * No one else has referenced the ppa[].
1225 * We created it and we need to destroy it.
1226 */
1227 sptd->spt_ppa = NULL;
1228 }
1229 ret = ENOTSUP;
1230 goto insert_fail;
1231 }
1232
1233 /*
1234 * In either case, we increment softlockcnt on the 'real' segment.
1235 */
1236 sptd->spt_pcachecnt++;
1237 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1238
1239 ppa = sptd->spt_ppa;
1240 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
1241 if (ppa[an_idx] == NULL) {
1242 mutex_exit(&sptd->spt_lock);
1243 seg_pinactive(seg, NULL, seg->s_base,
1244 sptd->spt_amp->size,
1245 pl, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1246 *ppp = NULL;
1247 return (ENOTSUP);
1248 }
1249 if ((szc = ppa[an_idx]->p_szc) != 0) {
1250 npgs = page_get_pagecnt(szc);
1251 an_idx = P2ROUNDUP(an_idx + 1, npgs);
1252 } else {
1253 an_idx++;
1254 }
1255 }
1256 /*
1257 * We can now drop the sptd->spt_lock since the ppa[]
1258 	 * exists and we have incremented pcachecnt.
1259 */
1260 mutex_exit(&sptd->spt_lock);
1261
1262 /*
1263 * Since we cache the entire segment, we want to
1264 * set ppp to point to the first slot that corresponds
1265 * to the requested addr, i.e. pg_idx.
1266 */
1267 *ppp = &(sptd->spt_ppa[pg_idx]);
1268 return (0);
1269
1270 insert_fail:
1271 /*
1272 * We will only reach this code if we tried and failed.
1273 *
1274 * And we can drop the lock on the dummy seg, once we've failed
1275 * to set up a new ppa[].
1276 */
1277 mutex_exit(&sptd->spt_lock);
1278
1279 if (pl_built) {
1280 if (claim_availrmem) {
1281 mutex_enter(&freemem_lock);
1282 availrmem += claim_availrmem;
1283 mutex_exit(&freemem_lock);
1284 }
1285
1286 /*
1287 * We created pl and we need to destroy it.
1288 */
1289 pplist = pl;
1290 for (an_idx = 0; an_idx < tot_npages; an_idx++) {
1291 if (pplist[an_idx] != NULL)
1292 page_unlock(pplist[an_idx]);
1293 }
1294 kmem_free(pl, sizeof (page_t *) * tot_npages);
1295 }
1296
1297 if (shmd->shm_softlockcnt <= 0) {
1298 if (AS_ISUNMAPWAIT(seg->s_as)) {
1299 mutex_enter(&seg->s_as->a_contents);
1300 if (AS_ISUNMAPWAIT(seg->s_as)) {
1301 AS_CLRUNMAPWAIT(seg->s_as);
1302 cv_broadcast(&seg->s_as->a_cv);
1303 }
1304 mutex_exit(&seg->s_as->a_contents);
1305 }
1306 }
1307 *ppp = NULL;
1308 return (ret);
1309 }
1310
1311
1312
1313 /*
1314 * return locked pages over a given range.
1315 *
1316 * We will cache the entire ISM segment and save the pplist for the
1317 * entire segment in the ppa field of the underlying ISM segment structure.
1318 * Later, during a call to segspt_reclaim() we will use this ppa array
1319 * to page_unlock() all of the pages and then we will free this ppa list.
1320 */
1321 /*ARGSUSED*/
1322 static int
1323 segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len,
1324 struct page ***ppp, enum lock_type type, enum seg_rw rw)
1325 {
1326 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1327 struct seg *sptseg = shmd->shm_sptseg;
1328 struct spt_data *sptd = sptseg->s_data;
1329 pgcnt_t np, page_index, npages;
1330 caddr_t a, spt_base;
1331 struct page **pplist, **pl, *pp;
1332 struct anon_map *amp;
1333 ulong_t anon_index;
1334 int ret = ENOTSUP;
1335 uint_t pl_built = 0;
1336 struct anon *ap;
1337 struct vnode *vp;
1338 u_offset_t off;
1339
1340 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
1341 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
1342
1343
1344 /*
1345 * We want to lock/unlock the entire ISM segment. Therefore,
1346 	 * we will be using the underlying sptseg and its base address
1347 * and length for the caching arguments.
1348 */
1349 ASSERT(sptseg);
1350 ASSERT(sptd);
1351
1352 if (sptd->spt_flags & SHM_PAGEABLE) {
1353 return (segspt_dismpagelock(seg, addr, len, ppp, type, rw));
1354 }
1355
1356 page_index = seg_page(seg, addr);
1357 npages = btopr(len);
1358
1359 /*
1360 	 * check if the request is larger than the number of pages covered
1361 * by amp
1362 */
1363 if (page_index + npages > btopr(sptd->spt_amp->size)) {
1364 *ppp = NULL;
1365 return (ENOTSUP);
1366 }
1367
1368 if (type == L_PAGEUNLOCK) {
1369
1370 ASSERT(sptd->spt_ppa != NULL);
1371
1372 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
1373 sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1374
1375 /*
1376 * If someone is blocked while unmapping, we purge
1377 * segment page cache and thus reclaim pplist synchronously
1378 * without waiting for seg_pasync_thread. This speeds up
1379 * unmapping in cases where munmap(2) is called, while
1380 * raw async i/o is still in progress or where a thread
1381 * exits on data fault in a multithreaded application.
1382 */
1383 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) {
1384 segspt_purge(seg);
1385 }
1386 return (0);
1387 }
1388
1389 /* The L_PAGELOCK case... */
1390
1391 /*
1392 * First try to find pages in segment page cache, without
1393 * holding the segment lock.
1394 */
1395 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1396 S_WRITE, SEGP_FORCE_WIRED);
1397 if (pplist != NULL) {
1398 ASSERT(sptd->spt_ppa == pplist);
1399 ASSERT(sptd->spt_ppa[page_index]);
1400 /*
1401 * Since we cache the entire ISM segment, we want to
1402 * set ppp to point to the first slot that corresponds
1403 * to the requested addr, i.e. page_index.
1404 */
1405 *ppp = &(sptd->spt_ppa[page_index]);
1406 return (0);
1407 }
1408
1409 mutex_enter(&sptd->spt_lock);
1410
1411 /*
1412 * try to find pages in segment page cache
1413 */
1414 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1415 S_WRITE, SEGP_FORCE_WIRED);
1416 if (pplist != NULL) {
1417 ASSERT(sptd->spt_ppa == pplist);
1418 /*
1419 * Since we cache the entire segment, we want to
1420 * set ppp to point to the first slot that corresponds
1421 * to the requested addr, i.e. page_index.
1422 */
1423 mutex_exit(&sptd->spt_lock);
1424 *ppp = &(sptd->spt_ppa[page_index]);
1425 return (0);
1426 }
1427
1428 if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
1429 SEGP_FORCE_WIRED) == SEGP_FAIL) {
1430 mutex_exit(&sptd->spt_lock);
1431 *ppp = NULL;
1432 return (ENOTSUP);
1433 }
1434
1435 /*
1436 * No need to worry about protections because ISM pages
1437 * are always rw.
1438 */
1439 pl = pplist = NULL;
1440
1441 /*
1442 * Do we need to build the ppa array?
1443 */
1444 if (sptd->spt_ppa == NULL) {
1445 ASSERT(sptd->spt_ppa == pplist);
1446
1447 spt_base = sptseg->s_base;
1448 pl_built = 1;
1449
1450 /*
1451 * availrmem is decremented once during anon_swap_adjust()
1452 * and is incremented during the anon_unresv(), which is
1453 * called from shm_rm_amp() when the segment is destroyed.
1454 */
1455 amp = sptd->spt_amp;
1456 ASSERT(amp != NULL);
1457
1458 /* pcachecnt is protected by sptd->spt_lock */
1459 ASSERT(sptd->spt_pcachecnt == 0);
1460 pplist = kmem_zalloc(sizeof (page_t *)
1461 * btopr(sptd->spt_amp->size), KM_SLEEP);
1462 pl = pplist;
1463
1464 anon_index = seg_page(sptseg, spt_base);
1465
1466 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1467 for (a = spt_base; a < (spt_base + sptd->spt_amp->size);
1468 a += PAGESIZE, anon_index++, pplist++) {
1469 ap = anon_get_ptr(amp->ahp, anon_index);
1470 ASSERT(ap != NULL);
1471 swap_xlate(ap, &vp, &off);
1472 pp = page_lookup(vp, off, SE_SHARED);
1473 ASSERT(pp != NULL);
1474 *pplist = pp;
1475 }
1476 		ANON_LOCK_EXIT(&amp->a_rwlock);
1477
1478 if (a < (spt_base + sptd->spt_amp->size)) {
1479 ret = ENOTSUP;
1480 goto insert_fail;
1481 }
1482 sptd->spt_ppa = pl;
1483 } else {
1484 /*
1485 * We already have a valid ppa[].
1486 */
1487 pl = sptd->spt_ppa;
1488 }
1489
1490 ASSERT(pl != NULL);
1491
1492 ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1493 sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1494 segspt_reclaim);
1495 if (ret == SEGP_FAIL) {
1496 /*
1497 * seg_pinsert failed. We return
1498 * ENOTSUP, so that the as_pagelock() code will
1499 * then try the slower F_SOFTLOCK path.
1500 */
1501 if (pl_built) {
1502 /*
1503 * No one else has referenced the ppa[].
1504 * We created it and we need to destroy it.
1505 */
1506 sptd->spt_ppa = NULL;
1507 }
1508 ret = ENOTSUP;
1509 goto insert_fail;
1510 }
1511
1512 /*
1513 * In either case, we increment softlockcnt on the 'real' segment.
1514 */
1515 sptd->spt_pcachecnt++;
1516 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1517
1518 /*
1519 * We can now drop the sptd->spt_lock since the ppa[]
1520 	 * exists and we have incremented pcachecnt.
1521 */
1522 mutex_exit(&sptd->spt_lock);
1523
1524 /*
1525 * Since we cache the entire segment, we want to
1526 * set ppp to point to the first slot that corresponds
1527 * to the requested addr, i.e. page_index.
1528 */
1529 *ppp = &(sptd->spt_ppa[page_index]);
1530 return (0);
1531
1532 insert_fail:
1533 /*
1534 * We will only reach this code if we tried and failed.
1535 *
1536 * And we can drop the lock on the dummy seg, once we've failed
1537 * to set up a new ppa[].
1538 */
1539 mutex_exit(&sptd->spt_lock);
1540
1541 if (pl_built) {
1542 /*
1543 * We created pl and we need to destroy it.
1544 */
1545 pplist = pl;
1546 np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT);
1547 while (np) {
1548 page_unlock(*pplist);
1549 np--;
1550 pplist++;
1551 }
1552 kmem_free(pl, sizeof (page_t *) * btopr(sptd->spt_amp->size));
1553 }
1554 if (shmd->shm_softlockcnt <= 0) {
1555 if (AS_ISUNMAPWAIT(seg->s_as)) {
1556 mutex_enter(&seg->s_as->a_contents);
1557 if (AS_ISUNMAPWAIT(seg->s_as)) {
1558 AS_CLRUNMAPWAIT(seg->s_as);
1559 cv_broadcast(&seg->s_as->a_cv);
1560 }
1561 mutex_exit(&seg->s_as->a_contents);
1562 }
1563 }
1564 *ppp = NULL;
1565 return (ret);
1566 }
1567
1568 /*
1569 * purge any cached pages in the I/O page cache
1570 */
1571 static void
1572 segspt_purge(struct seg *seg)
1573 {
1574 seg_ppurge(seg, NULL, SEGP_FORCE_WIRED);
1575 }
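/*
 * Note that seg_ppurge() invokes segspt_reclaim() for any cached pplist,
 * so a purge both drops the cached ppa[] and unlocks its pages
 * synchronously instead of waiting for the pcache async thread.
 */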
1576
1577 static int
1578 segspt_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
1579 enum seg_rw rw, int async)
1580 {
1581 struct seg *seg = (struct seg *)ptag;
1582 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1583 struct seg *sptseg;
1584 struct spt_data *sptd;
1585 pgcnt_t npages, i, free_availrmem = 0;
1586 int done = 0;
1587
1588 #ifdef lint
1589 addr = addr;
1590 #endif
1591 sptseg = shmd->shm_sptseg;
1592 sptd = sptseg->s_data;
1593 npages = (len >> PAGESHIFT);
1594 ASSERT(npages);
1595 ASSERT(sptd->spt_pcachecnt != 0);
1596 ASSERT(sptd->spt_ppa == pplist);
1597 ASSERT(npages == btopr(sptd->spt_amp->size));
1598 ASSERT(async || AS_LOCK_HELD(seg->s_as));
1599
1600 /*
1601 * Acquire the lock on the dummy seg and destroy the
1602 * ppa array IF this is the last pcachecnt.
1603 */
1604 mutex_enter(&sptd->spt_lock);
1605 if (--sptd->spt_pcachecnt == 0) {
1606 for (i = 0; i < npages; i++) {
1607 if (pplist[i] == NULL) {
1608 continue;
1609 }
1610 if (rw == S_WRITE) {
1611 hat_setrefmod(pplist[i]);
1612 } else {
1613 hat_setref(pplist[i]);
1614 }
1615 if ((sptd->spt_flags & SHM_PAGEABLE) &&
1616 (sptd->spt_ppa_lckcnt[i] == 0))
1617 free_availrmem++;
1618 page_unlock(pplist[i]);
1619 }
1620 if ((sptd->spt_flags & SHM_PAGEABLE) && free_availrmem) {
1621 mutex_enter(&freemem_lock);
1622 availrmem += free_availrmem;
1623 mutex_exit(&freemem_lock);
1624 }
1625 /*
1626 		 * Since we want to cache/uncache the entire ISM segment,
1627 		 * we will track the pplist in a segspt-specific field,
1628 		 * ppa, which is initialized at the time we add an entry to
1629 * the cache.
1630 */
1631 ASSERT(sptd->spt_pcachecnt == 0);
1632 kmem_free(pplist, sizeof (page_t *) * npages);
1633 sptd->spt_ppa = NULL;
1634 sptd->spt_flags &= ~DISM_PPA_CHANGED;
1635 sptd->spt_gen++;
1636 cv_broadcast(&sptd->spt_cv);
1637 done = 1;
1638 }
1639 mutex_exit(&sptd->spt_lock);
1640
1641 	/*
1642 	 * If we are the pcache async thread or called via seg_ppurge_wiredpp()
1643 	 * we may not hold the AS lock (in this case the async argument is not
1644 	 * 0). This means that if softlockcnt drops to 0 after the decrement
1645 	 * below, the address space may get freed. We can't allow that since
1646 	 * after the softlock decrement to 0 we still need to access the as
1647 	 * structure for a possible wakeup of unmap waiters. To prevent the
1648 	 * disappearance of the as we take this segment's shm_segfree_syncmtx.
1649 	 * segspt_shmfree() also takes this mutex as a barrier to make sure
1650 	 * this routine completes before the segment is freed.
1651 	 *
1652 	 * The second complication we have to deal with in the async case is
1653 	 * the possibility of a missed wakeup of the unmap wait thread. When
1654 	 * we don't hold the as lock here we may take the a_contents lock
1655 	 * before the unmap wait thread that was first to see that softlockcnt
1656 	 * was still not 0. As a result we would fail to wake up the unmap
1657 	 * wait thread. To avoid this race we set the nounmapwait flag in the
1658 	 * as structure if we drop softlockcnt to 0 when async is not 0. The
1659 	 * unmapwait thread will not block if this flag is set.
1660 	 */
1661 if (async)
1662 mutex_enter(&shmd->shm_segfree_syncmtx);
1663
1664 /*
1665 * Now decrement softlockcnt.
1666 */
1667 ASSERT(shmd->shm_softlockcnt > 0);
1668 atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1669
1670 if (shmd->shm_softlockcnt <= 0) {
1671 if (async || AS_ISUNMAPWAIT(seg->s_as)) {
1672 mutex_enter(&seg->s_as->a_contents);
1673 if (async)
1674 AS_SETNOUNMAPWAIT(seg->s_as);
1675 if (AS_ISUNMAPWAIT(seg->s_as)) {
1676 AS_CLRUNMAPWAIT(seg->s_as);
1677 cv_broadcast(&seg->s_as->a_cv);
1678 }
1679 mutex_exit(&seg->s_as->a_contents);
1680 }
1681 }
1682
1683 if (async)
1684 mutex_exit(&shmd->shm_segfree_syncmtx);
1685
1686 return (done);
1687 }
1688
1689 /*
1690 * Do a F_SOFTUNLOCK call over the range requested.
1691 * The range must have already been F_SOFTLOCK'ed.
1692 *
1693 * The calls to acquire and release the anon map lock mutex were
1694 * removed in order to avoid a deadly embrace during a DR
1695  * memory delete operation. (E.g. DR blocks while waiting for an
1696 * exclusive lock on a page that is being used for kaio; the
1697 * thread that will complete the kaio and call segspt_softunlock
1698 * blocks on the anon map lock; another thread holding the anon
1699 * map lock blocks on another page lock via the segspt_shmfault
1700 * -> page_lookup -> page_lookup_create -> page_lock_es code flow.)
1701 *
1702 * The appropriateness of the removal is based upon the following:
1703 * 1. If we are holding a segment's reader lock and the page is held
1704 * shared, then the corresponding element in anonmap which points to
1705 * anon struct cannot change and there is no need to acquire the
1706 * anonymous map lock.
1707 * 2. Threads in segspt_softunlock have a reader lock on the segment
1708 * and already have the shared page lock, so we are guaranteed that
1709 * the anon map slot cannot change and therefore can call anon_get_ptr()
1710 * without grabbing the anonymous map lock.
1711 * 3. Threads that softlock a shared page break copy-on-write, even if
1712  * it's a read. Thus cow faults can be ignored with respect to soft
1713 * unlocking, since the breaking of cow means that the anon slot(s) will
1714 * not be shared.
1715 */
1716 static void
1717 segspt_softunlock(struct seg *seg, caddr_t sptseg_addr,
1718 size_t len, enum seg_rw rw)
1719 {
1720 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1721 struct seg *sptseg;
1722 struct spt_data *sptd;
1723 page_t *pp;
1724 caddr_t adr;
1725 struct vnode *vp;
1726 u_offset_t offset;
1727 ulong_t anon_index;
1728 struct anon_map *amp; /* XXX - for locknest */
1729 struct anon *ap = NULL;
1730 pgcnt_t npages;
1731
1732 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
1733
1734 sptseg = shmd->shm_sptseg;
1735 sptd = sptseg->s_data;
1736
1737 /*
1738 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
1739 * and therefore their pages are SE_SHARED locked
1740 * for the entire life of the segment.
1741 */
1742 if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) &&
1743 ((sptd->spt_flags & SHM_PAGEABLE) == 0)) {
1744 goto softlock_decrement;
1745 }
1746
1747 /*
1748 * Any thread is free to do a page_find and
1749 * page_unlock() on the pages within this seg.
1750 *
1751 * We are already holding the as->a_lock on the user's
1752 * real segment, but we need to hold the a_lock on the
1753 * underlying dummy as. This is mostly to satisfy the
1754 * underlying HAT layer.
1755 */
1756 AS_LOCK_ENTER(sptseg->s_as, RW_READER);
1757 hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len);
1758 AS_LOCK_EXIT(sptseg->s_as);
1759
1760 amp = sptd->spt_amp;
1761 ASSERT(amp != NULL);
1762 anon_index = seg_page(sptseg, sptseg_addr);
1763
1764 for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) {
1765 ap = anon_get_ptr(amp->ahp, anon_index++);
1766 ASSERT(ap != NULL);
1767 swap_xlate(ap, &vp, &offset);
1768
1769 /*
1770 * Use page_find() instead of page_lookup() to
1771 * find the page since we know that it has a
1772 * "shared" lock.
1773 */
1774 pp = page_find(vp, offset);
1775 ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1));
1776 if (pp == NULL) {
1777 panic("segspt_softunlock: "
1778 "addr %p, ap %p, vp %p, off %llx",
1779 (void *)adr, (void *)ap, (void *)vp, offset);
1780 /*NOTREACHED*/
1781 }
1782
1783 if (rw == S_WRITE) {
1784 hat_setrefmod(pp);
1785 } else if (rw != S_OTHER) {
1786 hat_setref(pp);
1787 }
1788 page_unlock(pp);
1789 }
1790
1791 softlock_decrement:
1792 npages = btopr(len);
1793 ASSERT(shmd->shm_softlockcnt >= npages);
1794 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages);
1795 if (shmd->shm_softlockcnt == 0) {
1796 /*
1797 		 * All SOFTLOCKs are gone. Wake up any waiting
1798 * unmappers so they can try again to unmap.
1799 * Check for waiters first without the mutex
1800 * held so we don't always grab the mutex on
1801 * softunlocks.
1802 */
1803 if (AS_ISUNMAPWAIT(seg->s_as)) {
1804 mutex_enter(&seg->s_as->a_contents);
1805 if (AS_ISUNMAPWAIT(seg->s_as)) {
1806 AS_CLRUNMAPWAIT(seg->s_as);
1807 cv_broadcast(&seg->s_as->a_cv);
1808 }
1809 mutex_exit(&seg->s_as->a_contents);
1810 }
1811 }
1812 }
1813
1814 int
1815 segspt_shmattach(struct seg **segpp, void *argsp)
1816 {
1817 struct seg *seg = *segpp;
1818 struct shm_data *shmd_arg = (struct shm_data *)argsp;
1819 struct shm_data *shmd;
1820 struct anon_map *shm_amp = shmd_arg->shm_amp;
1821 struct spt_data *sptd;
1822 int error = 0;
1823
1824 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
1825
1826 shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP);
1827 if (shmd == NULL)
1828 return (ENOMEM);
1829
1830 shmd->shm_sptas = shmd_arg->shm_sptas;
1831 shmd->shm_amp = shm_amp;
1832 shmd->shm_sptseg = shmd_arg->shm_sptseg;
1833
1834 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0,
1835 NULL, 0, seg->s_size);
1836
1837 mutex_init(&shmd->shm_segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
1838
1839 seg->s_data = (void *)shmd;
1840 seg->s_ops = &segspt_shmops;
1841 seg->s_szc = shmd->shm_sptseg->s_szc;
1842 sptd = shmd->shm_sptseg->s_data;
1843
1844 if (sptd->spt_flags & SHM_PAGEABLE) {
1845 if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size),
1846 KM_NOSLEEP)) == NULL) {
1847 seg->s_data = (void *)NULL;
1848 kmem_free(shmd, (sizeof (*shmd)));
1849 return (ENOMEM);
1850 }
1851 shmd->shm_lckpgs = 0;
1852 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
1853 if ((error = hat_share(seg->s_as->a_hat, seg->s_base,
1854 shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1855 seg->s_size, seg->s_szc)) != 0) {
1856 kmem_free(shmd->shm_vpage,
1857 btopr(shm_amp->size));
1858 }
1859 }
1860 } else {
1861 error = hat_share(seg->s_as->a_hat, seg->s_base,
1862 shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1863 seg->s_size, seg->s_szc);
1864 }
1865 if (error) {
1866 seg->s_szc = 0;
1867 seg->s_data = (void *)NULL;
1868 kmem_free(shmd, (sizeof (*shmd)));
1869 } else {
1870 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1871 shm_amp->refcnt++;
1872 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1873 }
1874 return (error);
1875 }
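/*
 * A sketch of how this attach is driven (assuming a shmat()-style caller;
 * the field names match struct shm_data as used above, everything else is
 * illustrative):
 *
 *	struct shm_data ssd;
 *
 *	ssd.shm_sptas = sptseg->s_as;	-- the dummy as from sptcreate()
 *	ssd.shm_amp = amp;
 *	ssd.shm_sptseg = sptseg;
 *	error = as_map(curproc->p_as, attach_addr, size,
 *	    segspt_shmattach, &ssd);
 *
 * as_map() allocates the user-visible segment and hands it to
 * segspt_shmattach(), which shares the translations of the underlying
 * spt segment via hat_share().
 */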
1876
1877 int
1878 segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize)
1879 {
1880 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1881 int reclaim = 1;
1882
1883 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
1884 retry:
1885 if (shmd->shm_softlockcnt > 0) {
1886 if (reclaim == 1) {
1887 segspt_purge(seg);
1888 reclaim = 0;
1889 goto retry;
1890 }
1891 return (EAGAIN);
1892 }
1893
1894 if (ssize != seg->s_size) {
1895 #ifdef DEBUG
1896 cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n",
1897 ssize, seg->s_size);
1898 #endif
1899 return (EINVAL);
1900 }
1901
1902 (void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, MC_UNLOCK,
1903 NULL, 0);
1904 hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc);
1905
1906 seg_free(seg);
1907
1908 return (0);
1909 }
1910
1911 void
1912 segspt_shmfree(struct seg *seg)
1913 {
1914 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1915 struct anon_map *shm_amp = shmd->shm_amp;
1916
1917 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
1918
1919 (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0,
1920 MC_UNLOCK, NULL, 0);
1921
1922 /*
1923 * Need to increment refcnt when attaching
1924 * and decrement when detaching because of dup().
1925 */
1926 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1927 shm_amp->refcnt--;
1928 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1929
1930 if (shmd->shm_vpage) { /* only for DISM */
1931 kmem_free(shmd->shm_vpage, btopr(shm_amp->size));
1932 shmd->shm_vpage = NULL;
1933 }
1934
1935 /*
1936 * Take shm_segfree_syncmtx lock to let segspt_reclaim() finish if it's
1937 * still working with this segment without holding as lock.
1938 */
1939 ASSERT(shmd->shm_softlockcnt == 0);
1940 mutex_enter(&shmd->shm_segfree_syncmtx);
1941 mutex_destroy(&shmd->shm_segfree_syncmtx);
1942
1943 kmem_free(shmd, sizeof (*shmd));
1944 }
1945
1946 /*ARGSUSED*/
1947 int
1948 segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
1949 {
1950 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
1951
1952 /*
1953 	 * A shared page table is more than a shared mapping.
1954 	 * An individual process sharing the page tables can't change prot
1955 	 * because there is only one set of page tables.
1956 	 * This will be allowed once private page tables are
1957 	 * supported.
1958 */
1959 /* need to return correct status error? */
1960 return (0);
1961 }
1962
1963
1964 faultcode_t
1965 segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr,
1966 size_t len, enum fault_type type, enum seg_rw rw)
1967 {
1968 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1969 struct seg *sptseg = shmd->shm_sptseg;
1970 struct as *curspt = shmd->shm_sptas;
1971 struct spt_data *sptd = sptseg->s_data;
1972 pgcnt_t npages;
1973 size_t size;
1974 caddr_t segspt_addr, shm_addr;
1975 page_t **ppa;
1976 int i;
1977 ulong_t an_idx = 0;
1978 int err = 0;
1979 int dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0);
1980 size_t pgsz;
1981 pgcnt_t pgcnt;
1982 caddr_t a;
1983 pgcnt_t pidx;
1984
1985 #ifdef lint
1986 hat = hat;
1987 #endif
1988 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
1989
1990 /*
1991 * Because of the way spt is implemented
1992 * the realsize of the segment does not have to be
1993 * equal to the segment size itself. The segment size is
1994 * often in multiples of a page size larger than PAGESIZE.
1995 * The realsize is rounded up to the nearest PAGESIZE
1996 * based on what the user requested. This is a bit of
1997 * ugliness that is historical but not easily fixed
1998 * without re-designing the higher levels of ISM.
1999 */
2000 ASSERT(addr >= seg->s_base);
2001 if (((addr + len) - seg->s_base) > sptd->spt_realsize)
2002 return (FC_NOMAP);
2003 /*
2004 * For all of the following cases except F_PROT, we need to
2005 * make any necessary adjustments to addr and len
2006 * and get all of the necessary page_t's into an array called ppa[].
2007 *
2008 * The code in shmat() forces base addr and len of ISM segment
2009 * to be aligned to largest page size supported. Therefore,
2010 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
2011 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
2012 * in large pagesize chunks, or else we will screw up the HAT
2013 * layer by calling hat_memload_array() with differing page sizes
2014 * over a given virtual range.
2015 */
2016 pgsz = page_get_pagesize(sptseg->s_szc);
2017 pgcnt = page_get_pagecnt(sptseg->s_szc);
2018 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
2019 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
2020 npages = btopr(size);
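/*
 * Illustrative example (assuming a 4 MB underlying page size and 4 KB
 * PAGESIZE): a fault at seg->s_base + 0x5000 for len 0x2000 is widened
 * to shm_addr = seg->s_base and size = 4 MB, i.e. npages = 1024, so the
 * whole large page is loaded with a single hat_memload_array() call.
 */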
2021
2022 /*
2023 * Now we need to convert from addr in segshm to addr in segspt.
2024 */
2025 an_idx = seg_page(seg, shm_addr);
2026 segspt_addr = sptseg->s_base + ptob(an_idx);
2027
2028 ASSERT((segspt_addr + ptob(npages)) <=
2029 (sptseg->s_base + sptd->spt_realsize));
2030 ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size));
2031
2032 switch (type) {
2033
2034 case F_SOFTLOCK:
2035
2036 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
2037 /*
2038 * Fall through to the F_INVAL case to load up the hat layer
2039 * entries with the HAT_LOAD_LOCK flag.
2040 */
2041 /* FALLTHRU */
2042 case F_INVAL:
2043
2044 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
2045 return (FC_NOMAP);
2046
2047 ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
2048
2049 err = spt_anon_getpages(sptseg, segspt_addr, size, ppa);
2050 if (err != 0) {
2051 if (type == F_SOFTLOCK) {
2052 atomic_add_long((ulong_t *)(
2053 &(shmd->shm_softlockcnt)), -npages);
2054 }
2055 goto dism_err;
2056 }
2057 AS_LOCK_ENTER(sptseg->s_as, RW_READER);
2058 a = segspt_addr;
2059 pidx = 0;
2060 if (type == F_SOFTLOCK) {
2061
2062 /*
2063 * Load up the translation keeping it
2064 * locked and don't unlock the page.
2065 */
2066 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2067 hat_memload_array(sptseg->s_as->a_hat,
2068 a, pgsz, &ppa[pidx], sptd->spt_prot,
2069 HAT_LOAD_LOCK | HAT_LOAD_SHARE);
2070 }
2071 } else {
2072 /*
2073 * Migrate pages marked for migration
2074 */
2075 if (lgrp_optimizations())
2076 page_migrate(seg, shm_addr, ppa, npages);
2077
2078 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2079 hat_memload_array(sptseg->s_as->a_hat,
2080 a, pgsz, &ppa[pidx],
2081 sptd->spt_prot,
2082 HAT_LOAD_SHARE);
2083 }
2084
2085 /*
2086 * And now drop the SE_SHARED lock(s).
2087 */
2088 if (dyn_ism_unmap) {
2089 for (i = 0; i < npages; i++) {
2090 page_unlock(ppa[i]);
2091 }
2092 }
2093 }
2094
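/*
 * On platforms without HAT_DYNAMIC_ISM_UNMAP the whole chunk is mapped
 * into this address space with a single hat_share() call; the SE_SHARED
 * page locks taken by spt_anon_getpages() are dropped here for F_INVAL,
 * while F_SOFTLOCK keeps them held.
 */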
2095 if (!dyn_ism_unmap) {
2096 if (hat_share(seg->s_as->a_hat, shm_addr,
2097 curspt->a_hat, segspt_addr, ptob(npages),
2098 seg->s_szc) != 0) {
2099 panic("hat_share err in DISM fault");
2100 /* NOTREACHED */
2101 }
2102 if (type == F_INVAL) {
2103 for (i = 0; i < npages; i++) {
2104 page_unlock(ppa[i]);
2105 }
2106 }
2107 }
2108 AS_LOCK_EXIT(sptseg->s_as);
2109 dism_err:
2110 kmem_free(ppa, npages * sizeof (page_t *));
2111 return (err);
2112
2113 case F_SOFTUNLOCK:
2114
2115 /*
2116 * This is a bit ugly: we pass in the real seg pointer,
2117 * but segspt_addr is the virtual address within the
2118 * dummy seg.
2119 */
2120 segspt_softunlock(seg, segspt_addr, size, rw);
2121 return (0);
2122
2123 case F_PROT:
2124
2125 /*
2126 * This takes care of the unusual case where a user
2127 * allocates a stack in shared memory and a register
2128 * window overflow is written to that stack page before
2129 * it is otherwise modified.
2130 *
2131 * We can get away with this because ISM segments are
2132 * always rw. Other than this unusual case, there
2133 * should be no instances of protection violations.
2134 */
2135 return (0);
2136
2137 default:
2138 #ifdef DEBUG
2139 panic("segspt_dismfault default type?");
2140 #else
2141 return (FC_NOMAP);
2142 #endif
2143 }
2144 }
2145
2146
2147 faultcode_t
2148 segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr,
2149 size_t len, enum fault_type type, enum seg_rw rw)
2150 {
2151 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2152 struct seg *sptseg = shmd->shm_sptseg;
2153 struct as *curspt = shmd->shm_sptas;
2154 struct spt_data *sptd = sptseg->s_data;
2155 pgcnt_t npages;
2156 size_t size;
2157 caddr_t sptseg_addr, shm_addr;
2158 page_t *pp, **ppa;
2159 int i;
2160 u_offset_t offset;
2161 ulong_t anon_index = 0;
2162 struct vnode *vp;
2163 struct anon_map *amp; /* XXX - for locknest */
2164 struct anon *ap = NULL;
2165 size_t pgsz;
2166 pgcnt_t pgcnt;
2167 caddr_t a;
2168 pgcnt_t pidx;
2169 size_t sz;
2170
2171 #ifdef lint
2172 hat = hat;
2173 #endif
2174
2175 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2176
2177 if (sptd->spt_flags & SHM_PAGEABLE) {
2178 return (segspt_dismfault(hat, seg, addr, len, type, rw));
2179 }
2180
2181 /*
2182 * Because of the way spt is implemented
2183 * the realsize of the segment does not have to be
2184 * equal to the segment size itself. The segment size is
2185 * often in multiples of a page size larger than PAGESIZE.
2186 * The realsize is rounded up to the nearest PAGESIZE
2187 * based on what the user requested. This is a bit of
2188 * ugliness that is historical but not easily fixed
2189 * without re-designing the higher levels of ISM.
2190 */
2191 ASSERT(addr >= seg->s_base);
2192 if (((addr + len) - seg->s_base) > sptd->spt_realsize)
2193 return (FC_NOMAP);
2194 /*
2195 * For all of the following cases except F_PROT, we need to
2196 * make any necessary adjustments to addr and len
2197 * and get all of the necessary page_t's into an array called ppa[].
2198 *
2199 * The code in shmat() forces base addr and len of ISM segment
2200 * to be aligned to largest page size supported. Therefore,
2201 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
2202 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
2203 * in large pagesize chunks, or else we will screw up the HAT
2204 * layer by calling hat_memload_array() with differing page sizes
2205 * over a given virtual range.
2206 */
2207 pgsz = page_get_pagesize(sptseg->s_szc);
2208 pgcnt = page_get_pagecnt(sptseg->s_szc);
2209 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
2210 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
2211 npages = btopr(size);
2212
2213 /*
2214 * Now we need to convert from addr in segshm to addr in segspt.
2215 */
2216 anon_index = seg_page(seg, shm_addr);
2217 sptseg_addr = sptseg->s_base + ptob(anon_index);
2218
2219 /*
2220 * And now we may have to adjust npages downward if we have
2221 * exceeded the realsize of the segment or initial anon
2222 * allocations.
2223 */
2224 if ((sptseg_addr + ptob(npages)) >
2225 (sptseg->s_base + sptd->spt_realsize))
2226 size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr;
2227
2228 npages = btopr(size);
2229
2230 ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size));
2231 ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0);
2232
2233 switch (type) {
2234
2235 case F_SOFTLOCK:
2236
2237 /*
2238 * availrmem is decremented once during anon_swap_adjust()
2239 * and is incremented during the anon_unresv(), which is
2240 * called from shm_rm_amp() when the segment is destroyed.
2241 */
2242 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
2243 /*
2244 * Some platforms assume that ISM pages are SE_SHARED
2245 * locked for the entire life of the segment.
2246 */
2247 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0))
2248 return (0);
2249 /*
2250 * Fall through to the F_INVAL case to load up the hat layer
2251 * entries with the HAT_LOAD_LOCK flag.
2252 */
2253
2254 /* FALLTHRU */
2255 case F_INVAL:
2256
2257 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
2258 return (FC_NOMAP);
2259
2260 /*
2261 * Some platforms that do NOT support DYNAMIC_ISM_UNMAP
2262 * may still rely on this call to hat_share(). That
2263 * would imply that those HATs can fault on a
2264 * HAT_LOAD_LOCK translation, which would seem
2265 * contradictory.
2266 */
2267 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2268 if (hat_share(seg->s_as->a_hat, seg->s_base,
2269 curspt->a_hat, sptseg->s_base,
2270 sptseg->s_size, sptseg->s_szc) != 0) {
2271 panic("hat_share error in ISM fault");
2272 /*NOTREACHED*/
2273 }
2274 return (0);
2275 }
2276 ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP);
2277
2278 /*
2279 * There is no need to lock the real seg here,
2280 * because all of our work will be on the underlying
2281 * dummy seg.
2282 *
2283 * sptseg_addr and npages now account for large pages.
2284 */
2285 amp = sptd->spt_amp;
2286 ASSERT(amp != NULL);
2287 anon_index = seg_page(sptseg, sptseg_addr);
2288
2289 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2290 for (i = 0; i < npages; i++) {
2291 ap = anon_get_ptr(amp->ahp, anon_index++);
2292 ASSERT(ap != NULL);
2293 swap_xlate(ap, &vp, &offset);
2294 pp = page_lookup(vp, offset, SE_SHARED);
2295 ASSERT(pp != NULL);
2296 ppa[i] = pp;
2297 }
2298 ANON_LOCK_EXIT(&amp->a_rwlock);
2299 ASSERT(i == npages);
2300
2301 /*
2302 * We are already holding the as->a_lock on the user's
2303 * real segment, but we need to hold the a_lock on the
2304 * underlying dummy as. This is mostly to satisfy the
2305 * underlying HAT layer.
2306 */
2307 AS_LOCK_ENTER(sptseg->s_as, RW_READER);
2308 a = sptseg_addr;
2309 pidx = 0;
2310 if (type == F_SOFTLOCK) {
2311 /*
2312 * Load up the translation keeping it
2313 * locked and don't unlock the page.
2314 */
2315 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2316 sz = MIN(pgsz, ptob(npages - pidx));
2317 hat_memload_array(sptseg->s_as->a_hat, a,
2318 sz, &ppa[pidx], sptd->spt_prot,
2319 HAT_LOAD_LOCK | HAT_LOAD_SHARE);
2320 }
2321 } else {
2322 /*
2323 * Migrate pages marked for migration.
2324 */
2325 if (lgrp_optimizations())
2326 page_migrate(seg, shm_addr, ppa, npages);
2327
2328 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2329 sz = MIN(pgsz, ptob(npages - pidx));
2330 hat_memload_array(sptseg->s_as->a_hat,
2331 a, sz, &ppa[pidx],
2332 sptd->spt_prot, HAT_LOAD_SHARE);
2333 }
2334
2335 /*
2336 * And now drop the SE_SHARED lock(s).
2337 */
2338 for (i = 0; i < npages; i++)
2339 page_unlock(ppa[i]);
2340 }
2341 AS_LOCK_EXIT(sptseg->s_as);
2342
2343 kmem_free(ppa, sizeof (page_t *) * npages);
2344 return (0);
2345 case F_SOFTUNLOCK:
2346
2347 /*
2348 * This is a bit ugly: we pass in the real seg pointer,
2349 * but sptseg_addr is the virtual address within the
2350 * dummy seg.
2351 */
2352 segspt_softunlock(seg, sptseg_addr, ptob(npages), rw);
2353 return (0);
2354
2355 case F_PROT:
2356
2357 /*
2358 * This takes care of the unusual case where a user
2359 * allocates a stack in shared memory and a register
2360 * window overflow is written to that stack page before
2361 * it is otherwise modified.
2362 *
2363 * We can get away with this because ISM segments are
2364 * always rw. Other than this unusual case, there
2365 * should be no instances of protection violations.
2366 */
2367 return (0);
2368
2369 default:
2370 #ifdef DEBUG
2371 cmn_err(CE_WARN, "segspt_shmfault default type?");
2372 #endif
2373 return (FC_NOMAP);
2374 }
2375 }
2376
2377 /*ARGSUSED*/
2378 static faultcode_t
2379 segspt_shmfaulta(struct seg *seg, caddr_t addr)
2380 {
2381 return (0);
2382 }
2383
2384 /*ARGSUSED*/
2385 static int
2386 segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta)
2387 {
2388 return (0);
2389 }
2390
2391 /*ARGSUSED*/
2392 static size_t
2393 segspt_shmswapout(struct seg *seg)
2394 {
2395 return (0);
2396 }
2397
2398 /*
2399 * duplicate the shared page tables
2400 */
2401 int
2402 segspt_shmdup(struct seg *seg, struct seg *newseg)
2403 {
2404 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2405 struct anon_map *amp = shmd->shm_amp;
2406 struct shm_data *shmd_new;
2407 struct seg *spt_seg = shmd->shm_sptseg;
2408 struct spt_data *sptd = spt_seg->s_data;
2409 int error = 0;
2410
2411 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
2412
2413 shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
2414 newseg->s_data = (void *)shmd_new;
2415 shmd_new->shm_sptas = shmd->shm_sptas;
2416 shmd_new->shm_amp = amp;
2417 shmd_new->shm_sptseg = shmd->shm_sptseg;
2418 newseg->s_ops = &segspt_shmops;
2419 newseg->s_szc = seg->s_szc;
2420 ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);
2421
2422 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2423 amp->refcnt++;
2424 ANON_LOCK_EXIT(&amp->a_rwlock);
2425
2426 if (sptd->spt_flags & SHM_PAGEABLE) {
2427 shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP);
2428 shmd_new->shm_lckpgs = 0;
2429 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2430 if ((error = hat_share(newseg->s_as->a_hat,
2431 newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR,
2432 seg->s_size, seg->s_szc)) != 0) {
2433 kmem_free(shmd_new->shm_vpage,
2434 btopr(amp->size));
2435 }
2436 }
2437 return (error);
2438 } else {
2439 return (hat_share(newseg->s_as->a_hat, newseg->s_base,
2440 shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size,
2441 seg->s_szc));
2442
2443 }
2444 }
2445
2446 /*ARGSUSED*/
2447 int
2448 segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
2449 {
2450 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2451 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2452
2453 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2454
2455 /*
2456 * ISM segment is always rw.
2457 */
2458 return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
2459 }
2460
2461 /*
2462 * Return an array of locked large pages; for empty slots, allocate
2463 * private zero-filled anon pages.
2464 */
2465 static int
2466 spt_anon_getpages(
2467 struct seg *sptseg,
2468 caddr_t sptaddr,
2469 size_t len,
2470 page_t *ppa[])
2471 {
2472 struct spt_data *sptd = sptseg->s_data;
2473 struct anon_map *amp = sptd->spt_amp;
2474 enum seg_rw rw = sptd->spt_prot;
2475 uint_t szc = sptseg->s_szc;
2476 size_t pg_sz, share_sz = page_get_pagesize(szc);
2477 pgcnt_t lp_npgs;
2478 caddr_t lp_addr, e_sptaddr;
2479 uint_t vpprot, ppa_szc = 0;
2480 struct vpage *vpage = NULL;
2481 ulong_t j, ppa_idx;
2482 int err, ierr = 0;
2483 pgcnt_t an_idx;
2484 anon_sync_obj_t cookie;
2485 int anon_locked = 0;
2486 pgcnt_t amp_pgs;
2487
2488
2489 ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz));
2490 ASSERT(len != 0);
2491
2492 pg_sz = share_sz;
2493 lp_npgs = btop(pg_sz);
2494 lp_addr = sptaddr;
2495 e_sptaddr = sptaddr + len;
2496 an_idx = seg_page(sptseg, sptaddr);
2497 ppa_idx = 0;
2498
2499 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2500
2501 amp_pgs = page_get_pagecnt(amp->a_szc);
2502
2503 /*CONSTCOND*/
2504 while (1) {
2505 for (; lp_addr < e_sptaddr;
2506 an_idx += lp_npgs, lp_addr += pg_sz, ppa_idx += lp_npgs) {
2507
2508 /*
2509 * If we're currently locked, and we get to a new
2510 * page, unlock our current anon chunk.
2511 */
2512 if (anon_locked && P2PHASE(an_idx, amp_pgs) == 0) {
2513 anon_array_exit(&cookie);
2514 anon_locked = 0;
2515 }
2516 if (!anon_locked) {
2517 anon_array_enter(amp, an_idx, &cookie);
2518 anon_locked = 1;
2519 }
2520 ppa_szc = (uint_t)-1;
2521 ierr = anon_map_getpages(amp, an_idx, szc, sptseg,
2522 lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx],
2523 &ppa_szc, vpage, rw, 0, segvn_anypgsz, 0, kcred);
2524
2525 if (ierr != 0) {
2526 if (ierr > 0) {
2527 err = FC_MAKE_ERR(ierr);
2528 goto lpgs_err;
2529 }
2530 break;
2531 }
2532 }
2533 if (lp_addr == e_sptaddr) {
2534 break;
2535 }
2536 ASSERT(lp_addr < e_sptaddr);
2537
2538 /*
2539 * ierr == -1 means we failed to allocate a large page.
2540 * so do a size down operation.
2541 *
2542 * ierr == -2 means some other process that privately shares
2543 * pages with this process has allocated a larger page and we
2544 * need to retry with larger pages. So do a size up
2545 * operation. This relies on the fact that large pages are
2546 * never partially shared i.e. if we share any constituent
2547 * page of a large page with another process we must share the
2548 * entire large page. Note this cannot happen for SOFTLOCK
2549 * case, unless the current address (lp_addr) is at the beginning
2550 * of the next page size boundary because the other process
2551 * couldn't have relocated locked pages.
2552 */
2553 ASSERT(ierr == -1 || ierr == -2);
2554 if (segvn_anypgsz) {
2555 ASSERT(ierr == -2 || szc != 0);
2556 ASSERT(ierr == -1 || szc < sptseg->s_szc);
2557 szc = (ierr == -1) ? szc - 1 : szc + 1;
2558 } else {
2559 /*
2560 * For faults and segvn_anypgsz == 0
2561 * we need to be careful not to loop forever
2562 * if existing page is found with szc other
2563 * than 0 or seg->s_szc. This could be due
2564 * to page relocations on behalf of DR or
2565 * more likely large page creation. For this
2566 * case simply re-size to existing page's szc
2567 * if returned by anon_map_getpages().
2568 */
2569 if (ppa_szc == (uint_t)-1) {
2570 szc = (ierr == -1) ? 0 : sptseg->s_szc;
2571 } else {
2572 ASSERT(ppa_szc <= sptseg->s_szc);
2573 ASSERT(ierr == -2 || ppa_szc < szc);
2574 ASSERT(ierr == -1 || ppa_szc > szc);
2575 szc = ppa_szc;
2576 }
2577 }
2578 pg_sz = page_get_pagesize(szc);
2579 lp_npgs = btop(pg_sz);
2580 ASSERT(IS_P2ALIGNED(lp_addr, pg_sz));
2581 }
2582 if (anon_locked) {
2583 anon_array_exit(&cookie);
2584 }
2585 ANON_LOCK_EXIT(&amp->a_rwlock);
2586 return (0);
2587
2588 lpgs_err:
2589 if (anon_locked) {
2590 anon_array_exit(&cookie);
2591 }
2592 ANON_LOCK_EXIT(&amp->a_rwlock);
2593 for (j = 0; j < ppa_idx; j++)
2594 page_unlock(ppa[j]);
2595 return (err);
2596 }
2597
2598 /*
2599 * count the number of bytes in a set of spt pages that are currently not
2600 * locked
2601 */
2602 static rctl_qty_t
2603 spt_unlockedbytes(pgcnt_t npages, page_t **ppa)
2604 {
2605 ulong_t i;
2606 rctl_qty_t unlocked = 0;
2607
2608 for (i = 0; i < npages; i++) {
2609 if (ppa[i]->p_lckcnt == 0)
2610 unlocked += PAGESIZE;
2611 }
2612 return (unlocked);
2613 }
2614
2615 extern u_longlong_t randtick(void);
2616 /* number of locks to reserve/skip by spt_lockpages() and spt_unlockpages() */
2617 #define NLCK (NCPU_P2)
2618 /* Random number with a range [0, n-1], n must be power of two */
2619 #define RAND_P2(n) \
2620 ((((long)curthread >> PTR24_LSB) ^ (long)randtick()) & ((n) - 1))
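/*
 * Illustrative example: if NCPU_P2 were 8, NLCK + RAND_P2(NLCK) yields a
 * reservation in the range [8, 15], so concurrent lockers stagger their
 * visits to freemem_lock rather than all reserving the same batch size.
 */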
2621
2622 int
2623 spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2624 page_t **ppa, ulong_t *lockmap, size_t pos,
2625 rctl_qty_t *locked)
2626 {
2627 struct shm_data *shmd = seg->s_data;
2628 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2629 ulong_t i;
2630 int kernel;
2631 pgcnt_t nlck = 0;
2632 int rv = 0;
2633 int use_reserved = 1;
2634
2635 /* return the number of bytes actually locked */
2636 *locked = 0;
2637
2638 /*
2639 * To avoid contention on freemem_lock, availrmem and pages_locked
2640 * global counters are updated only every nlck locked pages instead of
2641 * every time. Reserve nlck locks up front and deduct from this
2642 * reservation for each page that requires a lock. When the reservation
2643 * is consumed, reserve again. nlck is randomized, so the competing
2644 * threads do not fall into a cyclic lock contention pattern. When
2645 * memory is low, the lock ahead is disabled, and instead page_pp_lock()
2646 * is used to lock pages.
2647 */
2648 for (i = 0; i < npages; anon_index++, pos++, i++) {
2649 if (nlck == 0 && use_reserved == 1) {
2650 nlck = NLCK + RAND_P2(NLCK);
2651 /* if fewer loops left, decrease nlck */
2652 nlck = MIN(nlck, npages - i);
2653 /*
2654 * Reserve nlck locks up front and deduct from this
2655 * reservation for each page that requires a lock. When
2656 * the reservation is consumed, reserve again.
2657 */
2658 mutex_enter(&freemem_lock);
2659 if ((availrmem - nlck) < pages_pp_maximum) {
2660 /* Do not do advance memory reserves */
2661 use_reserved = 0;
2662 } else {
2663 availrmem -= nlck;
2664 pages_locked += nlck;
2665 }
2666 mutex_exit(&freemem_lock);
2667 }
2668 if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) {
2669 if (sptd->spt_ppa_lckcnt[anon_index] <
2670 (ushort_t)DISM_LOCK_MAX) {
2671 if (++sptd->spt_ppa_lckcnt[anon_index] ==
2672 (ushort_t)DISM_LOCK_MAX) {
2673 cmn_err(CE_WARN,
2674 "DISM page lock limit "
2675 "reached on DISM offset 0x%lx\n",
2676 anon_index << PAGESHIFT);
2677 }
2678 kernel = (sptd->spt_ppa &&
2679 sptd->spt_ppa[anon_index]);
2680 if (!page_pp_lock(ppa[i], 0, kernel ||
2681 use_reserved)) {
2682 sptd->spt_ppa_lckcnt[anon_index]--;
2683 rv = EAGAIN;
2684 break;
2685 }
2686 /* if this is a newly locked page, count it */
2687 if (ppa[i]->p_lckcnt == 1) {
2688 if (kernel == 0 && use_reserved == 1)
2689 nlck--;
2690 *locked += PAGESIZE;
2691 }
2692 shmd->shm_lckpgs++;
2693 shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED;
2694 if (lockmap != NULL)
2695 BT_SET(lockmap, pos);
2696 }
2697 }
2698 }
2699 /* Return unused lock reservation */
2700 if (nlck != 0 && use_reserved == 1) {
2701 mutex_enter(&freemem_lock);
2702 availrmem += nlck;
2703 pages_locked -= nlck;
2704 mutex_exit(&freemem_lock);
2705 }
2706
2707 return (rv);
2708 }
2709
2710 int
2711 spt_unlockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2712 rctl_qty_t *unlocked)
2713 {
2714 struct shm_data *shmd = seg->s_data;
2715 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2716 struct anon_map *amp = sptd->spt_amp;
2717 struct anon *ap;
2718 struct vnode *vp;
2719 u_offset_t off;
2720 struct page *pp;
2721 int kernel;
2722 anon_sync_obj_t cookie;
2723 ulong_t i;
2724 pgcnt_t nlck = 0;
2725 pgcnt_t nlck_limit = NLCK;
2726
2727 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2728 for (i = 0; i < npages; i++, anon_index++) {
2729 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
2730 anon_array_enter(amp, anon_index, &cookie);
2731 ap = anon_get_ptr(amp->ahp, anon_index);
2732 ASSERT(ap);
2733
2734 swap_xlate(ap, &vp, &off);
2735 anon_array_exit(&cookie);
2736 pp = page_lookup(vp, off, SE_SHARED);
2737 ASSERT(pp);
2738 /*
2739 * availrmem is decremented only for pages which are not
2740 * in the seg pcache; for pages in the pcache, availrmem was
2741 * already decremented in _dismpagelock()
2742 */
2743 kernel = (sptd->spt_ppa && sptd->spt_ppa[anon_index]);
2744 ASSERT(pp->p_lckcnt > 0);
2745
2746 /*
2747 * unlock the page but do not change availrmem; we do it
2748 * ourselves every nlck loops.
2749 */
2750 page_pp_unlock(pp, 0, 1);
2751 if (pp->p_lckcnt == 0) {
2752 if (kernel == 0)
2753 nlck++;
2754 *unlocked += PAGESIZE;
2755 }
2756 page_unlock(pp);
2757 shmd->shm_vpage[anon_index] &= ~DISM_PG_LOCKED;
2758 sptd->spt_ppa_lckcnt[anon_index]--;
2759 shmd->shm_lckpgs--;
2760 }
2761
2762 /*
2763 * To reduce freemem_lock contention, do not update availrmem
2764 * until at least NLCK pages have been unlocked.
2765 * 1. No need to update if nlck is zero
2766 * 2. Always update if the last iteration
2767 */
2768 if (nlck > 0 && (nlck == nlck_limit || i == npages - 1)) {
2769 mutex_enter(&freemem_lock);
2770 availrmem += nlck;
2771 pages_locked -= nlck;
2772 mutex_exit(&freemem_lock);
2773 nlck = 0;
2774 nlck_limit = NLCK + RAND_P2(NLCK);
2775 }
2776 }
2777 ANON_LOCK_EXIT(&amp->a_rwlock);
2778
2779 return (0);
2780 }
2781
2782 /*ARGSUSED*/
2783 static int
2784 segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
2785 int attr, int op, ulong_t *lockmap, size_t pos)
2786 {
2787 struct shm_data *shmd = seg->s_data;
2788 struct seg *sptseg = shmd->shm_sptseg;
2789 struct spt_data *sptd = sptseg->s_data;
2790 struct kshmid *sp = sptd->spt_amp->a_sp;
2791 pgcnt_t npages, a_npages;
2792 page_t **ppa;
2793 pgcnt_t an_idx, a_an_idx, ppa_idx;
2794 caddr_t spt_addr, a_addr; /* spt and aligned address */
2795 size_t a_len; /* aligned len */
2796 size_t share_sz;
2797 ulong_t i;
2798 int sts = 0;
2799 rctl_qty_t unlocked = 0;
2800 rctl_qty_t locked = 0;
2801 struct proc *p = curproc;
2802 kproject_t *proj;
2803
2804 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2805 ASSERT(sp != NULL);
2806
2807 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
2808 return (0);
2809 }
2810
2811 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2812 an_idx = seg_page(seg, addr);
2813 npages = btopr(len);
2814
2815 if (an_idx + npages > btopr(shmd->shm_amp->size)) {
2816 return (ENOMEM);
2817 }
2818
2819 /*
2820 * A shm's project never changes, so no lock needed.
2821 * The shm has a hold on the project, so it will not go away.
2822 * Since we have a mapping to shm within this zone, we know
2823 * that the zone will not go away.
2824 */
2825 proj = sp->shm_perm.ipc_proj;
2826
2827 if (op == MC_LOCK) {
2828
2829 /*
2830 * Need to align the addr and size request if they are not
2831 * aligned, so we can always allocate large page(s); however,
2832 * we only lock what was requested in the initial request.
2833 */
2834 share_sz = page_get_pagesize(sptseg->s_szc);
2835 a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
2836 a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)),
2837 share_sz);
2838 a_npages = btop(a_len);
2839 a_an_idx = seg_page(seg, a_addr);
2840 spt_addr = sptseg->s_base + ptob(a_an_idx);
2841 ppa_idx = an_idx - a_an_idx;
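/*
 * ppa_idx is the offset of the first page of the original (unaligned)
 * request within the large-page-aligned ppa[] array; only
 * ppa[ppa_idx .. ppa_idx + npages - 1] are subject to locking, the
 * rest are brought in solely to satisfy large-page allocation.
 */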
2842
2843 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages),
2844 KM_NOSLEEP)) == NULL) {
2845 return (ENOMEM);
2846 }
2847
2848 /*
2849 * Don't cache any new pages for IO and
2850 * flush any cached pages.
2851 */
2852 mutex_enter(&sptd->spt_lock);
2853 if (sptd->spt_ppa != NULL)
2854 sptd->spt_flags |= DISM_PPA_CHANGED;
2855
2856 sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa);
2857 if (sts != 0) {
2858 mutex_exit(&sptd->spt_lock);
2859 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2860 return (sts);
2861 }
2862
2863 mutex_enter(&sp->shm_mlock);
2864 /* enforce locked memory rctl */
2865 unlocked = spt_unlockedbytes(npages, &ppa[ppa_idx]);
2866
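/*
 * Charge the project's locked-memory rctl only for bytes that are not
 * already locked (spt_unlockedbytes() above); if spt_lockpages() cannot
 * lock everything, the excess charge is backed out below with
 * rctl_decr_locked_mem().
 */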
2867 mutex_enter(&p->p_lock);
2868 if (rctl_incr_locked_mem(p, proj, unlocked, 0)) {
2869 mutex_exit(&p->p_lock);
2870 sts = EAGAIN;
2871 } else {
2872 mutex_exit(&p->p_lock);
2873 sts = spt_lockpages(seg, an_idx, npages,
2874 &ppa[ppa_idx], lockmap, pos, &locked);
2875
2876 /*
2877 * correct locked count if not all pages could be
2878 * locked
2879 */
2880 if ((unlocked - locked) > 0) {
2881 rctl_decr_locked_mem(NULL, proj,
2882 (unlocked - locked), 0);
2883 }
2884 }
2885 /*
2886 * unlock pages
2887 */
2888 for (i = 0; i < a_npages; i++)
2889 page_unlock(ppa[i]);
2890 if (sptd->spt_ppa != NULL)
2891 sptd->spt_flags |= DISM_PPA_CHANGED;
2892 mutex_exit(&sp->shm_mlock);
2893 mutex_exit(&sptd->spt_lock);
2894
2895 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2896
2897 } else if (op == MC_UNLOCK) { /* unlock */
2898 page_t **ppa;
2899
2900 mutex_enter(&sptd->spt_lock);
2901 if (shmd->shm_lckpgs == 0) {
2902 mutex_exit(&sptd->spt_lock);
2903 return (0);
2904 }
2905 /*
2906 * Don't cache new IO pages.
2907 */
2908 if (sptd->spt_ppa != NULL)
2909 sptd->spt_flags |= DISM_PPA_CHANGED;
2910
2911 mutex_enter(&sp->shm_mlock);
2912 sts = spt_unlockpages(seg, an_idx, npages, &unlocked);
2913 if ((ppa = sptd->spt_ppa) != NULL)
2914 sptd->spt_flags |= DISM_PPA_CHANGED;
2915 mutex_exit(&sptd->spt_lock);
2916
2917 rctl_decr_locked_mem(NULL, proj, unlocked, 0);
2918 mutex_exit(&sp->shm_mlock);
2919
2920 if (ppa != NULL)
2921 seg_ppurge_wiredpp(ppa);
2922 }
2923 return (sts);
2924 }
2925
2926 /*ARGSUSED*/
2927 int
2928 segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
2929 {
2930 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2931 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2932 spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1;
2933
2934 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2935
2936 /*
2937 * ISM segment is always rw.
2938 */
2939 while (--pgno >= 0)
2940 *protv++ = sptd->spt_prot;
2941 return (0);
2942 }
2943
2944 /*ARGSUSED*/
2945 u_offset_t
2946 segspt_shmgetoffset(struct seg *seg, caddr_t addr)
2947 {
2948 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2949
2950 /* Offset does not matter in ISM memory */
2951
2952 return ((u_offset_t)0);
2953 }
2954
2955 /* ARGSUSED */
2956 int
2957 segspt_shmgettype(struct seg *seg, caddr_t addr)
2958 {
2959 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2960 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2961
2962 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2963
2964 /*
2965 * The shared memory mapping is always MAP_SHARED, SWAP is only
2966 * reserved for DISM
2967 */
2968 return (MAP_SHARED |
2969 ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
2970 }
2971
2972 /*ARGSUSED*/
2973 int
2974 segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
2975 {
2976 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2977 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2978
2979 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2980
2981 *vpp = sptd->spt_vp;
2982 return (0);
2983 }
2984
2985 /*
2986 * We need to wait for pending IO to complete to a DISM segment in order for
2987 * pages to get kicked out of the seg_pcache. 120 seconds should be more
2988 * than enough time to wait.
2989 */
2990 static clock_t spt_pcache_wait = 120;
2991
2992 /*ARGSUSED*/
2993 static int
2994 segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
2995 {
2996 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2997 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2998 struct anon_map *amp;
2999 pgcnt_t pg_idx;
3000 ushort_t gen;
3001 clock_t end_lbolt;
3002 int writer;
3003 page_t **ppa;
3004
3005 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
3006
3007 if (behav == MADV_FREE || behav == MADV_PURGE) {
3008 if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
3009 return (0);
3010
3011 amp = sptd->spt_amp;
3012 pg_idx = seg_page(seg, addr);
3013
3014 mutex_enter(&sptd->spt_lock);
3015 if ((ppa = sptd->spt_ppa) == NULL) {
3016 mutex_exit(&sptd->spt_lock);
3017 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
3018 (void) anon_disclaim(amp, pg_idx, len, behav, NULL);
3019 ANON_LOCK_EXIT(&amp->a_rwlock);
3020 return (0);
3021 }
3022
3023 sptd->spt_flags |= DISM_PPA_CHANGED;
3024 gen = sptd->spt_gen;
3025
3026 mutex_exit(&sptd->spt_lock);
3027
3028 /*
3029 * Purge all DISM cached pages
3030 */
3031 seg_ppurge_wiredpp(ppa);
3032
3033 /*
3034 * Drop the AS_LOCK so that other threads can grab it
3035 * in the as_pageunlock path and hopefully get the segment
3036 * kicked out of the seg_pcache. We bump the shm_softlockcnt
3037 * to keep this segment resident.
3038 */
3039 writer = AS_WRITE_HELD(seg->s_as);
3040 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
3041 AS_LOCK_EXIT(seg->s_as);
3042
3043 mutex_enter(&sptd->spt_lock);
3044
3045 end_lbolt = ddi_get_lbolt() + (hz * spt_pcache_wait);
3046
3047 /*
3048 * Try to wait for pages to get kicked out of the seg_pcache.
3049 */
3050 while (sptd->spt_gen == gen &&
3051 (sptd->spt_flags & DISM_PPA_CHANGED) &&
3052 ddi_get_lbolt() < end_lbolt) {
3053 if (!cv_timedwait_sig(&sptd->spt_cv,
3054 &sptd->spt_lock, end_lbolt)) {
3055 break;
3056 }
3057 }
3058
3059 mutex_exit(&sptd->spt_lock);
3060
3061 /* Regrab the AS_LOCK and release our hold on the segment */
3062 AS_LOCK_ENTER(seg->s_as, writer ? RW_WRITER : RW_READER);
3063 atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
3064 if (shmd->shm_softlockcnt <= 0) {
3065 if (AS_ISUNMAPWAIT(seg->s_as)) {
3066 mutex_enter(&seg->s_as->a_contents);
3067 if (AS_ISUNMAPWAIT(seg->s_as)) {
3068 AS_CLRUNMAPWAIT(seg->s_as);
3069 cv_broadcast(&seg->s_as->a_cv);
3070 }
3071 mutex_exit(&seg->s_as->a_contents);
3072 }
3073 }
3074
3075 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
3076 (void) anon_disclaim(amp, pg_idx, len, behav, NULL);
3077 ANON_LOCK_EXIT(&amp->a_rwlock);
3078 } else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
3079 behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
3080 int already_set;
3081 ulong_t anon_index;
3082 lgrp_mem_policy_t policy;
3083 caddr_t shm_addr;
3084 size_t share_size;
3085 size_t size;
3086 struct seg *sptseg = shmd->shm_sptseg;
3087 caddr_t sptseg_addr;
3088
3089 /*
3090 * Align address and length to page size of underlying segment
3091 */
3092 share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
3093 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
3094 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)),
3095 share_size);
3096
3097 amp = shmd->shm_amp;
3098 anon_index = seg_page(seg, shm_addr);
3099
3100 /*
3101 * And now we may have to adjust size downward if we have
3102 * exceeded the realsize of the segment or initial anon
3103 * allocations.
3104 */
3105 sptseg_addr = sptseg->s_base + ptob(anon_index);
3106 if ((sptseg_addr + size) >
3107 (sptseg->s_base + sptd->spt_realsize))
3108 size = (sptseg->s_base + sptd->spt_realsize) -
3109 sptseg_addr;
3110
3111 /*
3112 * Set memory allocation policy for this segment
3113 */
3114 policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
3115 already_set = lgrp_shm_policy_set(policy, amp, anon_index,
3116 NULL, 0, len);
3117
3118 /*
3119 * If the policy is already set and is not one that should be
3120 * reapplied, there is nothing more to do.
3121 */
3122 if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
3123 return (0);
3124
3125 /*
3126 * Mark any existing pages in the given range for
3127 * migration, flushing the I/O page cache and using the
3128 * underlying segment to calculate the anon index and to
3129 * look up the anonmap and vnode pointer.
3130 */
3131 if (shmd->shm_softlockcnt > 0)
3132 segspt_purge(seg);
3133
3134 page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
3135 }
3136
3137 return (0);
3138 }
3139
3140 /*ARGSUSED*/
3141 void
3142 segspt_shmdump(struct seg *seg)
3143 {
3144 /* no-op for ISM segment */
3145 }
3146
3147 /*ARGSUSED*/
3148 static int
3149 segspt_shmsetpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
3150 {
3151 return (ENOTSUP);
3152 }
3153
3154 /*
3155 * get a memory ID for an addr in a given segment
3156 */
3157 static int
3158 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
3159 {
3160 struct shm_data *shmd = (struct shm_data *)seg->s_data;
3161 struct anon *ap;
3162 size_t anon_index;
3163 struct anon_map *amp = shmd->shm_amp;
3164 struct spt_data *sptd = shmd->shm_sptseg->s_data;
3165 struct seg *sptseg = shmd->shm_sptseg;
3166 anon_sync_obj_t cookie;
3167
3168 anon_index = seg_page(seg, addr);
3169
3170 if (addr > (seg->s_base + sptd->spt_realsize)) {
3171 return (EFAULT);
3172 }
3173
3174 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
3175 anon_array_enter(amp, anon_index, &cookie);
3176 ap = anon_get_ptr(amp->ahp, anon_index);
3177 if (ap == NULL) {
3178 struct page *pp;
3179 caddr_t spt_addr = sptseg->s_base + ptob(anon_index);
3180
3181 pp = anon_zero(sptseg, spt_addr, &ap, kcred);
3182 if (pp == NULL) {
3183 anon_array_exit(&cookie);
3184 ANON_LOCK_EXIT(&amp->a_rwlock);
3185 return (ENOMEM);
3186 }
3187 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
3188 page_unlock(pp);
3189 }
3190 anon_array_exit(&cookie);
3191 ANON_LOCK_EXIT(&amp->a_rwlock);
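/*
 * The memid is the anon slot pointer plus the byte offset within the
 * page, which together identify the backing anon page for this address.
 */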
3192 memidp->val[0] = (uintptr_t)ap;
3193 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
3194 return (0);
3195 }
3196
3197 /*
3198 * Get memory allocation policy info for specified address in given segment
3199 */
3200 static lgrp_mem_policy_info_t *
3201 segspt_shmgetpolicy(struct seg *seg, caddr_t addr)
3202 {
3203 struct anon_map *amp;
3204 ulong_t anon_index;
3205 lgrp_mem_policy_info_t *policy_info;
3206 struct shm_data *shm_data;
3207
3208 ASSERT(seg != NULL);
3209
3210 /*
3211 * Get anon_map from segshm
3212 *
3213 * Assume that no lock needs to be held on anon_map, since
3214 * it should be protected by its reference count which must be
3215 * nonzero for an existing segment
3216 * Need to grab readers lock on policy tree though
3217 */
3218 shm_data = (struct shm_data *)seg->s_data;
3219 if (shm_data == NULL)
3220 return (NULL);
3221 amp = shm_data->shm_amp;
3222 ASSERT(amp->refcnt != 0);
3223
3224 /*
3225 * Get policy info
3226 *
3227 * Assume starting anon index of 0
3228 */
3229 anon_index = seg_page(seg, addr);
3230 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
3231
3232 return (policy_info);
3233 }
3234
3235 /*ARGSUSED*/
3236 static int
3237 segspt_shmcapable(struct seg *seg, segcapability_t capability)
3238 {
3239 return (0);
3240 }